diff --git a/pkg/terraform/exec/plugins/Gopkg.lock b/pkg/terraform/exec/plugins/Gopkg.lock index 9f5937db36a..93caa005499 100644 --- a/pkg/terraform/exec/plugins/Gopkg.lock +++ b/pkg/terraform/exec/plugins/Gopkg.lock @@ -2,7 +2,7 @@ [[projects]] - digest = "1:05a9cb1a5eeaf2e8111957459b5eca270e0becc39f701ea663d0afcd6b9433f1" + digest = "1:2874de5cf8e832433f0b3153ca900b4818b38d1126444fe2fefd73761cfab50e" name = "cloud.google.com/go" packages = [ "bigtable", @@ -10,10 +10,13 @@ "bigtable/internal/option", "compute/metadata", "iam", + "internal", "internal/optional", "internal/trace", + "internal/version", "longrunning", "longrunning/autogen", + "storage", ] pruneopts = "NUT" revision = "457ea5c15ccf3b87db582c450e80101989da35f7" @@ -126,6 +129,14 @@ revision = "41959bdd855fb7db467f78865d5f9044507df1cd" version = "v2" +[[projects]] + digest = "1:8bd40ec66a32437126e8ff3080f26b11ca5926917daa01eba1285d0b059416bc" + name = "github.com/agext/levenshtein" + packages = ["."] + pruneopts = "NUT" + revision = "0ded9c86537917af2ff89bc9c78de6bd58477894" + version = "v1.2.2" + [[projects]] digest = "1:1929b21a34400d463a99336f8e2908d2a154dc525c52411a8d99bb519942dc4c" name = "github.com/apparentlymart/go-cidr" @@ -135,7 +146,23 @@ version = "v1.0.0" [[projects]] - digest = "1:b64410231651b7912fc7cbfed95cba05c5c61af1cc8c676e2accc79ea6c1d6ef" + digest = "1:ca4ebf76293ee2653f577bf737fbcbd90b07abb54c58cf786c02411413e5fa15" + name = "github.com/apparentlymart/go-textseg" + packages = ["textseg"] + pruneopts = "NUT" + revision = "fb01f485ebef760e5ee06d55e1b07534dda2d295" + version = "v1.0.0" + +[[projects]] + digest = "1:e5ca3dcabf1452b51be600af6e2ce0a93a94978ae231af802cf9736bdbd835cb" + name = "github.com/armon/go-radix" + packages = ["."] + pruneopts = "NUT" + revision = "1a2de0c21c94309923825da3df33a4381872c795" + version = "v1.0.0" + +[[projects]] + digest = "1:cac380c59636badf091045c9b5f5dbc4db80c53e4889f2efc35ed40d9295635c" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -161,6 +188,7 @@ "internal/ini", "internal/s3err", "internal/sdkio", + "internal/sdkmath", "internal/sdkrand", "internal/sdkuri", "internal/shareddefaults", @@ -179,13 +207,17 @@ "private/signer/v2", "service/acm", "service/acmpca", + "service/amplify", "service/apigateway", "service/apigatewayv2", "service/applicationautoscaling", + "service/applicationinsights", "service/appmesh", + "service/appstream", "service/appsync", "service/athena", "service/autoscaling", + "service/autoscalingplans", "service/backup", "service/batch", "service/budgets", @@ -230,6 +262,7 @@ "service/emr", "service/firehose", "service/fms", + "service/forecastservice", "service/fsx", "service/gamelift", "service/glacier", @@ -239,12 +272,15 @@ "service/iam", "service/inspector", "service/iot", + "service/iotanalytics", + "service/iotevents", "service/kafka", "service/kinesis", "service/kinesisanalytics", "service/kinesisanalyticsv2", "service/kinesisvideo", "service/kms", + "service/lakeformation", "service/lambda", "service/lexmodelbuildingservice", "service/licensemanager", @@ -261,8 +297,10 @@ "service/neptune", "service/opsworks", "service/organizations", + "service/personalize", "service/pinpoint", "service/pricing", + "service/qldb", "service/quicksight", "service/ram", "service/rds", @@ -278,6 +316,7 @@ "service/serverlessapplicationrepository", "service/servicecatalog", "service/servicediscovery", + "service/servicequotas", "service/ses", "service/sfn", "service/shield", @@ -287,6 +326,7 @@ "service/ssm", "service/storagegateway", "service/sts", + 
"service/sts/stsiface", "service/swf", "service/transfer", "service/waf", @@ -296,8 +336,8 @@ "service/xray", ] pruneopts = "NUT" - revision = "bdd9cf7816bd404510114e712564cf76683b8355" - version = "v1.19.26" + revision = "502ed1c4f99faa3ca47b41a74ff5e0f1150f9354" + version = "v1.25.35" [[projects]] digest = "1:15e3271f463f2f40d98bf426aabb86941fc66b10272ccfdfebe548683e37acb1" @@ -307,6 +347,22 @@ revision = "8aee6516be3b1163bb6450c35c50e4969e3a3aa8" version = "v1.1.0" +[[projects]] + branch = "master" + digest = "1:37011b20a70e205b93ebea5287e1afa5618db54bf3998c36ff5a8e4b146a170a" + name = "github.com/bgentry/go-netrc" + packages = ["netrc"] + pruneopts = "NUT" + revision = "9fd32a8b3d3d3f9d43c341bfe098430e07609480" + +[[projects]] + digest = "1:aef384a4c419ec6504905842b9c70c6a800d95db9eb7543f54c061f4e3aee23b" + name = "github.com/bgentry/speakeasy" + packages = ["."] + pruneopts = "NUT" + revision = "4aabc24848ce5fd31929f7d1e4ea74d3709c14cd" + version = "v0.1.0" + [[projects]] branch = "master" digest = "1:04873dc5e06932b750eac24a1d90eabae58249df63bed6823e77b62a0160a297" @@ -401,6 +457,14 @@ pruneopts = "NUT" revision = "d3c2ba80e75eeef10c5cf2fc76d2c809637376b3" +[[projects]] + digest = "1:ade392a843b2035effb4b4a2efa2c3bab3eb29b992e98bacf9c898b0ecb54e45" + name = "github.com/fatih/color" + packages = ["."] + pruneopts = "NUT" + revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4" + version = "v1.7.0" + [[projects]] branch = "master" digest = "1:09c0a5767800a5cf29bb7dade4c3bcaa1fb43ddebad68d2fa98d037cf0f06a3b" @@ -450,6 +514,14 @@ revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" version = "v1.3.1" +[[projects]] + digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "NUT" + revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" + version = "v0.0.1" + [[projects]] digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" name = "github.com/google/btree" @@ -458,6 +530,20 @@ revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" version = "v1.0.0" +[[projects]] + digest = "1:1d1cbf539d9ac35eb3148129f96be5537f1a1330cadcc7e3a83b4e72a59672a3" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/internal/diff", + "cmp/internal/flags", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "NUT" + revision = "2d0692c2e9617365a95b295612ac0d4415ba4627" + version = "v0.3.1" + [[projects]] branch = "master" digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" @@ -653,13 +739,24 @@ version = "v0.5.1" [[projects]] - digest = "1:c1c0f5025b799b9e45af268e7c02a7026673f281c44c0b7523ca9a4d4b686618" + digest = "1:69df3dc31f0d95db569d575c159fdff83cc7920dd731ae3bd44a615e74cc4ba5" name = "github.com/hashicorp/go-getter" - packages = ["helper/url"] + packages = [ + ".", + "helper/url", + ] pruneopts = "NUT" revision = "e1437d0bbb37a1fa61cdb924b034352c823cb89b" version = "v1.2.0" +[[projects]] + digest = "1:b1559b664503c1bb85be5b8fdaaa86c4e6e773947214ff57c377bcbd65a98d4f" + name = "github.com/hashicorp/go-hclog" + packages = ["."] + pruneopts = "NUT" + revision = "234833755cb25ae46996d0ef823326f492f89243" + version = "v0.10.0" + [[projects]] digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443" name = "github.com/hashicorp/go-multierror" @@ -668,6 +765,25 @@ revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1" version = "v1.0.0" +[[projects]] + digest = "1:446e0a4f73191887866ac789d1fd210e3408bb8eb8aeaf199d5f3a73ff63c108" + 
name = "github.com/hashicorp/go-plugin" + packages = [ + ".", + "internal/plugin", + ] + pruneopts = "NUT" + revision = "9e3e1c37db188a1acb66561ee0ed4bf4d5e77554" + version = "v1.0.1" + +[[projects]] + digest = "1:fbab03227343a0285fc74a68dd2ff46cda7edecbbe5a3e98d2cecd00cc67b217" + name = "github.com/hashicorp/go-safetemp" + packages = ["."] + pruneopts = "NUT" + revision = "c9a55de4fe06c920a71964b53cfe3dd293a3c743" + version = "v1.0.0" + [[projects]] digest = "1:6876abc0847343a3e222ebd074431ff7102ce215087c797be0562e9e21e24db4" name = "github.com/hashicorp/go-uuid" @@ -692,6 +808,136 @@ revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" version = "v0.5.1" +[[projects]] + digest = "1:39f543569bf189e228c84a294c50aca8ea56c82b3d9df5c9b788249907d7049a" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "NUT" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:d6639ce96a317847be4b8a17b1a34524c55b37e5e4858ac692c43a498074ea57" + name = "github.com/hashicorp/hcl2" + packages = [ + "ext/dynblock", + "ext/typeexpr", + "gohcl", + "hcl", + "hcl/hclsyntax", + "hcl/json", + "hcldec", + "hcled", + "hclparse", + "hclwrite", + ] + pruneopts = "NUT" + revision = "fb75b3253c80b3bc7ca99c4bfa2ad6743841b1af" + +[[projects]] + branch = "master" + digest = "1:7a0c75c83ef1377300a29045c4e7a833b3b6074324e517273fc6a8e2b35d11d6" + name = "github.com/hashicorp/hil" + packages = ["ast"] + pruneopts = "NUT" + revision = "97b3a9cdfa9349086cfad7ea2fe3165bfe3cbf63" + +[[projects]] + digest = "1:9e12c5649c4b1a47e113593d61acf2eb131dad59fea70a05111a89fa4aead0ee" + name = "github.com/hashicorp/logutils" + packages = ["."] + pruneopts = "NUT" + revision = "a335183dfd075f638afcc820c90591ca3c97eba6" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:edadaf345cfd25b5e3284ed7e602f910e4c617e80dcb2a355e6c8cd9ece51ab9" + name = "github.com/hashicorp/terraform-config-inspect" + packages = ["tfconfig"] + pruneopts = "NUT" + revision = "82a99dc22ef46de5d9e0de955ac453674e912a45" + +[[projects]] + digest = "1:438a7dc6fbc90b0f1f82f67b7338d102e3dc83b1a617b6fe98a11bd84d7a3952" + name = "github.com/hashicorp/terraform-plugin-sdk" + packages = [ + "helper/customdiff", + "helper/encryption", + "helper/hashcode", + "helper/logging", + "helper/mutexkv", + "helper/resource", + "helper/schema", + "helper/structure", + "helper/validation", + "httpclient", + "internal/addrs", + "internal/command/format", + "internal/configs", + "internal/configs/configload", + "internal/configs/configschema", + "internal/configs/hcl2shim", + "internal/dag", + "internal/earlyconfig", + "internal/flatmap", + "internal/helper/config", + "internal/helper/didyoumean", + "internal/helper/plugin", + "internal/httpclient", + "internal/initwd", + "internal/lang", + "internal/lang/blocktoattr", + "internal/lang/funcs", + "internal/modsdir", + "internal/moduledeps", + "internal/plans", + "internal/plans/objchange", + "internal/plugin/convert", + "internal/plugin/discovery", + "internal/providers", + "internal/provisioners", + "internal/registry", + "internal/registry/regsrc", + "internal/registry/response", + "internal/states", + "internal/states/statefile", + "internal/svchost", + "internal/svchost/auth", + "internal/svchost/disco", + "internal/tfdiags", + "internal/tfplugin5", + "internal/vault/helper/pgpkeys", + 
"internal/vault/sdk/helper/compressutil", + "internal/vault/sdk/helper/jsonutil", + "internal/version", + "meta", + "plugin", + "terraform", + ] + pruneopts = "NUT" + revision = "cff43270c4ac655105e5c5f69cad3b5227473dc3" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:26f63172b8c0484ef9805697291cecd0ea265fbbe088bfeb90fda646d66ba208" + name = "github.com/hashicorp/yamux" + packages = ["."] + pruneopts = "NUT" + revision = "df201c70410deac145ff09e1126f3f8834ead1ec" + [[projects]] digest = "1:2f3840740b71b2798a9c85f191367530771debdd812900c4a088ac1f3e31b20e" name = "github.com/hooklift/iso9660" @@ -715,6 +961,28 @@ pruneopts = "NUT" revision = "c2b33e84" +[[projects]] + branch = "master" + digest = "1:86639e70e7ed2494ec2f7e140433990525dbbbff7f93a7fcdc63c23ae89b02b5" + name = "github.com/keybase/go-crypto" + packages = [ + "brainpool", + "cast5", + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "openpgp", + "openpgp/armor", + "openpgp/ecdh", + "openpgp/elgamal", + "openpgp/errors", + "openpgp/packet", + "openpgp/s2k", + "rsa", + ] + pruneopts = "NUT" + revision = "a054578053044161085030449d77f6ceee2cc31a" + [[projects]] digest = "1:92819c3dc15d822e470ba6ddddc4b3790a8bb902f65e273f7ad5a1b288e39a60" name = "github.com/kubernetes-sigs/aws-iam-authenticator" @@ -742,6 +1010,38 @@ revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f" version = "v1.1.0" +[[projects]] + digest = "1:08c231ec84231a7e23d67e4b58f975e1423695a32467a362ee55a803f9de8061" + name = "github.com/mattn/go-colorable" + packages = ["."] + pruneopts = "NUT" + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + digest = "1:f70baa68d82763379f5d039daca4a568d2246beec2dde096f756aa4768cfb786" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "NUT" + revision = "88ba11cfdc67c7588b30042edf244b2875f892b6" + version = "v0.0.10" + +[[projects]] + digest = "1:874dbb1944c970cb6c5c683cc947b7a849b5fb2e6bdce41b1f95b3187916ed4b" + name = "github.com/mitchellh/cli" + packages = ["."] + pruneopts = "NUT" + revision = "3d22a244be8aa6fb16ac24af0e195c08b7d973aa" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:58ed95018a7eb9f94125846e7cf1f12843c0d46308e9a7a6a58e895270da5b99" + name = "github.com/mitchellh/colorstring" + packages = ["."] + pruneopts = "NUT" + revision = "d06e56a500db4d08c33db0b79461e7c9beafca2d" + [[projects]] digest = "1:1e5a8bf6fb005f1808852ad1a4bedb7b4b3291af90dec12c4c9e4a955a42c9b2" name = "github.com/mitchellh/copystructure" @@ -758,6 +1058,22 @@ revision = "af06845cf3004701891bf4fdb884bfe4920b3727" version = "v1.1.0" +[[projects]] + digest = "1:18b773b92ac82a451c1276bd2776c1e55ce057ee202691ab33c8d6690efcc048" + name = "github.com/mitchellh/go-testing-interface" + packages = ["."] + pruneopts = "NUT" + revision = "6d0b8010fcc857872e42fc6c931227569016843c" + version = "v1.0.0" + +[[projects]] + digest = "1:9f63926f52745d1f9d6053f8fbbb3bd3983c2c97c0565957e682b8d2f7aad3af" + name = "github.com/mitchellh/go-wordwrap" + packages = ["."] + pruneopts = "NUT" + revision = "9e67c67572bc5dd02aef930e2b0ae3c02a4b5a5c" + version = "v1.0.0" + [[projects]] digest = "1:e34decedbcec12332c5836d16a6838f864e0b43c5b4f9aa9d9a85101015f87c2" name = "github.com/mitchellh/hashstructure" @@ -790,6 +1106,14 @@ revision = "eecee6c969c02c8cc2ae48e1e269843ae8590796" version = "v1.0.0" +[[projects]] + digest = "1:3b517122f3aad1ecce45a630ea912b3092b4729f25532a911d0cb2935a1f9352" + name = "github.com/oklog/run" + packages = ["."] + pruneopts = 
"NUT" + revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39" + version = "v1.0.0" + [[projects]] digest = "1:eba01da9d7286b80ecfb6b6ea6299b57f7a74ada8c94534e35eb9573816cfefc" name = "github.com/openshift-metal3/terraform-provider-ironic" @@ -814,6 +1138,29 @@ revision = "0be1b92a6df0e4f5cb0a5d15fb7f643d0ad93ce6" version = "v3.0.0" +[[projects]] + digest = "1:ea86ccc0e0aee36342acb8960743166c68667d499a0db97ca416f3fd0113c58a" + name = "github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32", + ] + pruneopts = "NUT" + revision = "645f9b948eee34cbcc335c70999f79c29c420fbf" + version = "v2.3.0" + +[[projects]] + digest = "1:6b8897b9b37c5f4e4e3b4a5c62ef30e593f4452e2b2a2e404e692b5cceeb185f" + name = "github.com/posener/complete" + packages = [ + ".", + "cmd", + "cmd/install", + ] + pruneopts = "NUT" + revision = "98a0c28ec7908620d626d072df3a49c34eadc2b6" + version = "v1.2.2" + [[projects]] digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca" name = "github.com/satori/go.uuid" @@ -830,6 +1177,17 @@ revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" version = "v1.2.0" +[[projects]] + digest = "1:6792bb72ea0e7112157d02e4e175cd421b43d004a853f56316a19beca6e0c074" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "NUT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + [[projects]] digest = "1:232ab5b495f4faf58aea3d1c25042d6a557f4267d340b880d196864181a1d339" name = "github.com/stoewer/go-strcase" @@ -839,12 +1197,16 @@ version = "v1.0.2" [[projects]] - digest = "1:79b615b7115a6a52c5eea9ecfc2f69d275aa8fceb387327229b3620a23870e93" + digest = "1:3f3e36213e06d46b50a3a2b4952d2382cda12f7cedeb6f48228a042562433d53" name = "github.com/terraform-providers/terraform-provider-aws" - packages = ["aws"] + packages = [ + "aws", + "aws/internal/flatmap", + "aws/internal/keyvaluetags", + ] pruneopts = "NUT" - revision = "4b894dbf13f642f43dce2bb13bdf70e93e75babe" - version = "v2.10.0" + revision = "907e76d4b11205d817067e581ecaab474fd0aa61" + version = "v2.36.0" [[projects]] digest = "1:95e2fb03b69a29c7f843830c392f684f110da8ca83649c821c20bad61d9116b5" @@ -908,6 +1270,19 @@ revision = "0ce299bbf59ecbfdf628fcb7e63b234d1cd696f1" version = "v2.2.0" +[[projects]] + digest = "1:5f252b001e9573efb54ffae53b96bb5bfa4963afec4f35d607121ebb4a4dff81" + name = "github.com/ulikunitz/xz" + packages = [ + ".", + "internal/hash", + "internal/xlog", + "lzma", + ] + pruneopts = "NUT" + revision = "6f934d456d51e742b4eeab20d925a827ef22320a" + version = "v0.5.6" + [[projects]] branch = "master" digest = "1:c9b456727ce4101594aabddaa816c80464cd21a1e5fcd145297a902303be0085" @@ -916,6 +1291,54 @@ pruneopts = "NUT" revision = "9a301d65acbb728fcc3ace14f45f511a4cfeea9c" +[[projects]] + digest = "1:e3b1a9a30daf6e9088bb2aeda3cc5806b7c361c062b10079d7ed4246e85de144" + name = "github.com/vmihailenco/msgpack" + packages = [ + ".", + "codes", + ] + pruneopts = "NUT" + revision = "cd92a145e6d2ce09e792f838dc0b669b680aea29" + version = "v4.1.1" + +[[projects]] + digest = "1:33664cc0ef6c6539d84f299593b451a43f4ae62d5cf08b998de35f3537069903" + name = "github.com/vmihailenco/tagparser" + packages = [ + ".", + "internal", + "internal/parser", + ] + pruneopts = "NUT" + revision = "e4b6c8cf4f3d7bb572551d5a531a9f2068b2fd6e" + version = "v0.1.1" + +[[projects]] + digest = "1:930282caabe5c7a76835159409d95ebe42bbd25b6bb14e2f31eb57e2c5d703a0" + name = "github.com/zclconf/go-cty" + packages = [ + "cty", + "cty/convert", + "cty/function", + "cty/function/stdlib", + "cty/gocty", + 
"cty/json", + "cty/msgpack", + "cty/set", + ] + pruneopts = "NUT" + revision = "01e2a242a65eb6f8ee372ddcea68c111fd4da15e" + version = "v1.1.0" + +[[projects]] + digest = "1:5d9f365c4be0f02ad29f31b0cf9269de56586f92c86459c6e4e09a1dc9ad4cc6" + name = "github.com/zclconf/go-cty-yaml" + packages = ["."] + pruneopts = "NUT" + revision = "bc34c981dadb5ed30af852693e3aba8fb6546f42" + version = "v1.0.1" + [[projects]] digest = "1:58f2854b50ff8862eb6a347f20dedaac83e1166f4040472e17bc37736841a12f" name = "go.opencensus.io" @@ -945,9 +1368,18 @@ [[projects]] branch = "master" - digest = "1:994c4915a59f821705d08ea77b117ec7a3e6a46cc867fd194d887500dac1c3c2" + digest = "1:da7df8935ee7d6c4aa73c5d6c1a2108b47b782c7eab6d5f9b96bb64296cd777d" name = "golang.org/x/crypto" packages = [ + "bcrypt", + "blowfish", + "cast5", + "openpgp", + "openpgp/armor", + "openpgp/elgamal", + "openpgp/errors", + "openpgp/packet", + "openpgp/s2k", "pkcs12", "pkcs12/internal/rc2", ] @@ -984,6 +1416,21 @@ pruneopts = "NUT" revision = "953cdadca894cdc07be76fc99f95b40c28f06623" +[[projects]] + digest = "1:1093f2eb4b344996604f7d8b29a16c5b22ab9e1b25652140d3fede39f640d5cd" + name = "golang.org/x/text" + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm", + ] + pruneopts = "NUT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + [[projects]] digest = "1:43497bc2024018930950306dfb47802bcfd1aeb36890a70580dfa38bfc9214a7" name = "google.golang.org/api" @@ -1037,10 +1484,13 @@ version = "v0.3.2" [[projects]] - digest = "1:a955e7c44c2be14b61aa2ddda744edfdfbc6817e993703a16e303c277ba84449" + digest = "1:faacaa9b32f9d7c38351390ea71b22e2b1661741eb5240a1a85b869e14e99ea2" name = "google.golang.org/appengine" packages = [ ".", + "datastore", + "datastore/internal/cloudkey", + "datastore/internal/cloudpb", "internal", "internal/app_identity", "internal/base", @@ -1076,7 +1526,7 @@ revision = "e7d98fc518a78c9f8b5ee77be7b0b317475d89e1" [[projects]] - digest = "1:5929ef256f4dbc5b216cc0abd39b868526857fffaa1f917301a687c4c80797c2" + digest = "1:cc40cd56e7a875bfb6b755c8c3612c146d7b90b87abd41e42e812f7d4108d723" name = "google.golang.org/grpc" packages = [ ".", @@ -1092,6 +1542,8 @@ "encoding", "encoding/proto", "grpclog", + "health", + "health/grpc_health_v1", "internal", "internal/backoff", "internal/balancerload", @@ -1112,6 +1564,7 @@ "stats", "status", "tap", + "test/bufconn", ] pruneopts = "NUT" revision = "25c4f928eaa6d96443009bd842389fb4fa48664e" diff --git a/pkg/terraform/exec/plugins/Gopkg.toml b/pkg/terraform/exec/plugins/Gopkg.toml index 7b4c65a2d34..e06fbe3c780 100644 --- a/pkg/terraform/exec/plugins/Gopkg.toml +++ b/pkg/terraform/exec/plugins/Gopkg.toml @@ -15,11 +15,11 @@ ignored = [ [[constraint]] name = "github.com/terraform-providers/terraform-provider-aws" - version = "=2.10.0" + version = "=2.36.0" [[override]] name = "github.com/aws/aws-sdk-go" - version = "=1.19.26" + version = "=1.25.35" [[override]] name = "github.com/kubernetes-sigs/aws-iam-authenticator" diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/annotate.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 00000000000..6435695ba34 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. +func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/retry.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 00000000000..7a7b4c2052d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,54 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. 
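+ // Context cancellation and deadline errors are excluded here so that, once ctx is done, the sleep branch below can report the last real failure from f together with the context error.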
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return Annotatef(lastErr, "retry failed with %v; last error", cerr) + } + return cerr + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/version/version.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 00000000000..4a2a8c19ff1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20180226" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/acl.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 00000000000..7855d110ad4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,335 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "net/http" + "reflect" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. +type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>", +// "domain-<domain>" and "project-team-<projectId>". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a +// Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + EntityID string + Role ACLRole + Domain string + Email string + ProjectTeam *ProjectTeam +} + +// ProjectTeam is the project team associated with the entity, if any. +type ProjectTeam struct { + ProjectNumber string + Team string +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool + userProject string // for requester-pays buckets +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the role for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectSet(ctx, entity, role, false) + } + if a.isDefault { + return a.objectSet(ctx, entity, role, true) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries.
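+// As with Delete and Set, the request is dispatched on how the handle was obtained: object ACLs for an object handle, the bucket's default object ACLs for a default handle, and the bucket's own ACLs otherwise.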
+func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(ctx, req) + _, err := req.Do() + return err + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(ctx, req) + return runWithRetry(ctx, func() error { + _, err := req.Do() + return err + }) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + 
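// The generated call types handled here each provide Context and UserProject setters; invoking them via reflection lets this single helper configure every call type without a per-type switch. +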
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if a.userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) + } + setClientHeader(call.Header()) +} + +func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRule(item)) + } + return rs +} + +func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRule(item)) + } + return rs +} + +func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toObjectProjectTeam(a.ProjectTeam), + } +} + +func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toBucketProjectTeam(a.ProjectTeam), + } +} + +func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary + } + return r +} + +func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary + } + return r +} + +func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { + return &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { + return &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} + +func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/bucket.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 00000000000..bbfc59b2199 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,1186 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. +type BucketHandle struct { + c *Client + name string + acl ACLHandle + defaultObjectACL ACLHandle + conds *BucketConditions + userProject string // project for Requester Pays buckets +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + }, + } +} + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := b.c.raw.Buckets.Insert(projectID, bkt) + setClientHeader(req.Header()) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newDeleteCall() + if err != nil { + return err + } + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { + req := b.c.raw.Buckets.Delete(b.name) + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. 
+func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + userProject: b.userProject, + }, + gen: -1, + userProject: b.userProject, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newGetCall() + if err != nil { + return nil, err + } + var resp *raw.Bucket + err = runWithRetry(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp) +} + +func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { + req := b.c.raw.Buckets.Get(b.name).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// Update updates a bucket's attributes. +func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newPatchCall(&uattrs) + if err != nil { + return nil, err + } + if uattrs.PredefinedACL != "" { + req.PredefinedAcl(uattrs.PredefinedACL) + } + if uattrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + } + // TODO(jba): retry iff metagen is set? + rb, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + return newBucket(rb) +} + +func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { + rb := uattrs.toRawBucket() + req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +// Read-only fields are ignored by BucketHandle.Create. +type BucketAttrs struct { + // Name is the name of the bucket. + // This field is read-only. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // BucketPolicyOnly configures access checks to use only bucket-level IAM + // policies. + BucketPolicyOnly BucketPolicyOnly + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. It defaults to false. 
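+ // While an event-based hold is set on an object, the object cannot be deleted or overwritten.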
+ DefaultEventBasedHold bool + + // If not empty, applies a predefined set of access controls. It should be set + // only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // It should be set only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedDefaultObjectACL string + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + // This field is read-only. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "MULTI_REGIONAL", + // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which + // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on + // the bucket's location settings. + StorageClass string + + // Created is the creation time of the bucket. + // This field is read-only. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + VersioningEnabled bool + + // Labels are the bucket's labels. + Labels map[string]string + + // RequesterPays reports whether the bucket is a Requester Pays bucket. + // Clients performing operations on Requester Pays buckets must provide + // a user project (see BucketHandle.UserProject), which will be billed + // for the operations. + RequesterPays bool + + // Lifecycle is the lifecycle configuration for objects in the bucket. + Lifecycle Lifecycle + + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket. A RetentionPolicy of nil implies the bucket + // has no minimum data retention. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // The bucket's Cross-Origin Resource Sharing (CORS) configuration. + CORS []CORS + + // The encryption configuration used by default for newly inserted objects. + Encryption *BucketEncryption + + // The logging configuration. + Logging *BucketLogging + + // The website configuration. + Website *BucketWebsite + + // Etag is the HTTP/1.1 Entity tag for the bucket. + // This field is read-only. + Etag string +} + +// BucketPolicyOnly configures access checks to use only bucket-level IAM +// policies. +type BucketPolicyOnly struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + +// Lifecycle is the lifecycle configuration for objects in the bucket. +type Lifecycle struct { + Rules []LifecycleRule +} + +// RetentionPolicy enforces a minimum retention time for all objects +// contained in the bucket. +// +// Any attempt to overwrite or delete objects younger than the retention +// period will result in an error. 
An unlocked retention policy can be +// modified or removed from the bucket via the Update method. A +// locked retention policy cannot be removed or shortened in duration +// for the lifetime of the bucket. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +type RetentionPolicy struct { + // RetentionPeriod specifies the duration that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod time.Duration + + // EffectiveTime is the time from which the policy was enforced and + // effective. This field is read-only. + EffectiveTime time.Time + + // IsLocked describes whether the bucket is locked. Once locked, an + // object retention policy cannot be modified. + // This field is read-only. + IsLocked bool +} + +const ( + // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + rfc3339Date = "2006-01-02" + + // DeleteAction is a lifecycle action that deletes a live and/or archived + // objects. Takes precedence over SetStorageClass actions. + DeleteAction = "Delete" + + // SetStorageClassAction changes the storage class of live and/or archived + // objects. + SetStorageClassAction = "SetStorageClass" +) + +// LifecycleRule is a lifecycle configuration rule. +// +// When all the configured conditions are met by an object in the bucket, the +// configured action will automatically be taken on that object. +type LifecycleRule struct { + // Action is the action to take when all of the associated conditions are + // met. + Action LifecycleAction + + // Condition is the set of conditions that must be met for the associated + // action to be taken. + Condition LifecycleCondition +} + +// LifecycleAction is a lifecycle configuration action. +type LifecycleAction struct { + // Type is the type of action to take on matching objects. + // + // Acceptable values are "Delete" to delete matching objects and + // "SetStorageClass" to set the storage class defined in StorageClass on + // matching objects. + Type string + + // StorageClass is the storage class to set on matching objects if the Action + // is "SetStorageClass". + StorageClass string +} + +// Liveness specifies whether the object is live or not. +type Liveness int + +const ( + // LiveAndArchived includes both live and archived objects. + LiveAndArchived Liveness = iota + // Live specifies that the object is still live. + Live + // Archived specifies that the object is archived. + Archived +) + +// LifecycleCondition is a set of conditions used to match objects and take an +// action automatically. +// +// All configured conditions must be met for the associated action to be taken. +type LifecycleCondition struct { + // AgeInDays is the age of the object in days. + AgeInDays int64 + + // CreatedBefore is the time the object was created. + // + // This condition is satisfied when an object is created before midnight of + // the specified date in UTC. + CreatedBefore time.Time + + // Liveness specifies the object's liveness. Relevant only for versioned objects + Liveness Liveness + + // MatchesStorageClasses is the condition matching the object's storage + // class. 
+ // + // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", + // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + MatchesStorageClasses []string + + // NumNewerVersions is the condition matching objects with a number of newer versions. + // + // If the value is N, this condition is satisfied when there are at least N + // versions (including the live version) newer than this version of the + // object. + NumNewerVersions int64 +} + +// BucketLogging holds the bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // The destination bucket where the current bucket's logs + // should be placed. + LogBucket string + + // A prefix for log object names. + LogObjectPrefix string +} + +// BucketWebsite holds the bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See +// https://cloud.google.com/storage/docs/static-website for more information. +type BucketWebsite struct { + // If the requested object path is missing, the service will ensure the path has + // a trailing '/', append this suffix, and attempt to retrieve the resulting + // object. This allows the creation of index.html objects to represent directory + // pages. + MainPageSuffix string + + // If the requested object path is missing, and any mainPageSuffix object is + // missing, if applicable, the service will return the named object from this + // bucket as the content for a 404 Not Found result. + NotFoundPage string +} + +func newBucket(b *raw.Bucket) (*BucketAttrs, error) { + if b == nil { + return nil, nil + } + rp, err := toRetentionPolicy(b.RetentionPolicy) + if err != nil { + return nil, err + } + return &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + DefaultEventBasedHold: b.DefaultEventBasedHold, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + ACL: toBucketACLRules(b.Acl), + DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), + Logging: toBucketLogging(b.Logging), + Website: toBucketWebsite(b.Website), + BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), + Etag: b.Etag, + }, nil +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. 
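+ // (Updates take a different path: BucketAttrsToUpdate.toRawBucket uses ForceSendFields, so an explicit false is still sent.)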
+ var v *raw.BucketVersioning + if b.VersioningEnabled { + v = &raw.BucketVersioning{Enabled: true} + } + var bb *raw.BucketBilling + if b.RequesterPays { + bb = &raw.BucketBilling{RequesterPays: true} + } + var bktIAM *raw.BucketIamConfiguration + if b.BucketPolicyOnly.Enabled { + bktIAM = &raw.BucketIamConfiguration{ + BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + Enabled: true, + }, + } + } + return &raw.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + } +} + +// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. +type CORS struct { + // MaxAge is the value to return in the Access-Control-Max-Age + // header used in preflight responses. + MaxAge time.Duration + + // Methods is the list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Methods []string + + // Origins is the list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origins []string + + // ResponseHeaders is the list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeaders []string +} + +// BucketEncryption is a bucket's encryption configuration. +type BucketEncryption struct { + // A Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt + // objects inserted into this bucket, if no encryption method is specified. + // The key's location must be the same as the bucket's. + DefaultKMSKeyName string +} + +// BucketAttrsToUpdate define the attributes to update during an Update call. +type BucketAttrsToUpdate struct { + // If set, updates whether the bucket uses versioning. + VersioningEnabled optional.Bool + + // If set, updates whether the bucket is a Requester Pays bucket. + RequesterPays optional.Bool + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. + DefaultEventBasedHold optional.Bool + + // BucketPolicyOnly configures access checks to use only bucket-level IAM + // policies. + BucketPolicyOnly *BucketPolicyOnly + + // If set, updates the retention policy of the bucket. Using + // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // If set, replaces the CORS configuration with a new configuration. + // An empty (rather than nil) slice causes all CORS policies to be removed. + CORS []CORS + + // If set, replaces the encryption configuration of the bucket. Using + // BucketEncryption.DefaultKMSKeyName = "" will delete the existing + // configuration. + Encryption *BucketEncryption + + // If set, replaces the lifecycle configuration of the bucket. 
+ Lifecycle *Lifecycle + + // If set, replaces the logging configuration of the bucket. + Logging *BucketLogging + + // If set, replaces the website configuration of the bucket. + Website *BucketWebsite + + // If not empty, applies a predefined set of access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedDefaultObjectACL string + + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified when ua is used +// in a call to Bucket.Update. +func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { + if ua.setLabels == nil { + ua.setLabels = map[string]string{} + } + ua.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted when ua is used in a +// call to Bucket.Update. +func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.DefaultEventBasedHold != nil { + rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.BucketPolicyOnly != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + Enabled: ua.BucketPolicyOnly.Enabled, + }, + } + } + if ua.Encryption != nil { + if ua.Encryption.DefaultKMSKeyName == "" { + rb.NullFields = append(rb.NullFields, "Encryption") + rb.Encryption = nil + } else { + rb.Encryption = ua.Encryption.toRawBucketEncryption() + } + } + if ua.Lifecycle != nil { + rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + } + if ua.Logging != nil { + if *ua.Logging == (BucketLogging{}) { + rb.NullFields = append(rb.NullFields, "Logging") + rb.Logging = nil + } else { + rb.Logging = ua.Logging.toRawBucketLogging() + } + } + if ua.Website != nil { + if *ua.Website == (BucketWebsite{}) { + rb.NullFields = append(rb.NullFields, "Website") + rb.Website = nil + } else { + rb.Website = ua.Website.toRawBucketWebsite() + } + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + rb.Acl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "Acl") + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. 
+ rb.DefaultObjectAcl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") + } + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + var metageneration int64 + if b.conds != nil { + metageneration = b.conds.MetagenerationMatch + } + req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) + _, err := req.Context(ctx).Do() + return err +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
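+// For example, with a MetagenerationMatch condition it has the same effect
+// as calling IfMetagenerationMatch(n) on the call directly; setConditionField
+// locates that setter via reflection. (Illustrative note: *raw.BucketsGetCall
+// and *raw.BucketsPatchCall are examples of call types carrying that method.)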
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + cval := reflect.ValueOf(call) + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { + if rp == nil { + return nil + } + return &raw.BucketRetentionPolicy{ + RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil { + return nil, nil + } + t, err := time.Parse(time.RFC3339, rp.EffectiveTime) + if err != nil { + return nil, err + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, + EffectiveTime: t, + IsLocked: rp.IsLocked, + }, nil +} + +func toRawCORS(c []CORS) []*raw.BucketCors { + var out []*raw.BucketCors + for _, v := range c { + out = append(out, &raw.BucketCors{ + MaxAgeSeconds: int64(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toCORS(rc []*raw.BucketCors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, + Methods: v.Method, + Origins: v.Origin, + ResponseHeaders: v.ResponseHeader, + }) + } + return out +} + +func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { + var rl raw.BucketLifecycle + if len(l.Rules) == 0 { + return nil + } + for _, r := range l.Rules { + rr := &raw.BucketLifecycleRule{ + Action: &raw.BucketLifecycleRuleAction{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: r.Condition.AgeInDays, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, + }, + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = googleapi.Bool(true) + case Archived: + rr.Condition.IsLive = googleapi.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.Rule { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.Action.Type, + StorageClass: rr.Action.StorageClass, + }, + Condition: LifecycleCondition{ + AgeInDays: rr.Condition.Age, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, + }, + } + + switch { + case rr.Condition.IsLive == nil: + r.Condition.Liveness = LiveAndArchived + case *rr.Condition.IsLive == true: + r.Condition.Liveness = Live + case *rr.Condition.IsLive == false: + r.Condition.Liveness = Archived + } + + if rr.Condition.CreatedBefore != "" { + r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) + } + l.Rules = append(l.Rules, r) 
+ } + return l +} + +func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { + if e == nil { + return nil + } + return &raw.BucketEncryption{ + DefaultKmsKeyName: e.DefaultKMSKeyName, + } +} + +func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} +} + +func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { + if b == nil { + return nil + } + return &raw.BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func toBucketLogging(b *raw.BucketLogging) *BucketLogging { + if b == nil { + return nil + } + return &BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { + if w == nil { + return nil + } + return &raw.BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { + if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { + return BucketPolicyOnly{} + } + lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) + if err != nil { + return BucketPolicyOnly{ + Enabled: true, + } + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: lt, + } +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. 
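+//
+// A typical loop over the iterator looks like the following sketch, where
+// bkt and q are assumed to be an existing BucketHandle and Query:
+//
+//	it := bkt.Objects(ctx, q)
+//	for {
+//		objAttrs, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		fmt.Println(objAttrs.Name)
+//	}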
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
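+// For example, the iterator can be driven one page at a time with
+// iterator.NewPager (a sketch; pageSize and pageToken are assumed values):
+//
+//	pager := iterator.NewPager(it, pageSize, pageToken)
+//	var page []*BucketAttrs
+//	nextToken, err := pager.NextPage(&page)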
+func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/copy.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 00000000000..52162e72d10 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,228 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. 
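+	// For example (an illustrative sketch only):
+	//
+	//	c.ProgressFunc = func(copiedBytes, totalBytes uint64) {
+	//		log.Printf("copied %d of %d bytes", copiedBytes, totalBytes)
+	//	}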
+ ProgressFunc func(copiedBytes, totalBytes uint64) + + // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, + // that will be used to encrypt the object. Overrides the object's KMSKeyName, if + // any. + // + // Providing both a DestinationKMSKeyName and a customer-supplied encryption key + // (via ObjectHandle.Key) on the destination object will result in an error when + // Run is called. + DestinationKMSKeyName string + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { + return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } +} + +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if c.DestinationKMSKeyName != "" { + call.DestinationKmsKeyName(c.DestinationKMSKeyName) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. 
+// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. +func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/doc.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 00000000000..88f6459046c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,176 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. 
+ +See https://godoc.org/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. + +All of the methods of this package use exponential backoff to retry calls that fail +with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues +indefinitely unless the controlling context is canceled or the client is closed. See +context.WithTimeout and context.WithCancel. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +The client will use your default application credentials. + +If you only wish to access public data, you can create +an unauthenticated client with + + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. To create a bucket in Google Cloud Storage, +call Create on the handle: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +BucketAttrs. The third argument to BucketHandle.Create allows you to set +the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use +Attrs: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets, but unlike buckets +you don't explicitly create an object. Instead, the first time you write +to an object it will be created. You can use the standard Go io.Reader +and io.Writer interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with Attrs: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see +https://cloud.google.com/storage/docs/access-control/iam). 
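+
+Where project-level control is preferred, use the bucket's IAM handle. The
+following is an illustrative sketch; the member and role strings are example
+values:
+
+	policy, err := client.Bucket(bucketName).IAM().Policy(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	policy.Add("group:devs@example.com", "roles/storage.objectViewer")
+	if err := client.Bucket(bucketName).IAM().SetPolicy(ctx, policy); err != nil {
+		// TODO: Handle error.
+	}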
+ +To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. Conditions let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +Errors + +Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). +These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: + + if e, ok := err.(*googleapi.Error); ok { + if e.Code == 409 { ... } + } +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/go110.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/go110.go new file mode 100644 index 00000000000..206813f0cea --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/go110.go @@ -0,0 +1,32 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.10 + +package storage + +import "google.golang.org/api/googleapi" + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. 
+ return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/iam.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 00000000000..9d936067129 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,130 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) +} + +// iamClient implements the iam.client interface. +type iamClient struct { + raw *raw.Service + userProject string +} + +func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var rp *raw.Policy + err = runWithRetry(ctx, func() error { + rp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + + rp := iamToStoragePolicy(p) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + return runWithRetry(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var res *raw.TestIamPermissionsResponse + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs 
= append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/invoke.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 00000000000..e755f197de8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,37 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go/v2" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + if shouldRetry(err) { + return false, nil + } + return true, err + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/not_go110.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/not_go110.go new file mode 100644 index 00000000000..66fa45bea2f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/not_go110.go @@ -0,0 +1,42 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.10 + +package storage + +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry on REFUSED_STREAM. + // Unfortunately the error type is unexported, so we resort to string + // matching. 
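+	// A matching message typically looks like
+	// "stream error: stream ID 1; REFUSED_STREAM" (illustrative; the exact
+	// text comes from the http2 package's StreamError formatting).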
+ return strings.Contains(e.Error(), "REFUSED_STREAM") + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/notifications.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 00000000000..84619b6d58c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,188 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "regexp" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. +type Notification struct { + //The ID of the notification. + ID string + + // The ID of the topic to which this subscription publishes. + TopicID string + + // The ID of the project to which the topic belongs. + TopicProjectID string + + // Only send notifications about listed event types. If empty, send notifications + // for all event types. + // See https://cloud.google.com/storage/docs/pubsub-notifications#events. + EventTypes []string + + // If present, only apply this notification configuration to object names that + // begin with this prefix. + ObjectNamePrefix string + + // An optional list of additional attributes to attach to each Cloud PubSub + // message published for this notification subscription. + CustomAttributes map[string]string + + // The contents of the message payload. + // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. + PayloadFormat string +} + +// Values for Notification.PayloadFormat. +const ( + // Send no payload with notification messages. + NoPayload = "NONE" + + // Send object metadata as JSON with notification messages. + JSONPayload = "JSON_API_V1" +) + +// Values for Notification.EventTypes. +const ( + // Event that occurs when an object is successfully created. + ObjectFinalizeEvent = "OBJECT_FINALIZE" + + // Event that occurs when the metadata of an existing object changes. + ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" + + // Event that occurs when an object is permanently deleted. + ObjectDeleteEvent = "OBJECT_DELETE" + + // Event that occurs when the live version of an object becomes an + // archived version. + ObjectArchiveEvent = "OBJECT_ARCHIVE" +) + +func toNotification(rn *raw.Notification) *Notification { + n := &Notification{ + ID: rn.Id, + EventTypes: rn.EventTypes, + ObjectNamePrefix: rn.ObjectNamePrefix, + CustomAttributes: rn.CustomAttributes, + PayloadFormat: rn.PayloadFormat, + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) + return n +} + +var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") + +// parseNotificationTopic extracts the project and topic IDs from from the full +// resource name returned by the service. 
If the name is malformed, it returns +// "?" for both IDs. +func parseNotificationTopic(nt string) (projectID, topicID string) { + matches := topicRE.FindStringSubmatch(nt) + if matches == nil { + return "?", "?" + } + return matches[1], matches[2] +} + +func toRawNotification(n *Notification) *raw.Notification { + return &raw.Notification{ + Id: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: string(n.PayloadFormat), + } +} + +// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID +// and PayloadFormat, and must not set its ID. The other fields are all optional. The +// returned Notification's ID can be used to refer to it. +func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") + defer func() { trace.EndSpan(ctx, err) }() + + if n.ID != "" { + return nil, errors.New("storage: AddNotification: ID must not be set") + } + if n.TopicProjectID == "" { + return nil, errors.New("storage: AddNotification: missing TopicProjectID") + } + if n.TopicID == "" { + return nil, errors.New("storage: AddNotification: missing TopicID") + } + call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + rn, err := call.Context(ctx).Do() + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +// Notifications returns all the Notifications configured for this bucket, as a map +// indexed by notification ID. +func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.List(b.name) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + var res *raw.Notifications + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/reader.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 00000000000..50f381f91e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,385 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// ReaderObjectAttrs are attributes about the object being read. These are populated +// during the New call. This struct only holds a subset of object attributes: to +// get the full set of attributes, use ObjectHandle.Attrs. +// +// Each field is read-only. +type ReaderObjectAttrs struct { + // Size is the length of the object's content. + Size int64 + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // CacheControl specifies whether and for how long browser and Internet + // caches are allowed to cache your objects. + CacheControl string + + // LastModified is the time that the object was last modified. + LastModified time.Time + + // Generation is the generation number of the object's content. + Generation int64 + + // Metageneration is the version of the metadata for this object at + // this generation. This field is used for preconditions and for + // detecting changes in metadata. A metageneration number is only + // meaningful in the context of a particular generation of a + // particular object. + Metageneration int64 +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. 
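+//
+// For example, the first 64 KiB of an object could be read with the
+// following sketch, where obj is assumed to be an existing *ObjectHandle:
+//
+//	r, err := obj.NewRangeReader(ctx, 0, 64*1024)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer r.Close()
+//	data, err := ioutil.ReadAll(r)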
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + + gen := o.gen + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + start := offset + seen + if length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. + req.URL.RawQuery = conditionsQuery(gen, o.conds) + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + gen = gen64 + } + return nil + }) + if err != nil { + return nil, err + } + return res, nil + } + + res, err := reopen(0) + if err != nil { + return nil, err + } + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). 
+ // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. + if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Generation") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + Generation: gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + body: body, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reopen: reopen, + }, nil +} + +func uncompressedByServer(res *http.Response) bool { + // If the data is stored as gzip but is not encoded as gzip, then it + // was uncompressed by the server. + return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && + res.Header.Get("Content-Encoding") != "gzip" +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +// +// Typically, a Reader computes the CRC of the downloaded content and compares it to +// the stored CRC, returning an error from Read if there is a mismatch. This integrity check +// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. +type Reader struct { + Attrs ReaderObjectAttrs + body io.ReadCloser + seen, remain, size int64 + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc + reopen func(seen int64) (*http.Response, error) +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.readWithRetry(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if err == io.EOF { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + } + return n, err +} + +func (r *Reader) readWithRetry(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if !shouldRetryRead(err) { + return n, err + } + // Read failed, but we will try again. 
Send a ranged read request that takes + // into account the number of bytes we've already seen. + res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func shouldRetryRead(err error) bool { + if err == nil { + return false + } + return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +// +// Deprecated: use Reader.Attrs.Size. +func (r *Reader) Size() int64 { + return r.Attrs.Size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +// +// Deprecated: use Reader.Attrs.ContentType. +func (r *Reader) ContentType() string { + return r.Attrs.ContentType +} + +// ContentEncoding returns the content encoding of the object. +// +// Deprecated: use Reader.Attrs.ContentEncoding. +func (r *Reader) ContentEncoding() string { + return r.Attrs.ContentEncoding +} + +// CacheControl returns the cache control of the object. +// +// Deprecated: use Reader.Attrs.CacheControl. +func (r *Reader) CacheControl() string { + return r.Attrs.CacheControl +} + +// LastModified returns the value of the Last-Modified header. +// +// Deprecated: use Reader.Attrs.LastModified. +func (r *Reader) LastModified() (time.Time, error) { + return r.Attrs.LastModified, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/storage.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 00000000000..c5c5c59ba9f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1351 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/internal/version" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" + htransport "google.golang.org/api/transport/http" +) + +var ( + // ErrBucketNotExist indicates that the bucket does not exist. + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + // ErrObjectNotExist indicates that the object does not exist. 
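+	// It is returned, for example, by ObjectHandle.Attrs and by
+	// ObjectHandle.NewReader when the requested object is not found, and
+	// callers typically test for it with a direct == comparison.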
+ ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + // Set fields to nil so that subsequent uses will panic. + c.hc = nil + c.raw = nil + return nil +} + +// SigningScheme determines the API version to use when signing URLs. +type SigningScheme int + +const ( + // SigningSchemeDefault is presently V2 and will change to V4 in the future. + SigningSchemeDefault SigningScheme = iota + + // SigningSchemeV2 uses the V2 scheme to sign URLs. + SigningSchemeV2 + + // SigningSchemeV4 uses the V4 scheme to sign URLs. + SigningSchemeV4 +) + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. 
For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. For SigningSchemeV4, the expiration may be no + // more than seven days in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string + + // Scheme determines the version of URL signing to use. Default is + // SigningSchemeV2. + Scheme SigningScheme +} + +var ( + tabRegex = regexp.MustCompile(`[\t]+`) + // I was tempted to call this spacex. :) + spaceRegex = regexp.MustCompile(` +`) + + canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) + excludedCanonicalHeaders = map[string]bool{ + "x-goog-encryption-key": true, + "x-goog-encryption-key-sha256": true, + } +) + +// v2SanitizeHeaders applies the specifications for canonical extension headers at +// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +func v2SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var header, value string + // Only keep canonical headers, discard any others. + headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) + if len(headerMatches) == 0 { + continue + } + header = headerMatches[1] + value = headerMatches[2] + + header = strings.ToLower(strings.TrimSpace(header)) + value = strings.TrimSpace(value) + + if excludedCanonicalHeaders[header] { + // Do not keep any deliberately excluded canonical headers when signing. + continue + } + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[header] = append(headerMap[header], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. 
+		sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
+	}
+	sort.Strings(sanitizedHeaders)
+	return sanitizedHeaders
+}
+
+// v4SanitizeHeaders applies the specifications for canonical extension headers
+// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
+//
+// V4 does a couple things differently from V2:
+// - Headers get sorted by key, instead of by key:value. We do this in
+//   signedURLV4.
+// - There's no canonical regexp: we simply split headers on the first colon.
+// - We don't exclude canonical headers.
+// - We trim leading and trailing spaces in header values, like v2, but also
+//   collapse intermediate runs of spaces. That is, there's only ever a single
+//   consecutive space.
+func v4SanitizeHeaders(hdrs []string) []string {
+	headerMap := map[string][]string{}
+	for _, hdr := range hdrs {
+		// No leading or trailing whitespaces.
+		sanitizedHeader := strings.TrimSpace(hdr)
+
+		var key, value string
+		// Split on the first colon only, so that values containing colons
+		// are preserved intact.
+		headerParts := strings.SplitN(sanitizedHeader, ":", 2)
+		if len(headerParts) < 2 {
+			continue
+		}
+
+		key = headerParts[0]
+		value = headerParts[1]
+
+		key = strings.ToLower(strings.TrimSpace(key))
+		value = strings.TrimSpace(value)
+		value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" ")))
+		value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t")))
+
+		if len(value) > 0 {
+			// Remove duplicate headers by appending the values of duplicates
+			// in their order of appearance.
+			headerMap[key] = append(headerMap[key], value)
+		}
+	}
+
+	var sanitizedHeaders []string
+	for header, values := range headerMap {
+		// There should be no spaces around the colon separating the header name
+		// from the header value or around the values themselves. The values
+		// should be separated by commas.
+		//
+		// NOTE: The semantics for headers without a value are not clear.
+		// However from specifications these should be edge-cases anyway and we
+		// should assume that there will be no canonical headers using empty
+		// values. Any such headers are discarded at the split stage above.
+		sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
+	}
+	return sanitizedHeaders
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow anyone
+// access to a restricted resource for a limited time without needing a Google
+// account or signing in. For more information about signed URLs, see
+// https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
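+//
+// A typical call, sketched here with placeholder bucket, object, account and
+// key values:
+//	pkey, err := ioutil.ReadFile("key.pem")
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	url, err := SignedURL("my-bucket", "file.txt", &SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pkey,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(24 * time.Hour),
+//	})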
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+	now := utcNow()
+	if err := validateOptions(opts, now); err != nil {
+		return "", err
+	}
+
+	switch opts.Scheme {
+	case SigningSchemeV2:
+		opts.Headers = v2SanitizeHeaders(opts.Headers)
+		return signedURLV2(bucket, name, opts)
+	case SigningSchemeV4:
+		opts.Headers = v4SanitizeHeaders(opts.Headers)
+		return signedURLV4(bucket, name, opts, now)
+	default: // SigningSchemeDefault
+		opts.Headers = v2SanitizeHeaders(opts.Headers)
+		return signedURLV2(bucket, name, opts)
+	}
+}
+
+func validateOptions(opts *SignedURLOptions, now time.Time) error {
+	if opts == nil {
+		return errors.New("storage: missing required SignedURLOptions")
+	}
+	if opts.GoogleAccessID == "" {
+		return errors.New("storage: missing required GoogleAccessID")
+	}
+	if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
+		return errors.New("storage: exactly one of PrivateKey or SignBytes must be set")
+	}
+	if opts.Method == "" {
+		return errors.New("storage: missing required method option")
+	}
+	if opts.Expires.IsZero() {
+		return errors.New("storage: missing required expires option")
+	}
+	if opts.MD5 != "" {
+		md5, err := base64.StdEncoding.DecodeString(opts.MD5)
+		if err != nil || len(md5) != 16 {
+			return errors.New("storage: invalid MD5 checksum")
+		}
+	}
+	if opts.Scheme == SigningSchemeV4 {
+		cutoff := now.Add(604801 * time.Second) // 7 days + 1 second
+		if !opts.Expires.Before(cutoff) {
+			return errors.New("storage: expires must be within seven days from now")
+		}
+	}
+	return nil
+}
+
+const (
+	iso8601      = "20060102T150405Z"
+	yearMonthDay = "20060102"
+)
+
+// utcNow returns the current time in UTC and is a variable to allow for
+// reassignment in tests to provide deterministic signed URL values.
+var utcNow = func() time.Time {
+	return time.Now().UTC()
+}
+
+// extractHeaderNames takes in a series of key:value headers and returns the
+// header names only.
+func extractHeaderNames(kvs []string) []string {
+	var res []string
+	for _, header := range kvs {
+		nameValue := strings.Split(header, ":")
+		res = append(res, nameValue[0])
+	}
+	return res
+}
+
+// signedURLV4 creates a signed URL using the sigV4 algorithm.
+func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) {
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%s\n", opts.Method)
+	u := &url.URL{Path: bucket}
+	if name != "" {
+		u.Path += "/" + name
+	}
+
+	// Note: we have to add a / here because GCS does so auto-magically, despite
+	// Go's EscapedPath not doing so (and we have to exactly match their
+	// canonical query).
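+	// For example, bucket "bkt" and object "objects/obj 1" produce the
+	// canonical resource line "/bkt/objects/obj%201".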
+ fmt.Fprintf(buf, "/%s\n", u.EscapedPath()) + + headerNames := append(extractHeaderNames(opts.Headers), "host") + if opts.ContentType != "" { + headerNames = append(headerNames, "content-type") + } + if opts.MD5 != "" { + headerNames = append(headerNames, "content-md5") + } + sort.Strings(headerNames) + signedHeaders := strings.Join(headerNames, ";") + timestamp := now.Format(iso8601) + credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) + canonicalQueryString := url.Values{ + "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, + "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, + "X-Goog-Date": {timestamp}, + "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, + "X-Goog-SignedHeaders": {signedHeaders}, + } + fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + + u.Host = "storage.googleapis.com" + + var headersWithValue []string + headersWithValue = append(headersWithValue, "host:"+u.Host) + headersWithValue = append(headersWithValue, opts.Headers...) + if opts.ContentType != "" { + headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType)) + } + if opts.MD5 != "" { + headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5)) + } + canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n") + fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) + fmt.Fprintf(buf, "%s\n", signedHeaders) + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + + sum := sha256.Sum256(buf.Bytes()) + hexDigest := hex.EncodeToString(sum[:]) + signBuf := &bytes.Buffer{} + fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") + fmt.Fprintf(signBuf, "%s\n", timestamp) + fmt.Fprintf(signBuf, "%s\n", credentialScope) + fmt.Fprintf(signBuf, "%s", hexDigest) + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + b, err := signBytes(signBuf.Bytes()) + if err != nil { + return "", err + } + signature := hex.EncodeToString(b) + canonicalQueryString.Set("X-Goog-Signature", string(signature)) + u.Scheme = "https" + u.RawQuery = canonicalQueryString.Encode() + return u.String(), nil +} + +// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header +// key. 
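+// For example, the (hypothetical) input
+//	["host:storage.googleapis.com", "content-type:text/plain"]
+// is returned as
+//	["content-type:text/plain", "host:storage.googleapis.com"]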
+func sortHeadersByKey(hdrs []string) []string {
+	headersMap := map[string]string{}
+	var headersKeys []string
+	for _, h := range hdrs {
+		// Split on the first colon only, so that values containing colons
+		// survive intact.
+		parts := strings.SplitN(h, ":", 2)
+		k := parts[0]
+		v := parts[1]
+		headersMap[k] = v
+		headersKeys = append(headersKeys, k)
+	}
+	sort.Strings(headersKeys)
+	var sorted []string
+	for _, k := range headersKeys {
+		v := headersMap[k]
+		sorted = append(sorted, fmt.Sprintf("%s:%s", k, v))
+	}
+	return sorted
+}
+
+func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
+	signBytes := opts.SignBytes
+	if opts.PrivateKey != nil {
+		key, err := parseKey(opts.PrivateKey)
+		if err != nil {
+			return "", err
+		}
+		signBytes = func(b []byte) ([]byte, error) {
+			sum := sha256.Sum256(b)
+			return rsa.SignPKCS1v15(
+				rand.Reader,
+				key,
+				crypto.SHA256,
+				sum[:],
+			)
+		}
+	}
+
+	u := &url.URL{
+		Path: fmt.Sprintf("/%s/%s", bucket, name),
+	}
+
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%s\n", opts.Method)
+	fmt.Fprintf(buf, "%s\n", opts.MD5)
+	fmt.Fprintf(buf, "%s\n", opts.ContentType)
+	fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
+	if len(opts.Headers) > 0 {
+		fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
+	}
+	fmt.Fprintf(buf, "%s", u.String())
+
+	b, err := signBytes(buf.Bytes())
+	if err != nil {
+		return "", err
+	}
+	encoded := base64.StdEncoding.EncodeToString(b)
+	u.Scheme = "https"
+	u.Host = "storage.googleapis.com"
+	q := u.Query()
+	q.Set("GoogleAccessId", opts.GoogleAccessID)
+	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+	q.Set("Signature", encoded)
+	u.RawQuery = q.Encode()
+	return u.String(), nil
+}
+
+// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
+// Use BucketHandle.Object to get a handle.
+type ObjectHandle struct {
+	c              *Client
+	bucket         string
+	object         string
+	acl            ACLHandle
+	gen            int64 // a negative value indicates latest
+	conds          *Conditions
+	encryptionKey  []byte // AES-256 key
+	userProject    string // for requester-pays buckets
+	readCompressed bool   // Accept-Encoding: gzip
+}
+
+// ACL provides access to the object's access control list.
+// This controls who can read and write this object.
+// This call does not perform any network operations.
+func (o *ObjectHandle) ACL() *ACLHandle {
+	return &o.acl
+}
+
+// Generation returns a new ObjectHandle that operates on a specific generation
+// of the object. By default, the handle operates on the latest generation. Not
+// all operations work when given a specific generation; check the API
+// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
+func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
+	o2 := *o
+	o2.gen = gen
+	return &o2
+}
+
+// If returns a new ObjectHandle that applies a set of preconditions.
+// Preconditions already set on the ObjectHandle are ignored.
+// Operations on the new handle will return an error if the preconditions are not
+// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
+// for more details.
+func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
+	o2 := *o
+	o2.conds = &conds
+	return &o2
+}
+
+// Key returns a new ObjectHandle that uses the supplied encryption
+// key to encrypt and decrypt the object's contents.
+//
+// Encryption key must be a 32-byte AES-256 key.
+// See https://cloud.google.com/storage/docs/encryption for details.
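+//
+// A sketch of typical use, assuming key holds a 32-byte AES-256 key and the
+// usual handle chain from a Client:
+//	obj := client.Bucket("my-bucket").Object("my-object").Key(key)
+//	// Reads and writes through obj now use key to decrypt and encrypt.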
+func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. 
+ nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// BucketName returns the name of the bucket. +func (o *ObjectHandle) BucketName() string { + return o.bucket +} + +// ObjectName returns the name of the object. +func (o *ObjectHandle) ObjectName() string { + return o.object +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + EventBasedHold optional.Bool + TemporaryHold optional.Bool + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. ACL must be nil. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. + PredefinedACL string +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// ReadCompressed when true causes the read to happen without decompressing. +func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { + o2 := *o + o2.readCompressed = compressed + return &o2 +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. 
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done. To
+// stop writing without saving the data, cancel the context.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+	return &Writer{
+		ctx:         ctx,
+		o:           o,
+		donec:       make(chan struct{}),
+		ObjectAttrs: ObjectAttrs{Name: o.object},
+		ChunkSize:   googleapi.DefaultUploadChunkSize,
+	}
+}
+
+func (o *ObjectHandle) validate() error {
+	if o.bucket == "" {
+		return errors.New("storage: bucket name is empty")
+	}
+	if o.object == "" {
+		return errors.New("storage: object name is empty")
+	}
+	if !utf8.ValidString(o.object) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	}
+	return nil
+}
+
+// parseKey converts the binary contents of a private key file to an
+// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
+// not. If so, it extracts the private key from the PEM container before
+// conversion. It only supports PEM containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+	if block, _ := pem.Decode(key); block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("storage: private key is invalid")
+	}
+	return parsed, nil
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
+	var ret string
+	if !o.RetentionExpirationTime.IsZero() {
+		ret = o.RetentionExpirationTime.Format(time.RFC3339)
+	}
+	return &raw.Object{
+		Bucket:                  bucket,
+		Name:                    o.Name,
+		EventBasedHold:          o.EventBasedHold,
+		TemporaryHold:           o.TemporaryHold,
+		RetentionExpirationTime: ret,
+		ContentType:             o.ContentType,
+		ContentEncoding:         o.ContentEncoding,
+		ContentLanguage:         o.ContentLanguage,
+		CacheControl:            o.CacheControl,
+		ContentDisposition:      o.ContentDisposition,
+		StorageClass:            o.StorageClass,
+		Acl:                     toRawObjectACL(o.ACL),
+		Metadata:                o.Metadata,
+	}
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+	// Bucket is the name of the bucket containing this GCS object.
+	// This field is read-only.
+	Bucket string
+
+	// Name is the name of the object within the bucket.
+	// This field is read-only.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	ContentType string
+
+	// ContentLanguage is the content language of the object's content.
+	ContentLanguage string
+
+	// CacheControl is the Cache-Control header to be sent in the response
+	// headers when serving the object data.
+	CacheControl string
+
+	// EventBasedHold specifies whether an object is under event-based hold. New
+	// objects created in a bucket whose DefaultEventBasedHold is set will
+	// default to that value.
+	EventBasedHold bool
+
+	// TemporaryHold specifies whether an object is under temporary hold. While
+	// this flag is set to true, the object is protected against deletion and
+	// overwrites.
+	TemporaryHold bool
+
+	// RetentionExpirationTime is a server-determined value that specifies the
+	// earliest time that the object's retention period expires.
+	// This is a read-only field.
+	RetentionExpirationTime time.Time
+
+	// ACL is the list of access control rules for the object.
+	ACL []ACLRule
+
+	// If not empty, applies a predefined set of access controls. It should be set
+	// only when writing, copying or composing an object. When copying or composing,
+	// it acts as the destinationPredefinedAcl parameter.
+	// PredefinedACL is always empty for ObjectAttrs returned from the service.
+	// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
+	// for valid values.
+	PredefinedACL string
+
+	// Owner is the owner of the object. This field is read-only.
+	//
+	// If non-zero, it is in the form of "user-".
+	Owner string
+
+	// Size is the length of the object's content. This field is read-only.
+	Size int64
+
+	// ContentEncoding is the encoding of the object's content.
+	ContentEncoding string
+
+	// ContentDisposition is the optional Content-Disposition header of the object
+	// sent in the response headers.
+	ContentDisposition string
+
+	// MD5 is the MD5 hash of the object's content. This field is read-only,
+	// except when used from a Writer. If set on a Writer, the uploaded
+	// data is rejected if its MD5 hash does not match this field.
+	MD5 []byte
+
+	// CRC32C is the CRC32 checksum of the object's content using
+	// the Castagnoli93 polynomial. This field is read-only, except when
+	// used from a Writer. If set on a Writer and Writer.SendCRC32C
+	// is true, the uploaded data is rejected if its CRC32c hash does not
+	// match this field.
+	CRC32C uint32
+
+	// MediaLink is a URL to the object's content. This field is read-only.
+	MediaLink string
+
+	// Metadata represents user-provided metadata, in key/value pairs.
+	// It can be nil if no metadata is provided.
+	Metadata map[string]string
+
+	// Generation is the generation number of the object's content.
+	// This field is read-only.
+	Generation int64
+
+	// Metageneration is the version of the metadata for this
+	// object at this generation. This field is used for preconditions
+	// and for detecting changes in metadata. A metageneration number
+	// is only meaningful in the context of a particular generation
+	// of a particular object. This field is read-only.
+	Metageneration int64
+
+	// StorageClass is the storage class of the object.
+	// This value defines how objects in the bucket are stored and
+	// determines the SLA and the cost of storage. Typical values are
+	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
+	// and "DURABLE_REDUCED_AVAILABILITY".
+	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
+	// or "REGIONAL" depending on the bucket's location settings.
+	StorageClass string
+
+	// Created is the time the object was created. This field is read-only.
+	Created time.Time
+
+	// Deleted is the time the object was deleted.
+	// If not deleted, it is the zero value. This field is read-only.
+	Deleted time.Time
+
+	// Updated is the creation or modification time of the object.
+	// For buckets with versioning enabled, changing an object's
+	// metadata does not change this property. This field is read-only.
+	Updated time.Time
+
+	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
+	// customer-supplied encryption key for the object. It is empty if there is
+	// no customer-supplied encryption key.
+	// See https://cloud.google.com/storage/docs/encryption for more about
+	// encryption in Google Cloud Storage.
+	CustomerKeySHA256 string
+
+	// Cloud KMS key name, in the form
+	// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
+	// if the object is encrypted by such a key.
+	//
+	// Providing both a KMSKeyName and a customer-supplied encryption key (via
+	// ObjectHandle.Key) will result in an error when writing an object.
+	KMSKeyName string
+
+	// Prefix is set only for ObjectAttrs which represent synthetic "directory
+	// entries" when iterating over buckets using Query.Delimiter. See
+	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
+	// populated.
+	Prefix string
+
+	// Etag is the HTTP/1.1 Entity tag for the object.
+	// This field is read-only.
+	Etag string
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time {
+	var r time.Time
+	if t != "" {
+		r, _ = time.Parse(time.RFC3339, t)
+	}
+	return r
+}
+
+func newObject(o *raw.Object) *ObjectAttrs {
+	if o == nil {
+		return nil
+	}
+	owner := ""
+	if o.Owner != nil {
+		owner = o.Owner.Entity
+	}
+	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+	crc32c, _ := decodeUint32(o.Crc32c)
+	var sha256 string
+	if o.CustomerEncryption != nil {
+		sha256 = o.CustomerEncryption.KeySha256
+	}
+	return &ObjectAttrs{
+		Bucket:                  o.Bucket,
+		Name:                    o.Name,
+		ContentType:             o.ContentType,
+		ContentLanguage:         o.ContentLanguage,
+		CacheControl:            o.CacheControl,
+		EventBasedHold:          o.EventBasedHold,
+		TemporaryHold:           o.TemporaryHold,
+		RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
+		ACL:                     toObjectACLRules(o.Acl),
+		Owner:                   owner,
+		ContentEncoding:         o.ContentEncoding,
+		ContentDisposition:      o.ContentDisposition,
+		Size:                    int64(o.Size),
+		MD5:                     md5,
+		CRC32C:                  crc32c,
+		MediaLink:               o.MediaLink,
+		Metadata:                o.Metadata,
+		Generation:              o.Generation,
+		Metageneration:          o.Metageneration,
+		StorageClass:            o.StorageClass,
+		CustomerKeySHA256:       sha256,
+		KMSKeyName:              o.KmsKeyName,
+		Created:                 convertTime(o.TimeCreated),
+		Deleted:                 convertTime(o.TimeDeleted),
+		Updated:                 convertTime(o.Updated),
+		Etag:                    o.Etag,
+	}
+}
+
+// decodeUint32 decodes a uint32 encoded in Base64 in big-endian byte order.
+func decodeUint32(b64 string) (uint32, error) {
+	d, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return 0, err
+	}
+	if len(d) != 4 {
+		return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
+	}
+	return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
+}
+
+// encodeUint32 encodes a uint32 as Base64 in big-endian byte order.
+func encodeUint32(u uint32) string {
+	b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
+	return base64.StdEncoding.EncodeToString(b)
+}
+
+// Query represents a query to filter objects from a bucket.
+type Query struct {
+	// Delimiter returns results in a directory-like fashion.
+	// Results will contain only objects whose names, aside from the
+	// prefix, do not contain delimiter. Objects whose names, aside from
+	// the prefix, contain delimiter will have their name truncated after
+	// the delimiter and returned in prefixes. Duplicate prefixes are omitted.
+	// Optional.
+	Delimiter string
+
+	// Prefix is the prefix filter to query objects
+	// whose names begin with this prefix.
+	// Optional.
+ Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// Conditions constrain methods to act on specific generations of +// objects. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. + // Use DoesNotExist to specify that the object does not exist in the bucket. + GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
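+//
+// For example, given conds = &Conditions{GenerationMatch: 1234}, applyConds
+// uses reflection (via setConditionField below) to invoke the call's builder
+// method, roughly equivalent to:
+//	call.IfGenerationMatch(1234)
+// DoesNotExist is expressed the same way, as IfGenerationMatch(0).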
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
+ buf = strconv.AppendInt(buf, n, 10) + } + + if gen >= 0 { + appendParam("generation=", gen) + } + if conds == nil { + return string(buf) + } + switch { + case conds.GenerationMatch != 0: + appendParam("ifGenerationMatch=", conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) + case conds.DoesNotExist: + appendParam("ifGenerationMatch=", 0) + } + switch { + case conds.MetagenerationMatch != 0: + appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) + } + return string(buf) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. +func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { + r := c.raw.Projects.ServiceAccount.Get(projectID) + res, err := r.Context(ctx).Do() + if err != nil { + return "", err + } + return res.EmailAddress, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/writer.go b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 00000000000..3a58c404e0d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,261 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "unicode/utf8" + + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. 
Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
+	// to true in addition to setting the Writer's CRC32C field, because zero
+	// is a valid CRC and normally a zero would not be transmitted.
+	// If a CRC32C is sent, and the data written does not match the checksum,
+	// the write will be rejected.
+	SendCRC32C bool
+
+	// ChunkSize controls the maximum number of bytes of the object that the
+	// Writer will attempt to send to the server in a single request. Objects
+	// smaller than the size will be sent in a single request, while larger
+	// objects will be split over multiple requests. The size will be rounded up
+	// to the nearest multiple of 256K. If zero, chunking will be disabled and
+	// the object will be uploaded in a single request.
+	//
+	// ChunkSize will default to a reasonable value. If you perform many
+	// concurrent writes of small objects, you may wish to set ChunkSize to a
+	// value that matches your objects' sizes to avoid consuming large amounts
+	// of memory.
+	//
+	// ChunkSize must be set before the first Write call.
+	ChunkSize int
+
+	// ProgressFunc can be used to monitor the progress of a large write
+	// operation. If ProgressFunc is not nil and writing requires multiple
+	// calls to the underlying service (see
+	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
+	// then ProgressFunc will be invoked after each call with the number of bytes of
+	// content copied so far.
+	//
+	// ProgressFunc should return quickly without blocking.
+	ProgressFunc func(int64)
+
+	ctx context.Context
+	o   *ObjectHandle
+
+	opened bool
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
+	obj   *ObjectAttrs
+
+	mu  sync.Mutex
+	err error
+}
+
+func (w *Writer) open() error {
+	attrs := w.ObjectAttrs
+	// Check the developer didn't change the object Name (this is unfortunate, but
+	// we don't want to store an object under the wrong name).
+	if attrs.Name != w.o.object {
+		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+	}
+	if !utf8.ValidString(attrs.Name) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+	}
+	if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
+		return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
+	}
+	pr, pw := io.Pipe()
+	w.pw = pw
+	w.opened = true
+
+	go w.monitorCancel()
+
+	if w.ChunkSize < 0 {
+		return errors.New("storage: Writer.ChunkSize must be non-negative")
+	}
+	mediaOpts := []googleapi.MediaOption{
+		googleapi.ChunkSize(w.ChunkSize),
+	}
+	if c := attrs.ContentType; c != "" {
+		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+	}
+
+	go func() {
+		defer close(w.donec)
+
+		rawObj := attrs.toRawObject(w.o.bucket)
+		if w.SendCRC32C {
+			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
+		}
+		if w.MD5 != nil {
+			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
+		}
+		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
+			Media(pr, mediaOpts...).
+			Projection("full").
+			Context(w.ctx)
+		if w.ProgressFunc != nil {
+			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
+		}
+		if attrs.KMSKeyName != "" {
+			call.KmsKeyName(attrs.KMSKeyName)
+		}
+		if attrs.PredefinedACL != "" {
+			call.PredefinedAcl(attrs.PredefinedACL)
+		}
+		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
+			w.mu.Lock()
+			w.err = err
+			w.mu.Unlock()
+			pr.CloseWithError(err)
+			return
+		}
+		var resp *raw.Object
+		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
+		if err == nil {
+			if w.o.userProject != "" {
+				call.UserProject(w.o.userProject)
+			}
+			setClientHeader(call.Header())
+			// If the chunk size is zero, then no chunking is done on the Reader,
+			// which means we cannot retry: the first call will read the data, and if
+			// it fails, there is no way to re-read.
+			if w.ChunkSize == 0 {
+				resp, err = call.Do()
+			} else {
+				// We will only retry here if the initial POST, which obtains a URI for
+				// the resumable upload, fails with a retryable error. The upload itself
+				// has its own retry logic.
+				err = runWithRetry(w.ctx, func() error {
+					var err2 error
+					resp, err2 = call.Do()
+					return err2
+				})
+			}
+		}
+		if err != nil {
+			w.mu.Lock()
+			w.err = err
+			w.mu.Unlock()
+			pr.CloseWithError(err)
+			return
+		}
+		w.obj = newObject(resp)
+	}()
+	return nil
+}
+
+// Write appends to w. It implements the io.Writer interface.
+//
+// Since writes happen asynchronously, Write may return a nil
+// error even though the write failed (or will fail). Always
+// use the error returned from Writer.Close to determine if
+// the upload was successful.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	werr := w.err
+	w.mu.Unlock()
+	if werr != nil {
+		return 0, werr
+	}
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return 0, err
+		}
+	}
+	n, err = w.pw.Write(p)
+	if err != nil {
+		w.mu.Lock()
+		werr := w.err
+		w.mu.Unlock()
+		// Preserve existing behavior: when the context is canceled, Write
+		// returns context.Canceled instead of "io: read/write on closed pipe".
+		// This hides the pipe implementation detail from users and makes Write
+		// seem as though it's an RPC.
+		if werr == context.Canceled || werr == context.DeadlineExceeded {
+			return n, werr
+		}
+	}
+	return n, err
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return err
+		}
+	}
+
+	// Closing either the read or write causes the entire pipe to close.
+	if err := w.pw.Close(); err != nil {
+		return err
+	}
+
+	<-w.donec
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.err
+}
+
+// monitorCancel is intended to be used as a background goroutine. It monitors
+// the context, and when it observes that the context has been canceled, it
+// manually closes things that do not take a context.
+func (w *Writer) monitorCancel() {
+	select {
+	case <-w.ctx.Done():
+		w.mu.Lock()
+		werr := w.ctx.Err()
+		w.err = werr
+		w.mu.Unlock()
+
+		// Closing either the read or write causes the entire pipe to close.
+		w.CloseWithError(werr)
+	case <-w.donec:
+	}
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+//
+// Deprecated: cancel the context passed to NewWriter instead.
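+//
+// A sketch of the preferred pattern, assuming obj is an *ObjectHandle:
+//	ctx, cancel := context.WithCancel(ctx)
+//	w := obj.NewWriter(ctx)
+//	// ... if the upload must be abandoned:
+//	cancel() // Close will then report the context error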
+func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/NOTICE b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 00000000000..eaffaab94c4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/levenshtein.go b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 00000000000..df69ce70165 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. 
Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/params.go b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 00000000000..a85727b3eff --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
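As a minimal usage sketch of the four entry points documented in levenshtein.go above (Distance, Similarity, Match, and the underlying Calculate), assuming the package is importable at its upstream path github.com/agext/levenshtein; the sample strings and the values in the comments are illustrative only:

package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// A nil *Params uses cost 1 for insertion, substitution and deletion,
	// with no maximum cost and no minimum score.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into 0..1 (1 = identical strings).
	fmt.Printf("%.3f\n", levenshtein.Similarity("kitten", "sitting", nil)) // 0.571

	// Match adds the Winkler-style common-prefix bonus described above.
	fmt.Printf("%.3f\n", levenshtein.Match("levenshtein", "levenshtien", nil)) // 0.891

	// Calculate also reports the common prefix/suffix lengths it trimmed.
	dist, prefix, suffix := levenshtein.Calculate([]rune("flaw"), []rune("lawn"), 0, 1, 1, 1)
	fmt.Println(dist, prefix, suffix) // 2 0 0
}

The setters on *Params, shown next, adjust these costs and thresholds via a builder-style chain.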
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
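With the defaults initialized by NewParams above (all edit costs 1, bonusPrefix = 4, bonusScale = 0.1, bonusThreshold = 0.7), one worked pass through the bonus arithmetic in Match, for illustration:

	Match("levenshtein", "levenshtien", nil)
	dist    = 2                        // the transposed "ei" costs two substitutions
	maxDist = 11                       // subCost < delCost+insCost, so l1*subCost + 0*insCost = 11
	sim     = 1 - 2/11 ≈ 0.818         // at or above bonusThreshold (0.7), so the bonus applies
	pl      = min(8, bonusPrefix) = 4  // eight common leading runes, capped at bonusPrefix
	result  = 0.818 + 4*0.1*(1-0.818) ≈ 0.891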
+func (p *Params) BonusThreshold(v float64) *Params {
+	if v >= 0 {
+		p.bonusThreshold = v
+	}
+	return p
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/LICENSE
new file mode 100644
index 00000000000..684b03b4a27
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/LICENSE
@@ -0,0 +1,95 @@
+Copyright (c) 2017 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------
+
+Unicode table generation programs are under a separate copyright and license:
+
+Copyright (c) 2014 Couchbase, Inc.
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+except in compliance with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+either express or implied. See the License for the specific language governing permissions
+and limitations under the License.
+
+---------
+
+Grapheme break data is provided as part of the Unicode character database,
+copyright 2016 Unicode, Inc, which is provided with the following license:
+
+Unicode Data Files include all data files under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+Unicode Data Files do not include PDF online code charts under the
+directory http://www.unicode.org/Public/.
+
+Software includes any source code published in the Unicode Standard
+or under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+NOTICE TO USER: Carefully read the following legal agreement.
+BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
+DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
+YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
+TERMS AND CONDITIONS OF THIS AGREEMENT.
+IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
+THE DATA FILES OR SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 00000000000..5752e9ef8f7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. 
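For illustration, a short sketch of driving these two helpers with ScanGraphemeClusters, the bufio.SplitFunc provided by the generated grapheme_clusters.go below; the sample input and printed counts are illustrative:

package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// "é" spelled as 'e' plus combining acute (U+0301) is two runes,
	// but a single grapheme cluster under UAX #29.
	input := []byte("e\u0301x")

	clusters, err := textseg.AllTokens(input, textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(clusters)) // 2: "é" and "x"

	n, _ := textseg.TokenCount(input, textseg.ScanGraphemeClusters)
	fmt.Println(n) // 2
}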
+func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 00000000000..81f3a747178 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 00000000000..012bc690aa1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 
1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 
4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 
7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 
10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, 
+ 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 
131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 
128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 
177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 
144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 
129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 
132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 
176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 182, 184, 185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 
158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 
183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 
138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 
173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 178, 179, 182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 
154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 
191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 
139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 130, 131, 135, + 191, 129, 134, 136, 190, 128, 159, 160, + 191, 128, 175, 176, 255, 10, 13, 127, + 194, 216, 219, 220, 224, 225, 226, 234, + 235, 236, 237, 239, 240, 243, 0, 31, + 128, 191, 192, 223, 227, 238, 241, 247, + 248, 255, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 194, 216, + 219, 220, 224, 225, 226, 234, 235, 236, + 237, 239, 240, 243, 32, 126, 192, 223, + 227, 238, 241, 247, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 235, 236, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 237, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 
226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 235, 236, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 237, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, +} + +var _graphclust_single_lengths []byte = []byte{ + 0, 1, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 2, 3, 2, 2, 2, 3, + 2, 2, 3, 3, 1, 2, 4, 2, + 2, 4, 4, 2, 0, 2, 0, 3, + 1, 0, 1, 21, 1, 0, 4, 0, + 0, 0, 1, 2, 0, 1, 1, 1, + 4, 0, 3, 1, 3, 2, 0, 3, + 0, 5, 2, 0, 0, 1, 0, 2, + 0, 0, 15, 0, 0, 0, 4, 0, + 0, 0, 3, 1, 0, 4, 1, 4, + 4, 3, 1, 0, 7, 5, 1, 1, + 0, 1, 0, 23, 1, 0, 1, 1, + 1, 1, 0, 2, 1, 3, 2, 0, + 1, 3, 1, 2, 0, 1, 0, 2, + 1, 2, 3, 4, 0, 0, 0, 1, + 0, 6, 2, 0, 0, 0, 0, 1, + 3, 0, 0, 0, 1, 0, 1, 4, + 0, 0, 0, 1, 1, 1, 4, 0, + 0, 0, 6, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 6, + 0, 1, 0, 1, 0, 2, 0, 0, + 15, 0, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0, 2, 1, 1, 0, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 0, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 3, 0, 0, 0, 0, + 1, 1, 0, 1, 0, 1, 0, 0, + 0, 29, 0, 0, 0, 3, 2, 3, + 2, 2, 2, 3, 2, 2, 3, 3, + 1, 2, 4, 2, 2, 4, 4, 2, + 0, 2, 0, 3, 1, 0, 1, 21, + 1, 0, 4, 0, 0, 0, 1, 2, + 0, 1, 1, 1, 4, 0, 3, 1, 
+ 3, 2, 0, 3, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 4, 0, 0, 0, 3, 1, + 0, 4, 1, 4, 4, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 1, 0, 1, 1, 1, 1, 0, 2, + 1, 3, 2, 0, 1, 3, 1, 2, + 0, 1, 0, 2, 1, 2, 3, 4, + 0, 0, 0, 1, 0, 6, 2, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 1, 0, 1, 4, 0, 0, 0, 1, + 1, 1, 4, 0, 0, 0, 6, 0, + 0, 0, 1, 1, 2, 1, 1, 5, + 0, 24, 0, 24, 0, 0, 23, 0, + 0, 1, 0, 2, 0, 0, 0, 28, + 0, 3, 23, 2, 0, 2, 2, 3, + 2, 2, 2, 0, 54, 54, 27, 1, + 0, 5, 2, 0, 1, 1, 0, 0, + 14, 0, 3, 2, 2, 3, 2, 2, + 2, 54, 54, 27, 1, 0, 2, 0, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 7, 1, 0, 1, 0, + 2, 3, 2, 1, 0, 1, 1, 3, + 0, 1, 3, 0, 1, 1, 2, 1, + 1, 5, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 24, 0, 24, 0, + 0, 23, 0, 0, 1, 0, 2, 0, + 0, 0, 28, 0, 3, 23, 2, 0, + 2, 2, 3, 2, 2, 2, 0, 54, + 54, 27, 1, 1, 5, 2, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 2, 1, 1, 0, 3, 1, + 0, 6, 5, 1, 1, 0, 1, 0, + 23, 0, 0, 0, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 1, 0, 6, 0, + 0, 0, 0, 0, 1, 3, 0, 0, + 0, 1, 4, 0, 0, 0, 6, 1, + 7, 3, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 29, + 0, 0, 0, 3, 2, 3, 2, 2, + 2, 3, 2, 2, 3, 3, 1, 2, + 4, 2, 2, 4, 4, 2, 0, 2, + 0, 3, 1, 0, 1, 21, 1, 0, + 4, 0, 0, 0, 1, 2, 0, 1, + 1, 1, 4, 0, 3, 1, 3, 2, + 0, 3, 0, 5, 2, 0, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 4, 0, 0, 0, 3, 1, 0, 4, + 1, 4, 4, 3, 1, 0, 7, 5, + 1, 1, 0, 1, 0, 23, 1, 0, + 1, 1, 1, 1, 0, 2, 1, 3, + 2, 0, 1, 3, 1, 2, 0, 1, + 0, 2, 1, 2, 3, 4, 0, 0, + 0, 1, 0, 6, 2, 0, 0, 0, + 0, 1, 3, 0, 0, 0, 1, 0, + 1, 4, 0, 0, 0, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 1, 1, 1, 4, 0, 0, 0, + 6, 2, 3, 2, 2, 2, 3, 2, + 2, 3, 3, 1, 2, 4, 2, 2, + 4, 4, 2, 0, 2, 0, 3, 1, + 0, 1, 21, 1, 0, 4, 0, 0, + 0, 1, 2, 0, 1, 1, 1, 4, + 0, 3, 1, 3, 2, 0, 3, 0, + 5, 2, 0, 0, 1, 0, 2, 0, + 0, 15, 0, 0, 0, 4, 0, 0, + 0, 3, 1, 0, 4, 1, 4, 4, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 1, 0, 1, 1, 
1, + 1, 0, 2, 1, 3, 2, 0, 1, + 3, 1, 2, 0, 1, 0, 2, 1, + 2, 3, 4, 0, 0, 0, 1, 0, + 6, 2, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 1, 0, 1, 4, 0, + 0, 0, 1, 0, 0, 14, 0, 3, + 2, 2, 3, 2, 2, 2, 54, 54, + 29, 1, 0, 0, 0, 0, 2, 1, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 7, 1, 0, 1, + 0, 2, 3, 2, 1, 0, 1, 1, + 3, 0, 1, 5, 0, 0, 17, 20, + 20, 20, 14, 20, 20, 20, 23, 21, + 21, 21, 20, 23, 20, 20, 20, 21, + 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, +} + +var _graphclust_range_lengths []byte = []byte{ + 0, 0, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 5, 2, 6, 2, 8, 4, + 2, 5, 0, 3, 2, 4, 1, 6, + 2, 4, 4, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 4, 4, 1, 1, + 2, 2, 2, 2, 1, 1, 6, 2, + 5, 1, 3, 3, 4, 4, 4, 4, + 2, 0, 0, 1, 1, 0, 1, 0, + 1, 1, 0, 2, 1, 1, 2, 4, + 1, 2, 4, 1, 5, 0, 3, 2, + 1, 0, 0, 2, 0, 0, 0, 0, + 1, 4, 1, 0, 2, 1, 4, 2, + 0, 4, 3, 4, 2, 2, 6, 2, + 2, 4, 1, 4, 2, 4, 1, 3, + 3, 2, 2, 0, 1, 1, 1, 0, + 1, 0, 3, 3, 1, 2, 2, 2, + 0, 5, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 2, 2, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 0, + 1, 0, 1, 0, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 2, 2, 1, + 1, 2, 2, 1, 1, 3, 2, 2, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 2, 2, 0, + 2, 2, 1, 1, 2, 6, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 2, + 2, 0, 1, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 1, 1, 4, 1, 1, 1, + 1, 1, 4, 1, 2, 2, 5, 2, + 6, 2, 8, 4, 2, 5, 0, 3, + 2, 4, 1, 6, 2, 4, 4, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 4, 4, 1, 1, 2, 2, 2, 2, + 1, 1, 6, 2, 5, 1, 3, 3, + 4, 4, 4, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 2, 4, 1, 2, 4, 1, + 5, 0, 3, 2, 1, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 4, 2, 0, 4, 3, 4, + 2, 2, 6, 2, 2, 4, 1, 4, + 2, 4, 1, 3, 3, 2, 2, 0, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 2, 3, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 1, 0, 1, + 1, 0, 1, 0, 1, 3, 1, 2, + 2, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 1, 1, 2, 2, + 2, 1, 
3, 2, 1, 1, 3, 1, + 3, 3, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 2, 2, 4, 1, 1, + 2, 1, 1, 1, 3, 1, 2, 1, + 2, 1, 2, 0, 0, 1, 1, 5, + 9, 2, 1, 3, 5, 3, 1, 6, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 1, 0, 1, + 1, 0, 1, 1, 0, 1, 0, 1, + 3, 1, 2, 2, 1, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 1, + 1, 2, 2, 1, 1, 5, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 2, 2, 1, 1, 2, + 2, 1, 1, 3, 2, 2, 0, 0, + 2, 0, 0, 0, 0, 1, 4, 1, + 0, 2, 1, 2, 2, 0, 2, 2, + 1, 1, 2, 6, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 0, 1, 0, 3, + 3, 1, 2, 2, 2, 0, 5, 1, + 1, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 1, + 4, 1, 2, 2, 5, 2, 6, 2, + 8, 4, 2, 5, 0, 3, 2, 4, + 1, 6, 2, 4, 4, 1, 1, 2, + 1, 2, 1, 4, 0, 0, 4, 4, + 1, 1, 2, 2, 2, 2, 1, 1, + 6, 2, 5, 1, 3, 3, 4, 4, + 4, 4, 2, 0, 0, 1, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 2, 4, 1, 2, 4, 1, 5, 0, + 3, 2, 1, 0, 0, 2, 0, 0, + 0, 0, 1, 4, 1, 0, 2, 1, + 4, 2, 0, 4, 3, 4, 2, 2, + 6, 2, 2, 4, 1, 4, 2, 4, + 1, 3, 3, 2, 2, 0, 1, 1, + 1, 0, 1, 0, 3, 3, 1, 2, + 2, 2, 0, 5, 1, 1, 0, 1, + 0, 1, 1, 1, 0, 0, 0, 3, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 1, 0, + 0, 5, 2, 6, 2, 8, 4, 2, + 5, 0, 3, 2, 4, 1, 6, 2, + 4, 4, 1, 1, 2, 1, 2, 1, + 4, 0, 0, 4, 4, 1, 1, 2, + 2, 2, 2, 1, 1, 6, 2, 5, + 1, 3, 3, 4, 4, 4, 4, 2, + 0, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 2, 1, 1, 2, 4, 1, + 2, 4, 1, 5, 0, 3, 2, 1, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 4, 2, 0, + 4, 3, 4, 2, 2, 6, 2, 2, + 4, 1, 4, 2, 4, 1, 3, 3, + 2, 2, 0, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 0, 1, 1, + 1, 0, 1, 3, 1, 3, 3, 1, + 0, 0, 0, 0, 0, 1, 1, 1, + 3, 2, 4, 1, 0, 1, 1, 1, + 3, 1, 1, 1, 3, 1, 3, 1, + 3, 1, 2, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, 
+ 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 5, 9, 2, 1, 3, 5, 3, 1, + 6, 1, 1, 2, 2, 2, 6, 0, + 0, 0, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 8, 11, 15, + 17, 20, 25, 28, 30, 32, 34, 63, + 68, 70, 73, 76, 80, 84, 90, 97, + 102, 106, 112, 115, 120, 123, 129, 135, + 138, 144, 146, 152, 155, 157, 161, 163, + 169, 171, 176, 178, 200, 203, 207, 212, + 214, 217, 220, 222, 225, 227, 230, 233, + 235, 241, 243, 246, 249, 252, 254, 256, + 262, 265, 271, 274, 281, 283, 285, 287, + 289, 291, 294, 296, 298, 314, 317, 319, + 321, 326, 329, 332, 334, 336, 339, 342, + 344, 348, 353, 357, 360, 364, 366, 369, + 377, 383, 385, 387, 389, 395, 397, 421, + 424, 426, 429, 432, 434, 437, 440, 443, + 445, 449, 457, 459, 461, 463, 465, 468, + 471, 473, 475, 477, 480, 483, 488, 490, + 492, 494, 496, 498, 500, 507, 511, 515, + 517, 520, 523, 527, 531, 537, 539, 541, + 545, 547, 549, 551, 553, 556, 560, 562, + 565, 570, 573, 575, 577, 579, 610, 615, + 617, 620, 626, 634, 640, 649, 654, 665, + 673, 678, 686, 690, 697, 701, 708, 714, + 723, 728, 737, 746, 750, 752, 757, 759, + 765, 768, 773, 775, 797, 803, 808, 814, + 816, 819, 822, 826, 831, 833, 836, 844, + 848, 858, 860, 867, 872, 880, 887, 892, + 900, 903, 909, 912, 914, 916, 918, 920, + 923, 925, 927, 943, 946, 948, 950, 957, + 962, 964, 967, 975, 978, 984, 989, 994, + 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, + 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, + 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, + 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, + 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, + 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, + 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, + 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, + 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, + 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, + 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, + 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, + 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, + 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, + 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, + 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, + 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, + 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, + 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, + 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, + 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, + 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, + 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, + 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, + 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, + 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, + 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, + 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, + 1864, 1866, 1897, 1902, 1904, 1907, 
1913, 1921, + 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, + 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, + 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, + 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, + 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, + 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, + 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, + 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, + 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, + 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, + 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, + 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, + 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, + 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, + 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, + 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, + 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, + 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, + 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, + 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, + 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, + 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, + 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, + 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, + 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, + 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, + 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, + 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, + 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, + 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, + 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, + 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, + 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, + 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, + 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, + 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, + 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, + 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, + 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, + 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, + 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, + 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, + 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, + 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, + 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, + 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, + 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, + 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, + 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, + 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, + 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, + 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, + 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, + 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, + 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, + 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, + 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, + 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, + 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, + 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, + 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, + 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, + 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, + 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, + 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, + 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, + 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, + 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, + 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, + 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, + 4848, 4904, 4934, 4938, 4941, 4948, 
4956, 4958, + 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, + 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, + 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, + 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, + 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, + 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, + 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, + 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, + 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, + 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, + 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, + 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, + 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, + 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, + 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, + 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, + 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, + 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, + 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, + 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, + 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, + 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, + 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, + 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, + 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, + 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, + 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, + 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, + 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, + 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, + 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, + 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, + 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, + 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, + 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, + 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, + 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, + 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, + 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, + 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, + 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, + 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, + 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, + 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, + 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, + 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, + 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, + 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, + 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, + 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, + 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, + 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, + 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, + 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, + 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, + 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, + 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, + 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, + 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, + 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, + 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, + 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, + 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, + 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, + 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, + 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, + 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, + 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, + 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, + 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, + 7391, 7399, 7402, 7409, 7412, 7418, 
7420, 7426, + 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, + 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, + 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, + 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, + 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, + 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, + 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, + 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, + 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, + 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, + 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, + 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, + 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, + 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, + 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, + 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, + 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, + 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, + 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, + 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, + 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, + 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, + 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, + 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, + 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, + 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, + 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, + 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, + 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, + 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, + 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, + 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, + 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, + 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, + 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, + 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, + 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, + 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, + 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, + 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, + 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, + 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, + 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, + 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, + 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, + 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, + 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, + 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, + 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, + 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, + 9718, 9739, 9760, 9781, +} + +var _graphclust_indicies []int16 = []int16{ + 0, 1, 3, 2, 2, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 2, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 2, 2, 3, 3, 2, + 3, 2, 4, 5, 6, 7, 8, 10, + 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 9, 13, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 2, 2, 3, 2, 2, 2, 3, + 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 2, 2, 2, 2, 2, 2, + 3, 2, 2, 2, 2, 3, 3, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 3, 2, + 3, 3, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 2, 3, + 3, 2, 2, 2, 2, 2, 2, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 2, + 3, 2, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 3, 3, 2, 3, 2, 2, 2, + 3, 3, 2, 3, 3, 2, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 2, 2, 2, + 3, 3, 3, 2, 3, 2, 3, 2, + 3, 3, 3, 3, 3, 2, 3, 3, + 2, 53, 54, 55, 56, 57, 2, 3, + 58, 2, 53, 54, 59, 55, 56, 57, + 
2, 3, 2, 3, 2, 3, 2, 3, + 2, 3, 2, 60, 61, 2, 3, 2, + 3, 2, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, + 76, 2, 3, 3, 2, 3, 2, 3, + 2, 3, 3, 3, 3, 2, 3, 3, + 2, 2, 2, 3, 3, 2, 3, 2, + 3, 3, 2, 2, 2, 3, 3, 2, + 3, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 3, 2, 3, 3, 2, + 77, 78, 63, 2, 3, 2, 3, 3, + 2, 79, 80, 81, 82, 83, 84, 85, + 2, 86, 87, 88, 89, 90, 2, 3, + 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 2, 3, 2, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 104, 108, + 109, 110, 111, 112, 2, 3, 3, 2, + 2, 3, 2, 2, 3, 3, 3, 2, + 3, 2, 3, 3, 2, 2, 2, 3, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 3, 2, 2, + 3, 3, 3, 2, 2, 2, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 2, + 3, 3, 2, 113, 114, 115, 116, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 117, 2, 3, 2, 118, 119, 120, 121, + 122, 123, 2, 3, 3, 3, 2, 2, + 2, 2, 3, 3, 2, 3, 3, 2, + 2, 2, 3, 3, 3, 3, 2, 124, + 125, 126, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 127, 128, 129, + 2, 130, 2, 2, 130, 2, 130, 130, + 2, 130, 130, 2, 130, 130, 130, 2, + 130, 2, 130, 130, 2, 130, 130, 130, + 130, 2, 130, 130, 2, 2, 130, 130, + 2, 130, 2, 131, 132, 133, 134, 135, + 136, 137, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 22, 151, + 152, 153, 154, 155, 156, 157, 158, 159, + 138, 2, 130, 130, 130, 130, 2, 130, + 2, 130, 130, 2, 3, 3, 2, 2, + 3, 130, 130, 2, 130, 130, 2, 130, + 2, 3, 130, 130, 130, 3, 3, 2, + 130, 130, 130, 2, 2, 2, 130, 2, + 3, 3, 130, 130, 3, 2, 130, 130, + 130, 2, 130, 2, 130, 2, 130, 2, + 3, 2, 2, 130, 130, 2, 130, 2, + 3, 130, 130, 3, 130, 2, 3, 130, + 130, 3, 3, 130, 130, 2, 130, 130, + 3, 2, 130, 130, 130, 3, 3, 3, + 2, 130, 3, 130, 2, 2, 2, 3, + 2, 2, 2, 130, 130, 130, 3, 130, + 3, 2, 130, 130, 3, 3, 3, 130, + 130, 130, 2, 130, 130, 3, 3, 2, + 2, 2, 130, 130, 130, 2, 130, 2, + 3, 130, 130, 130, 130, 3, 130, 3, + 3, 2, 130, 3, 130, 2, 130, 2, + 130, 3, 130, 130, 2, 130, 2, 130, + 130, 130, 130, 3, 2, 3, 130, 2, + 130, 130, 130, 130, 2, 130, 2, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 2, 3, 130, 130, + 3, 130, 2, 3, 130, 130, 130, 2, + 130, 3, 130, 130, 130, 2, 130, 2, + 130, 130, 2, 130, 130, 2, 3, 130, + 3, 2, 130, 130, 130, 2, 3, 130, + 2, 130, 130, 2, 130, 130, 3, 130, + 3, 3, 130, 2, 130, 130, 3, 2, + 130, 130, 130, 130, 3, 130, 130, 3, + 130, 2, 130, 2, 3, 3, 3, 130, + 130, 3, 2, 130, 2, 130, 2, 3, + 3, 3, 3, 130, 130, 3, 130, 2, + 3, 130, 130, 3, 130, 3, 2, 3, + 130, 3, 130, 2, 3, 130, 130, 130, + 130, 3, 130, 2, 130, 130, 2, 181, + 182, 183, 184, 185, 2, 130, 58, 2, + 130, 2, 130, 2, 130, 2, 130, 2, + 186, 187, 2, 130, 2, 130, 2, 188, + 189, 190, 191, 66, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 2, 130, + 130, 2, 130, 2, 130, 2, 130, 130, + 130, 3, 3, 130, 2, 130, 2, 130, + 2, 3, 130, 2, 130, 3, 2, 3, + 130, 130, 130, 3, 130, 3, 2, 130, + 2, 3, 130, 3, 130, 3, 130, 2, + 130, 130, 3, 130, 2, 130, 130, 130, + 130, 2, 130, 3, 3, 130, 130, 3, + 2, 130, 130, 3, 130, 3, 2, 202, + 203, 189, 2, 130, 2, 130, 130, 2, + 204, 205, 206, 207, 208, 209, 210, 2, + 211, 212, 213, 214, 215, 2, 130, 2, + 130, 2, 130, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 2, 130, 3, 130, 2, + 2, 130, 3, 2, 3, 3, 2, 130, + 3, 130, 130, 2, 130, 2, 3, 130, + 3, 130, 3, 2, 2, 130, 2, 3, + 130, 130, 3, 130, 3, 130, 2, 130, + 3, 130, 2, 130, 130, 3, 130, 3, + 2, 130, 130, 3, 3, 3, 3, 130, + 130, 2, 3, 130, 2, 3, 3, 130, + 2, 
130, 3, 130, 3, 130, 3, 130, + 2, 3, 2, 130, 130, 3, 3, 130, + 3, 130, 2, 2, 2, 130, 130, 3, + 130, 3, 130, 2, 2, 130, 3, 3, + 130, 3, 130, 2, 3, 130, 3, 130, + 2, 3, 3, 130, 130, 2, 3, 3, + 3, 130, 130, 2, 239, 240, 115, 241, + 2, 130, 2, 130, 2, 130, 2, 242, + 2, 130, 2, 243, 244, 245, 246, 247, + 248, 2, 3, 3, 130, 130, 130, 2, + 2, 2, 2, 130, 130, 2, 130, 130, + 2, 2, 2, 130, 130, 130, 130, 2, + 249, 250, 251, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 130, 2, 252, 2, + 3, 2, 253, 2, 254, 255, 256, 258, + 257, 2, 130, 2, 2, 130, 130, 3, + 2, 3, 2, 259, 2, 260, 261, 262, + 264, 263, 2, 3, 2, 2, 3, 3, + 79, 80, 81, 82, 83, 84, 2, 3, + 1, 265, 265, 3, 1, 265, 266, 3, + 1, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 267, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 267, 267, 268, 268, 267, 268, + 267, 269, 270, 271, 272, 273, 275, 276, + 277, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 274, 278, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 267, 267, 268, 267, 267, 267, 268, 268, + 268, 268, 267, 267, 267, 267, 267, 267, + 268, 267, 267, 267, 267, 267, 267, 268, + 267, 267, 267, 267, 268, 268, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 268, 267, 268, + 268, 267, 267, 267, 267, 267, 267, 268, + 268, 268, 268, 268, 268, 267, 268, 268, + 267, 267, 267, 267, 267, 267, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 267, 268, + 267, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 315, 316, 317, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 268, 268, 267, 268, 267, 267, 267, 268, + 268, 267, 268, 268, 267, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 267, 267, 267, 268, + 268, 268, 267, 268, 267, 268, 267, 268, + 268, 268, 268, 268, 267, 268, 268, 267, + 318, 319, 320, 321, 322, 267, 268, 323, + 267, 318, 319, 324, 320, 321, 322, 267, + 268, 267, 268, 267, 268, 267, 268, 267, + 268, 267, 325, 326, 267, 268, 267, 268, + 267, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 267, 268, 268, 267, 268, 267, 268, 267, + 268, 268, 268, 268, 267, 268, 268, 267, + 267, 267, 268, 268, 267, 268, 267, 268, + 268, 267, 267, 267, 268, 268, 267, 268, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 268, 267, 268, 268, 267, 342, + 343, 328, 267, 268, 267, 268, 268, 267, + 344, 345, 346, 347, 348, 349, 350, 267, + 351, 352, 353, 354, 355, 267, 268, 267, + 268, 267, 268, 267, 268, 268, 268, 268, + 268, 267, 268, 267, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 369, 373, 374, + 375, 376, 377, 267, 268, 268, 267, 267, + 268, 267, 267, 268, 268, 268, 267, 268, + 267, 268, 268, 267, 267, 267, 268, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 268, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 268, 267, 267, 268, + 268, 268, 267, 267, 267, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 378, 379, 380, 381, 267, 268, + 267, 268, 267, 268, 267, 268, 267, 382, + 267, 268, 267, 383, 384, 385, 386, 387, + 388, 267, 268, 268, 268, 267, 267, 267, + 267, 268, 268, 267, 268, 268, 267, 267, + 267, 268, 268, 268, 268, 267, 389, 390, + 391, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 392, 393, 394, 267, + 395, 267, 395, 267, 
267, 395, 395, 267, + 395, 395, 267, 395, 395, 395, 267, 395, + 267, 395, 395, 267, 395, 395, 395, 395, + 267, 395, 395, 267, 267, 395, 395, 267, + 395, 267, 396, 397, 398, 399, 400, 401, + 402, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 287, 416, 417, + 418, 419, 420, 421, 422, 423, 424, 403, + 267, 395, 395, 395, 395, 267, 395, 267, + 395, 395, 267, 268, 268, 267, 267, 268, + 395, 395, 267, 395, 395, 267, 395, 267, + 268, 395, 395, 395, 268, 268, 267, 395, + 395, 395, 267, 267, 267, 395, 267, 268, + 268, 395, 395, 268, 267, 395, 395, 395, + 267, 395, 267, 395, 267, 395, 267, 268, + 267, 267, 395, 395, 267, 395, 267, 268, + 395, 395, 268, 395, 267, 268, 395, 395, + 268, 268, 395, 395, 267, 395, 395, 268, + 267, 395, 395, 395, 268, 268, 268, 267, + 395, 268, 395, 267, 267, 267, 268, 267, + 267, 267, 395, 395, 395, 268, 395, 268, + 267, 395, 395, 268, 268, 268, 395, 395, + 395, 267, 395, 395, 268, 268, 267, 267, + 267, 395, 395, 395, 267, 395, 267, 268, + 395, 395, 395, 395, 268, 395, 268, 268, + 267, 395, 268, 395, 267, 395, 267, 395, + 268, 395, 395, 267, 395, 267, 395, 395, + 395, 395, 268, 267, 268, 395, 267, 395, + 395, 395, 395, 267, 395, 267, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 267, 268, 395, 395, 268, + 395, 267, 268, 395, 395, 395, 267, 395, + 268, 395, 395, 395, 267, 395, 267, 395, + 395, 267, 395, 395, 267, 268, 395, 268, + 267, 395, 395, 395, 267, 268, 395, 267, + 395, 395, 267, 395, 395, 268, 395, 268, + 268, 395, 267, 395, 395, 268, 267, 395, + 395, 395, 395, 268, 395, 395, 268, 395, + 267, 395, 267, 268, 268, 268, 395, 395, + 268, 267, 395, 267, 395, 267, 268, 268, + 268, 268, 395, 395, 268, 395, 267, 268, + 395, 395, 268, 395, 268, 267, 268, 395, + 268, 395, 267, 268, 395, 395, 395, 395, + 268, 395, 267, 395, 395, 267, 446, 447, + 448, 449, 450, 267, 395, 323, 267, 395, + 267, 395, 267, 395, 267, 395, 267, 451, + 452, 267, 395, 267, 395, 267, 453, 454, + 455, 456, 331, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 267, 395, 395, + 267, 395, 267, 395, 267, 395, 395, 395, + 268, 268, 395, 267, 395, 267, 395, 267, + 268, 395, 267, 395, 268, 267, 268, 395, + 395, 395, 268, 395, 268, 267, 395, 267, + 268, 395, 268, 395, 268, 395, 267, 395, + 395, 268, 395, 267, 395, 395, 395, 395, + 267, 395, 268, 268, 395, 395, 268, 267, + 395, 395, 268, 395, 268, 267, 467, 468, + 454, 267, 395, 267, 395, 395, 267, 469, + 470, 471, 472, 473, 474, 475, 267, 476, + 477, 478, 479, 480, 267, 395, 267, 395, + 267, 395, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 267, 395, 268, 395, 267, 267, + 395, 268, 267, 268, 268, 267, 395, 268, + 395, 395, 267, 395, 267, 268, 395, 268, + 395, 268, 267, 267, 395, 267, 268, 395, + 395, 268, 395, 268, 395, 267, 395, 268, + 395, 267, 395, 395, 268, 395, 268, 267, + 395, 395, 268, 268, 268, 268, 395, 395, + 267, 268, 395, 267, 268, 268, 395, 267, + 395, 268, 395, 268, 395, 268, 395, 267, + 268, 267, 395, 395, 268, 268, 395, 268, + 395, 267, 267, 267, 395, 395, 268, 395, + 268, 395, 267, 267, 395, 268, 268, 395, + 268, 395, 267, 268, 395, 268, 395, 267, + 268, 268, 395, 395, 267, 268, 268, 268, + 395, 395, 267, 504, 505, 380, 506, 267, + 395, 267, 395, 267, 395, 267, 507, 267, + 395, 267, 508, 509, 510, 511, 512, 513, + 267, 268, 268, 395, 395, 395, 267, 267, + 267, 267, 395, 395, 267, 395, 395, 267, + 267, 267, 395, 395, 395, 395, 267, 514, + 515, 
516, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 395, 267, 517, 267, 268, + 267, 518, 267, 519, 520, 521, 523, 522, + 267, 395, 267, 267, 395, 395, 268, 267, + 268, 267, 524, 267, 525, 526, 527, 529, + 528, 267, 268, 267, 267, 268, 268, 344, + 345, 346, 347, 348, 349, 267, 268, 267, + 268, 268, 267, 266, 268, 268, 267, 266, + 268, 267, 266, 268, 267, 531, 532, 530, + 267, 266, 268, 267, 266, 268, 267, 533, + 534, 535, 536, 537, 530, 267, 538, 267, + 297, 298, 299, 533, 534, 539, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 267, 540, 538, 297, 298, 299, 541, 535, + 536, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 267, 540, 267, 542, 540, + 297, 298, 299, 543, 536, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 267, + 542, 267, 267, 542, 544, 267, 542, 267, + 545, 546, 267, 540, 267, 267, 542, 267, + 540, 267, 540, 327, 328, 329, 330, 331, + 332, 333, 547, 335, 336, 337, 338, 339, + 340, 341, 549, 550, 551, 552, 553, 554, + 549, 550, 551, 552, 553, 554, 549, 548, + 555, 267, 268, 538, 267, 556, 556, 556, + 542, 267, 297, 298, 299, 541, 539, 300, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 267, 545, 557, 267, 267, 540, 556, + 556, 542, 556, 556, 542, 556, 556, 556, + 542, 556, 556, 542, 556, 556, 542, 556, + 556, 267, 542, 542, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 
583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 
611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 
611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 
900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, + 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 
900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, + 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 
1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 
1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 
1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 
1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 
1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 
527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 
176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 
938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 
+ 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 
1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + +var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 
268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 
901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 
1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 
"grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go new file mode 100644 index 00000000000..aad3d0506a7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go @@ -0,0 +1,307 @@ +// Copyright (c) 2014 Couchbase, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +// except in compliance with the License. You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed under the +// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +// either express or implied. See the License for the specific language governing permissions +// and limitations under the License. + +// Modified by Martin Atkins to serve the needs of package textseg. 
+ +// +build ignore + +package main + +import ( + "bufio" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "sort" + "strconv" + "strings" + "unicode" +) + +var url = flag.String("url", + "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/", + "URL of Unicode database directory") +var verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +var localFiles = flag.Bool("local", + false, + "data files have been copied to the current directory; for debugging only") +var outputFile = flag.String("output", + "", + "output file for generated tables; default stdout") + +var output *bufio.Writer + +func main() { + flag.Parse() + setupOutput() + + graphemePropertyRanges := make(map[string]*unicode.RangeTable) + loadUnicodeData("GraphemeBreakProperty.txt", graphemePropertyRanges) + wordPropertyRanges := make(map[string]*unicode.RangeTable) + loadUnicodeData("WordBreakProperty.txt", wordPropertyRanges) + sentencePropertyRanges := make(map[string]*unicode.RangeTable) + loadUnicodeData("SentenceBreakProperty.txt", sentencePropertyRanges) + + fmt.Fprintf(output, fileHeader, *url) + generateTables("Grapheme", graphemePropertyRanges) + generateTables("Word", wordPropertyRanges) + generateTables("Sentence", sentencePropertyRanges) + + flushOutput() +} + +// WordBreakProperty.txt has the form: +// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD +// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ +func openReader(file string) (input io.ReadCloser) { + if *localFiles { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + input = f + } else { + path := *url + file + resp, err := http.Get(path) + if err != nil { + log.Fatal(err) + } + if resp.StatusCode != 200 { + log.Fatal("bad GET status for "+file, resp.Status) + } + input = resp.Body + } + return +} + +func loadUnicodeData(filename string, propertyRanges map[string]*unicode.RangeTable) { + f := openReader(filename) + defer f.Close() + bufioReader := bufio.NewReader(f) + line, err := bufioReader.ReadString('\n') + for err == nil { + parseLine(line, propertyRanges) + line, err = bufioReader.ReadString('\n') + } + // if the err was EOF still need to process last value + if err == io.EOF { + parseLine(line, propertyRanges) + } +} + +const comment = "#" +const sep = ";" +const rnge = ".." 
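Before parseLine below does its real work, it reduces each UCD line using the separators defined just above: everything from "#" onward is a comment, and ";" splits the codepoint range from the property name. A standalone sketch of that step, using a hypothetical sample line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "05F0..05F2    ; Hebrew_Letter # Lo [3] HEBREW LIGATURE ..."
	if i := strings.Index(line, "#"); i > 0 {
		line = line[:i] // strip the trailing comment, as parseLine does
	}
	pieces := strings.Split(line, ";")
	fmt.Println(strings.TrimSpace(pieces[0])) // "05F0..05F2": becomes Range16{Lo: 0x5f0, Hi: 0x5f2}
	fmt.Println(strings.TrimSpace(pieces[1])) // "Hebrew_Letter": the RangeTable key
}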
+ +func parseLine(line string, propertyRanges map[string]*unicode.RangeTable) { + if strings.HasPrefix(line, comment) { + return + } + line = strings.TrimSpace(line) + if len(line) == 0 { + return + } + commentStart := strings.Index(line, comment) + if commentStart > 0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, sep) + if len(pieces) != 2 { + log.Printf("unexpected %d pieces in %s", len(pieces), line) + return + } + + propertyName := strings.TrimSpace(pieces[1]) + + rangeTable, ok := propertyRanges[propertyName] + if !ok { + rangeTable = &unicode.RangeTable{ + LatinOffset: 0, + } + propertyRanges[propertyName] = rangeTable + } + + codepointRange := strings.TrimSpace(pieces[0]) + rngeIndex := strings.Index(codepointRange, rnge) + + if rngeIndex < 0 { + // single codepoint, not range + codepointInt, err := strconv.ParseUint(codepointRange, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if codepointInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(codepointInt), + Hi: uint16(codepointInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else { + r32 := unicode.Range32{ + Lo: uint32(codepointInt), + Hi: uint32(codepointInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } + } else { + rngeStart := codepointRange[0:rngeIndex] + rngeEnd := codepointRange[rngeIndex+2:] + rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(rngeStartInt), + Hi: uint16(rngeEndInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 { + r32 := unicode.Range32{ + Lo: uint32(rngeStartInt), + Hi: uint32(rngeEndInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } else { + log.Printf("unexpected range") + } + } +} + +func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) { + if r.R16 == nil { + r.R16 = make([]unicode.Range16, 0, 1) + } + r.R16 = append(r.R16, r16) + if r16.Hi <= unicode.MaxLatin1 { + r.LatinOffset++ + } +} + +func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) { + if r.R32 == nil { + r.R32 = make([]unicode.Range32, 0, 1) + } + r.R32 = append(r.R32, r32) +} + +func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) { + prNames := make([]string, 0, len(propertyRanges)) + for k := range propertyRanges { + prNames = append(prNames, k) + } + sort.Strings(prNames) + for _, key := range prNames { + rt := propertyRanges[key] + fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt)) + } + fmt.Fprintf(output, "type _%sRuneRange unicode.RangeTable\n", prefix) + + fmt.Fprintf(output, "func _%sRuneType(r rune) *_%sRuneRange {\n", prefix, prefix) + fmt.Fprintf(output, "\tswitch {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase unicode.Is(_%s%s, r):\n\t\treturn (*_%sRuneRange)(_%s%s)\n", prefix, key, prefix, prefix, key) + } + fmt.Fprintf(output, "\tdefault:\n\t\treturn nil\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") + + fmt.Fprintf(output, "func (rng *_%sRuneRange) String() string {\n", prefix) + fmt.Fprintf(output, "\tswitch (*unicode.RangeTable)(rng) {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase _%s%s:\n\t\treturn %q\n", prefix, key, key) + } + 
fmt.Fprintf(output, "\tdefault:\n\t\treturn \"Other\"\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") +} + +func generateRangeTable(rt *unicode.RangeTable) string { + rv := "&unicode.RangeTable{\n" + if rt.R16 != nil { + rv += "\tR16: []unicode.Range16{\n" + for _, r16 := range rt.R16 { + rv += fmt.Sprintf("\t\t%#v,\n", r16) + } + rv += "\t},\n" + } + if rt.R32 != nil { + rv += "\tR32: []unicode.Range32{\n" + for _, r32 := range rt.R32 { + rv += fmt.Sprintf("\t\t%#v,\n", r32) + } + rv += "\t},\n" + } + rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset) + rv += "}\n" + return rv +} + +const fileHeader = `// Generated by running +// maketables --url=%s +// DO NOT EDIT + +package textseg + +import( + "unicode" +) +` + +func setupOutput() { + output = bufio.NewWriter(startGofmt()) +} + +// startGofmt connects output to a gofmt process if -output is set. +func startGofmt() io.Writer { + if *outputFile == "" { + return os.Stdout + } + stdout, err := os.Create(*outputFile) + if err != nil { + log.Fatal(err) + } + // Pipe output to gofmt. + gofmt := exec.Command("gofmt") + fd, err := gofmt.StdinPipe() + if err != nil { + log.Fatal(err) + } + gofmt.Stdout = stdout + gofmt.Stderr = os.Stderr + err = gofmt.Start() + if err != nil { + log.Fatal(err) + } + return fd +} + +func flushOutput() { + err := output.Flush() + if err != nil { + log.Fatal(err) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go new file mode 100644 index 00000000000..ac4200260b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go @@ -0,0 +1,212 @@ +// Copyright (c) 2014 Couchbase, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +// except in compliance with the License. You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed under the +// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +// either express or implied. See the License for the specific language governing permissions +// and limitations under the License. 
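The startGofmt helper above pipes all generated source through an external gofmt process, and the same helper reappears in this test-table generator below. A self-contained sketch of that pattern, assuming gofmt is on PATH; unlike the generators, which rely on process exit to close the pipe, it closes stdin and waits explicitly:

package main

import (
	"io"
	"log"
	"os"
	"os/exec"
)

func main() {
	gofmt := exec.Command("gofmt")
	in, err := gofmt.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	gofmt.Stdout = os.Stdout
	gofmt.Stderr = os.Stderr
	if err := gofmt.Start(); err != nil {
		log.Fatal(err)
	}
	// Deliberately mis-formatted source; gofmt normalizes it on the way through.
	io.WriteString(in, "package   main\n\nfunc main(  ) {}\n")
	in.Close()
	if err := gofmt.Wait(); err != nil {
		log.Fatal(err)
	}
}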
+ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "unicode" +) + +var url = flag.String("url", + "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/", + "URL of Unicode database directory") +var verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +var localFiles = flag.Bool("local", + false, + "data files have been copied to the current directory; for debugging only") + +var outputFile = flag.String("output", + "", + "output file for generated tables; default stdout") + +var output *bufio.Writer + +func main() { + flag.Parse() + setupOutput() + + graphemeTests := make([]test, 0) + graphemeTests = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests) + wordTests := make([]test, 0) + wordTests = loadUnicodeData("WordBreakTest.txt", wordTests) + sentenceTests := make([]test, 0) + sentenceTests = loadUnicodeData("SentenceBreakTest.txt", sentenceTests) + + fmt.Fprintf(output, fileHeader, *url) + generateTestTables("Grapheme", graphemeTests) + generateTestTables("Word", wordTests) + generateTestTables("Sentence", sentenceTests) + + flushOutput() +} + +// WordBreakProperty.txt has the form: +// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD +// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ +func openReader(file string) (input io.ReadCloser) { + if *localFiles { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + input = f + } else { + path := *url + file + resp, err := http.Get(path) + if err != nil { + log.Fatal(err) + } + if resp.StatusCode != 200 { + log.Fatal("bad GET status for "+file, resp.Status) + } + input = resp.Body + } + return +} + +func loadUnicodeData(filename string, tests []test) []test { + f := openReader(filename) + defer f.Close() + bufioReader := bufio.NewReader(f) + line, err := bufioReader.ReadString('\n') + for err == nil { + tests = parseLine(line, tests) + line, err = bufioReader.ReadString('\n') + } + // if the err was EOF still need to process last value + if err == io.EOF { + tests = parseLine(line, tests) + } + return tests +} + +const comment = "#" +const brk = "÷" +const nbrk = "×" + +type test [][]byte + +func parseLine(line string, tests []test) []test { + if strings.HasPrefix(line, comment) { + return tests + } + line = strings.TrimSpace(line) + if len(line) == 0 { + return tests + } + commentStart := strings.Index(line, comment) + if commentStart > 0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, brk) + t := make(test, 0) + for _, piece := range pieces { + piece = strings.TrimSpace(piece) + if len(piece) > 0 { + codePoints := strings.Split(piece, nbrk) + word := "" + for _, codePoint := range codePoints { + codePoint = strings.TrimSpace(codePoint) + r, err := strconv.ParseInt(codePoint, 16, 64) + if err != nil { + log.Printf("err: %v for '%s'", err, string(r)) + return tests + } + + word += string(r) + } + t = append(t, []byte(word)) + } + } + tests = append(tests, t) + return tests +} + +func generateTestTables(prefix string, tests []test) { + fmt.Fprintf(output, testHeader, prefix) + for _, t := range tests { + fmt.Fprintf(output, "\t\t{\n") + fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{})) + fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t)) + fmt.Fprintf(output, "\t\t},\n") + } + fmt.Fprintf(output, "}\n") +} + +func generateTest(t test) string { + rv := "[][]byte{" + for _, 
te := range t { + rv += fmt.Sprintf("%#v,", te) + } + rv += "}" + return rv +} + +const fileHeader = `// Generated by running +// maketesttables --url=%s +// DO NOT EDIT + +package textseg +` + +const testHeader = `var unicode%sTests = []struct { + input []byte + output [][]byte + }{ +` + +func setupOutput() { + output = bufio.NewWriter(startGofmt()) +} + +// startGofmt connects output to a gofmt process if -output is set. +func startGofmt() io.Writer { + if *outputFile == "" { + return os.Stdout + } + stdout, err := os.Create(*outputFile) + if err != nil { + log.Fatal(err) + } + // Pipe output to gofmt. + gofmt := exec.Command("gofmt") + fd, err := gofmt.StdinPipe() + if err != nil { + log.Fatal(err) + } + gofmt.Stdout = stdout + gofmt.Stderr = os.Stderr + err = gofmt.Start() + if err != nil { + log.Fatal(err) + } + return fd +} + +func flushOutput() { + err := output.Flush() + if err != nil { + log.Fatal(err) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go new file mode 100644 index 00000000000..fab7e842955 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go @@ -0,0 +1,5700 @@ +// Generated by running +// maketables --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/ +// DO NOT EDIT + +package textseg + +import ( + "unicode" +) + +var _GraphemeCR = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _GraphemeControl = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x0, Hi: 0x9, Stride: 0x1}, + unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1}, + unicode.Range16{Lo: 0xe, Hi: 0x1f, Stride: 0x1}, + unicode.Range16{Lo: 0x7f, Hi: 0x9f, Stride: 0x1}, + unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1}, + unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1}, + unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1}, + unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1}, + unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1}, + unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1}, + unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1}, + unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1}, + unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1}, + unicode.Range16{Lo: 0x2065, Hi: 0x2065, Stride: 0x1}, + unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1}, + unicode.Range16{Lo: 0xd800, Hi: 0xdfff, Stride: 0x1}, + unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1}, + unicode.Range16{Lo: 0xfff0, Hi: 0xfff8, Stride: 0x1}, + unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0000, Hi: 0xe0000, Stride: 0x1}, + unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1}, + unicode.Range32{Lo: 0xe0002, Hi: 0xe001f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0080, Hi: 0xe00ff, Stride: 0x1}, + unicode.Range32{Lo: 0xe01f0, Hi: 0xe0fff, Stride: 0x1}, + }, + LatinOffset: 5, +} + +var _GraphemeE_Base = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1}, + unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1}, + unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1}, + unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, 
Stride: 0x1}, + unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1}, + unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1}, + unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1}, + unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1}, + unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1}, + unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1}, + unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1}, + unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1}, + unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1}, + unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1}, + unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1}, + unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1}, + unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1}, + unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1}, + unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1}, + unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Base_GAZ = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Modifier = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + 
unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9be, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbe, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc2, Hi: 0xcc2, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd3e, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdcf, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xddf, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, 
Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, 
Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + 
unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133e, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b0, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bd, Hi: 0x114bd, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115af, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + 
unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d165, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16e, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeGlue_After_Zwj = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1}, + unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeL = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x1100, Hi: 0x115f, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _GraphemeLV = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac00, Hi: 0xac00, Stride: 0x1}, + unicode.Range16{Lo: 0xac1c, Hi: 0xac1c, Stride: 0x1}, + unicode.Range16{Lo: 0xac38, Hi: 0xac38, Stride: 0x1}, + unicode.Range16{Lo: 0xac54, Hi: 0xac54, Stride: 0x1}, + unicode.Range16{Lo: 0xac70, Hi: 0xac70, Stride: 0x1}, + unicode.Range16{Lo: 0xac8c, Hi: 0xac8c, Stride: 0x1}, + unicode.Range16{Lo: 0xaca8, Hi: 0xaca8, Stride: 0x1}, + unicode.Range16{Lo: 0xacc4, Hi: 0xacc4, Stride: 0x1}, + unicode.Range16{Lo: 0xace0, Hi: 0xace0, Stride: 0x1}, + unicode.Range16{Lo: 0xacfc, Hi: 0xacfc, Stride: 0x1}, + unicode.Range16{Lo: 0xad18, Hi: 0xad18, Stride: 0x1}, + unicode.Range16{Lo: 0xad34, Hi: 0xad34, Stride: 0x1}, + unicode.Range16{Lo: 0xad50, Hi: 0xad50, Stride: 0x1}, + unicode.Range16{Lo: 0xad6c, Hi: 0xad6c, Stride: 0x1}, + unicode.Range16{Lo: 0xad88, Hi: 0xad88, Stride: 0x1}, + unicode.Range16{Lo: 0xada4, Hi: 0xada4, Stride: 0x1}, + unicode.Range16{Lo: 0xadc0, Hi: 0xadc0, Stride: 0x1}, + unicode.Range16{Lo: 0xaddc, Hi: 0xaddc, Stride: 0x1}, + unicode.Range16{Lo: 0xadf8, Hi: 0xadf8, Stride: 0x1}, + unicode.Range16{Lo: 0xae14, Hi: 0xae14, Stride: 0x1}, + unicode.Range16{Lo: 0xae30, Hi: 0xae30, Stride: 0x1}, + unicode.Range16{Lo: 0xae4c, Hi: 0xae4c, Stride: 0x1}, + 
unicode.Range16{Lo: 0xae68, Hi: 0xae68, Stride: 0x1}, + unicode.Range16{Lo: 0xae84, Hi: 0xae84, Stride: 0x1}, + unicode.Range16{Lo: 0xaea0, Hi: 0xaea0, Stride: 0x1}, + unicode.Range16{Lo: 0xaebc, Hi: 0xaebc, Stride: 0x1}, + unicode.Range16{Lo: 0xaed8, Hi: 0xaed8, Stride: 0x1}, + unicode.Range16{Lo: 0xaef4, Hi: 0xaef4, Stride: 0x1}, + unicode.Range16{Lo: 0xaf10, Hi: 0xaf10, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2c, Hi: 0xaf2c, Stride: 0x1}, + unicode.Range16{Lo: 0xaf48, Hi: 0xaf48, Stride: 0x1}, + unicode.Range16{Lo: 0xaf64, Hi: 0xaf64, Stride: 0x1}, + unicode.Range16{Lo: 0xaf80, Hi: 0xaf80, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9c, Hi: 0xaf9c, Stride: 0x1}, + unicode.Range16{Lo: 0xafb8, Hi: 0xafb8, Stride: 0x1}, + unicode.Range16{Lo: 0xafd4, Hi: 0xafd4, Stride: 0x1}, + unicode.Range16{Lo: 0xaff0, Hi: 0xaff0, Stride: 0x1}, + unicode.Range16{Lo: 0xb00c, Hi: 0xb00c, Stride: 0x1}, + unicode.Range16{Lo: 0xb028, Hi: 0xb028, Stride: 0x1}, + unicode.Range16{Lo: 0xb044, Hi: 0xb044, Stride: 0x1}, + unicode.Range16{Lo: 0xb060, Hi: 0xb060, Stride: 0x1}, + unicode.Range16{Lo: 0xb07c, Hi: 0xb07c, Stride: 0x1}, + unicode.Range16{Lo: 0xb098, Hi: 0xb098, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b4, Hi: 0xb0b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d0, Hi: 0xb0d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ec, Hi: 0xb0ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb108, Hi: 0xb108, Stride: 0x1}, + unicode.Range16{Lo: 0xb124, Hi: 0xb124, Stride: 0x1}, + unicode.Range16{Lo: 0xb140, Hi: 0xb140, Stride: 0x1}, + unicode.Range16{Lo: 0xb15c, Hi: 0xb15c, Stride: 0x1}, + unicode.Range16{Lo: 0xb178, Hi: 0xb178, Stride: 0x1}, + unicode.Range16{Lo: 0xb194, Hi: 0xb194, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b0, Hi: 0xb1b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cc, Hi: 0xb1cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e8, Hi: 0xb1e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb204, Hi: 0xb204, Stride: 0x1}, + unicode.Range16{Lo: 0xb220, Hi: 0xb220, Stride: 0x1}, + unicode.Range16{Lo: 0xb23c, Hi: 0xb23c, Stride: 0x1}, + unicode.Range16{Lo: 0xb258, Hi: 0xb258, Stride: 0x1}, + unicode.Range16{Lo: 0xb274, Hi: 0xb274, Stride: 0x1}, + unicode.Range16{Lo: 0xb290, Hi: 0xb290, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ac, Hi: 0xb2ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c8, Hi: 0xb2c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e4, Hi: 0xb2e4, Stride: 0x1}, + unicode.Range16{Lo: 0xb300, Hi: 0xb300, Stride: 0x1}, + unicode.Range16{Lo: 0xb31c, Hi: 0xb31c, Stride: 0x1}, + unicode.Range16{Lo: 0xb338, Hi: 0xb338, Stride: 0x1}, + unicode.Range16{Lo: 0xb354, Hi: 0xb354, Stride: 0x1}, + unicode.Range16{Lo: 0xb370, Hi: 0xb370, Stride: 0x1}, + unicode.Range16{Lo: 0xb38c, Hi: 0xb38c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a8, Hi: 0xb3a8, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c4, Hi: 0xb3c4, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e0, Hi: 0xb3e0, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fc, Hi: 0xb3fc, Stride: 0x1}, + unicode.Range16{Lo: 0xb418, Hi: 0xb418, Stride: 0x1}, + unicode.Range16{Lo: 0xb434, Hi: 0xb434, Stride: 0x1}, + unicode.Range16{Lo: 0xb450, Hi: 0xb450, Stride: 0x1}, + unicode.Range16{Lo: 0xb46c, Hi: 0xb46c, Stride: 0x1}, + unicode.Range16{Lo: 0xb488, Hi: 0xb488, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a4, Hi: 0xb4a4, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c0, Hi: 0xb4c0, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dc, Hi: 0xb4dc, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f8, Hi: 0xb4f8, Stride: 0x1}, + unicode.Range16{Lo: 0xb514, Hi: 0xb514, Stride: 0x1}, + unicode.Range16{Lo: 0xb530, Hi: 0xb530, Stride: 0x1}, + unicode.Range16{Lo: 
0xb54c, Hi: 0xb54c, Stride: 0x1}, + unicode.Range16{Lo: 0xb568, Hi: 0xb568, Stride: 0x1}, + unicode.Range16{Lo: 0xb584, Hi: 0xb584, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a0, Hi: 0xb5a0, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bc, Hi: 0xb5bc, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d8, Hi: 0xb5d8, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f4, Hi: 0xb5f4, Stride: 0x1}, + unicode.Range16{Lo: 0xb610, Hi: 0xb610, Stride: 0x1}, + unicode.Range16{Lo: 0xb62c, Hi: 0xb62c, Stride: 0x1}, + unicode.Range16{Lo: 0xb648, Hi: 0xb648, Stride: 0x1}, + unicode.Range16{Lo: 0xb664, Hi: 0xb664, Stride: 0x1}, + unicode.Range16{Lo: 0xb680, Hi: 0xb680, Stride: 0x1}, + unicode.Range16{Lo: 0xb69c, Hi: 0xb69c, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b8, Hi: 0xb6b8, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d4, Hi: 0xb6d4, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f0, Hi: 0xb6f0, Stride: 0x1}, + unicode.Range16{Lo: 0xb70c, Hi: 0xb70c, Stride: 0x1}, + unicode.Range16{Lo: 0xb728, Hi: 0xb728, Stride: 0x1}, + unicode.Range16{Lo: 0xb744, Hi: 0xb744, Stride: 0x1}, + unicode.Range16{Lo: 0xb760, Hi: 0xb760, Stride: 0x1}, + unicode.Range16{Lo: 0xb77c, Hi: 0xb77c, Stride: 0x1}, + unicode.Range16{Lo: 0xb798, Hi: 0xb798, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b4, Hi: 0xb7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d0, Hi: 0xb7d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ec, Hi: 0xb7ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb808, Hi: 0xb808, Stride: 0x1}, + unicode.Range16{Lo: 0xb824, Hi: 0xb824, Stride: 0x1}, + unicode.Range16{Lo: 0xb840, Hi: 0xb840, Stride: 0x1}, + unicode.Range16{Lo: 0xb85c, Hi: 0xb85c, Stride: 0x1}, + unicode.Range16{Lo: 0xb878, Hi: 0xb878, Stride: 0x1}, + unicode.Range16{Lo: 0xb894, Hi: 0xb894, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b0, Hi: 0xb8b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cc, Hi: 0xb8cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e8, Hi: 0xb8e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb904, Hi: 0xb904, Stride: 0x1}, + unicode.Range16{Lo: 0xb920, Hi: 0xb920, Stride: 0x1}, + unicode.Range16{Lo: 0xb93c, Hi: 0xb93c, Stride: 0x1}, + unicode.Range16{Lo: 0xb958, Hi: 0xb958, Stride: 0x1}, + unicode.Range16{Lo: 0xb974, Hi: 0xb974, Stride: 0x1}, + unicode.Range16{Lo: 0xb990, Hi: 0xb990, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ac, Hi: 0xb9ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c8, Hi: 0xb9c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e4, Hi: 0xb9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xba00, Hi: 0xba00, Stride: 0x1}, + unicode.Range16{Lo: 0xba1c, Hi: 0xba1c, Stride: 0x1}, + unicode.Range16{Lo: 0xba38, Hi: 0xba38, Stride: 0x1}, + unicode.Range16{Lo: 0xba54, Hi: 0xba54, Stride: 0x1}, + unicode.Range16{Lo: 0xba70, Hi: 0xba70, Stride: 0x1}, + unicode.Range16{Lo: 0xba8c, Hi: 0xba8c, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa8, Hi: 0xbaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xbac4, Hi: 0xbac4, Stride: 0x1}, + unicode.Range16{Lo: 0xbae0, Hi: 0xbae0, Stride: 0x1}, + unicode.Range16{Lo: 0xbafc, Hi: 0xbafc, Stride: 0x1}, + unicode.Range16{Lo: 0xbb18, Hi: 0xbb18, Stride: 0x1}, + unicode.Range16{Lo: 0xbb34, Hi: 0xbb34, Stride: 0x1}, + unicode.Range16{Lo: 0xbb50, Hi: 0xbb50, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6c, Hi: 0xbb6c, Stride: 0x1}, + unicode.Range16{Lo: 0xbb88, Hi: 0xbb88, Stride: 0x1}, + unicode.Range16{Lo: 0xbba4, Hi: 0xbba4, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc0, Hi: 0xbbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdc, Hi: 0xbbdc, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf8, Hi: 0xbbf8, Stride: 0x1}, + unicode.Range16{Lo: 0xbc14, Hi: 0xbc14, Stride: 0x1}, + unicode.Range16{Lo: 0xbc30, Hi: 0xbc30, 
Stride: 0x1}, + unicode.Range16{Lo: 0xbc4c, Hi: 0xbc4c, Stride: 0x1}, + unicode.Range16{Lo: 0xbc68, Hi: 0xbc68, Stride: 0x1}, + unicode.Range16{Lo: 0xbc84, Hi: 0xbc84, Stride: 0x1}, + unicode.Range16{Lo: 0xbca0, Hi: 0xbca0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbc, Hi: 0xbcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd8, Hi: 0xbcd8, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf4, Hi: 0xbcf4, Stride: 0x1}, + unicode.Range16{Lo: 0xbd10, Hi: 0xbd10, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2c, Hi: 0xbd2c, Stride: 0x1}, + unicode.Range16{Lo: 0xbd48, Hi: 0xbd48, Stride: 0x1}, + unicode.Range16{Lo: 0xbd64, Hi: 0xbd64, Stride: 0x1}, + unicode.Range16{Lo: 0xbd80, Hi: 0xbd80, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9c, Hi: 0xbd9c, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb8, Hi: 0xbdb8, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd4, Hi: 0xbdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf0, Hi: 0xbdf0, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0c, Hi: 0xbe0c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe28, Hi: 0xbe28, Stride: 0x1}, + unicode.Range16{Lo: 0xbe44, Hi: 0xbe44, Stride: 0x1}, + unicode.Range16{Lo: 0xbe60, Hi: 0xbe60, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7c, Hi: 0xbe7c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe98, Hi: 0xbe98, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb4, Hi: 0xbeb4, Stride: 0x1}, + unicode.Range16{Lo: 0xbed0, Hi: 0xbed0, Stride: 0x1}, + unicode.Range16{Lo: 0xbeec, Hi: 0xbeec, Stride: 0x1}, + unicode.Range16{Lo: 0xbf08, Hi: 0xbf08, Stride: 0x1}, + unicode.Range16{Lo: 0xbf24, Hi: 0xbf24, Stride: 0x1}, + unicode.Range16{Lo: 0xbf40, Hi: 0xbf40, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5c, Hi: 0xbf5c, Stride: 0x1}, + unicode.Range16{Lo: 0xbf78, Hi: 0xbf78, Stride: 0x1}, + unicode.Range16{Lo: 0xbf94, Hi: 0xbf94, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb0, Hi: 0xbfb0, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcc, Hi: 0xbfcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe8, Hi: 0xbfe8, Stride: 0x1}, + unicode.Range16{Lo: 0xc004, Hi: 0xc004, Stride: 0x1}, + unicode.Range16{Lo: 0xc020, Hi: 0xc020, Stride: 0x1}, + unicode.Range16{Lo: 0xc03c, Hi: 0xc03c, Stride: 0x1}, + unicode.Range16{Lo: 0xc058, Hi: 0xc058, Stride: 0x1}, + unicode.Range16{Lo: 0xc074, Hi: 0xc074, Stride: 0x1}, + unicode.Range16{Lo: 0xc090, Hi: 0xc090, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ac, Hi: 0xc0ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c8, Hi: 0xc0c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e4, Hi: 0xc0e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc100, Hi: 0xc100, Stride: 0x1}, + unicode.Range16{Lo: 0xc11c, Hi: 0xc11c, Stride: 0x1}, + unicode.Range16{Lo: 0xc138, Hi: 0xc138, Stride: 0x1}, + unicode.Range16{Lo: 0xc154, Hi: 0xc154, Stride: 0x1}, + unicode.Range16{Lo: 0xc170, Hi: 0xc170, Stride: 0x1}, + unicode.Range16{Lo: 0xc18c, Hi: 0xc18c, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a8, Hi: 0xc1a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c4, Hi: 0xc1c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e0, Hi: 0xc1e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fc, Hi: 0xc1fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc218, Hi: 0xc218, Stride: 0x1}, + unicode.Range16{Lo: 0xc234, Hi: 0xc234, Stride: 0x1}, + unicode.Range16{Lo: 0xc250, Hi: 0xc250, Stride: 0x1}, + unicode.Range16{Lo: 0xc26c, Hi: 0xc26c, Stride: 0x1}, + unicode.Range16{Lo: 0xc288, Hi: 0xc288, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a4, Hi: 0xc2a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c0, Hi: 0xc2c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dc, Hi: 0xc2dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f8, Hi: 0xc2f8, Stride: 0x1}, + unicode.Range16{Lo: 0xc314, Hi: 0xc314, Stride: 0x1}, + 
unicode.Range16{Lo: 0xc330, Hi: 0xc330, Stride: 0x1}, + unicode.Range16{Lo: 0xc34c, Hi: 0xc34c, Stride: 0x1}, + unicode.Range16{Lo: 0xc368, Hi: 0xc368, Stride: 0x1}, + unicode.Range16{Lo: 0xc384, Hi: 0xc384, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a0, Hi: 0xc3a0, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bc, Hi: 0xc3bc, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d8, Hi: 0xc3d8, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f4, Hi: 0xc3f4, Stride: 0x1}, + unicode.Range16{Lo: 0xc410, Hi: 0xc410, Stride: 0x1}, + unicode.Range16{Lo: 0xc42c, Hi: 0xc42c, Stride: 0x1}, + unicode.Range16{Lo: 0xc448, Hi: 0xc448, Stride: 0x1}, + unicode.Range16{Lo: 0xc464, Hi: 0xc464, Stride: 0x1}, + unicode.Range16{Lo: 0xc480, Hi: 0xc480, Stride: 0x1}, + unicode.Range16{Lo: 0xc49c, Hi: 0xc49c, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b8, Hi: 0xc4b8, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d4, Hi: 0xc4d4, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f0, Hi: 0xc4f0, Stride: 0x1}, + unicode.Range16{Lo: 0xc50c, Hi: 0xc50c, Stride: 0x1}, + unicode.Range16{Lo: 0xc528, Hi: 0xc528, Stride: 0x1}, + unicode.Range16{Lo: 0xc544, Hi: 0xc544, Stride: 0x1}, + unicode.Range16{Lo: 0xc560, Hi: 0xc560, Stride: 0x1}, + unicode.Range16{Lo: 0xc57c, Hi: 0xc57c, Stride: 0x1}, + unicode.Range16{Lo: 0xc598, Hi: 0xc598, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b4, Hi: 0xc5b4, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d0, Hi: 0xc5d0, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ec, Hi: 0xc5ec, Stride: 0x1}, + unicode.Range16{Lo: 0xc608, Hi: 0xc608, Stride: 0x1}, + unicode.Range16{Lo: 0xc624, Hi: 0xc624, Stride: 0x1}, + unicode.Range16{Lo: 0xc640, Hi: 0xc640, Stride: 0x1}, + unicode.Range16{Lo: 0xc65c, Hi: 0xc65c, Stride: 0x1}, + unicode.Range16{Lo: 0xc678, Hi: 0xc678, Stride: 0x1}, + unicode.Range16{Lo: 0xc694, Hi: 0xc694, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b0, Hi: 0xc6b0, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cc, Hi: 0xc6cc, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e8, Hi: 0xc6e8, Stride: 0x1}, + unicode.Range16{Lo: 0xc704, Hi: 0xc704, Stride: 0x1}, + unicode.Range16{Lo: 0xc720, Hi: 0xc720, Stride: 0x1}, + unicode.Range16{Lo: 0xc73c, Hi: 0xc73c, Stride: 0x1}, + unicode.Range16{Lo: 0xc758, Hi: 0xc758, Stride: 0x1}, + unicode.Range16{Lo: 0xc774, Hi: 0xc774, Stride: 0x1}, + unicode.Range16{Lo: 0xc790, Hi: 0xc790, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ac, Hi: 0xc7ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c8, Hi: 0xc7c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc7e4, Hi: 0xc7e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc800, Hi: 0xc800, Stride: 0x1}, + unicode.Range16{Lo: 0xc81c, Hi: 0xc81c, Stride: 0x1}, + unicode.Range16{Lo: 0xc838, Hi: 0xc838, Stride: 0x1}, + unicode.Range16{Lo: 0xc854, Hi: 0xc854, Stride: 0x1}, + unicode.Range16{Lo: 0xc870, Hi: 0xc870, Stride: 0x1}, + unicode.Range16{Lo: 0xc88c, Hi: 0xc88c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a8, Hi: 0xc8a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c4, Hi: 0xc8c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e0, Hi: 0xc8e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fc, Hi: 0xc8fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc918, Hi: 0xc918, Stride: 0x1}, + unicode.Range16{Lo: 0xc934, Hi: 0xc934, Stride: 0x1}, + unicode.Range16{Lo: 0xc950, Hi: 0xc950, Stride: 0x1}, + unicode.Range16{Lo: 0xc96c, Hi: 0xc96c, Stride: 0x1}, + unicode.Range16{Lo: 0xc988, Hi: 0xc988, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a4, Hi: 0xc9a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c0, Hi: 0xc9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dc, Hi: 0xc9dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f8, Hi: 0xc9f8, Stride: 0x1}, + unicode.Range16{Lo: 
0xca14, Hi: 0xca14, Stride: 0x1}, + unicode.Range16{Lo: 0xca30, Hi: 0xca30, Stride: 0x1}, + unicode.Range16{Lo: 0xca4c, Hi: 0xca4c, Stride: 0x1}, + unicode.Range16{Lo: 0xca68, Hi: 0xca68, Stride: 0x1}, + unicode.Range16{Lo: 0xca84, Hi: 0xca84, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa0, Hi: 0xcaa0, Stride: 0x1}, + unicode.Range16{Lo: 0xcabc, Hi: 0xcabc, Stride: 0x1}, + unicode.Range16{Lo: 0xcad8, Hi: 0xcad8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf4, Hi: 0xcaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xcb10, Hi: 0xcb10, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2c, Hi: 0xcb2c, Stride: 0x1}, + unicode.Range16{Lo: 0xcb48, Hi: 0xcb48, Stride: 0x1}, + unicode.Range16{Lo: 0xcb64, Hi: 0xcb64, Stride: 0x1}, + unicode.Range16{Lo: 0xcb80, Hi: 0xcb80, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9c, Hi: 0xcb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb8, Hi: 0xcbb8, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd4, Hi: 0xcbd4, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf0, Hi: 0xcbf0, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0c, Hi: 0xcc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc28, Hi: 0xcc28, Stride: 0x1}, + unicode.Range16{Lo: 0xcc44, Hi: 0xcc44, Stride: 0x1}, + unicode.Range16{Lo: 0xcc60, Hi: 0xcc60, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7c, Hi: 0xcc7c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc98, Hi: 0xcc98, Stride: 0x1}, + unicode.Range16{Lo: 0xccb4, Hi: 0xccb4, Stride: 0x1}, + unicode.Range16{Lo: 0xccd0, Hi: 0xccd0, Stride: 0x1}, + unicode.Range16{Lo: 0xccec, Hi: 0xccec, Stride: 0x1}, + unicode.Range16{Lo: 0xcd08, Hi: 0xcd08, Stride: 0x1}, + unicode.Range16{Lo: 0xcd24, Hi: 0xcd24, Stride: 0x1}, + unicode.Range16{Lo: 0xcd40, Hi: 0xcd40, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5c, Hi: 0xcd5c, Stride: 0x1}, + unicode.Range16{Lo: 0xcd78, Hi: 0xcd78, Stride: 0x1}, + unicode.Range16{Lo: 0xcd94, Hi: 0xcd94, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb0, Hi: 0xcdb0, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcc, Hi: 0xcdcc, Stride: 0x1}, + unicode.Range16{Lo: 0xcde8, Hi: 0xcde8, Stride: 0x1}, + unicode.Range16{Lo: 0xce04, Hi: 0xce04, Stride: 0x1}, + unicode.Range16{Lo: 0xce20, Hi: 0xce20, Stride: 0x1}, + unicode.Range16{Lo: 0xce3c, Hi: 0xce3c, Stride: 0x1}, + unicode.Range16{Lo: 0xce58, Hi: 0xce58, Stride: 0x1}, + unicode.Range16{Lo: 0xce74, Hi: 0xce74, Stride: 0x1}, + unicode.Range16{Lo: 0xce90, Hi: 0xce90, Stride: 0x1}, + unicode.Range16{Lo: 0xceac, Hi: 0xceac, Stride: 0x1}, + unicode.Range16{Lo: 0xcec8, Hi: 0xcec8, Stride: 0x1}, + unicode.Range16{Lo: 0xcee4, Hi: 0xcee4, Stride: 0x1}, + unicode.Range16{Lo: 0xcf00, Hi: 0xcf00, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1c, Hi: 0xcf1c, Stride: 0x1}, + unicode.Range16{Lo: 0xcf38, Hi: 0xcf38, Stride: 0x1}, + unicode.Range16{Lo: 0xcf54, Hi: 0xcf54, Stride: 0x1}, + unicode.Range16{Lo: 0xcf70, Hi: 0xcf70, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8c, Hi: 0xcf8c, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa8, Hi: 0xcfa8, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc4, Hi: 0xcfc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe0, Hi: 0xcfe0, Stride: 0x1}, + unicode.Range16{Lo: 0xcffc, Hi: 0xcffc, Stride: 0x1}, + unicode.Range16{Lo: 0xd018, Hi: 0xd018, Stride: 0x1}, + unicode.Range16{Lo: 0xd034, Hi: 0xd034, Stride: 0x1}, + unicode.Range16{Lo: 0xd050, Hi: 0xd050, Stride: 0x1}, + unicode.Range16{Lo: 0xd06c, Hi: 0xd06c, Stride: 0x1}, + unicode.Range16{Lo: 0xd088, Hi: 0xd088, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a4, Hi: 0xd0a4, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c0, Hi: 0xd0c0, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dc, Hi: 0xd0dc, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f8, Hi: 0xd0f8, 
Stride: 0x1}, + unicode.Range16{Lo: 0xd114, Hi: 0xd114, Stride: 0x1}, + unicode.Range16{Lo: 0xd130, Hi: 0xd130, Stride: 0x1}, + unicode.Range16{Lo: 0xd14c, Hi: 0xd14c, Stride: 0x1}, + unicode.Range16{Lo: 0xd168, Hi: 0xd168, Stride: 0x1}, + unicode.Range16{Lo: 0xd184, Hi: 0xd184, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a0, Hi: 0xd1a0, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bc, Hi: 0xd1bc, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d8, Hi: 0xd1d8, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f4, Hi: 0xd1f4, Stride: 0x1}, + unicode.Range16{Lo: 0xd210, Hi: 0xd210, Stride: 0x1}, + unicode.Range16{Lo: 0xd22c, Hi: 0xd22c, Stride: 0x1}, + unicode.Range16{Lo: 0xd248, Hi: 0xd248, Stride: 0x1}, + unicode.Range16{Lo: 0xd264, Hi: 0xd264, Stride: 0x1}, + unicode.Range16{Lo: 0xd280, Hi: 0xd280, Stride: 0x1}, + unicode.Range16{Lo: 0xd29c, Hi: 0xd29c, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b8, Hi: 0xd2b8, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d4, Hi: 0xd2d4, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f0, Hi: 0xd2f0, Stride: 0x1}, + unicode.Range16{Lo: 0xd30c, Hi: 0xd30c, Stride: 0x1}, + unicode.Range16{Lo: 0xd328, Hi: 0xd328, Stride: 0x1}, + unicode.Range16{Lo: 0xd344, Hi: 0xd344, Stride: 0x1}, + unicode.Range16{Lo: 0xd360, Hi: 0xd360, Stride: 0x1}, + unicode.Range16{Lo: 0xd37c, Hi: 0xd37c, Stride: 0x1}, + unicode.Range16{Lo: 0xd398, Hi: 0xd398, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b4, Hi: 0xd3b4, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d0, Hi: 0xd3d0, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ec, Hi: 0xd3ec, Stride: 0x1}, + unicode.Range16{Lo: 0xd408, Hi: 0xd408, Stride: 0x1}, + unicode.Range16{Lo: 0xd424, Hi: 0xd424, Stride: 0x1}, + unicode.Range16{Lo: 0xd440, Hi: 0xd440, Stride: 0x1}, + unicode.Range16{Lo: 0xd45c, Hi: 0xd45c, Stride: 0x1}, + unicode.Range16{Lo: 0xd478, Hi: 0xd478, Stride: 0x1}, + unicode.Range16{Lo: 0xd494, Hi: 0xd494, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b0, Hi: 0xd4b0, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cc, Hi: 0xd4cc, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e8, Hi: 0xd4e8, Stride: 0x1}, + unicode.Range16{Lo: 0xd504, Hi: 0xd504, Stride: 0x1}, + unicode.Range16{Lo: 0xd520, Hi: 0xd520, Stride: 0x1}, + unicode.Range16{Lo: 0xd53c, Hi: 0xd53c, Stride: 0x1}, + unicode.Range16{Lo: 0xd558, Hi: 0xd558, Stride: 0x1}, + unicode.Range16{Lo: 0xd574, Hi: 0xd574, Stride: 0x1}, + unicode.Range16{Lo: 0xd590, Hi: 0xd590, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ac, Hi: 0xd5ac, Stride: 0x1}, + unicode.Range16{Lo: 0xd5c8, Hi: 0xd5c8, Stride: 0x1}, + unicode.Range16{Lo: 0xd5e4, Hi: 0xd5e4, Stride: 0x1}, + unicode.Range16{Lo: 0xd600, Hi: 0xd600, Stride: 0x1}, + unicode.Range16{Lo: 0xd61c, Hi: 0xd61c, Stride: 0x1}, + unicode.Range16{Lo: 0xd638, Hi: 0xd638, Stride: 0x1}, + unicode.Range16{Lo: 0xd654, Hi: 0xd654, Stride: 0x1}, + unicode.Range16{Lo: 0xd670, Hi: 0xd670, Stride: 0x1}, + unicode.Range16{Lo: 0xd68c, Hi: 0xd68c, Stride: 0x1}, + unicode.Range16{Lo: 0xd6a8, Hi: 0xd6a8, Stride: 0x1}, + unicode.Range16{Lo: 0xd6c4, Hi: 0xd6c4, Stride: 0x1}, + unicode.Range16{Lo: 0xd6e0, Hi: 0xd6e0, Stride: 0x1}, + unicode.Range16{Lo: 0xd6fc, Hi: 0xd6fc, Stride: 0x1}, + unicode.Range16{Lo: 0xd718, Hi: 0xd718, Stride: 0x1}, + unicode.Range16{Lo: 0xd734, Hi: 0xd734, Stride: 0x1}, + unicode.Range16{Lo: 0xd750, Hi: 0xd750, Stride: 0x1}, + unicode.Range16{Lo: 0xd76c, Hi: 0xd76c, Stride: 0x1}, + unicode.Range16{Lo: 0xd788, Hi: 0xd788, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLVT = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac01, Hi: 0xac1b, Stride: 0x1}, + unicode.Range16{Lo: 0xac1d, 
Hi: 0xac37, Stride: 0x1}, + unicode.Range16{Lo: 0xac39, Hi: 0xac53, Stride: 0x1}, + unicode.Range16{Lo: 0xac55, Hi: 0xac6f, Stride: 0x1}, + unicode.Range16{Lo: 0xac71, Hi: 0xac8b, Stride: 0x1}, + unicode.Range16{Lo: 0xac8d, Hi: 0xaca7, Stride: 0x1}, + unicode.Range16{Lo: 0xaca9, Hi: 0xacc3, Stride: 0x1}, + unicode.Range16{Lo: 0xacc5, Hi: 0xacdf, Stride: 0x1}, + unicode.Range16{Lo: 0xace1, Hi: 0xacfb, Stride: 0x1}, + unicode.Range16{Lo: 0xacfd, Hi: 0xad17, Stride: 0x1}, + unicode.Range16{Lo: 0xad19, Hi: 0xad33, Stride: 0x1}, + unicode.Range16{Lo: 0xad35, Hi: 0xad4f, Stride: 0x1}, + unicode.Range16{Lo: 0xad51, Hi: 0xad6b, Stride: 0x1}, + unicode.Range16{Lo: 0xad6d, Hi: 0xad87, Stride: 0x1}, + unicode.Range16{Lo: 0xad89, Hi: 0xada3, Stride: 0x1}, + unicode.Range16{Lo: 0xada5, Hi: 0xadbf, Stride: 0x1}, + unicode.Range16{Lo: 0xadc1, Hi: 0xaddb, Stride: 0x1}, + unicode.Range16{Lo: 0xaddd, Hi: 0xadf7, Stride: 0x1}, + unicode.Range16{Lo: 0xadf9, Hi: 0xae13, Stride: 0x1}, + unicode.Range16{Lo: 0xae15, Hi: 0xae2f, Stride: 0x1}, + unicode.Range16{Lo: 0xae31, Hi: 0xae4b, Stride: 0x1}, + unicode.Range16{Lo: 0xae4d, Hi: 0xae67, Stride: 0x1}, + unicode.Range16{Lo: 0xae69, Hi: 0xae83, Stride: 0x1}, + unicode.Range16{Lo: 0xae85, Hi: 0xae9f, Stride: 0x1}, + unicode.Range16{Lo: 0xaea1, Hi: 0xaebb, Stride: 0x1}, + unicode.Range16{Lo: 0xaebd, Hi: 0xaed7, Stride: 0x1}, + unicode.Range16{Lo: 0xaed9, Hi: 0xaef3, Stride: 0x1}, + unicode.Range16{Lo: 0xaef5, Hi: 0xaf0f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf11, Hi: 0xaf2b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2d, Hi: 0xaf47, Stride: 0x1}, + unicode.Range16{Lo: 0xaf49, Hi: 0xaf63, Stride: 0x1}, + unicode.Range16{Lo: 0xaf65, Hi: 0xaf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf81, Hi: 0xaf9b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9d, Hi: 0xafb7, Stride: 0x1}, + unicode.Range16{Lo: 0xafb9, Hi: 0xafd3, Stride: 0x1}, + unicode.Range16{Lo: 0xafd5, Hi: 0xafef, Stride: 0x1}, + unicode.Range16{Lo: 0xaff1, Hi: 0xb00b, Stride: 0x1}, + unicode.Range16{Lo: 0xb00d, Hi: 0xb027, Stride: 0x1}, + unicode.Range16{Lo: 0xb029, Hi: 0xb043, Stride: 0x1}, + unicode.Range16{Lo: 0xb045, Hi: 0xb05f, Stride: 0x1}, + unicode.Range16{Lo: 0xb061, Hi: 0xb07b, Stride: 0x1}, + unicode.Range16{Lo: 0xb07d, Hi: 0xb097, Stride: 0x1}, + unicode.Range16{Lo: 0xb099, Hi: 0xb0b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b5, Hi: 0xb0cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d1, Hi: 0xb0eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ed, Hi: 0xb107, Stride: 0x1}, + unicode.Range16{Lo: 0xb109, Hi: 0xb123, Stride: 0x1}, + unicode.Range16{Lo: 0xb125, Hi: 0xb13f, Stride: 0x1}, + unicode.Range16{Lo: 0xb141, Hi: 0xb15b, Stride: 0x1}, + unicode.Range16{Lo: 0xb15d, Hi: 0xb177, Stride: 0x1}, + unicode.Range16{Lo: 0xb179, Hi: 0xb193, Stride: 0x1}, + unicode.Range16{Lo: 0xb195, Hi: 0xb1af, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b1, Hi: 0xb1cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cd, Hi: 0xb1e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e9, Hi: 0xb203, Stride: 0x1}, + unicode.Range16{Lo: 0xb205, Hi: 0xb21f, Stride: 0x1}, + unicode.Range16{Lo: 0xb221, Hi: 0xb23b, Stride: 0x1}, + unicode.Range16{Lo: 0xb23d, Hi: 0xb257, Stride: 0x1}, + unicode.Range16{Lo: 0xb259, Hi: 0xb273, Stride: 0x1}, + unicode.Range16{Lo: 0xb275, Hi: 0xb28f, Stride: 0x1}, + unicode.Range16{Lo: 0xb291, Hi: 0xb2ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ad, Hi: 0xb2c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c9, Hi: 0xb2e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e5, Hi: 0xb2ff, Stride: 0x1}, + unicode.Range16{Lo: 0xb301, Hi: 0xb31b, Stride: 0x1}, 
+ unicode.Range16{Lo: 0xb31d, Hi: 0xb337, Stride: 0x1}, + unicode.Range16{Lo: 0xb339, Hi: 0xb353, Stride: 0x1}, + unicode.Range16{Lo: 0xb355, Hi: 0xb36f, Stride: 0x1}, + unicode.Range16{Lo: 0xb371, Hi: 0xb38b, Stride: 0x1}, + unicode.Range16{Lo: 0xb38d, Hi: 0xb3a7, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a9, Hi: 0xb3c3, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c5, Hi: 0xb3df, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e1, Hi: 0xb3fb, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fd, Hi: 0xb417, Stride: 0x1}, + unicode.Range16{Lo: 0xb419, Hi: 0xb433, Stride: 0x1}, + unicode.Range16{Lo: 0xb435, Hi: 0xb44f, Stride: 0x1}, + unicode.Range16{Lo: 0xb451, Hi: 0xb46b, Stride: 0x1}, + unicode.Range16{Lo: 0xb46d, Hi: 0xb487, Stride: 0x1}, + unicode.Range16{Lo: 0xb489, Hi: 0xb4a3, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a5, Hi: 0xb4bf, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c1, Hi: 0xb4db, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dd, Hi: 0xb4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f9, Hi: 0xb513, Stride: 0x1}, + unicode.Range16{Lo: 0xb515, Hi: 0xb52f, Stride: 0x1}, + unicode.Range16{Lo: 0xb531, Hi: 0xb54b, Stride: 0x1}, + unicode.Range16{Lo: 0xb54d, Hi: 0xb567, Stride: 0x1}, + unicode.Range16{Lo: 0xb569, Hi: 0xb583, Stride: 0x1}, + unicode.Range16{Lo: 0xb585, Hi: 0xb59f, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a1, Hi: 0xb5bb, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bd, Hi: 0xb5d7, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d9, Hi: 0xb5f3, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f5, Hi: 0xb60f, Stride: 0x1}, + unicode.Range16{Lo: 0xb611, Hi: 0xb62b, Stride: 0x1}, + unicode.Range16{Lo: 0xb62d, Hi: 0xb647, Stride: 0x1}, + unicode.Range16{Lo: 0xb649, Hi: 0xb663, Stride: 0x1}, + unicode.Range16{Lo: 0xb665, Hi: 0xb67f, Stride: 0x1}, + unicode.Range16{Lo: 0xb681, Hi: 0xb69b, Stride: 0x1}, + unicode.Range16{Lo: 0xb69d, Hi: 0xb6b7, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b9, Hi: 0xb6d3, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d5, Hi: 0xb6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f1, Hi: 0xb70b, Stride: 0x1}, + unicode.Range16{Lo: 0xb70d, Hi: 0xb727, Stride: 0x1}, + unicode.Range16{Lo: 0xb729, Hi: 0xb743, Stride: 0x1}, + unicode.Range16{Lo: 0xb745, Hi: 0xb75f, Stride: 0x1}, + unicode.Range16{Lo: 0xb761, Hi: 0xb77b, Stride: 0x1}, + unicode.Range16{Lo: 0xb77d, Hi: 0xb797, Stride: 0x1}, + unicode.Range16{Lo: 0xb799, Hi: 0xb7b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b5, Hi: 0xb7cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d1, Hi: 0xb7eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ed, Hi: 0xb807, Stride: 0x1}, + unicode.Range16{Lo: 0xb809, Hi: 0xb823, Stride: 0x1}, + unicode.Range16{Lo: 0xb825, Hi: 0xb83f, Stride: 0x1}, + unicode.Range16{Lo: 0xb841, Hi: 0xb85b, Stride: 0x1}, + unicode.Range16{Lo: 0xb85d, Hi: 0xb877, Stride: 0x1}, + unicode.Range16{Lo: 0xb879, Hi: 0xb893, Stride: 0x1}, + unicode.Range16{Lo: 0xb895, Hi: 0xb8af, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b1, Hi: 0xb8cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cd, Hi: 0xb8e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e9, Hi: 0xb903, Stride: 0x1}, + unicode.Range16{Lo: 0xb905, Hi: 0xb91f, Stride: 0x1}, + unicode.Range16{Lo: 0xb921, Hi: 0xb93b, Stride: 0x1}, + unicode.Range16{Lo: 0xb93d, Hi: 0xb957, Stride: 0x1}, + unicode.Range16{Lo: 0xb959, Hi: 0xb973, Stride: 0x1}, + unicode.Range16{Lo: 0xb975, Hi: 0xb98f, Stride: 0x1}, + unicode.Range16{Lo: 0xb991, Hi: 0xb9ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ad, Hi: 0xb9c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c9, Hi: 0xb9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e5, Hi: 0xb9ff, Stride: 0x1}, + unicode.Range16{Lo: 
0xba01, Hi: 0xba1b, Stride: 0x1}, + unicode.Range16{Lo: 0xba1d, Hi: 0xba37, Stride: 0x1}, + unicode.Range16{Lo: 0xba39, Hi: 0xba53, Stride: 0x1}, + unicode.Range16{Lo: 0xba55, Hi: 0xba6f, Stride: 0x1}, + unicode.Range16{Lo: 0xba71, Hi: 0xba8b, Stride: 0x1}, + unicode.Range16{Lo: 0xba8d, Hi: 0xbaa7, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa9, Hi: 0xbac3, Stride: 0x1}, + unicode.Range16{Lo: 0xbac5, Hi: 0xbadf, Stride: 0x1}, + unicode.Range16{Lo: 0xbae1, Hi: 0xbafb, Stride: 0x1}, + unicode.Range16{Lo: 0xbafd, Hi: 0xbb17, Stride: 0x1}, + unicode.Range16{Lo: 0xbb19, Hi: 0xbb33, Stride: 0x1}, + unicode.Range16{Lo: 0xbb35, Hi: 0xbb4f, Stride: 0x1}, + unicode.Range16{Lo: 0xbb51, Hi: 0xbb6b, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6d, Hi: 0xbb87, Stride: 0x1}, + unicode.Range16{Lo: 0xbb89, Hi: 0xbba3, Stride: 0x1}, + unicode.Range16{Lo: 0xbba5, Hi: 0xbbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc1, Hi: 0xbbdb, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdd, Hi: 0xbbf7, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf9, Hi: 0xbc13, Stride: 0x1}, + unicode.Range16{Lo: 0xbc15, Hi: 0xbc2f, Stride: 0x1}, + unicode.Range16{Lo: 0xbc31, Hi: 0xbc4b, Stride: 0x1}, + unicode.Range16{Lo: 0xbc4d, Hi: 0xbc67, Stride: 0x1}, + unicode.Range16{Lo: 0xbc69, Hi: 0xbc83, Stride: 0x1}, + unicode.Range16{Lo: 0xbc85, Hi: 0xbc9f, Stride: 0x1}, + unicode.Range16{Lo: 0xbca1, Hi: 0xbcbb, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbd, Hi: 0xbcd7, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd9, Hi: 0xbcf3, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf5, Hi: 0xbd0f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd11, Hi: 0xbd2b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2d, Hi: 0xbd47, Stride: 0x1}, + unicode.Range16{Lo: 0xbd49, Hi: 0xbd63, Stride: 0x1}, + unicode.Range16{Lo: 0xbd65, Hi: 0xbd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd81, Hi: 0xbd9b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9d, Hi: 0xbdb7, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb9, Hi: 0xbdd3, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd5, Hi: 0xbdef, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf1, Hi: 0xbe0b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0d, Hi: 0xbe27, Stride: 0x1}, + unicode.Range16{Lo: 0xbe29, Hi: 0xbe43, Stride: 0x1}, + unicode.Range16{Lo: 0xbe45, Hi: 0xbe5f, Stride: 0x1}, + unicode.Range16{Lo: 0xbe61, Hi: 0xbe7b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7d, Hi: 0xbe97, Stride: 0x1}, + unicode.Range16{Lo: 0xbe99, Hi: 0xbeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb5, Hi: 0xbecf, Stride: 0x1}, + unicode.Range16{Lo: 0xbed1, Hi: 0xbeeb, Stride: 0x1}, + unicode.Range16{Lo: 0xbeed, Hi: 0xbf07, Stride: 0x1}, + unicode.Range16{Lo: 0xbf09, Hi: 0xbf23, Stride: 0x1}, + unicode.Range16{Lo: 0xbf25, Hi: 0xbf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xbf41, Hi: 0xbf5b, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5d, Hi: 0xbf77, Stride: 0x1}, + unicode.Range16{Lo: 0xbf79, Hi: 0xbf93, Stride: 0x1}, + unicode.Range16{Lo: 0xbf95, Hi: 0xbfaf, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb1, Hi: 0xbfcb, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcd, Hi: 0xbfe7, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe9, Hi: 0xc003, Stride: 0x1}, + unicode.Range16{Lo: 0xc005, Hi: 0xc01f, Stride: 0x1}, + unicode.Range16{Lo: 0xc021, Hi: 0xc03b, Stride: 0x1}, + unicode.Range16{Lo: 0xc03d, Hi: 0xc057, Stride: 0x1}, + unicode.Range16{Lo: 0xc059, Hi: 0xc073, Stride: 0x1}, + unicode.Range16{Lo: 0xc075, Hi: 0xc08f, Stride: 0x1}, + unicode.Range16{Lo: 0xc091, Hi: 0xc0ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ad, Hi: 0xc0c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c9, Hi: 0xc0e3, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e5, Hi: 0xc0ff, 
Stride: 0x1}, + unicode.Range16{Lo: 0xc101, Hi: 0xc11b, Stride: 0x1}, + unicode.Range16{Lo: 0xc11d, Hi: 0xc137, Stride: 0x1}, + unicode.Range16{Lo: 0xc139, Hi: 0xc153, Stride: 0x1}, + unicode.Range16{Lo: 0xc155, Hi: 0xc16f, Stride: 0x1}, + unicode.Range16{Lo: 0xc171, Hi: 0xc18b, Stride: 0x1}, + unicode.Range16{Lo: 0xc18d, Hi: 0xc1a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a9, Hi: 0xc1c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c5, Hi: 0xc1df, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e1, Hi: 0xc1fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fd, Hi: 0xc217, Stride: 0x1}, + unicode.Range16{Lo: 0xc219, Hi: 0xc233, Stride: 0x1}, + unicode.Range16{Lo: 0xc235, Hi: 0xc24f, Stride: 0x1}, + unicode.Range16{Lo: 0xc251, Hi: 0xc26b, Stride: 0x1}, + unicode.Range16{Lo: 0xc26d, Hi: 0xc287, Stride: 0x1}, + unicode.Range16{Lo: 0xc289, Hi: 0xc2a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a5, Hi: 0xc2bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c1, Hi: 0xc2db, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dd, Hi: 0xc2f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f9, Hi: 0xc313, Stride: 0x1}, + unicode.Range16{Lo: 0xc315, Hi: 0xc32f, Stride: 0x1}, + unicode.Range16{Lo: 0xc331, Hi: 0xc34b, Stride: 0x1}, + unicode.Range16{Lo: 0xc34d, Hi: 0xc367, Stride: 0x1}, + unicode.Range16{Lo: 0xc369, Hi: 0xc383, Stride: 0x1}, + unicode.Range16{Lo: 0xc385, Hi: 0xc39f, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a1, Hi: 0xc3bb, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bd, Hi: 0xc3d7, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d9, Hi: 0xc3f3, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f5, Hi: 0xc40f, Stride: 0x1}, + unicode.Range16{Lo: 0xc411, Hi: 0xc42b, Stride: 0x1}, + unicode.Range16{Lo: 0xc42d, Hi: 0xc447, Stride: 0x1}, + unicode.Range16{Lo: 0xc449, Hi: 0xc463, Stride: 0x1}, + unicode.Range16{Lo: 0xc465, Hi: 0xc47f, Stride: 0x1}, + unicode.Range16{Lo: 0xc481, Hi: 0xc49b, Stride: 0x1}, + unicode.Range16{Lo: 0xc49d, Hi: 0xc4b7, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b9, Hi: 0xc4d3, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d5, Hi: 0xc4ef, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f1, Hi: 0xc50b, Stride: 0x1}, + unicode.Range16{Lo: 0xc50d, Hi: 0xc527, Stride: 0x1}, + unicode.Range16{Lo: 0xc529, Hi: 0xc543, Stride: 0x1}, + unicode.Range16{Lo: 0xc545, Hi: 0xc55f, Stride: 0x1}, + unicode.Range16{Lo: 0xc561, Hi: 0xc57b, Stride: 0x1}, + unicode.Range16{Lo: 0xc57d, Hi: 0xc597, Stride: 0x1}, + unicode.Range16{Lo: 0xc599, Hi: 0xc5b3, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b5, Hi: 0xc5cf, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d1, Hi: 0xc5eb, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ed, Hi: 0xc607, Stride: 0x1}, + unicode.Range16{Lo: 0xc609, Hi: 0xc623, Stride: 0x1}, + unicode.Range16{Lo: 0xc625, Hi: 0xc63f, Stride: 0x1}, + unicode.Range16{Lo: 0xc641, Hi: 0xc65b, Stride: 0x1}, + unicode.Range16{Lo: 0xc65d, Hi: 0xc677, Stride: 0x1}, + unicode.Range16{Lo: 0xc679, Hi: 0xc693, Stride: 0x1}, + unicode.Range16{Lo: 0xc695, Hi: 0xc6af, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b1, Hi: 0xc6cb, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cd, Hi: 0xc6e7, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e9, Hi: 0xc703, Stride: 0x1}, + unicode.Range16{Lo: 0xc705, Hi: 0xc71f, Stride: 0x1}, + unicode.Range16{Lo: 0xc721, Hi: 0xc73b, Stride: 0x1}, + unicode.Range16{Lo: 0xc73d, Hi: 0xc757, Stride: 0x1}, + unicode.Range16{Lo: 0xc759, Hi: 0xc773, Stride: 0x1}, + unicode.Range16{Lo: 0xc775, Hi: 0xc78f, Stride: 0x1}, + unicode.Range16{Lo: 0xc791, Hi: 0xc7ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ad, Hi: 0xc7c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c9, Hi: 0xc7e3, Stride: 0x1}, + 
unicode.Range16{Lo: 0xc7e5, Hi: 0xc7ff, Stride: 0x1}, + unicode.Range16{Lo: 0xc801, Hi: 0xc81b, Stride: 0x1}, + unicode.Range16{Lo: 0xc81d, Hi: 0xc837, Stride: 0x1}, + unicode.Range16{Lo: 0xc839, Hi: 0xc853, Stride: 0x1}, + unicode.Range16{Lo: 0xc855, Hi: 0xc86f, Stride: 0x1}, + unicode.Range16{Lo: 0xc871, Hi: 0xc88b, Stride: 0x1}, + unicode.Range16{Lo: 0xc88d, Hi: 0xc8a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a9, Hi: 0xc8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c5, Hi: 0xc8df, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e1, Hi: 0xc8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fd, Hi: 0xc917, Stride: 0x1}, + unicode.Range16{Lo: 0xc919, Hi: 0xc933, Stride: 0x1}, + unicode.Range16{Lo: 0xc935, Hi: 0xc94f, Stride: 0x1}, + unicode.Range16{Lo: 0xc951, Hi: 0xc96b, Stride: 0x1}, + unicode.Range16{Lo: 0xc96d, Hi: 0xc987, Stride: 0x1}, + unicode.Range16{Lo: 0xc989, Hi: 0xc9a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a5, Hi: 0xc9bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c1, Hi: 0xc9db, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dd, Hi: 0xc9f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f9, Hi: 0xca13, Stride: 0x1}, + unicode.Range16{Lo: 0xca15, Hi: 0xca2f, Stride: 0x1}, + unicode.Range16{Lo: 0xca31, Hi: 0xca4b, Stride: 0x1}, + unicode.Range16{Lo: 0xca4d, Hi: 0xca67, Stride: 0x1}, + unicode.Range16{Lo: 0xca69, Hi: 0xca83, Stride: 0x1}, + unicode.Range16{Lo: 0xca85, Hi: 0xca9f, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa1, Hi: 0xcabb, Stride: 0x1}, + unicode.Range16{Lo: 0xcabd, Hi: 0xcad7, Stride: 0x1}, + unicode.Range16{Lo: 0xcad9, Hi: 0xcaf3, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf5, Hi: 0xcb0f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb11, Hi: 0xcb2b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2d, Hi: 0xcb47, Stride: 0x1}, + unicode.Range16{Lo: 0xcb49, Hi: 0xcb63, Stride: 0x1}, + unicode.Range16{Lo: 0xcb65, Hi: 0xcb7f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb81, Hi: 0xcb9b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9d, Hi: 0xcbb7, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb9, Hi: 0xcbd3, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd5, Hi: 0xcbef, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf1, Hi: 0xcc0b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0d, Hi: 0xcc27, Stride: 0x1}, + unicode.Range16{Lo: 0xcc29, Hi: 0xcc43, Stride: 0x1}, + unicode.Range16{Lo: 0xcc45, Hi: 0xcc5f, Stride: 0x1}, + unicode.Range16{Lo: 0xcc61, Hi: 0xcc7b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7d, Hi: 0xcc97, Stride: 0x1}, + unicode.Range16{Lo: 0xcc99, Hi: 0xccb3, Stride: 0x1}, + unicode.Range16{Lo: 0xccb5, Hi: 0xcccf, Stride: 0x1}, + unicode.Range16{Lo: 0xccd1, Hi: 0xcceb, Stride: 0x1}, + unicode.Range16{Lo: 0xcced, Hi: 0xcd07, Stride: 0x1}, + unicode.Range16{Lo: 0xcd09, Hi: 0xcd23, Stride: 0x1}, + unicode.Range16{Lo: 0xcd25, Hi: 0xcd3f, Stride: 0x1}, + unicode.Range16{Lo: 0xcd41, Hi: 0xcd5b, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5d, Hi: 0xcd77, Stride: 0x1}, + unicode.Range16{Lo: 0xcd79, Hi: 0xcd93, Stride: 0x1}, + unicode.Range16{Lo: 0xcd95, Hi: 0xcdaf, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb1, Hi: 0xcdcb, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcd, Hi: 0xcde7, Stride: 0x1}, + unicode.Range16{Lo: 0xcde9, Hi: 0xce03, Stride: 0x1}, + unicode.Range16{Lo: 0xce05, Hi: 0xce1f, Stride: 0x1}, + unicode.Range16{Lo: 0xce21, Hi: 0xce3b, Stride: 0x1}, + unicode.Range16{Lo: 0xce3d, Hi: 0xce57, Stride: 0x1}, + unicode.Range16{Lo: 0xce59, Hi: 0xce73, Stride: 0x1}, + unicode.Range16{Lo: 0xce75, Hi: 0xce8f, Stride: 0x1}, + unicode.Range16{Lo: 0xce91, Hi: 0xceab, Stride: 0x1}, + unicode.Range16{Lo: 0xcead, Hi: 0xcec7, Stride: 0x1}, + unicode.Range16{Lo: 
0xcec9, Hi: 0xcee3, Stride: 0x1}, + unicode.Range16{Lo: 0xcee5, Hi: 0xceff, Stride: 0x1}, + unicode.Range16{Lo: 0xcf01, Hi: 0xcf1b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1d, Hi: 0xcf37, Stride: 0x1}, + unicode.Range16{Lo: 0xcf39, Hi: 0xcf53, Stride: 0x1}, + unicode.Range16{Lo: 0xcf55, Hi: 0xcf6f, Stride: 0x1}, + unicode.Range16{Lo: 0xcf71, Hi: 0xcf8b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8d, Hi: 0xcfa7, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa9, Hi: 0xcfc3, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc5, Hi: 0xcfdf, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe1, Hi: 0xcffb, Stride: 0x1}, + unicode.Range16{Lo: 0xcffd, Hi: 0xd017, Stride: 0x1}, + unicode.Range16{Lo: 0xd019, Hi: 0xd033, Stride: 0x1}, + unicode.Range16{Lo: 0xd035, Hi: 0xd04f, Stride: 0x1}, + unicode.Range16{Lo: 0xd051, Hi: 0xd06b, Stride: 0x1}, + unicode.Range16{Lo: 0xd06d, Hi: 0xd087, Stride: 0x1}, + unicode.Range16{Lo: 0xd089, Hi: 0xd0a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a5, Hi: 0xd0bf, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c1, Hi: 0xd0db, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dd, Hi: 0xd0f7, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f9, Hi: 0xd113, Stride: 0x1}, + unicode.Range16{Lo: 0xd115, Hi: 0xd12f, Stride: 0x1}, + unicode.Range16{Lo: 0xd131, Hi: 0xd14b, Stride: 0x1}, + unicode.Range16{Lo: 0xd14d, Hi: 0xd167, Stride: 0x1}, + unicode.Range16{Lo: 0xd169, Hi: 0xd183, Stride: 0x1}, + unicode.Range16{Lo: 0xd185, Hi: 0xd19f, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a1, Hi: 0xd1bb, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bd, Hi: 0xd1d7, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d9, Hi: 0xd1f3, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f5, Hi: 0xd20f, Stride: 0x1}, + unicode.Range16{Lo: 0xd211, Hi: 0xd22b, Stride: 0x1}, + unicode.Range16{Lo: 0xd22d, Hi: 0xd247, Stride: 0x1}, + unicode.Range16{Lo: 0xd249, Hi: 0xd263, Stride: 0x1}, + unicode.Range16{Lo: 0xd265, Hi: 0xd27f, Stride: 0x1}, + unicode.Range16{Lo: 0xd281, Hi: 0xd29b, Stride: 0x1}, + unicode.Range16{Lo: 0xd29d, Hi: 0xd2b7, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b9, Hi: 0xd2d3, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d5, Hi: 0xd2ef, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f1, Hi: 0xd30b, Stride: 0x1}, + unicode.Range16{Lo: 0xd30d, Hi: 0xd327, Stride: 0x1}, + unicode.Range16{Lo: 0xd329, Hi: 0xd343, Stride: 0x1}, + unicode.Range16{Lo: 0xd345, Hi: 0xd35f, Stride: 0x1}, + unicode.Range16{Lo: 0xd361, Hi: 0xd37b, Stride: 0x1}, + unicode.Range16{Lo: 0xd37d, Hi: 0xd397, Stride: 0x1}, + unicode.Range16{Lo: 0xd399, Hi: 0xd3b3, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b5, Hi: 0xd3cf, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d1, Hi: 0xd3eb, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ed, Hi: 0xd407, Stride: 0x1}, + unicode.Range16{Lo: 0xd409, Hi: 0xd423, Stride: 0x1}, + unicode.Range16{Lo: 0xd425, Hi: 0xd43f, Stride: 0x1}, + unicode.Range16{Lo: 0xd441, Hi: 0xd45b, Stride: 0x1}, + unicode.Range16{Lo: 0xd45d, Hi: 0xd477, Stride: 0x1}, + unicode.Range16{Lo: 0xd479, Hi: 0xd493, Stride: 0x1}, + unicode.Range16{Lo: 0xd495, Hi: 0xd4af, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b1, Hi: 0xd4cb, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cd, Hi: 0xd4e7, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e9, Hi: 0xd503, Stride: 0x1}, + unicode.Range16{Lo: 0xd505, Hi: 0xd51f, Stride: 0x1}, + unicode.Range16{Lo: 0xd521, Hi: 0xd53b, Stride: 0x1}, + unicode.Range16{Lo: 0xd53d, Hi: 0xd557, Stride: 0x1}, + unicode.Range16{Lo: 0xd559, Hi: 0xd573, Stride: 0x1}, + unicode.Range16{Lo: 0xd575, Hi: 0xd58f, Stride: 0x1}, + unicode.Range16{Lo: 0xd591, Hi: 0xd5ab, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ad, Hi: 0xd5c7, 
Stride: 0x1},
+ unicode.Range16{Lo: 0xd5c9, Hi: 0xd5e3, Stride: 0x1},
+ unicode.Range16{Lo: 0xd5e5, Hi: 0xd5ff, Stride: 0x1},
+ unicode.Range16{Lo: 0xd601, Hi: 0xd61b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd61d, Hi: 0xd637, Stride: 0x1},
+ unicode.Range16{Lo: 0xd639, Hi: 0xd653, Stride: 0x1},
+ unicode.Range16{Lo: 0xd655, Hi: 0xd66f, Stride: 0x1},
+ unicode.Range16{Lo: 0xd671, Hi: 0xd68b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd68d, Hi: 0xd6a7, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6a9, Hi: 0xd6c3, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6c5, Hi: 0xd6df, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6e1, Hi: 0xd6fb, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6fd, Hi: 0xd717, Stride: 0x1},
+ unicode.Range16{Lo: 0xd719, Hi: 0xd733, Stride: 0x1},
+ unicode.Range16{Lo: 0xd735, Hi: 0xd74f, Stride: 0x1},
+ unicode.Range16{Lo: 0xd751, Hi: 0xd76b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd76d, Hi: 0xd787, Stride: 0x1},
+ unicode.Range16{Lo: 0xd789, Hi: 0xd7a3, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemePrepend = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+ unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+ unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+ unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+ unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+ unicode.Range32{Lo: 0x111c2, Hi: 0x111c3, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeRegional_Indicator = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeSpacingMark = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+ unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+ unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+ unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+ unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+ unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
+ unicode.Range16{Lo: 0x9bf, Hi: 0x9c0, Stride: 0x1},
+ unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
+ unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
+ unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
+ unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
+ unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
+ unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
+ unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
+ unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
+ unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
+ unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
+ unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
+ unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
+ unicode.Range16{Lo: 0xbbf, Hi: 0xbbf, Stride: 0x1},
+ unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
+ unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
+ unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
+ unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
+ unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
+ unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
+ unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc0, Hi: 0xcc1, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc3, Hi: 0xcc4, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
+ unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
+ unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
+ unicode.Range16{Lo: 0xd3f, Hi: 0xd40, Stride: 0x1},
unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdd0, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xdde, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe33, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xeb3, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1084, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + 
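For readers skimming this vendored, machine-generated file: each _Grapheme* and _Word* variable is a unicode.RangeTable holding the sorted code-point ranges for one Unicode segmentation property, and the _GraphemeRuneType dispatch a little further down classifies a rune by probing the tables in order with unicode.Is. A minimal, self-contained sketch of that lookup pattern follows; the tables are deliberately trimmed, and the names zwj, regionalIndicator, and classify are illustrative, not part of this package:

package main

import (
	"fmt"
	"unicode"
)

// Trimmed, illustrative property tables. The generated tables in this
// file cover the full Unicode ranges for each grapheme-break property.
var zwj = &unicode.RangeTable{
	R16: []unicode.Range16{{Lo: 0x200d, Hi: 0x200d, Stride: 1}},
}

var regionalIndicator = &unicode.RangeTable{
	R32: []unicode.Range32{{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 1}},
}

// classify mirrors the shape of the generated dispatch: probe each
// property table in order with unicode.Is, return the first match.
func classify(r rune) string {
	switch {
	case unicode.Is(zwj, r):
		return "ZWJ"
	case unicode.Is(regionalIndicator, r):
		return "Regional_Indicator"
	default:
		return "Other"
	}
}

func main() {
	fmt.Println(classify('\u200d'))     // ZWJ
	fmt.Println(classify('\U0001F1FA')) // Regional_Indicator
	fmt.Println(classify('a'))          // Other
}

unicode.Is binary-searches the sorted ranges (with a linear fast path for small tables and Latin-1 runes), so each probe stays cheap even against the large generated tables; the full dispatch costs one probe per property until a match.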
+ unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
+ unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
+ unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+ unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+ unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+ unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+ unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+ unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+ unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+ unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+ unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+ unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+ unicode.Range32{Lo: 0x1133f, Hi: 0x1133f, Stride: 0x1},
+ unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+ unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+ unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+ unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+ unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+ unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+ unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+ unicode.Range32{Lo: 0x114b1, Hi: 0x114b2, Stride: 0x1},
+ unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+ unicode.Range32{Lo: 0x114bb, Hi: 0x114bc, Stride: 0x1},
+ unicode.Range32{Lo: 0x114be, Hi: 0x114be, Stride: 0x1},
+ unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+ unicode.Range32{Lo: 0x115b0, Hi: 0x115b1, Stride: 0x1},
+ unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+ unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+ unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+ unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+ unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+ unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+ unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+ unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
+ unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+ unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+ unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d166, Hi: 0x1d166, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d16d, Hi: 0x1d16d, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeT = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x11a8, Hi: 0x11ff, Stride: 0x1},
+ unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeV = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x1160, Hi: 0x11a7, Stride: 0x1},
+ unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeZWJ = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+type _GraphemeRuneRange unicode.RangeTable
+
+func _GraphemeRuneType(r rune) *_GraphemeRuneRange {
+ switch {
+ case unicode.Is(_GraphemeCR, r):
+ return (*_GraphemeRuneRange)(_GraphemeCR)
+ case unicode.Is(_GraphemeControl, r):
+ return (*_GraphemeRuneRange)(_GraphemeControl)
+ case unicode.Is(_GraphemeE_Base, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Base)
+ case unicode.Is(_GraphemeE_Base_GAZ, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Base_GAZ)
+ case unicode.Is(_GraphemeE_Modifier, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Modifier)
+ case unicode.Is(_GraphemeExtend, r):
+ return (*_GraphemeRuneRange)(_GraphemeExtend)
+ case unicode.Is(_GraphemeGlue_After_Zwj, r):
+ return (*_GraphemeRuneRange)(_GraphemeGlue_After_Zwj)
+ case unicode.Is(_GraphemeL, r):
+ return (*_GraphemeRuneRange)(_GraphemeL)
+ case unicode.Is(_GraphemeLF, r):
+ return (*_GraphemeRuneRange)(_GraphemeLF)
+ case unicode.Is(_GraphemeLV, r):
+ return (*_GraphemeRuneRange)(_GraphemeLV)
+ case unicode.Is(_GraphemeLVT, r):
+ return (*_GraphemeRuneRange)(_GraphemeLVT)
+ case unicode.Is(_GraphemePrepend, r):
+ return (*_GraphemeRuneRange)(_GraphemePrepend)
+ case unicode.Is(_GraphemeRegional_Indicator, r):
+ return (*_GraphemeRuneRange)(_GraphemeRegional_Indicator)
+ case unicode.Is(_GraphemeSpacingMark, r):
+ return (*_GraphemeRuneRange)(_GraphemeSpacingMark)
+ case unicode.Is(_GraphemeT, r):
+ return (*_GraphemeRuneRange)(_GraphemeT)
+ case unicode.Is(_GraphemeV, r):
+ return (*_GraphemeRuneRange)(_GraphemeV)
+ case unicode.Is(_GraphemeZWJ, r):
+ return (*_GraphemeRuneRange)(_GraphemeZWJ)
+ default:
+ return nil
+ }
+}
+func (rng *_GraphemeRuneRange) String() string {
+ switch (*unicode.RangeTable)(rng) {
+ case _GraphemeCR:
+ return "CR"
+ case _GraphemeControl:
+ return "Control"
+ case _GraphemeE_Base:
+ return "E_Base"
+ case _GraphemeE_Base_GAZ:
+ return "E_Base_GAZ"
+ case _GraphemeE_Modifier:
+ return "E_Modifier"
+ case _GraphemeExtend:
+ return "Extend"
+ case _GraphemeGlue_After_Zwj:
+ return "Glue_After_Zwj"
+ case _GraphemeL:
+ return "L"
+ case _GraphemeLF:
+ return "LF"
+ case _GraphemeLV:
+ return "LV"
+ case _GraphemeLVT:
+ return "LVT"
+ case _GraphemePrepend:
+ return "Prepend"
+ case _GraphemeRegional_Indicator:
+ return "Regional_Indicator"
+ case _GraphemeSpacingMark:
+ return "SpacingMark"
+ case _GraphemeT:
+ return "T"
+ case _GraphemeV:
+ return "V"
+ case _GraphemeZWJ:
+ return "ZWJ"
+ default:
+ return "Other"
+ }
+}
+
+var _WordALetter = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+ unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+ unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+ unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+ unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+ unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+ unicode.Range16{Lo: 0xd8, Hi: 0xf6, Stride: 0x1},
+ unicode.Range16{Lo: 0xf8, Hi: 0x1ba, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bc, Hi: 0x1bf, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c4, Hi: 0x293, Stride: 0x1},
+ unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
+ unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
+ unicode.Range16{Lo: 0x2b0, Hi: 0x2c1, Stride: 0x1},
+ unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
+ unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
+ unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
+ unicode.Range16{Lo: 0x370, Hi: 0x373, Stride: 0x1},
+ unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
+ unicode.Range16{Lo: 0x376, Hi: 0x377, Stride: 0x1},
+ unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
+ unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
+ unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
+ unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
+ unicode.Range16{Lo: 0x388, Hi: 0x38a,
Stride: 0x1}, + unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1}, + unicode.Range16{Lo: 0x38e, Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1}, + unicode.Range16{Lo: 0xab5, 
Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + 
unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 
0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, + unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + 
unicode.Range16{Lo: 0xfb50, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10400, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + 
unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + 
unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1}, + unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1}, + unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1}, + unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1}, + unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1}, + unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1}, + unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 0x1}, + unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1}, + unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1}, + unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d539, Stride: 0x1}, + unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1}, + unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1}, + unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d734, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d76e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1e900, Hi: 0x1e943, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1}, + 
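One generated field worth calling out before the word-break tables that follow (_WordCR, _WordDouble_Quote, and friends): LatinOffset records how many R16 entries lie entirely within Latin-1 (Hi at most unicode.MaxLatin1), which the unicode package uses to fast-path lookups for low code points. That is why _WordALetter ends with LatinOffset: 7 just below (its first seven R16 ranges top out under 0x100) while _WordCR and _WordDouble_Quote carry LatinOffset: 1. A small sketch of how the value is derived; latinOffset is a hypothetical helper, not something defined in this package:

package main

import (
	"fmt"
	"unicode"
)

// latinOffset counts the leading R16 entries that sit entirely within
// Latin-1, which is what the generated LatinOffset fields record. The
// ranges in a RangeTable are sorted, so we can stop at the first one
// that reaches above unicode.MaxLatin1.
func latinOffset(rt *unicode.RangeTable) int {
	n := 0
	for _, r := range rt.R16 {
		if r.Hi > unicode.MaxLatin1 {
			break
		}
		n++
	}
	return n
}

func main() {
	// Same shape as the generated _WordCR table below.
	wordCR := &unicode.RangeTable{
		R16: []unicode.Range16{{Lo: 0xd, Hi: 0xd, Stride: 1}},
	}
	fmt.Println(latinOffset(wordCR)) // 1, matching the generated LatinOffset: 1
}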
+ unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+ },
+ LatinOffset: 7,
+}
+
+var _WordCR = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordDouble_Quote = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordE_Base = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1},
+ unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1},
+ unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordE_Base_GAZ = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordE_Modifier = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff,
Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, 
Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, 
Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 
0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, 
Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 
0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 
0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + 
		unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _WordExtendNumLet = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x5f, Hi: 0x5f, Stride: 0x1},
+		unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
+		unicode.Range16{Lo: 0x203f, Hi: 0x2040, Stride: 0x1},
+		unicode.Range16{Lo: 0x2054, Hi: 0x2054, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe33, Hi: 0xfe34, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff3f, Hi: 0xff3f, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordFormat = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+		unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+		unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+		unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+		unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+		unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+		unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+		unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+		unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordGlue_After_Zwj = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _WordHebrew_Letter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb46, Hi: 0xfb4f, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _WordKatakana = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1},
+		unicode.Range16{Lo: 0x309b, Hi: 0x309c, Stride: 0x1},
+		unicode.Range16{Lo: 0x30a0, Hi: 0x30a0, Stride: 0x1},
+		unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1},
+		unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x32d0, Hi: 0x32fe, Stride: 0x1},
+		unicode.Range16{Lo: 0x3300, Hi: 0x3357, Stride: 0x1},
+		unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1},
+		unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1b000, Hi: 0x1b000, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _WordLF = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordMidLetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7, Hi: 0xb7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d7, Hi: 0x2d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x387, Hi: 0x387, Stride: 0x1},
+		unicode.Range16{Lo: 0x5f4, Hi: 0x5f4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2027, Hi: 0x2027, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
+		unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
+	},
+	LatinOffset: 2,
+}
+
+var _WordMidNum = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
+		unicode.Range16{Lo: 0x3b, Hi: 0x3b, Stride: 0x1},
+		unicode.Range16{Lo: 0x37e, Hi: 0x37e, Stride: 0x1},
+		unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
+		unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
+		unicode.Range16{Lo: 0x66c, Hi: 0x66c, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2044, Hi: 0x2044, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe10, Hi: 0xfe10, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe14, Hi: 0xfe14, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe50, Hi: 0xfe50, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe54, Hi: 0xfe54, Stride: 0x1},
+		unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xff1b, Hi: 0xff1b, Stride: 0x1},
+	},
+	LatinOffset: 2,
+}
+
+var _WordMidNumLet = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
+		unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
+		unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
+		unicode.Range16{Lo: 0xff07, Hi: 0xff07, Stride: 0x1},
+		unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordNewline = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+		unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
+		unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+		unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+	},
+	LatinOffset: 2,
+}
+
+var _WordNumeric = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+		unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+		unicode.Range16{Lo: 0x66b, Hi: 0x66b, Stride: 0x1},
+		unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+		unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+		unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+		unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+		unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+		unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+		unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+		unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+		unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+		unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+		unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+		unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+		unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+		unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+		unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+		unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+		unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+		unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordRegional_Indicator = &unicode.RangeTable{
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _WordSingle_Quote = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordZWJ = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+type _WordRuneRange unicode.RangeTable
+
+func _WordRuneType(r rune) *_WordRuneRange {
+	switch {
+	case unicode.Is(_WordALetter, r):
+		return (*_WordRuneRange)(_WordALetter)
+	case unicode.Is(_WordCR, r):
+		return (*_WordRuneRange)(_WordCR)
+	case unicode.Is(_WordDouble_Quote, r):
+		return (*_WordRuneRange)(_WordDouble_Quote)
+	case unicode.Is(_WordE_Base, r):
+		return (*_WordRuneRange)(_WordE_Base)
+	case unicode.Is(_WordE_Base_GAZ, r):
+		return (*_WordRuneRange)(_WordE_Base_GAZ)
+	case unicode.Is(_WordE_Modifier, r):
+		return (*_WordRuneRange)(_WordE_Modifier)
+	case unicode.Is(_WordExtend, r):
+		return (*_WordRuneRange)(_WordExtend)
+	case unicode.Is(_WordExtendNumLet, r):
+		return (*_WordRuneRange)(_WordExtendNumLet)
+	case unicode.Is(_WordFormat, r):
+		return (*_WordRuneRange)(_WordFormat)
+	case unicode.Is(_WordGlue_After_Zwj, r):
+		return (*_WordRuneRange)(_WordGlue_After_Zwj)
+	case unicode.Is(_WordHebrew_Letter, r):
+		return (*_WordRuneRange)(_WordHebrew_Letter)
+	case unicode.Is(_WordKatakana, r):
+		return (*_WordRuneRange)(_WordKatakana)
+	case unicode.Is(_WordLF, r):
+		return (*_WordRuneRange)(_WordLF)
+	case unicode.Is(_WordMidLetter, r):
+		return (*_WordRuneRange)(_WordMidLetter)
+	case unicode.Is(_WordMidNum, r):
+		return (*_WordRuneRange)(_WordMidNum)
+	case unicode.Is(_WordMidNumLet, r):
+		return (*_WordRuneRange)(_WordMidNumLet)
+	case unicode.Is(_WordNewline, r):
+		return (*_WordRuneRange)(_WordNewline)
+	case unicode.Is(_WordNumeric, r):
+		return (*_WordRuneRange)(_WordNumeric)
+	case unicode.Is(_WordRegional_Indicator, r):
+		return (*_WordRuneRange)(_WordRegional_Indicator)
+	case unicode.Is(_WordSingle_Quote, r):
+		return (*_WordRuneRange)(_WordSingle_Quote)
+	case unicode.Is(_WordZWJ, r):
+		return (*_WordRuneRange)(_WordZWJ)
+	default:
+		return nil
+	}
+}
+func (rng *_WordRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _WordALetter:
+		return "ALetter"
+	case _WordCR:
+		return "CR"
+	case _WordDouble_Quote:
+		return "Double_Quote"
+	case _WordE_Base:
+		return "E_Base"
+	case _WordE_Base_GAZ:
+		return "E_Base_GAZ"
+	case _WordE_Modifier:
+		return "E_Modifier"
+	case _WordExtend:
+		return "Extend"
+	case _WordExtendNumLet:
+		return "ExtendNumLet"
+	case _WordFormat:
+		return "Format"
+	case _WordGlue_After_Zwj:
+		return "Glue_After_Zwj"
+	case _WordHebrew_Letter:
+		return "Hebrew_Letter"
+	case _WordKatakana:
+		return "Katakana"
+	case _WordLF:
+		return "LF"
+	case _WordMidLetter:
+		return "MidLetter"
+	case _WordMidNum:
+		return "MidNum"
+	case _WordMidNumLet:
+		return "MidNumLet"
+	case _WordNewline:
+		return "Newline"
+	case _WordNumeric:
+		return "Numeric"
+	case _WordRegional_Indicator:
+		return "Regional_Indicator"
+	case _WordSingle_Quote:
+		return "Single_Quote"
+	case _WordZWJ:
+		return "ZWJ"
+	default:
+		return "Other"
+	}
+}
+
+var _SentenceATerm = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
+		unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceCR = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceClose = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+		unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
+		unicode.Range16{Lo: 0x28, Hi: 0x28, Stride: 0x1},
+		unicode.Range16{Lo: 0x29, Hi: 0x29, Stride: 0x1},
+		unicode.Range16{Lo: 0x5b, Hi: 0x5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x5d, Hi: 0x5d, Stride: 0x1},
+		unicode.Range16{Lo: 0x7b, Hi: 0x7b, Stride: 0x1},
+		unicode.Range16{Lo: 0x7d, Hi: 0x7d, Stride: 0x1},
+		unicode.Range16{Lo: 0xab, Hi: 0xab, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb, Hi: 0xbb, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3a, Hi: 0xf3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3b, Hi: 0xf3b, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3c, Hi: 0xf3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3d, Hi: 0xf3d, Stride: 0x1},
+		unicode.Range16{Lo: 0x169b, Hi: 0x169b, Stride: 0x1},
+		unicode.Range16{Lo: 0x169c, Hi: 0x169c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
+		unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
+		unicode.Range16{Lo: 0x201a, Hi: 0x201a, Stride: 0x1},
+		unicode.Range16{Lo: 0x201b, Hi: 0x201c, Stride: 0x1},
+		unicode.Range16{Lo:
0x201d, Hi: 0x201d, Stride: 0x1}, + unicode.Range16{Lo: 0x201e, Hi: 0x201e, Stride: 0x1}, + unicode.Range16{Lo: 0x201f, Hi: 0x201f, Stride: 0x1}, + unicode.Range16{Lo: 0x2039, Hi: 0x2039, Stride: 0x1}, + unicode.Range16{Lo: 0x203a, Hi: 0x203a, Stride: 0x1}, + unicode.Range16{Lo: 0x2045, Hi: 0x2045, Stride: 0x1}, + unicode.Range16{Lo: 0x2046, Hi: 0x2046, Stride: 0x1}, + unicode.Range16{Lo: 0x207d, Hi: 0x207d, Stride: 0x1}, + unicode.Range16{Lo: 0x207e, Hi: 0x207e, Stride: 0x1}, + unicode.Range16{Lo: 0x208d, Hi: 0x208d, Stride: 0x1}, + unicode.Range16{Lo: 0x208e, Hi: 0x208e, Stride: 0x1}, + unicode.Range16{Lo: 0x2308, Hi: 0x2308, Stride: 0x1}, + unicode.Range16{Lo: 0x2309, Hi: 0x2309, Stride: 0x1}, + unicode.Range16{Lo: 0x230a, Hi: 0x230a, Stride: 0x1}, + unicode.Range16{Lo: 0x230b, Hi: 0x230b, Stride: 0x1}, + unicode.Range16{Lo: 0x2329, Hi: 0x2329, Stride: 0x1}, + unicode.Range16{Lo: 0x232a, Hi: 0x232a, Stride: 0x1}, + unicode.Range16{Lo: 0x275b, Hi: 0x2760, Stride: 0x1}, + unicode.Range16{Lo: 0x2768, Hi: 0x2768, Stride: 0x1}, + unicode.Range16{Lo: 0x2769, Hi: 0x2769, Stride: 0x1}, + unicode.Range16{Lo: 0x276a, Hi: 0x276a, Stride: 0x1}, + unicode.Range16{Lo: 0x276b, Hi: 0x276b, Stride: 0x1}, + unicode.Range16{Lo: 0x276c, Hi: 0x276c, Stride: 0x1}, + unicode.Range16{Lo: 0x276d, Hi: 0x276d, Stride: 0x1}, + unicode.Range16{Lo: 0x276e, Hi: 0x276e, Stride: 0x1}, + unicode.Range16{Lo: 0x276f, Hi: 0x276f, Stride: 0x1}, + unicode.Range16{Lo: 0x2770, Hi: 0x2770, Stride: 0x1}, + unicode.Range16{Lo: 0x2771, Hi: 0x2771, Stride: 0x1}, + unicode.Range16{Lo: 0x2772, Hi: 0x2772, Stride: 0x1}, + unicode.Range16{Lo: 0x2773, Hi: 0x2773, Stride: 0x1}, + unicode.Range16{Lo: 0x2774, Hi: 0x2774, Stride: 0x1}, + unicode.Range16{Lo: 0x2775, Hi: 0x2775, Stride: 0x1}, + unicode.Range16{Lo: 0x27c5, Hi: 0x27c5, Stride: 0x1}, + unicode.Range16{Lo: 0x27c6, Hi: 0x27c6, Stride: 0x1}, + unicode.Range16{Lo: 0x27e6, Hi: 0x27e6, Stride: 0x1}, + unicode.Range16{Lo: 0x27e7, Hi: 0x27e7, Stride: 0x1}, + unicode.Range16{Lo: 0x27e8, Hi: 0x27e8, Stride: 0x1}, + unicode.Range16{Lo: 0x27e9, Hi: 0x27e9, Stride: 0x1}, + unicode.Range16{Lo: 0x27ea, Hi: 0x27ea, Stride: 0x1}, + unicode.Range16{Lo: 0x27eb, Hi: 0x27eb, Stride: 0x1}, + unicode.Range16{Lo: 0x27ec, Hi: 0x27ec, Stride: 0x1}, + unicode.Range16{Lo: 0x27ed, Hi: 0x27ed, Stride: 0x1}, + unicode.Range16{Lo: 0x27ee, Hi: 0x27ee, Stride: 0x1}, + unicode.Range16{Lo: 0x27ef, Hi: 0x27ef, Stride: 0x1}, + unicode.Range16{Lo: 0x2983, Hi: 0x2983, Stride: 0x1}, + unicode.Range16{Lo: 0x2984, Hi: 0x2984, Stride: 0x1}, + unicode.Range16{Lo: 0x2985, Hi: 0x2985, Stride: 0x1}, + unicode.Range16{Lo: 0x2986, Hi: 0x2986, Stride: 0x1}, + unicode.Range16{Lo: 0x2987, Hi: 0x2987, Stride: 0x1}, + unicode.Range16{Lo: 0x2988, Hi: 0x2988, Stride: 0x1}, + unicode.Range16{Lo: 0x2989, Hi: 0x2989, Stride: 0x1}, + unicode.Range16{Lo: 0x298a, Hi: 0x298a, Stride: 0x1}, + unicode.Range16{Lo: 0x298b, Hi: 0x298b, Stride: 0x1}, + unicode.Range16{Lo: 0x298c, Hi: 0x298c, Stride: 0x1}, + unicode.Range16{Lo: 0x298d, Hi: 0x298d, Stride: 0x1}, + unicode.Range16{Lo: 0x298e, Hi: 0x298e, Stride: 0x1}, + unicode.Range16{Lo: 0x298f, Hi: 0x298f, Stride: 0x1}, + unicode.Range16{Lo: 0x2990, Hi: 0x2990, Stride: 0x1}, + unicode.Range16{Lo: 0x2991, Hi: 0x2991, Stride: 0x1}, + unicode.Range16{Lo: 0x2992, Hi: 0x2992, Stride: 0x1}, + unicode.Range16{Lo: 0x2993, Hi: 0x2993, Stride: 0x1}, + unicode.Range16{Lo: 0x2994, Hi: 0x2994, Stride: 0x1}, + unicode.Range16{Lo: 0x2995, Hi: 0x2995, Stride: 0x1}, + unicode.Range16{Lo: 0x2996, Hi: 0x2996, 
Stride: 0x1}, + unicode.Range16{Lo: 0x2997, Hi: 0x2997, Stride: 0x1}, + unicode.Range16{Lo: 0x2998, Hi: 0x2998, Stride: 0x1}, + unicode.Range16{Lo: 0x29d8, Hi: 0x29d8, Stride: 0x1}, + unicode.Range16{Lo: 0x29d9, Hi: 0x29d9, Stride: 0x1}, + unicode.Range16{Lo: 0x29da, Hi: 0x29da, Stride: 0x1}, + unicode.Range16{Lo: 0x29db, Hi: 0x29db, Stride: 0x1}, + unicode.Range16{Lo: 0x29fc, Hi: 0x29fc, Stride: 0x1}, + unicode.Range16{Lo: 0x29fd, Hi: 0x29fd, Stride: 0x1}, + unicode.Range16{Lo: 0x2e00, Hi: 0x2e01, Stride: 0x1}, + unicode.Range16{Lo: 0x2e02, Hi: 0x2e02, Stride: 0x1}, + unicode.Range16{Lo: 0x2e03, Hi: 0x2e03, Stride: 0x1}, + unicode.Range16{Lo: 0x2e04, Hi: 0x2e04, Stride: 0x1}, + unicode.Range16{Lo: 0x2e05, Hi: 0x2e05, Stride: 0x1}, + unicode.Range16{Lo: 0x2e06, Hi: 0x2e08, Stride: 0x1}, + unicode.Range16{Lo: 0x2e09, Hi: 0x2e09, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0a, Hi: 0x2e0a, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0b, Hi: 0x2e0b, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0c, Hi: 0x2e0c, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0d, Hi: 0x2e0d, Stride: 0x1}, + unicode.Range16{Lo: 0x2e1c, Hi: 0x2e1c, Stride: 0x1}, + unicode.Range16{Lo: 0x2e1d, Hi: 0x2e1d, Stride: 0x1}, + unicode.Range16{Lo: 0x2e20, Hi: 0x2e20, Stride: 0x1}, + unicode.Range16{Lo: 0x2e21, Hi: 0x2e21, Stride: 0x1}, + unicode.Range16{Lo: 0x2e22, Hi: 0x2e22, Stride: 0x1}, + unicode.Range16{Lo: 0x2e23, Hi: 0x2e23, Stride: 0x1}, + unicode.Range16{Lo: 0x2e24, Hi: 0x2e24, Stride: 0x1}, + unicode.Range16{Lo: 0x2e25, Hi: 0x2e25, Stride: 0x1}, + unicode.Range16{Lo: 0x2e26, Hi: 0x2e26, Stride: 0x1}, + unicode.Range16{Lo: 0x2e27, Hi: 0x2e27, Stride: 0x1}, + unicode.Range16{Lo: 0x2e28, Hi: 0x2e28, Stride: 0x1}, + unicode.Range16{Lo: 0x2e29, Hi: 0x2e29, Stride: 0x1}, + unicode.Range16{Lo: 0x2e42, Hi: 0x2e42, Stride: 0x1}, + unicode.Range16{Lo: 0x3008, Hi: 0x3008, Stride: 0x1}, + unicode.Range16{Lo: 0x3009, Hi: 0x3009, Stride: 0x1}, + unicode.Range16{Lo: 0x300a, Hi: 0x300a, Stride: 0x1}, + unicode.Range16{Lo: 0x300b, Hi: 0x300b, Stride: 0x1}, + unicode.Range16{Lo: 0x300c, Hi: 0x300c, Stride: 0x1}, + unicode.Range16{Lo: 0x300d, Hi: 0x300d, Stride: 0x1}, + unicode.Range16{Lo: 0x300e, Hi: 0x300e, Stride: 0x1}, + unicode.Range16{Lo: 0x300f, Hi: 0x300f, Stride: 0x1}, + unicode.Range16{Lo: 0x3010, Hi: 0x3010, Stride: 0x1}, + unicode.Range16{Lo: 0x3011, Hi: 0x3011, Stride: 0x1}, + unicode.Range16{Lo: 0x3014, Hi: 0x3014, Stride: 0x1}, + unicode.Range16{Lo: 0x3015, Hi: 0x3015, Stride: 0x1}, + unicode.Range16{Lo: 0x3016, Hi: 0x3016, Stride: 0x1}, + unicode.Range16{Lo: 0x3017, Hi: 0x3017, Stride: 0x1}, + unicode.Range16{Lo: 0x3018, Hi: 0x3018, Stride: 0x1}, + unicode.Range16{Lo: 0x3019, Hi: 0x3019, Stride: 0x1}, + unicode.Range16{Lo: 0x301a, Hi: 0x301a, Stride: 0x1}, + unicode.Range16{Lo: 0x301b, Hi: 0x301b, Stride: 0x1}, + unicode.Range16{Lo: 0x301d, Hi: 0x301d, Stride: 0x1}, + unicode.Range16{Lo: 0x301e, Hi: 0x301f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd3e, Hi: 0xfd3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfd3f, Hi: 0xfd3f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe17, Hi: 0xfe17, Stride: 0x1}, + unicode.Range16{Lo: 0xfe18, Hi: 0xfe18, Stride: 0x1}, + unicode.Range16{Lo: 0xfe35, Hi: 0xfe35, Stride: 0x1}, + unicode.Range16{Lo: 0xfe36, Hi: 0xfe36, Stride: 0x1}, + unicode.Range16{Lo: 0xfe37, Hi: 0xfe37, Stride: 0x1}, + unicode.Range16{Lo: 0xfe38, Hi: 0xfe38, Stride: 0x1}, + unicode.Range16{Lo: 0xfe39, Hi: 0xfe39, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3a, Hi: 0xfe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3b, Hi: 0xfe3b, Stride: 0x1}, + 
unicode.Range16{Lo: 0xfe3c, Hi: 0xfe3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3d, Hi: 0xfe3d, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3e, Hi: 0xfe3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3f, Hi: 0xfe3f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe40, Hi: 0xfe40, Stride: 0x1}, + unicode.Range16{Lo: 0xfe41, Hi: 0xfe41, Stride: 0x1}, + unicode.Range16{Lo: 0xfe42, Hi: 0xfe42, Stride: 0x1}, + unicode.Range16{Lo: 0xfe43, Hi: 0xfe43, Stride: 0x1}, + unicode.Range16{Lo: 0xfe44, Hi: 0xfe44, Stride: 0x1}, + unicode.Range16{Lo: 0xfe47, Hi: 0xfe47, Stride: 0x1}, + unicode.Range16{Lo: 0xfe48, Hi: 0xfe48, Stride: 0x1}, + unicode.Range16{Lo: 0xfe59, Hi: 0xfe59, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5a, Hi: 0xfe5a, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5b, Hi: 0xfe5b, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5c, Hi: 0xfe5c, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5d, Hi: 0xfe5d, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5e, Hi: 0xfe5e, Stride: 0x1}, + unicode.Range16{Lo: 0xff08, Hi: 0xff08, Stride: 0x1}, + unicode.Range16{Lo: 0xff09, Hi: 0xff09, Stride: 0x1}, + unicode.Range16{Lo: 0xff3b, Hi: 0xff3b, Stride: 0x1}, + unicode.Range16{Lo: 0xff3d, Hi: 0xff3d, Stride: 0x1}, + unicode.Range16{Lo: 0xff5b, Hi: 0xff5b, Stride: 0x1}, + unicode.Range16{Lo: 0xff5d, Hi: 0xff5d, Stride: 0x1}, + unicode.Range16{Lo: 0xff5f, Hi: 0xff5f, Stride: 0x1}, + unicode.Range16{Lo: 0xff60, Hi: 0xff60, Stride: 0x1}, + unicode.Range16{Lo: 0xff62, Hi: 0xff62, Stride: 0x1}, + unicode.Range16{Lo: 0xff63, Hi: 0xff63, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f676, Hi: 0x1f678, Stride: 0x1}, + }, + LatinOffset: 10, +} + +var _SentenceExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + 
unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, 
Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, 
Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + 
unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200d, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 
0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + 
unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + 
unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _SentenceFormat = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1}, + unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1}, + unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1}, + unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1}, + unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1}, + unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1}, + unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1}, + unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1}, + unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1}, + unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1}, + unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1}, + unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1}, + unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1}, + unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1}, + unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceLower = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1}, + unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1}, + unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1}, + unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1}, + 
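+ // _SentenceLower holds code points with the UAX #29 property
+ // Sentence_Break=Lower, i.e. lowercase letters such as the ASCII
+ // a-z range above. Like any *unicode.RangeTable it can be queried
+ // directly with the standard library, for example:
+ //
+ //	unicode.Is(_SentenceLower, 'a') // true
+ //	unicode.Is(_SentenceLower, 'A') // false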
unicode.Range16{Lo: 0xdf, Hi: 0xf6, Stride: 0x1}, + unicode.Range16{Lo: 0xf8, Hi: 0xff, Stride: 0x1}, + unicode.Range16{Lo: 0x101, Hi: 0x101, Stride: 0x1}, + unicode.Range16{Lo: 0x103, Hi: 0x103, Stride: 0x1}, + unicode.Range16{Lo: 0x105, Hi: 0x105, Stride: 0x1}, + unicode.Range16{Lo: 0x107, Hi: 0x107, Stride: 0x1}, + unicode.Range16{Lo: 0x109, Hi: 0x109, Stride: 0x1}, + unicode.Range16{Lo: 0x10b, Hi: 0x10b, Stride: 0x1}, + unicode.Range16{Lo: 0x10d, Hi: 0x10d, Stride: 0x1}, + unicode.Range16{Lo: 0x10f, Hi: 0x10f, Stride: 0x1}, + unicode.Range16{Lo: 0x111, Hi: 0x111, Stride: 0x1}, + unicode.Range16{Lo: 0x113, Hi: 0x113, Stride: 0x1}, + unicode.Range16{Lo: 0x115, Hi: 0x115, Stride: 0x1}, + unicode.Range16{Lo: 0x117, Hi: 0x117, Stride: 0x1}, + unicode.Range16{Lo: 0x119, Hi: 0x119, Stride: 0x1}, + unicode.Range16{Lo: 0x11b, Hi: 0x11b, Stride: 0x1}, + unicode.Range16{Lo: 0x11d, Hi: 0x11d, Stride: 0x1}, + unicode.Range16{Lo: 0x11f, Hi: 0x11f, Stride: 0x1}, + unicode.Range16{Lo: 0x121, Hi: 0x121, Stride: 0x1}, + unicode.Range16{Lo: 0x123, Hi: 0x123, Stride: 0x1}, + unicode.Range16{Lo: 0x125, Hi: 0x125, Stride: 0x1}, + unicode.Range16{Lo: 0x127, Hi: 0x127, Stride: 0x1}, + unicode.Range16{Lo: 0x129, Hi: 0x129, Stride: 0x1}, + unicode.Range16{Lo: 0x12b, Hi: 0x12b, Stride: 0x1}, + unicode.Range16{Lo: 0x12d, Hi: 0x12d, Stride: 0x1}, + unicode.Range16{Lo: 0x12f, Hi: 0x12f, Stride: 0x1}, + unicode.Range16{Lo: 0x131, Hi: 0x131, Stride: 0x1}, + unicode.Range16{Lo: 0x133, Hi: 0x133, Stride: 0x1}, + unicode.Range16{Lo: 0x135, Hi: 0x135, Stride: 0x1}, + unicode.Range16{Lo: 0x137, Hi: 0x138, Stride: 0x1}, + unicode.Range16{Lo: 0x13a, Hi: 0x13a, Stride: 0x1}, + unicode.Range16{Lo: 0x13c, Hi: 0x13c, Stride: 0x1}, + unicode.Range16{Lo: 0x13e, Hi: 0x13e, Stride: 0x1}, + unicode.Range16{Lo: 0x140, Hi: 0x140, Stride: 0x1}, + unicode.Range16{Lo: 0x142, Hi: 0x142, Stride: 0x1}, + unicode.Range16{Lo: 0x144, Hi: 0x144, Stride: 0x1}, + unicode.Range16{Lo: 0x146, Hi: 0x146, Stride: 0x1}, + unicode.Range16{Lo: 0x148, Hi: 0x149, Stride: 0x1}, + unicode.Range16{Lo: 0x14b, Hi: 0x14b, Stride: 0x1}, + unicode.Range16{Lo: 0x14d, Hi: 0x14d, Stride: 0x1}, + unicode.Range16{Lo: 0x14f, Hi: 0x14f, Stride: 0x1}, + unicode.Range16{Lo: 0x151, Hi: 0x151, Stride: 0x1}, + unicode.Range16{Lo: 0x153, Hi: 0x153, Stride: 0x1}, + unicode.Range16{Lo: 0x155, Hi: 0x155, Stride: 0x1}, + unicode.Range16{Lo: 0x157, Hi: 0x157, Stride: 0x1}, + unicode.Range16{Lo: 0x159, Hi: 0x159, Stride: 0x1}, + unicode.Range16{Lo: 0x15b, Hi: 0x15b, Stride: 0x1}, + unicode.Range16{Lo: 0x15d, Hi: 0x15d, Stride: 0x1}, + unicode.Range16{Lo: 0x15f, Hi: 0x15f, Stride: 0x1}, + unicode.Range16{Lo: 0x161, Hi: 0x161, Stride: 0x1}, + unicode.Range16{Lo: 0x163, Hi: 0x163, Stride: 0x1}, + unicode.Range16{Lo: 0x165, Hi: 0x165, Stride: 0x1}, + unicode.Range16{Lo: 0x167, Hi: 0x167, Stride: 0x1}, + unicode.Range16{Lo: 0x169, Hi: 0x169, Stride: 0x1}, + unicode.Range16{Lo: 0x16b, Hi: 0x16b, Stride: 0x1}, + unicode.Range16{Lo: 0x16d, Hi: 0x16d, Stride: 0x1}, + unicode.Range16{Lo: 0x16f, Hi: 0x16f, Stride: 0x1}, + unicode.Range16{Lo: 0x171, Hi: 0x171, Stride: 0x1}, + unicode.Range16{Lo: 0x173, Hi: 0x173, Stride: 0x1}, + unicode.Range16{Lo: 0x175, Hi: 0x175, Stride: 0x1}, + unicode.Range16{Lo: 0x177, Hi: 0x177, Stride: 0x1}, + unicode.Range16{Lo: 0x17a, Hi: 0x17a, Stride: 0x1}, + unicode.Range16{Lo: 0x17c, Hi: 0x17c, Stride: 0x1}, + unicode.Range16{Lo: 0x17e, Hi: 0x180, Stride: 0x1}, + unicode.Range16{Lo: 0x183, Hi: 0x183, Stride: 0x1}, + unicode.Range16{Lo: 0x185, Hi: 0x185, Stride: 
0x1}, + unicode.Range16{Lo: 0x188, Hi: 0x188, Stride: 0x1}, + unicode.Range16{Lo: 0x18c, Hi: 0x18d, Stride: 0x1}, + unicode.Range16{Lo: 0x192, Hi: 0x192, Stride: 0x1}, + unicode.Range16{Lo: 0x195, Hi: 0x195, Stride: 0x1}, + unicode.Range16{Lo: 0x199, Hi: 0x19b, Stride: 0x1}, + unicode.Range16{Lo: 0x19e, Hi: 0x19e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1, Hi: 0x1a1, Stride: 0x1}, + unicode.Range16{Lo: 0x1a3, Hi: 0x1a3, Stride: 0x1}, + unicode.Range16{Lo: 0x1a5, Hi: 0x1a5, Stride: 0x1}, + unicode.Range16{Lo: 0x1a8, Hi: 0x1a8, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa, Hi: 0x1ab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ad, Hi: 0x1ad, Stride: 0x1}, + unicode.Range16{Lo: 0x1b0, Hi: 0x1b0, Stride: 0x1}, + unicode.Range16{Lo: 0x1b4, Hi: 0x1b4, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6, Hi: 0x1b6, Stride: 0x1}, + unicode.Range16{Lo: 0x1b9, Hi: 0x1ba, Stride: 0x1}, + unicode.Range16{Lo: 0x1bd, Hi: 0x1bf, Stride: 0x1}, + unicode.Range16{Lo: 0x1c6, Hi: 0x1c6, Stride: 0x1}, + unicode.Range16{Lo: 0x1c9, Hi: 0x1c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1cc, Hi: 0x1cc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce, Hi: 0x1ce, Stride: 0x1}, + unicode.Range16{Lo: 0x1d0, Hi: 0x1d0, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2, Hi: 0x1d2, Stride: 0x1}, + unicode.Range16{Lo: 0x1d4, Hi: 0x1d4, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6, Hi: 0x1d6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d8, Hi: 0x1d8, Stride: 0x1}, + unicode.Range16{Lo: 0x1da, Hi: 0x1da, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc, Hi: 0x1dd, Stride: 0x1}, + unicode.Range16{Lo: 0x1df, Hi: 0x1df, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1, Hi: 0x1e1, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3, Hi: 0x1e3, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5, Hi: 0x1e5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7, Hi: 0x1e7, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9, Hi: 0x1e9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb, Hi: 0x1eb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed, Hi: 0x1ed, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef, Hi: 0x1f0, Stride: 0x1}, + unicode.Range16{Lo: 0x1f3, Hi: 0x1f3, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5, Hi: 0x1f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1f9, Hi: 0x1f9, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb, Hi: 0x1fb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd, Hi: 0x1fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff, Hi: 0x1ff, Stride: 0x1}, + unicode.Range16{Lo: 0x201, Hi: 0x201, Stride: 0x1}, + unicode.Range16{Lo: 0x203, Hi: 0x203, Stride: 0x1}, + unicode.Range16{Lo: 0x205, Hi: 0x205, Stride: 0x1}, + unicode.Range16{Lo: 0x207, Hi: 0x207, Stride: 0x1}, + unicode.Range16{Lo: 0x209, Hi: 0x209, Stride: 0x1}, + unicode.Range16{Lo: 0x20b, Hi: 0x20b, Stride: 0x1}, + unicode.Range16{Lo: 0x20d, Hi: 0x20d, Stride: 0x1}, + unicode.Range16{Lo: 0x20f, Hi: 0x20f, Stride: 0x1}, + unicode.Range16{Lo: 0x211, Hi: 0x211, Stride: 0x1}, + unicode.Range16{Lo: 0x213, Hi: 0x213, Stride: 0x1}, + unicode.Range16{Lo: 0x215, Hi: 0x215, Stride: 0x1}, + unicode.Range16{Lo: 0x217, Hi: 0x217, Stride: 0x1}, + unicode.Range16{Lo: 0x219, Hi: 0x219, Stride: 0x1}, + unicode.Range16{Lo: 0x21b, Hi: 0x21b, Stride: 0x1}, + unicode.Range16{Lo: 0x21d, Hi: 0x21d, Stride: 0x1}, + unicode.Range16{Lo: 0x21f, Hi: 0x21f, Stride: 0x1}, + unicode.Range16{Lo: 0x221, Hi: 0x221, Stride: 0x1}, + unicode.Range16{Lo: 0x223, Hi: 0x223, Stride: 0x1}, + unicode.Range16{Lo: 0x225, Hi: 0x225, Stride: 0x1}, + unicode.Range16{Lo: 0x227, Hi: 0x227, Stride: 0x1}, + unicode.Range16{Lo: 0x229, Hi: 0x229, Stride: 0x1}, + unicode.Range16{Lo: 0x22b, Hi: 0x22b, Stride: 0x1}, + unicode.Range16{Lo: 0x22d, Hi: 
0x22d, Stride: 0x1}, + unicode.Range16{Lo: 0x22f, Hi: 0x22f, Stride: 0x1}, + unicode.Range16{Lo: 0x231, Hi: 0x231, Stride: 0x1}, + unicode.Range16{Lo: 0x233, Hi: 0x239, Stride: 0x1}, + unicode.Range16{Lo: 0x23c, Hi: 0x23c, Stride: 0x1}, + unicode.Range16{Lo: 0x23f, Hi: 0x240, Stride: 0x1}, + unicode.Range16{Lo: 0x242, Hi: 0x242, Stride: 0x1}, + unicode.Range16{Lo: 0x247, Hi: 0x247, Stride: 0x1}, + unicode.Range16{Lo: 0x249, Hi: 0x249, Stride: 0x1}, + unicode.Range16{Lo: 0x24b, Hi: 0x24b, Stride: 0x1}, + unicode.Range16{Lo: 0x24d, Hi: 0x24d, Stride: 0x1}, + unicode.Range16{Lo: 0x24f, Hi: 0x293, Stride: 0x1}, + unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1}, + unicode.Range16{Lo: 0x2b0, Hi: 0x2b8, Stride: 0x1}, + unicode.Range16{Lo: 0x2c0, Hi: 0x2c1, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1}, + unicode.Range16{Lo: 0x371, Hi: 0x371, Stride: 0x1}, + unicode.Range16{Lo: 0x373, Hi: 0x373, Stride: 0x1}, + unicode.Range16{Lo: 0x377, Hi: 0x377, Stride: 0x1}, + unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1}, + unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1}, + unicode.Range16{Lo: 0x390, Hi: 0x390, Stride: 0x1}, + unicode.Range16{Lo: 0x3ac, Hi: 0x3ce, Stride: 0x1}, + unicode.Range16{Lo: 0x3d0, Hi: 0x3d1, Stride: 0x1}, + unicode.Range16{Lo: 0x3d5, Hi: 0x3d7, Stride: 0x1}, + unicode.Range16{Lo: 0x3d9, Hi: 0x3d9, Stride: 0x1}, + unicode.Range16{Lo: 0x3db, Hi: 0x3db, Stride: 0x1}, + unicode.Range16{Lo: 0x3dd, Hi: 0x3dd, Stride: 0x1}, + unicode.Range16{Lo: 0x3df, Hi: 0x3df, Stride: 0x1}, + unicode.Range16{Lo: 0x3e1, Hi: 0x3e1, Stride: 0x1}, + unicode.Range16{Lo: 0x3e3, Hi: 0x3e3, Stride: 0x1}, + unicode.Range16{Lo: 0x3e5, Hi: 0x3e5, Stride: 0x1}, + unicode.Range16{Lo: 0x3e7, Hi: 0x3e7, Stride: 0x1}, + unicode.Range16{Lo: 0x3e9, Hi: 0x3e9, Stride: 0x1}, + unicode.Range16{Lo: 0x3eb, Hi: 0x3eb, Stride: 0x1}, + unicode.Range16{Lo: 0x3ed, Hi: 0x3ed, Stride: 0x1}, + unicode.Range16{Lo: 0x3ef, Hi: 0x3f3, Stride: 0x1}, + unicode.Range16{Lo: 0x3f5, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f8, Hi: 0x3f8, Stride: 0x1}, + unicode.Range16{Lo: 0x3fb, Hi: 0x3fc, Stride: 0x1}, + unicode.Range16{Lo: 0x430, Hi: 0x45f, Stride: 0x1}, + unicode.Range16{Lo: 0x461, Hi: 0x461, Stride: 0x1}, + unicode.Range16{Lo: 0x463, Hi: 0x463, Stride: 0x1}, + unicode.Range16{Lo: 0x465, Hi: 0x465, Stride: 0x1}, + unicode.Range16{Lo: 0x467, Hi: 0x467, Stride: 0x1}, + unicode.Range16{Lo: 0x469, Hi: 0x469, Stride: 0x1}, + unicode.Range16{Lo: 0x46b, Hi: 0x46b, Stride: 0x1}, + unicode.Range16{Lo: 0x46d, Hi: 0x46d, Stride: 0x1}, + unicode.Range16{Lo: 0x46f, Hi: 0x46f, Stride: 0x1}, + unicode.Range16{Lo: 0x471, Hi: 0x471, Stride: 0x1}, + unicode.Range16{Lo: 0x473, Hi: 0x473, Stride: 0x1}, + unicode.Range16{Lo: 0x475, Hi: 0x475, Stride: 0x1}, + unicode.Range16{Lo: 0x477, Hi: 0x477, Stride: 0x1}, + unicode.Range16{Lo: 0x479, Hi: 0x479, Stride: 0x1}, + unicode.Range16{Lo: 0x47b, Hi: 0x47b, Stride: 0x1}, + unicode.Range16{Lo: 0x47d, Hi: 0x47d, Stride: 0x1}, + unicode.Range16{Lo: 0x47f, Hi: 0x47f, Stride: 0x1}, + unicode.Range16{Lo: 0x481, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48b, Hi: 0x48b, Stride: 0x1}, + unicode.Range16{Lo: 0x48d, Hi: 0x48d, Stride: 0x1}, + unicode.Range16{Lo: 0x48f, Hi: 0x48f, Stride: 0x1}, + unicode.Range16{Lo: 0x491, Hi: 0x491, Stride: 0x1}, + unicode.Range16{Lo: 0x493, Hi: 0x493, Stride: 0x1}, + unicode.Range16{Lo: 0x495, Hi: 0x495, Stride: 0x1}, + unicode.Range16{Lo: 0x497, Hi: 0x497, Stride: 0x1}, + unicode.Range16{Lo: 0x499, Hi: 0x499, Stride: 0x1}, + unicode.Range16{Lo: 
0x49b, Hi: 0x49b, Stride: 0x1}, + unicode.Range16{Lo: 0x49d, Hi: 0x49d, Stride: 0x1}, + unicode.Range16{Lo: 0x49f, Hi: 0x49f, Stride: 0x1}, + unicode.Range16{Lo: 0x4a1, Hi: 0x4a1, Stride: 0x1}, + unicode.Range16{Lo: 0x4a3, Hi: 0x4a3, Stride: 0x1}, + unicode.Range16{Lo: 0x4a5, Hi: 0x4a5, Stride: 0x1}, + unicode.Range16{Lo: 0x4a7, Hi: 0x4a7, Stride: 0x1}, + unicode.Range16{Lo: 0x4a9, Hi: 0x4a9, Stride: 0x1}, + unicode.Range16{Lo: 0x4ab, Hi: 0x4ab, Stride: 0x1}, + unicode.Range16{Lo: 0x4ad, Hi: 0x4ad, Stride: 0x1}, + unicode.Range16{Lo: 0x4af, Hi: 0x4af, Stride: 0x1}, + unicode.Range16{Lo: 0x4b1, Hi: 0x4b1, Stride: 0x1}, + unicode.Range16{Lo: 0x4b3, Hi: 0x4b3, Stride: 0x1}, + unicode.Range16{Lo: 0x4b5, Hi: 0x4b5, Stride: 0x1}, + unicode.Range16{Lo: 0x4b7, Hi: 0x4b7, Stride: 0x1}, + unicode.Range16{Lo: 0x4b9, Hi: 0x4b9, Stride: 0x1}, + unicode.Range16{Lo: 0x4bb, Hi: 0x4bb, Stride: 0x1}, + unicode.Range16{Lo: 0x4bd, Hi: 0x4bd, Stride: 0x1}, + unicode.Range16{Lo: 0x4bf, Hi: 0x4bf, Stride: 0x1}, + unicode.Range16{Lo: 0x4c2, Hi: 0x4c2, Stride: 0x1}, + unicode.Range16{Lo: 0x4c4, Hi: 0x4c4, Stride: 0x1}, + unicode.Range16{Lo: 0x4c6, Hi: 0x4c6, Stride: 0x1}, + unicode.Range16{Lo: 0x4c8, Hi: 0x4c8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ca, Hi: 0x4ca, Stride: 0x1}, + unicode.Range16{Lo: 0x4cc, Hi: 0x4cc, Stride: 0x1}, + unicode.Range16{Lo: 0x4ce, Hi: 0x4cf, Stride: 0x1}, + unicode.Range16{Lo: 0x4d1, Hi: 0x4d1, Stride: 0x1}, + unicode.Range16{Lo: 0x4d3, Hi: 0x4d3, Stride: 0x1}, + unicode.Range16{Lo: 0x4d5, Hi: 0x4d5, Stride: 0x1}, + unicode.Range16{Lo: 0x4d7, Hi: 0x4d7, Stride: 0x1}, + unicode.Range16{Lo: 0x4d9, Hi: 0x4d9, Stride: 0x1}, + unicode.Range16{Lo: 0x4db, Hi: 0x4db, Stride: 0x1}, + unicode.Range16{Lo: 0x4dd, Hi: 0x4dd, Stride: 0x1}, + unicode.Range16{Lo: 0x4df, Hi: 0x4df, Stride: 0x1}, + unicode.Range16{Lo: 0x4e1, Hi: 0x4e1, Stride: 0x1}, + unicode.Range16{Lo: 0x4e3, Hi: 0x4e3, Stride: 0x1}, + unicode.Range16{Lo: 0x4e5, Hi: 0x4e5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e7, Hi: 0x4e7, Stride: 0x1}, + unicode.Range16{Lo: 0x4e9, Hi: 0x4e9, Stride: 0x1}, + unicode.Range16{Lo: 0x4eb, Hi: 0x4eb, Stride: 0x1}, + unicode.Range16{Lo: 0x4ed, Hi: 0x4ed, Stride: 0x1}, + unicode.Range16{Lo: 0x4ef, Hi: 0x4ef, Stride: 0x1}, + unicode.Range16{Lo: 0x4f1, Hi: 0x4f1, Stride: 0x1}, + unicode.Range16{Lo: 0x4f3, Hi: 0x4f3, Stride: 0x1}, + unicode.Range16{Lo: 0x4f5, Hi: 0x4f5, Stride: 0x1}, + unicode.Range16{Lo: 0x4f7, Hi: 0x4f7, Stride: 0x1}, + unicode.Range16{Lo: 0x4f9, Hi: 0x4f9, Stride: 0x1}, + unicode.Range16{Lo: 0x4fb, Hi: 0x4fb, Stride: 0x1}, + unicode.Range16{Lo: 0x4fd, Hi: 0x4fd, Stride: 0x1}, + unicode.Range16{Lo: 0x4ff, Hi: 0x4ff, Stride: 0x1}, + unicode.Range16{Lo: 0x501, Hi: 0x501, Stride: 0x1}, + unicode.Range16{Lo: 0x503, Hi: 0x503, Stride: 0x1}, + unicode.Range16{Lo: 0x505, Hi: 0x505, Stride: 0x1}, + unicode.Range16{Lo: 0x507, Hi: 0x507, Stride: 0x1}, + unicode.Range16{Lo: 0x509, Hi: 0x509, Stride: 0x1}, + unicode.Range16{Lo: 0x50b, Hi: 0x50b, Stride: 0x1}, + unicode.Range16{Lo: 0x50d, Hi: 0x50d, Stride: 0x1}, + unicode.Range16{Lo: 0x50f, Hi: 0x50f, Stride: 0x1}, + unicode.Range16{Lo: 0x511, Hi: 0x511, Stride: 0x1}, + unicode.Range16{Lo: 0x513, Hi: 0x513, Stride: 0x1}, + unicode.Range16{Lo: 0x515, Hi: 0x515, Stride: 0x1}, + unicode.Range16{Lo: 0x517, Hi: 0x517, Stride: 0x1}, + unicode.Range16{Lo: 0x519, Hi: 0x519, Stride: 0x1}, + unicode.Range16{Lo: 0x51b, Hi: 0x51b, Stride: 0x1}, + unicode.Range16{Lo: 0x51d, Hi: 0x51d, Stride: 0x1}, + unicode.Range16{Lo: 0x51f, Hi: 0x51f, Stride: 0x1}, + 
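+ // The long run of single-code-point entries (0x461, 0x463, ...) covers
+ // the lowercase halves of the Cyrillic and Latin Extended case pairs;
+ // the generator emits one Stride: 0x1 range per letter rather than
+ // collapsing alternating letters with Stride: 0x2.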
unicode.Range16{Lo: 0x521, Hi: 0x521, Stride: 0x1}, + unicode.Range16{Lo: 0x523, Hi: 0x523, Stride: 0x1}, + unicode.Range16{Lo: 0x525, Hi: 0x525, Stride: 0x1}, + unicode.Range16{Lo: 0x527, Hi: 0x527, Stride: 0x1}, + unicode.Range16{Lo: 0x529, Hi: 0x529, Stride: 0x1}, + unicode.Range16{Lo: 0x52b, Hi: 0x52b, Stride: 0x1}, + unicode.Range16{Lo: 0x52d, Hi: 0x52d, Stride: 0x1}, + unicode.Range16{Lo: 0x52f, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e01, Hi: 0x1e01, Stride: 0x1}, + unicode.Range16{Lo: 0x1e03, Hi: 0x1e03, Stride: 0x1}, + unicode.Range16{Lo: 0x1e05, Hi: 0x1e05, Stride: 0x1}, + unicode.Range16{Lo: 0x1e07, Hi: 0x1e07, Stride: 0x1}, + unicode.Range16{Lo: 0x1e09, Hi: 0x1e09, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0b, Hi: 0x1e0b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0d, Hi: 0x1e0d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0f, Hi: 0x1e0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e11, Hi: 0x1e11, Stride: 0x1}, + unicode.Range16{Lo: 0x1e13, Hi: 0x1e13, Stride: 0x1}, + unicode.Range16{Lo: 0x1e15, Hi: 0x1e15, Stride: 0x1}, + unicode.Range16{Lo: 0x1e17, Hi: 0x1e17, Stride: 0x1}, + unicode.Range16{Lo: 0x1e19, Hi: 0x1e19, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1b, Hi: 0x1e1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1d, Hi: 0x1e1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1f, Hi: 0x1e1f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e21, Hi: 0x1e21, Stride: 0x1}, + unicode.Range16{Lo: 0x1e23, Hi: 0x1e23, Stride: 0x1}, + unicode.Range16{Lo: 0x1e25, Hi: 0x1e25, Stride: 0x1}, + unicode.Range16{Lo: 0x1e27, Hi: 0x1e27, Stride: 0x1}, + unicode.Range16{Lo: 0x1e29, Hi: 0x1e29, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2b, Hi: 0x1e2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2d, Hi: 0x1e2d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2f, Hi: 0x1e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e31, Hi: 0x1e31, Stride: 0x1}, + unicode.Range16{Lo: 0x1e33, Hi: 0x1e33, Stride: 0x1}, + unicode.Range16{Lo: 0x1e35, Hi: 0x1e35, Stride: 0x1}, + unicode.Range16{Lo: 0x1e37, Hi: 0x1e37, Stride: 0x1}, + unicode.Range16{Lo: 0x1e39, Hi: 0x1e39, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3b, Hi: 0x1e3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3d, Hi: 0x1e3d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3f, Hi: 0x1e3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e41, Hi: 0x1e41, Stride: 0x1}, + unicode.Range16{Lo: 0x1e43, Hi: 0x1e43, Stride: 0x1}, + unicode.Range16{Lo: 0x1e45, Hi: 0x1e45, Stride: 0x1}, + unicode.Range16{Lo: 0x1e47, Hi: 0x1e47, Stride: 0x1}, + unicode.Range16{Lo: 0x1e49, Hi: 0x1e49, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4b, Hi: 0x1e4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4d, Hi: 0x1e4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4f, Hi: 0x1e4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e51, Hi: 0x1e51, Stride: 0x1}, + unicode.Range16{Lo: 0x1e53, Hi: 0x1e53, Stride: 0x1}, + unicode.Range16{Lo: 0x1e55, Hi: 0x1e55, Stride: 0x1}, + unicode.Range16{Lo: 0x1e57, Hi: 0x1e57, Stride: 0x1}, + unicode.Range16{Lo: 0x1e59, Hi: 0x1e59, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5b, Hi: 0x1e5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5d, Hi: 0x1e5d, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1e5f, Hi: 0x1e5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e61, Hi: 0x1e61, Stride: 0x1}, + unicode.Range16{Lo: 0x1e63, Hi: 0x1e63, Stride: 0x1}, + unicode.Range16{Lo: 0x1e65, Hi: 0x1e65, Stride: 0x1}, + unicode.Range16{Lo: 0x1e67, Hi: 0x1e67, Stride: 0x1}, + unicode.Range16{Lo: 0x1e69, Hi: 0x1e69, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6b, Hi: 0x1e6b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6d, Hi: 0x1e6d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6f, Hi: 0x1e6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e71, Hi: 0x1e71, Stride: 0x1}, + unicode.Range16{Lo: 0x1e73, Hi: 0x1e73, Stride: 0x1}, + unicode.Range16{Lo: 0x1e75, Hi: 0x1e75, Stride: 0x1}, + unicode.Range16{Lo: 0x1e77, Hi: 0x1e77, Stride: 0x1}, + unicode.Range16{Lo: 0x1e79, Hi: 0x1e79, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7b, Hi: 0x1e7b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7d, Hi: 0x1e7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7f, Hi: 0x1e7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e81, Hi: 0x1e81, Stride: 0x1}, + unicode.Range16{Lo: 0x1e83, Hi: 0x1e83, Stride: 0x1}, + unicode.Range16{Lo: 0x1e85, Hi: 0x1e85, Stride: 0x1}, + unicode.Range16{Lo: 0x1e87, Hi: 0x1e87, Stride: 0x1}, + unicode.Range16{Lo: 0x1e89, Hi: 0x1e89, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8b, Hi: 0x1e8b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8d, Hi: 0x1e8d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8f, Hi: 0x1e8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e91, Hi: 0x1e91, Stride: 0x1}, + unicode.Range16{Lo: 0x1e93, Hi: 0x1e93, Stride: 0x1}, + unicode.Range16{Lo: 0x1e95, Hi: 0x1e9d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9f, Hi: 0x1e9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea1, Hi: 0x1ea1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea3, Hi: 0x1ea3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea5, Hi: 0x1ea5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea7, Hi: 0x1ea7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea9, Hi: 0x1ea9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eab, Hi: 0x1eab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ead, Hi: 0x1ead, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaf, Hi: 0x1eaf, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb1, Hi: 0x1eb1, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb3, Hi: 0x1eb3, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb5, Hi: 0x1eb5, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb7, Hi: 0x1eb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb9, Hi: 0x1eb9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebb, Hi: 0x1ebb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebd, Hi: 0x1ebd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebf, Hi: 0x1ebf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec1, Hi: 0x1ec1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec3, Hi: 0x1ec3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec5, Hi: 0x1ec5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec7, Hi: 0x1ec7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec9, Hi: 0x1ec9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecb, Hi: 0x1ecb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecd, Hi: 0x1ecd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecf, Hi: 0x1ecf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed1, Hi: 0x1ed1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed3, Hi: 0x1ed3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed5, Hi: 0x1ed5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed7, Hi: 0x1ed7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed9, Hi: 0x1ed9, Stride: 0x1}, + unicode.Range16{Lo: 0x1edb, Hi: 0x1edb, Stride: 0x1}, + unicode.Range16{Lo: 0x1edd, Hi: 0x1edd, Stride: 0x1}, + unicode.Range16{Lo: 0x1edf, Hi: 0x1edf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee1, Hi: 0x1ee1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee3, Hi: 0x1ee3, Stride: 0x1}, + 
unicode.Range16{Lo: 0x1ee5, Hi: 0x1ee5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee7, Hi: 0x1ee7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee9, Hi: 0x1ee9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eeb, Hi: 0x1eeb, Stride: 0x1}, + unicode.Range16{Lo: 0x1eed, Hi: 0x1eed, Stride: 0x1}, + unicode.Range16{Lo: 0x1eef, Hi: 0x1eef, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef1, Hi: 0x1ef1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef3, Hi: 0x1ef3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef5, Hi: 0x1ef5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef7, Hi: 0x1ef7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef9, Hi: 0x1ef9, Stride: 0x1}, + unicode.Range16{Lo: 0x1efb, Hi: 0x1efb, Stride: 0x1}, + unicode.Range16{Lo: 0x1efd, Hi: 0x1efd, Stride: 0x1}, + unicode.Range16{Lo: 0x1eff, Hi: 0x1f07, Stride: 0x1}, + unicode.Range16{Lo: 0x1f10, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f27, Stride: 0x1}, + unicode.Range16{Lo: 0x1f30, Hi: 0x1f37, Stride: 0x1}, + unicode.Range16{Lo: 0x1f40, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 0x1f60, Hi: 0x1f67, Stride: 0x1}, + unicode.Range16{Lo: 0x1f70, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1f87, Stride: 0x1}, + unicode.Range16{Lo: 0x1f90, Hi: 0x1f97, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa0, Hi: 0x1fa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb0, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fc7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fd7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fe7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ff7, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x210a, Stride: 0x1}, + unicode.Range16{Lo: 0x210e, Hi: 0x210f, Stride: 0x1}, + unicode.Range16{Lo: 0x2113, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x212f, Stride: 0x1}, + unicode.Range16{Lo: 0x2134, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213d, Stride: 0x1}, + unicode.Range16{Lo: 0x2146, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2170, Hi: 0x217f, Stride: 0x1}, + unicode.Range16{Lo: 0x2184, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x24d0, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c61, Hi: 0x2c61, Stride: 0x1}, + unicode.Range16{Lo: 0x2c65, Hi: 0x2c66, Stride: 0x1}, + unicode.Range16{Lo: 0x2c68, Hi: 0x2c68, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6a, Hi: 0x2c6a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6c, Hi: 0x2c6c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c71, Hi: 0x2c71, Stride: 0x1}, + unicode.Range16{Lo: 0x2c73, Hi: 0x2c74, Stride: 0x1}, + unicode.Range16{Lo: 0x2c76, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c81, Hi: 0x2c81, Stride: 0x1}, + unicode.Range16{Lo: 0x2c83, Hi: 0x2c83, Stride: 0x1}, + unicode.Range16{Lo: 0x2c85, Hi: 0x2c85, Stride: 0x1}, + unicode.Range16{Lo: 0x2c87, Hi: 0x2c87, Stride: 0x1}, + unicode.Range16{Lo: 
0x2c89, Hi: 0x2c89, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8b, Hi: 0x2c8b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8d, Hi: 0x2c8d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8f, Hi: 0x2c8f, Stride: 0x1}, + unicode.Range16{Lo: 0x2c91, Hi: 0x2c91, Stride: 0x1}, + unicode.Range16{Lo: 0x2c93, Hi: 0x2c93, Stride: 0x1}, + unicode.Range16{Lo: 0x2c95, Hi: 0x2c95, Stride: 0x1}, + unicode.Range16{Lo: 0x2c97, Hi: 0x2c97, Stride: 0x1}, + unicode.Range16{Lo: 0x2c99, Hi: 0x2c99, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9b, Hi: 0x2c9b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9d, Hi: 0x2c9d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9f, Hi: 0x2c9f, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca1, Hi: 0x2ca1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca3, Hi: 0x2ca3, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca5, Hi: 0x2ca5, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca7, Hi: 0x2ca7, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca9, Hi: 0x2ca9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cab, Hi: 0x2cab, Stride: 0x1}, + unicode.Range16{Lo: 0x2cad, Hi: 0x2cad, Stride: 0x1}, + unicode.Range16{Lo: 0x2caf, Hi: 0x2caf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb1, Hi: 0x2cb1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb3, Hi: 0x2cb3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb5, Hi: 0x2cb5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb7, Hi: 0x2cb7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb9, Hi: 0x2cb9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbb, Hi: 0x2cbb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbd, Hi: 0x2cbd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbf, Hi: 0x2cbf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc1, Hi: 0x2cc1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc3, Hi: 0x2cc3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc5, Hi: 0x2cc5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc7, Hi: 0x2cc7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc9, Hi: 0x2cc9, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccb, Hi: 0x2ccb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccd, Hi: 0x2ccd, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccf, Hi: 0x2ccf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd1, Hi: 0x2cd1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd3, Hi: 0x2cd3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd5, Hi: 0x2cd5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd7, Hi: 0x2cd7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd9, Hi: 0x2cd9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdb, Hi: 0x2cdb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdd, Hi: 0x2cdd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdf, Hi: 0x2cdf, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce1, Hi: 0x2ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce3, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cec, Hi: 0x2cec, Stride: 0x1}, + unicode.Range16{Lo: 0x2cee, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf3, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0xa641, Hi: 0xa641, Stride: 0x1}, + unicode.Range16{Lo: 0xa643, Hi: 0xa643, Stride: 0x1}, + unicode.Range16{Lo: 0xa645, Hi: 0xa645, Stride: 0x1}, + unicode.Range16{Lo: 0xa647, Hi: 0xa647, Stride: 0x1}, + unicode.Range16{Lo: 0xa649, Hi: 0xa649, Stride: 0x1}, + unicode.Range16{Lo: 0xa64b, Hi: 0xa64b, Stride: 0x1}, + unicode.Range16{Lo: 0xa64d, Hi: 0xa64d, Stride: 0x1}, + unicode.Range16{Lo: 0xa64f, Hi: 0xa64f, Stride: 0x1}, + unicode.Range16{Lo: 0xa651, Hi: 0xa651, Stride: 0x1}, + unicode.Range16{Lo: 0xa653, Hi: 0xa653, Stride: 0x1}, + unicode.Range16{Lo: 0xa655, Hi: 0xa655, Stride: 0x1}, + unicode.Range16{Lo: 0xa657, Hi: 0xa657, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa659, Hi: 0xa659, Stride: 0x1}, + unicode.Range16{Lo: 0xa65b, Hi: 0xa65b, Stride: 0x1}, + unicode.Range16{Lo: 0xa65d, Hi: 0xa65d, Stride: 0x1}, + unicode.Range16{Lo: 0xa65f, Hi: 0xa65f, Stride: 0x1}, + unicode.Range16{Lo: 0xa661, Hi: 0xa661, Stride: 0x1}, + unicode.Range16{Lo: 0xa663, Hi: 0xa663, Stride: 0x1}, + unicode.Range16{Lo: 0xa665, Hi: 0xa665, Stride: 0x1}, + unicode.Range16{Lo: 0xa667, Hi: 0xa667, Stride: 0x1}, + unicode.Range16{Lo: 0xa669, Hi: 0xa669, Stride: 0x1}, + unicode.Range16{Lo: 0xa66b, Hi: 0xa66b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66d, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa681, Hi: 0xa681, Stride: 0x1}, + unicode.Range16{Lo: 0xa683, Hi: 0xa683, Stride: 0x1}, + unicode.Range16{Lo: 0xa685, Hi: 0xa685, Stride: 0x1}, + unicode.Range16{Lo: 0xa687, Hi: 0xa687, Stride: 0x1}, + unicode.Range16{Lo: 0xa689, Hi: 0xa689, Stride: 0x1}, + unicode.Range16{Lo: 0xa68b, Hi: 0xa68b, Stride: 0x1}, + unicode.Range16{Lo: 0xa68d, Hi: 0xa68d, Stride: 0x1}, + unicode.Range16{Lo: 0xa68f, Hi: 0xa68f, Stride: 0x1}, + unicode.Range16{Lo: 0xa691, Hi: 0xa691, Stride: 0x1}, + unicode.Range16{Lo: 0xa693, Hi: 0xa693, Stride: 0x1}, + unicode.Range16{Lo: 0xa695, Hi: 0xa695, Stride: 0x1}, + unicode.Range16{Lo: 0xa697, Hi: 0xa697, Stride: 0x1}, + unicode.Range16{Lo: 0xa699, Hi: 0xa699, Stride: 0x1}, + unicode.Range16{Lo: 0xa69b, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa723, Hi: 0xa723, Stride: 0x1}, + unicode.Range16{Lo: 0xa725, Hi: 0xa725, Stride: 0x1}, + unicode.Range16{Lo: 0xa727, Hi: 0xa727, Stride: 0x1}, + unicode.Range16{Lo: 0xa729, Hi: 0xa729, Stride: 0x1}, + unicode.Range16{Lo: 0xa72b, Hi: 0xa72b, Stride: 0x1}, + unicode.Range16{Lo: 0xa72d, Hi: 0xa72d, Stride: 0x1}, + unicode.Range16{Lo: 0xa72f, Hi: 0xa731, Stride: 0x1}, + unicode.Range16{Lo: 0xa733, Hi: 0xa733, Stride: 0x1}, + unicode.Range16{Lo: 0xa735, Hi: 0xa735, Stride: 0x1}, + unicode.Range16{Lo: 0xa737, Hi: 0xa737, Stride: 0x1}, + unicode.Range16{Lo: 0xa739, Hi: 0xa739, Stride: 0x1}, + unicode.Range16{Lo: 0xa73b, Hi: 0xa73b, Stride: 0x1}, + unicode.Range16{Lo: 0xa73d, Hi: 0xa73d, Stride: 0x1}, + unicode.Range16{Lo: 0xa73f, Hi: 0xa73f, Stride: 0x1}, + unicode.Range16{Lo: 0xa741, Hi: 0xa741, Stride: 0x1}, + unicode.Range16{Lo: 0xa743, Hi: 0xa743, Stride: 0x1}, + unicode.Range16{Lo: 0xa745, Hi: 0xa745, Stride: 0x1}, + unicode.Range16{Lo: 0xa747, Hi: 0xa747, Stride: 0x1}, + unicode.Range16{Lo: 0xa749, Hi: 0xa749, Stride: 0x1}, + unicode.Range16{Lo: 0xa74b, Hi: 0xa74b, Stride: 0x1}, + unicode.Range16{Lo: 0xa74d, Hi: 0xa74d, Stride: 0x1}, + unicode.Range16{Lo: 0xa74f, Hi: 0xa74f, Stride: 0x1}, + unicode.Range16{Lo: 0xa751, Hi: 0xa751, Stride: 0x1}, + unicode.Range16{Lo: 0xa753, Hi: 0xa753, Stride: 0x1}, + unicode.Range16{Lo: 0xa755, Hi: 0xa755, Stride: 0x1}, + unicode.Range16{Lo: 0xa757, Hi: 0xa757, Stride: 0x1}, + unicode.Range16{Lo: 0xa759, Hi: 0xa759, Stride: 0x1}, + unicode.Range16{Lo: 0xa75b, Hi: 0xa75b, Stride: 0x1}, + unicode.Range16{Lo: 0xa75d, Hi: 0xa75d, Stride: 0x1}, + unicode.Range16{Lo: 0xa75f, Hi: 0xa75f, Stride: 0x1}, + unicode.Range16{Lo: 0xa761, Hi: 0xa761, Stride: 0x1}, + unicode.Range16{Lo: 0xa763, Hi: 0xa763, Stride: 0x1}, + unicode.Range16{Lo: 0xa765, Hi: 0xa765, Stride: 0x1}, + unicode.Range16{Lo: 0xa767, Hi: 0xa767, Stride: 0x1}, + unicode.Range16{Lo: 0xa769, Hi: 0xa769, Stride: 0x1}, + unicode.Range16{Lo: 0xa76b, Hi: 0xa76b, Stride: 0x1}, + unicode.Range16{Lo: 0xa76d, Hi: 0xa76d, Stride: 0x1}, + 
unicode.Range16{Lo: 0xa76f, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa778, Stride: 0x1}, + unicode.Range16{Lo: 0xa77a, Hi: 0xa77a, Stride: 0x1}, + unicode.Range16{Lo: 0xa77c, Hi: 0xa77c, Stride: 0x1}, + unicode.Range16{Lo: 0xa77f, Hi: 0xa77f, Stride: 0x1}, + unicode.Range16{Lo: 0xa781, Hi: 0xa781, Stride: 0x1}, + unicode.Range16{Lo: 0xa783, Hi: 0xa783, Stride: 0x1}, + unicode.Range16{Lo: 0xa785, Hi: 0xa785, Stride: 0x1}, + unicode.Range16{Lo: 0xa787, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa78c, Hi: 0xa78c, Stride: 0x1}, + unicode.Range16{Lo: 0xa78e, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa791, Hi: 0xa791, Stride: 0x1}, + unicode.Range16{Lo: 0xa793, Hi: 0xa795, Stride: 0x1}, + unicode.Range16{Lo: 0xa797, Hi: 0xa797, Stride: 0x1}, + unicode.Range16{Lo: 0xa799, Hi: 0xa799, Stride: 0x1}, + unicode.Range16{Lo: 0xa79b, Hi: 0xa79b, Stride: 0x1}, + unicode.Range16{Lo: 0xa79d, Hi: 0xa79d, Stride: 0x1}, + unicode.Range16{Lo: 0xa79f, Hi: 0xa79f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a1, Hi: 0xa7a1, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a3, Hi: 0xa7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a5, Hi: 0xa7a5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a7, Hi: 0xa7a7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a9, Hi: 0xa7a9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b5, Hi: 0xa7b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b7, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10428, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x118c0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x1d41a, Hi: 0x1d433, Stride: 0x1}, + unicode.Range32{Lo: 0x1d44e, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d467, Stride: 0x1}, + unicode.Range32{Lo: 0x1d482, Hi: 0x1d49b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4b6, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d4cf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ea, Hi: 0x1d503, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d537, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d56b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d586, Hi: 0x1d59f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ba, Hi: 0x1d5d3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ee, Hi: 0x1d607, Stride: 0x1}, + unicode.Range32{Lo: 0x1d622, Hi: 0x1d63b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d656, Hi: 0x1d66f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d68a, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6e1, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d71b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d755, Stride: 0x1}, 
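+ // The R32 ranges above and below are supplementary-plane lowercase
+ // letters: Deseret (0x10428-0x1044f), Osage, Old Hungarian, Warang
+ // Citi, the mathematical alphanumeric small letters (0x1d41a onward),
+ // and Adlam (0x1e922-0x1e943).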
+ unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d78f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7c9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7cb, Hi: 0x1d7cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1e922, Hi: 0x1e943, Stride: 0x1}, + }, + LatinOffset: 6, +} + +var _SentenceNumeric = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1}, + unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1}, + unicode.Range16{Lo: 0x66b, Hi: 0x66c, Stride: 0x1}, + unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1}, + unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1}, + unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1}, + unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1}, + unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1}, + unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1}, + unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1}, + unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1}, + unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1}, + unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1}, + unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1}, + unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1}, + unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1}, + unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1}, + unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1}, + unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1}, + unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1}, + unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1}, + unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1}, + unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1}, + unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1}, + unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1}, + unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1}, + unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1}, + unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1}, + unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1}, + unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1}, + unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1}, + unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1}, + unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1}, + unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1}, + unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1}, + unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1}, + unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1}, + unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1}, + unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1}, + unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1}, + unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1}, + unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1}, + unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1}, + unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1}, + unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1}, + unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1}, + unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1}, + unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1}, + unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1}, + unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceOLetter = 
&unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1}, + unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1}, + unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1}, + unicode.Range16{Lo: 0x2b9, Hi: 0x2bf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1}, + unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1}, + unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1}, + unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 
0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1}, + unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xe01, Hi: 0xe30, Stride: 0x1}, + unicode.Range16{Lo: 0xe32, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xe40, Hi: 0xe45, Stride: 0x1}, + unicode.Range16{Lo: 0xe46, Hi: 0xe46, Stride: 0x1}, + unicode.Range16{Lo: 0xe81, Hi: 0xe82, Stride: 0x1}, + unicode.Range16{Lo: 0xe84, Hi: 0xe84, Stride: 0x1}, + unicode.Range16{Lo: 0xe87, Hi: 
0xe88, Stride: 0x1}, + unicode.Range16{Lo: 0xe8a, Hi: 0xe8a, Stride: 0x1}, + unicode.Range16{Lo: 0xe8d, Hi: 0xe8d, Stride: 0x1}, + unicode.Range16{Lo: 0xe94, Hi: 0xe97, Stride: 0x1}, + unicode.Range16{Lo: 0xe99, Hi: 0xe9f, Stride: 0x1}, + unicode.Range16{Lo: 0xea1, Hi: 0xea3, Stride: 0x1}, + unicode.Range16{Lo: 0xea5, Hi: 0xea5, Stride: 0x1}, + unicode.Range16{Lo: 0xea7, Hi: 0xea7, Stride: 0x1}, + unicode.Range16{Lo: 0xeaa, Hi: 0xeab, Stride: 0x1}, + unicode.Range16{Lo: 0xead, Hi: 0xeb0, Stride: 0x1}, + unicode.Range16{Lo: 0xeb2, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xebd, Hi: 0xebd, Stride: 0x1}, + unicode.Range16{Lo: 0xec0, Hi: 0xec4, Stride: 0x1}, + unicode.Range16{Lo: 0xec6, Hi: 0xec6, Stride: 0x1}, + unicode.Range16{Lo: 0xedc, Hi: 0xedf, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1000, Hi: 0x102a, Stride: 0x1}, + unicode.Range16{Lo: 0x103f, Hi: 0x103f, Stride: 0x1}, + unicode.Range16{Lo: 0x1050, Hi: 0x1055, Stride: 0x1}, + unicode.Range16{Lo: 0x105a, Hi: 0x105d, Stride: 0x1}, + unicode.Range16{Lo: 0x1061, Hi: 0x1061, Stride: 0x1}, + unicode.Range16{Lo: 0x1065, Hi: 0x1066, Stride: 0x1}, + unicode.Range16{Lo: 0x106e, Hi: 0x1070, Stride: 0x1}, + unicode.Range16{Lo: 0x1075, Hi: 0x1081, Stride: 0x1}, + unicode.Range16{Lo: 0x108e, Hi: 0x108e, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1780, Hi: 0x17b3, Stride: 0x1}, + unicode.Range16{Lo: 0x17d7, Hi: 0x17d7, Stride: 0x1}, + unicode.Range16{Lo: 0x17dc, Hi: 0x17dc, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1950, Hi: 0x196d, Stride: 0x1}, + unicode.Range16{Lo: 0x1970, Hi: 0x1974, Stride: 0x1}, + unicode.Range16{Lo: 0x1980, Hi: 0x19ab, Stride: 0x1}, + unicode.Range16{Lo: 0x19b0, Hi: 0x19c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1a20, Hi: 0x1a54, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2180, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x3006, Hi: 0x3006, Stride: 0x1}, + unicode.Range16{Lo: 0x3007, Hi: 0x3007, Stride: 0x1}, + unicode.Range16{Lo: 0x3021, Hi: 0x3029, Stride: 0x1}, + unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1}, + unicode.Range16{Lo: 0x3038, Hi: 0x303a, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3041, Hi: 0x3096, Stride: 0x1}, + unicode.Range16{Lo: 0x309d, Hi: 0x309e, Stride: 0x1}, + unicode.Range16{Lo: 0x309f, Hi: 0x309f, Stride: 0x1}, + unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1}, + unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1}, + unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1}, + unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3400, Hi: 0x4db5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e00, Hi: 0x9fd5, Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + 
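+ // _SentenceOLetter collects Sentence_Break=OLetter code points:
+ // letters that are neither Lower nor Upper, which is mostly the
+ // caseless scripts (Hebrew, Arabic, the Indic scripts, Thai,
+ // Ethiopic, CJK, Hangul, and so on).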
unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, + unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e6, Hi: 0xa9e6, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e7, Hi: 0xa9ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa60, Hi: 0xaa6f, Stride: 0x1}, + unicode.Range16{Lo: 0xaa70, Hi: 0xaa70, Stride: 0x1}, + unicode.Range16{Lo: 0xaa71, Hi: 0xaa76, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 0x1}, + unicode.Range16{Lo: 0xaab1, Hi: 0xaab1, Stride: 0x1}, + unicode.Range16{Lo: 0xaab5, Hi: 0xaab6, Stride: 0x1}, + unicode.Range16{Lo: 0xaab9, Hi: 0xaabd, Stride: 0x1}, + unicode.Range16{Lo: 0xaac0, Hi: 0xaac0, Stride: 0x1}, + unicode.Range16{Lo: 0xaac2, Hi: 0xaac2, Stride: 0x1}, + unicode.Range16{Lo: 0xaadb, Hi: 0xaadc, Stride: 0x1}, + unicode.Range16{Lo: 0xaadd, Hi: 0xaadd, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xf900, Hi: 0xfa6d, Stride: 0x1}, + unicode.Range16{Lo: 0xfa70, Hi: 0xfad9, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1}, + unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1}, + unicode.Range16{Lo: 
0xfb38, Hi: 0xfb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1}, + unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1}, + unicode.Range16{Lo: 0xfb46, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1}, + unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1}, + unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, 
Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x11700, Hi: 0x11719, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + 
unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1}, + unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1}, + unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1}, + unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1}, + unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1}, + unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1}, + unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1}, + unicode.Range32{Lo: 0x17000, Hi: 0x187ec, Stride: 0x1}, + unicode.Range32{Lo: 0x18800, Hi: 0x18af2, Stride: 0x1}, + unicode.Range32{Lo: 0x1b000, Hi: 0x1b001, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1}, + unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1}, + unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1}, + unicode.Range32{Lo: 0x20000, Hi: 0x2a6d6, Stride: 0x1}, + unicode.Range32{Lo: 0x2a700, Hi: 0x2b734, Stride: 0x1}, + unicode.Range32{Lo: 0x2b740, Hi: 0x2b81d, Stride: 0x1}, + unicode.Range32{Lo: 0x2b820, Hi: 0x2cea1, Stride: 0x1}, + unicode.Range32{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _SentenceSContinue = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1}, + unicode.Range16{Lo: 0x2d, Hi: 0x2d, Stride: 0x1}, + unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1}, + unicode.Range16{Lo: 0x55d, Hi: 0x55d, Stride: 0x1}, + unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1}, + 
unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1802, Hi: 0x1802, Stride: 0x1}, + unicode.Range16{Lo: 0x1808, Hi: 0x1808, Stride: 0x1}, + unicode.Range16{Lo: 0x2013, Hi: 0x2014, Stride: 0x1}, + unicode.Range16{Lo: 0x3001, Hi: 0x3001, Stride: 0x1}, + unicode.Range16{Lo: 0xfe10, Hi: 0xfe11, Stride: 0x1}, + unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1}, + unicode.Range16{Lo: 0xfe31, Hi: 0xfe32, Stride: 0x1}, + unicode.Range16{Lo: 0xfe50, Hi: 0xfe51, Stride: 0x1}, + unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1}, + unicode.Range16{Lo: 0xfe58, Hi: 0xfe58, Stride: 0x1}, + unicode.Range16{Lo: 0xfe63, Hi: 0xfe63, Stride: 0x1}, + unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1}, + unicode.Range16{Lo: 0xff0d, Hi: 0xff0d, Stride: 0x1}, + unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1}, + unicode.Range16{Lo: 0xff64, Hi: 0xff64, Stride: 0x1}, + }, + LatinOffset: 3, +} + +var _SentenceSTerm = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x21, Hi: 0x21, Stride: 0x1}, + unicode.Range16{Lo: 0x3f, Hi: 0x3f, Stride: 0x1}, + unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1}, + unicode.Range16{Lo: 0x61f, Hi: 0x61f, Stride: 0x1}, + unicode.Range16{Lo: 0x6d4, Hi: 0x6d4, Stride: 0x1}, + unicode.Range16{Lo: 0x700, Hi: 0x702, Stride: 0x1}, + unicode.Range16{Lo: 0x7f9, Hi: 0x7f9, Stride: 0x1}, + unicode.Range16{Lo: 0x964, Hi: 0x965, Stride: 0x1}, + unicode.Range16{Lo: 0x104a, Hi: 0x104b, Stride: 0x1}, + unicode.Range16{Lo: 0x1362, Hi: 0x1362, Stride: 0x1}, + unicode.Range16{Lo: 0x1367, Hi: 0x1368, Stride: 0x1}, + unicode.Range16{Lo: 0x166e, Hi: 0x166e, Stride: 0x1}, + unicode.Range16{Lo: 0x1735, Hi: 0x1736, Stride: 0x1}, + unicode.Range16{Lo: 0x1803, Hi: 0x1803, Stride: 0x1}, + unicode.Range16{Lo: 0x1809, Hi: 0x1809, Stride: 0x1}, + unicode.Range16{Lo: 0x1944, Hi: 0x1945, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa8, Hi: 0x1aab, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5a, Hi: 0x1b5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5e, Hi: 0x1b5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c3b, Hi: 0x1c3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1c7e, Hi: 0x1c7f, Stride: 0x1}, + unicode.Range16{Lo: 0x203c, Hi: 0x203d, Stride: 0x1}, + unicode.Range16{Lo: 0x2047, Hi: 0x2049, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2e, Hi: 0x2e2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2e3c, Hi: 0x2e3c, Stride: 0x1}, + unicode.Range16{Lo: 0x3002, Hi: 0x3002, Stride: 0x1}, + unicode.Range16{Lo: 0xa4ff, Hi: 0xa4ff, Stride: 0x1}, + unicode.Range16{Lo: 0xa60e, Hi: 0xa60f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f3, Hi: 0xa6f3, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f7, Hi: 0xa6f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa876, Hi: 0xa877, Stride: 0x1}, + unicode.Range16{Lo: 0xa8ce, Hi: 0xa8cf, Stride: 0x1}, + unicode.Range16{Lo: 0xa92f, Hi: 0xa92f, Stride: 0x1}, + unicode.Range16{Lo: 0xa9c8, Hi: 0xa9c9, Stride: 0x1}, + unicode.Range16{Lo: 0xaa5d, Hi: 0xaa5f, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf0, Hi: 0xaaf1, Stride: 0x1}, + unicode.Range16{Lo: 0xabeb, Hi: 0xabeb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe56, Hi: 0xfe57, Stride: 0x1}, + unicode.Range16{Lo: 0xff01, Hi: 0xff01, Stride: 0x1}, + unicode.Range16{Lo: 0xff1f, Hi: 0xff1f, Stride: 0x1}, + unicode.Range16{Lo: 0xff61, Hi: 0xff61, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10a56, Hi: 0x10a57, Stride: 0x1}, + unicode.Range32{Lo: 0x11047, Hi: 0x11048, Stride: 0x1}, + unicode.Range32{Lo: 0x110be, Hi: 0x110c1, Stride: 0x1}, + unicode.Range32{Lo: 0x11141, Hi: 0x11143, Stride: 0x1}, + unicode.Range32{Lo: 
0x111c5, Hi: 0x111c6, Stride: 0x1}, + unicode.Range32{Lo: 0x111cd, Hi: 0x111cd, Stride: 0x1}, + unicode.Range32{Lo: 0x111de, Hi: 0x111df, Stride: 0x1}, + unicode.Range32{Lo: 0x11238, Hi: 0x11239, Stride: 0x1}, + unicode.Range32{Lo: 0x1123b, Hi: 0x1123c, Stride: 0x1}, + unicode.Range32{Lo: 0x112a9, Hi: 0x112a9, Stride: 0x1}, + unicode.Range32{Lo: 0x1144b, Hi: 0x1144c, Stride: 0x1}, + unicode.Range32{Lo: 0x115c2, Hi: 0x115c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115c9, Hi: 0x115d7, Stride: 0x1}, + unicode.Range32{Lo: 0x11641, Hi: 0x11642, Stride: 0x1}, + unicode.Range32{Lo: 0x1173c, Hi: 0x1173e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c41, Hi: 0x11c42, Stride: 0x1}, + unicode.Range32{Lo: 0x16a6e, Hi: 0x16a6f, Stride: 0x1}, + unicode.Range32{Lo: 0x16af5, Hi: 0x16af5, Stride: 0x1}, + unicode.Range32{Lo: 0x16b37, Hi: 0x16b38, Stride: 0x1}, + unicode.Range32{Lo: 0x16b44, Hi: 0x16b44, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9f, Hi: 0x1bc9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1da88, Hi: 0x1da88, Stride: 0x1}, + }, + LatinOffset: 2, +} + +var _SentenceSep = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1}, + unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1}, + unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceSp = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x9, Hi: 0x9, Stride: 0x1}, + unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1}, + unicode.Range16{Lo: 0x20, Hi: 0x20, Stride: 0x1}, + unicode.Range16{Lo: 0xa0, Hi: 0xa0, Stride: 0x1}, + unicode.Range16{Lo: 0x1680, Hi: 0x1680, Stride: 0x1}, + unicode.Range16{Lo: 0x2000, Hi: 0x200a, Stride: 0x1}, + unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1}, + unicode.Range16{Lo: 0x205f, Hi: 0x205f, Stride: 0x1}, + unicode.Range16{Lo: 0x3000, Hi: 0x3000, Stride: 0x1}, + }, + LatinOffset: 4, +} + +var _SentenceUpper = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1}, + unicode.Range16{Lo: 0xd8, Hi: 0xde, Stride: 0x1}, + unicode.Range16{Lo: 0x100, Hi: 0x100, Stride: 0x1}, + unicode.Range16{Lo: 0x102, Hi: 0x102, Stride: 0x1}, + unicode.Range16{Lo: 0x104, Hi: 0x104, Stride: 0x1}, + unicode.Range16{Lo: 0x106, Hi: 0x106, Stride: 0x1}, + unicode.Range16{Lo: 0x108, Hi: 0x108, Stride: 0x1}, + unicode.Range16{Lo: 0x10a, Hi: 0x10a, Stride: 0x1}, + unicode.Range16{Lo: 0x10c, Hi: 0x10c, Stride: 0x1}, + unicode.Range16{Lo: 0x10e, Hi: 0x10e, Stride: 0x1}, + unicode.Range16{Lo: 0x110, Hi: 0x110, Stride: 0x1}, + unicode.Range16{Lo: 0x112, Hi: 0x112, Stride: 0x1}, + unicode.Range16{Lo: 0x114, Hi: 0x114, Stride: 0x1}, + unicode.Range16{Lo: 0x116, Hi: 0x116, Stride: 0x1}, + unicode.Range16{Lo: 0x118, Hi: 0x118, Stride: 0x1}, + unicode.Range16{Lo: 0x11a, Hi: 0x11a, Stride: 0x1}, + unicode.Range16{Lo: 0x11c, Hi: 0x11c, Stride: 0x1}, + unicode.Range16{Lo: 0x11e, Hi: 0x11e, Stride: 0x1}, + unicode.Range16{Lo: 0x120, Hi: 0x120, Stride: 0x1}, + unicode.Range16{Lo: 0x122, Hi: 0x122, Stride: 0x1}, + unicode.Range16{Lo: 0x124, Hi: 0x124, Stride: 0x1}, + unicode.Range16{Lo: 0x126, Hi: 0x126, Stride: 0x1}, + unicode.Range16{Lo: 0x128, Hi: 0x128, Stride: 0x1}, + unicode.Range16{Lo: 0x12a, Hi: 0x12a, Stride: 0x1}, + unicode.Range16{Lo: 0x12c, Hi: 0x12c, Stride: 0x1}, + unicode.Range16{Lo: 0x12e, Hi: 0x12e, Stride: 0x1}, + unicode.Range16{Lo: 0x130, Hi: 0x130, Stride: 0x1}, + unicode.Range16{Lo: 0x132, Hi: 0x132, Stride: 0x1}, + unicode.Range16{Lo: 0x134, Hi: 
0x134, Stride: 0x1}, + unicode.Range16{Lo: 0x136, Hi: 0x136, Stride: 0x1}, + unicode.Range16{Lo: 0x139, Hi: 0x139, Stride: 0x1}, + unicode.Range16{Lo: 0x13b, Hi: 0x13b, Stride: 0x1}, + unicode.Range16{Lo: 0x13d, Hi: 0x13d, Stride: 0x1}, + unicode.Range16{Lo: 0x13f, Hi: 0x13f, Stride: 0x1}, + unicode.Range16{Lo: 0x141, Hi: 0x141, Stride: 0x1}, + unicode.Range16{Lo: 0x143, Hi: 0x143, Stride: 0x1}, + unicode.Range16{Lo: 0x145, Hi: 0x145, Stride: 0x1}, + unicode.Range16{Lo: 0x147, Hi: 0x147, Stride: 0x1}, + unicode.Range16{Lo: 0x14a, Hi: 0x14a, Stride: 0x1}, + unicode.Range16{Lo: 0x14c, Hi: 0x14c, Stride: 0x1}, + unicode.Range16{Lo: 0x14e, Hi: 0x14e, Stride: 0x1}, + unicode.Range16{Lo: 0x150, Hi: 0x150, Stride: 0x1}, + unicode.Range16{Lo: 0x152, Hi: 0x152, Stride: 0x1}, + unicode.Range16{Lo: 0x154, Hi: 0x154, Stride: 0x1}, + unicode.Range16{Lo: 0x156, Hi: 0x156, Stride: 0x1}, + unicode.Range16{Lo: 0x158, Hi: 0x158, Stride: 0x1}, + unicode.Range16{Lo: 0x15a, Hi: 0x15a, Stride: 0x1}, + unicode.Range16{Lo: 0x15c, Hi: 0x15c, Stride: 0x1}, + unicode.Range16{Lo: 0x15e, Hi: 0x15e, Stride: 0x1}, + unicode.Range16{Lo: 0x160, Hi: 0x160, Stride: 0x1}, + unicode.Range16{Lo: 0x162, Hi: 0x162, Stride: 0x1}, + unicode.Range16{Lo: 0x164, Hi: 0x164, Stride: 0x1}, + unicode.Range16{Lo: 0x166, Hi: 0x166, Stride: 0x1}, + unicode.Range16{Lo: 0x168, Hi: 0x168, Stride: 0x1}, + unicode.Range16{Lo: 0x16a, Hi: 0x16a, Stride: 0x1}, + unicode.Range16{Lo: 0x16c, Hi: 0x16c, Stride: 0x1}, + unicode.Range16{Lo: 0x16e, Hi: 0x16e, Stride: 0x1}, + unicode.Range16{Lo: 0x170, Hi: 0x170, Stride: 0x1}, + unicode.Range16{Lo: 0x172, Hi: 0x172, Stride: 0x1}, + unicode.Range16{Lo: 0x174, Hi: 0x174, Stride: 0x1}, + unicode.Range16{Lo: 0x176, Hi: 0x176, Stride: 0x1}, + unicode.Range16{Lo: 0x178, Hi: 0x179, Stride: 0x1}, + unicode.Range16{Lo: 0x17b, Hi: 0x17b, Stride: 0x1}, + unicode.Range16{Lo: 0x17d, Hi: 0x17d, Stride: 0x1}, + unicode.Range16{Lo: 0x181, Hi: 0x182, Stride: 0x1}, + unicode.Range16{Lo: 0x184, Hi: 0x184, Stride: 0x1}, + unicode.Range16{Lo: 0x186, Hi: 0x187, Stride: 0x1}, + unicode.Range16{Lo: 0x189, Hi: 0x18b, Stride: 0x1}, + unicode.Range16{Lo: 0x18e, Hi: 0x191, Stride: 0x1}, + unicode.Range16{Lo: 0x193, Hi: 0x194, Stride: 0x1}, + unicode.Range16{Lo: 0x196, Hi: 0x198, Stride: 0x1}, + unicode.Range16{Lo: 0x19c, Hi: 0x19d, Stride: 0x1}, + unicode.Range16{Lo: 0x19f, Hi: 0x1a0, Stride: 0x1}, + unicode.Range16{Lo: 0x1a2, Hi: 0x1a2, Stride: 0x1}, + unicode.Range16{Lo: 0x1a4, Hi: 0x1a4, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6, Hi: 0x1a7, Stride: 0x1}, + unicode.Range16{Lo: 0x1a9, Hi: 0x1a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ac, Hi: 0x1ac, Stride: 0x1}, + unicode.Range16{Lo: 0x1ae, Hi: 0x1af, Stride: 0x1}, + unicode.Range16{Lo: 0x1b1, Hi: 0x1b3, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5, Hi: 0x1b5, Stride: 0x1}, + unicode.Range16{Lo: 0x1b7, Hi: 0x1b8, Stride: 0x1}, + unicode.Range16{Lo: 0x1bc, Hi: 0x1bc, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4, Hi: 0x1c5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c7, Hi: 0x1c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ca, Hi: 0x1cb, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd, Hi: 0x1cd, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf, Hi: 0x1cf, Stride: 0x1}, + unicode.Range16{Lo: 0x1d1, Hi: 0x1d1, Stride: 0x1}, + unicode.Range16{Lo: 0x1d3, Hi: 0x1d3, Stride: 0x1}, + unicode.Range16{Lo: 0x1d5, Hi: 0x1d5, Stride: 0x1}, + unicode.Range16{Lo: 0x1d7, Hi: 0x1d7, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9, Hi: 0x1d9, Stride: 0x1}, + unicode.Range16{Lo: 0x1db, Hi: 0x1db, Stride: 0x1}, + unicode.Range16{Lo: 
0x1de, Hi: 0x1de, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0, Hi: 0x1e0, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2, Hi: 0x1e2, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4, Hi: 0x1e4, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6, Hi: 0x1e6, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8, Hi: 0x1e8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea, Hi: 0x1ea, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec, Hi: 0x1ec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee, Hi: 0x1ee, Stride: 0x1}, + unicode.Range16{Lo: 0x1f1, Hi: 0x1f2, Stride: 0x1}, + unicode.Range16{Lo: 0x1f4, Hi: 0x1f4, Stride: 0x1}, + unicode.Range16{Lo: 0x1f6, Hi: 0x1f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa, Hi: 0x1fa, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc, Hi: 0x1fc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe, Hi: 0x1fe, Stride: 0x1}, + unicode.Range16{Lo: 0x200, Hi: 0x200, Stride: 0x1}, + unicode.Range16{Lo: 0x202, Hi: 0x202, Stride: 0x1}, + unicode.Range16{Lo: 0x204, Hi: 0x204, Stride: 0x1}, + unicode.Range16{Lo: 0x206, Hi: 0x206, Stride: 0x1}, + unicode.Range16{Lo: 0x208, Hi: 0x208, Stride: 0x1}, + unicode.Range16{Lo: 0x20a, Hi: 0x20a, Stride: 0x1}, + unicode.Range16{Lo: 0x20c, Hi: 0x20c, Stride: 0x1}, + unicode.Range16{Lo: 0x20e, Hi: 0x20e, Stride: 0x1}, + unicode.Range16{Lo: 0x210, Hi: 0x210, Stride: 0x1}, + unicode.Range16{Lo: 0x212, Hi: 0x212, Stride: 0x1}, + unicode.Range16{Lo: 0x214, Hi: 0x214, Stride: 0x1}, + unicode.Range16{Lo: 0x216, Hi: 0x216, Stride: 0x1}, + unicode.Range16{Lo: 0x218, Hi: 0x218, Stride: 0x1}, + unicode.Range16{Lo: 0x21a, Hi: 0x21a, Stride: 0x1}, + unicode.Range16{Lo: 0x21c, Hi: 0x21c, Stride: 0x1}, + unicode.Range16{Lo: 0x21e, Hi: 0x21e, Stride: 0x1}, + unicode.Range16{Lo: 0x220, Hi: 0x220, Stride: 0x1}, + unicode.Range16{Lo: 0x222, Hi: 0x222, Stride: 0x1}, + unicode.Range16{Lo: 0x224, Hi: 0x224, Stride: 0x1}, + unicode.Range16{Lo: 0x226, Hi: 0x226, Stride: 0x1}, + unicode.Range16{Lo: 0x228, Hi: 0x228, Stride: 0x1}, + unicode.Range16{Lo: 0x22a, Hi: 0x22a, Stride: 0x1}, + unicode.Range16{Lo: 0x22c, Hi: 0x22c, Stride: 0x1}, + unicode.Range16{Lo: 0x22e, Hi: 0x22e, Stride: 0x1}, + unicode.Range16{Lo: 0x230, Hi: 0x230, Stride: 0x1}, + unicode.Range16{Lo: 0x232, Hi: 0x232, Stride: 0x1}, + unicode.Range16{Lo: 0x23a, Hi: 0x23b, Stride: 0x1}, + unicode.Range16{Lo: 0x23d, Hi: 0x23e, Stride: 0x1}, + unicode.Range16{Lo: 0x241, Hi: 0x241, Stride: 0x1}, + unicode.Range16{Lo: 0x243, Hi: 0x246, Stride: 0x1}, + unicode.Range16{Lo: 0x248, Hi: 0x248, Stride: 0x1}, + unicode.Range16{Lo: 0x24a, Hi: 0x24a, Stride: 0x1}, + unicode.Range16{Lo: 0x24c, Hi: 0x24c, Stride: 0x1}, + unicode.Range16{Lo: 0x24e, Hi: 0x24e, Stride: 0x1}, + unicode.Range16{Lo: 0x370, Hi: 0x370, Stride: 0x1}, + unicode.Range16{Lo: 0x372, Hi: 0x372, Stride: 0x1}, + unicode.Range16{Lo: 0x376, Hi: 0x376, Stride: 0x1}, + unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1}, + unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1}, + unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1}, + unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1}, + unicode.Range16{Lo: 0x38e, Hi: 0x38f, Stride: 0x1}, + unicode.Range16{Lo: 0x391, Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3ab, Stride: 0x1}, + unicode.Range16{Lo: 0x3cf, Hi: 0x3cf, Stride: 0x1}, + unicode.Range16{Lo: 0x3d2, Hi: 0x3d4, Stride: 0x1}, + unicode.Range16{Lo: 0x3d8, Hi: 0x3d8, Stride: 0x1}, + unicode.Range16{Lo: 0x3da, Hi: 0x3da, Stride: 0x1}, + unicode.Range16{Lo: 0x3dc, Hi: 0x3dc, Stride: 0x1}, + unicode.Range16{Lo: 0x3de, Hi: 0x3de, Stride: 0x1}, + unicode.Range16{Lo: 0x3e0, Hi: 0x3e0, Stride: 0x1}, + 
unicode.Range16{Lo: 0x3e2, Hi: 0x3e2, Stride: 0x1}, + unicode.Range16{Lo: 0x3e4, Hi: 0x3e4, Stride: 0x1}, + unicode.Range16{Lo: 0x3e6, Hi: 0x3e6, Stride: 0x1}, + unicode.Range16{Lo: 0x3e8, Hi: 0x3e8, Stride: 0x1}, + unicode.Range16{Lo: 0x3ea, Hi: 0x3ea, Stride: 0x1}, + unicode.Range16{Lo: 0x3ec, Hi: 0x3ec, Stride: 0x1}, + unicode.Range16{Lo: 0x3ee, Hi: 0x3ee, Stride: 0x1}, + unicode.Range16{Lo: 0x3f4, Hi: 0x3f4, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x3f7, Stride: 0x1}, + unicode.Range16{Lo: 0x3f9, Hi: 0x3fa, Stride: 0x1}, + unicode.Range16{Lo: 0x3fd, Hi: 0x42f, Stride: 0x1}, + unicode.Range16{Lo: 0x460, Hi: 0x460, Stride: 0x1}, + unicode.Range16{Lo: 0x462, Hi: 0x462, Stride: 0x1}, + unicode.Range16{Lo: 0x464, Hi: 0x464, Stride: 0x1}, + unicode.Range16{Lo: 0x466, Hi: 0x466, Stride: 0x1}, + unicode.Range16{Lo: 0x468, Hi: 0x468, Stride: 0x1}, + unicode.Range16{Lo: 0x46a, Hi: 0x46a, Stride: 0x1}, + unicode.Range16{Lo: 0x46c, Hi: 0x46c, Stride: 0x1}, + unicode.Range16{Lo: 0x46e, Hi: 0x46e, Stride: 0x1}, + unicode.Range16{Lo: 0x470, Hi: 0x470, Stride: 0x1}, + unicode.Range16{Lo: 0x472, Hi: 0x472, Stride: 0x1}, + unicode.Range16{Lo: 0x474, Hi: 0x474, Stride: 0x1}, + unicode.Range16{Lo: 0x476, Hi: 0x476, Stride: 0x1}, + unicode.Range16{Lo: 0x478, Hi: 0x478, Stride: 0x1}, + unicode.Range16{Lo: 0x47a, Hi: 0x47a, Stride: 0x1}, + unicode.Range16{Lo: 0x47c, Hi: 0x47c, Stride: 0x1}, + unicode.Range16{Lo: 0x47e, Hi: 0x47e, Stride: 0x1}, + unicode.Range16{Lo: 0x480, Hi: 0x480, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x48a, Stride: 0x1}, + unicode.Range16{Lo: 0x48c, Hi: 0x48c, Stride: 0x1}, + unicode.Range16{Lo: 0x48e, Hi: 0x48e, Stride: 0x1}, + unicode.Range16{Lo: 0x490, Hi: 0x490, Stride: 0x1}, + unicode.Range16{Lo: 0x492, Hi: 0x492, Stride: 0x1}, + unicode.Range16{Lo: 0x494, Hi: 0x494, Stride: 0x1}, + unicode.Range16{Lo: 0x496, Hi: 0x496, Stride: 0x1}, + unicode.Range16{Lo: 0x498, Hi: 0x498, Stride: 0x1}, + unicode.Range16{Lo: 0x49a, Hi: 0x49a, Stride: 0x1}, + unicode.Range16{Lo: 0x49c, Hi: 0x49c, Stride: 0x1}, + unicode.Range16{Lo: 0x49e, Hi: 0x49e, Stride: 0x1}, + unicode.Range16{Lo: 0x4a0, Hi: 0x4a0, Stride: 0x1}, + unicode.Range16{Lo: 0x4a2, Hi: 0x4a2, Stride: 0x1}, + unicode.Range16{Lo: 0x4a4, Hi: 0x4a4, Stride: 0x1}, + unicode.Range16{Lo: 0x4a6, Hi: 0x4a6, Stride: 0x1}, + unicode.Range16{Lo: 0x4a8, Hi: 0x4a8, Stride: 0x1}, + unicode.Range16{Lo: 0x4aa, Hi: 0x4aa, Stride: 0x1}, + unicode.Range16{Lo: 0x4ac, Hi: 0x4ac, Stride: 0x1}, + unicode.Range16{Lo: 0x4ae, Hi: 0x4ae, Stride: 0x1}, + unicode.Range16{Lo: 0x4b0, Hi: 0x4b0, Stride: 0x1}, + unicode.Range16{Lo: 0x4b2, Hi: 0x4b2, Stride: 0x1}, + unicode.Range16{Lo: 0x4b4, Hi: 0x4b4, Stride: 0x1}, + unicode.Range16{Lo: 0x4b6, Hi: 0x4b6, Stride: 0x1}, + unicode.Range16{Lo: 0x4b8, Hi: 0x4b8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ba, Hi: 0x4ba, Stride: 0x1}, + unicode.Range16{Lo: 0x4bc, Hi: 0x4bc, Stride: 0x1}, + unicode.Range16{Lo: 0x4be, Hi: 0x4be, Stride: 0x1}, + unicode.Range16{Lo: 0x4c0, Hi: 0x4c1, Stride: 0x1}, + unicode.Range16{Lo: 0x4c3, Hi: 0x4c3, Stride: 0x1}, + unicode.Range16{Lo: 0x4c5, Hi: 0x4c5, Stride: 0x1}, + unicode.Range16{Lo: 0x4c7, Hi: 0x4c7, Stride: 0x1}, + unicode.Range16{Lo: 0x4c9, Hi: 0x4c9, Stride: 0x1}, + unicode.Range16{Lo: 0x4cb, Hi: 0x4cb, Stride: 0x1}, + unicode.Range16{Lo: 0x4cd, Hi: 0x4cd, Stride: 0x1}, + unicode.Range16{Lo: 0x4d0, Hi: 0x4d0, Stride: 0x1}, + unicode.Range16{Lo: 0x4d2, Hi: 0x4d2, Stride: 0x1}, + unicode.Range16{Lo: 0x4d4, Hi: 0x4d4, Stride: 0x1}, + unicode.Range16{Lo: 0x4d6, Hi: 0x4d6, 
Stride: 0x1}, + unicode.Range16{Lo: 0x4d8, Hi: 0x4d8, Stride: 0x1}, + unicode.Range16{Lo: 0x4da, Hi: 0x4da, Stride: 0x1}, + unicode.Range16{Lo: 0x4dc, Hi: 0x4dc, Stride: 0x1}, + unicode.Range16{Lo: 0x4de, Hi: 0x4de, Stride: 0x1}, + unicode.Range16{Lo: 0x4e0, Hi: 0x4e0, Stride: 0x1}, + unicode.Range16{Lo: 0x4e2, Hi: 0x4e2, Stride: 0x1}, + unicode.Range16{Lo: 0x4e4, Hi: 0x4e4, Stride: 0x1}, + unicode.Range16{Lo: 0x4e6, Hi: 0x4e6, Stride: 0x1}, + unicode.Range16{Lo: 0x4e8, Hi: 0x4e8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ea, Hi: 0x4ea, Stride: 0x1}, + unicode.Range16{Lo: 0x4ec, Hi: 0x4ec, Stride: 0x1}, + unicode.Range16{Lo: 0x4ee, Hi: 0x4ee, Stride: 0x1}, + unicode.Range16{Lo: 0x4f0, Hi: 0x4f0, Stride: 0x1}, + unicode.Range16{Lo: 0x4f2, Hi: 0x4f2, Stride: 0x1}, + unicode.Range16{Lo: 0x4f4, Hi: 0x4f4, Stride: 0x1}, + unicode.Range16{Lo: 0x4f6, Hi: 0x4f6, Stride: 0x1}, + unicode.Range16{Lo: 0x4f8, Hi: 0x4f8, Stride: 0x1}, + unicode.Range16{Lo: 0x4fa, Hi: 0x4fa, Stride: 0x1}, + unicode.Range16{Lo: 0x4fc, Hi: 0x4fc, Stride: 0x1}, + unicode.Range16{Lo: 0x4fe, Hi: 0x4fe, Stride: 0x1}, + unicode.Range16{Lo: 0x500, Hi: 0x500, Stride: 0x1}, + unicode.Range16{Lo: 0x502, Hi: 0x502, Stride: 0x1}, + unicode.Range16{Lo: 0x504, Hi: 0x504, Stride: 0x1}, + unicode.Range16{Lo: 0x506, Hi: 0x506, Stride: 0x1}, + unicode.Range16{Lo: 0x508, Hi: 0x508, Stride: 0x1}, + unicode.Range16{Lo: 0x50a, Hi: 0x50a, Stride: 0x1}, + unicode.Range16{Lo: 0x50c, Hi: 0x50c, Stride: 0x1}, + unicode.Range16{Lo: 0x50e, Hi: 0x50e, Stride: 0x1}, + unicode.Range16{Lo: 0x510, Hi: 0x510, Stride: 0x1}, + unicode.Range16{Lo: 0x512, Hi: 0x512, Stride: 0x1}, + unicode.Range16{Lo: 0x514, Hi: 0x514, Stride: 0x1}, + unicode.Range16{Lo: 0x516, Hi: 0x516, Stride: 0x1}, + unicode.Range16{Lo: 0x518, Hi: 0x518, Stride: 0x1}, + unicode.Range16{Lo: 0x51a, Hi: 0x51a, Stride: 0x1}, + unicode.Range16{Lo: 0x51c, Hi: 0x51c, Stride: 0x1}, + unicode.Range16{Lo: 0x51e, Hi: 0x51e, Stride: 0x1}, + unicode.Range16{Lo: 0x520, Hi: 0x520, Stride: 0x1}, + unicode.Range16{Lo: 0x522, Hi: 0x522, Stride: 0x1}, + unicode.Range16{Lo: 0x524, Hi: 0x524, Stride: 0x1}, + unicode.Range16{Lo: 0x526, Hi: 0x526, Stride: 0x1}, + unicode.Range16{Lo: 0x528, Hi: 0x528, Stride: 0x1}, + unicode.Range16{Lo: 0x52a, Hi: 0x52a, Stride: 0x1}, + unicode.Range16{Lo: 0x52c, Hi: 0x52c, Stride: 0x1}, + unicode.Range16{Lo: 0x52e, Hi: 0x52e, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1e00, Stride: 0x1}, + unicode.Range16{Lo: 0x1e02, Hi: 0x1e02, Stride: 0x1}, + unicode.Range16{Lo: 0x1e04, Hi: 0x1e04, Stride: 0x1}, + unicode.Range16{Lo: 0x1e06, Hi: 0x1e06, Stride: 0x1}, + unicode.Range16{Lo: 0x1e08, Hi: 0x1e08, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0a, Hi: 0x1e0a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0c, Hi: 0x1e0c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0e, Hi: 0x1e0e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e10, Hi: 0x1e10, Stride: 0x1}, + unicode.Range16{Lo: 0x1e12, Hi: 0x1e12, Stride: 0x1}, + unicode.Range16{Lo: 0x1e14, Hi: 0x1e14, Stride: 0x1}, + unicode.Range16{Lo: 0x1e16, Hi: 0x1e16, Stride: 0x1}, + unicode.Range16{Lo: 0x1e18, Hi: 0x1e18, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1a, Hi: 0x1e1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1c, Hi: 0x1e1c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1e, Hi: 0x1e1e, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1e20, Hi: 0x1e20, Stride: 0x1}, + unicode.Range16{Lo: 0x1e22, Hi: 0x1e22, Stride: 0x1}, + unicode.Range16{Lo: 0x1e24, Hi: 0x1e24, Stride: 0x1}, + unicode.Range16{Lo: 0x1e26, Hi: 0x1e26, Stride: 0x1}, + unicode.Range16{Lo: 0x1e28, Hi: 0x1e28, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2a, Hi: 0x1e2a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2c, Hi: 0x1e2c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2e, Hi: 0x1e2e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e30, Hi: 0x1e30, Stride: 0x1}, + unicode.Range16{Lo: 0x1e32, Hi: 0x1e32, Stride: 0x1}, + unicode.Range16{Lo: 0x1e34, Hi: 0x1e34, Stride: 0x1}, + unicode.Range16{Lo: 0x1e36, Hi: 0x1e36, Stride: 0x1}, + unicode.Range16{Lo: 0x1e38, Hi: 0x1e38, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3a, Hi: 0x1e3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3c, Hi: 0x1e3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3e, Hi: 0x1e3e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e40, Hi: 0x1e40, Stride: 0x1}, + unicode.Range16{Lo: 0x1e42, Hi: 0x1e42, Stride: 0x1}, + unicode.Range16{Lo: 0x1e44, Hi: 0x1e44, Stride: 0x1}, + unicode.Range16{Lo: 0x1e46, Hi: 0x1e46, Stride: 0x1}, + unicode.Range16{Lo: 0x1e48, Hi: 0x1e48, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4a, Hi: 0x1e4a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4c, Hi: 0x1e4c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4e, Hi: 0x1e4e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e50, Hi: 0x1e50, Stride: 0x1}, + unicode.Range16{Lo: 0x1e52, Hi: 0x1e52, Stride: 0x1}, + unicode.Range16{Lo: 0x1e54, Hi: 0x1e54, Stride: 0x1}, + unicode.Range16{Lo: 0x1e56, Hi: 0x1e56, Stride: 0x1}, + unicode.Range16{Lo: 0x1e58, Hi: 0x1e58, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5a, Hi: 0x1e5a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5c, Hi: 0x1e5c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5e, Hi: 0x1e5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e60, Hi: 0x1e60, Stride: 0x1}, + unicode.Range16{Lo: 0x1e62, Hi: 0x1e62, Stride: 0x1}, + unicode.Range16{Lo: 0x1e64, Hi: 0x1e64, Stride: 0x1}, + unicode.Range16{Lo: 0x1e66, Hi: 0x1e66, Stride: 0x1}, + unicode.Range16{Lo: 0x1e68, Hi: 0x1e68, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6a, Hi: 0x1e6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6c, Hi: 0x1e6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6e, Hi: 0x1e6e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e70, Hi: 0x1e70, Stride: 0x1}, + unicode.Range16{Lo: 0x1e72, Hi: 0x1e72, Stride: 0x1}, + unicode.Range16{Lo: 0x1e74, Hi: 0x1e74, Stride: 0x1}, + unicode.Range16{Lo: 0x1e76, Hi: 0x1e76, Stride: 0x1}, + unicode.Range16{Lo: 0x1e78, Hi: 0x1e78, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7a, Hi: 0x1e7a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7c, Hi: 0x1e7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7e, Hi: 0x1e7e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e80, Hi: 0x1e80, Stride: 0x1}, + unicode.Range16{Lo: 0x1e82, Hi: 0x1e82, Stride: 0x1}, + unicode.Range16{Lo: 0x1e84, Hi: 0x1e84, Stride: 0x1}, + unicode.Range16{Lo: 0x1e86, Hi: 0x1e86, Stride: 0x1}, + unicode.Range16{Lo: 0x1e88, Hi: 0x1e88, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8a, Hi: 0x1e8a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8c, Hi: 0x1e8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8e, Hi: 0x1e8e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e90, Hi: 0x1e90, Stride: 0x1}, + unicode.Range16{Lo: 0x1e92, Hi: 0x1e92, Stride: 0x1}, + unicode.Range16{Lo: 0x1e94, Hi: 0x1e94, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9e, Hi: 0x1e9e, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea0, Hi: 0x1ea0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea2, Hi: 0x1ea2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea4, Hi: 0x1ea4, Stride: 0x1}, + 
unicode.Range16{Lo: 0x1ea6, Hi: 0x1ea6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea8, Hi: 0x1ea8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaa, Hi: 0x1eaa, Stride: 0x1}, + unicode.Range16{Lo: 0x1eac, Hi: 0x1eac, Stride: 0x1}, + unicode.Range16{Lo: 0x1eae, Hi: 0x1eae, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb0, Hi: 0x1eb0, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb2, Hi: 0x1eb2, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb4, Hi: 0x1eb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb6, Hi: 0x1eb6, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb8, Hi: 0x1eb8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eba, Hi: 0x1eba, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebc, Hi: 0x1ebc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebe, Hi: 0x1ebe, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec0, Hi: 0x1ec0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec2, Hi: 0x1ec2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec4, Hi: 0x1ec4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec6, Hi: 0x1ec6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec8, Hi: 0x1ec8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eca, Hi: 0x1eca, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecc, Hi: 0x1ecc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ece, Hi: 0x1ece, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed0, Hi: 0x1ed0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed2, Hi: 0x1ed2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed4, Hi: 0x1ed4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed6, Hi: 0x1ed6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed8, Hi: 0x1ed8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eda, Hi: 0x1eda, Stride: 0x1}, + unicode.Range16{Lo: 0x1edc, Hi: 0x1edc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ede, Hi: 0x1ede, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee0, Hi: 0x1ee0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee2, Hi: 0x1ee2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee4, Hi: 0x1ee4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee6, Hi: 0x1ee6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee8, Hi: 0x1ee8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eea, Hi: 0x1eea, Stride: 0x1}, + unicode.Range16{Lo: 0x1eec, Hi: 0x1eec, Stride: 0x1}, + unicode.Range16{Lo: 0x1eee, Hi: 0x1eee, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef0, Hi: 0x1ef0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef2, Hi: 0x1ef2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef4, Hi: 0x1ef4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef6, Hi: 0x1ef6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef8, Hi: 0x1ef8, Stride: 0x1}, + unicode.Range16{Lo: 0x1efa, Hi: 0x1efa, Stride: 0x1}, + unicode.Range16{Lo: 0x1efc, Hi: 0x1efc, Stride: 0x1}, + unicode.Range16{Lo: 0x1efe, Hi: 0x1efe, Stride: 0x1}, + unicode.Range16{Lo: 0x1f08, Hi: 0x1f0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f28, Hi: 0x1f2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f38, Hi: 0x1f3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f68, Hi: 0x1f6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f88, Hi: 0x1f8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f98, Hi: 0x1f9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa8, Hi: 0x1faf, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb8, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc8, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd8, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe8, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff8, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 
0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210b, Hi: 0x210d, Stride: 0x1}, + unicode.Range16{Lo: 0x2110, Hi: 0x2112, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x2130, Hi: 0x2133, Stride: 0x1}, + unicode.Range16{Lo: 0x213e, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2145, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x216f, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2183, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24cf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c60, Stride: 0x1}, + unicode.Range16{Lo: 0x2c62, Hi: 0x2c64, Stride: 0x1}, + unicode.Range16{Lo: 0x2c67, Hi: 0x2c67, Stride: 0x1}, + unicode.Range16{Lo: 0x2c69, Hi: 0x2c69, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6b, Hi: 0x2c6b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6d, Hi: 0x2c70, Stride: 0x1}, + unicode.Range16{Lo: 0x2c72, Hi: 0x2c72, Stride: 0x1}, + unicode.Range16{Lo: 0x2c75, Hi: 0x2c75, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2c80, Stride: 0x1}, + unicode.Range16{Lo: 0x2c82, Hi: 0x2c82, Stride: 0x1}, + unicode.Range16{Lo: 0x2c84, Hi: 0x2c84, Stride: 0x1}, + unicode.Range16{Lo: 0x2c86, Hi: 0x2c86, Stride: 0x1}, + unicode.Range16{Lo: 0x2c88, Hi: 0x2c88, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8a, Hi: 0x2c8a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8c, Hi: 0x2c8c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8e, Hi: 0x2c8e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c90, Hi: 0x2c90, Stride: 0x1}, + unicode.Range16{Lo: 0x2c92, Hi: 0x2c92, Stride: 0x1}, + unicode.Range16{Lo: 0x2c94, Hi: 0x2c94, Stride: 0x1}, + unicode.Range16{Lo: 0x2c96, Hi: 0x2c96, Stride: 0x1}, + unicode.Range16{Lo: 0x2c98, Hi: 0x2c98, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9a, Hi: 0x2c9a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9c, Hi: 0x2c9c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9e, Hi: 0x2c9e, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca0, Hi: 0x2ca0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca2, Hi: 0x2ca2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca4, Hi: 0x2ca4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca6, Hi: 0x2ca6, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca8, Hi: 0x2ca8, Stride: 0x1}, + unicode.Range16{Lo: 0x2caa, Hi: 0x2caa, Stride: 0x1}, + unicode.Range16{Lo: 0x2cac, Hi: 0x2cac, Stride: 0x1}, + unicode.Range16{Lo: 0x2cae, Hi: 0x2cae, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb0, Hi: 0x2cb0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb2, Hi: 0x2cb2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb4, Hi: 0x2cb4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb6, Hi: 0x2cb6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb8, Hi: 0x2cb8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cba, Hi: 0x2cba, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbc, Hi: 0x2cbc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbe, Hi: 0x2cbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc0, Hi: 0x2cc0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc2, Hi: 0x2cc2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc4, Hi: 0x2cc4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc6, Hi: 0x2cc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc8, Hi: 0x2cc8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cca, Hi: 0x2cca, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccc, Hi: 0x2ccc, 
Stride: 0x1}, + unicode.Range16{Lo: 0x2cce, Hi: 0x2cce, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd0, Hi: 0x2cd0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd2, Hi: 0x2cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd4, Hi: 0x2cd4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd6, Hi: 0x2cd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd8, Hi: 0x2cd8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cda, Hi: 0x2cda, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdc, Hi: 0x2cdc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cde, Hi: 0x2cde, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce0, Hi: 0x2ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce2, Hi: 0x2ce2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2ceb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ced, Hi: 0x2ced, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf2, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa640, Stride: 0x1}, + unicode.Range16{Lo: 0xa642, Hi: 0xa642, Stride: 0x1}, + unicode.Range16{Lo: 0xa644, Hi: 0xa644, Stride: 0x1}, + unicode.Range16{Lo: 0xa646, Hi: 0xa646, Stride: 0x1}, + unicode.Range16{Lo: 0xa648, Hi: 0xa648, Stride: 0x1}, + unicode.Range16{Lo: 0xa64a, Hi: 0xa64a, Stride: 0x1}, + unicode.Range16{Lo: 0xa64c, Hi: 0xa64c, Stride: 0x1}, + unicode.Range16{Lo: 0xa64e, Hi: 0xa64e, Stride: 0x1}, + unicode.Range16{Lo: 0xa650, Hi: 0xa650, Stride: 0x1}, + unicode.Range16{Lo: 0xa652, Hi: 0xa652, Stride: 0x1}, + unicode.Range16{Lo: 0xa654, Hi: 0xa654, Stride: 0x1}, + unicode.Range16{Lo: 0xa656, Hi: 0xa656, Stride: 0x1}, + unicode.Range16{Lo: 0xa658, Hi: 0xa658, Stride: 0x1}, + unicode.Range16{Lo: 0xa65a, Hi: 0xa65a, Stride: 0x1}, + unicode.Range16{Lo: 0xa65c, Hi: 0xa65c, Stride: 0x1}, + unicode.Range16{Lo: 0xa65e, Hi: 0xa65e, Stride: 0x1}, + unicode.Range16{Lo: 0xa660, Hi: 0xa660, Stride: 0x1}, + unicode.Range16{Lo: 0xa662, Hi: 0xa662, Stride: 0x1}, + unicode.Range16{Lo: 0xa664, Hi: 0xa664, Stride: 0x1}, + unicode.Range16{Lo: 0xa666, Hi: 0xa666, Stride: 0x1}, + unicode.Range16{Lo: 0xa668, Hi: 0xa668, Stride: 0x1}, + unicode.Range16{Lo: 0xa66a, Hi: 0xa66a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66c, Hi: 0xa66c, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa680, Stride: 0x1}, + unicode.Range16{Lo: 0xa682, Hi: 0xa682, Stride: 0x1}, + unicode.Range16{Lo: 0xa684, Hi: 0xa684, Stride: 0x1}, + unicode.Range16{Lo: 0xa686, Hi: 0xa686, Stride: 0x1}, + unicode.Range16{Lo: 0xa688, Hi: 0xa688, Stride: 0x1}, + unicode.Range16{Lo: 0xa68a, Hi: 0xa68a, Stride: 0x1}, + unicode.Range16{Lo: 0xa68c, Hi: 0xa68c, Stride: 0x1}, + unicode.Range16{Lo: 0xa68e, Hi: 0xa68e, Stride: 0x1}, + unicode.Range16{Lo: 0xa690, Hi: 0xa690, Stride: 0x1}, + unicode.Range16{Lo: 0xa692, Hi: 0xa692, Stride: 0x1}, + unicode.Range16{Lo: 0xa694, Hi: 0xa694, Stride: 0x1}, + unicode.Range16{Lo: 0xa696, Hi: 0xa696, Stride: 0x1}, + unicode.Range16{Lo: 0xa698, Hi: 0xa698, Stride: 0x1}, + unicode.Range16{Lo: 0xa69a, Hi: 0xa69a, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa722, Stride: 0x1}, + unicode.Range16{Lo: 0xa724, Hi: 0xa724, Stride: 0x1}, + unicode.Range16{Lo: 0xa726, Hi: 0xa726, Stride: 0x1}, + unicode.Range16{Lo: 0xa728, Hi: 0xa728, Stride: 0x1}, + unicode.Range16{Lo: 0xa72a, Hi: 0xa72a, Stride: 0x1}, + unicode.Range16{Lo: 0xa72c, Hi: 0xa72c, Stride: 0x1}, + unicode.Range16{Lo: 0xa72e, Hi: 0xa72e, Stride: 0x1}, + unicode.Range16{Lo: 0xa732, Hi: 0xa732, Stride: 0x1}, + unicode.Range16{Lo: 0xa734, Hi: 0xa734, Stride: 0x1}, + unicode.Range16{Lo: 0xa736, Hi: 0xa736, Stride: 0x1}, + unicode.Range16{Lo: 0xa738, Hi: 0xa738, Stride: 0x1}, + unicode.Range16{Lo: 0xa73a, Hi: 0xa73a, Stride: 0x1}, + 
unicode.Range16{Lo: 0xa73c, Hi: 0xa73c, Stride: 0x1}, + unicode.Range16{Lo: 0xa73e, Hi: 0xa73e, Stride: 0x1}, + unicode.Range16{Lo: 0xa740, Hi: 0xa740, Stride: 0x1}, + unicode.Range16{Lo: 0xa742, Hi: 0xa742, Stride: 0x1}, + unicode.Range16{Lo: 0xa744, Hi: 0xa744, Stride: 0x1}, + unicode.Range16{Lo: 0xa746, Hi: 0xa746, Stride: 0x1}, + unicode.Range16{Lo: 0xa748, Hi: 0xa748, Stride: 0x1}, + unicode.Range16{Lo: 0xa74a, Hi: 0xa74a, Stride: 0x1}, + unicode.Range16{Lo: 0xa74c, Hi: 0xa74c, Stride: 0x1}, + unicode.Range16{Lo: 0xa74e, Hi: 0xa74e, Stride: 0x1}, + unicode.Range16{Lo: 0xa750, Hi: 0xa750, Stride: 0x1}, + unicode.Range16{Lo: 0xa752, Hi: 0xa752, Stride: 0x1}, + unicode.Range16{Lo: 0xa754, Hi: 0xa754, Stride: 0x1}, + unicode.Range16{Lo: 0xa756, Hi: 0xa756, Stride: 0x1}, + unicode.Range16{Lo: 0xa758, Hi: 0xa758, Stride: 0x1}, + unicode.Range16{Lo: 0xa75a, Hi: 0xa75a, Stride: 0x1}, + unicode.Range16{Lo: 0xa75c, Hi: 0xa75c, Stride: 0x1}, + unicode.Range16{Lo: 0xa75e, Hi: 0xa75e, Stride: 0x1}, + unicode.Range16{Lo: 0xa760, Hi: 0xa760, Stride: 0x1}, + unicode.Range16{Lo: 0xa762, Hi: 0xa762, Stride: 0x1}, + unicode.Range16{Lo: 0xa764, Hi: 0xa764, Stride: 0x1}, + unicode.Range16{Lo: 0xa766, Hi: 0xa766, Stride: 0x1}, + unicode.Range16{Lo: 0xa768, Hi: 0xa768, Stride: 0x1}, + unicode.Range16{Lo: 0xa76a, Hi: 0xa76a, Stride: 0x1}, + unicode.Range16{Lo: 0xa76c, Hi: 0xa76c, Stride: 0x1}, + unicode.Range16{Lo: 0xa76e, Hi: 0xa76e, Stride: 0x1}, + unicode.Range16{Lo: 0xa779, Hi: 0xa779, Stride: 0x1}, + unicode.Range16{Lo: 0xa77b, Hi: 0xa77b, Stride: 0x1}, + unicode.Range16{Lo: 0xa77d, Hi: 0xa77e, Stride: 0x1}, + unicode.Range16{Lo: 0xa780, Hi: 0xa780, Stride: 0x1}, + unicode.Range16{Lo: 0xa782, Hi: 0xa782, Stride: 0x1}, + unicode.Range16{Lo: 0xa784, Hi: 0xa784, Stride: 0x1}, + unicode.Range16{Lo: 0xa786, Hi: 0xa786, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78b, Stride: 0x1}, + unicode.Range16{Lo: 0xa78d, Hi: 0xa78d, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa790, Stride: 0x1}, + unicode.Range16{Lo: 0xa792, Hi: 0xa792, Stride: 0x1}, + unicode.Range16{Lo: 0xa796, Hi: 0xa796, Stride: 0x1}, + unicode.Range16{Lo: 0xa798, Hi: 0xa798, Stride: 0x1}, + unicode.Range16{Lo: 0xa79a, Hi: 0xa79a, Stride: 0x1}, + unicode.Range16{Lo: 0xa79c, Hi: 0xa79c, Stride: 0x1}, + unicode.Range16{Lo: 0xa79e, Hi: 0xa79e, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a0, Hi: 0xa7a0, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a2, Hi: 0xa7a2, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a4, Hi: 0xa7a4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a6, Hi: 0xa7a6, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a8, Hi: 0xa7a8, Stride: 0x1}, + unicode.Range16{Lo: 0xa7aa, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b6, Hi: 0xa7b6, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10400, Hi: 0x10427, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118bf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d419, Stride: 0x1}, + unicode.Range32{Lo: 0x1d434, Hi: 0x1d44d, Stride: 0x1}, + unicode.Range32{Lo: 0x1d468, Hi: 0x1d481, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49c, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, 
Hi: 0x1d4ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4d0, Hi: 0x1d4e9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d504, Hi: 0x1d505, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d538, Hi: 0x1d539, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d56c, Hi: 0x1d585, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5a0, Hi: 0x1d5b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5d4, Hi: 0x1d5ed, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d608, Hi: 0x1d621, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d63c, Hi: 0x1d655, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d670, Hi: 0x1d689, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6e2, Hi: 0x1d6fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d71c, Hi: 0x1d734, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d756, Hi: 0x1d76e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d790, Hi: 0x1d7a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ca, Hi: 0x1d7ca, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e900, Hi: 0x1e921, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+	},
+	LatinOffset: 3,
+}
+
+type _SentenceRuneRange unicode.RangeTable
+
+func _SentenceRuneType(r rune) *_SentenceRuneRange {
+	switch {
+	case unicode.Is(_SentenceATerm, r):
+		return (*_SentenceRuneRange)(_SentenceATerm)
+	case unicode.Is(_SentenceCR, r):
+		return (*_SentenceRuneRange)(_SentenceCR)
+	case unicode.Is(_SentenceClose, r):
+		return (*_SentenceRuneRange)(_SentenceClose)
+	case unicode.Is(_SentenceExtend, r):
+		return (*_SentenceRuneRange)(_SentenceExtend)
+	case unicode.Is(_SentenceFormat, r):
+		return (*_SentenceRuneRange)(_SentenceFormat)
+	case unicode.Is(_SentenceLF, r):
+		return (*_SentenceRuneRange)(_SentenceLF)
+	case unicode.Is(_SentenceLower, r):
+		return (*_SentenceRuneRange)(_SentenceLower)
+	case unicode.Is(_SentenceNumeric, r):
+		return (*_SentenceRuneRange)(_SentenceNumeric)
+	case unicode.Is(_SentenceOLetter, r):
+		return (*_SentenceRuneRange)(_SentenceOLetter)
+	case unicode.Is(_SentenceSContinue, r):
+		return (*_SentenceRuneRange)(_SentenceSContinue)
+	case unicode.Is(_SentenceSTerm, r):
+		return (*_SentenceRuneRange)(_SentenceSTerm)
+	case unicode.Is(_SentenceSep, r):
+		return (*_SentenceRuneRange)(_SentenceSep)
+	case unicode.Is(_SentenceSp, r):
+		return (*_SentenceRuneRange)(_SentenceSp)
+	case unicode.Is(_SentenceUpper, r):
+		return (*_SentenceRuneRange)(_SentenceUpper)
+	default:
+		return nil
+	}
+}
+func (rng *_SentenceRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _SentenceATerm:
+		return "ATerm"
+	case _SentenceCR:
+		return "CR"
+	case _SentenceClose:
+		return "Close"
+	case _SentenceExtend:
+		return "Extend"
+	case _SentenceFormat:
+		return "Format"
+	case _SentenceLF:
+		return "LF"
+	case _SentenceLower:
+		return "Lower"
+	case _SentenceNumeric:
+		return "Numeric"
+	case _SentenceOLetter:
+		return "OLetter"
+	case _SentenceSContinue:
+		return "SContinue"
+	case _SentenceSTerm:
+		return "STerm"
+	case _SentenceSep:
+		return "Sep"
+	case _SentenceSp:
+		return "Sp"
+	case _SentenceUpper:
+		return "Upper"
+	default:
+		return "Other"
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go
new file mode 100644
index 00000000000..6b14bef0e25
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go
@@ -0,0 +1,19 @@
+package textseg
+
+import "unicode/utf8"
+
+// ScanUTF8Sequences is a split function for bufio.Scanner that splits
+// on UTF8 sequence boundaries.
+//
+// This is included largely for completeness, since this behavior is already
+// built in to Go when ranging over a string.
+func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
+	if len(data) == 0 {
+		return 0, nil, nil
+	}
+	r, seqLen := utf8.DecodeRune(data)
+	if r == utf8.RuneError && !atEOF {
+		return 0, nil, nil
+	}
+	return seqLen, data[:seqLen], nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/LICENSE
new file mode 100644
index 00000000000..a5df10e675d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/radix.go b/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/radix.go
new file mode 100644
index 00000000000..e2bb22eb91d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/armon/go-radix/radix.go
@@ -0,0 +1,540 @@
+package radix
+
+import (
+	"sort"
+	"strings"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(s string, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	key string
+	val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *node
+}
+
+type node struct {
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix string
+
+	// Edges should be stored in-order for iteration.
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+	n.edges = append(n.edges, e)
+	n.edges.Sort()
+}
+
+func (n *node) updateEdge(label byte, node *node) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		n.edges[idx].node = node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return n.edges[idx].node
+	}
+	return nil
+}
+
+func (n *node) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// Insert is used to add a new entry or update
+// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			}
+
+			n.leaf = &leafNode{
+				key: s,
+				val: v,
+			}
+			t.size++
+			return nil, false
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.updateEdge(search[0], child)
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add it to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	var parent *node
+	var label byte
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		parent = n
+		label = search[0]
+		n = n.getEdge(label)
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should delete this node from the parent
+	if parent != nil && len(n.edges) == 0 {
+		parent.delEdge(label)
+	}
+
+	// Check if we should merge this node
+	if n != t.root && len(n.edges) == 1 {
+		n.mergeChild()
+	}
+
+	// Check if we should merge the parent's other child
+	if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+		parent.mergeChild()
+	}
+
+	return leaf.val, true
+}
+
+// DeletePrefix is used to delete the subtree under a prefix.
+// Returns how many nodes were deleted.
+// Use this to delete large subtrees efficiently.
+func (t *Tree) DeletePrefix(s string) int {
+	return t.deletePrefix(nil, t.root, s)
+}
+
+// deletePrefix does a recursive deletion
+func (t *Tree) deletePrefix(parent, n *node, prefix string) int {
+	// Check for key exhaustion
+	if len(prefix) == 0 {
+		// Remove the leaf node
+		subTreeSize := 0
+		// recursively walk from all edges of the node to be deleted
+		recursiveWalk(n, func(s string, v interface{}) bool {
+			subTreeSize++
+			return false
+		})
+		if n.isLeaf() {
+			n.leaf = nil
+		}
+		n.edges = nil // deletes the entire subtree
+
+		// Check if we should merge the parent's other child
+		if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+			parent.mergeChild()
+		}
+		t.size -= subTreeSize
+		return subTreeSize
+	}
+
+	// Look for an edge
+	label := prefix[0]
+	child := n.getEdge(label)
+	if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
+		return 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(prefix) {
+		prefix = prefix[len(prefix):]
+	} else {
+		prefix = prefix[len(child.prefix):]
+	}
+	return t.deletePrefix(n, child, prefix)
+}
+
+func (n *node) mergeChild() {
+	e := n.edges[0]
+	child := e.node
+	n.prefix = n.prefix + child.prefix
+	n.leaf = child.leaf
+	n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		break
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+	out := make(map[string]interface{}, t.size)
+	t.Walk(func(k string, v interface{}) bool {
+		out[k] = v
+		return false
+	})
+	return out
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
index 56fdfc2bfc7..99849c0e19c 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -138,8 +138,27 @@ type RequestFailure interface {
 	RequestID() string
 }
 
-// NewRequestFailure returns a new request error wrapper for the given Error
-// provided.
+// NewRequestFailure returns a wrapped error with additional information for
+// the request status code and service request ID.
+//
+// Should be used to wrap all requests that involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
 func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
 	return newRequestError(err, statusCode, reqID)
 }
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+	awsError
+	Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+	return &unmarshalError{
+		awsError: New("UnmarshalError", msg, err),
+		bytes:    bytes,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
index 0202a008f5d..9cf7eaf4007 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -1,6 +1,9 @@
 package awserr
 
-import "fmt"
+import (
+	"encoding/hex"
+	"fmt"
+)
 
 // SprintError returns a string of the formatted error code.
 //
@@ -119,6 +122,7 @@ type requestError struct {
 	awsError
 	statusCode int
 	requestID  string
+	bytes      []byte
 }
 
 // newRequestError returns a wrapped error with additional information for
@@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error {
 	return []error{r.OrigErr()}
 }
 
+type unmarshalError struct {
+	awsError
+	bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error @@ -181,7 +208,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) + msg += e[i].Error() // We check the next index to see if it is within the slice. // If it is, then we append a newline. We do this, because unit tests // could be broken with the additional '\n' diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 11c52c38968..a4eb6a7f43a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer value = value.FieldByNameFunc(func(name string) bool { if c == name { return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + } else if !caseSensitive && strings.EqualFold(name, c) { return true } return false @@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. 
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
-		for _, rval := range rvals {
-			if rval.Kind() == reflect.Ptr && rval.IsNil() {
-				continue
-			}
-			setValue(rval, v)
+	rvals := rValuesAtPath(i, path, true, false, v == nil)
+	for _, rval := range rvals {
+		if rval.Kind() == reflect.Ptr && rval.IsNil() {
+			continue
 		}
+		setValue(rval, v)
 	}
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 70960538409..03334d69207 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -12,6 +12,7 @@ import (
 type Config struct {
 	Config        *aws.Config
 	Handlers      request.Handlers
+	PartitionID   string
 	Endpoint      string
 	SigningRegion string
 	SigningName   string
@@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
 	default:
 		maxRetries := aws.IntValue(cfg.MaxRetries)
 		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
-			maxRetries = 3
+			maxRetries = DefaultRetryerMaxNumRetries
 		}
 		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
 	}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index a397b0d044c..9f6af19dd45 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -1,6 +1,7 @@
 package client
 
 import (
+	"math"
 	"strconv"
 	"time"
 
@@ -9,82 +10,142 @@ import (
 )
 
 // DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
 //
-//	type retryer struct {
-//		client.DefaultRetryer
-//	}
-//
-//	// This implementation always has 100 max retries
-//	func (d retryer) MaxRetries() int { return 100 }
 type DefaultRetryer struct {
+	// NumMaxRetries is the maximum number of retries that will be performed.
+	// By default, this is zero.
	NumMaxRetries int
+
+	// MinRetryDelay is the minimum retry delay after which retry will be performed.
+	// If not set, the value is 0ns.
+	MinRetryDelay time.Duration
+
+	// MinThrottleDelay is the minimum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay.
+	// If not set, the value is 0ns.
+	MaxRetryDelay time.Duration
+
+	// MaxThrottleDelay is the maximum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MaxThrottleDelay time.Duration
 }
 
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
 // MaxRetries returns the maximum number of retries the service will use to make
 // an individual API request.
 func (d DefaultRetryer) MaxRetries() int {
 	return d.NumMaxRetries
 }
 
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	// Set the upper limit of delay in retrying at ~five minutes
-	minTime := 30
-	throttle := d.shouldThrottle(r)
-	if throttle {
-		if delay, ok := getRetryDelay(r); ok {
-			return delay
-		}
-		minTime = 500
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
 	}
 
 	retryCount := r.RetryCount
-	if throttle && retryCount > 8 {
-		retryCount = 8
-	} else if retryCount > 13 {
-		retryCount = 13
+
+	// maxDelay is the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
 	}
+	return delay + initialDelay
+}
 
-	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
-	return time.Duration(delay) * time.Millisecond
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
 }
 
 // ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+	if d.NumMaxRetries == 0 {
+		return false
+	}
+
 	// If one of the other handlers already set the retry state
 	// we don't want to override it based on the service's state
 	if r.Retryable != nil {
 		return *r.Retryable
 	}
-
-	if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
-		return true
-	}
-	return r.IsErrorRetryable() || d.shouldThrottle(r)
-}
-
-// ShouldThrottle returns true if the request should be throttled.
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
-	switch r.HTTPResponse.StatusCode {
-	case 429:
-	case 502:
-	case 503:
-	case 504:
-	default:
-		return r.IsErrorThrottle()
-	}
-
-	return true
+	return r.IsErrorRetryable() || r.IsErrorThrottle()
 }
 
 // This will look in the Retry-After header, RFC 7231, for how long
 // it will wait before attempting another request
-func getRetryDelay(r *request.Request) (time.Duration, bool) {
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
 	if !canUseRetryAfterHeader(r) {
 		return 0, false
 	}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
index 7b5e1276acf..8958c32d4e9 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -67,10 +67,14 @@ func logRequest(r *request.Request) {
 		if !bodySeekable {
 			r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
 		}
-		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
-		// Body as a NoOpCloser and will not be reset after read by the HTTP
-		// client reader.
-		r.ResetBody()
+		// Reset the request body because dumpRequest will re-wrap the
+		// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
+		// read by the HTTP client reader.
+		if err := r.Error; err != nil {
+			r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+				r.ClientInfo.ServiceName, r.Operation.Name, err))
+			return
+		}
 	}
 
 	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
index 920e9fddf87..0c48f72e08e 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -5,6 +5,7 @@ type ClientInfo struct {
 	ServiceName   string
 	ServiceID     string
 	APIVersion    string
+	PartitionID   string
 	Endpoint      string
 	SigningName   string
 	SigningRegion string
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 00000000000..881d575f010
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request; for NoOpRetryer this will always be zero.
+func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/config.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/config.go index 10634d173d3..93ebbcc13f8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -246,12 +246,18 @@ type Config struct { // Disabling this feature is useful when you want to use local endpoints // for testing that do not support the modeled host prefix pattern. DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), @@ -420,6 +426,20 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -520,6 +540,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableEndpointHostPrefix != nil { dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. 
If any additional
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
index ff5d58e0683..4e076c1837a 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
 	return dst
 }
 
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+	dst := make([]*int8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+	dst := make([]int8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+	dst := make(map[string]*int8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+	dst := make(map[string]int8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index f8853d78af2..0c60e612ea5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. + // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. 
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 894bbc7f82c..4af59215814 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,9 +50,10 @@ package credentials import ( "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" "sync" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -83,6 +84,12 @@ type Value struct { ProviderName string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. 
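As a usage sketch (not taken from this diff): the retryer rework above is easiest to see from the caller's side. The snippet below wires the new `DefaultRetryer` fields, or the new `NoOpRetryer`, into a session through `aws.Config`'s `Retryer` field (the `RequestRetryer` hook shown earlier in config.go). The region string and the 10s throttle cap are arbitrary placeholder values.

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Configure the reworked DefaultRetryer explicitly. Any delay field
	// left at zero is backfilled by setRetryerDefaults with the
	// DefaultRetryer* constants added in this diff.
	retryer := client.DefaultRetryer{
		NumMaxRetries:    client.DefaultRetryerMaxNumRetries,
		MinRetryDelay:    client.DefaultRetryerMinRetryDelay,
		MinThrottleDelay: client.DefaultRetryerMinThrottleDelay,
		MaxRetryDelay:    client.DefaultRetryerMaxRetryDelay,
		MaxThrottleDelay: 10 * time.Second, // placeholder: tighter cap when throttled
	}

	sess := session.Must(session.NewSession(&aws.Config{
		Region:  aws.String("us-east-1"), // placeholder region
		Retryer: retryer,
	}))

	// Or disable retries entirely with the new NoOpRetryer.
	noRetry := session.Must(session.NewSession(&aws.Config{
		Region:  aws.String("us-east-1"),
		Retryer: client.NoOpRetryer{},
	}))

	_, _ = sess, noRetry
}
```

Note how `RetryRules` swaps the throttle bounds in when `IsErrorThrottle` reports a throttling error, so the two `Max*Delay` fields cap different failure modes independently.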
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 0ed791be641..43d4ed386ab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) @@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index ace51313820..1a7af53a4da 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,6 +39,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -97,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. 
 func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
 	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
 }
@@ -174,7 +175,7 @@ func unmarshalHandler(r *request.Request) {
 	out := r.Data.(*getCredentialsOutput)
 	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
-		r.Error = awserr.New("SerializationError",
+		r.Error = awserr.New(request.ErrCodeSerialization,
 			"failed to decode endpoint credentials",
 			err,
 		)
@@ -185,11 +186,15 @@ func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 
 	var errOut errorOutput
-	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
-		r.Error = awserr.New("SerializationError",
-			"failed to decode endpoint credentials",
-			err,
+	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to decode error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
 		)
+		return
 	}
 
 	// Response body format is not consistent between metadata endpoints.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index b6dbfd2467d..2e528d130d4 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -200,7 +200,7 @@ type AssumeRoleProvider struct {
 	// by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must
 	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
 	// With a MaxJitterFrac value of 0 (the default), no jitter will be used.
-	//
+	//
 	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
 	// AssumeRole call will be made with an arbitrary Duration between 27m and
 	// 30m.
@@ -258,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
 	// Apply defaults where parameters are not set.
 	if p.RoleSessionName == "" {
 		// Try to work out a role name that will hopefully end up unique.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 00000000000..b20b6339484
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,100 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare test values.
+var now = time.Now
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	client       stsiface.STSAPI
+	ExpiryWindow time.Duration
+
+	tokenFilePath   string
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return &WebIdentityRoleProvider{
+		client:          svc,
+		tokenFilePath:   path,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+}
+
+// Retrieve will attempt to assume a role using the token located at the
+// provider's configured token file path. If the token cannot be read, an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	b, err := ioutil.ReadFile(p.tokenFilePath)
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	})
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming a role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
index 152d785b362..25a66d1dda2 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -1,30 +1,61 @@
-// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
-// via UDP connection. Using the Start function will enable the reporting of
-// metrics on a given port. If Start is called, with different parameters, again,
-// a panic will occur.
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
 //
-// Pause can be called to pause any metrics publishing on a given port. Sessions
-// that have had their handlers modified via InjectHandlers may still be used.
-// However, the handlers will act as a no-op meaning no metrics will be published.
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//	* AWS_CSM_PORT=<port number>
+//		The port number the CSM agent will receive metrics on.
+//
+//	* AWS_CSM_HOST=<hostname, or ip>
+//		The hostname, or IP address the CSM agent will receive metrics on.
+//		Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with different ClientID or address it will panic.
 //
-// Example:
 //	r, err := csm.Start("clientID", ":31000")
 //	if err != nil {
 //		panic(fmt.Errorf("failed starting CSM: %v", err))
 //	}
 //
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
 //	sess, err := session.NewSession(&aws.Config{})
 //	if err != nil {
 //		panic(fmt.Errorf("failed loading session: %v", err))
 //	}
 //
+//	// Add CSM client's metric publishing request handlers to the SDK's
+//	// Session Configuration.
 //	r.InjectHandlers(&sess.Handlers)
 //
-//	client := s3.New(sess)
-//	resp, err := client.GetObject(&s3.GetObjectInput{
-//		Bucket: aws.String("bucket"),
-//		Key: aws.String("key"),
-//	})
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//	// Get the CSM client Reporter.
+//	r := csm.Get()
 //
 //	// Will pause monitoring
 //	r.Pause()
@@ -35,12 +66,4 @@
 //
 //	// Resume monitoring
 //	r.Continue()
-//
-// Start returns a Reporter that is used to enable or disable monitoring. If
-// access to the Reporter is required later, calling Get will return the Reporter
-// singleton.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
index 2f0c6eac9a8..4b19e2800e3 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -2,6 +2,7 @@ package csm
 import (
 	"fmt"
+	"strings"
 	"sync"
 )
@@ -9,19 +10,40 @@ var (
 	lock sync.Mutex
 )
-// Client side metric handler names
 const (
-	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
-	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
 )
-// Start will start the a long running go routine to capture
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost", it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon.
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
 // client side metrics. Calling Start multiple times will only
 // start the metric listener once and will panic if a different
 // client ID or port is passed in.
 //
-// Example:
-//     r, err := csm.Start("clientID", "127.0.0.1:8094")
+//     r, err := csm.Start("clientID", "127.0.0.1:31000")
 //     if err != nil {
 //         panic(fmt.Errorf("expected no error, but received %v", err))
 //     }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
index 514fc3739a5..82a3e345e93 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -16,25 +16,26 @@ var (
 type metricChan struct {
 	ch     chan metric
-	paused int64
+	paused *int64
 }
 func newMetricChan(size int) metricChan {
 	return metricChan{
-		ch: make(chan metric, size),
+		ch:     make(chan metric, size),
+		paused: new(int64),
 	}
 }
 func (ch *metricChan) Pause() {
-	atomic.StoreInt64(&ch.paused, pausedEnum)
+	atomic.StoreInt64(ch.paused, pausedEnum)
 }
 func (ch *metricChan) Continue() {
-	atomic.StoreInt64(&ch.paused, runningEnum)
+	atomic.StoreInt64(ch.paused, runningEnum)
 }
 func (ch *metricChan) IsPaused() bool {
-	v := atomic.LoadInt64(&ch.paused)
+	v := atomic.LoadInt64(ch.paused)
 	return v == pausedEnum
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
index 0b5571acfbf..9186587fc04 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -10,11 +10,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/request"
 )
-const (
-	// DefaultPort is used when no port is specified
-	DefaultPort = "31000"
-)
-
 // Reporter will gather metrics of API requests made and
 // send those metrics to the CSM endpoint.
 type Reporter struct {
@@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
 		XAmzRequestID: aws.String(r.RequestID),
-		AttemptCount:   aws.Int(r.RetryCount + 1),
 		AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
 		AccessKey:      aws.String(creds.AccessKeyID),
 	}
@@ -96,7 +90,7 @@ func getMetricException(err awserr.Error) metricException {
 	switch code {
 	case "RequestError",
-		"SerializationError",
+		request.ErrCodeSerialization,
 		request.CanceledErrorCode:
 		return sdkException{
 			requestException{exception: code, message: msg},
 		}
@@ -123,7 +117,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
 		Type:               aws.String("ApiCall"),
 		AttemptCount:       aws.Int(r.RetryCount + 1),
 		Region:             r.Config.Region,
-		Latency:            aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+		Latency:            aws.Int(int(time.Since(r.Time) / time.Millisecond)),
 		XAmzRequestID:      aws.String(r.RequestID),
 		MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
 	}
@@ -190,8 +184,9 @@ func (rep *Reporter) start() {
 	}
 }
-// Pause will pause the metric channel preventing any new metrics from
-// being added.
+// Pause will pause the metric channel, preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but
+// calling it concurrently with Continue can lead to unexpected state.
 func (rep *Reporter) Pause() {
 	lock.Lock()
 	defer lock.Unlock()
@@ -203,8 +198,9 @@ func (rep *Reporter) Pause() {
 	rep.close()
 }
-// Continue will reopen the metric channel and allow for monitoring
-// to be resumed.
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// calling it concurrently with Pause can lead to unexpected state.
 func (rep *Reporter) Continue() {
 	lock.Lock()
 	defer lock.Unlock()
@@ -219,10 +215,18 @@ func (rep *Reporter) Continue() {
 	rep.metricsCh.Continue()
 }
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
// InjectHandlers will enable client side metrics and inject the proper
 // handlers to handle how metrics are sent.
 //
-// Example:
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+// // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index d57a1af5992..d126764ce4e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -82,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -101,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } @@ -152,18 +152,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index f4438eae9c9..4c5636e3500 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) return } @@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) { defer 
r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) return } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 87b9ff3ffec..343a2106f81 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) custFixAppAutoscalingUsGov(p) @@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) { custAddDualstack(p, "s3-control") } +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + func custAddDualstack(p *partition, svcName string) { s, ok := p.Services[svcName] if !ok { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index a7c5fc669e6..de07715d57a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -11,6 +11,8 @@ const ( AwsPartitionID = "aws" // AWS Standard partition. AwsCnPartitionID = "aws-cn" // AWS China partition. AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. ) // AWS Standard partition's regions. @@ -27,6 +29,7 @@ const ( EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -46,8 +49,18 @@ const ( UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // Use DefaultPartitions() to get the list of the default partitions. 
func DefaultResolver() Resolver { @@ -55,7 +68,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -69,6 +82,8 @@ var defaultPartitions = partitions{ awsPartition, awscnPartition, awsusgovPartition, + awsisoPartition, + awsisobPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -82,7 +97,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") return reg }(), }, @@ -128,6 +143,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "EU (Paris)", }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -166,6 +184,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -178,6 +197,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -189,6 +209,8 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -270,6 +292,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "sa-east-1": endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -308,6 +336,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -327,6 +356,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -334,8 +364,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -381,6 +415,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -409,6 +444,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -416,6 +452,25 @@ var awsPartition = 
partition{ "us-west-2": endpoint{}, }, }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -430,8 +485,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "appsync": service{ @@ -453,6 +514,7 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -460,10 +522,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -484,6 +550,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -515,9 +582,33 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -529,6 +620,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -584,6 +676,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -619,6 +712,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -662,6 +756,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -673,6 +768,8 @@ var awsPartition = partition{ "eu-west-1": 
endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -709,6 +806,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -719,6 +817,7 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -726,9 +825,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -770,6 +871,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -779,11 +881,12 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codedeploy": service{ @@ -801,6 +904,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -842,6 +946,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -938,10 +1043,13 @@ var awsPartition = partition{ "comprehendmedical": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -959,6 +1067,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -966,6 +1075,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cur": service{ Endpoints: endpoints{ @@ -985,6 +1104,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, 
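These generated tables are not consumed directly; callers go through the endpoints resolver. A quick sketch of resolving one of the regions added in this revision, assuming the vendored endpoints package:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // me-south-1 (Bahrain) is among the regions added throughout this file.
        ep, err := endpoints.DefaultResolver().EndpointFor("s3", "me-south-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(ep.URL) // e.g. https://s3.me-south-1.amazonaws.com
    }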
"datapipeline": service{ Endpoints: endpoints{ @@ -1002,12 +1137,40 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dax": service{ @@ -1019,6 +1182,8 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1035,6 +1200,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1046,6 +1212,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1074,6 +1241,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1096,6 +1264,24 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1108,6 +1294,18 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1131,6 +1329,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1138,6 +1337,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, @@ -1159,11 +1359,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1171,11 +1377,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "ec2": service{ @@ -1195,6 +1426,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1228,6 +1460,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1256,16 +1489,18 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1277,6 +1512,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1289,12 +1525,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1318,6 +1556,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, 
"us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1345,6 +1584,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -1409,11 +1649,12 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "events": service{ @@ -1431,6 +1672,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1441,6 +1683,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1448,9 +1691,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1465,53 +1710,85 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "fsx": service{ + "forecast": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "gamelift": service{ + "forecastquery": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "fsx": service{ + Endpoints: endpoints{ - "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1523,6 +1800,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1533,6 +1811,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1540,9 +1819,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1556,19 +1838,32 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1576,14 +1871,40 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1628,7 +1949,9 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1642,16 +1965,23 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": 
endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1666,6 +1996,123 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -1681,6 +2128,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, @@ -1691,11 +2139,19 @@ var awsPartition = partition{ "kinesisanalytics": service{ Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "kinesisvideo": service{ @@ -1712,12 +2168,6 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1730,6 +2180,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1737,6 +2188,19 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -1752,6 +2216,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1762,16 +2227,23 @@ var awsPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1808,6 +2280,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1837,6 +2310,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1876,9 +2350,13 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1892,6 +2370,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ 
-1931,6 +2410,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1979,6 +2459,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1991,26 +2472,53 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "us-east-1": endpoint{}, - }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, }, "neptune": service{ @@ -2045,12 +2553,24 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2063,6 +2583,12 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2083,6 +2609,65 @@ var awsPartition = partition{ }, }, }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + 
"eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -2166,6 +2751,107 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -2181,6 +2867,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, 
"us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -2205,6 +2892,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2218,10 +2906,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2240,19 +2932,47 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "robomaker": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2311,6 +3031,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2318,16 +3039,44 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "s3": service{ - PartitionEndpoint: "us-east-1", + PartitionEndpoint: "aws-global", IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2352,6 +3101,12 @@ var 
awsPartition = partition{ Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, @@ -2359,8 +3114,9 @@ var awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2372,10 +3128,7 @@ var awsPartition = partition{ Hostname: "s3.sa-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{ Hostname: "s3.us-west-1.amazonaws.com", @@ -2538,6 +3291,19 @@ var awsPartition = partition{ }, }, }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2559,6 +3325,7 @@ var awsPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2570,6 +3337,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -2604,6 +3372,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2611,9 +3380,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2626,6 +3397,9 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, "ap-northeast-1": endpoint{ Protocols: []string{"https"}, }, @@ -2659,6 +3433,9 @@ var awsPartition = partition{ "eu-west-3": endpoint{ Protocols: []string{"https"}, }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, "sa-east-1": endpoint{ Protocols: []string{"https"}, }, @@ -2724,6 +3501,7 @@ var awsPartition = partition{ "servicediscovery": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2731,9 +3509,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2741,6 +3521,16 @@ var awsPartition = partition{ 
"us-west-2": endpoint{}, }, }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ @@ -2754,6 +3544,7 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2765,6 +3556,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2776,6 +3568,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2808,6 +3601,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2857,7 +3651,8 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -2881,6 +3676,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2903,6 +3699,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2913,6 +3710,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2924,6 +3722,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2945,11 +3744,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -2957,47 +3762,63 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + 
"us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "sts": service{ PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3028,9 +3849,15 @@ var awsPartition = partition{ }, }, "support": service{ + PartitionEndpoint: "aws-global", Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "swf": service{ @@ -3048,6 +3875,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3070,6 +3898,30 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3077,6 +3929,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "transfer": service{ Endpoints: endpoints{ @@ -3087,9 +3950,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, 
"ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3101,7 +3966,11 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -3145,12 +4014,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3197,6 +4070,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3208,6 +4082,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3293,6 +4168,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -3348,6 +4230,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3473,6 +4361,21 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -3493,7 +4396,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -3503,6 +4407,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -3510,6 +4421,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -3660,6 +4578,18 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, "swf": service{ Endpoints: endpoints{ @@ -3674,6 +4604,31 @@ var awscnPartition = 
partition{ "cn-northwest-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, }, } @@ -3713,6 +4668,15 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -3755,13 +4719,31 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "athena": service{ - + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, - "autoscaling": service{ + "athena": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, @@ -3807,9 +4789,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "codecommit": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3847,6 +4837,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3872,6 +4874,12 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -3973,6 +4981,7 @@ var awsusgovPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3987,6 +4996,16 @@ var awsusgovPartition = partition{ }, "glue": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -4000,6 +5019,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4084,6 +5109,7 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4094,6 +5120,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4113,6 +5156,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -4133,6 +5183,45 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -4196,6 +5285,50 @@ var awsusgovPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4257,6 +5390,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -4287,6 +5426,14 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + 
"us-gov-west-1": endpoint{}, + }, + }, "translate": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -4315,3 +5462,630 @@ var awsusgovPartition = partition{ }, }, } + +// AwsIsoPartition returns the Resolver for AWS ISO (US). +func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": 
endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index f82babf6f95..1f53d9cb686 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -46,6 +47,108 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. 
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. @@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -170,10 +279,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + // ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } @@ -191,7 +303,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. 
// @@ -347,6 +459,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. SigningRegion string diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 00000000000..df75e899adb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index ff6f76db6eb..eb2ac83c992 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -54,8 +54,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } @@ -74,24 +75,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil } func serviceList(ss services) []string { @@ -200,7 +233,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -208,11 +241,23 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op merged.mergeIn(e) e = merged - hostname := e.Hostname + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname // Offset the hostname for dualstack if enabled if opts.UseDualStack && e.HasDualStack == boxedTrue { hostname = e.DualStackHostname + region = signingRegion } u := strings.Replace(hostname, "{service}", service, 1) @@ -222,20 +267,9 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index 271da432ce1..d9b37f4d32a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + "strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), 
"broken pipe") { + return true } return false diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca4373..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 8ef8548a96d..185b073181e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -23,7 +23,7 @@ type Handlers struct { Complete HandlerList } -// Copy returns of this handler's lists. +// Copy returns a copy of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), @@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers { } } -// Clear removes callback functions for all handlers +// Clear removes callback functions for all handlers. func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() @@ -59,6 +59,51 @@ func (h *Handlers) Clear() { h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. 
type HandlerListRunItem struct { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index b0c2ef4fe67..9370fa50c38 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 8f2eb3e43c5..52178141da6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -64,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -90,8 +99,12 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// New returns a new Request pointer for the service API -// operation and parameters. +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. // // Params is any value of input parameters to be the request payload. 
// Data is pointer value to an object which the request's response @@ -99,6 +112,10 @@ type Operation struct { func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + if retryer == nil { + retryer = noOpRetryer{} + } + method := operation.HTTPMethod if method == "" { method = "POST" @@ -231,6 +248,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -259,7 +280,18 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } @@ -330,16 +362,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -358,12 +389,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -379,7 +410,7 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } @@ -387,12 +418,16 @@ func (r *Request) Sign() error { return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
Go 1.8 removed the automatic detection @@ -409,10 +444,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -473,29 +508,28 @@ func (r *Request) Send() error { r.AttemptTime = time.Now() if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", false, err) + debugLogReqError(r, "Sign Request", notRetrying, err) return err } if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryCancel(r.Error) { - return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } - r.prepareRetry() - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } } } -func (r *Request) prepareRetry() { +func (r *Request) prepareRetry() error { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) @@ -506,12 +540,19 @@ func (r *Request) prepareRetry() { // the request's body even though the Client's Do returned. r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } // Closing response body to ensure that no response body is leaked // between retry attempts. if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { r.HTTPResponse.Body.Close() } + + return nil } func (r *Request) sendRequest() (sendErr error) { @@ -520,7 +561,9 @@ func (r *Request) sendRequest() (sendErr error) { r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -528,13 +571,17 @@ func (r *Request) sendRequest() (sendErr error) { r.Handlers.ValidateResponse.Run(r) if r.Error != nil { r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } r.Handlers.Unmarshal.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -561,48 +608,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryCancel(err error) bool { - switch err := err.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryCancel(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryCancel(err.Err) - case temporary: - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retriable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 7c6a8000f67..de1292f45a2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index a633ed5acfa..64784e16f3d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... +// // break out of loop to stop fetching additional pages // } +// // return p.Err() // // See service client API operation Pages methods for examples how the SDK will @@ -146,7 +148,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index d0aa54c6d10..8015acc67eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,26 +1,75 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. 
-// The default implementation used by most services is the client.DefaultRetryer -structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface to drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determining if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules returns the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts that are made for a request. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } cfg.Retryer = retryer return cfg + +} + +// noOpRetryer is an internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the maximum number of retries the service will make for +// an individual API request; for noOpRetryer, MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 } // retryableCodes is a collection of service response codes which are retry-able @@ -76,10 +125,6 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } -type temporaryError interface { - Temporary() bool -} - func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -98,7 +143,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporaryError); ok { + if t, ok := err.(temporary); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -108,32 +153,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil.
@@ -108,32 +153,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
 // IsErrorRetryable returns whether the error is retryable, based on its Code.
 // Returns false if error is nil.
 func IsErrorRetryable(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+	if err == nil {
+		return false
+	}
+	return shouldRetryError(err)
+}
+
+type temporary interface {
+	Temporary() bool
+}
+
+func shouldRetryError(origErr error) bool {
+	switch err := origErr.(type) {
+	case awserr.Error:
+		if err.Code() == CanceledErrorCode {
+			return false
 		}
+		if isNestedErrorRetryable(err) {
+			return true
+		}
+
+		origErr := err.OrigErr()
+		var shouldRetry bool
+		if origErr != nil {
+			shouldRetry = shouldRetryError(origErr)
+			if err.Code() == "RequestError" && !shouldRetry {
+				return false
+			}
+		}
+		if isCodeRetryable(err.Code()) {
+			return true
+		}
+		return shouldRetry
+
+	case *url.Error:
+		if strings.Contains(err.Error(), "connection refused") {
+			// Refused connections should be retried as the service may not yet
+			// be running on the port. Go TCP dial considers refused
+			// connections as not temporary.
+			return true
+		}
+		// *url.Error only implements Temporary after golang 1.6 but since
+		// url.Error only wraps the error:
+		return shouldRetryError(err.Err)
+
+	case temporary:
+		if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+			return true
+		}
+		// If the error is temporary, we want to allow continuation of the
+		// retry process
+		return err.Temporary() || isErrConnectionReset(origErr)
+
+	case nil:
+		// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+		// because we don't know the cause, it is marked as retryable. See
+		// TestRequest4xxUnretryable for an example.
+		return true
+
+	default:
+		switch err.Error() {
+		case "net/http: request canceled",
+			"net/http: request canceled while waiting for connection":
+			// known 1.5 error case when an http request is cancelled
+			return false
+		}
+		// here we don't know the error; so we allow a retry.
+		return true
 	}
-	return false
 }

 // IsErrorThrottle returns whether the error is to be throttled based on its code.
 // Returns false if error is nil.
 func IsErrorThrottle(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeThrottle(aerr.Code())
-		}
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeThrottle(aerr.Code())
 	}
 	return false
 }

-// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
-// Returns false if error is nil.
+// IsErrorExpiredCreds returns whether the error code is a credential expiry
+// error. Returns false if error is nil.
 func IsErrorExpiredCreds(err error) bool {
-	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			return isCodeExpiredCreds(aerr.Code())
-		}
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		return isCodeExpiredCreds(aerr.Code())
 	}
 	return false
 }
@@ -143,17 +246,58 @@ func IsErrorExpiredCreds(err error) bool {
 //
 // Alias for the utility function IsErrorRetryable
 func (r *Request) IsErrorRetryable() bool {
+	if isErrCode(r.Error, r.RetryErrorCodes) {
+		return true
+	}
+
+	// HTTP response status code 501 should not be retried.
+	// 501 represents Not Implemented which means the request method is not
+	// supported by the server and cannot be handled.
+	if r.HTTPResponse != nil {
+		// HTTP response status code 500 represents internal server error and
+		// should be retried without any throttle.
+		if r.HTTPResponse.StatusCode == 500 {
+			return true
+		}
+	}
 	return IsErrorRetryable(r.Error)
 }

-// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. // // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..cc64e24f1d5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,259 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. 
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
+func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 38a7b05a621..7ec66e7e589 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,97 +1,93 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. +Sessions options from Shared Config By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. -Create a Session with the default config and request handlers. 
With credentials -region, and profile loaded from the environment and shared config automatically. -Requires the AWS_PROFILE to be set, or "default" is used. +Credential and config loading order - // Create Session - sess := session.Must(session.NewSession()) +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: - // Create a Session with a custom region - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - })) + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) - // Create a S3 client instance from a session - sess := session.Must(session.NewSession()) +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) - svc := s3.New(sess) +Creating Sessions -Create Session With Option Overrides +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. -In addition to NewSession, Sessions can be created using NewSessionWithOptions. -This func allows you to control and override how the Session will be created -through code instead of being driven by environment variables only. + // Create Session + sess, err := session.NewSession() -Use NewSessionWithOptions when you want to provide the config profile, or -override the shared config state (AWS_SDK_LOAD_CONFIG). + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). // Equivalent to session.NewSession() - sess := session.Must(session.NewSessionWithOptions(session.Options{ + sess, err := session.NewSessionWithOptions(session.Options{ // Options - })) + }) - // Specify profile to load for the session's config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Profile: "profile_name", - })) + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", - // Specify profile for config and region for requests - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Region: aws.String("us-east-1")}, - Profile: "profile_name", - })) + // Provide SDK Config options, such as Region. 
+ Config: aws.Config{ + Region: aws.String("us-west-2"), + }, - // Force enable Shared Config support - sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Force enable Shared Config support SharedConfigState: session.SharedConfigEnable, - })) + }) Adding Handlers -You can add handlers to a session for processing HTTP requests. All service -clients that use the session inherit the handlers. For example, the following -handler logs every request and its payload made by a service client: +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. @@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client: sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload - logger.Printf("Request: %s/%s, Payload: %s", + logger.Printf("Request: %s/%s, Params: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) }) -Deprecated "New" function - -The New session function has been deprecated because it does not provide good -way to return errors that occur when loading the configuration files and values. -Because of this, NewSession was created so errors can be retrieved when -creating a session fails. - Shared Config Fields -By default the SDK will only load the shared credentials file's (~/.aws/credentials) -credentials values, and all other config is provided by the environment variables, -SDK defaults, and user provided aws.Config values. +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable option is used to create the Session the full shared config values will be @@ -125,24 +114,31 @@ files have the same format. If both config files are present the configuration from both files will be read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). -Credentials are the values the SDK should use for authenticating requests with -AWS Services. They are from a configuration file will need to include both -aws_access_key_id and aws_secret_access_key must be provided together in the -same file to be considered valid. The values will be ignored if not a complete -group. aws_session_token is an optional field that can be provided if both of -the other two fields are also provided. +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. 
aws_access_key_id = AKID aws_secret_access_key = SECRET aws_session_token = TOKEN -Assume Role values allow you to configure the SDK to assume an IAM role using -a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK supports assuming -a role with MFA token if the session option AssumeRoleTokenProvider -is set. + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". role_arn = arn:aws:iam:::role/ source_profile = profile_with_creds @@ -150,40 +146,16 @@ is set. mfa_serial = role_session_name = session_name -Region is the region the SDK should use for looking up AWS service endpoints -and signing requests. - - region = us-east-1 - -Assume Role with MFA token -To create a session with support for assuming an IAM role with MFA set the -session option AssumeRoleTokenProvider to a function that will prompt for the -MFA token code when the SDK assumes the role and refreshes the role's credentials. -This allows you to configure the SDK via the shared config to assumea role -with MFA tokens. - -In order for the SDK to assume a role with MFA the SharedConfigState -session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG -environment variable set. - -The shared configuration instructs the SDK to assume an IAM role with MFA -when the mfa_serial configuration field is set in the shared config -(~/.aws/config) or shared credentials (~/.aws/credentials) file. - -If mfa_serial is set in the configuration, the SDK will assume the role, and -the AssumeRoleTokenProvider session option is not set an an error will -be returned when creating the session. +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. sess := session.Must(session.NewSessionWithOptions(session.Options{ AssumeRoleTokenProvider: stscreds.StdinTokenProvider, })) - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess) - -To setup assume role outside of a session see the stscreds.AssumeRoleProvider +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider documentation. Environment Variables diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index e3959b959ef..4092ab8fb7e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -1,12 +1,14 @@ package session import ( + "fmt" "os" "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // EnvProviderName provides a name of the provider when config is loaded from environment. 
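The next hunk adds environment hooks for web identity role assumption and for regional STS/S3 endpoint resolution. As a rough usage sketch (the token path, role ARN, and region are placeholder values, not SDK defaults), these variables are consumed when a Session is built:

	package main

	import (
		"fmt"
		"os"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
	)

	func main() {
		// Placeholder values; real deployments set these outside the process.
		os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/path/to/web-identity-token")
		os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/example")
		os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")
		os.Setenv("AWS_REGION", "us-west-2")

		// envConfigLoad reads the variables above during construction; a
		// malformed regional-endpoint flag now surfaces as an error rather
		// than being silently ignored.
		sess, err := session.NewSession()
		if err != nil {
			fmt.Println("session error:", err)
			return
		}
		fmt.Println("region:", aws.StringValue(sess.Config.Region))
	}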
@@ -99,21 +101,55 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string + CSMHost string CSMClientID string - enableEndpointDiscovery string // Enables endpoint discovery via environment variables. // // AWS_ENABLE_ENDPOINT_DISCOVERY=true EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } var ( csmEnabledEnvKey = []string{ "AWS_CSM_ENABLED", } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } csmPortEnvKey = []string{ "AWS_CSM_PORT", } @@ -150,6 +186,21 @@ var ( sharedConfigFileEnvKey = []string{ "AWS_CONFIG_FILE", } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -158,7 +209,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -169,30 +220,42 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. 
-func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = EnvProviderName + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v } regionKeys := regionEnvKeys @@ -223,12 +286,33 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index be4b5f07772..ab6daac7c30 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,19 +8,17 @@ import ( "io/ioutil" "net/http" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" 
"github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" ) const ( @@ -75,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -95,19 +93,28 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } } return s @@ -126,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session { // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifying the +// control through code how the Session will be created, such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} @@ -210,6 +217,12 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -224,6 +237,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. 
+ Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -257,13 +276,20 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } - if len(opts.Profile) > 0 { + if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } @@ -329,27 +355,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { - logger.Log("Enabling CSM") - if len(port) == 0 { - port = csm.DefaultPort +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, "127.0.0.1:"+port) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { - return + return err } r.InjectHandlers(handlers) + + return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - handlers := defaults.Handlers() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. @@ -366,9 +398,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - return nil, err + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -381,8 +421,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } } // Setup HTTP client with custom cert bundle if enabled @@ -395,6 +443,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -443,9 +531,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -464,162 +554,51 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - - // inspect the profile to see if a credential source has been specified. 
- if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { - - // if both credential_source and source_profile have been set, return an error - // as this is undefined behavior. - if len(sharedCfg.AssumeRole.SourceProfile) > 0 { - return ErrSharedConfigSourceCollision - } - - // valid credential source values - const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - ) - - switch sharedCfg.AssumeRole.CredentialSource { - case credSourceEc2Metadata: - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - cfgCp.Credentials = credentials.NewCredentials(p) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - case credSourceEnvironment: - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return ErrSharedConfigECSContainerEnvVarEmpty - } - - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - creds := credentials.NewCredentials(p) - - cfg.Credentials = creds - default: - return ErrSharedConfigInvalidCredSource - } - - return nil - } - - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else if len(sharedCfg.CredentialProcess) > 0 { - cfg.Credentials = processcreds.NewCredentials( - sharedCfg.CredentialProcess, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. 
- cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err } + cfg.Credentials = creds } return nil } -func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } } -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } } func initHandlers(s *Session) { @@ -630,7 +609,7 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current Session, coping the config +// Copy creates and returns a copy of the current Session, copying the config // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. 
// @@ -650,37 +629,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil && s.Config.Logger != nil { + s.Config.Logger.Log(fmt.Sprintf( + "ERROR: unable to resolve endpoint for service %q, region %q, err: %v", + service, region, err)) } return client.Config{ @@ -690,7 +647,42 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -700,12 +692,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) 
var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -717,3 +706,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 7cb44021b3f..1d7b049cf7c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -23,13 +23,29 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process - credentialProcessKey = `credential_process` + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name @@ -37,36 +53,33 @@ const ( DefaultSharedConfigProfile = `default` ) -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - CredentialSource string - ExternalID string - MFASerial string - RoleSessionName string -} - // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. 
+ // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. // // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string - // An external process to request credentials - CredentialProcess string + SourceProfileName string + SourceProfile *sharedConfig - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. // // region Region string @@ -76,6 +89,23 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } type sharedConfigFile struct { @@ -83,17 +113,18 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. +// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. -func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -104,16 +135,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -137,60 +163,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig - - if len(cfg.AssumeRole.CredentialSource) > 0 { - // setAssumeRoleSource is only called when source_profile is found. 
- // If both source_profile and credential_source are set, then - // ErrSharedConfigSourceCollision will be returned - return ErrSharedConfigSourceCollision +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr } - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err } } + profiles[profile] = struct{}{} - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if err := cfg.validateCredentialType(); err != nil { + return err } - cfg.AssumeRoleSource = &assumeRoleSrc + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() - return nil -} - -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } } return err } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. 
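The rewritten setFromIniFiles above threads a profiles set through the recursion so that source_profile chains always terminate: a repeated profile has its assume-role options cleared, and everything else is resolved depth-first. A compact model of that visited-set guard, with toy types standing in for sharedConfig (the vendored code additionally tolerates one self-reference; that nuance is omitted here):

```go
package main

import (
	"errors"
	"fmt"
)

// profile is a toy stand-in for sharedConfig: it either carries static
// credentials or points at another profile via source_profile.
type profile struct {
	creds         string
	sourceProfile string
}

var errNoCreds = errors.New("source profile chain ends without credentials")

// resolve walks source_profile links depth-first; the visited set refuses
// to revisit a profile, so misconfigured cycles fail instead of recursing
// forever.
func resolve(name string, all map[string]profile, visited map[string]bool) (string, error) {
	p, ok := all[name]
	if !ok {
		return "", fmt.Errorf("profile %q not found", name)
	}
	if p.creds != "" {
		return p.creds, nil
	}
	if p.sourceProfile == "" {
		return "", errNoCreds
	}
	if visited[p.sourceProfile] {
		return "", fmt.Errorf("cycle through profile %q", p.sourceProfile)
	}
	visited[name] = true
	return resolve(p.sourceProfile, all, visited)
}

func main() {
	profiles := map[string]profile{
		"assume": {sourceProfile: "base"},
		"base":   {creds: "AKID:SECRET"},
	}
	fmt.Println(resolve("assume", profiles, map[string]bool{}))
	// AKID:SECRET <nil>
}
```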
// // Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -200,53 +254,160 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e } } - // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre } - } - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - credentialSource := section.String(credentialSourceKey) - hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 - if len(roleArn) > 0 && hasSource { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - CredentialSource: credentialSource, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre } } - // `credential_process` - if credProc := section.String(credentialProcessKey); len(credProc) > 0 { - cfg.CredentialProcess = credProc - } + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - // Region - if v := section.String(regionKey); len(v) > 0 { - cfg.Region = v + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds } // Endpoint 
discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } } return nil } +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string @@ -304,7 +465,8 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string + RoleARN string + SourceProfile string } // Code is the short id of the error. 
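The new validateCredentialType above relies on oneOrNone to reject profiles that configure more than one of source_profile, credential_source, credential_process, and web_identity_token_file. The check in isolation:

```go
package main

import "fmt"

// oneOrNone reports whether at most one of the flags is set; the shared
// config loader uses the same shape to detect colliding credential sources.
func oneOrNone(bs ...bool) bool {
	count := 0
	for _, b := range bs {
		if b {
			count++
			if count > 1 {
				return false
			}
		}
	}
	return true
}

func main() {
	sourceProfile := "base"
	credentialProcess := "/usr/bin/creds" // hypothetical path for illustration

	ok := oneOrNone(
		len(sourceProfile) != 0,
		len(credentialProcess) != 0,
	)
	fmt.Println(ok) // false: two credential sources collide
}
```

The updateString/updateBoolPtr helpers serve the same precedence goal from the other direction: a later file only overwrites a field when its section actually contains the key.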
@@ -314,8 +476,10 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", - e.RoleARN) + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) } // OrigErr is the underlying error that caused the failure. @@ -327,3 +491,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 523db79f8d2..8104793aa5b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if includeSHA256Header { @@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte { return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) []byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. 
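The makeSha256Reader rewrite above no longer swallows seek failures: the starting offset is captured up front and the deferred rewind reports its error through the named return. A self-contained sketch of the hash-and-rewind pattern, using the standard library's io seek constants where the SDK goes through its internal sdkio package; unlike the vendored code, this version only surfaces the rewind error when nothing else has already failed:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// sha256Reader hashes the remaining payload of a seekable body, then seeks
// back so the body can still be sent; a failed rewind is reported through
// the named return instead of being dropped.
func sha256Reader(r io.ReadSeeker) (sum []byte, err error) {
	start, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Ensure an error is returned if we cannot rewind to the start
		// of the payload; otherwise the body would arrive consumed.
		if _, serr := r.Seek(start, io.SeekStart); serr != nil && err == nil {
			err = serr
		}
	}()

	h := sha256.New()
	if _, err = io.Copy(h, r); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

func main() {
	body := strings.NewReader("payload")
	sum, err := sha256Reader(body)
	fmt.Println(hex.EncodeToString(sum), err)
	// The reader is rewound, so the body can still be read afterwards.
}
```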
@@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { io.CopyN(hash, reader, size) } - return hash.Sum(nil) + return hash.Sum(nil), nil } const doubleSpace = " " diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/types.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/types.go index 8b6f23425a6..455091540fd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -7,13 +7,18 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/version.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/version.go index 7beac6a789e..b03cfb752dd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.19.26" +const SDKVersion = "1.25.35" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index f99703372c4..cf9fad81e70 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -162,7 +162,7 @@ loop: if len(tokens) == 0 { break loop } - + // if should skip is true, we skip the tokens until should skip is set to false. 
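The parser comment above, together with the skipper changes in the next hunk, implements skip-until-next-statement: once a value fails to parse, tokens are discarded until a newline is followed by a non-whitespace token. A toy model of that state machine with simplified token types:

```go
package main

import "fmt"

type tokenType int

const (
	tokOther tokenType = iota
	tokNL
	tokWS
)

type token struct {
	typ tokenType
	raw string
}

// skipper mirrors the vendored contract: once Skip is called, tokens are
// skipped until a newline is followed by a non-whitespace token, i.e. the
// start of the next statement. The parser, not ShouldSkip, calls Continue
// once it has recovered.
type skipper struct {
	shouldSkip bool
	prevTok    token
}

func (s *skipper) ShouldSkip(tok token) bool {
	if s.shouldSkip && s.prevTok.typ == tokNL && tok.typ != tokWS {
		return false
	}
	s.prevTok = tok
	return s.shouldSkip
}

func (s *skipper) Skip()     { s.shouldSkip = true }
func (s *skipper) Continue() { s.shouldSkip = false; s.prevTok = token{} }

func main() {
	s := &skipper{}
	s.Skip()
	for _, t := range []token{{tokOther, "bad-value"}, {tokNL, "NL"}, {tokOther, "[next-section]"}} {
		fmt.Println(t.raw, "skipped:", s.ShouldSkip(t))
	}
	// bad-value skipped: true, NL skipped: true, [next-section] skipped: false
}
```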
step = SkipTokenState } @@ -218,7 +218,7 @@ loop: // S -> equal_expr' expr_stmt' switch k.Kind { case ASTKindEqualExpr: - // assiging a value to some key + // assigning a value to some key k.AppendChild(newExpression(tok)) stack.Push(newExprStatement(k)) case ASTKindExpr: @@ -250,6 +250,13 @@ loop: if !runeCompare(tok.Raw(), openBrace) { return nil, NewParseError("expected '['") } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } stmt := newStatement() stack.Push(stmt) @@ -304,7 +311,9 @@ loop: stmt := newCommentStatement(tok) stack.Push(stmt) default: - return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) } if len(tokens) > 0 { @@ -314,7 +323,7 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) } // returns a sublist which excludes the start symbol diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go index 6bb6964475e..da7a4049cfa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -22,24 +22,24 @@ func newSkipper() skipper { } func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). if s.shouldSkip && s.prevTok.Type() == TokenNL && tok.Type() != TokenWS { - s.Continue() return false } s.prevTok = tok - return s.shouldSkip } func (s *skipper) Skip() { s.shouldSkip = true - s.prevTok = emptyToken } func (s *skipper) Continue() { s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false s.prevTok = emptyToken } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 00000000000..6c443988bbc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000000..44898eed0fd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. 
+// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000000..810ec7f08b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000000..f4651da2da5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000000..b1d93a33d48 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6.
+ var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go index 3104e6ce4c9..50c5ed76005 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -21,7 +21,8 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, true); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed encoding EC2 Query request", err) } if !r.IsPresigned() { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go index 5793c047373..105d732f9d3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -4,7 +4,6 @@ package ec2query import ( "encoding/xml" - "io" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -28,7 +27,8 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding EC2 Query response", err), + awserr.New(request.ErrCodeSerialization, + "failed decoding EC2 Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -39,7 +39,11 @@ func Unmarshal(r *request.Request) { // UnmarshalMeta unmarshals response headers for the EC2 protocol. 
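The UnmarshalMeta body that follows replaces the old TODO: the request id is read from X-Amzn-Requestid, falling back to the alternate X-Amz-Request-Id spelling. The same lookup in isolation:

```go
package main

import (
	"fmt"
	"net/http"
)

// requestID prefers the X-Amzn-Requestid header and falls back to the
// alternative spelling some services emit.
func requestID(h http.Header) string {
	id := h.Get("X-Amzn-Requestid")
	if id == "" {
		id = h.Get("X-Amz-Request-Id")
	}
	return id
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Request-Id", "abc-123") // illustrative value
	fmt.Println(requestID(h))            // abc-123
}
```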
func UnmarshalMeta(r *request.Request) { - // TODO implement unmarshaling of request IDs + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } } type xmlErrorResponse struct { @@ -53,19 +57,21 @@ type xmlErrorResponse struct { func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { + var respErr xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding EC2 Query error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) - } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - resp.RequestID, - ) + return } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + respErr.RequestID, + ) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index b11f3ee45b5..ea0da79a5e0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -1,6 +1,7 @@ package jsonutil import ( + "bytes" "encoding/base64" "encoding/json" "fmt" @@ -9,9 +10,30 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + // UnmarshalJSON reads a stream and unmarshals the results in object v. 
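UnmarshalJSONError above tees the response body into a buffer while decoding, so a failed decode can include the raw payload in the returned error rather than discarding it. A minimal sketch of the tee-and-decode pattern; the error strings are illustrative, not the SDK's awserr output:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decodeError decodes a JSON error document while capturing the bytes the
// decoder consumed, so a malformed body can be reported verbatim.
func decodeError(v interface{}, stream io.Reader) error {
	var errBuf bytes.Buffer
	body := io.TeeReader(stream, &errBuf)

	if err := json.NewDecoder(body).Decode(v); err != nil {
		if err == io.EOF {
			// Empty body: report a missing message, as the helper does.
			return fmt.Errorf("error message missing")
		}
		return fmt.Errorf("failed decoding error message: %v; body: %q",
			err, errBuf.Bytes())
	}
	return nil
}

func main() {
	var out struct {
		Code    string `json:"__type"`
		Message string `json:"message"`
	}
	err := decodeError(&out, strings.NewReader(`not-json`))
	fmt.Println(err) // reports the decode failure plus the raw body
}
```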
func UnmarshalJSON(v interface{}, stream io.Reader) error { var out interface{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go index 36ceab088c0..bfedc9fd422 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -6,8 +6,6 @@ package jsonrpc //go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go import ( - "encoding/json" - "io" "strings" "github.com/aws/aws-sdk-go/aws/awserr" @@ -37,7 +35,7 @@ func Build(req *request.Request) { if req.ParamsFilled() { buf, err = jsonutil.BuildJSON(req.Params) if err != nil { - req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) return } } else { @@ -68,7 +66,7 @@ func Unmarshal(req *request.Request) { err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) if err != nil { req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding JSON RPC response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), req.HTTPResponse.StatusCode, req.RequestID, ) @@ -87,17 +85,11 @@ func UnmarshalError(req *request.Request) { defer req.HTTPResponse.Body.Close() var jsonErr jsonErrorResponse - err := json.NewDecoder(req.HTTPResponse.Body).Decode(&jsonErr) - if err == io.EOF { + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", req.HTTPResponse.Status, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) - return - } else if err != nil { - req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding JSON RPC error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), req.HTTPResponse.StatusCode, req.RequestID, ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 60e5b09d548..0cb99eb5796 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index 3495c73070b..f69c1efc93a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) { err := 
xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding Query response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 46d354e826f..831b0110c54 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,73 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. 
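The xmlResponseError type above gives the query protocol one decode path for two different error documents by branching on the root element's local name. The same dispatch, reduced to a runnable example:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type xmlErrorResponse struct {
	Code      string `xml:"Error>Code"`
	Message   string `xml:"Error>Message"`
	RequestID string `xml:"RequestId"`
}

// xmlResponseError accepts either an <ErrorResponse> document or a bare
// <ServiceUnavailableException/> through a single decoder entry point.
type xmlResponseError struct {
	xmlErrorResponse
}

func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	switch start.Name.Local {
	case "ServiceUnavailableException":
		e.Code = "ServiceUnavailableException"
		e.Message = "service is unavailable"
		return d.Skip()
	case "ErrorResponse":
		return d.DecodeElement(&e.xmlErrorResponse, &start)
	default:
		return fmt.Errorf("unknown error response tag, %v", start)
	}
}

func main() {
	var respErr xmlResponseError
	doc := `<ErrorResponse><Error><Code>Throttling</Code>` +
		`<Message>slow down</Message></Error><RequestId>r-1</RequestId></ErrorResponse>`
	if err := xml.NewDecoder(strings.NewReader(doc)).Decode(&respErr); err != nil {
		panic(err)
	}
	fmt.Println(respErr.Code, respErr.Message, respErr.RequestID)
	// Throttling slow down r-1
}
```

Embedding works here because only *xmlResponseError implements xml.Unmarshaler; decoding into the embedded xmlErrorResponse does not recurse.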
func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to read from query HTTP response body", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr), + awserr.New(respErr.Code, respErr.Message, nil), r.HTTPResponse.StatusCode, - r.RequestID, + reqID, ) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index b80f84fbb86..1301b149d35 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -25,6 +25,8 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. + if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -152,7 +162,7 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect. 
if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } name = strings.TrimSpace(name) @@ -170,7 +180,7 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } keyStr := strings.TrimSpace(key.String()) @@ -186,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -219,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 33fd53b126a..74e361e070d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } @@ -65,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { str := string(b) payload.Set(reflect.ValueOf(&str)) @@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to read response body", err) return } @@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "header": err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", 
err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } } @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go index 8e88f3042aa..af4f6154d70 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -6,12 +6,11 @@ package restjson //go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go import ( - "encoding/json" - "io" "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -59,17 +58,11 @@ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var jsonErr jsonErrorResponse - err := json.NewDecoder(r.HTTPResponse.Body).Decode(&jsonErr) - if err == io.EOF { + err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } else if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding REST JSON error response", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go 
b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index b0f4e245661..07a6187ea62 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -37,8 +37,9 @@ func Build(r *request.Request) { err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, r.RequestID, ) return @@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to decode REST XML response", err), + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f810..05d4ff51925 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000000..c1a511851f6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index ff1ef6830b9..7108d380093 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -10,9 +11,27 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. 
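One behavioral change worth noting from the timestamp.go hunk above: ParseTime now preserves fractional unix timestamps, rounding the fraction to millisecond precision before converting to nanoseconds. A standalone equivalent, using math.Round directly where the vendored code goes through the sdkmath shim:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// parseUnix keeps fractional seconds instead of truncating them, rounding
// the fraction to millisecond precision so values like 1257894000.1229999
// come out as .123.
func parseUnix(value string) (time.Time, error) {
	v, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return time.Time{}, err
	}
	sec, dec := math.Modf(v)
	dec = math.Round(dec*1e3) / 1e3
	return time.Unix(int64(sec), int64(dec*1e9)), nil
}

func main() {
	t, err := parseUnix("1257894000.123")
	fmt.Println(t.UTC().Format(time.RFC3339Nano), err)
	// 2009-11-10T23:00:00.123Z <nil>
}
```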
+func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 515ce15215b..42f71648eee 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) if node.Text != "" { e.EncodeToken(xml.CharData([]byte(node.Text))) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/api.go index a4332951aa0..301fda23bf0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/api.go @@ -717,7 +717,7 @@ func (c *ACM) ListCertificatesWithContext(ctx aws.Context, input *ListCertificat // // Example iterating over at most 3 pages of a ListCertificates operation. // pageNum := 0 // err := client.ListCertificatesPages(params, -// func(page *ListCertificatesOutput, lastPage bool) bool { +// func(page *acm.ListCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -749,10 +749,12 @@ func (c *ACM) ListCertificatesPagesWithContext(ctx aws.Context, input *ListCerti }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1269,7 +1271,7 @@ func (c *ACM) UpdateCertificateOptionsRequest(input *UpdateCertificateOptionsInp // // Updates a certificate. Currently, you can use this function to specify whether // to opt in to or out of recording your certificate in a certificate transparency -// log. For more information, see Opting Out of Certificate Transparency Logging +// log. For more information, see Opting Out of Certificate Transparency Logging // (https://docs.aws.amazon.com/acm/latest/userguide/acm-bestpractices.html#best-practices-transparency). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/service.go index 9817d0c0a5a..ac0bee1943a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acm/service.go @@ -46,11 +46,11 @@ const ( // svc := acm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACM { svc := &ACM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-08", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go index bdeaa9e53fc..c600b04c0a5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go @@ -57,16 +57,16 @@ func (c *ACMPCA) CreateCertificateAuthorityRequest(input *CreateCertificateAutho // CreateCertificateAuthority API operation for AWS Certificate Manager Private Certificate Authority. // -// Creates a private subordinate certificate authority (CA). You must specify -// the CA configuration, the revocation configuration, the CA type, and an optional -// idempotency token. The CA configuration specifies the name of the algorithm +// Creates a root or subordinate private certificate authority (CA). You must +// specify the CA configuration, the certificate revocation list (CRL) configuration, +// the CA type, and an optional idempotency token to avoid accidental creation +// of multiple CAs. The CA configuration specifies the name of the algorithm // and key size to be used to create the CA private key, the type of signing -// algorithm that the CA uses to sign, and X.500 subject information. The CRL -// (certificate revocation list) configuration specifies the CRL expiration -// period in days (the validity period of the CRL), the Amazon S3 bucket that -// will contain the CRL, and a CNAME alias for the S3 bucket that is included -// in certificates issued by the CA. If successful, this operation returns the -// Amazon Resource Name (ARN) of the CA. +// algorithm that the CA uses, and X.500 subject information. The CRL configuration +// specifies the CRL expiration period in days (the validity period of the CRL), +// the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the +// S3 bucket that is included in certificates issued by the CA. If successful, +// this action returns the Amazon Resource Name (ARN) of the CA. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -80,16 +80,16 @@ func (c *ACMPCA) CreateCertificateAuthorityRequest(input *CreateCertificateAutho // One or more of the specified arguments was not valid. // // * ErrCodeInvalidPolicyException "InvalidPolicyException" -// The S3 bucket policy is not valid. The policy must give ACM PCA rights to -// read from and write to the bucket and find the bucket location. +// The S3 bucket policy is not valid. The policy must give ACM Private CA rights +// to read from and write to the bucket and find the bucket location. // // * ErrCodeInvalidTagException "InvalidTagException" // The tag associated with the CA is not valid. The invalid argument is contained // in the message field. // // * ErrCodeLimitExceededException "LimitExceededException" -// An ACM PCA limit has been exceeded. See the exception message returned to -// determine the limit that was exceeded. +// An ACM Private CA limit has been exceeded. See the exception message returned +// to determine the limit that was exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/CreateCertificateAuthority func (c *ACMPCA) CreateCertificateAuthority(input *CreateCertificateAuthorityInput) (*CreateCertificateAuthorityOutput, error) { @@ -159,8 +159,7 @@ func (c *ACMPCA) CreateCertificateAuthorityAuditReportRequest(input *CreateCerti // // Creates an audit report that lists every time that your CA private key is // used. The report is saved in the Amazon S3 bucket that you specify on input. -// The IssueCertificate and RevokeCertificate operations use the private key. -// You can generate a new report every 30 minutes. +// The IssueCertificate and RevokeCertificate actions use the private key. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -265,8 +264,8 @@ func (c *ACMPCA) CreatePermissionRequest(input *CreatePermissionInput) (req *req // all possible permissions from the CA to the ACM service principal. // // At this time, you can only assign permissions to ACM (acm.amazonaws.com). -// Permissions can be revoked with the DeletePermission operation and listed -// with the ListPermissions operation. +// Permissions can be revoked with the DeletePermission action and listed with +// the ListPermissions action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -287,8 +286,8 @@ func (c *ACMPCA) CreatePermissionRequest(input *CreatePermissionInput) (req *req // The designated permission has already been given to the user. // // * ErrCodeLimitExceededException "LimitExceededException" -// An ACM PCA limit has been exceeded. See the exception message returned to -// determine the limit that was exceeded. +// An ACM Private CA limit has been exceeded. See the exception message returned +// to determine the limit that was exceeded. // // * ErrCodeInvalidStateException "InvalidStateException" // The private CA is in a state during which a report or certificate cannot @@ -364,25 +363,29 @@ func (c *ACMPCA) DeleteCertificateAuthorityRequest(input *DeleteCertificateAutho // DeleteCertificateAuthority API operation for AWS Certificate Manager Private Certificate Authority. 
// -// Deletes a private certificate authority (CA). You must provide the ARN (Amazon -// Resource Name) of the private CA that you want to delete. You can find the -// ARN by calling the ListCertificateAuthorities operation. Before you can delete -// a CA, you must disable it. Call the UpdateCertificateAuthority operation -// and set the CertificateAuthorityStatus parameter to DISABLED. +// Deletes a private certificate authority (CA). You must provide the Amazon +// Resource Name (ARN) of the private CA that you want to delete. You can find +// the ARN by calling the ListCertificateAuthorities action. +// +// Deleting a CA will invalidate other CAs and certificates below it in your +// CA hierarchy. +// +// Before you can delete a CA that you have created and activated, you must +// disable it. To do this, call the UpdateCertificateAuthority action and set +// the CertificateAuthorityStatus parameter to DISABLED. // // Additionally, you can delete a CA if you are waiting for it to be created -// (the Status field of the CertificateAuthority is CREATING). You can also -// delete it if the CA has been created but you haven't yet imported the signed -// certificate (the Status is PENDING_CERTIFICATE) into ACM PCA. -// -// If the CA is in one of the previously mentioned states and you call DeleteCertificateAuthority, -// the CA's status changes to DELETED. However, the CA won't be permanently -// deleted until the restoration period has passed. By default, if you do not -// set the PermanentDeletionTimeInDays parameter, the CA remains restorable -// for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority -// operation returns the time remaining in the restoration window of a Private -// CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority -// operation. +// (that is, the status of the CA is CREATING). You can also delete it if the +// CA has been created but you haven't yet imported the signed certificate into +// ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE). +// +// When you successfully call DeleteCertificateAuthority, the CA's status changes +// to DELETED. However, the CA won't be permanently deleted until the restoration +// period has passed. By default, if you do not set the PermanentDeletionTimeInDays +// parameter, the CA remains restorable for 30 days. You can set the parameter +// from 7 to 30 days. The DescribeCertificateAuthority action returns the time +// remaining in the restoration window of a private CA in the DELETED state. +// To restore an eligible CA, call the RestoreCertificateAuthority action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -474,8 +477,8 @@ func (c *ACMPCA) DeletePermissionRequest(input *DeletePermissionInput) (req *req // DeletePermission API operation for AWS Certificate Manager Private Certificate Authority. // // Revokes permissions that a private CA assigned to a designated AWS service. -// Permissions can be created with the CreatePermission operation and listed -// with the ListPermissions operation. +// Permissions can be created with the CreatePermission action and listed with +// the ListPermissions action. // // Returns awserr.Error for service API and SDK errors. 
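A minimal sketch of the disable-then-delete sequence the rewritten comment describes, assuming an acmpca client built as above; the CA ARN is a hypothetical placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	caArn := "arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example" // hypothetical

	// A created-and-activated CA must be DISABLED before deletion.
	if _, err := svc.UpdateCertificateAuthority(&acmpca.UpdateCertificateAuthorityInput{
		CertificateAuthorityArn: aws.String(caArn),
		Status:                  aws.String(acmpca.CertificateAuthorityStatusDisabled),
	}); err != nil {
		log.Fatal(err)
	}

	// Shorten the restoration window from the 30-day default to 7 days.
	if _, err := svc.DeleteCertificateAuthority(&acmpca.DeleteCertificateAuthorityInput{
		CertificateAuthorityArn:     aws.String(caArn),
		PermanentDeletionTimeInDays: aws.Int64(7),
	}); err != nil {
		log.Fatal(err)
	}
}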
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -569,11 +572,11 @@ func (c *ACMPCA) DescribeCertificateAuthorityRequest(input *DescribeCertificateA // the private CA on input by its ARN (Amazon Resource Name). The output contains // the status of your CA. This can be any of the following: // -// * CREATING - ACM PCA is creating your private certificate authority. +// * CREATING - ACM Private CA is creating your private certificate authority. // // * PENDING_CERTIFICATE - The certificate is pending. You must use your -// on-premises root or subordinate CA to sign your private CA CSR and then -// import it into PCA. +// ACM Private CA-hosted or on-premises root or subordinate CA to sign your +// private CA CSR and then import it into PCA. // // * ACTIVE - Your private CA is active. // @@ -587,7 +590,7 @@ func (c *ACMPCA) DescribeCertificateAuthorityRequest(input *DescribeCertificateA // // * DELETED - Your private CA is within the restoration period, after which // it is permanently deleted. The length of time remaining in the CA's restoration -// period is also included in this operation's output. +// period is also included in this action's output. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -671,9 +674,9 @@ func (c *ACMPCA) DescribeCertificateAuthorityAuditReportRequest(input *DescribeC // DescribeCertificateAuthorityAuditReport API operation for AWS Certificate Manager Private Certificate Authority. // // Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport -// operation. Audit information is created every time the certificate authority +// action. Audit information is created every time the certificate authority // (CA) private key is used. The private key is used when you call the IssueCertificate -// operation or the RevokeCertificate operation. +// action or the RevokeCertificate action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -760,11 +763,11 @@ func (c *ACMPCA) GetCertificateRequest(input *GetCertificateInput) (req *request // GetCertificate API operation for AWS Certificate Manager Private Certificate Authority. // // Retrieves a certificate from your private CA. The ARN of the certificate -// is returned when you call the IssueCertificate operation. You must specify -// both the ARN of your private CA and the ARN of the issued certificate when -// calling the GetCertificate operation. You can retrieve the certificate if -// it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport -// operation to create a report that contains information about all of the certificates +// is returned when you call the IssueCertificate action. You must specify both +// the ARN of your private CA and the ARN of the issued certificate when calling +// the GetCertificate action. You can retrieve the certificate if it is in the +// ISSUED state. You can call the CreateCertificateAuthorityAuditReport action +// to create a report that contains information about all of the certificates // issued and revoked by your private CA. // // Returns awserr.Error for service API and SDK errors. 
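Each operation's comment says to use runtime type assertions with awserr.Error's Code and Message methods; a minimal sketch of that pattern around GetCertificate, where both ARNs are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	out, err := svc.GetCertificate(&acmpca.GetCertificateInput{
		CertificateAuthorityArn: aws.String("arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example"), // hypothetical
		CertificateArn:          aws.String("arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example/certificate/example"), // hypothetical
	})
	if err != nil {
		// The runtime type assertion the generated comments refer to.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case acmpca.ErrCodeRequestInProgressException:
				fmt.Println("certificate not issued yet:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println(aws.StringValue(out.Certificate))
}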
Use runtime type assertions @@ -950,10 +953,10 @@ func (c *ACMPCA) GetCertificateAuthorityCsrRequest(input *GetCertificateAuthorit // // Retrieves the certificate signing request (CSR) for your private certificate // authority (CA). The CSR is created when you call the CreateCertificateAuthority -// operation. Take the CSR to your on-premises X.509 infrastructure and sign -// it by using your root or a subordinate CA. Then import the signed certificate -// back into ACM PCA by calling the ImportCertificateAuthorityCertificate operation. -// The CSR is returned as a base64 PEM-encoded string. +// action. Sign the CSR with your ACM Private CA-hosted or on-premises root +// or subordinate CA. Then import the signed certificate back into ACM Private +// CA by calling the ImportCertificateAuthorityCertificate action. The CSR is +// returned as a base64 PEM-encoded string. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1047,23 +1050,40 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // ImportCertificateAuthorityCertificate API operation for AWS Certificate Manager Private Certificate Authority. // -// Imports your signed private CA certificate into ACM PCA. Before you can call -// this operation, you must create the private certificate authority by calling -// the CreateCertificateAuthority operation. You must then generate a certificate -// signing request (CSR) by calling the GetCertificateAuthorityCsr operation. -// Take the CSR to your on-premises CA and use the root certificate or a subordinate -// certificate to sign it. Create a certificate chain and copy the signed certificate -// and the certificate chain to your working directory. +// Imports a signed private CA certificate into ACM Private CA. This action +// is used when you are using a chain of trust whose root is located outside +// ACM Private CA. Before you can call this action, the following preparations +// must be in place: +// +// In ACM Private CA, call the CreateCertificateAuthority action to create the +// private CA that you plan to back with the imported certificate. +// +// Call the GetCertificateAuthorityCsr action to generate a certificate signing +// request (CSR). +// +// Sign the CSR using a root or intermediate CA hosted either by an on-premises +// PKI hierarchy or a commercial CA. +// +// Create a certificate chain and copy the signed certificate and the certificate +// chain to your working directory. // -// Your certificate chain must not include the private CA certificate that you -// are importing. +// The following requirements apply when you import a CA certificate. // -// Your on-premises CA certificate must be the last certificate in your chain. -// The subordinate certificate, if any, that your root CA signed must be next -// to last. The subordinate certificate signed by the preceding subordinate -// CA must come next, and so on until your chain is built. +// * You cannot import a non-self-signed certificate for use as a root CA. // -// The chain must be PEM-encoded. +// * You cannot import a self-signed certificate for use as a subordinate +// CA. +// +// * Your certificate chain must not include the private CA certificate that +// you are importing. +// +// * Your ACM Private CA-hosted or on-premises CA certificate must be the +// last certificate in your chain.
The subordinate certificate, if any, that +// your root CA signed must be next to last. The subordinate certificate +// signed by the preceding subordinate CA must come next, and so on until +// your chain is built. +// +// * The chain must be PEM-encoded. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1089,6 +1109,9 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // * ErrCodeInvalidArnException "InvalidArnException" // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request action cannot be performed or is prohibited. +// // * ErrCodeInvalidStateException "InvalidStateException" // The private CA is in a state during which a report or certificate cannot // be generated. @@ -1167,12 +1190,12 @@ func (c *ACMPCA) IssueCertificateRequest(input *IssueCertificateInput) (req *req // IssueCertificate API operation for AWS Certificate Manager Private Certificate Authority. // // Uses your private certificate authority (CA) to issue a client certificate. -// This operation returns the Amazon Resource Name (ARN) of the certificate. -// You can retrieve the certificate by calling the GetCertificate operation -// and specifying the ARN. +// This action returns the Amazon Resource Name (ARN) of the certificate. You +// can retrieve the certificate by calling the GetCertificate action and specifying +// the ARN. // -// You cannot use the ACM ListCertificateAuthorities operation to retrieve the -// ARNs of the certificates that you issue by using ACM PCA. +// You cannot use the ACM ListCertificateAuthorities action to retrieve the +// ARNs of the certificates that you issue by using ACM Private CA. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1183,8 +1206,8 @@ func (c *ACMPCA) IssueCertificateRequest(input *IssueCertificateInput) (req *req // // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceededException" -// An ACM PCA limit has been exceeded. See the exception message returned to -// determine the limit that was exceeded. +// An ACM Private CA limit has been exceeded. See the exception message returned +// to determine the limit that was exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A resource such as a private CA, S3 bucket, certificate, or audit report @@ -1276,7 +1299,7 @@ func (c *ACMPCA) ListCertificateAuthoritiesRequest(input *ListCertificateAuthori // ListCertificateAuthorities API operation for AWS Certificate Manager Private Certificate Authority. // // Lists the private certificate authorities that you created by using the CreateCertificateAuthority -// operation. +// action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1323,7 +1346,7 @@ func (c *ACMPCA) ListCertificateAuthoritiesWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListCertificateAuthorities operation. 
// pageNum := 0 // err := client.ListCertificateAuthoritiesPages(params, -// func(page *ListCertificateAuthoritiesOutput, lastPage bool) bool { +// func(page *acmpca.ListCertificateAuthoritiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1355,10 +1378,12 @@ func (c *ACMPCA) ListCertificateAuthoritiesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCertificateAuthoritiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCertificateAuthoritiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1413,8 +1438,8 @@ func (c *ACMPCA) ListPermissionsRequest(input *ListPermissionsInput) (req *reque // ListPermissions API operation for AWS Certificate Manager Private Certificate Authority. // // Lists all the permissions, if any, that have been assigned by a private CA. -// Permissions can be granted with the CreatePermission operation and revoked -// with the DeletePermission operation. +// Permissions can be granted with the CreatePermission action and revoked with +// the DeletePermission action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1475,7 +1500,7 @@ func (c *ACMPCA) ListPermissionsWithContext(ctx aws.Context, input *ListPermissi // // Example iterating over at most 3 pages of a ListPermissions operation. // pageNum := 0 // err := client.ListPermissionsPages(params, -// func(page *ListPermissionsOutput, lastPage bool) bool { +// func(page *acmpca.ListPermissionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1507,10 +1532,12 @@ func (c *ACMPCA) ListPermissionsPagesWithContext(ctx aws.Context, input *ListPer }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPermissionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPermissionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1566,8 +1593,8 @@ func (c *ACMPCA) ListTagsRequest(input *ListTagsInput) (req *request.Request, ou // // Lists the tags, if any, that are associated with your private CA. Tags are // labels that you can use to identify and organize your CAs. Each tag consists -// of a key and an optional value. Call the TagCertificateAuthority operation -// to add one or more tags to your CA. Call the UntagCertificateAuthority operation +// of a key and an optional value. Call the TagCertificateAuthority action to +// add one or more tags to your CA. Call the UntagCertificateAuthority action // to remove tags. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1585,6 +1612,10 @@ func (c *ACMPCA) ListTagsRequest(input *ListTagsInput) (req *request.Request, ou // * ErrCodeInvalidArnException "InvalidArnException" // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // +// * ErrCodeInvalidStateException "InvalidStateException" +// The private CA is in a state during which a report or certificate cannot +// be generated. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/ListTags func (c *ACMPCA) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { req, out := c.ListTagsRequest(input) @@ -1618,7 +1649,7 @@ func (c *ACMPCA) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts // // Example iterating over at most 3 pages of a ListTags operation. 
// pageNum := 0 // err := client.ListTagsPages(params, -// func(page *ListTagsOutput, lastPage bool) bool { +// func(page *acmpca.ListTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1650,10 +1681,12 @@ func (c *ACMPCA) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1704,18 +1737,17 @@ func (c *ACMPCA) RestoreCertificateAuthorityRequest(input *RestoreCertificateAut // // Restores a certificate authority (CA) that is in the DELETED state. You can // restore a CA during the period that you defined in the PermanentDeletionTimeInDays -// parameter of the DeleteCertificateAuthority operation. Currently, you can -// specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays -// value, by default you can restore the CA at any time in a 30 day period. -// You can check the time remaining in the restoration period of a private CA -// in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities -// operations. The status of a restored CA is set to its pre-deletion status -// when the RestoreCertificateAuthority operation returns. To change its status -// to ACTIVE, call the UpdateCertificateAuthority operation. If the private -// CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate -// operation to import a certificate authority into the private CA before it -// can be activated. You cannot restore a CA after the restoration period has -// ended. +// parameter of the DeleteCertificateAuthority action. Currently, you can specify +// 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, +// by default you can restore the CA at any time in a 30 day period. You can +// check the time remaining in the restoration period of a private CA in the +// DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities +// actions. The status of a restored CA is set to its pre-deletion status when +// the RestoreCertificateAuthority action returns. To change its status to ACTIVE, +// call the UpdateCertificateAuthority action. If the private CA was in the +// PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate +// action to import a certificate authority into the private CA before it can +// be activated. You cannot restore a CA after the restoration period has ended. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1803,14 +1835,16 @@ func (c *ACMPCA) RevokeCertificateRequest(input *RevokeCertificateInput) (req *r // RevokeCertificate API operation for AWS Certificate Manager Private Certificate Authority. // -// Revokes a certificate that you issued by calling the IssueCertificate operation. -// If you enable a certificate revocation list (CRL) when you create or update -// your private CA, information about the revoked certificates will be included -// in the CRL. ACM PCA writes the CRL to an S3 bucket that you specify. For -// more information about revocation, see the CrlConfiguration structure. ACM -// PCA also writes revocation information to the audit report. 
For more information, +// Revokes a certificate that was issued inside ACM Private CA. If you enable +// a certificate revocation list (CRL) when you create or update your private +// CA, information about the revoked certificates will be included in the CRL. +// ACM Private CA writes the CRL to an S3 bucket that you specify. For more +// information about revocation, see the CrlConfiguration structure. ACM Private +// CA also writes revocation information to the audit report. For more information, // see CreateCertificateAuthorityAuditReport. // +// You cannot revoke a root CA self-signed certificate. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1825,13 +1859,16 @@ func (c *ACMPCA) RevokeCertificateRequest(input *RevokeCertificateInput) (req *r // * ErrCodeInvalidArnException "InvalidArnException" // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request action cannot be performed or is prohibited. +// // * ErrCodeInvalidStateException "InvalidStateException" // The private CA is in a state during which a report or certificate cannot // be generated. // // * ErrCodeLimitExceededException "LimitExceededException" -// An ACM PCA limit has been exceeded. See the exception message returned to -// determine the limit that was exceeded. +// An ACM Private CA limit has been exceeded. See the exception message returned +// to determine the limit that was exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A resource such as a private CA, S3 bucket, certificate, or audit report @@ -1920,8 +1957,8 @@ func (c *ACMPCA) TagCertificateAuthorityRequest(input *TagCertificateAuthorityIn // a tag to just one private CA if you want to identify a specific characteristic // of that CA, or you can apply the same tag to multiple private CAs if you // want to filter for a common relationship among those CAs. To remove one or -// more tags, use the UntagCertificateAuthority operation. Call the ListTags -// operation to see what tags are associated with your CA. +// more tags, use the UntagCertificateAuthority action. Call the ListTags action +// to see what tags are associated with your CA. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2019,10 +2056,10 @@ func (c *ACMPCA) UntagCertificateAuthorityRequest(input *UntagCertificateAuthori // // Remove one or more tags from your private CA. A tag consists of a key-value // pair. If you do not specify the value portion of the tag when calling this -// operation, the tag will be removed regardless of value. If you specify a -// value, the tag is removed only if it is associated with the specified value. -// To add tags to a private CA, use the TagCertificateAuthority. Call the ListTags -// operation to see what tags are associated with your CA. +// action, the tag will be removed regardless of value. If you specify a value, +// the tag is removed only if it is associated with the specified value. To +// add tags to a private CA, use the TagCertificateAuthority. Call the ListTags +// action to see what tags are associated with your CA. // // Returns awserr.Error for service API and SDK errors. 
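A minimal sketch of the RevokeCertificate call described above; the CA ARN and hexadecimal serial number are hypothetical placeholders, and per the comment above a root CA's self-signed certificate cannot be revoked:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	// The serial number is hexadecimal, e.g. copied from:
	//   openssl x509 -in file_path -text -noout
	_, err := svc.RevokeCertificate(&acmpca.RevokeCertificateInput{
		CertificateAuthorityArn: aws.String("arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example"), // hypothetical
		CertificateSerial:       aws.String("e8cbd2bedb122329f97706bcfec990f8"),                                     // hypothetical
		RevocationReason:        aws.String(acmpca.RevocationReasonSuperseded),
	})
	if err != nil {
		log.Fatal(err)
	}
}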
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2145,8 +2182,8 @@ func (c *ACMPCA) UpdateCertificateAuthorityRequest(input *UpdateCertificateAutho // be generated. // // * ErrCodeInvalidPolicyException "InvalidPolicyException" -// The S3 bucket policy is not valid. The policy must give ACM PCA rights to -// read from and write to the bucket and find the bucket location. +// The S3 bucket policy is not valid. The policy must give ACM Private CA rights +// to read from and write to the bucket and find the bucket location. // // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/UpdateCertificateAuthority func (c *ACMPCA) UpdateCertificateAuthority(input *UpdateCertificateAuthorityInput) (*UpdateCertificateAuthorityOutput, error) { @@ -2333,16 +2370,16 @@ func (s *ASN1Subject) SetTitle(v string) *ASN1Subject { // private CA can issue and revoke X.509 digital certificates. Digital certificates // verify that the entity named in the certificate Subject field owns or controls // the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority -// operation to create your private CA. You must then call the GetCertificateAuthorityCertificate -// operation to retrieve a private CA certificate signing request (CSR). Take -// the CSR to your on-premises CA and sign it with the root CA certificate or -// a subordinate certificate. Call the ImportCertificateAuthorityCertificate -// operation to import the signed certificate into AWS Certificate Manager (ACM). +// action to create your private CA. You must then call the GetCertificateAuthorityCertificate +// action to retrieve a private CA certificate signing request (CSR). Sign the +// CSR with your ACM Private CA-hosted or on-premises root or subordinate CA +// certificate. Call the ImportCertificateAuthorityCertificate action to import +// the signed certificate into AWS Certificate Manager (ACM). type CertificateAuthority struct { _ struct{} `type:"structure"` // Amazon Resource Name (ARN) for your private certificate authority (CA). The - // format is 12345678-1234-1234-1234-123456789012. + // format is 12345678-1234-1234-1234-123456789012 . Arn *string `min:"5" type:"string"` // Your private CA configuration. @@ -2365,7 +2402,7 @@ type CertificateAuthority struct { // The period during which a deleted CA can be restored. For more information, // see the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthorityRequest - // operation. + // action. RestorableUntil *time.Time `type:"timestamp"` // Information about the certificate revocation list (CRL) created and maintained @@ -2469,12 +2506,13 @@ func (s *CertificateAuthority) SetType(v string) *CertificateAuthority { // the key pair that your private CA creates when it issues a certificate. It // also includes the signature algorithm that it uses when issuing certificates, // and its X.500 distinguished name. You must specify this information when -// you call the CreateCertificateAuthority operation. +// you call the CreateCertificateAuthority action. type CertificateAuthorityConfiguration struct { _ struct{} `type:"structure"` // Type of the public key algorithm and size, in bits, of the key pair that - // your key pair creates when it issues a certificate. + // your CA creates when it issues a certificate. When you create a subordinate + // CA, you must use a key algorithm supported by the parent CA. 
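The CertificateAuthorityConfiguration comment above covers the key algorithm, signing algorithm, and X.500 subject; a minimal sketch assembling it into a CreateCertificateAuthority call. The subject values and idempotency token are hypothetical, and ROOT relies on the CertificateAuthorityTypeRoot enum value this diff adds:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	out, err := svc.CreateCertificateAuthority(&acmpca.CreateCertificateAuthorityInput{
		CertificateAuthorityConfiguration: &acmpca.CertificateAuthorityConfiguration{
			KeyAlgorithm:     aws.String(acmpca.KeyAlgorithmRsa2048),
			SigningAlgorithm: aws.String(acmpca.SigningAlgorithmSha256withrsa),
			Subject: &acmpca.ASN1Subject{
				CommonName:   aws.String("example.corp"), // hypothetical
				Organization: aws.String("Example Corp"), // hypothetical
			},
		},
		// ROOT is newly valid; SUBORDINATE was previously the only type.
		CertificateAuthorityType: aws.String(acmpca.CertificateAuthorityTypeRoot),
		// The same token within five minutes yields at most one CA.
		IdempotencyToken: aws.String("example-token-1"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.CertificateAuthorityArn))
}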
// // KeyAlgorithm is a required field KeyAlgorithm *string `type:"string" required:"true" enum:"KeyAlgorithm"` @@ -2548,7 +2586,7 @@ type CreateCertificateAuthorityAuditReportInput struct { // The Amazon Resource Name (ARN) of the CA to be audited. This is of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -2650,7 +2688,7 @@ type CreateCertificateAuthorityInput struct { // CertificateAuthorityConfiguration is a required field CertificateAuthorityConfiguration *CertificateAuthorityConfiguration `type:"structure" required:"true"` - // The type of the certificate authority. Currently, this must be SUBORDINATE. + // The type of the certificate authority. // // CertificateAuthorityType is a required field CertificateAuthorityType *string `type:"string" required:"true" enum:"CertificateAuthorityType"` @@ -2658,20 +2696,23 @@ type CreateCertificateAuthorityInput struct { // Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. // Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority // multiple times with the same idempotency token within a five minute period, - // ACM PCA recognizes that you are requesting only one certificate. As a result, - // ACM PCA issues only one. If you change the idempotency token for each call, - // however, ACM PCA recognizes that you are requesting multiple certificates. + // ACM Private CA recognizes that you are requesting only one certificate. As + // a result, ACM Private CA issues only one. If you change the idempotency token + // for each call, however, ACM Private CA recognizes that you are requesting + // multiple certificates. IdempotencyToken *string `min:"1" type:"string"` // Contains a Boolean value that you can use to enable a certification revocation - // list (CRL) for the CA, the name of the S3 bucket to which ACM PCA will write - // the CRL, and an optional CNAME alias that you can use to hide the name of - // your bucket in the CRL Distribution Points extension of your CA certificate. - // For more information, see the CrlConfiguration structure. + // list (CRL) for the CA, the name of the S3 bucket to which ACM Private CA + // will write the CRL, and an optional CNAME alias that you can use to hide + // the name of your bucket in the CRL Distribution Points extension of your + // CA certificate. For more information, see the CrlConfiguration structure. RevocationConfiguration *RevocationConfiguration `type:"structure"` // Key-value pairs that will be attached to the new private CA. You can associate - // up to 50 tags with a private CA. + // up to 50 tags with a private CA. For information on using tags with IAM + // to manage permissions, see Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). Tags []*Tag `min:"1" type:"list"` } @@ -2763,7 +2804,7 @@ type CreateCertificateAuthorityOutput struct { // If successful, the Amazon Resource Name (ARN) of the certificate authority // (CA). This is of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .
CertificateAuthorityArn *string `min:"5" type:"string"` } @@ -2793,10 +2834,10 @@ type CreatePermissionInput struct { Actions []*string `min:"1" type:"list" required:"true"` // The Amazon Resource Name (ARN) of the CA that grants the permissions. You - // can find the ARN by calling the ListCertificateAuthorities operation. This - // must have the following form: + // can find the ARN by calling the ListCertificateAuthorities action. This must + // have the following form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -2895,7 +2936,7 @@ func (s CreatePermissionOutput) GoString() string { // the name of your bucket by specifying a value for the CustomCname parameter. // Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution // Points extension of each certificate it issues. Your S3 bucket policy must -// give write permission to ACM PCA. +// give write permission to ACM Private CA. // // Your private CA uses the value in the ExpirationInDays parameter to calculate // the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next @@ -2919,29 +2960,22 @@ func (s CreatePermissionOutput) GoString() string { // * Next Update: The day and time by which the next CRL will be issued. // // * Revoked Certificates: List of revoked certificates. Each list item contains -// the following information. +// the following information. Serial Number: The serial number, in hexadecimal +// format, of the revoked certificate. Revocation Date: Date and time the +// certificate was revoked. CRL Entry Extensions: Optional extensions for +// the CRL entry. X509v3 CRL Reason Code: Reason the certificate was revoked. // -// Serial Number: The serial number, in hexadecimal format, of the revoked certificate. -// -// Revocation Date: Date and time the certificate was revoked. -// -// CRL Entry Extensions: Optional extensions for the CRL entry. -// -// X509v3 CRL Reason Code: Reason the certificate was revoked. -// -// * CRL Extensions: Optional extensions for the CRL. -// -// X509v3 Authority Key Identifier: Identifies the public key associated with -// the private key used to sign the certificate. -// -// X509v3 CRL Number:: Decimal sequence number for the CRL. +// * CRL Extensions: Optional extensions for the CRL. X509v3 Authority Key +// Identifier: Identifies the public key associated with the private key +// used to sign the certificate. X509v3 CRL Number:: Decimal sequence number +// for the CRL. // // * Signature Algorithm: Algorithm used by your private CA to sign the CRL. // // * Signature Value: Signature computed over the CRL. // -// Certificate revocation lists created by ACM PCA are DER-encoded. You can -// use the following OpenSSL command to list a CRL. +// Certificate revocation lists created by ACM Private CA are DER-encoded. You +// can use the following OpenSSL command to list a CRL. // // openssl crl -inform DER -text -in crl_path -noout type CrlConfiguration struct { @@ -2954,8 +2988,8 @@ type CrlConfiguration struct { // Boolean value that specifies whether certificate revocation lists (CRLs) // are enabled. 
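A minimal sketch of the CrlConfiguration described above, wrapped in the RevocationConfiguration that CreateCertificateAuthority and UpdateCertificateAuthority accept; the bucket name and CNAME are hypothetical, and the bucket policy must grant ACM Private CA access as the comment requires:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	// Assumes an existing S3 bucket whose policy lets ACM Private CA
	// read from and write to it and find its location.
	cfg := &acmpca.RevocationConfiguration{
		CrlConfiguration: &acmpca.CrlConfiguration{
			Enabled:          aws.Bool(true),
			ExpirationInDays: aws.Int64(7),                     // CRL validity period
			CustomCname:      aws.String("crl.example.com"),    // hypothetical; hides the bucket name
			S3BucketName:     aws.String("example-crl-bucket"), // hypothetical
		},
	}
	// Pass cfg via CreateCertificateAuthorityInput or UpdateCertificateAuthorityInput.
	fmt.Println(cfg)
}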
You can use this value to enable certificate revocation for - // a new CA when you call the CreateCertificateAuthority operation or for an - // existing CA when you call the UpdateCertificateAuthority operation. + // a new CA when you call the CreateCertificateAuthority action or for an existing + // CA when you call the UpdateCertificateAuthority action. // // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` @@ -2966,9 +3000,9 @@ type CrlConfiguration struct { // Name of the S3 bucket that contains the CRL. If you do not provide a value // for the CustomCname argument, the name of your S3 bucket is placed into the // CRL Distribution Points extension of the issued certificate. You can change - // the name of your bucket by calling the UpdateCertificateAuthority operation. - // You must specify a bucket policy that allows ACM PCA to write the CRL to - // your bucket. + // the name of your bucket by calling the UpdateCertificateAuthority action. + // You must specify a bucket policy that allows ACM Private CA to write the + // CRL to your bucket. S3BucketName *string `min:"3" type:"string"` } @@ -3031,7 +3065,7 @@ type DeleteCertificateAuthorityInput struct { // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. // This must have the following form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3100,10 +3134,10 @@ type DeletePermissionInput struct { _ struct{} `type:"structure"` // The Amazon Resource Number (ARN) of the private CA that issued the permissions. - // You can find the CA's ARN by calling the ListCertificateAuthorities operation. + // You can find the CA's ARN by calling the ListCertificateAuthorities action. // This must have the following form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3114,7 +3148,7 @@ type DeletePermissionInput struct { // Principal is a required field Principal *string `type:"string" required:"true"` - // The AWS account that calls this operation. + // The AWS account that calls this action. SourceAccount *string `min:"12" type:"string"` } @@ -3186,14 +3220,14 @@ type DescribeCertificateAuthorityAuditReportInput struct { _ struct{} `type:"structure"` // The report ID returned by calling the CreateCertificateAuthorityAuditReport - // operation. + // action. // // AuditReportId is a required field AuditReportId *string `min:"36" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the private CA. This must be of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3299,7 +3333,7 @@ type DescribeCertificateAuthorityInput struct { // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. 
// This must be of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3366,7 +3400,7 @@ type GetCertificateAuthorityCertificateInput struct { // The Amazon Resource Name (ARN) of your private CA. This is of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3413,6 +3447,7 @@ type GetCertificateAuthorityCertificateOutput struct { // Base64-encoded certificate chain that includes any intermediate certificates // and chains up to root on-premises certificate that you used to sign your // private CA certificate. The chain does not include your private CA certificate. + // If this is a root CA, the value will be null. CertificateChain *string `type:"string"` } @@ -3442,7 +3477,7 @@ type GetCertificateAuthorityCsrInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority - // operation. This must be of the form: + // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 // @@ -3520,7 +3555,7 @@ type GetCertificateInput struct { // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. // This must be of the form: // - // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012. + // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -3606,8 +3641,8 @@ func (s *GetCertificateOutput) SetCertificateChain(v string) *GetCertificateOutp type ImportCertificateAuthorityCertificateInput struct { _ struct{} `type:"structure"` - // The PEM-encoded certificate for your private CA. This must be signed by using - // your on-premises CA. + // The PEM-encoded certificate for a private CA. This may be a self-signed certificate + // in the case of a root CA, or it may be signed by another CA that you control. // // Certificate is automatically base64 encoded/decoded by the SDK. // @@ -3623,14 +3658,15 @@ type ImportCertificateAuthorityCertificateInput struct { CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` // A PEM-encoded file that contains all of your certificates, other than the - // certificate you're importing, chaining up to your root CA. Your on-premises - // root certificate is the last in the chain, and each certificate in the chain - // signs the one preceding. + // certificate you're importing, chaining up to your root CA. Your ACM Private + // CA-hosted or on-premises root certificate is the last in the chain, and each + // certificate in the chain signs the one preceding. // - // CertificateChain is automatically base64 encoded/decoded by the SDK. + // This parameter must be supplied when you import a subordinate CA. When you + // import a root CA, there is no chain. 
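A minimal sketch of the import call described above; because a self-signed root certificate has no chain, CertificateChain can now be omitted (the required-field validation is dropped in the hunk that follows). The file path and ARN are hypothetical:

package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	cert, err := ioutil.ReadFile("ca-cert.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	in := &acmpca.ImportCertificateAuthorityCertificateInput{
		CertificateAuthorityArn: aws.String("arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example"), // hypothetical
		Certificate:             cert,
		// CertificateChain omitted: a self-signed root CA has no chain,
		// which is why the field is no longer required.
	}
	if _, err := svc.ImportCertificateAuthorityCertificate(in); err != nil {
		log.Fatal(err)
	}
}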
// - // CertificateChain is a required field - CertificateChain []byte `type:"blob" required:"true"` + // CertificateChain is automatically base64 encoded/decoded by the SDK. + CertificateChain []byte `type:"blob"` } // String returns the string representation @@ -3658,9 +3694,6 @@ func (s *ImportCertificateAuthorityCertificateInput) Validate() error { if s.CertificateAuthorityArn != nil && len(*s.CertificateAuthorityArn) < 5 { invalidParams.Add(request.NewErrParamMinLen("CertificateAuthorityArn", 5)) } - if s.CertificateChain == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateChain")) - } if invalidParams.Len() > 0 { return invalidParams @@ -3731,9 +3764,9 @@ type IssueCertificateInput struct { Csr []byte `min:"1" type:"blob" required:"true"` // Custom string that can be used to distinguish between calls to the IssueCertificate - // operation. Idempotency tokens time out after one hour. Therefore, if you - // call IssueCertificate multiple times with the same idempotency token within - // 5 minutes, ACM PCA recognizes that you are requesting only one certificate + // action. Idempotency tokens time out after one hour. Therefore, if you call + // IssueCertificate multiple times with the same idempotency token within 5 + // minutes, ACM Private CA recognizes that you are requesting only one certificate // and will issue only one. If you change the idempotency token for each call, // PCA recognizes that you are requesting multiple certificates. IdempotencyToken *string `min:"1" type:"string"` @@ -3744,6 +3777,28 @@ type IssueCertificateInput struct { // SigningAlgorithm is a required field SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithm"` + // Specifies a custom configuration template to use when issuing a certificate. + // If this parameter is not provided, ACM Private CA defaults to the EndEntityCertificate/V1 + // template. + // + // The following service-owned TemplateArn values are supported by ACM Private + // CA: + // + // * arn:aws:acm-pca:::template/EndEntityCertificate/V1 + // + // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1 + // + // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1 + // + // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen2/V1 + // + // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1 + // + // * arn:aws:acm-pca:::template/RootCACertificate/V1 + // + // For more information, see Using Templates (https://docs.aws.amazon.com/acm-pca/latest/userguide/UsingTemplates.html). + TemplateArn *string `min:"5" type:"string"` + // The type of the validity period. // // Validity is a required field @@ -3781,6 +3836,9 @@ func (s *IssueCertificateInput) Validate() error { if s.SigningAlgorithm == nil { invalidParams.Add(request.NewErrParamRequired("SigningAlgorithm")) } + if s.TemplateArn != nil && len(*s.TemplateArn) < 5 { + invalidParams.Add(request.NewErrParamMinLen("TemplateArn", 5)) + } if s.Validity == nil { invalidParams.Add(request.NewErrParamRequired("Validity")) } @@ -3820,6 +3878,12 @@ func (s *IssueCertificateInput) SetSigningAlgorithm(v string) *IssueCertificateI return s } +// SetTemplateArn sets the TemplateArn field's value. +func (s *IssueCertificateInput) SetTemplateArn(v string) *IssueCertificateInput { + s.TemplateArn = &v + return s +} + // SetValidity sets the Validity field's value. 
func (s *IssueCertificateInput) SetValidity(v *Validity) *IssueCertificateInput { s.Validity = v @@ -3942,10 +4006,10 @@ type ListPermissionsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Number (ARN) of the private CA to inspect. You can find - // the ARN by calling the ListCertificateAuthorities operation. This must be - // of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 + // the ARN by calling the ListCertificateAuthorities action. This must be of + // the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 // You can get a private CA's ARN by running the ListCertificateAuthorities - // operation. + // action. // // CertificateAuthorityArn is a required field CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` @@ -4050,7 +4114,7 @@ type ListTagsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority - // operation. This must be of the form: + // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 // @@ -4152,16 +4216,16 @@ func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { return s } -// Permissions designate which private CA operations can be performed by an -// AWS service or entity. In order for ACM to automatically renew private certificates, +// Permissions designate which private CA actions can be performed by an AWS +// service or entity. In order for ACM to automatically renew private certificates, // you must give the ACM service principal all available permissions (IssueCertificate, // GetCertificate, and ListPermissions). Permissions can be assigned with the -// CreatePermission operation, removed with the DeletePermission operation, -// and listed with the ListPermissions operation. +// CreatePermission action, removed with the DeletePermission action, and listed +// with the ListPermissions action. type Permission struct { _ struct{} `type:"structure"` - // The private CA operations that can be performed by the designated AWS service. + // The private CA actions that can be performed by the designated AWS service. Actions []*string `min:"1" type:"list"` // The Amazon Resource Number (ARN) of the private CA from which the permission @@ -4232,7 +4296,7 @@ type RestoreCertificateAuthorityInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority - // operation. This must be of the form: + // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 // @@ -4287,7 +4351,7 @@ func (s RestoreCertificateAuthorityOutput) GoString() string { } // Certificate revocation information used by the CreateCertificateAuthority -// and UpdateCertificateAuthority operations. Your private certificate authority +// and UpdateCertificateAuthority actions. Your private certificate authority // (CA) can create and maintain a certificate revocation list (CRL). A CRL contains // information about certificates revoked by your CA. For more information, // see RevokeCertificate. @@ -4344,15 +4408,15 @@ type RevokeCertificateInput struct { // Serial number of the certificate to be revoked. This must be in hexadecimal // format. 
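A minimal sketch of the TemplateArn parameter introduced above, using the service-owned RootCACertificate/V1 template to issue a root CA certificate from the CA's own CSR; the CSR path and CA ARN are hypothetical:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

func main() {
	svc := acmpca.New(session.Must(session.NewSession()))
	csr, err := ioutil.ReadFile("ca.csr") // hypothetical; e.g. saved from GetCertificateAuthorityCsr
	if err != nil {
		log.Fatal(err)
	}
	out, err := svc.IssueCertificate(&acmpca.IssueCertificateInput{
		CertificateAuthorityArn: aws.String("arn:aws:acm-pca:us-west-2:111122223333:certificate-authority/example"), // hypothetical
		Csr:                     csr,
		SigningAlgorithm:        aws.String(acmpca.SigningAlgorithmSha256withrsa),
		// One of the service-owned templates listed above; omitting it
		// falls back to the EndEntityCertificate/V1 default.
		TemplateArn: aws.String("arn:aws:acm-pca:::template/RootCACertificate/V1"),
		Validity: &acmpca.Validity{
			Type:  aws.String(acmpca.ValidityPeriodTypeYears),
			Value: aws.Int64(10),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.CertificateArn))
}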
You can retrieve the serial number by calling GetCertificate with // the Amazon Resource Name (ARN) of the certificate you want and the ARN of - // your private CA. The GetCertificate operation retrieves the certificate in - // the PEM format. You can use the following OpenSSL command to list the certificate + // your private CA. The GetCertificate action retrieves the certificate in the + // PEM format. You can use the following OpenSSL command to list the certificate // in text format and copy the hexadecimal serial number. // // openssl x509 -in file_path -text -noout // // You can also copy the serial number from the console or use the DescribeCertificate // (https://docs.aws.amazon.com/acm/latest/APIReference/API_DescribeCertificate.html) - // operation in the AWS Certificate Manager API Reference. + // action in the AWS Certificate Manager API Reference. // // CertificateSerial is a required field CertificateSerial *string `type:"string" required:"true"` @@ -4430,8 +4494,8 @@ func (s RevokeCertificateOutput) GoString() string { // Tags are labels that you can use to identify and organize your private CAs. // Each tag consists of a key and an optional value. You can associate up to // 50 tags with a private CA. To add one or more tags to a private CA, call -// the TagCertificateAuthority operation. To remove a tag, call the UntagCertificateAuthority -// operation. +// the TagCertificateAuthority action. To remove a tag, call the UntagCertificateAuthority +// action. type Tag struct { _ struct{} `type:"structure"` @@ -4735,7 +4799,7 @@ func (s UpdateCertificateAuthorityOutput) GoString() string { // Length of time for which the certificate issued by your private certificate // authority (CA), or by the private CA itself, is valid in days, months, or -// years. You can issue a certificate by calling the IssueCertificate operation. +// years. You can issue a certificate by calling the IssueCertificate action. type Validity struct { _ struct{} `type:"structure"` @@ -4845,6 +4909,9 @@ const ( ) const ( + // CertificateAuthorityTypeRoot is a CertificateAuthorityType enum value + CertificateAuthorityTypeRoot = "ROOT" + // CertificateAuthorityTypeSubordinate is a CertificateAuthorityType enum value CertificateAuthorityTypeSubordinate = "SUBORDINATE" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go index 8c9304cd054..ca360e0e476 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go @@ -3,38 +3,19 @@ // Package acmpca provides the client and types for making API // requests to AWS Certificate Manager Private Certificate Authority. // -// You can use the ACM PCA API to create a private certificate authority (CA). -// You must first call the CreateCertificateAuthority operation. If successful, -// the operation returns an Amazon Resource Name (ARN) for your private CA. -// Use this ARN as input to the GetCertificateAuthorityCsr operation to retrieve -// the certificate signing request (CSR) for your private CA certificate. Sign -// the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, -// and call the ImportCertificateAuthorityCertificate to import your signed -// private CA certificate into ACM PCA. -// -// Use your private CA to issue and revoke certificates. 
These are private certificates -// that identify and secure client computers, servers, applications, services, -// devices, and users over SSLS/TLS connections within your organization. Call -// the IssueCertificate operation to issue a certificate. Call the RevokeCertificate -// operation to revoke a certificate. -// -// Certificates issued by your private CA can be trusted only within your organization, -// not publicly. -// -// Your private CA can optionally create a certificate revocation list (CRL) -// to track the certificates you revoke. To create a CRL, you must specify a -// RevocationConfiguration object when you call the CreateCertificateAuthority -// operation. ACM PCA writes the CRL to an S3 bucket that you specify. You must -// specify a bucket policy that grants ACM PCA write permission. -// -// You can also call the CreateCertificateAuthorityAuditReport to create an -// optional audit report, which enumerates all of the issued, valid, expired, -// and revoked certificates from the CA. -// -// Each ACM PCA API operation has a throttling limit which determines the number -// of times the operation can be called per second. For more information, see -// API Rate Limits in ACM PCA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html#PcaLimits-api) -// in the ACM PCA user guide. +// This is the ACM Private CA API Reference. It provides descriptions, syntax, +// and usage examples for each of the actions and data types involved in creating +// and managing private certificate authorities (CA) for your organization. +// +// The documentation for each action shows the Query API request parameters +// and the XML response. Alternatively, you can use one of the AWS SDKs to access +// an API that's tailored to the programming language or platform that you're +// using. For more information, see AWS SDKs (https://aws.amazon.com/tools/#SDKs). +// +// Each ACM Private CA API action has a throttling limit which determines the +// number of times the action can be called per second. For more information, +// see API Rate Limits in ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html#PcaLimits-api) +// in the ACM Private CA user guide. // // See https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go index d982f197471..c9095bbe2af 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go @@ -39,10 +39,16 @@ const ( // ErrCodeInvalidPolicyException for service response error code // "InvalidPolicyException". // - // The S3 bucket policy is not valid. The policy must give ACM PCA rights to - // read from and write to the bucket and find the bucket location. + // The S3 bucket policy is not valid. The policy must give ACM Private CA rights + // to read from and write to the bucket and find the bucket location. ErrCodeInvalidPolicyException = "InvalidPolicyException" + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // The request action cannot be performed or is prohibited. + ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeInvalidStateException for service response error code // "InvalidStateException". 
// @@ -60,8 +66,8 @@ const ( // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // An ACM PCA limit has been exceeded. See the exception message returned to - // determine the limit that was exceeded. + // An ACM Private CA limit has been exceeded. See the exception message returned + // to determine the limit that was exceeded. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodeMalformedCSRException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go index 6c231c1d700..c041442ae4d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go @@ -46,11 +46,11 @@ const ( // svc := acmpca.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACMPCA { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACMPCA { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACMPCA { svc := &ACMPCA{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-22", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go new file mode 100644 index 00000000000..3766dc2c544 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go @@ -0,0 +1,8124 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package amplify + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateApp = "CreateApp" + +// CreateAppRequest generates a "aws/request.Request" representing the +// client's request for the CreateApp operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateApp for more information on using the CreateApp +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAppRequest method. 
+// req, resp := client.CreateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateApp +func (c *Amplify) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) { + op := &request.Operation{ + Name: opCreateApp, + HTTPMethod: "POST", + HTTPPath: "/apps", + } + + if input == nil { + input = &CreateAppInput{} + } + + output = &CreateAppOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateApp API operation for AWS Amplify. +// +// Creates a new Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation CreateApp for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateApp +func (c *Amplify) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { + req, out := c.CreateAppRequest(input) + return out, req.Send() +} + +// CreateAppWithContext is the same as CreateApp with the addition of +// the ability to pass a context and additional request options. +// +// See CreateApp for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) CreateAppWithContext(ctx aws.Context, input *CreateAppInput, opts ...request.Option) (*CreateAppOutput, error) { + req, out := c.CreateAppRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBranch = "CreateBranch" + +// CreateBranchRequest generates a "aws/request.Request" representing the +// client's request for the CreateBranch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBranch for more information on using the CreateBranch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBranchRequest method. 
+// req, resp := client.CreateBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBranch +func (c *Amplify) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) { + op := &request.Operation{ + Name: opCreateBranch, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/branches", + } + + if input == nil { + input = &CreateBranchInput{} + } + + output = &CreateBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBranch API operation for AWS Amplify. +// +// Creates a new Branch for an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation CreateBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBranch +func (c *Amplify) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) { + req, out := c.CreateBranchRequest(input) + return out, req.Send() +} + +// CreateBranchWithContext is the same as CreateBranch with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBranch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) CreateBranchWithContext(ctx aws.Context, input *CreateBranchInput, opts ...request.Option) (*CreateBranchOutput, error) { + req, out := c.CreateBranchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDeployment for more information on using the CreateDeployment +// API call, and error handling. 
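+//
+// As a hedged illustration (not part of the generated code), a service error
+// returned by this operation can be inspected with the awserr package; the
+// Code value matches the ErrCode* constants defined in this package:
+//
+//    _, err := client.CreateDeployment(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case amplify.ErrCodeLimitExceededException:
+//            // back off: a service limit was hit
+//        case amplify.ErrCodeUnauthorizedException:
+//            // surface a permissions problem to the caller
+//        }
+//    }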
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDeployment +func (c *Amplify) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/branches/{branchName}/deployments", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + output = &CreateDeploymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDeployment API operation for AWS Amplify. +// +// Create a deployment for manual deploy apps. (Apps are not connected to repository) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation CreateDeployment for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDeployment +func (c *Amplify) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + return out, req.Send() +} + +// CreateDeploymentWithContext is the same as CreateDeployment with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDeployment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) CreateDeploymentWithContext(ctx aws.Context, input *CreateDeploymentInput, opts ...request.Option) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDomainAssociation = "CreateDomainAssociation" + +// CreateDomainAssociationRequest generates a "aws/request.Request" representing the +// client's request for the CreateDomainAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
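+//
+// For example (an illustrative sketch; "X-Trace-Id" is a hypothetical header
+// your own tooling would understand), the two-step form allows mutating the
+// underlying HTTP request before it is sent:
+//
+//    req, out := client.CreateDomainAssociationRequest(input)
+//    req.HTTPRequest.Header.Set("X-Trace-Id", "hypothetical-value")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out) // out is only valid once Send has succeeded
+//    }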
+// +// See CreateDomainAssociation for more information on using the CreateDomainAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDomainAssociationRequest method. +// req, resp := client.CreateDomainAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDomainAssociation +func (c *Amplify) CreateDomainAssociationRequest(input *CreateDomainAssociationInput) (req *request.Request, output *CreateDomainAssociationOutput) { + op := &request.Operation{ + Name: opCreateDomainAssociation, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/domains", + } + + if input == nil { + input = &CreateDomainAssociationInput{} + } + + output = &CreateDomainAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDomainAssociation API operation for AWS Amplify. +// +// Create a new DomainAssociation on an App +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation CreateDomainAssociation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDomainAssociation +func (c *Amplify) CreateDomainAssociation(input *CreateDomainAssociationInput) (*CreateDomainAssociationOutput, error) { + req, out := c.CreateDomainAssociationRequest(input) + return out, req.Send() +} + +// CreateDomainAssociationWithContext is the same as CreateDomainAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDomainAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) CreateDomainAssociationWithContext(ctx aws.Context, input *CreateDomainAssociationInput, opts ...request.Option) (*CreateDomainAssociationOutput, error) { + req, out := c.CreateDomainAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateWebhook = "CreateWebhook" + +// CreateWebhookRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebhook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWebhook for more information on using the CreateWebhook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWebhookRequest method. +// req, resp := client.CreateWebhookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateWebhook +func (c *Amplify) CreateWebhookRequest(input *CreateWebhookInput) (req *request.Request, output *CreateWebhookOutput) { + op := &request.Operation{ + Name: opCreateWebhook, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/webhooks", + } + + if input == nil { + input = &CreateWebhookInput{} + } + + output = &CreateWebhookOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWebhook API operation for AWS Amplify. +// +// Create a new webhook on an App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation CreateWebhook for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateWebhook +func (c *Amplify) CreateWebhook(input *CreateWebhookInput) (*CreateWebhookOutput, error) { + req, out := c.CreateWebhookRequest(input) + return out, req.Send() +} + +// CreateWebhookWithContext is the same as CreateWebhook with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWebhook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
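+//
+// A minimal sketch (assuming the standard library context and time packages
+// are imported, and that client and input are already constructed) of
+// bounding this call with a timeout:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := client.CreateWebhookWithContext(ctx, input)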
+func (c *Amplify) CreateWebhookWithContext(ctx aws.Context, input *CreateWebhookInput, opts ...request.Option) (*CreateWebhookOutput, error) { + req, out := c.CreateWebhookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteApp = "DeleteApp" + +// DeleteAppRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApp operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteApp for more information on using the DeleteApp +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAppRequest method. +// req, resp := client.DeleteAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteApp +func (c *Amplify) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { + op := &request.Operation{ + Name: opDeleteApp, + HTTPMethod: "DELETE", + HTTPPath: "/apps/{appId}", + } + + if input == nil { + input = &DeleteAppInput{} + } + + output = &DeleteAppOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteApp API operation for AWS Amplify. +// +// Delete an existing Amplify App by appId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation DeleteApp for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteApp +func (c *Amplify) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) + return out, req.Send() +} + +// DeleteAppWithContext is the same as DeleteApp with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteApp for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Amplify) DeleteAppWithContext(ctx aws.Context, input *DeleteAppInput, opts ...request.Option) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBranch = "DeleteBranch" + +// DeleteBranchRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBranch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBranch for more information on using the DeleteBranch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBranchRequest method. +// req, resp := client.DeleteBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBranch +func (c *Amplify) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Request, output *DeleteBranchOutput) { + op := &request.Operation{ + Name: opDeleteBranch, + HTTPMethod: "DELETE", + HTTPPath: "/apps/{appId}/branches/{branchName}", + } + + if input == nil { + input = &DeleteBranchInput{} + } + + output = &DeleteBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBranch API operation for AWS Amplify. +// +// Deletes a branch for an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation DeleteBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBranch +func (c *Amplify) DeleteBranch(input *DeleteBranchInput) (*DeleteBranchOutput, error) { + req, out := c.DeleteBranchRequest(input) + return out, req.Send() +} + +// DeleteBranchWithContext is the same as DeleteBranch with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBranch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) DeleteBranchWithContext(ctx aws.Context, input *DeleteBranchInput, opts ...request.Option) (*DeleteBranchOutput, error) { + req, out := c.DeleteBranchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDomainAssociation = "DeleteDomainAssociation" + +// DeleteDomainAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDomainAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDomainAssociation for more information on using the DeleteDomainAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDomainAssociationRequest method. +// req, resp := client.DeleteDomainAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteDomainAssociation +func (c *Amplify) DeleteDomainAssociationRequest(input *DeleteDomainAssociationInput) (req *request.Request, output *DeleteDomainAssociationOutput) { + op := &request.Operation{ + Name: opDeleteDomainAssociation, + HTTPMethod: "DELETE", + HTTPPath: "/apps/{appId}/domains/{domainName}", + } + + if input == nil { + input = &DeleteDomainAssociationInput{} + } + + output = &DeleteDomainAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteDomainAssociation API operation for AWS Amplify. +// +// Deletes a DomainAssociation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation DeleteDomainAssociation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. 
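+//
+// As a hedged aside, when the HTTP status code or AWS request ID behind one
+// of these errors is needed, the returned error can additionally be asserted
+// against awserr.RequestFailure:
+//
+//    _, err := client.DeleteDomainAssociation(input)
+//    if reqErr, ok := err.(awserr.RequestFailure); ok {
+//        fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//    }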
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteDomainAssociation +func (c *Amplify) DeleteDomainAssociation(input *DeleteDomainAssociationInput) (*DeleteDomainAssociationOutput, error) { + req, out := c.DeleteDomainAssociationRequest(input) + return out, req.Send() +} + +// DeleteDomainAssociationWithContext is the same as DeleteDomainAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDomainAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) DeleteDomainAssociationWithContext(ctx aws.Context, input *DeleteDomainAssociationInput, opts ...request.Option) (*DeleteDomainAssociationOutput, error) { + req, out := c.DeleteDomainAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteJob = "DeleteJob" + +// DeleteJobRequest generates a "aws/request.Request" representing the +// client's request for the DeleteJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteJob for more information on using the DeleteJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteJobRequest method. +// req, resp := client.DeleteJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteJob +func (c *Amplify) DeleteJobRequest(input *DeleteJobInput) (req *request.Request, output *DeleteJobOutput) { + op := &request.Operation{ + Name: opDeleteJob, + HTTPMethod: "DELETE", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}", + } + + if input == nil { + input = &DeleteJobInput{} + } + + output = &DeleteJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteJob API operation for AWS Amplify. +// +// Delete a job, for an Amplify branch, part of Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation DeleteJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteJob +func (c *Amplify) DeleteJob(input *DeleteJobInput) (*DeleteJobOutput, error) { + req, out := c.DeleteJobRequest(input) + return out, req.Send() +} + +// DeleteJobWithContext is the same as DeleteJob with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) DeleteJobWithContext(ctx aws.Context, input *DeleteJobInput, opts ...request.Option) (*DeleteJobOutput, error) { + req, out := c.DeleteJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteWebhook = "DeleteWebhook" + +// DeleteWebhookRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWebhook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWebhook for more information on using the DeleteWebhook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteWebhookRequest method. +// req, resp := client.DeleteWebhookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteWebhook +func (c *Amplify) DeleteWebhookRequest(input *DeleteWebhookInput) (req *request.Request, output *DeleteWebhookOutput) { + op := &request.Operation{ + Name: opDeleteWebhook, + HTTPMethod: "DELETE", + HTTPPath: "/webhooks/{webhookId}", + } + + if input == nil { + input = &DeleteWebhookInput{} + } + + output = &DeleteWebhookOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteWebhook API operation for AWS Amplify. +// +// Deletes a webhook. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation DeleteWebhook for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteWebhook +func (c *Amplify) DeleteWebhook(input *DeleteWebhookInput) (*DeleteWebhookOutput, error) { + req, out := c.DeleteWebhookRequest(input) + return out, req.Send() +} + +// DeleteWebhookWithContext is the same as DeleteWebhook with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWebhook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) DeleteWebhookWithContext(ctx aws.Context, input *DeleteWebhookInput, opts ...request.Option) (*DeleteWebhookOutput, error) { + req, out := c.DeleteWebhookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGenerateAccessLogs = "GenerateAccessLogs" + +// GenerateAccessLogsRequest generates a "aws/request.Request" representing the +// client's request for the GenerateAccessLogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GenerateAccessLogs for more information on using the GenerateAccessLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GenerateAccessLogsRequest method. +// req, resp := client.GenerateAccessLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GenerateAccessLogs +func (c *Amplify) GenerateAccessLogsRequest(input *GenerateAccessLogsInput) (req *request.Request, output *GenerateAccessLogsOutput) { + op := &request.Operation{ + Name: opGenerateAccessLogs, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/accesslogs", + } + + if input == nil { + input = &GenerateAccessLogsInput{} + } + + output = &GenerateAccessLogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GenerateAccessLogs API operation for AWS Amplify. +// +// Retrieve website access logs for a specific time range via a pre-signed URL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GenerateAccessLogs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. 
+// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GenerateAccessLogs +func (c *Amplify) GenerateAccessLogs(input *GenerateAccessLogsInput) (*GenerateAccessLogsOutput, error) { + req, out := c.GenerateAccessLogsRequest(input) + return out, req.Send() +} + +// GenerateAccessLogsWithContext is the same as GenerateAccessLogs with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateAccessLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GenerateAccessLogsWithContext(ctx aws.Context, input *GenerateAccessLogsInput, opts ...request.Option) (*GenerateAccessLogsOutput, error) { + req, out := c.GenerateAccessLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetApp = "GetApp" + +// GetAppRequest generates a "aws/request.Request" representing the +// client's request for the GetApp operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetApp for more information on using the GetApp +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAppRequest method. +// req, resp := client.GetAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetApp +func (c *Amplify) GetAppRequest(input *GetAppInput) (req *request.Request, output *GetAppOutput) { + op := &request.Operation{ + Name: opGetApp, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}", + } + + if input == nil { + input = &GetAppInput{} + } + + output = &GetAppOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetApp API operation for AWS Amplify. +// +// Retrieves an existing Amplify App by appId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetApp for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
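+//
+// An illustrative per-request option (a sketch using helpers from the aws and
+// request packages) that enables verbose wire logging for a single GetApp
+// call:
+//
+//    out, err := client.GetAppWithContext(aws.BackgroundContext(), input,
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody))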
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetApp +func (c *Amplify) GetApp(input *GetAppInput) (*GetAppOutput, error) { + req, out := c.GetAppRequest(input) + return out, req.Send() +} + +// GetAppWithContext is the same as GetApp with the addition of +// the ability to pass a context and additional request options. +// +// See GetApp for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetAppWithContext(ctx aws.Context, input *GetAppInput, opts ...request.Option) (*GetAppOutput, error) { + req, out := c.GetAppRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetArtifactUrl = "GetArtifactUrl" + +// GetArtifactUrlRequest generates a "aws/request.Request" representing the +// client's request for the GetArtifactUrl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetArtifactUrl for more information on using the GetArtifactUrl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetArtifactUrlRequest method. +// req, resp := client.GetArtifactUrlRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetArtifactUrl +func (c *Amplify) GetArtifactUrlRequest(input *GetArtifactUrlInput) (req *request.Request, output *GetArtifactUrlOutput) { + op := &request.Operation{ + Name: opGetArtifactUrl, + HTTPMethod: "GET", + HTTPPath: "/artifacts/{artifactId}", + } + + if input == nil { + input = &GetArtifactUrlInput{} + } + + output = &GetArtifactUrlOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetArtifactUrl API operation for AWS Amplify. +// +// Retrieves artifact info that corresponds to a artifactId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetArtifactUrl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetArtifactUrl +func (c *Amplify) GetArtifactUrl(input *GetArtifactUrlInput) (*GetArtifactUrlOutput, error) { + req, out := c.GetArtifactUrlRequest(input) + return out, req.Send() +} + +// GetArtifactUrlWithContext is the same as GetArtifactUrl with the addition of +// the ability to pass a context and additional request options. +// +// See GetArtifactUrl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetArtifactUrlWithContext(ctx aws.Context, input *GetArtifactUrlInput, opts ...request.Option) (*GetArtifactUrlOutput, error) { + req, out := c.GetArtifactUrlRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBranch = "GetBranch" + +// GetBranchRequest generates a "aws/request.Request" representing the +// client's request for the GetBranch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBranch for more information on using the GetBranch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBranchRequest method. +// req, resp := client.GetBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBranch +func (c *Amplify) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) { + op := &request.Operation{ + Name: opGetBranch, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/branches/{branchName}", + } + + if input == nil { + input = &GetBranchInput{} + } + + output = &GetBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBranch API operation for AWS Amplify. +// +// Retrieves a branch for an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBranch +func (c *Amplify) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) { + req, out := c.GetBranchRequest(input) + return out, req.Send() +} + +// GetBranchWithContext is the same as GetBranch with the addition of +// the ability to pass a context and additional request options. +// +// See GetBranch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetBranchWithContext(ctx aws.Context, input *GetBranchInput, opts ...request.Option) (*GetBranchOutput, error) { + req, out := c.GetBranchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDomainAssociation = "GetDomainAssociation" + +// GetDomainAssociationRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDomainAssociation for more information on using the GetDomainAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDomainAssociationRequest method. +// req, resp := client.GetDomainAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetDomainAssociation +func (c *Amplify) GetDomainAssociationRequest(input *GetDomainAssociationInput) (req *request.Request, output *GetDomainAssociationOutput) { + op := &request.Operation{ + Name: opGetDomainAssociation, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/domains/{domainName}", + } + + if input == nil { + input = &GetDomainAssociationInput{} + } + + output = &GetDomainAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDomainAssociation API operation for AWS Amplify. +// +// Retrieves domain info that corresponds to an appId and domainName. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetDomainAssociation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetDomainAssociation +func (c *Amplify) GetDomainAssociation(input *GetDomainAssociationInput) (*GetDomainAssociationOutput, error) { + req, out := c.GetDomainAssociationRequest(input) + return out, req.Send() +} + +// GetDomainAssociationWithContext is the same as GetDomainAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See GetDomainAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetDomainAssociationWithContext(ctx aws.Context, input *GetDomainAssociationInput, opts ...request.Option) (*GetDomainAssociationOutput, error) { + req, out := c.GetDomainAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetJob = "GetJob" + +// GetJobRequest generates a "aws/request.Request" representing the +// client's request for the GetJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetJob for more information on using the GetJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetJobRequest method. +// req, resp := client.GetJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetJob +func (c *Amplify) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { + op := &request.Operation{ + Name: opGetJob, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}", + } + + if input == nil { + input = &GetJobInput{} + } + + output = &GetJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetJob API operation for AWS Amplify. +// +// Get a job for a branch, part of an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetJob +func (c *Amplify) GetJob(input *GetJobInput) (*GetJobOutput, error) { + req, out := c.GetJobRequest(input) + return out, req.Send() +} + +// GetJobWithContext is the same as GetJob with the addition of +// the ability to pass a context and additional request options. +// +// See GetJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) { + req, out := c.GetJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetWebhook = "GetWebhook" + +// GetWebhookRequest generates a "aws/request.Request" representing the +// client's request for the GetWebhook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetWebhook for more information on using the GetWebhook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetWebhookRequest method. +// req, resp := client.GetWebhookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetWebhook +func (c *Amplify) GetWebhookRequest(input *GetWebhookInput) (req *request.Request, output *GetWebhookOutput) { + op := &request.Operation{ + Name: opGetWebhook, + HTTPMethod: "GET", + HTTPPath: "/webhooks/{webhookId}", + } + + if input == nil { + input = &GetWebhookInput{} + } + + output = &GetWebhookOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetWebhook API operation for AWS Amplify. +// +// Retrieves webhook info that corresponds to a webhookId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation GetWebhook for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetWebhook +func (c *Amplify) GetWebhook(input *GetWebhookInput) (*GetWebhookOutput, error) { + req, out := c.GetWebhookRequest(input) + return out, req.Send() +} + +// GetWebhookWithContext is the same as GetWebhook with the addition of +// the ability to pass a context and additional request options. +// +// See GetWebhook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) GetWebhookWithContext(ctx aws.Context, input *GetWebhookInput, opts ...request.Option) (*GetWebhookOutput, error) { + req, out := c.GetWebhookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListApps = "ListApps" + +// ListAppsRequest generates a "aws/request.Request" representing the +// client's request for the ListApps operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListApps for more information on using the ListApps +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAppsRequest method. +// req, resp := client.ListAppsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps +func (c *Amplify) ListAppsRequest(input *ListAppsInput) (req *request.Request, output *ListAppsOutput) { + op := &request.Operation{ + Name: opListApps, + HTTPMethod: "GET", + HTTPPath: "/apps", + } + + if input == nil { + input = &ListAppsInput{} + } + + output = &ListAppsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListApps API operation for AWS Amplify. +// +// Lists existing Amplify Apps. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListApps for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
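+//
+// A minimal usage sketch, not part of the generated API: it assumes the
+// aws/session and aws/awserr packages and illustrative sess/svc names.
+//
+//    sess := session.Must(session.NewSession())
+//    svc := amplify.New(sess)
+//
+//    out, err := svc.ListApps(&amplify.ListAppsInput{})
+//    if aerr, ok := err.(awserr.Error); ok {
+//        // Inspect service errors via awserr.Error's Code and Message.
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    } else if err == nil {
+//        fmt.Println(out.Apps)
+//    }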
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps +func (c *Amplify) ListApps(input *ListAppsInput) (*ListAppsOutput, error) { + req, out := c.ListAppsRequest(input) + return out, req.Send() +} + +// ListAppsWithContext is the same as ListApps with the addition of +// the ability to pass a context and additional request options. +// +// See ListApps for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListAppsWithContext(ctx aws.Context, input *ListAppsInput, opts ...request.Option) (*ListAppsOutput, error) { + req, out := c.ListAppsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListArtifacts = "ListArtifacts" + +// ListArtifactsRequest generates a "aws/request.Request" representing the +// client's request for the ListArtifacts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListArtifacts for more information on using the ListArtifacts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListArtifactsRequest method. +// req, resp := client.ListArtifactsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListArtifacts +func (c *Amplify) ListArtifactsRequest(input *ListArtifactsInput) (req *request.Request, output *ListArtifactsOutput) { + op := &request.Operation{ + Name: opListArtifacts, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}/artifacts", + } + + if input == nil { + input = &ListArtifactsInput{} + } + + output = &ListArtifactsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListArtifacts API operation for AWS Amplify. +// +// List artifacts with an app, a branch, a job and an artifact type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListArtifacts for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. 
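+//
+// A hedged sketch of the context-aware call; the 30-second timeout and the
+// svc name are assumptions (any context.Context satisfies aws.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//
+//    out, err := svc.ListArtifactsWithContext(ctx, &amplify.ListArtifactsInput{
+//        AppId:      aws.String("appId"),
+//        BranchName: aws.String("branchName"),
+//        JobId:      aws.String("jobId"),
+//    })
+//    if err != nil {
+//        fmt.Println(err) // includes context deadline / cancellation errors
+//    } else {
+//        fmt.Println(out)
+//    }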
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListArtifacts +func (c *Amplify) ListArtifacts(input *ListArtifactsInput) (*ListArtifactsOutput, error) { + req, out := c.ListArtifactsRequest(input) + return out, req.Send() +} + +// ListArtifactsWithContext is the same as ListArtifacts with the addition of +// the ability to pass a context and additional request options. +// +// See ListArtifacts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListArtifactsWithContext(ctx aws.Context, input *ListArtifactsInput, opts ...request.Option) (*ListArtifactsOutput, error) { + req, out := c.ListArtifactsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBranches = "ListBranches" + +// ListBranchesRequest generates a "aws/request.Request" representing the +// client's request for the ListBranches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBranches for more information on using the ListBranches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBranchesRequest method. +// req, resp := client.ListBranchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches +func (c *Amplify) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { + op := &request.Operation{ + Name: opListBranches, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/branches", + } + + if input == nil { + input = &ListBranchesInput{} + } + + output = &ListBranchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBranches API operation for AWS Amplify. +// +// Lists branches for an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListBranches for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
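+//
+// A hedged pagination sketch; the MaxResults/NextToken/Branches field names
+// are taken from this operation's request and response shapes but should be
+// verified against the shipped types:
+//
+//    input := &amplify.ListBranchesInput{
+//        AppId:      aws.String("appId"),
+//        MaxResults: aws.Int64(10),
+//    }
+//    for {
+//        out, err := svc.ListBranches(input)
+//        if err != nil {
+//            break
+//        }
+//        fmt.Println(out.Branches)
+//        if out.NextToken == nil {
+//            break // no more pages
+//        }
+//        input.NextToken = out.NextToken // fetch the next page
+//    }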
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches +func (c *Amplify) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) { + req, out := c.ListBranchesRequest(input) + return out, req.Send() +} + +// ListBranchesWithContext is the same as ListBranches with the addition of +// the ability to pass a context and additional request options. +// +// See ListBranches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListBranchesWithContext(ctx aws.Context, input *ListBranchesInput, opts ...request.Option) (*ListBranchesOutput, error) { + req, out := c.ListBranchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDomainAssociations = "ListDomainAssociations" + +// ListDomainAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the ListDomainAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDomainAssociations for more information on using the ListDomainAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDomainAssociationsRequest method. +// req, resp := client.ListDomainAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations +func (c *Amplify) ListDomainAssociationsRequest(input *ListDomainAssociationsInput) (req *request.Request, output *ListDomainAssociationsOutput) { + op := &request.Operation{ + Name: opListDomainAssociations, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/domains", + } + + if input == nil { + input = &ListDomainAssociationsInput{} + } + + output = &ListDomainAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDomainAssociations API operation for AWS Amplify. +// +// List domains with an app +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListDomainAssociations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
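+//
+// Request options (opts ...request.Option) hook into the request lifecycle;
+// a hedged sketch injecting a custom header (the header name is made up,
+// and ctx/svc are as in the earlier sketches):
+//
+//    withTrace := func(r *request.Request) {
+//        r.HTTPRequest.Header.Set("X-Example-Trace", "demo")
+//    }
+//    out, err := svc.ListDomainAssociationsWithContext(ctx,
+//        &amplify.ListDomainAssociationsInput{AppId: aws.String("appId")},
+//        withTrace)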
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations +func (c *Amplify) ListDomainAssociations(input *ListDomainAssociationsInput) (*ListDomainAssociationsOutput, error) { + req, out := c.ListDomainAssociationsRequest(input) + return out, req.Send() +} + +// ListDomainAssociationsWithContext is the same as ListDomainAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See ListDomainAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListDomainAssociationsWithContext(ctx aws.Context, input *ListDomainAssociationsInput, opts ...request.Option) (*ListDomainAssociationsOutput, error) { + req, out := c.ListDomainAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListJobs for more information on using the ListJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListJobsRequest method. +// req, resp := client.ListJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListJobs +func (c *Amplify) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { + op := &request.Operation{ + Name: opListJobs, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs", + } + + if input == nil { + input = &ListJobsInput{} + } + + output = &ListJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListJobs API operation for AWS Amplify. +// +// List Jobs for a branch, part of an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. 
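+//
+// The error codes listed above correspond to ErrCode* constants generated
+// in this package; a hedged handling sketch:
+//
+//    _, err := svc.ListJobs(&amplify.ListJobsInput{
+//        AppId:      aws.String("appId"),
+//        BranchName: aws.String("branchName"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case amplify.ErrCodeLimitExceededException:
+//            // Back off before retrying.
+//        case amplify.ErrCodeUnauthorizedException:
+//            // Verify credentials and IAM permissions.
+//        }
+//    }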
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListJobs +func (c *Amplify) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + return out, req.Send() +} + +// ListJobsWithContext is the same as ListJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListTagsForResource +func (c *Amplify) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Amplify. +// +// List tags for resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Exception thrown when an operation fails due to non-existent resource. 
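+//
+// A hedged sketch; the ARN is a placeholder, not a real resource:
+//
+//    out, err := svc.ListTagsForResource(&amplify.ListTagsForResourceInput{
+//        ResourceArn: aws.String("arn:aws:amplify:us-west-2:123456789012:apps/appId"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Tags) // map[string]*string of tag key/value pairs
+//    }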
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListTagsForResource +func (c *Amplify) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListWebhooks = "ListWebhooks" + +// ListWebhooksRequest generates a "aws/request.Request" representing the +// client's request for the ListWebhooks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWebhooks for more information on using the ListWebhooks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWebhooksRequest method. +// req, resp := client.ListWebhooksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListWebhooks +func (c *Amplify) ListWebhooksRequest(input *ListWebhooksInput) (req *request.Request, output *ListWebhooksOutput) { + op := &request.Operation{ + Name: opListWebhooks, + HTTPMethod: "GET", + HTTPPath: "/apps/{appId}/webhooks", + } + + if input == nil { + input = &ListWebhooksInput{} + } + + output = &ListWebhooksOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWebhooks API operation for AWS Amplify. +// +// List webhooks with an app. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation ListWebhooks for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. 
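+//
+// The two-step Request/Send form shown above defers the network call; a
+// hedged sketch (ctx and svc as in the earlier sketches):
+//
+//    req, resp := svc.ListWebhooksRequest(&amplify.ListWebhooksInput{
+//        AppId: aws.String("appId"),
+//    })
+//    req.SetContext(ctx) // optional: attach a context before sending
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp.Webhooks)
+//    }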
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListWebhooks +func (c *Amplify) ListWebhooks(input *ListWebhooksInput) (*ListWebhooksOutput, error) { + req, out := c.ListWebhooksRequest(input) + return out, req.Send() +} + +// ListWebhooksWithContext is the same as ListWebhooks with the addition of +// the ability to pass a context and additional request options. +// +// See ListWebhooks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) ListWebhooksWithContext(ctx aws.Context, input *ListWebhooksInput, opts ...request.Option) (*ListWebhooksOutput, error) { + req, out := c.ListWebhooksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeployment = "StartDeployment" + +// StartDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the StartDeployment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeployment for more information on using the StartDeployment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartDeploymentRequest method. +// req, resp := client.StartDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartDeployment +func (c *Amplify) StartDeploymentRequest(input *StartDeploymentInput) (req *request.Request, output *StartDeploymentOutput) { + op := &request.Operation{ + Name: opStartDeployment, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/branches/{branchName}/deployments/start", + } + + if input == nil { + input = &StartDeploymentInput{} + } + + output = &StartDeploymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartDeployment API operation for AWS Amplify. +// +// Start a deployment for manual deploy apps. (Apps are not connected to repository) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation StartDeployment for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. 
+// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartDeployment +func (c *Amplify) StartDeployment(input *StartDeploymentInput) (*StartDeploymentOutput, error) { + req, out := c.StartDeploymentRequest(input) + return out, req.Send() +} + +// StartDeploymentWithContext is the same as StartDeployment with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeployment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) StartDeploymentWithContext(ctx aws.Context, input *StartDeploymentInput, opts ...request.Option) (*StartDeploymentOutput, error) { + req, out := c.StartDeploymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartJob = "StartJob" + +// StartJobRequest generates a "aws/request.Request" representing the +// client's request for the StartJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartJob for more information on using the StartJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartJobRequest method. +// req, resp := client.StartJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartJob +func (c *Amplify) StartJobRequest(input *StartJobInput) (req *request.Request, output *StartJobOutput) { + op := &request.Operation{ + Name: opStartJob, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs", + } + + if input == nil { + input = &StartJobInput{} + } + + output = &StartJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartJob API operation for AWS Amplify. +// +// Starts a new job for a branch, part of an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation StartJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
+// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartJob +func (c *Amplify) StartJob(input *StartJobInput) (*StartJobOutput, error) { + req, out := c.StartJobRequest(input) + return out, req.Send() +} + +// StartJobWithContext is the same as StartJob with the addition of +// the ability to pass a context and additional request options. +// +// See StartJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) StartJobWithContext(ctx aws.Context, input *StartJobInput, opts ...request.Option) (*StartJobOutput, error) { + req, out := c.StartJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopJob = "StopJob" + +// StopJobRequest generates a "aws/request.Request" representing the +// client's request for the StopJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopJob for more information on using the StopJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopJobRequest method. +// req, resp := client.StopJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StopJob +func (c *Amplify) StopJobRequest(input *StopJobInput) (req *request.Request, output *StopJobOutput) { + op := &request.Operation{ + Name: opStopJob, + HTTPMethod: "DELETE", + HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}/stop", + } + + if input == nil { + input = &StopJobInput{} + } + + output = &StopJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopJob API operation for AWS Amplify. +// +// Stop a job that is in progress, for an Amplify branch, part of Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation StopJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
+// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Exception thrown when a resource could not be created because of service +// limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StopJob +func (c *Amplify) StopJob(input *StopJobInput) (*StopJobOutput, error) { + req, out := c.StopJobRequest(input) + return out, req.Send() +} + +// StopJobWithContext is the same as StopJob with the addition of +// the ability to pass a context and additional request options. +// +// See StopJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) StopJobWithContext(ctx aws.Context, input *StopJobInput, opts ...request.Option) (*StopJobOutput, error) { + req, out := c.StopJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/TagResource +func (c *Amplify) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Amplify. +// +// Tag resource with tag key and value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Exception thrown when an operation fails due to non-existent resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/TagResource +func (c *Amplify) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UntagResource +func (c *Amplify) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Amplify. +// +// Untag resource with resourceArn. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Exception thrown when an operation fails due to non-existent resource. 
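+//
+// A hedged sketch pairing TagResource with UntagResource; the ARN and the
+// "env" tag key are placeholders:
+//
+//    arn := aws.String("arn:aws:amplify:us-west-2:123456789012:apps/appId")
+//    _, _ = svc.TagResource(&amplify.TagResourceInput{
+//        ResourceArn: arn,
+//        Tags:        map[string]*string{"env": aws.String("dev")},
+//    })
+//    _, _ = svc.UntagResource(&amplify.UntagResourceInput{
+//        ResourceArn: arn,
+//        TagKeys:     []*string{aws.String("env")},
+//    })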
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UntagResource +func (c *Amplify) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateApp = "UpdateApp" + +// UpdateAppRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApp operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApp for more information on using the UpdateApp +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAppRequest method. +// req, resp := client.UpdateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateApp +func (c *Amplify) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) { + op := &request.Operation{ + Name: opUpdateApp, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}", + } + + if input == nil { + input = &UpdateAppInput{} + } + + output = &UpdateAppOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApp API operation for AWS Amplify. +// +// Updates an existing Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation UpdateApp for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
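+//
+// Inputs also expose fluent Set* helpers mirroring the field setters defined
+// later in this file; a hedged sketch (setter names assumed to follow the
+// generated pattern):
+//
+//    input := (&amplify.UpdateAppInput{}).
+//        SetAppId("appId").
+//        SetDescription("updated description")
+//    out, err := svc.UpdateApp(input)
+//    if err == nil {
+//        fmt.Println(out.App)
+//    }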
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateApp +func (c *Amplify) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { + req, out := c.UpdateAppRequest(input) + return out, req.Send() +} + +// UpdateAppWithContext is the same as UpdateApp with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApp for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) UpdateAppWithContext(ctx aws.Context, input *UpdateAppInput, opts ...request.Option) (*UpdateAppOutput, error) { + req, out := c.UpdateAppRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBranch = "UpdateBranch" + +// UpdateBranchRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBranch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBranch for more information on using the UpdateBranch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBranchRequest method. +// req, resp := client.UpdateBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch +func (c *Amplify) UpdateBranchRequest(input *UpdateBranchInput) (req *request.Request, output *UpdateBranchOutput) { + op := &request.Operation{ + Name: opUpdateBranch, + HTTPMethod: "POST", + HTTPPath: "/apps/{appId}/branches/{branchName}", + } + + if input == nil { + input = &UpdateBranchInput{} + } + + output = &UpdateBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBranch API operation for AWS Amplify. +// +// Updates a branch for an Amplify App. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation UpdateBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. 
+//
+// * ErrCodeDependentServiceFailureException "DependentServiceFailureException"
+// Exception thrown when an operation fails due to a dependent service throwing
+// an exception.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch
+func (c *Amplify) UpdateBranch(input *UpdateBranchInput) (*UpdateBranchOutput, error) {
+	req, out := c.UpdateBranchRequest(input)
+	return out, req.Send()
+}
+
+// UpdateBranchWithContext is the same as UpdateBranch with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateBranch for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Amplify) UpdateBranchWithContext(ctx aws.Context, input *UpdateBranchInput, opts ...request.Option) (*UpdateBranchOutput, error) {
+	req, out := c.UpdateBranchRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateDomainAssociation = "UpdateDomainAssociation"
+
+// UpdateDomainAssociationRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDomainAssociation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDomainAssociation for more information on using the UpdateDomainAssociation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the UpdateDomainAssociationRequest method.
+//    req, resp := client.UpdateDomainAssociationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateDomainAssociation
+func (c *Amplify) UpdateDomainAssociationRequest(input *UpdateDomainAssociationInput) (req *request.Request, output *UpdateDomainAssociationOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDomainAssociation,
+		HTTPMethod: "POST",
+		HTTPPath:   "/apps/{appId}/domains/{domainName}",
+	}
+
+	if input == nil {
+		input = &UpdateDomainAssociationInput{}
+	}
+
+	output = &UpdateDomainAssociationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateDomainAssociation API operation for AWS Amplify.
+//
+// Updates a DomainAssociation on an App.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Amplify's
+// API operation UpdateDomainAssociation for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBadRequestException "BadRequestException"
+// Exception thrown when a request contains unexpected data.
+//
+// * ErrCodeUnauthorizedException "UnauthorizedException"
+// Exception thrown when an operation fails due to a lack of access.
+// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateDomainAssociation +func (c *Amplify) UpdateDomainAssociation(input *UpdateDomainAssociationInput) (*UpdateDomainAssociationOutput, error) { + req, out := c.UpdateDomainAssociationRequest(input) + return out, req.Send() +} + +// UpdateDomainAssociationWithContext is the same as UpdateDomainAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDomainAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) UpdateDomainAssociationWithContext(ctx aws.Context, input *UpdateDomainAssociationInput, opts ...request.Option) (*UpdateDomainAssociationOutput, error) { + req, out := c.UpdateDomainAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateWebhook = "UpdateWebhook" + +// UpdateWebhookRequest generates a "aws/request.Request" representing the +// client's request for the UpdateWebhook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateWebhook for more information on using the UpdateWebhook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateWebhookRequest method. +// req, resp := client.UpdateWebhookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateWebhook +func (c *Amplify) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request.Request, output *UpdateWebhookOutput) { + op := &request.Operation{ + Name: opUpdateWebhook, + HTTPMethod: "POST", + HTTPPath: "/webhooks/{webhookId}", + } + + if input == nil { + input = &UpdateWebhookInput{} + } + + output = &UpdateWebhookOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateWebhook API operation for AWS Amplify. +// +// Update a webhook. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Amplify's +// API operation UpdateWebhook for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Exception thrown when a request contains unexpected data. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Exception thrown when an operation fails due to a lack of access. +// +// * ErrCodeNotFoundException "NotFoundException" +// Exception thrown when an entity has not been found during an operation. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// Exception thrown when the service fails to perform an operation due to an +// internal issue. +// +// * ErrCodeDependentServiceFailureException "DependentServiceFailureException" +// Exception thrown when an operation fails due to a dependent service throwing +// an exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateWebhook +func (c *Amplify) UpdateWebhook(input *UpdateWebhookInput) (*UpdateWebhookOutput, error) { + req, out := c.UpdateWebhookRequest(input) + return out, req.Send() +} + +// UpdateWebhookWithContext is the same as UpdateWebhook with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWebhook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Amplify) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebhookInput, opts ...request.Option) (*UpdateWebhookOutput, error) { + req, out := c.UpdateWebhookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Amplify App represents different branches of a repository for building, deploying, +// and hosting. +type App struct { + _ struct{} `type:"structure"` + + // ARN for the Amplify App. + // + // AppArn is a required field + AppArn *string `locationName:"appArn" type:"string" required:"true"` + + // Unique Id for the Amplify App. + // + // AppId is a required field + AppId *string `locationName:"appId" min:"1" type:"string" required:"true"` + + // Automated branch creation config for the Amplify App. + AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` + + // Automated branch creation glob patterns for the Amplify App. + AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` + + // Basic Authorization credentials for branches for the Amplify App. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + + // BuildSpec content for Amplify App. + BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` + + // Create date / time for the Amplify App. + // + // CreateTime is a required field + CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` + + // Custom redirect / rewrite rules for the Amplify App. + CustomRules []*CustomRule `locationName:"customRules" type:"list"` + + // Default domain for the Amplify App. + // + // DefaultDomain is a required field + DefaultDomain *string `locationName:"defaultDomain" min:"1" type:"string" required:"true"` + + // Description for the Amplify App. + // + // Description is a required field + Description *string `locationName:"description" type:"string" required:"true"` + + // Enables automated branch creation for the Amplify App. 
+ EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` + + // Enables Basic Authorization for branches for the Amplify App. + // + // EnableBasicAuth is a required field + EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"` + + // Enables auto-building of branches for the Amplify App. + // + // EnableBranchAutoBuild is a required field + EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean" required:"true"` + + // Environment Variables for the Amplify App. + // + // EnvironmentVariables is a required field + EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"` + + // IAM service role ARN for the Amplify App. + IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` + + // Name for the Amplify App. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Platform for the Amplify App. + // + // Platform is a required field + Platform *string `locationName:"platform" type:"string" required:"true" enum:"Platform"` + + // Structure with Production Branch information. + ProductionBranch *ProductionBranch `locationName:"productionBranch" type:"structure"` + + // Repository for the Amplify App. + // + // Repository is a required field + Repository *string `locationName:"repository" type:"string" required:"true"` + + // Tag for Amplify App. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + + // Update date / time for the Amplify App. + // + // UpdateTime is a required field + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s App) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s App) GoString() string { + return s.String() +} + +// SetAppArn sets the AppArn field's value. +func (s *App) SetAppArn(v string) *App { + s.AppArn = &v + return s +} + +// SetAppId sets the AppId field's value. +func (s *App) SetAppId(v string) *App { + s.AppId = &v + return s +} + +// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value. +func (s *App) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *App { + s.AutoBranchCreationConfig = v + return s +} + +// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value. +func (s *App) SetAutoBranchCreationPatterns(v []*string) *App { + s.AutoBranchCreationPatterns = v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *App) SetBasicAuthCredentials(v string) *App { + s.BasicAuthCredentials = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *App) SetBuildSpec(v string) *App { + s.BuildSpec = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *App) SetCreateTime(v time.Time) *App { + s.CreateTime = &v + return s +} + +// SetCustomRules sets the CustomRules field's value. +func (s *App) SetCustomRules(v []*CustomRule) *App { + s.CustomRules = v + return s +} + +// SetDefaultDomain sets the DefaultDomain field's value. +func (s *App) SetDefaultDomain(v string) *App { + s.DefaultDomain = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *App) SetDescription(v string) *App { + s.Description = &v + return s +} + +// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value. 
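+// Generated setters return the receiver, so configuration can be chained; an
+// illustrative sketch (editor's addition, values are placeholders):
+//
+//    app := (&App{}).SetName("my-app").SetPlatform("WEB")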
+func (s *App) SetEnableAutoBranchCreation(v bool) *App {
+	s.EnableAutoBranchCreation = &v
+	return s
+}
+
+// SetEnableBasicAuth sets the EnableBasicAuth field's value.
+func (s *App) SetEnableBasicAuth(v bool) *App {
+	s.EnableBasicAuth = &v
+	return s
+}
+
+// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value.
+func (s *App) SetEnableBranchAutoBuild(v bool) *App {
+	s.EnableBranchAutoBuild = &v
+	return s
+}
+
+// SetEnvironmentVariables sets the EnvironmentVariables field's value.
+func (s *App) SetEnvironmentVariables(v map[string]*string) *App {
+	s.EnvironmentVariables = v
+	return s
+}
+
+// SetIamServiceRoleArn sets the IamServiceRoleArn field's value.
+func (s *App) SetIamServiceRoleArn(v string) *App {
+	s.IamServiceRoleArn = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *App) SetName(v string) *App {
+	s.Name = &v
+	return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *App) SetPlatform(v string) *App {
+	s.Platform = &v
+	return s
+}
+
+// SetProductionBranch sets the ProductionBranch field's value.
+func (s *App) SetProductionBranch(v *ProductionBranch) *App {
+	s.ProductionBranch = v
+	return s
+}
+
+// SetRepository sets the Repository field's value.
+func (s *App) SetRepository(v string) *App {
+	s.Repository = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *App) SetTags(v map[string]*string) *App {
+	s.Tags = v
+	return s
+}
+
+// SetUpdateTime sets the UpdateTime field's value.
+func (s *App) SetUpdateTime(v time.Time) *App {
+	s.UpdateTime = &v
+	return s
+}
+
+// Structure for an artifact.
+type Artifact struct {
+	_ struct{} `type:"structure"`
+
+	// File name for the artifact.
+	//
+	// ArtifactFileName is a required field
+	ArtifactFileName *string `locationName:"artifactFileName" type:"string" required:"true"`
+
+	// Unique Id for an artifact.
+	//
+	// ArtifactId is a required field
+	ArtifactId *string `locationName:"artifactId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Artifact) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Artifact) GoString() string {
+	return s.String()
+}
+
+// SetArtifactFileName sets the ArtifactFileName field's value.
+func (s *Artifact) SetArtifactFileName(v string) *Artifact {
+	s.ArtifactFileName = &v
+	return s
+}
+
+// SetArtifactId sets the ArtifactId field's value.
+func (s *Artifact) SetArtifactId(v string) *Artifact {
+	s.ArtifactId = &v
+	return s
+}
+
+// Structure with auto branch creation config.
+type AutoBranchCreationConfig struct {
+	_ struct{} `type:"structure"`
+
+	// Basic Authorization credentials for the auto created branch.
+	BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"`
+
+	// BuildSpec for the auto created branch.
+	BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
+
+	// Enables auto building for the auto created branch.
+	EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"`
+
+	// Enables Basic Auth for the auto created branch.
+	EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
+
+	// Enables Pull Request Preview for the auto created branch.
+	EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"`
+
+	// Environment Variables for the auto created branch.
+	EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
+
+	// Framework for the auto created branch.
+ Framework *string `locationName:"framework" type:"string"` + + // The Amplify Environment name for the pull request. + PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` + + // Stage for the auto created branch. + Stage *string `locationName:"stage" type:"string" enum:"Stage"` +} + +// String returns the string representation +func (s AutoBranchCreationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoBranchCreationConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoBranchCreationConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoBranchCreationConfig"} + if s.BuildSpec != nil && len(*s.BuildSpec) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *AutoBranchCreationConfig) SetBasicAuthCredentials(v string) *AutoBranchCreationConfig { + s.BasicAuthCredentials = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *AutoBranchCreationConfig) SetBuildSpec(v string) *AutoBranchCreationConfig { + s.BuildSpec = &v + return s +} + +// SetEnableAutoBuild sets the EnableAutoBuild field's value. +func (s *AutoBranchCreationConfig) SetEnableAutoBuild(v bool) *AutoBranchCreationConfig { + s.EnableAutoBuild = &v + return s +} + +// SetEnableBasicAuth sets the EnableBasicAuth field's value. +func (s *AutoBranchCreationConfig) SetEnableBasicAuth(v bool) *AutoBranchCreationConfig { + s.EnableBasicAuth = &v + return s +} + +// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. +func (s *AutoBranchCreationConfig) SetEnablePullRequestPreview(v bool) *AutoBranchCreationConfig { + s.EnablePullRequestPreview = &v + return s +} + +// SetEnvironmentVariables sets the EnvironmentVariables field's value. +func (s *AutoBranchCreationConfig) SetEnvironmentVariables(v map[string]*string) *AutoBranchCreationConfig { + s.EnvironmentVariables = v + return s +} + +// SetFramework sets the Framework field's value. +func (s *AutoBranchCreationConfig) SetFramework(v string) *AutoBranchCreationConfig { + s.Framework = &v + return s +} + +// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value. +func (s *AutoBranchCreationConfig) SetPullRequestEnvironmentName(v string) *AutoBranchCreationConfig { + s.PullRequestEnvironmentName = &v + return s +} + +// SetStage sets the Stage field's value. +func (s *AutoBranchCreationConfig) SetStage(v string) *AutoBranchCreationConfig { + s.Stage = &v + return s +} + +// Branch for an Amplify App, which maps to a 3rd party repository branch. +type Branch struct { + _ struct{} `type:"structure"` + + // Id of the active job for a branch, part of an Amplify App. + // + // ActiveJobId is a required field + ActiveJobId *string `locationName:"activeJobId" type:"string" required:"true"` + + // List of custom resources that are linked to this branch. + AssociatedResources []*string `locationName:"associatedResources" type:"list"` + + // ARN for a Backend Environment, part of an Amplify App. + BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"` + + // Basic Authorization credentials for a branch, part of an Amplify App. 
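+	// (Editor's note: Amplify documents Basic Authorization credentials as a
+	// base64-encoded "username:password" string; a sketch under that
+	// assumption, to be verified against the service docs:
+	//
+	//    creds := base64.StdEncoding.EncodeToString([]byte("user:password")))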
+	BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"`
+
+	// ARN for a branch, part of an Amplify App.
+	//
+	// BranchArn is a required field
+	BranchArn *string `locationName:"branchArn" type:"string" required:"true"`
+
+	// Name for a branch, part of an Amplify App.
+	//
+	// BranchName is a required field
+	BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
+
+	// BuildSpec content for a branch, part of an Amplify App.
+	BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
+
+	// Creation date and time for a branch, part of an Amplify App.
+	//
+	// CreateTime is a required field
+	CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"`
+
+	// Custom domains for a branch, part of an Amplify App.
+	//
+	// CustomDomains is a required field
+	CustomDomains []*string `locationName:"customDomains" type:"list" required:"true"`
+
+	// Description for a branch, part of an Amplify App.
+	//
+	// Description is a required field
+	Description *string `locationName:"description" type:"string" required:"true"`
+
+	// The destination branch if the branch is a pull request branch.
+	DestinationBranch *string `locationName:"destinationBranch" min:"1" type:"string"`
+
+	// Display name for a branch; it will be used as the default domain prefix.
+	//
+	// DisplayName is a required field
+	DisplayName *string `locationName:"displayName" type:"string" required:"true"`
+
+	// Enables auto-building on push for a branch, part of an Amplify App.
+	//
+	// EnableAutoBuild is a required field
+	EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean" required:"true"`
+
+	// Enables Basic Authorization for a branch, part of an Amplify App.
+	//
+	// EnableBasicAuth is a required field
+	EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"`
+
+	// Enables notifications for a branch, part of an Amplify App.
+	//
+	// EnableNotification is a required field
+	EnableNotification *bool `locationName:"enableNotification" type:"boolean" required:"true"`
+
+	// Enables Pull Request Preview for this branch.
+	//
+	// EnablePullRequestPreview is a required field
+	EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean" required:"true"`
+
+	// Environment Variables specific to a branch, part of an Amplify App.
+	//
+	// EnvironmentVariables is a required field
+	EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"`
+
+	// Framework for a branch, part of an Amplify App.
+	//
+	// Framework is a required field
+	Framework *string `locationName:"framework" type:"string" required:"true"`
+
+	// The Amplify Environment name for the pull request.
+	PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"`
+
+	// The source branch if the branch is a pull request branch.
+	SourceBranch *string `locationName:"sourceBranch" min:"1" type:"string"`
+
+	// Stage for a branch, part of an Amplify App.
+	//
+	// Stage is a required field
+	Stage *string `locationName:"stage" type:"string" required:"true" enum:"Stage"`
+
+	// Tags for a branch, part of an Amplify App.
+	Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
+
+	// Thumbnail URL for the branch.
+	ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"`
+
+	// Total number of Jobs that are part of an Amplify App.
+ // + // TotalNumberOfJobs is a required field + TotalNumberOfJobs *string `locationName:"totalNumberOfJobs" type:"string" required:"true"` + + // The content TTL for the website in seconds. + // + // Ttl is a required field + Ttl *string `locationName:"ttl" type:"string" required:"true"` + + // Last updated date and time for a branch, part of an Amplify App. + // + // UpdateTime is a required field + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s Branch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Branch) GoString() string { + return s.String() +} + +// SetActiveJobId sets the ActiveJobId field's value. +func (s *Branch) SetActiveJobId(v string) *Branch { + s.ActiveJobId = &v + return s +} + +// SetAssociatedResources sets the AssociatedResources field's value. +func (s *Branch) SetAssociatedResources(v []*string) *Branch { + s.AssociatedResources = v + return s +} + +// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value. +func (s *Branch) SetBackendEnvironmentArn(v string) *Branch { + s.BackendEnvironmentArn = &v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *Branch) SetBasicAuthCredentials(v string) *Branch { + s.BasicAuthCredentials = &v + return s +} + +// SetBranchArn sets the BranchArn field's value. +func (s *Branch) SetBranchArn(v string) *Branch { + s.BranchArn = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *Branch) SetBranchName(v string) *Branch { + s.BranchName = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *Branch) SetBuildSpec(v string) *Branch { + s.BuildSpec = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *Branch) SetCreateTime(v time.Time) *Branch { + s.CreateTime = &v + return s +} + +// SetCustomDomains sets the CustomDomains field's value. +func (s *Branch) SetCustomDomains(v []*string) *Branch { + s.CustomDomains = v + return s +} + +// SetDescription sets the Description field's value. +func (s *Branch) SetDescription(v string) *Branch { + s.Description = &v + return s +} + +// SetDestinationBranch sets the DestinationBranch field's value. +func (s *Branch) SetDestinationBranch(v string) *Branch { + s.DestinationBranch = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Branch) SetDisplayName(v string) *Branch { + s.DisplayName = &v + return s +} + +// SetEnableAutoBuild sets the EnableAutoBuild field's value. +func (s *Branch) SetEnableAutoBuild(v bool) *Branch { + s.EnableAutoBuild = &v + return s +} + +// SetEnableBasicAuth sets the EnableBasicAuth field's value. +func (s *Branch) SetEnableBasicAuth(v bool) *Branch { + s.EnableBasicAuth = &v + return s +} + +// SetEnableNotification sets the EnableNotification field's value. +func (s *Branch) SetEnableNotification(v bool) *Branch { + s.EnableNotification = &v + return s +} + +// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. +func (s *Branch) SetEnablePullRequestPreview(v bool) *Branch { + s.EnablePullRequestPreview = &v + return s +} + +// SetEnvironmentVariables sets the EnvironmentVariables field's value. +func (s *Branch) SetEnvironmentVariables(v map[string]*string) *Branch { + s.EnvironmentVariables = v + return s +} + +// SetFramework sets the Framework field's value. 
+func (s *Branch) SetFramework(v string) *Branch { + s.Framework = &v + return s +} + +// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value. +func (s *Branch) SetPullRequestEnvironmentName(v string) *Branch { + s.PullRequestEnvironmentName = &v + return s +} + +// SetSourceBranch sets the SourceBranch field's value. +func (s *Branch) SetSourceBranch(v string) *Branch { + s.SourceBranch = &v + return s +} + +// SetStage sets the Stage field's value. +func (s *Branch) SetStage(v string) *Branch { + s.Stage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Branch) SetTags(v map[string]*string) *Branch { + s.Tags = v + return s +} + +// SetThumbnailUrl sets the ThumbnailUrl field's value. +func (s *Branch) SetThumbnailUrl(v string) *Branch { + s.ThumbnailUrl = &v + return s +} + +// SetTotalNumberOfJobs sets the TotalNumberOfJobs field's value. +func (s *Branch) SetTotalNumberOfJobs(v string) *Branch { + s.TotalNumberOfJobs = &v + return s +} + +// SetTtl sets the Ttl field's value. +func (s *Branch) SetTtl(v string) *Branch { + s.Ttl = &v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *Branch) SetUpdateTime(v time.Time) *Branch { + s.UpdateTime = &v + return s +} + +// Request structure used to create Apps in Amplify. +type CreateAppInput struct { + _ struct{} `type:"structure"` + + // Personal Access token for 3rd party source control system for an Amplify + // App, used to create webhook and read-only deploy key. Token is not stored. + AccessToken *string `locationName:"accessToken" min:"1" type:"string"` + + // Automated branch creation config for the Amplify App. + AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` + + // Automated branch creation glob patterns for the Amplify App. + AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` + + // Credentials for Basic Authorization for an Amplify App. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + + // BuildSpec for an Amplify App + BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` + + // Custom rewrite / redirect rules for an Amplify App. + CustomRules []*CustomRule `locationName:"customRules" type:"list"` + + // Description for an Amplify App + Description *string `locationName:"description" type:"string"` + + // Enables automated branch creation for the Amplify App. + EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` + + // Enable Basic Authorization for an Amplify App, this will apply to all branches + // part of this App. + EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` + + // Enable the auto building of branches for an Amplify App. + EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"` + + // Environment variables map for an Amplify App. + EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` + + // AWS IAM service role for an Amplify App + IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` + + // Name for the Amplify App + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // OAuth token for 3rd party source control system for an Amplify App, used + // to create webhook and read-only deploy key. OAuth token is not stored. 
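+	//
+	// A hedged CreateApp sketch (editor's addition; CreateApp is generated
+	// earlier in this file, and the repository URL and token are placeholders):
+	//
+	//    out, err := client.CreateApp(&CreateAppInput{
+	//        Name:       aws.String("my-app"),
+	//        Repository: aws.String("https://github.com/example/repo"),
+	//        OauthToken: aws.String("<oauth-token>"),
+	//    })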
+ OauthToken *string `locationName:"oauthToken" type:"string"` + + // Platform / framework for an Amplify App + Platform *string `locationName:"platform" type:"string" enum:"Platform"` + + // Repository for an Amplify App + Repository *string `locationName:"repository" type:"string"` + + // Tag for an Amplify App + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s CreateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAppInput"} + if s.AccessToken != nil && len(*s.AccessToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessToken", 1)) + } + if s.BuildSpec != nil && len(*s.BuildSpec) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1)) + } + if s.IamServiceRoleArn != nil && len(*s.IamServiceRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IamServiceRoleArn", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.AutoBranchCreationConfig != nil { + if err := s.AutoBranchCreationConfig.Validate(); err != nil { + invalidParams.AddNested("AutoBranchCreationConfig", err.(request.ErrInvalidParams)) + } + } + if s.CustomRules != nil { + for i, v := range s.CustomRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateAppInput) SetAccessToken(v string) *CreateAppInput { + s.AccessToken = &v + return s +} + +// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value. +func (s *CreateAppInput) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *CreateAppInput { + s.AutoBranchCreationConfig = v + return s +} + +// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value. +func (s *CreateAppInput) SetAutoBranchCreationPatterns(v []*string) *CreateAppInput { + s.AutoBranchCreationPatterns = v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *CreateAppInput) SetBasicAuthCredentials(v string) *CreateAppInput { + s.BasicAuthCredentials = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *CreateAppInput) SetBuildSpec(v string) *CreateAppInput { + s.BuildSpec = &v + return s +} + +// SetCustomRules sets the CustomRules field's value. +func (s *CreateAppInput) SetCustomRules(v []*CustomRule) *CreateAppInput { + s.CustomRules = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateAppInput) SetDescription(v string) *CreateAppInput { + s.Description = &v + return s +} + +// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value. 
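+// Note that Validate (above) runs client-side before a request is signed or
+// sent; an illustrative check (editor's addition):
+//
+//    in := &CreateAppInput{} // Name is required, so this fails validation
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing Name parameter
+//    }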
+func (s *CreateAppInput) SetEnableAutoBranchCreation(v bool) *CreateAppInput {
+	s.EnableAutoBranchCreation = &v
+	return s
+}
+
+// SetEnableBasicAuth sets the EnableBasicAuth field's value.
+func (s *CreateAppInput) SetEnableBasicAuth(v bool) *CreateAppInput {
+	s.EnableBasicAuth = &v
+	return s
+}
+
+// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value.
+func (s *CreateAppInput) SetEnableBranchAutoBuild(v bool) *CreateAppInput {
+	s.EnableBranchAutoBuild = &v
+	return s
+}
+
+// SetEnvironmentVariables sets the EnvironmentVariables field's value.
+func (s *CreateAppInput) SetEnvironmentVariables(v map[string]*string) *CreateAppInput {
+	s.EnvironmentVariables = v
+	return s
+}
+
+// SetIamServiceRoleArn sets the IamServiceRoleArn field's value.
+func (s *CreateAppInput) SetIamServiceRoleArn(v string) *CreateAppInput {
+	s.IamServiceRoleArn = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *CreateAppInput) SetName(v string) *CreateAppInput {
+	s.Name = &v
+	return s
+}
+
+// SetOauthToken sets the OauthToken field's value.
+func (s *CreateAppInput) SetOauthToken(v string) *CreateAppInput {
+	s.OauthToken = &v
+	return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *CreateAppInput) SetPlatform(v string) *CreateAppInput {
+	s.Platform = &v
+	return s
+}
+
+// SetRepository sets the Repository field's value.
+func (s *CreateAppInput) SetRepository(v string) *CreateAppInput {
+	s.Repository = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateAppInput) SetTags(v map[string]*string) *CreateAppInput {
+	s.Tags = v
+	return s
+}
+
+type CreateAppOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Amplify App represents different branches of a repository for building, deploying,
+	// and hosting.
+	//
+	// App is a required field
+	App *App `locationName:"app" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateAppOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAppOutput) GoString() string {
+	return s.String()
+}
+
+// SetApp sets the App field's value.
+func (s *CreateAppOutput) SetApp(v *App) *CreateAppOutput {
+	s.App = v
+	return s
+}
+
+// Request structure for a branch create request.
+type CreateBranchInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique Id for an Amplify App.
+	//
+	// AppId is a required field
+	AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+	// ARN for a Backend Environment, part of an Amplify App.
+	BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"`
+
+	// Basic Authorization credentials for the branch.
+	BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"`
+
+	// Name for the branch.
+	//
+	// BranchName is a required field
+	BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
+
+	// BuildSpec for the branch.
+	BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
+
+	// Description for the branch.
+	Description *string `locationName:"description" type:"string"`
+
+	// Display name for a branch; it will be used as the default domain prefix.
+	DisplayName *string `locationName:"displayName" type:"string"`
+
+	// Enables auto building for the branch.
+	EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"`
+
+	// Enables Basic Auth for the branch.
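+	//
+	// A hedged CreateBranch sketch (editor's addition; CreateBranch is generated
+	// earlier in this file, and the Ids below are placeholders):
+	//
+	//    out, err := client.CreateBranch(&CreateBranchInput{
+	//        AppId:      aws.String("example-app-id"),
+	//        BranchName: aws.String("develop"),
+	//    })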
+ EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` + + // Enables notifications for the branch. + EnableNotification *bool `locationName:"enableNotification" type:"boolean"` + + // Enables Pull Request Preview for this branch. + EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"` + + // Environment Variables for the branch. + EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` + + // Framework for the branch. + Framework *string `locationName:"framework" type:"string"` + + // The Amplify Environment name for the pull request. + PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` + + // Stage for the branch. + Stage *string `locationName:"stage" type:"string" enum:"Stage"` + + // Tag for the branch. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + + // The content TTL for the website in seconds. + Ttl *string `locationName:"ttl" type:"string"` +} + +// String returns the string representation +func (s CreateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBranchInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BackendEnvironmentArn != nil && len(*s.BackendEnvironmentArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BackendEnvironmentArn", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.BuildSpec != nil && len(*s.BuildSpec) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *CreateBranchInput) SetAppId(v string) *CreateBranchInput { + s.AppId = &v + return s +} + +// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value. +func (s *CreateBranchInput) SetBackendEnvironmentArn(v string) *CreateBranchInput { + s.BackendEnvironmentArn = &v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *CreateBranchInput) SetBasicAuthCredentials(v string) *CreateBranchInput { + s.BasicAuthCredentials = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *CreateBranchInput) SetBranchName(v string) *CreateBranchInput { + s.BranchName = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *CreateBranchInput) SetBuildSpec(v string) *CreateBranchInput { + s.BuildSpec = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateBranchInput) SetDescription(v string) *CreateBranchInput { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. 
+func (s *CreateBranchInput) SetDisplayName(v string) *CreateBranchInput {
+	s.DisplayName = &v
+	return s
+}
+
+// SetEnableAutoBuild sets the EnableAutoBuild field's value.
+func (s *CreateBranchInput) SetEnableAutoBuild(v bool) *CreateBranchInput {
+	s.EnableAutoBuild = &v
+	return s
+}
+
+// SetEnableBasicAuth sets the EnableBasicAuth field's value.
+func (s *CreateBranchInput) SetEnableBasicAuth(v bool) *CreateBranchInput {
+	s.EnableBasicAuth = &v
+	return s
+}
+
+// SetEnableNotification sets the EnableNotification field's value.
+func (s *CreateBranchInput) SetEnableNotification(v bool) *CreateBranchInput {
+	s.EnableNotification = &v
+	return s
+}
+
+// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value.
+func (s *CreateBranchInput) SetEnablePullRequestPreview(v bool) *CreateBranchInput {
+	s.EnablePullRequestPreview = &v
+	return s
+}
+
+// SetEnvironmentVariables sets the EnvironmentVariables field's value.
+func (s *CreateBranchInput) SetEnvironmentVariables(v map[string]*string) *CreateBranchInput {
+	s.EnvironmentVariables = v
+	return s
+}
+
+// SetFramework sets the Framework field's value.
+func (s *CreateBranchInput) SetFramework(v string) *CreateBranchInput {
+	s.Framework = &v
+	return s
+}
+
+// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value.
+func (s *CreateBranchInput) SetPullRequestEnvironmentName(v string) *CreateBranchInput {
+	s.PullRequestEnvironmentName = &v
+	return s
+}
+
+// SetStage sets the Stage field's value.
+func (s *CreateBranchInput) SetStage(v string) *CreateBranchInput {
+	s.Stage = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateBranchInput) SetTags(v map[string]*string) *CreateBranchInput {
+	s.Tags = v
+	return s
+}
+
+// SetTtl sets the Ttl field's value.
+func (s *CreateBranchInput) SetTtl(v string) *CreateBranchInput {
+	s.Ttl = &v
+	return s
+}
+
+// Result structure for a create branch request.
+type CreateBranchOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Branch structure for an Amplify App.
+	//
+	// Branch is a required field
+	Branch *Branch `locationName:"branch" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateBranchOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBranchOutput) GoString() string {
+	return s.String()
+}
+
+// SetBranch sets the Branch field's value.
+func (s *CreateBranchOutput) SetBranch(v *Branch) *CreateBranchOutput {
+	s.Branch = v
+	return s
+}
+
+// Request structure for creating a new deployment.
+type CreateDeploymentInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique Id for an Amplify App.
+	//
+	// AppId is a required field
+	AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+	// Name for the branch, for the Job.
+	//
+	// BranchName is a required field
+	BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+
+	// Optional file map that contains the file name as the key and the file content
+	// md5 hash as the value. If this argument is provided, the service will generate
+	// a different upload URL per file. Otherwise, the service will only generate
+	// a single upload URL for the zipped files.
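+	//
+	// A hedged sketch of building such a map (editor's addition; the file name
+	// and contents are placeholders):
+	//
+	//    sum := md5.Sum(indexHTML) // indexHTML is the file's []byte content
+	//    fileMap := map[string]*string{
+	//        "index.html": aws.String(hex.EncodeToString(sum[:])),
+	//    }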
+	FileMap map[string]*string `locationName:"fileMap" type:"map"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDeploymentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentInput"}
+	if s.AppId == nil {
+		invalidParams.Add(request.NewErrParamRequired("AppId"))
+	}
+	if s.AppId != nil && len(*s.AppId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+	}
+	if s.BranchName == nil {
+		invalidParams.Add(request.NewErrParamRequired("BranchName"))
+	}
+	if s.BranchName != nil && len(*s.BranchName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *CreateDeploymentInput) SetAppId(v string) *CreateDeploymentInput {
+	s.AppId = &v
+	return s
+}
+
+// SetBranchName sets the BranchName field's value.
+func (s *CreateDeploymentInput) SetBranchName(v string) *CreateDeploymentInput {
+	s.BranchName = &v
+	return s
+}
+
+// SetFileMap sets the FileMap field's value.
+func (s *CreateDeploymentInput) SetFileMap(v map[string]*string) *CreateDeploymentInput {
+	s.FileMap = v
+	return s
+}
+
+// Result structure for creating a new deployment.
+type CreateDeploymentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// When the fileMap argument is provided in the request, the fileUploadUrls
+	// will contain a map of file names to upload URLs.
+	//
+	// FileUploadUrls is a required field
+	FileUploadUrls map[string]*string `locationName:"fileUploadUrls" type:"map" required:"true"`
+
+	// The jobId for this deployment; supply it to the start deployment API.
+	JobId *string `locationName:"jobId" type:"string"`
+
+	// When the fileMap argument is NOT provided, this zipUploadUrl will be returned.
+	//
+	// ZipUploadUrl is a required field
+	ZipUploadUrl *string `locationName:"zipUploadUrl" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentOutput) GoString() string {
+	return s.String()
+}
+
+// SetFileUploadUrls sets the FileUploadUrls field's value.
+func (s *CreateDeploymentOutput) SetFileUploadUrls(v map[string]*string) *CreateDeploymentOutput {
+	s.FileUploadUrls = v
+	return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *CreateDeploymentOutput) SetJobId(v string) *CreateDeploymentOutput {
+	s.JobId = &v
+	return s
+}
+
+// SetZipUploadUrl sets the ZipUploadUrl field's value.
+func (s *CreateDeploymentOutput) SetZipUploadUrl(v string) *CreateDeploymentOutput {
+	s.ZipUploadUrl = &v
+	return s
+}
+
+// Request structure for the create Domain Association request.
+type CreateDomainAssociationInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique Id for an Amplify App.
+	//
+	// AppId is a required field
+	AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+	// Domain name for the Domain Association.
+ // + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` + + // Enables automated creation of Subdomains for branches. (Currently not supported) + EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"` + + // Setting structure for the Subdomain. + // + // SubDomainSettings is a required field + SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateDomainAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainAssociationInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.SubDomainSettings == nil { + invalidParams.Add(request.NewErrParamRequired("SubDomainSettings")) + } + if s.SubDomainSettings != nil { + for i, v := range s.SubDomainSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SubDomainSettings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *CreateDomainAssociationInput) SetAppId(v string) *CreateDomainAssociationInput { + s.AppId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *CreateDomainAssociationInput) SetDomainName(v string) *CreateDomainAssociationInput { + s.DomainName = &v + return s +} + +// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value. +func (s *CreateDomainAssociationInput) SetEnableAutoSubDomain(v bool) *CreateDomainAssociationInput { + s.EnableAutoSubDomain = &v + return s +} + +// SetSubDomainSettings sets the SubDomainSettings field's value. +func (s *CreateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSetting) *CreateDomainAssociationInput { + s.SubDomainSettings = v + return s +} + +// Result structure for the create Domain Association request. +type CreateDomainAssociationOutput struct { + _ struct{} `type:"structure"` + + // Domain Association structure. + // + // DomainAssociation is a required field + DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDomainAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainAssociationOutput) GoString() string { + return s.String() +} + +// SetDomainAssociation sets the DomainAssociation field's value. +func (s *CreateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *CreateDomainAssociationOutput { + s.DomainAssociation = v + return s +} + +// Request structure for create webhook request. +type CreateWebhookInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. 
+ // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name for a branch, part of an Amplify App. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // Description for a webhook. + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s CreateWebhookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebhookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWebhookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebhookInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *CreateWebhookInput) SetAppId(v string) *CreateWebhookInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *CreateWebhookInput) SetBranchName(v string) *CreateWebhookInput { + s.BranchName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateWebhookInput) SetDescription(v string) *CreateWebhookInput { + s.Description = &v + return s +} + +// Result structure for the create webhook request. +type CreateWebhookOutput struct { + _ struct{} `type:"structure"` + + // Webhook structure. + // + // Webhook is a required field + Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateWebhookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebhookOutput) GoString() string { + return s.String() +} + +// SetWebhook sets the Webhook field's value. +func (s *CreateWebhookOutput) SetWebhook(v *Webhook) *CreateWebhookOutput { + s.Webhook = v + return s +} + +// Custom rewrite / redirect rule. +type CustomRule struct { + _ struct{} `type:"structure"` + + // The condition for a URL rewrite or redirect rule, e.g. country code. + Condition *string `locationName:"condition" min:"1" type:"string"` + + // The source pattern for a URL rewrite or redirect rule. + // + // Source is a required field + Source *string `locationName:"source" min:"1" type:"string" required:"true"` + + // The status code for a URL rewrite or redirect rule. + Status *string `locationName:"status" min:"3" type:"string"` + + // The target pattern for a URL rewrite or redirect rule. + // + // Target is a required field + Target *string `locationName:"target" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CustomRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
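+// An illustrative rule that satisfies these constraints (editor's addition;
+// the paths and status code are placeholders):
+//
+//    rule := (&CustomRule{}).
+//        SetSource("/docs/<*>").
+//        SetTarget("/documents/<*>").
+//        SetStatus("301")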
+func (s *CustomRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomRule"} + if s.Condition != nil && len(*s.Condition) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Condition", 1)) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Source != nil && len(*s.Source) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Source", 1)) + } + if s.Status != nil && len(*s.Status) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Status", 3)) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil && len(*s.Target) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Target", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *CustomRule) SetCondition(v string) *CustomRule { + s.Condition = &v + return s +} + +// SetSource sets the Source field's value. +func (s *CustomRule) SetSource(v string) *CustomRule { + s.Source = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CustomRule) SetStatus(v string) *CustomRule { + s.Status = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *CustomRule) SetTarget(v string) *CustomRule { + s.Target = &v + return s +} + +// Request structure for an Amplify App delete request. +type DeleteAppInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAppInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *DeleteAppInput) SetAppId(v string) *DeleteAppInput { + s.AppId = &v + return s +} + +// Result structure for an Amplify App delete request. +type DeleteAppOutput struct { + _ struct{} `type:"structure"` + + // Amplify App represents different branches of a repository for building, deploying, + // and hosting. + // + // App is a required field + App *App `locationName:"app" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppOutput) GoString() string { + return s.String() +} + +// SetApp sets the App field's value. +func (s *DeleteAppOutput) SetApp(v *App) *DeleteAppOutput { + s.App = v + return s +} + +// Request structure for delete branch request. +type DeleteBranchInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name for the branch. 
+ // + // BranchName is a required field + BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBranchInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *DeleteBranchInput) SetAppId(v string) *DeleteBranchInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput { + s.BranchName = &v + return s +} + +// Result structure for delete branch request. +type DeleteBranchOutput struct { + _ struct{} `type:"structure"` + + // Branch structure for an Amplify App. + // + // Branch is a required field + Branch *Branch `locationName:"branch" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchOutput) GoString() string { + return s.String() +} + +// SetBranch sets the Branch field's value. +func (s *DeleteBranchOutput) SetBranch(v *Branch) *DeleteBranchOutput { + s.Branch = v + return s +} + +// Request structure for the delete Domain Association request. +type DeleteDomainAssociationInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name of the domain. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDomainAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainAssociationInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. 
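+// Fields tagged location:"uri", such as AppId and DomainName above, are bound
+// into the operation's HTTP path rather than the JSON body. A hedged deletion
+// sketch (editor's addition, placeholder values):
+//
+//    _, err := client.DeleteDomainAssociation(&DeleteDomainAssociationInput{
+//        AppId:      aws.String("example-app-id"),
+//        DomainName: aws.String("example.com"),
+//    })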
+func (s *DeleteDomainAssociationInput) SetAppId(v string) *DeleteDomainAssociationInput { + s.AppId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *DeleteDomainAssociationInput) SetDomainName(v string) *DeleteDomainAssociationInput { + s.DomainName = &v + return s +} + +type DeleteDomainAssociationOutput struct { + _ struct{} `type:"structure"` + + // Structure for Domain Association, which associates a custom domain with an + // Amplify App. + // + // DomainAssociation is a required field + DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainAssociationOutput) GoString() string { + return s.String() +} + +// SetDomainAssociation sets the DomainAssociation field's value. +func (s *DeleteDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *DeleteDomainAssociationOutput { + s.DomainAssociation = v + return s +} + +// Request structure for delete job request. +type DeleteJobInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name for the branch, for the Job. + // + // BranchName is a required field + BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` + + // Unique Id for the Job. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteJobInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *DeleteJobInput) SetAppId(v string) *DeleteJobInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *DeleteJobInput) SetBranchName(v string) *DeleteJobInput { + s.BranchName = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *DeleteJobInput) SetJobId(v string) *DeleteJobInput { + s.JobId = &v + return s +} + +// Result structure for the delete job request. +type DeleteJobOutput struct { + _ struct{} `type:"structure"` + + // Structure for the summary of a Job. 
+	//
+	// JobSummary is a required field
+	JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteJobOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteJobOutput) GoString() string {
+	return s.String()
+}
+
+// SetJobSummary sets the JobSummary field's value.
+func (s *DeleteJobOutput) SetJobSummary(v *JobSummary) *DeleteJobOutput {
+	s.JobSummary = v
+	return s
+}
+
+// Request structure for the delete webhook request.
+type DeleteWebhookInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique Id for a webhook.
+	//
+	// WebhookId is a required field
+	WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteWebhookInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteWebhookInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteWebhookInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteWebhookInput"}
+	if s.WebhookId == nil {
+		invalidParams.Add(request.NewErrParamRequired("WebhookId"))
+	}
+	if s.WebhookId != nil && len(*s.WebhookId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetWebhookId sets the WebhookId field's value.
+func (s *DeleteWebhookInput) SetWebhookId(v string) *DeleteWebhookInput {
+	s.WebhookId = &v
+	return s
+}
+
+// Result structure for the delete webhook request.
+type DeleteWebhookOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Webhook structure.
+	//
+	// Webhook is a required field
+	Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteWebhookOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteWebhookOutput) GoString() string {
+	return s.String()
+}
+
+// SetWebhook sets the Webhook field's value.
+func (s *DeleteWebhookOutput) SetWebhook(v *Webhook) *DeleteWebhookOutput {
+	s.Webhook = v
+	return s
+}
+
+// Structure for Domain Association, which associates a custom domain with an
+// Amplify App.
+type DomainAssociation struct {
+	_ struct{} `type:"structure"`
+
+	// DNS Record for certificate verification.
+	CertificateVerificationDNSRecord *string `locationName:"certificateVerificationDNSRecord" type:"string"`
+
+	// ARN for the Domain Association.
+	//
+	// DomainAssociationArn is a required field
+	DomainAssociationArn *string `locationName:"domainAssociationArn" type:"string" required:"true"`
+
+	// Name of the domain.
+	//
+	// DomainName is a required field
+	DomainName *string `locationName:"domainName" type:"string" required:"true"`
+
+	// Status of the Domain Association.
+	//
+	// DomainStatus is a required field
+	DomainStatus *string `locationName:"domainStatus" type:"string" required:"true" enum:"DomainStatus"`
+
+	// Enables automated creation of Subdomains for branches. (Currently not supported)
+	//
+	// EnableAutoSubDomain is a required field
+	EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean" required:"true"`
+
+	// Reason for the current status of the Domain Association.
+ // + // StatusReason is a required field + StatusReason *string `locationName:"statusReason" type:"string" required:"true"` + + // Subdomains for the Domain Association. + // + // SubDomains is a required field + SubDomains []*SubDomain `locationName:"subDomains" type:"list" required:"true"` +} + +// String returns the string representation +func (s DomainAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainAssociation) GoString() string { + return s.String() +} + +// SetCertificateVerificationDNSRecord sets the CertificateVerificationDNSRecord field's value. +func (s *DomainAssociation) SetCertificateVerificationDNSRecord(v string) *DomainAssociation { + s.CertificateVerificationDNSRecord = &v + return s +} + +// SetDomainAssociationArn sets the DomainAssociationArn field's value. +func (s *DomainAssociation) SetDomainAssociationArn(v string) *DomainAssociation { + s.DomainAssociationArn = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *DomainAssociation) SetDomainName(v string) *DomainAssociation { + s.DomainName = &v + return s +} + +// SetDomainStatus sets the DomainStatus field's value. +func (s *DomainAssociation) SetDomainStatus(v string) *DomainAssociation { + s.DomainStatus = &v + return s +} + +// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value. +func (s *DomainAssociation) SetEnableAutoSubDomain(v bool) *DomainAssociation { + s.EnableAutoSubDomain = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *DomainAssociation) SetStatusReason(v string) *DomainAssociation { + s.StatusReason = &v + return s +} + +// SetSubDomains sets the SubDomains field's value. +func (s *DomainAssociation) SetSubDomains(v []*SubDomain) *DomainAssociation { + s.SubDomains = v + return s +} + +// Request structure for the generate access logs request. +type GenerateAccessLogsInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name of the domain. + // + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` + + // The time at which the logs should end, inclusive. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The time at which the logs should start, inclusive. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s GenerateAccessLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateAccessLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateAccessLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateAccessLogsInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. 
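+// Setters return the receiver, so a request can be built by chaining. A minimal
+// illustrative sketch (the app id and domain are made-up values, not part of
+// the generated code):
+//
+//	in := (&GenerateAccessLogsInput{}).
+//		SetAppId("d2abcdefghij").
+//		SetDomainName("example.com").
+//		SetStartTime(time.Now().Add(-24 * time.Hour)).
+//		SetEndTime(time.Now())
+//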
+func (s *GenerateAccessLogsInput) SetAppId(v string) *GenerateAccessLogsInput {
+ s.AppId = &v
+ return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *GenerateAccessLogsInput) SetDomainName(v string) *GenerateAccessLogsInput {
+ s.DomainName = &v
+ return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *GenerateAccessLogsInput) SetEndTime(v time.Time) *GenerateAccessLogsInput {
+ s.EndTime = &v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *GenerateAccessLogsInput) SetStartTime(v time.Time) *GenerateAccessLogsInput {
+ s.StartTime = &v
+ return s
+}
+
+// Result structure for the generate access logs request.
+type GenerateAccessLogsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Pre-signed URL for the requested access logs.
+ LogUrl *string `locationName:"logUrl" type:"string"`
+}
+
+// String returns the string representation
+func (s GenerateAccessLogsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateAccessLogsOutput) GoString() string {
+ return s.String()
+}
+
+// SetLogUrl sets the LogUrl field's value.
+func (s *GenerateAccessLogsOutput) SetLogUrl(v string) *GenerateAccessLogsOutput {
+ s.LogUrl = &v
+ return s
+}
+
+// Request structure for get App request.
+type GetAppInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAppInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAppInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAppInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetAppInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *GetAppInput) SetAppId(v string) *GetAppInput {
+ s.AppId = &v
+ return s
+}
+
+type GetAppOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Amplify App represents different branches of a repository for building, deploying,
+ // and hosting.
+ //
+ // App is a required field
+ App *App `locationName:"app" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAppOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAppOutput) GoString() string {
+ return s.String()
+}
+
+// SetApp sets the App field's value.
+func (s *GetAppOutput) SetApp(v *App) *GetAppOutput {
+ s.App = v
+ return s
+}
+
+// Request structure for the get artifact request.
+type GetArtifactUrlInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an artifact.
+ //
+ // ArtifactId is a required field
+ ArtifactId *string `location:"uri" locationName:"artifactId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetArtifactUrlInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetArtifactUrlInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetArtifactUrlInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetArtifactUrlInput"}
+ if s.ArtifactId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ArtifactId"))
+ }
+ if s.ArtifactId != nil && len(*s.ArtifactId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ArtifactId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetArtifactId sets the ArtifactId field's value.
+func (s *GetArtifactUrlInput) SetArtifactId(v string) *GetArtifactUrlInput {
+ s.ArtifactId = &v
+ return s
+}
+
+// Result structure for the get artifact request.
+type GetArtifactUrlOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an artifact.
+ //
+ // ArtifactId is a required field
+ ArtifactId *string `locationName:"artifactId" type:"string" required:"true"`
+
+ // Pre-signed URL for the artifact.
+ //
+ // ArtifactUrl is a required field
+ ArtifactUrl *string `locationName:"artifactUrl" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetArtifactUrlOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetArtifactUrlOutput) GoString() string {
+ return s.String()
+}
+
+// SetArtifactId sets the ArtifactId field's value.
+func (s *GetArtifactUrlOutput) SetArtifactId(v string) *GetArtifactUrlOutput {
+ s.ArtifactId = &v
+ return s
+}
+
+// SetArtifactUrl sets the ArtifactUrl field's value.
+func (s *GetArtifactUrlOutput) SetArtifactUrl(v string) *GetArtifactUrlOutput {
+ s.ArtifactUrl = &v
+ return s
+}
+
+// Request structure for get branch request.
+type GetBranchInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Name for the branch.
+ //
+ // BranchName is a required field
+ BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBranchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBranchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBranchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBranchInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.BranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchName"))
+ }
+ if s.BranchName != nil && len(*s.BranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *GetBranchInput) SetAppId(v string) *GetBranchInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *GetBranchInput) SetBranchName(v string) *GetBranchInput { + s.BranchName = &v + return s +} + +type GetBranchOutput struct { + _ struct{} `type:"structure"` + + // Branch for an Amplify App, which maps to a 3rd party repository branch. + // + // Branch is a required field + Branch *Branch `locationName:"branch" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBranchOutput) GoString() string { + return s.String() +} + +// SetBranch sets the Branch field's value. +func (s *GetBranchOutput) SetBranch(v *Branch) *GetBranchOutput { + s.Branch = v + return s +} + +// Request structure for the get Domain Association request. +type GetDomainAssociationInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name of the domain. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDomainAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDomainAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDomainAssociationInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *GetDomainAssociationInput) SetAppId(v string) *GetDomainAssociationInput { + s.AppId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *GetDomainAssociationInput) SetDomainName(v string) *GetDomainAssociationInput { + s.DomainName = &v + return s +} + +// Result structure for the get Domain Association request. +type GetDomainAssociationOutput struct { + _ struct{} `type:"structure"` + + // Domain Association structure. + // + // DomainAssociation is a required field + DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetDomainAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainAssociationOutput) GoString() string { + return s.String() +} + +// SetDomainAssociation sets the DomainAssociation field's value. +func (s *GetDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *GetDomainAssociationOutput { + s.DomainAssociation = v + return s +} + +// Request structure for get job request. 
+type GetJobInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name for the branch, for the Job. + // + // BranchName is a required field + BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` + + // Unique Id for the Job. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *GetJobInput) SetAppId(v string) *GetJobInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *GetJobInput) SetBranchName(v string) *GetJobInput { + s.BranchName = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *GetJobInput) SetJobId(v string) *GetJobInput { + s.JobId = &v + return s +} + +type GetJobOutput struct { + _ struct{} `type:"structure"` + + // Structure for an execution job for an Amplify App. + // + // Job is a required field + Job *Job `locationName:"job" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobOutput) GoString() string { + return s.String() +} + +// SetJob sets the Job field's value. +func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { + s.Job = v + return s +} + +// Request structure for the get webhook request. +type GetWebhookInput struct { + _ struct{} `type:"structure"` + + // Unique Id for a webhook. + // + // WebhookId is a required field + WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetWebhookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebhookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
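+// Validate does not stop at the first problem: every failed check is added to
+// a single request.ErrInvalidParams value, which is returned once at the end.
+// A minimal illustrative sketch of caller-side use (the in variable is
+// hypothetical):
+//
+//	if err := in.Validate(); err != nil {
+//		return err // reports all missing or too-short parameters at once
+//	}
+//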
+func (s *GetWebhookInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetWebhookInput"}
+ if s.WebhookId == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebhookId"))
+ }
+ if s.WebhookId != nil && len(*s.WebhookId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetWebhookId sets the WebhookId field's value.
+func (s *GetWebhookInput) SetWebhookId(v string) *GetWebhookInput {
+ s.WebhookId = &v
+ return s
+}
+
+// Result structure for the get webhook request.
+type GetWebhookOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Webhook structure.
+ //
+ // Webhook is a required field
+ Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetWebhookOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetWebhookOutput) GoString() string {
+ return s.String()
+}
+
+// SetWebhook sets the Webhook field's value.
+func (s *GetWebhookOutput) SetWebhook(v *Webhook) *GetWebhookOutput {
+ s.Webhook = v
+ return s
+}
+
+// Structure for an execution job for an Amplify App.
+type Job struct {
+ _ struct{} `type:"structure"`
+
+ // Execution steps for an execution job, for an Amplify App.
+ //
+ // Steps is a required field
+ Steps []*Step `locationName:"steps" type:"list" required:"true"`
+
+ // Summary for an execution job for an Amplify App.
+ //
+ // Summary is a required field
+ Summary *JobSummary `locationName:"summary" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s Job) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Job) GoString() string {
+ return s.String()
+}
+
+// SetSteps sets the Steps field's value.
+func (s *Job) SetSteps(v []*Step) *Job {
+ s.Steps = v
+ return s
+}
+
+// SetSummary sets the Summary field's value.
+func (s *Job) SetSummary(v *JobSummary) *Job {
+ s.Summary = v
+ return s
+}
+
+// Structure for the summary of a Job.
+type JobSummary struct {
+ _ struct{} `type:"structure"`
+
+ // Commit Id from 3rd party repository provider for the Job.
+ //
+ // CommitId is a required field
+ CommitId *string `locationName:"commitId" type:"string" required:"true"`
+
+ // Commit message from 3rd party repository provider for the Job.
+ //
+ // CommitMessage is a required field
+ CommitMessage *string `locationName:"commitMessage" type:"string" required:"true"`
+
+ // Commit date / time for the Job.
+ //
+ // CommitTime is a required field
+ CommitTime *time.Time `locationName:"commitTime" type:"timestamp" required:"true"`
+
+ // End date / time for the Job.
+ EndTime *time.Time `locationName:"endTime" type:"timestamp"`
+
+ // Arn for the Job.
+ //
+ // JobArn is a required field
+ JobArn *string `locationName:"jobArn" type:"string" required:"true"`
+
+ // Unique Id for the Job.
+ //
+ // JobId is a required field
+ JobId *string `locationName:"jobId" type:"string" required:"true"`
+
+ // Type for the Job. "RELEASE": Manually released from source by using StartJob
+ // API. "RETRY": Manually retried by using StartJob API. "WEB_HOOK": Automatically
+ // triggered by WebHooks.
+ //
+ // JobType is a required field
+ JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"`
+
+ // Start date / time for the Job.
+ // + // StartTime is a required field + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` + + // Status for the Job. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` +} + +// String returns the string representation +func (s JobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobSummary) GoString() string { + return s.String() +} + +// SetCommitId sets the CommitId field's value. +func (s *JobSummary) SetCommitId(v string) *JobSummary { + s.CommitId = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *JobSummary) SetCommitMessage(v string) *JobSummary { + s.CommitMessage = &v + return s +} + +// SetCommitTime sets the CommitTime field's value. +func (s *JobSummary) SetCommitTime(v time.Time) *JobSummary { + s.CommitTime = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *JobSummary) SetEndTime(v time.Time) *JobSummary { + s.EndTime = &v + return s +} + +// SetJobArn sets the JobArn field's value. +func (s *JobSummary) SetJobArn(v string) *JobSummary { + s.JobArn = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *JobSummary) SetJobId(v string) *JobSummary { + s.JobId = &v + return s +} + +// SetJobType sets the JobType field's value. +func (s *JobSummary) SetJobType(v string) *JobSummary { + s.JobType = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *JobSummary) SetStartTime(v time.Time) *JobSummary { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *JobSummary) SetStatus(v string) *JobSummary { + s.Status = &v + return s +} + +// Request structure for an Amplify App list request. +type ListAppsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of records to list in a single response. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // Pagination token. If non-null pagination token is returned in a result, then + // pass its value in another request to fetch more entries. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAppsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAppsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAppsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAppsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAppsInput) SetMaxResults(v int64) *ListAppsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAppsInput) SetNextToken(v string) *ListAppsInput { + s.NextToken = &v + return s +} + +// Result structure for an Amplify App list request. +type ListAppsOutput struct { + _ struct{} `type:"structure"` + + // List of Amplify Apps. + // + // Apps is a required field + Apps []*App `locationName:"apps" type:"list" required:"true"` + + // Pagination token. Set to null to start listing Apps from start. 
If non-null
+ // pagination token is returned in a result, then pass its value in here to
+ // list more Apps.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAppsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAppsOutput) GoString() string {
+ return s.String()
+}
+
+// SetApps sets the Apps field's value.
+func (s *ListAppsOutput) SetApps(v []*App) *ListAppsOutput {
+ s.Apps = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAppsOutput) SetNextToken(v string) *ListAppsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// Request structure for the list artifacts request.
+type ListArtifactsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Name for a branch, part of an Amplify App.
+ //
+ // BranchName is a required field
+ BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+
+ // Unique Id for a Job.
+ //
+ // JobId is a required field
+ JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
+
+ // Maximum number of records to list in a single response.
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+ // Pagination token. Set to null to start listing artifacts from start. If non-null
+ // pagination token is returned in a result, then pass its value in here to
+ // list more artifacts.
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListArtifactsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListArtifactsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListArtifactsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListArtifactsInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.BranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchName"))
+ }
+ if s.BranchName != nil && len(*s.BranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+ }
+ if s.JobId == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobId"))
+ }
+ if s.JobId != nil && len(*s.JobId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *ListArtifactsInput) SetAppId(v string) *ListArtifactsInput {
+ s.AppId = &v
+ return s
+}
+
+// SetBranchName sets the BranchName field's value.
+func (s *ListArtifactsInput) SetBranchName(v string) *ListArtifactsInput {
+ s.BranchName = &v
+ return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *ListArtifactsInput) SetJobId(v string) *ListArtifactsInput {
+ s.JobId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
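+// MaxResults and NextToken together page through results: issue the request,
+// then re-issue it with the returned NextToken until none comes back. A minimal
+// illustrative sketch (svc is a hypothetical *Amplify client; appId, branch and
+// jobId are made-up values; error handling elided):
+//
+//	in := (&ListArtifactsInput{}).SetAppId(appId).SetBranchName(branch).SetJobId(jobId)
+//	for {
+//		out, _ := svc.ListArtifacts(in)
+//		if out == nil || out.NextToken == nil {
+//			break
+//		}
+//		in.NextToken = out.NextToken
+//	}
+//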
+func (s *ListArtifactsInput) SetMaxResults(v int64) *ListArtifactsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListArtifactsInput) SetNextToken(v string) *ListArtifactsInput { + s.NextToken = &v + return s +} + +// Result structure for the list artifacts request. +type ListArtifactsOutput struct { + _ struct{} `type:"structure"` + + // List of artifacts. + // + // Artifacts is a required field + Artifacts []*Artifact `locationName:"artifacts" type:"list" required:"true"` + + // Pagination token. If non-null pagination token is returned in a result, then + // pass its value in another request to fetch more entries. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListArtifactsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListArtifactsOutput) GoString() string { + return s.String() +} + +// SetArtifacts sets the Artifacts field's value. +func (s *ListArtifactsOutput) SetArtifacts(v []*Artifact) *ListArtifactsOutput { + s.Artifacts = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListArtifactsOutput) SetNextToken(v string) *ListArtifactsOutput { + s.NextToken = &v + return s +} + +// Request structure for list branches request. +type ListBranchesInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Maximum number of records to list in a single response. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // Pagination token. Set to null to start listing branches from start. If a + // non-null pagination token is returned in a result, then pass its value in + // here to list more branches. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListBranchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBranchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBranchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBranchesInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *ListBranchesInput) SetAppId(v string) *ListBranchesInput { + s.AppId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListBranchesInput) SetMaxResults(v int64) *ListBranchesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBranchesInput) SetNextToken(v string) *ListBranchesInput { + s.NextToken = &v + return s +} + +// Result structure for list branches request. +type ListBranchesOutput struct { + _ struct{} `type:"structure"` + + // List of branches for an Amplify App. 
+ //
+ // Branches is a required field
+ Branches []*Branch `locationName:"branches" type:"list" required:"true"`
+
+ // Pagination token. If non-null pagination token is returned in a result, then
+ // pass its value in another request to fetch more entries.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBranchesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBranchesOutput) GoString() string {
+ return s.String()
+}
+
+// SetBranches sets the Branches field's value.
+func (s *ListBranchesOutput) SetBranches(v []*Branch) *ListBranchesOutput {
+ s.Branches = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListBranchesOutput) SetNextToken(v string) *ListBranchesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// Request structure for the list Domain Associations request.
+type ListDomainAssociationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Maximum number of records to list in a single response.
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+ // Pagination token. Set to null to start listing Domain Associations from start.
+ // If non-null pagination token is returned in a result, then pass its value
+ // in here to list more Domain Associations.
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDomainAssociationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainAssociationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDomainAssociationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDomainAssociationsInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *ListDomainAssociationsInput) SetAppId(v string) *ListDomainAssociationsInput {
+ s.AppId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListDomainAssociationsInput) SetMaxResults(v int64) *ListDomainAssociationsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDomainAssociationsInput) SetNextToken(v string) *ListDomainAssociationsInput {
+ s.NextToken = &v
+ return s
+}
+
+// Result structure for the list Domain Associations request.
+type ListDomainAssociationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of Domain Associations.
+ //
+ // DomainAssociations is a required field
+ DomainAssociations []*DomainAssociation `locationName:"domainAssociations" type:"list" required:"true"`
+
+ // Pagination token. If non-null pagination token is returned in a result, then
+ // pass its value in another request to fetch more entries.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDomainAssociationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainAssociationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDomainAssociations sets the DomainAssociations field's value.
+func (s *ListDomainAssociationsOutput) SetDomainAssociations(v []*DomainAssociation) *ListDomainAssociationsOutput {
+ s.DomainAssociations = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDomainAssociationsOutput) SetNextToken(v string) *ListDomainAssociationsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// Request structure for list job request.
+type ListJobsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Name for a branch.
+ //
+ // BranchName is a required field
+ BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+
+ // Maximum number of records to list in a single response.
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+ // Pagination token. Set to null to start listing jobs from start. If a non-null
+ // pagination token is returned in a result, then pass its value in here to
+ // list more jobs.
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListJobsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListJobsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.BranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchName"))
+ }
+ if s.BranchName != nil && len(*s.BranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *ListJobsInput) SetAppId(v string) *ListJobsInput {
+ s.AppId = &v
+ return s
+}
+
+// SetBranchName sets the BranchName field's value.
+func (s *ListJobsInput) SetBranchName(v string) *ListJobsInput {
+ s.BranchName = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput {
+ s.NextToken = &v
+ return s
+}
+
+// Result structure for the list jobs request.
+type ListJobsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of job summaries.
+ // + // JobSummaries is a required field + JobSummaries []*JobSummary `locationName:"jobSummaries" type:"list" required:"true"` + + // Pagination token. If non-null pagination token is returned in a result, then + // pass its value in another request to fetch more entries. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsOutput) GoString() string { + return s.String() +} + +// SetJobSummaries sets the JobSummaries field's value. +func (s *ListJobsOutput) SetJobSummaries(v []*JobSummary) *ListJobsOutput { + s.JobSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { + s.NextToken = &v + return s +} + +// Request structure used to list tags for resource. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Resource arn used to list tags. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +// Response for list tags. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Tags result for response. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Request structure for the list webhooks request. +type ListWebhooksInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Maximum number of records to list in a single response. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // Pagination token. Set to null to start listing webhooks from start. If non-null + // pagination token is returned in a result, then pass its value in here to + // list more webhooks. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListWebhooksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebhooksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWebhooksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWebhooksInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *ListWebhooksInput) SetAppId(v string) *ListWebhooksInput { + s.AppId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListWebhooksInput) SetMaxResults(v int64) *ListWebhooksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWebhooksInput) SetNextToken(v string) *ListWebhooksInput { + s.NextToken = &v + return s +} + +// Result structure for the list webhooks request. +type ListWebhooksOutput struct { + _ struct{} `type:"structure"` + + // Pagination token. If non-null pagination token is returned in a result, then + // pass its value in another request to fetch more entries. + NextToken *string `locationName:"nextToken" type:"string"` + + // List of webhooks. + // + // Webhooks is a required field + Webhooks []*Webhook `locationName:"webhooks" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListWebhooksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebhooksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWebhooksOutput) SetNextToken(v string) *ListWebhooksOutput { + s.NextToken = &v + return s +} + +// SetWebhooks sets the Webhooks field's value. +func (s *ListWebhooksOutput) SetWebhooks(v []*Webhook) *ListWebhooksOutput { + s.Webhooks = v + return s +} + +// Structure with Production Branch information. +type ProductionBranch struct { + _ struct{} `type:"structure"` + + // Branch Name for Production Branch. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // Last Deploy Time of Production Branch. + LastDeployTime *time.Time `locationName:"lastDeployTime" type:"timestamp"` + + // Status of Production Branch. + Status *string `locationName:"status" min:"3" type:"string"` + + // Thumbnail URL for Production Branch. + ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"` +} + +// String returns the string representation +func (s ProductionBranch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductionBranch) GoString() string { + return s.String() +} + +// SetBranchName sets the BranchName field's value. +func (s *ProductionBranch) SetBranchName(v string) *ProductionBranch { + s.BranchName = &v + return s +} + +// SetLastDeployTime sets the LastDeployTime field's value. 
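+// Note that the generated setters take a time.Time value rather than a pointer;
+// the address of the setter's local copy is stored on the struct. Illustrative:
+//
+//	pb := (&ProductionBranch{}).SetLastDeployTime(time.Now())
+//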
+func (s *ProductionBranch) SetLastDeployTime(v time.Time) *ProductionBranch {
+ s.LastDeployTime = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ProductionBranch) SetStatus(v string) *ProductionBranch {
+ s.Status = &v
+ return s
+}
+
+// SetThumbnailUrl sets the ThumbnailUrl field's value.
+func (s *ProductionBranch) SetThumbnailUrl(v string) *ProductionBranch {
+ s.ThumbnailUrl = &v
+ return s
+}
+
+// Request structure for starting a deployment.
+type StartDeploymentInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Name for the branch, for the Job.
+ //
+ // BranchName is a required field
+ BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+
+ // The job id for this deployment, generated by the create deployment request.
+ JobId *string `locationName:"jobId" type:"string"`
+
+ // The sourceUrl for this deployment, used when calling start deployment without
+ // create deployment. SourceUrl can be any HTTP GET url that is publicly accessible
+ // and downloads a single zip.
+ SourceUrl *string `locationName:"sourceUrl" type:"string"`
+}
+
+// String returns the string representation
+func (s StartDeploymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartDeploymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartDeploymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartDeploymentInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.BranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchName"))
+ }
+ if s.BranchName != nil && len(*s.BranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *StartDeploymentInput) SetAppId(v string) *StartDeploymentInput {
+ s.AppId = &v
+ return s
+}
+
+// SetBranchName sets the BranchName field's value.
+func (s *StartDeploymentInput) SetBranchName(v string) *StartDeploymentInput {
+ s.BranchName = &v
+ return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *StartDeploymentInput) SetJobId(v string) *StartDeploymentInput {
+ s.JobId = &v
+ return s
+}
+
+// SetSourceUrl sets the SourceUrl field's value.
+func (s *StartDeploymentInput) SetSourceUrl(v string) *StartDeploymentInput {
+ s.SourceUrl = &v
+ return s
+}
+
+// Result structure for starting a deployment.
+type StartDeploymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Summary for the Job.
+ //
+ // JobSummary is a required field
+ JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StartDeploymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartDeploymentOutput) GoString() string {
+ return s.String()
+}
+
+// SetJobSummary sets the JobSummary field's value.
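+// As documented on StartDeploymentInput above, a deployment references either a
+// previously created deployment via JobId, or a publicly downloadable zip via
+// SourceUrl. A minimal illustrative sketch of the SourceUrl mode (svc is a
+// hypothetical *Amplify client; the id and URL are made-up values):
+//
+//	out, err := svc.StartDeployment((&StartDeploymentInput{}).
+//		SetAppId("d2abcdefghij").
+//		SetBranchName("master").
+//		SetSourceUrl("https://example.com/site.zip"))
+//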
+func (s *StartDeploymentOutput) SetJobSummary(v *JobSummary) *StartDeploymentOutput {
+ s.JobSummary = v
+ return s
+}
+
+// Request structure for the start job request.
+type StartJobInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique Id for an Amplify App.
+ //
+ // AppId is a required field
+ AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
+
+ // Name for the branch, for the Job.
+ //
+ // BranchName is a required field
+ BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
+
+ // Commit Id from 3rd party repository provider for the Job.
+ CommitId *string `locationName:"commitId" type:"string"`
+
+ // Commit message from 3rd party repository provider for the Job.
+ CommitMessage *string `locationName:"commitMessage" type:"string"`
+
+ // Commit date / time for the Job.
+ CommitTime *time.Time `locationName:"commitTime" type:"timestamp"`
+
+ // Unique Id for an existing job. Required for "RETRY" JobType.
+ JobId *string `locationName:"jobId" type:"string"`
+
+ // Descriptive reason for starting this job.
+ JobReason *string `locationName:"jobReason" type:"string"`
+
+ // Type for the Job. Available JobTypes are: "RELEASE": Start a new job with
+ // the latest change from the specified branch. Only available for apps that
+ // are connected to a repository. "RETRY": Retry an existing job. JobId is required
+ // for this type of job.
+ //
+ // JobType is a required field
+ JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"`
+}
+
+// String returns the string representation
+func (s StartJobInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartJobInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartJobInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartJobInput"}
+ if s.AppId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AppId"))
+ }
+ if s.AppId != nil && len(*s.AppId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
+ }
+ if s.BranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BranchName"))
+ }
+ if s.BranchName != nil && len(*s.BranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
+ }
+ if s.JobType == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAppId sets the AppId field's value.
+func (s *StartJobInput) SetAppId(v string) *StartJobInput {
+ s.AppId = &v
+ return s
+}
+
+// SetBranchName sets the BranchName field's value.
+func (s *StartJobInput) SetBranchName(v string) *StartJobInput {
+ s.BranchName = &v
+ return s
+}
+
+// SetCommitId sets the CommitId field's value.
+func (s *StartJobInput) SetCommitId(v string) *StartJobInput {
+ s.CommitId = &v
+ return s
+}
+
+// SetCommitMessage sets the CommitMessage field's value.
+func (s *StartJobInput) SetCommitMessage(v string) *StartJobInput {
+ s.CommitMessage = &v
+ return s
+}
+
+// SetCommitTime sets the CommitTime field's value.
+func (s *StartJobInput) SetCommitTime(v time.Time) *StartJobInput {
+ s.CommitTime = &v
+ return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *StartJobInput) SetJobId(v string) *StartJobInput {
+ s.JobId = &v
+ return s
+}
+
+// SetJobReason sets the JobReason field's value.
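+// Per the JobType documentation above, a "RETRY" job must also carry the JobId
+// of the job being retried, while "RELEASE" needs no JobId. Illustrative sketch
+// (the id and branch are made-up values):
+//
+//	in := (&StartJobInput{}).
+//		SetAppId("d2abcdefghij").
+//		SetBranchName("master").
+//		SetJobType("RETRY").
+//		SetJobId("42")
+//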
+func (s *StartJobInput) SetJobReason(v string) *StartJobInput {
+ s.JobReason = &v
+ return s
+}
+
+// SetJobType sets the JobType field's value.
+func (s *StartJobInput) SetJobType(v string) *StartJobInput {
+ s.JobType = &v
+ return s
+}
+
+// Result structure for the run job request.
+type StartJobOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Summary for the Job.
+ //
+ // JobSummary is a required field
+ JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StartJobOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartJobOutput) GoString() string {
+ return s.String()
+}
+
+// SetJobSummary sets the JobSummary field's value.
+func (s *StartJobOutput) SetJobSummary(v *JobSummary) *StartJobOutput {
+ s.JobSummary = v
+ return s
+}
+
+// Structure for an execution step for an execution job, for an Amplify App.
+type Step struct {
+ _ struct{} `type:"structure"`
+
+ // URL to the artifact for the execution step.
+ ArtifactsUrl *string `locationName:"artifactsUrl" type:"string"`
+
+ // The context for the current step; includes the build image if the step is
+ // a build step.
+ Context *string `locationName:"context" type:"string"`
+
+ // End date/time of the execution step.
+ //
+ // EndTime is a required field
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
+
+ // URL to the logs for the execution step.
+ LogUrl *string `locationName:"logUrl" type:"string"`
+
+ // List of screenshot URLs for the execution step, if relevant.
+ Screenshots map[string]*string `locationName:"screenshots" type:"map"`
+
+ // Start date/time of the execution step.
+ //
+ // StartTime is a required field
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+ // Status of the execution step.
+ //
+ // Status is a required field
+ Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"`
+
+ // The reason for the current step status.
+ StatusReason *string `locationName:"statusReason" type:"string"`
+
+ // Name of the execution step.
+ //
+ // StepName is a required field
+ StepName *string `locationName:"stepName" type:"string" required:"true"`
+
+ // URL to the test artifact for the execution step.
+ TestArtifactsUrl *string `locationName:"testArtifactsUrl" type:"string"`
+
+ // URL to the test config for the execution step.
+ TestConfigUrl *string `locationName:"testConfigUrl" type:"string"`
+}
+
+// String returns the string representation
+func (s Step) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Step) GoString() string {
+ return s.String()
+}
+
+// SetArtifactsUrl sets the ArtifactsUrl field's value.
+func (s *Step) SetArtifactsUrl(v string) *Step {
+ s.ArtifactsUrl = &v
+ return s
+}
+
+// SetContext sets the Context field's value.
+func (s *Step) SetContext(v string) *Step {
+ s.Context = &v
+ return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *Step) SetEndTime(v time.Time) *Step {
+ s.EndTime = &v
+ return s
+}
+
+// SetLogUrl sets the LogUrl field's value.
+func (s *Step) SetLogUrl(v string) *Step {
+ s.LogUrl = &v
+ return s
+}
+
+// SetScreenshots sets the Screenshots field's value.
+func (s *Step) SetScreenshots(v map[string]*string) *Step {
+ s.Screenshots = v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *Step) SetStartTime(v time.Time) *Step { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Step) SetStatus(v string) *Step { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *Step) SetStatusReason(v string) *Step { + s.StatusReason = &v + return s +} + +// SetStepName sets the StepName field's value. +func (s *Step) SetStepName(v string) *Step { + s.StepName = &v + return s +} + +// SetTestArtifactsUrl sets the TestArtifactsUrl field's value. +func (s *Step) SetTestArtifactsUrl(v string) *Step { + s.TestArtifactsUrl = &v + return s +} + +// SetTestConfigUrl sets the TestConfigUrl field's value. +func (s *Step) SetTestConfigUrl(v string) *Step { + s.TestConfigUrl = &v + return s +} + +// Request structure for stop job request. +type StopJobInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name for the branch, for the Job. + // + // BranchName is a required field + BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` + + // Unique Id for the Job. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopJobInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *StopJobInput) SetAppId(v string) *StopJobInput { + s.AppId = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *StopJobInput) SetBranchName(v string) *StopJobInput { + s.BranchName = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *StopJobInput) SetJobId(v string) *StopJobInput { + s.JobId = &v + return s +} + +// Result structure for the stop job request. +type StopJobOutput struct { + _ struct{} `type:"structure"` + + // Summary for the Job. + // + // JobSummary is a required field + JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StopJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopJobOutput) GoString() string { + return s.String() +} + +// SetJobSummary sets the JobSummary field's value. 
+func (s *StopJobOutput) SetJobSummary(v *JobSummary) *StopJobOutput { + s.JobSummary = v + return s +} + +// Subdomain for the Domain Association. +type SubDomain struct { + _ struct{} `type:"structure"` + + // DNS record for the Subdomain. + // + // DnsRecord is a required field + DnsRecord *string `locationName:"dnsRecord" type:"string" required:"true"` + + // Setting structure for the Subdomain. + // + // SubDomainSetting is a required field + SubDomainSetting *SubDomainSetting `locationName:"subDomainSetting" type:"structure" required:"true"` + + // Verified status of the Subdomain + // + // Verified is a required field + Verified *bool `locationName:"verified" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SubDomain) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubDomain) GoString() string { + return s.String() +} + +// SetDnsRecord sets the DnsRecord field's value. +func (s *SubDomain) SetDnsRecord(v string) *SubDomain { + s.DnsRecord = &v + return s +} + +// SetSubDomainSetting sets the SubDomainSetting field's value. +func (s *SubDomain) SetSubDomainSetting(v *SubDomainSetting) *SubDomain { + s.SubDomainSetting = v + return s +} + +// SetVerified sets the Verified field's value. +func (s *SubDomain) SetVerified(v bool) *SubDomain { + s.Verified = &v + return s +} + +// Setting for the Subdomain. +type SubDomainSetting struct { + _ struct{} `type:"structure"` + + // Branch name setting for the Subdomain. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // Prefix setting for the Subdomain. + // + // Prefix is a required field + Prefix *string `locationName:"prefix" type:"string" required:"true"` +} + +// String returns the string representation +func (s SubDomainSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubDomainSetting) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SubDomainSetting) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SubDomainSetting"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. +func (s *SubDomainSetting) SetBranchName(v string) *SubDomainSetting { + s.BranchName = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *SubDomainSetting) SetPrefix(v string) *SubDomainSetting { + s.Prefix = &v + return s +} + +// Request structure used to tag resource. +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Resource arn used to tag resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // Tags used to tag resource. 
+ // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +// Response for tag resource. +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Request structure used to untag resource. +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // Resource arn used to untag resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // Tag keys used to untag resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +// Response for untag resource. 
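+//
+// TagResource and UntagResource form a simple round trip; a sketch (svc is
+// an assumed *amplify.Amplify client and the ARN is a placeholder):
+//
+//	arn := "arn:aws:amplify:us-west-2:123456789012:apps/d111111111" // placeholder
+//	_, _ = svc.TagResource(&amplify.TagResourceInput{
+//		ResourceArn: aws.String(arn),
+//		Tags:        map[string]*string{"team": aws.String("web")},
+//	})
+//	_, _ = svc.UntagResource(&amplify.UntagResourceInput{
+//		ResourceArn: aws.String(arn),
+//		TagKeys:     []*string{aws.String("team")},
+//	})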
+type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// Request structure for update App request. +type UpdateAppInput struct { + _ struct{} `type:"structure"` + + // Personal Access token for 3rd party source control system for an Amplify + // App, used to create webhook and read-only deploy key. Token is not stored. + AccessToken *string `locationName:"accessToken" min:"1" type:"string"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Automated branch creation branchConfig for the Amplify App. + AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` + + // Automated branch creation glob patterns for the Amplify App. + AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` + + // Basic Authorization credentials for an Amplify App. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + + // BuildSpec for an Amplify App. + BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` + + // Custom redirect / rewrite rules for an Amplify App. + CustomRules []*CustomRule `locationName:"customRules" type:"list"` + + // Description for an Amplify App. + Description *string `locationName:"description" type:"string"` + + // Enables automated branch creation for the Amplify App. + EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` + + // Enables Basic Authorization for an Amplify App. + EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` + + // Enables branch auto-building for an Amplify App. + EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"` + + // Environment Variables for an Amplify App. + EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` + + // IAM service role for an Amplify App. + IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` + + // Name for an Amplify App. + Name *string `locationName:"name" min:"1" type:"string"` + + // OAuth token for 3rd party source control system for an Amplify App, used + // to create webhook and read-only deploy key. OAuth token is not stored. + OauthToken *string `locationName:"oauthToken" type:"string"` + + // Platform for an Amplify App. + Platform *string `locationName:"platform" type:"string" enum:"Platform"` + + // Repository for an Amplify App + Repository *string `locationName:"repository" type:"string"` +} + +// String returns the string representation +func (s UpdateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
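+//
+// Validation runs on the client before a request is sent; a sketch of the
+// behavior (the field values are assumed):
+//
+//	in := &amplify.UpdateAppInput{Name: aws.String("my-app")} // AppId deliberately omitted
+//	if err := in.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing required AppId parameter
+//	}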
+func (s *UpdateAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAppInput"} + if s.AccessToken != nil && len(*s.AccessToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessToken", 1)) + } + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BuildSpec != nil && len(*s.BuildSpec) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1)) + } + if s.IamServiceRoleArn != nil && len(*s.IamServiceRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IamServiceRoleArn", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.AutoBranchCreationConfig != nil { + if err := s.AutoBranchCreationConfig.Validate(); err != nil { + invalidParams.AddNested("AutoBranchCreationConfig", err.(request.ErrInvalidParams)) + } + } + if s.CustomRules != nil { + for i, v := range s.CustomRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *UpdateAppInput) SetAccessToken(v string) *UpdateAppInput { + s.AccessToken = &v + return s +} + +// SetAppId sets the AppId field's value. +func (s *UpdateAppInput) SetAppId(v string) *UpdateAppInput { + s.AppId = &v + return s +} + +// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value. +func (s *UpdateAppInput) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *UpdateAppInput { + s.AutoBranchCreationConfig = v + return s +} + +// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value. +func (s *UpdateAppInput) SetAutoBranchCreationPatterns(v []*string) *UpdateAppInput { + s.AutoBranchCreationPatterns = v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *UpdateAppInput) SetBasicAuthCredentials(v string) *UpdateAppInput { + s.BasicAuthCredentials = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *UpdateAppInput) SetBuildSpec(v string) *UpdateAppInput { + s.BuildSpec = &v + return s +} + +// SetCustomRules sets the CustomRules field's value. +func (s *UpdateAppInput) SetCustomRules(v []*CustomRule) *UpdateAppInput { + s.CustomRules = v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateAppInput) SetDescription(v string) *UpdateAppInput { + s.Description = &v + return s +} + +// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value. +func (s *UpdateAppInput) SetEnableAutoBranchCreation(v bool) *UpdateAppInput { + s.EnableAutoBranchCreation = &v + return s +} + +// SetEnableBasicAuth sets the EnableBasicAuth field's value. +func (s *UpdateAppInput) SetEnableBasicAuth(v bool) *UpdateAppInput { + s.EnableBasicAuth = &v + return s +} + +// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value. +func (s *UpdateAppInput) SetEnableBranchAutoBuild(v bool) *UpdateAppInput { + s.EnableBranchAutoBuild = &v + return s +} + +// SetEnvironmentVariables sets the EnvironmentVariables field's value. 
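+//
+// Because every setter returns its receiver, an update request can be built
+// as a chain; a sketch (svc and the values are assumed placeholders):
+//
+//	in := (&amplify.UpdateAppInput{}).
+//		SetAppId("d111111111"). // placeholder app id
+//		SetDescription("updated via the SDK").
+//		SetEnvironmentVariables(map[string]*string{"STAGE": aws.String("prod")})
+//	out, err := svc.UpdateApp(in)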
+func (s *UpdateAppInput) SetEnvironmentVariables(v map[string]*string) *UpdateAppInput { + s.EnvironmentVariables = v + return s +} + +// SetIamServiceRoleArn sets the IamServiceRoleArn field's value. +func (s *UpdateAppInput) SetIamServiceRoleArn(v string) *UpdateAppInput { + s.IamServiceRoleArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateAppInput) SetName(v string) *UpdateAppInput { + s.Name = &v + return s +} + +// SetOauthToken sets the OauthToken field's value. +func (s *UpdateAppInput) SetOauthToken(v string) *UpdateAppInput { + s.OauthToken = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *UpdateAppInput) SetPlatform(v string) *UpdateAppInput { + s.Platform = &v + return s +} + +// SetRepository sets the Repository field's value. +func (s *UpdateAppInput) SetRepository(v string) *UpdateAppInput { + s.Repository = &v + return s +} + +// Result structure for an Amplify App update request. +type UpdateAppOutput struct { + _ struct{} `type:"structure"` + + // App structure for the updated App. + // + // App is a required field + App *App `locationName:"app" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppOutput) GoString() string { + return s.String() +} + +// SetApp sets the App field's value. +func (s *UpdateAppOutput) SetApp(v *App) *UpdateAppOutput { + s.App = v + return s +} + +// Request structure for update branch request. +type UpdateBranchInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // ARN for a Backend Environment, part of an Amplify App. + BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"` + + // Basic Authorization credentials for the branch. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + + // Name for the branch. + // + // BranchName is a required field + BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` + + // BuildSpec for the branch. + BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` + + // Description for the branch. + Description *string `locationName:"description" type:"string"` + + // Display name for a branch, will use as the default domain prefix. + DisplayName *string `locationName:"displayName" type:"string"` + + // Enables auto building for the branch. + EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"` + + // Enables Basic Auth for the branch. + EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` + + // Enables notifications for the branch. + EnableNotification *bool `locationName:"enableNotification" type:"boolean"` + + // Enables Pull Request Preview for this branch. + EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"` + + // Environment Variables for the branch. + EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` + + // Framework for the branch. + Framework *string `locationName:"framework" type:"string"` + + // The Amplify Environment name for the pull request. + PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` + + // Stage for the branch. 
+ Stage *string `locationName:"stage" type:"string" enum:"Stage"` + + // The content TTL for the website in seconds. + Ttl *string `locationName:"ttl" type:"string"` +} + +// String returns the string representation +func (s UpdateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBranchInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.BackendEnvironmentArn != nil && len(*s.BackendEnvironmentArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BackendEnvironmentArn", 1)) + } + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.BuildSpec != nil && len(*s.BuildSpec) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *UpdateBranchInput) SetAppId(v string) *UpdateBranchInput { + s.AppId = &v + return s +} + +// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value. +func (s *UpdateBranchInput) SetBackendEnvironmentArn(v string) *UpdateBranchInput { + s.BackendEnvironmentArn = &v + return s +} + +// SetBasicAuthCredentials sets the BasicAuthCredentials field's value. +func (s *UpdateBranchInput) SetBasicAuthCredentials(v string) *UpdateBranchInput { + s.BasicAuthCredentials = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *UpdateBranchInput) SetBranchName(v string) *UpdateBranchInput { + s.BranchName = &v + return s +} + +// SetBuildSpec sets the BuildSpec field's value. +func (s *UpdateBranchInput) SetBuildSpec(v string) *UpdateBranchInput { + s.BuildSpec = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateBranchInput) SetDescription(v string) *UpdateBranchInput { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *UpdateBranchInput) SetDisplayName(v string) *UpdateBranchInput { + s.DisplayName = &v + return s +} + +// SetEnableAutoBuild sets the EnableAutoBuild field's value. +func (s *UpdateBranchInput) SetEnableAutoBuild(v bool) *UpdateBranchInput { + s.EnableAutoBuild = &v + return s +} + +// SetEnableBasicAuth sets the EnableBasicAuth field's value. +func (s *UpdateBranchInput) SetEnableBasicAuth(v bool) *UpdateBranchInput { + s.EnableBasicAuth = &v + return s +} + +// SetEnableNotification sets the EnableNotification field's value. +func (s *UpdateBranchInput) SetEnableNotification(v bool) *UpdateBranchInput { + s.EnableNotification = &v + return s +} + +// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. +func (s *UpdateBranchInput) SetEnablePullRequestPreview(v bool) *UpdateBranchInput { + s.EnablePullRequestPreview = &v + return s +} + +// SetEnvironmentVariables sets the EnvironmentVariables field's value. 
+func (s *UpdateBranchInput) SetEnvironmentVariables(v map[string]*string) *UpdateBranchInput { + s.EnvironmentVariables = v + return s +} + +// SetFramework sets the Framework field's value. +func (s *UpdateBranchInput) SetFramework(v string) *UpdateBranchInput { + s.Framework = &v + return s +} + +// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value. +func (s *UpdateBranchInput) SetPullRequestEnvironmentName(v string) *UpdateBranchInput { + s.PullRequestEnvironmentName = &v + return s +} + +// SetStage sets the Stage field's value. +func (s *UpdateBranchInput) SetStage(v string) *UpdateBranchInput { + s.Stage = &v + return s +} + +// SetTtl sets the Ttl field's value. +func (s *UpdateBranchInput) SetTtl(v string) *UpdateBranchInput { + s.Ttl = &v + return s +} + +// Result structure for update branch request. +type UpdateBranchOutput struct { + _ struct{} `type:"structure"` + + // Branch structure for an Amplify App. + // + // Branch is a required field + Branch *Branch `locationName:"branch" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBranchOutput) GoString() string { + return s.String() +} + +// SetBranch sets the Branch field's value. +func (s *UpdateBranchOutput) SetBranch(v *Branch) *UpdateBranchOutput { + s.Branch = v + return s +} + +// Request structure for update Domain Association request. +type UpdateDomainAssociationInput struct { + _ struct{} `type:"structure"` + + // Unique Id for an Amplify App. + // + // AppId is a required field + AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` + + // Name of the domain. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` + + // Enables automated creation of Subdomains for branches. (Currently not supported) + EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"` + + // Setting structure for the Subdomain. + // + // SubDomainSettings is a required field + SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
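+//
+// Validation below also recurses into each SubDomainSetting, so a minimal
+// valid request looks like this sketch (svc, the ids and the domain are
+// assumed placeholders):
+//
+//	out, err := svc.UpdateDomainAssociation(&amplify.UpdateDomainAssociationInput{
+//		AppId:      aws.String("d111111111"), // placeholder
+//		DomainName: aws.String("example.com"),
+//		SubDomainSettings: []*amplify.SubDomainSetting{{
+//			BranchName: aws.String("master"),
+//			Prefix:     aws.String("www"),
+//		}},
+//	})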
+func (s *UpdateDomainAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDomainAssociationInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.AppId != nil && len(*s.AppId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.SubDomainSettings == nil { + invalidParams.Add(request.NewErrParamRequired("SubDomainSettings")) + } + if s.SubDomainSettings != nil { + for i, v := range s.SubDomainSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SubDomainSettings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppId sets the AppId field's value. +func (s *UpdateDomainAssociationInput) SetAppId(v string) *UpdateDomainAssociationInput { + s.AppId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *UpdateDomainAssociationInput) SetDomainName(v string) *UpdateDomainAssociationInput { + s.DomainName = &v + return s +} + +// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value. +func (s *UpdateDomainAssociationInput) SetEnableAutoSubDomain(v bool) *UpdateDomainAssociationInput { + s.EnableAutoSubDomain = &v + return s +} + +// SetSubDomainSettings sets the SubDomainSettings field's value. +func (s *UpdateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSetting) *UpdateDomainAssociationInput { + s.SubDomainSettings = v + return s +} + +// Result structure for the update Domain Association request. +type UpdateDomainAssociationOutput struct { + _ struct{} `type:"structure"` + + // Domain Association structure. + // + // DomainAssociation is a required field + DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainAssociationOutput) GoString() string { + return s.String() +} + +// SetDomainAssociation sets the DomainAssociation field's value. +func (s *UpdateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *UpdateDomainAssociationOutput { + s.DomainAssociation = v + return s +} + +// Request structure for update webhook request. +type UpdateWebhookInput struct { + _ struct{} `type:"structure"` + + // Name for a branch, part of an Amplify App. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // Description for a webhook. + Description *string `locationName:"description" type:"string"` + + // Unique Id for a webhook. + // + // WebhookId is a required field + WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateWebhookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebhookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
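+//
+// Only WebhookId is required when updating a webhook; a sketch (svc and the
+// id values are assumed placeholders):
+//
+//	out, err := svc.UpdateWebhook(&amplify.UpdateWebhookInput{
+//		WebhookId:  aws.String("a1b2c3d4"), // placeholder webhook id
+//		BranchName: aws.String("develop"),
+//	})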
+func (s *UpdateWebhookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateWebhookInput"} + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.WebhookId == nil { + invalidParams.Add(request.NewErrParamRequired("WebhookId")) + } + if s.WebhookId != nil && len(*s.WebhookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. +func (s *UpdateWebhookInput) SetBranchName(v string) *UpdateWebhookInput { + s.BranchName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateWebhookInput) SetDescription(v string) *UpdateWebhookInput { + s.Description = &v + return s +} + +// SetWebhookId sets the WebhookId field's value. +func (s *UpdateWebhookInput) SetWebhookId(v string) *UpdateWebhookInput { + s.WebhookId = &v + return s +} + +// Result structure for the update webhook request. +type UpdateWebhookOutput struct { + _ struct{} `type:"structure"` + + // Webhook structure. + // + // Webhook is a required field + Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateWebhookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebhookOutput) GoString() string { + return s.String() +} + +// SetWebhook sets the Webhook field's value. +func (s *UpdateWebhookOutput) SetWebhook(v *Webhook) *UpdateWebhookOutput { + s.Webhook = v + return s +} + +// Structure for webhook, which associates a webhook with an Amplify App. +type Webhook struct { + _ struct{} `type:"structure"` + + // Name for a branch, part of an Amplify App. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // Create date / time for a webhook. + // + // CreateTime is a required field + CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` + + // Description for a webhook. + // + // Description is a required field + Description *string `locationName:"description" type:"string" required:"true"` + + // Update date / time for a webhook. + // + // UpdateTime is a required field + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` + + // ARN for the webhook. + // + // WebhookArn is a required field + WebhookArn *string `locationName:"webhookArn" type:"string" required:"true"` + + // Id of the webhook. + // + // WebhookId is a required field + WebhookId *string `locationName:"webhookId" type:"string" required:"true"` + + // Url of the webhook. + // + // WebhookUrl is a required field + WebhookUrl *string `locationName:"webhookUrl" type:"string" required:"true"` +} + +// String returns the string representation +func (s Webhook) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Webhook) GoString() string { + return s.String() +} + +// SetBranchName sets the BranchName field's value. +func (s *Webhook) SetBranchName(v string) *Webhook { + s.BranchName = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *Webhook) SetCreateTime(v time.Time) *Webhook { + s.CreateTime = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *Webhook) SetDescription(v string) *Webhook { + s.Description = &v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *Webhook) SetUpdateTime(v time.Time) *Webhook { + s.UpdateTime = &v + return s +} + +// SetWebhookArn sets the WebhookArn field's value. +func (s *Webhook) SetWebhookArn(v string) *Webhook { + s.WebhookArn = &v + return s +} + +// SetWebhookId sets the WebhookId field's value. +func (s *Webhook) SetWebhookId(v string) *Webhook { + s.WebhookId = &v + return s +} + +// SetWebhookUrl sets the WebhookUrl field's value. +func (s *Webhook) SetWebhookUrl(v string) *Webhook { + s.WebhookUrl = &v + return s +} + +const ( + // DomainStatusPendingVerification is a DomainStatus enum value + DomainStatusPendingVerification = "PENDING_VERIFICATION" + + // DomainStatusInProgress is a DomainStatus enum value + DomainStatusInProgress = "IN_PROGRESS" + + // DomainStatusAvailable is a DomainStatus enum value + DomainStatusAvailable = "AVAILABLE" + + // DomainStatusPendingDeployment is a DomainStatus enum value + DomainStatusPendingDeployment = "PENDING_DEPLOYMENT" + + // DomainStatusFailed is a DomainStatus enum value + DomainStatusFailed = "FAILED" + + // DomainStatusCreating is a DomainStatus enum value + DomainStatusCreating = "CREATING" + + // DomainStatusRequestingCertificate is a DomainStatus enum value + DomainStatusRequestingCertificate = "REQUESTING_CERTIFICATE" + + // DomainStatusUpdating is a DomainStatus enum value + DomainStatusUpdating = "UPDATING" +) + +const ( + // JobStatusPending is a JobStatus enum value + JobStatusPending = "PENDING" + + // JobStatusProvisioning is a JobStatus enum value + JobStatusProvisioning = "PROVISIONING" + + // JobStatusRunning is a JobStatus enum value + JobStatusRunning = "RUNNING" + + // JobStatusFailed is a JobStatus enum value + JobStatusFailed = "FAILED" + + // JobStatusSucceed is a JobStatus enum value + JobStatusSucceed = "SUCCEED" + + // JobStatusCancelling is a JobStatus enum value + JobStatusCancelling = "CANCELLING" + + // JobStatusCancelled is a JobStatus enum value + JobStatusCancelled = "CANCELLED" +) + +const ( + // JobTypeRelease is a JobType enum value + JobTypeRelease = "RELEASE" + + // JobTypeRetry is a JobType enum value + JobTypeRetry = "RETRY" + + // JobTypeManual is a JobType enum value + JobTypeManual = "MANUAL" + + // JobTypeWebHook is a JobType enum value + JobTypeWebHook = "WEB_HOOK" +) + +const ( + // PlatformWeb is a Platform enum value + PlatformWeb = "WEB" +) + +const ( + // StageProduction is a Stage enum value + StageProduction = "PRODUCTION" + + // StageBeta is a Stage enum value + StageBeta = "BETA" + + // StageDevelopment is a Stage enum value + StageDevelopment = "DEVELOPMENT" + + // StageExperimental is a Stage enum value + StageExperimental = "EXPERIMENTAL" + + // StagePullRequest is a Stage enum value + StagePullRequest = "PULL_REQUEST" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go new file mode 100644 index 00000000000..d00cb08ebab --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go @@ -0,0 +1,29 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package amplify provides the client and types for making API +// requests to AWS Amplify. +// +// Amplify is a fully managed continuous deployment and hosting service for +// modern web apps. 
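+//
+// For example, a caller can create a client from a session and fetch a single
+// App (the app id below is an assumed placeholder):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := amplify.New(sess)
+//	app, err := svc.GetApp(&amplify.GetAppInput{AppId: aws.String("d111111111")})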
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25 for more information on this service.
+//
+// See amplify package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/amplify/
+//
+// Using the Client
+//
+// To contact AWS Amplify with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Amplify client Amplify for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/amplify/#New
+package amplify
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go
new file mode 100644
index 00000000000..ba7fd5a9acb
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go
@@ -0,0 +1,51 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package amplify
+
+const (
+
+	// ErrCodeBadRequestException for service response error code
+	// "BadRequestException".
+	//
+	// Exception thrown when a request contains unexpected data.
+	ErrCodeBadRequestException = "BadRequestException"
+
+	// ErrCodeDependentServiceFailureException for service response error code
+	// "DependentServiceFailureException".
+	//
+	// Exception thrown when an operation fails due to a dependent service throwing
+	// an exception.
+	ErrCodeDependentServiceFailureException = "DependentServiceFailureException"
+
+	// ErrCodeInternalFailureException for service response error code
+	// "InternalFailureException".
+	//
+	// Exception thrown when the service fails to perform an operation due to an
+	// internal issue.
+	ErrCodeInternalFailureException = "InternalFailureException"
+
+	// ErrCodeLimitExceededException for service response error code
+	// "LimitExceededException".
+	//
+	// Exception thrown when a resource could not be created because of service
+	// limits.
+	ErrCodeLimitExceededException = "LimitExceededException"
+
+	// ErrCodeNotFoundException for service response error code
+	// "NotFoundException".
+	//
+	// Exception thrown when an entity has not been found during an operation.
+	ErrCodeNotFoundException = "NotFoundException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// Exception thrown when an operation fails due to a non-existent resource.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeUnauthorizedException for service response error code
+	// "UnauthorizedException".
+	//
+	// Exception thrown when an operation fails due to a lack of access.
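+	//
+	// These codes can be compared against the Code of an awserr.Error; a
+	// caller-side sketch (svc and input are assumed values):
+	//
+	//	if _, err := svc.GetApp(input); err != nil {
+	//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == amplify.ErrCodeUnauthorizedException {
+	//			// the caller lacks access to the App
+	//		}
+	//	}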
+	ErrCodeUnauthorizedException = "UnauthorizedException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go
new file mode 100644
index 00000000000..7d4fa8ea876
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package amplify
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// Amplify provides the API operation methods for making requests to
+// AWS Amplify. See this package's package overview docs
+// for details on the service.
+//
+// Amplify methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type Amplify struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "Amplify" // Name of service.
+	EndpointsID = "amplify" // ID to lookup a service endpoint with.
+	ServiceID   = "Amplify" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the Amplify client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an Amplify client from just a session.
+//     svc := amplify.New(mySession)
+//
+//     // Create an Amplify client with additional configuration
+//     svc := amplify.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Amplify {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "amplify"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Amplify {
+	svc := &Amplify{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2017-07-25",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an Amplify operation and runs any
+// custom request initialization.
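+//
+// Request behavior can also be customized from outside the package through
+// the client's handler lists; a sketch (mySession and the logging callback
+// are assumptions):
+//
+//	svc := amplify.New(mySession)
+//	svc.Handlers.Send.PushFront(func(r *request.Request) {
+//		log.Printf("sending %s", r.Operation.Name)
+//	})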
+func (c *Amplify) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go index e2bb14128e9..2ac48ac1cbe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go @@ -2085,6 +2085,10 @@ func (c *APIGateway) DeleteDomainNameRequest(input *DeleteDomainNameInput) (req // The request has reached its throttling limit. Retry after the specified time // period. // +// * ErrCodeBadRequestException "BadRequestException" +// The submitted request is not valid, for example, the input is incomplete +// or incorrect. See the accompanying error message for details. +// func (c *APIGateway) DeleteDomainName(input *DeleteDomainNameInput) (*DeleteDomainNameOutput, error) { req, out := c.DeleteDomainNameRequest(input) return out, req.Send() @@ -3806,7 +3810,7 @@ func (c *APIGateway) GetApiKeysWithContext(ctx aws.Context, input *GetApiKeysInp // // Example iterating over at most 3 pages of a GetApiKeys operation. // pageNum := 0 // err := client.GetApiKeysPages(params, -// func(page *GetApiKeysOutput, lastPage bool) bool { +// func(page *apigateway.GetApiKeysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3838,10 +3842,12 @@ func (c *APIGateway) GetApiKeysPagesWithContext(ctx aws.Context, input *GetApiKe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetApiKeysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetApiKeysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4202,7 +4208,7 @@ func (c *APIGateway) GetBasePathMappingsWithContext(ctx aws.Context, input *GetB // // Example iterating over at most 3 pages of a GetBasePathMappings operation. // pageNum := 0 // err := client.GetBasePathMappingsPages(params, -// func(page *GetBasePathMappingsOutput, lastPage bool) bool { +// func(page *apigateway.GetBasePathMappingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4234,10 +4240,12 @@ func (c *APIGateway) GetBasePathMappingsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBasePathMappingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBasePathMappingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4425,7 +4433,7 @@ func (c *APIGateway) GetClientCertificatesWithContext(ctx aws.Context, input *Ge // // Example iterating over at most 3 pages of a GetClientCertificates operation. 
// pageNum := 0 // err := client.GetClientCertificatesPages(params, -// func(page *GetClientCertificatesOutput, lastPage bool) bool { +// func(page *apigateway.GetClientCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4457,10 +4465,12 @@ func (c *APIGateway) GetClientCertificatesPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetClientCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetClientCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4656,7 +4666,7 @@ func (c *APIGateway) GetDeploymentsWithContext(ctx aws.Context, input *GetDeploy // // Example iterating over at most 3 pages of a GetDeployments operation. // pageNum := 0 // err := client.GetDeploymentsPages(params, -// func(page *GetDeploymentsOutput, lastPage bool) bool { +// func(page *apigateway.GetDeploymentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4688,10 +4698,12 @@ func (c *APIGateway) GetDeploymentsPagesWithContext(ctx aws.Context, input *GetD }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDeploymentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetDeploymentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5216,7 +5228,7 @@ func (c *APIGateway) GetDomainNamesWithContext(ctx aws.Context, input *GetDomain // // Example iterating over at most 3 pages of a GetDomainNames operation. // pageNum := 0 // err := client.GetDomainNamesPages(params, -// func(page *GetDomainNamesOutput, lastPage bool) bool { +// func(page *apigateway.GetDomainNamesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5248,10 +5260,12 @@ func (c *APIGateway) GetDomainNamesPagesWithContext(ctx aws.Context, input *GetD }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDomainNamesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetDomainNamesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6126,7 +6140,7 @@ func (c *APIGateway) GetModelsWithContext(ctx aws.Context, input *GetModelsInput // // Example iterating over at most 3 pages of a GetModels operation. // pageNum := 0 // err := client.GetModelsPages(params, -// func(page *GetModelsOutput, lastPage bool) bool { +// func(page *apigateway.GetModelsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6158,10 +6172,12 @@ func (c *APIGateway) GetModelsPagesWithContext(ctx aws.Context, input *GetModels }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetModelsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetModelsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6522,7 +6538,7 @@ func (c *APIGateway) GetResourcesWithContext(ctx aws.Context, input *GetResource // // Example iterating over at most 3 pages of a GetResources operation. 
// pageNum := 0 // err := client.GetResourcesPages(params, -// func(page *GetResourcesOutput, lastPage bool) bool { +// func(page *apigateway.GetResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6554,10 +6570,12 @@ func (c *APIGateway) GetResourcesPagesWithContext(ctx aws.Context, input *GetRes }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6745,7 +6763,7 @@ func (c *APIGateway) GetRestApisWithContext(ctx aws.Context, input *GetRestApisI // // Example iterating over at most 3 pages of a GetRestApis operation. // pageNum := 0 // err := client.GetRestApisPages(params, -// func(page *GetRestApisOutput, lastPage bool) bool { +// func(page *apigateway.GetRestApisOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6777,10 +6795,12 @@ func (c *APIGateway) GetRestApisPagesWithContext(ctx aws.Context, input *GetRest }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetRestApisOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetRestApisOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7394,7 +7414,7 @@ func (c *APIGateway) GetUsageWithContext(ctx aws.Context, input *GetUsageInput, // // Example iterating over at most 3 pages of a GetUsage operation. // pageNum := 0 // err := client.GetUsagePages(params, -// func(page *Usage, lastPage bool) bool { +// func(page *apigateway.Usage, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7426,10 +7446,12 @@ func (c *APIGateway) GetUsagePagesWithContext(ctx aws.Context, input *GetUsageIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*Usage), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*Usage), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7712,7 +7734,7 @@ func (c *APIGateway) GetUsagePlanKeysWithContext(ctx aws.Context, input *GetUsag // // Example iterating over at most 3 pages of a GetUsagePlanKeys operation. // pageNum := 0 // err := client.GetUsagePlanKeysPages(params, -// func(page *GetUsagePlanKeysOutput, lastPage bool) bool { +// func(page *apigateway.GetUsagePlanKeysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7744,10 +7766,12 @@ func (c *APIGateway) GetUsagePlanKeysPagesWithContext(ctx aws.Context, input *Ge }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetUsagePlanKeysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetUsagePlanKeysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7859,7 +7883,7 @@ func (c *APIGateway) GetUsagePlansWithContext(ctx aws.Context, input *GetUsagePl // // Example iterating over at most 3 pages of a GetUsagePlans operation. 
// pageNum := 0 // err := client.GetUsagePlansPages(params, -// func(page *GetUsagePlansOutput, lastPage bool) bool { +// func(page *apigateway.GetUsagePlansOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7891,10 +7915,12 @@ func (c *APIGateway) GetUsagePlansPagesWithContext(ctx aws.Context, input *GetUs }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetUsagePlansOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetUsagePlansOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8082,7 +8108,7 @@ func (c *APIGateway) GetVpcLinksWithContext(ctx aws.Context, input *GetVpcLinksI // // Example iterating over at most 3 pages of a GetVpcLinks operation. // pageNum := 0 // err := client.GetVpcLinksPages(params, -// func(page *GetVpcLinksOutput, lastPage bool) bool { +// func(page *apigateway.GetVpcLinksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8114,10 +8140,12 @@ func (c *APIGateway) GetVpcLinksPagesWithContext(ctx aws.Context, input *GetVpcL }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetVpcLinksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetVpcLinksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9096,7 +9124,8 @@ func (c *APIGateway) TestInvokeAuthorizerRequest(input *TestInvokeAuthorizerInpu // Simulate the execution of an Authorizer in your RestApi with headers, parameters, // and an incoming request body. // -// Enable custom authorizers (https://docs.aws.amazon.com/apigateway/latest/developerguide/use-custom-authorizer.html) +// Use Lambda Function as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) +// Use Cognito User Pool as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-integrate-with-cognito.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11348,33 +11377,34 @@ func (s *AccessLogSettings) SetFormat(v string) *AccessLogSettings { // // The following exception may be thrown when the request fails. // -// UnauthorizedException -// NotFoundException -// TooManyRequestsException +// * UnauthorizedException +// +// * NotFoundException +// +// * TooManyRequestsException +// // For detailed error code information, including the corresponding HTTP Status // Codes, see API Gateway Error Codes (https://docs.aws.amazon.com/apigateway/api-reference/handling-errors/#api-error-codes) // // Example: Get the information about an account. 
// // Request -// -// GET /account HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com -// X-Amz-Date: 20160531T184618Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/us-east-1/apigateway/aws4_request, -// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} +// GET /account HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com +// X-Amz-Date: 20160531T184618Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/us-east-1/apigateway/aws4_request, +// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response // // The successful response returns a 200 OK status code and a payload similar // to the following: -// -// { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/account-apigateway-{rel}.html", -// "name": "account", "templated": true }, "self": { "href": "/account" }, "account:update": -// { "href": "/account" } }, "cloudwatchRoleArn": "arn:aws:iam::123456789012:role/apigAwsProxyRole", -// "throttleSettings": { "rateLimit": 500, "burstLimit": 1000 } } +// { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/account-apigateway-{rel}.html", +// "name": "account", "templated": true }, "self": { "href": "/account" }, +// "account:update": { "href": "/account" } }, "cloudwatchRoleArn": "arn:aws:iam::123456789012:role/apigAwsProxyRole", +// "throttleSettings": { "rateLimit": 500, "burstLimit": 1000 } } // In addition to making the REST API call directly, you can use the AWS CLI // and an AWS SDK to access this resource. // -// API Gateway Limits (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-limits.html)Developer -// Guide (https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html), +// API Gateway Limits (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-limits.html) +// Developer Guide (https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html), // AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-account.html) type Account struct { _ struct{} `type:"structure"` @@ -11461,6 +11491,9 @@ type ApiKey struct { // A list of Stage resources that are associated with the ApiKey resource. StageKeys []*string `locationName:"stageKeys" type:"list"` + // The collection of tags. Each tag element is associated with a given resource. + Tags map[string]*string `locationName:"tags" type:"map"` + // The value of the API Key. Value *string `locationName:"value" type:"string"` } @@ -11523,6 +11556,12 @@ func (s *ApiKey) SetStageKeys(v []*string) *ApiKey { return s } +// SetTags sets the Tags field's value. +func (s *ApiKey) SetTags(v map[string]*string) *ApiKey { + s.Tags = v + return s +} + // SetValue sets the Value field's value. func (s *ApiKey) SetValue(v string) *ApiKey { s.Value = &v @@ -11575,7 +11614,8 @@ func (s *ApiStage) SetThrottle(v map[string]*ThrottleSettings) *ApiStage { // Represents an authorization layer for methods. If enabled on a method, API // Gateway will activate the authorizer when a client calls the method. 
//
-// Enable custom authorization (https://docs.aws.amazon.com/apigateway/latest/developerguide/use-custom-authorizer.html)
+// Use Lambda Function as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html)
+// Use Cognito User Pool as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-integrate-with-cognito.html)
type Authorizer struct {
_ struct{} `type:"structure"`
@@ -11608,31 +11648,34 @@ type Authorizer struct {
// The identifier for the authorizer resource.
Id *string `locationName:"id" type:"string"`

- // The identity source for which authorization is requested. For a TOKEN or
- // COGNITO_USER_POOLS authorizer, this is required and specifies the request
- // header mapping expression for the custom header holding the authorization
- // token submitted by the client. For example, if the token header name is Auth,
- // the header mapping expression is method.request.header.Auth.
- // For the REQUEST authorizer, this is required when authorization caching is
- // enabled. The value is a comma-separated string of one or more mapping expressions
- // of the specified request parameters. For example, if an Auth header, a Name
- // query string parameter are defined as identity sources, this value is method.request.header.Auth,
- // method.request.querystring.Name. These parameters will be used to derive
- // the authorization caching key and to perform runtime validation of the REQUEST
- // authorizer by verifying all of the identity-related request parameters are
- // present, not null and non-empty. Only when this is true does the authorizer
- // invoke the authorizer Lambda function, otherwise, it returns a 401 Unauthorized
- // response without calling the Lambda function. The valid value is a string
- // of comma-separated mapping expressions of the specified request parameters.
- // When the authorization caching is not enabled, this property is optional.
+ // The identity source for which authorization is requested.
+ // * For a TOKEN or COGNITO_USER_POOLS authorizer, this is required and specifies
+ // the request header mapping expression for the custom header holding the
+ // authorization token submitted by the client. For example, if the token
+ // header name is Auth, the header mapping expression is method.request.header.Auth.
+ //
+ // * For the REQUEST authorizer, this is required when authorization caching
+ // is enabled. The value is a comma-separated string of one or more mapping
+ // expressions of the specified request parameters. For example, if an Auth
+ // header and a Name query string parameter are defined as identity sources,
+ // this value is method.request.header.Auth, method.request.querystring.Name.
+ // These parameters will be used to derive the authorization caching key
+ // and to perform runtime validation of the REQUEST authorizer by verifying
+ // all of the identity-related request parameters are present, not null and
+ // non-empty. Only when this is true does the authorizer invoke the authorizer
+ // Lambda function; otherwise, it returns a 401 Unauthorized response without
+ // calling the Lambda function. The valid value is a string of comma-separated
+ // mapping expressions of the specified request parameters. When the authorization
+ // caching is not enabled, this property is optional.
IdentitySource *string `locationName:"identitySource" type:"string"`

// A validation expression for the incoming identity token. For TOKEN authorizers,
- // this value is a regular expression. API Gateway will match the aud field
- // of the incoming token from the client against the specified regular expression.
- // It will invoke the authorizer's Lambda function when there is a match. Otherwise,
- // it will return a 401 Unauthorized response without calling the Lambda function.
- // The validation expression does not apply to the REQUEST authorizer.
+ // this value is a regular expression. For COGNITO_USER_POOLS authorizers, API
+ // Gateway will match the aud field of the incoming token from the client against
+ // the specified regular expression. It will invoke the authorizer's Lambda
+ // function when there is a match. Otherwise, it will return a 401 Unauthorized
+ // response without calling the Lambda function. The validation expression does
+ // not apply to the REQUEST authorizer.
IdentityValidationExpression *string `locationName:"identityValidationExpression" type:"string"`

// [Required] The name of the authorizer.
@@ -11725,6 +11768,7 @@ func (s *Authorizer) SetType(v string) *Authorizer {
//
// A custom domain name plus a BasePathMapping specification identifies a deployed
// RestApi in a given stage of the owner Account.
+//
// Use Custom Domain Names (https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-custom-domains.html)
type BasePathMapping struct {
_ struct{} `type:"structure"`
@@ -11828,6 +11872,7 @@ func (s *CanarySettings) SetUseStageCache(v bool) *CanarySettings {
// Client certificates are used to authenticate an API by the backend server.
// To authenticate an API client (or user), use IAM roles and policies, a custom
// Authorizer or an Amazon Cognito user pool.
+//
// Use Client-Side Certificate (https://docs.aws.amazon.com/apigateway/latest/developerguide/getting-started-client-side-ssl-authentication.html)
type ClientCertificate struct {
_ struct{} `type:"structure"`
@@ -11847,6 +11892,9 @@ type ClientCertificate struct {
// The PEM-encoded public key of the client certificate, which can be used to
// configure certificate authentication in the integration endpoint.
PemEncodedCertificate *string `locationName:"pemEncodedCertificate" type:"string"`
+
+ // The collection of tags. Each tag element is associated with a given resource.
+ Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
@@ -11889,6 +11937,12 @@ func (s *ClientCertificate) SetPemEncodedCertificate(v string) *ClientCertificat
return s
}

+// SetTags sets the Tags field's value.
+func (s *ClientCertificate) SetTags(v map[string]*string) *ClientCertificate {
+ s.Tags = v
+ return s
+}
+
// Request to create an ApiKey resource.
type CreateApiKeyInput struct {
_ struct{} `type:"structure"`
@@ -11913,6 +11967,11 @@ type CreateApiKeyInput struct {
// DEPRECATED FOR USAGE PLANS - Specifies stages associated with the API key.
StageKeys []*StageKey `locationName:"stageKeys" type:"list"`

+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
+
// Specifies a value of the API key.
Value *string `locationName:"value" type:"string"`
}
@@ -11963,6 +12022,12 @@ func (s *CreateApiKeyInput) SetStageKeys(v []*StageKey) *CreateApiKeyInput {
return s
}

+// SetTags sets the Tags field's value.
+func (s *CreateApiKeyInput) SetTags(v map[string]*string) *CreateApiKeyInput {
+ s.Tags = v
+ return s
+}
+
// SetValue sets the Value field's value.
func (s *CreateApiKeyInput) SetValue(v string) *CreateApiKeyInput {
s.Value = &v
@@ -11999,31 +12064,34 @@ type CreateAuthorizerInput struct {
// is usually of the form /2015-03-31/functions/[FunctionARN]/invocations.
AuthorizerUri *string `locationName:"authorizerUri" type:"string"`

- // The identity source for which authorization is requested. For a TOKEN or
- // COGNITO_USER_POOLS authorizer, this is required and specifies the request
- // header mapping expression for the custom header holding the authorization
- // token submitted by the client. For example, if the token header name is Auth,
- // the header mapping expression is method.request.header.Auth.
- // For the REQUEST authorizer, this is required when authorization caching is
- // enabled. The value is a comma-separated string of one or more mapping expressions
- // of the specified request parameters. For example, if an Auth header, a Name
- // query string parameter are defined as identity sources, this value is method.request.header.Auth,
- // method.request.querystring.Name. These parameters will be used to derive
- // the authorization caching key and to perform runtime validation of the REQUEST
- // authorizer by verifying all of the identity-related request parameters are
- // present, not null and non-empty. Only when this is true does the authorizer
- // invoke the authorizer Lambda function, otherwise, it returns a 401 Unauthorized
- // response without calling the Lambda function. The valid value is a string
- // of comma-separated mapping expressions of the specified request parameters.
- // When the authorization caching is not enabled, this property is optional.
+ // The identity source for which authorization is requested.
+ // * For a TOKEN or COGNITO_USER_POOLS authorizer, this is required and specifies
+ // the request header mapping expression for the custom header holding the
+ // authorization token submitted by the client. For example, if the token
+ // header name is Auth, the header mapping expression is method.request.header.Auth.
+ //
+ // * For the REQUEST authorizer, this is required when authorization caching
+ // is enabled. The value is a comma-separated string of one or more mapping
+ // expressions of the specified request parameters. For example, if an Auth
+ // header and a Name query string parameter are defined as identity sources,
+ // this value is method.request.header.Auth, method.request.querystring.Name.
+ // These parameters will be used to derive the authorization caching key
+ // and to perform runtime validation of the REQUEST authorizer by verifying
+ // all of the identity-related request parameters are present, not null and
+ // non-empty. Only when this is true does the authorizer invoke the authorizer
+ // Lambda function; otherwise, it returns a 401 Unauthorized response without
+ // calling the Lambda function. The valid value is a string of comma-separated
+ // mapping expressions of the specified request parameters. When the authorization
+ // caching is not enabled, this property is optional.
IdentitySource *string `locationName:"identitySource" type:"string"`

// A validation expression for the incoming identity token. For TOKEN authorizers,
- // this value is a regular expression. API Gateway will match the aud field
- // of the incoming token from the client against the specified regular expression.
- // It will invoke the authorizer's Lambda function when there is a match. Otherwise,
- // it will return a 401 Unauthorized response without calling the Lambda function.
- // The validation expression does not apply to the REQUEST authorizer.
+ // this value is a regular expression. For COGNITO_USER_POOLS authorizers, API
+ // Gateway will match the aud field of the incoming token from the client against
+ // the specified regular expression. It will invoke the authorizer's Lambda
+ // function when there is a match. Otherwise, it will return a 401 Unauthorized
+ // response without calling the Lambda function. The validation expression does
+ // not apply to the REQUEST authorizer.
IdentityValidationExpression *string `locationName:"identityValidationExpression" type:"string"`

// [Required] The name of the authorizer.
@@ -12148,7 +12216,7 @@ type CreateBasePathMappingInput struct {
// The base path name that callers of the API must provide as part of the URL
// after the domain name. This value must be unique for all of the mappings
- // across a single API. Leave this blank if you do not want callers to specify
+ // across a single API. Specify '(none)' if you do not want callers to specify
// a base path name after the domain name.
BasePath *string `locationName:"basePath" type:"string"`

@@ -12162,8 +12230,8 @@ type CreateBasePathMappingInput struct {
// RestApiId is a required field
RestApiId *string `locationName:"restApiId" type:"string" required:"true"`

- // The name of the API's stage that you want to use for this mapping. Leave
- // this blank if you do not want callers to explicitly specify the stage name
+ // The name of the API's stage that you want to use for this mapping. Specify
+ // '(none)' if you do not want callers to explicitly specify the stage name
// after any base path name.
Stage *string `locationName:"stage" type:"string"`
}
@@ -12538,6 +12606,15 @@ type CreateDomainNameInput struct {
// The user-friendly name of the certificate that will be used by regional endpoint
// for this domain name.
RegionalCertificateName *string `locationName:"regionalCertificateName" type:"string"`
+
+ // The Transport Layer Security (TLS) version + cipher suite for this DomainName.
+ // The valid values are TLS_1_0 and TLS_1_2.
+ SecurityPolicy *string `locationName:"securityPolicy" type:"string" enum:"SecurityPolicy"`
+
+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
@@ -12617,6 +12694,18 @@ func (s *CreateDomainNameInput) SetRegionalCertificateName(v string) *CreateDoma
return s
}

+// SetSecurityPolicy sets the SecurityPolicy field's value.
+func (s *CreateDomainNameInput) SetSecurityPolicy(v string) *CreateDomainNameInput {
+ s.SecurityPolicy = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateDomainNameInput) SetTags(v map[string]*string) *CreateDomainNameInput {
+ s.Tags = v
+ return s
+}
+
// Request to add a new Model to an existing RestApi resource.
type CreateModelInput struct {
_ struct{} `type:"structure"`
@@ -12855,10 +12944,11 @@ type CreateRestApiInput struct {
_ struct{} `type:"structure"`

// The source of the API key for metering requests according to a usage plan.
- // Valid values are: HEADER to read the API key from the X-API-Key header of
- // a request.
- // AUTHORIZER to read the API key from the UsageIdentifierKey from a custom
- // authorizer.
+ // Valid values are:
+ // * HEADER to read the API key from the X-API-Key header of a request.
+ //
+ // * AUTHORIZER to read the API key from the UsageIdentifierKey from a custom
+ // authorizer.
ApiKeySource *string `locationName:"apiKeySource" type:"string" enum:"ApiKeySourceType"`

// The list of binary media types supported by the RestApi. By default, the
@@ -12888,9 +12978,14 @@ type CreateRestApiInput struct {
Name *string `locationName:"name" type:"string" required:"true"`

// A stringified JSON policy document that applies to this RestApi regardless
- // of the caller and Method
+ // of the caller and Method configuration.
Policy *string `locationName:"policy" type:"string"`

+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
+
// A version identifier for the API.
Version *string `locationName:"version" type:"string"`
}
@@ -12966,6 +13061,12 @@ func (s *CreateRestApiInput) SetPolicy(v string) *CreateRestApiInput {
return s
}

+// SetTags sets the Tags field's value.
+func (s *CreateRestApiInput) SetTags(v map[string]*string) *CreateRestApiInput {
+ s.Tags = v
+ return s
+}
+
// SetVersion sets the Version field's value.
func (s *CreateRestApiInput) SetVersion(v string) *CreateRestApiInput {
s.Version = &v
@@ -13001,7 +13102,9 @@ type CreateStageInput struct {
// RestApiId is a required field
RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`

- // [Required] The name for the Stage resource.
+ // [Required] The name for the Stage resource. Stage names can only contain
+ // alphanumeric characters, hyphens, and underscores. Maximum length is 128
+ // characters.
//
// StageName is a required field
StageName *string `locationName:"stageName" type:"string" required:"true"`
@@ -13138,6 +13241,11 @@ type CreateUsagePlanInput struct {
// The quota of the usage plan.
Quota *QuotaSettings `locationName:"quota" type:"structure"`

+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
+
// The throttling limits of the usage plan.
Throttle *ThrottleSettings `locationName:"throttle" type:"structure"`
}
@@ -13189,6 +13297,12 @@ func (s *CreateUsagePlanInput) SetQuota(v *QuotaSettings) *CreateUsagePlanInput
return s
}

+// SetTags sets the Tags field's value.
+func (s *CreateUsagePlanInput) SetTags(v map[string]*string) *CreateUsagePlanInput {
+ s.Tags = v
+ return s
+}
+
// SetThrottle sets the Throttle field's value.
func (s *CreateUsagePlanInput) SetThrottle(v *ThrottleSettings) *CreateUsagePlanInput {
s.Throttle = v
@@ -13282,6 +13396,11 @@ type CreateVpcLinkInput struct {
// Name is a required field
Name *string `locationName:"name" type:"string" required:"true"`

+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
+
// [Required] The ARNs of network load balancers of the VPC targeted by the
// VPC link. The network load balancers must be owned by the same AWS account
// of the API owner.
@@ -13328,6 +13447,12 @@ func (s *CreateVpcLinkInput) SetName(v string) *CreateVpcLinkInput {
return s
}

+// SetTags sets the Tags field's value.
+func (s *CreateVpcLinkInput) SetTags(v map[string]*string) *CreateVpcLinkInput {
+ s.Tags = v
+ return s
+}
+
// SetTargetArns sets the TargetArns field's value.
func (s *CreateVpcLinkInput) SetTargetArns(v []*string) *CreateVpcLinkInput {
s.TargetArns = v
@@ -13469,6 +13594,8 @@ type DeleteBasePathMappingInput struct {
// [Required] The base path name of the BasePathMapping resource to delete.
//
+ // To specify an empty base path, set this parameter to '(none)'.
+ //
// BasePath is a required field
BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"`

@@ -13872,27 +13999,47 @@ func (s DeleteDomainNameOutput) GoString() string {
type DeleteGatewayResponseInput struct {
_ struct{} `type:"structure"`

- // [Required] The response type of the associated GatewayResponse. Valid values
- // are ACCESS_DENIED
- // API_CONFIGURATION_ERROR
- // AUTHORIZER_FAILURE
- // AUTHORIZER_CONFIGURATION_ERROR
- // BAD_REQUEST_PARAMETERS
- // BAD_REQUEST_BODY
- // DEFAULT_4XX
- // DEFAULT_5XX
- // EXPIRED_TOKEN
- // INVALID_SIGNATURE
- // INTEGRATION_FAILURE
- // INTEGRATION_TIMEOUT
- // INVALID_API_KEY
- // MISSING_AUTHENTICATION_TOKEN
- // QUOTA_EXCEEDED
- // REQUEST_TOO_LARGE
- // RESOURCE_NOT_FOUND
- // THROTTLED
- // UNAUTHORIZED
- // UNSUPPORTED_MEDIA_TYPE
+ // [Required]
+ // The response type of the associated GatewayResponse. Valid values are
+ // * ACCESS_DENIED
+ //
+ // * API_CONFIGURATION_ERROR
+ //
+ // * AUTHORIZER_FAILURE
+ //
+ // * AUTHORIZER_CONFIGURATION_ERROR
+ //
+ // * BAD_REQUEST_PARAMETERS
+ //
+ // * BAD_REQUEST_BODY
+ //
+ // * DEFAULT_4XX
+ //
+ // * DEFAULT_5XX
+ //
+ // * EXPIRED_TOKEN
+ //
+ // * INVALID_SIGNATURE
+ //
+ // * INTEGRATION_FAILURE
+ //
+ // * INTEGRATION_TIMEOUT
+ //
+ // * INVALID_API_KEY
+ //
+ // * MISSING_AUTHENTICATION_TOKEN
+ //
+ // * QUOTA_EXCEEDED
+ //
+ // * REQUEST_TOO_LARGE
+ //
+ // * RESOURCE_NOT_FOUND
+ //
+ // * THROTTLED
+ //
+ // * UNAUTHORIZED
+ //
+ // * UNSUPPORTED_MEDIA_TYPE
//
// ResponseType is a required field
ResponseType *string `location:"uri" locationName:"response_type" type:"string" required:"true" enum:"GatewayResponseType"`
@@ -15257,6 +15404,16 @@ type DomainName struct {
// The custom domain name as an API host name, for example, my-api.example.com.
DomainName *string `locationName:"domainName" type:"string"`

+ // The status of the DomainName migration. The valid values are AVAILABLE and
+ // UPDATING. If the status is UPDATING, the domain cannot be modified further
+ // until the existing operation is complete. If it is AVAILABLE, the domain
+ // can be updated.
+ DomainNameStatus *string `locationName:"domainNameStatus" type:"string" enum:"DomainNameStatus"`
+
+ // An optional text message containing detailed information about status of
+ // the DomainName migration.
+ DomainNameStatusMessage *string `locationName:"domainNameStatusMessage" type:"string"`
+
// The endpoint configuration of this DomainName showing the endpoint types
// of the domain name.
EndpointConfiguration *EndpointConfiguration `locationName:"endpointConfiguration" type:"structure"`
@@ -15279,6 +15436,13 @@ type DomainName struct {
// For more information, see Set up a Regional Custom Domain Name (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-regional-api-custom-domain-create.html)
// and AWS Regions and Endpoints for API Gateway (https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region).
RegionalHostedZoneId *string `locationName:"regionalHostedZoneId" type:"string"`
+
+ // The Transport Layer Security (TLS) version + cipher suite for this DomainName.
+ // The valid values are TLS_1_0 and TLS_1_2.
+ SecurityPolicy *string `locationName:"securityPolicy" type:"string" enum:"SecurityPolicy"`
+
+ // The collection of tags. Each tag element is associated with a given resource.
+ Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
@@ -15327,6 +15491,18 @@ func (s *DomainName) SetDomainName(v string) *DomainName {
return s
}

+// SetDomainNameStatus sets the DomainNameStatus field's value.
+func (s *DomainName) SetDomainNameStatus(v string) *DomainName {
+ s.DomainNameStatus = &v
+ return s
+}
+
+// SetDomainNameStatusMessage sets the DomainNameStatusMessage field's value.
+func (s *DomainName) SetDomainNameStatusMessage(v string) *DomainName {
+ s.DomainNameStatusMessage = &v
+ return s
+}
+
// SetEndpointConfiguration sets the EndpointConfiguration field's value.
func (s *DomainName) SetEndpointConfiguration(v *EndpointConfiguration) *DomainName {
s.EndpointConfiguration = v
@@ -15357,6 +15533,18 @@ func (s *DomainName) SetRegionalHostedZoneId(v string) *DomainName {
return s
}

+// SetSecurityPolicy sets the SecurityPolicy field's value.
+func (s *DomainName) SetSecurityPolicy(v string) *DomainName {
+ s.SecurityPolicy = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *DomainName) SetTags(v map[string]*string) *DomainName {
+ s.Tags = v
+ return s
+}
+
// The endpoint configuration to indicate the types of endpoints an API (RestApi)
// or its custom domain name (DomainName) has.
type EndpointConfiguration struct {
@@ -15367,6 +15555,10 @@ type EndpointConfiguration struct {
// "EDGE". For a regional API and its custom domain name, the endpoint type
// is REGIONAL. For a private API, the endpoint type is PRIVATE.
Types []*string `locationName:"types" type:"list"`
+
+ // A list of VpcEndpointIds of an API (RestApi) against which to create Route53
+ // ALIASes. It is only supported for PRIVATE endpoint type.
+ VpcEndpointIds []*string `locationName:"vpcEndpointIds" type:"list"`
}

// String returns the string representation
@@ -15385,6 +15577,12 @@ func (s *EndpointConfiguration) SetTypes(v []*string) *EndpointConfiguration {
return s
}

+// SetVpcEndpointIds sets the VpcEndpointIds field's value.
+func (s *EndpointConfiguration) SetVpcEndpointIds(v []*string) *EndpointConfiguration {
+ s.VpcEndpointIds = v
+ return s
+}
+
// Request to flush authorizer cache entries on a specified stage.
type FlushStageAuthorizersCacheInput struct {
_ struct{} `type:"structure"`
@@ -15537,6 +15735,11 @@ type GenerateClientCertificateInput struct {
// The description of the ClientCertificate.
Description *string `locationName:"description" type:"string"`
+
+ // The key-value map of strings. The valid character set is [a-zA-Z+-=._:/].
+ // The tag key can be up to 128 characters and must not start with aws:. The
+ // tag value can be up to 256 characters.
+ Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
@@ -15555,6 +15758,12 @@ func (s *GenerateClientCertificateInput) SetDescription(v string) *GenerateClien
return s
}

+// SetTags sets the Tags field's value.
+func (s *GenerateClientCertificateInput) SetTags(v map[string]*string) *GenerateClientCertificateInput {
+ s.Tags = v
+ return s
+}
+
// Requests API Gateway to get information about the current Account resource.
type GetAccountInput struct {
_ struct{} `type:"structure"`
@@ -15851,7 +16060,8 @@ func (s *GetAuthorizersInput) SetRestApiId(v string) *GetAuthorizersInput {
// Represents a collection of Authorizer resources.
//
-// Enable custom authorization (https://docs.aws.amazon.com/apigateway/latest/developerguide/use-custom-authorizer.html)
+// Use Lambda Function as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html)
+// Use Cognito User Pool as Authorizer (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-integrate-with-cognito.html)
type GetAuthorizersOutput struct {
_ struct{} `type:"structure"`
@@ -15889,7 +16099,7 @@ type GetBasePathMappingInput struct {
// [Required] The base path name that callers of the API must provide as part
// of the URL after the domain name. This value must be unique for all of the
- // mappings across a single API. Leave this blank if you do not want callers
+ // mappings across a single API. Specify '(none)' if you do not want callers
// to specify any base path name after the domain name.
//
// BasePath is a required field
@@ -16489,7 +16699,8 @@ func (s *GetDocumentationPartsInput) SetType(v string) *GetDocumentationPartsInp
// The collection of documentation parts of an API.
//
-// Documenting an API (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-documenting-api.html), DocumentationPart
+// Documenting an API (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-documenting-api.html),
+// DocumentationPart
type GetDocumentationPartsOutput struct {
_ struct{} `type:"structure"`
@@ -16805,7 +17016,7 @@ type GetExportInput struct {
ExportType *string `location:"uri" locationName:"export_type" type:"string" required:"true"`

// A key-value map of query string parameters that specify properties of the
- // export, depending on the requested exportType. For exportTypeoas30 and swagger,
+ // export, depending on the requested exportType. For exportType oas30 and swagger,
// any combination of the following parameters are supported: extensions='integrations'
// or extensions='apigateway' will export the API with x-amazon-apigateway-integration
// extensions. extensions='authorizers' will export the API with x-amazon-apigateway-authorizer
@@ -16939,27 +17150,47 @@ func (s *GetExportOutput) SetContentType(v string) *GetExportOutput {
type GetGatewayResponseInput struct {
_ struct{} `type:"structure"`

- // [Required] The response type of the associated GatewayResponse.
- // Valid values are ACCESS_DENIED
- // API_CONFIGURATION_ERROR
- // AUTHORIZER_FAILURE
- // AUTHORIZER_CONFIGURATION_ERROR
- // BAD_REQUEST_PARAMETERS
- // BAD_REQUEST_BODY
- // DEFAULT_4XX
- // DEFAULT_5XX
- // EXPIRED_TOKEN
- // INVALID_SIGNATURE
- // INTEGRATION_FAILURE
- // INTEGRATION_TIMEOUT
- // INVALID_API_KEY
- // MISSING_AUTHENTICATION_TOKEN
- // QUOTA_EXCEEDED
- // REQUEST_TOO_LARGE
- // RESOURCE_NOT_FOUND
- // THROTTLED
- // UNAUTHORIZED
- // UNSUPPORTED_MEDIA_TYPE
+ // [Required]
+ // The response type of the associated GatewayResponse. Valid values are
+ // * ACCESS_DENIED
+ //
+ // * API_CONFIGURATION_ERROR
+ //
+ // * AUTHORIZER_FAILURE
+ //
+ // * AUTHORIZER_CONFIGURATION_ERROR
+ //
+ // * BAD_REQUEST_PARAMETERS
+ //
+ // * BAD_REQUEST_BODY
+ //
+ // * DEFAULT_4XX
+ //
+ // * DEFAULT_5XX
+ //
+ // * EXPIRED_TOKEN
+ //
+ // * INVALID_SIGNATURE
+ //
+ // * INTEGRATION_FAILURE
+ //
+ // * INTEGRATION_TIMEOUT
+ //
+ // * INVALID_API_KEY
+ //
+ // * MISSING_AUTHENTICATION_TOKEN
+ //
+ // * QUOTA_EXCEEDED
+ //
+ // * REQUEST_TOO_LARGE
+ //
+ // * RESOURCE_NOT_FOUND
+ //
+ // * THROTTLED
+ //
+ // * UNAUTHORIZED
+ //
+ // * UNSUPPORTED_MEDIA_TYPE
//
// ResponseType is a required field
ResponseType *string `location:"uri" locationName:"response_type" type:"string" required:"true" enum:"GatewayResponseType"`
@@ -17085,159 +17316,161 @@ func (s *GetGatewayResponsesInput) SetRestApiId(v string) *GetGatewayResponsesIn
// this collection.
//
// For more information about valid gateway response types, see Gateway Response
-// Types Supported by API Gateway (https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html)Example:
-// Get the collection of gateway responses of an API
+// Types Supported by API Gateway (https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html)
+//
+// Example: Get the collection of gateway responses of an API
//
// Request
//
// This example request shows how to retrieve the GatewayResponses collection
// from an API.
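The same retrieval can be done through the SDK client rather than a hand-signed request like the one shown next. A minimal sketch, reusing the example API id o81lxisefl from the payload below and assuming default credential/region resolution (pagination via Position/Limit is omitted):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := apigateway.New(sess)

	// List the gateway responses configured on the example API.
	out, err := svc.GetGatewayResponses(&apigateway.GetGatewayResponsesInput{
		RestApiId: aws.String("o81lxisefl"), // API id reused from the doc example
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range out.Items {
		log.Printf("%s -> HTTP %s", aws.StringValue(item.ResponseType), aws.StringValue(item.StatusCode))
	}
}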
-//
-// GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com
-// Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization:
-// AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request,
-// SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a
-// Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515
+// GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com
+// Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization:
+// AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request,
+// SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a
+// Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515
// Response
//
// The successful operation returns the 200 OK status code and a payload similar
// to the following:
-//
-// { "_links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html",
-// "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses"
-// }, "first": { "href": "/restapis/o81lxisefl/gatewayresponses" }, "gatewayresponse:by-type":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "item": [ { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND" },
-// { "href": "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE" }, {
-// "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX" }, { "href":
-// "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX" }, { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY" },
-// { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN" }, { "href":
-// "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, { "href":
-// "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR" }, { "href":
-// "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN"
-// }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE" },
-// { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE" } ]
-// }, "_embedded": { "item": [ { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "INTEGRATION_FAILURE", "statusCode": "504" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "RESOURCE_NOT_FOUND", "statusCode": "404" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "REQUEST_TOO_LARGE", "statusCode": "413" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "THROTTLED", "statusCode": "429" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "UNSUPPORTED_MEDIA_TYPE", "statusCode": "415" }, { "_links": { "self": {
-// "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "AUTHORIZER_CONFIGURATION_ERROR", "statusCode": "500" }, { "_links": { "self":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "DEFAULT_5XX" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "DEFAULT_4XX" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "BAD_REQUEST_PARAMETERS", "statusCode": "400" }, { "_links": { "self": {
-// "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "BAD_REQUEST_BODY", "statusCode": "400" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "EXPIRED_TOKEN", "statusCode": "403" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "ACCESS_DENIED", "statusCode": "403" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "INVALID_API_KEY", "statusCode": "403" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "UNAUTHORIZED", "statusCode": "401" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR"
-// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
-// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "API_CONFIGURATION_ERROR", "statusCode": "500" }, { "_links": { "self": {
-// "href": "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "QUOTA_EXCEEDED", "statusCode": "429" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "INTEGRATION_TIMEOUT", "statusCode": "504" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "MISSING_AUTHENTICATION_TOKEN", "statusCode": "403" }, { "_links": { "self":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "INVALID_SIGNATURE", "statusCode": "403" }, { "_links": { "self": { "href":
-// "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE" }, "gatewayresponse:put":
-// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
-// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE"
-// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
-// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
-// "AUTHORIZER_FAILURE", "statusCode": "500" } ] } }
+// { "_links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html",
+// "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses"
+// }, "first": { "href": "/restapis/o81lxisefl/gatewayresponses" }, "gatewayresponse:by-type":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "item": [ { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE" },
+// { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR" },
+// { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX" }, { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY" },
+// { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR" }, { "href":
+// "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED" }, { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN"
+// }, { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE" },
+// { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE" } ]
+// }, "_embedded": { "item": [ { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "INTEGRATION_FAILURE", "statusCode": "504" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "RESOURCE_NOT_FOUND", "statusCode": "404" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "REQUEST_TOO_LARGE", "statusCode": "413" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/THROTTLED" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/THROTTLED"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "THROTTLED", "statusCode": "429" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "UNSUPPORTED_MEDIA_TYPE", "statusCode": "415" }, { "_links": { "self": {
+// "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "AUTHORIZER_CONFIGURATION_ERROR", "statusCode": "500" }, { "_links": { "self":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "DEFAULT_5XX" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "DEFAULT_4XX" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "BAD_REQUEST_PARAMETERS", "statusCode": "400" }, { "_links": { "self": {
+// "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "BAD_REQUEST_BODY", "statusCode": "400" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "EXPIRED_TOKEN", "statusCode": "403" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "ACCESS_DENIED", "statusCode": "403" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "INVALID_API_KEY", "statusCode": "403" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "UNAUTHORIZED", "statusCode": "401" }, { "_links": { "self": { "href": "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR"
+// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "API_CONFIGURATION_ERROR", "statusCode": "500" }, { "_links": { "self":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "QUOTA_EXCEEDED", "statusCode": "429" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "INTEGRATION_TIMEOUT", "statusCode": "504" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" },
+// "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}",
+// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "MISSING_AUTHENTICATION_TOKEN", "statusCode": "403" }, { "_links": { "self":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "INVALID_SIGNATURE", "statusCode": "403" }, { "_links": { "self": { "href":
+// "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE" }, "gatewayresponse:put":
+// { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", "templated":
+// true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE"
+// } }, "defaultResponse": true, "responseParameters": {}, "responseTemplates":
+// { "application/json": "{\"message\":$context.error.messageString}" }, "responseType":
+// "AUTHORIZER_FAILURE", "statusCode": "500" } ] } }
//
// Customize Gateway Responses (https://docs.aws.amazon.com/apigateway/latest/developerguide/customize-gateway-responses.html)
type GetGatewayResponsesOutput struct {
@@ -18700,7 +18933,7 @@ type GetTagsInput struct {
Position *string `location:"querystring" locationName:"position" type:"string"`

// [Required] The ARN of a resource that can be tagged. The resource ARN must
- // be URL-encoded. At present, Stage is the only taggable resource.
+ // be URL-encoded.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"`
@@ -19454,6 +19687,7 @@ func (s *ImportDocumentationPartsInput) SetRestApiId(v string) *ImportDocumentat
//
// This is used to return the result when documentation parts in an external
// (e.g., OpenAPI) file are imported into API Gateway
+//
// Documenting an API (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-documenting-api.html),
// documentationpart:import (https://docs.aws.amazon.com/apigateway/api-reference/link-relation/documentationpart-import/),
// DocumentationPart
@@ -19515,14 +19749,15 @@ type ImportRestApiInput struct {
// endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE.
// The default endpoint type is EDGE.
//
- // To handle imported basePath, set parameters as basePath=ignore, basePath=prepend
- // or basePath=split.
+ // To handle imported basepath, set parameters as basepath=ignore, basepath=prepend
+ // or basepath=split.
//
// For example, the AWS CLI command to exclude documentation from the imported
// API is:
//
// aws apigateway import-rest-api --parameters ignore=documentation --body
// 'file:///path/to/imported-api-body.json'
+ //
// The AWS CLI command to set the regional endpoint on the imported API is:
//
// aws apigateway import-rest-api --parameters endpointConfigurationTypes=REGIONAL
@@ -19579,10 +19814,14 @@ func (s *ImportRestApiInput) SetParameters(v map[string]*string) *ImportRestApiI
type Integration struct {
_ struct{} `type:"structure"`

- // Specifies the integration's cache key parameters.
+ // A list of request parameters whose values API Gateway caches.
+ // To be valid values for cacheKeyParameters, these parameters must also be
+ // specified for Method requestParameters.
CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"`

- // Specifies the integration's cache namespace.
+ // An API-specific tag group of related cached parameters. To be valid values
+ // for cacheKeyParameters, these parameters must also be specified for Method
+ // requestParameters.
CacheNamespace *string `locationName:"cacheNamespace" type:"string"`

// The (id (https://docs.aws.amazon.com/apigateway/api-reference/resource/vpc-link/#id))
@@ -19607,7 +19846,7 @@ type Integration struct {
//
// If this property is not defined, the request payload will be passed through
// from the method request to integration request without modification, provided
- // that the passthroughBehaviors is configured to support payload pass-through.
+ // that the passthroughBehavior is configured to support payload pass-through.
ContentHandling *string `locationName:"contentHandling" type:"string" enum:"ContentHandlingStrategy"`

// Specifies the credentials required for the integration, if any. For AWS integrations,
@@ -19625,23 +19864,23 @@ type Integration struct {
// Example: Get integration responses of a method
//
// Request
- //
- // GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200
- // HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com
- // X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request,
- // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
+ // GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200
+ // HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com
+ // X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request,
+ // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
// Response
//
- // The successful response returns 200 OKstatus and a payload as follows:
- //
- // { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html",
- // "name": "integrationresponse", "templated": true }, "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200",
- // "title": "200" }, "integrationresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200"
- // }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200"
- // } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'"
- // }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E#foreach($stream
- // in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\")\n"
- // }, "statusCode": "200" }
+ // The successful response returns 200 OK status and a payload as follows:
+ // { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html",
+ // "name": "integrationresponse", "templated": true }, "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200",
+ // "title": "200" }, "integrationresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200"
+ // }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200"
+ // } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'"
+ // }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E#foreach($stream
+ // in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\")\n"
+ // }, "statusCode": "200" }
+ //
+ // Creating an API (https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-create-api.html)
IntegrationResponses map[string]*IntegrationResponse `locationName:"integrationResponses" type:"map"`

// Specifies how the method request body of an unmapped content type will be
@@ -19650,19 +19889,22 @@ type Integration struct {
// or the content type does not match any of the mapped content types, as specified
// in requestTemplates. The valid value is one of the following:
//
- // WHEN_NO_MATCH: passes the method request body through the integration request
- // to the back end without transformation when the method request content type
- // does not match any content type associated with the mapping templates defined
- // in the integration request.
- // WHEN_NO_TEMPLATES: passes the method request body through the integration
- // request to the back end without transformation when no mapping template is
- // defined in the integration request. If a template is defined when this option
- // is selected, the method request of an unmapped content-type will be rejected
- // with an HTTP 415 Unsupported Media Type response.
- // NEVER: rejects the method request with an HTTP 415 Unsupported Media Type
- // response when either the method request content type does not match any content
- // type associated with the mapping templates defined in the integration request
- // or no mapping template is defined in the integration request.
+ // * WHEN_NO_MATCH: passes the method request body through the integration
+ // request to the back end without transformation when the method request
+ // content type does not match any content type associated with the mapping
+ // templates defined in the integration request.
+ //
+ // * WHEN_NO_TEMPLATES: passes the method request body through the integration
+ // request to the back end without transformation when no mapping template
+ // is defined in the integration request. If a template is defined when this
+ // option is selected, the method request of an unmapped content-type will
+ // be rejected with an HTTP 415 Unsupported Media Type response.
+ //
+ // * NEVER: rejects the method request with an HTTP 415 Unsupported Media
+ // Type response when either the method request content type does not match
+ // any content type associated with the mapping templates defined in the
+ // integration request or no mapping template is defined in the integration
+ // request.
PassthroughBehavior *string `locationName:"passthroughBehavior" type:"string"`

// A key-value map specifying request parameters that are passed from the method
@@ -19690,18 +19932,22 @@ type Integration struct {
// including the Lambda function-invoking action. With the Lambda function-invoking
// action, this is referred to as the Lambda custom integration. With any
// other AWS service action, this is known as AWS integration.
+ // // * AWS_PROXY: for integrating the API method request with the Lambda function-invoking // action with the client request passed through as-is. This integration // is also referred to as the Lambda proxy integration. + // // * HTTP: for integrating the API method request with an HTTP endpoint, // including a private HTTP endpoint within a VPC. This integration is also // referred to as the HTTP custom integration. + // // * HTTP_PROXY: for integrating the API method request with an HTTP endpoint, // including a private HTTP endpoint within a VPC, with the client request // passed through as-is. This is also referred to as the HTTP proxy integration. // // * MOCK: for integrating the API method request with API Gateway as a "loop-back" // endpoint without invoking any backend. + // // For the HTTP and HTTP proxy integrations, each integration can specify a // protocol (http/https), port and path. Standard 80 and 443 ports are supported // as well as custom ports above 1024. An HTTP or HTTP proxy integration with @@ -19711,13 +19957,13 @@ type Integration struct { // Specifies Uniform Resource Identifier (URI) of the integration endpoint. // - // * For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, - // encoded HTTP(S) URL according to the RFC-3986 specification (_blank), + // * For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, + // encoded HTTP(S) URL according to the RFC-3986 specification (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), // for either standard integration, where connectionType is not VPC_LINK, // or private integration, where connectionType is VPC_LINK. For a private // HTTP integration, the URI is not used for routing. // - // * For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. + // * For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. // Here, {Region} is the API Gateway region (e.g., us-east-1); {service} // is the name of the integrated AWS service (e.g., s3); and {subdomain} // is a designated subdomain supported by certain AWS service for fast host-name @@ -19937,60 +20183,59 @@ func (s *IntegrationResponse) SetStatusCode(v string) *IntegrationResponse { // // The following example request retrieves the information about the GET method // on an API resource (3kzxbg5sa2) of an API (fugvjdxtri). 
-// -// GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: -// application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: -// 20160603T210259Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, -// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} +// GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: +// application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160603T210259Z +// Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, +// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response // // The successful response returns a 200 OK status code and a payload similar // to the following: -// -// { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", -// "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", -// "name": "integrationresponse", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html", -// "name": "method", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", -// "name": "methodresponse", "templated": true } ], "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET", -// "name": "GET", "title": "GET" }, "integration:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" -// }, "method:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" -// }, "method:integration": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" -// }, "method:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", -// "name": "200", "title": "200" }, "method:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" -// }, "methodresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}", -// "templated": true } }, "apiKeyRequired": true, "authorizationType": "NONE", -// "httpMethod": "GET", "_embedded": { "method:integration": { "_links": -// { "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" -// }, "integration:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" -// }, "integration:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", -// "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" -// }, "integrationresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}", -// "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "3kzxbg5sa2", -// "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": -// "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestParameters": { -// "integration.request.header.Content-Type": "'application/x-amz-json-1.1'" -// }, "requestTemplates": { "application/json": "{\n}" }, "type": "AWS", -// "uri": "arn:aws:apigateway:us-east-1:kinesis:action/ListStreams", "_embedded": -// { "integration:responses": { "_links": { "self": { "href": 
"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", -// "name": "200", "title": "200" }, "integrationresponse:delete": { "href": -// "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" -// }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" -// } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'" -// }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E\")" -// }, "statusCode": "200" } } }, "method:responses": { "_links": { "self": -// { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", -// "name": "200", "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" -// }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" -// } }, "responseModels": { "application/json": "Empty" }, "responseParameters": -// { "method.response.header.Content-Type": false }, "statusCode": "200" -// } } } +// { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", +// "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", +// "name": "integrationresponse", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html", +// "name": "method", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", +// "name": "methodresponse", "templated": true } ], "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET", +// "name": "GET", "title": "GET" }, "integration:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" +// }, "method:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" +// }, "method:integration": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" +// }, "method:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", +// "name": "200", "title": "200" }, "method:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" +// }, "methodresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}", +// "templated": true } }, "apiKeyRequired": true, "authorizationType": "NONE", +// "httpMethod": "GET", "_embedded": { "method:integration": { "_links": { +// "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" +// }, "integration:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" +// }, "integration:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", +// "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" +// }, "integrationresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}", +// "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "3kzxbg5sa2", +// 
"credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": +// "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestParameters": { "integration.request.header.Content-Type": +// "'application/x-amz-json-1.1'" }, "requestTemplates": { "application/json": +// "{\n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-east-1:kinesis:action/ListStreams", +// "_embedded": { "integration:responses": { "_links": { "self": { "href": +// "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", +// "name": "200", "title": "200" }, "integrationresponse:delete": { "href": +// "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" +// }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" +// } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'" +// }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E\")" +// }, "statusCode": "200" } } }, "method:responses": { "_links": { "self": +// { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", +// "name": "200", "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" +// }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" +// } }, "responseModels": { "application/json": "Empty" }, "responseParameters": +// { "method.response.header.Content-Type": false }, "statusCode": "200" } +// } } // In the example above, the response template for the 200 OK response maps // the JSON output from the ListStreams action in the back end to an XML output. // The mapping template is URL-encoded as %3CkinesisStreams%3E%23foreach(%24stream%20in%20%24input.path(%27%24.StreamNames%27))%3Cstream%3E%3Cname%3E%24stream%3C%2Fname%3E%3C%2Fstream%3E%23end%3C%2FkinesisStreams%3E // and the output is decoded using the $util.urlDecode() (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#util-templat-reference) // helper function. 
// -// MethodResponse, Integration, IntegrationResponse, Resource, Set up an API's method (https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-method-settings.html) +// MethodResponse, Integration, IntegrationResponse, Resource, Set up an API's +// method (https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-method-settings.html) type Method struct { _ struct{} `type:"structure"` @@ -20027,38 +20272,42 @@ type Method struct { // Example: // // Request - // - // GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration HTTP/1.1 - // Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com Content-Length: - // 117 X-Amz-Date: 20160613T213210Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, - // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} + // GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration HTTP/1.1 + // Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com + // Content-Length: 117 X-Amz-Date: 20160613T213210Z Authorization: AWS4-HMAC-SHA256 + // Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, + // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response // - // The successful response returns a 200 OKstatus code and a payload similar to the following: - // - // { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", - // "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", - // "name": "integrationresponse", "templated": true } ], "self": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" - // }, "integration:delete": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" - // }, "integration:responses": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200", - // "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" - // }, "integrationresponse:put": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/{status_code}", - // "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "0cjtch", - // "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": - // "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestTemplates": { "application/json": - // "{\n \"a\": \"$input.params('operand1')\",\n \"b\": \"$input.params('operand2')\", - // \n \"op\": \"$input.params('operator')\" \n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-west-2:lambda:path//2015-03-31/functions/arn:aws:lambda:us-west-2:123456789012:function:Calc/invocations", - // "_embedded": { "integration:responses": { "_links": { "self": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200", - // "name": "200", "title": "200" }, "integrationresponse:delete": { "href": - // "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200" - // }, "integrationresponse:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200" - // } }, "responseParameters": { "method.response.header.operator": "integration.response.body.op", - // "method.response.header.operand_2": "integration.response.body.b", "method.response.header.operand_1": - // 
"integration.response.body.a" }, "responseTemplates": { "application/json": - // "#set($res = $input.path('$'))\n{\n \"result\": \"$res.a, $res.b, $res.op - // => $res.c\",\n \"a\" : \"$res.a\",\n \"b\" : \"$res.b\",\n \"op\" : \"$res.op\",\n - // \"c\" : \"$res.c\"\n}" }, "selectionPattern": "", "statusCode": "200" } } - // } + // The successful response returns a 200 OK status code and a payload similar + // to the following: + // { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", + // "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", + // "name": "integrationresponse", "templated": true } ], "self": { "href": + // "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" }, "integration:delete": + // { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" + // }, "integration:responses": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200", + // "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration" + // }, "integrationresponse:put": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/{status_code}", + // "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "0cjtch", + // "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": + // "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestTemplates": { "application/json": + // "{\n \"a\": \"$input.params('operand1')\",\n \"b\": \"$input.params('operand2')\", + // \n \"op\": \"$input.params('operator')\" \n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-west-2:lambda:path//2015-03-31/functions/arn:aws:lambda:us-west-2:123456789012:function:Calc/invocations", + // "_embedded": { "integration:responses": { "_links": { "self": { "href": + // "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200", + // "name": "200", "title": "200" }, "integrationresponse:delete": { "href": + // "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200" + // }, "integrationresponse:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/integration/responses/200" + // } }, "responseParameters": { "method.response.header.operator": "integration.response.body.op", + // "method.response.header.operand_2": "integration.response.body.b", "method.response.header.operand_1": + // "integration.response.body.a" }, "responseTemplates": { "application/json": + // "#set($res = $input.path('$'))\n{\n \"result\": \"$res.a, $res.b, $res.op + // => $res.c\",\n \"a\" : \"$res.a\",\n \"b\" : \"$res.b\",\n \"op\" : \"$res.op\",\n + // \"c\" : \"$res.c\"\n}" }, "selectionPattern": "", "statusCode": "200" } + // } } + // + // AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-integration.html) MethodIntegration *Integration `locationName:"methodIntegration" type:"structure"` // Gets a method response associated with a given HTTP status code. 
@@ -20071,30 +20320,30 @@ type Method struct { // Example: Get a 200 OK response of a GET method // // Request - // - // GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200 HTTP/1.1 - // Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com - // Content-Length: 117 X-Amz-Date: 20160613T215008Z Authorization: AWS4-HMAC-SHA256 - // Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, - // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} + // GET /restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200 HTTP/1.1 + // Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com + // Content-Length: 117 X-Amz-Date: 20160613T215008Z Authorization: AWS4-HMAC-SHA256 + // Credential={access_key_ID}/20160613/us-east-1/apigateway/aws4_request, + // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response // // The successful response returns a 200 OK status code and a payload similar // to the following: - // - // { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", - // "name": "methodresponse", "templated": true }, "self": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200", - // "title": "200" }, "methodresponse:delete": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200" - // }, "methodresponse:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200" - // } }, "responseModels": { "application/json": "Empty" }, "responseParameters": - // { "method.response.header.operator": false, "method.response.header.operand_2": - // false, "method.response.header.operand_1": false }, "statusCode": "200" - // } + // { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", + // "name": "methodresponse", "templated": true }, "self": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200", + // "title": "200" }, "methodresponse:delete": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200" + // }, "methodresponse:update": { "href": "/restapis/uojnr9hd57/resources/0cjtch/methods/GET/responses/200" + // } }, "responseModels": { "application/json": "Empty" }, "responseParameters": + // { "method.response.header.operator": false, "method.response.header.operand_2": + // false, "method.response.header.operand_1": false }, "statusCode": "200" + // } + // + // AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-method-response.html) MethodResponses map[string]*MethodResponse `locationName:"methodResponses" type:"map"` // A human-friendly operation identifier for the method. For example, you can - // assign the operationName of ListPets for the GET /pets method in PetStore - // (https://petstore-demo-endpoint.execute-api.com/petstore/pets) example. + // assign the operationName of ListPets for the GET /pets method in the PetStore + // example. OperationName *string `locationName:"operationName" type:"string"` // A key-value map specifying data schemas, represented by Model resources, @@ -20201,22 +20450,21 @@ func (s *Method) SetRequestValidatorId(v string) *Method { // Request // // The example request retrieves a MethodResponse of the 200 status code. 
-// -// GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200 -// HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com -// X-Amz-Date: 20160603T222952Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, -// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} +// GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200 +// HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com +// X-Amz-Date: 20160603T222952Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160603/us-east-1/apigateway/aws4_request, +// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response // // The successful response returns 200 OK status and a payload as follows: -// -// { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", -// "name": "methodresponse", "templated": true }, "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", -// "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" -// }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" -// } }, "responseModels": { "application/json": "Empty" }, "responseParameters": -// { "method.response.header.Content-Type": false }, "statusCode": "200" -// } +// { "_links": { "curies": { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", +// "name": "methodresponse", "templated": true }, "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", +// "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" +// }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" +// } }, "responseModels": { "application/json": "Empty" }, "responseParameters": +// { "method.response.header.Content-Type": false }, "statusCode": "200" } +// +// Method, IntegrationResponse, Integration Creating an API (https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-create-api.html) type MethodResponse struct { _ struct{} `type:"structure"` @@ -20591,27 +20839,47 @@ type PutGatewayResponseInput struct { // pairs. ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` - // [Required] The response type of the associated GatewayResponse. Valid values - // are ACCESS_DENIED - // API_CONFIGURATION_ERROR - // AUTHORIZER_FAILURE - // AUTHORIZER_CONFIGURATION_ERROR - // BAD_REQUEST_PARAMETERS - // BAD_REQUEST_BODY - // DEFAULT_4XX - // DEFAULT_5XX - // EXPIRED_TOKEN - // INVALID_SIGNATURE - // INTEGRATION_FAILURE - // INTEGRATION_TIMEOUT - // INVALID_API_KEY - // MISSING_AUTHENTICATION_TOKEN - // QUOTA_EXCEEDED - // REQUEST_TOO_LARGE - // RESOURCE_NOT_FOUND - // THROTTLED - // UNAUTHORIZED - // UNSUPPORTED_MEDIA_TYPE + // [Required] + // The response type of the associated GatewayResponse. 
Valid values are
+ // * ACCESS_DENIED
+ //
+ // * API_CONFIGURATION_ERROR
+ //
+ // * AUTHORIZER_FAILURE
+ //
+ // * AUTHORIZER_CONFIGURATION_ERROR
+ //
+ // * BAD_REQUEST_PARAMETERS
+ //
+ // * BAD_REQUEST_BODY
+ //
+ // * DEFAULT_4XX
+ //
+ // * DEFAULT_5XX
+ //
+ // * EXPIRED_TOKEN
+ //
+ // * INVALID_SIGNATURE
+ //
+ // * INTEGRATION_FAILURE
+ //
+ // * INTEGRATION_TIMEOUT
+ //
+ // * INVALID_API_KEY
+ //
+ // * MISSING_AUTHENTICATION_TOKEN
+ //
+ // * QUOTA_EXCEEDED
+ //
+ // * REQUEST_TOO_LARGE
+ //
+ // * RESOURCE_NOT_FOUND
+ //
+ // * THROTTLED
+ //
+ // * UNAUTHORIZED
+ //
+ // * UNSUPPORTED_MEDIA_TYPE
 //
 // ResponseType is a required field
 ResponseType *string `location:"uri" locationName:"response_type" type:"string" required:"true" enum:"GatewayResponseType"`
@@ -20621,7 +20889,7 @@ type PutGatewayResponseInput struct {
 // RestApiId is a required field
 RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
- // The HTTP status code of the GatewayResponse
+ // The HTTP status code of the GatewayResponse.
 StatusCode *string `locationName:"statusCode" type:"string"`
 }
@@ -20691,10 +20959,10 @@ func (s *PutGatewayResponseInput) SetStatusCode(v string) *PutGatewayResponseInp
 type PutIntegrationInput struct {
 _ struct{} `type:"structure"`
- // Specifies a put integration input's cache key parameters.
+ // A list of request parameters whose values are to be cached.
 CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"`
- // Specifies a put integration input's cache namespace.
+ // An API-specific tag group of related cached parameters.
 CacheNamespace *string `locationName:"cacheNamespace" type:"string"`
 // The (id (https://docs.aws.amazon.com/apigateway/api-reference/resource/vpc-link/#id))
@@ -20719,7 +20987,7 @@ type PutIntegrationInput struct {
 //
 // If this property is not defined, the request payload will be passed through
 // from the method request to integration request without modification, provided
- // that the passthroughBehaviors is configured to support payload pass-through.
+ // that the passthroughBehavior is configured to support payload pass-through.
 ContentHandling *string `locationName:"contentHandling" type:"string" enum:"ContentHandlingStrategy"`
 // Specifies whether credentials are required for a put integration.
@@ -20786,13 +21054,13 @@ type PutIntegrationInput struct {
 // Specifies Uniform Resource Identifier (URI) of the integration endpoint.
 //
- // * For HTTP or HTTP_PROXY integrations, the URI must be a fully formed,
- // encoded HTTP(S) URL according to the RFC-3986 specification (_blank),
+ // * For HTTP or HTTP_PROXY integrations, the URI must be a fully formed,
+ // encoded HTTP(S) URL according to the RFC-3986 specification (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier),
 // for either standard integration, where connectionType is not VPC_LINK,
 // or private integration, where connectionType is VPC_LINK. For a private
 // HTTP integration, the URI is not used for routing.
 //
- // * For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}.
+ // * For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}.
// Here, {Region} is the API Gateway region (e.g., us-east-1); {service} // is the name of the integrated AWS service (e.g., s3); and {subdomain} // is a designated subdomain supported by certain AWS service for fast host-name @@ -21129,8 +21397,8 @@ type PutMethodInput struct { HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` // A human-friendly operation identifier for the method. For example, you can - // assign the operationName of ListPets for the GET /pets method in PetStore - // (https://petstore-demo-endpoint.execute-api.com/petstore/pets) example. + // assign the operationName of ListPets for the GET /pets method in the PetStore + // example. OperationName *string `locationName:"operationName" type:"string"` // Specifies the Model resources used for the request's content type. Request @@ -21555,50 +21823,49 @@ type Resource struct { // Example: Get the GET method of an API resource // // Request - // - // GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: - // application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20170223T031827Z - // Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20170223/us-east-1/apigateway/aws4_request, - // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} + // GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type: + // application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20170223T031827Z + // Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20170223/us-east-1/apigateway/aws4_request, + // SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} // Response - // - // { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", - // "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", - // "name": "integrationresponse", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html", - // "name": "method", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", - // "name": "methodresponse", "templated": true } ], "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET", - // "name": "GET", "title": "GET" }, "integration:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" - // }, "method:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" - // }, "method:integration": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" - // }, "method:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", - // "name": "200", "title": "200" }, "method:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" - // }, "methodresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}", - // "templated": true } }, "apiKeyRequired": false, "authorizationType": "NONE", - // "httpMethod": "GET", "_embedded": { "method:integration": { "_links": { "self": - // { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" - // }, "integration:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" - // }, "integration:responses": { "href": 
"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", - // "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" - // }, "integrationresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}", - // "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "3kzxbg5sa2", - // "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": - // "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestParameters": { "integration.request.header.Content-Type": - // "'application/x-amz-json-1.1'" }, "requestTemplates": { "application/json": - // "{\n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-east-1:kinesis:action/ListStreams", - // "_embedded": { "integration:responses": { "_links": { "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", - // "name": "200", "title": "200" }, "integrationresponse:delete": { "href": - // "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" - // }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" - // } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'" - // }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E#foreach($stream - // in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\")\n" - // }, "statusCode": "200" } } }, "method:responses": { "_links": { "self": { - // "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", - // "name": "200", "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" - // }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" - // } }, "responseModels": { "application/json": "Empty" }, "responseParameters": - // { "method.response.header.Content-Type": false }, "statusCode": "200" } } - // } + // { "_links": { "curies": [ { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-{rel}.html", + // "name": "integration", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html", + // "name": "integrationresponse", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-{rel}.html", + // "name": "method", "templated": true }, { "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-method-response-{rel}.html", + // "name": "methodresponse", "templated": true } ], "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET", + // "name": "GET", "title": "GET" }, "integration:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" + // }, "method:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" + // }, "method:integration": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" + // }, "method:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", + // "name": "200", "title": "200" }, "method:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET" + // }, "methodresponse:put": { "href": 
"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/{status_code}", + // "templated": true } }, "apiKeyRequired": false, "authorizationType": "NONE", + // "httpMethod": "GET", "_embedded": { "method:integration": { "_links": { + // "self": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" + // }, "integration:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" + // }, "integration:responses": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", + // "name": "200", "title": "200" }, "integration:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration" + // }, "integrationresponse:put": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/{status_code}", + // "templated": true } }, "cacheKeyParameters": [], "cacheNamespace": "3kzxbg5sa2", + // "credentials": "arn:aws:iam::123456789012:role/apigAwsProxyRole", "httpMethod": + // "POST", "passthroughBehavior": "WHEN_NO_MATCH", "requestParameters": { "integration.request.header.Content-Type": + // "'application/x-amz-json-1.1'" }, "requestTemplates": { "application/json": + // "{\n}" }, "type": "AWS", "uri": "arn:aws:apigateway:us-east-1:kinesis:action/ListStreams", + // "_embedded": { "integration:responses": { "_links": { "self": { "href": + // "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200", + // "name": "200", "title": "200" }, "integrationresponse:delete": { "href": + // "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" + // }, "integrationresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200" + // } }, "responseParameters": { "method.response.header.Content-Type": "'application/xml'" + // }, "responseTemplates": { "application/json": "$util.urlDecode(\"%3CkinesisStreams%3E#foreach($stream + // in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\")\n" + // }, "statusCode": "200" } } }, "method:responses": { "_links": { "self": + // { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200", + // "name": "200", "title": "200" }, "methodresponse:delete": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" + // }, "methodresponse:update": { "href": "/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/responses/200" + // } }, "responseModels": { "application/json": "Empty" }, "responseParameters": + // { "method.response.header.Content-Type": false }, "statusCode": "200" } + // } } // If the OPTIONS is enabled on the resource, you can follow the example here // to get that method. Just replace the GET of the last path segment in the // request URL with OPTIONS. @@ -21652,10 +21919,11 @@ type RestApi struct { _ struct{} `type:"structure"` // The source of the API key for metering requests according to a usage plan. - // Valid values are: HEADER to read the API key from the X-API-Key header of - // a request. - // AUTHORIZER to read the API key from the UsageIdentifierKey from a custom - // authorizer. + // Valid values are: + // * HEADER to read the API key from the X-API-Key header of a request. + // + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a custom + // authorizer. ApiKeySource *string `locationName:"apiKeySource" type:"string" enum:"ApiKeySourceType"` // The list of binary media types supported by the RestApi. 
By default, the @@ -21687,9 +21955,12 @@ type RestApi struct { Name *string `locationName:"name" type:"string"` // A stringified JSON policy document that applies to this RestApi regardless - // of the caller and Method + // of the caller and Method configuration. Policy *string `locationName:"policy" type:"string"` + // The collection of tags. Each tag element is associated with a given resource. + Tags map[string]*string `locationName:"tags" type:"map"` + // A version identifier for the API. Version *string `locationName:"version" type:"string"` @@ -21762,6 +22033,12 @@ func (s *RestApi) SetPolicy(v string) *RestApi { return s } +// SetTags sets the Tags field's value. +func (s *RestApi) SetTags(v map[string]*string) *RestApi { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *RestApi) SetVersion(v string) *RestApi { s.Version = &v @@ -21933,7 +22210,8 @@ type Stage struct { MethodSettings map[string]*MethodSetting `locationName:"methodSettings" type:"map"` // The name of the stage is the first path segment in the Uniform Resource Identifier - // (URI) of a call to API Gateway. + // (URI) of a call to API Gateway. Stage names can only contain alphanumeric + // characters, hyphens, and underscores. Maximum length is 128 characters. StageName *string `locationName:"stageName" type:"string"` // The collection of tags. Each tag element is associated with a given resource. @@ -22101,7 +22379,7 @@ type TagResourceInput struct { _ struct{} `type:"structure"` // [Required] The ARN of a resource that can be tagged. The resource ARN must - // be URL-encoded. At present, Stage is the only taggable resource. + // be URL-encoded. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` @@ -22614,7 +22892,7 @@ type UntagResourceInput struct { _ struct{} `type:"structure"` // [Required] The ARN of a resource that can be tagged. The resource ARN must - // be URL-encoded. At present, Stage is the only taggable resource. + // be URL-encoded. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` @@ -22832,6 +23110,8 @@ type UpdateBasePathMappingInput struct { // [Required] The base path of the BasePathMapping resource to change. // + // To specify an empty base path, set this parameter to '(none)'. + // // BasePath is a required field BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` @@ -23215,27 +23495,47 @@ type UpdateGatewayResponseInput struct { // the order specified in this list. PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` - // [Required] The response type of the associated GatewayResponse. Valid values - // are ACCESS_DENIED - // API_CONFIGURATION_ERROR - // AUTHORIZER_FAILURE - // AUTHORIZER_CONFIGURATION_ERROR - // BAD_REQUEST_PARAMETERS - // BAD_REQUEST_BODY - // DEFAULT_4XX - // DEFAULT_5XX - // EXPIRED_TOKEN - // INVALID_SIGNATURE - // INTEGRATION_FAILURE - // INTEGRATION_TIMEOUT - // INVALID_API_KEY - // MISSING_AUTHENTICATION_TOKEN - // QUOTA_EXCEEDED - // REQUEST_TOO_LARGE - // RESOURCE_NOT_FOUND - // THROTTLED - // UNAUTHORIZED - // UNSUPPORTED_MEDIA_TYPE + // [Required] + // The response type of the associated GatewayResponse. 
Valid values are + // * ACCESS_DENIED + // + // * API_CONFIGURATION_ERROR + // + // * AUTHORIZER_FAILURE + // + // * AUTHORIZER_CONFIGURATION_ERROR + // + // * BAD_REQUEST_PARAMETERS + // + // * BAD_REQUEST_BODY + // + // * DEFAULT_4XX + // + // * DEFAULT_5XX + // + // * EXPIRED_TOKEN + // + // * INVALID_SIGNATURE + // + // * INTEGRATION_FAILURE + // + // * INTEGRATION_TIMEOUT + // + // * INVALID_API_KEY + // + // * MISSING_AUTHENTICATION_TOKEN + // + // * QUOTA_EXCEEDED + // + // * REQUEST_TOO_LARGE + // + // * RESOURCE_NOT_FOUND + // + // * THROTTLED + // + // * UNAUTHORIZED + // + // * UNSUPPORTED_MEDIA_TYPE // // ResponseType is a required field ResponseType *string `location:"uri" locationName:"response_type" type:"string" required:"true" enum:"GatewayResponseType"` @@ -23300,41 +23600,39 @@ func (s *UpdateGatewayResponseInput) SetRestApiId(v string) *UpdateGatewayRespon // response parameters and mapping templates. // // For more information about valid gateway response types, see Gateway Response -// Types Supported by API Gateway (https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html)Example: -// Get a Gateway Response of a given response type +// Types Supported by API Gateway (https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html) +// +// Example: Get a Gateway Response of a given response type // // Request // // This example shows how to get a gateway response of the MISSING_AUTHENTICATION_TOKEN // type. -// -// GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 -// Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json -// X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, -// SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a -// Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 -// +// GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 +// Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json +// X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, +// SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a +// Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 // The response type is specified as a URL path. 
// // Response // // The successful operation returns the 200 OK status code and a payload similar // to the following: -// -// { "_links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html", -// "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" -// }, "gatewayresponse:delete": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" -// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", -// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" -// } }, "defaultResponse": false, "responseParameters": { "gatewayresponse.header.x-request-path": -// "method.request.path.petId", "gatewayresponse.header.Access-Control-Allow-Origin": -// "'a.b.c'", "gatewayresponse.header.x-request-query": "method.request.querystring.q", -// "gatewayresponse.header.x-request-header": "method.request.header.Accept" -// }, "responseTemplates": { "application/json": "{\n \"message\": $context.error.messageString,\n -// \"type\": \"$context.error.responseType\",\n \"stage\": \"$context.stage\",\n -// \"resourcePath\": \"$context.resourcePath\",\n \"stageVariables.a\": \"$stageVariables.a\",\n -// \"statusCode\": \"'404'\"\n}" }, "responseType": "MISSING_AUTHENTICATION_TOKEN", -// "statusCode": "404" } +// { "_links": { "curies": { "href": "http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html", +// "name": "gatewayresponse", "templated": true }, "self": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" +// }, "gatewayresponse:delete": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" +// }, "gatewayresponse:put": { "href": "/restapis/o81lxisefl/gatewayresponses/{response_type}", +// "templated": true }, "gatewayresponse:update": { "href": "/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN" +// } }, "defaultResponse": false, "responseParameters": { "gatewayresponse.header.x-request-path": +// "method.request.path.petId", "gatewayresponse.header.Access-Control-Allow-Origin": +// "'a.b.c'", "gatewayresponse.header.x-request-query": "method.request.querystring.q", +// "gatewayresponse.header.x-request-header": "method.request.header.Accept" +// }, "responseTemplates": { "application/json": "{\n \"message\": $context.error.messageString,\n +// \"type\": \"$context.error.responseType\",\n \"stage\": \"$context.stage\",\n +// \"resourcePath\": \"$context.resourcePath\",\n \"stageVariables.a\": \"$stageVariables.a\",\n +// \"statusCode\": \"'404'\"\n}" }, "responseType": "MISSING_AUTHENTICATION_TOKEN", +// "statusCode": "404" } // // Customize Gateway Responses (https://docs.aws.amazon.com/apigateway/latest/developerguide/customize-gateway-responses.html) type UpdateGatewayResponseOutput struct { @@ -23353,27 +23651,46 @@ type UpdateGatewayResponseOutput struct { // pairs. ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` - // The response type of the associated GatewayResponse. 
Valid values are ACCESS_DENIED - // - // API_CONFIGURATION_ERROR - // AUTHORIZER_FAILURE - // AUTHORIZER_CONFIGURATION_ERROR - // BAD_REQUEST_PARAMETERS - // BAD_REQUEST_BODY - // DEFAULT_4XX - // DEFAULT_5XX - // EXPIRED_TOKEN - // INVALID_SIGNATURE - // INTEGRATION_FAILURE - // INTEGRATION_TIMEOUT - // INVALID_API_KEY - // MISSING_AUTHENTICATION_TOKEN - // QUOTA_EXCEEDED - // REQUEST_TOO_LARGE - // RESOURCE_NOT_FOUND - // THROTTLED - // UNAUTHORIZED - // UNSUPPORTED_MEDIA_TYPE + // The response type of the associated GatewayResponse. Valid values are + // * ACCESS_DENIED + // + // * API_CONFIGURATION_ERROR + // + // * AUTHORIZER_FAILURE + // + // * AUTHORIZER_CONFIGURATION_ERROR + // + // * BAD_REQUEST_PARAMETERS + // + // * BAD_REQUEST_BODY + // + // * DEFAULT_4XX + // + // * DEFAULT_5XX + // + // * EXPIRED_TOKEN + // + // * INVALID_SIGNATURE + // + // * INTEGRATION_FAILURE + // + // * INTEGRATION_TIMEOUT + // + // * INVALID_API_KEY + // + // * MISSING_AUTHENTICATION_TOKEN + // + // * QUOTA_EXCEEDED + // + // * REQUEST_TOO_LARGE + // + // * RESOURCE_NOT_FOUND + // + // * THROTTLED + // + // * UNAUTHORIZED + // + // * UNSUPPORTED_MEDIA_TYPE ResponseType *string `locationName:"responseType" type:"string" enum:"GatewayResponseType"` // The HTTP status code for this GatewayResponse. @@ -24394,6 +24711,9 @@ type UpdateVpcLinkOutput struct { // A description about the VPC link status. StatusMessage *string `locationName:"statusMessage" type:"string"` + // The collection of tags. Each tag element is associated with a given resource. + Tags map[string]*string `locationName:"tags" type:"map"` + // The ARNs of network load balancers of the VPC targeted by the VPC link. The // network load balancers must be owned by the same AWS account of the API owner. TargetArns []*string `locationName:"targetArns" type:"list"` @@ -24439,6 +24759,12 @@ func (s *UpdateVpcLinkOutput) SetStatusMessage(v string) *UpdateVpcLinkOutput { return s } +// SetTags sets the Tags field's value. +func (s *UpdateVpcLinkOutput) SetTags(v map[string]*string) *UpdateVpcLinkOutput { + s.Tags = v + return s +} + // SetTargetArns sets the TargetArns field's value. func (s *UpdateVpcLinkOutput) SetTargetArns(v []*string) *UpdateVpcLinkOutput { s.TargetArns = v @@ -24447,7 +24773,8 @@ func (s *UpdateVpcLinkOutput) SetTargetArns(v []*string) *UpdateVpcLinkOutput { // Represents the usage data of a usage plan. // -// Create and Use Usage Plans (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-usage-plans.html), Manage Usage in a Usage Plan (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-usage-plans-with-console.html#api-gateway-usage-plan-manage-usage) +// Create and Use Usage Plans (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-usage-plans.html), +// Manage Usage in a Usage Plan (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-usage-plans-with-console.html#api-gateway-usage-plan-manage-usage) type Usage struct { _ struct{} `type:"structure"` @@ -24540,6 +24867,9 @@ type UsagePlan struct { // The maximum number of permitted requests per a given unit time interval. Quota *QuotaSettings `locationName:"quota" type:"structure"` + // The collection of tags. Each tag element is associated with a given resource. + Tags map[string]*string `locationName:"tags" type:"map"` + // The request throttle limits of a usage plan. 
Throttle *ThrottleSettings `locationName:"throttle" type:"structure"` } @@ -24590,6 +24920,12 @@ func (s *UsagePlan) SetQuota(v *QuotaSettings) *UsagePlan { return s } +// SetTags sets the Tags field's value. +func (s *UsagePlan) SetTags(v map[string]*string) *UsagePlan { + s.Tags = v + return s +} + // SetThrottle sets the Throttle field's value. func (s *UsagePlan) SetThrottle(v *ThrottleSettings) *UsagePlan { s.Throttle = v @@ -24601,7 +24937,8 @@ func (s *UsagePlan) SetThrottle(v *ThrottleSettings) *UsagePlan { // To associate an API stage with a selected API key in a usage plan, you must // create a UsagePlanKey resource to represent the selected ApiKey. // -// " Create and Use Usage Plans (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-usage-plans.html) +// " +// Create and Use Usage Plans (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-usage-plans.html) type UsagePlanKey struct { _ struct{} `type:"structure"` @@ -24779,6 +25116,17 @@ const ( DocumentationPartTypeResponseBody = "RESPONSE_BODY" ) +const ( + // DomainNameStatusAvailable is a DomainNameStatus enum value + DomainNameStatusAvailable = "AVAILABLE" + + // DomainNameStatusUpdating is a DomainNameStatus enum value + DomainNameStatusUpdating = "UPDATING" + + // DomainNameStatusPending is a DomainNameStatus enum value + DomainNameStatusPending = "PENDING" +) + // The endpoint type. The valid values are EDGE for edge-optimized API setup, // most suitable for mobile applications; REGIONAL for regional API endpoint // setup, most suitable for calling from AWS Region; and PRIVATE for private @@ -24924,6 +25272,14 @@ const ( QuotaPeriodTypeMonth = "MONTH" ) +const ( + // SecurityPolicyTls10 is a SecurityPolicy enum value + SecurityPolicyTls10 = "TLS_1_0" + + // SecurityPolicyTls12 is a SecurityPolicy enum value + SecurityPolicyTls12 = "TLS_1_2" +) + const ( // UnauthorizedCacheControlHeaderStrategyFailWith403 is a UnauthorizedCacheControlHeaderStrategy enum value UnauthorizedCacheControlHeaderStrategyFailWith403 = "FAIL_WITH_403" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go index 8064d24fc02..cf398a11402 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -46,11 +46,11 @@ const ( // svc := apigateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *APIGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *APIGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *APIGateway { svc := &APIGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go index b1acb962162..75480a1c75d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go @@ -3909,6 +3909,280 @@ func (c *ApiGatewayV2) GetStagesWithContext(ctx aws.Context, input *GetStagesInp return out, req.Send() } +const opGetTags = "GetTags" + +// GetTagsRequest generates a "aws/request.Request" representing the +// client's request for the GetTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTags for more information on using the GetTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTagsRequest method. +// req, resp := client.GetTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/GetTags +func (c *ApiGatewayV2) GetTagsRequest(input *GetTagsInput) (req *request.Request, output *GetTagsOutput) { + op := &request.Operation{ + Name: opGetTags, + HTTPMethod: "GET", + HTTPPath: "/v2/tags/{resource-arn}", + } + + if input == nil { + input = &GetTagsInput{} + } + + output = &GetTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTags API operation for AmazonApiGatewayV2. +// +// Gets the Tags for a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonApiGatewayV2's +// API operation GetTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The resource specified in the request was not found. See the message field +// for more information. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// A limit has been exceeded. See the accompanying error message for details. +// +// * ErrCodeBadRequestException "BadRequestException" +// The request is not valid, for example, the input is incomplete or incorrect. +// See the accompanying error message for details. 
+// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. See the accompanying error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/GetTags +func (c *ApiGatewayV2) GetTags(input *GetTagsInput) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) + return out, req.Send() +} + +// GetTagsWithContext is the same as GetTags with the addition of +// the ability to pass a context and additional request options. +// +// See GetTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApiGatewayV2) GetTagsWithContext(ctx aws.Context, input *GetTagsInput, opts ...request.Option) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/TagResource +func (c *ApiGatewayV2) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v2/tags/{resource-arn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AmazonApiGatewayV2. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonApiGatewayV2's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The resource specified in the request was not found. See the message field +// for more information. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// A limit has been exceeded. See the accompanying error message for details. 
+// +// * ErrCodeBadRequestException "BadRequestException" +// The request is not valid, for example, the input is incomplete or incorrect. +// See the accompanying error message for details. +// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. See the accompanying error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/TagResource +func (c *ApiGatewayV2) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApiGatewayV2) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/UntagResource +func (c *ApiGatewayV2) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v2/tags/{resource-arn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AmazonApiGatewayV2. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonApiGatewayV2's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The resource specified in the request was not found. 
See the message field +// for more information. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// A limit has been exceeded. See the accompanying error message for details. +// +// * ErrCodeBadRequestException "BadRequestException" +// The request is not valid, for example, the input is incomplete or incorrect. +// See the accompanying error message for details. +// +// * ErrCodeConflictException "ConflictException" +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. See the accompanying error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/UntagResource +func (c *ApiGatewayV2) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApiGatewayV2) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateApi = "UpdateApi" // UpdateApiRequest generates a "aws/request.Request" representing the @@ -4994,6 +5268,9 @@ type Api struct { // RouteSelectionExpression is a required field RouteSelectionExpression *string `locationName:"routeSelectionExpression" type:"string" required:"true"` + // Tags for the API. + Tags map[string]*string `locationName:"tags" type:"map"` + // A version identifier for the API. Version *string `locationName:"version" type:"string"` @@ -5066,6 +5343,12 @@ func (s *Api) SetRouteSelectionExpression(v string) *Api { return s } +// SetTags sets the Tags field's value. +func (s *Api) SetTags(v map[string]*string) *Api { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *Api) SetVersion(v string) *Api { s.Version = &v @@ -5983,6 +6266,10 @@ type CreateDomainNameInput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -6020,6 +6307,12 @@ func (s *CreateDomainNameInput) SetDomainNameConfigurations(v []*DomainNameConfi return s } +// SetTags sets the Tags field's value. +func (s *CreateDomainNameInput) SetTags(v map[string]*string) *CreateDomainNameInput { + s.Tags = v + return s +} + type CreateDomainNameOutput struct { _ struct{} `type:"structure"` @@ -6033,6 +6326,10 @@ type CreateDomainNameOutput struct { // The domain name configurations. 
DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -6063,6 +6360,12 @@ func (s *CreateDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConf return s } +// SetTags sets the Tags field's value. +func (s *CreateDomainNameOutput) SetTags(v map[string]*string) *CreateDomainNameOutput { + s.Tags = v + return s +} + type CreateIntegrationInput struct { _ struct{} `type:"structure"` @@ -7246,6 +7549,10 @@ type CreateStageInput struct { // The stage variable map. StageVariables map[string]*string `locationName:"stageVariables" type:"map"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -7331,6 +7638,12 @@ func (s *CreateStageInput) SetStageVariables(v map[string]*string) *CreateStageI return s } +// SetTags sets the Tags field's value. +func (s *CreateStageInput) SetTags(v map[string]*string) *CreateStageInput { + s.Tags = v + return s +} + type CreateStageOutput struct { _ struct{} `type:"structure"` @@ -7361,6 +7674,10 @@ type CreateStageOutput struct { // The stage variable map. StageVariables map[string]*string `locationName:"stageVariables" type:"map"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -7433,6 +7750,12 @@ func (s *CreateStageOutput) SetStageVariables(v map[string]*string) *CreateStage return s } +// SetTags sets the Tags field's value. +func (s *CreateStageOutput) SetTags(v map[string]*string) *CreateStageOutput { + s.Tags = v + return s +} + type DeleteApiInput struct { _ struct{} `type:"structure"` @@ -8256,6 +8579,9 @@ type DomainName struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // Tags for the DomainName. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -8286,6 +8612,12 @@ func (s *DomainName) SetDomainNameConfigurations(v []*DomainNameConfiguration) * return s } +// SetTags sets the Tags field's value. +func (s *DomainName) SetTags(v map[string]*string) *DomainName { + s.Tags = v + return s +} + // The domain name configuration. type DomainNameConfiguration struct { _ struct{} `type:"structure"` @@ -8305,11 +8637,19 @@ type DomainNameConfiguration struct { // for this domain name was uploaded. CertificateUploadDate *time.Time `locationName:"certificateUploadDate" type:"timestamp" timestampFormat:"iso8601"` + DomainNameStatus *string `locationName:"domainNameStatus" type:"string" enum:"DomainNameStatus"` + + // An optional text message containing detailed information about status of + // the domain name migration. + DomainNameStatusMessage *string `locationName:"domainNameStatusMessage" type:"string"` + // The endpoint type. EndpointType *string `locationName:"endpointType" type:"string" enum:"EndpointType"` // The Amazon Route 53 Hosted Zone ID of the endpoint. 
HostedZoneId *string `locationName:"hostedZoneId" type:"string"` + + SecurityPolicy *string `locationName:"securityPolicy" type:"string" enum:"SecurityPolicy"` } // String returns the string representation @@ -8346,6 +8686,18 @@ func (s *DomainNameConfiguration) SetCertificateUploadDate(v time.Time) *DomainN return s } +// SetDomainNameStatus sets the DomainNameStatus field's value. +func (s *DomainNameConfiguration) SetDomainNameStatus(v string) *DomainNameConfiguration { + s.DomainNameStatus = &v + return s +} + +// SetDomainNameStatusMessage sets the DomainNameStatusMessage field's value. +func (s *DomainNameConfiguration) SetDomainNameStatusMessage(v string) *DomainNameConfiguration { + s.DomainNameStatusMessage = &v + return s +} + // SetEndpointType sets the EndpointType field's value. func (s *DomainNameConfiguration) SetEndpointType(v string) *DomainNameConfiguration { s.EndpointType = &v @@ -8358,6 +8710,12 @@ func (s *DomainNameConfiguration) SetHostedZoneId(v string) *DomainNameConfigura return s } +// SetSecurityPolicy sets the SecurityPolicy field's value. +func (s *DomainNameConfiguration) SetSecurityPolicy(v string) *DomainNameConfiguration { + s.SecurityPolicy = &v + return s +} + type GetApiInput struct { _ struct{} `type:"structure"` @@ -8622,6 +8980,10 @@ type GetApiOutput struct { // for more information. RouteSelectionExpression *string `locationName:"routeSelectionExpression" type:"string"` + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` + // A string with a length between [1-64]. Version *string `locationName:"version" type:"string"` @@ -8692,6 +9054,12 @@ func (s *GetApiOutput) SetRouteSelectionExpression(v string) *GetApiOutput { return s } +// SetTags sets the Tags field's value. +func (s *GetApiOutput) SetTags(v map[string]*string) *GetApiOutput { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *GetApiOutput) SetVersion(v string) *GetApiOutput { s.Version = &v @@ -9265,6 +9633,10 @@ type GetDomainNameOutput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -9295,6 +9667,12 @@ func (s *GetDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConfigu return s } +// SetTags sets the Tags field's value. +func (s *GetDomainNameOutput) SetTags(v map[string]*string) *GetDomainNameOutput { + s.Tags = v + return s +} + type GetDomainNamesInput struct { _ struct{} `type:"structure"` @@ -10792,6 +11170,10 @@ type GetStageOutput struct { // The stage variable map. StageVariables map[string]*string `locationName:"stageVariables" type:"map"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -10864,6 +11246,12 @@ func (s *GetStageOutput) SetStageVariables(v map[string]*string) *GetStageOutput return s } +// SetTags sets the Tags field's value. 
+func (s *GetStageOutput) SetTags(v map[string]*string) *GetStageOutput { + s.Tags = v + return s +} + type GetStagesInput struct { _ struct{} `type:"structure"` @@ -10951,6 +11339,67 @@ func (s *GetStagesOutput) SetNextToken(v string) *GetStagesOutput { return s } +type GetTagsInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTagsInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetTagsInput) SetResourceArn(v string) *GetTagsInput { + s.ResourceArn = &v + return s +} + +type GetTagsOutput struct { + _ struct{} `type:"structure"` + + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s GetTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTagsOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *GetTagsOutput) SetTags(v map[string]*string) *GetTagsOutput { + s.Tags = v + return s +} + // Represents an integration. type Integration struct { _ struct{} `type:"structure"` @@ -11642,6 +12091,9 @@ type Stage struct { // can have alphanumeric and underscore characters, and the values must match // [A-Za-z0-9-._~:/?#&=,]+. StageVariables map[string]*string `locationName:"stageVariables" type:"map"` + + // Tags for the Stage. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -11714,6 +12166,140 @@ func (s *Stage) SetStageVariables(v map[string]*string) *Stage { return s } +// SetTags sets the Tags field's value. +func (s *Stage) SetTags(v map[string]*string) *Stage { + s.Tags = v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateApiInput struct { _ struct{} `type:"structure"` @@ -11984,6 +12570,10 @@ type UpdateApiOutput struct { // for more information. RouteSelectionExpression *string `locationName:"routeSelectionExpression" type:"string"` + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` + // A string with a length between [1-64]. Version *string `locationName:"version" type:"string"` @@ -12054,6 +12644,12 @@ func (s *UpdateApiOutput) SetRouteSelectionExpression(v string) *UpdateApiOutput return s } +// SetTags sets the Tags field's value. 
+func (s *UpdateApiOutput) SetTags(v map[string]*string) *UpdateApiOutput { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *UpdateApiOutput) SetVersion(v string) *UpdateApiOutput { s.Version = &v @@ -12494,6 +13090,10 @@ type UpdateDomainNameOutput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -12524,6 +13124,12 @@ func (s *UpdateDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConf return s } +// SetTags sets the Tags field's value. +func (s *UpdateDomainNameOutput) SetTags(v map[string]*string) *UpdateDomainNameOutput { + s.Tags = v + return s +} + type UpdateIntegrationInput struct { _ struct{} `type:"structure"` @@ -13868,6 +14474,10 @@ type UpdateStageOutput struct { // The stage variable map. StageVariables map[string]*string `locationName:"stageVariables" type:"map"` + + // A key value pair of string with key length between[1-128] and value length + // between[1-256] + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -13940,6 +14550,12 @@ func (s *UpdateStageOutput) SetStageVariables(v map[string]*string) *UpdateStage return s } +// SetTags sets the Tags field's value. +func (s *UpdateStageOutput) SetTags(v map[string]*string) *UpdateStageOutput { + s.Tags = v + return s +} + // The authorization type. Valid values are NONE for open access, AWS_IAM for // using AWS IAM permissions, and CUSTOM for using a Lambda authorizer. const ( @@ -13990,6 +14606,14 @@ const ( DeploymentStatusDeployed = "DEPLOYED" ) +const ( + // DomainNameStatusAvailable is a DomainNameStatus enum value + DomainNameStatusAvailable = "AVAILABLE" + + // DomainNameStatusUpdating is a DomainNameStatus enum value + DomainNameStatusUpdating = "UPDATING" +) + // Represents an endpoint type. const ( // EndpointTypeRegional is a EndpointType enum value @@ -14045,3 +14669,11 @@ const ( // ProtocolTypeWebsocket is a ProtocolType enum value ProtocolTypeWebsocket = "WEBSOCKET" ) + +const ( + // SecurityPolicyTls10 is a SecurityPolicy enum value + SecurityPolicyTls10 = "TLS_1_0" + + // SecurityPolicyTls12 is a SecurityPolicy enum value + SecurityPolicyTls12 = "TLS_1_2" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go index 02f80c40830..75199eb30aa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApiGatewayV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "apigateway" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApiGatewayV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApiGatewayV2 { svc := &ApiGatewayV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-29", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go index 87e230ce880..4a7e21e00ff 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -58,11 +58,16 @@ func (c *ApplicationAutoScaling) DeleteScalingPolicyRequest(input *DeleteScaling // DeleteScalingPolicy API operation for Application Auto Scaling. // -// Deletes the specified Application Auto Scaling scaling policy. +// Deletes the specified scaling policy for an Application Auto Scaling scalable +// target. +// +// Deleting a step scaling policy deletes the underlying alarm action, but does +// not delete the CloudWatch alarm associated with the scaling policy, even +// if it no longer has an associated action. // -// Deleting a policy deletes the underlying alarm action, but does not delete -// the CloudWatch alarm associated with the scaling policy, even if it no longer -// has an associated action. +// For more information, see Delete a Step Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html#delete-step-scaling-policy) +// and Delete a Target Tracking Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html#delete-target-tracking-policy) +// in the Application Auto Scaling User Guide. // // To create a scaling policy or update an existing one, see PutScalingPolicy. // @@ -159,7 +164,11 @@ func (c *ApplicationAutoScaling) DeleteScheduledActionRequest(input *DeleteSched // DeleteScheduledAction API operation for Application Auto Scaling. // -// Deletes the specified Application Auto Scaling scheduled action. +// Deletes the specified scheduled action for an Application Auto Scaling scalable +// target. +// +// For more information, see Delete a Scheduled Action (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html#delete-scheduled-action) +// in the Application Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -254,7 +263,7 @@ func (c *ApplicationAutoScaling) DeregisterScalableTargetRequest(input *Deregist // DeregisterScalableTarget API operation for Application Auto Scaling. // -// Deregisters a scalable target. +// Deregisters an Application Auto Scaling scalable target. // // Deregistering a scalable target deletes the scaling policies that are associated // with it. 
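The three hunks above tighten the godoc for DeleteScalingPolicy, DeleteScheduledAction, and DeregisterScalableTarget. A minimal sketch of the clarified DeleteScalingPolicy contract, assuming a session with default credentials; the resource identifiers mirror the documentation examples in this file, and the policy name is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := applicationautoscaling.New(sess)

	// Deletes the policy and its alarm action; per the reworded godoc,
	// the CloudWatch alarm itself survives and must be removed separately.
	_, err := svc.DeleteScalingPolicy(&applicationautoscaling.DeleteScalingPolicyInput{
		PolicyName:        aws.String("web-app-cpu-gt-75"), // placeholder name
		ResourceId:        aws.String("service/default/sample-webapp"),
		ScalableDimension: aws.String("ecs:service:DesiredCount"),
		ServiceNamespace:  aws.String("ecs"),
	})
	if err != nil {
		log.Fatal(err)
	}
}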
@@ -361,7 +370,7 @@ func (c *ApplicationAutoScaling) DescribeScalableTargetsRequest(input *DescribeS // // Gets information about the scalable targets in the specified namespace. // -// You can filter the results using the ResourceIds and ScalableDimension parameters. +// You can filter the results using ResourceIds and ScalableDimension. // // To create a scalable target or update an existing one, see RegisterScalableTarget. // If you are no longer using a scalable target, you can deregister it using @@ -422,7 +431,7 @@ func (c *ApplicationAutoScaling) DescribeScalableTargetsWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a DescribeScalableTargets operation. // pageNum := 0 // err := client.DescribeScalableTargetsPages(params, -// func(page *DescribeScalableTargetsOutput, lastPage bool) bool { +// func(page *applicationautoscaling.DescribeScalableTargetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -454,10 +463,12 @@ func (c *ApplicationAutoScaling) DescribeScalableTargetsPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScalableTargetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScalableTargetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -514,7 +525,7 @@ func (c *ApplicationAutoScaling) DescribeScalingActivitiesRequest(input *Describ // Provides descriptive information about the scaling activities in the specified // namespace from the previous six weeks. // -// You can filter the results using the ResourceId and ScalableDimension parameters. +// You can filter the results using ResourceId and ScalableDimension. // // Scaling activities are triggered by CloudWatch alarms that are associated // with scaling policies. To view the scaling policies for a service namespace, @@ -576,7 +587,7 @@ func (c *ApplicationAutoScaling) DescribeScalingActivitiesWithContext(ctx aws.Co // // Example iterating over at most 3 pages of a DescribeScalingActivities operation. // pageNum := 0 // err := client.DescribeScalingActivitiesPages(params, -// func(page *DescribeScalingActivitiesOutput, lastPage bool) bool { +// func(page *applicationautoscaling.DescribeScalingActivitiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -608,10 +619,12 @@ func (c *ApplicationAutoScaling) DescribeScalingActivitiesPagesWithContext(ctx a }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -665,10 +678,10 @@ func (c *ApplicationAutoScaling) DescribeScalingPoliciesRequest(input *DescribeS // DescribeScalingPolicies API operation for Application Auto Scaling. // -// Describes the scaling policies for the specified service namespace. +// Describes the Application Auto Scaling scaling policies for the specified +// service namespace. // -// You can filter the results using the ResourceId, ScalableDimension, and PolicyNames -// parameters. +// You can filter the results using ResourceId, ScalableDimension, and PolicyNames. // // To create a scaling policy or update an existing one, see PutScalingPolicy. // If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. 
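The pagination hunks in this file replace the `cont` flag with a plain for/break loop and qualify the package name in the example signatures. The old `for p.Next() && cont` form evaluated p.Next() before checking `cont`, so one extra page request could be issued after the callback returned false; the new loop breaks first. A short sketch of the corrected callback shape for DescribeScalableTargetsPages, where the namespace filter is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := applicationautoscaling.New(sess)

	input := &applicationautoscaling.DescribeScalableTargetsInput{
		ServiceNamespace: aws.String("ecs"), // placeholder namespace
	}

	// Returning false stops iteration; the reworked loop now breaks
	// immediately instead of fetching one further page.
	err := svc.DescribeScalableTargetsPages(input,
		func(page *applicationautoscaling.DescribeScalableTargetsOutput, lastPage bool) bool {
			for _, t := range page.ScalableTargets {
				fmt.Println(aws.StringValue(t.ResourceId))
			}
			return !lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}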
@@ -736,7 +749,7 @@ func (c *ApplicationAutoScaling) DescribeScalingPoliciesWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a DescribeScalingPolicies operation. // pageNum := 0 // err := client.DescribeScalingPoliciesPages(params, -// func(page *DescribeScalingPoliciesOutput, lastPage bool) bool { +// func(page *applicationautoscaling.DescribeScalingPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -768,10 +781,12 @@ func (c *ApplicationAutoScaling) DescribeScalingPoliciesPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScalingPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScalingPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -806,6 +821,12 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *Describe Name: opDescribeScheduledActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -819,7 +840,8 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *Describe // DescribeScheduledActions API operation for Application Auto Scaling. // -// Describes the scheduled actions for the specified service namespace. +// Describes the Application Auto Scaling scheduled actions for the specified +// service namespace. // // You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames // parameters. @@ -871,6 +893,58 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsWithContext(ctx aws.Con return out, req.Send() } +// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. +// pageNum := 0 +// err := client.DescribeScheduledActionsPages(params, +// func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool) error { + return c.DescribeScheduledActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledActionsPagesWithContext same as DescribeScheduledActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ApplicationAutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPutScalingPolicy = "PutScalingPolicy" // PutScalingPolicyRequest generates a "aws/request.Request" representing the @@ -934,7 +1008,7 @@ func (c *ApplicationAutoScaling) PutScalingPolicyRequest(input *PutScalingPolicy // more step scaling policies, or both. However, there is a chance that multiple // policies could conflict, instructing the scalable target to scale out or // in at the same time. Application Auto Scaling gives precedence to the policy -// that provides the largest capacity for both scale in and scale out. For example, +// that provides the largest capacity for both scale out and scale in. For example, // if one policy increases capacity by 3, another policy increases capacity // by 200 percent, and the current capacity is 10, Application Auto Scaling // uses the policy with the highest calculated capacity (200% of 10 = 20) and @@ -1165,16 +1239,25 @@ func (c *ApplicationAutoScaling) RegisterScalableTargetRequest(input *RegisterSc // RegisterScalableTarget API operation for Application Auto Scaling. // // Registers or updates a scalable target. A scalable target is a resource that -// Application Auto Scaling can scale out and scale in. Each scalable target -// has a resource ID, scalable dimension, and namespace, as well as values for -// minimum and maximum capacity. +// Application Auto Scaling can scale out and scale in. Scalable targets are +// uniquely identified by the combination of resource ID, scalable dimension, +// and namespace. +// +// When you register a new scalable target, you must specify values for minimum +// and maximum capacity. Application Auto Scaling will not scale capacity to +// values that are outside of this range. +// +// To update a scalable target, specify the parameter that you want to change +// as well as the following parameters that identify the scalable target: resource +// ID, scalable dimension, and namespace. Any parameters that you don't specify +// are not changed by this update request. // // After you register a scalable target, you do not need to register it again // to use other Application Auto Scaling operations. To see which resources // have been registered, use DescribeScalableTargets. You can also view the -// scaling policies for a service namespace using DescribeScalableTargets. +// scaling policies for a service namespace by using DescribeScalableTargets. // -// If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget. +// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1386,8 +1469,8 @@ type DeleteScalingPolicyInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -1421,7 +1504,7 @@ type DeleteScalingPolicyInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -1456,7 +1539,7 @@ type DeleteScalingPolicyInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -1548,8 +1631,8 @@ type DeleteScheduledActionInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -1583,7 +1666,7 @@ type DeleteScheduledActionInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -1623,7 +1706,7 @@ type DeleteScheduledActionInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -1715,8 +1798,8 @@ type DeregisterScalableTargetInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -1750,7 +1833,7 @@ type DeregisterScalableTargetInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -1785,7 +1868,7 @@ type DeregisterScalableTargetInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -1878,8 +1961,8 @@ type DescribeScalableTargetsInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -1912,7 +1995,7 @@ type DescribeScalableTargetsInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -1945,7 +2028,7 @@ type DescribeScalableTargetsInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -2060,8 +2143,8 @@ type DescribeScalingActivitiesInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -2094,7 +2177,7 @@ type DescribeScalingActivitiesInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -2127,7 +2210,7 @@ type DescribeScalingActivitiesInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -2248,8 +2331,8 @@ type DescribeScalingPoliciesInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -2282,7 +2365,7 @@ type DescribeScalingPoliciesInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -2315,7 +2398,7 @@ type DescribeScalingPoliciesInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -2439,8 +2522,8 @@ type DescribeScheduledActionsInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -2473,7 +2556,7 @@ type DescribeScheduledActionsInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -2509,7 +2592,7 @@ type DescribeScheduledActionsInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -2670,14 +2753,14 @@ type PredefinedMetricSpecification struct { _ struct{} `type:"structure"` // The metric type. The ALBRequestCountPerTarget metric type applies only to - // Spot fleet requests and ECS services. + // Spot Fleet requests and ECS services. // // PredefinedMetricType is a required field PredefinedMetricType *string `type:"string" required:"true" enum:"MetricType"` // Identifies the resource associated with the metric type. You can't specify // a resource label unless the metric type is ALBRequestCountPerTarget and there - // is a target group attached to the Spot fleet request or ECS service. + // is a target group attached to the Spot Fleet request or ECS service. // // The format is app///targetgroup//, // where: @@ -2739,9 +2822,14 @@ type PutScalingPolicyInput struct { // The policy type. This parameter is required if you are creating a scaling // policy. 
// - // For information on which services do not support StepScaling or TargetTrackingScaling, - // see the information about Limits in Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) - // and Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) + // The following policy types are supported: + // + // TargetTrackingScaling—Not supported for Amazon EMR or AppStream + // + // StepScaling—Not supported for Amazon DynamoDB + // + // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) + // and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) // in the Application Auto Scaling User Guide. PolicyType *string `type:"string" enum:"PolicyType"` @@ -2751,8 +2839,8 @@ type PutScalingPolicyInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -2786,7 +2874,7 @@ type PutScalingPolicyInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -2821,7 +2909,7 @@ type PutScalingPolicyInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -2977,8 +3065,8 @@ type PutScheduledActionInput struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. 
Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -3012,7 +3100,7 @@ type PutScheduledActionInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -3056,7 +3144,7 @@ type PutScheduledActionInput struct { // // * At expressions - "at(yyyy-mm-ddThh:mm:ss)" // - // * Rate expressions - "rate(valueunit)" + // * Rate expressions - "rate(value unit)" // // * Cron expressions - "cron(fields)" // @@ -3076,7 +3164,7 @@ type PutScheduledActionInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -3192,22 +3280,22 @@ func (s PutScheduledActionOutput) GoString() string { type RegisterScalableTargetInput struct { _ struct{} `type:"structure"` - // The maximum value to scale to in response to a scale-out event. This parameter + // The maximum value to scale to in response to a scale-out event. MaxCapacity // is required to register a scalable target. MaxCapacity *int64 `type:"integer"` - // The minimum value to scale to in response to a scale-in event. This parameter + // The minimum value to scale to in response to a scale-in event. MinCapacity // is required to register a scalable target. MinCapacity *int64 `type:"integer"` - // The identifier of the resource associated with the scalable target. This - // string consists of the resource type and unique identifier. + // The identifier of the resource that is associated with the scalable target. + // This string consists of the resource type and unique identifier. // // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -3239,9 +3327,9 @@ type RegisterScalableTargetInput struct { // to modify the scalable target on your behalf. For more information, see Service-Linked // Roles for Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html). // - // For resources that are not supported using a service-linked role, this parameter - // is required and must specify the ARN of an IAM role that allows Application - // Auto Scaling to modify the scalable target on your behalf. 
+ // For Amazon EMR, this parameter is required, and it must specify the ARN of + // an IAM role that allows Application Auto Scaling to modify the scalable target + // on your behalf. RoleARN *string `min:"1" type:"string"` // The scalable dimension associated with the scalable target. This string consists @@ -3250,7 +3338,7 @@ type RegisterScalableTargetInput struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -3285,11 +3373,31 @@ type RegisterScalableTargetInput struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // An embedded object that contains attributes and attribute values that are + // used to suspend and resume automatic scaling. Setting the value of an attribute + // to true suspends the specified scaling activities. Setting it to false (default) + // resumes the specified scaling activities. + // + // Suspension Outcomes + // + // * For DynamicScalingInSuspended, while a suspension is in effect, all + // scale-in activities that are triggered by a scaling policy are suspended. + // + // * For DynamicScalingOutSuspended, while a suspension is in effect, all + // scale-out activities that are triggered by a scaling policy are suspended. + // + // * For ScheduledScalingSuspended, while a suspension is in effect, all + // scaling activities that involve scheduled actions are suspended. + // + // For more information, see Suspend and Resume Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) + // in the Application Auto Scaling User Guide. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3363,6 +3471,12 @@ func (s *RegisterScalableTargetInput) SetServiceNamespace(v string) *RegisterSca return s } +// SetSuspendedState sets the SuspendedState field's value. +func (s *RegisterScalableTargetInput) SetSuspendedState(v *SuspendedState) *RegisterScalableTargetInput { + s.SuspendedState = v + return s +} + type RegisterScalableTargetOutput struct { _ struct{} `type:"structure"` } @@ -3402,8 +3516,8 @@ type ScalableTarget struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
// // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -3443,7 +3557,7 @@ type ScalableTarget struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -3478,11 +3592,15 @@ type ScalableTarget struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // Specifies whether the scaling activities for a scalable target are in a suspended + // state. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3537,6 +3655,12 @@ func (s *ScalableTarget) SetServiceNamespace(v string) *ScalableTarget { return s } +// SetSuspendedState sets the SuspendedState field's value. +func (s *ScalableTarget) SetSuspendedState(v *SuspendedState) *ScalableTarget { + s.SuspendedState = v + return s +} + // Represents the minimum and maximum capacity for a scheduled action. type ScalableTargetAction struct { _ struct{} `type:"structure"` @@ -3601,8 +3725,8 @@ type ScalingActivity struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -3636,7 +3760,7 @@ type ScalingActivity struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -3671,7 +3795,7 @@ type ScalingActivity struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. 
// // ServiceNamespace is a required field @@ -3800,8 +3924,8 @@ type ScalingPolicy struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -3835,7 +3959,7 @@ type ScalingPolicy struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -3870,7 +3994,7 @@ type ScalingPolicy struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -3971,8 +4095,8 @@ type ScheduledAction struct { // * ECS service - The resource type is service and the unique identifier // is the cluster name and service name. Example: service/default/sample-webapp. // - // * Spot fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // // * EMR cluster - The resource type is instancegroup and the unique identifier // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. @@ -4006,7 +4130,7 @@ type ScheduledAction struct { // * ecs:service:DesiredCount - The desired task count of an ECS service. // // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // fleet request. + // Fleet request. // // * elasticmapreduce:instancegroup:InstanceCount - The instance count of // an EMR Instance Group. @@ -4048,7 +4172,7 @@ type ScheduledAction struct { // // * At expressions - "at(yyyy-mm-ddThh:mm:ss)" // - // * Rate expressions - "rate(valueunit)" + // * Rate expressions - "rate(value unit)" // // * Cron expressions - "cron(fields)" // @@ -4075,7 +4199,7 @@ type ScheduledAction struct { // The namespace of the AWS service that provides the resource or custom-resource // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) // in the Amazon Web Services General Reference. // // ServiceNamespace is a required field @@ -4257,8 +4381,8 @@ func (s *StepAdjustment) SetScalingAdjustment(v int64) *StepAdjustment { type StepScalingPolicyConfiguration struct { _ struct{} `type:"structure"` - // The adjustment type, which specifies how the ScalingAdjustment parameter - // in a StepAdjustment is interpreted. + // Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute + // number or a percentage of the current capacity. AdjustmentType *string `type:"string" enum:"AdjustmentType"` // The amount of time, in seconds, after a scaling activity completes where @@ -4365,6 +4489,55 @@ func (s *StepScalingPolicyConfiguration) SetStepAdjustments(v []*StepAdjustment) return s } +// Specifies whether the scaling activities for a scalable target are in a suspended +// state. +type SuspendedState struct { + _ struct{} `type:"structure"` + + // Whether scale in by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to remove capacity when a scaling policy is triggered. The default is false. + DynamicScalingInSuspended *bool `type:"boolean"` + + // Whether scale out by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to add capacity when a scaling policy is triggered. The default is false. + DynamicScalingOutSuspended *bool `type:"boolean"` + + // Whether scheduled scaling is suspended. Set the value to true if you don't + // want Application Auto Scaling to add or remove capacity by initiating scheduled + // actions. The default is false. + ScheduledScalingSuspended *bool `type:"boolean"` +} + +// String returns the string representation +func (s SuspendedState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedState) GoString() string { + return s.String() +} + +// SetDynamicScalingInSuspended sets the DynamicScalingInSuspended field's value. +func (s *SuspendedState) SetDynamicScalingInSuspended(v bool) *SuspendedState { + s.DynamicScalingInSuspended = &v + return s +} + +// SetDynamicScalingOutSuspended sets the DynamicScalingOutSuspended field's value. +func (s *SuspendedState) SetDynamicScalingOutSuspended(v bool) *SuspendedState { + s.DynamicScalingOutSuspended = &v + return s +} + +// SetScheduledScalingSuspended sets the ScheduledScalingSuspended field's value. +func (s *SuspendedState) SetScheduledScalingSuspended(v bool) *SuspendedState { + s.ScheduledScalingSuspended = &v + return s +} + // Represents a target tracking scaling policy configuration to use with Application // Auto Scaling. 
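// For illustration, a minimal sketch of suspending scale-in on an ECS service
// by passing the SuspendedState type above to RegisterScalableTarget; the
// session setup is an assumption, the resource identifiers follow the examples
// in these docs, and error handling is elided.
//
//    svc := applicationautoscaling.New(session.Must(session.NewSession()))
//    _, err := svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
//        ServiceNamespace:  aws.String("ecs"),
//        ResourceId:        aws.String("service/default/sample-webapp"),
//        ScalableDimension: aws.String("ecs:service:DesiredCount"),
//        SuspendedState: &applicationautoscaling.SuspendedState{
//            DynamicScalingInSuspended: aws.Bool(true),
//        },
//    })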
type TargetTrackingScalingPolicyConfiguration struct { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go index 6083bac0928..fbaa9db4693 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go @@ -3,50 +3,44 @@ // Package applicationautoscaling provides the client and types for making API // requests to Application Auto Scaling. // -// With Application Auto Scaling, you can configure automatic scaling for your -// scalable resources. You can use Application Auto Scaling to accomplish the -// following tasks: +// With Application Auto Scaling, you can configure automatic scaling for the +// following resources: // -// * Define scaling policies to automatically scale your AWS or custom resources +// * Amazon ECS services // -// * Scale your resources in response to CloudWatch alarms +// * Amazon EC2 Spot Fleet requests // -// * Schedule one-time or recurring scaling actions +// * Amazon EMR clusters // -// * View the history of your scaling events +// * Amazon AppStream 2.0 fleets // -// Application Auto Scaling can scale the following resources: +// * Amazon DynamoDB tables and global secondary indexes throughput capacity // -// * Amazon ECS services. For more information, see Service Auto Scaling -// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-auto-scaling.html) -// in the Amazon Elastic Container Service Developer Guide. +// * Amazon Aurora Replicas // -// * Amazon EC2 Spot fleets. For more information, see Automatic Scaling -// for Spot Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/fleet-auto-scaling.html) -// in the Amazon EC2 User Guide. +// * Amazon SageMaker endpoint variants // -// * Amazon EMR clusters. For more information, see Using Automatic Scaling -// in Amazon EMR (https://docs.aws.amazon.com/ElasticMapReduce/latest/ManagementGuide/emr-automatic-scaling.html) -// in the Amazon EMR Management Guide. +// * Custom resources provided by your own applications or services // -// * AppStream 2.0 fleets. For more information, see Fleet Auto Scaling for -// Amazon AppStream 2.0 (https://docs.aws.amazon.com/appstream2/latest/developerguide/autoscaling.html) -// in the Amazon AppStream 2.0 Developer Guide. +// API Summary // -// * Provisioned read and write capacity for Amazon DynamoDB tables and global -// secondary indexes. For more information, see Managing Throughput Capacity -// Automatically with DynamoDB Auto Scaling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.html) -// in the Amazon DynamoDB Developer Guide. +// The Application Auto Scaling service API includes three key sets of actions: // -// * Amazon Aurora Replicas. For more information, see Using Amazon Aurora -// Auto Scaling with Aurora Replicas (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Integrating.AutoScaling.html). +// * Register and manage scalable targets - Register AWS or custom resources +// as scalable targets (a resource that Application Auto Scaling can scale), +// set minimum and maximum capacity limits, and retrieve information on existing +// scalable targets. // -// * Amazon SageMaker endpoint variants. 
For more information, see Automatically -// Scaling Amazon SageMaker Models (https://docs.aws.amazon.com/sagemaker/latest/dg/endpoint-auto-scaling.html). -// -// * Custom resources provided by your own applications or services. More -// information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). +// * Configure and manage automatic scaling - Define scaling policies to +// dynamically scale your resources in response to CloudWatch alarms, schedule +// one-time or recurring scaling actions, and retrieve your recent scaling +// activity history. // +// * Suspend and resume scaling - Temporarily suspend and later resume automatic +// scaling by calling the RegisterScalableTarget action for any Application +// Auto Scaling scalable target. You can suspend and resume, individually +// or in combination, scale-out activities triggered by a scaling policy, +// scale-in activities triggered by a scaling policy, and scheduled scaling. // // To learn more about Application Auto Scaling, including information about // granting IAM users required permissions for Application Auto Scaling actions, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go index 902d81d426e..0872f66dde3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "application-autoscaling" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { svc := &ApplicationAutoScaling{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-06", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go new file mode 100644 index 00000000000..737a831f0e1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go @@ -0,0 +1,3422 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package applicationinsights + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateApplication for more information on using the CreateApplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateApplicationRequest method. +// req, resp := client.CreateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateApplication +func (c *ApplicationInsights) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + output = &CreateApplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateApplication API operation for Amazon CloudWatch Application Insights. +// +// Adds an application that is created from a resource group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation CreateApplication for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is already created or in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateApplication +func (c *ApplicationInsights) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + return out, req.Send() +} + +// CreateApplicationWithContext is the same as CreateApplication with the addition of +// the ability to pass a context and additional request options. +// +// See CreateApplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
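// For illustration, a minimal sketch of CreateApplicationWithContext with a
// request deadline, as described above; the client construction and the
// resource group name are placeholder assumptions (the aws, session, context,
// and time imports are implied).
//
//    client := applicationinsights.New(session.Must(session.NewSession()))
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := client.CreateApplicationWithContext(ctx, &applicationinsights.CreateApplicationInput{
//        ResourceGroupName: aws.String("my-resource-group"),
//    })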
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) CreateApplicationWithContext(ctx aws.Context, input *CreateApplicationInput, opts ...request.Option) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateComponent = "CreateComponent" + +// CreateComponentRequest generates a "aws/request.Request" representing the +// client's request for the CreateComponent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateComponent for more information on using the CreateComponent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateComponentRequest method. +// req, resp := client.CreateComponentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateComponent +func (c *ApplicationInsights) CreateComponentRequest(input *CreateComponentInput) (req *request.Request, output *CreateComponentOutput) { + op := &request.Operation{ + Name: opCreateComponent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateComponentInput{} + } + + output = &CreateComponentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateComponent API operation for Amazon CloudWatch Application Insights. +// +// Creates a custom component by grouping similar standalone instances to monitor. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation CreateComponent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is already created or in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateComponent +func (c *ApplicationInsights) CreateComponent(input *CreateComponentInput) (*CreateComponentOutput, error) { + req, out := c.CreateComponentRequest(input) + return out, req.Send() +} + +// CreateComponentWithContext is the same as CreateComponent with the addition of +// the ability to pass a context and additional request options. +// +// See CreateComponent for details on how to use this API operation. 
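// For illustration, a minimal sketch of grouping standalone instances into a
// custom component with CreateComponent, per the description above; the
// resource group name, component name, and instance ARN are placeholder
// assumptions, reusing the client from the sketch above.
//
//    _, err := client.CreateComponent(&applicationinsights.CreateComponentInput{
//        ResourceGroupName: aws.String("my-resource-group"),
//        ComponentName:     aws.String("my-web-tier"),
//        ResourceList: []*string{
//            aws.String("arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0"),
//        },
//    })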
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) CreateComponentWithContext(ctx aws.Context, input *CreateComponentInput, opts ...request.Option) (*CreateComponentOutput, error) { + req, out := c.CreateComponentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteApplication for more information on using the DeleteApplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteApplicationRequest method. +// req, resp := client.DeleteApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteApplication +func (c *ApplicationInsights) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + output = &DeleteApplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteApplication API operation for Amazon CloudWatch Application Insights. +// +// Removes the specified application from monitoring. Does not delete the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DeleteApplication for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeBadRequestException "BadRequestException" +// The request is not understood by the server. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteApplication +func (c *ApplicationInsights) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + return out, req.Send() +} + +// DeleteApplicationWithContext is the same as DeleteApplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteApplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DeleteApplicationWithContext(ctx aws.Context, input *DeleteApplicationInput, opts ...request.Option) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteComponent = "DeleteComponent" + +// DeleteComponentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteComponent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteComponent for more information on using the DeleteComponent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteComponentRequest method. +// req, resp := client.DeleteComponentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteComponent +func (c *ApplicationInsights) DeleteComponentRequest(input *DeleteComponentInput) (req *request.Request, output *DeleteComponentOutput) { + op := &request.Operation{ + Name: opDeleteComponent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteComponentInput{} + } + + output = &DeleteComponentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteComponent API operation for Amazon CloudWatch Application Insights. +// +// Ungroups a custom component. When you ungroup custom components, all applicable +// monitors that are set up for the component are removed and the instances +// revert to their standalone status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DeleteComponent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. 
+// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteComponent +func (c *ApplicationInsights) DeleteComponent(input *DeleteComponentInput) (*DeleteComponentOutput, error) { + req, out := c.DeleteComponentRequest(input) + return out, req.Send() +} + +// DeleteComponentWithContext is the same as DeleteComponent with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteComponent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DeleteComponentWithContext(ctx aws.Context, input *DeleteComponentInput, opts ...request.Option) (*DeleteComponentOutput, error) { + req, out := c.DeleteComponentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeApplication = "DescribeApplication" + +// DescribeApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeApplication for more information on using the DescribeApplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeApplicationRequest method. +// req, resp := client.DescribeApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeApplication +func (c *ApplicationInsights) DescribeApplicationRequest(input *DescribeApplicationInput) (req *request.Request, output *DescribeApplicationOutput) { + op := &request.Operation{ + Name: opDescribeApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeApplicationInput{} + } + + output = &DescribeApplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeApplication API operation for Amazon CloudWatch Application Insights. +// +// Describes the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeApplication for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. 
+// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeApplication +func (c *ApplicationInsights) DescribeApplication(input *DescribeApplicationInput) (*DescribeApplicationOutput, error) { + req, out := c.DescribeApplicationRequest(input) + return out, req.Send() +} + +// DescribeApplicationWithContext is the same as DescribeApplication with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeApplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeApplicationWithContext(ctx aws.Context, input *DescribeApplicationInput, opts ...request.Option) (*DescribeApplicationOutput, error) { + req, out := c.DescribeApplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeComponent = "DescribeComponent" + +// DescribeComponentRequest generates a "aws/request.Request" representing the +// client's request for the DescribeComponent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeComponent for more information on using the DescribeComponent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeComponentRequest method. +// req, resp := client.DescribeComponentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponent +func (c *ApplicationInsights) DescribeComponentRequest(input *DescribeComponentInput) (req *request.Request, output *DescribeComponentOutput) { + op := &request.Operation{ + Name: opDescribeComponent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeComponentInput{} + } + + output = &DescribeComponentOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeComponent API operation for Amazon CloudWatch Application Insights. +// +// Describes a component and lists the resources that are grouped together in +// a component. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeComponent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. 
+// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponent +func (c *ApplicationInsights) DescribeComponent(input *DescribeComponentInput) (*DescribeComponentOutput, error) { + req, out := c.DescribeComponentRequest(input) + return out, req.Send() +} + +// DescribeComponentWithContext is the same as DescribeComponent with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeComponent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeComponentWithContext(ctx aws.Context, input *DescribeComponentInput, opts ...request.Option) (*DescribeComponentOutput, error) { + req, out := c.DescribeComponentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeComponentConfiguration = "DescribeComponentConfiguration" + +// DescribeComponentConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeComponentConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeComponentConfiguration for more information on using the DescribeComponentConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeComponentConfigurationRequest method. +// req, resp := client.DescribeComponentConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfiguration +func (c *ApplicationInsights) DescribeComponentConfigurationRequest(input *DescribeComponentConfigurationInput) (req *request.Request, output *DescribeComponentConfigurationOutput) { + op := &request.Operation{ + Name: opDescribeComponentConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeComponentConfigurationInput{} + } + + output = &DescribeComponentConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeComponentConfiguration API operation for Amazon CloudWatch Application Insights. +// +// Describes the monitoring configuration of the component. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeComponentConfiguration for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfiguration +func (c *ApplicationInsights) DescribeComponentConfiguration(input *DescribeComponentConfigurationInput) (*DescribeComponentConfigurationOutput, error) { + req, out := c.DescribeComponentConfigurationRequest(input) + return out, req.Send() +} + +// DescribeComponentConfigurationWithContext is the same as DescribeComponentConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeComponentConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeComponentConfigurationWithContext(ctx aws.Context, input *DescribeComponentConfigurationInput, opts ...request.Option) (*DescribeComponentConfigurationOutput, error) { + req, out := c.DescribeComponentConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeComponentConfigurationRecommendation = "DescribeComponentConfigurationRecommendation" + +// DescribeComponentConfigurationRecommendationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeComponentConfigurationRecommendation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeComponentConfigurationRecommendation for more information on using the DescribeComponentConfigurationRecommendation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeComponentConfigurationRecommendationRequest method. 
+// req, resp := client.DescribeComponentConfigurationRecommendationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfigurationRecommendation +func (c *ApplicationInsights) DescribeComponentConfigurationRecommendationRequest(input *DescribeComponentConfigurationRecommendationInput) (req *request.Request, output *DescribeComponentConfigurationRecommendationOutput) { + op := &request.Operation{ + Name: opDescribeComponentConfigurationRecommendation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeComponentConfigurationRecommendationInput{} + } + + output = &DescribeComponentConfigurationRecommendationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeComponentConfigurationRecommendation API operation for Amazon CloudWatch Application Insights. +// +// Describes the recommended monitoring configuration of the component. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeComponentConfigurationRecommendation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfigurationRecommendation +func (c *ApplicationInsights) DescribeComponentConfigurationRecommendation(input *DescribeComponentConfigurationRecommendationInput) (*DescribeComponentConfigurationRecommendationOutput, error) { + req, out := c.DescribeComponentConfigurationRecommendationRequest(input) + return out, req.Send() +} + +// DescribeComponentConfigurationRecommendationWithContext is the same as DescribeComponentConfigurationRecommendation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeComponentConfigurationRecommendation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeComponentConfigurationRecommendationWithContext(ctx aws.Context, input *DescribeComponentConfigurationRecommendationInput, opts ...request.Option) (*DescribeComponentConfigurationRecommendationOutput, error) { + req, out := c.DescribeComponentConfigurationRecommendationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeObservation = "DescribeObservation" + +// DescribeObservationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeObservation operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeObservation for more information on using the DescribeObservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeObservationRequest method. +// req, resp := client.DescribeObservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeObservation +func (c *ApplicationInsights) DescribeObservationRequest(input *DescribeObservationInput) (req *request.Request, output *DescribeObservationOutput) { + op := &request.Operation{ + Name: opDescribeObservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeObservationInput{} + } + + output = &DescribeObservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeObservation API operation for Amazon CloudWatch Application Insights. +// +// Describes an anomaly or error with the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeObservation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeObservation +func (c *ApplicationInsights) DescribeObservation(input *DescribeObservationInput) (*DescribeObservationOutput, error) { + req, out := c.DescribeObservationRequest(input) + return out, req.Send() +} + +// DescribeObservationWithContext is the same as DescribeObservation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeObservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeObservationWithContext(ctx aws.Context, input *DescribeObservationInput, opts ...request.Option) (*DescribeObservationOutput, error) { + req, out := c.DescribeObservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeProblem = "DescribeProblem" + +// DescribeProblemRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProblem operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeProblem for more information on using the DescribeProblem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeProblemRequest method. +// req, resp := client.DescribeProblemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblem +func (c *ApplicationInsights) DescribeProblemRequest(input *DescribeProblemInput) (req *request.Request, output *DescribeProblemOutput) { + op := &request.Operation{ + Name: opDescribeProblem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeProblemInput{} + } + + output = &DescribeProblemOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeProblem API operation for Amazon CloudWatch Application Insights. +// +// Describes an application problem. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeProblem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblem +func (c *ApplicationInsights) DescribeProblem(input *DescribeProblemInput) (*DescribeProblemOutput, error) { + req, out := c.DescribeProblemRequest(input) + return out, req.Send() +} + +// DescribeProblemWithContext is the same as DescribeProblem with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeProblem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeProblemWithContext(ctx aws.Context, input *DescribeProblemInput, opts ...request.Option) (*DescribeProblemOutput, error) { + req, out := c.DescribeProblemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeProblemObservations = "DescribeProblemObservations" + +// DescribeProblemObservationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProblemObservations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeProblemObservations for more information on using the DescribeProblemObservations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeProblemObservationsRequest method. +// req, resp := client.DescribeProblemObservationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblemObservations +func (c *ApplicationInsights) DescribeProblemObservationsRequest(input *DescribeProblemObservationsInput) (req *request.Request, output *DescribeProblemObservationsOutput) { + op := &request.Operation{ + Name: opDescribeProblemObservations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeProblemObservationsInput{} + } + + output = &DescribeProblemObservationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeProblemObservations API operation for Amazon CloudWatch Application Insights. +// +// Describes the anomalies or errors associated with the problem. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation DescribeProblemObservations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblemObservations +func (c *ApplicationInsights) DescribeProblemObservations(input *DescribeProblemObservationsInput) (*DescribeProblemObservationsOutput, error) { + req, out := c.DescribeProblemObservationsRequest(input) + return out, req.Send() +} + +// DescribeProblemObservationsWithContext is the same as DescribeProblemObservations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeProblemObservations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) DescribeProblemObservationsWithContext(ctx aws.Context, input *DescribeProblemObservationsInput, opts ...request.Option) (*DescribeProblemObservationsOutput, error) { + req, out := c.DescribeProblemObservationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListApplications = "ListApplications" + +// ListApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the ListApplications operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListApplications for more information on using the ListApplications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListApplicationsRequest method. +// req, resp := client.ListApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListApplications +func (c *ApplicationInsights) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { + op := &request.Operation{ + Name: opListApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationsInput{} + } + + output = &ListApplicationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListApplications API operation for Amazon CloudWatch Application Insights. +// +// Lists the IDs of the applications that you are monitoring. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation ListApplications for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListApplications +func (c *ApplicationInsights) ListApplications(input *ListApplicationsInput) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + return out, req.Send() +} + +// ListApplicationsWithContext is the same as ListApplications with the addition of +// the ability to pass a context and additional request options. +// +// See ListApplications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListApplicationsWithContext(ctx aws.Context, input *ListApplicationsInput, opts ...request.Option) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
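+	// Any opts supplied are request.Option functions (request.WithLogLevel,
+	// for example) and are applied to this request only, not to the client as
+	// a whole.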
+ return out, req.Send() +} + +// ListApplicationsPages iterates over the pages of a ListApplications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplications operation. +// pageNum := 0 +// err := client.ListApplicationsPages(params, +// func(page *applicationinsights.ListApplicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationInsights) ListApplicationsPages(input *ListApplicationsInput, fn func(*ListApplicationsOutput, bool) bool) error { + return c.ListApplicationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListApplicationsPagesWithContext same as ListApplicationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListApplicationsPagesWithContext(ctx aws.Context, input *ListApplicationsInput, fn func(*ListApplicationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListApplicationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListApplicationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListComponents = "ListComponents" + +// ListComponentsRequest generates a "aws/request.Request" representing the +// client's request for the ListComponents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListComponents for more information on using the ListComponents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListComponentsRequest method. 
+// req, resp := client.ListComponentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListComponents +func (c *ApplicationInsights) ListComponentsRequest(input *ListComponentsInput) (req *request.Request, output *ListComponentsOutput) { + op := &request.Operation{ + Name: opListComponents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListComponentsInput{} + } + + output = &ListComponentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListComponents API operation for Amazon CloudWatch Application Insights. +// +// Lists the auto-grouped, standalone, and custom components of the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation ListComponents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListComponents +func (c *ApplicationInsights) ListComponents(input *ListComponentsInput) (*ListComponentsOutput, error) { + req, out := c.ListComponentsRequest(input) + return out, req.Send() +} + +// ListComponentsWithContext is the same as ListComponents with the addition of +// the ability to pass a context and additional request options. +// +// See ListComponents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListComponentsWithContext(ctx aws.Context, input *ListComponentsInput, opts ...request.Option) (*ListComponentsOutput, error) { + req, out := c.ListComponentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListComponentsPages iterates over the pages of a ListComponents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListComponents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComponents operation. 
+// pageNum := 0 +// err := client.ListComponentsPages(params, +// func(page *applicationinsights.ListComponentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationInsights) ListComponentsPages(input *ListComponentsInput, fn func(*ListComponentsOutput, bool) bool) error { + return c.ListComponentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComponentsPagesWithContext same as ListComponentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListComponentsPagesWithContext(ctx aws.Context, input *ListComponentsInput, fn func(*ListComponentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListComponentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListComponentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListComponentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListProblems = "ListProblems" + +// ListProblemsRequest generates a "aws/request.Request" representing the +// client's request for the ListProblems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListProblems for more information on using the ListProblems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListProblemsRequest method. +// req, resp := client.ListProblemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListProblems +func (c *ApplicationInsights) ListProblemsRequest(input *ListProblemsInput) (req *request.Request, output *ListProblemsOutput) { + op := &request.Operation{ + Name: opListProblems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListProblemsInput{} + } + + output = &ListProblemsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListProblems API operation for Amazon CloudWatch Application Insights. +// +// Lists the problems with your application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation ListProblems for usage and error information. 
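+//
+// For example, a hypothetical call (the resource group name is made up; the
+// input field shown is assumed from this operation's input shape):
+//
+//    out, err := client.ListProblems(&applicationinsights.ListProblemsInput{
+//        ResourceGroupName: aws.String("my-resource-group"),
+//    })
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }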
+// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListProblems +func (c *ApplicationInsights) ListProblems(input *ListProblemsInput) (*ListProblemsOutput, error) { + req, out := c.ListProblemsRequest(input) + return out, req.Send() +} + +// ListProblemsWithContext is the same as ListProblems with the addition of +// the ability to pass a context and additional request options. +// +// See ListProblems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListProblemsWithContext(ctx aws.Context, input *ListProblemsInput, opts ...request.Option) (*ListProblemsOutput, error) { + req, out := c.ListProblemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListProblemsPages iterates over the pages of a ListProblems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProblems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProblems operation. +// pageNum := 0 +// err := client.ListProblemsPages(params, +// func(page *applicationinsights.ListProblemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationInsights) ListProblemsPages(input *ListProblemsInput, fn func(*ListProblemsOutput, bool) bool) error { + return c.ListProblemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProblemsPagesWithContext same as ListProblemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) ListProblemsPagesWithContext(ctx aws.Context, input *ListProblemsInput, fn func(*ListProblemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProblemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProblemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListProblemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplication operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApplication for more information on using the UpdateApplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApplicationRequest method. +// req, resp := client.UpdateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateApplication +func (c *ApplicationInsights) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + output = &UpdateApplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApplication API operation for Amazon CloudWatch Application Insights. +// +// Updates the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation UpdateApplication for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateApplication +func (c *ApplicationInsights) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + return out, req.Send() +} + +// UpdateApplicationWithContext is the same as UpdateApplication with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) UpdateApplicationWithContext(ctx aws.Context, input *UpdateApplicationInput, opts ...request.Option) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateComponent = "UpdateComponent" + +// UpdateComponentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateComponent operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateComponent for more information on using the UpdateComponent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateComponentRequest method. +// req, resp := client.UpdateComponentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponent +func (c *ApplicationInsights) UpdateComponentRequest(input *UpdateComponentInput) (req *request.Request, output *UpdateComponentOutput) { + op := &request.Operation{ + Name: opUpdateComponent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateComponentInput{} + } + + output = &UpdateComponentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateComponent API operation for Amazon CloudWatch Application Insights. +// +// Updates the custom component name and/or the list of resources that make +// up the component. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation UpdateComponent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is already created or in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponent +func (c *ApplicationInsights) UpdateComponent(input *UpdateComponentInput) (*UpdateComponentOutput, error) { + req, out := c.UpdateComponentRequest(input) + return out, req.Send() +} + +// UpdateComponentWithContext is the same as UpdateComponent with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateComponent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) UpdateComponentWithContext(ctx aws.Context, input *UpdateComponentInput, opts ...request.Option) (*UpdateComponentOutput, error) { + req, out := c.UpdateComponentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdateComponentConfiguration = "UpdateComponentConfiguration" + +// UpdateComponentConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateComponentConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateComponentConfiguration for more information on using the UpdateComponentConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateComponentConfigurationRequest method. +// req, resp := client.UpdateComponentConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponentConfiguration +func (c *ApplicationInsights) UpdateComponentConfigurationRequest(input *UpdateComponentConfigurationInput) (req *request.Request, output *UpdateComponentConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateComponentConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateComponentConfigurationInput{} + } + + output = &UpdateComponentConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateComponentConfiguration API operation for Amazon CloudWatch Application Insights. +// +// Updates the monitoring configurations for the component. The configuration +// input parameter is an escaped JSON of the configuration and should match +// the schema of what is returned by DescribeComponentConfigurationRecommendation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Application Insights's +// API operation UpdateComponentConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource does not exist in the customer account. +// +// * ErrCodeValidationException "ValidationException" +// The parameter is not valid. +// +// * ErrCodeInternalServerException "InternalServerException" +// The server encountered an internal error and is unable to complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponentConfiguration +func (c *ApplicationInsights) UpdateComponentConfiguration(input *UpdateComponentConfigurationInput) (*UpdateComponentConfigurationOutput, error) { + req, out := c.UpdateComponentConfigurationRequest(input) + return out, req.Send() +} + +// UpdateComponentConfigurationWithContext is the same as UpdateComponentConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateComponentConfiguration for details on how to use this API operation. 
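+//
+// A sketch of typical use (the 30-second timeout is illustrative, input
+// construction is omitted, and the standard library context package is
+// assumed to be imported):
+//
+//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.UpdateComponentConfigurationWithContext(ctx, input)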
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationInsights) UpdateComponentConfigurationWithContext(ctx aws.Context, input *UpdateComponentConfigurationInput, opts ...request.Option) (*UpdateComponentConfigurationOutput, error) { + req, out := c.UpdateComponentConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Describes a standalone resource or similarly grouped resources that the application +// is made up of. +type ApplicationComponent struct { + _ struct{} `type:"structure"` + + // The name of the component. + ComponentName *string `type:"string"` + + // Indicates whether the application component is monitored. + Monitor *bool `type:"boolean"` + + // The resource type. Supported resource types include EC2 instances, Auto Scaling + // group, Classic ELB, Application ELB, and SQS Queue. + ResourceType *string `type:"string"` + + // The stack tier of the application component. + Tier *string `type:"string"` +} + +// String returns the string representation +func (s ApplicationComponent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationComponent) GoString() string { + return s.String() +} + +// SetComponentName sets the ComponentName field's value. +func (s *ApplicationComponent) SetComponentName(v string) *ApplicationComponent { + s.ComponentName = &v + return s +} + +// SetMonitor sets the Monitor field's value. +func (s *ApplicationComponent) SetMonitor(v bool) *ApplicationComponent { + s.Monitor = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ApplicationComponent) SetResourceType(v string) *ApplicationComponent { + s.ResourceType = &v + return s +} + +// SetTier sets the Tier field's value. +func (s *ApplicationComponent) SetTier(v string) *ApplicationComponent { + s.Tier = &v + return s +} + +// Describes the status of the application. +type ApplicationInfo struct { + _ struct{} `type:"structure"` + + // The lifecycle of the application. + LifeCycle *string `type:"string"` + + // Indicates whether Application Insights will create opsItems for any problem + // detected by Application Insights for an application. + OpsCenterEnabled *bool `type:"boolean"` + + // The SNS topic provided to Application Insights that is associated to the + // created opsItems to receive SNS notifications for opsItem updates. + OpsItemSNSTopicArn *string `type:"string"` + + // The issues on the user side that block Application Insights from successfully + // monitoring an application. + Remarks *string `type:"string"` + + // The name of the resource group used for the application. + ResourceGroupName *string `type:"string"` +} + +// String returns the string representation +func (s ApplicationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationInfo) GoString() string { + return s.String() +} + +// SetLifeCycle sets the LifeCycle field's value. +func (s *ApplicationInfo) SetLifeCycle(v string) *ApplicationInfo { + s.LifeCycle = &v + return s +} + +// SetOpsCenterEnabled sets the OpsCenterEnabled field's value. 
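+// Like the other generated setters it returns the receiver, so calls can be
+// chained (the values shown are illustrative):
+//
+//    info.SetOpsCenterEnabled(true).SetResourceGroupName("my-resource-group")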
+func (s *ApplicationInfo) SetOpsCenterEnabled(v bool) *ApplicationInfo { + s.OpsCenterEnabled = &v + return s +} + +// SetOpsItemSNSTopicArn sets the OpsItemSNSTopicArn field's value. +func (s *ApplicationInfo) SetOpsItemSNSTopicArn(v string) *ApplicationInfo { + s.OpsItemSNSTopicArn = &v + return s +} + +// SetRemarks sets the Remarks field's value. +func (s *ApplicationInfo) SetRemarks(v string) *ApplicationInfo { + s.Remarks = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *ApplicationInfo) SetResourceGroupName(v string) *ApplicationInfo { + s.ResourceGroupName = &v + return s +} + +type CreateApplicationInput struct { + _ struct{} `type:"structure"` + + // When set to true, creates opsItems for any problems detected on an application. + OpsCenterEnabled *bool `type:"boolean"` + + // The SNS topic provided to Application Insights that is associated to the + // created opsItem. Allows you to receive notifications for updates to the opsItem. + OpsItemSNSTopicArn *string `type:"string"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateApplicationInput"} + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsCenterEnabled sets the OpsCenterEnabled field's value. +func (s *CreateApplicationInput) SetOpsCenterEnabled(v bool) *CreateApplicationInput { + s.OpsCenterEnabled = &v + return s +} + +// SetOpsItemSNSTopicArn sets the OpsItemSNSTopicArn field's value. +func (s *CreateApplicationInput) SetOpsItemSNSTopicArn(v string) *CreateApplicationInput { + s.OpsItemSNSTopicArn = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *CreateApplicationInput) SetResourceGroupName(v string) *CreateApplicationInput { + s.ResourceGroupName = &v + return s +} + +type CreateApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + ApplicationInfo *ApplicationInfo `type:"structure"` +} + +// String returns the string representation +func (s CreateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationOutput) GoString() string { + return s.String() +} + +// SetApplicationInfo sets the ApplicationInfo field's value. +func (s *CreateApplicationOutput) SetApplicationInfo(v *ApplicationInfo) *CreateApplicationOutput { + s.ApplicationInfo = v + return s +} + +type CreateComponentInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` + + // The list of resource ARNs that belong to the component. 
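+	// Each entry is a full resource ARN, for example (illustrative):
+	// "arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0".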
+ // + // ResourceList is a required field + ResourceList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateComponentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComponentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateComponentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateComponentInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + if s.ResourceList == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *CreateComponentInput) SetComponentName(v string) *CreateComponentInput { + s.ComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *CreateComponentInput) SetResourceGroupName(v string) *CreateComponentInput { + s.ResourceGroupName = &v + return s +} + +// SetResourceList sets the ResourceList field's value. +func (s *CreateComponentInput) SetResourceList(v []*string) *CreateComponentInput { + s.ResourceList = v + return s +} + +type CreateComponentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateComponentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComponentOutput) GoString() string { + return s.String() +} + +type DeleteApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteApplicationInput"} + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DeleteApplicationInput) SetResourceGroupName(v string) *DeleteApplicationInput { + s.ResourceGroupName = &v + return s +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +type DeleteComponentInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The name of the resource group. 
+ // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteComponentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteComponentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteComponentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteComponentInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *DeleteComponentInput) SetComponentName(v string) *DeleteComponentInput { + s.ComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DeleteComponentInput) SetResourceGroupName(v string) *DeleteComponentInput { + s.ResourceGroupName = &v + return s +} + +type DeleteComponentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteComponentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteComponentOutput) GoString() string { + return s.String() +} + +type DescribeApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeApplicationInput"} + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DescribeApplicationInput) SetResourceGroupName(v string) *DescribeApplicationInput { + s.ResourceGroupName = &v + return s +} + +type DescribeApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + ApplicationInfo *ApplicationInfo `type:"structure"` +} + +// String returns the string representation +func (s DescribeApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationOutput) GoString() string { + return s.String() +} + +// SetApplicationInfo sets the ApplicationInfo field's value. +func (s *DescribeApplicationOutput) SetApplicationInfo(v *ApplicationInfo) *DescribeApplicationOutput { + s.ApplicationInfo = v + return s +} + +type DescribeComponentConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The name of the resource group. 
+ // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeComponentConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeComponentConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeComponentConfigurationInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *DescribeComponentConfigurationInput) SetComponentName(v string) *DescribeComponentConfigurationInput { + s.ComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DescribeComponentConfigurationInput) SetResourceGroupName(v string) *DescribeComponentConfigurationInput { + s.ResourceGroupName = &v + return s +} + +type DescribeComponentConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The configuration settings of the component. The value is the escaped JSON + // of the configuration. + ComponentConfiguration *string `type:"string"` + + // Indicates whether the application component is monitored. + Monitor *bool `type:"boolean"` + + // The tier of the application component. Supported tiers include DOT_NET_WORKER, + // DOT_NET_WEB, SQL_SERVER, and DEFAULT + Tier *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComponentConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentConfigurationOutput) GoString() string { + return s.String() +} + +// SetComponentConfiguration sets the ComponentConfiguration field's value. +func (s *DescribeComponentConfigurationOutput) SetComponentConfiguration(v string) *DescribeComponentConfigurationOutput { + s.ComponentConfiguration = &v + return s +} + +// SetMonitor sets the Monitor field's value. +func (s *DescribeComponentConfigurationOutput) SetMonitor(v bool) *DescribeComponentConfigurationOutput { + s.Monitor = &v + return s +} + +// SetTier sets the Tier field's value. +func (s *DescribeComponentConfigurationOutput) SetTier(v string) *DescribeComponentConfigurationOutput { + s.Tier = &v + return s +} + +type DescribeComponentConfigurationRecommendationInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` + + // The tier of the application component. Supported tiers include DOT_NET_WORKER, + // DOT_NET_WEB, SQL_SERVER, and DEFAULT. 
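+	// The tier is passed as a plain string, e.g. input.SetTier("SQL_SERVER")
+	// (the value is taken from the supported list above).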
+ // + // Tier is a required field + Tier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeComponentConfigurationRecommendationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentConfigurationRecommendationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeComponentConfigurationRecommendationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeComponentConfigurationRecommendationInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *DescribeComponentConfigurationRecommendationInput) SetComponentName(v string) *DescribeComponentConfigurationRecommendationInput { + s.ComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DescribeComponentConfigurationRecommendationInput) SetResourceGroupName(v string) *DescribeComponentConfigurationRecommendationInput { + s.ResourceGroupName = &v + return s +} + +// SetTier sets the Tier field's value. +func (s *DescribeComponentConfigurationRecommendationInput) SetTier(v string) *DescribeComponentConfigurationRecommendationInput { + s.Tier = &v + return s +} + +type DescribeComponentConfigurationRecommendationOutput struct { + _ struct{} `type:"structure"` + + // The recommended configuration settings of the component. The value is the + // escaped JSON of the configuration. + ComponentConfiguration *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComponentConfigurationRecommendationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentConfigurationRecommendationOutput) GoString() string { + return s.String() +} + +// SetComponentConfiguration sets the ComponentConfiguration field's value. +func (s *DescribeComponentConfigurationRecommendationOutput) SetComponentConfiguration(v string) *DescribeComponentConfigurationRecommendationOutput { + s.ComponentConfiguration = &v + return s +} + +type DescribeComponentInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeComponentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
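+// The SDK's request handlers call Validate before the request is sent; a
+// non-nil return is an ErrInvalidParams value naming each missing required
+// field, and the call is aborted without contacting the service.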
+func (s *DescribeComponentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeComponentInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *DescribeComponentInput) SetComponentName(v string) *DescribeComponentInput { + s.ComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *DescribeComponentInput) SetResourceGroupName(v string) *DescribeComponentInput { + s.ResourceGroupName = &v + return s +} + +type DescribeComponentOutput struct { + _ struct{} `type:"structure"` + + // Describes a standalone resource or similarly grouped resources that the application + // is made up of. + ApplicationComponent *ApplicationComponent `type:"structure"` + + // The list of resource ARNs that belong to the component. + ResourceList []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeComponentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComponentOutput) GoString() string { + return s.String() +} + +// SetApplicationComponent sets the ApplicationComponent field's value. +func (s *DescribeComponentOutput) SetApplicationComponent(v *ApplicationComponent) *DescribeComponentOutput { + s.ApplicationComponent = v + return s +} + +// SetResourceList sets the ResourceList field's value. +func (s *DescribeComponentOutput) SetResourceList(v []*string) *DescribeComponentOutput { + s.ResourceList = v + return s +} + +type DescribeObservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the observation. + // + // ObservationId is a required field + ObservationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeObservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeObservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeObservationInput"} + if s.ObservationId == nil { + invalidParams.Add(request.NewErrParamRequired("ObservationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObservationId sets the ObservationId field's value. +func (s *DescribeObservationInput) SetObservationId(v string) *DescribeObservationInput { + s.ObservationId = &v + return s +} + +type DescribeObservationOutput struct { + _ struct{} `type:"structure"` + + // Information about the observation. + Observation *Observation `type:"structure"` +} + +// String returns the string representation +func (s DescribeObservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObservationOutput) GoString() string { + return s.String() +} + +// SetObservation sets the Observation field's value. 
+func (s *DescribeObservationOutput) SetObservation(v *Observation) *DescribeObservationOutput { + s.Observation = v + return s +} + +type DescribeProblemInput struct { + _ struct{} `type:"structure"` + + // The ID of the problem. + // + // ProblemId is a required field + ProblemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeProblemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProblemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProblemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProblemInput"} + if s.ProblemId == nil { + invalidParams.Add(request.NewErrParamRequired("ProblemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProblemId sets the ProblemId field's value. +func (s *DescribeProblemInput) SetProblemId(v string) *DescribeProblemInput { + s.ProblemId = &v + return s +} + +type DescribeProblemObservationsInput struct { + _ struct{} `type:"structure"` + + // The ID of the problem. + // + // ProblemId is a required field + ProblemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeProblemObservationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProblemObservationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProblemObservationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProblemObservationsInput"} + if s.ProblemId == nil { + invalidParams.Add(request.NewErrParamRequired("ProblemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProblemId sets the ProblemId field's value. +func (s *DescribeProblemObservationsInput) SetProblemId(v string) *DescribeProblemObservationsInput { + s.ProblemId = &v + return s +} + +type DescribeProblemObservationsOutput struct { + _ struct{} `type:"structure"` + + // Observations related to the problem. + RelatedObservations *RelatedObservations `type:"structure"` +} + +// String returns the string representation +func (s DescribeProblemObservationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProblemObservationsOutput) GoString() string { + return s.String() +} + +// SetRelatedObservations sets the RelatedObservations field's value. +func (s *DescribeProblemObservationsOutput) SetRelatedObservations(v *RelatedObservations) *DescribeProblemObservationsOutput { + s.RelatedObservations = v + return s +} + +type DescribeProblemOutput struct { + _ struct{} `type:"structure"` + + // Information about the problem. + Problem *Problem `type:"structure"` +} + +// String returns the string representation +func (s DescribeProblemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProblemOutput) GoString() string { + return s.String() +} + +// SetProblem sets the Problem field's value. 
+func (s *DescribeProblemOutput) SetProblem(v *Problem) *DescribeProblemOutput { + s.Problem = v + return s +} + +type ListApplicationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListApplicationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListApplicationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListApplicationsInput) SetMaxResults(v int64) *ListApplicationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListApplicationsInput) SetNextToken(v string) *ListApplicationsInput { + s.NextToken = &v + return s +} + +type ListApplicationsOutput struct { + _ struct{} `type:"structure"` + + // The list of applications. + ApplicationInfoList []*ApplicationInfo `type:"list"` + + // The token used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsOutput) GoString() string { + return s.String() +} + +// SetApplicationInfoList sets the ApplicationInfoList field's value. +func (s *ListApplicationsOutput) SetApplicationInfoList(v []*ApplicationInfo) *ListApplicationsOutput { + s.ApplicationInfoList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListApplicationsOutput) SetNextToken(v string) *ListApplicationsOutput { + s.NextToken = &v + return s +} + +type ListComponentsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListComponentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComponentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
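+//
+// MaxResults and NextToken drive manual pagination; a hedged sketch of the
+// loop (the svc client value and the resource group name are hypothetical):
+//
+//    input := &applicationinsights.ListComponentsInput{
+//        ResourceGroupName: aws.String("my-resource-group"),
+//    }
+//    for {
+//        out, err := svc.ListComponents(input)
+//        if err != nil {
+//            break // handle the error
+//        }
+//        // consume out.ApplicationComponentList here
+//        if out.NextToken == nil {
+//            break
+//        }
+//        input.NextToken = out.NextToken
+//    }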
+func (s *ListComponentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListComponentsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListComponentsInput) SetMaxResults(v int64) *ListComponentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComponentsInput) SetNextToken(v string) *ListComponentsInput { + s.NextToken = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *ListComponentsInput) SetResourceGroupName(v string) *ListComponentsInput { + s.ResourceGroupName = &v + return s +} + +type ListComponentsOutput struct { + _ struct{} `type:"structure"` + + // The list of application components. + ApplicationComponentList []*ApplicationComponent `type:"list"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListComponentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComponentsOutput) GoString() string { + return s.String() +} + +// SetApplicationComponentList sets the ApplicationComponentList field's value. +func (s *ListComponentsOutput) SetApplicationComponentList(v []*ApplicationComponent) *ListComponentsOutput { + s.ApplicationComponentList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComponentsOutput) SetNextToken(v string) *ListComponentsOutput { + s.NextToken = &v + return s +} + +type ListProblemsInput struct { + _ struct{} `type:"structure"` + + // The time when the problem ended, in epoch seconds. If not specified, problems + // within the past seven days are returned. + EndTime *time.Time `type:"timestamp"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` + + // The name of the resource group. + ResourceGroupName *string `type:"string"` + + // The time when the problem was detected, in epoch seconds. If you don't specify + // a time frame for the request, problems within the past seven days are returned. + StartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ListProblemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProblemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListProblemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListProblemsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. 
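+//
+// Generated setters return the receiver, so calls can be chained when building
+// an input; a hedged sketch (the time window and page size are hypothetical):
+//
+//    input := (&applicationinsights.ListProblemsInput{}).
+//        SetStartTime(time.Now().Add(-24 * time.Hour)).
+//        SetEndTime(time.Now()).
+//        SetMaxResults(10)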
+func (s *ListProblemsInput) SetEndTime(v time.Time) *ListProblemsInput { + s.EndTime = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListProblemsInput) SetMaxResults(v int64) *ListProblemsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProblemsInput) SetNextToken(v string) *ListProblemsInput { + s.NextToken = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *ListProblemsInput) SetResourceGroupName(v string) *ListProblemsInput { + s.ResourceGroupName = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ListProblemsInput) SetStartTime(v time.Time) *ListProblemsInput { + s.StartTime = &v + return s +} + +type ListProblemsOutput struct { + _ struct{} `type:"structure"` + + // The token used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `type:"string"` + + // The list of problems. + ProblemList []*Problem `type:"list"` +} + +// String returns the string representation +func (s ListProblemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProblemsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProblemsOutput) SetNextToken(v string) *ListProblemsOutput { + s.NextToken = &v + return s +} + +// SetProblemList sets the ProblemList field's value. +func (s *ListProblemsOutput) SetProblemList(v []*Problem) *ListProblemsOutput { + s.ProblemList = v + return s +} + +// Describes an anomaly or error with the application. +type Observation struct { + _ struct{} `type:"structure"` + + // The time when the observation ended, in epoch seconds. + EndTime *time.Time `type:"timestamp"` + + // The ID of the observation type. + Id *string `type:"string"` + + // The timestamp in the CloudWatch Logs that specifies when the matched line + // occurred. + LineTime *time.Time `type:"timestamp"` + + // The log filter of the observation. + LogFilter *string `type:"string" enum:"LogFilter"` + + // The log group name. + LogGroup *string `type:"string"` + + // The log text of the observation. + LogText *string `type:"string"` + + // The name of the observation metric. + MetricName *string `type:"string"` + + // The namespace of the observation metric. + MetricNamespace *string `type:"string"` + + // The source resource ARN of the observation. + SourceARN *string `type:"string"` + + // The source type of the observation. + SourceType *string `type:"string"` + + // The time when the observation was first detected, in epoch seconds. + StartTime *time.Time `type:"timestamp"` + + // The unit of the source observation metric. + Unit *string `type:"string"` + + // The value of the source observation metric. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s Observation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Observation) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *Observation) SetEndTime(v time.Time) *Observation { + s.EndTime = &v + return s +} + +// SetId sets the Id field's value. +func (s *Observation) SetId(v string) *Observation { + s.Id = &v + return s +} + +// SetLineTime sets the LineTime field's value. 
+func (s *Observation) SetLineTime(v time.Time) *Observation { + s.LineTime = &v + return s +} + +// SetLogFilter sets the LogFilter field's value. +func (s *Observation) SetLogFilter(v string) *Observation { + s.LogFilter = &v + return s +} + +// SetLogGroup sets the LogGroup field's value. +func (s *Observation) SetLogGroup(v string) *Observation { + s.LogGroup = &v + return s +} + +// SetLogText sets the LogText field's value. +func (s *Observation) SetLogText(v string) *Observation { + s.LogText = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *Observation) SetMetricName(v string) *Observation { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *Observation) SetMetricNamespace(v string) *Observation { + s.MetricNamespace = &v + return s +} + +// SetSourceARN sets the SourceARN field's value. +func (s *Observation) SetSourceARN(v string) *Observation { + s.SourceARN = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *Observation) SetSourceType(v string) *Observation { + s.SourceType = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Observation) SetStartTime(v time.Time) *Observation { + s.StartTime = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Observation) SetUnit(v string) *Observation { + s.Unit = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Observation) SetValue(v float64) *Observation { + s.Value = &v + return s +} + +// Describes a problem that is detected by correlating observations. +type Problem struct { + _ struct{} `type:"structure"` + + // The resource affected by the problem. + AffectedResource *string `type:"string"` + + // The time when the problem ended, in epoch seconds. + EndTime *time.Time `type:"timestamp"` + + // Feedback provided by the user about the problem. + Feedback map[string]*string `type:"map"` + + // The ID of the problem. + Id *string `type:"string"` + + // A detailed analysis of the problem using machine learning. + Insights *string `type:"string"` + + // The name of the resource group affected by the problem. + ResourceGroupName *string `type:"string"` + + // A measure of the level of impact of the problem. + SeverityLevel *string `type:"string" enum:"SeverityLevel"` + + // The time when the problem started, in epoch seconds. + StartTime *time.Time `type:"timestamp"` + + // The status of the problem. + Status *string `type:"string" enum:"Status"` + + // The name of the problem. + Title *string `type:"string"` +} + +// String returns the string representation +func (s Problem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Problem) GoString() string { + return s.String() +} + +// SetAffectedResource sets the AffectedResource field's value. +func (s *Problem) SetAffectedResource(v string) *Problem { + s.AffectedResource = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *Problem) SetEndTime(v time.Time) *Problem { + s.EndTime = &v + return s +} + +// SetFeedback sets the Feedback field's value. +func (s *Problem) SetFeedback(v map[string]*string) *Problem { + s.Feedback = v + return s +} + +// SetId sets the Id field's value. +func (s *Problem) SetId(v string) *Problem { + s.Id = &v + return s +} + +// SetInsights sets the Insights field's value. 
+func (s *Problem) SetInsights(v string) *Problem { + s.Insights = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *Problem) SetResourceGroupName(v string) *Problem { + s.ResourceGroupName = &v + return s +} + +// SetSeverityLevel sets the SeverityLevel field's value. +func (s *Problem) SetSeverityLevel(v string) *Problem { + s.SeverityLevel = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Problem) SetStartTime(v time.Time) *Problem { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Problem) SetStatus(v string) *Problem { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *Problem) SetTitle(v string) *Problem { + s.Title = &v + return s +} + +// Describes observations related to the problem. +type RelatedObservations struct { + _ struct{} `type:"structure"` + + // The list of observations related to the problem. + ObservationList []*Observation `type:"list"` +} + +// String returns the string representation +func (s RelatedObservations) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RelatedObservations) GoString() string { + return s.String() +} + +// SetObservationList sets the ObservationList field's value. +func (s *RelatedObservations) SetObservationList(v []*Observation) *RelatedObservations { + s.ObservationList = v + return s +} + +type UpdateApplicationInput struct { + _ struct{} `type:"structure"` + + // When set to true, creates opsItems for any problems detected on an application. + OpsCenterEnabled *bool `type:"boolean"` + + // The SNS topic provided to Application Insights that is associated to the + // created opsItem. Allows you to receive notifications for updates to the opsItem. + OpsItemSNSTopicArn *string `type:"string"` + + // Disassociates the SNS topic from the opsItem created for detected problems. + RemoveSNSTopic *bool `type:"boolean"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateApplicationInput"} + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsCenterEnabled sets the OpsCenterEnabled field's value. +func (s *UpdateApplicationInput) SetOpsCenterEnabled(v bool) *UpdateApplicationInput { + s.OpsCenterEnabled = &v + return s +} + +// SetOpsItemSNSTopicArn sets the OpsItemSNSTopicArn field's value. +func (s *UpdateApplicationInput) SetOpsItemSNSTopicArn(v string) *UpdateApplicationInput { + s.OpsItemSNSTopicArn = &v + return s +} + +// SetRemoveSNSTopic sets the RemoveSNSTopic field's value. +func (s *UpdateApplicationInput) SetRemoveSNSTopic(v bool) *UpdateApplicationInput { + s.RemoveSNSTopic = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. 
+func (s *UpdateApplicationInput) SetResourceGroupName(v string) *UpdateApplicationInput { + s.ResourceGroupName = &v + return s +} + +type UpdateApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + ApplicationInfo *ApplicationInfo `type:"structure"` +} + +// String returns the string representation +func (s UpdateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationOutput) GoString() string { + return s.String() +} + +// SetApplicationInfo sets the ApplicationInfo field's value. +func (s *UpdateApplicationOutput) SetApplicationInfo(v *ApplicationInfo) *UpdateApplicationOutput { + s.ApplicationInfo = v + return s +} + +type UpdateComponentConfigurationInput struct { + _ struct{} `type:"structure"` + + // The configuration settings of the component. The value is the escaped JSON + // of the configuration. For more information about the JSON format, see Working + // with JSON (https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/working-with-json.html). + // You can send a request to DescribeComponentConfigurationRecommendation to + // see the recommended configuration for a component. + ComponentConfiguration *string `type:"string"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // Indicates whether the application component is monitored. + Monitor *bool `type:"boolean"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` + + // The tier of the application component. Supported tiers include DOT_NET_WORKER, + // DOT_NET_WEB, SQL_SERVER, and DEFAULT. + Tier *string `type:"string"` +} + +// String returns the string representation +func (s UpdateComponentConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateComponentConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateComponentConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateComponentConfigurationInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentConfiguration sets the ComponentConfiguration field's value. +func (s *UpdateComponentConfigurationInput) SetComponentConfiguration(v string) *UpdateComponentConfigurationInput { + s.ComponentConfiguration = &v + return s +} + +// SetComponentName sets the ComponentName field's value. +func (s *UpdateComponentConfigurationInput) SetComponentName(v string) *UpdateComponentConfigurationInput { + s.ComponentName = &v + return s +} + +// SetMonitor sets the Monitor field's value. +func (s *UpdateComponentConfigurationInput) SetMonitor(v bool) *UpdateComponentConfigurationInput { + s.Monitor = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *UpdateComponentConfigurationInput) SetResourceGroupName(v string) *UpdateComponentConfigurationInput { + s.ResourceGroupName = &v + return s +} + +// SetTier sets the Tier field's value. 
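+//
+// ComponentConfiguration carries escaped JSON; a hedged sketch of fetching the
+// recommended configuration and applying it (the svc client value and all
+// names are hypothetical):
+//
+//    rec, err := svc.DescribeComponentConfigurationRecommendation(
+//        &applicationinsights.DescribeComponentConfigurationRecommendationInput{
+//            ComponentName:     aws.String("my-component"),
+//            ResourceGroupName: aws.String("my-resource-group"),
+//            Tier:              aws.String("DEFAULT"),
+//        })
+//    if err == nil {
+//        _, err = svc.UpdateComponentConfiguration(
+//            &applicationinsights.UpdateComponentConfigurationInput{
+//                ComponentName:          aws.String("my-component"),
+//                ResourceGroupName:      aws.String("my-resource-group"),
+//                ComponentConfiguration: rec.ComponentConfiguration,
+//                Monitor:                aws.Bool(true),
+//            })
+//    }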
+func (s *UpdateComponentConfigurationInput) SetTier(v string) *UpdateComponentConfigurationInput { + s.Tier = &v + return s +} + +type UpdateComponentConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateComponentConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateComponentConfigurationOutput) GoString() string { + return s.String() +} + +type UpdateComponentInput struct { + _ struct{} `type:"structure"` + + // The name of the component. + // + // ComponentName is a required field + ComponentName *string `type:"string" required:"true"` + + // The new name of the component. + NewComponentName *string `type:"string"` + + // The name of the resource group. + // + // ResourceGroupName is a required field + ResourceGroupName *string `type:"string" required:"true"` + + // The list of resource ARNs that belong to the component. + ResourceList []*string `type:"list"` +} + +// String returns the string representation +func (s UpdateComponentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateComponentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateComponentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateComponentInput"} + if s.ComponentName == nil { + invalidParams.Add(request.NewErrParamRequired("ComponentName")) + } + if s.ResourceGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComponentName sets the ComponentName field's value. +func (s *UpdateComponentInput) SetComponentName(v string) *UpdateComponentInput { + s.ComponentName = &v + return s +} + +// SetNewComponentName sets the NewComponentName field's value. +func (s *UpdateComponentInput) SetNewComponentName(v string) *UpdateComponentInput { + s.NewComponentName = &v + return s +} + +// SetResourceGroupName sets the ResourceGroupName field's value. +func (s *UpdateComponentInput) SetResourceGroupName(v string) *UpdateComponentInput { + s.ResourceGroupName = &v + return s +} + +// SetResourceList sets the ResourceList field's value. 
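+//
+// ResourceList is a []*string; a hedged sketch using the aws.StringSlice
+// helper (the ARN shown is hypothetical):
+//
+//    input := &applicationinsights.UpdateComponentInput{
+//        ComponentName:     aws.String("my-component"),
+//        ResourceGroupName: aws.String("my-resource-group"),
+//        ResourceList: aws.StringSlice([]string{
+//            "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0",
+//        }),
+//    }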
+func (s *UpdateComponentInput) SetResourceList(v []*string) *UpdateComponentInput { + s.ResourceList = v + return s +} + +type UpdateComponentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateComponentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateComponentOutput) GoString() string { + return s.String() +} + +const ( + // FeedbackKeyInsightsFeedback is a FeedbackKey enum value + FeedbackKeyInsightsFeedback = "INSIGHTS_FEEDBACK" +) + +const ( + // FeedbackValueNotSpecified is a FeedbackValue enum value + FeedbackValueNotSpecified = "NOT_SPECIFIED" + + // FeedbackValueUseful is a FeedbackValue enum value + FeedbackValueUseful = "USEFUL" + + // FeedbackValueNotUseful is a FeedbackValue enum value + FeedbackValueNotUseful = "NOT_USEFUL" +) + +const ( + // LogFilterError is a LogFilter enum value + LogFilterError = "ERROR" + + // LogFilterWarn is a LogFilter enum value + LogFilterWarn = "WARN" + + // LogFilterInfo is a LogFilter enum value + LogFilterInfo = "INFO" +) + +const ( + // SeverityLevelLow is a SeverityLevel enum value + SeverityLevelLow = "LOW" + + // SeverityLevelMedium is a SeverityLevel enum value + SeverityLevelMedium = "MEDIUM" + + // SeverityLevelHigh is a SeverityLevel enum value + SeverityLevelHigh = "HIGH" +) + +const ( + // StatusIgnore is a Status enum value + StatusIgnore = "IGNORE" + + // StatusResolved is a Status enum value + StatusResolved = "RESOLVED" + + // StatusPending is a Status enum value + StatusPending = "PENDING" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/doc.go new file mode 100644 index 00000000000..70f426d54c6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/doc.go @@ -0,0 +1,41 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package applicationinsights provides the client and types for making API +// requests to Amazon CloudWatch Application Insights. +// +// Amazon CloudWatch Application Insights for .NET and SQL Server is a service +// that helps you detect common problems with your .NET and SQL Server-based +// applications. It enables you to pinpoint the source of issues in your applications +// (built with technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), +// by providing key insights into detected problems. +// +// After you onboard your application, CloudWatch Application Insights for .NET +// and SQL Server identifies, recommends, and sets up metrics and logs. It continuously +// analyzes and correlates your metrics and logs for unusual behavior to surface +// actionable problems with your application. For example, if your application +// is slow and unresponsive and leading to HTTP 500 errors in your Application +// Load Balancer (ALB), Application Insights informs you that a memory pressure +// problem with your SQL Server database is occurring. It bases this analysis +// on impactful metrics and log errors. +// +// See https://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25 for more information on this service. +// +// See applicationinsights package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationinsights/ +// +// Using the Client +// +// To contact Amazon CloudWatch Application Insights with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon CloudWatch Application Insights client ApplicationInsights for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationinsights/#New +package applicationinsights diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/errors.go new file mode 100644 index 00000000000..c75e208d945 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/errors.go @@ -0,0 +1,36 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package applicationinsights + +const ( + + // ErrCodeBadRequestException for service response error code + // "BadRequestException". + // + // The request is not understood by the server. + ErrCodeBadRequestException = "BadRequestException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // The server encountered an internal error and is unable to complete the request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The resource is already created or in use. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource does not exist in the customer account. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The parameter is not valid. + ErrCodeValidationException = "ValidationException" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go new file mode 100644 index 00000000000..b90f92dc11f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go @@ -0,0 +1,101 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package applicationinsights + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// ApplicationInsights provides the API operation methods for making requests to +// Amazon CloudWatch Application Insights. See this package's package overview docs +// for details on the service. +// +// ApplicationInsights methods are safe to use concurrently. 
It is not safe to
+// mutate any of the struct's properties though.
+type ApplicationInsights struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "Application Insights" // Name of service.
+ EndpointsID = "applicationinsights" // ID to lookup a service endpoint with.
+ ServiceID = "Application Insights" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the ApplicationInsights client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an ApplicationInsights client from just a session.
+// svc := applicationinsights.New(mySession)
+//
+// // Create an ApplicationInsights client with additional configuration
+// svc := applicationinsights.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationInsights {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "applicationinsights"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationInsights {
+ svc := &ApplicationInsights{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2018-11-25",
+ JSONVersion: "1.1",
+ TargetPrefix: "EC2WindowsBarleyService",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an ApplicationInsights operation and runs any
+// custom request initialization.
+func (c *ApplicationInsights) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go
index fc69ab8e881..8edd40ee04d 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go
@@ -288,7 +288,7 @@ func (c *AppMesh) CreateVirtualNodeRequest(input *CreateVirtualNodeInput) (req *
 //
 // A virtual node acts as a logical pointer to a particular task group, such
 // as an Amazon ECS service or a Kubernetes deployment.
When you create a virtual -// node, you must specify the DNS service discovery hostname for your task group. +// node, you can specify the service discovery information for your task group. // // Any inbound traffic that your virtual node expects should be specified as // a listener. Any outbound traffic that your virtual node expects to reach @@ -1702,7 +1702,7 @@ func (c *AppMesh) ListMeshesWithContext(ctx aws.Context, input *ListMeshesInput, // // Example iterating over at most 3 pages of a ListMeshes operation. // pageNum := 0 // err := client.ListMeshesPages(params, -// func(page *ListMeshesOutput, lastPage bool) bool { +// func(page *appmesh.ListMeshesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1734,10 +1734,12 @@ func (c *AppMesh) ListMeshesPagesWithContext(ctx aws.Context, input *ListMeshesI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMeshesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMeshesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1855,7 +1857,7 @@ func (c *AppMesh) ListRoutesWithContext(ctx aws.Context, input *ListRoutesInput, // // Example iterating over at most 3 pages of a ListRoutes operation. // pageNum := 0 // err := client.ListRoutesPages(params, -// func(page *ListRoutesOutput, lastPage bool) bool { +// func(page *appmesh.ListRoutesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1887,10 +1889,12 @@ func (c *AppMesh) ListRoutesPagesWithContext(ctx aws.Context, input *ListRoutesI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRoutesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRoutesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1957,6 +1961,9 @@ func (c *AppMesh) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. @@ -1967,6 +1974,11 @@ func (c *AppMesh) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListTagsForResource func (c *AppMesh) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) @@ -2000,7 +2012,7 @@ func (c *AppMesh) ListTagsForResourceWithContext(ctx aws.Context, input *ListTag // // Example iterating over at most 3 pages of a ListTagsForResource operation. 
// pageNum := 0 // err := client.ListTagsForResourcePages(params, -// func(page *ListTagsForResourceOutput, lastPage bool) bool { +// func(page *appmesh.ListTagsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2032,10 +2044,12 @@ func (c *AppMesh) ListTagsForResourcePagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2153,7 +2167,7 @@ func (c *AppMesh) ListVirtualNodesWithContext(ctx aws.Context, input *ListVirtua // // Example iterating over at most 3 pages of a ListVirtualNodes operation. // pageNum := 0 // err := client.ListVirtualNodesPages(params, -// func(page *ListVirtualNodesOutput, lastPage bool) bool { +// func(page *appmesh.ListVirtualNodesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2185,10 +2199,12 @@ func (c *AppMesh) ListVirtualNodesPagesWithContext(ctx aws.Context, input *ListV }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVirtualNodesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVirtualNodesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2306,7 +2322,7 @@ func (c *AppMesh) ListVirtualRoutersWithContext(ctx aws.Context, input *ListVirt // // Example iterating over at most 3 pages of a ListVirtualRouters operation. // pageNum := 0 // err := client.ListVirtualRoutersPages(params, -// func(page *ListVirtualRoutersOutput, lastPage bool) bool { +// func(page *appmesh.ListVirtualRoutersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2338,10 +2354,12 @@ func (c *AppMesh) ListVirtualRoutersPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVirtualRoutersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVirtualRoutersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2459,7 +2477,7 @@ func (c *AppMesh) ListVirtualServicesWithContext(ctx aws.Context, input *ListVir // // Example iterating over at most 3 pages of a ListVirtualServices operation. // pageNum := 0 // err := client.ListVirtualServicesPages(params, -// func(page *ListVirtualServicesOutput, lastPage bool) bool { +// func(page *appmesh.ListVirtualServicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2491,10 +2509,12 @@ func (c *AppMesh) ListVirtualServicesPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVirtualServicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVirtualServicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2559,6 +2579,9 @@ func (c *AppMesh) TagResourceRequest(input *TagResourceInput) (req *request.Requ // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. 
@@ -2569,6 +2592,11 @@ func (c *AppMesh) TagResourceRequest(input *TagResourceInput) (req *request.Requ // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // * ErrCodeTooManyTagsException "TooManyTagsException" // The request exceeds the maximum allowed number of tags allowed per resource. // The current limit is 50 user tags per resource. You must reduce the number @@ -2654,6 +2682,9 @@ func (c *AppMesh) UntagResourceRequest(input *UntagResourceInput) (req *request. // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. @@ -2664,6 +2695,11 @@ func (c *AppMesh) UntagResourceRequest(input *UntagResourceInput) (req *request. // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UntagResource func (c *AppMesh) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -3216,11 +3252,11 @@ func (c *AppMesh) UpdateVirtualServiceWithContext(ctx aws.Context, input *Update return out, req.Send() } -// An object representing the access logging information for a virtual node. +// An object that represents the access logging information for a virtual node. type AccessLog struct { _ struct{} `type:"structure"` - // The file object to send virtual node access logs to. + // An object that represents an access log file. File *FileAccessLog `locationName:"file" type:"structure"` } @@ -3255,12 +3291,142 @@ func (s *AccessLog) SetFile(v *FileAccessLog) *AccessLog { return s } -// An object representing the backends that a virtual node is expected to send -// outbound traffic to. +// An object that represents the AWS Cloud Map attribute information for your +// virtual node. +type AwsCloudMapInstanceAttribute struct { + _ struct{} `type:"structure"` + + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AwsCloudMapInstanceAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudMapInstanceAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
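+//
+// A hedged sketch of supplying these attributes through a virtual node's AWS
+// Cloud Map service discovery (the namespace, service, and attribute values
+// are hypothetical):
+//
+//    sd := (&appmesh.AwsCloudMapServiceDiscovery{}).
+//        SetNamespaceName("my-namespace").
+//        SetServiceName("my-service").
+//        SetAttributes([]*appmesh.AwsCloudMapInstanceAttribute{
+//            (&appmesh.AwsCloudMapInstanceAttribute{}).SetKey("stage").SetValue("prod"),
+//        })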
+func (s *AwsCloudMapInstanceAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsCloudMapInstanceAttribute"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AwsCloudMapInstanceAttribute) SetKey(v string) *AwsCloudMapInstanceAttribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AwsCloudMapInstanceAttribute) SetValue(v string) *AwsCloudMapInstanceAttribute { + s.Value = &v + return s +} + +// An object that represents the AWS Cloud Map service discovery information +// for your virtual node. +type AwsCloudMapServiceDiscovery struct { + _ struct{} `type:"structure"` + + Attributes []*AwsCloudMapInstanceAttribute `locationName:"attributes" type:"list"` + + // NamespaceName is a required field + NamespaceName *string `locationName:"namespaceName" min:"1" type:"string" required:"true"` + + // ServiceName is a required field + ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AwsCloudMapServiceDiscovery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudMapServiceDiscovery) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsCloudMapServiceDiscovery) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsCloudMapServiceDiscovery"} + if s.NamespaceName == nil { + invalidParams.Add(request.NewErrParamRequired("NamespaceName")) + } + if s.NamespaceName != nil && len(*s.NamespaceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamespaceName", 1)) + } + if s.ServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceName")) + } + if s.ServiceName != nil && len(*s.ServiceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1)) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *AwsCloudMapServiceDiscovery) SetAttributes(v []*AwsCloudMapInstanceAttribute) *AwsCloudMapServiceDiscovery { + s.Attributes = v + return s +} + +// SetNamespaceName sets the NamespaceName field's value. +func (s *AwsCloudMapServiceDiscovery) SetNamespaceName(v string) *AwsCloudMapServiceDiscovery { + s.NamespaceName = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *AwsCloudMapServiceDiscovery) SetServiceName(v string) *AwsCloudMapServiceDiscovery { + s.ServiceName = &v + return s +} + +// An object that represents the backends that a virtual node is expected to +// send outbound traffic to. type Backend struct { _ struct{} `type:"structure"` - // Specifies a virtual service to use as a backend for a virtual node. 
+ // An object that represents a virtual service backend for a virtual node. VirtualService *VirtualServiceBackend `locationName:"virtualService" type:"structure"` } @@ -3298,22 +3464,14 @@ func (s *Backend) SetVirtualService(v *VirtualServiceBackend) *Backend { type CreateMeshInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name to use for the service mesh. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The service mesh specification to apply. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` - // Optional metadata that you can apply to the service mesh to assist with categorization - // and organization. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. Tags []*TagRef `locationName:"tags" type:"list"` } @@ -3385,7 +3543,7 @@ func (s *CreateMeshInput) SetTags(v []*TagRef) *CreateMeshInput { type CreateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // The full description of your service mesh following the create call. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -3410,33 +3568,21 @@ func (s *CreateMeshOutput) SetMesh(v *MeshData) *CreateMeshOutput { type CreateRouteInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh to create the route in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The name to use for the route. - // // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // The route specification to apply. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` - // Optional metadata that you can apply to the route to assist with categorization - // and organization. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. Tags []*TagRef `locationName:"tags" type:"list"` - // The name of the virtual router in which to create the route. - // // VirtualRouterName is a required field VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -3536,7 +3682,7 @@ func (s *CreateRouteInput) SetVirtualRouterName(v string) *CreateRouteInput { type CreateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // The full description of your mesh following the create call. 
+	// An object that represents a route returned by a describe operation.
 	//
 	// Route is a required field
 	Route *RouteData `locationName:"route" type:"structure" required:"true"`
@@ -3561,28 +3707,18 @@ func (s *CreateRouteOutput) SetRoute(v *RouteData) *CreateRouteOutput {
 type CreateVirtualNodeInput struct {
 	_ struct{} `type:"structure"`
 
-	// Unique, case-sensitive identifier that you provide to ensure the idempotency
-	// of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.
 	ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"`
 
-	// The name of the service mesh to create the virtual node in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The virtual node specification to apply.
+	// An object that represents the specification of a virtual node.
 	//
 	// Spec is a required field
 	Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"`
 
-	// Optional metadata that you can apply to the virtual node to assist with categorization
-	// and organization. Each tag consists of a key and an optional value, both
-	// of which you define. Tag keys can have a maximum character length of 128
-	// characters, and tag values can have a maximum length of 256 characters.
 	Tags []*TagRef `locationName:"tags" type:"list"`
 
-	// The name to use for the virtual node.
-	//
 	// VirtualNodeName is a required field
 	VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"`
 }
 
@@ -3670,7 +3806,7 @@ func (s *CreateVirtualNodeInput) SetVirtualNodeName(v string) *CreateVirtualNode
 type CreateVirtualNodeOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualNode"`
 
-	// The full description of your virtual node following the create call.
+	// An object that represents a virtual node returned by a describe operation.
 	//
 	// VirtualNode is a required field
 	VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"`
@@ -3695,28 +3831,18 @@ func (s *CreateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *CreateVirt
 type CreateVirtualRouterInput struct {
 	_ struct{} `type:"structure"`
 
-	// Unique, case-sensitive identifier that you provide to ensure the idempotency
-	// of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.
 	ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"`
 
-	// The name of the service mesh to create the virtual router in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The virtual router specification to apply.
+	// An object that represents the specification of a virtual router.
 	//
 	// Spec is a required field
 	Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"`
 
-	// Optional metadata that you can apply to the virtual router to assist with
-	// categorization and organization. Each tag consists of a key and an optional
-	// value, both of which you define. Tag keys can have a maximum character length
-	// of 128 characters, and tag values can have a maximum length of 256 characters.
 	Tags []*TagRef `locationName:"tags" type:"list"`
 
-	// The name to use for the virtual router.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -3804,7 +3930,7 @@ func (s *CreateVirtualRouterInput) SetVirtualRouterName(v string) *CreateVirtual
 type CreateVirtualRouterOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualRouter"`
 
-	// The full description of your virtual router following the create call.
+	// An object that represents a virtual router returned by a describe operation.
 	//
 	// VirtualRouter is a required field
 	VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"`
@@ -3829,28 +3955,18 @@ func (s *CreateVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *Crea
 type CreateVirtualServiceInput struct {
 	_ struct{} `type:"structure"`
 
-	// Unique, case-sensitive identifier that you provide to ensure the idempotency
-	// of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.
 	ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"`
 
-	// The name of the service mesh to create the virtual service in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The virtual service specification to apply.
+	// An object that represents the specification of a virtual service.
 	//
 	// Spec is a required field
 	Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"`
 
-	// Optional metadata that you can apply to the virtual service to assist with
-	// categorization and organization. Each tag consists of a key and an optional
-	// value, both of which you define. Tag keys can have a maximum character length
-	// of 128 characters, and tag values can have a maximum length of 256 characters.
 	Tags []*TagRef `locationName:"tags" type:"list"`
 
-	// The name to use for the virtual service.
-	//
 	// VirtualServiceName is a required field
 	VirtualServiceName *string `locationName:"virtualServiceName" type:"string" required:"true"`
 }
 
@@ -3935,7 +4051,7 @@ func (s *CreateVirtualServiceInput) SetVirtualServiceName(v string) *CreateVirtu
 type CreateVirtualServiceOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualService"`
 
-	// The full description of your virtual service following the create call.
+	// An object that represents a virtual service returned by a describe operation.
 	//
 	// VirtualService is a required field
 	VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"`
@@ -3960,8 +4076,6 @@ func (s *CreateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *C
 type DeleteMeshInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to delete.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 }
 
@@ -4001,7 +4115,7 @@ func (s *DeleteMeshInput) SetMeshName(v string) *DeleteMeshInput {
 type DeleteMeshOutput struct {
 	_ struct{} `type:"structure" payload:"Mesh"`
 
-	// The service mesh that was deleted.
+	// An object that represents a service mesh returned by a describe operation.
 	//
 	// Mesh is a required field
 	Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"`
@@ -4026,18 +4140,12 @@ func (s *DeleteMeshOutput) SetMesh(v *MeshData) *DeleteMeshOutput {
 type DeleteRouteInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to delete the route in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the route to delete.
-	//
 	// RouteName is a required field
 	RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual router to delete the route in.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -4101,7 +4209,7 @@ func (s *DeleteRouteInput) SetVirtualRouterName(v string) *DeleteRouteInput {
 type DeleteRouteOutput struct {
 	_ struct{} `type:"structure" payload:"Route"`
 
-	// The route that was deleted.
+	// An object that represents a route returned by a describe operation.
 	//
 	// Route is a required field
 	Route *RouteData `locationName:"route" type:"structure" required:"true"`
@@ -4126,13 +4234,9 @@ func (s *DeleteRouteOutput) SetRoute(v *RouteData) *DeleteRouteOutput {
 type DeleteVirtualNodeInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to delete the virtual node in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual node to delete.
-	//
 	// VirtualNodeName is a required field
 	VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"`
 }
 
@@ -4184,7 +4288,7 @@ func (s *DeleteVirtualNodeInput) SetVirtualNodeName(v string) *DeleteVirtualNode
 type DeleteVirtualNodeOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualNode"`
 
-	// The virtual node that was deleted.
+	// An object that represents a virtual node returned by a describe operation.
 	//
 	// VirtualNode is a required field
 	VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"`
@@ -4209,13 +4313,9 @@ func (s *DeleteVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *DeleteVirt
 type DeleteVirtualRouterInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to delete the virtual router in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual router to delete.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -4267,7 +4367,7 @@ func (s *DeleteVirtualRouterInput) SetVirtualRouterName(v string) *DeleteVirtual
 type DeleteVirtualRouterOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualRouter"`
 
-	// The virtual router that was deleted.
+	// An object that represents a virtual router returned by a describe operation.
 	//
 	// VirtualRouter is a required field
 	VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"`
@@ -4292,13 +4392,9 @@ func (s *DeleteVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *Dele
 type DeleteVirtualServiceInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to delete the virtual service in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual service to delete.
-	//
 	// VirtualServiceName is a required field
 	VirtualServiceName *string `location:"uri" locationName:"virtualServiceName" type:"string" required:"true"`
 }
 
@@ -4350,7 +4446,7 @@ func (s *DeleteVirtualServiceInput) SetVirtualServiceName(v string) *DeleteVirtu
 type DeleteVirtualServiceOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualService"`
 
-	// The virtual service that was deleted.
+	// An object that represents a virtual service returned by a describe operation.
 	//
 	// VirtualService is a required field
 	VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"`
@@ -4375,8 +4471,6 @@ func (s *DeleteVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *D
 type DescribeMeshInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh to describe.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 }
 
@@ -4416,7 +4510,7 @@ func (s *DescribeMeshInput) SetMeshName(v string) *DescribeMeshInput {
 type DescribeMeshOutput struct {
 	_ struct{} `type:"structure" payload:"Mesh"`
 
-	// The full description of your service mesh.
+	// An object that represents a service mesh returned by a describe operation.
 	//
 	// Mesh is a required field
 	Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"`
@@ -4441,18 +4535,12 @@ func (s *DescribeMeshOutput) SetMesh(v *MeshData) *DescribeMeshOutput {
 type DescribeRouteInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh that the route resides in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the route to describe.
-	//
 	// RouteName is a required field
 	RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual router that the route is associated with.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -4516,7 +4604,7 @@ func (s *DescribeRouteInput) SetVirtualRouterName(v string) *DescribeRouteInput
 type DescribeRouteOutput struct {
 	_ struct{} `type:"structure" payload:"Route"`
 
-	// The full description of your route.
+	// An object that represents a route returned by a describe operation.
 	//
 	// Route is a required field
 	Route *RouteData `locationName:"route" type:"structure" required:"true"`
@@ -4541,13 +4629,9 @@ func (s *DescribeRouteOutput) SetRoute(v *RouteData) *DescribeRouteOutput {
 type DescribeVirtualNodeInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh that the virtual node resides in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual node to describe.
-	//
 	// VirtualNodeName is a required field
 	VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"`
 }
 
@@ -4599,7 +4683,7 @@ func (s *DescribeVirtualNodeInput) SetVirtualNodeName(v string) *DescribeVirtual
 type DescribeVirtualNodeOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualNode"`
 
-	// The full description of your virtual node.
+	// An object that represents a virtual node returned by a describe operation.
 	//
 	// VirtualNode is a required field
 	VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"`
@@ -4624,13 +4708,9 @@ func (s *DescribeVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *Describe
 type DescribeVirtualRouterInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh that the virtual router resides in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual router to describe.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -4682,7 +4762,7 @@ func (s *DescribeVirtualRouterInput) SetVirtualRouterName(v string) *DescribeVir
 type DescribeVirtualRouterOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualRouter"`
 
-	// The full description of your virtual router.
+	// An object that represents a virtual router returned by a describe operation.
 	//
 	// VirtualRouter is a required field
 	VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"`
@@ -4707,13 +4787,9 @@ func (s *DescribeVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *De
 type DescribeVirtualServiceInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the service mesh that the virtual service resides in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The name of the virtual service to describe.
-	//
 	// VirtualServiceName is a required field
 	VirtualServiceName *string `location:"uri" locationName:"virtualServiceName" type:"string" required:"true"`
 }
 
@@ -4765,7 +4841,7 @@ func (s *DescribeVirtualServiceInput) SetVirtualServiceName(v string) *DescribeV
 type DescribeVirtualServiceOutput struct {
 	_ struct{} `type:"structure" payload:"VirtualService"`
 
-	// The full description of your virtual service.
+	// An object that represents a virtual service returned by a describe operation.
 	//
 	// VirtualService is a required field
 	VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"`
@@ -4787,13 +4863,11 @@ func (s *DescribeVirtualServiceOutput) SetVirtualService(v *VirtualServiceData)
 	return s
 }
 
-// An object representing the DNS service discovery information for your virtual
-// node.
+// An object that represents the DNS service discovery information for your
+// virtual node.
 type DnsServiceDiscovery struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies the DNS service discovery hostname for the virtual node.
-	//
 	// Hostname is a required field
 	Hostname *string `locationName:"hostname" type:"string" required:"true"`
 }
 
@@ -4827,16 +4901,41 @@ func (s *DnsServiceDiscovery) SetHostname(v string) *DnsServiceDiscovery {
 	return s
 }
 
-// An object representing the egress filter rules for a service mesh.
+// An object that represents a duration of time.
+type Duration struct {
+	_ struct{} `type:"structure"`
+
+	Unit *string `locationName:"unit" type:"string" enum:"DurationUnit"`
+
+	Value *int64 `locationName:"value" type:"long"`
+}
+
+// String returns the string representation
+func (s Duration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Duration) GoString() string {
+	return s.String()
+}
+
+// SetUnit sets the Unit field's value.
+func (s *Duration) SetUnit(v string) *Duration {
+	s.Unit = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Duration) SetValue(v int64) *Duration {
+	s.Value = &v
+	return s
+}
+
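A small sketch of the new Duration type in use; the "ms" unit string is an assumption intended to match the DurationUnit enum values defined elsewhere in this package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// A 250 ms duration; Unit is an enum-tagged string, Value a plain count.
	d := (&appmesh.Duration{}).
		SetUnit("ms"). // assumed DurationUnit enum value ("ms" or "s")
		SetValue(250)
	fmt.Println(d) // String() pretty-prints via awsutil.Prettify
}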
+// An object that represents the egress filter rules for a service mesh.
 type EgressFilter struct {
 	_ struct{} `type:"structure"`
 
-	// The egress filter type. By default, the type is DROP_ALL, which allows egress
-	// only from virtual nodes to other defined resources in the service mesh (and
-	// any traffic to *.amazonaws.com for AWS API calls). You can set the egress
-	// filter type to ALLOW_ALL to allow egress to any endpoint inside or outside
-	// of the service mesh.
-	//
 	// Type is a required field
 	Type *string `locationName:"type" type:"string" required:"true" enum:"EgressFilterType"`
 }
 
@@ -4870,19 +4969,10 @@ func (s *EgressFilter) SetType(v string) *EgressFilter {
 	return s
 }
 
-// An object representing an access log file.
+// An object that represents an access log file.
 type FileAccessLog struct {
 	_ struct{} `type:"structure"`
 
-	// The file path to write access logs to. You can use /dev/stdout to send access
-	// logs to standard out and configure your Envoy container to use a log driver,
-	// such as awslogs, to export the access logs to a log storage service such
-	// as Amazon CloudWatch Logs. You can also specify a path in the Envoy container's
-	// file system to write the files to disk.
-	//
-	// The Envoy process must have write permissions to the path that you specify
-	// here. Otherwise, Envoy fails to bootstrap properly.
-	//
 	// Path is a required field
 	Path *string `locationName:"path" min:"1" type:"string" required:"true"`
 }
 
@@ -4919,90 +5009,54 @@ func (s *FileAccessLog) SetPath(v string) *FileAccessLog {
 	return s
 }
 
-// An object representing the health check policy for a virtual node's listener.
-type HealthCheckPolicy struct {
+// An object that represents a retry policy. Specify at least one value for
+// at least one of the types of RetryEvents, a value for maxRetries, and a value
+// for perRetryTimeout.
+type GrpcRetryPolicy struct {
 	_ struct{} `type:"structure"`
 
-	// The number of consecutive successful health checks that must occur before
-	// declaring listener healthy.
-	//
-	// HealthyThreshold is a required field
-	HealthyThreshold *int64 `locationName:"healthyThreshold" min:"2" type:"integer" required:"true"`
-
-	// The time period in milliseconds between each health check execution.
-	//
-	// IntervalMillis is a required field
-	IntervalMillis *int64 `locationName:"intervalMillis" min:"5000" type:"long" required:"true"`
-
-	// The destination path for the health check request. This is required only
-	// if the specified protocol is HTTP. If the protocol is TCP, this parameter
-	// is ignored.
-	Path *string `locationName:"path" type:"string"`
+	GrpcRetryEvents []*string `locationName:"grpcRetryEvents" min:"1" type:"list"`
 
-	// The destination port for the health check request. This port must match the
-	// port defined in the PortMapping for the listener.
-	Port *int64 `locationName:"port" min:"1" type:"integer"`
+	HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"`
 
-	// The protocol for the health check request.
-	//
-	// Protocol is a required field
-	Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"`
+	// MaxRetries is a required field
+	MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"`
 
-	// The amount of time to wait when receiving a response from the health check,
-	// in milliseconds.
+	// An object that represents a duration of time.
 	//
-	// TimeoutMillis is a required field
-	TimeoutMillis *int64 `locationName:"timeoutMillis" min:"2000" type:"long" required:"true"`
+	// PerRetryTimeout is a required field
+	PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"`
 
-	// The number of consecutive failed health checks that must occur before declaring
-	// a virtual node unhealthy.
-	//
-	// UnhealthyThreshold is a required field
-	UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" min:"2" type:"integer" required:"true"`
+	TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"`
 }
 
 // String returns the string representation
-func (s HealthCheckPolicy) String() string {
+func (s GrpcRetryPolicy) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s HealthCheckPolicy) GoString() string {
+func (s GrpcRetryPolicy) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *HealthCheckPolicy) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "HealthCheckPolicy"}
-	if s.HealthyThreshold == nil {
-		invalidParams.Add(request.NewErrParamRequired("HealthyThreshold"))
-	}
-	if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 {
-		invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2))
-	}
-	if s.IntervalMillis == nil {
-		invalidParams.Add(request.NewErrParamRequired("IntervalMillis"))
-	}
-	if s.IntervalMillis != nil && *s.IntervalMillis < 5000 {
-		invalidParams.Add(request.NewErrParamMinValue("IntervalMillis", 5000))
-	}
-	if s.Port != nil && *s.Port < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+func (s *GrpcRetryPolicy) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRetryPolicy"}
+	if s.GrpcRetryEvents != nil && len(s.GrpcRetryEvents) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GrpcRetryEvents", 1))
 	}
-	if s.Protocol == nil {
-		invalidParams.Add(request.NewErrParamRequired("Protocol"))
-	}
-	if s.TimeoutMillis == nil {
-		invalidParams.Add(request.NewErrParamRequired("TimeoutMillis"))
+	if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1))
 	}
-	if s.TimeoutMillis != nil && *s.TimeoutMillis < 2000 {
-		invalidParams.Add(request.NewErrParamMinValue("TimeoutMillis", 2000))
+	if s.MaxRetries == nil {
+		invalidParams.Add(request.NewErrParamRequired("MaxRetries"))
 	}
-	if s.UnhealthyThreshold == nil {
-		invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold"))
+	if s.PerRetryTimeout == nil {
+		invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout"))
 	}
-	if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 {
-		invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2))
+	if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1))
 	}
 
 	if invalidParams.Len() > 0 {
@@ -5011,70 +5065,684 @@ func (s *HealthCheckPolicy) Validate() error {
 	return nil
 }
 
-// SetHealthyThreshold sets the HealthyThreshold field's value.
-func (s *HealthCheckPolicy) SetHealthyThreshold(v int64) *HealthCheckPolicy {
-	s.HealthyThreshold = &v
-	return s
-}
-
-// SetIntervalMillis sets the IntervalMillis field's value.
-func (s *HealthCheckPolicy) SetIntervalMillis(v int64) *HealthCheckPolicy {
-	s.IntervalMillis = &v
-	return s
-}
-
-// SetPath sets the Path field's value.
-func (s *HealthCheckPolicy) SetPath(v string) *HealthCheckPolicy {
-	s.Path = &v
+// SetGrpcRetryEvents sets the GrpcRetryEvents field's value.
+func (s *GrpcRetryPolicy) SetGrpcRetryEvents(v []*string) *GrpcRetryPolicy {
+	s.GrpcRetryEvents = v
 	return s
 }
 
-// SetPort sets the Port field's value.
-func (s *HealthCheckPolicy) SetPort(v int64) *HealthCheckPolicy {
-	s.Port = &v
+// SetHttpRetryEvents sets the HttpRetryEvents field's value.
+func (s *GrpcRetryPolicy) SetHttpRetryEvents(v []*string) *GrpcRetryPolicy {
+	s.HttpRetryEvents = v
 	return s
 }
 
-// SetProtocol sets the Protocol field's value.
-func (s *HealthCheckPolicy) SetProtocol(v string) *HealthCheckPolicy {
-	s.Protocol = &v
+// SetMaxRetries sets the MaxRetries field's value.
+func (s *GrpcRetryPolicy) SetMaxRetries(v int64) *GrpcRetryPolicy {
+	s.MaxRetries = &v
 	return s
 }
 
-// SetTimeoutMillis sets the TimeoutMillis field's value.
-func (s *HealthCheckPolicy) SetTimeoutMillis(v int64) *HealthCheckPolicy {
-	s.TimeoutMillis = &v
+// SetPerRetryTimeout sets the PerRetryTimeout field's value.
+func (s *GrpcRetryPolicy) SetPerRetryTimeout(v *Duration) *GrpcRetryPolicy {
+	s.PerRetryTimeout = v
 	return s
 }
 
-// SetUnhealthyThreshold sets the UnhealthyThreshold field's value.
-func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy {
-	s.UnhealthyThreshold = &v
+// SetTcpRetryEvents sets the TcpRetryEvents field's value.
+func (s *GrpcRetryPolicy) SetTcpRetryEvents(v []*string) *GrpcRetryPolicy {
+	s.TcpRetryEvents = v
 	return s
 }
 
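Putting the type above together, a hedged sketch of a gRPC retry policy; the "unavailable" event name is an assumption about the service's documented GrpcRetryEvents values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// maxRetries and perRetryTimeout are required; the event lists are
	// optional but, when present, must be non-empty (see Validate above).
	rp := (&appmesh.GrpcRetryPolicy{}).
		SetMaxRetries(3).
		SetPerRetryTimeout((&appmesh.Duration{}).SetUnit("ms").SetValue(250)).
		SetGrpcRetryEvents(aws.StringSlice([]string{"unavailable"})) // assumed event name

	if err := rp.Validate(); err != nil {
		fmt.Println("invalid retry policy:", err)
	}
}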
-// An object representing the HTTP routing specification for a route.
-type HttpRoute struct {
+// An object that represents a GRPC route type.
+type GrpcRoute struct {
 	_ struct{} `type:"structure"`
 
-	// The action to take if a match is determined.
+	// An object that represents the action to take if a match is determined.
 	//
 	// Action is a required field
-	Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"`
+	Action *GrpcRouteAction `locationName:"action" type:"structure" required:"true"`
 
-	// The criteria for determining an HTTP request match.
+	// An object that represents the criteria for determining a request match.
 	//
 	// Match is a required field
-	Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"`
+	Match *GrpcRouteMatch `locationName:"match" type:"structure" required:"true"`
+
+	// An object that represents a retry policy. Specify at least one value for
+	// at least one of the types of RetryEvents, a value for maxRetries, and a value
+	// for perRetryTimeout.
+	RetryPolicy *GrpcRetryPolicy `locationName:"retryPolicy" type:"structure"`
 }
 
 // String returns the string representation
-func (s HttpRoute) String() string {
+func (s GrpcRoute) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s HttpRoute) GoString() string {
+func (s GrpcRoute) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrpcRoute) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRoute"}
+	if s.Action == nil {
+		invalidParams.Add(request.NewErrParamRequired("Action"))
+	}
+	if s.Match == nil {
+		invalidParams.Add(request.NewErrParamRequired("Match"))
+	}
+	if s.Action != nil {
+		if err := s.Action.Validate(); err != nil {
+			invalidParams.AddNested("Action", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Match != nil {
+		if err := s.Match.Validate(); err != nil {
+			invalidParams.AddNested("Match", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.RetryPolicy != nil {
+		if err := s.RetryPolicy.Validate(); err != nil {
+			invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAction sets the Action field's value.
+func (s *GrpcRoute) SetAction(v *GrpcRouteAction) *GrpcRoute {
+	s.Action = v
+	return s
+}
+
+// SetMatch sets the Match field's value.
+func (s *GrpcRoute) SetMatch(v *GrpcRouteMatch) *GrpcRoute {
+	s.Match = v
+	return s
+}
+
+// SetRetryPolicy sets the RetryPolicy field's value.
+func (s *GrpcRoute) SetRetryPolicy(v *GrpcRetryPolicy) *GrpcRoute {
+	s.RetryPolicy = v
+	return s
+}
+
+// An object that represents the action to take if a match is determined.
+type GrpcRouteAction struct {
+	_ struct{} `type:"structure"`
+
+	// WeightedTargets is a required field
+	WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GrpcRouteAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrpcRouteAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrpcRouteAction) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRouteAction"}
+	if s.WeightedTargets == nil {
+		invalidParams.Add(request.NewErrParamRequired("WeightedTargets"))
+	}
+	if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1))
+	}
+	if s.WeightedTargets != nil {
+		for i, v := range s.WeightedTargets {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetWeightedTargets sets the WeightedTargets field's value.
+func (s *GrpcRouteAction) SetWeightedTargets(v []*WeightedTarget) *GrpcRouteAction {
+	s.WeightedTargets = v
+	return s
+}
+
+// An object that represents the criteria for determining a request match.
+type GrpcRouteMatch struct {
+	_ struct{} `type:"structure"`
+
+	Metadata []*GrpcRouteMetadata `locationName:"metadata" min:"1" type:"list"`
+
+	MethodName *string `locationName:"methodName" min:"1" type:"string"`
+
+	ServiceName *string `locationName:"serviceName" type:"string"`
+}
+
+// String returns the string representation
+func (s GrpcRouteMatch) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrpcRouteMatch) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrpcRouteMatch) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMatch"}
+	if s.Metadata != nil && len(s.Metadata) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Metadata", 1))
+	}
+	if s.MethodName != nil && len(*s.MethodName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MethodName", 1))
+	}
+	if s.Metadata != nil {
+		for i, v := range s.Metadata {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metadata", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GrpcRouteMatch) SetMetadata(v []*GrpcRouteMetadata) *GrpcRouteMatch {
+	s.Metadata = v
+	return s
+}
+
+// SetMethodName sets the MethodName field's value.
+func (s *GrpcRouteMatch) SetMethodName(v string) *GrpcRouteMatch {
+	s.MethodName = &v
+	return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *GrpcRouteMatch) SetServiceName(v string) *GrpcRouteMatch {
+	s.ServiceName = &v
+	return s
+}
+
+// An object that represents the match metadata for the route.
+type GrpcRouteMetadata struct {
+	_ struct{} `type:"structure"`
+
+	Invert *bool `locationName:"invert" type:"boolean"`
+
+	// An object that represents the match method. Specify one of the match values.
+	Match *GrpcRouteMetadataMatchMethod `locationName:"match" type:"structure"`
+
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GrpcRouteMetadata) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrpcRouteMetadata) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrpcRouteMetadata) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadata"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.Match != nil {
+		if err := s.Match.Validate(); err != nil {
+			invalidParams.AddNested("Match", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetInvert sets the Invert field's value.
+func (s *GrpcRouteMetadata) SetInvert(v bool) *GrpcRouteMetadata {
+	s.Invert = &v
+	return s
+}
+
+// SetMatch sets the Match field's value.
+func (s *GrpcRouteMetadata) SetMatch(v *GrpcRouteMetadataMatchMethod) *GrpcRouteMetadata {
+	s.Match = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *GrpcRouteMetadata) SetName(v string) *GrpcRouteMetadata {
+	s.Name = &v
+	return s
+}
+
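A sketch of a metadata match, combining GrpcRouteMatch with the GrpcRouteMetadataMatchMethod type defined just below; the gRPC service and metadata key names are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// Route gRPC calls for one service when the x-env metadata key is
	// exactly "canary"; Invert(false) keeps the match un-negated.
	m := (&appmesh.GrpcRouteMatch{}).
		SetServiceName("com.example.Orders"). // hypothetical gRPC service
		SetMetadata([]*appmesh.GrpcRouteMetadata{
			(&appmesh.GrpcRouteMetadata{}).
				SetName("x-env").
				SetInvert(false).
				SetMatch((&appmesh.GrpcRouteMetadataMatchMethod{}).SetExact("canary")),
		})

	if err := m.Validate(); err != nil {
		fmt.Println("invalid match:", err)
	}
}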
+// An object that represents the match method. Specify one of the match values.
+type GrpcRouteMetadataMatchMethod struct {
+	_ struct{} `type:"structure"`
+
+	Exact *string `locationName:"exact" min:"1" type:"string"`
+
+	Prefix *string `locationName:"prefix" min:"1" type:"string"`
+
+	// An object that represents the range of values to match on. The first character
+	// of the range is included in the range, though the last character is not.
+	// For example, if the range specified were 1-100, only values 1-99 would be
+	// matched.
+	Range *MatchRange `locationName:"range" type:"structure"`
+
+	Regex *string `locationName:"regex" min:"1" type:"string"`
+
+	Suffix *string `locationName:"suffix" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GrpcRouteMetadataMatchMethod) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrpcRouteMetadataMatchMethod) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrpcRouteMetadataMatchMethod) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadataMatchMethod"}
+	if s.Exact != nil && len(*s.Exact) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Exact", 1))
+	}
+	if s.Prefix != nil && len(*s.Prefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Prefix", 1))
+	}
+	if s.Regex != nil && len(*s.Regex) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Regex", 1))
+	}
+	if s.Suffix != nil && len(*s.Suffix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Suffix", 1))
+	}
+	if s.Range != nil {
+		if err := s.Range.Validate(); err != nil {
+			invalidParams.AddNested("Range", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetExact sets the Exact field's value.
+func (s *GrpcRouteMetadataMatchMethod) SetExact(v string) *GrpcRouteMetadataMatchMethod {
+	s.Exact = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *GrpcRouteMetadataMatchMethod) SetPrefix(v string) *GrpcRouteMetadataMatchMethod {
+	s.Prefix = &v
+	return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GrpcRouteMetadataMatchMethod) SetRange(v *MatchRange) *GrpcRouteMetadataMatchMethod {
+	s.Range = v
+	return s
+}
+
+// SetRegex sets the Regex field's value.
+func (s *GrpcRouteMetadataMatchMethod) SetRegex(v string) *GrpcRouteMetadataMatchMethod {
+	s.Regex = &v
+	return s
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *GrpcRouteMetadataMatchMethod) SetSuffix(v string) *GrpcRouteMetadataMatchMethod {
+	s.Suffix = &v
+	return s
+}
+
+// An object that represents the method and value to match with the header value
+// sent in a request. Specify one match method.
+type HeaderMatchMethod struct {
+	_ struct{} `type:"structure"`
+
+	Exact *string `locationName:"exact" min:"1" type:"string"`
+
+	Prefix *string `locationName:"prefix" min:"1" type:"string"`
+
+	// An object that represents the range of values to match on. The first character
+	// of the range is included in the range, though the last character is not.
+	// For example, if the range specified were 1-100, only values 1-99 would be
+	// matched.
+	Range *MatchRange `locationName:"range" type:"structure"`
+
+	Regex *string `locationName:"regex" min:"1" type:"string"`
+
+	Suffix *string `locationName:"suffix" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s HeaderMatchMethod) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeaderMatchMethod) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeaderMatchMethod) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HeaderMatchMethod"}
+	if s.Exact != nil && len(*s.Exact) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Exact", 1))
+	}
+	if s.Prefix != nil && len(*s.Prefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Prefix", 1))
+	}
+	if s.Regex != nil && len(*s.Regex) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Regex", 1))
+	}
+	if s.Suffix != nil && len(*s.Suffix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Suffix", 1))
+	}
+	if s.Range != nil {
+		if err := s.Range.Validate(); err != nil {
+			invalidParams.AddNested("Range", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetExact sets the Exact field's value.
+func (s *HeaderMatchMethod) SetExact(v string) *HeaderMatchMethod {
+	s.Exact = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *HeaderMatchMethod) SetPrefix(v string) *HeaderMatchMethod {
+	s.Prefix = &v
+	return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeaderMatchMethod) SetRange(v *MatchRange) *HeaderMatchMethod {
+	s.Range = v
+	return s
+}
+
+// SetRegex sets the Regex field's value.
+func (s *HeaderMatchMethod) SetRegex(v string) *HeaderMatchMethod {
+	s.Regex = &v
+	return s
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod {
+	s.Suffix = &v
+	return s
+}
+
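One match method per HeaderMatchMethod, per the doc comment above; a sketch using the range form (MatchRange is defined later in this file):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// Match header values in the half-open range [2, 5), i.e. 2 through 4.
	hm := (&appmesh.HeaderMatchMethod{}).
		SetRange((&appmesh.MatchRange{}).SetStart(2).SetEnd(5))

	if err := hm.Validate(); err != nil { // delegates to MatchRange.Validate
		fmt.Println("invalid header match:", err)
	}
}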
+// An object that represents the health check policy for a virtual node's listener.
+type HealthCheckPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// HealthyThreshold is a required field
+	HealthyThreshold *int64 `locationName:"healthyThreshold" min:"2" type:"integer" required:"true"`
+
+	// IntervalMillis is a required field
+	IntervalMillis *int64 `locationName:"intervalMillis" min:"5000" type:"long" required:"true"`
+
+	Path *string `locationName:"path" type:"string"`
+
+	Port *int64 `locationName:"port" min:"1" type:"integer"`
+
+	// Protocol is a required field
+	Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"`
+
+	// TimeoutMillis is a required field
+	TimeoutMillis *int64 `locationName:"timeoutMillis" min:"2000" type:"long" required:"true"`
+
+	// UnhealthyThreshold is a required field
+	UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" min:"2" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s HealthCheckPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckPolicy) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HealthCheckPolicy) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HealthCheckPolicy"}
+	if s.HealthyThreshold == nil {
+		invalidParams.Add(request.NewErrParamRequired("HealthyThreshold"))
+	}
+	if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 {
+		invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2))
+	}
+	if s.IntervalMillis == nil {
+		invalidParams.Add(request.NewErrParamRequired("IntervalMillis"))
+	}
+	if s.IntervalMillis != nil && *s.IntervalMillis < 5000 {
+		invalidParams.Add(request.NewErrParamMinValue("IntervalMillis", 5000))
+	}
+	if s.Port != nil && *s.Port < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+	}
+	if s.Protocol == nil {
+		invalidParams.Add(request.NewErrParamRequired("Protocol"))
+	}
+	if s.TimeoutMillis == nil {
+		invalidParams.Add(request.NewErrParamRequired("TimeoutMillis"))
+	}
+	if s.TimeoutMillis != nil && *s.TimeoutMillis < 2000 {
+		invalidParams.Add(request.NewErrParamMinValue("TimeoutMillis", 2000))
+	}
+	if s.UnhealthyThreshold == nil {
+		invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold"))
+	}
+	if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 {
+		invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetHealthyThreshold sets the HealthyThreshold field's value.
+func (s *HealthCheckPolicy) SetHealthyThreshold(v int64) *HealthCheckPolicy {
+	s.HealthyThreshold = &v
+	return s
+}
+
+// SetIntervalMillis sets the IntervalMillis field's value.
+func (s *HealthCheckPolicy) SetIntervalMillis(v int64) *HealthCheckPolicy {
+	s.IntervalMillis = &v
+	return s
+}
+
+// SetPath sets the Path field's value.
+func (s *HealthCheckPolicy) SetPath(v string) *HealthCheckPolicy {
+	s.Path = &v
+	return s
+}
+
+// SetPort sets the Port field's value.
+func (s *HealthCheckPolicy) SetPort(v int64) *HealthCheckPolicy {
+	s.Port = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *HealthCheckPolicy) SetProtocol(v string) *HealthCheckPolicy {
+	s.Protocol = &v
+	return s
+}
+
+// SetTimeoutMillis sets the TimeoutMillis field's value.
+func (s *HealthCheckPolicy) SetTimeoutMillis(v int64) *HealthCheckPolicy {
+	s.TimeoutMillis = &v
+	return s
+}
+
+// SetUnhealthyThreshold sets the UnhealthyThreshold field's value.
+func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy {
+	s.UnhealthyThreshold = &v
+	return s
+}
+
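The Validate method above encodes the floor values for this policy; a sketch that satisfies them, assuming "http" is among the PortProtocol enum values used elsewhere in this package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// Thresholds must be >= 2, the interval >= 5000 ms, the timeout >= 2000 ms.
	hc := (&appmesh.HealthCheckPolicy{}).
		SetProtocol("http"). // assumed PortProtocol enum value
		SetPath("/health").  // hypothetical probe path
		SetHealthyThreshold(2).
		SetUnhealthyThreshold(2).
		SetIntervalMillis(5000).
		SetTimeoutMillis(2000)

	if err := hc.Validate(); err != nil {
		fmt.Println("invalid health check:", err)
	}
}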
+// An object that represents a retry policy. Specify at least one value for
+// at least one of the types of RetryEvents, a value for maxRetries, and a value
+// for perRetryTimeout.
+type HttpRetryPolicy struct {
+	_ struct{} `type:"structure"`
+
+	HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"`
+
+	// MaxRetries is a required field
+	MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"`
+
+	// An object that represents a duration of time.
+	//
+	// PerRetryTimeout is a required field
+	PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"`
+
+	TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s HttpRetryPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HttpRetryPolicy) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HttpRetryPolicy) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HttpRetryPolicy"}
+	if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1))
+	}
+	if s.MaxRetries == nil {
+		invalidParams.Add(request.NewErrParamRequired("MaxRetries"))
+	}
+	if s.PerRetryTimeout == nil {
+		invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout"))
+	}
+	if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetHttpRetryEvents sets the HttpRetryEvents field's value.
+func (s *HttpRetryPolicy) SetHttpRetryEvents(v []*string) *HttpRetryPolicy {
+	s.HttpRetryEvents = v
+	return s
+}
+
+// SetMaxRetries sets the MaxRetries field's value.
+func (s *HttpRetryPolicy) SetMaxRetries(v int64) *HttpRetryPolicy {
+	s.MaxRetries = &v
+	return s
+}
+
+// SetPerRetryTimeout sets the PerRetryTimeout field's value.
+func (s *HttpRetryPolicy) SetPerRetryTimeout(v *Duration) *HttpRetryPolicy {
+	s.PerRetryTimeout = v
+	return s
+}
+
+// SetTcpRetryEvents sets the TcpRetryEvents field's value.
+func (s *HttpRetryPolicy) SetTcpRetryEvents(v []*string) *HttpRetryPolicy {
+	s.TcpRetryEvents = v
+	return s
+}
+
+// An object that represents an HTTP or HTTP2 route type.
+type HttpRoute struct {
+	_ struct{} `type:"structure"`
+
+	// An object that represents the action to take if a match is determined.
+	//
+	// Action is a required field
+	Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"`
+
+	// An object that represents the requirements for a route to match HTTP requests
+	// for a virtual router.
+	//
+	// Match is a required field
+	Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"`
+
+	// An object that represents a retry policy. Specify at least one value for
+	// at least one of the types of RetryEvents, a value for maxRetries, and a value
+	// for perRetryTimeout.
+	RetryPolicy *HttpRetryPolicy `locationName:"retryPolicy" type:"structure"`
+}
+
+// String returns the string representation
+func (s HttpRoute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HttpRoute) GoString() string {
+	return s.String()
+}
+
@@ -5097,6 +5765,11 @@ func (s *HttpRoute) Validate() error {
 		invalidParams.AddNested("Match", err.(request.ErrInvalidParams))
 	}
 	}
+	if s.RetryPolicy != nil {
+		if err := s.RetryPolicy.Validate(); err != nil {
+			invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams))
+		}
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5116,15 +5789,16 @@ func (s *HttpRoute) SetMatch(v *HttpRouteMatch) *HttpRoute {
 	return s
 }
 
-// An object representing the traffic distribution requirements for matched
-// HTTP requests.
+// SetRetryPolicy sets the RetryPolicy field's value.
+func (s *HttpRoute) SetRetryPolicy(v *HttpRetryPolicy) *HttpRoute {
+	s.RetryPolicy = v
+	return s
+}
+
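A sketch wiring the pieces together: an HTTP route whose action, match, and new retry policy all come from the types above; the target node name and the "server-error" event class are assumptions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	route := (&appmesh.HttpRoute{}).
		SetMatch((&appmesh.HttpRouteMatch{}).SetPrefix("/")).
		SetAction((&appmesh.HttpRouteAction{}).SetWeightedTargets(
			[]*appmesh.WeightedTarget{
				// hypothetical virtual node receiving all traffic
				(&appmesh.WeightedTarget{}).SetVirtualNode("orders-v1").SetWeight(100),
			})).
		SetRetryPolicy((&appmesh.HttpRetryPolicy{}).
			SetMaxRetries(2).
			SetPerRetryTimeout((&appmesh.Duration{}).SetUnit("ms").SetValue(500)).
			SetHttpRetryEvents(aws.StringSlice([]string{"server-error"}))) // assumed event class

	// Validate now also walks the nested RetryPolicy (see the hunk above).
	if err := route.Validate(); err != nil {
		fmt.Println("invalid route:", err)
	}
}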
+// An object that represents the action to take if a match is determined.
 type HttpRouteAction struct {
 	_ struct{} `type:"structure"`
 
-	// The targets that traffic is routed to when a request matches the route. You
-	// can specify one or more targets and their relative weights to distribute
-	// traffic with.
-	//
 	// WeightedTargets is a required field
 	WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"`
 }
 
@@ -5171,19 +5845,82 @@ func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteActi
 	return s
 }
 
-// An object representing the requirements for a route to match HTTP requests
+// An object that represents the HTTP header in the request.
+type HttpRouteHeader struct {
+	_ struct{} `type:"structure"`
+
+	Invert *bool `locationName:"invert" type:"boolean"`
+
+	// An object that represents the method and value to match with the header value
+	// sent in a request. Specify one match method.
+	Match *HeaderMatchMethod `locationName:"match" type:"structure"`
+
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HttpRouteHeader) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HttpRouteHeader) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HttpRouteHeader) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HttpRouteHeader"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.Match != nil {
+		if err := s.Match.Validate(); err != nil {
+			invalidParams.AddNested("Match", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetInvert sets the Invert field's value.
+func (s *HttpRouteHeader) SetInvert(v bool) *HttpRouteHeader {
+	s.Invert = &v
+	return s
+}
+
+// SetMatch sets the Match field's value.
+func (s *HttpRouteHeader) SetMatch(v *HeaderMatchMethod) *HttpRouteHeader {
+	s.Match = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader {
+	s.Name = &v
+	return s
+}
+
+// An object that represents the requirements for a route to match HTTP requests
 // for a virtual router.
 type HttpRouteMatch struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies the path to match requests with. This parameter must always start
-	// with /, which by itself matches all requests to the virtual service name.
-	// You can also match for path-based routing of requests. For example, if your
-	// virtual service name is my-service.local and you want the route to match
-	// requests to my-service.local/metrics, your prefix should be /metrics.
-	//
+	Headers []*HttpRouteHeader `locationName:"headers" min:"1" type:"list"`
+
+	Method *string `locationName:"method" type:"string" enum:"HttpMethod"`
+
 	// Prefix is a required field
 	Prefix *string `locationName:"prefix" type:"string" required:"true"`
+
+	Scheme *string `locationName:"scheme" type:"string" enum:"HttpScheme"`
 }
 
 // String returns the string representation
@@ -5199,9 +5936,22 @@ func (s HttpRouteMatch) GoString() string {
 // Validate inspects the fields of the type to determine if they are valid.
 func (s *HttpRouteMatch) Validate() error {
 	invalidParams := request.ErrInvalidParams{Context: "HttpRouteMatch"}
+	if s.Headers != nil && len(s.Headers) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Headers", 1))
+	}
 	if s.Prefix == nil {
 		invalidParams.Add(request.NewErrParamRequired("Prefix"))
 	}
+	if s.Headers != nil {
+		for i, v := range s.Headers {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5209,31 +5959,35 @@ func (s *HttpRouteMatch) Validate() error {
 	return nil
 }
 
+// SetHeaders sets the Headers field's value.
+func (s *HttpRouteMatch) SetHeaders(v []*HttpRouteHeader) *HttpRouteMatch {
+	s.Headers = v
+	return s
+}
+
+// SetMethod sets the Method field's value.
+func (s *HttpRouteMatch) SetMethod(v string) *HttpRouteMatch {
+	s.Method = &v
+	return s
+}
+
 // SetPrefix sets the Prefix field's value.
 func (s *HttpRouteMatch) SetPrefix(v string) *HttpRouteMatch {
 	s.Prefix = &v
 	return s
 }
 
+// SetScheme sets the Scheme field's value.
+func (s *HttpRouteMatch) SetScheme(v string) *HttpRouteMatch {
+	s.Scheme = &v
+	return s
+}
+
 type ListMeshesInput struct {
 	_ struct{} `type:"structure"`
 
-	// The maximum number of results returned by ListMeshes in paginated output.
-	// When you use this parameter, ListMeshes returns only limit results in a single
-	// page along with a nextToken response element. You can see the remaining results
-	// of the initial request by sending another ListMeshes request with the returned
-	// nextToken value. This value can be between 1 and 100. If you don't use this
-	// parameter, ListMeshes returns up to 100 results and a nextToken value if
-	// applicable.
 	Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"`
 
-	// The nextToken value returned from a previous paginated ListMeshes request
-	// where limit was used and the results exceeded the value of that parameter.
-	// Pagination continues from the end of the previous results that returned the
-	// nextToken value.
-	//
-	// This token should be treated as an opaque identifier that is used only to
-	// retrieve the next items in a list and not for other programmatic purposes.
 	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
 }
 
@@ -5275,15 +6029,9 @@ func (s *ListMeshesInput) SetNextToken(v string) *ListMeshesInput {
 type ListMeshesOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The list of existing service meshes.
-	//
 	// Meshes is a required field
 	Meshes []*MeshRef `locationName:"meshes" type:"list" required:"true"`
 
-	// The nextToken value to include in a future ListMeshes request. When the results
-	// of a ListMeshes request exceed limit, you can use this value to retrieve
-	// the next page of results. This value is null when there are no more results
-	// to return.
 	NextToken *string `locationName:"nextToken" type:"string"`
 }
 
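The Limit/NextToken pair above follows the usual SDK pagination contract; a sketch of draining ListMeshes page by page (region and credentials come from the default session chain):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	svc := appmesh.New(session.Must(session.NewSession()))
	input := (&appmesh.ListMeshesInput{}).SetLimit(10)
	for {
		out, err := svc.ListMeshes(input)
		if err != nil {
			fmt.Println("list meshes:", err)
			return
		}
		for _, m := range out.Meshes {
			fmt.Println(aws.StringValue(m.MeshName))
		}
		if out.NextToken == nil { // a nil token marks the last page
			return
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}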
@@ -5312,28 +6060,13 @@ func (s *ListMeshesOutput) SetNextToken(v string) *ListMeshesOutput {
 type ListRoutesInput struct {
 	_ struct{} `type:"structure"`
 
-	// The maximum number of results returned by ListRoutes in paginated output.
-	// When you use this parameter, ListRoutes returns only limit results in a single
-	// page along with a nextToken response element. You can see the remaining results
-	// of the initial request by sending another ListRoutes request with the returned
-	// nextToken value. This value can be between 1 and 100. If you don't use this
-	// parameter, ListRoutes returns up to 100 results and a nextToken value if
-	// applicable.
 	Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"`
 
-	// The name of the service mesh to list routes in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The nextToken value returned from a previous paginated ListRoutes request
-	// where limit was used and the results exceeded the value of that parameter.
-	// Pagination continues from the end of the previous results that returned the
-	// nextToken value.
 	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
 
-	// The name of the virtual router to list routes in.
-	//
 	// VirtualRouterName is a required field
 	VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"`
 }
 
@@ -5400,14 +6133,8 @@ func (s *ListRoutesInput) SetVirtualRouterName(v string) *ListRoutesInput {
 type ListRoutesOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The nextToken value to include in a future ListRoutes request. When the results
-	// of a ListRoutes request exceed limit, you can use this value to retrieve
-	// the next page of results. This value is null when there are no more results
-	// to return.
 	NextToken *string `locationName:"nextToken" type:"string"`
 
-	// The list of existing routes for the specified service mesh and virtual router.
-	//
 	// Routes is a required field
 	Routes []*RouteRef `locationName:"routes" type:"list" required:"true"`
 }
 
@@ -5437,24 +6164,10 @@ func (s *ListRoutesOutput) SetRoutes(v []*RouteRef) *ListRoutesOutput {
 type ListTagsForResourceInput struct {
 	_ struct{} `type:"structure"`
 
-	// The maximum number of tag results returned by ListTagsForResource in paginated
-	// output. When this parameter is used, ListTagsForResource returns only limit
-	// results in a single page along with a nextToken response element. You can
-	// see the remaining results of the initial request by sending another ListTagsForResource
-	// request with the returned nextToken value. This value can be between 1 and
-	// 100. If you don't use this parameter, ListTagsForResource returns up to 100
-	// results and a nextToken value if applicable.
 	Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"`
 
-	// The nextToken value returned from a previous paginated ListTagsForResource
-	// request where limit was used and the results exceeded the value of that parameter.
-	// Pagination continues from the end of the previous results that returned the
-	// nextToken value.
 	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
 
-	// The Amazon Resource Name (ARN) that identifies the resource to list the tags
-	// for.
-	//
 	// ResourceArn is a required field
 	ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"`
 }
 
@@ -5506,14 +6219,8 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource
 type ListTagsForResourceOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The nextToken value to include in a future ListTagsForResource request. When
-	// the results of a ListTagsForResource request exceed limit, you can use this
-	// value to retrieve the next page of results. This value is null when there
-	// are no more results to return.
 	NextToken *string `locationName:"nextToken" type:"string"`
 
-	// The tags for the resource.
-	//
 	// Tags is a required field
 	Tags []*TagRef `locationName:"tags" type:"list" required:"true"`
 }
 
@@ -5543,24 +6250,11 @@ func (s *ListTagsForResourceOutput) SetTags(v []*TagRef) *ListTagsForResourceOut
 type ListVirtualNodesInput struct {
 	_ struct{} `type:"structure"`
 
-	// The maximum number of results returned by ListVirtualNodes in paginated output.
-	// When you use this parameter, ListVirtualNodes returns only limit results
-	// in a single page along with a nextToken response element. You can see the
-	// remaining results of the initial request by sending another ListVirtualNodes
-	// request with the returned nextToken value. This value can be between 1 and
-	// 100. If you don't use this parameter, ListVirtualNodes returns up to 100
-	// results and a nextToken value if applicable.
 	Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"`
 
-	// The name of the service mesh to list virtual nodes in.
-	//
 	// MeshName is a required field
 	MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"`
 
-	// The nextToken value returned from a previous paginated ListVirtualNodes request
-	// where limit was used and the results exceeded the value of that parameter.
-	// Pagination continues from the end of the previous results that returned the
-	// nextToken value.
 	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
 }
 
@@ -5614,14 +6308,8 @@ func (s *ListVirtualNodesInput) SetNextToken(v string) *ListVirtualNodesInput {
 type ListVirtualNodesOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The nextToken value to include in a future ListVirtualNodes request. When
-	// the results of a ListVirtualNodes request exceed limit, you can use this
-	// value to retrieve the next page of results. This value is null when there
-	// are no more results to return.
 	NextToken *string `locationName:"nextToken" type:"string"`
 
-	// The list of existing virtual nodes for the specified service mesh.
-	//
 	// VirtualNodes is a required field
 	VirtualNodes []*VirtualNodeRef `locationName:"virtualNodes" type:"list" required:"true"`
 }
 
@@ -5651,24 +6339,11 @@ func (s *ListVirtualNodesOutput) SetVirtualNodes(v []*VirtualNodeRef) *ListVirtu
 type ListVirtualRoutersInput struct {
 	_ struct{} `type:"structure"`
 
-	// The maximum number of results returned by ListVirtualRouters in paginated
-	// output. When you use this parameter, ListVirtualRouters returns only limit
-	// results in a single page along with a nextToken response element. You can
-	// see the remaining results of the initial request by sending another ListVirtualRouters
-	// request with the returned nextToken value. This value can be between 1 and
-	// 100. If you don't use this parameter, ListVirtualRouters returns up to 100
-	// results and a nextToken value if applicable.
Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - // The name of the service mesh to list virtual routers in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The nextToken value returned from a previous paginated ListVirtualRouters - // request where limit was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -5722,14 +6397,8 @@ func (s *ListVirtualRoutersInput) SetNextToken(v string) *ListVirtualRoutersInpu type ListVirtualRoutersOutput struct { _ struct{} `type:"structure"` - // The nextToken value to include in a future ListVirtualRouters request. When - // the results of a ListVirtualRouters request exceed limit, you can use this - // value to retrieve the next page of results. This value is null when there - // are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The list of existing virtual routers for the specified service mesh. - // // VirtualRouters is a required field VirtualRouters []*VirtualRouterRef `locationName:"virtualRouters" type:"list" required:"true"` } @@ -5759,24 +6428,11 @@ func (s *ListVirtualRoutersOutput) SetVirtualRouters(v []*VirtualRouterRef) *Lis type ListVirtualServicesInput struct { _ struct{} `type:"structure"` - // The maximum number of results returned by ListVirtualServices in paginated - // output. When you use this parameter, ListVirtualServices returns only limit - // results in a single page along with a nextToken response element. You can - // see the remaining results of the initial request by sending another ListVirtualServices - // request with the returned nextToken value. This value can be between 1 and - // 100. If you don't use this parameter, ListVirtualServices returns up to 100 - // results and a nextToken value if applicable. Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - // The name of the service mesh to list virtual services in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The nextToken value returned from a previous paginated ListVirtualServices - // request where limit was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -5830,14 +6486,8 @@ func (s *ListVirtualServicesInput) SetNextToken(v string) *ListVirtualServicesIn type ListVirtualServicesOutput struct { _ struct{} `type:"structure"` - // The nextToken value to include in a future ListVirtualServices request. When - // the results of a ListVirtualServices request exceed limit, you can use this - // value to retrieve the next page of results. This value is null when there - // are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The list of existing virtual services for the specified service mesh. 
- // // VirtualServices is a required field VirtualServices []*VirtualServiceRef `locationName:"virtualServices" type:"list" required:"true"` } @@ -5864,14 +6514,14 @@ func (s *ListVirtualServicesOutput) SetVirtualServices(v []*VirtualServiceRef) * return s } -// An object representing a listener for a virtual node. +// An object that represents a listener for a virtual node. type Listener struct { _ struct{} `type:"structure"` - // The health check information for the listener. + // An object that represents the health check policy for a virtual node's listener. HealthCheck *HealthCheckPolicy `locationName:"healthCheck" type:"structure"` - // The port mapping information for the listener. + // An object that represents a port mapping. // // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -5922,11 +6572,11 @@ func (s *Listener) SetPortMapping(v *PortMapping) *Listener { return s } -// An object representing the logging information for a virtual node. +// An object that represents the logging information for a virtual node. type Logging struct { _ struct{} `type:"structure"` - // The access log configuration for a virtual node. + // An object that represents the access logging information for a virtual node. AccessLog *AccessLog `locationName:"accessLog" type:"structure"` } @@ -5961,26 +6611,76 @@ func (s *Logging) SetAccessLog(v *AccessLog) *Logging { return s } -// An object representing a service mesh returned by a describe operation. +// An object that represents the range of values to match on. The first character +// of the range is included in the range, though the last character is not. +// For example, if the range specified were 1-100, only values 1-99 would be +// matched. +type MatchRange struct { + _ struct{} `type:"structure"` + + // End is a required field + End *int64 `locationName:"end" type:"long" required:"true"` + + // Start is a required field + Start *int64 `locationName:"start" type:"long" required:"true"` +} + +// String returns the string representation +func (s MatchRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MatchRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MatchRange"} + if s.End == nil { + invalidParams.Add(request.NewErrParamRequired("End")) + } + if s.Start == nil { + invalidParams.Add(request.NewErrParamRequired("Start")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnd sets the End field's value. +func (s *MatchRange) SetEnd(v int64) *MatchRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *MatchRange) SetStart(v int64) *MatchRange { + s.Start = &v + return s +} + +// An object that represents a service mesh returned by a describe operation. type MeshData struct { _ struct{} `type:"structure"` - // The name of the service mesh. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The associated metadata for the service mesh. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // The associated specification for the service mesh. 
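
The new MatchRange type above is half-open, and its docs' 1-100 example is easy to misread. A small sketch of building and validating one:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	// Start is inclusive and End is exclusive, so this matches 1 through 99,
	// mirroring the 1-100 example in the MatchRange docs above.
	r := (&appmesh.MatchRange{}).SetStart(1).SetEnd(100)

	// Start and End are both required; Validate reports a missing field.
	if err := r.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(r) // String() pretty-prints via awsutil.Prettify
}
```
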
+ // An object that represents the specification of a service mesh. // // Spec is a required field Spec *MeshSpec `locationName:"spec" type:"structure" required:"true"` - // The status of the service mesh. + // An object that represents the status of a service mesh. // // Status is a required field Status *MeshStatus `locationName:"status" type:"structure" required:"true"` @@ -6020,17 +6720,13 @@ func (s *MeshData) SetStatus(v *MeshStatus) *MeshData { return s } -// An object representing a service mesh returned by a list operation. +// An object that represents a service mesh returned by a list operation. type MeshRef struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) of the service mesh. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The name of the service mesh. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` } @@ -6057,11 +6753,11 @@ func (s *MeshRef) SetMeshName(v string) *MeshRef { return s } -// An object representing the specification of a service mesh. +// An object that represents the specification of a service mesh. type MeshSpec struct { _ struct{} `type:"structure"` - // The egress filter rules for the service mesh. + // An object that represents the egress filter rules for a service mesh. EgressFilter *EgressFilter `locationName:"egressFilter" type:"structure"` } @@ -6096,11 +6792,10 @@ func (s *MeshSpec) SetEgressFilter(v *EgressFilter) *MeshSpec { return s } -// An object representing the status of a service mesh. +// An object that represents the status of a service mesh. type MeshStatus struct { _ struct{} `type:"structure"` - // The current mesh status. Status *string `locationName:"status" type:"string" enum:"MeshStatusCode"` } @@ -6120,17 +6815,13 @@ func (s *MeshStatus) SetStatus(v string) *MeshStatus { return s } -// An object representing a virtual node or virtual router listener port mapping. +// An object that represents a port mapping. type PortMapping struct { _ struct{} `type:"structure"` - // The port used for the port mapping. - // // Port is a required field Port *int64 `locationName:"port" min:"1" type:"integer" required:"true"` - // The protocol used for the port mapping. - // // Protocol is a required field Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"` } @@ -6176,33 +6867,22 @@ func (s *PortMapping) SetProtocol(v string) *PortMapping { return s } -// An object representing metadata for a resource. +// An object that represents metadata for a resource. type ResourceMetadata struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) for the resource. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The Unix epoch timestamp in seconds for when the resource was created. - // // CreatedAt is a required field CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` - // The Unix epoch timestamp in seconds for when the resource was last updated. - // // LastUpdatedAt is a required field LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` - // The unique identifier for the resource. - // // Uid is a required field Uid *string `locationName:"uid" type:"string" required:"true"` - // The version of the resource. Resources are created at version 1, and this - // version is incremented each time that they're updated. 
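
PortMapping itself is unchanged here, but this revision widens the PortProtocol enum with grpc and http2 (see the constants near the end of this file). A sketch of a listener using the new http2 value:

```go
package main

import "github.com/aws/aws-sdk-go/service/appmesh"

// http2Listener builds a Listener for the given port using the "http2"
// protocol constant added in this revision.
func http2Listener(port int64) (*appmesh.Listener, error) {
	l := &appmesh.Listener{
		PortMapping: (&appmesh.PortMapping{}).
			SetPort(port).
			SetProtocol(appmesh.PortProtocolHttp2),
	}
	// Port is required and must be at least 1; Validate enforces both.
	if err := l.Validate(); err != nil {
		return nil, err
	}
	return l, nil
}
```
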
- // // Version is a required field Version *int64 `locationName:"version" type:"long" required:"true"` } @@ -6247,37 +6927,31 @@ func (s *ResourceMetadata) SetVersion(v int64) *ResourceMetadata { return s } -// An object representing a route returned by a describe operation. +// An object that represents a route returned by a describe operation. type RouteData struct { _ struct{} `type:"structure"` - // The name of the service mesh that the route resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The associated metadata for the route. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // The name of the route. - // // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // The specifications of the route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` - // The status of the route. + // An object that represents the current status of a route. // // Status is a required field Status *RouteStatus `locationName:"status" type:"structure" required:"true"` - // The virtual router that the route is associated with. - // // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -6328,27 +7002,19 @@ func (s *RouteData) SetVirtualRouterName(v string) *RouteData { return s } -// An object representing a route returned by a list operation. +// An object that represents a route returned by a list operation. type RouteRef struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) for the route. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The name of the service mesh that the route resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The name of the route. - // // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // The virtual router that the route is associated with. - // // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -6387,14 +7053,22 @@ func (s *RouteRef) SetVirtualRouterName(v string) *RouteRef { return s } -// An object representing the specification of a route. +// An object that represents a route specification. Specify one route type. type RouteSpec struct { _ struct{} `type:"structure"` - // The HTTP routing information for the route. + // An object that represents a GRPC route type. + GrpcRoute *GrpcRoute `locationName:"grpcRoute" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. + Http2Route *HttpRoute `locationName:"http2Route" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` - // The TCP routing information for the route. + Priority *int64 `locationName:"priority" type:"integer"` + + // An object that represents a TCP route type. 
TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` } @@ -6411,6 +7085,16 @@ func (s RouteSpec) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RouteSpec) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RouteSpec"} + if s.GrpcRoute != nil { + if err := s.GrpcRoute.Validate(); err != nil { + invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) + } + } + if s.Http2Route != nil { + if err := s.Http2Route.Validate(); err != nil { + invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) + } + } if s.HttpRoute != nil { if err := s.HttpRoute.Validate(); err != nil { invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) @@ -6428,24 +7112,40 @@ func (s *RouteSpec) Validate() error { return nil } +// SetGrpcRoute sets the GrpcRoute field's value. +func (s *RouteSpec) SetGrpcRoute(v *GrpcRoute) *RouteSpec { + s.GrpcRoute = v + return s +} + +// SetHttp2Route sets the Http2Route field's value. +func (s *RouteSpec) SetHttp2Route(v *HttpRoute) *RouteSpec { + s.Http2Route = v + return s +} + // SetHttpRoute sets the HttpRoute field's value. func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { s.HttpRoute = v return s } +// SetPriority sets the Priority field's value. +func (s *RouteSpec) SetPriority(v int64) *RouteSpec { + s.Priority = &v + return s +} + // SetTcpRoute sets the TcpRoute field's value. func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { s.TcpRoute = v return s } -// An object representing the current status of a route. +// An object that represents the current status of a route. type RouteStatus struct { _ struct{} `type:"structure"` - // The current status for the route. - // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"RouteStatusCode"` } @@ -6466,11 +7166,17 @@ func (s *RouteStatus) SetStatus(v string) *RouteStatus { return s } -// An object representing the service discovery information for a virtual node. +// An object that represents the service discovery information for a virtual +// node. type ServiceDiscovery struct { _ struct{} `type:"structure"` - // Specifies the DNS information for the virtual node. + // An object that represents the AWS Cloud Map service discovery information + // for your virtual node. + AwsCloudMap *AwsCloudMapServiceDiscovery `locationName:"awsCloudMap" type:"structure"` + + // An object that represents the DNS service discovery information for your + // virtual node. Dns *DnsServiceDiscovery `locationName:"dns" type:"structure"` } @@ -6487,6 +7193,11 @@ func (s ServiceDiscovery) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ServiceDiscovery) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ServiceDiscovery"} + if s.AwsCloudMap != nil { + if err := s.AwsCloudMap.Validate(); err != nil { + invalidParams.AddNested("AwsCloudMap", err.(request.ErrInvalidParams)) + } + } if s.Dns != nil { if err := s.Dns.Validate(); err != nil { invalidParams.AddNested("Dns", err.(request.ErrInvalidParams)) @@ -6499,6 +7210,12 @@ func (s *ServiceDiscovery) Validate() error { return nil } +// SetAwsCloudMap sets the AwsCloudMap field's value. +func (s *ServiceDiscovery) SetAwsCloudMap(v *AwsCloudMapServiceDiscovery) *ServiceDiscovery { + s.AwsCloudMap = v + return s +} + // SetDns sets the Dns field's value. 
func (s *ServiceDiscovery) SetDns(v *DnsServiceDiscovery) *ServiceDiscovery { s.Dns = v @@ -6512,14 +7229,9 @@ func (s *ServiceDiscovery) SetDns(v *DnsServiceDiscovery) *ServiceDiscovery { type TagRef struct { _ struct{} `type:"structure"` - // One part of a key-value pair that make up a tag. A key is a general label - // that acts like a category for more specific tag values. - // // Key is a required field Key *string `locationName:"key" min:"1" type:"string" required:"true"` - // The optional part of a key-value pair that make up a tag. A value acts as - // a descriptor within a tag category (key). Value *string `locationName:"value" type:"string"` } @@ -6564,15 +7276,9 @@ func (s *TagRef) SetValue(v string) *TagRef { type TagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource to add tags to. - // // ResourceArn is a required field ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` - // The tags to add to the resource. A tag is an array of key-value pairs. Tag - // keys can have a maximum character length of 128 characters, and tag values - // can have a maximum length of 256 characters. - // // Tags is a required field Tags []*TagRef `locationName:"tags" type:"list" required:"true"` } @@ -6639,11 +7345,11 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// An object representing the TCP routing specification for a route. +// An object that represents a TCP route type. type TcpRoute struct { _ struct{} `type:"structure"` - // The action to take if a match is determined. + // An object that represents the action to take if a match is determined. // // Action is a required field Action *TcpRouteAction `locationName:"action" type:"structure" required:"true"` @@ -6683,15 +7389,10 @@ func (s *TcpRoute) SetAction(v *TcpRouteAction) *TcpRoute { return s } -// An object representing the traffic distribution requirements for matched -// TCP requests. +// An object that represents the action to take if a match is determined. type TcpRouteAction struct { _ struct{} `type:"structure"` - // The targets that traffic is routed to when a request matches the route. You - // can specify one or more targets and their relative weights to distribute - // traffic with. - // // WeightedTargets is a required field WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` } @@ -6741,13 +7442,9 @@ func (s *TcpRouteAction) SetWeightedTargets(v []*WeightedTarget) *TcpRouteAction type UntagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource to delete tags from. - // // ResourceArn is a required field ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` - // The keys of the tags to be removed. - // // TagKeys is a required field TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` } @@ -6807,16 +7504,12 @@ func (s UntagResourceOutput) GoString() string { type UpdateMeshInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh to update. 
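
RouteSpec above now accepts GrpcRoute, Http2Route, and a Priority, and Validate recurses into the new route types. A sketch of an HTTP/2 route spec; the prefix, node name, and weight are illustrative only:

```go
package main

import "github.com/aws/aws-sdk-go/service/appmesh"

// http2RouteSpec returns a RouteSpec that uses the new Http2Route and
// Priority fields; note Http2Route reuses the HttpRoute shape.
func http2RouteSpec(virtualNode string) *appmesh.RouteSpec {
	return (&appmesh.RouteSpec{}).
		SetPriority(1).
		SetHttp2Route(&appmesh.HttpRoute{
			Match: (&appmesh.HttpRouteMatch{}).SetPrefix("/"),
			Action: (&appmesh.HttpRouteAction{}).SetWeightedTargets(
				[]*appmesh.WeightedTarget{
					(&appmesh.WeightedTarget{}).
						SetVirtualNode(virtualNode).
						SetWeight(100),
				}),
		})
}
```
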
- // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The service mesh specification to apply. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` } @@ -6872,7 +7565,7 @@ func (s *UpdateMeshInput) SetSpec(v *MeshSpec) *UpdateMeshInput { type UpdateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -6897,27 +7590,19 @@ func (s *UpdateMeshOutput) SetMesh(v *MeshData) *UpdateMeshOutput { type UpdateRouteInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh that the route resides in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The name of the route to update. - // // RouteName is a required field RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"` - // The new route specification to apply. This overwrites the existing data. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` - // The name of the virtual router that the route is associated with. - // // VirtualRouterName is a required field VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -7001,7 +7686,7 @@ func (s *UpdateRouteInput) SetVirtualRouterName(v string) *UpdateRouteInput { type UpdateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // A full description of the route that was updated. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -7026,23 +7711,16 @@ func (s *UpdateRouteOutput) SetRoute(v *RouteData) *UpdateRouteOutput { type UpdateVirtualNodeInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh that the virtual node resides in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The new virtual node specification to apply. This overwrites the existing - // data. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` - // The name of the virtual node to update. 
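
Each Update* input above keeps a ClientToken tagged idempotencyToken, which the SDK fills in automatically when the field is left unset. A sketch of an UpdateMesh call flipping the egress filter; the mesh name is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

// allowAllEgress updates a mesh spec to the ALLOW_ALL egress filter.
// ClientToken is omitted; the idempotencyToken tag means the SDK
// generates one on our behalf.
func allowAllEgress(svc *appmesh.AppMesh, mesh string) (*appmesh.UpdateMeshOutput, error) {
	return svc.UpdateMesh(&appmesh.UpdateMeshInput{
		MeshName: aws.String(mesh),
		Spec: &appmesh.MeshSpec{
			EgressFilter: (&appmesh.EgressFilter{}).
				SetType(appmesh.EgressFilterTypeAllowAll),
		},
	})
}
```
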
- // // VirtualNodeName is a required field VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } @@ -7114,7 +7792,7 @@ func (s *UpdateVirtualNodeInput) SetVirtualNodeName(v string) *UpdateVirtualNode type UpdateVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // A full description of the virtual node that was updated. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -7139,23 +7817,16 @@ func (s *UpdateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *UpdateVirt type UpdateVirtualRouterInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh that the virtual router resides in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The new virtual router specification to apply. This overwrites the existing - // data. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - // The name of the virtual router to update. - // // VirtualRouterName is a required field VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -7227,7 +7898,7 @@ func (s *UpdateVirtualRouterInput) SetVirtualRouterName(v string) *UpdateVirtual type UpdateVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // A full description of the virtual router that was updated. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -7252,23 +7923,16 @@ func (s *UpdateVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *Upda type UpdateVirtualServiceInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of therequest. Up to 36 letters, numbers, hyphens, and underscores are allowed. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The name of the service mesh that the virtual service resides in. - // // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // The new virtual service specification to apply. This overwrites the existing - // data. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` - // The name of the virtual service to update. 
- // // VirtualServiceName is a required field VirtualServiceName *string `location:"uri" locationName:"virtualServiceName" type:"string" required:"true"` } @@ -7340,7 +8004,7 @@ func (s *UpdateVirtualServiceInput) SetVirtualServiceName(v string) *UpdateVirtu type UpdateVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // A full description of the virtual service that was updated. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -7362,32 +8026,28 @@ func (s *UpdateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *U return s } -// An object representing a virtual node returned by a describe operation. +// An object that represents a virtual node returned by a describe operation. type VirtualNodeData struct { _ struct{} `type:"structure"` - // The name of the service mesh that the virtual node resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The associated metadata for the virtual node. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // The specifications of the virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` - // The current status for the virtual node. + // An object that represents the current status of the virtual node. // // Status is a required field Status *VirtualNodeStatus `locationName:"status" type:"structure" required:"true"` - // The name of the virtual node. - // // VirtualNodeName is a required field VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } @@ -7432,22 +8092,16 @@ func (s *VirtualNodeData) SetVirtualNodeName(v string) *VirtualNodeData { return s } -// An object representing a virtual node returned by a list operation. +// An object that represents a virtual node returned by a list operation. type VirtualNodeRef struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) for the virtual node. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The name of the service mesh that the virtual node resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The name of the virtual node. - // // VirtualNodeName is a required field VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } @@ -7480,12 +8134,10 @@ func (s *VirtualNodeRef) SetVirtualNodeName(v string) *VirtualNodeRef { return s } -// An object representing a virtual node service provider. +// An object that represents a virtual node service provider. type VirtualNodeServiceProvider struct { _ struct{} `type:"structure"` - // The name of the virtual node that is acting as a service provider. - // // VirtualNodeName is a required field VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } @@ -7522,22 +8174,19 @@ func (s *VirtualNodeServiceProvider) SetVirtualNodeName(v string) *VirtualNodeSe return s } -// An object representing the specification of a virtual node. 
+// An object that represents the specification of a virtual node. type VirtualNodeSpec struct { _ struct{} `type:"structure"` - // The backends that the virtual node is expected to send outbound traffic to. Backends []*Backend `locationName:"backends" type:"list"` - // The listeners that the virtual node is expected to receive inbound traffic - // from. Currently only one listener is supported per virtual node. Listeners []*Listener `locationName:"listeners" type:"list"` - // The inbound and outbound access logging information for the virtual node. + // An object that represents the logging information for a virtual node. Logging *Logging `locationName:"logging" type:"structure"` - // The service discovery information for the virtual node. If your virtual node - // does not expect ingress traffic, you can omit this parameter. + // An object that represents the service discovery information for a virtual + // node. ServiceDiscovery *ServiceDiscovery `locationName:"serviceDiscovery" type:"structure"` } @@ -7615,12 +8264,10 @@ func (s *VirtualNodeSpec) SetServiceDiscovery(v *ServiceDiscovery) *VirtualNodeS return s } -// An object representing the current status of the virtual node. +// An object that represents the current status of the virtual node. type VirtualNodeStatus struct { _ struct{} `type:"structure"` - // The current status of the virtual node. - // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"VirtualNodeStatusCode"` } @@ -7641,32 +8288,28 @@ func (s *VirtualNodeStatus) SetStatus(v string) *VirtualNodeStatus { return s } -// An object representing a virtual router returned by a describe operation. +// An object that represents a virtual router returned by a describe operation. type VirtualRouterData struct { _ struct{} `type:"structure"` - // The name of the service mesh that the virtual router resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The associated metadata for the virtual router. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // The specifications of the virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - // The current status of the virtual router. + // An object that represents the status of a virtual router. // // Status is a required field Status *VirtualRouterStatus `locationName:"status" type:"structure" required:"true"` - // The name of the virtual router. - // // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -7711,11 +8354,11 @@ func (s *VirtualRouterData) SetVirtualRouterName(v string) *VirtualRouterData { return s } -// An object representing a virtual router listener. +// An object that represents a virtual router listener. type VirtualRouterListener struct { _ struct{} `type:"structure"` - // An object representing a virtual node or virtual router listener port mapping. + // An object that represents a port mapping. 
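
With this revision a VirtualNodeSpec's ServiceDiscovery can carry the new AwsCloudMap variant instead of DNS. A sketch, assuming the generated setters and hypothetical namespace and service names:

```go
package main

import "github.com/aws/aws-sdk-go/service/appmesh"

// cloudMapNodeSpec builds a VirtualNodeSpec that discovers instances
// through AWS Cloud Map rather than DNS; the names are placeholders.
func cloudMapNodeSpec() *appmesh.VirtualNodeSpec {
	return &appmesh.VirtualNodeSpec{
		Listeners: []*appmesh.Listener{{
			PortMapping: (&appmesh.PortMapping{}).
				SetPort(8080).
				SetProtocol(appmesh.PortProtocolHttp),
		}},
		ServiceDiscovery: &appmesh.ServiceDiscovery{
			AwsCloudMap: (&appmesh.AwsCloudMapServiceDiscovery{}).
				SetNamespaceName("example.local").
				SetServiceName("backend"),
		},
	}
}
```
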
// // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -7755,22 +8398,16 @@ func (s *VirtualRouterListener) SetPortMapping(v *PortMapping) *VirtualRouterLis return s } -// An object representing a virtual router returned by a list operation. +// An object that represents a virtual router returned by a list operation. type VirtualRouterRef struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) for the virtual router. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The name of the service mesh that the virtual router resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The name of the virtual router. - // // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -7803,12 +8440,10 @@ func (s *VirtualRouterRef) SetVirtualRouterName(v string) *VirtualRouterRef { return s } -// An object representing a virtual node service provider. +// An object that represents a virtual node service provider. type VirtualRouterServiceProvider struct { _ struct{} `type:"structure"` - // The name of the virtual router that is acting as a service provider. - // // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -7845,15 +8480,11 @@ func (s *VirtualRouterServiceProvider) SetVirtualRouterName(v string) *VirtualRo return s } -// An object representing the specification of a virtual router. +// An object that represents the specification of a virtual router. type VirtualRouterSpec struct { _ struct{} `type:"structure"` - // The listeners that the virtual router is expected to receive inbound traffic - // from. Currently only one listener is supported per virtual router. - // - // Listeners is a required field - Listeners []*VirtualRouterListener `locationName:"listeners" min:"1" type:"list" required:"true"` + Listeners []*VirtualRouterListener `locationName:"listeners" min:"1" type:"list"` } // String returns the string representation @@ -7869,9 +8500,6 @@ func (s VirtualRouterSpec) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *VirtualRouterSpec) Validate() error { invalidParams := request.ErrInvalidParams{Context: "VirtualRouterSpec"} - if s.Listeners == nil { - invalidParams.Add(request.NewErrParamRequired("Listeners")) - } if s.Listeners != nil && len(s.Listeners) < 1 { invalidParams.Add(request.NewErrParamMinLen("Listeners", 1)) } @@ -7898,12 +8526,10 @@ func (s *VirtualRouterSpec) SetListeners(v []*VirtualRouterListener) *VirtualRou return s } -// An object representing the status of a virtual router. +// An object that represents the status of a virtual router. type VirtualRouterStatus struct { _ struct{} `type:"structure"` - // The current status of the virtual router. - // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"VirtualRouterStatusCode"` } @@ -7924,12 +8550,10 @@ func (s *VirtualRouterStatus) SetStatus(v string) *VirtualRouterStatus { return s } -// An object representing a virtual service backend for a virtual node. +// An object that represents a virtual service backend for a virtual node. 
type VirtualServiceBackend struct { _ struct{} `type:"structure"` - // The name of the virtual service that is acting as a virtual node backend. - // // VirtualServiceName is a required field VirtualServiceName *string `locationName:"virtualServiceName" type:"string" required:"true"` } @@ -7963,32 +8587,28 @@ func (s *VirtualServiceBackend) SetVirtualServiceName(v string) *VirtualServiceB return s } -// An object representing a virtual service returned by a describe operation. +// An object that represents a virtual service returned by a describe operation. type VirtualServiceData struct { _ struct{} `type:"structure"` - // The name of the service mesh that the virtual service resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // The specifications of the virtual service. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` - // The current status of the virtual service. + // An object that represents the status of a virtual service. // // Status is a required field Status *VirtualServiceStatus `locationName:"status" type:"structure" required:"true"` - // The name of the virtual service. - // // VirtualServiceName is a required field VirtualServiceName *string `locationName:"virtualServiceName" type:"string" required:"true"` } @@ -8033,14 +8653,14 @@ func (s *VirtualServiceData) SetVirtualServiceName(v string) *VirtualServiceData return s } -// An object representing the provider for a virtual service. +// An object that represents the provider for a virtual service. type VirtualServiceProvider struct { _ struct{} `type:"structure"` - // The virtual node associated with a virtual service. + // An object that represents a virtual node service provider. VirtualNode *VirtualNodeServiceProvider `locationName:"virtualNode" type:"structure"` - // The virtual router associated with a virtual service. + // An object that represents a virtual node service provider. VirtualRouter *VirtualRouterServiceProvider `locationName:"virtualRouter" type:"structure"` } @@ -8086,22 +8706,16 @@ func (s *VirtualServiceProvider) SetVirtualRouter(v *VirtualRouterServiceProvide return s } -// An object representing a virtual service returned by a list operation. +// An object that represents a virtual service returned by a list operation. type VirtualServiceRef struct { _ struct{} `type:"structure"` - // The full Amazon Resource Name (ARN) for the virtual service. - // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The name of the service mesh that the virtual service resides in. - // // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // The name of the virtual service. - // // VirtualServiceName is a required field VirtualServiceName *string `locationName:"virtualServiceName" type:"string" required:"true"` } @@ -8134,12 +8748,11 @@ func (s *VirtualServiceRef) SetVirtualServiceName(v string) *VirtualServiceRef { return s } -// An object representing the specification of a virtual service. +// An object that represents the specification of a virtual service. 
type VirtualServiceSpec struct { _ struct{} `type:"structure"` - // The App Mesh object that is acting as the provider for a virtual service. - // You can specify a single virtual node or virtual router. + // An object that represents the provider for a virtual service. Provider *VirtualServiceProvider `locationName:"provider" type:"structure"` } @@ -8174,12 +8787,10 @@ func (s *VirtualServiceSpec) SetProvider(v *VirtualServiceProvider) *VirtualServ return s } -// An object representing the status of a virtual service. +// An object that represents the status of a virtual service. type VirtualServiceStatus struct { _ struct{} `type:"structure"` - // The current status of the virtual service. - // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"VirtualServiceStatusCode"` } @@ -8200,20 +8811,17 @@ func (s *VirtualServiceStatus) SetStatus(v string) *VirtualServiceStatus { return s } -// An object representing a target and its relative weight. Traffic is distributed +// An object that represents a target and its relative weight. Traffic is distributed // across targets according to their relative weight. For example, a weighted // target with a relative weight of 50 receives five times as much traffic as -// one with a relative weight of 10. +// one with a relative weight of 10. The total weight for all targets combined +// must be less than or equal to 100. type WeightedTarget struct { _ struct{} `type:"structure"` - // The virtual node to associate with the weighted target. - // // VirtualNode is a required field VirtualNode *string `locationName:"virtualNode" min:"1" type:"string" required:"true"` - // The relative weight of the weighted target. - // // Weight is a required field Weight *int64 `locationName:"weight" type:"integer" required:"true"` } @@ -8259,6 +8867,14 @@ func (s *WeightedTarget) SetWeight(v int64) *WeightedTarget { return s } +const ( + // DurationUnitMs is a DurationUnit enum value + DurationUnitMs = "ms" + + // DurationUnitS is a DurationUnit enum value + DurationUnitS = "s" +) + const ( // EgressFilterTypeAllowAll is a EgressFilterType enum value EgressFilterTypeAllowAll = "ALLOW_ALL" @@ -8267,6 +8883,60 @@ const ( EgressFilterTypeDropAll = "DROP_ALL" ) +const ( + // GrpcRetryPolicyEventCancelled is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventCancelled = "cancelled" + + // GrpcRetryPolicyEventDeadlineExceeded is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventDeadlineExceeded = "deadline-exceeded" + + // GrpcRetryPolicyEventInternal is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventInternal = "internal" + + // GrpcRetryPolicyEventResourceExhausted is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventResourceExhausted = "resource-exhausted" + + // GrpcRetryPolicyEventUnavailable is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventUnavailable = "unavailable" +) + +const ( + // HttpMethodConnect is a HttpMethod enum value + HttpMethodConnect = "CONNECT" + + // HttpMethodDelete is a HttpMethod enum value + HttpMethodDelete = "DELETE" + + // HttpMethodGet is a HttpMethod enum value + HttpMethodGet = "GET" + + // HttpMethodHead is a HttpMethod enum value + HttpMethodHead = "HEAD" + + // HttpMethodOptions is a HttpMethod enum value + HttpMethodOptions = "OPTIONS" + + // HttpMethodPatch is a HttpMethod enum value + HttpMethodPatch = "PATCH" + + // HttpMethodPost is a HttpMethod enum value + HttpMethodPost = "POST" + + // HttpMethodPut is a HttpMethod enum value + HttpMethodPut = 
"PUT" + + // HttpMethodTrace is a HttpMethod enum value + HttpMethodTrace = "TRACE" +) + +const ( + // HttpSchemeHttp is a HttpScheme enum value + HttpSchemeHttp = "http" + + // HttpSchemeHttps is a HttpScheme enum value + HttpSchemeHttps = "https" +) + const ( // MeshStatusCodeActive is a MeshStatusCode enum value MeshStatusCodeActive = "ACTIVE" @@ -8279,9 +8949,15 @@ const ( ) const ( + // PortProtocolGrpc is a PortProtocol enum value + PortProtocolGrpc = "grpc" + // PortProtocolHttp is a PortProtocol enum value PortProtocolHttp = "http" + // PortProtocolHttp2 is a PortProtocol enum value + PortProtocolHttp2 = "http2" + // PortProtocolTcp is a PortProtocol enum value PortProtocolTcp = "tcp" ) @@ -8297,6 +8973,11 @@ const ( RouteStatusCodeInactive = "INACTIVE" ) +const ( + // TcpRetryPolicyEventConnectionError is a TcpRetryPolicyEvent enum value + TcpRetryPolicyEventConnectionError = "connection-error" +) + const ( // VirtualNodeStatusCodeActive is a VirtualNodeStatusCode enum value VirtualNodeStatusCodeActive = "ACTIVE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go index c855b698568..09a02953834 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go @@ -10,13 +10,11 @@ // // App Mesh gives you consistent visibility and network traffic controls for // every microservice in an application. You can use App Mesh with AWS Fargate, -// Amazon ECS, Amazon EKS, and Kubernetes on AWS. +// Amazon ECS, Amazon EKS, Kubernetes on AWS, and Amazon EC2. // // App Mesh supports microservice applications that use service discovery naming -// for their components. To use App Mesh, you must have an application running -// on Amazon EC2 instances, hosted in either Amazon ECS, Amazon EKS, or Kubernetes -// on AWS. For more information about service discovery on Amazon ECS, see Service -// Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) +// for their components. For more information about service discovery on Amazon +// ECS, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) // in the Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns // and coredns are supported. For more information, see DNS for Services and // Pods (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go index 88ea209b40e..962ada5cea9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppMesh { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appmesh" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppMesh { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppMesh { svc := &AppMesh{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-01-25", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go new file mode 100644 index 00000000000..933088a1b45 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go @@ -0,0 +1,11393 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package appstream + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssociateFleet = "AssociateFleet" + +// AssociateFleetRequest generates a "aws/request.Request" representing the +// client's request for the AssociateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateFleet for more information on using the AssociateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateFleetRequest method. +// req, resp := client.AssociateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/AssociateFleet +func (c *AppStream) AssociateFleetRequest(input *AssociateFleetInput) (req *request.Request, output *AssociateFleetOutput) { + op := &request.Operation{ + Name: opAssociateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateFleetInput{} + } + + output = &AssociateFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateFleet API operation for Amazon AppStream. +// +// Associates the specified fleet with the specified stack. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation AssociateFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. 
For +// assistance, contact AWS Support. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/AssociateFleet +func (c *AppStream) AssociateFleet(input *AssociateFleetInput) (*AssociateFleetOutput, error) { + req, out := c.AssociateFleetRequest(input) + return out, req.Send() +} + +// AssociateFleetWithContext is the same as AssociateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) AssociateFleetWithContext(ctx aws.Context, input *AssociateFleetInput, opts ...request.Option) (*AssociateFleetOutput, error) { + req, out := c.AssociateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchAssociateUserStack = "BatchAssociateUserStack" + +// BatchAssociateUserStackRequest generates a "aws/request.Request" representing the +// client's request for the BatchAssociateUserStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchAssociateUserStack for more information on using the BatchAssociateUserStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchAssociateUserStackRequest method. +// req, resp := client.BatchAssociateUserStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchAssociateUserStack +func (c *AppStream) BatchAssociateUserStackRequest(input *BatchAssociateUserStackInput) (req *request.Request, output *BatchAssociateUserStackOutput) { + op := &request.Operation{ + Name: opBatchAssociateUserStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchAssociateUserStackInput{} + } + + output = &BatchAssociateUserStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchAssociateUserStack API operation for Amazon AppStream. +// +// Associates the specified users with the specified stacks. Users in a user +// pool cannot be assigned to stacks with fleets that are joined to an Active +// Directory domain. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation BatchAssociateUserStack for usage and error information. +// +// Returned Error Codes: +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchAssociateUserStack +func (c *AppStream) BatchAssociateUserStack(input *BatchAssociateUserStackInput) (*BatchAssociateUserStackOutput, error) { + req, out := c.BatchAssociateUserStackRequest(input) + return out, req.Send() +} + +// BatchAssociateUserStackWithContext is the same as BatchAssociateUserStack with the addition of +// the ability to pass a context and additional request options. +// +// See BatchAssociateUserStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) BatchAssociateUserStackWithContext(ctx aws.Context, input *BatchAssociateUserStackInput, opts ...request.Option) (*BatchAssociateUserStackOutput, error) { + req, out := c.BatchAssociateUserStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchDisassociateUserStack = "BatchDisassociateUserStack" + +// BatchDisassociateUserStackRequest generates a "aws/request.Request" representing the +// client's request for the BatchDisassociateUserStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchDisassociateUserStack for more information on using the BatchDisassociateUserStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchDisassociateUserStackRequest method. +// req, resp := client.BatchDisassociateUserStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchDisassociateUserStack +func (c *AppStream) BatchDisassociateUserStackRequest(input *BatchDisassociateUserStackInput) (req *request.Request, output *BatchDisassociateUserStackOutput) { + op := &request.Operation{ + Name: opBatchDisassociateUserStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDisassociateUserStackInput{} + } + + output = &BatchDisassociateUserStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchDisassociateUserStack API operation for Amazon AppStream. +// +// Disassociates the specified users from the specified stacks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
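
The generated docs keep repeating the same error-handling advice: assert the returned error to awserr.Error and branch on Code. A sketch against AssociateFleet, using error-code constants named in this file:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/appstream"
)

// associate wraps AssociateFleet and inspects the error code the way the
// generated docs above suggest, via an awserr.Error type assertion.
func associate(svc *appstream.AppStream, in *appstream.AssociateFleetInput) {
	if _, err := svc.AssociateFleet(in); err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case appstream.ErrCodeResourceNotFoundException:
				fmt.Println("no such fleet or stack:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		fmt.Println(err)
	}
}
```
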
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation BatchDisassociateUserStack for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchDisassociateUserStack +func (c *AppStream) BatchDisassociateUserStack(input *BatchDisassociateUserStackInput) (*BatchDisassociateUserStackOutput, error) { + req, out := c.BatchDisassociateUserStackRequest(input) + return out, req.Send() +} + +// BatchDisassociateUserStackWithContext is the same as BatchDisassociateUserStack with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDisassociateUserStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) BatchDisassociateUserStackWithContext(ctx aws.Context, input *BatchDisassociateUserStackInput, opts ...request.Option) (*BatchDisassociateUserStackOutput, error) { + req, out := c.BatchDisassociateUserStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyImage for more information on using the CopyImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyImageRequest method. +// req, resp := client.CopyImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CopyImage +func (c *AppStream) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + output = &CopyImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyImage API operation for Amazon AppStream. +// +// Copies the image within the same region or to a new region within the same +// AWS account. Note that any tags you added to the image will not be copied. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CopyImage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. 
+// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CopyImage +func (c *AppStream) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + return out, req.Send() +} + +// CopyImageWithContext is the same as CopyImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CopyImageWithContext(ctx aws.Context, input *CopyImageInput, opts ...request.Option) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDirectoryConfig = "CreateDirectoryConfig" + +// CreateDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateDirectoryConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDirectoryConfig for more information on using the CreateDirectoryConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDirectoryConfigRequest method. +// req, resp := client.CreateDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfig +func (c *AppStream) CreateDirectoryConfigRequest(input *CreateDirectoryConfigInput) (req *request.Request, output *CreateDirectoryConfigOutput) { + op := &request.Operation{ + Name: opCreateDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDirectoryConfigInput{} + } + + output = &CreateDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDirectoryConfig API operation for Amazon AppStream. +// +// Creates a Directory Config object in AppStream 2.0. This object includes +// the configuration information required to join fleets and image builders +// to Microsoft Active Directory domains. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfig +func (c *AppStream) CreateDirectoryConfig(input *CreateDirectoryConfigInput) (*CreateDirectoryConfigOutput, error) { + req, out := c.CreateDirectoryConfigRequest(input) + return out, req.Send() +} + +// CreateDirectoryConfigWithContext is the same as CreateDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDirectoryConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateDirectoryConfigWithContext(ctx aws.Context, input *CreateDirectoryConfigInput, opts ...request.Option) (*CreateDirectoryConfigOutput, error) { + req, out := c.CreateDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFleet = "CreateFleet" + +// CreateFleetRequest generates a "aws/request.Request" representing the +// client's request for the CreateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFleet for more information on using the CreateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFleetRequest method. +// req, resp := client.CreateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateFleet +func (c *AppStream) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { + op := &request.Operation{ + Name: opCreateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFleetInput{} + } + + output = &CreateFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFleet API operation for Amazon AppStream. +// +// Creates a fleet. A fleet consists of streaming instances that run a specified +// image. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateFleet +func (c *AppStream) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + return out, req.Send() +} + +// CreateFleetWithContext is the same as CreateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateImageBuilder = "CreateImageBuilder" + +// CreateImageBuilderRequest generates a "aws/request.Request" representing the +// client's request for the CreateImageBuilder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateImageBuilder for more information on using the CreateImageBuilder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateImageBuilderRequest method. 
+// req, resp := client.CreateImageBuilderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateImageBuilder +func (c *AppStream) CreateImageBuilderRequest(input *CreateImageBuilderInput) (req *request.Request, output *CreateImageBuilderOutput) { + op := &request.Operation{ + Name: opCreateImageBuilder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageBuilderInput{} + } + + output = &CreateImageBuilderOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateImageBuilder API operation for Amazon AppStream. +// +// Creates an image builder. An image builder is a virtual machine that is used +// to create an image. +// +// The initial state of the builder is PENDING. When it is ready, the state +// is RUNNING. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateImageBuilder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateImageBuilder +func (c *AppStream) CreateImageBuilder(input *CreateImageBuilderInput) (*CreateImageBuilderOutput, error) { + req, out := c.CreateImageBuilderRequest(input) + return out, req.Send() +} + +// CreateImageBuilderWithContext is the same as CreateImageBuilder with the addition of +// the ability to pass a context and additional request options. +// +// See CreateImageBuilder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AppStream) CreateImageBuilderWithContext(ctx aws.Context, input *CreateImageBuilderInput, opts ...request.Option) (*CreateImageBuilderOutput, error) { + req, out := c.CreateImageBuilderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateImageBuilderStreamingURL = "CreateImageBuilderStreamingURL" + +// CreateImageBuilderStreamingURLRequest generates a "aws/request.Request" representing the +// client's request for the CreateImageBuilderStreamingURL operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateImageBuilderStreamingURL for more information on using the CreateImageBuilderStreamingURL +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateImageBuilderStreamingURLRequest method. +// req, resp := client.CreateImageBuilderStreamingURLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateImageBuilderStreamingURL +func (c *AppStream) CreateImageBuilderStreamingURLRequest(input *CreateImageBuilderStreamingURLInput) (req *request.Request, output *CreateImageBuilderStreamingURLOutput) { + op := &request.Operation{ + Name: opCreateImageBuilderStreamingURL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageBuilderStreamingURLInput{} + } + + output = &CreateImageBuilderStreamingURLOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateImageBuilderStreamingURL API operation for Amazon AppStream. +// +// Creates a URL to start an image builder streaming session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateImageBuilderStreamingURL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateImageBuilderStreamingURL +func (c *AppStream) CreateImageBuilderStreamingURL(input *CreateImageBuilderStreamingURLInput) (*CreateImageBuilderStreamingURLOutput, error) { + req, out := c.CreateImageBuilderStreamingURLRequest(input) + return out, req.Send() +} + +// CreateImageBuilderStreamingURLWithContext is the same as CreateImageBuilderStreamingURL with the addition of +// the ability to pass a context and additional request options. +// +// See CreateImageBuilderStreamingURL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateImageBuilderStreamingURLWithContext(ctx aws.Context, input *CreateImageBuilderStreamingURLInput, opts ...request.Option) (*CreateImageBuilderStreamingURLOutput, error) { + req, out := c.CreateImageBuilderStreamingURLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateStack for more information on using the CreateStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateStackRequest method. +// req, resp := client.CreateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateStack +func (c *AppStream) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + output = &CreateStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStack API operation for Amazon AppStream. +// +// Creates a stack to start streaming applications to users. A stack consists +// of an associated fleet, user access policies, and storage configurations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateStack for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. 
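+//
+// A minimal sketch that creates a stack and inspects a service error, assuming
+// an initialized client c; the stack name is a placeholder:
+//
+//    _, err := c.CreateStack(&appstream.CreateStackInput{
+//        Name: aws.String("example-stack"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }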
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateStack +func (c *AppStream) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + return out, req.Send() +} + +// CreateStackWithContext is the same as CreateStack with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateStackWithContext(ctx aws.Context, input *CreateStackInput, opts ...request.Option) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateStreamingURL = "CreateStreamingURL" + +// CreateStreamingURLRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingURL operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateStreamingURL for more information on using the CreateStreamingURL +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateStreamingURLRequest method. +// req, resp := client.CreateStreamingURLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateStreamingURL +func (c *AppStream) CreateStreamingURLRequest(input *CreateStreamingURLInput) (req *request.Request, output *CreateStreamingURLOutput) { + op := &request.Operation{ + Name: opCreateStreamingURL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStreamingURLInput{} + } + + output = &CreateStreamingURLOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStreamingURL API operation for Amazon AppStream. +// +// Creates a temporary URL to start an AppStream 2.0 streaming session for the +// specified user. A streaming URL enables application streaming to be tested +// without user setup. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateStreamingURL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. 
+// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateStreamingURL +func (c *AppStream) CreateStreamingURL(input *CreateStreamingURLInput) (*CreateStreamingURLOutput, error) { + req, out := c.CreateStreamingURLRequest(input) + return out, req.Send() +} + +// CreateStreamingURLWithContext is the same as CreateStreamingURL with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStreamingURL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateStreamingURLWithContext(ctx aws.Context, input *CreateStreamingURLInput, opts ...request.Option) (*CreateStreamingURLOutput, error) { + req, out := c.CreateStreamingURLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateUsageReportSubscription = "CreateUsageReportSubscription" + +// CreateUsageReportSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateUsageReportSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateUsageReportSubscription for more information on using the CreateUsageReportSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateUsageReportSubscriptionRequest method. +// req, resp := client.CreateUsageReportSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUsageReportSubscription +func (c *AppStream) CreateUsageReportSubscriptionRequest(input *CreateUsageReportSubscriptionInput) (req *request.Request, output *CreateUsageReportSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateUsageReportSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUsageReportSubscriptionInput{} + } + + output = &CreateUsageReportSubscriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateUsageReportSubscription API operation for Amazon AppStream. +// +// Creates a usage report subscription. Usage reports are generated daily. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateUsageReportSubscription for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. 
+// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUsageReportSubscription +func (c *AppStream) CreateUsageReportSubscription(input *CreateUsageReportSubscriptionInput) (*CreateUsageReportSubscriptionOutput, error) { + req, out := c.CreateUsageReportSubscriptionRequest(input) + return out, req.Send() +} + +// CreateUsageReportSubscriptionWithContext is the same as CreateUsageReportSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUsageReportSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateUsageReportSubscriptionWithContext(ctx aws.Context, input *CreateUsageReportSubscriptionInput, opts ...request.Option) (*CreateUsageReportSubscriptionOutput, error) { + req, out := c.CreateUsageReportSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest generates a "aws/request.Request" representing the +// client's request for the CreateUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateUser for more information on using the CreateUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateUserRequest method. +// req, resp := client.CreateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUser +func (c *AppStream) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { + op := &request.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + output = &CreateUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateUser API operation for Amazon AppStream. +// +// Creates a new user in the user pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateUser for usage and error information. 
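+//
+// A minimal sketch, assuming an initialized client c; the user name is a
+// placeholder, and the welcome email is suppressed:
+//
+//    _, err := c.CreateUser(&appstream.CreateUserInput{
+//        AuthenticationType: aws.String(appstream.AuthenticationTypeUserpool),
+//        MessageAction:      aws.String(appstream.MessageActionSuppress),
+//        UserName:           aws.String("user@example.com"),
+//    })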
+// +// Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUser +func (c *AppStream) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + return out, req.Send() +} + +// CreateUserWithContext is the same as CreateUser with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateUserWithContext(ctx aws.Context, input *CreateUserInput, opts ...request.Option) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDirectoryConfig = "DeleteDirectoryConfig" + +// DeleteDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDirectoryConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDirectoryConfig for more information on using the DeleteDirectoryConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDirectoryConfigRequest method. +// req, resp := client.DeleteDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfig +func (c *AppStream) DeleteDirectoryConfigRequest(input *DeleteDirectoryConfigInput) (req *request.Request, output *DeleteDirectoryConfigOutput) { + op := &request.Operation{ + Name: opDeleteDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDirectoryConfigInput{} + } + + output = &DeleteDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDirectoryConfig API operation for Amazon AppStream. +// +// Deletes the specified Directory Config object from AppStream 2.0. 
This object +// includes the information required to join streaming instances to an Active +// Directory domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfig +func (c *AppStream) DeleteDirectoryConfig(input *DeleteDirectoryConfigInput) (*DeleteDirectoryConfigOutput, error) { + req, out := c.DeleteDirectoryConfigRequest(input) + return out, req.Send() +} + +// DeleteDirectoryConfigWithContext is the same as DeleteDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDirectoryConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteDirectoryConfigWithContext(ctx aws.Context, input *DeleteDirectoryConfigInput, opts ...request.Option) (*DeleteDirectoryConfigOutput, error) { + req, out := c.DeleteDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFleet = "DeleteFleet" + +// DeleteFleetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFleet for more information on using the DeleteFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFleetRequest method. +// req, resp := client.DeleteFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteFleet +func (c *AppStream) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Request, output *DeleteFleetOutput) { + op := &request.Operation{ + Name: opDeleteFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFleetInput{} + } + + output = &DeleteFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteFleet API operation for Amazon AppStream. +// +// Deletes the specified fleet. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteFleet +func (c *AppStream) DeleteFleet(input *DeleteFleetInput) (*DeleteFleetOutput, error) { + req, out := c.DeleteFleetRequest(input) + return out, req.Send() +} + +// DeleteFleetWithContext is the same as DeleteFleet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInput, opts ...request.Option) (*DeleteFleetOutput, error) { + req, out := c.DeleteFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteImage = "DeleteImage" + +// DeleteImageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteImage for more information on using the DeleteImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteImageRequest method. +// req, resp := client.DeleteImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImage +func (c *AppStream) DeleteImageRequest(input *DeleteImageInput) (req *request.Request, output *DeleteImageOutput) { + op := &request.Operation{ + Name: opDeleteImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteImageInput{} + } + + output = &DeleteImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteImage API operation for Amazon AppStream. +// +// Deletes the specified image. You cannot delete an image when it is in use. +// After you delete an image, you cannot provision new capacity using the image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
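+//
+// For example, a caller might special-case the in-use error; a sketch, assuming
+// an initialized client c and a placeholder image name:
+//
+//    _, err := c.DeleteImage(&appstream.DeleteImageInput{
+//        Name: aws.String("example-image"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == appstream.ErrCodeResourceInUseException {
+//        // Image is still attached to a fleet or image builder; retry later.
+//    }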
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteImage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImage +func (c *AppStream) DeleteImage(input *DeleteImageInput) (*DeleteImageOutput, error) { + req, out := c.DeleteImageRequest(input) + return out, req.Send() +} + +// DeleteImageWithContext is the same as DeleteImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteImageWithContext(ctx aws.Context, input *DeleteImageInput, opts ...request.Option) (*DeleteImageOutput, error) { + req, out := c.DeleteImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteImageBuilder = "DeleteImageBuilder" + +// DeleteImageBuilderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteImageBuilder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteImageBuilder for more information on using the DeleteImageBuilder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteImageBuilderRequest method. +// req, resp := client.DeleteImageBuilderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImageBuilder +func (c *AppStream) DeleteImageBuilderRequest(input *DeleteImageBuilderInput) (req *request.Request, output *DeleteImageBuilderOutput) { + op := &request.Operation{ + Name: opDeleteImageBuilder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteImageBuilderInput{} + } + + output = &DeleteImageBuilderOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteImageBuilder API operation for Amazon AppStream. +// +// Deletes the specified image builder and releases the capacity. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteImageBuilder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImageBuilder +func (c *AppStream) DeleteImageBuilder(input *DeleteImageBuilderInput) (*DeleteImageBuilderOutput, error) { + req, out := c.DeleteImageBuilderRequest(input) + return out, req.Send() +} + +// DeleteImageBuilderWithContext is the same as DeleteImageBuilder with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteImageBuilder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteImageBuilderWithContext(ctx aws.Context, input *DeleteImageBuilderInput, opts ...request.Option) (*DeleteImageBuilderOutput, error) { + req, out := c.DeleteImageBuilderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteImagePermissions = "DeleteImagePermissions" + +// DeleteImagePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteImagePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteImagePermissions for more information on using the DeleteImagePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteImagePermissionsRequest method. +// req, resp := client.DeleteImagePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImagePermissions +func (c *AppStream) DeleteImagePermissionsRequest(input *DeleteImagePermissionsInput) (req *request.Request, output *DeleteImagePermissionsOutput) { + op := &request.Operation{ + Name: opDeleteImagePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteImagePermissionsInput{} + } + + output = &DeleteImagePermissionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteImagePermissions API operation for Amazon AppStream. +// +// Deletes permissions for the specified private image. After you delete permissions +// for an image, AWS accounts to which you previously granted these permissions +// can no longer use the image. 
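+//
+// A minimal sketch that revokes sharing from a single account, assuming an
+// initialized client c; the image name and account ID are placeholders:
+//
+//    _, err := c.DeleteImagePermissions(&appstream.DeleteImagePermissionsInput{
+//        Name:            aws.String("example-image"),
+//        SharedAccountId: aws.String("123456789012"),
+//    })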
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteImagePermissions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteImagePermissions +func (c *AppStream) DeleteImagePermissions(input *DeleteImagePermissionsInput) (*DeleteImagePermissionsOutput, error) { + req, out := c.DeleteImagePermissionsRequest(input) + return out, req.Send() +} + +// DeleteImagePermissionsWithContext is the same as DeleteImagePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteImagePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteImagePermissionsWithContext(ctx aws.Context, input *DeleteImagePermissionsInput, opts ...request.Option) (*DeleteImagePermissionsOutput, error) { + req, out := c.DeleteImagePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteStack for more information on using the DeleteStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteStackRequest method. +// req, resp := client.DeleteStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteStack +func (c *AppStream) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + output = &DeleteStackOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteStack API operation for Amazon AppStream. +// +// Deletes the specified stack. After the stack is deleted, the application +// streaming environment provided by the stack is no longer available to users. +// Also, any reservations made for application streaming sessions for the stack +// are released. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteStack for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteStack +func (c *AppStream) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + return out, req.Send() +} + +// DeleteStackWithContext is the same as DeleteStack with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteStackWithContext(ctx aws.Context, input *DeleteStackInput, opts ...request.Option) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteUsageReportSubscription = "DeleteUsageReportSubscription" + +// DeleteUsageReportSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUsageReportSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUsageReportSubscription for more information on using the DeleteUsageReportSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUsageReportSubscriptionRequest method. +// req, resp := client.DeleteUsageReportSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteUsageReportSubscription +func (c *AppStream) DeleteUsageReportSubscriptionRequest(input *DeleteUsageReportSubscriptionInput) (req *request.Request, output *DeleteUsageReportSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteUsageReportSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUsageReportSubscriptionInput{} + } + + output = &DeleteUsageReportSubscriptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUsageReportSubscription API operation for Amazon AppStream. 
+// +// Disables usage report generation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteUsageReportSubscription for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteUsageReportSubscription +func (c *AppStream) DeleteUsageReportSubscription(input *DeleteUsageReportSubscriptionInput) (*DeleteUsageReportSubscriptionOutput, error) { + req, out := c.DeleteUsageReportSubscriptionRequest(input) + return out, req.Send() +} + +// DeleteUsageReportSubscriptionWithContext is the same as DeleteUsageReportSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUsageReportSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteUsageReportSubscriptionWithContext(ctx aws.Context, input *DeleteUsageReportSubscriptionInput, opts ...request.Option) (*DeleteUsageReportSubscriptionOutput, error) { + req, out := c.DeleteUsageReportSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUser for more information on using the DeleteUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUserRequest method. +// req, resp := client.DeleteUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteUser +func (c *AppStream) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { + op := &request.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + output = &DeleteUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUser API operation for Amazon AppStream. +// +// Deletes a user from the user pool. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteUser +func (c *AppStream) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + return out, req.Send() +} + +// DeleteUserWithContext is the same as DeleteUser with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput, opts ...request.Option) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDirectoryConfigs = "DescribeDirectoryConfigs" + +// DescribeDirectoryConfigsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDirectoryConfigs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDirectoryConfigs for more information on using the DescribeDirectoryConfigs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDirectoryConfigsRequest method. +// req, resp := client.DescribeDirectoryConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigs +func (c *AppStream) DescribeDirectoryConfigsRequest(input *DescribeDirectoryConfigsInput) (req *request.Request, output *DescribeDirectoryConfigsOutput) { + op := &request.Operation{ + Name: opDescribeDirectoryConfigs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDirectoryConfigsInput{} + } + + output = &DescribeDirectoryConfigsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDirectoryConfigs API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified Directory Config objects +// for AppStream 2.0, if the names for these objects are provided. Otherwise, +// all Directory Config objects in the account are described. These objects +// include the configuration information required to join fleets and image builders +// to Microsoft Active Directory domains. +// +// Although the response syntax in this topic includes the account password, +// this password is not returned in the actual response. 
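A minimal sketch of the DeleteUser call generated above, assuming a placeholder user name; AuthenticationTypeUserpool is one of the enum constants this generated package defines:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// Both fields are required; the email address is a placeholder.
	_, err := client.DeleteUser(&appstream.DeleteUserInput{
		UserName:           aws.String("user@example.com"),
		AuthenticationType: aws.String(appstream.AuthenticationTypeUserpool),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("user deleted from the user pool")
}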
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeDirectoryConfigs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigs +func (c *AppStream) DescribeDirectoryConfigs(input *DescribeDirectoryConfigsInput) (*DescribeDirectoryConfigsOutput, error) { + req, out := c.DescribeDirectoryConfigsRequest(input) + return out, req.Send() +} + +// DescribeDirectoryConfigsWithContext is the same as DescribeDirectoryConfigs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDirectoryConfigs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeDirectoryConfigsWithContext(ctx aws.Context, input *DescribeDirectoryConfigsInput, opts ...request.Option) (*DescribeDirectoryConfigsOutput, error) { + req, out := c.DescribeDirectoryConfigsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleets = "DescribeFleets" + +// DescribeFleetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleets for more information on using the DescribeFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetsRequest method. +// req, resp := client.DescribeFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeFleets +func (c *AppStream) DescribeFleetsRequest(input *DescribeFleetsInput) (req *request.Request, output *DescribeFleetsOutput) { + op := &request.Operation{ + Name: opDescribeFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetsInput{} + } + + output = &DescribeFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleets API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified fleets, if the fleet +// names are provided. Otherwise, all fleets in the account are described. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
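A short sketch of the "describe everything" behavior the comment above notes: with an empty input, DescribeFleets returns all fleets in the account. Field names follow this SDK revision:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// No Names filter: every fleet in the account is described.
	out, err := client.DescribeFleets(&appstream.DescribeFleetsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, fleet := range out.Fleets {
		fmt.Printf("%s\t%s\n", aws.StringValue(fleet.Name), aws.StringValue(fleet.State))
	}
}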
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeFleets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeFleets +func (c *AppStream) DescribeFleets(input *DescribeFleetsInput) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + return out, req.Send() +} + +// DescribeFleetsWithContext is the same as DescribeFleets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeFleetsWithContext(ctx aws.Context, input *DescribeFleetsInput, opts ...request.Option) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImageBuilders = "DescribeImageBuilders" + +// DescribeImageBuildersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageBuilders operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageBuilders for more information on using the DescribeImageBuilders +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageBuildersRequest method. +// req, resp := client.DescribeImageBuildersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImageBuilders +func (c *AppStream) DescribeImageBuildersRequest(input *DescribeImageBuildersInput) (req *request.Request, output *DescribeImageBuildersOutput) { + op := &request.Operation{ + Name: opDescribeImageBuilders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImageBuildersInput{} + } + + output = &DescribeImageBuildersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageBuilders API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified image builders, if +// the image builder names are provided. Otherwise, all image builders in the +// account are described. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeImageBuilders for usage and error information. 
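One way to use the Request form these comments describe for injecting custom logic before Send; the HTTP header set here is purely illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// Build the request without sending it, then customize it.
	req, resp := client.DescribeImageBuildersRequest(&appstream.DescribeImageBuildersInput{})
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-1234") // illustrative header

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // resp is populated only after Send returns without error.
}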
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImageBuilders +func (c *AppStream) DescribeImageBuilders(input *DescribeImageBuildersInput) (*DescribeImageBuildersOutput, error) { + req, out := c.DescribeImageBuildersRequest(input) + return out, req.Send() +} + +// DescribeImageBuildersWithContext is the same as DescribeImageBuilders with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageBuilders for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeImageBuildersWithContext(ctx aws.Context, input *DescribeImageBuildersInput, opts ...request.Option) (*DescribeImageBuildersOutput, error) { + req, out := c.DescribeImageBuildersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImagePermissions = "DescribeImagePermissions" + +// DescribeImagePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImagePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImagePermissions for more information on using the DescribeImagePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImagePermissionsRequest method. +// req, resp := client.DescribeImagePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImagePermissions +func (c *AppStream) DescribeImagePermissionsRequest(input *DescribeImagePermissionsInput) (req *request.Request, output *DescribeImagePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeImagePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImagePermissionsInput{} + } + + output = &DescribeImagePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImagePermissions API operation for Amazon AppStream. +// +// Retrieves a list that describes the permissions for shared AWS account IDs +// on a private image that you own. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeImagePermissions for usage and error information. 
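A hedged sketch of a single (non-paginated) DescribeImagePermissions call; the image name and the SharedAwsAccountIds filter value are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// Restrict results to permissions granted to one (placeholder) account.
	out, err := client.DescribeImagePermissions(&appstream.DescribeImagePermissionsInput{
		Name:                aws.String("example-private-image"),
		SharedAwsAccountIds: []*string{aws.String("123456789012")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}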
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImagePermissions +func (c *AppStream) DescribeImagePermissions(input *DescribeImagePermissionsInput) (*DescribeImagePermissionsOutput, error) { + req, out := c.DescribeImagePermissionsRequest(input) + return out, req.Send() +} + +// DescribeImagePermissionsWithContext is the same as DescribeImagePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImagePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeImagePermissionsWithContext(ctx aws.Context, input *DescribeImagePermissionsInput, opts ...request.Option) (*DescribeImagePermissionsOutput, error) { + req, out := c.DescribeImagePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImagePermissionsPages iterates over the pages of a DescribeImagePermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImagePermissions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImagePermissions operation. +// pageNum := 0 +// err := client.DescribeImagePermissionsPages(params, +// func(page *appstream.DescribeImagePermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AppStream) DescribeImagePermissionsPages(input *DescribeImagePermissionsInput, fn func(*DescribeImagePermissionsOutput, bool) bool) error { + return c.DescribeImagePermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImagePermissionsPagesWithContext same as DescribeImagePermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeImagePermissionsPagesWithContext(ctx aws.Context, input *DescribeImagePermissionsInput, fn func(*DescribeImagePermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImagePermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagePermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImagePermissionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImages operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImages for more information on using the DescribeImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImagesRequest method. +// req, resp := client.DescribeImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImages +func (c *AppStream) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { + op := &request.Operation{ + Name: opDescribeImages, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImagesInput{} + } + + output = &DescribeImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImages API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified images, if the image +// names or image ARNs are provided. Otherwise, all images in the account are +// described. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeImages for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImages +func (c *AppStream) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + return out, req.Send() +} + +// DescribeImagesWithContext is the same as DescribeImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImagesPages iterates over the pages of a DescribeImages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeImages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImages operation. +// pageNum := 0 +// err := client.DescribeImagesPages(params, +// func(page *appstream.DescribeImagesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AppStream) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error { + return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImagesPagesWithContext same as DescribeImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSessions = "DescribeSessions" + +// DescribeSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSessions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSessions for more information on using the DescribeSessions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSessionsRequest method. +// req, resp := client.DescribeSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeSessions +func (c *AppStream) DescribeSessionsRequest(input *DescribeSessionsInput) (req *request.Request, output *DescribeSessionsOutput) { + op := &request.Operation{ + Name: opDescribeSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSessionsInput{} + } + + output = &DescribeSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSessions API operation for Amazon AppStream. +// +// Retrieves a list that describes the streaming sessions for a specified stack +// and fleet. If a UserId is provided for the stack and fleet, only streaming +// sessions for that user are described. If an authentication type is not provided, +// the default is to authenticate users using a streaming URL. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeSessions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeSessions +func (c *AppStream) DescribeSessions(input *DescribeSessionsInput) (*DescribeSessionsOutput, error) { + req, out := c.DescribeSessionsRequest(input) + return out, req.Send() +} + +// DescribeSessionsWithContext is the same as DescribeSessions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeSessionsWithContext(ctx aws.Context, input *DescribeSessionsInput, opts ...request.Option) (*DescribeSessionsOutput, error) { + req, out := c.DescribeSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStacks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeStacks for more information on using the DescribeStacks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeStacksRequest method. +// req, resp := client.DescribeStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeStacks +func (c *AppStream) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStacksInput{} + } + + output = &DescribeStacksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStacks API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified stacks, if the stack +// names are provided. Otherwise, all stacks in the account are described. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeStacks for usage and error information. 
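A minimal DescribeSessions sketch with placeholder stack and fleet names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// Stack and fleet names are required; both are placeholders here.
	out, err := client.DescribeSessions(&appstream.DescribeSessionsInput{
		StackName: aws.String("example-stack"),
		FleetName: aws.String("example-fleet"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range out.Sessions {
		fmt.Printf("%s\t%s\n", aws.StringValue(s.Id), aws.StringValue(s.State))
	}
}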
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeStacks +func (c *AppStream) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + return out, req.Send() +} + +// DescribeStacksWithContext is the same as DescribeStacks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStacks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeStacksWithContext(ctx aws.Context, input *DescribeStacksInput, opts ...request.Option) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeUsageReportSubscriptions = "DescribeUsageReportSubscriptions" + +// DescribeUsageReportSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUsageReportSubscriptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeUsageReportSubscriptions for more information on using the DescribeUsageReportSubscriptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeUsageReportSubscriptionsRequest method. +// req, resp := client.DescribeUsageReportSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUsageReportSubscriptions +func (c *AppStream) DescribeUsageReportSubscriptionsRequest(input *DescribeUsageReportSubscriptionsInput) (req *request.Request, output *DescribeUsageReportSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeUsageReportSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUsageReportSubscriptionsInput{} + } + + output = &DescribeUsageReportSubscriptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeUsageReportSubscriptions API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more usage report subscriptions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeUsageReportSubscriptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. 
+// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUsageReportSubscriptions +func (c *AppStream) DescribeUsageReportSubscriptions(input *DescribeUsageReportSubscriptionsInput) (*DescribeUsageReportSubscriptionsOutput, error) { + req, out := c.DescribeUsageReportSubscriptionsRequest(input) + return out, req.Send() +} + +// DescribeUsageReportSubscriptionsWithContext is the same as DescribeUsageReportSubscriptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeUsageReportSubscriptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeUsageReportSubscriptionsWithContext(ctx aws.Context, input *DescribeUsageReportSubscriptionsInput, opts ...request.Option) (*DescribeUsageReportSubscriptionsOutput, error) { + req, out := c.DescribeUsageReportSubscriptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeUserStackAssociations = "DescribeUserStackAssociations" + +// DescribeUserStackAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserStackAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeUserStackAssociations for more information on using the DescribeUserStackAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeUserStackAssociationsRequest method. +// req, resp := client.DescribeUserStackAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUserStackAssociations +func (c *AppStream) DescribeUserStackAssociationsRequest(input *DescribeUserStackAssociationsInput) (req *request.Request, output *DescribeUserStackAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeUserStackAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUserStackAssociationsInput{} + } + + output = &DescribeUserStackAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeUserStackAssociations API operation for Amazon AppStream. +// +// Retrieves a list that describes the UserStackAssociation objects. You must +// specify either or both of the following: +// +// * The stack name +// +// * The user name (email address of the user associated with the stack) +// and the authentication type for the user +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeUserStackAssociations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUserStackAssociations +func (c *AppStream) DescribeUserStackAssociations(input *DescribeUserStackAssociationsInput) (*DescribeUserStackAssociationsOutput, error) { + req, out := c.DescribeUserStackAssociationsRequest(input) + return out, req.Send() +} + +// DescribeUserStackAssociationsWithContext is the same as DescribeUserStackAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeUserStackAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeUserStackAssociationsWithContext(ctx aws.Context, input *DescribeUserStackAssociationsInput, opts ...request.Option) (*DescribeUserStackAssociationsOutput, error) { + req, out := c.DescribeUserStackAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeUsers = "DescribeUsers" + +// DescribeUsersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUsers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeUsers for more information on using the DescribeUsers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeUsersRequest method. +// req, resp := client.DescribeUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUsers +func (c *AppStream) DescribeUsersRequest(input *DescribeUsersInput) (req *request.Request, output *DescribeUsersOutput) { + op := &request.Operation{ + Name: opDescribeUsers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUsersInput{} + } + + output = &DescribeUsersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeUsers API operation for Amazon AppStream. +// +// Retrieves a list that describes one or more specified users in the user pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
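A short sketch of the DescribeUsers call generated above; AuthenticationType is the required field, and the USERPOOL constant is assumed here:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	// List all users of the given authentication type in the user pool.
	out, err := client.DescribeUsers(&appstream.DescribeUsersInput{
		AuthenticationType: aws.String(appstream.AuthenticationTypeUserpool),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range out.Users {
		fmt.Println(aws.StringValue(u.UserName))
	}
}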
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation DescribeUsers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUsers +func (c *AppStream) DescribeUsers(input *DescribeUsersInput) (*DescribeUsersOutput, error) { + req, out := c.DescribeUsersRequest(input) + return out, req.Send() +} + +// DescribeUsersWithContext is the same as DescribeUsers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeUsers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DescribeUsersWithContext(ctx aws.Context, input *DescribeUsersInput, opts ...request.Option) (*DescribeUsersOutput, error) { + req, out := c.DescribeUsersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableUser = "DisableUser" + +// DisableUserRequest generates a "aws/request.Request" representing the +// client's request for the DisableUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableUser for more information on using the DisableUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableUserRequest method. +// req, resp := client.DisableUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisableUser +func (c *AppStream) DisableUserRequest(input *DisableUserInput) (req *request.Request, output *DisableUserOutput) { + op := &request.Operation{ + Name: opDisableUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableUserInput{} + } + + output = &DisableUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisableUser API operation for Amazon AppStream. +// +// Disables the specified user in the user pool. Users can't sign in to AppStream +// 2.0 until they are re-enabled. This action does not delete the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DisableUser for usage and error information. 
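A sketch combining the WithContext variant with a request.Option; an Option is just a func(*request.Request), and the header this one sets is purely illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Tag the outgoing HTTP request with an illustrative audit header.
	tag := func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Audit", "disable-user")
	}

	// User name is a placeholder.
	_, err := client.DisableUserWithContext(ctx, &appstream.DisableUserInput{
		UserName:           aws.String("user@example.com"),
		AuthenticationType: aws.String(appstream.AuthenticationTypeUserpool),
	}, tag)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("user disabled")
}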
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisableUser +func (c *AppStream) DisableUser(input *DisableUserInput) (*DisableUserOutput, error) { + req, out := c.DisableUserRequest(input) + return out, req.Send() +} + +// DisableUserWithContext is the same as DisableUser with the addition of +// the ability to pass a context and additional request options. +// +// See DisableUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DisableUserWithContext(ctx aws.Context, input *DisableUserInput, opts ...request.Option) (*DisableUserOutput, error) { + req, out := c.DisableUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateFleet = "DisassociateFleet" + +// DisassociateFleetRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateFleet for more information on using the DisassociateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateFleetRequest method. +// req, resp := client.DisassociateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisassociateFleet +func (c *AppStream) DisassociateFleetRequest(input *DisassociateFleetInput) (req *request.Request, output *DisassociateFleetOutput) { + op := &request.Operation{ + Name: opDisassociateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateFleetInput{} + } + + output = &DisassociateFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateFleet API operation for Amazon AppStream. +// +// Disassociates the specified fleet from the specified stack. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation DisassociateFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. 
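Since ConcurrentModificationException is documented above as transient ("wait a few minutes and try again"), a crude hand-rolled retry sketch with placeholder names; production code would rely on the SDK's built-in retryer or back off more carefully:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	client := appstream.New(session.Must(session.NewSession()))

	input := &appstream.DisassociateFleetInput{
		FleetName: aws.String("example-fleet"), // placeholder
		StackName: aws.String("example-stack"), // placeholder
	}

	// Retry only on ConcurrentModificationException, up to three attempts.
	for attempt := 1; ; attempt++ {
		_, err := client.DisassociateFleet(input)
		if err == nil {
			log.Println("fleet disassociated")
			return
		}
		aerr, ok := err.(awserr.Error)
		if !ok || aerr.Code() != appstream.ErrCodeConcurrentModificationException || attempt >= 3 {
			log.Fatal(err)
		}
		time.Sleep(time.Duration(attempt*2) * time.Second) // simple linear backoff
	}
}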
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisassociateFleet +func (c *AppStream) DisassociateFleet(input *DisassociateFleetInput) (*DisassociateFleetOutput, error) { + req, out := c.DisassociateFleetRequest(input) + return out, req.Send() +} + +// DisassociateFleetWithContext is the same as DisassociateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DisassociateFleetWithContext(ctx aws.Context, input *DisassociateFleetInput, opts ...request.Option) (*DisassociateFleetOutput, error) { + req, out := c.DisassociateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableUser = "EnableUser" + +// EnableUserRequest generates a "aws/request.Request" representing the +// client's request for the EnableUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableUser for more information on using the EnableUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableUserRequest method. +// req, resp := client.EnableUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/EnableUser +func (c *AppStream) EnableUserRequest(input *EnableUserInput) (req *request.Request, output *EnableUserOutput) { + op := &request.Operation{ + Name: opEnableUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableUserInput{} + } + + output = &EnableUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableUser API operation for Amazon AppStream. +// +// Enables a user in the user pool. After being enabled, users can sign in to +// AppStream 2.0 and open applications from the stacks to which they are assigned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation EnableUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/EnableUser +func (c *AppStream) EnableUser(input *EnableUserInput) (*EnableUserOutput, error) { + req, out := c.EnableUserRequest(input) + return out, req.Send() +} + +// EnableUserWithContext is the same as EnableUser with the addition of +// the ability to pass a context and additional request options. +// +// See EnableUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) EnableUserWithContext(ctx aws.Context, input *EnableUserInput, opts ...request.Option) (*EnableUserOutput, error) { + req, out := c.EnableUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExpireSession = "ExpireSession" + +// ExpireSessionRequest generates a "aws/request.Request" representing the +// client's request for the ExpireSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExpireSession for more information on using the ExpireSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExpireSessionRequest method. +// req, resp := client.ExpireSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ExpireSession +func (c *AppStream) ExpireSessionRequest(input *ExpireSessionInput) (req *request.Request, output *ExpireSessionOutput) { + op := &request.Operation{ + Name: opExpireSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExpireSessionInput{} + } + + output = &ExpireSessionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ExpireSession API operation for Amazon AppStream. +// +// Immediately stops the specified streaming session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation ExpireSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ExpireSession +func (c *AppStream) ExpireSession(input *ExpireSessionInput) (*ExpireSessionOutput, error) { + req, out := c.ExpireSessionRequest(input) + return out, req.Send() +} + +// ExpireSessionWithContext is the same as ExpireSession with the addition of +// the ability to pass a context and additional request options. +// +// See ExpireSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) ExpireSessionWithContext(ctx aws.Context, input *ExpireSessionInput, opts ...request.Option) (*ExpireSessionOutput, error) { + req, out := c.ExpireSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAssociatedFleets = "ListAssociatedFleets" + +// ListAssociatedFleetsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssociatedFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAssociatedFleets for more information on using the ListAssociatedFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAssociatedFleetsRequest method. +// req, resp := client.ListAssociatedFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedFleets +func (c *AppStream) ListAssociatedFleetsRequest(input *ListAssociatedFleetsInput) (req *request.Request, output *ListAssociatedFleetsOutput) { + op := &request.Operation{ + Name: opListAssociatedFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssociatedFleetsInput{} + } + + output = &ListAssociatedFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAssociatedFleets API operation for Amazon AppStream. +// +// Retrieves the name of the fleet that is associated with the specified stack. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation ListAssociatedFleets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedFleets +func (c *AppStream) ListAssociatedFleets(input *ListAssociatedFleetsInput) (*ListAssociatedFleetsOutput, error) { + req, out := c.ListAssociatedFleetsRequest(input) + return out, req.Send() +} + +// ListAssociatedFleetsWithContext is the same as ListAssociatedFleets with the addition of +// the ability to pass a context and additional request options. +// +// See ListAssociatedFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) ListAssociatedFleetsWithContext(ctx aws.Context, input *ListAssociatedFleetsInput, opts ...request.Option) (*ListAssociatedFleetsOutput, error) { + req, out := c.ListAssociatedFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListAssociatedStacks = "ListAssociatedStacks" + +// ListAssociatedStacksRequest generates a "aws/request.Request" representing the +// client's request for the ListAssociatedStacks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAssociatedStacks for more information on using the ListAssociatedStacks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAssociatedStacksRequest method. +// req, resp := client.ListAssociatedStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedStacks +func (c *AppStream) ListAssociatedStacksRequest(input *ListAssociatedStacksInput) (req *request.Request, output *ListAssociatedStacksOutput) { + op := &request.Operation{ + Name: opListAssociatedStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssociatedStacksInput{} + } + + output = &ListAssociatedStacksOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAssociatedStacks API operation for Amazon AppStream. +// +// Retrieves the name of the stack with which the specified fleet is associated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation ListAssociatedStacks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedStacks +func (c *AppStream) ListAssociatedStacks(input *ListAssociatedStacksInput) (*ListAssociatedStacksOutput, error) { + req, out := c.ListAssociatedStacksRequest(input) + return out, req.Send() +} + +// ListAssociatedStacksWithContext is the same as ListAssociatedStacks with the addition of +// the ability to pass a context and additional request options. +// +// See ListAssociatedStacks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) ListAssociatedStacksWithContext(ctx aws.Context, input *ListAssociatedStacksInput, opts ...request.Option) (*ListAssociatedStacksOutput, error) { + req, out := c.ListAssociatedStacksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListTagsForResource +func (c *AppStream) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon AppStream. +// +// Retrieves a list of all tags for the specified AppStream 2.0 resource. You +// can tag AppStream 2.0 image builders, images, fleets, and stacks. +// +// For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html) +// in the Amazon AppStream 2.0 Administration Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListTagsForResource +func (c *AppStream) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartFleet = "StartFleet" + +// StartFleetRequest generates a "aws/request.Request" representing the +// client's request for the StartFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See StartFleet for more information on using the StartFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartFleetRequest method. +// req, resp := client.StartFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StartFleet +func (c *AppStream) StartFleetRequest(input *StartFleetInput) (req *request.Request, output *StartFleetOutput) { + op := &request.Operation{ + Name: opStartFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartFleetInput{} + } + + output = &StartFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartFleet API operation for Amazon AppStream. +// +// Starts the specified fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation StartFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StartFleet +func (c *AppStream) StartFleet(input *StartFleetInput) (*StartFleetOutput, error) { + req, out := c.StartFleetRequest(input) + return out, req.Send() +} + +// StartFleetWithContext is the same as StartFleet with the addition of +// the ability to pass a context and additional request options. +// +// See StartFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) StartFleetWithContext(ctx aws.Context, input *StartFleetInput, opts ...request.Option) (*StartFleetOutput, error) { + req, out := c.StartFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartImageBuilder = "StartImageBuilder" + +// StartImageBuilderRequest generates a "aws/request.Request" representing the +// client's request for the StartImageBuilder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartImageBuilder for more information on using the StartImageBuilder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartImageBuilderRequest method. +// req, resp := client.StartImageBuilderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StartImageBuilder +func (c *AppStream) StartImageBuilderRequest(input *StartImageBuilderInput) (req *request.Request, output *StartImageBuilderOutput) { + op := &request.Operation{ + Name: opStartImageBuilder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartImageBuilderInput{} + } + + output = &StartImageBuilderOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartImageBuilder API operation for Amazon AppStream. +// +// Starts the specified image builder. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation StartImageBuilder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StartImageBuilder +func (c *AppStream) StartImageBuilder(input *StartImageBuilderInput) (*StartImageBuilderOutput, error) { + req, out := c.StartImageBuilderRequest(input) + return out, req.Send() +} + +// StartImageBuilderWithContext is the same as StartImageBuilder with the addition of +// the ability to pass a context and additional request options. +// +// See StartImageBuilder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
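+//
+// A usage sketch added by the editor (not generated documentation), assuming
+// a configured *AppStream client named client and a hypothetical image
+// builder "example-builder"; imports: context, time, fmt, log, aws, appstream.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+//    defer cancel()
+//    out, err := client.StartImageBuilderWithContext(ctx, &appstream.StartImageBuilderInput{
+//        Name: aws.String("example-builder"), // placeholder name
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(out)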
+func (c *AppStream) StartImageBuilderWithContext(ctx aws.Context, input *StartImageBuilderInput, opts ...request.Option) (*StartImageBuilderOutput, error) { + req, out := c.StartImageBuilderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopFleet = "StopFleet" + +// StopFleetRequest generates a "aws/request.Request" representing the +// client's request for the StopFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopFleet for more information on using the StopFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopFleetRequest method. +// req, resp := client.StopFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StopFleet +func (c *AppStream) StopFleetRequest(input *StopFleetInput) (req *request.Request, output *StopFleetOutput) { + op := &request.Operation{ + Name: opStopFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopFleetInput{} + } + + output = &StopFleetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopFleet API operation for Amazon AppStream. +// +// Stops the specified fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation StopFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StopFleet +func (c *AppStream) StopFleet(input *StopFleetInput) (*StopFleetOutput, error) { + req, out := c.StopFleetRequest(input) + return out, req.Send() +} + +// StopFleetWithContext is the same as StopFleet with the addition of +// the ability to pass a context and additional request options. +// +// See StopFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) StopFleetWithContext(ctx aws.Context, input *StopFleetInput, opts ...request.Option) (*StopFleetOutput, error) { + req, out := c.StopFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
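+	// Attach the caller's context and apply any per-request options
+	// before Send executes the API call (editor's note).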
+ return out, req.Send() +} + +const opStopImageBuilder = "StopImageBuilder" + +// StopImageBuilderRequest generates a "aws/request.Request" representing the +// client's request for the StopImageBuilder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopImageBuilder for more information on using the StopImageBuilder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopImageBuilderRequest method. +// req, resp := client.StopImageBuilderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StopImageBuilder +func (c *AppStream) StopImageBuilderRequest(input *StopImageBuilderInput) (req *request.Request, output *StopImageBuilderOutput) { + op := &request.Operation{ + Name: opStopImageBuilder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopImageBuilderInput{} + } + + output = &StopImageBuilderOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopImageBuilder API operation for Amazon AppStream. +// +// Stops the specified image builder. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation StopImageBuilder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StopImageBuilder +func (c *AppStream) StopImageBuilder(input *StopImageBuilderInput) (*StopImageBuilderOutput, error) { + req, out := c.StopImageBuilderRequest(input) + return out, req.Send() +} + +// StopImageBuilderWithContext is the same as StopImageBuilder with the addition of +// the ability to pass a context and additional request options. +// +// See StopImageBuilder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) StopImageBuilderWithContext(ctx aws.Context, input *StopImageBuilderInput, opts ...request.Option) (*StopImageBuilderOutput, error) { + req, out := c.StopImageBuilderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/TagResource +func (c *AppStream) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon AppStream. +// +// Adds or overwrites one or more tags for the specified AppStream 2.0 resource. +// You can tag AppStream 2.0 image builders, images, fleets, and stacks. +// +// Each tag consists of a key and an optional value. If a resource already has +// a tag with the same key, this operation updates its value. +// +// To list the current tags for your resources, use ListTagsForResource. To +// disassociate tags from your resources, use UntagResource. +// +// For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html) +// in the Amazon AppStream 2.0 Administration Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/TagResource +func (c *AppStream) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
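+//
+// A sketch added by the editor (not generated documentation): tagging a
+// fleet under a deadline. The ARN and tag values are placeholders; imports:
+// context, time, aws, appstream.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.TagResourceWithContext(ctx, &appstream.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:appstream:us-west-2:123456789012:fleet/example-fleet"),
+//        Tags:        aws.StringMap(map[string]string{"Environment": "test"}),
+//    })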
+func (c *AppStream) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UntagResource +func (c *AppStream) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon AppStream. +// +// Disassociates one or more specified tags from the specified AppStream 2.0 +// resource. +// +// To list the current tags for your resources, use ListTagsForResource. +// +// For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html) +// in the Amazon AppStream 2.0 Administration Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UntagResource +func (c *AppStream) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
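+//
+// A sketch added by the editor (not generated documentation): removing a tag
+// by key. The ARN and key are placeholders.
+//
+//    _, err := client.UntagResourceWithContext(context.Background(), &appstream.UntagResourceInput{
+//        ResourceArn: aws.String("arn:aws:appstream:us-west-2:123456789012:fleet/example-fleet"),
+//        TagKeys:     aws.StringSlice([]string{"Environment"}),
+//    })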
+func (c *AppStream) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDirectoryConfig = "UpdateDirectoryConfig" + +// UpdateDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDirectoryConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDirectoryConfig for more information on using the UpdateDirectoryConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDirectoryConfigRequest method. +// req, resp := client.UpdateDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfig +func (c *AppStream) UpdateDirectoryConfigRequest(input *UpdateDirectoryConfigInput) (req *request.Request, output *UpdateDirectoryConfigOutput) { + op := &request.Operation{ + Name: opUpdateDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDirectoryConfigInput{} + } + + output = &UpdateDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDirectoryConfig API operation for Amazon AppStream. +// +// Updates the specified Directory Config object in AppStream 2.0. This object +// includes the configuration information required to join fleets and image +// builders to Microsoft Active Directory domains. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UpdateDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfig +func (c *AppStream) UpdateDirectoryConfig(input *UpdateDirectoryConfigInput) (*UpdateDirectoryConfigOutput, error) { + req, out := c.UpdateDirectoryConfigRequest(input) + return out, req.Send() +} + +// UpdateDirectoryConfigWithContext is the same as UpdateDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDirectoryConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) UpdateDirectoryConfigWithContext(ctx aws.Context, input *UpdateDirectoryConfigInput, opts ...request.Option) (*UpdateDirectoryConfigOutput, error) { + req, out := c.UpdateDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFleet = "UpdateFleet" + +// UpdateFleetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFleet for more information on using the UpdateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFleetRequest method. +// req, resp := client.UpdateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateFleet +func (c *AppStream) UpdateFleetRequest(input *UpdateFleetInput) (req *request.Request, output *UpdateFleetOutput) { + op := &request.Operation{ + Name: opUpdateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateFleetInput{} + } + + output = &UpdateFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFleet API operation for Amazon AppStream. +// +// Updates the specified fleet. +// +// If the fleet is in the STOPPED state, you can update any attribute except +// the fleet name. If the fleet is in the RUNNING state, you can update the +// DisplayName, ComputeCapacity, ImageARN, ImageName, IdleDisconnectTimeoutInSeconds, +// and DisconnectTimeoutInSeconds attributes. If the fleet is in the STARTING +// or STOPPING state, you can't update it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UpdateFleet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. 
+// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateFleet +func (c *AppStream) UpdateFleet(input *UpdateFleetInput) (*UpdateFleetOutput, error) { + req, out := c.UpdateFleetRequest(input) + return out, req.Send() +} + +// UpdateFleetWithContext is the same as UpdateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) UpdateFleetWithContext(ctx aws.Context, input *UpdateFleetInput, opts ...request.Option) (*UpdateFleetOutput, error) { + req, out := c.UpdateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateImagePermissions = "UpdateImagePermissions" + +// UpdateImagePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateImagePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateImagePermissions for more information on using the UpdateImagePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateImagePermissionsRequest method. +// req, resp := client.UpdateImagePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateImagePermissions +func (c *AppStream) UpdateImagePermissionsRequest(input *UpdateImagePermissionsInput) (req *request.Request, output *UpdateImagePermissionsOutput) { + op := &request.Operation{ + Name: opUpdateImagePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateImagePermissionsInput{} + } + + output = &UpdateImagePermissionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateImagePermissions API operation for Amazon AppStream. +// +// Adds or updates permissions for the specified private image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UpdateImagePermissions for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeResourceNotAvailableException "ResourceNotAvailableException" +// The specified resource exists and is not in use, but isn't available. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateImagePermissions +func (c *AppStream) UpdateImagePermissions(input *UpdateImagePermissionsInput) (*UpdateImagePermissionsOutput, error) { + req, out := c.UpdateImagePermissionsRequest(input) + return out, req.Send() +} + +// UpdateImagePermissionsWithContext is the same as UpdateImagePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateImagePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) UpdateImagePermissionsWithContext(ctx aws.Context, input *UpdateImagePermissionsInput, opts ...request.Option) (*UpdateImagePermissionsOutput, error) { + req, out := c.UpdateImagePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateStack for more information on using the UpdateStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateStackRequest method. +// req, resp := client.UpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateStack +func (c *AppStream) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + output = &UpdateStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateStack API operation for Amazon AppStream. +// +// Updates the specified fields for the specified stack. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UpdateStack for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. 
+// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The specified role is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// * ErrCodeInvalidAccountStatusException "InvalidAccountStatusException" +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateStack +func (c *AppStream) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + return out, req.Send() +} + +// UpdateStackWithContext is the same as UpdateStack with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) UpdateStackWithContext(ctx aws.Context, input *UpdateStackInput, opts ...request.Option) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Describes an interface VPC endpoint (interface endpoint) that lets you create +// a private connection between the virtual private cloud (VPC) that you specify +// and AppStream 2.0. When you specify an interface endpoint for a stack, users +// of the stack can connect to AppStream 2.0 only through that endpoint. When +// you specify an interface endpoint for an image builder, administrators can +// connect to the image builder only through that endpoint. +type AccessEndpoint struct { + _ struct{} `type:"structure"` + + // The type of interface endpoint. + // + // EndpointType is a required field + EndpointType *string `type:"string" required:"true" enum:"AccessEndpointType"` + + // The identifier (ID) of the VPC in which the interface endpoint is used. + VpceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AccessEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessEndpoint) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
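+//
+// Editor's illustration of building and validating an AccessEndpoint with
+// the fluent setters, assuming the generated AccessEndpointTypeStreaming
+// enum constant; the VPC endpoint ID is a placeholder.
+//
+//    ep := (&appstream.AccessEndpoint{}).
+//        SetEndpointType(appstream.AccessEndpointTypeStreaming).
+//        SetVpceId("vpce-0123456789abcdef0")
+//    if err := ep.Validate(); err != nil {
+//        log.Fatal(err) // e.g. missing EndpointType or too-short VpceId
+//    }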
+func (s *AccessEndpoint) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessEndpoint"} + if s.EndpointType == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointType")) + } + if s.VpceId != nil && len(*s.VpceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VpceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointType sets the EndpointType field's value. +func (s *AccessEndpoint) SetEndpointType(v string) *AccessEndpoint { + s.EndpointType = &v + return s +} + +// SetVpceId sets the VpceId field's value. +func (s *AccessEndpoint) SetVpceId(v string) *AccessEndpoint { + s.VpceId = &v + return s +} + +// Describes an application in the application catalog. +type Application struct { + _ struct{} `type:"structure"` + + // The application name to display. + DisplayName *string `min:"1" type:"string"` + + // If there is a problem, the application can be disabled after image creation. + Enabled *bool `type:"boolean"` + + // The URL for the application icon. This URL might be time-limited. + IconURL *string `min:"1" type:"string"` + + // The arguments that are passed to the application at launch. + LaunchParameters *string `min:"1" type:"string"` + + // The path to the application executable in the instance. + LaunchPath *string `min:"1" type:"string"` + + // Additional attributes that describe the application. + Metadata map[string]*string `type:"map"` + + // The name of the application. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Application) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Application) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Application) SetDisplayName(v string) *Application { + s.DisplayName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *Application) SetEnabled(v bool) *Application { + s.Enabled = &v + return s +} + +// SetIconURL sets the IconURL field's value. +func (s *Application) SetIconURL(v string) *Application { + s.IconURL = &v + return s +} + +// SetLaunchParameters sets the LaunchParameters field's value. +func (s *Application) SetLaunchParameters(v string) *Application { + s.LaunchParameters = &v + return s +} + +// SetLaunchPath sets the LaunchPath field's value. +func (s *Application) SetLaunchPath(v string) *Application { + s.LaunchPath = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *Application) SetMetadata(v map[string]*string) *Application { + s.Metadata = v + return s +} + +// SetName sets the Name field's value. +func (s *Application) SetName(v string) *Application { + s.Name = &v + return s +} + +// The persistent application settings for users of a stack. +type ApplicationSettings struct { + _ struct{} `type:"structure"` + + // Enables or disables persistent application settings for users during their + // streaming sessions. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // The path prefix for the S3 bucket where users’ persistent application settings + // are stored. You can allow the same persistent application settings to be + // used across multiple stacks by specifying the same settings group for each + // stack. 
+ SettingsGroup *string `type:"string"` +} + +// String returns the string representation +func (s ApplicationSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplicationSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplicationSettings"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *ApplicationSettings) SetEnabled(v bool) *ApplicationSettings { + s.Enabled = &v + return s +} + +// SetSettingsGroup sets the SettingsGroup field's value. +func (s *ApplicationSettings) SetSettingsGroup(v string) *ApplicationSettings { + s.SettingsGroup = &v + return s +} + +// Describes the persistent application settings for users of a stack. +type ApplicationSettingsResponse struct { + _ struct{} `type:"structure"` + + // Specifies whether persistent application settings are enabled for users during + // their streaming sessions. + Enabled *bool `type:"boolean"` + + // The S3 bucket where users’ persistent application settings are stored. + // When persistent application settings are enabled for the first time for an + // account in an AWS Region, an S3 bucket is created. The bucket is unique to + // the AWS account and the Region. + S3BucketName *string `min:"1" type:"string"` + + // The path prefix for the S3 bucket where users’ persistent application settings + // are stored. + SettingsGroup *string `type:"string"` +} + +// String returns the string representation +func (s ApplicationSettingsResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationSettingsResponse) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ApplicationSettingsResponse) SetEnabled(v bool) *ApplicationSettingsResponse { + s.Enabled = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *ApplicationSettingsResponse) SetS3BucketName(v string) *ApplicationSettingsResponse { + s.S3BucketName = &v + return s +} + +// SetSettingsGroup sets the SettingsGroup field's value. +func (s *ApplicationSettingsResponse) SetSettingsGroup(v string) *ApplicationSettingsResponse { + s.SettingsGroup = &v + return s +} + +type AssociateFleetInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The name of the stack. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
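+//
+// Editor's sketch of client-side validation before calling AssociateFleet;
+// the fleet and stack names are placeholders.
+//
+//    in := &appstream.AssociateFleetInput{
+//        FleetName: aws.String("example-fleet"),
+//        StackName: aws.String("example-stack"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        log.Fatal(err) // caught locally; no API call is made
+//    }
+//    if _, err := client.AssociateFleet(in); err != nil {
+//        log.Fatal(err)
+//    }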
+func (s *AssociateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateFleetInput"} + if s.FleetName == nil { + invalidParams.Add(request.NewErrParamRequired("FleetName")) + } + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetName sets the FleetName field's value. +func (s *AssociateFleetInput) SetFleetName(v string) *AssociateFleetInput { + s.FleetName = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *AssociateFleetInput) SetStackName(v string) *AssociateFleetInput { + s.StackName = &v + return s +} + +type AssociateFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateFleetOutput) GoString() string { + return s.String() +} + +type BatchAssociateUserStackInput struct { + _ struct{} `type:"structure"` + + // The list of UserStackAssociation objects. + // + // UserStackAssociations is a required field + UserStackAssociations []*UserStackAssociation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateUserStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAssociateUserStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchAssociateUserStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchAssociateUserStackInput"} + if s.UserStackAssociations == nil { + invalidParams.Add(request.NewErrParamRequired("UserStackAssociations")) + } + if s.UserStackAssociations != nil && len(s.UserStackAssociations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserStackAssociations", 1)) + } + if s.UserStackAssociations != nil { + for i, v := range s.UserStackAssociations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserStackAssociations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserStackAssociations sets the UserStackAssociations field's value. +func (s *BatchAssociateUserStackInput) SetUserStackAssociations(v []*UserStackAssociation) *BatchAssociateUserStackInput { + s.UserStackAssociations = v + return s +} + +type BatchAssociateUserStackOutput struct { + _ struct{} `type:"structure"` + + // The list of UserStackAssociationError objects. + Errors []*UserStackAssociationError `locationName:"errors" type:"list"` +} + +// String returns the string representation +func (s BatchAssociateUserStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAssociateUserStackOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. 
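+//
+// Editor's sketch of inspecting per-association failures after a
+// BatchAssociateUserStack call (input elided), assuming the ErrorCode and
+// ErrorMessage fields of UserStackAssociationError.
+//
+//    out, err := client.BatchAssociateUserStack(input)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, e := range out.Errors {
+//        fmt.Printf("%s: %s\n", aws.StringValue(e.ErrorCode), aws.StringValue(e.ErrorMessage))
+//    }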
+func (s *BatchAssociateUserStackOutput) SetErrors(v []*UserStackAssociationError) *BatchAssociateUserStackOutput { + s.Errors = v + return s +} + +type BatchDisassociateUserStackInput struct { + _ struct{} `type:"structure"` + + // The list of UserStackAssociation objects. + // + // UserStackAssociations is a required field + UserStackAssociations []*UserStackAssociation `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDisassociateUserStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDisassociateUserStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDisassociateUserStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDisassociateUserStackInput"} + if s.UserStackAssociations == nil { + invalidParams.Add(request.NewErrParamRequired("UserStackAssociations")) + } + if s.UserStackAssociations != nil && len(s.UserStackAssociations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserStackAssociations", 1)) + } + if s.UserStackAssociations != nil { + for i, v := range s.UserStackAssociations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserStackAssociations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserStackAssociations sets the UserStackAssociations field's value. +func (s *BatchDisassociateUserStackInput) SetUserStackAssociations(v []*UserStackAssociation) *BatchDisassociateUserStackInput { + s.UserStackAssociations = v + return s +} + +type BatchDisassociateUserStackOutput struct { + _ struct{} `type:"structure"` + + // The list of UserStackAssociationError objects. + Errors []*UserStackAssociationError `locationName:"errors" type:"list"` +} + +// String returns the string representation +func (s BatchDisassociateUserStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDisassociateUserStackOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchDisassociateUserStackOutput) SetErrors(v []*UserStackAssociationError) *BatchDisassociateUserStackOutput { + s.Errors = v + return s +} + +// Describes the capacity for a fleet. +type ComputeCapacity struct { + _ struct{} `type:"structure"` + + // The desired number of streaming instances. + // + // DesiredInstances is a required field + DesiredInstances *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ComputeCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeCapacity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComputeCapacity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComputeCapacity"} + if s.DesiredInstances == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredInstances sets the DesiredInstances field's value. 
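+//
+// Editor's sketch: the fluent setter composes with UpdateFleet to resize a
+// fleet; the fleet name is a placeholder.
+//
+//    if _, err := client.UpdateFleet(&appstream.UpdateFleetInput{
+//        Name:            aws.String("example-fleet"),
+//        ComputeCapacity: (&appstream.ComputeCapacity{}).SetDesiredInstances(2),
+//    }); err != nil {
+//        log.Fatal(err)
+//    }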
+func (s *ComputeCapacity) SetDesiredInstances(v int64) *ComputeCapacity { + s.DesiredInstances = &v + return s +} + +// Describes the capacity status for a fleet. +type ComputeCapacityStatus struct { + _ struct{} `type:"structure"` + + // The number of currently available instances that can be used to stream sessions. + Available *int64 `type:"integer"` + + // The desired number of streaming instances. + // + // Desired is a required field + Desired *int64 `type:"integer" required:"true"` + + // The number of instances in use for streaming. + InUse *int64 `type:"integer"` + + // The total number of simultaneous streaming instances that are running. + Running *int64 `type:"integer"` +} + +// String returns the string representation +func (s ComputeCapacityStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeCapacityStatus) GoString() string { + return s.String() +} + +// SetAvailable sets the Available field's value. +func (s *ComputeCapacityStatus) SetAvailable(v int64) *ComputeCapacityStatus { + s.Available = &v + return s +} + +// SetDesired sets the Desired field's value. +func (s *ComputeCapacityStatus) SetDesired(v int64) *ComputeCapacityStatus { + s.Desired = &v + return s +} + +// SetInUse sets the InUse field's value. +func (s *ComputeCapacityStatus) SetInUse(v int64) *ComputeCapacityStatus { + s.InUse = &v + return s +} + +// SetRunning sets the Running field's value. +func (s *ComputeCapacityStatus) SetRunning(v int64) *ComputeCapacityStatus { + s.Running = &v + return s +} + +type CopyImageInput struct { + _ struct{} `type:"structure"` + + // The description that the image will have when it is copied to the destination. + DestinationImageDescription *string `type:"string"` + + // The name that the image will have when it is copied to the destination. + // + // DestinationImageName is a required field + DestinationImageName *string `type:"string" required:"true"` + + // The destination region to which the image will be copied. This parameter + // is required, even if you are copying an image within the same region. + // + // DestinationRegion is a required field + DestinationRegion *string `min:"1" type:"string" required:"true"` + + // The name of the image to copy. + // + // SourceImageName is a required field + SourceImageName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyImageInput"} + if s.DestinationImageName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationImageName")) + } + if s.DestinationRegion == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationRegion")) + } + if s.DestinationRegion != nil && len(*s.DestinationRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationRegion", 1)) + } + if s.SourceImageName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceImageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationImageDescription sets the DestinationImageDescription field's value. 
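The MinLen checks above fire even when a field is set; a sketch with hypothetical image names, reusing the same imports:

in := &appstream.CopyImageInput{}
in.SetSourceImageName("base-image").
	SetDestinationImageName("base-image-copy").
	SetDestinationRegion("") // present but empty, so the MinLen(1) check trips
fmt.Println(in.Validate()) // reports DestinationRegion as too short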
+func (s *CopyImageInput) SetDestinationImageDescription(v string) *CopyImageInput { + s.DestinationImageDescription = &v + return s +} + +// SetDestinationImageName sets the DestinationImageName field's value. +func (s *CopyImageInput) SetDestinationImageName(v string) *CopyImageInput { + s.DestinationImageName = &v + return s +} + +// SetDestinationRegion sets the DestinationRegion field's value. +func (s *CopyImageInput) SetDestinationRegion(v string) *CopyImageInput { + s.DestinationRegion = &v + return s +} + +// SetSourceImageName sets the SourceImageName field's value. +func (s *CopyImageInput) SetSourceImageName(v string) *CopyImageInput { + s.SourceImageName = &v + return s +} + +type CopyImageOutput struct { + _ struct{} `type:"structure"` + + // The name of the destination image. + DestinationImageName *string `type:"string"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +// SetDestinationImageName sets the DestinationImageName field's value. +func (s *CopyImageOutput) SetDestinationImageName(v string) *CopyImageOutput { + s.DestinationImageName = &v + return s +} + +type CreateDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The fully qualified name of the directory (for example, corp.example.com). + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The distinguished names of the organizational units for computer accounts. + // + // OrganizationalUnitDistinguishedNames is a required field + OrganizationalUnitDistinguishedNames []*string `type:"list" required:"true"` + + // The credentials for the service account used by the fleet or image builder + // to connect to the directory. + // + // ServiceAccountCredentials is a required field + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + if s.OrganizationalUnitDistinguishedNames == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationalUnitDistinguishedNames")) + } + if s.ServiceAccountCredentials == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceAccountCredentials")) + } + if s.ServiceAccountCredentials != nil { + if err := s.ServiceAccountCredentials.Validate(); err != nil { + invalidParams.AddNested("ServiceAccountCredentials", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *CreateDirectoryConfigInput) SetDirectoryName(v string) *CreateDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. 
+func (s *CreateDirectoryConfigInput) SetOrganizationalUnitDistinguishedNames(v []*string) *CreateDirectoryConfigInput { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *CreateDirectoryConfigInput) SetServiceAccountCredentials(v *ServiceAccountCredentials) *CreateDirectoryConfigInput { + s.ServiceAccountCredentials = v + return s +} + +type CreateDirectoryConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the directory configuration. + DirectoryConfig *DirectoryConfig `type:"structure"` +} + +// String returns the string representation +func (s CreateDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryConfigOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfig sets the DirectoryConfig field's value. +func (s *CreateDirectoryConfigOutput) SetDirectoryConfig(v *DirectoryConfig) *CreateDirectoryConfigOutput { + s.DirectoryConfig = v + return s +} + +type CreateFleetInput struct { + _ struct{} `type:"structure"` + + // The desired capacity for the fleet. + // + // ComputeCapacity is a required field + ComputeCapacity *ComputeCapacity `type:"structure" required:"true"` + + // The description to display. + Description *string `type:"string"` + + // The amount of time that a streaming session remains active after users disconnect. + // If users try to reconnect to the streaming session after a disconnection + // or network interruption within this time interval, they are connected to + // their previous session. Otherwise, they are connected to a new session with + // a new streaming instance. + // + // Specify a value between 60 and 360000. + DisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The fleet name to display. + DisplayName *string `type:"string"` + + // The name of the directory and organizational unit (OU) to use to join the + // fleet to a Microsoft Active Directory domain. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the fleet. + EnableDefaultInternetAccess *bool `type:"boolean"` + + // The fleet type. + // + // ALWAYS_ON + // + // Provides users with instant-on access to their apps. You are charged for + // all running instances in your fleet, even if no users are streaming apps. + // + // ON_DEMAND + // + // Provide users with access to applications after they connect, which takes + // one to two minutes. You are charged for instance streaming when users are + // connected and a small hourly fee for instances that are not streaming apps. + FleetType *string `type:"string" enum:"FleetType"` + + // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To + // assume a role, a fleet instance calls the AWS Security Token Service (STS) + // AssumeRole API operation and passes the ARN of the role to use. The operation + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. 
+ IamRoleArn *string `type:"string"` + + // The amount of time that users can be idle (inactive) before they are disconnected + // from their streaming session and the DisconnectTimeoutInSeconds time interval + // begins. Users are notified before they are disconnected due to inactivity. + // If they try to reconnect to the streaming session before the time interval + // specified in DisconnectTimeoutInSeconds elapses, they are connected to their + // previous session. Users are considered idle when they stop providing keyboard + // or mouse input during their streaming session. File uploads and downloads, + // audio in, audio out, and pixels changing do not qualify as user activity. + // If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds + // elapses, they are disconnected. + // + // To prevent users from being disconnected due to inactivity, specify a value + // of 0. Otherwise, specify a value between 60 and 3600. The default value is + // 0. + // + // If you enable this feature, we recommend that you specify a value that corresponds + // exactly to a whole number of minutes (for example, 60, 120, and 180). If + // you don't do this, the value is rounded to the nearest minute. For example, + // if you specify a value of 70, users are disconnected after 1 minute of inactivity. + // If you specify a value that is at the midpoint between two different minutes, + // the value is rounded up. For example, if you specify a value of 90, users + // are disconnected after 2 minutes of inactivity. + IdleDisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The ARN of the public, private, or shared image to use. + ImageArn *string `type:"string"` + + // The name of the image used to create the fleet. + ImageName *string `min:"1" type:"string"` + + // The instance type to use when launching fleet instances. The following instance + // types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge + // + // InstanceType is a required field + InstanceType *string `min:"1" type:"string" required:"true"` + + // The maximum amount of time that a streaming session can remain active, in + // seconds. If users are still connected to a streaming instance five minutes + // before this limit is reached, they are prompted to save any open documents + // before being disconnected. After this time elapses, the instance is terminated + // and replaced by a new instance. + // + // Specify a value between 600 and 360000. + MaxUserDurationInSeconds *int64 `type:"integer"` + + // A unique name for the fleet. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The tags to associate with the fleet. A tag is a key-value pair, and the + // value is optional. For example, Environment=Test. If you do not specify a + // value, Environment=. 
+ //
+ // That is, the tag value is set to an empty string.
+ //
+ // Generally allowed characters are: letters, numbers, and spaces representable
+ // in UTF-8, and the following special characters:
+ //
+ // _ . : / = + \ - @
+ //
+ // For more information, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
+ // in the Amazon AppStream 2.0 Administration Guide.
+ Tags map[string]*string `min:"1" type:"map"`
+
+ // The VPC configuration for the fleet.
+ VpcConfig *VpcConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateFleetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateFleetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateFleetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"}
+ if s.ComputeCapacity == nil {
+ invalidParams.Add(request.NewErrParamRequired("ComputeCapacity"))
+ }
+ if s.ImageName != nil && len(*s.ImageName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ImageName", 1))
+ }
+ if s.InstanceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceType"))
+ }
+ if s.InstanceType != nil && len(*s.InstanceType) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Tags != nil && len(s.Tags) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
+ }
+ if s.ComputeCapacity != nil {
+ if err := s.ComputeCapacity.Validate(); err != nil {
+ invalidParams.AddNested("ComputeCapacity", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetComputeCapacity sets the ComputeCapacity field's value.
+func (s *CreateFleetInput) SetComputeCapacity(v *ComputeCapacity) *CreateFleetInput {
+ s.ComputeCapacity = v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateFleetInput) SetDescription(v string) *CreateFleetInput {
+ s.Description = &v
+ return s
+}
+
+// SetDisconnectTimeoutInSeconds sets the DisconnectTimeoutInSeconds field's value.
+func (s *CreateFleetInput) SetDisconnectTimeoutInSeconds(v int64) *CreateFleetInput {
+ s.DisconnectTimeoutInSeconds = &v
+ return s
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *CreateFleetInput) SetDisplayName(v string) *CreateFleetInput {
+ s.DisplayName = &v
+ return s
+}
+
+// SetDomainJoinInfo sets the DomainJoinInfo field's value.
+func (s *CreateFleetInput) SetDomainJoinInfo(v *DomainJoinInfo) *CreateFleetInput {
+ s.DomainJoinInfo = v
+ return s
+}
+
+// SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value.
+func (s *CreateFleetInput) SetEnableDefaultInternetAccess(v bool) *CreateFleetInput {
+ s.EnableDefaultInternetAccess = &v
+ return s
+}
+
+// SetFleetType sets the FleetType field's value.
+func (s *CreateFleetInput) SetFleetType(v string) *CreateFleetInput {
+ s.FleetType = &v
+ return s
+}
+
+// SetIamRoleArn sets the IamRoleArn field's value.
+func (s *CreateFleetInput) SetIamRoleArn(v string) *CreateFleetInput {
+ s.IamRoleArn = &v
+ return s
+}
+
+// SetIdleDisconnectTimeoutInSeconds sets the IdleDisconnectTimeoutInSeconds field's value.
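Putting the CreateFleetInput pieces together, a minimal sketch; the fleet and image names are hypothetical, and the session setup and the CreateFleet operation come from elsewhere in the SDK and this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	in := &appstream.CreateFleetInput{}
	in.SetName("example-fleet").
		SetInstanceType("stream.standard.medium").
		SetFleetType(appstream.FleetTypeOnDemand).
		SetImageName("example-image").
		SetComputeCapacity((&appstream.ComputeCapacity{}).SetDesiredInstances(2)).
		SetDisconnectTimeoutInSeconds(300).    // within the documented 60-360000 range
		SetIdleDisconnectTimeoutInSeconds(600) // a whole number of minutes, per the note above

	if err := in.Validate(); err != nil {
		fmt.Println(err) // client-side failure; nothing was sent
		return
	}
	out, err := svc.CreateFleet(in)
	fmt.Println(out, err)
}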
+func (s *CreateFleetInput) SetIdleDisconnectTimeoutInSeconds(v int64) *CreateFleetInput { + s.IdleDisconnectTimeoutInSeconds = &v + return s +} + +// SetImageArn sets the ImageArn field's value. +func (s *CreateFleetInput) SetImageArn(v string) *CreateFleetInput { + s.ImageArn = &v + return s +} + +// SetImageName sets the ImageName field's value. +func (s *CreateFleetInput) SetImageName(v string) *CreateFleetInput { + s.ImageName = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CreateFleetInput) SetInstanceType(v string) *CreateFleetInput { + s.InstanceType = &v + return s +} + +// SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. +func (s *CreateFleetInput) SetMaxUserDurationInSeconds(v int64) *CreateFleetInput { + s.MaxUserDurationInSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFleetInput) SetName(v string) *CreateFleetInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateFleetInput) SetTags(v map[string]*string) *CreateFleetInput { + s.Tags = v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *CreateFleetInput) SetVpcConfig(v *VpcConfig) *CreateFleetInput { + s.VpcConfig = v + return s +} + +type CreateFleetOutput struct { + _ struct{} `type:"structure"` + + // Information about the fleet. + Fleet *Fleet `type:"structure"` +} + +// String returns the string representation +func (s CreateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetOutput) GoString() string { + return s.String() +} + +// SetFleet sets the Fleet field's value. +func (s *CreateFleetOutput) SetFleet(v *Fleet) *CreateFleetOutput { + s.Fleet = v + return s +} + +type CreateImageBuilderInput struct { + _ struct{} `type:"structure"` + + // The list of interface VPC endpoint (interface endpoint) objects. Administrators + // can connect to the image builder only through the specified endpoints. + AccessEndpoints []*AccessEndpoint `min:"1" type:"list"` + + // The version of the AppStream 2.0 agent to use for this image builder. To + // use the latest version of the AppStream 2.0 agent, specify [LATEST]. + AppstreamAgentVersion *string `min:"1" type:"string"` + + // The description to display. + Description *string `type:"string"` + + // The image builder name to display. + DisplayName *string `type:"string"` + + // The name of the directory and organizational unit (OU) to use to join the + // image builder to a Microsoft Active Directory domain. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the image builder. + EnableDefaultInternetAccess *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the IAM role to apply to the image builder. + // To assume a role, the image builder calls the AWS Security Token Service + // (STS) AssumeRole API operation and passes the ARN of the role to use. The + // operation creates a new session with temporary credentials. AppStream 2.0 + // retrieves the temporary credentials and creates the AppStream_Machine_Role + // credential profile on the instance. 
+ //
+ // For more information, see Using an IAM Role to Grant Permissions to Applications
+ // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html)
+ // in the Amazon AppStream 2.0 Administration Guide.
+ IamRoleArn *string `type:"string"`
+
+ // The ARN of the public, private, or shared image to use.
+ ImageArn *string `type:"string"`
+
+ // The name of the image used to create the image builder.
+ ImageName *string `min:"1" type:"string"`
+
+ // The instance type to use when launching the image builder. The following
+ // instance types are available:
+ //
+ // * stream.standard.medium
+ //
+ // * stream.standard.large
+ //
+ // * stream.compute.large
+ //
+ // * stream.compute.xlarge
+ //
+ // * stream.compute.2xlarge
+ //
+ // * stream.compute.4xlarge
+ //
+ // * stream.compute.8xlarge
+ //
+ // * stream.memory.large
+ //
+ // * stream.memory.xlarge
+ //
+ // * stream.memory.2xlarge
+ //
+ // * stream.memory.4xlarge
+ //
+ // * stream.memory.8xlarge
+ //
+ // * stream.graphics-design.large
+ //
+ // * stream.graphics-design.xlarge
+ //
+ // * stream.graphics-design.2xlarge
+ //
+ // * stream.graphics-design.4xlarge
+ //
+ // * stream.graphics-desktop.2xlarge
+ //
+ // * stream.graphics-pro.4xlarge
+ //
+ // * stream.graphics-pro.8xlarge
+ //
+ // * stream.graphics-pro.16xlarge
+ //
+ // InstanceType is a required field
+ InstanceType *string `min:"1" type:"string" required:"true"`
+
+ // A unique name for the image builder.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // The tags to associate with the image builder. A tag is a key-value pair,
+ // and the value is optional. For example, Environment=Test. If you do not
+ // specify a value, the value is set to an empty string (Environment=).
+ //
+ // Generally allowed characters are: letters, numbers, and spaces representable
+ // in UTF-8, and the following special characters:
+ //
+ // _ . : / = + \ - @
+ //
+ // For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
+ // in the Amazon AppStream 2.0 Administration Guide.
+ Tags map[string]*string `min:"1" type:"map"`
+
+ // The VPC configuration for the image builder. You can specify only one subnet.
+ VpcConfig *VpcConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateImageBuilderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImageBuilderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateImageBuilderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateImageBuilderInput"} + if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) + } + if s.AppstreamAgentVersion != nil && len(*s.AppstreamAgentVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppstreamAgentVersion", 1)) + } + if s.ImageName != nil && len(*s.ImageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageName", 1)) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.InstanceType != nil && len(*s.InstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.AccessEndpoints != nil { + for i, v := range s.AccessEndpoints { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessEndpoints", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessEndpoints sets the AccessEndpoints field's value. +func (s *CreateImageBuilderInput) SetAccessEndpoints(v []*AccessEndpoint) *CreateImageBuilderInput { + s.AccessEndpoints = v + return s +} + +// SetAppstreamAgentVersion sets the AppstreamAgentVersion field's value. +func (s *CreateImageBuilderInput) SetAppstreamAgentVersion(v string) *CreateImageBuilderInput { + s.AppstreamAgentVersion = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateImageBuilderInput) SetDescription(v string) *CreateImageBuilderInput { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *CreateImageBuilderInput) SetDisplayName(v string) *CreateImageBuilderInput { + s.DisplayName = &v + return s +} + +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *CreateImageBuilderInput) SetDomainJoinInfo(v *DomainJoinInfo) *CreateImageBuilderInput { + s.DomainJoinInfo = v + return s +} + +// SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. +func (s *CreateImageBuilderInput) SetEnableDefaultInternetAccess(v bool) *CreateImageBuilderInput { + s.EnableDefaultInternetAccess = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *CreateImageBuilderInput) SetIamRoleArn(v string) *CreateImageBuilderInput { + s.IamRoleArn = &v + return s +} + +// SetImageArn sets the ImageArn field's value. +func (s *CreateImageBuilderInput) SetImageArn(v string) *CreateImageBuilderInput { + s.ImageArn = &v + return s +} + +// SetImageName sets the ImageName field's value. +func (s *CreateImageBuilderInput) SetImageName(v string) *CreateImageBuilderInput { + s.ImageName = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CreateImageBuilderInput) SetInstanceType(v string) *CreateImageBuilderInput { + s.InstanceType = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateImageBuilderInput) SetName(v string) *CreateImageBuilderInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. 
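When a nested AccessEndpoint fails its own Validate, the AddNested/fmt.Sprintf pair above prefixes the error context with the slice index. A sketch, assuming the AccessEndpoint type and its required EndpointType field defined elsewhere in this package:

in := &appstream.CreateImageBuilderInput{}
in.SetName("example-builder").
	SetInstanceType("stream.standard.medium").
	SetAccessEndpoints([]*appstream.AccessEndpoint{{}}) // zero-value endpoint
fmt.Println(in.Validate()) // nested context reads roughly AccessEndpoints[0]: missing EndpointType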
+func (s *CreateImageBuilderInput) SetTags(v map[string]*string) *CreateImageBuilderInput { + s.Tags = v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *CreateImageBuilderInput) SetVpcConfig(v *VpcConfig) *CreateImageBuilderInput { + s.VpcConfig = v + return s +} + +type CreateImageBuilderOutput struct { + _ struct{} `type:"structure"` + + // Information about the image builder. + ImageBuilder *ImageBuilder `type:"structure"` +} + +// String returns the string representation +func (s CreateImageBuilderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageBuilderOutput) GoString() string { + return s.String() +} + +// SetImageBuilder sets the ImageBuilder field's value. +func (s *CreateImageBuilderOutput) SetImageBuilder(v *ImageBuilder) *CreateImageBuilderOutput { + s.ImageBuilder = v + return s +} + +type CreateImageBuilderStreamingURLInput struct { + _ struct{} `type:"structure"` + + // The name of the image builder. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The time that the streaming URL will be valid, in seconds. Specify a value + // between 1 and 604800 seconds. The default is 3600 seconds. + Validity *int64 `type:"long"` +} + +// String returns the string representation +func (s CreateImageBuilderStreamingURLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageBuilderStreamingURLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateImageBuilderStreamingURLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateImageBuilderStreamingURLInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateImageBuilderStreamingURLInput) SetName(v string) *CreateImageBuilderStreamingURLInput { + s.Name = &v + return s +} + +// SetValidity sets the Validity field's value. +func (s *CreateImageBuilderStreamingURLInput) SetValidity(v int64) *CreateImageBuilderStreamingURLInput { + s.Validity = &v + return s +} + +type CreateImageBuilderStreamingURLOutput struct { + _ struct{} `type:"structure"` + + // The elapsed time, in seconds after the Unix epoch, when this URL expires. + Expires *time.Time `type:"timestamp"` + + // The URL to start the AppStream 2.0 streaming session. + StreamingURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateImageBuilderStreamingURLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageBuilderStreamingURLOutput) GoString() string { + return s.String() +} + +// SetExpires sets the Expires field's value. +func (s *CreateImageBuilderStreamingURLOutput) SetExpires(v time.Time) *CreateImageBuilderStreamingURLOutput { + s.Expires = &v + return s +} + +// SetStreamingURL sets the StreamingURL field's value. 
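A sketch of requesting a streaming URL for an image builder; svc is an *appstream.AppStream client, the builder name is hypothetical, and aws is the github.com/aws/aws-sdk-go/aws package:

in := &appstream.CreateImageBuilderStreamingURLInput{}
in.SetName("example-builder").SetValidity(7200) // two hours, within the documented 1-604800 range
if err := in.Validate(); err == nil {
	if out, err := svc.CreateImageBuilderStreamingURL(in); err == nil {
		fmt.Println(aws.StringValue(out.StreamingURL), out.Expires) // Expires is a *time.Time
	}
}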
+func (s *CreateImageBuilderStreamingURLOutput) SetStreamingURL(v string) *CreateImageBuilderStreamingURLOutput {
+ s.StreamingURL = &v
+ return s
+}
+
+type CreateStackInput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of interface VPC endpoint (interface endpoint) objects. Users of
+ // the stack can connect to AppStream 2.0 only through the specified endpoints.
+ AccessEndpoints []*AccessEndpoint `min:"1" type:"list"`
+
+ // The persistent application settings for users of a stack. When these settings
+ // are enabled, changes that users make to applications and Windows settings
+ // are automatically saved after each session and applied to the next session.
+ ApplicationSettings *ApplicationSettings `type:"structure"`
+
+ // The description to display.
+ Description *string `type:"string"`
+
+ // The stack name to display.
+ DisplayName *string `type:"string"`
+
+ // The domains where AppStream 2.0 streaming sessions can be embedded in an
+ // iframe. You must approve the domains that you want to host embedded AppStream
+ // 2.0 streaming sessions.
+ EmbedHostDomains []*string `min:"1" type:"list"`
+
+ // The URL that users are redirected to after they click the Send Feedback link.
+ // If no URL is specified, no Send Feedback link is displayed.
+ FeedbackURL *string `type:"string"`
+
+ // The name of the stack.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // The URL that users are redirected to after their streaming session ends.
+ RedirectURL *string `type:"string"`
+
+ // The storage connectors to enable.
+ StorageConnectors []*StorageConnector `type:"list"`
+
+ // The tags to associate with the stack. A tag is a key-value pair, and the
+ // value is optional. For example, Environment=Test. If you do not specify a
+ // value, the value is set to an empty string (Environment=).
+ //
+ // Generally allowed characters are: letters, numbers, and spaces representable
+ // in UTF-8, and the following special characters:
+ //
+ // _ . : / = + \ - @
+ //
+ // For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html)
+ // in the Amazon AppStream 2.0 Administration Guide.
+ Tags map[string]*string `min:"1" type:"map"`
+
+ // The actions that are enabled or disabled for users during their streaming
+ // sessions. By default, these actions are enabled.
+ UserSettings []*UserSetting `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateStackInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStackInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStackInput"} + if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) + } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.UserSettings != nil && len(s.UserSettings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserSettings", 1)) + } + if s.AccessEndpoints != nil { + for i, v := range s.AccessEndpoints { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessEndpoints", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ApplicationSettings != nil { + if err := s.ApplicationSettings.Validate(); err != nil { + invalidParams.AddNested("ApplicationSettings", err.(request.ErrInvalidParams)) + } + } + if s.StorageConnectors != nil { + for i, v := range s.StorageConnectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StorageConnectors", i), err.(request.ErrInvalidParams)) + } + } + } + if s.UserSettings != nil { + for i, v := range s.UserSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserSettings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessEndpoints sets the AccessEndpoints field's value. +func (s *CreateStackInput) SetAccessEndpoints(v []*AccessEndpoint) *CreateStackInput { + s.AccessEndpoints = v + return s +} + +// SetApplicationSettings sets the ApplicationSettings field's value. +func (s *CreateStackInput) SetApplicationSettings(v *ApplicationSettings) *CreateStackInput { + s.ApplicationSettings = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateStackInput) SetDescription(v string) *CreateStackInput { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *CreateStackInput) SetDisplayName(v string) *CreateStackInput { + s.DisplayName = &v + return s +} + +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *CreateStackInput) SetEmbedHostDomains(v []*string) *CreateStackInput { + s.EmbedHostDomains = v + return s +} + +// SetFeedbackURL sets the FeedbackURL field's value. +func (s *CreateStackInput) SetFeedbackURL(v string) *CreateStackInput { + s.FeedbackURL = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateStackInput) SetName(v string) *CreateStackInput { + s.Name = &v + return s +} + +// SetRedirectURL sets the RedirectURL field's value. +func (s *CreateStackInput) SetRedirectURL(v string) *CreateStackInput { + s.RedirectURL = &v + return s +} + +// SetStorageConnectors sets the StorageConnectors field's value. +func (s *CreateStackInput) SetStorageConnectors(v []*StorageConnector) *CreateStackInput { + s.StorageConnectors = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateStackInput) SetTags(v map[string]*string) *CreateStackInput { + s.Tags = v + return s +} + +// SetUserSettings sets the UserSettings field's value. 
+func (s *CreateStackInput) SetUserSettings(v []*UserSetting) *CreateStackInput { + s.UserSettings = v + return s +} + +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // Information about the stack. + Stack *Stack `type:"structure"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +// SetStack sets the Stack field's value. +func (s *CreateStackOutput) SetStack(v *Stack) *CreateStackOutput { + s.Stack = v + return s +} + +type CreateStreamingURLInput struct { + _ struct{} `type:"structure"` + + // The name of the application to launch after the session starts. This is the + // name that you specified as Name in the Image Assistant. + ApplicationId *string `min:"1" type:"string"` + + // The name of the fleet. + // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The session context. For more information, see Session Context (https://docs.aws.amazon.com/appstream2/latest/developerguide/managing-stacks-fleets.html#managing-stacks-fleets-parameters) + // in the Amazon AppStream 2.0 Administration Guide. + SessionContext *string `min:"1" type:"string"` + + // The name of the stack. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` + + // The identifier of the user. + // + // UserId is a required field + UserId *string `min:"2" type:"string" required:"true"` + + // The time that the streaming URL will be valid, in seconds. Specify a value + // between 1 and 604800 seconds. The default is 60 seconds. + Validity *int64 `type:"long"` +} + +// String returns the string representation +func (s CreateStreamingURLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingURLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingURLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingURLInput"} + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.FleetName == nil { + invalidParams.Add(request.NewErrParamRequired("FleetName")) + } + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) + } + if s.SessionContext != nil && len(*s.SessionContext) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionContext", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) + } + if s.UserId != nil && len(*s.UserId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *CreateStreamingURLInput) SetApplicationId(v string) *CreateStreamingURLInput { + s.ApplicationId = &v + return s +} + +// SetFleetName sets the FleetName field's value. 
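UserId is the only field above with a two-character minimum; a sketch with hypothetical names, reusing the earlier imports:

in := &appstream.CreateStreamingURLInput{}
in.SetFleetName("example-fleet").
	SetStackName("example-stack").
	SetUserId("u") // one character, so the MinLen(2) check trips
fmt.Println(in.Validate())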
+func (s *CreateStreamingURLInput) SetFleetName(v string) *CreateStreamingURLInput { + s.FleetName = &v + return s +} + +// SetSessionContext sets the SessionContext field's value. +func (s *CreateStreamingURLInput) SetSessionContext(v string) *CreateStreamingURLInput { + s.SessionContext = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *CreateStreamingURLInput) SetStackName(v string) *CreateStreamingURLInput { + s.StackName = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *CreateStreamingURLInput) SetUserId(v string) *CreateStreamingURLInput { + s.UserId = &v + return s +} + +// SetValidity sets the Validity field's value. +func (s *CreateStreamingURLInput) SetValidity(v int64) *CreateStreamingURLInput { + s.Validity = &v + return s +} + +type CreateStreamingURLOutput struct { + _ struct{} `type:"structure"` + + // The elapsed time, in seconds after the Unix epoch, when this URL expires. + Expires *time.Time `type:"timestamp"` + + // The URL to start the AppStream 2.0 streaming session. + StreamingURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateStreamingURLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingURLOutput) GoString() string { + return s.String() +} + +// SetExpires sets the Expires field's value. +func (s *CreateStreamingURLOutput) SetExpires(v time.Time) *CreateStreamingURLOutput { + s.Expires = &v + return s +} + +// SetStreamingURL sets the StreamingURL field's value. +func (s *CreateStreamingURLOutput) SetStreamingURL(v string) *CreateStreamingURLOutput { + s.StreamingURL = &v + return s +} + +type CreateUsageReportSubscriptionInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateUsageReportSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUsageReportSubscriptionInput) GoString() string { + return s.String() +} + +type CreateUsageReportSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket where generated reports are stored. + // + // If you enabled on-instance session scripts and Amazon S3 logging for your + // session script configuration, AppStream 2.0 created an S3 bucket to store + // the script output. The bucket is unique to your account and Region. When + // you enable usage reporting in this case, AppStream 2.0 uses the same bucket + // to store your usage reports. If you haven't already enabled on-instance session + // scripts, when you enable usage reports, AppStream 2.0 creates a new S3 bucket. + S3BucketName *string `min:"1" type:"string"` + + // The schedule for generating usage reports. + Schedule *string `type:"string" enum:"UsageReportSchedule"` +} + +// String returns the string representation +func (s CreateUsageReportSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUsageReportSubscriptionOutput) GoString() string { + return s.String() +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *CreateUsageReportSubscriptionOutput) SetS3BucketName(v string) *CreateUsageReportSubscriptionOutput { + s.S3BucketName = &v + return s +} + +// SetSchedule sets the Schedule field's value. 
+func (s *CreateUsageReportSubscriptionOutput) SetSchedule(v string) *CreateUsageReportSubscriptionOutput { + s.Schedule = &v + return s +} + +type CreateUserInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the user. You must specify USERPOOL. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The first name, or given name, of the user. + FirstName *string `type:"string" sensitive:"true"` + + // The last name, or surname, of the user. + LastName *string `type:"string" sensitive:"true"` + + // The action to take for the welcome email that is sent to a user after the + // user is created in the user pool. If you specify SUPPRESS, no email is sent. + // If you specify RESEND, do not specify the first name or last name of the + // user. If the value is null, the email is sent. + // + // The temporary password in the welcome email is valid for only 7 days. If + // users don’t set their passwords within 7 days, you must send them a new + // welcome email. + MessageAction *string `type:"string" enum:"MessageAction"` + + // The email address of the user. + // + // Users' email addresses are case-sensitive. During login, if they specify + // an email address that doesn't use the same capitalization as the email address + // specified when their user pool account was created, a "user does not exist" + // error message displays. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s CreateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *CreateUserInput) SetAuthenticationType(v string) *CreateUserInput { + s.AuthenticationType = &v + return s +} + +// SetFirstName sets the FirstName field's value. +func (s *CreateUserInput) SetFirstName(v string) *CreateUserInput { + s.FirstName = &v + return s +} + +// SetLastName sets the LastName field's value. +func (s *CreateUserInput) SetLastName(v string) *CreateUserInput { + s.LastName = &v + return s +} + +// SetMessageAction sets the MessageAction field's value. +func (s *CreateUserInput) SetMessageAction(v string) *CreateUserInput { + s.MessageAction = &v + return s +} + +// SetUserName sets the UserName field's value. 
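A sketch of creating a user pool user while suppressing the welcome email; the address is hypothetical, and MessageActionSuppress is an enum constant assumed from elsewhere in this package:

in := &appstream.CreateUserInput{}
in.SetAuthenticationType(appstream.AuthenticationTypeUserpool) // USERPOOL is the only accepted value
in.SetUserName("User@Example.com") // capitalization must match at sign-in, per the note above
in.SetMessageAction(appstream.MessageActionSuppress) // no welcome email is sent
fmt.Println(in.Validate())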
+func (s *CreateUserInput) SetUserName(v string) *CreateUserInput { + s.UserName = &v + return s +} + +type CreateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserOutput) GoString() string { + return s.String() +} + +type DeleteDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the directory configuration. + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *DeleteDirectoryConfigInput) SetDirectoryName(v string) *DeleteDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +type DeleteDirectoryConfigOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryConfigOutput) GoString() string { + return s.String() +} + +type DeleteFleetInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteFleetInput) SetName(v string) *DeleteFleetInput { + s.Name = &v + return s +} + +type DeleteFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetOutput) GoString() string { + return s.String() +} + +type DeleteImageBuilderInput struct { + _ struct{} `type:"structure"` + + // The name of the image builder. 
+ // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteImageBuilderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImageBuilderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteImageBuilderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteImageBuilderInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteImageBuilderInput) SetName(v string) *DeleteImageBuilderInput { + s.Name = &v + return s +} + +type DeleteImageBuilderOutput struct { + _ struct{} `type:"structure"` + + // Information about the image builder. + ImageBuilder *ImageBuilder `type:"structure"` +} + +// String returns the string representation +func (s DeleteImageBuilderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImageBuilderOutput) GoString() string { + return s.String() +} + +// SetImageBuilder sets the ImageBuilder field's value. +func (s *DeleteImageBuilderOutput) SetImageBuilder(v *ImageBuilder) *DeleteImageBuilderOutput { + s.ImageBuilder = v + return s +} + +type DeleteImageInput struct { + _ struct{} `type:"structure"` + + // The name of the image. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteImageInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteImageInput) SetName(v string) *DeleteImageInput { + s.Name = &v + return s +} + +type DeleteImageOutput struct { + _ struct{} `type:"structure"` + + // Information about the image. + Image *Image `type:"structure"` +} + +// String returns the string representation +func (s DeleteImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImageOutput) GoString() string { + return s.String() +} + +// SetImage sets the Image field's value. +func (s *DeleteImageOutput) SetImage(v *Image) *DeleteImageOutput { + s.Image = v + return s +} + +type DeleteImagePermissionsInput struct { + _ struct{} `type:"structure"` + + // The name of the private image. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The 12-digit identifier of the AWS account for which to delete image permissions. 
+ // + // SharedAccountId is a required field + SharedAccountId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteImagePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImagePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteImagePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteImagePermissionsInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SharedAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("SharedAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteImagePermissionsInput) SetName(v string) *DeleteImagePermissionsInput { + s.Name = &v + return s +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *DeleteImagePermissionsInput) SetSharedAccountId(v string) *DeleteImagePermissionsInput { + s.SharedAccountId = &v + return s +} + +type DeleteImagePermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteImagePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImagePermissionsOutput) GoString() string { + return s.String() +} + +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // The name of the stack. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStackInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. 
+func (s *DeleteStackInput) SetName(v string) *DeleteStackInput { + s.Name = &v + return s +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +type DeleteUsageReportSubscriptionInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUsageReportSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUsageReportSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteUsageReportSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUsageReportSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUsageReportSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the user. You must specify USERPOOL. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The email address of the user. + // + // Users' email addresses are case-sensitive. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DeleteUserInput) SetAuthenticationType(v string) *DeleteUserInput { + s.AuthenticationType = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteUserInput) SetUserName(v string) *DeleteUserInput { + s.UserName = &v + return s +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +type DescribeDirectoryConfigsInput struct { + _ struct{} `type:"structure"` + + // The directory names. + DirectoryNames []*string `type:"list"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoryConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoryConfigsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDirectoryConfigsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDirectoryConfigsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryNames sets the DirectoryNames field's value. +func (s *DescribeDirectoryConfigsInput) SetDirectoryNames(v []*string) *DescribeDirectoryConfigsInput { + s.DirectoryNames = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDirectoryConfigsInput) SetMaxResults(v int64) *DescribeDirectoryConfigsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDirectoryConfigsInput) SetNextToken(v string) *DescribeDirectoryConfigsInput { + s.NextToken = &v + return s +} + +type DescribeDirectoryConfigsOutput struct { + _ struct{} `type:"structure"` + + // Information about the directory configurations. Note that although the response + // syntax in this topic includes the account password, this password is not + // returned in the actual response. + DirectoryConfigs []*DirectoryConfig `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoryConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoryConfigsOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfigs sets the DirectoryConfigs field's value. +func (s *DescribeDirectoryConfigsOutput) SetDirectoryConfigs(v []*DirectoryConfig) *DescribeDirectoryConfigsOutput { + s.DirectoryConfigs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDirectoryConfigsOutput) SetNextToken(v string) *DescribeDirectoryConfigsOutput { + s.NextToken = &v + return s +} + +type DescribeFleetsInput struct { + _ struct{} `type:"structure"` + + // The names of the fleets to describe. + Names []*string `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. 
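// A minimal sketch (illustrative, not part of the generated file) of the
// pagination contract documented above, assuming c is an already-configured
// *AppStream client: a nil NextToken in the input requests the first page,
// and a nil NextToken in the output means the last page has been read.
func exampleAllDirectoryConfigs(c *AppStream) ([]*DirectoryConfig, error) {
	var all []*DirectoryConfig
	in := &DescribeDirectoryConfigsInput{} // nil NextToken: start at page one
	for {
		out, err := c.DescribeDirectoryConfigs(in)
		if err != nil {
			return nil, err
		}
		all = append(all, out.DirectoryConfigs...)
		if out.NextToken == nil {
			return all, nil // no more pages
		}
		in.NextToken = out.NextToken
	}
}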
+func (s *DescribeFleetsInput) SetNames(v []*string) *DescribeFleetsInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsInput) SetNextToken(v string) *DescribeFleetsInput { + s.NextToken = &v + return s +} + +type DescribeFleetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the fleets. + Fleets []*Fleet `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsOutput) GoString() string { + return s.String() +} + +// SetFleets sets the Fleets field's value. +func (s *DescribeFleetsOutput) SetFleets(v []*Fleet) *DescribeFleetsOutput { + s.Fleets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsOutput) SetNextToken(v string) *DescribeFleetsOutput { + s.NextToken = &v + return s +} + +type DescribeImageBuildersInput struct { + _ struct{} `type:"structure"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The names of the image builders to describe. + Names []*string `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeImageBuildersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageBuildersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImageBuildersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageBuildersInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImageBuildersInput) SetMaxResults(v int64) *DescribeImageBuildersInput { + s.MaxResults = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeImageBuildersInput) SetNames(v []*string) *DescribeImageBuildersInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageBuildersInput) SetNextToken(v string) *DescribeImageBuildersInput { + s.NextToken = &v + return s +} + +type DescribeImageBuildersOutput struct { + _ struct{} `type:"structure"` + + // Information about the image builders. + ImageBuilders []*ImageBuilder `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeImageBuildersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageBuildersOutput) GoString() string { + return s.String() +} + +// SetImageBuilders sets the ImageBuilders field's value. 
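// A minimal sketch (illustrative, not part of the generated file) of the
// fluent style enabled by the setters above: each Set* method returns its
// receiver, so inputs can be assembled in a single expression. The builder
// name and page size are placeholders.
func exampleImageBuilderQuery() *DescribeImageBuildersInput {
	return (&DescribeImageBuildersInput{}).
		SetNames([]*string{aws.String("example-builder")}).
		SetMaxResults(25)
}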
+func (s *DescribeImageBuildersOutput) SetImageBuilders(v []*ImageBuilder) *DescribeImageBuildersOutput { + s.ImageBuilders = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageBuildersOutput) SetNextToken(v string) *DescribeImageBuildersOutput { + s.NextToken = &v + return s +} + +type DescribeImagePermissionsInput struct { + _ struct{} `type:"structure"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The name of the private image for which to describe permissions. The image + // must be one that you own. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The 12-digit identifier of one or more AWS accounts with which the image + // is shared. + SharedAwsAccountIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeImagePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImagePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImagePermissionsInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.SharedAwsAccountIds != nil && len(s.SharedAwsAccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SharedAwsAccountIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImagePermissionsInput) SetMaxResults(v int64) *DescribeImagePermissionsInput { + s.MaxResults = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeImagePermissionsInput) SetName(v string) *DescribeImagePermissionsInput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImagePermissionsInput) SetNextToken(v string) *DescribeImagePermissionsInput { + s.NextToken = &v + return s +} + +// SetSharedAwsAccountIds sets the SharedAwsAccountIds field's value. +func (s *DescribeImagePermissionsInput) SetSharedAwsAccountIds(v []*string) *DescribeImagePermissionsInput { + s.SharedAwsAccountIds = v + return s +} + +type DescribeImagePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The name of the private image. + Name *string `type:"string"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` + + // The permissions for a private image that you own. + SharedImagePermissionsList []*SharedImagePermissions `type:"list"` +} + +// String returns the string representation +func (s DescribeImagePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagePermissionsOutput) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. 
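// A minimal sketch (illustrative, not part of the generated file) of the list
// constraint above: SharedAwsAccountIds is optional, but once set it must
// contain at least one element, so a non-nil empty slice fails Validate. The
// image name is a placeholder.
func exampleEmptySharedAccountsIsInvalid() bool {
	in := (&DescribeImagePermissionsInput{}).
		SetName("example-private-image").
		SetSharedAwsAccountIds([]*string{}) // non-nil, len 0: violates min:"1"
	return in.Validate() != nil // true
}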
+func (s *DescribeImagePermissionsOutput) SetName(v string) *DescribeImagePermissionsOutput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImagePermissionsOutput) SetNextToken(v string) *DescribeImagePermissionsOutput { + s.NextToken = &v + return s +} + +// SetSharedImagePermissionsList sets the SharedImagePermissionsList field's value. +func (s *DescribeImagePermissionsOutput) SetSharedImagePermissionsList(v []*SharedImagePermissions) *DescribeImagePermissionsOutput { + s.SharedImagePermissionsList = v + return s +} + +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // The ARNs of the public, private, and shared images to describe. + Arns []*string `type:"list"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The names of the public or private images to describe. + Names []*string `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The type of image (public, private, or shared) to describe. + Type *string `type:"string" enum:"VisibilityType"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImagesInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArns sets the Arns field's value. +func (s *DescribeImagesInput) SetArns(v []*string) *DescribeImagesInput { + s.Arns = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImagesInput) SetMaxResults(v int64) *DescribeImagesInput { + s.MaxResults = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeImagesInput) SetNames(v []*string) *DescribeImagesInput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImagesInput) SetNextToken(v string) *DescribeImagesInput { + s.NextToken = &v + return s +} + +// SetType sets the Type field's value. +func (s *DescribeImagesInput) SetType(v string) *DescribeImagesInput { + s.Type = &v + return s +} + +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about the images. + Images []*Image `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +// SetImages sets the Images field's value. +func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput { + s.Images = v + return s +} + +// SetNextToken sets the NextToken field's value. 
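// A minimal sketch (illustrative, not part of the generated file) of the Type
// filter above, assuming c is an already-configured *AppStream client and
// that the VisibilityType enum constants (such as VisibilityTypePrivate) are
// defined elsewhere in this package.
func examplePrivateImages(c *AppStream) (*DescribeImagesOutput, error) {
	in := (&DescribeImagesInput{}).SetType(VisibilityTypePrivate)
	return c.DescribeImages(in)
}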
+func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput { + s.NextToken = &v + return s +} + +type DescribeSessionsInput struct { + _ struct{} `type:"structure"` + + // The authentication method. Specify API for a user authenticated using a streaming + // URL or SAML for a SAML federated user. The default is to authenticate users + // using a streaming URL. + AuthenticationType *string `type:"string" enum:"AuthenticationType"` + + // The name of the fleet. This value is case-sensitive. + // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The size of each page of results. The default value is 20 and the maximum + // value is 50. + Limit *int64 `type:"integer"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The name of the stack. This value is case-sensitive. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` + + // The user identifier. + UserId *string `min:"2" type:"string"` +} + +// String returns the string representation +func (s DescribeSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSessionsInput"} + if s.FleetName == nil { + invalidParams.Add(request.NewErrParamRequired("FleetName")) + } + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + if s.UserId != nil && len(*s.UserId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DescribeSessionsInput) SetAuthenticationType(v string) *DescribeSessionsInput { + s.AuthenticationType = &v + return s +} + +// SetFleetName sets the FleetName field's value. +func (s *DescribeSessionsInput) SetFleetName(v string) *DescribeSessionsInput { + s.FleetName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeSessionsInput) SetLimit(v int64) *DescribeSessionsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSessionsInput) SetNextToken(v string) *DescribeSessionsInput { + s.NextToken = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *DescribeSessionsInput) SetStackName(v string) *DescribeSessionsInput { + s.StackName = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *DescribeSessionsInput) SetUserId(v string) *DescribeSessionsInput { + s.UserId = &v + return s +} + +type DescribeSessionsOutput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. 
+	// If there are no more pages, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// Information about the streaming sessions.
+	Sessions []*Session `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSessionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSessionsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeSessionsOutput) SetNextToken(v string) *DescribeSessionsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetSessions sets the Sessions field's value.
+func (s *DescribeSessionsOutput) SetSessions(v []*Session) *DescribeSessionsOutput {
+	s.Sessions = v
+	return s
+}
+
+type DescribeStacksInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the stacks to describe.
+	Names []*string `type:"list"`
+
+	// The pagination token to use to retrieve the next page of results for this
+	// operation. If this value is null, it retrieves the first page.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeStacksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStacksInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeStacksInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeStacksInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetNames sets the Names field's value.
+func (s *DescribeStacksInput) SetNames(v []*string) *DescribeStacksInput {
+	s.Names = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeStacksInput) SetNextToken(v string) *DescribeStacksInput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeStacksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The pagination token to use to retrieve the next page of results for this
+	// operation. If there are no more pages, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// Information about the stacks.
+	Stacks []*Stack `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStacksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStacksOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeStacksOutput) SetNextToken(v string) *DescribeStacksOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetStacks sets the Stacks field's value.
+func (s *DescribeStacksOutput) SetStacks(v []*Stack) *DescribeStacksOutput {
+	s.Stacks = v
+	return s
+}
+
+type DescribeUsageReportSubscriptionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum size of each page of results.
+	MaxResults *int64 `type:"integer"`
+
+	// The pagination token to use to retrieve the next page of results for this
+	// operation. If this value is null, it retrieves the first page.
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeUsageReportSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUsageReportSubscriptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeUsageReportSubscriptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUsageReportSubscriptionsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeUsageReportSubscriptionsInput) SetMaxResults(v int64) *DescribeUsageReportSubscriptionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUsageReportSubscriptionsInput) SetNextToken(v string) *DescribeUsageReportSubscriptionsInput { + s.NextToken = &v + return s +} + +type DescribeUsageReportSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` + + // Information about the usage report subscription. + UsageReportSubscriptions []*UsageReportSubscription `type:"list"` +} + +// String returns the string representation +func (s DescribeUsageReportSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUsageReportSubscriptionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUsageReportSubscriptionsOutput) SetNextToken(v string) *DescribeUsageReportSubscriptionsOutput { + s.NextToken = &v + return s +} + +// SetUsageReportSubscriptions sets the UsageReportSubscriptions field's value. +func (s *DescribeUsageReportSubscriptionsOutput) SetUsageReportSubscriptions(v []*UsageReportSubscription) *DescribeUsageReportSubscriptionsOutput { + s.UsageReportSubscriptions = v + return s +} + +type DescribeUserStackAssociationsInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the user who is associated with the stack. You + // must specify USERPOOL. + AuthenticationType *string `type:"string" enum:"AuthenticationType"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The name of the stack that is associated with the user. + StackName *string `min:"1" type:"string"` + + // The email address of the user who is associated with the stack. + // + // Users' email addresses are case-sensitive. + UserName *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s DescribeUserStackAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserStackAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
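// A minimal sketch (illustrative, not part of the generated file) combining
// the fields above: user pool operations accept only the USERPOOL
// authentication type, assumed here to be available as the
// AuthenticationTypeUserpool enum constant defined elsewhere in this package.
// Arguments are placeholders.
func exampleUserStackQuery(userEmail, stackName string) *DescribeUserStackAssociationsInput {
	return (&DescribeUserStackAssociationsInput{}).
		SetAuthenticationType(AuthenticationTypeUserpool).
		SetUserName(userEmail). // matched case-sensitively against the pool
		SetStackName(stackName)
}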
+func (s *DescribeUserStackAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUserStackAssociationsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DescribeUserStackAssociationsInput) SetAuthenticationType(v string) *DescribeUserStackAssociationsInput { + s.AuthenticationType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeUserStackAssociationsInput) SetMaxResults(v int64) *DescribeUserStackAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUserStackAssociationsInput) SetNextToken(v string) *DescribeUserStackAssociationsInput { + s.NextToken = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *DescribeUserStackAssociationsInput) SetStackName(v string) *DescribeUserStackAssociationsInput { + s.StackName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DescribeUserStackAssociationsInput) SetUserName(v string) *DescribeUserStackAssociationsInput { + s.UserName = &v + return s +} + +type DescribeUserStackAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` + + // The UserStackAssociation objects. + UserStackAssociations []*UserStackAssociation `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeUserStackAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserStackAssociationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUserStackAssociationsOutput) SetNextToken(v string) *DescribeUserStackAssociationsOutput { + s.NextToken = &v + return s +} + +// SetUserStackAssociations sets the UserStackAssociations field's value. +func (s *DescribeUserStackAssociationsOutput) SetUserStackAssociations(v []*UserStackAssociation) *DescribeUserStackAssociationsOutput { + s.UserStackAssociations = v + return s +} + +type DescribeUsersInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the users in the user pool to describe. You must + // specify USERPOOL. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The maximum size of each page of results. + MaxResults *int64 `type:"integer"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUsersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUsersInput"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DescribeUsersInput) SetAuthenticationType(v string) *DescribeUsersInput { + s.AuthenticationType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeUsersInput) SetMaxResults(v int64) *DescribeUsersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUsersInput) SetNextToken(v string) *DescribeUsersInput { + s.NextToken = &v + return s +} + +type DescribeUsersOutput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` + + // Information about users in the user pool. + Users []*User `type:"list"` +} + +// String returns the string representation +func (s DescribeUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUsersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeUsersOutput) SetNextToken(v string) *DescribeUsersOutput { + s.NextToken = &v + return s +} + +// SetUsers sets the Users field's value. +func (s *DescribeUsersOutput) SetUsers(v []*User) *DescribeUsersOutput { + s.Users = v + return s +} + +// Describes the configuration information required to join fleets and image +// builders to Microsoft Active Directory domains. +type DirectoryConfig struct { + _ struct{} `type:"structure"` + + // The time the directory configuration was created. + CreatedTime *time.Time `type:"timestamp"` + + // The fully qualified name of the directory (for example, corp.example.com). + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The distinguished names of the organizational units for computer accounts. + OrganizationalUnitDistinguishedNames []*string `type:"list"` + + // The credentials for the service account used by the fleet or image builder + // to connect to the directory. + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure"` +} + +// String returns the string representation +func (s DirectoryConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConfig) GoString() string { + return s.String() +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DirectoryConfig) SetCreatedTime(v time.Time) *DirectoryConfig { + s.CreatedTime = &v + return s +} + +// SetDirectoryName sets the DirectoryName field's value. 
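// A minimal sketch (illustrative, not part of the generated file) of
// assembling the DirectoryConfig described above; it assumes the generated
// ServiceAccountCredentials setters defined elsewhere in this package. The
// directory, OU, and credential values are placeholders, and as the output
// documentation notes, the account password is never returned in responses.
func exampleDirectoryConfig() *DirectoryConfig {
	creds := (&ServiceAccountCredentials{}).
		SetAccountName("CORP\\svc-appstream").    // placeholder service account
		SetAccountPassword("placeholder-password") // placeholder secret
	return (&DirectoryConfig{}).
		SetDirectoryName("corp.example.com").
		SetOrganizationalUnitDistinguishedNames([]*string{
			aws.String("OU=AppStream,DC=corp,DC=example,DC=com"),
		}).
		SetServiceAccountCredentials(creds)
}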
+func (s *DirectoryConfig) SetDirectoryName(v string) *DirectoryConfig { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. +func (s *DirectoryConfig) SetOrganizationalUnitDistinguishedNames(v []*string) *DirectoryConfig { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *DirectoryConfig) SetServiceAccountCredentials(v *ServiceAccountCredentials) *DirectoryConfig { + s.ServiceAccountCredentials = v + return s +} + +type DisableUserInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the user. You must specify USERPOOL. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The email address of the user. + // + // Users' email addresses are case-sensitive. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s DisableUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableUserInput"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DisableUserInput) SetAuthenticationType(v string) *DisableUserInput { + s.AuthenticationType = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DisableUserInput) SetUserName(v string) *DisableUserInput { + s.UserName = &v + return s +} + +type DisableUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableUserOutput) GoString() string { + return s.String() +} + +type DisassociateFleetInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The name of the stack. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateFleetInput"} + if s.FleetName == nil { + invalidParams.Add(request.NewErrParamRequired("FleetName")) + } + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetName sets the FleetName field's value. +func (s *DisassociateFleetInput) SetFleetName(v string) *DisassociateFleetInput { + s.FleetName = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *DisassociateFleetInput) SetStackName(v string) *DisassociateFleetInput { + s.StackName = &v + return s +} + +type DisassociateFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateFleetOutput) GoString() string { + return s.String() +} + +// Describes the configuration information required to join fleets and image +// builders to Microsoft Active Directory domains. +type DomainJoinInfo struct { + _ struct{} `type:"structure"` + + // The fully qualified name of the directory (for example, corp.example.com). + DirectoryName *string `type:"string"` + + // The distinguished name of the organizational unit for computer accounts. + OrganizationalUnitDistinguishedName *string `type:"string"` +} + +// String returns the string representation +func (s DomainJoinInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainJoinInfo) GoString() string { + return s.String() +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *DomainJoinInfo) SetDirectoryName(v string) *DomainJoinInfo { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value. +func (s *DomainJoinInfo) SetOrganizationalUnitDistinguishedName(v string) *DomainJoinInfo { + s.OrganizationalUnitDistinguishedName = &v + return s +} + +type EnableUserInput struct { + _ struct{} `type:"structure"` + + // The authentication type for the user. You must specify USERPOOL. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The email address of the user. + // + // Users' email addresses are case-sensitive. During login, if they specify + // an email address that doesn't use the same capitalization as the email address + // specified when their user pool account was created, a "user does not exist" + // error message displays. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s EnableUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
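// A minimal sketch (illustrative, not part of the generated file) of the
// DomainJoinInfo type above: unlike DirectoryConfig, it carries no
// credentials and names a single OU, simply pointing a fleet or image
// builder at an existing directory configuration. Values are placeholders.
func exampleDomainJoin() *DomainJoinInfo {
	return (&DomainJoinInfo{}).
		SetDirectoryName("corp.example.com").
		SetOrganizationalUnitDistinguishedName("OU=Fleets,DC=corp,DC=example,DC=com")
}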
+func (s *EnableUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableUserInput"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *EnableUserInput) SetAuthenticationType(v string) *EnableUserInput { + s.AuthenticationType = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *EnableUserInput) SetUserName(v string) *EnableUserInput { + s.UserName = &v + return s +} + +type EnableUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableUserOutput) GoString() string { + return s.String() +} + +type ExpireSessionInput struct { + _ struct{} `type:"structure"` + + // The identifier of the streaming session. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExpireSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpireSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExpireSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExpireSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *ExpireSessionInput) SetSessionId(v string) *ExpireSessionInput { + s.SessionId = &v + return s +} + +type ExpireSessionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExpireSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpireSessionOutput) GoString() string { + return s.String() +} + +// Describes a fleet. +type Fleet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the fleet. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The capacity status for the fleet. + // + // ComputeCapacityStatus is a required field + ComputeCapacityStatus *ComputeCapacityStatus `type:"structure" required:"true"` + + // The time the fleet was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description to display. + Description *string `min:"1" type:"string"` + + // The amount of time that a streaming session remains active after users disconnect. + // If they try to reconnect to the streaming session after a disconnection or + // network interruption within this time interval, they are connected to their + // previous session. Otherwise, they are connected to a new session with a new + // streaming instance. 
+ // + // Specify a value between 60 and 360000. + DisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The fleet name to display. + DisplayName *string `min:"1" type:"string"` + + // The name of the directory and organizational unit (OU) to use to join the + // fleet to a Microsoft Active Directory domain. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Indicates whether default internet access is enabled for the fleet. + EnableDefaultInternetAccess *bool `type:"boolean"` + + // The fleet errors. + FleetErrors []*FleetError `type:"list"` + + // The fleet type. + // + // ALWAYS_ON + // + // Provides users with instant-on access to their apps. You are charged for + // all running instances in your fleet, even if no users are streaming apps. + // + // ON_DEMAND + // + // Provide users with access to applications after they connect, which takes + // one to two minutes. You are charged for instance streaming when users are + // connected and a small hourly fee for instances that are not streaming apps. + FleetType *string `type:"string" enum:"FleetType"` + + // The ARN of the IAM role that is applied to the fleet. To assume a role, the + // fleet instance calls the AWS Security Token Service (STS) AssumeRole API + // operation and passes the ARN of the role to use. The operation creates a + // new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. + IamRoleArn *string `type:"string"` + + // The amount of time that users can be idle (inactive) before they are disconnected + // from their streaming session and the DisconnectTimeoutInSeconds time interval + // begins. Users are notified before they are disconnected due to inactivity. + // If users try to reconnect to the streaming session before the time interval + // specified in DisconnectTimeoutInSeconds elapses, they are connected to their + // previous session. Users are considered idle when they stop providing keyboard + // or mouse input during their streaming session. File uploads and downloads, + // audio in, audio out, and pixels changing do not qualify as user activity. + // If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds + // elapses, they are disconnected. + // + // To prevent users from being disconnected due to inactivity, specify a value + // of 0. Otherwise, specify a value between 60 and 3600. The default value is + // 0. + // + // If you enable this feature, we recommend that you specify a value that corresponds + // exactly to a whole number of minutes (for example, 60, 120, and 180). If + // you don't do this, the value is rounded to the nearest minute. For example, + // if you specify a value of 70, users are disconnected after 1 minute of inactivity. + // If you specify a value that is at the midpoint between two different minutes, + // the value is rounded up. For example, if you specify a value of 90, users + // are disconnected after 2 minutes of inactivity. + IdleDisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The ARN for the public, private, or shared image. 
+ ImageArn *string `type:"string"` + + // The name of the image used to create the fleet. + ImageName *string `min:"1" type:"string"` + + // The instance type to use when launching fleet instances. The following instance + // types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge + // + // InstanceType is a required field + InstanceType *string `min:"1" type:"string" required:"true"` + + // The maximum amount of time that a streaming session can remain active, in + // seconds. If users are still connected to a streaming instance five minutes + // before this limit is reached, they are prompted to save any open documents + // before being disconnected. After this time elapses, the instance is terminated + // and replaced by a new instance. + // + // Specify a value between 600 and 360000. + MaxUserDurationInSeconds *int64 `type:"integer"` + + // The name of the fleet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The current state for the fleet. + // + // State is a required field + State *string `type:"string" required:"true" enum:"FleetState"` + + // The VPC configuration for the fleet. + VpcConfig *VpcConfig `type:"structure"` +} + +// String returns the string representation +func (s Fleet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Fleet) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Fleet) SetArn(v string) *Fleet { + s.Arn = &v + return s +} + +// SetComputeCapacityStatus sets the ComputeCapacityStatus field's value. +func (s *Fleet) SetComputeCapacityStatus(v *ComputeCapacityStatus) *Fleet { + s.ComputeCapacityStatus = v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *Fleet) SetCreatedTime(v time.Time) *Fleet { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Fleet) SetDescription(v string) *Fleet { + s.Description = &v + return s +} + +// SetDisconnectTimeoutInSeconds sets the DisconnectTimeoutInSeconds field's value. +func (s *Fleet) SetDisconnectTimeoutInSeconds(v int64) *Fleet { + s.DisconnectTimeoutInSeconds = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Fleet) SetDisplayName(v string) *Fleet { + s.DisplayName = &v + return s +} + +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *Fleet) SetDomainJoinInfo(v *DomainJoinInfo) *Fleet { + s.DomainJoinInfo = v + return s +} + +// SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. +func (s *Fleet) SetEnableDefaultInternetAccess(v bool) *Fleet { + s.EnableDefaultInternetAccess = &v + return s +} + +// SetFleetErrors sets the FleetErrors field's value. 
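// A minimal sketch (illustrative, not service code) of the rounding rule
// documented for IdleDisconnectTimeoutInSeconds above: values are rounded to
// the nearest whole minute with midpoints rounded up, i.e. ordinary half-up
// integer rounding.
func exampleEffectiveIdleMinutes(seconds int64) int64 {
	// 70 -> 1 minute, 90 -> 2 minutes, matching the documented examples.
	return (seconds + 30) / 60
}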
+func (s *Fleet) SetFleetErrors(v []*FleetError) *Fleet { + s.FleetErrors = v + return s +} + +// SetFleetType sets the FleetType field's value. +func (s *Fleet) SetFleetType(v string) *Fleet { + s.FleetType = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *Fleet) SetIamRoleArn(v string) *Fleet { + s.IamRoleArn = &v + return s +} + +// SetIdleDisconnectTimeoutInSeconds sets the IdleDisconnectTimeoutInSeconds field's value. +func (s *Fleet) SetIdleDisconnectTimeoutInSeconds(v int64) *Fleet { + s.IdleDisconnectTimeoutInSeconds = &v + return s +} + +// SetImageArn sets the ImageArn field's value. +func (s *Fleet) SetImageArn(v string) *Fleet { + s.ImageArn = &v + return s +} + +// SetImageName sets the ImageName field's value. +func (s *Fleet) SetImageName(v string) *Fleet { + s.ImageName = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *Fleet) SetInstanceType(v string) *Fleet { + s.InstanceType = &v + return s +} + +// SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. +func (s *Fleet) SetMaxUserDurationInSeconds(v int64) *Fleet { + s.MaxUserDurationInSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *Fleet) SetName(v string) *Fleet { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *Fleet) SetState(v string) *Fleet { + s.State = &v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *Fleet) SetVpcConfig(v *VpcConfig) *Fleet { + s.VpcConfig = v + return s +} + +// Describes a fleet error. +type FleetError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string" enum:"FleetErrorCode"` + + // The error message. + ErrorMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s FleetError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *FleetError) SetErrorCode(v string) *FleetError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *FleetError) SetErrorMessage(v string) *FleetError { + s.ErrorMessage = &v + return s +} + +// Describes an image. +type Image struct { + _ struct{} `type:"structure"` + + // The applications associated with the image. + Applications []*Application `type:"list"` + + // The version of the AppStream 2.0 agent to use for instances that are launched + // from this image. + AppstreamAgentVersion *string `min:"1" type:"string"` + + // The ARN of the image. + Arn *string `type:"string"` + + // The ARN of the image from which this image was created. + BaseImageArn *string `type:"string"` + + // The time the image was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description to display. + Description *string `min:"1" type:"string"` + + // The image name to display. + DisplayName *string `min:"1" type:"string"` + + // The name of the image builder that was used to create the private image. + // If the image is shared, this value is null. + ImageBuilderName *string `min:"1" type:"string"` + + // Indicates whether an image builder can be launched from this image. + ImageBuilderSupported *bool `type:"boolean"` + + // The permissions to provide to the destination AWS account for the specified + // image. 
+ ImagePermissions *ImagePermissions `type:"structure"` + + // The name of the image. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The operating system platform of the image. + Platform *string `type:"string" enum:"PlatformType"` + + // The release date of the public base image. For private images, this date + // is the release date of the base image from which the image was created. + PublicBaseImageReleasedDate *time.Time `type:"timestamp"` + + // The image starts in the PENDING state. If image creation succeeds, the state + // is AVAILABLE. If image creation fails, the state is FAILED. + State *string `type:"string" enum:"ImageState"` + + // The reason why the last state change occurred. + StateChangeReason *ImageStateChangeReason `type:"structure"` + + // Indicates whether the image is public or private. + Visibility *string `type:"string" enum:"VisibilityType"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// SetApplications sets the Applications field's value. +func (s *Image) SetApplications(v []*Application) *Image { + s.Applications = v + return s +} + +// SetAppstreamAgentVersion sets the AppstreamAgentVersion field's value. +func (s *Image) SetAppstreamAgentVersion(v string) *Image { + s.AppstreamAgentVersion = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *Image) SetArn(v string) *Image { + s.Arn = &v + return s +} + +// SetBaseImageArn sets the BaseImageArn field's value. +func (s *Image) SetBaseImageArn(v string) *Image { + s.BaseImageArn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *Image) SetCreatedTime(v time.Time) *Image { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Image) SetDescription(v string) *Image { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Image) SetDisplayName(v string) *Image { + s.DisplayName = &v + return s +} + +// SetImageBuilderName sets the ImageBuilderName field's value. +func (s *Image) SetImageBuilderName(v string) *Image { + s.ImageBuilderName = &v + return s +} + +// SetImageBuilderSupported sets the ImageBuilderSupported field's value. +func (s *Image) SetImageBuilderSupported(v bool) *Image { + s.ImageBuilderSupported = &v + return s +} + +// SetImagePermissions sets the ImagePermissions field's value. +func (s *Image) SetImagePermissions(v *ImagePermissions) *Image { + s.ImagePermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *Image) SetName(v string) *Image { + s.Name = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *Image) SetPlatform(v string) *Image { + s.Platform = &v + return s +} + +// SetPublicBaseImageReleasedDate sets the PublicBaseImageReleasedDate field's value. +func (s *Image) SetPublicBaseImageReleasedDate(v time.Time) *Image { + s.PublicBaseImageReleasedDate = &v + return s +} + +// SetState sets the State field's value. +func (s *Image) SetState(v string) *Image { + s.State = &v + return s +} + +// SetStateChangeReason sets the StateChangeReason field's value. +func (s *Image) SetStateChangeReason(v *ImageStateChangeReason) *Image { + s.StateChangeReason = v + return s +} + +// SetVisibility sets the Visibility field's value. 
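// A minimal sketch (illustrative, not part of the generated file) of the
// image lifecycle described above: an image starts PENDING and ends in
// AVAILABLE or FAILED, so only AVAILABLE images should be used to launch
// fleets or image builders. ImageStateAvailable is assumed to be one of the
// ImageState enum constants defined elsewhere in this package.
func exampleImageIsUsable(img *Image) bool {
	return img != nil && img.State != nil && *img.State == ImageStateAvailable
}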
+func (s *Image) SetVisibility(v string) *Image { + s.Visibility = &v + return s +} + +// Describes a virtual machine that is used to create an image. +type ImageBuilder struct { + _ struct{} `type:"structure"` + + // The list of virtual private cloud (VPC) interface endpoint objects. Administrators + // can connect to the image builder only through the specified endpoints. + AccessEndpoints []*AccessEndpoint `min:"1" type:"list"` + + // The version of the AppStream 2.0 agent that is currently being used by the + // image builder. + AppstreamAgentVersion *string `min:"1" type:"string"` + + // The ARN for the image builder. + Arn *string `type:"string"` + + // The time stamp when the image builder was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description to display. + Description *string `min:"1" type:"string"` + + // The image builder name to display. + DisplayName *string `min:"1" type:"string"` + + // The name of the directory and organizational unit (OU) to use to join the + // image builder to a Microsoft Active Directory domain. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the image builder. + EnableDefaultInternetAccess *bool `type:"boolean"` + + // The ARN of the IAM role that is applied to the image builder. To assume a + // role, the image builder calls the AWS Security Token Service (STS) AssumeRole + // API operation and passes the ARN of the role to use. The operation creates + // a new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. + IamRoleArn *string `type:"string"` + + // The ARN of the image from which this builder was created. + ImageArn *string `type:"string"` + + // The image builder errors. + ImageBuilderErrors []*ResourceError `type:"list"` + + // The instance type for the image builder. The following instance types are + // available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge + InstanceType *string `min:"1" type:"string"` + + // The name of the image builder. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Describes the network details of the fleet or image builder instance. + NetworkAccessConfiguration *NetworkAccessConfiguration `type:"structure"` + + // The operating system platform of the image builder. 
+ Platform *string `type:"string" enum:"PlatformType"` + + // The state of the image builder. + State *string `type:"string" enum:"ImageBuilderState"` + + // The reason why the last state change occurred. + StateChangeReason *ImageBuilderStateChangeReason `type:"structure"` + + // The VPC configuration of the image builder. + VpcConfig *VpcConfig `type:"structure"` +} + +// String returns the string representation +func (s ImageBuilder) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageBuilder) GoString() string { + return s.String() +} + +// SetAccessEndpoints sets the AccessEndpoints field's value. +func (s *ImageBuilder) SetAccessEndpoints(v []*AccessEndpoint) *ImageBuilder { + s.AccessEndpoints = v + return s +} + +// SetAppstreamAgentVersion sets the AppstreamAgentVersion field's value. +func (s *ImageBuilder) SetAppstreamAgentVersion(v string) *ImageBuilder { + s.AppstreamAgentVersion = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *ImageBuilder) SetArn(v string) *ImageBuilder { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *ImageBuilder) SetCreatedTime(v time.Time) *ImageBuilder { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImageBuilder) SetDescription(v string) *ImageBuilder { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *ImageBuilder) SetDisplayName(v string) *ImageBuilder { + s.DisplayName = &v + return s +} + +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *ImageBuilder) SetDomainJoinInfo(v *DomainJoinInfo) *ImageBuilder { + s.DomainJoinInfo = v + return s +} + +// SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. +func (s *ImageBuilder) SetEnableDefaultInternetAccess(v bool) *ImageBuilder { + s.EnableDefaultInternetAccess = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *ImageBuilder) SetIamRoleArn(v string) *ImageBuilder { + s.IamRoleArn = &v + return s +} + +// SetImageArn sets the ImageArn field's value. +func (s *ImageBuilder) SetImageArn(v string) *ImageBuilder { + s.ImageArn = &v + return s +} + +// SetImageBuilderErrors sets the ImageBuilderErrors field's value. +func (s *ImageBuilder) SetImageBuilderErrors(v []*ResourceError) *ImageBuilder { + s.ImageBuilderErrors = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ImageBuilder) SetInstanceType(v string) *ImageBuilder { + s.InstanceType = &v + return s +} + +// SetName sets the Name field's value. +func (s *ImageBuilder) SetName(v string) *ImageBuilder { + s.Name = &v + return s +} + +// SetNetworkAccessConfiguration sets the NetworkAccessConfiguration field's value. +func (s *ImageBuilder) SetNetworkAccessConfiguration(v *NetworkAccessConfiguration) *ImageBuilder { + s.NetworkAccessConfiguration = v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImageBuilder) SetPlatform(v string) *ImageBuilder { + s.Platform = &v + return s +} + +// SetState sets the State field's value. +func (s *ImageBuilder) SetState(v string) *ImageBuilder { + s.State = &v + return s +} + +// SetStateChangeReason sets the StateChangeReason field's value. +func (s *ImageBuilder) SetStateChangeReason(v *ImageBuilderStateChangeReason) *ImageBuilder { + s.StateChangeReason = v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. 
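An ImageBuilder is usually read back from the service rather than constructed by hand. A hedged sketch that fetches one through the SDK's DescribeImageBuilders call and prints the instance type and state (the builder name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	out, err := svc.DescribeImageBuilders(&appstream.DescribeImageBuildersInput{
		Names: []*string{aws.String("example-builder")}, // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ib := range out.ImageBuilders {
		fmt.Println(aws.StringValue(ib.InstanceType), aws.StringValue(ib.State))
	}
}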
+func (s *ImageBuilder) SetVpcConfig(v *VpcConfig) *ImageBuilder { + s.VpcConfig = v + return s +} + +// Describes the reason why the last image builder state change occurred. +type ImageBuilderStateChangeReason struct { + _ struct{} `type:"structure"` + + // The state change reason code. + Code *string `type:"string" enum:"ImageBuilderStateChangeReasonCode"` + + // The state change reason message. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ImageBuilderStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageBuilderStateChangeReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ImageBuilderStateChangeReason) SetCode(v string) *ImageBuilderStateChangeReason { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ImageBuilderStateChangeReason) SetMessage(v string) *ImageBuilderStateChangeReason { + s.Message = &v + return s +} + +// Describes the permissions for an image. +type ImagePermissions struct { + _ struct{} `type:"structure"` + + // Indicates whether the image can be used for a fleet. + AllowFleet *bool `locationName:"allowFleet" type:"boolean"` + + // Indicates whether the image can be used for an image builder. + AllowImageBuilder *bool `locationName:"allowImageBuilder" type:"boolean"` +} + +// String returns the string representation +func (s ImagePermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImagePermissions) GoString() string { + return s.String() +} + +// SetAllowFleet sets the AllowFleet field's value. +func (s *ImagePermissions) SetAllowFleet(v bool) *ImagePermissions { + s.AllowFleet = &v + return s +} + +// SetAllowImageBuilder sets the AllowImageBuilder field's value. +func (s *ImagePermissions) SetAllowImageBuilder(v bool) *ImagePermissions { + s.AllowImageBuilder = &v + return s +} + +// Describes the reason why the last image state change occurred. +type ImageStateChangeReason struct { + _ struct{} `type:"structure"` + + // The state change reason code. + Code *string `type:"string" enum:"ImageStateChangeReasonCode"` + + // The state change reason message. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ImageStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageStateChangeReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ImageStateChangeReason) SetCode(v string) *ImageStateChangeReason { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ImageStateChangeReason) SetMessage(v string) *ImageStateChangeReason { + s.Message = &v + return s +} + +// Describes the error that is returned when a usage report can't be generated. +type LastReportGenerationExecutionError struct { + _ struct{} `type:"structure"` + + // The error code for the error that is returned when a usage report can't be + // generated. + ErrorCode *string `type:"string" enum:"UsageReportExecutionErrorCode"` + + // The error message for the error that is returned when a usage report can't + // be generated. 
+ ErrorMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LastReportGenerationExecutionError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LastReportGenerationExecutionError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *LastReportGenerationExecutionError) SetErrorCode(v string) *LastReportGenerationExecutionError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *LastReportGenerationExecutionError) SetErrorMessage(v string) *LastReportGenerationExecutionError { + s.ErrorMessage = &v + return s +} + +type ListAssociatedFleetsInput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The name of the stack. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAssociatedFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociatedFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssociatedFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssociatedFleetsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociatedFleetsInput) SetNextToken(v string) *ListAssociatedFleetsInput { + s.NextToken = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *ListAssociatedFleetsInput) SetStackName(v string) *ListAssociatedFleetsInput { + s.StackName = &v + return s +} + +type ListAssociatedFleetsOutput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + Names []*string `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssociatedFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociatedFleetsOutput) GoString() string { + return s.String() +} + +// SetNames sets the Names field's value. +func (s *ListAssociatedFleetsOutput) SetNames(v []*string) *ListAssociatedFleetsOutput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociatedFleetsOutput) SetNextToken(v string) *ListAssociatedFleetsOutput { + s.NextToken = &v + return s +} + +type ListAssociatedStacksInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. 
+ // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssociatedStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociatedStacksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssociatedStacksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssociatedStacksInput"} + if s.FleetName == nil { + invalidParams.Add(request.NewErrParamRequired("FleetName")) + } + if s.FleetName != nil && len(*s.FleetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetName sets the FleetName field's value. +func (s *ListAssociatedStacksInput) SetFleetName(v string) *ListAssociatedStacksInput { + s.FleetName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociatedStacksInput) SetNextToken(v string) *ListAssociatedStacksInput { + s.NextToken = &v + return s +} + +type ListAssociatedStacksOutput struct { + _ struct{} `type:"structure"` + + // The name of the stack. + Names []*string `type:"list"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If there are no more pages, this value is null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssociatedStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociatedStacksOutput) GoString() string { + return s.String() +} + +// SetNames sets the Names field's value. +func (s *ListAssociatedStacksOutput) SetNames(v []*string) *ListAssociatedStacksOutput { + s.Names = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociatedStacksOutput) SetNextToken(v string) *ListAssociatedStacksOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
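The ListAssociated* operations above page through results: a non-null NextToken in the output is fed back into the next request, and a null token means the last page. A minimal pagination sketch with a placeholder stack name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	in := &appstream.ListAssociatedFleetsInput{
		StackName: aws.String("example-stack"), // placeholder
	}
	for {
		out, err := svc.ListAssociatedFleets(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range out.Names {
			fmt.Println(aws.StringValue(name))
		}
		if out.NextToken == nil {
			break // null token: last page reached
		}
		in.NextToken = out.NextToken
	}
}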
+func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The information about the tags. + Tags map[string]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Describes the network details of the fleet or image builder instance. +type NetworkAccessConfiguration struct { + _ struct{} `type:"structure"` + + // The resource identifier of the elastic network interface that is attached + // to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource + // identifier. + EniId *string `min:"1" type:"string"` + + // The private IP address of the elastic network interface that is attached + // to instances in your VPC. + EniPrivateIpAddress *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s NetworkAccessConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAccessConfiguration) GoString() string { + return s.String() +} + +// SetEniId sets the EniId field's value. +func (s *NetworkAccessConfiguration) SetEniId(v string) *NetworkAccessConfiguration { + s.EniId = &v + return s +} + +// SetEniPrivateIpAddress sets the EniPrivateIpAddress field's value. +func (s *NetworkAccessConfiguration) SetEniPrivateIpAddress(v string) *NetworkAccessConfiguration { + s.EniPrivateIpAddress = &v + return s +} + +// Describes a resource error. +type ResourceError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string" enum:"FleetErrorCode"` + + // The error message. + ErrorMessage *string `min:"1" type:"string"` + + // The time the error occurred. + ErrorTimestamp *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ResourceError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ResourceError) SetErrorCode(v string) *ResourceError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ResourceError) SetErrorMessage(v string) *ResourceError { + s.ErrorMessage = &v + return s +} + +// SetErrorTimestamp sets the ErrorTimestamp field's value. +func (s *ResourceError) SetErrorTimestamp(v time.Time) *ResourceError { + s.ErrorTimestamp = &v + return s +} + +// Describes the credentials for the service account used by the fleet or image +// builder to connect to the directory. +type ServiceAccountCredentials struct { + _ struct{} `type:"structure"` + + // The user name of the account. This account must have the following privileges: + // create computer objects, join computers to the domain, and change/reset the + // password on descendant computer objects for the organizational units specified. 
+ // + // AccountName is a required field + AccountName *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The password for the account. + // + // AccountPassword is a required field + AccountPassword *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s ServiceAccountCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceAccountCredentials) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServiceAccountCredentials) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServiceAccountCredentials"} + if s.AccountName == nil { + invalidParams.Add(request.NewErrParamRequired("AccountName")) + } + if s.AccountName != nil && len(*s.AccountName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountName", 1)) + } + if s.AccountPassword == nil { + invalidParams.Add(request.NewErrParamRequired("AccountPassword")) + } + if s.AccountPassword != nil && len(*s.AccountPassword) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountPassword", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountName sets the AccountName field's value. +func (s *ServiceAccountCredentials) SetAccountName(v string) *ServiceAccountCredentials { + s.AccountName = &v + return s +} + +// SetAccountPassword sets the AccountPassword field's value. +func (s *ServiceAccountCredentials) SetAccountPassword(v string) *ServiceAccountCredentials { + s.AccountPassword = &v + return s +} + +// Describes a streaming session. +type Session struct { + _ struct{} `type:"structure"` + + // The authentication method. The user is authenticated using a streaming URL + // (API) or SAML 2.0 federation (SAML). + AuthenticationType *string `type:"string" enum:"AuthenticationType"` + + // Specifies whether a user is connected to the streaming session. + ConnectionState *string `type:"string" enum:"SessionConnectionState"` + + // The name of the fleet for the streaming session. + // + // FleetName is a required field + FleetName *string `min:"1" type:"string" required:"true"` + + // The identifier of the streaming session. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // The time when the streaming session is set to expire. This time is based + // on the MaxUserDurationInSeconds value, which determines the maximum length + // of time that a streaming session can run. A streaming session might end earlier + // than the time specified in MaxExpirationTime, when the DisconnectTimeoutInSeconds + // interval elapses or the user chooses to end the session; in either case, + // the streaming instance is terminated and the streaming session ends. + MaxExpirationTime *time.Time `type:"timestamp"` + + // The network details for the streaming session. + NetworkAccessConfiguration *NetworkAccessConfiguration `type:"structure"` + + // The name of the stack for the streaming session. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` + + // The time when a streaming instance is dedicated for the user. + StartTime *time.Time `type:"timestamp"` + + // The current state of the streaming session.
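Validate above runs entirely client-side and aggregates every violation into a single request.ErrInvalidParams; the sensitive:"true" tags are there to keep both values out of logged output. A small sketch with placeholder credentials that deliberately trips the minimum-length check:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	creds := &appstream.ServiceAccountCredentials{
		AccountName:     aws.String(`DOMAIN\svc-appstream`), // placeholder account
		AccountPassword: aws.String(""),                     // violates the min:"1" constraint
	}

	// No request is made; the violation is caught locally.
	if err := creds.Validate(); err != nil {
		fmt.Println(err) // reports the AccountPassword minimum-length failure
	}
}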
+ // + // State is a required field + State *string `type:"string" required:"true" enum:"SessionState"` + + // The identifier of the user for whom the session was created. + // + // UserId is a required field + UserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s Session) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Session) GoString() string { + return s.String() +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *Session) SetAuthenticationType(v string) *Session { + s.AuthenticationType = &v + return s +} + +// SetConnectionState sets the ConnectionState field's value. +func (s *Session) SetConnectionState(v string) *Session { + s.ConnectionState = &v + return s +} + +// SetFleetName sets the FleetName field's value. +func (s *Session) SetFleetName(v string) *Session { + s.FleetName = &v + return s +} + +// SetId sets the Id field's value. +func (s *Session) SetId(v string) *Session { + s.Id = &v + return s +} + +// SetMaxExpirationTime sets the MaxExpirationTime field's value. +func (s *Session) SetMaxExpirationTime(v time.Time) *Session { + s.MaxExpirationTime = &v + return s +} + +// SetNetworkAccessConfiguration sets the NetworkAccessConfiguration field's value. +func (s *Session) SetNetworkAccessConfiguration(v *NetworkAccessConfiguration) *Session { + s.NetworkAccessConfiguration = v + return s +} + +// SetStackName sets the StackName field's value. +func (s *Session) SetStackName(v string) *Session { + s.StackName = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Session) SetStartTime(v time.Time) *Session { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *Session) SetState(v string) *Session { + s.State = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *Session) SetUserId(v string) *Session { + s.UserId = &v + return s +} + +// Describes the permissions that are available to the specified AWS account +// for a shared image. +type SharedImagePermissions struct { + _ struct{} `type:"structure"` + + // Describes the permissions for a shared image. + // + // ImagePermissions is a required field + ImagePermissions *ImagePermissions `locationName:"imagePermissions" type:"structure" required:"true"` + + // The 12-digit identifier of the AWS account with which the image is shared. + // + // SharedAccountId is a required field + SharedAccountId *string `locationName:"sharedAccountId" type:"string" required:"true"` +} + +// String returns the string representation +func (s SharedImagePermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SharedImagePermissions) GoString() string { + return s.String() +} + +// SetImagePermissions sets the ImagePermissions field's value. +func (s *SharedImagePermissions) SetImagePermissions(v *ImagePermissions) *SharedImagePermissions { + s.ImagePermissions = v + return s +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *SharedImagePermissions) SetSharedAccountId(v string) *SharedImagePermissions { + s.SharedAccountId = &v + return s +} + +// Describes a stack. +type Stack struct { + _ struct{} `type:"structure"` + + // The list of virtual private cloud (VPC) interface endpoint objects. Users + // of the stack can connect to AppStream 2.0 only through the specified endpoints. 
+ AccessEndpoints []*AccessEndpoint `min:"1" type:"list"` + + // The persistent application settings for users of the stack. + ApplicationSettings *ApplicationSettingsResponse `type:"structure"` + + // The ARN of the stack. + Arn *string `type:"string"` + + // The time the stack was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description to display. + Description *string `min:"1" type:"string"` + + // The stack name to display. + DisplayName *string `min:"1" type:"string"` + + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + + // The URL that users are redirected to after they click the Send Feedback link. + // If no URL is specified, no Send Feedback link is displayed. + FeedbackURL *string `type:"string"` + + // The name of the stack. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The URL that users are redirected to after their streaming session ends. + RedirectURL *string `type:"string"` + + // The errors for the stack. + StackErrors []*StackError `type:"list"` + + // The storage connectors to enable. + StorageConnectors []*StorageConnector `type:"list"` + + // The actions that are enabled or disabled for users during their streaming + // sessions. By default these actions are enabled. + UserSettings []*UserSetting `min:"1" type:"list"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// SetAccessEndpoints sets the AccessEndpoints field's value. +func (s *Stack) SetAccessEndpoints(v []*AccessEndpoint) *Stack { + s.AccessEndpoints = v + return s +} + +// SetApplicationSettings sets the ApplicationSettings field's value. +func (s *Stack) SetApplicationSettings(v *ApplicationSettingsResponse) *Stack { + s.ApplicationSettings = v + return s +} + +// SetArn sets the Arn field's value. +func (s *Stack) SetArn(v string) *Stack { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *Stack) SetCreatedTime(v time.Time) *Stack { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Stack) SetDescription(v string) *Stack { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Stack) SetDisplayName(v string) *Stack { + s.DisplayName = &v + return s +} + +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *Stack) SetEmbedHostDomains(v []*string) *Stack { + s.EmbedHostDomains = v + return s +} + +// SetFeedbackURL sets the FeedbackURL field's value. +func (s *Stack) SetFeedbackURL(v string) *Stack { + s.FeedbackURL = &v + return s +} + +// SetName sets the Name field's value. +func (s *Stack) SetName(v string) *Stack { + s.Name = &v + return s +} + +// SetRedirectURL sets the RedirectURL field's value. +func (s *Stack) SetRedirectURL(v string) *Stack { + s.RedirectURL = &v + return s +} + +// SetStackErrors sets the StackErrors field's value. +func (s *Stack) SetStackErrors(v []*StackError) *Stack { + s.StackErrors = v + return s +} + +// SetStorageConnectors sets the StorageConnectors field's value. 
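Stack values also come back from Describe calls, and the StackErrors field is where provisioning problems surface. A hedged sketch that prints any recorded errors for a stack (the name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	out, err := svc.DescribeStacks(&appstream.DescribeStacksInput{
		Names: []*string{aws.String("example-stack")}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, st := range out.Stacks {
		for _, se := range st.StackErrors {
			fmt.Println(aws.StringValue(se.ErrorCode), aws.StringValue(se.ErrorMessage))
		}
	}
}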
+func (s *Stack) SetStorageConnectors(v []*StorageConnector) *Stack { + s.StorageConnectors = v + return s +} + +// SetUserSettings sets the UserSettings field's value. +func (s *Stack) SetUserSettings(v []*UserSetting) *Stack { + s.UserSettings = v + return s +} + +// Describes a stack error. +type StackError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string" enum:"StackErrorCode"` + + // The error message. + ErrorMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StackError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *StackError) SetErrorCode(v string) *StackError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *StackError) SetErrorMessage(v string) *StackError { + s.ErrorMessage = &v + return s +} + +type StartFleetInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartFleetInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *StartFleetInput) SetName(v string) *StartFleetInput { + s.Name = &v + return s +} + +type StartFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartFleetOutput) GoString() string { + return s.String() +} + +type StartImageBuilderInput struct { + _ struct{} `type:"structure"` + + // The version of the AppStream 2.0 agent to use for this image builder. To + // use the latest version of the AppStream 2.0 agent, specify [LATEST]. + AppstreamAgentVersion *string `min:"1" type:"string"` + + // The name of the image builder. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartImageBuilderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageBuilderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartImageBuilderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImageBuilderInput"} + if s.AppstreamAgentVersion != nil && len(*s.AppstreamAgentVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppstreamAgentVersion", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppstreamAgentVersion sets the AppstreamAgentVersion field's value. +func (s *StartImageBuilderInput) SetAppstreamAgentVersion(v string) *StartImageBuilderInput { + s.AppstreamAgentVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *StartImageBuilderInput) SetName(v string) *StartImageBuilderInput { + s.Name = &v + return s +} + +type StartImageBuilderOutput struct { + _ struct{} `type:"structure"` + + // Information about the image builder. + ImageBuilder *ImageBuilder `type:"structure"` +} + +// String returns the string representation +func (s StartImageBuilderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageBuilderOutput) GoString() string { + return s.String() +} + +// SetImageBuilder sets the ImageBuilder field's value. +func (s *StartImageBuilderOutput) SetImageBuilder(v *ImageBuilder) *StartImageBuilderOutput { + s.ImageBuilder = v + return s +} + +type StopFleetInput struct { + _ struct{} `type:"structure"` + + // The name of the fleet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopFleetInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *StopFleetInput) SetName(v string) *StopFleetInput { + s.Name = &v + return s +} + +type StopFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopFleetOutput) GoString() string { + return s.String() +} + +type StopImageBuilderInput struct { + _ struct{} `type:"structure"` + + // The name of the image builder. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopImageBuilderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopImageBuilderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
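StartImageBuilderInput (above) and StopImageBuilderInput (below) mirror each other, and both operations return the updated ImageBuilder. A sketch of the start side, validating client-side before sending (the builder name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	in := &appstream.StartImageBuilderInput{
		Name: aws.String("example-builder"), // placeholder
	}
	if err := in.Validate(); err != nil {
		log.Fatal(err) // required/min-length problems caught before the request
	}

	out, err := svc.StartImageBuilder(in)
	if err != nil {
		log.Fatal(err)
	}
	if out.ImageBuilder != nil {
		fmt.Println(aws.StringValue(out.ImageBuilder.State)) // e.g. PENDING while starting
	}
}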
+func (s *StopImageBuilderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopImageBuilderInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *StopImageBuilderInput) SetName(v string) *StopImageBuilderInput { + s.Name = &v + return s +} + +type StopImageBuilderOutput struct { + _ struct{} `type:"structure"` + + // Information about the image builder. + ImageBuilder *ImageBuilder `type:"structure"` +} + +// String returns the string representation +func (s StopImageBuilderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopImageBuilderOutput) GoString() string { + return s.String() +} + +// SetImageBuilder sets the ImageBuilder field's value. +func (s *StopImageBuilderOutput) SetImageBuilder(v *ImageBuilder) *StopImageBuilderOutput { + s.ImageBuilder = v + return s +} + +// Describes a connector that enables persistent storage for users. +type StorageConnector struct { + _ struct{} `type:"structure"` + + // The type of storage connector. + // + // ConnectorType is a required field + ConnectorType *string `type:"string" required:"true" enum:"StorageConnectorType"` + + // The names of the domains for the account. + Domains []*string `type:"list"` + + // The ARN of the storage connector. + ResourceIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StorageConnector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageConnector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageConnector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageConnector"} + if s.ConnectorType == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectorType")) + } + if s.ResourceIdentifier != nil && len(*s.ResourceIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectorType sets the ConnectorType field's value. +func (s *StorageConnector) SetConnectorType(v string) *StorageConnector { + s.ConnectorType = &v + return s +} + +// SetDomains sets the Domains field's value. +func (s *StorageConnector) SetDomains(v []*string) *StorageConnector { + s.Domains = v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *StorageConnector) SetResourceIdentifier(v string) *StorageConnector { + s.ResourceIdentifier = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The tags to associate. A tag is a key-value pair, and the value is optional. + // For example, Environment=Test. If you do not specify a value, Environment=. + // + // If you do not specify a value, the value is set to an empty string. + // + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following special characters: + // + // _ . 
: / = + \ - @ + // + // Tags is a required field + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The tag keys for the tags to disassociate. + // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the Directory Config object. 
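TagResource and UntagResource above are symmetric: tagging takes a key-value map, untagging takes only the keys. A minimal sketch with a placeholder ARN:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))
	arn := aws.String("arn:aws:appstream:us-east-1:123456789012:fleet/example") // placeholder ARN

	// Associate a tag; an empty value would be stored as Environment=.
	if _, err := svc.TagResource(&appstream.TagResourceInput{
		ResourceArn: arn,
		Tags:        map[string]*string{"Environment": aws.String("Test")},
	}); err != nil {
		log.Fatal(err)
	}

	// Disassociate it again by key only.
	if _, err := svc.UntagResource(&appstream.UntagResourceInput{
		ResourceArn: arn,
		TagKeys:     []*string{aws.String("Environment")},
	}); err != nil {
		log.Fatal(err)
	}
}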
+ // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The distinguished names of the organizational units for computer accounts. + OrganizationalUnitDistinguishedNames []*string `type:"list"` + + // The credentials for the service account used by the fleet or image builder + // to connect to the directory. + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure"` +} + +// String returns the string representation +func (s UpdateDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + if s.ServiceAccountCredentials != nil { + if err := s.ServiceAccountCredentials.Validate(); err != nil { + invalidParams.AddNested("ServiceAccountCredentials", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *UpdateDirectoryConfigInput) SetDirectoryName(v string) *UpdateDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. +func (s *UpdateDirectoryConfigInput) SetOrganizationalUnitDistinguishedNames(v []*string) *UpdateDirectoryConfigInput { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *UpdateDirectoryConfigInput) SetServiceAccountCredentials(v *ServiceAccountCredentials) *UpdateDirectoryConfigInput { + s.ServiceAccountCredentials = v + return s +} + +type UpdateDirectoryConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the Directory Config object. + DirectoryConfig *DirectoryConfig `type:"structure"` +} + +// String returns the string representation +func (s UpdateDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDirectoryConfigOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfig sets the DirectoryConfig field's value. +func (s *UpdateDirectoryConfigOutput) SetDirectoryConfig(v *DirectoryConfig) *UpdateDirectoryConfigOutput { + s.DirectoryConfig = v + return s +} + +type UpdateFleetInput struct { + _ struct{} `type:"structure"` + + // The fleet attributes to delete. + AttributesToDelete []*string `type:"list"` + + // The desired capacity for the fleet. + ComputeCapacity *ComputeCapacity `type:"structure"` + + // Deletes the VPC association for the specified fleet. + // + // Deprecated: DeleteVpcConfig has been deprecated + DeleteVpcConfig *bool `deprecated:"true" type:"boolean"` + + // The description to display. + Description *string `type:"string"` + + // The amount of time that a streaming session remains active after users disconnect. + // If users try to reconnect to the streaming session after a disconnection + // or network interruption within this time interval, they are connected to + // their previous session. 
Otherwise, they are connected to a new session with + // a new streaming instance. + // + // Specify a value between 60 and 360000. + DisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The fleet name to display. + DisplayName *string `type:"string"` + + // The name of the directory and organizational unit (OU) to use to join the + // fleet to a Microsoft Active Directory domain. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the fleet. + EnableDefaultInternetAccess *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To + // assume a role, a fleet instance calls the AWS Security Token Service (STS) + // AssumeRole API operation and passes the ARN of the role to use. The operation + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. + IamRoleArn *string `type:"string"` + + // The amount of time that users can be idle (inactive) before they are disconnected + // from their streaming session and the DisconnectTimeoutInSeconds time interval + // begins. Users are notified before they are disconnected due to inactivity. + // If users try to reconnect to the streaming session before the time interval + // specified in DisconnectTimeoutInSeconds elapses, they are connected to their + // previous session. Users are considered idle when they stop providing keyboard + // or mouse input during their streaming session. File uploads and downloads, + // audio in, audio out, and pixels changing do not qualify as user activity. + // If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds + // elapses, they are disconnected. + // + // To prevent users from being disconnected due to inactivity, specify a value + // of 0. Otherwise, specify a value between 60 and 3600. The default value is + // 0. + // + // If you enable this feature, we recommend that you specify a value that corresponds + // exactly to a whole number of minutes (for example, 60, 120, and 180). If + // you don't do this, the value is rounded to the nearest minute. For example, + // if you specify a value of 70, users are disconnected after 1 minute of inactivity. + // If you specify a value that is at the midpoint between two different minutes, + // the value is rounded up. For example, if you specify a value of 90, users + // are disconnected after 2 minutes of inactivity. + IdleDisconnectTimeoutInSeconds *int64 `type:"integer"` + + // The ARN of the public, private, or shared image to use. + ImageArn *string `type:"string"` + + // The name of the image used to create the fleet. + ImageName *string `min:"1" type:"string"` + + // The instance type to use when launching fleet instances. 
The following instance + // types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge + InstanceType *string `min:"1" type:"string"` + + // The maximum amount of time that a streaming session can remain active, in + // seconds. If users are still connected to a streaming instance five minutes + // before this limit is reached, they are prompted to save any open documents + // before being disconnected. After this time elapses, the instance is terminated + // and replaced by a new instance. + // + // Specify a value between 600 and 360000. + MaxUserDurationInSeconds *int64 `type:"integer"` + + // A unique name for the fleet. + Name *string `min:"1" type:"string"` + + // The VPC configuration for the fleet. + VpcConfig *VpcConfig `type:"structure"` +} + +// String returns the string representation +func (s UpdateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetInput"} + if s.ImageName != nil && len(*s.ImageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageName", 1)) + } + if s.InstanceType != nil && len(*s.InstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ComputeCapacity != nil { + if err := s.ComputeCapacity.Validate(); err != nil { + invalidParams.AddNested("ComputeCapacity", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToDelete sets the AttributesToDelete field's value. +func (s *UpdateFleetInput) SetAttributesToDelete(v []*string) *UpdateFleetInput { + s.AttributesToDelete = v + return s +} + +// SetComputeCapacity sets the ComputeCapacity field's value. +func (s *UpdateFleetInput) SetComputeCapacity(v *ComputeCapacity) *UpdateFleetInput { + s.ComputeCapacity = v + return s +} + +// SetDeleteVpcConfig sets the DeleteVpcConfig field's value. +func (s *UpdateFleetInput) SetDeleteVpcConfig(v bool) *UpdateFleetInput { + s.DeleteVpcConfig = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateFleetInput) SetDescription(v string) *UpdateFleetInput { + s.Description = &v + return s +} + +// SetDisconnectTimeoutInSeconds sets the DisconnectTimeoutInSeconds field's value. +func (s *UpdateFleetInput) SetDisconnectTimeoutInSeconds(v int64) *UpdateFleetInput { + s.DisconnectTimeoutInSeconds = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. 
+func (s *UpdateFleetInput) SetDisplayName(v string) *UpdateFleetInput { + s.DisplayName = &v + return s +} + +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *UpdateFleetInput) SetDomainJoinInfo(v *DomainJoinInfo) *UpdateFleetInput { + s.DomainJoinInfo = v + return s +} + +// SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. +func (s *UpdateFleetInput) SetEnableDefaultInternetAccess(v bool) *UpdateFleetInput { + s.EnableDefaultInternetAccess = &v + return s +} + +// SetIamRoleArn sets the IamRoleArn field's value. +func (s *UpdateFleetInput) SetIamRoleArn(v string) *UpdateFleetInput { + s.IamRoleArn = &v + return s +} + +// SetIdleDisconnectTimeoutInSeconds sets the IdleDisconnectTimeoutInSeconds field's value. +func (s *UpdateFleetInput) SetIdleDisconnectTimeoutInSeconds(v int64) *UpdateFleetInput { + s.IdleDisconnectTimeoutInSeconds = &v + return s +} + +// SetImageArn sets the ImageArn field's value. +func (s *UpdateFleetInput) SetImageArn(v string) *UpdateFleetInput { + s.ImageArn = &v + return s +} + +// SetImageName sets the ImageName field's value. +func (s *UpdateFleetInput) SetImageName(v string) *UpdateFleetInput { + s.ImageName = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *UpdateFleetInput) SetInstanceType(v string) *UpdateFleetInput { + s.InstanceType = &v + return s +} + +// SetMaxUserDurationInSeconds sets the MaxUserDurationInSeconds field's value. +func (s *UpdateFleetInput) SetMaxUserDurationInSeconds(v int64) *UpdateFleetInput { + s.MaxUserDurationInSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateFleetInput) SetName(v string) *UpdateFleetInput { + s.Name = &v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *UpdateFleetInput) SetVpcConfig(v *VpcConfig) *UpdateFleetInput { + s.VpcConfig = v + return s +} + +type UpdateFleetOutput struct { + _ struct{} `type:"structure"` + + // Information about the fleet. + Fleet *Fleet `type:"structure"` +} + +// String returns the string representation +func (s UpdateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetOutput) GoString() string { + return s.String() +} + +// SetFleet sets the Fleet field's value. +func (s *UpdateFleetOutput) SetFleet(v *Fleet) *UpdateFleetOutput { + s.Fleet = v + return s +} + +type UpdateImagePermissionsInput struct { + _ struct{} `type:"structure"` + + // The permissions for the image. + // + // ImagePermissions is a required field + ImagePermissions *ImagePermissions `type:"structure" required:"true"` + + // The name of the private image. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The 12-digit identifier of the AWS account for which you want to add or update + // image permissions. + // + // SharedAccountId is a required field + SharedAccountId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateImagePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateImagePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
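Given the rounding rules documented for IdleDisconnectTimeoutInSeconds above, whole-minute values behave most predictably. A sketch of an UpdateFleet call setting both disconnect intervals (the fleet name and values are illustrative):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	_, err := svc.UpdateFleet(&appstream.UpdateFleetInput{
		Name:                           aws.String("example-fleet"), // placeholder
		DisconnectTimeoutInSeconds:     aws.Int64(300),              // 5 minutes to reconnect after a drop
		IdleDisconnectTimeoutInSeconds: aws.Int64(600),              // a whole number of minutes, per the note above
	})
	if err != nil {
		log.Fatal(err)
	}
}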
+func (s *UpdateImagePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateImagePermissionsInput"} + if s.ImagePermissions == nil { + invalidParams.Add(request.NewErrParamRequired("ImagePermissions")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SharedAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("SharedAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImagePermissions sets the ImagePermissions field's value. +func (s *UpdateImagePermissionsInput) SetImagePermissions(v *ImagePermissions) *UpdateImagePermissionsInput { + s.ImagePermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateImagePermissionsInput) SetName(v string) *UpdateImagePermissionsInput { + s.Name = &v + return s +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *UpdateImagePermissionsInput) SetSharedAccountId(v string) *UpdateImagePermissionsInput { + s.SharedAccountId = &v + return s +} + +type UpdateImagePermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateImagePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateImagePermissionsOutput) GoString() string { + return s.String() +} + +type UpdateStackInput struct { + _ struct{} `type:"structure"` + + // The list of interface VPC endpoint (interface endpoint) objects. Users of + // the stack can connect to AppStream 2.0 only through the specified endpoints. + AccessEndpoints []*AccessEndpoint `min:"1" type:"list"` + + // The persistent application settings for users of a stack. When these settings + // are enabled, changes that users make to applications and Windows settings + // are automatically saved after each session and applied to the next session. + ApplicationSettings *ApplicationSettings `type:"structure"` + + // The stack attributes to delete. + AttributesToDelete []*string `type:"list"` + + // Deletes the storage connectors currently enabled for the stack. + // + // Deprecated: DeleteStorageConnectors has been deprecated + DeleteStorageConnectors *bool `deprecated:"true" type:"boolean"` + + // The description to display. + Description *string `type:"string"` + + // The stack name to display. + DisplayName *string `type:"string"` + + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + + // The URL that users are redirected to after they choose the Send Feedback + // link. If no URL is specified, no Send Feedback link is displayed. + FeedbackURL *string `type:"string"` + + // The name of the stack. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The URL that users are redirected to after their streaming session ends. + RedirectURL *string `type:"string"` + + // The storage connectors to enable. + StorageConnectors []*StorageConnector `type:"list"` + + // The actions that are enabled or disabled for users during their streaming + // sessions. By default, these actions are enabled. 
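UpdateImagePermissions pairs a private image with the 12-digit account it is shared with, and the ImagePermissions setters chain like the rest of the generated types. A sketch with placeholder identifiers:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	svc := appstream.New(session.Must(session.NewSession()))

	_, err := svc.UpdateImagePermissions(&appstream.UpdateImagePermissionsInput{
		Name:            aws.String("example-image"), // placeholder image name
		SharedAccountId: aws.String("123456789012"),  // placeholder account ID
		ImagePermissions: (&appstream.ImagePermissions{}).
			SetAllowFleet(true).
			SetAllowImageBuilder(false),
	})
	if err != nil {
		log.Fatal(err)
	}
}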
+ UserSettings []*UserSetting `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStackInput"} + if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) + } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.UserSettings != nil && len(s.UserSettings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserSettings", 1)) + } + if s.AccessEndpoints != nil { + for i, v := range s.AccessEndpoints { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessEndpoints", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ApplicationSettings != nil { + if err := s.ApplicationSettings.Validate(); err != nil { + invalidParams.AddNested("ApplicationSettings", err.(request.ErrInvalidParams)) + } + } + if s.StorageConnectors != nil { + for i, v := range s.StorageConnectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StorageConnectors", i), err.(request.ErrInvalidParams)) + } + } + } + if s.UserSettings != nil { + for i, v := range s.UserSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserSettings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessEndpoints sets the AccessEndpoints field's value. +func (s *UpdateStackInput) SetAccessEndpoints(v []*AccessEndpoint) *UpdateStackInput { + s.AccessEndpoints = v + return s +} + +// SetApplicationSettings sets the ApplicationSettings field's value. +func (s *UpdateStackInput) SetApplicationSettings(v *ApplicationSettings) *UpdateStackInput { + s.ApplicationSettings = v + return s +} + +// SetAttributesToDelete sets the AttributesToDelete field's value. +func (s *UpdateStackInput) SetAttributesToDelete(v []*string) *UpdateStackInput { + s.AttributesToDelete = v + return s +} + +// SetDeleteStorageConnectors sets the DeleteStorageConnectors field's value. +func (s *UpdateStackInput) SetDeleteStorageConnectors(v bool) *UpdateStackInput { + s.DeleteStorageConnectors = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateStackInput) SetDescription(v string) *UpdateStackInput { + s.Description = &v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *UpdateStackInput) SetDisplayName(v string) *UpdateStackInput { + s.DisplayName = &v + return s +} + +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *UpdateStackInput) SetEmbedHostDomains(v []*string) *UpdateStackInput { + s.EmbedHostDomains = v + return s +} + +// SetFeedbackURL sets the FeedbackURL field's value. 
+func (s *UpdateStackInput) SetFeedbackURL(v string) *UpdateStackInput { + s.FeedbackURL = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateStackInput) SetName(v string) *UpdateStackInput { + s.Name = &v + return s +} + +// SetRedirectURL sets the RedirectURL field's value. +func (s *UpdateStackInput) SetRedirectURL(v string) *UpdateStackInput { + s.RedirectURL = &v + return s +} + +// SetStorageConnectors sets the StorageConnectors field's value. +func (s *UpdateStackInput) SetStorageConnectors(v []*StorageConnector) *UpdateStackInput { + s.StorageConnectors = v + return s +} + +// SetUserSettings sets the UserSettings field's value. +func (s *UpdateStackInput) SetUserSettings(v []*UserSetting) *UpdateStackInput { + s.UserSettings = v + return s +} + +type UpdateStackOutput struct { + _ struct{} `type:"structure"` + + // Information about the stack. + Stack *Stack `type:"structure"` +} + +// String returns the string representation +func (s UpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackOutput) GoString() string { + return s.String() +} + +// SetStack sets the Stack field's value. +func (s *UpdateStackOutput) SetStack(v *Stack) *UpdateStackOutput { + s.Stack = v + return s +} + +// Describes information about the usage report subscription. +type UsageReportSubscription struct { + _ struct{} `type:"structure"` + + // The time when the last usage report was generated. + LastGeneratedReportDate *time.Time `type:"timestamp"` + + // The Amazon S3 bucket where generated reports are stored. + // + // If you enabled on-instance session scripts and Amazon S3 logging for your + // session script configuration, AppStream 2.0 created an S3 bucket to store + // the script output. The bucket is unique to your account and Region. When + // you enable usage reporting in this case, AppStream 2.0 uses the same bucket + // to store your usage reports. If you haven't already enabled on-instance session + // scripts, when you enable usage reports, AppStream 2.0 creates a new S3 bucket. + S3BucketName *string `min:"1" type:"string"` + + // The schedule for generating usage reports. + Schedule *string `type:"string" enum:"UsageReportSchedule"` + + // The errors that were returned if usage reports couldn't be generated. + SubscriptionErrors []*LastReportGenerationExecutionError `type:"list"` +} + +// String returns the string representation +func (s UsageReportSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageReportSubscription) GoString() string { + return s.String() +} + +// SetLastGeneratedReportDate sets the LastGeneratedReportDate field's value. +func (s *UsageReportSubscription) SetLastGeneratedReportDate(v time.Time) *UsageReportSubscription { + s.LastGeneratedReportDate = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *UsageReportSubscription) SetS3BucketName(v string) *UsageReportSubscription { + s.S3BucketName = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *UsageReportSubscription) SetSchedule(v string) *UsageReportSubscription { + s.Schedule = &v + return s +} + +// SetSubscriptionErrors sets the SubscriptionErrors field's value. +func (s *UsageReportSubscription) SetSubscriptionErrors(v []*LastReportGenerationExecutionError) *UsageReportSubscription { + s.SubscriptionErrors = v + return s +} + +// Describes a user in the user pool. 
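+//
+// The generated Set* methods return the receiver, so a value can be populated
+// by chaining. A minimal sketch (the field values here are hypothetical):
+//
+//    u := (&appstream.User{}).
+//        SetUserName("user@example.com").
+//        SetAuthenticationType(appstream.AuthenticationTypeUserpool)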
+type User struct { + _ struct{} `type:"structure"` + + // The ARN of the user. + Arn *string `type:"string"` + + // The authentication type for the user. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // The date and time the user was created in the user pool. + CreatedTime *time.Time `type:"timestamp"` + + // Specifies whether the user in the user pool is enabled. + Enabled *bool `type:"boolean"` + + // The first name, or given name, of the user. + FirstName *string `type:"string" sensitive:"true"` + + // The last name, or surname, of the user. + LastName *string `type:"string" sensitive:"true"` + + // The status of the user in the user pool. The status can be one of the following: + // + // * UNCONFIRMED – The user is created but not confirmed. + // + // * CONFIRMED – The user is confirmed. + // + // * ARCHIVED – The user is no longer active. + // + // * COMPROMISED – The user is disabled because of a potential security + // threat. + // + // * UNKNOWN – The user status is not known. + Status *string `min:"1" type:"string"` + + // The email address of the user. + // + // Users' email addresses are case-sensitive. + UserName *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s User) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s User) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *User) SetArn(v string) *User { + s.Arn = &v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *User) SetAuthenticationType(v string) *User { + s.AuthenticationType = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *User) SetCreatedTime(v time.Time) *User { + s.CreatedTime = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *User) SetEnabled(v bool) *User { + s.Enabled = &v + return s +} + +// SetFirstName sets the FirstName field's value. +func (s *User) SetFirstName(v string) *User { + s.FirstName = &v + return s +} + +// SetLastName sets the LastName field's value. +func (s *User) SetLastName(v string) *User { + s.LastName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *User) SetStatus(v string) *User { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *User) SetUserName(v string) *User { + s.UserName = &v + return s +} + +// Describes an action and whether the action is enabled or disabled for users +// during their streaming sessions. +type UserSetting struct { + _ struct{} `type:"structure"` + + // The action that is enabled or disabled. + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"Action"` + + // Indicates whether the action is enabled or disabled. + // + // Permission is a required field + Permission *string `type:"string" required:"true" enum:"Permission"` +} + +// String returns the string representation +func (s UserSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserSetting) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UserSetting) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UserSetting"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Permission == nil { + invalidParams.Add(request.NewErrParamRequired("Permission")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *UserSetting) SetAction(v string) *UserSetting { + s.Action = &v + return s +} + +// SetPermission sets the Permission field's value. +func (s *UserSetting) SetPermission(v string) *UserSetting { + s.Permission = &v + return s +} + +// Describes a user in the user pool and the associated stack. +type UserStackAssociation struct { + _ struct{} `type:"structure"` + + // The authentication type for the user. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` + + // Specifies whether a welcome email is sent to a user after the user is created + // in the user pool. + SendEmailNotification *bool `type:"boolean"` + + // The name of the stack that is associated with the user. + // + // StackName is a required field + StackName *string `min:"1" type:"string" required:"true"` + + // The email address of the user who is associated with the stack. + // + // Users' email addresses are case-sensitive. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s UserStackAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserStackAssociation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UserStackAssociation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UserStackAssociation"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *UserStackAssociation) SetAuthenticationType(v string) *UserStackAssociation { + s.AuthenticationType = &v + return s +} + +// SetSendEmailNotification sets the SendEmailNotification field's value. +func (s *UserStackAssociation) SetSendEmailNotification(v bool) *UserStackAssociation { + s.SendEmailNotification = &v + return s +} + +// SetStackName sets the StackName field's value. +func (s *UserStackAssociation) SetStackName(v string) *UserStackAssociation { + s.StackName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UserStackAssociation) SetUserName(v string) *UserStackAssociation { + s.UserName = &v + return s +} + +// Describes the error that is returned when a user can’t be associated with +// or disassociated from a stack. 
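+// These errors are reported in the Errors list of the batch association
+// responses (for example, BatchAssociateUserStackOutput) rather than as a
+// failure of the request itself.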
+type UserStackAssociationError struct { + _ struct{} `type:"structure"` + + // The error code for the error that is returned when a user can’t be associated + // with or disassociated from a stack. + ErrorCode *string `type:"string" enum:"UserStackAssociationErrorCode"` + + // The error message for the error that is returned when a user can’t be associated + // with or disassociated from a stack. + ErrorMessage *string `min:"1" type:"string"` + + // Information about the user and associated stack. + UserStackAssociation *UserStackAssociation `type:"structure"` +} + +// String returns the string representation +func (s UserStackAssociationError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserStackAssociationError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *UserStackAssociationError) SetErrorCode(v string) *UserStackAssociationError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UserStackAssociationError) SetErrorMessage(v string) *UserStackAssociationError { + s.ErrorMessage = &v + return s +} + +// SetUserStackAssociation sets the UserStackAssociation field's value. +func (s *UserStackAssociationError) SetUserStackAssociation(v *UserStackAssociation) *UserStackAssociationError { + s.UserStackAssociation = v + return s +} + +// Describes VPC configuration information for fleets and image builders. +type VpcConfig struct { + _ struct{} `type:"structure"` + + // The identifiers of the security groups for the fleet or image builder. + SecurityGroupIds []*string `type:"list"` + + // The identifiers of the subnets to which a network interface is attached from + // the fleet instance or image builder instance. Fleet instances use one or + // more subnets. Image builder instances use one subnet. + SubnetIds []*string `type:"list"` +} + +// String returns the string representation +func (s VpcConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcConfig) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VpcConfig) SetSecurityGroupIds(v []*string) *VpcConfig { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *VpcConfig) SetSubnetIds(v []*string) *VpcConfig { + s.SubnetIds = v + return s +} + +const ( + // AccessEndpointTypeStreaming is a AccessEndpointType enum value + AccessEndpointTypeStreaming = "STREAMING" +) + +const ( + // ActionClipboardCopyFromLocalDevice is a Action enum value + ActionClipboardCopyFromLocalDevice = "CLIPBOARD_COPY_FROM_LOCAL_DEVICE" + + // ActionClipboardCopyToLocalDevice is a Action enum value + ActionClipboardCopyToLocalDevice = "CLIPBOARD_COPY_TO_LOCAL_DEVICE" + + // ActionFileUpload is a Action enum value + ActionFileUpload = "FILE_UPLOAD" + + // ActionFileDownload is a Action enum value + ActionFileDownload = "FILE_DOWNLOAD" + + // ActionPrintingToLocalDevice is a Action enum value + ActionPrintingToLocalDevice = "PRINTING_TO_LOCAL_DEVICE" +) + +const ( + // AuthenticationTypeApi is a AuthenticationType enum value + AuthenticationTypeApi = "API" + + // AuthenticationTypeSaml is a AuthenticationType enum value + AuthenticationTypeSaml = "SAML" + + // AuthenticationTypeUserpool is a AuthenticationType enum value + AuthenticationTypeUserpool = "USERPOOL" +) + +// The fleet attribute. 
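+// These names identify fleet settings that can be cleared, for example via
+// the AttributesToDelete list on UpdateFleetInput.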
+const ( + // FleetAttributeVpcConfiguration is a FleetAttribute enum value + FleetAttributeVpcConfiguration = "VPC_CONFIGURATION" + + // FleetAttributeVpcConfigurationSecurityGroupIds is a FleetAttribute enum value + FleetAttributeVpcConfigurationSecurityGroupIds = "VPC_CONFIGURATION_SECURITY_GROUP_IDS" + + // FleetAttributeDomainJoinInfo is a FleetAttribute enum value + FleetAttributeDomainJoinInfo = "DOMAIN_JOIN_INFO" + + // FleetAttributeIamRoleArn is a FleetAttribute enum value + FleetAttributeIamRoleArn = "IAM_ROLE_ARN" +) + +const ( + // FleetErrorCodeIamServiceRoleMissingEniDescribeAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingEniDescribeAction = "IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION" + + // FleetErrorCodeIamServiceRoleMissingEniCreateAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingEniCreateAction = "IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION" + + // FleetErrorCodeIamServiceRoleMissingEniDeleteAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingEniDeleteAction = "IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION" + + // FleetErrorCodeNetworkInterfaceLimitExceeded is a FleetErrorCode enum value + FleetErrorCodeNetworkInterfaceLimitExceeded = "NETWORK_INTERFACE_LIMIT_EXCEEDED" + + // FleetErrorCodeInternalServiceError is a FleetErrorCode enum value + FleetErrorCodeInternalServiceError = "INTERNAL_SERVICE_ERROR" + + // FleetErrorCodeIamServiceRoleIsMissing is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleIsMissing = "IAM_SERVICE_ROLE_IS_MISSING" + + // FleetErrorCodeMachineRoleIsMissing is a FleetErrorCode enum value + FleetErrorCodeMachineRoleIsMissing = "MACHINE_ROLE_IS_MISSING" + + // FleetErrorCodeStsDisabledInRegion is a FleetErrorCode enum value + FleetErrorCodeStsDisabledInRegion = "STS_DISABLED_IN_REGION" + + // FleetErrorCodeSubnetHasInsufficientIpAddresses is a FleetErrorCode enum value + FleetErrorCodeSubnetHasInsufficientIpAddresses = "SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES" + + // FleetErrorCodeIamServiceRoleMissingDescribeSubnetAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingDescribeSubnetAction = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION" + + // FleetErrorCodeSubnetNotFound is a FleetErrorCode enum value + FleetErrorCodeSubnetNotFound = "SUBNET_NOT_FOUND" + + // FleetErrorCodeImageNotFound is a FleetErrorCode enum value + FleetErrorCodeImageNotFound = "IMAGE_NOT_FOUND" + + // FleetErrorCodeInvalidSubnetConfiguration is a FleetErrorCode enum value + FleetErrorCodeInvalidSubnetConfiguration = "INVALID_SUBNET_CONFIGURATION" + + // FleetErrorCodeSecurityGroupsNotFound is a FleetErrorCode enum value + FleetErrorCodeSecurityGroupsNotFound = "SECURITY_GROUPS_NOT_FOUND" + + // FleetErrorCodeIgwNotAttached is a FleetErrorCode enum value + FleetErrorCodeIgwNotAttached = "IGW_NOT_ATTACHED" + + // FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION" + + // FleetErrorCodeDomainJoinErrorFileNotFound is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorFileNotFound = "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND" + + // FleetErrorCodeDomainJoinErrorAccessDenied is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorAccessDenied = "DOMAIN_JOIN_ERROR_ACCESS_DENIED" + + // FleetErrorCodeDomainJoinErrorLogonFailure is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorLogonFailure = 
"DOMAIN_JOIN_ERROR_LOGON_FAILURE" + + // FleetErrorCodeDomainJoinErrorInvalidParameter is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorInvalidParameter = "DOMAIN_JOIN_ERROR_INVALID_PARAMETER" + + // FleetErrorCodeDomainJoinErrorMoreData is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorMoreData = "DOMAIN_JOIN_ERROR_MORE_DATA" + + // FleetErrorCodeDomainJoinErrorNoSuchDomain is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorNoSuchDomain = "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN" + + // FleetErrorCodeDomainJoinErrorNotSupported is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorNotSupported = "DOMAIN_JOIN_ERROR_NOT_SUPPORTED" + + // FleetErrorCodeDomainJoinNerrInvalidWorkgroupName is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrInvalidWorkgroupName = "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME" + + // FleetErrorCodeDomainJoinNerrWorkstationNotStarted is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrWorkstationNotStarted = "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED" + + // FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded = "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED" + + // FleetErrorCodeDomainJoinNerrPasswordExpired is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrPasswordExpired = "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED" + + // FleetErrorCodeDomainJoinInternalServiceError is a FleetErrorCode enum value + FleetErrorCodeDomainJoinInternalServiceError = "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" +) + +const ( + // FleetStateStarting is a FleetState enum value + FleetStateStarting = "STARTING" + + // FleetStateRunning is a FleetState enum value + FleetStateRunning = "RUNNING" + + // FleetStateStopping is a FleetState enum value + FleetStateStopping = "STOPPING" + + // FleetStateStopped is a FleetState enum value + FleetStateStopped = "STOPPED" +) + +const ( + // FleetTypeAlwaysOn is a FleetType enum value + FleetTypeAlwaysOn = "ALWAYS_ON" + + // FleetTypeOnDemand is a FleetType enum value + FleetTypeOnDemand = "ON_DEMAND" +) + +const ( + // ImageBuilderStatePending is a ImageBuilderState enum value + ImageBuilderStatePending = "PENDING" + + // ImageBuilderStateUpdatingAgent is a ImageBuilderState enum value + ImageBuilderStateUpdatingAgent = "UPDATING_AGENT" + + // ImageBuilderStateRunning is a ImageBuilderState enum value + ImageBuilderStateRunning = "RUNNING" + + // ImageBuilderStateStopping is a ImageBuilderState enum value + ImageBuilderStateStopping = "STOPPING" + + // ImageBuilderStateStopped is a ImageBuilderState enum value + ImageBuilderStateStopped = "STOPPED" + + // ImageBuilderStateRebooting is a ImageBuilderState enum value + ImageBuilderStateRebooting = "REBOOTING" + + // ImageBuilderStateSnapshotting is a ImageBuilderState enum value + ImageBuilderStateSnapshotting = "SNAPSHOTTING" + + // ImageBuilderStateDeleting is a ImageBuilderState enum value + ImageBuilderStateDeleting = "DELETING" + + // ImageBuilderStateFailed is a ImageBuilderState enum value + ImageBuilderStateFailed = "FAILED" +) + +const ( + // ImageBuilderStateChangeReasonCodeInternalError is a ImageBuilderStateChangeReasonCode enum value + ImageBuilderStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + + // ImageBuilderStateChangeReasonCodeImageUnavailable is a ImageBuilderStateChangeReasonCode enum value + ImageBuilderStateChangeReasonCodeImageUnavailable = "IMAGE_UNAVAILABLE" +) + +const ( + // ImageStatePending is a ImageState enum 
value + ImageStatePending = "PENDING" + + // ImageStateAvailable is a ImageState enum value + ImageStateAvailable = "AVAILABLE" + + // ImageStateFailed is a ImageState enum value + ImageStateFailed = "FAILED" + + // ImageStateCopying is a ImageState enum value + ImageStateCopying = "COPYING" + + // ImageStateDeleting is a ImageState enum value + ImageStateDeleting = "DELETING" +) + +const ( + // ImageStateChangeReasonCodeInternalError is a ImageStateChangeReasonCode enum value + ImageStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + + // ImageStateChangeReasonCodeImageBuilderNotAvailable is a ImageStateChangeReasonCode enum value + ImageStateChangeReasonCodeImageBuilderNotAvailable = "IMAGE_BUILDER_NOT_AVAILABLE" + + // ImageStateChangeReasonCodeImageCopyFailure is a ImageStateChangeReasonCode enum value + ImageStateChangeReasonCodeImageCopyFailure = "IMAGE_COPY_FAILURE" +) + +const ( + // MessageActionSuppress is a MessageAction enum value + MessageActionSuppress = "SUPPRESS" + + // MessageActionResend is a MessageAction enum value + MessageActionResend = "RESEND" +) + +const ( + // PermissionEnabled is a Permission enum value + PermissionEnabled = "ENABLED" + + // PermissionDisabled is a Permission enum value + PermissionDisabled = "DISABLED" +) + +const ( + // PlatformTypeWindows is a PlatformType enum value + PlatformTypeWindows = "WINDOWS" + + // PlatformTypeWindowsServer2016 is a PlatformType enum value + PlatformTypeWindowsServer2016 = "WINDOWS_SERVER_2016" + + // PlatformTypeWindowsServer2019 is a PlatformType enum value + PlatformTypeWindowsServer2019 = "WINDOWS_SERVER_2019" +) + +const ( + // SessionConnectionStateConnected is a SessionConnectionState enum value + SessionConnectionStateConnected = "CONNECTED" + + // SessionConnectionStateNotConnected is a SessionConnectionState enum value + SessionConnectionStateNotConnected = "NOT_CONNECTED" +) + +// Possible values for the state of a streaming session. 
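+//
+// An illustrative sketch of checking a session's state (s is assumed to be a
+// *Session returned by DescribeSessions):
+//
+//    if aws.StringValue(s.State) == appstream.SessionStateExpired {
+//        // The session can no longer be used.
+//    }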
+const ( + // SessionStateActive is a SessionState enum value + SessionStateActive = "ACTIVE" + + // SessionStatePending is a SessionState enum value + SessionStatePending = "PENDING" + + // SessionStateExpired is a SessionState enum value + SessionStateExpired = "EXPIRED" +) + +const ( + // StackAttributeStorageConnectors is a StackAttribute enum value + StackAttributeStorageConnectors = "STORAGE_CONNECTORS" + + // StackAttributeStorageConnectorHomefolders is a StackAttribute enum value + StackAttributeStorageConnectorHomefolders = "STORAGE_CONNECTOR_HOMEFOLDERS" + + // StackAttributeStorageConnectorGoogleDrive is a StackAttribute enum value + StackAttributeStorageConnectorGoogleDrive = "STORAGE_CONNECTOR_GOOGLE_DRIVE" + + // StackAttributeStorageConnectorOneDrive is a StackAttribute enum value + StackAttributeStorageConnectorOneDrive = "STORAGE_CONNECTOR_ONE_DRIVE" + + // StackAttributeRedirectUrl is a StackAttribute enum value + StackAttributeRedirectUrl = "REDIRECT_URL" + + // StackAttributeFeedbackUrl is a StackAttribute enum value + StackAttributeFeedbackUrl = "FEEDBACK_URL" + + // StackAttributeThemeName is a StackAttribute enum value + StackAttributeThemeName = "THEME_NAME" + + // StackAttributeUserSettings is a StackAttribute enum value + StackAttributeUserSettings = "USER_SETTINGS" + + // StackAttributeEmbedHostDomains is a StackAttribute enum value + StackAttributeEmbedHostDomains = "EMBED_HOST_DOMAINS" + + // StackAttributeIamRoleArn is a StackAttribute enum value + StackAttributeIamRoleArn = "IAM_ROLE_ARN" + + // StackAttributeAccessEndpoints is a StackAttribute enum value + StackAttributeAccessEndpoints = "ACCESS_ENDPOINTS" +) + +const ( + // StackErrorCodeStorageConnectorError is a StackErrorCode enum value + StackErrorCodeStorageConnectorError = "STORAGE_CONNECTOR_ERROR" + + // StackErrorCodeInternalServiceError is a StackErrorCode enum value + StackErrorCodeInternalServiceError = "INTERNAL_SERVICE_ERROR" +) + +// The type of storage connector. 
+const ( + // StorageConnectorTypeHomefolders is a StorageConnectorType enum value + StorageConnectorTypeHomefolders = "HOMEFOLDERS" + + // StorageConnectorTypeGoogleDrive is a StorageConnectorType enum value + StorageConnectorTypeGoogleDrive = "GOOGLE_DRIVE" + + // StorageConnectorTypeOneDrive is a StorageConnectorType enum value + StorageConnectorTypeOneDrive = "ONE_DRIVE" +) + +const ( + // UsageReportExecutionErrorCodeResourceNotFound is a UsageReportExecutionErrorCode enum value + UsageReportExecutionErrorCodeResourceNotFound = "RESOURCE_NOT_FOUND" + + // UsageReportExecutionErrorCodeAccessDenied is a UsageReportExecutionErrorCode enum value + UsageReportExecutionErrorCodeAccessDenied = "ACCESS_DENIED" + + // UsageReportExecutionErrorCodeInternalServiceError is a UsageReportExecutionErrorCode enum value + UsageReportExecutionErrorCodeInternalServiceError = "INTERNAL_SERVICE_ERROR" +) + +const ( + // UsageReportScheduleDaily is a UsageReportSchedule enum value + UsageReportScheduleDaily = "DAILY" +) + +const ( + // UserStackAssociationErrorCodeStackNotFound is a UserStackAssociationErrorCode enum value + UserStackAssociationErrorCodeStackNotFound = "STACK_NOT_FOUND" + + // UserStackAssociationErrorCodeUserNameNotFound is a UserStackAssociationErrorCode enum value + UserStackAssociationErrorCodeUserNameNotFound = "USER_NAME_NOT_FOUND" + + // UserStackAssociationErrorCodeInternalError is a UserStackAssociationErrorCode enum value + UserStackAssociationErrorCodeInternalError = "INTERNAL_ERROR" +) + +const ( + // VisibilityTypePublic is a VisibilityType enum value + VisibilityTypePublic = "PUBLIC" + + // VisibilityTypePrivate is a VisibilityType enum value + VisibilityTypePrivate = "PRIVATE" + + // VisibilityTypeShared is a VisibilityType enum value + VisibilityTypeShared = "SHARED" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/doc.go new file mode 100644 index 00000000000..5ac4b76bc17 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/doc.go @@ -0,0 +1,45 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package appstream provides the client and types for making API +// requests to Amazon AppStream. +// +// This is the Amazon AppStream 2.0 API Reference. This documentation provides +// descriptions and syntax for each of the actions and data types in AppStream +// 2.0. AppStream 2.0 is a fully managed, secure application streaming service +// that lets you stream desktop applications to users without rewriting applications. +// AppStream 2.0 manages the AWS resources that are required to host and run +// your applications, scales automatically, and provides access to your users +// on demand. +// +// You can call the AppStream 2.0 API operations by using an interface VPC endpoint +// (interface endpoint). For more information, see Access AppStream 2.0 API +// Operations and CLI Commands Through an Interface VPC Endpoint (https://docs.aws.amazon.com/appstream2/latest/developerguide/access-api-cli-through-interface-vpc-endpoint.html) +// in the Amazon AppStream 2.0 Administration Guide. 
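+//
+// As an illustrative sketch (not taken from the service documentation), a
+// client is constructed from a session and can then call any operation in
+// this package:
+//
+//    sess := session.Must(session.NewSession())
+//    svc := appstream.New(sess)
+//    out, err := svc.DescribeFleets(&appstream.DescribeFleetsInput{})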
+// +// To learn more about AppStream 2.0, see the following resources: +// +// * Amazon AppStream 2.0 product page (http://aws.amazon.com/appstream2) +// +// * Amazon AppStream 2.0 documentation (http://aws.amazon.com/documentation/appstream2) +// +// See https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01 for more information on this service. +// +// See appstream package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/appstream/ +// +// Using the Client +// +// To contact Amazon AppStream with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon AppStream client AppStream for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/appstream/#New +package appstream diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go new file mode 100644 index 00000000000..5f8f857ae11 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package appstream + +const ( + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // An API error occurred. Wait a few minutes and try again. + ErrCodeConcurrentModificationException = "ConcurrentModificationException" + + // ErrCodeIncompatibleImageException for service response error code + // "IncompatibleImageException". + // + // The image does not support storage connectors. + ErrCodeIncompatibleImageException = "IncompatibleImageException" + + // ErrCodeInvalidAccountStatusException for service response error code + // "InvalidAccountStatusException". + // + // The resource cannot be created because your AWS account is suspended. For + // assistance, contact AWS Support. + ErrCodeInvalidAccountStatusException = "InvalidAccountStatusException" + + // ErrCodeInvalidParameterCombinationException for service response error code + // "InvalidParameterCombinationException". + // + // Indicates an incorrect combination of parameters, or a missing parameter. + ErrCodeInvalidParameterCombinationException = "InvalidParameterCombinationException" + + // ErrCodeInvalidRoleException for service response error code + // "InvalidRoleException". + // + // The specified role is invalid. + ErrCodeInvalidRoleException = "InvalidRoleException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The requested limit exceeds the permitted limit for an account. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeOperationNotPermittedException for service response error code + // "OperationNotPermittedException". + // + // The attempted operation is not permitted. 
+	ErrCodeOperationNotPermittedException = "OperationNotPermittedException"
+
+	// ErrCodeResourceAlreadyExistsException for service response error code
+	// "ResourceAlreadyExistsException".
+	//
+	// The specified resource already exists.
+	ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"
+
+	// ErrCodeResourceInUseException for service response error code
+	// "ResourceInUseException".
+	//
+	// The specified resource is in use.
+	ErrCodeResourceInUseException = "ResourceInUseException"
+
+	// ErrCodeResourceNotAvailableException for service response error code
+	// "ResourceNotAvailableException".
+	//
+	// The specified resource exists and is not in use, but isn't available.
+	ErrCodeResourceNotAvailableException = "ResourceNotAvailableException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource was not found.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go
new file mode 100644
index 00000000000..51a7d38f3e6
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go
@@ -0,0 +1,101 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package appstream
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// AppStream provides the API operation methods for making requests to
+// Amazon AppStream. See this package's package overview docs
+// for details on the service.
+//
+// AppStream methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type AppStream struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "appstream2" // Name of service.
+	EndpointsID = ServiceName  // ID to lookup a service endpoint with.
+	ServiceID   = "AppStream"  // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the AppStream client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an AppStream client from just a session.
+//     svc := appstream.New(mySession)
+//
+//     // Create an AppStream client with additional configuration
+//     svc := appstream.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppStream {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "appstream"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
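+// The JSON RPC protocol handlers and the SigV4 signing handler are attached
+// here; custom client initialization, if registered, runs last.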
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppStream { + svc := &AppStream{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2016-12-01", + JSONVersion: "1.1", + TargetPrefix: "PhotonAdminProxyService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a AppStream operation and runs any +// custom request initialization. +func (c *AppStream) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go new file mode 100644 index 00000000000..1f9f17f2dca --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go @@ -0,0 +1,122 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package appstream + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilFleetStarted uses the Amazon AppStream API operation +// DescribeFleets to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *AppStream) WaitUntilFleetStarted(input *DescribeFleetsInput) error { + return c.WaitUntilFleetStartedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilFleetStartedWithContext is an extended version of WaitUntilFleetStarted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
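+//
+// An illustrative sketch of waiting on a fleet (the fleet name is hypothetical):
+//
+//    err := svc.WaitUntilFleetStartedWithContext(aws.BackgroundContext(),
+//        &appstream.DescribeFleetsInput{Names: []*string{aws.String("example-fleet")}})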
+func (c *AppStream) WaitUntilFleetStartedWithContext(ctx aws.Context, input *DescribeFleetsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilFleetStarted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", + Expected: "ACTIVE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", + Expected: "PENDING_DEACTIVATE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", + Expected: "INACTIVE", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeFleetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilFleetStopped uses the Amazon AppStream API operation +// DescribeFleets to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *AppStream) WaitUntilFleetStopped(input *DescribeFleetsInput) error { + return c.WaitUntilFleetStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilFleetStoppedWithContext is an extended version of WaitUntilFleetStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) WaitUntilFleetStoppedWithContext(ctx aws.Context, input *DescribeFleetsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilFleetStopped", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", + Expected: "INACTIVE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", + Expected: "PENDING_ACTIVATE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", + Expected: "ACTIVE", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeFleetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
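+	// At this point the waiter is fully configured; WaitWithContext below polls
+	// DescribeFleets until an acceptor matches or MaxAttempts is exhausted.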
+ + return w.WaitWithContext(ctx) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go index 6129b12770b..42881d35619 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go @@ -3921,7 +3921,7 @@ type CreateFunctionInput struct { // ApiId is a required field ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - // The FunctionDataSource name. + // The Function DataSource name. // // DataSourceName is a required field DataSourceName *string `locationName:"dataSourceName" type:"string" required:"true"` @@ -6745,24 +6745,21 @@ type LogConfig struct { // CloudWatchLogsRoleArn is a required field CloudWatchLogsRoleArn *string `locationName:"cloudWatchLogsRoleArn" type:"string" required:"true"` + // Set to TRUE to exclude sections that contain information such as headers, + // context, and evaluated mapping templates, regardless of logging level. + ExcludeVerboseContent *bool `locationName:"excludeVerboseContent" type:"boolean"` + // The field logging level. Values can be NONE, ERROR, or ALL. // // * NONE: No field-level logs are captured. // // * ERROR: Logs the following information only for the fields that are in - // error: - // - // The error section in the server response. - // - // Field-level errors. - // - // The generated request/response functions that got resolved for error fields. + // error: The error section in the server response. Field-level errors. The + // generated request/response functions that got resolved for error fields. // // * ALL: The following information is logged for all fields in the query: - // - // Field-level tracing information. - // - // The generated request/response functions that got resolved for each field. + // Field-level tracing information. The generated request/response functions + // that got resolved for each field. // // FieldLogLevel is a required field FieldLogLevel *string `locationName:"fieldLogLevel" type:"string" required:"true" enum:"FieldLogLevel"` @@ -6800,6 +6797,12 @@ func (s *LogConfig) SetCloudWatchLogsRoleArn(v string) *LogConfig { return s } +// SetExcludeVerboseContent sets the ExcludeVerboseContent field's value. +func (s *LogConfig) SetExcludeVerboseContent(v bool) *LogConfig { + s.ExcludeVerboseContent = &v + return s +} + // SetFieldLogLevel sets the FieldLogLevel field's value. func (s *LogConfig) SetFieldLogLevel(v string) *LogConfig { s.FieldLogLevel = &v @@ -7662,7 +7665,7 @@ type UpdateFunctionInput struct { // ApiId is a required field ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` - // The FunctionDataSource name. + // The Function DataSource name. 
// // DataSourceName is a required field DataSourceName *string `locationName:"dataSourceName" type:"string" required:"true"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go index 6dae3d4cca7..e2003998c16 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appsync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppSync { svc := &AppSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-25", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/api.go index a05d7b4f8ad..b79ff0629d3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/api.go @@ -765,10 +765,21 @@ func (c *Athena) GetQueryResultsRequest(input *GetQueryResultsInput) (req *reque // GetQueryResults API operation for Amazon Athena. // -// Returns the results of a single query execution specified by QueryExecutionId -// if you have access to the workgroup in which the query ran. This request -// does not execute the query but returns results. Use StartQueryExecution to -// run a query. +// Streams the results of a single query execution specified by QueryExecutionId +// from the Athena query results location in Amazon S3. For more information, +// see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) +// in the Amazon Athena User Guide. This request does not execute the query +// but returns results. Use StartQueryExecution to run a query. +// +// To stream query results successfully, the IAM principal with permission to +// call GetQueryResults also must have permissions to the Amazon S3 GetObject +// action for the Athena query results location. +// +// IAM principals with permission to the Amazon S3 GetObject action for the +// query results location are able to retrieve query results from Amazon S3 +// even if permission to the GetQueryResults action is denied. To restrict user +// or role access, ensure that Amazon S3 permissions to the Athena query location +// are denied. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -819,7 +830,7 @@ func (c *Athena) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResu // // Example iterating over at most 3 pages of a GetQueryResults operation. // pageNum := 0 // err := client.GetQueryResultsPages(params, -// func(page *GetQueryResultsOutput, lastPage bool) bool { +// func(page *athena.GetQueryResultsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -851,10 +862,12 @@ func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQuer }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetQueryResultsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetQueryResultsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1048,7 +1061,7 @@ func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQu // // Example iterating over at most 3 pages of a ListNamedQueries operation. // pageNum := 0 // err := client.ListNamedQueriesPages(params, -// func(page *ListNamedQueriesOutput, lastPage bool) bool { +// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1080,10 +1093,12 @@ func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNa }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1194,7 +1209,7 @@ func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQuer // // Example iterating over at most 3 pages of a ListQueryExecutions operation. // pageNum := 0 // err := client.ListQueryExecutionsPages(params, -// func(page *ListQueryExecutionsOutput, lastPage bool) bool { +// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1226,10 +1241,12 @@ func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1421,7 +1438,7 @@ func (c *Athena) ListWorkGroupsWithContext(ctx aws.Context, input *ListWorkGroup // // Example iterating over at most 3 pages of a ListWorkGroups operation. // pageNum := 0 // err := client.ListWorkGroupsPages(params, -// func(page *ListWorkGroupsOutput, lastPage bool) bool { +// func(page *athena.ListWorkGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1453,10 +1470,12 @@ func (c *Athena) ListWorkGroupsPagesWithContext(ctx aws.Context, input *ListWork }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListWorkGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListWorkGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2718,7 +2737,7 @@ type GetQueryResultsInput struct { _ struct{} `type:"structure"` // The maximum number of results (rows) to return in this request. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"1" type:"integer"` // The token that specifies where to start pagination if a previous request // was truncated. 
@@ -2743,6 +2762,9 @@ func (s GetQueryResultsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *GetQueryResultsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } @@ -3333,8 +3355,9 @@ type QueryExecution struct { // and DML, such as SHOW CREATE TABLE, or DESCRIBE . StatementType *string `type:"string" enum:"StatementType"` - // The amount of data scanned during the query execution and the amount of time - // that it took to execute, and the type of statement that was run. + // The location of a manifest file that tracks file locations generated by the + // query, the amount of data scanned by the query, and the amount of time that + // it took the query to run. Statistics *QueryExecutionStatistics `type:"structure"` // The completion date, current state, submission time, and state change reason @@ -3440,11 +3463,21 @@ func (s *QueryExecutionContext) SetDatabase(v string) *QueryExecutionContext { return s } -// The amount of data scanned during the query execution and the amount of time -// that it took to execute, and the type of statement that was run. +// The location of a manifest file that tracks file locations generated by the +// query, the amount of data scanned by the query, and the amount of time that +// it took the query to run. type QueryExecutionStatistics struct { _ struct{} `type:"structure"` + // The location and file name of a data manifest file. The manifest file is + // saved to the Athena query results location in Amazon S3. It tracks files + // that the query wrote to Amazon S3. If the query fails, the manifest file + // also tracks files that the query intended to write. The manifest is useful + // for identifying orphaned files resulting from a failed query. For more information, + // see Working with Query Output Files (https://docs.aws.amazon.com/athena/latest/ug/querying.html) + // in the Amazon Athena User Guide. + DataManifestLocation *string `type:"string"` + // The number of bytes in the data that was queried. DataScannedInBytes *int64 `type:"long"` @@ -3462,6 +3495,12 @@ func (s QueryExecutionStatistics) GoString() string { return s.String() } +// SetDataManifestLocation sets the DataManifestLocation field's value. +func (s *QueryExecutionStatistics) SetDataManifestLocation(v string) *QueryExecutionStatistics { + s.DataManifestLocation = &v + return s +} + // SetDataScannedInBytes sets the DataScannedInBytes field's value. func (s *QueryExecutionStatistics) SetDataScannedInBytes(v int64) *QueryExecutionStatistics { s.DataScannedInBytes = &v @@ -3534,8 +3573,7 @@ func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutio // The location in Amazon S3 where query results are stored and the encryption // option, if any, used for query results. These are known as "client-side settings". // If workgroup settings override client-side settings, then the query uses -// the location for the query results and the encryption configuration that -// are specified for the workgroup. +// the workgroup settings. 
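+//
+// An illustrative sketch of supplying a client-side result location on
+// StartQueryExecution (the bucket name is hypothetical):
+//
+//    input := &athena.StartQueryExecutionInput{
+//        QueryString: aws.String("SELECT 1"),
+//        ResultConfiguration: &athena.ResultConfiguration{
+//            OutputLocation: aws.String("s3://example-bucket/athena-results/"),
+//        },
+//    }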
type ResultConfiguration struct { _ struct{} `type:"structure"` @@ -3549,12 +3587,13 @@ type ResultConfiguration struct { EncryptionConfiguration *EncryptionConfiguration `type:"structure"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. - // For more information, see Queries and Query Result Files. (https://docs.aws.amazon.com/athena/latest/ug/querying.html) + // To run the query, you must specify the query results location using one of + // the ways: either for individual queries using either this setting (client-side), + // or in the workgroup, using WorkGroupConfiguration. If none of them is set, + // Athena issues an error that no output location is provided. For more information, + // see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). // If workgroup settings override client-side settings, then the query uses - // the location for the query results and the encryption configuration that - // are specified for the workgroup. The "workgroup settings override" is specified - // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + // the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. OutputLocation *string `type:"string"` } @@ -3604,7 +3643,7 @@ type ResultConfigurationUpdates struct { EncryptionConfiguration *EncryptionConfiguration `type:"structure"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. - // For more information, see Queries and Query Result Files. (https://docs.aws.amazon.com/athena/latest/ug/querying.html) + // For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) // If workgroup settings override client-side settings, then the query uses // the location for the query results and the encryption configuration that // are specified for the workgroup. The "workgroup settings override" is specified @@ -4323,8 +4362,8 @@ type WorkGroup struct { // S3 where query results are stored, the encryption configuration, if any, // used for query results; whether the Amazon CloudWatch Metrics are enabled // for the workgroup; whether workgroup settings override client-side settings; - // and the data usage limit for the amount of data scanned per query, if it - // is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration + // and the data usage limits for the amount of data scanned per query or per + // workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. Configuration *WorkGroupConfiguration `type:"structure"` @@ -4387,7 +4426,7 @@ func (s *WorkGroup) SetState(v string) *WorkGroup { // S3 where query results are stored, the encryption option, if any, used for // query results, whether the Amazon CloudWatch Metrics are enabled for the // workgroup and whether workgroup settings override query settings, and the -// data usage limit for the amount of data scanned per query, if it is specified. +// data usage limits for the amount of data scanned per query or per workgroup. // The workgroup settings override is specified in EnforceWorkGroupConfiguration // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. 
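The reworded OutputLocation docs make the precedence explicit: the results location must come from either this client-side setting or the workgroup's configuration, or Athena rejects the query. A sketch of supplying it client-side; the bucket path is a placeholder:

func startQuery(client *athena.Athena) (string, error) {
	resp, err := client.StartQueryExecution(&athena.StartQueryExecutionInput{
		QueryString: aws.String("SELECT 1"),
		ResultConfiguration: &athena.ResultConfiguration{
			// Client-side setting; a workgroup with EnforceWorkGroupConfiguration
			// set overrides it.
			OutputLocation: aws.String("s3://example-athena-results/prefix/"),
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(resp.QueryExecutionId), nil
}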
type WorkGroupConfiguration struct { @@ -4405,9 +4444,22 @@ type WorkGroupConfiguration struct { // Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. PublishCloudWatchMetricsEnabled *bool `type:"boolean"` + // If set to true, allows members assigned to a workgroup to reference Amazon + // S3 Requester Pays buckets in queries. If set to false, workgroup members + // cannot query data from Requester Pays buckets, and queries that retrieve + // data from Requester Pays buckets cause an error. The default is false. For + // more information about Requester Pays buckets, see Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) + // in the Amazon Simple Storage Service Developer Guide. + RequesterPaysEnabled *bool `type:"boolean"` + // The configuration for the workgroup, which includes the location in Amazon // S3 where query results are stored and the encryption option, if any, used - // for query results. + // for query results. To run the query, you must specify the query results location + // using one of the ways: either in the workgroup using this setting, or for + // individual queries (client-side), using ResultConfiguration$OutputLocation. + // If none of them is set, Athena issues an error that no output location is + // provided. For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). ResultConfiguration *ResultConfiguration `type:"structure"` } @@ -4457,6 +4509,12 @@ func (s *WorkGroupConfiguration) SetPublishCloudWatchMetricsEnabled(v bool) *Wor return s } +// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. +func (s *WorkGroupConfiguration) SetRequesterPaysEnabled(v bool) *WorkGroupConfiguration { + s.RequesterPaysEnabled = &v + return s +} + // SetResultConfiguration sets the ResultConfiguration field's value. func (s *WorkGroupConfiguration) SetResultConfiguration(v *ResultConfiguration) *WorkGroupConfiguration { s.ResultConfiguration = v @@ -4487,6 +4545,15 @@ type WorkGroupConfigurationUpdates struct { // Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery RemoveBytesScannedCutoffPerQuery *bool `type:"boolean"` + // If set to true, allows members assigned to a workgroup to specify Amazon + // S3 Requester Pays buckets in queries. If set to false, workgroup members + // cannot query data from Requester Pays buckets, and queries that retrieve + // data from Requester Pays buckets cause an error. The default is false. For + // more information about Requester Pays buckets, see Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) + // in the Amazon Simple Storage Service Developer Guide. + RequesterPaysEnabled *bool `type:"boolean"` + // The result configuration information about the queries in this workgroup // that will be updated. Includes the updated results location and an updated // option for encrypting query results. @@ -4545,6 +4612,12 @@ func (s *WorkGroupConfigurationUpdates) SetRemoveBytesScannedCutoffPerQuery(v bo return s } +// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. +func (s *WorkGroupConfigurationUpdates) SetRequesterPaysEnabled(v bool) *WorkGroupConfigurationUpdates { + s.RequesterPaysEnabled = &v + return s +} + // SetResultConfigurationUpdates sets the ResultConfigurationUpdates field's value. 
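A sketch of toggling the new RequesterPaysEnabled flag on an existing workgroup through WorkGroupConfigurationUpdates; the workgroup name is a placeholder, and per the docs above the flag defaults to false:

func enableRequesterPays(client *athena.Athena) error {
	_, err := client.UpdateWorkGroup(&athena.UpdateWorkGroupInput{
		WorkGroup: aws.String("analytics"), // placeholder
		ConfigurationUpdates: &athena.WorkGroupConfigurationUpdates{
			// Lets workgroup members query S3 Requester Pays buckets.
			RequesterPaysEnabled: aws.Bool(true),
		},
	})
	return err
}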
func (s *WorkGroupConfigurationUpdates) SetResultConfigurationUpdates(v *ResultConfigurationUpdates) *WorkGroupConfigurationUpdates { s.ResultConfigurationUpdates = v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go index 76b6b8989bb..e4b02070ff7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go @@ -7,8 +7,8 @@ // SQL to analyze data directly in Amazon S3. You can point Athena at your data // in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is // serverless, so there is no infrastructure to set up or manage. You pay only -// for the queries you run. Athena scales automatically—executing queries in -// parallel—so results are fast, even with large datasets and complex queries. +// for the queries you run. Athena scales automatically—executing queries +// in parallel—so results are fast, even with large datasets and complex queries. // For more information, see What is Amazon Athena (http://docs.aws.amazon.com/athena/latest/ug/what-is.html) // in the Amazon Athena User Guide. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/service.go index 6806a62ec95..1f2eaa078b2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/athena/service.go @@ -46,11 +46,11 @@ const ( // svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Athena { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Athena { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Athena { svc := &Athena{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-05-18", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index 4af6308d1e2..87b84c1a8f2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -1274,8 +1274,12 @@ func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *reques // // Deletes the specified scaling policy. // -// Deleting a policy deletes the underlying alarm action, but does not delete -// the alarm, even if it no longer has an associated action. 
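The service.go change is internal to the generated client: New keeps its public signature and threads the resolved c.PartitionID into newClient, so existing callers compile unchanged. For illustration, construction still looks like this (region is a placeholder, and sess reuses the imports from the first sketch):

sess := session.Must(session.NewSession())
svc := athena.New(sess, aws.NewConfig().WithRegion("us-west-2"))
// PartitionID is resolved from the client config inside New; callers never
// pass it explicitly.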
+// Deleting either a step scaling policy or a simple scaling policy deletes +// the underlying alarm action, but does not delete the alarm, even if it no +// longer has an associated action. +// +// For more information, see Deleting a Scaling Policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html) +// in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1744,7 +1748,7 @@ func (c *AutoScaling) DescribeAutoScalingGroupsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a DescribeAutoScalingGroups operation. // pageNum := 0 // err := client.DescribeAutoScalingGroupsPages(params, -// func(page *DescribeAutoScalingGroupsOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1776,10 +1780,12 @@ func (c *AutoScaling) DescribeAutoScalingGroupsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeAutoScalingGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeAutoScalingGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1883,7 +1889,7 @@ func (c *AutoScaling) DescribeAutoScalingInstancesWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeAutoScalingInstances operation. // pageNum := 0 // err := client.DescribeAutoScalingInstancesPages(params, -// func(page *DescribeAutoScalingInstancesOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeAutoScalingInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1915,10 +1921,12 @@ func (c *AutoScaling) DescribeAutoScalingInstancesPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeAutoScalingInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeAutoScalingInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2102,7 +2110,7 @@ func (c *AutoScaling) DescribeLaunchConfigurationsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeLaunchConfigurations operation. // pageNum := 0 // err := client.DescribeLaunchConfigurationsPages(params, -// func(page *DescribeLaunchConfigurationsOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2134,10 +2142,12 @@ func (c *AutoScaling) DescribeLaunchConfigurationsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLaunchConfigurationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLaunchConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2655,7 +2665,7 @@ func (c *AutoScaling) DescribeNotificationConfigurationsWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a DescribeNotificationConfigurations operation. 
// pageNum := 0 // err := client.DescribeNotificationConfigurationsPages(params, -// func(page *DescribeNotificationConfigurationsOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeNotificationConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2687,10 +2697,12 @@ func (c *AutoScaling) DescribeNotificationConfigurationsPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeNotificationConfigurationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeNotificationConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2797,7 +2809,7 @@ func (c *AutoScaling) DescribePoliciesWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribePolicies operation. // pageNum := 0 // err := client.DescribePoliciesPages(params, -// func(page *DescribePoliciesOutput, lastPage bool) bool { +// func(page *autoscaling.DescribePoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2829,10 +2841,12 @@ func (c *AutoScaling) DescribePoliciesPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2936,7 +2950,7 @@ func (c *AutoScaling) DescribeScalingActivitiesWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a DescribeScalingActivities operation. // pageNum := 0 // err := client.DescribeScalingActivitiesPages(params, -// func(page *DescribeScalingActivitiesOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeScalingActivitiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2968,10 +2982,12 @@ func (c *AutoScaling) DescribeScalingActivitiesPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3106,7 +3122,8 @@ func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledAc // DescribeScheduledActions API operation for Auto Scaling. // // Describes the actions scheduled for your Auto Scaling group that haven't -// run. To describe the actions that have already run, use DescribeScalingActivities. +// run or that have not reached their end time. To describe the actions that +// have already run, use DescribeScalingActivities. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3156,7 +3173,7 @@ func (c *AutoScaling) DescribeScheduledActionsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeScheduledActions operation. 
// pageNum := 0 // err := client.DescribeScheduledActionsPages(params, -// func(page *DescribeScheduledActionsOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3188,10 +3205,12 @@ func (c *AutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3304,7 +3323,7 @@ func (c *AutoScaling) DescribeTagsWithContext(ctx aws.Context, input *DescribeTa // // Example iterating over at most 3 pages of a DescribeTags operation. // pageNum := 0 // err := client.DescribeTagsPages(params, -// func(page *DescribeTagsOutput, lastPage bool) bool { +// func(page *autoscaling.DescribeTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3336,10 +3355,12 @@ func (c *AutoScaling) DescribeTagsPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4177,7 +4198,7 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using using RecordLifecycleActionHeartbeat. +// instance in a pending state using RecordLifecycleActionHeartbeat. // // If you finish before the timeout period ends, complete the lifecycle action // using CompleteLifecycleAction. @@ -4371,10 +4392,14 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // PutScalingPolicy API operation for Auto Scaling. // -// Creates or updates a policy for an Auto Scaling group. To update an existing -// policy, use the existing policy name and set the parameters to change. Any -// existing parameter not changed in an update to an existing policy is not -// changed in this update request. +// Creates or updates a scaling policy for an Auto Scaling group. To update +// an existing scaling policy, use the existing policy name and set the parameters +// to change. Any existing parameter not changed in an update to an existing +// policy is not changed in this update request. +// +// For more information about using scaling policies to scale your Auto Scaling +// group automatically, see Dynamic Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scale-based-on-demand.html) +// in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5197,27 +5222,43 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // // Updates the configuration for the specified Auto Scaling group. // -// The new settings take effect on any scaling activities after this call returns. -// Scaling activities that are currently in progress aren't affected. -// -// To update an Auto Scaling group with a launch configuration with InstanceMonitoring -// set to false, you must first disable the collection of group metrics. Otherwise, -// you get an error. 
If you have previously enabled the collection of group -// metrics, you can disable it using DisableMetricsCollection. -// -// Note the following: +// To update an Auto Scaling group, specify the name of the group and the parameter +// that you want to change. Any parameters that you don't specify are not changed +// by this update request. The new settings take effect on any scaling activities +// after this call returns. Scaling activities that are currently in progress +// aren't affected. +// +// If you associate a new launch configuration or template with an Auto Scaling +// group, all new instances will get the updated configuration. Existing instances +// continue to run with the configuration that they were originally launched +// with. When you update a group to specify a mixed instances policy instead +// of a launch configuration or template, existing instances may be replaced +// to match the new purchasing options that you specified in the policy. For +// example, if the group currently has 100% On-Demand capacity and the policy +// specifies 50% Spot capacity, this means that half of your instances will +// be gradually terminated and relaunched as Spot Instances. When replacing +// instances, Amazon EC2 Auto Scaling launches new instances before terminating +// the old ones, so that updating your group does not compromise the performance +// or availability of your application. +// +// Note the following about changing DesiredCapacity, MaxSize, or MinSize: +// +// * If a scale-in event occurs as a result of a new DesiredCapacity value +// that is lower than the current size of the group, the Auto Scaling group +// uses its termination policy to determine which instances to terminate. // // * If you specify a new value for MinSize without specifying a value for // DesiredCapacity, and the new MinSize is larger than the current size of -// the group, we implicitly call SetDesiredCapacity to set the size of the -// group to the new value of MinSize. +// the group, this sets the group's DesiredCapacity to the new MinSize value. // // * If you specify a new value for MaxSize without specifying a value for // DesiredCapacity, and the new MaxSize is smaller than the current size -// of the group, we implicitly call SetDesiredCapacity to set the size of -// the group to the new value of MaxSize. +// of the group, this sets the group's DesiredCapacity to the new MaxSize +// value. // -// * All other optional parameters are left unchanged if not specified. +// To see which parameters have been set, use DescribeAutoScalingGroups. You +// can also view the scaling policies for an Auto Scaling group using DescribePolicies. +// If the group has scaling policies, you can update them using PutScalingPolicy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6006,8 +6047,8 @@ func (s CompleteLifecycleActionOutput) GoString() string { type CreateAutoScalingGroupInput struct { _ struct{} `type:"structure"` - // The name of the Auto Scaling group. This name must be unique within the scope - // of your AWS account. + // The name of the Auto Scaling group. This name must be unique per Region per + // account. // // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` @@ -6026,10 +6067,11 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. 
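Per the rewritten UpdateAutoScalingGroup docs above, only the parameters you pass are changed, and a new MinSize larger than the current group size raises DesiredCapacity to match. A sketch, assuming an Auto Scaling client built with autoscaling.New(sess) from github.com/aws/aws-sdk-go/service/autoscaling; the group name is a placeholder:

func raiseMinSize(asg *autoscaling.AutoScaling) error {
	_, err := asg.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("web-asg"), // placeholder
		MinSize:              aws.Int64(4),          // the only parameter being changed
	})
	return err
}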
DefaultCooldown *int64 `type:"integer"` - // The number of EC2 instances that should be running in the group. This number - // must be greater than or equal to the minimum size of the group and less than - // or equal to the maximum size of the group. If you do not specify a desired - // capacity, the default is the minimum size of the group. + // The number of Amazon EC2 instances that the Auto Scaling group attempts to + // maintain. This number must be greater than or equal to the minimum size of + // the group and less than or equal to the maximum size of the group. If you + // do not specify a desired capacity, the default is the minimum size of the + // group. DesiredCapacity *int64 `type:"integer"` // The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before @@ -6037,7 +6079,7 @@ type CreateAutoScalingGroupInput struct { // During this time, any health check failures for the instance are ignored. // The default value is 0. // - // For more information, see Health Checks for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. // // Conditional: This parameter is required if you are adding an ELB health check. @@ -6053,8 +6095,6 @@ type CreateAutoScalingGroupInput struct { HealthCheckType *string `min:"1" type:"string"` // The ID of the instance used to create a launch configuration for the group. - // This parameter, a launch configuration, a launch template, or a mixed instances - // policy must be specified. // // When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a // new launch configuration and associates it with the group. This launch configuration @@ -6064,31 +6104,34 @@ type CreateAutoScalingGroupInput struct { // For more information, see Create an Auto Scaling Group Using an EC2 Instance // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) // in the Amazon EC2 Auto Scaling User Guide. + // + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. InstanceId *string `min:"1" type:"string"` - // The name of the launch configuration. This parameter, a launch template, - // a mixed instances policy, or an EC2 instance must be specified. + // The name of the launch configuration. // - // For more information, see Creating an Auto Scaling Group Using a Launch Configuration - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg.html) in - // the Amazon EC2 Auto Scaling User Guide. + // If you do not specify LaunchConfigurationName, you must specify one of the + // following parameters: InstanceId, LaunchTemplate, or MixedInstancesPolicy. LaunchConfigurationName *string `min:"1" type:"string"` - // The launch template to use to launch instances. This parameter, a launch - // configuration, a mixed instances policy, or an EC2 instance must be specified. + // The launch template to use to launch instances. // - // For more information, see Creating an Auto Scaling Group Using a Launch Template - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-launch-template.html) - // in the Amazon EC2 Auto Scaling User Guide. 
+ // For more information, see LaunchTemplateSpecification (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplateSpecification.html) + // in the Amazon EC2 Auto Scaling API Reference. + // + // If you do not specify LaunchTemplate, you must specify one of the following + // parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // One or more lifecycle hooks. LifecycleHookSpecificationList []*LifecycleHookSpecification `type:"list"` - // One or more Classic Load Balancers. To specify an Application Load Balancer - // or a Network Load Balancer, use TargetGroupARNs instead. + // A list of Classic Load Balancers associated with this Auto Scaling group. + // For Application Load Balancers and Network Load Balancers, specify a list + // of target groups using the TargetGroupARNs property instead. // - // For more information, see Using a Load Balancer With an Auto Scaling Group + // For more information, see Using a Load Balancer with an Auto Scaling Group // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) // in the Amazon EC2 Auto Scaling User Guide. LoadBalancerNames []*string `type:"list"` @@ -6103,12 +6146,23 @@ type CreateAutoScalingGroupInput struct { // MinSize is a required field MinSize *int64 `type:"integer" required:"true"` - // The mixed instances policy to use to launch instances. This parameter, a - // launch template, a launch configuration, or an EC2 instance must be specified. + // An embedded object that specifies a mixed instances policy. The required + // parameters must be specified. If optional parameters are unspecified, their + // default values are used. + // + // The policy includes parameters that not only define the distribution of On-Demand + // Instances and Spot Instances, the maximum price to pay for Spot Instances, + // and how the Auto Scaling group allocates instance types to fulfill On-Demand + // and Spot capacity, but also the parameters that specify the instance configuration + // information—the launch template and instance types. // - // For more information, see Auto Scaling Groups with Multiple Instance Types - // and Purchase Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) + // For more information, see MixedInstancesPolicy (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_MixedInstancesPolicy.html) + // in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with + // Multiple Instance Types and Purchase Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) // in the Amazon EC2 Auto Scaling User Guide. + // + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"` // Indicates whether newly launched instances are protected from termination @@ -6140,7 +6194,13 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. Tags []*Tag `type:"list"` - // The Amazon Resource Names (ARN) of the target groups. + // The Amazon Resource Names (ARN) of the target groups to associate with the + // Auto Scaling group. Instances are registered as targets in a target group, + // and traffic is routed to the target group. 
+ // + // For more information, see Using a Load Balancer with an Auto Scaling Group + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) + // in the Amazon EC2 Auto Scaling User Guide. TargetGroupARNs []*string `type:"list"` // One or more termination policies used to select the instance to terminate. @@ -6387,30 +6447,36 @@ func (s CreateAutoScalingGroupOutput) GoString() string { type CreateLaunchConfigurationInput struct { _ struct{} `type:"structure"` - // Used for groups that launch instances into a virtual private cloud (VPC). - // Specifies whether to assign a public IP address to each instance. For more - // information, see Launching Auto Scaling Instances in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) + // For Auto Scaling groups that are running in a virtual private cloud (VPC), + // specifies whether to assign a public IP address to the group's instances. + // If you specify true, each instance in the Auto Scaling group receives a unique + // public IP address. For more information, see Launching Auto Scaling Instances + // in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) // in the Amazon EC2 Auto Scaling User Guide. // - // If you specify this parameter, be sure to specify at least one subnet when - // you create your group. + // If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier + // when you create your group. // - // Default: If the instance is launched into a default subnet, the default is - // to assign a public IP address. If the instance is launched into a nondefault - // subnet, the default is not to assign a public IP address. + // If the instance is launched into a default subnet, the default is to assign + // a public IP address, unless you disabled the option to assign a public IP + // address on the subnet. If the instance is launched into a nondefault subnet, + // the default is not to assign a public IP address, unless you enabled the + // option to assign a public IP address on the subnet. AssociatePublicIpAddress *bool `type:"boolean"` - // One or more mappings that specify how block devices are exposed to the instance. - // For more information, see Block Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // A block device mapping, which specifies the block devices for the instance. + // You can specify virtual devices and EBS volumes. For more information, see + // Block Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) // in the Amazon EC2 User Guide for Linux Instances. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. - // This parameter is supported only if you are launching EC2-Classic instances. // For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic // Instances to a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink) // in the Amazon EC2 Auto Scaling User Guide. + // + // This parameter can only be used if you are launching EC2-Classic instances. 
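The reworded CreateAutoScalingGroupInput docs reduce to one rule: supply exactly one of LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy, and attach load balancing through TargetGroupARNs. A hedged sketch using a launch template; every name, ARN, and subnet ID below is a placeholder:

func createGroup(asg *autoscaling.AutoScaling) error {
	_, err := asg.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("web-asg"),
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(4),
		LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
			LaunchTemplateName: aws.String("web-template"),
			Version:            aws.String("$Latest"),
		},
		TargetGroupARNs: []*string{
			aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/web/0123456789abcdef"),
		},
		VPCZoneIdentifier: aws.String("subnet-11111111,subnet-22222222"),
	})
	return err
}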
ClassicLinkVPCId *string `min:"1" type:"string"` // The IDs of one or more security groups for the specified ClassicLink-enabled @@ -6419,63 +6485,70 @@ type CreateLaunchConfigurationInput struct { // Instances to a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink) // in the Amazon EC2 Auto Scaling User Guide. // - // Conditional: This parameter is required if you specify a ClassicLink-enabled - // VPC, and is not supported otherwise. + // If you specify the ClassicLinkVPCId parameter, you must specify this parameter. ClassicLinkVPCSecurityGroups []*string `type:"list"` - // Indicates whether the instance is optimized for Amazon EBS I/O. By default, - // the instance is not optimized for EBS I/O. The optimization provides dedicated - // throughput to Amazon EBS and an optimized configuration stack to provide - // optimal I/O performance. This optimization is not available with all instance - // types. Additional usage charges apply. For more information, see Amazon EBS-Optimized + // Specifies whether the launch configuration is optimized for EBS I/O (true) + // or not (false). The optimization provides dedicated throughput to Amazon + // EBS and an optimized configuration stack to provide optimal I/O performance. + // This optimization is not available with all instance types. Additional fees + // are incurred when you enable EBS optimization for an instance type that is + // not EBS-optimized by default. For more information, see Amazon EBS-Optimized // Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) // in the Amazon EC2 User Guide for Linux Instances. + // + // The default value is false. EbsOptimized *bool `type:"boolean"` // The name or the Amazon Resource Name (ARN) of the instance profile associated - // with the IAM role for the instance. + // with the IAM role for the instance. The instance profile contains the IAM + // role. // - // EC2 instances launched with an IAM role automatically have AWS security credentials - // available. You can use IAM roles with Amazon EC2 Auto Scaling to automatically - // enable applications running on your EC2 instances to securely access other - // AWS resources. For more information, see Use an IAM Role for Applications - // That Run on Amazon EC2 Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html) + // For more information, see IAM Role for Applications That Run on Amazon EC2 + // Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html) // in the Amazon EC2 Auto Scaling User Guide. IamInstanceProfile *string `min:"1" type:"string"` - // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. - // - // If you do not specify InstanceId, you must specify ImageId. - // + // The ID of the Amazon Machine Image (AMI) that was assigned during registration. // For more information, see Finding an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) // in the Amazon EC2 User Guide for Linux Instances. + // + // If you do not specify InstanceId, you must specify ImageId. ImageId *string `min:"1" type:"string"` // The ID of the instance to use to create the launch configuration. The new // launch configuration derives attributes from the instance, except for the // block device mapping. // - // If you do not specify InstanceId, you must specify both ImageId and InstanceType. 
- // // To create a launch configuration with a block device mapping or override // any other instance attributes, specify them as part of the same request. // // For more information, see Create a Launch Configuration Using an EC2 Instance // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html) // in the Amazon EC2 Auto Scaling User Guide. + // + // If you do not specify InstanceId, you must specify both ImageId and InstanceType. InstanceId *string `min:"1" type:"string"` - // Enables detailed monitoring (true) or basic monitoring (false) for the Auto - // Scaling instances. The default value is true. + // Controls whether instances in this group are launched with detailed (true) + // or basic (false) monitoring. + // + // The default value is true (enabled). + // + // When detailed monitoring is enabled, Amazon CloudWatch generates metrics + // every minute and your account is charged a fee. When you disable detailed + // monitoring, CloudWatch generates metrics every 5 minutes. For more information, + // see Configure Monitoring for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/as-instance-monitoring.html#enable-as-instance-metrics) + // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` - // The instance type of the EC2 instance. - // - // If you do not specify InstanceId, you must specify InstanceType. + // Specifies the instance type of the EC2 instance. // // For information about available instance types, see Available Instance Types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) // in the Amazon EC2 User Guide for Linux Instances. + // + // If you do not specify InstanceId, you must specify InstanceType. InstanceType *string `min:"1" type:"string"` // The ID of the kernel associated with the AMI. @@ -6486,41 +6559,41 @@ type CreateLaunchConfigurationInput struct { // in the Amazon EC2 User Guide for Linux Instances. KeyName *string `min:"1" type:"string"` - // The name of the launch configuration. This name must be unique within the - // scope of your AWS account. + // The name of the launch configuration. This name must be unique per Region + // per account. // // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` - // The tenancy of the instance. An instance with a tenancy of dedicated runs - // on single-tenant hardware and can only be launched into a VPC. + // The tenancy of the instance. An instance with dedicated tenancy runs on isolated, + // single-tenant hardware and can only be launched into a VPC. // - // To launch Dedicated Instances into a shared tenancy VPC (a VPC with the instance + // To launch dedicated instances into a shared tenancy VPC (a VPC with the instance // placement tenancy attribute set to default), you must set the value of this // parameter to dedicated. // - // If you specify this parameter, be sure to specify at least one subnet when - // you create your group. + // If you specify PlacementTenancy, you must specify at least one subnet for + // VPCZoneIdentifier when you create your group. 
// - // For more information, see Launching Auto Scaling Instances in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) + // For more information, see Instance Placement Tenancy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-vpc-tenancy) // in the Amazon EC2 Auto Scaling User Guide. // // Valid values: default | dedicated PlacementTenancy *string `min:"1" type:"string"` - // The ID of the RAM disk associated with the AMI. + // The ID of the RAM disk to select. RamdiskId *string `min:"1" type:"string"` - // One or more security groups with which to associate the instances. - // - // If your instances are launched in EC2-Classic, you can either specify security - // group names or the security group IDs. For more information, see Amazon EC2 - // Security Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) - // in the Amazon EC2 User Guide for Linux Instances. + // A list that contains the security groups to assign to the instances in the + // Auto Scaling group. // - // If your instances are launched into a VPC, specify security group IDs. For - // more information, see Security Groups for Your VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) + // [EC2-VPC] Specify the security group IDs. For more information, see Security + // Groups for Your VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) // in the Amazon Virtual Private Cloud User Guide. + // + // [EC2-Classic] Specify either the security group names or the security group + // IDs. For more information, see Amazon EC2 Security Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) + // in the Amazon EC2 User Guide for Linux Instances. SecurityGroups []*string `type:"list"` // The maximum hourly price to be paid for any Spot Instance launched to fulfill @@ -6528,10 +6601,18 @@ type CreateLaunchConfigurationInput struct { // the current Spot market price. For more information, see Launching Spot Instances // in Your Auto Scaling Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) // in the Amazon EC2 Auto Scaling User Guide. + // + // If a Spot price is set, then the Auto Scaling group will only launch instances + // when the Spot price has been met, regardless of the setting in the Auto Scaling + // group's DesiredCapacity. + // + // When you change your Spot price by creating a new launch configuration, running + // instances will continue to run as long as the Spot price for those running + // instances is higher than the current Spot market price. SpotPrice *string `min:"1" type:"string"` - // The user data to make available to the launched EC2 instances. For more information, - // see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // The Base64-encoded user data to make available to the launched EC2 instances. + // For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) // in the Amazon EC2 User Guide for Linux Instances. UserData *string `type:"string"` } @@ -7385,11 +7466,11 @@ type DescribeAccountLimitsOutput struct { _ struct{} `type:"structure"` // The maximum number of groups allowed for your AWS account. The default limit - // is 200 per region. + // is 200 per AWS Region. 
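Pulling the CreateLaunchConfigurationInput notes together (names are unique per Region per account, SpotPrice gates launches, security groups are specified by ID in a VPC), a sketch with placeholder IDs:

func createLaunchConfig(asg *autoscaling.AutoScaling) error {
	_, err := asg.CreateLaunchConfiguration(&autoscaling.CreateLaunchConfigurationInput{
		LaunchConfigurationName: aws.String("web-lc"),                // unique per Region per account
		ImageId:                 aws.String("ami-0123456789abcdef0"), // placeholder AMI
		InstanceType:            aws.String("m5.large"),
		SecurityGroups:          []*string{aws.String("sg-0123456789abcdef0")}, // IDs, since this is EC2-VPC
		SpotPrice:               aws.String("0.05"), // instances launch only while the Spot price is at or below this
	})
	return err
}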
MaxNumberOfAutoScalingGroups *int64 `type:"integer"` // The maximum number of launch configurations allowed for your AWS account. - // The default limit is 200 per region. + // The default limit is 200 per AWS Region. MaxNumberOfLaunchConfigurations *int64 `type:"integer"` // The current number of groups for your AWS account. @@ -8990,37 +9071,56 @@ func (s DisableMetricsCollectionOutput) GoString() string { type Ebs struct { _ struct{} `type:"structure"` - // Indicates whether the volume is deleted on instance termination. The default - // value is true. + // Indicates whether the volume is deleted on instance termination. For Amazon + // EC2 Auto Scaling, the default value is true. DeleteOnTermination *bool `type:"boolean"` - // Specifies whether the volume should be encrypted. Encrypted EBS volumes must - // be attached to instances that support Amazon EBS encryption. Volumes that - // are created from encrypted snapshots are automatically encrypted. There is - // no way to create an encrypted volume from an unencrypted snapshot or an unencrypted - // volume from an encrypted snapshot. If your AMI uses encrypted volumes, you - // can only launch it on supported instance types. For more information, see - // Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) - // in the Amazon EC2 User Guide for Linux Instances. + // Specifies whether the volume should be encrypted. Encrypted EBS volumes can + // only be attached to instances that support Amazon EBS encryption. For more + // information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // If your AMI uses encrypted volumes, you can also only launch it on supported + // instance types. + // + // If you are creating a volume from a snapshot, you cannot specify an encryption + // value. Volumes that are created from encrypted snapshots are automatically + // encrypted, and volumes that are created from unencrypted snapshots are automatically + // unencrypted. By default, encrypted snapshots use the AWS managed CMK that + // is used for EBS encryption, but you can specify a custom CMK when you create + // the snapshot. The ability to encrypt a snapshot during copying also allows + // you to apply a new CMK to an already-encrypted snapshot. Volumes restored + // from the resulting copy are only accessible using the new CMK. + // + // Enabling encryption by default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) + // results in all EBS volumes being encrypted with the AWS managed CMK or a + // customer managed CMK, whether or not the snapshot was encrypted. + // + // For more information, see Using Encryption with EBS-Backed AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html) + // in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy + // for Use with Encrypted Volumes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/key-policy-requirements-EBS-encryption.html) + // in the Amazon EC2 Auto Scaling User Guide. Encrypted *bool `type:"boolean"` // The number of I/O operations per second (IOPS) to provision for the volume. - // For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The maximum ratio of IOPS to volume size (in GiB) is 50:1. 
For more information, + // see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // // Conditional: This parameter is required when the volume type is io1. (Not // used with standard, gp2, st1, or sc1 volumes.) Iops *int64 `min:"100" type:"integer"` - // The ID of the snapshot. This parameter is optional if you specify a volume - // size. + // The snapshot ID of the volume to use. + // + // Conditional: This parameter is optional if you specify a volume size. If + // you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater + // than the size of the snapshot. SnapshotId *string `min:"1" type:"string"` - // The volume size, in GiB. + // The volume size, in Gibibytes (GiB). // - // Constraints: 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and - // 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must - // be equal to or larger than the snapshot size. + // This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 + // for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume + // size must be equal to or larger than the snapshot size. // // Default: If you create a volume from a snapshot and you don't specify a volume // size, the default is the snapshot size. @@ -9356,7 +9456,7 @@ type ExecutePolicyInput struct { // Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to // complete before executing the policy. // - // This parameter is not supported if the policy type is StepScaling. + // This parameter is not supported if the policy type is StepScaling or TargetTrackingScaling. // // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. @@ -10093,7 +10193,8 @@ func (s *InstanceMonitoring) SetEnabled(v bool) *InstanceMonitoring { // // The instances distribution specifies the distribution of On-Demand Instances // and Spot Instances, the maximum price to pay for Spot Instances, and how -// the Auto Scaling group allocates instance types. +// the Auto Scaling group allocates instance types to fulfill On-Demand and +// Spot capacity. type InstancesDistribution struct { _ struct{} `type:"structure"` @@ -10118,25 +10219,32 @@ type InstancesDistribution struct { OnDemandBaseCapacity *int64 `type:"integer"` // Controls the percentages of On-Demand Instances and Spot Instances for your - // additional capacity beyond OnDemandBaseCapacity. + // additional capacity beyond OnDemandBaseCapacity. The range is 0–100. // - // The range is 0–100. The default value is 100. If you leave this parameter - // set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot - // Instances. + // The default value is 100. If you leave this parameter set to 100, the percentages + // are 100% for On-Demand Instances and 0% for Spot Instances. OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"` - // Indicates how to allocate Spot capacity across Spot pools. + // Indicates how to allocate instances across Spot Instance pools. + // + // If the allocation strategy is lowest-price, the Auto Scaling group launches + // instances using the Spot pools with the lowest price, and evenly allocates + // your instances across the number of Spot pools that you specify. 
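Illustrating the Ebs constraints documented above: io1 volumes require Iops, the IOPS-to-size ratio is capped at 50:1, and a VolumeSize supplied with a SnapshotId must be at least the snapshot's size. A sketch of a block device mapping:

func ioVolumeMapping() *autoscaling.BlockDeviceMapping {
	return &autoscaling.BlockDeviceMapping{
		DeviceName: aws.String("/dev/xvdf"),
		Ebs: &autoscaling.Ebs{
			VolumeType:          aws.String("io1"),
			VolumeSize:          aws.Int64(100),  // GiB
			Iops:                aws.Int64(5000), // 5000/100 hits the documented 50:1 ceiling
			Encrypted:           aws.Bool(true),  // requires an instance type that supports EBS encryption
			DeleteOnTermination: aws.Bool(true),  // the Amazon EC2 Auto Scaling default
		},
	}
}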
If the allocation + // strategy is capacity-optimized, the Auto Scaling group launches instances + // using Spot pools that are optimally chosen based on the available Spot capacity. + // + // The default Spot allocation strategy for calls that you make through the + // API, the AWS CLI, or the AWS SDKs is lowest-price. The default Spot allocation + // strategy for the AWS Management Console is capacity-optimized. // - // The only valid value is lowest-price, which is also the default value. The - // Auto Scaling group selects the cheapest Spot pools and evenly allocates your - // Spot capacity across the number of Spot pools that you specify. + // Valid values: lowest-price | capacity-optimized SpotAllocationStrategy *string `type:"string"` - // The number of Spot pools to use to allocate your Spot capacity. The Spot - // pools are determined from the different instance types in the Overrides array - // of LaunchTemplate. + // The number of Spot Instance pools across which to allocate your Spot Instances. + // The Spot pools are determined from the different instance types in the Overrides + // array of LaunchTemplate. The range is 1–20. The default value is 2. // - // The range is 1–20 and the default is 2. + // Valid only when the Spot allocation strategy is lowest-price. SpotInstancePools *int64 `type:"integer"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -10198,14 +10306,21 @@ func (s *InstancesDistribution) SetSpotMaxPrice(v string) *InstancesDistribution type LaunchConfiguration struct { _ struct{} `type:"structure"` - // [EC2-VPC] Indicates whether to assign a public IP address to each instance. + // For Auto Scaling groups that are running in a VPC, specifies whether to assign + // a public IP address to the group's instances. + // + // For more information, see Launching Auto Scaling Instances in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) + // in the Amazon EC2 Auto Scaling User Guide. AssociatePublicIpAddress *bool `type:"boolean"` // A block device mapping, which specifies the block devices for the instance. + // + // For more information, see Block Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // in the Amazon EC2 User Guide for Linux Instances. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. - // This parameter can only be used if you are launching EC2-Classic instances. + // // For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic // Instances to a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink) @@ -10213,13 +10328,11 @@ type LaunchConfiguration struct { ClassicLinkVPCId *string `min:"1" type:"string"` // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + // // For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic // Instances to a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink) // in the Amazon EC2 Auto Scaling User Guide. - // - // Conditional: This parameter is required if you specify a ClassicLink-enabled - // VPC, and cannot be used otherwise. 
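The expanded SpotAllocationStrategy docs above distinguish lowest-price, which spreads instances across SpotInstancePools, from capacity-optimized, where SpotInstancePools does not apply. A hedged sketch of an InstancesDistribution reflecting that:

dist := &autoscaling.InstancesDistribution{
	OnDemandBaseCapacity:                aws.Int64(1),
	OnDemandPercentageAboveBaseCapacity: aws.Int64(50),
	// capacity-optimized ignores SpotInstancePools, which is valid only
	// with the lowest-price strategy.
	SpotAllocationStrategy: aws.String("capacity-optimized"),
}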
ClassicLinkVPCSecurityGroups []*string `type:"list"` // The creation date and time for the launch configuration. @@ -10227,24 +10340,44 @@ type LaunchConfiguration struct { // CreatedTime is a required field CreatedTime *time.Time `type:"timestamp" required:"true"` - // Controls whether the instance is optimized for EBS I/O (true) or not (false). + // Specifies whether the launch configuration is optimized for EBS I/O (true) + // or not (false). + // + // For more information, see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // in the Amazon EC2 User Guide for Linux Instances. EbsOptimized *bool `type:"boolean"` - // The name or Amazon Resource Name (ARN) of the instance profile associated - // with the IAM role for the instance. + // The name or the Amazon Resource Name (ARN) of the instance profile associated + // with the IAM role for the instance. The instance profile contains the IAM + // role. + // + // For more information, see IAM Role for Applications That Run on Amazon EC2 + // Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html) + // in the Amazon EC2 Auto Scaling User Guide. IamInstanceProfile *string `min:"1" type:"string"` - // The ID of the Amazon Machine Image (AMI). + // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. + // + // For more information, see Finding an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) + // in the Amazon EC2 User Guide for Linux Instances. // // ImageId is a required field ImageId *string `min:"1" type:"string" required:"true"` // Controls whether instances in this group are launched with detailed (true) // or basic (false) monitoring. + // + // For more information, see Configure Monitoring for Auto Scaling Instances + // (https://docs.aws.amazon.com/autoscaling/latest/userguide/as-instance-monitoring.html#enable-as-instance-metrics) + // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` // The instance type for the instances. // + // For information about available instance types, see Available Instance Types + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) + // in the Amazon EC2 User Guide for Linux Instances. + // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -10252,6 +10385,9 @@ type LaunchConfiguration struct { KernelId *string `min:"1" type:"string"` // The name of the key pair. + // + // For more information, see Amazon EC2 Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) + // in the Amazon EC2 User Guide for Linux Instances. KeyName *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the launch configuration. @@ -10263,20 +10399,36 @@ type LaunchConfiguration struct { LaunchConfigurationName *string `min:"1" type:"string" required:"true"` // The tenancy of the instance, either default or dedicated. An instance with - // dedicated tenancy runs in an isolated, single-tenant hardware and can only - // be launched into a VPC. + // dedicated tenancy runs on isolated, single-tenant hardware and can only be + // launched into a VPC. + // + // For more information, see Instance Placement Tenancy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-vpc-tenancy) + // in the Amazon EC2 Auto Scaling User Guide. 
PlacementTenancy *string `min:"1" type:"string"` // The ID of the RAM disk associated with the AMI. RamdiskId *string `min:"1" type:"string"` - // The security groups to associate with the instances. + // A list that contains the security groups to assign to the instances in the + // Auto Scaling group. + // + // For more information, see Security Groups for Your VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) + // in the Amazon Virtual Private Cloud User Guide. SecurityGroups []*string `type:"list"` - // The price to bid when launching Spot Instances. + // The maximum hourly price to be paid for any Spot Instance launched to fulfill + // the request. Spot Instances are launched when the price you specify exceeds + // the current Spot market price. + // + // For more information, see Launching Spot Instances in Your Auto Scaling Group + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) + // in the Amazon EC2 Auto Scaling User Guide. SpotPrice *string `min:"1" type:"string"` - // The user data available to the instances. + // The Base64-encoded user data to make available to the launched EC2 instances. + // + // For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // in the Amazon EC2 User Guide for Linux Instances. UserData *string `type:"string"` } @@ -10417,9 +10569,8 @@ type LaunchTemplate struct { LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` // Any parameters that you specify override the same parameters in the launch - // template. Currently, the only supported override is instance type. - // - // You must specify between 2 and 20 overrides. + // template. Currently, the only supported override is instance type. You must + // specify between 2 and 20 overrides. Overrides []*LaunchTemplateOverrides `type:"list"` } @@ -10584,8 +10735,8 @@ func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecif } // Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you -// want to perform an action whenever it launches instances or whenever it terminates -// instances. Used in response to DescribeLifecycleHooks. +// want to perform an action whenever it launches instances or terminates instances. +// Used in response to DescribeLifecycleHooks. type LifecycleHook struct { _ struct{} `type:"structure"` @@ -10717,7 +10868,7 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using using RecordLifecycleActionHeartbeat. +// instance in a pending state using RecordLifecycleActionHeartbeat. // // If you finish before the timeout period ends, complete the lifecycle action // using CompleteLifecycleAction. @@ -11085,27 +11236,27 @@ func (s *MetricGranularityType) SetGranularity(v string) *MetricGranularityType // Describes a mixed instances policy for an Auto Scaling group. With mixed // instances, your Auto Scaling group can provision a combination of On-Demand -// Instances and Spot Instances across multiple instance types. Used in combination -// with CreateAutoScalingGroup. For more information, see Auto Scaling Groups -// with Multiple Instance Types and Purchase Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) +// Instances and Spot Instances across multiple instance types. 
For more information, +// see Auto Scaling Groups with Multiple Instance Types and Purchase Options +// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) // in the Amazon EC2 Auto Scaling User Guide. // -// When you create your Auto Scaling group, you can specify a launch configuration -// or template as a parameter for the top-level object, or you can specify a -// mixed instances policy, but not both at the same time. +// You can create a mixed instances policy for a new Auto Scaling group, or +// you can create it for an existing group by updating the group to specify +// MixedInstancesPolicy as the top-level parameter instead of a launch configuration +// or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup. type MixedInstancesPolicy struct { _ struct{} `type:"structure"` // The instances distribution to use. // - // If you leave this parameter unspecified when creating the group, the default - // values are used. + // If you leave this parameter unspecified when creating a mixed instances policy, + // the default values are used. InstancesDistribution *InstancesDistribution `type:"structure"` - // The launch template and overrides. + // The launch template and instance types (overrides). // - // This parameter is required when creating an Auto Scaling group with a mixed - // instances policy, but is not required when updating the group. + // This parameter must be specified when creating a mixed instances policy. LaunchTemplate *LaunchTemplate `type:"structure"` } @@ -11222,14 +11373,14 @@ type PredefinedMetricSpecification struct { // interfaces by the Auto Scaling group. // // * ALBRequestCountPerTarget - Number of requests completed per target in - // an Application Load Balancer or a Network Load Balancer target group. + // an Application Load Balancer target group. // // For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, // and ASGAverageNetworkOut, the parameter must not be specified as the resource // associated with the metric type is the Auto Scaling group. For predefined // metric type ALBRequestCountPerTarget, the parameter must be specified in - // the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id, - // where app/load-balancer-name/load-balancer-id is the final portion of the + // the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id + // , where app/load-balancer-name/load-balancer-id is the final portion of the // load balancer ARN, and targetgroup/target-group-name/target-group-id is the // final portion of the target group ARN. The target group must be attached // to the Auto Scaling group. @@ -11576,12 +11727,12 @@ func (s PutNotificationConfigurationOutput) GoString() string { type PutScalingPolicyInput struct { _ struct{} `type:"structure"` - // The adjustment type. The valid values are ChangeInCapacity, ExactCapacity, - // and PercentChangeInCapacity. - // - // This parameter is supported if the policy type is SimpleScaling or StepScaling. + // Specifies whether the ScalingAdjustment parameter is an absolute number or + // a percentage of the current capacity. The valid values are ChangeInCapacity, + // ExactCapacity, and PercentChangeInCapacity. // - // For more information, see Dynamic Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scale-based-on-demand.html) + // Valid only if the policy type is StepScaling or SimpleScaling. 
For more information, + // see Scaling Adjustment Types (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) // in the Amazon EC2 Auto Scaling User Guide. AdjustmentType *string `min:"1" type:"string"` @@ -11590,13 +11741,12 @@ type PutScalingPolicyInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The amount of time, in seconds, after a scaling activity completes and before - // the next scaling activity can start. If this parameter is not specified, - // the default cooldown period for the group applies. - // - // This parameter is supported if the policy type is SimpleScaling. + // The amount of time, in seconds, after a scaling activity completes before + // any further dynamic scaling activities can start. If this parameter is not + // specified, the default cooldown period for the group applies. // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Valid only if the policy type is SimpleScaling. For more information, see + // Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. Cooldown *int64 `type:"integer"` @@ -11604,14 +11754,14 @@ type PutScalingPolicyInput struct { // to the CloudWatch metrics. The default is to use the value specified for // the default cooldown period for the group. // - // This parameter is supported if the policy type is StepScaling or TargetTrackingScaling. + // Valid only if the policy type is StepScaling or TargetTrackingScaling. EstimatedInstanceWarmup *int64 `type:"integer"` // The aggregation type for the CloudWatch metrics. The valid values are Minimum, // Maximum, and Average. If the aggregation type is null, the value is treated // as Average. // - // This parameter is supported if the policy type is StepScaling. + // Valid only if the policy type is StepScaling. MetricAggregationType *string `min:"1" type:"string"` // The minimum number of instances to scale. If the value of AdjustmentType @@ -11619,7 +11769,14 @@ type PutScalingPolicyInput struct { // of the Auto Scaling group by at least this many instances. Otherwise, the // error is ValidationError. // - // This parameter is supported if the policy type is SimpleScaling or StepScaling. + // This property replaces the MinAdjustmentStep property. For example, suppose + // that you create a step scaling policy to scale out an Auto Scaling group + // by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group + // has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. + // However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 + // Auto Scaling scales out the group by 2 instances. + // + // Valid only if the policy type is SimpleScaling or StepScaling. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -11634,26 +11791,32 @@ type PutScalingPolicyInput struct { // If the policy type is null, the value is treated as SimpleScaling. PolicyType *string `min:"1" type:"string"` - // The amount by which to scale, based on the specified adjustment type. A positive - // value adds to the current capacity while a negative number removes from the - // current capacity. + // The amount by which a simple scaling policy scales the Auto Scaling group + // in response to an alarm breach. 
The adjustment is based on the value that + // you specified in the AdjustmentType parameter (either an absolute number + // or a percentage). A positive value adds to the current capacity and a negative + // value subtracts from the current capacity. For exact capacity, you must specify + // a positive value. // - // Conditional: This parameter is required if the policy type is SimpleScaling - // and not supported otherwise. + // Conditional: If you specify SimpleScaling for the policy type, you must specify + // this parameter. (Not used with any other policy type.) ScalingAdjustment *int64 `type:"integer"` // A set of adjustments that enable you to scale based on the size of the alarm // breach. // - // Conditional: This parameter is required if the policy type is StepScaling - // and not supported otherwise. + // Conditional: If you specify StepScaling for the policy type, you must specify + // this parameter. (Not used with any other policy type.) StepAdjustments []*StepAdjustment `type:"list"` // A target tracking scaling policy. Includes support for predefined or customized // metrics. // - // Conditional: This parameter is required if the policy type is TargetTrackingScaling - // and not supported otherwise. + // For more information, see TargetTrackingConfiguration (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_TargetTrackingConfiguration.html) + // in the Amazon EC2 Auto Scaling API Reference. + // + // Conditional: If you specify TargetTrackingScaling for the policy type, you + // must specify this parameter. (Not used with any other policy type.) TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` } @@ -11826,17 +11989,17 @@ type PutScheduledUpdateGroupActionInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The number of EC2 instances that should be running in the group. + // The number of EC2 instances that should be running in the Auto Scaling group. DesiredCapacity *int64 `type:"integer"` - // The time for the recurring schedule to end. Amazon EC2 Auto Scaling does - // not perform the action after this time. + // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling + // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum size for the Auto Scaling group. + // The maximum number of instances in the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum size for the Auto Scaling group. + // The minimum number of instances in the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for this action, in Unix cron syntax format. This @@ -11844,6 +12007,9 @@ type PutScheduledUpdateGroupActionInput struct { // [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes // (for example, "30 0 1 1,6,12 *"). For more information about this format, // see Crontab (http://crontab.org). + // + // When StartTime and EndTime are specified with Recurrence, they form the boundaries + // of when the recurring action starts and stops. Recurrence *string `min:"1" type:"string"` // The name of this scaling action. @@ -11851,8 +12017,8 @@ type PutScheduledUpdateGroupActionInput struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT - // only and in quotes (for example, "2019-06-01T00:00:00Z"). 
+ // The date and time for this action to start, in YYYY-MM-DDThh:mm:ssZ format + // in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z"). // // If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs // the action at this time, and then performs the action based on the specified @@ -11862,7 +12028,7 @@ type PutScheduledUpdateGroupActionInput struct { // an error message. StartTime *time.Time `type:"timestamp"` - // This parameter is deprecated. + // This parameter is no longer used. Time *time.Time `type:"timestamp"` } @@ -12122,7 +12288,7 @@ type ScalingPolicy struct { // The name of the scaling policy. PolicyName *string `min:"1" type:"string"` - // The policy type. The valid values are SimpleScaling and StepScaling. + // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. PolicyType *string `min:"1" type:"string"` // The amount by which to scale, based on the specified adjustment type. A positive @@ -12309,16 +12475,20 @@ type ScheduledUpdateGroupAction struct { // The number of instances you prefer to maintain in the group. DesiredCapacity *int64 `type:"integer"` - // The date and time that the action is scheduled to end. + // The date and time in UTC for the recurring schedule to end. For example, + // "2019-06-01T00:00:00Z". EndTime *time.Time `type:"timestamp"` - // The maximum size of the group. + // The maximum number of instances in the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum size of the group. + // The minimum number of instances in the Auto Scaling group. MinSize *int64 `type:"integer"` - // The recurring schedule for the action. + // The recurring schedule for the action, in Unix cron syntax format. + // + // When StartTime and EndTime are specified with Recurrence, they form the boundaries + // of when the recurring action starts and stops. Recurrence *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the scheduled action. @@ -12327,13 +12497,10 @@ type ScheduledUpdateGroupAction struct { // The name of the scheduled action. ScheduledActionName *string `min:"1" type:"string"` - // The date and time that the action is scheduled to begin. - // - // When StartTime and EndTime are specified with Recurrence, they form the boundaries - // of when the recurring action starts and stops. + // The date and time in UTC for this action to start. For example, "2019-06-01T00:00:00Z". StartTime *time.Time `type:"timestamp"` - // This parameter is deprecated. + // This parameter is no longer used. Time *time.Time `type:"timestamp"` } @@ -12418,20 +12585,23 @@ type ScheduledUpdateGroupActionRequest struct { // The number of EC2 instances that should be running in the group. DesiredCapacity *int64 `type:"integer"` - // The time for the recurring schedule to end. Amazon EC2 Auto Scaling does - // not perform the action after this time. + // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling + // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum size of the group. + // The maximum number of instances in the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum size of the group. + // The minimum number of instances in the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for the action, in Unix cron syntax format. This format // consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] // [Month_of_Year] [Day_of_Week]. 
The value must be in quotes (for example, // "30 0 1 1,6,12 *"). For more information about this format, see Crontab (http://crontab.org). + // + // When StartTime and EndTime are specified with Recurrence, they form the boundaries + // of when the recurring action starts and stops. Recurrence *string `min:"1" type:"string"` // The name of the scaling action. @@ -12439,8 +12609,8 @@ type ScheduledUpdateGroupActionRequest struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The time for the action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT - // only and in quotes (for example, "2019-06-01T00:00:00Z"). + // The date and time for the action to start, in YYYY-MM-DDThh:mm:ssZ format + // in UTC/GMT only and in quotes (for example, "2019-06-01T00:00:00Z"). // // If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs // the action at this time, and then performs the action based on the specified @@ -13065,17 +13235,17 @@ func (s *TagDescription) SetValue(v string) *TagDescription { type TargetTrackingConfiguration struct { _ struct{} `type:"structure"` - // A customized metric. You can specify either a predefined metric or a customized + // A customized metric. You must specify either a predefined metric or a customized // metric. CustomizedMetricSpecification *CustomizedMetricSpecification `type:"structure"` // Indicates whether scaling in by the target tracking scaling policy is disabled. // If scaling in is disabled, the target tracking scaling policy doesn't remove // instances from the Auto Scaling group. Otherwise, the target tracking scaling - // policy can remove instances from the Auto Scaling group. The default is disabled. + // policy can remove instances from the Auto Scaling group. The default is false. DisableScaleIn *bool `type:"boolean"` - // A predefined metric. You can specify either a predefined metric or a customized + // A predefined metric. You must specify either a predefined metric or a customized // metric. PredefinedMetricSpecification *PredefinedMetricSpecification `type:"structure"` @@ -13233,9 +13403,12 @@ type UpdateAutoScalingGroupInput struct { AvailabilityZones []*string `min:"1" type:"list"` // The amount of time, in seconds, after a scaling activity completes before - // another scaling activity can start. The default value is 300. + // another scaling activity can start. The default value is 300. This cooldown + // period is not used when a scaling-specific cooldown is specified. // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Cooldown periods are not supported for target tracking scaling policies, + // step scaling policies, or scheduled scaling. For more information, see Scaling + // Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` @@ -13248,7 +13421,7 @@ type UpdateAutoScalingGroupInput struct { // checking the health status of an EC2 instance that has come into service. // The default value is 0. // - // For more information, see Health Checks for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. 
// // Conditional: This parameter is required if you are adding an ELB health check. @@ -13260,13 +13433,21 @@ type UpdateAutoScalingGroupInput struct { // balancer health checks. HealthCheckType *string `min:"1" type:"string"` - // The name of the launch configuration. If you specify this parameter, you - // can't specify a launch template or a mixed instances policy. + // The name of the launch configuration. If you specify LaunchConfigurationName + // in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy. + // + // To update an Auto Scaling group with a launch configuration with InstanceMonitoring + // set to false, you must first disable the collection of group metrics. Otherwise, + // you get an error. If you have previously enabled the collection of group + // metrics, you can disable it using DisableMetricsCollection. LaunchConfigurationName *string `min:"1" type:"string"` // The launch template and version to use to specify the updates. If you specify - // this parameter, you can't specify a launch configuration or a mixed instances - // policy. + // LaunchTemplate in your update request, you can't specify LaunchConfigurationName + // or MixedInstancesPolicy. + // + // For more information, see LaunchTemplateSpecification (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplateSpecification.html) + // in the Amazon EC2 Auto Scaling API Reference. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The maximum size of the Auto Scaling group. @@ -13275,11 +13456,14 @@ type UpdateAutoScalingGroupInput struct { // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` - // The mixed instances policy to use to specify the updates. If you specify - // this parameter, you can't specify a launch configuration or a launch template. + // An embedded object that specifies a mixed instances policy. + // + // In your call to UpdateAutoScalingGroup, you can make changes to the policy + // that is specified. All optional parameters are left unchanged if not specified. // - // For more information, see Auto Scaling Groups with Multiple Instance Types - // and Purchase Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) + // For more information, see MixedInstancesPolicy (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_MixedInstancesPolicy.html) + // in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with + // Multiple Instance Types and Purchase Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-purchase-options.html) // in the Amazon EC2 Auto Scaling User Guide. MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"` @@ -13313,7 +13497,7 @@ type UpdateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. TerminationPolicies []*string `type:"list"` - // A comma-separated list of subnet IDs, if you are launching into a VPC. + // A comma-separated list of subnet IDs for virtual private cloud (VPC). // // If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that // you specify for this parameter must reside in those Availability Zones. 
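The hunks above tighten the UpdateAutoScalingGroup documentation: LaunchConfigurationName, LaunchTemplate, and MixedInstancesPolicy are now described as mutually exclusive top-level parameters, and the scaling-policy docs now spell out which parameters apply to each policy type. As a rough illustration of how a caller exercises the bumped SDK surface, here is a minimal sketch against aws-sdk-go v1.25.35 (not part of this diff; the group name, template name, policy name, and numeric values are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Shared session; region/credentials resolve via the usual SDK rules.
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	// Switch the group to a mixed instances policy. Per the updated docs,
	// MixedInstancesPolicy, LaunchTemplate, and LaunchConfigurationName
	// are mutually exclusive in the same request.
	_, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
			LaunchTemplate: &autoscaling.LaunchTemplate{
				LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
					LaunchTemplateName: aws.String("my-template"), // placeholder
					Version:            aws.String("$Latest"),
				},
				// You must specify between 2 and 20 instance type overrides.
				Overrides: []*autoscaling.LaunchTemplateOverrides{
					{InstanceType: aws.String("m5.large")},
					{InstanceType: aws.String("m4.large")},
				},
			},
			InstancesDistribution: &autoscaling.InstancesDistribution{
				// capacity-optimized is the new alternative to lowest-price.
				SpotAllocationStrategy: aws.String("capacity-optimized"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attach a target tracking policy; TargetTrackingConfiguration is
	// required for this policy type and not used by the others.
	out, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
		AutoScalingGroupName: aws.String("my-asg"),
		PolicyName:           aws.String("cpu-target-50"), // placeholder
		PolicyType:           aws.String("TargetTrackingScaling"),
		TargetTrackingConfiguration: &autoscaling.TargetTrackingConfiguration{
			PredefinedMetricSpecification: &autoscaling.PredefinedMetricSpecification{
				PredefinedMetricType: aws.String("ASGAverageCPUUtilization"),
			},
			TargetValue: aws.Float64(50.0),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.PolicyARN))
}

Note that SpotInstancePools is intentionally omitted above: per the updated InstancesDistribution docs, it is valid only when the Spot allocation strategy is lowest-price, and capacity-optimized ignores it.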
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go index 96cf63083bc..0a2fe8c30f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go @@ -4,9 +4,9 @@ // requests to Auto Scaling. // // Amazon EC2 Auto Scaling is designed to automatically launch or terminate -// EC2 instances based on user-defined policies, schedules, and health checks. -// Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load -// Balancing. +// EC2 instances based on user-defined scaling policies, scheduled actions, +// and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, +// and Elastic Load Balancing. // // For more information, including information about granting IAM users required // permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index e1da9fd7546..4ce49ffd27d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -46,11 +46,11 @@ const ( // svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScaling { svc := &AutoScaling{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-01-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go new file mode 100644 index 00000000000..20a6adb1272 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go @@ -0,0 +1,2793 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package autoscalingplans + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateScalingPlan = "CreateScalingPlan" + +// CreateScalingPlanRequest generates a "aws/request.Request" representing the +// client's request for the CreateScalingPlan operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateScalingPlan for more information on using the CreateScalingPlan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateScalingPlanRequest method. +// req, resp := client.CreateScalingPlanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/CreateScalingPlan +func (c *AutoScalingPlans) CreateScalingPlanRequest(input *CreateScalingPlanInput) (req *request.Request, output *CreateScalingPlanOutput) { + op := &request.Operation{ + Name: opCreateScalingPlan, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateScalingPlanInput{} + } + + output = &CreateScalingPlanOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateScalingPlan API operation for AWS Auto Scaling Plans. +// +// Creates a scaling plan. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation CreateScalingPlan for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Your account exceeded a limit. This exception is thrown when a per-account +// resource limit is exceeded. +// +// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" +// Concurrent updates caused an exception, for example, if you request an update +// to a scaling plan that already has a pending update. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/CreateScalingPlan +func (c *AutoScalingPlans) CreateScalingPlan(input *CreateScalingPlanInput) (*CreateScalingPlanOutput, error) { + req, out := c.CreateScalingPlanRequest(input) + return out, req.Send() +} + +// CreateScalingPlanWithContext is the same as CreateScalingPlan with the addition of +// the ability to pass a context and additional request options. +// +// See CreateScalingPlan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) CreateScalingPlanWithContext(ctx aws.Context, input *CreateScalingPlanInput, opts ...request.Option) (*CreateScalingPlanOutput, error) { + req, out := c.CreateScalingPlanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteScalingPlan = "DeleteScalingPlan" + +// DeleteScalingPlanRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScalingPlan operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteScalingPlan for more information on using the DeleteScalingPlan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteScalingPlanRequest method. +// req, resp := client.DeleteScalingPlanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DeleteScalingPlan +func (c *AutoScalingPlans) DeleteScalingPlanRequest(input *DeleteScalingPlanInput) (req *request.Request, output *DeleteScalingPlanOutput) { + op := &request.Operation{ + Name: opDeleteScalingPlan, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScalingPlanInput{} + } + + output = &DeleteScalingPlanOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteScalingPlan API operation for AWS Auto Scaling Plans. +// +// Deletes the specified scaling plan. +// +// Deleting a scaling plan deletes the underlying ScalingInstruction for all +// of the scalable resources that are covered by the plan. +// +// If the plan has launched resources or has scaling activities in progress, +// you must delete those resources separately. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation DeleteScalingPlan for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeObjectNotFoundException "ObjectNotFoundException" +// The specified object could not be found. +// +// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" +// Concurrent updates caused an exception, for example, if you request an update +// to a scaling plan that already has a pending update. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DeleteScalingPlan +func (c *AutoScalingPlans) DeleteScalingPlan(input *DeleteScalingPlanInput) (*DeleteScalingPlanOutput, error) { + req, out := c.DeleteScalingPlanRequest(input) + return out, req.Send() +} + +// DeleteScalingPlanWithContext is the same as DeleteScalingPlan with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteScalingPlan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) DeleteScalingPlanWithContext(ctx aws.Context, input *DeleteScalingPlanInput, opts ...request.Option) (*DeleteScalingPlanOutput, error) { + req, out := c.DeleteScalingPlanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeScalingPlanResources = "DescribeScalingPlanResources" + +// DescribeScalingPlanResourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingPlanResources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeScalingPlanResources for more information on using the DescribeScalingPlanResources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeScalingPlanResourcesRequest method. +// req, resp := client.DescribeScalingPlanResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DescribeScalingPlanResources +func (c *AutoScalingPlans) DescribeScalingPlanResourcesRequest(input *DescribeScalingPlanResourcesInput) (req *request.Request, output *DescribeScalingPlanResourcesOutput) { + op := &request.Operation{ + Name: opDescribeScalingPlanResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScalingPlanResourcesInput{} + } + + output = &DescribeScalingPlanResourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeScalingPlanResources API operation for AWS Auto Scaling Plans. +// +// Describes the scalable resources in the specified scaling plan. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation DescribeScalingPlanResources for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token provided is not valid. +// +// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" +// Concurrent updates caused an exception, for example, if you request an update +// to a scaling plan that already has a pending update. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DescribeScalingPlanResources +func (c *AutoScalingPlans) DescribeScalingPlanResources(input *DescribeScalingPlanResourcesInput) (*DescribeScalingPlanResourcesOutput, error) { + req, out := c.DescribeScalingPlanResourcesRequest(input) + return out, req.Send() +} + +// DescribeScalingPlanResourcesWithContext is the same as DescribeScalingPlanResources with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScalingPlanResources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) DescribeScalingPlanResourcesWithContext(ctx aws.Context, input *DescribeScalingPlanResourcesInput, opts ...request.Option) (*DescribeScalingPlanResourcesOutput, error) { + req, out := c.DescribeScalingPlanResourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeScalingPlans = "DescribeScalingPlans" + +// DescribeScalingPlansRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingPlans operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeScalingPlans for more information on using the DescribeScalingPlans +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeScalingPlansRequest method. +// req, resp := client.DescribeScalingPlansRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DescribeScalingPlans +func (c *AutoScalingPlans) DescribeScalingPlansRequest(input *DescribeScalingPlansInput) (req *request.Request, output *DescribeScalingPlansOutput) { + op := &request.Operation{ + Name: opDescribeScalingPlans, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScalingPlansInput{} + } + + output = &DescribeScalingPlansOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeScalingPlans API operation for AWS Auto Scaling Plans. +// +// Describes one or more of your scaling plans. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation DescribeScalingPlans for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token provided is not valid. 
+// +// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" +// Concurrent updates caused an exception, for example, if you request an update +// to a scaling plan that already has a pending update. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/DescribeScalingPlans +func (c *AutoScalingPlans) DescribeScalingPlans(input *DescribeScalingPlansInput) (*DescribeScalingPlansOutput, error) { + req, out := c.DescribeScalingPlansRequest(input) + return out, req.Send() +} + +// DescribeScalingPlansWithContext is the same as DescribeScalingPlans with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScalingPlans for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) DescribeScalingPlansWithContext(ctx aws.Context, input *DescribeScalingPlansInput, opts ...request.Option) (*DescribeScalingPlansOutput, error) { + req, out := c.DescribeScalingPlansRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetScalingPlanResourceForecastData = "GetScalingPlanResourceForecastData" + +// GetScalingPlanResourceForecastDataRequest generates a "aws/request.Request" representing the +// client's request for the GetScalingPlanResourceForecastData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetScalingPlanResourceForecastData for more information on using the GetScalingPlanResourceForecastData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetScalingPlanResourceForecastDataRequest method. +// req, resp := client.GetScalingPlanResourceForecastDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/GetScalingPlanResourceForecastData +func (c *AutoScalingPlans) GetScalingPlanResourceForecastDataRequest(input *GetScalingPlanResourceForecastDataInput) (req *request.Request, output *GetScalingPlanResourceForecastDataOutput) { + op := &request.Operation{ + Name: opGetScalingPlanResourceForecastData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetScalingPlanResourceForecastDataInput{} + } + + output = &GetScalingPlanResourceForecastDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetScalingPlanResourceForecastData API operation for AWS Auto Scaling Plans. +// +// Retrieves the forecast data for a scalable resource. +// +// Capacity forecasts are represented as predicted values, or data points, that +// are calculated using historical data points from a specified CloudWatch load +// metric. 
Data points are available for up to 56 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation GetScalingPlanResourceForecastData for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/GetScalingPlanResourceForecastData +func (c *AutoScalingPlans) GetScalingPlanResourceForecastData(input *GetScalingPlanResourceForecastDataInput) (*GetScalingPlanResourceForecastDataOutput, error) { + req, out := c.GetScalingPlanResourceForecastDataRequest(input) + return out, req.Send() +} + +// GetScalingPlanResourceForecastDataWithContext is the same as GetScalingPlanResourceForecastData with the addition of +// the ability to pass a context and additional request options. +// +// See GetScalingPlanResourceForecastData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) GetScalingPlanResourceForecastDataWithContext(ctx aws.Context, input *GetScalingPlanResourceForecastDataInput, opts ...request.Option) (*GetScalingPlanResourceForecastDataOutput, error) { + req, out := c.GetScalingPlanResourceForecastDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateScalingPlan = "UpdateScalingPlan" + +// UpdateScalingPlanRequest generates a "aws/request.Request" representing the +// client's request for the UpdateScalingPlan operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateScalingPlan for more information on using the UpdateScalingPlan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateScalingPlanRequest method. 
+// req, resp := client.UpdateScalingPlanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/UpdateScalingPlan +func (c *AutoScalingPlans) UpdateScalingPlanRequest(input *UpdateScalingPlanInput) (req *request.Request, output *UpdateScalingPlanOutput) { + op := &request.Operation{ + Name: opUpdateScalingPlan, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateScalingPlanInput{} + } + + output = &UpdateScalingPlanOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateScalingPlan API operation for AWS Auto Scaling Plans. +// +// Updates the specified scaling plan. +// +// You cannot update a scaling plan if it is in the process of being created, +// updated, or deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Auto Scaling Plans's +// API operation UpdateScalingPlan for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// An exception was thrown for a validation issue. Review the parameters provided. +// +// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" +// Concurrent updates caused an exception, for example, if you request an update +// to a scaling plan that already has a pending update. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// The service encountered an internal error. +// +// * ErrCodeObjectNotFoundException "ObjectNotFoundException" +// The specified object could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06/UpdateScalingPlan +func (c *AutoScalingPlans) UpdateScalingPlan(input *UpdateScalingPlanInput) (*UpdateScalingPlanOutput, error) { + req, out := c.UpdateScalingPlanRequest(input) + return out, req.Send() +} + +// UpdateScalingPlanWithContext is the same as UpdateScalingPlan with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateScalingPlan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScalingPlans) UpdateScalingPlanWithContext(ctx aws.Context, input *UpdateScalingPlanInput, opts ...request.Option) (*UpdateScalingPlanOutput, error) { + req, out := c.UpdateScalingPlanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Represents an application source. +type ApplicationSource struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of an AWS CloudFormation stack. + CloudFormationStackARN *string `type:"string"` + + // A set of tags (up to 50).
+ TagFilters []*TagFilter `type:"list"` +} + +// String returns the string representation +func (s ApplicationSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplicationSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplicationSource"} + if s.TagFilters != nil { + for i, v := range s.TagFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCloudFormationStackARN sets the CloudFormationStackARN field's value. +func (s *ApplicationSource) SetCloudFormationStackARN(v string) *ApplicationSource { + s.CloudFormationStackARN = &v + return s +} + +// SetTagFilters sets the TagFilters field's value. +func (s *ApplicationSource) SetTagFilters(v []*TagFilter) *ApplicationSource { + s.TagFilters = v + return s +} + +type CreateScalingPlanInput struct { + _ struct{} `type:"structure"` + + // A CloudFormation stack or set of tags. You can create one scaling plan per + // application source. + // + // ApplicationSource is a required field + ApplicationSource *ApplicationSource `type:"structure" required:"true"` + + // The scaling instructions. + // + // ScalingInstructions is a required field + ScalingInstructions []*ScalingInstruction `type:"list" required:"true"` + + // The name of the scaling plan. Names cannot contain vertical bars, colons, + // or forward slashes. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateScalingPlanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateScalingPlanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateScalingPlanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateScalingPlanInput"} + if s.ApplicationSource == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationSource")) + } + if s.ScalingInstructions == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingInstructions")) + } + if s.ScalingPlanName == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanName")) + } + if s.ScalingPlanName != nil && len(*s.ScalingPlanName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScalingPlanName", 1)) + } + if s.ApplicationSource != nil { + if err := s.ApplicationSource.Validate(); err != nil { + invalidParams.AddNested("ApplicationSource", err.(request.ErrInvalidParams)) + } + } + if s.ScalingInstructions != nil { + for i, v := range s.ScalingInstructions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScalingInstructions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationSource sets the ApplicationSource field's value. 
+func (s *CreateScalingPlanInput) SetApplicationSource(v *ApplicationSource) *CreateScalingPlanInput { + s.ApplicationSource = v + return s +} + +// SetScalingInstructions sets the ScalingInstructions field's value. +func (s *CreateScalingPlanInput) SetScalingInstructions(v []*ScalingInstruction) *CreateScalingPlanInput { + s.ScalingInstructions = v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *CreateScalingPlanInput) SetScalingPlanName(v string) *CreateScalingPlanInput { + s.ScalingPlanName = &v + return s +} + +type CreateScalingPlanOutput struct { + _ struct{} `type:"structure"` + + // The version number of the scaling plan. This value is always 1. + // + // Currently, you cannot specify multiple scaling plan versions. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateScalingPlanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateScalingPlanOutput) GoString() string { + return s.String() +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *CreateScalingPlanOutput) SetScalingPlanVersion(v int64) *CreateScalingPlanOutput { + s.ScalingPlanVersion = &v + return s +} + +// Represents a CloudWatch metric of your choosing that can be used for predictive +// scaling. +// +// For predictive scaling to work with a customized load metric specification, +// AWS Auto Scaling needs access to the Sum and Average statistics that CloudWatch +// computes from metric data. Statistics are calculations used to aggregate +// data over specified time periods. +// +// When you choose a load metric, make sure that the required Sum and Average +// statistics for your metric are available in CloudWatch and that they provide +// relevant data for predictive scaling. The Sum statistic must represent the +// total load on the resource, and the Average statistic must represent the +// average load per capacity unit of the resource. For example, there is a metric +// that counts the number of requests processed by your Auto Scaling group. +// If the Sum statistic represents the total request count processed by the +// group, then the Average statistic for the specified metric must represent +// the average request count processed by each instance of the group. +// +// For information about terminology, available metrics, or how to publish new +// metrics, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html) +// in the Amazon CloudWatch User Guide. +type CustomizedLoadMetricSpecification struct { + _ struct{} `type:"structure"` + + // The dimensions of the metric. + // + // Conditional: If you published your metric with dimensions, you must specify + // the same dimensions in your customized load metric specification. + Dimensions []*MetricDimension `type:"list"` + + // The name of the metric. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // The namespace of the metric. + // + // Namespace is a required field + Namespace *string `type:"string" required:"true"` + + // The statistic of the metric. Currently, the value must always be Sum. + // + // Statistic is a required field + Statistic *string `type:"string" required:"true" enum:"MetricStatistic"` + + // The unit of the metric. 
+ Unit *string `type:"string"` +} + +// String returns the string representation +func (s CustomizedLoadMetricSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomizedLoadMetricSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomizedLoadMetricSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomizedLoadMetricSpecification"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Statistic == nil { + invalidParams.Add(request.NewErrParamRequired("Statistic")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *CustomizedLoadMetricSpecification) SetDimensions(v []*MetricDimension) *CustomizedLoadMetricSpecification { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CustomizedLoadMetricSpecification) SetMetricName(v string) *CustomizedLoadMetricSpecification { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CustomizedLoadMetricSpecification) SetNamespace(v string) *CustomizedLoadMetricSpecification { + s.Namespace = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *CustomizedLoadMetricSpecification) SetStatistic(v string) *CustomizedLoadMetricSpecification { + s.Statistic = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *CustomizedLoadMetricSpecification) SetUnit(v string) *CustomizedLoadMetricSpecification { + s.Unit = &v + return s +} + +// Represents a CloudWatch metric of your choosing that can be used for dynamic +// scaling as part of a target tracking scaling policy. +// +// To create your customized scaling metric specification: +// +// * Add values for each required parameter from CloudWatch. You can use +// an existing metric, or a new metric that you create. To use your own metric, +// you must first publish the metric to CloudWatch. For more information, +// see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) +// in the Amazon CloudWatch User Guide. +// +// * Choose a metric that changes proportionally with capacity. The value +// of the metric should increase or decrease in inverse proportion to the +// number of capacity units. That is, the value of the metric should decrease +// when capacity increases. +// +// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). +type CustomizedScalingMetricSpecification struct { + _ struct{} `type:"structure"` + + // The dimensions of the metric. + // + // Conditional: If you published your metric with dimensions, you must specify + // the same dimensions in your customized scaling metric specification. + Dimensions []*MetricDimension `type:"list"` + + // The name of the metric. 
+ // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // The namespace of the metric. + // + // Namespace is a required field + Namespace *string `type:"string" required:"true"` + + // The statistic of the metric. + // + // Statistic is a required field + Statistic *string `type:"string" required:"true" enum:"MetricStatistic"` + + // The unit of the metric. + Unit *string `type:"string"` +} + +// String returns the string representation +func (s CustomizedScalingMetricSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomizedScalingMetricSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomizedScalingMetricSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomizedScalingMetricSpecification"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Statistic == nil { + invalidParams.Add(request.NewErrParamRequired("Statistic")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *CustomizedScalingMetricSpecification) SetDimensions(v []*MetricDimension) *CustomizedScalingMetricSpecification { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CustomizedScalingMetricSpecification) SetMetricName(v string) *CustomizedScalingMetricSpecification { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CustomizedScalingMetricSpecification) SetNamespace(v string) *CustomizedScalingMetricSpecification { + s.Namespace = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *CustomizedScalingMetricSpecification) SetStatistic(v string) *CustomizedScalingMetricSpecification { + s.Statistic = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *CustomizedScalingMetricSpecification) SetUnit(v string) *CustomizedScalingMetricSpecification { + s.Unit = &v + return s +} + +// Represents a single value in the forecast data used for predictive scaling. +type Datapoint struct { + _ struct{} `type:"structure"` + + // The time stamp for the data point in UTC format. + Timestamp *time.Time `type:"timestamp"` + + // The value of the data point. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datapoint) GoString() string { + return s.String() +} + +// SetTimestamp sets the Timestamp field's value. +func (s *Datapoint) SetTimestamp(v time.Time) *Datapoint { + s.Timestamp = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Datapoint) SetValue(v float64) *Datapoint { + s.Value = &v + return s +} + +type DeleteScalingPlanInput struct { + _ struct{} `type:"structure"` + + // The name of the scaling plan. 
+ // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s DeleteScalingPlanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPlanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteScalingPlanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPlanInput"} + if s.ScalingPlanName == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanName")) + } + if s.ScalingPlanName != nil && len(*s.ScalingPlanName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScalingPlanName", 1)) + } + if s.ScalingPlanVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *DeleteScalingPlanInput) SetScalingPlanName(v string) *DeleteScalingPlanInput { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *DeleteScalingPlanInput) SetScalingPlanVersion(v int64) *DeleteScalingPlanInput { + s.ScalingPlanVersion = &v + return s +} + +type DeleteScalingPlanOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScalingPlanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPlanOutput) GoString() string { + return s.String() +} + +type DescribeScalingPlanResourcesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scalable resources to return. The value must be between + // 1 and 50. The default value is 50. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The name of the scaling plan. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s DescribeScalingPlanResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPlanResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalingPlanResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPlanResourcesInput"} + if s.ScalingPlanName == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanName")) + } + if s.ScalingPlanName != nil && len(*s.ScalingPlanName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScalingPlanName", 1)) + } + if s.ScalingPlanVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
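+//
+// MaxResults caps a single page of results; callers are expected to loop on
+// NextToken until it is nil. A hedged sketch, assuming svc is a client value
+// for this service exposing the generated DescribeScalingPlanResources method:
+//
+//    var resources []*ScalingPlanResource
+//    for {
+//        out, err := svc.DescribeScalingPlanResources(input)
+//        if err != nil {
+//            return err
+//        }
+//        resources = append(resources, out.ScalingPlanResources...)
+//        if out.NextToken == nil {
+//            break
+//        }
+//        input.NextToken = out.NextToken
+//    }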
+func (s *DescribeScalingPlanResourcesInput) SetMaxResults(v int64) *DescribeScalingPlanResourcesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPlanResourcesInput) SetNextToken(v string) *DescribeScalingPlanResourcesInput { + s.NextToken = &v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *DescribeScalingPlanResourcesInput) SetScalingPlanName(v string) *DescribeScalingPlanResourcesInput { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *DescribeScalingPlanResourcesInput) SetScalingPlanVersion(v int64) *DescribeScalingPlanResourcesInput { + s.ScalingPlanVersion = &v + return s +} + +type DescribeScalingPlanResourcesOutput struct { + _ struct{} `type:"structure"` + + // The token required to get the next set of results. This value is null if + // there are no more results to return. + NextToken *string `type:"string"` + + // Information about the scalable resources. + ScalingPlanResources []*ScalingPlanResource `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPlanResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPlanResourcesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPlanResourcesOutput) SetNextToken(v string) *DescribeScalingPlanResourcesOutput { + s.NextToken = &v + return s +} + +// SetScalingPlanResources sets the ScalingPlanResources field's value. +func (s *DescribeScalingPlanResourcesOutput) SetScalingPlanResources(v []*ScalingPlanResource) *DescribeScalingPlanResourcesOutput { + s.ScalingPlanResources = v + return s +} + +type DescribeScalingPlansInput struct { + _ struct{} `type:"structure"` + + // The sources for the applications (up to 10). If you specify scaling plan + // names, you cannot specify application sources. + ApplicationSources []*ApplicationSource `type:"list"` + + // The maximum number of scalable resources to return. This value can be between + // 1 and 50. The default value is 50. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The names of the scaling plans (up to 10). If you specify application sources, + // you cannot specify scaling plan names. + ScalingPlanNames []*string `type:"list"` + + // The version number of the scaling plan. If you specify a scaling plan version, + // you must also specify a scaling plan name. + ScalingPlanVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeScalingPlansInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPlansInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeScalingPlansInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPlansInput"} + if s.ApplicationSources != nil { + for i, v := range s.ApplicationSources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ApplicationSources", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationSources sets the ApplicationSources field's value. +func (s *DescribeScalingPlansInput) SetApplicationSources(v []*ApplicationSource) *DescribeScalingPlansInput { + s.ApplicationSources = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeScalingPlansInput) SetMaxResults(v int64) *DescribeScalingPlansInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPlansInput) SetNextToken(v string) *DescribeScalingPlansInput { + s.NextToken = &v + return s +} + +// SetScalingPlanNames sets the ScalingPlanNames field's value. +func (s *DescribeScalingPlansInput) SetScalingPlanNames(v []*string) *DescribeScalingPlansInput { + s.ScalingPlanNames = v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *DescribeScalingPlansInput) SetScalingPlanVersion(v int64) *DescribeScalingPlansInput { + s.ScalingPlanVersion = &v + return s +} + +type DescribeScalingPlansOutput struct { + _ struct{} `type:"structure"` + + // The token required to get the next set of results. This value is null if + // there are no more results to return. + NextToken *string `type:"string"` + + // Information about the scaling plans. + ScalingPlans []*ScalingPlan `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPlansOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPlansOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScalingPlansOutput) SetNextToken(v string) *DescribeScalingPlansOutput { + s.NextToken = &v + return s +} + +// SetScalingPlans sets the ScalingPlans field's value. +func (s *DescribeScalingPlansOutput) SetScalingPlans(v []*ScalingPlan) *DescribeScalingPlansOutput { + s.ScalingPlans = v + return s +} + +type GetScalingPlanResourceForecastDataInput struct { + _ struct{} `type:"structure"` + + // The exclusive end time of the time range for the forecast data to get. The + // maximum time duration between the start and end time is seven days. + // + // Although this parameter can accept a date and time that is more than two + // days in the future, the availability of forecast data has limits. AWS Auto + // Scaling only issues forecasts for periods of two days in advance. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // The type of forecast data to get. + // + // * LoadForecast: The load metric forecast. + // + // * CapacityForecast: The capacity forecast. + // + // * ScheduledActionMinCapacity: The minimum capacity for each scheduled + // scaling action. This data is calculated as the larger of two values: the + // capacity forecast or the minimum capacity in the scaling instruction. + // + // * ScheduledActionMaxCapacity: The maximum capacity for each scheduled + // scaling action. 
The calculation used is determined by the predictive scaling + // maximum capacity behavior setting in the scaling instruction. + // + // ForecastDataType is a required field + ForecastDataType *string `type:"string" required:"true" enum:"ForecastDataType"` + + // The ID of the resource. This string consists of the resource type and unique + // identifier. + // + // * Auto Scaling group - The resource type is autoScalingGroup and the unique + // identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg. + // + // * ECS service - The resource type is service and the unique identifier + // is the cluster name and service name. Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * DynamoDB table - The resource type is table and the unique identifier + // is the resource ID. Example: table/my-table. + // + // * DynamoDB global secondary index - The resource type is index and the + // unique identifier is the resource ID. Example: table/my-table/index/my-table-index. + // + // * Aurora DB cluster - The resource type is cluster and the unique identifier + // is the cluster name. Example: cluster:my-db-cluster. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // The scalable dimension for the resource. + // + // ScalableDimension is a required field + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The name of the scaling plan. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` + + // The namespace of the AWS service. + // + // ServiceNamespace is a required field + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The inclusive start time of the time range for the forecast data to get. + // The date and time can be at most 56 days before the current date and time. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s GetScalingPlanResourceForecastDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetScalingPlanResourceForecastDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
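+//
+// Validate runs automatically when the SDK marshals the request, and can also
+// be called directly before sending. A minimal sketch; the empty input only
+// demonstrates how missing required fields are reported:
+//
+//    input := &GetScalingPlanResourceForecastDataInput{}
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // a request.ErrInvalidParams naming each missing field
+//    }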
+func (s *GetScalingPlanResourceForecastDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetScalingPlanResourceForecastDataInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.ForecastDataType == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastDataType")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ScalingPlanName == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanName")) + } + if s.ScalingPlanName != nil && len(*s.ScalingPlanName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScalingPlanName", 1)) + } + if s.ScalingPlanVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanVersion")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetEndTime(v time.Time) *GetScalingPlanResourceForecastDataInput { + s.EndTime = &v + return s +} + +// SetForecastDataType sets the ForecastDataType field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetForecastDataType(v string) *GetScalingPlanResourceForecastDataInput { + s.ForecastDataType = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetResourceId(v string) *GetScalingPlanResourceForecastDataInput { + s.ResourceId = &v + return s +} + +// SetScalableDimension sets the ScalableDimension field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetScalableDimension(v string) *GetScalingPlanResourceForecastDataInput { + s.ScalableDimension = &v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetScalingPlanName(v string) *GetScalingPlanResourceForecastDataInput { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetScalingPlanVersion(v int64) *GetScalingPlanResourceForecastDataInput { + s.ScalingPlanVersion = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetServiceNamespace(v string) *GetScalingPlanResourceForecastDataInput { + s.ServiceNamespace = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetScalingPlanResourceForecastDataInput) SetStartTime(v time.Time) *GetScalingPlanResourceForecastDataInput { + s.StartTime = &v + return s +} + +type GetScalingPlanResourceForecastDataOutput struct { + _ struct{} `type:"structure"` + + // The data points to return. + // + // Datapoints is a required field + Datapoints []*Datapoint `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetScalingPlanResourceForecastDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetScalingPlanResourceForecastDataOutput) GoString() string { + return s.String() +} + +// SetDatapoints sets the Datapoints field's value. 
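+//
+// A sketch of consuming the forecast, assuming out is a
+// *GetScalingPlanResourceForecastDataOutput returned by the service:
+//
+//    for _, dp := range out.Datapoints {
+//        fmt.Printf("%s %g\n", dp.Timestamp.Format(time.RFC3339), *dp.Value)
+//    }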
+func (s *GetScalingPlanResourceForecastDataOutput) SetDatapoints(v []*Datapoint) *GetScalingPlanResourceForecastDataOutput {
+	s.Datapoints = v
+	return s
+}
+
+// Represents a dimension for a customized metric.
+type MetricDimension struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the dimension.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// The value of the dimension.
+	//
+	// Value is a required field
+	Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricDimension) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricDimension) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricDimension) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "MetricDimension"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Value == nil {
+		invalidParams.Add(request.NewErrParamRequired("Value"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetName sets the Name field's value.
+func (s *MetricDimension) SetName(v string) *MetricDimension {
+	s.Name = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *MetricDimension) SetValue(v string) *MetricDimension {
+	s.Value = &v
+	return s
+}
+
+// Represents a predefined metric that can be used for predictive scaling.
+type PredefinedLoadMetricSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The metric type.
+	//
+	// PredefinedLoadMetricType is a required field
+	PredefinedLoadMetricType *string `type:"string" required:"true" enum:"LoadMetricType"`
+
+	// Identifies the resource associated with the metric type. You can't specify
+	// a resource label unless the metric type is ALBRequestCountPerTarget and there
+	// is a target group for an Application Load Balancer attached to the Auto Scaling
+	// group.
+	//
+	// The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>,
+	// where:
+	//
+	//    * app/<load-balancer-name>/<load-balancer-id> is the final portion of
+	//    the load balancer ARN.
+	//
+	//    * targetgroup/<target-group-name>/<target-group-id> is the final portion
+	//    of the target group ARN.
+	ResourceLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PredefinedLoadMetricSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PredefinedLoadMetricSpecification) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PredefinedLoadMetricSpecification) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PredefinedLoadMetricSpecification"}
+	if s.PredefinedLoadMetricType == nil {
+		invalidParams.Add(request.NewErrParamRequired("PredefinedLoadMetricType"))
+	}
+	if s.ResourceLabel != nil && len(*s.ResourceLabel) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceLabel", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPredefinedLoadMetricType sets the PredefinedLoadMetricType field's value.
+func (s *PredefinedLoadMetricSpecification) SetPredefinedLoadMetricType(v string) *PredefinedLoadMetricSpecification {
+	s.PredefinedLoadMetricType = &v
+	return s
+}
+
+// SetResourceLabel sets the ResourceLabel field's value.
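+//
+// An illustrative label for a target group attached to an Application Load
+// Balancer; the names and IDs are made up:
+//
+//    app/my-load-balancer/50dc6c495c0c9188/targetgroup/my-targets/73e2d6bc24d8a067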
+func (s *PredefinedLoadMetricSpecification) SetResourceLabel(v string) *PredefinedLoadMetricSpecification {
+	s.ResourceLabel = &v
+	return s
+}
+
+// Represents a predefined metric that can be used for dynamic scaling as part
+// of a target tracking scaling policy.
+type PredefinedScalingMetricSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The metric type. The ALBRequestCountPerTarget metric type applies only to
+	// Auto Scaling groups, Spot Fleet requests, and ECS services.
+	//
+	// PredefinedScalingMetricType is a required field
+	PredefinedScalingMetricType *string `type:"string" required:"true" enum:"ScalingMetricType"`
+
+	// Identifies the resource associated with the metric type. You can't specify
+	// a resource label unless the metric type is ALBRequestCountPerTarget and there
+	// is a target group for an Application Load Balancer attached to the Auto Scaling
+	// group, Spot Fleet request, or ECS service.
+	//
+	// The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>,
+	// where:
+	//
+	//    * app/<load-balancer-name>/<load-balancer-id> is the final portion of
+	//    the load balancer ARN.
+	//
+	//    * targetgroup/<target-group-name>/<target-group-id> is the final portion
+	//    of the target group ARN.
+	ResourceLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PredefinedScalingMetricSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PredefinedScalingMetricSpecification) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PredefinedScalingMetricSpecification) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PredefinedScalingMetricSpecification"}
+	if s.PredefinedScalingMetricType == nil {
+		invalidParams.Add(request.NewErrParamRequired("PredefinedScalingMetricType"))
+	}
+	if s.ResourceLabel != nil && len(*s.ResourceLabel) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceLabel", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPredefinedScalingMetricType sets the PredefinedScalingMetricType field's value.
+func (s *PredefinedScalingMetricSpecification) SetPredefinedScalingMetricType(v string) *PredefinedScalingMetricSpecification {
+	s.PredefinedScalingMetricType = &v
+	return s
+}
+
+// SetResourceLabel sets the ResourceLabel field's value.
+func (s *PredefinedScalingMetricSpecification) SetResourceLabel(v string) *PredefinedScalingMetricSpecification {
+	s.ResourceLabel = &v
+	return s
+}
+
+// Describes a scaling instruction for a scalable resource.
+//
+// The scaling instruction is used in combination with a scaling plan, which
+// is a set of instructions for configuring dynamic scaling and predictive scaling
+// for the scalable resources in your application. Each scaling instruction
+// applies to one resource.
+//
+// AWS Auto Scaling creates target tracking scaling policies based on the scaling
+// instructions. Target tracking scaling policies adjust the capacity of your
+// scalable resource as required to maintain resource utilization at the target
+// value that you specified.
+//
+// AWS Auto Scaling also configures predictive scaling for your Amazon EC2 Auto
+// Scaling groups using a subset of parameters, including the load metric, the
+// scaling metric, the target value for the scaling metric, the predictive scaling
+// mode (forecast and scale or forecast only), and the desired behavior when
+// the forecast capacity exceeds the maximum capacity of the resource.
With +// predictive scaling, AWS Auto Scaling generates forecasts with traffic predictions +// for the two days ahead and schedules scaling actions that proactively add +// and remove resource capacity to match the forecast. +// +// We recommend waiting a minimum of 24 hours after creating an Auto Scaling +// group to configure predictive scaling. At minimum, there must be 24 hours +// of historical data to generate a forecast. +// +// For more information, see Getting Started with AWS Auto Scaling (https://docs.aws.amazon.com/autoscaling/plans/userguide/auto-scaling-getting-started.html). +type ScalingInstruction struct { + _ struct{} `type:"structure"` + + // The customized load metric to use for predictive scaling. This parameter + // or a PredefinedLoadMetricSpecification is required when configuring predictive + // scaling, and cannot be used otherwise. + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecification `type:"structure"` + + // Controls whether dynamic scaling by AWS Auto Scaling is disabled. When dynamic + // scaling is enabled, AWS Auto Scaling creates target tracking scaling policies + // based on the specified target tracking configurations. + // + // The default is enabled (false). + DisableDynamicScaling *bool `type:"boolean"` + + // The maximum capacity of the resource. The exception to this upper limit is + // if you specify a non-default setting for PredictiveScalingMaxCapacityBehavior. + // + // MaxCapacity is a required field + MaxCapacity *int64 `type:"integer" required:"true"` + + // The minimum capacity of the resource. + // + // MinCapacity is a required field + MinCapacity *int64 `type:"integer" required:"true"` + + // The predefined load metric to use for predictive scaling. This parameter + // or a CustomizedLoadMetricSpecification is required when configuring predictive + // scaling, and cannot be used otherwise. + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecification `type:"structure"` + + // Defines the behavior that should be applied if the forecast capacity approaches + // or exceeds the maximum capacity specified for the resource. The default value + // is SetForecastCapacityToMaxCapacity. + // + // The following are possible values: + // + // * SetForecastCapacityToMaxCapacity - AWS Auto Scaling cannot scale resource + // capacity higher than the maximum capacity. The maximum capacity is enforced + // as a hard limit. + // + // * SetMaxCapacityToForecastCapacity - AWS Auto Scaling may scale resource + // capacity higher than the maximum capacity to equal but not exceed forecast + // capacity. + // + // * SetMaxCapacityAboveForecastCapacity - AWS Auto Scaling may scale resource + // capacity higher than the maximum capacity by a specified buffer value. + // The intention is to give the target tracking scaling policy extra capacity + // if unexpected traffic occurs. + // + // Only valid when configuring predictive scaling. + PredictiveScalingMaxCapacityBehavior *string `type:"string" enum:"PredictiveScalingMaxCapacityBehavior"` + + // The size of the capacity buffer to use when the forecast capacity is close + // to or exceeds the maximum capacity. The value is specified as a percentage + // relative to the forecast capacity. For example, if the buffer is 10, this + // means a 10 percent buffer, such that if the forecast capacity is 50, and + // the maximum capacity is 40, then the effective maximum capacity is 55. + // + // Only valid when configuring predictive scaling. 
Required if the PredictiveScalingMaxCapacityBehavior + // is set to SetMaxCapacityAboveForecastCapacity, and cannot be used otherwise. + // + // The range is 1-100. + PredictiveScalingMaxCapacityBuffer *int64 `type:"integer"` + + // The predictive scaling mode. The default value is ForecastAndScale. Otherwise, + // AWS Auto Scaling forecasts capacity but does not create any scheduled scaling + // actions based on the capacity forecast. + PredictiveScalingMode *string `type:"string" enum:"PredictiveScalingMode"` + + // The ID of the resource. This string consists of the resource type and unique + // identifier. + // + // * Auto Scaling group - The resource type is autoScalingGroup and the unique + // identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg. + // + // * ECS service - The resource type is service and the unique identifier + // is the cluster name and service name. Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * DynamoDB table - The resource type is table and the unique identifier + // is the resource ID. Example: table/my-table. + // + // * DynamoDB global secondary index - The resource type is index and the + // unique identifier is the resource ID. Example: table/my-table/index/my-table-index. + // + // * Aurora DB cluster - The resource type is cluster and the unique identifier + // is the cluster name. Example: cluster:my-db-cluster. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the resource. + // + // * autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity + // of an Auto Scaling group. + // + // * ecs:service:DesiredCount - The desired task count of an ECS service. + // + // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot + // Fleet request. + // + // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for + // a DynamoDB table. + // + // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB table. + // + // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for + // a DynamoDB global secondary index. + // + // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB global secondary index. + // + // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora + // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible + // edition. + // + // ScalableDimension is a required field + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // Controls whether a resource's externally created scaling policies are kept + // or replaced. + // + // The default value is KeepExternalPolicies. If the parameter is set to ReplaceExternalPolicies, + // any scaling policies that are external to AWS Auto Scaling are deleted and + // new target tracking scaling policies created. + // + // Only valid when configuring dynamic scaling. + // + // Condition: The number of existing policies to be replaced must be less than + // or equal to 50. If there are more than 50 policies to be replaced, AWS Auto + // Scaling keeps all existing policies and does not create new ones. 
+ ScalingPolicyUpdateBehavior *string `type:"string" enum:"ScalingPolicyUpdateBehavior"` + + // The amount of time, in seconds, to buffer the run time of scheduled scaling + // actions when scaling out. For example, if the forecast says to add capacity + // at 10:00 AM, and the buffer time is 5 minutes, then the run time of the corresponding + // scheduled scaling action will be 9:55 AM. The intention is to give resources + // time to be provisioned. For example, it can take a few minutes to launch + // an EC2 instance. The actual amount of time required depends on several factors, + // such as the size of the instance and whether there are startup scripts to + // complete. + // + // The value must be less than the forecast interval duration of 3600 seconds + // (60 minutes). The default is 300 seconds. + // + // Only valid when configuring predictive scaling. + ScheduledActionBufferTime *int64 `type:"integer"` + + // The namespace of the AWS service. + // + // ServiceNamespace is a required field + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The structure that defines new target tracking configurations (up to 10). + // Each of these structures includes a specific scaling metric and a target + // value for the metric, along with various parameters to use with dynamic scaling. + // + // With predictive scaling and dynamic scaling, the resource scales based on + // the target tracking configuration that provides the largest capacity for + // both scale in and scale out. + // + // Condition: The scaling metric must be unique across target tracking configurations. + // + // TargetTrackingConfigurations is a required field + TargetTrackingConfigurations []*TargetTrackingConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s ScalingInstruction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingInstruction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
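+//
+// Validation recurses into nested members; a failure inside, for example, the
+// first target tracking configuration is reported under a nested context such
+// as "TargetTrackingConfigurations[0]". A sketch:
+//
+//    si := (&ScalingInstruction{}).SetTargetTrackingConfigurations(
+//        []*TargetTrackingConfiguration{{}}) // TargetValue missing
+//    err := si.Validate() // non-nil: missing required fields plus the nested failure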
+func (s *ScalingInstruction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScalingInstruction"} + if s.MaxCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("MaxCapacity")) + } + if s.MinCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("MinCapacity")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + if s.TargetTrackingConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTrackingConfigurations")) + } + if s.CustomizedLoadMetricSpecification != nil { + if err := s.CustomizedLoadMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("CustomizedLoadMetricSpecification", err.(request.ErrInvalidParams)) + } + } + if s.PredefinedLoadMetricSpecification != nil { + if err := s.PredefinedLoadMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("PredefinedLoadMetricSpecification", err.(request.ErrInvalidParams)) + } + } + if s.TargetTrackingConfigurations != nil { + for i, v := range s.TargetTrackingConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetTrackingConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomizedLoadMetricSpecification sets the CustomizedLoadMetricSpecification field's value. +func (s *ScalingInstruction) SetCustomizedLoadMetricSpecification(v *CustomizedLoadMetricSpecification) *ScalingInstruction { + s.CustomizedLoadMetricSpecification = v + return s +} + +// SetDisableDynamicScaling sets the DisableDynamicScaling field's value. +func (s *ScalingInstruction) SetDisableDynamicScaling(v bool) *ScalingInstruction { + s.DisableDynamicScaling = &v + return s +} + +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *ScalingInstruction) SetMaxCapacity(v int64) *ScalingInstruction { + s.MaxCapacity = &v + return s +} + +// SetMinCapacity sets the MinCapacity field's value. +func (s *ScalingInstruction) SetMinCapacity(v int64) *ScalingInstruction { + s.MinCapacity = &v + return s +} + +// SetPredefinedLoadMetricSpecification sets the PredefinedLoadMetricSpecification field's value. +func (s *ScalingInstruction) SetPredefinedLoadMetricSpecification(v *PredefinedLoadMetricSpecification) *ScalingInstruction { + s.PredefinedLoadMetricSpecification = v + return s +} + +// SetPredictiveScalingMaxCapacityBehavior sets the PredictiveScalingMaxCapacityBehavior field's value. +func (s *ScalingInstruction) SetPredictiveScalingMaxCapacityBehavior(v string) *ScalingInstruction { + s.PredictiveScalingMaxCapacityBehavior = &v + return s +} + +// SetPredictiveScalingMaxCapacityBuffer sets the PredictiveScalingMaxCapacityBuffer field's value. +func (s *ScalingInstruction) SetPredictiveScalingMaxCapacityBuffer(v int64) *ScalingInstruction { + s.PredictiveScalingMaxCapacityBuffer = &v + return s +} + +// SetPredictiveScalingMode sets the PredictiveScalingMode field's value. 
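+//
+// The accepted values are the PredictiveScalingMode constants declared at the
+// end of this file, for example:
+//
+//    si := (&ScalingInstruction{}).SetPredictiveScalingMode(PredictiveScalingModeForecastOnly)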
+func (s *ScalingInstruction) SetPredictiveScalingMode(v string) *ScalingInstruction { + s.PredictiveScalingMode = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ScalingInstruction) SetResourceId(v string) *ScalingInstruction { + s.ResourceId = &v + return s +} + +// SetScalableDimension sets the ScalableDimension field's value. +func (s *ScalingInstruction) SetScalableDimension(v string) *ScalingInstruction { + s.ScalableDimension = &v + return s +} + +// SetScalingPolicyUpdateBehavior sets the ScalingPolicyUpdateBehavior field's value. +func (s *ScalingInstruction) SetScalingPolicyUpdateBehavior(v string) *ScalingInstruction { + s.ScalingPolicyUpdateBehavior = &v + return s +} + +// SetScheduledActionBufferTime sets the ScheduledActionBufferTime field's value. +func (s *ScalingInstruction) SetScheduledActionBufferTime(v int64) *ScalingInstruction { + s.ScheduledActionBufferTime = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *ScalingInstruction) SetServiceNamespace(v string) *ScalingInstruction { + s.ServiceNamespace = &v + return s +} + +// SetTargetTrackingConfigurations sets the TargetTrackingConfigurations field's value. +func (s *ScalingInstruction) SetTargetTrackingConfigurations(v []*TargetTrackingConfiguration) *ScalingInstruction { + s.TargetTrackingConfigurations = v + return s +} + +// Represents a scaling plan. +type ScalingPlan struct { + _ struct{} `type:"structure"` + + // The application source. + // + // ApplicationSource is a required field + ApplicationSource *ApplicationSource `type:"structure" required:"true"` + + // The Unix time stamp when the scaling plan was created. + CreationTime *time.Time `type:"timestamp"` + + // The scaling instructions. + // + // ScalingInstructions is a required field + ScalingInstructions []*ScalingInstruction `type:"list" required:"true"` + + // The name of the scaling plan. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` + + // The status of the scaling plan. + // + // * Active - The scaling plan is active. + // + // * ActiveWithProblems - The scaling plan is active, but the scaling configuration + // for one or more resources could not be applied. + // + // * CreationInProgress - The scaling plan is being created. + // + // * CreationFailed - The scaling plan could not be created. + // + // * DeletionInProgress - The scaling plan is being deleted. + // + // * DeletionFailed - The scaling plan could not be deleted. + // + // * UpdateInProgress - The scaling plan is being updated. + // + // * UpdateFailed - The scaling plan could not be updated. + // + // StatusCode is a required field + StatusCode *string `type:"string" required:"true" enum:"ScalingPlanStatusCode"` + + // A simple message about the current status of the scaling plan. + StatusMessage *string `type:"string"` + + // The Unix time stamp when the scaling plan entered the current status. + StatusStartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ScalingPlan) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPlan) GoString() string { + return s.String() +} + +// SetApplicationSource sets the ApplicationSource field's value. 
+func (s *ScalingPlan) SetApplicationSource(v *ApplicationSource) *ScalingPlan { + s.ApplicationSource = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ScalingPlan) SetCreationTime(v time.Time) *ScalingPlan { + s.CreationTime = &v + return s +} + +// SetScalingInstructions sets the ScalingInstructions field's value. +func (s *ScalingPlan) SetScalingInstructions(v []*ScalingInstruction) *ScalingPlan { + s.ScalingInstructions = v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *ScalingPlan) SetScalingPlanName(v string) *ScalingPlan { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *ScalingPlan) SetScalingPlanVersion(v int64) *ScalingPlan { + s.ScalingPlanVersion = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *ScalingPlan) SetStatusCode(v string) *ScalingPlan { + s.StatusCode = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ScalingPlan) SetStatusMessage(v string) *ScalingPlan { + s.StatusMessage = &v + return s +} + +// SetStatusStartTime sets the StatusStartTime field's value. +func (s *ScalingPlan) SetStatusStartTime(v time.Time) *ScalingPlan { + s.StatusStartTime = &v + return s +} + +// Represents a scalable resource. +type ScalingPlanResource struct { + _ struct{} `type:"structure"` + + // The ID of the resource. This string consists of the resource type and unique + // identifier. + // + // * Auto Scaling group - The resource type is autoScalingGroup and the unique + // identifier is the name of the Auto Scaling group. Example: autoScalingGroup/my-asg. + // + // * ECS service - The resource type is service and the unique identifier + // is the cluster name and service name. Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource type is spot-fleet-request and the + // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * DynamoDB table - The resource type is table and the unique identifier + // is the resource ID. Example: table/my-table. + // + // * DynamoDB global secondary index - The resource type is index and the + // unique identifier is the resource ID. Example: table/my-table/index/my-table-index. + // + // * Aurora DB cluster - The resource type is cluster and the unique identifier + // is the cluster name. Example: cluster:my-db-cluster. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension for the resource. + // + // * autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity + // of an Auto Scaling group. + // + // * ecs:service:DesiredCount - The desired task count of an ECS service. + // + // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot + // Fleet request. + // + // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for + // a DynamoDB table. + // + // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB table. + // + // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for + // a DynamoDB global secondary index. + // + // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB global secondary index. + // + // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora + // DB cluster. 
Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible + // edition. + // + // ScalableDimension is a required field + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The name of the scaling plan. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` + + // The scaling policies. + ScalingPolicies []*ScalingPolicy `type:"list"` + + // The scaling status of the resource. + // + // * Active - The scaling configuration is active. + // + // * Inactive - The scaling configuration is not active because the scaling + // plan is being created or the scaling configuration could not be applied. + // Check the status message for more information. + // + // * PartiallyActive - The scaling configuration is partially active because + // the scaling plan is being created or deleted or the scaling configuration + // could not be fully applied. Check the status message for more information. + // + // ScalingStatusCode is a required field + ScalingStatusCode *string `type:"string" required:"true" enum:"ScalingStatusCode"` + + // A simple message about the current scaling status of the resource. + ScalingStatusMessage *string `type:"string"` + + // The namespace of the AWS service. + // + // ServiceNamespace is a required field + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s ScalingPlanResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPlanResource) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *ScalingPlanResource) SetResourceId(v string) *ScalingPlanResource { + s.ResourceId = &v + return s +} + +// SetScalableDimension sets the ScalableDimension field's value. +func (s *ScalingPlanResource) SetScalableDimension(v string) *ScalingPlanResource { + s.ScalableDimension = &v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *ScalingPlanResource) SetScalingPlanName(v string) *ScalingPlanResource { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. +func (s *ScalingPlanResource) SetScalingPlanVersion(v int64) *ScalingPlanResource { + s.ScalingPlanVersion = &v + return s +} + +// SetScalingPolicies sets the ScalingPolicies field's value. +func (s *ScalingPlanResource) SetScalingPolicies(v []*ScalingPolicy) *ScalingPlanResource { + s.ScalingPolicies = v + return s +} + +// SetScalingStatusCode sets the ScalingStatusCode field's value. +func (s *ScalingPlanResource) SetScalingStatusCode(v string) *ScalingPlanResource { + s.ScalingStatusCode = &v + return s +} + +// SetScalingStatusMessage sets the ScalingStatusMessage field's value. +func (s *ScalingPlanResource) SetScalingStatusMessage(v string) *ScalingPlanResource { + s.ScalingStatusMessage = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *ScalingPlanResource) SetServiceNamespace(v string) *ScalingPlanResource { + s.ServiceNamespace = &v + return s +} + +// Represents a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. 
+ // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The type of scaling policy. + // + // PolicyType is a required field + PolicyType *string `type:"string" required:"true" enum:"PolicyType"` + + // The target tracking scaling policy. Includes support for predefined or customized + // metrics. + TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *ScalingPolicy) SetPolicyName(v string) *ScalingPolicy { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *ScalingPolicy) SetPolicyType(v string) *ScalingPolicy { + s.PolicyType = &v + return s +} + +// SetTargetTrackingConfiguration sets the TargetTrackingConfiguration field's value. +func (s *ScalingPolicy) SetTargetTrackingConfiguration(v *TargetTrackingConfiguration) *ScalingPolicy { + s.TargetTrackingConfiguration = v + return s +} + +// Represents a tag. +type TagFilter struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string"` + + // The tag values (0 to 20). + Values []*string `type:"list"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagFilter"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *TagFilter) SetKey(v string) *TagFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *TagFilter) SetValues(v []*string) *TagFilter { + s.Values = v + return s +} + +// Describes a target tracking configuration to use with AWS Auto Scaling. Used +// with ScalingInstruction and ScalingPolicy. +type TargetTrackingConfiguration struct { + _ struct{} `type:"structure"` + + // A customized metric. You can specify either a predefined metric or a customized + // metric. + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecification `type:"structure"` + + // Indicates whether scale in by the target tracking scaling policy is disabled. + // If the value is true, scale in is disabled and the target tracking scaling + // policy doesn't remove capacity from the scalable resource. Otherwise, scale + // in is enabled and the target tracking scaling policy can remove capacity + // from the scalable resource. + // + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. This value is used only if the resource is an + // Auto Scaling group. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // A predefined metric. You can specify either a predefined metric or a customized + // metric. 
+ PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecification `type:"structure"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. This value is not used if the scalable + // resource is an Auto Scaling group. + // + // The cooldown period is used to block subsequent scale in requests until it + // has expired. The intention is to scale in conservatively to protect your + // application's availability. However, if another alarm triggers a scale-out + // policy during the cooldown period after a scale-in, AWS Auto Scaling scales + // out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale-out activity completes before + // another scale-out activity can start. This value is not used if the scalable + // resource is an Auto Scaling group. + // + // While the cooldown period is in effect, the capacity that has been added + // by the previous scale-out event that initiated the cooldown is calculated + // as part of the desired capacity for the next scale out. The intention is + // to continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s TargetTrackingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetTrackingConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetTrackingConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetTrackingConfiguration"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + if s.CustomizedScalingMetricSpecification != nil { + if err := s.CustomizedScalingMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("CustomizedScalingMetricSpecification", err.(request.ErrInvalidParams)) + } + } + if s.PredefinedScalingMetricSpecification != nil { + if err := s.PredefinedScalingMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("PredefinedScalingMetricSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomizedScalingMetricSpecification sets the CustomizedScalingMetricSpecification field's value. +func (s *TargetTrackingConfiguration) SetCustomizedScalingMetricSpecification(v *CustomizedScalingMetricSpecification) *TargetTrackingConfiguration { + s.CustomizedScalingMetricSpecification = v + return s +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *TargetTrackingConfiguration) SetDisableScaleIn(v bool) *TargetTrackingConfiguration { + s.DisableScaleIn = &v + return s +} + +// SetEstimatedInstanceWarmup sets the EstimatedInstanceWarmup field's value. +func (s *TargetTrackingConfiguration) SetEstimatedInstanceWarmup(v int64) *TargetTrackingConfiguration { + s.EstimatedInstanceWarmup = &v + return s +} + +// SetPredefinedScalingMetricSpecification sets the PredefinedScalingMetricSpecification field's value. 
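+//
+// A sketch of a predefined-metric configuration; the metric type is one of
+// the ScalingMetricType enum values accepted by this API, and the target
+// value is illustrative:
+//
+//    ttc := (&TargetTrackingConfiguration{}).
+//        SetPredefinedScalingMetricSpecification((&PredefinedScalingMetricSpecification{}).
+//            SetPredefinedScalingMetricType("ASGAverageCPUUtilization")).
+//        SetTargetValue(50.0)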
+func (s *TargetTrackingConfiguration) SetPredefinedScalingMetricSpecification(v *PredefinedScalingMetricSpecification) *TargetTrackingConfiguration { + s.PredefinedScalingMetricSpecification = v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *TargetTrackingConfiguration) SetScaleInCooldown(v int64) *TargetTrackingConfiguration { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *TargetTrackingConfiguration) SetScaleOutCooldown(v int64) *TargetTrackingConfiguration { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *TargetTrackingConfiguration) SetTargetValue(v float64) *TargetTrackingConfiguration { + s.TargetValue = &v + return s +} + +type UpdateScalingPlanInput struct { + _ struct{} `type:"structure"` + + // A CloudFormation stack or set of tags. + ApplicationSource *ApplicationSource `type:"structure"` + + // The scaling instructions. + ScalingInstructions []*ScalingInstruction `type:"list"` + + // The name of the scaling plan. + // + // ScalingPlanName is a required field + ScalingPlanName *string `min:"1" type:"string" required:"true"` + + // The version number of the scaling plan. + // + // ScalingPlanVersion is a required field + ScalingPlanVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s UpdateScalingPlanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingPlanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateScalingPlanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateScalingPlanInput"} + if s.ScalingPlanName == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanName")) + } + if s.ScalingPlanName != nil && len(*s.ScalingPlanName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScalingPlanName", 1)) + } + if s.ScalingPlanVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingPlanVersion")) + } + if s.ApplicationSource != nil { + if err := s.ApplicationSource.Validate(); err != nil { + invalidParams.AddNested("ApplicationSource", err.(request.ErrInvalidParams)) + } + } + if s.ScalingInstructions != nil { + for i, v := range s.ScalingInstructions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScalingInstructions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationSource sets the ApplicationSource field's value. +func (s *UpdateScalingPlanInput) SetApplicationSource(v *ApplicationSource) *UpdateScalingPlanInput { + s.ApplicationSource = v + return s +} + +// SetScalingInstructions sets the ScalingInstructions field's value. +func (s *UpdateScalingPlanInput) SetScalingInstructions(v []*ScalingInstruction) *UpdateScalingPlanInput { + s.ScalingInstructions = v + return s +} + +// SetScalingPlanName sets the ScalingPlanName field's value. +func (s *UpdateScalingPlanInput) SetScalingPlanName(v string) *UpdateScalingPlanInput { + s.ScalingPlanName = &v + return s +} + +// SetScalingPlanVersion sets the ScalingPlanVersion field's value. 
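+//
+// Editor's note, an illustrative sketch only (not generated documentation):
+// an update request can be composed with these setters and checked locally
+// with Validate before being sent. The plan name is a hypothetical example.
+//
+//    input := (&UpdateScalingPlanInput{}).
+//        SetScalingPlanName("my-plan").
+//        SetScalingPlanVersion(1)
+//    if err := input.Validate(); err != nil {
+//        // both required fields are set above, so no error is expected here
+//    }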
+func (s *UpdateScalingPlanInput) SetScalingPlanVersion(v int64) *UpdateScalingPlanInput { + s.ScalingPlanVersion = &v + return s +} + +type UpdateScalingPlanOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateScalingPlanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingPlanOutput) GoString() string { + return s.String() +} + +const ( + // ForecastDataTypeCapacityForecast is a ForecastDataType enum value + ForecastDataTypeCapacityForecast = "CapacityForecast" + + // ForecastDataTypeLoadForecast is a ForecastDataType enum value + ForecastDataTypeLoadForecast = "LoadForecast" + + // ForecastDataTypeScheduledActionMinCapacity is a ForecastDataType enum value + ForecastDataTypeScheduledActionMinCapacity = "ScheduledActionMinCapacity" + + // ForecastDataTypeScheduledActionMaxCapacity is a ForecastDataType enum value + ForecastDataTypeScheduledActionMaxCapacity = "ScheduledActionMaxCapacity" +) + +const ( + // LoadMetricTypeAsgtotalCpuutilization is a LoadMetricType enum value + LoadMetricTypeAsgtotalCpuutilization = "ASGTotalCPUUtilization" + + // LoadMetricTypeAsgtotalNetworkIn is a LoadMetricType enum value + LoadMetricTypeAsgtotalNetworkIn = "ASGTotalNetworkIn" + + // LoadMetricTypeAsgtotalNetworkOut is a LoadMetricType enum value + LoadMetricTypeAsgtotalNetworkOut = "ASGTotalNetworkOut" + + // LoadMetricTypeAlbtargetGroupRequestCount is a LoadMetricType enum value + LoadMetricTypeAlbtargetGroupRequestCount = "ALBTargetGroupRequestCount" +) + +const ( + // MetricStatisticAverage is a MetricStatistic enum value + MetricStatisticAverage = "Average" + + // MetricStatisticMinimum is a MetricStatistic enum value + MetricStatisticMinimum = "Minimum" + + // MetricStatisticMaximum is a MetricStatistic enum value + MetricStatisticMaximum = "Maximum" + + // MetricStatisticSampleCount is a MetricStatistic enum value + MetricStatisticSampleCount = "SampleCount" + + // MetricStatisticSum is a MetricStatistic enum value + MetricStatisticSum = "Sum" +) + +const ( + // PolicyTypeTargetTrackingScaling is a PolicyType enum value + PolicyTypeTargetTrackingScaling = "TargetTrackingScaling" +) + +const ( + // PredictiveScalingMaxCapacityBehaviorSetForecastCapacityToMaxCapacity is a PredictiveScalingMaxCapacityBehavior enum value + PredictiveScalingMaxCapacityBehaviorSetForecastCapacityToMaxCapacity = "SetForecastCapacityToMaxCapacity" + + // PredictiveScalingMaxCapacityBehaviorSetMaxCapacityToForecastCapacity is a PredictiveScalingMaxCapacityBehavior enum value + PredictiveScalingMaxCapacityBehaviorSetMaxCapacityToForecastCapacity = "SetMaxCapacityToForecastCapacity" + + // PredictiveScalingMaxCapacityBehaviorSetMaxCapacityAboveForecastCapacity is a PredictiveScalingMaxCapacityBehavior enum value + PredictiveScalingMaxCapacityBehaviorSetMaxCapacityAboveForecastCapacity = "SetMaxCapacityAboveForecastCapacity" +) + +const ( + // PredictiveScalingModeForecastAndScale is a PredictiveScalingMode enum value + PredictiveScalingModeForecastAndScale = "ForecastAndScale" + + // PredictiveScalingModeForecastOnly is a PredictiveScalingMode enum value + PredictiveScalingModeForecastOnly = "ForecastOnly" +) + +const ( + // ScalableDimensionAutoscalingAutoScalingGroupDesiredCapacity is a ScalableDimension enum value + ScalableDimensionAutoscalingAutoScalingGroupDesiredCapacity = "autoscaling:autoScalingGroup:DesiredCapacity" + + // ScalableDimensionEcsServiceDesiredCount is a 
ScalableDimension enum value + ScalableDimensionEcsServiceDesiredCount = "ecs:service:DesiredCount" + + // ScalableDimensionEc2SpotFleetRequestTargetCapacity is a ScalableDimension enum value + ScalableDimensionEc2SpotFleetRequestTargetCapacity = "ec2:spot-fleet-request:TargetCapacity" + + // ScalableDimensionRdsClusterReadReplicaCount is a ScalableDimension enum value + ScalableDimensionRdsClusterReadReplicaCount = "rds:cluster:ReadReplicaCount" + + // ScalableDimensionDynamodbTableReadCapacityUnits is a ScalableDimension enum value + ScalableDimensionDynamodbTableReadCapacityUnits = "dynamodb:table:ReadCapacityUnits" + + // ScalableDimensionDynamodbTableWriteCapacityUnits is a ScalableDimension enum value + ScalableDimensionDynamodbTableWriteCapacityUnits = "dynamodb:table:WriteCapacityUnits" + + // ScalableDimensionDynamodbIndexReadCapacityUnits is a ScalableDimension enum value + ScalableDimensionDynamodbIndexReadCapacityUnits = "dynamodb:index:ReadCapacityUnits" + + // ScalableDimensionDynamodbIndexWriteCapacityUnits is a ScalableDimension enum value + ScalableDimensionDynamodbIndexWriteCapacityUnits = "dynamodb:index:WriteCapacityUnits" +) + +const ( + // ScalingMetricTypeAsgaverageCpuutilization is a ScalingMetricType enum value + ScalingMetricTypeAsgaverageCpuutilization = "ASGAverageCPUUtilization" + + // ScalingMetricTypeAsgaverageNetworkIn is a ScalingMetricType enum value + ScalingMetricTypeAsgaverageNetworkIn = "ASGAverageNetworkIn" + + // ScalingMetricTypeAsgaverageNetworkOut is a ScalingMetricType enum value + ScalingMetricTypeAsgaverageNetworkOut = "ASGAverageNetworkOut" + + // ScalingMetricTypeDynamoDbreadCapacityUtilization is a ScalingMetricType enum value + ScalingMetricTypeDynamoDbreadCapacityUtilization = "DynamoDBReadCapacityUtilization" + + // ScalingMetricTypeDynamoDbwriteCapacityUtilization is a ScalingMetricType enum value + ScalingMetricTypeDynamoDbwriteCapacityUtilization = "DynamoDBWriteCapacityUtilization" + + // ScalingMetricTypeEcsserviceAverageCpuutilization is a ScalingMetricType enum value + ScalingMetricTypeEcsserviceAverageCpuutilization = "ECSServiceAverageCPUUtilization" + + // ScalingMetricTypeEcsserviceAverageMemoryUtilization is a ScalingMetricType enum value + ScalingMetricTypeEcsserviceAverageMemoryUtilization = "ECSServiceAverageMemoryUtilization" + + // ScalingMetricTypeAlbrequestCountPerTarget is a ScalingMetricType enum value + ScalingMetricTypeAlbrequestCountPerTarget = "ALBRequestCountPerTarget" + + // ScalingMetricTypeRdsreaderAverageCpuutilization is a ScalingMetricType enum value + ScalingMetricTypeRdsreaderAverageCpuutilization = "RDSReaderAverageCPUUtilization" + + // ScalingMetricTypeRdsreaderAverageDatabaseConnections is a ScalingMetricType enum value + ScalingMetricTypeRdsreaderAverageDatabaseConnections = "RDSReaderAverageDatabaseConnections" + + // ScalingMetricTypeEc2spotFleetRequestAverageCpuutilization is a ScalingMetricType enum value + ScalingMetricTypeEc2spotFleetRequestAverageCpuutilization = "EC2SpotFleetRequestAverageCPUUtilization" + + // ScalingMetricTypeEc2spotFleetRequestAverageNetworkIn is a ScalingMetricType enum value + ScalingMetricTypeEc2spotFleetRequestAverageNetworkIn = "EC2SpotFleetRequestAverageNetworkIn" + + // ScalingMetricTypeEc2spotFleetRequestAverageNetworkOut is a ScalingMetricType enum value + ScalingMetricTypeEc2spotFleetRequestAverageNetworkOut = "EC2SpotFleetRequestAverageNetworkOut" +) + +const ( + // ScalingPlanStatusCodeActive is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeActive = 
"Active" + + // ScalingPlanStatusCodeActiveWithProblems is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeActiveWithProblems = "ActiveWithProblems" + + // ScalingPlanStatusCodeCreationInProgress is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeCreationInProgress = "CreationInProgress" + + // ScalingPlanStatusCodeCreationFailed is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeCreationFailed = "CreationFailed" + + // ScalingPlanStatusCodeDeletionInProgress is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeDeletionInProgress = "DeletionInProgress" + + // ScalingPlanStatusCodeDeletionFailed is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeDeletionFailed = "DeletionFailed" + + // ScalingPlanStatusCodeUpdateInProgress is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeUpdateInProgress = "UpdateInProgress" + + // ScalingPlanStatusCodeUpdateFailed is a ScalingPlanStatusCode enum value + ScalingPlanStatusCodeUpdateFailed = "UpdateFailed" +) + +const ( + // ScalingPolicyUpdateBehaviorKeepExternalPolicies is a ScalingPolicyUpdateBehavior enum value + ScalingPolicyUpdateBehaviorKeepExternalPolicies = "KeepExternalPolicies" + + // ScalingPolicyUpdateBehaviorReplaceExternalPolicies is a ScalingPolicyUpdateBehavior enum value + ScalingPolicyUpdateBehaviorReplaceExternalPolicies = "ReplaceExternalPolicies" +) + +const ( + // ScalingStatusCodeInactive is a ScalingStatusCode enum value + ScalingStatusCodeInactive = "Inactive" + + // ScalingStatusCodePartiallyActive is a ScalingStatusCode enum value + ScalingStatusCodePartiallyActive = "PartiallyActive" + + // ScalingStatusCodeActive is a ScalingStatusCode enum value + ScalingStatusCodeActive = "Active" +) + +const ( + // ServiceNamespaceAutoscaling is a ServiceNamespace enum value + ServiceNamespaceAutoscaling = "autoscaling" + + // ServiceNamespaceEcs is a ServiceNamespace enum value + ServiceNamespaceEcs = "ecs" + + // ServiceNamespaceEc2 is a ServiceNamespace enum value + ServiceNamespaceEc2 = "ec2" + + // ServiceNamespaceRds is a ServiceNamespace enum value + ServiceNamespaceRds = "rds" + + // ServiceNamespaceDynamodb is a ServiceNamespace enum value + ServiceNamespaceDynamodb = "dynamodb" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/doc.go new file mode 100644 index 00000000000..4d4a408d808 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/doc.go @@ -0,0 +1,39 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package autoscalingplans provides the client and types for making API +// requests to AWS Auto Scaling Plans. +// +// Use AWS Auto Scaling to quickly discover all the scalable AWS resources for +// your application and configure dynamic scaling and predictive scaling for +// your resources using scaling plans. Use this service in conjunction with +// the Amazon EC2 Auto Scaling, Application Auto Scaling, Amazon CloudWatch, +// and AWS CloudFormation services. +// +// Currently, predictive scaling is only available for Amazon EC2 Auto Scaling +// groups. +// +// For more information about AWS Auto Scaling, including information about +// granting IAM users required permissions for AWS Auto Scaling actions, see +// the AWS Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/plans/userguide/what-is-aws-auto-scaling.html). 
+// +// See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-plans-2018-01-06 for more information on this service. +// +// See the autoscalingplans package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/autoscalingplans/ +// +// Using the Client +// +// To contact AWS Auto Scaling Plans with the SDK, use the New function to create +// a new service client. With that client, you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Auto Scaling Plans client AutoScalingPlans for more +// information on creating a client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/autoscalingplans/#New +package autoscalingplans diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/errors.go new file mode 100644 index 00000000000..8f954385969 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/errors.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package autoscalingplans + +const ( + + // ErrCodeConcurrentUpdateException for service response error code + // "ConcurrentUpdateException". + // + // Concurrent updates caused an exception, for example, if you request an update + // to a scaling plan that already has a pending update. + ErrCodeConcurrentUpdateException = "ConcurrentUpdateException" + + // ErrCodeInternalServiceException for service response error code + // "InternalServiceException". + // + // The service encountered an internal error. + ErrCodeInternalServiceException = "InternalServiceException" + + // ErrCodeInvalidNextTokenException for service response error code + // "InvalidNextTokenException". + // + // The token provided is not valid. + ErrCodeInvalidNextTokenException = "InvalidNextTokenException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // Your account exceeded a limit. This exception is thrown when a per-account + // resource limit is exceeded. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeObjectNotFoundException for service response error code + // "ObjectNotFoundException". + // + // The specified object could not be found. + ErrCodeObjectNotFoundException = "ObjectNotFoundException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // An exception was thrown for a validation issue. Review the parameters provided. + ErrCodeValidationException = "ValidationException" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go new file mode 100644 index 00000000000..2c35b02391d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go @@ -0,0 +1,101 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package autoscalingplans + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// AutoScalingPlans provides the API operation methods for making requests to +// AWS Auto Scaling Plans. See this package's package overview docs +// for details on the service. +// +// AutoScalingPlans methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type AutoScalingPlans struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "autoscaling" // Name of service. + EndpointsID = ServiceName // ID to look up a service endpoint with. + ServiceID = "Auto Scaling Plans" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the AutoScalingPlans client with a session. +// If additional configuration is needed for the client instance, use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create an AutoScalingPlans client from just a session. +// svc := autoscalingplans.New(mySession) +// +// // Create an AutoScalingPlans client with additional configuration +// svc := autoscalingplans.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScalingPlans { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "autoscaling-plans" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScalingPlans { + svc := &AutoScalingPlans{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2018-01-06", + JSONVersion: "1.1", + TargetPrefix: "AnyScaleScalingPlannerFrontendService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for an AutoScalingPlans operation and runs any +// custom request initialization.
+func (c *AutoScalingPlans) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/api.go index 2380f4b02d4..7d8513a621c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/api.go @@ -162,17 +162,9 @@ func (c *Backup) CreateBackupSelectionRequest(input *CreateBackupSelectionInput) // // * Resources: "arn:aws:ec2:region:account-id:volume/volume-id" // -// * ConditionKey:"department" +// * ConditionKey:"department" ConditionValue:"finance" ConditionType:"StringEquals" // -// ConditionValue:"finance" -// -// ConditionType:"StringEquals" -// -// * ConditionKey:"importance" -// -// ConditionValue:"critical" -// -// ConditionType:"StringEquals" +// * ConditionKey:"importance" ConditionValue:"critical" ConditionType:"StringEquals" // // Using these patterns would back up all Amazon Elastic Block Store (Amazon // EBS) volumes that are tagged as "department=finance", "importance=critical", @@ -2247,7 +2239,7 @@ func (c *Backup) ListBackupJobsWithContext(ctx aws.Context, input *ListBackupJob // // Example iterating over at most 3 pages of a ListBackupJobs operation. // pageNum := 0 // err := client.ListBackupJobsPages(params, -// func(page *ListBackupJobsOutput, lastPage bool) bool { +// func(page *backup.ListBackupJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2279,10 +2271,12 @@ func (c *Backup) ListBackupJobsPagesWithContext(ctx aws.Context, input *ListBack }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2393,7 +2387,7 @@ func (c *Backup) ListBackupPlanTemplatesWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListBackupPlanTemplates operation. // pageNum := 0 // err := client.ListBackupPlanTemplatesPages(params, -// func(page *ListBackupPlanTemplatesOutput, lastPage bool) bool { +// func(page *backup.ListBackupPlanTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2425,10 +2419,12 @@ func (c *Backup) ListBackupPlanTemplatesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupPlanTemplatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupPlanTemplatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2540,7 +2536,7 @@ func (c *Backup) ListBackupPlanVersionsWithContext(ctx aws.Context, input *ListB // // Example iterating over at most 3 pages of a ListBackupPlanVersions operation. 
// pageNum := 0 // err := client.ListBackupPlanVersionsPages(params, -// func(page *ListBackupPlanVersionsOutput, lastPage bool) bool { +// func(page *backup.ListBackupPlanVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2572,10 +2568,12 @@ func (c *Backup) ListBackupPlanVersionsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupPlanVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupPlanVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2687,7 +2685,7 @@ func (c *Backup) ListBackupPlansWithContext(ctx aws.Context, input *ListBackupPl // // Example iterating over at most 3 pages of a ListBackupPlans operation. // pageNum := 0 // err := client.ListBackupPlansPages(params, -// func(page *ListBackupPlansOutput, lastPage bool) bool { +// func(page *backup.ListBackupPlansOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2719,10 +2717,12 @@ func (c *Backup) ListBackupPlansPagesWithContext(ctx aws.Context, input *ListBac }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupPlansOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupPlansOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2833,7 +2833,7 @@ func (c *Backup) ListBackupSelectionsWithContext(ctx aws.Context, input *ListBac // // Example iterating over at most 3 pages of a ListBackupSelections operation. // pageNum := 0 // err := client.ListBackupSelectionsPages(params, -// func(page *ListBackupSelectionsOutput, lastPage bool) bool { +// func(page *backup.ListBackupSelectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2865,10 +2865,12 @@ func (c *Backup) ListBackupSelectionsPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupSelectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupSelectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2979,7 +2981,7 @@ func (c *Backup) ListBackupVaultsWithContext(ctx aws.Context, input *ListBackupV // // Example iterating over at most 3 pages of a ListBackupVaults operation. // pageNum := 0 // err := client.ListBackupVaultsPages(params, -// func(page *ListBackupVaultsOutput, lastPage bool) bool { +// func(page *backup.ListBackupVaultsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3011,10 +3013,12 @@ func (c *Backup) ListBackupVaultsPagesWithContext(ctx aws.Context, input *ListBa }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBackupVaultsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBackupVaultsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3120,7 +3124,7 @@ func (c *Backup) ListProtectedResourcesWithContext(ctx aws.Context, input *ListP // // Example iterating over at most 3 pages of a ListProtectedResources operation. 
// pageNum := 0 // err := client.ListProtectedResourcesPages(params, -// func(page *ListProtectedResourcesOutput, lastPage bool) bool { +// func(page *backup.ListProtectedResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3152,10 +3156,12 @@ func (c *Backup) ListProtectedResourcesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListProtectedResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListProtectedResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3266,7 +3272,7 @@ func (c *Backup) ListRecoveryPointsByBackupVaultWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a ListRecoveryPointsByBackupVault operation. // pageNum := 0 // err := client.ListRecoveryPointsByBackupVaultPages(params, -// func(page *ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool { +// func(page *backup.ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3298,10 +3304,12 @@ func (c *Backup) ListRecoveryPointsByBackupVaultPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRecoveryPointsByBackupVaultOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRecoveryPointsByBackupVaultOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3412,7 +3420,7 @@ func (c *Backup) ListRecoveryPointsByResourceWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListRecoveryPointsByResource operation. // pageNum := 0 // err := client.ListRecoveryPointsByResourcePages(params, -// func(page *ListRecoveryPointsByResourceOutput, lastPage bool) bool { +// func(page *backup.ListRecoveryPointsByResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3444,10 +3452,12 @@ func (c *Backup) ListRecoveryPointsByResourcePagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRecoveryPointsByResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRecoveryPointsByResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3558,7 +3568,7 @@ func (c *Backup) ListRestoreJobsWithContext(ctx aws.Context, input *ListRestoreJ // // Example iterating over at most 3 pages of a ListRestoreJobs operation. // pageNum := 0 // err := client.ListRestoreJobsPages(params, -// func(page *ListRestoreJobsOutput, lastPage bool) bool { +// func(page *backup.ListRestoreJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3590,10 +3600,12 @@ func (c *Backup) ListRestoreJobsPagesWithContext(ctx aws.Context, input *ListRes }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRestoreJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRestoreJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3704,7 +3716,7 @@ func (c *Backup) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts // // Example iterating over at most 3 pages of a ListTags operation. 
// pageNum := 0 // err := client.ListTagsPages(params, -// func(page *ListTagsOutput, lastPage bool) bool { +// func(page *backup.ListTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3736,10 +3748,12 @@ func (c *Backup) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4533,10 +4547,10 @@ func (c *Backup) UpdateRecoveryPointLifecycleRequest(input *UpdateRecoveryPointL // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a -// minimum of 90 days. Therefore, the “expire after days” setting must be 90 -// days greater than the “transition to cold after days” setting. The “transition -// to cold after days” setting cannot be changed after a backup has been transitioned -// to cold. +// minimum of 90 days. Therefore, the “expire after days” setting must be +// 90 days greater than the “transition to cold after days” setting. The +// “transition to cold after days” setting cannot be changed after a backup +// has been transitioned to cold. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4589,10 +4603,10 @@ func (c *Backup) UpdateRecoveryPointLifecycleWithContext(ctx aws.Context, input // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a -// minimum of 90 days. Therefore, the “expire after days” setting must be 90 -// days greater than the “transition to cold after days” setting. The “transition -// to cold after days” setting cannot be changed after a backup has been transitioned -// to cold. +// minimum of 90 days. Therefore, the “expire after days” setting must be +// 90 days greater than the “transition to cold after days” setting. The +// “transition to cold after days” setting cannot be changed after a backup +// has been transitioned to cold. type CalculatedLifecycle struct { _ struct{} `type:"structure"` @@ -6046,10 +6060,10 @@ type DescribeRecoveryPointOutput struct { // according to the lifecycle that you define. // // Backups that are transitioned to cold storage must be stored in cold storage - // for a minimum of 90 days. Therefore, the “expire after days” setting must - // be 90 days greater than the “transition to cold after days” setting. The - // “transition to cold after days” setting cannot be changed after a backup - // has been transitioned to cold. + // for a minimum of 90 days. Therefore, the “expire after days” setting + // must be 90 days greater than the “transition to cold after days” setting. + // The “transition to cold after days” setting cannot be changed after a + // backup has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. @@ -9040,10 +9054,10 @@ type RecoveryPointByBackupVault struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days” setting. 
The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days” setting. The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // An Amazon Resource Name (ARN) that uniquely identifies a recovery point; @@ -9440,10 +9454,10 @@ type Rule struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days” setting. The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days” setting. The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // An array of key-value pair strings that are assigned to resources that are @@ -9546,9 +9560,9 @@ type RuleInput struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days”. The “transition to - // cold after days” setting cannot be changed after a backup has been transitioned + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days”. The “transition + // to cold after days” setting cannot be changed after a backup has been transitioned // to cold. Lifecycle *Lifecycle `type:"structure"` @@ -9838,10 +9852,10 @@ type StartBackupJobInput struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days” setting. The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days” setting. The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // To help organize your resources, you can assign your own metadata to the @@ -10431,10 +10445,10 @@ type UpdateRecoveryPointLifecycleInput struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days” setting. The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days” setting. 
The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // An Amazon Resource Name (ARN) that uniquely identifies a recovery point; @@ -10509,10 +10523,10 @@ type UpdateRecoveryPointLifecycleOutput struct { // according to the lifecycle that you define. // // Backups transitioned to cold storage must be stored in cold storage for a - // minimum of 90 days. Therefore, the “expire after days” setting must be 90 - // days greater than the “transition to cold after days” setting. The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // minimum of 90 days. Therefore, the “expire after days” setting must be + // 90 days greater than the “transition to cold after days” setting. The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // An Amazon Resource Name (ARN) that uniquely identifies a recovery point; diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/service.go index 2f60e1c1f54..873f24a9f9d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/backup/service.go @@ -46,11 +46,11 @@ const ( // svc := backup.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Backup { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Backup { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Backup { svc := &Backup{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-15", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/api.go index 3da099a41ae..27943044c88 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/api.go @@ -611,6 +611,12 @@ func (c *Batch) DescribeComputeEnvironmentsRequest(input *DescribeComputeEnviron Name: opDescribeComputeEnvironments, HTTPMethod: "POST", HTTPPath: "/v1/describecomputeenvironments", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -668,6 +674,58 @@ func (c *Batch) DescribeComputeEnvironmentsWithContext(ctx aws.Context, input *D return out, req.Send() } +// DescribeComputeEnvironmentsPages iterates over the pages of a DescribeComputeEnvironments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeComputeEnvironments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeComputeEnvironments operation. +// pageNum := 0 +// err := client.DescribeComputeEnvironmentsPages(params, +// func(page *batch.DescribeComputeEnvironmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Batch) DescribeComputeEnvironmentsPages(input *DescribeComputeEnvironmentsInput, fn func(*DescribeComputeEnvironmentsOutput, bool) bool) error { + return c.DescribeComputeEnvironmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeComputeEnvironmentsPagesWithContext same as DescribeComputeEnvironmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) DescribeComputeEnvironmentsPagesWithContext(ctx aws.Context, input *DescribeComputeEnvironmentsInput, fn func(*DescribeComputeEnvironmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeComputeEnvironmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeComputeEnvironmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeComputeEnvironmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeJobDefinitions = "DescribeJobDefinitions" // DescribeJobDefinitionsRequest generates a "aws/request.Request" representing the @@ -699,6 +757,12 @@ func (c *Batch) DescribeJobDefinitionsRequest(input *DescribeJobDefinitionsInput Name: opDescribeJobDefinitions, HTTPMethod: "POST", HTTPPath: "/v1/describejobdefinitions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -753,6 +817,58 @@ func (c *Batch) DescribeJobDefinitionsWithContext(ctx aws.Context, input *Descri return out, req.Send() } +// DescribeJobDefinitionsPages iterates over the pages of a DescribeJobDefinitions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeJobDefinitions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeJobDefinitions operation. +// pageNum := 0 +// err := client.DescribeJobDefinitionsPages(params, +// func(page *batch.DescribeJobDefinitionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Batch) DescribeJobDefinitionsPages(input *DescribeJobDefinitionsInput, fn func(*DescribeJobDefinitionsOutput, bool) bool) error { + return c.DescribeJobDefinitionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeJobDefinitionsPagesWithContext same as DescribeJobDefinitionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) DescribeJobDefinitionsPagesWithContext(ctx aws.Context, input *DescribeJobDefinitionsInput, fn func(*DescribeJobDefinitionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeJobDefinitionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeJobDefinitionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeJobDefinitionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeJobQueues = "DescribeJobQueues" // DescribeJobQueuesRequest generates a "aws/request.Request" representing the @@ -784,6 +900,12 @@ func (c *Batch) DescribeJobQueuesRequest(input *DescribeJobQueuesInput) (req *re Name: opDescribeJobQueues, HTTPMethod: "POST", HTTPPath: "/v1/describejobqueues", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -837,6 +959,58 @@ func (c *Batch) DescribeJobQueuesWithContext(ctx aws.Context, input *DescribeJob return out, req.Send() } +// DescribeJobQueuesPages iterates over the pages of a DescribeJobQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeJobQueues method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeJobQueues operation. +// pageNum := 0 +// err := client.DescribeJobQueuesPages(params, +// func(page *batch.DescribeJobQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Batch) DescribeJobQueuesPages(input *DescribeJobQueuesInput, fn func(*DescribeJobQueuesOutput, bool) bool) error { + return c.DescribeJobQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeJobQueuesPagesWithContext same as DescribeJobQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) DescribeJobQueuesPagesWithContext(ctx aws.Context, input *DescribeJobQueuesInput, fn func(*DescribeJobQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeJobQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeJobQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeJobQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeJobs = "DescribeJobs" // DescribeJobsRequest generates a "aws/request.Request" representing the @@ -952,6 +1126,12 @@ func (c *Batch) ListJobsRequest(input *ListJobsInput) (req *request.Request, out Name: opListJobs, HTTPMethod: "POST", HTTPPath: "/v1/listjobs", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1016,6 +1196,58 @@ func (c *Batch) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts return out, req.Send() } +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. +// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *batch.ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Batch) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { + return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJobsPagesWithContext same as ListJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opRegisterJobDefinition = "RegisterJobDefinition" // RegisterJobDefinitionRequest generates a "aws/request.Request" representing the @@ -1917,19 +2149,35 @@ func (s *ComputeEnvironmentOrder) SetOrder(v int64) *ComputeEnvironmentOrder { type ComputeResource struct { _ struct{} `type:"structure"` + // The allocation strategy to use for the compute resource in case not enough + // instances of the best fitting instance type can be allocated. This could + // be due to availability of the instance type in the region or Amazon EC2 service + // limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html). + // If this is not specified, the default is BEST_FIT, which will use only the + // best fitting instance type, waiting for additional capacity if it's not available. + // This allocation strategy keeps costs lower but can limit scaling. BEST_FIT_PROGRESSIVE + // will select an additional instance type that is large enough to meet the + // requirements of the jobs in the queue, with a preference for an instance + // type with a lower cost. SPOT_CAPACITY_OPTIMIZED is only available for Spot + // Instance compute resources and will select an additional instance type that + // is large enough to meet the requirements of the jobs in the queue, with a + // preference for an instance type that is less likely to be interrupted. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"CRAllocationStrategy"` + // The maximum percentage that a Spot Instance price can be when compared with // the On-Demand price for that instance type before instances are launched. // For example, if your maximum percentage is 20%, then the Spot price must - // be below 20% of the current On-Demand price for that EC2 instance. You always - // pay the lowest (market) price and never more than your maximum percentage. - // If you leave this field empty, the default value is 100% of the On-Demand - // price. + // be below 20% of the current On-Demand price for that Amazon EC2 instance. 
+ // You always pay the lowest (market) price and never more than your maximum + // percentage. If you leave this field empty, the default value is 100% of the + // On-Demand price. BidPercentage *int64 `locationName:"bidPercentage" type:"integer"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The EC2 key pair that is used for instances launched in the compute environment. + // The Amazon EC2 key pair that is used for instances launched in the compute + // environment. Ec2KeyPair *string `locationName:"ec2KeyPair" type:"string"` // The Amazon Machine Image (AMI) ID used for instances launched in the compute @@ -1938,16 +2186,16 @@ type ComputeResource struct { // The Amazon ECS instance profile applied to Amazon EC2 instances in a compute // environment. You can specify the short name or full Amazon Resource Name - // (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam:::instance-profile/ecsInstanceRole. - // For more information, see Amazon ECS Instance Role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html) + // (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam:::instance-profile/ecsInstanceRole + // . For more information, see Amazon ECS Instance Role (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html) // in the AWS Batch User Guide. // // InstanceRole is a required field InstanceRole *string `locationName:"instanceRole" type:"string" required:"true"` // The instances types that may be launched. You can specify instance families - // to launch any instance type within those families (for example, c4 or p3), - // or you can specify specific sizes within a family (such as c4.8xlarge). You + // to launch any instance type within those families (for example, c5 or p3), + // or you can specify specific sizes within a family (such as c5.8xlarge). You // can also choose optimal to pick instance types (from the C, M, and R instance // families) on the fly that match the demand of your job queues. // @@ -1962,13 +2210,13 @@ type ComputeResource struct { // in the AWS Batch User Guide. LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. // // MaxvCpus is a required field MaxvCpus *int64 `locationName:"maxvCpus" type:"integer" required:"true"` - // The minimum number of EC2 vCPUs that an environment should maintain (even - // if the compute environment is DISABLED). + // The minimum number of Amazon EC2 vCPUs that an environment should maintain + // (even if the compute environment is DISABLED). // // MinvCpus is a required field MinvCpus *int64 `locationName:"minvCpus" type:"integer" required:"true"` @@ -1982,8 +2230,11 @@ type ComputeResource struct { // in the Amazon EC2 User Guide for Linux Instances. PlacementGroup *string `locationName:"placementGroup" type:"string"` - // The EC2 security group that is associated with instances launched in the - // compute environment. + // The Amazon EC2 security groups associated with instances launched in the + // compute environment. One or more security groups must be specified, either + // in securityGroupIds or using a launch template referenced in launchTemplate. 
+ // If security groups are specified using both securityGroupIds and launchTemplate, + // the values in securityGroupIds will be used. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` // The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied @@ -1992,15 +2243,17 @@ type ComputeResource struct { // in the AWS Batch User Guide. SpotIamFleetRole *string `locationName:"spotIamFleetRole" type:"string"` - // The VPC subnets into which the compute resources are launched. + // The VPC subnets into which the compute resources are launched. For more information, + // see VPCs and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + // in the Amazon VPC User Guide. // // Subnets is a required field Subnets []*string `locationName:"subnets" type:"list" required:"true"` // Key-value pair tags to be applied to resources that are launched in the compute // environment. For AWS Batch, these take the form of "String1": "String2", - // where String1 is the tag key and String2 is the tag value—for example, { - // "Name": "AWS Batch Instance - C4OnDemand" }. + // where String1 is the tag key and String2 is the tag value—for example, + // { "Name": "AWS Batch Instance - C4OnDemand" }. Tags map[string]*string `locationName:"tags" type:"map"` // The type of compute environment: EC2 or SPOT. @@ -2047,6 +2300,12 @@ func (s *ComputeResource) Validate() error { return nil } +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *ComputeResource) SetAllocationStrategy(v string) *ComputeResource { + s.AllocationStrategy = &v + return s +} + // SetBidPercentage sets the BidPercentage field's value. func (s *ComputeResource) SetBidPercentage(v int64) *ComputeResource { s.BidPercentage = &v @@ -2142,13 +2401,13 @@ func (s *ComputeResource) SetType(v string) *ComputeResource { type ComputeResourceUpdate struct { _ struct{} `type:"structure"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. MaxvCpus *int64 `locationName:"maxvCpus" type:"integer"` - // The minimum number of EC2 vCPUs that an environment should maintain. + // The minimum number of Amazon EC2 vCPUs that an environment should maintain. MinvCpus *int64 `locationName:"minvCpus" type:"integer"` } @@ -2210,6 +2469,10 @@ type ContainerDetail struct { // The Amazon Resource Name (ARN) associated with the job upon execution. JobRoleArn *string `locationName:"jobRoleArn" type:"string"` + // Linux-specific modifications that are applied to the container, such as details + // for device mappings. + LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` + // The name of the CloudWatch Logs log stream associated with the container. // The log group for AWS Batch jobs is /aws/batch/job. Each container attempt // receives a log stream name when they reach the RUNNING status. @@ -2310,6 +2573,12 @@ func (s *ContainerDetail) SetJobRoleArn(v string) *ContainerDetail { return s } +// SetLinuxParameters sets the LinuxParameters field's value. +func (s *ContainerDetail) SetLinuxParameters(v *LinuxParameters) *ContainerDetail { + s.LinuxParameters = v + return s +} + // SetLogStreamName sets the LogStreamName field's value. 
func (s *ContainerDetail) SetLogStreamName(v string) *ContainerDetail { s.LogStreamName = &v @@ -2515,7 +2784,7 @@ type ContainerProperties struct { // The image used to start a container. This string is passed directly to the // Docker daemon. Images in the Docker Hub registry are available by default. - // Other repositories are specified with repository-url/image:tag. Up to 255 + // Other repositories are specified with repository-url/image:tag . Up to 255 // letters (uppercase and lowercase), numbers, hyphens, underscores, colons, // periods, forward slashes, and number signs are allowed. This parameter maps // to Image in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) @@ -2544,6 +2813,10 @@ type ContainerProperties struct { // for AWS permissions. JobRoleArn *string `locationName:"jobRoleArn" type:"string"` + // Linux-specific modifications that are applied to the container, such as details + // for device mappings. + LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` + // The hard limit (in MiB) of memory to present to the container. If your container // attempts to exceed the memory specified here, the container is killed. This // parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) @@ -2618,6 +2891,11 @@ func (s ContainerProperties) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ContainerProperties) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ContainerProperties"} + if s.LinuxParameters != nil { + if err := s.LinuxParameters.Validate(); err != nil { + invalidParams.AddNested("LinuxParameters", err.(request.ErrInvalidParams)) + } + } if s.ResourceRequirements != nil { for i, v := range s.ResourceRequirements { if v == nil { @@ -2675,6 +2953,12 @@ func (s *ContainerProperties) SetJobRoleArn(v string) *ContainerProperties { return s } +// SetLinuxParameters sets the LinuxParameters field's value. +func (s *ContainerProperties) SetLinuxParameters(v *LinuxParameters) *ContainerProperties { + s.LinuxParameters = v + return s +} + // SetMemory sets the Memory field's value. func (s *ContainerProperties) SetMemory(v int64) *ContainerProperties { s.Memory = &v @@ -3540,6 +3824,65 @@ func (s *DescribeJobsOutput) SetJobs(v []*JobDetail) *DescribeJobsOutput { return s } +// An object representing a container instance host device. +type Device struct { + _ struct{} `type:"structure"` + + // The path inside the container at which to expose the host device. By default + // the hostPath value is used. + ContainerPath *string `locationName:"containerPath" type:"string"` + + // The path for the device on the host container instance. + // + // HostPath is a required field + HostPath *string `locationName:"hostPath" type:"string" required:"true"` + + // The explicit permissions to provide to the container for the device. By default, + // the container has permissions for read, write, and mknod for the device. + Permissions []*string `locationName:"permissions" type:"list"` +} + +// String returns the string representation +func (s Device) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Device) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Device) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Device"} + if s.HostPath == nil { + invalidParams.Add(request.NewErrParamRequired("HostPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerPath sets the ContainerPath field's value. +func (s *Device) SetContainerPath(v string) *Device { + s.ContainerPath = &v + return s +} + +// SetHostPath sets the HostPath field's value. +func (s *Device) SetHostPath(v string) *Device { + s.HostPath = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *Device) SetPermissions(v []*string) *Device { + s.Permissions = v + return s +} + // Determine whether your data volume persists on the host container instance // and where it is stored. If this parameter is empty, then the Docker daemon // assigns a host path for your data volume, but the data is not guaranteed @@ -3794,8 +4137,9 @@ type JobDetail struct { // The current status for the job. // - // If your jobs do not progress to STARTING, see Jobs Stuck in (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable)RUNNABLE - // Status in the troubleshooting section of the AWS Batch User Guide. + // If your jobs do not progress to STARTING, see Jobs Stuck in RUNNABLE Status + // (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable) + // in the troubleshooting section of the AWS Batch User Guide. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` @@ -4244,6 +4588,54 @@ func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecif return s } +// Linux-specific modifications that are applied to the container, such as details +// for device mappings. +type LinuxParameters struct { + _ struct{} `type:"structure"` + + // Any host devices to expose to the container. This parameter maps to Devices + // in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) + // and the --device option to docker run (https://docs.docker.com/engine/reference/run/). + Devices []*Device `locationName:"devices" type:"list"` +} + +// String returns the string representation +func (s LinuxParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LinuxParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LinuxParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LinuxParameters"} + if s.Devices != nil { + for i, v := range s.Devices { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Devices", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDevices sets the Devices field's value. 
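Taken together with the DeviceCgroupPermission constants added further down, the new Device and LinuxParameters types let a job definition expose host devices to its containers. A hedged sketch under assumed placeholder names:

// Hypothetical sketch: register a job definition that exposes a host device
// to the container via the new LinuxParameters/Device types.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	svc := batch.New(session.Must(session.NewSession()))
	_, err := svc.RegisterJobDefinition(&batch.RegisterJobDefinitionInput{
		JobDefinitionName: aws.String("device-example"), // hypothetical name
		Type:              aws.String(batch.JobDefinitionTypeContainer),
		ContainerProperties: &batch.ContainerProperties{
			Image:  aws.String("amazonlinux"),
			Vcpus:  aws.Int64(1),
			Memory: aws.Int64(512),
			LinuxParameters: &batch.LinuxParameters{
				Devices: []*batch.Device{{
					HostPath:      aws.String("/dev/fuse"), // HostPath is the only required field
					ContainerPath: aws.String("/dev/fuse"), // defaults to HostPath when omitted
					// Default permissions are read, write, and mknod;
					// restrict the container to read-only access here.
					Permissions: aws.StringSlice([]string{batch.DeviceCgroupPermissionRead}),
				}},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}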
+func (s *LinuxParameters) SetDevices(v []*Device) *LinuxParameters { + s.Devices = v + return s +} + type ListJobsInput struct { _ struct{} `type:"structure"` @@ -5704,6 +6096,17 @@ const ( CETypeUnmanaged = "UNMANAGED" ) +const ( + // CRAllocationStrategyBestFit is a CRAllocationStrategy enum value + CRAllocationStrategyBestFit = "BEST_FIT" + + // CRAllocationStrategyBestFitProgressive is a CRAllocationStrategy enum value + CRAllocationStrategyBestFitProgressive = "BEST_FIT_PROGRESSIVE" + + // CRAllocationStrategySpotCapacityOptimized is a CRAllocationStrategy enum value + CRAllocationStrategySpotCapacityOptimized = "SPOT_CAPACITY_OPTIMIZED" +) + const ( // CRTypeEc2 is a CRType enum value CRTypeEc2 = "EC2" @@ -5712,6 +6115,17 @@ const ( CRTypeSpot = "SPOT" ) +const ( + // DeviceCgroupPermissionRead is a DeviceCgroupPermission enum value + DeviceCgroupPermissionRead = "READ" + + // DeviceCgroupPermissionWrite is a DeviceCgroupPermission enum value + DeviceCgroupPermissionWrite = "WRITE" + + // DeviceCgroupPermissionMknod is a DeviceCgroupPermission enum value + DeviceCgroupPermissionMknod = "MKNOD" +) + const ( // JQStateEnabled is a JQState enum value JQStateEnabled = "ENABLED" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/service.go index 6b10821b33f..e4f63ee8dc3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/batch/service.go @@ -46,11 +46,11 @@ const ( // svc := batch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Batch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Batch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Batch { svc := &Batch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-08-10", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go index 7bff348f406..575051caca2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go @@ -58,6 +58,11 @@ func (c *Budgets) CreateBudgetRequest(input *CreateBudgetInput) (req *request.Re // // Creates a budget and, if included, notifications and subscribers. // +// Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax +// at one time. Use the syntax that matches your case. The Request Syntax section +// shows the BudgetLimit syntax. 
For PlannedBudgetLimits, see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_CreateBudget.html#API_CreateBudget_Examples) +// section. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -80,6 +85,9 @@ func (c *Budgets) CreateBudgetRequest(input *CreateBudgetInput) (req *request.Re // * ErrCodeDuplicateRecordException "DuplicateRecordException" // The budget name already exists. Budget names must be unique within an account. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) CreateBudget(input *CreateBudgetInput) (*CreateBudgetOutput, error) { req, out := c.CreateBudgetRequest(input) return out, req.Send() @@ -172,6 +180,9 @@ func (c *Budgets) CreateNotificationRequest(input *CreateNotificationInput) (req // * ErrCodeDuplicateRecordException "DuplicateRecordException" // The budget name already exists. Budget names must be unique within an account. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) CreateNotification(input *CreateNotificationInput) (*CreateNotificationOutput, error) { req, out := c.CreateNotificationRequest(input) return out, req.Send() @@ -264,6 +275,9 @@ func (c *Budgets) CreateSubscriberRequest(input *CreateSubscriberInput) (req *re // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) CreateSubscriber(input *CreateSubscriberInput) (*CreateSubscriberOutput, error) { req, out := c.CreateSubscriberRequest(input) return out, req.Send() @@ -352,6 +366,9 @@ func (c *Budgets) DeleteBudgetRequest(input *DeleteBudgetInput) (req *request.Re // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DeleteBudget(input *DeleteBudgetInput) (*DeleteBudgetOutput, error) { req, out := c.DeleteBudgetRequest(input) return out, req.Send() @@ -440,6 +457,9 @@ func (c *Budgets) DeleteNotificationRequest(input *DeleteNotificationInput) (req // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DeleteNotification(input *DeleteNotificationInput) (*DeleteNotificationOutput, error) { req, out := c.DeleteNotificationRequest(input) return out, req.Send() @@ -527,6 +547,9 @@ func (c *Budgets) DeleteSubscriberRequest(input *DeleteSubscriberInput) (req *re // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. 
+// func (c *Budgets) DeleteSubscriber(input *DeleteSubscriberInput) (*DeleteSubscriberOutput, error) { req, out := c.DeleteSubscriberRequest(input) return out, req.Send() @@ -592,6 +615,10 @@ func (c *Budgets) DescribeBudgetRequest(input *DescribeBudgetInput) (req *reques // // Describes a budget. // +// The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, +// see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudget.html#API_DescribeBudget_Examples) +// section. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -611,6 +638,9 @@ func (c *Budgets) DescribeBudgetRequest(input *DescribeBudgetInput) (req *reques // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DescribeBudget(input *DescribeBudgetInput) (*DescribeBudgetOutput, error) { req, out := c.DescribeBudgetRequest(input) return out, req.Send() @@ -702,6 +732,9 @@ func (c *Budgets) DescribeBudgetPerformanceHistoryRequest(input *DescribeBudgetP // * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" // The pagination token expired. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DescribeBudgetPerformanceHistory(input *DescribeBudgetPerformanceHistoryInput) (*DescribeBudgetPerformanceHistoryOutput, error) { req, out := c.DescribeBudgetPerformanceHistoryRequest(input) return out, req.Send() @@ -767,6 +800,10 @@ func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *requ // // Lists the budgets that are associated with an account. // +// The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, +// see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudgets.html#API_DescribeBudgets_Examples) +// section. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -792,6 +829,9 @@ func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *requ // * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" // The pagination token expired. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DescribeBudgets(input *DescribeBudgetsInput) (*DescribeBudgetsOutput, error) { req, out := c.DescribeBudgetsRequest(input) return out, req.Send() @@ -882,6 +922,9 @@ func (c *Budgets) DescribeNotificationsForBudgetRequest(input *DescribeNotificat // * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" // The pagination token expired. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. 
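Because AccessDeniedException is now listed for most Budgets operations, callers can branch on the new error code through awserr. A minimal sketch with a placeholder account ID and a hypothetical budget:

// Hypothetical sketch: create a budget and branch on the error codes
// documented above, including the newly surfaced AccessDeniedException.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/budgets"
)

func main() {
	svc := budgets.New(session.Must(session.NewSession()))
	_, err := svc.CreateBudget(&budgets.CreateBudgetInput{
		AccountId: aws.String("123456789012"), // placeholder account
		Budget: &budgets.Budget{
			BudgetName: aws.String("example-monthly-cost"), // hypothetical name
			BudgetType: aws.String(budgets.BudgetTypeCost),
			TimeUnit:   aws.String(budgets.TimeUnitMonthly),
			BudgetLimit: &budgets.Spend{
				Amount: aws.String("100"),
				Unit:   aws.String("USD"),
			},
		},
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case budgets.ErrCodeAccessDeniedException:
				fmt.Println("not authorized for this operation:", aerr.Message())
			case budgets.ErrCodeDuplicateRecordException:
				fmt.Println("budget name already exists:", aerr.Message())
			default:
				fmt.Println("create failed:", aerr)
			}
		}
		return
	}
	fmt.Println("budget created")
}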
+// func (c *Budgets) DescribeNotificationsForBudget(input *DescribeNotificationsForBudgetInput) (*DescribeNotificationsForBudgetOutput, error) { req, out := c.DescribeNotificationsForBudgetRequest(input) return out, req.Send() @@ -972,6 +1015,9 @@ func (c *Budgets) DescribeSubscribersForNotificationRequest(input *DescribeSubsc // * ErrCodeExpiredNextTokenException "ExpiredNextTokenException" // The pagination token expired. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) DescribeSubscribersForNotification(input *DescribeSubscribersForNotificationInput) (*DescribeSubscribersForNotificationOutput, error) { req, out := c.DescribeSubscribersForNotificationRequest(input) return out, req.Send() @@ -1040,6 +1086,11 @@ func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Re // and the calculatedSpend. When you modify a budget, the calculatedSpend drops // to zero until AWS has new usage data to use for forecasting. // +// Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax +// at one time. Use the syntax that matches your case. The Request Syntax section +// shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_UpdateBudget.html#API_UpdateBudget_Examples) +// section. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1059,6 +1110,9 @@ func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Re // * ErrCodeNotFoundException "NotFoundException" // We can’t locate the resource that you specified. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) UpdateBudget(input *UpdateBudgetInput) (*UpdateBudgetOutput, error) { req, out := c.UpdateBudgetRequest(input) return out, req.Send() @@ -1147,6 +1201,9 @@ func (c *Budgets) UpdateNotificationRequest(input *UpdateNotificationInput) (req // * ErrCodeDuplicateRecordException "DuplicateRecordException" // The budget name already exists. Budget names must be unique within an account. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) UpdateNotification(input *UpdateNotificationInput) (*UpdateNotificationOutput, error) { req, out := c.UpdateNotificationRequest(input) return out, req.Send() @@ -1235,6 +1292,9 @@ func (c *Budgets) UpdateSubscriberRequest(input *UpdateSubscriberInput) (req *re // * ErrCodeDuplicateRecordException "DuplicateRecordException" // The budget name already exists. Budget names must be unique within an account. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You are not authorized to use this operation with the given parameters. +// func (c *Budgets) UpdateSubscriber(input *UpdateSubscriberInput) (*UpdateSubscriberOutput, error) { req, out := c.UpdateSubscriberRequest(input) return out, req.Send() @@ -1266,21 +1326,24 @@ func (c *Budgets) UpdateSubscriberWithContext(ctx aws.Context, input *UpdateSubs type Budget struct { _ struct{} `type:"structure"` - // The total amount of cost, usage, RI utilization, or RI coverage that you - // want to track with your budget. 
+ // The total amount of cost, usage, RI utilization, RI coverage, Savings Plans + // utilization, or Savings Plans coverage that you want to track with your budget. // - // BudgetLimit is required for cost or usage budgets, but optional for RI utilization - // or coverage budgets. RI utilization or coverage budgets default to 100, which - // is the only valid value for RI utilization or coverage budgets. + // BudgetLimit is required for cost or usage budgets, but optional for RI or + // Savings Plans utilization or coverage budgets. RI and Savings Plans utilization + // or coverage budgets default to 100, which is the only valid value for RI + // or Savings Plans utilization or coverage budgets. You can't use BudgetLimit + // with PlannedBudgetLimits for CreateBudget and UpdateBudget actions. BudgetLimit *Spend `type:"structure"` - // The name of a budget. The name must be unique within accounts. The : and + // The name of a budget. The name must be unique within an account. The : and // \ characters aren't allowed in BudgetName. // // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` - // Whether this budget tracks monetary costs, usage, RI utilization, or RI coverage. + // Whether this budget tracks costs, usage, RI utilization, RI coverage, Savings + // Plans utilization, or Savings Plans coverage. // // BudgetType is a required field BudgetType *string `type:"string" required:"true" enum:"BudgetType"` @@ -1288,7 +1351,7 @@ type Budget struct { // The actual and forecasted cost or usage that the budget tracks. CalculatedSpend *CalculatedSpend `type:"structure"` - // The cost filters, such as service or region, that are applied to a budget. + // The cost filters, such as service or tag, that are applied to a budget. // // AWS Budgets supports the following services as a filter for RI budgets: // @@ -1305,12 +1368,45 @@ type Budget struct { // The types of costs that are included in this COST budget. // - // USAGE, RI_UTILIZATION, and RI_COVERAGE budgets do not have CostTypes. + // USAGE, RI_UTILIZATION, RI_COVERAGE, Savings_Plans_Utilization, and Savings_Plans_Coverage + // budgets do not have CostTypes. CostTypes *CostTypes `type:"structure"` // The last time that you updated this budget. LastUpdatedTime *time.Time `type:"timestamp"` + // A map containing multiple BudgetLimit, including current or future limits. + // + // PlannedBudgetLimits is available for cost or usage budget and supports monthly + // and quarterly TimeUnit. + // + // For monthly budgets, provide 12 months of PlannedBudgetLimits values. This + // must start from the current month and include the next 11 months. The key + // is the start of the month, UTC in epoch seconds. + // + // For quarterly budgets, provide 4 quarters of PlannedBudgetLimits value entries + // in standard calendar quarter increments. This must start from the current + // quarter and include the next 3 quarters. The key is the start of the quarter, + // UTC in epoch seconds. + // + // If the planned budget expires before 12 months for monthly or 4 quarters + // for quarterly, provide the PlannedBudgetLimits values only for the remaining + // periods. + // + // If the budget begins at a date in the future, provide PlannedBudgetLimits + // values from the start date of the budget. + // + // After all of the BudgetLimit values in PlannedBudgetLimits are used, the + // budget continues to use the last limit as the BudgetLimit. 
At that point, + // the planned budget provides the same experience as a fixed budget. + // + // DescribeBudget and DescribeBudgets response along with PlannedBudgetLimits + // will also contain BudgetLimit representing the current month or quarter limit + // present in PlannedBudgetLimits. This only applies to budgets created with + // PlannedBudgetLimits. Budgets created without PlannedBudgetLimits will only + // contain BudgetLimit, and no PlannedBudgetLimits. + PlannedBudgetLimits map[string]*Spend `type:"map"` + // The period of time that is covered by a budget. The period has a start date // and an end date. The start date must come before the end date. The end date // must come before 06/15/87 00:00 UTC. @@ -1331,7 +1427,8 @@ type Budget struct { TimePeriod *TimePeriod `type:"structure"` // The length of time until a budget resets the actual and forecasted spend. - // DAILY is available only for RI_UTILIZATION and RI_COVERAGE budgets. + // DAILY is available only for RI_UTILIZATION, RI_COVERAGE, Savings_Plans_Utilization, + // and Savings_Plans_Coverage budgets. // // TimeUnit is a required field TimeUnit *string `type:"string" required:"true" enum:"TimeUnit"` @@ -1372,6 +1469,16 @@ func (s *Budget) Validate() error { invalidParams.AddNested("CalculatedSpend", err.(request.ErrInvalidParams)) } } + if s.PlannedBudgetLimits != nil { + for i, v := range s.PlannedBudgetLimits { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlannedBudgetLimits", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1421,6 +1528,12 @@ func (s *Budget) SetLastUpdatedTime(v time.Time) *Budget { return s } +// SetPlannedBudgetLimits sets the PlannedBudgetLimits field's value. +func (s *Budget) SetPlannedBudgetLimits(v map[string]*Spend) *Budget { + s.PlannedBudgetLimits = v + return s +} + // SetTimePeriod sets the TimePeriod field's value. func (s *Budget) SetTimePeriod(v *TimePeriod) *Budget { s.TimePeriod = v @@ -3195,6 +3308,8 @@ type Subscriber struct { // The address that AWS sends budget notifications to, either an SNS topic or // an email. // + // When you create a subscriber, the value of Address can't contain line breaks. + // // Address is a required field Address *string `min:"1" type:"string" required:"true" sensitive:"true"` @@ -3629,6 +3744,12 @@ const ( // BudgetTypeRiCoverage is a BudgetType enum value BudgetTypeRiCoverage = "RI_COVERAGE" + + // BudgetTypeSavingsPlansUtilization is a BudgetType enum value + BudgetTypeSavingsPlansUtilization = "SAVINGS_PLANS_UTILIZATION" + + // BudgetTypeSavingsPlansCoverage is a BudgetType enum value + BudgetTypeSavingsPlansCoverage = "SAVINGS_PLANS_COVERAGE" ) // The comparison operator of a notification. Currently the service supports diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go index 8737c2daa53..a67e8ecc41e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go @@ -4,6 +4,12 @@ package budgets const ( + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You are not authorized to use this operation with the given parameters. 
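The epoch-second keying that PlannedBudgetLimits expects, described in the Budget comments above, is easy to get wrong, so here is a hedged sketch that builds the twelve monthly entries for an illustrative cost budget (amounts are arbitrary):

// Hypothetical sketch: populate PlannedBudgetLimits with 12 monthly entries
// starting from the current month. Keys are the UTC start of each month in
// epoch seconds; values grow by $10 per month purely for illustration.
package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/budgets"
)

func main() {
	now := time.Now().UTC()
	start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)

	planned := map[string]*budgets.Spend{}
	for i := 0; i < 12; i++ {
		key := strconv.FormatInt(start.AddDate(0, i, 0).Unix(), 10)
		planned[key] = &budgets.Spend{
			Amount: aws.String(fmt.Sprintf("%d", 100+10*i)),
			Unit:   aws.String("USD"),
		}
	}

	b := &budgets.Budget{
		BudgetName: aws.String("example-planned"), // hypothetical name
		BudgetType: aws.String(budgets.BudgetTypeCost),
		TimeUnit:   aws.String(budgets.TimeUnitMonthly),
		// Per the notes above, BudgetLimit is omitted when PlannedBudgetLimits
		// is supplied to CreateBudget or UpdateBudget.
	}
	b.SetPlannedBudgetLimits(planned)
	fmt.Println(b)
}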
+ ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeCreationLimitExceededException for service response error code // "CreationLimitExceededException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go index dc10c2aaa1b..3d72b59e981 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go @@ -46,11 +46,11 @@ const ( // svc := budgets.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Budgets { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Budgets { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Budgets { svc := &Budgets{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-20", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go index 9b1020df463..299883bccd3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go @@ -520,7 +520,7 @@ func (c *Cloud9) DescribeEnvironmentMembershipsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a DescribeEnvironmentMemberships operation. // pageNum := 0 // err := client.DescribeEnvironmentMembershipsPages(params, -// func(page *DescribeEnvironmentMembershipsOutput, lastPage bool) bool { +// func(page *cloud9.DescribeEnvironmentMembershipsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -552,10 +552,12 @@ func (c *Cloud9) DescribeEnvironmentMembershipsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEnvironmentMembershipsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEnvironmentMembershipsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -867,7 +869,7 @@ func (c *Cloud9) ListEnvironmentsWithContext(ctx aws.Context, input *ListEnviron // // Example iterating over at most 3 pages of a ListEnvironments operation. 
// pageNum := 0 // err := client.ListEnvironmentsPages(params, -// func(page *ListEnvironmentsOutput, lastPage bool) bool { +// func(page *cloud9.ListEnvironmentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -899,10 +901,12 @@ func (c *Cloud9) ListEnvironmentsPagesWithContext(ctx aws.Context, input *ListEn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEnvironmentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEnvironmentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1810,11 +1814,15 @@ type EnvironmentLifecycle struct { // The current creation or deletion lifecycle state of the environment. // + // * CREATING: The environment is in the process of being created. + // // * CREATED: The environment was successfully created. // - // * DELETE_FAILED: The environment failed to delete. + // * CREATE_FAILED: The environment failed to be created. // // * DELETING: The environment is in the process of being deleted. + // + // * DELETE_FAILED: The environment failed to delete. Status *string `locationName:"status" type:"string" enum:"EnvironmentLifecycleStatus"` } @@ -2157,9 +2165,15 @@ func (s UpdateEnvironmentOutput) GoString() string { } const ( + // EnvironmentLifecycleStatusCreating is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreating = "CREATING" + // EnvironmentLifecycleStatusCreated is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusCreated = "CREATED" + // EnvironmentLifecycleStatusCreateFailed is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreateFailed = "CREATE_FAILED" + // EnvironmentLifecycleStatusDeleting is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusDeleting = "DELETING" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go index 8b9d30d5440..9f6d2871cfc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go @@ -46,11 +46,11 @@ const ( // svc := cloud9.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Cloud9 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Cloud9 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Cloud9 { svc := &Cloud9{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-23", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go index 212a58946fe..53df8f6eb52 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -143,7 +143,7 @@ func (c *CloudFormation) ContinueUpdateRollbackRequest(input *ContinueUpdateRoll // // For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues // rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause -// of the failure, you can manually fix the error (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) +// of the failure, you can manually fix the error (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) // and continue the rollback. By continuing the rollback, you can return your // stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try // to update the stack again. @@ -245,10 +245,11 @@ func (c *CloudFormation) CreateChangeSetRequest(input *CreateChangeSetInput) (re // // To create a change set for a stack that doesn't exist, for the ChangeSetType // parameter, specify CREATE. To create a change set for an existing stack, -// specify UPDATE for the ChangeSetType parameter. After the CreateChangeSet -// call successfully completes, AWS CloudFormation starts creating the change -// set. To check the status of the change set or to review it, use the DescribeChangeSet -// action. +// specify UPDATE for the ChangeSetType parameter. To create a change set for +// an import operation, specify IMPORT for the ChangeSetType parameter. After +// the CreateChangeSet call successfully completes, AWS CloudFormation starts +// creating the change set. To check the status of the change set or to review +// it, use the DescribeChangeSet action. // // When you are satisfied with the changes the change set will make, execute // the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't @@ -437,8 +438,8 @@ func (c *CloudFormation) CreateStackInstancesRequest(input *CreateStackInstances // // Creates stack instances for the specified accounts, within the specified // regions. A stack instance refers to a stack in a specific account and region. -// Accounts and Regions are required parameters—you must specify at least one -// account and one region. +// Accounts and Regions are required parameters—you must specify at least +// one account and one region. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1272,7 +1273,7 @@ func (c *CloudFormation) DescribeStackEventsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a DescribeStackEvents operation. // pageNum := 0 // err := client.DescribeStackEventsPages(params, -// func(page *DescribeStackEventsOutput, lastPage bool) bool { +// func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1304,10 +1305,12 @@ func (c *CloudFormation) DescribeStackEventsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeStackEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeStackEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1576,7 +1579,7 @@ func (c *CloudFormation) DescribeStackResourceDriftsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeStackResourceDrifts operation. // pageNum := 0 // err := client.DescribeStackResourceDriftsPages(params, -// func(page *DescribeStackResourceDriftsOutput, lastPage bool) bool { +// func(page *cloudformation.DescribeStackResourceDriftsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1608,10 +1611,12 @@ func (c *CloudFormation) DescribeStackResourceDriftsPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeStackResourceDriftsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeStackResourceDriftsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1961,7 +1966,7 @@ func (c *CloudFormation) DescribeStacksWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribeStacks operation. // pageNum := 0 // err := client.DescribeStacksPages(params, -// func(page *DescribeStacksOutput, lastPage bool) bool { +// func(page *cloudformation.DescribeStacksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1993,10 +1998,12 @@ func (c *CloudFormation) DescribeStacksPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeStacksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeStacksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2748,8 +2755,7 @@ func (c *CloudFormation) ListExportsRequest(input *ListExportsInput) (req *reque // import into other stacks. To import values, use the Fn::ImportValue (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-importvalue.html) // function. // -// For more information, see AWS CloudFormation Export Stack Output Values -// (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-exports.html). +// For more information, see AWS CloudFormation Export Stack Output Values (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-exports.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2790,7 +2796,7 @@ func (c *CloudFormation) ListExportsWithContext(ctx aws.Context, input *ListExpo // // Example iterating over at most 3 pages of a ListExports operation. 
// pageNum := 0 // err := client.ListExportsPages(params, -// func(page *ListExportsOutput, lastPage bool) bool { +// func(page *cloudformation.ListExportsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2822,10 +2828,12 @@ func (c *CloudFormation) ListExportsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2926,7 +2934,7 @@ func (c *CloudFormation) ListImportsWithContext(ctx aws.Context, input *ListImpo // // Example iterating over at most 3 pages of a ListImports operation. // pageNum := 0 // err := client.ListImportsPages(params, -// func(page *ListImportsOutput, lastPage bool) bool { +// func(page *cloudformation.ListImportsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2958,10 +2966,12 @@ func (c *CloudFormation) ListImportsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3140,7 +3150,7 @@ func (c *CloudFormation) ListStackResourcesWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListStackResources operation. // pageNum := 0 // err := client.ListStackResourcesPages(params, -// func(page *ListStackResourcesOutput, lastPage bool) bool { +// func(page *cloudformation.ListStackResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3172,10 +3182,12 @@ func (c *CloudFormation) ListStackResourcesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStackResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStackResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3510,7 +3522,7 @@ func (c *CloudFormation) ListStacksWithContext(ctx aws.Context, input *ListStack // // Example iterating over at most 3 pages of a ListStacks operation. // pageNum := 0 // err := client.ListStacksPages(params, -// func(page *ListStacksOutput, lastPage bool) bool { +// func(page *cloudformation.ListStacksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3542,10 +3554,12 @@ func (c *CloudFormation) ListStacksPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStacksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStacksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4143,10 +4157,10 @@ func (c *CloudFormation) UpdateTerminationProtectionRequest(input *UpdateTermina // Updates termination protection for the specified stack. If a user attempts // to delete a stack with termination protection enabled, the operation fails // and the stack remains unchanged. For more information, see Protecting a Stack -// From Being Deleted (AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html) +// From Being Deleted (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html) // in the AWS CloudFormation User Guide. 
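The rewritten Pages helpers above stop iterating as soon as the callback returns false and then surface any pagination error. A minimal sketch using the package-qualified callback signature from the corrected doc examples:

// Hypothetical sketch: page through stacks and stop after three pages.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession()))
	pageNum := 0
	err := svc.DescribeStacksPages(&cloudformation.DescribeStacksInput{},
		func(page *cloudformation.DescribeStacksOutput, lastPage bool) bool {
			pageNum++
			fmt.Println("page", pageNum, "stacks:", len(page.Stacks))
			return pageNum <= 3 // returning false now ends iteration immediately
		})
	if err != nil {
		fmt.Println("pagination failed:", err)
	}
}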
// -// For nested stacks (AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html), +// For nested stacks (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html), // termination protection is set on the root stack and cannot be changed directly // on the nested stack. // @@ -4285,18 +4299,14 @@ type AccountGateResult struct { // and region to FAILED. // // * SKIPPED: AWS CloudFormation has skipped calling the account gate function - // for this account and region, for one of the following reasons: - // - // An account gate function has not been specified for the account and region. - // AWS CloudFormation proceeds with the stack set operation in this account - // and region. - // - // The AWSCloudFormationStackSetExecutionRole of the stack set adminstration + // for this account and region, for one of the following reasons: An account + // gate function has not been specified for the account and region. AWS CloudFormation + // proceeds with the stack set operation in this account and region. The + // AWSCloudFormationStackSetExecutionRole of the stack set adminstration // account lacks permissions to invoke the function. AWS CloudFormation proceeds - // with the stack set operation in this account and region. - // - // Either no action is necessary, or no action is possible, on the stack. AWS - // CloudFormation skips the stack set operation in this account and region. + // with the stack set operation in this account and region. Either no action + // is necessary, or no action is possible, on the stack. AWS CloudFormation + // skips the stack set operation in this account and region. Status *string `type:"string" enum:"AccountGateStatus"` // The reason for the account gate status assigned to this account and region @@ -4326,13 +4336,25 @@ func (s *AccountGateResult) SetStatusReason(v string) *AccountGateResult { return s } -// The AccountLimit data type. For more information about account limits, see -// AWS CloudFormation Limits (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html) +// The AccountLimit data type. +// +// CloudFormation has the following limits per account: +// +// * Number of concurrent resources +// +// * Number of stacks +// +// * Number of stack outputs +// +// For more information about these account limits, and other CloudFormation +// limits, see AWS CloudFormation Limits (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html) // in the AWS CloudFormation User Guide. type AccountLimit struct { _ struct{} `type:"structure"` // The name of the account limit. + // + // Values: ConcurrentResourcesLimit | StackLimit | StackOutputsLimit Name *string `type:"string"` // The value that is associated with the account limit name. @@ -4484,8 +4506,8 @@ type ChangeSetSummary struct { Description *string `min:"1" type:"string"` // If the change set execution status is AVAILABLE, you can execute the change - // set. If you can’t execute the change set, the status indicates why. For example, - // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // set. If you can’t execute the change set, the status indicates why. For + // example, a change set might be in an UNAVAILABLE state because AWS CloudFormation // is still creating it or in an OBSOLETE state because the stack was already // updated. ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` @@ -4589,7 +4611,7 @@ type ContinueUpdateRollbackInput struct { // reason. 
// // Specify this property to skip rolling back resources that AWS CloudFormation - // can't successfully roll back. We recommend that you troubleshoot (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) + // can't successfully roll back. We recommend that you troubleshoot (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) // resources before skipping them. AWS CloudFormation sets the status of the // specified resources to UPDATE_COMPLETE and continues to roll back the stack. // After the rollback is complete, the state of the skipped resources will be @@ -4714,71 +4736,48 @@ func (s ContinueUpdateRollbackOutput) GoString() string { type CreateChangeSetInput struct { _ struct{} `type:"structure"` - // In some cases, you must explicity acknowledge that your stack template contains + // In some cases, you must explicitly acknowledge that your stack template contains // certain capabilities in order for AWS CloudFormation to create the stack. // - // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM - // - // Some stack templates might include resources that can affect permissions - // in your AWS account; for example, by creating new AWS Identity and Access - // Management (IAM) users. For those stacks, you must explicitly acknowledge - // this by specifying one of these capabilities. - // - // The following IAM resources require you to specify either the CAPABILITY_IAM - // or CAPABILITY_NAMED_IAM capability. - // - // If you have IAM resources, you can specify either capability. - // - // If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. - // - // - // If you don't specify either of these capabilities, AWS CloudFormation returns - // an InsufficientCapabilities error. - // - // If your stack template contains these resources, we recommend that you review - // all permissions associated with them and edit their permissions if necessary. - // - // AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) - // - // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) - // - // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) - // - // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) - // - // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) - // - // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) - // - // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include + // resources that can affect permissions in your AWS account; for example, + // by creating new AWS Identity and Access Management (IAM) users. For those + // stacks, you must explicitly acknowledge this by specifying one of these + // capabilities. The following IAM resources require you to specify either + // the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM + // resources, you can specify either capability. 
If you have IAM resources + // with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't + // specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities + // error. If your stack template contains these resources, we recommend that + // you review all permissions associated with them and edit their permissions + // if necessary. AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) + // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) + // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) + // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) + // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) + // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) + // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) + // For more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // * CAPABILITY_AUTO_EXPAND - // - // Some template contain macros. Macros perform custom processing on templates; - // this can include simple actions like find-and-replace operations, all - // the way to extensive transformations of entire templates. Because of this, - // users typically create a change set from the processed template, so that - // they can review the changes resulting from the macros before actually - // creating the stack. If your stack template contains one or more macros, - // and you choose to create a stack directly from the processed template, - // without first reviewing the resulting changes in a change set, you must - // acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html) + // * CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform + // custom processing on templates; this can include simple actions like find-and-replace + // operations, all the way to extensive transformations of entire templates. + // Because of this, users typically create a change set from the processed + // template, so that they can review the changes resulting from the macros + // before actually creating the stack. If your stack template contains one + // or more macros, and you choose to create a stack directly from the processed + // template, without first reviewing the resulting changes in a change set, + // you must acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html) // and AWS::Serverless (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html) - // transforms, which are macros hosted by AWS CloudFormation. - // - // This capacity does not apply to creating change sets, and specifying it when - // creating change sets has no effect. 
- // - // Also, change sets do not currently support nested stacks. If you want to - // create a stack from a stack template that contains macros and nested stacks, - // you must create or update the stack directly from the template using the - // CreateStack or UpdateStack action, and specifying this capability. - // - // For more information on macros, see Using AWS CloudFormation Macros to Perform - // Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html). + // transforms, which are macros hosted by AWS CloudFormation. This capacity + // does not apply to creating change sets, and specifying it when creating + // change sets has no effect. Also, change sets do not currently support + // nested stacks. If you want to create a stack from a stack template that + // contains macros and nested stacks, you must create or update the stack + // directly from the template using the CreateStack or UpdateStack action, + // and specifying this capability. For more information on macros, see Using + // AWS CloudFormation Macros to Perform Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html). Capabilities []*string `type:"list"` // The name of the change set. The name must be unique among all change sets @@ -4793,6 +4792,7 @@ type CreateChangeSetInput struct { // The type of change set operation. To create a change set for a new stack, // specify CREATE. To create a change set for an existing stack, specify UPDATE. + // To create a change set for an import operation, specify IMPORT. // // If you create a change set for a new stack, AWS Cloudformation creates a // stack with a unique stack ID, but no template or resources. The stack will @@ -4835,6 +4835,9 @@ type CreateChangeSetInput struct { // in the AWS CloudFormation User Guide. ResourceTypes []*string `type:"list"` + // The resources to import into your stack. + ResourcesToImport []*ResourceToImport `type:"list"` + // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) // role that AWS CloudFormation assumes when executing the change set. AWS CloudFormation // uses the role's credentials to make calls on your behalf. AWS CloudFormation @@ -4925,6 +4928,16 @@ func (s *CreateChangeSetInput) Validate() error { if s.TemplateURL != nil && len(*s.TemplateURL) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1)) } + if s.ResourcesToImport != nil { + for i, v := range s.ResourcesToImport { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourcesToImport", i), err.(request.ErrInvalidParams)) + } + } + } if s.RollbackConfiguration != nil { if err := s.RollbackConfiguration.Validate(); err != nil { invalidParams.AddNested("RollbackConfiguration", err.(request.ErrInvalidParams)) @@ -4995,6 +5008,12 @@ func (s *CreateChangeSetInput) SetResourceTypes(v []*string) *CreateChangeSetInp return s } +// SetResourcesToImport sets the ResourcesToImport field's value. +func (s *CreateChangeSetInput) SetResourcesToImport(v []*ResourceToImport) *CreateChangeSetInput { + s.ResourcesToImport = v + return s +} + // SetRoleARN sets the RoleARN field's value. 
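To make the new import path concrete, a hedged sketch of creating an IMPORT change set for an existing S3 bucket; the stack, change set, bucket, and template URL are placeholders, and the referenced template is assumed to already declare MyBucket (import operations typically also require a DeletionPolicy on the imported resource):

// Hypothetical sketch: bring an existing bucket under CloudFormation
// management with the new ChangeSetType IMPORT and ResourcesToImport field.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession()))
	out, err := svc.CreateChangeSet(&cloudformation.CreateChangeSetInput{
		StackName:     aws.String("import-example"),             // hypothetical stack
		ChangeSetName: aws.String("import-bucket"),              // hypothetical change set
		ChangeSetType: aws.String("IMPORT"),
		TemplateURL:   aws.String("https://example.com/t.yaml"), // placeholder template
		ResourcesToImport: []*cloudformation.ResourceToImport{{
			LogicalResourceId:  aws.String("MyBucket"),
			ResourceType:       aws.String("AWS::S3::Bucket"),
			ResourceIdentifier: aws.StringMap(map[string]string{"BucketName": "my-existing-bucket"}),
		}},
	})
	if err != nil {
		fmt.Println("CreateChangeSet failed:", err)
		return
	}
	fmt.Println("change set:", aws.StringValue(out.Id))
}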
func (s *CreateChangeSetInput) SetRoleARN(v string) *CreateChangeSetInput { s.RoleARN = &v @@ -5074,74 +5093,50 @@ func (s *CreateChangeSetOutput) SetStackId(v string) *CreateChangeSetOutput { type CreateStackInput struct { _ struct{} `type:"structure"` - // In some cases, you must explicity acknowledge that your stack template contains + // In some cases, you must explicitly acknowledge that your stack template contains // certain capabilities in order for AWS CloudFormation to create the stack. // - // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM - // - // Some stack templates might include resources that can affect permissions - // in your AWS account; for example, by creating new AWS Identity and Access - // Management (IAM) users. For those stacks, you must explicitly acknowledge - // this by specifying one of these capabilities. - // - // The following IAM resources require you to specify either the CAPABILITY_IAM - // or CAPABILITY_NAMED_IAM capability. - // - // If you have IAM resources, you can specify either capability. - // - // If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. - // - // - // If you don't specify either of these capabilities, AWS CloudFormation returns - // an InsufficientCapabilities error. - // - // If your stack template contains these resources, we recommend that you review - // all permissions associated with them and edit their permissions if necessary. - // - // AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) - // - // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) - // - // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) - // - // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) - // - // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) - // - // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) - // - // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include + // resources that can affect permissions in your AWS account; for example, + // by creating new AWS Identity and Access Management (IAM) users. For those + // stacks, you must explicitly acknowledge this by specifying one of these + // capabilities. The following IAM resources require you to specify either + // the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM + // resources, you can specify either capability. If you have IAM resources + // with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't + // specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities + // error. If your stack template contains these resources, we recommend that + // you review all permissions associated with them and edit their permissions + // if necessary. 
AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html)
+	// AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html)
+	// AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html)
+	// AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html)
+	// AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html)
+	// AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html)
+	// AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html)
+	// For more information, see Acknowledging IAM Resources in AWS CloudFormation
 	// Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities).
 	//
-	// * CAPABILITY_AUTO_EXPAND
-	//
-	// Some template contain macros. Macros perform custom processing on templates;
-	// this can include simple actions like find-and-replace operations, all
-	// the way to extensive transformations of entire templates. Because of this,
-	// users typically create a change set from the processed template, so that
-	// they can review the changes resulting from the macros before actually
-	// creating the stack. If your stack template contains one or more macros,
-	// and you choose to create a stack directly from the processed template,
-	// without first reviewing the resulting changes in a change set, you must
-	// acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html)
+	// * CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform
+	// custom processing on templates; this can include simple actions like find-and-replace
+	// operations, all the way to extensive transformations of entire templates.
+	// Because of this, users typically create a change set from the processed
+	// template, so that they can review the changes resulting from the macros
+	// before actually creating the stack. If your stack template contains one
+	// or more macros, and you choose to create a stack directly from the processed
+	// template, without first reviewing the resulting changes in a change set,
+	// you must acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html)
 	// and AWS::Serverless (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html)
-	// transforms, which are macros hosted by AWS CloudFormation.
-	//
-	// Change sets do not currently support nested stacks. If you want to create
-	// a stack from a stack template that contains macros and nested stacks,
-	// you must create the stack directly from the template using this capability.
-	//
-	// You should only create stacks directly from a stack template that contains
-	// macros if you know what processing the macro performs.
-	//
-	// Each macro relies on an underlying Lambda service function for processing
-	// stack templates. Be aware that the Lambda function owner can update the
-	// function operation without AWS CloudFormation being notified.
-	//
-	// For more information, see Using AWS CloudFormation Macros to Perform Custom
-	// Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
+	// transforms, which are macros hosted by AWS CloudFormation. Change sets
+	// do not currently support nested stacks. If you want to create a stack
+	// from a stack template that contains macros and nested stacks, you must
+	// create the stack directly from the template using this capability. You
+	// should only create stacks directly from a stack template that contains
+	// macros if you know what processing the macro performs. Each macro relies
+	// on an underlying Lambda service function for processing stack templates.
+	// Be aware that the Lambda function owner can update the function operation
+	// without AWS CloudFormation being notified. For more information, see Using
+	// AWS CloudFormation Macros to Perform Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
 	Capabilities []*string `type:"list"`

 	// A unique identifier for this CreateStack request. Specify this token if you
@@ -5239,8 +5234,8 @@ type CreateStackInput struct {
 	// StackName is a required field
 	StackName *string `type:"string" required:"true"`

-	// Structure containing the stack policy body. For more information, go to
-	// Prevent Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
+	// Structure containing the stack policy body. For more information, go to Prevent
+	// Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
 	// in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody
 	// or the StackPolicyURL parameter, but not both.
 	StackPolicyBody *string `min:"1" type:"string"`
@@ -5478,12 +5473,9 @@ type CreateStackInstancesInput struct {
 	// and specify its value.
 	//
 	// * To leave a parameter set to its present value, you can do one of the
-	// following:
-	//
-	// Do not include the parameter in the list.
-	//
-	// Include the parameter and specify UsePreviousValue as true. (You cannot specify
-	// both a value and set UsePreviousValue to true.)
+	// following: Do not include the parameter in the list. Include the parameter
+	// and specify UsePreviousValue as true. (You cannot specify both a value
+	// and set UsePreviousValue to true.)
 	//
 	// * To set all overridden parameters back to the values specified in the
 	// stack set, specify a parameter list but do not include any parameters.
@@ -5645,57 +5637,38 @@ type CreateStackSetInput struct {
 	// in the AWS CloudFormation User Guide.
 	AdministrationRoleARN *string `min:"20" type:"string"`

-	// In some cases, you must explicity acknowledge that your stack set template
+	// In some cases, you must explicitly acknowledge that your stack set template
 	// contains certain capabilities in order for AWS CloudFormation to create the
 	// stack set and related stack instances.
 	//
-	// * CAPABILITY_IAM and CAPABILITY_NAMED_IAM
-	//
-	// Some stack templates might include resources that can affect permissions
-	// in your AWS account; for example, by creating new AWS Identity and Access
-	// Management (IAM) users. For those stack sets, you must explicitly acknowledge
-	// this by specifying one of these capabilities.
- // - // The following IAM resources require you to specify either the CAPABILITY_IAM - // or CAPABILITY_NAMED_IAM capability. - // - // If you have IAM resources, you can specify either capability. - // - // If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. - // - // - // If you don't specify either of these capabilities, AWS CloudFormation returns - // an InsufficientCapabilities error. - // - // If your stack template contains these resources, we recommend that you review - // all permissions associated with them and edit their permissions if necessary. - // - // AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) - // - // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) - // - // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) - // - // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) - // - // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) - // - // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) - // - // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include + // resources that can affect permissions in your AWS account; for example, + // by creating new AWS Identity and Access Management (IAM) users. For those + // stack sets, you must explicitly acknowledge this by specifying one of + // these capabilities. The following IAM resources require you to specify + // either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have + // IAM resources, you can specify either capability. If you have IAM resources + // with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't + // specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities + // error. If your stack template contains these resources, we recommend that + // you review all permissions associated with them and edit their permissions + // if necessary. AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) + // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) + // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) + // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) + // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) + // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) + // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) + // For more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). 
// - // * CAPABILITY_AUTO_EXPAND - // - // Some templates contain macros. If your stack template contains one or more - // macros, and you choose to create a stack directly from the processed template, - // without first reviewing the resulting changes in a change set, you must - // acknowledge this capability. For more information, see Using AWS CloudFormation - // Macros to Perform Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html). - // - // Stack sets do not currently support macros in stack templates. (This includes + // * CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack + // template contains one or more macros, and you choose to create a stack + // directly from the processed template, without first reviewing the resulting + // changes in a change set, you must acknowledge this capability. For more + // information, see Using AWS CloudFormation Macros to Perform Custom Processing + // on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html). + // Stack sets do not currently support macros in stack templates. (This includes // the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html) // and AWS::Serverless (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html) // transforms, which are macros hosted by AWS CloudFormation.) Even if you @@ -6445,8 +6418,8 @@ type DescribeChangeSetOutput struct { Description *string `min:"1" type:"string"` // If the change set execution status is AVAILABLE, you can execute the change - // set. If you can’t execute the change set, the status indicates why. For example, - // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // set. If you can’t execute the change set, the status indicates why. For + // example, a change set might be in an UNAVAILABLE state because AWS CloudFormation // is still creating it or in an OBSOLETE state because the stack was already // updated. ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` @@ -6647,9 +6620,8 @@ type DescribeStackDriftDetectionStatusOutput struct { // * DETECTION_COMPLETE: The stack drift detection operation has successfully // completed for all resources in the stack that support drift detection. // (Resources that do not currently support stack detection remain unchecked.) - // - // If you specified logical resource IDs for AWS CloudFormation to use as a - // filter for the stack drift detection operation, only the resources with + // If you specified logical resource IDs for AWS CloudFormation to use as + // a filter for the stack drift detection operation, only the resources with // those logical IDs are checked for drift. // // * DETECTION_FAILED: The stack drift detection operation has failed for @@ -7913,7 +7885,7 @@ type GetStackPolicyOutput struct { _ struct{} `type:"structure"` // Structure containing the stack policy body. (For more information, go to - // Prevent Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) + // Prevent Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) // in the AWS CloudFormation User Guide.) StackPolicyBody *string `min:"1" type:"string"` } @@ -8168,6 +8140,12 @@ type GetTemplateSummaryOutput struct { // parameter. 
Parameters []*ParameterDeclaration `type:"list"` + // A list of resource identifier summaries that describe the target resources + // of an import operation and the properties you can provide during the import + // to identify the target resources. For example, BucketName is a possible identifier + // property for an AWS::S3::Bucket resource. + ResourceIdentifierSummaries []*ResourceIdentifierSummary `type:"list"` + // A list of all the template resource types that are defined in the template, // such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance. ResourceTypes []*string `type:"list"` @@ -8223,6 +8201,12 @@ func (s *GetTemplateSummaryOutput) SetParameters(v []*ParameterDeclaration) *Get return s } +// SetResourceIdentifierSummaries sets the ResourceIdentifierSummaries field's value. +func (s *GetTemplateSummaryOutput) SetResourceIdentifierSummaries(v []*ResourceIdentifierSummary) *GetTemplateSummaryOutput { + s.ResourceIdentifierSummaries = v + return s +} + // SetResourceTypes sets the ResourceTypes field's value. func (s *GetTemplateSummaryOutput) SetResourceTypes(v []*string) *GetTemplateSummaryOutput { s.ResourceTypes = v @@ -9176,8 +9160,8 @@ type Parameter struct { ParameterValue *string `type:"string"` // Read-only. The value that corresponds to a Systems Manager parameter key. - // This field is returned only for SSM (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types) - // parameter types in the template. + // This field is returned only for SSM parameter types (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types) + // in the template. ResolvedValue *string `type:"string"` // During a stack update, use the existing parameter value that the stack is @@ -9617,6 +9601,53 @@ func (s *ResourceChangeDetail) SetTarget(v *ResourceTargetDefinition) *ResourceC return s } +// Describes the target resources of a specific type in your import template +// (for example, all AWS::S3::Bucket resources) and the properties you can provide +// during the import to identify resources of that type. +type ResourceIdentifierSummary struct { + _ struct{} `type:"structure"` + + // The logical IDs of the target resources of the specified ResourceType, as + // defined in the import template. + LogicalResourceIds []*string `min:"1" type:"list"` + + // The resource properties you can provide during the import to identify your + // target resources. For example, BucketName is a possible identifier property + // for AWS::S3::Bucket resources. + ResourceIdentifiers []*string `type:"list"` + + // The template resource type of the target resources, such as AWS::S3::Bucket. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceIdentifierSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceIdentifierSummary) GoString() string { + return s.String() +} + +// SetLogicalResourceIds sets the LogicalResourceIds field's value. +func (s *ResourceIdentifierSummary) SetLogicalResourceIds(v []*string) *ResourceIdentifierSummary { + s.LogicalResourceIds = v + return s +} + +// SetResourceIdentifiers sets the ResourceIdentifiers field's value. 
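For illustration only (not part of the vendored file): a minimal sketch of using the new ResourceIdentifierSummaries field to discover which properties can identify importable resources, assuming a default session; the template URL is hypothetical.

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/cloudformation"
	)

	func main() {
		svc := cloudformation.New(session.Must(session.NewSession()))

		// Ask CloudFormation which properties identify the importable
		// resources declared in a template; the URL is hypothetical.
		out, err := svc.GetTemplateSummary(&cloudformation.GetTemplateSummaryInput{
			TemplateURL: aws.String("https://s3.amazonaws.com/my-templates/import.yaml"),
		})
		if err != nil {
			fmt.Println("GetTemplateSummary failed:", err)
			return
		}
		for _, s := range out.ResourceIdentifierSummaries {
			fmt.Printf("type %s: logical IDs %v, identifier properties %v\n",
				aws.StringValue(s.ResourceType),
				aws.StringValueSlice(s.LogicalResourceIds),
				aws.StringValueSlice(s.ResourceIdentifiers))
		}
	}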
+func (s *ResourceIdentifierSummary) SetResourceIdentifiers(v []*string) *ResourceIdentifierSummary { + s.ResourceIdentifiers = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ResourceIdentifierSummary) SetResourceType(v string) *ResourceIdentifierSummary { + s.ResourceType = &v + return s +} + // The field that AWS CloudFormation will change, such as the name of a resource's // property, and whether the resource will be recreated. type ResourceTargetDefinition struct { @@ -9666,6 +9697,81 @@ func (s *ResourceTargetDefinition) SetRequiresRecreation(v string) *ResourceTarg return s } +// Describes the target resource of an import operation. +type ResourceToImport struct { + _ struct{} `type:"structure"` + + // The logical ID of the target resource as specified in the template. + // + // LogicalResourceId is a required field + LogicalResourceId *string `type:"string" required:"true"` + + // A key-value pair that identifies the target resource. The key is an identifier + // property (for example, BucketName for AWS::S3::Bucket resources) and the + // value is the actual property value (for example, MyS3Bucket). + // + // ResourceIdentifier is a required field + ResourceIdentifier map[string]*string `min:"1" type:"map" required:"true"` + + // The type of resource to import into your stack, such as AWS::S3::Bucket. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceToImport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceToImport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceToImport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceToImport"} + if s.LogicalResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("LogicalResourceId")) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil && len(s.ResourceIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifier", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogicalResourceId sets the LogicalResourceId field's value. +func (s *ResourceToImport) SetLogicalResourceId(v string) *ResourceToImport { + s.LogicalResourceId = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *ResourceToImport) SetResourceIdentifier(v map[string]*string) *ResourceToImport { + s.ResourceIdentifier = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ResourceToImport) SetResourceType(v string) *ResourceToImport { + s.ResourceType = &v + return s +} + // Structure containing the rollback triggers for AWS CloudFormation to monitor // during stack creation and updating operations, and for the specified monitoring // period afterwards. @@ -9834,8 +9940,8 @@ type SetStackPolicyInput struct { // StackName is a required field StackName *string `type:"string" required:"true"` - // Structure containing the stack policy body. 
For more information, go to - // Prevent Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) + // Structure containing the stack policy body. For more information, go to Prevent + // Updates to Stack Resources (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) // in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody // or the StackPolicyURL parameter, but not both. StackPolicyBody *string `min:"1" type:"string"` @@ -10089,7 +10195,7 @@ type Stack struct { RollbackConfiguration *RollbackConfiguration `type:"structure"` // For nested stacks--stacks created as resources for another stack--the stack - // ID of the the top-level stack to which the nested stack ultimately belongs. + // ID of the top-level stack to which the nested stack ultimately belongs. // // For more information, see Working with Nested Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) // in the AWS CloudFormation User Guide. @@ -10403,7 +10509,7 @@ type StackEvent struct { // Success/failure message associated with the resource. ResourceStatusReason *string `type:"string"` - // Type of resource. (For more information, go to AWS Resource Types Reference + // Type of resource. (For more information, go to AWS Resource Types Reference // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) // in the AWS CloudFormation User Guide.) ResourceType *string `min:"1" type:"string"` @@ -10503,10 +10609,10 @@ func (s *StackEvent) SetTimestamp(v time.Time) *StackEvent { // An AWS CloudFormation stack, in a specific account and region, that's part // of a stack set operation. A stack instance is a reference to an attempted // or actual stack in a given account within a given region. A stack instance -// can exist without a stack—for example, if the stack couldn't be created for -// some reason. A stack instance is associated with only one stack set. Each -// stack instance contains the ID of its associated stack set, as well as the -// ID of the actual stack and the stack status. +// can exist without a stack—for example, if the stack couldn't be created +// for some reason. A stack instance is associated with only one stack set. +// Each stack instance contains the ID of its associated stack set, as well +// as the ID of the actual stack and the stack status. type StackInstance struct { _ struct{} `type:"structure"` @@ -10537,12 +10643,9 @@ type StackInstance struct { // and then delete the stack manually. // // * OUTDATED: The stack isn't currently up to date with the stack set because: - // - // The associated stack failed during a CreateStackSet or UpdateStackSet operation. - // - // - // The stack was part of a CreateStackSet or UpdateStackSet operation that failed - // or was stopped before the stack was created or updated. + // The associated stack failed during a CreateStackSet or UpdateStackSet + // operation. The stack was part of a CreateStackSet or UpdateStackSet operation + // that failed or was stopped before the stack was created or updated. // // * CURRENT: The stack is currently up to date with the stack set. Status *string `type:"string" enum:"StackInstanceStatus"` @@ -10631,12 +10734,9 @@ type StackInstanceSummary struct { // and then delete the stack manually. 
 	//
 	// * OUTDATED: The stack isn't currently up to date with the stack set because:
-	//
-	// The associated stack failed during a CreateStackSet or UpdateStackSet operation.
-	//
-	//
-	// The stack was part of a CreateStackSet or UpdateStackSet operation that failed
-	// or was stopped before the stack was created or updated.
+	// The associated stack failed during a CreateStackSet or UpdateStackSet
+	// operation. The stack was part of a CreateStackSet or UpdateStackSet operation
+	// that failed or was stopped before the stack was created or updated.
 	//
 	// * CURRENT: The stack is currently up to date with the stack set.
 	Status *string `type:"string" enum:"StackInstanceStatus"`
@@ -10721,7 +10821,7 @@ type StackResource struct {
 	// Success/failure message associated with the resource.
 	ResourceStatusReason *string `type:"string"`

-	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// Type of resource. (For more information, go to AWS Resource Types Reference
 	// (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
 	// in the AWS CloudFormation User Guide.)
 	//
@@ -10850,7 +10950,7 @@ type StackResourceDetail struct {
 	// Success/failure message associated with the resource.
 	ResourceStatusReason *string `type:"string"`

-	// Type of resource. ((For more information, go to AWS Resource Types Reference
+	// Type of resource. (For more information, go to AWS Resource Types Reference
 	// (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
 	// in the AWS CloudFormation User Guide.)
 	//
@@ -11110,12 +11210,9 @@ type StackResourceDriftInformation struct {
 	// * MODIFIED: The resource differs from its expected configuration.
 	//
 	// * NOT_CHECKED: AWS CloudFormation has not checked if the resource differs
-	// from its expected configuration.
-	//
-	// Any resources that do not currently support drift detection have a status
-	// of NOT_CHECKED. For more information, see Resources that Support Drift
-	// Detection (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html).
-	//
+	// from its expected configuration. Any resources that do not currently support
+	// drift detection have a status of NOT_CHECKED. For more information, see
+	// Resources that Support Drift Detection (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html).
 	//
 	// * IN_SYNC: The resource's actual configuration matches its expected configuration.
 	//
@@ -11162,11 +11259,9 @@ type StackResourceDriftInformationSummary struct {
 	// * MODIFIED: The resource differs from its expected configuration.
 	//
 	// * NOT_CHECKED: AWS CloudFormation has not checked if the resource differs
-	// from its expected configuration.
-	//
-	// Any resources that do not currently support drift detection have a status
-	// of NOT_CHECKED. For more information, see Resources that Support Drift
-	// Detection (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html).
+	// from its expected configuration. Any resources that do not currently support
+	// drift detection have a status of NOT_CHECKED. For more information, see
+	// Resources that Support Drift Detection (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html).
 	// If you performed a ContinueUpdateRollback operation on a stack, any resources
 	// included in ResourcesToSkip will also have a status of NOT_CHECKED. For
 	// more information on skipping resources during rollback operations, see
@@ -11233,7 +11328,7 @@ type StackResourceSummary struct {
 	// Success/failure message associated with the resource.
 	ResourceStatusReason *string `type:"string"`

-	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// Type of resource. (For more information, go to AWS Resource Types Reference
 	// (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
 	// in the AWS CloudFormation User Guide.)
 	//
@@ -11494,7 +11589,6 @@ type StackSetOperation struct {
 	//
 	// * STOPPING: The operation is in the process of stopping, at user request.
 	//
-	//
 	// * SUCCEEDED: The operation completed creating or updating all the specified
 	// stacks without exceeding the failure tolerance for the operation.
 	Status *string `type:"string" enum:"StackSetOperationStatus"`
@@ -11709,12 +11803,10 @@ type StackSetOperationResultSummary struct {
 	// cancelled. This is either because a user has stopped the stack set operation,
 	// or because the failure tolerance of the stack set operation has been exceeded.
 	//
-	// * FAILED: The operation in the specified account and region failed.
-	//
-	// If the stack set operation fails in enough accounts within a region, the
+	// * FAILED: The operation in the specified account and region failed. If
+	// the stack set operation fails in enough accounts within a region, the
 	// failure tolerance for the stack set operation as a whole might be exceeded.
 	//
-	//
 	// * RUNNING: The operation in the specified account and region is currently
 	// in progress.
 	//
@@ -11810,7 +11902,6 @@ type StackSetOperationSummary struct {
 	//
 	// * STOPPING: The operation is in the process of stopping, at user request.
 	//
-	//
 	// * SUCCEEDED: The operation completed creating or updating all the specified
 	// stacks without exceeding the failure tolerance for the operation.
 	Status *string `type:"string" enum:"StackSetOperationStatus"`
@@ -11940,7 +12031,7 @@ type StackSummary struct {
 	ParentId *string `type:"string"`

 	// For nested stacks--stacks created as resources for another stack--the stack
-	// ID of the the top-level stack to which the nested stack ultimately belongs.
+	// ID of the top-level stack to which the nested stack ultimately belongs.
 	//
 	// For more information, see Working with Nested Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html)
 	// in the AWS CloudFormation User Guide.
@@ -12231,74 +12322,50 @@ func (s *TemplateParameter) SetParameterKey(v string) *TemplateParameter {
 type UpdateStackInput struct {
 	_ struct{} `type:"structure"`

-	// In some cases, you must explicity acknowledge that your stack template contains
+	// In some cases, you must explicitly acknowledge that your stack template contains
 	// certain capabilities in order for AWS CloudFormation to update the stack.
 	//
-	// * CAPABILITY_IAM and CAPABILITY_NAMED_IAM
-	//
-	// Some stack templates might include resources that can affect permissions
-	// in your AWS account; for example, by creating new AWS Identity and Access
-	// Management (IAM) users. For those stacks, you must explicitly acknowledge
-	// this by specifying one of these capabilities.
-	//
-	// The following IAM resources require you to specify either the CAPABILITY_IAM
-	// or CAPABILITY_NAMED_IAM capability.
- // - // If you have IAM resources, you can specify either capability. - // - // If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. - // - // - // If you don't specify either of these capabilities, AWS CloudFormation returns - // an InsufficientCapabilities error. - // - // If your stack template contains these resources, we recommend that you review - // all permissions associated with them and edit their permissions if necessary. - // - // AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) - // - // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) - // - // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) - // - // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) - // - // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) - // - // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) - // - // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include + // resources that can affect permissions in your AWS account; for example, + // by creating new AWS Identity and Access Management (IAM) users. For those + // stacks, you must explicitly acknowledge this by specifying one of these + // capabilities. The following IAM resources require you to specify either + // the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM + // resources, you can specify either capability. If you have IAM resources + // with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't + // specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities + // error. If your stack template contains these resources, we recommend that + // you review all permissions associated with them and edit their permissions + // if necessary. AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) + // AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) + // AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) + // AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) + // AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) + // AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) + // AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) + // For more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // * CAPABILITY_AUTO_EXPAND - // - // Some template contain macros. 
Macros perform custom processing on templates;
-	// this can include simple actions like find-and-replace operations, all
-	// the way to extensive transformations of entire templates. Because of this,
-	// users typically create a change set from the processed template, so that
-	// they can review the changes resulting from the macros before actually
-	// updating the stack. If your stack template contains one or more macros,
-	// and you choose to update a stack directly from the processed template,
-	// without first reviewing the resulting changes in a change set, you must
-	// acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html)
+	// * CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform
+	// custom processing on templates; this can include simple actions like find-and-replace
+	// operations, all the way to extensive transformations of entire templates.
+	// Because of this, users typically create a change set from the processed
+	// template, so that they can review the changes resulting from the macros
+	// before actually updating the stack. If your stack template contains one
+	// or more macros, and you choose to update a stack directly from the processed
+	// template, without first reviewing the resulting changes in a change set,
+	// you must acknowledge this capability. This includes the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html)
 	// and AWS::Serverless (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html)
-	// transforms, which are macros hosted by AWS CloudFormation.
-	//
-	// Change sets do not currently support nested stacks. If you want to update
-	// a stack from a stack template that contains macros and nested stacks,
-	// you must update the stack directly from the template using this capability.
-	//
-	// You should only update stacks directly from a stack template that contains
-	// macros if you know what processing the macro performs.
-	//
-	// Each macro relies on an underlying Lambda service function for processing
-	// stack templates. Be aware that the Lambda function owner can update the
-	// function operation without AWS CloudFormation being notified.
-	//
-	// For more information, see Using AWS CloudFormation Macros to Perform Custom
-	// Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
+	// transforms, which are macros hosted by AWS CloudFormation. Change sets
+	// do not currently support nested stacks. If you want to update a stack
+	// from a stack template that contains macros and nested stacks, you must
+	// update the stack directly from the template using this capability. You
+	// should only update stacks directly from a stack template that contains
+	// macros if you know what processing the macro performs. Each macro relies
+	// on an underlying Lambda service function for processing stack templates.
+	// Be aware that the Lambda function owner can update the function operation
+	// without AWS CloudFormation being notified. For more information, see Using
+	// AWS CloudFormation Macros to Perform Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
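For illustration only (not part of the vendored file): a minimal sketch of acknowledging these capabilities on an update, assuming a default session; the stack name and template URL are hypothetical. Omitting the Capabilities entries for a template with named IAM resources or macros makes the call fail with an InsufficientCapabilities error.

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/cloudformation"
	)

	func main() {
		svc := cloudformation.New(session.Must(session.NewSession()))

		// Updating a stack whose template declares named IAM resources and
		// macros requires acknowledging both capabilities explicitly. The
		// stack name and template URL are hypothetical.
		_, err := svc.UpdateStack(&cloudformation.UpdateStackInput{
			StackName:   aws.String("my-stack"),
			TemplateURL: aws.String("https://s3.amazonaws.com/my-templates/stack.yaml"),
			Capabilities: []*string{
				aws.String(cloudformation.CapabilityCapabilityNamedIam),
				aws.String(cloudformation.CapabilityCapabilityAutoExpand),
			},
		})
		if err != nil {
			fmt.Println("UpdateStack failed:", err)
		}
	}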
 	Capabilities []*string `type:"list"`

 	// A unique identifier for this UpdateStack request. Specify this token if you
@@ -12626,12 +12693,9 @@ type UpdateStackInstancesInput struct {
 	// and specify its value.
 	//
 	// * To leave a parameter set to its present value, you can do one of the
-	// following:
-	//
-	// Do not include the parameter in the list.
-	//
-	// Include the parameter and specify UsePreviousValue as true. (You cannot specify
-	// both a value and set UsePreviousValue to true.)
+	// following: Do not include the parameter in the list. Include the parameter
+	// and specify UsePreviousValue as true. (You cannot specify both a value
+	// and set UsePreviousValue to true.)
 	//
 	// * To set all overridden parameters back to the values specified in the
 	// stack set, specify a parameter list but do not include any parameters.
@@ -12819,57 +12883,38 @@ type UpdateStackSetInput struct {
 	// same customized administrator role used with this stack set previously.
 	AdministrationRoleARN *string `min:"20" type:"string"`

-	// In some cases, you must explicity acknowledge that your stack template contains
+	// In some cases, you must explicitly acknowledge that your stack template contains
 	// certain capabilities in order for AWS CloudFormation to update the stack
 	// set and its associated stack instances.
 	//
-	// * CAPABILITY_IAM and CAPABILITY_NAMED_IAM
-	//
-	// Some stack templates might include resources that can affect permissions
-	// in your AWS account; for example, by creating new AWS Identity and Access
-	// Management (IAM) users. For those stacks sets, you must explicitly acknowledge
-	// this by specifying one of these capabilities.
-	//
-	// The following IAM resources require you to specify either the CAPABILITY_IAM
-	// or CAPABILITY_NAMED_IAM capability.
-	//
-	// If you have IAM resources, you can specify either capability.
-	//
-	// If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
-	//
-	//
-	// If you don't specify either of these capabilities, AWS CloudFormation returns
-	// an InsufficientCapabilities error.
-	//
-	// If your stack template contains these resources, we recommend that you review
-	// all permissions associated with them and edit their permissions if necessary.
-	//
-	// AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html)
-	//
-	// AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html)
-	//
-	// AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html)
-	//
-	// AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html)
-	//
-	// AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html)
-	//
-	// AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html)
-	//
-	// AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html)
-	//
-	// For more information, see Acknowledging IAM Resources in AWS CloudFormation
+	// * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include
+	// resources that can affect permissions in your AWS account; for example,
+	// by creating new AWS Identity and Access Management (IAM) users. For those
+	// stack sets, you must explicitly acknowledge this by specifying one of
+	// these capabilities. The following IAM resources require you to specify
+	// either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have
+	// IAM resources, you can specify either capability. If you have IAM resources
+	// with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't
+	// specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities
+	// error. If your stack template contains these resources, we recommend that
+	// you review all permissions associated with them and edit their permissions
+	// if necessary. AWS::IAM::AccessKey (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html)
+	// AWS::IAM::Group (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html)
+	// AWS::IAM::InstanceProfile (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html)
+	// AWS::IAM::Policy (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html)
+	// AWS::IAM::Role (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html)
+	// AWS::IAM::User (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html)
+	// AWS::IAM::UserToGroupAddition (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html)
+	// For more information, see Acknowledging IAM Resources in AWS CloudFormation
 	// Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities).
 	//
-	// * CAPABILITY_AUTO_EXPAND
-	//
-	// Some templates contain macros. If your stack template contains one or more
-	// macros, and you choose to update a stack directly from the processed template,
-	// without first reviewing the resulting changes in a change set, you must
-	// acknowledge this capability. For more information, see Using AWS CloudFormation
-	// Macros to Perform Custom Processing on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
-	//
-	// Stack sets do not currently support macros in stack templates. (This includes
+	// * CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack
+	// template contains one or more macros, and you choose to update a stack
+	// directly from the processed template, without first reviewing the resulting
+	// changes in a change set, you must acknowledge this capability. For more
+	// information, see Using AWS CloudFormation Macros to Perform Custom Processing
+	// on Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html).
+	// Stack sets do not currently support macros in stack templates. (This includes
 	// the AWS::Include (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html)
 	// and AWS::Serverless (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html)
 	// transforms, which are macros hosted by AWS CloudFormation.)
Even if you @@ -13391,6 +13436,9 @@ const ( // ChangeActionRemove is a ChangeAction enum value ChangeActionRemove = "Remove" + + // ChangeActionImport is a ChangeAction enum value + ChangeActionImport = "Import" ) const ( @@ -13416,6 +13464,9 @@ const ( // ChangeSetTypeUpdate is a ChangeSetType enum value ChangeSetTypeUpdate = "UPDATE" + + // ChangeSetTypeImport is a ChangeSetType enum value + ChangeSetTypeImport = "IMPORT" ) const ( @@ -13570,6 +13621,24 @@ const ( // ResourceStatusUpdateComplete is a ResourceStatus enum value ResourceStatusUpdateComplete = "UPDATE_COMPLETE" + + // ResourceStatusImportFailed is a ResourceStatus enum value + ResourceStatusImportFailed = "IMPORT_FAILED" + + // ResourceStatusImportComplete is a ResourceStatus enum value + ResourceStatusImportComplete = "IMPORT_COMPLETE" + + // ResourceStatusImportInProgress is a ResourceStatus enum value + ResourceStatusImportInProgress = "IMPORT_IN_PROGRESS" + + // ResourceStatusImportRollbackInProgress is a ResourceStatus enum value + ResourceStatusImportRollbackInProgress = "IMPORT_ROLLBACK_IN_PROGRESS" + + // ResourceStatusImportRollbackFailed is a ResourceStatus enum value + ResourceStatusImportRollbackFailed = "IMPORT_ROLLBACK_FAILED" + + // ResourceStatusImportRollbackComplete is a ResourceStatus enum value + ResourceStatusImportRollbackComplete = "IMPORT_ROLLBACK_COMPLETE" ) const ( @@ -13726,6 +13795,21 @@ const ( // StackStatusReviewInProgress is a StackStatus enum value StackStatusReviewInProgress = "REVIEW_IN_PROGRESS" + + // StackStatusImportInProgress is a StackStatus enum value + StackStatusImportInProgress = "IMPORT_IN_PROGRESS" + + // StackStatusImportComplete is a StackStatus enum value + StackStatusImportComplete = "IMPORT_COMPLETE" + + // StackStatusImportRollbackInProgress is a StackStatus enum value + StackStatusImportRollbackInProgress = "IMPORT_ROLLBACK_IN_PROGRESS" + + // StackStatusImportRollbackFailed is a StackStatus enum value + StackStatusImportRollbackFailed = "IMPORT_ROLLBACK_FAILED" + + // StackStatusImportRollbackComplete is a StackStatus enum value + StackStatusImportRollbackComplete = "IMPORT_ROLLBACK_COMPLETE" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go index 65df49a0cc8..ab37537cc1e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFormation { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFormation { svc := &CloudFormation{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-15", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go index afe8a1b2eb3..cabb91cee56 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go @@ -188,6 +188,11 @@ func (c *CloudFormation) WaitUntilStackDeleteCompleteWithContext(ctx aws.Context Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", Expected: "ROLLBACK_FAILED", }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_IN_PROGRESS", + }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", @@ -196,7 +201,7 @@ func (c *CloudFormation) WaitUntilStackDeleteCompleteWithContext(ctx aws.Context { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", - Expected: "UPDATE_ROLLBACK_IN_PROGRESS", + Expected: "UPDATE_ROLLBACK_COMPLETE", }, }, Logger: c.Config.Logger, @@ -268,6 +273,82 @@ func (c *CloudFormation) WaitUntilStackExistsWithContext(ctx aws.Context, input return w.WaitWithContext(ctx) } +// WaitUntilStackImportComplete uses the AWS CloudFormation API operation +// DescribeStacks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *CloudFormation) WaitUntilStackImportComplete(input *DescribeStacksInput) error { + return c.WaitUntilStackImportCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilStackImportCompleteWithContext is an extended version of WaitUntilStackImportComplete. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
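For illustration only (not part of the vendored file): a minimal sketch of calling the new waiter, assuming a default session; the stack name is hypothetical. The waiter polls DescribeStacks every 30 seconds for up to 120 attempts, so a context deadline is a sensible bound.

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/cloudformation"
	)

	func main() {
		svc := cloudformation.New(session.Must(session.NewSession()))

		// Bound the wait; the stack name is hypothetical.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
		defer cancel()

		err := svc.WaitUntilStackImportCompleteWithContext(ctx, &cloudformation.DescribeStacksInput{
			StackName: aws.String("my-stack"),
		})
		if err != nil {
			fmt.Println("import did not complete:", err)
		}
	}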
+func (c *CloudFormation) WaitUntilStackImportCompleteWithContext(ctx aws.Context, input *DescribeStacksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilStackImportComplete", + MaxAttempts: 120, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "IMPORT_COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_FAILED", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "IMPORT_ROLLBACK_IN_PROGRESS", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "IMPORT_ROLLBACK_FAILED", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "IMPORT_ROLLBACK_COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationError", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeStacksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeStacksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + // WaitUntilStackUpdateComplete uses the AWS CloudFormation API operation // DescribeStacks to wait for a condition to be met before returning. // If the condition is not met within the max attempt window, an error will diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index e7808318f76..427393e37ce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2018_11_05" +const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2019_03_26" // CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the CreateCloudFrontOriginAccessIdentity operation. 
The "output" return @@ -38,12 +38,12 @@ const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIden // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateCloudFrontOriginAccessIdentity func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opCreateCloudFrontOriginAccessIdentity, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront", } if input == nil { @@ -61,7 +61,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCl // origin, you can use an origin access identity to require users to access // your content using a CloudFront URL instead of the Amazon S3 URL. For more // information about how to use origin access identities, see Serving Private -// Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) +// Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -92,7 +92,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCl // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateCloudFrontOriginAccessIdentity func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFrontOriginAccessIdentityInput) (*CreateCloudFrontOriginAccessIdentityOutput, error) { req, out := c.CreateCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() @@ -114,7 +114,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityWithContext(ctx aws.Con return out, req.Send() } -const opCreateDistribution = "CreateDistribution2018_11_05" +const opCreateDistribution = "CreateDistribution2019_03_26" // CreateDistributionRequest generates a "aws/request.Request" representing the // client's request for the CreateDistribution operation. The "output" return @@ -139,12 +139,12 @@ const opCreateDistribution = "CreateDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistribution func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { op := &request.Operation{ Name: opCreateDistribution, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/distribution", + HTTPPath: "/2019-03-26/distribution", } if input == nil { @@ -164,16 +164,14 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // API version/distribution/distribution ID resource. // // When you update a distribution, there are more required fields than when -// you create a distribution. 
When you update your distribution by using UpdateDistribution, +// you create a distribution. When you update your distribution by using UpdateDistribution +// (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html), // follow the steps included in the documentation to get the current configuration // and then make your updates. This helps to make sure that you include all // of the required fields. To view a summary, see Required Fields for Create -// Distribution and Update Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) +// Distribution and Update Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) // in the Amazon CloudFront Developer Guide. // -// If you are using Adobe Flash Media Server's RTMP protocol, you set up a different -// kind of CloudFront distribution. For more information, see CreateStreamingDistribution. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -183,6 +181,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // // Returned Error Codes: // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. // // * ErrCodeDistributionAlreadyExists "DistributionAlreadyExists" // The caller reference you attempted to create the distribution with is associated @@ -205,8 +204,10 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" +// A viewer certificate specified in the response body is not valid. // // * ErrCodeInvalidMinimumProtocolVersion "InvalidMinimumProtocolVersion" +// The minimum protocol version specified is not valid. // // * ErrCodeMissingBody "MissingBody" // This operation requires a body. Ensure that the body is present and the Content-Type @@ -227,8 +228,10 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // a slash (/). // // * ErrCodeInvalidErrorCode "InvalidErrorCode" +// An invalid error code was specified. // // * ErrCodeInvalidResponseCode "InvalidResponseCode" +// A response code specified in the response body is not valid. // // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. @@ -262,8 +265,10 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // when expected. // // * ErrCodeTooManyHeadersInForwardedValues "TooManyHeadersInForwardedValues" +// Your request contains too many headers in forwarded values. // // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" +// The headers specified are not valid for an Amazon S3 origin. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. @@ -272,22 +277,29 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // You cannot create anymore custom SSL/TLS certificates. // // * ErrCodeInvalidLocationCode "InvalidLocationCode" +// The location code specified is not valid. // // * ErrCodeInvalidGeoRestrictionParameter "InvalidGeoRestrictionParameter" +// The specified geo restriction parameter is not valid. 
// // * ErrCodeInvalidProtocolSettings "InvalidProtocolSettings" // You cannot specify SSLv3 as the minimum protocol version if you only want // to support only clients that support Server Name Indication (SNI). // // * ErrCodeInvalidTTLOrder "InvalidTTLOrder" +// TTL order specified in the response body is not valid. // // * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL id specified in the response body is not valid. // // * ErrCodeTooManyOriginCustomHeaders "TooManyOriginCustomHeaders" +// Your request contains too many origin custom headers. // // * ErrCodeTooManyQueryStringParameters "TooManyQueryStringParameters" +// Your request contains too many query string parameters. // // * ErrCodeInvalidQueryStringParameters "InvalidQueryStringParameters" +// Query string parameters specified in the response body are not valid. // // * ErrCodeTooManyDistributionsWithLambdaAssociations "TooManyDistributionsWithLambdaAssociations" // Processing your request would cause the maximum number of distributions with @@ -301,8 +313,10 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // The specified Lambda function association is invalid. // // * ErrCodeInvalidOriginReadTimeout "InvalidOriginReadTimeout" +// The read timeout specified for the origin is not valid. // // * ErrCodeInvalidOriginKeepaliveTimeout "InvalidOriginKeepaliveTimeout" +// The keep alive timeout specified for the origin is not valid. // // * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" // The specified configuration for field-level encryption doesn't exist. @@ -315,7 +329,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // The maximum number of distributions have been associated with the specified // configuration for field-level encryption. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistribution func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) { req, out := c.CreateDistributionRequest(input) return out, req.Send() @@ -337,7 +351,7 @@ func (c *CloudFront) CreateDistributionWithContext(ctx aws.Context, input *Creat return out, req.Send() } -const opCreateDistributionWithTags = "CreateDistributionWithTags2018_11_05" +const opCreateDistributionWithTags = "CreateDistributionWithTags2019_03_26" // CreateDistributionWithTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateDistributionWithTags operation. 
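
The error codes enumerated for CreateDistribution are distinguished at runtime with an awserr type assertion, as the generated comments note. A hedged sketch of that pattern, assuming svc as before plus an import of github.com/aws/aws-sdk-go/aws/awserr; the handling choices are illustrative:

func createDistribution(svc *cloudfront.CloudFront, in *cloudfront.CreateDistributionInput) (*cloudfront.CreateDistributionOutput, error) {
	out, err := svc.CreateDistribution(in)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case cloudfront.ErrCodeCNAMEAlreadyExists:
				// The alternate domain name is attached to another distribution.
			case cloudfront.ErrCodeTooManyDistributions:
				// Account limit reached; clean up or request a quota increase.
			}
		}
		return nil, err
	}
	return out, nil
}
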
The "output" return @@ -362,12 +376,12 @@ const opCreateDistributionWithTags = "CreateDistributionWithTags2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateDistributionWithTags +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistributionWithTags func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistributionWithTagsInput) (req *request.Request, output *CreateDistributionWithTagsOutput) { op := &request.Operation{ Name: opCreateDistributionWithTags, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/distribution?WithTags", + HTTPPath: "/2019-03-26/distribution?WithTags", } if input == nil { @@ -392,6 +406,7 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // // Returned Error Codes: // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. // // * ErrCodeDistributionAlreadyExists "DistributionAlreadyExists" // The caller reference you attempted to create the distribution with is associated @@ -414,8 +429,10 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" +// A viewer certificate specified in the response body is not valid. // // * ErrCodeInvalidMinimumProtocolVersion "InvalidMinimumProtocolVersion" +// The minimum protocol version specified is not valid. // // * ErrCodeMissingBody "MissingBody" // This operation requires a body. Ensure that the body is present and the Content-Type @@ -436,8 +453,10 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // a slash (/). // // * ErrCodeInvalidErrorCode "InvalidErrorCode" +// An invalid error code was specified. // // * ErrCodeInvalidResponseCode "InvalidResponseCode" +// A response code specified in the response body is not valid. // // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. @@ -471,8 +490,10 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // when expected. // // * ErrCodeTooManyHeadersInForwardedValues "TooManyHeadersInForwardedValues" +// Your request contains too many headers in forwarded values. // // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" +// The headers specified are not valid for an Amazon S3 origin. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. @@ -481,24 +502,32 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // You cannot create anymore custom SSL/TLS certificates. // // * ErrCodeInvalidLocationCode "InvalidLocationCode" +// The location code specified is not valid. // // * ErrCodeInvalidGeoRestrictionParameter "InvalidGeoRestrictionParameter" +// The specified geo restriction parameter is not valid. // // * ErrCodeInvalidProtocolSettings "InvalidProtocolSettings" // You cannot specify SSLv3 as the minimum protocol version if you only want // to support only clients that support Server Name Indication (SNI). // // * ErrCodeInvalidTTLOrder "InvalidTTLOrder" +// TTL order specified in the response body is not valid. // // * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL id specified in the response body is not valid. // // * ErrCodeTooManyOriginCustomHeaders "TooManyOriginCustomHeaders" +// Your request contains too many origin custom headers. 
// // * ErrCodeInvalidTagging "InvalidTagging" +// Tagging specified in the response body is not valid. // // * ErrCodeTooManyQueryStringParameters "TooManyQueryStringParameters" +// Your request contains too many query string parameters. // // * ErrCodeInvalidQueryStringParameters "InvalidQueryStringParameters" +// Query string parameters specified in the response body are not valid. // // * ErrCodeTooManyDistributionsWithLambdaAssociations "TooManyDistributionsWithLambdaAssociations" // Processing your request would cause the maximum number of distributions with @@ -512,8 +541,10 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // The specified Lambda function association is invalid. // // * ErrCodeInvalidOriginReadTimeout "InvalidOriginReadTimeout" +// The read timeout specified for the origin is not valid. // // * ErrCodeInvalidOriginKeepaliveTimeout "InvalidOriginKeepaliveTimeout" +// The keep alive timeout specified for the origin is not valid. // // * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" // The specified configuration for field-level encryption doesn't exist. @@ -526,7 +557,7 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // The maximum number of distributions have been associated with the specified // configuration for field-level encryption. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateDistributionWithTags +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistributionWithTags func (c *CloudFront) CreateDistributionWithTags(input *CreateDistributionWithTagsInput) (*CreateDistributionWithTagsOutput, error) { req, out := c.CreateDistributionWithTagsRequest(input) return out, req.Send() @@ -548,7 +579,7 @@ func (c *CloudFront) CreateDistributionWithTagsWithContext(ctx aws.Context, inpu return out, req.Send() } -const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2018_11_05" +const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2019_03_26" // CreateFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the // client's request for the CreateFieldLevelEncryptionConfig operation. The "output" return @@ -573,12 +604,12 @@ const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2018 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionConfig func (c *CloudFront) CreateFieldLevelEncryptionConfigRequest(input *CreateFieldLevelEncryptionConfigInput) (req *request.Request, output *CreateFieldLevelEncryptionConfigOutput) { op := &request.Operation{ Name: opCreateFieldLevelEncryptionConfig, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/field-level-encryption", + HTTPPath: "/2019-03-26/field-level-encryption", } if input == nil { @@ -629,7 +660,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionConfigRequest(input *CreateFieldL // * ErrCodeQueryArgProfileEmpty "QueryArgProfileEmpty" // No profile specified for the field-level encryption query argument. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionConfig func (c *CloudFront) CreateFieldLevelEncryptionConfig(input *CreateFieldLevelEncryptionConfigInput) (*CreateFieldLevelEncryptionConfigOutput, error) { req, out := c.CreateFieldLevelEncryptionConfigRequest(input) return out, req.Send() @@ -651,7 +682,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionConfigWithContext(ctx aws.Context return out, req.Send() } -const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile2018_11_05" +const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile2019_03_26" // CreateFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the // client's request for the CreateFieldLevelEncryptionProfile operation. The "output" return @@ -676,12 +707,12 @@ const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile20 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionProfile func (c *CloudFront) CreateFieldLevelEncryptionProfileRequest(input *CreateFieldLevelEncryptionProfileInput) (req *request.Request, output *CreateFieldLevelEncryptionProfileOutput) { op := &request.Operation{ Name: opCreateFieldLevelEncryptionProfile, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/field-level-encryption-profile", + HTTPPath: "/2019-03-26/field-level-encryption-profile", } if input == nil { @@ -731,7 +762,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionProfileRequest(input *CreateField // The maximum number of field patterns for field-level encryption have been // created. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionProfile func (c *CloudFront) CreateFieldLevelEncryptionProfile(input *CreateFieldLevelEncryptionProfileInput) (*CreateFieldLevelEncryptionProfileOutput, error) { req, out := c.CreateFieldLevelEncryptionProfileRequest(input) return out, req.Send() @@ -753,7 +784,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionProfileWithContext(ctx aws.Contex return out, req.Send() } -const opCreateInvalidation = "CreateInvalidation2018_11_05" +const opCreateInvalidation = "CreateInvalidation2019_03_26" // CreateInvalidationRequest generates a "aws/request.Request" representing the // client's request for the CreateInvalidation operation. 
The "output" return @@ -778,12 +809,12 @@ const opCreateInvalidation = "CreateInvalidation2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateInvalidation func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { op := &request.Operation{ Name: opCreateInvalidation, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/distribution/{DistributionId}/invalidation", + HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation", } if input == nil { @@ -821,6 +852,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( // The specified distribution does not exist. // // * ErrCodeBatchTooLarge "BatchTooLarge" +// Invalidation batch specified is too large. // // * ErrCodeTooManyInvalidationsInProgress "TooManyInvalidationsInProgress" // You have exceeded the maximum number of allowable InProgress invalidation @@ -829,7 +861,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateInvalidation func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*CreateInvalidationOutput, error) { req, out := c.CreateInvalidationRequest(input) return out, req.Send() @@ -851,7 +883,7 @@ func (c *CloudFront) CreateInvalidationWithContext(ctx aws.Context, input *Creat return out, req.Send() } -const opCreatePublicKey = "CreatePublicKey2018_11_05" +const opCreatePublicKey = "CreatePublicKey2019_03_26" // CreatePublicKeyRequest generates a "aws/request.Request" representing the // client's request for the CreatePublicKey operation. The "output" return @@ -876,12 +908,12 @@ const opCreatePublicKey = "CreatePublicKey2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreatePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreatePublicKey func (c *CloudFront) CreatePublicKeyRequest(input *CreatePublicKeyInput) (req *request.Request, output *CreatePublicKeyOutput) { op := &request.Operation{ Name: opCreatePublicKey, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/public-key", + HTTPPath: "/2019-03-26/public-key", } if input == nil { @@ -916,7 +948,7 @@ func (c *CloudFront) CreatePublicKeyRequest(input *CreatePublicKeyInput) (req *r // The maximum number of public keys for field-level encryption have been created. // To create a new public key, delete one of the existing keys. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreatePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreatePublicKey func (c *CloudFront) CreatePublicKey(input *CreatePublicKeyInput) (*CreatePublicKeyOutput, error) { req, out := c.CreatePublicKeyRequest(input) return out, req.Send() @@ -938,7 +970,7 @@ func (c *CloudFront) CreatePublicKeyWithContext(ctx aws.Context, input *CreatePu return out, req.Send() } -const opCreateStreamingDistribution = "CreateStreamingDistribution2018_11_05" +const opCreateStreamingDistribution = "CreateStreamingDistribution2019_03_26" // CreateStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the CreateStreamingDistribution operation. The "output" return @@ -963,12 +995,12 @@ const opCreateStreamingDistribution = "CreateStreamingDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistribution func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { op := &request.Operation{ Name: opCreateStreamingDistribution, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/streaming-distribution", + HTTPPath: "/2019-03-26/streaming-distribution", } if input == nil { @@ -982,13 +1014,13 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // CreateStreamingDistribution API operation for Amazon CloudFront. // -// Creates a new RMTP distribution. An RTMP distribution is similar to a web +// Creates a new RTMP distribution. An RTMP distribution is similar to a web // distribution, but an RTMP distribution streams media files using the Adobe // Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP. // -// To create a new web distribution, submit a POST request to the CloudFront -// API version/distribution resource. The request body must include a document -// with a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig +// To create a new distribution, submit a POST request to the CloudFront API +// version/distribution resource. The request body must include a document with +// a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig // element and returns other information about the RTMP distribution. // // To get the status of your request, use the GET StreamingDistribution API @@ -997,7 +1029,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // minutes. // // For more information about web distributions, see Working with RTMP Distributions -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html) +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html) // in the Amazon CloudFront Developer Guide. // // Beginning with the 2012-05-05 version of the CloudFront API, we made substantial @@ -1020,8 +1052,11 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // // Returned Error Codes: // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. 
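
The CreatePublicKey operation above uploads a key for use by field-level encryption profiles. A sketch of the input shape, assuming svc as before; the caller reference and name are assumptions, and the field set follows the SDK's PublicKeyConfig type:

func createPublicKey(svc *cloudfront.CloudFront, pemKey string) (*cloudfront.CreatePublicKeyOutput, error) {
	return svc.CreatePublicKey(&cloudfront.CreatePublicKeyInput{
		PublicKeyConfig: &cloudfront.PublicKeyConfig{
			CallerReference: aws.String("fle-key-2019"),  // illustrative
			Name:            aws.String("fle-key"),       // illustrative
			EncodedKey:      aws.String(pemKey),          // PEM-encoded RSA public key
		},
	})
}
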
// // * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" +// The caller reference you attempted to create the streaming distribution with +// is associated with another distribution // // * ErrCodeInvalidOrigin "InvalidOrigin" // The Amazon S3 origin server specified does not refer to a valid Amazon S3 @@ -1044,6 +1079,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // header is set. // // * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. // // * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" // Processing your request would cause you to exceed the maximum number of streaming @@ -1055,7 +1091,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistribution func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { req, out := c.CreateStreamingDistributionRequest(input) return out, req.Send() @@ -1077,7 +1113,7 @@ func (c *CloudFront) CreateStreamingDistributionWithContext(ctx aws.Context, inp return out, req.Send() } -const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTags2018_11_05" +const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTags2019_03_26" // CreateStreamingDistributionWithTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateStreamingDistributionWithTags operation. The "output" return @@ -1102,12 +1138,12 @@ const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTa // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateStreamingDistributionWithTags +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistributionWithTags func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStreamingDistributionWithTagsInput) (req *request.Request, output *CreateStreamingDistributionWithTagsOutput) { op := &request.Operation{ Name: opCreateStreamingDistributionWithTags, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/streaming-distribution?WithTags", + HTTPPath: "/2019-03-26/streaming-distribution?WithTags", } if input == nil { @@ -1132,8 +1168,11 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStr // // Returned Error Codes: // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. // // * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" +// The caller reference you attempted to create the streaming distribution with +// is associated with another distribution // // * ErrCodeInvalidOrigin "InvalidOrigin" // The Amazon S3 origin server specified does not refer to a valid Amazon S3 @@ -1156,6 +1195,7 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStr // header is set. 
// // * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. // // * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" // Processing your request would cause you to exceed the maximum number of streaming @@ -1168,8 +1208,9 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStr // The value of Quantity and the size of Items don't match. // // * ErrCodeInvalidTagging "InvalidTagging" +// Tagging specified in the response body is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/CreateStreamingDistributionWithTags +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistributionWithTags func (c *CloudFront) CreateStreamingDistributionWithTags(input *CreateStreamingDistributionWithTagsInput) (*CreateStreamingDistributionWithTagsOutput, error) { req, out := c.CreateStreamingDistributionWithTagsRequest(input) return out, req.Send() @@ -1191,7 +1232,7 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsWithContext(ctx aws.Cont return out, req.Send() } -const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2018_11_05" +const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2019_03_26" // DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return @@ -1216,12 +1257,12 @@ const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIden // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteCloudFrontOriginAccessIdentity func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opDeleteCloudFrontOriginAccessIdentity, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront/{Id}", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}", } if input == nil { @@ -1260,8 +1301,9 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCl // to false. // // * ErrCodeOriginAccessIdentityInUse "CloudFrontOriginAccessIdentityInUse" +// The Origin Access Identity specified is already in use. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteCloudFrontOriginAccessIdentity func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFrontOriginAccessIdentityInput) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() @@ -1283,7 +1325,7 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityWithContext(ctx aws.Con return out, req.Send() } -const opDeleteDistribution = "DeleteDistribution2018_11_05" +const opDeleteDistribution = "DeleteDistribution2019_03_26" // DeleteDistributionRequest generates a "aws/request.Request" representing the // client's request for the DeleteDistribution operation. 
The "output" return @@ -1308,12 +1350,12 @@ const opDeleteDistribution = "DeleteDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteDistribution func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { op := &request.Operation{ Name: opDeleteDistribution, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/distribution/{Id}", + HTTPPath: "/2019-03-26/distribution/{Id}", } if input == nil { @@ -1342,6 +1384,8 @@ func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) ( // Access denied. // // * ErrCodeDistributionNotDisabled "DistributionNotDisabled" +// The specified CloudFront distribution is not disabled. You must disable the +// distribution before you can delete it. // // * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" // The If-Match version is missing or not valid for the distribution. @@ -1353,7 +1397,7 @@ func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) ( // The precondition given in one or more of the request-header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteDistribution func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { req, out := c.DeleteDistributionRequest(input) return out, req.Send() @@ -1375,7 +1419,7 @@ func (c *CloudFront) DeleteDistributionWithContext(ctx aws.Context, input *Delet return out, req.Send() } -const opDeleteFieldLevelEncryptionConfig = "DeleteFieldLevelEncryptionConfig2018_11_05" +const opDeleteFieldLevelEncryptionConfig = "DeleteFieldLevelEncryptionConfig2019_03_26" // DeleteFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the // client's request for the DeleteFieldLevelEncryptionConfig operation. The "output" return @@ -1400,12 +1444,12 @@ const opDeleteFieldLevelEncryptionConfig = "DeleteFieldLevelEncryptionConfig2018 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionConfig func (c *CloudFront) DeleteFieldLevelEncryptionConfigRequest(input *DeleteFieldLevelEncryptionConfigInput) (req *request.Request, output *DeleteFieldLevelEncryptionConfigOutput) { op := &request.Operation{ Name: opDeleteFieldLevelEncryptionConfig, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/field-level-encryption/{Id}", + HTTPPath: "/2019-03-26/field-level-encryption/{Id}", } if input == nil { @@ -1446,7 +1490,7 @@ func (c *CloudFront) DeleteFieldLevelEncryptionConfigRequest(input *DeleteFieldL // * ErrCodeFieldLevelEncryptionConfigInUse "FieldLevelEncryptionConfigInUse" // The specified configuration for field-level encryption is in use. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionConfig func (c *CloudFront) DeleteFieldLevelEncryptionConfig(input *DeleteFieldLevelEncryptionConfigInput) (*DeleteFieldLevelEncryptionConfigOutput, error) { req, out := c.DeleteFieldLevelEncryptionConfigRequest(input) return out, req.Send() @@ -1468,7 +1512,7 @@ func (c *CloudFront) DeleteFieldLevelEncryptionConfigWithContext(ctx aws.Context return out, req.Send() } -const opDeleteFieldLevelEncryptionProfile = "DeleteFieldLevelEncryptionProfile2018_11_05" +const opDeleteFieldLevelEncryptionProfile = "DeleteFieldLevelEncryptionProfile2019_03_26" // DeleteFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the // client's request for the DeleteFieldLevelEncryptionProfile operation. The "output" return @@ -1493,12 +1537,12 @@ const opDeleteFieldLevelEncryptionProfile = "DeleteFieldLevelEncryptionProfile20 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionProfile func (c *CloudFront) DeleteFieldLevelEncryptionProfileRequest(input *DeleteFieldLevelEncryptionProfileInput) (req *request.Request, output *DeleteFieldLevelEncryptionProfileOutput) { op := &request.Operation{ Name: opDeleteFieldLevelEncryptionProfile, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/field-level-encryption-profile/{Id}", + HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}", } if input == nil { @@ -1539,7 +1583,7 @@ func (c *CloudFront) DeleteFieldLevelEncryptionProfileRequest(input *DeleteField // * ErrCodeFieldLevelEncryptionProfileInUse "FieldLevelEncryptionProfileInUse" // The specified profile for field-level encryption is in use. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionProfile func (c *CloudFront) DeleteFieldLevelEncryptionProfile(input *DeleteFieldLevelEncryptionProfileInput) (*DeleteFieldLevelEncryptionProfileOutput, error) { req, out := c.DeleteFieldLevelEncryptionProfileRequest(input) return out, req.Send() @@ -1561,7 +1605,7 @@ func (c *CloudFront) DeleteFieldLevelEncryptionProfileWithContext(ctx aws.Contex return out, req.Send() } -const opDeletePublicKey = "DeletePublicKey2018_11_05" +const opDeletePublicKey = "DeletePublicKey2019_03_26" // DeletePublicKeyRequest generates a "aws/request.Request" representing the // client's request for the DeletePublicKey operation. 
The "output" return @@ -1586,12 +1630,12 @@ const opDeletePublicKey = "DeletePublicKey2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeletePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeletePublicKey func (c *CloudFront) DeletePublicKeyRequest(input *DeletePublicKeyInput) (req *request.Request, output *DeletePublicKeyOutput) { op := &request.Operation{ Name: opDeletePublicKey, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/public-key/{Id}", + HTTPPath: "/2019-03-26/public-key/{Id}", } if input == nil { @@ -1632,7 +1676,7 @@ func (c *CloudFront) DeletePublicKeyRequest(input *DeletePublicKeyInput) (req *r // The precondition given in one or more of the request-header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeletePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeletePublicKey func (c *CloudFront) DeletePublicKey(input *DeletePublicKeyInput) (*DeletePublicKeyOutput, error) { req, out := c.DeletePublicKeyRequest(input) return out, req.Send() @@ -1654,7 +1698,7 @@ func (c *CloudFront) DeletePublicKeyWithContext(ctx aws.Context, input *DeletePu return out, req.Send() } -const opDeleteStreamingDistribution = "DeleteStreamingDistribution2018_11_05" +const opDeleteStreamingDistribution = "DeleteStreamingDistribution2019_03_26" // DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the DeleteStreamingDistribution operation. The "output" return @@ -1679,12 +1723,12 @@ const opDeleteStreamingDistribution = "DeleteStreamingDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteStreamingDistribution func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { op := &request.Operation{ Name: opDeleteStreamingDistribution, HTTPMethod: "DELETE", - HTTPPath: "/2018-11-05/streaming-distribution/{Id}", + HTTPPath: "/2019-03-26/streaming-distribution/{Id}", } if input == nil { @@ -1733,7 +1777,7 @@ func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDi // that the distribution was successfully deleted. // // For information about deleting a distribution using the CloudFront console, -// see Deleting a Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) +// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) // in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1748,6 +1792,8 @@ func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDi // Access denied. // // * ErrCodeStreamingDistributionNotDisabled "StreamingDistributionNotDisabled" +// The specified CloudFront distribution is not disabled. You must disable the +// distribution before you can delete it. // // * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" // The If-Match version is missing or not valid for the distribution. 
@@ -1759,7 +1805,7 @@ func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDi // The precondition given in one or more of the request-header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteStreamingDistribution func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistributionInput) (*DeleteStreamingDistributionOutput, error) { req, out := c.DeleteStreamingDistributionRequest(input) return out, req.Send() @@ -1781,7 +1827,7 @@ func (c *CloudFront) DeleteStreamingDistributionWithContext(ctx aws.Context, inp return out, req.Send() } -const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2018_11_05" +const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2019_03_26" // GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return @@ -1806,12 +1852,12 @@ const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity20 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentity func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentity, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront/{Id}", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}", } if input == nil { @@ -1841,7 +1887,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFro // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentity func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOriginAccessIdentityInput) (*GetCloudFrontOriginAccessIdentityOutput, error) { req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() @@ -1863,7 +1909,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityWithContext(ctx aws.Contex return out, req.Send() } -const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2018_11_05" +const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2019_03_26" // GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the // client's request for the GetCloudFrontOriginAccessIdentityConfig operation. 
The "output" return @@ -1888,12 +1934,12 @@ const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIden // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetCloudFrontOriginAccessIdentityConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentityConfig func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentityConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront/{Id}/config", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}/config", } if input == nil { @@ -1923,7 +1969,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCl // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetCloudFrontOriginAccessIdentityConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentityConfig func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFrontOriginAccessIdentityConfigInput) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) return out, req.Send() @@ -1945,7 +1991,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigWithContext(ctx aws. return out, req.Send() } -const opGetDistribution = "GetDistribution2018_11_05" +const opGetDistribution = "GetDistribution2019_03_26" // GetDistributionRequest generates a "aws/request.Request" representing the // client's request for the GetDistribution operation. The "output" return @@ -1970,12 +2016,12 @@ const opGetDistribution = "GetDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistribution func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { op := &request.Operation{ Name: opGetDistribution, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distribution/{Id}", + HTTPPath: "/2019-03-26/distribution/{Id}", } if input == nil { @@ -2005,7 +2051,7 @@ func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *r // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistribution func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistributionOutput, error) { req, out := c.GetDistributionRequest(input) return out, req.Send() @@ -2027,7 +2073,7 @@ func (c *CloudFront) GetDistributionWithContext(ctx aws.Context, input *GetDistr return out, req.Send() } -const opGetDistributionConfig = "GetDistributionConfig2018_11_05" +const opGetDistributionConfig = "GetDistributionConfig2019_03_26" // GetDistributionConfigRequest generates a "aws/request.Request" representing the // client's request for the GetDistributionConfig operation. 
The "output" return @@ -2052,12 +2098,12 @@ const opGetDistributionConfig = "GetDistributionConfig2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetDistributionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistributionConfig func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { op := &request.Operation{ Name: opGetDistributionConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distribution/{Id}/config", + HTTPPath: "/2019-03-26/distribution/{Id}/config", } if input == nil { @@ -2087,7 +2133,7 @@ func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigIn // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetDistributionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistributionConfig func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (*GetDistributionConfigOutput, error) { req, out := c.GetDistributionConfigRequest(input) return out, req.Send() @@ -2109,7 +2155,7 @@ func (c *CloudFront) GetDistributionConfigWithContext(ctx aws.Context, input *Ge return out, req.Send() } -const opGetFieldLevelEncryption = "GetFieldLevelEncryption2018_11_05" +const opGetFieldLevelEncryption = "GetFieldLevelEncryption2019_03_26" // GetFieldLevelEncryptionRequest generates a "aws/request.Request" representing the // client's request for the GetFieldLevelEncryption operation. The "output" return @@ -2134,12 +2180,12 @@ const opGetFieldLevelEncryption = "GetFieldLevelEncryption2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryption func (c *CloudFront) GetFieldLevelEncryptionRequest(input *GetFieldLevelEncryptionInput) (req *request.Request, output *GetFieldLevelEncryptionOutput) { op := &request.Operation{ Name: opGetFieldLevelEncryption, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption/{Id}", + HTTPPath: "/2019-03-26/field-level-encryption/{Id}", } if input == nil { @@ -2169,7 +2215,7 @@ func (c *CloudFront) GetFieldLevelEncryptionRequest(input *GetFieldLevelEncrypti // * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" // The specified configuration for field-level encryption doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryption func (c *CloudFront) GetFieldLevelEncryption(input *GetFieldLevelEncryptionInput) (*GetFieldLevelEncryptionOutput, error) { req, out := c.GetFieldLevelEncryptionRequest(input) return out, req.Send() @@ -2191,7 +2237,7 @@ func (c *CloudFront) GetFieldLevelEncryptionWithContext(ctx aws.Context, input * return out, req.Send() } -const opGetFieldLevelEncryptionConfig = "GetFieldLevelEncryptionConfig2018_11_05" +const opGetFieldLevelEncryptionConfig = "GetFieldLevelEncryptionConfig2019_03_26" // GetFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the // client's request for the GetFieldLevelEncryptionConfig operation. 
The "output" return @@ -2216,12 +2262,12 @@ const opGetFieldLevelEncryptionConfig = "GetFieldLevelEncryptionConfig2018_11_05 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionConfig func (c *CloudFront) GetFieldLevelEncryptionConfigRequest(input *GetFieldLevelEncryptionConfigInput) (req *request.Request, output *GetFieldLevelEncryptionConfigOutput) { op := &request.Operation{ Name: opGetFieldLevelEncryptionConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption/{Id}/config", + HTTPPath: "/2019-03-26/field-level-encryption/{Id}/config", } if input == nil { @@ -2251,7 +2297,7 @@ func (c *CloudFront) GetFieldLevelEncryptionConfigRequest(input *GetFieldLevelEn // * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" // The specified configuration for field-level encryption doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionConfig func (c *CloudFront) GetFieldLevelEncryptionConfig(input *GetFieldLevelEncryptionConfigInput) (*GetFieldLevelEncryptionConfigOutput, error) { req, out := c.GetFieldLevelEncryptionConfigRequest(input) return out, req.Send() @@ -2273,7 +2319,7 @@ func (c *CloudFront) GetFieldLevelEncryptionConfigWithContext(ctx aws.Context, i return out, req.Send() } -const opGetFieldLevelEncryptionProfile = "GetFieldLevelEncryptionProfile2018_11_05" +const opGetFieldLevelEncryptionProfile = "GetFieldLevelEncryptionProfile2019_03_26" // GetFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the // client's request for the GetFieldLevelEncryptionProfile operation. The "output" return @@ -2298,12 +2344,12 @@ const opGetFieldLevelEncryptionProfile = "GetFieldLevelEncryptionProfile2018_11_ // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfile func (c *CloudFront) GetFieldLevelEncryptionProfileRequest(input *GetFieldLevelEncryptionProfileInput) (req *request.Request, output *GetFieldLevelEncryptionProfileOutput) { op := &request.Operation{ Name: opGetFieldLevelEncryptionProfile, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption-profile/{Id}", + HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}", } if input == nil { @@ -2333,7 +2379,7 @@ func (c *CloudFront) GetFieldLevelEncryptionProfileRequest(input *GetFieldLevelE // * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" // The specified profile for field-level encryption doesn't exist. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfile func (c *CloudFront) GetFieldLevelEncryptionProfile(input *GetFieldLevelEncryptionProfileInput) (*GetFieldLevelEncryptionProfileOutput, error) { req, out := c.GetFieldLevelEncryptionProfileRequest(input) return out, req.Send() @@ -2355,7 +2401,7 @@ func (c *CloudFront) GetFieldLevelEncryptionProfileWithContext(ctx aws.Context, return out, req.Send() } -const opGetFieldLevelEncryptionProfileConfig = "GetFieldLevelEncryptionProfileConfig2018_11_05" +const opGetFieldLevelEncryptionProfileConfig = "GetFieldLevelEncryptionProfileConfig2019_03_26" // GetFieldLevelEncryptionProfileConfigRequest generates a "aws/request.Request" representing the // client's request for the GetFieldLevelEncryptionProfileConfig operation. The "output" return @@ -2380,12 +2426,12 @@ const opGetFieldLevelEncryptionProfileConfig = "GetFieldLevelEncryptionProfileCo // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionProfileConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfileConfig func (c *CloudFront) GetFieldLevelEncryptionProfileConfigRequest(input *GetFieldLevelEncryptionProfileConfigInput) (req *request.Request, output *GetFieldLevelEncryptionProfileConfigOutput) { op := &request.Operation{ Name: opGetFieldLevelEncryptionProfileConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption-profile/{Id}/config", + HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}/config", } if input == nil { @@ -2415,7 +2461,7 @@ func (c *CloudFront) GetFieldLevelEncryptionProfileConfigRequest(input *GetField // * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" // The specified profile for field-level encryption doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetFieldLevelEncryptionProfileConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfileConfig func (c *CloudFront) GetFieldLevelEncryptionProfileConfig(input *GetFieldLevelEncryptionProfileConfigInput) (*GetFieldLevelEncryptionProfileConfigOutput, error) { req, out := c.GetFieldLevelEncryptionProfileConfigRequest(input) return out, req.Send() @@ -2437,7 +2483,7 @@ func (c *CloudFront) GetFieldLevelEncryptionProfileConfigWithContext(ctx aws.Con return out, req.Send() } -const opGetInvalidation = "GetInvalidation2018_11_05" +const opGetInvalidation = "GetInvalidation2019_03_26" // GetInvalidationRequest generates a "aws/request.Request" representing the // client's request for the GetInvalidation operation. 
The "output" return @@ -2462,12 +2508,12 @@ const opGetInvalidation = "GetInvalidation2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetInvalidation func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { op := &request.Operation{ Name: opGetInvalidation, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distribution/{DistributionId}/invalidation/{Id}", + HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation/{Id}", } if input == nil { @@ -2500,7 +2546,7 @@ func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *r // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetInvalidation func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidationOutput, error) { req, out := c.GetInvalidationRequest(input) return out, req.Send() @@ -2522,7 +2568,7 @@ func (c *CloudFront) GetInvalidationWithContext(ctx aws.Context, input *GetInval return out, req.Send() } -const opGetPublicKey = "GetPublicKey2018_11_05" +const opGetPublicKey = "GetPublicKey2019_03_26" // GetPublicKeyRequest generates a "aws/request.Request" representing the // client's request for the GetPublicKey operation. The "output" return @@ -2547,12 +2593,12 @@ const opGetPublicKey = "GetPublicKey2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetPublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKey func (c *CloudFront) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Request, output *GetPublicKeyOutput) { op := &request.Operation{ Name: opGetPublicKey, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/public-key/{Id}", + HTTPPath: "/2019-03-26/public-key/{Id}", } if input == nil { @@ -2582,7 +2628,7 @@ func (c *CloudFront) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request // * ErrCodeNoSuchPublicKey "NoSuchPublicKey" // The specified public key doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetPublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKey func (c *CloudFront) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) { req, out := c.GetPublicKeyRequest(input) return out, req.Send() @@ -2604,7 +2650,7 @@ func (c *CloudFront) GetPublicKeyWithContext(ctx aws.Context, input *GetPublicKe return out, req.Send() } -const opGetPublicKeyConfig = "GetPublicKeyConfig2018_11_05" +const opGetPublicKeyConfig = "GetPublicKeyConfig2019_03_26" // GetPublicKeyConfigRequest generates a "aws/request.Request" representing the // client's request for the GetPublicKeyConfig operation. 
The "output" return @@ -2629,12 +2675,12 @@ const opGetPublicKeyConfig = "GetPublicKeyConfig2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetPublicKeyConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKeyConfig func (c *CloudFront) GetPublicKeyConfigRequest(input *GetPublicKeyConfigInput) (req *request.Request, output *GetPublicKeyConfigOutput) { op := &request.Operation{ Name: opGetPublicKeyConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/public-key/{Id}/config", + HTTPPath: "/2019-03-26/public-key/{Id}/config", } if input == nil { @@ -2664,7 +2710,7 @@ func (c *CloudFront) GetPublicKeyConfigRequest(input *GetPublicKeyConfigInput) ( // * ErrCodeNoSuchPublicKey "NoSuchPublicKey" // The specified public key doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetPublicKeyConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKeyConfig func (c *CloudFront) GetPublicKeyConfig(input *GetPublicKeyConfigInput) (*GetPublicKeyConfigOutput, error) { req, out := c.GetPublicKeyConfigRequest(input) return out, req.Send() @@ -2686,7 +2732,7 @@ func (c *CloudFront) GetPublicKeyConfigWithContext(ctx aws.Context, input *GetPu return out, req.Send() } -const opGetStreamingDistribution = "GetStreamingDistribution2018_11_05" +const opGetStreamingDistribution = "GetStreamingDistribution2019_03_26" // GetStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the GetStreamingDistribution operation. The "output" return @@ -2711,12 +2757,12 @@ const opGetStreamingDistribution = "GetStreamingDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistribution func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { op := &request.Operation{ Name: opGetStreamingDistribution, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/streaming-distribution/{Id}", + HTTPPath: "/2019-03-26/streaming-distribution/{Id}", } if input == nil { @@ -2747,7 +2793,7 @@ func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistribu // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistribution func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInput) (*GetStreamingDistributionOutput, error) { req, out := c.GetStreamingDistributionRequest(input) return out, req.Send() @@ -2769,7 +2815,7 @@ func (c *CloudFront) GetStreamingDistributionWithContext(ctx aws.Context, input return out, req.Send() } -const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2018_11_05" +const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2019_03_26" // GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the // client's request for the GetStreamingDistributionConfig operation. 
The "output" return @@ -2794,12 +2840,12 @@ const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2018_11_ // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetStreamingDistributionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistributionConfig func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { op := &request.Operation{ Name: opGetStreamingDistributionConfig, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/streaming-distribution/{Id}/config", + HTTPPath: "/2019-03-26/streaming-distribution/{Id}/config", } if input == nil { @@ -2829,7 +2875,7 @@ func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDi // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/GetStreamingDistributionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistributionConfig func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistributionConfigInput) (*GetStreamingDistributionConfigOutput, error) { req, out := c.GetStreamingDistributionConfigRequest(input) return out, req.Send() @@ -2851,7 +2897,7 @@ func (c *CloudFront) GetStreamingDistributionConfigWithContext(ctx aws.Context, return out, req.Send() } -const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2018_11_05" +const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2019_03_26" // ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the // client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return @@ -2876,12 +2922,12 @@ const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdenti // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListCloudFrontOriginAccessIdentities +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListCloudFrontOriginAccessIdentities func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { op := &request.Operation{ Name: opListCloudFrontOriginAccessIdentities, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, @@ -2914,7 +2960,7 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListClou // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListCloudFrontOriginAccessIdentities +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListCloudFrontOriginAccessIdentities func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontOriginAccessIdentitiesInput) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) return out, req.Send() @@ -2947,7 +2993,7 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesWithContext(ctx aws.Con // // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. // pageNum := 0 // err := client.ListCloudFrontOriginAccessIdentitiesPages(params, -// func(page *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { +// func(page *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2979,14 +3025,16 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPagesWithContext(ctx aw }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCloudFrontOriginAccessIdentitiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCloudFrontOriginAccessIdentitiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opListDistributions = "ListDistributions2018_11_05" +const opListDistributions = "ListDistributions2019_03_26" // ListDistributionsRequest generates a "aws/request.Request" representing the // client's request for the ListDistributions operation. The "output" return @@ -3011,12 +3059,12 @@ const opListDistributions = "ListDistributions2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListDistributions +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributions func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { op := &request.Operation{ Name: opListDistributions, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distribution", + HTTPPath: "/2019-03-26/distribution", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"DistributionList.NextMarker"}, @@ -3036,7 +3084,7 @@ func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (re // ListDistributions API operation for Amazon CloudFront. // -// List distributions. +// List CloudFront distributions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3049,7 +3097,7 @@ func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (re // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListDistributions +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributions func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDistributionsOutput, error) { req, out := c.ListDistributionsRequest(input) return out, req.Send() @@ -3082,7 +3130,7 @@ func (c *CloudFront) ListDistributionsWithContext(ctx aws.Context, input *ListDi // // Example iterating over at most 3 pages of a ListDistributions operation. 
// pageNum := 0 // err := client.ListDistributionsPages(params, -// func(page *ListDistributionsOutput, lastPage bool) bool { +// func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3114,14 +3162,16 @@ func (c *CloudFront) ListDistributionsPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDistributionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDistributionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2018_11_05" +const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2019_03_26" // ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the // client's request for the ListDistributionsByWebACLId operation. The "output" return @@ -3146,12 +3196,12 @@ const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListDistributionsByWebACLId +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributionsByWebACLId func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { op := &request.Operation{ Name: opListDistributionsByWebACLId, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distributionsByWebACLId/{WebACLId}", + HTTPPath: "/2019-03-26/distributionsByWebACLId/{WebACLId}", } if input == nil { @@ -3179,8 +3229,9 @@ func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributions // The argument is invalid. // // * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL id specified in the response body is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListDistributionsByWebACLId +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributionsByWebACLId func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebACLIdInput) (*ListDistributionsByWebACLIdOutput, error) { req, out := c.ListDistributionsByWebACLIdRequest(input) return out, req.Send() @@ -3202,7 +3253,7 @@ func (c *CloudFront) ListDistributionsByWebACLIdWithContext(ctx aws.Context, inp return out, req.Send() } -const opListFieldLevelEncryptionConfigs = "ListFieldLevelEncryptionConfigs2018_11_05" +const opListFieldLevelEncryptionConfigs = "ListFieldLevelEncryptionConfigs2019_03_26" // ListFieldLevelEncryptionConfigsRequest generates a "aws/request.Request" representing the // client's request for the ListFieldLevelEncryptionConfigs operation. 
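The paginator rewrite above is a behavioral fix, not just a cleanup: in the old form, `for p.Next() && cont`, p.Next() is evaluated first on every iteration, so one more page was fetched even after the callback had returned false; the new form breaks before fetching again and then surfaces p.Err(). A self-contained sketch of the caller-side contract, with a placeholder stopping condition:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	pageNum := 0
	err := svc.ListDistributionsPages(&cloudfront.ListDistributionsInput{},
		func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool {
			pageNum++
			fmt.Println(len(page.DistributionList.Items), "distributions on page", pageNum)
			// Returning false now stops iteration without another page request.
			return pageNum < 3
		})
	if err != nil {
		log.Fatal(err)
	}
}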
The "output" return @@ -3227,12 +3278,12 @@ const opListFieldLevelEncryptionConfigs = "ListFieldLevelEncryptionConfigs2018_1 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListFieldLevelEncryptionConfigs +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionConfigs func (c *CloudFront) ListFieldLevelEncryptionConfigsRequest(input *ListFieldLevelEncryptionConfigsInput) (req *request.Request, output *ListFieldLevelEncryptionConfigsOutput) { op := &request.Operation{ Name: opListFieldLevelEncryptionConfigs, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption", + HTTPPath: "/2019-03-26/field-level-encryption", } if input == nil { @@ -3260,7 +3311,7 @@ func (c *CloudFront) ListFieldLevelEncryptionConfigsRequest(input *ListFieldLeve // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListFieldLevelEncryptionConfigs +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionConfigs func (c *CloudFront) ListFieldLevelEncryptionConfigs(input *ListFieldLevelEncryptionConfigsInput) (*ListFieldLevelEncryptionConfigsOutput, error) { req, out := c.ListFieldLevelEncryptionConfigsRequest(input) return out, req.Send() @@ -3282,7 +3333,7 @@ func (c *CloudFront) ListFieldLevelEncryptionConfigsWithContext(ctx aws.Context, return out, req.Send() } -const opListFieldLevelEncryptionProfiles = "ListFieldLevelEncryptionProfiles2018_11_05" +const opListFieldLevelEncryptionProfiles = "ListFieldLevelEncryptionProfiles2019_03_26" // ListFieldLevelEncryptionProfilesRequest generates a "aws/request.Request" representing the // client's request for the ListFieldLevelEncryptionProfiles operation. The "output" return @@ -3307,12 +3358,12 @@ const opListFieldLevelEncryptionProfiles = "ListFieldLevelEncryptionProfiles2018 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListFieldLevelEncryptionProfiles +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionProfiles func (c *CloudFront) ListFieldLevelEncryptionProfilesRequest(input *ListFieldLevelEncryptionProfilesInput) (req *request.Request, output *ListFieldLevelEncryptionProfilesOutput) { op := &request.Operation{ Name: opListFieldLevelEncryptionProfiles, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/field-level-encryption-profile", + HTTPPath: "/2019-03-26/field-level-encryption-profile", } if input == nil { @@ -3340,7 +3391,7 @@ func (c *CloudFront) ListFieldLevelEncryptionProfilesRequest(input *ListFieldLev // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListFieldLevelEncryptionProfiles +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionProfiles func (c *CloudFront) ListFieldLevelEncryptionProfiles(input *ListFieldLevelEncryptionProfilesInput) (*ListFieldLevelEncryptionProfilesOutput, error) { req, out := c.ListFieldLevelEncryptionProfilesRequest(input) return out, req.Send() @@ -3362,7 +3413,7 @@ func (c *CloudFront) ListFieldLevelEncryptionProfilesWithContext(ctx aws.Context return out, req.Send() } -const opListInvalidations = "ListInvalidations2018_11_05" +const opListInvalidations = "ListInvalidations2019_03_26" // ListInvalidationsRequest generates a "aws/request.Request" representing the // client's request for the ListInvalidations operation. The "output" return @@ -3387,12 +3438,12 @@ const opListInvalidations = "ListInvalidations2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListInvalidations +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListInvalidations func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { op := &request.Operation{ Name: opListInvalidations, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/distribution/{DistributionId}/invalidation", + HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"InvalidationList.NextMarker"}, @@ -3431,7 +3482,7 @@ func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (re // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListInvalidations +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListInvalidations func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInvalidationsOutput, error) { req, out := c.ListInvalidationsRequest(input) return out, req.Send() @@ -3464,7 +3515,7 @@ func (c *CloudFront) ListInvalidationsWithContext(ctx aws.Context, input *ListIn // // Example iterating over at most 3 pages of a ListInvalidations operation. // pageNum := 0 // err := client.ListInvalidationsPages(params, -// func(page *ListInvalidationsOutput, lastPage bool) bool { +// func(page *cloudfront.ListInvalidationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3496,14 +3547,16 @@ func (c *CloudFront) ListInvalidationsPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInvalidationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInvalidationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opListPublicKeys = "ListPublicKeys2018_11_05" +const opListPublicKeys = "ListPublicKeys2019_03_26" // ListPublicKeysRequest generates a "aws/request.Request" representing the // client's request for the ListPublicKeys operation. 
The "output" return @@ -3528,12 +3581,12 @@ const opListPublicKeys = "ListPublicKeys2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListPublicKeys +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListPublicKeys func (c *CloudFront) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { op := &request.Operation{ Name: opListPublicKeys, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/public-key", + HTTPPath: "/2019-03-26/public-key", } if input == nil { @@ -3560,7 +3613,7 @@ func (c *CloudFront) ListPublicKeysRequest(input *ListPublicKeysInput) (req *req // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListPublicKeys +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListPublicKeys func (c *CloudFront) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { req, out := c.ListPublicKeysRequest(input) return out, req.Send() @@ -3582,7 +3635,7 @@ func (c *CloudFront) ListPublicKeysWithContext(ctx aws.Context, input *ListPubli return out, req.Send() } -const opListStreamingDistributions = "ListStreamingDistributions2018_11_05" +const opListStreamingDistributions = "ListStreamingDistributions2019_03_26" // ListStreamingDistributionsRequest generates a "aws/request.Request" representing the // client's request for the ListStreamingDistributions operation. The "output" return @@ -3607,12 +3660,12 @@ const opListStreamingDistributions = "ListStreamingDistributions2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListStreamingDistributions +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListStreamingDistributions func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { op := &request.Operation{ Name: opListStreamingDistributions, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/streaming-distribution", + HTTPPath: "/2019-03-26/streaming-distribution", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"StreamingDistributionList.NextMarker"}, @@ -3645,7 +3698,7 @@ func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistr // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListStreamingDistributions +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListStreamingDistributions func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistributionsInput) (*ListStreamingDistributionsOutput, error) { req, out := c.ListStreamingDistributionsRequest(input) return out, req.Send() @@ -3678,7 +3731,7 @@ func (c *CloudFront) ListStreamingDistributionsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListStreamingDistributions operation. 
// pageNum := 0 // err := client.ListStreamingDistributionsPages(params, -// func(page *ListStreamingDistributionsOutput, lastPage bool) bool { +// func(page *cloudfront.ListStreamingDistributionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3710,14 +3763,16 @@ func (c *CloudFront) ListStreamingDistributionsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStreamingDistributionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStreamingDistributionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opListTagsForResource = "ListTagsForResource2018_11_05" +const opListTagsForResource = "ListTagsForResource2019_03_26" // ListTagsForResourceRequest generates a "aws/request.Request" representing the // client's request for the ListTagsForResource operation. The "output" return @@ -3742,12 +3797,12 @@ const opListTagsForResource = "ListTagsForResource2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListTagsForResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListTagsForResource func (c *CloudFront) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ Name: opListTagsForResource, HTTPMethod: "GET", - HTTPPath: "/2018-11-05/tagging", + HTTPPath: "/2019-03-26/tagging", } if input == nil { @@ -3778,10 +3833,12 @@ func (c *CloudFront) ListTagsForResourceRequest(input *ListTagsForResourceInput) // The argument is invalid. // // * ErrCodeInvalidTagging "InvalidTagging" +// Tagging specified in the response body is not valid. // // * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/ListTagsForResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListTagsForResource func (c *CloudFront) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) return out, req.Send() @@ -3803,7 +3860,7 @@ func (c *CloudFront) ListTagsForResourceWithContext(ctx aws.Context, input *List return out, req.Send() } -const opTagResource = "TagResource2018_11_05" +const opTagResource = "TagResource2019_03_26" // TagResourceRequest generates a "aws/request.Request" representing the // client's request for the TagResource operation. The "output" return @@ -3828,12 +3885,12 @@ const opTagResource = "TagResource2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/TagResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/TagResource func (c *CloudFront) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { op := &request.Operation{ Name: opTagResource, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/tagging?Operation=Tag", + HTTPPath: "/2019-03-26/tagging?Operation=Tag", } if input == nil { @@ -3865,10 +3922,12 @@ func (c *CloudFront) TagResourceRequest(input *TagResourceInput) (req *request.R // The argument is invalid. // // * ErrCodeInvalidTagging "InvalidTagging" +// Tagging specified in the response body is not valid. // // * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. 
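The tagging operations above (ListTagsForResource, TagResource) take the distribution's ARN in the Resource field rather than its ID. A minimal TagResource sketch; the ARN and tag values are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// Resource is the distribution ARN; Tags wraps the Items list.
	_, err := svc.TagResource(&cloudfront.TagResourceInput{
		Resource: aws.String("arn:aws:cloudfront::123456789012:distribution/EDFDVBD6EXAMPLE"),
		Tags: &cloudfront.Tags{
			Items: []*cloudfront.Tag{
				{Key: aws.String("environment"), Value: aws.String("production")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}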
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/TagResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/TagResource func (c *CloudFront) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) return out, req.Send() @@ -3890,7 +3949,7 @@ func (c *CloudFront) TagResourceWithContext(ctx aws.Context, input *TagResourceI return out, req.Send() } -const opUntagResource = "UntagResource2018_11_05" +const opUntagResource = "UntagResource2019_03_26" // UntagResourceRequest generates a "aws/request.Request" representing the // client's request for the UntagResource operation. The "output" return @@ -3915,12 +3974,12 @@ const opUntagResource = "UntagResource2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UntagResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UntagResource func (c *CloudFront) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { op := &request.Operation{ Name: opUntagResource, HTTPMethod: "POST", - HTTPPath: "/2018-11-05/tagging?Operation=Untag", + HTTPPath: "/2019-03-26/tagging?Operation=Untag", } if input == nil { @@ -3952,10 +4011,12 @@ func (c *CloudFront) UntagResourceRequest(input *UntagResourceInput) (req *reque // The argument is invalid. // // * ErrCodeInvalidTagging "InvalidTagging" +// Tagging specified in the response body is not valid. // // * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UntagResource +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UntagResource func (c *CloudFront) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) return out, req.Send() @@ -3977,7 +4038,7 @@ func (c *CloudFront) UntagResourceWithContext(ctx aws.Context, input *UntagResou return out, req.Send() } -const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2018_11_05" +const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2019_03_26" // UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the UpdateCloudFrontOriginAccessIdentity operation. The "output" return @@ -4002,12 +4063,12 @@ const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIden // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateCloudFrontOriginAccessIdentity func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opUpdateCloudFrontOriginAccessIdentity, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/origin-access-identity/cloudfront/{Id}/config", + HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}/config", } if input == nil { @@ -4057,7 +4118,7 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCl // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateCloudFrontOriginAccessIdentity func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() @@ -4079,7 +4140,7 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityWithContext(ctx aws.Con return out, req.Send() } -const opUpdateDistribution = "UpdateDistribution2018_11_05" +const opUpdateDistribution = "UpdateDistribution2019_03_26" // UpdateDistributionRequest generates a "aws/request.Request" representing the // client's request for the UpdateDistribution operation. The "output" return @@ -4104,12 +4165,12 @@ const opUpdateDistribution = "UpdateDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateDistribution func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { op := &request.Operation{ Name: opUpdateDistribution, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/distribution/{Id}/config", + HTTPPath: "/2019-03-26/distribution/{Id}/config", } if input == nil { @@ -4130,7 +4191,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // API action, follow the steps here to get the current configuration and then // make your updates, to make sure that you include all of the required fields. // To view a summary, see Required Fields for Create Distribution and Update -// Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) +// Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) // in the Amazon CloudFront Developer Guide. // // The update process includes getting the current distribution configuration, @@ -4138,13 +4199,13 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // submitting an UpdateDistribution request to make the updates. // // For information about updating a distribution using the CloudFront console -// instead, see Creating a Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html) +// instead, see Creating a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html) // in the Amazon CloudFront Developer Guide. // // To update a web distribution using the CloudFront API // -// Submit a GetDistributionConfig request to get the current configuration and -// an Etag header for the distribution. +// Submit a GetDistributionConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html) +// request to get the current configuration and an Etag header for the distribution. // // If you update the distribution again, you must get a new Etag header. // @@ -4153,38 +4214,40 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // // When you edit the XML file, be aware of the following: // -// You must strip out the ETag parameter that is returned. 
+// * You must strip out the ETag parameter that is returned. // -// Additional fields are required when you update a distribution. There may -// be fields included in the XML file for features that you haven't configured -// for your distribution. This is expected and required to successfully update -// the distribution. +// * Additional fields are required when you update a distribution. There +// may be fields included in the XML file for features that you haven't configured +// for your distribution. This is expected and required to successfully update +// the distribution. // -// You can't change the value of CallerReference. If you try to change this -// value, CloudFront returns an IllegalUpdate error. +// * You can't change the value of CallerReference. If you try to change +// this value, CloudFront returns an IllegalUpdate error. // -// The new configuration replaces the existing configuration; the values that -// you specify in an UpdateDistribution request are not merged into your existing -// configuration. When you add, delete, or replace values in an element that -// allows multiple values (for example, CNAME), you must specify all of the -// values that you want to appear in the updated distribution. In addition, -// you must update the corresponding Quantity element. +// * The new configuration replaces the existing configuration; the values +// that you specify in an UpdateDistribution request are not merged into +// your existing configuration. When you add, delete, or replace values in +// an element that allows multiple values (for example, CNAME), you must +// specify all of the values that you want to appear in the updated distribution. +// In addition, you must update the corresponding Quantity element. // // Submit an UpdateDistribution request to update the configuration for your // distribution: // -// In the request body, include the XML document that you updated in Step 2. -// The request body must include an XML document with a DistributionConfig element. +// * In the request body, include the XML document that you updated in Step +// 2. The request body must include an XML document with a DistributionConfig +// element. // -// Set the value of the HTTP If-Match header to the value of the ETag header -// that CloudFront returned when you submitted the GetDistributionConfig request -// in Step 1. +// * Set the value of the HTTP If-Match header to the value of the ETag header +// that CloudFront returned when you submitted the GetDistributionConfig +// request in Step 1. // // Review the response to the UpdateDistribution request to confirm that the // configuration was successfully updated. // -// Optional: Submit a GetDistribution request to confirm that your changes have -// propagated. When propagation is complete, the value of Status is Deployed. +// Optional: Submit a GetDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html) +// request to confirm that your changes have propagated. When propagation is +// complete, the value of Status is Deployed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4198,6 +4261,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // Access denied. // // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. 
// // * ErrCodeIllegalUpdate "IllegalUpdate" // Origin and CallerReference cannot be updated. @@ -4227,8 +4291,10 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // a slash (/). // // * ErrCodeInvalidErrorCode "InvalidErrorCode" +// An invalid error code was specified. // // * ErrCodeInvalidResponseCode "InvalidResponseCode" +// A response code specified in the response body is not valid. // // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. @@ -4243,8 +4309,10 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" +// A viewer certificate specified in the response body is not valid. // // * ErrCodeInvalidMinimumProtocolVersion "InvalidMinimumProtocolVersion" +// The minimum protocol version specified is not valid. // // * ErrCodeInvalidRequiredProtocol "InvalidRequiredProtocol" // This operation requires the HTTPS protocol. Ensure that you specify the HTTPS @@ -4275,8 +4343,10 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // when expected. // // * ErrCodeTooManyHeadersInForwardedValues "TooManyHeadersInForwardedValues" +// Your request contains too many headers in forwarded values. // // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" +// The headers specified are not valid for an Amazon S3 origin. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. @@ -4285,18 +4355,25 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // You cannot create anymore custom SSL/TLS certificates. // // * ErrCodeInvalidLocationCode "InvalidLocationCode" +// The location code specified is not valid. // // * ErrCodeInvalidGeoRestrictionParameter "InvalidGeoRestrictionParameter" +// The specified geo restriction parameter is not valid. // // * ErrCodeInvalidTTLOrder "InvalidTTLOrder" +// TTL order specified in the response body is not valid. // // * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL id specified in the response body is not valid. // // * ErrCodeTooManyOriginCustomHeaders "TooManyOriginCustomHeaders" +// Your request contains too many origin custom headers. // // * ErrCodeTooManyQueryStringParameters "TooManyQueryStringParameters" +// Your request contains too many query string parameters. // // * ErrCodeInvalidQueryStringParameters "InvalidQueryStringParameters" +// Query string parameters specified in the response body are not valid. // // * ErrCodeTooManyDistributionsWithLambdaAssociations "TooManyDistributionsWithLambdaAssociations" // Processing your request would cause the maximum number of distributions with @@ -4310,8 +4387,10 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // The specified Lambda function association is invalid. // // * ErrCodeInvalidOriginReadTimeout "InvalidOriginReadTimeout" +// The read timeout specified for the origin is not valid. // // * ErrCodeInvalidOriginKeepaliveTimeout "InvalidOriginKeepaliveTimeout" +// The keep alive timeout specified for the origin is not valid. // // * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" // The specified configuration for field-level encryption doesn't exist. 
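The UpdateDistribution workflow described above (get the current configuration and ETag, edit the returned config in full, resubmit with If-Match) maps directly onto the SDK types. A sketch of that flow, assuming default credentials; the distribution ID and comment are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	id := aws.String("EDFDVBD6EXAMPLE") // hypothetical distribution ID

	// Step 1: fetch the current configuration together with its ETag.
	got, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{Id: id})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: edit only the fields you want to change. The configuration is
	// replaced, not merged, so everything else must stay as returned.
	cfg := got.DistributionConfig
	cfg.Comment = aws.String("updated comment")

	// Step 3: resubmit the whole config with If-Match set to the step-1 ETag;
	// a stale ETag fails with PreconditionFailed.
	if _, err := svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
		Id:                 id,
		IfMatch:            got.ETag,
		DistributionConfig: cfg,
	}); err != nil {
		log.Fatal(err)
	}
}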
@@ -4324,7 +4403,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // The maximum number of distributions have been associated with the specified // configuration for field-level encryption. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateDistribution func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { req, out := c.UpdateDistributionRequest(input) return out, req.Send() @@ -4346,7 +4425,7 @@ func (c *CloudFront) UpdateDistributionWithContext(ctx aws.Context, input *Updat return out, req.Send() } -const opUpdateFieldLevelEncryptionConfig = "UpdateFieldLevelEncryptionConfig2018_11_05" +const opUpdateFieldLevelEncryptionConfig = "UpdateFieldLevelEncryptionConfig2019_03_26" // UpdateFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the // client's request for the UpdateFieldLevelEncryptionConfig operation. The "output" return @@ -4371,12 +4450,12 @@ const opUpdateFieldLevelEncryptionConfig = "UpdateFieldLevelEncryptionConfig2018 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionConfig func (c *CloudFront) UpdateFieldLevelEncryptionConfigRequest(input *UpdateFieldLevelEncryptionConfigInput) (req *request.Request, output *UpdateFieldLevelEncryptionConfigOutput) { op := &request.Operation{ Name: opUpdateFieldLevelEncryptionConfig, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/field-level-encryption/{Id}/config", + HTTPPath: "/2019-03-26/field-level-encryption/{Id}/config", } if input == nil { @@ -4436,7 +4515,7 @@ func (c *CloudFront) UpdateFieldLevelEncryptionConfigRequest(input *UpdateFieldL // * ErrCodeQueryArgProfileEmpty "QueryArgProfileEmpty" // No profile specified for the field-level encryption query argument. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionConfig func (c *CloudFront) UpdateFieldLevelEncryptionConfig(input *UpdateFieldLevelEncryptionConfigInput) (*UpdateFieldLevelEncryptionConfigOutput, error) { req, out := c.UpdateFieldLevelEncryptionConfigRequest(input) return out, req.Send() @@ -4458,7 +4537,7 @@ func (c *CloudFront) UpdateFieldLevelEncryptionConfigWithContext(ctx aws.Context return out, req.Send() } -const opUpdateFieldLevelEncryptionProfile = "UpdateFieldLevelEncryptionProfile2018_11_05" +const opUpdateFieldLevelEncryptionProfile = "UpdateFieldLevelEncryptionProfile2019_03_26" // UpdateFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the // client's request for the UpdateFieldLevelEncryptionProfile operation. 
The "output" return @@ -4483,12 +4562,12 @@ const opUpdateFieldLevelEncryptionProfile = "UpdateFieldLevelEncryptionProfile20 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionProfile func (c *CloudFront) UpdateFieldLevelEncryptionProfileRequest(input *UpdateFieldLevelEncryptionProfileInput) (req *request.Request, output *UpdateFieldLevelEncryptionProfileOutput) { op := &request.Operation{ Name: opUpdateFieldLevelEncryptionProfile, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/field-level-encryption-profile/{Id}/config", + HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}/config", } if input == nil { @@ -4551,7 +4630,7 @@ func (c *CloudFront) UpdateFieldLevelEncryptionProfileRequest(input *UpdateField // The maximum number of field patterns for field-level encryption have been // created. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionProfile func (c *CloudFront) UpdateFieldLevelEncryptionProfile(input *UpdateFieldLevelEncryptionProfileInput) (*UpdateFieldLevelEncryptionProfileOutput, error) { req, out := c.UpdateFieldLevelEncryptionProfileRequest(input) return out, req.Send() @@ -4573,7 +4652,7 @@ func (c *CloudFront) UpdateFieldLevelEncryptionProfileWithContext(ctx aws.Contex return out, req.Send() } -const opUpdatePublicKey = "UpdatePublicKey2018_11_05" +const opUpdatePublicKey = "UpdatePublicKey2019_03_26" // UpdatePublicKeyRequest generates a "aws/request.Request" representing the // client's request for the UpdatePublicKey operation. The "output" return @@ -4598,12 +4677,12 @@ const opUpdatePublicKey = "UpdatePublicKey2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdatePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdatePublicKey func (c *CloudFront) UpdatePublicKeyRequest(input *UpdatePublicKeyInput) (req *request.Request, output *UpdatePublicKeyOutput) { op := &request.Operation{ Name: opUpdatePublicKey, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/public-key/{Id}/config", + HTTPPath: "/2019-03-26/public-key/{Id}/config", } if input == nil { @@ -4650,7 +4729,7 @@ func (c *CloudFront) UpdatePublicKeyRequest(input *UpdatePublicKeyInput) (req *r // The precondition given in one or more of the request-header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdatePublicKey +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdatePublicKey func (c *CloudFront) UpdatePublicKey(input *UpdatePublicKeyInput) (*UpdatePublicKeyOutput, error) { req, out := c.UpdatePublicKeyRequest(input) return out, req.Send() @@ -4672,7 +4751,7 @@ func (c *CloudFront) UpdatePublicKeyWithContext(ctx aws.Context, input *UpdatePu return out, req.Send() } -const opUpdateStreamingDistribution = "UpdateStreamingDistribution2018_11_05" +const opUpdateStreamingDistribution = "UpdateStreamingDistribution2019_03_26" // UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the UpdateStreamingDistribution operation. 
The "output" return @@ -4697,12 +4776,12 @@ const opUpdateStreamingDistribution = "UpdateStreamingDistribution2018_11_05" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateStreamingDistribution func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { op := &request.Operation{ Name: opUpdateStreamingDistribution, HTTPMethod: "PUT", - HTTPPath: "/2018-11-05/streaming-distribution/{Id}/config", + HTTPPath: "/2019-03-26/streaming-distribution/{Id}/config", } if input == nil { @@ -4730,6 +4809,7 @@ func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDi // Access denied. // // * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. // // * ErrCodeIllegalUpdate "IllegalUpdate" // Origin and CallerReference cannot be updated. @@ -4749,6 +4829,7 @@ func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDi // to false. // // * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. // // * ErrCodeInvalidArgument "InvalidArgument" // The argument is invalid. @@ -4765,7 +4846,7 @@ func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDi // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/UpdateStreamingDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateStreamingDistribution func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) { req, out := c.UpdateStreamingDistributionRequest(input) return out, req.Send() @@ -4797,30 +4878,23 @@ func (c *CloudFront) UpdateStreamingDistributionWithContext(ctx aws.Context, inp // are associated with the trusted signer's AWS account. If no KeyPairId element // appears for a Signer, that signer can't create signed URLs. // -// For more information, see Serving Private Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) +// For more information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. type ActiveTrustedSigners struct { _ struct{} `type:"structure"` // Enabled is true if any of the AWS accounts listed in the TrustedSigners complex - // type for this RTMP distribution have active CloudFront key pairs. If not, - // Enabled is false. - // - // For more information, see ActiveTrustedSigners. + // type for this distribution have active CloudFront key pairs. If not, Enabled + // is false. // // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` // A complex type that contains one Signer complex type for each trusted signer // that is specified in the TrustedSigners complex type. - // - // For more information, see ActiveTrustedSigners. 
Items []*Signer `locationNameList:"Signer" type:"list"` - // A complex type that contains one Signer complex type for each trusted signer - // specified in the TrustedSigners complex type. - // - // For more information, see ActiveTrustedSigners. + // The number of trusted signers specified in the TrustedSigners complex type. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` @@ -4854,6 +4928,63 @@ func (s *ActiveTrustedSigners) SetQuantity(v int64) *ActiveTrustedSigners { return s } +// AWS services in China customers must file for an Internet Content Provider +// (ICP) recordal if they want to serve content publicly on an alternate domain +// name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal +// provides the ICP recordal status for CNAMEs associated with distributions. +// The status is returned in the CloudFront response; you can't configure it +// yourself. +// +// For more information about ICP recordals, see Signup, Accounts, and Credentials +// (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) +// in Getting Started with AWS services in China. +type AliasICPRecordal struct { + _ struct{} `type:"structure"` + + // A domain name associated with a distribution. + CNAME *string `type:"string"` + + // The Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus + // is set to APPROVED for all CNAMEs (aliases) in regions outside of China. + // + // The status values returned are the following: + // + // * APPROVED indicates that the associated CNAME has a valid ICP recordal + // number. Multiple CNAMEs can be associated with a distribution, and CNAMEs + // can correspond to different ICP recordals. To be marked as APPROVED, that + // is, valid to use with China region, a CNAME must have one ICP recordal + // number associated with it. + // + // * SUSPENDED indicates that the associated CNAME does not have a valid + // ICP recordal number. + // + // * PENDING indicates that at least one CNAME associated with the distribution + // does not have a valid ICP recordal number. + ICPRecordalStatus *string `type:"string" enum:"ICPRecordalStatus"` +} + +// String returns the string representation +func (s AliasICPRecordal) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasICPRecordal) GoString() string { + return s.String() +} + +// SetCNAME sets the CNAME field's value. +func (s *AliasICPRecordal) SetCNAME(v string) *AliasICPRecordal { + s.CNAME = &v + return s +} + +// SetICPRecordalStatus sets the ICPRecordalStatus field's value. +func (s *AliasICPRecordal) SetICPRecordalStatus(v string) *AliasICPRecordal { + s.ICPRecordalStatus = &v + return s +} + // A complex type that contains information about CNAMEs (alternate domain names), // if any, for this distribution. type Aliases struct { @@ -5008,7 +5139,7 @@ func (s *AllowedMethods) SetQuantity(v int64) *AllowedMethods { // to get objects from one of the origins, but the other origin is never used. // // For the current limit on the number of cache behaviors that you can add to -// a distribution, see Amazon CloudFront Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) +// a distribution, see Amazon CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) // in the AWS General Reference. 
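The new AliasICPRecordal type, like the rest of the generated types, gets fluent setters that return the receiver, so construction can be chained. The service normally populates this type in responses and you only read it; the values below are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	// Chained setters; ICPRecordalStatus takes the enum's string value.
	rec := (&cloudfront.AliasICPRecordal{}).
		SetCNAME("www.example.com.cn").
		SetICPRecordalStatus("APPROVED")
	fmt.Println(rec.GoString())
}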
// // If you don't want to specify any cache behaviors, include only an empty CacheBehaviors @@ -5022,7 +5153,7 @@ func (s *AllowedMethods) SetQuantity(v int64) *AllowedMethods { // configuration and specify all of the cache behaviors that you want to include // in the updated distribution. // -// For more information about cache behaviors, see Cache Behaviors (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) +// For more information about cache behaviors, see Cache Behaviors (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) // in the Amazon CloudFront Developer Guide. type CacheBehavior struct { _ struct{} `type:"structure"` @@ -5046,7 +5177,7 @@ type CacheBehavior struct { // Whether you want CloudFront to automatically compress certain files for this // cache behavior. If so, specify true; if not, specify false. For more information, - // see Serving Compressed Files (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) + // see Serving Compressed Files (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) // in the Amazon CloudFront Developer Guide. Compress *bool `type:"boolean"` @@ -5054,8 +5185,8 @@ type CacheBehavior struct { // before CloudFront forwards another request to your origin to determine whether // the object has been updated. The value that you specify applies only when // your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control - // s-maxage, and Expires to objects. For more information, see Specifying How - // Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. DefaultTTL *int64 `type:"long"` @@ -5077,16 +5208,16 @@ type CacheBehavior struct { // before CloudFront forwards another request to your origin to determine whether // the object has been updated. The value that you specify applies only when // your origin adds HTTP headers such as Cache-Control max-age, Cache-Control - // s-maxage, and Expires to objects. For more information, see Specifying How - // Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. MaxTTL *int64 `type:"long"` // The minimum amount of time that you want objects to stay in CloudFront caches // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. For more information, see Specifying How Long - // Objects and Errors Stay in a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon Amazon CloudFront Developer Guide. + // the object has been updated. 
For more information, see Managing How Long + // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. // // You must specify 0 for MinTTL if you configure CloudFront to forward all // headers to your origin (under Headers, if you specify 1 for Quantity and @@ -5108,8 +5239,8 @@ type CacheBehavior struct { // If the request for an object does not match the path pattern for any cache // behaviors, CloudFront applies the behavior in the default cache behavior. // - // For more information, see Path Pattern (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesPathPattern) - // in the Amazon CloudFront Developer Guide. + // For more information, see Path Pattern (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesPathPattern) + // in the Amazon CloudFront Developer Guide. // // PathPattern is a required field PathPattern *string `type:"string" required:"true"` @@ -5134,8 +5265,8 @@ type CacheBehavior struct { // If you want to require signed URLs in requests for objects in the target // origin that match the PathPattern for this cache behavior, specify true for // Enabled, and specify the applicable values for Quantity and Items. For more - // information, see Serving Private Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) - // in the Amazon Amazon CloudFront Developer Guide. + // information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // in the Amazon CloudFront Developer Guide. // // If you don't want to require signed URLs in requests for objects that match // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. @@ -5157,12 +5288,11 @@ type CacheBehavior struct { // an HTTP status code of 301 (Moved Permanently) to the viewer along with // the HTTPS URL. The viewer then resubmits the request using the new URL. // - // // * https-only: If a viewer sends an HTTP request, CloudFront returns an // HTTP status code of 403 (Forbidden). // // For more information about requiring the HTTPS protocol, see Using an HTTPS - // Connection to Access Your Objects (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) + // Connection to Access Your Objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) // in the Amazon CloudFront Developer Guide. // // The only way to guarantee that viewers retrieve an object that was fetched @@ -5171,8 +5301,8 @@ type CacheBehavior struct { // you clear your objects' cache because cached objects are protocol agnostic. // That means that an edge location will return an object from the cache regardless // of whether the current request protocol matches the protocol used previously. - // For more information, see Specifying How Long Objects and Errors Stay in - // a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // For more information, see Managing How Long Content Stays in an Edge Cache + // (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. 
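The TTL and protocol fields documented above fit together like this; a sketch showing only those fields, with illustrative values (a real CacheBehavior also needs its other required fields, such as TargetOriginId, ForwardedValues, and TrustedSigners):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	// TTLs are in seconds. MinTTL must be 0 when all headers are forwarded,
	// since every request is then passed to the origin.
	cb := &cloudfront.CacheBehavior{
		PathPattern:          aws.String("images/*"),
		MinTTL:               aws.Int64(0),
		DefaultTTL:           aws.Int64(86400),    // one day
		MaxTTL:               aws.Int64(31536000), // one year
		ViewerProtocolPolicy: aws.String("redirect-to-https"),
	}
	fmt.Println(cb)
}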
// // ViewerProtocolPolicy is a required field @@ -5618,8 +5748,7 @@ func (s *ContentTypeProfiles) SetQuantity(v int64) *ContentTypeProfiles { // A complex type that specifies whether you want CloudFront to forward cookies // to the origin and, if so, which ones. For more information about forwarding -// cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) +// cookies to the origin, see Caching Content Based on Request Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) // in the Amazon CloudFront Developer Guide. type CookieNames struct { _ struct{} `type:"structure"` @@ -5672,8 +5801,7 @@ func (s *CookieNames) SetQuantity(v int64) *CookieNames { // A complex type that specifies whether you want CloudFront to forward cookies // to the origin and, if so, which ones. For more information about forwarding -// cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) +// cookies to the origin, see Caching Content Based on Cookies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) // in the Amazon CloudFront Developer Guide. type CookiePreference struct { _ struct{} `type:"structure"` @@ -5699,7 +5827,7 @@ type CookiePreference struct { // deletes them automatically. // // For the current limit on the number of cookie names that you can whitelist - // for each cache behavior, see Amazon CloudFront Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) + // for each cache behavior, see CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/xrefaws_service_limits.html#limits_cloudfront) // in the AWS General Reference. WhitelistedNames *CookieNames `type:"structure"` } @@ -5744,14 +5872,19 @@ func (s *CookiePreference) SetWhitelistedNames(v *CookieNames) *CookiePreference return s } -// The request to create a new origin access identity. +// The request to create a new origin access identity (OAI). An origin access +// identity is a special CloudFront user that you can associate with Amazon +// S3 origins, so that you can secure all or just some of your Amazon S3 content. +// For more information, see Restricting Access to Amazon S3 Content by Using +// an Origin Access Identity (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) +// in the Amazon CloudFront Developer Guide. type CreateCloudFrontOriginAccessIdentityInput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + _ struct{} `locationName:"CreateCloudFrontOriginAccessIdentityRequest" type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` // The current configuration information for the identity. 
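// Illustrative sketch, not part of the vendored change: issuing the OAI
// creation request described above. The CallerReference must be unique per
// request; the value below is a placeholder.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func createOAI() (string, error) {
	svc := cloudfront.New(session.Must(session.NewSession()))
	out, err := svc.CreateCloudFrontOriginAccessIdentity(&cloudfront.CreateCloudFrontOriginAccessIdentityInput{
		CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{
			CallerReference: aws.String("example-oai-001"), // any unique string
			Comment:         aws.String("OAI for restricting S3 access"),
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.CloudFrontOriginAccessIdentity.Id), nil
}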
// // CloudFrontOriginAccessIdentityConfig is a required field - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -5833,12 +5966,12 @@ func (s *CreateCloudFrontOriginAccessIdentityOutput) SetLocation(v string) *Crea // The request to create a new distribution. type CreateDistributionInput struct { - _ struct{} `type:"structure" payload:"DistributionConfig"` + _ struct{} `locationName:"CreateDistributionRequest" type:"structure" payload:"DistributionConfig"` // The distribution's configuration information. // // DistributionConfig is a required field - DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -5920,12 +6053,12 @@ func (s *CreateDistributionOutput) SetLocation(v string) *CreateDistributionOutp // The request to create a new distribution with tags. type CreateDistributionWithTagsInput struct { - _ struct{} `type:"structure" payload:"DistributionConfigWithTags"` + _ struct{} `locationName:"CreateDistributionWithTagsRequest" type:"structure" payload:"DistributionConfigWithTags"` // The distribution's configuration information. // // DistributionConfigWithTags is a required field - DistributionConfigWithTags *DistributionConfigWithTags `locationName:"DistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + DistributionConfigWithTags *DistributionConfigWithTags `locationName:"DistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6006,12 +6139,12 @@ func (s *CreateDistributionWithTagsOutput) SetLocation(v string) *CreateDistribu } type CreateFieldLevelEncryptionConfigInput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionConfig"` + _ struct{} `locationName:"CreateFieldLevelEncryptionConfigRequest" type:"structure" payload:"FieldLevelEncryptionConfig"` // The request to create a new field-level encryption configuration. 
// // FieldLevelEncryptionConfig is a required field - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6092,12 +6225,12 @@ func (s *CreateFieldLevelEncryptionConfigOutput) SetLocation(v string) *CreateFi } type CreateFieldLevelEncryptionProfileInput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileConfig"` + _ struct{} `locationName:"CreateFieldLevelEncryptionProfileRequest" type:"structure" payload:"FieldLevelEncryptionProfileConfig"` // The request to create a field-level encryption profile. // // FieldLevelEncryptionProfileConfig is a required field - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6178,7 +6311,7 @@ func (s *CreateFieldLevelEncryptionProfileOutput) SetLocation(v string) *CreateF // The request to create an invalidation. type CreateInvalidationInput struct { - _ struct{} `type:"structure" payload:"InvalidationBatch"` + _ struct{} `locationName:"CreateInvalidationRequest" type:"structure" payload:"InvalidationBatch"` // The distribution's id. // @@ -6188,7 +6321,7 @@ type CreateInvalidationInput struct { // The batch information for the invalidation. // // InvalidationBatch is a required field - InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6272,12 +6405,12 @@ func (s *CreateInvalidationOutput) SetLocation(v string) *CreateInvalidationOutp } type CreatePublicKeyInput struct { - _ struct{} `type:"structure" payload:"PublicKeyConfig"` + _ struct{} `locationName:"CreatePublicKeyRequest" type:"structure" payload:"PublicKeyConfig"` // The request to add a public key to CloudFront. // // PublicKeyConfig is a required field - PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6358,12 +6491,12 @@ func (s *CreatePublicKeyOutput) SetPublicKey(v *PublicKey) *CreatePublicKeyOutpu // The request to create a new streaming distribution. type CreateStreamingDistributionInput struct { - _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + _ struct{} `locationName:"CreateStreamingDistributionRequest" type:"structure" payload:"StreamingDistributionConfig"` // The streaming distribution's configuration information. 
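// Illustrative sketch, not part of the vendored change: the CreateInvalidation
// request from the hunk above, with Paths.Quantity kept in sync with Items as
// the API requires. The distribution ID is supplied by the caller.
import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func invalidatePaths(distributionID string, paths []string) error {
	svc := cloudfront.New(session.Must(session.NewSession()))
	items := aws.StringSlice(paths)
	_, err := svc.CreateInvalidation(&cloudfront.CreateInvalidationInput{
		DistributionId: aws.String(distributionID),
		InvalidationBatch: &cloudfront.InvalidationBatch{
			CallerReference: aws.String(fmt.Sprintf("inv-%d", time.Now().UnixNano())), // must be unique per batch
			Paths: &cloudfront.Paths{
				Quantity: aws.Int64(int64(len(items))), // must match len(Items)
				Items:    items,
			},
		},
	})
	return err
}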
// // StreamingDistributionConfig is a required field - StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6445,12 +6578,12 @@ func (s *CreateStreamingDistributionOutput) SetStreamingDistribution(v *Streamin // The request to create a new streaming distribution with tags. type CreateStreamingDistributionWithTagsInput struct { - _ struct{} `type:"structure" payload:"StreamingDistributionConfigWithTags"` + _ struct{} `locationName:"CreateStreamingDistributionWithTagsRequest" type:"structure" payload:"StreamingDistributionConfigWithTags"` // The streaming distribution's configuration information. // // StreamingDistributionConfigWithTags is a required field - StreamingDistributionConfigWithTags *StreamingDistributionConfigWithTags `locationName:"StreamingDistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + StreamingDistributionConfigWithTags *StreamingDistributionConfigWithTags `locationName:"StreamingDistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -6495,7 +6628,7 @@ type CreateStreamingDistributionWithTagsOutput struct { ETag *string `location:"header" locationName:"ETag" type:"string"` // The fully qualified URI of the new streaming distribution resource just created. - // For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. + // For example:https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. Location *string `location:"header" locationName:"Location" type:"string"` // The streaming distribution's information. @@ -6535,11 +6668,10 @@ func (s *CreateStreamingDistributionWithTagsOutput) SetStreamingDistribution(v * // * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range // with custom error messages before returning the response to the viewer. // -// // * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. // // For more information about custom error pages, see Customizing Error Responses -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) // in the Amazon CloudFront Developer Guide. type CustomErrorResponse struct { _ struct{} `type:"structure"` @@ -6552,7 +6684,7 @@ type CustomErrorResponse struct { // If you don't want to specify a value, include an empty element, , // in the XML document. // - // For more information, see Customizing Error Responses (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) + // For more information, see Customizing Error Responses (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) // in the Amazon CloudFront Developer Guide. ErrorCachingMinTTL *int64 `type:"long"` @@ -6596,7 +6728,6 @@ type CustomErrorResponse struct { // include a cache behavior for which the path pattern routes requests for // your custom error pages to that location, for example, /4xx-errors/*. 
// - // // * The value of TargetOriginId specifies the value of the ID element for // the origin that contains your custom error pages. // @@ -6666,7 +6797,7 @@ func (s *CustomErrorResponse) SetResponsePagePath(v string) *CustomErrorResponse // * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. // // For more information about custom error pages, see Customizing Error Responses -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) // in the Amazon CloudFront Developer Guide. type CustomErrorResponses struct { _ struct{} `type:"structure"` @@ -6788,7 +6919,7 @@ func (s *CustomHeaders) SetQuantity(v int64) *CustomHeaders { return s } -// A customer origin or an Amazon S3 bucket configured as a website endpoint. +// A custom origin or an Amazon S3 bucket configured as a website endpoint. type CustomOriginConfig struct { _ struct{} `type:"structure"` @@ -6925,7 +7056,7 @@ type DefaultCacheBehavior struct { // Whether you want CloudFront to automatically compress certain files for this // cache behavior. If so, specify true; if not, specify false. For more information, - // see Serving Compressed Files (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) + // see Serving Compressed Files (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) // in the Amazon CloudFront Developer Guide. Compress *bool `type:"boolean"` @@ -6933,8 +7064,8 @@ type DefaultCacheBehavior struct { // before CloudFront forwards another request to your origin to determine whether // the object has been updated. The value that you specify applies only when // your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control - // s-maxage, and Expires to objects. For more information, see Specifying How - // Long Objects and Errors Stay in a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. DefaultTTL *int64 `type:"long"` @@ -6952,13 +7083,20 @@ type DefaultCacheBehavior struct { // a cache behavior. LambdaFunctionAssociations *LambdaFunctionAssociations `type:"structure"` + // The maximum amount of time that you want objects to stay in CloudFront caches + // before CloudFront forwards another request to your origin to determine whether + // the object has been updated. The value that you specify applies only when + // your origin adds HTTP headers such as Cache-Control max-age, Cache-Control + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. MaxTTL *int64 `type:"long"` // The minimum amount of time that you want objects to stay in CloudFront caches // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. 
For more information, see Specifying How Long - // Objects and Errors Stay in a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon Amazon CloudFront Developer Guide. + // the object has been updated. For more information, see Managing How Long + // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. // // You must specify 0 for MinTTL if you configure CloudFront to forward all // headers to your origin (under Headers, if you specify 1 for Quantity and @@ -6987,8 +7125,8 @@ type DefaultCacheBehavior struct { // If you want to require signed URLs in requests for objects in the target // origin that match the PathPattern for this cache behavior, specify true for // Enabled, and specify the applicable values for Quantity and Items. For more - // information, see Serving Private Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) - // in the Amazon Amazon CloudFront Developer Guide. + // information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // in the Amazon CloudFront Developer Guide. // // If you don't want to require signed URLs in requests for objects that match // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. @@ -7014,7 +7152,7 @@ type DefaultCacheBehavior struct { // HTTP status code of 403 (Forbidden). // // For more information about requiring the HTTPS protocol, see Using an HTTPS - // Connection to Access Your Objects (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) + // Connection to Access Your Objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) // in the Amazon CloudFront Developer Guide. // // The only way to guarantee that viewers retrieve an object that was fetched @@ -7023,8 +7161,8 @@ type DefaultCacheBehavior struct { // you clear your objects' cache because cached objects are protocol agnostic. // That means that an edge location will return an object from the cache regardless // of whether the current request protocol matches the protocol used previously. - // For more information, see Specifying How Long Objects and Errors Stay in - // a CloudFront Edge Cache (Expiration) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // For more information, see Managing How Long Content Stays in an Edge Cache + // (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. // // ViewerProtocolPolicy is a required field @@ -7160,7 +7298,7 @@ func (s *DefaultCacheBehavior) SetViewerProtocolPolicy(v string) *DefaultCacheBe // Deletes a origin access identity. type DeleteCloudFrontOriginAccessIdentityInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteCloudFrontOriginAccessIdentityRequest" type:"structure"` // The origin access identity's ID. // @@ -7257,10 +7395,10 @@ func (s DeleteCloudFrontOriginAccessIdentityOutput) GoString() string { // distribution was successfully deleted. 
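// Illustrative sketch, not part of the vendored change: the delete flow implied
// by DeleteDistributionInput. The IfMatch field (the ETag from the most recent
// GET) is part of this input in the SDK, though not shown in this hunk; the
// distribution must already be disabled for the delete to succeed.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func deleteDistribution(id string) error {
	svc := cloudfront.New(session.Must(session.NewSession()))
	cfg, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{Id: aws.String(id)})
	if err != nil {
		return err
	}
	_, err = svc.DeleteDistribution(&cloudfront.DeleteDistributionInput{
		Id:      aws.String(id),
		IfMatch: cfg.ETag, // ETag returned by the GET, passed back as If-Match
	})
	return err
}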
// // For information about deleting a distribution using the CloudFront console, -// see Deleting a Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) +// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) // in the Amazon CloudFront Developer Guide. type DeleteDistributionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteDistributionRequest" type:"structure"` // The distribution ID. // @@ -7325,7 +7463,7 @@ func (s DeleteDistributionOutput) GoString() string { } type DeleteFieldLevelEncryptionConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteFieldLevelEncryptionConfigRequest" type:"structure"` // The ID of the configuration you want to delete from CloudFront. // @@ -7390,7 +7528,7 @@ func (s DeleteFieldLevelEncryptionConfigOutput) GoString() string { } type DeleteFieldLevelEncryptionProfileInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteFieldLevelEncryptionProfileRequest" type:"structure"` // Request the ID of the profile you want to delete from CloudFront. // @@ -7455,7 +7593,7 @@ func (s DeleteFieldLevelEncryptionProfileOutput) GoString() string { } type DeletePublicKeyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeletePublicKeyRequest" type:"structure"` // The ID of the public key you want to remove from CloudFront. // @@ -7521,7 +7659,7 @@ func (s DeletePublicKeyOutput) GoString() string { // The request to delete a streaming distribution. type DeleteStreamingDistributionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteStreamingDistributionRequest" type:"structure"` // The distribution ID. // @@ -7585,7 +7723,8 @@ func (s DeleteStreamingDistributionOutput) GoString() string { return s.String() } -// The distribution's information. +// A distribution tells CloudFront where you want content to be delivered from, +// and the details about how to track and manage content delivery. type Distribution struct { _ struct{} `type:"structure"` @@ -7607,6 +7746,16 @@ type Distribution struct { // ActiveTrustedSigners is a required field ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` + // AWS services in China customers must file for an Internet Content Provider + // (ICP) recordal if they want to serve content publicly on an alternate domain + // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal + // provides the ICP recordal status for CNAMEs associated with distributions. + // + // For more information about ICP recordals, see Signup, Accounts, and Credentials + // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) + // in Getting Started with AWS services in China. + AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` + // The current configuration information for the distribution. Send a GET request // to the /CloudFront API version/distribution ID/config resource. // @@ -7663,6 +7812,12 @@ func (s *Distribution) SetActiveTrustedSigners(v *ActiveTrustedSigners) *Distrib return s } +// SetAliasICPRecordals sets the AliasICPRecordals field's value. +func (s *Distribution) SetAliasICPRecordals(v []*AliasICPRecordal) *Distribution { + s.AliasICPRecordals = v + return s +} + // SetDistributionConfig sets the DistributionConfig field's value. 
func (s *Distribution) SetDistributionConfig(v *DistributionConfig) *Distribution { s.DistributionConfig = v @@ -7734,7 +7889,7 @@ type DistributionConfig struct { // the new comment. // // Comment is a required field - Comment *string `type:"string" required:"true"` + Comment *string `type:"string" required:"true" sensitive:"true"` // A complex type that controls the following: // @@ -7744,7 +7899,7 @@ type DistributionConfig struct { // * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. // // For more information about custom error pages, see Customizing Error Responses - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) // in the Amazon CloudFront Developer Guide. CustomErrorResponses *CustomErrorResponses `type:"structure"` @@ -7773,7 +7928,7 @@ type DistributionConfig struct { // and specify the new object. // // For more information about the default root object, see Creating a Default - // Root Object (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) + // Root Object (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) // in the Amazon CloudFront Developer Guide. DefaultRootObject *string `type:"string"` @@ -7808,7 +7963,7 @@ type DistributionConfig struct { // that can access your content, don't enable IPv6. If you want to restrict // access to some content by IP address and not restrict access to other content // (or restrict access but not by IP address), you can create two distributions. - // For more information, see Creating a Signed URL Using a Custom Policy (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) + // For more information, see Creating a Signed URL Using a Custom Policy (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) // in the Amazon CloudFront Developer Guide. // // If you're using an Amazon Route 53 alias resource record set to route traffic @@ -7820,7 +7975,7 @@ type DistributionConfig struct { // * You're using alternate domain names in the URLs for your objects // // For more information, see Routing Traffic to an Amazon CloudFront Web Distribution - // by Using Your Domain Name (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) + // by Using Your Domain Name (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) // in the Amazon Route 53 Developer Guide. // // If you created a CNAME resource record set, either with Amazon Route 53 or @@ -7831,7 +7986,7 @@ type DistributionConfig struct { // A complex type that controls whether access logs are written for the distribution. // - // For more information about logging, see Access Logs (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) + // For more information about logging, see Access Logs (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) // in the Amazon CloudFront Developer Guide. Logging *LoggingConfig `type:"structure"` @@ -7854,10 +8009,10 @@ type DistributionConfig struct { // performance. 
// // For more information about price classes, see Choosing the Price Class for - // a CloudFront Distribution (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) + // a CloudFront Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) // in the Amazon CloudFront Developer Guide. For information about CloudFront // pricing, including how price classes (such as Price Class 100) map to CloudFront - // regions, see Amazon CloudFront Pricing (https://aws.amazon.com/cloudfront/pricing/). + // regions, see Amazon CloudFront Pricing (http://aws.amazon.com/cloudfront/pricing/). // For price class information, scroll down to see the table at the bottom of // the page. PriceClass *string `type:"string" enum:"PriceClass"` @@ -7866,92 +8021,10 @@ type DistributionConfig struct { // of your content. Restrictions *Restrictions `type:"structure"` - // A complex type that specifies the following: - // - // * Whether you want viewers to use HTTP or HTTPS to request your objects. - // - // * If you want viewers to use HTTPS, whether you're using an alternate - // domain name such as example.com or the CloudFront domain name for your - // distribution, such as d111111abcdef8.cloudfront.net. - // - // * If you're using an alternate domain name, whether AWS Certificate Manager - // (ACM) provided the certificate, or you purchased a certificate from a - // third-party certificate authority and imported it into ACM or uploaded - // it to the IAM certificate store. - // - // You must specify only one of the following values: - // - // * ViewerCertificate$ACMCertificateArn - // - // * ViewerCertificate$IAMCertificateId - // - // * ViewerCertificate$CloudFrontDefaultCertificate - // - // Don't specify false for CloudFrontDefaultCertificate. - // - // If you want viewers to use HTTP instead of HTTPS to request your objects: - // Specify the following value: - // - // true - // - // In addition, specify allow-all for ViewerProtocolPolicy for all of your cache - // behaviors. - // - // If you want viewers to use HTTPS to request your objects: Choose the type - // of certificate that you want to use based on whether you're using an alternate - // domain name for your objects or the CloudFront domain name: - // - // * If you're using an alternate domain name, such as example.com: Specify - // one of the following values, depending on whether ACM provided your certificate - // or you purchased your certificate from third-party certificate authority: - // - // ARN for ACM SSL/TLS certificate where - // ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate - // that you want to use for this distribution. - // - // IAM certificate ID where IAM certificate - // ID is the ID that IAM returned when you added the certificate to the IAM - // certificate store. - // - // If you specify ACMCertificateArn or IAMCertificateId, you must also specify - // a value for SSLSupportMethod. - // - // If you choose to use an ACM certificate or a certificate in the IAM certificate - // store, we recommend that you use only an alternate domain name in your - // object URLs (https://example.com/logo.jpg). If you use the domain name - // that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) - // and the viewer supports SNI, then CloudFront behaves normally. 
However, - // if the browser does not support SNI, the user's experience depends on - // the value that you choose for SSLSupportMethod: - // - // vip: The viewer displays a warning because there is a mismatch between the - // CloudFront domain name and the domain name in your SSL/TLS certificate. - // - // sni-only: CloudFront drops the connection with the browser without returning - // the object. - // - // * If you're using the CloudFront domain name for your distribution, such - // as d111111abcdef8.cloudfront.net: Specify the following value: - // - // true - // - // If you want viewers to use HTTPS, you must also specify one of the following - // values in your cache behaviors: - // - // * https-only - // - // * redirect-to-https - // - // You can also optionally require that CloudFront use HTTPS to communicate - // with your origin by specifying one of the following values for the applicable - // origins: - // - // * https-only - // - // * match-viewer - // - // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) - // in the Amazon CloudFront Developer Guide. + // A complex type that specifies whether you want viewers to use HTTP or HTTPS + // to request your objects, whether you're using an alternate domain name with + // HTTPS, and if so, if you're using AWS Certificate Manager (ACM) or a third-party + // certificate authority. ViewerCertificate *ViewerCertificate `type:"structure"` // A unique identifier that specifies the AWS WAF web ACL, if any, to associate @@ -8302,6 +8375,16 @@ type DistributionSummary struct { // ARN is a required field ARN *string `type:"string" required:"true"` + // AWS services in China customers must file for an Internet Content Provider + // (ICP) recordal if they want to serve content publicly on an alternate domain + // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal + // provides the ICP recordal status for CNAMEs associated with distributions. + // + // For more information about ICP recordals, see Signup, Accounts, and Credentials + // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) + // in Getting Started with AWS services in China. + AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` + // A complex type that contains information about CNAMEs (alternate domain names), // if any, for this distribution. // @@ -8389,92 +8472,10 @@ type DistributionSummary struct { // Status is a required field Status *string `type:"string" required:"true"` - // A complex type that specifies the following: - // - // * Whether you want viewers to use HTTP or HTTPS to request your objects. - // - // * If you want viewers to use HTTPS, whether you're using an alternate - // domain name such as example.com or the CloudFront domain name for your - // distribution, such as d111111abcdef8.cloudfront.net. - // - // * If you're using an alternate domain name, whether AWS Certificate Manager - // (ACM) provided the certificate, or you purchased a certificate from a - // third-party certificate authority and imported it into ACM or uploaded - // it to the IAM certificate store. - // - // You must specify only one of the following values: - // - // * ViewerCertificate$ACMCertificateArn - // - // * ViewerCertificate$IAMCertificateId - // - // * ViewerCertificate$CloudFrontDefaultCertificate - // - // Don't specify false for CloudFrontDefaultCertificate. 
- // - // If you want viewers to use HTTP instead of HTTPS to request your objects: - // Specify the following value: - // - // true - // - // In addition, specify allow-all for ViewerProtocolPolicy for all of your cache - // behaviors. - // - // If you want viewers to use HTTPS to request your objects: Choose the type - // of certificate that you want to use based on whether you're using an alternate - // domain name for your objects or the CloudFront domain name: - // - // * If you're using an alternate domain name, such as example.com: Specify - // one of the following values, depending on whether ACM provided your certificate - // or you purchased your certificate from third-party certificate authority: - // - // ARN for ACM SSL/TLS certificate where - // ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate - // that you want to use for this distribution. - // - // IAM certificate ID where IAM certificate - // ID is the ID that IAM returned when you added the certificate to the IAM - // certificate store. - // - // If you specify ACMCertificateArn or IAMCertificateId, you must also specify - // a value for SSLSupportMethod. - // - // If you choose to use an ACM certificate or a certificate in the IAM certificate - // store, we recommend that you use only an alternate domain name in your - // object URLs (https://example.com/logo.jpg). If you use the domain name - // that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) - // and the viewer supports SNI, then CloudFront behaves normally. However, - // if the browser does not support SNI, the user's experience depends on - // the value that you choose for SSLSupportMethod: - // - // vip: The viewer displays a warning because there is a mismatch between the - // CloudFront domain name and the domain name in your SSL/TLS certificate. - // - // sni-only: CloudFront drops the connection with the browser without returning - // the object. - // - // * If you're using the CloudFront domain name for your distribution, such - // as d111111abcdef8.cloudfront.net: Specify the following value: - // - // true - // - // If you want viewers to use HTTPS, you must also specify one of the following - // values in your cache behaviors: - // - // * https-only - // - // * redirect-to-https - // - // You can also optionally require that CloudFront use HTTPS to communicate - // with your origin by specifying one of the following values for the applicable - // origins: - // - // * https-only - // - // * match-viewer - // - // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) - // in the Amazon CloudFront Developer Guide. + // A complex type that specifies whether you want viewers to use HTTP or HTTPS + // to request your objects, whether you're using an alternate domain name with + // HTTPS, and if so, if you're using AWS Certificate Manager (ACM) or a third-party + // certificate authority. // // ViewerCertificate is a required field ViewerCertificate *ViewerCertificate `type:"structure" required:"true"` @@ -8501,6 +8502,12 @@ func (s *DistributionSummary) SetARN(v string) *DistributionSummary { return s } +// SetAliasICPRecordals sets the AliasICPRecordals field's value. +func (s *DistributionSummary) SetAliasICPRecordals(v []*AliasICPRecordal) *DistributionSummary { + s.AliasICPRecordals = v + return s +} + // SetAliases sets the Aliases field's value. 
func (s *DistributionSummary) SetAliases(v *Aliases) *DistributionSummary { s.Aliases = v @@ -9321,14 +9328,19 @@ type ForwardedValues struct { // A complex type that specifies whether you want CloudFront to forward cookies // to the origin and, if so, which ones. For more information about forwarding // cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) // in the Amazon CloudFront Developer Guide. // // Cookies is a required field Cookies *CookiePreference `type:"structure" required:"true"` // A complex type that specifies the Headers, if any, that you want CloudFront - // to base caching on for this cache behavior. + // to forward to the origin for this cache behavior (whitelisted headers). For + // the headers that you specify, CloudFront also caches separate versions of + // a specified object that is based on the header values in viewer requests. + // + // For more information, see Caching Content Based on Request Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) + // in the Amazon CloudFront Developer Guide. Headers *Headers `type:"structure"` // Indicates whether you want CloudFront to forward query strings to the origin @@ -9352,7 +9364,7 @@ type ForwardedValues struct { // parameters. // // For more information, see Configuring CloudFront to Cache Based on Query - // String Parameters (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/QueryStringParameters.html) + // String Parameters (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/QueryStringParameters.html) // in the Amazon CloudFront Developer Guide. // // QueryString is a required field @@ -9516,9 +9528,9 @@ func (s *GeoRestriction) SetRestrictionType(v string) *GeoRestriction { } // The origin access identity's configuration information. For more information, -// see CloudFrontOriginAccessIdentityConfigComplexType. +// see CloudFrontOriginAccessIdentityConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CloudFrontOriginAccessIdentityConfig.html). type GetCloudFrontOriginAccessIdentityConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityConfigRequest" type:"structure"` // The identity's ID. // @@ -9593,7 +9605,7 @@ func (s *GetCloudFrontOriginAccessIdentityConfigOutput) SetETag(v string) *GetCl // The request to get an origin access identity's information. type GetCloudFrontOriginAccessIdentityInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityRequest" type:"structure"` // The identity's ID. // @@ -9669,9 +9681,10 @@ func (s *GetCloudFrontOriginAccessIdentityOutput) SetETag(v string) *GetCloudFro // The request to get a distribution configuration. type GetDistributionConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetDistributionConfigRequest" type:"structure"` - // The distribution's ID. + // The distribution's ID. If the ID is empty, an empty distribution configuration + // is returned. // // Id is a required field Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` @@ -9744,9 +9757,10 @@ func (s *GetDistributionConfigOutput) SetETag(v string) *GetDistributionConfigOu // The request to get a distribution's information. 
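// Illustrative sketch, not part of the vendored change: a ForwardedValues value
// matching the Cookies/Headers documentation in the hunk above, whitelisting
// one cookie and one header. Names are placeholders.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func exampleForwardedValues() *cloudfront.ForwardedValues {
	return &cloudfront.ForwardedValues{
		QueryString: aws.Bool(true), // required: whether query strings join the cache key
		Cookies: &cloudfront.CookiePreference{ // required
			Forward: aws.String("whitelist"), // none | whitelist | all
			WhitelistedNames: &cloudfront.CookieNames{
				Quantity: aws.Int64(1), // must match len(Items)
				Items:    aws.StringSlice([]string{"session-id"}),
			},
		},
		Headers: &cloudfront.Headers{ // whitelisted headers are forwarded and cached on
			Quantity: aws.Int64(1),
			Items:    aws.StringSlice([]string{"Origin"}),
		},
	}
}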
type GetDistributionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetDistributionRequest" type:"structure"` - // The distribution's ID. + // The distribution's ID. If the ID is empty, an empty distribution configuration + // is returned. // // Id is a required field Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` @@ -9818,7 +9832,7 @@ func (s *GetDistributionOutput) SetETag(v string) *GetDistributionOutput { } type GetFieldLevelEncryptionConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetFieldLevelEncryptionConfigRequest" type:"structure"` // Request the ID for the field-level encryption configuration information. // @@ -9892,7 +9906,7 @@ func (s *GetFieldLevelEncryptionConfigOutput) SetFieldLevelEncryptionConfig(v *F } type GetFieldLevelEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetFieldLevelEncryptionRequest" type:"structure"` // Request the ID for the field-level encryption configuration information. // @@ -9966,7 +9980,7 @@ func (s *GetFieldLevelEncryptionOutput) SetFieldLevelEncryption(v *FieldLevelEnc } type GetFieldLevelEncryptionProfileConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetFieldLevelEncryptionProfileConfigRequest" type:"structure"` // Get the ID for the field-level encryption profile configuration information. // @@ -10040,7 +10054,7 @@ func (s *GetFieldLevelEncryptionProfileConfigOutput) SetFieldLevelEncryptionProf } type GetFieldLevelEncryptionProfileInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetFieldLevelEncryptionProfileRequest" type:"structure"` // Get the ID for the field-level encryption profile information. // @@ -10114,7 +10128,7 @@ func (s *GetFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile(v // The request to get an invalidation's information. type GetInvalidationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetInvalidationRequest" type:"structure"` // The distribution's ID. // @@ -10176,7 +10190,7 @@ type GetInvalidationOutput struct { _ struct{} `type:"structure" payload:"Invalidation"` // The invalidation's information. For more information, see Invalidation Complex - // Type (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/InvalidationDatatype.html). + // Type (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/InvalidationDatatype.html). Invalidation *Invalidation `type:"structure"` } @@ -10197,7 +10211,7 @@ func (s *GetInvalidationOutput) SetInvalidation(v *Invalidation) *GetInvalidatio } type GetPublicKeyConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetPublicKeyConfigRequest" type:"structure"` // Request the ID for the public key configuration. // @@ -10270,7 +10284,7 @@ func (s *GetPublicKeyConfigOutput) SetPublicKeyConfig(v *PublicKeyConfig) *GetPu } type GetPublicKeyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetPublicKeyRequest" type:"structure"` // Request the ID for the public key. // @@ -10344,7 +10358,7 @@ func (s *GetPublicKeyOutput) SetPublicKey(v *PublicKey) *GetPublicKeyOutput { // To request to get a streaming distribution configuration. type GetStreamingDistributionConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetStreamingDistributionConfigRequest" type:"structure"` // The streaming distribution's ID. 
// @@ -10419,7 +10433,7 @@ func (s *GetStreamingDistributionConfigOutput) SetStreamingDistributionConfig(v // The request to get a streaming distribution's information. type GetStreamingDistributionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetStreamingDistributionRequest" type:"structure"` // The streaming distribution's ID. // @@ -10503,7 +10517,7 @@ func (s *GetStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDi // your content based on values in the product header. CloudFront forwards the // product header to the origin and caches the response from the origin once // for each header value. For more information about caching based on header -// values, see How CloudFront Forwards and Caches Headers (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) +// values, see How CloudFront Forwards and Caches Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) // in the Amazon CloudFront Developer Guide. type Headers struct { _ struct{} `type:"structure"` @@ -10517,10 +10531,8 @@ type Headers struct { // distribution to do one of the following: // // * Forward all headers to your origin: Specify 1 for Quantity and * for - // Name. - // - // CloudFront doesn't cache the objects that are associated with this cache - // behavior. Instead, CloudFront sends every request to the origin. + // Name. CloudFront doesn't cache the objects that are associated with this + // cache behavior. Instead, CloudFront sends every request to the origin. // // * Forward a whitelist of headers you specify: Specify the number of headers // that you want CloudFront to base caching on. Then specify the header names @@ -10536,9 +10548,9 @@ type Headers struct { // the following documentation: // // * S3 bucket: See HTTP Request Headers That CloudFront Removes or Updates - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorS3Origin.html#request-s3-removed-headers) + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorS3Origin.html#request-s3-removed-headers) // - // * Custom origin: See HTTP Request Headers and CloudFront Behavior (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-headers-behavior) + // * Custom origin: See HTTP Request Headers and CloudFront Behavior (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-headers-behavior) // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` @@ -10664,7 +10676,7 @@ type InvalidationBatch struct { // A complex type that contains information about the objects that you want // to invalidate. For more information, see Specifying the Objects to Invalidate - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) // in the Amazon CloudFront Developer Guide. // // Paths is a required field @@ -10716,7 +10728,7 @@ func (s *InvalidationBatch) SetPaths(v *Paths) *InvalidationBatch { // The InvalidationList complex type describes the list of invalidation objects. 
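// Illustrative sketch, not part of the vendored change: walking the
// InvalidationList described here with the SDK's generated paginator.
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func listInvalidations(distributionID string) error {
	svc := cloudfront.New(session.Must(session.NewSession()))
	return svc.ListInvalidationsPages(&cloudfront.ListInvalidationsInput{
		DistributionId: aws.String(distributionID),
	}, func(page *cloudfront.ListInvalidationsOutput, lastPage bool) bool {
		for _, inv := range page.InvalidationList.Items {
			fmt.Println(aws.StringValue(inv.Id), aws.StringValue(inv.Status))
		}
		return true // keep paging while IsTruncated is true
	})
}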
// For more information about invalidation, see Invalidating Objects (Web Distributions -// Only) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html) +// Only) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html) // in the Amazon CloudFront Developer Guide. type InvalidationList struct { _ struct{} `type:"structure"` @@ -10851,19 +10863,19 @@ func (s *InvalidationSummary) SetStatus(v string) *InvalidationSummary { // A complex type that lists the active CloudFront key pairs, if any, that are // associated with AwsAccountNumber. // -// For more information, see ActiveTrustedSigners. +// For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). type KeyPairIds struct { _ struct{} `type:"structure"` // A complex type that lists the active CloudFront key pairs, if any, that are // associated with AwsAccountNumber. // - // For more information, see ActiveTrustedSigners. + // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). Items []*string `locationNameList:"KeyPairId" type:"list"` // The number of active CloudFront key pairs for AwsAccountNumber. // - // For more information, see ActiveTrustedSigners. + // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` @@ -10912,17 +10924,15 @@ type LambdaFunctionAssociation struct { // // * viewer-response: The function executes before CloudFront returns the // requested object to the viewer. The function executes regardless of whether - // the object was already in the edge cache. - // - // If the origin returns an HTTP status code other than HTTP 200 (OK), the function - // doesn't execute. + // the object was already in the edge cache. If the origin returns an HTTP + // status code other than HTTP 200 (OK), the function doesn't execute. // // EventType is a required field EventType *string `type:"string" required:"true" enum:"EventType"` // A flag that allows a Lambda function to have read access to the body content. // For more information, see Accessing the Request Body by Choosing the Include - // Body Option (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-include-body-access.html) + // Body Option (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-include-body-access.html) // in the Amazon CloudFront Developer Guide. IncludeBody *bool `type:"boolean"` @@ -11048,7 +11058,7 @@ func (s *LambdaFunctionAssociations) SetQuantity(v int64) *LambdaFunctionAssocia // The request to list origin access identities. type ListCloudFrontOriginAccessIdentitiesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListCloudFrontOriginAccessIdentitiesRequest" type:"structure"` // Use this when paginating results to indicate where to begin in your list // of origin access identities. The results include identities in the list that @@ -11110,7 +11120,7 @@ func (s *ListCloudFrontOriginAccessIdentitiesOutput) SetCloudFrontOriginAccessId // The request to list distributions that are associated with a specified AWS // WAF web ACL. 
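// Illustrative sketch, not part of the vendored change: a Lambda@Edge
// association using the EventType and IncludeBody fields documented above.
// The function ARN is a placeholder; Lambda@Edge requires a numbered version.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func exampleLambdaAssociations() *cloudfront.LambdaFunctionAssociations {
	return &cloudfront.LambdaFunctionAssociations{
		Quantity: aws.Int64(1), // must match len(Items)
		Items: []*cloudfront.LambdaFunctionAssociation{{
			EventType:         aws.String("viewer-request"), // or origin-request, origin-response, viewer-response
			LambdaFunctionARN: aws.String("arn:aws:lambda:us-east-1:123456789012:function:edge-fn:1"),
			IncludeBody:       aws.Bool(false), // true exposes the request body to the function
		}},
	}
}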
type ListDistributionsByWebACLIdInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListDistributionsByWebACLIdRequest" type:"structure"` // Use Marker and MaxItems to control pagination of results. If you have more // than MaxItems distributions that satisfy the request, the response includes @@ -11202,7 +11212,7 @@ func (s *ListDistributionsByWebACLIdOutput) SetDistributionList(v *DistributionL // The request to list your distributions. type ListDistributionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListDistributionsRequest" type:"structure"` // Use this when paginating results to indicate where to begin in your list // of distributions. The results include distributions in the list that occur @@ -11262,7 +11272,7 @@ func (s *ListDistributionsOutput) SetDistributionList(v *DistributionList) *List } type ListFieldLevelEncryptionConfigsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListFieldLevelEncryptionConfigsRequest" type:"structure"` // Use this when paginating results to indicate where to begin in your list // of configurations. The results include configurations in the list that occur @@ -11323,7 +11333,7 @@ func (s *ListFieldLevelEncryptionConfigsOutput) SetFieldLevelEncryptionList(v *F } type ListFieldLevelEncryptionProfilesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListFieldLevelEncryptionProfilesRequest" type:"structure"` // Use this when paginating results to indicate where to begin in your list // of profiles. The results include profiles in the list that occur after the @@ -11385,7 +11395,7 @@ func (s *ListFieldLevelEncryptionProfilesOutput) SetFieldLevelEncryptionProfileL // The request to list invalidations. type ListInvalidationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListInvalidationsRequest" type:"structure"` // The distribution's ID. // @@ -11475,7 +11485,7 @@ func (s *ListInvalidationsOutput) SetInvalidationList(v *InvalidationList) *List } type ListPublicKeysInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListPublicKeysRequest" type:"structure"` // Use this when paginating results to indicate where to begin in your list // of public keys. The results include public keys in the list that occur after @@ -11536,7 +11546,7 @@ func (s *ListPublicKeysOutput) SetPublicKeyList(v *PublicKeyList) *ListPublicKey // The request to list your streaming distributions. type ListStreamingDistributionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListStreamingDistributionsRequest" type:"structure"` // The value that you provided for the Marker request parameter. Marker *string `location:"querystring" locationName:"Marker" type:"string"` @@ -11593,7 +11603,7 @@ func (s *ListStreamingDistributionsOutput) SetStreamingDistributionList(v *Strea // The request to list tags for a CloudFront resource. type ListTagsForResourceInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTagsForResourceRequest" type:"structure"` // An ARN of a CloudFront resource. // @@ -11756,7 +11766,7 @@ func (s *LoggingConfig) SetPrefix(v string) *LoggingConfig { // group. You must specify at least one origin or origin group. 
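// Illustrative sketch, not part of the vendored change: a custom Origin
// carrying an OriginCustomHeader, per the Origin and OriginCustomHeader hunks
// in this region. Domain, ID, and header value are placeholders.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func exampleCustomOrigin() *cloudfront.Origin {
	return &cloudfront.Origin{
		Id:         aws.String("web-origin"), // referenced by cache behaviors via TargetOriginId
		DomainName: aws.String("origin.example.com"),
		CustomHeaders: &cloudfront.CustomHeaders{
			Quantity: aws.Int64(1), // must match len(Items)
			Items: []*cloudfront.OriginCustomHeader{{
				HeaderName:  aws.String("X-Origin-Token"),
				HeaderValue: aws.String("placeholder-secret"),
			}},
		},
		CustomOriginConfig: &cloudfront.CustomOriginConfig{
			HTTPPort:             aws.Int64(80),
			HTTPSPort:            aws.Int64(443),
			OriginProtocolPolicy: aws.String("https-only"), // http-only | match-viewer | https-only
		},
	}
}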
// // For the current limit on the number of origins or origin groups that you -// can specify for a distribution, see Amazon CloudFront Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) +// can specify for a distribution, see Amazon CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) // in the AWS General Reference. type Origin struct { _ struct{} `type:"structure"` @@ -11775,7 +11785,7 @@ type Origin struct { // Amazon S3 static website hosting endpoint for the bucket. // // For more information about specifying this value for different types of origins, - // see Origin Domain Name (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) + // see Origin Domain Name (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) // in the Amazon CloudFront Developer Guide. // // Constraints for Amazon S3 origins: @@ -11811,7 +11821,7 @@ type Origin struct { // the cache behavior to route requests by specifying the value of the Id element // for that origin. When a request matches the path pattern for that cache behavior, // CloudFront routes the request to the specified origin. For more information, - // see Cache Behavior Settings (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) + // see Cache Behavior Settings (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) // in the Amazon CloudFront Developer Guide. // // Id is a required field @@ -12188,8 +12198,8 @@ type OriginCustomHeader struct { // The name of a header that you want CloudFront to forward to your origin. // For more information, see Forwarding Custom Headers to Your Origin (Web Distributions - // Only) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) - // in the Amazon Amazon CloudFront Developer Guide. + // Only) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) + // in the Amazon CloudFront Developer Guide. // // HeaderName is a required field HeaderName *string `type:"string" required:"true"` @@ -12655,7 +12665,7 @@ func (s *Origins) SetQuantity(v int64) *Origins { // A complex type that contains information about the objects that you want // to invalidate. For more information, see Specifying the Objects to Invalidate -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) // in the Amazon CloudFront Developer Guide. type Paths struct { _ struct{} `type:"structure"` @@ -12663,7 +12673,8 @@ type Paths struct { // A complex type that contains a list of the paths that you want to invalidate. Items []*string `locationNameList:"Path" type:"list"` - // The number of objects that you want to invalidate. + // The number of invalidation paths specified for the objects that you want + // to invalidate. 
// // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` @@ -13127,15 +13138,17 @@ func (s *QueryArgProfiles) SetQuantity(v int64) *QueryArgProfiles { return s } +// A complex type that contains information about the query string parameters +// that you want CloudFront to use for caching for a cache behavior. type QueryStringCacheKeys struct { _ struct{} `type:"structure"` - // (Optional) A list that contains the query string parameters that you want - // CloudFront to use as a basis for caching for this cache behavior. If Quantity - // is 0, you can omit Items. + // A list that contains the query string parameters that you want CloudFront + // to use as a basis for caching for a cache behavior. If Quantity is 0, you + // can omit Items. Items []*string `locationNameList:"Name" type:"list"` - // The number of whitelisted query string parameters for this cache behavior. + // The number of whitelisted query string parameters for a cache behavior. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` @@ -13232,7 +13245,7 @@ type S3Origin struct { // DomainName is a required field DomainName *string `type:"string" required:"true"` - // The CloudFront origin access identity to associate with the RTMP distribution. + // The CloudFront origin access identity to associate with the distribution. // Use an origin access identity to configure the distribution so that end users // can only access objects in an Amazon S3 bucket through CloudFront. // @@ -13247,8 +13260,8 @@ type S3Origin struct { // and specify the new origin access identity. // // For more information, see Using an Origin Access Identity to Restrict Access - // to Your Amazon S3 Content (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) - // in the Amazon Amazon CloudFront Developer Guide. + // to Your Amazon S3 Content (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) + // in the Amazon CloudFront Developer Guide. // // OriginAccessIdentity is a required field OriginAccessIdentity *string `type:"string" required:"true"` @@ -13318,7 +13331,7 @@ type S3OriginConfig struct { // and specify the new origin access identity. // // For more information about the origin access identity, see Serving Private - // Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. // // OriginAccessIdentity is a required field @@ -13360,7 +13373,7 @@ type Signer struct { _ struct{} `type:"structure"` // An AWS account that is included in the TrustedSigners complex type for this - // RTMP distribution. Valid values include: + // distribution. Valid values include: // // * self, which is the AWS account used to create the distribution. // @@ -13451,7 +13464,9 @@ func (s *StatusCodes) SetQuantity(v int64) *StatusCodes { return s } -// A streaming distribution. +// A streaming distribution tells CloudFront where you want RTMP content to +// be delivered from, and the details about how to track and manage content +// delivery. type StreamingDistribution struct { _ struct{} `type:"structure"` @@ -13471,7 +13486,7 @@ type StreamingDistribution struct { // are associated with the trusted signer's AWS account. 
If no KeyPairId element // appears for a Signer, that signer can't create signed URLs. // - // For more information, see Serving Private Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // For more information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. // // ActiveTrustedSigners is a required field @@ -13605,7 +13620,7 @@ type StreamingDistributionConfig struct { // create signed URLs for private content. If you want the distribution to use // signed URLs, include this element; if you want the distribution to use public // URLs, remove this element. For more information, see Serving Private Content - // through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. // // TrustedSigners is a required field @@ -13863,7 +13878,7 @@ func (s *StreamingDistributionList) SetQuantity(v int64) *StreamingDistributionL return s } -// A summary of the information for an Amazon CloudFront streaming distribution. +// A summary of the information for a CloudFront streaming distribution. type StreamingDistributionSummary struct { _ struct{} `type:"structure"` @@ -13905,6 +13920,9 @@ type StreamingDistributionSummary struct { // LastModifiedTime is a required field LastModifiedTime *time.Time `type:"timestamp" required:"true"` + // A complex type that contains information about price class for this streaming + // distribution. + // // PriceClass is a required field PriceClass *string `type:"string" required:"true" enum:"PriceClass"` @@ -13932,6 +13950,9 @@ type StreamingDistributionSummary struct { // as applicable, and specify all of the trusted signers that you want to include // in the updated distribution. // + // For more information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // in the Amazon CloudFront Developer Guide. + // // TrustedSigners is a required field TrustedSigners *TrustedSigners `type:"structure" required:"true"` } @@ -14171,7 +14192,7 @@ func (s *TagKeys) SetItems(v []*string) *TagKeys { // The request to add tags to a CloudFront resource. type TagResourceInput struct { - _ struct{} `type:"structure" payload:"Tags"` + _ struct{} `locationName:"TagResourceRequest" type:"structure" payload:"Tags"` // An ARN of a CloudFront resource. // @@ -14181,7 +14202,7 @@ type TagResourceInput struct { // A complex type that contains zero or more Tag elements. // // Tags is a required field - Tags *Tags `locationName:"Tags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + Tags *Tags `locationName:"Tags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -14291,8 +14312,8 @@ func (s *Tags) SetItems(v []*Tag) *Tags { // If you want to require signed URLs in requests for objects in the target // origin that match the PathPattern for this cache behavior, specify true for // Enabled, and specify the applicable values for Quantity and Items. 
For more -// information, see Serving Private Content through CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) -// in the Amazon Amazon CloudFront Developer Guide. +// information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) +// in the Amazon CloudFront Developer Guide. // // If you don't want to require signed URLs in requests for objects that match // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. @@ -14302,7 +14323,8 @@ func (s *Tags) SetItems(v []*Tag) *Tags { // all of the trusted signers that you want to include in the updated distribution. // // For more information about updating the distribution configuration, see DistributionConfig -// . +// (https://docs.aws.amazon.com/cloudfront/latest/APIReference/DistributionConfig.html) +// in the Amazon CloudFront API Reference. type TrustedSigners struct { _ struct{} `type:"structure"` @@ -14368,7 +14390,7 @@ func (s *TrustedSigners) SetQuantity(v int64) *TrustedSigners { // The request to remove tags from a CloudFront resource. type UntagResourceInput struct { - _ struct{} `type:"structure" payload:"TagKeys"` + _ struct{} `locationName:"UntagResourceRequest" type:"structure" payload:"TagKeys"` // An ARN of a CloudFront resource. // @@ -14378,7 +14400,7 @@ type UntagResourceInput struct { // A complex type that contains zero or more Tag key elements. // // TagKeys is a required field - TagKeys *TagKeys `locationName:"TagKeys" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + TagKeys *TagKeys `locationName:"TagKeys" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -14435,12 +14457,12 @@ func (s UntagResourceOutput) GoString() string { // The request to update an origin access identity. type UpdateCloudFrontOriginAccessIdentityInput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + _ struct{} `locationName:"UpdateCloudFrontOriginAccessIdentityRequest" type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` // The identity's configuration information. // // CloudFrontOriginAccessIdentityConfig is a required field - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` // The identity's id. // @@ -14539,12 +14561,12 @@ func (s *UpdateCloudFrontOriginAccessIdentityOutput) SetETag(v string) *UpdateCl // The request to update a distribution. type UpdateDistributionInput struct { - _ struct{} `type:"structure" payload:"DistributionConfig"` + _ struct{} `locationName:"UpdateDistributionRequest" type:"structure" payload:"DistributionConfig"` // The distribution's configuration information. 
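UpdateDistributionInput gains a locationName and its payload now serializes under the 2019-03-26 xmlURI, but the caller-side pattern is unchanged: CloudFront updates are a read-modify-write guarded by an ETag. A minimal sketch, with the distribution ID as a hypothetical placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	distID := "EDFDVBD6EXAMPLE" // hypothetical distribution ID

	// Fetch the current config together with its ETag.
	got, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
		Id: aws.String(distID),
	})
	if err != nil {
		fmt.Println("get config failed:", err)
		return
	}

	// Mutate the config, then send the whole thing back with IfMatch set
	// to the ETag from the read; CloudFront rejects stale writes.
	got.DistributionConfig.Comment = aws.String("updated comment")
	_, err = svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
		Id:                 aws.String(distID),
		DistributionConfig: got.DistributionConfig,
		IfMatch:            got.ETag,
	})
	if err != nil {
		fmt.Println("update failed:", err)
	}
}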
// // DistributionConfig is a required field - DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` // The distribution's id. // @@ -14642,12 +14664,12 @@ func (s *UpdateDistributionOutput) SetETag(v string) *UpdateDistributionOutput { } type UpdateFieldLevelEncryptionConfigInput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionConfig"` + _ struct{} `locationName:"UpdateFieldLevelEncryptionConfigRequest" type:"structure" payload:"FieldLevelEncryptionConfig"` // Request to update a field-level encryption configuration. // // FieldLevelEncryptionConfig is a required field - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` // The ID of the configuration you want to update. // @@ -14745,12 +14767,12 @@ func (s *UpdateFieldLevelEncryptionConfigOutput) SetFieldLevelEncryption(v *Fiel } type UpdateFieldLevelEncryptionProfileInput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileConfig"` + _ struct{} `locationName:"UpdateFieldLevelEncryptionProfileRequest" type:"structure" payload:"FieldLevelEncryptionProfileConfig"` // Request to update a field-level encryption profile. // // FieldLevelEncryptionProfileConfig is a required field - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` // The ID of the field-level encryption profile request. // @@ -14847,7 +14869,7 @@ func (s *UpdateFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile } type UpdatePublicKeyInput struct { - _ struct{} `type:"structure" payload:"PublicKeyConfig"` + _ struct{} `locationName:"UpdatePublicKeyRequest" type:"structure" payload:"PublicKeyConfig"` // ID of the public key to be updated. // @@ -14861,7 +14883,7 @@ type UpdatePublicKeyInput struct { // Request to update public key information. // // PublicKeyConfig is a required field - PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -14950,7 +14972,7 @@ func (s *UpdatePublicKeyOutput) SetPublicKey(v *PublicKey) *UpdatePublicKeyOutpu // The request to update a streaming distribution. 
type UpdateStreamingDistributionInput struct { - _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + _ struct{} `locationName:"UpdateStreamingDistributionRequest" type:"structure" payload:"StreamingDistributionConfig"` // The streaming distribution's id. // @@ -14964,7 +14986,7 @@ type UpdateStreamingDistributionInput struct { // The streaming distribution's configuration information. // // StreamingDistributionConfig is a required field - StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2018-11-05/"` + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` } // String returns the string representation @@ -15065,112 +15087,68 @@ func (s *UpdateStreamingDistributionOutput) SetStreamingDistribution(v *Streamin // third-party certificate authority and imported it into ACM or uploaded // it to the IAM certificate store. // -// You must specify only one of the following values: -// -// * ViewerCertificate$ACMCertificateArn -// -// * ViewerCertificate$IAMCertificateId -// -// * ViewerCertificate$CloudFrontDefaultCertificate -// -// Don't specify false for CloudFrontDefaultCertificate. -// -// If you want viewers to use HTTP instead of HTTPS to request your objects: -// Specify the following value: -// -// true -// -// In addition, specify allow-all for ViewerProtocolPolicy for all of your cache -// behaviors. -// -// If you want viewers to use HTTPS to request your objects: Choose the type -// of certificate that you want to use based on whether you're using an alternate -// domain name for your objects or the CloudFront domain name: -// -// * If you're using an alternate domain name, such as example.com: Specify -// one of the following values, depending on whether ACM provided your certificate -// or you purchased your certificate from third-party certificate authority: -// -// ARN for ACM SSL/TLS certificate where -// ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate -// that you want to use for this distribution. -// -// IAM certificate ID where IAM certificate -// ID is the ID that IAM returned when you added the certificate to the IAM -// certificate store. +// Specify only one of the following values: // -// If you specify ACMCertificateArn or IAMCertificateId, you must also specify -// a value for SSLSupportMethod. +// * ACMCertificateArn (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-ACMCertificateArn) // -// If you choose to use an ACM certificate or a certificate in the IAM certificate -// store, we recommend that you use only an alternate domain name in your -// object URLs (https://example.com/logo.jpg). If you use the domain name -// that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) -// and the viewer supports SNI, then CloudFront behaves normally. 
However, -// if the browser does not support SNI, the user's experience depends on -// the value that you choose for SSLSupportMethod: +// * IAMCertificateId (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-IAMCertificateId) // -// vip: The viewer displays a warning because there is a mismatch between the -// CloudFront domain name and the domain name in your SSL/TLS certificate. +// * CloudFrontDefaultCertificate (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-CloudFrontDefaultCertificate) // -// sni-only: CloudFront drops the connection with the browser without returning -// the object. -// -// * If you're using the CloudFront domain name for your distribution, such -// as d111111abcdef8.cloudfront.net: Specify the following value: -// -// true -// -// If you want viewers to use HTTPS, you must also specify one of the following -// values in your cache behaviors: -// -// * https-only -// -// * redirect-to-https -// -// You can also optionally require that CloudFront use HTTPS to communicate -// with your origin by specifying one of the following values for the applicable -// origins: -// -// * https-only -// -// * match-viewer -// -// For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) +// For more information, see Using Alternate Domain Names and HTTPS (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) // in the Amazon CloudFront Developer Guide. type ViewerCertificate struct { _ struct{} `type:"structure"` - // For information about how and when to use ACMCertificateArn, see ViewerCertificate. + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name, you must choose the type of certificate that you + // want to use. Specify the following value if ACM provided your certificate: + // + // * ARN for ACM SSL/TLS certificate + // where ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate + // that you want to use for this distribution. + // + // If you specify ACMCertificateArn, you must also specify a value for SSLSupportMethod. ACMCertificateArn *string `type:"string"` - // This field has been deprecated. Use one of the following fields instead: + // This field is no longer used. Use one of the following fields instead: // - // * ViewerCertificate$ACMCertificateArn + // * ACMCertificateArn (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-ACMCertificateArn) // - // * ViewerCertificate$IAMCertificateId + // * IAMCertificateId (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-IAMCertificateId) // - // * ViewerCertificate$CloudFrontDefaultCertificate + // * CloudFrontDefaultCertificate (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-CloudFrontDefaultCertificate) // // Deprecated: Certificate has been deprecated Certificate *string `deprecated:"true" type:"string"` - // This field has been deprecated. Use one of the following fields instead: + // This field is no longer used. 
Use one of the following fields instead: // - // * ViewerCertificate$ACMCertificateArn + // * ACMCertificateArn (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-ACMCertificateArn) // - // * ViewerCertificate$IAMCertificateId + // * IAMCertificateId (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-IAMCertificateId) // - // * ViewerCertificate$CloudFrontDefaultCertificate + // * CloudFrontDefaultCertificate (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-CloudFrontDefaultCertificate) // // Deprecated: CertificateSource has been deprecated CertificateSource *string `deprecated:"true" type:"string" enum:"CertificateSource"` - // For information about how and when to use CloudFrontDefaultCertificate, see - // ViewerCertificate. + // If you're using the CloudFront domain name for your distribution, such as + // d111111abcdef8.cloudfront.net, specify the following value: + // + // * true CloudFrontDefaultCertificate *bool `type:"boolean"` - // For information about how and when to use IAMCertificateId, see ViewerCertificate. + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name, you must choose the type of certificate that you + // want to use. Specify the following value if you purchased your certificate + // from a third-party certificate authority: + // + // * IAM certificate ID where IAM certificate + // ID is the ID that IAM returned when you added the certificate to the IAM + // certificate store. + // + // If you specify IAMCertificateId, you must also specify a value for SSLSupportMethod. IAMCertificateId *string `type:"string"` // Specify the security policy that you want CloudFront to use for HTTPS connections. @@ -15201,38 +15179,35 @@ type ViewerCertificate struct { // // For information about the relationship between the security policy that you // choose and the protocols and ciphers that CloudFront uses to communicate - // with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication - // Between Viewers and CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html#secure-connections-supported-ciphers) + // with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication + // Between Viewers and CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html#secure-connections-supported-ciphers) // in the Amazon CloudFront Developer Guide. 
MinimumProtocolVersion *string `type:"string" enum:"MinimumProtocolVersion"` - // If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId, + // If you specify a value for ACMCertificateArn (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-ACMCertificateArn) + // or for IAMCertificateId (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ViewerCertificate.html#cloudfront-Type-ViewerCertificate-IAMCertificateId), // you must also specify how you want CloudFront to serve HTTPS requests: using - // a method that works for all clients or one that works for most clients: - // - // * vip: CloudFront uses dedicated IP addresses for your content and can - // respond to HTTPS requests from any viewer. However, you will incur additional - // monthly charges. + // a method that works for browsers and clients released after 2010 or one that + // works for all clients. // // * sni-only: CloudFront can respond to HTTPS requests from viewers that // support Server Name Indication (SNI). All modern browsers support SNI, - // but some browsers still in use don't support SNI. If some of your users' - // browsers don't support SNI, we recommend that you do one of the following: - // - // Use the vip option (dedicated IP addresses) instead of sni-only. - // - // Use the CloudFront SSL/TLS certificate instead of a custom certificate. This - // requires that you use the CloudFront domain name of your distribution - // in the URLs for your objects, for example, https://d111111abcdef8.cloudfront.net/logo.png. + // but there are a few that don't. For a current list of the browsers that + // support SNI, see the Wikipedia entry Server Name Indication (http://en.wikipedia.org/wiki/Server_Name_Indication). + // To learn about options to explore if you have users with browsers that + // don't include SNI support, see Choosing How CloudFront Serves HTTPS Requests + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cnames-https-dedicated-ip-or-sni.html) + // in the Amazon CloudFront Developer Guide. // - // If you can control which browser your users use, upgrade the browser to one - // that supports SNI. - // - // Use HTTP instead of HTTPS. + // * vip: CloudFront uses dedicated IP addresses for your content and can + // respond to HTTPS requests from any viewer. However, there are additional + // monthly charges. For details, including specific pricing information, + // see Custom SSL options for Amazon CloudFront (http://aws.amazon.com/cloudfront/custom-ssl-domains/) + // on the AWS marketing site. // // Don't specify a value for SSLSupportMethod if you specified true. // - // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS.html) + // For more information, see Choosing How CloudFront Serves HTTPS Requests (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cnames-https-dedicated-ip-or-sni.html) // in the Amazon CloudFront Developer Guide. 
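The rewritten ViewerCertificate docs boil down to: set exactly one of ACMCertificateArn, IAMCertificateId, or CloudFrontDefaultCertificate, and pair either of the first two with an SSLSupportMethod. A minimal sketch of the two common cases, using the generated enum constants (the certificate ARN is a hypothetical placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	// Case 1: alternate domain name backed by an ACM certificate, served
	// over SNI (the ARN below is a hypothetical placeholder).
	vc := &cloudfront.ViewerCertificate{
		ACMCertificateArn:      aws.String("arn:aws:acm:us-east-1:111122223333:certificate/example"),
		SSLSupportMethod:       aws.String(cloudfront.SSLSupportMethodSniOnly),
		MinimumProtocolVersion: aws.String(cloudfront.MinimumProtocolVersionTlsv122018),
	}

	// Case 2: no alternate domain name; use the default *.cloudfront.net
	// certificate and omit SSLSupportMethod entirely.
	vc = &cloudfront.ViewerCertificate{
		CloudFrontDefaultCertificate: aws.Bool(true),
	}

	fmt.Println(vc)
}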
SSLSupportMethod *string `type:"string" enum:"SSLSupportMethod"` } @@ -15338,6 +15313,17 @@ const ( HttpVersionHttp2 = "http2" ) +const ( + // ICPRecordalStatusApproved is a ICPRecordalStatus enum value + ICPRecordalStatusApproved = "APPROVED" + + // ICPRecordalStatusSuspended is a ICPRecordalStatus enum value + ICPRecordalStatusSuspended = "SUSPENDED" + + // ICPRecordalStatusPending is a ICPRecordalStatus enum value + ICPRecordalStatusPending = "PENDING" +) + const ( // ItemSelectionNone is a ItemSelection enum value ItemSelectionNone = "none" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go index 27960eab4d8..fb197f85b21 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go @@ -8,7 +8,7 @@ // errors. For detailed information about CloudFront features, see the Amazon // CloudFront Developer Guide. // -// See https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05 for more information on this service. +// See https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26 for more information on this service. // // See cloudfront package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/cloudfront/ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go index 7cedc1e46bc..db6258af0bf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go @@ -12,10 +12,14 @@ const ( // ErrCodeBatchTooLarge for service response error code // "BatchTooLarge". + // + // Invalidation batch specified is too large. ErrCodeBatchTooLarge = "BatchTooLarge" // ErrCodeCNAMEAlreadyExists for service response error code // "CNAMEAlreadyExists". + // + // The CNAME specified is already defined for CloudFront. ErrCodeCNAMEAlreadyExists = "CNAMEAlreadyExists" // ErrCodeCannotChangeImmutablePublicKeyFields for service response error code @@ -33,6 +37,9 @@ const ( // ErrCodeDistributionNotDisabled for service response error code // "DistributionNotDisabled". + // + // The specified CloudFront distribution is not disabled. You must disable the + // distribution before you can delete it. ErrCodeDistributionNotDisabled = "DistributionNotDisabled" // ErrCodeFieldLevelEncryptionConfigAlreadyExists for service response error code @@ -98,6 +105,8 @@ const ( // ErrCodeInvalidErrorCode for service response error code // "InvalidErrorCode". + // + // An invalid error code was specified. ErrCodeInvalidErrorCode = "InvalidErrorCode" // ErrCodeInvalidForwardCookies for service response error code @@ -111,10 +120,14 @@ const ( // ErrCodeInvalidGeoRestrictionParameter for service response error code // "InvalidGeoRestrictionParameter". + // + // The specified geo restriction parameter is not valid. ErrCodeInvalidGeoRestrictionParameter = "InvalidGeoRestrictionParameter" // ErrCodeInvalidHeadersForS3Origin for service response error code // "InvalidHeadersForS3Origin". + // + // The headers specified are not valid for an Amazon S3 origin. 
ErrCodeInvalidHeadersForS3Origin = "InvalidHeadersForS3Origin" // ErrCodeInvalidIfMatchVersion for service response error code @@ -131,10 +144,14 @@ const ( // ErrCodeInvalidLocationCode for service response error code // "InvalidLocationCode". + // + // The location code specified is not valid. ErrCodeInvalidLocationCode = "InvalidLocationCode" // ErrCodeInvalidMinimumProtocolVersion for service response error code // "InvalidMinimumProtocolVersion". + // + // The minimum protocol version specified is not valid. ErrCodeInvalidMinimumProtocolVersion = "InvalidMinimumProtocolVersion" // ErrCodeInvalidOrigin for service response error code @@ -152,10 +169,14 @@ const ( // ErrCodeInvalidOriginKeepaliveTimeout for service response error code // "InvalidOriginKeepaliveTimeout". + // + // The keep alive timeout specified for the origin is not valid. ErrCodeInvalidOriginKeepaliveTimeout = "InvalidOriginKeepaliveTimeout" // ErrCodeInvalidOriginReadTimeout for service response error code // "InvalidOriginReadTimeout". + // + // The read timeout specified for the origin is not valid. ErrCodeInvalidOriginReadTimeout = "InvalidOriginReadTimeout" // ErrCodeInvalidProtocolSettings for service response error code @@ -167,6 +188,8 @@ const ( // ErrCodeInvalidQueryStringParameters for service response error code // "InvalidQueryStringParameters". + // + // Query string parameters specified in the response body are not valid. ErrCodeInvalidQueryStringParameters = "InvalidQueryStringParameters" // ErrCodeInvalidRelativePath for service response error code @@ -186,22 +209,32 @@ const ( // ErrCodeInvalidResponseCode for service response error code // "InvalidResponseCode". + // + // A response code specified in the response body is not valid. ErrCodeInvalidResponseCode = "InvalidResponseCode" // ErrCodeInvalidTTLOrder for service response error code // "InvalidTTLOrder". + // + // TTL order specified in the response body is not valid. ErrCodeInvalidTTLOrder = "InvalidTTLOrder" // ErrCodeInvalidTagging for service response error code // "InvalidTagging". + // + // Tagging specified in the response body is not valid. ErrCodeInvalidTagging = "InvalidTagging" // ErrCodeInvalidViewerCertificate for service response error code // "InvalidViewerCertificate". + // + // A viewer certificate specified in the response body is not valid. ErrCodeInvalidViewerCertificate = "InvalidViewerCertificate" // ErrCodeInvalidWebACLId for service response error code // "InvalidWebACLId". + // + // A web ACL id specified in the response body is not valid. ErrCodeInvalidWebACLId = "InvalidWebACLId" // ErrCodeMissingBody for service response error code @@ -255,6 +288,8 @@ const ( // ErrCodeNoSuchResource for service response error code // "NoSuchResource". + // + // A resource that was specified is not valid. ErrCodeNoSuchResource = "NoSuchResource" // ErrCodeNoSuchStreamingDistribution for service response error code @@ -274,6 +309,8 @@ const ( // ErrCodeOriginAccessIdentityInUse for service response error code // "CloudFrontOriginAccessIdentityInUse". + // + // The Origin Access Identity specified is already in use. ErrCodeOriginAccessIdentityInUse = "CloudFrontOriginAccessIdentityInUse" // ErrCodePreconditionFailed for service response error code @@ -303,10 +340,16 @@ const ( // ErrCodeStreamingDistributionAlreadyExists for service response error code // "StreamingDistributionAlreadyExists". 
+ // + // The caller reference you attempted to create the streaming distribution with + // is associated with another distribution. ErrCodeStreamingDistributionAlreadyExists = "StreamingDistributionAlreadyExists" // ErrCodeStreamingDistributionNotDisabled for service response error code // "StreamingDistributionNotDisabled". + // + // The specified CloudFront distribution is not disabled. You must disable the + // distribution before you can delete it. ErrCodeStreamingDistributionNotDisabled = "StreamingDistributionNotDisabled" // ErrCodeTooManyCacheBehaviors for service response error code @@ -405,6 +448,8 @@ const ( // ErrCodeTooManyHeadersInForwardedValues for service response error code // "TooManyHeadersInForwardedValues". + // + // Your request contains too many headers in forwarded values. ErrCodeTooManyHeadersInForwardedValues = "TooManyHeadersInForwardedValues" // ErrCodeTooManyInvalidationsInProgress for service response error code @@ -423,6 +468,8 @@ const ( // ErrCodeTooManyOriginCustomHeaders for service response error code // "TooManyOriginCustomHeaders". + // + // Your request contains too many origin custom headers. ErrCodeTooManyOriginCustomHeaders = "TooManyOriginCustomHeaders" // ErrCodeTooManyOriginGroupsPerDistribution for service response error code @@ -447,10 +494,14 @@ const ( // ErrCodeTooManyQueryStringParameters for service response error code // "TooManyQueryStringParameters". + // + // Your request contains too many query string parameters. ErrCodeTooManyQueryStringParameters = "TooManyQueryStringParameters" // ErrCodeTooManyStreamingDistributionCNAMEs for service response error code // "TooManyStreamingDistributionCNAMEs". + // + // Your request contains more CNAMEs than are allowed per distribution. ErrCodeTooManyStreamingDistributionCNAMEs = "TooManyStreamingDistributionCNAMEs" // ErrCodeTooManyStreamingDistributions for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index 7fe4399ded7..bfb1606aa4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudfront.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFront { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance.
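The errors.go hunks above only attach doc comments to existing constants, so matching on them is unchanged, and the service.go hunk just threads the endpoint partition ID into the client metadata; construction via cloudfront.New is the same. A minimal sketch of acting on one of the newly documented codes (the ID and ETag are hypothetical placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	_, err := svc.DeleteDistribution(&cloudfront.DeleteDistributionInput{
		Id:      aws.String("EDFDVBD6EXAMPLE"), // hypothetical distribution ID
		IfMatch: aws.String("E2QWRUHAPOMQZL"),  // placeholder ETag from a prior Get call
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case cloudfront.ErrCodeDistributionNotDisabled:
			fmt.Println("disable the distribution and wait for it to deploy, then retry")
		case cloudfront.ErrCodePreconditionFailed:
			fmt.Println("stale ETag; re-read the distribution and retry")
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}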
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFront { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFront { svc := &CloudFront{ Client: client.New( cfg, @@ -59,8 +59,9 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, - APIVersion: "2018-11-05", + APIVersion: "2019-03-26", }, handlers, ), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go index 6ccd0cee418..cbebdcca83f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go @@ -28,7 +28,7 @@ func (c *CloudFront) WaitUntilDistributionDeployed(input *GetDistributionInput) func (c *CloudFront) WaitUntilDistributionDeployedWithContext(ctx aws.Context, input *GetDistributionInput, opts ...request.WaiterOption) error { w := request.Waiter{ Name: "WaitUntilDistributionDeployed", - MaxAttempts: 25, + MaxAttempts: 35, Delay: request.ConstantWaiterDelay(60 * time.Second), Acceptors: []request.WaiterAcceptor{ { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go index 30dcd7da917..3011f741854 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go @@ -696,7 +696,7 @@ func (c *CloudHSMV2) DescribeBackupsWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeBackups operation. // pageNum := 0 // err := client.DescribeBackupsPages(params, -// func(page *DescribeBackupsOutput, lastPage bool) bool { +// func(page *cloudhsmv2.DescribeBackupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -728,10 +728,12 @@ func (c *CloudHSMV2) DescribeBackupsPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeBackupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeBackupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -849,7 +851,7 @@ func (c *CloudHSMV2) DescribeClustersWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeClusters operation. // pageNum := 0 // err := client.DescribeClustersPages(params, -// func(page *DescribeClustersOutput, lastPage bool) bool { +// func(page *cloudhsmv2.DescribeClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -881,10 +883,12 @@ func (c *CloudHSMV2) DescribeClustersPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1100,7 +1104,7 @@ func (c *CloudHSMV2) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, // // Example iterating over at most 3 pages of a ListTags operation. 
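Two behavioral tweaks land in this chunk. WaitUntilDistributionDeployed now polls up to 35 times (roughly 35 minutes at the constant 60-second delay) instead of 25. And the generated paginators now stop as soon as the callback returns false: the old `for p.Next() && cont` form evaluated p.Next() first, so it could issue one extra HTTP request after the callback had already asked to stop. The callback contract from the caller's side, sketched with credentials assumed configured:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudhsmv2"
)

func main() {
	svc := cloudhsmv2.New(session.Must(session.NewSession()))

	pageNum := 0
	err := svc.DescribeBackupsPages(&cloudhsmv2.DescribeBackupsInput{},
		func(page *cloudhsmv2.DescribeBackupsOutput, lastPage bool) bool {
			pageNum++
			fmt.Printf("page %d: %d backups\n", pageNum, len(page.Backups))
			// Returning false now stops before the next request is made.
			return pageNum < 3
		})
	if err != nil {
		fmt.Println("describe backups failed:", err)
	}
}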
// pageNum := 0 // err := client.ListTagsPages(params, -// func(page *ListTagsOutput, lastPage bool) bool { +// func(page *cloudhsmv2.ListTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1132,10 +1136,12 @@ func (c *CloudHSMV2) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go index c86db6ae7b0..38657874267 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudHSMV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cloudhsm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudHSMV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudHSMV2 { svc := &CloudHSMV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-28", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go index dbbf3a16aed..a39be361ffd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go @@ -1185,6 +1185,99 @@ func (c *CloudSearch) DescribeAvailabilityOptionsWithContext(ctx aws.Context, in return out, req.Send() } +const opDescribeDomainEndpointOptions = "DescribeDomainEndpointOptions" + +// DescribeDomainEndpointOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDomainEndpointOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDomainEndpointOptions for more information on using the DescribeDomainEndpointOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDomainEndpointOptionsRequest method. 
+// req, resp := client.DescribeDomainEndpointOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *CloudSearch) DescribeDomainEndpointOptionsRequest(input *DescribeDomainEndpointOptionsInput) (req *request.Request, output *DescribeDomainEndpointOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainEndpointOptionsInput{} + } + + output = &DescribeDomainEndpointOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDomainEndpointOptions API operation for Amazon CloudSearch. +// +// Returns the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudSearch's +// API operation DescribeDomainEndpointOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBaseException "BaseException" +// An error occurred while processing the request. +// +// * ErrCodeInternalException "InternalException" +// An internal error occurred while processing the request. If this problem +// persists, report an issue from the Service Health Dashboard (http://status.aws.amazon.com/). +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because a resource limit has already been met. +// +// * ErrCodeResourceNotFoundException "ResourceNotFound" +// The request was rejected because it attempted to reference a resource that +// does not exist. +// +// * ErrCodeDisabledOperationException "DisabledAction" +// The request was rejected because it attempted an operation which is not enabled. +// +func (c *CloudSearch) DescribeDomainEndpointOptions(input *DescribeDomainEndpointOptionsInput) (*DescribeDomainEndpointOptionsOutput, error) { + req, out := c.DescribeDomainEndpointOptionsRequest(input) + return out, req.Send() +} + +// DescribeDomainEndpointOptionsWithContext is the same as DescribeDomainEndpointOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDomainEndpointOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudSearch) DescribeDomainEndpointOptionsWithContext(ctx aws.Context, input *DescribeDomainEndpointOptionsInput, opts ...request.Option) (*DescribeDomainEndpointOptionsOutput, error) { + req, out := c.DescribeDomainEndpointOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeDomains = "DescribeDomains" // DescribeDomainsRequest generates a "aws/request.Request" representing the @@ -1953,6 +2046,9 @@ func (c *CloudSearch) UpdateAvailabilityOptionsRequest(input *UpdateAvailability // * ErrCodeDisabledOperationException "DisabledAction" // The request was rejected because it attempted an operation which is not enabled. // +// * ErrCodeValidationException "ValidationException" +// The request was rejected because it has invalid parameters. +// func (c *CloudSearch) UpdateAvailabilityOptions(input *UpdateAvailabilityOptionsInput) (*UpdateAvailabilityOptionsOutput, error) { req, out := c.UpdateAvailabilityOptionsRequest(input) return out, req.Send() @@ -1974,6 +2070,105 @@ func (c *CloudSearch) UpdateAvailabilityOptionsWithContext(ctx aws.Context, inpu return out, req.Send() } +const opUpdateDomainEndpointOptions = "UpdateDomainEndpointOptions" + +// UpdateDomainEndpointOptionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDomainEndpointOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDomainEndpointOptions for more information on using the UpdateDomainEndpointOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDomainEndpointOptionsRequest method. +// req, resp := client.UpdateDomainEndpointOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *CloudSearch) UpdateDomainEndpointOptionsRequest(input *UpdateDomainEndpointOptionsInput) (req *request.Request, output *UpdateDomainEndpointOptionsOutput) { + op := &request.Operation{ + Name: opUpdateDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainEndpointOptionsInput{} + } + + output = &UpdateDomainEndpointOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDomainEndpointOptions API operation for Amazon CloudSearch. +// +// Updates the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudSearch's +// API operation UpdateDomainEndpointOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBaseException "BaseException" +// An error occurred while processing the request. +// +// * ErrCodeInternalException "InternalException" +// An internal error occurred while processing the request. If this problem +// persists, report an issue from the Service Health Dashboard (http://status.aws.amazon.com/). 
+// +// * ErrCodeInvalidTypeException "InvalidType" +// The request was rejected because it specified an invalid type definition. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because a resource limit has already been met. +// +// * ErrCodeResourceNotFoundException "ResourceNotFound" +// The request was rejected because it attempted to reference a resource that +// does not exist. +// +// * ErrCodeDisabledOperationException "DisabledAction" +// The request was rejected because it attempted an operation which is not enabled. +// +// * ErrCodeValidationException "ValidationException" +// The request was rejected because it has invalid parameters. +// +func (c *CloudSearch) UpdateDomainEndpointOptions(input *UpdateDomainEndpointOptionsInput) (*UpdateDomainEndpointOptionsOutput, error) { + req, out := c.UpdateDomainEndpointOptionsRequest(input) + return out, req.Send() +} + +// UpdateDomainEndpointOptionsWithContext is the same as UpdateDomainEndpointOptions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDomainEndpointOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudSearch) UpdateDomainEndpointOptionsWithContext(ctx aws.Context, input *UpdateDomainEndpointOptionsInput, opts ...request.Option) (*UpdateDomainEndpointOptionsOutput, error) { + req, out := c.UpdateDomainEndpointOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateScalingParameters = "UpdateScalingParameters" // UpdateScalingParametersRequest generates a "aws/request.Request" representing the @@ -2114,7 +2309,7 @@ func (c *CloudSearch) UpdateServiceAccessPoliciesRequest(input *UpdateServiceAcc // UpdateServiceAccessPolicies API operation for Amazon CloudSearch. // // Configures the access rules that control access to the domain's document -// and search endpoints. For more information, see Configuring Access for an +// and search endpoints. For more information, see Configuring Access for an // Amazon CloudSearch Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3715,6 +3910,85 @@ func (s *DescribeAvailabilityOptionsOutput) SetAvailabilityOptions(v *Availabili return s } +// Container for the parameters to the DescribeDomainEndpointOptions operation. +// Specify the name of the domain you want to describe. To show the active configuration +// and exclude any pending changes, set the Deployed option to true. +type DescribeDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to retrieve the latest configuration (which might be in a Processing + // state) or the current, active configuration. Defaults to false. + Deployed *bool `type:"boolean"` + + // A string that represents the name of a domain. 
+ // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainEndpointOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDomainEndpointOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDomainEndpointOptionsInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeployed sets the Deployed field's value. +func (s *DescribeDomainEndpointOptionsInput) SetDeployed(v bool) *DescribeDomainEndpointOptionsInput { + s.Deployed = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *DescribeDomainEndpointOptionsInput) SetDomainName(v string) *DescribeDomainEndpointOptionsInput { + s.DomainName = &v + return s +} + +// The result of a DescribeDomainEndpointOptions request. Contains the status +// and configuration of a search domain's endpoint options. +type DescribeDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's endpoint options. + DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainEndpointOptionsOutput) GoString() string { + return s.String() +} + +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *DescribeDomainEndpointOptionsOutput) SetDomainEndpointOptions(v *DomainEndpointOptionsStatus) *DescribeDomainEndpointOptionsOutput { + s.DomainEndpointOptions = v + return s +} + // Container for the parameters to the DescribeDomains operation. By default // shows the status of all domains. To restrict the response to particular domains, // specify the names of the domains you want to describe. @@ -4269,6 +4543,76 @@ func (s *DocumentSuggesterOptions) SetSourceField(v string) *DocumentSuggesterOp return s } +// The domain's endpoint options. +type DomainEndpointOptions struct { + _ struct{} `type:"structure"` + + // Whether the domain is HTTPS only enabled. + EnforceHTTPS *bool `type:"boolean"` + + // The minimum required TLS version + TLSSecurityPolicy *string `type:"string" enum:"TLSSecurityPolicy"` +} + +// String returns the string representation +func (s DomainEndpointOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainEndpointOptions) GoString() string { + return s.String() +} + +// SetEnforceHTTPS sets the EnforceHTTPS field's value. +func (s *DomainEndpointOptions) SetEnforceHTTPS(v bool) *DomainEndpointOptions { + s.EnforceHTTPS = &v + return s +} + +// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. +func (s *DomainEndpointOptions) SetTLSSecurityPolicy(v string) *DomainEndpointOptions { + s.TLSSecurityPolicy = &v + return s +} + +// The configuration and status of the domain's endpoint options. 
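With the DescribeDomainEndpointOptions input and output shapes in place above, a minimal sketch of reading a domain's endpoint options (the domain name is a hypothetical placeholder, credentials assumed configured):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudsearch"
)

func main() {
	svc := cloudsearch.New(session.Must(session.NewSession()))

	out, err := svc.DescribeDomainEndpointOptions(&cloudsearch.DescribeDomainEndpointOptionsInput{
		DomainName: aws.String("movies"), // hypothetical domain name
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	opts := out.DomainEndpointOptions.Options
	fmt.Println("HTTPS only:", aws.BoolValue(opts.EnforceHTTPS),
		"TLS policy:", aws.StringValue(opts.TLSSecurityPolicy))
}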
+type DomainEndpointOptionsStatus struct { + _ struct{} `type:"structure"` + + // The domain endpoint options configured for the domain. + // + // Options is a required field + Options *DomainEndpointOptions `type:"structure" required:"true"` + + // The status of the configured domain endpoint options. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainEndpointOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *DomainEndpointOptionsStatus) SetOptions(v *DomainEndpointOptions) *DomainEndpointOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DomainEndpointOptionsStatus) SetStatus(v *OptionStatus) *DomainEndpointOptionsStatus { + s.Status = v + return s +} + // The current status of the search domain. type DomainStatus struct { _ struct{} `type:"structure"` @@ -5486,9 +5830,11 @@ type OptionStatus struct { // // * RequiresIndexDocuments: the option's latest value will not be deployed // until IndexDocuments has been called and indexing is complete. + // // * Processing: the option's latest value is in the process of being activated. // // * Active: the option's latest value is completely deployed. + // // * FailedToValidate: the option value is not compatible with the domain's // data and cannot be used to index the data. You must either modify the // option value or update or remove the incompatible documents. @@ -6003,6 +6349,91 @@ func (s *UpdateAvailabilityOptionsOutput) SetAvailabilityOptions(v *Availability return s } +// Container for the parameters to the UpdateDomainEndpointOptions operation. +// Specifies the name of the domain you want to update and the domain endpoint +// options. +type UpdateDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to require that all requests to the domain arrive over HTTPS. We + // recommend Policy-Min-TLS-1-2-2019-07 for TLSSecurityPolicy. For compatibility + // with older clients, the default is Policy-Min-TLS-1-0-2019-07. + // + // DomainEndpointOptions is a required field + DomainEndpointOptions *DomainEndpointOptions `type:"structure" required:"true"` + + // A string that represents the name of a domain. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainEndpointOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDomainEndpointOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDomainEndpointOptionsInput"} + if s.DomainEndpointOptions == nil { + invalidParams.Add(request.NewErrParamRequired("DomainEndpointOptions")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *UpdateDomainEndpointOptionsInput) SetDomainEndpointOptions(v *DomainEndpointOptions) *UpdateDomainEndpointOptionsInput { + s.DomainEndpointOptions = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *UpdateDomainEndpointOptionsInput) SetDomainName(v string) *UpdateDomainEndpointOptionsInput { + s.DomainName = &v + return s +} + +// The result of a UpdateDomainEndpointOptions request. Contains the configuration +// and status of the domain's endpoint options. +type UpdateDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The newly-configured domain endpoint options. + DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainEndpointOptionsOutput) GoString() string { + return s.String() +} + +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *UpdateDomainEndpointOptionsOutput) SetDomainEndpointOptions(v *DomainEndpointOptionsStatus) *UpdateDomainEndpointOptionsOutput { + s.DomainEndpointOptions = v + return s +} + // Container for the parameters to the UpdateScalingParameters operation. Specifies // the name of the domain you want to update and the scaling parameters you // want to configure. @@ -6346,9 +6777,11 @@ const ( // // * RequiresIndexDocuments: The option's latest value will not be deployed // until IndexDocuments has been called and indexing is complete. +// // * Processing: The option's latest value is in the process of being activated. // // * Active: The option's latest value is fully deployed. +// // * FailedToValidate: The option value is not compatible with the domain's // data and cannot be used to index the data. You must either modify the // option value or update or remove the incompatible documents. @@ -6404,3 +6837,12 @@ const ( // SuggesterFuzzyMatchingHigh is a SuggesterFuzzyMatching enum value SuggesterFuzzyMatchingHigh = "high" ) + +// The minimum required TLS version. 
+const ( + // TLSSecurityPolicyPolicyMinTls10201907 is a TLSSecurityPolicy enum value + TLSSecurityPolicyPolicyMinTls10201907 = "Policy-Min-TLS-1-0-2019-07" + + // TLSSecurityPolicyPolicyMinTls12201907 is a TLSSecurityPolicy enum value + TLSSecurityPolicyPolicyMinTls12201907 = "Policy-Min-TLS-1-2-2019-07" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/errors.go index f0ed2f1e2fc..b807f29ab04 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/errors.go @@ -41,4 +41,10 @@ const ( // The request was rejected because it attempted to reference a resource that // does not exist. ErrCodeResourceNotFoundException = "ResourceNotFound" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The request was rejected because it has invalid parameters. + ErrCodeValidationException = "ValidationException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go index 850bc137051..76c21764817 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudsearch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudSearch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudSearch { svc := &CloudSearch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-01-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go index dccb979a457..4a92c66f38c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go @@ -58,12 +58,13 @@ func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, // AddTags API operation for AWS CloudTrail. // -// Adds one or more tags to a trail, up to a limit of 50. Tags must be unique -// per trail. Overwrites an existing tag's value when a new value is specified -// for an existing tag key. If you specify a key without a value, the tag will -// be created with the specified key and a value of null. 
You can tag a trail -// that applies to all regions only from the region in which the trail was created -// (that is, from its home region). +// Adds one or more tags to a trail, up to a limit of 50. Overwrites an existing +// tag's value when a new value is specified for an existing tag key. Tag key +// names must be unique for a trail; you cannot have two keys with the same +// name but different values. If you specify a key without a value, the tag +// will be created with the specified key and a value of null. You can tag a +// trail that applies to all AWS Regions only from the Region in which the trail +// was created (also known as its home region). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -107,8 +108,8 @@ func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, // * Not be in IP address format (for example, 192.168.5.4) // // * ErrCodeInvalidTagParameterException "InvalidTagParameterException" -// This exception is thrown when the key or value specified for the tag does -// not match the regular expression ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$. +// This exception is thrown when the specified tag key or values are not valid. +// It can also occur if there are duplicate tags or too many tags on the resource. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // This exception is thrown when the requested operation is not supported. @@ -189,8 +190,7 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // CreateTrail API operation for AWS CloudTrail. // // Creates a trail that specifies the settings for delivery of log data to an -// Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective -// of the region in which they were created. +// Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -248,7 +248,7 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // * Not be in IP address format (for example, 192.168.5.4) // // * ErrCodeTrailNotProvidedException "TrailNotProvidedException" -// This exception is deprecated. +// This exception is no longer in use. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" // This exception is thrown when the combination of parameters provided is not @@ -259,7 +259,7 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // bucket and the KMS key are not in the same region. // // * ErrCodeKmsKeyDisabledException "KmsKeyDisabledException" -// This exception is deprecated. +// This exception is no longer in use. // // * ErrCodeKmsException "KmsException" // This exception is thrown when there is an issue with the specified KMS key @@ -274,6 +274,10 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // * ErrCodeCloudWatchLogsDeliveryUnavailableException "CloudWatchLogsDeliveryUnavailableException" // Cannot set a CloudWatch Logs delivery for this region. // +// * ErrCodeInvalidTagParameterException "InvalidTagParameterException" +// This exception is thrown when the specified tag key or values are not valid. +// It can also occur if there are duplicate tags or too many tags on the resource. 
+// // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // This exception is thrown when the requested operation is not supported. // @@ -495,8 +499,8 @@ func (c *CloudTrail) DescribeTrailsRequest(input *DescribeTrailsInput) (req *req // DescribeTrails API operation for AWS CloudTrail. // -// Retrieves settings for the trail associated with the current region for your -// account. +// Retrieves settings for one or more trails associated with the current region +// for your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -589,8 +593,7 @@ func (c *CloudTrail) GetEventSelectorsRequest(input *GetEventSelectorsInput) (re // * If your event selector includes data events, the Amazon S3 objects or // AWS Lambda functions that you are logging for data events. // -// For more information, see Logging Data and Management Events for Trails -// (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html) +// For more information, see Logging Data and Management Events for Trails (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html) // in the AWS CloudTrail User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -648,6 +651,107 @@ func (c *CloudTrail) GetEventSelectorsWithContext(ctx aws.Context, input *GetEve return out, req.Send() } +const opGetTrail = "GetTrail" + +// GetTrailRequest generates a "aws/request.Request" representing the +// client's request for the GetTrail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTrail for more information on using the GetTrail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTrailRequest method. +// req, resp := client.GetTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/GetTrail +func (c *CloudTrail) GetTrailRequest(input *GetTrailInput) (req *request.Request, output *GetTrailOutput) { + op := &request.Operation{ + Name: opGetTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTrailInput{} + } + + output = &GetTrailOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTrail API operation for AWS CloudTrail. +// +// Returns settings information for a specified trail. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudTrail's +// API operation GetTrail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTrailNotFoundException "TrailNotFoundException" +// This exception is thrown when the trail with the given name is not found. 
+// +// * ErrCodeInvalidTrailNameException "InvalidTrailNameException" +// This exception is thrown when the provided trail name is not valid. Trail +// names must meet the following requirements: +// +// * Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores +// (_), or dashes (-) +// +// * Start with a letter or number, and end with a letter or number +// +// * Be between 3 and 128 characters +// +// * Have no adjacent periods, underscores or dashes. Names like my-_namespace +// and my--namespace are invalid. +// +// * Not be in IP address format (for example, 192.168.5.4) +// +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// This exception is thrown when the requested operation is not supported. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// This exception is thrown when the requested operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/GetTrail +func (c *CloudTrail) GetTrail(input *GetTrailInput) (*GetTrailOutput, error) { + req, out := c.GetTrailRequest(input) + return out, req.Send() +} + +// GetTrailWithContext is the same as GetTrail with the addition of +// the ability to pass a context and additional request options. +// +// See GetTrail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudTrail) GetTrailWithContext(ctx aws.Context, input *GetTrailInput, opts ...request.Option) (*GetTrailOutput, error) { + req, out := c.GetTrailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetTrailStatus = "GetTrailStatus" // GetTrailStatusRequest generates a "aws/request.Request" representing the @@ -778,6 +882,12 @@ func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *req Name: opListPublicKeys, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -843,6 +953,58 @@ func (c *CloudTrail) ListPublicKeysWithContext(ctx aws.Context, input *ListPubli return out, req.Send() } +// ListPublicKeysPages iterates over the pages of a ListPublicKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPublicKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPublicKeys operation. +// pageNum := 0 +// err := client.ListPublicKeysPages(params, +// func(page *cloudtrail.ListPublicKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudTrail) ListPublicKeysPages(input *ListPublicKeysInput, fn func(*ListPublicKeysOutput, bool) bool) error { + return c.ListPublicKeysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPublicKeysPagesWithContext same as ListPublicKeysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudTrail) ListPublicKeysPagesWithContext(ctx aws.Context, input *ListPublicKeysInput, fn func(*ListPublicKeysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPublicKeysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPublicKeysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPublicKeysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTags = "ListTags" // ListTagsRequest generates a "aws/request.Request" representing the @@ -874,6 +1036,12 @@ func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request Name: opListTags, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -957,6 +1125,198 @@ func (c *CloudTrail) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, return out, req.Send() } +// ListTagsPages iterates over the pages of a ListTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTags operation. +// pageNum := 0 +// err := client.ListTagsPages(params, +// func(page *cloudtrail.ListTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudTrail) ListTagsPages(input *ListTagsInput, fn func(*ListTagsOutput, bool) bool) error { + return c.ListTagsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsPagesWithContext same as ListTagsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudTrail) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, fn func(*ListTagsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTrails = "ListTrails" + +// ListTrailsRequest generates a "aws/request.Request" representing the +// client's request for the ListTrails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTrails for more information on using the ListTrails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTrailsRequest method. +// req, resp := client.ListTrailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/ListTrails +func (c *CloudTrail) ListTrailsRequest(input *ListTrailsInput) (req *request.Request, output *ListTrailsOutput) { + op := &request.Operation{ + Name: opListTrails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTrailsInput{} + } + + output = &ListTrailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTrails API operation for AWS CloudTrail. +// +// Lists trails that are in the current account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudTrail's +// API operation ListTrails for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// This exception is thrown when the requested operation is not supported. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// This exception is thrown when the requested operation is not permitted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/ListTrails +func (c *CloudTrail) ListTrails(input *ListTrailsInput) (*ListTrailsOutput, error) { + req, out := c.ListTrailsRequest(input) + return out, req.Send() +} + +// ListTrailsWithContext is the same as ListTrails with the addition of +// the ability to pass a context and additional request options. +// +// See ListTrails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudTrail) ListTrailsWithContext(ctx aws.Context, input *ListTrailsInput, opts ...request.Option) (*ListTrailsOutput, error) { + req, out := c.ListTrailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTrailsPages iterates over the pages of a ListTrails operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTrails method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTrails operation. 
+// pageNum := 0 +// err := client.ListTrailsPages(params, +// func(page *cloudtrail.ListTrailsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudTrail) ListTrailsPages(input *ListTrailsInput, fn func(*ListTrailsOutput, bool) bool) error { + return c.ListTrailsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTrailsPagesWithContext same as ListTrailsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudTrail) ListTrailsPagesWithContext(ctx aws.Context, input *ListTrailsInput, fn func(*ListTrailsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTrailsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTrailsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTrailsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opLookupEvents = "LookupEvents" // LookupEventsRequest generates a "aws/request.Request" representing the @@ -1008,8 +1368,8 @@ func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request // LookupEvents API operation for AWS CloudTrail. // // Looks up management events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events) -// captured by CloudTrail. Events for a region can be looked up in that region -// during the last 90 days. Lookup supports the following attributes: +// captured by CloudTrail. You can look up events that occurred in a region +// within the last 90 days. Lookup supports the following attributes: // // * AWS access key // @@ -1092,7 +1452,7 @@ func (c *CloudTrail) LookupEventsWithContext(ctx aws.Context, input *LookupEvent // // Example iterating over at most 3 pages of a LookupEvents operation. // pageNum := 0 // err := client.LookupEventsPages(params, -// func(page *LookupEventsOutput, lastPage bool) bool { +// func(page *cloudtrail.LookupEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1124,10 +1484,12 @@ func (c *CloudTrail) LookupEventsPagesWithContext(ctx aws.Context, input *Lookup }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*LookupEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*LookupEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1204,7 +1566,7 @@ func (c *CloudTrail) PutEventSelectorsRequest(input *PutEventSelectorsInput) (re // trail was created; otherwise, an InvalidHomeRegionException is thrown. // // You can configure up to five event selectors for each trail. 
For more information, -// see Logging Data and Management Events for Trails (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html) +// see Logging Data and Management Events for Trails (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html) // and Limits in AWS CloudTrail (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) // in the AWS CloudTrail User Guide. // @@ -1385,8 +1747,8 @@ func (c *CloudTrail) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Req // * Not be in IP address format (for example, 192.168.5.4) // // * ErrCodeInvalidTagParameterException "InvalidTagParameterException" -// This exception is thrown when the key or value specified for the tag does -// not match the regular expression ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$. +// This exception is thrown when the specified tag key or values are not valid. +// It can also occur if there are duplicate tags or too many tags on the resource. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // This exception is thrown when the requested operation is not supported. @@ -1772,7 +2134,7 @@ func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.R // * Not be in IP address format (for example, 192.168.5.4) // // * ErrCodeTrailNotProvidedException "TrailNotProvidedException" -// This exception is deprecated. +// This exception is no longer in use. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" // This exception is thrown when the combination of parameters provided is not @@ -1787,7 +2149,7 @@ func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.R // bucket and the KMS key are not in the same region. // // * ErrCodeKmsKeyDisabledException "KmsKeyDisabledException" -// This exception is deprecated. +// This exception is no longer in use. // // * ErrCodeKmsException "KmsException" // This exception is thrown when there is an issue with the specified KMS key @@ -1967,7 +2329,9 @@ type CreateTrailInput struct { IncludeGlobalServiceEvents *bool `type:"boolean"` // Specifies whether the trail is created in the current region or in all regions. - // The default is false. + // The default is false, which creates a trail only in the region where you + // are signed in. As a best practice, consider creating trails that log events + // in all regions. IsMultiRegionTrail *bool `type:"boolean"` // Specifies whether the trail is created for all accounts in an organization @@ -2009,20 +2373,23 @@ type CreateTrailInput struct { Name *string `type:"string" required:"true"` // Specifies the name of the Amazon S3 bucket designated for publishing log - // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). + // files. See Amazon S3 Bucket Naming Requirements (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). // // S3BucketName is a required field S3BucketName *string `type:"string" required:"true"` // Specifies the Amazon S3 key prefix that comes after the name of the bucket // you have designated for log file delivery. For more information, see Finding - // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). 
+ // Your CloudTrail Log Files (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). // The maximum length is 200 characters. S3KeyPrefix *string `type:"string"` // Specifies the name of the Amazon SNS topic defined for notification of log // file delivery. The maximum length is 256 characters. SnsTopicName *string `type:"string"` + + // A list of tags. + TagsList []*Tag `type:"list"` } // String returns the string representation @@ -2044,6 +2411,16 @@ func (s *CreateTrailInput) Validate() error { if s.S3BucketName == nil { invalidParams.Add(request.NewErrParamRequired("S3BucketName")) } + if s.TagsList != nil { + for i, v := range s.TagsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagsList", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2117,6 +2494,12 @@ func (s *CreateTrailInput) SetSnsTopicName(v string) *CreateTrailInput { return s } +// SetTagsList sets the TagsList field's value. +func (s *CreateTrailInput) SetTagsList(v []*Tag) *CreateTrailInput { + s.TagsList = v + return s +} + // Returns the objects or data listed below if successful. Otherwise, returns // an error. type CreateTrailOutput struct { @@ -2158,7 +2541,7 @@ type CreateTrailOutput struct { // Specifies the Amazon S3 key prefix that comes after the name of the bucket // you have designated for log file delivery. For more information, see Finding - // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + // Your CloudTrail Log Files (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). S3KeyPrefix *string `type:"string"` // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications @@ -2167,7 +2550,7 @@ type CreateTrailOutput struct { // arn:aws:sns:us-east-2:123456789012:MyTopic SnsTopicARN *string `type:"string"` - // This field is deprecated. Use SnsTopicARN. + // This field is no longer in use. Use SnsTopicARN. // // Deprecated: SnsTopicName has been deprecated SnsTopicName *string `deprecated:"true" type:"string"` @@ -2279,7 +2662,7 @@ func (s *CreateTrailOutput) SetTrailARN(v string) *CreateTrailOutput { // // The following example demonstrates how logging works when you configure logging // of all data events for an S3 bucket named bucket-1. In this example, the -// CloudTrail user spcified an empty prefix, and the option to log both Read +// CloudTrail user specified an empty prefix, and the option to log both Read // and Write data events. // // A user uploads an image file to bucket-1. @@ -2292,7 +2675,8 @@ func (s *CreateTrailOutput) SetTrailARN(v string) *CreateTrailOutput { // A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2. // // The PutObject API operation occurred for an object in an S3 bucket that the -// CloudTrail user didn't specify for the trail. The trail doesn’t log the event. +// CloudTrail user didn't specify for the trail. The trail doesn’t log the +// event. // // The following example demonstrates how logging works when you configure logging // of AWS Lambda data events for a Lambda function named MyLambdaFunction, but @@ -2321,16 +2705,10 @@ type DataResource struct { // the specified objects. // // * To log data events for all objects in all S3 buckets in your AWS account, - // specify the prefix as arn:aws:s3:::. 
- // - // This will also enable logging of data event activity performed by any user - // or role in your AWS account, even if that activity is performed on a bucket - // that belongs to another AWS account. - // - // * To log data events for all objects in all S3 buckets that include my-bucket - // in their names, specify the prefix as aws:s3:::my-bucket. The trail logs - // data events for all objects in all buckets whose name contains a match - // for my-bucket. + // specify the prefix as arn:aws:s3:::. This will also enable logging of + // data event activity performed by any user or role in your AWS account, + // even if that activity is performed on a bucket that belongs to another + // AWS account. // // * To log data events for all objects in an S3 bucket, specify the bucket // and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs @@ -2341,18 +2719,14 @@ type DataResource struct { // events for objects in this S3 bucket that match the prefix. // // * To log data events for all functions in your AWS account, specify the - // prefix as arn:aws:lambda. - // - // This will also enable logging of Invoke activity performed by any user or - // role in your AWS account, even if that activity is performed on a function - // that belongs to another AWS account. + // prefix as arn:aws:lambda. This will also enable logging of Invoke activity + // performed by any user or role in your AWS account, even if that activity + // is performed on a function that belongs to another AWS account. // - // * To log data eents for a specific Lambda function, specify the function - // ARN. - // - // Lambda function ARNs are exact. Unlike S3, you cannot use matching. For example, - // if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, - // data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. + // * To log data events for a specific Lambda function, specify the function + // ARN. Lambda function ARNs are exact. For example, if you specify a function + // ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events + // will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. // They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. Values []*string `type:"list"` } @@ -2634,7 +3008,7 @@ type EventSelector struct { // selectors in a trail. This limit does not apply if you configure resource // logging for all data events. // - // For more information, see Data Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events) + // For more information, see Data Events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events) // and Limits in AWS CloudTrail (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) // in the AWS CloudTrail User Guide. DataResources []*DataResource `type:"list"` @@ -2642,7 +3016,7 @@ type EventSelector struct { // Specify if you want your event selector to include management events for // your trail. 
// - // For more information, see Management Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-management-events) + // For more information, see Management Events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-management-events) // in the AWS CloudTrail User Guide. // // By default, the value is true. @@ -2771,6 +3145,68 @@ func (s *GetEventSelectorsOutput) SetTrailARN(v string) *GetEventSelectorsOutput return s } +type GetTrailInput struct { + _ struct{} `type:"structure"` + + // The name or the Amazon Resource Name (ARN) of the trail for which you want + // to retrieve settings information. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrailInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetTrailInput) SetName(v string) *GetTrailInput { + s.Name = &v + return s +} + +type GetTrailOutput struct { + _ struct{} `type:"structure"` + + // The settings for a trail. + Trail *Trail `type:"structure"` +} + +// String returns the string representation +func (s GetTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailOutput) GoString() string { + return s.String() +} + +// SetTrail sets the Trail field's value. +func (s *GetTrailOutput) SetTrail(v *Trail) *GetTrailOutput { + s.Trail = v + return s +} + // The name of a trail about which you want the current status. type GetTrailStatusInput struct { _ struct{} `type:"structure"` @@ -2830,15 +3266,15 @@ type GetTrailStatusOutput struct { // CloudWatch Logs. LatestCloudWatchLogsDeliveryTime *time.Time `type:"timestamp"` - // This field is deprecated. + // This field is no longer in use. LatestDeliveryAttemptSucceeded *string `type:"string"` - // This field is deprecated. + // This field is no longer in use. LatestDeliveryAttemptTime *string `type:"string"` // Displays any Amazon S3 error that CloudTrail encountered when attempting // to deliver log files to the designated bucket. For more information see the - // topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // topic Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) // in the Amazon S3 API Reference. // // This error occurs only when there is a problem with the destination S3 bucket @@ -2853,7 +3289,7 @@ type GetTrailStatusOutput struct { // Displays any Amazon S3 error that CloudTrail encountered when attempting // to deliver a digest file to the designated bucket. For more information see - // the topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // the topic Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) // in the Amazon S3 API Reference. 
// // This error occurs only when there is a problem with the destination S3 bucket @@ -2866,15 +3302,15 @@ type GetTrailStatusOutput struct { // to an account's Amazon S3 bucket. LatestDigestDeliveryTime *time.Time `type:"timestamp"` - // This field is deprecated. + // This field is no longer in use. LatestNotificationAttemptSucceeded *string `type:"string"` - // This field is deprecated. + // This field is no longer in use. LatestNotificationAttemptTime *string `type:"string"` // Displays any Amazon SNS error that CloudTrail encountered when attempting // to send a notification. For more information about Amazon SNS errors, see - // the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/welcome.html). + // the Amazon SNS Developer Guide (https://docs.aws.amazon.com/sns/latest/dg/welcome.html). LatestNotificationError *string `type:"string"` // Specifies the date and time of the most recent Amazon SNS notification that @@ -2889,10 +3325,10 @@ type GetTrailStatusOutput struct { // API calls for an AWS account. StopLoggingTime *time.Time `type:"timestamp"` - // This field is deprecated. + // This field is no longer in use. TimeLoggingStarted *string `type:"string"` - // This field is deprecated. + // This field is no longer in use. TimeLoggingStopped *string `type:"string"` } @@ -3174,6 +3610,59 @@ func (s *ListTagsOutput) SetResourceTagList(v []*ResourceTag) *ListTagsOutput { return s } +type ListTrailsInput struct { + _ struct{} `type:"structure"` + + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTrailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrailsInput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTrailsInput) SetNextToken(v string) *ListTrailsInput { + s.NextToken = &v + return s +} + +type ListTrailsOutput struct { + _ struct{} `type:"structure"` + + NextToken *string `type:"string"` + + // Returns the name, ARN, and home region of trails in the current account. + Trails []*TrailInfo `type:"list"` +} + +// String returns the string representation +func (s ListTrailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrailsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTrailsOutput) SetNextToken(v string) *ListTrailsOutput { + s.NextToken = &v + return s +} + +// SetTrails sets the Trails field's value. +func (s *ListTrailsOutput) SetTrails(v []*TrailInfo) *ListTrailsOutput { + s.Trails = v + return s +} + // Specifies an attribute and value that filter the events returned. type LookupAttribute struct { _ struct{} `type:"structure"` @@ -3607,8 +4096,8 @@ type Resource struct { // The type of a resource referenced by the event returned. When the resource // type cannot be determined, null is returned. Some examples of resource types // are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey - // for IAM. For a list of resource types supported for event lookup, see Resource - // Types Supported for Event Lookup (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/lookup_supported_resourcetypes.html). + // for IAM. 
To learn more about how to look up and filter events by the resource
+	// types supported for a service, see Filtering CloudTrail Events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/view-cloudtrail-events-console.html#filtering-cloudtrail-events).
 	ResourceType *string `type:"string"`
 }
 
@@ -3856,7 +4345,7 @@ type Trail struct {
 	// Otherwise, False.
 	IncludeGlobalServiceEvents *bool `type:"boolean"`
 
-	// Specifies whether the trail belongs only to one region or exists in all regions.
+	// Specifies whether the trail exists only in one region or exists in all regions.
 	IsMultiRegionTrail *bool `type:"boolean"`
 
 	// Specifies whether the trail is an organization trail.
@@ -3875,12 +4364,12 @@
 	Name *string `type:"string"`
 
 	// Name of the Amazon S3 bucket into which CloudTrail delivers your trail files.
-	// See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+	// See Amazon S3 Bucket Naming Requirements (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
 	S3BucketName *string `type:"string"`
 
 	// Specifies the Amazon S3 key prefix that comes after the name of the bucket
 	// you have designated for log file delivery. For more information, see Finding
-	// Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).The
+	// Your CloudTrail Log Files (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). The
 	// maximum length is 200 characters.
 	S3KeyPrefix *string `type:"string"`
 
@@ -3890,7 +4379,7 @@
 	// arn:aws:sns:us-east-2:123456789012:MyTopic
 	SnsTopicARN *string `type:"string"`
 
-	// This field is deprecated. Use SnsTopicARN.
+	// This field is no longer in use. Use SnsTopicARN.
 	//
 	// Deprecated: SnsTopicName has been deprecated
 	SnsTopicName *string `deprecated:"true" type:"string"`
@@ -4001,6 +4490,49 @@ func (s *Trail) SetTrailARN(v string) *Trail {
 	return s
 }
 
+// Information about a CloudTrail trail, including the trail's name, home region,
+// and Amazon Resource Name (ARN).
+type TrailInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS region in which a trail was created.
+	HomeRegion *string `type:"string"`
+
+	// The name of a trail.
+	Name *string `type:"string"`
+
+	// The ARN of a trail.
+	TrailARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TrailInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrailInfo) GoString() string {
+	return s.String()
+}
+
+// SetHomeRegion sets the HomeRegion field's value.
+func (s *TrailInfo) SetHomeRegion(v string) *TrailInfo {
+	s.HomeRegion = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *TrailInfo) SetName(v string) *TrailInfo {
+	s.Name = &v
+	return s
+}
+
+// SetTrailARN sets the TrailARN field's value.
+func (s *TrailInfo) SetTrailARN(v string) *TrailInfo {
+	s.TrailARN = &v
+	return s
+}
+
 // Specifies settings to update for the trail.
 type UpdateTrailInput struct {
 	_ struct{} `type:"structure"`
@@ -4035,7 +4567,8 @@
 	// and this value is set to true, shadow trails (replications of the trail)
 	// will be created in the other regions. If the trail exists in all regions
 	// and this value is set to false, the trail will remain in the region where
-	// it was created, and its shadow trails in other regions will be deleted.
+ // it was created, and its shadow trails in other regions will be deleted. As + // a best practice, consider using trails that log events in all regions. IsMultiRegionTrail *bool `type:"boolean"` // Specifies whether the trail is applied to all accounts in an organization @@ -4087,12 +4620,12 @@ type UpdateTrailInput struct { Name *string `type:"string" required:"true"` // Specifies the name of the Amazon S3 bucket designated for publishing log - // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). + // files. See Amazon S3 Bucket Naming Requirements (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). S3BucketName *string `type:"string"` // Specifies the Amazon S3 key prefix that comes after the name of the bucket // you have designated for log file delivery. For more information, see Finding - // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + // Your CloudTrail Log Files (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). // The maximum length is 200 characters. S3KeyPrefix *string `type:"string"` @@ -4231,7 +4764,7 @@ type UpdateTrailOutput struct { // Specifies the Amazon S3 key prefix that comes after the name of the bucket // you have designated for log file delivery. For more information, see Finding - // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + // Your CloudTrail Log Files (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). S3KeyPrefix *string `type:"string"` // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications @@ -4240,7 +4773,7 @@ type UpdateTrailOutput struct { // arn:aws:sns:us-east-2:123456789012:MyTopic SnsTopicARN *string `type:"string"` - // This field is deprecated. Use SnsTopicARN. + // This field is no longer in use. Use SnsTopicARN. // // Deprecated: SnsTopicName has been deprecated SnsTopicName *string `deprecated:"true" type:"string"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/doc.go index 8226ac8b068..d2b23643d38 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/doc.go @@ -21,7 +21,7 @@ // to download and install them, see the Tools for Amazon Web Services page // (http://aws.amazon.com/tools/). // -// See the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html) +// See the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html) // for information about the data that is included with each AWS API call listed // in the log files. 
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/errors.go index 4fdd9a37b55..69720a1927f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/errors.go @@ -152,8 +152,8 @@ const ( // ErrCodeInvalidTagParameterException for service response error code // "InvalidTagParameterException". // - // This exception is thrown when the key or value specified for the tag does - // not match the regular expression ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$. + // This exception is thrown when the specified tag key or values are not valid. + // It can also occur if there are duplicate tags or too many tags on the resource. ErrCodeInvalidTagParameterException = "InvalidTagParameterException" // ErrCodeInvalidTimeRangeException for service response error code @@ -198,7 +198,7 @@ const ( // ErrCodeKmsKeyDisabledException for service response error code // "KmsKeyDisabledException". // - // This exception is deprecated. + // This exception is no longer in use. ErrCodeKmsKeyDisabledException = "KmsKeyDisabledException" // ErrCodeKmsKeyNotFoundException for service response error code @@ -287,7 +287,7 @@ const ( // ErrCodeTrailNotProvidedException for service response error code // "TrailNotProvidedException". // - // This exception is deprecated. + // This exception is no longer in use. ErrCodeTrailNotProvidedException = "TrailNotProvidedException" // ErrCodeUnsupportedOperationException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go index 5f1d4ddc41f..ee863344c95 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudTrail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudTrail { svc := &CloudTrail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-11-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index 7703ca56ee1..558e792d03d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -58,7 +58,8 @@ func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request // DeleteAlarms API operation for Amazon CloudWatch. // -// Deletes the specified alarms. In the event of an error, no alarms are deleted. +// Deletes the specified alarms. You can delete up to 50 alarms in one operation. +// In the event of an error, no alarms are deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -93,6 +94,95 @@ func (c *CloudWatch) DeleteAlarmsWithContext(ctx aws.Context, input *DeleteAlarm return out, req.Send() } +const opDeleteAnomalyDetector = "DeleteAnomalyDetector" + +// DeleteAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAnomalyDetector for more information on using the DeleteAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAnomalyDetectorRequest method. +// req, resp := client.DeleteAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAnomalyDetector +func (c *CloudWatch) DeleteAnomalyDetectorRequest(input *DeleteAnomalyDetectorInput) (req *request.Request, output *DeleteAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opDeleteAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAnomalyDetectorInput{} + } + + output = &DeleteAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAnomalyDetector API operation for Amazon CloudWatch. +// +// Deletes the specified anomaly detection model from your account. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteAnomalyDetector for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// * ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// * ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAnomalyDetector +func (c *CloudWatch) DeleteAnomalyDetector(input *DeleteAnomalyDetectorInput) (*DeleteAnomalyDetectorOutput, error) { + req, out := c.DeleteAnomalyDetectorRequest(input) + return out, req.Send() +} + +// DeleteAnomalyDetectorWithContext is the same as DeleteAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteAnomalyDetectorWithContext(ctx aws.Context, input *DeleteAnomalyDetectorInput, opts ...request.Option) (*DeleteAnomalyDetectorOutput, error) { + req, out := c.DeleteAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDashboards = "DeleteDashboards" // DeleteDashboardsRequest generates a "aws/request.Request" representing the @@ -280,7 +370,7 @@ func (c *CloudWatch) DescribeAlarmHistoryWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeAlarmHistory operation. // pageNum := 0 // err := client.DescribeAlarmHistoryPages(params, -// func(page *DescribeAlarmHistoryOutput, lastPage bool) bool { +// func(page *cloudwatch.DescribeAlarmHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -312,10 +402,12 @@ func (c *CloudWatch) DescribeAlarmHistoryPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeAlarmHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeAlarmHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -417,7 +509,7 @@ func (c *CloudWatch) DescribeAlarmsWithContext(ctx aws.Context, input *DescribeA // // Example iterating over at most 3 pages of a DescribeAlarms operation. 
// pageNum := 0 // err := client.DescribeAlarmsPages(params, -// func(page *DescribeAlarmsOutput, lastPage bool) bool { +// func(page *cloudwatch.DescribeAlarmsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -449,10 +541,12 @@ func (c *CloudWatch) DescribeAlarmsPagesWithContext(ctx aws.Context, input *Desc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeAlarmsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeAlarmsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -531,6 +625,93 @@ func (c *CloudWatch) DescribeAlarmsForMetricWithContext(ctx aws.Context, input * return out, req.Send() } +const opDescribeAnomalyDetectors = "DescribeAnomalyDetectors" + +// DescribeAnomalyDetectorsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAnomalyDetectors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAnomalyDetectors for more information on using the DescribeAnomalyDetectors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAnomalyDetectorsRequest method. +// req, resp := client.DescribeAnomalyDetectorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAnomalyDetectors +func (c *CloudWatch) DescribeAnomalyDetectorsRequest(input *DescribeAnomalyDetectorsInput) (req *request.Request, output *DescribeAnomalyDetectorsOutput) { + op := &request.Operation{ + Name: opDescribeAnomalyDetectors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAnomalyDetectorsInput{} + } + + output = &DescribeAnomalyDetectorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAnomalyDetectors API operation for Amazon CloudWatch. +// +// Lists the anomaly detection models that you have created in your account. +// You can list all models in your account or filter the results to only the +// models that are related to a certain namespace, metric name, or metric dimension. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeAnomalyDetectors for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. +// +// * ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. 
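+//
+// A minimal illustrative sketch (not part of the generated documentation),
+// assuming a configured client named svc; the namespace filter and page size
+// are example values:
+//
+//    out, err := svc.DescribeAnomalyDetectors(&cloudwatch.DescribeAnomalyDetectorsInput{
+//        Namespace:  aws.String("AWS/EC2"), // assumed example namespace
+//        MaxResults: aws.Int64(10),         // documented maximum page size
+//    })
+//    if err == nil {
+//        for _, d := range out.AnomalyDetectors {
+//            fmt.Println(aws.StringValue(d.MetricName), aws.StringValue(d.Stat))
+//        }
+//    }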
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAnomalyDetectors +func (c *CloudWatch) DescribeAnomalyDetectors(input *DescribeAnomalyDetectorsInput) (*DescribeAnomalyDetectorsOutput, error) { + req, out := c.DescribeAnomalyDetectorsRequest(input) + return out, req.Send() +} + +// DescribeAnomalyDetectorsWithContext is the same as DescribeAnomalyDetectors with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAnomalyDetectors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAnomalyDetectorsWithContext(ctx aws.Context, input *DescribeAnomalyDetectorsInput, opts ...request.Option) (*DescribeAnomalyDetectorsOutput, error) { + req, out := c.DescribeAnomalyDetectorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisableAlarmActions = "DisableAlarmActions" // DisableAlarmActionsRequest generates a "aws/request.Request" representing the @@ -857,6 +1038,13 @@ func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *reque // and retrievable only with a resolution of 5 minutes. After 63 days, the data // is further aggregated and is available with a resolution of 1 hour. // +// If you omit Unit in your request, all data that was collected with any unit +// is returned, along with the corresponding units that were specified when +// the data was reported to CloudWatch. If you specify a unit, the operation +// returns only the data that was collected with that unit specified. If you +// specify a unit that does not match the data collected, the results of the +// operation are null. CloudWatch does not perform unit conversions. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -901,7 +1089,7 @@ func (c *CloudWatch) GetMetricDataWithContext(ctx aws.Context, input *GetMetricD // // Example iterating over at most 3 pages of a GetMetricData operation. // pageNum := 0 // err := client.GetMetricDataPages(params, -// func(page *GetMetricDataOutput, lastPage bool) bool { +// func(page *cloudwatch.GetMetricDataOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -933,10 +1121,12 @@ func (c *CloudWatch) GetMetricDataPagesWithContext(ctx aws.Context, input *GetMe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetMetricDataOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetMetricDataOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1274,7 +1464,7 @@ func (c *CloudWatch) ListDashboardsWithContext(ctx aws.Context, input *ListDashb // // Example iterating over at most 3 pages of a ListDashboards operation.
// pageNum := 0 // err := client.ListDashboardsPages(params, -// func(page *ListDashboardsOutput, lastPage bool) bool { +// func(page *cloudwatch.ListDashboardsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1306,10 +1496,12 @@ func (c *CloudWatch) ListDashboardsPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1420,7 +1612,7 @@ func (c *CloudWatch) ListMetricsWithContext(ctx aws.Context, input *ListMetricsI // // Example iterating over at most 3 pages of a ListMetrics operation. // pageNum := 0 // err := client.ListMetricsPages(params, -// func(page *ListMetricsOutput, lastPage bool) bool { +// func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1452,10 +1644,12 @@ func (c *CloudWatch) ListMetricsPagesWithContext(ctx aws.Context, input *ListMet }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMetricsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMetricsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1544,6 +1738,98 @@ func (c *CloudWatch) ListTagsForResourceWithContext(ctx aws.Context, input *List return out, req.Send() } +const opPutAnomalyDetector = "PutAnomalyDetector" + +// PutAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the PutAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAnomalyDetector for more information on using the PutAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutAnomalyDetectorRequest method. +// req, resp := client.PutAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutAnomalyDetector +func (c *CloudWatch) PutAnomalyDetectorRequest(input *PutAnomalyDetectorInput) (req *request.Request, output *PutAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opPutAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAnomalyDetectorInput{} + } + + output = &PutAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutAnomalyDetector API operation for Amazon CloudWatch. +// +// Creates an anomaly detection model for a CloudWatch metric. You can use the +// model to display a band of expected normal values when the metric is graphed. +// +// For more information, see CloudWatch Anomaly Detection (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Anomaly_Detection.html). +// +// Returns awserr.Error for service API and SDK errors. 
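+//
+// A minimal illustrative sketch (not part of the generated documentation),
+// assuming a configured client named svc; the metric identifiers and time
+// zone are example values only:
+//
+//    _, err := svc.PutAnomalyDetector(&cloudwatch.PutAnomalyDetectorInput{
+//        Namespace:  aws.String("AWS/EC2"),        // assumed example namespace
+//        MetricName: aws.String("CPUUtilization"), // assumed example metric
+//        Stat:       aws.String("Average"),
+//        Configuration: &cloudwatch.AnomalyDetectorConfiguration{
+//            MetricTimezone: aws.String("UTC"),
+//        },
+//    })
+//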
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutAnomalyDetector for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// The operation exceeded one or more limits. +// +// * ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// * ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutAnomalyDetector +func (c *CloudWatch) PutAnomalyDetector(input *PutAnomalyDetectorInput) (*PutAnomalyDetectorOutput, error) { + req, out := c.PutAnomalyDetectorRequest(input) + return out, req.Send() +} + +// PutAnomalyDetectorWithContext is the same as PutAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See PutAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutAnomalyDetectorWithContext(ctx aws.Context, input *PutAnomalyDetectorInput, opts ...request.Option) (*PutAnomalyDetectorOutput, error) { + req, out := c.PutAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutDashboard = "PutDashboard" // PutDashboardRequest generates a "aws/request.Request" representing the @@ -1592,8 +1878,7 @@ func (c *CloudWatch) PutDashboardRequest(input *PutDashboardInput) (req *request // dashboard. If you update a dashboard, the entire contents are replaced with // what you specify here. // -// There is no limit to the number of dashboards in your account. All dashboards -// in your account are global, not region-specific. +// All dashboards in your account are global, not region-specific. // // A simple way to create a dashboard using PutDashboard is to copy an existing // dashboard. To copy an existing dashboard using the console, you can load @@ -1689,8 +1974,10 @@ func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *req // PutMetricAlarm API operation for Amazon CloudWatch. // -// Creates or updates an alarm and associates it with the specified metric or -// metric math expression. +// Creates or updates an alarm and associates it with the specified metric, +// metric math expression, or anomaly detection model. +// +// Alarms based on anomaly detection models cannot have Auto Scaling actions. // // When this operation creates an alarm, the alarm state is immediately set // to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. @@ -2027,19 +2314,20 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // TagResource API operation for Amazon CloudWatch. // // Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. +// Currently, the only CloudWatch resources that can be tagged are alarms. 
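+//
+// A minimal illustrative sketch (not part of the generated documentation),
+// assuming a configured client named svc; the alarm ARN and tag values are
+// assumptions for the example:
+//
+//    _, err := svc.TagResource(&cloudwatch.TagResourceInput{
+//        ResourceARN: aws.String("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm"),
+//        Tags: []*cloudwatch.Tag{
+//            {Key: aws.String("team"), Value: aws.String("platform")},
+//        },
+//    })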
+// +// Tags can help you organize and categorize your resources. You can also use // them to scope user permissions, by granting a user permission to access or -// change only resources with certain tag values. In CloudWatch, alarms can -// be tagged. +// change only resources with certain tag values. // // Tags don't have any semantic meaning to AWS and are interpreted strictly // as strings of characters. // -// You can use the TagResource action with a resource that already has tags. -// If you specify a new tag key for the resource, this tag is appended to the -// list of tags associated with the resource. If you specify a tag key that -// is already associated with the resource, the new tag value that you specify -// replaces the previous value for that tag. +// You can use the TagResource action with an alarm that already has tags. If +// you specify a new tag key for the alarm, this tag is appended to the list +// of tags associated with the alarm. If you specify a tag key that is already +// associated with the alarm, the new tag value that you specify replaces the +// previous value for that tag. // // You can associate as many as 50 tags with a resource. // @@ -2234,6 +2522,133 @@ func (s *AlarmHistoryItem) SetTimestamp(v time.Time) *AlarmHistoryItem { return s } +// An anomaly detection model associated with a particular CloudWatch metric +// and statistic. You can use the model to display a band of expected normal +// values when the metric is graphed. +type AnomalyDetector struct { + _ struct{} `type:"structure"` + + // The configuration specifies details about how the anomaly detection model + // is to be trained, including time ranges to exclude from use for training + // the model, and the time zone to use for the metric. + Configuration *AnomalyDetectorConfiguration `type:"structure"` + + // The metric dimensions associated with the anomaly detection model. + Dimensions []*Dimension `type:"list"` + + // The name of the metric associated with the anomaly detection model. + MetricName *string `min:"1" type:"string"` + + // The namespace of the metric associated with the anomaly detection model. + Namespace *string `min:"1" type:"string"` + + // The statistic associated with the anomaly detection model. + Stat *string `type:"string"` +} + +// String returns the string representation +func (s AnomalyDetector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnomalyDetector) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *AnomalyDetector) SetConfiguration(v *AnomalyDetectorConfiguration) *AnomalyDetector { + s.Configuration = v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *AnomalyDetector) SetDimensions(v []*Dimension) *AnomalyDetector { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *AnomalyDetector) SetMetricName(v string) *AnomalyDetector { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *AnomalyDetector) SetNamespace(v string) *AnomalyDetector { + s.Namespace = &v + return s +} + +// SetStat sets the Stat field's value. +func (s *AnomalyDetector) SetStat(v string) *AnomalyDetector { + s.Stat = &v + return s +} + +// The configuration specifies details about how the anomaly detection model +// is to be trained, including time ranges to exclude from use for training +// the model and the time zone to use for the metric.
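+//
+// A minimal illustrative sketch (not part of the generated documentation) of
+// building a configuration that excludes a deployment window from training
+// and pins the model to UTC; the dates are example values:
+//
+//    cfg := &cloudwatch.AnomalyDetectorConfiguration{
+//        MetricTimezone: aws.String("UTC"),
+//        ExcludedTimeRanges: []*cloudwatch.Range{{
+//            StartTime: aws.Time(time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC)),
+//            EndTime:   aws.Time(time.Date(2019, 7, 1, 23, 59, 59, 0, time.UTC)),
+//        }},
+//    }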
+type AnomalyDetectorConfiguration struct { + _ struct{} `type:"structure"` + + // An array of time ranges to exclude from use when the anomaly detection model + // is trained. Use this to make sure that events that could cause unusual values + // for the metric, such as deployments, aren't used when CloudWatch creates + // the model. + ExcludedTimeRanges []*Range `type:"list"` + + // The time zone to use for the metric. This is useful to enable the model to + // automatically account for daylight savings time changes if the metric is + // sensitive to such time changes. + // + // To specify a time zone, use the name of the time zone as specified in the + // standard tz database. For more information, see tz database (https://en.wikipedia.org/wiki/Tz_database). + MetricTimezone *string `type:"string"` +} + +// String returns the string representation +func (s AnomalyDetectorConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnomalyDetectorConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnomalyDetectorConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnomalyDetectorConfiguration"} + if s.ExcludedTimeRanges != nil { + for i, v := range s.ExcludedTimeRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExcludedTimeRanges", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExcludedTimeRanges sets the ExcludedTimeRanges field's value. +func (s *AnomalyDetectorConfiguration) SetExcludedTimeRanges(v []*Range) *AnomalyDetectorConfiguration { + s.ExcludedTimeRanges = v + return s +} + +// SetMetricTimezone sets the MetricTimezone field's value. +func (s *AnomalyDetectorConfiguration) SetMetricTimezone(v string) *AnomalyDetectorConfiguration { + s.MetricTimezone = &v + return s +} + // Represents a specific dashboard. type DashboardEntry struct { _ struct{} `type:"structure"` @@ -2460,6 +2875,111 @@ func (s DeleteAlarmsOutput) GoString() string { return s.String() } +type DeleteAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The metric dimensions associated with the anomaly detection model to delete. + Dimensions []*Dimension `type:"list"` + + // The metric name associated with the anomaly detection model to delete. + // + // MetricName is a required field + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace associated with the anomaly detection model to delete. + // + // Namespace is a required field + Namespace *string `min:"1" type:"string" required:"true"` + + // The statistic associated with the anomaly detection model to delete. + // + // Stat is a required field + Stat *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAnomalyDetectorInput"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Stat == nil { + invalidParams.Add(request.NewErrParamRequired("Stat")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *DeleteAnomalyDetectorInput) SetDimensions(v []*Dimension) *DeleteAnomalyDetectorInput { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *DeleteAnomalyDetectorInput) SetMetricName(v string) *DeleteAnomalyDetectorInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DeleteAnomalyDetectorInput) SetNamespace(v string) *DeleteAnomalyDetectorInput { + s.Namespace = &v + return s +} + +// SetStat sets the Stat field's value. +func (s *DeleteAnomalyDetectorInput) SetStat(v string) *DeleteAnomalyDetectorInput { + s.Stat = &v + return s +} + +type DeleteAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnomalyDetectorOutput) GoString() string { + return s.String() +} + type DeleteDashboardsInput struct { _ struct{} `type:"structure"` @@ -2824,70 +3344,202 @@ func (s *DescribeAlarmsInput) Validate() error { return nil } -// SetActionPrefix sets the ActionPrefix field's value. -func (s *DescribeAlarmsInput) SetActionPrefix(v string) *DescribeAlarmsInput { - s.ActionPrefix = &v +// SetActionPrefix sets the ActionPrefix field's value. +func (s *DescribeAlarmsInput) SetActionPrefix(v string) *DescribeAlarmsInput { + s.ActionPrefix = &v + return s +} + +// SetAlarmNamePrefix sets the AlarmNamePrefix field's value. +func (s *DescribeAlarmsInput) SetAlarmNamePrefix(v string) *DescribeAlarmsInput { + s.AlarmNamePrefix = &v + return s +} + +// SetAlarmNames sets the AlarmNames field's value. +func (s *DescribeAlarmsInput) SetAlarmNames(v []*string) *DescribeAlarmsInput { + s.AlarmNames = v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeAlarmsInput) SetMaxRecords(v int64) *DescribeAlarmsInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmsInput) SetNextToken(v string) *DescribeAlarmsInput { + s.NextToken = &v + return s +} + +// SetStateValue sets the StateValue field's value. +func (s *DescribeAlarmsInput) SetStateValue(v string) *DescribeAlarmsInput { + s.StateValue = &v + return s +} + +type DescribeAlarmsOutput struct { + _ struct{} `type:"structure"` + + // The information for the specified alarms. 
+ MetricAlarms []*MetricAlarm `type:"list"` + + // The token that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// SetMetricAlarms sets the MetricAlarms field's value. +func (s *DescribeAlarmsOutput) SetMetricAlarms(v []*MetricAlarm) *DescribeAlarmsOutput { + s.MetricAlarms = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmsOutput) SetNextToken(v string) *DescribeAlarmsOutput { + s.NextToken = &v + return s +} + +type DescribeAnomalyDetectorsInput struct { + _ struct{} `type:"structure"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified metric dimensions. If there are multiple metrics that + // have these dimensions and have anomaly detection models associated, they're + // all returned. + Dimensions []*Dimension `type:"list"` + + // The maximum number of results to return in one operation. The maximum value + // you can specify is 10. + // + // To retrieve the remaining results, make another call with the returned NextToken + // value. + MaxResults *int64 `min:"1" type:"integer"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified metric name. If there are multiple metrics with this name + // in different namespaces that have anomaly detection models, they're all returned. + MetricName *string `min:"1" type:"string"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified namespace. + Namespace *string `min:"1" type:"string"` + + // Use the token returned by the previous operation to request the next page + // of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAnomalyDetectorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAnomalyDetectorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAnomalyDetectorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAnomalyDetectorsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *DescribeAnomalyDetectorsInput) SetDimensions(v []*Dimension) *DescribeAnomalyDetectorsInput { + s.Dimensions = v return s } -// SetAlarmNamePrefix sets the AlarmNamePrefix field's value. 
-func (s *DescribeAlarmsInput) SetAlarmNamePrefix(v string) *DescribeAlarmsInput { - s.AlarmNamePrefix = &v +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAnomalyDetectorsInput) SetMaxResults(v int64) *DescribeAnomalyDetectorsInput { + s.MaxResults = &v return s } -// SetAlarmNames sets the AlarmNames field's value. -func (s *DescribeAlarmsInput) SetAlarmNames(v []*string) *DescribeAlarmsInput { - s.AlarmNames = v +// SetMetricName sets the MetricName field's value. +func (s *DescribeAnomalyDetectorsInput) SetMetricName(v string) *DescribeAnomalyDetectorsInput { + s.MetricName = &v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeAlarmsInput) SetMaxRecords(v int64) *DescribeAlarmsInput { - s.MaxRecords = &v +// SetNamespace sets the Namespace field's value. +func (s *DescribeAnomalyDetectorsInput) SetNamespace(v string) *DescribeAnomalyDetectorsInput { + s.Namespace = &v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeAlarmsInput) SetNextToken(v string) *DescribeAlarmsInput { +func (s *DescribeAnomalyDetectorsInput) SetNextToken(v string) *DescribeAnomalyDetectorsInput { s.NextToken = &v return s } -// SetStateValue sets the StateValue field's value. -func (s *DescribeAlarmsInput) SetStateValue(v string) *DescribeAlarmsInput { - s.StateValue = &v - return s -} - -type DescribeAlarmsOutput struct { +type DescribeAnomalyDetectorsOutput struct { _ struct{} `type:"structure"` - // The information for the specified alarms. - MetricAlarms []*MetricAlarm `type:"list"` + // The list of anomaly detection models returned by the operation. + AnomalyDetectors []*AnomalyDetector `type:"list"` - // The token that marks the start of the next batch of returned results. + // A token that you can use in a subsequent operation to retrieve the next set + // of results. NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeAlarmsOutput) String() string { +func (s DescribeAnomalyDetectorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAlarmsOutput) GoString() string { +func (s DescribeAnomalyDetectorsOutput) GoString() string { return s.String() } -// SetMetricAlarms sets the MetricAlarms field's value. -func (s *DescribeAlarmsOutput) SetMetricAlarms(v []*MetricAlarm) *DescribeAlarmsOutput { - s.MetricAlarms = v +// SetAnomalyDetectors sets the AnomalyDetectors field's value. +func (s *DescribeAnomalyDetectorsOutput) SetAnomalyDetectors(v []*AnomalyDetector) *DescribeAnomalyDetectorsOutput { + s.AnomalyDetectors = v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeAlarmsOutput) SetNextToken(v string) *DescribeAlarmsOutput { +func (s *DescribeAnomalyDetectorsOutput) SetNextToken(v string) *DescribeAnomalyDetectorsOutput { s.NextToken = &v return s } @@ -3195,6 +3847,9 @@ type GetMetricDataInput struct { // The time stamp indicating the latest data to be returned. // + // The value specified is exclusive; results include data points up to the specified + // time stamp. + // // For better performance, specify StartTime and EndTime values that align with // the value of the metric's Period and sync up with the beginning and end of // an hour. For example, if the Period of a metric is 5 minutes, specifying @@ -3228,6 +3883,28 @@ type GetMetricDataInput struct { // The time stamp indicating the earliest data to be returned. 
// +// The value specified is inclusive; results include data points with the specified +// time stamp. +// +// CloudWatch rounds the specified time stamp as follows: +// +// * Start time less than 15 days ago - Round down to the nearest whole minute. +// For example, 12:32:34 is rounded down to 12:32:00. +// +// * Start time between 15 and 63 days ago - Round down to the nearest 5-minute +// clock interval. For example, 12:32:34 is rounded down to 12:30:00. +// +// * Start time greater than 63 days ago - Round down to the nearest 1-hour +// clock interval. For example, 12:32:34 is rounded down to 12:00:00. +// +// If you set Period to 5, 10, or 30, the start time of your request is rounded +// down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions +// of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for +// the previous 10-second period, the start time of your request is rounded +// down and you receive data from 01:05:10 to 01:05:20. If you make a query +// at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, +// you receive data timestamped between 15:02:15 and 15:07:15. +// // For better performance, specify StartTime and EndTime values that align with // the value of the metric's Period and sync up with the beginning and end of // an hour. For example, if the Period of a metric is 5 minutes, specifying @@ -3456,9 +4133,12 @@ type GetMetricStatisticsInput struct { // either Statistics or ExtendedStatistics, but not both. Statistics []*string `min:"1" type:"list"` - // The unit for a given metric. Metrics may be reported in multiple units. Not - // supplying a unit results in all units being returned. If you specify only - // a unit that the metric does not report, the results of the call are null. + // The unit for a given metric. If you omit Unit, all data that was collected + // with any unit is returned, along with the corresponding units that were specified + // when the data was reported to CloudWatch. If you specify a unit, the operation + // returns only the data that was collected with that unit specified. If you + // specify a unit that does not match the data collected, the results of the + // operation are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -4112,9 +4792,15 @@ type MetricAlarm struct { // Name (ARN). InsufficientDataActions []*string `type:"list"` - // The name of the metric associated with the alarm. + // The name of the metric associated with the alarm, if this is an alarm based + // on a single metric. MetricName *string `min:"1" type:"string"` + // An array of MetricDataQuery structures, used in an alarm based on a metric + // math expression. Each structure either retrieves a metric or performs a math + // expression. One item in the Metrics array is the math expression that the + // alarm watches. This expression is designated by having ReturnData set to + // true. Metrics []*MetricDataQuery `type:"list"` // The namespace of the metric associated with the alarm. @@ -4146,6 +4832,10 @@ type MetricAlarm struct { // The value to compare with the specified statistic. Threshold *float64 `type:"double"` + // In an alarm based on an anomaly detection model, this is the ID of the ANOMALY_DETECTION_BAND + // function used as the threshold for the alarm. + ThresholdMetricId *string `min:"1" type:"string"` + // Sets how this alarm is to handle missing data points.
If this parameter is // omitted, the default behavior of missing is used. TreatMissingData *string `min:"1" type:"string"` @@ -4308,6 +4998,12 @@ func (s *MetricAlarm) SetThreshold(v float64) *MetricAlarm { return s } +// SetThresholdMetricId sets the ThresholdMetricId field's value. +func (s *MetricAlarm) SetThresholdMetricId(v string) *MetricAlarm { + s.ThresholdMetricId = &v + return s +} + // SetTreatMissingData sets the TreatMissingData field's value. func (s *MetricAlarm) SetTreatMissingData(v string) *MetricAlarm { s.TreatMissingData = &v @@ -4383,6 +5079,19 @@ type MetricDataQuery struct { // MetricStat but not both. MetricStat *MetricStat `type:"structure"` + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData operation that includes a StorageResolution of 1 second. + // + // Use this field only when you are performing a GetMetricData operation, and + // only when you are specifying the Expression field. Do not use this field + // with a PutMetricAlarm operation or when you are specifying a MetricStat in + // a GetMetricData operation. + Period *int64 `min:"1" type:"integer"` + // When used in GetMetricData, this option indicates whether to return the timestamps // and raw data values of this metric. If you are performing this call just // to do math expressions and do not also need the raw data returned, you can @@ -4416,6 +5125,9 @@ func (s *MetricDataQuery) Validate() error { if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } if s.MetricStat != nil { if err := s.MetricStat.Validate(); err != nil { invalidParams.AddNested("MetricStat", err.(request.ErrInvalidParams)) @@ -4452,6 +5164,12 @@ func (s *MetricDataQuery) SetMetricStat(v *MetricStat) *MetricDataQuery { return s } +// SetPeriod sets the Period field's value. +func (s *MetricDataQuery) SetPeriod(v int64) *MetricDataQuery { + s.Period = &v + return s +} + // SetReturnData sets the ReturnData field's value. func (s *MetricDataQuery) SetReturnData(v bool) *MetricDataQuery { s.ReturnData = &v @@ -4578,7 +5296,10 @@ type MetricDatum struct { // since Jan 1, 1970 00:00:00 UTC. Timestamp *time.Time `type:"timestamp"` - // The unit of the metric. + // When you are using a Put operation, this defines what unit you want to use + // when storing the metric. + // + // In a Get operation, this displays the unit that is used for the metric. Unit *string `type:"string" enum:"StandardUnit"` // The value for the metric. @@ -4712,7 +5433,25 @@ type MetricStat struct { // Metric is a required field Metric *Metric `type:"structure" required:"true"` - // The period, in seconds, to use when retrieving the metric. + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. 
High-resolution metrics are those metrics stored by + a PutMetricData call that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 + hours ago, you must specify the period as follows or no data points in that + time range are returned: + // + // * Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds + // (1 minute). + // + // * Start time between 15 and 63 days ago - Use a multiple of 300 seconds + // (5 minutes). + // + // * Start time greater than 63 days ago - Use a multiple of 3600 seconds + // (1 hour). // // Period is a required field Period *int64 `min:"1" type:"integer" required:"true"` @@ -4723,7 +5462,15 @@ type MetricStat struct { // Stat is a required field Stat *string `type:"string" required:"true"` - // The unit to use for the returned data points. + // When you are using a Put operation, this defines what unit you want to use + // when storing the metric. + // + // In a Get operation, if you omit Unit then all data that was collected with + // any unit is returned, along with the corresponding units that were specified + // when the data was reported to CloudWatch. If you specify a unit, the operation + // returns only the data that was collected with that unit specified. If you + // specify a unit that does not match the data collected, the results of the + // operation are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -4788,6 +5535,131 @@ func (s *MetricStat) SetUnit(v string) *MetricStat { return s } +type PutAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The configuration specifies details about how the anomaly detection model + // is to be trained, including time ranges to exclude when training and updating + // the model. You can specify as many as 10 time ranges. + // + // The configuration can also include the time zone to use for the metric. + Configuration *AnomalyDetectorConfiguration `type:"structure"` + + // The metric dimensions to create the anomaly detection model for. + Dimensions []*Dimension `type:"list"` + + // The name of the metric to create the anomaly detection model for. + // + // MetricName is a required field + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric to create the anomaly detection model for. + // + // Namespace is a required field + Namespace *string `min:"1" type:"string" required:"true"` + + // The statistic to use for the metric and the anomaly detection model. + // + // Stat is a required field + Stat *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAnomalyDetectorInput"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Stat == nil { + invalidParams.Add(request.NewErrParamRequired("Stat")) + } + if s.Configuration != nil { + if err := s.Configuration.Validate(); err != nil { + invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams)) + } + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfiguration sets the Configuration field's value. +func (s *PutAnomalyDetectorInput) SetConfiguration(v *AnomalyDetectorConfiguration) *PutAnomalyDetectorInput { + s.Configuration = v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *PutAnomalyDetectorInput) SetDimensions(v []*Dimension) *PutAnomalyDetectorInput { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *PutAnomalyDetectorInput) SetMetricName(v string) *PutAnomalyDetectorInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *PutAnomalyDetectorInput) SetNamespace(v string) *PutAnomalyDetectorInput { + s.Namespace = &v + return s +} + +// SetStat sets the Stat field's value. +func (s *PutAnomalyDetectorInput) SetStat(v string) *PutAnomalyDetectorInput { + s.Stat = &v + return s +} + +type PutAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAnomalyDetectorOutput) GoString() string { + return s.String() +} + type PutDashboardInput struct { _ struct{} `type:"structure"` @@ -4908,6 +5780,10 @@ type PutMetricAlarmInput struct { // The arithmetic operation to use when comparing the specified statistic and // threshold. The specified statistic value is used as the first operand. // + // The values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, + // and GreaterThanUpperThreshold are used only for alarms based on anomaly detection + // models. + // // ComparisonOperator is a required field ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` @@ -4960,7 +5836,8 @@ type PutMetricAlarmInput struct { // | arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 InsufficientDataActions []*string `type:"list"` - // The name for the metric associated with the alarm. + // The name for the metric associated with the alarm. For each PutMetricAlarm + // operation, you must specify either MetricName or a Metrics array. 
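+ //
+ // As a reference point for that choice, a minimal illustrative sketch (not
+ // part of the generated documentation) of the single-metric, static-threshold
+ // form; all names and values are example assumptions:
+ //
+ //    _, err := svc.PutMetricAlarm(&cloudwatch.PutMetricAlarmInput{
+ //        AlarmName:          aws.String("cpu-high"),
+ //        Namespace:          aws.String("AWS/EC2"),
+ //        MetricName:         aws.String("CPUUtilization"),
+ //        Statistic:          aws.String("Average"),
+ //        Period:             aws.Int64(300),
+ //        EvaluationPeriods:  aws.Int64(3),
+ //        Threshold:          aws.Float64(80),
+ //        ComparisonOperator: aws.String("GreaterThanThreshold"),
+ //    })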
// // If you are creating an alarm based on a math expression, you cannot specify // this parameter, or any of the Dimensions, Period, Namespace, Statistic, or @@ -4969,8 +5846,11 @@ type PutMetricAlarmInput struct { MetricName *string `min:"1" type:"string"` // An array of MetricDataQuery structures that enable you to create an alarm - // based on the result of a metric math expression. Each item in the Metrics - // array either retrieves a metric or performs a math expression. + // based on the result of a metric math expression. For each PutMetricAlarm + // operation, you must specify either MetricName or a Metrics array. + // + // Each item in the Metrics array either retrieves a metric or performs a math + // expression. // // One item in the Metrics array is the expression that the alarm watches. You // designate this expression by setting ReturnData to true for this object @@ -5000,6 +5880,10 @@ type PutMetricAlarmInput struct { // The length, in seconds, used each time the metric specified in MetricName // is evaluated. Valid values are 10, 30, and any multiple of 60. // + // Period is required for alarms based on static thresholds. If you are creating + // an alarm based on a metric math expression, you specify the period for each + // metric within the objects in the Metrics array. + // // Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData // call with a StorageResolution of 1. If you specify a period of 10 or 30 for // a metric that does not have sub-minute resolution, the alarm still attempts @@ -5030,8 +5914,18 @@ type PutMetricAlarmInput struct { // The value against which the specified statistic is compared. // - // Threshold is a required field - Threshold *float64 `type:"double" required:"true"` + // This parameter is required for alarms based on static thresholds, but should + // not be used for alarms based on anomaly detection models. + Threshold *float64 `type:"double"` + + // If this is an alarm based on an anomaly detection model, make this value + // match the ID of the ANOMALY_DETECTION_BAND function. + // + // For an example of how to use this parameter, see the Anomaly Detection Model + // Alarm example on this page. + // + // If your alarm uses this parameter, it cannot have Auto Scaling actions. + ThresholdMetricId *string `min:"1" type:"string"` + // Sets how this alarm is to handle missing data points. If TreatMissingData // is omitted, the default behavior of missing is used. For more information, @@ -5047,8 +5941,17 @@ type PutMetricAlarmInput struct { // to your data. Metric data points that specify a unit of measure, such as // Percent, are aggregated separately. // - // If you specify a unit, you must use a unit that is appropriate for the metric. - // Otherwise, the CloudWatch alarm can get stuck in the INSUFFICIENT DATA state. + // If you don't specify Unit, CloudWatch retrieves all unit types that have + // been published for the metric and attempts to evaluate the alarm. Usually + // metrics are published with only one unit, so the alarm will work as intended. + // + // However, if the metric is published with multiple types of units and you + // don't specify a unit, the alarm's behavior is not defined and it behaves + // unpredictably. + // + // We recommend omitting Unit so that you don't inadvertently specify an incorrect + // unit that is not published for this metric; specifying such a unit causes + // the alarm to be stuck in the INSUFFICIENT DATA state.
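+ //
+ // Anomaly Detection Model Alarm example (an illustrative sketch, not part of
+ // the generated documentation): the Metrics array pairs the raw metric ("m1")
+ // with an ANOMALY_DETECTION_BAND expression ("ad1") that ThresholdMetricId
+ // points at; the ids and values are example assumptions:
+ //
+ //    _, err := svc.PutMetricAlarm(&cloudwatch.PutMetricAlarmInput{
+ //        AlarmName:          aws.String("cpu-anomaly"),
+ //        EvaluationPeriods:  aws.Int64(3),
+ //        ComparisonOperator: aws.String("LessThanLowerOrGreaterThanUpperThreshold"),
+ //        ThresholdMetricId:  aws.String("ad1"),
+ //        Metrics: []*cloudwatch.MetricDataQuery{
+ //            {
+ //                Id: aws.String("m1"),
+ //                MetricStat: &cloudwatch.MetricStat{
+ //                    Metric: &cloudwatch.Metric{
+ //                        Namespace:  aws.String("AWS/EC2"),
+ //                        MetricName: aws.String("CPUUtilization"),
+ //                    },
+ //                    Period: aws.Int64(300),
+ //                    Stat:   aws.String("Average"),
+ //                },
+ //                ReturnData: aws.Bool(true),
+ //            },
+ //            {
+ //                Id:         aws.String("ad1"),
+ //                Expression: aws.String("ANOMALY_DETECTION_BAND(m1, 2)"),
+ //                ReturnData: aws.Bool(true),
+ //            },
+ //        },
+ //    })
+ //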
Unit *string `type:"string" enum:"StandardUnit"` } @@ -5095,8 +5998,8 @@ func (s *PutMetricAlarmInput) Validate() error { if s.Period != nil && *s.Period < 1 { invalidParams.Add(request.NewErrParamMinValue("Period", 1)) } - if s.Threshold == nil { - invalidParams.Add(request.NewErrParamRequired("Threshold")) + if s.ThresholdMetricId != nil && len(*s.ThresholdMetricId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThresholdMetricId", 1)) } if s.TreatMissingData != nil && len(*s.TreatMissingData) < 1 { invalidParams.Add(request.NewErrParamMinLen("TreatMissingData", 1)) @@ -5252,6 +6155,12 @@ func (s *PutMetricAlarmInput) SetThreshold(v float64) *PutMetricAlarmInput { return s } +// SetThresholdMetricId sets the ThresholdMetricId field's value. +func (s *PutMetricAlarmInput) SetThresholdMetricId(v string) *PutMetricAlarmInput { + s.ThresholdMetricId = &v + return s +} + // SetTreatMissingData sets the TreatMissingData field's value. func (s *PutMetricAlarmInput) SetTreatMissingData(v string) *PutMetricAlarmInput { s.TreatMissingData = &v @@ -5289,8 +6198,8 @@ type PutMetricDataInput struct { // The namespace for the metric data. // - // You cannot specify a namespace that begins with "AWS/". Namespaces that begin - // with "AWS/" are reserved for use by Amazon Web Services products. + // To avoid conflicts with AWS service namespaces, you should not specify a + // namespace that begins with AWS/ // // Namespace is a required field Namespace *string `min:"1" type:"string" required:"true"` @@ -5361,6 +6270,62 @@ func (s PutMetricDataOutput) GoString() string { return s.String() } +// Specifies one range of days or times to exclude from use for training an +// anomaly detection model. +type Range struct { + _ struct{} `type:"structure"` + + // The end time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. + // For example, 2019-07-01T23:59:59. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // The start time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. + // For example, 2019-07-01T23:59:59. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s Range) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Range) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Range) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Range"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *Range) SetEndTime(v time.Time) *Range { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Range) SetStartTime(v time.Time) *Range { + s.StartTime = &v + return s +} + type SetAlarmStateInput struct { _ struct{} `type:"structure"` @@ -5595,14 +6560,13 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch resource that you're adding tags to. 
For more information - // on ARN format, see Example ARNs (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch) - // in the Amazon Web Services General Reference. + // The ARN of the CloudWatch alarm that you're adding tags to. The ARN format + // is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The list of key-value pairs to associate with the resource. + // The list of key-value pairs to associate with the alarm. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -5756,6 +6720,15 @@ const ( // ComparisonOperatorLessThanOrEqualToThreshold is a ComparisonOperator enum value ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" + + // ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold = "LessThanLowerOrGreaterThanUpperThreshold" + + // ComparisonOperatorLessThanLowerThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanLowerThreshold = "LessThanLowerThreshold" + + // ComparisonOperatorGreaterThanUpperThreshold is a ComparisonOperator enum value + ComparisonOperatorGreaterThanUpperThreshold = "GreaterThanUpperThreshold" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/errors.go index 7e650a682ea..77d0ded2005 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/errors.go @@ -52,6 +52,12 @@ const ( // The value of an input parameter is bad or out-of-range. ErrCodeInvalidParameterValueException = "InvalidParameterValue" + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The operation exceeded one or more limits. + ErrCodeLimitExceededException = "LimitExceededException" + // ErrCodeLimitExceededFault for service response error code // "LimitExceeded". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go index 0d478662240..9b43ce1f0b3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatch { svc := &CloudWatch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-08-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go index c914a3b84b4..99b12487c2b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go @@ -13,1853 +13,3961 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) -const opDeleteRule = "DeleteRule" +const opActivateEventSource = "ActivateEventSource" -// DeleteRuleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteRule operation. The "output" return +// ActivateEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the ActivateEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteRule for more information on using the DeleteRule +// See ActivateEventSource for more information on using the ActivateEventSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteRuleRequest method. -// req, resp := client.DeleteRuleRequest(params) +// // Example sending a request using the ActivateEventSourceRequest method. +// req, resp := client.ActivateEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteRule -func (c *CloudWatchEvents) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ActivateEventSource +func (c *CloudWatchEvents) ActivateEventSourceRequest(input *ActivateEventSourceInput) (req *request.Request, output *ActivateEventSourceOutput) { op := &request.Operation{ - Name: opDeleteRule, + Name: opActivateEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteRuleInput{} + input = &ActivateEventSourceInput{} } - output = &DeleteRuleOutput{} + output = &ActivateEventSourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteRule API operation for Amazon CloudWatch Events. -// -// Deletes the specified rule. -// -// Before you can delete the rule, you must remove all targets, using RemoveTargets. +// ActivateEventSource API operation for Amazon CloudWatch Events. 
// -// When you delete a rule, incoming events might continue to match to the deleted -// rule. Allow a short period of time for changes to take effect. +// Activates a partner event source that has been deactivated. Once activated, +// your matching event bus will start receiving events from the event source. // -// Managed rules are rules created and managed by another AWS service on your -// behalf. These rules are created by those other AWS services to support functionality -// in those services. You can delete these rules using the Force option, but -// you should do so only if you are sure the other service is not still using -// that rule. +// This operation is performed by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation DeleteRule for usage and error information. +// API operation ActivateEventSource for usage and error information. // // Returned Error Codes: -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. // -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. +// * ErrCodeInvalidStateException "InvalidStateException" +// The specified state isn't a valid state for an event source. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteRule -func (c *CloudWatchEvents) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ActivateEventSource +func (c *CloudWatchEvents) ActivateEventSource(input *ActivateEventSourceInput) (*ActivateEventSourceOutput, error) { + req, out := c.ActivateEventSourceRequest(input) return out, req.Send() } -// DeleteRuleWithContext is the same as DeleteRule with the addition of +// ActivateEventSourceWithContext is the same as ActivateEventSource with the addition of // the ability to pass a context and additional request options. // -// See DeleteRule for details on how to use this API operation. +// See ActivateEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
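// Editor's note: a minimal sketch of the ActivateEventSource call documented
// above, using the context-aware variant; the partner event source name is a
// made-up placeholder following the aws.partner/... convention.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	_, err := svc.ActivateEventSourceWithContext(context.Background(),
		&cloudwatchevents.ActivateEventSourceInput{
			Name: aws.String("aws.partner/examplepartner.com/123456789012/example-source"),
		})
	fmt.Println(err)
}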
-func (c *CloudWatchEvents) DeleteRuleWithContext(ctx aws.Context, input *DeleteRuleInput, opts ...request.Option) (*DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +func (c *CloudWatchEvents) ActivateEventSourceWithContext(ctx aws.Context, input *ActivateEventSourceInput, opts ...request.Option) (*ActivateEventSourceOutput, error) { + req, out := c.ActivateEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeEventBus = "DescribeEventBus" +const opCreateEventBus = "CreateEventBus" -// DescribeEventBusRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEventBus operation. The "output" return +// CreateEventBusRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventBus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeEventBus for more information on using the DescribeEventBus +// See CreateEventBus for more information on using the CreateEventBus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeEventBusRequest method. -// req, resp := client.DescribeEventBusRequest(params) +// // Example sending a request using the CreateEventBusRequest method. +// req, resp := client.CreateEventBusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventBus -func (c *CloudWatchEvents) DescribeEventBusRequest(input *DescribeEventBusInput) (req *request.Request, output *DescribeEventBusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreateEventBus +func (c *CloudWatchEvents) CreateEventBusRequest(input *CreateEventBusInput) (req *request.Request, output *CreateEventBusOutput) { op := &request.Operation{ - Name: opDescribeEventBus, + Name: opCreateEventBus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeEventBusInput{} + input = &CreateEventBusInput{} } - output = &DescribeEventBusOutput{} + output = &CreateEventBusOutput{} req = c.newRequest(op, input, output) return } -// DescribeEventBus API operation for Amazon CloudWatch Events. +// CreateEventBus API operation for Amazon CloudWatch Events. +// +// Creates a new event bus within your account. This can be a custom event bus +// which you can use to receive events from your own custom applications and +// services, or it can be a partner event bus which can be matched to a partner +// event source. // -// Displays the external AWS accounts that are permitted to write events to -// your account using your account's event bus, and the associated policy. To -// enable your account to receive events from other accounts, use PutPermission. +// This operation is used by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation DescribeEventBus for usage and error information. +// API operation CreateEventBus for usage and error information. // // Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource that you're trying to create already exists. +// // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. +// An entity that you specified doesn't exist. +// +// * ErrCodeInvalidStateException "InvalidStateException" +// The specified state isn't a valid state for an event source. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventBus -func (c *CloudWatchEvents) DescribeEventBus(input *DescribeEventBusInput) (*DescribeEventBusOutput, error) { - req, out := c.DescribeEventBusRequest(input) +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// You tried to create more resources than is allowed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreateEventBus +func (c *CloudWatchEvents) CreateEventBus(input *CreateEventBusInput) (*CreateEventBusOutput, error) { + req, out := c.CreateEventBusRequest(input) return out, req.Send() } -// DescribeEventBusWithContext is the same as DescribeEventBus with the addition of +// CreateEventBusWithContext is the same as CreateEventBus with the addition of // the ability to pass a context and additional request options. // -// See DescribeEventBus for details on how to use this API operation. +// See CreateEventBus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) DescribeEventBusWithContext(ctx aws.Context, input *DescribeEventBusInput, opts ...request.Option) (*DescribeEventBusOutput, error) { - req, out := c.DescribeEventBusRequest(input) +func (c *CloudWatchEvents) CreateEventBusWithContext(ctx aws.Context, input *CreateEventBusInput, opts ...request.Option) (*CreateEventBusOutput, error) { + req, out := c.CreateEventBusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeRule = "DescribeRule" +const opCreatePartnerEventSource = "CreatePartnerEventSource" -// DescribeRuleRequest generates a "aws/request.Request" representing the -// client's request for the DescribeRule operation. The "output" return +// CreatePartnerEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the CreatePartnerEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeRule for more information on using the DescribeRule +// See CreatePartnerEventSource for more information on using the CreatePartnerEventSource // API call, and error handling. 
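// Editor's note: a hedged sketch of CreateEventBus as documented above. The
// custom bus name is hypothetical; for a partner event bus, the Name would
// instead match a partner event source (with EventSourceName set accordingly).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.CreateEventBus(&cloudwatchevents.CreateEventBusInput{
		Name: aws.String("my-app-bus"),
	})
	if err == nil {
		fmt.Println(aws.StringValue(out.EventBusArn))
	}
}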
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeRuleRequest method. -// req, resp := client.DescribeRuleRequest(params) +// // Example sending a request using the CreatePartnerEventSourceRequest method. +// req, resp := client.CreatePartnerEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeRule -func (c *CloudWatchEvents) DescribeRuleRequest(input *DescribeRuleInput) (req *request.Request, output *DescribeRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreatePartnerEventSource +func (c *CloudWatchEvents) CreatePartnerEventSourceRequest(input *CreatePartnerEventSourceInput) (req *request.Request, output *CreatePartnerEventSourceOutput) { op := &request.Operation{ - Name: opDescribeRule, + Name: opCreatePartnerEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeRuleInput{} + input = &CreatePartnerEventSourceInput{} } - output = &DescribeRuleOutput{} + output = &CreatePartnerEventSourceOutput{} req = c.newRequest(op, input, output) return } -// DescribeRule API operation for Amazon CloudWatch Events. +// CreatePartnerEventSource API operation for Amazon CloudWatch Events. // -// Describes the specified rule. +// Called by an SaaS partner to create a partner event source. // -// DescribeRule does not list the targets of a rule. To see the targets associated -// with a rule, use ListTargetsByRule. +// This operation is not used by AWS customers. +// +// Each partner event source can be used by one AWS account to create a matching +// partner event bus in that AWS account. A SaaS partner must create one partner +// event source for each AWS account that wants to receive those event types. +// +// A partner event source creates events based on resources in the SaaS partner's +// service or application. +// +// An AWS account that creates a partner event bus that matches the partner +// event source can use that event bus to receive events from the partner, and +// then process them using AWS Events rules and targets. +// +// Partner event source names follow this format: +// +// aws.partner/partner_name/event_namespace/event_name +// +// * partner_name is determined during partner registration and identifies +// the partner to AWS customers. +// +// * For event_namespace, we recommend that partners use a string that identifies +// the AWS customer within the partner's system. This should not be the customer's +// AWS account ID. +// +// * event_name is determined by the partner, and should uniquely identify +// an event-generating resource within the partner system. This should help +// AWS customers decide whether to create an event bus to receive these events. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation DescribeRule for usage and error information. +// API operation CreatePartnerEventSource for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. 
+// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource that you're trying to create already exists. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeRule -func (c *CloudWatchEvents) DescribeRule(input *DescribeRuleInput) (*DescribeRuleOutput, error) { - req, out := c.DescribeRuleRequest(input) +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// You tried to create more resources than is allowed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreatePartnerEventSource +func (c *CloudWatchEvents) CreatePartnerEventSource(input *CreatePartnerEventSourceInput) (*CreatePartnerEventSourceOutput, error) { + req, out := c.CreatePartnerEventSourceRequest(input) return out, req.Send() } -// DescribeRuleWithContext is the same as DescribeRule with the addition of +// CreatePartnerEventSourceWithContext is the same as CreatePartnerEventSource with the addition of // the ability to pass a context and additional request options. // -// See DescribeRule for details on how to use this API operation. +// See CreatePartnerEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) DescribeRuleWithContext(ctx aws.Context, input *DescribeRuleInput, opts ...request.Option) (*DescribeRuleOutput, error) { - req, out := c.DescribeRuleRequest(input) +func (c *CloudWatchEvents) CreatePartnerEventSourceWithContext(ctx aws.Context, input *CreatePartnerEventSourceInput, opts ...request.Option) (*CreatePartnerEventSourceOutput, error) { + req, out := c.CreatePartnerEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDisableRule = "DisableRule" +const opDeactivateEventSource = "DeactivateEventSource" -// DisableRuleRequest generates a "aws/request.Request" representing the -// client's request for the DisableRule operation. The "output" return +// DeactivateEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the DeactivateEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisableRule for more information on using the DisableRule +// See DeactivateEventSource for more information on using the DeactivateEventSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisableRuleRequest method. -// req, resp := client.DisableRuleRequest(params) +// // Example sending a request using the DeactivateEventSourceRequest method. 
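// Editor's note: a sketch of the partner-side CreatePartnerEventSource call,
// with a Name composed per the aws.partner/partner_name/event_namespace/event_name
// format described above; the account ID and name segments are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.CreatePartnerEventSource(&cloudwatchevents.CreatePartnerEventSourceInput{
		Account: aws.String("123456789012"), // the AWS customer account to share with
		Name:    aws.String("aws.partner/examplepartner.com/customer-ns/ticket-created"),
	})
	if err == nil {
		fmt.Println(aws.StringValue(out.EventSourceArn))
	}
}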
+// req, resp := client.DeactivateEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DisableRule -func (c *CloudWatchEvents) DisableRuleRequest(input *DisableRuleInput) (req *request.Request, output *DisableRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeactivateEventSource +func (c *CloudWatchEvents) DeactivateEventSourceRequest(input *DeactivateEventSourceInput) (req *request.Request, output *DeactivateEventSourceOutput) { op := &request.Operation{ - Name: opDisableRule, + Name: opDeactivateEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DisableRuleInput{} + input = &DeactivateEventSourceInput{} } - output = &DisableRuleOutput{} + output = &DeactivateEventSourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisableRule API operation for Amazon CloudWatch Events. +// DeactivateEventSource API operation for Amazon CloudWatch Events. // -// Disables the specified rule. A disabled rule won't match any events, and -// won't self-trigger if it has a schedule expression. +// An AWS customer uses this operation to temporarily stop receiving events +// from the specified partner event source. The matching event bus isn't deleted. // -// When you disable a rule, incoming events might continue to match to the disabled -// rule. Allow a short period of time for changes to take effect. +// When you deactivate a partner event source, the source goes into PENDING +// state. If it remains in PENDING state for more than two weeks, it's deleted. +// +// To activate a deactivated partner event source, use ActivateEventSource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation DisableRule for usage and error information. +// API operation DeactivateEventSource for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. +// An entity that you specified doesn't exist. // -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. +// * ErrCodeInvalidStateException "InvalidStateException" +// The specified state isn't a valid state for an event source. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DisableRule -func (c *CloudWatchEvents) DisableRule(input *DisableRuleInput) (*DisableRuleOutput, error) { - req, out := c.DisableRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeactivateEventSource +func (c *CloudWatchEvents) DeactivateEventSource(input *DeactivateEventSourceInput) (*DeactivateEventSourceOutput, error) { + req, out := c.DeactivateEventSourceRequest(input) return out, req.Send() } -// DisableRuleWithContext is the same as DisableRule with the addition of +// DeactivateEventSourceWithContext is the same as DeactivateEventSource with the addition of // the ability to pass a context and additional request options. // -// See DisableRule for details on how to use this API operation. +// See DeactivateEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) DisableRuleWithContext(ctx aws.Context, input *DisableRuleInput, opts ...request.Option) (*DisableRuleOutput, error) { - req, out := c.DisableRuleRequest(input) +func (c *CloudWatchEvents) DeactivateEventSourceWithContext(ctx aws.Context, input *DeactivateEventSourceInput, opts ...request.Option) (*DeactivateEventSourceOutput, error) { + req, out := c.DeactivateEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opEnableRule = "EnableRule" +const opDeleteEventBus = "DeleteEventBus" -// EnableRuleRequest generates a "aws/request.Request" representing the -// client's request for the EnableRule operation. The "output" return +// DeleteEventBusRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventBus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See EnableRule for more information on using the EnableRule +// See DeleteEventBus for more information on using the DeleteEventBus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the EnableRuleRequest method. -// req, resp := client.EnableRuleRequest(params) +// // Example sending a request using the DeleteEventBusRequest method. 
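// Editor's note: a minimal sketch of DeactivateEventSource as described above;
// note that a source left in PENDING state for more than two weeks is deleted.
// The source name is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	_, err := svc.DeactivateEventSource(&cloudwatchevents.DeactivateEventSourceInput{
		Name: aws.String("aws.partner/examplepartner.com/123456789012/example-source"),
	})
	fmt.Println(err)
}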
+// req, resp := client.DeleteEventBusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/EnableRule -func (c *CloudWatchEvents) EnableRuleRequest(input *EnableRuleInput) (req *request.Request, output *EnableRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteEventBus +func (c *CloudWatchEvents) DeleteEventBusRequest(input *DeleteEventBusInput) (req *request.Request, output *DeleteEventBusOutput) { op := &request.Operation{ - Name: opEnableRule, + Name: opDeleteEventBus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &EnableRuleInput{} + input = &DeleteEventBusInput{} } - output = &EnableRuleOutput{} + output = &DeleteEventBusOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// EnableRule API operation for Amazon CloudWatch Events. +// DeleteEventBus API operation for Amazon CloudWatch Events. // -// Enables the specified rule. If the rule does not exist, the operation fails. +// Deletes the specified custom event bus or partner event bus. All rules associated +// with this event bus are also deleted. You can't delete your account's default +// event bus. // -// When you enable a rule, incoming events might not immediately start matching -// to a newly enabled rule. Allow a short period of time for changes to take -// effect. +// This operation is performed by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation EnableRule for usage and error information. +// API operation DeleteEventBus for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. -// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/EnableRule -func (c *CloudWatchEvents) EnableRule(input *EnableRuleInput) (*EnableRuleOutput, error) { - req, out := c.EnableRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteEventBus +func (c *CloudWatchEvents) DeleteEventBus(input *DeleteEventBusInput) (*DeleteEventBusOutput, error) { + req, out := c.DeleteEventBusRequest(input) return out, req.Send() } -// EnableRuleWithContext is the same as EnableRule with the addition of +// DeleteEventBusWithContext is the same as DeleteEventBus with the addition of // the ability to pass a context and additional request options. 
// -// See EnableRule for details on how to use this API operation. +// See DeleteEventBus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) EnableRuleWithContext(ctx aws.Context, input *EnableRuleInput, opts ...request.Option) (*EnableRuleOutput, error) { - req, out := c.EnableRuleRequest(input) +func (c *CloudWatchEvents) DeleteEventBusWithContext(ctx aws.Context, input *DeleteEventBusInput, opts ...request.Option) (*DeleteEventBusOutput, error) { + req, out := c.DeleteEventBusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListRuleNamesByTarget = "ListRuleNamesByTarget" +const opDeletePartnerEventSource = "DeletePartnerEventSource" -// ListRuleNamesByTargetRequest generates a "aws/request.Request" representing the -// client's request for the ListRuleNamesByTarget operation. The "output" return +// DeletePartnerEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the DeletePartnerEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRuleNamesByTarget for more information on using the ListRuleNamesByTarget +// See DeletePartnerEventSource for more information on using the DeletePartnerEventSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRuleNamesByTargetRequest method. -// req, resp := client.ListRuleNamesByTargetRequest(params) +// // Example sending a request using the DeletePartnerEventSourceRequest method. +// req, resp := client.DeletePartnerEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRuleNamesByTarget -func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeletePartnerEventSource +func (c *CloudWatchEvents) DeletePartnerEventSourceRequest(input *DeletePartnerEventSourceInput) (req *request.Request, output *DeletePartnerEventSourceOutput) { op := &request.Operation{ - Name: opListRuleNamesByTarget, + Name: opDeletePartnerEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListRuleNamesByTargetInput{} + input = &DeletePartnerEventSourceInput{} } - output = &ListRuleNamesByTargetOutput{} + output = &DeletePartnerEventSourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListRuleNamesByTarget API operation for Amazon CloudWatch Events. +// DeletePartnerEventSource API operation for Amazon CloudWatch Events. +// +// This operation is used by SaaS partners to delete a partner event source. 
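// Editor's note: a sketch of DeleteEventBus as documented above. Deleting a
// bus also deletes its rules, and the default bus can't be deleted; the bus
// name here is hypothetical.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	_, err := svc.DeleteEventBus(&cloudwatchevents.DeleteEventBusInput{
		Name: aws.String("my-app-bus"),
	})
	fmt.Println(err)
}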
+// AWS customers don't use this operation. // -// Lists the rules for the specified target. You can see which of the rules -// in Amazon CloudWatch Events can invoke a specific target in your account. +// When you delete an event source, the status of the corresponding partner +// event bus in the AWS customer account becomes DELETED. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation ListRuleNamesByTarget for usage and error information. +// API operation DeletePartnerEventSource for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRuleNamesByTarget -func (c *CloudWatchEvents) ListRuleNamesByTarget(input *ListRuleNamesByTargetInput) (*ListRuleNamesByTargetOutput, error) { - req, out := c.ListRuleNamesByTargetRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeletePartnerEventSource +func (c *CloudWatchEvents) DeletePartnerEventSource(input *DeletePartnerEventSourceInput) (*DeletePartnerEventSourceOutput, error) { + req, out := c.DeletePartnerEventSourceRequest(input) return out, req.Send() } -// ListRuleNamesByTargetWithContext is the same as ListRuleNamesByTarget with the addition of +// DeletePartnerEventSourceWithContext is the same as DeletePartnerEventSource with the addition of // the ability to pass a context and additional request options. // -// See ListRuleNamesByTarget for details on how to use this API operation. +// See DeletePartnerEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) ListRuleNamesByTargetWithContext(ctx aws.Context, input *ListRuleNamesByTargetInput, opts ...request.Option) (*ListRuleNamesByTargetOutput, error) { - req, out := c.ListRuleNamesByTargetRequest(input) +func (c *CloudWatchEvents) DeletePartnerEventSourceWithContext(ctx aws.Context, input *DeletePartnerEventSourceInput, opts ...request.Option) (*DeletePartnerEventSourceOutput, error) { + req, out := c.DeletePartnerEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListRules = "ListRules" +const opDeleteRule = "DeleteRule" -// ListRulesRequest generates a "aws/request.Request" representing the -// client's request for the ListRules operation. The "output" return +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRules for more information on using the ListRules +// See DeleteRule for more information on using the DeleteRule // API call, and error handling. 
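// Editor's note: the partner-side counterpart, DeletePartnerEventSource, as
// described above; afterwards the matching partner event bus in the customer
// account shows state DELETED. Account and Name are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	_, err := svc.DeletePartnerEventSource(&cloudwatchevents.DeletePartnerEventSourceInput{
		Account: aws.String("123456789012"),
		Name:    aws.String("aws.partner/examplepartner.com/customer-ns/ticket-created"),
	})
	fmt.Println(err)
}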
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRulesRequest method. -// req, resp := client.ListRulesRequest(params) +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRules -func (c *CloudWatchEvents) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteRule +func (c *CloudWatchEvents) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { op := &request.Operation{ - Name: opListRules, + Name: opDeleteRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListRulesInput{} + input = &DeleteRuleInput{} } - output = &ListRulesOutput{} + output = &DeleteRuleOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListRules API operation for Amazon CloudWatch Events. +// DeleteRule API operation for Amazon CloudWatch Events. // -// Lists your Amazon CloudWatch Events rules. You can either list all the rules -// or you can provide a prefix to match to the rule names. +// Deletes the specified rule. // -// ListRules does not list the targets of a rule. To see the targets associated -// with a rule, use ListTargetsByRule. +// Before you can delete the rule, you must remove all targets, using RemoveTargets. +// +// When you delete a rule, incoming events might continue to match to the deleted +// rule. Allow a short period of time for changes to take effect. +// +// Managed rules are rules created and managed by another AWS service on your +// behalf. These rules are created by those other AWS services to support functionality +// in those services. You can delete these rules using the Force option, but +// you should do so only if you're sure that the other service isn't still using +// that rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation ListRules for usage and error information. +// API operation DeleteRule for usage and error information. // // Returned Error Codes: +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. +// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRules -func (c *CloudWatchEvents) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeleteRule +func (c *CloudWatchEvents) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) return out, req.Send() } -// ListRulesWithContext is the same as ListRules with the addition of +// DeleteRuleWithContext is the same as DeleteRule with the addition of // the ability to pass a context and additional request options. // -// See ListRules for details on how to use this API operation. +// See DeleteRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) ListRulesWithContext(ctx aws.Context, input *ListRulesInput, opts ...request.Option) (*ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +func (c *CloudWatchEvents) DeleteRuleWithContext(ctx aws.Context, input *DeleteRuleInput, opts ...request.Option) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTagsForResource = "ListTagsForResource" +const opDescribeEventBus = "DescribeEventBus" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// DescribeEventBusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventBus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See DescribeEventBus for more information on using the DescribeEventBus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the DescribeEventBusRequest method. 
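// Editor's note: a sketch of DeleteRule as documented above. Force is set only
// because this hypothetical rule is a managed rule; for ordinary rules, remove
// all targets with RemoveTargets first and omit Force.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	_, err := svc.DeleteRule(&cloudwatchevents.DeleteRuleInput{
		Name:  aws.String("my-managed-rule"),
		Force: aws.Bool(true),
	})
	fmt.Println(err)
}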
+// req, resp := client.DescribeEventBusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTagsForResource -func (c *CloudWatchEvents) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventBus +func (c *CloudWatchEvents) DescribeEventBusRequest(input *DescribeEventBusInput) (req *request.Request, output *DescribeEventBusOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opDescribeEventBus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListTagsForResourceInput{} + input = &DescribeEventBusInput{} } - output = &ListTagsForResourceOutput{} + output = &DescribeEventBusOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon CloudWatch Events. +// DescribeEventBus API operation for Amazon CloudWatch Events. +// +// Displays details about an event bus in your account. This can include the +// external AWS accounts that are permitted to write events to your default +// event bus, and the associated policy. For custom event buses and partner +// event buses, it displays the name, ARN, policy, state, and creation time. +// +// To enable your account to receive events from other accounts on its default +// event bus, use PutPermission. // -// Displays the tags associated with a CloudWatch Events resource. In CloudWatch -// Events, rules can be tagged. +// For more information about partner event buses, see CreateEventBus. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation ListTagsForResource for usage and error information. +// API operation DescribeEventBus for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. +// An entity that you specified doesn't exist. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTagsForResource -func (c *CloudWatchEvents) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventBus +func (c *CloudWatchEvents) DescribeEventBus(input *DescribeEventBusInput) (*DescribeEventBusOutput, error) { + req, out := c.DescribeEventBusRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// DescribeEventBusWithContext is the same as DescribeEventBus with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See DescribeEventBus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *CloudWatchEvents) DescribeEventBusWithContext(ctx aws.Context, input *DescribeEventBusInput, opts ...request.Option) (*DescribeEventBusOutput, error) { + req, out := c.DescribeEventBusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTargetsByRule = "ListTargetsByRule" +const opDescribeEventSource = "DescribeEventSource" -// ListTargetsByRuleRequest generates a "aws/request.Request" representing the -// client's request for the ListTargetsByRule operation. The "output" return +// DescribeEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTargetsByRule for more information on using the ListTargetsByRule +// See DescribeEventSource for more information on using the DescribeEventSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTargetsByRuleRequest method. -// req, resp := client.ListTargetsByRuleRequest(params) +// // Example sending a request using the DescribeEventSourceRequest method. +// req, resp := client.DescribeEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTargetsByRule -func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventSource +func (c *CloudWatchEvents) DescribeEventSourceRequest(input *DescribeEventSourceInput) (req *request.Request, output *DescribeEventSourceOutput) { op := &request.Operation{ - Name: opListTargetsByRule, + Name: opDescribeEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListTargetsByRuleInput{} + input = &DescribeEventSourceInput{} } - output = &ListTargetsByRuleOutput{} + output = &DescribeEventSourceOutput{} req = c.newRequest(op, input, output) return } -// ListTargetsByRule API operation for Amazon CloudWatch Events. +// DescribeEventSource API operation for Amazon CloudWatch Events. // -// Lists the targets assigned to the specified rule. +// This operation lists details about a partner event source that is shared +// with your account. +// +// This operation is run by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation ListTargetsByRule for usage and error information. 
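// Editor's note: a sketch of DescribeEventBus as documented above. Assumption:
// omitting Name describes the account's default event bus; a custom or partner
// bus can be named explicitly instead.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.DescribeEventBus(&cloudwatchevents.DescribeEventBusInput{})
	if err == nil {
		fmt.Println(aws.StringValue(out.Arn), aws.StringValue(out.Policy))
	}
}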
+// API operation DescribeEventSource for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. +// An entity that you specified doesn't exist. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTargetsByRule -func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*ListTargetsByRuleOutput, error) { - req, out := c.ListTargetsByRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventSource +func (c *CloudWatchEvents) DescribeEventSource(input *DescribeEventSourceInput) (*DescribeEventSourceOutput, error) { + req, out := c.DescribeEventSourceRequest(input) return out, req.Send() } -// ListTargetsByRuleWithContext is the same as ListTargetsByRule with the addition of +// DescribeEventSourceWithContext is the same as DescribeEventSource with the addition of // the ability to pass a context and additional request options. // -// See ListTargetsByRule for details on how to use this API operation. +// See DescribeEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) ListTargetsByRuleWithContext(ctx aws.Context, input *ListTargetsByRuleInput, opts ...request.Option) (*ListTargetsByRuleOutput, error) { - req, out := c.ListTargetsByRuleRequest(input) +func (c *CloudWatchEvents) DescribeEventSourceWithContext(ctx aws.Context, input *DescribeEventSourceInput, opts ...request.Option) (*DescribeEventSourceOutput, error) { + req, out := c.DescribeEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutEvents = "PutEvents" +const opDescribePartnerEventSource = "DescribePartnerEventSource" -// PutEventsRequest generates a "aws/request.Request" representing the -// client's request for the PutEvents operation. The "output" return +// DescribePartnerEventSourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribePartnerEventSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutEvents for more information on using the PutEvents +// See DescribePartnerEventSource for more information on using the DescribePartnerEventSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutEventsRequest method. -// req, resp := client.PutEventsRequest(params) +// // Example sending a request using the DescribePartnerEventSourceRequest method. 
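// Editor's note: the customer-side DescribeEventSource call documented above;
// the returned State (for example PENDING or ACTIVE) indicates whether the
// shared source still needs a matching event bus. The name is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.DescribeEventSource(&cloudwatchevents.DescribeEventSourceInput{
		Name: aws.String("aws.partner/examplepartner.com/123456789012/example-source"),
	})
	if err == nil {
		fmt.Println(aws.StringValue(out.State))
	}
}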
+// req, resp := client.DescribePartnerEventSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutEvents -func (c *CloudWatchEvents) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribePartnerEventSource +func (c *CloudWatchEvents) DescribePartnerEventSourceRequest(input *DescribePartnerEventSourceInput) (req *request.Request, output *DescribePartnerEventSourceOutput) { op := &request.Operation{ - Name: opPutEvents, + Name: opDescribePartnerEventSource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutEventsInput{} + input = &DescribePartnerEventSourceInput{} } - output = &PutEventsOutput{} + output = &DescribePartnerEventSourceOutput{} req = c.newRequest(op, input, output) return } -// PutEvents API operation for Amazon CloudWatch Events. +// DescribePartnerEventSource API operation for Amazon CloudWatch Events. // -// Sends custom events to Amazon CloudWatch Events so that they can be matched -// to rules. +// An SaaS partner can use this operation to list details about a partner event +// source that they have created. +// +// AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource +// to see details about a partner event source that is shared with them. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation PutEvents for usage and error information. +// API operation DescribePartnerEventSource for usage and error information. // // Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutEvents -func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { - req, out := c.PutEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribePartnerEventSource +func (c *CloudWatchEvents) DescribePartnerEventSource(input *DescribePartnerEventSourceInput) (*DescribePartnerEventSourceOutput, error) { + req, out := c.DescribePartnerEventSourceRequest(input) return out, req.Send() } -// PutEventsWithContext is the same as PutEvents with the addition of +// DescribePartnerEventSourceWithContext is the same as DescribePartnerEventSource with the addition of // the ability to pass a context and additional request options. // -// See PutEvents for details on how to use this API operation. +// See DescribePartnerEventSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *CloudWatchEvents) PutEventsWithContext(ctx aws.Context, input *PutEventsInput, opts ...request.Option) (*PutEventsOutput, error) { - req, out := c.PutEventsRequest(input) +func (c *CloudWatchEvents) DescribePartnerEventSourceWithContext(ctx aws.Context, input *DescribePartnerEventSourceInput, opts ...request.Option) (*DescribePartnerEventSourceOutput, error) { + req, out := c.DescribePartnerEventSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutPermission = "PutPermission" +const opDescribeRule = "DescribeRule" -// PutPermissionRequest generates a "aws/request.Request" representing the -// client's request for the PutPermission operation. The "output" return +// DescribeRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutPermission for more information on using the PutPermission +// See DescribeRule for more information on using the DescribeRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutPermissionRequest method. -// req, resp := client.PutPermissionRequest(params) +// // Example sending a request using the DescribeRuleRequest method. +// req, resp := client.DescribeRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPermission -func (c *CloudWatchEvents) PutPermissionRequest(input *PutPermissionInput) (req *request.Request, output *PutPermissionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeRule +func (c *CloudWatchEvents) DescribeRuleRequest(input *DescribeRuleInput) (req *request.Request, output *DescribeRuleOutput) { op := &request.Operation{ - Name: opPutPermission, + Name: opDescribeRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutPermissionInput{} + input = &DescribeRuleInput{} } - output = &PutPermissionOutput{} + output = &DescribeRuleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutPermission API operation for Amazon CloudWatch Events. -// -// Running PutPermission permits the specified AWS account or AWS organization -// to put events to your account's default event bus. CloudWatch Events rules -// in your account are triggered by these events arriving to your default event -// bus. -// -// For another account to send events to your account, that external account -// must have a CloudWatch Events rule with your account's default event bus -// as a target. -// -// To enable multiple AWS accounts to put events to your default event bus, -// run PutPermission once for each of these accounts. Or, if all the accounts -// are members of the same AWS organization, you can run PutPermission once -// specifying Principal as "*" and specifying the AWS organization ID in Condition, -// to grant permissions to all accounts in that organization. 
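// Editor's note: the partner-side DescribePartnerEventSource call documented
// above, which returns the source's name and ARN; the name is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.DescribePartnerEventSource(&cloudwatchevents.DescribePartnerEventSourceInput{
		Name: aws.String("aws.partner/examplepartner.com/customer-ns/ticket-created"),
	})
	if err == nil {
		fmt.Println(aws.StringValue(out.Arn))
	}
}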
+// DescribeRule API operation for Amazon CloudWatch Events. // -// If you grant permissions using an organization, then accounts in that organization -// must specify a RoleArn with proper permissions when they use PutTarget to -// add your account's event bus as a target. For more information, see Sending -// and Receiving Events Between AWS Accounts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEvents-CrossAccountEventDelivery.html) -// in the Amazon CloudWatch Events User Guide. +// Describes the specified rule. // -// The permission policy on the default event bus cannot exceed 10 KB in size. +// DescribeRule doesn't list the targets of a rule. To see the targets associated +// with a rule, use ListTargetsByRule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation PutPermission for usage and error information. +// API operation DescribeRule for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodePolicyLengthExceededException "PolicyLengthExceededException" -// The event bus policy is too long. For more information, see the limits. +// An entity that you specified doesn't exist. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPermission -func (c *CloudWatchEvents) PutPermission(input *PutPermissionInput) (*PutPermissionOutput, error) { - req, out := c.PutPermissionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeRule +func (c *CloudWatchEvents) DescribeRule(input *DescribeRuleInput) (*DescribeRuleOutput, error) { + req, out := c.DescribeRuleRequest(input) return out, req.Send() } -// PutPermissionWithContext is the same as PutPermission with the addition of +// DescribeRuleWithContext is the same as DescribeRule with the addition of // the ability to pass a context and additional request options. // -// See PutPermission for details on how to use this API operation. +// See DescribeRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) PutPermissionWithContext(ctx aws.Context, input *PutPermissionInput, opts ...request.Option) (*PutPermissionOutput, error) { - req, out := c.PutPermissionRequest(input) +func (c *CloudWatchEvents) DescribeRuleWithContext(ctx aws.Context, input *DescribeRuleInput, opts ...request.Option) (*DescribeRuleOutput, error) { + req, out := c.DescribeRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutRule = "PutRule" +const opDisableRule = "DisableRule" -// PutRuleRequest generates a "aws/request.Request" representing the -// client's request for the PutRule operation. 
The "output" return +// DisableRuleRequest generates a "aws/request.Request" representing the +// client's request for the DisableRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutRule for more information on using the PutRule +// See DisableRule for more information on using the DisableRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutRuleRequest method. -// req, resp := client.PutRuleRequest(params) +// // Example sending a request using the DisableRuleRequest method. +// req, resp := client.DisableRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutRule -func (c *CloudWatchEvents) PutRuleRequest(input *PutRuleInput) (req *request.Request, output *PutRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DisableRule +func (c *CloudWatchEvents) DisableRuleRequest(input *DisableRuleInput) (req *request.Request, output *DisableRuleOutput) { op := &request.Operation{ - Name: opPutRule, + Name: opDisableRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutRuleInput{} + input = &DisableRuleInput{} } - output = &PutRuleOutput{} + output = &DisableRuleOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutRule API operation for Amazon CloudWatch Events. -// -// Creates or updates the specified rule. Rules are enabled by default, or based -// on value of the state. You can disable a rule using DisableRule. -// -// If you are updating an existing rule, the rule is replaced with what you -// specify in this PutRule command. If you omit arguments in PutRule, the old -// values for those arguments are not kept. Instead, they are replaced with -// null values. -// -// When you create or update a rule, incoming events might not immediately start -// matching to new or updated rules. Allow a short period of time for changes -// to take effect. -// -// A rule must contain at least an EventPattern or ScheduleExpression. Rules -// with EventPatterns are triggered when a matching event is observed. Rules -// with ScheduleExpressions self-trigger based on the given schedule. A rule -// can have both an EventPattern and a ScheduleExpression, in which case the -// rule triggers on matching events as well as on a schedule. -// -// When you initially create a rule, you can optionally assign one or more tags -// to the rule. Tags can help you organize and categorize your resources. You -// can also use them to scope user permissions, by granting a user permission -// to access or change only rules with certain tag values. To use the PutRule -// operation and assign tags, you must have both the events:PutRule and events:TagResource -// permissions. -// -// If you are updating an existing rule, any tags you specify in the PutRule -// operation are ignored. To update the tags of an existing rule, use TagResource -// and UntagResource. 
-// -// Most services in AWS treat : or / as the same character in Amazon Resource -// Names (ARNs). However, CloudWatch Events uses an exact match in event patterns -// and rules. Be sure to use the correct ARN characters when creating event -// patterns so that they match the ARN syntax in the event you want to match. -// -// In CloudWatch Events, it is possible to create rules that lead to infinite -// loops, where a rule is fired repeatedly. For example, a rule might detect -// that ACLs have changed on an S3 bucket, and trigger software to change them -// to the desired state. If the rule is not written carefully, the subsequent -// change to the ACLs fires the rule again, creating an infinite loop. +// DisableRule API operation for Amazon CloudWatch Events. // -// To prevent this, write the rules so that the triggered actions do not re-fire -// the same rule. For example, your rule could fire only if ACLs are found to -// be in a bad state, instead of after any change. +// Disables the specified rule. A disabled rule won't match any events and won't +// self-trigger if it has a schedule expression. // -// An infinite loop can quickly cause higher than expected charges. We recommend -// that you use budgeting, which alerts you when charges exceed your specified -// limit. For more information, see Managing Your Costs with Budgets (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/budgets-managing-costs.html). +// When you disable a rule, incoming events might continue to match to the disabled +// rule. Allow a short period of time for changes to take effect. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation PutRule for usage and error information. +// API operation DisableRule for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidEventPatternException "InvalidEventPatternException" -// The event pattern is not valid. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// You tried to create more rules or add more targets to a rule than is allowed. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. // // * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. +// There is concurrent modification on a resource. // // * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. 
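A sketch of DisableRule together with the runtime type assertion on awserr.Error that these comments recommend for distinguishing the listed error codes; the client and rule name are supplied by the caller:

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func disableRule(client *cloudwatchevents.CloudWatchEvents, name string) error {
	_, err := client.DisableRule(&cloudwatchevents.DisableRuleInput{
		Name: aws.String(name),
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as described above.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case cloudwatchevents.ErrCodeResourceNotFoundException:
				fmt.Println("no such rule:", name)
			case cloudwatchevents.ErrCodeManagedRuleException:
				fmt.Println("rule is managed by an AWS service; cannot disable")
			}
		}
		return err
	}
	// Incoming events may continue to match the rule for a short period.
	return nil
}
```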
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutRule -func (c *CloudWatchEvents) PutRule(input *PutRuleInput) (*PutRuleOutput, error) { - req, out := c.PutRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DisableRule +func (c *CloudWatchEvents) DisableRule(input *DisableRuleInput) (*DisableRuleOutput, error) { + req, out := c.DisableRuleRequest(input) return out, req.Send() } -// PutRuleWithContext is the same as PutRule with the addition of +// DisableRuleWithContext is the same as DisableRule with the addition of // the ability to pass a context and additional request options. // -// See PutRule for details on how to use this API operation. +// See DisableRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) PutRuleWithContext(ctx aws.Context, input *PutRuleInput, opts ...request.Option) (*PutRuleOutput, error) { - req, out := c.PutRuleRequest(input) +func (c *CloudWatchEvents) DisableRuleWithContext(ctx aws.Context, input *DisableRuleInput, opts ...request.Option) (*DisableRuleOutput, error) { + req, out := c.DisableRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutTargets = "PutTargets" +const opEnableRule = "EnableRule" -// PutTargetsRequest generates a "aws/request.Request" representing the -// client's request for the PutTargets operation. The "output" return +// EnableRuleRequest generates a "aws/request.Request" representing the +// client's request for the EnableRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutTargets for more information on using the PutTargets +// See EnableRule for more information on using the EnableRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutTargetsRequest method. -// req, resp := client.PutTargetsRequest(params) +// // Example sending a request using the EnableRuleRequest method. 
+// req, resp := client.EnableRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutTargets -func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *request.Request, output *PutTargetsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/EnableRule +func (c *CloudWatchEvents) EnableRuleRequest(input *EnableRuleInput) (req *request.Request, output *EnableRuleOutput) { op := &request.Operation{ - Name: opPutTargets, + Name: opEnableRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutTargetsInput{} + input = &EnableRuleInput{} } - output = &PutTargetsOutput{} + output = &EnableRuleOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutTargets API operation for Amazon CloudWatch Events. +// EnableRule API operation for Amazon CloudWatch Events. // -// Adds the specified targets to the specified rule, or updates the targets -// if they are already associated with the rule. +// Enables the specified rule. If the rule doesn't exist, the operation fails. // -// Targets are the resources that are invoked when a rule is triggered. +// When you enable a rule, incoming events might not immediately start matching +// to a newly enabled rule. Allow a short period of time for changes to take +// effect. // -// You can configure the following as targets for CloudWatch Events: +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * EC2 instances +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation EnableRule for usage and error information. // -// * SSM Run Command +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. // -// * SSM Automation +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. // -// * AWS Lambda functions +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. // -// * Data streams in Amazon Kinesis Data Streams +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. // -// * Data delivery streams in Amazon Kinesis Data Firehose +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/EnableRule +func (c *CloudWatchEvents) EnableRule(input *EnableRuleInput) (*EnableRuleOutput, error) { + req, out := c.EnableRuleRequest(input) + return out, req.Send() +} + +// EnableRuleWithContext is the same as EnableRule with the addition of +// the ability to pass a context and additional request options. 
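The WithContext variant of EnableRule in a minimal sketch, assuming a caller-supplied client; the timeout is arbitrary:

```go
package examples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func enableRule(client *cloudwatchevents.CloudWatchEvents, name string) error {
	// A nil context panics, per the contract above, so always pass one.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := client.EnableRuleWithContext(ctx, &cloudwatchevents.EnableRuleInput{
		Name: aws.String(name),
	})
	// Allow a short period before expecting the rule to match incoming events.
	return err
}
```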
// -// * Amazon ECS tasks -// -// * AWS Step Functions state machines -// -// * AWS Batch jobs -// -// * AWS CodeBuild projects -// -// * Pipelines in AWS CodePipeline -// -// * Amazon Inspector assessment templates -// -// * Amazon SNS topics -// -// * Amazon SQS queues, including FIFO queues -// -// * The default event bus of another AWS account -// -// Creating rules with built-in targets is supported only in the AWS Management -// Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances -// API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. -// -// For some target types, PutTargets provides target-specific parameters. If -// the target is a Kinesis data stream, you can optionally specify which shard -// the event goes to by using the KinesisParameters argument. To invoke a command -// on multiple EC2 instances with one rule, you can use the RunCommandParameters -// field. -// -// To be able to make API calls against the resources that you own, Amazon CloudWatch -// Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, -// CloudWatch Events relies on resource-based policies. For EC2 instances, Kinesis -// data streams, and AWS Step Functions state machines, CloudWatch Events relies -// on IAM roles that you specify in the RoleARN argument in PutTargets. For -// more information, see Authentication and Access Control (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/auth-and-access-control-cwe.html) -// in the Amazon CloudWatch Events User Guide. -// -// If another AWS account is in the same region and has granted you permission -// (using PutPermission), you can send events to that account. Set that account's -// event bus as a target of the rules in your account. To send the matched events -// to the other account, specify that account's event bus as the Arn value when -// you run PutTargets. If your account sends events to another account, your -// account is charged for each sent event. Each event sent to another account -// is charged as a custom event. The account receiving the event is not charged. -// For more information, see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). -// -// If you are setting the event bus of another account as the target, and that -// account granted permission to your account through an organization instead -// of directly by the account ID, then you must specify a RoleArn with proper -// permissions in the Target structure. For more information, see Sending and -// Receiving Events Between AWS Accounts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEvents-CrossAccountEventDelivery.html) -// in the Amazon CloudWatch Events User Guide. -// -// For more information about enabling cross-account events, see PutPermission. -// -// Input, InputPath, and InputTransformer are mutually exclusive and optional -// parameters of a target. When a rule is triggered due to a matched event: -// -// * If none of the following arguments are specified for a target, then -// the entire event is passed to the target in JSON format (unless the target -// is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from -// the event is passed to the target). -// -// * If Input is specified in the form of valid JSON, then the matched event -// is overridden with this constant. 
-// -// * If InputPath is specified in the form of JSONPath (for example, $.detail), -// then only the part of the event specified in the path is passed to the -// target (for example, only the detail part of the event is passed). -// -// * If InputTransformer is specified, then one or more specified JSONPaths -// are extracted from the event and used as values in a template that you -// specify as the input to the target. -// -// When you specify InputPath or InputTransformer, you must use JSON dot notation, -// not bracket notation. -// -// When you add targets to a rule and the associated rule triggers soon after, -// new or updated targets might not be immediately invoked. Allow a short period -// of time for changes to take effect. -// -// This action can partially fail if too many requests are made at the same -// time. If that happens, FailedEntryCount is non-zero in the response and each -// entry in FailedEntries provides the ID of the failed target and the error -// code. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon CloudWatch Events's -// API operation PutTargets for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// You tried to create more rules or add more targets to a rule than is allowed. -// -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. -// -// * ErrCodeInternalException "InternalException" -// This exception occurs due to unexpected causes. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutTargets -func (c *CloudWatchEvents) PutTargets(input *PutTargetsInput) (*PutTargetsOutput, error) { - req, out := c.PutTargetsRequest(input) - return out, req.Send() -} - -// PutTargetsWithContext is the same as PutTargets with the addition of -// the ability to pass a context and additional request options. -// -// See PutTargets for details on how to use this API operation. +// See EnableRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) PutTargetsWithContext(ctx aws.Context, input *PutTargetsInput, opts ...request.Option) (*PutTargetsOutput, error) { - req, out := c.PutTargetsRequest(input) +func (c *CloudWatchEvents) EnableRuleWithContext(ctx aws.Context, input *EnableRuleInput, opts ...request.Option) (*EnableRuleOutput, error) { + req, out := c.EnableRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opRemovePermission = "RemovePermission" +const opListEventBuses = "ListEventBuses" -// RemovePermissionRequest generates a "aws/request.Request" representing the -// client's request for the RemovePermission operation. The "output" return +// ListEventBusesRequest generates a "aws/request.Request" representing the +// client's request for the ListEventBuses operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemovePermission for more information on using the RemovePermission +// See ListEventBuses for more information on using the ListEventBuses // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemovePermissionRequest method. -// req, resp := client.RemovePermissionRequest(params) +// // Example sending a request using the ListEventBusesRequest method. +// req, resp := client.ListEventBusesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemovePermission -func (c *CloudWatchEvents) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListEventBuses +func (c *CloudWatchEvents) ListEventBusesRequest(input *ListEventBusesInput) (req *request.Request, output *ListEventBusesOutput) { op := &request.Operation{ - Name: opRemovePermission, + Name: opListEventBuses, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RemovePermissionInput{} + input = &ListEventBusesInput{} } - output = &RemovePermissionOutput{} + output = &ListEventBusesOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// RemovePermission API operation for Amazon CloudWatch Events. +// ListEventBuses API operation for Amazon CloudWatch Events. // -// Revokes the permission of another AWS account to be able to put events to -// your default event bus. Specify the account to revoke by the StatementId -// value that you associated with the account when you granted it permission -// with PutPermission. You can find the StatementId by using DescribeEventBus. +// Lists all the event buses in your account, including the default event bus, +// custom event buses, and partner event buses. +// +// This operation is run by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation RemovePermission for usage and error information. +// API operation ListEventBuses for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. 
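A minimal ListEventBuses sketch under the same assumptions as the earlier examples; it prints every bus visible to the account:

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func listEventBuses(client *cloudwatchevents.CloudWatchEvents) error {
	out, err := client.ListEventBuses(&cloudwatchevents.ListEventBusesInput{})
	if err != nil {
		return err
	}
	for _, bus := range out.EventBuses {
		// Includes the default bus plus any custom and partner event buses.
		fmt.Println(aws.StringValue(bus.Name), aws.StringValue(bus.Arn))
	}
	return nil
}
```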
// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemovePermission -func (c *CloudWatchEvents) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListEventBuses +func (c *CloudWatchEvents) ListEventBuses(input *ListEventBusesInput) (*ListEventBusesOutput, error) { + req, out := c.ListEventBusesRequest(input) return out, req.Send() } -// RemovePermissionWithContext is the same as RemovePermission with the addition of +// ListEventBusesWithContext is the same as ListEventBuses with the addition of // the ability to pass a context and additional request options. // -// See RemovePermission for details on how to use this API operation. +// See ListEventBuses for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) RemovePermissionWithContext(ctx aws.Context, input *RemovePermissionInput, opts ...request.Option) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) +func (c *CloudWatchEvents) ListEventBusesWithContext(ctx aws.Context, input *ListEventBusesInput, opts ...request.Option) (*ListEventBusesOutput, error) { + req, out := c.ListEventBusesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveTargets = "RemoveTargets" +const opListEventSources = "ListEventSources" -// RemoveTargetsRequest generates a "aws/request.Request" representing the -// client's request for the RemoveTargets operation. The "output" return +// ListEventSourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListEventSources operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveTargets for more information on using the RemoveTargets +// See ListEventSources for more information on using the ListEventSources // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveTargetsRequest method. -// req, resp := client.RemoveTargetsRequest(params) +// // Example sending a request using the ListEventSourcesRequest method. 
+// req, resp := client.ListEventSourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemoveTargets -func (c *CloudWatchEvents) RemoveTargetsRequest(input *RemoveTargetsInput) (req *request.Request, output *RemoveTargetsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListEventSources +func (c *CloudWatchEvents) ListEventSourcesRequest(input *ListEventSourcesInput) (req *request.Request, output *ListEventSourcesOutput) { op := &request.Operation{ - Name: opRemoveTargets, + Name: opListEventSources, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RemoveTargetsInput{} + input = &ListEventSourcesInput{} } - output = &RemoveTargetsOutput{} + output = &ListEventSourcesOutput{} req = c.newRequest(op, input, output) return } -// RemoveTargets API operation for Amazon CloudWatch Events. +// ListEventSources API operation for Amazon CloudWatch Events. // -// Removes the specified targets from the specified rule. When the rule is triggered, -// those targets are no longer be invoked. -// -// When you remove a target, when the associated rule triggers, removed targets -// might continue to be invoked. Allow a short period of time for changes to -// take effect. +// You can use this to see all the partner event sources that have been shared +// with your AWS account. For more information about partner event sources, +// see CreateEventBus. // -// This action can partially fail if too many requests are made at the same -// time. If that happens, FailedEntryCount is non-zero in the response and each -// entry in FailedEntries provides the ID of the failed target and the error -// code. +// This operation is run by AWS customers, not by SaaS partners. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation RemoveTargets for usage and error information. +// API operation ListEventSources for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. -// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. 
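ListEventSources returns results a page at a time; a sketch of the usual NextToken loop (the Limit value is arbitrary, and the client is assumed as before):

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func listAllEventSources(client *cloudwatchevents.CloudWatchEvents) error {
	input := &cloudwatchevents.ListEventSourcesInput{Limit: aws.Int64(50)}
	for {
		out, err := client.ListEventSources(input)
		if err != nil {
			return err
		}
		for _, es := range out.EventSources {
			fmt.Println(aws.StringValue(es.Name), aws.StringValue(es.Arn))
		}
		// A nil NextToken means the last page has been reached.
		if out.NextToken == nil {
			return nil
		}
		input.NextToken = out.NextToken
	}
}
```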
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemoveTargets -func (c *CloudWatchEvents) RemoveTargets(input *RemoveTargetsInput) (*RemoveTargetsOutput, error) { - req, out := c.RemoveTargetsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListEventSources +func (c *CloudWatchEvents) ListEventSources(input *ListEventSourcesInput) (*ListEventSourcesOutput, error) { + req, out := c.ListEventSourcesRequest(input) return out, req.Send() } -// RemoveTargetsWithContext is the same as RemoveTargets with the addition of +// ListEventSourcesWithContext is the same as ListEventSources with the addition of // the ability to pass a context and additional request options. // -// See RemoveTargets for details on how to use this API operation. +// See ListEventSources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) RemoveTargetsWithContext(ctx aws.Context, input *RemoveTargetsInput, opts ...request.Option) (*RemoveTargetsOutput, error) { - req, out := c.RemoveTargetsRequest(input) +func (c *CloudWatchEvents) ListEventSourcesWithContext(ctx aws.Context, input *ListEventSourcesInput, opts ...request.Option) (*ListEventSourcesOutput, error) { + req, out := c.ListEventSourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opListPartnerEventSourceAccounts = "ListPartnerEventSourceAccounts" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListPartnerEventSourceAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListPartnerEventSourceAccounts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See ListPartnerEventSourceAccounts for more information on using the ListPartnerEventSourceAccounts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the ListPartnerEventSourceAccountsRequest method. 
+// req, resp := client.ListPartnerEventSourceAccountsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TagResource -func (c *CloudWatchEvents) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSourceAccounts +func (c *CloudWatchEvents) ListPartnerEventSourceAccountsRequest(input *ListPartnerEventSourceAccountsInput) (req *request.Request, output *ListPartnerEventSourceAccountsOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opListPartnerEventSourceAccounts, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &ListPartnerEventSourceAccountsInput{} } - output = &TagResourceOutput{} + output = &ListPartnerEventSourceAccountsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Amazon CloudWatch Events. -// -// Assigns one or more tags (key-value pairs) to the specified CloudWatch Events -// resource. Tags can help you organize and categorize your resources. You can -// also use them to scope user permissions by granting a user permission to -// access or change only resources with certain tag values. In CloudWatch Events, -// rules can be tagged. -// -// Tags don't have any semantic meaning to AWS and are interpreted strictly -// as strings of characters. +// ListPartnerEventSourceAccounts API operation for Amazon CloudWatch Events. // -// You can use the TagResource action with a rule that already has tags. If -// you specify a new tag key for the rule, this tag is appended to the list -// of tags associated with the rule. If you specify a tag key that is already -// associated with the rule, the new tag value that you specify replaces the -// previous value for that tag. +// An SaaS partner can use this operation to display the AWS account ID that +// a particular partner event source name is associated with. // -// You can associate as many as 50 tags with a resource. +// This operation is used by SaaS partners, not by AWS customers. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation TagResource for usage and error information. +// API operation ListPartnerEventSourceAccounts for usage and error information. // // Returned Error Codes: // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. +// An entity that you specified doesn't exist. // // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. 
You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TagResource -func (c *CloudWatchEvents) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSourceAccounts +func (c *CloudWatchEvents) ListPartnerEventSourceAccounts(input *ListPartnerEventSourceAccountsInput) (*ListPartnerEventSourceAccountsOutput, error) { + req, out := c.ListPartnerEventSourceAccountsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// ListPartnerEventSourceAccountsWithContext is the same as ListPartnerEventSourceAccounts with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See ListPartnerEventSourceAccounts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *CloudWatchEvents) ListPartnerEventSourceAccountsWithContext(ctx aws.Context, input *ListPartnerEventSourceAccountsInput, opts ...request.Option) (*ListPartnerEventSourceAccountsOutput, error) { + req, out := c.ListPartnerEventSourceAccountsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTestEventPattern = "TestEventPattern" +const opListPartnerEventSources = "ListPartnerEventSources" -// TestEventPatternRequest generates a "aws/request.Request" representing the -// client's request for the TestEventPattern operation. The "output" return +// ListPartnerEventSourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListPartnerEventSources operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TestEventPattern for more information on using the TestEventPattern +// See ListPartnerEventSources for more information on using the ListPartnerEventSources // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TestEventPatternRequest method. -// req, resp := client.TestEventPatternRequest(params) +// // Example sending a request using the ListPartnerEventSourcesRequest method. 
+// req, resp := client.ListPartnerEventSourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TestEventPattern -func (c *CloudWatchEvents) TestEventPatternRequest(input *TestEventPatternInput) (req *request.Request, output *TestEventPatternOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSources +func (c *CloudWatchEvents) ListPartnerEventSourcesRequest(input *ListPartnerEventSourcesInput) (req *request.Request, output *ListPartnerEventSourcesOutput) { op := &request.Operation{ - Name: opTestEventPattern, + Name: opListPartnerEventSources, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TestEventPatternInput{} + input = &ListPartnerEventSourcesInput{} } - output = &TestEventPatternOutput{} + output = &ListPartnerEventSourcesOutput{} req = c.newRequest(op, input, output) return } -// TestEventPattern API operation for Amazon CloudWatch Events. +// ListPartnerEventSources API operation for Amazon CloudWatch Events. // -// Tests whether the specified event pattern matches the provided event. +// An SaaS partner can use this operation to list all the partner event source +// names that they have created. // -// Most services in AWS treat : or / as the same character in Amazon Resource -// Names (ARNs). However, CloudWatch Events uses an exact match in event patterns -// and rules. Be sure to use the correct ARN characters when creating event -// patterns so that they match the ARN syntax in the event you want to match. +// This operation is not used by AWS customers. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation TestEventPattern for usage and error information. +// API operation ListPartnerEventSources for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidEventPatternException "InvalidEventPatternException" -// The event pattern is not valid. -// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TestEventPattern -func (c *CloudWatchEvents) TestEventPattern(input *TestEventPatternInput) (*TestEventPatternOutput, error) { - req, out := c.TestEventPatternRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSources +func (c *CloudWatchEvents) ListPartnerEventSources(input *ListPartnerEventSourcesInput) (*ListPartnerEventSourcesOutput, error) { + req, out := c.ListPartnerEventSourcesRequest(input) return out, req.Send() } -// TestEventPatternWithContext is the same as TestEventPattern with the addition of +// ListPartnerEventSourcesWithContext is the same as ListPartnerEventSources with the addition of // the ability to pass a context and additional request options. // -// See TestEventPattern for details on how to use this API operation. +// See ListPartnerEventSources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
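A short ListPartnerEventSources sketch; NamePrefix appears to be required for this operation, and the prefix shown is hypothetical:

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func listPartnerEventSources(client *cloudwatchevents.CloudWatchEvents) error {
	out, err := client.ListPartnerEventSources(
		&cloudwatchevents.ListPartnerEventSourcesInput{
			// Hypothetical partner prefix; only sources under it are returned.
			NamePrefix: aws.String("aws.partner/example.com"),
		})
	if err != nil {
		return err
	}
	for _, s := range out.PartnerEventSources {
		fmt.Println(aws.StringValue(s.Name))
	}
	return nil
}
```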
-func (c *CloudWatchEvents) TestEventPatternWithContext(ctx aws.Context, input *TestEventPatternInput, opts ...request.Option) (*TestEventPatternOutput, error) { - req, out := c.TestEventPatternRequest(input) +func (c *CloudWatchEvents) ListPartnerEventSourcesWithContext(ctx aws.Context, input *ListPartnerEventSourcesInput, opts ...request.Option) (*ListPartnerEventSourcesOutput, error) { + req, out := c.ListPartnerEventSourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opListRuleNamesByTarget = "ListRuleNamesByTarget" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// ListRuleNamesByTargetRequest generates a "aws/request.Request" representing the +// client's request for the ListRuleNamesByTarget operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See ListRuleNamesByTarget for more information on using the ListRuleNamesByTarget // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the ListRuleNamesByTargetRequest method. +// req, resp := client.ListRuleNamesByTargetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/UntagResource -func (c *CloudWatchEvents) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRuleNamesByTarget +func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opListRuleNamesByTarget, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &ListRuleNamesByTargetInput{} } - output = &UntagResourceOutput{} + output = &ListRuleNamesByTargetOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon CloudWatch Events. +// ListRuleNamesByTarget API operation for Amazon CloudWatch Events. // -// Removes one or more tags from the specified CloudWatch Events resource. In -// CloudWatch Events, rules can be tagged. +// Lists the rules for the specified target. You can see which rules can invoke +// a specific target in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudWatch Events's -// API operation UntagResource for usage and error information. 
+// API operation ListRuleNamesByTarget for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// An entity that you specified does not exist. -// // * ErrCodeInternalException "InternalException" // This exception occurs due to unexpected causes. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// There is concurrent modification on a rule or target. -// -// * ErrCodeManagedRuleException "ManagedRuleException" -// This rule was created by an AWS service on behalf of your account. It is -// managed by that service. If you see this error in response to DeleteRule -// or RemoveTargets, you can use the Force parameter in those calls to delete -// the rule or remove targets from the rule. You cannot modify these managed -// rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, -// or UntagResource. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/UntagResource -func (c *CloudWatchEvents) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRuleNamesByTarget +func (c *CloudWatchEvents) ListRuleNamesByTarget(input *ListRuleNamesByTargetInput) (*ListRuleNamesByTargetOutput, error) { + req, out := c.ListRuleNamesByTargetRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// ListRuleNamesByTargetWithContext is the same as ListRuleNamesByTarget with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See ListRuleNamesByTarget for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudWatchEvents) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *CloudWatchEvents) ListRuleNamesByTargetWithContext(ctx aws.Context, input *ListRuleNamesByTargetInput, opts ...request.Option) (*ListRuleNamesByTargetOutput, error) { + req, out := c.ListRuleNamesByTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRules = "ListRules" + +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRules for more information on using the ListRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRulesRequest method. 
+// req, resp := client.ListRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRules +func (c *CloudWatchEvents) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { + op := &request.Operation{ + Name: opListRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesInput{} + } + + output = &ListRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRules API operation for Amazon CloudWatch Events. +// +// Lists your EventBridge rules. You can either list all the rules or provide +// a prefix to match to the rule names. +// +// ListRules doesn't list the targets of a rule. To see the targets associated +// with a rule, use ListTargetsByRule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation ListRules for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListRules +func (c *CloudWatchEvents) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + return out, req.Send() +} + +// ListRulesWithContext is the same as ListRules with the addition of +// the ability to pass a context and additional request options. +// +// See ListRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) ListRulesWithContext(ctx aws.Context, input *ListRulesInput, opts ...request.Option) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTagsForResource +func (c *CloudWatchEvents) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon CloudWatch Events. +// +// Displays the tags associated with an EventBridge resource. In EventBridge, +// rules can be tagged. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTagsForResource +func (c *CloudWatchEvents) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTargetsByRule = "ListTargetsByRule" + +// ListTargetsByRuleRequest generates a "aws/request.Request" representing the +// client's request for the ListTargetsByRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTargetsByRule for more information on using the ListTargetsByRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTargetsByRuleRequest method. 
+// req, resp := client.ListTargetsByRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTargetsByRule +func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) { + op := &request.Operation{ + Name: opListTargetsByRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTargetsByRuleInput{} + } + + output = &ListTargetsByRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTargetsByRule API operation for Amazon CloudWatch Events. +// +// Lists the targets assigned to the specified rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation ListTargetsByRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListTargetsByRule +func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*ListTargetsByRuleOutput, error) { + req, out := c.ListTargetsByRuleRequest(input) + return out, req.Send() +} + +// ListTargetsByRuleWithContext is the same as ListTargetsByRule with the addition of +// the ability to pass a context and additional request options. +// +// See ListTargetsByRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) ListTargetsByRuleWithContext(ctx aws.Context, input *ListTargetsByRuleInput, opts ...request.Option) (*ListTargetsByRuleOutput, error) { + req, out := c.ListTargetsByRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// This structure specifies the VPC subnets and security groups for the task, -// and whether a public IP address is to be used. This structure is relevant -// only for ECS tasks that use the awsvpc network mode. -type AwsVpcConfiguration struct { +const opPutEvents = "PutEvents" + +// PutEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutEvents for more information on using the PutEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutEventsRequest method. 
+// req, resp := client.PutEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutEvents +func (c *CloudWatchEvents) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { + op := &request.Operation{ + Name: opPutEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutEventsInput{} + } + + output = &PutEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutEvents API operation for Amazon CloudWatch Events. +// +// Sends custom events to EventBridge so that they can be matched to rules. +// These events can be from your custom applications and services. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation PutEvents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutEvents +func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) + return out, req.Send() +} + +// PutEventsWithContext is the same as PutEvents with the addition of +// the ability to pass a context and additional request options. +// +// See PutEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) PutEventsWithContext(ctx aws.Context, input *PutEventsInput, opts ...request.Option) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPartnerEvents = "PutPartnerEvents" + +// PutPartnerEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutPartnerEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPartnerEvents for more information on using the PutPartnerEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPartnerEventsRequest method. 
+// req, resp := client.PutPartnerEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPartnerEvents +func (c *CloudWatchEvents) PutPartnerEventsRequest(input *PutPartnerEventsInput) (req *request.Request, output *PutPartnerEventsOutput) { + op := &request.Operation{ + Name: opPutPartnerEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutPartnerEventsInput{} + } + + output = &PutPartnerEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutPartnerEvents API operation for Amazon CloudWatch Events. +// +// This is used by SaaS partners to write events to a customer's partner event +// bus. +// +// AWS customers do not use this operation. Instead, AWS customers can use PutEvents +// to write custom events from their own applications to an event bus. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation PutPartnerEvents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPartnerEvents +func (c *CloudWatchEvents) PutPartnerEvents(input *PutPartnerEventsInput) (*PutPartnerEventsOutput, error) { + req, out := c.PutPartnerEventsRequest(input) + return out, req.Send() +} + +// PutPartnerEventsWithContext is the same as PutPartnerEvents with the addition of +// the ability to pass a context and additional request options. +// +// See PutPartnerEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) PutPartnerEventsWithContext(ctx aws.Context, input *PutPartnerEventsInput, opts ...request.Option) (*PutPartnerEventsOutput, error) { + req, out := c.PutPartnerEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPermission = "PutPermission" + +// PutPermissionRequest generates a "aws/request.Request" representing the +// client's request for the PutPermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPermission for more information on using the PutPermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPermissionRequest method. 
+// req, resp := client.PutPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPermission +func (c *CloudWatchEvents) PutPermissionRequest(input *PutPermissionInput) (req *request.Request, output *PutPermissionOutput) { + op := &request.Operation{ + Name: opPutPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutPermissionInput{} + } + + output = &PutPermissionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutPermission API operation for Amazon CloudWatch Events. +// +// Running PutPermission permits the specified AWS account or AWS organization +// to put events to the specified event bus. Rules in your account are triggered +// by these events arriving to an event bus in your account. +// +// For another account to send events to your account, that external account +// must have a rule with your account's event bus as a target. +// +// To enable multiple AWS accounts to put events to an event bus, run PutPermission +// once for each of these accounts. Or, if all the accounts are members of the +// same AWS organization, you can run PutPermission once specifying Principal +// as "*" and specifying the AWS organization ID in Condition, to grant permissions +// to all accounts in that organization. +// +// If you grant permissions using an organization, then accounts in that organization +// must specify a RoleArn with proper permissions when they use PutTarget to +// add your account's event bus as a target. For more information, see Sending +// and Receiving Events Between AWS Accounts (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-cross-account-event-delivery.html) +// in the Amazon EventBridge User Guide. +// +// The permission policy on an event bus can't exceed 10 KB in size. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation PutPermission for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodePolicyLengthExceededException "PolicyLengthExceededException" +// The event bus policy is too long. For more information, see the limits. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPermission +func (c *CloudWatchEvents) PutPermission(input *PutPermissionInput) (*PutPermissionOutput, error) { + req, out := c.PutPermissionRequest(input) + return out, req.Send() +} + +// PutPermissionWithContext is the same as PutPermission with the addition of +// the ability to pass a context and additional request options. +// +// See PutPermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CloudWatchEvents) PutPermissionWithContext(ctx aws.Context, input *PutPermissionInput, opts ...request.Option) (*PutPermissionOutput, error) {
+	req, out := c.PutPermissionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutRule = "PutRule"
+
+// PutRuleRequest generates a "aws/request.Request" representing the
+// client's request for the PutRule operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutRule for more information on using the PutRule
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the PutRuleRequest method.
+//    req, resp := client.PutRuleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutRule
+func (c *CloudWatchEvents) PutRuleRequest(input *PutRuleInput) (req *request.Request, output *PutRuleOutput) {
+	op := &request.Operation{
+		Name:       opPutRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRuleInput{}
+	}
+
+	output = &PutRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// PutRule API operation for Amazon CloudWatch Events.
+//
+// Creates or updates the specified rule. Rules are enabled by default, or
+// based on the value of the state. You can disable a rule using DisableRule.
+//
+// A single rule watches for events from a single event bus. Events generated
+// by AWS services go to your account's default event bus. Events generated
+// by SaaS partner services or applications go to the matching partner event
+// bus. If you have custom applications or services, you can specify whether
+// their events go to your default event bus or a custom event bus that you
+// have created. For more information, see CreateEventBus.
+//
+// If you're updating an existing rule, the rule is replaced with what you specify
+// in this PutRule command. If you omit arguments in PutRule, the old values
+// for those arguments aren't kept. Instead, they're replaced with null values.
+//
+// When you create or update a rule, incoming events might not immediately start
+// matching new or updated rules. Allow a short period of time for changes
+// to take effect.
+//
+// A rule must contain at least an EventPattern or ScheduleExpression. Rules
+// with EventPatterns are triggered when a matching event is observed. Rules
+// with ScheduleExpressions self-trigger based on the given schedule. A rule
+// can have both an EventPattern and a ScheduleExpression, in which case the
+// rule triggers on matching events as well as on a schedule.
+//
+// When you initially create a rule, you can optionally assign one or more tags
+// to the rule. Tags can help you organize and categorize your resources. You
+// can also use them to scope user permissions, by granting a user permission
+// to access or change only rules with certain tag values.
To use the PutRule +// operation and assign tags, you must have both the events:PutRule and events:TagResource +// permissions. +// +// If you are updating an existing rule, any tags you specify in the PutRule +// operation are ignored. To update the tags of an existing rule, use TagResource +// and UntagResource. +// +// Most services in AWS treat : or / as the same character in Amazon Resource +// Names (ARNs). However, EventBridge uses an exact match in event patterns +// and rules. Be sure to use the correct ARN characters when creating event +// patterns so that they match the ARN syntax in the event that you want to +// match. +// +// In EventBridge, you could create rules that lead to infinite loops, where +// a rule is fired repeatedly. For example, a rule might detect that ACLs have +// changed on an S3 bucket, and trigger software to change them to the desired +// state. If you don't write the rule carefully, the subsequent change to the +// ACLs fires the rule again, creating an infinite loop. +// +// To prevent this, write the rules so that the triggered actions don't refire +// the same rule. For example, your rule could fire only if ACLs are found to +// be in a bad state, instead of after any change. +// +// An infinite loop can quickly cause higher than expected charges. We recommend +// that you use budgeting, which alerts you when charges exceed your specified +// limit. For more information, see Managing Your Costs with Budgets (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/budgets-managing-costs.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation PutRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidEventPatternException "InvalidEventPatternException" +// The event pattern isn't valid. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// You tried to create more resources than is allowed. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutRule +func (c *CloudWatchEvents) PutRule(input *PutRuleInput) (*PutRuleOutput, error) { + req, out := c.PutRuleRequest(input) + return out, req.Send() +} + +// PutRuleWithContext is the same as PutRule with the addition of +// the ability to pass a context and additional request options. +// +// See PutRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) PutRuleWithContext(ctx aws.Context, input *PutRuleInput, opts ...request.Option) (*PutRuleOutput, error) { + req, out := c.PutRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutTargets = "PutTargets" + +// PutTargetsRequest generates a "aws/request.Request" representing the +// client's request for the PutTargets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutTargets for more information on using the PutTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutTargetsRequest method. +// req, resp := client.PutTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutTargets +func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *request.Request, output *PutTargetsOutput) { + op := &request.Operation{ + Name: opPutTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutTargetsInput{} + } + + output = &PutTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutTargets API operation for Amazon CloudWatch Events. +// +// Adds the specified targets to the specified rule, or updates the targets +// if they're already associated with the rule. +// +// Targets are the resources that are invoked when a rule is triggered. +// +// You can configure the following as targets in EventBridge: +// +// * EC2 instances +// +// * SSM Run Command +// +// * SSM Automation +// +// * AWS Lambda functions +// +// * Data streams in Amazon Kinesis Data Streams +// +// * Data delivery streams in Amazon Kinesis Data Firehose +// +// * Amazon ECS tasks +// +// * AWS Step Functions state machines +// +// * AWS Batch jobs +// +// * AWS CodeBuild projects +// +// * Pipelines in AWS CodePipeline +// +// * Amazon Inspector assessment templates +// +// * Amazon SNS topics +// +// * Amazon SQS queues, including FIFO queues +// +// * The default event bus of another AWS account +// +// Creating rules with built-in targets is supported only on the AWS Management +// Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances +// API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. +// +// For some target types, PutTargets provides target-specific parameters. If +// the target is a Kinesis data stream, you can optionally specify which shard +// the event goes to by using the KinesisParameters argument. To invoke a command +// on multiple EC2 instances with one rule, you can use the RunCommandParameters +// field. +// +// To be able to make API calls against the resources that you own, Amazon EventBridge +// needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, +// EventBridge relies on resource-based policies. 
For EC2 instances, Kinesis +// data streams, and AWS Step Functions state machines, EventBridge relies on +// IAM roles that you specify in the RoleARN argument in PutTargets. For more +// information, see Authentication and Access Control (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) +// in the Amazon EventBridge User Guide. +// +// If another AWS account is in the same Region and has granted you permission +// (using PutPermission), you can send events to that account. Set that account's +// event bus as a target of the rules in your account. To send the matched events +// to the other account, specify that account's event bus as the Arn value when +// you run PutTargets. If your account sends events to another account, your +// account is charged for each sent event. Each event sent to another account +// is charged as a custom event. The account receiving the event isn't charged. +// For more information, see Amazon EventBridge Pricing (https://aws.amazon.com/eventbridge/pricing/). +// +// If you're setting an event bus in another account as the target and that +// account granted permission to your account through an organization instead +// of directly by the account ID, you must specify a RoleArn with proper permissions +// in the Target structure. For more information, see Sending and Receiving +// Events Between AWS Accounts (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-cross-account-event-delivery.html) +// in the Amazon EventBridge User Guide. +// +// For more information about enabling cross-account events, see PutPermission. +// +// Input, InputPath, and InputTransformer are mutually exclusive and optional +// parameters of a target. When a rule is triggered due to a matched event: +// +// * If none of the following arguments are specified for a target, the entire +// event is passed to the target in JSON format (unless the target is Amazon +// EC2 Run Command or Amazon ECS task, in which case nothing from the event +// is passed to the target). +// +// * If Input is specified in the form of valid JSON, then the matched event +// is overridden with this constant. +// +// * If InputPath is specified in the form of JSONPath (for example, $.detail), +// only the part of the event specified in the path is passed to the target +// (for example, only the detail part of the event is passed). +// +// * If InputTransformer is specified, one or more specified JSONPaths are +// extracted from the event and used as values in a template that you specify +// as the input to the target. +// +// When you specify InputPath or InputTransformer, you must use JSON dot notation, +// not bracket notation. +// +// When you add targets to a rule and the associated rule triggers soon after, +// new or updated targets might not be immediately invoked. Allow a short period +// of time for changes to take effect. +// +// This action can partially fail if too many requests are made at the same +// time. If that happens, FailedEntryCount is nonzero in the response, and each +// entry in FailedEntries provides the ID of the failed target and the error +// code. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation PutTargets for usage and error information. 
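+//
+//    // A minimal, hypothetical sketch of adding a Lambda target to a rule;
+//    // the rule name, target ID, and function ARN are illustrative
+//    // assumptions, and sess is an assumed *session.Session:
+//    svc := cloudwatchevents.New(sess)
+//    _, err := svc.PutTargets(&cloudwatchevents.PutTargetsInput{
+//        Rule: aws.String("my-rule"),
+//        Targets: []*cloudwatchevents.Target{{
+//            Id:  aws.String("my-target"),
+//            Arn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:my-fn"),
+//        }},
+//    })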
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// You tried to create more resources than is allowed. +// +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutTargets +func (c *CloudWatchEvents) PutTargets(input *PutTargetsInput) (*PutTargetsOutput, error) { + req, out := c.PutTargetsRequest(input) + return out, req.Send() +} + +// PutTargetsWithContext is the same as PutTargets with the addition of +// the ability to pass a context and additional request options. +// +// See PutTargets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) PutTargetsWithContext(ctx aws.Context, input *PutTargetsInput, opts ...request.Option) (*PutTargetsOutput, error) { + req, out := c.PutTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a "aws/request.Request" representing the +// client's request for the RemovePermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemovePermission for more information on using the RemovePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemovePermissionRequest method. 
+// req, resp := client.RemovePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemovePermission +func (c *CloudWatchEvents) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + output = &RemovePermissionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemovePermission API operation for Amazon CloudWatch Events. +// +// Revokes the permission of another AWS account to be able to put events to +// the specified event bus. Specify the account to revoke by the StatementId +// value that you associated with the account when you granted it permission +// with PutPermission. You can find the StatementId by using DescribeEventBus. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation RemovePermission for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemovePermission +func (c *CloudWatchEvents) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + return out, req.Send() +} + +// RemovePermissionWithContext is the same as RemovePermission with the addition of +// the ability to pass a context and additional request options. +// +// See RemovePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) RemovePermissionWithContext(ctx aws.Context, input *RemovePermissionInput, opts ...request.Option) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTargets = "RemoveTargets" + +// RemoveTargetsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTargets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTargets for more information on using the RemoveTargets +// API call, and error handling. 
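+//
+//    // A hypothetical sketch of the error handling mentioned above, using
+//    // the aws/awserr package (svc and input are assumed values):
+//    if _, err := svc.RemoveTargets(input); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok &&
+//            aerr.Code() == cloudwatchevents.ErrCodeResourceNotFoundException {
+//            // the rule or one of the targets doesn't exist
+//        }
+//    }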
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the RemoveTargetsRequest method.
+//    req, resp := client.RemoveTargetsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemoveTargets
+func (c *CloudWatchEvents) RemoveTargetsRequest(input *RemoveTargetsInput) (req *request.Request, output *RemoveTargetsOutput) {
+	op := &request.Operation{
+		Name:       opRemoveTargets,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemoveTargetsInput{}
+	}
+
+	output = &RemoveTargetsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RemoveTargets API operation for Amazon CloudWatch Events.
+//
+// Removes the specified targets from the specified rule. When the rule is triggered,
+// those targets are no longer invoked.
+//
+// When you remove a target, the removed target might continue to be invoked
+// when the associated rule triggers. Allow a short period of time for changes
+// to take effect.
+//
+// This action can partially fail if too many requests are made at the same
+// time. If that happens, FailedEntryCount is nonzero in the response and each
+// entry in FailedEntries provides the ID of the failed target and the error
+// code.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Events's
+// API operation RemoveTargets for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   An entity that you specified doesn't exist.
+//
+//   * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+//   There is concurrent modification on a resource.
+//
+//   * ErrCodeManagedRuleException "ManagedRuleException"
+//   An AWS service created this rule on behalf of your account. That service
+//   manages it. If you see this error in response to DeleteRule or RemoveTargets,
+//   you can use the Force parameter in those calls to delete the rule or remove
+//   targets from the rule. You can't modify these managed rules by using DisableRule,
+//   EnableRule, PutTargets, PutRule, TagResource, or UntagResource.
+//
+//   * ErrCodeInternalException "InternalException"
+//   This exception occurs due to unexpected causes.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/RemoveTargets
+func (c *CloudWatchEvents) RemoveTargets(input *RemoveTargetsInput) (*RemoveTargetsOutput, error) {
+	req, out := c.RemoveTargetsRequest(input)
+	return out, req.Send()
+}
+
+// RemoveTargetsWithContext is the same as RemoveTargets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RemoveTargets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
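+//
+//    // A hypothetical sketch of passing a deadline via a context (svc and
+//    // input are assumed values):
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.RemoveTargetsWithContext(ctx, input)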
+func (c *CloudWatchEvents) RemoveTargetsWithContext(ctx aws.Context, input *RemoveTargetsInput, opts ...request.Option) (*RemoveTargetsOutput, error) { + req, out := c.RemoveTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TagResource +func (c *CloudWatchEvents) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon CloudWatch Events. +// +// Assigns one or more tags (key-value pairs) to the specified EventBridge resource. +// Tags can help you organize and categorize your resources. You can also use +// them to scope user permissions by granting a user permission to access or +// change only resources with certain tag values. In EventBridge, rules can +// be tagged. +// +// Tags don't have any semantic meaning to AWS and are interpreted strictly +// as strings of characters. +// +// You can use the TagResource action with a rule that already has tags. If +// you specify a new tag key for the rule, this tag is appended to the list +// of tags associated with the rule. If you specify a tag key that is already +// associated with the rule, the new tag value that you specify replaces the +// previous value for that tag. +// +// You can associate as many as 50 tags with a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. 
If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TagResource +func (c *CloudWatchEvents) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTestEventPattern = "TestEventPattern" + +// TestEventPatternRequest generates a "aws/request.Request" representing the +// client's request for the TestEventPattern operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestEventPattern for more information on using the TestEventPattern +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TestEventPatternRequest method. +// req, resp := client.TestEventPatternRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TestEventPattern +func (c *CloudWatchEvents) TestEventPatternRequest(input *TestEventPatternInput) (req *request.Request, output *TestEventPatternOutput) { + op := &request.Operation{ + Name: opTestEventPattern, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestEventPatternInput{} + } + + output = &TestEventPatternOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestEventPattern API operation for Amazon CloudWatch Events. +// +// Tests whether the specified event pattern matches the provided event. +// +// Most services in AWS treat : or / as the same character in Amazon Resource +// Names (ARNs). However, EventBridge uses an exact match in event patterns +// and rules. Be sure to use the correct ARN characters when creating event +// patterns so that they match the ARN syntax in the event that you want to +// match. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
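+//
+//    // A minimal, hypothetical sketch; eventJSON is an assumed, complete
+//    // event document, and the pattern shown is illustrative:
+//    out, err := svc.TestEventPattern(&cloudwatchevents.TestEventPatternInput{
+//        Event:        aws.String(eventJSON),
+//        EventPattern: aws.String(`{"source":["aws.ec2"]}`),
+//    })
+//    if err == nil && aws.BoolValue(out.Result) {
+//        // the event matches the pattern
+//    }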
+// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation TestEventPattern for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidEventPatternException "InvalidEventPatternException" +// The event pattern isn't valid. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/TestEventPattern +func (c *CloudWatchEvents) TestEventPattern(input *TestEventPatternInput) (*TestEventPatternOutput, error) { + req, out := c.TestEventPatternRequest(input) + return out, req.Send() +} + +// TestEventPatternWithContext is the same as TestEventPattern with the addition of +// the ability to pass a context and additional request options. +// +// See TestEventPattern for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) TestEventPatternWithContext(ctx aws.Context, input *TestEventPatternInput, opts ...request.Option) (*TestEventPatternOutput, error) { + req, out := c.TestEventPatternRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/UntagResource +func (c *CloudWatchEvents) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon CloudWatch Events. +// +// Removes one or more tags from the specified EventBridge resource. In EventBridge, +// rules can be tagged. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Events's +// API operation UntagResource for usage and error information. 
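+//
+//    // A hypothetical sketch of removing a tag key from a rule; the ARN and
+//    // tag key are illustrative assumptions:
+//    _, err := svc.UntagResource(&cloudwatchevents.UntagResourceInput{
+//        ResourceARN: aws.String("arn:aws:events:us-east-1:123456789012:rule/my-rule"),
+//        TagKeys:     []*string{aws.String("team")},
+//    })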
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// An entity that you specified doesn't exist. +// +// * ErrCodeInternalException "InternalException" +// This exception occurs due to unexpected causes. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// There is concurrent modification on a resource. +// +// * ErrCodeManagedRuleException "ManagedRuleException" +// An AWS service created this rule on behalf of your account. That service +// manages it. If you see this error in response to DeleteRule or RemoveTargets, +// you can use the Force parameter in those calls to delete the rule or remove +// targets from the rule. You can't modify these managed rules by using DisableRule, +// EnableRule, PutTargets, PutRule, TagResource, or UntagResource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/UntagResource +func (c *CloudWatchEvents) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchEvents) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type ActivateEventSourceInput struct { + _ struct{} `type:"structure"` + + // The name of the partner event source to activate. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActivateEventSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateEventSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActivateEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActivateEventSourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ActivateEventSourceInput) SetName(v string) *ActivateEventSourceInput { + s.Name = &v + return s +} + +type ActivateEventSourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ActivateEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateEventSourceOutput) GoString() string { + return s.String() +} + +// This structure specifies the VPC subnets and security groups for the task +// and whether a public IP address is to be used. This structure is relevant +// only for ECS tasks that use the awsvpc network mode. 
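+//
+// A minimal, hypothetical sketch of building this structure with the generated
+// setters; the subnet ID is an illustrative assumption, and AssignPublicIpEnabled
+// is the generated enum constant for "ENABLED":
+//
+//    cfg := (&cloudwatchevents.AwsVpcConfiguration{}).
+//        SetSubnets([]*string{aws.String("subnet-0abc1234")}).
+//        SetAssignPublicIp(cloudwatchevents.AssignPublicIpEnabled)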
+type AwsVpcConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether the task's elastic network interface receives a public + // IP address. You can specify ENABLED only when LaunchType in EcsParameters + // is set to FARGATE. + AssignPublicIp *string `type:"string" enum:"AssignPublicIp"` + + // Specifies the security groups associated with the task. These security groups + // must all be in the same VPC. You can specify as many as five security groups. + // If you don't specify a security group, the default security group for the + // VPC is used. + SecurityGroups []*string `type:"list"` + + // Specifies the subnets associated with the task. These subnets must all be + // in the same VPC. You can specify as many as 16 subnets. + // + // Subnets is a required field + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AwsVpcConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsVpcConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsVpcConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsVpcConfiguration"} + if s.Subnets == nil { + invalidParams.Add(request.NewErrParamRequired("Subnets")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssignPublicIp sets the AssignPublicIp field's value. +func (s *AwsVpcConfiguration) SetAssignPublicIp(v string) *AwsVpcConfiguration { + s.AssignPublicIp = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *AwsVpcConfiguration) SetSecurityGroups(v []*string) *AwsVpcConfiguration { + s.SecurityGroups = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *AwsVpcConfiguration) SetSubnets(v []*string) *AwsVpcConfiguration { + s.Subnets = v + return s +} + +// The array properties for the submitted job, such as the size of the array. +// The array size can be between 2 and 10,000. If you specify array properties +// for a job, it becomes an array job. This parameter is used only if the target +// is an AWS Batch job. +type BatchArrayProperties struct { + _ struct{} `type:"structure"` + + // The size of the array, if this is an array batch job. Valid values are integers + // between 2 and 10,000. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s BatchArrayProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchArrayProperties) GoString() string { + return s.String() +} + +// SetSize sets the Size field's value. +func (s *BatchArrayProperties) SetSize(v int64) *BatchArrayProperties { + s.Size = &v + return s +} + +// The custom parameters to be used when the target is an AWS Batch job. +type BatchParameters struct { + _ struct{} `type:"structure"` + + // The array properties for the submitted job, such as the size of the array. + // The array size can be between 2 and 10,000. If you specify array properties + // for a job, it becomes an array job. This parameter is used only if the target + // is an AWS Batch job. + ArrayProperties *BatchArrayProperties `type:"structure"` + + // The ARN or name of the job definition to use if the event target is an AWS + // Batch job. This job definition must already exist. 
+ // + // JobDefinition is a required field + JobDefinition *string `type:"string" required:"true"` + + // The name to use for this execution of the job, if the target is an AWS Batch + // job. + // + // JobName is a required field + JobName *string `type:"string" required:"true"` + + // The retry strategy to use for failed jobs if the target is an AWS Batch job. + // The retry strategy is the number of times to retry the failed job execution. + // Valid values are 1–10. When you specify a retry strategy here, it overrides + // the retry strategy defined in the job definition. + RetryStrategy *BatchRetryStrategy `type:"structure"` +} + +// String returns the string representation +func (s BatchParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchParameters"} + if s.JobDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("JobDefinition")) + } + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArrayProperties sets the ArrayProperties field's value. +func (s *BatchParameters) SetArrayProperties(v *BatchArrayProperties) *BatchParameters { + s.ArrayProperties = v + return s +} + +// SetJobDefinition sets the JobDefinition field's value. +func (s *BatchParameters) SetJobDefinition(v string) *BatchParameters { + s.JobDefinition = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *BatchParameters) SetJobName(v string) *BatchParameters { + s.JobName = &v + return s +} + +// SetRetryStrategy sets the RetryStrategy field's value. +func (s *BatchParameters) SetRetryStrategy(v *BatchRetryStrategy) *BatchParameters { + s.RetryStrategy = v + return s +} + +// The retry strategy to use for failed jobs if the target is an AWS Batch job. +// If you specify a retry strategy here, it overrides the retry strategy defined +// in the job definition. +type BatchRetryStrategy struct { + _ struct{} `type:"structure"` + + // The number of times to attempt to retry, if the job fails. Valid values are + // 1–10. + Attempts *int64 `type:"integer"` +} + +// String returns the string representation +func (s BatchRetryStrategy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchRetryStrategy) GoString() string { + return s.String() +} + +// SetAttempts sets the Attempts field's value. +func (s *BatchRetryStrategy) SetAttempts(v int64) *BatchRetryStrategy { + s.Attempts = &v + return s +} + +// A JSON string that you can use to limit the event bus permissions that you're +// granting to only accounts that fulfill the condition. Currently, the only +// supported condition is membership in a certain AWS organization. The string +// must contain Type, Key, and Value fields. The Value field specifies the ID +// of the AWS organization. The following is an example value for Condition: +// +// '{"Type" : "StringEquals", "Key": "aws:PrincipalOrgID", "Value": "o-1234567890"}' +type Condition struct { + _ struct{} `type:"structure"` + + // The key for the condition. Currently, the only supported key is aws:PrincipalOrgID. 
+ // + // Key is a required field + Key *string `type:"string" required:"true"` + + // The type of condition. Currently, the only supported value is StringEquals. + // + // Type is a required field + Type *string `type:"string" required:"true"` + + // The value for the key. Currently, this must be the ID of the organization. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Condition) SetKey(v string) *Condition { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *Condition) SetType(v string) *Condition { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Condition) SetValue(v string) *Condition { + s.Value = &v + return s +} + +type CreateEventBusInput struct { + _ struct{} `type:"structure"` + + // If you're creating a partner event bus, this specifies the partner event + // source that the new event bus will be matched with. + EventSourceName *string `min:"1" type:"string"` + + // The name of the new event bus. + // + // The names of custom event buses can't contain the / character. You can't + // use the name default for a custom event bus because this name is already + // used for your account's default event bus. + // + // If this is a partner event bus, the name must exactly match the name of the + // partner event source that this event bus is matched to. This name will include + // the / character. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEventBusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventBusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEventBusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEventBusInput"} + if s.EventSourceName != nil && len(*s.EventSourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventSourceName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventSourceName sets the EventSourceName field's value. +func (s *CreateEventBusInput) SetEventSourceName(v string) *CreateEventBusInput { + s.EventSourceName = &v + return s +} + +// SetName sets the Name field's value. 
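+//
+// Editorial note, not part of the generated SDK: the chained setters above and
+// below allow fluent request construction. A minimal sketch, assuming an
+// initialized service client svc for this package and a hypothetical bus name:
+//
+//    input := (&CreateEventBusInput{}).SetName("my-app-bus")
+//    if err := input.Validate(); err != nil {
+//        // handle invalid parameters before the call
+//    }
+//    // out, err := svc.CreateEventBus(input)
+//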
+func (s *CreateEventBusInput) SetName(v string) *CreateEventBusInput { + s.Name = &v + return s +} + +type CreateEventBusOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the new event bus. + EventBusArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateEventBusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventBusOutput) GoString() string { + return s.String() +} + +// SetEventBusArn sets the EventBusArn field's value. +func (s *CreateEventBusOutput) SetEventBusArn(v string) *CreateEventBusOutput { + s.EventBusArn = &v + return s +} + +type CreatePartnerEventSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the customer who is permitted to create a matching + // partner event bus for this partner event source. + // + // Account is a required field + Account *string `min:"12" type:"string" required:"true"` + + // The name of the partner event source. This name must be unique and must be + // in the format partner_name/event_namespace/event_name . The AWS account that + // wants to use this partner event source must create a partner event bus with + // a name that matches the name of the partner event source. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePartnerEventSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePartnerEventSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePartnerEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePartnerEventSourceInput"} + if s.Account == nil { + invalidParams.Add(request.NewErrParamRequired("Account")) + } + if s.Account != nil && len(*s.Account) < 12 { + invalidParams.Add(request.NewErrParamMinLen("Account", 12)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccount sets the Account field's value. +func (s *CreatePartnerEventSourceInput) SetAccount(v string) *CreatePartnerEventSourceInput { + s.Account = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreatePartnerEventSourceInput) SetName(v string) *CreatePartnerEventSourceInput { + s.Name = &v + return s +} + +type CreatePartnerEventSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the partner event source. + EventSourceArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePartnerEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePartnerEventSourceOutput) GoString() string { + return s.String() +} + +// SetEventSourceArn sets the EventSourceArn field's value. +func (s *CreatePartnerEventSourceOutput) SetEventSourceArn(v string) *CreatePartnerEventSourceOutput { + s.EventSourceArn = &v + return s +} + +type DeactivateEventSourceInput struct { + _ struct{} `type:"structure"` + + // The name of the partner event source to deactivate. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateEventSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateEventSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeactivateEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeactivateEventSourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeactivateEventSourceInput) SetName(v string) *DeactivateEventSourceInput { + s.Name = &v + return s +} + +type DeactivateEventSourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateEventSourceOutput) GoString() string { + return s.String() +} + +type DeleteEventBusInput struct { + _ struct{} `type:"structure"` + + // The name of the event bus to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventBusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventBusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEventBusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEventBusInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteEventBusInput) SetName(v string) *DeleteEventBusInput { + s.Name = &v + return s +} + +type DeleteEventBusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventBusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventBusOutput) GoString() string { + return s.String() +} + +type DeletePartnerEventSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the AWS customer that the event source was created + // for. + // + // Account is a required field + Account *string `min:"12" type:"string" required:"true"` + + // The name of the event source to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePartnerEventSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePartnerEventSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePartnerEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePartnerEventSourceInput"} + if s.Account == nil { + invalidParams.Add(request.NewErrParamRequired("Account")) + } + if s.Account != nil && len(*s.Account) < 12 { + invalidParams.Add(request.NewErrParamMinLen("Account", 12)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccount sets the Account field's value. +func (s *DeletePartnerEventSourceInput) SetAccount(v string) *DeletePartnerEventSourceInput { + s.Account = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeletePartnerEventSourceInput) SetName(v string) *DeletePartnerEventSourceInput { + s.Name = &v + return s +} + +type DeletePartnerEventSourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePartnerEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePartnerEventSourceOutput) GoString() string { + return s.String() +} + +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` + + // If this is a managed rule, created by an AWS service on your behalf, you + // must specify Force as True to delete the rule. This parameter is ignored + // for rules that are not managed rules. You can check whether a rule is a managed + // rule by using DescribeRule or ListRules and checking the ManagedBy field + // of the response. + Force *bool `type:"boolean"` + + // The name of the rule. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventBusName sets the EventBusName field's value. +func (s *DeleteRuleInput) SetEventBusName(v string) *DeleteRuleInput { + s.EventBusName = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteRuleInput) SetForce(v bool) *DeleteRuleInput { + s.Force = &v + return s +} + +// SetName sets the Name field's value. 
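+//
+// Editorial note, not part of the generated SDK: a minimal sketch of deleting
+// a managed rule, which requires Force as described above, assuming an
+// initialized service client svc and a hypothetical rule name:
+//
+//    input := (&DeleteRuleInput{}).SetName("my-managed-rule").SetForce(true)
+//    // _, err := svc.DeleteRule(input)
+//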
+func (s *DeleteRuleInput) SetName(v string) *DeleteRuleInput { + s.Name = &v + return s +} + +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +type DescribeEventBusInput struct { + _ struct{} `type:"structure"` + + // The name of the event bus to show details for. If you omit this, the default + // event bus is displayed. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeEventBusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventBusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventBusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventBusInput"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DescribeEventBusInput) SetName(v string) *DescribeEventBusInput { + s.Name = &v + return s +} + +type DescribeEventBusOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the account permitted to write events to + // the current account. + Arn *string `type:"string"` + + // The name of the event bus. Currently, this is always default. + Name *string `type:"string"` + + // The policy that enables the external account to send events to your account. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventBusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventBusOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeEventBusOutput) SetArn(v string) *DescribeEventBusOutput { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeEventBusOutput) SetName(v string) *DescribeEventBusOutput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *DescribeEventBusOutput) SetPolicy(v string) *DescribeEventBusOutput { + s.Policy = &v + return s +} + +type DescribeEventSourceInput struct { _ struct{} `type:"structure"` - // Specifies whether the task's elastic network interface receives a public - // IP address. You can specify ENABLED only when LaunchType in EcsParameters - // is set to FARGATE. - AssignPublicIp *string `type:"string" enum:"AssignPublicIp"` + // The name of the partner event source to display the details of. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} - // Specifies the security groups associated with the task. These security groups - // must all be in the same VPC. You can specify as many as five security groups. - // If you do not specify a security group, the default security group for the - // VPC is used. - SecurityGroups []*string `type:"list"` +// String returns the string representation +func (s DescribeEventSourceInput) String() string { + return awsutil.Prettify(s) +} - // Specifies the subnets associated with the task. 
These subnets must all be - // in the same VPC. You can specify as many as 16 subnets. +// GoString returns the string representation +func (s DescribeEventSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventSourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DescribeEventSourceInput) SetName(v string) *DescribeEventSourceInput { + s.Name = &v + return s +} + +type DescribeEventSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the partner event source. + Arn *string `type:"string"` + + // The name of the SaaS partner that created the event source. + CreatedBy *string `type:"string"` + + // The date and time that the event source was created. + CreationTime *time.Time `type:"timestamp"` + + // The date and time that the event source will expire if you don't create a + // matching event bus. + ExpirationTime *time.Time `type:"timestamp"` + + // The name of the partner event source. + Name *string `type:"string"` + + // The state of the event source. If it's ACTIVE, you have already created a + // matching event bus for this event source, and that event bus is active. If + // it's PENDING, either you haven't yet created a matching event bus, or that + // event bus is deactivated. If it's DELETED, you have created a matching event + // bus, but the event source has since been deleted. + State *string `type:"string" enum:"EventSourceState"` +} + +// String returns the string representation +func (s DescribeEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSourceOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeEventSourceOutput) SetArn(v string) *DescribeEventSourceOutput { + s.Arn = &v + return s +} + +// SetCreatedBy sets the CreatedBy field's value. +func (s *DescribeEventSourceOutput) SetCreatedBy(v string) *DescribeEventSourceOutput { + s.CreatedBy = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeEventSourceOutput) SetCreationTime(v time.Time) *DescribeEventSourceOutput { + s.CreationTime = &v + return s +} + +// SetExpirationTime sets the ExpirationTime field's value. +func (s *DescribeEventSourceOutput) SetExpirationTime(v time.Time) *DescribeEventSourceOutput { + s.ExpirationTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeEventSourceOutput) SetName(v string) *DescribeEventSourceOutput { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeEventSourceOutput) SetState(v string) *DescribeEventSourceOutput { + s.State = &v + return s +} + +type DescribePartnerEventSourceInput struct { + _ struct{} `type:"structure"` + + // The name of the event source to display. 
// - // Subnets is a required field - Subnets []*string `type:"list" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s AwsVpcConfiguration) String() string { +func (s DescribePartnerEventSourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsVpcConfiguration) GoString() string { +func (s DescribePartnerEventSourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AwsVpcConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AwsVpcConfiguration"} - if s.Subnets == nil { - invalidParams.Add(request.NewErrParamRequired("Subnets")) +func (s *DescribePartnerEventSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePartnerEventSourceInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -1868,99 +3976,229 @@ func (s *AwsVpcConfiguration) Validate() error { return nil } -// SetAssignPublicIp sets the AssignPublicIp field's value. -func (s *AwsVpcConfiguration) SetAssignPublicIp(v string) *AwsVpcConfiguration { - s.AssignPublicIp = &v +// SetName sets the Name field's value. +func (s *DescribePartnerEventSourceInput) SetName(v string) *DescribePartnerEventSourceInput { + s.Name = &v return s } -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *AwsVpcConfiguration) SetSecurityGroups(v []*string) *AwsVpcConfiguration { - s.SecurityGroups = v +type DescribePartnerEventSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the event source. + Arn *string `type:"string"` + + // The name of the event source. + Name *string `type:"string"` +} + +// String returns the string representation +func (s DescribePartnerEventSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePartnerEventSourceOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribePartnerEventSourceOutput) SetArn(v string) *DescribePartnerEventSourceOutput { + s.Arn = &v return s } -// SetSubnets sets the Subnets field's value. -func (s *AwsVpcConfiguration) SetSubnets(v []*string) *AwsVpcConfiguration { - s.Subnets = v +// SetName sets the Name field's value. +func (s *DescribePartnerEventSourceOutput) SetName(v string) *DescribePartnerEventSourceOutput { + s.Name = &v return s } -// The array properties for the submitted job, such as the size of the array. -// The array size can be between 2 and 10,000. If you specify array properties -// for a job, it becomes an array job. This parameter is used only if the target -// is an AWS Batch job. -type BatchArrayProperties struct { +type DescribeRuleInput struct { _ struct{} `type:"structure"` - // The size of the array, if this is an array batch job. Valid values are integers - // between 2 and 10,000. - Size *int64 `type:"integer"` + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` + + // The name of the rule. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventBusName sets the EventBusName field's value. +func (s *DescribeRuleInput) SetEventBusName(v string) *DescribeRuleInput { + s.EventBusName = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeRuleInput) SetName(v string) *DescribeRuleInput { + s.Name = &v + return s +} + +type DescribeRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the rule. + Arn *string `min:"1" type:"string"` + + // The description of the rule. + Description *string `type:"string"` + + // The event bus associated with the rule. + EventBusName *string `min:"1" type:"string"` + + // The event pattern. For more information, see Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. + EventPattern *string `type:"string"` + + // If this is a managed rule, created by an AWS service on your behalf, this + // field displays the principal name of the AWS service that created the rule. + ManagedBy *string `min:"1" type:"string"` + + // The name of the rule. + Name *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the rule. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // Specifies whether the rule is enabled or disabled. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s DescribeRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeRuleOutput) SetArn(v string) *DescribeRuleOutput { + s.Arn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DescribeRuleOutput) SetDescription(v string) *DescribeRuleOutput { + s.Description = &v + return s +} + +// SetEventBusName sets the EventBusName field's value. +func (s *DescribeRuleOutput) SetEventBusName(v string) *DescribeRuleOutput { + s.EventBusName = &v + return s +} + +// SetEventPattern sets the EventPattern field's value. +func (s *DescribeRuleOutput) SetEventPattern(v string) *DescribeRuleOutput { + s.EventPattern = &v + return s +} + +// SetManagedBy sets the ManagedBy field's value. +func (s *DescribeRuleOutput) SetManagedBy(v string) *DescribeRuleOutput { + s.ManagedBy = &v + return s +} + +// SetName sets the Name field's value. 
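+//
+// Editorial note, not part of the generated SDK: a minimal sketch of reading
+// DescribeRuleOutput fields, assuming an initialized service client svc and a
+// hypothetical rule name; State carries the RuleState enum values "ENABLED"
+// and "DISABLED":
+//
+//    out, err := svc.DescribeRule((&DescribeRuleInput{}).SetName("my-rule"))
+//    if err == nil && out.State != nil {
+//        enabled := aws.StringValue(out.State) == "ENABLED"
+//        _ = enabled
+//    }
+//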
+func (s *DescribeRuleOutput) SetName(v string) *DescribeRuleOutput { + s.Name = &v + return s } -// String returns the string representation -func (s BatchArrayProperties) String() string { - return awsutil.Prettify(s) +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeRuleOutput) SetRoleArn(v string) *DescribeRuleOutput { + s.RoleArn = &v + return s } -// GoString returns the string representation -func (s BatchArrayProperties) GoString() string { - return s.String() +// SetScheduleExpression sets the ScheduleExpression field's value. +func (s *DescribeRuleOutput) SetScheduleExpression(v string) *DescribeRuleOutput { + s.ScheduleExpression = &v + return s } -// SetSize sets the Size field's value. -func (s *BatchArrayProperties) SetSize(v int64) *BatchArrayProperties { - s.Size = &v +// SetState sets the State field's value. +func (s *DescribeRuleOutput) SetState(v string) *DescribeRuleOutput { + s.State = &v return s } -// The custom parameters to be used when the target is an AWS Batch job. -type BatchParameters struct { +type DisableRuleInput struct { _ struct{} `type:"structure"` - // The array properties for the submitted job, such as the size of the array. - // The array size can be between 2 and 10,000. If you specify array properties - // for a job, it becomes an array job. This parameter is used only if the target - // is an AWS Batch job. - ArrayProperties *BatchArrayProperties `type:"structure"` - - // The ARN or name of the job definition to use if the event target is an AWS - // Batch job. This job definition must already exist. - // - // JobDefinition is a required field - JobDefinition *string `type:"string" required:"true"` + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` - // The name to use for this execution of the job, if the target is an AWS Batch - // job. + // The name of the rule. // - // JobName is a required field - JobName *string `type:"string" required:"true"` - - // The retry strategy to use for failed jobs, if the target is an AWS Batch - // job. The retry strategy is the number of times to retry the failed job execution. - // Valid values are 1–10. When you specify a retry strategy here, it overrides - // the retry strategy defined in the job definition. - RetryStrategy *BatchRetryStrategy `type:"structure"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s BatchParameters) String() string { +func (s DisableRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchParameters) GoString() string { +func (s DisableRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchParameters"} - if s.JobDefinition == nil { - invalidParams.Add(request.NewErrParamRequired("JobDefinition")) +func (s *DisableRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) } - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -1969,105 +4207,100 @@ func (s *BatchParameters) Validate() error { return nil } -// SetArrayProperties sets the ArrayProperties field's value. -func (s *BatchParameters) SetArrayProperties(v *BatchArrayProperties) *BatchParameters { - s.ArrayProperties = v - return s -} - -// SetJobDefinition sets the JobDefinition field's value. -func (s *BatchParameters) SetJobDefinition(v string) *BatchParameters { - s.JobDefinition = &v - return s -} - -// SetJobName sets the JobName field's value. -func (s *BatchParameters) SetJobName(v string) *BatchParameters { - s.JobName = &v +// SetEventBusName sets the EventBusName field's value. +func (s *DisableRuleInput) SetEventBusName(v string) *DisableRuleInput { + s.EventBusName = &v return s } -// SetRetryStrategy sets the RetryStrategy field's value. -func (s *BatchParameters) SetRetryStrategy(v *BatchRetryStrategy) *BatchParameters { - s.RetryStrategy = v +// SetName sets the Name field's value. +func (s *DisableRuleInput) SetName(v string) *DisableRuleInput { + s.Name = &v return s } -// The retry strategy to use for failed jobs, if the target is an AWS Batch -// job. If you specify a retry strategy here, it overrides the retry strategy -// defined in the job definition. -type BatchRetryStrategy struct { +type DisableRuleOutput struct { _ struct{} `type:"structure"` - - // The number of times to attempt to retry, if the job fails. Valid values are - // 1–10. - Attempts *int64 `type:"integer"` } // String returns the string representation -func (s BatchRetryStrategy) String() string { +func (s DisableRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchRetryStrategy) GoString() string { +func (s DisableRuleOutput) GoString() string { return s.String() } -// SetAttempts sets the Attempts field's value. -func (s *BatchRetryStrategy) SetAttempts(v int64) *BatchRetryStrategy { - s.Attempts = &v - return s -} - -// A JSON string which you can use to limit the event bus permissions you are -// granting to only accounts that fulfill the condition. Currently, the only -// supported condition is membership in a certain AWS organization. The string -// must contain Type, Key, and Value fields. The Value field specifies the ID -// of the AWS organization. Following is an example value for Condition: -// -// '{"Type" : "StringEquals", "Key": "aws:PrincipalOrgID", "Value": "o-1234567890"}' -type Condition struct { +// The custom parameters to be used when the target is an Amazon ECS task. +type EcsParameters struct { _ struct{} `type:"structure"` - // Specifies the key for the condition. Currently the only supported key is - // aws:PrincipalOrgID. + // Specifies an ECS task group for the task. The maximum length is 255 characters. 
+ Group *string `type:"string"` + + // Specifies the launch type on which your task is running. The launch type + // that you specify here must match one of the launch type (compatibilities) + // of the target task. The FARGATE value is supported only in the Regions where + // AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate + // on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS-Fargate.html) + // in the Amazon Elastic Container Service Developer Guide. + LaunchType *string `type:"string" enum:"LaunchType"` + + // Use this structure if the ECS task uses the awsvpc network mode. This structure + // specifies the VPC subnets and security groups associated with the task and + // whether a public IP address is to be used. This structure is required if + // LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks. // - // Key is a required field - Key *string `type:"string" required:"true"` + // If you specify NetworkConfiguration when the target ECS task doesn't use + // the awsvpc network mode, the task fails. + NetworkConfiguration *NetworkConfiguration `type:"structure"` - // Specifies the type of condition. Currently the only supported value is StringEquals. + // Specifies the platform version for the task. Specify only the numeric portion + // of the platform version, such as 1.1.0. // - // Type is a required field - Type *string `type:"string" required:"true"` + // This structure is used only if LaunchType is FARGATE. For more information + // about valid platform versions, see AWS Fargate Platform Versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) + // in the Amazon Elastic Container Service Developer Guide. + PlatformVersion *string `type:"string"` + + // The number of tasks to create based on TaskDefinition. The default is 1. + TaskCount *int64 `min:"1" type:"integer"` - // Specifies the value for the key. Currently, this must be the ID of the organization. + // The ARN of the task definition to use if the event target is an Amazon ECS + // task. // - // Value is a required field - Value *string `type:"string" required:"true"` + // TaskDefinitionArn is a required field + TaskDefinitionArn *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Condition) String() string { +func (s EcsParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Condition) GoString() string { +func (s EcsParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Condition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Condition"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s *EcsParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EcsParameters"} + if s.TaskCount != nil && *s.TaskCount < 1 { + invalidParams.Add(request.NewErrParamMinValue("TaskCount", 1)) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.TaskDefinitionArn == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinitionArn")) } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) + if s.TaskDefinitionArn != nil && len(*s.TaskDefinitionArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskDefinitionArn", 1)) + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -2076,33 +4309,48 @@ func (s *Condition) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *Condition) SetKey(v string) *Condition { - s.Key = &v +// SetGroup sets the Group field's value. +func (s *EcsParameters) SetGroup(v string) *EcsParameters { + s.Group = &v return s } -// SetType sets the Type field's value. -func (s *Condition) SetType(v string) *Condition { - s.Type = &v +// SetLaunchType sets the LaunchType field's value. +func (s *EcsParameters) SetLaunchType(v string) *EcsParameters { + s.LaunchType = &v return s } -// SetValue sets the Value field's value. -func (s *Condition) SetValue(v string) *Condition { - s.Value = &v +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *EcsParameters) SetNetworkConfiguration(v *NetworkConfiguration) *EcsParameters { + s.NetworkConfiguration = v return s } -type DeleteRuleInput struct { +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *EcsParameters) SetPlatformVersion(v string) *EcsParameters { + s.PlatformVersion = &v + return s +} + +// SetTaskCount sets the TaskCount field's value. +func (s *EcsParameters) SetTaskCount(v int64) *EcsParameters { + s.TaskCount = &v + return s +} + +// SetTaskDefinitionArn sets the TaskDefinitionArn field's value. +func (s *EcsParameters) SetTaskDefinitionArn(v string) *EcsParameters { + s.TaskDefinitionArn = &v + return s +} + +type EnableRuleInput struct { _ struct{} `type:"structure"` - // If this is a managed rule, created by an AWS service on your behalf, you - // must specify Force as True to delete the rule. This parameter is ignored - // for rules that are not managed rules. You can check whether a rule is a managed - // rule by using DescribeRule or ListRules and checking the ManagedBy field - // of the response. - Force *bool `type:"boolean"` + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` // The name of the rule. // @@ -2111,18 +4359,21 @@ type DeleteRuleInput struct { } // String returns the string representation -func (s DeleteRuleInput) String() string { +func (s EnableRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRuleInput) GoString() string { +func (s EnableRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} +func (s *EnableRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -2136,115 +4387,231 @@ func (s *DeleteRuleInput) Validate() error { return nil } -// SetForce sets the Force field's value. -func (s *DeleteRuleInput) SetForce(v bool) *DeleteRuleInput { - s.Force = &v +// SetEventBusName sets the EventBusName field's value. +func (s *EnableRuleInput) SetEventBusName(v string) *EnableRuleInput { + s.EventBusName = &v return s } // SetName sets the Name field's value. -func (s *DeleteRuleInput) SetName(v string) *DeleteRuleInput { +func (s *EnableRuleInput) SetName(v string) *EnableRuleInput { s.Name = &v return s } -type DeleteRuleOutput struct { +type EnableRuleOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteRuleOutput) String() string { +func (s EnableRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRuleOutput) GoString() string { +func (s EnableRuleOutput) GoString() string { return s.String() } -type DescribeEventBusInput struct { +// An event bus receives events from a source and routes them to rules associated +// with that event bus. Your account's default event bus receives rules from +// AWS services. A custom event bus can receive rules from AWS services as well +// as your custom applications and services. A partner event bus receives events +// from an event source created by an SaaS partner. These events come from the +// partners services or applications. +type EventBus struct { _ struct{} `type:"structure"` + + // The ARN of the event bus. + Arn *string `type:"string"` + + // The name of the event bus. + Name *string `type:"string"` + + // The permissions policy of the event bus, describing which other AWS accounts + // can write events to this event bus. + Policy *string `type:"string"` } // String returns the string representation -func (s DescribeEventBusInput) String() string { +func (s EventBus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEventBusInput) GoString() string { +func (s EventBus) GoString() string { return s.String() } -type DescribeEventBusOutput struct { +// SetArn sets the Arn field's value. +func (s *EventBus) SetArn(v string) *EventBus { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *EventBus) SetName(v string) *EventBus { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *EventBus) SetPolicy(v string) *EventBus { + s.Policy = &v + return s +} + +// A partner event source is created by an SaaS partner. If a customer creates +// a partner event bus that matches this event source, that AWS account can +// receive events from the partner's applications or services. +type EventSource struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the account permitted to write events to - // the current account. + // The ARN of the event source. Arn *string `type:"string"` - // The name of the event bus. Currently, this is always default. + // The name of the partner that created the event source. 
+ CreatedBy *string `type:"string"`
+
+ // The date and time when the event source was created.
+ CreationTime *time.Time `type:"timestamp"`
+
+ // The date and time when the event source will expire if the AWS account doesn't
+ // create a matching event bus for it.
+ ExpirationTime *time.Time `type:"timestamp"`
+
+ // The name of the event source.
 Name *string `type:"string"`

- // The policy that enables the external account to send events to your account.
- Policy *string `type:"string"`
+ // The state of the event source. If it's ACTIVE, you have already created a
+ // matching event bus for this event source, and that event bus is active. If
+ // it's PENDING, either you haven't yet created a matching event bus, or that
+ // event bus is deactivated. If it's DELETED, you have created a matching event
+ // bus, but the event source has since been deleted.
+ State *string `type:"string" enum:"EventSourceState"`
}

// String returns the string representation
-func (s DescribeEventBusOutput) String() string {
+func (s EventSource) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s DescribeEventBusOutput) GoString() string {
+func (s EventSource) GoString() string {
 return s.String()
}

// SetArn sets the Arn field's value.
-func (s *DescribeEventBusOutput) SetArn(v string) *DescribeEventBusOutput {
+func (s *EventSource) SetArn(v string) *EventSource {
 s.Arn = &v
 return s
}

+// SetCreatedBy sets the CreatedBy field's value.
+func (s *EventSource) SetCreatedBy(v string) *EventSource {
+ s.CreatedBy = &v
+ return s
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *EventSource) SetCreationTime(v time.Time) *EventSource {
+ s.CreationTime = &v
+ return s
+}
+
+// SetExpirationTime sets the ExpirationTime field's value.
+func (s *EventSource) SetExpirationTime(v time.Time) *EventSource {
+ s.ExpirationTime = &v
+ return s
+}
+
// SetName sets the Name field's value.
-func (s *DescribeEventBusOutput) SetName(v string) *DescribeEventBusOutput {
+func (s *EventSource) SetName(v string) *EventSource {
 s.Name = &v
 return s
}

-// SetPolicy sets the Policy field's value.
-func (s *DescribeEventBusOutput) SetPolicy(v string) *DescribeEventBusOutput {
- s.Policy = &v
+// SetState sets the State field's value.
+func (s *EventSource) SetState(v string) *EventSource {
+ s.State = &v
 return s
}

-type DescribeRuleInput struct {
+// Contains the parameters needed for you to provide custom input to a target
+// based on one or more pieces of data extracted from the event.
+type InputTransformer struct {
 _ struct{} `type:"structure"`

- // The name of the rule.
+ // Map of JSON paths to be extracted from the event. You can then insert these
+ // in the template in InputTemplate to produce the output to be sent to the
+ // target.
 //
- // Name is a required field
- Name *string `min:"1" type:"string" required:"true"`
+ // InputPathsMap is an array of key-value pairs, where each value is a valid JSON
+ // path. You can have as many as 10 key-value pairs. You must use JSON dot notation,
+ // not bracket notation.
+ //
+ // The keys can't start with "AWS".
+ InputPathsMap map[string]*string `type:"map"`
+
+ // Input template where you specify placeholders that will be filled with the
+ // values of the keys from InputPathsMap to customize the data sent to the target.
+ // Enclose each InputPathsMaps value in brackets: <value>. The InputTemplate
+ // must be valid JSON.
+ //
+ // If InputTemplate is a JSON object (surrounded by curly braces), the following
+ // restrictions apply:
+ //
+ // * The placeholder can't be used as an object key
+ //
+ // * Object values can't include quote marks
+ //
+ // The following example shows the syntax for using InputPathsMap and InputTemplate.
+ //
+ // "InputTransformer":
+ //
+ // {
+ //
+ // "InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},
+ //
+ // "InputTemplate": "<instance> is in state <status>"
+ //
+ // }
+ //
+ // To have the InputTemplate include quote marks within a JSON string, escape
+ // each quote mark with a slash, as in the following example:
+ //
+ // "InputTransformer":
+ //
+ // {
+ //
+ // "InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"},
+ //
+ // "InputTemplate": "<instance> is in state \"<status>\""
+ //
+ // }
+ //
+ // InputTemplate is a required field
+ InputTemplate *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
-func (s DescribeRuleInput) String() string {
+func (s InputTransformer) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s DescribeRuleInput) GoString() string {
+func (s InputTransformer) GoString() string {
 return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeRuleInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeRuleInput"}
- if s.Name == nil {
- invalidParams.Add(request.NewErrParamRequired("Name"))
+func (s *InputTransformer) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InputTransformer"}
+ if s.InputTemplate == nil {
+ invalidParams.Add(request.NewErrParamRequired("InputTemplate"))
 }
- if s.Name != nil && len(*s.Name) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ if s.InputTemplate != nil && len(*s.InputTemplate) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InputTemplate", 1))
 }

 if invalidParams.Len() > 0 {
@@ -2253,127 +4620,99 @@ func (s *DescribeRuleInput) Validate() error {
 return nil
}

-// SetName sets the Name field's value.
-func (s *DescribeRuleInput) SetName(v string) *DescribeRuleInput {
- s.Name = &v
+// SetInputPathsMap sets the InputPathsMap field's value.
+func (s *InputTransformer) SetInputPathsMap(v map[string]*string) *InputTransformer {
+ s.InputPathsMap = v
 return s
}

-type DescribeRuleOutput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the rule.
- Arn *string `min:"1" type:"string"`
-
- // The description of the rule.
- Description *string `type:"string"`
-
- // The event pattern. For more information, see Events and Event Patterns (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html)
- // in the Amazon CloudWatch Events User Guide.
- EventPattern *string `type:"string"`
-
- // If this is a managed rule, created by an AWS service on your behalf, this
- // field displays the principal name of the AWS service that created the rule.
- ManagedBy *string `min:"1" type:"string"`
-
- // The name of the rule.
- Name *string `min:"1" type:"string"`
-
- // The Amazon Resource Name (ARN) of the IAM role associated with the rule.
- RoleArn *string `min:"1" type:"string"`
+// SetInputTemplate sets the InputTemplate field's value.
+func (s *InputTransformer) SetInputTemplate(v string) *InputTransformer {
+ s.InputTemplate = &v
+ return s
+}

- // The scheduling expression. For example, "cron(0 20 * * ?
*)", "rate(5 minutes)". - ScheduleExpression *string `type:"string"` +// This object enables you to specify a JSON path to extract from the event +// and use as the partition key for the Amazon Kinesis data stream so that you +// can control the shard that the event goes to. If you don't include this parameter, +// the default is to use the eventId as the partition key. +type KinesisParameters struct { + _ struct{} `type:"structure"` - // Specifies whether the rule is enabled or disabled. - State *string `type:"string" enum:"RuleState"` + // The JSON path to be extracted from the event and used as the partition key. + // For more information, see Amazon Kinesis Streams Key Concepts (https://docs.aws.amazon.com/streams/latest/dev/key-concepts.html#partition-key) + // in the Amazon Kinesis Streams Developer Guide. + // + // PartitionKeyPath is a required field + PartitionKeyPath *string `type:"string" required:"true"` } // String returns the string representation -func (s DescribeRuleOutput) String() string { +func (s KinesisParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeRuleOutput) GoString() string { +func (s KinesisParameters) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DescribeRuleOutput) SetArn(v string) *DescribeRuleOutput { - s.Arn = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *DescribeRuleOutput) SetDescription(v string) *DescribeRuleOutput { - s.Description = &v - return s -} - -// SetEventPattern sets the EventPattern field's value. -func (s *DescribeRuleOutput) SetEventPattern(v string) *DescribeRuleOutput { - s.EventPattern = &v - return s -} - -// SetManagedBy sets the ManagedBy field's value. -func (s *DescribeRuleOutput) SetManagedBy(v string) *DescribeRuleOutput { - s.ManagedBy = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *KinesisParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KinesisParameters"} + if s.PartitionKeyPath == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionKeyPath")) + } -// SetName sets the Name field's value. -func (s *DescribeRuleOutput) SetName(v string) *DescribeRuleOutput { - s.Name = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRoleArn sets the RoleArn field's value. -func (s *DescribeRuleOutput) SetRoleArn(v string) *DescribeRuleOutput { - s.RoleArn = &v +// SetPartitionKeyPath sets the PartitionKeyPath field's value. +func (s *KinesisParameters) SetPartitionKeyPath(v string) *KinesisParameters { + s.PartitionKeyPath = &v return s } -// SetScheduleExpression sets the ScheduleExpression field's value. -func (s *DescribeRuleOutput) SetScheduleExpression(v string) *DescribeRuleOutput { - s.ScheduleExpression = &v - return s -} +type ListEventBusesInput struct { + _ struct{} `type:"structure"` -// SetState sets the State field's value. -func (s *DescribeRuleOutput) SetState(v string) *DescribeRuleOutput { - s.State = &v - return s -} + // Specifying this limits the number of results returned by this operation. + // The operation also returns a NextToken that you can use in a subsequent operation + // to retrieve the next set of results. 
+ Limit *int64 `min:"1" type:"integer"` -type DisableRuleInput struct { - _ struct{} `type:"structure"` + // Specifying this limits the results to only those event buses with names that + // start with the specified prefix. + NamePrefix *string `min:"1" type:"string"` - // The name of the rule. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The token returned by a previous call to retrieve the next set of results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s DisableRuleInput) String() string { +func (s ListEventBusesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DisableRuleInput) GoString() string { +func (s ListEventBusesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DisableRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableRuleInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListEventBusesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventBusesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.NamePrefix != nil && len(*s.NamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -2382,94 +4721,94 @@ func (s *DisableRuleInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DisableRuleInput) SetName(v string) *DisableRuleInput { - s.Name = &v +// SetLimit sets the Limit field's value. +func (s *ListEventBusesInput) SetLimit(v int64) *ListEventBusesInput { + s.Limit = &v return s } -type DisableRuleOutput struct { +// SetNamePrefix sets the NamePrefix field's value. +func (s *ListEventBusesInput) SetNamePrefix(v string) *ListEventBusesInput { + s.NamePrefix = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventBusesInput) SetNextToken(v string) *ListEventBusesInput { + s.NextToken = &v + return s +} + +type ListEventBusesOutput struct { _ struct{} `type:"structure"` + + // This list of event buses. + EventBuses []*EventBus `type:"list"` + + // A token you can use in a subsequent operation to retrieve the next set of + // results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s DisableRuleOutput) String() string { +func (s ListEventBusesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DisableRuleOutput) GoString() string { +func (s ListEventBusesOutput) GoString() string { return s.String() } -// The custom parameters to be used when the target is an Amazon ECS task. -type EcsParameters struct { - _ struct{} `type:"structure"` - - // Specifies an ECS task group for the task. The maximum length is 255 characters. - Group *string `type:"string"` - - // Specifies the launch type on which your task is running. The launch type - // that you specify here must match one of the launch type (compatibilities) - // of the target task. 
The FARGATE value is supported only in the Regions where - // AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate - // on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS-Fargate.html) - // in the Amazon Elastic Container Service Developer Guide. - LaunchType *string `type:"string" enum:"LaunchType"` +// SetEventBuses sets the EventBuses field's value. +func (s *ListEventBusesOutput) SetEventBuses(v []*EventBus) *ListEventBusesOutput { + s.EventBuses = v + return s +} - // Use this structure if the ECS task uses the awsvpc network mode. This structure - // specifies the VPC subnets and security groups associated with the task, and - // whether a public IP address is to be used. This structure is required if - // LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks. - // - // If you specify NetworkConfiguration when the target ECS task does not use - // the awsvpc network mode, the task fails. - NetworkConfiguration *NetworkConfiguration `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListEventBusesOutput) SetNextToken(v string) *ListEventBusesOutput { + s.NextToken = &v + return s +} - // Specifies the platform version for the task. Specify only the numeric portion - // of the platform version, such as 1.1.0. - // - // This structure is used only if LaunchType is FARGATE. For more information - // about valid platform versions, see AWS Fargate Platform Versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) - // in the Amazon Elastic Container Service Developer Guide. - PlatformVersion *string `type:"string"` +type ListEventSourcesInput struct { + _ struct{} `type:"structure"` - // The number of tasks to create based on TaskDefinition. The default is 1. - TaskCount *int64 `min:"1" type:"integer"` + // Specifying this limits the number of results returned by this operation. + // The operation also returns a NextToken that you can use in a subsequent operation + // to retrieve the next set of results. + Limit *int64 `min:"1" type:"integer"` - // The ARN of the task definition to use if the event target is an Amazon ECS - // task. - // - // TaskDefinitionArn is a required field - TaskDefinitionArn *string `min:"1" type:"string" required:"true"` + // Specifying this limits the results to only those partner event sources with + // names that start with the specified prefix. + NamePrefix *string `min:"1" type:"string"` + + // The token returned by a previous call to retrieve the next set of results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s EcsParameters) String() string { +func (s ListEventSourcesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EcsParameters) GoString() string { +func (s ListEventSourcesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *EcsParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EcsParameters"} - if s.TaskCount != nil && *s.TaskCount < 1 { - invalidParams.Add(request.NewErrParamMinValue("TaskCount", 1)) - } - if s.TaskDefinitionArn == nil { - invalidParams.Add(request.NewErrParamRequired("TaskDefinitionArn")) +func (s *ListEventSourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventSourcesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.TaskDefinitionArn != nil && len(*s.TaskDefinitionArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskDefinitionArn", 1)) + if s.NamePrefix != nil && len(*s.NamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamePrefix", 1)) } - if s.NetworkConfiguration != nil { - if err := s.NetworkConfiguration.Validate(); err != nil { - invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) - } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -2478,69 +4817,99 @@ func (s *EcsParameters) Validate() error { return nil } -// SetGroup sets the Group field's value. -func (s *EcsParameters) SetGroup(v string) *EcsParameters { - s.Group = &v +// SetLimit sets the Limit field's value. +func (s *ListEventSourcesInput) SetLimit(v int64) *ListEventSourcesInput { + s.Limit = &v return s } -// SetLaunchType sets the LaunchType field's value. -func (s *EcsParameters) SetLaunchType(v string) *EcsParameters { - s.LaunchType = &v +// SetNamePrefix sets the NamePrefix field's value. +func (s *ListEventSourcesInput) SetNamePrefix(v string) *ListEventSourcesInput { + s.NamePrefix = &v return s } -// SetNetworkConfiguration sets the NetworkConfiguration field's value. -func (s *EcsParameters) SetNetworkConfiguration(v *NetworkConfiguration) *EcsParameters { - s.NetworkConfiguration = v +// SetNextToken sets the NextToken field's value. +func (s *ListEventSourcesInput) SetNextToken(v string) *ListEventSourcesInput { + s.NextToken = &v return s } -// SetPlatformVersion sets the PlatformVersion field's value. -func (s *EcsParameters) SetPlatformVersion(v string) *EcsParameters { - s.PlatformVersion = &v - return s +type ListEventSourcesOutput struct { + _ struct{} `type:"structure"` + + // The list of event sources. + EventSources []*EventSource `type:"list"` + + // A token you can use in a subsequent operation to retrieve the next set of + // results. + NextToken *string `min:"1" type:"string"` } -// SetTaskCount sets the TaskCount field's value. -func (s *EcsParameters) SetTaskCount(v int64) *EcsParameters { - s.TaskCount = &v +// String returns the string representation +func (s ListEventSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventSourcesOutput) GoString() string { + return s.String() +} + +// SetEventSources sets the EventSources field's value. +func (s *ListEventSourcesOutput) SetEventSources(v []*EventSource) *ListEventSourcesOutput { + s.EventSources = v return s } -// SetTaskDefinitionArn sets the TaskDefinitionArn field's value. -func (s *EcsParameters) SetTaskDefinitionArn(v string) *EcsParameters { - s.TaskDefinitionArn = &v +// SetNextToken sets the NextToken field's value. 
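The Limit/NextToken pair on these List* inputs follows the SDK's manual pagination pattern. A minimal sketch, assuming the ListEventSources client method that pairs with these types and that EventSource exposes a Name field:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	input := &cloudwatchevents.ListEventSourcesInput{Limit: aws.Int64(50)}
	for {
		page, err := svc.ListEventSources(input)
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, es := range page.EventSources {
			fmt.Println(aws.StringValue(es.Name))
		}
		if page.NextToken == nil {
			break // no further pages
		}
		input.NextToken = page.NextToken // resume where the last page ended
	}
}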
+func (s *ListEventSourcesOutput) SetNextToken(v string) *ListEventSourcesOutput { + s.NextToken = &v return s } -type EnableRuleInput struct { +type ListPartnerEventSourceAccountsInput struct { _ struct{} `type:"structure"` - // The name of the rule. + // The name of the partner event source to display account information about. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // EventSourceName is a required field + EventSourceName *string `min:"1" type:"string" required:"true"` + + // Specifying this limits the number of results returned by this operation. + // The operation also returns a NextToken that you can use in a subsequent operation + // to retrieve the next set of results. + Limit *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to this operation. Specifying this + // retrieves the next set of results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s EnableRuleInput) String() string { +func (s ListPartnerEventSourceAccountsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnableRuleInput) GoString() string { +func (s ListPartnerEventSourceAccountsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *EnableRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableRuleInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListPartnerEventSourceAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartnerEventSourceAccountsInput"} + if s.EventSourceName == nil { + invalidParams.Add(request.NewErrParamRequired("EventSourceName")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.EventSourceName != nil && len(*s.EventSourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventSourceName", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -2549,101 +4918,100 @@ func (s *EnableRuleInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *EnableRuleInput) SetName(v string) *EnableRuleInput { - s.Name = &v +// SetEventSourceName sets the EventSourceName field's value. +func (s *ListPartnerEventSourceAccountsInput) SetEventSourceName(v string) *ListPartnerEventSourceAccountsInput { + s.EventSourceName = &v return s } -type EnableRuleOutput struct { +// SetLimit sets the Limit field's value. +func (s *ListPartnerEventSourceAccountsInput) SetLimit(v int64) *ListPartnerEventSourceAccountsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPartnerEventSourceAccountsInput) SetNextToken(v string) *ListPartnerEventSourceAccountsInput { + s.NextToken = &v + return s +} + +type ListPartnerEventSourceAccountsOutput struct { _ struct{} `type:"structure"` + + // A token you can use in a subsequent operation to retrieve the next set of + // results. + NextToken *string `min:"1" type:"string"` + + // The list of partner event sources returned by the operation. 
+	PartnerEventSourceAccounts []*PartnerEventSourceAccount `type:"list"` } // String returns the string representation -func (s EnableRuleOutput) String() string { +func (s ListPartnerEventSourceAccountsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnableRuleOutput) GoString() string { +func (s ListPartnerEventSourceAccountsOutput) GoString() string { return s.String() } -// Contains the parameters needed for you to provide custom input to a target -// based on one or more pieces of data extracted from the event. -type InputTransformer struct { +// SetNextToken sets the NextToken field's value. +func (s *ListPartnerEventSourceAccountsOutput) SetNextToken(v string) *ListPartnerEventSourceAccountsOutput { + s.NextToken = &v + return s +} + +// SetPartnerEventSourceAccounts sets the PartnerEventSourceAccounts field's value. +func (s *ListPartnerEventSourceAccountsOutput) SetPartnerEventSourceAccounts(v []*PartnerEventSourceAccount) *ListPartnerEventSourceAccountsOutput { + s.PartnerEventSourceAccounts = v + return s +} + +type ListPartnerEventSourcesInput struct { _ struct{} `type:"structure"` - // Map of JSON paths to be extracted from the event. You can then insert these - // in the template in InputTemplate to produce the output you want to be sent - // to the target. - // - // InputPathsMap is an array key-value pairs, where each value is a valid JSON - // path. You can have as many as 10 key-value pairs. You must use JSON dot notation, - // not bracket notation. - // - // The keys cannot start with "AWS." - InputPathsMap map[string]*string `type:"map"` + // Specifying this limits the number of results returned by this operation. The + // operation also returns a NextToken that you can use in a subsequent operation + // to retrieve the next set of results. + Limit *int64 `min:"1" type:"integer"` - // Input template where you specify placeholders that will be filled with the - // values of the keys from InputPathsMap to customize the data sent to the target. - // Enclose each InputPathsMaps value in brackets: <value> The InputTemplate - // must be valid JSON. - // - // If InputTemplate is a JSON object (surrounded by curly braces), the following - // restrictions apply: - // - // * The placeholder cannot be used as an object key. - // - // * Object values cannot include quote marks. - // - // The following example shows the syntax for using InputPathsMap and InputTemplate. + // If you specify this, the results are limited to only those partner event + // sources that start with the string you specify. // - // "InputTransformer": - // - // { - // - // "InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"}, - // - // "InputTemplate": "<instance> is in state <status>" - // - // } - // - // To have the InputTemplate include quote marks within a JSON string, escape - // each quote marks with a slash, as in the following example: - // - // "InputTransformer": - // - // { - // - // "InputPathsMap": {"instance": "$.detail.instance","status": "$.detail.status"}, - // - // "InputTemplate": "<instance> is in state \"<status>\"" - // - // } - // - // InputTemplate is a required field - InputTemplate *string `min:"1" type:"string" required:"true"` + // NamePrefix is a required field + NamePrefix *string `min:"1" type:"string" required:"true"` + + // The token returned by a previous call to this operation. Specifying this + // retrieves the next set of results.
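The InputTransformer documentation above pairs extracted JSON paths with <key> placeholders in the template. A minimal sketch of the same example in Go; the target ID and ARN are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	// Mirrors the doc comment's JSON example: extract two event fields and
	// splice them into the template as <instance> and <status>.
	t := &cloudwatchevents.Target{
		Id:  aws.String("transformed-target"),                           // hypothetical target ID
		Arn: aws.String("arn:aws:sns:us-east-1:111122223333:ops-topic"), // hypothetical topic ARN
		InputTransformer: &cloudwatchevents.InputTransformer{
			InputPathsMap: map[string]*string{
				"instance": aws.String("$.detail.instance"),
				"status":   aws.String("$.detail.status"),
			},
			// Quoted because the rendered input must itself be valid JSON.
			InputTemplate: aws.String(`"<instance> is in state <status>"`),
		},
	}
	fmt.Println(t.Validate() == nil) // true: required fields are present
}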
+ NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s InputTransformer) String() string { +func (s ListPartnerEventSourcesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputTransformer) GoString() string { +func (s ListPartnerEventSourcesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InputTransformer) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputTransformer"} - if s.InputTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("InputTemplate")) +func (s *ListPartnerEventSourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartnerEventSourcesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.InputTemplate != nil && len(*s.InputTemplate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InputTemplate", 1)) + if s.NamePrefix == nil { + invalidParams.Add(request.NewErrParamRequired("NamePrefix")) + } + if s.NamePrefix != nil && len(*s.NamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -2652,65 +5020,64 @@ func (s *InputTransformer) Validate() error { return nil } -// SetInputPathsMap sets the InputPathsMap field's value. -func (s *InputTransformer) SetInputPathsMap(v map[string]*string) *InputTransformer { - s.InputPathsMap = v +// SetLimit sets the Limit field's value. +func (s *ListPartnerEventSourcesInput) SetLimit(v int64) *ListPartnerEventSourcesInput { + s.Limit = &v return s } -// SetInputTemplate sets the InputTemplate field's value. -func (s *InputTransformer) SetInputTemplate(v string) *InputTransformer { - s.InputTemplate = &v +// SetNamePrefix sets the NamePrefix field's value. +func (s *ListPartnerEventSourcesInput) SetNamePrefix(v string) *ListPartnerEventSourcesInput { + s.NamePrefix = &v return s } -// This object enables you to specify a JSON path to extract from the event -// and use as the partition key for the Amazon Kinesis data stream, so that -// you can control the shard to which the event goes. If you do not include -// this parameter, the default is to use the eventId as the partition key. -type KinesisParameters struct { +// SetNextToken sets the NextToken field's value. +func (s *ListPartnerEventSourcesInput) SetNextToken(v string) *ListPartnerEventSourcesInput { + s.NextToken = &v + return s +} + +type ListPartnerEventSourcesOutput struct { _ struct{} `type:"structure"` - // The JSON path to be extracted from the event and used as the partition key. - // For more information, see Amazon Kinesis Streams Key Concepts (https://docs.aws.amazon.com/streams/latest/dev/key-concepts.html#partition-key) - // in the Amazon Kinesis Streams Developer Guide. - // - // PartitionKeyPath is a required field - PartitionKeyPath *string `type:"string" required:"true"` + // A token you can use in a subsequent operation to retrieve the next set of + // results. + NextToken *string `min:"1" type:"string"` + + // The list of partner event sources returned by the operation. 
+ PartnerEventSources []*PartnerEventSource `type:"list"` } // String returns the string representation -func (s KinesisParameters) String() string { +func (s ListPartnerEventSourcesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s KinesisParameters) GoString() string { +func (s ListPartnerEventSourcesOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *KinesisParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "KinesisParameters"} - if s.PartitionKeyPath == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionKeyPath")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListPartnerEventSourcesOutput) SetNextToken(v string) *ListPartnerEventSourcesOutput { + s.NextToken = &v + return s } -// SetPartitionKeyPath sets the PartitionKeyPath field's value. -func (s *KinesisParameters) SetPartitionKeyPath(v string) *KinesisParameters { - s.PartitionKeyPath = &v +// SetPartnerEventSources sets the PartnerEventSources field's value. +func (s *ListPartnerEventSourcesOutput) SetPartnerEventSources(v []*PartnerEventSource) *ListPartnerEventSourcesOutput { + s.PartnerEventSources = v return s } type ListRuleNamesByTargetInput struct { _ struct{} `type:"structure"` + // Limits the results to show only the rules associated with the specified event + // bus. + EventBusName *string `min:"1" type:"string"` + // The maximum number of results to return. Limit *int64 `min:"1" type:"integer"` @@ -2736,6 +5103,9 @@ func (s ListRuleNamesByTargetInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ListRuleNamesByTargetInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListRuleNamesByTargetInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -2755,6 +5125,12 @@ func (s *ListRuleNamesByTargetInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *ListRuleNamesByTargetInput) SetEventBusName(v string) *ListRuleNamesByTargetInput { + s.EventBusName = &v + return s +} + // SetLimit sets the Limit field's value. func (s *ListRuleNamesByTargetInput) SetLimit(v int64) *ListRuleNamesByTargetInput { s.Limit = &v @@ -2809,6 +5185,10 @@ func (s *ListRuleNamesByTargetOutput) SetRuleNames(v []*string) *ListRuleNamesBy type ListRulesInput struct { _ struct{} `type:"structure"` + // Limits the results to show only the rules associated with the specified event + // bus. + EventBusName *string `min:"1" type:"string"` + // The maximum number of results to return. Limit *int64 `min:"1" type:"integer"` @@ -2832,6 +5212,9 @@ func (s ListRulesInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
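The EventBusName field added above scopes rule listing to a single bus. A minimal sketch listing the rules on a hypothetical custom bus; omit EventBusName to list rules on the default bus:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	out, err := svc.ListRules(&cloudwatchevents.ListRulesInput{
		EventBusName: aws.String("my-custom-bus"), // hypothetical bus name
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, r := range out.Rules {
		fmt.Println(aws.StringValue(r.Name), aws.StringValue(r.State))
	}
}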
func (s *ListRulesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListRulesInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -2848,6 +5231,12 @@ func (s *ListRulesInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *ListRulesInput) SetEventBusName(v string) *ListRulesInput { + s.EventBusName = &v + return s +} + // SetLimit sets the Limit field's value. func (s *ListRulesInput) SetLimit(v int64) *ListRulesInput { s.Limit = &v @@ -2902,7 +5291,7 @@ func (s *ListRulesOutput) SetRules(v []*Rule) *ListRulesOutput { type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch Events rule for which you want to view tags. + // The ARN of the rule for which you want to view tags. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -2943,7 +5332,7 @@ func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResource type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The list of tag keys and values associated with the rule you specified + // The list of tag keys and values associated with the rule that you specified. Tags []*Tag `type:"list"` } @@ -2966,6 +5355,10 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput type ListTargetsByRuleInput struct { _ struct{} `type:"structure"` + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` + // The maximum number of results to return. Limit *int64 `min:"1" type:"integer"` @@ -2991,6 +5384,9 @@ func (s ListTargetsByRuleInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ListTargetsByRuleInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListTargetsByRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -3010,6 +5406,12 @@ func (s *ListTargetsByRuleInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *ListTargetsByRuleInput) SetEventBusName(v string) *ListTargetsByRuleInput { + s.EventBusName = &v + return s +} + // SetLimit sets the Limit field's value. func (s *ListTargetsByRuleInput) SetLimit(v int64) *ListTargetsByRuleInput { s.Limit = &v @@ -3066,7 +5468,7 @@ type NetworkConfiguration struct { _ struct{} `type:"structure"` // Use this structure to specify the VPC subnets and security groups for the - // task, and whether a public IP address is to be used. This structure is relevant + // task and whether a public IP address is to be used. This structure is relevant // only for ECS tasks that use the awsvpc network mode. AwsvpcConfiguration *AwsVpcConfiguration `locationName:"awsvpcConfiguration" type:"structure"` } @@ -3102,6 +5504,97 @@ func (s *NetworkConfiguration) SetAwsvpcConfiguration(v *AwsVpcConfiguration) *N return s } +// A partner event source is created by an SaaS partner. 
If a customer creates +// a partner event bus that matches this event source, that AWS account can +// receive events from the partner's applications or services. +type PartnerEventSource struct { + _ struct{} `type:"structure"` + + // The ARN of the partner event source. + Arn *string `type:"string"` + + // The name of the partner event source. + Name *string `type:"string"` +} + +// String returns the string representation +func (s PartnerEventSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartnerEventSource) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *PartnerEventSource) SetArn(v string) *PartnerEventSource { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *PartnerEventSource) SetName(v string) *PartnerEventSource { + s.Name = &v + return s +} + +// The AWS account that a partner event source has been offered to. +type PartnerEventSourceAccount struct { + _ struct{} `type:"structure"` + + // The AWS account ID that the partner event source was offered to. + Account *string `min:"12" type:"string"` + + // The date and time when the event source was created. + CreationTime *time.Time `type:"timestamp"` + + // The date and time when the event source will expire if the AWS account doesn't + // create a matching event bus for it. + ExpirationTime *time.Time `type:"timestamp"` + + // The state of the event source. If it's ACTIVE, you have already created a + // matching event bus for this event source, and that event bus is active. If + // it's PENDING, either you haven't yet created a matching event bus, or that + // event bus is deactivated. If it's DELETED, you have created a matching event + // bus, but the event source has since been deleted. + State *string `type:"string" enum:"EventSourceState"` +} + +// String returns the string representation +func (s PartnerEventSourceAccount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartnerEventSourceAccount) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *PartnerEventSourceAccount) SetAccount(v string) *PartnerEventSourceAccount { + s.Account = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *PartnerEventSourceAccount) SetCreationTime(v time.Time) *PartnerEventSourceAccount { + s.CreationTime = &v + return s +} + +// SetExpirationTime sets the ExpirationTime field's value. +func (s *PartnerEventSourceAccount) SetExpirationTime(v time.Time) *PartnerEventSourceAccount { + s.ExpirationTime = &v + return s +} + +// SetState sets the State field's value. 
+func (s *PartnerEventSourceAccount) SetState(v string) *PartnerEventSourceAccount { + s.State = &v + return s +} + type PutEventsInput struct { _ struct{} `type:"structure"` @@ -3132,6 +5625,16 @@ func (s *PutEventsInput) Validate() error { if s.Entries != nil && len(s.Entries) < 1 { invalidParams.Add(request.NewErrParamMinLen("Entries", 1)) } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3183,22 +5686,26 @@ func (s *PutEventsOutput) SetFailedEntryCount(v int64) *PutEventsOutput { type PutEventsRequestEntry struct { _ struct{} `type:"structure"` - // A valid JSON string. There is no other schema imposed. The JSON string may + // A valid JSON string. There is no other schema imposed. The JSON string can // contain fields and nested subobjects. Detail *string `type:"string"` - // Free-form string used to decide what fields to expect in the event detail. + // Free-form string used to decide which fields to expect in the event detail. DetailType *string `type:"string"` - // AWS resources, identified by Amazon Resource Name (ARN), which the event - // primarily concerns. Any number, including zero, may be present. + // The event bus that will receive the event. Only the rules that are associated + // with this event bus can match the event. + EventBusName *string `min:"1" type:"string"` + + // AWS resources, identified by Amazon Resource Name (ARN), that the event primarily + // concerns. Any number, including zero, can be present. Resources []*string `type:"list"` // The source of the event. This field is required. Source *string `type:"string"` - // The time stamp of the event, per RFC3339 (https://www.rfc-editor.org/rfc/rfc3339.txt). - // If no time stamp is provided, the time stamp of the PutEvents call is used. + // The timestamp of the event, per RFC3339 (https://www.rfc-editor.org/rfc/rfc3339.txt). + // If no timestamp is provided, the timestamp of the PutEvents call is used. Time *time.Time `type:"timestamp"` } @@ -3212,6 +5719,19 @@ func (s PutEventsRequestEntry) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEventsRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutEventsRequestEntry"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDetail sets the Detail field's value. func (s *PutEventsRequestEntry) SetDetail(v string) *PutEventsRequestEntry { s.Detail = &v @@ -3224,6 +5744,12 @@ func (s *PutEventsRequestEntry) SetDetailType(v string) *PutEventsRequestEntry { return s } +// SetEventBusName sets the EventBusName field's value. +func (s *PutEventsRequestEntry) SetEventBusName(v string) *PutEventsRequestEntry { + s.EventBusName = &v + return s +} + // SetResources sets the Resources field's value. func (s *PutEventsRequestEntry) SetResources(v []*string) *PutEventsRequestEntry { s.Resources = v @@ -3284,10 +5810,189 @@ func (s *PutEventsResultEntry) SetEventId(v string) *PutEventsResultEntry { return s } +type PutPartnerEventsInput struct { + _ struct{} `type:"structure"` + + // The list of events to write to the event bus. 
+	// + // Entries is a required field + Entries []*PutPartnerEventsRequestEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutPartnerEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPartnerEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPartnerEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPartnerEventsInput"} + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil && len(s.Entries) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Entries", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntries sets the Entries field's value. +func (s *PutPartnerEventsInput) SetEntries(v []*PutPartnerEventsRequestEntry) *PutPartnerEventsInput { + s.Entries = v + return s +} + +type PutPartnerEventsOutput struct { + _ struct{} `type:"structure"` + + // The list of events from this operation that were successfully written to + // the partner event bus. + Entries []*PutPartnerEventsResultEntry `type:"list"` + + // The number of events from this operation that couldn't be written to the + // partner event bus. + FailedEntryCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s PutPartnerEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPartnerEventsOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *PutPartnerEventsOutput) SetEntries(v []*PutPartnerEventsResultEntry) *PutPartnerEventsOutput { + s.Entries = v + return s +} + +// SetFailedEntryCount sets the FailedEntryCount field's value. +func (s *PutPartnerEventsOutput) SetFailedEntryCount(v int64) *PutPartnerEventsOutput { + s.FailedEntryCount = &v + return s +} + +// The details about an event generated by an SaaS partner. +type PutPartnerEventsRequestEntry struct { + _ struct{} `type:"structure"` + + // A valid JSON string. There is no other schema imposed. The JSON string can + // contain fields and nested subobjects. + Detail *string `type:"string"` + + // A free-form string used to decide which fields to expect in the event detail. + DetailType *string `type:"string"` + + // AWS resources, identified by Amazon Resource Name (ARN), that the event primarily + // concerns. Any number, including zero, can be present. + Resources []*string `type:"list"` + + // The event source that is generating the entry. + Source *string `type:"string"` + + // The date and time of the event. + Time *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s PutPartnerEventsRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPartnerEventsRequestEntry) GoString() string { + return s.String() +} + +// SetDetail sets the Detail field's value. +func (s *PutPartnerEventsRequestEntry) SetDetail(v string) *PutPartnerEventsRequestEntry { + s.Detail = &v + return s +} + +// SetDetailType sets the DetailType field's value. +func (s *PutPartnerEventsRequestEntry) SetDetailType(v string) *PutPartnerEventsRequestEntry { + s.DetailType = &v + return s +} + +// SetResources sets the Resources field's value.
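A minimal sketch of how a partner account might submit an entry through these types, assuming the PutPartnerEvents client method that pairs with them; the partner source name and detail payload are hypothetical:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	out, err := svc.PutPartnerEvents(&cloudwatchevents.PutPartnerEventsInput{
		Entries: []*cloudwatchevents.PutPartnerEventsRequestEntry{{
			Source:     aws.String("aws.partner/example.com/1234/tickets"), // hypothetical partner source
			DetailType: aws.String("ticketCreated"),
			Detail:     aws.String(`{"ticketId":"1234"}`),
			Time:       aws.Time(time.Now()),
		}},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("failed entries:", aws.Int64Value(out.FailedEntryCount))
}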
+func (s *PutPartnerEventsRequestEntry) SetResources(v []*string) *PutPartnerEventsRequestEntry { + s.Resources = v + return s +} + +// SetSource sets the Source field's value. +func (s *PutPartnerEventsRequestEntry) SetSource(v string) *PutPartnerEventsRequestEntry { + s.Source = &v + return s +} + +// SetTime sets the Time field's value. +func (s *PutPartnerEventsRequestEntry) SetTime(v time.Time) *PutPartnerEventsRequestEntry { + s.Time = &v + return s +} + +// Represents an event that a partner tried to generate but failed. +type PutPartnerEventsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code that indicates why the event submission failed. + ErrorCode *string `type:"string"` + + // The error message that explains why the event submission failed. + ErrorMessage *string `type:"string"` + + // The ID of the event. + EventId *string `type:"string"` +} + +// String returns the string representation +func (s PutPartnerEventsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPartnerEventsResultEntry) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *PutPartnerEventsResultEntry) SetErrorCode(v string) *PutPartnerEventsResultEntry { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *PutPartnerEventsResultEntry) SetErrorMessage(v string) *PutPartnerEventsResultEntry { + s.ErrorMessage = &v + return s +} + +// SetEventId sets the EventId field's value. +func (s *PutPartnerEventsResultEntry) SetEventId(v string) *PutPartnerEventsResultEntry { + s.EventId = &v + return s +} + type PutPermissionInput struct { _ struct{} `type:"structure"` - // The action that you are enabling the other account to perform. Currently, + // The action that you're enabling the other account to perform. Currently, // this must be events:PutEvents. // // Action is a required field @@ -3295,31 +6000,35 @@ type PutPermissionInput struct { // This parameter enables you to limit the permission to accounts that fulfill // a certain condition, such as being a member of a certain AWS organization. - // For more information about AWS Organizations, see What Is AWS Organizations + // For more information about AWS Organizations, see What Is AWS Organizations? // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html) // in the AWS Organizations User Guide. // - // If you specify Condition with an AWS organization ID, and specify "*" as - // the value for Principal, you grant permission to all the accounts in the - // named organization. + // If you specify Condition with an AWS organization ID and specify "*" as the + // value for Principal, you grant permission to all the accounts in the named + // organization. // - // The Condition is a JSON string which must contain Type, Key, and Value fields. + // The Condition is a JSON string that must contain Type, Key, and Value fields. Condition *Condition `type:"structure"` + // The event bus associated with the rule. If you omit this, the default event + // bus is used. + EventBusName *string `min:"1" type:"string"` + // The 12-digit AWS account ID that you are permitting to put events to your // default event bus. Specify "*" to permit any account to put events to your // default event bus. // // If you specify "*" without specifying Condition, avoid creating rules that - // may match undesirable events. 
To create more secure rules, make sure that + // might match undesirable events. To create more secure rules, make sure that // the event pattern for each rule contains an account field with a specific - // account ID from which to receive events. Rules with an account field do not - // match any events sent from other accounts. + // account ID to receive events from. Rules with an account field don't match + // any events sent from other accounts. // // Principal is a required field Principal *string `min:"1" type:"string" required:"true"` - // An identifier string for the external account that you are granting permissions + // An identifier string for the external account that you're granting permissions // to. If you later want to revoke the permission for this external account, // specify this StatementId when you run RemovePermission. // @@ -3346,6 +6055,9 @@ func (s *PutPermissionInput) Validate() error { if s.Action != nil && len(*s.Action) < 1 { invalidParams.Add(request.NewErrParamMinLen("Action", 1)) } + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Principal == nil { invalidParams.Add(request.NewErrParamRequired("Principal")) } @@ -3382,6 +6094,12 @@ func (s *PutPermissionInput) SetCondition(v *Condition) *PutPermissionInput { return s } +// SetEventBusName sets the EventBusName field's value. +func (s *PutPermissionInput) SetEventBusName(v string) *PutPermissionInput { + s.EventBusName = &v + return s +} + // SetPrincipal sets the Principal field's value. func (s *PutPermissionInput) SetPrincipal(v string) *PutPermissionInput { s.Principal = &v @@ -3414,11 +6132,15 @@ type PutRuleInput struct { // A description of the rule. Description *string `type:"string"` - // The event pattern. For more information, see Events and Event Patterns (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html) - // in the Amazon CloudWatch Events User Guide. + // The event bus to associate with this rule. If you omit this, the default + // event bus is used. + EventBusName *string `min:"1" type:"string"` + + // The event pattern. For more information, see Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. EventPattern *string `type:"string"` - // The name of the rule that you are creating or updating. + // The name of the rule that you're creating or updating. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -3426,7 +6148,7 @@ type PutRuleInput struct { // The Amazon Resource Name (ARN) of the IAM role associated with the rule. RoleArn *string `min:"1" type:"string"` - // The scheduling expression. For example, "cron(0 20 * * ? *)" or "rate(5 minutes)". + // The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)". ScheduleExpression *string `type:"string"` // Indicates whether the rule is enabled or disabled. @@ -3449,6 +6171,9 @@ func (s PutRuleInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
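The Condition described above is the piece that scopes a "*" principal to one AWS organization. A minimal sketch; the statement ID and organization ID are hypothetical, and aws:PrincipalOrgID is the condition key the service documents for this:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	// Grants every account in the named organization permission to put events.
	_, err := svc.PutPermission(&cloudwatchevents.PutPermissionInput{
		Action:      aws.String("events:PutEvents"),
		Principal:   aws.String("*"),
		StatementId: aws.String("AllowMyOrg"), // hypothetical statement ID
		Condition: &cloudwatchevents.Condition{
			Type:  aws.String("StringEquals"),
			Key:   aws.String("aws:PrincipalOrgID"),
			Value: aws.String("o-a1b2c3d4e5"), // hypothetical organization ID
		},
	})
	if err != nil {
		fmt.Println(err)
	}
}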
func (s *PutRuleInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutRuleInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -3481,6 +6206,12 @@ func (s *PutRuleInput) SetDescription(v string) *PutRuleInput { return s } +// SetEventBusName sets the EventBusName field's value. +func (s *PutRuleInput) SetEventBusName(v string) *PutRuleInput { + s.EventBusName = &v + return s +} + // SetEventPattern sets the EventPattern field's value. func (s *PutRuleInput) SetEventPattern(v string) *PutRuleInput { s.EventPattern = &v @@ -3543,6 +6274,10 @@ func (s *PutRuleOutput) SetRuleArn(v string) *PutRuleOutput { type PutTargetsInput struct { _ struct{} `type:"structure"` + // The name of the event bus associated with the rule. If you omit this, the + // default event bus is used. + EventBusName *string `min:"1" type:"string"` + // The name of the rule. // // Rule is a required field @@ -3567,6 +6302,9 @@ func (s PutTargetsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutTargetsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutTargetsInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Rule == nil { invalidParams.Add(request.NewErrParamRequired("Rule")) } @@ -3596,6 +6334,12 @@ func (s *PutTargetsInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *PutTargetsInput) SetEventBusName(v string) *PutTargetsInput { + s.EventBusName = &v + return s +} + // SetRule sets the Rule field's value. func (s *PutTargetsInput) SetRule(v string) *PutTargetsInput { s.Rule = &v @@ -3687,6 +6431,10 @@ func (s *PutTargetsResultEntry) SetTargetId(v string) *PutTargetsResultEntry { type RemovePermissionInput struct { _ struct{} `type:"structure"` + // The name of the event bus to revoke permissions for. If you omit this, the + // default event bus is used. + EventBusName *string `min:"1" type:"string"` + // The statement ID corresponding to the account that is no longer allowed to // put events to the default event bus. // @@ -3707,6 +6455,9 @@ func (s RemovePermissionInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RemovePermissionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.StatementId == nil { invalidParams.Add(request.NewErrParamRequired("StatementId")) } @@ -3720,6 +6471,12 @@ func (s *RemovePermissionInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *RemovePermissionInput) SetEventBusName(v string) *RemovePermissionInput { + s.EventBusName = &v + return s +} + // SetStatementId sets the StatementId field's value. func (s *RemovePermissionInput) SetStatementId(v string) *RemovePermissionInput { s.StatementId = &v @@ -3743,11 +6500,14 @@ func (s RemovePermissionOutput) GoString() string { type RemoveTargetsInput struct { _ struct{} `type:"structure"` - // If this is a managed rule, created by an AWS service on your behalf, you - // must specify Force as True to remove targets. 
This parameter is ignored for - // rules that are not managed rules. You can check whether a rule is a managed - // rule by using DescribeRule or ListRules and checking the ManagedBy field - // of the response. + // The name of the event bus associated with the rule. + EventBusName *string `min:"1" type:"string"` + + // If this is a managed rule created by an AWS service on your behalf, you must + // specify Force as True to remove targets. This parameter is ignored for rules + // that aren't managed rules. You can check whether a rule is a managed rule + // by using DescribeRule or ListRules and checking the ManagedBy field of the + // response. Force *bool `type:"boolean"` // The IDs of the targets to remove from the rule. @@ -3774,6 +6534,9 @@ func (s RemoveTargetsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RemoveTargetsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RemoveTargetsInput"} + if s.EventBusName != nil && len(*s.EventBusName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventBusName", 1)) + } if s.Ids == nil { invalidParams.Add(request.NewErrParamRequired("Ids")) } @@ -3793,6 +6556,12 @@ func (s *RemoveTargetsInput) Validate() error { return nil } +// SetEventBusName sets the EventBusName field's value. +func (s *RemoveTargetsInput) SetEventBusName(v string) *RemoveTargetsInput { + s.EventBusName = &v + return s +} + // SetForce sets the Force field's value. func (s *RemoveTargetsInput) SetForce(v bool) *RemoveTargetsInput { s.Force = &v @@ -3887,7 +6656,7 @@ func (s *RemoveTargetsResultEntry) SetTargetId(v string) *RemoveTargetsResultEnt return s } -// Contains information about a rule in Amazon CloudWatch Events. +// Contains information about a rule in Amazon EventBridge. type Rule struct { _ struct{} `type:"structure"` @@ -3897,13 +6666,15 @@ type Rule struct { // The description of the rule. Description *string `type:"string"` - // The event pattern of the rule. For more information, see Events and Event - // Patterns (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html) - // in the Amazon CloudWatch Events User Guide. + // The event bus associated with the rule. + EventBusName *string `min:"1" type:"string"` + + // The event pattern of the rule. For more information, see Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. EventPattern *string `type:"string"` - // If the rule was created on behalf of your account by an AWS service, this - // field displays the principal name of the service that created the rule. + // If an AWS service created the rule on behalf of your account, this field + // displays the principal name of the service that created the rule. ManagedBy *string `min:"1" type:"string"` // The name of the rule. @@ -3912,7 +6683,7 @@ type Rule struct { // The Amazon Resource Name (ARN) of the role that is used for target invocation. RoleArn *string `min:"1" type:"string"` - // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + // The scheduling expression: for example, "cron(0 20 * * ? *)" or "rate(5 minutes)". ScheduleExpression *string `type:"string"` // The state of the rule. @@ -3941,6 +6712,12 @@ func (s *Rule) SetDescription(v string) *Rule { return s } +// SetEventBusName sets the EventBusName field's value. 
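As the rewritten comment above says, targets only come off a managed rule when Force is set. A minimal sketch with a hypothetical rule name and target ID; Force is ignored for ordinary rules:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	out, err := svc.RemoveTargets(&cloudwatchevents.RemoveTargetsInput{
		Rule:  aws.String("service-managed-rule"), // hypothetical managed rule name
		Ids:   []*string{aws.String("target-1")},  // hypothetical target ID
		Force: aws.Bool(true),                     // required for managed rules
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("failed entries:", aws.Int64Value(out.FailedEntryCount))
}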
+func (s *Rule) SetEventBusName(v string) *Rule { + s.EventBusName = &v + return s +} + // SetEventPattern sets the EventPattern field's value. func (s *Rule) SetEventPattern(v string) *Rule { s.EventPattern = &v @@ -4033,16 +6810,16 @@ func (s *RunCommandParameters) SetRunCommandTargets(v []*RunCommandTarget) *RunC // Information about the EC2 instances that are to be sent the command, specified // as key-value pairs. Each RunCommandTarget block can include only one key, -// but this key may specify multiple values. +// but this key can specify multiple values. type RunCommandTarget struct { _ struct{} `type:"structure"` - // Can be either tag:tag-key or InstanceIds. + // Can be either tag: tag-key or InstanceIds. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` - // If Key is tag:tag-key, Values is a list of tag values. If Key is InstanceIds, + // If Key is tag: tag-key, Values is a list of tag values. If Key is InstanceIds, // Values is a list of Amazon EC2 instance IDs. // // Values is a required field @@ -4118,13 +6895,13 @@ func (s *SqsParameters) SetMessageGroupId(v string) *SqsParameters { return s } -// A key-value pair associated with an AWS resource. In CloudWatch Events, rules -// support tagging. +// A key-value pair associated with an AWS resource. In EventBridge, rules support +// tagging. type Tag struct { _ struct{} `type:"structure"` - // A string you can use to assign a value. The combination of tag keys and values - // can help you organize and categorize your resources. + // A string that you can use to assign a value. The combination of tag keys + // and values can help you organize and categorize your resources. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -4179,7 +6956,7 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch Events rule that you're adding tags to. + // The ARN of the rule that you're adding tags to. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -4258,12 +7035,12 @@ func (s TagResourceOutput) GoString() string { // Targets are the resources to be invoked when a rule is triggered. For a complete // list of services and resources that can be set as a target, see PutTargets. // -// If you are setting the event bus of another account as the target, and that +// If you're setting the event bus of another account as the target and that // account granted permission to your account through an organization instead -// of directly by the account ID, then you must specify a RoleArn with proper -// permissions in the Target structure. For more information, see Sending and -// Receiving Events Between AWS Accounts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEvents-CrossAccountEventDelivery.html) -// in the Amazon CloudWatch Events User Guide. +// of directly by the account ID, you must specify a RoleArn with proper permissions +// in the Target structure. For more information, see Sending and Receiving +// Events Between AWS Accounts (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-cross-account-event-delivery.html) +// in the Amazon EventBridge User Guide. type Target struct { _ struct{} `type:"structure"` @@ -4277,9 +7054,9 @@ type Target struct { // in the AWS Batch User Guide. 
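The Target documentation just above calls for a RoleArn when the target is another account's event bus reached through an organization grant rather than a direct account-ID grant. A minimal sketch; every ARN and name here is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))

	_, err := svc.PutTargets(&cloudwatchevents.PutTargetsInput{
		Rule: aws.String("forward-to-central"), // hypothetical rule name
		Targets: []*cloudwatchevents.Target{{
			Id:  aws.String("central-bus"),
			Arn: aws.String("arn:aws:events:us-east-1:999999999999:event-bus/default"),
			// Needed because permission came through an organization,
			// not directly from the target account's ID.
			RoleArn: aws.String("arn:aws:iam::111122223333:role/EventForwardRole"),
		}},
	})
	if err != nil {
		fmt.Println(err)
	}
}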
BatchParameters *BatchParameters `type:"structure"` - // Contains the Amazon ECS task definition and task count to be used, if the + // Contains the Amazon ECS task definition and task count to be used if the // event target is an Amazon ECS task. For more information about Amazon ECS - // tasks, see Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // tasks, see Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) // in the Amazon EC2 Container Service Developer Guide. EcsParameters *EcsParameters `type:"structure"` @@ -4303,9 +7080,9 @@ type Target struct { // then use that data to send customized input to the target. InputTransformer *InputTransformer `type:"structure"` - // The custom parameter you can use to control the shard assignment, when the - // target is a Kinesis data stream. If you do not include this parameter, the - // default is to use the eventId as the partition key. + // The custom parameter that you can use to control the shard assignment when + // the target is a Kinesis data stream. If you don't include this parameter, + // the default is to use the eventId as the partition key. KinesisParameters *KinesisParameters `type:"structure"` // The Amazon Resource Name (ARN) of the IAM role to be used for this target @@ -4457,8 +7234,8 @@ type TestEventPatternInput struct { // Event is a required field Event *string `type:"string" required:"true"` - // The event pattern. For more information, see Events and Event Patterns (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html) - // in the Amazon CloudWatch Events User Guide. + // The event pattern. For more information, see Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. // // EventPattern is a required field EventPattern *string `type:"string" required:"true"` @@ -4528,7 +7305,7 @@ func (s *TestEventPatternOutput) SetResult(v bool) *TestEventPatternOutput { type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch Events rule from which you are removing tags. + // The ARN of the rule that you're removing tags from. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -4602,6 +7379,17 @@ const ( AssignPublicIpDisabled = "DISABLED" ) +const ( + // EventSourceStatePending is a EventSourceState enum value + EventSourceStatePending = "PENDING" + + // EventSourceStateActive is a EventSourceState enum value + EventSourceStateActive = "ACTIVE" + + // EventSourceStateDeleted is a EventSourceState enum value + EventSourceStateDeleted = "DELETED" +) + const ( // LaunchTypeEc2 is a LaunchType enum value LaunchTypeEc2 = "EC2" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/doc.go index da232b4853a..e6efe0fa259 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/doc.go @@ -3,25 +3,25 @@ // Package cloudwatchevents provides the client and types for making API // requests to Amazon CloudWatch Events. // -// Amazon CloudWatch Events helps you to respond to state changes in your AWS -// resources. 
When your resources change state, they automatically send events -// into an event stream. You can create rules that match selected events in -// the stream and route them to targets to take action. You can also use rules -// to take action on a predetermined schedule. For example, you can configure -// rules to: +// Amazon EventBridge helps you to respond to state changes in your AWS resources. +// When your resources change state, they automatically send events into an +// event stream. You can create rules that match selected events in the stream +// and route them to targets to take action. You can also use rules to take +// action on a predetermined schedule. For example, you can configure rules +// to: // // * Automatically invoke an AWS Lambda function to update DNS entries when -// an event notifies you that Amazon EC2 instance enters the running state. +// an event notifies you that an Amazon EC2 instance enters the running state // // * Direct specific API records from AWS CloudTrail to an Amazon Kinesis // data stream for detailed analysis of potential security or availability -// risks. +// risks // // * Periodically invoke a built-in target to create a snapshot of an Amazon -// EBS volume. +// EBS volume // -// For more information about the features of Amazon CloudWatch Events, see -// the Amazon CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events). +// For more information about the features of Amazon EventBridge, see the Amazon +// EventBridge User Guide (https://docs.aws.amazon.com/eventbridge/latest/userguide/). // // See https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go index bab0f862f5e..24efbdc1643 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go @@ -7,7 +7,7 @@ const ( // ErrCodeConcurrentModificationException for service response error code // "ConcurrentModificationException". // - // There is concurrent modification on a rule or target. + // There is concurrent modification on a resource. ErrCodeConcurrentModificationException = "ConcurrentModificationException" // ErrCodeInternalException for service response error code @@ -19,24 +19,29 @@ const ( // ErrCodeInvalidEventPatternException for service response error code // "InvalidEventPatternException". // - // The event pattern is not valid. + // The event pattern isn't valid. ErrCodeInvalidEventPatternException = "InvalidEventPatternException" + // ErrCodeInvalidStateException for service response error code + // "InvalidStateException". + // + // The specified state isn't a valid state for an event source. + ErrCodeInvalidStateException = "InvalidStateException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // You tried to create more rules or add more targets to a rule than is allowed. + // You tried to create more resources than is allowed. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodeManagedRuleException for service response error code // "ManagedRuleException". // - // This rule was created by an AWS service on behalf of your account. It is - // managed by that service. 
If you see this error in response to DeleteRule - // or RemoveTargets, you can use the Force parameter in those calls to delete - // the rule or remove targets from the rule. You cannot modify these managed - // rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, - // or UntagResource. + // An AWS service created this rule on behalf of your account. That service + // manages it. If you see this error in response to DeleteRule or RemoveTargets, + // you can use the Force parameter in those calls to delete the rule or remove + // targets from the rule. You can't modify these managed rules by using DisableRule, + // EnableRule, PutTargets, PutRule, TagResource, or UntagResource. ErrCodeManagedRuleException = "ManagedRuleException" // ErrCodePolicyLengthExceededException for service response error code @@ -45,9 +50,15 @@ const ( // The event bus policy is too long. For more information, see the limits. ErrCodePolicyLengthExceededException = "PolicyLengthExceededException" + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // The resource that you're trying to create already exists. + ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // An entity that you specified does not exist. + // An entity that you specified doesn't exist. ErrCodeResourceNotFoundException = "ResourceNotFoundException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go index 2a7f6969cd8..d20dca58bfa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchevents.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchEvents { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchEvents { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchEvents { svc := &CloudWatchEvents{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-07", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go index 341be092229..2d5c1f0d7bd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -262,6 +262,9 @@ func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) ( // same S3 bucket. To separate out log data for each export task, you can specify // a prefix to be used as the Amazon S3 key prefix for all exported objects. // +// Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting +// to S3 buckets encrypted with SSE-KMS is not supported. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1251,7 +1254,7 @@ func (c *CloudWatchLogs) DescribeDestinationsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeDestinations operation. // pageNum := 0 // err := client.DescribeDestinationsPages(params, -// func(page *DescribeDestinationsOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.DescribeDestinationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1284,10 +1287,12 @@ func (c *CloudWatchLogs) DescribeDestinationsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDestinationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDestinationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1474,7 +1479,7 @@ func (c *CloudWatchLogs) DescribeLogGroupsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeLogGroups operation. // pageNum := 0 // err := client.DescribeLogGroupsPages(params, -// func(page *DescribeLogGroupsOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1507,10 +1512,12 @@ func (c *CloudWatchLogs) DescribeLogGroupsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLogGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLogGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1621,7 +1628,7 @@ func (c *CloudWatchLogs) DescribeLogStreamsWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeLogStreams operation. 
// pageNum := 0 // err := client.DescribeLogStreamsPages(params, -// func(page *DescribeLogStreamsOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1654,10 +1661,12 @@ func (c *CloudWatchLogs) DescribeLogStreamsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLogStreamsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLogStreamsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1765,7 +1774,7 @@ func (c *CloudWatchLogs) DescribeMetricFiltersWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeMetricFilters operation. // pageNum := 0 // err := client.DescribeMetricFiltersPages(params, -// func(page *DescribeMetricFiltersOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.DescribeMetricFiltersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1798,10 +1807,12 @@ func (c *CloudWatchLogs) DescribeMetricFiltersPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeMetricFiltersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeMetricFiltersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2079,7 +2090,7 @@ func (c *CloudWatchLogs) DescribeSubscriptionFiltersWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation. // pageNum := 0 // err := client.DescribeSubscriptionFiltersPages(params, -// func(page *DescribeSubscriptionFiltersOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.DescribeSubscriptionFiltersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2112,10 +2123,12 @@ func (c *CloudWatchLogs) DescribeSubscriptionFiltersPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSubscriptionFiltersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSubscriptionFiltersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2326,7 +2339,7 @@ func (c *CloudWatchLogs) FilterLogEventsWithContext(ctx aws.Context, input *Filt // // Example iterating over at most 3 pages of a FilterLogEvents operation. // pageNum := 0 // err := client.FilterLogEventsPages(params, -// func(page *FilterLogEventsOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2359,10 +2372,12 @@ func (c *CloudWatchLogs) FilterLogEventsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*FilterLogEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*FilterLogEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2473,7 +2488,7 @@ func (c *CloudWatchLogs) GetLogEventsWithContext(ctx aws.Context, input *GetLogE // // Example iterating over at most 3 pages of a GetLogEvents operation. 
// pageNum := 0 // err := client.GetLogEventsPages(params, -// func(page *GetLogEventsOutput, lastPage bool) bool { +// func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2506,10 +2521,12 @@ func (c *CloudWatchLogs) GetLogEventsPagesWithContext(ctx aws.Context, input *Ge }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetLogEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetLogEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2746,12 +2763,18 @@ func (c *CloudWatchLogs) GetQueryResultsRequest(input *GetQueryResultsInput) (re // GetQueryResults API operation for Amazon CloudWatch Logs. // -// Returns the results from the specified query. If the query is in progress, -// partial results of that current execution are returned. Only the fields requested -// in the query are returned. +// Returns the results from the specified query. +// +// Only the fields requested in the query are returned, along with a @ptr field +// which is the identifier for the log record. You can use the value of @ptr +// in an operation to get the full log record. // // GetQueryResults does not start a query execution. To run a query, use . // +// If the value of the Status field in the output is Running, this operation +// returns only partial results. If you see a value of Scheduled or Running +// for the status, you can retry the operation later to see the final results. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2920,14 +2943,14 @@ func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req // Creates or updates a destination. A destination encapsulates a physical resource // (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time // stream of log events for a different account, ingested using PutLogEvents. -// Currently, the only supported physical resource is a Kinesis stream belonging -// to the same account as the destination. +// A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose +// stream, or an AWS Lambda function. // -// Through an access policy, a destination controls what is written to its Kinesis -// stream. By default, PutDestination does not set any access policy with the -// destination, which means a cross-account user cannot call PutSubscriptionFilter -// against this destination. To enable this, the destination owner must call -// PutDestinationPolicy after PutDestination. +// Through an access policy, a destination controls what is written to it. By +// default, PutDestination does not set any access policy with the destination, +// which means a cross-account user cannot call PutSubscriptionFilter against +// this destination. To enable this, the destination owner must call PutDestinationPolicy +// after PutDestination. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3118,8 +3141,8 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req // * None of the log events in the batch can be more than 2 hours in the // future. // -// * None of the log events in the batch can be older than 14 days or the -// retention period of the log group.
+// * None of the log events in the batch can be older than 14 days or older +// than the retention period of the log group. // // * The log events in the batch must be in chronological ordered by their // timestamp. The timestamp is the time the event occurred, expressed as @@ -3619,6 +3642,10 @@ func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request // // For more information, see CloudWatch Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). // +// Queries time out after 15 minutes of execution. If your queries are timing +// out, reduce the time range being searched, or partition your query into a +// number of queries. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6036,7 +6063,13 @@ type FilterLogEventsInput struct { // in a single response. If the value is false, all the matched log events in // the first log stream are searched first, then those in the next log stream, // and so on. The default is false. - Interleaved *bool `locationName:"interleaved" type:"boolean"` + // + // IMPORTANT: Starting on June 17, 2019, this parameter will be ignored and + // the value will be assumed to be true. The response from this operation will + // always interleave events from multiple log streams within a log group. + // + // Deprecated: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group. + Interleaved *bool `locationName:"interleaved" deprecated:"true" type:"boolean"` // The maximum number of events to return. The default is 10,000 events. Limit *int64 `locationName:"limit" min:"1" type:"integer"` @@ -6297,6 +6330,8 @@ type GetLogEventsInput struct { // If the value is true, the earliest log events are returned first. If the // value is false, the latest log events are returned first. The default value // is false. + // + // If you are using nextToken in this operation, you must specify true for startFromHead. StartFromHead *bool `locationName:"startFromHead" type:"boolean"` // The start of the time range, expressed as the number of milliseconds after @@ -6630,7 +6665,11 @@ type GetQueryResultsOutput struct { Statistics *QueryStatistics `locationName:"statistics" type:"structure"` // The status of the most recent running of the query. Possible values are Cancelled, - // Complete, Failed, Running, Scheduled, and Unknown. + // Complete, Failed, Running, Scheduled, Timeout, and Unknown. + // + // Queries time out after 15 minutes of execution. To avoid having your queries + // time out, reduce the time range being searched, or partition your query into + // a number of queries. Status *string `locationName:"status" type:"string" enum:"QueryStatus"` } @@ -6930,7 +6969,13 @@ type LogStream struct { LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` // The number of bytes stored. - StoredBytes *int64 `locationName:"storedBytes" type:"long"` + // + // IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for + // log streams, and will be reported as zero. This change applies only to log + // streams. The storedBytes parameter for log groups is not affected. + // + // Deprecated: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. 
This change applies only to log streams. The storedBytes parameter for log groups is not affected. + StoredBytes *int64 `locationName:"storedBytes" deprecated:"true" type:"long"` // The sequence token. UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` @@ -7102,7 +7147,7 @@ func (s *MetricFilterMatchRecord) SetExtractedValues(v map[string]*string) *Metr return s } -// Indicates how to transform ingested log eventsto metric data in a CloudWatch +// Indicates how to transform ingested log events to metric data in a CloudWatch // metric. type MetricTransformation struct { _ struct{} `type:"structure"` @@ -7648,6 +7693,7 @@ type PutResourcePolicyInput struct { // Details of the new policy, including the identity of the principal that is // enabled to put logs to this account. This is formatted as a JSON string. + // This parameter is required. // // The following example creates a resource policy enabling the Route 53 service // to put DNS query logs in to the specified log group. Replace "logArn" with @@ -8278,8 +8324,15 @@ type StartQueryInput struct { // The log group on which to perform the query. // - // LogGroupName is a required field - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + // A StartQuery operation must include a logGroupNames or a logGroupName parameter, + // but not both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The list of log groups to be queried. You can include up to 20 log groups. + // + // A StartQuery operation must include a logGroupNames or a logGroupName parameter, + // but not both. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` // The query string to use. For more information, see CloudWatch Logs Insights // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). @@ -8314,9 +8367,6 @@ func (s *StartQueryInput) Validate() error { if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) } @@ -8351,6 +8401,12 @@ func (s *StartQueryInput) SetLogGroupName(v string) *StartQueryInput { return s } +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *StartQueryInput) SetLogGroupNames(v []*string) *StartQueryInput { + s.LogGroupNames = v + return s +} + // SetQueryString sets the QueryString field's value. func (s *StartQueryInput) SetQueryString(v string) *StartQueryInput { s.QueryString = &v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index 8d5f929df84..59b3512359d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchLogs { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchLogs { svc := &CloudWatchLogs{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-03-28", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go index 98ef82d448d..1b2174a993f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go @@ -747,6 +747,10 @@ func (c *CodeBuild) ImportSourceCredentialsRequest(input *ImportSourceCredential // * ErrCodeAccountLimitExceededException "AccountLimitExceededException" // An AWS service limit was exceeded for the calling AWS account. // +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ImportSourceCredentials func (c *CodeBuild) ImportSourceCredentials(input *ImportSourceCredentialsInput) (*ImportSourceCredentialsOutput, error) { req, out := c.ImportSourceCredentialsRequest(input) @@ -1810,6 +1814,11 @@ type Build struct { // Whether the build is complete. True if complete; otherwise, false. BuildComplete *bool `locationName:"buildComplete" type:"boolean"` + // The number of the build. For each project, the buildNumber of its first build + // is 1. The buildNumber of each subsequent build is incremented by 1. If a + // build is deleted, the buildNumber of other builds does not change. + BuildNumber *int64 `locationName:"buildNumber" type:"long"` + // The current status of the build. Valid values include: // // * FAILED: The build failed. @@ -1838,7 +1847,7 @@ type Build struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name). + // the CMK's alias (using the format alias/alias-name ). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // When the build process ended, expressed in Unix time format. @@ -1847,6 +1856,9 @@ type Build struct { // Information about the build environment for this build. Environment *ProjectEnvironment `locationName:"environment" type:"structure"` + // A list of exported environment variables for this build. + ExportedEnvironmentVariables []*ExportedEnvironmentVariable `locationName:"exportedEnvironmentVariables" type:"list"` + // The unique ID for the build. Id *string `locationName:"id" min:"1" type:"string"` @@ -1880,13 +1892,12 @@ type Build struct { // An identifier for the version of this build's source code. 
// - // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit // ID. // - // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. // - // - // * For Amazon Simple Storage Service (Amazon S3), this does not apply. + // * For Amazon Simple Storage Service (Amazon S3), this does not apply. ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` // An array of ProjectArtifacts objects. @@ -1922,7 +1933,12 @@ type Build struct { // Information about the source code to be built. Source *ProjectSource `locationName:"source" type:"structure"` - // Any version identifier for the version of the source code to be built. + // Any version identifier for the version of the source code to be built. If + // sourceVersion is specified at the project level, then this sourceVersion + // (at the build level) takes precedence. + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` // When the build process started, expressed in Unix time format. @@ -1967,6 +1983,12 @@ func (s *Build) SetBuildComplete(v bool) *Build { return s } +// SetBuildNumber sets the BuildNumber field's value. +func (s *Build) SetBuildNumber(v int64) *Build { + s.BuildNumber = &v + return s +} + // SetBuildStatus sets the BuildStatus field's value. func (s *Build) SetBuildStatus(v string) *Build { s.BuildStatus = &v @@ -2003,6 +2025,12 @@ func (s *Build) SetEnvironment(v *ProjectEnvironment) *Build { return s } +// SetExportedEnvironmentVariables sets the ExportedEnvironmentVariables field's value. +func (s *Build) SetExportedEnvironmentVariables(v []*ExportedEnvironmentVariable) *Build { + s.ExportedEnvironmentVariables = v + return s +} + // SetId sets the Id field's value. func (s *Build) SetId(v string) *Build { s.Id = &v @@ -2422,7 +2450,7 @@ type CreateProjectInput struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name). + // the CMK's alias (using the format alias/alias-name ). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // Information about the build environment for the build project. @@ -2445,6 +2473,11 @@ type CreateProjectInput struct { // An array of ProjectArtifacts objects. SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` + // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified + // at the build level, then they take precedence over these secondarySourceVersions + // (at the project level). + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + // An array of ProjectSource objects. SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` @@ -2460,6 +2493,33 @@ type CreateProjectInput struct { // Source is a required field Source *ProjectSource `locationName:"source" type:"structure" required:"true"` + // A version of the build input to be built for this project. If not specified, + // the latest version is used. 
If specified, it must be one of: + // + // * For AWS CodeCommit: the commit ID to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the build level, then that version takes + // precedence over this sourceVersion (at the project level). + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` + // A set of tags for this build project. // // These tags are available for use by AWS services that support AWS CodeBuild @@ -2548,6 +2608,16 @@ func (s *CreateProjectInput) Validate() error { } } } + if s.SecondarySourceVersions != nil { + for i, v := range s.SecondarySourceVersions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourceVersions", i), err.(request.ErrInvalidParams)) + } + } + } if s.SecondarySources != nil { for i, v := range s.SecondarySources { if v == nil { @@ -2645,6 +2715,12 @@ func (s *CreateProjectInput) SetSecondaryArtifacts(v []*ProjectArtifacts) *Creat return s } +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *CreateProjectInput) SetSecondarySourceVersions(v []*ProjectSourceVersion) *CreateProjectInput { + s.SecondarySourceVersions = v + return s +} + // SetSecondarySources sets the SecondarySources field's value. func (s *CreateProjectInput) SetSecondarySources(v []*ProjectSource) *CreateProjectInput { s.SecondarySources = v @@ -2663,6 +2739,12 @@ func (s *CreateProjectInput) SetSource(v *ProjectSource) *CreateProjectInput { return s } +// SetSourceVersion sets the SourceVersion field's value. +func (s *CreateProjectInput) SetSourceVersion(v string) *CreateProjectInput { + s.SourceVersion = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateProjectInput) SetTags(v []*Tag) *CreateProjectInput { s.Tags = v @@ -3096,6 +3178,8 @@ type EnvironmentVariable struct { // Manager Parameter Store. // // * PLAINTEXT: An environment variable in plaintext format. + // + // * SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. Type *string `locationName:"type" type:"string" enum:"EnvironmentVariableType"` // The value of the environment variable. @@ -3156,6 +3240,44 @@ func (s *EnvironmentVariable) SetValue(v string) *EnvironmentVariable { return s } +// Information about an exported environment variable. +type ExportedEnvironmentVariable struct { + _ struct{} `type:"structure"` + + // The name of this exported environment variable. 
+ Name *string `locationName:"name" min:"1" type:"string"` + + // The value assigned to this exported environment variable. + // + // During a build, the value of a variable is available starting with the install + // phase. It can be updated between the start of the install phase and the end + // of the post_build phase. After the post_build phase ends, the value of exported + // variables cannot change. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ExportedEnvironmentVariable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportedEnvironmentVariable) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ExportedEnvironmentVariable) SetName(v string) *ExportedEnvironmentVariable { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExportedEnvironmentVariable) SetValue(v string) *ExportedEnvironmentVariable { + s.Value = &v + return s +} + // Information about the Git submodules configuration for an AWS CodeBuild build // project. type GitSubmodulesConfig struct { @@ -3211,6 +3333,11 @@ type ImportSourceCredentialsInput struct { // ServerType is a required field ServerType *string `locationName:"serverType" type:"string" required:"true" enum:"ServerType"` + // Set to false to prevent overwriting the repository source credentials. Set + // to true to overwrite the repository source credentials. The default value + // is true. + ShouldOverwrite *bool `locationName:"shouldOverwrite" type:"boolean"` + // For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, // this is the app password. // @@ -3269,6 +3396,12 @@ func (s *ImportSourceCredentialsInput) SetServerType(v string) *ImportSourceCred return s } +// SetShouldOverwrite sets the ShouldOverwrite field's value. +func (s *ImportSourceCredentialsInput) SetShouldOverwrite(v bool) *ImportSourceCredentialsInput { + s.ShouldOverwrite = &v + return s +} + // SetToken sets the Token field's value. func (s *ImportSourceCredentialsInput) SetToken(v string) *ImportSourceCredentialsInput { s.Token = &v @@ -3949,7 +4082,7 @@ type Project struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name). + // the CMK's alias (using the format alias/alias-name ). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // Information about the build environment for this build project. @@ -3972,6 +4105,11 @@ type Project struct { // An array of ProjectArtifacts objects. SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` + // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified + // at the build level, then they take over these secondarySourceVersions (at + // the project level). + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + // An array of ProjectSource objects. SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` @@ -3983,6 +4121,33 @@ type Project struct { // Information about the build input source code for this build project. Source *ProjectSource `locationName:"source" type:"structure"` + // A version of the build input to be built for this project. If not specified, + // the latest version is used. 
If specified, it must be one of: + // + // * For AWS CodeCommit: the commit ID to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the build level, then that version takes + // precedence over this sourceVersion (at the project level). + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` + // The tags for this build project. // // These tags are available for use by AWS services that support AWS CodeBuild @@ -4090,6 +4255,12 @@ func (s *Project) SetSecondaryArtifacts(v []*ProjectArtifacts) *Project { return s } +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *Project) SetSecondarySourceVersions(v []*ProjectSourceVersion) *Project { + s.SecondarySourceVersions = v + return s +} + // SetSecondarySources sets the SecondarySources field's value. func (s *Project) SetSecondarySources(v []*ProjectSource) *Project { s.SecondarySources = v @@ -4108,6 +4279,12 @@ func (s *Project) SetSource(v *ProjectSource) *Project { return s } +// SetSourceVersion sets the SourceVersion field's value. +func (s *Project) SetSourceVersion(v string) *Project { + s.SourceVersion = &v + return s +} + // SetTags sets the Tags field's value. func (s *Project) SetTags(v []*Tag) *Project { s.Tags = v @@ -4173,15 +4350,14 @@ type ProjectArtifacts struct { // // For example: // - // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and + // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and // name is set to MyArtifact.zip, then the output artifact is stored in MyArtifacts/build-ID/MyArtifact.zip. // + // * If path is empty, namespaceType is set to NONE, and name is set to "/", + // the output artifact is stored in the root of the output bucket. // - // * If path is empty, namespaceType is set to NONE, and name is set to - // "/", the output artifact is stored in the root of the output bucket. - // - // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and - // name is set to "/", the output artifact is stored in MyArtifacts/build-ID. + // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and + // name is set to "/", the output artifact is stored in MyArtifacts/build-ID . Name *string `locationName:"name" type:"string"` // Along with path and name, the pattern that AWS CodeBuild uses to determine @@ -4194,12 +4370,9 @@ type ProjectArtifacts struct { // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. 
// - // * If type is set to S3, valid values include: - // - // BUILD_ID: Include the build ID in the location of the build output artifact. - // - // NONE: Do not include the build ID. This is the default if namespaceType is - // not specified. + // * If type is set to S3, valid values include: BUILD_ID: Include the build + // ID in the location of the build output artifact. NONE: Do not include + // the build ID. This is the default if namespaceType is not specified. // // For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, // and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/build-ID/MyArtifact.zip. @@ -4220,13 +4393,10 @@ type ProjectArtifacts struct { // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. // - // * If type is set to S3, valid values include: - // - // NONE: AWS CodeBuild creates in the output bucket a folder that contains the - // build output. This is the default if packaging is not specified. - // - // ZIP: AWS CodeBuild creates in the output bucket a ZIP file that contains - // the build output. + // * If type is set to S3, valid values include: NONE: AWS CodeBuild creates + // in the output bucket a folder that contains the build output. This is + // the default if packaging is not specified. ZIP: AWS CodeBuild creates + // in the output bucket a ZIP file that contains the build output. Packaging *string `locationName:"packaging" type:"string" enum:"ArtifactPackaging"` // Along with namespaceType and name, the pattern that AWS CodeBuild uses to @@ -4250,7 +4420,7 @@ type ProjectArtifacts struct { // The type of build output artifact. Valid values include: // // * CODEPIPELINE: The build project has build output generated through AWS - // CodePipeline. + // CodePipeline. The CODEPIPELINE type is not supported for secondaryArtifacts. // // * NO_ARTIFACTS: The build project does not produce any build output. // @@ -4400,29 +4570,19 @@ type ProjectCache struct { // * LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode // is a good choice for projects that build or pull large Docker images. // It can prevent the performance issues caused by pulling large Docker images - // down from the network. - // - // You can use a Docker layer cache in the Linux enviornment only. - // - // The privileged flag must be set so that your project has the required Docker - // permissions. - // - // You should consider the security implications before you use a Docker layer - // cache. + // down from the network. You can use a Docker layer cache in the Linux environment + // only. The privileged flag must be set so that your project has the required + // Docker permissions. You should consider the security implications before + // you use a Docker layer cache. // // * LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec // file. This mode is a good choice if your build scenario is not suited // to one of the other three local cache modes. If you use a custom cache: - // - // - // Only directories can be specified for caching. You cannot specify individual - // files. - // - // Symlinks are used to reference cached directories. - // - // Cached directories are linked to your build before it downloads its project - // sources. Cached items are overriden if a source item has the same name. - // Directories are specified using cache paths in the buildspec file. + // Only directories can be specified for caching. 
You cannot specify individual + // files. Symlinks are used to reference cached directories. Cached directories + // are linked to your build before it downloads its project sources. Cached + // items are overridden if a source item has the same name. Directories are + // specified using cache paths in the buildspec file. Modes []*string `locationName:"modes" type:"list"` // The type of cache used by the build project. Valid values include: @@ -4495,6 +4655,9 @@ type ProjectEnvironment struct { // // * BUILD_GENERAL1_LARGE: Use up to 15 GB memory and 8 vCPUs for builds. // + // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) + // in the AWS CodeBuild User Guide. + // // ComputeType is a required field ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"` @@ -4531,27 +4694,28 @@ type ProjectEnvironment struct { ImagePullCredentialsType *string `locationName:"imagePullCredentialsType" type:"string" enum:"ImagePullCredentialsType"` // Enables running the Docker daemon inside a Docker container. Set to true - // only if the build project is be used to build Docker images, and the specified - // build environment image is not provided by AWS CodeBuild with Docker support. - // Otherwise, all associated builds that attempt to interact with the Docker - // daemon fail. You must also start the Docker daemon so that builds can interact - // with it. One way to do this is to initialize the Docker daemon during the - // install phase of your build spec by running the following build commands. - // (Do not run these commands if the specified build environment image is provided - // by AWS CodeBuild with Docker support.) + // only if the build project is used to build Docker images. Otherwise, a build + // that attempts to interact with the Docker daemon fails. The default setting + // is false. + // + // You can initialize the Docker daemon during the install phase of your build + // by adding one of the following sets of commands to the install phase of your + // buildspec file: // // If the operating system's base image is Ubuntu Linux: // // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 - // --storage-driver=overlay& - timeout 15 sh -c "until docker info; do echo - // .; sleep 1; done" + // --storage-driver=overlay& + // + // - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" - // If the operating system's base image is Alpine Linux, add the -t argument - // to timeout: + // If the operating system's base image is Alpine Linux and the previous command + // does not work, add the -t argument to timeout: // // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 - // --storage-driver=overlay& - timeout 15 -t sh -c "until docker info; do echo - // .; sleep 1; done" + // --storage-driver=overlay& + // + // - timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done" PrivilegedMode *bool `locationName:"privilegedMode" type:"boolean"` // The credentials for access to a private registry. @@ -4695,16 +4859,13 @@ type ProjectSource struct { // // * For source code in an AWS CodeCommit repository, the HTTPS clone URL // to the repository that contains the source code and the build spec (for - // example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name). + // example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name + // ).
// // * For source code in an Amazon Simple Storage Service (Amazon S3) input - // bucket, one of the following. - // - // The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip). - // - // - // The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/). - // + // bucket, one of the following. The path to the ZIP file that contains the + // source code (for example, bucket-name/path/to/object-name.zip). The path + // to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/). // // * For source code in a GitHub repository, the HTTPS clone URL to the repository // that contains the source and the build spec. You must connect your AWS @@ -4734,6 +4895,9 @@ type ProjectSource struct { // provider. This option is valid only when your source provider is GitHub, // GitHub Enterprise, or Bitbucket. If this is set and you use a different source // provider, an invalidInputException is thrown. + // + // The status of a build triggered by a webhook is always reported to your source + // provider. ReportBuildStatus *bool `locationName:"reportBuildStatus" type:"boolean"` // An identifier for this project source. @@ -4751,6 +4915,8 @@ type ProjectSource struct { // // * GITHUB: The source code is in a GitHub repository. // + // * GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise repository. + // // * NO_SOURCE: The project does not have input source code. // // * S3: The source code is in an Amazon Simple Storage Service (Amazon S3) @@ -4876,6 +5042,9 @@ type ProjectSourceVersion struct { // * For Amazon Simple Storage Service (Amazon S3): the version ID of the // object that represents the build input ZIP file to use. // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + // // SourceVersion is a required field SourceVersion *string `locationName:"sourceVersion" type:"string" required:"true"` } @@ -4923,7 +5092,7 @@ func (s *ProjectSourceVersion) SetSourceVersion(v string) *ProjectSourceVersion // // * imagePullCredentialsType must be set to SERVICE_ROLE. // -// * images cannot be curated or an Amazon ECR image. +// * images cannot be curated or an Amazon ECR image. // // For more information, see Private Registry with AWS Secrets Manager Sample // for AWS CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-private-registry.html). @@ -5062,6 +5231,7 @@ type SourceAuth struct { // The resource value that applies to the specified authorization type. Resource *string `locationName:"resource" type:"string"` + // // This data type is deprecated and is no longer accurate or used. // // The authorization type to use. The only valid value is OAUTH, which represents @@ -5243,6 +5413,9 @@ type StartBuildInput struct { // Set to true to report to your source provider the status of a build's start // and completion. If you use this option with a source provider other than // GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown. + // + // The status of a build triggered by a webhook is always reported to your source + // provider. ReportBuildStatusOverride *bool `locationName:"reportBuildStatusOverride" type:"boolean"` // An array of ProjectArtifacts objects. 
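The build-level sourceVersion override introduced in these hunks can be exercised as in the following minimal sketch against this SDK version; it assumes a CodeBuild project already exists, and the project name "my-project" and version "pr/25" are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	// Credentials and region are taken from the environment.
	sess := session.Must(session.NewSession())
	svc := codebuild.New(sess)

	// SourceVersion set here (build level) takes precedence over any
	// sourceVersion configured on the project itself, per the docs above.
	out, err := svc.StartBuild(&codebuild.StartBuildInput{
		ProjectName:   aws.String("my-project"), // placeholder project name
		SourceVersion: aws.String("pr/25"),      // e.g. a GitHub pull request
	})
	if err != nil {
		log.Fatal(err)
	}
	// BuildNumber is one of the fields added by this vendor bump.
	fmt.Println(aws.Int64Value(out.Build.BuildNumber))
}
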
@@ -5291,6 +5464,12 @@ type StartBuildInput struct { // // * For Amazon Simple Storage Service (Amazon S3): the version ID of the // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the project level, then this sourceVersion + // (at the build level) takes precedence. + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, @@ -5737,7 +5916,7 @@ type UpdateProjectInput struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name). + // the CMK's alias (using the format alias/alias-name ). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // Information to be changed about the build environment for the build project. @@ -5760,6 +5939,11 @@ type UpdateProjectInput struct { // An array of ProjectSource objects. SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` + // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified + // at the build level, then they take over these secondarySourceVersions (at + // the project level). + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + // An array of ProjectSource objects. SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` @@ -5772,6 +5956,33 @@ type UpdateProjectInput struct { // project. Source *ProjectSource `locationName:"source" type:"structure"` + // A version of the build input to be built for this project. If not specified, + // the latest version is used. If specified, it must be one of: + // + // * For AWS CodeCommit: the commit ID to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the build level, then that version takes + // precedence over this sourceVersion (at the project level). + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` + // The replacement set of tags for this build project. 
// // These tags are available for use by AWS services that support AWS CodeBuild @@ -5847,6 +6058,16 @@ func (s *UpdateProjectInput) Validate() error { } } } + if s.SecondarySourceVersions != nil { + for i, v := range s.SecondarySourceVersions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourceVersions", i), err.(request.ErrInvalidParams)) + } + } + } if s.SecondarySources != nil { for i, v := range s.SecondarySources { if v == nil { @@ -5944,6 +6165,12 @@ func (s *UpdateProjectInput) SetSecondaryArtifacts(v []*ProjectArtifacts) *Updat return s } +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *UpdateProjectInput) SetSecondarySourceVersions(v []*ProjectSourceVersion) *UpdateProjectInput { + s.SecondarySourceVersions = v + return s +} + // SetSecondarySources sets the SecondarySources field's value. func (s *UpdateProjectInput) SetSecondarySources(v []*ProjectSource) *UpdateProjectInput { s.SecondarySources = v @@ -5962,6 +6189,12 @@ func (s *UpdateProjectInput) SetSource(v *ProjectSource) *UpdateProjectInput { return s } +// SetSourceVersion sets the SourceVersion field's value. +func (s *UpdateProjectInput) SetSourceVersion(v string) *UpdateProjectInput { + s.SourceVersion = &v + return s +} + // SetTags sets the Tags field's value. func (s *UpdateProjectInput) SetTags(v []*Tag) *UpdateProjectInput { s.Tags = v @@ -6015,7 +6248,8 @@ type UpdateWebhookInput struct { BranchFilter *string `locationName:"branchFilter" type:"string"` // An array of arrays of WebhookFilter objects used to determine if a webhook - // event can trigger a build. A filter group must pcontain at least one EVENTWebhookFilter. + // event can trigger a build. A filter group must contain at least one EVENT + // WebhookFilter. FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` // The name of the AWS CodeBuild project. @@ -6267,32 +6501,42 @@ type WebhookFilter struct { // The type of webhook filter. There are five webhook filter types: EVENT, ACTOR_ACCOUNT_ID, // HEAD_REF, BASE_REF, and FILE_PATH. // - // EVENT A webhook event triggers a build when the provided pattern matches - // one of four event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, - // and PULL_REQUEST_REOPENED. The EVENT patterns are specified as a comma-separated - // string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters - // all push, pull request created, and pull request updated events. + // EVENT + // + // A webhook event triggers a build when the provided pattern matches one of + // four event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, and PULL_REQUEST_REOPENED. + // The EVENT patterns are specified as a comma-separated string. For example, + // PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request + // created, and pull request updated events. // - // The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. + // The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. // - // ACTOR_ACCOUNT_ID A webhook event triggers a build when a GitHub, GitHub - // Enterprise, or Bitbucket account ID matches the regular expression pattern. + // ACTOR_ACCOUNT_ID // - // HEAD_REF A webhook event triggers a build when the head reference matches - // the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.
+ // A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket + // account ID matches the regular expression pattern. // - // Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise + // HEAD_REF + // + // A webhook event triggers a build when the head reference matches the regular + // expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name. + // + // Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise // pull request, Bitbucket push, and Bitbucket pull request events. // - // BASE_REF A webhook event triggers a build when the base reference matches - // the regular expression pattern. For example, refs/heads/branch-name. + // BASE_REF + // + // A webhook event triggers a build when the base reference matches the regular + // expression pattern. For example, refs/heads/branch-name. // - // Works with pull request events only. + // Works with pull request events only. // - // FILE_PATH A webhook triggers a build when the path of a changed file matches - // the regular expression pattern. + // FILE_PATH // - // Works with GitHub and GitHub Enterprise push events only. + // A webhook triggers a build when the path of a changed file matches the regular + // expression pattern. + // + // Works with GitHub and GitHub Enterprise push events only. // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"WebhookFilterType"` @@ -6451,6 +6695,9 @@ const ( // EnvironmentVariableTypeParameterStore is a EnvironmentVariableType enum value EnvironmentVariableTypeParameterStore = "PARAMETER_STORE" + + // EnvironmentVariableTypeSecretsManager is a EnvironmentVariableType enum value + EnvironmentVariableTypeSecretsManager = "SECRETS_MANAGER" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/doc.go index ac6bf62871f..880ee966667 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/doc.go @@ -11,7 +11,8 @@ // Maven, Gradle, and more. You can also fully customize build environments // in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically // to meet peak build requests. You pay only for the build time you consume. -// For more information about AWS CodeBuild, see the AWS CodeBuild User Guide. +// For more information about AWS CodeBuild, see the AWS CodeBuild User Guide +// (https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html). // // AWS CodeBuild supports these operations: // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go index b9ff2b6f76e..d04b3dab2b4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go @@ -46,11 +46,11 @@ const ( // svc := codebuild.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeBuild { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeBuild { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeBuild { svc := &CodeBuild{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-06", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index bd901e84aef..d8a0fd563f0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -13,6 +13,266 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) +const opBatchDescribeMergeConflicts = "BatchDescribeMergeConflicts" + +// BatchDescribeMergeConflictsRequest generates a "aws/request.Request" representing the +// client's request for the BatchDescribeMergeConflicts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchDescribeMergeConflicts for more information on using the BatchDescribeMergeConflicts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchDescribeMergeConflictsRequest method. +// req, resp := client.BatchDescribeMergeConflictsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchDescribeMergeConflicts +func (c *CodeCommit) BatchDescribeMergeConflictsRequest(input *BatchDescribeMergeConflictsInput) (req *request.Request, output *BatchDescribeMergeConflictsOutput) { + op := &request.Operation{ + Name: opBatchDescribeMergeConflicts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDescribeMergeConflictsInput{} + } + + output = &BatchDescribeMergeConflictsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchDescribeMergeConflicts API operation for AWS CodeCommit. +// +// Returns information about one or more merge conflicts in the attempted merge +// of two commit specifiers using the squash or three-way merge strategy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation BatchDescribeMergeConflicts for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeMergeOptionRequiredException "MergeOptionRequiredException" +// A merge option or strategy is required, and none was provided. +// +// * ErrCodeInvalidMergeOptionException "InvalidMergeOptionException" +// The specified merge option is not valid for this operation. Not all merge +// strategies are supported for all operations. +// +// * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" +// The specified continuation token is not valid. +// +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. +// +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. +// +// * ErrCodeInvalidMaxConflictFilesException "InvalidMaxConflictFilesException" +// The specified value for the number of conflict files to return is not valid. +// +// * ErrCodeInvalidMaxMergeHunksException "InvalidMaxMergeHunksException" +// The specified value for the number of merge hunks to return is not valid. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available.
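A hedged usage sketch for the BatchDescribeMergeConflicts operation added above; the repository name and commit specifiers are illustrative, and the MergeOptionTypeEnum constant is assumed to exist in this SDK version:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	svc := codecommit.New(session.Must(session.NewSession()))

	out, err := svc.BatchDescribeMergeConflicts(&codecommit.BatchDescribeMergeConflictsInput{
		RepositoryName:             aws.String("my-repo"), // illustrative
		SourceCommitSpecifier:      aws.String("feature"), // branch, tag, or full commit ID
		DestinationCommitSpecifier: aws.String("master"),
		MergeOption:                aws.String(codecommit.MergeOptionTypeEnumThreeWayMerge),
	})
	if err != nil {
		// May be any of the error codes catalogued above, e.g.
		// TipsDivergenceExceededException.
		fmt.Println("BatchDescribeMergeConflicts failed:", err)
		return
	}
	for _, c := range out.Conflicts {
		fmt.Println("conflicted file:", aws.StringValue(c.ConflictMetadata.FilePath))
	}
}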
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchDescribeMergeConflicts +func (c *CodeCommit) BatchDescribeMergeConflicts(input *BatchDescribeMergeConflictsInput) (*BatchDescribeMergeConflictsOutput, error) { + req, out := c.BatchDescribeMergeConflictsRequest(input) + return out, req.Send() +} + +// BatchDescribeMergeConflictsWithContext is the same as BatchDescribeMergeConflicts with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDescribeMergeConflicts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) BatchDescribeMergeConflictsWithContext(ctx aws.Context, input *BatchDescribeMergeConflictsInput, opts ...request.Option) (*BatchDescribeMergeConflictsOutput, error) { + req, out := c.BatchDescribeMergeConflictsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchGetCommits = "BatchGetCommits" + +// BatchGetCommitsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetCommits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetCommits for more information on using the BatchGetCommits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetCommitsRequest method. +// req, resp := client.BatchGetCommitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchGetCommits +func (c *CodeCommit) BatchGetCommitsRequest(input *BatchGetCommitsInput) (req *request.Request, output *BatchGetCommitsOutput) { + op := &request.Operation{ + Name: opBatchGetCommits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetCommitsInput{} + } + + output = &BatchGetCommitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetCommits API operation for AWS CodeCommit. +// +// Returns information about the contents of one or more commits in a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation BatchGetCommits for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCommitIdsListRequiredException "CommitIdsListRequiredException" +// +// * ErrCodeCommitIdsLimitExceededException "CommitIdsLimitExceededException" +// The maximum number of allowed commit IDs in a batch request is 100. Verify +// that your batch request contains no more than 100 commit IDs, and then try +// again.
+// +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchGetCommits +func (c *CodeCommit) BatchGetCommits(input *BatchGetCommitsInput) (*BatchGetCommitsOutput, error) { + req, out := c.BatchGetCommitsRequest(input) + return out, req.Send() +} + +// BatchGetCommitsWithContext is the same as BatchGetCommits with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetCommits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) BatchGetCommitsWithContext(ctx aws.Context, input *BatchGetCommitsInput, opts ...request.Option) (*BatchGetCommitsOutput, error) { + req, out := c.BatchGetCommitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opBatchGetRepositories = "BatchGetRepositories" // BatchGetRepositoriesRequest generates a "aws/request.Request" representing the @@ -391,14 +651,14 @@ func (c *CodeCommit) CreateCommitRequest(input *CreateCommitInput) (req *request // // * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" // The file cannot be added because it is too large. The maximum file size that -// can be added using PutFile is 6 MB, and the combined file content change -// size is 7 MB. Consider making these changes using a Git client. +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. // // * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" // The commit cannot be created because at least one of the overall changes -// in the commit result in a folder contents exceeding the limit of 6 MB. Either -// reduce the number and size of your changes, or split the changes across multiple -// folders. +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders. 
// // * ErrCodeInvalidDeletionParameterException "InvalidDeletionParameterException" // The specified deletion parameter is not valid. @@ -417,8 +677,7 @@ func (c *CodeCommit) CreateCommitRequest(input *CreateCommitInput) (req *request // // * ErrCodeNameLengthExceededException "NameLengthExceededException" // The user name is not valid because it has exceeded the character limit for -// file names. File names, including the path to the file, cannot exceed the -// character limit. +// author names. // // * ErrCodeInvalidEmailException "InvalidEmailException" // The specified email address either contains one or more characters that are @@ -751,6 +1010,18 @@ func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // +// * ErrCodeInvalidTagsMapException "InvalidTagsMapException" +// The map of tags is not valid. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The maximum number of tags for an AWS CodeCommit resource has been exceeded. +// +// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException" +// The specified tag is not valid. Key names cannot be prefixed with aws:. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The tag policy is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateRepository func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) @@ -773,67 +1044,70 @@ func (c *CodeCommit) CreateRepositoryWithContext(ctx aws.Context, input *CreateR return out, req.Send() } -const opDeleteBranch = "DeleteBranch" +const opCreateUnreferencedMergeCommit = "CreateUnreferencedMergeCommit" -// DeleteBranchRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBranch operation. The "output" return +// CreateUnreferencedMergeCommitRequest generates a "aws/request.Request" representing the +// client's request for the CreateUnreferencedMergeCommit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteBranch for more information on using the DeleteBranch +// See CreateUnreferencedMergeCommit for more information on using the CreateUnreferencedMergeCommit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteBranchRequest method. -// req, resp := client.DeleteBranchRequest(params) +// // Example sending a request using the CreateUnreferencedMergeCommitRequest method. 
+// req, resp := client.CreateUnreferencedMergeCommitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch -func (c *CodeCommit) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Request, output *DeleteBranchOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateUnreferencedMergeCommit +func (c *CodeCommit) CreateUnreferencedMergeCommitRequest(input *CreateUnreferencedMergeCommitInput) (req *request.Request, output *CreateUnreferencedMergeCommitOutput) { op := &request.Operation{ - Name: opDeleteBranch, + Name: opCreateUnreferencedMergeCommit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteBranchInput{} + input = &CreateUnreferencedMergeCommitInput{} } - output = &DeleteBranchOutput{} + output = &CreateUnreferencedMergeCommitOutput{} req = c.newRequest(op, input, output) return } -// DeleteBranch API operation for AWS CodeCommit. +// CreateUnreferencedMergeCommit API operation for AWS CodeCommit. // -// Deletes a branch from a repository, unless that branch is the default branch -// for the repository. +// Creates an unreferenced commit that represents the result of merging two +// branches using a specified merge strategy. This can help you determine the +// outcome of a potential merge. This API cannot be used with the fast-forward +// merge strategy, as that strategy does not create a merge commit. +// +// This unreferenced merge commit can only be accessed using the GetCommit API +// or through git commands such as git fetch. To retrieve this commit, you must +// specify its commit ID or otherwise reference it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation DeleteBranch for usage and error information. +// API operation CreateUnreferencedMergeCommit for usage and error information. // // Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" // A repository name is required but was not specified. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. -// // * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" // At least one specified repository name is not valid. // @@ -841,33 +1115,246 @@ func (c *CodeCommit) DeleteBranchRequest(input *DeleteBranchInput) (req *request // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. // -// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" -// A branch name is required but was not specified. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" -// The specified reference name is not valid. +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. 
// -// * ErrCodeDefaultBranchCannotBeDeletedException "DefaultBranchCannotBeDeletedException" -// The specified branch is the default branch for the repository, and cannot -// be deleted. To delete this branch, you must first set another branch as the -// default branch. +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. // -// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" -// An encryption integrity check failed. +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. // -// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" -// An encryption key could not be accessed. +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. // -// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" -// The encryption key is disabled. +// * ErrCodeMergeOptionRequiredException "MergeOptionRequiredException" +// A merge option or strategy is required, and none was provided. // -// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" -// No encryption key was found. +// * ErrCodeInvalidMergeOptionException "InvalidMergeOptionException" +// The specified merge option is not valid for this operation. Not all merge +// strategies are supported for all operations. // -// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" -// The encryption key is not available. +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeInvalidConflictResolutionException "InvalidConflictResolutionException" +// The specified conflict resolution list is not valid. +// +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. +// +// * ErrCodeMaximumConflictResolutionEntriesExceededException "MaximumConflictResolutionEntriesExceededException" +// The number of allowed conflict resolution entries was exceeded. +// +// * ErrCodeMultipleConflictResolutionEntriesException "MultipleConflictResolutionEntriesException" +// More than one conflict resolution entry exists for the conflict. A conflict +// can have only one conflict resolution entry. +// +// * ErrCodeReplacementTypeRequiredException "ReplacementTypeRequiredException" +// A replacement type is required. +// +// * ErrCodeInvalidReplacementTypeException "InvalidReplacementTypeException" +// Automerge was specified for resolving the conflict, but the specified replacement +// type is not valid. +// +// * ErrCodeReplacementContentRequiredException "ReplacementContentRequiredException" +// USE_NEW_CONTENT was specified but no replacement content has been provided. +// +// * ErrCodeInvalidReplacementContentException "InvalidReplacementContentException" +// Automerge was specified for resolving the conflict, but the replacement type +// is not valid or content is missing.
+// +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. +// +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. +// +// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" +// The file cannot be added because it is too large. The maximum file size that +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. +// +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The commit cannot be created because at least one of the overall changes +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. +// +// * ErrCodeFileModeRequiredException "FileModeRequiredException" +// The commit cannot be created because a file mode is required to update mode +// permissions for an existing file, but no file mode has been specified. +// +// * ErrCodeInvalidFileModeException "InvalidFileModeException" +// The specified file mode permission is not valid. For a list of valid file +// mode permissions, see PutFile. +// +// * ErrCodeNameLengthExceededException "NameLengthExceededException" +// The user name is not valid because it has exceeded the character limit for +// author names. +// +// * ErrCodeInvalidEmailException "InvalidEmailException" +// The specified email address either contains one or more characters that are +// not allowed, or it exceeds the maximum number of characters allowed for an +// email address. +// +// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" +// The commit message is too long. Provide a shorter string. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. 
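To make the long error catalogue above concrete, a minimal sketch of calling CreateUnreferencedMergeCommit; every name is illustrative, and the enum constants (MergeOptionTypeEnumSquashMerge, ConflictResolutionStrategyTypeEnumAcceptSource) are assumed from this SDK version:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	svc := codecommit.New(session.Must(session.NewSession()))

	out, err := svc.CreateUnreferencedMergeCommit(&codecommit.CreateUnreferencedMergeCommitInput{
		RepositoryName:             aws.String("my-repo"), // illustrative
		SourceCommitSpecifier:      aws.String("feature"),
		DestinationCommitSpecifier: aws.String("master"),
		MergeOption:                aws.String(codecommit.MergeOptionTypeEnumSquashMerge),
		ConflictResolutionStrategy: aws.String(codecommit.ConflictResolutionStrategyTypeEnumAcceptSource),
		CommitMessage:              aws.String("Trial squash merge of feature into master"),
	})
	if err != nil {
		fmt.Println("CreateUnreferencedMergeCommit failed:", err)
		return
	}
	// The commit is unreferenced: no branch points at it, so record the ID
	// (retrievable later via GetCommit or git fetch, per the doc comment above).
	fmt.Println("merge commit:", aws.StringValue(out.CommitId))
}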
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateUnreferencedMergeCommit +func (c *CodeCommit) CreateUnreferencedMergeCommit(input *CreateUnreferencedMergeCommitInput) (*CreateUnreferencedMergeCommitOutput, error) { + req, out := c.CreateUnreferencedMergeCommitRequest(input) + return out, req.Send() +} + +// CreateUnreferencedMergeCommitWithContext is the same as CreateUnreferencedMergeCommit with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUnreferencedMergeCommit for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) CreateUnreferencedMergeCommitWithContext(ctx aws.Context, input *CreateUnreferencedMergeCommitInput, opts ...request.Option) (*CreateUnreferencedMergeCommitOutput, error) { + req, out := c.CreateUnreferencedMergeCommitRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBranch = "DeleteBranch" + +// DeleteBranchRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBranch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBranch for more information on using the DeleteBranch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBranchRequest method. +// req, resp := client.DeleteBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch +func (c *CodeCommit) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Request, output *DeleteBranchOutput) { + op := &request.Operation{ + Name: opDeleteBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBranchInput{} + } + + output = &DeleteBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBranch API operation for AWS CodeCommit. +// +// Deletes a branch from a repository, unless that branch is the default branch +// for the repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation DeleteBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. 
+// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" +// A branch name is required but was not specified. +// +// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" +// The specified reference name is not valid. +// +// * ErrCodeDefaultBranchCannotBeDeletedException "DefaultBranchCannotBeDeletedException" +// The specified branch is the default branch for the repository, and cannot +// be deleted. To delete this branch, you must first set another branch as the +// default branch. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch func (c *CodeCommit) DeleteBranch(input *DeleteBranchInput) (*DeleteBranchOutput, error) { req, out := c.DeleteBranchRequest(input) return out, req.Send() @@ -1094,8 +1581,7 @@ func (c *CodeCommit) DeleteFileRequest(input *DeleteFileInput) (req *request.Req // // * ErrCodeNameLengthExceededException "NameLengthExceededException" // The user name is not valid because it has exceeded the character limit for -// file names. File names, including the path to the file, cannot exceed the -// character limit. +// author names. // // * ErrCodeInvalidEmailException "InvalidEmailException" // The specified email address either contains one or more characters that are @@ -1248,95 +1734,133 @@ func (c *CodeCommit) DeleteRepositoryWithContext(ctx aws.Context, input *DeleteR return out, req.Send() } -const opDescribePullRequestEvents = "DescribePullRequestEvents" +const opDescribeMergeConflicts = "DescribeMergeConflicts" -// DescribePullRequestEventsRequest generates a "aws/request.Request" representing the -// client's request for the DescribePullRequestEvents operation. The "output" return +// DescribeMergeConflictsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMergeConflicts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribePullRequestEvents for more information on using the DescribePullRequestEvents +// See DescribeMergeConflicts for more information on using the DescribeMergeConflicts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribePullRequestEventsRequest method. 
-// req, resp := client.DescribePullRequestEventsRequest(params) +// // Example sending a request using the DescribeMergeConflictsRequest method. +// req, resp := client.DescribeMergeConflictsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribePullRequestEvents -func (c *CodeCommit) DescribePullRequestEventsRequest(input *DescribePullRequestEventsInput) (req *request.Request, output *DescribePullRequestEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribeMergeConflicts +func (c *CodeCommit) DescribeMergeConflictsRequest(input *DescribeMergeConflictsInput) (req *request.Request, output *DescribeMergeConflictsOutput) { op := &request.Operation{ - Name: opDescribePullRequestEvents, + Name: opDescribeMergeConflicts, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", + LimitToken: "maxMergeHunks", TruncationToken: "", }, } if input == nil { - input = &DescribePullRequestEventsInput{} + input = &DescribeMergeConflictsInput{} } - output = &DescribePullRequestEventsOutput{} + output = &DescribeMergeConflictsOutput{} req = c.newRequest(op, input, output) return } -// DescribePullRequestEvents API operation for AWS CodeCommit. +// DescribeMergeConflicts API operation for AWS CodeCommit. // -// Returns information about one or more pull request events. +// Returns information about one or more merge conflicts in the attempted merge +// of two commit specifiers using the squash or three-way merge strategy. If +// the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, +// an exception will be thrown. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation DescribePullRequestEvents for usage and error information. +// API operation DescribeMergeConflicts for usage and error information. // // Returned Error Codes: -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. -// -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// * ErrCodeInvalidPullRequestEventTypeException "InvalidPullRequestEventTypeException" -// The pull request event type is not valid. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. 
// -// * ErrCodeInvalidActorArnException "InvalidActorArnException" -// The Amazon Resource Name (ARN) is not valid. Make sure that you have provided -// the full ARN for the user who initiated the change for the pull request, -// and then try again. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodeActorDoesNotExistException "ActorDoesNotExistException" -// The specified Amazon Resource Name (ARN) does not exist in the AWS account. +// * ErrCodeMergeOptionRequiredException "MergeOptionRequiredException" +// A merge option or strategy is required, and none was provided. // -// * ErrCodeInvalidMaxResultsException "InvalidMaxResultsException" -// The specified number of maximum results is not valid. +// * ErrCodeInvalidMergeOptionException "InvalidMergeOptionException" +// The specified merge option is not valid for this operation. Not all merge +// strategies are supported for all operations. // // * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" // The specified continuation token is not valid. // +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. +// +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. +// +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. +// +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. +// +// * ErrCodeFileDoesNotExistException "FileDoesNotExistException" +// The specified file does not exist. Verify that you have provided the correct +// name of the file, including its full path and extension. +// +// * ErrCodeInvalidMaxMergeHunksException "InvalidMaxMergeHunksException" +// The specified value for the number of merge hunks to return is not valid. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// @@ -1352,29 +1876,207 @@ func (c *CodeCommit) DescribePullRequestEventsRequest(input *DescribePullRequest // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available.
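Because DescribeMergeConflicts paginates on maxMergeHunks (per the paginator definition above), a hedged sketch of walking every merge hunk for a single conflicted file; the names, page size, and field access are illustrative assumptions about this SDK version:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	svc := codecommit.New(session.Must(session.NewSession()))

	input := &codecommit.DescribeMergeConflictsInput{
		RepositoryName:             aws.String("my-repo"), // illustrative
		SourceCommitSpecifier:      aws.String("feature"),
		DestinationCommitSpecifier: aws.String("master"),
		MergeOption:                aws.String(codecommit.MergeOptionTypeEnumThreeWayMerge),
		FilePath:                   aws.String("README.md"), // one conflicted file per call
		MaxMergeHunks:              aws.Int64(50),           // page size; the paginator's LimitToken
	}

	// The callback receives one page of merge hunks at a time; returning true
	// requests the next page, mirroring the generated Pages example above.
	err := svc.DescribeMergeConflictsPages(input,
		func(page *codecommit.DescribeMergeConflictsOutput, lastPage bool) bool {
			for _, h := range page.MergeHunks {
				fmt.Println("hunk conflicts:", aws.BoolValue(h.IsConflict))
			}
			return true
		})
	if err != nil {
		fmt.Println("DescribeMergeConflicts failed:", err)
	}
}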
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribePullRequestEvents -func (c *CodeCommit) DescribePullRequestEvents(input *DescribePullRequestEventsInput) (*DescribePullRequestEventsOutput, error) { - req, out := c.DescribePullRequestEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribeMergeConflicts +func (c *CodeCommit) DescribeMergeConflicts(input *DescribeMergeConflictsInput) (*DescribeMergeConflictsOutput, error) { + req, out := c.DescribeMergeConflictsRequest(input) return out, req.Send() } -// DescribePullRequestEventsWithContext is the same as DescribePullRequestEvents with the addition of +// DescribeMergeConflictsWithContext is the same as DescribeMergeConflicts with the addition of // the ability to pass a context and additional request options. // -// See DescribePullRequestEvents for details on how to use this API operation. +// See DescribeMergeConflicts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) DescribePullRequestEventsWithContext(ctx aws.Context, input *DescribePullRequestEventsInput, opts ...request.Option) (*DescribePullRequestEventsOutput, error) { - req, out := c.DescribePullRequestEventsRequest(input) +func (c *CodeCommit) DescribeMergeConflictsWithContext(ctx aws.Context, input *DescribeMergeConflictsInput, opts ...request.Option) (*DescribeMergeConflictsOutput, error) { + req, out := c.DescribeMergeConflictsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribePullRequestEventsPages iterates over the pages of a DescribePullRequestEvents operation, +// DescribeMergeConflictsPages iterates over the pages of a DescribeMergeConflicts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMergeConflicts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMergeConflicts operation. +// pageNum := 0 +// err := client.DescribeMergeConflictsPages(params, +// func(page *codecommit.DescribeMergeConflictsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeCommit) DescribeMergeConflictsPages(input *DescribeMergeConflictsInput, fn func(*DescribeMergeConflictsOutput, bool) bool) error { + return c.DescribeMergeConflictsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMergeConflictsPagesWithContext same as DescribeMergeConflictsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CodeCommit) DescribeMergeConflictsPagesWithContext(ctx aws.Context, input *DescribeMergeConflictsInput, fn func(*DescribeMergeConflictsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMergeConflictsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMergeConflictsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMergeConflictsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePullRequestEvents = "DescribePullRequestEvents" + +// DescribePullRequestEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePullRequestEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePullRequestEvents for more information on using the DescribePullRequestEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePullRequestEventsRequest method. +// req, resp := client.DescribePullRequestEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribePullRequestEvents +func (c *CodeCommit) DescribePullRequestEventsRequest(input *DescribePullRequestEventsInput) (req *request.Request, output *DescribePullRequestEventsOutput) { + op := &request.Operation{ + Name: opDescribePullRequestEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePullRequestEventsInput{} + } + + output = &DescribePullRequestEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePullRequestEvents API operation for AWS CodeCommit. +// +// Returns information about one or more pull request events. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation DescribePullRequestEvents for usage and error information. +// +// Returned Error Codes: +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. +// +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. +// +// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. 
+// +// * ErrCodeInvalidPullRequestEventTypeException "InvalidPullRequestEventTypeException" +// The pull request event type is not valid. +// +// * ErrCodeInvalidActorArnException "InvalidActorArnException" +// The Amazon Resource Name (ARN) is not valid. Make sure that you have provided +// the full ARN for the user who initiated the change for the pull request, +// and then try again. +// +// * ErrCodeActorDoesNotExistException "ActorDoesNotExistException" +// The specified Amazon Resource Name (ARN) does not exist in the AWS account. +// +// * ErrCodeInvalidMaxResultsException "InvalidMaxResultsException" +// The specified number of maximum results is not valid. +// +// * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" +// The specified continuation token is not valid. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DescribePullRequestEvents +func (c *CodeCommit) DescribePullRequestEvents(input *DescribePullRequestEventsInput) (*DescribePullRequestEventsOutput, error) { + req, out := c.DescribePullRequestEventsRequest(input) + return out, req.Send() +} + +// DescribePullRequestEventsWithContext is the same as DescribePullRequestEvents with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePullRequestEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) DescribePullRequestEventsWithContext(ctx aws.Context, input *DescribePullRequestEventsInput, opts ...request.Option) (*DescribePullRequestEventsOutput, error) { + req, out := c.DescribePullRequestEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePullRequestEventsPages iterates over the pages of a DescribePullRequestEvents operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // @@ -1385,7 +2087,7 @@ func (c *CodeCommit) DescribePullRequestEventsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribePullRequestEvents operation. 
// pageNum := 0 // err := client.DescribePullRequestEventsPages(params, -// func(page *DescribePullRequestEventsOutput, lastPage bool) bool { +// func(page *codecommit.DescribePullRequestEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1417,10 +2119,12 @@ func (c *CodeCommit) DescribePullRequestEventsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePullRequestEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePullRequestEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1518,7 +2222,7 @@ func (c *CodeCommit) GetBlobRequest(input *GetBlobInput) (req *request.Request, // * ErrCodeFileTooLargeException "FileTooLargeException" // The specified file exceeds the file size limit for AWS CodeCommit. For more // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide -// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). +// (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetBlob func (c *CodeCommit) GetBlob(input *GetBlobInput) (*GetBlobOutput, error) { @@ -1884,7 +2588,7 @@ func (c *CodeCommit) GetCommentsForComparedCommitWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a GetCommentsForComparedCommit operation. // pageNum := 0 // err := client.GetCommentsForComparedCommitPages(params, -// func(page *GetCommentsForComparedCommitOutput, lastPage bool) bool { +// func(page *codecommit.GetCommentsForComparedCommitOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1916,10 +2620,12 @@ func (c *CodeCommit) GetCommentsForComparedCommitPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCommentsForComparedCommitOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetCommentsForComparedCommitOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2077,7 +2783,7 @@ func (c *CodeCommit) GetCommentsForPullRequestWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a GetCommentsForPullRequest operation. // pageNum := 0 // err := client.GetCommentsForPullRequestPages(params, -// func(page *GetCommentsForPullRequestOutput, lastPage bool) bool { +// func(page *codecommit.GetCommentsForPullRequestOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2109,10 +2815,12 @@ func (c *CodeCommit) GetCommentsForPullRequestPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCommentsForPullRequestOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetCommentsForPullRequestOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2378,7 +3086,7 @@ func (c *CodeCommit) GetDifferencesWithContext(ctx aws.Context, input *GetDiffer // // Example iterating over at most 3 pages of a GetDifferences operation. 
// pageNum := 0 // err := client.GetDifferencesPages(params, -// func(page *GetDifferencesOutput, lastPage bool) bool { +// func(page *codecommit.GetDifferencesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2410,10 +3118,12 @@ func (c *CodeCommit) GetDifferencesPagesWithContext(ctx aws.Context, input *GetD }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDifferencesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetDifferencesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2519,7 +3229,7 @@ func (c *CodeCommit) GetFileRequest(input *GetFileInput) (req *request.Request, // * ErrCodeFileTooLargeException "FileTooLargeException" // The specified file exceeds the file size limit for AWS CodeCommit. For more // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide -// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). +// (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetFile func (c *CodeCommit) GetFile(input *GetFileInput) (*GetFileOutput, error) { @@ -2664,59 +3374,58 @@ func (c *CodeCommit) GetFolderWithContext(ctx aws.Context, input *GetFolderInput return out, req.Send() } -const opGetMergeConflicts = "GetMergeConflicts" +const opGetMergeCommit = "GetMergeCommit" -// GetMergeConflictsRequest generates a "aws/request.Request" representing the -// client's request for the GetMergeConflicts operation. The "output" return +// GetMergeCommitRequest generates a "aws/request.Request" representing the +// client's request for the GetMergeCommit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetMergeConflicts for more information on using the GetMergeConflicts +// See GetMergeCommit for more information on using the GetMergeCommit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetMergeConflictsRequest method. -// req, resp := client.GetMergeConflictsRequest(params) +// // Example sending a request using the GetMergeCommitRequest method. +// req, resp := client.GetMergeCommitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeConflicts -func (c *CodeCommit) GetMergeConflictsRequest(input *GetMergeConflictsInput) (req *request.Request, output *GetMergeConflictsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeCommit +func (c *CodeCommit) GetMergeCommitRequest(input *GetMergeCommitInput) (req *request.Request, output *GetMergeCommitOutput) { op := &request.Operation{ - Name: opGetMergeConflicts, + Name: opGetMergeCommit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetMergeConflictsInput{} + input = &GetMergeCommitInput{} } - output = &GetMergeConflictsOutput{} + output = &GetMergeCommitOutput{} req = c.newRequest(op, input, output) return } -// GetMergeConflicts API operation for AWS CodeCommit. 
+// GetMergeCommit API operation for AWS CodeCommit. // -// Returns information about merge conflicts between the before and after commit -// IDs for a pull request in a repository. +// Returns information about a specified merge commit. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation GetMergeConflicts for usage and error information. +// API operation GetMergeCommit for usage and error information. // // Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" @@ -2732,34 +3441,21 @@ func (c *CodeCommit) GetMergeConflictsRequest(input *GetMergeConflictsInput) (re // * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" // The specified repository does not exist. // -// * ErrCodeMergeOptionRequiredException "MergeOptionRequiredException" -// A merge option or stategy is required, and none was provided. -// -// * ErrCodeInvalidMergeOptionException "InvalidMergeOptionException" -// The specified merge option is not valid. The only valid value is FAST_FORWARD_MERGE. -// -// * ErrCodeInvalidDestinationCommitSpecifierException "InvalidDestinationCommitSpecifierException" -// The destination commit specifier is not valid. You must provide a valid branch -// name, tag, or full commit ID. -// -// * ErrCodeInvalidSourceCommitSpecifierException "InvalidSourceCommitSpecifierException" -// The source commit specifier is not valid. You must provide a valid branch -// name, tag, or full commit ID. -// // * ErrCodeCommitRequiredException "CommitRequiredException" // A commit was not specified. // +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. +// // * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" // The specified commit does not exist or no commit was specified, and the specified // repository has no default branch. // -// * ErrCodeInvalidCommitException "InvalidCommitException" -// The specified commit is not valid. +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. // -// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" -// The divergence between the tips of the provided commit specifiers is too -// great to determine whether there might be any merge conflicts. Locally compare -// the specifiers using git diff or a diff tool. +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -2776,93 +3472,150 @@ func (c *CodeCommit) GetMergeConflictsRequest(input *GetMergeConflictsInput) (re // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeConflicts -func (c *CodeCommit) GetMergeConflicts(input *GetMergeConflictsInput) (*GetMergeConflictsOutput, error) { - req, out := c.GetMergeConflictsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeCommit +func (c *CodeCommit) GetMergeCommit(input *GetMergeCommitInput) (*GetMergeCommitOutput, error) { + req, out := c.GetMergeCommitRequest(input) return out, req.Send() } -// GetMergeConflictsWithContext is the same as GetMergeConflicts with the addition of +// GetMergeCommitWithContext is the same as GetMergeCommit with the addition of // the ability to pass a context and additional request options. // -// See GetMergeConflicts for details on how to use this API operation. +// See GetMergeCommit for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) GetMergeConflictsWithContext(ctx aws.Context, input *GetMergeConflictsInput, opts ...request.Option) (*GetMergeConflictsOutput, error) { - req, out := c.GetMergeConflictsRequest(input) +func (c *CodeCommit) GetMergeCommitWithContext(ctx aws.Context, input *GetMergeCommitInput, opts ...request.Option) (*GetMergeCommitOutput, error) { + req, out := c.GetMergeCommitRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPullRequest = "GetPullRequest" +const opGetMergeConflicts = "GetMergeConflicts" -// GetPullRequestRequest generates a "aws/request.Request" representing the -// client's request for the GetPullRequest operation. The "output" return +// GetMergeConflictsRequest generates a "aws/request.Request" representing the +// client's request for the GetMergeConflicts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPullRequest for more information on using the GetPullRequest +// See GetMergeConflicts for more information on using the GetMergeConflicts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPullRequestRequest method. -// req, resp := client.GetPullRequestRequest(params) +// // Example sending a request using the GetMergeConflictsRequest method. 
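GetMergeCommit is new in this SDK revision; the pre-existing GetMergeConflicts operation is re-added below it in alphabetical order. A hedged sketch of calling it, reusing the client setup from the pagination sketch above (repository and branch names are placeholders; MergedCommitId is taken here as the output field holding the computed merge commit):

// Sketch only: assumes the imports and client from the pagination example.
func showMergeCommit(client *codecommit.CodeCommit) {
	out, err := client.GetMergeCommit(&codecommit.GetMergeCommitInput{
		RepositoryName:             aws.String("my-repo"), // placeholder
		SourceCommitSpecifier:      aws.String("feature"), // branch, tag, or full commit ID
		DestinationCommitSpecifier: aws.String("master"),
	})
	if err != nil {
		fmt.Println("GetMergeCommit failed:", err) // codes listed above, via awserr.Error
		return
	}
	fmt.Println("merged commit:", aws.StringValue(out.MergedCommitId))
}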
+// req, resp := client.GetMergeConflictsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequest -func (c *CodeCommit) GetPullRequestRequest(input *GetPullRequestInput) (req *request.Request, output *GetPullRequestOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeConflicts +func (c *CodeCommit) GetMergeConflictsRequest(input *GetMergeConflictsInput) (req *request.Request, output *GetMergeConflictsOutput) { op := &request.Operation{ - Name: opGetPullRequest, + Name: opGetMergeConflicts, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxConflictFiles", + TruncationToken: "", + }, } if input == nil { - input = &GetPullRequestInput{} + input = &GetMergeConflictsInput{} } - output = &GetPullRequestOutput{} + output = &GetMergeConflictsOutput{} req = c.newRequest(op, input, output) return } -// GetPullRequest API operation for AWS CodeCommit. +// GetMergeConflicts API operation for AWS CodeCommit. // -// Gets information about a pull request in a specified repository. +// Returns information about merge conflicts between the before and after commit +// IDs for a pull request in a repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation GetPullRequest for usage and error information. +// API operation GetMergeConflicts for usage and error information. // // Returned Error Codes: -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeMergeOptionRequiredException "MergeOptionRequiredException" +// A merge option or stategy is required, and none was provided. +// +// * ErrCodeInvalidMergeOptionException "InvalidMergeOptionException" +// The specified merge option is not valid for this operation. Not all merge +// strategies are supported for all operations. +// +// * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" +// The specified continuation token is not valid. 
+// +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. +// +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. +// +// * ErrCodeInvalidMaxConflictFilesException "InvalidMaxConflictFilesException" +// The specified value for the number of conflict files to return is not valid. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidDestinationCommitSpecifierException "InvalidDestinationCommitSpecifierException" +// The destination commit specifier is not valid. You must provide a valid branch +// name, tag, or full commit ID. +// +// * ErrCodeInvalidSourceCommitSpecifierException "InvalidSourceCommitSpecifierException" +// The source commit specifier is not valid. You must provide a valid branch +// name, tag, or full commit ID. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -2879,94 +3632,139 @@ func (c *CodeCommit) GetPullRequestRequest(input *GetPullRequestInput) (req *req // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequest -func (c *CodeCommit) GetPullRequest(input *GetPullRequestInput) (*GetPullRequestOutput, error) { - req, out := c.GetPullRequestRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeConflicts +func (c *CodeCommit) GetMergeConflicts(input *GetMergeConflictsInput) (*GetMergeConflictsOutput, error) { + req, out := c.GetMergeConflictsRequest(input) return out, req.Send() } -// GetPullRequestWithContext is the same as GetPullRequest with the addition of +// GetMergeConflictsWithContext is the same as GetMergeConflicts with the addition of // the ability to pass a context and additional request options. // -// See GetPullRequest for details on how to use this API operation. +// See GetMergeConflicts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *CodeCommit) GetPullRequestWithContext(ctx aws.Context, input *GetPullRequestInput, opts ...request.Option) (*GetPullRequestOutput, error) { - req, out := c.GetPullRequestRequest(input) +func (c *CodeCommit) GetMergeConflictsWithContext(ctx aws.Context, input *GetMergeConflictsInput, opts ...request.Option) (*GetMergeConflictsOutput, error) { + req, out := c.GetMergeConflictsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRepository = "GetRepository" +// GetMergeConflictsPages iterates over the pages of a GetMergeConflicts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetMergeConflicts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetMergeConflicts operation. +// pageNum := 0 +// err := client.GetMergeConflictsPages(params, +// func(page *codecommit.GetMergeConflictsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeCommit) GetMergeConflictsPages(input *GetMergeConflictsInput, fn func(*GetMergeConflictsOutput, bool) bool) error { + return c.GetMergeConflictsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetRepositoryRequest generates a "aws/request.Request" representing the -// client's request for the GetRepository operation. The "output" return +// GetMergeConflictsPagesWithContext same as GetMergeConflictsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) GetMergeConflictsPagesWithContext(ctx aws.Context, input *GetMergeConflictsInput, fn func(*GetMergeConflictsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetMergeConflictsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetMergeConflictsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetMergeConflictsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetMergeOptions = "GetMergeOptions" + +// GetMergeOptionsRequest generates a "aws/request.Request" representing the +// client's request for the GetMergeOptions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRepository for more information on using the GetRepository +// See GetMergeOptions for more information on using the GetMergeOptions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRepositoryRequest method. 
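Unlike its previous incarnation, the re-added GetMergeConflicts is paginated (nextToken in and out, maxConflictFiles as the page-size limit), which is why it now gains Pages helpers. A sketch under the same assumptions as above; MergeOption is required, and ConflictMetadataList is assumed to be the per-page list of conflicted files:

// Sketch only: walks every page of merge conflicts between two branches.
func listConflicts(client *codecommit.CodeCommit) error {
	return client.GetMergeConflictsPages(&codecommit.GetMergeConflictsInput{
		RepositoryName:             aws.String("my-repo"),         // placeholder
		SourceCommitSpecifier:      aws.String("feature"),
		DestinationCommitSpecifier: aws.String("master"),
		MergeOption:                aws.String("THREE_WAY_MERGE"), // required by this API
		MaxConflictFiles:           aws.Int64(25),                 // page size (the LimitToken above)
	}, func(page *codecommit.GetMergeConflictsOutput, lastPage bool) bool {
		fmt.Println(len(page.ConflictMetadataList), "conflicted files on this page")
		return true // keep paging until nextToken runs out
	})
}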
-// req, resp := client.GetRepositoryRequest(params) +// // Example sending a request using the GetMergeOptionsRequest method. +// req, resp := client.GetMergeOptionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepository -func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeOptions +func (c *CodeCommit) GetMergeOptionsRequest(input *GetMergeOptionsInput) (req *request.Request, output *GetMergeOptionsOutput) { op := &request.Operation{ - Name: opGetRepository, + Name: opGetMergeOptions, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRepositoryInput{} + input = &GetMergeOptionsInput{} } - output = &GetRepositoryOutput{} + output = &GetMergeOptionsOutput{} req = c.newRequest(op, input, output) return } -// GetRepository API operation for AWS CodeCommit. -// -// Returns information about a repository. +// GetMergeOptions API operation for AWS CodeCommit. // -// The description field for a repository accepts all HTML characters and all -// valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// Returns information about the merge options available for merging two specified +// branches. For details about why a particular merge option is not available, +// use GetMergeConflicts or DescribeMergeConflicts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation GetRepository for usage and error information. +// API operation GetMergeOptions for usage and error information. // // Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" // A repository name is required but was not specified. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. -// // * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" // At least one specified repository name is not valid. // @@ -2974,6 +3772,37 @@ func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *reque // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. // +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. 
+// +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. // @@ -2989,94 +3818,93 @@ func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *reque // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepository -func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOutput, error) { - req, out := c.GetRepositoryRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetMergeOptions +func (c *CodeCommit) GetMergeOptions(input *GetMergeOptionsInput) (*GetMergeOptionsOutput, error) { + req, out := c.GetMergeOptionsRequest(input) return out, req.Send() } -// GetRepositoryWithContext is the same as GetRepository with the addition of +// GetMergeOptionsWithContext is the same as GetMergeOptions with the addition of // the ability to pass a context and additional request options. // -// See GetRepository for details on how to use this API operation. +// See GetMergeOptions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) GetRepositoryWithContext(ctx aws.Context, input *GetRepositoryInput, opts ...request.Option) (*GetRepositoryOutput, error) { - req, out := c.GetRepositoryRequest(input) +func (c *CodeCommit) GetMergeOptionsWithContext(ctx aws.Context, input *GetMergeOptionsInput, opts ...request.Option) (*GetMergeOptionsOutput, error) { + req, out := c.GetMergeOptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRepositoryTriggers = "GetRepositoryTriggers" +const opGetPullRequest = "GetPullRequest" -// GetRepositoryTriggersRequest generates a "aws/request.Request" representing the -// client's request for the GetRepositoryTriggers operation. The "output" return +// GetPullRequestRequest generates a "aws/request.Request" representing the +// client's request for the GetPullRequest operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
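GetMergeOptions answers a narrower question than GetMergeConflicts: which merge strategies could succeed for these two commit specifiers right now. A sketch under the same assumptions; MergeOptions is assumed to come back as a list of strategy names:

// Sketch only: asks which strategies can merge feature into master.
func showMergeOptions(client *codecommit.CodeCommit) {
	out, err := client.GetMergeOptions(&codecommit.GetMergeOptionsInput{
		RepositoryName:             aws.String("my-repo"), // placeholder
		SourceCommitSpecifier:      aws.String("feature"),
		DestinationCommitSpecifier: aws.String("master"),
	})
	if err != nil {
		fmt.Println("GetMergeOptions failed:", err)
		return
	}
	// e.g. [FAST_FORWARD_MERGE SQUASH_MERGE THREE_WAY_MERGE]
	fmt.Println(aws.StringValueSlice(out.MergeOptions))
}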
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRepositoryTriggers for more information on using the GetRepositoryTriggers +// See GetPullRequest for more information on using the GetPullRequest // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRepositoryTriggersRequest method. -// req, resp := client.GetRepositoryTriggersRequest(params) +// // Example sending a request using the GetPullRequestRequest method. +// req, resp := client.GetPullRequestRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepositoryTriggers -func (c *CodeCommit) GetRepositoryTriggersRequest(input *GetRepositoryTriggersInput) (req *request.Request, output *GetRepositoryTriggersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequest +func (c *CodeCommit) GetPullRequestRequest(input *GetPullRequestInput) (req *request.Request, output *GetPullRequestOutput) { op := &request.Operation{ - Name: opGetRepositoryTriggers, + Name: opGetPullRequest, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRepositoryTriggersInput{} + input = &GetPullRequestInput{} } - output = &GetRepositoryTriggersOutput{} + output = &GetPullRequestOutput{} req = c.newRequest(op, input, output) return } -// GetRepositoryTriggers API operation for AWS CodeCommit. +// GetPullRequest API operation for AWS CodeCommit. // -// Gets information about triggers configured for a repository. +// Gets information about a pull request in a specified repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation GetRepositoryTriggers for usage and error information. +// API operation GetPullRequest for usage and error information. // // Returned Error Codes: -// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" -// A repository name is required but was not specified. -// -// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" -// At least one specified repository name is not valid. +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. // -// This exception only occurs when a specified repository name is not valid. -// Other exceptions occur when a required repository parameter is missing, or -// when a specified repository does not exist. +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. 
+// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -3093,86 +3921,86 @@ func (c *CodeCommit) GetRepositoryTriggersRequest(input *GetRepositoryTriggersIn // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepositoryTriggers -func (c *CodeCommit) GetRepositoryTriggers(input *GetRepositoryTriggersInput) (*GetRepositoryTriggersOutput, error) { - req, out := c.GetRepositoryTriggersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequest +func (c *CodeCommit) GetPullRequest(input *GetPullRequestInput) (*GetPullRequestOutput, error) { + req, out := c.GetPullRequestRequest(input) return out, req.Send() } -// GetRepositoryTriggersWithContext is the same as GetRepositoryTriggers with the addition of +// GetPullRequestWithContext is the same as GetPullRequest with the addition of // the ability to pass a context and additional request options. // -// See GetRepositoryTriggers for details on how to use this API operation. +// See GetPullRequest for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) GetRepositoryTriggersWithContext(ctx aws.Context, input *GetRepositoryTriggersInput, opts ...request.Option) (*GetRepositoryTriggersOutput, error) { - req, out := c.GetRepositoryTriggersRequest(input) +func (c *CodeCommit) GetPullRequestWithContext(ctx aws.Context, input *GetPullRequestInput, opts ...request.Option) (*GetPullRequestOutput, error) { + req, out := c.GetPullRequestRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBranches = "ListBranches" +const opGetRepository = "GetRepository" -// ListBranchesRequest generates a "aws/request.Request" representing the -// client's request for the ListBranches operation. The "output" return +// GetRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the GetRepository operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBranches for more information on using the ListBranches +// See GetRepository for more information on using the GetRepository // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBranchesRequest method. -// req, resp := client.ListBranchesRequest(params) +// // Example sending a request using the GetRepositoryRequest method. 
+// req, resp := client.GetRepositoryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListBranches -func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepository +func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { op := &request.Operation{ - Name: opListBranches, + Name: opGetRepository, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "", - TruncationToken: "", - }, } if input == nil { - input = &ListBranchesInput{} + input = &GetRepositoryInput{} } - output = &ListBranchesOutput{} + output = &GetRepositoryOutput{} req = c.newRequest(op, input, output) return } -// ListBranches API operation for AWS CodeCommit. +// GetRepository API operation for AWS CodeCommit. // -// Gets information about one or more branches in a repository. +// Returns information about a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation ListBranches for usage and error information. +// API operation GetRepository for usage and error information. // // Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" @@ -3203,7 +4031,221 @@ func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepository +func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOutput, error) { + req, out := c.GetRepositoryRequest(input) + return out, req.Send() +} + +// GetRepositoryWithContext is the same as GetRepository with the addition of +// the ability to pass a context and additional request options. +// +// See GetRepository for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) GetRepositoryWithContext(ctx aws.Context, input *GetRepositoryInput, opts ...request.Option) (*GetRepositoryOutput, error) { + req, out := c.GetRepositoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetRepositoryTriggers = "GetRepositoryTriggers" + +// GetRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryTriggers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRepositoryTriggers for more information on using the GetRepositoryTriggers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRepositoryTriggersRequest method. +// req, resp := client.GetRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepositoryTriggers +func (c *CodeCommit) GetRepositoryTriggersRequest(input *GetRepositoryTriggersInput) (req *request.Request, output *GetRepositoryTriggersOutput) { + op := &request.Operation{ + Name: opGetRepositoryTriggers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryTriggersInput{} + } + + output = &GetRepositoryTriggersOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRepositoryTriggers API operation for AWS CodeCommit. +// +// Gets information about triggers configured for a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation GetRepositoryTriggers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. 
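Every WithContext variant in this file repeats the same rule: the context must be non-nil (a nil context panics) and is used for request cancellation. Since aws.Context is satisfied by a standard context.Context, a bounded call looks like this (sketch only; adds context and time to the imports assumed above):

// Sketch only: bounds a GetRepository call to ten seconds.
func getRepoWithTimeout(client *codecommit.CodeCommit) (*codecommit.GetRepositoryOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // always release the timer
	return client.GetRepositoryWithContext(ctx, &codecommit.GetRepositoryInput{
		RepositoryName: aws.String("my-repo"), // placeholder
	})
}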
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetRepositoryTriggers +func (c *CodeCommit) GetRepositoryTriggers(input *GetRepositoryTriggersInput) (*GetRepositoryTriggersOutput, error) { + req, out := c.GetRepositoryTriggersRequest(input) + return out, req.Send() +} + +// GetRepositoryTriggersWithContext is the same as GetRepositoryTriggers with the addition of +// the ability to pass a context and additional request options. +// +// See GetRepositoryTriggers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) GetRepositoryTriggersWithContext(ctx aws.Context, input *GetRepositoryTriggersInput, opts ...request.Option) (*GetRepositoryTriggersOutput, error) { + req, out := c.GetRepositoryTriggersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBranches = "ListBranches" + +// ListBranchesRequest generates a "aws/request.Request" representing the +// client's request for the ListBranches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBranches for more information on using the ListBranches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBranchesRequest method. +// req, resp := client.ListBranchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListBranches +func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { + op := &request.Operation{ + Name: opListBranches, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBranchesInput{} + } + + output = &ListBranchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBranches API operation for AWS CodeCommit. +// +// Gets information about one or more branches in a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation ListBranches for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. 
+// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// * ErrCodeInvalidContinuationTokenException "InvalidContinuationTokenException" // The specified continuation token is not valid. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListBranches @@ -3239,7 +4281,7 @@ func (c *CodeCommit) ListBranchesWithContext(ctx aws.Context, input *ListBranche // // Example iterating over at most 3 pages of a ListBranches operation. // pageNum := 0 // err := client.ListBranchesPages(params, -// func(page *ListBranchesOutput, lastPage bool) bool { +// func(page *codecommit.ListBranchesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3271,10 +4313,12 @@ func (c *CodeCommit) ListBranchesPagesWithContext(ctx aws.Context, input *ListBr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBranchesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBranchesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3417,7 +4461,7 @@ func (c *CodeCommit) ListPullRequestsWithContext(ctx aws.Context, input *ListPul // // Example iterating over at most 3 pages of a ListPullRequests operation. // pageNum := 0 // err := client.ListPullRequestsPages(params, -// func(page *ListPullRequestsOutput, lastPage bool) bool { +// func(page *codecommit.ListPullRequestsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3449,10 +4493,12 @@ func (c *CodeCommit) ListPullRequestsPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPullRequestsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPullRequestsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3558,7 +4604,7 @@ func (c *CodeCommit) ListRepositoriesWithContext(ctx aws.Context, input *ListRep // // Example iterating over at most 3 pages of a ListRepositories operation. 
// pageNum := 0 // err := client.ListRepositoriesPages(params, -// func(page *ListRepositoriesOutput, lastPage bool) bool { +// func(page *codecommit.ListRepositoriesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3590,99 +4636,169 @@ func (c *CodeCommit) ListRepositoriesPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRepositoriesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRepositoriesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opMergePullRequestByFastForward = "MergePullRequestByFastForward" +const opListTagsForResource = "ListTagsForResource" -// MergePullRequestByFastForwardRequest generates a "aws/request.Request" representing the -// client's request for the MergePullRequestByFastForward operation. The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See MergePullRequestByFastForward for more information on using the MergePullRequestByFastForward +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the MergePullRequestByFastForwardRequest method. -// req, resp := client.MergePullRequestByFastForwardRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByFastForward -func (c *CodeCommit) MergePullRequestByFastForwardRequest(input *MergePullRequestByFastForwardInput) (req *request.Request, output *MergePullRequestByFastForwardOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListTagsForResource +func (c *CodeCommit) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opMergePullRequestByFastForward, + Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &MergePullRequestByFastForwardInput{} + input = &ListTagsForResourceInput{} } - output = &MergePullRequestByFastForwardOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// MergePullRequestByFastForward API operation for AWS CodeCommit. +// ListTagsForResource API operation for AWS CodeCommit. // -// Closes a pull request and attempts to merge the source commit of a pull request -// into the specified destination branch for that pull request at the specified -// commit using the fast-forward merge option. +// Gets information about AWS tags for a specified Amazon Resource Name (ARN) +// in AWS CodeCommit. 
For a list of valid resources in AWS CodeCommit, see CodeCommit +// Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation MergePullRequestByFastForward for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Codes: -// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" -// The pull request cannot be merged automatically into the destination branch. -// You must manually merge the branches and resolve any conflicts. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException" -// The pull request status cannot be updated because it is already closed. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. // -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. +// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources +// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. // -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. +// * ErrCodeInvalidResourceArnException "InvalidResourceArnException" +// The value for the resource ARN is not valid. For more information about resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. // -// * ErrCodeTipOfSourceReferenceIsDifferentException "TipOfSourceReferenceIsDifferentException" -// The tip of the source branch in the destination repository does not match -// the tip of the source branch specified in your request. The pull request -// might have been updated. Make sure that you have the latest changes. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListTagsForResource +func (c *CodeCommit) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. // -// * ErrCodeReferenceDoesNotExistException "ReferenceDoesNotExistException" -// The specified reference does not exist. You must provide a full commit ID. +// See ListTagsForResource for details on how to use this API operation. // -// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" -// The specified commit ID is not valid. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opMergeBranchesByFastForward = "MergeBranchesByFastForward" + +// MergeBranchesByFastForwardRequest generates a "aws/request.Request" representing the +// client's request for the MergeBranchesByFastForward operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See MergeBranchesByFastForward for more information on using the MergeBranchesByFastForward +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// +// // Example sending a request using the MergeBranchesByFastForwardRequest method. +// req, resp := client.MergeBranchesByFastForwardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesByFastForward +func (c *CodeCommit) MergeBranchesByFastForwardRequest(input *MergeBranchesByFastForwardInput) (req *request.Request, output *MergeBranchesByFastForwardOutput) { + op := &request.Operation{ + Name: opMergeBranchesByFastForward, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeBranchesByFastForwardInput{} + } + + output = &MergeBranchesByFastForwardOutput{} + req = c.newRequest(op, input, output) + return +} + +// MergeBranchesByFastForward API operation for AWS CodeCommit. +// +// Merges two branches using the fast-forward merge strategy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation MergeBranchesByFastForward for usage and error information. 
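ListTagsForResource is part of the resource-tagging support new in this SDK revision. A sketch under the same assumptions; the ARN is a placeholder in the repository-ARN shape described by the guide linked above:

// Sketch only: prints the AWS tags on a CodeCommit repository.
func printTags(client *codecommit.CodeCommit) error {
	out, err := client.ListTagsForResource(&codecommit.ListTagsForResourceInput{
		ResourceArn: aws.String("arn:aws:codecommit:us-east-1:111111111111:my-repo"), // placeholder
	})
	if err != nil {
		return err
	}
	for key, value := range out.Tags {
		fmt.Println(key, "=", aws.StringValue(value))
	}
	return nil
}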
+// +// Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" // A repository name is required but was not specified. // @@ -3696,6 +4812,47 @@ func (c *CodeCommit) MergePullRequestByFastForwardRequest(input *MergePullReques // * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" // The specified repository does not exist. // +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. +// +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. +// +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidTargetBranchException "InvalidTargetBranchException" +// The specified target branch is not valid. +// +// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" +// The specified reference name is not valid. +// +// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" +// A branch name is required but was not specified. +// +// * ErrCodeBranchNameIsTagNameException "BranchNameIsTagNameException" +// The specified branch name is not valid because it is a tag name. Type the +// name of a current branch in the repository. For a list of valid branch names, +// use ListBranches. +// +// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException" +// The specified branch does not exist. +// +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. +// // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. // @@ -3711,88 +4868,85 @@ func (c *CodeCommit) MergePullRequestByFastForwardRequest(input *MergePullReques // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByFastForward -func (c *CodeCommit) MergePullRequestByFastForward(input *MergePullRequestByFastForwardInput) (*MergePullRequestByFastForwardOutput, error) { - req, out := c.MergePullRequestByFastForwardRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesByFastForward +func (c *CodeCommit) MergeBranchesByFastForward(input *MergeBranchesByFastForwardInput) (*MergeBranchesByFastForwardOutput, error) { + req, out := c.MergeBranchesByFastForwardRequest(input) return out, req.Send() } -// MergePullRequestByFastForwardWithContext is the same as MergePullRequestByFastForward with the addition of +// MergeBranchesByFastForwardWithContext is the same as MergeBranchesByFastForward with the addition of // the ability to pass a context and additional request options. // -// See MergePullRequestByFastForward for details on how to use this API operation. +// See MergeBranchesByFastForward for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) MergePullRequestByFastForwardWithContext(ctx aws.Context, input *MergePullRequestByFastForwardInput, opts ...request.Option) (*MergePullRequestByFastForwardOutput, error) { - req, out := c.MergePullRequestByFastForwardRequest(input) +func (c *CodeCommit) MergeBranchesByFastForwardWithContext(ctx aws.Context, input *MergeBranchesByFastForwardInput, opts ...request.Option) (*MergeBranchesByFastForwardOutput, error) { + req, out := c.MergeBranchesByFastForwardRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPostCommentForComparedCommit = "PostCommentForComparedCommit" +const opMergeBranchesBySquash = "MergeBranchesBySquash" -// PostCommentForComparedCommitRequest generates a "aws/request.Request" representing the -// client's request for the PostCommentForComparedCommit operation. The "output" return +// MergeBranchesBySquashRequest generates a "aws/request.Request" representing the +// client's request for the MergeBranchesBySquash operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PostCommentForComparedCommit for more information on using the PostCommentForComparedCommit +// See MergeBranchesBySquash for more information on using the MergeBranchesBySquash // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PostCommentForComparedCommitRequest method. -// req, resp := client.PostCommentForComparedCommitRequest(params) +// // Example sending a request using the MergeBranchesBySquashRequest method. 
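MergeBranchesByFastForward (and MergeBranchesBySquash below) operates directly on branches, unlike the MergePullRequestBy* operations it sits alongside. A sketch under the same assumptions; TargetBranch names the branch the service advances:

// Sketch only: fast-forwards master to the tip of feature.
func fastForward(client *codecommit.CodeCommit) error {
	out, err := client.MergeBranchesByFastForward(&codecommit.MergeBranchesByFastForwardInput{
		RepositoryName:             aws.String("my-repo"), // placeholder
		SourceCommitSpecifier:      aws.String("feature"),
		DestinationCommitSpecifier: aws.String("master"),
		TargetBranch:               aws.String("master"), // branch to advance
	})
	if err != nil {
		return err // e.g. ManualMergeRequiredException if no fast-forward is possible
	}
	fmt.Println("new tip:", aws.StringValue(out.CommitId))
	return nil
}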
+// req, resp := client.MergeBranchesBySquashRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForComparedCommit -func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForComparedCommitInput) (req *request.Request, output *PostCommentForComparedCommitOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesBySquash +func (c *CodeCommit) MergeBranchesBySquashRequest(input *MergeBranchesBySquashInput) (req *request.Request, output *MergeBranchesBySquashOutput) { op := &request.Operation{ - Name: opPostCommentForComparedCommit, + Name: opMergeBranchesBySquash, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PostCommentForComparedCommitInput{} + input = &MergeBranchesBySquashInput{} } - output = &PostCommentForComparedCommitOutput{} + output = &MergeBranchesBySquashOutput{} req = c.newRequest(op, input, output) return } -// PostCommentForComparedCommit API operation for AWS CodeCommit. +// MergeBranchesBySquash API operation for AWS CodeCommit. // -// Posts a comment on the comparison between two commits. +// Merges two branches using the squash merge strategy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation PostCommentForComparedCommit for usage and error information. +// API operation MergeBranchesBySquash for usage and error information. // // Returned Error Codes: // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" // A repository name is required but was not specified. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. -// // * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" // At least one specified repository name is not valid. // @@ -3800,47 +4954,123 @@ func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForCo // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. // -// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" -// A client request token is required. A client request token is an unique, -// client-generated idempotency token that when provided in a request, ensures -// the request cannot be repeated with a changed parameter. If a request is -// received with the same parameters and a token is included, the request will -// return information about the initial request that used that token. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" -// The client request token is not valid. +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. // -// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" -// The client request token is not valid. 
Either the token is not in a valid -// format, or the token has been used in a previous request and cannot be re-used. +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. // -// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" -// The comment is empty. You must provide some content for a comment. The content -// cannot be null. +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. // -// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" -// The comment is too large. Comments are limited to 1,000 characters. +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. // -// * ErrCodeInvalidFileLocationException "InvalidFileLocationException" -// The location of the file is not valid. Make sure that you include the extension -// of the file as well as the file name. +// * ErrCodeInvalidTargetBranchException "InvalidTargetBranchException" +// The specified target branch is not valid. // -// * ErrCodeInvalidRelativeFileVersionEnumException "InvalidRelativeFileVersionEnumException" -// Either the enum is not in a valid format, or the specified file version enum -// is not valid in respect to the current file version. +// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" +// The specified reference name is not valid. +// +// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" +// A branch name is required but was not specified. +// +// * ErrCodeBranchNameIsTagNameException "BranchNameIsTagNameException" +// The specified branch name is not valid because it is a tag name. Type the +// name of a current branch in the repository. For a list of valid branch names, +// use ListBranches. +// +// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException" +// The specified branch does not exist. +// +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeInvalidConflictResolutionException "InvalidConflictResolutionException" +// The specified conflict resolution list is not valid. +// +// * ErrCodeMaximumConflictResolutionEntriesExceededException "MaximumConflictResolutionEntriesExceededException" +// The number of allowed conflict resolution entries was exceeded. +// +// * ErrCodeMultipleConflictResolutionEntriesException "MultipleConflictResolutionEntriesException" +// More than one conflict resolution entry exists for the conflict. A conflict +// can have only one conflict resolution entry. +// +// * ErrCodeReplacementTypeRequiredException "ReplacementTypeRequiredException" +// A replacement type is required. +// +// * ErrCodeInvalidReplacementTypeException "InvalidReplacementTypeException" +// Automerge was specified for resolving the conflict, but the specified replacement +// type is not valid.
+// +// * ErrCodeReplacementContentRequiredException "ReplacementContentRequiredException" +// USE_NEW_CONTENT was specified but no replacement content has been provided. +// +// * ErrCodeInvalidReplacementContentException "InvalidReplacementContentException" +// Automerge was specified for resolving the conflict, but the replacement type +// is not valid or content is missing. // // * ErrCodePathRequiredException "PathRequiredException" // The folderPath for a location cannot be null. // -// * ErrCodeInvalidFilePositionException "InvalidFilePositionException" -// The position is not valid. Make sure that the line number exists in the version -// of the file you want to comment on. +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. // -// * ErrCodeCommitIdRequiredException "CommitIdRequiredException" -// A commit ID was not specified. +// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" +// The file cannot be added because it is too large. The maximum file size that +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. // -// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" -// The specified commit ID is not valid. +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The commit cannot be created because at least one of the overall changes +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeFileModeRequiredException "FileModeRequiredException" +// The commit cannot be created because a file mode is required to update mode +// permissions for an existing file, but no file mode has been specified. +// +// * ErrCodeInvalidFileModeException "InvalidFileModeException" +// The specified file mode permission is not valid. For a list of valid file +// mode permissions, see PutFile. +// +// * ErrCodeNameLengthExceededException "NameLengthExceededException" +// The user name is not valid because it has exceeded the character limit for +// author names. +// +// * ErrCodeInvalidEmailException "InvalidEmailException" +// The specified email address either contains one or more characters that are +// not allowed, or it exceeds the maximum number of characters allowed for an +// email address. +// +// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" +// The commit message is too long. Provide a shorter string. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. 
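Between these two hunks, purely as an illustration (not part of the vendored diff): a minimal sketch of calling the new MergeBranchesBySquash operation documented above. The repository and branch names are hypothetical, credentials and region are assumed to come from the default session chain, and the input fields follow the MergeBranchesBySquashInput shape generated in this file.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	// Credentials and region are resolved from the environment or shared config.
	sess := session.Must(session.NewSession())
	svc := codecommit.New(sess)

	// Squash-merge a hypothetical feature branch into master.
	out, err := svc.MergeBranchesBySquash(&codecommit.MergeBranchesBySquashInput{
		RepositoryName:             aws.String("my-repo"),       // hypothetical repository
		SourceCommitSpecifier:      aws.String("feature/login"), // branch, tag, or full commit ID
		DestinationCommitSpecifier: aws.String("master"),
		TargetBranch:               aws.String("master"),
		CommitMessage:              aws.String("Squash merge feature/login"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("merge commit:", aws.StringValue(out.CommitId))
}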
@@ -3857,118 +5087,84 @@ func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForCo // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// * ErrCodeBeforeCommitIdAndAfterCommitIdAreSameException "BeforeCommitIdAndAfterCommitIdAreSameException" -// The before commit ID and the after commit ID are the same, which is not valid. -// The before commit ID and the after commit ID must be different commit IDs. -// -// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" -// The specified commit does not exist or no commit was specified, and the specified -// repository has no default branch. -// -// * ErrCodeInvalidPathException "InvalidPathException" -// The specified path is not valid. -// -// * ErrCodePathDoesNotExistException "PathDoesNotExistException" -// The specified path does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForComparedCommit -func (c *CodeCommit) PostCommentForComparedCommit(input *PostCommentForComparedCommitInput) (*PostCommentForComparedCommitOutput, error) { - req, out := c.PostCommentForComparedCommitRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesBySquash +func (c *CodeCommit) MergeBranchesBySquash(input *MergeBranchesBySquashInput) (*MergeBranchesBySquashOutput, error) { + req, out := c.MergeBranchesBySquashRequest(input) return out, req.Send() } -// PostCommentForComparedCommitWithContext is the same as PostCommentForComparedCommit with the addition of +// MergeBranchesBySquashWithContext is the same as MergeBranchesBySquash with the addition of // the ability to pass a context and additional request options. // -// See PostCommentForComparedCommit for details on how to use this API operation. +// See MergeBranchesBySquash for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) PostCommentForComparedCommitWithContext(ctx aws.Context, input *PostCommentForComparedCommitInput, opts ...request.Option) (*PostCommentForComparedCommitOutput, error) { - req, out := c.PostCommentForComparedCommitRequest(input) +func (c *CodeCommit) MergeBranchesBySquashWithContext(ctx aws.Context, input *MergeBranchesBySquashInput, opts ...request.Option) (*MergeBranchesBySquashOutput, error) { + req, out := c.MergeBranchesBySquashRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPostCommentForPullRequest = "PostCommentForPullRequest" +const opMergeBranchesByThreeWay = "MergeBranchesByThreeWay" -// PostCommentForPullRequestRequest generates a "aws/request.Request" representing the -// client's request for the PostCommentForPullRequest operation. The "output" return +// MergeBranchesByThreeWayRequest generates a "aws/request.Request" representing the +// client's request for the MergeBranchesByThreeWay operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
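//
// Illustrative sketch (not part of the vendored diff): the Request/Send split
// described above also leaves room to adjust the request before it is sent,
// for example attaching a custom header. The header name is hypothetical, and
// svc/input are assumed as in the earlier sketches:
//
//    req, resp := svc.MergeBranchesByThreeWayRequest(input)
//    req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // hypothetical header
//    if err := req.Send(); err == nil {
//        fmt.Println(resp) // resp is valid only after Send returns without error
//    }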
// -// See PostCommentForPullRequest for more information on using the PostCommentForPullRequest +// See MergeBranchesByThreeWay for more information on using the MergeBranchesByThreeWay // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PostCommentForPullRequestRequest method. -// req, resp := client.PostCommentForPullRequestRequest(params) +// // Example sending a request using the MergeBranchesByThreeWayRequest method. +// req, resp := client.MergeBranchesByThreeWayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForPullRequest -func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullRequestInput) (req *request.Request, output *PostCommentForPullRequestOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesByThreeWay +func (c *CodeCommit) MergeBranchesByThreeWayRequest(input *MergeBranchesByThreeWayInput) (req *request.Request, output *MergeBranchesByThreeWayOutput) { op := &request.Operation{ - Name: opPostCommentForPullRequest, + Name: opMergeBranchesByThreeWay, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PostCommentForPullRequestInput{} + input = &MergeBranchesByThreeWayInput{} } - output = &PostCommentForPullRequestOutput{} + output = &MergeBranchesByThreeWayOutput{} req = c.newRequest(op, input, output) return } -// PostCommentForPullRequest API operation for AWS CodeCommit. +// MergeBranchesByThreeWay API operation for AWS CodeCommit. // -// Posts a comment on a pull request. +// Merges two specified branches using the three-way merge strategy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation PostCommentForPullRequest for usage and error information. +// API operation MergeBranchesByThreeWay for usage and error information. // // Returned Error Codes: -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. -// -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. -// -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. -// -// * ErrCodeRepositoryNotAssociatedWithPullRequestException "RepositoryNotAssociatedWithPullRequestException" -// The repository does not contain any pull requests with that pull request -// ID. Use GetPullRequest to verify the correct repository name for the pull -// request ID. -// -// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" -// A repository name is required but was not specified. -// -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. 
+// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // // * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" // At least one specified repository name is not valid. @@ -3977,47 +5173,123 @@ func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullR // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. // -// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" -// A client request token is required. A client request token is an unique, -// client-generated idempotency token that when provided in a request, ensures -// the request cannot be repeated with a changed parameter. If a request is -// received with the same parameters and a token is included, the request will -// return information about the initial request that used that token. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" -// The client request token is not valid. +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. // -// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" -// The client request token is not valid. Either the token is not in a valid -// format, or the token has been used in a previous request and cannot be re-used. +// * ErrCodeCommitRequiredException "CommitRequiredException" +// A commit was not specified. // -// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" -// The comment is empty. You must provide some content for a comment. The content -// cannot be null. +// * ErrCodeInvalidCommitException "InvalidCommitException" +// The specified commit is not valid. // -// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" -// The comment is too large. Comments are limited to 1,000 characters. +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. // -// * ErrCodeInvalidFileLocationException "InvalidFileLocationException" -// The location of the file is not valid. Make sure that you include the extension -// of the file as well as the file name. +// * ErrCodeInvalidTargetBranchException "InvalidTargetBranchException" +// The specified target branch is not valid. // -// * ErrCodeInvalidRelativeFileVersionEnumException "InvalidRelativeFileVersionEnumException" -// Either the enum is not in a valid format, or the specified file version enum -// is not valid in respect to the current file version. +// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" +// The specified reference name is not valid. +// +// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" +// A branch name is required but was not specified. +// +// * ErrCodeBranchNameIsTagNameException "BranchNameIsTagNameException" +// The specified branch name is not valid because it is a tag name. Type the +// name of a current branch in the repository. 
For a list of valid branch names, +// use ListBranches. +// +// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException" +// The specified branch does not exist. +// +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeInvalidConflictResolutionException "InvalidConflictResolutionException" +// The specified conflict resolution list is not valid. +// +// * ErrCodeMaximumConflictResolutionEntriesExceededException "MaximumConflictResolutionEntriesExceededException" +// The number of allowed conflict resolution entries was exceeded. +// +// * ErrCodeMultipleConflictResolutionEntriesException "MultipleConflictResolutionEntriesException" +// More than one conflict resolution entry exists for the conflict. A conflict +// can have only one conflict resolution entry. +// +// * ErrCodeReplacementTypeRequiredException "ReplacementTypeRequiredException" +// A replacement type is required. +// +// * ErrCodeInvalidReplacementTypeException "InvalidReplacementTypeException" +// Automerge was specified for resolving the conflict, but the specified replacement +// type is not valid. +// +// * ErrCodeReplacementContentRequiredException "ReplacementContentRequiredException" +// USE_NEW_CONTENT was specified but no replacement content has been provided. +// +// * ErrCodeInvalidReplacementContentException "InvalidReplacementContentException" +// Automerge was specified for resolving the conflict, but the replacement type +// is not valid or content is missing. // // * ErrCodePathRequiredException "PathRequiredException" // The folderPath for a location cannot be null. // -// * ErrCodeInvalidFilePositionException "InvalidFilePositionException" -// The position is not valid. Make sure that the line number exists in the version -// of the file you want to comment on. +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. // -// * ErrCodeCommitIdRequiredException "CommitIdRequiredException" -// A commit ID was not specified. +// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" +// The file cannot be added because it is too large. The maximum file size that +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. // -// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" -// The specified commit ID is not valid. +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The commit cannot be created because at least one of the overall changes +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders.
+// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeFileModeRequiredException "FileModeRequiredException" +// The commit cannot be created because a file mode is required to update mode +// permissions for an existing file, but no file mode has been specified. +// +// * ErrCodeInvalidFileModeException "InvalidFileModeException" +// The specified file mode permission is not valid. For a list of valid file +// mode permissions, see PutFile. +// +// * ErrCodeNameLengthExceededException "NameLengthExceededException" +// The user name is not valid because it has exceeded the character limit for +// author names. +// +// * ErrCodeInvalidEmailException "InvalidEmailException" +// The specified email address either contains one or more characters that are +// not allowed, or it exceeds the maximum number of characters allowed for an +// email address. +// +// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" +// The commit message is too long. Provide a shorter string. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -4034,255 +5306,305 @@ func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullR // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" -// The specified commit does not exist or no commit was specified, and the specified -// repository has no default branch. -// -// * ErrCodeInvalidPathException "InvalidPathException" -// The specified path is not valid. -// -// * ErrCodePathDoesNotExistException "PathDoesNotExistException" -// The specified path does not exist. -// -// * ErrCodePathRequiredException "PathRequiredException" -// The folderPath for a location cannot be null. -// -// * ErrCodeBeforeCommitIdAndAfterCommitIdAreSameException "BeforeCommitIdAndAfterCommitIdAreSameException" -// The before commit ID and the after commit ID are the same, which is not valid. -// The before commit ID and the after commit ID must be different commit IDs. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForPullRequest -func (c *CodeCommit) PostCommentForPullRequest(input *PostCommentForPullRequestInput) (*PostCommentForPullRequestOutput, error) { - req, out := c.PostCommentForPullRequestRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergeBranchesByThreeWay +func (c *CodeCommit) MergeBranchesByThreeWay(input *MergeBranchesByThreeWayInput) (*MergeBranchesByThreeWayOutput, error) { + req, out := c.MergeBranchesByThreeWayRequest(input) return out, req.Send() } -// PostCommentForPullRequestWithContext is the same as PostCommentForPullRequest with the addition of +// MergeBranchesByThreeWayWithContext is the same as MergeBranchesByThreeWay with the addition of // the ability to pass a context and additional request options. // -// See PostCommentForPullRequest for details on how to use this API operation. 
+// See MergeBranchesByThreeWay for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) PostCommentForPullRequestWithContext(ctx aws.Context, input *PostCommentForPullRequestInput, opts ...request.Option) (*PostCommentForPullRequestOutput, error) { - req, out := c.PostCommentForPullRequestRequest(input) +func (c *CodeCommit) MergeBranchesByThreeWayWithContext(ctx aws.Context, input *MergeBranchesByThreeWayInput, opts ...request.Option) (*MergeBranchesByThreeWayOutput, error) { + req, out := c.MergeBranchesByThreeWayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPostCommentReply = "PostCommentReply" +const opMergePullRequestByFastForward = "MergePullRequestByFastForward" -// PostCommentReplyRequest generates a "aws/request.Request" representing the -// client's request for the PostCommentReply operation. The "output" return +// MergePullRequestByFastForwardRequest generates a "aws/request.Request" representing the +// client's request for the MergePullRequestByFastForward operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PostCommentReply for more information on using the PostCommentReply +// See MergePullRequestByFastForward for more information on using the MergePullRequestByFastForward // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PostCommentReplyRequest method. -// req, resp := client.PostCommentReplyRequest(params) +// // Example sending a request using the MergePullRequestByFastForwardRequest method. +// req, resp := client.MergePullRequestByFastForwardRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentReply -func (c *CodeCommit) PostCommentReplyRequest(input *PostCommentReplyInput) (req *request.Request, output *PostCommentReplyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByFastForward +func (c *CodeCommit) MergePullRequestByFastForwardRequest(input *MergePullRequestByFastForwardInput) (req *request.Request, output *MergePullRequestByFastForwardOutput) { op := &request.Operation{ - Name: opPostCommentReply, + Name: opMergePullRequestByFastForward, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PostCommentReplyInput{} + input = &MergePullRequestByFastForwardInput{} } - output = &PostCommentReplyOutput{} + output = &MergePullRequestByFastForwardOutput{} req = c.newRequest(op, input, output) return } -// PostCommentReply API operation for AWS CodeCommit. +// MergePullRequestByFastForward API operation for AWS CodeCommit. // -// Posts a comment in reply to an existing comment on a comparison between commits -// or a pull request. 
+// Attempts to merge the source commit of a pull request into the specified +// destination branch for that pull request at the specified commit using the +// fast-forward merge strategy. If the merge is successful, it closes the pull +// request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation PostCommentReply for usage and error information. +// API operation MergePullRequestByFastForward for usage and error information. // // Returned Error Codes: -// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" -// A client request token is required. A client request token is an unique, -// client-generated idempotency token that when provided in a request, ensures -// the request cannot be repeated with a changed parameter. If a request is -// received with the same parameters and a token is included, the request will -// return information about the initial request that used that token. +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. // -// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" -// The client request token is not valid. +// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException" +// The pull request status cannot be updated because it is already closed. // -// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" -// The client request token is not valid. Either the token is not in a valid -// format, or the token has been used in a previous request and cannot be re-used. +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. // -// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" -// The comment is empty. You must provide some content for a comment. The content -// cannot be null. +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. // -// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" -// The comment is too large. Comments are limited to 1,000 characters. +// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. // -// * ErrCodeCommentDoesNotExistException "CommentDoesNotExistException" -// No comment exists with the provided ID. Verify that you have provided the -// correct ID, and then try again. +// * ErrCodeTipOfSourceReferenceIsDifferentException "TipOfSourceReferenceIsDifferentException" +// The tip of the source branch in the destination repository does not match +// the tip of the source branch specified in your request. The pull request +// might have been updated. Make sure that you have the latest changes. // -// * ErrCodeCommentIdRequiredException "CommentIdRequiredException" -// The comment ID is missing or null. A comment ID is required. 
+// * ErrCodeReferenceDoesNotExistException "ReferenceDoesNotExistException" +// The specified reference does not exist. You must provide a full commit ID. // -// * ErrCodeInvalidCommentIdException "InvalidCommentIdException" -// The comment ID is not in a valid format. Make sure that you have provided -// the full comment ID. +// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" +// The specified commit ID is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentReply -func (c *CodeCommit) PostCommentReply(input *PostCommentReplyInput) (*PostCommentReplyOutput, error) { - req, out := c.PostCommentReplyRequest(input) +// * ErrCodeRepositoryNotAssociatedWithPullRequestException "RepositoryNotAssociatedWithPullRequestException" +// The repository does not contain any pull requests with that pull request +// ID. Use GetPullRequest to verify the correct repository name for the pull +// request ID. +// +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByFastForward +func (c *CodeCommit) MergePullRequestByFastForward(input *MergePullRequestByFastForwardInput) (*MergePullRequestByFastForwardOutput, error) { + req, out := c.MergePullRequestByFastForwardRequest(input) return out, req.Send() } -// PostCommentReplyWithContext is the same as PostCommentReply with the addition of +// MergePullRequestByFastForwardWithContext is the same as MergePullRequestByFastForward with the addition of // the ability to pass a context and additional request options. // -// See PostCommentReply for details on how to use this API operation. +// See MergePullRequestByFastForward for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
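//
// Illustrative sketch (not part of the vendored diff): a minimal use of the
// WithContext variant with a deadline, assuming svc and input as in the
// earlier sketches:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    out, err := svc.MergePullRequestByFastForwardWithContext(ctx, input)
//    if err != nil {
//        // If the deadline elapses first, the SDK reports the request as canceled.
//        fmt.Println(err)
//    } else {
//        fmt.Println(out)
//    }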
-func (c *CodeCommit) PostCommentReplyWithContext(ctx aws.Context, input *PostCommentReplyInput, opts ...request.Option) (*PostCommentReplyOutput, error) { - req, out := c.PostCommentReplyRequest(input) +func (c *CodeCommit) MergePullRequestByFastForwardWithContext(ctx aws.Context, input *MergePullRequestByFastForwardInput, opts ...request.Option) (*MergePullRequestByFastForwardOutput, error) { + req, out := c.MergePullRequestByFastForwardRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutFile = "PutFile" +const opMergePullRequestBySquash = "MergePullRequestBySquash" -// PutFileRequest generates a "aws/request.Request" representing the -// client's request for the PutFile operation. The "output" return +// MergePullRequestBySquashRequest generates a "aws/request.Request" representing the +// client's request for the MergePullRequestBySquash operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutFile for more information on using the PutFile +// See MergePullRequestBySquash for more information on using the MergePullRequestBySquash // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutFileRequest method. -// req, resp := client.PutFileRequest(params) +// // Example sending a request using the MergePullRequestBySquashRequest method. +// req, resp := client.MergePullRequestBySquashRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutFile -func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, output *PutFileOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestBySquash +func (c *CodeCommit) MergePullRequestBySquashRequest(input *MergePullRequestBySquashInput) (req *request.Request, output *MergePullRequestBySquashOutput) { op := &request.Operation{ - Name: opPutFile, + Name: opMergePullRequestBySquash, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutFileInput{} + input = &MergePullRequestBySquashInput{} } - output = &PutFileOutput{} + output = &MergePullRequestBySquashOutput{} req = c.newRequest(op, input, output) return } -// PutFile API operation for AWS CodeCommit. +// MergePullRequestBySquash API operation for AWS CodeCommit. // -// Adds or updates a file in a branch in an AWS CodeCommit repository, and generates -// a commit for the addition in the specified branch. +// Attempts to merge the source commit of a pull request into the specified +// destination branch for that pull request at the specified commit using the +// squash merge strategy. If the merge is successful, it closes the pull request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation PutFile for usage and error information. +// API operation MergePullRequestBySquash for usage and error information. 
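//
// Illustrative sketch (not part of the vendored diff): a minimal squash merge
// of an open pull request. The pull request ID and repository name are
// hypothetical placeholders, and svc is assumed as in the earlier sketches:
//
//    out, err := svc.MergePullRequestBySquash(&codecommit.MergePullRequestBySquashInput{
//        PullRequestId:  aws.String("42"),
//        RepositoryName: aws.String("my-repo"),
//        CommitMessage:  aws.String("Squash merge pull request 42"),
//    })
//    if err == nil && out.PullRequest != nil {
//        fmt.Println(aws.StringValue(out.PullRequest.PullRequestStatus))
//    }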
// // Returned Error Codes: -// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" -// A repository name is required but was not specified. +// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException" +// The pull request status cannot be updated because it is already closed. // -// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" -// At least one specified repository name is not valid. +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. // -// This exception only occurs when a specified repository name is not valid. -// Other exceptions occur when a required repository parameter is missing, or -// when a specified repository does not exist. +// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. // -// * ErrCodeParentCommitIdRequiredException "ParentCommitIdRequiredException" -// A parent commit ID is required. To view the full commit ID of a branch in -// a repository, use GetBranch or a Git command (for example, git pull or git -// log). +// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" +// The specified commit ID is not valid. // -// * ErrCodeInvalidParentCommitIdException "InvalidParentCommitIdException" -// The parent commit ID is not valid. The commit ID cannot be empty, and must -// match the head commit ID for the branch of the repository where you want -// to add or update a file. +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. // -// * ErrCodeParentCommitDoesNotExistException "ParentCommitDoesNotExistException" -// The parent commit ID is not valid because it does not exist. The specified -// parent commit ID does not exist in the specified branch of the repository. +// * ErrCodeTipOfSourceReferenceIsDifferentException "TipOfSourceReferenceIsDifferentException" +// The tip of the source branch in the destination repository does not match +// the tip of the source branch specified in your request. The pull request +// might have been updated. Make sure that you have the latest changes. // -// * ErrCodeParentCommitIdOutdatedException "ParentCommitIdOutdatedException" -// The file could not be added because the provided parent commit ID is not -// the current tip of the specified branch. To view the full commit ID of the -// current head of the branch, use GetBranch. +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. // -// * ErrCodeFileContentRequiredException "FileContentRequiredException" -// The file cannot be added because it is empty. 
Empty files cannot be added -// to the repository with this API. +// * ErrCodeNameLengthExceededException "NameLengthExceededException" +// The user name is not valid because it has exceeded the character limit for +// author names. // -// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" -// The file cannot be added because it is too large. The maximum file size that -// can be added using PutFile is 6 MB, and the combined file content change -// size is 7 MB. Consider making these changes using a Git client. +// * ErrCodeInvalidEmailException "InvalidEmailException" +// The specified email address either contains one or more characters that are +// not allowed, or it exceeds the maximum number of characters allowed for an +// email address. // -// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" -// The commit cannot be created because at least one of the overall changes -// in the commit result in a folder contents exceeding the limit of 6 MB. Either -// reduce the number and size of your changes, or split the changes across multiple -// folders. +// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" +// The commit message is too long. Provide a shorter string. +// +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. +// +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. +// +// * ErrCodeInvalidConflictResolutionException "InvalidConflictResolutionException" +// The specified conflict resolution list is not valid. +// +// * ErrCodeReplacementTypeRequiredException "ReplacementTypeRequiredException" +// A replacement type is required. +// +// * ErrCodeInvalidReplacementTypeException "InvalidReplacementTypeException" +// Automerge was specified for resolving the conflict, but the specified replacement +// type is not valid. +// +// * ErrCodeMultipleConflictResolutionEntriesException "MultipleConflictResolutionEntriesException" +// More than one conflict resolution entry exists for the conflict. A conflict +// can have only one conflict resolution entry. +// +// * ErrCodeReplacementContentRequiredException "ReplacementContentRequiredException" +// USE_NEW_CONTENT was specified but no replacement content has been provided. +// +// * ErrCodeMaximumConflictResolutionEntriesExceededException "MaximumConflictResolutionEntriesExceededException" +// The number of allowed conflict resolution entries was exceeded. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. // // * ErrCodePathRequiredException "PathRequiredException" // The folderPath for a location cannot be null. @@ -4290,39 +5612,49 @@ func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, // * ErrCodeInvalidPathException "InvalidPathException" // The specified path is not valid. // -// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" -// A branch name is required but was not specified. +// * ErrCodeInvalidFileModeException "InvalidFileModeException" +// The specified file mode permission is not valid.
For a list of valid file +// mode permissions, see PutFile. // -// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" -// The specified reference name is not valid. +// * ErrCodeInvalidReplacementContentException "InvalidReplacementContentException" +// Automerge was specified for resolving the conflict, but the replacement type +// is not valid or content is missing. // -// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException" -// The specified branch does not exist. +// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" +// The file cannot be added because it is too large. The maximum file size that +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. // -// * ErrCodeBranchNameIsTagNameException "BranchNameIsTagNameException" -// The specified branch name is not valid because it is a tag name. Type the -// name of a current branch in the repository. For a list of valid branch names, -// use ListBranches. +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The commit cannot be created because at least one of the overall changes +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders. // -// * ErrCodeInvalidFileModeException "InvalidFileModeException" -// The specified file mode permission is not valid. For a list of valid file -// mode permissions, see PutFile. +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. // -// * ErrCodeNameLengthExceededException "NameLengthExceededException" -// The user name is not valid because it has exceeded the character limit for -// file names. File names, including the path to the file, cannot exceed the -// character limit. +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. // -// * ErrCodeInvalidEmailException "InvalidEmailException" -// The specified email address either contains one or more characters that are -// not allowed, or it exceeds the maximum number of characters allowed for an -// email address. +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // -// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" -// The commit message is too long. Provide a shorter string. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// * ErrCodeInvalidDeletionParameterException "InvalidDeletionParameterException" -// The specified deletion parameter is not valid. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. 
+// +// * ErrCodeRepositoryNotAssociatedWithPullRequestException "RepositoryNotAssociatedWithPullRequestException" +// The repository does not contain any pull requests with that pull request +// ID. Use GetPullRequest to verify the correct repository name for the pull +// request ID. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -4339,167 +5671,214 @@ func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// * ErrCodeSameFileContentException "SameFileContentException" -// The file was not added or updated because the content of the file is exactly -// the same as the content of that file in the repository and branch that you -// specified. -// -// * ErrCodeFileNameConflictsWithDirectoryNameException "FileNameConflictsWithDirectoryNameException" -// A file cannot be added to the repository because the specified file name -// has the same name as a directory in this repository. Either provide another -// name for the file, or add the file in a directory that does not match the -// file name. -// -// * ErrCodeDirectoryNameConflictsWithFileNameException "DirectoryNameConflictsWithFileNameException" -// A file cannot be added to the repository because the specified path name -// has the same name as a file that already exists in this repository. Either -// provide a different name for the file, or specify a different path for the -// file. -// -// * ErrCodeFilePathConflictsWithSubmodulePathException "FilePathConflictsWithSubmodulePathException" -// The commit cannot be created because a specified file path points to a submodule. -// Verify that the destination files have valid file paths that do not point -// to a submodule. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutFile -func (c *CodeCommit) PutFile(input *PutFileInput) (*PutFileOutput, error) { - req, out := c.PutFileRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestBySquash +func (c *CodeCommit) MergePullRequestBySquash(input *MergePullRequestBySquashInput) (*MergePullRequestBySquashOutput, error) { + req, out := c.MergePullRequestBySquashRequest(input) return out, req.Send() } -// PutFileWithContext is the same as PutFile with the addition of +// MergePullRequestBySquashWithContext is the same as MergePullRequestBySquash with the addition of // the ability to pass a context and additional request options. // -// See PutFile for details on how to use this API operation. +// See MergePullRequestBySquash for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) PutFileWithContext(ctx aws.Context, input *PutFileInput, opts ...request.Option) (*PutFileOutput, error) { - req, out := c.PutFileRequest(input) +func (c *CodeCommit) MergePullRequestBySquashWithContext(ctx aws.Context, input *MergePullRequestBySquashInput, opts ...request.Option) (*MergePullRequestBySquashOutput, error) { + req, out := c.MergePullRequestBySquashRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opPutRepositoryTriggers = "PutRepositoryTriggers" +const opMergePullRequestByThreeWay = "MergePullRequestByThreeWay" -// PutRepositoryTriggersRequest generates a "aws/request.Request" representing the -// client's request for the PutRepositoryTriggers operation. The "output" return +// MergePullRequestByThreeWayRequest generates a "aws/request.Request" representing the +// client's request for the MergePullRequestByThreeWay operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutRepositoryTriggers for more information on using the PutRepositoryTriggers +// See MergePullRequestByThreeWay for more information on using the MergePullRequestByThreeWay // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutRepositoryTriggersRequest method. -// req, resp := client.PutRepositoryTriggersRequest(params) +// // Example sending a request using the MergePullRequestByThreeWayRequest method. +// req, resp := client.MergePullRequestByThreeWayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutRepositoryTriggers -func (c *CodeCommit) PutRepositoryTriggersRequest(input *PutRepositoryTriggersInput) (req *request.Request, output *PutRepositoryTriggersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByThreeWay +func (c *CodeCommit) MergePullRequestByThreeWayRequest(input *MergePullRequestByThreeWayInput) (req *request.Request, output *MergePullRequestByThreeWayOutput) { op := &request.Operation{ - Name: opPutRepositoryTriggers, + Name: opMergePullRequestByThreeWay, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutRepositoryTriggersInput{} + input = &MergePullRequestByThreeWayInput{} } - output = &PutRepositoryTriggersOutput{} + output = &MergePullRequestByThreeWayOutput{} req = c.newRequest(op, input, output) return } -// PutRepositoryTriggers API operation for AWS CodeCommit. +// MergePullRequestByThreeWay API operation for AWS CodeCommit. // -// Replaces all triggers for a repository. This can be used to create or delete -// triggers. +// Attempts to merge the source commit of a pull request into the specified +// destination branch for that pull request at the specified commit using the +// three-way merge strategy. If the merge is successful, it closes the pull +// request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation PutRepositoryTriggers for usage and error information. +// API operation MergePullRequestByThreeWay for usage and error information. // // Returned Error Codes: -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. 
+// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException" +// The pull request status cannot be updated because it is already closed. // -// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" -// A repository name is required but was not specified. +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. // -// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" -// At least one specified repository name is not valid. +// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. // -// This exception only occurs when a specified repository name is not valid. -// Other exceptions occur when a required repository parameter is missing, or -// when a specified repository does not exist. +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. // -// * ErrCodeRepositoryTriggersListRequiredException "RepositoryTriggersListRequiredException" -// The list of triggers for the repository is required but was not specified. +// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" +// The specified commit ID is not valid. // -// * ErrCodeMaximumRepositoryTriggersExceededException "MaximumRepositoryTriggersExceededException" -// The number of triggers allowed for the repository was exceeded. +// * ErrCodeManualMergeRequiredException "ManualMergeRequiredException" +// The pull request cannot be merged automatically into the destination branch. +// You must manually merge the branches and resolve any conflicts. // -// * ErrCodeInvalidRepositoryTriggerNameException "InvalidRepositoryTriggerNameException" -// The name of the trigger is not valid. +// * ErrCodeTipOfSourceReferenceIsDifferentException "TipOfSourceReferenceIsDifferentException" +// The tip of the source branch in the destination repository does not match +// the tip of the source branch specified in your request. The pull request +// might have been updated. Make sure that you have the latest changes. // -// * ErrCodeInvalidRepositoryTriggerDestinationArnException "InvalidRepositoryTriggerDestinationArnException" -// The Amazon Resource Name (ARN) for the trigger is not valid for the specified -// destination. The most common reason for this error is that the ARN does not -// meet the requirements for the service type. +// * ErrCodeTipsDivergenceExceededException "TipsDivergenceExceededException" +// The divergence between the tips of the provided commit specifiers is too +// great to determine whether there might be any merge conflicts. Locally compare +// the specifiers using git diff or a diff tool. // -// * ErrCodeInvalidRepositoryTriggerRegionException "InvalidRepositoryTriggerRegionException" -// The region for the trigger target does not match the region for the repository. -// Triggers must be created in the same region as the target for the trigger. +// * ErrCodeNameLengthExceededException "NameLengthExceededException" +// The user name is not valid because it has exceeded the character limit for +// author names. 
// -// * ErrCodeInvalidRepositoryTriggerCustomDataException "InvalidRepositoryTriggerCustomDataException" -// The custom data provided for the trigger is not valid. +// * ErrCodeInvalidEmailException "InvalidEmailException" +// The specified email address either contains one or more characters that are +// not allowed, or it exceeds the maximum number of characters allowed for an +// email address. // -// * ErrCodeMaximumBranchesExceededException "MaximumBranchesExceededException" -// The number of branches for the trigger was exceeded. +// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException" +// The commit message is too long. Provide a shorter string. // -// * ErrCodeInvalidRepositoryTriggerBranchNameException "InvalidRepositoryTriggerBranchNameException" -// One or more branch names specified for the trigger is not valid. +// * ErrCodeInvalidConflictDetailLevelException "InvalidConflictDetailLevelException" +// The specified conflict detail level is not valid. // -// * ErrCodeInvalidRepositoryTriggerEventsException "InvalidRepositoryTriggerEventsException" -// One or more events specified for the trigger is not valid. Check to make -// sure that all events specified match the requirements for allowed events. +// * ErrCodeInvalidConflictResolutionStrategyException "InvalidConflictResolutionStrategyException" +// The specified conflict resolution strategy is not valid. // -// * ErrCodeRepositoryTriggerNameRequiredException "RepositoryTriggerNameRequiredException" -// A name for the trigger is required but was not specified. +// * ErrCodeInvalidConflictResolutionException "InvalidConflictResolutionException" +// The specified conflict resolution list is not valid. // -// * ErrCodeRepositoryTriggerDestinationArnRequiredException "RepositoryTriggerDestinationArnRequiredException" -// A destination ARN for the target service for the trigger is required but -// was not specified. +// * ErrCodeReplacementTypeRequiredException "ReplacementTypeRequiredException" +// A replacement type is required. // -// * ErrCodeRepositoryTriggerBranchNameListRequiredException "RepositoryTriggerBranchNameListRequiredException" -// At least one branch name is required but was not specified in the trigger -// configuration. +// * ErrCodeInvalidReplacementTypeException "InvalidReplacementTypeException" +// Automerge was specified for resolving the conflict, but the specified replacement +// type is not valid. // -// * ErrCodeRepositoryTriggerEventsListRequiredException "RepositoryTriggerEventsListRequiredException" -// At least one event for the trigger is required but was not specified. +// * ErrCodeMultipleConflictResolutionEntriesException "MultipleConflictResolutionEntriesException" +// More than one conflict resolution entry exists for the conflict. A conflict +// can have only one conflict resolution entry. // -// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" -// An encryption integrity check failed. +// * ErrCodeReplacementContentRequiredException "ReplacementContentRequiredException" +// USE_NEW_CONTENT was specified but no replacement content has been provided. // -// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// * ErrCodeMaximumConflictResolutionEntriesExceededException "MaximumConflictResolutionEntriesExceededException" +// The number of allowed conflict resolution entries was exceeded.
+// +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. +// +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. +// +// * ErrCodeInvalidFileModeException "InvalidFileModeException" +// The specified file mode permission is not valid. For a list of valid file +// mode permissions, see PutFile. +// +// * ErrCodeInvalidReplacementContentException "InvalidReplacementContentException" +// Automerge was specified for resolving the conflict, but the replacement type +// is not valid or content is missing. +// +// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException" +// The file cannot be added because it is too large. The maximum file size that +// can be added is 6 MB, and the combined file content change size is 7 MB. +// Consider making these changes using a Git client. +// +// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException" +// The commit cannot be created because at least one of the overall changes +// in the commit results in a folder whose contents exceed the limit of 6 MB. +// Either reduce the number and size of your changes, or split the changes across +// multiple folders. +// +// * ErrCodeMaximumFileContentToLoadExceededException "MaximumFileContentToLoadExceededException" +// The number of files to load exceeds the allowed limit. +// +// * ErrCodeMaximumItemsToCompareExceededException "MaximumItemsToCompareExceededException" +// The maximum number of items to compare between the source or destination +// branches and the merge base has exceeded the maximum allowed. +// +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeRepositoryNotAssociatedWithPullRequestException "RepositoryNotAssociatedWithPullRequestException" +// The repository does not contain any pull requests with that pull request +// ID. Use GetPullRequest to verify the correct repository name for the pull +// request ID. +// +// * ErrCodeConcurrentReferenceUpdateException "ConcurrentReferenceUpdateException" +// The merge cannot be completed because the target branch has been modified. +// Another user might have modified the target branch while the merge was in +// progress. Wait a few minutes, and then try again. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" // An encryption key could not be accessed. // // * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" @@ -4511,91 +5890,88 @@ func (c *CodeCommit) PutRepositoryTriggersRequest(input *PutRepositoryTriggersIn // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. 
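//
// As a usage sketch (assuming a client built with
// codecommit.New(session.Must(session.NewSession())); the repository name and
// pull request ID below are placeholders), a three-way merge call might look
// like the following. MergePullRequestBySquash, shown earlier, takes the same
// required fields.
//
//    out, err := client.MergePullRequestByThreeWay(&codecommit.MergePullRequestByThreeWayInput{
//        PullRequestId:  aws.String("75"),         // hypothetical pull request ID
//        RepositoryName: aws.String("MyDemoRepo"), // hypothetical repository name
//    })
//    if err != nil {
//        fmt.Println(err) // inspect with awserr.Error type assertions as described above
//        return
//    }
//    fmt.Println(out.PullRequest)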
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutRepositoryTriggers -func (c *CodeCommit) PutRepositoryTriggers(input *PutRepositoryTriggersInput) (*PutRepositoryTriggersOutput, error) { - req, out := c.PutRepositoryTriggersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/MergePullRequestByThreeWay +func (c *CodeCommit) MergePullRequestByThreeWay(input *MergePullRequestByThreeWayInput) (*MergePullRequestByThreeWayOutput, error) { + req, out := c.MergePullRequestByThreeWayRequest(input) return out, req.Send() } -// PutRepositoryTriggersWithContext is the same as PutRepositoryTriggers with the addition of +// MergePullRequestByThreeWayWithContext is the same as MergePullRequestByThreeWay with the addition of // the ability to pass a context and additional request options. // -// See PutRepositoryTriggers for details on how to use this API operation. +// See MergePullRequestByThreeWay for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) PutRepositoryTriggersWithContext(ctx aws.Context, input *PutRepositoryTriggersInput, opts ...request.Option) (*PutRepositoryTriggersOutput, error) { - req, out := c.PutRepositoryTriggersRequest(input) +func (c *CodeCommit) MergePullRequestByThreeWayWithContext(ctx aws.Context, input *MergePullRequestByThreeWayInput, opts ...request.Option) (*MergePullRequestByThreeWayOutput, error) { + req, out := c.MergePullRequestByThreeWayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTestRepositoryTriggers = "TestRepositoryTriggers" +const opPostCommentForComparedCommit = "PostCommentForComparedCommit" -// TestRepositoryTriggersRequest generates a "aws/request.Request" representing the -// client's request for the TestRepositoryTriggers operation. The "output" return +// PostCommentForComparedCommitRequest generates a "aws/request.Request" representing the +// client's request for the PostCommentForComparedCommit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TestRepositoryTriggers for more information on using the TestRepositoryTriggers +// See PostCommentForComparedCommit for more information on using the PostCommentForComparedCommit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TestRepositoryTriggersRequest method. -// req, resp := client.TestRepositoryTriggersRequest(params) +// // Example sending a request using the PostCommentForComparedCommitRequest method. 
+// req, resp := client.PostCommentForComparedCommitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TestRepositoryTriggers -func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggersInput) (req *request.Request, output *TestRepositoryTriggersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForComparedCommit +func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForComparedCommitInput) (req *request.Request, output *PostCommentForComparedCommitOutput) { op := &request.Operation{ - Name: opTestRepositoryTriggers, + Name: opPostCommentForComparedCommit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TestRepositoryTriggersInput{} + input = &PostCommentForComparedCommitInput{} } - output = &TestRepositoryTriggersOutput{} + output = &PostCommentForComparedCommitOutput{} req = c.newRequest(op, input, output) return } -// TestRepositoryTriggers API operation for AWS CodeCommit. +// PostCommentForComparedCommit API operation for AWS CodeCommit. // -// Tests the functionality of repository triggers by sending information to -// the trigger target. If real data is available in the repository, the test -// will send data from the last commit. If no data is available, sample data -// will be generated. +// Posts a comment on the comparison between two commits. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation TestRepositoryTriggers for usage and error information. +// API operation PostCommentForComparedCommit for usage and error information. // // Returned Error Codes: -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. -// // * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" // A repository name is required but was not specified. // +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// // * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" // At least one specified repository name is not valid. // @@ -4603,50 +5979,47 @@ func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggers // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. // -// * ErrCodeRepositoryTriggersListRequiredException "RepositoryTriggersListRequiredException" -// The list of triggers for the repository is required but was not specified. -// -// * ErrCodeMaximumRepositoryTriggersExceededException "MaximumRepositoryTriggersExceededException" -// The number of triggers allowed for the repository was exceeded. -// -// * ErrCodeInvalidRepositoryTriggerNameException "InvalidRepositoryTriggerNameException" -// The name of the trigger is not valid. +// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" +// A client request token is required. A client request token is a unique, +// client-generated idempotency token that, when provided in a request, ensures +// the request cannot be repeated with a changed parameter.
If a request is +// received with the same parameters and a token is included, the request will +// return information about the initial request that used that token. // -// * ErrCodeInvalidRepositoryTriggerDestinationArnException "InvalidRepositoryTriggerDestinationArnException" -// The Amazon Resource Name (ARN) for the trigger is not valid for the specified -// destination. The most common reason for this error is that the ARN does not -// meet the requirements for the service type. +// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" +// The client request token is not valid. // -// * ErrCodeInvalidRepositoryTriggerRegionException "InvalidRepositoryTriggerRegionException" -// The region for the trigger target does not match the region for the repository. -// Triggers must be created in the same region as the target for the trigger. +// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" +// The client request token is not valid. Either the token is not in a valid +// format, or the token has been used in a previous request and cannot be re-used. // -// * ErrCodeInvalidRepositoryTriggerCustomDataException "InvalidRepositoryTriggerCustomDataException" -// The custom data provided for the trigger is not valid. +// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" +// The comment is empty. You must provide some content for a comment. The content +// cannot be null. // -// * ErrCodeMaximumBranchesExceededException "MaximumBranchesExceededException" -// The number of branches for the trigger was exceeded. +// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" +// The comment is too large. Comments are limited to 1,000 characters. // -// * ErrCodeInvalidRepositoryTriggerBranchNameException "InvalidRepositoryTriggerBranchNameException" -// One or more branch names specified for the trigger is not valid. +// * ErrCodeInvalidFileLocationException "InvalidFileLocationException" +// The location of the file is not valid. Make sure that you include the extension +// of the file as well as the file name. // -// * ErrCodeInvalidRepositoryTriggerEventsException "InvalidRepositoryTriggerEventsException" -// One or more events specified for the trigger is not valid. Check to make -// sure that all events specified match the requirements for allowed events. +// * ErrCodeInvalidRelativeFileVersionEnumException "InvalidRelativeFileVersionEnumException" +// Either the enum is not in a valid format, or the specified file version enum +// is not valid with respect to the current file version. // -// * ErrCodeRepositoryTriggerNameRequiredException "RepositoryTriggerNameRequiredException" -// A name for the trigger is required but was not specified. +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. // -// * ErrCodeRepositoryTriggerDestinationArnRequiredException "RepositoryTriggerDestinationArnRequiredException" -// A destination ARN for the target service for the trigger is required but -// was not specified. +// * ErrCodeInvalidFilePositionException "InvalidFilePositionException" +// The position is not valid. Make sure that the line number exists in the version +// of the file you want to comment on. // -// * ErrCodeRepositoryTriggerBranchNameListRequiredException "RepositoryTriggerBranchNameListRequiredException" -// At least one branch name is required but was not specified in the trigger -// configuration.
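//
// The token semantics described above can be exercised directly; in this
// minimal sketch (all identifiers are placeholders), retrying with the same
// ClientRequestToken returns the result of the initial request instead of
// posting a duplicate comment:
//
//    out, err := client.PostCommentForComparedCommit(&codecommit.PostCommentForComparedCommitInput{
//        RepositoryName:     aws.String("MyDemoRepo"),      // hypothetical repository
//        BeforeCommitId:     aws.String("317f8570EXAMPLE"), // hypothetical commit IDs
//        AfterCommitId:      aws.String("5d036259EXAMPLE"),
//        Content:            aws.String("Looks good to me."),
//        ClientRequestToken: aws.String("123Example"),      // idempotency token
//    })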
+// * ErrCodeCommitIdRequiredException "CommitIdRequiredException" +// A commit ID was not specified. // -// * ErrCodeRepositoryTriggerEventsListRequiredException "RepositoryTriggerEventsListRequiredException" -// At least one event for the trigger is required but was not specified. +// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" +// The specified commit ID is not valid. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -4663,210 +6036,167 @@ func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggers // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TestRepositoryTriggers -func (c *CodeCommit) TestRepositoryTriggers(input *TestRepositoryTriggersInput) (*TestRepositoryTriggersOutput, error) { - req, out := c.TestRepositoryTriggersRequest(input) +// * ErrCodeBeforeCommitIdAndAfterCommitIdAreSameException "BeforeCommitIdAndAfterCommitIdAreSameException" +// The before commit ID and the after commit ID are the same, which is not valid. +// The before commit ID and the after commit ID must be different commit IDs. +// +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. +// +// * ErrCodePathDoesNotExistException "PathDoesNotExistException" +// The specified path does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForComparedCommit +func (c *CodeCommit) PostCommentForComparedCommit(input *PostCommentForComparedCommitInput) (*PostCommentForComparedCommitOutput, error) { + req, out := c.PostCommentForComparedCommitRequest(input) return out, req.Send() } -// TestRepositoryTriggersWithContext is the same as TestRepositoryTriggers with the addition of +// PostCommentForComparedCommitWithContext is the same as PostCommentForComparedCommit with the addition of // the ability to pass a context and additional request options. // -// See TestRepositoryTriggers for details on how to use this API operation. +// See PostCommentForComparedCommit for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) TestRepositoryTriggersWithContext(ctx aws.Context, input *TestRepositoryTriggersInput, opts ...request.Option) (*TestRepositoryTriggersOutput, error) { - req, out := c.TestRepositoryTriggersRequest(input) +func (c *CodeCommit) PostCommentForComparedCommitWithContext(ctx aws.Context, input *PostCommentForComparedCommitInput, opts ...request.Option) (*PostCommentForComparedCommitOutput, error) { + req, out := c.PostCommentForComparedCommitRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateComment = "UpdateComment" +const opPostCommentForPullRequest = "PostCommentForPullRequest" -// UpdateCommentRequest generates a "aws/request.Request" representing the -// client's request for the UpdateComment operation. 
The "output" return +// PostCommentForPullRequestRequest generates a "aws/request.Request" representing the +// client's request for the PostCommentForPullRequest operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateComment for more information on using the UpdateComment +// See PostCommentForPullRequest for more information on using the PostCommentForPullRequest // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateCommentRequest method. -// req, resp := client.UpdateCommentRequest(params) +// // Example sending a request using the PostCommentForPullRequestRequest method. +// req, resp := client.PostCommentForPullRequestRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateComment -func (c *CodeCommit) UpdateCommentRequest(input *UpdateCommentInput) (req *request.Request, output *UpdateCommentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForPullRequest +func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullRequestInput) (req *request.Request, output *PostCommentForPullRequestOutput) { op := &request.Operation{ - Name: opUpdateComment, + Name: opPostCommentForPullRequest, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateCommentInput{} + input = &PostCommentForPullRequestInput{} } - output = &UpdateCommentOutput{} + output = &PostCommentForPullRequestOutput{} req = c.newRequest(op, input, output) return } -// UpdateComment API operation for AWS CodeCommit. +// PostCommentForPullRequest API operation for AWS CodeCommit. // -// Replaces the contents of a comment. +// Posts a comment on a pull request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation UpdateComment for usage and error information. +// API operation PostCommentForPullRequest for usage and error information. // // Returned Error Codes: -// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" -// The comment is empty. You must provide some content for a comment. The content -// cannot be null. -// -// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" -// The comment is too large. Comments are limited to 1,000 characters. +// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" +// The pull request ID could not be found. Make sure that you have specified +// the correct repository name and pull request ID, and then try again. // -// * ErrCodeCommentDoesNotExistException "CommentDoesNotExistException" -// No comment exists with the provided ID. Verify that you have provided the -// correct ID, and then try again. +// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" +// The pull request ID is not valid. 
Make sure that you have provided the full +// ID and that the pull request is in the specified repository, and then try +// again. // -// * ErrCodeCommentIdRequiredException "CommentIdRequiredException" -// The comment ID is missing or null. A comment ID is required. +// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" +// A pull request ID is required, but none was provided. // -// * ErrCodeInvalidCommentIdException "InvalidCommentIdException" -// The comment ID is not in a valid format. Make sure that you have provided -// the full comment ID. +// * ErrCodeRepositoryNotAssociatedWithPullRequestException "RepositoryNotAssociatedWithPullRequestException" +// The repository does not contain any pull requests with that pull request +// ID. Use GetPullRequest to verify the correct repository name for the pull +// request ID. // -// * ErrCodeCommentNotCreatedByCallerException "CommentNotCreatedByCallerException" -// You cannot modify or delete this comment. Only comment authors can modify -// or delete their comments. +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // -// * ErrCodeCommentDeletedException "CommentDeletedException" -// This comment has already been deleted. You cannot edit or delete a deleted -// comment. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateComment -func (c *CodeCommit) UpdateComment(input *UpdateCommentInput) (*UpdateCommentOutput, error) { - req, out := c.UpdateCommentRequest(input) - return out, req.Send() -} - -// UpdateCommentWithContext is the same as UpdateComment with the addition of -// the ability to pass a context and additional request options. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// See UpdateComment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *CodeCommit) UpdateCommentWithContext(ctx aws.Context, input *UpdateCommentInput, opts ...request.Option) (*UpdateCommentOutput, error) { - req, out := c.UpdateCommentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateDefaultBranch = "UpdateDefaultBranch" - -// UpdateDefaultBranchRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDefaultBranch operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateDefaultBranch for more information on using the UpdateDefaultBranch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateDefaultBranchRequest method. 
-// req, resp := client.UpdateDefaultBranchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateDefaultBranch -func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) { - op := &request.Operation{ - Name: opUpdateDefaultBranch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateDefaultBranchInput{} - } - - output = &UpdateDefaultBranchOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateDefaultBranch API operation for AWS CodeCommit. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. // -// Sets or changes the default branch name for the specified repository. +// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" +// A client request token is required. A client request token is a unique, +// client-generated idempotency token that, when provided in a request, ensures +// the request cannot be repeated with a changed parameter. If a request is +// received with the same parameters and a token is included, the request will +// return information about the initial request that used that token. // -// If you use this operation to change the default branch name to the current -// default branch name, a success message is returned even though the default -// branch did not change. +// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" +// The client request token is not valid. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" +// The client request token is not valid. Either the token is not in a valid +// format, or the token has been used in a previous request and cannot be re-used. // -// See the AWS API reference guide for AWS CodeCommit's -// API operation UpdateDefaultBranch for usage and error information. +// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" +// The comment is empty. You must provide some content for a comment. The content +// cannot be null. // -// Returned Error Codes: -// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" -// A repository name is required but was not specified. +// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" +// The comment is too large. Comments are limited to 1,000 characters. // -// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" -// The specified repository does not exist. +// * ErrCodeInvalidFileLocationException "InvalidFileLocationException" +// The location of the file is not valid. Make sure that you include the extension +// of the file as well as the file name. // -// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" -// At least one specified repository name is not valid.
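//
// A sketch of an inline pull request comment, relating the Location fields to
// the file location and position errors listed around here (every value is a
// placeholder):
//
//    out, err := client.PostCommentForPullRequest(&codecommit.PostCommentForPullRequestInput{
//        PullRequestId:  aws.String("47"),
//        RepositoryName: aws.String("MyDemoRepo"),
//        BeforeCommitId: aws.String("317f8570EXAMPLE"),
//        AfterCommitId:  aws.String("5d036259EXAMPLE"),
//        Content:        aws.String("Consider a more descriptive name here."),
//        Location: &codecommit.Location{
//            FilePath:            aws.String("src/main.go"), // include the file extension
//            FilePosition:        aws.Int64(11),             // line number in the chosen version
//            RelativeFileVersion: aws.String(codecommit.RelativeFileVersionEnumAfter),
//        },
//    })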
+// * ErrCodeInvalidRelativeFileVersionEnumException "InvalidRelativeFileVersionEnumException" +// Either the enum is not in a valid format, or the specified file version enum +// is not valid with respect to the current file version. // -// This exception only occurs when a specified repository name is not valid. -// Other exceptions occur when a required repository parameter is missing, or -// when a specified repository does not exist. +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. // -// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" -// A branch name is required but was not specified. +// * ErrCodeInvalidFilePositionException "InvalidFilePositionException" +// The position is not valid. Make sure that the line number exists in the version +// of the file you want to comment on. // -// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" -// The specified reference name is not valid. +// * ErrCodeCommitIdRequiredException "CommitIdRequiredException" +// A commit ID was not specified. // -// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException" -// The specified branch does not exist. +// * ErrCodeInvalidCommitIdException "InvalidCommitIdException" +// The specified commit ID is not valid. // // * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" // An encryption integrity check failed. @@ -4883,402 +6213,415 @@ func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateDefaultBranch -func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*UpdateDefaultBranchOutput, error) { - req, out := c.UpdateDefaultBranchRequest(input) +// * ErrCodeCommitDoesNotExistException "CommitDoesNotExistException" +// The specified commit does not exist or no commit was specified, and the specified +// repository has no default branch. +// +// * ErrCodeInvalidPathException "InvalidPathException" +// The specified path is not valid. +// +// * ErrCodePathDoesNotExistException "PathDoesNotExistException" +// The specified path does not exist. +// +// * ErrCodePathRequiredException "PathRequiredException" +// The folderPath for a location cannot be null. +// +// * ErrCodeBeforeCommitIdAndAfterCommitIdAreSameException "BeforeCommitIdAndAfterCommitIdAreSameException" +// The before commit ID and the after commit ID are the same, which is not valid. +// The before commit ID and the after commit ID must be different commit IDs. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForPullRequest +func (c *CodeCommit) PostCommentForPullRequest(input *PostCommentForPullRequestInput) (*PostCommentForPullRequestOutput, error) { + req, out := c.PostCommentForPullRequestRequest(input) return out, req.Send() } -// UpdateDefaultBranchWithContext is the same as UpdateDefaultBranch with the addition of +// PostCommentForPullRequestWithContext is the same as PostCommentForPullRequest with the addition of // the ability to pass a context and additional request options. // -// See UpdateDefaultBranch for details on how to use this API operation. +// See PostCommentForPullRequest for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation.
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) UpdateDefaultBranchWithContext(ctx aws.Context, input *UpdateDefaultBranchInput, opts ...request.Option) (*UpdateDefaultBranchOutput, error) { - req, out := c.UpdateDefaultBranchRequest(input) +func (c *CodeCommit) PostCommentForPullRequestWithContext(ctx aws.Context, input *PostCommentForPullRequestInput, opts ...request.Option) (*PostCommentForPullRequestOutput, error) { + req, out := c.PostCommentForPullRequestRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdatePullRequestDescription = "UpdatePullRequestDescription" +const opPostCommentReply = "PostCommentReply" -// UpdatePullRequestDescriptionRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePullRequestDescription operation. The "output" return +// PostCommentReplyRequest generates a "aws/request.Request" representing the +// client's request for the PostCommentReply operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePullRequestDescription for more information on using the UpdatePullRequestDescription +// See PostCommentReply for more information on using the PostCommentReply // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdatePullRequestDescriptionRequest method. -// req, resp := client.UpdatePullRequestDescriptionRequest(params) +// // Example sending a request using the PostCommentReplyRequest method. +// req, resp := client.PostCommentReplyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestDescription -func (c *CodeCommit) UpdatePullRequestDescriptionRequest(input *UpdatePullRequestDescriptionInput) (req *request.Request, output *UpdatePullRequestDescriptionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentReply +func (c *CodeCommit) PostCommentReplyRequest(input *PostCommentReplyInput) (req *request.Request, output *PostCommentReplyOutput) { op := &request.Operation{ - Name: opUpdatePullRequestDescription, + Name: opPostCommentReply, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdatePullRequestDescriptionInput{} + input = &PostCommentReplyInput{} } - output = &UpdatePullRequestDescriptionOutput{} + output = &PostCommentReplyOutput{} req = c.newRequest(op, input, output) return } -// UpdatePullRequestDescription API operation for AWS CodeCommit. +// PostCommentReply API operation for AWS CodeCommit. // -// Replaces the contents of the description of a pull request. +// Posts a comment in reply to an existing comment on a comparison between commits +// or a pull request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS CodeCommit's -// API operation UpdatePullRequestDescription for usage and error information. +// API operation PostCommentReply for usage and error information. // // Returned Error Codes: -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. +// * ErrCodeClientRequestTokenRequiredException "ClientRequestTokenRequiredException" +// A client request token is required. A client request token is a unique, +// client-generated idempotency token that, when provided in a request, ensures +// the request cannot be repeated with a changed parameter. If a request is +// received with the same parameters and a token is included, the request will +// return information about the initial request that used that token. // -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. +// * ErrCodeInvalidClientRequestTokenException "InvalidClientRequestTokenException" +// The client request token is not valid. // -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. +// * ErrCodeIdempotencyParameterMismatchException "IdempotencyParameterMismatchException" +// The client request token is not valid. Either the token is not in a valid +// format, or the token has been used in a previous request and cannot be re-used. // -// * ErrCodeInvalidDescriptionException "InvalidDescriptionException" -// The pull request description is not valid. Descriptions are limited to 1,000 -// characters in length. +// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" +// The comment is empty. You must provide some content for a comment. The content +// cannot be null. // -// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException" -// The pull request status cannot be updated because it is already closed. +// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException" +// The comment is too large. Comments are limited to 1,000 characters. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestDescription -func (c *CodeCommit) UpdatePullRequestDescription(input *UpdatePullRequestDescriptionInput) (*UpdatePullRequestDescriptionOutput, error) { - req, out := c.UpdatePullRequestDescriptionRequest(input) +// * ErrCodeCommentDoesNotExistException "CommentDoesNotExistException" +// No comment exists with the provided ID. Verify that you have provided the +// correct ID, and then try again. +// +// * ErrCodeCommentIdRequiredException "CommentIdRequiredException" +// The comment ID is missing or null. A comment ID is required. +// +// * ErrCodeInvalidCommentIdException "InvalidCommentIdException" +// The comment ID is not in a valid format. Make sure that you have provided +// the full comment ID.
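//
// A minimal reply sketch (the comment ID is a placeholder; note that the full
// comment ID must be supplied, per the InvalidCommentIdException entry above):
//
//    out, err := client.PostCommentReply(&codecommit.PostCommentReplyInput{
//        InReplyTo: aws.String("ff30b348EXAMPLE"), // full ID of the comment being answered
//        Content:   aws.String("Agreed. I will open a follow-up change."),
//    })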
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentReply +func (c *CodeCommit) PostCommentReply(input *PostCommentReplyInput) (*PostCommentReplyOutput, error) { + req, out := c.PostCommentReplyRequest(input) return out, req.Send() } -// UpdatePullRequestDescriptionWithContext is the same as UpdatePullRequestDescription with the addition of +// PostCommentReplyWithContext is the same as PostCommentReply with the addition of // the ability to pass a context and additional request options. // -// See UpdatePullRequestDescription for details on how to use this API operation. +// See PostCommentReply for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeCommit) UpdatePullRequestDescriptionWithContext(ctx aws.Context, input *UpdatePullRequestDescriptionInput, opts ...request.Option) (*UpdatePullRequestDescriptionOutput, error) { - req, out := c.UpdatePullRequestDescriptionRequest(input) +func (c *CodeCommit) PostCommentReplyWithContext(ctx aws.Context, input *PostCommentReplyInput, opts ...request.Option) (*PostCommentReplyOutput, error) { + req, out := c.PostCommentReplyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdatePullRequestStatus = "UpdatePullRequestStatus" +const opPutFile = "PutFile" -// UpdatePullRequestStatusRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePullRequestStatus operation. The "output" return +// PutFileRequest generates a "aws/request.Request" representing the +// client's request for the PutFile operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePullRequestStatus for more information on using the UpdatePullRequestStatus +// See PutFile for more information on using the PutFile // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdatePullRequestStatusRequest method. -// req, resp := client.UpdatePullRequestStatusRequest(params) +// // Example sending a request using the PutFileRequest method. 
+// req, resp := client.PutFileRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestStatus -func (c *CodeCommit) UpdatePullRequestStatusRequest(input *UpdatePullRequestStatusInput) (req *request.Request, output *UpdatePullRequestStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutFile +func (c *CodeCommit) PutFileRequest(input *PutFileInput) (req *request.Request, output *PutFileOutput) { op := &request.Operation{ - Name: opUpdatePullRequestStatus, + Name: opPutFile, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdatePullRequestStatusInput{} + input = &PutFileInput{} } - output = &UpdatePullRequestStatusOutput{} + output = &PutFileOutput{} req = c.newRequest(op, input, output) return } -// UpdatePullRequestStatus API operation for AWS CodeCommit. -// -// Updates the status of a pull request. +// PutFile API operation for AWS CodeCommit. +// +// Adds or updates a file in a branch in an AWS CodeCommit repository, and generates +// a commit for the addition in the specified branch. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeCommit's -// API operation UpdatePullRequestStatus for usage and error information. +// API operation PutFile for usage and error information. // // Returned Error Codes: -// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException" -// The pull request ID could not be found. Make sure that you have specified -// the correct repository name and pull request ID, and then try again. +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. // -// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException" -// The pull request ID is not valid. Make sure that you have provided the full -// ID and that the pull request is in the specified repository, and then try -// again. +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. // -// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException" -// A pull request ID is required, but none was provided. +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. // -// * ErrCodeInvalidPullRequestStatusUpdateException "InvalidPullRequestStatusUpdateException" -// The pull request status update is not valid. The only valid update is from -// OPEN to CLOSED. +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. // -// * ErrCodeInvalidPullRequestStatusException "InvalidPullRequestStatusException" -// The pull request status is not valid. The only valid values are OPEN and -// CLOSED. +// * ErrCodeParentCommitIdRequiredException "ParentCommitIdRequiredException" +// A parent commit ID is required. To view the full commit ID of a branch in +// a repository, use GetBranch or a Git command (for example, git pull or git +// log). 
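//
// As that entry suggests, the parent commit ID can be fetched with GetBranch
// before calling PutFile; a sketch with placeholder names:
//
//    br, err := client.GetBranch(&codecommit.GetBranchInput{
//        RepositoryName: aws.String("MyDemoRepo"), // hypothetical repository
//        BranchName:     aws.String("master"),     // hypothetical branch
//    })
//    // br.Branch.CommitId holds the tip of the branch and can be passed as
//    // ParentCommitId in PutFileInput (see the sketch further below).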
//
-// * ErrCodePullRequestStatusRequiredException "PullRequestStatusRequiredException"
-// A pull request status is required, but none was provided.
+// * ErrCodeInvalidParentCommitIdException "InvalidParentCommitIdException"
+// The parent commit ID is not valid. The commit ID cannot be empty, and must
+// match the head commit ID for the branch of the repository where you want
+// to add or update a file.
//
-// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
-// An encryption integrity check failed.
+// * ErrCodeParentCommitDoesNotExistException "ParentCommitDoesNotExistException"
+// The parent commit ID is not valid because it does not exist. The specified
+// parent commit ID does not exist in the specified branch of the repository.
//
-// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException"
-// An encryption key could not be accessed.
+// * ErrCodeParentCommitIdOutdatedException "ParentCommitIdOutdatedException"
+// The file could not be added because the provided parent commit ID is not
+// the current tip of the specified branch. To view the full commit ID of the
+// current head of the branch, use GetBranch.
//
-// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException"
-// The encryption key is disabled.
+// * ErrCodeFileContentRequiredException "FileContentRequiredException"
+// The file cannot be added because it is empty. Empty files cannot be added
+// to the repository with this API.
//
-// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException"
-// No encryption key was found.
+// * ErrCodeFileContentSizeLimitExceededException "FileContentSizeLimitExceededException"
+// The file cannot be added because it is too large. The maximum file size that
+// can be added is 6 MB, and the combined file content change size is 7 MB.
+// Consider making these changes using a Git client.
//
-// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
-// The encryption key is not available.
+// * ErrCodeFolderContentSizeLimitExceededException "FolderContentSizeLimitExceededException"
+// The commit cannot be created because at least one of the overall changes
+// in the commit results in a folder whose contents exceed the limit of 6 MB.
+// Either reduce the number and size of your changes, or split the changes across
+// multiple folders.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestStatus
-func (c *CodeCommit) UpdatePullRequestStatus(input *UpdatePullRequestStatusInput) (*UpdatePullRequestStatusOutput, error) {
-	req, out := c.UpdatePullRequestStatusRequest(input)
-	return out, req.Send()
-}
-
-// UpdatePullRequestStatusWithContext is the same as UpdatePullRequestStatus with the addition of
-// the ability to pass a context and additional request options.
+// * ErrCodePathRequiredException "PathRequiredException"
+// The folderPath for a location cannot be null.
//
-// See UpdatePullRequestStatus for details on how to use this API operation.
+// * ErrCodeInvalidPathException "InvalidPathException"
+// The specified path is not valid.
//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *CodeCommit) UpdatePullRequestStatusWithContext(ctx aws.Context, input *UpdatePullRequestStatusInput, opts ...request.Option) (*UpdatePullRequestStatusOutput, error) {
-	req, out := c.UpdatePullRequestStatusRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opUpdatePullRequestTitle = "UpdatePullRequestTitle"
-
-// UpdatePullRequestTitleRequest generates a "aws/request.Request" representing the
-// client's request for the UpdatePullRequestTitle operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
+// * ErrCodeBranchNameRequiredException "BranchNameRequiredException"
+// A branch name is required but was not specified.
//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
+// * ErrCodeInvalidBranchNameException "InvalidBranchNameException"
+// The specified reference name is not valid.
//
-// See UpdatePullRequestTitle for more information on using the UpdatePullRequestTitle
-// API call, and error handling.
+// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException"
+// The specified branch does not exist.
//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+// * ErrCodeBranchNameIsTagNameException "BranchNameIsTagNameException"
+// The specified branch name is not valid because it is a tag name. Type the
+// name of a current branch in the repository. For a list of valid branch names,
+// use ListBranches.
//
+// * ErrCodeInvalidFileModeException "InvalidFileModeException"
+// The specified file mode permission is not valid. For a list of valid file
+// mode permissions, see PutFile.
//
-// // Example sending a request using the UpdatePullRequestTitleRequest method.
-// req, resp := client.UpdatePullRequestTitleRequest(params)
+// * ErrCodeNameLengthExceededException "NameLengthExceededException"
+// The user name is not valid because it has exceeded the character limit for
+// author names.
//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// * ErrCodeInvalidEmailException "InvalidEmailException"
+// The specified email address either contains one or more characters that are
+// not allowed, or it exceeds the maximum number of characters allowed for an
+// email address.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestTitle
-func (c *CodeCommit) UpdatePullRequestTitleRequest(input *UpdatePullRequestTitleInput) (req *request.Request, output *UpdatePullRequestTitleOutput) {
-	op := &request.Operation{
-		Name:       opUpdatePullRequestTitle,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &UpdatePullRequestTitleInput{}
-	}
-
-	output = &UpdatePullRequestTitleOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// UpdatePullRequestTitle API operation for AWS CodeCommit.
+// * ErrCodeCommitMessageLengthExceededException "CommitMessageLengthExceededException"
+// The commit message is too long. Provide a shorter string.
//
-// Replaces the title of a pull request.
+// * ErrCodeInvalidDeletionParameterException "InvalidDeletionParameterException"
+// The specified deletion parameter is not valid.
//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
+// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
+// An encryption integrity check failed.
//
-// See the AWS API reference guide for AWS CodeCommit's
-// API operation UpdatePullRequestTitle for usage and error information.
+// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException"
+// An encryption key could not be accessed.
//
-// Returned Error Codes:
-// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException"
-// The pull request ID could not be found. Make sure that you have specified
-// the correct repository name and pull request ID, and then try again.
+// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException"
+// The encryption key is disabled.
//
-// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException"
-// The pull request ID is not valid. Make sure that you have provided the full
-// ID and that the pull request is in the specified repository, and then try
-// again.
+// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException"
+// No encryption key was found.
//
-// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException"
-// A pull request ID is required, but none was provided.
+// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
+// The encryption key is not available.
//
-// * ErrCodeTitleRequiredException "TitleRequiredException"
-// A pull request title is required. It cannot be empty or null.
+// * ErrCodeSameFileContentException "SameFileContentException"
+// The file was not added or updated because the content of the file is exactly
+// the same as the content of that file in the repository and branch that you
+// specified.
//
-// * ErrCodeInvalidTitleException "InvalidTitleException"
-// The title of the pull request is not valid. Pull request titles cannot exceed
-// 100 characters in length.
+// * ErrCodeFileNameConflictsWithDirectoryNameException "FileNameConflictsWithDirectoryNameException"
+// A file cannot be added to the repository because the specified file name
+// has the same name as a directory in this repository. Either provide another
+// name for the file, or add the file in a directory that does not match the
+// file name.
//
-// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException"
-// The pull request status cannot be updated because it is already closed.
+// * ErrCodeDirectoryNameConflictsWithFileNameException "DirectoryNameConflictsWithFileNameException"
+// A file cannot be added to the repository because the specified path name
+// has the same name as a file that already exists in this repository. Either
+// provide a different name for the file, or specify a different path for the
+// file.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestTitle
-func (c *CodeCommit) UpdatePullRequestTitle(input *UpdatePullRequestTitleInput) (*UpdatePullRequestTitleOutput, error) {
-	req, out := c.UpdatePullRequestTitleRequest(input)
+// * ErrCodeFilePathConflictsWithSubmodulePathException "FilePathConflictsWithSubmodulePathException"
+// The commit cannot be created because a specified file path points to a submodule.
+// Verify that the destination files have valid file paths that do not point
+// to a submodule.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutFile
+func (c *CodeCommit) PutFile(input *PutFileInput) (*PutFileOutput, error) {
+	req, out := c.PutFileRequest(input)
	return out, req.Send()
}

-// UpdatePullRequestTitleWithContext is the same as UpdatePullRequestTitle with the addition of
+// PutFileWithContext is the same as PutFile with the addition of
// the ability to pass a context and additional request options.
//
-// See UpdatePullRequestTitle for details on how to use this API operation.
+// See PutFile for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *CodeCommit) UpdatePullRequestTitleWithContext(ctx aws.Context, input *UpdatePullRequestTitleInput, opts ...request.Option) (*UpdatePullRequestTitleOutput, error) {
-	req, out := c.UpdatePullRequestTitleRequest(input)
+func (c *CodeCommit) PutFileWithContext(ctx aws.Context, input *PutFileInput, opts ...request.Option) (*PutFileOutput, error) {
+	req, out := c.PutFileRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
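The PutFile error list above encodes a calling pattern: the request must carry the branch tip as its parent commit, and a stale tip surfaces as ParentCommitIdOutdatedException. A minimal illustrative sketch against this SDK version (not part of the vendored diff; repository, branch, and path names are placeholders):

func putFileExample() error {
	// Assumes: import ("github.com/aws/aws-sdk-go/aws"
	//                   "github.com/aws/aws-sdk-go/aws/session"
	//                   "github.com/aws/aws-sdk-go/service/codecommit")
	svc := codecommit.New(session.Must(session.NewSession()))

	// GetBranch is the documented way to obtain the current head commit ID,
	// which PutFile requires as ParentCommitId once the branch has history.
	branch, err := svc.GetBranch(&codecommit.GetBranchInput{
		RepositoryName: aws.String("MyDemoRepo"), // placeholder
		BranchName:     aws.String("master"),
	})
	if err != nil {
		return err
	}

	// FileContent must be non-empty and at most 6 MB, per the errors above.
	_, err = svc.PutFile(&codecommit.PutFileInput{
		RepositoryName: aws.String("MyDemoRepo"),
		BranchName:     aws.String("master"),
		FilePath:       aws.String("docs/README.md"),
		FileContent:    []byte("# hello\n"),
		ParentCommitId: branch.Branch.CommitId,
		CommitMessage:  aws.String("Add README"),
	})
	return err
}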

-const opUpdateRepositoryDescription = "UpdateRepositoryDescription"
+const opPutRepositoryTriggers = "PutRepositoryTriggers"

-// UpdateRepositoryDescriptionRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateRepositoryDescription operation. The "output" return
+// PutRepositoryTriggersRequest generates a "aws/request.Request" representing the
+// client's request for the PutRepositoryTriggers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See UpdateRepositoryDescription for more information on using the UpdateRepositoryDescription
+// See PutRepositoryTriggers for more information on using the PutRepositoryTriggers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
-// // Example sending a request using the UpdateRepositoryDescriptionRequest method.
-// req, resp := client.UpdateRepositoryDescriptionRequest(params)
+// // Example sending a request using the PutRepositoryTriggersRequest method.
+// req, resp := client.PutRepositoryTriggersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryDescription
-func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryDescriptionInput) (req *request.Request, output *UpdateRepositoryDescriptionOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutRepositoryTriggers
+func (c *CodeCommit) PutRepositoryTriggersRequest(input *PutRepositoryTriggersInput) (req *request.Request, output *PutRepositoryTriggersOutput) {
	op := &request.Operation{
-		Name:       opUpdateRepositoryDescription,
+		Name:       opPutRepositoryTriggers,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
-		input = &UpdateRepositoryDescriptionInput{}
+		input = &PutRepositoryTriggersInput{}
	}

-	output = &UpdateRepositoryDescriptionOutput{}
+	output = &PutRepositoryTriggersOutput{}
	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

-// UpdateRepositoryDescription API operation for AWS CodeCommit.
-//
-// Sets or changes the comment or description for a repository.
+// PutRepositoryTriggers API operation for AWS CodeCommit.
//
-// The description field for a repository accepts all HTML characters and all
-// valid Unicode characters. Applications that do not HTML-encode the description
-// and display it in a web page could expose users to potentially malicious
-// code. Make sure that you HTML-encode the description field in any application
-// that uses this API to display the repository description on a web page.
+// Replaces all triggers for a repository. This can be used to create or delete
+// triggers.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS CodeCommit's
-// API operation UpdateRepositoryDescription for usage and error information.
+// API operation PutRepositoryTriggers for usage and error information.
//
// Returned Error Codes:
-// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException"
-// A repository name is required but was not specified.
-//
// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException"
// The specified repository does not exist.
//
+// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException"
+// A repository name is required but was not specified.
+//
// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException"
// At least one specified repository name is not valid.
//
@@ -5286,8 +6629,50 @@ func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryD
// Other exceptions occur when a required repository parameter is missing, or
// when a specified repository does not exist.
//
-// * ErrCodeInvalidRepositoryDescriptionException "InvalidRepositoryDescriptionException"
-// The specified repository description is not valid.
+// * ErrCodeRepositoryTriggersListRequiredException "RepositoryTriggersListRequiredException"
+// The list of triggers for the repository is required but was not specified.
+//
+// * ErrCodeMaximumRepositoryTriggersExceededException "MaximumRepositoryTriggersExceededException"
+// The number of triggers allowed for the repository was exceeded.
+//
+// * ErrCodeInvalidRepositoryTriggerNameException "InvalidRepositoryTriggerNameException"
+// The name of the trigger is not valid.
+//
+// * ErrCodeInvalidRepositoryTriggerDestinationArnException "InvalidRepositoryTriggerDestinationArnException"
+// The Amazon Resource Name (ARN) for the trigger is not valid for the specified
+// destination. The most common reason for this error is that the ARN does not
+// meet the requirements for the service type.
+//
+// * ErrCodeInvalidRepositoryTriggerRegionException "InvalidRepositoryTriggerRegionException"
+// The region for the trigger target does not match the region for the repository.
+// Triggers must be created in the same region as the target for the trigger.
+//
+// * ErrCodeInvalidRepositoryTriggerCustomDataException "InvalidRepositoryTriggerCustomDataException"
+// The custom data provided for the trigger is not valid.
+//
+// * ErrCodeMaximumBranchesExceededException "MaximumBranchesExceededException"
+// The number of branches for the trigger was exceeded.
+//
+// * ErrCodeInvalidRepositoryTriggerBranchNameException "InvalidRepositoryTriggerBranchNameException"
+// One or more branch names specified for the trigger is not valid.
+//
+// * ErrCodeInvalidRepositoryTriggerEventsException "InvalidRepositoryTriggerEventsException"
+// One or more events specified for the trigger is not valid. Check to make
+// sure that all events specified match the requirements for allowed events.
+//
+// * ErrCodeRepositoryTriggerNameRequiredException "RepositoryTriggerNameRequiredException"
+// A name for the trigger is required but was not specified.
+//
+// * ErrCodeRepositoryTriggerDestinationArnRequiredException "RepositoryTriggerDestinationArnRequiredException"
+// A destination ARN for the target service for the trigger is required but
+// was not specified.
+//
+// * ErrCodeRepositoryTriggerBranchNameListRequiredException "RepositoryTriggerBranchNameListRequiredException"
+// At least one branch name is required but was not specified in the trigger
+// configuration.
+//
+// * ErrCodeRepositoryTriggerEventsListRequiredException "RepositoryTriggerEventsListRequiredException"
+// At least one event for the trigger is required but was not specified.
//
// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
// An encryption integrity check failed.
@@ -5304,78 +6689,75 @@ func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryD
// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
// The encryption key is not available.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryDescription
-func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescriptionInput) (*UpdateRepositoryDescriptionOutput, error) {
-	req, out := c.UpdateRepositoryDescriptionRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutRepositoryTriggers
+func (c *CodeCommit) PutRepositoryTriggers(input *PutRepositoryTriggersInput) (*PutRepositoryTriggersOutput, error) {
+	req, out := c.PutRepositoryTriggersRequest(input)
	return out, req.Send()
}

-// UpdateRepositoryDescriptionWithContext is the same as UpdateRepositoryDescription with the addition of
+// PutRepositoryTriggersWithContext is the same as PutRepositoryTriggers with the addition of
// the ability to pass a context and additional request options.
//
-// See UpdateRepositoryDescription for details on how to use this API operation.
+// See PutRepositoryTriggers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *CodeCommit) UpdateRepositoryDescriptionWithContext(ctx aws.Context, input *UpdateRepositoryDescriptionInput, opts ...request.Option) (*UpdateRepositoryDescriptionOutput, error) {
-	req, out := c.UpdateRepositoryDescriptionRequest(input)
+func (c *CodeCommit) PutRepositoryTriggersWithContext(ctx aws.Context, input *PutRepositoryTriggersInput, opts ...request.Option) (*PutRepositoryTriggersOutput, error) {
+	req, out := c.PutRepositoryTriggersRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
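Because PutRepositoryTriggers replaces the entire trigger list rather than appending to it, callers resend every trigger they want to keep. A hedged sketch (not part of the diff; the names and SNS topic ARN are hypothetical, and the event constant is assumed to be the package's RepositoryTriggerEventEnumAll):

func putTriggersExample(svc *codecommit.CodeCommit) error {
	_, err := svc.PutRepositoryTriggers(&codecommit.PutRepositoryTriggersInput{
		RepositoryName: aws.String("MyDemoRepo"), // placeholder
		Triggers: []*codecommit.RepositoryTrigger{{
			Name: aws.String("notify-on-push"), // placeholder
			// The target must live in the same region as the repository,
			// per InvalidRepositoryTriggerRegionException above.
			DestinationArn: aws.String("arn:aws:sns:us-east-1:111111111111:MyTopic"),
			Branches:       []*string{aws.String("master")},
			Events:         []*string{aws.String(codecommit.RepositoryTriggerEventEnumAll)},
		}},
	})
	return err
}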

-const opUpdateRepositoryName = "UpdateRepositoryName"
+const opTagResource = "TagResource"

-// UpdateRepositoryNameRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateRepositoryName operation. The "output" return
+// TagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the TagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See UpdateRepositoryName for more information on using the UpdateRepositoryName
+// See TagResource for more information on using the TagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
-// // Example sending a request using the UpdateRepositoryNameRequest method.
-// req, resp := client.UpdateRepositoryNameRequest(params)
+// // Example sending a request using the TagResourceRequest method.
+// req, resp := client.TagResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryName
-func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TagResource
+func (c *CodeCommit) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
	op := &request.Operation{
-		Name:       opUpdateRepositoryName,
+		Name:       opTagResource,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
-		input = &UpdateRepositoryNameInput{}
+		input = &TagResourceInput{}
	}

-	output = &UpdateRepositoryNameOutput{}
+	output = &TagResourceOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

-// UpdateRepositoryName API operation for AWS CodeCommit.
+// TagResource API operation for AWS CodeCommit.
//
-// Renames a repository. The repository name must be unique across the calling
-// AWS account. In addition, repository names are limited to 100 alphanumeric,
-// dash, and underscore characters, and cannot include certain characters. The
-// suffix ".git" is prohibited. For a full description of the limits on repository
-// names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html)
+// Adds or updates tags for a resource in AWS CodeCommit. For a list of valid
+// resources in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
// in the AWS CodeCommit User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -5383,18 +6765,12 @@ func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInpu
// the error.
//
// See the AWS API reference guide for AWS CodeCommit's
-// API operation UpdateRepositoryName for usage and error information.
+// API operation TagResource for usage and error information.
//
// Returned Error Codes:
// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException"
// The specified repository does not exist.
//
-// * ErrCodeRepositoryNameExistsException "RepositoryNameExistsException"
-// The specified repository name already exists.
-//
-// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException"
-// A repository name is required but was not specified.
-//
// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException"
// At least one specified repository name is not valid.
//
@@ -5402,578 +6778,4014 @@ func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInpu
// Other exceptions occur when a required repository parameter is missing, or
// when a specified repository does not exist.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryName
-func (c *CodeCommit) UpdateRepositoryName(input *UpdateRepositoryNameInput) (*UpdateRepositoryNameOutput, error) {
-	req, out := c.UpdateRepositoryNameRequest(input)
+// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException"
+// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required.
+// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources
+// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
+// in the AWS CodeCommit User Guide.
+//
+// * ErrCodeInvalidResourceArnException "InvalidResourceArnException"
+// The value for the resource ARN is not valid. For more information about resources
+// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
+// in the AWS CodeCommit User Guide.
+//
+// * ErrCodeTagsMapRequiredException "TagsMapRequiredException"
+// A map of tags is required.
+//
+// * ErrCodeInvalidTagsMapException "InvalidTagsMapException"
+// The map of tags is not valid.
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// The maximum number of tags for an AWS CodeCommit resource has been exceeded.
+//
+// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException"
+// The specified tag is not valid. Key names cannot be prefixed with aws:.
+//
+// * ErrCodeTagPolicyException "TagPolicyException"
+// The tag policy is not valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TagResource
+func (c *CodeCommit) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+	req, out := c.TagResourceRequest(input)
	return out, req.Send()
}

-// UpdateRepositoryNameWithContext is the same as UpdateRepositoryName with the addition of
+// TagResourceWithContext is the same as TagResource with the addition of
// the ability to pass a context and additional request options.
//
-// See UpdateRepositoryName for details on how to use this API operation.
+// See TagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *CodeCommit) UpdateRepositoryNameWithContext(ctx aws.Context, input *UpdateRepositoryNameInput, opts ...request.Option) (*UpdateRepositoryNameOutput, error) {
-	req, out := c.UpdateRepositoryNameRequest(input)
+func (c *CodeCommit) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+	req, out := c.TagResourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
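A short sketch of TagResource (illustrative only; the repository ARN is a placeholder, and the error list above notes that keys with the aws: prefix are rejected):

func tagRepositoryExample(svc *codecommit.CodeCommit) error {
	_, err := svc.TagResource(&codecommit.TagResourceInput{
		ResourceArn: aws.String("arn:aws:codecommit:us-east-1:111111111111:MyDemoRepo"),
		Tags:        map[string]*string{"team": aws.String("platform")},
	})
	return err
}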

-// Represents the input of a batch get repositories operation.
-type BatchGetRepositoriesInput struct {
-	_ struct{} `type:"structure"`
-
-	// The names of the repositories to get information about.
-	//
-	// RepositoryNames is a required field
-	RepositoryNames []*string `locationName:"repositoryNames" type:"list" required:"true"`
-}
-
-// String returns the string representation
-func (s BatchGetRepositoriesInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s BatchGetRepositoriesInput) GoString() string {
-	return s.String()
-}
+const opTestRepositoryTriggers = "TestRepositoryTriggers"

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchGetRepositoriesInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "BatchGetRepositoriesInput"}
-	if s.RepositoryNames == nil {
-		invalidParams.Add(request.NewErrParamRequired("RepositoryNames"))
+// TestRepositoryTriggersRequest generates a "aws/request.Request" representing the
+// client's request for the TestRepositoryTriggers operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TestRepositoryTriggers for more information on using the TestRepositoryTriggers
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the TestRepositoryTriggersRequest method.
+// req, resp := client.TestRepositoryTriggersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TestRepositoryTriggers
+func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggersInput) (req *request.Request, output *TestRepositoryTriggersOutput) {
+	op := &request.Operation{
+		Name:       opTestRepositoryTriggers,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
	}

-	if invalidParams.Len() > 0 {
-		return invalidParams
+	if input == nil {
+		input = &TestRepositoryTriggersInput{}
	}
-	return nil
+
+	output = &TestRepositoryTriggersOutput{}
+	req = c.newRequest(op, input, output)
+	return
}

-// SetRepositoryNames sets the RepositoryNames field's value.
-func (s *BatchGetRepositoriesInput) SetRepositoryNames(v []*string) *BatchGetRepositoriesInput {
-	s.RepositoryNames = v
-	return s
+// TestRepositoryTriggers API operation for AWS CodeCommit.
+//
+// Tests the functionality of repository triggers by sending information to
+// the trigger target. If real data is available in the repository, the test
+// will send data from the last commit. If no data is available, sample data
+// will be generated.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation TestRepositoryTriggers for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException"
+// The specified repository does not exist.
+//
+// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException"
+// A repository name is required but was not specified.
+//
+// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException"
+// At least one specified repository name is not valid.
+//
+// This exception only occurs when a specified repository name is not valid.
+// Other exceptions occur when a required repository parameter is missing, or
+// when a specified repository does not exist.
+//
+// * ErrCodeRepositoryTriggersListRequiredException "RepositoryTriggersListRequiredException"
+// The list of triggers for the repository is required but was not specified.
+//
+// * ErrCodeMaximumRepositoryTriggersExceededException "MaximumRepositoryTriggersExceededException"
+// The number of triggers allowed for the repository was exceeded.
+//
+// * ErrCodeInvalidRepositoryTriggerNameException "InvalidRepositoryTriggerNameException"
+// The name of the trigger is not valid.
+//
+// * ErrCodeInvalidRepositoryTriggerDestinationArnException "InvalidRepositoryTriggerDestinationArnException"
+// The Amazon Resource Name (ARN) for the trigger is not valid for the specified
+// destination. The most common reason for this error is that the ARN does not
+// meet the requirements for the service type.
+//
+// * ErrCodeInvalidRepositoryTriggerRegionException "InvalidRepositoryTriggerRegionException"
+// The region for the trigger target does not match the region for the repository.
+// Triggers must be created in the same region as the target for the trigger.
+//
+// * ErrCodeInvalidRepositoryTriggerCustomDataException "InvalidRepositoryTriggerCustomDataException"
+// The custom data provided for the trigger is not valid.
+//
+// * ErrCodeMaximumBranchesExceededException "MaximumBranchesExceededException"
+// The number of branches for the trigger was exceeded.
+//
+// * ErrCodeInvalidRepositoryTriggerBranchNameException "InvalidRepositoryTriggerBranchNameException"
+// One or more branch names specified for the trigger is not valid.
+//
+// * ErrCodeInvalidRepositoryTriggerEventsException "InvalidRepositoryTriggerEventsException"
+// One or more events specified for the trigger is not valid. Check to make
+// sure that all events specified match the requirements for allowed events.
+//
+// * ErrCodeRepositoryTriggerNameRequiredException "RepositoryTriggerNameRequiredException"
+// A name for the trigger is required but was not specified.
+//
+// * ErrCodeRepositoryTriggerDestinationArnRequiredException "RepositoryTriggerDestinationArnRequiredException"
+// A destination ARN for the target service for the trigger is required but
+// was not specified.
+//
+// * ErrCodeRepositoryTriggerBranchNameListRequiredException "RepositoryTriggerBranchNameListRequiredException"
+// At least one branch name is required but was not specified in the trigger
+// configuration.
+//
+// * ErrCodeRepositoryTriggerEventsListRequiredException "RepositoryTriggerEventsListRequiredException"
+// At least one event for the trigger is required but was not specified.
+//
+// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
+// An encryption integrity check failed.
+//
+// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException"
+// An encryption key could not be accessed.
+//
+// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException"
+// The encryption key is disabled.
+//
+// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException"
+// No encryption key was found.
+//
+// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
+// The encryption key is not available.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TestRepositoryTriggers
+func (c *CodeCommit) TestRepositoryTriggers(input *TestRepositoryTriggersInput) (*TestRepositoryTriggersOutput, error) {
+	req, out := c.TestRepositoryTriggersRequest(input)
+	return out, req.Send()
}

-// Represents the output of a batch get repositories operation.
-type BatchGetRepositoriesOutput struct {
+// TestRepositoryTriggersWithContext is the same as TestRepositoryTriggers with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TestRepositoryTriggers for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) TestRepositoryTriggersWithContext(ctx aws.Context, input *TestRepositoryTriggersInput, opts ...request.Option) (*TestRepositoryTriggersOutput, error) {
+	req, out := c.TestRepositoryTriggersRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
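TestRepositoryTriggers dry-runs a trigger list without installing it, so it pairs naturally with PutRepositoryTriggers. An illustrative sketch; the output field names (SuccessfulExecutions, FailedExecutions) are assumed from this SDK's generated types, and the inputs are placeholders:

func testTriggersExample(svc *codecommit.CodeCommit, triggers []*codecommit.RepositoryTrigger) error {
	// Assumes: import "fmt" in addition to the aws/codecommit imports.
	out, err := svc.TestRepositoryTriggers(&codecommit.TestRepositoryTriggersInput{
		RepositoryName: aws.String("MyDemoRepo"), // placeholder
		Triggers:       triggers,
	})
	if err != nil {
		return err
	}
	// The output separates triggers that fired from those that failed.
	for _, f := range out.FailedExecutions {
		fmt.Printf("trigger %s failed: %s\n",
			aws.StringValue(f.Trigger), aws.StringValue(f.FailureMessage))
	}
	fmt.Printf("%d trigger(s) fired\n", len(out.SuccessfulExecutions))
	return nil
}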
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UntagResource +func (c *CodeCommit) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS CodeCommit. +// +// Removes tags for a resource in AWS CodeCommit. For a list of valid resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. +// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources +// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeInvalidResourceArnException "InvalidResourceArnException" +// The value for the resource ARN is not valid. For more information about resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeTagKeysListRequiredException "TagKeysListRequiredException" +// A list of tag keys is required. The list cannot be empty or null. +// +// * ErrCodeInvalidTagKeysListException "InvalidTagKeysListException" +// The list of tags is not valid. 
+// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The maximum number of tags for an AWS CodeCommit resource has been exceeded. +// +// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException" +// The specified tag is not valid. Key names cannot be prefixed with aws:. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The tag policy is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UntagResource +func (c *CodeCommit) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateComment = "UpdateComment" + +// UpdateCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateComment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateComment for more information on using the UpdateComment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCommentRequest method. +// req, resp := client.UpdateCommentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateComment +func (c *CodeCommit) UpdateCommentRequest(input *UpdateCommentInput) (req *request.Request, output *UpdateCommentOutput) { + op := &request.Operation{ + Name: opUpdateComment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCommentInput{} + } + + output = &UpdateCommentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateComment API operation for AWS CodeCommit. +// +// Replaces the contents of a comment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation UpdateComment for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCommentContentRequiredException "CommentContentRequiredException" +// The comment is empty. You must provide some content for a comment. The content +// cannot be null. 
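UntagResource is the inverse of TagResource and takes tag keys rather than a tag map. A minimal sketch with a placeholder ARN (not part of the diff):

func untagRepositoryExample(svc *codecommit.CodeCommit) error {
	_, err := svc.UntagResource(&codecommit.UntagResourceInput{
		ResourceArn: aws.String("arn:aws:codecommit:us-east-1:111111111111:MyDemoRepo"),
		TagKeys:     []*string{aws.String("team")}, // must be non-empty, per TagKeysListRequiredException
	})
	return err
}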
+
+const opUpdateComment = "UpdateComment"
+
+// UpdateCommentRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateComment operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateComment for more information on using the UpdateComment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateCommentRequest method.
+// req, resp := client.UpdateCommentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateComment
+func (c *CodeCommit) UpdateCommentRequest(input *UpdateCommentInput) (req *request.Request, output *UpdateCommentOutput) {
+	op := &request.Operation{
+		Name:       opUpdateComment,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateCommentInput{}
+	}
+
+	output = &UpdateCommentOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateComment API operation for AWS CodeCommit.
+//
+// Replaces the contents of a comment.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation UpdateComment for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeCommentContentRequiredException "CommentContentRequiredException"
+// The comment is empty. You must provide some content for a comment. The content
+// cannot be null.
+//
+// * ErrCodeCommentContentSizeLimitExceededException "CommentContentSizeLimitExceededException"
+// The comment is too large. Comments are limited to 1,000 characters.
+//
+// * ErrCodeCommentDoesNotExistException "CommentDoesNotExistException"
+// No comment exists with the provided ID. Verify that you have provided the
+// correct ID, and then try again.
+//
+// * ErrCodeCommentIdRequiredException "CommentIdRequiredException"
+// The comment ID is missing or null. A comment ID is required.
+//
+// * ErrCodeInvalidCommentIdException "InvalidCommentIdException"
+// The comment ID is not in a valid format. Make sure that you have provided
+// the full comment ID.
+//
+// * ErrCodeCommentNotCreatedByCallerException "CommentNotCreatedByCallerException"
+// You cannot modify or delete this comment. Only comment authors can modify
+// or delete their comments.
+//
+// * ErrCodeCommentDeletedException "CommentDeletedException"
+// This comment has already been deleted. You cannot edit or delete a deleted
+// comment.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateComment
+func (c *CodeCommit) UpdateComment(input *UpdateCommentInput) (*UpdateCommentOutput, error) {
+	req, out := c.UpdateCommentRequest(input)
+	return out, req.Send()
+}
+
+// UpdateCommentWithContext is the same as UpdateComment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateComment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) UpdateCommentWithContext(ctx aws.Context, input *UpdateCommentInput, opts ...request.Option) (*UpdateCommentOutput, error) {
+	req, out := c.UpdateCommentRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
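The error list above also illustrates the awserr pattern these doc comments keep referring to. A sketch that distinguishes two UpdateComment failures (the comment ID and content are placeholders; assumes import "github.com/aws/aws-sdk-go/aws/awserr"):

func updateCommentExample(svc *codecommit.CodeCommit, commentID string) error {
	_, err := svc.UpdateComment(&codecommit.UpdateCommentInput{
		CommentId: aws.String(commentID),
		Content:   aws.String("Updated wording."), // non-empty, at most 1,000 characters
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case codecommit.ErrCodeCommentNotCreatedByCallerException:
			// Only the author may edit the comment; do not retry.
		case codecommit.ErrCodeCommentDeletedException:
			// Deleted comments cannot be edited; do not retry.
		}
	}
	return err
}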
+
+const opUpdateDefaultBranch = "UpdateDefaultBranch"
+
+// UpdateDefaultBranchRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDefaultBranch operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDefaultBranch for more information on using the UpdateDefaultBranch
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateDefaultBranchRequest method.
+// req, resp := client.UpdateDefaultBranchRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateDefaultBranch
+func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDefaultBranch,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateDefaultBranchInput{}
+	}
+
+	output = &UpdateDefaultBranchOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// UpdateDefaultBranch API operation for AWS CodeCommit.
+//
+// Sets or changes the default branch name for the specified repository.
+//
+// If you use this operation to change the default branch name to the current
+// default branch name, a success message is returned even though the default
+// branch did not change.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation UpdateDefaultBranch for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException"
+// A repository name is required but was not specified.
+//
+// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException"
+// The specified repository does not exist.
+//
+// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException"
+// At least one specified repository name is not valid.
+//
+// This exception only occurs when a specified repository name is not valid.
+// Other exceptions occur when a required repository parameter is missing, or
+// when a specified repository does not exist.
+//
+// * ErrCodeBranchNameRequiredException "BranchNameRequiredException"
+// A branch name is required but was not specified.
+//
+// * ErrCodeInvalidBranchNameException "InvalidBranchNameException"
+// The specified reference name is not valid.
+//
+// * ErrCodeBranchDoesNotExistException "BranchDoesNotExistException"
+// The specified branch does not exist.
+//
+// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
+// An encryption integrity check failed.
+//
+// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException"
+// An encryption key could not be accessed.
+//
+// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException"
+// The encryption key is disabled.
+//
+// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException"
+// No encryption key was found.
+//
+// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
+// The encryption key is not available.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateDefaultBranch
+func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*UpdateDefaultBranchOutput, error) {
+	req, out := c.UpdateDefaultBranchRequest(input)
+	return out, req.Send()
+}
+
+// UpdateDefaultBranchWithContext is the same as UpdateDefaultBranch with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDefaultBranch for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) UpdateDefaultBranchWithContext(ctx aws.Context, input *UpdateDefaultBranchInput, opts ...request.Option) (*UpdateDefaultBranchOutput, error) {
+	req, out := c.UpdateDefaultBranchRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
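Since the doc comment notes that re-setting the current default still reports success, UpdateDefaultBranch can be called idempotently. A minimal sketch with placeholder names (not part of the diff):

func setDefaultBranchExample(svc *codecommit.CodeCommit) error {
	_, err := svc.UpdateDefaultBranch(&codecommit.UpdateDefaultBranchInput{
		RepositoryName:    aws.String("MyDemoRepo"), // placeholder
		DefaultBranchName: aws.String("main"),       // must name an existing branch
	})
	return err
}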
+
+const opUpdatePullRequestDescription = "UpdatePullRequestDescription"
+
+// UpdatePullRequestDescriptionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdatePullRequestDescription operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdatePullRequestDescription for more information on using the UpdatePullRequestDescription
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdatePullRequestDescriptionRequest method.
+// req, resp := client.UpdatePullRequestDescriptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestDescription
+func (c *CodeCommit) UpdatePullRequestDescriptionRequest(input *UpdatePullRequestDescriptionInput) (req *request.Request, output *UpdatePullRequestDescriptionOutput) {
+	op := &request.Operation{
+		Name:       opUpdatePullRequestDescription,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdatePullRequestDescriptionInput{}
+	}
+
+	output = &UpdatePullRequestDescriptionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdatePullRequestDescription API operation for AWS CodeCommit.
+//
+// Replaces the contents of the description of a pull request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation UpdatePullRequestDescription for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException"
+// The pull request ID could not be found. Make sure that you have specified
+// the correct repository name and pull request ID, and then try again.
+//
+// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException"
+// The pull request ID is not valid. Make sure that you have provided the full
+// ID and that the pull request is in the specified repository, and then try
+// again.
+//
+// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException"
+// A pull request ID is required, but none was provided.
+//
+// * ErrCodeInvalidDescriptionException "InvalidDescriptionException"
+// The pull request description is not valid. Descriptions are limited to 1,000
+// characters in length.
+//
+// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException"
+// The pull request status cannot be updated because it is already closed.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestDescription
+func (c *CodeCommit) UpdatePullRequestDescription(input *UpdatePullRequestDescriptionInput) (*UpdatePullRequestDescriptionOutput, error) {
+	req, out := c.UpdatePullRequestDescriptionRequest(input)
+	return out, req.Send()
+}
+
+// UpdatePullRequestDescriptionWithContext is the same as UpdatePullRequestDescription with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdatePullRequestDescription for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) UpdatePullRequestDescriptionWithContext(ctx aws.Context, input *UpdatePullRequestDescriptionInput, opts ...request.Option) (*UpdatePullRequestDescriptionOutput, error) {
+	req, out := c.UpdatePullRequestDescriptionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
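A sketch of UpdatePullRequestDescription (illustrative only; the pull request ID is a placeholder, and the description must stay within the 1,000-character limit noted above):

func updatePRDescriptionExample(svc *codecommit.CodeCommit, prID string) error {
	_, err := svc.UpdatePullRequestDescription(&codecommit.UpdatePullRequestDescriptionInput{
		PullRequestId: aws.String(prID),
		Description:   aws.String("Refreshed summary of the change."),
	})
	return err
}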
+
+const opUpdatePullRequestStatus = "UpdatePullRequestStatus"
+
+// UpdatePullRequestStatusRequest generates a "aws/request.Request" representing the
+// client's request for the UpdatePullRequestStatus operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdatePullRequestStatus for more information on using the UpdatePullRequestStatus
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdatePullRequestStatusRequest method.
+// req, resp := client.UpdatePullRequestStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestStatus
+func (c *CodeCommit) UpdatePullRequestStatusRequest(input *UpdatePullRequestStatusInput) (req *request.Request, output *UpdatePullRequestStatusOutput) {
+	op := &request.Operation{
+		Name:       opUpdatePullRequestStatus,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdatePullRequestStatusInput{}
+	}
+
+	output = &UpdatePullRequestStatusOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdatePullRequestStatus API operation for AWS CodeCommit.
+//
+// Updates the status of a pull request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation UpdatePullRequestStatus for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException"
+// The pull request ID could not be found. Make sure that you have specified
+// the correct repository name and pull request ID, and then try again.
+//
+// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException"
+// The pull request ID is not valid. Make sure that you have provided the full
+// ID and that the pull request is in the specified repository, and then try
+// again.
+//
+// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException"
+// A pull request ID is required, but none was provided.
+//
+// * ErrCodeInvalidPullRequestStatusUpdateException "InvalidPullRequestStatusUpdateException"
+// The pull request status update is not valid. The only valid update is from
+// OPEN to CLOSED.
+//
+// * ErrCodeInvalidPullRequestStatusException "InvalidPullRequestStatusException"
+// The pull request status is not valid. The only valid values are OPEN and
+// CLOSED.
+//
+// * ErrCodePullRequestStatusRequiredException "PullRequestStatusRequiredException"
+// A pull request status is required, but none was provided.
+//
+// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException"
+// An encryption integrity check failed.
+//
+// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException"
+// An encryption key could not be accessed.
+//
+// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException"
+// The encryption key is disabled.
+//
+// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException"
+// No encryption key was found.
+//
+// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException"
+// The encryption key is not available.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestStatus
+func (c *CodeCommit) UpdatePullRequestStatus(input *UpdatePullRequestStatusInput) (*UpdatePullRequestStatusOutput, error) {
+	req, out := c.UpdatePullRequestStatusRequest(input)
+	return out, req.Send()
+}
+
+// UpdatePullRequestStatusWithContext is the same as UpdatePullRequestStatus with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdatePullRequestStatus for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) UpdatePullRequestStatusWithContext(ctx aws.Context, input *UpdatePullRequestStatusInput, opts ...request.Option) (*UpdatePullRequestStatusOutput, error) {
+	req, out := c.UpdatePullRequestStatusRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
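Given that OPEN to CLOSED is the only valid transition, the status update is effectively a close operation. A sketch using the WithContext variant for cancellation (the status constant is assumed to be the package's PullRequestStatusEnumClosed; the ID is a placeholder):

func closePullRequestExample(ctx aws.Context, svc *codecommit.CodeCommit, prID string) error {
	_, err := svc.UpdatePullRequestStatusWithContext(ctx, &codecommit.UpdatePullRequestStatusInput{
		PullRequestId:     aws.String(prID),
		PullRequestStatus: aws.String(codecommit.PullRequestStatusEnumClosed),
	})
	return err
}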
+
+const opUpdatePullRequestTitle = "UpdatePullRequestTitle"
+
+// UpdatePullRequestTitleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdatePullRequestTitle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdatePullRequestTitle for more information on using the UpdatePullRequestTitle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdatePullRequestTitleRequest method.
+// req, resp := client.UpdatePullRequestTitleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestTitle
+func (c *CodeCommit) UpdatePullRequestTitleRequest(input *UpdatePullRequestTitleInput) (req *request.Request, output *UpdatePullRequestTitleOutput) {
+	op := &request.Operation{
+		Name:       opUpdatePullRequestTitle,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdatePullRequestTitleInput{}
+	}
+
+	output = &UpdatePullRequestTitleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdatePullRequestTitle API operation for AWS CodeCommit.
+//
+// Replaces the title of a pull request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS CodeCommit's
+// API operation UpdatePullRequestTitle for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodePullRequestDoesNotExistException "PullRequestDoesNotExistException"
+// The pull request ID could not be found. Make sure that you have specified
+// the correct repository name and pull request ID, and then try again.
+//
+// * ErrCodeInvalidPullRequestIdException "InvalidPullRequestIdException"
+// The pull request ID is not valid. Make sure that you have provided the full
+// ID and that the pull request is in the specified repository, and then try
+// again.
+//
+// * ErrCodePullRequestIdRequiredException "PullRequestIdRequiredException"
+// A pull request ID is required, but none was provided.
+//
+// * ErrCodeTitleRequiredException "TitleRequiredException"
+// A pull request title is required. It cannot be empty or null.
+//
+// * ErrCodeInvalidTitleException "InvalidTitleException"
+// The title of the pull request is not valid. Pull request titles cannot exceed
+// 100 characters in length.
+//
+// * ErrCodePullRequestAlreadyClosedException "PullRequestAlreadyClosedException"
+// The pull request status cannot be updated because it is already closed.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestTitle
+func (c *CodeCommit) UpdatePullRequestTitle(input *UpdatePullRequestTitleInput) (*UpdatePullRequestTitleOutput, error) {
+	req, out := c.UpdatePullRequestTitleRequest(input)
+	return out, req.Send()
+}
+
+// UpdatePullRequestTitleWithContext is the same as UpdatePullRequestTitle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdatePullRequestTitle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CodeCommit) UpdatePullRequestTitleWithContext(ctx aws.Context, input *UpdatePullRequestTitleInput, opts ...request.Option) (*UpdatePullRequestTitleOutput, error) {
+	req, out := c.UpdatePullRequestTitleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+// +// * ErrCodeInvalidRepositoryDescriptionException "InvalidRepositoryDescriptionException" +// The specified repository description is not valid. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryDescription +func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescriptionInput) (*UpdateRepositoryDescriptionOutput, error) { + req, out := c.UpdateRepositoryDescriptionRequest(input) + return out, req.Send() +} + +// UpdateRepositoryDescriptionWithContext is the same as UpdateRepositoryDescription with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRepositoryDescription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) UpdateRepositoryDescriptionWithContext(ctx aws.Context, input *UpdateRepositoryDescriptionInput, opts ...request.Option) (*UpdateRepositoryDescriptionOutput, error) { + req, out := c.UpdateRepositoryDescriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRepositoryName = "UpdateRepositoryName" + +// UpdateRepositoryNameRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRepositoryName operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRepositoryName for more information on using the UpdateRepositoryName +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRepositoryNameRequest method. 
+// req, resp := client.UpdateRepositoryNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryName +func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryNameInput{} + } + + output = &UpdateRepositoryNameOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateRepositoryName API operation for AWS CodeCommit. +// +// Renames a repository. The repository name must be unique across the calling +// AWS account. In addition, repository names are limited to 100 alphanumeric, +// dash, and underscore characters, and cannot include certain characters. The +// suffix ".git" is prohibited. For a full description of the limits on repository +// names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation UpdateRepositoryName for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeRepositoryNameExistsException "RepositoryNameExistsException" +// The specified repository name already exists. +// +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateRepositoryName +func (c *CodeCommit) UpdateRepositoryName(input *UpdateRepositoryNameInput) (*UpdateRepositoryNameOutput, error) { + req, out := c.UpdateRepositoryNameRequest(input) + return out, req.Send() +} + +// UpdateRepositoryNameWithContext is the same as UpdateRepositoryName with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRepositoryName for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) UpdateRepositoryNameWithContext(ctx aws.Context, input *UpdateRepositoryNameInput, opts ...request.Option) (*UpdateRepositoryNameOutput, error) { + req, out := c.UpdateRepositoryNameRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Information about errors in a BatchDescribeMergeConflicts operation. 
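+
+// Example (illustrative only, not generated code): renaming a repository with
+// a request-scoped context, using the UpdateRepositoryNameWithContext pattern
+// documented above. The timeout, repository names, and the sess value are
+// assumptions for this sketch; sess is an AWS session created elsewhere.
+//
+//     svc := codecommit.New(sess)
+//     ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//     defer cancel()
+//
+//     _, err := svc.UpdateRepositoryNameWithContext(ctx, &codecommit.UpdateRepositoryNameInput{
+//         OldName: aws.String("my-repo"),
+//         NewName: aws.String("my-repo-renamed"),
+//     })
+//     if err != nil {
+//         fmt.Println(err)
+//     }
+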
+type BatchDescribeMergeConflictsError struct { + _ struct{} `type:"structure"` + + // The name of the exception. + // + // ExceptionName is a required field + ExceptionName *string `locationName:"exceptionName" type:"string" required:"true"` + + // The path to the file. + // + // FilePath is a required field + FilePath *string `locationName:"filePath" type:"string" required:"true"` + + // The message provided by the exception. + // + // Message is a required field + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDescribeMergeConflictsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDescribeMergeConflictsError) GoString() string { + return s.String() +} + +// SetExceptionName sets the ExceptionName field's value. +func (s *BatchDescribeMergeConflictsError) SetExceptionName(v string) *BatchDescribeMergeConflictsError { + s.ExceptionName = &v + return s +} + +// SetFilePath sets the FilePath field's value. +func (s *BatchDescribeMergeConflictsError) SetFilePath(v string) *BatchDescribeMergeConflictsError { + s.FilePath = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BatchDescribeMergeConflictsError) SetMessage(v string) *BatchDescribeMergeConflictsError { + s.Message = &v + return s +} + +type BatchDescribeMergeConflictsInput struct { + _ struct{} `type:"structure"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` + + // The path of the target files used to describe the conflicts. If not specified, + // the default is all conflict files. + FilePaths []*string `locationName:"filePaths" type:"list"` + + // The maximum number of files to include in the output. + MaxConflictFiles *int64 `locationName:"maxConflictFiles" type:"integer"` + + // The maximum number of merge hunks to include in the output. + MaxMergeHunks *int64 `locationName:"maxMergeHunks" type:"integer"` + + // The merge option or strategy you want to use to merge the code. + // + // MergeOption is a required field + MergeOption *string `locationName:"mergeOption" type:"string" required:"true" enum:"MergeOptionTypeEnum"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository that contains the merge conflicts you want to + // review. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDescribeMergeConflictsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDescribeMergeConflictsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDescribeMergeConflictsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDescribeMergeConflictsInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } + if s.MergeOption == nil { + invalidParams.Add(request.NewErrParamRequired("MergeOption")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *BatchDescribeMergeConflictsInput) SetConflictDetailLevel(v string) *BatchDescribeMergeConflictsInput { + s.ConflictDetailLevel = &v + return s +} + +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *BatchDescribeMergeConflictsInput) SetConflictResolutionStrategy(v string) *BatchDescribeMergeConflictsInput { + s.ConflictResolutionStrategy = &v + return s +} + +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *BatchDescribeMergeConflictsInput) SetDestinationCommitSpecifier(v string) *BatchDescribeMergeConflictsInput { + s.DestinationCommitSpecifier = &v + return s +} + +// SetFilePaths sets the FilePaths field's value. +func (s *BatchDescribeMergeConflictsInput) SetFilePaths(v []*string) *BatchDescribeMergeConflictsInput { + s.FilePaths = v + return s +} + +// SetMaxConflictFiles sets the MaxConflictFiles field's value. +func (s *BatchDescribeMergeConflictsInput) SetMaxConflictFiles(v int64) *BatchDescribeMergeConflictsInput { + s.MaxConflictFiles = &v + return s +} + +// SetMaxMergeHunks sets the MaxMergeHunks field's value. +func (s *BatchDescribeMergeConflictsInput) SetMaxMergeHunks(v int64) *BatchDescribeMergeConflictsInput { + s.MaxMergeHunks = &v + return s +} + +// SetMergeOption sets the MergeOption field's value. +func (s *BatchDescribeMergeConflictsInput) SetMergeOption(v string) *BatchDescribeMergeConflictsInput { + s.MergeOption = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchDescribeMergeConflictsInput) SetNextToken(v string) *BatchDescribeMergeConflictsInput { + s.NextToken = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
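+
+// Example (illustrative only): checking two branches for merge conflicts with
+// the BatchDescribeMergeConflictsInput type defined above. The repository and
+// branch names are assumptions for this sketch, and svc is assumed to be a
+// *codecommit.CodeCommit client created elsewhere.
+//
+//     out, err := svc.BatchDescribeMergeConflicts(&codecommit.BatchDescribeMergeConflictsInput{
+//         RepositoryName:             aws.String("my-repo"),
+//         SourceCommitSpecifier:      aws.String("feature-branch"),
+//         DestinationCommitSpecifier: aws.String("master"),
+//         MergeOption:                aws.String(codecommit.MergeOptionTypeEnumThreeWayMerge),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     for _, conflict := range out.Conflicts {
+//         fmt.Println(aws.StringValue(conflict.ConflictMetadata.FilePath))
+//     }
+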
+func (s *BatchDescribeMergeConflictsInput) SetRepositoryName(v string) *BatchDescribeMergeConflictsInput { + s.RepositoryName = &v + return s +} + +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *BatchDescribeMergeConflictsInput) SetSourceCommitSpecifier(v string) *BatchDescribeMergeConflictsInput { + s.SourceCommitSpecifier = &v + return s +} + +type BatchDescribeMergeConflictsOutput struct { + _ struct{} `type:"structure"` + + // The commit ID of the merge base. + BaseCommitId *string `locationName:"baseCommitId" type:"string"` + + // A list of conflicts for each file, including the conflict metadata and the + // hunks of the differences between the files. + // + // Conflicts is a required field + Conflicts []*Conflict `locationName:"conflicts" type:"list" required:"true"` + + // The commit ID of the destination commit specifier that was used in the merge + // evaluation. + // + // DestinationCommitId is a required field + DestinationCommitId *string `locationName:"destinationCommitId" type:"string" required:"true"` + + // A list of any errors returned while describing the merge conflicts for each + // file. + Errors []*BatchDescribeMergeConflictsError `locationName:"errors" type:"list"` + + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The commit ID of the source commit specifier that was used in the merge evaluation. + // + // SourceCommitId is a required field + SourceCommitId *string `locationName:"sourceCommitId" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDescribeMergeConflictsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDescribeMergeConflictsOutput) GoString() string { + return s.String() +} + +// SetBaseCommitId sets the BaseCommitId field's value. +func (s *BatchDescribeMergeConflictsOutput) SetBaseCommitId(v string) *BatchDescribeMergeConflictsOutput { + s.BaseCommitId = &v + return s +} + +// SetConflicts sets the Conflicts field's value. +func (s *BatchDescribeMergeConflictsOutput) SetConflicts(v []*Conflict) *BatchDescribeMergeConflictsOutput { + s.Conflicts = v + return s +} + +// SetDestinationCommitId sets the DestinationCommitId field's value. +func (s *BatchDescribeMergeConflictsOutput) SetDestinationCommitId(v string) *BatchDescribeMergeConflictsOutput { + s.DestinationCommitId = &v + return s +} + +// SetErrors sets the Errors field's value. +func (s *BatchDescribeMergeConflictsOutput) SetErrors(v []*BatchDescribeMergeConflictsError) *BatchDescribeMergeConflictsOutput { + s.Errors = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchDescribeMergeConflictsOutput) SetNextToken(v string) *BatchDescribeMergeConflictsOutput { + s.NextToken = &v + return s +} + +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *BatchDescribeMergeConflictsOutput) SetSourceCommitId(v string) *BatchDescribeMergeConflictsOutput { + s.SourceCommitId = &v + return s +} + +// Returns information about errors in a BatchGetCommits operation. +type BatchGetCommitsError struct { + _ struct{} `type:"structure"` + + // A commit ID that either could not be found or was not in a valid format. + CommitId *string `locationName:"commitId" type:"string"` + + // An error code that specifies whether the commit ID was not valid or not found. 
+ ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides detail about why the commit ID either was + // not found or was not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` +} + +// String returns the string representation +func (s BatchGetCommitsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsError) GoString() string { + return s.String() +} + +// SetCommitId sets the CommitId field's value. +func (s *BatchGetCommitsError) SetCommitId(v string) *BatchGetCommitsError { + s.CommitId = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *BatchGetCommitsError) SetErrorCode(v string) *BatchGetCommitsError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *BatchGetCommitsError) SetErrorMessage(v string) *BatchGetCommitsError { + s.ErrorMessage = &v + return s +} + +type BatchGetCommitsInput struct { + _ struct{} `type:"structure"` + + // The full commit IDs of the commits to get information about. + // + // You must supply the full SHAs of each commit. You cannot use shortened SHAs. + // + // CommitIds is a required field + CommitIds []*string `locationName:"commitIds" type:"list" required:"true"` + + // The name of the repository that contains the commits. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetCommitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetCommitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetCommitsInput"} + if s.CommitIds == nil { + invalidParams.Add(request.NewErrParamRequired("CommitIds")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommitIds sets the CommitIds field's value. +func (s *BatchGetCommitsInput) SetCommitIds(v []*string) *BatchGetCommitsInput { + s.CommitIds = v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *BatchGetCommitsInput) SetRepositoryName(v string) *BatchGetCommitsInput { + s.RepositoryName = &v + return s +} + +type BatchGetCommitsOutput struct { + _ struct{} `type:"structure"` + + // An array of commit data type objects, each of which contains information + // about a specified commit. + Commits []*Commit `locationName:"commits" type:"list"` + + // Returns any commit IDs for which information could not be found. For example, + // if one of the commit IDs was a shortened SHA or that commit was not found + // in the specified repository, the ID will return an error object with additional + // information. 
+ Errors []*BatchGetCommitsError `locationName:"errors" type:"list"` +} + +// String returns the string representation +func (s BatchGetCommitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsOutput) GoString() string { + return s.String() +} + +// SetCommits sets the Commits field's value. +func (s *BatchGetCommitsOutput) SetCommits(v []*Commit) *BatchGetCommitsOutput { + s.Commits = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *BatchGetCommitsOutput) SetErrors(v []*BatchGetCommitsError) *BatchGetCommitsOutput { + s.Errors = v + return s +} + +// Represents the input of a batch get repositories operation. +type BatchGetRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The names of the repositories to get information about. + // + // RepositoryNames is a required field + RepositoryNames []*string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetRepositoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetRepositoriesInput"} + if s.RepositoryNames == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRepositoryNames sets the RepositoryNames field's value. +func (s *BatchGetRepositoriesInput) SetRepositoryNames(v []*string) *BatchGetRepositoriesInput { + s.RepositoryNames = v + return s +} + +// Represents the output of a batch get repositories operation. +type BatchGetRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repositories returned by the batch get repositories operation. + Repositories []*RepositoryMetadata `locationName:"repositories" type:"list"` + + // Returns a list of repository names for which information could not be found. + RepositoriesNotFound []*string `locationName:"repositoriesNotFound" type:"list"` +} + +// String returns the string representation +func (s BatchGetRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesOutput) GoString() string { + return s.String() +} + +// SetRepositories sets the Repositories field's value. +func (s *BatchGetRepositoriesOutput) SetRepositories(v []*RepositoryMetadata) *BatchGetRepositoriesOutput { + s.Repositories = v + return s +} + +// SetRepositoriesNotFound sets the RepositoriesNotFound field's value. +func (s *BatchGetRepositoriesOutput) SetRepositoriesNotFound(v []*string) *BatchGetRepositoriesOutput { + s.RepositoriesNotFound = v + return s +} + +// Returns information about a specific Git blob object. +type BlobMetadata struct { + _ struct{} `type:"structure"` + + // The full ID of the blob. + BlobId *string `locationName:"blobId" type:"string"` + + // The file mode permissions of the blob. 
File mode permission codes include: + // + // * 100644 indicates read/write + // + // * 100755 indicates read/write/execute + // + // * 160000 indicates a submodule + // + // * 120000 indicates a symlink + Mode *string `locationName:"mode" type:"string"` + + // The path to the blob and any associated file name, if any. + Path *string `locationName:"path" type:"string"` +} + +// String returns the string representation +func (s BlobMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlobMetadata) GoString() string { + return s.String() +} + +// SetBlobId sets the BlobId field's value. +func (s *BlobMetadata) SetBlobId(v string) *BlobMetadata { + s.BlobId = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *BlobMetadata) SetMode(v string) *BlobMetadata { + s.Mode = &v + return s +} + +// SetPath sets the Path field's value. +func (s *BlobMetadata) SetPath(v string) *BlobMetadata { + s.Path = &v + return s +} + +// Returns information about a branch. +type BranchInfo struct { + _ struct{} `type:"structure"` + + // The name of the branch. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // The ID of the last commit made to the branch. + CommitId *string `locationName:"commitId" type:"string"` +} + +// String returns the string representation +func (s BranchInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BranchInfo) GoString() string { + return s.String() +} + +// SetBranchName sets the BranchName field's value. +func (s *BranchInfo) SetBranchName(v string) *BranchInfo { + s.BranchName = &v + return s +} + +// SetCommitId sets the CommitId field's value. +func (s *BranchInfo) SetCommitId(v string) *BranchInfo { + s.CommitId = &v + return s +} + +// Returns information about a specific comment. +type Comment struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the person who posted the comment. + AuthorArn *string `locationName:"authorArn" type:"string"` + + // A unique, client-generated idempotency token that when provided in a request, + // ensures the request cannot be repeated with a changed parameter. If a request + // is received with the same parameters and a token is included, the request + // will return information about the initial request that used that token. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` + + // The system-generated comment ID. + CommentId *string `locationName:"commentId" type:"string"` + + // The content of the comment. + Content *string `locationName:"content" type:"string"` + + // The date and time the comment was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // A Boolean value indicating whether the comment has been deleted. + Deleted *bool `locationName:"deleted" type:"boolean"` + + // The ID of the comment for which this comment is a reply, if any. + InReplyTo *string `locationName:"inReplyTo" type:"string"` + + // The date and time the comment was most recently modified, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` +} + +// String returns the string representation +func (s Comment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Comment) GoString() string { + return s.String() +} + +// SetAuthorArn sets the AuthorArn field's value. 
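+
+// Illustrative sketch: a hypothetical helper (not part of this package) that
+// maps the Mode string documented on BlobMetadata above to a human-readable
+// description, following the four permission codes listed there.
+//
+//     func describeBlobMode(mode string) string {
+//         switch mode {
+//         case "100644":
+//             return "read/write"
+//         case "100755":
+//             return "read/write/execute"
+//         case "160000":
+//             return "submodule"
+//         case "120000":
+//             return "symlink"
+//         default:
+//             return "unknown mode: " + mode
+//         }
+//     }
+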
+func (s *Comment) SetAuthorArn(v string) *Comment { + s.AuthorArn = &v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *Comment) SetClientRequestToken(v string) *Comment { + s.ClientRequestToken = &v + return s +} + +// SetCommentId sets the CommentId field's value. +func (s *Comment) SetCommentId(v string) *Comment { + s.CommentId = &v + return s +} + +// SetContent sets the Content field's value. +func (s *Comment) SetContent(v string) *Comment { + s.Content = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Comment) SetCreationDate(v time.Time) *Comment { + s.CreationDate = &v + return s +} + +// SetDeleted sets the Deleted field's value. +func (s *Comment) SetDeleted(v bool) *Comment { + s.Deleted = &v + return s +} + +// SetInReplyTo sets the InReplyTo field's value. +func (s *Comment) SetInReplyTo(v string) *Comment { + s.InReplyTo = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *Comment) SetLastModifiedDate(v time.Time) *Comment { + s.LastModifiedDate = &v + return s +} + +// Returns information about comments on the comparison between two commits. +type CommentsForComparedCommit struct { + _ struct{} `type:"structure"` + + // The full blob ID of the commit used to establish the 'after' of the comparison. + AfterBlobId *string `locationName:"afterBlobId" type:"string"` + + // The full commit ID of the commit used to establish the 'after' of the comparison. + AfterCommitId *string `locationName:"afterCommitId" type:"string"` + + // The full blob ID of the commit used to establish the 'before' of the comparison. + BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` + + // The full commit ID of the commit used to establish the 'before' of the comparison. + BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` + + // An array of comment objects. Each comment object contains information about + // a comment on the comparison between commits. + Comments []*Comment `locationName:"comments" type:"list"` + + // Location information about the comment on the comparison, including the file + // name, line number, and whether the version of the file where the comment + // was made is 'BEFORE' or 'AFTER'. + Location *Location `locationName:"location" type:"structure"` + + // The name of the repository that contains the compared commits. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s CommentsForComparedCommit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommentsForComparedCommit) GoString() string { + return s.String() +} + +// SetAfterBlobId sets the AfterBlobId field's value. +func (s *CommentsForComparedCommit) SetAfterBlobId(v string) *CommentsForComparedCommit { + s.AfterBlobId = &v + return s +} + +// SetAfterCommitId sets the AfterCommitId field's value. +func (s *CommentsForComparedCommit) SetAfterCommitId(v string) *CommentsForComparedCommit { + s.AfterCommitId = &v + return s +} + +// SetBeforeBlobId sets the BeforeBlobId field's value. +func (s *CommentsForComparedCommit) SetBeforeBlobId(v string) *CommentsForComparedCommit { + s.BeforeBlobId = &v + return s +} + +// SetBeforeCommitId sets the BeforeCommitId field's value. 
+func (s *CommentsForComparedCommit) SetBeforeCommitId(v string) *CommentsForComparedCommit {
+ s.BeforeCommitId = &v
+ return s
+}
+
+// SetComments sets the Comments field's value.
+func (s *CommentsForComparedCommit) SetComments(v []*Comment) *CommentsForComparedCommit {
+ s.Comments = v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CommentsForComparedCommit) SetLocation(v *Location) *CommentsForComparedCommit {
+ s.Location = v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *CommentsForComparedCommit) SetRepositoryName(v string) *CommentsForComparedCommit {
+ s.RepositoryName = &v
+ return s
+}
+
+// Returns information about comments on a pull request.
+type CommentsForPullRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The full blob ID of the file on which you want to comment on the source commit.
+ AfterBlobId *string `locationName:"afterBlobId" type:"string"`
+
+ // The full commit ID of the commit that was the tip of the source branch at
+ // the time the comment was made.
+ AfterCommitId *string `locationName:"afterCommitId" type:"string"`
+
+ // The full blob ID of the file on which you want to comment on the destination
+ // commit.
+ BeforeBlobId *string `locationName:"beforeBlobId" type:"string"`
+
+ // The full commit ID of the commit that was the tip of the destination branch
+ // when the pull request was created. This commit will be superseded by the
+ // after commit in the source branch when and if you merge the source branch
+ // into the destination branch.
+ BeforeCommitId *string `locationName:"beforeCommitId" type:"string"`
+
+ // An array of comment objects. Each comment object contains information about
+ // a comment on the pull request.
+ Comments []*Comment `locationName:"comments" type:"list"`
+
+ // Location information about the comment on the pull request, including the
+ // file name, line number, and whether the version of the file where the comment
+ // was made is 'BEFORE' (destination branch) or 'AFTER' (source branch).
+ Location *Location `locationName:"location" type:"structure"`
+
+ // The system-generated ID of the pull request.
+ PullRequestId *string `locationName:"pullRequestId" type:"string"`
+
+ // The name of the repository that contains the pull request.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CommentsForPullRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommentsForPullRequest) GoString() string {
+ return s.String()
+}
+
+// SetAfterBlobId sets the AfterBlobId field's value.
+func (s *CommentsForPullRequest) SetAfterBlobId(v string) *CommentsForPullRequest {
+ s.AfterBlobId = &v
+ return s
+}
+
+// SetAfterCommitId sets the AfterCommitId field's value.
+func (s *CommentsForPullRequest) SetAfterCommitId(v string) *CommentsForPullRequest {
+ s.AfterCommitId = &v
+ return s
+}
+
+// SetBeforeBlobId sets the BeforeBlobId field's value.
+func (s *CommentsForPullRequest) SetBeforeBlobId(v string) *CommentsForPullRequest {
+ s.BeforeBlobId = &v
+ return s
+}
+
+// SetBeforeCommitId sets the BeforeCommitId field's value.
+func (s *CommentsForPullRequest) SetBeforeCommitId(v string) *CommentsForPullRequest {
+ s.BeforeCommitId = &v
+ return s
+}
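+
+// Example (illustrative only): reading comments on a pull request via the
+// CommentsForPullRequest type defined above. The GetCommentsForPullRequest
+// operation, its output field name, and the pull request ID are assumptions
+// for this sketch; see that operation's documentation for the exact shape.
+//
+//     out, err := svc.GetCommentsForPullRequest(&codecommit.GetCommentsForPullRequestInput{
+//         PullRequestId: aws.String("42"),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     for _, c := range out.CommentsForPullRequestData {
+//         for _, comment := range c.Comments {
+//             fmt.Println(aws.StringValue(comment.Content))
+//         }
+//     }
+
+// SetComments sets the Comments field's value.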
+func (s *CommentsForPullRequest) SetComments(v []*Comment) *CommentsForPullRequest { + s.Comments = v + return s +} + +// SetLocation sets the Location field's value. +func (s *CommentsForPullRequest) SetLocation(v *Location) *CommentsForPullRequest { + s.Location = v + return s +} + +// SetPullRequestId sets the PullRequestId field's value. +func (s *CommentsForPullRequest) SetPullRequestId(v string) *CommentsForPullRequest { + s.PullRequestId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CommentsForPullRequest) SetRepositoryName(v string) *CommentsForPullRequest { + s.RepositoryName = &v + return s +} + +// Returns information about a specific commit. +type Commit struct { + _ struct{} `type:"structure"` + + // Any additional data associated with the specified commit. + AdditionalData *string `locationName:"additionalData" type:"string"` + + // Information about the author of the specified commit. Information includes + // the date in timestamp format with GMT offset, the name of the author, and + // the email address for the author, as configured in Git. + Author *UserInfo `locationName:"author" type:"structure"` + + // The full SHA of the specified commit. + CommitId *string `locationName:"commitId" type:"string"` + + // Information about the person who committed the specified commit, also known + // as the committer. Information includes the date in timestamp format with + // GMT offset, the name of the committer, and the email address for the committer, + // as configured in Git. + // + // For more information about the difference between an author and a committer + // in Git, see Viewing the Commit History (http://git-scm.com/book/ch2-3.html) + // in Pro Git by Scott Chacon and Ben Straub. + Committer *UserInfo `locationName:"committer" type:"structure"` + + // The commit message associated with the specified commit. + Message *string `locationName:"message" type:"string"` + + // A list of parent commits for the specified commit. Each parent commit ID + // is the full commit ID. + Parents []*string `locationName:"parents" type:"list"` + + // Tree information for the specified commit. + TreeId *string `locationName:"treeId" type:"string"` +} + +// String returns the string representation +func (s Commit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Commit) GoString() string { + return s.String() +} + +// SetAdditionalData sets the AdditionalData field's value. +func (s *Commit) SetAdditionalData(v string) *Commit { + s.AdditionalData = &v + return s +} + +// SetAuthor sets the Author field's value. +func (s *Commit) SetAuthor(v *UserInfo) *Commit { + s.Author = v + return s +} + +// SetCommitId sets the CommitId field's value. +func (s *Commit) SetCommitId(v string) *Commit { + s.CommitId = &v + return s +} + +// SetCommitter sets the Committer field's value. +func (s *Commit) SetCommitter(v *UserInfo) *Commit { + s.Committer = v + return s +} + +// SetMessage sets the Message field's value. +func (s *Commit) SetMessage(v string) *Commit { + s.Message = &v + return s +} + +// SetParents sets the Parents field's value. +func (s *Commit) SetParents(v []*string) *Commit { + s.Parents = v + return s +} + +// SetTreeId sets the TreeId field's value. +func (s *Commit) SetTreeId(v string) *Commit { + s.TreeId = &v + return s +} + +// Information about conflicts in a merge operation. 
+type Conflict struct { + _ struct{} `type:"structure"` + + // Metadata about a conflict in a merge operation. + ConflictMetadata *ConflictMetadata `locationName:"conflictMetadata" type:"structure"` + + // A list of hunks that contain the differences between files or lines causing + // the conflict. + MergeHunks []*MergeHunk `locationName:"mergeHunks" type:"list"` +} + +// String returns the string representation +func (s Conflict) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Conflict) GoString() string { + return s.String() +} + +// SetConflictMetadata sets the ConflictMetadata field's value. +func (s *Conflict) SetConflictMetadata(v *ConflictMetadata) *Conflict { + s.ConflictMetadata = v + return s +} + +// SetMergeHunks sets the MergeHunks field's value. +func (s *Conflict) SetMergeHunks(v []*MergeHunk) *Conflict { + s.MergeHunks = v + return s +} + +// Information about the metadata for a conflict in a merge operation. +type ConflictMetadata struct { + _ struct{} `type:"structure"` + + // A boolean value indicating whether there are conflicts in the content of + // a file. + ContentConflict *bool `locationName:"contentConflict" type:"boolean"` + + // A boolean value indicating whether there are conflicts in the file mode of + // a file. + FileModeConflict *bool `locationName:"fileModeConflict" type:"boolean"` + + // The file modes of the file in the source, destination, and base of the merge. + FileModes *FileModes `locationName:"fileModes" type:"structure"` + + // The path of the file that contains conflicts. + FilePath *string `locationName:"filePath" type:"string"` + + // The file sizes of the file in the source, destination, and base of the merge. + FileSizes *FileSizes `locationName:"fileSizes" type:"structure"` + + // A boolean value (true or false) indicating whether the file is binary or + // textual in the source, destination, and base of the merge. + IsBinaryFile *IsBinaryFile `locationName:"isBinaryFile" type:"structure"` + + // Whether an add, modify, or delete operation caused the conflict between the + // source and destination of the merge. + MergeOperations *MergeOperations `locationName:"mergeOperations" type:"structure"` + + // The number of conflicts, including both hunk conflicts and metadata conflicts. + NumberOfConflicts *int64 `locationName:"numberOfConflicts" type:"integer"` + + // A boolean value (true or false) indicating whether there are conflicts between + // the branches in the object type of a file, folder, or submodule. + ObjectTypeConflict *bool `locationName:"objectTypeConflict" type:"boolean"` + + // Information about any object type conflicts in a merge operation. + ObjectTypes *ObjectTypes `locationName:"objectTypes" type:"structure"` +} + +// String returns the string representation +func (s ConflictMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictMetadata) GoString() string { + return s.String() +} + +// SetContentConflict sets the ContentConflict field's value. +func (s *ConflictMetadata) SetContentConflict(v bool) *ConflictMetadata { + s.ContentConflict = &v + return s +} + +// SetFileModeConflict sets the FileModeConflict field's value. +func (s *ConflictMetadata) SetFileModeConflict(v bool) *ConflictMetadata { + s.FileModeConflict = &v + return s +} + +// SetFileModes sets the FileModes field's value. 
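+
+// Illustrative sketch: deciding whether a conflict needs manual attention by
+// inspecting the ConflictMetadata flags defined above. The meta variable is
+// an assumption for this sketch, taken from a Conflict's ConflictMetadata
+// field.
+//
+//     needsManualResolution := aws.BoolValue(meta.ContentConflict) ||
+//         aws.BoolValue(meta.FileModeConflict) ||
+//         aws.BoolValue(meta.ObjectTypeConflict)
+//     if needsManualResolution {
+//         fmt.Printf("%s: %d conflict(s)\n",
+//             aws.StringValue(meta.FilePath),
+//             aws.Int64Value(meta.NumberOfConflicts))
+//     }
+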
+func (s *ConflictMetadata) SetFileModes(v *FileModes) *ConflictMetadata { + s.FileModes = v + return s +} + +// SetFilePath sets the FilePath field's value. +func (s *ConflictMetadata) SetFilePath(v string) *ConflictMetadata { + s.FilePath = &v + return s +} + +// SetFileSizes sets the FileSizes field's value. +func (s *ConflictMetadata) SetFileSizes(v *FileSizes) *ConflictMetadata { + s.FileSizes = v + return s +} + +// SetIsBinaryFile sets the IsBinaryFile field's value. +func (s *ConflictMetadata) SetIsBinaryFile(v *IsBinaryFile) *ConflictMetadata { + s.IsBinaryFile = v + return s +} + +// SetMergeOperations sets the MergeOperations field's value. +func (s *ConflictMetadata) SetMergeOperations(v *MergeOperations) *ConflictMetadata { + s.MergeOperations = v + return s +} + +// SetNumberOfConflicts sets the NumberOfConflicts field's value. +func (s *ConflictMetadata) SetNumberOfConflicts(v int64) *ConflictMetadata { + s.NumberOfConflicts = &v + return s +} + +// SetObjectTypeConflict sets the ObjectTypeConflict field's value. +func (s *ConflictMetadata) SetObjectTypeConflict(v bool) *ConflictMetadata { + s.ObjectTypeConflict = &v + return s +} + +// SetObjectTypes sets the ObjectTypes field's value. +func (s *ConflictMetadata) SetObjectTypes(v *ObjectTypes) *ConflictMetadata { + s.ObjectTypes = v + return s +} + +// A list of inputs to use when resolving conflicts during a merge if AUTOMERGE +// is chosen as the conflict resolution strategy. +type ConflictResolution struct { + _ struct{} `type:"structure"` + + // Files that will be deleted as part of the merge conflict resolution. + DeleteFiles []*DeleteFileEntry `locationName:"deleteFiles" type:"list"` + + // Files that will have content replaced as part of the merge conflict resolution. + ReplaceContents []*ReplaceContentEntry `locationName:"replaceContents" type:"list"` + + // File modes that will be set as part of the merge conflict resolution. + SetFileModes []*SetFileModeEntry `locationName:"setFileModes" type:"list"` +} + +// String returns the string representation +func (s ConflictResolution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictResolution) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConflictResolution) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConflictResolution"} + if s.DeleteFiles != nil { + for i, v := range s.DeleteFiles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DeleteFiles", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplaceContents != nil { + for i, v := range s.ReplaceContents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplaceContents", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SetFileModes != nil { + for i, v := range s.SetFileModes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SetFileModes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteFiles sets the DeleteFiles field's value. +func (s *ConflictResolution) SetDeleteFiles(v []*DeleteFileEntry) *ConflictResolution { + s.DeleteFiles = v + return s +} + +// SetReplaceContents sets the ReplaceContents field's value. 
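+
+// Illustrative sketch: building a ConflictResolution for a merge call that
+// uses AUTOMERGE as its conflict resolution strategy. The file paths, the
+// ReplacementType constant, and the idea of passing this to an operation such
+// as MergePullRequestByThreeWay are assumptions for this sketch.
+//
+//     resolution := &codecommit.ConflictResolution{
+//         DeleteFiles: []*codecommit.DeleteFileEntry{
+//             {FilePath: aws.String("obsolete/config.yaml")},
+//         },
+//         ReplaceContents: []*codecommit.ReplaceContentEntry{
+//             {
+//                 FilePath:        aws.String("README.md"),
+//                 ReplacementType: aws.String(codecommit.ReplacementTypeEnumKeepSource),
+//             },
+//         },
+//     }
+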
+func (s *ConflictResolution) SetReplaceContents(v []*ReplaceContentEntry) *ConflictResolution { + s.ReplaceContents = v + return s +} + +// SetSetFileModes sets the SetFileModes field's value. +func (s *ConflictResolution) SetSetFileModes(v []*SetFileModeEntry) *ConflictResolution { + s.SetFileModes = v + return s +} + +// Represents the input of a create branch operation. +type CreateBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the new branch to create. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The ID of the commit to point the new branch to. + // + // CommitId is a required field + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The name of the repository in which you want to create the new branch. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBranchInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.CommitId == nil { + invalidParams.Add(request.NewErrParamRequired("CommitId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. +func (s *CreateBranchInput) SetBranchName(v string) *CreateBranchInput { + s.BranchName = &v + return s +} + +// SetCommitId sets the CommitId field's value. +func (s *CreateBranchInput) SetCommitId(v string) *CreateBranchInput { + s.CommitId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CreateBranchInput) SetRepositoryName(v string) *CreateBranchInput { + s.RepositoryName = &v + return s +} + +type CreateBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchOutput) GoString() string { + return s.String() +} + +type CreateCommitInput struct { + _ struct{} `type:"structure"` + + // The name of the author who created the commit. This information will be used + // as both the author and committer for the commit. + AuthorName *string `locationName:"authorName" type:"string"` + + // The name of the branch where you will create the commit. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The commit message you want to include as part of creating the commit. Commit + // messages are limited to 256 KB. If no message is specified, a default message + // will be used. 
+ CommitMessage *string `locationName:"commitMessage" type:"string"` + + // The files to delete in this commit. These files will still exist in prior + // commits. + DeleteFiles []*DeleteFileEntry `locationName:"deleteFiles" type:"list"` + + // The email address of the person who created the commit. + Email *string `locationName:"email" type:"string"` + + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The ID of the commit that is the parent of the commit you will create. If + // this is an empty repository, this is not required. + ParentCommitId *string `locationName:"parentCommitId" type:"string"` + + // The files to add or update in this commit. + PutFiles []*PutFileEntry `locationName:"putFiles" type:"list"` + + // The name of the repository where you will create the commit. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The file modes to update for files in this commit. + SetFileModes []*SetFileModeEntry `locationName:"setFileModes" type:"list"` +} + +// String returns the string representation +func (s CreateCommitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCommitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCommitInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.DeleteFiles != nil { + for i, v := range s.DeleteFiles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DeleteFiles", i), err.(request.ErrInvalidParams)) + } + } + } + if s.PutFiles != nil { + for i, v := range s.PutFiles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PutFiles", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SetFileModes != nil { + for i, v := range s.SetFileModes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SetFileModes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorName sets the AuthorName field's value. +func (s *CreateCommitInput) SetAuthorName(v string) *CreateCommitInput { + s.AuthorName = &v + return s +} + +// SetBranchName sets the BranchName field's value. +func (s *CreateCommitInput) SetBranchName(v string) *CreateCommitInput { + s.BranchName = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. 
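+
+// Example (illustrative only): creating a commit that adds one file, using
+// the CreateCommitInput type defined above. The repository, branch, and file
+// values are assumptions for this sketch; the elided parent commit ID must be
+// the current tip of the branch unless the repository is empty.
+//
+//     out, err := svc.CreateCommit(&codecommit.CreateCommitInput{
+//         RepositoryName: aws.String("my-repo"),
+//         BranchName:     aws.String("master"),
+//         ParentCommitId: aws.String("<full commit ID of the branch tip>"),
+//         CommitMessage:  aws.String("Add a README"),
+//         PutFiles: []*codecommit.PutFileEntry{
+//             {
+//                 FilePath:    aws.String("README.md"),
+//                 FileContent: []byte("# my-repo\n"),
+//             },
+//         },
+//     })
+//     if err == nil {
+//         fmt.Println(aws.StringValue(out.CommitId))
+//     }
+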
+func (s *CreateCommitInput) SetCommitMessage(v string) *CreateCommitInput {
+ s.CommitMessage = &v
+ return s
+}
+
+// SetDeleteFiles sets the DeleteFiles field's value.
+func (s *CreateCommitInput) SetDeleteFiles(v []*DeleteFileEntry) *CreateCommitInput {
+ s.DeleteFiles = v
+ return s
+}
+
+// SetEmail sets the Email field's value.
+func (s *CreateCommitInput) SetEmail(v string) *CreateCommitInput {
+ s.Email = &v
+ return s
+}
+
+// SetKeepEmptyFolders sets the KeepEmptyFolders field's value.
+func (s *CreateCommitInput) SetKeepEmptyFolders(v bool) *CreateCommitInput {
+ s.KeepEmptyFolders = &v
+ return s
+}
+
+// SetParentCommitId sets the ParentCommitId field's value.
+func (s *CreateCommitInput) SetParentCommitId(v string) *CreateCommitInput {
+ s.ParentCommitId = &v
+ return s
+}
+
+// SetPutFiles sets the PutFiles field's value.
+func (s *CreateCommitInput) SetPutFiles(v []*PutFileEntry) *CreateCommitInput {
+ s.PutFiles = v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *CreateCommitInput) SetRepositoryName(v string) *CreateCommitInput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetSetFileModes sets the SetFileModes field's value.
+func (s *CreateCommitInput) SetSetFileModes(v []*SetFileModeEntry) *CreateCommitInput {
+ s.SetFileModes = v
+ return s
+}
+
+type CreateCommitOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The full commit ID of the commit that contains your committed file changes.
+ CommitId *string `locationName:"commitId" type:"string"`
+
+ // The files added as part of the committed file changes.
+ FilesAdded []*FileMetadata `locationName:"filesAdded" type:"list"`
+
+ // The files deleted as part of the committed file changes.
+ FilesDeleted []*FileMetadata `locationName:"filesDeleted" type:"list"`
+
+ // The files updated as part of the committed file changes.
+ FilesUpdated []*FileMetadata `locationName:"filesUpdated" type:"list"`
+
+ // The full SHA-1 pointer of the tree information for the commit that contains
+ // the committed file changes.
+ TreeId *string `locationName:"treeId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateCommitOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCommitOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommitId sets the CommitId field's value.
+func (s *CreateCommitOutput) SetCommitId(v string) *CreateCommitOutput {
+ s.CommitId = &v
+ return s
+}
+
+// SetFilesAdded sets the FilesAdded field's value.
+func (s *CreateCommitOutput) SetFilesAdded(v []*FileMetadata) *CreateCommitOutput {
+ s.FilesAdded = v
+ return s
+}
+
+// SetFilesDeleted sets the FilesDeleted field's value.
+func (s *CreateCommitOutput) SetFilesDeleted(v []*FileMetadata) *CreateCommitOutput {
+ s.FilesDeleted = v
+ return s
+}
+
+// SetFilesUpdated sets the FilesUpdated field's value.
+func (s *CreateCommitOutput) SetFilesUpdated(v []*FileMetadata) *CreateCommitOutput {
+ s.FilesUpdated = v
+ return s
+}
+
+// SetTreeId sets the TreeId field's value.
+func (s *CreateCommitOutput) SetTreeId(v string) *CreateCommitOutput {
+ s.TreeId = &v
+ return s
+}
+
+type CreatePullRequestInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, client-generated idempotency token that when provided in a request,
+ // ensures the request cannot be repeated with a changed parameter.
If a request + // is received with the same parameters and a token is included, the request + // will return information about the initial request that used that token. + // + // The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you + // do not have to generate an idempotency token, as this will be done for you. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + + // A description of the pull request. + Description *string `locationName:"description" type:"string"` + + // The targets for the pull request, including the source of the code to be + // reviewed (the source branch), and the destination where the creator of the + // pull request intends the code to be merged after the pull request is closed + // (the destination branch). + // + // Targets is a required field + Targets []*Target `locationName:"targets" type:"list" required:"true"` + + // The title of the pull request. This title will be used to identify the pull + // request to other users in the repository. + // + // Title is a required field + Title *string `locationName:"title" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePullRequestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePullRequestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePullRequestInput"} + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Title == nil { + invalidParams.Add(request.NewErrParamRequired("Title")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreatePullRequestInput) SetClientRequestToken(v string) *CreatePullRequestInput { + s.ClientRequestToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreatePullRequestInput) SetDescription(v string) *CreatePullRequestInput { + s.Description = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *CreatePullRequestInput) SetTargets(v []*Target) *CreatePullRequestInput { + s.Targets = v + return s +} + +// SetTitle sets the Title field's value. +func (s *CreatePullRequestInput) SetTitle(v string) *CreatePullRequestInput { + s.Title = &v + return s +} + +type CreatePullRequestOutput struct { + _ struct{} `type:"structure"` + + // Information about the newly created pull request. + // + // PullRequest is a required field + PullRequest *PullRequest `locationName:"pullRequest" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePullRequestOutput) GoString() string { + return s.String() +} + +// SetPullRequest sets the PullRequest field's value. 
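// Usage sketch (editorial): opening a pull request against an assumed
// repository. Target's RepositoryName and SourceReference are the required
// pieces; the branch names here are placeholders. ClientRequestToken is left
// unset because the field is tagged idempotencyToken:"true" above, so the SDK
// fills it in automatically.
//
//    out, err := svc.CreatePullRequest(&codecommit.CreatePullRequestInput{
//        Title:       aws.String("Add retry logic"),
//        Description: aws.String("Wraps the uploader with exponential backoff."),
//        Targets: []*codecommit.Target{{
//            RepositoryName:       aws.String("my-repo"),
//            SourceReference:      aws.String("feature/retries"),
//            DestinationReference: aws.String("main"),
//        }},
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.PullRequest.PullRequestId))
//    }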
+func (s *CreatePullRequestOutput) SetPullRequest(v *PullRequest) *CreatePullRequestOutput { + s.PullRequest = v + return s +} + +// Represents the input of a create repository operation. +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // A comment or description about the new repository. + // + // The description field for a repository accepts all HTML characters and all + // valid Unicode characters. Applications that do not HTML-encode the description + // and display it in a web page could expose users to potentially malicious + // code. Make sure that you HTML-encode the description field in any application + // that uses this API to display the repository description on a web page. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the new repository to be created. + // + // The repository name must be unique across the calling AWS account. In addition, + // repository names are limited to 100 alphanumeric, dash, and underscore characters, + // and cannot include certain characters. For a full description of the limits + // on repository names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) + // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // One or more tag key-value pairs to use when tagging this repository. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRepositoryDescription sets the RepositoryDescription field's value. +func (s *CreateRepositoryInput) SetRepositoryDescription(v string) *CreateRepositoryInput { + s.RepositoryDescription = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput { + s.RepositoryName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateRepositoryInput) SetTags(v map[string]*string) *CreateRepositoryInput { + s.Tags = v + return s +} + +// Represents the output of a create repository operation. +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the newly created repository. + RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +// SetRepositoryMetadata sets the RepositoryMetadata field's value. 
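// Usage sketch (editorial): creating a tagged repository. The name,
// description, and tag values are placeholders; per the doc comment above,
// HTML-encode the description before rendering it in a web page.
//
//    out, err := svc.CreateRepository(&codecommit.CreateRepositoryInput{
//        RepositoryName:        aws.String("my-repo"),
//        RepositoryDescription: aws.String("Demo repository"),
//        Tags:                  map[string]*string{"team": aws.String("platform")},
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.RepositoryMetadata.RepositoryId))
//    }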
+func (s *CreateRepositoryOutput) SetRepositoryMetadata(v *RepositoryMetadata) *CreateRepositoryOutput { + s.RepositoryMetadata = v + return s +} + +type CreateUnreferencedMergeCommitInput struct { + _ struct{} `type:"structure"` + + // The name of the author who created the unreferenced commit. This information + // will be used as both the author and committer for the commit. + AuthorName *string `locationName:"authorName" type:"string"` + + // The commit message for the unreferenced commit. + CommitMessage *string `locationName:"commitMessage" type:"string"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE + // is chosen as the conflict resolution strategy. + ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` + + // The email address for the person who created the unreferenced commit. + Email *string `locationName:"email" type:"string"` + + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The merge option or strategy you want to use to merge the code. + // + // MergeOption is a required field + MergeOption *string `locationName:"mergeOption" type:"string" required:"true" enum:"MergeOptionTypeEnum"` + + // The name of the repository where you want to create the unreferenced merge + // commit. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUnreferencedMergeCommitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUnreferencedMergeCommitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateUnreferencedMergeCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUnreferencedMergeCommitInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } + if s.MergeOption == nil { + invalidParams.Add(request.NewErrParamRequired("MergeOption")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorName sets the AuthorName field's value. +func (s *CreateUnreferencedMergeCommitInput) SetAuthorName(v string) *CreateUnreferencedMergeCommitInput { + s.AuthorName = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *CreateUnreferencedMergeCommitInput) SetCommitMessage(v string) *CreateUnreferencedMergeCommitInput { + s.CommitMessage = &v + return s +} + +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *CreateUnreferencedMergeCommitInput) SetConflictDetailLevel(v string) *CreateUnreferencedMergeCommitInput { + s.ConflictDetailLevel = &v + return s +} + +// SetConflictResolution sets the ConflictResolution field's value. +func (s *CreateUnreferencedMergeCommitInput) SetConflictResolution(v *ConflictResolution) *CreateUnreferencedMergeCommitInput { + s.ConflictResolution = v + return s +} + +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *CreateUnreferencedMergeCommitInput) SetConflictResolutionStrategy(v string) *CreateUnreferencedMergeCommitInput { + s.ConflictResolutionStrategy = &v + return s +} + +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *CreateUnreferencedMergeCommitInput) SetDestinationCommitSpecifier(v string) *CreateUnreferencedMergeCommitInput { + s.DestinationCommitSpecifier = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *CreateUnreferencedMergeCommitInput) SetEmail(v string) *CreateUnreferencedMergeCommitInput { + s.Email = &v + return s +} + +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. +func (s *CreateUnreferencedMergeCommitInput) SetKeepEmptyFolders(v bool) *CreateUnreferencedMergeCommitInput { + s.KeepEmptyFolders = &v + return s +} + +// SetMergeOption sets the MergeOption field's value. +func (s *CreateUnreferencedMergeCommitInput) SetMergeOption(v string) *CreateUnreferencedMergeCommitInput { + s.MergeOption = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *CreateUnreferencedMergeCommitInput) SetRepositoryName(v string) *CreateUnreferencedMergeCommitInput { + s.RepositoryName = &v + return s +} + +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. 
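// Usage sketch (editorial): previewing a three-way merge without moving any
// branch ref. The literal enum strings "THREE_WAY_MERGE" and "LINE_LEVEL" are
// assumed values of MergeOptionTypeEnum and ConflictDetailLevelTypeEnum; the
// repository and branch names are placeholders.
//
//    out, err := svc.CreateUnreferencedMergeCommit(&codecommit.CreateUnreferencedMergeCommitInput{
//        RepositoryName:             aws.String("my-repo"),
//        SourceCommitSpecifier:      aws.String("feature/retries"),
//        DestinationCommitSpecifier: aws.String("main"),
//        MergeOption:                aws.String("THREE_WAY_MERGE"),
//        ConflictDetailLevel:        aws.String("LINE_LEVEL"),
//    })
//    if err == nil {
//        // The resulting commit is reachable only by its ID; no ref points at it.
//        fmt.Println(aws.StringValue(out.CommitId))
//    }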
+func (s *CreateUnreferencedMergeCommitInput) SetSourceCommitSpecifier(v string) *CreateUnreferencedMergeCommitInput { + s.SourceCommitSpecifier = &v + return s +} + +type CreateUnreferencedMergeCommitOutput struct { + _ struct{} `type:"structure"` + + // The full commit ID of the commit that contains your merge results. + CommitId *string `locationName:"commitId" type:"string"` + + // The full SHA-1 pointer of the tree information for the commit that contains + // the merge results. + TreeId *string `locationName:"treeId" type:"string"` +} + +// String returns the string representation +func (s CreateUnreferencedMergeCommitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUnreferencedMergeCommitOutput) GoString() string { + return s.String() +} + +// SetCommitId sets the CommitId field's value. +func (s *CreateUnreferencedMergeCommitOutput) SetCommitId(v string) *CreateUnreferencedMergeCommitOutput { + s.CommitId = &v + return s +} + +// SetTreeId sets the TreeId field's value. +func (s *CreateUnreferencedMergeCommitOutput) SetTreeId(v string) *CreateUnreferencedMergeCommitOutput { + s.TreeId = &v + return s +} + +// Represents the input of a delete branch operation. +type DeleteBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch to delete. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The name of the repository that contains the branch to be deleted. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBranchInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. +func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput { + s.BranchName = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteBranchInput) SetRepositoryName(v string) *DeleteBranchInput { + s.RepositoryName = &v + return s +} + +// Represents the output of a delete branch operation. +type DeleteBranchOutput struct { + _ struct{} `type:"structure"` + + // Information about the branch deleted by the operation, including the branch + // name and the commit ID that was the tip of the branch. 
+ DeletedBranch *BranchInfo `locationName:"deletedBranch" type:"structure"` +} + +// String returns the string representation +func (s DeleteBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchOutput) GoString() string { + return s.String() +} + +// SetDeletedBranch sets the DeletedBranch field's value. +func (s *DeleteBranchOutput) SetDeletedBranch(v *BranchInfo) *DeleteBranchOutput { + s.DeletedBranch = v + return s +} + +type DeleteCommentContentInput struct { + _ struct{} `type:"structure"` + + // The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit + // or GetCommentsForPullRequest. + // + // CommentId is a required field + CommentId *string `locationName:"commentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCommentContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCommentContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCommentContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCommentContentInput"} + if s.CommentId == nil { + invalidParams.Add(request.NewErrParamRequired("CommentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommentId sets the CommentId field's value. +func (s *DeleteCommentContentInput) SetCommentId(v string) *DeleteCommentContentInput { + s.CommentId = &v + return s +} + +type DeleteCommentContentOutput struct { + _ struct{} `type:"structure"` + + // Information about the comment you just deleted. + Comment *Comment `locationName:"comment" type:"structure"` +} + +// String returns the string representation +func (s DeleteCommentContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCommentContentOutput) GoString() string { + return s.String() +} + +// SetComment sets the Comment field's value. +func (s *DeleteCommentContentOutput) SetComment(v *Comment) *DeleteCommentContentOutput { + s.Comment = v + return s +} + +// A file that will be deleted as part of a commit. +type DeleteFileEntry struct { + _ struct{} `type:"structure"` + + // The full path of the file that will be deleted, including the name of the + // file. + // + // FilePath is a required field + FilePath *string `locationName:"filePath" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFileEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileEntry"} + if s.FilePath == nil { + invalidParams.Add(request.NewErrParamRequired("FilePath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilePath sets the FilePath field's value. +func (s *DeleteFileEntry) SetFilePath(v string) *DeleteFileEntry { + s.FilePath = &v + return s +} + +type DeleteFileInput struct { + _ struct{} `type:"structure"` + + // The name of the branch where the commit will be made deleting the file. 
+ // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The commit message you want to include as part of deleting the file. Commit + // messages are limited to 256 KB. If no message is specified, a default message + // will be used. + CommitMessage *string `locationName:"commitMessage" type:"string"` + + // The email address for the commit that deletes the file. If no email address + // is specified, the email address will be left blank. + Email *string `locationName:"email" type:"string"` + + // The fully-qualified path to the file that will be deleted, including the + // full name and extension of that file. For example, /examples/file.md is a + // fully qualified path to a file named file.md in a folder named examples. + // + // FilePath is a required field + FilePath *string `locationName:"filePath" type:"string" required:"true"` + + // Specifies whether to delete the folder or directory that contains the file + // you want to delete if that file is the only object in the folder or directory. + // By default, empty folders will be deleted. This includes empty folders that + // are part of the directory structure. For example, if the path to a file is + // dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file + // in dir4 will also delete the empty folders dir4, dir3, and dir2. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The name of the author of the commit that deletes the file. If no name is + // specified, the user's ARN will be used as the author name and committer name. + Name *string `locationName:"name" type:"string"` + + // The ID of the commit that is the tip of the branch where you want to create + // the commit that will delete the file. This must be the HEAD commit for the + // branch. The commit that deletes the file will be created from this commit + // ID. + // + // ParentCommitId is a required field + ParentCommitId *string `locationName:"parentCommitId" type:"string" required:"true"` + + // The name of the repository that contains the file to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.FilePath == nil { + invalidParams.Add(request.NewErrParamRequired("FilePath")) + } + if s.ParentCommitId == nil { + invalidParams.Add(request.NewErrParamRequired("ParentCommitId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. 
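// Usage sketch (editorial): deleting a single file in one call. ParentCommitId
// must be the current tip of the branch; the repository, branch, and commit ID
// values below are placeholders.
//
//    out, err := svc.DeleteFile(&codecommit.DeleteFileInput{
//        RepositoryName:   aws.String("my-repo"),
//        BranchName:       aws.String("main"),
//        FilePath:         aws.String("examples/file.md"),
//        ParentCommitId:   aws.String(tipCommitID),
//        KeepEmptyFolders: aws.Bool(true), // retain folders the deletion empties
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CommitId), aws.StringValue(out.BlobId))
//    }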
+func (s *DeleteFileInput) SetBranchName(v string) *DeleteFileInput { + s.BranchName = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *DeleteFileInput) SetCommitMessage(v string) *DeleteFileInput { + s.CommitMessage = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *DeleteFileInput) SetEmail(v string) *DeleteFileInput { + s.Email = &v + return s +} + +// SetFilePath sets the FilePath field's value. +func (s *DeleteFileInput) SetFilePath(v string) *DeleteFileInput { + s.FilePath = &v + return s +} + +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. +func (s *DeleteFileInput) SetKeepEmptyFolders(v bool) *DeleteFileInput { + s.KeepEmptyFolders = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteFileInput) SetName(v string) *DeleteFileInput { + s.Name = &v + return s +} + +// SetParentCommitId sets the ParentCommitId field's value. +func (s *DeleteFileInput) SetParentCommitId(v string) *DeleteFileInput { + s.ParentCommitId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteFileInput) SetRepositoryName(v string) *DeleteFileInput { + s.RepositoryName = &v + return s +} + +type DeleteFileOutput struct { _ struct{} `type:"structure"` - // A list of repositories returned by the batch get repositories operation. - Repositories []*RepositoryMetadata `locationName:"repositories" type:"list"` + // The blob ID removed from the tree as part of deleting the file. + // + // BlobId is a required field + BlobId *string `locationName:"blobId" type:"string" required:"true"` - // Returns a list of repository names for which information could not be found. - RepositoriesNotFound []*string `locationName:"repositoriesNotFound" type:"list"` + // The full commit ID of the commit that contains the change that deletes the + // file. + // + // CommitId is a required field + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The fully-qualified path to the file that will be deleted, including the + // full name and extension of that file. + // + // FilePath is a required field + FilePath *string `locationName:"filePath" type:"string" required:"true"` + + // The full SHA-1 pointer of the tree information for the commit that contains + // the delete file change. + // + // TreeId is a required field + TreeId *string `locationName:"treeId" type:"string" required:"true"` } // String returns the string representation -func (s BatchGetRepositoriesOutput) String() string { +func (s DeleteFileOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetRepositoriesOutput) GoString() string { +func (s DeleteFileOutput) GoString() string { return s.String() } -// SetRepositories sets the Repositories field's value. -func (s *BatchGetRepositoriesOutput) SetRepositories(v []*RepositoryMetadata) *BatchGetRepositoriesOutput { - s.Repositories = v +// SetBlobId sets the BlobId field's value. +func (s *DeleteFileOutput) SetBlobId(v string) *DeleteFileOutput { + s.BlobId = &v return s } -// SetRepositoriesNotFound sets the RepositoriesNotFound field's value. -func (s *BatchGetRepositoriesOutput) SetRepositoriesNotFound(v []*string) *BatchGetRepositoriesOutput { - s.RepositoriesNotFound = v +// SetCommitId sets the CommitId field's value. +func (s *DeleteFileOutput) SetCommitId(v string) *DeleteFileOutput { + s.CommitId = &v return s } -// Returns information about a specific Git blob object. 
-type BlobMetadata struct { +// SetFilePath sets the FilePath field's value. +func (s *DeleteFileOutput) SetFilePath(v string) *DeleteFileOutput { + s.FilePath = &v + return s +} + +// SetTreeId sets the TreeId field's value. +func (s *DeleteFileOutput) SetTreeId(v string) *DeleteFileOutput { + s.TreeId = &v + return s +} + +// Represents the input of a delete repository operation. +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInput { + s.RepositoryName = &v + return s +} + +// Represents the output of a delete repository operation. +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The ID of the repository that was deleted. + RepositoryId *string `locationName:"repositoryId" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +// SetRepositoryId sets the RepositoryId field's value. +func (s *DeleteRepositoryOutput) SetRepositoryId(v string) *DeleteRepositoryOutput { + s.RepositoryId = &v + return s +} + +type DescribeMergeConflictsInput struct { + _ struct{} `type:"structure"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. 
+ // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` + + // The path of the target files used to describe the conflicts. + // + // FilePath is a required field + FilePath *string `locationName:"filePath" type:"string" required:"true"` + + // The maximum number of merge hunks to include in the output. + MaxMergeHunks *int64 `locationName:"maxMergeHunks" type:"integer"` + + // The merge option or strategy you want to use to merge the code. + // + // MergeOption is a required field + MergeOption *string `locationName:"mergeOption" type:"string" required:"true" enum:"MergeOptionTypeEnum"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository where you want to get information about a merge + // conflict. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMergeConflictsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMergeConflictsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMergeConflictsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMergeConflictsInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } + if s.FilePath == nil { + invalidParams.Add(request.NewErrParamRequired("FilePath")) + } + if s.MergeOption == nil { + invalidParams.Add(request.NewErrParamRequired("MergeOption")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *DescribeMergeConflictsInput) SetConflictDetailLevel(v string) *DescribeMergeConflictsInput { + s.ConflictDetailLevel = &v + return s +} + +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *DescribeMergeConflictsInput) SetConflictResolutionStrategy(v string) *DescribeMergeConflictsInput { + s.ConflictResolutionStrategy = &v + return s +} + +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *DescribeMergeConflictsInput) SetDestinationCommitSpecifier(v string) *DescribeMergeConflictsInput { + s.DestinationCommitSpecifier = &v + return s +} + +// SetFilePath sets the FilePath field's value. 
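// Usage sketch (editorial): paging through the merge hunks for one conflicted
// file by looping on NextToken, as the enumeration-token doc comments above
// describe. The enum string and all names are assumptions.
//
//    in := &codecommit.DescribeMergeConflictsInput{
//        RepositoryName:             aws.String("my-repo"),
//        SourceCommitSpecifier:      aws.String("feature/retries"),
//        DestinationCommitSpecifier: aws.String("main"),
//        MergeOption:                aws.String("THREE_WAY_MERGE"),
//        FilePath:                   aws.String("pkg/app/main.go"),
//        MaxMergeHunks:              aws.Int64(100),
//    }
//    for {
//        out, err := svc.DescribeMergeConflicts(in)
//        if err != nil {
//            break
//        }
//        fmt.Printf("%d hunks; conflict metadata: %v\n", len(out.MergeHunks), out.ConflictMetadata)
//        if out.NextToken == nil {
//            break
//        }
//        in.NextToken = out.NextToken
//    }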
+func (s *DescribeMergeConflictsInput) SetFilePath(v string) *DescribeMergeConflictsInput { + s.FilePath = &v + return s +} + +// SetMaxMergeHunks sets the MaxMergeHunks field's value. +func (s *DescribeMergeConflictsInput) SetMaxMergeHunks(v int64) *DescribeMergeConflictsInput { + s.MaxMergeHunks = &v + return s +} + +// SetMergeOption sets the MergeOption field's value. +func (s *DescribeMergeConflictsInput) SetMergeOption(v string) *DescribeMergeConflictsInput { + s.MergeOption = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMergeConflictsInput) SetNextToken(v string) *DescribeMergeConflictsInput { + s.NextToken = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeMergeConflictsInput) SetRepositoryName(v string) *DescribeMergeConflictsInput { + s.RepositoryName = &v + return s +} + +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *DescribeMergeConflictsInput) SetSourceCommitSpecifier(v string) *DescribeMergeConflictsInput { + s.SourceCommitSpecifier = &v + return s +} + +type DescribeMergeConflictsOutput struct { _ struct{} `type:"structure"` - // The full ID of the blob. - BlobId *string `locationName:"blobId" type:"string"` + // The commit ID of the merge base. + BaseCommitId *string `locationName:"baseCommitId" type:"string"` - // The file mode permissions of the blob. File mode permission codes include: + // Contains metadata about the conflicts found in the merge. // - // * 100644 indicates read/write - // - // * 100755 indicates read/write/execute + // ConflictMetadata is a required field + ConflictMetadata *ConflictMetadata `locationName:"conflictMetadata" type:"structure" required:"true"` + + // The commit ID of the destination commit specifier that was used in the merge + // evaluation. // - // * 160000 indicates a submodule + // DestinationCommitId is a required field + DestinationCommitId *string `locationName:"destinationCommitId" type:"string" required:"true"` + + // A list of merge hunks of the differences between the files or lines. // - // * 120000 indicates a symlink - Mode *string `locationName:"mode" type:"string"` + // MergeHunks is a required field + MergeHunks []*MergeHunk `locationName:"mergeHunks" type:"list" required:"true"` - // The path to the blob and any associated file name, if any. - Path *string `locationName:"path" type:"string"` + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The commit ID of the source commit specifier that was used in the merge evaluation. + // + // SourceCommitId is a required field + SourceCommitId *string `locationName:"sourceCommitId" type:"string" required:"true"` } // String returns the string representation -func (s BlobMetadata) String() string { +func (s DescribeMergeConflictsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BlobMetadata) GoString() string { +func (s DescribeMergeConflictsOutput) GoString() string { return s.String() } -// SetBlobId sets the BlobId field's value. -func (s *BlobMetadata) SetBlobId(v string) *BlobMetadata { - s.BlobId = &v +// SetBaseCommitId sets the BaseCommitId field's value. +func (s *DescribeMergeConflictsOutput) SetBaseCommitId(v string) *DescribeMergeConflictsOutput { + s.BaseCommitId = &v return s } -// SetMode sets the Mode field's value. 
-func (s *BlobMetadata) SetMode(v string) *BlobMetadata { - s.Mode = &v +// SetConflictMetadata sets the ConflictMetadata field's value. +func (s *DescribeMergeConflictsOutput) SetConflictMetadata(v *ConflictMetadata) *DescribeMergeConflictsOutput { + s.ConflictMetadata = v return s } -// SetPath sets the Path field's value. -func (s *BlobMetadata) SetPath(v string) *BlobMetadata { - s.Path = &v +// SetDestinationCommitId sets the DestinationCommitId field's value. +func (s *DescribeMergeConflictsOutput) SetDestinationCommitId(v string) *DescribeMergeConflictsOutput { + s.DestinationCommitId = &v return s } -// Returns information about a branch. -type BranchInfo struct { +// SetMergeHunks sets the MergeHunks field's value. +func (s *DescribeMergeConflictsOutput) SetMergeHunks(v []*MergeHunk) *DescribeMergeConflictsOutput { + s.MergeHunks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMergeConflictsOutput) SetNextToken(v string) *DescribeMergeConflictsOutput { + s.NextToken = &v + return s +} + +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *DescribeMergeConflictsOutput) SetSourceCommitId(v string) *DescribeMergeConflictsOutput { + s.SourceCommitId = &v + return s +} + +type DescribePullRequestEventsInput struct { _ struct{} `type:"structure"` - // The name of the branch. - BranchName *string `locationName:"branchName" min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the user whose actions resulted in the + // event. Examples include updating the pull request with additional commits + // or changing the status of a pull request. + ActorArn *string `locationName:"actorArn" type:"string"` - // The ID of the last commit made to the branch. - CommitId *string `locationName:"commitId" type:"string"` + // A non-negative integer used to limit the number of returned results. The + // default is 100 events, which is also the maximum number of events that can + // be returned in a result. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Optional. The pull request event type about which you want to return information. + PullRequestEventType *string `locationName:"pullRequestEventType" type:"string" enum:"PullRequestEventType"` + + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` } // String returns the string representation -func (s BranchInfo) String() string { +func (s DescribePullRequestEventsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BranchInfo) GoString() string { +func (s DescribePullRequestEventsInput) GoString() string { return s.String() } -// SetBranchName sets the BranchName field's value. -func (s *BranchInfo) SetBranchName(v string) *BranchInfo { - s.BranchName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePullRequestEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePullRequestEventsInput"} + if s.PullRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCommitId sets the CommitId field's value. -func (s *BranchInfo) SetCommitId(v string) *BranchInfo { - s.CommitId = &v +// SetActorArn sets the ActorArn field's value. +func (s *DescribePullRequestEventsInput) SetActorArn(v string) *DescribePullRequestEventsInput { + s.ActorArn = &v return s } -// Returns information about a specific comment. -type Comment struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the person who posted the comment. - AuthorArn *string `locationName:"authorArn" type:"string"` - - // A unique, client-generated idempotency token that when provided in a request, - // ensures the request cannot be repeated with a changed parameter. If a request - // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. - ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePullRequestEventsInput) SetMaxResults(v int64) *DescribePullRequestEventsInput { + s.MaxResults = &v + return s +} - // The system-generated comment ID. - CommentId *string `locationName:"commentId" type:"string"` +// SetNextToken sets the NextToken field's value. +func (s *DescribePullRequestEventsInput) SetNextToken(v string) *DescribePullRequestEventsInput { + s.NextToken = &v + return s +} - // The content of the comment. - Content *string `locationName:"content" type:"string"` +// SetPullRequestEventType sets the PullRequestEventType field's value. +func (s *DescribePullRequestEventsInput) SetPullRequestEventType(v string) *DescribePullRequestEventsInput { + s.PullRequestEventType = &v + return s +} - // The date and time the comment was created, in timestamp format. - CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` +// SetPullRequestId sets the PullRequestId field's value. +func (s *DescribePullRequestEventsInput) SetPullRequestId(v string) *DescribePullRequestEventsInput { + s.PullRequestId = &v + return s +} - // A Boolean value indicating whether the comment has been deleted. - Deleted *bool `locationName:"deleted" type:"boolean"` +type DescribePullRequestEventsOutput struct { + _ struct{} `type:"structure"` - // The ID of the comment for which this comment is a reply, if any. - InReplyTo *string `locationName:"inReplyTo" type:"string"` + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` - // The date and time the comment was most recently modified, in timestamp format. - LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + // Information about the pull request events. 
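// Usage sketch (editorial): fetching the status-change events for a pull
// request, one page at a time. "PULL_REQUEST_STATUS_CHANGED" is an assumed
// PullRequestEventType value, and the pull request ID is a placeholder.
//
//    in := &codecommit.DescribePullRequestEventsInput{
//        PullRequestId:        aws.String("42"),
//        PullRequestEventType: aws.String("PULL_REQUEST_STATUS_CHANGED"),
//        MaxResults:           aws.Int64(100),
//    }
//    for {
//        out, err := svc.DescribePullRequestEvents(in)
//        if err != nil {
//            break
//        }
//        for _, ev := range out.PullRequestEvents {
//            fmt.Println(ev) // each event records the actor ARN and event date
//        }
//        if out.NextToken == nil {
//            break
//        }
//        in.NextToken = out.NextToken
//    }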
+ // + // PullRequestEvents is a required field + PullRequestEvents []*PullRequestEvent `locationName:"pullRequestEvents" type:"list" required:"true"` } // String returns the string representation -func (s Comment) String() string { +func (s DescribePullRequestEventsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Comment) GoString() string { +func (s DescribePullRequestEventsOutput) GoString() string { return s.String() } -// SetAuthorArn sets the AuthorArn field's value. -func (s *Comment) SetAuthorArn(v string) *Comment { - s.AuthorArn = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribePullRequestEventsOutput) SetNextToken(v string) *DescribePullRequestEventsOutput { + s.NextToken = &v return s } -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *Comment) SetClientRequestToken(v string) *Comment { - s.ClientRequestToken = &v +// SetPullRequestEvents sets the PullRequestEvents field's value. +func (s *DescribePullRequestEventsOutput) SetPullRequestEvents(v []*PullRequestEvent) *DescribePullRequestEventsOutput { + s.PullRequestEvents = v return s } -// SetCommentId sets the CommentId field's value. -func (s *Comment) SetCommentId(v string) *Comment { - s.CommentId = &v - return s +// Returns information about a set of differences for a commit specifier. +type Difference struct { + _ struct{} `type:"structure"` + + // Information about an afterBlob data type object, including the ID, the file + // mode permission code, and the path. + AfterBlob *BlobMetadata `locationName:"afterBlob" type:"structure"` + + // Information about a beforeBlob data type object, including the ID, the file + // mode permission code, and the path. + BeforeBlob *BlobMetadata `locationName:"beforeBlob" type:"structure"` + + // Whether the change type of the difference is an addition (A), deletion (D), + // or modification (M). + ChangeType *string `locationName:"changeType" type:"string" enum:"ChangeTypeEnum"` } -// SetContent sets the Content field's value. -func (s *Comment) SetContent(v string) *Comment { - s.Content = &v - return s +// String returns the string representation +func (s Difference) String() string { + return awsutil.Prettify(s) } -// SetCreationDate sets the CreationDate field's value. -func (s *Comment) SetCreationDate(v time.Time) *Comment { - s.CreationDate = &v - return s +// GoString returns the string representation +func (s Difference) GoString() string { + return s.String() } -// SetDeleted sets the Deleted field's value. -func (s *Comment) SetDeleted(v bool) *Comment { - s.Deleted = &v +// SetAfterBlob sets the AfterBlob field's value. +func (s *Difference) SetAfterBlob(v *BlobMetadata) *Difference { + s.AfterBlob = v return s } -// SetInReplyTo sets the InReplyTo field's value. -func (s *Comment) SetInReplyTo(v string) *Comment { - s.InReplyTo = &v +// SetBeforeBlob sets the BeforeBlob field's value. +func (s *Difference) SetBeforeBlob(v *BlobMetadata) *Difference { + s.BeforeBlob = v return s } -// SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *Comment) SetLastModifiedDate(v time.Time) *Comment { - s.LastModifiedDate = &v +// SetChangeType sets the ChangeType field's value. +func (s *Difference) SetChangeType(v string) *Difference { + s.ChangeType = &v return s } -// Returns information about comments on the comparison between two commits. -type CommentsForComparedCommit struct { +// Returns information about a file in a repository. 
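// Usage sketch (editorial): the Difference type defined just above is what the
// GetDifferences operation returns; one plausible way to walk it, guarding the
// blob that is nil for additions and deletions. GetDifferences itself is not
// shown in this hunk, and the commit IDs and repository name are placeholders.
//
//    out, err := svc.GetDifferences(&codecommit.GetDifferencesInput{
//        RepositoryName:        aws.String("my-repo"),
//        BeforeCommitSpecifier: aws.String(beforeCommitID),
//        AfterCommitSpecifier:  aws.String(afterCommitID),
//    })
//    if err == nil {
//        for _, d := range out.Differences {
//            blob := d.AfterBlob
//            if blob == nil {
//                blob = d.BeforeBlob // deletions carry no after blob
//            }
//            fmt.Println(aws.StringValue(d.ChangeType), aws.StringValue(blob.Path))
//        }
//    }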
+type File struct { _ struct{} `type:"structure"` - // The full blob ID of the commit used to establish the 'after' of the comparison. - AfterBlobId *string `locationName:"afterBlobId" type:"string"` - - // The full commit ID of the commit used to establish the 'after' of the comparison. - AfterCommitId *string `locationName:"afterCommitId" type:"string"` - - // The full blob ID of the commit used to establish the 'before' of the comparison. - BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - - // The full commit ID of the commit used to establish the 'before' of the comparison. - BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - - // An array of comment objects. Each comment object contains information about - // a comment on the comparison between commits. - Comments []*Comment `locationName:"comments" type:"list"` + // The fully-qualified path to the file in the repository. + AbsolutePath *string `locationName:"absolutePath" type:"string"` - // Location information about the comment on the comparison, including the file - // name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' or 'AFTER'. - Location *Location `locationName:"location" type:"structure"` + // The blob ID that contains the file information. + BlobId *string `locationName:"blobId" type:"string"` - // The name of the repository that contains the compared commits. - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` + // The extrapolated file mode permissions for the file. Valid values include + // EXECUTABLE and NORMAL. + FileMode *string `locationName:"fileMode" type:"string" enum:"FileModeTypeEnum"` + + // The relative path of the file from the folder where the query originated. + RelativePath *string `locationName:"relativePath" type:"string"` } // String returns the string representation -func (s CommentsForComparedCommit) String() string { +func (s File) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CommentsForComparedCommit) GoString() string { +func (s File) GoString() string { return s.String() } -// SetAfterBlobId sets the AfterBlobId field's value. -func (s *CommentsForComparedCommit) SetAfterBlobId(v string) *CommentsForComparedCommit { - s.AfterBlobId = &v - return s -} - -// SetAfterCommitId sets the AfterCommitId field's value. -func (s *CommentsForComparedCommit) SetAfterCommitId(v string) *CommentsForComparedCommit { - s.AfterCommitId = &v - return s -} - -// SetBeforeBlobId sets the BeforeBlobId field's value. -func (s *CommentsForComparedCommit) SetBeforeBlobId(v string) *CommentsForComparedCommit { - s.BeforeBlobId = &v - return s -} - -// SetBeforeCommitId sets the BeforeCommitId field's value. -func (s *CommentsForComparedCommit) SetBeforeCommitId(v string) *CommentsForComparedCommit { - s.BeforeCommitId = &v +// SetAbsolutePath sets the AbsolutePath field's value. +func (s *File) SetAbsolutePath(v string) *File { + s.AbsolutePath = &v return s } -// SetComments sets the Comments field's value. -func (s *CommentsForComparedCommit) SetComments(v []*Comment) *CommentsForComparedCommit { - s.Comments = v +// SetBlobId sets the BlobId field's value. +func (s *File) SetBlobId(v string) *File { + s.BlobId = &v return s } -// SetLocation sets the Location field's value. -func (s *CommentsForComparedCommit) SetLocation(v *Location) *CommentsForComparedCommit { - s.Location = v +// SetFileMode sets the FileMode field's value. 
+func (s *File) SetFileMode(v string) *File { + s.FileMode = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *CommentsForComparedCommit) SetRepositoryName(v string) *CommentsForComparedCommit { - s.RepositoryName = &v +// SetRelativePath sets the RelativePath field's value. +func (s *File) SetRelativePath(v string) *File { + s.RelativePath = &v return s } -// Returns information about comments on a pull request. -type CommentsForPullRequest struct { +// A file that will be added, updated, or deleted as part of a commit. +type FileMetadata struct { _ struct{} `type:"structure"` - // The full blob ID of the file on which you want to comment on the source commit. - AfterBlobId *string `locationName:"afterBlobId" type:"string"` - - // he full commit ID of the commit that was the tip of the source branch at - // the time the comment was made. - AfterCommitId *string `locationName:"afterCommitId" type:"string"` - - // The full blob ID of the file on which you want to comment on the destination - // commit. - BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - - // The full commit ID of the commit that was the tip of the destination branch - // when the pull request was created. This commit will be superceded by the - // after commit in the source branch when and if you merge the source branch - // into the destination branch. - BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - - // An array of comment objects. Each comment object contains information about - // a comment on the pull request. - Comments []*Comment `locationName:"comments" type:"list"` - - // Location information about the comment on the pull request, including the - // file name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' (destination branch) or 'AFTER' (source branch). - Location *Location `locationName:"location" type:"structure"` + // The full path to the file that will be added or updated, including the name + // of the file. + AbsolutePath *string `locationName:"absolutePath" type:"string"` - // The system-generated ID of the pull request. - PullRequestId *string `locationName:"pullRequestId" type:"string"` + // The blob ID that contains the file information. + BlobId *string `locationName:"blobId" type:"string"` - // The name of the repository that contains the pull request. - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` + // The extrapolated file mode permissions for the file. Valid values include + // EXECUTABLE and NORMAL. + FileMode *string `locationName:"fileMode" type:"string" enum:"FileModeTypeEnum"` } // String returns the string representation -func (s CommentsForPullRequest) String() string { +func (s FileMetadata) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CommentsForPullRequest) GoString() string { +func (s FileMetadata) GoString() string { return s.String() } -// SetAfterBlobId sets the AfterBlobId field's value. -func (s *CommentsForPullRequest) SetAfterBlobId(v string) *CommentsForPullRequest { - s.AfterBlobId = &v +// SetAbsolutePath sets the AbsolutePath field's value. +func (s *FileMetadata) SetAbsolutePath(v string) *FileMetadata { + s.AbsolutePath = &v return s } -// SetAfterCommitId sets the AfterCommitId field's value. -func (s *CommentsForPullRequest) SetAfterCommitId(v string) *CommentsForPullRequest { - s.AfterCommitId = &v +// SetBlobId sets the BlobId field's value. 
+func (s *FileMetadata) SetBlobId(v string) *FileMetadata { + s.BlobId = &v return s } -// SetBeforeBlobId sets the BeforeBlobId field's value. -func (s *CommentsForPullRequest) SetBeforeBlobId(v string) *CommentsForPullRequest { - s.BeforeBlobId = &v +// SetFileMode sets the FileMode field's value. +func (s *FileMetadata) SetFileMode(v string) *FileMetadata { + s.FileMode = &v return s } -// SetBeforeCommitId sets the BeforeCommitId field's value. -func (s *CommentsForPullRequest) SetBeforeCommitId(v string) *CommentsForPullRequest { - s.BeforeCommitId = &v - return s +// Information about file modes in a merge or pull request. +type FileModes struct { + _ struct{} `type:"structure"` + + // The file mode of a file in the base of a merge or pull request. + Base *string `locationName:"base" type:"string" enum:"FileModeTypeEnum"` + + // The file mode of a file in the destination of a merge or pull request. + Destination *string `locationName:"destination" type:"string" enum:"FileModeTypeEnum"` + + // The file mode of a file in the source of a merge or pull request. + Source *string `locationName:"source" type:"string" enum:"FileModeTypeEnum"` } -// SetComments sets the Comments field's value. -func (s *CommentsForPullRequest) SetComments(v []*Comment) *CommentsForPullRequest { - s.Comments = v - return s +// String returns the string representation +func (s FileModes) String() string { + return awsutil.Prettify(s) } -// SetLocation sets the Location field's value. -func (s *CommentsForPullRequest) SetLocation(v *Location) *CommentsForPullRequest { - s.Location = v +// GoString returns the string representation +func (s FileModes) GoString() string { + return s.String() +} + +// SetBase sets the Base field's value. +func (s *FileModes) SetBase(v string) *FileModes { + s.Base = &v return s } -// SetPullRequestId sets the PullRequestId field's value. -func (s *CommentsForPullRequest) SetPullRequestId(v string) *CommentsForPullRequest { - s.PullRequestId = &v +// SetDestination sets the Destination field's value. +func (s *FileModes) SetDestination(v string) *FileModes { + s.Destination = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *CommentsForPullRequest) SetRepositoryName(v string) *CommentsForPullRequest { - s.RepositoryName = &v +// SetSource sets the Source field's value. +func (s *FileModes) SetSource(v string) *FileModes { + s.Source = &v return s } -// Returns information about a specific commit. -type Commit struct { +// Information about the size of files in a merge or pull request. +type FileSizes struct { _ struct{} `type:"structure"` - // Any additional data associated with the specified commit. - AdditionalData *string `locationName:"additionalData" type:"string"` - - // Information about the author of the specified commit. Information includes - // the date in timestamp format with GMT offset, the name of the author, and - // the email address for the author, as configured in Git. - Author *UserInfo `locationName:"author" type:"structure"` - - // The full SHA of the specified commit. - CommitId *string `locationName:"commitId" type:"string"` - - // Information about the person who committed the specified commit, also known - // as the committer. Information includes the date in timestamp format with - // GMT offset, the name of the committer, and the email address for the committer, - // as configured in Git. 
- // - // For more information about the difference between an author and a committer - // in Git, see Viewing the Commit History (http://git-scm.com/book/ch2-3.html) - // in Pro Git by Scott Chacon and Ben Straub. - Committer *UserInfo `locationName:"committer" type:"structure"` - - // The commit message associated with the specified commit. - Message *string `locationName:"message" type:"string"` + // The size of a file in the base of a merge or pull request. + Base *int64 `locationName:"base" type:"long"` - // A list of parent commits for the specified commit. Each parent commit ID - // is the full commit ID. - Parents []*string `locationName:"parents" type:"list"` + // The size of a file in the destination of a merge or pull request. + Destination *int64 `locationName:"destination" type:"long"` - // Tree information for the specified commit. - TreeId *string `locationName:"treeId" type:"string"` + // The size of a file in the source of a merge or pull request. + Source *int64 `locationName:"source" type:"long"` } // String returns the string representation -func (s Commit) String() string { +func (s FileSizes) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Commit) GoString() string { +func (s FileSizes) GoString() string { return s.String() } -// SetAdditionalData sets the AdditionalData field's value. -func (s *Commit) SetAdditionalData(v string) *Commit { - s.AdditionalData = &v +// SetBase sets the Base field's value. +func (s *FileSizes) SetBase(v int64) *FileSizes { + s.Base = &v return s } -// SetAuthor sets the Author field's value. -func (s *Commit) SetAuthor(v *UserInfo) *Commit { - s.Author = v +// SetDestination sets the Destination field's value. +func (s *FileSizes) SetDestination(v int64) *FileSizes { + s.Destination = &v return s } -// SetCommitId sets the CommitId field's value. -func (s *Commit) SetCommitId(v string) *Commit { - s.CommitId = &v +// SetSource sets the Source field's value. +func (s *FileSizes) SetSource(v int64) *FileSizes { + s.Source = &v return s } -// SetCommitter sets the Committer field's value. -func (s *Commit) SetCommitter(v *UserInfo) *Commit { - s.Committer = v - return s +// Returns information about a folder in a repository. +type Folder struct { + _ struct{} `type:"structure"` + + // The fully-qualified path of the folder in the repository. + AbsolutePath *string `locationName:"absolutePath" type:"string"` + + // The relative path of the specified folder from the folder where the query + // originated. + RelativePath *string `locationName:"relativePath" type:"string"` + + // The full SHA-1 pointer of the tree information for the commit that contains + // the folder. + TreeId *string `locationName:"treeId" type:"string"` } -// SetMessage sets the Message field's value. -func (s *Commit) SetMessage(v string) *Commit { - s.Message = &v +// String returns the string representation +func (s Folder) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Folder) GoString() string { + return s.String() +} + +// SetAbsolutePath sets the AbsolutePath field's value. +func (s *Folder) SetAbsolutePath(v string) *Folder { + s.AbsolutePath = &v return s } -// SetParents sets the Parents field's value. -func (s *Commit) SetParents(v []*string) *Commit { - s.Parents = v +// SetRelativePath sets the RelativePath field's value. 
+func (s *Folder) SetRelativePath(v string) *Folder { + s.RelativePath = &v return s } // SetTreeId sets the TreeId field's value. -func (s *Commit) SetTreeId(v string) *Commit { +func (s *Folder) SetTreeId(v string) *Folder { s.TreeId = &v return s } -// Represents the input of a create branch operation. -type CreateBranchInput struct { +// Represents the input of a get blob operation. +type GetBlobInput struct { _ struct{} `type:"structure"` - // The name of the new branch to create. - // - // BranchName is a required field - BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - - // The ID of the commit to point the new branch to. + // The ID of the blob, which is its SHA-1 pointer. // - // CommitId is a required field - CommitId *string `locationName:"commitId" type:"string" required:"true"` + // BlobId is a required field + BlobId *string `locationName:"blobId" type:"string" required:"true"` - // The name of the repository in which you want to create the new branch. + // The name of the repository that contains the blob. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateBranchInput) String() string { +func (s GetBlobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBranchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBranchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBranchInput"} - if s.BranchName == nil { - invalidParams.Add(request.NewErrParamRequired("BranchName")) - } - if s.BranchName != nil && len(*s.BranchName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) - } - if s.CommitId == nil { - invalidParams.Add(request.NewErrParamRequired("CommitId")) +func (s GetBlobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBlobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBlobInput"} + if s.BlobId == nil { + invalidParams.Add(request.NewErrParamRequired("BlobId")) } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) @@ -5988,138 +10800,77 @@ func (s *CreateBranchInput) Validate() error { return nil } -// SetBranchName sets the BranchName field's value. -func (s *CreateBranchInput) SetBranchName(v string) *CreateBranchInput { - s.BranchName = &v - return s -} - -// SetCommitId sets the CommitId field's value. -func (s *CreateBranchInput) SetCommitId(v string) *CreateBranchInput { - s.CommitId = &v +// SetBlobId sets the BlobId field's value. +func (s *GetBlobInput) SetBlobId(v string) *GetBlobInput { + s.BlobId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *CreateBranchInput) SetRepositoryName(v string) *CreateBranchInput { +func (s *GetBlobInput) SetRepositoryName(v string) *GetBlobInput { s.RepositoryName = &v return s } -type CreateBranchOutput struct { +// Represents the output of a get blob operation. +type GetBlobOutput struct { _ struct{} `type:"structure"` + + // The content of the blob, usually a file. + // + // Content is automatically base64 encoded/decoded by the SDK. 
+ // + // Content is a required field + Content []byte `locationName:"content" type:"blob" required:"true"` } // String returns the string representation -func (s CreateBranchOutput) String() string { +func (s GetBlobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBranchOutput) GoString() string { +func (s GetBlobOutput) GoString() string { return s.String() } -type CreateCommitInput struct { - _ struct{} `type:"structure"` - - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. - AuthorName *string `locationName:"authorName" type:"string"` - - // The name of the branch where you will create the commit. - // - // BranchName is a required field - BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - - // The commit message you want to include as part of creating the commit. Commit - // messages are limited to 256 KB. If no message is specified, a default message - // will be used. - CommitMessage *string `locationName:"commitMessage" type:"string"` - - // The files to delete in this commit. These files will still exist in prior - // commits. - DeleteFiles []*DeleteFileEntry `locationName:"deleteFiles" type:"list"` - - // The email address of the person who created the commit. - Email *string `locationName:"email" type:"string"` - - // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. - KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` - - // The ID of the commit that is the parent of the commit you will create. If - // this is an empty repository, this is not required. - ParentCommitId *string `locationName:"parentCommitId" type:"string"` +// SetContent sets the Content field's value. +func (s *GetBlobOutput) SetContent(v []byte) *GetBlobOutput { + s.Content = v + return s +} - // The files to add or update in this commit. - PutFiles []*PutFileEntry `locationName:"putFiles" type:"list"` +// Represents the input of a get branch operation. +type GetBranchInput struct { + _ struct{} `type:"structure"` - // The name of the repository where you will create the commit. - // - // RepositoryName is a required field - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + // The name of the branch for which you want to retrieve information. + BranchName *string `locationName:"branchName" min:"1" type:"string"` - // The file modes to update for files in this commit. - SetFileModes []*SetFileModeEntry `locationName:"setFileModes" type:"list"` + // The name of the repository that contains the branch for which you want to + // retrieve information. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` } // String returns the string representation -func (s CreateCommitInput) String() string { +func (s GetBranchInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCommitInput) GoString() string { +func (s GetBranchInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
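// Illustrative sketch, not part of this change: fetching raw blob content with
// the GetBlob API shown above. Content comes back as []byte because the SDK
// base64-decodes it automatically. Assumes the upstream imports
// "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/codecommit", plus an already-configured
// *codecommit.CodeCommit client; fetchBlob is a hypothetical helper name.
func fetchBlob(svc *codecommit.CodeCommit, repo, blobID string) ([]byte, error) {
	out, err := svc.GetBlob(&codecommit.GetBlobInput{
		RepositoryName: aws.String(repo),   // required, min length 1
		BlobId:         aws.String(blobID), // required; the blob's SHA-1 pointer
	})
	if err != nil {
		return nil, err
	}
	return out.Content, nil // already base64-decoded by the SDK
}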
-func (s *CreateCommitInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCommitInput"} - if s.BranchName == nil { - invalidParams.Add(request.NewErrParamRequired("BranchName")) - } +func (s *GetBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBranchInput"} if s.BranchName != nil && len(*s.BranchName) < 1 { invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) } - if s.RepositoryName == nil { - invalidParams.Add(request.NewErrParamRequired("RepositoryName")) - } if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } - if s.DeleteFiles != nil { - for i, v := range s.DeleteFiles { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DeleteFiles", i), err.(request.ErrInvalidParams)) - } - } - } - if s.PutFiles != nil { - for i, v := range s.PutFiles { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PutFiles", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SetFileModes != nil { - for i, v := range s.SetFileModes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SetFileModes", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -6127,184 +10878,152 @@ func (s *CreateCommitInput) Validate() error { return nil } -// SetAuthorName sets the AuthorName field's value. -func (s *CreateCommitInput) SetAuthorName(v string) *CreateCommitInput { - s.AuthorName = &v - return s -} - // SetBranchName sets the BranchName field's value. -func (s *CreateCommitInput) SetBranchName(v string) *CreateCommitInput { +func (s *GetBranchInput) SetBranchName(v string) *GetBranchInput { s.BranchName = &v return s } -// SetCommitMessage sets the CommitMessage field's value. -func (s *CreateCommitInput) SetCommitMessage(v string) *CreateCommitInput { - s.CommitMessage = &v - return s -} - -// SetDeleteFiles sets the DeleteFiles field's value. -func (s *CreateCommitInput) SetDeleteFiles(v []*DeleteFileEntry) *CreateCommitInput { - s.DeleteFiles = v - return s -} - -// SetEmail sets the Email field's value. -func (s *CreateCommitInput) SetEmail(v string) *CreateCommitInput { - s.Email = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetBranchInput) SetRepositoryName(v string) *GetBranchInput { + s.RepositoryName = &v return s } -// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. -func (s *CreateCommitInput) SetKeepEmptyFolders(v bool) *CreateCommitInput { - s.KeepEmptyFolders = &v - return s -} +// Represents the output of a get branch operation. +type GetBranchOutput struct { + _ struct{} `type:"structure"` -// SetParentCommitId sets the ParentCommitId field's value. -func (s *CreateCommitInput) SetParentCommitId(v string) *CreateCommitInput { - s.ParentCommitId = &v - return s + // The name of the branch. + Branch *BranchInfo `locationName:"branch" type:"structure"` } -// SetPutFiles sets the PutFiles field's value. -func (s *CreateCommitInput) SetPutFiles(v []*PutFileEntry) *CreateCommitInput { - s.PutFiles = v - return s +// String returns the string representation +func (s GetBranchOutput) String() string { + return awsutil.Prettify(s) } -// SetRepositoryName sets the RepositoryName field's value. 
-func (s *CreateCommitInput) SetRepositoryName(v string) *CreateCommitInput { - s.RepositoryName = &v - return s +// GoString returns the string representation +func (s GetBranchOutput) GoString() string { + return s.String() } -// SetSetFileModes sets the SetFileModes field's value. -func (s *CreateCommitInput) SetSetFileModes(v []*SetFileModeEntry) *CreateCommitInput { - s.SetFileModes = v +// SetBranch sets the Branch field's value. +func (s *GetBranchOutput) SetBranch(v *BranchInfo) *GetBranchOutput { + s.Branch = v return s } -type CreateCommitOutput struct { +type GetCommentInput struct { _ struct{} `type:"structure"` - // The full commit ID of the commit that contains your committed file changes. - CommitId *string `locationName:"commitId" type:"string"` - - // The files added as part of the committed file changes. - FilesAdded []*FileMetadata `locationName:"filesAdded" type:"list"` - - // The files deleted as part of the committed file changes. - FilesDeleted []*FileMetadata `locationName:"filesDeleted" type:"list"` - - // The files updated as part of the commited file changes. - FilesUpdated []*FileMetadata `locationName:"filesUpdated" type:"list"` - - // The full SHA-1 pointer of the tree information for the commit that contains - // the commited file changes. - TreeId *string `locationName:"treeId" type:"string"` + // The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit + // or GetCommentsForPullRequest. + // + // CommentId is a required field + CommentId *string `locationName:"commentId" type:"string" required:"true"` } // String returns the string representation -func (s CreateCommitOutput) String() string { +func (s GetCommentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCommitOutput) GoString() string { +func (s GetCommentInput) GoString() string { return s.String() } -// SetCommitId sets the CommitId field's value. -func (s *CreateCommitOutput) SetCommitId(v string) *CreateCommitOutput { - s.CommitId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCommentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommentInput"} + if s.CommentId == nil { + invalidParams.Add(request.NewErrParamRequired("CommentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetFilesAdded sets the FilesAdded field's value. -func (s *CreateCommitOutput) SetFilesAdded(v []*FileMetadata) *CreateCommitOutput { - s.FilesAdded = v +// SetCommentId sets the CommentId field's value. +func (s *GetCommentInput) SetCommentId(v string) *GetCommentInput { + s.CommentId = &v return s } -// SetFilesDeleted sets the FilesDeleted field's value. -func (s *CreateCommitOutput) SetFilesDeleted(v []*FileMetadata) *CreateCommitOutput { - s.FilesDeleted = v - return s +type GetCommentOutput struct { + _ struct{} `type:"structure"` + + // The contents of the comment. + Comment *Comment `locationName:"comment" type:"structure"` } -// SetFilesUpdated sets the FilesUpdated field's value. -func (s *CreateCommitOutput) SetFilesUpdated(v []*FileMetadata) *CreateCommitOutput { - s.FilesUpdated = v - return s +// String returns the string representation +func (s GetCommentOutput) String() string { + return awsutil.Prettify(s) } -// SetTreeId sets the TreeId field's value. 
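// Illustrative sketch, not part of this change: resolving a branch to the
// commit ID at its tip via the GetBranch API above. Assumes the same aws and
// codecommit imports and client as the earlier sketch; branchTip is a
// hypothetical helper name.
func branchTip(svc *codecommit.CodeCommit, repo, branch string) (string, error) {
	out, err := svc.GetBranch(&codecommit.GetBranchInput{
		RepositoryName: aws.String(repo),
		BranchName:     aws.String(branch),
	})
	if err != nil {
		return "", err
	}
	// BranchInfo carries the branch name and the commit ID at its tip.
	return aws.StringValue(out.Branch.CommitId), nil
}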
-func (s *CreateCommitOutput) SetTreeId(v string) *CreateCommitOutput { - s.TreeId = &v +// GoString returns the string representation +func (s GetCommentOutput) GoString() string { + return s.String() +} + +// SetComment sets the Comment field's value. +func (s *GetCommentOutput) SetComment(v *Comment) *GetCommentOutput { + s.Comment = v return s } -type CreatePullRequestInput struct { +type GetCommentsForComparedCommitInput struct { _ struct{} `type:"structure"` - // A unique, client-generated idempotency token that when provided in a request, - // ensures the request cannot be repeated with a changed parameter. If a request - // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // To establish the directionality of the comparison, the full commit ID of + // the 'after' commit. // - // The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you - // do not have to generate an idempotency token, as this will be done for you. - ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + // AfterCommitId is a required field + AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` - // A description of the pull request. - Description *string `locationName:"description" type:"string"` + // To establish the directionality of the comparison, the full commit ID of + // the 'before' commit. + BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // The targets for the pull request, including the source of the code to be - // reviewed (the source branch), and the destination where the creator of the - // pull request intends the code to be merged after the pull request is closed - // (the destination branch). - // - // Targets is a required field - Targets []*Target `locationName:"targets" type:"list" required:"true"` + // A non-negative integer used to limit the number of returned results. The + // default is 100 comments, and is configurable up to 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The title of the pull request. This title will be used to identify the pull - // request to other users in the repository. + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository where you want to compare commits. // - // Title is a required field - Title *string `locationName:"title" type:"string" required:"true"` + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreatePullRequestInput) String() string { +func (s GetCommentsForComparedCommitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreatePullRequestInput) GoString() string { +func (s GetCommentsForComparedCommitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreatePullRequestInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePullRequestInput"} - if s.Targets == nil { - invalidParams.Add(request.NewErrParamRequired("Targets")) +func (s *GetCommentsForComparedCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommentsForComparedCommitInput"} + if s.AfterCommitId == nil { + invalidParams.Add(request.NewErrParamRequired("AfterCommitId")) } - if s.Title == nil { - invalidParams.Add(request.NewErrParamRequired("Title")) + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } - if s.Targets != nil { - for i, v := range s.Targets { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) - } - } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } if invalidParams.Len() > 0 { @@ -6313,95 +11032,113 @@ func (s *CreatePullRequestInput) Validate() error { return nil } -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreatePullRequestInput) SetClientRequestToken(v string) *CreatePullRequestInput { - s.ClientRequestToken = &v +// SetAfterCommitId sets the AfterCommitId field's value. +func (s *GetCommentsForComparedCommitInput) SetAfterCommitId(v string) *GetCommentsForComparedCommitInput { + s.AfterCommitId = &v return s } -// SetDescription sets the Description field's value. -func (s *CreatePullRequestInput) SetDescription(v string) *CreatePullRequestInput { - s.Description = &v +// SetBeforeCommitId sets the BeforeCommitId field's value. +func (s *GetCommentsForComparedCommitInput) SetBeforeCommitId(v string) *GetCommentsForComparedCommitInput { + s.BeforeCommitId = &v return s } -// SetTargets sets the Targets field's value. -func (s *CreatePullRequestInput) SetTargets(v []*Target) *CreatePullRequestInput { - s.Targets = v +// SetMaxResults sets the MaxResults field's value. +func (s *GetCommentsForComparedCommitInput) SetMaxResults(v int64) *GetCommentsForComparedCommitInput { + s.MaxResults = &v return s } -// SetTitle sets the Title field's value. -func (s *CreatePullRequestInput) SetTitle(v string) *CreatePullRequestInput { - s.Title = &v +// SetNextToken sets the NextToken field's value. +func (s *GetCommentsForComparedCommitInput) SetNextToken(v string) *GetCommentsForComparedCommitInput { + s.NextToken = &v return s } -type CreatePullRequestOutput struct { +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetCommentsForComparedCommitInput) SetRepositoryName(v string) *GetCommentsForComparedCommitInput { + s.RepositoryName = &v + return s +} + +type GetCommentsForComparedCommitOutput struct { _ struct{} `type:"structure"` - // Information about the newly created pull request. - // - // PullRequest is a required field - PullRequest *PullRequest `locationName:"pullRequest" type:"structure" required:"true"` + // A list of comment objects on the compared commit. + CommentsForComparedCommitData []*CommentsForComparedCommit `locationName:"commentsForComparedCommitData" type:"list"` + + // An enumeration token that can be used in a request to return the next batch + // of the results. 
+ NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s CreatePullRequestOutput) String() string { +func (s GetCommentsForComparedCommitOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreatePullRequestOutput) GoString() string { +func (s GetCommentsForComparedCommitOutput) GoString() string { return s.String() } -// SetPullRequest sets the PullRequest field's value. -func (s *CreatePullRequestOutput) SetPullRequest(v *PullRequest) *CreatePullRequestOutput { - s.PullRequest = v +// SetCommentsForComparedCommitData sets the CommentsForComparedCommitData field's value. +func (s *GetCommentsForComparedCommitOutput) SetCommentsForComparedCommitData(v []*CommentsForComparedCommit) *GetCommentsForComparedCommitOutput { + s.CommentsForComparedCommitData = v return s } -// Represents the input of a create repository operation. -type CreateRepositoryInput struct { +// SetNextToken sets the NextToken field's value. +func (s *GetCommentsForComparedCommitOutput) SetNextToken(v string) *GetCommentsForComparedCommitOutput { + s.NextToken = &v + return s +} + +type GetCommentsForPullRequestInput struct { _ struct{} `type:"structure"` - // A comment or description about the new repository. - // - // The description field for a repository accepts all HTML characters and all - // valid Unicode characters. Applications that do not HTML-encode the description - // and display it in a web page could expose users to potentially malicious - // code. Make sure that you HTML-encode the description field in any application - // that uses this API to display the repository description on a web page. - RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + // The full commit ID of the commit in the source branch that was the tip of + // the branch at the time the comment was made. + AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // The name of the new repository to be created. - // - // The repository name must be unique across the calling AWS account. In addition, - // repository names are limited to 100 alphanumeric, dash, and underscore characters, - // and cannot include certain characters. For a full description of the limits - // on repository names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) - // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. + // The full commit ID of the commit in the destination branch that was the tip + // of the branch at the time the pull request was created. + BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` + + // A non-negative integer used to limit the number of returned results. The + // default is 100 comments. You can return up to 500 comments with a single + // request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. // - // RepositoryName is a required field - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The name of the repository that contains the pull request. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` } // String returns the string representation -func (s CreateRepositoryInput) String() string { +func (s GetCommentsForPullRequestInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRepositoryInput) GoString() string { +func (s GetCommentsForPullRequestInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateRepositoryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"} - if s.RepositoryName == nil { - invalidParams.Add(request.NewErrParamRequired("RepositoryName")) +func (s *GetCommentsForPullRequestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommentsForPullRequestInput"} + if s.PullRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("PullRequestId")) } if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) @@ -6413,75 +11150,105 @@ func (s *CreateRepositoryInput) Validate() error { return nil } -// SetRepositoryDescription sets the RepositoryDescription field's value. -func (s *CreateRepositoryInput) SetRepositoryDescription(v string) *CreateRepositoryInput { - s.RepositoryDescription = &v +// SetAfterCommitId sets the AfterCommitId field's value. +func (s *GetCommentsForPullRequestInput) SetAfterCommitId(v string) *GetCommentsForPullRequestInput { + s.AfterCommitId = &v + return s +} + +// SetBeforeCommitId sets the BeforeCommitId field's value. +func (s *GetCommentsForPullRequestInput) SetBeforeCommitId(v string) *GetCommentsForPullRequestInput { + s.BeforeCommitId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetCommentsForPullRequestInput) SetMaxResults(v int64) *GetCommentsForPullRequestInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCommentsForPullRequestInput) SetNextToken(v string) *GetCommentsForPullRequestInput { + s.NextToken = &v + return s +} + +// SetPullRequestId sets the PullRequestId field's value. +func (s *GetCommentsForPullRequestInput) SetPullRequestId(v string) *GetCommentsForPullRequestInput { + s.PullRequestId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput { +func (s *GetCommentsForPullRequestInput) SetRepositoryName(v string) *GetCommentsForPullRequestInput { s.RepositoryName = &v return s } -// Represents the output of a create repository operation. -type CreateRepositoryOutput struct { +type GetCommentsForPullRequestOutput struct { _ struct{} `type:"structure"` - // Information about the newly created repository. - RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` + // An array of comment objects on the pull request. + CommentsForPullRequestData []*CommentsForPullRequest `locationName:"commentsForPullRequestData" type:"list"` + + // An enumeration token that can be used in a request to return the next batch + // of the results. 
+ NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s CreateRepositoryOutput) String() string { +func (s GetCommentsForPullRequestOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRepositoryOutput) GoString() string { +func (s GetCommentsForPullRequestOutput) GoString() string { return s.String() } -// SetRepositoryMetadata sets the RepositoryMetadata field's value. -func (s *CreateRepositoryOutput) SetRepositoryMetadata(v *RepositoryMetadata) *CreateRepositoryOutput { - s.RepositoryMetadata = v +// SetCommentsForPullRequestData sets the CommentsForPullRequestData field's value. +func (s *GetCommentsForPullRequestOutput) SetCommentsForPullRequestData(v []*CommentsForPullRequest) *GetCommentsForPullRequestOutput { + s.CommentsForPullRequestData = v return s } -// Represents the input of a delete branch operation. -type DeleteBranchInput struct { +// SetNextToken sets the NextToken field's value. +func (s *GetCommentsForPullRequestOutput) SetNextToken(v string) *GetCommentsForPullRequestOutput { + s.NextToken = &v + return s +} + +// Represents the input of a get commit operation. +type GetCommitInput struct { _ struct{} `type:"structure"` - // The name of the branch to delete. - // - // BranchName is a required field - BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + // The commit ID. Commit IDs are the full SHA of the commit. + // + // CommitId is a required field + CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The name of the repository that contains the branch to be deleted. + // The name of the repository to which the commit was made. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBranchInput) String() string { +func (s GetCommitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBranchInput) GoString() string { +func (s GetCommitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBranchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBranchInput"} - if s.BranchName == nil { - invalidParams.Add(request.NewErrParamRequired("BranchName")) - } - if s.BranchName != nil && len(*s.BranchName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) +func (s *GetCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommitInput"} + if s.CommitId == nil { + invalidParams.Add(request.NewErrParamRequired("CommitId")) } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) @@ -6496,68 +11263,105 @@ func (s *DeleteBranchInput) Validate() error { return nil } -// SetBranchName sets the BranchName field's value. -func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput { - s.BranchName = &v +// SetCommitId sets the CommitId field's value. +func (s *GetCommitInput) SetCommitId(v string) *GetCommitInput { + s.CommitId = &v return s } // SetRepositoryName sets the RepositoryName field's value. 
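// Illustrative sketch, not part of this change: draining every batch of
// comments on a compared commit by following NextToken, per the
// MaxResults/NextToken fields documented above. Assumes the same imports and
// client as the earlier sketches; allComparedCommitComments is a hypothetical
// helper name.
func allComparedCommitComments(svc *codecommit.CodeCommit, repo, afterCommit string) ([]*codecommit.CommentsForComparedCommit, error) {
	var all []*codecommit.CommentsForComparedCommit
	var token *string
	for {
		out, err := svc.GetCommentsForComparedCommit(&codecommit.GetCommentsForComparedCommitInput{
			RepositoryName: aws.String(repo),
			AfterCommitId:  aws.String(afterCommit), // required; establishes directionality
			MaxResults:     aws.Int64(100),          // default 100, configurable up to 500
			NextToken:      token,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, out.CommentsForComparedCommitData...)
		if out.NextToken == nil {
			break
		}
		token = out.NextToken
	}
	return all, nil
}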
-func (s *DeleteBranchInput) SetRepositoryName(v string) *DeleteBranchInput { +func (s *GetCommitInput) SetRepositoryName(v string) *GetCommitInput { s.RepositoryName = &v return s } -// Represents the output of a delete branch operation. -type DeleteBranchOutput struct { +// Represents the output of a get commit operation. +type GetCommitOutput struct { _ struct{} `type:"structure"` - // Information about the branch deleted by the operation, including the branch - // name and the commit ID that was the tip of the branch. - DeletedBranch *BranchInfo `locationName:"deletedBranch" type:"structure"` + // A commit data type object that contains information about the specified commit. + // + // Commit is a required field + Commit *Commit `locationName:"commit" type:"structure" required:"true"` } // String returns the string representation -func (s DeleteBranchOutput) String() string { +func (s GetCommitOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBranchOutput) GoString() string { +func (s GetCommitOutput) GoString() string { return s.String() } -// SetDeletedBranch sets the DeletedBranch field's value. -func (s *DeleteBranchOutput) SetDeletedBranch(v *BranchInfo) *DeleteBranchOutput { - s.DeletedBranch = v +// SetCommit sets the Commit field's value. +func (s *GetCommitOutput) SetCommit(v *Commit) *GetCommitOutput { + s.Commit = v return s } -type DeleteCommentContentInput struct { +type GetDifferencesInput struct { _ struct{} `type:"structure"` - // The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit - // or GetCommentsForPullRequest. + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. // - // CommentId is a required field - CommentId *string `locationName:"commentId" type:"string" required:"true"` + // AfterCommitSpecifier is a required field + AfterCommitSpecifier *string `locationName:"afterCommitSpecifier" type:"string" required:"true"` + + // The file path in which to check differences. Limits the results to this path. + // Can also be used to specify the changed name of a directory or folder, if + // it has changed. If not specified, differences will be shown for all paths. + AfterPath *string `locationName:"afterPath" type:"string"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, the full commit ID. Optional. If not specified, all + // changes prior to the afterCommitSpecifier value will be shown. If you do + // not use beforeCommitSpecifier in your request, consider limiting the results + // with maxResults. + BeforeCommitSpecifier *string `locationName:"beforeCommitSpecifier" type:"string"` + + // The file path in which to check for differences. Limits the results to this + // path. Can also be used to specify the previous name of a directory or folder. + // If beforePath and afterPath are not specified, differences will be shown + // for all paths. + BeforePath *string `locationName:"beforePath" type:"string"` + + // A non-negative integer used to limit the number of returned results. + MaxResults *int64 `type:"integer"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `type:"string"` + + // The name of the repository where you want to get differences. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteCommentContentInput) String() string { +func (s GetDifferencesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCommentContentInput) GoString() string { +func (s GetDifferencesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCommentContentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCommentContentInput"} - if s.CommentId == nil { - invalidParams.Add(request.NewErrParamRequired("CommentId")) +func (s *GetDifferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDifferencesInput"} + if s.AfterCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("AfterCommitSpecifier")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } if invalidParams.Len() > 0 { @@ -6566,150 +11370,120 @@ func (s *DeleteCommentContentInput) Validate() error { return nil } -// SetCommentId sets the CommentId field's value. -func (s *DeleteCommentContentInput) SetCommentId(v string) *DeleteCommentContentInput { - s.CommentId = &v +// SetAfterCommitSpecifier sets the AfterCommitSpecifier field's value. +func (s *GetDifferencesInput) SetAfterCommitSpecifier(v string) *GetDifferencesInput { + s.AfterCommitSpecifier = &v return s } -type DeleteCommentContentOutput struct { - _ struct{} `type:"structure"` +// SetAfterPath sets the AfterPath field's value. +func (s *GetDifferencesInput) SetAfterPath(v string) *GetDifferencesInput { + s.AfterPath = &v + return s +} - // Information about the comment you just deleted. - Comment *Comment `locationName:"comment" type:"structure"` +// SetBeforeCommitSpecifier sets the BeforeCommitSpecifier field's value. +func (s *GetDifferencesInput) SetBeforeCommitSpecifier(v string) *GetDifferencesInput { + s.BeforeCommitSpecifier = &v + return s } -// String returns the string representation -func (s DeleteCommentContentOutput) String() string { - return awsutil.Prettify(s) +// SetBeforePath sets the BeforePath field's value. +func (s *GetDifferencesInput) SetBeforePath(v string) *GetDifferencesInput { + s.BeforePath = &v + return s } -// GoString returns the string representation -func (s DeleteCommentContentOutput) GoString() string { - return s.String() +// SetMaxResults sets the MaxResults field's value. +func (s *GetDifferencesInput) SetMaxResults(v int64) *GetDifferencesInput { + s.MaxResults = &v + return s } -// SetComment sets the Comment field's value. -func (s *DeleteCommentContentOutput) SetComment(v *Comment) *DeleteCommentContentOutput { - s.Comment = v +// SetNextToken sets the NextToken field's value. +func (s *GetDifferencesInput) SetNextToken(v string) *GetDifferencesInput { + s.NextToken = &v return s } -// A file that will be deleted as part of a commit. -type DeleteFileEntry struct { +// SetRepositoryName sets the RepositoryName field's value. 
+func (s *GetDifferencesInput) SetRepositoryName(v string) *GetDifferencesInput { + s.RepositoryName = &v + return s +} + +type GetDifferencesOutput struct { _ struct{} `type:"structure"` - // The full path of the file that will be deleted, including the name of the - // file. - // - // FilePath is a required field - FilePath *string `locationName:"filePath" type:"string" required:"true"` + // A differences data type object that contains information about the differences, + // including whether the difference is added, modified, or deleted (A, D, M). + Differences []*Difference `locationName:"differences" type:"list"` + + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteFileEntry) String() string { +func (s GetDifferencesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFileEntry) GoString() string { +func (s GetDifferencesOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFileEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFileEntry"} - if s.FilePath == nil { - invalidParams.Add(request.NewErrParamRequired("FilePath")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDifferences sets the Differences field's value. +func (s *GetDifferencesOutput) SetDifferences(v []*Difference) *GetDifferencesOutput { + s.Differences = v + return s } -// SetFilePath sets the FilePath field's value. -func (s *DeleteFileEntry) SetFilePath(v string) *DeleteFileEntry { - s.FilePath = &v +// SetNextToken sets the NextToken field's value. +func (s *GetDifferencesOutput) SetNextToken(v string) *GetDifferencesOutput { + s.NextToken = &v return s } -type DeleteFileInput struct { +type GetFileInput struct { _ struct{} `type:"structure"` - // The name of the branch where the commit will be made deleting the file. - // - // BranchName is a required field - BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - - // The commit message you want to include as part of deleting the file. Commit - // messages are limited to 256 KB. If no message is specified, a default message - // will be used. - CommitMessage *string `locationName:"commitMessage" type:"string"` - - // The email address for the commit that deletes the file. If no email address - // is specified, the email address will be left blank. - Email *string `locationName:"email" type:"string"` + // The fully-qualified reference that identifies the commit that contains the + // file. For example, you could specify a full commit ID, a tag, a branch name, + // or a reference such as refs/heads/master. If none is provided, then the head + // commit will be used. + CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. For example, /examples/file.md is a - // fully qualified path to a file named file.md in a folder named examples. + // The fully-qualified path to the file, including the full name and extension + // of the file. For example, /examples/file.md is the fully-qualified path to + // a file named file.md in a folder named examples.
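// Illustrative sketch, not part of this change: listing the A/D/M changes
// between two commit specifiers with GetDifferences. The Difference type is
// defined elsewhere in this file; its ChangeType, BeforeBlob, and AfterBlob
// fields, and BlobMetadata's Path field, are assumptions based on the SDK.
// Also assumes the "fmt" import alongside the earlier ones; printDifferences
// is a hypothetical helper name.
func printDifferences(svc *codecommit.CodeCommit, repo, before, after string) error {
	out, err := svc.GetDifferences(&codecommit.GetDifferencesInput{
		RepositoryName:        aws.String(repo),
		BeforeCommitSpecifier: aws.String(before), // optional; omit to show all changes prior to `after`
		AfterCommitSpecifier:  aws.String(after),  // required
	})
	if err != nil {
		return err
	}
	for _, d := range out.Differences {
		blob := d.AfterBlob
		if blob == nil {
			blob = d.BeforeBlob // deletions carry only the "before" blob
		}
		if blob == nil {
			continue
		}
		fmt.Printf("%s %s\n", aws.StringValue(d.ChangeType), aws.StringValue(blob.Path))
	}
	return nil
}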
// // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // Specifies whether to delete the folder or directory that contains the file - // you want to delete if that file is the only object in the folder or directory. - // By default, empty folders will be deleted. This includes empty folders that - // are part of the directory structure. For example, if the path to a file is - // dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file - // in dir4 will also delete the empty folders dir4, dir3, and dir2. - KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` - - // The name of the author of the commit that deletes the file. If no name is - // specified, the user's ARN will be used as the author name and committer name. - Name *string `locationName:"name" type:"string"` - - // The ID of the commit that is the tip of the branch where you want to create - // the commit that will delete the file. This must be the HEAD commit for the - // branch. The commit that deletes the file will be created from this commit - // ID. - // - // ParentCommitId is a required field - ParentCommitId *string `locationName:"parentCommitId" type:"string" required:"true"` - - // The name of the repository that contains the file to delete. + // The name of the repository that contains the file. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteFileInput) String() string { +func (s GetFileInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s DeleteFileInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFileInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFileInput"} - if s.BranchName == nil { - invalidParams.Add(request.NewErrParamRequired("BranchName")) - } - if s.BranchName != nil && len(*s.BranchName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) - } +// GoString returns the string representation +func (s GetFileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFileInput"} if s.FilePath == nil { invalidParams.Add(request.NewErrParamRequired("FilePath")) } - if s.ParentCommitId == nil { - invalidParams.Add(request.NewErrParamRequired("ParentCommitId")) - } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -6723,138 +11497,151 @@ func (s *DeleteFileInput) Validate() error { return nil } -// SetBranchName sets the BranchName field's value. -func (s *DeleteFileInput) SetBranchName(v string) *DeleteFileInput { - s.BranchName = &v - return s -} - -// SetCommitMessage sets the CommitMessage field's value. -func (s *DeleteFileInput) SetCommitMessage(v string) *DeleteFileInput { - s.CommitMessage = &v - return s -} - -// SetEmail sets the Email field's value. -func (s *DeleteFileInput) SetEmail(v string) *DeleteFileInput { - s.Email = &v +// SetCommitSpecifier sets the CommitSpecifier field's value. +func (s *GetFileInput) SetCommitSpecifier(v string) *GetFileInput { + s.CommitSpecifier = &v return s } // SetFilePath sets the FilePath field's value. 
-func (s *DeleteFileInput) SetFilePath(v string) *DeleteFileInput { +func (s *GetFileInput) SetFilePath(v string) *GetFileInput { s.FilePath = &v return s } -// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. -func (s *DeleteFileInput) SetKeepEmptyFolders(v bool) *DeleteFileInput { - s.KeepEmptyFolders = &v - return s -} - -// SetName sets the Name field's value. -func (s *DeleteFileInput) SetName(v string) *DeleteFileInput { - s.Name = &v - return s -} - -// SetParentCommitId sets the ParentCommitId field's value. -func (s *DeleteFileInput) SetParentCommitId(v string) *DeleteFileInput { - s.ParentCommitId = &v - return s -} - // SetRepositoryName sets the RepositoryName field's value. -func (s *DeleteFileInput) SetRepositoryName(v string) *DeleteFileInput { +func (s *GetFileInput) SetRepositoryName(v string) *GetFileInput { s.RepositoryName = &v return s } -type DeleteFileOutput struct { +type GetFileOutput struct { _ struct{} `type:"structure"` - // The blob ID removed from the tree as part of deleting the file. + // The blob ID of the object that represents the file content. // // BlobId is a required field BlobId *string `locationName:"blobId" type:"string" required:"true"` - // The full commit ID of the commit that contains the change that deletes the - // file. + // The full commit ID of the commit that contains the content returned by GetFile. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. + // The base-64 encoded binary data object that represents the content of the + // file. + // + // FileContent is automatically base64 encoded/decoded by the SDK. + // + // FileContent is a required field + FileContent []byte `locationName:"fileContent" type:"blob" required:"true"` + + // The extrapolated file mode permissions of the blob. Valid values are strings + // such as EXECUTABLE, not numeric values. + // + // The file mode permissions returned by this API are not the standard file + // mode permission values, such as 100644, but rather extrapolated values. See + // below for a full list of supported return values. + // + // FileMode is a required field + FileMode *string `locationName:"fileMode" type:"string" required:"true" enum:"FileModeTypeEnum"` + + // The fully qualified path to the specified file. This includes the name and + // extension of the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // The full SHA-1 pointer of the tree information for the commit that contains - // the delete file change. + // The size of the contents of the file, in bytes. // - // TreeId is a required field - TreeId *string `locationName:"treeId" type:"string" required:"true"` + // FileSize is a required field + FileSize *int64 `locationName:"fileSize" type:"long" required:"true"` } // String returns the string representation -func (s DeleteFileOutput) String() string { +func (s GetFileOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFileOutput) GoString() string { +func (s GetFileOutput) GoString() string { return s.String() } // SetBlobId sets the BlobId field's value. -func (s *DeleteFileOutput) SetBlobId(v string) *DeleteFileOutput { +func (s *GetFileOutput) SetBlobId(v string) *GetFileOutput { s.BlobId = &v return s } // SetCommitId sets the CommitId field's value.
-func (s *DeleteFileOutput) SetCommitId(v string) *DeleteFileOutput { +func (s *GetFileOutput) SetCommitId(v string) *GetFileOutput { s.CommitId = &v return s } +// SetFileContent sets the FileContent field's value. +func (s *GetFileOutput) SetFileContent(v []byte) *GetFileOutput { + s.FileContent = v + return s +} + +// SetFileMode sets the FileMode field's value. +func (s *GetFileOutput) SetFileMode(v string) *GetFileOutput { + s.FileMode = &v + return s +} + // SetFilePath sets the FilePath field's value. -func (s *DeleteFileOutput) SetFilePath(v string) *DeleteFileOutput { +func (s *GetFileOutput) SetFilePath(v string) *GetFileOutput { s.FilePath = &v return s } -// SetTreeId sets the TreeId field's value. -func (s *DeleteFileOutput) SetTreeId(v string) *DeleteFileOutput { - s.TreeId = &v +// SetFileSize sets the FileSize field's value. +func (s *GetFileOutput) SetFileSize(v int64) *GetFileOutput { + s.FileSize = &v return s } -// Represents the input of a delete repository operation. -type DeleteRepositoryInput struct { +type GetFolderInput struct { _ struct{} `type:"structure"` - // The name of the repository to delete. + // A fully-qualified reference used to identify a commit that contains the version + // of the folder's content to return. A fully-qualified reference can be a commit + // ID, branch name, tag, or reference such as HEAD. If no specifier is provided, + // the folder content will be returned as it exists in the HEAD commit. + CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` + + // The fully-qualified path to the folder whose contents will be returned, including + // the folder name. For example, /examples is a fully-qualified path to a folder + // named examples that was created off of the root directory (/) of a repository. + // + // FolderPath is a required field + FolderPath *string `locationName:"folderPath" type:"string" required:"true"` + + // The name of the repository. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteRepositoryInput) String() string { +func (s GetFolderInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRepositoryInput) GoString() string { +func (s GetFolderInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRepositoryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"} +func (s *GetFolderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFolderInput"} + if s.FolderPath == nil { + invalidParams.Add(request.NewErrParamRequired("FolderPath")) + } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -6868,77 +11655,167 @@ func (s *DeleteRepositoryInput) Validate() error { return nil } +// SetCommitSpecifier sets the CommitSpecifier field's value. +func (s *GetFolderInput) SetCommitSpecifier(v string) *GetFolderInput { + s.CommitSpecifier = &v + return s +} + +// SetFolderPath sets the FolderPath field's value. +func (s *GetFolderInput) SetFolderPath(v string) *GetFolderInput { + s.FolderPath = &v + return s +} + // SetRepositoryName sets the RepositoryName field's value. 
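// Illustrative sketch, not part of this change: reading one file at a given
// commit specifier with the GetFile API above. FileContent arrives already
// base64-decoded, and the accompanying FileMode is an extrapolated value such
// as EXECUTABLE or NORMAL rather than a numeric mode. Assumes the same imports
// and client as the earlier sketches; readFileAt is a hypothetical helper name.
func readFileAt(svc *codecommit.CodeCommit, repo, path, specifier string) ([]byte, error) {
	out, err := svc.GetFile(&codecommit.GetFileInput{
		RepositoryName:  aws.String(repo),
		FilePath:        aws.String(path),      // e.g. /examples/file.md
		CommitSpecifier: aws.String(specifier), // branch, tag, or full commit ID; head commit if omitted
	})
	if err != nil {
		return nil, err
	}
	return out.FileContent, nil
}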
-func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInput { +func (s *GetFolderInput) SetRepositoryName(v string) *GetFolderInput { s.RepositoryName = &v return s } -// Represents the output of a delete repository operation. -type DeleteRepositoryOutput struct { +type GetFolderOutput struct { _ struct{} `type:"structure"` - // The ID of the repository that was deleted. - RepositoryId *string `locationName:"repositoryId" type:"string"` + // The full commit ID used as a reference for which version of the folder content + // is returned. + // + // CommitId is a required field + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The list of files that exist in the specified folder, if any. + Files []*File `locationName:"files" type:"list"` + + // The fully-qualified path of the folder whose contents are returned. + // + // FolderPath is a required field + FolderPath *string `locationName:"folderPath" type:"string" required:"true"` + + // The list of folders that exist beneath the specified folder, if any. + SubFolders []*Folder `locationName:"subFolders" type:"list"` + + // The list of submodules that exist in the specified folder, if any. + SubModules []*SubModule `locationName:"subModules" type:"list"` + + // The list of symbolic links to other files and folders that exist in the specified + // folder, if any. + SymbolicLinks []*SymbolicLink `locationName:"symbolicLinks" type:"list"` + + // The full SHA-1 pointer of the tree information for the commit that contains + // the folder. + TreeId *string `locationName:"treeId" type:"string"` } // String returns the string representation -func (s DeleteRepositoryOutput) String() string { +func (s GetFolderOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRepositoryOutput) GoString() string { +func (s GetFolderOutput) GoString() string { return s.String() } -// SetRepositoryId sets the RepositoryId field's value. -func (s *DeleteRepositoryOutput) SetRepositoryId(v string) *DeleteRepositoryOutput { - s.RepositoryId = &v +// SetCommitId sets the CommitId field's value. +func (s *GetFolderOutput) SetCommitId(v string) *GetFolderOutput { + s.CommitId = &v return s } -type DescribePullRequestEventsInput struct { - _ struct{} `type:"structure"` +// SetFiles sets the Files field's value. +func (s *GetFolderOutput) SetFiles(v []*File) *GetFolderOutput { + s.Files = v + return s +} - // The Amazon Resource Name (ARN) of the user whose actions resulted in the - // event. Examples include updating the pull request with additional commits - // or changing the status of a pull request. - ActorArn *string `locationName:"actorArn" type:"string"` +// SetFolderPath sets the FolderPath field's value. +func (s *GetFolderOutput) SetFolderPath(v string) *GetFolderOutput { + s.FolderPath = &v + return s +} - // A non-negative integer used to limit the number of returned results. The - // default is 100 events, which is also the maximum number of events that can - // be returned in a result. - MaxResults *int64 `locationName:"maxResults" type:"integer"` +// SetSubFolders sets the SubFolders field's value. +func (s *GetFolderOutput) SetSubFolders(v []*Folder) *GetFolderOutput { + s.SubFolders = v + return s +} - // An enumeration token that when provided in a request, returns the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` +// SetSubModules sets the SubModules field's value. 
+func (s *GetFolderOutput) SetSubModules(v []*SubModule) *GetFolderOutput { + s.SubModules = v + return s +} - // Optional. The pull request event type about which you want to return information. - PullRequestEventType *string `locationName:"pullRequestEventType" type:"string" enum:"PullRequestEventType"` +// SetSymbolicLinks sets the SymbolicLinks field's value. +func (s *GetFolderOutput) SetSymbolicLinks(v []*SymbolicLink) *GetFolderOutput { + s.SymbolicLinks = v + return s +} + +// SetTreeId sets the TreeId field's value. +func (s *GetFolderOutput) SetTreeId(v string) *GetFolderOutput { + s.TreeId = &v + return s +} + +type GetMergeCommitInput struct { + _ struct{} `type:"structure"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The system-generated ID of the pull request. To get this ID, use ListPullRequests. + // The name of the repository that contains the merge commit about which you + // want to get information. // - // PullRequestId is a required field - PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` } // String returns the string representation -func (s DescribePullRequestEventsInput) String() string { +func (s GetMergeCommitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribePullRequestEventsInput) GoString() string { +func (s GetMergeCommitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
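// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above. Repository and branch names
// are hypothetical.] Querying the GetMergeCommitInput type introduced above:
//
//    merge, err := svc.GetMergeCommit(&codecommit.GetMergeCommitInput{
//        RepositoryName:             aws.String("my-repo"),
//        SourceCommitSpecifier:      aws.String("feature-branch"),
//        DestinationCommitSpecifier: aws.String("master"),
//    })
//    if err == nil && merge.MergedCommitId != nil {
//        // Per the field docs, MergedCommitId is absent when a fast-forward
//        // merge left no merge commit.
//        fmt.Println("merged as", aws.StringValue(merge.MergedCommitId))
//    }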
-func (s *DescribePullRequestEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribePullRequestEventsInput"} - if s.PullRequestId == nil { - invalidParams.Add(request.NewErrParamRequired("PullRequestId")) +func (s *GetMergeCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMergeCommitInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) } if invalidParams.Len() > 0 { @@ -6947,286 +11824,348 @@ func (s *DescribePullRequestEventsInput) Validate() error { return nil } -// SetActorArn sets the ActorArn field's value. -func (s *DescribePullRequestEventsInput) SetActorArn(v string) *DescribePullRequestEventsInput { - s.ActorArn = &v +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *GetMergeCommitInput) SetConflictDetailLevel(v string) *GetMergeCommitInput { + s.ConflictDetailLevel = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *DescribePullRequestEventsInput) SetMaxResults(v int64) *DescribePullRequestEventsInput { - s.MaxResults = &v +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *GetMergeCommitInput) SetConflictResolutionStrategy(v string) *GetMergeCommitInput { + s.ConflictResolutionStrategy = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribePullRequestEventsInput) SetNextToken(v string) *DescribePullRequestEventsInput { - s.NextToken = &v +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *GetMergeCommitInput) SetDestinationCommitSpecifier(v string) *GetMergeCommitInput { + s.DestinationCommitSpecifier = &v return s } -// SetPullRequestEventType sets the PullRequestEventType field's value. -func (s *DescribePullRequestEventsInput) SetPullRequestEventType(v string) *DescribePullRequestEventsInput { - s.PullRequestEventType = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetMergeCommitInput) SetRepositoryName(v string) *GetMergeCommitInput { + s.RepositoryName = &v return s } -// SetPullRequestId sets the PullRequestId field's value. -func (s *DescribePullRequestEventsInput) SetPullRequestId(v string) *DescribePullRequestEventsInput { - s.PullRequestId = &v +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *GetMergeCommitInput) SetSourceCommitSpecifier(v string) *GetMergeCommitInput { + s.SourceCommitSpecifier = &v return s } -type DescribePullRequestEventsOutput struct { +type GetMergeCommitOutput struct { _ struct{} `type:"structure"` - // An enumeration token that can be used in a request to return the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` + // The commit ID of the merge base. + BaseCommitId *string `locationName:"baseCommitId" type:"string"` - // Information about the pull request events. 
- // - // PullRequestEvents is a required field - PullRequestEvents []*PullRequestEvent `locationName:"pullRequestEvents" type:"list" required:"true"` + // The commit ID of the destination commit specifier that was used in the merge + // evaluation. + DestinationCommitId *string `locationName:"destinationCommitId" type:"string"` + + // The commit ID for the merge commit created when the source branch was merged + // into the destination branch. If the fast-forward merge strategy was used, + // no merge commit exists. + MergedCommitId *string `locationName:"mergedCommitId" type:"string"` + + // The commit ID of the source commit specifier that was used in the merge evaluation. + SourceCommitId *string `locationName:"sourceCommitId" type:"string"` } // String returns the string representation -func (s DescribePullRequestEventsOutput) String() string { +func (s GetMergeCommitOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribePullRequestEventsOutput) GoString() string { +func (s GetMergeCommitOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribePullRequestEventsOutput) SetNextToken(v string) *DescribePullRequestEventsOutput { - s.NextToken = &v +// SetBaseCommitId sets the BaseCommitId field's value. +func (s *GetMergeCommitOutput) SetBaseCommitId(v string) *GetMergeCommitOutput { + s.BaseCommitId = &v return s } -// SetPullRequestEvents sets the PullRequestEvents field's value. -func (s *DescribePullRequestEventsOutput) SetPullRequestEvents(v []*PullRequestEvent) *DescribePullRequestEventsOutput { - s.PullRequestEvents = v +// SetDestinationCommitId sets the DestinationCommitId field's value. +func (s *GetMergeCommitOutput) SetDestinationCommitId(v string) *GetMergeCommitOutput { + s.DestinationCommitId = &v return s } -// Returns information about a set of differences for a commit specifier. -type Difference struct { - _ struct{} `type:"structure"` - - // Information about an afterBlob data type object, including the ID, the file - // mode permission code, and the path. - AfterBlob *BlobMetadata `locationName:"afterBlob" type:"structure"` - - // Information about a beforeBlob data type object, including the ID, the file - // mode permission code, and the path. - BeforeBlob *BlobMetadata `locationName:"beforeBlob" type:"structure"` - - // Whether the change type of the difference is an addition (A), deletion (D), - // or modification (M). - ChangeType *string `locationName:"changeType" type:"string" enum:"ChangeTypeEnum"` +// SetMergedCommitId sets the MergedCommitId field's value. +func (s *GetMergeCommitOutput) SetMergedCommitId(v string) *GetMergeCommitOutput { + s.MergedCommitId = &v + return s } -// String returns the string representation -func (s Difference) String() string { - return awsutil.Prettify(s) +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *GetMergeCommitOutput) SetSourceCommitId(v string) *GetMergeCommitOutput { + s.SourceCommitId = &v + return s } -// GoString returns the string representation -func (s Difference) GoString() string { - return s.String() -} +type GetMergeConflictsInput struct { + _ struct{} `type:"structure"` -// SetAfterBlob sets the AfterBlob field's value. -func (s *Difference) SetAfterBlob(v *BlobMetadata) *Difference { - s.AfterBlob = v - return s -} + // The level of conflict detail to use. 
If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` -// SetBeforeBlob sets the BeforeBlob field's value. -func (s *Difference) SetBeforeBlob(v *BlobMetadata) *Difference { - s.BeforeBlob = v - return s -} + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` -// SetChangeType sets the ChangeType field's value. -func (s *Difference) SetChangeType(v string) *Difference { - s.ChangeType = &v - return s -} + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` -// Returns information about a file in a repository. -type File struct { - _ struct{} `type:"structure"` + // The maximum number of files to include in the output. + MaxConflictFiles *int64 `locationName:"maxConflictFiles" type:"integer"` - // The fully-qualified path to the file in the repository. - AbsolutePath *string `locationName:"absolutePath" type:"string"` + // The merge option or strategy you want to use to merge the code. + // + // MergeOption is a required field + MergeOption *string `locationName:"mergeOption" type:"string" required:"true" enum:"MergeOptionTypeEnum"` - // The blob ID that contains the file information. - BlobId *string `locationName:"blobId" type:"string"` + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` - // The extrapolated file mode permissions for the file. Valid values include - // EXECUTABLE and NORMAL. - FileMode *string `locationName:"fileMode" type:"string" enum:"FileModeTypeEnum"` + // The name of the repository where the pull request was created. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` - // The relative path of the file from the folder where the query originated. - RelativePath *string `locationName:"relativePath" type:"string"` + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` } // String returns the string representation -func (s File) String() string { +func (s GetMergeConflictsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s File) GoString() string { +func (s GetMergeConflictsInput) GoString() string { return s.String() } -// SetAbsolutePath sets the AbsolutePath field's value. 
-func (s *File) SetAbsolutePath(v string) *File { - s.AbsolutePath = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMergeConflictsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMergeConflictsInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } + if s.MergeOption == nil { + invalidParams.Add(request.NewErrParamRequired("MergeOption")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } -// SetBlobId sets the BlobId field's value. -func (s *File) SetBlobId(v string) *File { - s.BlobId = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetFileMode sets the FileMode field's value. -func (s *File) SetFileMode(v string) *File { - s.FileMode = &v +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *GetMergeConflictsInput) SetConflictDetailLevel(v string) *GetMergeConflictsInput { + s.ConflictDetailLevel = &v return s } -// SetRelativePath sets the RelativePath field's value. -func (s *File) SetRelativePath(v string) *File { - s.RelativePath = &v +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *GetMergeConflictsInput) SetConflictResolutionStrategy(v string) *GetMergeConflictsInput { + s.ConflictResolutionStrategy = &v return s } -// A file that will be added, updated, or deleted as part of a commit. -type FileMetadata struct { - _ struct{} `type:"structure"` - - // The full path to the file that will be added or updated, including the name - // of the file. - AbsolutePath *string `locationName:"absolutePath" type:"string"` - - // The blob ID that contains the file information. - BlobId *string `locationName:"blobId" type:"string"` - - // The extrapolated file mode permissions for the file. Valid values include - // EXECUTABLE and NORMAL. - FileMode *string `locationName:"fileMode" type:"string" enum:"FileModeTypeEnum"` +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *GetMergeConflictsInput) SetDestinationCommitSpecifier(v string) *GetMergeConflictsInput { + s.DestinationCommitSpecifier = &v + return s } -// String returns the string representation -func (s FileMetadata) String() string { - return awsutil.Prettify(s) +// SetMaxConflictFiles sets the MaxConflictFiles field's value. +func (s *GetMergeConflictsInput) SetMaxConflictFiles(v int64) *GetMergeConflictsInput { + s.MaxConflictFiles = &v + return s } -// GoString returns the string representation -func (s FileMetadata) GoString() string { - return s.String() +// SetMergeOption sets the MergeOption field's value. +func (s *GetMergeConflictsInput) SetMergeOption(v string) *GetMergeConflictsInput { + s.MergeOption = &v + return s } -// SetAbsolutePath sets the AbsolutePath field's value. -func (s *FileMetadata) SetAbsolutePath(v string) *FileMetadata { - s.AbsolutePath = &v +// SetNextToken sets the NextToken field's value. +func (s *GetMergeConflictsInput) SetNextToken(v string) *GetMergeConflictsInput { + s.NextToken = &v return s } -// SetBlobId sets the BlobId field's value. 
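// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above. Repository and branch names
// are hypothetical, and the enum constants follow the SDK's generated naming
// for MergeOptionTypeEnum and ConflictDetailLevelTypeEnum.] Checking
// mergeability with the GetMergeConflicts shapes introduced above:
//
//    conflicts, err := svc.GetMergeConflicts(&codecommit.GetMergeConflictsInput{
//        RepositoryName:             aws.String("my-repo"),
//        SourceCommitSpecifier:      aws.String("feature-branch"),
//        DestinationCommitSpecifier: aws.String("master"),
//        MergeOption:                aws.String(codecommit.MergeOptionTypeEnumThreeWayMerge),
//        ConflictDetailLevel:        aws.String(codecommit.ConflictDetailLevelTypeEnumLineLevel),
//    })
//    if err == nil && !aws.BoolValue(conflicts.Mergeable) {
//        fmt.Printf("%d conflicting file(s) between %s and %s\n",
//            len(conflicts.ConflictMetadataList),
//            aws.StringValue(conflicts.SourceCommitId),
//            aws.StringValue(conflicts.DestinationCommitId))
//    }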
-func (s *FileMetadata) SetBlobId(v string) *FileMetadata { - s.BlobId = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetMergeConflictsInput) SetRepositoryName(v string) *GetMergeConflictsInput { + s.RepositoryName = &v return s } -// SetFileMode sets the FileMode field's value. -func (s *FileMetadata) SetFileMode(v string) *FileMetadata { - s.FileMode = &v +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *GetMergeConflictsInput) SetSourceCommitSpecifier(v string) *GetMergeConflictsInput { + s.SourceCommitSpecifier = &v return s } -// Returns information about a folder in a repository. -type Folder struct { +type GetMergeConflictsOutput struct { _ struct{} `type:"structure"` - // The fully-qualified path of the folder in the repository. - AbsolutePath *string `locationName:"absolutePath" type:"string"` + // The commit ID of the merge base. + BaseCommitId *string `locationName:"baseCommitId" type:"string"` - // The relative path of the specified folder from the folder where the query - // originated. - RelativePath *string `locationName:"relativePath" type:"string"` + // A list of metadata for any conflicting files. If the specified merge strategy + // is FAST_FORWARD_MERGE, this list will always be empty. + // + // ConflictMetadataList is a required field + ConflictMetadataList []*ConflictMetadata `locationName:"conflictMetadataList" type:"list" required:"true"` + + // The commit ID of the destination commit specifier that was used in the merge + // evaluation. + // + // DestinationCommitId is a required field + DestinationCommitId *string `locationName:"destinationCommitId" type:"string" required:"true"` + + // A Boolean value that indicates whether the code is mergeable by the specified + // merge option. + // + // Mergeable is a required field + Mergeable *bool `locationName:"mergeable" type:"boolean" required:"true"` - // The full SHA-1 pointer of the tree information for the commit that contains - // the folder. - TreeId *string `locationName:"treeId" type:"string"` + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The commit ID of the source commit specifier that was used in the merge evaluation. + // + // SourceCommitId is a required field + SourceCommitId *string `locationName:"sourceCommitId" type:"string" required:"true"` } // String returns the string representation -func (s Folder) String() string { +func (s GetMergeConflictsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Folder) GoString() string { +func (s GetMergeConflictsOutput) GoString() string { return s.String() } -// SetAbsolutePath sets the AbsolutePath field's value. -func (s *Folder) SetAbsolutePath(v string) *Folder { - s.AbsolutePath = &v +// SetBaseCommitId sets the BaseCommitId field's value. +func (s *GetMergeConflictsOutput) SetBaseCommitId(v string) *GetMergeConflictsOutput { + s.BaseCommitId = &v return s } -// SetRelativePath sets the RelativePath field's value. -func (s *Folder) SetRelativePath(v string) *Folder { - s.RelativePath = &v +// SetConflictMetadataList sets the ConflictMetadataList field's value. +func (s *GetMergeConflictsOutput) SetConflictMetadataList(v []*ConflictMetadata) *GetMergeConflictsOutput { + s.ConflictMetadataList = v return s } -// SetTreeId sets the TreeId field's value. 
-func (s *Folder) SetTreeId(v string) *Folder { - s.TreeId = &v +// SetDestinationCommitId sets the DestinationCommitId field's value. +func (s *GetMergeConflictsOutput) SetDestinationCommitId(v string) *GetMergeConflictsOutput { + s.DestinationCommitId = &v return s } -// Represents the input of a get blob operation. -type GetBlobInput struct { +// SetMergeable sets the Mergeable field's value. +func (s *GetMergeConflictsOutput) SetMergeable(v bool) *GetMergeConflictsOutput { + s.Mergeable = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetMergeConflictsOutput) SetNextToken(v string) *GetMergeConflictsOutput { + s.NextToken = &v + return s +} + +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *GetMergeConflictsOutput) SetSourceCommitId(v string) *GetMergeConflictsOutput { + s.SourceCommitId = &v + return s +} + +type GetMergeOptionsInput struct { _ struct{} `type:"structure"` - // The ID of the blob, which is its SHA-1 pointer. + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. // - // BlobId is a required field - BlobId *string `locationName:"blobId" type:"string" required:"true"` + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The name of the repository that contains the blob. + // The name of the repository that contains the commits about which you want + // to get merge options. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` } // String returns the string representation -func (s GetBlobInput) String() string { +func (s GetMergeOptionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBlobInput) GoString() string { +func (s GetMergeOptionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBlobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBlobInput"} - if s.BlobId == nil { - invalidParams.Add(request.NewErrParamRequired("BlobId")) +func (s *GetMergeOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMergeOptionsInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) @@ -7234,6 +12173,9 @@ func (s *GetBlobInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } if invalidParams.Len() > 0 { return invalidParams @@ -7241,145 +12183,119 @@ func (s *GetBlobInput) Validate() error { return nil } -// SetBlobId sets the BlobId field's value. -func (s *GetBlobInput) SetBlobId(v string) *GetBlobInput { - s.BlobId = &v +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *GetMergeOptionsInput) SetConflictDetailLevel(v string) *GetMergeOptionsInput { + s.ConflictDetailLevel = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetBlobInput) SetRepositoryName(v string) *GetBlobInput { - s.RepositoryName = &v +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *GetMergeOptionsInput) SetConflictResolutionStrategy(v string) *GetMergeOptionsInput { + s.ConflictResolutionStrategy = &v return s } -// Represents the output of a get blob operation. -type GetBlobOutput struct { - _ struct{} `type:"structure"` - - // The content of the blob, usually a file. - // - // Content is automatically base64 encoded/decoded by the SDK. - // - // Content is a required field - Content []byte `locationName:"content" type:"blob" required:"true"` -} - -// String returns the string representation -func (s GetBlobOutput) String() string { - return awsutil.Prettify(s) +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *GetMergeOptionsInput) SetDestinationCommitSpecifier(v string) *GetMergeOptionsInput { + s.DestinationCommitSpecifier = &v + return s } -// GoString returns the string representation -func (s GetBlobOutput) GoString() string { - return s.String() +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetMergeOptionsInput) SetRepositoryName(v string) *GetMergeOptionsInput { + s.RepositoryName = &v + return s } -// SetContent sets the Content field's value. -func (s *GetBlobOutput) SetContent(v []byte) *GetBlobOutput { - s.Content = v +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *GetMergeOptionsInput) SetSourceCommitSpecifier(v string) *GetMergeOptionsInput { + s.SourceCommitSpecifier = &v return s } -// Represents the input of a get branch operation. -type GetBranchInput struct { +type GetMergeOptionsOutput struct { _ struct{} `type:"structure"` - // The name of the branch for which you want to retrieve information. - BranchName *string `locationName:"branchName" min:"1" type:"string"` + // The commit ID of the merge base. 
+ // + // BaseCommitId is a required field + BaseCommitId *string `locationName:"baseCommitId" type:"string" required:"true"` - // The name of the repository that contains the branch for which you want to - // retrieve information. - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` + // The commit ID of the destination commit specifier that was used in the merge + // evaluation. + // + // DestinationCommitId is a required field + DestinationCommitId *string `locationName:"destinationCommitId" type:"string" required:"true"` + + // The merge option or strategy used to merge the code. + // + // MergeOptions is a required field + MergeOptions []*string `locationName:"mergeOptions" type:"list" required:"true"` + + // The commit ID of the source commit specifier that was used in the merge evaluation. + // + // SourceCommitId is a required field + SourceCommitId *string `locationName:"sourceCommitId" type:"string" required:"true"` } // String returns the string representation -func (s GetBranchInput) String() string { +func (s GetMergeOptionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBranchInput) GoString() string { +func (s GetMergeOptionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBranchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBranchInput"} - if s.BranchName != nil && len(*s.BranchName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) - } - if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBranchName sets the BranchName field's value. -func (s *GetBranchInput) SetBranchName(v string) *GetBranchInput { - s.BranchName = &v +// SetBaseCommitId sets the BaseCommitId field's value. +func (s *GetMergeOptionsOutput) SetBaseCommitId(v string) *GetMergeOptionsOutput { + s.BaseCommitId = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetBranchInput) SetRepositoryName(v string) *GetBranchInput { - s.RepositoryName = &v +// SetDestinationCommitId sets the DestinationCommitId field's value. +func (s *GetMergeOptionsOutput) SetDestinationCommitId(v string) *GetMergeOptionsOutput { + s.DestinationCommitId = &v return s } -// Represents the output of a get branch operation. -type GetBranchOutput struct { - _ struct{} `type:"structure"` - - // The name of the branch. - Branch *BranchInfo `locationName:"branch" type:"structure"` -} - -// String returns the string representation -func (s GetBranchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBranchOutput) GoString() string { - return s.String() +// SetMergeOptions sets the MergeOptions field's value. +func (s *GetMergeOptionsOutput) SetMergeOptions(v []*string) *GetMergeOptionsOutput { + s.MergeOptions = v + return s } -// SetBranch sets the Branch field's value. -func (s *GetBranchOutput) SetBranch(v *BranchInfo) *GetBranchOutput { - s.Branch = v +// SetSourceCommitId sets the SourceCommitId field's value. 
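// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above. Repository and branch names
// are hypothetical.] Discovering which merge strategies would succeed, using
// the GetMergeOptions shapes introduced above:
//
//    opts, err := svc.GetMergeOptions(&codecommit.GetMergeOptionsInput{
//        RepositoryName:             aws.String("my-repo"),
//        SourceCommitSpecifier:      aws.String("feature-branch"),
//        DestinationCommitSpecifier: aws.String("master"),
//    })
//    if err == nil {
//        // e.g. [FAST_FORWARD_MERGE SQUASH_MERGE THREE_WAY_MERGE]
//        fmt.Println(aws.StringValueSlice(opts.MergeOptions))
//    }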
+func (s *GetMergeOptionsOutput) SetSourceCommitId(v string) *GetMergeOptionsOutput { + s.SourceCommitId = &v return s } -type GetCommentInput struct { +type GetPullRequestInput struct { _ struct{} `type:"structure"` - // The unique, system-generated ID of the comment. To get this ID, use GetCommentsForComparedCommit - // or GetCommentsForPullRequest. + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. // - // CommentId is a required field - CommentId *string `locationName:"commentId" type:"string" required:"true"` + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` } // String returns the string representation -func (s GetCommentInput) String() string { +func (s GetPullRequestInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentInput) GoString() string { +func (s GetPullRequestInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCommentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCommentInput"} - if s.CommentId == nil { - invalidParams.Add(request.NewErrParamRequired("CommentId")) +func (s *GetPullRequestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPullRequestInput"} + if s.PullRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("PullRequestId")) } if invalidParams.Len() > 0 { @@ -7388,78 +12304,60 @@ func (s *GetCommentInput) Validate() error { return nil } -// SetCommentId sets the CommentId field's value. -func (s *GetCommentInput) SetCommentId(v string) *GetCommentInput { - s.CommentId = &v +// SetPullRequestId sets the PullRequestId field's value. +func (s *GetPullRequestInput) SetPullRequestId(v string) *GetPullRequestInput { + s.PullRequestId = &v return s } -type GetCommentOutput struct { +type GetPullRequestOutput struct { _ struct{} `type:"structure"` - // The contents of the comment. - Comment *Comment `locationName:"comment" type:"structure"` + // Information about the specified pull request. + // + // PullRequest is a required field + PullRequest *PullRequest `locationName:"pullRequest" type:"structure" required:"true"` } // String returns the string representation -func (s GetCommentOutput) String() string { +func (s GetPullRequestOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentOutput) GoString() string { +func (s GetPullRequestOutput) GoString() string { return s.String() } -// SetComment sets the Comment field's value. -func (s *GetCommentOutput) SetComment(v *Comment) *GetCommentOutput { - s.Comment = v +// SetPullRequest sets the PullRequest field's value. +func (s *GetPullRequestOutput) SetPullRequest(v *PullRequest) *GetPullRequestOutput { + s.PullRequest = v return s } -type GetCommentsForComparedCommitInput struct { +// Represents the input of a get repository operation. +type GetRepositoryInput struct { _ struct{} `type:"structure"` - // To establish the directionality of the comparison, the full commit ID of - // the 'after' commit. - // - // AfterCommitId is a required field - AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` - - // To establish the directionality of the comparison, the full commit ID of - // the 'before' commit. 
- BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments, and is configurable up to 500. - MaxResults *int64 `locationName:"maxResults" type:"integer"` - - // An enumeration token that when provided in a request, returns the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` - - // The name of the repository where you want to compare commits. + // The name of the repository to get information about. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetCommentsForComparedCommitInput) String() string { +func (s GetRepositoryInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentsForComparedCommitInput) GoString() string { +func (s GetRepositoryInput) GoString() string { return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCommentsForComparedCommitInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCommentsForComparedCommitInput"} - if s.AfterCommitId == nil { - invalidParams.Add(request.NewErrParamRequired("AfterCommitId")) - } +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryInput"} if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -7473,113 +12371,61 @@ func (s *GetCommentsForComparedCommitInput) Validate() error { return nil } -// SetAfterCommitId sets the AfterCommitId field's value. -func (s *GetCommentsForComparedCommitInput) SetAfterCommitId(v string) *GetCommentsForComparedCommitInput { - s.AfterCommitId = &v - return s -} - -// SetBeforeCommitId sets the BeforeCommitId field's value. -func (s *GetCommentsForComparedCommitInput) SetBeforeCommitId(v string) *GetCommentsForComparedCommitInput { - s.BeforeCommitId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetCommentsForComparedCommitInput) SetMaxResults(v int64) *GetCommentsForComparedCommitInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCommentsForComparedCommitInput) SetNextToken(v string) *GetCommentsForComparedCommitInput { - s.NextToken = &v - return s -} - // SetRepositoryName sets the RepositoryName field's value. -func (s *GetCommentsForComparedCommitInput) SetRepositoryName(v string) *GetCommentsForComparedCommitInput { +func (s *GetRepositoryInput) SetRepositoryName(v string) *GetRepositoryInput { s.RepositoryName = &v return s } -type GetCommentsForComparedCommitOutput struct { +// Represents the output of a get repository operation. +type GetRepositoryOutput struct { _ struct{} `type:"structure"` - // A list of comment objects on the compared commit. - CommentsForComparedCommitData []*CommentsForComparedCommit `locationName:"commentsForComparedCommitData" type:"list"` - - // An enumeration token that can be used in a request to return the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` + // Information about the repository. 
+ RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` } // String returns the string representation -func (s GetCommentsForComparedCommitOutput) String() string { +func (s GetRepositoryOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentsForComparedCommitOutput) GoString() string { +func (s GetRepositoryOutput) GoString() string { return s.String() } -// SetCommentsForComparedCommitData sets the CommentsForComparedCommitData field's value. -func (s *GetCommentsForComparedCommitOutput) SetCommentsForComparedCommitData(v []*CommentsForComparedCommit) *GetCommentsForComparedCommitOutput { - s.CommentsForComparedCommitData = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetCommentsForComparedCommitOutput) SetNextToken(v string) *GetCommentsForComparedCommitOutput { - s.NextToken = &v +// SetRepositoryMetadata sets the RepositoryMetadata field's value. +func (s *GetRepositoryOutput) SetRepositoryMetadata(v *RepositoryMetadata) *GetRepositoryOutput { + s.RepositoryMetadata = v return s } -type GetCommentsForPullRequestInput struct { +// Represents the input of a get repository triggers operation. +type GetRepositoryTriggersInput struct { _ struct{} `type:"structure"` - // The full commit ID of the commit in the source branch that was the tip of - // the branch at the time the comment was made. - AfterCommitId *string `locationName:"afterCommitId" type:"string"` - - // The full commit ID of the commit in the destination branch that was the tip - // of the branch at the time the pull request was created. - BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments. You can return up to 500 comments with a single - // request. - MaxResults *int64 `locationName:"maxResults" type:"integer"` - - // An enumeration token that when provided in a request, returns the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` - - // The system-generated ID of the pull request. To get this ID, use ListPullRequests. + // The name of the repository for which the trigger is configured. // - // PullRequestId is a required field - PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` - - // The name of the repository that contains the pull request. - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetCommentsForPullRequestInput) String() string { +func (s GetRepositoryTriggersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentsForPullRequestInput) GoString() string { +func (s GetRepositoryTriggersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetCommentsForPullRequestInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCommentsForPullRequestInput"} - if s.PullRequestId == nil { - invalidParams.Add(request.NewErrParamRequired("PullRequestId")) +func (s *GetRepositoryTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryTriggersInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) @@ -7591,106 +12437,117 @@ func (s *GetCommentsForPullRequestInput) Validate() error { return nil } -// SetAfterCommitId sets the AfterCommitId field's value. -func (s *GetCommentsForPullRequestInput) SetAfterCommitId(v string) *GetCommentsForPullRequestInput { - s.AfterCommitId = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetRepositoryTriggersInput) SetRepositoryName(v string) *GetRepositoryTriggersInput { + s.RepositoryName = &v return s } -// SetBeforeCommitId sets the BeforeCommitId field's value. -func (s *GetCommentsForPullRequestInput) SetBeforeCommitId(v string) *GetCommentsForPullRequestInput { - s.BeforeCommitId = &v - return s +// Represents the output of a get repository triggers operation. +type GetRepositoryTriggersOutput struct { + _ struct{} `type:"structure"` + + // The system-generated unique ID for the trigger. + ConfigurationId *string `locationName:"configurationId" type:"string"` + + // The JSON block of configuration information for each trigger. + Triggers []*RepositoryTrigger `locationName:"triggers" type:"list"` } -// SetMaxResults sets the MaxResults field's value. -func (s *GetCommentsForPullRequestInput) SetMaxResults(v int64) *GetCommentsForPullRequestInput { - s.MaxResults = &v - return s +// String returns the string representation +func (s GetRepositoryTriggersOutput) String() string { + return awsutil.Prettify(s) } -// SetNextToken sets the NextToken field's value. -func (s *GetCommentsForPullRequestInput) SetNextToken(v string) *GetCommentsForPullRequestInput { - s.NextToken = &v - return s +// GoString returns the string representation +func (s GetRepositoryTriggersOutput) GoString() string { + return s.String() } -// SetPullRequestId sets the PullRequestId field's value. -func (s *GetCommentsForPullRequestInput) SetPullRequestId(v string) *GetCommentsForPullRequestInput { - s.PullRequestId = &v +// SetConfigurationId sets the ConfigurationId field's value. +func (s *GetRepositoryTriggersOutput) SetConfigurationId(v string) *GetRepositoryTriggersOutput { + s.ConfigurationId = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetCommentsForPullRequestInput) SetRepositoryName(v string) *GetCommentsForPullRequestInput { - s.RepositoryName = &v +// SetTriggers sets the Triggers field's value. +func (s *GetRepositoryTriggersOutput) SetTriggers(v []*RepositoryTrigger) *GetRepositoryTriggersOutput { + s.Triggers = v return s } -type GetCommentsForPullRequestOutput struct { +// Information about whether a file is binary or textual in a merge or pull +// request operation. +type IsBinaryFile struct { _ struct{} `type:"structure"` - // An array of comment objects on the pull request. - CommentsForPullRequestData []*CommentsForPullRequest `locationName:"commentsForPullRequestData" type:"list"` + // The binary or non-binary status of a file in the base of a merge or pull + // request. 
+ Base *bool `locationName:"base" type:"boolean"` - // An enumeration token that can be used in a request to return the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` + // The binary or non-binary status of a file in the destination of a merge or + // pull request. + Destination *bool `locationName:"destination" type:"boolean"` + + // The binary or non-binary status of file in the source of a merge or pull + // request. + Source *bool `locationName:"source" type:"boolean"` } // String returns the string representation -func (s GetCommentsForPullRequestOutput) String() string { +func (s IsBinaryFile) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommentsForPullRequestOutput) GoString() string { +func (s IsBinaryFile) GoString() string { return s.String() } -// SetCommentsForPullRequestData sets the CommentsForPullRequestData field's value. -func (s *GetCommentsForPullRequestOutput) SetCommentsForPullRequestData(v []*CommentsForPullRequest) *GetCommentsForPullRequestOutput { - s.CommentsForPullRequestData = v +// SetBase sets the Base field's value. +func (s *IsBinaryFile) SetBase(v bool) *IsBinaryFile { + s.Base = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetCommentsForPullRequestOutput) SetNextToken(v string) *GetCommentsForPullRequestOutput { - s.NextToken = &v +// SetDestination sets the Destination field's value. +func (s *IsBinaryFile) SetDestination(v bool) *IsBinaryFile { + s.Destination = &v return s } -// Represents the input of a get commit operation. -type GetCommitInput struct { +// SetSource sets the Source field's value. +func (s *IsBinaryFile) SetSource(v bool) *IsBinaryFile { + s.Source = &v + return s +} + +// Represents the input of a list branches operation. +type ListBranchesInput struct { _ struct{} `type:"structure"` - // The commit ID. Commit IDs are the full SHA of the commit. - // - // CommitId is a required field - CommitId *string `locationName:"commitId" type:"string" required:"true"` + // An enumeration token that allows the operation to batch the results. + NextToken *string `locationName:"nextToken" type:"string"` - // The name of the repository to which the commit was made. + // The name of the repository that contains the branches. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetCommitInput) String() string { +func (s ListBranchesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommitInput) GoString() string { +func (s ListBranchesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCommitInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCommitInput"} - if s.CommitId == nil { - invalidParams.Add(request.NewErrParamRequired("CommitId")) - } +func (s *ListBranchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBranchesInput"} if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -7704,100 +12561,89 @@ func (s *GetCommitInput) Validate() error { return nil } -// SetCommitId sets the CommitId field's value. 
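// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above, with a hypothetical
// repository name.] Walking all branches with the SDK's generated paginator
// for the ListBranches operation shown here:
//
//    err := svc.ListBranchesPages(&codecommit.ListBranchesInput{
//        RepositoryName: aws.String("my-repo"),
//    }, func(page *codecommit.ListBranchesOutput, lastPage bool) bool {
//        for _, b := range page.Branches {
//            fmt.Println(aws.StringValue(b))
//        }
//        return true // keep paging while a NextToken is returned
//    })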
-func (s *GetCommitInput) SetCommitId(v string) *GetCommitInput { - s.CommitId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListBranchesInput) SetNextToken(v string) *ListBranchesInput { + s.NextToken = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *GetCommitInput) SetRepositoryName(v string) *GetCommitInput { +func (s *ListBranchesInput) SetRepositoryName(v string) *ListBranchesInput { s.RepositoryName = &v return s } -// Represents the output of a get commit operation. -type GetCommitOutput struct { +// Represents the output of a list branches operation. +type ListBranchesOutput struct { _ struct{} `type:"structure"` - // A commit data type object that contains information about the specified commit. - // - // Commit is a required field - Commit *Commit `locationName:"commit" type:"structure" required:"true"` + // The list of branch names. + Branches []*string `locationName:"branches" type:"list"` + + // An enumeration token that returns the batch of the results. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s GetCommitOutput) String() string { +func (s ListBranchesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCommitOutput) GoString() string { +func (s ListBranchesOutput) GoString() string { return s.String() } -// SetCommit sets the Commit field's value. -func (s *GetCommitOutput) SetCommit(v *Commit) *GetCommitOutput { - s.Commit = v +// SetBranches sets the Branches field's value. +func (s *ListBranchesOutput) SetBranches(v []*string) *ListBranchesOutput { + s.Branches = v return s } -type GetDifferencesInput struct { - _ struct{} `type:"structure"` - - // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. - // - // AfterCommitSpecifier is a required field - AfterCommitSpecifier *string `locationName:"afterCommitSpecifier" type:"string" required:"true"` - - // The file path in which to check differences. Limits the results to this path. - // Can also be used to specify the changed name of a directory or folder, if - // it has changed. If not specified, differences will be shown for all paths. - AfterPath *string `locationName:"afterPath" type:"string"` +// SetNextToken sets the NextToken field's value. +func (s *ListBranchesOutput) SetNextToken(v string) *ListBranchesOutput { + s.NextToken = &v + return s +} - // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, the full commit ID. Optional. If not specified, all - // changes prior to the afterCommitSpecifier value will be shown. If you do - // not use beforeCommitSpecifier in your request, consider limiting the results - // with maxResults. - BeforeCommitSpecifier *string `locationName:"beforeCommitSpecifier" type:"string"` +type ListPullRequestsInput struct { + _ struct{} `type:"structure"` - // The file path in which to check for differences. Limits the results to this - // path. Can also be used to specify the previous name of a directory or folder. - // If beforePath and afterPath are not specified, differences will be shown - // for all paths. - BeforePath *string `locationName:"beforePath" type:"string"` + // Optional. The Amazon Resource Name (ARN) of the user who created the pull + // request. If used, this filters the results to pull requests created by that + // user. 
+ AuthorArn *string `locationName:"authorArn" type:"string"` // A non-negative integer used to limit the number of returned results. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `locationName:"maxResults" type:"integer"` // An enumeration token that when provided in a request, returns the next batch // of the results. - NextToken *string `type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` - // The name of the repository where you want to get differences. + // Optional. The status of the pull request. If used, this refines the results + // to the pull requests that match the specified status. + PullRequestStatus *string `locationName:"pullRequestStatus" type:"string" enum:"PullRequestStatusEnum"` + + // The name of the repository for which you want to list pull requests. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDifferencesInput) String() string { +func (s ListPullRequestsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDifferencesInput) GoString() string { +func (s ListPullRequestsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDifferencesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDifferencesInput"} - if s.AfterCommitSpecifier == nil { - invalidParams.Add(request.NewErrParamRequired("AfterCommitSpecifier")) - } +func (s *ListPullRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPullRequestsInput"} if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -7811,125 +12657,181 @@ func (s *GetDifferencesInput) Validate() error { return nil } -// SetAfterCommitSpecifier sets the AfterCommitSpecifier field's value. -func (s *GetDifferencesInput) SetAfterCommitSpecifier(v string) *GetDifferencesInput { - s.AfterCommitSpecifier = &v +// SetAuthorArn sets the AuthorArn field's value. +func (s *ListPullRequestsInput) SetAuthorArn(v string) *ListPullRequestsInput { + s.AuthorArn = &v return s } -// SetAfterPath sets the AfterPath field's value. -func (s *GetDifferencesInput) SetAfterPath(v string) *GetDifferencesInput { - s.AfterPath = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListPullRequestsInput) SetMaxResults(v int64) *ListPullRequestsInput { + s.MaxResults = &v return s } -// SetBeforeCommitSpecifier sets the BeforeCommitSpecifier field's value. -func (s *GetDifferencesInput) SetBeforeCommitSpecifier(v string) *GetDifferencesInput { - s.BeforeCommitSpecifier = &v +// SetNextToken sets the NextToken field's value. +func (s *ListPullRequestsInput) SetNextToken(v string) *ListPullRequestsInput { + s.NextToken = &v return s } -// SetBeforePath sets the BeforePath field's value. -func (s *GetDifferencesInput) SetBeforePath(v string) *GetDifferencesInput { - s.BeforePath = &v +// SetPullRequestStatus sets the PullRequestStatus field's value. +func (s *ListPullRequestsInput) SetPullRequestStatus(v string) *ListPullRequestsInput { + s.PullRequestStatus = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetDifferencesInput) SetMaxResults(v int64) *GetDifferencesInput { - s.MaxResults = &v +// SetRepositoryName sets the RepositoryName field's value. 
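// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above. "my-repo" is hypothetical.]
// Combining the ListPullRequests paginator with GetPullRequest, as the field
// docs above suggest (ListPullRequests yields the IDs GetPullRequest needs):
//
//    err := svc.ListPullRequestsPages(&codecommit.ListPullRequestsInput{
//        RepositoryName:    aws.String("my-repo"),
//        PullRequestStatus: aws.String(codecommit.PullRequestStatusEnumOpen),
//    }, func(page *codecommit.ListPullRequestsOutput, lastPage bool) bool {
//        for _, id := range page.PullRequestIds {
//            pr, prErr := svc.GetPullRequest(&codecommit.GetPullRequestInput{
//                PullRequestId: id,
//            })
//            if prErr == nil {
//                fmt.Println(aws.StringValue(id), aws.StringValue(pr.PullRequest.Title))
//            }
//        }
//        return true // keep paging while a NextToken is returned
//    })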
+func (s *ListPullRequestsInput) SetRepositoryName(v string) *ListPullRequestsInput { + s.RepositoryName = &v return s } +type ListPullRequestsOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The system-generated IDs of the pull requests. + // + // PullRequestIds is a required field + PullRequestIds []*string `locationName:"pullRequestIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPullRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPullRequestsOutput) GoString() string { + return s.String() +} + // SetNextToken sets the NextToken field's value. -func (s *GetDifferencesInput) SetNextToken(v string) *GetDifferencesInput { +func (s *ListPullRequestsOutput) SetNextToken(v string) *ListPullRequestsOutput { s.NextToken = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetDifferencesInput) SetRepositoryName(v string) *GetDifferencesInput { - s.RepositoryName = &v +// SetPullRequestIds sets the PullRequestIds field's value. +func (s *ListPullRequestsOutput) SetPullRequestIds(v []*string) *ListPullRequestsOutput { + s.PullRequestIds = v return s } -type GetDifferencesOutput struct { +// Represents the input of a list repositories operation. +type ListRepositoriesInput struct { _ struct{} `type:"structure"` - // A differences data type object that contains information about the differences, - // including whether the difference is added, modified, or deleted (A, D, M). - Differences []*Difference `locationName:"differences" type:"list"` + // An enumeration token that allows the operation to batch the results of the + // operation. Batch sizes are 1,000 for list repository operations. When the + // client sends the token back to AWS CodeCommit, another page of 1,000 records + // is retrieved. + NextToken *string `locationName:"nextToken" type:"string"` - // An enumeration token that can be used in a request to return the next batch - // of the results. - NextToken *string `type:"string"` + // The order in which to sort the results of a list repositories operation. + Order *string `locationName:"order" type:"string" enum:"OrderEnum"` + + // The criteria used to sort the results of a list repositories operation. + SortBy *string `locationName:"sortBy" type:"string" enum:"SortByEnum"` } // String returns the string representation -func (s GetDifferencesOutput) String() string { +func (s ListRepositoriesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDifferencesOutput) GoString() string { +func (s ListRepositoriesInput) GoString() string { return s.String() } -// SetDifferences sets the Differences field's value. -func (s *GetDifferencesOutput) SetDifferences(v []*Difference) *GetDifferencesOutput { - s.Differences = v +// SetNextToken sets the NextToken field's value. +func (s *ListRepositoriesInput) SetNextToken(v string) *ListRepositoriesInput { + s.NextToken = &v + return s +} + +// SetOrder sets the Order field's value. +func (s *ListRepositoriesInput) SetOrder(v string) *ListRepositoriesInput { + s.Order = &v + return s +} + +// SetSortBy sets the SortBy field's value. 
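// [Editor's note: illustrative sketch, not part of the vendored diff; reuses
// svc and imports from the GetFile sketch above. The enum constants follow
// the SDK's generated naming for SortByEnum and OrderEnum.] Enumerating
// repositories with the ListRepositories paginator; per the docs above, the
// SDK resubmits NextToken for each 1,000-record batch:
//
//    err := svc.ListRepositoriesPages(&codecommit.ListRepositoriesInput{
//        SortBy: aws.String(codecommit.SortByEnumRepositoryName),
//        Order:  aws.String(codecommit.OrderEnumAscending),
//    }, func(page *codecommit.ListRepositoriesOutput, lastPage bool) bool {
//        for _, repo := range page.Repositories {
//            fmt.Println(aws.StringValue(repo.RepositoryName),
//                aws.StringValue(repo.RepositoryId))
//        }
//        return true
//    })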
+func (s *ListRepositoriesInput) SetSortBy(v string) *ListRepositoriesInput { + s.SortBy = &v return s } +// Represents the output of a list repositories operation. +type ListRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the results of the + // operation. Batch sizes are 1,000 for list repository operations. When the + // client sends the token back to AWS CodeCommit, another page of 1,000 records + // is retrieved. + NextToken *string `locationName:"nextToken" type:"string"` + + // Lists the repositories called by the list repositories operation. + Repositories []*RepositoryNameIdPair `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s ListRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRepositoriesOutput) GoString() string { + return s.String() +} + // SetNextToken sets the NextToken field's value. -func (s *GetDifferencesOutput) SetNextToken(v string) *GetDifferencesOutput { +func (s *ListRepositoriesOutput) SetNextToken(v string) *ListRepositoriesOutput { s.NextToken = &v return s } -type GetFileInput struct { - _ struct{} `type:"structure"` +// SetRepositories sets the Repositories field's value. +func (s *ListRepositoriesOutput) SetRepositories(v []*RepositoryNameIdPair) *ListRepositoriesOutput { + s.Repositories = v + return s +} - // The fully-quaified reference that identifies the commit that contains the - // file. For example, you could specify a full commit ID, a tag, a branch name, - // or a reference such as refs/heads/master. If none is provided, then the head - // commit will be used. - CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` - // The fully-qualified path to the file, including the full name and extension - // of the file. For example, /examples/file.md is the fully-qualified path to - // a file named file.md in a folder named examples. - // - // FilePath is a required field - FilePath *string `locationName:"filePath" type:"string" required:"true"` + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` - // The name of the repository that contains the file. + // The Amazon Resource Name (ARN) of the resource for which you want to get + // information about tags, if any. // - // RepositoryName is a required field - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` } // String returns the string representation -func (s GetFileInput) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFileInput) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
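
// A sketch of ListTagsForResource against a repository, assuming the same svc
// client as above; the account ID and ARN shown are illustrative only.
func showRepoTags(svc *codecommit.CodeCommit) error {
	out, err := svc.ListTagsForResource(&codecommit.ListTagsForResourceInput{
		ResourceArn: aws.String("arn:aws:codecommit:us-east-1:111111111111:example-repo"),
	})
	if err != nil {
		return err
	}
	for k, v := range out.Tags { // tag key/value pairs, as documented above
		fmt.Printf("%s=%s\n", k, aws.StringValue(v))
	}
	return nil
}
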
-func (s *GetFileInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFileInput"} - if s.FilePath == nil { - invalidParams.Add(request.NewErrParamRequired("FilePath")) - } - if s.RepositoryName == nil { - invalidParams.Add(request.NewErrParamRequired("RepositoryName")) - } - if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } if invalidParams.Len() > 0 { @@ -7938,150 +12840,135 @@ func (s *GetFileInput) Validate() error { return nil } -// SetCommitSpecifier sets the CommitSpecifier field's value. -func (s *GetFileInput) SetCommitSpecifier(v string) *GetFileInput { - s.CommitSpecifier = &v - return s -} - -// SetFilePath sets the FilePath field's value. -func (s *GetFileInput) SetFilePath(v string) *GetFileInput { - s.FilePath = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetFileInput) SetRepositoryName(v string) *GetFileInput { - s.RepositoryName = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v return s } -type GetFileOutput struct { +type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The blob ID of the object that represents the file content. - // - // BlobId is a required field - BlobId *string `locationName:"blobId" type:"string" required:"true"` - - // The full commit ID of the commit that contains the content returned by GetFile. - // - // CommitId is a required field - CommitId *string `locationName:"commitId" type:"string" required:"true"` - - // The base-64 encoded binary data object that represents the content of the - // file. - // - // FileContent is automatically base64 encoded/decoded by the SDK. - // - // FileContent is a required field - FileContent []byte `locationName:"fileContent" type:"blob" required:"true"` - - // The extrapolated file mode permissions of the blob. Valid values include - // strings such as EXECUTABLE and not numeric values. - // - // The file mode permissions returned by this API are not the standard file - // mode permission values, such as 100644, but rather extrapolated values. See - // below for a full list of supported return values. - // - // FileMode is a required field - FileMode *string `locationName:"fileMode" type:"string" required:"true" enum:"FileModeTypeEnum"` - - // The fully qualified path to the specified file. This returns the name and - // extension of the file. - // - // FilePath is a required field - FilePath *string `locationName:"filePath" type:"string" required:"true"` + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` - // The size of the contents of the file, in bytes. - // - // FileSize is a required field - FileSize *int64 `locationName:"fileSize" type:"long" required:"true"` + // A list of tag key and value pairs associated with the specified resource. 
+ Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation -func (s GetFileOutput) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFileOutput) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -// SetBlobId sets the BlobId field's value. -func (s *GetFileOutput) SetBlobId(v string) *GetFileOutput { - s.BlobId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v return s } -// SetCommitId sets the CommitId field's value. -func (s *GetFileOutput) SetCommitId(v string) *GetFileOutput { - s.CommitId = &v - return s +// Returns information about the location of a change or comment in the comparison +// between two commits or a pull request. +type Location struct { + _ struct{} `type:"structure"` + + // The name of the file being compared, including its extension and subdirectory, + // if any. + FilePath *string `locationName:"filePath" type:"string"` + + // The position of a change within a compared file, in line number format. + FilePosition *int64 `locationName:"filePosition" type:"long"` + + // In a comparison of commits or a pull request, whether the change is in the + // 'before' or 'after' of that comparison. + RelativeFileVersion *string `locationName:"relativeFileVersion" type:"string" enum:"RelativeFileVersionEnum"` } -// SetFileContent sets the FileContent field's value. -func (s *GetFileOutput) SetFileContent(v []byte) *GetFileOutput { - s.FileContent = v - return s +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) } -// SetFileMode sets the FileMode field's value. -func (s *GetFileOutput) SetFileMode(v string) *GetFileOutput { - s.FileMode = &v - return s +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() } // SetFilePath sets the FilePath field's value. -func (s *GetFileOutput) SetFilePath(v string) *GetFileOutput { +func (s *Location) SetFilePath(v string) *Location { s.FilePath = &v return s } -// SetFileSize sets the FileSize field's value. -func (s *GetFileOutput) SetFileSize(v int64) *GetFileOutput { - s.FileSize = &v +// SetFilePosition sets the FilePosition field's value. +func (s *Location) SetFilePosition(v int64) *Location { + s.FilePosition = &v return s } -type GetFolderInput struct { - _ struct{} `type:"structure"` +// SetRelativeFileVersion sets the RelativeFileVersion field's value. +func (s *Location) SetRelativeFileVersion(v string) *Location { + s.RelativeFileVersion = &v + return s +} - // A fully-qualified reference used to identify a commit that contains the version - // of the folder's content to return. A fully-qualified reference can be a commit - // ID, branch name, tag, or reference such as HEAD. If no specifier is provided, - // the folder content will be returned as it exists in the HEAD commit. - CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` +type MergeBranchesByFastForwardInput struct { + _ struct{} `type:"structure"` - // The fully-qualified path to the folder whose contents will be returned, including - // the folder name. 
For example, /examples is a fully-qualified path to a folder - // named examples that was created off of the root directory (/) of a repository. + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. // - // FolderPath is a required field - FolderPath *string `locationName:"folderPath" type:"string" required:"true"` + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The name of the repository. + // The name of the repository where you want to merge two branches. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` + + // The branch where the merge will be applied. + TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } // String returns the string representation -func (s GetFolderInput) String() string { +func (s MergeBranchesByFastForwardInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFolderInput) GoString() string { +func (s MergeBranchesByFastForwardInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetFolderInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFolderInput"} - if s.FolderPath == nil { - invalidParams.Add(request.NewErrParamRequired("FolderPath")) +func (s *MergeBranchesByFastForwardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeBranchesByFastForwardInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) @@ -8089,6 +12976,12 @@ func (s *GetFolderInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } + if s.TargetBranch != nil && len(*s.TargetBranch) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetBranch", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8096,110 +12989,88 @@ func (s *GetFolderInput) Validate() error { return nil } -// SetCommitSpecifier sets the CommitSpecifier field's value. -func (s *GetFolderInput) SetCommitSpecifier(v string) *GetFolderInput { - s.CommitSpecifier = &v - return s -} - -// SetFolderPath sets the FolderPath field's value. -func (s *GetFolderInput) SetFolderPath(v string) *GetFolderInput { - s.FolderPath = &v +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *MergeBranchesByFastForwardInput) SetDestinationCommitSpecifier(v string) *MergeBranchesByFastForwardInput { + s.DestinationCommitSpecifier = &v return s } // SetRepositoryName sets the RepositoryName field's value. 
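
// A sketch of a fast-forward branch merge using the input type above, assuming
// the svc client from the first sketch; branch and repository names are
// illustrative. Per Validate, the repository name and both commit specifiers
// are required, while TargetBranch is optional.
func fastForwardMerge(svc *codecommit.CodeCommit) error {
	out, err := svc.MergeBranchesByFastForward(&codecommit.MergeBranchesByFastForwardInput{
		RepositoryName:             aws.String("example-repo"),
		SourceCommitSpecifier:      aws.String("feature/widgets"),
		DestinationCommitSpecifier: aws.String("master"),
		TargetBranch:               aws.String("master"), // where the merge is applied
	})
	if err != nil {
		return err
	}
	fmt.Println("merge commit:", aws.StringValue(out.CommitId))
	return nil
}
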
-func (s *GetFolderInput) SetRepositoryName(v string) *GetFolderInput { +func (s *MergeBranchesByFastForwardInput) SetRepositoryName(v string) *MergeBranchesByFastForwardInput { s.RepositoryName = &v return s } -type GetFolderOutput struct { - _ struct{} `type:"structure"` - - // The full commit ID used as a reference for which version of the folder content - // is returned. - // - // CommitId is a required field - CommitId *string `locationName:"commitId" type:"string" required:"true"` - - // The list of files that exist in the specified folder, if any. - Files []*File `locationName:"files" type:"list"` - - // The fully-qualified path of the folder whose contents are returned. - // - // FolderPath is a required field - FolderPath *string `locationName:"folderPath" type:"string" required:"true"` +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *MergeBranchesByFastForwardInput) SetSourceCommitSpecifier(v string) *MergeBranchesByFastForwardInput { + s.SourceCommitSpecifier = &v + return s +} - // The list of folders that exist beneath the specified folder, if any. - SubFolders []*Folder `locationName:"subFolders" type:"list"` +// SetTargetBranch sets the TargetBranch field's value. +func (s *MergeBranchesByFastForwardInput) SetTargetBranch(v string) *MergeBranchesByFastForwardInput { + s.TargetBranch = &v + return s +} - // The list of submodules that exist in the specified folder, if any. - SubModules []*SubModule `locationName:"subModules" type:"list"` +type MergeBranchesByFastForwardOutput struct { + _ struct{} `type:"structure"` - // The list of symbolic links to other files and folders that exist in the specified - // folder, if any. - SymbolicLinks []*SymbolicLink `locationName:"symbolicLinks" type:"list"` + // The commit ID of the merge in the destination or target branch. + CommitId *string `locationName:"commitId" type:"string"` - // The full SHA-1 pointer of the tree information for the commit that contains - // the folder. + // The tree ID of the merge in the destination or target branch. TreeId *string `locationName:"treeId" type:"string"` } // String returns the string representation -func (s GetFolderOutput) String() string { +func (s MergeBranchesByFastForwardOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFolderOutput) GoString() string { +func (s MergeBranchesByFastForwardOutput) GoString() string { return s.String() } // SetCommitId sets the CommitId field's value. -func (s *GetFolderOutput) SetCommitId(v string) *GetFolderOutput { +func (s *MergeBranchesByFastForwardOutput) SetCommitId(v string) *MergeBranchesByFastForwardOutput { s.CommitId = &v return s } -// SetFiles sets the Files field's value. -func (s *GetFolderOutput) SetFiles(v []*File) *GetFolderOutput { - s.Files = v +// SetTreeId sets the TreeId field's value. +func (s *MergeBranchesByFastForwardOutput) SetTreeId(v string) *MergeBranchesByFastForwardOutput { + s.TreeId = &v return s } -// SetFolderPath sets the FolderPath field's value. -func (s *GetFolderOutput) SetFolderPath(v string) *GetFolderOutput { - s.FolderPath = &v - return s -} +type MergeBranchesBySquashInput struct { + _ struct{} `type:"structure"` -// SetSubFolders sets the SubFolders field's value. -func (s *GetFolderOutput) SetSubFolders(v []*Folder) *GetFolderOutput { - s.SubFolders = v - return s -} + // The name of the author who created the commit. This information will be used + // as both the author and committer for the commit. 
+ AuthorName *string `locationName:"authorName" type:"string"` -// SetSubModules sets the SubModules field's value. -func (s *GetFolderOutput) SetSubModules(v []*SubModule) *GetFolderOutput { - s.SubModules = v - return s -} + // The commit message for the merge. + CommitMessage *string `locationName:"commitMessage" type:"string"` -// SetSymbolicLinks sets the SymbolicLinks field's value. -func (s *GetFolderOutput) SetSymbolicLinks(v []*SymbolicLink) *GetFolderOutput { - s.SymbolicLinks = v - return s -} + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` -// SetTreeId sets the TreeId field's value. -func (s *GetFolderOutput) SetTreeId(v string) *GetFolderOutput { - s.TreeId = &v - return s -} + // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE + // is chosen as the conflict resolution strategy. + ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` -type GetMergeConflictsInput struct { - _ struct{} `type:"structure"` + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` // The branch, tag, HEAD, or other fully qualified reference used to identify // a commit. For example, a branch name or a full commit ID. @@ -8207,13 +13078,16 @@ type GetMergeConflictsInput struct { // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The merge option or strategy you want to use to merge the code. The only - // valid value is FAST_FORWARD_MERGE. - // - // MergeOption is a required field - MergeOption *string `locationName:"mergeOption" type:"string" required:"true" enum:"MergeOptionTypeEnum"` + // The email address of the person merging the branches. This information will + // be used in the commit information for the merge. + Email *string `locationName:"email" type:"string"` - // The name of the repository where the pull request was created. + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The name of the repository where you want to merge two branches. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` @@ -8223,27 +13097,27 @@ type GetMergeConflictsInput struct { // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` + + // The branch where the merge will be applied. 
+ TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } // String returns the string representation -func (s GetMergeConflictsInput) String() string { +func (s MergeBranchesBySquashInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetMergeConflictsInput) GoString() string { +func (s MergeBranchesBySquashInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetMergeConflictsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetMergeConflictsInput"} +func (s *MergeBranchesBySquashInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeBranchesBySquashInput"} if s.DestinationCommitSpecifier == nil { invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) } - if s.MergeOption == nil { - invalidParams.Add(request.NewErrParamRequired("MergeOption")) - } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -8253,6 +13127,14 @@ func (s *GetMergeConflictsInput) Validate() error { if s.SourceCommitSpecifier == nil { invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) } + if s.TargetBranch != nil && len(*s.TargetBranch) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetBranch", 1)) + } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8260,171 +13142,194 @@ func (s *GetMergeConflictsInput) Validate() error { return nil } -// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. -func (s *GetMergeConflictsInput) SetDestinationCommitSpecifier(v string) *GetMergeConflictsInput { - s.DestinationCommitSpecifier = &v +// SetAuthorName sets the AuthorName field's value. +func (s *MergeBranchesBySquashInput) SetAuthorName(v string) *MergeBranchesBySquashInput { + s.AuthorName = &v return s } -// SetMergeOption sets the MergeOption field's value. -func (s *GetMergeConflictsInput) SetMergeOption(v string) *GetMergeConflictsInput { - s.MergeOption = &v +// SetCommitMessage sets the CommitMessage field's value. +func (s *MergeBranchesBySquashInput) SetCommitMessage(v string) *MergeBranchesBySquashInput { + s.CommitMessage = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetMergeConflictsInput) SetRepositoryName(v string) *GetMergeConflictsInput { - s.RepositoryName = &v +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *MergeBranchesBySquashInput) SetConflictDetailLevel(v string) *MergeBranchesBySquashInput { + s.ConflictDetailLevel = &v return s } -// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. -func (s *GetMergeConflictsInput) SetSourceCommitSpecifier(v string) *GetMergeConflictsInput { - s.SourceCommitSpecifier = &v +// SetConflictResolution sets the ConflictResolution field's value. +func (s *MergeBranchesBySquashInput) SetConflictResolution(v *ConflictResolution) *MergeBranchesBySquashInput { + s.ConflictResolution = v return s } -type GetMergeConflictsOutput struct { - _ struct{} `type:"structure"` - - // The commit ID of the destination commit specifier that was used in the merge - // evaluation. 
- // - // DestinationCommitId is a required field - DestinationCommitId *string `locationName:"destinationCommitId" type:"string" required:"true"` - - // A Boolean value that indicates whether the code is mergable by the specified - // merge option. - // - // Mergeable is a required field - Mergeable *bool `locationName:"mergeable" type:"boolean" required:"true"` - - // The commit ID of the source commit specifier that was used in the merge evaluation. - // - // SourceCommitId is a required field - SourceCommitId *string `locationName:"sourceCommitId" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetMergeConflictsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetMergeConflictsOutput) GoString() string { - return s.String() -} - -// SetDestinationCommitId sets the DestinationCommitId field's value. -func (s *GetMergeConflictsOutput) SetDestinationCommitId(v string) *GetMergeConflictsOutput { - s.DestinationCommitId = &v +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *MergeBranchesBySquashInput) SetConflictResolutionStrategy(v string) *MergeBranchesBySquashInput { + s.ConflictResolutionStrategy = &v return s } -// SetMergeable sets the Mergeable field's value. -func (s *GetMergeConflictsOutput) SetMergeable(v bool) *GetMergeConflictsOutput { - s.Mergeable = &v +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *MergeBranchesBySquashInput) SetDestinationCommitSpecifier(v string) *MergeBranchesBySquashInput { + s.DestinationCommitSpecifier = &v return s } -// SetSourceCommitId sets the SourceCommitId field's value. -func (s *GetMergeConflictsOutput) SetSourceCommitId(v string) *GetMergeConflictsOutput { - s.SourceCommitId = &v +// SetEmail sets the Email field's value. +func (s *MergeBranchesBySquashInput) SetEmail(v string) *MergeBranchesBySquashInput { + s.Email = &v return s } -type GetPullRequestInput struct { - _ struct{} `type:"structure"` - - // The system-generated ID of the pull request. To get this ID, use ListPullRequests. - // - // PullRequestId is a required field - PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetPullRequestInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPullRequestInput) GoString() string { - return s.String() +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. +func (s *MergeBranchesBySquashInput) SetKeepEmptyFolders(v bool) *MergeBranchesBySquashInput { + s.KeepEmptyFolders = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPullRequestInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPullRequestInput"} - if s.PullRequestId == nil { - invalidParams.Add(request.NewErrParamRequired("PullRequestId")) - } +// SetRepositoryName sets the RepositoryName field's value. +func (s *MergeBranchesBySquashInput) SetRepositoryName(v string) *MergeBranchesBySquashInput { + s.RepositoryName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. 
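
// A sketch of the squash variant, assuming the svc client from the first
// sketch. The ConflictDetailLevel and ConflictResolutionStrategy literals
// correspond to the enum-tagged fields documented above: LINE_LEVEL narrows
// conflicts to same-line edits, and NONE leaves conflicts to be resolved
// manually, per those field descriptions.
func squashMerge(svc *codecommit.CodeCommit) error {
	out, err := svc.MergeBranchesBySquash(&codecommit.MergeBranchesBySquashInput{
		RepositoryName:             aws.String("example-repo"),
		SourceCommitSpecifier:      aws.String("feature/widgets"),
		DestinationCommitSpecifier: aws.String("master"),
		TargetBranch:               aws.String("master"),
		CommitMessage:              aws.String("Squash merge feature/widgets"),
		ConflictDetailLevel:        aws.String("LINE_LEVEL"), // ConflictDetailLevelTypeEnum
		ConflictResolutionStrategy: aws.String("NONE"),       // ConflictResolutionStrategyTypeEnum
	})
	if err != nil {
		return err
	}
	fmt.Println("squash commit:", aws.StringValue(out.CommitId))
	return nil
}
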
+func (s *MergeBranchesBySquashInput) SetSourceCommitSpecifier(v string) *MergeBranchesBySquashInput { + s.SourceCommitSpecifier = &v + return s } -// SetPullRequestId sets the PullRequestId field's value. -func (s *GetPullRequestInput) SetPullRequestId(v string) *GetPullRequestInput { - s.PullRequestId = &v +// SetTargetBranch sets the TargetBranch field's value. +func (s *MergeBranchesBySquashInput) SetTargetBranch(v string) *MergeBranchesBySquashInput { + s.TargetBranch = &v return s } -type GetPullRequestOutput struct { +type MergeBranchesBySquashOutput struct { _ struct{} `type:"structure"` - // Information about the specified pull request. - // - // PullRequest is a required field - PullRequest *PullRequest `locationName:"pullRequest" type:"structure" required:"true"` + // The commit ID of the merge in the destination or target branch. + CommitId *string `locationName:"commitId" type:"string"` + + // The tree ID of the merge in the destination or target branch. + TreeId *string `locationName:"treeId" type:"string"` } // String returns the string representation -func (s GetPullRequestOutput) String() string { +func (s MergeBranchesBySquashOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPullRequestOutput) GoString() string { +func (s MergeBranchesBySquashOutput) GoString() string { return s.String() } -// SetPullRequest sets the PullRequest field's value. -func (s *GetPullRequestOutput) SetPullRequest(v *PullRequest) *GetPullRequestOutput { - s.PullRequest = v +// SetCommitId sets the CommitId field's value. +func (s *MergeBranchesBySquashOutput) SetCommitId(v string) *MergeBranchesBySquashOutput { + s.CommitId = &v return s } -// Represents the input of a get repository operation. -type GetRepositoryInput struct { +// SetTreeId sets the TreeId field's value. +func (s *MergeBranchesBySquashOutput) SetTreeId(v string) *MergeBranchesBySquashOutput { + s.TreeId = &v + return s +} + +type MergeBranchesByThreeWayInput struct { _ struct{} `type:"structure"` - // The name of the repository to get information about. + // The name of the author who created the commit. This information will be used + // as both the author and committer for the commit. + AuthorName *string `locationName:"authorName" type:"string"` + + // The commit message to include in the commit information for the merge. + CommitMessage *string `locationName:"commitMessage" type:"string"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. + ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE + // is chosen as the conflict resolution strategy. + ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. 
+ ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // DestinationCommitSpecifier is a required field + DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` + + // The email address of the person merging the branches. This information will + // be used in the commit information for the merge. + Email *string `locationName:"email" type:"string"` + + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The name of the repository where you want to merge two branches. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The branch, tag, HEAD, or other fully qualified reference used to identify + // a commit. For example, a branch name or a full commit ID. + // + // SourceCommitSpecifier is a required field + SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` + + // The branch where the merge will be applied. + TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } // String returns the string representation -func (s GetRepositoryInput) String() string { +func (s MergeBranchesByThreeWayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryInput) GoString() string { +func (s MergeBranchesByThreeWayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetRepositoryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRepositoryInput"} +func (s *MergeBranchesByThreeWayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeBranchesByThreeWayInput"} + if s.DestinationCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCommitSpecifier")) + } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } + if s.SourceCommitSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCommitSpecifier")) + } + if s.TargetBranch != nil && len(*s.TargetBranch) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetBranch", 1)) + } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8432,233 +13337,325 @@ func (s *GetRepositoryInput) Validate() error { return nil } +// SetAuthorName sets the AuthorName field's value. +func (s *MergeBranchesByThreeWayInput) SetAuthorName(v string) *MergeBranchesByThreeWayInput { + s.AuthorName = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. 
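
// MergeBranchesByThreeWayInput has the same shape as the squash variant above,
// so a sketch differs only in the method called; AUTOMERGE is shown purely to
// illustrate a non-default ConflictResolutionStrategy, with the svc client
// again assumed from the first sketch.
func threeWayMerge(svc *codecommit.CodeCommit) error {
	_, err := svc.MergeBranchesByThreeWay(&codecommit.MergeBranchesByThreeWayInput{
		RepositoryName:             aws.String("example-repo"),
		SourceCommitSpecifier:      aws.String("feature/widgets"),
		DestinationCommitSpecifier: aws.String("master"),
		ConflictResolutionStrategy: aws.String("AUTOMERGE"), // attempt automatic resolution
	})
	return err
}
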
+func (s *MergeBranchesByThreeWayInput) SetCommitMessage(v string) *MergeBranchesByThreeWayInput { + s.CommitMessage = &v + return s +} + +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *MergeBranchesByThreeWayInput) SetConflictDetailLevel(v string) *MergeBranchesByThreeWayInput { + s.ConflictDetailLevel = &v + return s +} + +// SetConflictResolution sets the ConflictResolution field's value. +func (s *MergeBranchesByThreeWayInput) SetConflictResolution(v *ConflictResolution) *MergeBranchesByThreeWayInput { + s.ConflictResolution = v + return s +} + +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *MergeBranchesByThreeWayInput) SetConflictResolutionStrategy(v string) *MergeBranchesByThreeWayInput { + s.ConflictResolutionStrategy = &v + return s +} + +// SetDestinationCommitSpecifier sets the DestinationCommitSpecifier field's value. +func (s *MergeBranchesByThreeWayInput) SetDestinationCommitSpecifier(v string) *MergeBranchesByThreeWayInput { + s.DestinationCommitSpecifier = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *MergeBranchesByThreeWayInput) SetEmail(v string) *MergeBranchesByThreeWayInput { + s.Email = &v + return s +} + +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. +func (s *MergeBranchesByThreeWayInput) SetKeepEmptyFolders(v bool) *MergeBranchesByThreeWayInput { + s.KeepEmptyFolders = &v + return s +} + // SetRepositoryName sets the RepositoryName field's value. -func (s *GetRepositoryInput) SetRepositoryName(v string) *GetRepositoryInput { +func (s *MergeBranchesByThreeWayInput) SetRepositoryName(v string) *MergeBranchesByThreeWayInput { s.RepositoryName = &v return s } -// Represents the output of a get repository operation. -type GetRepositoryOutput struct { +// SetSourceCommitSpecifier sets the SourceCommitSpecifier field's value. +func (s *MergeBranchesByThreeWayInput) SetSourceCommitSpecifier(v string) *MergeBranchesByThreeWayInput { + s.SourceCommitSpecifier = &v + return s +} + +// SetTargetBranch sets the TargetBranch field's value. +func (s *MergeBranchesByThreeWayInput) SetTargetBranch(v string) *MergeBranchesByThreeWayInput { + s.TargetBranch = &v + return s +} + +type MergeBranchesByThreeWayOutput struct { _ struct{} `type:"structure"` - // Information about the repository. - RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` + // The commit ID of the merge in the destination or target branch. + CommitId *string `locationName:"commitId" type:"string"` + + // The tree ID of the merge in the destination or target branch. + TreeId *string `locationName:"treeId" type:"string"` } // String returns the string representation -func (s GetRepositoryOutput) String() string { +func (s MergeBranchesByThreeWayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryOutput) GoString() string { +func (s MergeBranchesByThreeWayOutput) GoString() string { return s.String() } -// SetRepositoryMetadata sets the RepositoryMetadata field's value. -func (s *GetRepositoryOutput) SetRepositoryMetadata(v *RepositoryMetadata) *GetRepositoryOutput { - s.RepositoryMetadata = v +// SetCommitId sets the CommitId field's value. +func (s *MergeBranchesByThreeWayOutput) SetCommitId(v string) *MergeBranchesByThreeWayOutput { + s.CommitId = &v return s } -// Represents the input of a get repository triggers operation. 
-type GetRepositoryTriggersInput struct { +// SetTreeId sets the TreeId field's value. +func (s *MergeBranchesByThreeWayOutput) SetTreeId(v string) *MergeBranchesByThreeWayOutput { + s.TreeId = &v + return s +} + +// Information about merge hunks in a merge or pull request operation. +type MergeHunk struct { _ struct{} `type:"structure"` - // The name of the repository for which the trigger is configured. - // - // RepositoryName is a required field - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + // Information about the merge hunk in the base of a merge or pull request. + Base *MergeHunkDetail `locationName:"base" type:"structure"` + + // Information about the merge hunk in the destination of a merge or pull request. + Destination *MergeHunkDetail `locationName:"destination" type:"structure"` + + // A Boolean value indicating whether a combination of hunks contains a conflict. + // Conflicts occur when the same file or the same lines in a file were modified + // in both the source and destination of a merge or pull request. Valid values + // include true, false, and null. This will be true when the hunk represents + // a conflict and one or more files contains a line conflict. File mode conflicts + // in a merge will not set this to be true. + IsConflict *bool `locationName:"isConflict" type:"boolean"` + + // Information about the merge hunk in the source of a merge or pull request. + Source *MergeHunkDetail `locationName:"source" type:"structure"` } // String returns the string representation -func (s GetRepositoryTriggersInput) String() string { +func (s MergeHunk) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryTriggersInput) GoString() string { +func (s MergeHunk) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetRepositoryTriggersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRepositoryTriggersInput"} - if s.RepositoryName == nil { - invalidParams.Add(request.NewErrParamRequired("RepositoryName")) - } - if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) - } +// SetBase sets the Base field's value. +func (s *MergeHunk) SetBase(v *MergeHunkDetail) *MergeHunk { + s.Base = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDestination sets the Destination field's value. +func (s *MergeHunk) SetDestination(v *MergeHunkDetail) *MergeHunk { + s.Destination = v + return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *GetRepositoryTriggersInput) SetRepositoryName(v string) *GetRepositoryTriggersInput { - s.RepositoryName = &v +// SetIsConflict sets the IsConflict field's value. +func (s *MergeHunk) SetIsConflict(v bool) *MergeHunk { + s.IsConflict = &v return s } -// Represents the output of a get repository triggers operation. -type GetRepositoryTriggersOutput struct { +// SetSource sets the Source field's value. +func (s *MergeHunk) SetSource(v *MergeHunkDetail) *MergeHunk { + s.Source = v + return s +} + +// Information about the details of a merge hunk that contains a conflict in +// a merge or pull request operation. +type MergeHunkDetail struct { _ struct{} `type:"structure"` - // The system-generated unique ID for the trigger. 
- ConfigurationId *string `locationName:"configurationId" type:"string"` + // The end position of the hunk in the merge result. + EndLine *int64 `locationName:"endLine" type:"integer"` - // The JSON block of configuration information for each trigger. - Triggers []*RepositoryTrigger `locationName:"triggers" type:"list"` + // The base-64 encoded content of the hunk merged region that might or might + // not contain a conflict. + HunkContent *string `locationName:"hunkContent" type:"string"` + + // The start position of the hunk in the merge result. + StartLine *int64 `locationName:"startLine" type:"integer"` } // String returns the string representation -func (s GetRepositoryTriggersOutput) String() string { +func (s MergeHunkDetail) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryTriggersOutput) GoString() string { +func (s MergeHunkDetail) GoString() string { return s.String() } -// SetConfigurationId sets the ConfigurationId field's value. -func (s *GetRepositoryTriggersOutput) SetConfigurationId(v string) *GetRepositoryTriggersOutput { - s.ConfigurationId = &v +// SetEndLine sets the EndLine field's value. +func (s *MergeHunkDetail) SetEndLine(v int64) *MergeHunkDetail { + s.EndLine = &v return s } -// SetTriggers sets the Triggers field's value. -func (s *GetRepositoryTriggersOutput) SetTriggers(v []*RepositoryTrigger) *GetRepositoryTriggersOutput { - s.Triggers = v +// SetHunkContent sets the HunkContent field's value. +func (s *MergeHunkDetail) SetHunkContent(v string) *MergeHunkDetail { + s.HunkContent = &v return s } -// Represents the input of a list branches operation. -type ListBranchesInput struct { +// SetStartLine sets the StartLine field's value. +func (s *MergeHunkDetail) SetStartLine(v int64) *MergeHunkDetail { + s.StartLine = &v + return s +} + +// Returns information about a merge or potential merge between a source reference +// and a destination reference in a pull request. +type MergeMetadata struct { _ struct{} `type:"structure"` - // An enumeration token that allows the operation to batch the results. - NextToken *string `locationName:"nextToken" type:"string"` + // A Boolean value indicating whether the merge has been made. + IsMerged *bool `locationName:"isMerged" type:"boolean"` - // The name of the repository that contains the branches. - // - // RepositoryName is a required field - RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + // The commit ID for the merge commit, if any. + MergeCommitId *string `locationName:"mergeCommitId" type:"string"` + + // The merge strategy used in the merge. + MergeOption *string `locationName:"mergeOption" type:"string" enum:"MergeOptionTypeEnum"` + + // The Amazon Resource Name (ARN) of the user who merged the branches. + MergedBy *string `locationName:"mergedBy" type:"string"` } // String returns the string representation -func (s ListBranchesInput) String() string { +func (s MergeMetadata) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBranchesInput) GoString() string { +func (s MergeMetadata) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListBranchesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBranchesInput"} - if s.RepositoryName == nil { - invalidParams.Add(request.NewErrParamRequired("RepositoryName")) - } - if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) - } +// SetIsMerged sets the IsMerged field's value. +func (s *MergeMetadata) SetIsMerged(v bool) *MergeMetadata { + s.IsMerged = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMergeCommitId sets the MergeCommitId field's value. +func (s *MergeMetadata) SetMergeCommitId(v string) *MergeMetadata { + s.MergeCommitId = &v + return s } -// SetNextToken sets the NextToken field's value. -func (s *ListBranchesInput) SetNextToken(v string) *ListBranchesInput { - s.NextToken = &v +// SetMergeOption sets the MergeOption field's value. +func (s *MergeMetadata) SetMergeOption(v string) *MergeMetadata { + s.MergeOption = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *ListBranchesInput) SetRepositoryName(v string) *ListBranchesInput { - s.RepositoryName = &v +// SetMergedBy sets the MergedBy field's value. +func (s *MergeMetadata) SetMergedBy(v string) *MergeMetadata { + s.MergedBy = &v return s } -// Represents the output of a list branches operation. -type ListBranchesOutput struct { +// Information about the file operation conflicts in a merge operation. +type MergeOperations struct { _ struct{} `type:"structure"` - // The list of branch names. - Branches []*string `locationName:"branches" type:"list"` - - // An enumeration token that returns the batch of the results. - NextToken *string `locationName:"nextToken" type:"string"` + // The operation on a file in the destination of a merge or pull request. + Destination *string `locationName:"destination" type:"string" enum:"ChangeTypeEnum"` + + // The operation on a file (add, modify, or delete) of a file in the source + // of a merge or pull request. + Source *string `locationName:"source" type:"string" enum:"ChangeTypeEnum"` } // String returns the string representation -func (s ListBranchesOutput) String() string { +func (s MergeOperations) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBranchesOutput) GoString() string { +func (s MergeOperations) GoString() string { return s.String() } -// SetBranches sets the Branches field's value. -func (s *ListBranchesOutput) SetBranches(v []*string) *ListBranchesOutput { - s.Branches = v +// SetDestination sets the Destination field's value. +func (s *MergeOperations) SetDestination(v string) *MergeOperations { + s.Destination = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListBranchesOutput) SetNextToken(v string) *ListBranchesOutput { - s.NextToken = &v +// SetSource sets the Source field's value. +func (s *MergeOperations) SetSource(v string) *MergeOperations { + s.Source = &v return s } -type ListPullRequestsInput struct { +type MergePullRequestByFastForwardInput struct { _ struct{} `type:"structure"` - // Optional. The Amazon Resource Name (ARN) of the user who created the pull - // request. If used, this filters the results to pull requests created by that - // user. - AuthorArn *string `locationName:"authorArn" type:"string"` - - // A non-negative integer used to limit the number of returned results. 
- MaxResults *int64 `locationName:"maxResults" type:"integer"` - - // An enumeration token that when provided in a request, returns the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` - - // Optional. The status of the pull request. If used, this refines the results - // to the pull requests that match the specified status. - PullRequestStatus *string `locationName:"pullRequestStatus" type:"string" enum:"PullRequestStatusEnum"` + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` - // The name of the repository for which you want to list pull requests. + // The name of the repository where the pull request was created. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The full commit ID of the original or updated commit in the pull request + // source branch. Pass this value if you want an exception thrown if the current + // commit ID of the tip of the source branch does not match this commit ID. + SourceCommitId *string `locationName:"sourceCommitId" type:"string"` } // String returns the string representation -func (s ListPullRequestsInput) String() string { +func (s MergePullRequestByFastForwardInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPullRequestsInput) GoString() string { +func (s MergePullRequestByFastForwardInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListPullRequestsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPullRequestsInput"} +func (s *MergePullRequestByFastForwardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergePullRequestByFastForwardInput"} + if s.PullRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("PullRequestId")) + } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -8672,234 +13669,253 @@ func (s *ListPullRequestsInput) Validate() error { return nil } -// SetAuthorArn sets the AuthorArn field's value. -func (s *ListPullRequestsInput) SetAuthorArn(v string) *ListPullRequestsInput { - s.AuthorArn = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListPullRequestsInput) SetMaxResults(v int64) *ListPullRequestsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPullRequestsInput) SetNextToken(v string) *ListPullRequestsInput { - s.NextToken = &v +// SetPullRequestId sets the PullRequestId field's value. +func (s *MergePullRequestByFastForwardInput) SetPullRequestId(v string) *MergePullRequestByFastForwardInput { + s.PullRequestId = &v return s } -// SetPullRequestStatus sets the PullRequestStatus field's value. -func (s *ListPullRequestsInput) SetPullRequestStatus(v string) *ListPullRequestsInput { - s.PullRequestStatus = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *MergePullRequestByFastForwardInput) SetRepositoryName(v string) *MergePullRequestByFastForwardInput { + s.RepositoryName = &v return s } -// SetRepositoryName sets the RepositoryName field's value. 
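
// A sketch of merging a pull request fast-forward, assuming the svc client from
// the first sketch. Passing SourceCommitId pins the merge to a known tip, so the
// call fails if the source branch has moved, per the field documentation above;
// the returned output carries the updated pull request.
func mergePRFastForward(svc *codecommit.CodeCommit, prID, tipCommitID string) error {
	_, err := svc.MergePullRequestByFastForward(&codecommit.MergePullRequestByFastForwardInput{
		PullRequestId:  aws.String(prID),
		RepositoryName: aws.String("example-repo"), // illustrative name
		SourceCommitId: aws.String(tipCommitID),
	})
	return err
}
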
-func (s *ListPullRequestsInput) SetRepositoryName(v string) *ListPullRequestsInput { - s.RepositoryName = &v +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *MergePullRequestByFastForwardInput) SetSourceCommitId(v string) *MergePullRequestByFastForwardInput { + s.SourceCommitId = &v return s } -type ListPullRequestsOutput struct { +type MergePullRequestByFastForwardOutput struct { _ struct{} `type:"structure"` - // An enumeration token that when provided in a request, returns the next batch - // of the results. - NextToken *string `locationName:"nextToken" type:"string"` - - // The system-generated IDs of the pull requests. - // - // PullRequestIds is a required field - PullRequestIds []*string `locationName:"pullRequestIds" type:"list" required:"true"` + // Information about the specified pull request, including information about + // the merge. + PullRequest *PullRequest `locationName:"pullRequest" type:"structure"` } // String returns the string representation -func (s ListPullRequestsOutput) String() string { +func (s MergePullRequestByFastForwardOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPullRequestsOutput) GoString() string { +func (s MergePullRequestByFastForwardOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListPullRequestsOutput) SetNextToken(v string) *ListPullRequestsOutput { - s.NextToken = &v - return s -} - -// SetPullRequestIds sets the PullRequestIds field's value. -func (s *ListPullRequestsOutput) SetPullRequestIds(v []*string) *ListPullRequestsOutput { - s.PullRequestIds = v +// SetPullRequest sets the PullRequest field's value. +func (s *MergePullRequestByFastForwardOutput) SetPullRequest(v *PullRequest) *MergePullRequestByFastForwardOutput { + s.PullRequest = v return s } -// Represents the input of a list repositories operation. -type ListRepositoriesInput struct { +type MergePullRequestBySquashInput struct { _ struct{} `type:"structure"` - // An enumeration token that allows the operation to batch the results of the - // operation. Batch sizes are 1,000 for list repository operations. When the - // client sends the token back to AWS CodeCommit, another page of 1,000 records - // is retrieved. - NextToken *string `locationName:"nextToken" type:"string"` + // The name of the author who created the commit. This information will be used + // as both the author and committer for the commit. + AuthorName *string `locationName:"authorName" type:"string"` - // The order in which to sort the results of a list repositories operation. - Order *string `locationName:"order" type:"string" enum:"OrderEnum"` + // The commit message to include in the commit information for the merge. + CommitMessage *string `locationName:"commitMessage" type:"string"` - // The criteria used to sort the results of a list repositories operation. - SortBy *string `locationName:"sortBy" type:"string" enum:"SortByEnum"` + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. 
+ ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE + // is chosen as the conflict resolution strategy. + ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The email address of the person merging the branches. This information will + // be used in the commit information for the merge. + Email *string `locationName:"email" type:"string"` + + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The name of the repository where the pull request was created. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // The full commit ID of the original or updated commit in the pull request + // source branch. Pass this value if you want an exception thrown if the current + // commit ID of the tip of the source branch does not match this commit ID. + SourceCommitId *string `locationName:"sourceCommitId" type:"string"` } // String returns the string representation -func (s ListRepositoriesInput) String() string { +func (s MergePullRequestBySquashInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRepositoriesInput) GoString() string { +func (s MergePullRequestBySquashInput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListRepositoriesInput) SetNextToken(v string) *ListRepositoriesInput { - s.NextToken = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *MergePullRequestBySquashInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergePullRequestBySquashInput"} + if s.PullRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("PullRequestId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } -// SetOrder sets the Order field's value. -func (s *ListRepositoriesInput) SetOrder(v string) *ListRepositoriesInput { - s.Order = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSortBy sets the SortBy field's value. 
-func (s *ListRepositoriesInput) SetSortBy(v string) *ListRepositoriesInput { - s.SortBy = &v +// SetAuthorName sets the AuthorName field's value. +func (s *MergePullRequestBySquashInput) SetAuthorName(v string) *MergePullRequestBySquashInput { + s.AuthorName = &v return s } -// Represents the output of a list repositories operation. -type ListRepositoriesOutput struct { - _ struct{} `type:"structure"` - - // An enumeration token that allows the operation to batch the results of the - // operation. Batch sizes are 1,000 for list repository operations. When the - // client sends the token back to AWS CodeCommit, another page of 1,000 records - // is retrieved. - NextToken *string `locationName:"nextToken" type:"string"` - - // Lists the repositories called by the list repositories operation. - Repositories []*RepositoryNameIdPair `locationName:"repositories" type:"list"` -} - -// String returns the string representation -func (s ListRepositoriesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListRepositoriesOutput) GoString() string { - return s.String() +// SetCommitMessage sets the CommitMessage field's value. +func (s *MergePullRequestBySquashInput) SetCommitMessage(v string) *MergePullRequestBySquashInput { + s.CommitMessage = &v + return s } -// SetNextToken sets the NextToken field's value. -func (s *ListRepositoriesOutput) SetNextToken(v string) *ListRepositoriesOutput { - s.NextToken = &v +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. +func (s *MergePullRequestBySquashInput) SetConflictDetailLevel(v string) *MergePullRequestBySquashInput { + s.ConflictDetailLevel = &v return s } -// SetRepositories sets the Repositories field's value. -func (s *ListRepositoriesOutput) SetRepositories(v []*RepositoryNameIdPair) *ListRepositoriesOutput { - s.Repositories = v +// SetConflictResolution sets the ConflictResolution field's value. +func (s *MergePullRequestBySquashInput) SetConflictResolution(v *ConflictResolution) *MergePullRequestBySquashInput { + s.ConflictResolution = v return s } -// Returns information about the location of a change or comment in the comparison -// between two commits or a pull request. -type Location struct { - _ struct{} `type:"structure"` - - // The name of the file being compared, including its extension and subdirectory, - // if any. - FilePath *string `locationName:"filePath" type:"string"` - - // The position of a change within a compared file, in line number format. - FilePosition *int64 `locationName:"filePosition" type:"long"` - - // In a comparison of commits or a pull request, whether the change is in the - // 'before' or 'after' of that comparison. - RelativeFileVersion *string `locationName:"relativeFileVersion" type:"string" enum:"RelativeFileVersionEnum"` +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *MergePullRequestBySquashInput) SetConflictResolutionStrategy(v string) *MergePullRequestBySquashInput { + s.ConflictResolutionStrategy = &v + return s } -// String returns the string representation -func (s Location) String() string { - return awsutil.Prettify(s) +// SetEmail sets the Email field's value. +func (s *MergePullRequestBySquashInput) SetEmail(v string) *MergePullRequestBySquashInput { + s.Email = &v + return s } -// GoString returns the string representation -func (s Location) GoString() string { - return s.String() +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. 
+func (s *MergePullRequestBySquashInput) SetKeepEmptyFolders(v bool) *MergePullRequestBySquashInput { + s.KeepEmptyFolders = &v + return s } -// SetFilePath sets the FilePath field's value. -func (s *Location) SetFilePath(v string) *Location { - s.FilePath = &v +// SetPullRequestId sets the PullRequestId field's value. +func (s *MergePullRequestBySquashInput) SetPullRequestId(v string) *MergePullRequestBySquashInput { + s.PullRequestId = &v return s } -// SetFilePosition sets the FilePosition field's value. -func (s *Location) SetFilePosition(v int64) *Location { - s.FilePosition = &v +// SetRepositoryName sets the RepositoryName field's value. +func (s *MergePullRequestBySquashInput) SetRepositoryName(v string) *MergePullRequestBySquashInput { + s.RepositoryName = &v return s } -// SetRelativeFileVersion sets the RelativeFileVersion field's value. -func (s *Location) SetRelativeFileVersion(v string) *Location { - s.RelativeFileVersion = &v +// SetSourceCommitId sets the SourceCommitId field's value. +func (s *MergePullRequestBySquashInput) SetSourceCommitId(v string) *MergePullRequestBySquashInput { + s.SourceCommitId = &v return s } -// Returns information about a merge or potential merge between a source reference -// and a destination reference in a pull request. -type MergeMetadata struct { +type MergePullRequestBySquashOutput struct { _ struct{} `type:"structure"` - // A Boolean value indicating whether the merge has been made. - IsMerged *bool `locationName:"isMerged" type:"boolean"` - - // The Amazon Resource Name (ARN) of the user who merged the branches. - MergedBy *string `locationName:"mergedBy" type:"string"` + // Returns information about a pull request. + PullRequest *PullRequest `locationName:"pullRequest" type:"structure"` } // String returns the string representation -func (s MergeMetadata) String() string { +func (s MergePullRequestBySquashOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MergeMetadata) GoString() string { +func (s MergePullRequestBySquashOutput) GoString() string { return s.String() } -// SetIsMerged sets the IsMerged field's value. -func (s *MergeMetadata) SetIsMerged(v bool) *MergeMetadata { - s.IsMerged = &v - return s -} - -// SetMergedBy sets the MergedBy field's value. -func (s *MergeMetadata) SetMergedBy(v string) *MergeMetadata { - s.MergedBy = &v +// SetPullRequest sets the PullRequest field's value. +func (s *MergePullRequestBySquashOutput) SetPullRequest(v *PullRequest) *MergePullRequestBySquashOutput { + s.PullRequest = v return s } -type MergePullRequestByFastForwardInput struct { +type MergePullRequestByThreeWayInput struct { _ struct{} `type:"structure"` + // The name of the author who created the commit. This information will be used + // as both the author and committer for the commit. + AuthorName *string `locationName:"authorName" type:"string"` + + // The commit message to include in the commit information for the merge. + CommitMessage *string `locationName:"commitMessage" type:"string"` + + // The level of conflict detail to use. If unspecified, the default FILE_LEVEL + // is used, which will return a not mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict will be considered + // not mergeable if the same file in both branches has differences on the same + // line. 
+ ConflictDetailLevel *string `locationName:"conflictDetailLevel" type:"string" enum:"ConflictDetailLevelTypeEnum"` + + // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE + // is chosen as the conflict resolution strategy. + ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` + + // Specifies which branch to use when resolving conflicts, or whether to attempt + // automatically merging two versions of a file. The default is NONE, which + // requires any conflicts to be resolved manually before the merge operation + // will be successful. + ConflictResolutionStrategy *string `locationName:"conflictResolutionStrategy" type:"string" enum:"ConflictResolutionStrategyTypeEnum"` + + // The email address of the person merging the branches. This information will + // be used in the commit information for the merge. + Email *string `locationName:"email" type:"string"` + + // If the commit contains deletions, whether to keep a folder or folder structure + // if the changes leave the folders empty. If this is specified as true, a .gitkeep + // file will be created for empty folders. The default is false. + KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` + // The system-generated ID of the pull request. To get this ID, use ListPullRequests. // // PullRequestId is a required field @@ -8917,18 +13933,18 @@ type MergePullRequestByFastForwardInput struct { } // String returns the string representation -func (s MergePullRequestByFastForwardInput) String() string { +func (s MergePullRequestByThreeWayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MergePullRequestByFastForwardInput) GoString() string { +func (s MergePullRequestByThreeWayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *MergePullRequestByFastForwardInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MergePullRequestByFastForwardInput"} +func (s *MergePullRequestByThreeWayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergePullRequestByThreeWayInput"} if s.PullRequestId == nil { invalidParams.Add(request.NewErrParamRequired("PullRequestId")) } @@ -8938,6 +13954,11 @@ func (s *MergePullRequestByFastForwardInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) } + if s.ConflictResolution != nil { + if err := s.ConflictResolution.Validate(); err != nil { + invalidParams.AddNested("ConflictResolution", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8945,48 +13966,131 @@ func (s *MergePullRequestByFastForwardInput) Validate() error { return nil } +// SetAuthorName sets the AuthorName field's value. +func (s *MergePullRequestByThreeWayInput) SetAuthorName(v string) *MergePullRequestByThreeWayInput { + s.AuthorName = &v + return s +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *MergePullRequestByThreeWayInput) SetCommitMessage(v string) *MergePullRequestByThreeWayInput { + s.CommitMessage = &v + return s +} + +// SetConflictDetailLevel sets the ConflictDetailLevel field's value. 
+func (s *MergePullRequestByThreeWayInput) SetConflictDetailLevel(v string) *MergePullRequestByThreeWayInput { + s.ConflictDetailLevel = &v + return s +} + +// SetConflictResolution sets the ConflictResolution field's value. +func (s *MergePullRequestByThreeWayInput) SetConflictResolution(v *ConflictResolution) *MergePullRequestByThreeWayInput { + s.ConflictResolution = v + return s +} + +// SetConflictResolutionStrategy sets the ConflictResolutionStrategy field's value. +func (s *MergePullRequestByThreeWayInput) SetConflictResolutionStrategy(v string) *MergePullRequestByThreeWayInput { + s.ConflictResolutionStrategy = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *MergePullRequestByThreeWayInput) SetEmail(v string) *MergePullRequestByThreeWayInput { + s.Email = &v + return s +} + +// SetKeepEmptyFolders sets the KeepEmptyFolders field's value. +func (s *MergePullRequestByThreeWayInput) SetKeepEmptyFolders(v bool) *MergePullRequestByThreeWayInput { + s.KeepEmptyFolders = &v + return s +} + // SetPullRequestId sets the PullRequestId field's value. -func (s *MergePullRequestByFastForwardInput) SetPullRequestId(v string) *MergePullRequestByFastForwardInput { +func (s *MergePullRequestByThreeWayInput) SetPullRequestId(v string) *MergePullRequestByThreeWayInput { s.PullRequestId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *MergePullRequestByFastForwardInput) SetRepositoryName(v string) *MergePullRequestByFastForwardInput { +func (s *MergePullRequestByThreeWayInput) SetRepositoryName(v string) *MergePullRequestByThreeWayInput { s.RepositoryName = &v return s } // SetSourceCommitId sets the SourceCommitId field's value. -func (s *MergePullRequestByFastForwardInput) SetSourceCommitId(v string) *MergePullRequestByFastForwardInput { +func (s *MergePullRequestByThreeWayInput) SetSourceCommitId(v string) *MergePullRequestByThreeWayInput { s.SourceCommitId = &v return s } -type MergePullRequestByFastForwardOutput struct { +type MergePullRequestByThreeWayOutput struct { _ struct{} `type:"structure"` - // Information about the specified pull request, including information about - // the merge. + // Returns information about a pull request. PullRequest *PullRequest `locationName:"pullRequest" type:"structure"` } // String returns the string representation -func (s MergePullRequestByFastForwardOutput) String() string { +func (s MergePullRequestByThreeWayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MergePullRequestByFastForwardOutput) GoString() string { +func (s MergePullRequestByThreeWayOutput) GoString() string { return s.String() } // SetPullRequest sets the PullRequest field's value. -func (s *MergePullRequestByFastForwardOutput) SetPullRequest(v *PullRequest) *MergePullRequestByFastForwardOutput { +func (s *MergePullRequestByThreeWayOutput) SetPullRequest(v *PullRequest) *MergePullRequestByThreeWayOutput { s.PullRequest = v return s } +// Information about the type of an object in a merge operation. +type ObjectTypes struct { + _ struct{} `type:"structure"` + + // The type of the object in the base commit of the merge. + Base *string `locationName:"base" type:"string" enum:"ObjectTypeEnum"` + + // The type of the object in the destination branch. + Destination *string `locationName:"destination" type:"string" enum:"ObjectTypeEnum"` + + // The type of the object in the source branch. 
+	Source *string `locationName:"source" type:"string" enum:"ObjectTypeEnum"`
+}
+
+// String returns the string representation
+func (s ObjectTypes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectTypes) GoString() string {
+	return s.String()
+}
+
+// SetBase sets the Base field's value.
+func (s *ObjectTypes) SetBase(v string) *ObjectTypes {
+	s.Base = &v
+	return s
+}
+
+// SetDestination sets the Destination field's value.
+func (s *ObjectTypes) SetDestination(v string) *ObjectTypes {
+	s.Destination = &v
+	return s
+}
+
+// SetSource sets the Source field's value.
+func (s *ObjectTypes) SetSource(v string) *ObjectTypes {
+	s.Source = &v
+	return s
+}
+
 type PostCommentForComparedCommitInput struct {
 	_ struct{} `type:"structure"`
 
@@ -8998,6 +14102,9 @@ type PostCommentForComparedCommitInput struct {
 	// To establish the directionality of the comparison, the full commit ID of
 	// the 'before' commit.
+	//
+	// This is required for commenting on any commit unless that commit is the initial
+	// commit.
 	BeforeCommitId *string `locationName:"beforeCommitId" type:"string"`
 
 	// A unique, client-generated idempotency token that when provided in a request,
@@ -10284,6 +15391,80 @@ func (s *PutRepositoryTriggersOutput) SetConfigurationId(v string) *PutRepositor
 	return s
 }
 
+// Information about a replacement content entry in the conflict of a merge
+// or pull request operation.
+type ReplaceContentEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The base-64 encoded content to use when the replacement type is USE_NEW_CONTENT.
+	//
+	// Content is automatically base64 encoded/decoded by the SDK.
+	Content []byte `locationName:"content" type:"blob"`
+
+	// The file mode to apply during conflict resolution.
+	FileMode *string `locationName:"fileMode" type:"string" enum:"FileModeTypeEnum"`
+
+	// The path of the conflicting file.
+	//
+	// FilePath is a required field
+	FilePath *string `locationName:"filePath" type:"string" required:"true"`
+
+	// The replacement type to use when determining how to resolve the conflict.
+	//
+	// ReplacementType is a required field
+	ReplacementType *string `locationName:"replacementType" type:"string" required:"true" enum:"ReplacementTypeEnum"`
+}
+
+// String returns the string representation
+func (s ReplaceContentEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceContentEntry) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceContentEntry) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplaceContentEntry"}
+	if s.FilePath == nil {
+		invalidParams.Add(request.NewErrParamRequired("FilePath"))
+	}
+	if s.ReplacementType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReplacementType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetContent sets the Content field's value.
+func (s *ReplaceContentEntry) SetContent(v []byte) *ReplaceContentEntry {
+	s.Content = v
+	return s
+}
+
+// SetFileMode sets the FileMode field's value.
+func (s *ReplaceContentEntry) SetFileMode(v string) *ReplaceContentEntry {
+	s.FileMode = &v
+	return s
+}
+
+// SetFilePath sets the FilePath field's value.
+func (s *ReplaceContentEntry) SetFilePath(v string) *ReplaceContentEntry {
+	s.FilePath = &v
+	return s
+}
+
+// SetReplacementType sets the ReplacementType field's value.
+func (s *ReplaceContentEntry) SetReplacementType(v string) *ReplaceContentEntry { + s.ReplacementType = &v + return s +} + // Information about a repository. type RepositoryMetadata struct { _ struct{} `type:"structure"` @@ -10429,7 +15610,8 @@ type RepositoryTrigger struct { // The branches that will be included in the trigger configuration. If you specify // an empty array, the trigger will apply to all branches. // - // While no content is required in the array, you must include the array itself. + // Although no content is required in the array, you must include the array + // itself. Branches []*string `locationName:"branches" type:"list"` // Any custom data associated with the trigger that will be included in the @@ -10437,14 +15619,13 @@ type RepositoryTrigger struct { CustomData *string `locationName:"customData" type:"string"` // The ARN of the resource that is the target for a trigger. For example, the - // ARN of a topic in Amazon Simple Notification Service (SNS). + // ARN of a topic in Amazon SNS. // // DestinationArn is a required field DestinationArn *string `locationName:"destinationArn" type:"string" required:"true"` // The repository events that will cause the trigger to run actions in another - // service, such as sending a notification through Amazon Simple Notification - // Service (SNS). + // service, such as sending a notification through Amazon SNS. // // The valid value "all" cannot be used with any other values. // @@ -10745,6 +15926,73 @@ func (s *SymbolicLink) SetRelativePath(v string) *SymbolicLink { return s } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which you want to add or + // update tags. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The key-value pair to use when tagging this repository. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Returns information about a target for a pull request. 
 type Target struct {
 	_ struct{} `type:"structure"`
 
@@ -10913,6 +16161,73 @@ func (s *TestRepositoryTriggersOutput) SetSuccessfulExecutions(v []*string) *Tes
 	return s
 }
 
+type UntagResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the resource from which you want to remove
+	// tags.
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
+
+	// The tag key for each tag that you want to remove from the resource.
+	//
+	// TagKeys is a required field
+	TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
+	if s.ResourceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+	}
+	if s.TagKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
+	s.ResourceArn = &v
+	return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
+	s.TagKeys = v
+	return s
+}
+
+type UntagResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UntagResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceOutput) GoString() string {
+	return s.String()
+}
+
 type UpdateCommentInput struct {
 	_ struct{} `type:"structure"`
 
@@ -11489,6 +16804,28 @@ const (
 	ChangeTypeEnumD = "D"
 )
 
+const (
+	// ConflictDetailLevelTypeEnumFileLevel is a ConflictDetailLevelTypeEnum enum value
+	ConflictDetailLevelTypeEnumFileLevel = "FILE_LEVEL"
+
+	// ConflictDetailLevelTypeEnumLineLevel is a ConflictDetailLevelTypeEnum enum value
+	ConflictDetailLevelTypeEnumLineLevel = "LINE_LEVEL"
+)
+
+const (
+	// ConflictResolutionStrategyTypeEnumNone is a ConflictResolutionStrategyTypeEnum enum value
+	ConflictResolutionStrategyTypeEnumNone = "NONE"
+
+	// ConflictResolutionStrategyTypeEnumAcceptSource is a ConflictResolutionStrategyTypeEnum enum value
+	ConflictResolutionStrategyTypeEnumAcceptSource = "ACCEPT_SOURCE"
+
+	// ConflictResolutionStrategyTypeEnumAcceptDestination is a ConflictResolutionStrategyTypeEnum enum value
+	ConflictResolutionStrategyTypeEnumAcceptDestination = "ACCEPT_DESTINATION"
+
+	// ConflictResolutionStrategyTypeEnumAutomerge is a ConflictResolutionStrategyTypeEnum enum value
+	ConflictResolutionStrategyTypeEnumAutomerge = "AUTOMERGE"
+)
+
 const (
 	// FileModeTypeEnumExecutable is a FileModeTypeEnum enum value
 	FileModeTypeEnumExecutable = "EXECUTABLE"
@@ -11503,6 +16840,26 @@ const (
 	// MergeOptionTypeEnumFastForwardMerge is a MergeOptionTypeEnum enum value
 	MergeOptionTypeEnumFastForwardMerge = "FAST_FORWARD_MERGE"
+
+	// MergeOptionTypeEnumSquashMerge is a MergeOptionTypeEnum enum value
+	MergeOptionTypeEnumSquashMerge = "SQUASH_MERGE"
+
+	// MergeOptionTypeEnumThreeWayMerge is a
MergeOptionTypeEnum enum value + MergeOptionTypeEnumThreeWayMerge = "THREE_WAY_MERGE" +) + +const ( + // ObjectTypeEnumFile is a ObjectTypeEnum enum value + ObjectTypeEnumFile = "FILE" + + // ObjectTypeEnumDirectory is a ObjectTypeEnum enum value + ObjectTypeEnumDirectory = "DIRECTORY" + + // ObjectTypeEnumGitLink is a ObjectTypeEnum enum value + ObjectTypeEnumGitLink = "GIT_LINK" + + // ObjectTypeEnumSymbolicLink is a ObjectTypeEnum enum value + ObjectTypeEnumSymbolicLink = "SYMBOLIC_LINK" ) const ( @@ -11543,6 +16900,20 @@ const ( RelativeFileVersionEnumAfter = "AFTER" ) +const ( + // ReplacementTypeEnumKeepBase is a ReplacementTypeEnum enum value + ReplacementTypeEnumKeepBase = "KEEP_BASE" + + // ReplacementTypeEnumKeepSource is a ReplacementTypeEnum enum value + ReplacementTypeEnumKeepSource = "KEEP_SOURCE" + + // ReplacementTypeEnumKeepDestination is a ReplacementTypeEnum enum value + ReplacementTypeEnumKeepDestination = "KEEP_DESTINATION" + + // ReplacementTypeEnumUseNewContent is a ReplacementTypeEnum enum value + ReplacementTypeEnumUseNewContent = "USE_NEW_CONTENT" +) + const ( // RepositoryTriggerEventEnumAll is a RepositoryTriggerEventEnum enum value RepositoryTriggerEventEnumAll = "all" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go index 71abd772b8b..9ee44edfbe4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go @@ -48,19 +48,22 @@ // * DeleteFile, which deletes the content of a specified file from a specified // branch. // +// * GetBlob, which returns the base-64 encoded content of an individual +// Git blob object within a repository. +// // * GetFile, which returns the base-64 encoded content of a specified file. // // * GetFolder, which returns the contents of a specified folder or directory. // -// * PutFile, which adds or modifies a file in a specified repository and -// branch. +// * PutFile, which adds or modifies a single file in a specified repository +// and branch. // -// Information about committed code in a repository, by calling the following: +// Commits, by calling the following: // -// * CreateCommit, which creates a commit for changes to a repository. +// * BatchGetCommits, which returns information about one or more commits +// in a repository // -// * GetBlob, which returns the base-64 encoded content of an individual -// Git blob object within a repository. +// * CreateCommit, which creates a commit for changes to a repository. // // * GetCommit, which returns information about a commit, including commit // messages and author and committer information. @@ -69,6 +72,37 @@ // valid commit specifier (such as a branch, tag, HEAD, commit ID or other // fully qualified reference). // +// Merges, by calling the following: +// +// * BatchDescribeMergeConflicts, which returns information about conflicts +// in a merge between commits in a repository. +// +// * CreateUnreferencedMergeCommit, which creates an unreferenced commit +// between two branches or commits for the purpose of comparing them and +// identifying any potential conflicts. +// +// * DescribeMergeConflicts, which returns information about merge conflicts +// between the base, source, and destination versions of a file in a potential +// merge. 
+//
+// * GetMergeCommit, which returns information about the merge between a
+// source and destination commit.
+//
+// * GetMergeConflicts, which returns information about merge conflicts between
+// the source and destination branch in a pull request.
+//
+// * GetMergeOptions, which returns information about the available merge
+// options between two branches or commit specifiers.
+//
+// * MergeBranchesByFastForward, which merges two branches using the fast-forward
+// merge option.
+//
+// * MergeBranchesBySquash, which merges two branches using the squash merge
+// option.
+//
+// * MergeBranchesByThreeWay, which merges two branches using the three-way
+// merge option.
+//
 // Pull requests, by calling the following:
 //
 // * CreatePullRequest, which creates a pull request in a specified repository.
@@ -79,9 +113,6 @@
 // * GetCommentsForPullRequest, which returns information about comments
 // on a specified pull request.
 //
-// * GetMergeConflicts, which returns information about merge conflicts between
-// the source and destination branch in a pull request.
-//
 // * GetPullRequest, which returns information about a specified pull request.
 //
 // * ListPullRequests, which lists all pull requests for a repository.
@@ -90,6 +121,14 @@
 // of a pull request into the specified destination branch for that pull
 // request using the fast-forward merge option.
 //
+// * MergePullRequestBySquash, which merges the source branch of a pull request
+// into the specified destination branch for that pull request using the squash
+// merge option.
+//
+// * MergePullRequestByThreeWay, which merges the source branch of a pull request
+// into the specified destination branch for that pull request using the three-way
+// merge option.
+//
 // * PostCommentForPullRequest, which posts a comment to a pull request at
 // the specified line, file, or request.
 //
@@ -100,7 +139,7 @@
 //
 // * UpdatePullRequestTitle, which updates the title of a pull request.
 //
-// Information about comments in a repository, by calling the following:
+// Comments in a repository, by calling the following:
 //
 // * DeleteCommentContent, which deletes the content of a comment on a commit
 // in a repository.
@@ -118,6 +157,16 @@
 // * UpdateComment, which updates the content of a comment on a commit in
 // a repository.
 //
+// Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the
+// following:
+//
+// * ListTagsForResource, which gets information about AWS tags for a specified
+// Amazon Resource Name (ARN) in AWS CodeCommit.
+//
+// * TagResource, which adds or updates tags for a resource in AWS CodeCommit.
+//
+// * UntagResource, which removes tags for a resource in AWS CodeCommit.
+//
 // Triggers, by calling the following:
 //
 // * GetRepositoryTriggers, which returns information about triggers configured
@@ -130,7 +179,7 @@
 // trigger by sending data to the trigger target.
 //
 // For information about how to use AWS CodeCommit, see the AWS CodeCommit User
-// Guide (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html).
+// Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html).
 //
 // See https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13 for more information on this service.
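For orientation, a minimal usage sketch of the merge operations vendored above, assuming a configured session; the region, repository name, and pull request ID are placeholder values, and the ConflictResolution.ReplaceContents field is defined elsewhere in this vendored SDK rather than in the hunks shown here:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/codecommit"
    )

    func main() {
        // Placeholder region; use the region that hosts the repository.
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
        svc := codecommit.New(sess)

        out, err := svc.MergePullRequestBySquash(&codecommit.MergePullRequestBySquashInput{
            PullRequestId:              aws.String("42"),      // placeholder; obtain via ListPullRequests
            RepositoryName:             aws.String("my-repo"), // placeholder
            CommitMessage:              aws.String("squash-merge pull request 42"),
            ConflictDetailLevel:        aws.String(codecommit.ConflictDetailLevelTypeEnumLineLevel),
            ConflictResolutionStrategy: aws.String(codecommit.ConflictResolutionStrategyTypeEnumAutomerge),
            // With AUTOMERGE, individual conflicts can still be settled explicitly,
            // here by supplying replacement content for one file.
            ConflictResolution: &codecommit.ConflictResolution{
                ReplaceContents: []*codecommit.ReplaceContentEntry{{
                    FilePath:        aws.String("README.md"),
                    ReplacementType: aws.String(codecommit.ReplacementTypeEnumUseNewContent),
                    Content:         []byte("resolved content"), // base64-encoded by the SDK
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.PullRequest.PullRequestStatus))
    }

MergePullRequestByThreeWay takes the same input shape, so switching merge strategies is only a matter of calling the other operation with the same fields.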
//
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go
index e97adeb7d6c..210448016d7 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go
@@ -130,6 +130,18 @@ const (
 	// A commit ID was not specified.
 	ErrCodeCommitIdRequiredException = "CommitIdRequiredException"
 
+	// ErrCodeCommitIdsLimitExceededException for service response error code
+	// "CommitIdsLimitExceededException".
+	//
+	// The maximum number of allowed commit IDs in a batch request is 100. Verify
+	// that your batch request contains no more than 100 commit IDs, and then try
+	// again.
+	ErrCodeCommitIdsLimitExceededException = "CommitIdsLimitExceededException"
+
+	// ErrCodeCommitIdsListRequiredException for service response error code
+	// "CommitIdsListRequiredException".
+	ErrCodeCommitIdsListRequiredException = "CommitIdsListRequiredException"
+
 	// ErrCodeCommitMessageLengthExceededException for service response error code
 	// "CommitMessageLengthExceededException".
 	//
@@ -142,6 +154,14 @@ const (
 	// A commit was not specified.
 	ErrCodeCommitRequiredException = "CommitRequiredException"
 
+	// ErrCodeConcurrentReferenceUpdateException for service response error code
+	// "ConcurrentReferenceUpdateException".
+	//
+	// The merge cannot be completed because the target branch has been modified.
+	// Another user might have modified the target branch while the merge was in
+	// progress. Wait a few minutes, and then try again.
+	ErrCodeConcurrentReferenceUpdateException = "ConcurrentReferenceUpdateException"
+
 	// ErrCodeDefaultBranchCannotBeDeletedException for service response error code
 	// "DefaultBranchCannotBeDeletedException".
 	//
@@ -208,8 +228,8 @@ const (
 	// "FileContentSizeLimitExceededException".
 	//
 	// The file cannot be added because it is too large. The maximum file size that
-	// can be added using PutFile is 6 MB, and the combined file content change
-	// size is 7 MB. Consider making these changes using a Git client.
+	// can be added is 6 MB, and the combined file content change size is 7 MB.
+	// Consider making these changes using a Git client.
 	ErrCodeFileContentSizeLimitExceededException = "FileContentSizeLimitExceededException"
 
 	// ErrCodeFileDoesNotExistException for service response error code
@@ -255,16 +275,16 @@ const (
 	//
 	// The specified file exceeds the file size limit for AWS CodeCommit. For more
 	// information about limits in AWS CodeCommit, see AWS CodeCommit User Guide
-	// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html).
+	// (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html).
 	ErrCodeFileTooLargeException = "FileTooLargeException"
 
 	// ErrCodeFolderContentSizeLimitExceededException for service response error code
 	// "FolderContentSizeLimitExceededException".
 	//
 	// The commit cannot be created because at least one of the overall changes
-	// in the commit result in a folder contents exceeding the limit of 6 MB. Either
-	// reduce the number and size of your changes, or split the changes across multiple
-	// folders.
+	// in the commit results in a folder whose contents exceed the limit of 6 MB.
+	// Either reduce the number and size of your changes, or split the changes across
+	// multiple folders.
ErrCodeFolderContentSizeLimitExceededException = "FolderContentSizeLimitExceededException" // ErrCodeFolderDoesNotExistException for service response error code @@ -333,6 +353,24 @@ const ( // The specified commit ID is not valid. ErrCodeInvalidCommitIdException = "InvalidCommitIdException" + // ErrCodeInvalidConflictDetailLevelException for service response error code + // "InvalidConflictDetailLevelException". + // + // The specified conflict detail level is not valid. + ErrCodeInvalidConflictDetailLevelException = "InvalidConflictDetailLevelException" + + // ErrCodeInvalidConflictResolutionException for service response error code + // "InvalidConflictResolutionException". + // + // The specified conflict resolution list is not valid. + ErrCodeInvalidConflictResolutionException = "InvalidConflictResolutionException" + + // ErrCodeInvalidConflictResolutionStrategyException for service response error code + // "InvalidConflictResolutionStrategyException". + // + // The specified conflict resolution strategy is not valid. + ErrCodeInvalidConflictResolutionStrategyException = "InvalidConflictResolutionStrategyException" + // ErrCodeInvalidContinuationTokenException for service response error code // "InvalidContinuationTokenException". // @@ -388,6 +426,18 @@ const ( // of the file you want to comment on. ErrCodeInvalidFilePositionException = "InvalidFilePositionException" + // ErrCodeInvalidMaxConflictFilesException for service response error code + // "InvalidMaxConflictFilesException". + // + // The specified value for the number of conflict files to return is not valid. + ErrCodeInvalidMaxConflictFilesException = "InvalidMaxConflictFilesException" + + // ErrCodeInvalidMaxMergeHunksException for service response error code + // "InvalidMaxMergeHunksException". + // + // The specified value for the number of merge hunks to return is not valid. + ErrCodeInvalidMaxMergeHunksException = "InvalidMaxMergeHunksException" + // ErrCodeInvalidMaxResultsException for service response error code // "InvalidMaxResultsException". // @@ -397,7 +447,8 @@ const ( // ErrCodeInvalidMergeOptionException for service response error code // "InvalidMergeOptionException". // - // The specified merge option is not valid. The only valid value is FAST_FORWARD_MERGE. + // The specified merge option is not valid for this operation. Not all merge + // strategies are supported for all operations. ErrCodeInvalidMergeOptionException = "InvalidMergeOptionException" // ErrCodeInvalidOrderException for service response error code @@ -464,6 +515,20 @@ const ( // is not valid in respect to the current file version. ErrCodeInvalidRelativeFileVersionEnumException = "InvalidRelativeFileVersionEnumException" + // ErrCodeInvalidReplacementContentException for service response error code + // "InvalidReplacementContentException". + // + // Automerge was specified for resolving the conflict, but the replacement type + // is not valid or content is missing. + ErrCodeInvalidReplacementContentException = "InvalidReplacementContentException" + + // ErrCodeInvalidReplacementTypeException for service response error code + // "InvalidReplacementTypeException". + // + // Automerge was specified for resolving the conflict, but the specified replacement + // type is not valid. + ErrCodeInvalidReplacementTypeException = "InvalidReplacementTypeException" + // ErrCodeInvalidRepositoryDescriptionException for service response error code // "InvalidRepositoryDescriptionException". 
// @@ -520,6 +585,14 @@ const ( // Triggers must be created in the same region as the target for the trigger. ErrCodeInvalidRepositoryTriggerRegionException = "InvalidRepositoryTriggerRegionException" + // ErrCodeInvalidResourceArnException for service response error code + // "InvalidResourceArnException". + // + // The value for the resource ARN is not valid. For more information about resources + // in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) + // in the AWS CodeCommit User Guide. + ErrCodeInvalidResourceArnException = "InvalidResourceArnException" + // ErrCodeInvalidSortByException for service response error code // "InvalidSortByException". // @@ -533,6 +606,30 @@ const ( // name, tag, or full commit ID. ErrCodeInvalidSourceCommitSpecifierException = "InvalidSourceCommitSpecifierException" + // ErrCodeInvalidSystemTagUsageException for service response error code + // "InvalidSystemTagUsageException". + // + // The specified tag is not valid. Key names cannot be prefixed with aws:. + ErrCodeInvalidSystemTagUsageException = "InvalidSystemTagUsageException" + + // ErrCodeInvalidTagKeysListException for service response error code + // "InvalidTagKeysListException". + // + // The list of tags is not valid. + ErrCodeInvalidTagKeysListException = "InvalidTagKeysListException" + + // ErrCodeInvalidTagsMapException for service response error code + // "InvalidTagsMapException". + // + // The map of tags is not valid. + ErrCodeInvalidTagsMapException = "InvalidTagsMapException" + + // ErrCodeInvalidTargetBranchException for service response error code + // "InvalidTargetBranchException". + // + // The specified target branch is not valid. + ErrCodeInvalidTargetBranchException = "InvalidTargetBranchException" + // ErrCodeInvalidTargetException for service response error code // "InvalidTargetException". // @@ -570,6 +667,18 @@ const ( // The number of branches for the trigger was exceeded. ErrCodeMaximumBranchesExceededException = "MaximumBranchesExceededException" + // ErrCodeMaximumConflictResolutionEntriesExceededException for service response error code + // "MaximumConflictResolutionEntriesExceededException". + // + // The number of allowed conflict resolution entries was exceeded. + ErrCodeMaximumConflictResolutionEntriesExceededException = "MaximumConflictResolutionEntriesExceededException" + + // ErrCodeMaximumFileContentToLoadExceededException for service response error code + // "MaximumFileContentToLoadExceededException". + // + // The number of files to load exceeds the allowed limit. + ErrCodeMaximumFileContentToLoadExceededException = "MaximumFileContentToLoadExceededException" + // ErrCodeMaximumFileEntriesExceededException for service response error code // "MaximumFileEntriesExceededException". // @@ -578,6 +687,13 @@ const ( // using a Git client for these changes. ErrCodeMaximumFileEntriesExceededException = "MaximumFileEntriesExceededException" + // ErrCodeMaximumItemsToCompareExceededException for service response error code + // "MaximumItemsToCompareExceededException". + // + // The maximum number of items to compare between the source or destination + // branches and the merge base has exceeded the maximum allowed. 
+	ErrCodeMaximumItemsToCompareExceededException = "MaximumItemsToCompareExceededException"
+
 	// ErrCodeMaximumOpenPullRequestsExceededException for service response error code
 	// "MaximumOpenPullRequestsExceededException".
 	//
@@ -605,6 +721,13 @@ const (
 	// A merge option or strategy is required, and none was provided.
 	ErrCodeMergeOptionRequiredException = "MergeOptionRequiredException"
 
+	// ErrCodeMultipleConflictResolutionEntriesException for service response error code
+	// "MultipleConflictResolutionEntriesException".
+	//
+	// More than one conflict resolution entry exists for the conflict. A conflict
+	// can have only one conflict resolution entry.
+	ErrCodeMultipleConflictResolutionEntriesException = "MultipleConflictResolutionEntriesException"
+
 	// ErrCodeMultipleRepositoriesInPullRequestException for service response error code
 	// "MultipleRepositoriesInPullRequestException".
 	//
@@ -617,8 +740,7 @@ const (
 	// "NameLengthExceededException".
 	//
 	// The user name is not valid because it has exceeded the character limit for
-	// file names. File names, including the path to the file, cannot exceed the
-	// character limit.
+	// author names.
 	ErrCodeNameLengthExceededException = "NameLengthExceededException"
 
 	// ErrCodeNoChangeException for service response error code
@@ -713,6 +835,18 @@ const (
 	// The specified reference is not a supported type.
 	ErrCodeReferenceTypeNotSupportedException = "ReferenceTypeNotSupportedException"
 
+	// ErrCodeReplacementContentRequiredException for service response error code
+	// "ReplacementContentRequiredException".
+	//
+	// USE_NEW_CONTENT was specified but no replacement content has been provided.
+	ErrCodeReplacementContentRequiredException = "ReplacementContentRequiredException"
+
+	// ErrCodeReplacementTypeRequiredException for service response error code
+	// "ReplacementTypeRequiredException".
+	//
+	// A replacement type is required.
+	ErrCodeReplacementTypeRequiredException = "ReplacementTypeRequiredException"
+
 	// ErrCodeRepositoryDoesNotExistException for service response error code
 	// "RepositoryDoesNotExistException".
 	//
@@ -783,6 +917,15 @@ const (
 	// The list of triggers for the repository is required but was not specified.
 	ErrCodeRepositoryTriggersListRequiredException = "RepositoryTriggersListRequiredException"
 
+	// ErrCodeResourceArnRequiredException for service response error code
+	// "ResourceArnRequiredException".
+	//
+	// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required.
+	// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources
+	// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats)
+	// in the AWS CodeCommit User Guide.
+	ErrCodeResourceArnRequiredException = "ResourceArnRequiredException"
+
 	// ErrCodeRestrictedSourceFileException for service response error code
 	// "RestrictedSourceFileException".
 	//
@@ -821,6 +964,24 @@ const (
 	// been specified for the commit.
 	ErrCodeSourceFileOrContentRequiredException = "SourceFileOrContentRequiredException"
 
+	// ErrCodeTagKeysListRequiredException for service response error code
+	// "TagKeysListRequiredException".
+	//
+	// A list of tag keys is required. The list cannot be empty or null.
+	ErrCodeTagKeysListRequiredException = "TagKeysListRequiredException"
+
+	// ErrCodeTagPolicyException for service response error code
+	// "TagPolicyException".
+	//
+	// The tag policy is not valid.
+ ErrCodeTagPolicyException = "TagPolicyException" + + // ErrCodeTagsMapRequiredException for service response error code + // "TagsMapRequiredException". + // + // A map of tags is required. + ErrCodeTagsMapRequiredException = "TagsMapRequiredException" + // ErrCodeTargetRequiredException for service response error code // "TargetRequiredException". // @@ -856,4 +1017,10 @@ const ( // // A pull request title is required. It cannot be empty or null. ErrCodeTitleRequiredException = "TitleRequiredException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The maximum number of tags for an AWS CodeCommit resource has been exceeded. + ErrCodeTooManyTagsException = "TooManyTagsException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go index c8cad394ab0..63bc31e314c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -46,11 +46,11 @@ const ( // svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeCommit { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeCommit { svc := &CodeCommit{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-13", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go index 600ee9d78fb..daa280bc127 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -155,7 +155,8 @@ func (c *CodeDeploy) BatchGetApplicationRevisionsRequest(input *BatchGetApplicat // BatchGetApplicationRevisions API operation for AWS CodeDeploy. // -// Gets information about one or more application revisions. +// Gets information about one or more application revisions. The maximum number +// of application revisions that can be returned is 25. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -249,7 +250,8 @@ func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInpu // BatchGetApplications API operation for AWS CodeDeploy. // -// Gets information about one or more applications. +// Gets information about one or more applications. The maximum number of applications +// that can be returned is 25. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -439,11 +441,13 @@ func (c *CodeDeploy) BatchGetDeploymentInstancesRequest(input *BatchGetDeploymen // BatchGetDeploymentInstances API operation for AWS CodeDeploy. // +// // This method works, but is deprecated. Use BatchGetDeploymentTargets instead. // -// Returns an array of instances associated with a deployment. This method works -// with EC2/On-premises and AWS Lambda compute platforms. The newer BatchGetDeploymentTargets -// works with all compute platforms. +// Returns an array of one or more instances associated with a deployment. This +// method works with EC2/On-premises and AWS Lambda compute platforms. The newer +// BatchGetDeploymentTargets works with all compute platforms. The maximum number +// of instances that can be returned is 25. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -544,8 +548,10 @@ func (c *CodeDeploy) BatchGetDeploymentTargetsRequest(input *BatchGetDeploymentT // BatchGetDeploymentTargets API operation for AWS CodeDeploy. // -// Returns an array of targets associated with a deployment. This method works -// with all compute types and should be used instead of the deprecated BatchGetDeploymentInstances. +// Returns an array of one or more targets associated with a deployment. This +// method works with all compute types and should be used instead of the deprecated +// BatchGetDeploymentInstances. The maximum number of targets that can be returned +// is 25. // // The type of targets returned depends on the deployment's compute platform: // @@ -572,6 +578,9 @@ func (c *CodeDeploy) BatchGetDeploymentTargetsRequest(input *BatchGetDeploymentT // * ErrCodeDeploymentDoesNotExistException "DeploymentDoesNotExistException" // The deployment with the IAM user or AWS account does not exist. // +// * ErrCodeDeploymentNotStartedException "DeploymentNotStartedException" +// The specified deployment has not started. +// // * ErrCodeDeploymentTargetIdRequiredException "DeploymentTargetIdRequiredException" // A deployment target ID was not provided. // @@ -653,7 +662,8 @@ func (c *CodeDeploy) BatchGetDeploymentsRequest(input *BatchGetDeploymentsInput) // BatchGetDeployments API operation for AWS CodeDeploy. // -// Gets information about one or more deployments. +// Gets information about one or more deployments. The maximum number of deployments +// that can be returned is 25. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -738,7 +748,8 @@ func (c *CodeDeploy) BatchGetOnPremisesInstancesRequest(input *BatchGetOnPremise // BatchGetOnPremisesInstances API operation for AWS CodeDeploy. // -// Gets information about one or more on-premises instances. +// Gets information about one or more on-premises instances. The maximum number +// of on-premises instances that can be returned is 25. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -955,6 +966,9 @@ func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (re // * ErrCodeInvalidComputePlatformException "InvalidComputePlatformException" // The computePlatform is invalid. 
The computePlatform should be Lambda or Server. // +// * ErrCodeInvalidTagsToAddException "InvalidTagsToAddException" +// The specified tags are not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateApplication func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { req, out := c.CreateApplicationRequest(input) @@ -1409,6 +1423,9 @@ func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupIn // The Amazon ECS service is associated with more than one deployment groups. // An Amazon ECS service can be associated with only one deployment group. // +// * ErrCodeInvalidTagsToAddException "InvalidTagsToAddException" +// The specified tags are not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentGroup func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { req, out := c.CreateDeploymentGroupRequest(input) @@ -1492,6 +1509,11 @@ func (c *CodeDeploy) DeleteApplicationRequest(input *DeleteApplicationInput) (re // * ErrCodeInvalidApplicationNameException "InvalidApplicationNameException" // The application name was specified in an invalid format. // +// * ErrCodeInvalidRoleException "InvalidRoleException" +// The service role ARN was specified in an invalid format. Or, if an Auto Scaling +// group was specified, the specified service role does not grant the appropriate +// permissions to Amazon EC2 Auto Scaling. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteApplication func (c *CodeDeploy) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { req, out := c.DeleteApplicationRequest(input) @@ -2496,6 +2518,9 @@ func (c *CodeDeploy) GetDeploymentTargetRequest(input *GetDeploymentTargetInput) // * ErrCodeDeploymentDoesNotExistException "DeploymentDoesNotExistException" // The deployment with the IAM user or AWS account does not exist. // +// * ErrCodeDeploymentNotStartedException "DeploymentNotStartedException" +// The specified deployment has not started. +// // * ErrCodeDeploymentTargetIdRequiredException "DeploymentTargetIdRequiredException" // A deployment target ID was not provided. // @@ -2739,7 +2764,7 @@ func (c *CodeDeploy) ListApplicationRevisionsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListApplicationRevisions operation. // pageNum := 0 // err := client.ListApplicationRevisionsPages(params, -// func(page *ListApplicationRevisionsOutput, lastPage bool) bool { +// func(page *codedeploy.ListApplicationRevisionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2771,10 +2796,12 @@ func (c *CodeDeploy) ListApplicationRevisionsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListApplicationRevisionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListApplicationRevisionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2874,7 +2901,7 @@ func (c *CodeDeploy) ListApplicationsWithContext(ctx aws.Context, input *ListApp // // Example iterating over at most 3 pages of a ListApplications operation. 
// pageNum := 0 // err := client.ListApplicationsPages(params, -// func(page *ListApplicationsOutput, lastPage bool) bool { +// func(page *codedeploy.ListApplicationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2906,10 +2933,12 @@ func (c *CodeDeploy) ListApplicationsPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3009,7 +3038,7 @@ func (c *CodeDeploy) ListDeploymentConfigsWithContext(ctx aws.Context, input *Li // // Example iterating over at most 3 pages of a ListDeploymentConfigs operation. // pageNum := 0 // err := client.ListDeploymentConfigsPages(params, -// func(page *ListDeploymentConfigsOutput, lastPage bool) bool { +// func(page *codedeploy.ListDeploymentConfigsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3041,10 +3070,12 @@ func (c *CodeDeploy) ListDeploymentConfigsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDeploymentConfigsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDeploymentConfigsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3154,7 +3185,7 @@ func (c *CodeDeploy) ListDeploymentGroupsWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListDeploymentGroups operation. // pageNum := 0 // err := client.ListDeploymentGroupsPages(params, -// func(page *ListDeploymentGroupsOutput, lastPage bool) bool { +// func(page *codedeploy.ListDeploymentGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3186,10 +3217,12 @@ func (c *CodeDeploy) ListDeploymentGroupsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDeploymentGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDeploymentGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3248,6 +3281,7 @@ func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstanc // ListDeploymentInstances API operation for AWS CodeDeploy. // +// // The newer BatchGetDeploymentTargets should be used instead because it works // with all compute types. ListDeploymentInstances throws an exception if it // is used with a compute platform other than EC2/On-premises or AWS Lambda. @@ -3332,7 +3366,7 @@ func (c *CodeDeploy) ListDeploymentInstancesWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a ListDeploymentInstances operation. 
// pageNum := 0 // err := client.ListDeploymentInstancesPages(params, -// func(page *ListDeploymentInstancesOutput, lastPage bool) bool { +// func(page *codedeploy.ListDeploymentInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3368,10 +3402,12 @@ func (c *CodeDeploy) ListDeploymentInstancesPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDeploymentInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDeploymentInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3599,7 +3635,7 @@ func (c *CodeDeploy) ListDeploymentsWithContext(ctx aws.Context, input *ListDepl // // Example iterating over at most 3 pages of a ListDeployments operation. // pageNum := 0 // err := client.ListDeploymentsPages(params, -// func(page *ListDeploymentsOutput, lastPage bool) bool { +// func(page *codedeploy.ListDeploymentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3631,10 +3667,12 @@ func (c *CodeDeploy) ListDeploymentsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDeploymentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDeploymentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3812,6 +3850,93 @@ func (c *CodeDeploy) ListOnPremisesInstancesWithContext(ctx aws.Context, input * return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/ListTagsForResource +func (c *CodeDeploy) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS CodeDeploy. +// +// Returns a list of tags for the resource identified by a specified ARN. Tags +// are used to organize and categorize your CodeDeploy resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation ListTagsForResource for usage and error information. 
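The repeated pagination hunks above make two related fixes: the doc-comment examples now qualify the callback's page type with the codedeploy package name so they compile when pasted into user code, and each Pages helper drops the cont flag, so a false return from the callback now breaks out before the paginator's Next call can fetch one page past the one the caller declined. A minimal sketch of driving one of these helpers the way the updated comments suggest; the empty input and the three-page cutoff are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	client := codedeploy.New(session.Must(session.NewSession()))

	// Stop after three pages; returning false now exits the loop via
	// break instead of letting the paginator fetch an extra page.
	pageNum := 0
	err := client.ListApplicationsPages(&codedeploy.ListApplicationsInput{},
		func(page *codedeploy.ListApplicationsOutput, lastPage bool) bool {
			pageNum++
			fmt.Println(page)
			return pageNum <= 3
		})
	if err != nil {
		log.Fatal(err)
	}
}
```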
+// +// Returned Error Codes: +// * ErrCodeArnNotSupportedException "ArnNotSupportedException" +// The specified ARN is not supported. For example, it might be an ARN for a +// resource that is not expected. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified ARN is not in a valid format. +// +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// The ARN of a resource is required, but was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/ListTagsForResource +func (c *CodeDeploy) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeDeploy) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutLifecycleEventHookExecutionStatus = "PutLifecycleEventHookExecutionStatus" // PutLifecycleEventHookExecutionStatusRequest generates a "aws/request.Request" representing the @@ -4416,6 +4541,213 @@ func (c *CodeDeploy) StopDeploymentWithContext(ctx aws.Context, input *StopDeplo return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TagResource +func (c *CodeDeploy) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS CodeDeploy. +// +// Associates the list of tags in the input Tags parameter with the resource +// identified by the ResourceArn input parameter. 
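To make the new tagging surface concrete, here is a minimal sketch that tags an application and reads the tags back through the two operations added in these hunks; the session wiring and the application ARN are placeholder assumptions, not values taken from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	// Region and credentials come from the environment.
	svc := codedeploy.New(session.Must(session.NewSession()))

	// Hypothetical application ARN, used only for illustration.
	arn := "arn:aws:codedeploy:us-east-1:111122223333:application:my-app"

	// Attach a tag to the application.
	_, err := svc.TagResource(&codedeploy.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags: []*codedeploy.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the tags back.
	out, err := svc.ListTagsForResource(&codedeploy.ListTagsForResourceInput{
		ResourceArn: aws.String(arn),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range out.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}
```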
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// The ARN of a resource is required, but was not found. +// +// * ErrCodeApplicationDoesNotExistException "ApplicationDoesNotExistException" +// The application does not exist with the IAM user or AWS account. +// +// * ErrCodeDeploymentGroupDoesNotExistException "DeploymentGroupDoesNotExistException" +// The named deployment group with the IAM user or AWS account does not exist. +// +// * ErrCodeDeploymentConfigDoesNotExistException "DeploymentConfigDoesNotExistException" +// The deployment configuration does not exist with the IAM user or AWS account. +// +// * ErrCodeTagRequiredException "TagRequiredException" +// A tag was not specified. +// +// * ErrCodeInvalidTagsToAddException "InvalidTagsToAddException" +// The specified tags are not valid. +// +// * ErrCodeArnNotSupportedException "ArnNotSupportedException" +// The specified ARN is not supported. For example, it might be an ARN for a +// resource that is not expected. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified ARN is not in a valid format. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TagResource +func (c *CodeDeploy) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeDeploy) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/UntagResource +func (c *CodeDeploy) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS CodeDeploy. +// +// Disassociates a resource from a list of tags. The resource is identified +// by the ResourceArn input parameter. The tags are identified by the list of +// keys in the TagKeys input parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// The ARN of a resource is required, but was not found. +// +// * ErrCodeApplicationDoesNotExistException "ApplicationDoesNotExistException" +// The application does not exist with the IAM user or AWS account. +// +// * ErrCodeDeploymentGroupDoesNotExistException "DeploymentGroupDoesNotExistException" +// The named deployment group with the IAM user or AWS account does not exist. +// +// * ErrCodeDeploymentConfigDoesNotExistException "DeploymentConfigDoesNotExistException" +// The deployment configuration does not exist with the IAM user or AWS account. +// +// * ErrCodeTagRequiredException "TagRequiredException" +// A tag was not specified. +// +// * ErrCodeInvalidTagsToAddException "InvalidTagsToAddException" +// The specified tags are not valid. +// +// * ErrCodeArnNotSupportedException "ArnNotSupportedException" +// The specified ARN is not supported. For example, it might be an ARN for a +// resource that is not expected. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified ARN is not in a valid format. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/UntagResource +func (c *CodeDeploy) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeDeploy) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...)
+ return out, req.Send() +} + const opUpdateApplication = "UpdateApplication" // UpdateApplicationRequest generates a "aws/request.Request" representing the @@ -5036,7 +5368,9 @@ type BatchGetApplicationRevisionsInput struct { // ApplicationName is a required field ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` - // Information to get about the application revisions, including type and location. + // An array of RevisionLocation objects that specify information to get about + // the application revisions, including type and location. The maximum number + // of RevisionLocation objects you can specify is 25. // // Revisions is a required field Revisions []*RevisionLocation `locationName:"revisions" type:"list" required:"true"` @@ -5129,7 +5463,8 @@ func (s *BatchGetApplicationRevisionsOutput) SetRevisions(v []*RevisionInfo) *Ba type BatchGetApplicationsInput struct { _ struct{} `type:"structure"` - // A list of application names separated by spaces. + // A list of application names separated by spaces. The maximum number of application + // names you can specify is 25. // // ApplicationNames is a required field ApplicationNames []*string `locationName:"applicationNames" type:"list" required:"true"` @@ -5287,7 +5622,8 @@ type BatchGetDeploymentInstancesInput struct { // DeploymentId is a required field DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` - // The unique IDs of instances used in the deployment. + // The unique IDs of instances used in the deployment. The maximum number of + // instance IDs you can specify is 25. // // InstanceIds is a required field InstanceIds []*string `locationName:"instanceIds" type:"list" required:"true"` @@ -5371,17 +5707,16 @@ type BatchGetDeploymentTargetsInput struct { DeploymentId *string `locationName:"deploymentId" type:"string"` // The unique IDs of the deployment targets. The compute platform of the deployment - // determines the type of the targets and their formats. + // determines the type of the targets and their formats. The maximum number + // of deployment target IDs you can specify is 25. // - // * For deployments that use the EC2/On-premises compute platform, the - // target IDs are EC2 or on-premises instances IDs, and their target type - // is instanceTarget. + // * For deployments that use the EC2/On-premises compute platform, the target + // IDs are EC2 or on-premises instances IDs, and their target type is instanceTarget. // - // * For deployments that use the AWS Lambda compute platform, the target + // * For deployments that use the AWS Lambda compute platform, the target // IDs are the names of Lambda functions, and their target type is instanceTarget. // - // - // * For deployments that use the Amazon ECS compute platform, the target + // * For deployments that use the Amazon ECS compute platform, the target // IDs are pairs of Amazon ECS clusters and services specified using the // format :. Their target type is ecsTarget. TargetIds []*string `locationName:"targetIds" type:"list"` @@ -5418,7 +5753,6 @@ type BatchGetDeploymentTargetsOutput struct { // // * EC2/On-premises: Each target object is an EC2 or on-premises instance. // - // // * AWS Lambda: The target object is a specific version of an AWS Lambda // function. // @@ -5446,7 +5780,8 @@ func (s *BatchGetDeploymentTargetsOutput) SetDeploymentTargets(v []*DeploymentTa type BatchGetDeploymentsInput struct { _ struct{} `type:"structure"` - // A list of deployment IDs, separated by spaces. 
+ // A list of deployment IDs, separated by spaces. The maximum number of deployment + // IDs you can specify is 25. // // DeploymentIds is a required field DeploymentIds []*string `locationName:"deploymentIds" type:"list" required:"true"` @@ -5509,7 +5844,8 @@ func (s *BatchGetDeploymentsOutput) SetDeploymentsInfo(v []*DeploymentInfo) *Bat type BatchGetOnPremisesInstancesInput struct { _ struct{} `type:"structure"` - // The names of the on-premises instances about which to get information. + // The names of the on-premises instances about which to get information. The + // maximum number of instance names you can specify is 25. // // InstanceNames is a required field InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` @@ -5614,7 +5950,8 @@ func (s *BlueGreenDeploymentConfiguration) SetTerminateBlueInstancesOnDeployment } // Information about whether instances in the original environment are terminated -// when a blue/green deployment is successful. +// when a blue/green deployment is successful. BlueInstanceTerminationOption +// does not apply to Lambda deployments. type BlueInstanceTerminationOption struct { _ struct{} `type:"structure"` @@ -5627,9 +5964,14 @@ type BlueInstanceTerminationOption struct { // the load balancer and removed from the deployment group. Action *string `locationName:"action" type:"string" enum:"InstanceAction"` - // The number of minutes to wait after a successful blue/green deployment before - // terminating instances from the original environment. The maximum setting - // is 2880 minutes (2 days). + // For an Amazon EC2 deployment, the number of minutes to wait after a successful + // blue/green deployment before terminating instances from the original environment. + // + // For an Amazon ECS deployment, the number of minutes before deleting the original + // (blue) task set. During an Amazon ECS deployment, CodeDeploy shifts traffic + // from the original (blue) task set to a replacement (green) task set. + // + // The maximum setting is 2880 minutes (2 days). TerminationWaitTimeInMinutes *int64 `locationName:"terminationWaitTimeInMinutes" type:"integer"` } @@ -5714,8 +6056,13 @@ type CreateApplicationInput struct { // ApplicationName is a required field ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` - // The destination platform type for the deployment (Lambda or Server). + // The destination platform type for the deployment (Lambda, Server, or ECS). ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` + + // The metadata that you apply to CodeDeploy applications to help you organize + // and categorize them. Each tag consists of a key and an optional value, both + // of which you define. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -5756,6 +6103,12 @@ func (s *CreateApplicationInput) SetComputePlatform(v string) *CreateApplication return s } +// SetTags sets the Tags field's value. +func (s *CreateApplicationInput) SetTags(v []*Tag) *CreateApplicationInput { + s.Tags = v + return s +} + // Represents the output of a CreateApplication operation. type CreateApplicationOutput struct { _ struct{} `type:"structure"` @@ -5784,7 +6137,7 @@ func (s *CreateApplicationOutput) SetApplicationId(v string) *CreateApplicationO type CreateDeploymentConfigInput struct { _ struct{} `type:"structure"` - // The destination platform type for the deployment (Lambda or Server>). 
+ // The destination platform type for the deployment (Lambda, Server, or ECS). ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` // The name of the deployment configuration to create. @@ -5973,6 +6326,11 @@ type CreateDeploymentGroupInput struct { // ServiceRoleArn is a required field ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string" required:"true"` + // The metadata that you apply to CodeDeploy deployment groups to help you organize + // and categorize them. Each tag consists of a key and an optional value, both + // of which you define. + Tags []*Tag `locationName:"tags" type:"list"` + // Information about triggers to create when the deployment group is created. // For examples, see Create a Trigger for an AWS CodeDeploy Event (https://docs.aws.amazon.com/codedeploy/latest/userguide/how-to-notify-sns.html) // in the AWS CodeDeploy User Guide. @@ -6107,6 +6465,12 @@ func (s *CreateDeploymentGroupInput) SetServiceRoleArn(v string) *CreateDeployme return s } +// SetTags sets the Tags field's value. +func (s *CreateDeploymentGroupInput) SetTags(v []*Tag) *CreateDeploymentGroupInput { + s.Tags = v + return s +} + // SetTriggerConfigurations sets the TriggerConfigurations field's value. func (s *CreateDeploymentGroupInput) SetTriggerConfigurations(v []*TriggerConfig) *CreateDeploymentGroupInput { s.TriggerConfigurations = v @@ -6589,7 +6953,7 @@ func (s *DeleteGitHubAccountTokenOutput) SetTokenName(v string) *DeleteGitHubAcc type DeploymentConfigInfo struct { _ struct{} `type:"structure"` - // The destination platform type for the deployment (Lambda or Server). + // The destination platform type for the deployment (Lambda, Server, or ECS). ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` // The time at which the deployment configuration was created. @@ -6675,7 +7039,7 @@ type DeploymentGroupInfo struct { // Information about blue/green deployment options for a deployment group. BlueGreenDeploymentConfiguration *BlueGreenDeploymentConfiguration `locationName:"blueGreenDeploymentConfiguration" type:"structure"` - // The destination platform type for the deployment group (Lambda or Server). + // The destination platform type for the deployment (Lambda, Server, or ECS). ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` // The deployment configuration name. @@ -6726,7 +7090,10 @@ type DeploymentGroupInfo struct { // tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters. OnPremisesTagSet *OnPremisesTagSet `locationName:"onPremisesTagSet" type:"structure"` - // A service role ARN. + // A service role Amazon Resource Name (ARN) that grants CodeDeploy permission + // to make calls to AWS services on your behalf. For more information, see Create + // a Service Role for AWS CodeDeploy (https://docs.aws.amazon.com/codedeploy/latest/userguide/getting-started-create-service-role.html) + // in the AWS CodeDeploy User Guide. ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` // Information about the deployment group's target revision, including type @@ -6894,7 +7261,7 @@ type DeploymentInfo struct { // A timestamp that indicates when the deployment was complete. CompleteTime *time.Time `locationName:"completeTime" type:"timestamp"` - // The destination platform type for the deployment (Lambda or Server). + // The destination platform type for the deployment (Lambda, Server, or ECS). 
ComputePlatform *string `locationName:"computePlatform" type:"string" enum:"ComputePlatform"` // A timestamp that indicates when the deployment was created. @@ -8860,26 +9227,92 @@ func (s *InstanceTarget) SetTargetId(v string) *InstanceTarget { return s } -// Information about the target AWS Lambda function during an AWS Lambda deployment. -type LambdaTarget struct { +// Information about a Lambda function specified in a deployment. +type LambdaFunctionInfo struct { _ struct{} `type:"structure"` - // The unique ID of a deployment. - DeploymentId *string `locationName:"deploymentId" type:"string"` + // The version of a Lambda function that production traffic points to. + CurrentVersion *string `locationName:"currentVersion" type:"string"` - // The date and time when the target Lambda function was updated by a deployment. - LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp"` + // The alias of a Lambda function. For more information, see Introduction to + // AWS Lambda Aliases (https://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). + FunctionAlias *string `locationName:"functionAlias" type:"string"` - // The lifecycle events of the deployment to this target Lambda function. - LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"` + // The name of a Lambda function. + FunctionName *string `locationName:"functionName" type:"string"` - // The status an AWS Lambda deployment's target Lambda function. - Status *string `locationName:"status" type:"string" enum:"TargetStatus"` + // The version of a Lambda function that production traffic points to after + // the Lambda function is deployed. + TargetVersion *string `locationName:"targetVersion" type:"string"` - // The ARN of the target. - TargetArn *string `locationName:"targetArn" type:"string"` + // The percentage of production traffic that the target version of a Lambda + // function receives. + TargetVersionWeight *float64 `locationName:"targetVersionWeight" type:"double"` +} - // The unique ID of a deployment target that has a type of lambdaTarget. +// String returns the string representation +func (s LambdaFunctionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionInfo) GoString() string { + return s.String() +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *LambdaFunctionInfo) SetCurrentVersion(v string) *LambdaFunctionInfo { + s.CurrentVersion = &v + return s +} + +// SetFunctionAlias sets the FunctionAlias field's value. +func (s *LambdaFunctionInfo) SetFunctionAlias(v string) *LambdaFunctionInfo { + s.FunctionAlias = &v + return s +} + +// SetFunctionName sets the FunctionName field's value. +func (s *LambdaFunctionInfo) SetFunctionName(v string) *LambdaFunctionInfo { + s.FunctionName = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *LambdaFunctionInfo) SetTargetVersion(v string) *LambdaFunctionInfo { + s.TargetVersion = &v + return s +} + +// SetTargetVersionWeight sets the TargetVersionWeight field's value. +func (s *LambdaFunctionInfo) SetTargetVersionWeight(v float64) *LambdaFunctionInfo { + s.TargetVersionWeight = &v + return s +} + +// Information about the target AWS Lambda function during an AWS Lambda deployment. +type LambdaTarget struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. 
+ DeploymentId *string `locationName:"deploymentId" type:"string"` + + // A LambdaFunctionInfo object that describes a target Lambda function. + LambdaFunctionInfo *LambdaFunctionInfo `locationName:"lambdaFunctionInfo" type:"structure"` + + // The date and time when the target Lambda function was updated by a deployment. + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp"` + + // The lifecycle events of the deployment to this target Lambda function. + LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"` + + // The status of an AWS Lambda deployment's target Lambda function. + Status *string `locationName:"status" type:"string" enum:"TargetStatus"` + + // The ARN of the target. + TargetArn *string `locationName:"targetArn" type:"string"` + + // The unique ID of a deployment target that has a type of lambdaTarget. TargetId *string `locationName:"targetId" type:"string"` } @@ -8899,6 +9332,12 @@ func (s *LambdaTarget) SetDeploymentId(v string) *LambdaTarget { return s } +// SetLambdaFunctionInfo sets the LambdaFunctionInfo field's value. +func (s *LambdaTarget) SetLambdaFunctionInfo(v *LambdaFunctionInfo) *LambdaTarget { + s.LambdaFunctionInfo = v + return s +} + // SetLastUpdatedAt sets the LastUpdatedAt field's value. func (s *LambdaTarget) SetLastUpdatedAt(v time.Time) *LambdaTarget { s.LastUpdatedAt = &v @@ -9561,7 +10000,13 @@ type ListDeploymentTargetsInput struct { // It can be used to return the next set of deployment targets in the list. NextToken *string `locationName:"nextToken" type:"string"` - // A key used to filter the returned targets. + // A key used to filter the returned targets. The two valid values are: + // + // * TargetStatus - A TargetStatus filter string can be Failed, InProgress, + // Pending, Ready, Skipped, Succeeded, or Unknown. + // + // * ServerInstanceLabel - A ServerInstanceLabel filter string can be Blue + // or Green. TargetFilters map[string][]*string `locationName:"targetFilters" type:"map"` } @@ -9633,12 +10078,18 @@ type ListDeploymentsInput struct { // The name of an AWS CodeDeploy application associated with the IAM user or // AWS account. + // + // If applicationName is specified, then deploymentGroupName must be specified. + // If it is not specified, then deploymentGroupName must not be specified. ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` // A time range (start and end) for returning a subset of the list of deployments. CreateTimeRange *TimeRange `locationName:"createTimeRange" type:"structure"` // The name of a deployment group for the specified application. + // + // If deploymentGroupName is specified, then applicationName must be specified. + // If it is not specified, then applicationName must not be specified. DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` // A subset of deployments to list by status: @@ -9897,6 +10348,93 @@ func (s *ListOnPremisesInstancesOutput) SetNextToken(v string) *ListOnPremisesIn return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // An identifier returned from the previous ListTagsForResource call. It can + // be used to return the next set of tags in the list. + NextToken *string `type:"string"` + + // The ARN of a CodeDeploy resource. ListTagsForResource returns all the tags + // associated with the resource that is identified by the ResourceArn.
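No Pages helper is generated for ListTagsForResource in this revision, so a caller would follow NextToken by hand. A sketch under that assumption; the package and helper names are hypothetical:

```go
package tagsutil

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

// allTags follows NextToken until the service stops returning one,
// accumulating every tag attached to the resource.
func allTags(svc *codedeploy.CodeDeploy, arn string) ([]*codedeploy.Tag, error) {
	var tags []*codedeploy.Tag
	input := &codedeploy.ListTagsForResourceInput{ResourceArn: aws.String(arn)}
	for {
		out, err := svc.ListTagsForResource(input)
		if err != nil {
			return nil, err
		}
		tags = append(tags, out.Tags...)
		if aws.StringValue(out.NextToken) == "" {
			return tags, nil
		}
		input.NextToken = out.NextToken
	}
}
```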
+ // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // If a large amount of information is returned, an identifier is also returned. + // It can be used in a subsequent ListTagsForResource call to return + // the next set of tags in the list. + NextToken *string `type:"string"` + + // A list of tags returned by ListTagsForResource. The tags are associated with + // the resource identified by the input ResourceArn parameter. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // Information about the Elastic Load Balancing load balancer or target group // used in a deployment. type LoadBalancerInfo struct { @@ -9967,15 +10505,15 @@ type MinimumHealthyHosts struct { // time. The deployment is successful if four or more instances are deployed // to successfully. Otherwise, the deployment fails. // - // In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime - // returns a minimum healthy instance type of MOST_CONCURRENCY and a value of - // 1. This means a deployment to only one instance at a time. (You cannot set - // the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, - // with CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that - // all instances but one are kept in a healthy state during the deployment. - // Although this allows one instance at a time to be taken offline for a new - // deployment, it also means that if the deployment to the last instance fails, - // the overall deployment is still successful. + // In a call to the GetDeploymentConfig, CodeDeployDefault.OneAtATime returns + // a minimum healthy instance type of MOST_CONCURRENCY and a value of 1.
This + // means a deployment to only one instance at a time. (You cannot set the type + // to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with + // CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that all + // instances but one are kept in a healthy state during the deployment. Although + // this allows one instance at a time to be taken offline for a new deployment, + // it also means that if the deployment to the last instance fails, the overall + // deployment is still successful. // // For more information, see AWS CodeDeploy Instance Health (https://docs.aws.amazon.com/codedeploy/latest/userguide/instances-health.html) // in the AWS CodeDeploy User Guide. @@ -10789,6 +11327,76 @@ func (s *TagFilter) SetValue(v string) *TagFilter { return s } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of a resource, such as a CodeDeploy application or deployment group. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // A list of tags that TagResource associates with a resource. The resource + // is identified by the ResourceArn input parameter. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Information about a target group in Elastic Load Balancing to use in a deployment. // Instances are registered as targets in a target group, and traffic is routed // to the target group. @@ -11147,6 +11755,77 @@ func (s *TriggerConfig) SetTriggerTargetArn(v string) *TriggerConfig { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies from which resource to disassociate the tags with + // the keys in the TagKeys input paramter. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // A list of keys of Tag objects. The Tag objects identified by the keys are + // disassociated from the resource specified by the ResourceArn input parameter. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Represents the input of an UpdateApplication operation. type UpdateApplicationInput struct { _ struct{} `type:"structure"` @@ -11595,56 +12274,71 @@ const ( ) const ( - // ErrorCodeDeploymentGroupMissing is a ErrorCode enum value - ErrorCodeDeploymentGroupMissing = "DEPLOYMENT_GROUP_MISSING" + // ErrorCodeAgentIssue is a ErrorCode enum value + ErrorCodeAgentIssue = "AGENT_ISSUE" + + // ErrorCodeAlarmActive is a ErrorCode enum value + ErrorCodeAlarmActive = "ALARM_ACTIVE" // ErrorCodeApplicationMissing is a ErrorCode enum value ErrorCodeApplicationMissing = "APPLICATION_MISSING" - // ErrorCodeRevisionMissing is a ErrorCode enum value - ErrorCodeRevisionMissing = "REVISION_MISSING" + // ErrorCodeAutoscalingValidationError is a ErrorCode enum value + ErrorCodeAutoscalingValidationError = "AUTOSCALING_VALIDATION_ERROR" - // ErrorCodeIamRoleMissing is a ErrorCode enum value - ErrorCodeIamRoleMissing = "IAM_ROLE_MISSING" + // ErrorCodeAutoScalingConfiguration is a ErrorCode enum value + ErrorCodeAutoScalingConfiguration = "AUTO_SCALING_CONFIGURATION" - // ErrorCodeIamRolePermissions is a ErrorCode enum value - ErrorCodeIamRolePermissions = "IAM_ROLE_PERMISSIONS" + // ErrorCodeAutoScalingIamRolePermissions is a ErrorCode enum value + ErrorCodeAutoScalingIamRolePermissions = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" - // ErrorCodeNoEc2Subscription is a ErrorCode enum value - ErrorCodeNoEc2Subscription = "NO_EC2_SUBSCRIPTION" + // ErrorCodeCodedeployResourceCannotBeFound is a ErrorCode enum value + ErrorCodeCodedeployResourceCannotBeFound = "CODEDEPLOY_RESOURCE_CANNOT_BE_FOUND" - // ErrorCodeOverMaxInstances is a ErrorCode enum value - ErrorCodeOverMaxInstances = "OVER_MAX_INSTANCES" + // ErrorCodeCustomerApplicationUnhealthy is a ErrorCode enum value + ErrorCodeCustomerApplicationUnhealthy = "CUSTOMER_APPLICATION_UNHEALTHY" - // ErrorCodeNoInstances is a ErrorCode enum value - ErrorCodeNoInstances = "NO_INSTANCES" + // ErrorCodeDeploymentGroupMissing is a 
ErrorCode enum value + ErrorCodeDeploymentGroupMissing = "DEPLOYMENT_GROUP_MISSING" - // ErrorCodeTimeout is a ErrorCode enum value - ErrorCodeTimeout = "TIMEOUT" + // ErrorCodeEcsUpdateError is a ErrorCode enum value + ErrorCodeEcsUpdateError = "ECS_UPDATE_ERROR" - // ErrorCodeHealthConstraintsInvalid is a ErrorCode enum value - ErrorCodeHealthConstraintsInvalid = "HEALTH_CONSTRAINTS_INVALID" + // ErrorCodeElasticLoadBalancingInvalid is a ErrorCode enum value + ErrorCodeElasticLoadBalancingInvalid = "ELASTIC_LOAD_BALANCING_INVALID" + + // ErrorCodeElbInvalidInstance is a ErrorCode enum value + ErrorCodeElbInvalidInstance = "ELB_INVALID_INSTANCE" // ErrorCodeHealthConstraints is a ErrorCode enum value ErrorCodeHealthConstraints = "HEALTH_CONSTRAINTS" + // ErrorCodeHealthConstraintsInvalid is a ErrorCode enum value + ErrorCodeHealthConstraintsInvalid = "HEALTH_CONSTRAINTS_INVALID" + + // ErrorCodeHookExecutionFailure is a ErrorCode enum value + ErrorCodeHookExecutionFailure = "HOOK_EXECUTION_FAILURE" + + // ErrorCodeIamRoleMissing is a ErrorCode enum value + ErrorCodeIamRoleMissing = "IAM_ROLE_MISSING" + + // ErrorCodeIamRolePermissions is a ErrorCode enum value + ErrorCodeIamRolePermissions = "IAM_ROLE_PERMISSIONS" + // ErrorCodeInternalError is a ErrorCode enum value ErrorCodeInternalError = "INTERNAL_ERROR" - // ErrorCodeThrottled is a ErrorCode enum value - ErrorCodeThrottled = "THROTTLED" - - // ErrorCodeAlarmActive is a ErrorCode enum value - ErrorCodeAlarmActive = "ALARM_ACTIVE" + // ErrorCodeInvalidEcsService is a ErrorCode enum value + ErrorCodeInvalidEcsService = "INVALID_ECS_SERVICE" - // ErrorCodeAgentIssue is a ErrorCode enum value - ErrorCodeAgentIssue = "AGENT_ISSUE" + // ErrorCodeInvalidLambdaConfiguration is a ErrorCode enum value + ErrorCodeInvalidLambdaConfiguration = "INVALID_LAMBDA_CONFIGURATION" - // ErrorCodeAutoScalingIamRolePermissions is a ErrorCode enum value - ErrorCodeAutoScalingIamRolePermissions = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" + // ErrorCodeInvalidLambdaFunction is a ErrorCode enum value + ErrorCodeInvalidLambdaFunction = "INVALID_LAMBDA_FUNCTION" - // ErrorCodeAutoScalingConfiguration is a ErrorCode enum value - ErrorCodeAutoScalingConfiguration = "AUTO_SCALING_CONFIGURATION" + // ErrorCodeInvalidRevision is a ErrorCode enum value + ErrorCodeInvalidRevision = "INVALID_REVISION" // ErrorCodeManualStop is a ErrorCode enum value ErrorCodeManualStop = "MANUAL_STOP" @@ -11658,32 +12352,26 @@ const ( // ErrorCodeMissingGithubToken is a ErrorCode enum value ErrorCodeMissingGithubToken = "MISSING_GITHUB_TOKEN" - // ErrorCodeElasticLoadBalancingInvalid is a ErrorCode enum value - ErrorCodeElasticLoadBalancingInvalid = "ELASTIC_LOAD_BALANCING_INVALID" - - // ErrorCodeElbInvalidInstance is a ErrorCode enum value - ErrorCodeElbInvalidInstance = "ELB_INVALID_INSTANCE" - - // ErrorCodeInvalidLambdaConfiguration is a ErrorCode enum value - ErrorCodeInvalidLambdaConfiguration = "INVALID_LAMBDA_CONFIGURATION" + // ErrorCodeNoEc2Subscription is a ErrorCode enum value + ErrorCodeNoEc2Subscription = "NO_EC2_SUBSCRIPTION" - // ErrorCodeInvalidLambdaFunction is a ErrorCode enum value - ErrorCodeInvalidLambdaFunction = "INVALID_LAMBDA_FUNCTION" + // ErrorCodeNoInstances is a ErrorCode enum value + ErrorCodeNoInstances = "NO_INSTANCES" - // ErrorCodeHookExecutionFailure is a ErrorCode enum value - ErrorCodeHookExecutionFailure = "HOOK_EXECUTION_FAILURE" + // ErrorCodeOverMaxInstances is a ErrorCode enum value + ErrorCodeOverMaxInstances = "OVER_MAX_INSTANCES" - // 
ErrorCodeAutoscalingValidationError is a ErrorCode enum value - ErrorCodeAutoscalingValidationError = "AUTOSCALING_VALIDATION_ERROR" + // ErrorCodeResourceLimitExceeded is a ErrorCode enum value + ErrorCodeResourceLimitExceeded = "RESOURCE_LIMIT_EXCEEDED" - // ErrorCodeInvalidEcsService is a ErrorCode enum value - ErrorCodeInvalidEcsService = "INVALID_ECS_SERVICE" + // ErrorCodeRevisionMissing is a ErrorCode enum value + ErrorCodeRevisionMissing = "REVISION_MISSING" - // ErrorCodeEcsUpdateError is a ErrorCode enum value - ErrorCodeEcsUpdateError = "ECS_UPDATE_ERROR" + // ErrorCodeThrottled is a ErrorCode enum value + ErrorCodeThrottled = "THROTTLED" - // ErrorCodeInvalidRevision is a ErrorCode enum value - ErrorCodeInvalidRevision = "INVALID_REVISION" + // ErrorCodeTimeout is a ErrorCode enum value + ErrorCodeTimeout = "TIMEOUT" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go index 788d421da49..ec27e17e6fd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/doc.go @@ -52,8 +52,8 @@ // this is an AppSpec file that specifies the Amazon ECS task definition, // container, and port where production traffic is rerouted. For an EC2/On-premises // deployment, this is an archive file that contains source content—source -// code, webpages, executable files, and deployment scripts—along with an -// AppSpec file. Revisions are stored in Amazon S3 buckets or GitHub repositories. +// code, webpages, executable files, and deployment scripts—along with +// an AppSpec file. Revisions are stored in Amazon S3 buckets or GitHub repositories. // For Amazon S3, a revision is uniquely identified by its Amazon S3 object // key and its ETag, version, or both. For GitHub, a revision is uniquely // identified by its commit ID. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go index 50cfb414f76..2d4a5150740 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go @@ -35,6 +35,13 @@ const ( // The minimum number of required application names was not specified. ErrCodeApplicationNameRequiredException = "ApplicationNameRequiredException" + // ErrCodeArnNotSupportedException for service response error code + // "ArnNotSupportedException". + // + // The specified ARN is not supported. For example, it might be an ARN for a + // resource that is not expected. + ErrCodeArnNotSupportedException = "ArnNotSupportedException" + // ErrCodeBatchLimitExceededException for service response error code // "BatchLimitExceededException". // @@ -270,6 +277,12 @@ const ( // The application name was specified in an invalid format. ErrCodeInvalidApplicationNameException = "InvalidApplicationNameException" + // ErrCodeInvalidArnException for service response error code + // "InvalidArnException". + // + // The specified ARN is not in a valid format. + ErrCodeInvalidArnException = "InvalidArnException" + // ErrCodeInvalidAutoRollbackConfigException for service response error code // "InvalidAutoRollbackConfigException". 
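Callers branch on these constants through the awserr type assertion that the operation docs describe. A sketch of handling the tagging-related codes added in the surrounding errors.go hunks; the classifyTagError helper is hypothetical:

```go
package tagerrs

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

// classifyTagError logs the tagging error codes added in this revision
// and returns any other error unchanged.
func classifyTagError(err error) error {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return err
	}
	switch aerr.Code() {
	case codedeploy.ErrCodeInvalidTagsToAddException:
		log.Printf("tags rejected: %s", aerr.Message())
	case codedeploy.ErrCodeArnNotSupportedException,
		codedeploy.ErrCodeInvalidArnException,
		codedeploy.ErrCodeResourceArnRequiredException:
		log.Printf("bad resource ARN: %s", aerr.Message())
	default:
		return err
	}
	return nil
}
```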
// @@ -542,6 +555,12 @@ const ( // The tag filter was specified in an invalid format. ErrCodeInvalidTagFilterException = "InvalidTagFilterException" + // ErrCodeInvalidTagsToAddException for service response error code + // "InvalidTagsToAddException". + // + // The specified tags are not valid. + ErrCodeInvalidTagsToAddException = "InvalidTagsToAddException" + // ErrCodeInvalidTargetFilterNameException for service response error code // "InvalidTargetFilterNameException". // @@ -620,6 +639,12 @@ const ( // The API used does not support the deployment. ErrCodeOperationNotSupportedException = "OperationNotSupportedException" + // ErrCodeResourceArnRequiredException for service response error code + // "ResourceArnRequiredException". + // + // The ARN of a resource is required, but was not found. + ErrCodeResourceArnRequiredException = "ResourceArnRequiredException" + // ErrCodeResourceValidationException for service response error code // "ResourceValidationException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go index dd1eaf41355..dbb6a1bd26c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -46,11 +46,11 @@ const ( // svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeDeploy { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeDeploy { svc := &CodeDeploy{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-06", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go index fedbc2bbdf6..8ff08bfc20a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go @@ -58,7 +58,7 @@ func (c *CodePipeline) AcknowledgeJobRequest(input *AcknowledgeJobInput) (req *r // AcknowledgeJob API operation for AWS CodePipeline. // // Returns information about a specified job and whether that job has been received -// by the job worker. Only used for custom actions. +// by the job worker. Used for custom actions only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -72,10 +72,10 @@ func (c *CodePipeline) AcknowledgeJobRequest(input *AcknowledgeJobInput) (req *r // The validation was specified in an invalid format. 
// // * ErrCodeInvalidNonceException "InvalidNonceException" -// The specified nonce was specified in an invalid format. +// The nonce was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/AcknowledgeJob func (c *CodePipeline) AcknowledgeJob(input *AcknowledgeJobInput) (*AcknowledgeJobOutput, error) { @@ -143,8 +143,8 @@ func (c *CodePipeline) AcknowledgeThirdPartyJobRequest(input *AcknowledgeThirdPa // AcknowledgeThirdPartyJob API operation for AWS CodePipeline. // -// Confirms a job worker has received the specified job. Only used for partner -// actions. +// Confirms a job worker has received the specified job. Used for partner actions +// only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -158,10 +158,10 @@ func (c *CodePipeline) AcknowledgeThirdPartyJobRequest(input *AcknowledgeThirdPa // The validation was specified in an invalid format. // // * ErrCodeInvalidNonceException "InvalidNonceException" -// The specified nonce was specified in an invalid format. +// The nonce was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeInvalidClientTokenException "InvalidClientTokenException" // The client token was specified in an invalid format @@ -250,6 +250,15 @@ func (c *CodePipeline) CreateCustomActionTypeRequest(input *CreateCustomActionTy // The number of pipelines associated with the AWS account has exceeded the // limit allowed for the account. // +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The tags limit for a resource has been exceeded. +// +// * ErrCodeInvalidTagsException "InvalidTagsException" +// The specified resource tags are invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/CreateCustomActionType func (c *CodePipeline) CreateCustomActionType(input *CreateCustomActionTypeInput) (*CreateCustomActionTypeOutput, error) { req, out := c.CreateCustomActionTypeRequest(input) @@ -318,6 +327,10 @@ func (c *CodePipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *r // // Creates a pipeline. // +// In the pipeline structure, you must include either artifactStore or artifactStores +// in your pipeline, but you cannot use both. If you create a cross-region action +// in your pipeline, you must use artifactStores. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -333,21 +346,30 @@ func (c *CodePipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *r // The specified pipeline name is already in use. // // * ErrCodeInvalidStageDeclarationException "InvalidStageDeclarationException" -// The specified stage declaration was specified in an invalid format. +// The stage declaration was specified in an invalid format. 
// // * ErrCodeInvalidActionDeclarationException "InvalidActionDeclarationException" -// The specified action declaration was specified in an invalid format. +// The action declaration was specified in an invalid format. // // * ErrCodeInvalidBlockerDeclarationException "InvalidBlockerDeclarationException" // Reserved for future use. // // * ErrCodeInvalidStructureException "InvalidStructureException" -// The specified structure was specified in an invalid format. +// The structure was specified in an invalid format. // // * ErrCodeLimitExceededException "LimitExceededException" // The number of pipelines associated with the AWS account has exceeded the // limit allowed for the account. // +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The tags limit for a resource has been exceeded. +// +// * ErrCodeInvalidTagsException "InvalidTagsException" +// The specified resource tags are invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/CreatePipeline func (c *CodePipeline) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { req, out := c.CreatePipelineRequest(input) @@ -415,8 +437,8 @@ func (c *CodePipeline) DeleteCustomActionTypeRequest(input *DeleteCustomActionTy // DeleteCustomActionType API operation for AWS CodePipeline. // -// Marks a custom action as deleted. PollForJobs for the custom action will -// fail after the action is marked for deletion. Only used for custom actions. +// Marks a custom action as deleted. PollForJobs for the custom action fails +// after the action is marked for deletion. Used for custom actions only. // // To re-create a custom action after it has been deleted you must use a string // in the version field that has never been used before. This string can be @@ -435,6 +457,9 @@ func (c *CodePipeline) DeleteCustomActionTypeRequest(input *DeleteCustomActionTy // * ErrCodeValidationException "ValidationException" // The validation was specified in an invalid format. // +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeleteCustomActionType func (c *CodePipeline) DeleteCustomActionType(input *DeleteCustomActionTypeInput) (*DeleteCustomActionTypeOutput, error) { req, out := c.DeleteCustomActionTypeRequest(input) @@ -515,6 +540,9 @@ func (c *CodePipeline) DeletePipelineRequest(input *DeletePipelineInput) (req *r // * ErrCodeValidationException "ValidationException" // The validation was specified in an invalid format. // +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeletePipeline func (c *CodePipeline) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { req, out := c.DeletePipelineRequest(input) @@ -584,9 +612,9 @@ func (c *CodePipeline) DeleteWebhookRequest(input *DeleteWebhookInput) (req *req // // Deletes a previously created webhook by name. Deleting the webhook stops // AWS CodePipeline from starting a pipeline every time an external event occurs. -// The API will return successfully when trying to delete a webhook that is -// already deleted. 
If a deleted webhook is re-created by calling PutWebhook -// with the same name, it will have a different URL. +// The API returns successfully when trying to delete a webhook that is already +// deleted. If a deleted webhook is re-created by calling PutWebhook with the +// same name, it will have a different URL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -599,6 +627,9 @@ func (c *CodePipeline) DeleteWebhookRequest(input *DeleteWebhookInput) (req *req // * ErrCodeValidationException "ValidationException" // The validation was specified in an invalid format. // +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DeleteWebhook func (c *CodePipeline) DeleteWebhook(input *DeleteWebhookInput) (*DeleteWebhookOutput, error) { req, out := c.DeleteWebhookRequest(input) @@ -667,7 +698,7 @@ func (c *CodePipeline) DeregisterWebhookWithThirdPartyRequest(input *DeregisterW // DeregisterWebhookWithThirdParty API operation for AWS CodePipeline. // // Removes the connection between the webhook that was created by CodePipeline -// and the external tool with events to be detected. Currently only supported +// and the external tool with events to be detected. Currently supported only // for webhooks that target an action type of GitHub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -766,10 +797,10 @@ func (c *CodePipeline) DisableStageTransitionRequest(input *DisableStageTransiti // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeStageNotFoundException "StageNotFoundException" -// The specified stage was specified in an invalid format or cannot be found. +// The stage was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/DisableStageTransition func (c *CodePipeline) DisableStageTransition(input *DisableStageTransitionInput) (*DisableStageTransitionOutput, error) { @@ -852,10 +883,10 @@ func (c *CodePipeline) EnableStageTransitionRequest(input *EnableStageTransition // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeStageNotFoundException "StageNotFoundException" -// The specified stage was specified in an invalid format or cannot be found. +// The stage was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/EnableStageTransition func (c *CodePipeline) EnableStageTransition(input *EnableStageTransitionInput) (*EnableStageTransitionOutput, error) { @@ -923,12 +954,12 @@ func (c *CodePipeline) GetJobDetailsRequest(input *GetJobDetailsInput) (req *req // GetJobDetails API operation for AWS CodePipeline. // -// Returns information about a job. Only used for custom actions. +// Returns information about a job. Used for custom actions only. 
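//
// A minimal sketch, assuming a custom job worker that already holds a job ID
// from PollForJobs; the region, the job ID, and the svc client construction
// (plus the usual aws, session, and codepipeline imports) are illustrative
// assumptions, not part of the generated SDK:
//
//    svc := codepipeline.New(session.Must(session.NewSession(
//        aws.NewConfig().WithRegion("us-east-1"))))
//    out, err := svc.GetJobDetails(&codepipeline.GetJobDetailsInput{
//        JobId: aws.String("11111111-abcd-1111-abcd-111111abcdef"), // from PollForJobs
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(out.JobDetails.Data.PipelineContext)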
// // When this API is called, AWS CodePipeline returns temporary credentials for // the Amazon S3 bucket used to store artifacts for the pipeline, if the action -// requires access to that Amazon S3 bucket for input or output artifacts. Additionally, -// this API returns any secret values defined for the action. +// requires access to that Amazon S3 bucket for input or output artifacts. This +// API also returns any secret values defined for the action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -942,7 +973,7 @@ func (c *CodePipeline) GetJobDetailsRequest(input *GetJobDetailsInput) (req *req // The validation was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetJobDetails func (c *CodePipeline) GetJobDetails(input *GetJobDetailsInput) (*GetJobDetailsOutput, error) { @@ -1026,11 +1057,10 @@ func (c *CodePipeline) GetPipelineRequest(input *GetPipelineInput) (req *request // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodePipelineVersionNotFoundException "PipelineVersionNotFoundException" -// The specified pipeline version was specified in an invalid format or cannot -// be found. +// The pipeline version was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetPipeline func (c *CodePipeline) GetPipeline(input *GetPipelineInput) (*GetPipelineOutput, error) { @@ -1114,7 +1144,7 @@ func (c *CodePipeline) GetPipelineExecutionRequest(input *GetPipelineExecutionIn // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodePipelineExecutionNotFoundException "PipelineExecutionNotFoundException" // The pipeline execution was specified in an invalid format or cannot be found, @@ -1204,7 +1234,7 @@ func (c *CodePipeline) GetPipelineStateRequest(input *GetPipelineStateInput) (re // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetPipelineState func (c *CodePipeline) GetPipelineState(input *GetPipelineStateInput) (*GetPipelineStateOutput, error) { @@ -1272,13 +1302,13 @@ func (c *CodePipeline) GetThirdPartyJobDetailsRequest(input *GetThirdPartyJobDet // GetThirdPartyJobDetails API operation for AWS CodePipeline. // -// Requests the details of a job for a third party action. Only used for partner -// actions. +// Requests the details of a job for a third party action. Used for partner +// actions only. 
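//
// A usage sketch with placeholder inputs; both values are issued through the
// partner integration and are assumptions here, not SDK-provided samples:
//
//    out, err := svc.GetThirdPartyJobDetails(&codepipeline.GetThirdPartyJobDetailsInput{
//        ClientToken: aws.String("example-partner-client-token"),
//        JobId:       aws.String("example-third-party-job-id"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.JobDetails.Id))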
// // When this API is called, AWS CodePipeline returns temporary credentials for // the Amazon S3 bucket used to store artifacts for the pipeline, if the action -// requires access to that Amazon S3 bucket for input or output artifacts. Additionally, -// this API returns any secret values defined for the action. +// requires access to that Amazon S3 bucket for input or output artifacts. This +// API also returns any secret values defined for the action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1289,7 +1319,7 @@ func (c *CodePipeline) GetThirdPartyJobDetailsRequest(input *GetThirdPartyJobDet // // Returned Error Codes: // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeValidationException "ValidationException" // The validation was specified in an invalid format. @@ -1298,7 +1328,7 @@ func (c *CodePipeline) GetThirdPartyJobDetailsRequest(input *GetThirdPartyJobDet // The client token was specified in an invalid format // // * ErrCodeInvalidJobException "InvalidJobException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/GetThirdPartyJobDetails func (c *CodePipeline) GetThirdPartyJobDetails(input *GetThirdPartyJobDetailsInput) (*GetThirdPartyJobDetailsOutput, error) { @@ -1353,6 +1383,12 @@ func (c *CodePipeline) ListActionExecutionsRequest(input *ListActionExecutionsIn Name: opListActionExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1380,11 +1416,11 @@ func (c *CodePipeline) ListActionExecutionsRequest(input *ListActionExecutionsIn // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The next token was specified in an invalid format. Make sure that the next -// token you provided is the token returned by a previous call. +// token you provide is the token returned by a previous call. // // * ErrCodePipelineExecutionNotFoundException "PipelineExecutionNotFoundException" // The pipeline execution was specified in an invalid format or cannot be found, @@ -1412,6 +1448,58 @@ func (c *CodePipeline) ListActionExecutionsWithContext(ctx aws.Context, input *L return out, req.Send() } +// ListActionExecutionsPages iterates over the pages of a ListActionExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListActionExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListActionExecutions operation. 
+// pageNum := 0 +// err := client.ListActionExecutionsPages(params, +// func(page *codepipeline.ListActionExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListActionExecutionsPages(input *ListActionExecutionsInput, fn func(*ListActionExecutionsOutput, bool) bool) error { + return c.ListActionExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListActionExecutionsPagesWithContext same as ListActionExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListActionExecutionsPagesWithContext(ctx aws.Context, input *ListActionExecutionsInput, fn func(*ListActionExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListActionExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListActionExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListActionExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListActionTypes = "ListActionTypes" // ListActionTypesRequest generates a "aws/request.Request" representing the @@ -1443,6 +1531,12 @@ func (c *CodePipeline) ListActionTypesRequest(input *ListActionTypesInput) (req Name: opListActionTypes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -1472,7 +1566,7 @@ func (c *CodePipeline) ListActionTypesRequest(input *ListActionTypesInput) (req // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The next token was specified in an invalid format. Make sure that the next -// token you provided is the token returned by a previous call. +// token you provide is the token returned by a previous call. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListActionTypes func (c *CodePipeline) ListActionTypes(input *ListActionTypesInput) (*ListActionTypesOutput, error) { @@ -1496,6 +1590,58 @@ func (c *CodePipeline) ListActionTypesWithContext(ctx aws.Context, input *ListAc return out, req.Send() } +// ListActionTypesPages iterates over the pages of a ListActionTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListActionTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListActionTypes operation. 
+// pageNum := 0 +// err := client.ListActionTypesPages(params, +// func(page *codepipeline.ListActionTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListActionTypesPages(input *ListActionTypesInput, fn func(*ListActionTypesOutput, bool) bool) error { + return c.ListActionTypesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListActionTypesPagesWithContext same as ListActionTypesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListActionTypesPagesWithContext(ctx aws.Context, input *ListActionTypesInput, fn func(*ListActionTypesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListActionTypesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListActionTypesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListActionTypesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPipelineExecutions = "ListPipelineExecutions" // ListPipelineExecutionsRequest generates a "aws/request.Request" representing the @@ -1527,6 +1673,12 @@ func (c *CodePipeline) ListPipelineExecutionsRequest(input *ListPipelineExecutio Name: opListPipelineExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1554,11 +1706,11 @@ func (c *CodePipeline) ListPipelineExecutionsRequest(input *ListPipelineExecutio // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The next token was specified in an invalid format. Make sure that the next -// token you provided is the token returned by a previous call. +// token you provide is the token returned by a previous call. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListPipelineExecutions func (c *CodePipeline) ListPipelineExecutions(input *ListPipelineExecutionsInput) (*ListPipelineExecutionsOutput, error) { @@ -1582,6 +1734,58 @@ func (c *CodePipeline) ListPipelineExecutionsWithContext(ctx aws.Context, input return out, req.Send() } +// ListPipelineExecutionsPages iterates over the pages of a ListPipelineExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPipelineExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPipelineExecutions operation. 
+// pageNum := 0 +// err := client.ListPipelineExecutionsPages(params, +// func(page *codepipeline.ListPipelineExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListPipelineExecutionsPages(input *ListPipelineExecutionsInput, fn func(*ListPipelineExecutionsOutput, bool) bool) error { + return c.ListPipelineExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPipelineExecutionsPagesWithContext same as ListPipelineExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListPipelineExecutionsPagesWithContext(ctx aws.Context, input *ListPipelineExecutionsInput, fn func(*ListPipelineExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPipelineExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPipelineExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPipelineExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPipelines = "ListPipelines" // ListPipelinesRequest generates a "aws/request.Request" representing the @@ -1613,6 +1817,12 @@ func (c *CodePipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *req Name: opListPipelines, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -1641,7 +1851,7 @@ func (c *CodePipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *req // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The next token was specified in an invalid format. Make sure that the next -// token you provided is the token returned by a previous call. +// token you provide is the token returned by a previous call. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListPipelines func (c *CodePipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { @@ -1665,6 +1875,205 @@ func (c *CodePipeline) ListPipelinesWithContext(ctx aws.Context, input *ListPipe return out, req.Send() } +// ListPipelinesPages iterates over the pages of a ListPipelines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPipelines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPipelines operation. 
+// pageNum := 0 +// err := client.ListPipelinesPages(params, +// func(page *codepipeline.ListPipelinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListPipelinesPages(input *ListPipelinesInput, fn func(*ListPipelinesOutput, bool) bool) error { + return c.ListPipelinesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPipelinesPagesWithContext same as ListPipelinesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListPipelinesPagesWithContext(ctx aws.Context, input *ListPipelinesInput, fn func(*ListPipelinesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPipelinesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPipelinesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListTagsForResource +func (c *CodePipeline) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS CodePipeline. +// +// Gets the set of key-value pairs (metadata) that are used to manage the resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodePipeline's +// API operation ListTagsForResource for usage and error information. 
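+//
+// A short sketch, assuming a constructed svc client and a placeholder ARN,
+// that drains every page of tags with the ListTagsForResourcePages helper
+// defined later in this file:
+//
+//    err := svc.ListTagsForResourcePages(&codepipeline.ListTagsForResourceInput{
+//        ResourceArn: aws.String("arn:aws:codepipeline:us-east-1:111111111111:MyFirstPipeline"),
+//    }, func(page *codepipeline.ListTagsForResourceOutput, lastPage bool) bool {
+//        for _, tag := range page.Tags {
+//            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//        }
+//        return true // keep paging until the SDK reports the last page
+//    })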
+// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The validation was specified in an invalid format. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was specified in an invalid format. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The next token was specified in an invalid format. Make sure that the next +// token you provide is the token returned by a previous call. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified resource ARN is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListTagsForResource +func (c *CodePipeline) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. +// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *codepipeline.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListWebhooks = "ListWebhooks" // ListWebhooksRequest generates a "aws/request.Request" representing the @@ -1696,6 +2105,12 @@ func (c *CodePipeline) ListWebhooksRequest(input *ListWebhooksInput) (req *reque Name: opListWebhooks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1709,8 +2124,8 @@ func (c *CodePipeline) ListWebhooksRequest(input *ListWebhooksInput) (req *reque // ListWebhooks API operation for AWS CodePipeline. // -// Gets a listing of all the webhooks in this region for this account. The output -// lists all webhooks and includes the webhook URL and ARN, as well the configuration +// Gets a listing of all the webhooks in this AWS Region for this account. The +// output lists all webhooks and includes the webhook URL and ARN and the configuration // for each webhook. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1726,7 +2141,7 @@ func (c *CodePipeline) ListWebhooksRequest(input *ListWebhooksInput) (req *reque // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The next token was specified in an invalid format. Make sure that the next -// token you provided is the token returned by a previous call. +// token you provide is the token returned by a previous call. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/ListWebhooks func (c *CodePipeline) ListWebhooks(input *ListWebhooksInput) (*ListWebhooksOutput, error) { @@ -1750,6 +2165,58 @@ func (c *CodePipeline) ListWebhooksWithContext(ctx aws.Context, input *ListWebho return out, req.Send() } +// ListWebhooksPages iterates over the pages of a ListWebhooks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWebhooks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWebhooks operation. +// pageNum := 0 +// err := client.ListWebhooksPages(params, +// func(page *codepipeline.ListWebhooksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodePipeline) ListWebhooksPages(input *ListWebhooksInput, fn func(*ListWebhooksOutput, bool) bool) error { + return c.ListWebhooksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListWebhooksPagesWithContext same as ListWebhooksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
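+//
+// A hedged sketch of paging webhooks under a deadline, assuming a constructed
+// svc client and the standard library context and time packages; a
+// context.Context satisfies aws.Context directly:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := svc.ListWebhooksPagesWithContext(ctx, &codepipeline.ListWebhooksInput{},
+//        func(page *codepipeline.ListWebhooksOutput, lastPage bool) bool {
+//            fmt.Println("webhooks on this page:", len(page.Webhooks))
+//            return !lastPage
+//        })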
+func (c *CodePipeline) ListWebhooksPagesWithContext(ctx aws.Context, input *ListWebhooksInput, fn func(*ListWebhooksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListWebhooksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListWebhooksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListWebhooksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPollForJobs = "PollForJobs" // PollForJobsRequest generates a "aws/request.Request" representing the @@ -1794,15 +2261,15 @@ func (c *CodePipeline) PollForJobsRequest(input *PollForJobsInput) (req *request // PollForJobs API operation for AWS CodePipeline. // -// Returns information about any jobs for AWS CodePipeline to act upon. PollForJobs -// is only valid for action types with "Custom" in the owner field. If the action +// Returns information about any jobs for AWS CodePipeline to act on. PollForJobs +// is valid only for action types with "Custom" in the owner field. If the action // type contains "AWS" or "ThirdParty" in the owner field, the PollForJobs action // returns an error. // // When this API is called, AWS CodePipeline returns temporary credentials for // the Amazon S3 bucket used to store artifacts for the pipeline, if the action -// requires access to that Amazon S3 bucket for input or output artifacts. Additionally, -// this API returns any secret values defined for the action. +// requires access to that Amazon S3 bucket for input or output artifacts. This +// API also returns any secret values defined for the action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1885,7 +2352,7 @@ func (c *CodePipeline) PollForThirdPartyJobsRequest(input *PollForThirdPartyJobs // PollForThirdPartyJobs API operation for AWS CodePipeline. // // Determines whether there are any third party jobs for a job worker to act -// on. Only used for partner actions. +// on. Used for partner actions only. // // When this API is called, AWS CodePipeline returns temporary credentials for // the Amazon S3 bucket used to store artifacts for the pipeline, if the action @@ -1982,10 +2449,10 @@ func (c *CodePipeline) PutActionRevisionRequest(input *PutActionRevisionInput) ( // // Returned Error Codes: // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeStageNotFoundException "StageNotFoundException" -// The specified stage was specified in an invalid format or cannot be found. +// The stage was specified in an invalid format or cannot be found. // // * ErrCodeActionNotFoundException "ActionNotFoundException" // The specified action cannot be found. @@ -2077,10 +2544,10 @@ func (c *CodePipeline) PutApprovalResultRequest(input *PutApprovalResultInput) ( // The approval action has already been approved or rejected. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. 
// // * ErrCodeStageNotFoundException "StageNotFoundException" -// The specified stage was specified in an invalid format or cannot be found. +// The stage was specified in an invalid format or cannot be found. // // * ErrCodeActionNotFoundException "ActionNotFoundException" // The specified action cannot be found. @@ -2156,7 +2623,7 @@ func (c *CodePipeline) PutJobFailureResultRequest(input *PutJobFailureResultInpu // PutJobFailureResult API operation for AWS CodePipeline. // // Represents the failure of a job as returned to the pipeline by a job worker. -// Only used for custom actions. +// Used for custom actions only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2170,10 +2637,10 @@ func (c *CodePipeline) PutJobFailureResultRequest(input *PutJobFailureResultInpu // The validation was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeInvalidJobStateException "InvalidJobStateException" -// The specified job state was specified in an invalid format. +// The job state was specified in an invalid format. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutJobFailureResult func (c *CodePipeline) PutJobFailureResult(input *PutJobFailureResultInput) (*PutJobFailureResultOutput, error) { @@ -2243,7 +2710,7 @@ func (c *CodePipeline) PutJobSuccessResultRequest(input *PutJobSuccessResultInpu // PutJobSuccessResult API operation for AWS CodePipeline. // // Represents the success of a job as returned to the pipeline by a job worker. -// Only used for custom actions. +// Used for custom actions only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2257,10 +2724,13 @@ func (c *CodePipeline) PutJobSuccessResultRequest(input *PutJobSuccessResultInpu // The validation was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeInvalidJobStateException "InvalidJobStateException" -// The specified job state was specified in an invalid format. +// The job state was specified in an invalid format. +// +// * ErrCodeOutputVariablesSizeExceededException "OutputVariablesSizeExceededException" +// Exceeded the total size limit for all variables in the pipeline. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutJobSuccessResult func (c *CodePipeline) PutJobSuccessResult(input *PutJobSuccessResultInput) (*PutJobSuccessResultOutput, error) { @@ -2330,7 +2800,7 @@ func (c *CodePipeline) PutThirdPartyJobFailureResultRequest(input *PutThirdParty // PutThirdPartyJobFailureResult API operation for AWS CodePipeline. // // Represents the failure of a third party job as returned to the pipeline by -// a job worker. Only used for partner actions. +// a job worker. Used for partner actions only. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2344,10 +2814,10 @@ func (c *CodePipeline) PutThirdPartyJobFailureResultRequest(input *PutThirdParty // The validation was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeInvalidJobStateException "InvalidJobStateException" -// The specified job state was specified in an invalid format. +// The job state was specified in an invalid format. // // * ErrCodeInvalidClientTokenException "InvalidClientTokenException" // The client token was specified in an invalid format @@ -2420,7 +2890,7 @@ func (c *CodePipeline) PutThirdPartyJobSuccessResultRequest(input *PutThirdParty // PutThirdPartyJobSuccessResult API operation for AWS CodePipeline. // // Represents the success of a third party job as returned to the pipeline by -// a job worker. Only used for partner actions. +// a job worker. Used for partner actions only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2434,10 +2904,10 @@ func (c *CodePipeline) PutThirdPartyJobSuccessResultRequest(input *PutThirdParty // The validation was specified in an invalid format. // // * ErrCodeJobNotFoundException "JobNotFoundException" -// The specified job was specified in an invalid format or cannot be found. +// The job was specified in an invalid format or cannot be found. // // * ErrCodeInvalidJobStateException "InvalidJobStateException" -// The specified job state was specified in an invalid format. +// The job state was specified in an invalid format. // // * ErrCodeInvalidClientTokenException "InvalidClientTokenException" // The client token was specified in an invalid format @@ -2539,7 +3009,16 @@ func (c *CodePipeline) PutWebhookRequest(input *PutWebhookInput) (req *request.R // The specified authentication type is in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The tags limit for a resource has been exceeded. +// +// * ErrCodeInvalidTagsException "InvalidTagsException" +// The specified resource tags are invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/PutWebhook func (c *CodePipeline) PutWebhook(input *PutWebhookInput) (*PutWebhookOutput, error) { @@ -2692,6 +3171,9 @@ func (c *CodePipeline) RetryStageExecutionRequest(input *RetryStageExecutionInpu // RetryStageExecution API operation for AWS CodePipeline. // // Resumes the pipeline execution by retrying the last failed actions in a stage. +// You can retry a stage immediately if any of the actions in the stage fail. +// When you retry, all actions that are still in progress continue working, +// and failed actions are triggered again. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2705,16 +3187,14 @@ func (c *CodePipeline) RetryStageExecutionRequest(input *RetryStageExecutionInpu // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // * ErrCodeStageNotFoundException "StageNotFoundException" -// The specified stage was specified in an invalid format or cannot be found. +// The stage was specified in an invalid format or cannot be found. // // * ErrCodeStageNotRetryableException "StageNotRetryableException" -// The specified stage can't be retried because the pipeline structure or stage -// state changed after the stage was not completed; the stage contains no failed -// actions; one or more actions are still in progress; or another retry attempt -// is already in progress. +// Unable to retry. The pipeline structure or stage state might have changed +// while actions awaited retry, or the stage contains no failed actions. // // * ErrCodeNotLatestPipelineExecutionException "NotLatestPipelineExecutionException" // The stage has failed in a later run of the pipeline and the pipelineExecutionId @@ -2801,7 +3281,7 @@ func (c *CodePipeline) StartPipelineExecutionRequest(input *StartPipelineExecuti // The validation was specified in an invalid format. // // * ErrCodePipelineNotFoundException "PipelineNotFoundException" -// The specified pipeline was specified in an invalid format or cannot be found. +// The pipeline was specified in an invalid format or cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/StartPipelineExecution func (c *CodePipeline) StartPipelineExecution(input *StartPipelineExecutionInput) (*StartPipelineExecutionOutput, error) { @@ -2825,6 +3305,194 @@ func (c *CodePipeline) StartPipelineExecutionWithContext(ctx aws.Context, input return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/TagResource +func (c *CodePipeline) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS CodePipeline. +// +// Adds to or modifies the tags of the given resource. Tags are metadata that +// can be used to manage a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodePipeline's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The validation was specified in an invalid format. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was specified in an invalid format. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified resource ARN is invalid. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The tags limit for a resource has been exceeded. +// +// * ErrCodeInvalidTagsException "InvalidTagsException" +// The specified resource tags are invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/TagResource +func (c *CodePipeline) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. 
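+//
+// A sketch of the full tag round trip with a placeholder pipeline ARN; it pairs
+// the TagResource operation above with the UntagResource operation below:
+//
+//    arn := aws.String("arn:aws:codepipeline:us-east-1:111111111111:MyFirstPipeline")
+//    _, err := svc.TagResource(&codepipeline.TagResourceInput{
+//        ResourceArn: arn,
+//        Tags:        []*codepipeline.Tag{{Key: aws.String("team"), Value: aws.String("release")}},
+//    })
+//    if err == nil {
+//        _, err = svc.UntagResource(&codepipeline.UntagResourceInput{
+//            ResourceArn: arn,
+//            TagKeys:     []*string{aws.String("team")},
+//        })
+//    }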
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/UntagResource +func (c *CodePipeline) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS CodePipeline. +// +// Removes tags from an AWS resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodePipeline's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The validation was specified in an invalid format. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was specified in an invalid format. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The specified resource ARN is invalid. +// +// * ErrCodeInvalidTagsException "InvalidTagsException" +// The specified resource tags are invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Unable to modify the tag due to a simultaneous update request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codepipeline-2015-07-09/UntagResource +func (c *CodePipeline) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodePipeline) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdatePipeline = "UpdatePipeline" // UpdatePipelineRequest generates a "aws/request.Request" representing the @@ -2870,9 +3538,9 @@ func (c *CodePipeline) UpdatePipelineRequest(input *UpdatePipelineInput) (req *r // UpdatePipeline API operation for AWS CodePipeline. // // Updates a specified pipeline with edits or changes to its structure. Use -// a JSON file with the pipeline structure in conjunction with UpdatePipeline -// to provide the full structure of the pipeline. 
Updating the pipeline increases -// the version number of the pipeline by 1. +// a JSON file with the pipeline structure and UpdatePipeline to provide the +// full structure of the pipeline. Updating the pipeline increases the version +// number of the pipeline by 1. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2886,16 +3554,16 @@ func (c *CodePipeline) UpdatePipelineRequest(input *UpdatePipelineInput) (req *r // The validation was specified in an invalid format. // // * ErrCodeInvalidStageDeclarationException "InvalidStageDeclarationException" -// The specified stage declaration was specified in an invalid format. +// The stage declaration was specified in an invalid format. // // * ErrCodeInvalidActionDeclarationException "InvalidActionDeclarationException" -// The specified action declaration was specified in an invalid format. +// The action declaration was specified in an invalid format. // // * ErrCodeInvalidBlockerDeclarationException "InvalidBlockerDeclarationException" // Reserved for future use. // // * ErrCodeInvalidStructureException "InvalidStructureException" -// The specified structure was specified in an invalid format. +// The structure was specified in an invalid format. // // * ErrCodeLimitExceededException "LimitExceededException" // The number of pipelines associated with the AWS account has exceeded the @@ -3187,8 +3855,8 @@ func (s *ActionConfiguration) SetConfiguration(v map[string]*string) *ActionConf type ActionConfigurationProperty struct { _ struct{} `type:"structure"` - // The description of the action configuration property that will be displayed - // to users. + // The description of the action configuration property that is displayed to + // users. Description *string `locationName:"description" min:"1" type:"string"` // Whether the configuration property is a key. @@ -3201,14 +3869,14 @@ type ActionConfigurationProperty struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // Indicates that the property will be used in conjunction with PollForJobs. - // When creating a custom action, an action can have up to one queryable property. - // If it has one, that property must be both required and not secret. + // Indicates that the property is used with PollForJobs. When creating a custom + // action, an action can have up to one queryable property. If it has one, that + // property must be both required and not secret. // // If you create a pipeline with a custom action type, and that custom action // contains a queryable property, the value for that configuration property - // is subject to additional restrictions. The value must be less than or equal - // to twenty (20) characters. The value can contain only alphanumeric characters, + // is subject to other restrictions. The value must be less than or equal to + // twenty (20) characters. The value can contain only alphanumeric characters, // underscores, and hyphens. Queryable *bool `locationName:"queryable" type:"boolean"` @@ -3222,7 +3890,7 @@ type ActionConfigurationProperty struct { // PollForThirdPartyJobs. // // When updating a pipeline, passing * * * * * without changing any other values - // of the action will preserve the prior value of the secret. + // of the action preserves the previous value of the secret. 
// // Secret is a required field Secret *bool `locationName:"secret" type:"boolean" required:"true"` @@ -3311,15 +3979,14 @@ func (s *ActionConfigurationProperty) SetType(v string) *ActionConfigurationProp return s } -// Represents the context of an action within the stage of a pipeline to a job -// worker. +// Represents the context of an action in the stage of a pipeline to a job worker. type ActionContext struct { _ struct{} `type:"structure"` // The system-generated unique ID that corresponds to an action's execution. ActionExecutionId *string `locationName:"actionExecutionId" type:"string"` - // The name of the action within the context of a job. + // The name of the action in the context of a job. Name *string `locationName:"name" min:"1" type:"string"` } @@ -3349,12 +4016,26 @@ func (s *ActionContext) SetName(v string) *ActionContext { type ActionDeclaration struct { _ struct{} `type:"structure"` - // The configuration information for the action type. + // Specifies the action type and the provider of the action. // // ActionTypeId is a required field ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` - // The action declaration's configuration. + // The action's configuration. These are key-value pairs that specify input + // values for an action. For more information, see Action Structure Requirements + // in CodePipeline (https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements). + // For the list of configuration properties for the AWS CloudFormation action + // type in CodePipeline, see Configuration Properties Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-action-reference.html) + // in the AWS CloudFormation User Guide. For template snippets with examples, + // see Using Parameter Override Functions with CodePipeline Pipelines (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-parameter-override-functions.html) + // in the AWS CloudFormation User Guide. + // + // The values can be represented in either JSON or YAML format. For example, + // the JSON configuration item format is as follows: + // + // JSON: + // + // "Configuration" : { Key : Value }, Configuration map[string]*string `locationName:"configuration" type:"map"` // The name or ID of the artifact consumed by the action, such as a test or @@ -3366,6 +4047,10 @@ type ActionDeclaration struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // The variable namespace associated with the action. All variables produced + // as output by this action fall under this namespace. + Namespace *string `locationName:"namespace" min:"1" type:"string"` + // The name or ID of the result of the action declaration, such as a test or // build artifact. OutputArtifacts []*OutputArtifact `locationName:"outputArtifacts" type:"list"` @@ -3373,8 +4058,8 @@ type ActionDeclaration struct { // The action declaration's AWS Region, such as us-east-1. Region *string `locationName:"region" min:"4" type:"string"` - // The ARN of the IAM service role that will perform the declared action. This - // is assumed through the roleArn for the pipeline. + // The ARN of the IAM service role that performs the declared action. This is + // assumed through the roleArn for the pipeline. RoleArn *string `locationName:"roleArn" type:"string"` // The order in which actions are run. 
@@ -3403,6 +4088,9 @@ func (s *ActionDeclaration) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } if s.Region != nil && len(*s.Region) < 4 { invalidParams.Add(request.NewErrParamMinLen("Region", 4)) } @@ -3465,6 +4153,12 @@ func (s *ActionDeclaration) SetName(v string) *ActionDeclaration { return s } +// SetNamespace sets the Namespace field's value. +func (s *ActionDeclaration) SetNamespace(v string) *ActionDeclaration { + s.Namespace = &v + return s +} + // SetOutputArtifacts sets the OutputArtifacts field's value. func (s *ActionDeclaration) SetOutputArtifacts(v []*OutputArtifact) *ActionDeclaration { s.OutputArtifacts = v @@ -3499,8 +4193,8 @@ type ActionExecution struct { // The external ID of the run of the action. ExternalExecutionId *string `locationName:"externalExecutionId" min:"1" type:"string"` - // The URL of a resource external to AWS that will be used when running the - // action, for example an external repository URL. + // The URL of a resource external to AWS that is used when running the action + // (for example, an external repository URL). ExternalExecutionUrl *string `locationName:"externalExecutionUrl" min:"1" type:"string"` // The last status change of the action. @@ -3521,7 +4215,7 @@ type ActionExecution struct { // The system-generated token used to identify a unique approval request. The // token for each open approval request can be obtained using the GetPipelineState - // command and is used to validate that the approval request corresponding to + // command. It is used to validate that the approval request corresponding to // this token is still valid. Token *string `locationName:"token" type:"string"` } @@ -3735,9 +4429,17 @@ type ActionExecutionInput struct { // Details of input artifacts of the action that correspond to the action execution. InputArtifacts []*ArtifactDetail `locationName:"inputArtifacts" type:"list"` + // The variable namespace associated with the action. All variables produced + // as output by this action fall under this namespace. + Namespace *string `locationName:"namespace" min:"1" type:"string"` + // The AWS Region for the action, such as us-east-1. Region *string `locationName:"region" min:"4" type:"string"` + // Configuration data for an action execution with all variable references replaced + // with their real values for the execution. + ResolvedConfiguration map[string]*string `locationName:"resolvedConfiguration" type:"map"` + // The ARN of the IAM service role that performs the declared action. This is // assumed through the roleArn for the pipeline. RoleArn *string `locationName:"roleArn" type:"string"` @@ -3771,12 +4473,24 @@ func (s *ActionExecutionInput) SetInputArtifacts(v []*ArtifactDetail) *ActionExe return s } +// SetNamespace sets the Namespace field's value. +func (s *ActionExecutionInput) SetNamespace(v string) *ActionExecutionInput { + s.Namespace = &v + return s +} + // SetRegion sets the Region field's value. func (s *ActionExecutionInput) SetRegion(v string) *ActionExecutionInput { s.Region = &v return s } +// SetResolvedConfiguration sets the ResolvedConfiguration field's value. +func (s *ActionExecutionInput) SetResolvedConfiguration(v map[string]*string) *ActionExecutionInput { + s.ResolvedConfiguration = v + return s +} + // SetRoleArn sets the RoleArn field's value. 
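The Namespace field added above scopes an action's output variables so that a later action can reference them from its own Configuration. A sketch assuming CodePipeline's documented #{namespace.variable} reference syntax; the names SourceVariables, CommitId, and UserParameters are all illustrative:

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// The source action publishes its output variables under the "SourceVariables"
// namespace; a later action consumes one through the #{namespace.variable}
// syntax inside its own Configuration.
func namespacedActions() []*codepipeline.ActionDeclaration {
    source := &codepipeline.ActionDeclaration{
        Name:      aws.String("Source"),
        Namespace: aws.String("SourceVariables"),
        // ActionTypeId, OutputArtifacts, and the rest elided for brevity.
    }
    deploy := &codepipeline.ActionDeclaration{
        Name: aws.String("Deploy"),
        Configuration: map[string]*string{
            // Resolved at run time; compare ResolvedConfiguration above.
            "UserParameters": aws.String("#{SourceVariables.CommitId}"),
        },
    }
    return []*codepipeline.ActionDeclaration{source, deploy}
}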
func (s *ActionExecutionInput) SetRoleArn(v string) *ActionExecutionInput { s.RoleArn = &v @@ -3793,6 +4507,10 @@ type ActionExecutionOutput struct { // Details of output artifacts of the action that correspond to the action execution. OutputArtifacts []*ArtifactDetail `locationName:"outputArtifacts" type:"list"` + + // The outputVariables field shows the key-value pairs that were output as part + // of that execution. + OutputVariables map[string]*string `locationName:"outputVariables" type:"map"` } // String returns the string representation @@ -3817,6 +4535,12 @@ func (s *ActionExecutionOutput) SetOutputArtifacts(v []*ArtifactDetail) *ActionE return s } +// SetOutputVariables sets the OutputVariables field's value. +func (s *ActionExecutionOutput) SetOutputVariables(v map[string]*string) *ActionExecutionOutput { + s.OutputVariables = v + return s +} + // Execution result information, such as the external execution ID. type ActionExecutionResult struct { _ struct{} `type:"structure"` @@ -3870,8 +4594,8 @@ type ActionRevision struct { // Created is a required field Created *time.Time `locationName:"created" type:"timestamp" required:"true"` - // The unique identifier of the change that set the state to this revision, - // for example a deployment ID or timestamp. + // The unique identifier of the change that set the state to this revision (for + // example, a deployment ID or timestamp). // // RevisionChangeId is a required field RevisionChangeId *string `locationName:"revisionChangeId" min:"1" type:"string" required:"true"` @@ -4070,7 +4794,7 @@ type ActionTypeId struct { // A category defines what kind of action can be taken in the stage, and constrains // the provider type for the action. Valid categories are limited to one of - // the values below. + // the following values. // // Category is a required field Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` @@ -4083,8 +4807,8 @@ type ActionTypeId struct { // The provider of the service being called by the action. Valid providers are // determined by the action category. For example, an action in the Deploy category // type might have a provider of AWS CodeDeploy, which would be specified as - // CodeDeploy. To reference a list of action providers by action type, see Valid - // Action Types and Providers in CodePipeline (https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers). + // CodeDeploy. For more information, see Valid Action Types and Providers in + // CodePipeline (https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers). // // Provider is a required field Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` @@ -4164,11 +4888,11 @@ type ActionTypeSettings struct { // The URL returned to the AWS CodePipeline console that provides a deep link // to the resources of the external system, such as the configuration page for // an AWS CodeDeploy deployment group. This link is provided as part of the - // action display within the pipeline. + // action display in the pipeline. EntityUrlTemplate *string `locationName:"entityUrlTemplate" min:"1" type:"string"` // The URL returned to the AWS CodePipeline console that contains a link to - // the top-level landing page for the external system, such as console page + // the top-level landing page for the external system, such as the console page // for AWS CodeDeploy. 
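Output variables also surface when reading execution history. A sketch that walks them via ListActionExecutions, an operation this SDK version lists in its doc.go overview later in the diff; the pipeline name is a placeholder:

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// dumpOutputVariables prints every variable emitted by past action executions.
func dumpOutputVariables(svc *codepipeline.CodePipeline) error {
    out, err := svc.ListActionExecutions(&codepipeline.ListActionExecutionsInput{
        PipelineName: aws.String("my-pipeline"), // placeholder
    })
    if err != nil {
        return err
    }
    for _, d := range out.ActionExecutionDetails {
        if d.Output == nil {
            continue
        }
        for k, v := range d.Output.OutputVariables {
            fmt.Printf("%s: %s=%s\n", aws.StringValue(d.ActionName), k, aws.StringValue(v))
        }
    }
    return nil
}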
This link is shown on the pipeline view page in the AWS // CodePipeline console and provides a link to the execution entity of the external // action. @@ -4293,8 +5017,8 @@ func (s *ApprovalResult) SetSummary(v string) *ApprovalResult { return s } -// Represents information about an artifact that will be worked upon by actions -// in the pipeline. +// Represents information about an artifact that is worked on by actions in +// the pipeline. type Artifact struct { _ struct{} `type:"structure"` @@ -4465,7 +5189,7 @@ type ArtifactRevision struct { Created *time.Time `locationName:"created" type:"timestamp"` // The name of an artifact. This name might be system-generated, such as "MyApp", - // or might be defined by the user when an action is created. + // or defined by the user when an action is created. Name *string `locationName:"name" min:"1" type:"string"` // An additional identifier for a revision, such as a commit date or, for artifacts @@ -4533,7 +5257,11 @@ func (s *ArtifactRevision) SetRevisionUrl(v string) *ArtifactRevision { return s } -// The Amazon S3 bucket where artifacts are stored for the pipeline. +// The Amazon S3 bucket where artifacts for the pipeline are stored. +// +// You must include either artifactStore or artifactStores in your pipeline, +// but you cannot use both. If you create a cross-region action in your pipeline, +// you must use artifactStores. type ArtifactStore struct { _ struct{} `type:"structure"` @@ -4543,7 +5271,7 @@ type ArtifactStore struct { EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` // The Amazon S3 bucket used for storing the artifacts for a pipeline. You can - // specify the name of an S3 bucket but not a folder within the bucket. A folder + // specify the name of an S3 bucket but not a folder in the bucket. A folder // to contain the pipeline artifacts is created for you based on the name of // the pipeline. You can use any Amazon S3 bucket in the same AWS Region as // the pipeline to store your pipeline artifacts. @@ -4703,6 +5431,9 @@ type CreateCustomActionTypeInput struct { // URLs that provide users information about this custom action. Settings *ActionTypeSettings `locationName:"settings" type:"structure"` + // The tags for the custom action. + Tags []*Tag `locationName:"tags" type:"list"` + // The version identifier of the custom action. // // Version is a required field @@ -4768,6 +5499,16 @@ func (s *CreateCustomActionTypeInput) Validate() error { invalidParams.AddNested("Settings", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4811,6 +5552,12 @@ func (s *CreateCustomActionTypeInput) SetSettings(v *ActionTypeSettings) *Create return s } +// SetTags sets the Tags field's value. +func (s *CreateCustomActionTypeInput) SetTags(v []*Tag) *CreateCustomActionTypeInput { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *CreateCustomActionTypeInput) SetVersion(v string) *CreateCustomActionTypeInput { s.Version = &v @@ -4825,6 +5572,9 @@ type CreateCustomActionTypeOutput struct { // // ActionType is a required field ActionType *ActionType `locationName:"actionType" type:"structure" required:"true"` + + // Specifies the tags applied to the custom action. 
+ Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -4843,6 +5593,12 @@ func (s *CreateCustomActionTypeOutput) SetActionType(v *ActionType) *CreateCusto return s } +// SetTags sets the Tags field's value. +func (s *CreateCustomActionTypeOutput) SetTags(v []*Tag) *CreateCustomActionTypeOutput { + s.Tags = v + return s +} + // Represents the input of a CreatePipeline action. type CreatePipelineInput struct { _ struct{} `type:"structure"` @@ -4851,6 +5607,9 @@ type CreatePipelineInput struct { // // Pipeline is a required field Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure" required:"true"` + + // The tags for the pipeline. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -4874,6 +5633,16 @@ func (s *CreatePipelineInput) Validate() error { invalidParams.AddNested("Pipeline", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4887,12 +5656,21 @@ func (s *CreatePipelineInput) SetPipeline(v *PipelineDeclaration) *CreatePipelin return s } +// SetTags sets the Tags field's value. +func (s *CreatePipelineInput) SetTags(v []*Tag) *CreatePipelineInput { + s.Tags = v + return s +} + // Represents the output of a CreatePipeline action. type CreatePipelineOutput struct { _ struct{} `type:"structure"` // Represents the structure of actions and stages to be performed in the pipeline. Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` + + // Specifies the tags applied to the pipeline. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -4911,6 +5689,12 @@ func (s *CreatePipelineOutput) SetPipeline(v *PipelineDeclaration) *CreatePipeli return s } +// SetTags sets the Tags field's value. +func (s *CreatePipelineOutput) SetTags(v []*Tag) *CreatePipelineOutput { + s.Tags = v + return s +} + // Represents information about a current revision. type CurrentRevision struct { _ struct{} `type:"structure"` @@ -5252,7 +6036,7 @@ type DisableStageTransitionInput struct { // PipelineName is a required field PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` - // The reason given to the user why a stage is disabled, such as waiting for + // The reason given to the user that a stage is disabled, such as waiting for // manual approval or manual tests. This message is displayed in the pipeline // console UI. // @@ -5265,8 +6049,8 @@ type DisableStageTransitionInput struct { // StageName is a required field StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` - // Specifies whether artifacts will be prevented from transitioning into the - // stage and being processed by the actions in that stage (inbound), or prevented + // Specifies whether artifacts are prevented from transitioning into the stage + // and being processed by the actions in that stage (inbound), or prevented // from transitioning from the stage after they have been processed by the actions // in that stage (outbound). 
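A minimal sketch of disabling an inbound transition as described above; StageTransitionTypeInbound is the counterpart of the StageTransitionTypeOutbound constant declared later in this file, and the pipeline and stage names are placeholders:

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// freezeStage blocks new artifacts from entering the named stage; the Reason
// text is what the pipeline console UI displays to users.
func freezeStage(svc *codepipeline.CodePipeline) error {
    _, err := svc.DisableStageTransition(&codepipeline.DisableStageTransitionInput{
        PipelineName:   aws.String("my-pipeline"), // placeholder
        StageName:      aws.String("Prod"),        // placeholder
        TransitionType: aws.String(codepipeline.StageTransitionTypeInbound),
        Reason:         aws.String("waiting for manual approval"),
    })
    return err
}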
// @@ -5369,9 +6153,9 @@ type EnableStageTransitionInput struct { // StageName is a required field StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` - // Specifies whether artifacts will be allowed to enter the stage and be processed - // by the actions in that stage (inbound) or whether already-processed artifacts - // will be allowed to transition to the next stage (outbound). + // Specifies whether artifacts are allowed to enter the stage and be processed + // by the actions in that stage (inbound) or whether already processed artifacts + // are allowed to transition to the next stage (outbound). // // TransitionType is a required field TransitionType *string `locationName:"transitionType" type:"string" required:"true" enum:"StageTransitionType"` @@ -5449,8 +6233,12 @@ func (s EnableStageTransitionOutput) GoString() string { type EncryptionKey struct { _ struct{} `type:"structure"` - // The ID used to identify the key. For an AWS KMS key, this is the key ID or - // key ARN. + // The ID used to identify the key. For an AWS KMS key, you can use the key + // ID, the key ARN, or the alias ARN. + // + // Aliases are recognized only in the account that created the customer master + // key (CMK). For cross-account actions, you can only use the key ID or key + // ARN to identify the key. // // Id is a required field Id *string `locationName:"id" min:"1" type:"string" required:"true"` @@ -5507,7 +6295,7 @@ func (s *EncryptionKey) SetType(v string) *EncryptionKey { type ErrorDetails struct { _ struct{} `type:"structure"` - // The system ID or error number code of the error. + // The system ID or number code of the error. Code *string `locationName:"code" type:"string"` // The text of the error message. @@ -5546,7 +6334,7 @@ type ExecutionDetails struct { ExternalExecutionId *string `locationName:"externalExecutionId" min:"1" type:"string"` // The percentage of work completed on the action, represented on a scale of - // zero to one hundred percent. + // 0 to 100 percent. PercentComplete *int64 `locationName:"percentComplete" type:"integer"` // The summary of the current status of the actions. @@ -5597,6 +6385,42 @@ func (s *ExecutionDetails) SetSummary(v string) *ExecutionDetails { return s } +// The interaction or event that started a pipeline execution. +type ExecutionTrigger struct { + _ struct{} `type:"structure"` + + // Detail related to the event that started a pipeline execution, such as the + // webhook ARN of the webhook that triggered the pipeline execution or the user + // ARN for a user-initiated start-pipeline-execution CLI command. + TriggerDetail *string `locationName:"triggerDetail" type:"string"` + + // The type of change-detection method, command, or user interaction that started + // a pipeline execution. + TriggerType *string `locationName:"triggerType" type:"string" enum:"TriggerType"` +} + +// String returns the string representation +func (s ExecutionTrigger) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionTrigger) GoString() string { + return s.String() +} + +// SetTriggerDetail sets the TriggerDetail field's value. +func (s *ExecutionTrigger) SetTriggerDetail(v string) *ExecutionTrigger { + s.TriggerDetail = &v + return s +} + +// SetTriggerType sets the TriggerType field's value. +func (s *ExecutionTrigger) SetTriggerType(v string) *ExecutionTrigger { + s.TriggerType = &v + return s +} + // Represents information about failure details. 
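The new ExecutionTrigger type is attached to PipelineExecutionSummary through the Trigger field added later in this diff. A sketch that inspects how recent executions were started; the pipeline name is a placeholder:

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// printTriggers reports the trigger type and detail for recent executions.
// TriggerType is one of the TriggerType* enum values declared later in this file.
func printTriggers(svc *codepipeline.CodePipeline) error {
    out, err := svc.ListPipelineExecutions(&codepipeline.ListPipelineExecutionsInput{
        PipelineName: aws.String("my-pipeline"), // placeholder
        MaxResults:   aws.Int64(10),
    })
    if err != nil {
        return err
    }
    for _, s := range out.PipelineExecutionSummaries {
        if s.Trigger == nil {
            continue
        }
        fmt.Printf("%s started by %s (%s)\n",
            aws.StringValue(s.PipelineExecutionId),
            aws.StringValue(s.Trigger.TriggerType),
            aws.StringValue(s.Trigger.TriggerDetail))
    }
    return nil
}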
type FailureDetails struct { _ struct{} `type:"structure"` @@ -5816,13 +6640,13 @@ type GetPipelineInput struct { _ struct{} `type:"structure"` // The name of the pipeline for which you want to get information. Pipeline - // names must be unique under an Amazon Web Services (AWS) user account. + // names must be unique under an AWS user account. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` // The version number of the pipeline. If you do not specify a version, defaults - // to the most current version. + // to the current version. Version *int64 `locationName:"version" min:"1" type:"integer"` } @@ -5955,7 +6779,7 @@ type GetPipelineStateOutput struct { // The version number of the pipeline. // - // A newly-created pipeline is always assigned a version number of 1. + // A newly created pipeline is always assigned a version number of 1. PipelineVersion *int64 `locationName:"pipelineVersion" min:"1" type:"integer"` // A list of the pipeline stage output information, including stage name, state, @@ -6095,7 +6919,7 @@ func (s *GetThirdPartyJobDetailsOutput) SetJobDetails(v *ThirdPartyJobDetails) * type InputArtifact struct { _ struct{} `type:"structure"` - // The name of the artifact to be worked on, for example, "My App". + // The name of the artifact to be worked on (for example, "My App"). // // The input artifact of an action must exactly match the output artifact declared // in a preceding action, but the input artifact does not have to be the next @@ -6146,7 +6970,7 @@ type Job struct { // The ID of the AWS account to use when performing the job. AccountId *string `locationName:"accountId" type:"string"` - // Additional data about a job. + // Other data about a job. Data *JobData `locationName:"data" type:"structure"` // The unique system-generated ID of the job. @@ -6192,8 +7016,8 @@ func (s *Job) SetNonce(v string) *Job { return s } -// Represents additional information about a job required for a job worker to -// complete the job. +// Represents other information about a job required for a job worker to complete +// the job. type JobData struct { _ struct{} `type:"structure"` @@ -6209,8 +7033,8 @@ type JobData struct { // store artifacts for the pipeline in AWS CodePipeline. ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure" sensitive:"true"` - // A system-generated token, such as a AWS CodeDeploy deployment ID, that a - // job requires in order to continue the job asynchronously. + // A system-generated token, such as a AWS CodeDeploy deployment ID, required + // by a job to continue the job asynchronously. ContinuationToken *string `locationName:"continuationToken" min:"1" type:"string"` // Represents information about the key used to encrypt data in the artifact @@ -6225,7 +7049,7 @@ type JobData struct { // Represents information about a pipeline to a job worker. // - // Includes pipelineArn and pipelineExecutionId for Custom jobs. + // Includes pipelineArn and pipelineExecutionId for custom jobs. PipelineContext *PipelineContext `locationName:"pipelineContext" type:"structure"` } @@ -6294,8 +7118,8 @@ type JobDetails struct { // The AWS account ID associated with the job. AccountId *string `locationName:"accountId" type:"string"` - // Represents additional information about a job required for a job worker to - // complete the job. + // Represents other information about a job required for a job worker to complete + // the job. 
Data *JobData `locationName:"data" type:"structure"` // The unique system-generated ID of the job. @@ -6502,8 +7326,8 @@ type ListActionTypesOutput struct { ActionTypes []*ActionType `locationName:"actionTypes" type:"list" required:"true"` // If the amount of returned information is significantly large, an identifier - // is also returned which can be used in a subsequent list action types call - // to return the next set of action types in the list. + // is also returned. It can be used in a subsequent list action types call to + // return the next set of action types in the list. NextToken *string `locationName:"nextToken" min:"1" type:"string"` } @@ -6638,7 +7462,7 @@ func (s *ListPipelineExecutionsOutput) SetPipelineExecutionSummaries(v []*Pipeli type ListPipelinesInput struct { _ struct{} `type:"structure"` - // An identifier that was returned from the previous list pipelines call, which + // An identifier that was returned from the previous list pipelines call. It // can be used to return the next set of pipelines in the list. NextToken *string `locationName:"nextToken" min:"1" type:"string"` } @@ -6677,8 +7501,8 @@ type ListPipelinesOutput struct { _ struct{} `type:"structure"` // If the amount of returned information is significantly large, an identifier - // is also returned which can be used in a subsequent list pipelines call to - // return the next set of pipelines in the list. + // is also returned. It can be used in a subsequent list pipelines call to return + // the next set of pipelines in the list. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // The list of pipelines. @@ -6707,6 +7531,105 @@ func (s *ListPipelinesOutput) SetPipelines(v []*PipelineSummary) *ListPipelinesO return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token that was returned from the previous API call, which would be used + // to return the next page of the list. The ListTagsforResource call lists all + // available tags in one call and does not use pagination. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the resource to get tags for. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
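The NextToken contract described above, where a token is returned only when more results remain, yields the usual pagination loop; a minimal sketch over ListPipelines:

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// listAllPipelines pages through every pipeline in the account.
func listAllPipelines(svc *codepipeline.CodePipeline) error {
    var token *string
    for {
        out, err := svc.ListPipelines(&codepipeline.ListPipelinesInput{NextToken: token})
        if err != nil {
            return err
        }
        for _, p := range out.Pipelines {
            fmt.Println(aws.StringValue(p.Name))
        }
        // NextToken is set only when more pipelines remain.
        if out.NextToken == nil {
            return nil
        }
        token = out.NextToken
    }
}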
+func (s *ListTagsForResourceInput) SetMaxResults(v int64) *ListTagsForResourceInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // If the amount of returned information is significantly large, an identifier + // is also returned and can be used in a subsequent API call to return the next + // page of the list. The ListTagsforResource call lists all available tags in + // one call and does not use pagination. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The tags for the resource. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // The detail returned for each webhook after listing webhooks, such as the // webhook URL, the webhook name, and the webhook ARN. type ListWebhookItem struct { @@ -6731,10 +7654,13 @@ type ListWebhookItem struct { // format. LastTriggered *time.Time `locationName:"lastTriggered" type:"timestamp"` + // Specifies the tags applied to the webhook. + Tags []*Tag `locationName:"tags" type:"list"` + // A unique URL generated by CodePipeline. When a POST request is made to this // URL, the defined pipeline is started as long as the body of the post request // satisfies the defined authentication and filtering conditions. Deleting and - // re-creating a webhook will make the old URL invalid and generate a new URL. + // re-creating a webhook makes the old URL invalid and generates a new one. // // Url is a required field Url *string `locationName:"url" min:"1" type:"string" required:"true"` @@ -6780,6 +7706,12 @@ func (s *ListWebhookItem) SetLastTriggered(v time.Time) *ListWebhookItem { return s } +// SetTags sets the Tags field's value. +func (s *ListWebhookItem) SetTags(v []*Tag) *ListWebhookItem { + s.Tags = v + return s +} + // SetUrl sets the Url field's value. func (s *ListWebhookItem) SetUrl(v string) *ListWebhookItem { s.Url = &v @@ -6929,7 +7861,7 @@ func (s *OutputArtifact) SetName(v string) *OutputArtifact { type PipelineContext struct { _ struct{} `type:"structure"` - // The context of an action to a job worker within the stage of a pipeline. + // The context of an action to a job worker in the stage of a pipeline. Action *ActionContext `locationName:"action" type:"structure"` // The Amazon Resource Name (ARN) of the pipeline. @@ -6992,14 +7924,19 @@ type PipelineDeclaration struct { // Represents information about the Amazon S3 bucket where artifacts are stored // for the pipeline. + // + // You must include either artifactStore or artifactStores in your pipeline, + // but you cannot use both. 
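As the artifactStore and artifactStores documentation above and below notes, a pipeline with cross-region actions must map one artifact store per AWS Region; a sketch of such a mapping, with placeholder bucket names:

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// crossRegionStores builds the ArtifactStores map for a PipelineDeclaration.
// Only one of ArtifactStore or ArtifactStores may be set, and cross-region
// actions require ArtifactStores keyed by Region.
func crossRegionStores() map[string]*codepipeline.ArtifactStore {
    return map[string]*codepipeline.ArtifactStore{
        "us-east-1": {Type: aws.String("S3"), Location: aws.String("my-artifacts-us-east-1")}, // placeholder bucket
        "us-west-2": {Type: aws.String("S3"), Location: aws.String("my-artifacts-us-west-2")}, // placeholder bucket
    }
}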
If you create a cross-region action in your pipeline, + // you must use artifactStores. ArtifactStore *ArtifactStore `locationName:"artifactStore" type:"structure"` - // A mapping of artifactStore objects and their corresponding regions. There - // must be an artifact store for the pipeline region and for each cross-region - // action within the pipeline. You can only use either artifactStore or artifactStores, - // not both. + // A mapping of artifactStore objects and their corresponding AWS Regions. There + // must be an artifact store for the pipeline Region and for each cross-region + // action in the pipeline. // - // If you create a cross-region action in your pipeline, you must use artifactStores. + // You must include either artifactStore or artifactStores in your pipeline, + // but you cannot use both. If you create a cross-region action in your pipeline, + // you must use artifactStores. ArtifactStores map[string]*ArtifactStore `locationName:"artifactStores" type:"map"` // The name of the action to be performed. @@ -7020,7 +7957,7 @@ type PipelineDeclaration struct { Stages []*StageDeclaration `locationName:"stages" type:"list" required:"true"` // The version number of the pipeline. A new pipeline always has a version number - // of 1. This number is automatically incremented when a pipeline is updated. + // of 1. This number is incremented when a pipeline is updated. Version *int64 `locationName:"version" min:"1" type:"integer"` } @@ -7219,6 +8156,10 @@ type PipelineExecutionSummary struct { // // * Failed: The pipeline execution was not completed successfully. Status *string `locationName:"status" type:"string" enum:"PipelineExecutionStatus"` + + // The interaction or event that started a pipeline execution, such as automated + // change detection or a StartPipelineExecution API call. + Trigger *ExecutionTrigger `locationName:"trigger" type:"structure"` } // String returns the string representation @@ -7261,6 +8202,12 @@ func (s *PipelineExecutionSummary) SetStatus(v string) *PipelineExecutionSummary return s } +// SetTrigger sets the Trigger field's value. +func (s *PipelineExecutionSummary) SetTrigger(v *ExecutionTrigger) *PipelineExecutionSummary { + s.Trigger = v + return s +} + // Information about a pipeline. type PipelineMetadata struct { _ struct{} `type:"structure"` @@ -7369,7 +8316,7 @@ type PollForJobsInput struct { // A map of property names and values. For an action type with no queryable // properties, this value must be null or an empty map. For an action type with // a queryable property, you must supply that property as a key in the map. - // Only jobs whose action configuration matches the mapped value will be returned. + // Only jobs whose action configuration matches the mapped value are returned. QueryParam map[string]*string `locationName:"queryParam" type:"map"` } @@ -7530,7 +8477,7 @@ func (s *PollForThirdPartyJobsOutput) SetJobs(v []*ThirdPartyJob) *PollForThirdP type PutActionRevisionInput struct { _ struct{} `type:"structure"` - // The name of the action that will process the revision. + // The name of the action that processes the revision. // // ActionName is a required field ActionName *string `locationName:"actionName" min:"1" type:"string" required:"true"` @@ -7540,12 +8487,12 @@ type PutActionRevisionInput struct { // ActionRevision is a required field ActionRevision *ActionRevision `locationName:"actionRevision" type:"structure" required:"true"` - // The name of the pipeline that will start processing the revision to the source. 
+ // The name of the pipeline that starts processing the revision to the source. // // PipelineName is a required field PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` - // The name of the stage that contains the action that will act upon the revision. + // The name of the stage that contains the action that acts on the revision. // // StageName is a required field StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` @@ -7681,7 +8628,7 @@ type PutApprovalResultInput struct { // The system-generated token used to identify a unique approval request. The // token for each open approval request can be obtained using the GetPipelineState - // action and is used to validate that the approval request corresponding to + // action. It is used to validate that the approval request corresponding to // this token is still valid. // // Token is a required field @@ -7870,13 +8817,12 @@ type PutJobSuccessResultInput struct { // A token generated by a job worker, such as an AWS CodeDeploy deployment ID, // that a successful job provides to identify a custom action in progress. Future - // jobs will use this token in order to identify the running instance of the - // action. It can be reused to return additional information about the progress - // of the custom action. When the action is complete, no continuation token - // should be supplied. + // jobs use this token to identify the running instance of the action. It can + // be reused to return more information about the progress of the custom action. + // When the action is complete, no continuation token should be supplied. ContinuationToken *string `locationName:"continuationToken" min:"1" type:"string"` - // The ID of the current revision of the artifact successfully worked upon by + // The ID of the current revision of the artifact successfully worked on by // the job. CurrentRevision *CurrentRevision `locationName:"currentRevision" type:"structure"` @@ -7889,6 +8835,11 @@ type PutJobSuccessResultInput struct { // // JobId is a required field JobId *string `locationName:"jobId" type:"string" required:"true"` + + // Key-value pairs produced as output by a job worker that can be made available + // to a downstream action configuration. outputVariables can be included only + // when there is no continuation token on the request. + OutputVariables map[string]*string `locationName:"outputVariables" type:"map"` } // String returns the string representation @@ -7951,6 +8902,12 @@ func (s *PutJobSuccessResultInput) SetJobId(v string) *PutJobSuccessResultInput return s } +// SetOutputVariables sets the OutputVariables field's value. +func (s *PutJobSuccessResultInput) SetOutputVariables(v map[string]*string) *PutJobSuccessResultInput { + s.OutputVariables = v + return s +} + type PutJobSuccessResultOutput struct { _ struct{} `type:"structure"` } @@ -8070,10 +9027,9 @@ type PutThirdPartyJobSuccessResultInput struct { // A token generated by a job worker, such as an AWS CodeDeploy deployment ID, // that a successful job provides to identify a partner action in progress. - // Future jobs will use this token in order to identify the running instance - // of the action. It can be reused to return additional information about the - // progress of the partner action. When the action is complete, no continuation - // token should be supplied. + // Future jobs use this token to identify the running instance of the action. 
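Per the PutJobSuccessResultInput documentation above, outputVariables may only accompany a terminal result, that is, a request without a continuation token. A sketch of a job worker reporting final success; the variable name and value are illustrative:

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// reportSuccess sends a terminal success result. Because no ContinuationToken
// is supplied, OutputVariables are permitted on this request.
func reportSuccess(svc *codepipeline.CodePipeline, jobID string) error {
    _, err := svc.PutJobSuccessResult(&codepipeline.PutJobSuccessResultInput{
        JobId: aws.String(jobID),
        OutputVariables: map[string]*string{
            "BuildId": aws.String("build-1234"), // illustrative variable
        },
    })
    return err
}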
+ // It can be reused to return more information about the progress of the partner + // action. When the action is complete, no continuation token should be supplied. ContinuationToken *string `locationName:"continuationToken" min:"1" type:"string"` // Represents information about a current revision. @@ -8182,11 +9138,14 @@ func (s PutThirdPartyJobSuccessResultOutput) GoString() string { type PutWebhookInput struct { _ struct{} `type:"structure"` + // The tags for the webhook. + Tags []*Tag `locationName:"tags" type:"list"` + // The detail provided in an input file to create the webhook, such as the webhook // name, the pipeline name, and the action name. Give the webhook a unique name - // which identifies the webhook being defined. You may choose to name the webhook - // after the pipeline and action it targets so that you can easily recognize - // what it's used for later. + // that helps you identify it. You might name the webhook after the pipeline + // and action it targets so that you can easily recognize what it's used for + // later. // // Webhook is a required field Webhook *WebhookDefinition `locationName:"webhook" type:"structure" required:"true"` @@ -8208,6 +9167,16 @@ func (s *PutWebhookInput) Validate() error { if s.Webhook == nil { invalidParams.Add(request.NewErrParamRequired("Webhook")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if s.Webhook != nil { if err := s.Webhook.Validate(); err != nil { invalidParams.AddNested("Webhook", err.(request.ErrInvalidParams)) @@ -8220,6 +9189,12 @@ func (s *PutWebhookInput) Validate() error { return nil } +// SetTags sets the Tags field's value. +func (s *PutWebhookInput) SetTags(v []*Tag) *PutWebhookInput { + s.Tags = v + return s +} + // SetWebhook sets the Webhook field's value. func (s *PutWebhookInput) SetWebhook(v *WebhookDefinition) *PutWebhookInput { s.Webhook = v @@ -8822,8 +9797,140 @@ func (s *StartPipelineExecutionOutput) SetPipelineExecutionId(v string) *StartPi return s } +// A tag is a key-value pair that is used to manage the resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The tag's value. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
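The Tag type above pairs with TagResource and with ListTagsForResource from earlier in this diff; a round-trip sketch, with a placeholder pipeline ARN:

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// tagAndList attaches one tag to a resource, then reads all tags back.
func tagAndList(svc *codepipeline.CodePipeline) error {
    arn := aws.String("arn:aws:codepipeline:us-east-1:111122223333:my-pipeline") // placeholder ARN
    if _, err := svc.TagResource(&codepipeline.TagResourceInput{
        ResourceArn: arn,
        Tags:        []*codepipeline.Tag{{Key: aws.String("env"), Value: aws.String("prod")}}, // Key and Value are both required
    }); err != nil {
        return err
    }
    out, err := svc.ListTagsForResource(&codepipeline.ListTagsForResourceInput{ResourceArn: arn})
    if err != nil {
        return err
    }
    for _, t := range out.Tags {
        fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
    }
    return nil
}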
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to add tags to. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The tags you want to modify or add to the resource. + // + // Tags is a required field + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // A response to a PollForThirdPartyJobs request returned by AWS CodePipeline -// when there is a job to be worked upon by a partner action. +// when there is a job to be worked on by a partner action. type ThirdPartyJob struct { _ struct{} `type:"structure"` @@ -8874,7 +9981,7 @@ type ThirdPartyJobData struct { ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure" sensitive:"true"` // A system-generated token, such as a AWS CodeDeploy deployment ID, that a - // job requires in order to continue the job asynchronously. + // job requires to continue the job asynchronously. ContinuationToken *string `locationName:"continuationToken" min:"1" type:"string"` // The encryption key used to encrypt and decrypt data in the artifact store @@ -8882,16 +9989,16 @@ type ThirdPartyJobData struct { // is optional and might not be present. EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` - // The name of the artifact that will be worked upon by the action, if any. - // This name might be system-generated, such as "MyApp", or might be defined - // by the user when the action is created. The input artifact name must match - // the name of an output artifact generated by an action in an earlier action - // or stage of the pipeline. + // The name of the artifact that is worked on by the action, if any. This name + // might be system-generated, such as "MyApp", or it might be defined by the + // user when the action is created. 
The input artifact name must match the name + // of an output artifact generated by an action in an earlier action or stage + // of the pipeline. InputArtifacts []*Artifact `locationName:"inputArtifacts" type:"list"` - // The name of the artifact that will be the result of the action, if any. This - // name might be system-generated, such as "MyBuiltApp", or might be defined - // by the user when the action is created. + // The name of the artifact that is the result of the action, if any. This name + // might be system-generated, such as "MyBuiltApp", or it might be defined by + // the user when the action is created. OutputArtifacts []*Artifact `locationName:"outputArtifacts" type:"list"` // Represents information about a pipeline to a job worker. @@ -9055,6 +10162,72 @@ func (s *TransitionState) SetLastChangedBy(v string) *TransitionState { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to remove tags from. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The list of keys for the tags to be removed from the resource. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Represents the input of an UpdatePipeline action. type UpdatePipelineInput struct { _ struct{} `type:"structure"` @@ -9127,9 +10300,9 @@ func (s *UpdatePipelineOutput) SetPipeline(v *PipelineDeclaration) *UpdatePipeli type WebhookAuthConfiguration struct { _ struct{} `type:"structure"` - // The property used to configure acceptance of webhooks within a specific IP - // range. For IP, only the AllowedIPRange property must be set, and this property - // must be set to a valid CIDR range. + // The property used to configure acceptance of webhooks in an IP address range. + // For IP, only the AllowedIPRange property must be set. This property must + // be set to a valid CIDR range. AllowedIPRange *string `min:"1" type:"string"` // The property used to configure GitHub authentication. 
For GITHUB_HMAC, only @@ -9179,17 +10352,16 @@ func (s *WebhookAuthConfiguration) SetSecretToken(v string) *WebhookAuthConfigur type WebhookDefinition struct { _ struct{} `type:"structure"` - // Supported options are GITHUB_HMAC, IP and UNAUTHENTICATED. + // Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED. // // * For information about the authentication scheme implemented by GITHUB_HMAC, // see Securing your webhooks (https://developer.github.com/webhooks/securing/) // on the GitHub Developer website. // - // * IP will reject webhooks trigger requests unless they originate from - // an IP within the IP range whitelisted in the authentication configuration. + // * IP rejects webhooks trigger requests unless they originate from an IP + // address in the IP range whitelisted in the authentication configuration. // - // * UNAUTHENTICATED will accept all webhook trigger requests regardless - // of origin. + // * UNAUTHENTICATED accepts all webhook trigger requests regardless of origin. // // Authentication is a required field Authentication *string `locationName:"authentication" type:"string" required:"true" enum:"WebhookAuthenticationType"` @@ -9330,24 +10502,24 @@ func (s *WebhookDefinition) SetTargetPipeline(v string) *WebhookDefinition { type WebhookFilterRule struct { _ struct{} `type:"structure"` - // A JsonPath expression that will be applied to the body/payload of the webhook. + // A JsonPath expression that is applied to the body/payload of the webhook. // The value selected by the JsonPath expression must match the value specified - // in the MatchEquals field, otherwise the request will be ignored. For more - // information about JsonPath expressions, see Java JsonPath implementation - // (https://github.com/json-path/JsonPath) in GitHub. + // in the MatchEquals field. Otherwise, the request is ignored. For more information, + // see Java JsonPath implementation (https://github.com/json-path/JsonPath) + // in GitHub. // // JsonPath is a required field JsonPath *string `locationName:"jsonPath" min:"1" type:"string" required:"true"` // The value selected by the JsonPath expression must match what is supplied - // in the MatchEquals field, otherwise the request will be ignored. Properties - // from the target action configuration can be included as placeholders in this - // value by surrounding the action configuration key with curly braces. For - // example, if the value supplied here is "refs/heads/{Branch}" and the target - // action has an action configuration property called "Branch" with a value - // of "master", the MatchEquals value will be evaluated as "refs/heads/master". - // For a list of action configuration properties for built-in action types, - // see Pipeline Structure Reference Action Requirements (https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements). + // in the MatchEquals field. Otherwise, the request is ignored. Properties from + // the target action configuration can be included as placeholders in this value + // by surrounding the action configuration key with curly brackets. For example, + // if the value supplied here is "refs/heads/{Branch}" and the target action + // has an action configuration property called "Branch" with a value of "master", + // the MatchEquals value is evaluated as "refs/heads/master". 
For a list of + // action configuration properties for built-in action types, see Pipeline Structure + // Reference Action Requirements (https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements). MatchEquals *string `locationName:"matchEquals" min:"1" type:"string"` } @@ -9554,6 +10726,26 @@ const ( StageTransitionTypeOutbound = "Outbound" ) +const ( + // TriggerTypeCreatePipeline is a TriggerType enum value + TriggerTypeCreatePipeline = "CreatePipeline" + + // TriggerTypeStartPipelineExecution is a TriggerType enum value + TriggerTypeStartPipelineExecution = "StartPipelineExecution" + + // TriggerTypePollForSourceChanges is a TriggerType enum value + TriggerTypePollForSourceChanges = "PollForSourceChanges" + + // TriggerTypeWebhook is a TriggerType enum value + TriggerTypeWebhook = "Webhook" + + // TriggerTypeCloudWatchEvent is a TriggerType enum value + TriggerTypeCloudWatchEvent = "CloudWatchEvent" + + // TriggerTypePutActionRevision is a TriggerType enum value + TriggerTypePutActionRevision = "PutActionRevision" +) + const ( // WebhookAuthenticationTypeGithubHmac is a WebhookAuthenticationType enum value WebhookAuthenticationTypeGithubHmac = "GITHUB_HMAC" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/doc.go index 0618243472d..45d248b2b85 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/doc.go @@ -7,18 +7,18 @@ // // This is the AWS CodePipeline API Reference. This guide provides descriptions // of the actions and data types for AWS CodePipeline. Some functionality for -// your pipeline is only configurable through the API. For additional information, +// your pipeline can only be configured through the API. For more information, // see the AWS CodePipeline User Guide (https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html). // // You can use the AWS CodePipeline API to work with pipelines, stages, actions, -// and transitions, as described below. +// and transitions. // // Pipelines are models of automated release processes. Each pipeline is uniquely // named, and consists of stages, actions, and transitions. // // You can work with pipelines by calling: // -// * CreatePipeline, which creates a uniquely-named pipeline. +// * CreatePipeline, which creates a uniquely named pipeline. // // * DeletePipeline, which deletes the specified pipeline. // @@ -33,7 +33,7 @@ // // * ListActionExecutions, which returns action-level details for past executions. // The details include full stage and action-level details, including individual -// action duration, status, any errors which occurred during the execution, +// action duration, status, any errors that occurred during the execution, // and input and output artifact location details. // // * ListPipelines, which gets a summary of all of the pipelines associated @@ -42,26 +42,25 @@ // * ListPipelineExecutions, which gets a summary of the most recent executions // for a pipeline. // -// * StartPipelineExecution, which runs the the most recent revision of an -// artifact through the pipeline. +// * StartPipelineExecution, which runs the most recent revision of an artifact +// through the pipeline. // // * UpdatePipeline, which updates a pipeline with edits or changes to the // structure of the pipeline. 
// // Pipelines include stages. Each stage contains one or more actions that must -// complete before the next stage begins. A stage will result in success or -// failure. If a stage fails, then the pipeline stops at that stage and will -// remain stopped until either a new version of an artifact appears in the source -// location, or a user takes action to re-run the most recent artifact through -// the pipeline. You can call GetPipelineState, which displays the status of -// a pipeline, including the status of stages in the pipeline, or GetPipeline, -// which returns the entire structure of the pipeline, including the stages -// of that pipeline. For more information about the structure of stages and -// actions, also refer to the AWS CodePipeline Pipeline Structure Reference -// (https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-structure.html). -// -// Pipeline stages include actions, which are categorized into categories such -// as source or build actions performed within a stage of a pipeline. For example, +// complete before the next stage begins. A stage results in success or failure. +// If a stage fails, the pipeline stops at that stage and remains stopped until +// either a new version of an artifact appears in the source location, or a +// user takes action to rerun the most recent artifact through the pipeline. +// You can call GetPipelineState, which displays the status of a pipeline, including +// the status of stages in the pipeline, or GetPipeline, which returns the entire +// structure of the pipeline, including the stages of that pipeline. For more +// information about the structure of stages and actions, see AWS CodePipeline +// Pipeline Structure Reference (https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-structure.html). +// +// Pipeline stages include actions that are categorized into categories such +// as source or build actions performed in a stage of a pipeline. For example, // you can use a source action to import artifacts into a pipeline from a source // such as Amazon S3. Like stages, you do not work with actions directly in // most cases, but you do define and interact with actions when working with @@ -95,8 +94,8 @@ // // For third-party integrators or developers who want to create their own integrations // with AWS CodePipeline, the expected sequence varies from the standard API -// user. In order to integrate with AWS CodePipeline, developers will need to -// work with the following items: +// user. To integrate with AWS CodePipeline, developers need to work with the +// following items: // // Jobs, which are instances of an action. For example, a job for a source action // might import a revision of an artifact from a source. @@ -104,14 +103,13 @@ // You can work with jobs by calling: // // * AcknowledgeJob, which confirms whether a job worker has received the -// specified job, +// specified job. // -// * GetJobDetails, which returns the details of a job, +// * GetJobDetails, which returns the details of a job. // -// * PollForJobs, which determines whether there are any jobs to act upon, +// * PollForJobs, which determines whether there are any jobs to act on. // -// -// * PutJobFailureResult, which provides details of a job failure, and +// * PutJobFailureResult, which provides details of a job failure. // // * PutJobSuccessResult, which provides details of a job success. 
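The job-worker sequence listed above (PollForJobs, AcknowledgeJob, then a success or failure result) compresses into a polling pass like the following sketch; the custom ActionTypeId values are illustrative, and the work itself is elided:

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/codepipeline"
)

// pollOnce performs one polling pass: claim each returned job, do the work,
// then report the outcome (PutJobFailureResult would be the failure path).
func pollOnce(svc *codepipeline.CodePipeline) error {
    out, err := svc.PollForJobs(&codepipeline.PollForJobsInput{
        ActionTypeId: &codepipeline.ActionTypeId{
            Category: aws.String("Test"),         // illustrative custom action
            Owner:    aws.String("Custom"),
            Provider: aws.String("MyTestRunner"), // illustrative provider
            Version:  aws.String("1"),
        },
        MaxBatchSize: aws.Int64(1),
    })
    if err != nil {
        return err
    }
    for _, job := range out.Jobs {
        // AcknowledgeJob confirms receipt; the nonce guards against duplicates.
        if _, err := svc.AcknowledgeJob(&codepipeline.AcknowledgeJobInput{
            JobId: job.Id,
            Nonce: job.Nonce,
        }); err != nil {
            return err
        }
        // ... perform the action's work here ...
        if _, err := svc.PutJobSuccessResult(&codepipeline.PutJobSuccessResultInput{
            JobId: job.Id,
        }); err != nil {
            return err
        }
    }
    return nil
}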
// @@ -122,16 +120,15 @@ // You can work with third party jobs by calling: // // * AcknowledgeThirdPartyJob, which confirms whether a job worker has received -// the specified job, +// the specified job. // // * GetThirdPartyJobDetails, which requests the details of a job for a partner -// action, +// action. // // * PollForThirdPartyJobs, which determines whether there are any jobs to -// act upon, +// act on. // -// * PutThirdPartyJobFailureResult, which provides details of a job failure, -// and +// * PutThirdPartyJobFailureResult, which provides details of a job failure. // // * PutThirdPartyJobSuccessResult, which provides details of a job success. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/errors.go index dfdffa34c79..4a279e6dbab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/errors.go @@ -22,10 +22,16 @@ const ( // The approval action has already been approved or rejected. ErrCodeApprovalAlreadyCompletedException = "ApprovalAlreadyCompletedException" + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // Unable to modify the tag due to a simultaneous update request. + ErrCodeConcurrentModificationException = "ConcurrentModificationException" + // ErrCodeInvalidActionDeclarationException for service response error code // "InvalidActionDeclarationException". // - // The specified action declaration was specified in an invalid format. + // The action declaration was specified in an invalid format. ErrCodeInvalidActionDeclarationException = "InvalidActionDeclarationException" // ErrCodeInvalidApprovalTokenException for service response error code @@ -34,6 +40,12 @@ const ( // The approval request already received a response or has expired. ErrCodeInvalidApprovalTokenException = "InvalidApprovalTokenException" + // ErrCodeInvalidArnException for service response error code + // "InvalidArnException". + // + // The specified resource ARN is invalid. + ErrCodeInvalidArnException = "InvalidArnException" + // ErrCodeInvalidBlockerDeclarationException for service response error code // "InvalidBlockerDeclarationException". // @@ -49,40 +61,46 @@ const ( // ErrCodeInvalidJobException for service response error code // "InvalidJobException". // - // The specified job was specified in an invalid format or cannot be found. + // The job was specified in an invalid format or cannot be found. ErrCodeInvalidJobException = "InvalidJobException" // ErrCodeInvalidJobStateException for service response error code // "InvalidJobStateException". // - // The specified job state was specified in an invalid format. + // The job state was specified in an invalid format. ErrCodeInvalidJobStateException = "InvalidJobStateException" // ErrCodeInvalidNextTokenException for service response error code // "InvalidNextTokenException". // // The next token was specified in an invalid format. Make sure that the next - // token you provided is the token returned by a previous call. + // token you provide is the token returned by a previous call. ErrCodeInvalidNextTokenException = "InvalidNextTokenException" // ErrCodeInvalidNonceException for service response error code // "InvalidNonceException". // - // The specified nonce was specified in an invalid format. 
+ // The nonce was specified in an invalid format. ErrCodeInvalidNonceException = "InvalidNonceException" // ErrCodeInvalidStageDeclarationException for service response error code // "InvalidStageDeclarationException". // - // The specified stage declaration was specified in an invalid format. + // The stage declaration was specified in an invalid format. ErrCodeInvalidStageDeclarationException = "InvalidStageDeclarationException" // ErrCodeInvalidStructureException for service response error code // "InvalidStructureException". // - // The specified structure was specified in an invalid format. + // The structure was specified in an invalid format. ErrCodeInvalidStructureException = "InvalidStructureException" + // ErrCodeInvalidTagsException for service response error code + // "InvalidTagsException". + // + // The specified resource tags are invalid. + ErrCodeInvalidTagsException = "InvalidTagsException" + // ErrCodeInvalidWebhookAuthenticationParametersException for service response error code // "InvalidWebhookAuthenticationParametersException". // @@ -98,7 +116,7 @@ const ( // ErrCodeJobNotFoundException for service response error code // "JobNotFoundException". // - // The specified job was specified in an invalid format or cannot be found. + // The job was specified in an invalid format or cannot be found. ErrCodeJobNotFoundException = "JobNotFoundException" // ErrCodeLimitExceededException for service response error code @@ -115,6 +133,12 @@ const ( // associated with the request is out of date. ErrCodeNotLatestPipelineExecutionException = "NotLatestPipelineExecutionException" + // ErrCodeOutputVariablesSizeExceededException for service response error code + // "OutputVariablesSizeExceededException". + // + // Exceeded the total size limit for all variables in the pipeline. + ErrCodeOutputVariablesSizeExceededException = "OutputVariablesSizeExceededException" + // ErrCodePipelineExecutionNotFoundException for service response error code // "PipelineExecutionNotFoundException". // @@ -131,31 +155,40 @@ const ( // ErrCodePipelineNotFoundException for service response error code // "PipelineNotFoundException". // - // The specified pipeline was specified in an invalid format or cannot be found. + // The pipeline was specified in an invalid format or cannot be found. ErrCodePipelineNotFoundException = "PipelineNotFoundException" // ErrCodePipelineVersionNotFoundException for service response error code // "PipelineVersionNotFoundException". // - // The specified pipeline version was specified in an invalid format or cannot - // be found. + // The pipeline version was specified in an invalid format or cannot be found. ErrCodePipelineVersionNotFoundException = "PipelineVersionNotFoundException" + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource was specified in an invalid format. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + // ErrCodeStageNotFoundException for service response error code // "StageNotFoundException". // - // The specified stage was specified in an invalid format or cannot be found. + // The stage was specified in an invalid format or cannot be found. ErrCodeStageNotFoundException = "StageNotFoundException" // ErrCodeStageNotRetryableException for service response error code // "StageNotRetryableException". 
// - // The specified stage can't be retried because the pipeline structure or stage - // state changed after the stage was not completed; the stage contains no failed - // actions; one or more actions are still in progress; or another retry attempt - // is already in progress. + // Unable to retry. The pipeline structure or stage state might have changed + // while actions awaited retry, or the stage contains no failed actions. ErrCodeStageNotRetryableException = "StageNotRetryableException" + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The tags limit for a resource has been exceeded. + ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeValidationException for service response error code // "ValidationException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go index 397a00f9eb2..0bc30449588 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := codepipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodePipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodePipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodePipeline { svc := &CodePipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go index 256beba518a..5b2bc748017 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go @@ -59,8 +59,8 @@ func (c *CognitoIdentity) CreateIdentityPoolRequest(input *CreateIdentityPoolInp // CreateIdentityPool API operation for Amazon Cognito Identity. // // Creates a new identity pool. The identity pool is a store of user identity -// information that is specific to your AWS account. The limit on identity pools -// is 60 per account. The keys for SupportedLoginProviders are as follows: +// information that is specific to your AWS account. The keys for SupportedLoginProviders +// are as follows: // // * Facebook: graph.facebook.com // @@ -2163,6 +2163,11 @@ func (c *CognitoIdentity) UpdateIdentityPoolWithContext(ctx aws.Context, input * type CreateIdentityPoolInput struct { _ struct{} `type:"structure"` + // Enables or disables the Basic (Classic) authentication flow. 
For more information, + // see Identity Pools (Federated Identities) Authentication Flow (https://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html) + // in the Amazon Cognito Developer Guide. + AllowClassicFlow *bool `type:"boolean"` + // TRUE if the identity pool supports unauthenticated logins. // // AllowUnauthenticatedIdentities is a required field @@ -2243,6 +2248,12 @@ func (s *CreateIdentityPoolInput) Validate() error { return nil } +// SetAllowClassicFlow sets the AllowClassicFlow field's value. +func (s *CreateIdentityPoolInput) SetAllowClassicFlow(v bool) *CreateIdentityPoolInput { + s.AllowClassicFlow = &v + return s +} + // SetAllowUnauthenticatedIdentities sets the AllowUnauthenticatedIdentities field's value. func (s *CreateIdentityPoolInput) SetAllowUnauthenticatedIdentities(v bool) *CreateIdentityPoolInput { s.AllowUnauthenticatedIdentities = &v @@ -2677,7 +2688,6 @@ type GetIdInput struct { // * Amazon Cognito user pool: cognito-idp..amazonaws.com/, // for example, cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789. // - // // * Google: accounts.google.com // // * Amazon: www.amazon.com @@ -2879,6 +2889,9 @@ type GetOpenIdTokenForDeveloperIdentityInput struct { // take care in setting the expiration time for a token, as there are significant // security implications: an attacker could use a leaked token to access your // AWS resources for the token's duration. + // + // Please provide for a small grace period, usually no more than 5 minutes, + // to account for clock skew. TokenDuration *int64 `min:"1" type:"long"` } @@ -3118,6 +3131,11 @@ func (s *IdentityDescription) SetLogins(v []*string) *IdentityDescription { type IdentityPool struct { _ struct{} `type:"structure"` + // Enables or disables the Basic (Classic) authentication flow. For more information, + // see Identity Pools (Federated Identities) Authentication Flow (https://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html) + // in the Amazon Cognito Developer Guide. + AllowClassicFlow *bool `type:"boolean"` + // TRUE if the identity pool supports unauthenticated logins. // // AllowUnauthenticatedIdentities is a required field @@ -3203,6 +3221,12 @@ func (s *IdentityPool) Validate() error { return nil } +// SetAllowClassicFlow sets the AllowClassicFlow field's value. +func (s *IdentityPool) SetAllowClassicFlow(v bool) *IdentityPool { + s.AllowClassicFlow = &v + return s +} + // SetAllowUnauthenticatedIdentities sets the AllowUnauthenticatedIdentities field's value. func (s *IdentityPool) SetAllowUnauthenticatedIdentities(v bool) *IdentityPool { s.AllowUnauthenticatedIdentities = &v @@ -4221,7 +4245,9 @@ type TagResourceInput struct { ResourceArn *string `min:"20" type:"string" required:"true"` // The tags to assign to the identity pool. - Tags map[string]*string `type:"map"` + // + // Tags is a required field + Tags map[string]*string `type:"map" required:"true"` } // String returns the string representation @@ -4243,6 +4269,9 @@ func (s *TagResourceInput) Validate() error { if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } if invalidParams.Len() > 0 { return invalidParams @@ -4511,7 +4540,9 @@ type UntagResourceInput struct { ResourceArn *string `min:"20" type:"string" required:"true"` // The keys of the tags to remove from the user pool. 
- TagKeys []*string `type:"list"` + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` } // String returns the string representation @@ -4533,6 +4564,9 @@ func (s *UntagResourceInput) Validate() error { if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } if invalidParams.Len() > 0 { return invalidParams diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go index 9ebae103f36..e5c2011c0a9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentity { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentity { svc := &CognitoIdentity{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-06-30", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go index 8c6b517653d..a21cb8d27cc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go @@ -160,7 +160,7 @@ func (c *CognitoIdentityProvider) AdminAddUserToGroupRequest(input *AdminAddUser // // Adds the specified user to the specified group. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -261,7 +261,7 @@ func (c *CognitoIdentityProvider) AdminConfirmSignUpRequest(input *AdminConfirmS // Confirms user registration as an admin without using a confirmation code. // Works on any user. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -532,7 +532,7 @@ func (c *CognitoIdentityProvider) AdminDeleteUserRequest(input *AdminDeleteUserI // // Deletes a user as an administrator. Works on any user. 
// -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -633,7 +633,7 @@ func (c *CognitoIdentityProvider) AdminDeleteUserAttributesRequest(input *AdminD // Deletes the user attributes in a user pool as an administrator. Works on // any user. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -863,9 +863,9 @@ func (c *CognitoIdentityProvider) AdminDisableUserRequest(input *AdminDisableUse // AdminDisableUser API operation for Amazon Cognito Identity Provider. // -// Disables the specified user as an administrator. Works on any user. +// Disables the specified user. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -965,7 +965,7 @@ func (c *CognitoIdentityProvider) AdminEnableUserRequest(input *AdminEnableUserI // // Enables the specified user as an administrator. Works on any user. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1065,7 +1065,7 @@ func (c *CognitoIdentityProvider) AdminForgetDeviceRequest(input *AdminForgetDev // // Forgets the device, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1167,7 +1167,7 @@ func (c *CognitoIdentityProvider) AdminGetDeviceRequest(input *AdminGetDeviceInp // // Gets the device, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1267,7 +1267,7 @@ func (c *CognitoIdentityProvider) AdminGetUserRequest(input *AdminGetUserInput) // Gets the specified user by user name in a user pool as an administrator. // Works on any user. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1366,7 +1366,7 @@ func (c *CognitoIdentityProvider) AdminInitiateAuthRequest(input *AdminInitiateA // // Initiates the authentication flow, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1622,7 +1622,7 @@ func (c *CognitoIdentityProvider) AdminListDevicesRequest(input *AdminListDevice // // Lists devices, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1727,7 +1727,7 @@ func (c *CognitoIdentityProvider) AdminListGroupsForUserRequest(input *AdminList // // Lists the groups that the user belongs to. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1791,7 +1791,7 @@ func (c *CognitoIdentityProvider) AdminListGroupsForUserWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a AdminListGroupsForUser operation. // pageNum := 0 // err := client.AdminListGroupsForUserPages(params, -// func(page *AdminListGroupsForUserOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.AdminListGroupsForUserOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1823,10 +1823,12 @@ func (c *CognitoIdentityProvider) AdminListGroupsForUserPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*AdminListGroupsForUserOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*AdminListGroupsForUserOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1948,7 +1950,7 @@ func (c *CognitoIdentityProvider) AdminListUserAuthEventsWithContext(ctx aws.Con // // Example iterating over at most 3 pages of a AdminListUserAuthEvents operation. // pageNum := 0 // err := client.AdminListUserAuthEventsPages(params, -// func(page *AdminListUserAuthEventsOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.AdminListUserAuthEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1980,10 +1982,12 @@ func (c *CognitoIdentityProvider) AdminListUserAuthEventsPagesWithContext(ctx aw }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*AdminListUserAuthEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*AdminListUserAuthEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2034,7 +2038,7 @@ func (c *CognitoIdentityProvider) AdminRemoveUserFromGroupRequest(input *AdminRe // // Removes the specified user from the specified group. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2145,7 +2149,7 @@ func (c *CognitoIdentityProvider) AdminResetUserPasswordRequest(input *AdminRese // also result in sending a message to the end user with the code to change // their password. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2274,7 +2278,7 @@ func (c *CognitoIdentityProvider) AdminRespondToAuthChallengeRequest(input *Admi // // Responds to an authentication challenge, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2428,7 +2432,12 @@ func (c *CognitoIdentityProvider) AdminSetUserMFAPreferenceRequest(input *AdminS // AdminSetUserMFAPreference API operation for Amazon Cognito Identity Provider. // -// Sets the user's multi-factor authentication (MFA) preference. +// Sets the user's multi-factor authentication (MFA) preference, including which +// MFA options are enabled and if any are preferred. Only one factor can be +// set as preferred. The preferred MFA factor will be used to authenticate a +// user if multiple factors are enabled. If multiple options are enabled and +// no preference is set, a challenge to choose an MFA option will be returned +// during sign in. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2528,6 +2537,19 @@ func (c *CognitoIdentityProvider) AdminSetUserPasswordRequest(input *AdminSetUse // AdminSetUserPassword API operation for Amazon Cognito Identity Provider. // +// Sets the specified user's password in a user pool as an administrator. Works +// on any user. +// +// The password can be temporary or permanent. If it is temporary, the user +// status will be placed into the FORCE_CHANGE_PASSWORD state. When the user +// next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain +// the NEW_PASSWORD_REQUIRED challenge. If the user does not sign in before +// it expires, the user will not be able to sign in and their password will +// need to be reset by an administrator. +// +// Once the user has set a new password, or the password is permanent, the user +// status will be set to Confirmed. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2628,9 +2650,9 @@ func (c *CognitoIdentityProvider) AdminSetUserSettingsRequest(input *AdminSetUse // AdminSetUserSettings API operation for Amazon Cognito Identity Provider. // -// Sets all the user settings for a specified user name. Works on any user. -// -// Requires developer credentials. +// This action is no longer supported. You can use it to configure only SMS +// MFA. You can't use it to configure TOTP software token MFA. To configure +// either type of MFA, use the AdminSetUserMFAPreference action instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2829,7 +2851,7 @@ func (c *CognitoIdentityProvider) AdminUpdateDeviceStatusRequest(input *AdminUpd // // Updates the device status as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2939,7 +2961,7 @@ func (c *CognitoIdentityProvider) AdminUpdateUserAttributesRequest(input *AdminU // In addition to updating user attributes, this API can also be used to mark // phone and email as verified. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3071,7 +3093,7 @@ func (c *CognitoIdentityProvider) AdminUserGlobalSignOutRequest(input *AdminUser // // Signs out users from all devices, as an administrator. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3763,7 +3785,7 @@ func (c *CognitoIdentityProvider) CreateGroupRequest(input *CreateGroupInput) (r // // Creates a new group in the specified user pool. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4479,7 +4501,7 @@ func (c *CognitoIdentityProvider) DeleteGroupRequest(input *DeleteGroupInput) (r // // Deletes a group. Currently only groups with no members can be deleted. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6037,8 +6059,7 @@ func (c *CognitoIdentityProvider) ForgotPasswordRequest(input *ForgotPasswordInp // for the user, the confirmation code is sent to the phone number. Otherwise, // if a verified email exists, the confirmation code is sent to the email. If // neither a verified phone number nor a verified email exists, InvalidParameterException -// is thrown. To use the confirmation code for resetting the password, call -// . +// is thrown. To use the confirmation code for resetting the password, call . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6374,7 +6395,7 @@ func (c *CognitoIdentityProvider) GetGroupRequest(input *GetGroupInput) (req *re // // Gets a group. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6575,6 +6596,10 @@ func (c *CognitoIdentityProvider) GetSigningCertificateRequest(input *GetSigning // * ErrCodeInternalErrorException "InternalErrorException" // This exception is thrown when Amazon Cognito encounters an internal error. // +// * ErrCodeInvalidParameterException "InvalidParameterException" +// This exception is thrown when the Amazon Cognito service encounters an invalid +// parameter. +// // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // This exception is thrown when the Amazon Cognito service cannot find the // requested resource. 
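The doc comments in these hunks keep repeating the same error-handling contract: operations return an awserr.Error, and callers branch on the documented error codes with a runtime type assertion. A minimal sketch of that pattern against GetSigningCertificate, which this diff newly documents as returning InvalidParameterException (the user pool ID below is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	svc := cognitoidentityprovider.New(session.Must(session.NewSession()))

	out, err := svc.GetSigningCertificate(&cognitoidentityprovider.GetSigningCertificateInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"), // placeholder pool ID
	})
	if err != nil {
		// Runtime type assertion, as the generated doc comments describe.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case cognitoidentityprovider.ErrCodeInvalidParameterException:
				log.Fatalf("invalid parameter: %s", aerr.Message())
			case cognitoidentityprovider.ErrCodeResourceNotFoundException:
				log.Fatalf("no such user pool: %s", aerr.Message())
			default:
				log.Fatal(aerr)
			}
		}
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Certificate))
}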
@@ -7230,6 +7255,16 @@ func (c *CognitoIdentityProvider) InitiateAuthRequest(input *InitiateAuthInput) // * ErrCodeInternalErrorException "InternalErrorException" // This exception is thrown when Amazon Cognito encounters an internal error. // +// * ErrCodeInvalidSmsRoleAccessPolicyException "InvalidSmsRoleAccessPolicyException" +// This exception is returned when the role provided for SMS configuration does +// not have permission to publish using Amazon SNS. +// +// * ErrCodeInvalidSmsRoleTrustRelationshipException "InvalidSmsRoleTrustRelationshipException" +// This exception is thrown when the trust relationship is invalid for the role +// provided for SMS configuration. This can happen if you do not trust cognito-idp.amazonaws.com +// or the external ID provided in the role does not match what is provided in +// the SMS configuration for the user pool. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/cognito-idp-2016-04-18/InitiateAuth func (c *CognitoIdentityProvider) InitiateAuth(input *InitiateAuthInput) (*InitiateAuthOutput, error) { req, out := c.InitiateAuthRequest(input) @@ -7410,7 +7445,7 @@ func (c *CognitoIdentityProvider) ListGroupsRequest(input *ListGroupsInput) (req // // Lists the groups associated with a user pool. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7471,7 +7506,7 @@ func (c *CognitoIdentityProvider) ListGroupsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a ListGroups operation. // pageNum := 0 // err := client.ListGroupsPages(params, -// func(page *ListGroupsOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7503,10 +7538,12 @@ func (c *CognitoIdentityProvider) ListGroupsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7621,7 +7658,7 @@ func (c *CognitoIdentityProvider) ListIdentityProvidersWithContext(ctx aws.Conte // // Example iterating over at most 3 pages of a ListIdentityProviders operation. // pageNum := 0 // err := client.ListIdentityProvidersPages(params, -// func(page *ListIdentityProvidersOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListIdentityProvidersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7653,10 +7690,12 @@ func (c *CognitoIdentityProvider) ListIdentityProvidersPagesWithContext(ctx aws. }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListIdentityProvidersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListIdentityProvidersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7771,7 +7810,7 @@ func (c *CognitoIdentityProvider) ListResourceServersWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a ListResourceServers operation. 
// pageNum := 0 // err := client.ListResourceServersPages(params, -// func(page *ListResourceServersOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListResourceServersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7803,10 +7842,12 @@ func (c *CognitoIdentityProvider) ListResourceServersPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResourceServersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResourceServersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8115,7 +8156,7 @@ func (c *CognitoIdentityProvider) ListUserPoolClientsWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a ListUserPoolClients operation. // pageNum := 0 // err := client.ListUserPoolClientsPages(params, -// func(page *ListUserPoolClientsOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListUserPoolClientsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8147,10 +8188,12 @@ func (c *CognitoIdentityProvider) ListUserPoolClientsPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUserPoolClientsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUserPoolClientsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8261,7 +8304,7 @@ func (c *CognitoIdentityProvider) ListUserPoolsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListUserPools operation. // pageNum := 0 // err := client.ListUserPoolsPages(params, -// func(page *ListUserPoolsOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListUserPoolsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8293,10 +8336,12 @@ func (c *CognitoIdentityProvider) ListUserPoolsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUserPoolsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUserPoolsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8331,6 +8376,12 @@ func (c *CognitoIdentityProvider) ListUsersRequest(input *ListUsersInput) (req * Name: opListUsers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"PaginationToken"}, + OutputTokens: []string{"PaginationToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -8394,6 +8445,58 @@ func (c *CognitoIdentityProvider) ListUsersWithContext(ctx aws.Context, input *L return out, req.Send() } +// ListUsersPages iterates over the pages of a ListUsers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUsers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUsers operation. 
+// pageNum := 0 +// err := client.ListUsersPages(params, +// func(page *cognitoidentityprovider.ListUsersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CognitoIdentityProvider) ListUsersPages(input *ListUsersInput, fn func(*ListUsersOutput, bool) bool) error { + return c.ListUsersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListUsersPagesWithContext same as ListUsersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentityProvider) ListUsersPagesWithContext(ctx aws.Context, input *ListUsersInput, fn func(*ListUsersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListUsersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListUsersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListUsersInGroup = "ListUsersInGroup" // ListUsersInGroupRequest generates a "aws/request.Request" representing the @@ -8446,7 +8549,7 @@ func (c *CognitoIdentityProvider) ListUsersInGroupRequest(input *ListUsersInGrou // // Lists the users in the specified group. // -// Requires developer credentials. +// Calling this action requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8507,7 +8610,7 @@ func (c *CognitoIdentityProvider) ListUsersInGroupWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a ListUsersInGroup operation. // pageNum := 0 // err := client.ListUsersInGroupPages(params, -// func(page *ListUsersInGroupOutput, lastPage bool) bool { +// func(page *cognitoidentityprovider.ListUsersInGroupOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8539,10 +8642,12 @@ func (c *CognitoIdentityProvider) ListUsersInGroupPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUsersInGroupOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUsersInGroupOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9091,7 +9196,12 @@ func (c *CognitoIdentityProvider) SetUserMFAPreferenceRequest(input *SetUserMFAP // SetUserMFAPreference API operation for Amazon Cognito Identity Provider. // -// Set the user's multi-factor authentication (MFA) method preference. +// Set the user's multi-factor authentication (MFA) method preference, including +// which MFA factors are enabled and if any are preferred. Only one factor can +// be set as preferred. The preferred MFA factor will be used to authenticate +// a user if multiple factors are enabled. If multiple options are enabled and +// no preference is set, a challenge to choose an MFA option will be returned +// during sign in. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9190,7 +9300,7 @@ func (c *CognitoIdentityProvider) SetUserPoolMfaConfigRequest(input *SetUserPool // SetUserPoolMfaConfig API operation for Amazon Cognito Identity Provider. // -// Set the user pool MFA configuration. +// Set the user pool multi-factor authentication (MFA) configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9296,9 +9406,9 @@ func (c *CognitoIdentityProvider) SetUserSettingsRequest(input *SetUserSettingsI // SetUserSettings API operation for Amazon Cognito Identity Provider. // -// Sets the user settings like multi-factor authentication (MFA). If MFA is -// to be removed for a particular attribute pass the attribute with code delivery -// as null. If null list is passed, all MFA options are removed. +// This action is no longer supported. You can use it to configure only SMS +// MFA. You can't use it to configure TOTP software token MFA. To configure +// either type of MFA, use the SetUserMFAPreference action instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10144,7 +10254,10 @@ func (c *CognitoIdentityProvider) UpdateGroupRequest(input *UpdateGroupInput) (r // // Updates the specified group with the specified attributes. // -// Requires developer credentials. +// Calling this action requires developer credentials. +// +// If you don't provide a value for an attribute, it will be set to the default +// value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10337,6 +10450,9 @@ func (c *CognitoIdentityProvider) UpdateResourceServerRequest(input *UpdateResou // // Updates the name and scopes of resource server. All other fields are read-only. // +// If you don't provide a value for an attribute, it will be set to the default +// value. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10576,9 +10692,11 @@ func (c *CognitoIdentityProvider) UpdateUserPoolRequest(input *UpdateUserPoolInp // UpdateUserPool API operation for Amazon Cognito Identity Provider. // -// Updates the specified user pool with the specified attributes. If you don't -// provide a value for an attribute, it will be set to the default value. You -// can get a list of the current user pool settings with . +// Updates the specified user pool with the specified attributes. You can get +// a list of the current user pool settings with . +// +// If you don't provide a value for an attribute, it will be set to the default +// value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10697,9 +10815,10 @@ func (c *CognitoIdentityProvider) UpdateUserPoolClientRequest(input *UpdateUserP // UpdateUserPoolClient API operation for Amazon Cognito Identity Provider. // // Updates the specified user pool app client with the specified attributes. +// You can get a list of the current user pool app client settings with . 
+// // If you don't provide a value for an attribute, it will be set to the default -// value. You can get a list of the current user pool app client settings with -// . +// value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11485,6 +11604,36 @@ func (s AdminAddUserToGroupOutput) GoString() string { type AdminConfirmSignUpInput struct { _ struct{} `type:"structure"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // If your user pool configuration includes triggers, the AdminConfirmSignUp + // API action invokes the AWS Lambda function that is specified for the post + // confirmation trigger. When Amazon Cognito invokes this function, it passes + // a JSON payload, which the function receives as input. In this payload, the + // clientMetadata attribute provides the data that you assigned to the ClientMetadata + // parameter in your AdminConfirmSignUp request. In your function code in AWS + // Lambda, you can process the ClientMetadata value to enhance your workflow + // for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The user pool ID for which you want to confirm user registration. // // UserPoolId is a required field @@ -11528,6 +11677,12 @@ func (s *AdminConfirmSignUpInput) Validate() error { return nil } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *AdminConfirmSignUpInput) SetClientMetadata(v map[string]*string) *AdminConfirmSignUpInput { + s.ClientMetadata = v + return s +} + // SetUserPoolId sets the UserPoolId field's value. func (s *AdminConfirmSignUpInput) SetUserPoolId(v string) *AdminConfirmSignUpInput { s.UserPoolId = &v @@ -11565,7 +11720,7 @@ type AdminCreateUserConfigType struct { // The message template to be used for the welcome message to new users. // - // See also Customizing User Invitation Messages (http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization). + // See also Customizing User Invitation Messages (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization).
InviteMessageTemplate *MessageTemplateType `type:"structure"` // The user account expiration limit, in days, after which the account is no @@ -11626,6 +11781,36 @@ type AdminCreateUserInput struct { _ struct{} `type:"structure"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes + // the function that is assigned to the pre sign-up trigger. When Amazon Cognito + // invokes this function, it passes a JSON payload, which the function receives + // as input. This payload contains a clientMetadata attribute, which provides + // the data that you assigned to the ClientMetadata parameter in your AdminCreateUser + // request. In your function code in AWS Lambda, you can process the clientMetadata + // value to enhance your workflow for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // Specify "EMAIL" if email will be used to send the welcome message. Specify // "SMS" if the phone number will be used. The default value is "SMS". More // than one value can be specified. @@ -11777,6 +11962,12 @@ func (s *AdminCreateUserInput) Validate() error { return nil } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *AdminCreateUserInput) SetClientMetadata(v map[string]*string) *AdminCreateUserInput { + s.ClientMetadata = v + return s +} + // SetDesiredDeliveryMediums sets the DesiredDeliveryMediums field's value. func (s *AdminCreateUserInput) SetDesiredDeliveryMediums(v []*string) *AdminCreateUserInput { s.DesiredDeliveryMediums = v @@ -12495,7 +12686,10 @@ type AdminGetUserOutput struct { // Indicates that the status is enabled. Enabled *bool `type:"boolean"` - // Specifies the options for MFA (e.g., email or phone number). + // This response parameter is no longer supported. It provides information only + // about SMS MFA configurations. It doesn't provide information about TOTP software + // token MFA configurations. To look up information about either type of MFA + // configuration, use the AdminGetUserResponse$UserMFASettingList response instead. MFAOptions []*MFAOptionType `type:"list"` // The user's preferred MFA setting. @@ -12510,7 +12704,8 @@ type AdminGetUserOutput struct { // The date the user was last modified. UserLastModifiedDate *time.Time `type:"timestamp"` - // The list of the user's MFA settings. + // The MFA options that are enabled for the user.
The possible values in this + // list are SMS_MFA and SOFTWARE_TOKEN_MFA. UserMFASettingList []*string `type:"list"` // The user status. Can be one of the following: @@ -12642,6 +12837,11 @@ type AdminInitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. + // // AuthFlow is a required field AuthFlow *string `type:"string" required:"true" enum:"AuthFlowType"` @@ -12666,9 +12866,59 @@ type AdminInitiateAuthInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` - // This is a random key-value pair map which can contain any key and will be - // passed to your PreAuthentication Lambda trigger as-is. It can be used to - // implement additional validations around authentication. + // A map of custom key-value pairs that you can provide as input for certain + // custom workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes + // the AWS Lambda functions that are specified for various triggers. The ClientMetadata + // value is passed as input to the functions for only the following triggers: + // + // * Pre signup + // + // * Pre authentication + // + // * User migration + // + // When Amazon Cognito invokes the functions for these triggers, it passes a + // JSON payload, which the function receives as input. This payload contains + // a validationData attribute, which provides the data that you assigned to + // the ClientMetadata parameter in your AdminInitiateAuth request. In your function + // code in AWS Lambda, you can process the validationData value to enhance your + // workflow for your specific needs. + // + // When you use the AdminInitiateAuth API action, Amazon Cognito also invokes + // the functions for the following triggers, but it does not provide the ClientMetadata + // value as input: + // + // * Post authentication + // + // * Custom message + // + // * Pre token generation + // + // * Create auth challenge + // + // * Define auth challenge + // + // * Verify auth challenge + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string `type:"map"` // Contextual data such as the user's device fingerprint, IP address, or location @@ -12898,10 +13148,12 @@ type AdminLinkProviderForUserInput struct { // respectively.
The ProviderAttributeValue for the user must be the same value // as the id, sub, or user_id value found in the social identity provider token. // - // For SAML, the ProviderAttributeNamecan be any value that matches a claim in the SAML assertion. If you wish - // to link SAML users based on the subject of the SAML assertion, you should - // map the subject to a claim through the SAML identity provider and submit - // that claim name as the ProviderAttributeName. If you set ProviderAttributeNameto Cognito_Subject + // For SAML, the ProviderAttributeName can be any value that matches a claim + // in the SAML assertion. If you wish to link SAML users based on the subject + // of the SAML assertion, you should map the subject to a claim through the + // SAML identity provider and submit that claim name as the ProviderAttributeName. + // If you set ProviderAttributeName to Cognito_Subject, Cognito will automatically + // parse the default unique identifier found in the subject from the SAML token. // // SourceUser is a required field SourceUser *ProviderUserIdentifierType `type:"structure" required:"true"` @@ -13414,6 +13666,37 @@ func (s AdminRemoveUserFromGroupOutput) GoString() string { type AdminResetUserPasswordInput struct { _ struct{} `type:"structure"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the AdminResetUserPassword API action, Amazon Cognito + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your AdminResetUserPassword request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The user pool ID for the user pool where you want to reset the user's password. // // UserPoolId is a required field @@ -13457,6 +13740,12 @@ func (s *AdminResetUserPasswordInput) Validate() error { return nil } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *AdminResetUserPasswordInput) SetClientMetadata(v map[string]*string) *AdminResetUserPasswordInput { + s.ClientMetadata = v + return s +} + // SetUserPoolId sets the UserPoolId field's value.
func (s *AdminResetUserPasswordInput) SetUserPoolId(v string) *AdminResetUserPasswordInput { s.UserPoolId = &v @@ -13513,7 +13802,6 @@ type AdminRespondToAuthChallengeInput struct { // * NEW_PASSWORD_REQUIRED: NEW_PASSWORD, any other required attributes, // USERNAME, SECRET_HASH (if app client is configured with client secret). // - // // The value of the USERNAME attribute must be the user's actual username, not // an alias (such as email address or phone number). To make this easier, the // AdminInitiateAuth response includes the actual username value in the USERNAMEUSER_ID_FOR_SRP // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the AdminRespondToAuthChallenge API action, Amazon + // Cognito invokes any functions that are assigned to the following triggers: + // pre sign-up, custom message, post authentication, user migration, pre token + // generation, define auth challenge, create auth challenge, and verify auth + // challenge response. When Amazon Cognito invokes any of these functions, it + // passes a JSON payload, which the function receives as input. This payload + // contains a clientMetadata attribute, which provides the data that you assigned + // to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. + // In your function code in AWS Lambda, you can process the clientMetadata value + // to enhance your workflow for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // Contextual data such as the user's device fingerprint, IP address, or location // used for evaluating the risk of an unexpected event by Amazon Cognito advanced // security. @@ -13610,6 +13931,12 @@ func (s *AdminRespondToAuthChallengeInput) SetClientId(v string) *AdminRespondTo return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *AdminRespondToAuthChallengeInput) SetClientMetadata(v map[string]*string) *AdminRespondToAuthChallengeInput { + s.ClientMetadata = v + return s +} + // SetContextData sets the ContextData field's value.
func (s *AdminRespondToAuthChallengeInput) SetContextData(v *ContextDataType) *AdminRespondToAuthChallengeInput { s.ContextData = v @@ -13776,14 +14103,21 @@ func (s AdminSetUserMFAPreferenceOutput) GoString() string { type AdminSetUserPasswordInput struct { _ struct{} `type:"structure"` + // The password for the user. + // // Password is a required field Password *string `min:"6" type:"string" required:"true" sensitive:"true"` + // True if the password is permanent, False if it is temporary. Permanent *bool `type:"boolean"` + // The user pool ID for the user pool where you want to set the user's password. + // // UserPoolId is a required field UserPoolId *string `min:"1" type:"string" required:"true"` + // The user name of the user whose password you wish to set. + // // Username is a required field Username *string `min:"1" type:"string" required:"true" sensitive:"true"` } @@ -13864,22 +14198,24 @@ func (s AdminSetUserPasswordOutput) GoString() string { return s.String() } -// Represents the request to set user settings as an administrator. +// You can use this parameter to set an MFA configuration that uses the SMS +// delivery medium. type AdminSetUserSettingsInput struct { _ struct{} `type:"structure"` - // Specifies the options for MFA (e.g., email or phone number). + // You can use this parameter only to set an SMS configuration that uses SMS + // for delivery. // // MFAOptions is a required field MFAOptions []*MFAOptionType `type:"list" required:"true"` - // The user pool ID for the user pool where you want to set the user's settings, - // such as MFA options. + // The ID of the user pool that contains the user that you are setting options + // for. // // UserPoolId is a required field UserPoolId *string `min:"1" type:"string" required:"true"` - // The user name of the user for whom you wish to set user settings. + // The user name of the user that you are setting options for. // // Username is a required field Username *string `min:"1" type:"string" required:"true" sensitive:"true"` @@ -14170,6 +14506,37 @@ func (s AdminUpdateDeviceStatusOutput) GoString() string { type AdminUpdateUserAttributesInput struct { _ struct{} `type:"structure"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your AdminUpdateUserAttributes request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. 
If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // An array of name-value pairs representing user attributes. // // For custom attributes, you must prepend the custom: prefix to the attribute @@ -14234,6 +14601,12 @@ func (s *AdminUpdateUserAttributesInput) Validate() error { return nil } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *AdminUpdateUserAttributesInput) SetClientMetadata(v map[string]*string) *AdminUpdateUserAttributesInput { + s.ClientMetadata = v + return s +} + // SetUserAttributes sets the UserAttributes field's value. func (s *AdminUpdateUserAttributesInput) SetUserAttributes(v []*AttributeType) *AdminUpdateUserAttributesInput { s.UserAttributes = v @@ -15113,6 +15486,37 @@ type ConfirmForgotPasswordInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito + // invokes the function that is assigned to the post confirmation trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your ConfirmForgotPassword request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The confirmation code sent by a user's request to retrieve a forgotten password. // For more information, see // @@ -15199,6 +15603,12 @@ func (s *ConfirmForgotPasswordInput) SetClientId(v string) *ConfirmForgotPasswor return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *ConfirmForgotPasswordInput) SetClientMetadata(v map[string]*string) *ConfirmForgotPasswordInput { + s.ClientMetadata = v + return s +} +
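// A minimal sketch of confirming a forgotten-password reset with the new
// ClientMetadata field, assuming an already-constructed service client; the
// client ID, username, code, password, and metadata key are hypothetical.
func exampleConfirmForgotPassword(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	_, err := svc.ConfirmForgotPassword(&cognitoidentityprovider.ConfirmForgotPasswordInput{
		ClientId:         aws.String("1example23456789"), // hypothetical app client ID
		Username:         aws.String("jdoe"),
		ConfirmationCode: aws.String("123456"),
		Password:         aws.String("NewPassw0rd!"),
		// Delivered to the post confirmation Lambda trigger as clientMetadata.
		ClientMetadata: map[string]*string{"channel": aws.String("mobile")},
	})
	return err
}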
// SetConfirmationCode sets the ConfirmationCode field's value. func (s *ConfirmForgotPasswordInput) SetConfirmationCode(v string) *ConfirmForgotPasswordInput { s.ConfirmationCode = &v @@ -15258,6 +15668,36 @@ type ConfirmSignUpInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes + // the function that is assigned to the post confirmation trigger. When Amazon + // Cognito invokes this function, it passes a JSON payload, which the function + // receives as input. This payload contains a clientMetadata attribute, which + // provides the data that you assigned to the ClientMetadata parameter in your + // ConfirmSignUp request. In your function code in AWS Lambda, you can process + // the clientMetadata value to enhance your workflow for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The confirmation code sent by a user's request to confirm registration. // // ConfirmationCode is a required field @@ -15339,6 +15779,12 @@ func (s *ConfirmSignUpInput) SetClientId(v string) *ConfirmSignUpInput { return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *ConfirmSignUpInput) SetClientMetadata(v map[string]*string) *ConfirmSignUpInput { + s.ClientMetadata = v + return s +} + // SetConfirmationCode sets the ConfirmationCode field's value. func (s *ConfirmSignUpInput) SetConfirmationCode(v string) *ConfirmSignUpInput { s.ConfirmationCode = &v @@ -15969,7 +16415,8 @@ type CreateUserPoolClientInput struct { AllowedOAuthFlowsUserPoolClient *bool `type:"boolean"` // A list of allowed OAuth scopes. Currently supported values are "phone", "email", - // "openid", and "Cognito". + // "openid", and "Cognito". In addition to these values, custom scopes created + // in Resource Servers are also supported. AllowedOAuthScopes []*string `type:"list"` // The Amazon Pinpoint analytics configuration for collecting metrics for this @@ -16017,7 +16464,28 @@ type CreateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // The explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []*string `type:"list"` // Boolean to specify whether you want to generate a secret for the user pool @@ -16027,6 +16495,44 @@ type CreateUserPoolClientInput struct { // A list of allowed logout URLs for the identity providers. LogoutURLs []*string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException if the + // user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of the following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` + // The read attributes. ReadAttributes []*string `type:"list"` @@ -16155,6 +16661,12 @@ func (s *CreateUserPoolClientInput) SetLogoutURLs(v []*string) *CreateUserPoolCl return s } +// SetPreventUserExistenceErrors sets the PreventUserExistenceErrors field's value. +func (s *CreateUserPoolClientInput) SetPreventUserExistenceErrors(v string) *CreateUserPoolClientInput { + s.PreventUserExistenceErrors = &v + return s +} + // SetReadAttributes sets the ReadAttributes field's value. func (s *CreateUserPoolClientInput) SetReadAttributes(v []*string) *CreateUserPoolClientInput { s.ReadAttributes = v
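// A minimal sketch of creating an app client with the new ALLOW_-prefixed
// ExplicitAuthFlows values and PreventUserExistenceErrors, using the enum
// constants defined later in this file; the pool ID and client name are
// hypothetical.
func exampleCreateUserPoolClient(svc *cognitoidentityprovider.CognitoIdentityProvider) (*cognitoidentityprovider.CreateUserPoolClientOutput, error) {
	return svc.CreateUserPoolClient(&cognitoidentityprovider.CreateUserPoolClientInput{
		UserPoolId: aws.String("us-west-2_EXAMPLE"),
		ClientName: aws.String("example-client"),
		// ALLOW_ values cannot be mixed with the deprecated un-prefixed values.
		ExplicitAuthFlows: aws.StringSlice([]string{
			cognitoidentityprovider.ExplicitAuthFlowsTypeAllowUserSrpAuth,
			cognitoidentityprovider.ExplicitAuthFlowsTypeAllowRefreshTokenAuth,
		}),
		// ENABLED masks user-existence-related errors during auth flows.
		PreventUserExistenceErrors: aws.String(cognitoidentityprovider.PreventUserExistenceErrorTypesEnabled),
	})
}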
@@ -16345,10 +16857,10 @@ type CreateUserPoolInput struct { // need permission to invoke a function. So you will need to make an extra call // to add permission for these event sources to invoke your Lambda function. // - // For more information on using the Lambda API to add permission, see AddPermission - // (https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html).
+ // For more information on using the Lambda API to add permission, see AddPermission + // (https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html). // - // For adding permission using the AWS CLI, see add-permission (https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html). + // For adding permission using the AWS CLI, see add-permission (https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html). LambdaConfig *LambdaConfigType `type:"structure"` // Specifies MFA configuration details. @@ -17968,12 +18480,14 @@ type EmailConfigurationType struct { // email functionality or your Amazon SES email configuration. Specify one of // the following values: // - // COGNITO_DEFAULTWhen Amazon Cognito emails your users, it uses its built-in - // email functionality. When you use the default option, Amazon Cognito allows - // only a limited number of emails each day for your user pool. For typical - // production environments, the default email limit is below the required delivery - // volume. To achieve a higher delivery volume, specify DEVELOPER to use your - // Amazon SES email configuration. + // COGNITO_DEFAULT + // + // When Amazon Cognito emails your users, it uses its built-in email functionality. + // When you use the default option, Amazon Cognito allows only a limited number + // of emails each day for your user pool. For typical production environments, + // the default email limit is below the required delivery volume. To achieve + // a higher delivery volume, specify DEVELOPER to use your Amazon SES email + // configuration. // // To look up the email delivery limit for the default option, see Limits in // Amazon Cognito (https://docs.aws.amazon.com/cognito/latest/developerguide/limits.html) @@ -17983,7 +18497,9 @@ type EmailConfigurationType struct { // the FROM address, provide the ARN of an Amazon SES verified email address // for the SourceArn parameter. // - // DEVELOPERWhen Amazon Cognito emails your users, it uses your Amazon SES configuration. + // DEVELOPER + // + // When Amazon Cognito emails your users, it uses your Amazon SES configuration. // Amazon Cognito calls Amazon SES on your behalf to send email from your verified // email address. When you use this option, the email delivery limits are the // same limits that apply to your Amazon SES verified email address in your @@ -18276,6 +18792,37 @@ type ForgotPasswordInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the ForgotPassword API action, Amazon Cognito invokes + // any functions that are assigned to the following triggers: pre sign-up, custom + // message, and user migration. When Amazon Cognito invokes any of these functions, + // it passes a JSON payload, which the function receives as input. This payload + // contains a clientMetadata attribute, which provides the data that you assigned + // to the ClientMetadata parameter in your ForgotPassword request. In your function + // code in AWS Lambda, you can process the clientMetadata value to enhance your + // workflow for your specific needs. 
+ // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // A keyed-hash message authentication code (HMAC) calculated using the secret // key of a user pool client and username plus the client ID in the message. SecretHash *string `min:"1" type:"string" sensitive:"true"` @@ -18339,6 +18886,12 @@ func (s *ForgotPasswordInput) SetClientId(v string) *ForgotPasswordInput { return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *ForgotPasswordInput) SetClientMetadata(v map[string]*string) *ForgotPasswordInput { + s.ClientMetadata = v + return s +} + // SetSecretHash sets the SecretHash field's value. func (s *ForgotPasswordInput) SetSecretHash(v string) *ForgotPasswordInput { s.SecretHash = &v @@ -18860,6 +19413,37 @@ type GetUserAttributeVerificationCodeInput struct { // // AttributeName is a required field AttributeName *string `min:"1" type:"string" required:"true"` + + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the GetUserAttributeVerificationCode API action, Amazon + // Cognito invokes the function that is assigned to the custom message trigger. + // When Amazon Cognito invokes this function, it passes a JSON payload, which + // the function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your GetUserAttributeVerificationCode request. In your function code in + // AWS Lambda, you can process the clientMetadata value to enhance your workflow + // for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` }
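// A minimal sketch of requesting an attribute verification code with the new
// ClientMetadata field, assuming a valid access token is already in hand; the
// attribute name and metadata key are hypothetical.
func exampleGetAttributeVerificationCode(svc *cognitoidentityprovider.CognitoIdentityProvider, accessToken string) error {
	_, err := svc.GetUserAttributeVerificationCode(&cognitoidentityprovider.GetUserAttributeVerificationCodeInput{
		AccessToken:   aws.String(accessToken),
		AttributeName: aws.String("email"),
		// Surfaces in the custom message trigger; never put secrets here.
		ClientMetadata: map[string]*string{"brand": aws.String("example")},
	})
	return err
}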
// String returns the string representation @@ -18903,6 +19487,12 @@ func (s *GetUserAttributeVerificationCodeInput) SetAttributeName(v string) *GetU return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *GetUserAttributeVerificationCodeInput) SetClientMetadata(v map[string]*string) *GetUserAttributeVerificationCodeInput { + s.ClientMetadata = v + return s +} + // The verification code response returned by the server response to get the // user attribute verification code. type GetUserAttributeVerificationCodeOutput struct { @@ -18974,7 +19564,11 @@ func (s *GetUserInput) SetAccessToken(v string) *GetUserInput { type GetUserOutput struct { _ struct{} `type:"structure"` - // Specifies the options for MFA (e.g., email or phone number). + // This response parameter is no longer supported. It provides information only + // about SMS MFA configurations. It doesn't provide information about TOTP software + // token MFA configurations. To look up information about either type of MFA + // configuration, use the GetUserResponse$UserMFASettingList response + // instead. MFAOptions []*MFAOptionType `type:"list"` // The user's preferred MFA setting. @@ -18988,7 +19582,8 @@ type GetUserOutput struct { // UserAttributes is a required field UserAttributes []*AttributeType `type:"list" required:"true"` - // The list of the user's MFA settings. + // The MFA options that are enabled for the user. The possible values in this + // list are SMS_MFA and SOFTWARE_TOKEN_MFA. UserMFASettingList []*string `type:"list"` // The user name of the user you wish to retrieve from the get user request. @@ -19081,7 +19676,14 @@ func (s *GetUserPoolMfaConfigInput) SetUserPoolId(v string) *GetUserPoolMfaConfi type GetUserPoolMfaConfigOutput struct { _ struct{} `type:"structure"` - // The multi-factor (MFA) configuration. + // The multi-factor (MFA) configuration. Valid values include: + // + // * OFF MFA will not be used for any users. + // + // * ON MFA is required for all users to sign in. + // + // * OPTIONAL MFA will be required only for individual users who have an + // MFA factor enabled. MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"` // The SMS text message multi-factor (MFA) configuration. @@ -19420,6 +20022,11 @@ type InitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. + // // ADMIN_NO_SRP_AUTH is not a valid value. // // AuthFlow is a required field @@ -19443,9 +20050,59 @@ type InitiateAuthInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` - // This is a random key-value pair map which can contain any key and will be - // passed to your PreAuthentication Lambda trigger as-is. It can be used to - // implement additional validations around authentication. + // A map of custom key-value pairs that you can provide as input for certain + // custom workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the InitiateAuth API action, Amazon Cognito invokes + // the AWS Lambda functions that are specified for various triggers.
The ClientMetadata + // value is passed as input to the functions for only the following triggers: + // + // * Pre signup + // + // * Pre authentication + // + // * User migration + // + // When Amazon Cognito invokes the functions for these triggers, it passes a + // JSON payload, which the function receives as input. This payload contains + // a validationData attribute, which provides the data that you assigned to + // the ClientMetadata parameter in your InitiateAuth request. In your function + // code in AWS Lambda, you can process the validationData value to enhance your + // workflow for your specific needs. + // + // When you use the InitiateAuth API action, Amazon Cognito also invokes the + // functions for the following triggers, but it does not provide the ClientMetadata + // value as input: + // + // * Post authentication + // + // * Custom message + // + // * Pre token generation + // + // * Create auth challenge + // + // * Define auth challenge + // + // * Verify auth challenge + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string `type:"map"` // Contextual data such as the user's device fingerprint, IP address, or location @@ -20622,7 +21279,7 @@ type ListUsersInput struct { // attributes are returned. AttributesToGet []*string `type:"list"` - // A filter string of the form "AttributeNameFilter-Type "AttributeValue"". + // A filter string of the form "AttributeName Filter-Type "AttributeValue"". // Quotation marks within the filter string must be escaped using the backslash // (\) character. For example, "family_name = \"Reddy\"". // @@ -20661,8 +21318,8 @@ // // Custom attributes are not searchable. // - // For more information, see Searching for Users Using the ListUsers API (http://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-using-listusers-api) - // and Examples of Using the ListUsers API (http://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-listusers-api-examples) + // For more information, see Searching for Users Using the ListUsers API (https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-using-listusers-api) + // and Examples of Using the ListUsers API (https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-listusers-api-examples) // in the Amazon Cognito Developer Guide. Filter *string `type:"string"`
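// A minimal sketch of the filter syntax described above, reusing the doc's own
// family_name example; the pool ID is hypothetical and fmt is assumed imported.
func exampleListUsers(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	out, err := svc.ListUsers(&cognitoidentityprovider.ListUsersInput{
		UserPoolId: aws.String("us-west-2_EXAMPLE"),
		Filter:     aws.String(`family_name = "Reddy"`), // exact-match filter
		Limit:      aws.Int64(10),
	})
	if err != nil {
		return err
	}
	fmt.Println("matched users:", len(out.Users))
	return nil
}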
@@ -20772,14 +21429,22 @@ func (s *ListUsersOutput) SetUsers(v []*UserType) *ListUsersOutput { return s } -// Specifies the different settings for multi-factor authentication (MFA). +// This data type is no longer supported. You can use it only for SMS MFA configurations. +// You can't use it for TOTP software token MFA configurations. +// +// To set either type of MFA configuration, use the AdminSetUserMFAPreference +// or SetUserMFAPreference actions. +// +// To look up information about either type of MFA configuration, use the AdminGetUserResponse$UserMFASettingList +// or GetUserResponse$UserMFASettingList responses. type MFAOptionType struct { _ struct{} `type:"structure"` - // The attribute name of the MFA option type. + // The attribute name of the MFA option type. The only valid value is phone_number. AttributeName *string `min:"1" type:"string"` - // The delivery medium (email message or SMS message) to send the MFA code. + // The delivery medium to send the MFA code. You can use this parameter to set + // only the SMS delivery medium value. DeliveryMedium *string `type:"string" enum:"DeliveryMediumType"` } @@ -21143,6 +21808,13 @@ type PasswordPolicyType struct { // users to use at least one uppercase letter in their password. RequireUppercase *bool `type:"boolean"` + // The number of days a temporary password is valid in the password policy + // you have set. If the user does not sign in during this time, their password + // will need to be reset by an administrator. + // + // When you set TemporaryPasswordValidityDays for a user pool, you will no longer + // be able to set the deprecated UnusedAccountValidityDays value for that user + // pool. TemporaryPasswordValidityDays *int64 `type:"integer"` } @@ -21324,6 +21996,37 @@ type ResendConfirmationCodeInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the ResendConfirmationCode API action, Amazon Cognito + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your ResendConfirmationCode request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value.
+ // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // A keyed-hash message authentication code (HMAC) calculated using the secret // key of a user pool client and username plus the client ID in the message. SecretHash *string `min:"1" type:"string" sensitive:"true"` @@ -21386,6 +22089,12 @@ func (s *ResendConfirmationCodeInput) SetClientId(v string) *ResendConfirmationC return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *ResendConfirmationCodeInput) SetClientMetadata(v map[string]*string) *ResendConfirmationCodeInput { + s.ClientMetadata = v + return s +} + // SetSecretHash sets the SecretHash field's value. func (s *ResendConfirmationCodeInput) SetSecretHash(v string) *ResendConfirmationCodeInput { s.SecretHash = &v @@ -21558,15 +22267,24 @@ type RespondToAuthChallengeInput struct { // The challenge responses. These are inputs corresponding to the value of ChallengeName, // for example: // - // * SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if app client is configured - // with client secret). + // SECRET_HASH (if app client is configured with client secret) applies to all + // inputs below (including SOFTWARE_TOKEN_MFA). + // + // * SMS_MFA: SMS_MFA_CODE, USERNAME. // // * PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, - // TIMESTAMP, USERNAME, SECRET_HASH (if app client is configured with client - // secret). + // TIMESTAMP, USERNAME. // // * NEW_PASSWORD_REQUIRED: NEW_PASSWORD, any other required attributes, - // USERNAME, SECRET_HASH (if app client is configured with client secret). + // USERNAME. + // + // * SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are required + // attributes. + // + // * DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH). + // + // * DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER + // requires plus DEVICE_KEY. ChallengeResponses map[string]*string `type:"map"` // The app client ID. @@ -21574,6 +22292,38 @@ ... ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito + // invokes any functions that are assigned to the following triggers: post authentication, + // pre token generation, define auth challenge, create auth challenge, and verify + // auth challenge. When Amazon Cognito invokes any of these functions, it passes + // a JSON payload, which the function receives as input. This payload contains + // a clientMetadata attribute, which provides the data that you assigned to + // the ClientMetadata parameter in your RespondToAuthChallenge request. In your + // function code in AWS Lambda, you can process the clientMetadata value to + // enhance your workflow for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The session which should be passed both ways in challenge-response calls // to the service. If InitiateAuth or RespondToAuthChallenge API call determines // that the caller needs to go through another challenge, they return a session @@ -21643,6 +22393,12 @@ func (s *RespondToAuthChallengeInput) SetClientId(v string) *RespondToAuthChalle return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *RespondToAuthChallengeInput) SetClientMetadata(v map[string]*string) *RespondToAuthChallengeInput { + s.ClientMetadata = v + return s +} +
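// A minimal sketch of answering an SMS_MFA challenge per the inputs listed
// above, assuming no client secret (so SECRET_HASH is omitted); the client ID,
// username, and session token are hypothetical.
func exampleRespondToSmsMfa(svc *cognitoidentityprovider.CognitoIdentityProvider, sessionToken, code string) (*cognitoidentityprovider.RespondToAuthChallengeOutput, error) {
	return svc.RespondToAuthChallenge(&cognitoidentityprovider.RespondToAuthChallengeInput{
		ClientId:      aws.String("1example23456789"),
		ChallengeName: aws.String(cognitoidentityprovider.ChallengeNameTypeSmsMfa),
		Session:       aws.String(sessionToken), // returned by InitiateAuth
		ChallengeResponses: map[string]*string{
			"USERNAME":     aws.String("jdoe"),
			"SMS_MFA_CODE": aws.String(code),
		},
	})
}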
// SetSession sets the Session field's value. func (s *RespondToAuthChallengeInput) SetSession(v string) *RespondToAuthChallengeInput { s.Session = &v @@ -21818,14 +22574,14 @@ func (s *RiskExceptionConfigurationType) SetSkippedIPRangeList(v []*string) *Ris return s } -// The SMS multi-factor authentication (MFA) settings type. +// The type used for enabling SMS MFA at the user level. type SMSMfaSettingsType struct { _ struct{} `type:"structure"` // Specifies whether SMS text message MFA is enabled. Enabled *bool `type:"boolean"` - // The preferred MFA method. + // Specifies whether SMS is the preferred MFA method. PreferredMfa *bool `type:"boolean"` } @@ -22173,7 +22929,7 @@ func (s *SetUICustomizationOutput) SetUICustomization(v *UICustomizationType) *S type SetUserMFAPreferenceInput struct { _ struct{} `type:"structure"` - // The access token. + // The access token for the user. // // AccessToken is a required field AccessToken *string `type:"string" required:"true" sensitive:"true"` @@ -22243,7 +22999,14 @@ func (s SetUserMFAPreferenceOutput) GoString() string { type SetUserPoolMfaConfigInput struct { _ struct{} `type:"structure"` - // The MFA configuration. + // The MFA configuration. Valid values include: + // + // * OFF MFA will not be used for any users. + // + // * ON MFA is required for all users to sign in. + // + // * OPTIONAL MFA will be required only for individual users who have an + // MFA factor enabled. MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"` // The SMS text message MFA configuration. @@ -22316,7 +23079,14 @@ func (s *SetUserPoolMfaConfigInput) SetUserPoolId(v string) *SetUserPoolMfaConfi type SetUserPoolMfaConfigOutput struct { _ struct{} `type:"structure"` - // The MFA configuration. + // The MFA configuration. Valid values include: + // + // * OFF MFA will not be used for any users. + // + // * ON MFA is required for all users to sign in. + // + // * OPTIONAL MFA will be required only for individual users who have an + // MFA factor enabled. MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"` // The SMS text message MFA configuration.
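// A minimal sketch of switching a pool to OPTIONAL MFA using the enum constant
// for the UserPoolMfaType values listed above; the pool ID is hypothetical.
func exampleSetOptionalMfa(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	_, err := svc.SetUserPoolMfaConfig(&cognitoidentityprovider.SetUserPoolMfaConfigInput{
		UserPoolId: aws.String("us-west-2_EXAMPLE"),
		// OPTIONAL: only users who have an MFA factor enabled are challenged.
		MfaConfiguration: aws.String(cognitoidentityprovider.UserPoolMfaTypeOptional),
	})
	return err
}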
@@ -22363,7 +23133,8 @@ type SetUserSettingsInput struct { // AccessToken is a required field AccessToken *string `type:"string" required:"true" sensitive:"true"` - // Specifies the options for MFA (e.g., email or phone number). + // You can use this parameter only to set an SMS configuration that uses SMS + // for delivery. // // MFAOptions is a required field MFAOptions []*MFAOptionType `type:"list" required:"true"` @@ -22445,6 +23216,37 @@ type SignUpInput struct { // ClientId is a required field ClientId *string `min:"1" type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the SignUp API action, Amazon Cognito invokes any + // functions that are assigned to the following triggers: pre sign-up, custom + // message, and post confirmation. When Amazon Cognito invokes any of these + // functions, it passes a JSON payload, which the function receives as input. + // This payload contains a clientMetadata attribute, which provides the data + // that you assigned to the ClientMetadata parameter in your SignUp request. + // In your function code in AWS Lambda, you can process the clientMetadata value + // to enhance your workflow for your specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // The password of the user you wish to register. // // Password is a required field @@ -22547,6 +23349,12 @@ func (s *SignUpInput) SetClientId(v string) *SignUpInput { return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *SignUpInput) SetClientMetadata(v map[string]*string) *SignUpInput { + s.ClientMetadata = v + return s +} +
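// A minimal sketch of self-service sign-up with the new ClientMetadata field;
// the client ID, credentials, email, and metadata key are hypothetical.
func exampleSignUp(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	_, err := svc.SignUp(&cognitoidentityprovider.SignUpInput{
		ClientId: aws.String("1example23456789"),
		Username: aws.String("jdoe"),
		Password: aws.String("InitialPassw0rd!"),
		UserAttributes: []*cognitoidentityprovider.AttributeType{
			{Name: aws.String("email"), Value: aws.String("jdoe@example.com")},
		},
		// Reaches the pre sign-up, custom message, and post confirmation triggers.
		ClientMetadata: map[string]*string{"campaign": aws.String("spring-launch")},
	})
	return err
}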
// SetPassword sets the Password field's value. func (s *SignUpInput) SetPassword(v string) *SignUpInput { s.Password = &v @@ -22630,15 +23438,25 @@ func (s *SignUpOutput) SetUserSub(v string) *SignUpOutput { return s } -// The SMS configuration type. +// The SMS configuration type that includes the settings that the Cognito User +// Pool needs to call the Amazon SNS service to send an SMS message from your +// AWS account. The Cognito User Pool makes the request to the Amazon SNS Service +// by using an AWS IAM role that you provide for your AWS account. type SmsConfigurationType struct { _ struct{} `type:"structure"` - // The external ID. + // The external ID is a value that we recommend you use to add security to your + // IAM role, which is used to call Amazon SNS to send SMS messages for your user + // pool. If you provide an ExternalId, the Cognito User Pool will include it + // when attempting to assume your IAM role, so that you can set your role's trust + // policy to require the ExternalId. If you use the Cognito Management Console + // to create a role for SMS MFA, Cognito will create a role with the required + // permissions and a trust policy that demonstrates use of the ExternalId. ExternalId *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service - // (SNS) caller. + // (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito + // will use to send SMS messages. // // SnsCallerArn is a required field SnsCallerArn *string `min:"20" type:"string" required:"true"` @@ -22686,7 +23504,10 @@ func (s *SmsConfigurationType) SetSnsCallerArn(v string) *SmsConfigurationType { type SmsMfaConfigType struct { _ struct{} `type:"structure"` - // The SMS authentication message. + // The SMS authentication message that will be sent to users with the code they + // need to sign in. The message must contain the ‘{####}’ placeholder, which + // will be replaced with the code. If the message is not included, a default + // message will be used. SmsAuthenticationMessage *string `min:"6" type:"string"` // The SMS configuration. @@ -22764,7 +23585,7 @@ type SoftwareTokenMfaSettingsType struct { // Specifies whether software token MFA is enabled. Enabled *bool `type:"boolean"` - // The preferred MFA method. + // Specifies whether software token MFA is the preferred MFA method. PreferredMfa *bool `type:"boolean"` } @@ -23000,7 +23821,9 @@ type TagResourceInput struct { ResourceArn *string `min:"20" type:"string" required:"true"` // The tags to assign to the user pool. - Tags map[string]*string `type:"map"` + // + // Tags is a required field + Tags map[string]*string `type:"map" required:"true"` } // String returns the string representation @@ -23022,6 +23845,9 @@ func (s *TagResourceInput) Validate() error { if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } if invalidParams.Len() > 0 { return invalidParams @@ -23144,7 +23970,9 @@ type UntagResourceInput struct { ResourceArn *string `min:"20" type:"string" required:"true"` // The keys of the tags to remove from the user pool. - TagKeys []*string `type:"list"` + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` } // String returns the string representation @@ -23166,6 +23994,9 @@ func (s *UntagResourceInput) Validate() error { if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } if invalidParams.Len() > 0 { return invalidParams
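// A minimal sketch of tagging a user pool now that Tags is required; omitting
// it fails client-side validation with an ErrParamRequired before any request
// is sent. The ARN and tag values are hypothetical.
func exampleTagUserPool(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	_, err := svc.TagResource(&cognitoidentityprovider.TagResourceInput{
		ResourceArn: aws.String("arn:aws:cognito-idp:us-west-2:123456789012:userpool/us-west-2_EXAMPLE"),
		Tags:        map[string]*string{"environment": aws.String("dev")},
	})
	return err
}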
@@ -23747,6 +24578,37 @@ type UpdateUserAttributesInput struct { // AccessToken is a required field AccessToken *string `type:"string" required:"true" sensitive:"true"` + // A map of custom key-value pairs that you can provide as input for any custom + // workflows that this action triggers. + // + // You create custom workflows by assigning AWS Lambda functions to user pool + // triggers. When you use the UpdateUserAttributes API action, Amazon Cognito + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your UpdateUserAttributes request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. + // + // For more information, see Customizing User Pool Workflows with Lambda Triggers + // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) + // in the Amazon Cognito Developer Guide. + // + // Take the following limitations into consideration when you use the ClientMetadata + // parameter: + // + // * Amazon Cognito does not store the ClientMetadata value. This data is + // available only to AWS Lambda triggers that are assigned to a user pool + // to support custom workflows. If your user pool configuration does not + // include triggers, the ClientMetadata parameter serves no purpose. + // + // * Amazon Cognito does not validate the ClientMetadata value. + // + // * Amazon Cognito does not encrypt the ClientMetadata value, so don't + // use it to provide sensitive information. + ClientMetadata map[string]*string `type:"map"` + // An array of name-value pairs representing user attributes. // // For custom attributes, you must prepend the custom: prefix to the attribute @@ -23798,6 +24660,12 @@ func (s *UpdateUserAttributesInput) SetAccessToken(v string) *UpdateUserAttribut return s } +// SetClientMetadata sets the ClientMetadata field's value. +func (s *UpdateUserAttributesInput) SetClientMetadata(v map[string]*string) *UpdateUserAttributesInput { + s.ClientMetadata = v + return s +} + // SetUserAttributes sets the UserAttributes field's value. func (s *UpdateUserAttributesInput) SetUserAttributes(v []*AttributeType) *UpdateUserAttributesInput { s.UserAttributes = v @@ -23843,7 +24711,8 @@ type UpdateUserPoolClientInput struct { AllowedOAuthFlowsUserPoolClient *bool `type:"boolean"` // A list of allowed OAuth scopes. Currently supported values are "phone", "email", - // "openid", and "Cognito". + // "openid", and "Cognito". In addition to these values, custom scopes created + // in Resource Servers are also supported. AllowedOAuthScopes []*string `type:"list"` // The Amazon Pinpoint analytics configuration for collecting metrics for this @@ -23894,12 +24763,71 @@ type UpdateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // Explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication.
+ // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []*string `type:"list"` // A list of allowed logout URLs for the identity providers. LogoutURLs []*string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException if the + // user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of the following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` + // The read-only attributes of the user pool. ReadAttributes []*string `type:"list"` @@ -24024,6 +24952,12 @@ func (s *UpdateUserPoolClientInput) SetLogoutURLs(v []*string) *UpdateUserPoolCl return s } +// SetPreventUserExistenceErrors sets the PreventUserExistenceErrors field's value. +func (s *UpdateUserPoolClientInput) SetPreventUserExistenceErrors(v string) *UpdateUserPoolClientInput { + s.PreventUserExistenceErrors = &v + return s +} + // SetReadAttributes sets the ReadAttributes field's value. func (s *UpdateUserPoolClientInput) SetReadAttributes(v []*string) *UpdateUserPoolClientInput { s.ReadAttributes = v
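// A minimal sketch of opting an existing app client into the ENABLED
// user-existence-error behavior ahead of the 2020 default change; the pool and
// client IDs are hypothetical.
func examplePreventUserExistenceErrors(svc *cognitoidentityprovider.CognitoIdentityProvider) error {
	_, err := svc.UpdateUserPoolClient(&cognitoidentityprovider.UpdateUserPoolClientInput{
		UserPoolId:                 aws.String("us-west-2_EXAMPLE"),
		ClientId:                   aws.String("1example23456789"),
		PreventUserExistenceErrors: aws.String(cognitoidentityprovider.PreventUserExistenceErrorTypesEnabled),
	})
	return err
}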
@@ -24726,7 +25660,8 @@ type UserPoolClientType struct { AllowedOAuthFlowsUserPoolClient *bool `type:"boolean"` // A list of allowed OAuth scopes. Currently supported values are "phone", "email", - // "openid", and "Cognito". + // "openid", and "Cognito". In addition to these values, custom scopes created + // in Resource Servers are also supported. AllowedOAuthScopes []*string `type:"list"` // The Amazon Pinpoint analytics configuration for the user pool client. @@ -24780,7 +25715,28 @@ type UserPoolClientType struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // The explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []*string `type:"list"` // The date the user pool client was last modified. @@ -24789,6 +25745,44 @@ type UserPoolClientType struct { // A list of allowed logout URLs for the identity providers. LogoutURLs []*string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException if the + // user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of the following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` + // The read-only attributes. ReadAttributes []*string `type:"list"` @@ -24895,6 +25889,12 @@ func (s *UserPoolClientType) SetLogoutURLs(v []*string) *UserPoolClientType { return s } +// SetPreventUserExistenceErrors sets the PreventUserExistenceErrors field's value. +func (s *UserPoolClientType) SetPreventUserExistenceErrors(v string) *UserPoolClientType { + s.PreventUserExistenceErrors = &v + return s +} + // SetReadAttributes sets the ReadAttributes field's value.
func (s *UserPoolClientType) SetReadAttributes(v []*string) *UserPoolClientType { s.ReadAttributes = v @@ -25777,6 +26777,9 @@ const ( // AuthFlowTypeUserPasswordAuth is a AuthFlowType enum value AuthFlowTypeUserPasswordAuth = "USER_PASSWORD_AUTH" + + // AuthFlowTypeAdminUserPasswordAuth is a AuthFlowType enum value + AuthFlowTypeAdminUserPasswordAuth = "ADMIN_USER_PASSWORD_AUTH" ) const ( @@ -25923,6 +26926,21 @@ const ( // ExplicitAuthFlowsTypeUserPasswordAuth is a ExplicitAuthFlowsType enum value ExplicitAuthFlowsTypeUserPasswordAuth = "USER_PASSWORD_AUTH" + + // ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth is a ExplicitAuthFlowsType enum value + ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth = "ALLOW_ADMIN_USER_PASSWORD_AUTH" + + // ExplicitAuthFlowsTypeAllowCustomAuth is a ExplicitAuthFlowsType enum value + ExplicitAuthFlowsTypeAllowCustomAuth = "ALLOW_CUSTOM_AUTH" + + // ExplicitAuthFlowsTypeAllowUserPasswordAuth is a ExplicitAuthFlowsType enum value + ExplicitAuthFlowsTypeAllowUserPasswordAuth = "ALLOW_USER_PASSWORD_AUTH" + + // ExplicitAuthFlowsTypeAllowUserSrpAuth is a ExplicitAuthFlowsType enum value + ExplicitAuthFlowsTypeAllowUserSrpAuth = "ALLOW_USER_SRP_AUTH" + + // ExplicitAuthFlowsTypeAllowRefreshTokenAuth is a ExplicitAuthFlowsType enum value + ExplicitAuthFlowsTypeAllowRefreshTokenAuth = "ALLOW_REFRESH_TOKEN_AUTH" ) const ( @@ -25969,6 +26987,14 @@ const ( OAuthFlowTypeClientCredentials = "client_credentials" ) +const ( + // PreventUserExistenceErrorTypesLegacy is a PreventUserExistenceErrorTypes enum value + PreventUserExistenceErrorTypesLegacy = "LEGACY" + + // PreventUserExistenceErrorTypesEnabled is a PreventUserExistenceErrorTypes enum value + PreventUserExistenceErrorTypesEnabled = "ENABLED" +) + const ( // RiskDecisionTypeNoRisk is a RiskDecisionType enum value RiskDecisionTypeNoRisk = "NoRisk" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go index 68efbd80b6e..2c512449a10 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentityprovider.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentityProvider { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentityProvider {
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentityProvider {
 	svc := &CognitoIdentityProvider{
 		Client: client.New(
 			cfg,
@@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 				ServiceID:     ServiceID,
 				SigningName:   signingName,
 				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
 				Endpoint:      endpoint,
 				APIVersion:    "2016-04-18",
 				JSONVersion:   "1.1",
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
index ccacc07da69..8d209237b92 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
@@ -63,9 +63,9 @@ func (c *ConfigService) BatchGetAggregateResourceConfigRequest(input *BatchGetAg
 // resources, the operation returns an empty unprocessedResourceIdentifiers
 // list.
 //
-// The API does not return results for deleted resources.
+// * The API does not return results for deleted resources.
 //
-// The API does not return tags and relationships.
+// * The API does not return tags and relationships.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -152,10 +152,10 @@ func (c *ConfigService) BatchGetResourceConfigRequest(input *BatchGetResourceCon
 // current request. If there are no unprocessed resources, the operation returns
 // an empty unprocessedResourceKeys list.
 //
-// The API does not return results for deleted resources.
+// * The API does not return results for deleted resources.
 //
-// The API does not return any tags for the requested resources. This information
-// is filtered out of the supplementaryConfiguration section of the API response.
+// * The API does not return any tags for the requested resources. This information
+// is filtered out of the supplementaryConfiguration section of the API response.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -342,8 +342,24 @@ func (c *ConfigService) DeleteConfigRuleRequest(input *DeleteConfigRuleInput) (r
 // rule names are correct and try again.
 //
 // * ErrCodeResourceInUseException "ResourceInUseException"
-// The rule is currently being deleted or the rule is deleting your evaluation
-// results. Try your request again later.
+// You see this exception in the following cases:
+//
+// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your
+// request again later.
+//
+// * For DeleteConfigRule API, the rule is deleting your evaluation results.
+// Try your request again later.
+//
+// * For DeleteConfigRule API, a remediation action is associated with the
+// rule and AWS Config cannot delete this rule. Delete the remediation action
+// associated with the rule before deleting the rule and try your request
+// again later.
+//
+// * For PutOrganizationConfigRule, organization config rule deletion is
+// in progress. Try your request again later.
+//
+// * For DeleteOrganizationConfigRule, organization config rule creation
+// is in progress.
Try your request again later.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteConfigRule
 func (c *ConfigService) DeleteConfigRule(input *DeleteConfigRuleInput) (*DeleteConfigRuleOutput, error) {
@@ -687,8 +703,24 @@ func (c *ConfigService) DeleteEvaluationResultsRequest(input *DeleteEvaluationRe
 // rule names are correct and try again.
 //
 // * ErrCodeResourceInUseException "ResourceInUseException"
-// The rule is currently being deleted or the rule is deleting your evaluation
-// results. Try your request again later.
+// You see this exception in the following cases:
+//
+// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your
+// request again later.
+//
+// * For DeleteConfigRule API, the rule is deleting your evaluation results.
+// Try your request again later.
+//
+// * For DeleteConfigRule API, a remediation action is associated with the
+// rule and AWS Config cannot delete this rule. Delete the remediation action
+// associated with the rule before deleting the rule and try your request
+// again later.
+//
+// * For PutOrganizationConfigRule, organization config rule deletion is
+// in progress. Try your request again later.
+//
+// * For DeleteOrganizationConfigRule, organization config rule creation
+// is in progress. Try your request again later.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteEvaluationResults
 func (c *ConfigService) DeleteEvaluationResults(input *DeleteEvaluationResultsInput) (*DeleteEvaluationResultsOutput, error) {
@@ -712,6 +744,119 @@ func (c *ConfigService) DeleteEvaluationResultsWithContext(ctx aws.Context, inpu
 	return out, req.Send()
 }
 
+const opDeleteOrganizationConfigRule = "DeleteOrganizationConfigRule"
+
+// DeleteOrganizationConfigRuleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteOrganizationConfigRule operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteOrganizationConfigRule for more information on using the DeleteOrganizationConfigRule
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteOrganizationConfigRuleRequest method.
+//    req, resp := client.DeleteOrganizationConfigRuleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteOrganizationConfigRule
+func (c *ConfigService) DeleteOrganizationConfigRuleRequest(input *DeleteOrganizationConfigRuleInput) (req *request.Request, output *DeleteOrganizationConfigRuleOutput) {
+	op := &request.Operation{
+		Name:       opDeleteOrganizationConfigRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteOrganizationConfigRuleInput{}
+	}
+
+	output = &DeleteOrganizationConfigRuleOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteOrganizationConfigRule API operation for AWS Config.
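// Illustrative sketch, not part of the generated SDK: a minimal call to the
// operation documented below. The rule name is a placeholder and sess is an
// initialized *session.Session.
//
//    svc := configservice.New(sess)
//    _, err := svc.DeleteOrganizationConfigRule(&configservice.DeleteOrganizationConfigRuleInput{
//        OrganizationConfigRuleName: aws.String("example-org-rule"),
//    })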
+//
+// Deletes the specified organization config rule and all of its evaluation
+// results from all member accounts in that organization. Only a master account
+// can delete an organization config rule.
+//
+// AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion
+// is complete. You cannot update a rule while it is in this state.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Config's
+// API operation DeleteOrganizationConfigRule for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchOrganizationConfigRuleException "NoSuchOrganizationConfigRuleException"
+// You specified one or more organization config rules that do not exist.
+//
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// You see this exception in the following cases:
+//
+// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your
+// request again later.
+//
+// * For DeleteConfigRule API, the rule is deleting your evaluation results.
+// Try your request again later.
+//
+// * For DeleteConfigRule API, a remediation action is associated with the
+// rule and AWS Config cannot delete this rule. Delete the remediation action
+// associated with the rule before deleting the rule and try your request
+// again later.
+//
+// * For PutOrganizationConfigRule, organization config rule deletion is
+// in progress. Try your request again later.
+//
+// * For DeleteOrganizationConfigRule, organization config rule creation
+// is in progress. Try your request again later.
+//
+// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException"
+// For PutConfigurationAggregator API, no permission to call EnableAWSServiceAccess
+// API.
+//
+// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs
+// are called from member accounts. All APIs must be called from organization
+// master account.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteOrganizationConfigRule
+func (c *ConfigService) DeleteOrganizationConfigRule(input *DeleteOrganizationConfigRuleInput) (*DeleteOrganizationConfigRuleOutput, error) {
+	req, out := c.DeleteOrganizationConfigRuleRequest(input)
+	return out, req.Send()
+}
+
+// DeleteOrganizationConfigRuleWithContext is the same as DeleteOrganizationConfigRule with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteOrganizationConfigRule for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ConfigService) DeleteOrganizationConfigRuleWithContext(ctx aws.Context, input *DeleteOrganizationConfigRuleInput, opts ...request.Option) (*DeleteOrganizationConfigRuleOutput, error) {
+	req, out := c.DeleteOrganizationConfigRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + const opDeletePendingAggregationRequest = "DeletePendingAggregationRequest" // DeletePendingAggregationRequestRequest generates a "aws/request.Request" representing the @@ -852,6 +997,10 @@ func (c *ConfigService) DeleteRemediationConfigurationRequest(input *DeleteRemed // * ErrCodeNoSuchRemediationConfigurationException "NoSuchRemediationConfigurationException" // You specified an AWS Config rule without a remediation configuration. // +// * ErrCodeRemediationInProgressException "RemediationInProgressException" +// Remediation action is in progress. You can either cancel execution in AWS +// Systems Manager or wait and try again later. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteRemediationConfiguration func (c *ConfigService) DeleteRemediationConfiguration(input *DeleteRemediationConfigurationInput) (*DeleteRemediationConfigurationOutput, error) { req, out := c.DeleteRemediationConfigurationRequest(input) @@ -874,6 +1023,85 @@ func (c *ConfigService) DeleteRemediationConfigurationWithContext(ctx aws.Contex return out, req.Send() } +const opDeleteRemediationExceptions = "DeleteRemediationExceptions" + +// DeleteRemediationExceptionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRemediationExceptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRemediationExceptions for more information on using the DeleteRemediationExceptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRemediationExceptionsRequest method. +// req, resp := client.DeleteRemediationExceptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteRemediationExceptions +func (c *ConfigService) DeleteRemediationExceptionsRequest(input *DeleteRemediationExceptionsInput) (req *request.Request, output *DeleteRemediationExceptionsOutput) { + op := &request.Operation{ + Name: opDeleteRemediationExceptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRemediationExceptionsInput{} + } + + output = &DeleteRemediationExceptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRemediationExceptions API operation for AWS Config. +// +// Deletes one or more remediation exceptions mentioned in the resource keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation DeleteRemediationExceptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchRemediationExceptionException "NoSuchRemediationExceptionException" +// You tried to delete a remediation exception that does not exist. 
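// Illustrative sketch, not part of the generated SDK: deleting the remediation
// exception for a single resource. The rule name, resource type, and resource
// ID are placeholders, and the RemediationExceptionResourceKey shape is assumed
// from this SDK version.
//
//    _, err := svc.DeleteRemediationExceptions(&configservice.DeleteRemediationExceptionsInput{
//        ConfigRuleName: aws.String("example-rule"),
//        ResourceKeys: []*configservice.RemediationExceptionResourceKey{{
//            ResourceId:   aws.String("i-0123456789abcdef0"),
//            ResourceType: aws.String("AWS::EC2::Instance"),
//        }},
//    })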
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteRemediationExceptions +func (c *ConfigService) DeleteRemediationExceptions(input *DeleteRemediationExceptionsInput) (*DeleteRemediationExceptionsOutput, error) { + req, out := c.DeleteRemediationExceptionsRequest(input) + return out, req.Send() +} + +// DeleteRemediationExceptionsWithContext is the same as DeleteRemediationExceptions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRemediationExceptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) DeleteRemediationExceptionsWithContext(ctx aws.Context, input *DeleteRemediationExceptionsInput, opts ...request.Option) (*DeleteRemediationExceptionsOutput, error) { + req, out := c.DeleteRemediationExceptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteRetentionConfiguration = "DeleteRetentionConfiguration" // DeleteRetentionConfigurationRequest generates a "aws/request.Request" representing the @@ -2147,1552 +2375,1551 @@ func (c *ConfigService) DescribeDeliveryChannelsWithContext(ctx aws.Context, inp return out, req.Send() } -const opDescribePendingAggregationRequests = "DescribePendingAggregationRequests" +const opDescribeOrganizationConfigRuleStatuses = "DescribeOrganizationConfigRuleStatuses" -// DescribePendingAggregationRequestsRequest generates a "aws/request.Request" representing the -// client's request for the DescribePendingAggregationRequests operation. The "output" return +// DescribeOrganizationConfigRuleStatusesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrganizationConfigRuleStatuses operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribePendingAggregationRequests for more information on using the DescribePendingAggregationRequests +// See DescribeOrganizationConfigRuleStatuses for more information on using the DescribeOrganizationConfigRuleStatuses // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribePendingAggregationRequestsRequest method. -// req, resp := client.DescribePendingAggregationRequestsRequest(params) +// // Example sending a request using the DescribeOrganizationConfigRuleStatusesRequest method. 
+// req, resp := client.DescribeOrganizationConfigRuleStatusesRequest(params)
 //
 // err := req.Send()
 // if err == nil { // resp is now filled
 //     fmt.Println(resp)
 // }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribePendingAggregationRequests
-func (c *ConfigService) DescribePendingAggregationRequestsRequest(input *DescribePendingAggregationRequestsInput) (req *request.Request, output *DescribePendingAggregationRequestsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConfigRuleStatuses
+func (c *ConfigService) DescribeOrganizationConfigRuleStatusesRequest(input *DescribeOrganizationConfigRuleStatusesInput) (req *request.Request, output *DescribeOrganizationConfigRuleStatusesOutput) {
 	op := &request.Operation{
-		Name:       opDescribePendingAggregationRequests,
+		Name:       opDescribeOrganizationConfigRuleStatuses,
 		HTTPMethod: "POST",
 		HTTPPath:   "/",
 	}
 
 	if input == nil {
-		input = &DescribePendingAggregationRequestsInput{}
+		input = &DescribeOrganizationConfigRuleStatusesInput{}
 	}
 
-	output = &DescribePendingAggregationRequestsOutput{}
+	output = &DescribeOrganizationConfigRuleStatusesOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// DescribePendingAggregationRequests API operation for AWS Config.
+// DescribeOrganizationConfigRuleStatuses API operation for AWS Config.
 //
-// Returns a list of all pending aggregation requests.
+// Provides organization config rule deployment status for an organization.
+//
+// The status is not considered successful until the organization config rule
+// is successfully deployed in all the member accounts, with the exception of
+// excluded accounts.
+//
+// When you specify the limit and the next token, you receive a paginated response.
+// The limit and next token are not applicable if you specify organization config
+// rule names; they apply only when you request all the organization config rules.
+//
+// Only a master account can call this API.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS Config's
-// API operation DescribePendingAggregationRequests for usage and error information.
+// API operation DescribeOrganizationConfigRuleStatuses for usage and error information.
 //
 // Returned Error Codes:
-// * ErrCodeInvalidParameterValueException "InvalidParameterValueException"
-// One or more of the specified parameters are invalid. Verify that your parameters
-// are valid and try again.
+// * ErrCodeNoSuchOrganizationConfigRuleException "NoSuchOrganizationConfigRuleException"
+// You specified one or more organization config rules that do not exist.
+//
+// * ErrCodeInvalidLimitException "InvalidLimitException"
+// The specified limit is outside the allowable range.
 //
 // * ErrCodeInvalidNextTokenException "InvalidNextTokenException"
 // The specified next token is invalid. Specify the nextToken string that was
 // returned in the previous response to get the next page of results.
 //
-// * ErrCodeInvalidLimitException "InvalidLimitException"
-// The specified limit is outside the allowable range.
+// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException"
+// For PutConfigurationAggregator API, no permission to call EnableAWSServiceAccess
+// API.
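// Illustrative sketch, not part of the generated SDK: walking the paginated
// response by hand with Limit and NextToken, since no Pages helper is generated
// for this operation in this SDK version. The page size is an arbitrary
// placeholder.
//
//    input := &configservice.DescribeOrganizationConfigRuleStatusesInput{Limit: aws.Int64(20)}
//    for {
//        out, err := svc.DescribeOrganizationConfigRuleStatuses(input)
//        if err != nil {
//            return err
//        }
//        for _, status := range out.OrganizationConfigRuleStatuses {
//            fmt.Println(aws.StringValue(status.OrganizationConfigRuleName))
//        }
//        if aws.StringValue(out.NextToken) == "" {
//            break
//        }
//        input.NextToken = out.NextToken
//    }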
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribePendingAggregationRequests -func (c *ConfigService) DescribePendingAggregationRequests(input *DescribePendingAggregationRequestsInput) (*DescribePendingAggregationRequestsOutput, error) { - req, out := c.DescribePendingAggregationRequestsRequest(input) +// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs +// are called from member accounts. All APIs must be called from organization +// master account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConfigRuleStatuses +func (c *ConfigService) DescribeOrganizationConfigRuleStatuses(input *DescribeOrganizationConfigRuleStatusesInput) (*DescribeOrganizationConfigRuleStatusesOutput, error) { + req, out := c.DescribeOrganizationConfigRuleStatusesRequest(input) return out, req.Send() } -// DescribePendingAggregationRequestsWithContext is the same as DescribePendingAggregationRequests with the addition of +// DescribeOrganizationConfigRuleStatusesWithContext is the same as DescribeOrganizationConfigRuleStatuses with the addition of // the ability to pass a context and additional request options. // -// See DescribePendingAggregationRequests for details on how to use this API operation. +// See DescribeOrganizationConfigRuleStatuses for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) DescribePendingAggregationRequestsWithContext(ctx aws.Context, input *DescribePendingAggregationRequestsInput, opts ...request.Option) (*DescribePendingAggregationRequestsOutput, error) { - req, out := c.DescribePendingAggregationRequestsRequest(input) +func (c *ConfigService) DescribeOrganizationConfigRuleStatusesWithContext(ctx aws.Context, input *DescribeOrganizationConfigRuleStatusesInput, opts ...request.Option) (*DescribeOrganizationConfigRuleStatusesOutput, error) { + req, out := c.DescribeOrganizationConfigRuleStatusesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeRemediationConfigurations = "DescribeRemediationConfigurations" +const opDescribeOrganizationConfigRules = "DescribeOrganizationConfigRules" -// DescribeRemediationConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeRemediationConfigurations operation. The "output" return +// DescribeOrganizationConfigRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrganizationConfigRules operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeRemediationConfigurations for more information on using the DescribeRemediationConfigurations +// See DescribeOrganizationConfigRules for more information on using the DescribeOrganizationConfigRules // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
//
//
-// // Example sending a request using the DescribeRemediationConfigurationsRequest method.
-// req, resp := client.DescribeRemediationConfigurationsRequest(params)
+// // Example sending a request using the DescribeOrganizationConfigRulesRequest method.
+// req, resp := client.DescribeOrganizationConfigRulesRequest(params)
 //
 // err := req.Send()
 // if err == nil { // resp is now filled
 //     fmt.Println(resp)
 // }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationConfigurations
-func (c *ConfigService) DescribeRemediationConfigurationsRequest(input *DescribeRemediationConfigurationsInput) (req *request.Request, output *DescribeRemediationConfigurationsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConfigRules
+func (c *ConfigService) DescribeOrganizationConfigRulesRequest(input *DescribeOrganizationConfigRulesInput) (req *request.Request, output *DescribeOrganizationConfigRulesOutput) {
 	op := &request.Operation{
-		Name:       opDescribeRemediationConfigurations,
+		Name:       opDescribeOrganizationConfigRules,
 		HTTPMethod: "POST",
 		HTTPPath:   "/",
 	}
 
 	if input == nil {
-		input = &DescribeRemediationConfigurationsInput{}
+		input = &DescribeOrganizationConfigRulesInput{}
 	}
 
-	output = &DescribeRemediationConfigurationsOutput{}
+	output = &DescribeOrganizationConfigRulesOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// DescribeRemediationConfigurations API operation for AWS Config.
+// DescribeOrganizationConfigRules API operation for AWS Config.
 //
-// Returns the details of one or more remediation configurations.
+// Returns a list of organization config rules.
+//
+// When you specify the limit and the next token, you receive a paginated response.
+// The limit and next token are not applicable if you specify organization config
+// rule names; they apply only when you request all the organization config rules.
+//
+// Only a master account can call this API.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS Config's
-// API operation DescribeRemediationConfigurations for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationConfigurations
-func (c *ConfigService) DescribeRemediationConfigurations(input *DescribeRemediationConfigurationsInput) (*DescribeRemediationConfigurationsOutput, error) {
-	req, out := c.DescribeRemediationConfigurationsRequest(input)
+// API operation DescribeOrganizationConfigRules for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchOrganizationConfigRuleException "NoSuchOrganizationConfigRuleException"
+// You specified one or more organization config rules that do not exist.
+//
+// * ErrCodeInvalidNextTokenException "InvalidNextTokenException"
+// The specified next token is invalid. Specify the nextToken string that was
+// returned in the previous response to get the next page of results.
+//
+// * ErrCodeInvalidLimitException "InvalidLimitException"
+// The specified limit is outside the allowable range.
+//
+// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException"
+// For PutConfigurationAggregator API, no permission to call EnableAWSServiceAccess
+// API.
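// Illustrative sketch, not part of the generated SDK: the awserr type-assertion
// pattern the comments above recommend, switching on this package's error code
// constants. The rule name is a placeholder.
//
//    out, err := svc.DescribeOrganizationConfigRules(&configservice.DescribeOrganizationConfigRulesInput{
//        OrganizationConfigRuleNames: []*string{aws.String("example-org-rule")},
//    })
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case configservice.ErrCodeNoSuchOrganizationConfigRuleException:
//                log.Printf("no such organization config rule: %s", aerr.Message())
//            case configservice.ErrCodeOrganizationAccessDeniedException:
//                log.Printf("call from the organization master account: %s", aerr.Message())
//            default:
//                log.Println(aerr.Error())
//            }
//        }
//        return err
//    }
//    fmt.Println(out.OrganizationConfigRules)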
+// +// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs +// are called from member accounts. All APIs must be called from organization +// master account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConfigRules +func (c *ConfigService) DescribeOrganizationConfigRules(input *DescribeOrganizationConfigRulesInput) (*DescribeOrganizationConfigRulesOutput, error) { + req, out := c.DescribeOrganizationConfigRulesRequest(input) return out, req.Send() } -// DescribeRemediationConfigurationsWithContext is the same as DescribeRemediationConfigurations with the addition of +// DescribeOrganizationConfigRulesWithContext is the same as DescribeOrganizationConfigRules with the addition of // the ability to pass a context and additional request options. // -// See DescribeRemediationConfigurations for details on how to use this API operation. +// See DescribeOrganizationConfigRules for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) DescribeRemediationConfigurationsWithContext(ctx aws.Context, input *DescribeRemediationConfigurationsInput, opts ...request.Option) (*DescribeRemediationConfigurationsOutput, error) { - req, out := c.DescribeRemediationConfigurationsRequest(input) +func (c *ConfigService) DescribeOrganizationConfigRulesWithContext(ctx aws.Context, input *DescribeOrganizationConfigRulesInput, opts ...request.Option) (*DescribeOrganizationConfigRulesOutput, error) { + req, out := c.DescribeOrganizationConfigRulesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeRemediationExecutionStatus = "DescribeRemediationExecutionStatus" +const opDescribePendingAggregationRequests = "DescribePendingAggregationRequests" -// DescribeRemediationExecutionStatusRequest generates a "aws/request.Request" representing the -// client's request for the DescribeRemediationExecutionStatus operation. The "output" return +// DescribePendingAggregationRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePendingAggregationRequests operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeRemediationExecutionStatus for more information on using the DescribeRemediationExecutionStatus +// See DescribePendingAggregationRequests for more information on using the DescribePendingAggregationRequests // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeRemediationExecutionStatusRequest method. -// req, resp := client.DescribeRemediationExecutionStatusRequest(params) +// // Example sending a request using the DescribePendingAggregationRequestsRequest method. 
+// req, resp := client.DescribePendingAggregationRequestsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExecutionStatus -func (c *ConfigService) DescribeRemediationExecutionStatusRequest(input *DescribeRemediationExecutionStatusInput) (req *request.Request, output *DescribeRemediationExecutionStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribePendingAggregationRequests +func (c *ConfigService) DescribePendingAggregationRequestsRequest(input *DescribePendingAggregationRequestsInput) (req *request.Request, output *DescribePendingAggregationRequestsOutput) { op := &request.Operation{ - Name: opDescribeRemediationExecutionStatus, + Name: opDescribePendingAggregationRequests, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "Limit", - TruncationToken: "", - }, } if input == nil { - input = &DescribeRemediationExecutionStatusInput{} + input = &DescribePendingAggregationRequestsInput{} } - output = &DescribeRemediationExecutionStatusOutput{} + output = &DescribePendingAggregationRequestsOutput{} req = c.newRequest(op, input, output) return } -// DescribeRemediationExecutionStatus API operation for AWS Config. +// DescribePendingAggregationRequests API operation for AWS Config. // -// Provides a detailed view of a Remediation Execution for a set of resources -// including state, timestamps for when steps for the remediation execution -// occur, and any error messages for steps that have failed. When you specify -// the limit and the next token, you receive a paginated response. +// Returns a list of all pending aggregation requests. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation DescribeRemediationExecutionStatus for usage and error information. +// API operation DescribePendingAggregationRequests for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchRemediationConfigurationException "NoSuchRemediationConfigurationException" -// You specified an AWS Config rule without a remediation configuration. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExecutionStatus -func (c *ConfigService) DescribeRemediationExecutionStatus(input *DescribeRemediationExecutionStatusInput) (*DescribeRemediationExecutionStatusOutput, error) { - req, out := c.DescribeRemediationExecutionStatusRequest(input) - return out, req.Send() -} - -// DescribeRemediationExecutionStatusWithContext is the same as DescribeRemediationExecutionStatus with the addition of -// the ability to pass a context and additional request options. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. // -// See DescribeRemediationExecutionStatus for details on how to use this API operation. +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. 
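// Illustrative sketch, not part of the generated SDK: using the WithContext
// variant with a caller-supplied timeout; a standard context.Context satisfies
// aws.Context. The 30-second deadline is an arbitrary placeholder.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := svc.DescribePendingAggregationRequestsWithContext(ctx,
//        &configservice.DescribePendingAggregationRequestsInput{})
//    if err != nil {
//        return err
//    }
//    fmt.Println(out.PendingAggregationRequests)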
// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ConfigService) DescribeRemediationExecutionStatusWithContext(ctx aws.Context, input *DescribeRemediationExecutionStatusInput, opts ...request.Option) (*DescribeRemediationExecutionStatusOutput, error) { - req, out := c.DescribeRemediationExecutionStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribePendingAggregationRequests +func (c *ConfigService) DescribePendingAggregationRequests(input *DescribePendingAggregationRequestsInput) (*DescribePendingAggregationRequestsOutput, error) { + req, out := c.DescribePendingAggregationRequestsRequest(input) return out, req.Send() } -// DescribeRemediationExecutionStatusPages iterates over the pages of a DescribeRemediationExecutionStatus operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeRemediationExecutionStatus method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeRemediationExecutionStatus operation. -// pageNum := 0 -// err := client.DescribeRemediationExecutionStatusPages(params, -// func(page *DescribeRemediationExecutionStatusOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// DescribePendingAggregationRequestsWithContext is the same as DescribePendingAggregationRequests with the addition of +// the ability to pass a context and additional request options. // -func (c *ConfigService) DescribeRemediationExecutionStatusPages(input *DescribeRemediationExecutionStatusInput, fn func(*DescribeRemediationExecutionStatusOutput, bool) bool) error { - return c.DescribeRemediationExecutionStatusPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeRemediationExecutionStatusPagesWithContext same as DescribeRemediationExecutionStatusPages except -// it takes a Context and allows setting request options on the pages. +// See DescribePendingAggregationRequests for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) DescribeRemediationExecutionStatusPagesWithContext(ctx aws.Context, input *DescribeRemediationExecutionStatusInput, fn func(*DescribeRemediationExecutionStatusOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeRemediationExecutionStatusInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeRemediationExecutionStatusRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeRemediationExecutionStatusOutput), !p.HasNextPage()) - } - return p.Err() +func (c *ConfigService) DescribePendingAggregationRequestsWithContext(ctx aws.Context, input *DescribePendingAggregationRequestsInput, opts ...request.Option) (*DescribePendingAggregationRequestsOutput, error) { + req, out := c.DescribePendingAggregationRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opDescribeRetentionConfigurations = "DescribeRetentionConfigurations" +const opDescribeRemediationConfigurations = "DescribeRemediationConfigurations" -// DescribeRetentionConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeRetentionConfigurations operation. The "output" return +// DescribeRemediationConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRemediationConfigurations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeRetentionConfigurations for more information on using the DescribeRetentionConfigurations +// See DescribeRemediationConfigurations for more information on using the DescribeRemediationConfigurations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeRetentionConfigurationsRequest method. -// req, resp := client.DescribeRetentionConfigurationsRequest(params) +// // Example sending a request using the DescribeRemediationConfigurationsRequest method. +// req, resp := client.DescribeRemediationConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRetentionConfigurations -func (c *ConfigService) DescribeRetentionConfigurationsRequest(input *DescribeRetentionConfigurationsInput) (req *request.Request, output *DescribeRetentionConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationConfigurations +func (c *ConfigService) DescribeRemediationConfigurationsRequest(input *DescribeRemediationConfigurationsInput) (req *request.Request, output *DescribeRemediationConfigurationsOutput) { op := &request.Operation{ - Name: opDescribeRetentionConfigurations, + Name: opDescribeRemediationConfigurations, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeRetentionConfigurationsInput{} + input = &DescribeRemediationConfigurationsInput{} } - output = &DescribeRetentionConfigurationsOutput{} + output = &DescribeRemediationConfigurationsOutput{} req = c.newRequest(op, input, output) return } -// DescribeRetentionConfigurations API operation for AWS Config. -// -// Returns the details of one or more retention configurations. If the retention -// configuration name is not specified, this action returns the details for -// all the retention configurations for that account. +// DescribeRemediationConfigurations API operation for AWS Config. 
// -// Currently, AWS Config supports only one retention configuration per region -// in your account. +// Returns the details of one or more remediation configurations. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation DescribeRetentionConfigurations for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. -// -// * ErrCodeNoSuchRetentionConfigurationException "NoSuchRetentionConfigurationException" -// You have specified a retention configuration that does not exist. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The specified next token is invalid. Specify the nextToken string that was -// returned in the previous response to get the next page of results. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRetentionConfigurations -func (c *ConfigService) DescribeRetentionConfigurations(input *DescribeRetentionConfigurationsInput) (*DescribeRetentionConfigurationsOutput, error) { - req, out := c.DescribeRetentionConfigurationsRequest(input) +// API operation DescribeRemediationConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationConfigurations +func (c *ConfigService) DescribeRemediationConfigurations(input *DescribeRemediationConfigurationsInput) (*DescribeRemediationConfigurationsOutput, error) { + req, out := c.DescribeRemediationConfigurationsRequest(input) return out, req.Send() } -// DescribeRetentionConfigurationsWithContext is the same as DescribeRetentionConfigurations with the addition of +// DescribeRemediationConfigurationsWithContext is the same as DescribeRemediationConfigurations with the addition of // the ability to pass a context and additional request options. // -// See DescribeRetentionConfigurations for details on how to use this API operation. +// See DescribeRemediationConfigurations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) DescribeRetentionConfigurationsWithContext(ctx aws.Context, input *DescribeRetentionConfigurationsInput, opts ...request.Option) (*DescribeRetentionConfigurationsOutput, error) { - req, out := c.DescribeRetentionConfigurationsRequest(input) +func (c *ConfigService) DescribeRemediationConfigurationsWithContext(ctx aws.Context, input *DescribeRemediationConfigurationsInput, opts ...request.Option) (*DescribeRemediationConfigurationsOutput, error) { + req, out := c.DescribeRemediationConfigurationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetAggregateComplianceDetailsByConfigRule = "GetAggregateComplianceDetailsByConfigRule" +const opDescribeRemediationExceptions = "DescribeRemediationExceptions" -// GetAggregateComplianceDetailsByConfigRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetAggregateComplianceDetailsByConfigRule operation. The "output" return +// DescribeRemediationExceptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRemediationExceptions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAggregateComplianceDetailsByConfigRule for more information on using the GetAggregateComplianceDetailsByConfigRule +// See DescribeRemediationExceptions for more information on using the DescribeRemediationExceptions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAggregateComplianceDetailsByConfigRuleRequest method. -// req, resp := client.GetAggregateComplianceDetailsByConfigRuleRequest(params) +// // Example sending a request using the DescribeRemediationExceptionsRequest method. +// req, resp := client.DescribeRemediationExceptionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateComplianceDetailsByConfigRule -func (c *ConfigService) GetAggregateComplianceDetailsByConfigRuleRequest(input *GetAggregateComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetAggregateComplianceDetailsByConfigRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExceptions +func (c *ConfigService) DescribeRemediationExceptionsRequest(input *DescribeRemediationExceptionsInput) (req *request.Request, output *DescribeRemediationExceptionsOutput) { op := &request.Operation{ - Name: opGetAggregateComplianceDetailsByConfigRule, + Name: opDescribeRemediationExceptions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &GetAggregateComplianceDetailsByConfigRuleInput{} + input = &DescribeRemediationExceptionsInput{} } - output = &GetAggregateComplianceDetailsByConfigRuleOutput{} + output = &DescribeRemediationExceptionsOutput{} req = c.newRequest(op, input, output) return } -// GetAggregateComplianceDetailsByConfigRule API operation for AWS Config. +// DescribeRemediationExceptions API operation for AWS Config. // -// Returns the evaluation results for the specified AWS Config rule for a specific -// resource in a rule. The results indicate which AWS resources were evaluated -// by the rule, when each resource was last evaluated, and whether each resource -// complies with the rule. +// Returns the details of one or more remediation exceptions. 
A detailed view
+// of a remediation exception for a set of resources includes an explanation
+// of the exception and the time when the exception will be deleted.
 //
-// The results can return an empty result page. But if you have a nextToken,
-// the results are displayed on the next page.
+// When you specify the limit and the next token, you receive a paginated response.
+//
+// Limit and next token are not applicable if you request resources in batch;
+// they apply only when you request all resources.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS Config's
-// API operation GetAggregateComplianceDetailsByConfigRule for usage and error information.
+// API operation DescribeRemediationExceptions for usage and error information.
 //
 // Returned Error Codes:
-// * ErrCodeValidationException "ValidationException"
-// The requested action is not valid.
-//
-// * ErrCodeInvalidLimitException "InvalidLimitException"
-// The specified limit is outside the allowable range.
-//
 // * ErrCodeInvalidNextTokenException "InvalidNextTokenException"
 // The specified next token is invalid. Specify the nextToken string that was
 // returned in the previous response to get the next page of results.
 //
-// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException"
-// You have specified a configuration aggregator that does not exist.
+// * ErrCodeInvalidParameterValueException "InvalidParameterValueException"
+// One or more of the specified parameters are invalid. Verify that your parameters
+// are valid and try again.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateComplianceDetailsByConfigRule
-func (c *ConfigService) GetAggregateComplianceDetailsByConfigRule(input *GetAggregateComplianceDetailsByConfigRuleInput) (*GetAggregateComplianceDetailsByConfigRuleOutput, error) {
-	req, out := c.GetAggregateComplianceDetailsByConfigRuleRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExceptions
+func (c *ConfigService) DescribeRemediationExceptions(input *DescribeRemediationExceptionsInput) (*DescribeRemediationExceptionsOutput, error) {
+	req, out := c.DescribeRemediationExceptionsRequest(input)
 	return out, req.Send()
 }
 
-// GetAggregateComplianceDetailsByConfigRuleWithContext is the same as GetAggregateComplianceDetailsByConfigRule with the addition of
+// DescribeRemediationExceptionsWithContext is the same as DescribeRemediationExceptions with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetAggregateComplianceDetailsByConfigRule for details on how to use this API operation.
+// See DescribeRemediationExceptions for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *ConfigService) GetAggregateComplianceDetailsByConfigRuleWithContext(ctx aws.Context, input *GetAggregateComplianceDetailsByConfigRuleInput, opts ...request.Option) (*GetAggregateComplianceDetailsByConfigRuleOutput, error) { - req, out := c.GetAggregateComplianceDetailsByConfigRuleRequest(input) +func (c *ConfigService) DescribeRemediationExceptionsWithContext(ctx aws.Context, input *DescribeRemediationExceptionsInput, opts ...request.Option) (*DescribeRemediationExceptionsOutput, error) { + req, out := c.DescribeRemediationExceptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAggregateConfigRuleComplianceSummary = "GetAggregateConfigRuleComplianceSummary" +// DescribeRemediationExceptionsPages iterates over the pages of a DescribeRemediationExceptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeRemediationExceptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeRemediationExceptions operation. +// pageNum := 0 +// err := client.DescribeRemediationExceptionsPages(params, +// func(page *configservice.DescribeRemediationExceptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ConfigService) DescribeRemediationExceptionsPages(input *DescribeRemediationExceptionsInput, fn func(*DescribeRemediationExceptionsOutput, bool) bool) error { + return c.DescribeRemediationExceptionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetAggregateConfigRuleComplianceSummaryRequest generates a "aws/request.Request" representing the -// client's request for the GetAggregateConfigRuleComplianceSummary operation. The "output" return +// DescribeRemediationExceptionsPagesWithContext same as DescribeRemediationExceptionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) DescribeRemediationExceptionsPagesWithContext(ctx aws.Context, input *DescribeRemediationExceptionsInput, fn func(*DescribeRemediationExceptionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeRemediationExceptionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeRemediationExceptionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeRemediationExceptionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeRemediationExecutionStatus = "DescribeRemediationExecutionStatus" + +// DescribeRemediationExecutionStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRemediationExecutionStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See GetAggregateConfigRuleComplianceSummary for more information on using the GetAggregateConfigRuleComplianceSummary +// See DescribeRemediationExecutionStatus for more information on using the DescribeRemediationExecutionStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAggregateConfigRuleComplianceSummaryRequest method. -// req, resp := client.GetAggregateConfigRuleComplianceSummaryRequest(params) +// // Example sending a request using the DescribeRemediationExecutionStatusRequest method. +// req, resp := client.DescribeRemediationExecutionStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateConfigRuleComplianceSummary -func (c *ConfigService) GetAggregateConfigRuleComplianceSummaryRequest(input *GetAggregateConfigRuleComplianceSummaryInput) (req *request.Request, output *GetAggregateConfigRuleComplianceSummaryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExecutionStatus +func (c *ConfigService) DescribeRemediationExecutionStatusRequest(input *DescribeRemediationExecutionStatusInput) (req *request.Request, output *DescribeRemediationExecutionStatusOutput) { op := &request.Operation{ - Name: opGetAggregateConfigRuleComplianceSummary, + Name: opDescribeRemediationExecutionStatus, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &GetAggregateConfigRuleComplianceSummaryInput{} + input = &DescribeRemediationExecutionStatusInput{} } - output = &GetAggregateConfigRuleComplianceSummaryOutput{} + output = &DescribeRemediationExecutionStatusOutput{} req = c.newRequest(op, input, output) return } -// GetAggregateConfigRuleComplianceSummary API operation for AWS Config. -// -// Returns the number of compliant and noncompliant rules for one or more accounts -// and regions in an aggregator. +// DescribeRemediationExecutionStatus API operation for AWS Config. // -// The results can return an empty result page, but if you have a nextToken, -// the results are displayed on the next page. +// Provides a detailed view of a Remediation Execution for a set of resources +// including state, timestamps for when steps for the remediation execution +// occur, and any error messages for steps that have failed. When you specify +// the limit and the next token, you receive a paginated response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetAggregateConfigRuleComplianceSummary for usage and error information. +// API operation DescribeRemediationExecutionStatus for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. -// -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. 
+// * ErrCodeNoSuchRemediationConfigurationException "NoSuchRemediationConfigurationException" +// You specified an AWS Config rule without a remediation configuration. // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // -// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" -// You have specified a configuration aggregator that does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateConfigRuleComplianceSummary -func (c *ConfigService) GetAggregateConfigRuleComplianceSummary(input *GetAggregateConfigRuleComplianceSummaryInput) (*GetAggregateConfigRuleComplianceSummaryOutput, error) { - req, out := c.GetAggregateConfigRuleComplianceSummaryRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRemediationExecutionStatus +func (c *ConfigService) DescribeRemediationExecutionStatus(input *DescribeRemediationExecutionStatusInput) (*DescribeRemediationExecutionStatusOutput, error) { + req, out := c.DescribeRemediationExecutionStatusRequest(input) return out, req.Send() } -// GetAggregateConfigRuleComplianceSummaryWithContext is the same as GetAggregateConfigRuleComplianceSummary with the addition of +// DescribeRemediationExecutionStatusWithContext is the same as DescribeRemediationExecutionStatus with the addition of // the ability to pass a context and additional request options. // -// See GetAggregateConfigRuleComplianceSummary for details on how to use this API operation. +// See DescribeRemediationExecutionStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetAggregateConfigRuleComplianceSummaryWithContext(ctx aws.Context, input *GetAggregateConfigRuleComplianceSummaryInput, opts ...request.Option) (*GetAggregateConfigRuleComplianceSummaryOutput, error) { - req, out := c.GetAggregateConfigRuleComplianceSummaryRequest(input) +func (c *ConfigService) DescribeRemediationExecutionStatusWithContext(ctx aws.Context, input *DescribeRemediationExecutionStatusInput, opts ...request.Option) (*DescribeRemediationExecutionStatusOutput, error) { + req, out := c.DescribeRemediationExecutionStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAggregateDiscoveredResourceCounts = "GetAggregateDiscoveredResourceCounts" +// DescribeRemediationExecutionStatusPages iterates over the pages of a DescribeRemediationExecutionStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeRemediationExecutionStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeRemediationExecutionStatus operation. 
+// pageNum := 0 +// err := client.DescribeRemediationExecutionStatusPages(params, +// func(page *configservice.DescribeRemediationExecutionStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ConfigService) DescribeRemediationExecutionStatusPages(input *DescribeRemediationExecutionStatusInput, fn func(*DescribeRemediationExecutionStatusOutput, bool) bool) error { + return c.DescribeRemediationExecutionStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetAggregateDiscoveredResourceCountsRequest generates a "aws/request.Request" representing the -// client's request for the GetAggregateDiscoveredResourceCounts operation. The "output" return +// DescribeRemediationExecutionStatusPagesWithContext same as DescribeRemediationExecutionStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) DescribeRemediationExecutionStatusPagesWithContext(ctx aws.Context, input *DescribeRemediationExecutionStatusInput, fn func(*DescribeRemediationExecutionStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeRemediationExecutionStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeRemediationExecutionStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeRemediationExecutionStatusOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeRetentionConfigurations = "DescribeRetentionConfigurations" + +// DescribeRetentionConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRetentionConfigurations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAggregateDiscoveredResourceCounts for more information on using the GetAggregateDiscoveredResourceCounts +// See DescribeRetentionConfigurations for more information on using the DescribeRetentionConfigurations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAggregateDiscoveredResourceCountsRequest method. -// req, resp := client.GetAggregateDiscoveredResourceCountsRequest(params) +// // Example sending a request using the DescribeRetentionConfigurationsRequest method. 
+// req, resp := client.DescribeRetentionConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateDiscoveredResourceCounts -func (c *ConfigService) GetAggregateDiscoveredResourceCountsRequest(input *GetAggregateDiscoveredResourceCountsInput) (req *request.Request, output *GetAggregateDiscoveredResourceCountsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRetentionConfigurations +func (c *ConfigService) DescribeRetentionConfigurationsRequest(input *DescribeRetentionConfigurationsInput) (req *request.Request, output *DescribeRetentionConfigurationsOutput) { op := &request.Operation{ - Name: opGetAggregateDiscoveredResourceCounts, + Name: opDescribeRetentionConfigurations, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetAggregateDiscoveredResourceCountsInput{} + input = &DescribeRetentionConfigurationsInput{} } - output = &GetAggregateDiscoveredResourceCountsOutput{} + output = &DescribeRetentionConfigurationsOutput{} req = c.newRequest(op, input, output) return } -// GetAggregateDiscoveredResourceCounts API operation for AWS Config. +// DescribeRetentionConfigurations API operation for AWS Config. // -// Returns the resource counts across accounts and regions that are present -// in your AWS Config aggregator. You can request the resource counts by providing -// filters and GroupByKey. +// Returns the details of one or more retention configurations. If the retention +// configuration name is not specified, this action returns the details for +// all the retention configurations for that account. // -// For example, if the input contains accountID 12345678910 and region us-east-1 -// in filters, the API returns the count of resources in account ID 12345678910 -// and region us-east-1. If the input contains ACCOUNT_ID as a GroupByKey, the -// API returns resource counts for all source accounts that are present in your -// aggregator. +// Currently, AWS Config supports only one retention configuration per region +// in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetAggregateDiscoveredResourceCounts for usage and error information. +// API operation DescribeRetentionConfigurations for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. // -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. +// * ErrCodeNoSuchRetentionConfigurationException "NoSuchRetentionConfigurationException" +// You have specified a retention configuration that does not exist. // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. 
// -// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" -// You have specified a configuration aggregator that does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateDiscoveredResourceCounts -func (c *ConfigService) GetAggregateDiscoveredResourceCounts(input *GetAggregateDiscoveredResourceCountsInput) (*GetAggregateDiscoveredResourceCountsOutput, error) { - req, out := c.GetAggregateDiscoveredResourceCountsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeRetentionConfigurations +func (c *ConfigService) DescribeRetentionConfigurations(input *DescribeRetentionConfigurationsInput) (*DescribeRetentionConfigurationsOutput, error) { + req, out := c.DescribeRetentionConfigurationsRequest(input) return out, req.Send() } -// GetAggregateDiscoveredResourceCountsWithContext is the same as GetAggregateDiscoveredResourceCounts with the addition of +// DescribeRetentionConfigurationsWithContext is the same as DescribeRetentionConfigurations with the addition of // the ability to pass a context and additional request options. // -// See GetAggregateDiscoveredResourceCounts for details on how to use this API operation. +// See DescribeRetentionConfigurations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetAggregateDiscoveredResourceCountsWithContext(ctx aws.Context, input *GetAggregateDiscoveredResourceCountsInput, opts ...request.Option) (*GetAggregateDiscoveredResourceCountsOutput, error) { - req, out := c.GetAggregateDiscoveredResourceCountsRequest(input) +func (c *ConfigService) DescribeRetentionConfigurationsWithContext(ctx aws.Context, input *DescribeRetentionConfigurationsInput, opts ...request.Option) (*DescribeRetentionConfigurationsOutput, error) { + req, out := c.DescribeRetentionConfigurationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAggregateResourceConfig = "GetAggregateResourceConfig" +const opGetAggregateComplianceDetailsByConfigRule = "GetAggregateComplianceDetailsByConfigRule" -// GetAggregateResourceConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetAggregateResourceConfig operation. The "output" return +// GetAggregateComplianceDetailsByConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetAggregateComplianceDetailsByConfigRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAggregateResourceConfig for more information on using the GetAggregateResourceConfig +// See GetAggregateComplianceDetailsByConfigRule for more information on using the GetAggregateComplianceDetailsByConfigRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAggregateResourceConfigRequest method. 
-// req, resp := client.GetAggregateResourceConfigRequest(params) +// // Example sending a request using the GetAggregateComplianceDetailsByConfigRuleRequest method. +// req, resp := client.GetAggregateComplianceDetailsByConfigRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateResourceConfig -func (c *ConfigService) GetAggregateResourceConfigRequest(input *GetAggregateResourceConfigInput) (req *request.Request, output *GetAggregateResourceConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateComplianceDetailsByConfigRule +func (c *ConfigService) GetAggregateComplianceDetailsByConfigRuleRequest(input *GetAggregateComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetAggregateComplianceDetailsByConfigRuleOutput) { op := &request.Operation{ - Name: opGetAggregateResourceConfig, + Name: opGetAggregateComplianceDetailsByConfigRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetAggregateResourceConfigInput{} + input = &GetAggregateComplianceDetailsByConfigRuleInput{} } - output = &GetAggregateResourceConfigOutput{} + output = &GetAggregateComplianceDetailsByConfigRuleOutput{} req = c.newRequest(op, input, output) return } -// GetAggregateResourceConfig API operation for AWS Config. +// GetAggregateComplianceDetailsByConfigRule API operation for AWS Config. // -// Returns configuration item that is aggregated for your specific resource -// in a specific source account and region. +// Returns the evaluation results for the specified AWS Config rule for a specific +// resource in a rule. The results indicate which AWS resources were evaluated +// by the rule, when each resource was last evaluated, and whether each resource +// complies with the rule. +// +// The results can return an empty result page, but if you have a nextToken, +// the results are displayed on the next page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetAggregateResourceConfig for usage and error information. +// API operation GetAggregateComplianceDetailsByConfigRule for usage and error information. // // Returned Error Codes: // * ErrCodeValidationException "ValidationException" // The requested action is not valid. // -// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" -// You have specified a configuration aggregator that does not exist. +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // -// * ErrCodeOversizedConfigurationItemException "OversizedConfigurationItemException" -// The configuration item size is outside the allowable range. +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. // -// * ErrCodeResourceNotDiscoveredException "ResourceNotDiscoveredException" -// You have specified a resource that is either unknown or has not been discovered. +// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" +// You have specified a configuration aggregator that does not exist. 
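//
// A minimal sketch (not generated SDK text) of calling this operation and
// branching on the error codes listed above via awserr.Error; the account,
// region, aggregator, and rule names are illustrative assumptions:
//
//    out, err := svc.GetAggregateComplianceDetailsByConfigRule(
//        &configservice.GetAggregateComplianceDetailsByConfigRuleInput{
//            AccountId:                   aws.String("123456789012"),
//            AwsRegion:                   aws.String("us-east-1"),
//            ComplianceType:              aws.String("NON_COMPLIANT"),
//            ConfigRuleName:              aws.String("my-rule"),
//            ConfigurationAggregatorName: aws.String("my-aggregator"),
//        })
//    if aerr, ok := err.(awserr.Error); ok &&
//        aerr.Code() == configservice.ErrCodeNoSuchConfigurationAggregatorException {
//        fmt.Println("create the aggregator before requesting details")
//    } else if err == nil {
//        fmt.Println(len(out.AggregateEvaluationResults), "evaluation results")
//    }
//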
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateResourceConfig -func (c *ConfigService) GetAggregateResourceConfig(input *GetAggregateResourceConfigInput) (*GetAggregateResourceConfigOutput, error) { - req, out := c.GetAggregateResourceConfigRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateComplianceDetailsByConfigRule +func (c *ConfigService) GetAggregateComplianceDetailsByConfigRule(input *GetAggregateComplianceDetailsByConfigRuleInput) (*GetAggregateComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetAggregateComplianceDetailsByConfigRuleRequest(input) return out, req.Send() } -// GetAggregateResourceConfigWithContext is the same as GetAggregateResourceConfig with the addition of +// GetAggregateComplianceDetailsByConfigRuleWithContext is the same as GetAggregateComplianceDetailsByConfigRule with the addition of // the ability to pass a context and additional request options. // -// See GetAggregateResourceConfig for details on how to use this API operation. +// See GetAggregateComplianceDetailsByConfigRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetAggregateResourceConfigWithContext(ctx aws.Context, input *GetAggregateResourceConfigInput, opts ...request.Option) (*GetAggregateResourceConfigOutput, error) { - req, out := c.GetAggregateResourceConfigRequest(input) +func (c *ConfigService) GetAggregateComplianceDetailsByConfigRuleWithContext(ctx aws.Context, input *GetAggregateComplianceDetailsByConfigRuleInput, opts ...request.Option) (*GetAggregateComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetAggregateComplianceDetailsByConfigRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetComplianceDetailsByConfigRule = "GetComplianceDetailsByConfigRule" +const opGetAggregateConfigRuleComplianceSummary = "GetAggregateConfigRuleComplianceSummary" -// GetComplianceDetailsByConfigRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetComplianceDetailsByConfigRule operation. The "output" return +// GetAggregateConfigRuleComplianceSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetAggregateConfigRuleComplianceSummary operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetComplianceDetailsByConfigRule for more information on using the GetComplianceDetailsByConfigRule +// See GetAggregateConfigRuleComplianceSummary for more information on using the GetAggregateConfigRuleComplianceSummary // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetComplianceDetailsByConfigRuleRequest method. 
-// req, resp := client.GetComplianceDetailsByConfigRuleRequest(params) +// // Example sending a request using the GetAggregateConfigRuleComplianceSummaryRequest method. +// req, resp := client.GetAggregateConfigRuleComplianceSummaryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByConfigRule -func (c *ConfigService) GetComplianceDetailsByConfigRuleRequest(input *GetComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetComplianceDetailsByConfigRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateConfigRuleComplianceSummary +func (c *ConfigService) GetAggregateConfigRuleComplianceSummaryRequest(input *GetAggregateConfigRuleComplianceSummaryInput) (req *request.Request, output *GetAggregateConfigRuleComplianceSummaryOutput) { op := &request.Operation{ - Name: opGetComplianceDetailsByConfigRule, + Name: opGetAggregateConfigRuleComplianceSummary, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetComplianceDetailsByConfigRuleInput{} + input = &GetAggregateConfigRuleComplianceSummaryInput{} } - output = &GetComplianceDetailsByConfigRuleOutput{} + output = &GetAggregateConfigRuleComplianceSummaryOutput{} req = c.newRequest(op, input, output) return } -// GetComplianceDetailsByConfigRule API operation for AWS Config. +// GetAggregateConfigRuleComplianceSummary API operation for AWS Config. // -// Returns the evaluation results for the specified AWS Config rule. The results -// indicate which AWS resources were evaluated by the rule, when each resource -// was last evaluated, and whether each resource complies with the rule. +// Returns the number of compliant and noncompliant rules for one or more accounts +// and regions in an aggregator. +// +// The results can return an empty result page, but if you have a nextToken, +// the results are displayed on the next page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetComplianceDetailsByConfigRule for usage and error information. +// API operation GetAggregateConfigRuleComplianceSummary for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. +// +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // -// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" -// One or more AWS Config rules in the request are invalid. Verify that the -// rule names are correct and try again. +// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" +// You have specified a configuration aggregator that does not exist. 
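//
// A minimal sketch (not generated SDK text; the aggregator name is an
// illustrative assumption) of paging this operation manually with NextToken,
// since, as noted above, a page can be empty while a nextToken still points
// at further results:
//
//    input := &configservice.GetAggregateConfigRuleComplianceSummaryInput{
//        ConfigurationAggregatorName: aws.String("my-aggregator"),
//        GroupByKey:                  aws.String("ACCOUNT_ID"),
//    }
//    for {
//        out, err := svc.GetAggregateConfigRuleComplianceSummary(input)
//        if err != nil {
//            fmt.Println(err)
//            break
//        }
//        for _, c := range out.AggregateComplianceCounts {
//            fmt.Println(aws.StringValue(c.GroupName))
//        }
//        if aws.StringValue(out.NextToken) == "" {
//            break // no more pages
//        }
//        input.NextToken = out.NextToken
//    }
//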
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByConfigRule -func (c *ConfigService) GetComplianceDetailsByConfigRule(input *GetComplianceDetailsByConfigRuleInput) (*GetComplianceDetailsByConfigRuleOutput, error) { - req, out := c.GetComplianceDetailsByConfigRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateConfigRuleComplianceSummary +func (c *ConfigService) GetAggregateConfigRuleComplianceSummary(input *GetAggregateConfigRuleComplianceSummaryInput) (*GetAggregateConfigRuleComplianceSummaryOutput, error) { + req, out := c.GetAggregateConfigRuleComplianceSummaryRequest(input) return out, req.Send() } -// GetComplianceDetailsByConfigRuleWithContext is the same as GetComplianceDetailsByConfigRule with the addition of +// GetAggregateConfigRuleComplianceSummaryWithContext is the same as GetAggregateConfigRuleComplianceSummary with the addition of // the ability to pass a context and additional request options. // -// See GetComplianceDetailsByConfigRule for details on how to use this API operation. +// See GetAggregateConfigRuleComplianceSummary for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetComplianceDetailsByConfigRuleWithContext(ctx aws.Context, input *GetComplianceDetailsByConfigRuleInput, opts ...request.Option) (*GetComplianceDetailsByConfigRuleOutput, error) { - req, out := c.GetComplianceDetailsByConfigRuleRequest(input) +func (c *ConfigService) GetAggregateConfigRuleComplianceSummaryWithContext(ctx aws.Context, input *GetAggregateConfigRuleComplianceSummaryInput, opts ...request.Option) (*GetAggregateConfigRuleComplianceSummaryOutput, error) { + req, out := c.GetAggregateConfigRuleComplianceSummaryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetComplianceDetailsByResource = "GetComplianceDetailsByResource" +const opGetAggregateDiscoveredResourceCounts = "GetAggregateDiscoveredResourceCounts" -// GetComplianceDetailsByResourceRequest generates a "aws/request.Request" representing the -// client's request for the GetComplianceDetailsByResource operation. The "output" return -// value will be populated with the request's response once the request completes +// GetAggregateDiscoveredResourceCountsRequest generates a "aws/request.Request" representing the +// client's request for the GetAggregateDiscoveredResourceCounts operation. The "output" return +// value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetComplianceDetailsByResource for more information on using the GetComplianceDetailsByResource +// See GetAggregateDiscoveredResourceCounts for more information on using the GetAggregateDiscoveredResourceCounts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetComplianceDetailsByResourceRequest method. 
-// req, resp := client.GetComplianceDetailsByResourceRequest(params) +// // Example sending a request using the GetAggregateDiscoveredResourceCountsRequest method. +// req, resp := client.GetAggregateDiscoveredResourceCountsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByResource -func (c *ConfigService) GetComplianceDetailsByResourceRequest(input *GetComplianceDetailsByResourceInput) (req *request.Request, output *GetComplianceDetailsByResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateDiscoveredResourceCounts +func (c *ConfigService) GetAggregateDiscoveredResourceCountsRequest(input *GetAggregateDiscoveredResourceCountsInput) (req *request.Request, output *GetAggregateDiscoveredResourceCountsOutput) { op := &request.Operation{ - Name: opGetComplianceDetailsByResource, + Name: opGetAggregateDiscoveredResourceCounts, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetComplianceDetailsByResourceInput{} + input = &GetAggregateDiscoveredResourceCountsInput{} } - output = &GetComplianceDetailsByResourceOutput{} + output = &GetAggregateDiscoveredResourceCountsOutput{} req = c.newRequest(op, input, output) return } -// GetComplianceDetailsByResource API operation for AWS Config. +// GetAggregateDiscoveredResourceCounts API operation for AWS Config. // -// Returns the evaluation results for the specified AWS resource. The results -// indicate which AWS Config rules were used to evaluate the resource, when -// each rule was last used, and whether the resource complies with each rule. +// Returns the resource counts across accounts and regions that are present +// in your AWS Config aggregator. You can request the resource counts by providing +// filters and GroupByKey. +// +// For example, if the input contains accountID 12345678910 and region us-east-1 +// in filters, the API returns the count of resources in account ID 12345678910 +// and region us-east-1. If the input contains ACCOUNT_ID as a GroupByKey, the +// API returns resource counts for all source accounts that are present in your +// aggregator. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetComplianceDetailsByResource for usage and error information. +// API operation GetAggregateDiscoveredResourceCounts for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByResource -func (c *ConfigService) GetComplianceDetailsByResource(input *GetComplianceDetailsByResourceInput) (*GetComplianceDetailsByResourceOutput, error) { - req, out := c.GetComplianceDetailsByResourceRequest(input) +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. 
Specify the nextToken string that was +// returned in the previous response to get the next page of results. +// +// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" +// You have specified a configuration aggregator that does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateDiscoveredResourceCounts +func (c *ConfigService) GetAggregateDiscoveredResourceCounts(input *GetAggregateDiscoveredResourceCountsInput) (*GetAggregateDiscoveredResourceCountsOutput, error) { + req, out := c.GetAggregateDiscoveredResourceCountsRequest(input) return out, req.Send() } -// GetComplianceDetailsByResourceWithContext is the same as GetComplianceDetailsByResource with the addition of +// GetAggregateDiscoveredResourceCountsWithContext is the same as GetAggregateDiscoveredResourceCounts with the addition of // the ability to pass a context and additional request options. // -// See GetComplianceDetailsByResource for details on how to use this API operation. +// See GetAggregateDiscoveredResourceCounts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetComplianceDetailsByResourceWithContext(ctx aws.Context, input *GetComplianceDetailsByResourceInput, opts ...request.Option) (*GetComplianceDetailsByResourceOutput, error) { - req, out := c.GetComplianceDetailsByResourceRequest(input) +func (c *ConfigService) GetAggregateDiscoveredResourceCountsWithContext(ctx aws.Context, input *GetAggregateDiscoveredResourceCountsInput, opts ...request.Option) (*GetAggregateDiscoveredResourceCountsOutput, error) { + req, out := c.GetAggregateDiscoveredResourceCountsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetComplianceSummaryByConfigRule = "GetComplianceSummaryByConfigRule" +const opGetAggregateResourceConfig = "GetAggregateResourceConfig" -// GetComplianceSummaryByConfigRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetComplianceSummaryByConfigRule operation. The "output" return +// GetAggregateResourceConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetAggregateResourceConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetComplianceSummaryByConfigRule for more information on using the GetComplianceSummaryByConfigRule +// See GetAggregateResourceConfig for more information on using the GetAggregateResourceConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetComplianceSummaryByConfigRuleRequest method. -// req, resp := client.GetComplianceSummaryByConfigRuleRequest(params) +// // Example sending a request using the GetAggregateResourceConfigRequest method. 
+// req, resp := client.GetAggregateResourceConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByConfigRule -func (c *ConfigService) GetComplianceSummaryByConfigRuleRequest(input *GetComplianceSummaryByConfigRuleInput) (req *request.Request, output *GetComplianceSummaryByConfigRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateResourceConfig +func (c *ConfigService) GetAggregateResourceConfigRequest(input *GetAggregateResourceConfigInput) (req *request.Request, output *GetAggregateResourceConfigOutput) { op := &request.Operation{ - Name: opGetComplianceSummaryByConfigRule, + Name: opGetAggregateResourceConfig, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetComplianceSummaryByConfigRuleInput{} + input = &GetAggregateResourceConfigInput{} } - output = &GetComplianceSummaryByConfigRuleOutput{} + output = &GetAggregateResourceConfigOutput{} req = c.newRequest(op, input, output) return } -// GetComplianceSummaryByConfigRule API operation for AWS Config. +// GetAggregateResourceConfig API operation for AWS Config. // -// Returns the number of AWS Config rules that are compliant and noncompliant, -// up to a maximum of 25 for each. +// Returns a configuration item that is aggregated for your specific resource +// in a specific source account and region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetComplianceSummaryByConfigRule for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByConfigRule -func (c *ConfigService) GetComplianceSummaryByConfigRule(input *GetComplianceSummaryByConfigRuleInput) (*GetComplianceSummaryByConfigRuleOutput, error) { - req, out := c.GetComplianceSummaryByConfigRuleRequest(input) +// API operation GetAggregateResourceConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. +// +// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" +// You have specified a configuration aggregator that does not exist. +// +// * ErrCodeOversizedConfigurationItemException "OversizedConfigurationItemException" +// The configuration item size is outside the allowable range. +// +// * ErrCodeResourceNotDiscoveredException "ResourceNotDiscoveredException" +// You have specified a resource that is either unknown or has not been discovered. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetAggregateResourceConfig +func (c *ConfigService) GetAggregateResourceConfig(input *GetAggregateResourceConfigInput) (*GetAggregateResourceConfigOutput, error) { + req, out := c.GetAggregateResourceConfigRequest(input) return out, req.Send() } -// GetComplianceSummaryByConfigRuleWithContext is the same as GetComplianceSummaryByConfigRule with the addition of +// GetAggregateResourceConfigWithContext is the same as GetAggregateResourceConfig with the addition of // the ability to pass a context and additional request options. // -// See GetComplianceSummaryByConfigRule for details on how to use this API operation. 
+// See GetAggregateResourceConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetComplianceSummaryByConfigRuleWithContext(ctx aws.Context, input *GetComplianceSummaryByConfigRuleInput, opts ...request.Option) (*GetComplianceSummaryByConfigRuleOutput, error) { - req, out := c.GetComplianceSummaryByConfigRuleRequest(input) +func (c *ConfigService) GetAggregateResourceConfigWithContext(ctx aws.Context, input *GetAggregateResourceConfigInput, opts ...request.Option) (*GetAggregateResourceConfigOutput, error) { + req, out := c.GetAggregateResourceConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetComplianceSummaryByResourceType = "GetComplianceSummaryByResourceType" +const opGetComplianceDetailsByConfigRule = "GetComplianceDetailsByConfigRule" -// GetComplianceSummaryByResourceTypeRequest generates a "aws/request.Request" representing the -// client's request for the GetComplianceSummaryByResourceType operation. The "output" return +// GetComplianceDetailsByConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceDetailsByConfigRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetComplianceSummaryByResourceType for more information on using the GetComplianceSummaryByResourceType +// See GetComplianceDetailsByConfigRule for more information on using the GetComplianceDetailsByConfigRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetComplianceSummaryByResourceTypeRequest method. -// req, resp := client.GetComplianceSummaryByResourceTypeRequest(params) +// // Example sending a request using the GetComplianceDetailsByConfigRuleRequest method. 
+// req, resp := client.GetComplianceDetailsByConfigRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByResourceType -func (c *ConfigService) GetComplianceSummaryByResourceTypeRequest(input *GetComplianceSummaryByResourceTypeInput) (req *request.Request, output *GetComplianceSummaryByResourceTypeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByConfigRule +func (c *ConfigService) GetComplianceDetailsByConfigRuleRequest(input *GetComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetComplianceDetailsByConfigRuleOutput) { op := &request.Operation{ - Name: opGetComplianceSummaryByResourceType, + Name: opGetComplianceDetailsByConfigRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetComplianceSummaryByResourceTypeInput{} + input = &GetComplianceDetailsByConfigRuleInput{} } - output = &GetComplianceSummaryByResourceTypeOutput{} + output = &GetComplianceDetailsByConfigRuleOutput{} req = c.newRequest(op, input, output) return } -// GetComplianceSummaryByResourceType API operation for AWS Config. +// GetComplianceDetailsByConfigRule API operation for AWS Config. // -// Returns the number of resources that are compliant and the number that are -// noncompliant. You can specify one or more resource types to get these numbers -// for each resource type. The maximum number returned is 100. +// Returns the evaluation results for the specified AWS Config rule. The results +// indicate which AWS resources were evaluated by the rule, when each resource +// was last evaluated, and whether each resource complies with the rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetComplianceSummaryByResourceType for usage and error information. +// API operation GetComplianceDetailsByConfigRule for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByResourceType -func (c *ConfigService) GetComplianceSummaryByResourceType(input *GetComplianceSummaryByResourceTypeInput) (*GetComplianceSummaryByResourceTypeOutput, error) { - req, out := c.GetComplianceSummaryByResourceTypeRequest(input) +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. +// +// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" +// One or more AWS Config rules in the request are invalid. Verify that the +// rule names are correct and try again. 
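//
// A minimal sketch (not generated SDK text; the rule name is an illustrative
// assumption) of fetching only the noncompliant evaluation results for one
// rule with this operation:
//
//    out, err := svc.GetComplianceDetailsByConfigRule(
//        &configservice.GetComplianceDetailsByConfigRuleInput{
//            ConfigRuleName:  aws.String("my-rule"),
//            ComplianceTypes: aws.StringSlice([]string{"NON_COMPLIANT"}),
//        })
//    if err == nil {
//        for _, r := range out.EvaluationResults {
//            fmt.Println(aws.StringValue(r.Annotation))
//        }
//    }
//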
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByConfigRule +func (c *ConfigService) GetComplianceDetailsByConfigRule(input *GetComplianceDetailsByConfigRuleInput) (*GetComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetComplianceDetailsByConfigRuleRequest(input) return out, req.Send() } -// GetComplianceSummaryByResourceTypeWithContext is the same as GetComplianceSummaryByResourceType with the addition of +// GetComplianceDetailsByConfigRuleWithContext is the same as GetComplianceDetailsByConfigRule with the addition of // the ability to pass a context and additional request options. // -// See GetComplianceSummaryByResourceType for details on how to use this API operation. +// See GetComplianceDetailsByConfigRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetComplianceSummaryByResourceTypeWithContext(ctx aws.Context, input *GetComplianceSummaryByResourceTypeInput, opts ...request.Option) (*GetComplianceSummaryByResourceTypeOutput, error) { - req, out := c.GetComplianceSummaryByResourceTypeRequest(input) +func (c *ConfigService) GetComplianceDetailsByConfigRuleWithContext(ctx aws.Context, input *GetComplianceDetailsByConfigRuleInput, opts ...request.Option) (*GetComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetComplianceDetailsByConfigRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDiscoveredResourceCounts = "GetDiscoveredResourceCounts" +const opGetComplianceDetailsByResource = "GetComplianceDetailsByResource" -// GetDiscoveredResourceCountsRequest generates a "aws/request.Request" representing the -// client's request for the GetDiscoveredResourceCounts operation. The "output" return +// GetComplianceDetailsByResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceDetailsByResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDiscoveredResourceCounts for more information on using the GetDiscoveredResourceCounts +// See GetComplianceDetailsByResource for more information on using the GetComplianceDetailsByResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDiscoveredResourceCountsRequest method. -// req, resp := client.GetDiscoveredResourceCountsRequest(params) +// // Example sending a request using the GetComplianceDetailsByResourceRequest method. 
+// req, resp := client.GetComplianceDetailsByResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetDiscoveredResourceCounts -func (c *ConfigService) GetDiscoveredResourceCountsRequest(input *GetDiscoveredResourceCountsInput) (req *request.Request, output *GetDiscoveredResourceCountsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByResource +func (c *ConfigService) GetComplianceDetailsByResourceRequest(input *GetComplianceDetailsByResourceInput) (req *request.Request, output *GetComplianceDetailsByResourceOutput) { op := &request.Operation{ - Name: opGetDiscoveredResourceCounts, + Name: opGetComplianceDetailsByResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetDiscoveredResourceCountsInput{} + input = &GetComplianceDetailsByResourceInput{} } - output = &GetDiscoveredResourceCountsOutput{} + output = &GetComplianceDetailsByResourceOutput{} req = c.newRequest(op, input, output) return } -// GetDiscoveredResourceCounts API operation for AWS Config. -// -// Returns the resource types, the number of each resource type, and the total -// number of resources that AWS Config is recording in this region for your -// AWS account. -// -// Example -// -// AWS Config is recording three resource types in the US East (Ohio) Region -// for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets. -// -// You make a call to the GetDiscoveredResourceCounts action and specify that -// you want all resource types. -// -// AWS Config returns the following: -// -// The resource types (EC2 instances, IAM users, and S3 buckets). -// -// The number of each resource type (25, 20, and 15). -// -// The total number of all resources (60). -// -// The response is paginated. By default, AWS Config lists 100 ResourceCount -// objects on each page. You can customize this number with the limit parameter. -// The response includes a nextToken string. To get the next page of results, -// run the request again and specify the string for the nextToken parameter. -// -// If you make a call to the GetDiscoveredResourceCounts action, you might not -// immediately receive resource counts in the following situations: -// -// You are a new AWS Config customer. -// -// You just enabled resource recording. +// GetComplianceDetailsByResource API operation for AWS Config. // -// It might take a few minutes for AWS Config to record and count your resources. -// Wait a few minutes and then retry the GetDiscoveredResourceCounts action. +// Returns the evaluation results for the specified AWS resource. The results +// indicate which AWS Config rules were used to evaluate the resource, when +// each rule was last used, and whether the resource complies with each rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetDiscoveredResourceCounts for usage and error information. +// API operation GetComplianceDetailsByResource for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. -// -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. 
-// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The specified next token is invalid. Specify the nextToken string that was -// returned in the previous response to get the next page of results. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetDiscoveredResourceCounts -func (c *ConfigService) GetDiscoveredResourceCounts(input *GetDiscoveredResourceCountsInput) (*GetDiscoveredResourceCountsOutput, error) { - req, out := c.GetDiscoveredResourceCountsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceDetailsByResource +func (c *ConfigService) GetComplianceDetailsByResource(input *GetComplianceDetailsByResourceInput) (*GetComplianceDetailsByResourceOutput, error) { + req, out := c.GetComplianceDetailsByResourceRequest(input) return out, req.Send() } -// GetDiscoveredResourceCountsWithContext is the same as GetDiscoveredResourceCounts with the addition of +// GetComplianceDetailsByResourceWithContext is the same as GetComplianceDetailsByResource with the addition of // the ability to pass a context and additional request options. // -// See GetDiscoveredResourceCounts for details on how to use this API operation. +// See GetComplianceDetailsByResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetDiscoveredResourceCountsWithContext(ctx aws.Context, input *GetDiscoveredResourceCountsInput, opts ...request.Option) (*GetDiscoveredResourceCountsOutput, error) { - req, out := c.GetDiscoveredResourceCountsRequest(input) +func (c *ConfigService) GetComplianceDetailsByResourceWithContext(ctx aws.Context, input *GetComplianceDetailsByResourceInput, opts ...request.Option) (*GetComplianceDetailsByResourceOutput, error) { + req, out := c.GetComplianceDetailsByResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetResourceConfigHistory = "GetResourceConfigHistory" +const opGetComplianceSummaryByConfigRule = "GetComplianceSummaryByConfigRule" -// GetResourceConfigHistoryRequest generates a "aws/request.Request" representing the -// client's request for the GetResourceConfigHistory operation. The "output" return +// GetComplianceSummaryByConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceSummaryByConfigRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetResourceConfigHistory for more information on using the GetResourceConfigHistory +// See GetComplianceSummaryByConfigRule for more information on using the GetComplianceSummaryByConfigRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the GetResourceConfigHistoryRequest method. -// req, resp := client.GetResourceConfigHistoryRequest(params) +// // Example sending a request using the GetComplianceSummaryByConfigRuleRequest method. +// req, resp := client.GetComplianceSummaryByConfigRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetResourceConfigHistory -func (c *ConfigService) GetResourceConfigHistoryRequest(input *GetResourceConfigHistoryInput) (req *request.Request, output *GetResourceConfigHistoryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByConfigRule +func (c *ConfigService) GetComplianceSummaryByConfigRuleRequest(input *GetComplianceSummaryByConfigRuleInput) (req *request.Request, output *GetComplianceSummaryByConfigRuleOutput) { op := &request.Operation{ - Name: opGetResourceConfigHistory, + Name: opGetComplianceSummaryByConfigRule, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, } if input == nil { - input = &GetResourceConfigHistoryInput{} + input = &GetComplianceSummaryByConfigRuleInput{} } - output = &GetResourceConfigHistoryOutput{} + output = &GetComplianceSummaryByConfigRuleOutput{} req = c.newRequest(op, input, output) return } -// GetResourceConfigHistory API operation for AWS Config. -// -// Returns a list of configuration items for the specified resource. The list -// contains details about each state of the resource during the specified time -// interval. If you specified a retention period to retain your ConfigurationItems -// between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config -// returns the ConfigurationItems for the specified retention period. -// -// The response is paginated. By default, AWS Config returns a limit of 10 configuration -// items per page. You can customize this number with the limit parameter. The -// response includes a nextToken string. To get the next page of results, run -// the request again and specify the string for the nextToken parameter. +// GetComplianceSummaryByConfigRule API operation for AWS Config. // -// Each call to the API is limited to span a duration of seven days. It is likely -// that the number of records returned is smaller than the specified limit. -// In such cases, you can make another call, using the nextToken. +// Returns the number of AWS Config rules that are compliant and noncompliant, +// up to a maximum of 25 for each. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation GetResourceConfigHistory for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. -// -// * ErrCodeInvalidTimeRangeException "InvalidTimeRangeException" -// The specified time range is not valid. The earlier time is not chronologically -// before the later time. -// -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. 
-// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The specified next token is invalid. Specify the nextToken string that was -// returned in the previous response to get the next page of results. -// -// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" -// There are no configuration recorders available to provide the role needed -// to describe your resources. Create a configuration recorder. -// -// * ErrCodeResourceNotDiscoveredException "ResourceNotDiscoveredException" -// You have specified a resource that is either unknown or has not been discovered. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetResourceConfigHistory -func (c *ConfigService) GetResourceConfigHistory(input *GetResourceConfigHistoryInput) (*GetResourceConfigHistoryOutput, error) { - req, out := c.GetResourceConfigHistoryRequest(input) +// API operation GetComplianceSummaryByConfigRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByConfigRule +func (c *ConfigService) GetComplianceSummaryByConfigRule(input *GetComplianceSummaryByConfigRuleInput) (*GetComplianceSummaryByConfigRuleOutput, error) { + req, out := c.GetComplianceSummaryByConfigRuleRequest(input) return out, req.Send() } -// GetResourceConfigHistoryWithContext is the same as GetResourceConfigHistory with the addition of +// GetComplianceSummaryByConfigRuleWithContext is the same as GetComplianceSummaryByConfigRule with the addition of // the ability to pass a context and additional request options. // -// See GetResourceConfigHistory for details on how to use this API operation. +// See GetComplianceSummaryByConfigRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) GetResourceConfigHistoryWithContext(ctx aws.Context, input *GetResourceConfigHistoryInput, opts ...request.Option) (*GetResourceConfigHistoryOutput, error) { - req, out := c.GetResourceConfigHistoryRequest(input) +func (c *ConfigService) GetComplianceSummaryByConfigRuleWithContext(ctx aws.Context, input *GetComplianceSummaryByConfigRuleInput, opts ...request.Option) (*GetComplianceSummaryByConfigRuleOutput, error) { + req, out := c.GetComplianceSummaryByConfigRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetResourceConfigHistoryPages iterates over the pages of a GetResourceConfigHistory operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetResourceConfigHistory method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetResourceConfigHistory operation. 
-// pageNum := 0 -// err := client.GetResourceConfigHistoryPages(params, -// func(page *GetResourceConfigHistoryOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ConfigService) GetResourceConfigHistoryPages(input *GetResourceConfigHistoryInput, fn func(*GetResourceConfigHistoryOutput, bool) bool) error { - return c.GetResourceConfigHistoryPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetResourceConfigHistoryPagesWithContext same as GetResourceConfigHistoryPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ConfigService) GetResourceConfigHistoryPagesWithContext(ctx aws.Context, input *GetResourceConfigHistoryInput, fn func(*GetResourceConfigHistoryOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetResourceConfigHistoryInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetResourceConfigHistoryRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetResourceConfigHistoryOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListAggregateDiscoveredResources = "ListAggregateDiscoveredResources" +const opGetComplianceSummaryByResourceType = "GetComplianceSummaryByResourceType" -// ListAggregateDiscoveredResourcesRequest generates a "aws/request.Request" representing the -// client's request for the ListAggregateDiscoveredResources operation. The "output" return +// GetComplianceSummaryByResourceTypeRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceSummaryByResourceType operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListAggregateDiscoveredResources for more information on using the ListAggregateDiscoveredResources +// See GetComplianceSummaryByResourceType for more information on using the GetComplianceSummaryByResourceType // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListAggregateDiscoveredResourcesRequest method. -// req, resp := client.ListAggregateDiscoveredResourcesRequest(params) +// // Example sending a request using the GetComplianceSummaryByResourceTypeRequest method. 
+// req, resp := client.GetComplianceSummaryByResourceTypeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListAggregateDiscoveredResources -func (c *ConfigService) ListAggregateDiscoveredResourcesRequest(input *ListAggregateDiscoveredResourcesInput) (req *request.Request, output *ListAggregateDiscoveredResourcesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByResourceType +func (c *ConfigService) GetComplianceSummaryByResourceTypeRequest(input *GetComplianceSummaryByResourceTypeInput) (req *request.Request, output *GetComplianceSummaryByResourceTypeOutput) { op := &request.Operation{ - Name: opListAggregateDiscoveredResources, + Name: opGetComplianceSummaryByResourceType, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListAggregateDiscoveredResourcesInput{} + input = &GetComplianceSummaryByResourceTypeInput{} } - output = &ListAggregateDiscoveredResourcesOutput{} + output = &GetComplianceSummaryByResourceTypeOutput{} req = c.newRequest(op, input, output) return } -// ListAggregateDiscoveredResources API operation for AWS Config. -// -// Accepts a resource type and returns a list of resource identifiers that are -// aggregated for a specific resource type across accounts and regions. A resource -// identifier includes the resource type, ID, (if available) the custom resource -// name, source account, and source region. You can narrow the results to include -// only resources that have specific resource IDs, or a resource name, or source -// account ID, or source region. +// GetComplianceSummaryByResourceType API operation for AWS Config. // -// For example, if the input consists of accountID 12345678910 and the region -// is us-east-1 for resource type AWS::EC2::Instance then the API returns all -// the EC2 instance identifiers of accountID 12345678910 and region us-east-1. +// Returns the number of resources that are compliant and the number that are +// noncompliant. You can specify one or more resource types to get these numbers +// for each resource type. The maximum number returned is 100. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation ListAggregateDiscoveredResources for usage and error information. +// API operation GetComplianceSummaryByResourceType for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. -// -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The specified next token is invalid. Specify the nextToken string that was -// returned in the previous response to get the next page of results. -// -// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" -// You have specified a configuration aggregator that does not exist. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. 
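// Editor's note (sketch, not part of the vendored file): a plain call to the
// summary-by-resource-type operation documented above, narrowed to one
// resource type. The type string is an assumed example; ResourceTypes is
// optional and, per the comments, at most 100 results come back. Assumes the
// same client as the earlier sketch plus "github.com/aws/aws-sdk-go/aws".
func resourceTypeSummarySketch(svc *configservice.ConfigService) error {
	out, err := svc.GetComplianceSummaryByResourceType(&configservice.GetComplianceSummaryByResourceTypeInput{
		ResourceTypes: []*string{aws.String("AWS::EC2::Instance")},
	})
	if err != nil {
		return err
	}
	fmt.Println(out.ComplianceSummariesByResourceType)
	return nil
}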
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListAggregateDiscoveredResources -func (c *ConfigService) ListAggregateDiscoveredResources(input *ListAggregateDiscoveredResourcesInput) (*ListAggregateDiscoveredResourcesOutput, error) { - req, out := c.ListAggregateDiscoveredResourcesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetComplianceSummaryByResourceType +func (c *ConfigService) GetComplianceSummaryByResourceType(input *GetComplianceSummaryByResourceTypeInput) (*GetComplianceSummaryByResourceTypeOutput, error) { + req, out := c.GetComplianceSummaryByResourceTypeRequest(input) return out, req.Send() } -// ListAggregateDiscoveredResourcesWithContext is the same as ListAggregateDiscoveredResources with the addition of +// GetComplianceSummaryByResourceTypeWithContext is the same as GetComplianceSummaryByResourceType with the addition of // the ability to pass a context and additional request options. // -// See ListAggregateDiscoveredResources for details on how to use this API operation. +// See GetComplianceSummaryByResourceType for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) ListAggregateDiscoveredResourcesWithContext(ctx aws.Context, input *ListAggregateDiscoveredResourcesInput, opts ...request.Option) (*ListAggregateDiscoveredResourcesOutput, error) { - req, out := c.ListAggregateDiscoveredResourcesRequest(input) +func (c *ConfigService) GetComplianceSummaryByResourceTypeWithContext(ctx aws.Context, input *GetComplianceSummaryByResourceTypeInput, opts ...request.Option) (*GetComplianceSummaryByResourceTypeOutput, error) { + req, out := c.GetComplianceSummaryByResourceTypeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListDiscoveredResources = "ListDiscoveredResources" +const opGetDiscoveredResourceCounts = "GetDiscoveredResourceCounts" -// ListDiscoveredResourcesRequest generates a "aws/request.Request" representing the -// client's request for the ListDiscoveredResources operation. The "output" return +// GetDiscoveredResourceCountsRequest generates a "aws/request.Request" representing the +// client's request for the GetDiscoveredResourceCounts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDiscoveredResources for more information on using the ListDiscoveredResources +// See GetDiscoveredResourceCounts for more information on using the GetDiscoveredResourceCounts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDiscoveredResourcesRequest method. -// req, resp := client.ListDiscoveredResourcesRequest(params) +// // Example sending a request using the GetDiscoveredResourceCountsRequest method. 
+// req, resp := client.GetDiscoveredResourceCountsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListDiscoveredResources -func (c *ConfigService) ListDiscoveredResourcesRequest(input *ListDiscoveredResourcesInput) (req *request.Request, output *ListDiscoveredResourcesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetDiscoveredResourceCounts +func (c *ConfigService) GetDiscoveredResourceCountsRequest(input *GetDiscoveredResourceCountsInput) (req *request.Request, output *GetDiscoveredResourceCountsOutput) { op := &request.Operation{ - Name: opListDiscoveredResources, + Name: opGetDiscoveredResourceCounts, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListDiscoveredResourcesInput{} + input = &GetDiscoveredResourceCountsInput{} } - output = &ListDiscoveredResourcesOutput{} + output = &GetDiscoveredResourceCountsOutput{} req = c.newRequest(op, input, output) return } -// ListDiscoveredResources API operation for AWS Config. +// GetDiscoveredResourceCounts API operation for AWS Config. // -// Accepts a resource type and returns a list of resource identifiers for the -// resources of that type. A resource identifier includes the resource type, -// ID, and (if available) the custom resource name. The results consist of resources -// that AWS Config has discovered, including those that AWS Config is not currently -// recording. You can narrow the results to include only resources that have -// specific resource IDs or a resource name. +// Returns the resource types, the number of each resource type, and the total +// number of resources that AWS Config is recording in this region for your +// AWS account. // -// You can specify either resource IDs or a resource name, but not both, in -// the same request. +// Example // -// The response is paginated. By default, AWS Config lists 100 resource identifiers -// on each page. You can customize this number with the limit parameter. The -// response includes a nextToken string. To get the next page of results, run -// the request again and specify the string for the nextToken parameter. +// AWS Config is recording three resource types in the US East (Ohio) Region +// for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// You make a call to the GetDiscoveredResourceCounts action and specify that +// you want all resource types. // -// See the AWS API reference guide for AWS Config's -// API operation ListDiscoveredResources for usage and error information. +// AWS Config returns the following: // -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. +// * The resource types (EC2 instances, IAM users, and S3 buckets). +// +// * The number of each resource type (25, 20, and 15). +// +// * The total number of all resources (60). +// +// The response is paginated. By default, AWS Config lists 100 ResourceCount +// objects on each page. You can customize this number with the limit parameter. +// The response includes a nextToken string. To get the next page of results, +// run the request again and specify the string for the nextToken parameter. 
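// Editor's note (sketch, not part of the vendored file): the nextToken paging
// contract described above, driven by hand. The Limit value and printed
// fields are illustrative; assumes the same imports/client as the earlier
// sketches.
func resourceCountsSketch(svc *configservice.ConfigService) error {
	input := &configservice.GetDiscoveredResourceCountsInput{Limit: aws.Int64(50)}
	for {
		page, err := svc.GetDiscoveredResourceCounts(input)
		if err != nil {
			return err
		}
		fmt.Println(aws.Int64Value(page.TotalDiscoveredResources), len(page.ResourceCounts))
		if page.NextToken == nil { // no more pages
			return nil
		}
		input.NextToken = page.NextToken // request the next page
	}
}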
+// +// If you make a call to the GetDiscoveredResourceCounts action, you might not +// immediately receive resource counts in the following situations: +// +// * You are a new AWS Config customer. +// +// * You just enabled resource recording. +// +// It might take a few minutes for AWS Config to record and count your resources. +// Wait a few minutes and then retry the GetDiscoveredResourceCounts action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation GetDiscoveredResourceCounts for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // // * ErrCodeInvalidLimitException "InvalidLimitException" // The specified limit is outside the allowable range. @@ -3701,91 +3928,87 @@ func (c *ConfigService) ListDiscoveredResourcesRequest(input *ListDiscoveredReso // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // -// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" -// There are no configuration recorders available to provide the role needed -// to describe your resources. Create a configuration recorder. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListDiscoveredResources -func (c *ConfigService) ListDiscoveredResources(input *ListDiscoveredResourcesInput) (*ListDiscoveredResourcesOutput, error) { - req, out := c.ListDiscoveredResourcesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetDiscoveredResourceCounts +func (c *ConfigService) GetDiscoveredResourceCounts(input *GetDiscoveredResourceCountsInput) (*GetDiscoveredResourceCountsOutput, error) { + req, out := c.GetDiscoveredResourceCountsRequest(input) return out, req.Send() } -// ListDiscoveredResourcesWithContext is the same as ListDiscoveredResources with the addition of +// GetDiscoveredResourceCountsWithContext is the same as GetDiscoveredResourceCounts with the addition of // the ability to pass a context and additional request options. // -// See ListDiscoveredResources for details on how to use this API operation. +// See GetDiscoveredResourceCounts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) ListDiscoveredResourcesWithContext(ctx aws.Context, input *ListDiscoveredResourcesInput, opts ...request.Option) (*ListDiscoveredResourcesOutput, error) { - req, out := c.ListDiscoveredResourcesRequest(input) +func (c *ConfigService) GetDiscoveredResourceCountsWithContext(ctx aws.Context, input *GetDiscoveredResourceCountsInput, opts ...request.Option) (*GetDiscoveredResourceCountsOutput, error) { + req, out := c.GetDiscoveredResourceCountsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opListTagsForResource = "ListTagsForResource" +const opGetOrganizationConfigRuleDetailedStatus = "GetOrganizationConfigRuleDetailedStatus" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// GetOrganizationConfigRuleDetailedStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetOrganizationConfigRuleDetailedStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See GetOrganizationConfigRuleDetailedStatus for more information on using the GetOrganizationConfigRuleDetailedStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the GetOrganizationConfigRuleDetailedStatusRequest method. +// req, resp := client.GetOrganizationConfigRuleDetailedStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListTagsForResource -func (c *ConfigService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetOrganizationConfigRuleDetailedStatus +func (c *ConfigService) GetOrganizationConfigRuleDetailedStatusRequest(input *GetOrganizationConfigRuleDetailedStatusInput) (req *request.Request, output *GetOrganizationConfigRuleDetailedStatusOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opGetOrganizationConfigRuleDetailedStatus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListTagsForResourceInput{} + input = &GetOrganizationConfigRuleDetailedStatusInput{} } - output = &ListTagsForResourceOutput{} + output = &GetOrganizationConfigRuleDetailedStatusOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for AWS Config. +// GetOrganizationConfigRuleDetailedStatus API operation for AWS Config. // -// List the tags for AWS Config resource. +// Returns detailed status for each member account within an organization for +// a given organization config rule. +// +// Only a master account can call this API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation ListTagsForResource for usage and error information. +// API operation GetOrganizationConfigRuleDetailedStatus for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// You have specified a resource that does not exist. 
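// Editor's note (sketch, not part of the vendored file): calling the
// organization-status operation documented above. The rule name is a
// hypothetical placeholder, and per the comments this must be run from the
// organization master account. Assumes the same imports/client as the
// earlier sketches.
func orgRuleStatusSketch(svc *configservice.ConfigService) error {
	out, err := svc.GetOrganizationConfigRuleDetailedStatus(&configservice.GetOrganizationConfigRuleDetailedStatusInput{
		OrganizationConfigRuleName: aws.String("example-org-rule"), // hypothetical name
	})
	if err != nil {
		return err
	}
	for _, s := range out.OrganizationConfigRuleDetailedStatus {
		fmt.Println(aws.StringValue(s.AccountId), aws.StringValue(s.MemberAccountRuleStatus))
	}
	return nil
}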
-// -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. +// * ErrCodeNoSuchOrganizationConfigRuleException "NoSuchOrganizationConfigRuleException" +// You specified one or more organization config rules that do not exist. // // * ErrCodeInvalidLimitException "InvalidLimitException" // The specified limit is outside the allowable range. @@ -3794,1791 +4017,3656 @@ func (c *ConfigService) ListTagsForResourceRequest(input *ListTagsForResourceInp // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListTagsForResource -func (c *ConfigService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException" +// For PutConfigAggregator API, no permission to call EnableAWSServiceAccess +// API. +// +// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs +// are called from member accounts. All APIs must be called from organization +// master account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetOrganizationConfigRuleDetailedStatus +func (c *ConfigService) GetOrganizationConfigRuleDetailedStatus(input *GetOrganizationConfigRuleDetailedStatusInput) (*GetOrganizationConfigRuleDetailedStatusOutput, error) { + req, out := c.GetOrganizationConfigRuleDetailedStatusRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// GetOrganizationConfigRuleDetailedStatusWithContext is the same as GetOrganizationConfigRuleDetailedStatus with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See GetOrganizationConfigRuleDetailedStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *ConfigService) GetOrganizationConfigRuleDetailedStatusWithContext(ctx aws.Context, input *GetOrganizationConfigRuleDetailedStatusInput, opts ...request.Option) (*GetOrganizationConfigRuleDetailedStatusOutput, error) { + req, out := c.GetOrganizationConfigRuleDetailedStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutAggregationAuthorization = "PutAggregationAuthorization" +const opGetResourceConfigHistory = "GetResourceConfigHistory" -// PutAggregationAuthorizationRequest generates a "aws/request.Request" representing the -// client's request for the PutAggregationAuthorization operation. The "output" return +// GetResourceConfigHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetResourceConfigHistory operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutAggregationAuthorization for more information on using the PutAggregationAuthorization +// See GetResourceConfigHistory for more information on using the GetResourceConfigHistory // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutAggregationAuthorizationRequest method. -// req, resp := client.PutAggregationAuthorizationRequest(params) +// // Example sending a request using the GetResourceConfigHistoryRequest method. +// req, resp := client.GetResourceConfigHistoryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutAggregationAuthorization -func (c *ConfigService) PutAggregationAuthorizationRequest(input *PutAggregationAuthorizationInput) (req *request.Request, output *PutAggregationAuthorizationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetResourceConfigHistory +func (c *ConfigService) GetResourceConfigHistoryRequest(input *GetResourceConfigHistoryInput) (req *request.Request, output *GetResourceConfigHistoryOutput) { op := &request.Operation{ - Name: opPutAggregationAuthorization, + Name: opGetResourceConfigHistory, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, } if input == nil { - input = &PutAggregationAuthorizationInput{} + input = &GetResourceConfigHistoryInput{} } - output = &PutAggregationAuthorizationOutput{} + output = &GetResourceConfigHistoryOutput{} req = c.newRequest(op, input, output) return } -// PutAggregationAuthorization API operation for AWS Config. +// GetResourceConfigHistory API operation for AWS Config. // -// Authorizes the aggregator account and region to collect data from the source -// account and region. +// Returns a list of configuration items for the specified resource. The list +// contains details about each state of the resource during the specified time +// interval. If you specified a retention period to retain your ConfigurationItems +// between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config +// returns the ConfigurationItems for the specified retention period. +// +// The response is paginated. By default, AWS Config returns a limit of 10 configuration +// items per page. You can customize this number with the limit parameter. The +// response includes a nextToken string. To get the next page of results, run +// the request again and specify the string for the nextToken parameter. +// +// Each call to the API is limited to span a duration of seven days. It is likely +// that the number of records returned is smaller than the specified limit. +// In such cases, you can make another call, using the nextToken. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutAggregationAuthorization for usage and error information. 
+// API operation GetResourceConfigHistory for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutAggregationAuthorization -func (c *ConfigService) PutAggregationAuthorization(input *PutAggregationAuthorizationInput) (*PutAggregationAuthorizationOutput, error) { - req, out := c.PutAggregationAuthorizationRequest(input) +// * ErrCodeInvalidTimeRangeException "InvalidTimeRangeException" +// The specified time range is not valid. The earlier time is not chronologically +// before the later time. +// +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. +// +// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" +// There are no configuration recorders available to provide the role needed +// to describe your resources. Create a configuration recorder. +// +// * ErrCodeResourceNotDiscoveredException "ResourceNotDiscoveredException" +// You have specified a resource that is either unknown or has not been discovered. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetResourceConfigHistory +func (c *ConfigService) GetResourceConfigHistory(input *GetResourceConfigHistoryInput) (*GetResourceConfigHistoryOutput, error) { + req, out := c.GetResourceConfigHistoryRequest(input) return out, req.Send() } -// PutAggregationAuthorizationWithContext is the same as PutAggregationAuthorization with the addition of +// GetResourceConfigHistoryWithContext is the same as GetResourceConfigHistory with the addition of // the ability to pass a context and additional request options. // -// See PutAggregationAuthorization for details on how to use this API operation. +// See GetResourceConfigHistory for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutAggregationAuthorizationWithContext(ctx aws.Context, input *PutAggregationAuthorizationInput, opts ...request.Option) (*PutAggregationAuthorizationOutput, error) { - req, out := c.PutAggregationAuthorizationRequest(input) +func (c *ConfigService) GetResourceConfigHistoryWithContext(ctx aws.Context, input *GetResourceConfigHistoryInput, opts ...request.Option) (*GetResourceConfigHistoryOutput, error) { + req, out := c.GetResourceConfigHistoryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutConfigRule = "PutConfigRule" +// GetResourceConfigHistoryPages iterates over the pages of a GetResourceConfigHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
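// Editor's note (sketch, not part of the vendored file): before the Pages
// helpers below, a single bounded call covering the maximum seven-day span
// the comments above describe. The resource identifiers are assumed; also
// assumes a "time" import alongside the earlier ones.
func configHistorySketch(svc *configservice.ConfigService) error {
	now := time.Now()
	out, err := svc.GetResourceConfigHistory(&configservice.GetResourceConfigHistoryInput{
		ResourceType: aws.String("AWS::EC2::Instance"),
		ResourceId:   aws.String("i-0123456789abcdef0"), // hypothetical instance
		EarlierTime:  aws.Time(now.AddDate(0, 0, -7)),   // span is capped at seven days
		LaterTime:    aws.Time(now),
		Limit:        aws.Int64(10), // the documented default page size
	})
	if err != nil {
		return err
	}
	fmt.Println(len(out.ConfigurationItems), aws.StringValue(out.NextToken))
	return nil
}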
+// +// See GetResourceConfigHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetResourceConfigHistory operation. +// pageNum := 0 +// err := client.GetResourceConfigHistoryPages(params, +// func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ConfigService) GetResourceConfigHistoryPages(input *GetResourceConfigHistoryInput, fn func(*GetResourceConfigHistoryOutput, bool) bool) error { + return c.GetResourceConfigHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} -// PutConfigRuleRequest generates a "aws/request.Request" representing the -// client's request for the PutConfigRule operation. The "output" return +// GetResourceConfigHistoryPagesWithContext same as GetResourceConfigHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) GetResourceConfigHistoryPagesWithContext(ctx aws.Context, input *GetResourceConfigHistoryInput, fn func(*GetResourceConfigHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetResourceConfigHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetResourceConfigHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetResourceConfigHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAggregateDiscoveredResources = "ListAggregateDiscoveredResources" + +// ListAggregateDiscoveredResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListAggregateDiscoveredResources operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutConfigRule for more information on using the PutConfigRule +// See ListAggregateDiscoveredResources for more information on using the ListAggregateDiscoveredResources // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutConfigRuleRequest method. -// req, resp := client.PutConfigRuleRequest(params) +// // Example sending a request using the ListAggregateDiscoveredResourcesRequest method. 
+// req, resp := client.ListAggregateDiscoveredResourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigRule -func (c *ConfigService) PutConfigRuleRequest(input *PutConfigRuleInput) (req *request.Request, output *PutConfigRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListAggregateDiscoveredResources +func (c *ConfigService) ListAggregateDiscoveredResourcesRequest(input *ListAggregateDiscoveredResourcesInput) (req *request.Request, output *ListAggregateDiscoveredResourcesOutput) { op := &request.Operation{ - Name: opPutConfigRule, + Name: opListAggregateDiscoveredResources, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutConfigRuleInput{} + input = &ListAggregateDiscoveredResourcesInput{} } - output = &PutConfigRuleOutput{} + output = &ListAggregateDiscoveredResourcesOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutConfigRule API operation for AWS Config. +// ListAggregateDiscoveredResources API operation for AWS Config. // -// Adds or updates an AWS Config rule for evaluating whether your AWS resources -// comply with your desired configurations. +// Accepts a resource type and returns a list of resource identifiers that are +// aggregated for a specific resource type across accounts and regions. A resource +// identifier includes the resource type, ID, (if available) the custom resource +// name, source account, and source region. You can narrow the results to include +// only resources that have specific resource IDs, or a resource name, or source +// account ID, or source region. // -// You can use this action for custom AWS Config rules and AWS managed Config -// rules. A custom AWS Config rule is a rule that you develop and maintain. -// An AWS managed Config rule is a customizable, predefined rule that AWS Config -// provides. +// For example, if the input consists of accountID 12345678910 and the region +// is us-east-1 for resource type AWS::EC2::Instance then the API returns all +// the EC2 instance identifiers of accountID 12345678910 and region us-east-1. // -// If you are adding a new custom AWS Config rule, you must first create the -// AWS Lambda function that the rule invokes to evaluate your resources. When -// you use the PutConfigRule action to add the rule to AWS Config, you must -// specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. -// Specify the ARN for the SourceIdentifier key. This key is part of the Source -// object, which is part of the ConfigRule object. -// -// If you are adding an AWS managed Config rule, specify the rule's identifier -// for the SourceIdentifier key. To reference AWS managed Config rule identifiers, -// see About AWS Managed Config Rules (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). -// -// For any new rule that you add, specify the ConfigRuleName in the ConfigRule -// object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values -// are generated by AWS Config for new rules. -// -// If you are updating a rule that you added previously, you can specify the -// rule by ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule -// data type that you use in this request. 
-// -// The maximum number of rules that AWS Config supports is 150. -// -// For information about requesting a rule limit increase, see AWS Config Limits -// (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config) -// in the AWS General Reference Guide. -// -// For more information about developing and using AWS Config rules, see Evaluating -// AWS Resource Configurations with AWS Config (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) -// in the AWS Config Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // // See the AWS API reference guide for AWS Config's -// API operation PutConfigRule for usage and error information. +// API operation ListAggregateDiscoveredResources for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. -// -// * ErrCodeMaxNumberOfConfigRulesExceededException "MaxNumberOfConfigRulesExceededException" -// Failed to add the AWS Config rule because the account already contains the -// maximum number of 150 rules. Consider deleting any deactivated rules before -// you add new rules. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The rule is currently being deleted or the rule is deleting your evaluation -// results. Try your request again later. -// -// * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" -// Indicates one of the following errors: +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // -// * The rule cannot be created because the IAM role assigned to AWS Config -// lacks permissions to perform the config:Put* action. +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // -// * The AWS Lambda function cannot be invoked. Check the function ARN, and -// check the function's permissions. +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. // -// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" -// There are no configuration recorders available to provide the role needed -// to describe your resources. Create a configuration recorder. +// * ErrCodeNoSuchConfigurationAggregatorException "NoSuchConfigurationAggregatorException" +// You have specified a configuration aggregator that does not exist. 
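// Editor's note (sketch, not part of the vendored file): the aggregated
// listing described above, narrowed to one resource type across the
// aggregator's accounts and regions. The aggregator name is a hypothetical
// placeholder; assumes the same imports/client as the earlier sketches.
func aggregateListSketch(svc *configservice.ConfigService) error {
	out, err := svc.ListAggregateDiscoveredResources(&configservice.ListAggregateDiscoveredResourcesInput{
		ConfigurationAggregatorName: aws.String("example-aggregator"), // hypothetical
		ResourceType:                aws.String("AWS::EC2::Instance"),
	})
	if err != nil {
		return err
	}
	for _, id := range out.ResourceIdentifiers {
		fmt.Println(aws.StringValue(id.SourceAccountId), aws.StringValue(id.ResourceId))
	}
	return nil
}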
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigRule -func (c *ConfigService) PutConfigRule(input *PutConfigRuleInput) (*PutConfigRuleOutput, error) { - req, out := c.PutConfigRuleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListAggregateDiscoveredResources +func (c *ConfigService) ListAggregateDiscoveredResources(input *ListAggregateDiscoveredResourcesInput) (*ListAggregateDiscoveredResourcesOutput, error) { + req, out := c.ListAggregateDiscoveredResourcesRequest(input) return out, req.Send() } -// PutConfigRuleWithContext is the same as PutConfigRule with the addition of +// ListAggregateDiscoveredResourcesWithContext is the same as ListAggregateDiscoveredResources with the addition of // the ability to pass a context and additional request options. // -// See PutConfigRule for details on how to use this API operation. +// See ListAggregateDiscoveredResources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutConfigRuleWithContext(ctx aws.Context, input *PutConfigRuleInput, opts ...request.Option) (*PutConfigRuleOutput, error) { - req, out := c.PutConfigRuleRequest(input) +func (c *ConfigService) ListAggregateDiscoveredResourcesWithContext(ctx aws.Context, input *ListAggregateDiscoveredResourcesInput, opts ...request.Option) (*ListAggregateDiscoveredResourcesOutput, error) { + req, out := c.ListAggregateDiscoveredResourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutConfigurationAggregator = "PutConfigurationAggregator" +const opListDiscoveredResources = "ListDiscoveredResources" -// PutConfigurationAggregatorRequest generates a "aws/request.Request" representing the -// client's request for the PutConfigurationAggregator operation. The "output" return +// ListDiscoveredResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListDiscoveredResources operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutConfigurationAggregator for more information on using the PutConfigurationAggregator +// See ListDiscoveredResources for more information on using the ListDiscoveredResources // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutConfigurationAggregatorRequest method. -// req, resp := client.PutConfigurationAggregatorRequest(params) +// // Example sending a request using the ListDiscoveredResourcesRequest method. 
+// req, resp := client.ListDiscoveredResourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationAggregator -func (c *ConfigService) PutConfigurationAggregatorRequest(input *PutConfigurationAggregatorInput) (req *request.Request, output *PutConfigurationAggregatorOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListDiscoveredResources +func (c *ConfigService) ListDiscoveredResourcesRequest(input *ListDiscoveredResourcesInput) (req *request.Request, output *ListDiscoveredResourcesOutput) { op := &request.Operation{ - Name: opPutConfigurationAggregator, + Name: opListDiscoveredResources, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutConfigurationAggregatorInput{} + input = &ListDiscoveredResourcesInput{} } - output = &PutConfigurationAggregatorOutput{} + output = &ListDiscoveredResourcesOutput{} req = c.newRequest(op, input, output) return } -// PutConfigurationAggregator API operation for AWS Config. +// ListDiscoveredResources API operation for AWS Config. // -// Creates and updates the configuration aggregator with the selected source -// accounts and regions. The source account can be individual account(s) or -// an organization. +// Accepts a resource type and returns a list of resource identifiers for the +// resources of that type. A resource identifier includes the resource type, +// ID, and (if available) the custom resource name. The results consist of resources +// that AWS Config has discovered, including those that AWS Config is not currently +// recording. You can narrow the results to include only resources that have +// specific resource IDs or a resource name. // -// AWS Config should be enabled in source accounts and regions you want to aggregate. +// You can specify either resource IDs or a resource name, but not both, in +// the same request. // -// If your source type is an organization, you must be signed in to the master -// account and all features must be enabled in your organization. AWS Config -// calls EnableAwsServiceAccess API to enable integration between AWS Config -// and AWS Organizations. +// The response is paginated. By default, AWS Config lists 100 resource identifiers +// on each page. You can customize this number with the limit parameter. The +// response includes a nextToken string. To get the next page of results, run +// the request again and specify the string for the nextToken parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutConfigurationAggregator for usage and error information. +// API operation ListDiscoveredResources for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// For StartConfigRulesEvaluation API, this exception is thrown if an evaluation -// is in progress or if you call the StartConfigRulesEvaluation API more than -// once per minute. 
-// -// For PutConfigurationAggregator API, this exception is thrown if the number -// of accounts and aggregators exceeds the limit. -// -// * ErrCodeInvalidRoleException "InvalidRoleException" -// You have provided a null or empty role ARN. +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // -// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException" -// No permission to call the EnableAWSServiceAccess API. +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // -// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" -// Organization does is no longer available. +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. // -// * ErrCodeOrganizationAllFeaturesNotEnabledException "OrganizationAllFeaturesNotEnabledException" -// The configuration aggregator cannot be created because organization does -// not have all features enabled. +// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" +// There are no configuration recorders available to provide the role needed +// to describe your resources. Create a configuration recorder. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationAggregator -func (c *ConfigService) PutConfigurationAggregator(input *PutConfigurationAggregatorInput) (*PutConfigurationAggregatorOutput, error) { - req, out := c.PutConfigurationAggregatorRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListDiscoveredResources +func (c *ConfigService) ListDiscoveredResources(input *ListDiscoveredResourcesInput) (*ListDiscoveredResourcesOutput, error) { + req, out := c.ListDiscoveredResourcesRequest(input) return out, req.Send() } -// PutConfigurationAggregatorWithContext is the same as PutConfigurationAggregator with the addition of +// ListDiscoveredResourcesWithContext is the same as ListDiscoveredResources with the addition of // the ability to pass a context and additional request options. // -// See PutConfigurationAggregator for details on how to use this API operation. +// See ListDiscoveredResources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutConfigurationAggregatorWithContext(ctx aws.Context, input *PutConfigurationAggregatorInput, opts ...request.Option) (*PutConfigurationAggregatorOutput, error) { - req, out := c.PutConfigurationAggregatorRequest(input) +func (c *ConfigService) ListDiscoveredResourcesWithContext(ctx aws.Context, input *ListDiscoveredResourcesInput, opts ...request.Option) (*ListDiscoveredResourcesOutput, error) { + req, out := c.ListDiscoveredResourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutConfigurationRecorder = "PutConfigurationRecorder" +const opListTagsForResource = "ListTagsForResource" -// PutConfigurationRecorderRequest generates a "aws/request.Request" representing the -// client's request for the PutConfigurationRecorder operation. 
The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutConfigurationRecorder for more information on using the PutConfigurationRecorder +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutConfigurationRecorderRequest method. -// req, resp := client.PutConfigurationRecorderRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationRecorder -func (c *ConfigService) PutConfigurationRecorderRequest(input *PutConfigurationRecorderInput) (req *request.Request, output *PutConfigurationRecorderOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListTagsForResource +func (c *ConfigService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opPutConfigurationRecorder, + Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutConfigurationRecorderInput{} + input = &ListTagsForResourceInput{} } - output = &PutConfigurationRecorderOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutConfigurationRecorder API operation for AWS Config. -// -// Creates a new configuration recorder to record the selected resource configurations. -// -// You can use this action to change the role roleARN or the recordingGroup -// of an existing recorder. To change the role, call the action on the existing -// configuration recorder and specify a role. -// -// Currently, you can specify only one configuration recorder per region in -// your account. +// ListTagsForResource API operation for AWS Config. // -// If ConfigurationRecorder does not have the recordingGroup parameter specified, -// the default is to record all supported resource types. +// List the tags for AWS Config resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutConfigurationRecorder for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Codes: -// * ErrCodeMaxNumberOfConfigurationRecordersExceededException "MaxNumberOfConfigurationRecordersExceededException" -// You have reached the limit of the number of recorders you can create. 
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// You have specified a resource that does not exist. // -// * ErrCodeInvalidConfigurationRecorderNameException "InvalidConfigurationRecorderNameException" -// You have provided a configuration recorder name that is not valid. +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. // -// * ErrCodeInvalidRoleException "InvalidRoleException" -// You have provided a null or empty role ARN. +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // -// * ErrCodeInvalidRecordingGroupException "InvalidRecordingGroupException" -// AWS Config throws an exception if the recording group does not contain a -// valid list of resource types. Invalid values might also be incorrectly formatted. +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationRecorder -func (c *ConfigService) PutConfigurationRecorder(input *PutConfigurationRecorderInput) (*PutConfigurationRecorderOutput, error) { - req, out := c.PutConfigurationRecorderRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/ListTagsForResource +func (c *ConfigService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// PutConfigurationRecorderWithContext is the same as PutConfigurationRecorder with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See PutConfigurationRecorder for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutConfigurationRecorderWithContext(ctx aws.Context, input *PutConfigurationRecorderInput, opts ...request.Option) (*PutConfigurationRecorderOutput, error) { - req, out := c.PutConfigurationRecorderRequest(input) +func (c *ConfigService) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutDeliveryChannel = "PutDeliveryChannel" +const opPutAggregationAuthorization = "PutAggregationAuthorization" -// PutDeliveryChannelRequest generates a "aws/request.Request" representing the -// client's request for the PutDeliveryChannel operation. The "output" return +// PutAggregationAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the PutAggregationAuthorization operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See PutDeliveryChannel for more information on using the PutDeliveryChannel +// See PutAggregationAuthorization for more information on using the PutAggregationAuthorization // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutDeliveryChannelRequest method. -// req, resp := client.PutDeliveryChannelRequest(params) +// // Example sending a request using the PutAggregationAuthorizationRequest method. +// req, resp := client.PutAggregationAuthorizationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutDeliveryChannel -func (c *ConfigService) PutDeliveryChannelRequest(input *PutDeliveryChannelInput) (req *request.Request, output *PutDeliveryChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutAggregationAuthorization +func (c *ConfigService) PutAggregationAuthorizationRequest(input *PutAggregationAuthorizationInput) (req *request.Request, output *PutAggregationAuthorizationOutput) { op := &request.Operation{ - Name: opPutDeliveryChannel, + Name: opPutAggregationAuthorization, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutDeliveryChannelInput{} + input = &PutAggregationAuthorizationInput{} } - output = &PutDeliveryChannelOutput{} + output = &PutAggregationAuthorizationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutDeliveryChannel API operation for AWS Config. +// PutAggregationAuthorization API operation for AWS Config. // -// Creates a delivery channel object to deliver configuration information to -// an Amazon S3 bucket and Amazon SNS topic. -// -// Before you can create a delivery channel, you must create a configuration -// recorder. -// -// You can use this action to change the Amazon S3 bucket or an Amazon SNS topic -// of the existing delivery channel. To change the Amazon S3 bucket or an Amazon -// SNS topic, call this action and specify the changed values for the S3 bucket -// and the SNS topic. If you specify a different value for either the S3 bucket -// or the SNS topic, this action will keep the existing value for the parameter -// that is not changed. -// -// You can have only one delivery channel per region in your account. +// Authorizes the aggregator account and region to collect data from the source +// account and region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutDeliveryChannel for usage and error information. +// API operation PutAggregationAuthorization for usage and error information. // // Returned Error Codes: -// * ErrCodeMaxNumberOfDeliveryChannelsExceededException "MaxNumberOfDeliveryChannelsExceededException" -// You have reached the limit of the number of delivery channels you can create. 
-// -// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" -// There are no configuration recorders available to provide the role needed -// to describe your resources. Create a configuration recorder. -// -// * ErrCodeInvalidDeliveryChannelNameException "InvalidDeliveryChannelNameException" -// The specified delivery channel name is not valid. -// -// * ErrCodeNoSuchBucketException "NoSuchBucketException" -// The specified Amazon S3 bucket does not exist. -// -// * ErrCodeInvalidS3KeyPrefixException "InvalidS3KeyPrefixException" -// The specified Amazon S3 key prefix is not valid. -// -// * ErrCodeInvalidSNSTopicARNException "InvalidSNSTopicARNException" -// The specified Amazon SNS topic does not exist. -// -// * ErrCodeInsufficientDeliveryPolicyException "InsufficientDeliveryPolicyException" -// Your Amazon S3 bucket policy does not permit AWS Config to write to it. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutDeliveryChannel -func (c *ConfigService) PutDeliveryChannel(input *PutDeliveryChannelInput) (*PutDeliveryChannelOutput, error) { - req, out := c.PutDeliveryChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutAggregationAuthorization +func (c *ConfigService) PutAggregationAuthorization(input *PutAggregationAuthorizationInput) (*PutAggregationAuthorizationOutput, error) { + req, out := c.PutAggregationAuthorizationRequest(input) return out, req.Send() } -// PutDeliveryChannelWithContext is the same as PutDeliveryChannel with the addition of +// PutAggregationAuthorizationWithContext is the same as PutAggregationAuthorization with the addition of // the ability to pass a context and additional request options. // -// See PutDeliveryChannel for details on how to use this API operation. +// See PutAggregationAuthorization for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutDeliveryChannelWithContext(ctx aws.Context, input *PutDeliveryChannelInput, opts ...request.Option) (*PutDeliveryChannelOutput, error) { - req, out := c.PutDeliveryChannelRequest(input) +func (c *ConfigService) PutAggregationAuthorizationWithContext(ctx aws.Context, input *PutAggregationAuthorizationInput, opts ...request.Option) (*PutAggregationAuthorizationOutput, error) { + req, out := c.PutAggregationAuthorizationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutEvaluations = "PutEvaluations" +const opPutConfigRule = "PutConfigRule" -// PutEvaluationsRequest generates a "aws/request.Request" representing the -// client's request for the PutEvaluations operation. The "output" return +// PutConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the PutConfigRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See PutEvaluations for more information on using the PutEvaluations +// See PutConfigRule for more information on using the PutConfigRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutEvaluationsRequest method. -// req, resp := client.PutEvaluationsRequest(params) +// // Example sending a request using the PutConfigRuleRequest method. +// req, resp := client.PutConfigRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutEvaluations -func (c *ConfigService) PutEvaluationsRequest(input *PutEvaluationsInput) (req *request.Request, output *PutEvaluationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigRule +func (c *ConfigService) PutConfigRuleRequest(input *PutConfigRuleInput) (req *request.Request, output *PutConfigRuleOutput) { op := &request.Operation{ - Name: opPutEvaluations, + Name: opPutConfigRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutEvaluationsInput{} + input = &PutConfigRuleInput{} } - output = &PutEvaluationsOutput{} + output = &PutConfigRuleOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutEvaluations API operation for AWS Config. +// PutConfigRule API operation for AWS Config. // -// Used by an AWS Lambda function to deliver evaluation results to AWS Config. -// This action is required in every AWS Lambda function that is invoked by an -// AWS Config rule. +// Adds or updates an AWS Config rule for evaluating whether your AWS resources +// comply with your desired configurations. +// +// You can use this action for custom AWS Config rules and AWS managed Config +// rules. A custom AWS Config rule is a rule that you develop and maintain. +// An AWS managed Config rule is a customizable, predefined rule that AWS Config +// provides. +// +// If you are adding a new custom AWS Config rule, you must first create the +// AWS Lambda function that the rule invokes to evaluate your resources. When +// you use the PutConfigRule action to add the rule to AWS Config, you must +// specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. +// Specify the ARN for the SourceIdentifier key. This key is part of the Source +// object, which is part of the ConfigRule object. +// +// If you are adding an AWS managed Config rule, specify the rule's identifier +// for the SourceIdentifier key. To reference AWS managed Config rule identifiers, +// see About AWS Managed Config Rules (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). +// +// For any new rule that you add, specify the ConfigRuleName in the ConfigRule +// object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values +// are generated by AWS Config for new rules. +// +// If you are updating a rule that you added previously, you can specify the +// rule by ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule +// data type that you use in this request. +// +// The maximum number of rules that AWS Config supports is 150. 
+// +// For information about requesting a rule limit increase, see AWS Config Limits +// (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config) +// in the AWS General Reference Guide. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutEvaluations for usage and error information. +// API operation PutConfigRule for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again. // -// * ErrCodeInvalidResultTokenException "InvalidResultTokenException" -// The specified ResultToken is invalid. +// * ErrCodeMaxNumberOfConfigRulesExceededException "MaxNumberOfConfigRulesExceededException" +// Failed to add the AWS Config rule because the account already contains the +// maximum number of 150 rules. Consider deleting any deactivated rules before +// you add new rules. // -// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" -// One or more AWS Config rules in the request are invalid. Verify that the -// rule names are correct and try again. +// * ErrCodeResourceInUseException "ResourceInUseException" +// You see this exception in the following cases: // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutEvaluations -func (c *ConfigService) PutEvaluations(input *PutEvaluationsInput) (*PutEvaluationsOutput, error) { - req, out := c.PutEvaluationsRequest(input) +// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your +// request again later. +// +// * For DeleteConfigRule API, the rule is deleting your evaluation results. +// Try your request again later. +// +// * For DeleteConfigRule API, a remediation action is associated with the +// rule and AWS Config cannot delete this rule. Delete the remediation action +// associated with the rule before deleting the rule and try your request +// again later. +// +// * For PutConfigOrganizationRule, organization config rule deletion is +// in progress. Try your request again later. +// +// * For DeleteOrganizationConfigRule, organization config rule creation +// is in progress. Try your request again later. +// +// * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" +// Indicates one of the following errors: +// +// * For PutConfigRule, the rule cannot be created because the IAM role assigned +// to AWS Config lacks permissions to perform the config:Put* action. +// +// * For PutConfigRule, the AWS Lambda function cannot be invoked. Check +// the function ARN, and check the function's permissions. +// +// * For OrganizationConfigRule, organization config rule cannot be created +// because you do not have permissions to call IAM GetRole action or create +// service linked role. +// +// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" +// There are no configuration recorders available to provide the role needed +// to describe your resources. 
Create a configuration recorder. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigRule +func (c *ConfigService) PutConfigRule(input *PutConfigRuleInput) (*PutConfigRuleOutput, error) { + req, out := c.PutConfigRuleRequest(input) return out, req.Send() } -// PutEvaluationsWithContext is the same as PutEvaluations with the addition of +// PutConfigRuleWithContext is the same as PutConfigRule with the addition of // the ability to pass a context and additional request options. // -// See PutEvaluations for details on how to use this API operation. +// See PutConfigRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutEvaluationsWithContext(ctx aws.Context, input *PutEvaluationsInput, opts ...request.Option) (*PutEvaluationsOutput, error) { - req, out := c.PutEvaluationsRequest(input) +func (c *ConfigService) PutConfigRuleWithContext(ctx aws.Context, input *PutConfigRuleInput, opts ...request.Option) (*PutConfigRuleOutput, error) { + req, out := c.PutConfigRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutRemediationConfigurations = "PutRemediationConfigurations" +const opPutConfigurationAggregator = "PutConfigurationAggregator" -// PutRemediationConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the PutRemediationConfigurations operation. The "output" return +// PutConfigurationAggregatorRequest generates a "aws/request.Request" representing the +// client's request for the PutConfigurationAggregator operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutRemediationConfigurations for more information on using the PutRemediationConfigurations +// See PutConfigurationAggregator for more information on using the PutConfigurationAggregator // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutRemediationConfigurationsRequest method. -// req, resp := client.PutRemediationConfigurationsRequest(params) +// // Example sending a request using the PutConfigurationAggregatorRequest method. 
+// req, resp := client.PutConfigurationAggregatorRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationConfigurations -func (c *ConfigService) PutRemediationConfigurationsRequest(input *PutRemediationConfigurationsInput) (req *request.Request, output *PutRemediationConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationAggregator +func (c *ConfigService) PutConfigurationAggregatorRequest(input *PutConfigurationAggregatorInput) (req *request.Request, output *PutConfigurationAggregatorOutput) { op := &request.Operation{ - Name: opPutRemediationConfigurations, + Name: opPutConfigurationAggregator, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutRemediationConfigurationsInput{} + input = &PutConfigurationAggregatorInput{} } - output = &PutRemediationConfigurationsOutput{} + output = &PutConfigurationAggregatorOutput{} req = c.newRequest(op, input, output) return } -// PutRemediationConfigurations API operation for AWS Config. +// PutConfigurationAggregator API operation for AWS Config. // -// Adds or updates the remediation configuration with a specific AWS Config -// rule with the selected target or action. The API creates the RemediationConfiguration -// object for the AWS Config rule. The AWS Config rule must already exist for -// you to add a remediation configuration. The target (SSM document) must exist -// and have permissions to use the target. +// Creates and updates the configuration aggregator with the selected source +// accounts and regions. The source can be individual accounts or an organization. +// +// AWS Config should be enabled in the source accounts and regions that you +// want to aggregate. +// +// If your source type is an organization, you must be signed in to the master +// account and all features must be enabled in your organization. AWS Config +// calls the EnableAwsServiceAccess API to enable integration between AWS Config +// and AWS Organizations. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutRemediationConfigurations for usage and error information. +// API operation PutConfigurationAggregator for usage and error information. // // Returned Error Codes: -// * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" -// Indicates one of the following errors: -// -// * The rule cannot be created because the IAM role assigned to AWS Config -// lacks permissions to perform the config:Put* action. -// -// * The AWS Lambda function cannot be invoked. Check the function ARN, and -// check the function's permissions. -// // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again.
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationConfigurations -func (c *ConfigService) PutRemediationConfigurations(input *PutRemediationConfigurationsInput) (*PutRemediationConfigurationsOutput, error) { - req, out := c.PutRemediationConfigurationsRequest(input) +// * ErrCodeLimitExceededException "LimitExceededException" +// For the StartConfigRulesEvaluation API, this exception is thrown if an evaluation +// is in progress or if you call the StartConfigRulesEvaluation API more than +// once per minute. +// +// For the PutConfigurationAggregator API, this exception is thrown if the number +// of accounts and aggregators exceeds the limit. +// +// * ErrCodeInvalidRoleException "InvalidRoleException" +// You have provided a null or empty role ARN. +// +// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException" +// For the PutConfigurationAggregator API, you do not have permission to call +// the EnableAWSServiceAccess API. +// +// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs +// are called from member accounts. All APIs must be called from the organization's +// master account. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The organization is no longer available. +// +// * ErrCodeOrganizationAllFeaturesNotEnabledException "OrganizationAllFeaturesNotEnabledException" +// The AWS Config resource cannot be created because your organization does +// not have all features enabled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationAggregator +func (c *ConfigService) PutConfigurationAggregator(input *PutConfigurationAggregatorInput) (*PutConfigurationAggregatorOutput, error) { + req, out := c.PutConfigurationAggregatorRequest(input) return out, req.Send() } -// PutRemediationConfigurationsWithContext is the same as PutRemediationConfigurations with the addition of +// PutConfigurationAggregatorWithContext is the same as PutConfigurationAggregator with the addition of // the ability to pass a context and additional request options. // -// See PutRemediationConfigurations for details on how to use this API operation. +// See PutConfigurationAggregator for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutRemediationConfigurationsWithContext(ctx aws.Context, input *PutRemediationConfigurationsInput, opts ...request.Option) (*PutRemediationConfigurationsOutput, error) { - req, out := c.PutRemediationConfigurationsRequest(input) +func (c *ConfigService) PutConfigurationAggregatorWithContext(ctx aws.Context, input *PutConfigurationAggregatorInput, opts ...request.Option) (*PutConfigurationAggregatorOutput, error) { + req, out := c.PutConfigurationAggregatorRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutRetentionConfiguration = "PutRetentionConfiguration" +const opPutConfigurationRecorder = "PutConfigurationRecorder" -// PutRetentionConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutRetentionConfiguration operation. The "output" return +// PutConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the PutConfigurationRecorder operation.
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutRetentionConfiguration for more information on using the PutRetentionConfiguration +// See PutConfigurationRecorder for more information on using the PutConfigurationRecorder // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutRetentionConfigurationRequest method. -// req, resp := client.PutRetentionConfigurationRequest(params) +// // Example sending a request using the PutConfigurationRecorderRequest method. +// req, resp := client.PutConfigurationRecorderRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRetentionConfiguration -func (c *ConfigService) PutRetentionConfigurationRequest(input *PutRetentionConfigurationInput) (req *request.Request, output *PutRetentionConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationRecorder +func (c *ConfigService) PutConfigurationRecorderRequest(input *PutConfigurationRecorderInput) (req *request.Request, output *PutConfigurationRecorderOutput) { op := &request.Operation{ - Name: opPutRetentionConfiguration, + Name: opPutConfigurationRecorder, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutRetentionConfigurationInput{} + input = &PutConfigurationRecorderInput{} } - output = &PutRetentionConfigurationOutput{} + output = &PutConfigurationRecorderOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutRetentionConfiguration API operation for AWS Config. +// PutConfigurationRecorder API operation for AWS Config. // -// Creates and updates the retention configuration with details about retention -// period (number of days) that AWS Config stores your historical information. -// The API creates the RetentionConfiguration object and names the object as -// default. When you have a RetentionConfiguration object named default, calling -// the API modifies the default object. +// Creates a new configuration recorder to record the selected resource configurations. // -// Currently, AWS Config supports only one retention configuration per region -// in your account. +// You can use this action to change the role roleARN or the recordingGroup +// of an existing recorder. To change the role, call the action on the existing +// configuration recorder and specify a role. +// +// Currently, you can specify only one configuration recorder per region in +// your account. +// +// If ConfigurationRecorder does not have the recordingGroup parameter specified, +// the default is to record all supported resource types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation PutRetentionConfiguration for usage and error information. 
+// API operation PutConfigurationRecorder for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" -// One or more of the specified parameters are invalid. Verify that your parameters -// are valid and try again. +// * ErrCodeMaxNumberOfConfigurationRecordersExceededException "MaxNumberOfConfigurationRecordersExceededException" +// You have reached the limit of the number of recorders you can create. // -// * ErrCodeMaxNumberOfRetentionConfigurationsExceededException "MaxNumberOfRetentionConfigurationsExceededException" -// Failed to add the retention configuration because a retention configuration -// with that name already exists. +// * ErrCodeInvalidConfigurationRecorderNameException "InvalidConfigurationRecorderNameException" +// You have provided a configuration recorder name that is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRetentionConfiguration -func (c *ConfigService) PutRetentionConfiguration(input *PutRetentionConfigurationInput) (*PutRetentionConfigurationOutput, error) { - req, out := c.PutRetentionConfigurationRequest(input) +// * ErrCodeInvalidRoleException "InvalidRoleException" +// You have provided a null or empty role ARN. +// +// * ErrCodeInvalidRecordingGroupException "InvalidRecordingGroupException" +// AWS Config throws an exception if the recording group does not contain a +// valid list of resource types. Invalid values might also be incorrectly formatted. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConfigurationRecorder +func (c *ConfigService) PutConfigurationRecorder(input *PutConfigurationRecorderInput) (*PutConfigurationRecorderOutput, error) { + req, out := c.PutConfigurationRecorderRequest(input) return out, req.Send() } -// PutRetentionConfigurationWithContext is the same as PutRetentionConfiguration with the addition of +// PutConfigurationRecorderWithContext is the same as PutConfigurationRecorder with the addition of // the ability to pass a context and additional request options. // -// See PutRetentionConfiguration for details on how to use this API operation. +// See PutConfigurationRecorder for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) PutRetentionConfigurationWithContext(ctx aws.Context, input *PutRetentionConfigurationInput, opts ...request.Option) (*PutRetentionConfigurationOutput, error) { - req, out := c.PutRetentionConfigurationRequest(input) +func (c *ConfigService) PutConfigurationRecorderWithContext(ctx aws.Context, input *PutConfigurationRecorderInput, opts ...request.Option) (*PutConfigurationRecorderOutput, error) { + req, out := c.PutConfigurationRecorderRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSelectResourceConfig = "SelectResourceConfig" +const opPutDeliveryChannel = "PutDeliveryChannel" -// SelectResourceConfigRequest generates a "aws/request.Request" representing the -// client's request for the SelectResourceConfig operation. The "output" return +// PutDeliveryChannelRequest generates a "aws/request.Request" representing the +// client's request for the PutDeliveryChannel operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SelectResourceConfig for more information on using the SelectResourceConfig +// See PutDeliveryChannel for more information on using the PutDeliveryChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SelectResourceConfigRequest method. -// req, resp := client.SelectResourceConfigRequest(params) +// // Example sending a request using the PutDeliveryChannelRequest method. +// req, resp := client.PutDeliveryChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/SelectResourceConfig -func (c *ConfigService) SelectResourceConfigRequest(input *SelectResourceConfigInput) (req *request.Request, output *SelectResourceConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutDeliveryChannel +func (c *ConfigService) PutDeliveryChannelRequest(input *PutDeliveryChannelInput) (req *request.Request, output *PutDeliveryChannelOutput) { op := &request.Operation{ - Name: opSelectResourceConfig, + Name: opPutDeliveryChannel, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SelectResourceConfigInput{} + input = &PutDeliveryChannelInput{} } - output = &SelectResourceConfigOutput{} + output = &PutDeliveryChannelOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// SelectResourceConfig API operation for AWS Config. +// PutDeliveryChannel API operation for AWS Config. // -// Accepts a structured query language (SQL) SELECT command, performs the corresponding -// search, and returns resource configurations matching the properties. +// Creates a delivery channel object to deliver configuration information to +// an Amazon S3 bucket and Amazon SNS topic. // -// For more information about query components, see the Query Components (https://docs.aws.amazon.com/config/latest/developerguide/query-components.html) -// section in the AWS Config Developer Guide. +// Before you can create a delivery channel, you must create a configuration +// recorder. +// +// You can use this action to change the Amazon S3 bucket or an Amazon SNS topic +// of the existing delivery channel. To change the Amazon S3 bucket or an Amazon +// SNS topic, call this action and specify the changed values for the S3 bucket +// and the SNS topic. If you specify a different value for either the S3 bucket +// or the SNS topic, this action will keep the existing value for the parameter +// that is not changed. +// +// You can have only one delivery channel per region in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation SelectResourceConfig for usage and error information. +// API operation PutDeliveryChannel for usage and error information. 
// // Returned Error Codes: -// * ErrCodeInvalidExpressionException "InvalidExpressionException" -// The syntax of the query is incorrect. +// * ErrCodeMaxNumberOfDeliveryChannelsExceededException "MaxNumberOfDeliveryChannelsExceededException" +// You have reached the limit of the number of delivery channels you can create. // -// * ErrCodeInvalidLimitException "InvalidLimitException" -// The specified limit is outside the allowable range. +// * ErrCodeNoAvailableConfigurationRecorderException "NoAvailableConfigurationRecorderException" +// There are no configuration recorders available to provide the role needed +// to describe your resources. Create a configuration recorder. // -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The specified next token is invalid. Specify the nextToken string that was -// returned in the previous response to get the next page of results. +// * ErrCodeInvalidDeliveryChannelNameException "InvalidDeliveryChannelNameException" +// The specified delivery channel name is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/SelectResourceConfig -func (c *ConfigService) SelectResourceConfig(input *SelectResourceConfigInput) (*SelectResourceConfigOutput, error) { - req, out := c.SelectResourceConfigRequest(input) +// * ErrCodeNoSuchBucketException "NoSuchBucketException" +// The specified Amazon S3 bucket does not exist. +// +// * ErrCodeInvalidS3KeyPrefixException "InvalidS3KeyPrefixException" +// The specified Amazon S3 key prefix is not valid. +// +// * ErrCodeInvalidSNSTopicARNException "InvalidSNSTopicARNException" +// The specified Amazon SNS topic does not exist. +// +// * ErrCodeInsufficientDeliveryPolicyException "InsufficientDeliveryPolicyException" +// Your Amazon S3 bucket policy does not permit AWS Config to write to it. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutDeliveryChannel +func (c *ConfigService) PutDeliveryChannel(input *PutDeliveryChannelInput) (*PutDeliveryChannelOutput, error) { + req, out := c.PutDeliveryChannelRequest(input) return out, req.Send() } -// SelectResourceConfigWithContext is the same as SelectResourceConfig with the addition of +// PutDeliveryChannelWithContext is the same as PutDeliveryChannel with the addition of // the ability to pass a context and additional request options. // -// See SelectResourceConfig for details on how to use this API operation. +// See PutDeliveryChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) SelectResourceConfigWithContext(ctx aws.Context, input *SelectResourceConfigInput, opts ...request.Option) (*SelectResourceConfigOutput, error) { - req, out := c.SelectResourceConfigRequest(input) +func (c *ConfigService) PutDeliveryChannelWithContext(ctx aws.Context, input *PutDeliveryChannelInput, opts ...request.Option) (*PutDeliveryChannelOutput, error) { + req, out := c.PutDeliveryChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opStartConfigRulesEvaluation = "StartConfigRulesEvaluation" +const opPutEvaluations = "PutEvaluations" -// StartConfigRulesEvaluationRequest generates a "aws/request.Request" representing the -// client's request for the StartConfigRulesEvaluation operation. The "output" return +// PutEvaluationsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvaluations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartConfigRulesEvaluation for more information on using the StartConfigRulesEvaluation +// See PutEvaluations for more information on using the PutEvaluations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartConfigRulesEvaluationRequest method. -// req, resp := client.StartConfigRulesEvaluationRequest(params) +// // Example sending a request using the PutEvaluationsRequest method. +// req, resp := client.PutEvaluationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigRulesEvaluation -func (c *ConfigService) StartConfigRulesEvaluationRequest(input *StartConfigRulesEvaluationInput) (req *request.Request, output *StartConfigRulesEvaluationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutEvaluations +func (c *ConfigService) PutEvaluationsRequest(input *PutEvaluationsInput) (req *request.Request, output *PutEvaluationsOutput) { op := &request.Operation{ - Name: opStartConfigRulesEvaluation, + Name: opPutEvaluations, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartConfigRulesEvaluationInput{} + input = &PutEvaluationsInput{} } - output = &StartConfigRulesEvaluationOutput{} + output = &PutEvaluationsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StartConfigRulesEvaluation API operation for AWS Config. -// -// Runs an on-demand evaluation for the specified AWS Config rules against the -// last known configuration state of the resources. Use StartConfigRulesEvaluation -// when you want to test that a rule you updated is working as expected. StartConfigRulesEvaluation -// does not re-record the latest configuration state for your resources. It -// re-runs an evaluation against the last known state of your resources. -// -// You can specify up to 25 AWS Config rules per request. -// -// An existing StartConfigRulesEvaluation call for the specified rules must -// complete before you can call the API again. If you chose to have AWS Config -// stream to an Amazon SNS topic, you will receive a ConfigRuleEvaluationStarted -// notification when the evaluation starts. -// -// You don't need to call the StartConfigRulesEvaluation API to run an evaluation -// for a new rule. When you create a rule, AWS Config evaluates your resources -// against the rule automatically. 
-// -// The StartConfigRulesEvaluation API is useful if you want to run on-demand -// evaluations, such as the following example: -// -// You have a custom rule that evaluates your IAM resources every 24 hours. -// -// You update your Lambda function to add additional conditions to your rule. -// -// Instead of waiting for the next periodic evaluation, you call the StartConfigRulesEvaluation -// API. -// -// AWS Config invokes your Lambda function and evaluates your IAM resources. +// PutEvaluations API operation for AWS Config. // -// Your custom rule will still run periodic evaluations every 24 hours. +// Used by an AWS Lambda function to deliver evaluation results to AWS Config. +// This action is required in every AWS Lambda function that is invoked by an +// AWS Config rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation StartConfigRulesEvaluation for usage and error information. +// API operation PutEvaluations for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" -// One or more AWS Config rules in the request are invalid. Verify that the -// rule names are correct and try again. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// For StartConfigRulesEvaluation API, this exception is thrown if an evaluation -// is in progress or if you call the StartConfigRulesEvaluation API more than -// once per minute. -// -// For PutConfigurationAggregator API, this exception is thrown if the number -// of accounts and aggregators exceeds the limit. -// -// * ErrCodeResourceInUseException "ResourceInUseException" -// The rule is currently being deleted or the rule is deleting your evaluation -// results. Try your request again later. -// // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigRulesEvaluation -func (c *ConfigService) StartConfigRulesEvaluation(input *StartConfigRulesEvaluationInput) (*StartConfigRulesEvaluationOutput, error) { - req, out := c.StartConfigRulesEvaluationRequest(input) +// * ErrCodeInvalidResultTokenException "InvalidResultTokenException" +// The specified ResultToken is invalid. +// +// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" +// One or more AWS Config rules in the request are invalid. Verify that the +// rule names are correct and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutEvaluations +func (c *ConfigService) PutEvaluations(input *PutEvaluationsInput) (*PutEvaluationsOutput, error) { + req, out := c.PutEvaluationsRequest(input) return out, req.Send() } -// StartConfigRulesEvaluationWithContext is the same as StartConfigRulesEvaluation with the addition of +// PutEvaluationsWithContext is the same as PutEvaluations with the addition of // the ability to pass a context and additional request options. // -// See StartConfigRulesEvaluation for details on how to use this API operation. +// See PutEvaluations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) StartConfigRulesEvaluationWithContext(ctx aws.Context, input *StartConfigRulesEvaluationInput, opts ...request.Option) (*StartConfigRulesEvaluationOutput, error) { - req, out := c.StartConfigRulesEvaluationRequest(input) +func (c *ConfigService) PutEvaluationsWithContext(ctx aws.Context, input *PutEvaluationsInput, opts ...request.Option) (*PutEvaluationsOutput, error) { + req, out := c.PutEvaluationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartConfigurationRecorder = "StartConfigurationRecorder" +const opPutOrganizationConfigRule = "PutOrganizationConfigRule" -// StartConfigurationRecorderRequest generates a "aws/request.Request" representing the -// client's request for the StartConfigurationRecorder operation. The "output" return +// PutOrganizationConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the PutOrganizationConfigRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartConfigurationRecorder for more information on using the StartConfigurationRecorder +// See PutOrganizationConfigRule for more information on using the PutOrganizationConfigRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartConfigurationRecorderRequest method. -// req, resp := client.StartConfigurationRecorderRequest(params) +// // Example sending a request using the PutOrganizationConfigRuleRequest method. +// req, resp := client.PutOrganizationConfigRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigurationRecorder -func (c *ConfigService) StartConfigurationRecorderRequest(input *StartConfigurationRecorderInput) (req *request.Request, output *StartConfigurationRecorderOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutOrganizationConfigRule +func (c *ConfigService) PutOrganizationConfigRuleRequest(input *PutOrganizationConfigRuleInput) (req *request.Request, output *PutOrganizationConfigRuleOutput) { op := &request.Operation{ - Name: opStartConfigurationRecorder, + Name: opPutOrganizationConfigRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartConfigurationRecorderInput{} + input = &PutOrganizationConfigRuleInput{} } - output = &StartConfigurationRecorderOutput{} + output = &PutOrganizationConfigRuleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StartConfigurationRecorder API operation for AWS Config. +// PutOrganizationConfigRule API operation for AWS Config. // -// Starts recording configurations of the AWS resources you have selected to -// record in your AWS account. 
+// Adds or updates an organization config rule for your entire organization, +// evaluating whether your AWS resources comply with your desired configurations. +// Only a master account can create or update an organization config rule. // -// You must have created at least one delivery channel to successfully start -// the configuration recorder. +// This API enables organization service access through the EnableAWSServiceAccess +// action and creates a service linked role, AWSServiceRoleForConfigMultiAccountSetup, +// in the master account of your organization. The service linked role is created +// only when the role does not exist in the master account. AWS Config verifies +// the existence of the role with the GetRole action. +// +// You can use this action to create both custom AWS Config rules and AWS managed +// Config rules. If you are adding a new custom AWS Config rule, you must first +// create the AWS Lambda function in the master account that the rule invokes +// to evaluate your resources. When you use the PutOrganizationConfigRule action +// to add the rule to AWS Config, you must specify the Amazon Resource Name +// (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed +// Config rule, specify the rule's identifier for the RuleIdentifier key. +// +// The maximum number of organization config rules that AWS Config supports +// is 150. +// +// Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation StartConfigurationRecorder for usage and error information. +// API operation PutOrganizationConfigRule for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchConfigurationRecorderException "NoSuchConfigurationRecorderException" -// You have specified a configuration recorder that does not exist. +// * ErrCodeMaxNumberOfOrganizationConfigRulesExceededException "MaxNumberOfOrganizationConfigRulesExceededException" +// You have reached the limit of the number of organization config rules you +// can create. // -// * ErrCodeNoAvailableDeliveryChannelException "NoAvailableDeliveryChannelException" -// There is no delivery channel available to record configurations. +// * ErrCodeResourceInUseException "ResourceInUseException" +// You see this exception in the following cases: // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigurationRecorder -func (c *ConfigService) StartConfigurationRecorder(input *StartConfigurationRecorderInput) (*StartConfigurationRecorderOutput, error) { - req, out := c.StartConfigurationRecorderRequest(input) +// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your +// request again later. +// +// * For DeleteConfigRule API, the rule is deleting your evaluation results. +// Try your request again later. +// +// * For DeleteConfigRule API, a remediation action is associated with the +// rule and AWS Config cannot delete this rule. Delete the remediation action +// associated with the rule before deleting the rule and try your request +// again later. +// +// * For PutConfigOrganizationRule, organization config rule deletion is +// in progress. Try your request again later. +// +// * For DeleteOrganizationConfigRule, organization config rule creation +// is in progress. Try your request again later.
+// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. +// +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. +// +// * ErrCodeOrganizationAccessDeniedException "OrganizationAccessDeniedException" +// For the PutConfigurationAggregator API, you do not have permission to call +// the EnableAWSServiceAccess API. +// +// For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs +// are called from member accounts. All APIs must be called from the organization's +// master account. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The organization is no longer available. +// +// * ErrCodeOrganizationAllFeaturesNotEnabledException "OrganizationAllFeaturesNotEnabledException" +// The AWS Config resource cannot be created because your organization does +// not have all features enabled. +// +// * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" +// Indicates one of the following errors: +// +// * For PutConfigRule, the rule cannot be created because the IAM role assigned +// to AWS Config lacks permissions to perform the config:Put* action. +// +// * For PutConfigRule, the AWS Lambda function cannot be invoked. Check +// the function ARN, and check the function's permissions. +// +// * For OrganizationConfigRule, organization config rule cannot be created +// because you do not have permissions to call IAM GetRole action or create +// service linked role. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutOrganizationConfigRule +func (c *ConfigService) PutOrganizationConfigRule(input *PutOrganizationConfigRuleInput) (*PutOrganizationConfigRuleOutput, error) { + req, out := c.PutOrganizationConfigRuleRequest(input) return out, req.Send() } -// StartConfigurationRecorderWithContext is the same as StartConfigurationRecorder with the addition of +// PutOrganizationConfigRuleWithContext is the same as PutOrganizationConfigRule with the addition of // the ability to pass a context and additional request options. // -// See StartConfigurationRecorder for details on how to use this API operation. +// See PutOrganizationConfigRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) StartConfigurationRecorderWithContext(ctx aws.Context, input *StartConfigurationRecorderInput, opts ...request.Option) (*StartConfigurationRecorderOutput, error) { - req, out := c.StartConfigurationRecorderRequest(input) +func (c *ConfigService) PutOrganizationConfigRuleWithContext(ctx aws.Context, input *PutOrganizationConfigRuleInput, opts ...request.Option) (*PutOrganizationConfigRuleOutput, error) { + req, out := c.PutOrganizationConfigRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopConfigurationRecorder = "StopConfigurationRecorder" +const opPutRemediationExceptions = "PutRemediationExceptions" -// StopConfigurationRecorderRequest generates a "aws/request.Request" representing the -// client's request for the StopConfigurationRecorder operation.
The "output" return +// PutRemediationConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the PutRemediationConfigurations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartRemediationExecution for more information on using the StartRemediationExecution +// See PutRemediationConfigurations for more information on using the PutRemediationConfigurations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartRemediationExecutionRequest method. -// req, resp := client.StartRemediationExecutionRequest(params) +// // Example sending a request using the PutRemediationConfigurationsRequest method. +// req, resp := client.PutRemediationConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartRemediationExecution -func (c *ConfigService) StartRemediationExecutionRequest(input *StartRemediationExecutionInput) (req *request.Request, output *StartRemediationExecutionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationConfigurations +func (c *ConfigService) PutRemediationConfigurationsRequest(input *PutRemediationConfigurationsInput) (req *request.Request, output *PutRemediationConfigurationsOutput) { op := &request.Operation{ - Name: opStartRemediationExecution, + Name: opPutRemediationConfigurations, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartRemediationExecutionInput{} + input = &PutRemediationConfigurationsInput{} } - output = &StartRemediationExecutionOutput{} + output = &PutRemediationConfigurationsOutput{} req = c.newRequest(op, input, output) return } -// StartRemediationExecution API operation for AWS Config. -// -// Runs an on-demand remediation for the specified AWS Config rules against -// the last known remediation configuration. It runs an execution against the -// current state of your resources. Remediation execution is asynchronous. +// PutRemediationConfigurations API operation for AWS Config. // -// You can specify up to 100 resource keys per request. An existing StartRemediationExecution -// call for the specified resource keys must complete before you can call the -// API again. +// Adds or updates the remediation configuration with a specific AWS Config +// rule with the selected target or action. The API creates the RemediationConfiguration +// object for the AWS Config rule. The AWS Config rule must already exist for +// you to add a remediation configuration. The target (SSM document) must exist +// and have permissions to use the target. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation StartRemediationExecution for usage and error information. +// API operation PutRemediationConfigurations for usage and error information. 
// // Returned Error Codes: // * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" // Indicates one of the following errors: // -// * The rule cannot be created because the IAM role assigned to AWS Config -// lacks permissions to perform the config:Put* action. +// * For PutConfigRule, the rule cannot be created because the IAM role assigned +// to AWS Config lacks permissions to perform the config:Put* action. // -// * The AWS Lambda function cannot be invoked. Check the function ARN, and -// check the function's permissions. +// * For PutConfigRule, the AWS Lambda function cannot be invoked. Check +// the function ARN, and check the function's permissions. // -// * ErrCodeNoSuchRemediationConfigurationException "NoSuchRemediationConfigurationException" -// You specified an AWS Config rule without a remediation configuration. +// * For OrganizationConfigRule, organization config rule cannot be created +// because you do not have permissions to call IAM GetRole action or create +// service linked role. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartRemediationExecution -func (c *ConfigService) StartRemediationExecution(input *StartRemediationExecutionInput) (*StartRemediationExecutionOutput, error) { - req, out := c.StartRemediationExecutionRequest(input) +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationConfigurations +func (c *ConfigService) PutRemediationConfigurations(input *PutRemediationConfigurationsInput) (*PutRemediationConfigurationsOutput, error) { + req, out := c.PutRemediationConfigurationsRequest(input) return out, req.Send() } -// StartRemediationExecutionWithContext is the same as StartRemediationExecution with the addition of +// PutRemediationConfigurationsWithContext is the same as PutRemediationConfigurations with the addition of // the ability to pass a context and additional request options. // -// See StartRemediationExecution for details on how to use this API operation. +// See PutRemediationConfigurations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) StartRemediationExecutionWithContext(ctx aws.Context, input *StartRemediationExecutionInput, opts ...request.Option) (*StartRemediationExecutionOutput, error) { - req, out := c.StartRemediationExecutionRequest(input) +func (c *ConfigService) PutRemediationConfigurationsWithContext(ctx aws.Context, input *PutRemediationConfigurationsInput, opts ...request.Option) (*PutRemediationConfigurationsOutput, error) { + req, out := c.PutRemediationConfigurationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopConfigurationRecorder = "StopConfigurationRecorder" +const opPutRemediationExceptions = "PutRemediationExceptions" -// StopConfigurationRecorderRequest generates a "aws/request.Request" representing the -// client's request for the StopConfigurationRecorder operation. 
The "output" return +// PutRemediationExceptionsRequest generates a "aws/request.Request" representing the +// client's request for the PutRemediationExceptions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopConfigurationRecorder for more information on using the StopConfigurationRecorder +// See PutRemediationExceptions for more information on using the PutRemediationExceptions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopConfigurationRecorderRequest method. -// req, resp := client.StopConfigurationRecorderRequest(params) +// // Example sending a request using the PutRemediationExceptionsRequest method. +// req, resp := client.PutRemediationExceptionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StopConfigurationRecorder -func (c *ConfigService) StopConfigurationRecorderRequest(input *StopConfigurationRecorderInput) (req *request.Request, output *StopConfigurationRecorderOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationExceptions +func (c *ConfigService) PutRemediationExceptionsRequest(input *PutRemediationExceptionsInput) (req *request.Request, output *PutRemediationExceptionsOutput) { op := &request.Operation{ - Name: opStopConfigurationRecorder, + Name: opPutRemediationExceptions, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopConfigurationRecorderInput{} + input = &PutRemediationExceptionsInput{} } - output = &StopConfigurationRecorderOutput{} + output = &PutRemediationExceptionsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StopConfigurationRecorder API operation for AWS Config. +// PutRemediationExceptions API operation for AWS Config. // -// Stops recording configurations of the AWS resources you have selected to -// record in your AWS account. +// A remediation exception is when a specific resource is no longer considered +// for auto-remediation. This API adds a new exception or updates an exisiting +// exception for a specific resource with a specific AWS Config rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation StopConfigurationRecorder for usage and error information. +// API operation PutRemediationExceptions for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchConfigurationRecorderException "NoSuchConfigurationRecorderException" -// You have specified a configuration recorder that does not exist. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StopConfigurationRecorder -func (c *ConfigService) StopConfigurationRecorder(input *StopConfigurationRecorderInput) (*StopConfigurationRecorderOutput, error) { - req, out := c.StopConfigurationRecorderRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationExceptions +func (c *ConfigService) PutRemediationExceptions(input *PutRemediationExceptionsInput) (*PutRemediationExceptionsOutput, error) { + req, out := c.PutRemediationExceptionsRequest(input) return out, req.Send() } -// StopConfigurationRecorderWithContext is the same as StopConfigurationRecorder with the addition of +// PutRemediationExceptionsWithContext is the same as PutRemediationExceptions with the addition of // the ability to pass a context and additional request options. // -// See StopConfigurationRecorder for details on how to use this API operation. +// See PutRemediationExceptions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) StopConfigurationRecorderWithContext(ctx aws.Context, input *StopConfigurationRecorderInput, opts ...request.Option) (*StopConfigurationRecorderOutput, error) { - req, out := c.StopConfigurationRecorderRequest(input) +func (c *ConfigService) PutRemediationExceptionsWithContext(ctx aws.Context, input *PutRemediationExceptionsInput, opts ...request.Option) (*PutRemediationExceptionsOutput, error) { + req, out := c.PutRemediationExceptionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opPutRetentionConfiguration = "PutRetentionConfiguration" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// PutRetentionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See PutRetentionConfiguration for more information on using the PutRetentionConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the PutRetentionConfigurationRequest method. 
+// req, resp := client.PutRetentionConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/TagResource -func (c *ConfigService) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRetentionConfiguration +func (c *ConfigService) PutRetentionConfigurationRequest(input *PutRetentionConfigurationInput) (req *request.Request, output *PutRetentionConfigurationOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opPutRetentionConfiguration, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &PutRetentionConfigurationInput{} } - output = &TagResourceOutput{} + output = &PutRetentionConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for AWS Config. +// PutRetentionConfiguration API operation for AWS Config. // -// Associates the specified tags to a resource with the specified resourceArn. -// If existing tags on a resource are not specified in the request parameters, -// they are not changed. When a resource is deleted, the tags associated with -// that resource are deleted as well. +// Creates and updates the retention configuration with details about the retention +// period (number of days) for which AWS Config stores your historical information. +// The API creates the RetentionConfiguration object and names the object as +// default. When you have a RetentionConfiguration object named default, calling +// the API modifies the default object. +// +// Currently, AWS Config supports only one retention configuration per region +// in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation TagResource for usage and error information. +// API operation PutRetentionConfiguration for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// You have specified a resource that does not exist. +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. // -// * ErrCodeTooManyTagsException "TooManyTagsException" -// You have reached the limit of the number of tags you can use. You have more -// than 50 tags. +// * ErrCodeMaxNumberOfRetentionConfigurationsExceededException "MaxNumberOfRetentionConfigurationsExceededException" +// Failed to add the retention configuration because a retention configuration +// with that name already exists.
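
Since the retention configuration is a single object named default per region, repeating the call simply updates that object. A minimal sketch, assuming the same client setup as the first example and that the input field is named RetentionPeriodInDays (an assumption from the service model):

// Keep recorded configuration history for 90 days; subsequent calls update
// the same "default" retention configuration rather than adding another.
func setRetention(svc *configservice.ConfigService) error {
	_, err := svc.PutRetentionConfiguration(&configservice.PutRetentionConfigurationInput{
		RetentionPeriodInDays: aws.Int64(90),
	})
	return err
}
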
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/TagResource -func (c *ConfigService) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRetentionConfiguration +func (c *ConfigService) PutRetentionConfiguration(input *PutRetentionConfigurationInput) (*PutRetentionConfigurationOutput, error) { + req, out := c.PutRetentionConfigurationRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// PutRetentionConfigurationWithContext is the same as PutRetentionConfiguration with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See PutRetentionConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *ConfigService) PutRetentionConfigurationWithContext(ctx aws.Context, input *PutRetentionConfigurationInput, opts ...request.Option) (*PutRetentionConfigurationOutput, error) { + req, out := c.PutRetentionConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opSelectResourceConfig = "SelectResourceConfig" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// SelectResourceConfigRequest generates a "aws/request.Request" representing the +// client's request for the SelectResourceConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See SelectResourceConfig for more information on using the SelectResourceConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the SelectResourceConfigRequest method. 
+// req, resp := client.SelectResourceConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/UntagResource -func (c *ConfigService) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/SelectResourceConfig +func (c *ConfigService) SelectResourceConfigRequest(input *SelectResourceConfigInput) (req *request.Request, output *SelectResourceConfigOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opSelectResourceConfig, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &SelectResourceConfigInput{} } - output = &UntagResourceOutput{} + output = &SelectResourceConfigOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for AWS Config. +// SelectResourceConfig API operation for AWS Config. // -// Deletes specified tags from a resource. +// Accepts a structured query language (SQL) SELECT command, performs the corresponding +// search, and returns resource configurations matching the properties. +// +// For more information about query components, see the Query Components (https://docs.aws.amazon.com/config/latest/developerguide/query-components.html) +// section in the AWS Config Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Config's -// API operation UntagResource for usage and error information. +// API operation SelectResourceConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// The requested action is not valid. +// * ErrCodeInvalidExpressionException "InvalidExpressionException" +// The syntax of the query is incorrect. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// You have specified a resource that does not exist. +// * ErrCodeInvalidLimitException "InvalidLimitException" +// The specified limit is outside the allowable range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/UntagResource -func (c *ConfigService) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The specified next token is invalid. Specify the nextToken string that was +// returned in the previous response to get the next page of results. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/SelectResourceConfig +func (c *ConfigService) SelectResourceConfig(input *SelectResourceConfigInput) (*SelectResourceConfigOutput, error) { + req, out := c.SelectResourceConfigRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// SelectResourceConfigWithContext is the same as SelectResourceConfig with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See SelectResourceConfig for details on how to use this API operation. 
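
Because SelectResourceConfig pages its results through NextToken, callers typically loop until the token comes back nil. The sketch below assumes the input carries Expression, Limit, and NextToken and the output a Results slice of JSON strings (field names taken from the service model, not this hunk), plus the fmt import alongside the earlier ones.

// Run a SQL SELECT against recorded configuration state and page through the
// results. The query string is a placeholder example.
func queryResources(ctx aws.Context, svc *configservice.ConfigService) error {
	input := &configservice.SelectResourceConfigInput{
		Expression: aws.String("SELECT resourceId WHERE resourceType = 'AWS::S3::Bucket'"),
		Limit:      aws.Int64(50),
	}
	for {
		out, err := svc.SelectResourceConfigWithContext(ctx, input)
		if err != nil {
			return err // e.g. InvalidExpressionException for malformed SQL
		}
		for _, row := range out.Results {
			fmt.Println(aws.StringValue(row)) // each row is a JSON document
		}
		if out.NextToken == nil {
			return nil
		}
		input.NextToken = out.NextToken
	}
}
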
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ConfigService) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *ConfigService) SelectResourceConfigWithContext(ctx aws.Context, input *SelectResourceConfigInput, opts ...request.Option) (*SelectResourceConfigOutput, error) { + req, out := c.SelectResourceConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// A collection of accounts and regions. -type AccountAggregationSource struct { - _ struct{} `type:"structure"` - - // The 12-digit account ID of the account being aggregated. - // - // AccountIds is a required field - AccountIds []*string `min:"1" type:"list" required:"true"` +const opStartConfigRulesEvaluation = "StartConfigRulesEvaluation" - // If true, aggregate existing AWS Config regions and future regions. - AllAwsRegions *bool `type:"boolean"` +// StartConfigRulesEvaluationRequest generates a "aws/request.Request" representing the +// client's request for the StartConfigRulesEvaluation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartConfigRulesEvaluation for more information on using the StartConfigRulesEvaluation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartConfigRulesEvaluationRequest method. +// req, resp := client.StartConfigRulesEvaluationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigRulesEvaluation +func (c *ConfigService) StartConfigRulesEvaluationRequest(input *StartConfigRulesEvaluationInput) (req *request.Request, output *StartConfigRulesEvaluationOutput) { + op := &request.Operation{ + Name: opStartConfigRulesEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartConfigRulesEvaluationInput{} + } + + output = &StartConfigRulesEvaluationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartConfigRulesEvaluation API operation for AWS Config. +// +// Runs an on-demand evaluation for the specified AWS Config rules against the +// last known configuration state of the resources. Use StartConfigRulesEvaluation +// when you want to test that a rule you updated is working as expected. StartConfigRulesEvaluation +// does not re-record the latest configuration state for your resources. It +// re-runs an evaluation against the last known state of your resources. +// +// You can specify up to 25 AWS Config rules per request. +// +// An existing StartConfigRulesEvaluation call for the specified rules must +// complete before you can call the API again. 
If you chose to have AWS Config +// stream to an Amazon SNS topic, you will receive a ConfigRuleEvaluationStarted +// notification when the evaluation starts. +// +// You don't need to call the StartConfigRulesEvaluation API to run an evaluation +// for a new rule. When you create a rule, AWS Config evaluates your resources +// against the rule automatically. +// +// The StartConfigRulesEvaluation API is useful if you want to run on-demand +// evaluations, such as the following example: +// +// You have a custom rule that evaluates your IAM resources every 24 hours. +// +// You update your Lambda function to add additional conditions to your rule. +// +// Instead of waiting for the next periodic evaluation, you call the StartConfigRulesEvaluation +// API. +// +// AWS Config invokes your Lambda function and evaluates your IAM resources. +// +// Your custom rule will still run periodic evaluations every 24 hours. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation StartConfigRulesEvaluation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchConfigRuleException "NoSuchConfigRuleException" +// One or more AWS Config rules in the request are invalid. Verify that the +// rule names are correct and try again. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// For StartConfigRulesEvaluation API, this exception is thrown if an evaluation +// is in progress or if you call the StartConfigRulesEvaluation API more than +// once per minute. +// +// For PutConfigurationAggregator API, this exception is thrown if the number +// of accounts and aggregators exceeds the limit. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// You see this exception in the following cases: +// +// * For DeleteConfigRule API, AWS Config is deleting this rule. Try your +// request again later. +// +// * For DeleteConfigRule API, the rule is deleting your evaluation results. +// Try your request again later. +// +// * For DeleteConfigRule API, a remediation action is associated with the +// rule and AWS Config cannot delete this rule. Delete the remediation action +// associated with the rule before deleting the rule and try your request +// again later. +// +// * For PutConfigOrganizationRule, organization config rule deletion is +// in progress. Try your request again later. +// +// * For DeleteOrganizationConfigRule, organization config rule creation +// is in progress. Try your request again later. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigRulesEvaluation +func (c *ConfigService) StartConfigRulesEvaluation(input *StartConfigRulesEvaluationInput) (*StartConfigRulesEvaluationOutput, error) { + req, out := c.StartConfigRulesEvaluationRequest(input) + return out, req.Send() +} + +// StartConfigRulesEvaluationWithContext is the same as StartConfigRulesEvaluation with the addition of +// the ability to pass a context and additional request options. +// +// See StartConfigRulesEvaluation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
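
For the update-then-retest flow described above, the call itself is small; the main caller-side concern is the once-per-minute limit noted in the error codes. A sketch, assuming ConfigRuleNames is the input field (from the service model) and using a placeholder rule name:

// Re-run evaluation for an updated custom rule instead of waiting for its
// next periodic cycle; at most 25 rule names may be passed per request.
func reevaluateRule(ctx aws.Context, svc *configservice.ConfigService) error {
	_, err := svc.StartConfigRulesEvaluationWithContext(ctx, &configservice.StartConfigRulesEvaluationInput{
		ConfigRuleNames: []*string{aws.String("iam-password-policy-check")},
	})
	// A LimitExceededException here usually means a prior evaluation is
	// still in progress or the API was called more than once per minute.
	return err
}
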
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) StartConfigRulesEvaluationWithContext(ctx aws.Context, input *StartConfigRulesEvaluationInput, opts ...request.Option) (*StartConfigRulesEvaluationOutput, error) { + req, out := c.StartConfigRulesEvaluationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartConfigurationRecorder = "StartConfigurationRecorder" + +// StartConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the StartConfigurationRecorder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartConfigurationRecorder for more information on using the StartConfigurationRecorder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartConfigurationRecorderRequest method. +// req, resp := client.StartConfigurationRecorderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigurationRecorder +func (c *ConfigService) StartConfigurationRecorderRequest(input *StartConfigurationRecorderInput) (req *request.Request, output *StartConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStartConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartConfigurationRecorderInput{} + } + + output = &StartConfigurationRecorderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartConfigurationRecorder API operation for AWS Config. +// +// Starts recording configurations of the AWS resources you have selected to +// record in your AWS account. +// +// You must have created at least one delivery channel to successfully start +// the configuration recorder. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation StartConfigurationRecorder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchConfigurationRecorderException "NoSuchConfigurationRecorderException" +// You have specified a configuration recorder that does not exist. +// +// * ErrCodeNoAvailableDeliveryChannelException "NoAvailableDeliveryChannelException" +// There is no delivery channel available to record configurations. 
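
Given the delivery-channel precondition above, a start call is usually guarded against the NoAvailableDeliveryChannelException error code. A sketch, assuming the aws/awserr and log imports in addition to the earlier ones; "default" is the recorder name the AWS console typically creates, and ConfigurationRecorderName is assumed from the service model:

// Start the recorder, surfacing the missing-delivery-channel case.
func startRecorder(svc *configservice.ConfigService) error {
	_, err := svc.StartConfigurationRecorder(&configservice.StartConfigurationRecorderInput{
		ConfigurationRecorderName: aws.String("default"),
	})
	if aerr, ok := err.(awserr.Error); ok &&
		aerr.Code() == configservice.ErrCodeNoAvailableDeliveryChannelException {
		log.Println("create a delivery channel before starting the recorder")
	}
	return err
}
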
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartConfigurationRecorder +func (c *ConfigService) StartConfigurationRecorder(input *StartConfigurationRecorderInput) (*StartConfigurationRecorderOutput, error) { + req, out := c.StartConfigurationRecorderRequest(input) + return out, req.Send() +} + +// StartConfigurationRecorderWithContext is the same as StartConfigurationRecorder with the addition of +// the ability to pass a context and additional request options. +// +// See StartConfigurationRecorder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) StartConfigurationRecorderWithContext(ctx aws.Context, input *StartConfigurationRecorderInput, opts ...request.Option) (*StartConfigurationRecorderOutput, error) { + req, out := c.StartConfigurationRecorderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartRemediationExecution = "StartRemediationExecution" + +// StartRemediationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartRemediationExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartRemediationExecution for more information on using the StartRemediationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartRemediationExecutionRequest method. +// req, resp := client.StartRemediationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartRemediationExecution +func (c *ConfigService) StartRemediationExecutionRequest(input *StartRemediationExecutionInput) (req *request.Request, output *StartRemediationExecutionOutput) { + op := &request.Operation{ + Name: opStartRemediationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartRemediationExecutionInput{} + } + + output = &StartRemediationExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartRemediationExecution API operation for AWS Config. +// +// Runs an on-demand remediation for the specified AWS Config rules against +// the last known remediation configuration. It runs an execution against the +// current state of your resources. Remediation execution is asynchronous. +// +// You can specify up to 100 resource keys per request. An existing StartRemediationExecution +// call for the specified resource keys must complete before you can call the +// API again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
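
Remediation execution is asynchronous, so the call only queues work; progress is observed separately (for instance via DescribeRemediationExecutionStatus, which is not shown in this hunk). A minimal sketch, with the ResourceKey field names assumed from the service model and placeholder identifiers:

// Queue remediation for one resource under the rule; up to 100 resource keys
// may be supplied per request, and an in-flight execution for the same keys
// must finish before the API can be called again.
func startRemediation(svc *configservice.ConfigService) error {
	_, err := svc.StartRemediationExecution(&configservice.StartRemediationExecutionInput{
		ConfigRuleName: aws.String("s3-bucket-public-read-prohibited"),
		ResourceKeys: []*configservice.ResourceKey{{
			ResourceId:   aws.String("my-legacy-bucket"),
			ResourceType: aws.String("AWS::S3::Bucket"),
		}},
	})
	return err
}
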
+// +// See the AWS API reference guide for AWS Config's +// API operation StartRemediationExecution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. +// +// * ErrCodeInsufficientPermissionsException "InsufficientPermissionsException" +// Indicates one of the following errors: +// +// * For PutConfigRule, the rule cannot be created because the IAM role assigned +// to AWS Config lacks permissions to perform the config:Put* action. +// +// * For PutConfigRule, the AWS Lambda function cannot be invoked. Check +// the function ARN, and check the function's permissions. +// +// * For OrganizationConfigRule, organization config rule cannot be created +// because you do not have permissions to call IAM GetRole action or create +// service linked role. +// +// * ErrCodeNoSuchRemediationConfigurationException "NoSuchRemediationConfigurationException" +// You specified an AWS Config rule without a remediation configuration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StartRemediationExecution +func (c *ConfigService) StartRemediationExecution(input *StartRemediationExecutionInput) (*StartRemediationExecutionOutput, error) { + req, out := c.StartRemediationExecutionRequest(input) + return out, req.Send() +} + +// StartRemediationExecutionWithContext is the same as StartRemediationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StartRemediationExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) StartRemediationExecutionWithContext(ctx aws.Context, input *StartRemediationExecutionInput, opts ...request.Option) (*StartRemediationExecutionOutput, error) { + req, out := c.StartRemediationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopConfigurationRecorder = "StopConfigurationRecorder" + +// StopConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the StopConfigurationRecorder operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopConfigurationRecorder for more information on using the StopConfigurationRecorder +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopConfigurationRecorderRequest method. 
+// req, resp := client.StopConfigurationRecorderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StopConfigurationRecorder +func (c *ConfigService) StopConfigurationRecorderRequest(input *StopConfigurationRecorderInput) (req *request.Request, output *StopConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStopConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopConfigurationRecorderInput{} + } + + output = &StopConfigurationRecorderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopConfigurationRecorder API operation for AWS Config. +// +// Stops recording configurations of the AWS resources you have selected to +// record in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation StopConfigurationRecorder for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchConfigurationRecorderException "NoSuchConfigurationRecorderException" +// You have specified a configuration recorder that does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/StopConfigurationRecorder +func (c *ConfigService) StopConfigurationRecorder(input *StopConfigurationRecorderInput) (*StopConfigurationRecorderOutput, error) { + req, out := c.StopConfigurationRecorderRequest(input) + return out, req.Send() +} + +// StopConfigurationRecorderWithContext is the same as StopConfigurationRecorder with the addition of +// the ability to pass a context and additional request options. +// +// See StopConfigurationRecorder for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) StopConfigurationRecorderWithContext(ctx aws.Context, input *StopConfigurationRecorderInput, opts ...request.Option) (*StopConfigurationRecorderOutput, error) { + req, out := c.StopConfigurationRecorderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/TagResource +func (c *ConfigService) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Config. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are deleted as well. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// You have specified a resource that does not exist. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit of the number of tags you can use. You have more +// than 50 tags. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/TagResource +func (c *ConfigService) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/UntagResource +func (c *ConfigService) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Config. +// +// Deletes specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Config's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeValidationException "ValidationException" +// The requested action is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// You have specified a resource that does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/UntagResource +func (c *ConfigService) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ConfigService) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// A collection of accounts and regions. +type AccountAggregationSource struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of the account being aggregated. + // + // AccountIds is a required field + AccountIds []*string `min:"1" type:"list" required:"true"` + + // If true, aggregate existing AWS Config regions and future regions. + AllAwsRegions *bool `type:"boolean"` + + // The source regions being aggregated. + AwsRegions []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s AccountAggregationSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAggregationSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AccountAggregationSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccountAggregationSource"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } + if s.AwsRegions != nil && len(s.AwsRegions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegions", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. +func (s *AccountAggregationSource) SetAccountIds(v []*string) *AccountAggregationSource { + s.AccountIds = v + return s +} + +// SetAllAwsRegions sets the AllAwsRegions field's value. +func (s *AccountAggregationSource) SetAllAwsRegions(v bool) *AccountAggregationSource { + s.AllAwsRegions = &v + return s +} + +// SetAwsRegions sets the AwsRegions field's value. +func (s *AccountAggregationSource) SetAwsRegions(v []*string) *AccountAggregationSource { + s.AwsRegions = v + return s +} + +// Indicates whether an AWS Config rule is compliant based on account ID, region, +// compliance, and rule name. +// +// A rule is compliant if all of the resources that the rule evaluated comply +// with it. It is noncompliant if any of these resources do not comply. +type AggregateComplianceByConfigRule struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of the source account. + AccountId *string `type:"string"` + + // The source region from where the data is aggregated. + AwsRegion *string `min:"1" type:"string"` + + // Indicates whether an AWS resource or AWS Config rule is compliant and provides + // the number of contributors that affect the compliance. + Compliance *Compliance `type:"structure"` + + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AggregateComplianceByConfigRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AggregateComplianceByConfigRule) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AggregateComplianceByConfigRule) SetAccountId(v string) *AggregateComplianceByConfigRule { + s.AccountId = &v + return s +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *AggregateComplianceByConfigRule) SetAwsRegion(v string) *AggregateComplianceByConfigRule { + s.AwsRegion = &v + return s +} + +// SetCompliance sets the Compliance field's value. +func (s *AggregateComplianceByConfigRule) SetCompliance(v *Compliance) *AggregateComplianceByConfigRule { + s.Compliance = v + return s +} + +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *AggregateComplianceByConfigRule) SetConfigRuleName(v string) *AggregateComplianceByConfigRule { + s.ConfigRuleName = &v + return s +} + +// Returns the number of compliant and noncompliant rules for one or more accounts +// and regions in an aggregator. +type AggregateComplianceCount struct { + _ struct{} `type:"structure"` + + // The number of compliant and noncompliant AWS Config rules. + ComplianceSummary *ComplianceSummary `type:"structure"` + + // The 12-digit account ID or region based on the GroupByKey value. 
+ GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AggregateComplianceCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AggregateComplianceCount) GoString() string { + return s.String() +} + +// SetComplianceSummary sets the ComplianceSummary field's value. +func (s *AggregateComplianceCount) SetComplianceSummary(v *ComplianceSummary) *AggregateComplianceCount { + s.ComplianceSummary = v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *AggregateComplianceCount) SetGroupName(v string) *AggregateComplianceCount { + s.GroupName = &v + return s +} + +// The details of an AWS Config evaluation for an account ID and region in an +// aggregator. Provides the AWS resource that was evaluated, the compliance +// of the resource, related time stamps, and supplementary information. +type AggregateEvaluationResult struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of the source account. + AccountId *string `type:"string"` + + // Supplementary information about how the aggregate evaluation determined the + // compliance. + Annotation *string `min:"1" type:"string"` + + // The source region from where the data is aggregated. + AwsRegion *string `min:"1" type:"string"` + + // The resource compliance status. + // + // For the AggregationEvaluationResult data type, AWS Config supports only the + // COMPLIANT and NON_COMPLIANT values. AWS Config does not support the NOT_APPLICABLE + // and INSUFFICIENT_DATA values. + ComplianceType *string `type:"string" enum:"ComplianceType"` + + // The time when the AWS Config rule evaluated the AWS resource. + ConfigRuleInvokedTime *time.Time `type:"timestamp"` + + // Uniquely identifies the evaluation result. + EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` + + // The time when AWS Config recorded the aggregate evaluation result. + ResultRecordedTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s AggregateEvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AggregateEvaluationResult) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AggregateEvaluationResult) SetAccountId(v string) *AggregateEvaluationResult { + s.AccountId = &v + return s +} + +// SetAnnotation sets the Annotation field's value. +func (s *AggregateEvaluationResult) SetAnnotation(v string) *AggregateEvaluationResult { + s.Annotation = &v + return s +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *AggregateEvaluationResult) SetAwsRegion(v string) *AggregateEvaluationResult { + s.AwsRegion = &v + return s +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *AggregateEvaluationResult) SetComplianceType(v string) *AggregateEvaluationResult { + s.ComplianceType = &v + return s +} + +// SetConfigRuleInvokedTime sets the ConfigRuleInvokedTime field's value. +func (s *AggregateEvaluationResult) SetConfigRuleInvokedTime(v time.Time) *AggregateEvaluationResult { + s.ConfigRuleInvokedTime = &v + return s +} + +// SetEvaluationResultIdentifier sets the EvaluationResultIdentifier field's value.
+func (s *AggregateEvaluationResult) SetEvaluationResultIdentifier(v *EvaluationResultIdentifier) *AggregateEvaluationResult { + s.EvaluationResultIdentifier = v + return s +} + +// SetResultRecordedTime sets the ResultRecordedTime field's value. +func (s *AggregateEvaluationResult) SetResultRecordedTime(v time.Time) *AggregateEvaluationResult { + s.ResultRecordedTime = &v + return s +} + +// The details that identify a resource that is collected by AWS Config aggregator, +// including the resource type, ID, (if available) the custom resource name, +// the source account, and source region. +type AggregateResourceIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the AWS resource. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The name of the AWS resource. + ResourceName *string `type:"string"` + + // The type of the AWS resource. + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"ResourceType"` + + // The 12-digit account ID of the source account. + // + // SourceAccountId is a required field + SourceAccountId *string `type:"string" required:"true"` + + // The source region where data is aggregated. + // + // SourceRegion is a required field + SourceRegion *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AggregateResourceIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AggregateResourceIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AggregateResourceIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AggregateResourceIdentifier"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.SourceAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceAccountId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + if s.SourceRegion != nil && len(*s.SourceRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceRegion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *AggregateResourceIdentifier) SetResourceId(v string) *AggregateResourceIdentifier { + s.ResourceId = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *AggregateResourceIdentifier) SetResourceName(v string) *AggregateResourceIdentifier { + s.ResourceName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *AggregateResourceIdentifier) SetResourceType(v string) *AggregateResourceIdentifier { + s.ResourceType = &v + return s +} + +// SetSourceAccountId sets the SourceAccountId field's value. +func (s *AggregateResourceIdentifier) SetSourceAccountId(v string) *AggregateResourceIdentifier { + s.SourceAccountId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. 
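+//
+// Illustrative sketch only, not part of the generated SDK: the fluent setters
+// on AggregateResourceIdentifier are typically chained, and Validate surfaces
+// any missing required field before a request is built. All field values below
+// are hypothetical.
+//
+//    id := &AggregateResourceIdentifier{}
+//    id.SetSourceAccountId("111111111111").
+//        SetSourceRegion("us-east-1").
+//        SetResourceId("sg-xxxxxx").
+//        SetResourceType("AWS::EC2::SecurityGroup")
+//    if err := id.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing the offending fields
+//    }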
+func (s *AggregateResourceIdentifier) SetSourceRegion(v string) *AggregateResourceIdentifier { + s.SourceRegion = &v + return s +} + +// The current sync status between the source and the aggregator account. +type AggregatedSourceStatus struct { + _ struct{} `type:"structure"` + + // The region authorized to collect aggregated data. + AwsRegion *string `min:"1" type:"string"` + + // The error code that AWS Config returned when the source account aggregation + // last failed. + LastErrorCode *string `type:"string"` + + // The message indicating that the source account aggregation failed due to + // an error. + LastErrorMessage *string `type:"string"` + + // Filters the last updated status type. + // + // * Valid value FAILED indicates errors while moving data. + // + // * Valid value SUCCEEDED indicates the data was successfully moved. + // + // * Valid value OUTDATED indicates the data is not the most recent. + LastUpdateStatus *string `type:"string" enum:"AggregatedSourceStatusType"` + + // The time of the last update. + LastUpdateTime *time.Time `type:"timestamp"` + + // The source account ID or an organization. + SourceId *string `type:"string"` + + // The source account or an organization. + SourceType *string `type:"string" enum:"AggregatedSourceType"` +} + +// String returns the string representation +func (s AggregatedSourceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AggregatedSourceStatus) GoString() string { + return s.String() +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *AggregatedSourceStatus) SetAwsRegion(v string) *AggregatedSourceStatus { + s.AwsRegion = &v + return s +} + +// SetLastErrorCode sets the LastErrorCode field's value. +func (s *AggregatedSourceStatus) SetLastErrorCode(v string) *AggregatedSourceStatus { + s.LastErrorCode = &v + return s +} + +// SetLastErrorMessage sets the LastErrorMessage field's value. +func (s *AggregatedSourceStatus) SetLastErrorMessage(v string) *AggregatedSourceStatus { + s.LastErrorMessage = &v + return s +} + +// SetLastUpdateStatus sets the LastUpdateStatus field's value. +func (s *AggregatedSourceStatus) SetLastUpdateStatus(v string) *AggregatedSourceStatus { + s.LastUpdateStatus = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *AggregatedSourceStatus) SetLastUpdateTime(v time.Time) *AggregatedSourceStatus { + s.LastUpdateTime = &v + return s +} + +// SetSourceId sets the SourceId field's value. +func (s *AggregatedSourceStatus) SetSourceId(v string) *AggregatedSourceStatus { + s.SourceId = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *AggregatedSourceStatus) SetSourceType(v string) *AggregatedSourceStatus { + s.SourceType = &v + return s +} + +// An object that represents the authorizations granted to aggregator accounts +// and regions. +type AggregationAuthorization struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the aggregation object. + AggregationAuthorizationArn *string `type:"string"` + + // The 12-digit account ID of the account authorized to aggregate data. + AuthorizedAccountId *string `type:"string"` + + // The region authorized to collect aggregated data. + AuthorizedAwsRegion *string `min:"1" type:"string"` + + // The time stamp when the aggregation authorization was created. 
+	CreationTime *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s AggregationAuthorization) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AggregationAuthorization) GoString() string {
+	return s.String()
+}
+
+// SetAggregationAuthorizationArn sets the AggregationAuthorizationArn field's value.
+func (s *AggregationAuthorization) SetAggregationAuthorizationArn(v string) *AggregationAuthorization {
+	s.AggregationAuthorizationArn = &v
+	return s
+}
+
+// SetAuthorizedAccountId sets the AuthorizedAccountId field's value.
+func (s *AggregationAuthorization) SetAuthorizedAccountId(v string) *AggregationAuthorization {
+	s.AuthorizedAccountId = &v
+	return s
+}
+
+// SetAuthorizedAwsRegion sets the AuthorizedAwsRegion field's value.
+func (s *AggregationAuthorization) SetAuthorizedAwsRegion(v string) *AggregationAuthorization {
+	s.AuthorizedAwsRegion = &v
+	return s
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *AggregationAuthorization) SetCreationTime(v time.Time) *AggregationAuthorization {
+	s.CreationTime = &v
+	return s
+}
+
+// The detailed configuration of a specified resource.
+type BaseConfigurationItem struct {
+	_ struct{} `type:"structure"`
+
+	// The 12-digit AWS account ID associated with the resource.
+	AccountId *string `locationName:"accountId" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the resource.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The Availability Zone associated with the resource.
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The region where the resource resides.
+	AwsRegion *string `locationName:"awsRegion" min:"1" type:"string"`
+
+	// The description of the resource configuration.
+	Configuration *string `locationName:"configuration" type:"string"`
+
+	// The time when the configuration recording was initiated.
+	ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp"`
+
+	// The configuration item status.
+	ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"`
+
+	// An identifier that indicates the ordering of the configuration items of a
+	// resource.
+	ConfigurationStateId *string `locationName:"configurationStateId" type:"string"`
+
+	// The time stamp when the resource was created.
+	ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp"`
+
+	// The ID of the resource (for example, sg-xxxxxx).
+	ResourceId *string `locationName:"resourceId" min:"1" type:"string"`
+
+	// The custom name of the resource, if available.
+	ResourceName *string `locationName:"resourceName" type:"string"`
+
+	// The type of AWS resource.
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+	// Configuration attributes that AWS Config returns for certain resource types
+	// to supplement the information returned for the configuration parameter.
+	SupplementaryConfiguration map[string]*string `locationName:"supplementaryConfiguration" type:"map"`
+
+	// The version number of the resource configuration.
+ Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s BaseConfigurationItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaseConfigurationItem) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *BaseConfigurationItem) SetAccountId(v string) *BaseConfigurationItem { + s.AccountId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *BaseConfigurationItem) SetArn(v string) *BaseConfigurationItem { + s.Arn = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *BaseConfigurationItem) SetAvailabilityZone(v string) *BaseConfigurationItem { + s.AvailabilityZone = &v + return s +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *BaseConfigurationItem) SetAwsRegion(v string) *BaseConfigurationItem { + s.AwsRegion = &v + return s +} + +// SetConfiguration sets the Configuration field's value. +func (s *BaseConfigurationItem) SetConfiguration(v string) *BaseConfigurationItem { + s.Configuration = &v + return s +} + +// SetConfigurationItemCaptureTime sets the ConfigurationItemCaptureTime field's value. +func (s *BaseConfigurationItem) SetConfigurationItemCaptureTime(v time.Time) *BaseConfigurationItem { + s.ConfigurationItemCaptureTime = &v + return s +} + +// SetConfigurationItemStatus sets the ConfigurationItemStatus field's value. +func (s *BaseConfigurationItem) SetConfigurationItemStatus(v string) *BaseConfigurationItem { + s.ConfigurationItemStatus = &v + return s +} + +// SetConfigurationStateId sets the ConfigurationStateId field's value. +func (s *BaseConfigurationItem) SetConfigurationStateId(v string) *BaseConfigurationItem { + s.ConfigurationStateId = &v + return s +} + +// SetResourceCreationTime sets the ResourceCreationTime field's value. +func (s *BaseConfigurationItem) SetResourceCreationTime(v time.Time) *BaseConfigurationItem { + s.ResourceCreationTime = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *BaseConfigurationItem) SetResourceId(v string) *BaseConfigurationItem { + s.ResourceId = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *BaseConfigurationItem) SetResourceName(v string) *BaseConfigurationItem { + s.ResourceName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *BaseConfigurationItem) SetResourceType(v string) *BaseConfigurationItem { + s.ResourceType = &v + return s +} + +// SetSupplementaryConfiguration sets the SupplementaryConfiguration field's value. +func (s *BaseConfigurationItem) SetSupplementaryConfiguration(v map[string]*string) *BaseConfigurationItem { + s.SupplementaryConfiguration = v + return s +} + +// SetVersion sets the Version field's value. +func (s *BaseConfigurationItem) SetVersion(v string) *BaseConfigurationItem { + s.Version = &v + return s +} + +type BatchGetAggregateResourceConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + + // A list of aggregate ResourceIdentifiers objects. 
+	//
+	// ResourceIdentifiers is a required field
+	ResourceIdentifiers []*AggregateResourceIdentifier `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchGetAggregateResourceConfigInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetAggregateResourceConfigInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetAggregateResourceConfigInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchGetAggregateResourceConfigInput"}
+	if s.ConfigurationAggregatorName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName"))
+	}
+	if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1))
+	}
+	if s.ResourceIdentifiers == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceIdentifiers"))
+	}
+	if s.ResourceIdentifiers != nil && len(s.ResourceIdentifiers) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifiers", 1))
+	}
+	if s.ResourceIdentifiers != nil {
+		for i, v := range s.ResourceIdentifiers {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceIdentifiers", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value.
+func (s *BatchGetAggregateResourceConfigInput) SetConfigurationAggregatorName(v string) *BatchGetAggregateResourceConfigInput {
+	s.ConfigurationAggregatorName = &v
+	return s
+}
+
+// SetResourceIdentifiers sets the ResourceIdentifiers field's value.
+func (s *BatchGetAggregateResourceConfigInput) SetResourceIdentifiers(v []*AggregateResourceIdentifier) *BatchGetAggregateResourceConfigInput {
+	s.ResourceIdentifiers = v
+	return s
+}
+
+type BatchGetAggregateResourceConfigOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the current configuration of one or more resources.
+	BaseConfigurationItems []*BaseConfigurationItem `type:"list"`
+
+	// A list of resource identifiers that were not processed with the current
+	// scope. The list is empty if all the resources are processed.
+	UnprocessedResourceIdentifiers []*AggregateResourceIdentifier `type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetAggregateResourceConfigOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetAggregateResourceConfigOutput) GoString() string {
+	return s.String()
+}
+
+// SetBaseConfigurationItems sets the BaseConfigurationItems field's value.
+func (s *BatchGetAggregateResourceConfigOutput) SetBaseConfigurationItems(v []*BaseConfigurationItem) *BatchGetAggregateResourceConfigOutput {
+	s.BaseConfigurationItems = v
+	return s
+}
+
+// SetUnprocessedResourceIdentifiers sets the UnprocessedResourceIdentifiers field's value.
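+//
+// Hypothetical retry sketch, not part of the generated SDK: identifiers that
+// come back unprocessed can be fed into a follow-up call. Here svc is an
+// assumed *ConfigService client and input an already-validated
+// *BatchGetAggregateResourceConfigInput.
+//
+//    out, err := svc.BatchGetAggregateResourceConfig(input)
+//    if err == nil && len(out.UnprocessedResourceIdentifiers) > 0 {
+//        input.SetResourceIdentifiers(out.UnprocessedResourceIdentifiers)
+//        out, err = svc.BatchGetAggregateResourceConfig(input)
+//    }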
+func (s *BatchGetAggregateResourceConfigOutput) SetUnprocessedResourceIdentifiers(v []*AggregateResourceIdentifier) *BatchGetAggregateResourceConfigOutput {
+	s.UnprocessedResourceIdentifiers = v
+	return s
+}
+
+type BatchGetResourceConfigInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of resource keys to be processed with the current request. Each element
+	// in the list consists of the resource type and resource ID.
+	//
+	// ResourceKeys is a required field
+	ResourceKeys []*ResourceKey `locationName:"resourceKeys" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchGetResourceConfigInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetResourceConfigInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetResourceConfigInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchGetResourceConfigInput"}
+	if s.ResourceKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceKeys"))
+	}
+	if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1))
+	}
+	if s.ResourceKeys != nil {
+		for i, v := range s.ResourceKeys {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceKeys sets the ResourceKeys field's value.
+func (s *BatchGetResourceConfigInput) SetResourceKeys(v []*ResourceKey) *BatchGetResourceConfigInput {
+	s.ResourceKeys = v
+	return s
+}
+
+type BatchGetResourceConfigOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the current configuration of one or more resources.
+	BaseConfigurationItems []*BaseConfigurationItem `locationName:"baseConfigurationItems" type:"list"`
+
+	// A list of resource keys that were not processed with the current response.
+	// The unprocessedResourceKeys value is in the same form as ResourceKeys, so
+	// the value can be directly provided to a subsequent BatchGetResourceConfig
+	// operation. If there are no unprocessed resource keys, the response contains
+	// an empty unprocessedResourceKeys list.
+	UnprocessedResourceKeys []*ResourceKey `locationName:"unprocessedResourceKeys" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetResourceConfigOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetResourceConfigOutput) GoString() string {
+	return s.String()
+}
+
+// SetBaseConfigurationItems sets the BaseConfigurationItems field's value.
+func (s *BatchGetResourceConfigOutput) SetBaseConfigurationItems(v []*BaseConfigurationItem) *BatchGetResourceConfigOutput {
+	s.BaseConfigurationItems = v
+	return s
+}
+
+// SetUnprocessedResourceKeys sets the UnprocessedResourceKeys field's value.
+func (s *BatchGetResourceConfigOutput) SetUnprocessedResourceKeys(v []*ResourceKey) *BatchGetResourceConfigOutput {
+	s.UnprocessedResourceKeys = v
+	return s
+}
+
+// Indicates whether an AWS resource or AWS Config rule is compliant and provides
+// the number of contributors that affect the compliance.
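+//
+// A minimal, illustrative read of this type, not part of the generated SDK;
+// c is assumed to be a *Compliance taken from a Describe* response:
+//
+//    if aws.StringValue(c.ComplianceType) == ComplianceTypeNonCompliant &&
+//        c.ComplianceContributorCount != nil {
+//        fmt.Printf("%d noncompliant contributors\n",
+//            aws.Int64Value(c.ComplianceContributorCount.CappedCount))
+//    }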
+type Compliance struct { + _ struct{} `type:"structure"` + + // The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, + // up to a maximum number. + ComplianceContributorCount *ComplianceContributorCount `type:"structure"` + + // Indicates whether an AWS resource or AWS Config rule is compliant. + // + // A resource is compliant if it complies with all of the AWS Config rules that + // evaluate it. A resource is noncompliant if it does not comply with one or + // more of these rules. + // + // A rule is compliant if all of the resources that the rule evaluates comply + // with it. A rule is noncompliant if any of these resources do not comply. + // + // AWS Config returns the INSUFFICIENT_DATA value when no evaluation results + // are available for the AWS resource or AWS Config rule. + // + // For the Compliance data type, AWS Config supports only COMPLIANT, NON_COMPLIANT, + // and INSUFFICIENT_DATA values. AWS Config does not support the NOT_APPLICABLE + // value for the Compliance data type. + ComplianceType *string `type:"string" enum:"ComplianceType"` +} + +// String returns the string representation +func (s Compliance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Compliance) GoString() string { + return s.String() +} + +// SetComplianceContributorCount sets the ComplianceContributorCount field's value. +func (s *Compliance) SetComplianceContributorCount(v *ComplianceContributorCount) *Compliance { + s.ComplianceContributorCount = v + return s +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *Compliance) SetComplianceType(v string) *Compliance { + s.ComplianceType = &v + return s +} + +// Indicates whether an AWS Config rule is compliant. A rule is compliant if +// all of the resources that the rule evaluated comply with it. A rule is noncompliant +// if any of these resources do not comply. +type ComplianceByConfigRule struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS Config rule is compliant. + Compliance *Compliance `type:"structure"` + + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceByConfigRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceByConfigRule) GoString() string { + return s.String() +} + +// SetCompliance sets the Compliance field's value. +func (s *ComplianceByConfigRule) SetCompliance(v *Compliance) *ComplianceByConfigRule { + s.Compliance = v + return s +} + +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *ComplianceByConfigRule) SetConfigRuleName(v string) *ComplianceByConfigRule { + s.ConfigRuleName = &v + return s +} + +// Indicates whether an AWS resource that is evaluated according to one or more +// AWS Config rules is compliant. A resource is compliant if it complies with +// all of the rules that evaluate it. A resource is noncompliant if it does +// not comply with one or more of these rules. +type ComplianceByResource struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS resource complies with all of the AWS Config rules + // that evaluated it. + Compliance *Compliance `type:"structure"` + + // The ID of the AWS resource that was evaluated. + ResourceId *string `min:"1" type:"string"` + + // The type of the AWS resource that was evaluated. 
+ ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceByResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceByResource) GoString() string { + return s.String() +} + +// SetCompliance sets the Compliance field's value. +func (s *ComplianceByResource) SetCompliance(v *Compliance) *ComplianceByResource { + s.Compliance = v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ComplianceByResource) SetResourceId(v string) *ComplianceByResource { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ComplianceByResource) SetResourceType(v string) *ComplianceByResource { + s.ResourceType = &v + return s +} + +// The number of AWS resources or AWS Config rules responsible for the current +// compliance of the item, up to a maximum number. +type ComplianceContributorCount struct { + _ struct{} `type:"structure"` + + // Indicates whether the maximum count is reached. + CapExceeded *bool `type:"boolean"` + + // The number of AWS resources or AWS Config rules responsible for the current + // compliance of the item. + CappedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ComplianceContributorCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceContributorCount) GoString() string { + return s.String() +} + +// SetCapExceeded sets the CapExceeded field's value. +func (s *ComplianceContributorCount) SetCapExceeded(v bool) *ComplianceContributorCount { + s.CapExceeded = &v + return s +} + +// SetCappedCount sets the CappedCount field's value. +func (s *ComplianceContributorCount) SetCappedCount(v int64) *ComplianceContributorCount { + s.CappedCount = &v + return s +} + +// The number of AWS Config rules or AWS resources that are compliant and noncompliant. +type ComplianceSummary struct { + _ struct{} `type:"structure"` + + // The time that AWS Config created the compliance summary. + ComplianceSummaryTimestamp *time.Time `type:"timestamp"` + + // The number of AWS Config rules or AWS resources that are compliant, up to + // a maximum of 25 for rules and 100 for resources. + CompliantResourceCount *ComplianceContributorCount `type:"structure"` + + // The number of AWS Config rules or AWS resources that are noncompliant, up + // to a maximum of 25 for rules and 100 for resources. + NonCompliantResourceCount *ComplianceContributorCount `type:"structure"` +} + +// String returns the string representation +func (s ComplianceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceSummary) GoString() string { + return s.String() +} + +// SetComplianceSummaryTimestamp sets the ComplianceSummaryTimestamp field's value. +func (s *ComplianceSummary) SetComplianceSummaryTimestamp(v time.Time) *ComplianceSummary { + s.ComplianceSummaryTimestamp = &v + return s +} + +// SetCompliantResourceCount sets the CompliantResourceCount field's value. +func (s *ComplianceSummary) SetCompliantResourceCount(v *ComplianceContributorCount) *ComplianceSummary { + s.CompliantResourceCount = v + return s +} + +// SetNonCompliantResourceCount sets the NonCompliantResourceCount field's value. 
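+//
+// Hedged reading example, not part of the generated SDK: CapExceeded means
+// CappedCount stopped at the documented cap, so treat the count as a lower
+// bound. cs is an assumed *ComplianceSummary.
+//
+//    if nc := cs.NonCompliantResourceCount; nc != nil {
+//        count := aws.Int64Value(nc.CappedCount)
+//        if aws.BoolValue(nc.CapExceeded) {
+//            // at least count resources are noncompliant
+//        }
+//    }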
+func (s *ComplianceSummary) SetNonCompliantResourceCount(v *ComplianceContributorCount) *ComplianceSummary { + s.NonCompliantResourceCount = v + return s +} + +// The number of AWS resources of a specific type that are compliant or noncompliant, +// up to a maximum of 100 for each. +type ComplianceSummaryByResourceType struct { + _ struct{} `type:"structure"` + + // The number of AWS resources that are compliant or noncompliant, up to a maximum + // of 100 for each. + ComplianceSummary *ComplianceSummary `type:"structure"` + + // The type of AWS resource. + ResourceType *string `min:"1" type:"string"` +} - // The source regions being aggregated. - AwsRegions []*string `min:"1" type:"list"` +// String returns the string representation +func (s ComplianceSummaryByResourceType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceSummaryByResourceType) GoString() string { + return s.String() +} + +// SetComplianceSummary sets the ComplianceSummary field's value. +func (s *ComplianceSummaryByResourceType) SetComplianceSummary(v *ComplianceSummary) *ComplianceSummaryByResourceType { + s.ComplianceSummary = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ComplianceSummaryByResourceType) SetResourceType(v string) *ComplianceSummaryByResourceType { + s.ResourceType = &v + return s +} + +// Provides status of the delivery of the snapshot or the configuration history +// to the specified Amazon S3 bucket. Also provides the status of notifications +// about the Amazon S3 delivery to the specified Amazon SNS topic. +type ConfigExportDeliveryInfo struct { + _ struct{} `type:"structure"` + + // The time of the last attempted delivery. + LastAttemptTime *time.Time `locationName:"lastAttemptTime" type:"timestamp"` + + // The error code from the last attempted delivery. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The error message from the last attempted delivery. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // Status of the last attempted delivery. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` + + // The time of the last successful delivery. + LastSuccessfulTime *time.Time `locationName:"lastSuccessfulTime" type:"timestamp"` + + // The time that the next delivery occurs. + NextDeliveryTime *time.Time `locationName:"nextDeliveryTime" type:"timestamp"` } // String returns the string representation -func (s AccountAggregationSource) String() string { +func (s ConfigExportDeliveryInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccountAggregationSource) GoString() string { +func (s ConfigExportDeliveryInfo) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AccountAggregationSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccountAggregationSource"} - if s.AccountIds == nil { - invalidParams.Add(request.NewErrParamRequired("AccountIds")) - } - if s.AccountIds != nil && len(s.AccountIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) - } - if s.AwsRegions != nil && len(s.AwsRegions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsRegions", 1)) - } +// SetLastAttemptTime sets the LastAttemptTime field's value. 
+func (s *ConfigExportDeliveryInfo) SetLastAttemptTime(v time.Time) *ConfigExportDeliveryInfo { + s.LastAttemptTime = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastErrorCode sets the LastErrorCode field's value. +func (s *ConfigExportDeliveryInfo) SetLastErrorCode(v string) *ConfigExportDeliveryInfo { + s.LastErrorCode = &v + return s } -// SetAccountIds sets the AccountIds field's value. -func (s *AccountAggregationSource) SetAccountIds(v []*string) *AccountAggregationSource { - s.AccountIds = v +// SetLastErrorMessage sets the LastErrorMessage field's value. +func (s *ConfigExportDeliveryInfo) SetLastErrorMessage(v string) *ConfigExportDeliveryInfo { + s.LastErrorMessage = &v return s } -// SetAllAwsRegions sets the AllAwsRegions field's value. -func (s *AccountAggregationSource) SetAllAwsRegions(v bool) *AccountAggregationSource { - s.AllAwsRegions = &v +// SetLastStatus sets the LastStatus field's value. +func (s *ConfigExportDeliveryInfo) SetLastStatus(v string) *ConfigExportDeliveryInfo { + s.LastStatus = &v return s } -// SetAwsRegions sets the AwsRegions field's value. -func (s *AccountAggregationSource) SetAwsRegions(v []*string) *AccountAggregationSource { - s.AwsRegions = v +// SetLastSuccessfulTime sets the LastSuccessfulTime field's value. +func (s *ConfigExportDeliveryInfo) SetLastSuccessfulTime(v time.Time) *ConfigExportDeliveryInfo { + s.LastSuccessfulTime = &v return s } -// Indicates whether an AWS Config rule is compliant based on account ID, region, -// compliance, and rule name. +// SetNextDeliveryTime sets the NextDeliveryTime field's value. +func (s *ConfigExportDeliveryInfo) SetNextDeliveryTime(v time.Time) *ConfigExportDeliveryInfo { + s.NextDeliveryTime = &v + return s +} + +// An AWS Config rule represents an AWS Lambda function that you create for +// a custom rule or a predefined function for an AWS managed rule. The function +// evaluates configuration items to assess whether your AWS resources comply +// with your desired configurations. This function can run when AWS Config detects +// a configuration change to an AWS resource and at a periodic frequency that +// you choose (for example, every 24 hours). // -// A rule is compliant if all of the resources that the rule evaluated comply -// with it. It is noncompliant if any of these resources do not comply. -type AggregateComplianceByConfigRule struct { +// You can use the AWS CLI and AWS SDKs if you want to create a rule that triggers +// evaluations for your resources when AWS Config delivers the configuration +// snapshot. For more information, see ConfigSnapshotDeliveryProperties. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. +type ConfigRule struct { _ struct{} `type:"structure"` - // The 12-digit account ID of the source account. - AccountId *string `type:"string"` - - // The source region from where the data is aggregated. - AwsRegion *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the AWS Config rule. + ConfigRuleArn *string `type:"string"` - // Indicates whether an AWS resource or AWS Config rule is compliant and provides - // the number of contributors that affect the compliance. - Compliance *Compliance `type:"structure"` + // The ID of the AWS Config rule. 
+ ConfigRuleId *string `type:"string"` - // The name of the AWS Config rule. + // The name that you assign to the AWS Config rule. The name is required if + // you are adding a new rule. ConfigRuleName *string `min:"1" type:"string"` + + // Indicates whether the AWS Config rule is active or is currently being deleted + // by AWS Config. It can also indicate the evaluation status for the AWS Config + // rule. + // + // AWS Config sets the state of the rule to EVALUATING temporarily after you + // use the StartConfigRulesEvaluation request to evaluate your resources against + // the AWS Config rule. + // + // AWS Config sets the state of the rule to DELETING_RESULTS temporarily after + // you use the DeleteEvaluationResults request to delete the current evaluation + // results for the AWS Config rule. + // + // AWS Config temporarily sets the state of a rule to DELETING after you use + // the DeleteConfigRule request to delete the rule. After AWS Config deletes + // the rule, the rule and all of its evaluations are erased and are no longer + // available. + ConfigRuleState *string `type:"string" enum:"ConfigRuleState"` + + // Service principal name of the service that created the rule. + // + // The field is populated only if the service linked rule is created by a service. + // The field is empty if you create your own rule. + CreatedBy *string `min:"1" type:"string"` + + // The description that you provide for the AWS Config rule. + Description *string `type:"string"` + + // A string, in JSON format, that is passed to the AWS Config rule Lambda function. + InputParameters *string `min:"1" type:"string"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + // You can specify a value for MaximumExecutionFrequency when: + // + // * You are using an AWS managed rule that is triggered at a periodic frequency. + // + // * Your custom rule is triggered when AWS Config delivers the configuration + // snapshot. For more information, see ConfigSnapshotDeliveryProperties. + // + // By default, rules with a periodic trigger are evaluated every 24 hours. To + // change the frequency, specify a valid value for the MaximumExecutionFrequency + // parameter. + MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"` + + // Defines which resources can trigger an evaluation for the rule. The scope + // can include one or more resource types, a combination of one resource type + // and one resource ID, or a combination of a tag key and value. Specify a scope + // to constrain the resources that can trigger an evaluation for the rule. If + // you do not specify a scope, evaluations are triggered when any resource in + // the recording group changes. + Scope *Scope `type:"structure"` + + // Provides the rule owner (AWS or customer), the rule identifier, and the notifications + // that cause the function to evaluate your AWS resources. + // + // Source is a required field + Source *Source `type:"structure" required:"true"` } // String returns the string representation -func (s AggregateComplianceByConfigRule) String() string { +func (s ConfigRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AggregateComplianceByConfigRule) GoString() string { +func (s ConfigRule) GoString() string { return s.String() } -// SetAccountId sets the AccountId field's value. 
-func (s *AggregateComplianceByConfigRule) SetAccountId(v string) *AggregateComplianceByConfigRule { - s.AccountId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigRule"} + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + if s.CreatedBy != nil && len(*s.CreatedBy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CreatedBy", 1)) + } + if s.InputParameters != nil && len(*s.InputParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputParameters", 1)) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Scope != nil { + if err := s.Scope.Validate(); err != nil { + invalidParams.AddNested("Scope", err.(request.ErrInvalidParams)) + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetAwsRegion sets the AwsRegion field's value. -func (s *AggregateComplianceByConfigRule) SetAwsRegion(v string) *AggregateComplianceByConfigRule { - s.AwsRegion = &v +// SetConfigRuleArn sets the ConfigRuleArn field's value. +func (s *ConfigRule) SetConfigRuleArn(v string) *ConfigRule { + s.ConfigRuleArn = &v return s } -// SetCompliance sets the Compliance field's value. -func (s *AggregateComplianceByConfigRule) SetCompliance(v *Compliance) *AggregateComplianceByConfigRule { - s.Compliance = v +// SetConfigRuleId sets the ConfigRuleId field's value. +func (s *ConfigRule) SetConfigRuleId(v string) *ConfigRule { + s.ConfigRuleId = &v return s } // SetConfigRuleName sets the ConfigRuleName field's value. -func (s *AggregateComplianceByConfigRule) SetConfigRuleName(v string) *AggregateComplianceByConfigRule { +func (s *ConfigRule) SetConfigRuleName(v string) *ConfigRule { s.ConfigRuleName = &v return s } -// Returns the number of compliant and noncompliant rules for one or more accounts -// and regions in an aggregator. -type AggregateComplianceCount struct { - _ struct{} `type:"structure"` - - // The number of compliant and noncompliant AWS Config rules. - ComplianceSummary *ComplianceSummary `type:"structure"` - - // The 12-digit account ID or region based on the GroupByKey value. - GroupName *string `min:"1" type:"string"` +// SetConfigRuleState sets the ConfigRuleState field's value. +func (s *ConfigRule) SetConfigRuleState(v string) *ConfigRule { + s.ConfigRuleState = &v + return s +} + +// SetCreatedBy sets the CreatedBy field's value. +func (s *ConfigRule) SetCreatedBy(v string) *ConfigRule { + s.CreatedBy = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ConfigRule) SetDescription(v string) *ConfigRule { + s.Description = &v + return s } -// String returns the string representation -func (s AggregateComplianceCount) String() string { - return awsutil.Prettify(s) +// SetInputParameters sets the InputParameters field's value. +func (s *ConfigRule) SetInputParameters(v string) *ConfigRule { + s.InputParameters = &v + return s } -// GoString returns the string representation -func (s AggregateComplianceCount) GoString() string { - return s.String() +// SetMaximumExecutionFrequency sets the MaximumExecutionFrequency field's value. 
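+//
+// Illustrative sketch, not part of the generated SDK, of assembling a periodic
+// AWS managed rule with the fluent setters; the rule name is hypothetical:
+//
+//    rule := &ConfigRule{}
+//    rule.SetConfigRuleName("required-tags-example").
+//        SetMaximumExecutionFrequency(MaximumExecutionFrequencyTwentyFourHours).
+//        SetSource((&Source{}).SetOwner(OwnerAws).SetSourceIdentifier("REQUIRED_TAGS"))
+//    if err := rule.Validate(); err != nil {
+//        // handle request.ErrInvalidParams
+//    }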
+func (s *ConfigRule) SetMaximumExecutionFrequency(v string) *ConfigRule { + s.MaximumExecutionFrequency = &v + return s } -// SetComplianceSummary sets the ComplianceSummary field's value. -func (s *AggregateComplianceCount) SetComplianceSummary(v *ComplianceSummary) *AggregateComplianceCount { - s.ComplianceSummary = v +// SetScope sets the Scope field's value. +func (s *ConfigRule) SetScope(v *Scope) *ConfigRule { + s.Scope = v return s } -// SetGroupName sets the GroupName field's value. -func (s *AggregateComplianceCount) SetGroupName(v string) *AggregateComplianceCount { - s.GroupName = &v +// SetSource sets the Source field's value. +func (s *ConfigRule) SetSource(v *Source) *ConfigRule { + s.Source = v return s } -// The details of an AWS Config evaluation for an account ID and region in an -// aggregator. Provides the AWS resource that was evaluated, the compliance -// of the resource, related time stamps, and supplementary information. -type AggregateEvaluationResult struct { +// Filters the compliance results based on account ID, region, compliance type, +// and rule name. +type ConfigRuleComplianceFilters struct { _ struct{} `type:"structure"` // The 12-digit account ID of the source account. AccountId *string `type:"string"` - // Supplementary information about how the agrregate evaluation determined the - // compliance. - Annotation *string `min:"1" type:"string"` - - // The source region from where the data is aggregated. + // The source region where the data is aggregated. AwsRegion *string `min:"1" type:"string"` - // The resource compliance status. + // The rule compliance status. // - // For the AggregationEvaluationResult data type, AWS Config supports only the - // COMPLIANT and NON_COMPLIANT. AWS Config does not support the NOT_APPLICABLE - // and INSUFFICIENT_DATA value. + // For the ConfigRuleComplianceFilters data type, AWS Config supports only COMPLIANT + // and NON_COMPLIANT. AWS Config does not support the NOT_APPLICABLE and the + // INSUFFICIENT_DATA values. ComplianceType *string `type:"string" enum:"ComplianceType"` - // The time when the AWS Config rule evaluated the AWS resource. - ConfigRuleInvokedTime *time.Time `type:"timestamp"` - - // Uniquely identifies the evaluation result. - EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` - - // The time when AWS Config recorded the aggregate evaluation result. - ResultRecordedTime *time.Time `type:"timestamp"` + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` } // String returns the string representation -func (s AggregateEvaluationResult) String() string { +func (s ConfigRuleComplianceFilters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AggregateEvaluationResult) GoString() string { +func (s ConfigRuleComplianceFilters) GoString() string { return s.String() } -// SetAccountId sets the AccountId field's value. -func (s *AggregateEvaluationResult) SetAccountId(v string) *AggregateEvaluationResult { - s.AccountId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
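+//
+// Hypothetical usage, not part of the generated SDK: the SDK invokes Validate
+// while building a request, but it can also be called directly to catch
+// min-length violations early.
+//
+//    f := (&ConfigRuleComplianceFilters{}).SetAwsRegion("")
+//    if err := f.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming AwsRegion
+//    }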
+func (s *ConfigRuleComplianceFilters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigRuleComplianceFilters"} + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetAnnotation sets the Annotation field's value. -func (s *AggregateEvaluationResult) SetAnnotation(v string) *AggregateEvaluationResult { - s.Annotation = &v +// SetAccountId sets the AccountId field's value. +func (s *ConfigRuleComplianceFilters) SetAccountId(v string) *ConfigRuleComplianceFilters { + s.AccountId = &v return s } // SetAwsRegion sets the AwsRegion field's value. -func (s *AggregateEvaluationResult) SetAwsRegion(v string) *AggregateEvaluationResult { +func (s *ConfigRuleComplianceFilters) SetAwsRegion(v string) *ConfigRuleComplianceFilters { s.AwsRegion = &v return s } // SetComplianceType sets the ComplianceType field's value. -func (s *AggregateEvaluationResult) SetComplianceType(v string) *AggregateEvaluationResult { +func (s *ConfigRuleComplianceFilters) SetComplianceType(v string) *ConfigRuleComplianceFilters { s.ComplianceType = &v return s } -// SetConfigRuleInvokedTime sets the ConfigRuleInvokedTime field's value. -func (s *AggregateEvaluationResult) SetConfigRuleInvokedTime(v time.Time) *AggregateEvaluationResult { - s.ConfigRuleInvokedTime = &v - return s -} - -// SetEvaluationResultIdentifier sets the EvaluationResultIdentifier field's value. -func (s *AggregateEvaluationResult) SetEvaluationResultIdentifier(v *EvaluationResultIdentifier) *AggregateEvaluationResult { - s.EvaluationResultIdentifier = v - return s -} - -// SetResultRecordedTime sets the ResultRecordedTime field's value. -func (s *AggregateEvaluationResult) SetResultRecordedTime(v time.Time) *AggregateEvaluationResult { - s.ResultRecordedTime = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *ConfigRuleComplianceFilters) SetConfigRuleName(v string) *ConfigRuleComplianceFilters { + s.ConfigRuleName = &v return s } -// The details that identify a resource that is collected by AWS Config aggregator, -// including the resource type, ID, (if available) the custom resource name, -// the source account, and source region. -type AggregateResourceIdentifier struct { +// Filters the results based on the account IDs and regions. +type ConfigRuleComplianceSummaryFilters struct { _ struct{} `type:"structure"` - // The ID of the AWS resource. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The name of the AWS resource. - ResourceName *string `type:"string"` - - // The type of the AWS resource. - // - // ResourceType is a required field - ResourceType *string `type:"string" required:"true" enum:"ResourceType"` - // The 12-digit account ID of the source account. - // - // SourceAccountId is a required field - SourceAccountId *string `type:"string" required:"true"` + AccountId *string `type:"string"` - // The source region where data is aggregated. - // - // SourceRegion is a required field - SourceRegion *string `min:"1" type:"string" required:"true"` + // The source region where the data is aggregated. 
+ AwsRegion *string `min:"1" type:"string"` } // String returns the string representation -func (s AggregateResourceIdentifier) String() string { +func (s ConfigRuleComplianceSummaryFilters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AggregateResourceIdentifier) GoString() string { +func (s ConfigRuleComplianceSummaryFilters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AggregateResourceIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AggregateResourceIdentifier"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ResourceType == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceType")) - } - if s.SourceAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("SourceAccountId")) - } - if s.SourceRegion == nil { - invalidParams.Add(request.NewErrParamRequired("SourceRegion")) - } - if s.SourceRegion != nil && len(*s.SourceRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SourceRegion", 1)) +func (s *ConfigRuleComplianceSummaryFilters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigRuleComplianceSummaryFilters"} + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) } if invalidParams.Len() > 0 { @@ -5587,940 +7675,764 @@ func (s *AggregateResourceIdentifier) Validate() error { return nil } -// SetResourceId sets the ResourceId field's value. -func (s *AggregateResourceIdentifier) SetResourceId(v string) *AggregateResourceIdentifier { - s.ResourceId = &v +// SetAccountId sets the AccountId field's value. +func (s *ConfigRuleComplianceSummaryFilters) SetAccountId(v string) *ConfigRuleComplianceSummaryFilters { + s.AccountId = &v return s } -// SetResourceName sets the ResourceName field's value. -func (s *AggregateResourceIdentifier) SetResourceName(v string) *AggregateResourceIdentifier { - s.ResourceName = &v +// SetAwsRegion sets the AwsRegion field's value. +func (s *ConfigRuleComplianceSummaryFilters) SetAwsRegion(v string) *ConfigRuleComplianceSummaryFilters { + s.AwsRegion = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *AggregateResourceIdentifier) SetResourceType(v string) *AggregateResourceIdentifier { - s.ResourceType = &v - return s -} +// Status information for your AWS managed Config rules. The status includes +// information such as the last time the rule ran, the last time it failed, +// and the related error for the last failure. +// +// This action does not return status information about custom AWS Config rules. +type ConfigRuleEvaluationStatus struct { + _ struct{} `type:"structure"` -// SetSourceAccountId sets the SourceAccountId field's value. -func (s *AggregateResourceIdentifier) SetSourceAccountId(v string) *AggregateResourceIdentifier { - s.SourceAccountId = &v - return s -} + // The Amazon Resource Name (ARN) of the AWS Config rule. + ConfigRuleArn *string `type:"string"` -// SetSourceRegion sets the SourceRegion field's value. -func (s *AggregateResourceIdentifier) SetSourceRegion(v string) *AggregateResourceIdentifier { - s.SourceRegion = &v - return s -} + // The ID of the AWS Config rule. 
+ ConfigRuleId *string `type:"string"` -// The current sync status between the source and the aggregator account. -type AggregatedSourceStatus struct { - _ struct{} `type:"structure"` + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` - // The region authorized to collect aggregated data. - AwsRegion *string `min:"1" type:"string"` + // The time that you first activated the AWS Config rule. + FirstActivatedTime *time.Time `type:"timestamp"` - // The error code that AWS Config returned when the source account aggregation - // last failed. + // Indicates whether AWS Config has evaluated your resources against the rule + // at least once. + // + // * true - AWS Config has evaluated your AWS resources against the rule + // at least once. + // + // * false - AWS Config has not once finished evaluating your AWS resources + // against the rule. + FirstEvaluationStarted *bool `type:"boolean"` + + // The error code that AWS Config returned when the rule last failed. LastErrorCode *string `type:"string"` - // The message indicating that the source account aggregation failed due to - // an error. + // The error message that AWS Config returned when the rule last failed. LastErrorMessage *string `type:"string"` - // Filters the last updated status type. - // - // * Valid value FAILED indicates errors while moving data. - // - // * Valid value SUCCEEDED indicates the data was successfully moved. - // - // * Valid value OUTDATED indicates the data is not the most recent. - LastUpdateStatus *string `type:"string" enum:"AggregatedSourceStatusType"` + // The time that AWS Config last failed to evaluate your AWS resources against + // the rule. + LastFailedEvaluationTime *time.Time `type:"timestamp"` - // The time of the last update. - LastUpdateTime *time.Time `type:"timestamp"` + // The time that AWS Config last failed to invoke the AWS Config rule to evaluate + // your AWS resources. + LastFailedInvocationTime *time.Time `type:"timestamp"` - // The source account ID or an organization. - SourceId *string `type:"string"` + // The time that AWS Config last successfully evaluated your AWS resources against + // the rule. + LastSuccessfulEvaluationTime *time.Time `type:"timestamp"` - // The source account or an organization. - SourceType *string `type:"string" enum:"AggregatedSourceType"` + // The time that AWS Config last successfully invoked the AWS Config rule to + // evaluate your AWS resources. + LastSuccessfulInvocationTime *time.Time `type:"timestamp"` } // String returns the string representation -func (s AggregatedSourceStatus) String() string { +func (s ConfigRuleEvaluationStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AggregatedSourceStatus) GoString() string { +func (s ConfigRuleEvaluationStatus) GoString() string { return s.String() } -// SetAwsRegion sets the AwsRegion field's value. -func (s *AggregatedSourceStatus) SetAwsRegion(v string) *AggregatedSourceStatus { - s.AwsRegion = &v +// SetConfigRuleArn sets the ConfigRuleArn field's value. +func (s *ConfigRuleEvaluationStatus) SetConfigRuleArn(v string) *ConfigRuleEvaluationStatus { + s.ConfigRuleArn = &v return s } -// SetLastErrorCode sets the LastErrorCode field's value. -func (s *AggregatedSourceStatus) SetLastErrorCode(v string) *AggregatedSourceStatus { - s.LastErrorCode = &v +// SetConfigRuleId sets the ConfigRuleId field's value. 
+func (s *ConfigRuleEvaluationStatus) SetConfigRuleId(v string) *ConfigRuleEvaluationStatus { + s.ConfigRuleId = &v return s } -// SetLastErrorMessage sets the LastErrorMessage field's value. -func (s *AggregatedSourceStatus) SetLastErrorMessage(v string) *AggregatedSourceStatus { - s.LastErrorMessage = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *ConfigRuleEvaluationStatus) SetConfigRuleName(v string) *ConfigRuleEvaluationStatus { + s.ConfigRuleName = &v return s } -// SetLastUpdateStatus sets the LastUpdateStatus field's value. -func (s *AggregatedSourceStatus) SetLastUpdateStatus(v string) *AggregatedSourceStatus { - s.LastUpdateStatus = &v +// SetFirstActivatedTime sets the FirstActivatedTime field's value. +func (s *ConfigRuleEvaluationStatus) SetFirstActivatedTime(v time.Time) *ConfigRuleEvaluationStatus { + s.FirstActivatedTime = &v return s } -// SetLastUpdateTime sets the LastUpdateTime field's value. -func (s *AggregatedSourceStatus) SetLastUpdateTime(v time.Time) *AggregatedSourceStatus { - s.LastUpdateTime = &v +// SetFirstEvaluationStarted sets the FirstEvaluationStarted field's value. +func (s *ConfigRuleEvaluationStatus) SetFirstEvaluationStarted(v bool) *ConfigRuleEvaluationStatus { + s.FirstEvaluationStarted = &v return s } -// SetSourceId sets the SourceId field's value. -func (s *AggregatedSourceStatus) SetSourceId(v string) *AggregatedSourceStatus { - s.SourceId = &v +// SetLastErrorCode sets the LastErrorCode field's value. +func (s *ConfigRuleEvaluationStatus) SetLastErrorCode(v string) *ConfigRuleEvaluationStatus { + s.LastErrorCode = &v return s } -// SetSourceType sets the SourceType field's value. -func (s *AggregatedSourceStatus) SetSourceType(v string) *AggregatedSourceStatus { - s.SourceType = &v +// SetLastErrorMessage sets the LastErrorMessage field's value. +func (s *ConfigRuleEvaluationStatus) SetLastErrorMessage(v string) *ConfigRuleEvaluationStatus { + s.LastErrorMessage = &v return s } -// An object that represents the authorizations granted to aggregator accounts -// and regions. -type AggregationAuthorization struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the aggregation object. - AggregationAuthorizationArn *string `type:"string"` - - // The 12-digit account ID of the account authorized to aggregate data. - AuthorizedAccountId *string `type:"string"` - - // The region authorized to collect aggregated data. - AuthorizedAwsRegion *string `min:"1" type:"string"` - - // The time stamp when the aggregation authorization was created. - CreationTime *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s AggregationAuthorization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AggregationAuthorization) GoString() string { - return s.String() -} - -// SetAggregationAuthorizationArn sets the AggregationAuthorizationArn field's value. -func (s *AggregationAuthorization) SetAggregationAuthorizationArn(v string) *AggregationAuthorization { - s.AggregationAuthorizationArn = &v +// SetLastFailedEvaluationTime sets the LastFailedEvaluationTime field's value. +func (s *ConfigRuleEvaluationStatus) SetLastFailedEvaluationTime(v time.Time) *ConfigRuleEvaluationStatus { + s.LastFailedEvaluationTime = &v return s } -// SetAuthorizedAccountId sets the AuthorizedAccountId field's value. 
-func (s *AggregationAuthorization) SetAuthorizedAccountId(v string) *AggregationAuthorization { - s.AuthorizedAccountId = &v +// SetLastFailedInvocationTime sets the LastFailedInvocationTime field's value. +func (s *ConfigRuleEvaluationStatus) SetLastFailedInvocationTime(v time.Time) *ConfigRuleEvaluationStatus { + s.LastFailedInvocationTime = &v return s } -// SetAuthorizedAwsRegion sets the AuthorizedAwsRegion field's value. -func (s *AggregationAuthorization) SetAuthorizedAwsRegion(v string) *AggregationAuthorization { - s.AuthorizedAwsRegion = &v +// SetLastSuccessfulEvaluationTime sets the LastSuccessfulEvaluationTime field's value. +func (s *ConfigRuleEvaluationStatus) SetLastSuccessfulEvaluationTime(v time.Time) *ConfigRuleEvaluationStatus { + s.LastSuccessfulEvaluationTime = &v return s } -// SetCreationTime sets the CreationTime field's value. -func (s *AggregationAuthorization) SetCreationTime(v time.Time) *AggregationAuthorization { - s.CreationTime = &v +// SetLastSuccessfulInvocationTime sets the LastSuccessfulInvocationTime field's value. +func (s *ConfigRuleEvaluationStatus) SetLastSuccessfulInvocationTime(v time.Time) *ConfigRuleEvaluationStatus { + s.LastSuccessfulInvocationTime = &v return s } -// The detailed configuration of a specified resource. -type BaseConfigurationItem struct { +// Provides options for how often AWS Config delivers configuration snapshots +// to the Amazon S3 bucket in your delivery channel. +// +// The frequency for a rule that triggers evaluations for your resources when +// AWS Config delivers the configuration snapshot is set by one of two values, +// depending on which is less frequent: +// +// * The value for the deliveryFrequency parameter within the delivery channel +// configuration, which sets how often AWS Config delivers configuration +// snapshots. This value also sets how often AWS Config invokes evaluations +// for AWS Config rules. +// +// * The value for the MaximumExecutionFrequency parameter, which sets the +// maximum frequency with which AWS Config invokes evaluations for the rule. +// For more information, see ConfigRule. +// +// If the deliveryFrequency value is less frequent than the MaximumExecutionFrequency +// value for a rule, AWS Config invokes the rule only as often as the deliveryFrequency +// value. +// +// For example, you want your rule to run evaluations when AWS Config delivers +// the configuration snapshot. +// +// You specify the MaximumExecutionFrequency value of Six_Hours. +// +// You then specify the delivery channel deliveryFrequency value of TwentyFour_Hours. +// +// Because the value for deliveryFrequency is less frequent than MaximumExecutionFrequency, +// AWS Config invokes evaluations for the rule every 24 hours. +// +// You should set the MaximumExecutionFrequency value to be at least as frequent +// as the deliveryFrequency value. You can view the deliveryFrequency value +// by using the DescribeDeliveryChannels action. +// +// To update the deliveryFrequency with which AWS Config delivers your configuration +// snapshots, use the PutDeliveryChannel action. +type ConfigSnapshotDeliveryProperties struct { _ struct{} `type:"structure"` - // The 12-digit AWS account ID associated with the resource. - AccountId *string `locationName:"accountId" type:"string"` - - // The Amazon Resource Name (ARN) of the resource. - Arn *string `locationName:"arn" type:"string"` - - // The Availability Zone associated with the resource. 
- AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - - // The region where the resource resides. - AwsRegion *string `locationName:"awsRegion" min:"1" type:"string"` - - // The description of the resource configuration. - Configuration *string `locationName:"configuration" type:"string"` - - // The time when the configuration recording was initiated. - ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp"` - - // The configuration item status. - ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"` - - // An identifier that indicates the ordering of the configuration items of a - // resource. - ConfigurationStateId *string `locationName:"configurationStateId" type:"string"` - - // The time stamp when the resource was created. - ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp"` - - // The ID of the resource (for example., sg-xxxxxx). - ResourceId *string `locationName:"resourceId" min:"1" type:"string"` - - // The custom name of the resource, if available. - ResourceName *string `locationName:"resourceName" type:"string"` - - // The type of AWS resource. - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - - // Configuration attributes that AWS Config returns for certain resource types - // to supplement the information returned for the configuration parameter. - SupplementaryConfiguration map[string]*string `locationName:"supplementaryConfiguration" type:"map"` - - // The version number of the resource configuration. - Version *string `locationName:"version" type:"string"` + // The frequency with which AWS Config delivers configuration snapshots. + DeliveryFrequency *string `locationName:"deliveryFrequency" type:"string" enum:"MaximumExecutionFrequency"` } // String returns the string representation -func (s BaseConfigurationItem) String() string { +func (s ConfigSnapshotDeliveryProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BaseConfigurationItem) GoString() string { +func (s ConfigSnapshotDeliveryProperties) GoString() string { return s.String() } -// SetAccountId sets the AccountId field's value. -func (s *BaseConfigurationItem) SetAccountId(v string) *BaseConfigurationItem { - s.AccountId = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *BaseConfigurationItem) SetArn(v string) *BaseConfigurationItem { - s.Arn = &v - return s -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *BaseConfigurationItem) SetAvailabilityZone(v string) *BaseConfigurationItem { - s.AvailabilityZone = &v +// SetDeliveryFrequency sets the DeliveryFrequency field's value. +func (s *ConfigSnapshotDeliveryProperties) SetDeliveryFrequency(v string) *ConfigSnapshotDeliveryProperties { + s.DeliveryFrequency = &v return s } -// SetAwsRegion sets the AwsRegion field's value. -func (s *BaseConfigurationItem) SetAwsRegion(v string) *BaseConfigurationItem { - s.AwsRegion = &v - return s -} +// A list that contains the status of the delivery of the configuration stream +// notification to the Amazon SNS topic. +type ConfigStreamDeliveryInfo struct { + _ struct{} `type:"structure"` -// SetConfiguration sets the Configuration field's value. 
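
The less-frequent-value-wins behavior documented on ConfigSnapshotDeliveryProperties above can be made concrete with a short sketch. Setting DeliveryFrequency to TwentyFour_Hours while a rule's MaximumExecutionFrequency is Six_Hours means the rule runs only every 24 hours. The bucket and channel names below are placeholders, not taken from this diff.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	// With TwentyFour_Hours here and a rule whose MaximumExecutionFrequency
	// is Six_Hours, the less frequent value wins: evaluations run every 24h.
	_, err := svc.PutDeliveryChannel(&configservice.PutDeliveryChannelInput{
		DeliveryChannel: &configservice.DeliveryChannel{
			Name:         aws.String("default"),          // placeholder channel name
			S3BucketName: aws.String("my-config-bucket"), // placeholder bucket
			ConfigSnapshotDeliveryProperties: &configservice.ConfigSnapshotDeliveryProperties{
				DeliveryFrequency: aws.String(configservice.MaximumExecutionFrequencyTwentyFourHours),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```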
-func (s *BaseConfigurationItem) SetConfiguration(v string) *BaseConfigurationItem { - s.Configuration = &v - return s -} + // The error code from the last attempted delivery. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` -// SetConfigurationItemCaptureTime sets the ConfigurationItemCaptureTime field's value. -func (s *BaseConfigurationItem) SetConfigurationItemCaptureTime(v time.Time) *BaseConfigurationItem { - s.ConfigurationItemCaptureTime = &v - return s -} + // The error message from the last attempted delivery. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` -// SetConfigurationItemStatus sets the ConfigurationItemStatus field's value. -func (s *BaseConfigurationItem) SetConfigurationItemStatus(v string) *BaseConfigurationItem { - s.ConfigurationItemStatus = &v - return s -} + // Status of the last attempted delivery. + // + // Note Providing an SNS topic on a DeliveryChannel (https://docs.aws.amazon.com/config/latest/APIReference/API_DeliveryChannel.html) + // for AWS Config is optional. If the SNS delivery is turned off, the last status + // will be Not_Applicable. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` -// SetConfigurationStateId sets the ConfigurationStateId field's value. -func (s *BaseConfigurationItem) SetConfigurationStateId(v string) *BaseConfigurationItem { - s.ConfigurationStateId = &v - return s + // The time from the last status change. + LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp"` } -// SetResourceCreationTime sets the ResourceCreationTime field's value. -func (s *BaseConfigurationItem) SetResourceCreationTime(v time.Time) *BaseConfigurationItem { - s.ResourceCreationTime = &v - return s +// String returns the string representation +func (s ConfigStreamDeliveryInfo) String() string { + return awsutil.Prettify(s) } -// SetResourceId sets the ResourceId field's value. -func (s *BaseConfigurationItem) SetResourceId(v string) *BaseConfigurationItem { - s.ResourceId = &v - return s +// GoString returns the string representation +func (s ConfigStreamDeliveryInfo) GoString() string { + return s.String() } -// SetResourceName sets the ResourceName field's value. -func (s *BaseConfigurationItem) SetResourceName(v string) *BaseConfigurationItem { - s.ResourceName = &v +// SetLastErrorCode sets the LastErrorCode field's value. +func (s *ConfigStreamDeliveryInfo) SetLastErrorCode(v string) *ConfigStreamDeliveryInfo { + s.LastErrorCode = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *BaseConfigurationItem) SetResourceType(v string) *BaseConfigurationItem { - s.ResourceType = &v +// SetLastErrorMessage sets the LastErrorMessage field's value. +func (s *ConfigStreamDeliveryInfo) SetLastErrorMessage(v string) *ConfigStreamDeliveryInfo { + s.LastErrorMessage = &v return s } -// SetSupplementaryConfiguration sets the SupplementaryConfiguration field's value. -func (s *BaseConfigurationItem) SetSupplementaryConfiguration(v map[string]*string) *BaseConfigurationItem { - s.SupplementaryConfiguration = v +// SetLastStatus sets the LastStatus field's value. +func (s *ConfigStreamDeliveryInfo) SetLastStatus(v string) *ConfigStreamDeliveryInfo { + s.LastStatus = &v return s } -// SetVersion sets the Version field's value. -func (s *BaseConfigurationItem) SetVersion(v string) *BaseConfigurationItem { - s.Version = &v +// SetLastStatusChangeTime sets the LastStatusChangeTime field's value. 
+func (s *ConfigStreamDeliveryInfo) SetLastStatusChangeTime(v time.Time) *ConfigStreamDeliveryInfo { + s.LastStatusChangeTime = &v return s } -type BatchGetAggregateResourceConfigInput struct { +// The details about the configuration aggregator, including information about +// source accounts, regions, and metadata of the aggregator. +type ConfigurationAggregator struct { _ struct{} `type:"structure"` - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + // Provides a list of source accounts and regions to be aggregated. + AccountAggregationSources []*AccountAggregationSource `type:"list"` - // A list of aggregate ResourceIdentifiers objects. - // - // ResourceIdentifiers is a required field - ResourceIdentifiers []*AggregateResourceIdentifier `min:"1" type:"list" required:"true"` + // The Amazon Resource Name (ARN) of the aggregator. + ConfigurationAggregatorArn *string `type:"string"` + + // The name of the aggregator. + ConfigurationAggregatorName *string `min:"1" type:"string"` + + // The time stamp when the configuration aggregator was created. + CreationTime *time.Time `type:"timestamp"` + + // The time of the last update. + LastUpdatedTime *time.Time `type:"timestamp"` + + // Provides an organization and list of regions to be aggregated. + OrganizationAggregationSource *OrganizationAggregationSource `type:"structure"` } // String returns the string representation -func (s BatchGetAggregateResourceConfigInput) String() string { +func (s ConfigurationAggregator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetAggregateResourceConfigInput) GoString() string { +func (s ConfigurationAggregator) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetAggregateResourceConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetAggregateResourceConfigInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - if s.ResourceIdentifiers == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceIdentifiers")) - } - if s.ResourceIdentifiers != nil && len(s.ResourceIdentifiers) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceIdentifiers", 1)) - } - if s.ResourceIdentifiers != nil { - for i, v := range s.ResourceIdentifiers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceIdentifiers", i), err.(request.ErrInvalidParams)) - } - } - } +// SetAccountAggregationSources sets the AccountAggregationSources field's value. +func (s *ConfigurationAggregator) SetAccountAggregationSources(v []*AccountAggregationSource) *ConfigurationAggregator { + s.AccountAggregationSources = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetConfigurationAggregatorArn sets the ConfigurationAggregatorArn field's value. 
+func (s *ConfigurationAggregator) SetConfigurationAggregatorArn(v string) *ConfigurationAggregator { + s.ConfigurationAggregatorArn = &v + return s } // SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *BatchGetAggregateResourceConfigInput) SetConfigurationAggregatorName(v string) *BatchGetAggregateResourceConfigInput { +func (s *ConfigurationAggregator) SetConfigurationAggregatorName(v string) *ConfigurationAggregator { s.ConfigurationAggregatorName = &v return s } -// SetResourceIdentifiers sets the ResourceIdentifiers field's value. -func (s *BatchGetAggregateResourceConfigInput) SetResourceIdentifiers(v []*AggregateResourceIdentifier) *BatchGetAggregateResourceConfigInput { - s.ResourceIdentifiers = v +// SetCreationTime sets the CreationTime field's value. +func (s *ConfigurationAggregator) SetCreationTime(v time.Time) *ConfigurationAggregator { + s.CreationTime = &v return s } -type BatchGetAggregateResourceConfigOutput struct { +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *ConfigurationAggregator) SetLastUpdatedTime(v time.Time) *ConfigurationAggregator { + s.LastUpdatedTime = &v + return s +} + +// SetOrganizationAggregationSource sets the OrganizationAggregationSource field's value. +func (s *ConfigurationAggregator) SetOrganizationAggregationSource(v *OrganizationAggregationSource) *ConfigurationAggregator { + s.OrganizationAggregationSource = v + return s +} + +// A list that contains detailed configurations of a specified resource. +type ConfigurationItem struct { _ struct{} `type:"structure"` - // A list that contains the current configuration of one or more resources. - BaseConfigurationItems []*BaseConfigurationItem `type:"list"` + // The 12-digit AWS account ID associated with the resource. + AccountId *string `locationName:"accountId" type:"string"` - // A list of resource identifiers that were not processed with current scope. - // The list is empty if all the resources are processed. - UnprocessedResourceIdentifiers []*AggregateResourceIdentifier `type:"list"` -} + // The Amazon Resource Name (ARN) of the resource. + Arn *string `locationName:"arn" type:"string"` -// String returns the string representation -func (s BatchGetAggregateResourceConfigOutput) String() string { - return awsutil.Prettify(s) -} + // The Availability Zone associated with the resource. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` -// GoString returns the string representation -func (s BatchGetAggregateResourceConfigOutput) GoString() string { - return s.String() -} + // The region where the resource resides. + AwsRegion *string `locationName:"awsRegion" min:"1" type:"string"` -// SetBaseConfigurationItems sets the BaseConfigurationItems field's value. -func (s *BatchGetAggregateResourceConfigOutput) SetBaseConfigurationItems(v []*BaseConfigurationItem) *BatchGetAggregateResourceConfigOutput { - s.BaseConfigurationItems = v - return s -} + // The description of the resource configuration. + Configuration *string `locationName:"configuration" type:"string"` -// SetUnprocessedResourceIdentifiers sets the UnprocessedResourceIdentifiers field's value. -func (s *BatchGetAggregateResourceConfigOutput) SetUnprocessedResourceIdentifiers(v []*AggregateResourceIdentifier) *BatchGetAggregateResourceConfigOutput { - s.UnprocessedResourceIdentifiers = v - return s -} + // The time when the configuration recording was initiated. 
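
The ConfigurationAggregator accessors are now complete; a brief sketch of how the type is returned in practice, via DescribeConfigurationAggregators (client construction assumed, not shown in this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	out, err := svc.DescribeConfigurationAggregators(&configservice.DescribeConfigurationAggregatorsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, agg := range out.ConfigurationAggregators {
		// An aggregator carries either account sources or an organization source.
		fmt.Printf("%s (account sources: %d, org source: %v)\n",
			aws.StringValue(agg.ConfigurationAggregatorName),
			len(agg.AccountAggregationSources),
			agg.OrganizationAggregationSource != nil)
	}
}
```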
+ ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp"` -type BatchGetResourceConfigInput struct { - _ struct{} `type:"structure"` + // Unique MD5 hash that represents the configuration item's state. + // + // You can use MD5 hash to compare the states of two or more configuration items + // that are associated with the same resource. + ConfigurationItemMD5Hash *string `locationName:"configurationItemMD5Hash" type:"string"` - // A list of resource keys to be processed with the current request. Each element - // in the list consists of the resource type and resource ID. + // The configuration item status. + ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"` + + // An identifier that indicates the ordering of the configuration items of a + // resource. + ConfigurationStateId *string `locationName:"configurationStateId" type:"string"` + + // A list of CloudTrail event IDs. // - // ResourceKeys is a required field - ResourceKeys []*ResourceKey `locationName:"resourceKeys" min:"1" type:"list" required:"true"` -} + // A populated field indicates that the current configuration was initiated + // by the events recorded in the CloudTrail log. For more information about + // CloudTrail, see What Is AWS CloudTrail (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). + // + // An empty field indicates that the current configuration was not initiated + // by any event. + RelatedEvents []*string `locationName:"relatedEvents" type:"list"` -// String returns the string representation -func (s BatchGetResourceConfigInput) String() string { - return awsutil.Prettify(s) -} + // A list of related AWS resources. + Relationships []*Relationship `locationName:"relationships" type:"list"` -// GoString returns the string representation -func (s BatchGetResourceConfigInput) GoString() string { - return s.String() -} + // The time stamp when the resource was created. + ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetResourceConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetResourceConfigInput"} - if s.ResourceKeys == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceKeys")) - } - if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) - } - if s.ResourceKeys != nil { - for i, v := range s.ResourceKeys { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) - } - } - } + // The ID of the resource (for example, sg-xxxxxx). + ResourceId *string `locationName:"resourceId" min:"1" type:"string"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // The custom name of the resource, if available. + ResourceName *string `locationName:"resourceName" type:"string"` -// SetResourceKeys sets the ResourceKeys field's value. -func (s *BatchGetResourceConfigInput) SetResourceKeys(v []*ResourceKey) *BatchGetResourceConfigInput { - s.ResourceKeys = v - return s -} + // The type of AWS resource. 
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` -type BatchGetResourceConfigOutput struct { - _ struct{} `type:"structure"` + // Configuration attributes that AWS Config returns for certain resource types + // to supplement the information returned for the configuration parameter. + SupplementaryConfiguration map[string]*string `locationName:"supplementaryConfiguration" type:"map"` - // A list that contains the current configuration of one or more resources. - BaseConfigurationItems []*BaseConfigurationItem `locationName:"baseConfigurationItems" type:"list"` + // A mapping of key value tags associated with the resource. + Tags map[string]*string `locationName:"tags" type:"map"` - // A list of resource keys that were not processed with the current response. - // The unprocessesResourceKeys value is in the same form as ResourceKeys, so - // the value can be directly provided to a subsequent BatchGetResourceConfig - // operation. If there are no unprocessed resource keys, the response contains - // an empty unprocessedResourceKeys list. - UnprocessedResourceKeys []*ResourceKey `locationName:"unprocessedResourceKeys" min:"1" type:"list"` + // The version number of the resource configuration. + Version *string `locationName:"version" type:"string"` } // String returns the string representation -func (s BatchGetResourceConfigOutput) String() string { +func (s ConfigurationItem) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetResourceConfigOutput) GoString() string { +func (s ConfigurationItem) GoString() string { return s.String() } -// SetBaseConfigurationItems sets the BaseConfigurationItems field's value. -func (s *BatchGetResourceConfigOutput) SetBaseConfigurationItems(v []*BaseConfigurationItem) *BatchGetResourceConfigOutput { - s.BaseConfigurationItems = v +// SetAccountId sets the AccountId field's value. +func (s *ConfigurationItem) SetAccountId(v string) *ConfigurationItem { + s.AccountId = &v return s } -// SetUnprocessedResourceKeys sets the UnprocessedResourceKeys field's value. -func (s *BatchGetResourceConfigOutput) SetUnprocessedResourceKeys(v []*ResourceKey) *BatchGetResourceConfigOutput { - s.UnprocessedResourceKeys = v +// SetArn sets the Arn field's value. +func (s *ConfigurationItem) SetArn(v string) *ConfigurationItem { + s.Arn = &v return s } -// Indicates whether an AWS resource or AWS Config rule is compliant and provides -// the number of contributors that affect the compliance. -type Compliance struct { - _ struct{} `type:"structure"` - - // The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, - // up to a maximum number. - ComplianceContributorCount *ComplianceContributorCount `type:"structure"` - - // Indicates whether an AWS resource or AWS Config rule is compliant. - // - // A resource is compliant if it complies with all of the AWS Config rules that - // evaluate it. A resource is noncompliant if it does not comply with one or - // more of these rules. - // - // A rule is compliant if all of the resources that the rule evaluates comply - // with it. A rule is noncompliant if any of these resources do not comply. - // - // AWS Config returns the INSUFFICIENT_DATA value when no evaluation results - // are available for the AWS resource or AWS Config rule. - // - // For the Compliance data type, AWS Config supports only COMPLIANT, NON_COMPLIANT, - // and INSUFFICIENT_DATA values. 
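
The ConfigurationItemMD5Hash field documented above exists precisely so callers can compare the states of two configuration items for the same resource without diffing the full Configuration payload. A minimal sketch of that comparison (the helper name sameState and the hash literals are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// sameState reports whether two configuration items for the same resource
// describe an identical state, using the MD5 hash as documented above.
func sameState(a, b *configservice.ConfigurationItem) bool {
	if a == nil || b == nil {
		return false
	}
	ha := aws.StringValue(a.ConfigurationItemMD5Hash)
	hb := aws.StringValue(b.ConfigurationItemMD5Hash)
	return ha != "" && ha == hb
}

func main() {
	a := &configservice.ConfigurationItem{ConfigurationItemMD5Hash: aws.String("d41d8cd9")}
	b := &configservice.ConfigurationItem{ConfigurationItemMD5Hash: aws.String("d41d8cd9")}
	fmt.Println(sameState(a, b)) // true: identical recorded state
}
```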
AWS Config does not support the NOT_APPLICABLE - // value for the Compliance data type. - ComplianceType *string `type:"string" enum:"ComplianceType"` +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ConfigurationItem) SetAvailabilityZone(v string) *ConfigurationItem { + s.AvailabilityZone = &v + return s } -// String returns the string representation -func (s Compliance) String() string { - return awsutil.Prettify(s) +// SetAwsRegion sets the AwsRegion field's value. +func (s *ConfigurationItem) SetAwsRegion(v string) *ConfigurationItem { + s.AwsRegion = &v + return s } -// GoString returns the string representation -func (s Compliance) GoString() string { - return s.String() +// SetConfiguration sets the Configuration field's value. +func (s *ConfigurationItem) SetConfiguration(v string) *ConfigurationItem { + s.Configuration = &v + return s } -// SetComplianceContributorCount sets the ComplianceContributorCount field's value. -func (s *Compliance) SetComplianceContributorCount(v *ComplianceContributorCount) *Compliance { - s.ComplianceContributorCount = v +// SetConfigurationItemCaptureTime sets the ConfigurationItemCaptureTime field's value. +func (s *ConfigurationItem) SetConfigurationItemCaptureTime(v time.Time) *ConfigurationItem { + s.ConfigurationItemCaptureTime = &v return s } -// SetComplianceType sets the ComplianceType field's value. -func (s *Compliance) SetComplianceType(v string) *Compliance { - s.ComplianceType = &v +// SetConfigurationItemMD5Hash sets the ConfigurationItemMD5Hash field's value. +func (s *ConfigurationItem) SetConfigurationItemMD5Hash(v string) *ConfigurationItem { + s.ConfigurationItemMD5Hash = &v return s } -// Indicates whether an AWS Config rule is compliant. A rule is compliant if -// all of the resources that the rule evaluated comply with it. A rule is noncompliant -// if any of these resources do not comply. -type ComplianceByConfigRule struct { - _ struct{} `type:"structure"` - - // Indicates whether the AWS Config rule is compliant. - Compliance *Compliance `type:"structure"` - - // The name of the AWS Config rule. - ConfigRuleName *string `min:"1" type:"string"` +// SetConfigurationItemStatus sets the ConfigurationItemStatus field's value. +func (s *ConfigurationItem) SetConfigurationItemStatus(v string) *ConfigurationItem { + s.ConfigurationItemStatus = &v + return s } -// String returns the string representation -func (s ComplianceByConfigRule) String() string { - return awsutil.Prettify(s) +// SetConfigurationStateId sets the ConfigurationStateId field's value. +func (s *ConfigurationItem) SetConfigurationStateId(v string) *ConfigurationItem { + s.ConfigurationStateId = &v + return s } -// GoString returns the string representation -func (s ComplianceByConfigRule) GoString() string { - return s.String() +// SetRelatedEvents sets the RelatedEvents field's value. +func (s *ConfigurationItem) SetRelatedEvents(v []*string) *ConfigurationItem { + s.RelatedEvents = v + return s } -// SetCompliance sets the Compliance field's value. -func (s *ComplianceByConfigRule) SetCompliance(v *Compliance) *ComplianceByConfigRule { - s.Compliance = v +// SetRelationships sets the Relationships field's value. +func (s *ConfigurationItem) SetRelationships(v []*Relationship) *ConfigurationItem { + s.Relationships = v return s } -// SetConfigRuleName sets the ConfigRuleName field's value. 
-func (s *ComplianceByConfigRule) SetConfigRuleName(v string) *ComplianceByConfigRule { - s.ConfigRuleName = &v +// SetResourceCreationTime sets the ResourceCreationTime field's value. +func (s *ConfigurationItem) SetResourceCreationTime(v time.Time) *ConfigurationItem { + s.ResourceCreationTime = &v return s } -// Indicates whether an AWS resource that is evaluated according to one or more -// AWS Config rules is compliant. A resource is compliant if it complies with -// all of the rules that evaluate it. A resource is noncompliant if it does -// not comply with one or more of these rules. -type ComplianceByResource struct { - _ struct{} `type:"structure"` - - // Indicates whether the AWS resource complies with all of the AWS Config rules - // that evaluated it. - Compliance *Compliance `type:"structure"` - - // The ID of the AWS resource that was evaluated. - ResourceId *string `min:"1" type:"string"` - - // The type of the AWS resource that was evaluated. - ResourceType *string `min:"1" type:"string"` +// SetResourceId sets the ResourceId field's value. +func (s *ConfigurationItem) SetResourceId(v string) *ConfigurationItem { + s.ResourceId = &v + return s } -// String returns the string representation -func (s ComplianceByResource) String() string { - return awsutil.Prettify(s) +// SetResourceName sets the ResourceName field's value. +func (s *ConfigurationItem) SetResourceName(v string) *ConfigurationItem { + s.ResourceName = &v + return s } -// GoString returns the string representation -func (s ComplianceByResource) GoString() string { - return s.String() +// SetResourceType sets the ResourceType field's value. +func (s *ConfigurationItem) SetResourceType(v string) *ConfigurationItem { + s.ResourceType = &v + return s } -// SetCompliance sets the Compliance field's value. -func (s *ComplianceByResource) SetCompliance(v *Compliance) *ComplianceByResource { - s.Compliance = v +// SetSupplementaryConfiguration sets the SupplementaryConfiguration field's value. +func (s *ConfigurationItem) SetSupplementaryConfiguration(v map[string]*string) *ConfigurationItem { + s.SupplementaryConfiguration = v return s } -// SetResourceId sets the ResourceId field's value. -func (s *ComplianceByResource) SetResourceId(v string) *ComplianceByResource { - s.ResourceId = &v +// SetTags sets the Tags field's value. +func (s *ConfigurationItem) SetTags(v map[string]*string) *ConfigurationItem { + s.Tags = v return s } -// SetResourceType sets the ResourceType field's value. -func (s *ComplianceByResource) SetResourceType(v string) *ComplianceByResource { - s.ResourceType = &v +// SetVersion sets the Version field's value. +func (s *ConfigurationItem) SetVersion(v string) *ConfigurationItem { + s.Version = &v return s } -// The number of AWS resources or AWS Config rules responsible for the current -// compliance of the item, up to a maximum number. -type ComplianceContributorCount struct { +// An object that represents the recording of configuration changes of an AWS +// resource. +type ConfigurationRecorder struct { _ struct{} `type:"structure"` - // Indicates whether the maximum count is reached. - CapExceeded *bool `type:"boolean"` + // The name of the recorder. By default, AWS Config automatically assigns the + // name "default" when creating the configuration recorder. You cannot change + // the assigned name. + Name *string `locationName:"name" min:"1" type:"string"` - // The number of AWS resources or AWS Config rules responsible for the current - // compliance of the item. 
- CappedCount *int64 `type:"integer"` + // Specifies the types of AWS resources for which AWS Config records configuration + // changes. + RecordingGroup *RecordingGroup `locationName:"recordingGroup" type:"structure"` + + // Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources + // associated with the account. + RoleARN *string `locationName:"roleARN" type:"string"` } // String returns the string representation -func (s ComplianceContributorCount) String() string { +func (s ConfigurationRecorder) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ComplianceContributorCount) GoString() string { +func (s ConfigurationRecorder) GoString() string { return s.String() } -// SetCapExceeded sets the CapExceeded field's value. -func (s *ComplianceContributorCount) SetCapExceeded(v bool) *ComplianceContributorCount { - s.CapExceeded = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigurationRecorder) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigurationRecorder"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ConfigurationRecorder) SetName(v string) *ConfigurationRecorder { + s.Name = &v return s } -// SetCappedCount sets the CappedCount field's value. -func (s *ComplianceContributorCount) SetCappedCount(v int64) *ComplianceContributorCount { - s.CappedCount = &v +// SetRecordingGroup sets the RecordingGroup field's value. +func (s *ConfigurationRecorder) SetRecordingGroup(v *RecordingGroup) *ConfigurationRecorder { + s.RecordingGroup = v return s } -// The number of AWS Config rules or AWS resources that are compliant and noncompliant. -type ComplianceSummary struct { +// SetRoleARN sets the RoleARN field's value. +func (s *ConfigurationRecorder) SetRoleARN(v string) *ConfigurationRecorder { + s.RoleARN = &v + return s +} + +// The current status of the configuration recorder. +type ConfigurationRecorderStatus struct { _ struct{} `type:"structure"` - // The time that AWS Config created the compliance summary. - ComplianceSummaryTimestamp *time.Time `type:"timestamp"` + // The error code indicating that the recording failed. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` - // The number of AWS Config rules or AWS resources that are compliant, up to - // a maximum of 25 for rules and 100 for resources. - CompliantResourceCount *ComplianceContributorCount `type:"structure"` + // The message indicating that the recording failed due to an error. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` - // The number of AWS Config rules or AWS resources that are noncompliant, up - // to a maximum of 25 for rules and 100 for resources. - NonCompliantResourceCount *ComplianceContributorCount `type:"structure"` + // The time the recorder was last started. + LastStartTime *time.Time `locationName:"lastStartTime" type:"timestamp"` + + // The last (previous) status of the recorder. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"RecorderStatus"` + + // The time when the status was last changed. + LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp"` + + // The time the recorder was last stopped. 
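
The ConfigurationRecorder type above, with its Validate method and the note that the name defaults to "default", composes into a PutConfigurationRecorder call. A hedged sketch follows; the role ARN is a placeholder, and a real role must allow AWS Config to describe the account's resources.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	recorder := &configservice.ConfigurationRecorder{
		// "default" matches the name AWS Config assigns automatically.
		Name: aws.String("default"),
		RecordingGroup: &configservice.RecordingGroup{
			AllSupported:               aws.Bool(true),
			IncludeGlobalResourceTypes: aws.Bool(true),
		},
		// Placeholder ARN; substitute a role AWS Config can assume.
		RoleARN: aws.String("arn:aws:iam::123456789012:role/config-role"),
	}
	// Client-side check of the min:"1" name constraint before calling out.
	if err := recorder.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.PutConfigurationRecorder(&configservice.PutConfigurationRecorderInput{
		ConfigurationRecorder: recorder,
	}); err != nil {
		log.Fatal(err)
	}
}
```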
+ LastStopTime *time.Time `locationName:"lastStopTime" type:"timestamp"` + + // The name of the configuration recorder. + Name *string `locationName:"name" type:"string"` + + // Specifies whether or not the recorder is currently recording. + Recording *bool `locationName:"recording" type:"boolean"` } // String returns the string representation -func (s ComplianceSummary) String() string { +func (s ConfigurationRecorderStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ComplianceSummary) GoString() string { +func (s ConfigurationRecorderStatus) GoString() string { return s.String() } -// SetComplianceSummaryTimestamp sets the ComplianceSummaryTimestamp field's value. -func (s *ComplianceSummary) SetComplianceSummaryTimestamp(v time.Time) *ComplianceSummary { - s.ComplianceSummaryTimestamp = &v +// SetLastErrorCode sets the LastErrorCode field's value. +func (s *ConfigurationRecorderStatus) SetLastErrorCode(v string) *ConfigurationRecorderStatus { + s.LastErrorCode = &v return s } -// SetCompliantResourceCount sets the CompliantResourceCount field's value. -func (s *ComplianceSummary) SetCompliantResourceCount(v *ComplianceContributorCount) *ComplianceSummary { - s.CompliantResourceCount = v +// SetLastErrorMessage sets the LastErrorMessage field's value. +func (s *ConfigurationRecorderStatus) SetLastErrorMessage(v string) *ConfigurationRecorderStatus { + s.LastErrorMessage = &v return s } -// SetNonCompliantResourceCount sets the NonCompliantResourceCount field's value. -func (s *ComplianceSummary) SetNonCompliantResourceCount(v *ComplianceContributorCount) *ComplianceSummary { - s.NonCompliantResourceCount = v +// SetLastStartTime sets the LastStartTime field's value. +func (s *ConfigurationRecorderStatus) SetLastStartTime(v time.Time) *ConfigurationRecorderStatus { + s.LastStartTime = &v return s } -// The number of AWS resources of a specific type that are compliant or noncompliant, -// up to a maximum of 100 for each. -type ComplianceSummaryByResourceType struct { - _ struct{} `type:"structure"` - - // The number of AWS resources that are compliant or noncompliant, up to a maximum - // of 100 for each. - ComplianceSummary *ComplianceSummary `type:"structure"` - - // The type of AWS resource. - ResourceType *string `min:"1" type:"string"` +// SetLastStatus sets the LastStatus field's value. +func (s *ConfigurationRecorderStatus) SetLastStatus(v string) *ConfigurationRecorderStatus { + s.LastStatus = &v + return s } -// String returns the string representation -func (s ComplianceSummaryByResourceType) String() string { - return awsutil.Prettify(s) +// SetLastStatusChangeTime sets the LastStatusChangeTime field's value. +func (s *ConfigurationRecorderStatus) SetLastStatusChangeTime(v time.Time) *ConfigurationRecorderStatus { + s.LastStatusChangeTime = &v + return s } -// GoString returns the string representation -func (s ComplianceSummaryByResourceType) GoString() string { - return s.String() +// SetLastStopTime sets the LastStopTime field's value. +func (s *ConfigurationRecorderStatus) SetLastStopTime(v time.Time) *ConfigurationRecorderStatus { + s.LastStopTime = &v + return s } -// SetComplianceSummary sets the ComplianceSummary field's value. -func (s *ComplianceSummaryByResourceType) SetComplianceSummary(v *ComplianceSummary) *ComplianceSummaryByResourceType { - s.ComplianceSummary = v +// SetName sets the Name field's value. 
+func (s *ConfigurationRecorderStatus) SetName(v string) *ConfigurationRecorderStatus { + s.Name = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *ComplianceSummaryByResourceType) SetResourceType(v string) *ComplianceSummaryByResourceType { - s.ResourceType = &v +// SetRecording sets the Recording field's value. +func (s *ConfigurationRecorderStatus) SetRecording(v bool) *ConfigurationRecorderStatus { + s.Recording = &v return s } -// Provides status of the delivery of the snapshot or the configuration history -// to the specified Amazon S3 bucket. Also provides the status of notifications -// about the Amazon S3 delivery to the specified Amazon SNS topic. -type ConfigExportDeliveryInfo struct { +type DeleteAggregationAuthorizationInput struct { _ struct{} `type:"structure"` - // The time of the last attempted delivery. - LastAttemptTime *time.Time `locationName:"lastAttemptTime" type:"timestamp"` - - // The error code from the last attempted delivery. - LastErrorCode *string `locationName:"lastErrorCode" type:"string"` - - // The error message from the last attempted delivery. - LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` - - // Status of the last attempted delivery. - LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` - - // The time of the last successful delivery. - LastSuccessfulTime *time.Time `locationName:"lastSuccessfulTime" type:"timestamp"` + // The 12-digit account ID of the account authorized to aggregate data. + // + // AuthorizedAccountId is a required field + AuthorizedAccountId *string `type:"string" required:"true"` - // The time that the next delivery occurs. - NextDeliveryTime *time.Time `locationName:"nextDeliveryTime" type:"timestamp"` + // The region authorized to collect aggregated data. + // + // AuthorizedAwsRegion is a required field + AuthorizedAwsRegion *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigExportDeliveryInfo) String() string { +func (s DeleteAggregationAuthorizationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigExportDeliveryInfo) GoString() string { +func (s DeleteAggregationAuthorizationInput) GoString() string { return s.String() } -// SetLastAttemptTime sets the LastAttemptTime field's value. -func (s *ConfigExportDeliveryInfo) SetLastAttemptTime(v time.Time) *ConfigExportDeliveryInfo { - s.LastAttemptTime = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAggregationAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAggregationAuthorizationInput"} + if s.AuthorizedAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizedAccountId")) + } + if s.AuthorizedAwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizedAwsRegion")) + } + if s.AuthorizedAwsRegion != nil && len(*s.AuthorizedAwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AuthorizedAwsRegion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastErrorCode sets the LastErrorCode field's value. -func (s *ConfigExportDeliveryInfo) SetLastErrorCode(v string) *ConfigExportDeliveryInfo { - s.LastErrorCode = &v +// SetAuthorizedAccountId sets the AuthorizedAccountId field's value. 
+func (s *DeleteAggregationAuthorizationInput) SetAuthorizedAccountId(v string) *DeleteAggregationAuthorizationInput { + s.AuthorizedAccountId = &v return s } -// SetLastErrorMessage sets the LastErrorMessage field's value. -func (s *ConfigExportDeliveryInfo) SetLastErrorMessage(v string) *ConfigExportDeliveryInfo { - s.LastErrorMessage = &v +// SetAuthorizedAwsRegion sets the AuthorizedAwsRegion field's value. +func (s *DeleteAggregationAuthorizationInput) SetAuthorizedAwsRegion(v string) *DeleteAggregationAuthorizationInput { + s.AuthorizedAwsRegion = &v return s } -// SetLastStatus sets the LastStatus field's value. -func (s *ConfigExportDeliveryInfo) SetLastStatus(v string) *ConfigExportDeliveryInfo { - s.LastStatus = &v - return s +type DeleteAggregationAuthorizationOutput struct { + _ struct{} `type:"structure"` } -// SetLastSuccessfulTime sets the LastSuccessfulTime field's value. -func (s *ConfigExportDeliveryInfo) SetLastSuccessfulTime(v time.Time) *ConfigExportDeliveryInfo { - s.LastSuccessfulTime = &v - return s +// String returns the string representation +func (s DeleteAggregationAuthorizationOutput) String() string { + return awsutil.Prettify(s) } -// SetNextDeliveryTime sets the NextDeliveryTime field's value. -func (s *ConfigExportDeliveryInfo) SetNextDeliveryTime(v time.Time) *ConfigExportDeliveryInfo { - s.NextDeliveryTime = &v - return s +// GoString returns the string representation +func (s DeleteAggregationAuthorizationOutput) GoString() string { + return s.String() } -// An AWS Config rule represents an AWS Lambda function that you create for -// a custom rule or a predefined function for an AWS managed rule. The function -// evaluates configuration items to assess whether your AWS resources comply -// with your desired configurations. This function can run when AWS Config detects -// a configuration change to an AWS resource and at a periodic frequency that -// you choose (for example, every 24 hours). -// -// You can use the AWS CLI and AWS SDKs if you want to create a rule that triggers -// evaluations for your resources when AWS Config delivers the configuration -// snapshot. For more information, see ConfigSnapshotDeliveryProperties. -// -// For more information about developing and using AWS Config rules, see Evaluating -// AWS Resource Configurations with AWS Config (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) -// in the AWS Config Developer Guide. -type ConfigRule struct { +type DeleteConfigRuleInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS Config rule. - ConfigRuleArn *string `type:"string"` - - // The ID of the AWS Config rule. - ConfigRuleId *string `type:"string"` - - // The name that you assign to the AWS Config rule. The name is required if - // you are adding a new rule. - ConfigRuleName *string `min:"1" type:"string"` - - // Indicates whether the AWS Config rule is active or is currently being deleted - // by AWS Config. It can also indicate the evaluation status for the AWS Config - // rule. - // - // AWS Config sets the state of the rule to EVALUATING temporarily after you - // use the StartConfigRulesEvaluation request to evaluate your resources against - // the AWS Config rule. - // - // AWS Config sets the state of the rule to DELETING_RESULTS temporarily after - // you use the DeleteEvaluationResults request to delete the current evaluation - // results for the AWS Config rule. 
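
Both fields of DeleteAggregationAuthorizationInput are required, as the Validate method above enforces. A minimal usage sketch (account ID and region are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	// Revokes the named account's authorization to aggregate data from
	// the given region into this account's aggregator.
	_, err := svc.DeleteAggregationAuthorization(&configservice.DeleteAggregationAuthorizationInput{
		AuthorizedAccountId: aws.String("123456789012"), // placeholder account
		AuthorizedAwsRegion: aws.String("us-east-2"),    // placeholder region
	})
	if err != nil {
		log.Fatal(err)
	}
}
```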
- // - // AWS Config temporarily sets the state of a rule to DELETING after you use - // the DeleteConfigRule request to delete the rule. After AWS Config deletes - // the rule, the rule and all of its evaluations are erased and are no longer - // available. - ConfigRuleState *string `type:"string" enum:"ConfigRuleState"` - - // Service principal name of the service that created the rule. - // - // The field is populated only if the service linked rule is created by a service. - // The field is empty if you create your own rule. - CreatedBy *string `min:"1" type:"string"` - - // The description that you provide for the AWS Config rule. - Description *string `type:"string"` - - // A string, in JSON format, that is passed to the AWS Config rule Lambda function. - InputParameters *string `min:"1" type:"string"` - - // The maximum frequency with which AWS Config runs evaluations for a rule. - // You can specify a value for MaximumExecutionFrequency when: - // - // * You are using an AWS managed rule that is triggered at a periodic frequency. - // - // * Your custom rule is triggered when AWS Config delivers the configuration - // snapshot. For more information, see ConfigSnapshotDeliveryProperties. - // - // By default, rules with a periodic trigger are evaluated every 24 hours. To - // change the frequency, specify a valid value for the MaximumExecutionFrequency - // parameter. - MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"` - - // Defines which resources can trigger an evaluation for the rule. The scope - // can include one or more resource types, a combination of one resource type - // and one resource ID, or a combination of a tag key and value. Specify a scope - // to constrain the resources that can trigger an evaluation for the rule. If - // you do not specify a scope, evaluations are triggered when any resource in - // the recording group changes. - Scope *Scope `type:"structure"` - - // Provides the rule owner (AWS or customer), the rule identifier, and the notifications - // that cause the function to evaluate your AWS resources. + // The name of the AWS Config rule that you want to delete. // - // Source is a required field - Source *Source `type:"structure" required:"true"` + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigRule) String() string { +func (s DeleteConfigRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigRule) GoString() string { +func (s DeleteConfigRuleInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ConfigRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfigRule"} - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) - } - if s.CreatedBy != nil && len(*s.CreatedBy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CreatedBy", 1)) - } - if s.InputParameters != nil && len(*s.InputParameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InputParameters", 1)) - } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Scope != nil { - if err := s.Scope.Validate(); err != nil { - invalidParams.AddNested("Scope", err.(request.ErrInvalidParams)) - } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigRuleInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) } if invalidParams.Len() > 0 { @@ -6529,106 +8441,111 @@ func (s *ConfigRule) Validate() error { return nil } -// SetConfigRuleArn sets the ConfigRuleArn field's value. -func (s *ConfigRule) SetConfigRuleArn(v string) *ConfigRule { - s.ConfigRuleArn = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *DeleteConfigRuleInput) SetConfigRuleName(v string) *DeleteConfigRuleInput { + s.ConfigRuleName = &v return s } -// SetConfigRuleId sets the ConfigRuleId field's value. -func (s *ConfigRule) SetConfigRuleId(v string) *ConfigRule { - s.ConfigRuleId = &v - return s +type DeleteConfigRuleOutput struct { + _ struct{} `type:"structure"` } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *ConfigRule) SetConfigRuleName(v string) *ConfigRule { - s.ConfigRuleName = &v - return s +// String returns the string representation +func (s DeleteConfigRuleOutput) String() string { + return awsutil.Prettify(s) } -// SetConfigRuleState sets the ConfigRuleState field's value. -func (s *ConfigRule) SetConfigRuleState(v string) *ConfigRule { - s.ConfigRuleState = &v - return s +// GoString returns the string representation +func (s DeleteConfigRuleOutput) GoString() string { + return s.String() } -// SetCreatedBy sets the CreatedBy field's value. -func (s *ConfigRule) SetCreatedBy(v string) *ConfigRule { - s.CreatedBy = &v - return s -} +type DeleteConfigurationAggregatorInput struct { + _ struct{} `type:"structure"` -// SetDescription sets the Description field's value. -func (s *ConfigRule) SetDescription(v string) *ConfigRule { - s.Description = &v - return s + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` } -// SetInputParameters sets the InputParameters field's value. -func (s *ConfigRule) SetInputParameters(v string) *ConfigRule { - s.InputParameters = &v - return s +// String returns the string representation +func (s DeleteConfigurationAggregatorInput) String() string { + return awsutil.Prettify(s) } -// SetMaximumExecutionFrequency sets the MaximumExecutionFrequency field's value. 
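
The Validate implementation on DeleteConfigRuleInput above illustrates the generated client-side validation pattern: required and min-length constraints are checked locally, so a malformed input fails before any request is signed or sent. A self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	// An empty name violates the min:"1" constraint, so Validate fails
	// locally; no network call is needed to surface the error.
	in := &configservice.DeleteConfigRuleInput{
		ConfigRuleName: aws.String(""),
	}
	if err := in.Validate(); err != nil {
		fmt.Println("rejected client-side:", err)
	}
}
```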
-func (s *ConfigRule) SetMaximumExecutionFrequency(v string) *ConfigRule { - s.MaximumExecutionFrequency = &v - return s +// GoString returns the string representation +func (s DeleteConfigurationAggregatorInput) GoString() string { + return s.String() } -// SetScope sets the Scope field's value. -func (s *ConfigRule) SetScope(v *Scope) *ConfigRule { - s.Scope = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationAggregatorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationAggregatorInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) + } + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSource sets the Source field's value. -func (s *ConfigRule) SetSource(v *Source) *ConfigRule { - s.Source = v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *DeleteConfigurationAggregatorInput) SetConfigurationAggregatorName(v string) *DeleteConfigurationAggregatorInput { + s.ConfigurationAggregatorName = &v return s } -// Filters the compliance results based on account ID, region, compliance type, -// and rule name. -type ConfigRuleComplianceFilters struct { +type DeleteConfigurationAggregatorOutput struct { _ struct{} `type:"structure"` +} - // The 12-digit account ID of the source account. - AccountId *string `type:"string"` +// String returns the string representation +func (s DeleteConfigurationAggregatorOutput) String() string { + return awsutil.Prettify(s) +} - // The source region where the data is aggregated. - AwsRegion *string `min:"1" type:"string"` +// GoString returns the string representation +func (s DeleteConfigurationAggregatorOutput) GoString() string { + return s.String() +} - // The rule compliance status. - // - // For the ConfigRuleComplianceFilters data type, AWS Config supports only COMPLIANT - // and NON_COMPLIANT. AWS Config does not support the NOT_APPLICABLE and the - // INSUFFICIENT_DATA values. - ComplianceType *string `type:"string" enum:"ComplianceType"` +// The request object for the DeleteConfigurationRecorder action. +type DeleteConfigurationRecorderInput struct { + _ struct{} `type:"structure"` - // The name of the AWS Config rule. - ConfigRuleName *string `min:"1" type:"string"` + // The name of the configuration recorder to be deleted. You can retrieve the + // name of your configuration recorder by using the DescribeConfigurationRecorders + // action. + // + // ConfigurationRecorderName is a required field + ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigRuleComplianceFilters) String() string { +func (s DeleteConfigurationRecorderInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigRuleComplianceFilters) GoString() string { +func (s DeleteConfigurationRecorderInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ConfigRuleComplianceFilters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfigRuleComplianceFilters"} - if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) +func (s *DeleteConfigurationRecorderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationRecorderInput"} + if s.ConfigurationRecorderName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorderName")) } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + if s.ConfigurationRecorderName != nil && len(*s.ConfigurationRecorderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationRecorderName", 1)) } if invalidParams.Len() > 0 { @@ -6637,56 +8554,55 @@ func (s *ConfigRuleComplianceFilters) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *ConfigRuleComplianceFilters) SetAccountId(v string) *ConfigRuleComplianceFilters { - s.AccountId = &v +// SetConfigurationRecorderName sets the ConfigurationRecorderName field's value. +func (s *DeleteConfigurationRecorderInput) SetConfigurationRecorderName(v string) *DeleteConfigurationRecorderInput { + s.ConfigurationRecorderName = &v return s } -// SetAwsRegion sets the AwsRegion field's value. -func (s *ConfigRuleComplianceFilters) SetAwsRegion(v string) *ConfigRuleComplianceFilters { - s.AwsRegion = &v - return s +type DeleteConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` } -// SetComplianceType sets the ComplianceType field's value. -func (s *ConfigRuleComplianceFilters) SetComplianceType(v string) *ConfigRuleComplianceFilters { - s.ComplianceType = &v - return s +// String returns the string representation +func (s DeleteConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *ConfigRuleComplianceFilters) SetConfigRuleName(v string) *ConfigRuleComplianceFilters { - s.ConfigRuleName = &v - return s +// GoString returns the string representation +func (s DeleteConfigurationRecorderOutput) GoString() string { + return s.String() } -// Filters the results based on the account IDs and regions. -type ConfigRuleComplianceSummaryFilters struct { +// The input for the DeleteDeliveryChannel action. The action accepts the following +// data, in JSON format. +type DeleteDeliveryChannelInput struct { _ struct{} `type:"structure"` - // The 12-digit account ID of the source account. - AccountId *string `type:"string"` - - // The source region where the data is aggregated. - AwsRegion *string `min:"1" type:"string"` + // The name of the delivery channel to delete. + // + // DeliveryChannelName is a required field + DeliveryChannelName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigRuleComplianceSummaryFilters) String() string { +func (s DeleteDeliveryChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigRuleComplianceSummaryFilters) GoString() string { +func (s DeleteDeliveryChannelInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ConfigRuleComplianceSummaryFilters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfigRuleComplianceSummaryFilters"} - if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) +func (s *DeleteDeliveryChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryChannelInput"} + if s.DeliveryChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) + } + if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) } if invalidParams.Len() > 0 { @@ -6695,555 +8611,451 @@ func (s *ConfigRuleComplianceSummaryFilters) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *ConfigRuleComplianceSummaryFilters) SetAccountId(v string) *ConfigRuleComplianceSummaryFilters { - s.AccountId = &v - return s -} - -// SetAwsRegion sets the AwsRegion field's value. -func (s *ConfigRuleComplianceSummaryFilters) SetAwsRegion(v string) *ConfigRuleComplianceSummaryFilters { - s.AwsRegion = &v +// SetDeliveryChannelName sets the DeliveryChannelName field's value. +func (s *DeleteDeliveryChannelInput) SetDeliveryChannelName(v string) *DeleteDeliveryChannelInput { + s.DeliveryChannelName = &v return s } -// Status information for your AWS managed Config rules. The status includes -// information such as the last time the rule ran, the last time it failed, -// and the related error for the last failure. -// -// This action does not return status information about custom AWS Config rules. -type ConfigRuleEvaluationStatus struct { +type DeleteDeliveryChannelOutput struct { _ struct{} `type:"structure"` +} - // The Amazon Resource Name (ARN) of the AWS Config rule. - ConfigRuleArn *string `type:"string"` - - // The ID of the AWS Config rule. - ConfigRuleId *string `type:"string"` - - // The name of the AWS Config rule. - ConfigRuleName *string `min:"1" type:"string"` - - // The time that you first activated the AWS Config rule. - FirstActivatedTime *time.Time `type:"timestamp"` - - // Indicates whether AWS Config has evaluated your resources against the rule - // at least once. - // - // * true - AWS Config has evaluated your AWS resources against the rule - // at least once. - // - // * false - AWS Config has not once finished evaluating your AWS resources - // against the rule. - FirstEvaluationStarted *bool `type:"boolean"` - - // The error code that AWS Config returned when the rule last failed. - LastErrorCode *string `type:"string"` - - // The error message that AWS Config returned when the rule last failed. - LastErrorMessage *string `type:"string"` - - // The time that AWS Config last failed to evaluate your AWS resources against - // the rule. - LastFailedEvaluationTime *time.Time `type:"timestamp"` +// String returns the string representation +func (s DeleteDeliveryChannelOutput) String() string { + return awsutil.Prettify(s) +} - // The time that AWS Config last failed to invoke the AWS Config rule to evaluate - // your AWS resources. - LastFailedInvocationTime *time.Time `type:"timestamp"` +// GoString returns the string representation +func (s DeleteDeliveryChannelOutput) GoString() string { + return s.String() +} - // The time that AWS Config last successfully evaluated your AWS resources against - // the rule. 
-	LastSuccessfulEvaluationTime *time.Time `type:"timestamp"`
+type DeleteEvaluationResultsInput struct {
+	_ struct{} `type:"structure"`

-	// The time that AWS Config last successfully invoked the AWS Config rule to
-	// evaluate your AWS resources.
-	LastSuccessfulInvocationTime *time.Time `type:"timestamp"`
+	// The name of the AWS Config rule for which you want to delete the evaluation
+	// results.
+	//
+	// ConfigRuleName is a required field
+	ConfigRuleName *string `min:"1" type:"string" required:"true"`
 }

 // String returns the string representation
-func (s ConfigRuleEvaluationStatus) String() string {
+func (s DeleteEvaluationResultsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s ConfigRuleEvaluationStatus) GoString() string {
+func (s DeleteEvaluationResultsInput) GoString() string {
 	return s.String()
 }

-// SetConfigRuleArn sets the ConfigRuleArn field's value.
-func (s *ConfigRuleEvaluationStatus) SetConfigRuleArn(v string) *ConfigRuleEvaluationStatus {
-	s.ConfigRuleArn = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEvaluationResultsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteEvaluationResultsInput"}
+	if s.ConfigRuleName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConfigRuleName"))
+	}
+	if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1))
+	}

-// SetConfigRuleId sets the ConfigRuleId field's value.
-func (s *ConfigRuleEvaluationStatus) SetConfigRuleId(v string) *ConfigRuleEvaluationStatus {
-	s.ConfigRuleId = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }

 // SetConfigRuleName sets the ConfigRuleName field's value.
-func (s *ConfigRuleEvaluationStatus) SetConfigRuleName(v string) *ConfigRuleEvaluationStatus {
+func (s *DeleteEvaluationResultsInput) SetConfigRuleName(v string) *DeleteEvaluationResultsInput {
 	s.ConfigRuleName = &v
 	return s
 }

-// SetFirstActivatedTime sets the FirstActivatedTime field's value.
-func (s *ConfigRuleEvaluationStatus) SetFirstActivatedTime(v time.Time) *ConfigRuleEvaluationStatus {
-	s.FirstActivatedTime = &v
-	return s
+// The output when you delete the evaluation results for the specified AWS Config
+// rule.
+type DeleteEvaluationResultsOutput struct {
+	_ struct{} `type:"structure"`
 }

-// SetFirstEvaluationStarted sets the FirstEvaluationStarted field's value.
-func (s *ConfigRuleEvaluationStatus) SetFirstEvaluationStarted(v bool) *ConfigRuleEvaluationStatus {
-	s.FirstEvaluationStarted = &v
-	return s
+// String returns the string representation
+func (s DeleteEvaluationResultsOutput) String() string {
+	return awsutil.Prettify(s)
 }

-// SetLastErrorCode sets the LastErrorCode field's value.
-func (s *ConfigRuleEvaluationStatus) SetLastErrorCode(v string) *ConfigRuleEvaluationStatus {
-	s.LastErrorCode = &v
-	return s
+// GoString returns the string representation
+func (s DeleteEvaluationResultsOutput) GoString() string {
+	return s.String()
 }

-// SetLastErrorMessage sets the LastErrorMessage field's value.
-func (s *ConfigRuleEvaluationStatus) SetLastErrorMessage(v string) *ConfigRuleEvaluationStatus {
-	s.LastErrorMessage = &v
-	return s
+type DeleteOrganizationConfigRuleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the organization config rule that you want to delete.
+ // + // OrganizationConfigRuleName is a required field + OrganizationConfigRuleName *string `min:"1" type:"string" required:"true"` } -// SetLastFailedEvaluationTime sets the LastFailedEvaluationTime field's value. -func (s *ConfigRuleEvaluationStatus) SetLastFailedEvaluationTime(v time.Time) *ConfigRuleEvaluationStatus { - s.LastFailedEvaluationTime = &v - return s +// String returns the string representation +func (s DeleteOrganizationConfigRuleInput) String() string { + return awsutil.Prettify(s) } -// SetLastFailedInvocationTime sets the LastFailedInvocationTime field's value. -func (s *ConfigRuleEvaluationStatus) SetLastFailedInvocationTime(v time.Time) *ConfigRuleEvaluationStatus { - s.LastFailedInvocationTime = &v - return s +// GoString returns the string representation +func (s DeleteOrganizationConfigRuleInput) GoString() string { + return s.String() } -// SetLastSuccessfulEvaluationTime sets the LastSuccessfulEvaluationTime field's value. -func (s *ConfigRuleEvaluationStatus) SetLastSuccessfulEvaluationTime(v time.Time) *ConfigRuleEvaluationStatus { - s.LastSuccessfulEvaluationTime = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteOrganizationConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOrganizationConfigRuleInput"} + if s.OrganizationConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationConfigRuleName")) + } + if s.OrganizationConfigRuleName != nil && len(*s.OrganizationConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationConfigRuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastSuccessfulInvocationTime sets the LastSuccessfulInvocationTime field's value. -func (s *ConfigRuleEvaluationStatus) SetLastSuccessfulInvocationTime(v time.Time) *ConfigRuleEvaluationStatus { - s.LastSuccessfulInvocationTime = &v +// SetOrganizationConfigRuleName sets the OrganizationConfigRuleName field's value. +func (s *DeleteOrganizationConfigRuleInput) SetOrganizationConfigRuleName(v string) *DeleteOrganizationConfigRuleInput { + s.OrganizationConfigRuleName = &v return s } -// Provides options for how often AWS Config delivers configuration snapshots -// to the Amazon S3 bucket in your delivery channel. -// -// The frequency for a rule that triggers evaluations for your resources when -// AWS Config delivers the configuration snapshot is set by one of two values, -// depending on which is less frequent: -// -// * The value for the deliveryFrequency parameter within the delivery channel -// configuration, which sets how often AWS Config delivers configuration -// snapshots. This value also sets how often AWS Config invokes evaluations -// for AWS Config rules. -// -// * The value for the MaximumExecutionFrequency parameter, which sets the -// maximum frequency with which AWS Config invokes evaluations for the rule. -// For more information, see ConfigRule. -// -// If the deliveryFrequency value is less frequent than the MaximumExecutionFrequency -// value for a rule, AWS Config invokes the rule only as often as the deliveryFrequency -// value. -// -// For example, you want your rule to run evaluations when AWS Config delivers -// the configuration snapshot. -// -// You specify the MaximumExecutionFrequency value for Six_Hours. -// -// You then specify the delivery channel deliveryFrequency value for TwentyFour_Hours. 
-// -// Because the value for deliveryFrequency is less frequent than MaximumExecutionFrequency, -// AWS Config invokes evaluations for the rule every 24 hours. -// -// You should set the MaximumExecutionFrequency value to be at least as frequent -// as the deliveryFrequency value. You can view the deliveryFrequency value -// by using the DescribeDeliveryChannnels action. -// -// To update the deliveryFrequency with which AWS Config delivers your configuration -// snapshots, use the PutDeliveryChannel action. -type ConfigSnapshotDeliveryProperties struct { +type DeleteOrganizationConfigRuleOutput struct { _ struct{} `type:"structure"` - - // The frequency with which AWS Config delivers configuration snapshots. - DeliveryFrequency *string `locationName:"deliveryFrequency" type:"string" enum:"MaximumExecutionFrequency"` } // String returns the string representation -func (s ConfigSnapshotDeliveryProperties) String() string { +func (s DeleteOrganizationConfigRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigSnapshotDeliveryProperties) GoString() string { +func (s DeleteOrganizationConfigRuleOutput) GoString() string { return s.String() } -// SetDeliveryFrequency sets the DeliveryFrequency field's value. -func (s *ConfigSnapshotDeliveryProperties) SetDeliveryFrequency(v string) *ConfigSnapshotDeliveryProperties { - s.DeliveryFrequency = &v - return s -} - -// A list that contains the status of the delivery of the configuration stream -// notification to the Amazon SNS topic. -type ConfigStreamDeliveryInfo struct { +type DeletePendingAggregationRequestInput struct { _ struct{} `type:"structure"` - // The error code from the last attempted delivery. - LastErrorCode *string `locationName:"lastErrorCode" type:"string"` - - // The error message from the last attempted delivery. - LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` - - // Status of the last attempted delivery. + // The 12-digit account ID of the account requesting to aggregate data. // - // Note Providing an SNS topic on a DeliveryChannel (https://docs.aws.amazon.com/config/latest/APIReference/API_DeliveryChannel.html) - // for AWS Config is optional. If the SNS delivery is turned off, the last status - // will be Not_Applicable. - LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` + // RequesterAccountId is a required field + RequesterAccountId *string `type:"string" required:"true"` - // The time from the last status change. - LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp"` + // The region requesting to aggregate data. + // + // RequesterAwsRegion is a required field + RequesterAwsRegion *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigStreamDeliveryInfo) String() string { +func (s DeletePendingAggregationRequestInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigStreamDeliveryInfo) GoString() string { +func (s DeletePendingAggregationRequestInput) GoString() string { return s.String() } -// SetLastErrorCode sets the LastErrorCode field's value. -func (s *ConfigStreamDeliveryInfo) SetLastErrorCode(v string) *ConfigStreamDeliveryInfo { - s.LastErrorCode = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePendingAggregationRequestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePendingAggregationRequestInput"} + if s.RequesterAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("RequesterAccountId")) + } + if s.RequesterAwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("RequesterAwsRegion")) + } + if s.RequesterAwsRegion != nil && len(*s.RequesterAwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequesterAwsRegion", 1)) + } -// SetLastErrorMessage sets the LastErrorMessage field's value. -func (s *ConfigStreamDeliveryInfo) SetLastErrorMessage(v string) *ConfigStreamDeliveryInfo { - s.LastErrorMessage = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastStatus sets the LastStatus field's value. -func (s *ConfigStreamDeliveryInfo) SetLastStatus(v string) *ConfigStreamDeliveryInfo { - s.LastStatus = &v +// SetRequesterAccountId sets the RequesterAccountId field's value. +func (s *DeletePendingAggregationRequestInput) SetRequesterAccountId(v string) *DeletePendingAggregationRequestInput { + s.RequesterAccountId = &v return s } -// SetLastStatusChangeTime sets the LastStatusChangeTime field's value. -func (s *ConfigStreamDeliveryInfo) SetLastStatusChangeTime(v time.Time) *ConfigStreamDeliveryInfo { - s.LastStatusChangeTime = &v +// SetRequesterAwsRegion sets the RequesterAwsRegion field's value. +func (s *DeletePendingAggregationRequestInput) SetRequesterAwsRegion(v string) *DeletePendingAggregationRequestInput { + s.RequesterAwsRegion = &v return s } -// The details about the configuration aggregator, including information about -// source accounts, regions, and metadata of the aggregator. -type ConfigurationAggregator struct { +type DeletePendingAggregationRequestOutput struct { _ struct{} `type:"structure"` - - // Provides a list of source accounts and regions to be aggregated. - AccountAggregationSources []*AccountAggregationSource `type:"list"` - - // The Amazon Resource Name (ARN) of the aggregator. - ConfigurationAggregatorArn *string `type:"string"` - - // The name of the aggregator. - ConfigurationAggregatorName *string `min:"1" type:"string"` - - // The time stamp when the configuration aggregator was created. - CreationTime *time.Time `type:"timestamp"` - - // The time of the last update. - LastUpdatedTime *time.Time `type:"timestamp"` - - // Provides an organization and list of regions to be aggregated. - OrganizationAggregationSource *OrganizationAggregationSource `type:"structure"` } // String returns the string representation -func (s ConfigurationAggregator) String() string { +func (s DeletePendingAggregationRequestOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigurationAggregator) GoString() string { +func (s DeletePendingAggregationRequestOutput) GoString() string { return s.String() } -// SetAccountAggregationSources sets the AccountAggregationSources field's value. -func (s *ConfigurationAggregator) SetAccountAggregationSources(v []*AccountAggregationSource) *ConfigurationAggregator { - s.AccountAggregationSources = v - return s +type DeleteRemediationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule for which you want to delete remediation + // configuration. + // + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // The type of a resource. 
+ ResourceType *string `type:"string"` } -// SetConfigurationAggregatorArn sets the ConfigurationAggregatorArn field's value. -func (s *ConfigurationAggregator) SetConfigurationAggregatorArn(v string) *ConfigurationAggregator { - s.ConfigurationAggregatorArn = &v - return s +// String returns the string representation +func (s DeleteRemediationConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *ConfigurationAggregator) SetConfigurationAggregatorName(v string) *ConfigurationAggregator { - s.ConfigurationAggregatorName = &v - return s +// GoString returns the string representation +func (s DeleteRemediationConfigurationInput) GoString() string { + return s.String() } -// SetCreationTime sets the CreationTime field's value. -func (s *ConfigurationAggregator) SetCreationTime(v time.Time) *ConfigurationAggregator { - s.CreationTime = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRemediationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRemediationConfigurationInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *ConfigurationAggregator) SetLastUpdatedTime(v time.Time) *ConfigurationAggregator { - s.LastUpdatedTime = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *DeleteRemediationConfigurationInput) SetConfigRuleName(v string) *DeleteRemediationConfigurationInput { + s.ConfigRuleName = &v return s } -// SetOrganizationAggregationSource sets the OrganizationAggregationSource field's value. -func (s *ConfigurationAggregator) SetOrganizationAggregationSource(v *OrganizationAggregationSource) *ConfigurationAggregator { - s.OrganizationAggregationSource = v +// SetResourceType sets the ResourceType field's value. +func (s *DeleteRemediationConfigurationInput) SetResourceType(v string) *DeleteRemediationConfigurationInput { + s.ResourceType = &v return s } -// A list that contains detailed configurations of a specified resource. -type ConfigurationItem struct { +type DeleteRemediationConfigurationOutput struct { _ struct{} `type:"structure"` +} - // The 12-digit AWS account ID associated with the resource. - AccountId *string `locationName:"accountId" type:"string"` - - // accoun - Arn *string `locationName:"arn" type:"string"` - - // The Availability Zone associated with the resource. - AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - - // The region where the resource resides. - AwsRegion *string `locationName:"awsRegion" min:"1" type:"string"` +// String returns the string representation +func (s DeleteRemediationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} - // The description of the resource configuration. - Configuration *string `locationName:"configuration" type:"string"` +// GoString returns the string representation +func (s DeleteRemediationConfigurationOutput) GoString() string { + return s.String() +} - // The time when the configuration recording was initiated. 
-	ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp"`
+type DeleteRemediationExceptionsInput struct {
+	_ struct{} `type:"structure"`

-	// Unique MD5 hash that represents the configuration item's state.
+	// The name of the AWS Config rule for which you want to delete remediation
+	// exception configuration.
 	//
-	// You can use MD5 hash to compare the states of two or more configuration items
-	// that are associated with the same resource.
-	ConfigurationItemMD5Hash *string `locationName:"configurationItemMD5Hash" type:"string"`
-
-	// The configuration item status.
-	ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"`
-
-	// An identifier that indicates the ordering of the configuration items of a
-	// resource.
-	ConfigurationStateId *string `locationName:"configurationStateId" type:"string"`
+	// ConfigRuleName is a required field
+	ConfigRuleName *string `min:"1" type:"string" required:"true"`

-	// A list of CloudTrail event IDs.
-	//
-	// A populated field indicates that the current configuration was initiated
-	// by the events recorded in the CloudTrail log. For more information about
-	// CloudTrail, see What Is AWS CloudTrail (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+	// A list of resource exception keys to be processed with the current request.
+	// AWS Config adds an exception for each resource key. For example, AWS Config
+	// adds 3 exceptions for 3 resource keys.
 	//
-	// An empty field indicates that the current configuration was not initiated
-	// by any event.
-	RelatedEvents []*string `locationName:"relatedEvents" type:"list"`
-
-	// A list of related AWS resources.
-	Relationships []*Relationship `locationName:"relationships" type:"list"`
-
-	// The time stamp when the resource was created.
-	ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp"`
-
-	// The ID of the resource (for example, sg-xxxxxx).
-	ResourceId *string `locationName:"resourceId" min:"1" type:"string"`
-
-	// The custom name of the resource, if available.
-	ResourceName *string `locationName:"resourceName" type:"string"`
-
-	// The type of AWS resource.
-	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
-
-	// Configuration attributes that AWS Config returns for certain resource types
-	// to supplement the information returned for the configuration parameter.
-	SupplementaryConfiguration map[string]*string `locationName:"supplementaryConfiguration" type:"map"`
-
-	// A mapping of key value tags associated with the resource.
-	Tags map[string]*string `locationName:"tags" type:"map"`
-
-	// The version number of the resource configuration.
-	Version *string `locationName:"version" type:"string"`
+	// ResourceKeys is a required field
+	ResourceKeys []*RemediationExceptionResourceKey `min:"1" type:"list" required:"true"`
 }

 // String returns the string representation
-func (s ConfigurationItem) String() string {
+func (s DeleteRemediationExceptionsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s ConfigurationItem) GoString() string {
+func (s DeleteRemediationExceptionsInput) GoString() string {
 	return s.String()
 }

-// SetAccountId sets the AccountId field's value.
-func (s *ConfigurationItem) SetAccountId(v string) *ConfigurationItem { - s.AccountId = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRemediationExceptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRemediationExceptionsInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + if s.ResourceKeys == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceKeys")) + } + if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) + } + if s.ResourceKeys != nil { + for i, v := range s.ResourceKeys { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) + } + } + } -// SetArn sets the Arn field's value. -func (s *ConfigurationItem) SetArn(v string) *ConfigurationItem { - s.Arn = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *ConfigurationItem) SetAvailabilityZone(v string) *ConfigurationItem { - s.AvailabilityZone = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *DeleteRemediationExceptionsInput) SetConfigRuleName(v string) *DeleteRemediationExceptionsInput { + s.ConfigRuleName = &v return s } -// SetAwsRegion sets the AwsRegion field's value. -func (s *ConfigurationItem) SetAwsRegion(v string) *ConfigurationItem { - s.AwsRegion = &v +// SetResourceKeys sets the ResourceKeys field's value. +func (s *DeleteRemediationExceptionsInput) SetResourceKeys(v []*RemediationExceptionResourceKey) *DeleteRemediationExceptionsInput { + s.ResourceKeys = v return s } -// SetConfiguration sets the Configuration field's value. -func (s *ConfigurationItem) SetConfiguration(v string) *ConfigurationItem { - s.Configuration = &v - return s -} +type DeleteRemediationExceptionsOutput struct { + _ struct{} `type:"structure"` -// SetConfigurationItemCaptureTime sets the ConfigurationItemCaptureTime field's value. -func (s *ConfigurationItem) SetConfigurationItemCaptureTime(v time.Time) *ConfigurationItem { - s.ConfigurationItemCaptureTime = &v - return s + // Returns a list of failed delete remediation exceptions batch objects. Each + // object in the batch consists of a list of failed items and failure messages. + FailedBatches []*FailedDeleteRemediationExceptionsBatch `type:"list"` } -// SetConfigurationItemMD5Hash sets the ConfigurationItemMD5Hash field's value. -func (s *ConfigurationItem) SetConfigurationItemMD5Hash(v string) *ConfigurationItem { - s.ConfigurationItemMD5Hash = &v - return s +// String returns the string representation +func (s DeleteRemediationExceptionsOutput) String() string { + return awsutil.Prettify(s) } -// SetConfigurationItemStatus sets the ConfigurationItemStatus field's value. -func (s *ConfigurationItem) SetConfigurationItemStatus(v string) *ConfigurationItem { - s.ConfigurationItemStatus = &v - return s +// GoString returns the string representation +func (s DeleteRemediationExceptionsOutput) GoString() string { + return s.String() } -// SetConfigurationStateId sets the ConfigurationStateId field's value. 
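// ---------------------------------------------------------------------------
// Editorial sketch, not vendored code: DeleteRemediationExceptionsInput is the
// one input in this hunk whose Validate recurses into a child collection --
// each RemediationExceptionResourceKey is validated in place, and failures are
// reported under an indexed context such as "ResourceKeys[0]". A caller might
// assemble the request as follows, reusing the illustrative `svc` client from
// the earlier sketch; the rule name and resource ID are hypothetical:
//
//	keys := []*configservice.RemediationExceptionResourceKey{
//		(&configservice.RemediationExceptionResourceKey{}).
//			SetResourceId("i-0123456789abcdef0").
//			SetResourceType("AWS::EC2::Instance"),
//	}
//	input := (&configservice.DeleteRemediationExceptionsInput{}).
//		SetConfigRuleName("required-tags").
//		SetResourceKeys(keys)
//	out, err := svc.DeleteRemediationExceptions(input)
//	if err == nil {
//		_ = out.FailedBatches // any keys AWS Config could not delete
//	}
// ---------------------------------------------------------------------------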
-func (s *ConfigurationItem) SetConfigurationStateId(v string) *ConfigurationItem { - s.ConfigurationStateId = &v +// SetFailedBatches sets the FailedBatches field's value. +func (s *DeleteRemediationExceptionsOutput) SetFailedBatches(v []*FailedDeleteRemediationExceptionsBatch) *DeleteRemediationExceptionsOutput { + s.FailedBatches = v return s } -// SetRelatedEvents sets the RelatedEvents field's value. -func (s *ConfigurationItem) SetRelatedEvents(v []*string) *ConfigurationItem { - s.RelatedEvents = v - return s -} +type DeleteRetentionConfigurationInput struct { + _ struct{} `type:"structure"` -// SetRelationships sets the Relationships field's value. -func (s *ConfigurationItem) SetRelationships(v []*Relationship) *ConfigurationItem { - s.Relationships = v - return s + // The name of the retention configuration to delete. + // + // RetentionConfigurationName is a required field + RetentionConfigurationName *string `min:"1" type:"string" required:"true"` } -// SetResourceCreationTime sets the ResourceCreationTime field's value. -func (s *ConfigurationItem) SetResourceCreationTime(v time.Time) *ConfigurationItem { - s.ResourceCreationTime = &v - return s +// String returns the string representation +func (s DeleteRetentionConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetResourceId sets the ResourceId field's value. -func (s *ConfigurationItem) SetResourceId(v string) *ConfigurationItem { - s.ResourceId = &v - return s +// GoString returns the string representation +func (s DeleteRetentionConfigurationInput) GoString() string { + return s.String() } -// SetResourceName sets the ResourceName field's value. -func (s *ConfigurationItem) SetResourceName(v string) *ConfigurationItem { - s.ResourceName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRetentionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionConfigurationInput"} + if s.RetentionConfigurationName == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionConfigurationName")) + } + if s.RetentionConfigurationName != nil && len(*s.RetentionConfigurationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RetentionConfigurationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetResourceType sets the ResourceType field's value. -func (s *ConfigurationItem) SetResourceType(v string) *ConfigurationItem { - s.ResourceType = &v +// SetRetentionConfigurationName sets the RetentionConfigurationName field's value. +func (s *DeleteRetentionConfigurationInput) SetRetentionConfigurationName(v string) *DeleteRetentionConfigurationInput { + s.RetentionConfigurationName = &v return s } -// SetSupplementaryConfiguration sets the SupplementaryConfiguration field's value. -func (s *ConfigurationItem) SetSupplementaryConfiguration(v map[string]*string) *ConfigurationItem { - s.SupplementaryConfiguration = v - return s +type DeleteRetentionConfigurationOutput struct { + _ struct{} `type:"structure"` } -// SetTags sets the Tags field's value. -func (s *ConfigurationItem) SetTags(v map[string]*string) *ConfigurationItem { - s.Tags = v - return s +// String returns the string representation +func (s DeleteRetentionConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetVersion sets the Version field's value. 
-func (s *ConfigurationItem) SetVersion(v string) *ConfigurationItem { - s.Version = &v - return s +// GoString returns the string representation +func (s DeleteRetentionConfigurationOutput) GoString() string { + return s.String() } -// An object that represents the recording of configuration changes of an AWS -// resource. -type ConfigurationRecorder struct { +// The input for the DeliverConfigSnapshot action. +type DeliverConfigSnapshotInput struct { _ struct{} `type:"structure"` - // The name of the recorder. By default, AWS Config automatically assigns the - // name "default" when creating the configuration recorder. You cannot change - // the assigned name. - Name *string `locationName:"name" min:"1" type:"string"` - - // Specifies the types of AWS resources for which AWS Config records configuration - // changes. - RecordingGroup *RecordingGroup `locationName:"recordingGroup" type:"structure"` - - // Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources - // associated with the account. - RoleARN *string `locationName:"roleARN" type:"string"` + // The name of the delivery channel through which the snapshot is delivered. + // + // DeliveryChannelName is a required field + DeliveryChannelName *string `locationName:"deliveryChannelName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigurationRecorder) String() string { +func (s DeliverConfigSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigurationRecorder) GoString() string { +func (s DeliverConfigSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ConfigurationRecorder) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfigurationRecorder"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *DeliverConfigSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeliverConfigSnapshotInput"} + if s.DeliveryChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) + } + if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) } if invalidParams.Len() > 0 { @@ -7252,207 +9064,226 @@ func (s *ConfigurationRecorder) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *ConfigurationRecorder) SetName(v string) *ConfigurationRecorder { - s.Name = &v +// SetDeliveryChannelName sets the DeliveryChannelName field's value. +func (s *DeliverConfigSnapshotInput) SetDeliveryChannelName(v string) *DeliverConfigSnapshotInput { + s.DeliveryChannelName = &v return s } -// SetRecordingGroup sets the RecordingGroup field's value. -func (s *ConfigurationRecorder) SetRecordingGroup(v *RecordingGroup) *ConfigurationRecorder { - s.RecordingGroup = v - return s -} +// The output for the DeliverConfigSnapshot action, in JSON format. +type DeliverConfigSnapshotOutput struct { + _ struct{} `type:"structure"` -// SetRoleARN sets the RoleARN field's value. -func (s *ConfigurationRecorder) SetRoleARN(v string) *ConfigurationRecorder { - s.RoleARN = &v - return s + // The ID of the snapshot that is being created. + ConfigSnapshotId *string `locationName:"configSnapshotId" type:"string"` } -// The current status of the configuration recorder. 
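// ---------------------------------------------------------------------------
// Editorial sketch, not vendored code: DeliverConfigSnapshot is the immediate
// "deliver now" action in this hunk -- it takes only a delivery channel name
// and returns the ID of the snapshot being created. Reusing the illustrative
// `svc` client from the earlier sketch:
//
//	out, err := svc.DeliverConfigSnapshot(
//		(&configservice.DeliverConfigSnapshotInput{}).
//			SetDeliveryChannelName("default"))
//	if err == nil && out.ConfigSnapshotId != nil {
//		fmt.Println(*out.ConfigSnapshotId)
//	}
// ---------------------------------------------------------------------------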
-type ConfigurationRecorderStatus struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s DeliverConfigSnapshotOutput) String() string { + return awsutil.Prettify(s) +} - // The error code indicating that the recording failed. - LastErrorCode *string `locationName:"lastErrorCode" type:"string"` +// GoString returns the string representation +func (s DeliverConfigSnapshotOutput) GoString() string { + return s.String() +} - // The message indicating that the recording failed due to an error. - LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` +// SetConfigSnapshotId sets the ConfigSnapshotId field's value. +func (s *DeliverConfigSnapshotOutput) SetConfigSnapshotId(v string) *DeliverConfigSnapshotOutput { + s.ConfigSnapshotId = &v + return s +} - // The time the recorder was last started. - LastStartTime *time.Time `locationName:"lastStartTime" type:"timestamp"` +// The channel through which AWS Config delivers notifications and updated configuration +// states. +type DeliveryChannel struct { + _ struct{} `type:"structure"` - // The last (previous) status of the recorder. - LastStatus *string `locationName:"lastStatus" type:"string" enum:"RecorderStatus"` + // The options for how often AWS Config delivers configuration snapshots to + // the Amazon S3 bucket. + ConfigSnapshotDeliveryProperties *ConfigSnapshotDeliveryProperties `locationName:"configSnapshotDeliveryProperties" type:"structure"` - // The time when the status was last changed. - LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp"` + // The name of the delivery channel. By default, AWS Config assigns the name + // "default" when creating the delivery channel. To change the delivery channel + // name, you must use the DeleteDeliveryChannel action to delete your current + // delivery channel, and then you must use the PutDeliveryChannel command to + // create a delivery channel that has the desired name. + Name *string `locationName:"name" min:"1" type:"string"` - // The time the recorder was last stopped. - LastStopTime *time.Time `locationName:"lastStopTime" type:"timestamp"` + // The name of the Amazon S3 bucket to which AWS Config delivers configuration + // snapshots and configuration history files. + // + // If you specify a bucket that belongs to another AWS account, that bucket + // must have policies that grant access permissions to AWS Config. For more + // information, see Permissions for the Amazon S3 Bucket (https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-policy.html) + // in the AWS Config Developer Guide. + S3BucketName *string `locationName:"s3BucketName" type:"string"` - // The name of the configuration recorder. - Name *string `locationName:"name" type:"string"` + // The prefix for the specified Amazon S3 bucket. + S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"` - // Specifies whether or not the recorder is currently recording. - Recording *bool `locationName:"recording" type:"boolean"` + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config + // sends notifications about configuration changes. + // + // If you choose a topic from another account, the topic must have policies + // that grant access permissions to AWS Config. For more information, see Permissions + // for the Amazon SNS Topic (https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html) + // in the AWS Config Developer Guide. 
+ SnsTopicARN *string `locationName:"snsTopicARN" type:"string"` } // String returns the string representation -func (s ConfigurationRecorderStatus) String() string { +func (s DeliveryChannel) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigurationRecorderStatus) GoString() string { +func (s DeliveryChannel) GoString() string { return s.String() } -// SetLastErrorCode sets the LastErrorCode field's value. -func (s *ConfigurationRecorderStatus) SetLastErrorCode(v string) *ConfigurationRecorderStatus { - s.LastErrorCode = &v - return s -} - -// SetLastErrorMessage sets the LastErrorMessage field's value. -func (s *ConfigurationRecorderStatus) SetLastErrorMessage(v string) *ConfigurationRecorderStatus { - s.LastErrorMessage = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeliveryChannel) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeliveryChannel"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } -// SetLastStartTime sets the LastStartTime field's value. -func (s *ConfigurationRecorderStatus) SetLastStartTime(v time.Time) *ConfigurationRecorderStatus { - s.LastStartTime = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastStatus sets the LastStatus field's value. -func (s *ConfigurationRecorderStatus) SetLastStatus(v string) *ConfigurationRecorderStatus { - s.LastStatus = &v +// SetConfigSnapshotDeliveryProperties sets the ConfigSnapshotDeliveryProperties field's value. +func (s *DeliveryChannel) SetConfigSnapshotDeliveryProperties(v *ConfigSnapshotDeliveryProperties) *DeliveryChannel { + s.ConfigSnapshotDeliveryProperties = v return s } -// SetLastStatusChangeTime sets the LastStatusChangeTime field's value. -func (s *ConfigurationRecorderStatus) SetLastStatusChangeTime(v time.Time) *ConfigurationRecorderStatus { - s.LastStatusChangeTime = &v +// SetName sets the Name field's value. +func (s *DeliveryChannel) SetName(v string) *DeliveryChannel { + s.Name = &v return s } -// SetLastStopTime sets the LastStopTime field's value. -func (s *ConfigurationRecorderStatus) SetLastStopTime(v time.Time) *ConfigurationRecorderStatus { - s.LastStopTime = &v +// SetS3BucketName sets the S3BucketName field's value. +func (s *DeliveryChannel) SetS3BucketName(v string) *DeliveryChannel { + s.S3BucketName = &v return s } -// SetName sets the Name field's value. -func (s *ConfigurationRecorderStatus) SetName(v string) *ConfigurationRecorderStatus { - s.Name = &v +// SetS3KeyPrefix sets the S3KeyPrefix field's value. +func (s *DeliveryChannel) SetS3KeyPrefix(v string) *DeliveryChannel { + s.S3KeyPrefix = &v return s } -// SetRecording sets the Recording field's value. -func (s *ConfigurationRecorderStatus) SetRecording(v bool) *ConfigurationRecorderStatus { - s.Recording = &v +// SetSnsTopicARN sets the SnsTopicARN field's value. +func (s *DeliveryChannel) SetSnsTopicARN(v string) *DeliveryChannel { + s.SnsTopicARN = &v return s } -type DeleteAggregationAuthorizationInput struct { +// The status of a specified delivery channel. +// +// Valid values: Success | Failure +type DeliveryChannelStatus struct { _ struct{} `type:"structure"` - // The 12-digit account ID of the account authorized to aggregate data. 
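// ---------------------------------------------------------------------------
// Editorial sketch, not vendored code: a DeliveryChannel ties together the S3
// bucket, optional SNS topic, and snapshot frequency documented above. Only
// Name carries a client-side Validate constraint; bucket and topic permissions
// are checked server-side. The bucket, topic ARN, and frequency below are
// illustrative, and the channel would be installed with the PutDeliveryChannel
// action, which lies outside this hunk:
//
//	channel := (&configservice.DeliveryChannel{}).
//		SetName("default").
//		SetS3BucketName("my-config-bucket").
//		SetSnsTopicARN("arn:aws:sns:us-east-1:111122223333:config-topic").
//		SetConfigSnapshotDeliveryProperties(
//			(&configservice.ConfigSnapshotDeliveryProperties{}).
//				SetDeliveryFrequency("TwentyFour_Hours"))
//	if err := channel.Validate(); err != nil {
//		log.Fatal(err)
//	}
// ---------------------------------------------------------------------------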
- // - // AuthorizedAccountId is a required field - AuthorizedAccountId *string `type:"string" required:"true"` + // A list that contains the status of the delivery of the configuration history + // to the specified Amazon S3 bucket. + ConfigHistoryDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configHistoryDeliveryInfo" type:"structure"` - // The region authorized to collect aggregated data. - // - // AuthorizedAwsRegion is a required field - AuthorizedAwsRegion *string `min:"1" type:"string" required:"true"` + // A list containing the status of the delivery of the snapshot to the specified + // Amazon S3 bucket. + ConfigSnapshotDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configSnapshotDeliveryInfo" type:"structure"` + + // A list containing the status of the delivery of the configuration stream + // notification to the specified Amazon SNS topic. + ConfigStreamDeliveryInfo *ConfigStreamDeliveryInfo `locationName:"configStreamDeliveryInfo" type:"structure"` + + // The name of the delivery channel. + Name *string `locationName:"name" type:"string"` } // String returns the string representation -func (s DeleteAggregationAuthorizationInput) String() string { +func (s DeliveryChannelStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAggregationAuthorizationInput) GoString() string { +func (s DeliveryChannelStatus) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAggregationAuthorizationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAggregationAuthorizationInput"} - if s.AuthorizedAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AuthorizedAccountId")) - } - if s.AuthorizedAwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("AuthorizedAwsRegion")) - } - if s.AuthorizedAwsRegion != nil && len(*s.AuthorizedAwsRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AuthorizedAwsRegion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAuthorizedAccountId sets the AuthorizedAccountId field's value. -func (s *DeleteAggregationAuthorizationInput) SetAuthorizedAccountId(v string) *DeleteAggregationAuthorizationInput { - s.AuthorizedAccountId = &v +// SetConfigHistoryDeliveryInfo sets the ConfigHistoryDeliveryInfo field's value. +func (s *DeliveryChannelStatus) SetConfigHistoryDeliveryInfo(v *ConfigExportDeliveryInfo) *DeliveryChannelStatus { + s.ConfigHistoryDeliveryInfo = v return s } -// SetAuthorizedAwsRegion sets the AuthorizedAwsRegion field's value. -func (s *DeleteAggregationAuthorizationInput) SetAuthorizedAwsRegion(v string) *DeleteAggregationAuthorizationInput { - s.AuthorizedAwsRegion = &v +// SetConfigSnapshotDeliveryInfo sets the ConfigSnapshotDeliveryInfo field's value. +func (s *DeliveryChannelStatus) SetConfigSnapshotDeliveryInfo(v *ConfigExportDeliveryInfo) *DeliveryChannelStatus { + s.ConfigSnapshotDeliveryInfo = v return s } -type DeleteAggregationAuthorizationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteAggregationAuthorizationOutput) String() string { - return awsutil.Prettify(s) +// SetConfigStreamDeliveryInfo sets the ConfigStreamDeliveryInfo field's value. 
+func (s *DeliveryChannelStatus) SetConfigStreamDeliveryInfo(v *ConfigStreamDeliveryInfo) *DeliveryChannelStatus { + s.ConfigStreamDeliveryInfo = v + return s } -// GoString returns the string representation -func (s DeleteAggregationAuthorizationOutput) GoString() string { - return s.String() +// SetName sets the Name field's value. +func (s *DeliveryChannelStatus) SetName(v string) *DeliveryChannelStatus { + s.Name = &v + return s } -type DeleteConfigRuleInput struct { +type DescribeAggregateComplianceByConfigRulesInput struct { _ struct{} `type:"structure"` - // The name of the AWS Config rule that you want to delete. + // The name of the configuration aggregator. // - // ConfigRuleName is a required field - ConfigRuleName *string `min:"1" type:"string" required:"true"` + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + + // Filters the results by ConfigRuleComplianceFilters object. + Filters *ConfigRuleComplianceFilters `type:"structure"` + + // The maximum number of evaluation results returned on each page. The default + // is maximum. If you specify 0, AWS Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteConfigRuleInput) String() string { +func (s DescribeAggregateComplianceByConfigRulesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConfigRuleInput) GoString() string { +func (s DescribeAggregateComplianceByConfigRulesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteConfigRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteConfigRuleInput"} - if s.ConfigRuleName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) +func (s *DescribeAggregateComplianceByConfigRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAggregateComplianceByConfigRulesInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -7461,168 +9292,254 @@ func (s *DeleteConfigRuleInput) Validate() error { return nil } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *DeleteConfigRuleInput) SetConfigRuleName(v string) *DeleteConfigRuleInput { - s.ConfigRuleName = &v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *DescribeAggregateComplianceByConfigRulesInput) SetConfigurationAggregatorName(v string) *DescribeAggregateComplianceByConfigRulesInput { + s.ConfigurationAggregatorName = &v return s } -type DeleteConfigRuleOutput struct { +// SetFilters sets the Filters field's value. 
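// ---------------------------------------------------------------------------
// Editorial sketch, not vendored code: the aggregate-compliance query composes
// the ConfigRuleComplianceFilters type (removed above only as part of the
// generated file's re-ordering; it still exists in the new revision) with a
// configuration aggregator name. The aggregator name and filter values are
// hypothetical:
//
//	input := (&configservice.DescribeAggregateComplianceByConfigRulesInput{}).
//		SetConfigurationAggregatorName("org-aggregator").
//		SetFilters((&configservice.ConfigRuleComplianceFilters{}).
//			SetComplianceType("NON_COMPLIANT").
//			SetAwsRegion("us-east-1"))
//	out, err := svc.DescribeAggregateComplianceByConfigRules(input)
//	if err == nil {
//		_ = out.AggregateComplianceByConfigRules
//	}
// ---------------------------------------------------------------------------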
+func (s *DescribeAggregateComplianceByConfigRulesInput) SetFilters(v *ConfigRuleComplianceFilters) *DescribeAggregateComplianceByConfigRulesInput {
+	s.Filters = v
+	return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *DescribeAggregateComplianceByConfigRulesInput) SetLimit(v int64) *DescribeAggregateComplianceByConfigRulesInput {
+	s.Limit = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeAggregateComplianceByConfigRulesInput) SetNextToken(v string) *DescribeAggregateComplianceByConfigRulesInput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeAggregateComplianceByConfigRulesOutput struct {
 	_ struct{} `type:"structure"`
+
+	// Returns a list of AggregateComplianceByConfigRule objects.
+	AggregateComplianceByConfigRules []*AggregateComplianceByConfigRule `type:"list"`
+
+	// The nextToken string returned on a previous page that you use to get the
+	// next page of results in a paginated response.
+	NextToken *string `type:"string"`
 }

 // String returns the string representation
-func (s DeleteConfigRuleOutput) String() string {
+func (s DescribeAggregateComplianceByConfigRulesOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s DeleteConfigRuleOutput) GoString() string {
+func (s DescribeAggregateComplianceByConfigRulesOutput) GoString() string {
 	return s.String()
 }

-type DeleteConfigurationAggregatorInput struct {
+// SetAggregateComplianceByConfigRules sets the AggregateComplianceByConfigRules field's value.
+func (s *DescribeAggregateComplianceByConfigRulesOutput) SetAggregateComplianceByConfigRules(v []*AggregateComplianceByConfigRule) *DescribeAggregateComplianceByConfigRulesOutput {
+	s.AggregateComplianceByConfigRules = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeAggregateComplianceByConfigRulesOutput) SetNextToken(v string) *DescribeAggregateComplianceByConfigRulesOutput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeAggregationAuthorizationsInput struct {
 	_ struct{} `type:"structure"`

-	// The name of the configuration aggregator.
-	//
-	// ConfigurationAggregatorName is a required field
-	ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"`
+	// The maximum number of AggregationAuthorizations returned on each page. The
+	// default is maximum. If you specify 0, AWS Config uses the default.
+	Limit *int64 `type:"integer"`
+
+	// The nextToken string returned on a previous page that you use to get the
+	// next page of results in a paginated response.
+	NextToken *string `type:"string"`
 }

 // String returns the string representation
-func (s DeleteConfigurationAggregatorInput) String() string {
+func (s DescribeAggregationAuthorizationsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s DeleteConfigurationAggregatorInput) GoString() string {
+func (s DescribeAggregationAuthorizationsInput) GoString() string {
 	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteConfigurationAggregatorInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationAggregatorInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLimit sets the Limit field's value. +func (s *DescribeAggregationAuthorizationsInput) SetLimit(v int64) *DescribeAggregationAuthorizationsInput { + s.Limit = &v + return s } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *DeleteConfigurationAggregatorInput) SetConfigurationAggregatorName(v string) *DeleteConfigurationAggregatorInput { - s.ConfigurationAggregatorName = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribeAggregationAuthorizationsInput) SetNextToken(v string) *DescribeAggregationAuthorizationsInput { + s.NextToken = &v return s } -type DeleteConfigurationAggregatorOutput struct { +type DescribeAggregationAuthorizationsOutput struct { _ struct{} `type:"structure"` + + // Returns a list of authorizations granted to various aggregator accounts and + // regions. + AggregationAuthorizations []*AggregationAuthorization `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteConfigurationAggregatorOutput) String() string { +func (s DescribeAggregationAuthorizationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConfigurationAggregatorOutput) GoString() string { +func (s DescribeAggregationAuthorizationsOutput) GoString() string { return s.String() } -// The request object for the DeleteConfigurationRecorder action. -type DeleteConfigurationRecorderInput struct { +// SetAggregationAuthorizations sets the AggregationAuthorizations field's value. +func (s *DescribeAggregationAuthorizationsOutput) SetAggregationAuthorizations(v []*AggregationAuthorization) *DescribeAggregationAuthorizationsOutput { + s.AggregationAuthorizations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAggregationAuthorizationsOutput) SetNextToken(v string) *DescribeAggregationAuthorizationsOutput { + s.NextToken = &v + return s +} + +type DescribeComplianceByConfigRuleInput struct { _ struct{} `type:"structure"` - // The name of the configuration recorder to be deleted. You can retrieve the - // name of your configuration recorder by using the DescribeConfigurationRecorders - // action. + // Filters the results by compliance. // - // ConfigurationRecorderName is a required field - ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` + // The allowed values are COMPLIANT and NON_COMPLIANT. + ComplianceTypes []*string `type:"list"` + + // Specify one or more AWS Config rule names to filter the results by rule. + ConfigRuleNames []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. 
+ NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteConfigurationRecorderInput) String() string { +func (s DescribeComplianceByConfigRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConfigurationRecorderInput) GoString() string { +func (s DescribeComplianceByConfigRuleInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteConfigurationRecorderInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationRecorderInput"} - if s.ConfigurationRecorderName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorderName")) - } - if s.ConfigurationRecorderName != nil && len(*s.ConfigurationRecorderName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationRecorderName", 1)) - } +// SetComplianceTypes sets the ComplianceTypes field's value. +func (s *DescribeComplianceByConfigRuleInput) SetComplianceTypes(v []*string) *DescribeComplianceByConfigRuleInput { + s.ComplianceTypes = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetConfigRuleNames sets the ConfigRuleNames field's value. +func (s *DescribeComplianceByConfigRuleInput) SetConfigRuleNames(v []*string) *DescribeComplianceByConfigRuleInput { + s.ConfigRuleNames = v + return s } -// SetConfigurationRecorderName sets the ConfigurationRecorderName field's value. -func (s *DeleteConfigurationRecorderInput) SetConfigurationRecorderName(v string) *DeleteConfigurationRecorderInput { - s.ConfigurationRecorderName = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribeComplianceByConfigRuleInput) SetNextToken(v string) *DescribeComplianceByConfigRuleInput { + s.NextToken = &v return s } -type DeleteConfigurationRecorderOutput struct { +type DescribeComplianceByConfigRuleOutput struct { _ struct{} `type:"structure"` + + // Indicates whether each of the specified AWS Config rules is compliant. + ComplianceByConfigRules []*ComplianceByConfigRule `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteConfigurationRecorderOutput) String() string { +func (s DescribeComplianceByConfigRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConfigurationRecorderOutput) GoString() string { +func (s DescribeComplianceByConfigRuleOutput) GoString() string { return s.String() } -// The input for the DeleteDeliveryChannel action. The action accepts the following -// data, in JSON format. -type DeleteDeliveryChannelInput struct { +// SetComplianceByConfigRules sets the ComplianceByConfigRules field's value. +func (s *DescribeComplianceByConfigRuleOutput) SetComplianceByConfigRules(v []*ComplianceByConfigRule) *DescribeComplianceByConfigRuleOutput { + s.ComplianceByConfigRules = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeComplianceByConfigRuleOutput) SetNextToken(v string) *DescribeComplianceByConfigRuleOutput { + s.NextToken = &v + return s +} + +type DescribeComplianceByResourceInput struct { _ struct{} `type:"structure"` - // The name of the delivery channel to delete. 
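// ---------------------------------------------------------------------------
// Editorial sketch, not vendored code: the Describe* input/output pairs above
// all share the Limit/NextToken convention, so a caller pages by echoing the
// returned token back until it is nil. With the illustrative `svc` client from
// the earlier sketches:
//
//	input := &configservice.DescribeComplianceByConfigRuleInput{}
//	for {
//		out, err := svc.DescribeComplianceByConfigRule(input)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, c := range out.ComplianceByConfigRules {
//			_ = c // consume each ComplianceByConfigRule here
//		}
//		if out.NextToken == nil {
//			break
//		}
//		input.NextToken = out.NextToken
//	}
// ---------------------------------------------------------------------------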
- // - // DeliveryChannelName is a required field - DeliveryChannelName *string `min:"1" type:"string" required:"true"` + // Filters the results by compliance. + // + // The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA. + ComplianceTypes []*string `type:"list"` + + // The maximum number of evaluation results returned on each page. The default + // is 10. You cannot specify a number greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. You + // can specify only one resource ID. If you specify a resource ID, you must + // also specify a type for ResourceType. + ResourceId *string `min:"1" type:"string"` + + // The types of AWS resources for which you want compliance information (for + // example, AWS::EC2::Instance). For this action, you can specify that the resource + // type is an AWS account by specifying AWS::::Account. + ResourceType *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteDeliveryChannelInput) String() string { +func (s DescribeComplianceByResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDeliveryChannelInput) GoString() string { +func (s DescribeComplianceByResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDeliveryChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryChannelInput"} - if s.DeliveryChannelName == nil { - invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) +func (s *DescribeComplianceByResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeComplianceByResourceInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) } - if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) } if invalidParams.Len() > 0 { @@ -7631,184 +9548,268 @@ func (s *DeleteDeliveryChannelInput) Validate() error { return nil } -// SetDeliveryChannelName sets the DeliveryChannelName field's value. -func (s *DeleteDeliveryChannelInput) SetDeliveryChannelName(v string) *DeleteDeliveryChannelInput { - s.DeliveryChannelName = &v +// SetComplianceTypes sets the ComplianceTypes field's value. +func (s *DescribeComplianceByResourceInput) SetComplianceTypes(v []*string) *DescribeComplianceByResourceInput { + s.ComplianceTypes = v return s } -type DeleteDeliveryChannelOutput struct { +// SetLimit sets the Limit field's value. +func (s *DescribeComplianceByResourceInput) SetLimit(v int64) *DescribeComplianceByResourceInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeComplianceByResourceInput) SetNextToken(v string) *DescribeComplianceByResourceInput { + s.NextToken = &v + return s +} + +// SetResourceId sets the ResourceId field's value. 
+func (s *DescribeComplianceByResourceInput) SetResourceId(v string) *DescribeComplianceByResourceInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *DescribeComplianceByResourceInput) SetResourceType(v string) *DescribeComplianceByResourceInput { + s.ResourceType = &v + return s +} + +type DescribeComplianceByResourceOutput struct { _ struct{} `type:"structure"` + + // Indicates whether the specified AWS resource complies with all of the AWS + // Config rules that evaluate it. + ComplianceByResources []*ComplianceByResource `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteDeliveryChannelOutput) String() string { +func (s DescribeComplianceByResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDeliveryChannelOutput) GoString() string { +func (s DescribeComplianceByResourceOutput) GoString() string { return s.String() } -type DeleteEvaluationResultsInput struct { +// SetComplianceByResources sets the ComplianceByResources field's value. +func (s *DescribeComplianceByResourceOutput) SetComplianceByResources(v []*ComplianceByResource) *DescribeComplianceByResourceOutput { + s.ComplianceByResources = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeComplianceByResourceOutput) SetNextToken(v string) *DescribeComplianceByResourceOutput { + s.NextToken = &v + return s +} + +type DescribeConfigRuleEvaluationStatusInput struct { _ struct{} `type:"structure"` - // The name of the AWS Config rule for which you want to delete the evaluation - // results. + // The name of the AWS managed Config rules for which you want status information. + // If you do not specify any names, AWS Config returns status information for + // all AWS managed Config rules that you use. + ConfigRuleNames []*string `type:"list"` + + // The number of rule evaluation results that you want returned. // - // ConfigRuleName is a required field - ConfigRuleName *string `min:"1" type:"string" required:"true"` + // This parameter is required if the rule limit for your account is more than + // the default of 150 rules. + // + // For information about requesting a rule limit increase, see AWS Config Limits + // (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config) + // in the AWS General Reference Guide. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteEvaluationResultsInput) String() string { +func (s DescribeConfigRuleEvaluationStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEvaluationResultsInput) GoString() string { +func (s DescribeConfigRuleEvaluationStatusInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteEvaluationResultsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEvaluationResultsInput"} - if s.ConfigRuleName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) - } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) - } +// SetConfigRuleNames sets the ConfigRuleNames field's value. +func (s *DescribeConfigRuleEvaluationStatusInput) SetConfigRuleNames(v []*string) *DescribeConfigRuleEvaluationStatusInput { + s.ConfigRuleNames = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLimit sets the Limit field's value. +func (s *DescribeConfigRuleEvaluationStatusInput) SetLimit(v int64) *DescribeConfigRuleEvaluationStatusInput { + s.Limit = &v + return s } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *DeleteEvaluationResultsInput) SetConfigRuleName(v string) *DeleteEvaluationResultsInput { - s.ConfigRuleName = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigRuleEvaluationStatusInput) SetNextToken(v string) *DescribeConfigRuleEvaluationStatusInput { + s.NextToken = &v return s } -// The output when you delete the evaluation results for the specified AWS Config -// rule. -type DeleteEvaluationResultsOutput struct { +type DescribeConfigRuleEvaluationStatusOutput struct { _ struct{} `type:"structure"` + + // Status information about your AWS managed Config rules. + ConfigRulesEvaluationStatus []*ConfigRuleEvaluationStatus `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteEvaluationResultsOutput) String() string { +func (s DescribeConfigRuleEvaluationStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEvaluationResultsOutput) GoString() string { +func (s DescribeConfigRuleEvaluationStatusOutput) GoString() string { return s.String() } -type DeletePendingAggregationRequestInput struct { +// SetConfigRulesEvaluationStatus sets the ConfigRulesEvaluationStatus field's value. +func (s *DescribeConfigRuleEvaluationStatusOutput) SetConfigRulesEvaluationStatus(v []*ConfigRuleEvaluationStatus) *DescribeConfigRuleEvaluationStatusOutput { + s.ConfigRulesEvaluationStatus = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigRuleEvaluationStatusOutput) SetNextToken(v string) *DescribeConfigRuleEvaluationStatusOutput { + s.NextToken = &v + return s +} + +type DescribeConfigRulesInput struct { _ struct{} `type:"structure"` - // The 12-digit account ID of the account requesting to aggregate data. - // - // RequesterAccountId is a required field - RequesterAccountId *string `type:"string" required:"true"` + // The names of the AWS Config rules for which you want details. If you do not + // specify any names, AWS Config returns details for all your rules. + ConfigRuleNames []*string `type:"list"` - // The region requesting to aggregate data. - // - // RequesterAwsRegion is a required field - RequesterAwsRegion *string `min:"1" type:"string" required:"true"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. 
+ NextToken *string `type:"string"` } // String returns the string representation -func (s DeletePendingAggregationRequestInput) String() string { +func (s DescribeConfigRulesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePendingAggregationRequestInput) GoString() string { +func (s DescribeConfigRulesInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePendingAggregationRequestInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePendingAggregationRequestInput"} - if s.RequesterAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("RequesterAccountId")) - } - if s.RequesterAwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("RequesterAwsRegion")) - } - if s.RequesterAwsRegion != nil && len(*s.RequesterAwsRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RequesterAwsRegion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRequesterAccountId sets the RequesterAccountId field's value. -func (s *DeletePendingAggregationRequestInput) SetRequesterAccountId(v string) *DeletePendingAggregationRequestInput { - s.RequesterAccountId = &v +// SetConfigRuleNames sets the ConfigRuleNames field's value. +func (s *DescribeConfigRulesInput) SetConfigRuleNames(v []*string) *DescribeConfigRulesInput { + s.ConfigRuleNames = v return s } -// SetRequesterAwsRegion sets the RequesterAwsRegion field's value. -func (s *DeletePendingAggregationRequestInput) SetRequesterAwsRegion(v string) *DeletePendingAggregationRequestInput { - s.RequesterAwsRegion = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigRulesInput) SetNextToken(v string) *DescribeConfigRulesInput { + s.NextToken = &v return s } -type DeletePendingAggregationRequestOutput struct { +type DescribeConfigRulesOutput struct { _ struct{} `type:"structure"` + + // The details about your AWS Config rules. + ConfigRules []*ConfigRule `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeletePendingAggregationRequestOutput) String() string { +func (s DescribeConfigRulesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePendingAggregationRequestOutput) GoString() string { +func (s DescribeConfigRulesOutput) GoString() string { return s.String() } -type DeleteRemediationConfigurationInput struct { +// SetConfigRules sets the ConfigRules field's value. +func (s *DescribeConfigRulesOutput) SetConfigRules(v []*ConfigRule) *DescribeConfigRulesOutput { + s.ConfigRules = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigRulesOutput) SetNextToken(v string) *DescribeConfigRulesOutput { + s.NextToken = &v + return s +} + +type DescribeConfigurationAggregatorSourcesStatusInput struct { _ struct{} `type:"structure"` - // The name of the AWS Config rule for which you want to delete remediation - // configuration. - // - // ConfigRuleName is a required field - ConfigRuleName *string `min:"1" type:"string" required:"true"` + // The name of the configuration aggregator. 
+ // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + + // The maximum number of AggregatorSourceStatus returned on each page. The default + // is maximum. If you specify 0, AWS Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` - // The type of a resource. - ResourceType *string `type:"string"` + // Filters the status type. + // + // * Valid value FAILED indicates errors while moving data. + // + // * Valid value SUCCEEDED indicates the data was successfully moved. + // + // * Valid value OUTDATED indicates the data is not the most recent. + UpdateStatus []*string `min:"1" type:"list"` } // String returns the string representation -func (s DeleteRemediationConfigurationInput) String() string { +func (s DescribeConfigurationAggregatorSourcesStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRemediationConfigurationInput) GoString() string { +func (s DescribeConfigurationAggregatorSourcesStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRemediationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRemediationConfigurationInput"} - if s.ConfigRuleName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) +func (s *DescribeConfigurationAggregatorSourcesStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationAggregatorSourcesStatusInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + if s.UpdateStatus != nil && len(s.UpdateStatus) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UpdateStatus", 1)) } if invalidParams.Len() > 0 { @@ -7817,869 +9818,789 @@ func (s *DeleteRemediationConfigurationInput) Validate() error { return nil } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *DeleteRemediationConfigurationInput) SetConfigRuleName(v string) *DeleteRemediationConfigurationInput { - s.ConfigRuleName = &v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetConfigurationAggregatorName(v string) *DescribeConfigurationAggregatorSourcesStatusInput { + s.ConfigurationAggregatorName = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *DeleteRemediationConfigurationInput) SetResourceType(v string) *DeleteRemediationConfigurationInput { - s.ResourceType = &v +// SetLimit sets the Limit field's value. 
+func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetLimit(v int64) *DescribeConfigurationAggregatorSourcesStatusInput { + s.Limit = &v return s } -type DeleteRemediationConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteRemediationConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetNextToken(v string) *DescribeConfigurationAggregatorSourcesStatusInput { + s.NextToken = &v + return s } -// GoString returns the string representation -func (s DeleteRemediationConfigurationOutput) GoString() string { - return s.String() +// SetUpdateStatus sets the UpdateStatus field's value. +func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetUpdateStatus(v []*string) *DescribeConfigurationAggregatorSourcesStatusInput { + s.UpdateStatus = v + return s } -type DeleteRetentionConfigurationInput struct { +type DescribeConfigurationAggregatorSourcesStatusOutput struct { _ struct{} `type:"structure"` - // The name of the retention configuration to delete. - // - // RetentionConfigurationName is a required field - RetentionConfigurationName *string `min:"1" type:"string" required:"true"` + // Returns an AggregatedSourceStatus object. + AggregatedSourceStatusList []*AggregatedSourceStatus `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteRetentionConfigurationInput) String() string { +func (s DescribeConfigurationAggregatorSourcesStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRetentionConfigurationInput) GoString() string { +func (s DescribeConfigurationAggregatorSourcesStatusOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRetentionConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionConfigurationInput"} - if s.RetentionConfigurationName == nil { - invalidParams.Add(request.NewErrParamRequired("RetentionConfigurationName")) - } - if s.RetentionConfigurationName != nil && len(*s.RetentionConfigurationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RetentionConfigurationName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAggregatedSourceStatusList sets the AggregatedSourceStatusList field's value. +func (s *DescribeConfigurationAggregatorSourcesStatusOutput) SetAggregatedSourceStatusList(v []*AggregatedSourceStatus) *DescribeConfigurationAggregatorSourcesStatusOutput { + s.AggregatedSourceStatusList = v + return s } -// SetRetentionConfigurationName sets the RetentionConfigurationName field's value. -func (s *DeleteRetentionConfigurationInput) SetRetentionConfigurationName(v string) *DeleteRetentionConfigurationInput { - s.RetentionConfigurationName = &v +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeConfigurationAggregatorSourcesStatusOutput) SetNextToken(v string) *DescribeConfigurationAggregatorSourcesStatusOutput { + s.NextToken = &v return s } -type DeleteRetentionConfigurationOutput struct { +type DescribeConfigurationAggregatorsInput struct { _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteRetentionConfigurationOutput) String() string { - return awsutil.Prettify(s) -} -// GoString returns the string representation -func (s DeleteRetentionConfigurationOutput) GoString() string { - return s.String() -} + // The name of the configuration aggregators. + ConfigurationAggregatorNames []*string `type:"list"` -// The input for the DeliverConfigSnapshot action. -type DeliverConfigSnapshotInput struct { - _ struct{} `type:"structure"` + // The maximum number of configuration aggregators returned on each page. The + // default is maximum. If you specify 0, AWS Config uses the default. + Limit *int64 `type:"integer"` - // The name of the delivery channel through which the snapshot is delivered. - // - // DeliveryChannelName is a required field - DeliveryChannelName *string `locationName:"deliveryChannelName" min:"1" type:"string" required:"true"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeliverConfigSnapshotInput) String() string { +func (s DescribeConfigurationAggregatorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeliverConfigSnapshotInput) GoString() string { +func (s DescribeConfigurationAggregatorsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeliverConfigSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeliverConfigSnapshotInput"} - if s.DeliveryChannelName == nil { - invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) - } - if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) - } +// SetConfigurationAggregatorNames sets the ConfigurationAggregatorNames field's value. +func (s *DescribeConfigurationAggregatorsInput) SetConfigurationAggregatorNames(v []*string) *DescribeConfigurationAggregatorsInput { + s.ConfigurationAggregatorNames = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLimit sets the Limit field's value. +func (s *DescribeConfigurationAggregatorsInput) SetLimit(v int64) *DescribeConfigurationAggregatorsInput { + s.Limit = &v + return s } -// SetDeliveryChannelName sets the DeliveryChannelName field's value. -func (s *DeliverConfigSnapshotInput) SetDeliveryChannelName(v string) *DeliverConfigSnapshotInput { - s.DeliveryChannelName = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigurationAggregatorsInput) SetNextToken(v string) *DescribeConfigurationAggregatorsInput { + s.NextToken = &v return s } -// The output for the DeliverConfigSnapshot action, in JSON format. -type DeliverConfigSnapshotOutput struct { +type DescribeConfigurationAggregatorsOutput struct { _ struct{} `type:"structure"` - // The ID of the snapshot that is being created. - ConfigSnapshotId *string `locationName:"configSnapshotId" type:"string"` + // Returns a ConfigurationAggregators object. 
+ ConfigurationAggregators []*ConfigurationAggregator `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeliverConfigSnapshotOutput) String() string { +func (s DescribeConfigurationAggregatorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeliverConfigSnapshotOutput) GoString() string { +func (s DescribeConfigurationAggregatorsOutput) GoString() string { return s.String() } -// SetConfigSnapshotId sets the ConfigSnapshotId field's value. -func (s *DeliverConfigSnapshotOutput) SetConfigSnapshotId(v string) *DeliverConfigSnapshotOutput { - s.ConfigSnapshotId = &v +// SetConfigurationAggregators sets the ConfigurationAggregators field's value. +func (s *DescribeConfigurationAggregatorsOutput) SetConfigurationAggregators(v []*ConfigurationAggregator) *DescribeConfigurationAggregatorsOutput { + s.ConfigurationAggregators = v return s } -// The channel through which AWS Config delivers notifications and updated configuration -// states. -type DeliveryChannel struct { - _ struct{} `type:"structure"` - - // The options for how often AWS Config delivers configuration snapshots to - // the Amazon S3 bucket. - ConfigSnapshotDeliveryProperties *ConfigSnapshotDeliveryProperties `locationName:"configSnapshotDeliveryProperties" type:"structure"` - - // The name of the delivery channel. By default, AWS Config assigns the name - // "default" when creating the delivery channel. To change the delivery channel - // name, you must use the DeleteDeliveryChannel action to delete your current - // delivery channel, and then you must use the PutDeliveryChannel command to - // create a delivery channel that has the desired name. - Name *string `locationName:"name" min:"1" type:"string"` - - // The name of the Amazon S3 bucket to which AWS Config delivers configuration - // snapshots and configuration history files. - // - // If you specify a bucket that belongs to another AWS account, that bucket - // must have policies that grant access permissions to AWS Config. For more - // information, see Permissions for the Amazon S3 Bucket (https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-policy.html) - // in the AWS Config Developer Guide. - S3BucketName *string `locationName:"s3BucketName" type:"string"` +// SetNextToken sets the NextToken field's value. +func (s *DescribeConfigurationAggregatorsOutput) SetNextToken(v string) *DescribeConfigurationAggregatorsOutput { + s.NextToken = &v + return s +} - // The prefix for the specified Amazon S3 bucket. - S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"` +// The input for the DescribeConfigurationRecorderStatus action. +type DescribeConfigurationRecorderStatusInput struct { + _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config - // sends notifications about configuration changes. - // - // If you choose a topic from another account, the topic must have policies - // that grant access permissions to AWS Config. For more information, see Permissions - // for the Amazon SNS Topic (https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html) - // in the AWS Config Developer Guide. - SnsTopicARN *string `locationName:"snsTopicARN" type:"string"` + // The name(s) of the configuration recorder. 
If the name is not specified, + // the action returns the current status of all the configuration recorders + // associated with the account. + ConfigurationRecorderNames []*string `type:"list"` } // String returns the string representation -func (s DeliveryChannel) String() string { +func (s DescribeConfigurationRecorderStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeliveryChannel) GoString() string { +func (s DescribeConfigurationRecorderStatusInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeliveryChannel) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeliveryChannel"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfigSnapshotDeliveryProperties sets the ConfigSnapshotDeliveryProperties field's value. -func (s *DeliveryChannel) SetConfigSnapshotDeliveryProperties(v *ConfigSnapshotDeliveryProperties) *DeliveryChannel { - s.ConfigSnapshotDeliveryProperties = v +// SetConfigurationRecorderNames sets the ConfigurationRecorderNames field's value. +func (s *DescribeConfigurationRecorderStatusInput) SetConfigurationRecorderNames(v []*string) *DescribeConfigurationRecorderStatusInput { + s.ConfigurationRecorderNames = v return s } -// SetName sets the Name field's value. -func (s *DeliveryChannel) SetName(v string) *DeliveryChannel { - s.Name = &v - return s +// The output for the DescribeConfigurationRecorderStatus action, in JSON format. +type DescribeConfigurationRecorderStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains status of the specified recorders. + ConfigurationRecordersStatus []*ConfigurationRecorderStatus `type:"list"` } -// SetS3BucketName sets the S3BucketName field's value. -func (s *DeliveryChannel) SetS3BucketName(v string) *DeliveryChannel { - s.S3BucketName = &v - return s +// String returns the string representation +func (s DescribeConfigurationRecorderStatusOutput) String() string { + return awsutil.Prettify(s) } -// SetS3KeyPrefix sets the S3KeyPrefix field's value. -func (s *DeliveryChannel) SetS3KeyPrefix(v string) *DeliveryChannel { - s.S3KeyPrefix = &v - return s +// GoString returns the string representation +func (s DescribeConfigurationRecorderStatusOutput) GoString() string { + return s.String() } -// SetSnsTopicARN sets the SnsTopicARN field's value. -func (s *DeliveryChannel) SetSnsTopicARN(v string) *DeliveryChannel { - s.SnsTopicARN = &v +// SetConfigurationRecordersStatus sets the ConfigurationRecordersStatus field's value. +func (s *DescribeConfigurationRecorderStatusOutput) SetConfigurationRecordersStatus(v []*ConfigurationRecorderStatus) *DescribeConfigurationRecorderStatusOutput { + s.ConfigurationRecordersStatus = v return s } -// The status of a specified delivery channel. -// -// Valid values: Success | Failure -type DeliveryChannelStatus struct { +// The input for the DescribeConfigurationRecorders action. +type DescribeConfigurationRecordersInput struct { _ struct{} `type:"structure"` - // A list that contains the status of the delivery of the configuration history - // to the specified Amazon S3 bucket. 
- ConfigHistoryDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configHistoryDeliveryInfo" type:"structure"` - - // A list containing the status of the delivery of the snapshot to the specified - // Amazon S3 bucket. - ConfigSnapshotDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configSnapshotDeliveryInfo" type:"structure"` - - // A list containing the status of the delivery of the configuration stream - // notification to the specified Amazon SNS topic. - ConfigStreamDeliveryInfo *ConfigStreamDeliveryInfo `locationName:"configStreamDeliveryInfo" type:"structure"` - - // The name of the delivery channel. - Name *string `locationName:"name" type:"string"` + // A list of configuration recorder names. + ConfigurationRecorderNames []*string `type:"list"` } // String returns the string representation -func (s DeliveryChannelStatus) String() string { +func (s DescribeConfigurationRecordersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeliveryChannelStatus) GoString() string { +func (s DescribeConfigurationRecordersInput) GoString() string { return s.String() } -// SetConfigHistoryDeliveryInfo sets the ConfigHistoryDeliveryInfo field's value. -func (s *DeliveryChannelStatus) SetConfigHistoryDeliveryInfo(v *ConfigExportDeliveryInfo) *DeliveryChannelStatus { - s.ConfigHistoryDeliveryInfo = v +// SetConfigurationRecorderNames sets the ConfigurationRecorderNames field's value. +func (s *DescribeConfigurationRecordersInput) SetConfigurationRecorderNames(v []*string) *DescribeConfigurationRecordersInput { + s.ConfigurationRecorderNames = v return s } -// SetConfigSnapshotDeliveryInfo sets the ConfigSnapshotDeliveryInfo field's value. -func (s *DeliveryChannelStatus) SetConfigSnapshotDeliveryInfo(v *ConfigExportDeliveryInfo) *DeliveryChannelStatus { - s.ConfigSnapshotDeliveryInfo = v - return s +// The output for the DescribeConfigurationRecorders action. +type DescribeConfigurationRecordersOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the descriptions of the specified configuration recorders. + ConfigurationRecorders []*ConfigurationRecorder `type:"list"` } -// SetConfigStreamDeliveryInfo sets the ConfigStreamDeliveryInfo field's value. -func (s *DeliveryChannelStatus) SetConfigStreamDeliveryInfo(v *ConfigStreamDeliveryInfo) *DeliveryChannelStatus { - s.ConfigStreamDeliveryInfo = v - return s +// String returns the string representation +func (s DescribeConfigurationRecordersOutput) String() string { + return awsutil.Prettify(s) } -// SetName sets the Name field's value. -func (s *DeliveryChannelStatus) SetName(v string) *DeliveryChannelStatus { - s.Name = &v +// GoString returns the string representation +func (s DescribeConfigurationRecordersOutput) GoString() string { + return s.String() +} + +// SetConfigurationRecorders sets the ConfigurationRecorders field's value. +func (s *DescribeConfigurationRecordersOutput) SetConfigurationRecorders(v []*ConfigurationRecorder) *DescribeConfigurationRecordersOutput { + s.ConfigurationRecorders = v return s } -type DescribeAggregateComplianceByConfigRulesInput struct { +// The input for the DeliveryChannelStatus action. +type DescribeDeliveryChannelStatusInput struct { _ struct{} `type:"structure"` - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - - // Filters the results by ConfigRuleComplianceFilters object. 
- Filters *ConfigRuleComplianceFilters `type:"structure"` - - // The maximum number of evaluation results returned on each page. The default - // is maximum. If you specify 0, AWS Config uses the default. - Limit *int64 `type:"integer"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // A list of delivery channel names. + DeliveryChannelNames []*string `type:"list"` } // String returns the string representation -func (s DescribeAggregateComplianceByConfigRulesInput) String() string { +func (s DescribeDeliveryChannelStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAggregateComplianceByConfigRulesInput) GoString() string { +func (s DescribeDeliveryChannelStatusInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeAggregateComplianceByConfigRulesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeAggregateComplianceByConfigRulesInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - if s.Filters != nil { - if err := s.Filters.Validate(); err != nil { - invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDeliveryChannelNames sets the DeliveryChannelNames field's value. +func (s *DescribeDeliveryChannelStatusInput) SetDeliveryChannelNames(v []*string) *DescribeDeliveryChannelStatusInput { + s.DeliveryChannelNames = v + return s } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *DescribeAggregateComplianceByConfigRulesInput) SetConfigurationAggregatorName(v string) *DescribeAggregateComplianceByConfigRulesInput { - s.ConfigurationAggregatorName = &v - return s +// The output for the DescribeDeliveryChannelStatus action. +type DescribeDeliveryChannelStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the status of a specified delivery channel. + DeliveryChannelsStatus []*DeliveryChannelStatus `type:"list"` } -// SetFilters sets the Filters field's value. -func (s *DescribeAggregateComplianceByConfigRulesInput) SetFilters(v *ConfigRuleComplianceFilters) *DescribeAggregateComplianceByConfigRulesInput { - s.Filters = v - return s +// String returns the string representation +func (s DescribeDeliveryChannelStatusOutput) String() string { + return awsutil.Prettify(s) } -// SetLimit sets the Limit field's value. -func (s *DescribeAggregateComplianceByConfigRulesInput) SetLimit(v int64) *DescribeAggregateComplianceByConfigRulesInput { - s.Limit = &v - return s +// GoString returns the string representation +func (s DescribeDeliveryChannelStatusOutput) GoString() string { + return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribeAggregateComplianceByConfigRulesInput) SetNextToken(v string) *DescribeAggregateComplianceByConfigRulesInput { - s.NextToken = &v +// SetDeliveryChannelsStatus sets the DeliveryChannelsStatus field's value. 
+func (s *DescribeDeliveryChannelStatusOutput) SetDeliveryChannelsStatus(v []*DeliveryChannelStatus) *DescribeDeliveryChannelStatusOutput { + s.DeliveryChannelsStatus = v return s } -type DescribeAggregateComplianceByConfigRulesOutput struct { +// The input for the DescribeDeliveryChannels action. +type DescribeDeliveryChannelsInput struct { _ struct{} `type:"structure"` - // Returns a list of AggregateComplianceByConfigRule object. - AggregateComplianceByConfigRules []*AggregateComplianceByConfigRule `type:"list"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // A list of delivery channel names. + DeliveryChannelNames []*string `type:"list"` } // String returns the string representation -func (s DescribeAggregateComplianceByConfigRulesOutput) String() string { +func (s DescribeDeliveryChannelsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAggregateComplianceByConfigRulesOutput) GoString() string { +func (s DescribeDeliveryChannelsInput) GoString() string { return s.String() } -// SetAggregateComplianceByConfigRules sets the AggregateComplianceByConfigRules field's value. -func (s *DescribeAggregateComplianceByConfigRulesOutput) SetAggregateComplianceByConfigRules(v []*AggregateComplianceByConfigRule) *DescribeAggregateComplianceByConfigRulesOutput { - s.AggregateComplianceByConfigRules = v +// SetDeliveryChannelNames sets the DeliveryChannelNames field's value. +func (s *DescribeDeliveryChannelsInput) SetDeliveryChannelNames(v []*string) *DescribeDeliveryChannelsInput { + s.DeliveryChannelNames = v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeAggregateComplianceByConfigRulesOutput) SetNextToken(v string) *DescribeAggregateComplianceByConfigRulesOutput { - s.NextToken = &v +// The output for the DescribeDeliveryChannels action. +type DescribeDeliveryChannelsOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the descriptions of the specified delivery channel. + DeliveryChannels []*DeliveryChannel `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelsOutput) GoString() string { + return s.String() +} + +// SetDeliveryChannels sets the DeliveryChannels field's value. +func (s *DescribeDeliveryChannelsOutput) SetDeliveryChannels(v []*DeliveryChannel) *DescribeDeliveryChannelsOutput { + s.DeliveryChannels = v return s } -type DescribeAggregationAuthorizationsInput struct { +type DescribeOrganizationConfigRuleStatusesInput struct { _ struct{} `type:"structure"` - // The maximum number of AggregationAuthorizations returned on each page. The - // default is maximum. If you specify 0, AWS Config uses the default. + // The maximum number of OrganizationConfigRuleStatuses returned on each page. + // If you do not specify a number, AWS Config uses the default. The default is + // 100. Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` + + // The names of organization config rules for which you want status details. + // If you do not specify any names, AWS Config returns details for all your + // organization AWS Config rules. 
+ OrganizationConfigRuleNames []*string `type:"list"` } // String returns the string representation -func (s DescribeAggregationAuthorizationsInput) String() string { +func (s DescribeOrganizationConfigRuleStatusesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAggregationAuthorizationsInput) GoString() string { +func (s DescribeOrganizationConfigRuleStatusesInput) GoString() string { return s.String() } // SetLimit sets the Limit field's value. -func (s *DescribeAggregationAuthorizationsInput) SetLimit(v int64) *DescribeAggregationAuthorizationsInput { +func (s *DescribeOrganizationConfigRuleStatusesInput) SetLimit(v int64) *DescribeOrganizationConfigRuleStatusesInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeAggregationAuthorizationsInput) SetNextToken(v string) *DescribeAggregationAuthorizationsInput { +func (s *DescribeOrganizationConfigRuleStatusesInput) SetNextToken(v string) *DescribeOrganizationConfigRuleStatusesInput { s.NextToken = &v return s } -type DescribeAggregationAuthorizationsOutput struct { - _ struct{} `type:"structure"` +// SetOrganizationConfigRuleNames sets the OrganizationConfigRuleNames field's value. +func (s *DescribeOrganizationConfigRuleStatusesInput) SetOrganizationConfigRuleNames(v []*string) *DescribeOrganizationConfigRuleStatusesInput { + s.OrganizationConfigRuleNames = v + return s +} - // Returns a list of authorizations granted to various aggregator accounts and - // regions. - AggregationAuthorizations []*AggregationAuthorization `type:"list"` +type DescribeOrganizationConfigRuleStatusesOutput struct { + _ struct{} `type:"structure"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` + + // A list of OrganizationConfigRuleStatus objects. + OrganizationConfigRuleStatuses []*OrganizationConfigRuleStatus `type:"list"` } // String returns the string representation -func (s DescribeAggregationAuthorizationsOutput) String() string { +func (s DescribeOrganizationConfigRuleStatusesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAggregationAuthorizationsOutput) GoString() string { +func (s DescribeOrganizationConfigRuleStatusesOutput) GoString() string { return s.String() } -// SetAggregationAuthorizations sets the AggregationAuthorizations field's value. -func (s *DescribeAggregationAuthorizationsOutput) SetAggregationAuthorizations(v []*AggregationAuthorization) *DescribeAggregationAuthorizationsOutput { - s.AggregationAuthorizations = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeOrganizationConfigRuleStatusesOutput) SetNextToken(v string) *DescribeOrganizationConfigRuleStatusesOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeAggregationAuthorizationsOutput) SetNextToken(v string) *DescribeAggregationAuthorizationsOutput { - s.NextToken = &v +// SetOrganizationConfigRuleStatuses sets the OrganizationConfigRuleStatuses field's value. 
+func (s *DescribeOrganizationConfigRuleStatusesOutput) SetOrganizationConfigRuleStatuses(v []*OrganizationConfigRuleStatus) *DescribeOrganizationConfigRuleStatusesOutput { + s.OrganizationConfigRuleStatuses = v return s } -type DescribeComplianceByConfigRuleInput struct { +type DescribeOrganizationConfigRulesInput struct { _ struct{} `type:"structure"` - // Filters the results by compliance. - // - // The allowed values are COMPLIANT and NON_COMPLIANT. - ComplianceTypes []*string `type:"list"` - - // Specify one or more AWS Config rule names to filter the results by rule. - ConfigRuleNames []*string `type:"list"` + // The maximum number of organization config rules returned on each page. If + // you do not specify a number, AWS Config uses the default. The default is 100. + Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` + + // The names of organization config rules for which you want details. If you + // do not specify any names, AWS Config returns details for all your organization + // config rules. + OrganizationConfigRuleNames []*string `type:"list"` } // String returns the string representation -func (s DescribeComplianceByConfigRuleInput) String() string { +func (s DescribeOrganizationConfigRulesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeComplianceByConfigRuleInput) GoString() string { +func (s DescribeOrganizationConfigRulesInput) GoString() string { return s.String() } -// SetComplianceTypes sets the ComplianceTypes field's value. -func (s *DescribeComplianceByConfigRuleInput) SetComplianceTypes(v []*string) *DescribeComplianceByConfigRuleInput { - s.ComplianceTypes = v +// SetLimit sets the Limit field's value. +func (s *DescribeOrganizationConfigRulesInput) SetLimit(v int64) *DescribeOrganizationConfigRulesInput { + s.Limit = &v return s } -// SetConfigRuleNames sets the ConfigRuleNames field's value. -func (s *DescribeComplianceByConfigRuleInput) SetConfigRuleNames(v []*string) *DescribeComplianceByConfigRuleInput { - s.ConfigRuleNames = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeOrganizationConfigRulesInput) SetNextToken(v string) *DescribeOrganizationConfigRulesInput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeComplianceByConfigRuleInput) SetNextToken(v string) *DescribeComplianceByConfigRuleInput { - s.NextToken = &v +// SetOrganizationConfigRuleNames sets the OrganizationConfigRuleNames field's value. +func (s *DescribeOrganizationConfigRulesInput) SetOrganizationConfigRuleNames(v []*string) *DescribeOrganizationConfigRulesInput { + s.OrganizationConfigRuleNames = v return s } -type DescribeComplianceByConfigRuleOutput struct { +type DescribeOrganizationConfigRulesOutput struct { _ struct{} `type:"structure"` - // Indicates whether each of the specified AWS Config rules is compliant. - ComplianceByConfigRules []*ComplianceByConfigRule `type:"list"` - - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. NextToken *string `type:"string"` + + // Returns a list of OrganizationConfigRule objects. 
+ OrganizationConfigRules []*OrganizationConfigRule `type:"list"` } // String returns the string representation -func (s DescribeComplianceByConfigRuleOutput) String() string { +func (s DescribeOrganizationConfigRulesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeComplianceByConfigRuleOutput) GoString() string { +func (s DescribeOrganizationConfigRulesOutput) GoString() string { return s.String() } -// SetComplianceByConfigRules sets the ComplianceByConfigRules field's value. -func (s *DescribeComplianceByConfigRuleOutput) SetComplianceByConfigRules(v []*ComplianceByConfigRule) *DescribeComplianceByConfigRuleOutput { - s.ComplianceByConfigRules = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeOrganizationConfigRulesOutput) SetNextToken(v string) *DescribeOrganizationConfigRulesOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeComplianceByConfigRuleOutput) SetNextToken(v string) *DescribeComplianceByConfigRuleOutput { - s.NextToken = &v +// SetOrganizationConfigRules sets the OrganizationConfigRules field's value. +func (s *DescribeOrganizationConfigRulesOutput) SetOrganizationConfigRules(v []*OrganizationConfigRule) *DescribeOrganizationConfigRulesOutput { + s.OrganizationConfigRules = v return s } -type DescribeComplianceByResourceInput struct { +type DescribePendingAggregationRequestsInput struct { _ struct{} `type:"structure"` - // Filters the results by compliance. - // - // The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA. - ComplianceTypes []*string `type:"list"` - // The maximum number of evaluation results returned on each page. The default - // is 10. You cannot specify a number greater than 100. If you specify 0, AWS - // Config uses the default. + // is maximum. If you specify 0, AWS Config uses the default. Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` - - // The ID of the AWS resource for which you want compliance information. You - // can specify only one resource ID. If you specify a resource ID, you must - // also specify a type for ResourceType. - ResourceId *string `min:"1" type:"string"` - - // The types of AWS resources for which you want compliance information (for - // example, AWS::EC2::Instance). For this action, you can specify that the resource - // type is an AWS account by specifying AWS::::Account. - ResourceType *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeComplianceByResourceInput) String() string { +func (s DescribePendingAggregationRequestsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeComplianceByResourceInput) GoString() string { +func (s DescribePendingAggregationRequestsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeComplianceByResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeComplianceByResourceInput"} - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ResourceType != nil && len(*s.ResourceType) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetComplianceTypes sets the ComplianceTypes field's value. -func (s *DescribeComplianceByResourceInput) SetComplianceTypes(v []*string) *DescribeComplianceByResourceInput { - s.ComplianceTypes = v - return s -} - // SetLimit sets the Limit field's value. -func (s *DescribeComplianceByResourceInput) SetLimit(v int64) *DescribeComplianceByResourceInput { +func (s *DescribePendingAggregationRequestsInput) SetLimit(v int64) *DescribePendingAggregationRequestsInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeComplianceByResourceInput) SetNextToken(v string) *DescribeComplianceByResourceInput { +func (s *DescribePendingAggregationRequestsInput) SetNextToken(v string) *DescribePendingAggregationRequestsInput { s.NextToken = &v return s } -// SetResourceId sets the ResourceId field's value. -func (s *DescribeComplianceByResourceInput) SetResourceId(v string) *DescribeComplianceByResourceInput { - s.ResourceId = &v - return s -} - -// SetResourceType sets the ResourceType field's value. -func (s *DescribeComplianceByResourceInput) SetResourceType(v string) *DescribeComplianceByResourceInput { - s.ResourceType = &v - return s -} - -type DescribeComplianceByResourceOutput struct { +type DescribePendingAggregationRequestsOutput struct { _ struct{} `type:"structure"` - // Indicates whether the specified AWS resource complies with all of the AWS - // Config rules that evaluate it. - ComplianceByResources []*ComplianceByResource `type:"list"` - - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. NextToken *string `type:"string"` + + // Returns a PendingAggregationRequests object. + PendingAggregationRequests []*PendingAggregationRequest `type:"list"` } // String returns the string representation -func (s DescribeComplianceByResourceOutput) String() string { +func (s DescribePendingAggregationRequestsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeComplianceByResourceOutput) GoString() string { +func (s DescribePendingAggregationRequestsOutput) GoString() string { return s.String() } -// SetComplianceByResources sets the ComplianceByResources field's value. -func (s *DescribeComplianceByResourceOutput) SetComplianceByResources(v []*ComplianceByResource) *DescribeComplianceByResourceOutput { - s.ComplianceByResources = v +// SetNextToken sets the NextToken field's value. +func (s *DescribePendingAggregationRequestsOutput) SetNextToken(v string) *DescribePendingAggregationRequestsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeComplianceByResourceOutput) SetNextToken(v string) *DescribeComplianceByResourceOutput { - s.NextToken = &v +// SetPendingAggregationRequests sets the PendingAggregationRequests field's value. 
+func (s *DescribePendingAggregationRequestsOutput) SetPendingAggregationRequests(v []*PendingAggregationRequest) *DescribePendingAggregationRequestsOutput { + s.PendingAggregationRequests = v return s } -type DescribeConfigRuleEvaluationStatusInput struct { +type DescribeRemediationConfigurationsInput struct { _ struct{} `type:"structure"` - // The name of the AWS managed Config rules for which you want status information. - // If you do not specify any names, AWS Config returns status information for - // all AWS managed Config rules that you use. - ConfigRuleNames []*string `type:"list"` - - // The number of rule evaluation results that you want returned. - // - // This parameter is required if the rule limit for your account is more than - // the default of 150 rules. + // A list of AWS Config rule names of remediation configurations for which you + // want details. // - // For information about requesting a rule limit increase, see AWS Config Limits - // (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config) - // in the AWS General Reference Guide. - Limit *int64 `type:"integer"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // ConfigRuleNames is a required field + ConfigRuleNames []*string `type:"list" required:"true"` } // String returns the string representation -func (s DescribeConfigRuleEvaluationStatusInput) String() string { +func (s DescribeRemediationConfigurationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigRuleEvaluationStatusInput) GoString() string { +func (s DescribeRemediationConfigurationsInput) GoString() string { return s.String() } -// SetConfigRuleNames sets the ConfigRuleNames field's value. -func (s *DescribeConfigRuleEvaluationStatusInput) SetConfigRuleNames(v []*string) *DescribeConfigRuleEvaluationStatusInput { - s.ConfigRuleNames = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRemediationConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRemediationConfigurationsInput"} + if s.ConfigRuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleNames")) + } -// SetLimit sets the Limit field's value. -func (s *DescribeConfigRuleEvaluationStatusInput) SetLimit(v int64) *DescribeConfigRuleEvaluationStatusInput { - s.Limit = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigRuleEvaluationStatusInput) SetNextToken(v string) *DescribeConfigRuleEvaluationStatusInput { - s.NextToken = &v +// SetConfigRuleNames sets the ConfigRuleNames field's value. +func (s *DescribeRemediationConfigurationsInput) SetConfigRuleNames(v []*string) *DescribeRemediationConfigurationsInput { + s.ConfigRuleNames = v return s } -type DescribeConfigRuleEvaluationStatusOutput struct { +type DescribeRemediationConfigurationsOutput struct { _ struct{} `type:"structure"` - // Status information about your AWS managed Config rules. - ConfigRulesEvaluationStatus []*ConfigRuleEvaluationStatus `type:"list"` - - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. - NextToken *string `type:"string"` + // Returns a remediation configuration object. 
+ RemediationConfigurations []*RemediationConfiguration `type:"list"` } // String returns the string representation -func (s DescribeConfigRuleEvaluationStatusOutput) String() string { +func (s DescribeRemediationConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigRuleEvaluationStatusOutput) GoString() string { +func (s DescribeRemediationConfigurationsOutput) GoString() string { return s.String() } -// SetConfigRulesEvaluationStatus sets the ConfigRulesEvaluationStatus field's value. -func (s *DescribeConfigRuleEvaluationStatusOutput) SetConfigRulesEvaluationStatus(v []*ConfigRuleEvaluationStatus) *DescribeConfigRuleEvaluationStatusOutput { - s.ConfigRulesEvaluationStatus = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigRuleEvaluationStatusOutput) SetNextToken(v string) *DescribeConfigRuleEvaluationStatusOutput { - s.NextToken = &v +// SetRemediationConfigurations sets the RemediationConfigurations field's value. +func (s *DescribeRemediationConfigurationsOutput) SetRemediationConfigurations(v []*RemediationConfiguration) *DescribeRemediationConfigurationsOutput { + s.RemediationConfigurations = v return s } -type DescribeConfigRulesInput struct { +type DescribeRemediationExceptionsInput struct { _ struct{} `type:"structure"` - // The names of the AWS Config rules for which you want details. If you do not - // specify any names, AWS Config returns details for all your rules. - ConfigRuleNames []*string `type:"list"` + // The name of the AWS Config rule. + // + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. + // The maximum number of RemediationExceptionResourceKey returned on each page. + // The default is 25. If you specify 0, AWS Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. NextToken *string `type:"string"` + + // An exception list of resource exception keys to be processed with the current + // request. AWS Config adds exception for each resource key. For example, AWS + // Config adds 3 exceptions for 3 resource keys. + ResourceKeys []*RemediationExceptionResourceKey `min:"1" type:"list"` } // String returns the string representation -func (s DescribeConfigRulesInput) String() string { +func (s DescribeRemediationExceptionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigRulesInput) GoString() string { +func (s DescribeRemediationExceptionsInput) GoString() string { return s.String() } -// SetConfigRuleNames sets the ConfigRuleNames field's value. -func (s *DescribeConfigRulesInput) SetConfigRuleNames(v []*string) *DescribeConfigRulesInput { - s.ConfigRuleNames = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeRemediationExceptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRemediationExceptionsInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) + } + if s.ResourceKeys != nil { + for i, v := range s.ResourceKeys { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *DescribeRemediationExceptionsInput) SetConfigRuleName(v string) *DescribeRemediationExceptionsInput { + s.ConfigRuleName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeRemediationExceptionsInput) SetLimit(v int64) *DescribeRemediationExceptionsInput { + s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeConfigRulesInput) SetNextToken(v string) *DescribeConfigRulesInput { +func (s *DescribeRemediationExceptionsInput) SetNextToken(v string) *DescribeRemediationExceptionsInput { s.NextToken = &v return s } -type DescribeConfigRulesOutput struct { +// SetResourceKeys sets the ResourceKeys field's value. +func (s *DescribeRemediationExceptionsInput) SetResourceKeys(v []*RemediationExceptionResourceKey) *DescribeRemediationExceptionsInput { + s.ResourceKeys = v + return s +} + +type DescribeRemediationExceptionsOutput struct { _ struct{} `type:"structure"` - // The details about your AWS Config rules. - ConfigRules []*ConfigRule `type:"list"` - - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. NextToken *string `type:"string"` + + // Returns a list of remediation exception objects. + RemediationExceptions []*RemediationException `type:"list"` } // String returns the string representation -func (s DescribeConfigRulesOutput) String() string { +func (s DescribeRemediationExceptionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigRulesOutput) GoString() string { +func (s DescribeRemediationExceptionsOutput) GoString() string { return s.String() } -// SetConfigRules sets the ConfigRules field's value. -func (s *DescribeConfigRulesOutput) SetConfigRules(v []*ConfigRule) *DescribeConfigRulesOutput { - s.ConfigRules = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeRemediationExceptionsOutput) SetNextToken(v string) *DescribeRemediationExceptionsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigRulesOutput) SetNextToken(v string) *DescribeConfigRulesOutput { - s.NextToken = &v +// SetRemediationExceptions sets the RemediationExceptions field's value. 
+func (s *DescribeRemediationExceptionsOutput) SetRemediationExceptions(v []*RemediationException) *DescribeRemediationExceptionsOutput { + s.RemediationExceptions = v return s } -type DescribeConfigurationAggregatorSourcesStatusInput struct { +type DescribeRemediationExecutionStatusInput struct { _ struct{} `type:"structure"` - // The name of the configuration aggregator. + // The name of the AWS Config rule. // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` - // The maximum number of AggregatorSourceStatus returned on each page. The default - // is maximum. If you specify 0, AWS Config uses the default. + // The maximum number of RemediationExecutionStatuses returned on each page. + // The default is maximum. If you specify 0, AWS Config uses the default. Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` - // Filters the status type. - // - // * Valid value FAILED indicates errors while moving data. - // - // * Valid value SUCCEEDED indicates the data was successfully moved. - // - // * Valid value OUTDATED indicates the data is not the most recent. - UpdateStatus []*string `min:"1" type:"list"` + // A list of resource keys to be processed with the current request. Each element + // in the list consists of the resource type and resource ID. + ResourceKeys []*ResourceKey `min:"1" type:"list"` } // String returns the string representation -func (s DescribeConfigurationAggregatorSourcesStatusInput) String() string { +func (s DescribeRemediationExecutionStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigurationAggregatorSourcesStatusInput) GoString() string { +func (s DescribeRemediationExecutionStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeConfigurationAggregatorSourcesStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationAggregatorSourcesStatusInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) +func (s *DescribeRemediationExecutionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRemediationExecutionStatusInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) } - if s.UpdateStatus != nil && len(s.UpdateStatus) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UpdateStatus", 1)) + if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) + } + if s.ResourceKeys != nil { + for i, v := range s.ResourceKeys { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -8688,517 +10609,642 @@ func (s *DescribeConfigurationAggregatorSourcesStatusInput) Validate() error { return nil } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetConfigurationAggregatorName(v string) *DescribeConfigurationAggregatorSourcesStatusInput { - s.ConfigurationAggregatorName = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *DescribeRemediationExecutionStatusInput) SetConfigRuleName(v string) *DescribeRemediationExecutionStatusInput { + s.ConfigRuleName = &v return s } // SetLimit sets the Limit field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetLimit(v int64) *DescribeConfigurationAggregatorSourcesStatusInput { +func (s *DescribeRemediationExecutionStatusInput) SetLimit(v int64) *DescribeRemediationExecutionStatusInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetNextToken(v string) *DescribeConfigurationAggregatorSourcesStatusInput { +func (s *DescribeRemediationExecutionStatusInput) SetNextToken(v string) *DescribeRemediationExecutionStatusInput { s.NextToken = &v return s } -// SetUpdateStatus sets the UpdateStatus field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusInput) SetUpdateStatus(v []*string) *DescribeConfigurationAggregatorSourcesStatusInput { - s.UpdateStatus = v +// SetResourceKeys sets the ResourceKeys field's value. +func (s *DescribeRemediationExecutionStatusInput) SetResourceKeys(v []*ResourceKey) *DescribeRemediationExecutionStatusInput { + s.ResourceKeys = v return s } -type DescribeConfigurationAggregatorSourcesStatusOutput struct { +type DescribeRemediationExecutionStatusOutput struct { _ struct{} `type:"structure"` - // Returns an AggregatedSourceStatus object. - AggregatedSourceStatusList []*AggregatedSourceStatus `type:"list"` - // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. 
NextToken *string `type:"string"` + + // Returns a list of remediation execution status objects. + RemediationExecutionStatuses []*RemediationExecutionStatus `type:"list"` } // String returns the string representation -func (s DescribeConfigurationAggregatorSourcesStatusOutput) String() string { +func (s DescribeRemediationExecutionStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigurationAggregatorSourcesStatusOutput) GoString() string { +func (s DescribeRemediationExecutionStatusOutput) GoString() string { return s.String() } -// SetAggregatedSourceStatusList sets the AggregatedSourceStatusList field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusOutput) SetAggregatedSourceStatusList(v []*AggregatedSourceStatus) *DescribeConfigurationAggregatorSourcesStatusOutput { - s.AggregatedSourceStatusList = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeRemediationExecutionStatusOutput) SetNextToken(v string) *DescribeRemediationExecutionStatusOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigurationAggregatorSourcesStatusOutput) SetNextToken(v string) *DescribeConfigurationAggregatorSourcesStatusOutput { - s.NextToken = &v +// SetRemediationExecutionStatuses sets the RemediationExecutionStatuses field's value. +func (s *DescribeRemediationExecutionStatusOutput) SetRemediationExecutionStatuses(v []*RemediationExecutionStatus) *DescribeRemediationExecutionStatusOutput { + s.RemediationExecutionStatuses = v return s } -type DescribeConfigurationAggregatorsInput struct { +type DescribeRetentionConfigurationsInput struct { _ struct{} `type:"structure"` - // The name of the configuration aggregators. - ConfigurationAggregatorNames []*string `type:"list"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` - // The maximum number of configuration aggregators returned on each page. The - // default is maximum. If you specify 0, AWS Config uses the default. - Limit *int64 `type:"integer"` + // A list of names of retention configurations for which you want details. If + // you do not specify a name, AWS Config returns details for all the retention + // configurations for that account. + // + // Currently, AWS Config supports only one retention configuration per region + // in your account. + RetentionConfigurationNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeRetentionConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRetentionConfigurationsInput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRetentionConfigurationsInput) SetNextToken(v string) *DescribeRetentionConfigurationsInput { + s.NextToken = &v + return s +} + +// SetRetentionConfigurationNames sets the RetentionConfigurationNames field's value. +func (s *DescribeRetentionConfigurationsInput) SetRetentionConfigurationNames(v []*string) *DescribeRetentionConfigurationsInput { + s.RetentionConfigurationNames = v + return s +} + +type DescribeRetentionConfigurationsOutput struct { + _ struct{} `type:"structure"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response.
NextToken *string `type:"string"` + + // Returns a retention configuration object. + RetentionConfigurations []*RetentionConfiguration `type:"list"` } // String returns the string representation -func (s DescribeConfigurationAggregatorsInput) String() string { +func (s DescribeRetentionConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigurationAggregatorsInput) GoString() string { +func (s DescribeRetentionConfigurationsOutput) GoString() string { return s.String() } -// SetConfigurationAggregatorNames sets the ConfigurationAggregatorNames field's value. -func (s *DescribeConfigurationAggregatorsInput) SetConfigurationAggregatorNames(v []*string) *DescribeConfigurationAggregatorsInput { - s.ConfigurationAggregatorNames = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeRetentionConfigurationsOutput) SetNextToken(v string) *DescribeRetentionConfigurationsOutput { + s.NextToken = &v return s } -// SetLimit sets the Limit field's value. -func (s *DescribeConfigurationAggregatorsInput) SetLimit(v int64) *DescribeConfigurationAggregatorsInput { - s.Limit = &v +// SetRetentionConfigurations sets the RetentionConfigurations field's value. +func (s *DescribeRetentionConfigurationsOutput) SetRetentionConfigurations(v []*RetentionConfiguration) *DescribeRetentionConfigurationsOutput { + s.RetentionConfigurations = v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigurationAggregatorsInput) SetNextToken(v string) *DescribeConfigurationAggregatorsInput { - s.NextToken = &v +// Identifies an AWS resource and indicates whether it complies with the AWS +// Config rule that it was evaluated against. +type Evaluation struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` + + // The ID of the AWS resource that was evaluated. + // + // ComplianceResourceId is a required field + ComplianceResourceId *string `min:"1" type:"string" required:"true"` + + // The type of AWS resource that was evaluated. + // + // ComplianceResourceType is a required field + ComplianceResourceType *string `min:"1" type:"string" required:"true"` + + // Indicates whether the AWS resource complies with the AWS Config rule that + // it was evaluated against. + // + // For the Evaluation data type, AWS Config supports only the COMPLIANT, NON_COMPLIANT, + // and NOT_APPLICABLE values. AWS Config does not support the INSUFFICIENT_DATA + // value for this data type. + // + // Similarly, AWS Config does not accept INSUFFICIENT_DATA as the value for + // ComplianceType from a PutEvaluations request. For example, an AWS Lambda + // function for a custom AWS Config rule cannot pass an INSUFFICIENT_DATA value + // to AWS Config. + // + // ComplianceType is a required field + ComplianceType *string `type:"string" required:"true" enum:"ComplianceType"` + + // The time of the event in AWS Config that triggered the evaluation. For event-based + // evaluations, the time indicates when AWS Config created the configuration + // item that triggered the evaluation. For periodic evaluations, the time indicates + // when AWS Config triggered the evaluation at the frequency that you specified + // (for example, every 24 hours). 
+ // + // OrderingTimestamp is a required field + OrderingTimestamp *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evaluation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Evaluation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Evaluation"} + if s.Annotation != nil && len(*s.Annotation) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Annotation", 1)) + } + if s.ComplianceResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceResourceId")) + } + if s.ComplianceResourceId != nil && len(*s.ComplianceResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceId", 1)) + } + if s.ComplianceResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceResourceType")) + } + if s.ComplianceResourceType != nil && len(*s.ComplianceResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceType", 1)) + } + if s.ComplianceType == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceType")) + } + if s.OrderingTimestamp == nil { + invalidParams.Add(request.NewErrParamRequired("OrderingTimestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnnotation sets the Annotation field's value. +func (s *Evaluation) SetAnnotation(v string) *Evaluation { + s.Annotation = &v return s } -type DescribeConfigurationAggregatorsOutput struct { - _ struct{} `type:"structure"` - - // Returns a ConfigurationAggregators object. - ConfigurationAggregators []*ConfigurationAggregator `type:"list"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` -} - -// String returns the string representation -func (s DescribeConfigurationAggregatorsOutput) String() string { - return awsutil.Prettify(s) +// SetComplianceResourceId sets the ComplianceResourceId field's value. +func (s *Evaluation) SetComplianceResourceId(v string) *Evaluation { + s.ComplianceResourceId = &v + return s } -// GoString returns the string representation -func (s DescribeConfigurationAggregatorsOutput) GoString() string { - return s.String() +// SetComplianceResourceType sets the ComplianceResourceType field's value. +func (s *Evaluation) SetComplianceResourceType(v string) *Evaluation { + s.ComplianceResourceType = &v + return s } -// SetConfigurationAggregators sets the ConfigurationAggregators field's value. -func (s *DescribeConfigurationAggregatorsOutput) SetConfigurationAggregators(v []*ConfigurationAggregator) *DescribeConfigurationAggregatorsOutput { - s.ConfigurationAggregators = v +// SetComplianceType sets the ComplianceType field's value. +func (s *Evaluation) SetComplianceType(v string) *Evaluation { + s.ComplianceType = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeConfigurationAggregatorsOutput) SetNextToken(v string) *DescribeConfigurationAggregatorsOutput { - s.NextToken = &v +// SetOrderingTimestamp sets the OrderingTimestamp field's value. +func (s *Evaluation) SetOrderingTimestamp(v time.Time) *Evaluation { + s.OrderingTimestamp = &v return s } -// The input for the DescribeConfigurationRecorderStatus action. 
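// A minimal usage sketch, assuming a custom-rule AWS Lambda function that
// reports a result through the Evaluation type generated above. The resource
// ID, resource type, annotation, and result token are hypothetical
// placeholders. Per the comments above, ComplianceType may only be COMPLIANT,
// NON_COMPLIANT, or NOT_APPLICABLE here -- PutEvaluations rejects
// INSUFFICIENT_DATA.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	eval := &configservice.Evaluation{
		ComplianceResourceId:   aws.String("i-0123456789abcdef0"), // placeholder resource
		ComplianceResourceType: aws.String("AWS::EC2::Instance"),
		ComplianceType:         aws.String(configservice.ComplianceTypeNonCompliant),
		OrderingTimestamp:      aws.Time(time.Now()),
		Annotation:             aws.String("required tag is missing"),
	}
	// Validate enforces the generated required-field and min-length rules
	// before any request is sent on the wire.
	if err := eval.Validate(); err != nil {
		fmt.Println("invalid evaluation:", err)
		return
	}

	if _, err := svc.PutEvaluations(&configservice.PutEvaluationsInput{
		Evaluations: []*configservice.Evaluation{eval},
		ResultToken: aws.String("token-from-invoking-event"), // placeholder
	}); err != nil {
		fmt.Println("PutEvaluations failed:", err)
	}
}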
-type DescribeConfigurationRecorderStatusInput struct { +// The details of an AWS Config evaluation. Provides the AWS resource that was +// evaluated, the compliance of the resource, related time stamps, and supplementary +// information. +type EvaluationResult struct { _ struct{} `type:"structure"` - // The name(s) of the configuration recorder. If the name is not specified, - // the action returns the current status of all the configuration recorders - // associated with the account. - ConfigurationRecorderNames []*string `type:"list"` -} + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` -// String returns the string representation -func (s DescribeConfigurationRecorderStatusInput) String() string { - return awsutil.Prettify(s) -} + // Indicates whether the AWS resource complies with the AWS Config rule that + // evaluated it. + // + // For the EvaluationResult data type, AWS Config supports only the COMPLIANT, + // NON_COMPLIANT, and NOT_APPLICABLE values. AWS Config does not support the + // INSUFFICIENT_DATA value for the EvaluationResult data type. + ComplianceType *string `type:"string" enum:"ComplianceType"` -// GoString returns the string representation -func (s DescribeConfigurationRecorderStatusInput) GoString() string { - return s.String() -} + // The time when the AWS Config rule evaluated the AWS resource. + ConfigRuleInvokedTime *time.Time `type:"timestamp"` -// SetConfigurationRecorderNames sets the ConfigurationRecorderNames field's value. -func (s *DescribeConfigurationRecorderStatusInput) SetConfigurationRecorderNames(v []*string) *DescribeConfigurationRecorderStatusInput { - s.ConfigurationRecorderNames = v - return s -} + // Uniquely identifies the evaluation result. + EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` -// The output for the DescribeConfigurationRecorderStatus action, in JSON format. -type DescribeConfigurationRecorderStatusOutput struct { - _ struct{} `type:"structure"` + // The time when AWS Config recorded the evaluation result. + ResultRecordedTime *time.Time `type:"timestamp"` - // A list that contains status of the specified recorders. - ConfigurationRecordersStatus []*ConfigurationRecorderStatus `type:"list"` + // An encrypted token that associates an evaluation with an AWS Config rule. + // The token identifies the rule, the AWS resource being evaluated, and the + // event that triggered the evaluation. + ResultToken *string `type:"string"` } // String returns the string representation -func (s DescribeConfigurationRecorderStatusOutput) String() string { +func (s EvaluationResult) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigurationRecorderStatusOutput) GoString() string { +func (s EvaluationResult) GoString() string { return s.String() } -// SetConfigurationRecordersStatus sets the ConfigurationRecordersStatus field's value. -func (s *DescribeConfigurationRecorderStatusOutput) SetConfigurationRecordersStatus(v []*ConfigurationRecorderStatus) *DescribeConfigurationRecorderStatusOutput { - s.ConfigurationRecordersStatus = v +// SetAnnotation sets the Annotation field's value. +func (s *EvaluationResult) SetAnnotation(v string) *EvaluationResult { + s.Annotation = &v return s } -// The input for the DescribeConfigurationRecorders action. 
-type DescribeConfigurationRecordersInput struct { - _ struct{} `type:"structure"` +// SetComplianceType sets the ComplianceType field's value. +func (s *EvaluationResult) SetComplianceType(v string) *EvaluationResult { + s.ComplianceType = &v + return s +} - // A list of configuration recorder names. - ConfigurationRecorderNames []*string `type:"list"` +// SetConfigRuleInvokedTime sets the ConfigRuleInvokedTime field's value. +func (s *EvaluationResult) SetConfigRuleInvokedTime(v time.Time) *EvaluationResult { + s.ConfigRuleInvokedTime = &v + return s } -// String returns the string representation -func (s DescribeConfigurationRecordersInput) String() string { - return awsutil.Prettify(s) +// SetEvaluationResultIdentifier sets the EvaluationResultIdentifier field's value. +func (s *EvaluationResult) SetEvaluationResultIdentifier(v *EvaluationResultIdentifier) *EvaluationResult { + s.EvaluationResultIdentifier = v + return s } -// GoString returns the string representation -func (s DescribeConfigurationRecordersInput) GoString() string { - return s.String() +// SetResultRecordedTime sets the ResultRecordedTime field's value. +func (s *EvaluationResult) SetResultRecordedTime(v time.Time) *EvaluationResult { + s.ResultRecordedTime = &v + return s } -// SetConfigurationRecorderNames sets the ConfigurationRecorderNames field's value. -func (s *DescribeConfigurationRecordersInput) SetConfigurationRecorderNames(v []*string) *DescribeConfigurationRecordersInput { - s.ConfigurationRecorderNames = v +// SetResultToken sets the ResultToken field's value. +func (s *EvaluationResult) SetResultToken(v string) *EvaluationResult { + s.ResultToken = &v return s } -// The output for the DescribeConfigurationRecorders action. -type DescribeConfigurationRecordersOutput struct { +// Uniquely identifies an evaluation result. +type EvaluationResultIdentifier struct { _ struct{} `type:"structure"` - // A list that contains the descriptions of the specified configuration recorders. - ConfigurationRecorders []*ConfigurationRecorder `type:"list"` + // Identifies an AWS Config rule used to evaluate an AWS resource, and provides + // the type and ID of the evaluated resource. + EvaluationResultQualifier *EvaluationResultQualifier `type:"structure"` + + // The time of the event that triggered the evaluation of your AWS resources. + // The time can indicate when AWS Config delivered a configuration item change + // notification, or it can indicate when AWS Config delivered the configuration + // snapshot, depending on which event triggered the evaluation. + OrderingTimestamp *time.Time `type:"timestamp"` } // String returns the string representation -func (s DescribeConfigurationRecordersOutput) String() string { +func (s EvaluationResultIdentifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeConfigurationRecordersOutput) GoString() string { +func (s EvaluationResultIdentifier) GoString() string { return s.String() } -// SetConfigurationRecorders sets the ConfigurationRecorders field's value. -func (s *DescribeConfigurationRecordersOutput) SetConfigurationRecorders(v []*ConfigurationRecorder) *DescribeConfigurationRecordersOutput { - s.ConfigurationRecorders = v +// SetEvaluationResultQualifier sets the EvaluationResultQualifier field's value. 
+func (s *EvaluationResultIdentifier) SetEvaluationResultQualifier(v *EvaluationResultQualifier) *EvaluationResultIdentifier { + s.EvaluationResultQualifier = v return s } -// The input for the DeliveryChannelStatus action. -type DescribeDeliveryChannelStatusInput struct { - _ struct{} `type:"structure"` - - // A list of delivery channel names. - DeliveryChannelNames []*string `type:"list"` -} - -// String returns the string representation -func (s DescribeDeliveryChannelStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDeliveryChannelStatusInput) GoString() string { - return s.String() -} - -// SetDeliveryChannelNames sets the DeliveryChannelNames field's value. -func (s *DescribeDeliveryChannelStatusInput) SetDeliveryChannelNames(v []*string) *DescribeDeliveryChannelStatusInput { - s.DeliveryChannelNames = v +// SetOrderingTimestamp sets the OrderingTimestamp field's value. +func (s *EvaluationResultIdentifier) SetOrderingTimestamp(v time.Time) *EvaluationResultIdentifier { + s.OrderingTimestamp = &v return s } -// The output for the DescribeDeliveryChannelStatus action. -type DescribeDeliveryChannelStatusOutput struct { +// Identifies an AWS Config rule that evaluated an AWS resource, and provides +// the type and ID of the resource that the rule evaluated. +type EvaluationResultQualifier struct { _ struct{} `type:"structure"` - // A list that contains the status of a specified delivery channel. - DeliveryChannelsStatus []*DeliveryChannelStatus `type:"list"` + // The name of the AWS Config rule that was used in the evaluation. + ConfigRuleName *string `min:"1" type:"string"` + + // The ID of the evaluated AWS resource. + ResourceId *string `min:"1" type:"string"` + + // The type of AWS resource that was evaluated. + ResourceType *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeDeliveryChannelStatusOutput) String() string { +func (s EvaluationResultQualifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDeliveryChannelStatusOutput) GoString() string { +func (s EvaluationResultQualifier) GoString() string { return s.String() } -// SetDeliveryChannelsStatus sets the DeliveryChannelsStatus field's value. -func (s *DescribeDeliveryChannelStatusOutput) SetDeliveryChannelsStatus(v []*DeliveryChannelStatus) *DescribeDeliveryChannelStatusOutput { - s.DeliveryChannelsStatus = v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *EvaluationResultQualifier) SetConfigRuleName(v string) *EvaluationResultQualifier { + s.ConfigRuleName = &v return s } -// The input for the DescribeDeliveryChannels action. -type DescribeDeliveryChannelsInput struct { - _ struct{} `type:"structure"` - - // A list of delivery channel names. - DeliveryChannelNames []*string `type:"list"` -} - -// String returns the string representation -func (s DescribeDeliveryChannelsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDeliveryChannelsInput) GoString() string { - return s.String() +// SetResourceId sets the ResourceId field's value. +func (s *EvaluationResultQualifier) SetResourceId(v string) *EvaluationResultQualifier { + s.ResourceId = &v + return s } -// SetDeliveryChannelNames sets the DeliveryChannelNames field's value. 
-func (s *DescribeDeliveryChannelsInput) SetDeliveryChannelNames(v []*string) *DescribeDeliveryChannelsInput { - s.DeliveryChannelNames = v +// SetResourceType sets the ResourceType field's value. +func (s *EvaluationResultQualifier) SetResourceType(v string) *EvaluationResultQualifier { + s.ResourceType = &v return s } -// The output for the DescribeDeliveryChannels action. -type DescribeDeliveryChannelsOutput struct { +// The controls that AWS Config uses for executing remediations. +type ExecutionControls struct { _ struct{} `type:"structure"` - // A list that contains the descriptions of the specified delivery channel. - DeliveryChannels []*DeliveryChannel `type:"list"` + // A SsmControls object. + SsmControls *SsmControls `type:"structure"` } // String returns the string representation -func (s DescribeDeliveryChannelsOutput) String() string { +func (s ExecutionControls) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDeliveryChannelsOutput) GoString() string { +func (s ExecutionControls) GoString() string { return s.String() } -// SetDeliveryChannels sets the DeliveryChannels field's value. -func (s *DescribeDeliveryChannelsOutput) SetDeliveryChannels(v []*DeliveryChannel) *DescribeDeliveryChannelsOutput { - s.DeliveryChannels = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecutionControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecutionControls"} + if s.SsmControls != nil { + if err := s.SsmControls.Validate(); err != nil { + invalidParams.AddNested("SsmControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSsmControls sets the SsmControls field's value. +func (s *ExecutionControls) SetSsmControls(v *SsmControls) *ExecutionControls { + s.SsmControls = v return s } -type DescribePendingAggregationRequestsInput struct { +// List of each of the failed delete remediation exceptions with specific reasons. +type FailedDeleteRemediationExceptionsBatch struct { _ struct{} `type:"structure"` - // The maximum number of evaluation results returned on each page. The default - // is maximum. If you specify 0, AWS Config uses the default. - Limit *int64 `type:"integer"` + // Returns remediation exception resource key object of the failed items. + FailedItems []*RemediationExceptionResourceKey `min:"1" type:"list"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // Returns a failure message for delete remediation exception. For example, + // AWS Config creates an exception due to an internal error. + FailureMessage *string `type:"string"` } // String returns the string representation -func (s DescribePendingAggregationRequestsInput) String() string { +func (s FailedDeleteRemediationExceptionsBatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribePendingAggregationRequestsInput) GoString() string { +func (s FailedDeleteRemediationExceptionsBatch) GoString() string { return s.String() } -// SetLimit sets the Limit field's value. -func (s *DescribePendingAggregationRequestsInput) SetLimit(v int64) *DescribePendingAggregationRequestsInput { - s.Limit = &v +// SetFailedItems sets the FailedItems field's value. 
+func (s *FailedDeleteRemediationExceptionsBatch) SetFailedItems(v []*RemediationExceptionResourceKey) *FailedDeleteRemediationExceptionsBatch { + s.FailedItems = v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribePendingAggregationRequestsInput) SetNextToken(v string) *DescribePendingAggregationRequestsInput { - s.NextToken = &v +// SetFailureMessage sets the FailureMessage field's value. +func (s *FailedDeleteRemediationExceptionsBatch) SetFailureMessage(v string) *FailedDeleteRemediationExceptionsBatch { + s.FailureMessage = &v return s } -type DescribePendingAggregationRequestsOutput struct { +// List of each of the failed remediations with specific reasons. +type FailedRemediationBatch struct { _ struct{} `type:"structure"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // Returns remediation configurations of the failed items. + FailedItems []*RemediationConfiguration `type:"list"` - // Returns a PendingAggregationRequests object. - PendingAggregationRequests []*PendingAggregationRequest `type:"list"` + // Returns a failure message. For example, the resource is already compliant. + FailureMessage *string `type:"string"` } // String returns the string representation -func (s DescribePendingAggregationRequestsOutput) String() string { +func (s FailedRemediationBatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribePendingAggregationRequestsOutput) GoString() string { +func (s FailedRemediationBatch) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribePendingAggregationRequestsOutput) SetNextToken(v string) *DescribePendingAggregationRequestsOutput { - s.NextToken = &v +// SetFailedItems sets the FailedItems field's value. +func (s *FailedRemediationBatch) SetFailedItems(v []*RemediationConfiguration) *FailedRemediationBatch { + s.FailedItems = v return s } -// SetPendingAggregationRequests sets the PendingAggregationRequests field's value. -func (s *DescribePendingAggregationRequestsOutput) SetPendingAggregationRequests(v []*PendingAggregationRequest) *DescribePendingAggregationRequestsOutput { - s.PendingAggregationRequests = v +// SetFailureMessage sets the FailureMessage field's value. +func (s *FailedRemediationBatch) SetFailureMessage(v string) *FailedRemediationBatch { + s.FailureMessage = &v return s } -type DescribeRemediationConfigurationsInput struct { +// List of each of the failed remediation exceptions with specific reasons. +type FailedRemediationExceptionBatch struct { _ struct{} `type:"structure"` - // A list of AWS Config rule names of remediation configurations for which you - // want details. - // - // ConfigRuleNames is a required field - ConfigRuleNames []*string `type:"list" required:"true"` + // Returns remediation exception resource key object of the failed items. + FailedItems []*RemediationException `type:"list"` + + // Returns a failure message. For example, the auto-remediation has failed. 
+ FailureMessage *string `type:"string"` } // String returns the string representation -func (s DescribeRemediationConfigurationsInput) String() string { +func (s FailedRemediationExceptionBatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeRemediationConfigurationsInput) GoString() string { +func (s FailedRemediationExceptionBatch) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeRemediationConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeRemediationConfigurationsInput"} - if s.ConfigRuleNames == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleNames")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetFailedItems sets the FailedItems field's value. +func (s *FailedRemediationExceptionBatch) SetFailedItems(v []*RemediationException) *FailedRemediationExceptionBatch { + s.FailedItems = v + return s } -// SetConfigRuleNames sets the ConfigRuleNames field's value. -func (s *DescribeRemediationConfigurationsInput) SetConfigRuleNames(v []*string) *DescribeRemediationConfigurationsInput { - s.ConfigRuleNames = v +// SetFailureMessage sets the FailureMessage field's value. +func (s *FailedRemediationExceptionBatch) SetFailureMessage(v string) *FailedRemediationExceptionBatch { + s.FailureMessage = &v return s } -type DescribeRemediationConfigurationsOutput struct { +// Details about the fields, such as the name of the field. +type FieldInfo struct { _ struct{} `type:"structure"` - // Returns a remediation configuration object. - RemediationConfigurations []*RemediationConfiguration `type:"list"` + // Name of the field. + Name *string `type:"string"` } // String returns the string representation -func (s DescribeRemediationConfigurationsOutput) String() string { +func (s FieldInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeRemediationConfigurationsOutput) GoString() string { +func (s FieldInfo) GoString() string { return s.String() } -// SetRemediationConfigurations sets the RemediationConfigurations field's value. -func (s *DescribeRemediationConfigurationsOutput) SetRemediationConfigurations(v []*RemediationConfiguration) *DescribeRemediationConfigurationsOutput { - s.RemediationConfigurations = v +// SetName sets the Name field's value. +func (s *FieldInfo) SetName(v string) *FieldInfo { + s.Name = &v return s } -type DescribeRemediationExecutionStatusInput struct { +type GetAggregateComplianceDetailsByConfigRuleInput struct { _ struct{} `type:"structure"` - // A list of AWS Config rule names. + // The 12-digit account ID of the source account. + // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The source region from where the data is aggregated. + // + // AwsRegion is a required field + AwsRegion *string `min:"1" type:"string" required:"true"` + + // The resource compliance status. + // + // For the GetAggregateComplianceDetailsByConfigRuleRequest data type, AWS Config + // supports only the COMPLIANT and NON_COMPLIANT values. AWS Config does not support + // the NOT_APPLICABLE and INSUFFICIENT_DATA values. + ComplianceType *string `type:"string" enum:"ComplianceType"` + + // The name of the AWS Config rule for which you want compliance information.
// // ConfigRuleName is a required field ConfigRuleName *string `min:"1" type:"string" required:"true"` - // The maximum number of RemediationExecutionStatuses returned on each page. - // The default is maximum. If you specify 0, AWS Config uses the default. + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + + // The maximum number of evaluation results returned on each page. The default + // is 50. You cannot specify a number greater than 100. If you specify 0, AWS + // Config uses the default. Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. - NextToken *string `min:"1" type:"string"` - - // A list of resource keys to be processed with the current request. Each element - // in the list consists of the resource type and resource ID. - ResourceKeys []*ResourceKey `min:"1" type:"list"` + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeRemediationExecutionStatusInput) String() string { +func (s GetAggregateComplianceDetailsByConfigRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeRemediationExecutionStatusInput) GoString() string { +func (s GetAggregateComplianceDetailsByConfigRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeRemediationExecutionStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeRemediationExecutionStatusInput"} +func (s *GetAggregateComplianceDetailsByConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAggregateComplianceDetailsByConfigRuleInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("AwsRegion")) + } + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) + } if s.ConfigRuleName == nil { invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) } if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) } - if s.ResourceKeys != nil { - for i, v := range s.ResourceKeys { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) - } - } + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) } if invalidParams.Len() > 0 { @@ -9207,210 +11253,128 @@ func (s *DescribeRemediationExecutionStatusInput) Validate() error { return nil } -// SetConfigRuleName sets the ConfigRuleName field's value. 
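// A minimal pagination sketch for DescribeRemediationExecutionStatus, following
// the Limit/NextToken convention documented on the generated types above. The
// rule name is a hypothetical placeholder; an empty NextToken on the response
// marks the last page.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	input := &configservice.DescribeRemediationExecutionStatusInput{
		ConfigRuleName: aws.String("s3-bucket-public-read-prohibited"), // placeholder
		Limit:          aws.Int64(25),
	}
	for {
		out, err := svc.DescribeRemediationExecutionStatus(input)
		if err != nil {
			fmt.Println("describe failed:", err)
			return
		}
		for _, st := range out.RemediationExecutionStatuses {
			if st.ResourceKey != nil {
				fmt.Printf("%s: %s\n", aws.StringValue(st.ResourceKey.ResourceId), aws.StringValue(st.State))
			}
		}
		if aws.StringValue(out.NextToken) == "" {
			break // no more pages
		}
		input.NextToken = out.NextToken
	}
}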
-func (s *DescribeRemediationExecutionStatusInput) SetConfigRuleName(v string) *DescribeRemediationExecutionStatusInput { - s.ConfigRuleName = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *DescribeRemediationExecutionStatusInput) SetLimit(v int64) *DescribeRemediationExecutionStatusInput { - s.Limit = &v +// SetAccountId sets the AccountId field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetAccountId(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.AccountId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeRemediationExecutionStatusInput) SetNextToken(v string) *DescribeRemediationExecutionStatusInput { - s.NextToken = &v +// SetAwsRegion sets the AwsRegion field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetAwsRegion(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.AwsRegion = &v return s } -// SetResourceKeys sets the ResourceKeys field's value. -func (s *DescribeRemediationExecutionStatusInput) SetResourceKeys(v []*ResourceKey) *DescribeRemediationExecutionStatusInput { - s.ResourceKeys = v +// SetComplianceType sets the ComplianceType field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetComplianceType(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.ComplianceType = &v return s } -type DescribeRemediationExecutionStatusOutput struct { - _ struct{} `type:"structure"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `min:"1" type:"string"` - - // Returns a list of remediation execution statuses objects. - RemediationExecutionStatuses []*RemediationExecutionStatus `type:"list"` -} - -// String returns the string representation -func (s DescribeRemediationExecutionStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeRemediationExecutionStatusOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeRemediationExecutionStatusOutput) SetNextToken(v string) *DescribeRemediationExecutionStatusOutput { - s.NextToken = &v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetConfigRuleName(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.ConfigRuleName = &v return s } -// SetRemediationExecutionStatuses sets the RemediationExecutionStatuses field's value. -func (s *DescribeRemediationExecutionStatusOutput) SetRemediationExecutionStatuses(v []*RemediationExecutionStatus) *DescribeRemediationExecutionStatusOutput { - s.RemediationExecutionStatuses = v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetConfigurationAggregatorName(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.ConfigurationAggregatorName = &v return s } -type DescribeRetentionConfigurationsInput struct { - _ struct{} `type:"structure"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` - - // A list of names of retention configurations for which you want details. If - // you do not specify a name, AWS Config returns details for all the retention - // configurations for that account. 
- // - // Currently, AWS Config supports only one retention configuration per region - // in your account. - RetentionConfigurationNames []*string `type:"list"` -} - -// String returns the string representation -func (s DescribeRetentionConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeRetentionConfigurationsInput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeRetentionConfigurationsInput) SetNextToken(v string) *DescribeRetentionConfigurationsInput { - s.NextToken = &v +// SetLimit sets the Limit field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetLimit(v int64) *GetAggregateComplianceDetailsByConfigRuleInput { + s.Limit = &v return s -} - -// SetRetentionConfigurationNames sets the RetentionConfigurationNames field's value. -func (s *DescribeRetentionConfigurationsInput) SetRetentionConfigurationNames(v []*string) *DescribeRetentionConfigurationsInput { - s.RetentionConfigurationNames = v +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetNextToken(v string) *GetAggregateComplianceDetailsByConfigRuleInput { + s.NextToken = &v return s } -type DescribeRetentionConfigurationsOutput struct { +type GetAggregateComplianceDetailsByConfigRuleOutput struct { _ struct{} `type:"structure"` + // Returns an AggregateEvaluationResults object. + AggregateEvaluationResults []*AggregateEvaluationResult `type:"list"` + // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` - - // Returns a retention configuration object. - RetentionConfigurations []*RetentionConfiguration `type:"list"` } // String returns the string representation -func (s DescribeRetentionConfigurationsOutput) String() string { +func (s GetAggregateComplianceDetailsByConfigRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeRetentionConfigurationsOutput) GoString() string { +func (s GetAggregateComplianceDetailsByConfigRuleOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribeRetentionConfigurationsOutput) SetNextToken(v string) *DescribeRetentionConfigurationsOutput { - s.NextToken = &v +// SetAggregateEvaluationResults sets the AggregateEvaluationResults field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleOutput) SetAggregateEvaluationResults(v []*AggregateEvaluationResult) *GetAggregateComplianceDetailsByConfigRuleOutput { + s.AggregateEvaluationResults = v return s } -// SetRetentionConfigurations sets the RetentionConfigurations field's value. -func (s *DescribeRetentionConfigurationsOutput) SetRetentionConfigurations(v []*RetentionConfiguration) *DescribeRetentionConfigurationsOutput { - s.RetentionConfigurations = v +// SetNextToken sets the NextToken field's value. +func (s *GetAggregateComplianceDetailsByConfigRuleOutput) SetNextToken(v string) *GetAggregateComplianceDetailsByConfigRuleOutput { + s.NextToken = &v return s } -// Identifies an AWS resource and indicates whether it complies with the AWS -// Config rule that it was evaluated against. -type Evaluation struct { +type GetAggregateConfigRuleComplianceSummaryInput struct { _ struct{} `type:"structure"` - // Supplementary information about how the evaluation determined the compliance. 
- Annotation *string `min:"1" type:"string"` - - // The ID of the AWS resource that was evaluated. + // The name of the configuration aggregator. // - // ComplianceResourceId is a required field - ComplianceResourceId *string `min:"1" type:"string" required:"true"` + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - // The type of AWS resource that was evaluated. - // - // ComplianceResourceType is a required field - ComplianceResourceType *string `min:"1" type:"string" required:"true"` + // Filters the results based on the ConfigRuleComplianceSummaryFilters object. + Filters *ConfigRuleComplianceSummaryFilters `type:"structure"` - // Indicates whether the AWS resource complies with the AWS Config rule that - // it was evaluated against. - // - // For the Evaluation data type, AWS Config supports only the COMPLIANT, NON_COMPLIANT, - // and NOT_APPLICABLE values. AWS Config does not support the INSUFFICIENT_DATA - // value for this data type. - // - // Similarly, AWS Config does not accept INSUFFICIENT_DATA as the value for - // ComplianceType from a PutEvaluations request. For example, an AWS Lambda - // function for a custom AWS Config rule cannot pass an INSUFFICIENT_DATA value - // to AWS Config. - // - // ComplianceType is a required field - ComplianceType *string `type:"string" required:"true" enum:"ComplianceType"` + // Groups the result based on ACCOUNT_ID or AWS_REGION. + GroupByKey *string `type:"string" enum:"ConfigRuleComplianceSummaryGroupKey"` - // The time of the event in AWS Config that triggered the evaluation. For event-based - // evaluations, the time indicates when AWS Config created the configuration - // item that triggered the evaluation. For periodic evaluations, the time indicates - // when AWS Config triggered the evaluation at the frequency that you specified - // (for example, every 24 hours). - // - // OrderingTimestamp is a required field - OrderingTimestamp *time.Time `type:"timestamp" required:"true"` + // The maximum number of evaluation results returned on each page. The default + // is 1000. You cannot specify a number greater than 1000. If you specify 0, + // AWS Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s Evaluation) String() string { +func (s GetAggregateConfigRuleComplianceSummaryInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Evaluation) GoString() string { +func (s GetAggregateConfigRuleComplianceSummaryInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Evaluation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Evaluation"} - if s.Annotation != nil && len(*s.Annotation) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Annotation", 1)) - } - if s.ComplianceResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ComplianceResourceId")) - } - if s.ComplianceResourceId != nil && len(*s.ComplianceResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceId", 1)) - } - if s.ComplianceResourceType == nil { - invalidParams.Add(request.NewErrParamRequired("ComplianceResourceType")) - } - if s.ComplianceResourceType != nil && len(*s.ComplianceResourceType) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceType", 1)) +func (s *GetAggregateConfigRuleComplianceSummaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAggregateConfigRuleComplianceSummaryInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) } - if s.ComplianceType == nil { - invalidParams.Add(request.NewErrParamRequired("ComplianceType")) + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) } - if s.OrderingTimestamp == nil { - invalidParams.Add(request.NewErrParamRequired("OrderingTimestamp")) + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -9419,283 +11383,316 @@ func (s *Evaluation) Validate() error { return nil } -// SetAnnotation sets the Annotation field's value. -func (s *Evaluation) SetAnnotation(v string) *Evaluation { - s.Annotation = &v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *GetAggregateConfigRuleComplianceSummaryInput) SetConfigurationAggregatorName(v string) *GetAggregateConfigRuleComplianceSummaryInput { + s.ConfigurationAggregatorName = &v return s } -// SetComplianceResourceId sets the ComplianceResourceId field's value. -func (s *Evaluation) SetComplianceResourceId(v string) *Evaluation { - s.ComplianceResourceId = &v +// SetFilters sets the Filters field's value. +func (s *GetAggregateConfigRuleComplianceSummaryInput) SetFilters(v *ConfigRuleComplianceSummaryFilters) *GetAggregateConfigRuleComplianceSummaryInput { + s.Filters = v return s } -// SetComplianceResourceType sets the ComplianceResourceType field's value. -func (s *Evaluation) SetComplianceResourceType(v string) *Evaluation { - s.ComplianceResourceType = &v +// SetGroupByKey sets the GroupByKey field's value. +func (s *GetAggregateConfigRuleComplianceSummaryInput) SetGroupByKey(v string) *GetAggregateConfigRuleComplianceSummaryInput { + s.GroupByKey = &v return s } -// SetComplianceType sets the ComplianceType field's value. -func (s *Evaluation) SetComplianceType(v string) *Evaluation { - s.ComplianceType = &v +// SetLimit sets the Limit field's value. +func (s *GetAggregateConfigRuleComplianceSummaryInput) SetLimit(v int64) *GetAggregateConfigRuleComplianceSummaryInput { + s.Limit = &v return s } -// SetOrderingTimestamp sets the OrderingTimestamp field's value. -func (s *Evaluation) SetOrderingTimestamp(v time.Time) *Evaluation { - s.OrderingTimestamp = &v +// SetNextToken sets the NextToken field's value. 
+func (s *GetAggregateConfigRuleComplianceSummaryInput) SetNextToken(v string) *GetAggregateConfigRuleComplianceSummaryInput { + s.NextToken = &v return s } -// The details of an AWS Config evaluation. Provides the AWS resource that was -// evaluated, the compliance of the resource, related time stamps, and supplementary -// information. -type EvaluationResult struct { +type GetAggregateConfigRuleComplianceSummaryOutput struct { _ struct{} `type:"structure"` - // Supplementary information about how the evaluation determined the compliance. - Annotation *string `min:"1" type:"string"` - - // Indicates whether the AWS resource complies with the AWS Config rule that - // evaluated it. - // - // For the EvaluationResult data type, AWS Config supports only the COMPLIANT, - // NON_COMPLIANT, and NOT_APPLICABLE values. AWS Config does not support the - // INSUFFICIENT_DATA value for the EvaluationResult data type. - ComplianceType *string `type:"string" enum:"ComplianceType"` - - // The time when the AWS Config rule evaluated the AWS resource. - ConfigRuleInvokedTime *time.Time `type:"timestamp"` - - // Uniquely identifies the evaluation result. - EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` + // Returns a list of AggregateComplianceCounts object. + AggregateComplianceCounts []*AggregateComplianceCount `type:"list"` - // The time when AWS Config recorded the evaluation result. - ResultRecordedTime *time.Time `type:"timestamp"` + // Groups the result based on ACCOUNT_ID or AWS_REGION. + GroupByKey *string `min:"1" type:"string"` - // An encrypted token that associates an evaluation with an AWS Config rule. - // The token identifies the rule, the AWS resource being evaluated, and the - // event that triggered the evaluation. - ResultToken *string `type:"string"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } // String returns the string representation -func (s EvaluationResult) String() string { +func (s GetAggregateConfigRuleComplianceSummaryOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EvaluationResult) GoString() string { +func (s GetAggregateConfigRuleComplianceSummaryOutput) GoString() string { return s.String() } -// SetAnnotation sets the Annotation field's value. -func (s *EvaluationResult) SetAnnotation(v string) *EvaluationResult { - s.Annotation = &v +// SetAggregateComplianceCounts sets the AggregateComplianceCounts field's value. +func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetAggregateComplianceCounts(v []*AggregateComplianceCount) *GetAggregateConfigRuleComplianceSummaryOutput { + s.AggregateComplianceCounts = v return s } -// SetComplianceType sets the ComplianceType field's value. -func (s *EvaluationResult) SetComplianceType(v string) *EvaluationResult { - s.ComplianceType = &v +// SetGroupByKey sets the GroupByKey field's value. +func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetGroupByKey(v string) *GetAggregateConfigRuleComplianceSummaryOutput { + s.GroupByKey = &v return s } -// SetConfigRuleInvokedTime sets the ConfigRuleInvokedTime field's value. -func (s *EvaluationResult) SetConfigRuleInvokedTime(v time.Time) *EvaluationResult { - s.ConfigRuleInvokedTime = &v +// SetNextToken sets the NextToken field's value. 
+func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetNextToken(v string) *GetAggregateConfigRuleComplianceSummaryOutput { + s.NextToken = &v return s } -// SetEvaluationResultIdentifier sets the EvaluationResultIdentifier field's value. -func (s *EvaluationResult) SetEvaluationResultIdentifier(v *EvaluationResultIdentifier) *EvaluationResult { - s.EvaluationResultIdentifier = v - return s +type GetAggregateDiscoveredResourceCountsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + + // Filters the results based on the ResourceCountFilters object. + Filters *ResourceCountFilters `type:"structure"` + + // The key to group the resource counts. + GroupByKey *string `type:"string" enum:"ResourceCountGroupKey"` + + // The maximum number of GroupedResourceCount objects returned on each page. + // The default is 1000. You cannot specify a number greater than 1000. If you + // specify 0, AWS Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` } -// SetResultRecordedTime sets the ResultRecordedTime field's value. -func (s *EvaluationResult) SetResultRecordedTime(v time.Time) *EvaluationResult { - s.ResultRecordedTime = &v - return s +// String returns the string representation +func (s GetAggregateDiscoveredResourceCountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAggregateDiscoveredResourceCountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAggregateDiscoveredResourceCountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAggregateDiscoveredResourceCountsInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) + } + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetResultToken sets the ResultToken field's value. -func (s *EvaluationResult) SetResultToken(v string) *EvaluationResult { - s.ResultToken = &v +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *GetAggregateDiscoveredResourceCountsInput) SetConfigurationAggregatorName(v string) *GetAggregateDiscoveredResourceCountsInput { + s.ConfigurationAggregatorName = &v return s } -// Uniquely identifies an evaluation result. -type EvaluationResultIdentifier struct { - _ struct{} `type:"structure"` - - // Identifies an AWS Config rule used to evaluate an AWS resource, and provides - // the type and ID of the evaluated resource. - EvaluationResultQualifier *EvaluationResultQualifier `type:"structure"` - - // The time of the event that triggered the evaluation of your AWS resources. 
- // The time can indicate when AWS Config delivered a configuration item change - // notification, or it can indicate when AWS Config delivered the configuration - // snapshot, depending on which event triggered the evaluation. - OrderingTimestamp *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s EvaluationResultIdentifier) String() string { - return awsutil.Prettify(s) +// SetFilters sets the Filters field's value. +func (s *GetAggregateDiscoveredResourceCountsInput) SetFilters(v *ResourceCountFilters) *GetAggregateDiscoveredResourceCountsInput { + s.Filters = v + return s } -// GoString returns the string representation -func (s EvaluationResultIdentifier) GoString() string { - return s.String() +// SetGroupByKey sets the GroupByKey field's value. +func (s *GetAggregateDiscoveredResourceCountsInput) SetGroupByKey(v string) *GetAggregateDiscoveredResourceCountsInput { + s.GroupByKey = &v + return s } -// SetEvaluationResultQualifier sets the EvaluationResultQualifier field's value. -func (s *EvaluationResultIdentifier) SetEvaluationResultQualifier(v *EvaluationResultQualifier) *EvaluationResultIdentifier { - s.EvaluationResultQualifier = v +// SetLimit sets the Limit field's value. +func (s *GetAggregateDiscoveredResourceCountsInput) SetLimit(v int64) *GetAggregateDiscoveredResourceCountsInput { + s.Limit = &v return s } -// SetOrderingTimestamp sets the OrderingTimestamp field's value. -func (s *EvaluationResultIdentifier) SetOrderingTimestamp(v time.Time) *EvaluationResultIdentifier { - s.OrderingTimestamp = &v +// SetNextToken sets the NextToken field's value. +func (s *GetAggregateDiscoveredResourceCountsInput) SetNextToken(v string) *GetAggregateDiscoveredResourceCountsInput { + s.NextToken = &v return s } -// Identifies an AWS Config rule that evaluated an AWS resource, and provides -// the type and ID of the resource that the rule evaluated. -type EvaluationResultQualifier struct { +type GetAggregateDiscoveredResourceCountsOutput struct { _ struct{} `type:"structure"` - // The name of the AWS Config rule that was used in the evaluation. - ConfigRuleName *string `min:"1" type:"string"` + // The key passed into the request object. If GroupByKey is not provided, the + // result will be empty. + GroupByKey *string `min:"1" type:"string"` - // The ID of the evaluated AWS resource. - ResourceId *string `min:"1" type:"string"` + // Returns a list of GroupedResourceCount objects. + GroupedResourceCounts []*GroupedResourceCount `type:"list"` - // The type of AWS resource that was evaluated. - ResourceType *string `min:"1" type:"string"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The total number of resources that are present in an aggregator with the + // filters that you provide. + // + // TotalDiscoveredResources is a required field + TotalDiscoveredResources *int64 `type:"long" required:"true"` } // String returns the string representation -func (s EvaluationResultQualifier) String() string { +func (s GetAggregateDiscoveredResourceCountsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EvaluationResultQualifier) GoString() string { +func (s GetAggregateDiscoveredResourceCountsOutput) GoString() string { return s.String() } -// SetConfigRuleName sets the ConfigRuleName field's value. 
-func (s *EvaluationResultQualifier) SetConfigRuleName(v string) *EvaluationResultQualifier { - s.ConfigRuleName = &v +// SetGroupByKey sets the GroupByKey field's value. +func (s *GetAggregateDiscoveredResourceCountsOutput) SetGroupByKey(v string) *GetAggregateDiscoveredResourceCountsOutput { + s.GroupByKey = &v return s } -// SetResourceId sets the ResourceId field's value. -func (s *EvaluationResultQualifier) SetResourceId(v string) *EvaluationResultQualifier { - s.ResourceId = &v +// SetGroupedResourceCounts sets the GroupedResourceCounts field's value. +func (s *GetAggregateDiscoveredResourceCountsOutput) SetGroupedResourceCounts(v []*GroupedResourceCount) *GetAggregateDiscoveredResourceCountsOutput { + s.GroupedResourceCounts = v return s } -// SetResourceType sets the ResourceType field's value. -func (s *EvaluationResultQualifier) SetResourceType(v string) *EvaluationResultQualifier { - s.ResourceType = &v +// SetNextToken sets the NextToken field's value. +func (s *GetAggregateDiscoveredResourceCountsOutput) SetNextToken(v string) *GetAggregateDiscoveredResourceCountsOutput { + s.NextToken = &v return s } -// List of each of the failed remediations with specific reasons. -type FailedRemediationBatch struct { +// SetTotalDiscoveredResources sets the TotalDiscoveredResources field's value. +func (s *GetAggregateDiscoveredResourceCountsOutput) SetTotalDiscoveredResources(v int64) *GetAggregateDiscoveredResourceCountsOutput { + s.TotalDiscoveredResources = &v + return s +} + +type GetAggregateResourceConfigInput struct { _ struct{} `type:"structure"` - // Returns remediation configurations of the failed items. - FailedItems []*RemediationConfiguration `type:"list"` + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - // Returns a failure message. For example, the resource is already compliant. - FailureMessage *string `type:"string"` + // An object that identifies aggregate resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *AggregateResourceIdentifier `type:"structure" required:"true"` } // String returns the string representation -func (s FailedRemediationBatch) String() string { +func (s GetAggregateResourceConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FailedRemediationBatch) GoString() string { +func (s GetAggregateResourceConfigInput) GoString() string { return s.String() } -// SetFailedItems sets the FailedItems field's value. -func (s *FailedRemediationBatch) SetFailedItems(v []*RemediationConfiguration) *FailedRemediationBatch { - s.FailedItems = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAggregateResourceConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAggregateResourceConfigInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) + } + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *GetAggregateResourceConfigInput) SetConfigurationAggregatorName(v string) *GetAggregateResourceConfigInput { + s.ConfigurationAggregatorName = &v return s } -// SetFailureMessage sets the FailureMessage field's value. -func (s *FailedRemediationBatch) SetFailureMessage(v string) *FailedRemediationBatch { - s.FailureMessage = &v +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *GetAggregateResourceConfigInput) SetResourceIdentifier(v *AggregateResourceIdentifier) *GetAggregateResourceConfigInput { + s.ResourceIdentifier = v return s } -// Details about the fields such as name of the field. -type FieldInfo struct { +type GetAggregateResourceConfigOutput struct { _ struct{} `type:"structure"` - // Name of the field. - Name *string `type:"string"` + // Returns a ConfigurationItem object. + ConfigurationItem *ConfigurationItem `type:"structure"` } // String returns the string representation -func (s FieldInfo) String() string { +func (s GetAggregateResourceConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldInfo) GoString() string { +func (s GetAggregateResourceConfigOutput) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *FieldInfo) SetName(v string) *FieldInfo { - s.Name = &v +// SetConfigurationItem sets the ConfigurationItem field's value. +func (s *GetAggregateResourceConfigOutput) SetConfigurationItem(v *ConfigurationItem) *GetAggregateResourceConfigOutput { + s.ConfigurationItem = v return s } -type GetAggregateComplianceDetailsByConfigRuleInput struct { +type GetComplianceDetailsByConfigRuleInput struct { _ struct{} `type:"structure"` - // The 12-digit account ID of the source account. - // - // AccountId is a required field - AccountId *string `type:"string" required:"true"` - - // The source region from where the data is aggregated. - // - // AwsRegion is a required field - AwsRegion *string `min:"1" type:"string" required:"true"` - - // The resource compliance status. + // Filters the results by compliance. // - // For the GetAggregateComplianceDetailsByConfigRuleRequest data type, AWS Config - // supports only the COMPLIANT and NON_COMPLIANT. AWS Config does not support - // the NOT_APPLICABLE and INSUFFICIENT_DATA values. - ComplianceType *string `type:"string" enum:"ComplianceType"` + // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. + ComplianceTypes []*string `type:"list"` // The name of the AWS Config rule for which you want compliance information. 
// // ConfigRuleName is a required field ConfigRuleName *string `min:"1" type:"string" required:"true"` - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - // The maximum number of evaluation results returned on each page. The default - // is 50. You cannot specify a number greater than 100. If you specify 0, AWS + // is 10. You cannot specify a number greater than 100. If you specify 0, AWS // Config uses the default. Limit *int64 `type:"integer"` @@ -9705,39 +11702,24 @@ type GetAggregateComplianceDetailsByConfigRuleInput struct { } // String returns the string representation -func (s GetAggregateComplianceDetailsByConfigRuleInput) String() string { +func (s GetComplianceDetailsByConfigRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateComplianceDetailsByConfigRuleInput) GoString() string { +func (s GetComplianceDetailsByConfigRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAggregateComplianceDetailsByConfigRuleInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AwsRegion == nil { - invalidParams.Add(request.NewErrParamRequired("AwsRegion")) - } - if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) - } +func (s *GetComplianceDetailsByConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByConfigRuleInput"} if s.ConfigRuleName == nil { invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) } if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) } - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9745,128 +11727,111 @@ func (s *GetAggregateComplianceDetailsByConfigRuleInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetAccountId(v string) *GetAggregateComplianceDetailsByConfigRuleInput { - s.AccountId = &v - return s -} - -// SetAwsRegion sets the AwsRegion field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetAwsRegion(v string) *GetAggregateComplianceDetailsByConfigRuleInput { - s.AwsRegion = &v - return s -} - -// SetComplianceType sets the ComplianceType field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetComplianceType(v string) *GetAggregateComplianceDetailsByConfigRuleInput { - s.ComplianceType = &v +// SetComplianceTypes sets the ComplianceTypes field's value. +func (s *GetComplianceDetailsByConfigRuleInput) SetComplianceTypes(v []*string) *GetComplianceDetailsByConfigRuleInput { + s.ComplianceTypes = v return s } // SetConfigRuleName sets the ConfigRuleName field's value. 
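GetComplianceDetailsByConfigRule pages the same way as the other list calls in this file: Limit caps the page size and the returned NextToken is echoed back until it comes back empty. A sketch of that loop, assuming the usual aws-sdk-go v1 wiring (session.Must, configservice.New, and the GetComplianceDetailsByConfigRule client method live outside this hunk); the rule name is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	input := &configservice.GetComplianceDetailsByConfigRuleInput{
		ConfigRuleName:  aws.String("required-tags"), // hypothetical rule name
		ComplianceTypes: aws.StringSlice([]string{"NON_COMPLIANT"}),
		Limit:           aws.Int64(100), // the documented maximum
	}

	// Each page carries a NextToken; feed it back until it comes back empty.
	for {
		out, err := svc.GetComplianceDetailsByConfigRule(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, r := range out.EvaluationResults {
			fmt.Println(aws.StringValue(r.ComplianceType))
		}
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}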
-func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetConfigRuleName(v string) *GetAggregateComplianceDetailsByConfigRuleInput { +func (s *GetComplianceDetailsByConfigRuleInput) SetConfigRuleName(v string) *GetComplianceDetailsByConfigRuleInput { s.ConfigRuleName = &v return s } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetConfigurationAggregatorName(v string) *GetAggregateComplianceDetailsByConfigRuleInput { - s.ConfigurationAggregatorName = &v - return s -} - // SetLimit sets the Limit field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetLimit(v int64) *GetAggregateComplianceDetailsByConfigRuleInput { +func (s *GetComplianceDetailsByConfigRuleInput) SetLimit(v int64) *GetComplianceDetailsByConfigRuleInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleInput) SetNextToken(v string) *GetAggregateComplianceDetailsByConfigRuleInput { +func (s *GetComplianceDetailsByConfigRuleInput) SetNextToken(v string) *GetComplianceDetailsByConfigRuleInput { s.NextToken = &v return s } -type GetAggregateComplianceDetailsByConfigRuleOutput struct { +type GetComplianceDetailsByConfigRuleOutput struct { _ struct{} `type:"structure"` - // Returns an AggregateEvaluationResults object. - AggregateEvaluationResults []*AggregateEvaluationResult `type:"list"` + // Indicates whether the AWS resource complies with the specified AWS Config + // rule. + EvaluationResults []*EvaluationResult `type:"list"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. NextToken *string `type:"string"` } // String returns the string representation -func (s GetAggregateComplianceDetailsByConfigRuleOutput) String() string { +func (s GetComplianceDetailsByConfigRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateComplianceDetailsByConfigRuleOutput) GoString() string { +func (s GetComplianceDetailsByConfigRuleOutput) GoString() string { return s.String() } -// SetAggregateEvaluationResults sets the AggregateEvaluationResults field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleOutput) SetAggregateEvaluationResults(v []*AggregateEvaluationResult) *GetAggregateComplianceDetailsByConfigRuleOutput { - s.AggregateEvaluationResults = v +// SetEvaluationResults sets the EvaluationResults field's value. +func (s *GetComplianceDetailsByConfigRuleOutput) SetEvaluationResults(v []*EvaluationResult) *GetComplianceDetailsByConfigRuleOutput { + s.EvaluationResults = v return s } // SetNextToken sets the NextToken field's value. -func (s *GetAggregateComplianceDetailsByConfigRuleOutput) SetNextToken(v string) *GetAggregateComplianceDetailsByConfigRuleOutput { +func (s *GetComplianceDetailsByConfigRuleOutput) SetNextToken(v string) *GetComplianceDetailsByConfigRuleOutput { s.NextToken = &v return s } -type GetAggregateConfigRuleComplianceSummaryInput struct { +type GetComplianceDetailsByResourceInput struct { _ struct{} `type:"structure"` - // The name of the configuration aggregator. + // Filters the results by compliance. 
// - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - - // Filters the results based on the ConfigRuleComplianceSummaryFilters object. - Filters *ConfigRuleComplianceSummaryFilters `type:"structure"` - - // Groups the result based on ACCOUNT_ID or AWS_REGION. - GroupByKey *string `type:"string" enum:"ConfigRuleComplianceSummaryGroupKey"` - - // The maximum number of evaluation results returned on each page. The default - // is 1000. You cannot specify a number greater than 1000. If you specify 0, - // AWS Config uses the default. - Limit *int64 `type:"integer"` + // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. + ComplianceTypes []*string `type:"list"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of the AWS resource for which you want compliance information. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetAggregateConfigRuleComplianceSummaryInput) String() string { +func (s GetComplianceDetailsByResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateConfigRuleComplianceSummaryInput) GoString() string { +func (s GetComplianceDetailsByResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAggregateConfigRuleComplianceSummaryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAggregateConfigRuleComplianceSummaryInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) +func (s *GetComplianceDetailsByResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) } - if s.Filters != nil { - if err := s.Filters.Validate(); err != nil { - invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) - } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) } if invalidParams.Len() > 0 { @@ -9875,342 +11840,302 @@ func (s *GetAggregateConfigRuleComplianceSummaryInput) Validate() error { return nil } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *GetAggregateConfigRuleComplianceSummaryInput) SetConfigurationAggregatorName(v string) *GetAggregateConfigRuleComplianceSummaryInput { - s.ConfigurationAggregatorName = &v - return s -} - -// SetFilters sets the Filters field's value. 
-func (s *GetAggregateConfigRuleComplianceSummaryInput) SetFilters(v *ConfigRuleComplianceSummaryFilters) *GetAggregateConfigRuleComplianceSummaryInput { - s.Filters = v +// SetComplianceTypes sets the ComplianceTypes field's value. +func (s *GetComplianceDetailsByResourceInput) SetComplianceTypes(v []*string) *GetComplianceDetailsByResourceInput { + s.ComplianceTypes = v return s } -// SetGroupByKey sets the GroupByKey field's value. -func (s *GetAggregateConfigRuleComplianceSummaryInput) SetGroupByKey(v string) *GetAggregateConfigRuleComplianceSummaryInput { - s.GroupByKey = &v +// SetNextToken sets the NextToken field's value. +func (s *GetComplianceDetailsByResourceInput) SetNextToken(v string) *GetComplianceDetailsByResourceInput { + s.NextToken = &v return s } -// SetLimit sets the Limit field's value. -func (s *GetAggregateConfigRuleComplianceSummaryInput) SetLimit(v int64) *GetAggregateConfigRuleComplianceSummaryInput { - s.Limit = &v +// SetResourceId sets the ResourceId field's value. +func (s *GetComplianceDetailsByResourceInput) SetResourceId(v string) *GetComplianceDetailsByResourceInput { + s.ResourceId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetAggregateConfigRuleComplianceSummaryInput) SetNextToken(v string) *GetAggregateConfigRuleComplianceSummaryInput { - s.NextToken = &v +// SetResourceType sets the ResourceType field's value. +func (s *GetComplianceDetailsByResourceInput) SetResourceType(v string) *GetComplianceDetailsByResourceInput { + s.ResourceType = &v return s } -type GetAggregateConfigRuleComplianceSummaryOutput struct { +type GetComplianceDetailsByResourceOutput struct { _ struct{} `type:"structure"` - // Returns a list of AggregateComplianceCounts object. - AggregateComplianceCounts []*AggregateComplianceCount `type:"list"` - - // Groups the result based on ACCOUNT_ID or AWS_REGION. - GroupByKey *string `min:"1" type:"string"` + // Indicates whether the specified AWS resource complies each AWS Config rule. + EvaluationResults []*EvaluationResult `type:"list"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. NextToken *string `type:"string"` } // String returns the string representation -func (s GetAggregateConfigRuleComplianceSummaryOutput) String() string { +func (s GetComplianceDetailsByResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateConfigRuleComplianceSummaryOutput) GoString() string { +func (s GetComplianceDetailsByResourceOutput) GoString() string { return s.String() } -// SetAggregateComplianceCounts sets the AggregateComplianceCounts field's value. -func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetAggregateComplianceCounts(v []*AggregateComplianceCount) *GetAggregateConfigRuleComplianceSummaryOutput { - s.AggregateComplianceCounts = v - return s -} - -// SetGroupByKey sets the GroupByKey field's value. -func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetGroupByKey(v string) *GetAggregateConfigRuleComplianceSummaryOutput { - s.GroupByKey = &v +// SetEvaluationResults sets the EvaluationResults field's value. +func (s *GetComplianceDetailsByResourceOutput) SetEvaluationResults(v []*EvaluationResult) *GetComplianceDetailsByResourceOutput { + s.EvaluationResults = v return s } // SetNextToken sets the NextToken field's value. 
-func (s *GetAggregateConfigRuleComplianceSummaryOutput) SetNextToken(v string) *GetAggregateConfigRuleComplianceSummaryOutput { +func (s *GetComplianceDetailsByResourceOutput) SetNextToken(v string) *GetComplianceDetailsByResourceOutput { s.NextToken = &v return s } -type GetAggregateDiscoveredResourceCountsInput struct { +type GetComplianceSummaryByConfigRuleInput struct { _ struct{} `type:"structure"` +} - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` - - // Filters the results based on the ResourceCountFilters object. - Filters *ResourceCountFilters `type:"structure"` +// String returns the string representation +func (s GetComplianceSummaryByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} - // The key to group the resource counts. - GroupByKey *string `type:"string" enum:"ResourceCountGroupKey"` +// GoString returns the string representation +func (s GetComplianceSummaryByConfigRuleInput) GoString() string { + return s.String() +} - // The maximum number of GroupedResourceCount objects returned on each page. - // The default is 1000. You cannot specify a number greater than 1000. If you - // specify 0, AWS Config uses the default. - Limit *int64 `type:"integer"` +type GetComplianceSummaryByConfigRuleOutput struct { + _ struct{} `type:"structure"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // The number of AWS Config rules that are compliant and the number that are + // noncompliant, up to a maximum of 25 for each. + ComplianceSummary *ComplianceSummary `type:"structure"` } // String returns the string representation -func (s GetAggregateDiscoveredResourceCountsInput) String() string { +func (s GetComplianceSummaryByConfigRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateDiscoveredResourceCountsInput) GoString() string { +func (s GetComplianceSummaryByConfigRuleOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAggregateDiscoveredResourceCountsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAggregateDiscoveredResourceCountsInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - if s.Filters != nil { - if err := s.Filters.Validate(); err != nil { - invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) - } - } +// SetComplianceSummary sets the ComplianceSummary field's value. +func (s *GetComplianceSummaryByConfigRuleOutput) SetComplianceSummary(v *ComplianceSummary) *GetComplianceSummaryByConfigRuleOutput { + s.ComplianceSummary = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +type GetComplianceSummaryByResourceTypeInput struct { + _ struct{} `type:"structure"` + + // Specify one or more resource types to get the number of resources that are + // compliant and the number that are noncompliant for each resource type. + // + // For this request, you can specify an AWS resource type such as AWS::EC2::Instance. 
+ // You can specify that the resource type is an AWS account by specifying AWS::::Account. + ResourceTypes []*string `type:"list"` } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *GetAggregateDiscoveredResourceCountsInput) SetConfigurationAggregatorName(v string) *GetAggregateDiscoveredResourceCountsInput { - s.ConfigurationAggregatorName = &v - return s +// String returns the string representation +func (s GetComplianceSummaryByResourceTypeInput) String() string { + return awsutil.Prettify(s) } -// SetFilters sets the Filters field's value. -func (s *GetAggregateDiscoveredResourceCountsInput) SetFilters(v *ResourceCountFilters) *GetAggregateDiscoveredResourceCountsInput { - s.Filters = v - return s +// GoString returns the string representation +func (s GetComplianceSummaryByResourceTypeInput) GoString() string { + return s.String() } -// SetGroupByKey sets the GroupByKey field's value. -func (s *GetAggregateDiscoveredResourceCountsInput) SetGroupByKey(v string) *GetAggregateDiscoveredResourceCountsInput { - s.GroupByKey = &v +// SetResourceTypes sets the ResourceTypes field's value. +func (s *GetComplianceSummaryByResourceTypeInput) SetResourceTypes(v []*string) *GetComplianceSummaryByResourceTypeInput { + s.ResourceTypes = v return s } -// SetLimit sets the Limit field's value. -func (s *GetAggregateDiscoveredResourceCountsInput) SetLimit(v int64) *GetAggregateDiscoveredResourceCountsInput { - s.Limit = &v - return s +type GetComplianceSummaryByResourceTypeOutput struct { + _ struct{} `type:"structure"` + + // The number of resources that are compliant and the number that are noncompliant. + // If one or more resource types were provided with the request, the numbers + // are returned for each resource type. The maximum number returned is 100. + ComplianceSummariesByResourceType []*ComplianceSummaryByResourceType `type:"list"` } -// SetNextToken sets the NextToken field's value. -func (s *GetAggregateDiscoveredResourceCountsInput) SetNextToken(v string) *GetAggregateDiscoveredResourceCountsInput { - s.NextToken = &v - return s +// String returns the string representation +func (s GetComplianceSummaryByResourceTypeOutput) String() string { + return awsutil.Prettify(s) } -type GetAggregateDiscoveredResourceCountsOutput struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s GetComplianceSummaryByResourceTypeOutput) GoString() string { + return s.String() +} - // The key passed into the request object. If GroupByKey is not provided, the - // result will be empty. - GroupByKey *string `min:"1" type:"string"` +// SetComplianceSummariesByResourceType sets the ComplianceSummariesByResourceType field's value. +func (s *GetComplianceSummaryByResourceTypeOutput) SetComplianceSummariesByResourceType(v []*ComplianceSummaryByResourceType) *GetComplianceSummaryByResourceTypeOutput { + s.ComplianceSummariesByResourceType = v + return s +} - // Returns a list of GroupedResourceCount objects. - GroupedResourceCounts []*GroupedResourceCount `type:"list"` +type GetDiscoveredResourceCountsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of ResourceCount objects returned on each page. The default + // is 100. You cannot specify a number greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `locationName:"limit" type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. 
- NextToken *string `type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` - // The total number of resources that are present in an aggregator with the - // filters that you provide. + // The comma-separated list that specifies the resource types that you want + // AWS Config to return (for example, "AWS::EC2::Instance", "AWS::IAM::User"). // - // TotalDiscoveredResources is a required field - TotalDiscoveredResources *int64 `type:"long" required:"true"` + // If a value for resourceTypes is not specified, AWS Config returns all resource + // types that AWS Config is recording in the region for your account. + // + // If the configuration recorder is turned off, AWS Config returns an empty + // list of ResourceCount objects. If the configuration recorder is not recording + // a specific resource type (for example, S3 buckets), that resource type is + // not returned in the list of ResourceCount objects. + ResourceTypes []*string `locationName:"resourceTypes" type:"list"` } // String returns the string representation -func (s GetAggregateDiscoveredResourceCountsOutput) String() string { +func (s GetDiscoveredResourceCountsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateDiscoveredResourceCountsOutput) GoString() string { +func (s GetDiscoveredResourceCountsInput) GoString() string { return s.String() } -// SetGroupByKey sets the GroupByKey field's value. -func (s *GetAggregateDiscoveredResourceCountsOutput) SetGroupByKey(v string) *GetAggregateDiscoveredResourceCountsOutput { - s.GroupByKey = &v - return s -} - -// SetGroupedResourceCounts sets the GroupedResourceCounts field's value. -func (s *GetAggregateDiscoveredResourceCountsOutput) SetGroupedResourceCounts(v []*GroupedResourceCount) *GetAggregateDiscoveredResourceCountsOutput { - s.GroupedResourceCounts = v +// SetLimit sets the Limit field's value. +func (s *GetDiscoveredResourceCountsInput) SetLimit(v int64) *GetDiscoveredResourceCountsInput { + s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetAggregateDiscoveredResourceCountsOutput) SetNextToken(v string) *GetAggregateDiscoveredResourceCountsOutput { +func (s *GetDiscoveredResourceCountsInput) SetNextToken(v string) *GetDiscoveredResourceCountsInput { s.NextToken = &v return s } -// SetTotalDiscoveredResources sets the TotalDiscoveredResources field's value. -func (s *GetAggregateDiscoveredResourceCountsOutput) SetTotalDiscoveredResources(v int64) *GetAggregateDiscoveredResourceCountsOutput { - s.TotalDiscoveredResources = &v +// SetResourceTypes sets the ResourceTypes field's value. +func (s *GetDiscoveredResourceCountsInput) SetResourceTypes(v []*string) *GetDiscoveredResourceCountsInput { + s.ResourceTypes = v return s } -type GetAggregateResourceConfigInput struct { +type GetDiscoveredResourceCountsOutput struct { _ struct{} `type:"structure"` - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` - // An object that identifies aggregate resource. + // The list of ResourceCount objects. Each object is listed in descending order + // by the number of resources. 
+ ResourceCounts []*ResourceCount `locationName:"resourceCounts" type:"list"` + + // The total number of resources that AWS Config is recording in the region + // for your account. If you specify resource types in the request, AWS Config + // returns only the total number of resources for those resource types. // - // ResourceIdentifier is a required field - ResourceIdentifier *AggregateResourceIdentifier `type:"structure" required:"true"` + // Example + // + // AWS Config is recording three resource types in the US East (Ohio) Region + // for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets, for + // a total of 60 resources. + // + // You make a call to the GetDiscoveredResourceCounts action and specify the + // resource type, "AWS::EC2::Instances", in the request. + // + // AWS Config returns 25 for totalDiscoveredResources. + TotalDiscoveredResources *int64 `locationName:"totalDiscoveredResources" type:"long"` } // String returns the string representation -func (s GetAggregateResourceConfigInput) String() string { +func (s GetDiscoveredResourceCountsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAggregateResourceConfigInput) GoString() string { +func (s GetDiscoveredResourceCountsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAggregateResourceConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAggregateResourceConfigInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - if s.ResourceIdentifier == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) - } - if s.ResourceIdentifier != nil { - if err := s.ResourceIdentifier.Validate(); err != nil { - invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *GetAggregateResourceConfigInput) SetConfigurationAggregatorName(v string) *GetAggregateResourceConfigInput { - s.ConfigurationAggregatorName = &v +// SetNextToken sets the NextToken field's value. +func (s *GetDiscoveredResourceCountsOutput) SetNextToken(v string) *GetDiscoveredResourceCountsOutput { + s.NextToken = &v return s } -// SetResourceIdentifier sets the ResourceIdentifier field's value. -func (s *GetAggregateResourceConfigInput) SetResourceIdentifier(v *AggregateResourceIdentifier) *GetAggregateResourceConfigInput { - s.ResourceIdentifier = v +// SetResourceCounts sets the ResourceCounts field's value. +func (s *GetDiscoveredResourceCountsOutput) SetResourceCounts(v []*ResourceCount) *GetDiscoveredResourceCountsOutput { + s.ResourceCounts = v return s } -type GetAggregateResourceConfigOutput struct { - _ struct{} `type:"structure"` - - // Returns a ConfigurationItem object. 
- ConfigurationItem *ConfigurationItem `type:"structure"` -} - -// String returns the string representation -func (s GetAggregateResourceConfigOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetAggregateResourceConfigOutput) GoString() string { - return s.String() -} - -// SetConfigurationItem sets the ConfigurationItem field's value. -func (s *GetAggregateResourceConfigOutput) SetConfigurationItem(v *ConfigurationItem) *GetAggregateResourceConfigOutput { - s.ConfigurationItem = v +// SetTotalDiscoveredResources sets the TotalDiscoveredResources field's value. +func (s *GetDiscoveredResourceCountsOutput) SetTotalDiscoveredResources(v int64) *GetDiscoveredResourceCountsOutput { + s.TotalDiscoveredResources = &v return s } -type GetComplianceDetailsByConfigRuleInput struct { +type GetOrganizationConfigRuleDetailedStatusInput struct { _ struct{} `type:"structure"` - // Filters the results by compliance. - // - // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. - ComplianceTypes []*string `type:"list"` - - // The name of the AWS Config rule for which you want compliance information. - // - // ConfigRuleName is a required field - ConfigRuleName *string `min:"1" type:"string" required:"true"` + // A StatusDetailFilters object. + Filters *StatusDetailFilters `type:"structure"` - // The maximum number of evaluation results returned on each page. The default - // is 10. You cannot specify a number greater than 100. If you specify 0, AWS - // Config uses the default. + // The maximum number of OrganizationConfigRuleDetailedStatus returned on each + // page. If you do not specify a number, AWS Config uses the default. The default + // is 100. Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` + + // The name of organization config rule for which you want status details for + // member accounts. + // + // OrganizationConfigRuleName is a required field + OrganizationConfigRuleName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetComplianceDetailsByConfigRuleInput) String() string { +func (s GetOrganizationConfigRuleDetailedStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetComplianceDetailsByConfigRuleInput) GoString() string { +func (s GetOrganizationConfigRuleDetailedStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
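The worked example in the GetDiscoveredResourceCountsOutput comment above (25 EC2 instances, 20 IAM users, and 15 S3 buckets recorded; totalDiscoveredResources comes back as 25 when only EC2 instances are requested) translates directly into a call. A sketch under the same client assumptions as before; note that the ResourceType and Count fields on ResourceCount are taken from elsewhere in this generated file, not from this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	// Scope the count to one resource type; with the numbers from the
	// example above this returns totalDiscoveredResources = 25.
	out, err := svc.GetDiscoveredResourceCounts(&configservice.GetDiscoveredResourceCountsInput{
		ResourceTypes: aws.StringSlice([]string{"AWS::EC2::Instance"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total:", aws.Int64Value(out.TotalDiscoveredResources))
	for _, rc := range out.ResourceCounts {
		// ResourceType and Count are fields of ResourceCount (defined
		// elsewhere in this file); counts arrive in descending order.
		fmt.Println(aws.StringValue(rc.ResourceType), aws.Int64Value(rc.Count))
	}
}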
-func (s *GetComplianceDetailsByConfigRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByConfigRuleInput"} - if s.ConfigRuleName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) +func (s *GetOrganizationConfigRuleDetailedStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOrganizationConfigRuleDetailedStatusInput"} + if s.OrganizationConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationConfigRuleName")) } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + if s.OrganizationConfigRuleName != nil && len(*s.OrganizationConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationConfigRuleName", 1)) } if invalidParams.Len() > 0 { @@ -10219,100 +12144,113 @@ func (s *GetComplianceDetailsByConfigRuleInput) Validate() error { return nil } -// SetComplianceTypes sets the ComplianceTypes field's value. -func (s *GetComplianceDetailsByConfigRuleInput) SetComplianceTypes(v []*string) *GetComplianceDetailsByConfigRuleInput { - s.ComplianceTypes = v - return s -} - -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *GetComplianceDetailsByConfigRuleInput) SetConfigRuleName(v string) *GetComplianceDetailsByConfigRuleInput { - s.ConfigRuleName = &v +// SetFilters sets the Filters field's value. +func (s *GetOrganizationConfigRuleDetailedStatusInput) SetFilters(v *StatusDetailFilters) *GetOrganizationConfigRuleDetailedStatusInput { + s.Filters = v return s } // SetLimit sets the Limit field's value. -func (s *GetComplianceDetailsByConfigRuleInput) SetLimit(v int64) *GetComplianceDetailsByConfigRuleInput { +func (s *GetOrganizationConfigRuleDetailedStatusInput) SetLimit(v int64) *GetOrganizationConfigRuleDetailedStatusInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetComplianceDetailsByConfigRuleInput) SetNextToken(v string) *GetComplianceDetailsByConfigRuleInput { +func (s *GetOrganizationConfigRuleDetailedStatusInput) SetNextToken(v string) *GetOrganizationConfigRuleDetailedStatusInput { s.NextToken = &v return s } -type GetComplianceDetailsByConfigRuleOutput struct { - _ struct{} `type:"structure"` +// SetOrganizationConfigRuleName sets the OrganizationConfigRuleName field's value. +func (s *GetOrganizationConfigRuleDetailedStatusInput) SetOrganizationConfigRuleName(v string) *GetOrganizationConfigRuleDetailedStatusInput { + s.OrganizationConfigRuleName = &v + return s +} - // Indicates whether the AWS resource complies with the specified AWS Config - // rule. - EvaluationResults []*EvaluationResult `type:"list"` +type GetOrganizationConfigRuleDetailedStatusOutput struct { + _ struct{} `type:"structure"` - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. NextToken *string `type:"string"` + + // A list of MemberAccountStatus objects. 
+ OrganizationConfigRuleDetailedStatus []*MemberAccountStatus `type:"list"` } // String returns the string representation -func (s GetComplianceDetailsByConfigRuleOutput) String() string { +func (s GetOrganizationConfigRuleDetailedStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetComplianceDetailsByConfigRuleOutput) GoString() string { +func (s GetOrganizationConfigRuleDetailedStatusOutput) GoString() string { return s.String() } -// SetEvaluationResults sets the EvaluationResults field's value. -func (s *GetComplianceDetailsByConfigRuleOutput) SetEvaluationResults(v []*EvaluationResult) *GetComplianceDetailsByConfigRuleOutput { - s.EvaluationResults = v +// SetNextToken sets the NextToken field's value. +func (s *GetOrganizationConfigRuleDetailedStatusOutput) SetNextToken(v string) *GetOrganizationConfigRuleDetailedStatusOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetComplianceDetailsByConfigRuleOutput) SetNextToken(v string) *GetComplianceDetailsByConfigRuleOutput { - s.NextToken = &v +// SetOrganizationConfigRuleDetailedStatus sets the OrganizationConfigRuleDetailedStatus field's value. +func (s *GetOrganizationConfigRuleDetailedStatusOutput) SetOrganizationConfigRuleDetailedStatus(v []*MemberAccountStatus) *GetOrganizationConfigRuleDetailedStatusOutput { + s.OrganizationConfigRuleDetailedStatus = v return s } -type GetComplianceDetailsByResourceInput struct { +// The input for the GetResourceConfigHistory action. +type GetResourceConfigHistoryInput struct { _ struct{} `type:"structure"` - // Filters the results by compliance. - // - // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. - ComplianceTypes []*string `type:"list"` + // The chronological order for configuration items listed. By default, the results + // are listed in reverse chronological order. + ChronologicalOrder *string `locationName:"chronologicalOrder" type:"string" enum:"ChronologicalOrder"` + + // The time stamp that indicates an earlier time. If not specified, the action + // returns paginated results that contain configuration items that start when + // the first configuration item was recorded. + EarlierTime *time.Time `locationName:"earlierTime" type:"timestamp"` + + // The time stamp that indicates a later time. If not specified, current time + // is taken. + LaterTime *time.Time `locationName:"laterTime" type:"timestamp"` + + // The maximum number of configuration items returned on each page. The default + // is 10. You cannot specify a number greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `locationName:"limit" type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. - NextToken *string `type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` - // The ID of the AWS resource for which you want compliance information. + // The ID of the resource (for example., sg-xxxxxx). // // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` + ResourceId *string `locationName:"resourceId" min:"1" type:"string" required:"true"` - // The type of the AWS resource for which you want compliance information. + // The resource type. 
// // ResourceType is a required field - ResourceType *string `min:"1" type:"string" required:"true"` + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` } // String returns the string representation -func (s GetComplianceDetailsByResourceInput) String() string { +func (s GetResourceConfigHistoryInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetComplianceDetailsByResourceInput) GoString() string { +func (s GetResourceConfigHistoryInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetComplianceDetailsByResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByResourceInput"} +func (s *GetResourceConfigHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourceConfigHistoryInput"} if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } @@ -10322,291 +12260,259 @@ func (s *GetComplianceDetailsByResourceInput) Validate() error { if s.ResourceType == nil { invalidParams.Add(request.NewErrParamRequired("ResourceType")) } - if s.ResourceType != nil && len(*s.ResourceType) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) + + if invalidParams.Len() > 0 { + return invalidParams } + return nil +} + +// SetChronologicalOrder sets the ChronologicalOrder field's value. +func (s *GetResourceConfigHistoryInput) SetChronologicalOrder(v string) *GetResourceConfigHistoryInput { + s.ChronologicalOrder = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEarlierTime sets the EarlierTime field's value. +func (s *GetResourceConfigHistoryInput) SetEarlierTime(v time.Time) *GetResourceConfigHistoryInput { + s.EarlierTime = &v + return s } -// SetComplianceTypes sets the ComplianceTypes field's value. -func (s *GetComplianceDetailsByResourceInput) SetComplianceTypes(v []*string) *GetComplianceDetailsByResourceInput { - s.ComplianceTypes = v +// SetLaterTime sets the LaterTime field's value. +func (s *GetResourceConfigHistoryInput) SetLaterTime(v time.Time) *GetResourceConfigHistoryInput { + s.LaterTime = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *GetResourceConfigHistoryInput) SetLimit(v int64) *GetResourceConfigHistoryInput { + s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetComplianceDetailsByResourceInput) SetNextToken(v string) *GetComplianceDetailsByResourceInput { +func (s *GetResourceConfigHistoryInput) SetNextToken(v string) *GetResourceConfigHistoryInput { s.NextToken = &v return s } // SetResourceId sets the ResourceId field's value. -func (s *GetComplianceDetailsByResourceInput) SetResourceId(v string) *GetComplianceDetailsByResourceInput { +func (s *GetResourceConfigHistoryInput) SetResourceId(v string) *GetResourceConfigHistoryInput { s.ResourceId = &v return s } // SetResourceType sets the ResourceType field's value. -func (s *GetComplianceDetailsByResourceInput) SetResourceType(v string) *GetComplianceDetailsByResourceInput { +func (s *GetResourceConfigHistoryInput) SetResourceType(v string) *GetResourceConfigHistoryInput { s.ResourceType = &v return s } -type GetComplianceDetailsByResourceOutput struct { +// The output for the GetResourceConfigHistory action. 
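GetResourceConfigHistory bounds its results with an EarlierTime/LaterTime window and orders them with ChronologicalOrder; LaterTime defaults to the current time when omitted. A sketch, again assuming the v1 client wiring shown earlier; the security-group ID is hypothetical and ConfigurationItemCaptureTime comes from the ConfigurationItem shape defined elsewhere in this file:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	// Last 24 hours of configuration items for one security group,
	// newest first. LaterTime is omitted, so "now" is assumed.
	input := (&configservice.GetResourceConfigHistoryInput{}).
		SetResourceType("AWS::EC2::SecurityGroup").
		SetResourceId("sg-0123456789abcdef0"). // hypothetical ID
		SetEarlierTime(time.Now().Add(-24 * time.Hour)).
		SetChronologicalOrder("Reverse")

	out, err := svc.GetResourceConfigHistory(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, ci := range out.ConfigurationItems {
		fmt.Println(aws.TimeValue(ci.ConfigurationItemCaptureTime))
	}
}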
+type GetResourceConfigHistoryOutput struct { _ struct{} `type:"structure"` - // Indicates whether the specified AWS resource complies each AWS Config rule. - EvaluationResults []*EvaluationResult `type:"list"` + // A list that contains the configuration history of one or more resources. + ConfigurationItems []*ConfigurationItem `locationName:"configurationItems" type:"list"` // The string that you use in a subsequent request to get the next page of results // in a paginated response. - NextToken *string `type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s GetComplianceDetailsByResourceOutput) String() string { +func (s GetResourceConfigHistoryOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetComplianceDetailsByResourceOutput) GoString() string { +func (s GetResourceConfigHistoryOutput) GoString() string { return s.String() } -// SetEvaluationResults sets the EvaluationResults field's value. -func (s *GetComplianceDetailsByResourceOutput) SetEvaluationResults(v []*EvaluationResult) *GetComplianceDetailsByResourceOutput { - s.EvaluationResults = v +// SetConfigurationItems sets the ConfigurationItems field's value. +func (s *GetResourceConfigHistoryOutput) SetConfigurationItems(v []*ConfigurationItem) *GetResourceConfigHistoryOutput { + s.ConfigurationItems = v return s } // SetNextToken sets the NextToken field's value. -func (s *GetComplianceDetailsByResourceOutput) SetNextToken(v string) *GetComplianceDetailsByResourceOutput { +func (s *GetResourceConfigHistoryOutput) SetNextToken(v string) *GetResourceConfigHistoryOutput { s.NextToken = &v return s } -type GetComplianceSummaryByConfigRuleInput struct { +// The count of resources that are grouped by the group name. +type GroupedResourceCount struct { _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s GetComplianceSummaryByConfigRuleInput) String() string { - return awsutil.Prettify(s) -} -// GoString returns the string representation -func (s GetComplianceSummaryByConfigRuleInput) GoString() string { - return s.String() -} - -type GetComplianceSummaryByConfigRuleOutput struct { - _ struct{} `type:"structure"` + // The name of the group that can be region, account ID, or resource type. For + // example, region1, region2 if the region was chosen as GroupByKey. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` - // The number of AWS Config rules that are compliant and the number that are - // noncompliant, up to a maximum of 25 for each. - ComplianceSummary *ComplianceSummary `type:"structure"` + // The number of resources in the group. + // + // ResourceCount is a required field + ResourceCount *int64 `type:"long" required:"true"` } // String returns the string representation -func (s GetComplianceSummaryByConfigRuleOutput) String() string { +func (s GroupedResourceCount) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetComplianceSummaryByConfigRuleOutput) GoString() string { +func (s GroupedResourceCount) GoString() string { return s.String() } -// SetComplianceSummary sets the ComplianceSummary field's value. -func (s *GetComplianceSummaryByConfigRuleOutput) SetComplianceSummary(v *ComplianceSummary) *GetComplianceSummaryByConfigRuleOutput { - s.ComplianceSummary = v +// SetGroupName sets the GroupName field's value. 
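// A minimal usage sketch, not part of the vendored diff: calling the
// GetResourceConfigHistory action with the input/output types above. The
// *ConfigService receiver and the security-group values are illustrative
// assumptions; the setters and Validate rules are the ones defined in this file.
func exampleGetResourceConfigHistory(svc *ConfigService) ([]*ConfigurationItem, error) {
	input := &GetResourceConfigHistoryInput{}
	input.SetResourceId("sg-0123456789abcdef0"). // hypothetical resource ID; required, min length 1
		SetResourceType("AWS::EC2::SecurityGroup"). // assumed ResourceType enum value
		SetLimit(10)                                // 0 falls back to the default page size of 10
	if err := input.Validate(); err != nil {
		return nil, err
	}
	out, err := svc.GetResourceConfigHistory(input)
	if err != nil {
		return nil, err
	}
	// Callers would re-issue the request via input.SetNextToken(*out.NextToken)
	// while out.NextToken is non-nil to walk the remaining pages.
	return out.ConfigurationItems, nil
}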
+func (s *GroupedResourceCount) SetGroupName(v string) *GroupedResourceCount { + s.GroupName = &v return s } -type GetComplianceSummaryByResourceTypeInput struct { - _ struct{} `type:"structure"` - - // Specify one or more resource types to get the number of resources that are - // compliant and the number that are noncompliant for each resource type. - // - // For this request, you can specify an AWS resource type such as AWS::EC2::Instance. - // You can specify that the resource type is an AWS account by specifying AWS::::Account. - ResourceTypes []*string `type:"list"` -} - -// String returns the string representation -func (s GetComplianceSummaryByResourceTypeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetComplianceSummaryByResourceTypeInput) GoString() string { - return s.String() -} - -// SetResourceTypes sets the ResourceTypes field's value. -func (s *GetComplianceSummaryByResourceTypeInput) SetResourceTypes(v []*string) *GetComplianceSummaryByResourceTypeInput { - s.ResourceTypes = v +// SetResourceCount sets the ResourceCount field's value. +func (s *GroupedResourceCount) SetResourceCount(v int64) *GroupedResourceCount { + s.ResourceCount = &v return s } -type GetComplianceSummaryByResourceTypeOutput struct { +type ListAggregateDiscoveredResourcesInput struct { _ struct{} `type:"structure"` - // The number of resources that are compliant and the number that are noncompliant. - // If one or more resource types were provided with the request, the numbers - // are returned for each resource type. The maximum number returned is 100. - ComplianceSummariesByResourceType []*ComplianceSummaryByResourceType `type:"list"` -} - -// String returns the string representation -func (s GetComplianceSummaryByResourceTypeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetComplianceSummaryByResourceTypeOutput) GoString() string { - return s.String() -} - -// SetComplianceSummariesByResourceType sets the ComplianceSummariesByResourceType field's value. -func (s *GetComplianceSummaryByResourceTypeOutput) SetComplianceSummariesByResourceType(v []*ComplianceSummaryByResourceType) *GetComplianceSummaryByResourceTypeOutput { - s.ComplianceSummariesByResourceType = v - return s -} + // The name of the configuration aggregator. + // + // ConfigurationAggregatorName is a required field + ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` -type GetDiscoveredResourceCountsInput struct { - _ struct{} `type:"structure"` + // Filters the results based on the ResourceFilters object. + Filters *ResourceFilters `type:"structure"` - // The maximum number of ResourceCount objects returned on each page. The default + // The maximum number of resource identifiers returned on each page. The default // is 100. You cannot specify a number greater than 100. If you specify 0, AWS // Config uses the default. - Limit *int64 `locationName:"limit" type:"integer"` + Limit *int64 `type:"integer"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. - NextToken *string `locationName:"nextToken" type:"string"` + NextToken *string `type:"string"` - // The comma-separated list that specifies the resource types that you want - // AWS Config to return (for example, "AWS::EC2::Instance", "AWS::IAM::User"). 
- // - // If a value for resourceTypes is not specified, AWS Config returns all resource - // types that AWS Config is recording in the region for your account. + // The type of resources that you want AWS Config to list in the response. // - // If the configuration recorder is turned off, AWS Config returns an empty - // list of ResourceCount objects. If the configuration recorder is not recording - // a specific resource type (for example, S3 buckets), that resource type is - // not returned in the list of ResourceCount objects. - ResourceTypes []*string `locationName:"resourceTypes" type:"list"` + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"ResourceType"` } // String returns the string representation -func (s GetDiscoveredResourceCountsInput) String() string { +func (s ListAggregateDiscoveredResourcesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiscoveredResourceCountsInput) GoString() string { +func (s ListAggregateDiscoveredResourcesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAggregateDiscoveredResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAggregateDiscoveredResourcesInput"} + if s.ConfigurationAggregatorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) + } + if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. +func (s *ListAggregateDiscoveredResourcesInput) SetConfigurationAggregatorName(v string) *ListAggregateDiscoveredResourcesInput { + s.ConfigurationAggregatorName = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListAggregateDiscoveredResourcesInput) SetFilters(v *ResourceFilters) *ListAggregateDiscoveredResourcesInput { + s.Filters = v + return s +} + // SetLimit sets the Limit field's value. -func (s *GetDiscoveredResourceCountsInput) SetLimit(v int64) *GetDiscoveredResourceCountsInput { +func (s *ListAggregateDiscoveredResourcesInput) SetLimit(v int64) *ListAggregateDiscoveredResourcesInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetDiscoveredResourceCountsInput) SetNextToken(v string) *GetDiscoveredResourceCountsInput { +func (s *ListAggregateDiscoveredResourcesInput) SetNextToken(v string) *ListAggregateDiscoveredResourcesInput { s.NextToken = &v return s } -// SetResourceTypes sets the ResourceTypes field's value. -func (s *GetDiscoveredResourceCountsInput) SetResourceTypes(v []*string) *GetDiscoveredResourceCountsInput { - s.ResourceTypes = v +// SetResourceType sets the ResourceType field's value. 
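// A usage sketch (illustrative only): building and validating a
// ListAggregateDiscoveredResourcesInput with the setters around this hunk.
// The aggregator name is a made-up placeholder, Filters stays unset since it
// is optional, and the output type follows just below.
func exampleListAggregateDiscoveredResources(svc *ConfigService) ([]*AggregateResourceIdentifier, error) {
	input := &ListAggregateDiscoveredResourcesInput{}
	input.SetConfigurationAggregatorName("org-aggregator"). // assumed name; required, min length 1
		SetResourceType("AWS::EC2::Instance")               // required ResourceType enum value
	if err := input.Validate(); err != nil {
		return nil, err // surfaces the ErrParamRequired/ErrParamMinLen cases above
	}
	out, err := svc.ListAggregateDiscoveredResources(input)
	if err != nil {
		return nil, err
	}
	return out.ResourceIdentifiers, nil
}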
+func (s *ListAggregateDiscoveredResourcesInput) SetResourceType(v string) *ListAggregateDiscoveredResourcesInput { + s.ResourceType = &v return s } -type GetDiscoveredResourceCountsOutput struct { +type ListAggregateDiscoveredResourcesOutput struct { _ struct{} `type:"structure"` - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. - NextToken *string `locationName:"nextToken" type:"string"` - - // The list of ResourceCount objects. Each object is listed in descending order - // by the number of resources. - ResourceCounts []*ResourceCount `locationName:"resourceCounts" type:"list"` + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` - // The total number of resources that AWS Config is recording in the region - // for your account. If you specify resource types in the request, AWS Config - // returns only the total number of resources for those resource types. - // - // Example - // - // AWS Config is recording three resource types in the US East (Ohio) Region - // for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets, for - // a total of 60 resources. - // - // You make a call to the GetDiscoveredResourceCounts action and specify the - // resource type, "AWS::EC2::Instances", in the request. - // - // AWS Config returns 25 for totalDiscoveredResources. - TotalDiscoveredResources *int64 `locationName:"totalDiscoveredResources" type:"long"` + // Returns a list of ResourceIdentifiers objects. + ResourceIdentifiers []*AggregateResourceIdentifier `type:"list"` } // String returns the string representation -func (s GetDiscoveredResourceCountsOutput) String() string { +func (s ListAggregateDiscoveredResourcesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiscoveredResourceCountsOutput) GoString() string { +func (s ListAggregateDiscoveredResourcesOutput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. -func (s *GetDiscoveredResourceCountsOutput) SetNextToken(v string) *GetDiscoveredResourceCountsOutput { +func (s *ListAggregateDiscoveredResourcesOutput) SetNextToken(v string) *ListAggregateDiscoveredResourcesOutput { s.NextToken = &v return s } -// SetResourceCounts sets the ResourceCounts field's value. -func (s *GetDiscoveredResourceCountsOutput) SetResourceCounts(v []*ResourceCount) *GetDiscoveredResourceCountsOutput { - s.ResourceCounts = v - return s -} - -// SetTotalDiscoveredResources sets the TotalDiscoveredResources field's value. -func (s *GetDiscoveredResourceCountsOutput) SetTotalDiscoveredResources(v int64) *GetDiscoveredResourceCountsOutput { - s.TotalDiscoveredResources = &v +// SetResourceIdentifiers sets the ResourceIdentifiers field's value. +func (s *ListAggregateDiscoveredResourcesOutput) SetResourceIdentifiers(v []*AggregateResourceIdentifier) *ListAggregateDiscoveredResourcesOutput { + s.ResourceIdentifiers = v return s } -// The input for the GetResourceConfigHistory action. -type GetResourceConfigHistoryInput struct { +type ListDiscoveredResourcesInput struct { _ struct{} `type:"structure"` - // The chronological order for configuration items listed. By default, the results - // are listed in reverse chronological order. - ChronologicalOrder *string `locationName:"chronologicalOrder" type:"string" enum:"ChronologicalOrder"` - - // The time stamp that indicates an earlier time. 
If not specified, the action - // returns paginated results that contain configuration items that start when - // the first configuration item was recorded. - EarlierTime *time.Time `locationName:"earlierTime" type:"timestamp"` - - // The time stamp that indicates a later time. If not specified, current time - // is taken. - LaterTime *time.Time `locationName:"laterTime" type:"timestamp"` + // Specifies whether AWS Config includes deleted resources in the results. By + // default, deleted resources are not included. + IncludeDeletedResources *bool `locationName:"includeDeletedResources" type:"boolean"` - // The maximum number of configuration items returned on each page. The default - // is 10. You cannot specify a number greater than 100. If you specify 0, AWS + // The maximum number of resource identifiers returned on each page. The default + // is 100. You cannot specify a number greater than 100. If you specify 0, AWS // Config uses the default. Limit *int64 `locationName:"limit" type:"integer"` @@ -10614,36 +12520,35 @@ type GetResourceConfigHistoryInput struct { // next page of results in a paginated response. NextToken *string `locationName:"nextToken" type:"string"` - // The ID of the resource (for example., sg-xxxxxx). - // - // ResourceId is a required field - ResourceId *string `locationName:"resourceId" min:"1" type:"string" required:"true"` + // The IDs of only those resources that you want AWS Config to list in the response. + // If you do not specify this parameter, AWS Config lists all resources of the + // specified type that it has discovered. + ResourceIds []*string `locationName:"resourceIds" type:"list"` - // The resource type. + // The custom name of only those resources that you want AWS Config to list + // in the response. If you do not specify this parameter, AWS Config lists all + // resources of the specified type that it has discovered. + ResourceName *string `locationName:"resourceName" type:"string"` + + // The type of resources that you want AWS Config to list in the response. // // ResourceType is a required field ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` } // String returns the string representation -func (s GetResourceConfigHistoryInput) String() string { +func (s ListDiscoveredResourcesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetResourceConfigHistoryInput) GoString() string { +func (s ListDiscoveredResourcesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetResourceConfigHistoryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetResourceConfigHistoryInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } +func (s *ListDiscoveredResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDiscoveredResourcesInput"} if s.ResourceType == nil { invalidParams.Add(request.NewErrParamRequired("ResourceType")) } @@ -10654,406 +12559,606 @@ func (s *GetResourceConfigHistoryInput) Validate() error { return nil } -// SetChronologicalOrder sets the ChronologicalOrder field's value. 
-func (s *GetResourceConfigHistoryInput) SetChronologicalOrder(v string) *GetResourceConfigHistoryInput { - s.ChronologicalOrder = &v - return s -} - -// SetEarlierTime sets the EarlierTime field's value. -func (s *GetResourceConfigHistoryInput) SetEarlierTime(v time.Time) *GetResourceConfigHistoryInput { - s.EarlierTime = &v - return s -} - -// SetLaterTime sets the LaterTime field's value. -func (s *GetResourceConfigHistoryInput) SetLaterTime(v time.Time) *GetResourceConfigHistoryInput { - s.LaterTime = &v +// SetIncludeDeletedResources sets the IncludeDeletedResources field's value. +func (s *ListDiscoveredResourcesInput) SetIncludeDeletedResources(v bool) *ListDiscoveredResourcesInput { + s.IncludeDeletedResources = &v return s } // SetLimit sets the Limit field's value. -func (s *GetResourceConfigHistoryInput) SetLimit(v int64) *GetResourceConfigHistoryInput { +func (s *ListDiscoveredResourcesInput) SetLimit(v int64) *ListDiscoveredResourcesInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetResourceConfigHistoryInput) SetNextToken(v string) *GetResourceConfigHistoryInput { +func (s *ListDiscoveredResourcesInput) SetNextToken(v string) *ListDiscoveredResourcesInput { s.NextToken = &v return s } -// SetResourceId sets the ResourceId field's value. -func (s *GetResourceConfigHistoryInput) SetResourceId(v string) *GetResourceConfigHistoryInput { - s.ResourceId = &v +// SetResourceIds sets the ResourceIds field's value. +func (s *ListDiscoveredResourcesInput) SetResourceIds(v []*string) *ListDiscoveredResourcesInput { + s.ResourceIds = v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *ListDiscoveredResourcesInput) SetResourceName(v string) *ListDiscoveredResourcesInput { + s.ResourceName = &v return s } // SetResourceType sets the ResourceType field's value. -func (s *GetResourceConfigHistoryInput) SetResourceType(v string) *GetResourceConfigHistoryInput { +func (s *ListDiscoveredResourcesInput) SetResourceType(v string) *ListDiscoveredResourcesInput { s.ResourceType = &v return s } -// The output for the GetResourceConfigHistory action. -type GetResourceConfigHistoryOutput struct { +type ListDiscoveredResourcesOutput struct { _ struct{} `type:"structure"` - // A list that contains the configuration history of one or more resources. - ConfigurationItems []*ConfigurationItem `locationName:"configurationItems" type:"list"` - // The string that you use in a subsequent request to get the next page of results // in a paginated response. NextToken *string `locationName:"nextToken" type:"string"` + + // The details that identify a resource that is discovered by AWS Config, including + // the resource type, ID, and (if available) the custom resource name. + ResourceIdentifiers []*ResourceIdentifier `locationName:"resourceIdentifiers" type:"list"` } // String returns the string representation -func (s GetResourceConfigHistoryOutput) String() string { +func (s ListDiscoveredResourcesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetResourceConfigHistoryOutput) GoString() string { +func (s ListDiscoveredResourcesOutput) GoString() string { return s.String() } -// SetConfigurationItems sets the ConfigurationItems field's value. -func (s *GetResourceConfigHistoryOutput) SetConfigurationItems(v []*ConfigurationItem) *GetResourceConfigHistoryOutput { - s.ConfigurationItems = v +// SetNextToken sets the NextToken field's value. 
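// A usage sketch, assuming the same illustrative *ConfigService client:
// ListDiscoveredResourcesInput requires only ResourceType, and the remaining
// setters above narrow or page the result set.
func exampleListDiscoveredResources(svc *ConfigService) ([]*ResourceIdentifier, error) {
	input := &ListDiscoveredResourcesInput{}
	input.SetResourceType("AWS::EC2::Instance").
		SetIncludeDeletedResources(true). // deleted resources are excluded by default
		SetLimit(100)                     // the documented per-page ceiling
	out, err := svc.ListDiscoveredResources(input)
	if err != nil {
		return nil, err
	}
	return out.ResourceIdentifiers, nil
}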
+func (s *ListDiscoveredResourcesOutput) SetNextToken(v string) *ListDiscoveredResourcesOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetResourceConfigHistoryOutput) SetNextToken(v string) *GetResourceConfigHistoryOutput { - s.NextToken = &v +// SetResourceIdentifiers sets the ResourceIdentifiers field's value. +func (s *ListDiscoveredResourcesOutput) SetResourceIdentifiers(v []*ResourceIdentifier) *ListDiscoveredResourcesOutput { + s.ResourceIdentifiers = v return s } -// The count of resources that are grouped by the group name. -type GroupedResourceCount struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The name of the group that can be region, account ID, or resource type. For - // example, region1, region2 if the region was chosen as GroupByKey. - // - // GroupName is a required field - GroupName *string `min:"1" type:"string" required:"true"` + // The maximum number of tags returned on each page. The limit maximum is 50. + // You cannot specify a number greater than 50. If you specify 0, AWS Config + // uses the default. + Limit *int64 `type:"integer"` - // The number of resources in the group. + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are ConfigRule, ConfigurationAggregator + // and AggregatorAuthorization. // - // ResourceCount is a required field - ResourceCount *int64 `type:"long" required:"true"` + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GroupedResourceCount) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GroupedResourceCount) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } -// SetGroupName sets the GroupName field's value. -func (s *GroupedResourceCount) SetGroupName(v string) *GroupedResourceCount { - s.GroupName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetResourceCount sets the ResourceCount field's value. -func (s *GroupedResourceCount) SetResourceCount(v int64) *GroupedResourceCount { - s.ResourceCount = &v +// SetLimit sets the Limit field's value. +func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { + s.Limit = &v return s } -type ListAggregateDiscoveredResourcesInput struct { - _ struct{} `type:"structure"` - - // The name of the configuration aggregator. - // - // ConfigurationAggregatorName is a required field - ConfigurationAggregatorName *string `min:"1" type:"string" required:"true"` +// SetNextToken sets the NextToken field's value. 
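// A usage sketch with placeholder names: per the field docs above, the ARN
// must identify a ConfigRule, ConfigurationAggregator, or
// AggregatorAuthorization. SetResourceArn and the output type follow just below.
func exampleListTagsForResource(svc *ConfigService) ([]*Tag, error) {
	input := &ListTagsForResourceInput{}
	input.SetResourceArn("arn:aws:config:us-east-1:123456789012:config-rule/config-rule-example"). // hypothetical ARN
		SetLimit(50) // the documented maximum page size
	if err := input.Validate(); err != nil {
		return nil, err // ResourceArn is required with min length 1
	}
	out, err := svc.ListTagsForResource(input)
	if err != nil {
		return nil, err
	}
	return out.Tags, nil
}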
+func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} - // Filters the results based on the ResourceFilters object. - Filters *ResourceFilters `type:"structure"` +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} - // The maximum number of resource identifiers returned on each page. The default - // is 100. You cannot specify a number greater than 100. If you specify 0, AWS - // Config uses the default. - Limit *int64 `type:"integer"` +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` // The nextToken string returned on a previous page that you use to get the // next page of results in a paginated response. NextToken *string `type:"string"` - // The type of resources that you want AWS Config to list in the response. - // - // ResourceType is a required field - ResourceType *string `type:"string" required:"true" enum:"ResourceType"` + // The tags for the resource. + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation -func (s ListAggregateDiscoveredResourcesInput) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListAggregateDiscoveredResourcesInput) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAggregateDiscoveredResourcesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAggregateDiscoveredResourcesInput"} - if s.ConfigurationAggregatorName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigurationAggregatorName")) - } - if s.ConfigurationAggregatorName != nil && len(*s.ConfigurationAggregatorName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationAggregatorName", 1)) - } - if s.ResourceType == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceType")) - } - if s.Filters != nil { - if err := s.Filters.Validate(); err != nil { - invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) - } - } +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Organization config rule creation or deletion status in each member account. +// This includes the name of the rule, the status, error code and error message +// when the rule creation or deletion failed. +type MemberAccountStatus struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of a member account. + // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The name of config rule deployed in the member account. + // + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // An error code that is returned when config rule creation or deletion failed + // in the member account. + ErrorCode *string `type:"string"` + + // An error message indicating that config rule creation or deletion + // has failed due to an error in the member account.
+ ErrorMessage *string `type:"string"` + + // The timestamp of the last status update. + LastUpdateTime *time.Time `type:"timestamp"` + + // Indicates deployment status for config rule in the member account. When master + // account calls PutOrganizationConfigRule action for the first time, config + // rule status is created in the member account. When master account calls PutOrganizationConfigRule + // action for the second time, config rule status is updated in the member account. + // Config rule status is deleted when the master account deletes OrganizationConfigRule + // and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the rule to: + // + // * CREATE_SUCCESSFUL when config rule has been created in the member account. + // + // * CREATE_IN_PROGRESS when config rule is being created in the member account. + // + // * CREATE_FAILED when config rule creation has failed in the member account. + // + // * DELETE_FAILED when config rule deletion has failed in the member account. + // + // * DELETE_IN_PROGRESS when config rule is being deleted in the member account. + // + // * DELETE_SUCCESSFUL when config rule has been deleted in the member account. + // + // * UPDATE_SUCCESSFUL when config rule has been updated in the member account. + // + // * UPDATE_IN_PROGRESS when config rule is being updated in the member account. + // + // * UPDATE_FAILED when config rule update has failed in the member account. + // + // MemberAccountRuleStatus is a required field + MemberAccountRuleStatus *string `type:"string" required:"true" enum:"MemberAccountRuleStatus"` +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// String returns the string representation +func (s MemberAccountStatus) String() string { + return awsutil.Prettify(s) } -// SetConfigurationAggregatorName sets the ConfigurationAggregatorName field's value. -func (s *ListAggregateDiscoveredResourcesInput) SetConfigurationAggregatorName(v string) *ListAggregateDiscoveredResourcesInput { - s.ConfigurationAggregatorName = &v +// GoString returns the string representation +func (s MemberAccountStatus) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *MemberAccountStatus) SetAccountId(v string) *MemberAccountStatus { + s.AccountId = &v return s } -// SetFilters sets the Filters field's value. -func (s *ListAggregateDiscoveredResourcesInput) SetFilters(v *ResourceFilters) *ListAggregateDiscoveredResourcesInput { - s.Filters = v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *MemberAccountStatus) SetConfigRuleName(v string) *MemberAccountStatus { + s.ConfigRuleName = &v return s } -// SetLimit sets the Limit field's value. -func (s *ListAggregateDiscoveredResourcesInput) SetLimit(v int64) *ListAggregateDiscoveredResourcesInput { - s.Limit = &v +// SetErrorCode sets the ErrorCode field's value. +func (s *MemberAccountStatus) SetErrorCode(v string) *MemberAccountStatus { + s.ErrorCode = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListAggregateDiscoveredResourcesInput) SetNextToken(v string) *ListAggregateDiscoveredResourcesInput { - s.NextToken = &v +// SetErrorMessage sets the ErrorMessage field's value. +func (s *MemberAccountStatus) SetErrorMessage(v string) *MemberAccountStatus { + s.ErrorMessage = &v return s } -// SetResourceType sets the ResourceType field's value.
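// A small sketch over the type above: reducing the per-account statuses
// returned by GetOrganizationConfigRuleDetailedStatus to the accounts whose
// deployment failed. The string literals mirror the MemberAccountRuleStatus
// values documented on the field; the helper name is illustrative.
func failedMemberAccounts(statuses []*MemberAccountStatus) []string {
	var failed []string
	for _, ms := range statuses {
		switch *ms.MemberAccountRuleStatus { // required field, non-nil in valid responses
		case "CREATE_FAILED", "UPDATE_FAILED", "DELETE_FAILED":
			// ErrorCode and ErrorMessage carry the per-account failure details.
			failed = append(failed, *ms.AccountId)
		}
	}
	return failed
}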
-func (s *ListAggregateDiscoveredResourcesInput) SetResourceType(v string) *ListAggregateDiscoveredResourcesInput { - s.ResourceType = &v +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *MemberAccountStatus) SetLastUpdateTime(v time.Time) *MemberAccountStatus { + s.LastUpdateTime = &v return s } -type ListAggregateDiscoveredResourcesOutput struct { +// SetMemberAccountRuleStatus sets the MemberAccountRuleStatus field's value. +func (s *MemberAccountStatus) SetMemberAccountRuleStatus(v string) *MemberAccountStatus { + s.MemberAccountRuleStatus = &v + return s +} + +// This object contains regions to set up the aggregator and an IAM role to +// retrieve organization details. +type OrganizationAggregationSource struct { _ struct{} `type:"structure"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // If true, aggregate existing AWS Config regions and future regions. + AllAwsRegions *bool `type:"boolean"` - // Returns a list of ResourceIdentifiers objects. - ResourceIdentifiers []*AggregateResourceIdentifier `type:"list"` + // The source regions being aggregated. + AwsRegions []*string `min:"1" type:"list"` + + // ARN of the IAM role used to retrieve AWS Organization details associated + // with the aggregator account. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` } // String returns the string representation -func (s ListAggregateDiscoveredResourcesOutput) String() string { +func (s OrganizationAggregationSource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListAggregateDiscoveredResourcesOutput) GoString() string { +func (s OrganizationAggregationSource) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListAggregateDiscoveredResourcesOutput) SetNextToken(v string) *ListAggregateDiscoveredResourcesOutput { - s.NextToken = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *OrganizationAggregationSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OrganizationAggregationSource"} + if s.AwsRegions != nil && len(s.AwsRegions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegions", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllAwsRegions sets the AllAwsRegions field's value. +func (s *OrganizationAggregationSource) SetAllAwsRegions(v bool) *OrganizationAggregationSource { + s.AllAwsRegions = &v return s } -// SetResourceIdentifiers sets the ResourceIdentifiers field's value. -func (s *ListAggregateDiscoveredResourcesOutput) SetResourceIdentifiers(v []*AggregateResourceIdentifier) *ListAggregateDiscoveredResourcesOutput { - s.ResourceIdentifiers = v +// SetAwsRegions sets the AwsRegions field's value. +func (s *OrganizationAggregationSource) SetAwsRegions(v []*string) *OrganizationAggregationSource { + s.AwsRegions = v return s } -type ListDiscoveredResourcesInput struct { +// SetRoleArn sets the RoleArn field's value. +func (s *OrganizationAggregationSource) SetRoleArn(v string) *OrganizationAggregationSource { + s.RoleArn = &v + return s +} + +// An organization config rule that has information about config rules that +// AWS Config creates in member accounts. 
+type OrganizationConfigRule struct { _ struct{} `type:"structure"` - // Specifies whether AWS Config includes deleted resources in the results. By - // default, deleted resources are not included. - IncludeDeletedResources *bool `locationName:"includeDeletedResources" type:"boolean"` + // A comma-separated list of accounts excluded from organization config rule. + ExcludedAccounts []*string `type:"list"` - // The maximum number of resource identifiers returned on each page. The default - // is 100. You cannot specify a number greater than 100. If you specify 0, AWS - // Config uses the default. - Limit *int64 `locationName:"limit" type:"integer"` + // The timestamp of the last update. + LastUpdateTime *time.Time `type:"timestamp"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `locationName:"nextToken" type:"string"` + // The Amazon Resource Name (ARN) of organization config rule. + // + // OrganizationConfigRuleArn is a required field + OrganizationConfigRuleArn *string `min:"1" type:"string" required:"true"` - // The IDs of only those resources that you want AWS Config to list in the response. - // If you do not specify this parameter, AWS Config lists all resources of the - // specified type that it has discovered. - ResourceIds []*string `locationName:"resourceIds" type:"list"` + // The name that you assign to organization config rule. + // + // OrganizationConfigRuleName is a required field + OrganizationConfigRuleName *string `min:"1" type:"string" required:"true"` - // The custom name of only those resources that you want AWS Config to list - // in the response. If you do not specify this parameter, AWS Config lists all - // resources of the specified type that it has discovered. - ResourceName *string `locationName:"resourceName" type:"string"` + // An OrganizationCustomRuleMetadata object. + OrganizationCustomRuleMetadata *OrganizationCustomRuleMetadata `type:"structure"` - // The type of resources that you want AWS Config to list in the response. - // - // ResourceType is a required field - ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` + // An OrganizationManagedRuleMetadata object. + OrganizationManagedRuleMetadata *OrganizationManagedRuleMetadata `type:"structure"` } // String returns the string representation -func (s ListDiscoveredResourcesInput) String() string { +func (s OrganizationConfigRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDiscoveredResourcesInput) GoString() string { +func (s OrganizationConfigRule) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDiscoveredResourcesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDiscoveredResourcesInput"} - if s.ResourceType == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIncludeDeletedResources sets the IncludeDeletedResources field's value. -func (s *ListDiscoveredResourcesInput) SetIncludeDeletedResources(v bool) *ListDiscoveredResourcesInput { - s.IncludeDeletedResources = &v +// SetExcludedAccounts sets the ExcludedAccounts field's value. 
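// A reader-side sketch: an OrganizationConfigRule is expected to carry one of
// the two metadata objects above (managed or custom), so a consumer can branch
// on whichever pointer is set. The helper name is illustrative.
func organizationRuleKind(r *OrganizationConfigRule) string {
	switch {
	case r.OrganizationManagedRuleMetadata != nil:
		return "managed" // backed by a predefined rule identifier
	case r.OrganizationCustomRuleMetadata != nil:
		return "custom" // backed by a Lambda function evaluating member accounts
	default:
		return "unknown"
	}
}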
+func (s *OrganizationConfigRule) SetExcludedAccounts(v []*string) *OrganizationConfigRule { + s.ExcludedAccounts = v return s } -// SetLimit sets the Limit field's value. -func (s *ListDiscoveredResourcesInput) SetLimit(v int64) *ListDiscoveredResourcesInput { - s.Limit = &v +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *OrganizationConfigRule) SetLastUpdateTime(v time.Time) *OrganizationConfigRule { + s.LastUpdateTime = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDiscoveredResourcesInput) SetNextToken(v string) *ListDiscoveredResourcesInput { - s.NextToken = &v +// SetOrganizationConfigRuleArn sets the OrganizationConfigRuleArn field's value. +func (s *OrganizationConfigRule) SetOrganizationConfigRuleArn(v string) *OrganizationConfigRule { + s.OrganizationConfigRuleArn = &v return s } -// SetResourceIds sets the ResourceIds field's value. -func (s *ListDiscoveredResourcesInput) SetResourceIds(v []*string) *ListDiscoveredResourcesInput { - s.ResourceIds = v +// SetOrganizationConfigRuleName sets the OrganizationConfigRuleName field's value. +func (s *OrganizationConfigRule) SetOrganizationConfigRuleName(v string) *OrganizationConfigRule { + s.OrganizationConfigRuleName = &v return s } -// SetResourceName sets the ResourceName field's value. -func (s *ListDiscoveredResourcesInput) SetResourceName(v string) *ListDiscoveredResourcesInput { - s.ResourceName = &v +// SetOrganizationCustomRuleMetadata sets the OrganizationCustomRuleMetadata field's value. +func (s *OrganizationConfigRule) SetOrganizationCustomRuleMetadata(v *OrganizationCustomRuleMetadata) *OrganizationConfigRule { + s.OrganizationCustomRuleMetadata = v return s } -// SetResourceType sets the ResourceType field's value. -func (s *ListDiscoveredResourcesInput) SetResourceType(v string) *ListDiscoveredResourcesInput { - s.ResourceType = &v +// SetOrganizationManagedRuleMetadata sets the OrganizationManagedRuleMetadata field's value. +func (s *OrganizationConfigRule) SetOrganizationManagedRuleMetadata(v *OrganizationManagedRuleMetadata) *OrganizationConfigRule { + s.OrganizationManagedRuleMetadata = v return s } -type ListDiscoveredResourcesOutput struct { +// Returns the status for an organization config rule in an organization. +type OrganizationConfigRuleStatus struct { _ struct{} `type:"structure"` - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. - NextToken *string `locationName:"nextToken" type:"string"` + // An error code that is returned when organization config rule creation or + // deletion has failed. + ErrorCode *string `type:"string"` - // The details that identify a resource that is discovered by AWS Config, including - // the resource type, ID, and (if available) the custom resource name. - ResourceIdentifiers []*ResourceIdentifier `locationName:"resourceIdentifiers" type:"list"` + // An error message indicating that organization config rule creation or deletion + // failed due to an error. + ErrorMessage *string `type:"string"` + + // The timestamp of the last update. + LastUpdateTime *time.Time `type:"timestamp"` + + // The name that you assign to organization config rule. + // + // OrganizationConfigRuleName is a required field + OrganizationConfigRuleName *string `min:"1" type:"string" required:"true"` + + // Indicates deployment status of an organization config rule. 
When master account + // calls PutOrganizationConfigRule action for the first time, config rule status + // is created in all the member accounts. When master account calls PutOrganizationConfigRule + // action for the second time, config rule status is updated in all the member + // accounts. Additionally, config rule status is updated when one or more member + // accounts join or leave an organization. Config rule status is deleted when + // the master account deletes OrganizationConfigRule in all the member accounts + // and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the rule to: + // + // * CREATE_SUCCESSFUL when an organization config rule has been successfully + // created in all the member accounts. + // + // * CREATE_IN_PROGRESS when an organization config rule creation is in progress. + // + // * CREATE_FAILED when an organization config rule creation failed in one + // or more member accounts within that organization. + // + // * DELETE_FAILED when an organization config rule deletion failed in one + // or more member accounts within that organization. + // + // * DELETE_IN_PROGRESS when an organization config rule deletion is in progress. + // + // * DELETE_SUCCESSFUL when an organization config rule has been successfully + // deleted from all the member accounts. + // + // * UPDATE_SUCCESSFUL when an organization config rule has been successfully + // updated in all the member accounts. + // + // * UPDATE_IN_PROGRESS when an organization config rule update is in progress. + // + // * UPDATE_FAILED when an organization config rule update failed in one + // or more member accounts within that organization. + // + // OrganizationRuleStatus is a required field + OrganizationRuleStatus *string `type:"string" required:"true" enum:"OrganizationRuleStatus"` } // String returns the string representation -func (s ListDiscoveredResourcesOutput) String() string { +func (s OrganizationConfigRuleStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDiscoveredResourcesOutput) GoString() string { +func (s OrganizationConfigRuleStatus) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListDiscoveredResourcesOutput) SetNextToken(v string) *ListDiscoveredResourcesOutput { - s.NextToken = &v +// SetErrorCode sets the ErrorCode field's value. +func (s *OrganizationConfigRuleStatus) SetErrorCode(v string) *OrganizationConfigRuleStatus { + s.ErrorCode = &v return s } -// SetResourceIdentifiers sets the ResourceIdentifiers field's value. -func (s *ListDiscoveredResourcesOutput) SetResourceIdentifiers(v []*ResourceIdentifier) *ListDiscoveredResourcesOutput { - s.ResourceIdentifiers = v +// SetErrorMessage sets the ErrorMessage field's value. +func (s *OrganizationConfigRuleStatus) SetErrorMessage(v string) *OrganizationConfigRuleStatus { + s.ErrorMessage = &v return s } -type ListTagsForResourceInput struct { +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *OrganizationConfigRuleStatus) SetLastUpdateTime(v time.Time) *OrganizationConfigRuleStatus { + s.LastUpdateTime = &v + return s +} + +// SetOrganizationConfigRuleName sets the OrganizationConfigRuleName field's value. +func (s *OrganizationConfigRuleStatus) SetOrganizationConfigRuleName(v string) *OrganizationConfigRuleStatus { + s.OrganizationConfigRuleName = &v + return s +} + +// SetOrganizationRuleStatus sets the OrganizationRuleStatus field's value. 
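// A polling-oriented sketch: treating the *_IN_PROGRESS values enumerated
// above as non-terminal, e.g. while waiting on a PutOrganizationConfigRule
// rollout. The helper name is illustrative.
func organizationRuleSettled(status *OrganizationConfigRuleStatus) bool {
	switch *status.OrganizationRuleStatus { // required field
	case "CREATE_IN_PROGRESS", "UPDATE_IN_PROGRESS", "DELETE_IN_PROGRESS":
		return false // still rolling out to member accounts
	default:
		return true // *_SUCCESSFUL, or *_FAILED with ErrorCode/ErrorMessage populated
	}
}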
+func (s *OrganizationConfigRuleStatus) SetOrganizationRuleStatus(v string) *OrganizationConfigRuleStatus { + s.OrganizationRuleStatus = &v + return s +} + +// An object that specifies organization custom rule metadata such as resource +// type, resource ID of AWS resource, Lambda function ARN, and organization +// trigger types that trigger AWS Config to evaluate your AWS resources against +// a rule. It also provides the frequency with which you want AWS Config to +// run evaluations for the rule if the trigger type is periodic. +type OrganizationCustomRuleMetadata struct { _ struct{} `type:"structure"` - // The maximum number of tags returned on each page. The limit maximum is 50. - // You cannot specify a number greater than 50. If you specify 0, AWS Config - // uses the default. - Limit *int64 `type:"integer"` + // The description that you provide for organization config rule. + Description *string `type:"string"` - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. - NextToken *string `type:"string"` + // A string, in JSON format, that is passed to organization config rule Lambda + // function. + InputParameters *string `min:"1" type:"string"` - // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. Currently, the supported resources are ConfigRule, ConfigurationAggregator - // and AggregatorAuthorization. + // The Lambda function ARN. // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `min:"1" type:"string" required:"true"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + // Your custom rule is triggered when AWS Config delivers the configuration + // snapshot. For more information, see ConfigSnapshotDeliveryProperties. + // + // By default, rules with a periodic trigger are evaluated every 24 hours. To + // change the frequency, specify a valid value for the MaximumExecutionFrequency + // parameter. + MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"` + + // The type of notification that triggers AWS Config to run an evaluation for + // a rule. You can specify the following notification types: + // + // * ConfigurationItemChangeNotification - Triggers an evaluation when AWS + // Config delivers a configuration item as a result of a resource change. + // + // * OversizedConfigurationItemChangeNotification - Triggers an evaluation + // when AWS Config delivers an oversized configuration item. AWS Config may + // generate this notification type when a resource changes and the notification + // exceeds the maximum size allowed by Amazon SNS. + // + // * ScheduledNotification - Triggers a periodic evaluation at the frequency + // specified for MaximumExecutionFrequency. + // + // OrganizationConfigRuleTriggerTypes is a required field + OrganizationConfigRuleTriggerTypes []*string `type:"list" required:"true"` + + // The ID of the AWS resource that was evaluated. + ResourceIdScope *string `min:"1" type:"string"` + + // The type of the AWS resource that was evaluated. + ResourceTypesScope []*string `type:"list"` + + // One part of a key-value pair that makes up a tag. A key is a general label + // that acts like a category for more specific tag values. + TagKeyScope *string `min:"1" type:"string"` + + // The optional part of a key-value pair that makes up a tag.
A value acts as + // a descriptor within a tag category (key). + TagValueScope *string `min:"1" type:"string"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s OrganizationCustomRuleMetadata) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s OrganizationCustomRuleMetadata) GoString() string { return s.String() } - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OrganizationCustomRuleMetadata) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OrganizationCustomRuleMetadata"} + if s.InputParameters != nil && len(*s.InputParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputParameters", 1)) } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + if s.LambdaFunctionArn != nil && len(*s.LambdaFunctionArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LambdaFunctionArn", 1)) + } + if s.OrganizationConfigRuleTriggerTypes == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationConfigRuleTriggerTypes")) + } + if s.ResourceIdScope != nil && len(*s.ResourceIdScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIdScope", 1)) + } + if s.TagKeyScope != nil && len(*s.TagKeyScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeyScope", 1)) + } + if s.TagValueScope != nil && len(*s.TagValueScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagValueScope", 1)) } if invalidParams.Len() > 0 { @@ -11062,93 +13167,134 @@ func (s *ListTagsForResourceInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { - s.Limit = &v +// SetDescription sets the Description field's value. +func (s *OrganizationCustomRuleMetadata) SetDescription(v string) *OrganizationCustomRuleMetadata { + s.Description = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { - s.NextToken = &v +// SetInputParameters sets the InputParameters field's value. +func (s *OrganizationCustomRuleMetadata) SetInputParameters(v string) *OrganizationCustomRuleMetadata { + s.InputParameters = &v return s } -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *OrganizationCustomRuleMetadata) SetLambdaFunctionArn(v string) *OrganizationCustomRuleMetadata { + s.LambdaFunctionArn = &v return s } -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` - - // The nextToken string returned on a previous page that you use to get the - // next page of results in a paginated response. 
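// A construction sketch using the setters that follow: per the Validate above,
// a minimal OrganizationCustomRuleMetadata needs a Lambda ARN plus the
// trigger-type list. The ARN is a placeholder, the trigger string comes from
// the field docs, and "TwentyFour_Hours" is the assumed spelling of the
// 24-hour MaximumExecutionFrequency enum value.
func exampleCustomRuleMetadata() (*OrganizationCustomRuleMetadata, error) {
	trigger := "ConfigurationItemChangeNotification"
	meta := &OrganizationCustomRuleMetadata{}
	meta.SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:org-config-rule"). // hypothetical ARN
		SetOrganizationConfigRuleTriggerTypes([]*string{&trigger}).
		SetMaximumExecutionFrequency("TwentyFour_Hours") // matches the documented default cadence
	if err := meta.Validate(); err != nil {
		return nil, err
	}
	return meta, nil
}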
- NextToken *string `type:"string"` +// SetMaximumExecutionFrequency sets the MaximumExecutionFrequency field's value. +func (s *OrganizationCustomRuleMetadata) SetMaximumExecutionFrequency(v string) *OrganizationCustomRuleMetadata { + s.MaximumExecutionFrequency = &v + return s +} - // The tags for the resource. - Tags []*Tag `min:"1" type:"list"` +// SetOrganizationConfigRuleTriggerTypes sets the OrganizationConfigRuleTriggerTypes field's value. +func (s *OrganizationCustomRuleMetadata) SetOrganizationConfigRuleTriggerTypes(v []*string) *OrganizationCustomRuleMetadata { + s.OrganizationConfigRuleTriggerTypes = v + return s } -// String returns the string representation -func (s ListTagsForResourceOutput) String() string { - return awsutil.Prettify(s) +// SetResourceIdScope sets the ResourceIdScope field's value. +func (s *OrganizationCustomRuleMetadata) SetResourceIdScope(v string) *OrganizationCustomRuleMetadata { + s.ResourceIdScope = &v + return s } -// GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { - return s.String() +// SetResourceTypesScope sets the ResourceTypesScope field's value. +func (s *OrganizationCustomRuleMetadata) SetResourceTypesScope(v []*string) *OrganizationCustomRuleMetadata { + s.ResourceTypesScope = v + return s } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { - s.NextToken = &v +// SetTagKeyScope sets the TagKeyScope field's value. +func (s *OrganizationCustomRuleMetadata) SetTagKeyScope(v string) *OrganizationCustomRuleMetadata { + s.TagKeyScope = &v return s } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v +// SetTagValueScope sets the TagValueScope field's value. +func (s *OrganizationCustomRuleMetadata) SetTagValueScope(v string) *OrganizationCustomRuleMetadata { + s.TagValueScope = &v return s } -// This object contains regions to set up the aggregator and an IAM role to -// retrieve organization details. -type OrganizationAggregationSource struct { +// An object that specifies organization managed rule metadata such as resource +// type and ID of AWS resource along with the rule identifier. It also provides +// the frequency with which you want AWS Config to run evaluations for the rule +// if the trigger type is periodic. +type OrganizationManagedRuleMetadata struct { _ struct{} `type:"structure"` - // If true, aggregate existing AWS Config regions and future regions. - AllAwsRegions *bool `type:"boolean"` + // The description that you provide for organization config rule. + Description *string `type:"string"` - // The source regions being aggregated. - AwsRegions []*string `min:"1" type:"list"` + // A string, in JSON format, that is passed to organization config rule Lambda + // function. + InputParameters *string `min:"1" type:"string"` - // ARN of the IAM role used to retrieve AWS Organization details associated - // with the aggregator account. + // The maximum frequency with which AWS Config runs evaluations for a rule. + // You are using an AWS managed rule that is triggered at a periodic frequency. // - // RoleArn is a required field - RoleArn *string `type:"string" required:"true"` + // By default, rules with a periodic trigger are evaluated every 24 hours. To + // change the frequency, specify a valid value for the MaximumExecutionFrequency + // parameter. 
+ MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"` + + // The ID of the AWS resource that was evaluated. + ResourceIdScope *string `min:"1" type:"string"` + + // The type of the AWS resource that was evaluated. + ResourceTypesScope []*string `type:"list"` + + // For organization config managed rules, a predefined identifier from a list. + // For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed + // rule, see Using AWS Managed Config Rules (https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). + // + // RuleIdentifier is a required field + RuleIdentifier *string `min:"1" type:"string" required:"true"` + + // One part of a key-value pair that makes up a tag. A key is a general label + // that acts like a category for more specific tag values. + TagKeyScope *string `min:"1" type:"string"` + + // The optional part of a key-value pair that makes up a tag. A value acts as + // a descriptor within a tag category (key). + TagValueScope *string `min:"1" type:"string"` } // String returns the string representation -func (s OrganizationAggregationSource) String() string { +func (s OrganizationManagedRuleMetadata) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OrganizationAggregationSource) GoString() string { +func (s OrganizationManagedRuleMetadata) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *OrganizationAggregationSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OrganizationAggregationSource"} - if s.AwsRegions != nil && len(s.AwsRegions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsRegions", 1)) +func (s *OrganizationManagedRuleMetadata) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OrganizationManagedRuleMetadata"} + if s.InputParameters != nil && len(*s.InputParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputParameters", 1)) } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) + if s.ResourceIdScope != nil && len(*s.ResourceIdScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIdScope", 1)) + } + if s.RuleIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("RuleIdentifier")) + } + if s.RuleIdentifier != nil && len(*s.RuleIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleIdentifier", 1)) + } + if s.TagKeyScope != nil && len(*s.TagKeyScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeyScope", 1)) + } + if s.TagValueScope != nil && len(*s.TagValueScope) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagValueScope", 1)) } if invalidParams.Len() > 0 { @@ -11157,21 +13303,51 @@ func (s *OrganizationAggregationSource) Validate() error { return nil } -// SetAllAwsRegions sets the AllAwsRegions field's value. -func (s *OrganizationAggregationSource) SetAllAwsRegions(v bool) *OrganizationAggregationSource { - s.AllAwsRegions = &v +// SetDescription sets the Description field's value. +func (s *OrganizationManagedRuleMetadata) SetDescription(v string) *OrganizationManagedRuleMetadata { + s.Description = &v return s } -// SetAwsRegions sets the AwsRegions field's value. -func (s *OrganizationAggregationSource) SetAwsRegions(v []*string) *OrganizationAggregationSource { - s.AwsRegions = v +// SetInputParameters sets the InputParameters field's value.
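// The managed-rule counterpart, as a sketch: the Validate above requires only
// RuleIdentifier, and IAM_PASSWORD_POLICY is the example identifier named in
// the field docs. Setters for this type follow below.
func exampleManagedRuleMetadata() (*OrganizationManagedRuleMetadata, error) {
	meta := &OrganizationManagedRuleMetadata{}
	meta.SetRuleIdentifier("IAM_PASSWORD_POLICY")
	if err := meta.Validate(); err != nil {
		return nil, err // RuleIdentifier is required, min length 1
	}
	return meta, nil
}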
+func (s *OrganizationManagedRuleMetadata) SetInputParameters(v string) *OrganizationManagedRuleMetadata { + s.InputParameters = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *OrganizationAggregationSource) SetRoleArn(v string) *OrganizationAggregationSource { - s.RoleArn = &v +// SetMaximumExecutionFrequency sets the MaximumExecutionFrequency field's value. +func (s *OrganizationManagedRuleMetadata) SetMaximumExecutionFrequency(v string) *OrganizationManagedRuleMetadata { + s.MaximumExecutionFrequency = &v + return s +} + +// SetResourceIdScope sets the ResourceIdScope field's value. +func (s *OrganizationManagedRuleMetadata) SetResourceIdScope(v string) *OrganizationManagedRuleMetadata { + s.ResourceIdScope = &v + return s +} + +// SetResourceTypesScope sets the ResourceTypesScope field's value. +func (s *OrganizationManagedRuleMetadata) SetResourceTypesScope(v []*string) *OrganizationManagedRuleMetadata { + s.ResourceTypesScope = v + return s +} + +// SetRuleIdentifier sets the RuleIdentifier field's value. +func (s *OrganizationManagedRuleMetadata) SetRuleIdentifier(v string) *OrganizationManagedRuleMetadata { + s.RuleIdentifier = &v + return s +} + +// SetTagKeyScope sets the TagKeyScope field's value. +func (s *OrganizationManagedRuleMetadata) SetTagKeyScope(v string) *OrganizationManagedRuleMetadata { + s.TagKeyScope = &v + return s +} + +// SetTagValueScope sets the TagValueScope field's value. +func (s *OrganizationManagedRuleMetadata) SetTagValueScope(v string) *OrganizationManagedRuleMetadata { + s.TagValueScope = &v return s } @@ -11222,6 +13398,7 @@ type PutAggregationAuthorizationInput struct { // AuthorizedAwsRegion is a required field AuthorizedAwsRegion *string `min:"1" type:"string" required:"true"` + // An array of tag objects. Tags []*Tag `type:"list"` } @@ -11313,6 +13490,7 @@ type PutConfigRuleInput struct { // ConfigRule is a required field ConfigRule *ConfigRule `type:"structure" required:"true"` + // An array of tag objects. Tags []*Tag `type:"list"` } @@ -11394,6 +13572,7 @@ type PutConfigurationAggregatorInput struct { // An OrganizationAggregationSource object. OrganizationAggregationSource *OrganizationAggregationSource `type:"structure"` + // An array of tag objects. Tags []*Tag `type:"list"` } @@ -11705,44 +13884,243 @@ func (s PutEvaluationsOutput) GoString() string { return s.String() } -// SetFailedEvaluations sets the FailedEvaluations field's value. -func (s *PutEvaluationsOutput) SetFailedEvaluations(v []*Evaluation) *PutEvaluationsOutput { - s.FailedEvaluations = v +// SetFailedEvaluations sets the FailedEvaluations field's value. +func (s *PutEvaluationsOutput) SetFailedEvaluations(v []*Evaluation) *PutEvaluationsOutput { + s.FailedEvaluations = v + return s +} + +type PutOrganizationConfigRuleInput struct { + _ struct{} `type:"structure"` + + // A comma-separated list of accounts that you want to exclude from an organization + // config rule. + ExcludedAccounts []*string `type:"list"` + + // The name that you assign to an organization config rule. + // + // OrganizationConfigRuleName is a required field + OrganizationConfigRuleName *string `min:"1" type:"string" required:"true"` + + // An OrganizationCustomRuleMetadata object. + OrganizationCustomRuleMetadata *OrganizationCustomRuleMetadata `type:"structure"` + + // An OrganizationManagedRuleMetadata object. 
+ OrganizationManagedRuleMetadata *OrganizationManagedRuleMetadata `type:"structure"` +} + +// String returns the string representation +func (s PutOrganizationConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutOrganizationConfigRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutOrganizationConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutOrganizationConfigRuleInput"} + if s.OrganizationConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationConfigRuleName")) + } + if s.OrganizationConfigRuleName != nil && len(*s.OrganizationConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationConfigRuleName", 1)) + } + if s.OrganizationCustomRuleMetadata != nil { + if err := s.OrganizationCustomRuleMetadata.Validate(); err != nil { + invalidParams.AddNested("OrganizationCustomRuleMetadata", err.(request.ErrInvalidParams)) + } + } + if s.OrganizationManagedRuleMetadata != nil { + if err := s.OrganizationManagedRuleMetadata.Validate(); err != nil { + invalidParams.AddNested("OrganizationManagedRuleMetadata", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExcludedAccounts sets the ExcludedAccounts field's value. +func (s *PutOrganizationConfigRuleInput) SetExcludedAccounts(v []*string) *PutOrganizationConfigRuleInput { + s.ExcludedAccounts = v + return s +} + +// SetOrganizationConfigRuleName sets the OrganizationConfigRuleName field's value. +func (s *PutOrganizationConfigRuleInput) SetOrganizationConfigRuleName(v string) *PutOrganizationConfigRuleInput { + s.OrganizationConfigRuleName = &v + return s +} + +// SetOrganizationCustomRuleMetadata sets the OrganizationCustomRuleMetadata field's value. +func (s *PutOrganizationConfigRuleInput) SetOrganizationCustomRuleMetadata(v *OrganizationCustomRuleMetadata) *PutOrganizationConfigRuleInput { + s.OrganizationCustomRuleMetadata = v + return s +} + +// SetOrganizationManagedRuleMetadata sets the OrganizationManagedRuleMetadata field's value. +func (s *PutOrganizationConfigRuleInput) SetOrganizationManagedRuleMetadata(v *OrganizationManagedRuleMetadata) *PutOrganizationConfigRuleInput { + s.OrganizationManagedRuleMetadata = v + return s +} + +type PutOrganizationConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of an organization config rule. + OrganizationConfigRuleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutOrganizationConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutOrganizationConfigRuleOutput) GoString() string { + return s.String() +} + +// SetOrganizationConfigRuleArn sets the OrganizationConfigRuleArn field's value. +func (s *PutOrganizationConfigRuleOutput) SetOrganizationConfigRuleArn(v string) *PutOrganizationConfigRuleOutput { + s.OrganizationConfigRuleArn = &v + return s +} + +type PutRemediationConfigurationsInput struct { + _ struct{} `type:"structure"` + + // A list of remediation configuration objects. 
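For orientation while reviewing, a minimal sketch of how the new organization config rule types above compose. It assumes the PutOrganizationConfigRule operation generated alongside them in this file; the session, rule name, and managed rule chosen below are illustrative only, not part of the vendored patch.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	// RuleIdentifier is the only required field of the managed-rule metadata;
	// the frequency reuses the existing MaximumExecutionFrequency enum values.
	meta := (&configservice.OrganizationManagedRuleMetadata{}).
		SetRuleIdentifier("IAM_PASSWORD_POLICY").
		SetMaximumExecutionFrequency(configservice.MaximumExecutionFrequencyTwentyFourHours)

	input := (&configservice.PutOrganizationConfigRuleInput{}).
		SetOrganizationConfigRuleName("org-iam-password-policy").
		SetOrganizationManagedRuleMetadata(meta)

	// Validate applies the client-side constraints from
	// PutOrganizationConfigRuleInput.Validate above before any request is sent.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.PutOrganizationConfigRule(input); err != nil { // assumed operation
		log.Fatal(err)
	}
}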
+ // + // RemediationConfigurations is a required field + RemediationConfigurations []*RemediationConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRemediationConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRemediationConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRemediationConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRemediationConfigurationsInput"} + if s.RemediationConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("RemediationConfigurations")) + } + if s.RemediationConfigurations != nil { + for i, v := range s.RemediationConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RemediationConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRemediationConfigurations sets the RemediationConfigurations field's value. +func (s *PutRemediationConfigurationsInput) SetRemediationConfigurations(v []*RemediationConfiguration) *PutRemediationConfigurationsInput { + s.RemediationConfigurations = v + return s +} + +type PutRemediationConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // Returns a list of failed remediation batch objects. + FailedBatches []*FailedRemediationBatch `type:"list"` +} + +// String returns the string representation +func (s PutRemediationConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRemediationConfigurationsOutput) GoString() string { + return s.String() +} + +// SetFailedBatches sets the FailedBatches field's value. +func (s *PutRemediationConfigurationsOutput) SetFailedBatches(v []*FailedRemediationBatch) *PutRemediationConfigurationsOutput { + s.FailedBatches = v return s } -type PutRemediationConfigurationsInput struct { +type PutRemediationExceptionsInput struct { _ struct{} `type:"structure"` - // A list of remediation configuration objects. + // The name of the AWS Config rule for which you want to create a remediation + // exception. // - // RemediationConfigurations is a required field - RemediationConfigurations []*RemediationConfiguration `type:"list" required:"true"` + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // The exception is automatically deleted after the expiration date. + ExpirationTime *time.Time `type:"timestamp"` + + // The message contains an explanation of the exception. + Message *string `min:"1" type:"string"` + + // An exception list of resource exception keys to be processed with the current + // request. AWS Config adds an exception for each resource key. For example, AWS + // Config adds 3 exceptions for 3 resource keys. 
+ // + // ResourceKeys is a required field + ResourceKeys []*RemediationExceptionResourceKey `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s PutRemediationConfigurationsInput) String() string { +func (s PutRemediationExceptionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutRemediationConfigurationsInput) GoString() string { +func (s PutRemediationExceptionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutRemediationConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutRemediationConfigurationsInput"} - if s.RemediationConfigurations == nil { - invalidParams.Add(request.NewErrParamRequired("RemediationConfigurations")) +func (s *PutRemediationExceptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRemediationExceptionsInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) } - if s.RemediationConfigurations != nil { - for i, v := range s.RemediationConfigurations { + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + if s.Message != nil && len(*s.Message) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Message", 1)) + } + if s.ResourceKeys == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceKeys")) + } + if s.ResourceKeys != nil && len(s.ResourceKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceKeys", 1)) + } + if s.ResourceKeys != nil { + for i, v := range s.ResourceKeys { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RemediationConfigurations", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceKeys", i), err.(request.ErrInvalidParams)) } } } @@ -11753,31 +14131,50 @@ func (s *PutRemediationConfigurationsInput) Validate() error { return nil } -// SetRemediationConfigurations sets the RemediationConfigurations field's value. -func (s *PutRemediationConfigurationsInput) SetRemediationConfigurations(v []*RemediationConfiguration) *PutRemediationConfigurationsInput { - s.RemediationConfigurations = v +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *PutRemediationExceptionsInput) SetConfigRuleName(v string) *PutRemediationExceptionsInput { + s.ConfigRuleName = &v return s } -type PutRemediationConfigurationsOutput struct { +// SetExpirationTime sets the ExpirationTime field's value. +func (s *PutRemediationExceptionsInput) SetExpirationTime(v time.Time) *PutRemediationExceptionsInput { + s.ExpirationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *PutRemediationExceptionsInput) SetMessage(v string) *PutRemediationExceptionsInput { + s.Message = &v + return s +} + +// SetResourceKeys sets the ResourceKeys field's value. +func (s *PutRemediationExceptionsInput) SetResourceKeys(v []*RemediationExceptionResourceKey) *PutRemediationExceptionsInput { + s.ResourceKeys = v + return s +} + +type PutRemediationExceptionsOutput struct { _ struct{} `type:"structure"` - // Returns a list of failed remediation batch objects. - FailedBatches []*FailedRemediationBatch `type:"list"` + // Returns a list of failed remediation exceptions batch objects. 
Each object + // in the batch consists of a list of failed items and failure messages. + FailedBatches []*FailedRemediationExceptionBatch `type:"list"` } // String returns the string representation -func (s PutRemediationConfigurationsOutput) String() string { +func (s PutRemediationExceptionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutRemediationConfigurationsOutput) GoString() string { +func (s PutRemediationExceptionsOutput) GoString() string { return s.String() } // SetFailedBatches sets the FailedBatches field's value. -func (s *PutRemediationConfigurationsOutput) SetFailedBatches(v []*FailedRemediationBatch) *PutRemediationConfigurationsOutput { +func (s *PutRemediationExceptionsOutput) SetFailedBatches(v []*FailedRemediationExceptionBatch) *PutRemediationExceptionsOutput { s.FailedBatches = v return s } @@ -12018,69 +14415,302 @@ func (s *Relationship) SetResourceName(v string) *Relationship { } // SetResourceType sets the ResourceType field's value. -func (s *Relationship) SetResourceType(v string) *Relationship { +func (s *Relationship) SetResourceType(v string) *Relationship { + s.ResourceType = &v + return s +} + +// An object that represents the details about the remediation configuration +// that includes the remediation action, parameters, and data to execute the +// action. +type RemediationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of remediation configuration. + Arn *string `min:"1" type:"string"` + + // The remediation is triggered automatically. + Automatic *bool `type:"boolean"` + + // The name of the AWS Config rule. + // + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // Name of the service that owns the service linked rule, if applicable. + CreatedByService *string `min:"1" type:"string"` + + // An ExecutionControls object. + ExecutionControls *ExecutionControls `type:"structure"` + + // The maximum number of failed attempts for auto-remediation. If you do not + // select a number, the default is 5. + // + // For example, if you specify MaximumAutomaticAttempts as 5 with RetryAttemptSeconds + // as 50 seconds, AWS Config throws an exception after the 5th failed attempt + // within 50 seconds. + MaximumAutomaticAttempts *int64 `min:"1" type:"integer"` + + // An object of the RemediationParameterValue. + Parameters map[string]*RemediationParameterValue `type:"map"` + + // The type of a resource. + ResourceType *string `type:"string"` + + // Maximum time in seconds that AWS Config runs auto-remediation. If you do + // not select a number, the default is 60 seconds. + // + // For example, if you specify RetryAttemptSeconds as 50 seconds and MaximumAutomaticAttempts + // as 5, AWS Config will run auto-remediations 5 times within 50 seconds before + // throwing an exception. + RetryAttemptSeconds *int64 `min:"1" type:"long"` + + // Target ID is the name of the public document. + // + // TargetId is a required field + TargetId *string `min:"1" type:"string" required:"true"` + + // The type of the target. Target executes remediation. For example, SSM document. + // + // TargetType is a required field + TargetType *string `type:"string" required:"true" enum:"RemediationTargetType"` + + // Version of the target. For example, version of the SSM document. 
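The remediation exception types above follow the same fluent pattern. A hedged sketch, reusing svc from the previous sketch; the rule name, resource keys, and the PutRemediationExceptions call itself are illustrative assumptions about operations generated elsewhere in this file:

// Exempt two security groups from remediation under one rule; AWS Config
// creates one exception per resource key, as documented above.
keys := []*configservice.RemediationExceptionResourceKey{
	(&configservice.RemediationExceptionResourceKey{}).
		SetResourceType("AWS::EC2::SecurityGroup").
		SetResourceId("sg-11111111"),
	(&configservice.RemediationExceptionResourceKey{}).
		SetResourceType("AWS::EC2::SecurityGroup").
		SetResourceId("sg-22222222"),
}

input := (&configservice.PutRemediationExceptionsInput{}).
	SetConfigRuleName("restricted-ssh").
	SetMessage("approved exception (illustrative)").
	SetResourceKeys(keys)

// Validate enforces the required ConfigRuleName and the min:"1" bound on ResourceKeys.
if err := input.Validate(); err != nil {
	log.Fatal(err)
}
out, err := svc.PutRemediationExceptions(input) // assumed operation
if err != nil {
	log.Fatal(err)
}
_ = out.FailedBatches // failures arrive as FailedRemediationExceptionBatch values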
+ TargetVersion *string `type:"string"` +} + +// String returns the string representation +func (s RemediationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemediationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemediationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemediationConfiguration"} + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + if s.CreatedByService != nil && len(*s.CreatedByService) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CreatedByService", 1)) + } + if s.MaximumAutomaticAttempts != nil && *s.MaximumAutomaticAttempts < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumAutomaticAttempts", 1)) + } + if s.RetryAttemptSeconds != nil && *s.RetryAttemptSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("RetryAttemptSeconds", 1)) + } + if s.TargetId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetId")) + } + if s.TargetId != nil && len(*s.TargetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetId", 1)) + } + if s.TargetType == nil { + invalidParams.Add(request.NewErrParamRequired("TargetType")) + } + if s.ExecutionControls != nil { + if err := s.ExecutionControls.Validate(); err != nil { + invalidParams.AddNested("ExecutionControls", err.(request.ErrInvalidParams)) + } + } + if s.Parameters != nil { + for i, v := range s.Parameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Parameters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *RemediationConfiguration) SetArn(v string) *RemediationConfiguration { + s.Arn = &v + return s +} + +// SetAutomatic sets the Automatic field's value. +func (s *RemediationConfiguration) SetAutomatic(v bool) *RemediationConfiguration { + s.Automatic = &v + return s +} + +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *RemediationConfiguration) SetConfigRuleName(v string) *RemediationConfiguration { + s.ConfigRuleName = &v + return s +} + +// SetCreatedByService sets the CreatedByService field's value. +func (s *RemediationConfiguration) SetCreatedByService(v string) *RemediationConfiguration { + s.CreatedByService = &v + return s +} + +// SetExecutionControls sets the ExecutionControls field's value. +func (s *RemediationConfiguration) SetExecutionControls(v *ExecutionControls) *RemediationConfiguration { + s.ExecutionControls = v + return s +} + +// SetMaximumAutomaticAttempts sets the MaximumAutomaticAttempts field's value. +func (s *RemediationConfiguration) SetMaximumAutomaticAttempts(v int64) *RemediationConfiguration { + s.MaximumAutomaticAttempts = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *RemediationConfiguration) SetParameters(v map[string]*RemediationParameterValue) *RemediationConfiguration { + s.Parameters = v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *RemediationConfiguration) SetResourceType(v string) *RemediationConfiguration { + s.ResourceType = &v + return s +} + +// SetRetryAttemptSeconds sets the RetryAttemptSeconds field's value. +func (s *RemediationConfiguration) SetRetryAttemptSeconds(v int64) *RemediationConfiguration { + s.RetryAttemptSeconds = &v + return s +} + +// SetTargetId sets the TargetId field's value. +func (s *RemediationConfiguration) SetTargetId(v string) *RemediationConfiguration { + s.TargetId = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *RemediationConfiguration) SetTargetType(v string) *RemediationConfiguration { + s.TargetType = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *RemediationConfiguration) SetTargetVersion(v string) *RemediationConfiguration { + s.TargetVersion = &v + return s +} + +// An object that represents the details about the remediation exception. The +// details include the rule name, an explanation of an exception, the time when +// the exception will be deleted, the resource ID, and resource type. +type RemediationException struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule. + // + // ConfigRuleName is a required field + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // The time when the remediation exception will be deleted. + ExpirationTime *time.Time `type:"timestamp"` + + // An explanation of a remediation exception. + Message *string `min:"1" type:"string"` + + // The ID of the resource (for example, sg-xxxxxx). + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of a resource. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemediationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemediationException) GoString() string { + return s.String() +} + +// SetConfigRuleName sets the ConfigRuleName field's value. +func (s *RemediationException) SetConfigRuleName(v string) *RemediationException { + s.ConfigRuleName = &v + return s +} + +// SetExpirationTime sets the ExpirationTime field's value. +func (s *RemediationException) SetExpirationTime(v time.Time) *RemediationException { + s.ExpirationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *RemediationException) SetMessage(v string) *RemediationException { + s.Message = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *RemediationException) SetResourceId(v string) *RemediationException { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *RemediationException) SetResourceType(v string) *RemediationException { s.ResourceType = &v return s } -// An object that represents the details about the remediation configuration -// that includes the remediation action, parameters, and data to execute the -// action. -type RemediationConfiguration struct { +// The details that identify a resource within AWS Config, including the resource +// type and resource ID. +type RemediationExceptionResourceKey struct { _ struct{} `type:"structure"` - // The name of the AWS Config rule. - // - // ConfigRuleName is a required field - ConfigRuleName *string `min:"1" type:"string" required:"true"` - - // An object of the RemediationParameterValue. 
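Pulling the RemediationConfiguration fields together, a sketch of an auto-remediation setup. The SSM document name and the PutRemediationConfigurations call are illustrative, and SSM_DOCUMENT is assumed to be the RemediationTargetType enum value for SSM documents:

conf := (&configservice.RemediationConfiguration{}).
	SetConfigRuleName("restricted-ssh").
	SetTargetType("SSM_DOCUMENT"). // assumed RemediationTargetType value
	SetTargetId("AWS-DisablePublicAccessForSecurityGroup").
	SetAutomatic(true).             // opt in to automatic remediation
	SetMaximumAutomaticAttempts(5). // give up after five failed attempts...
	SetRetryAttemptSeconds(50)      // ...within a 50-second window, per the docs above

// Validate checks the required name/target fields and the min values added above.
if err := conf.Validate(); err != nil {
	log.Fatal(err)
}
if _, err := svc.PutRemediationConfigurations( // assumed operation
	(&configservice.PutRemediationConfigurationsInput{}).
		SetRemediationConfigurations([]*configservice.RemediationConfiguration{conf})); err != nil {
	log.Fatal(err)
}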
- Parameters map[string]*RemediationParameterValue `type:"map"` + // The ID of the resource (for example, sg-xxxxxx). + ResourceId *string `min:"1" type:"string"` // The type of a resource. - ResourceType *string `type:"string"` - - // Target ID is the name of the public document. - // - // TargetId is a required field - TargetId *string `min:"1" type:"string" required:"true"` - - // The type of the target. Target executes remediation. For example, SSM document. - // - // TargetType is a required field - TargetType *string `type:"string" required:"true" enum:"RemediationTargetType"` - - // Version of the target. For example, version of the SSM document. - TargetVersion *string `type:"string"` + ResourceType *string `min:"1" type:"string"` } // String returns the string representation -func (s RemediationConfiguration) String() string { +func (s RemediationExceptionResourceKey) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RemediationConfiguration) GoString() string { +func (s RemediationExceptionResourceKey) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RemediationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RemediationConfiguration"} - if s.ConfigRuleName == nil { - invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) - } - if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) - } - if s.TargetId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetId")) - } - if s.TargetId != nil && len(*s.TargetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetId", 1)) +func (s *RemediationExceptionResourceKey) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemediationExceptionResourceKey"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) } - if s.TargetType == nil { - invalidParams.Add(request.NewErrParamRequired("TargetType")) + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) } if invalidParams.Len() > 0 { @@ -12089,42 +14719,18 @@ func (s *RemediationConfiguration) Validate() error { return nil } -// SetConfigRuleName sets the ConfigRuleName field's value. -func (s *RemediationConfiguration) SetConfigRuleName(v string) *RemediationConfiguration { - s.ConfigRuleName = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *RemediationConfiguration) SetParameters(v map[string]*RemediationParameterValue) *RemediationConfiguration { - s.Parameters = v +// SetResourceId sets the ResourceId field's value. +func (s *RemediationExceptionResourceKey) SetResourceId(v string) *RemediationExceptionResourceKey { + s.ResourceId = &v return s } // SetResourceType sets the ResourceType field's value. -func (s *RemediationConfiguration) SetResourceType(v string) *RemediationConfiguration { +func (s *RemediationExceptionResourceKey) SetResourceType(v string) *RemediationExceptionResourceKey { s.ResourceType = &v return s } -// SetTargetId sets the TargetId field's value. -func (s *RemediationConfiguration) SetTargetId(v string) *RemediationConfiguration { - s.TargetId = &v - return s -} - -// SetTargetType sets the TargetType field's value. 
-func (s *RemediationConfiguration) SetTargetType(v string) *RemediationConfiguration { - s.TargetType = &v - return s -} - -// SetTargetVersion sets the TargetVersion field's value. -func (s *RemediationConfiguration) SetTargetVersion(v string) *RemediationConfiguration { - s.TargetVersion = &v - return s -} - // Provides details of the current status of the invoked remediation action // for that resource. type RemediationExecutionStatus struct { @@ -12269,6 +14875,26 @@ func (s RemediationParameterValue) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemediationParameterValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemediationParameterValue"} + if s.ResourceValue != nil { + if err := s.ResourceValue.Validate(); err != nil { + invalidParams.AddNested("ResourceValue", err.(request.ErrInvalidParams)) + } + } + if s.StaticValue != nil { + if err := s.StaticValue.Validate(); err != nil { + invalidParams.AddNested("StaticValue", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetResourceValue sets the ResourceValue field's value. func (s *RemediationParameterValue) SetResourceValue(v *ResourceValue) *RemediationParameterValue { s.ResourceValue = v @@ -12551,7 +15177,9 @@ type ResourceValue struct { _ struct{} `type:"structure"` // The value is a resource ID. - Value *string `type:"string" enum:"ResourceValueType"` + // + // Value is a required field + Value *string `type:"string" required:"true" enum:"ResourceValueType"` } // String returns the string representation @@ -12564,6 +15192,19 @@ func (s ResourceValue) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceValue"} + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetValue sets the Value field's value. func (s *ResourceValue) SetValue(v string) *ResourceValue { s.Value = &v @@ -12944,6 +15585,62 @@ func (s *SourceDetail) SetMessageType(v string) *SourceDetail { return s } +// AWS Systems Manager (SSM) specific remediation controls. +type SsmControls struct { + _ struct{} `type:"structure"` + + // The maximum percentage of remediation actions allowed to run in parallel + // on the non-compliant resources for that specific rule. You can specify a + // percentage, such as 10%. The default value is 10. + ConcurrentExecutionRatePercentage *int64 `min:"1" type:"integer"` + + // The percentage of errors that are allowed before SSM stops running automations + // on non-compliant resources for that specific rule. You can specify a percentage + // of errors, for example 10%. If you do not specify a percentage, the default + // is 50%. For example, if you set the ErrorPercentage to 40% for 10 non-compliant + // resources, then SSM stops running the automations when the fifth error is + // received. + ErrorPercentage *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s SsmControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SsmControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SsmControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SsmControls"} + if s.ConcurrentExecutionRatePercentage != nil && *s.ConcurrentExecutionRatePercentage < 1 { + invalidParams.Add(request.NewErrParamMinValue("ConcurrentExecutionRatePercentage", 1)) + } + if s.ErrorPercentage != nil && *s.ErrorPercentage < 1 { + invalidParams.Add(request.NewErrParamMinValue("ErrorPercentage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConcurrentExecutionRatePercentage sets the ConcurrentExecutionRatePercentage field's value. +func (s *SsmControls) SetConcurrentExecutionRatePercentage(v int64) *SsmControls { + s.ConcurrentExecutionRatePercentage = &v + return s +} + +// SetErrorPercentage sets the ErrorPercentage field's value. +func (s *SsmControls) SetErrorPercentage(v int64) *SsmControls { + s.ErrorPercentage = &v + return s +} + type StartConfigRulesEvaluationInput struct { _ struct{} `type:"structure"` @@ -13160,7 +15857,9 @@ type StaticValue struct { _ struct{} `type:"structure"` // A list of values. For example, the ARN of the assumed role. - Values []*string `type:"list"` + // + // Values is a required field + Values []*string `type:"list" required:"true"` } // String returns the string representation @@ -13173,12 +15872,84 @@ func (s StaticValue) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StaticValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StaticValue"} + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetValues sets the Values field's value. func (s *StaticValue) SetValues(v []*string) *StaticValue { s.Values = v return s } +// Status filter object to filter results based on specific member account ID +// or status type for an organization config rule. +type StatusDetailFilters struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of the member account within an organization. + AccountId *string `type:"string"` + + // Indicates deployment status for config rule in the member account. When master + // account calls PutOrganizationConfigRule action for the first time, config + // rule status is created in the member account. When master account calls PutOrganizationConfigRule + // action for the second time, config rule status is updated in the member account. + // Config rule status is deleted when the master account deletes OrganizationConfigRule + // and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the rule to: + // + // * CREATE_SUCCESSFUL when config rule has been created in the member account. + // + // * CREATE_IN_PROGRESS when config rule is being created in the member account. + // + // * CREATE_FAILED when config rule creation has failed in the member account. + // + // * DELETE_FAILED when config rule deletion has failed in the member account. + // + // * DELETE_IN_PROGRESS when config rule is being deleted in the member account. + // + // * DELETE_SUCCESSFUL when config rule has been deleted in the member account. + // + // * UPDATE_SUCCESSFUL when config rule has been updated in the member account. + // + // * UPDATE_IN_PROGRESS when config rule is being updated in the member account. + // + // * UPDATE_FAILED when config rule update has failed in the member account. 
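The SsmControls knobs above bound how aggressively SSM runs those remediations. A small sketch, reusing conf from the previous sketch; ExecutionControls and its SetSsmControls setter are assumed from their definition elsewhere in this file:

// Remediate at most 10% of non-compliant resources in parallel, and stop
// once 20% of the attempted automations have errored.
ssm := (&configservice.SsmControls{}).
	SetConcurrentExecutionRatePercentage(10).
	SetErrorPercentage(20)

// Validate only enforces the min value of 1 on each percentage when set.
if err := ssm.Validate(); err != nil {
	log.Fatal(err)
}
conf.SetExecutionControls((&configservice.ExecutionControls{}).SetSsmControls(ssm)) // assumed setter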
+ MemberAccountRuleStatus *string `type:"string" enum:"MemberAccountRuleStatus"` +} + +// String returns the string representation +func (s StatusDetailFilters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusDetailFilters) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *StatusDetailFilters) SetAccountId(v string) *StatusDetailFilters { + s.AccountId = &v + return s +} + +// SetMemberAccountRuleStatus sets the MemberAccountRuleStatus field's value. +func (s *StatusDetailFilters) SetMemberAccountRuleStatus(v string) *StatusDetailFilters { + s.MemberAccountRuleStatus = &v + return s +} + // The input for the StopConfigurationRecorder action. type StopConfigurationRecorderInput struct { _ struct{} `type:"structure"` @@ -13558,6 +16329,35 @@ const ( MaximumExecutionFrequencyTwentyFourHours = "TwentyFour_Hours" ) +const ( + // MemberAccountRuleStatusCreateSuccessful is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusCreateSuccessful = "CREATE_SUCCESSFUL" + + // MemberAccountRuleStatusCreateInProgress is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusCreateInProgress = "CREATE_IN_PROGRESS" + + // MemberAccountRuleStatusCreateFailed is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusCreateFailed = "CREATE_FAILED" + + // MemberAccountRuleStatusUpdateSuccessful is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusUpdateSuccessful = "UPDATE_SUCCESSFUL" + + // MemberAccountRuleStatusUpdateFailed is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusUpdateFailed = "UPDATE_FAILED" + + // MemberAccountRuleStatusUpdateInProgress is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusUpdateInProgress = "UPDATE_IN_PROGRESS" + + // MemberAccountRuleStatusDeleteSuccessful is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusDeleteSuccessful = "DELETE_SUCCESSFUL" + + // MemberAccountRuleStatusDeleteFailed is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusDeleteFailed = "DELETE_FAILED" + + // MemberAccountRuleStatusDeleteInProgress is a MemberAccountRuleStatus enum value + MemberAccountRuleStatusDeleteInProgress = "DELETE_IN_PROGRESS" +) + const ( // MessageTypeConfigurationItemChangeNotification is a MessageType enum value MessageTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" @@ -13572,6 +16372,46 @@ const ( MessageTypeOversizedConfigurationItemChangeNotification = "OversizedConfigurationItemChangeNotification" ) +const ( + // OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification is a OrganizationConfigRuleTriggerType enum value + OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + + // OrganizationConfigRuleTriggerTypeOversizedConfigurationItemChangeNotification is a OrganizationConfigRuleTriggerType enum value + OrganizationConfigRuleTriggerTypeOversizedConfigurationItemChangeNotification = "OversizedConfigurationItemChangeNotification" + + // OrganizationConfigRuleTriggerTypeScheduledNotification is a OrganizationConfigRuleTriggerType enum value + OrganizationConfigRuleTriggerTypeScheduledNotification = "ScheduledNotification" +) + +const ( + // OrganizationRuleStatusCreateSuccessful is a OrganizationRuleStatus enum value + OrganizationRuleStatusCreateSuccessful = "CREATE_SUCCESSFUL" + + // OrganizationRuleStatusCreateInProgress is a OrganizationRuleStatus enum value + 
OrganizationRuleStatusCreateInProgress = "CREATE_IN_PROGRESS" + + // OrganizationRuleStatusCreateFailed is a OrganizationRuleStatus enum value + OrganizationRuleStatusCreateFailed = "CREATE_FAILED" + + // OrganizationRuleStatusUpdateSuccessful is a OrganizationRuleStatus enum value + OrganizationRuleStatusUpdateSuccessful = "UPDATE_SUCCESSFUL" + + // OrganizationRuleStatusUpdateFailed is a OrganizationRuleStatus enum value + OrganizationRuleStatusUpdateFailed = "UPDATE_FAILED" + + // OrganizationRuleStatusUpdateInProgress is a OrganizationRuleStatus enum value + OrganizationRuleStatusUpdateInProgress = "UPDATE_IN_PROGRESS" + + // OrganizationRuleStatusDeleteSuccessful is a OrganizationRuleStatus enum value + OrganizationRuleStatusDeleteSuccessful = "DELETE_SUCCESSFUL" + + // OrganizationRuleStatusDeleteFailed is a OrganizationRuleStatus enum value + OrganizationRuleStatusDeleteFailed = "DELETE_FAILED" + + // OrganizationRuleStatusDeleteInProgress is a OrganizationRuleStatus enum value + OrganizationRuleStatusDeleteInProgress = "DELETE_IN_PROGRESS" +) + const ( // OwnerCustomLambda is a Owner enum value OwnerCustomLambda = "CUSTOM_LAMBDA" @@ -13678,6 +16518,27 @@ const ( // ResourceTypeAwsEc2Vpngateway is a ResourceType enum value ResourceTypeAwsEc2Vpngateway = "AWS::EC2::VPNGateway" + // ResourceTypeAwsEc2RegisteredHainstance is a ResourceType enum value + ResourceTypeAwsEc2RegisteredHainstance = "AWS::EC2::RegisteredHAInstance" + + // ResourceTypeAwsEc2NatGateway is a ResourceType enum value + ResourceTypeAwsEc2NatGateway = "AWS::EC2::NatGateway" + + // ResourceTypeAwsEc2EgressOnlyInternetGateway is a ResourceType enum value + ResourceTypeAwsEc2EgressOnlyInternetGateway = "AWS::EC2::EgressOnlyInternetGateway" + + // ResourceTypeAwsEc2Vpcendpoint is a ResourceType enum value + ResourceTypeAwsEc2Vpcendpoint = "AWS::EC2::VPCEndpoint" + + // ResourceTypeAwsEc2VpcendpointService is a ResourceType enum value + ResourceTypeAwsEc2VpcendpointService = "AWS::EC2::VPCEndpointService" + + // ResourceTypeAwsEc2FlowLog is a ResourceType enum value + ResourceTypeAwsEc2FlowLog = "AWS::EC2::FlowLog" + + // ResourceTypeAwsEc2VpcpeeringConnection is a ResourceType enum value + ResourceTypeAwsEc2VpcpeeringConnection = "AWS::EC2::VPCPeeringConnection" + // ResourceTypeAwsIamGroup is a ResourceType enum value ResourceTypeAwsIamGroup = "AWS::IAM::Group" @@ -13690,12 +16551,21 @@ const ( // ResourceTypeAwsIamUser is a ResourceType enum value ResourceTypeAwsIamUser = "AWS::IAM::User" + // ResourceTypeAwsElasticLoadBalancingV2LoadBalancer is a ResourceType enum value + ResourceTypeAwsElasticLoadBalancingV2LoadBalancer = "AWS::ElasticLoadBalancingV2::LoadBalancer" + // ResourceTypeAwsAcmCertificate is a ResourceType enum value ResourceTypeAwsAcmCertificate = "AWS::ACM::Certificate" // ResourceTypeAwsRdsDbinstance is a ResourceType enum value ResourceTypeAwsRdsDbinstance = "AWS::RDS::DBInstance" + // ResourceTypeAwsRdsDbparameterGroup is a ResourceType enum value + ResourceTypeAwsRdsDbparameterGroup = "AWS::RDS::DBParameterGroup" + + // ResourceTypeAwsRdsDboptionGroup is a ResourceType enum value + ResourceTypeAwsRdsDboptionGroup = "AWS::RDS::DBOptionGroup" + // ResourceTypeAwsRdsDbsubnetGroup is a ResourceType enum value ResourceTypeAwsRdsDbsubnetGroup = "AWS::RDS::DBSubnetGroup" @@ -13705,17 +16575,23 @@ const ( // ResourceTypeAwsRdsDbsnapshot is a ResourceType enum value ResourceTypeAwsRdsDbsnapshot = "AWS::RDS::DBSnapshot" + // ResourceTypeAwsRdsDbcluster is a ResourceType enum value + 
ResourceTypeAwsRdsDbcluster = "AWS::RDS::DBCluster" + + // ResourceTypeAwsRdsDbclusterParameterGroup is a ResourceType enum value + ResourceTypeAwsRdsDbclusterParameterGroup = "AWS::RDS::DBClusterParameterGroup" + + // ResourceTypeAwsRdsDbclusterSnapshot is a ResourceType enum value + ResourceTypeAwsRdsDbclusterSnapshot = "AWS::RDS::DBClusterSnapshot" + // ResourceTypeAwsRdsEventSubscription is a ResourceType enum value ResourceTypeAwsRdsEventSubscription = "AWS::RDS::EventSubscription" - // ResourceTypeAwsElasticLoadBalancingV2LoadBalancer is a ResourceType enum value - ResourceTypeAwsElasticLoadBalancingV2LoadBalancer = "AWS::ElasticLoadBalancingV2::LoadBalancer" - // ResourceTypeAwsS3Bucket is a ResourceType enum value ResourceTypeAwsS3Bucket = "AWS::S3::Bucket" - // ResourceTypeAwsSsmManagedInstanceInventory is a ResourceType enum value - ResourceTypeAwsSsmManagedInstanceInventory = "AWS::SSM::ManagedInstanceInventory" + // ResourceTypeAwsS3AccountPublicAccessBlock is a ResourceType enum value + ResourceTypeAwsS3AccountPublicAccessBlock = "AWS::S3::AccountPublicAccessBlock" // ResourceTypeAwsRedshiftCluster is a ResourceType enum value ResourceTypeAwsRedshiftCluster = "AWS::Redshift::Cluster" @@ -13735,14 +16611,17 @@ const ( // ResourceTypeAwsRedshiftEventSubscription is a ResourceType enum value ResourceTypeAwsRedshiftEventSubscription = "AWS::Redshift::EventSubscription" + // ResourceTypeAwsSsmManagedInstanceInventory is a ResourceType enum value + ResourceTypeAwsSsmManagedInstanceInventory = "AWS::SSM::ManagedInstanceInventory" + // ResourceTypeAwsCloudWatchAlarm is a ResourceType enum value ResourceTypeAwsCloudWatchAlarm = "AWS::CloudWatch::Alarm" // ResourceTypeAwsCloudFormationStack is a ResourceType enum value ResourceTypeAwsCloudFormationStack = "AWS::CloudFormation::Stack" - // ResourceTypeAwsDynamoDbTable is a ResourceType enum value - ResourceTypeAwsDynamoDbTable = "AWS::DynamoDB::Table" + // ResourceTypeAwsElasticLoadBalancingLoadBalancer is a ResourceType enum value + ResourceTypeAwsElasticLoadBalancingLoadBalancer = "AWS::ElasticLoadBalancing::LoadBalancer" // ResourceTypeAwsAutoScalingAutoScalingGroup is a ResourceType enum value ResourceTypeAwsAutoScalingAutoScalingGroup = "AWS::AutoScaling::AutoScalingGroup" @@ -13756,6 +16635,9 @@ const ( // ResourceTypeAwsAutoScalingScheduledAction is a ResourceType enum value ResourceTypeAwsAutoScalingScheduledAction = "AWS::AutoScaling::ScheduledAction" + // ResourceTypeAwsDynamoDbTable is a ResourceType enum value + ResourceTypeAwsDynamoDbTable = "AWS::DynamoDB::Table" + // ResourceTypeAwsCodeBuildProject is a ResourceType enum value ResourceTypeAwsCodeBuildProject = "AWS::CodeBuild::Project" @@ -13765,6 +16647,9 @@ const ( // ResourceTypeAwsWafRule is a ResourceType enum value ResourceTypeAwsWafRule = "AWS::WAF::Rule" + // ResourceTypeAwsWafRuleGroup is a ResourceType enum value + ResourceTypeAwsWafRuleGroup = "AWS::WAF::RuleGroup" + // ResourceTypeAwsWafWebAcl is a ResourceType enum value ResourceTypeAwsWafWebAcl = "AWS::WAF::WebACL" @@ -13774,6 +16659,9 @@ const ( // ResourceTypeAwsWafregionalRule is a ResourceType enum value ResourceTypeAwsWafregionalRule = "AWS::WAFRegional::Rule" + // ResourceTypeAwsWafregionalRuleGroup is a ResourceType enum value + ResourceTypeAwsWafregionalRuleGroup = "AWS::WAFRegional::RuleGroup" + // ResourceTypeAwsWafregionalWebAcl is a ResourceType enum value ResourceTypeAwsWafregionalWebAcl = "AWS::WAFRegional::WebACL" @@ -13783,11 +16671,8 @@ const ( // 
ResourceTypeAwsCloudFrontStreamingDistribution is a ResourceType enum value ResourceTypeAwsCloudFrontStreamingDistribution = "AWS::CloudFront::StreamingDistribution" - // ResourceTypeAwsWafRuleGroup is a ResourceType enum value - ResourceTypeAwsWafRuleGroup = "AWS::WAF::RuleGroup" - - // ResourceTypeAwsWafregionalRuleGroup is a ResourceType enum value - ResourceTypeAwsWafregionalRuleGroup = "AWS::WAFRegional::RuleGroup" + // ResourceTypeAwsLambdaAlias is a ResourceType enum value + ResourceTypeAwsLambdaAlias = "AWS::Lambda::Alias" // ResourceTypeAwsLambdaFunction is a ResourceType enum value ResourceTypeAwsLambdaFunction = "AWS::Lambda::Function" @@ -13801,8 +16686,8 @@ const ( // ResourceTypeAwsElasticBeanstalkEnvironment is a ResourceType enum value ResourceTypeAwsElasticBeanstalkEnvironment = "AWS::ElasticBeanstalk::Environment" - // ResourceTypeAwsElasticLoadBalancingLoadBalancer is a ResourceType enum value - ResourceTypeAwsElasticLoadBalancingLoadBalancer = "AWS::ElasticLoadBalancing::LoadBalancer" + // ResourceTypeAwsMobileHubProject is a ResourceType enum value + ResourceTypeAwsMobileHubProject = "AWS::MobileHub::Project" // ResourceTypeAwsXrayEncryptionConfig is a ResourceType enum value ResourceTypeAwsXrayEncryptionConfig = "AWS::XRay::EncryptionConfig" @@ -13822,8 +16707,41 @@ const ( // ResourceTypeAwsConfigResourceCompliance is a ResourceType enum value ResourceTypeAwsConfigResourceCompliance = "AWS::Config::ResourceCompliance" + // ResourceTypeAwsLicenseManagerLicenseConfiguration is a ResourceType enum value + ResourceTypeAwsLicenseManagerLicenseConfiguration = "AWS::LicenseManager::LicenseConfiguration" + + // ResourceTypeAwsApiGatewayDomainName is a ResourceType enum value + ResourceTypeAwsApiGatewayDomainName = "AWS::ApiGateway::DomainName" + + // ResourceTypeAwsApiGatewayMethod is a ResourceType enum value + ResourceTypeAwsApiGatewayMethod = "AWS::ApiGateway::Method" + + // ResourceTypeAwsApiGatewayStage is a ResourceType enum value + ResourceTypeAwsApiGatewayStage = "AWS::ApiGateway::Stage" + + // ResourceTypeAwsApiGatewayRestApi is a ResourceType enum value + ResourceTypeAwsApiGatewayRestApi = "AWS::ApiGateway::RestApi" + + // ResourceTypeAwsApiGatewayV2DomainName is a ResourceType enum value + ResourceTypeAwsApiGatewayV2DomainName = "AWS::ApiGatewayV2::DomainName" + + // ResourceTypeAwsApiGatewayV2Stage is a ResourceType enum value + ResourceTypeAwsApiGatewayV2Stage = "AWS::ApiGatewayV2::Stage" + + // ResourceTypeAwsApiGatewayV2Api is a ResourceType enum value + ResourceTypeAwsApiGatewayV2Api = "AWS::ApiGatewayV2::Api" + // ResourceTypeAwsCodePipelinePipeline is a ResourceType enum value ResourceTypeAwsCodePipelinePipeline = "AWS::CodePipeline::Pipeline" + + // ResourceTypeAwsServiceCatalogCloudFormationProvisionedProduct is a ResourceType enum value + ResourceTypeAwsServiceCatalogCloudFormationProvisionedProduct = "AWS::ServiceCatalog::CloudFormationProvisionedProduct" + + // ResourceTypeAwsServiceCatalogCloudFormationProduct is a ResourceType enum value + ResourceTypeAwsServiceCatalogCloudFormationProduct = "AWS::ServiceCatalog::CloudFormationProduct" + + // ResourceTypeAwsServiceCatalogPortfolio is a ResourceType enum value + ResourceTypeAwsServiceCatalogPortfolio = "AWS::ServiceCatalog::Portfolio" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/errors.go index c918050d06a..7ecd56b1bd9 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/errors.go @@ -15,11 +15,15 @@ const ( // // Indicates one of the following errors: // - // * The rule cannot be created because the IAM role assigned to AWS Config - // lacks permissions to perform the config:Put* action. + // * For PutConfigRule, the rule cannot be created because the IAM role assigned + // to AWS Config lacks permissions to perform the config:Put* action. // - // * The AWS Lambda function cannot be invoked. Check the function ARN, and - // check the function's permissions. + // * For PutConfigRule, the AWS Lambda function cannot be invoked. Check + // the function ARN, and check the function's permissions. + // + // * For OrganizationConfigRule, organization config rule cannot be created + // because you do not have permissions to call IAM GetRole action or create + // service linked role. ErrCodeInsufficientPermissionsException = "InsufficientPermissionsException" // ErrCodeInvalidConfigurationRecorderNameException for service response error code @@ -136,6 +140,13 @@ const ( // You have reached the limit of the number of delivery channels you can create. ErrCodeMaxNumberOfDeliveryChannelsExceededException = "MaxNumberOfDeliveryChannelsExceededException" + // ErrCodeMaxNumberOfOrganizationConfigRulesExceededException for service response error code + // "MaxNumberOfOrganizationConfigRulesExceededException". + // + // You have reached the limit of the number of organization config rules you + // can create. + ErrCodeMaxNumberOfOrganizationConfigRulesExceededException = "MaxNumberOfOrganizationConfigRulesExceededException" + // ErrCodeMaxNumberOfRetentionConfigurationsExceededException for service response error code // "MaxNumberOfRetentionConfigurationsExceededException". // @@ -159,7 +170,7 @@ const ( // ErrCodeNoAvailableOrganizationException for service response error code // "NoAvailableOrganizationException". // - // Organization does is no longer available. + // Organization is no longer available. ErrCodeNoAvailableOrganizationException = "NoAvailableOrganizationException" // ErrCodeNoRunningConfigurationRecorderException for service response error code @@ -199,12 +210,24 @@ const ( // You have specified a delivery channel that does not exist. ErrCodeNoSuchDeliveryChannelException = "NoSuchDeliveryChannelException" + // ErrCodeNoSuchOrganizationConfigRuleException for service response error code + // "NoSuchOrganizationConfigRuleException". + // + // You specified one or more organization config rules that do not exist. + ErrCodeNoSuchOrganizationConfigRuleException = "NoSuchOrganizationConfigRuleException" + // ErrCodeNoSuchRemediationConfigurationException for service response error code // "NoSuchRemediationConfigurationException". // // You specified an AWS Config rule without a remediation configuration. ErrCodeNoSuchRemediationConfigurationException = "NoSuchRemediationConfigurationException" + // ErrCodeNoSuchRemediationExceptionException for service response error code + // "NoSuchRemediationExceptionException". + // + // You tried to delete a remediation exception that does not exist. + ErrCodeNoSuchRemediationExceptionException = "NoSuchRemediationExceptionException" + // ErrCodeNoSuchRetentionConfigurationException for service response error code // "NoSuchRetentionConfigurationException". 
// @@ -214,14 +237,19 @@ const ( // ErrCodeOrganizationAccessDeniedException for service response error code // "OrganizationAccessDeniedException". // - // No permission to call the EnableAWSServiceAccess API. + // For the PutConfigurationAggregator API, no permission to call the EnableAWSServiceAccess + // API. + // + // For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs + // are called from member accounts. All APIs must be called from the organization + // master account. ErrCodeOrganizationAccessDeniedException = "OrganizationAccessDeniedException" // ErrCodeOrganizationAllFeaturesNotEnabledException for service response error code // "OrganizationAllFeaturesNotEnabledException". // - // The configuration aggregator cannot be created because organization does - // not have all features enabled. + // AWS Config resource cannot be created because your organization does not + // have all features enabled. ErrCodeOrganizationAllFeaturesNotEnabledException = "OrganizationAllFeaturesNotEnabledException" // ErrCodeOversizedConfigurationItemException for service response error code @@ -230,11 +258,34 @@ const ( // The configuration item size is outside the allowable range. ErrCodeOversizedConfigurationItemException = "OversizedConfigurationItemException" + // ErrCodeRemediationInProgressException for service response error code + // "RemediationInProgressException". + // + // Remediation action is in progress. You can either cancel execution in AWS + // Systems Manager or wait and try again later. + ErrCodeRemediationInProgressException = "RemediationInProgressException" + // ErrCodeResourceInUseException for service response error code // "ResourceInUseException". // - // The rule is currently being deleted or the rule is deleting your evaluation - // results. Try your request again later. + // You see this exception in the following cases: + // + // * For DeleteConfigRule API, AWS Config is deleting this rule. Try your + // request again later. + // + // * For DeleteConfigRule API, the rule is deleting your evaluation results. + // Try your request again later. + // + // * For DeleteConfigRule API, a remediation action is associated with the + // rule and AWS Config cannot delete this rule. Delete the remediation action + // associated with the rule before deleting the rule and try your request + // again later. + // + // * For PutOrganizationConfigRule, organization config rule deletion is + // in progress. Try your request again later. + // + // * For DeleteOrganizationConfigRule, organization config rule creation + // is in progress. Try your request again later. ErrCodeResourceInUseException = "ResourceInUseException" // ErrCodeResourceNotDiscoveredException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go index 2fdea95561f..3cd3cff327d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go @@ -46,11 +46,11 @@ const ( // svc := configservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ConfigService { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ConfigService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ConfigService { svc := &ConfigService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-12", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go index 9c6b8ecd77f..9ae48a197a0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go @@ -190,7 +190,7 @@ func (c *CostandUsageReportService) DescribeReportDefinitionsWithContext(ctx aws // // Example iterating over at most 3 pages of a DescribeReportDefinitions operation. // pageNum := 0 // err := client.DescribeReportDefinitionsPages(params, -// func(page *DescribeReportDefinitionsOutput, lastPage bool) bool { +// func(page *costandusagereportservice.DescribeReportDefinitionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -222,13 +222,99 @@ func (c *CostandUsageReportService) DescribeReportDefinitionsPagesWithContext(ct }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReportDefinitionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReportDefinitionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } +const opModifyReportDefinition = "ModifyReportDefinition" + +// ModifyReportDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReportDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyReportDefinition for more information on using the ModifyReportDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyReportDefinitionRequest method. 
+// req, resp := client.ModifyReportDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cur-2017-01-06/ModifyReportDefinition +func (c *CostandUsageReportService) ModifyReportDefinitionRequest(input *ModifyReportDefinitionInput) (req *request.Request, output *ModifyReportDefinitionOutput) { + op := &request.Operation{ + Name: opModifyReportDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReportDefinitionInput{} + } + + output = &ModifyReportDefinitionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyReportDefinition API operation for AWS Cost and Usage Report Service. +// +// Allows you to programmatically update your report preferences. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Cost and Usage Report Service's +// API operation ModifyReportDefinition for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * ErrCodeValidationException "ValidationException" +// The input fails to satisfy the constraints specified by an AWS service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cur-2017-01-06/ModifyReportDefinition +func (c *CostandUsageReportService) ModifyReportDefinition(input *ModifyReportDefinitionInput) (*ModifyReportDefinitionOutput, error) { + req, out := c.ModifyReportDefinitionRequest(input) + return out, req.Send() +} + +// ModifyReportDefinitionWithContext is the same as ModifyReportDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReportDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CostandUsageReportService) ModifyReportDefinitionWithContext(ctx aws.Context, input *ModifyReportDefinitionInput, opts ...request.Option) (*ModifyReportDefinitionOutput, error) { + req, out := c.ModifyReportDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutReportDefinition = "PutReportDefinition" // PutReportDefinitionRequest generates a "aws/request.Request" representing the @@ -449,6 +535,80 @@ func (s *DescribeReportDefinitionsOutput) SetReportDefinitions(v []*ReportDefini return s } +type ModifyReportDefinitionInput struct { + _ struct{} `type:"structure"` + + // The definition of the AWS Cost and Usage Report. You can specify the report name, + // time unit, report format, compression format, S3 bucket, additional artifacts, + // and schema elements in the definition. + // + // ReportDefinition is a required field + ReportDefinition *ReportDefinition `type:"structure" required:"true"` + + // The name of the report that you want to create. The name must be unique, + // is case sensitive, and can't include spaces.
+ // + // ReportName is a required field + ReportName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyReportDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReportDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReportDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReportDefinitionInput"} + if s.ReportDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("ReportDefinition")) + } + if s.ReportName == nil { + invalidParams.Add(request.NewErrParamRequired("ReportName")) + } + if s.ReportDefinition != nil { + if err := s.ReportDefinition.Validate(); err != nil { + invalidParams.AddNested("ReportDefinition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReportDefinition sets the ReportDefinition field's value. +func (s *ModifyReportDefinitionInput) SetReportDefinition(v *ReportDefinition) *ModifyReportDefinitionInput { + s.ReportDefinition = v + return s +} + +// SetReportName sets the ReportName field's value. +func (s *ModifyReportDefinitionInput) SetReportName(v string) *ModifyReportDefinitionInput { + s.ReportName = &v + return s +} + +type ModifyReportDefinitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyReportDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReportDefinitionOutput) GoString() string { + return s.String() +} + // Creates a Cost and Usage Report. type PutReportDefinitionInput struct { _ struct{} `type:"structure"` @@ -714,6 +874,9 @@ const ( // AWSRegionApNortheast3 is a AWSRegion enum value AWSRegionApNortheast3 = "ap-northeast-3" + + // AWSRegionApEast1 is a AWSRegion enum value + AWSRegionApEast1 = "ap-east-1" ) // The types of manifest that you want AWS to create for this report. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go index 39e3cedfeae..dca54fd5c95 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CostandUsageReportServic if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cur" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
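// The partitionID argument threaded through the constructor below records which
// AWS partition (for example "aws", "aws-cn", or "aws-us-gov") the resolved
// endpoint belongs to. A minimal construction sketch, assuming an existing
// session (mySession and the region are placeholders for illustration, not
// values from this file):
//
//    mySession := session.Must(session.NewSession())
//    svc := costandusagereportservice.New(mySession, aws.NewConfig().WithRegion("us-east-1"))
//    fmt.Println(svc.ClientInfo.PartitionID) // the partition of the resolved endpoint, e.g. "aws"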
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CostandUsageReportService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CostandUsageReportService { svc := &CostandUsageReportService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-01-06", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go index 8f5e516fb99..f5682aca71d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go @@ -231,7 +231,7 @@ func (c *DatabaseMigrationService) CreateEndpointRequest(input *CreateEndpointIn // // Returned Error Codes: // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // * ErrCodeResourceAlreadyExistsFault "ResourceAlreadyExistsFault" // The resource you are attempting to create already exists. @@ -358,20 +358,20 @@ func (c *DatabaseMigrationService) CreateEventSubscriptionRequest(input *CreateE // You are not authorized for the SNS subscription. // // * ErrCodeKMSAccessDeniedFault "KMSAccessDeniedFault" -// The ciphertext references a key that doesn't exist or DMS account doesn't -// have an access to +// The ciphertext references a key that doesn't exist or that the DMS account +// doesn't have access to. // // * ErrCodeKMSDisabledFault "KMSDisabledFault" // The specified master key (CMK) isn't enabled. // // * ErrCodeKMSInvalidStateFault "KMSInvalidStateFault" -// The state of the specified KMS resource isn't valid for this request. +// The state of the specified AWS KMS resource isn't valid for this request. // // * ErrCodeKMSNotFoundFault "KMSNotFoundFault" -// The specified KMS entity or resource can't be found. +// The specified AWS KMS entity or resource can't be found. // // * ErrCodeKMSThrottlingFault "KMSThrottlingFault" -// This request triggered KMS request throttling. +// This request triggered AWS KMS request throttling. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateEventSubscription func (c *DatabaseMigrationService) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { @@ -441,6 +441,13 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // // Creates the replication instance using the specified parameters. // +// AWS DMS requires that your account have certain roles with appropriate permissions +// before you can create a replication instance. For information on the required +// roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API +// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.APIRole.html). +// For information on the required permissions, see IAM Permissions Needed to +// Use AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.IAMPermissions.html). +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -480,7 +487,7 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // The subnet provided is invalid. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateReplicationInstance func (c *DatabaseMigrationService) CreateReplicationInstance(input *CreateReplicationInstanceInput) (*CreateReplicationInstanceOutput, error) { @@ -669,7 +676,7 @@ func (c *DatabaseMigrationService) CreateReplicationTaskRequest(input *CreateRep // The resource could not be found. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // * ErrCodeResourceQuotaExceededFault "ResourceQuotaExceededFault" // The quota for this resource quota has been exceeded. @@ -779,6 +786,93 @@ func (c *DatabaseMigrationService) DeleteCertificateWithContext(ctx aws.Context, return out, req.Send() } +const opDeleteConnection = "DeleteConnection" + +// DeleteConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConnection for more information on using the DeleteConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConnectionRequest method. +// req, resp := client.DeleteConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteConnection +func (c *DatabaseMigrationService) DeleteConnectionRequest(input *DeleteConnectionInput) (req *request.Request, output *DeleteConnectionOutput) { + op := &request.Operation{ + Name: opDeleteConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConnectionInput{} + } + + output = &DeleteConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteConnection API operation for AWS Database Migration Service. +// +// Deletes the connection between a replication instance and an endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DeleteConnection for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedFault "AccessDeniedFault" +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" +// The resource could not be found. 
+// +// * ErrCodeInvalidResourceStateFault "InvalidResourceStateFault" +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteConnection +func (c *DatabaseMigrationService) DeleteConnection(input *DeleteConnectionInput) (*DeleteConnectionOutput, error) { + req, out := c.DeleteConnectionRequest(input) + return out, req.Send() +} + +// DeleteConnectionWithContext is the same as DeleteConnection with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DeleteConnectionWithContext(ctx aws.Context, input *DeleteConnectionInput, opts ...request.Option) (*DeleteConnectionOutput, error) { + req, out := c.DeleteConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteEndpoint = "DeleteEndpoint" // DeleteEndpointRequest generates a "aws/request.Request" representing the @@ -1245,10 +1339,13 @@ func (c *DatabaseMigrationService) DescribeAccountAttributesRequest(input *Descr // DescribeAccountAttributes API operation for AWS Database Migration Service. // -// Lists all of the AWS DMS attributes for a customer account. The attributes -// include AWS DMS quotas for the account, such as the number of replication -// instances allowed. The description for a quota includes the quota name, current -// usage toward that quota, and the quota's maximum value. +// Lists all of the AWS DMS attributes for a customer account. These attributes +// include AWS DMS quotas for the account and a unique account identifier in +// a particular DMS region. DMS quotas include a list of resource quotas supported +// by the account, such as the number of replication instances allowed. The +// description for each resource quota, includes the quota name, current usage +// toward that quota, and the quota's maximum value. DMS uses the unique account +// identifier to name each artifact used by DMS in the given region. // // This command does not take any parameters. // @@ -1376,7 +1473,7 @@ func (c *DatabaseMigrationService) DescribeCertificatesWithContext(ctx aws.Conte // // Example iterating over at most 3 pages of a DescribeCertificates operation. // pageNum := 0 // err := client.DescribeCertificatesPages(params, -// func(page *DescribeCertificatesOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1408,10 +1505,12 @@ func (c *DatabaseMigrationService) DescribeCertificatesPagesWithContext(ctx aws. }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1512,7 +1611,7 @@ func (c *DatabaseMigrationService) DescribeConnectionsWithContext(ctx aws.Contex // // Example iterating over at most 3 pages of a DescribeConnections operation. 
// pageNum := 0 // err := client.DescribeConnectionsPages(params, -// func(page *DescribeConnectionsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1544,10 +1643,12 @@ func (c *DatabaseMigrationService) DescribeConnectionsPagesWithContext(ctx aws.C }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeConnectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeConnectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1642,7 +1743,7 @@ func (c *DatabaseMigrationService) DescribeEndpointTypesWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a DescribeEndpointTypes operation. // pageNum := 0 // err := client.DescribeEndpointTypesPages(params, -// func(page *DescribeEndpointTypesOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeEndpointTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1674,10 +1775,12 @@ func (c *DatabaseMigrationService) DescribeEndpointTypesPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEndpointTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEndpointTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1777,7 +1880,7 @@ func (c *DatabaseMigrationService) DescribeEndpointsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeEndpoints operation. // pageNum := 0 // err := client.DescribeEndpointsPages(params, -// func(page *DescribeEndpointsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1809,10 +1912,12 @@ func (c *DatabaseMigrationService) DescribeEndpointsPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1994,7 +2099,7 @@ func (c *DatabaseMigrationService) DescribeEventSubscriptionsWithContext(ctx aws // // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. // pageNum := 0 // err := client.DescribeEventSubscriptionsPages(params, -// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeEventSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2026,10 +2131,12 @@ func (c *DatabaseMigrationService) DescribeEventSubscriptionsPagesWithContext(ct }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2127,7 +2234,7 @@ func (c *DatabaseMigrationService) DescribeEventsWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeEvents operation. 
// pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2159,10 +2266,12 @@ func (c *DatabaseMigrationService) DescribeEventsPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2258,7 +2367,7 @@ func (c *DatabaseMigrationService) DescribeOrderableReplicationInstancesWithCont // // Example iterating over at most 3 pages of a DescribeOrderableReplicationInstances operation. // pageNum := 0 // err := client.DescribeOrderableReplicationInstancesPages(params, -// func(page *DescribeOrderableReplicationInstancesOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeOrderableReplicationInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2290,10 +2399,12 @@ func (c *DatabaseMigrationService) DescribeOrderableReplicationInstancesPagesWit }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOrderableReplicationInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOrderableReplicationInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2393,7 +2504,7 @@ func (c *DatabaseMigrationService) DescribePendingMaintenanceActionsWithContext( // // Example iterating over at most 3 pages of a DescribePendingMaintenanceActions operation. // pageNum := 0 // err := client.DescribePendingMaintenanceActionsPages(params, -// func(page *DescribePendingMaintenanceActionsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribePendingMaintenanceActionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2425,10 +2536,12 @@ func (c *DatabaseMigrationService) DescribePendingMaintenanceActionsPagesWithCon }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePendingMaintenanceActionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePendingMaintenanceActionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2615,7 +2728,7 @@ func (c *DatabaseMigrationService) DescribeReplicationInstanceTaskLogsWithContex // // Example iterating over at most 3 pages of a DescribeReplicationInstanceTaskLogs operation. // pageNum := 0 // err := client.DescribeReplicationInstanceTaskLogsPages(params, -// func(page *DescribeReplicationInstanceTaskLogsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeReplicationInstanceTaskLogsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2647,10 +2760,12 @@ func (c *DatabaseMigrationService) DescribeReplicationInstanceTaskLogsPagesWithC }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationInstanceTaskLogsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationInstanceTaskLogsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2751,7 +2866,7 @@ func (c *DatabaseMigrationService) DescribeReplicationInstancesWithContext(ctx a // // Example iterating over at most 3 pages of a DescribeReplicationInstances operation. 
// pageNum := 0 // err := client.DescribeReplicationInstancesPages(params, -// func(page *DescribeReplicationInstancesOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeReplicationInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2783,10 +2898,12 @@ func (c *DatabaseMigrationService) DescribeReplicationInstancesPagesWithContext( }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2886,7 +3003,7 @@ func (c *DatabaseMigrationService) DescribeReplicationSubnetGroupsWithContext(ct // // Example iterating over at most 3 pages of a DescribeReplicationSubnetGroups operation. // pageNum := 0 // err := client.DescribeReplicationSubnetGroupsPages(params, -// func(page *DescribeReplicationSubnetGroupsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeReplicationSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2918,10 +3035,12 @@ func (c *DatabaseMigrationService) DescribeReplicationSubnetGroupsPagesWithConte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3022,7 +3141,7 @@ func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentResultsWithC // // Example iterating over at most 3 pages of a DescribeReplicationTaskAssessmentResults operation. // pageNum := 0 // err := client.DescribeReplicationTaskAssessmentResultsPages(params, -// func(page *DescribeReplicationTaskAssessmentResultsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeReplicationTaskAssessmentResultsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3054,10 +3173,12 @@ func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentResultsPages }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationTaskAssessmentResultsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTaskAssessmentResultsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3158,7 +3279,7 @@ func (c *DatabaseMigrationService) DescribeReplicationTasksWithContext(ctx aws.C // // Example iterating over at most 3 pages of a DescribeReplicationTasks operation. // pageNum := 0 // err := client.DescribeReplicationTasksPages(params, -// func(page *DescribeReplicationTasksOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeReplicationTasksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3190,10 +3311,12 @@ func (c *DatabaseMigrationService) DescribeReplicationTasksPagesWithContext(ctx }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationTasksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTasksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3297,7 +3420,7 @@ func (c *DatabaseMigrationService) DescribeSchemasWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeSchemas operation. 
// pageNum := 0 // err := client.DescribeSchemasPages(params, -// func(page *DescribeSchemasOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeSchemasOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3329,10 +3452,12 @@ func (c *DatabaseMigrationService) DescribeSchemasPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSchemasOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSchemasOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3441,7 +3566,7 @@ func (c *DatabaseMigrationService) DescribeTableStatisticsWithContext(ctx aws.Co // // Example iterating over at most 3 pages of a DescribeTableStatistics operation. // pageNum := 0 // err := client.DescribeTableStatisticsPages(params, -// func(page *DescribeTableStatisticsOutput, lastPage bool) bool { +// func(page *databasemigrationservice.DescribeTableStatisticsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3473,10 +3598,12 @@ func (c *DatabaseMigrationService) DescribeTableStatisticsPagesWithContext(ctx a }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTableStatisticsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTableStatisticsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3709,7 +3836,7 @@ func (c *DatabaseMigrationService) ModifyEndpointRequest(input *ModifyEndpointIn // The resource you are attempting to create already exists. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // * ErrCodeAccessDeniedFault "AccessDeniedFault" // AWS DMS was denied access to the endpoint. Check that the role is correctly @@ -3804,20 +3931,20 @@ func (c *DatabaseMigrationService) ModifyEventSubscriptionRequest(input *ModifyE // You are not authorized for the SNS subscription. // // * ErrCodeKMSAccessDeniedFault "KMSAccessDeniedFault" -// The ciphertext references a key that doesn't exist or DMS account doesn't -// have an access to +// The ciphertext references a key that doesn't exist or that the DMS account +// doesn't have access to. // // * ErrCodeKMSDisabledFault "KMSDisabledFault" // The specified master key (CMK) isn't enabled. // // * ErrCodeKMSInvalidStateFault "KMSInvalidStateFault" -// The state of the specified KMS resource isn't valid for this request. +// The state of the specified AWS KMS resource isn't valid for this request. // // * ErrCodeKMSNotFoundFault "KMSNotFoundFault" -// The specified KMS entity or resource can't be found. +// The specified AWS KMS entity or resource can't be found. // // * ErrCodeKMSThrottlingFault "KMSThrottlingFault" -// This request triggered KMS request throttling. +// This request triggered AWS KMS request throttling. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyEventSubscription func (c *DatabaseMigrationService) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { @@ -4112,7 +4239,7 @@ func (c *DatabaseMigrationService) ModifyReplicationTaskRequest(input *ModifyRep // The resource you are attempting to create already exists. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. 
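// The rewritten Pages helpers above are behaviorally equivalent to the old
// cont-flag loops: the callback is invoked once per page, and the loop now
// breaks as soon as the callback returns false. A usage sketch for one of
// them (the empty input and the page handling are assumptions for
// illustration):
//
//    pageNum := 0
//    err := svc.DescribeConnectionsPages(&databasemigrationservice.DescribeConnectionsInput{},
//        func(page *databasemigrationservice.DescribeConnectionsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(len(page.Connections)) // inspect each page of results
//            return pageNum <= 3                // returning false stops pagination early
//        })
//    if err != nil {
//        log.Fatal(err)
//    }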
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyReplicationTask func (c *DatabaseMigrationService) ModifyReplicationTask(input *ModifyReplicationTaskInput) (*ModifyReplicationTaskOutput, error) { @@ -4284,7 +4411,7 @@ func (c *DatabaseMigrationService) RefreshSchemasRequest(input *RefreshSchemasIn // The resource could not be found. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // * ErrCodeResourceQuotaExceededFault "ResourceQuotaExceededFault" // The quota for this resource quota has been exceeded. @@ -4521,7 +4648,7 @@ func (c *DatabaseMigrationService) StartReplicationTaskRequest(input *StartRepli // Starts the replication task. // // For more information about AWS DMS tasks, see Working with Migration Tasks -// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the +// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the // AWS Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4794,7 +4921,7 @@ func (c *DatabaseMigrationService) TestConnectionRequest(input *TestConnectionIn // migration. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" -// AWS DMS cannot access the KMS key. +// AWS DMS cannot access the AWS KMS key. // // * ErrCodeResourceQuotaExceededFault "ResourceQuotaExceededFault" // The quota for this resource quota has been exceeded. @@ -4864,17 +4991,20 @@ func (s *AccountQuota) SetUsed(v int64) *AccountQuota { return s } +// Associates a set of tags with an AWS DMS resource. type AddTagsToResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be added - // to. AWS DMS resources include a replication instance, endpoint, and a replication + // Identifies the AWS DMS resource to which tags should be added. The value + // for this parameter is an Amazon Resource Name (ARN). + // + // For AWS DMS, you can tag a replication instance, an endpoint, or a replication // task. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // The tag to be assigned to the DMS resource. + // One or more tags to be assigned to the resource. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -4941,7 +5071,7 @@ type ApplyPendingMaintenanceActionInput struct { ApplyAction *string `type:"string" required:"true"` // A value that specifies the type of opt-in request, or undoes an opt-in request. - // An opt-in request of type immediate cannot be undone. + // You can't undo an opt-in request of type immediate. // // Valid values: // @@ -5067,17 +5197,18 @@ type Certificate struct { // The date that the certificate was created. CertificateCreationDate *time.Time `type:"timestamp"` - // The customer-assigned name of the certificate. Valid characters are A-z and - // 0-9. + // A customer-assigned name for the certificate. Identifiers must begin with + // a letter; must contain only ASCII letters, digits, and hyphens; and must + // not end with a hyphen or contain two consecutive hyphens. CertificateIdentifier *string `type:"string"` // The owner of the certificate. CertificateOwner *string `type:"string"` - // The contents of the .pem X.509 certificate file for the certificate. + // The contents of a .pem file, which contains an X.509 certificate. 
CertificatePem *string `type:"string"` - // The location of the imported Oracle Wallet certificate for use with SSL. + // The location of an imported Oracle Wallet certificate for use with SSL. // // CertificateWallet is automatically base64 encoded/decoded by the SDK. CertificateWallet []byte `type:"blob"` @@ -5247,21 +5378,21 @@ type CreateEndpointInput struct { // The settings in JSON format for the DMS transfer type of source endpoint. // - // Possible attributes include the following: + // Possible settings include the following: // - // * serviceAccessRoleArn - The IAM role that has permission to access the + // * ServiceAccessRoleArn - The IAM role that has permission to access the // Amazon S3 bucket. // - // * bucketName - The name of the S3 bucket to use. + // * BucketName - The name of the S3 bucket to use. // - // * compressionType - An optional parameter to use GZIP to compress the + // * CompressionType - An optional parameter to use GZIP to compress the // target files. To use GZIP, set this value to NONE (the default). To keep // the files uncompressed, don't use this value. // - // Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string + // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string // - // JSON syntax for these attributes is as follows: { "ServiceAccessRoleArn": - // "string", "BucketName": "string", "CompressionType": "none"|"gzip" } + // JSON syntax for these settings is as follows: { "ServiceAccessRoleArn": "string", + // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` // Settings in JSON format for the target Amazon DynamoDB endpoint. For more @@ -5283,12 +5414,12 @@ type CreateEndpointInput struct { // EndpointIdentifier is a required field EndpointIdentifier *string `type:"string" required:"true"` - // The type of endpoint. + // The type of endpoint. Valid values are source and target. // // EndpointType is a required field EndpointType *string `type:"string" required:"true" enum:"ReplicationEndpointTypeValue"` - // The type of engine for the endpoint. Valid values, depending on the EndPointType + // The type of engine for the endpoint. Valid values, depending on the EndpointType // value, include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, // redshift, s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver. // @@ -5298,25 +5429,33 @@ type CreateEndpointInput struct { // The external table definition. ExternalTableDefinition *string `type:"string"` - // Additional attributes associated with the connection. + // Additional attributes associated with the connection. Each attribute is specified + // as a name-value pair associated by an equal sign (=). Multiple attributes + // are separated by a semicolon (;) with no additional white space. For information + // on the attributes available for connecting your source or target endpoint, + // see Working with AWS DMS Endpoints (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Endpoints.html) + // in the AWS Database Migration Service User Guide. ExtraConnectionAttributes *string `type:"string"` // Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. 
// For more information about the available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping - // ) in the AWS Database Migration User Guide. + // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // in the AWS Database Migration User Guide. KinesisSettings *KinesisSettings `type:"structure"` - // The AWS KMS key identifier to use to encrypt the connection parameters. If - // you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your - // default encryption key. AWS KMS creates the default encryption key for your - // AWS account. Your AWS account has a different default encryption key for - // each AWS Region. + // An AWS KMS key identifier that is used to encrypt the connection parameters + // for the endpoint. + // + // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses + // your default encryption key. + // + // AWS KMS creates the default encryption key for your AWS account. Your AWS + // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` // Settings in JSON format for the source MongoDB endpoint. For more information // about the available settings, see the configuration properties section in - // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) + // Using MongoDB as a Source for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` @@ -5341,12 +5480,11 @@ type CreateEndpointInput struct { // to use to create the endpoint. ServiceAccessRoleArn *string `type:"string"` - // The Secure Sockets Layer (SSL) mode to use for the SSL connection. The SSL - // mode can be one of four values: none, require, verify-ca, verify-full. The - // default value is none. + // The Secure Sockets Layer (SSL) mode to use for the SSL connection. The default + // is none. SslMode *string `type:"string" enum:"DmsSslModeValue"` - // Tags to be added to the endpoint. + // One or more tags to be assigned to the endpoint. Tags []*Tag `type:"list"` // The user name to be used to log in to the endpoint database. @@ -5555,10 +5693,8 @@ type CreateEventSubscriptionInput struct { Enabled *bool `type:"boolean"` // A list of event categories for a source type that you want to subscribe to. - // You can see a list of the categories for a given source type by calling the - // DescribeEventCategories action or in the topic Working with Events and Notifications - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) in the - // AWS Database Migration Service User Guide. + // For more information, see Working with Events and Notifications (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) + // in the AWS Database Migration Service User Guide. EventCategories []*string `type:"list"` // The Amazon Resource Name (ARN) of the Amazon SNS topic created for event @@ -5568,10 +5704,13 @@ type CreateEventSubscriptionInput struct { // SnsTopicArn is a required field SnsTopicArn *string `type:"string" required:"true"` - // The list of identifiers of the event sources for which events will be returned.
- // If not specified, then all sources are included in the response. An identifier - // must begin with a letter and must contain only ASCII letters, digits, and - // hyphens; it cannot end with a hyphen or contain two consecutive hyphens. + // A list of identifiers for which AWS DMS provides notification events. + // + // If you don't specify a value, notifications are provided for all sources. + // + // If you specify multiple values, they must be of the same type. For example, + // if you specify a database instance ID, then all of the other values must + // be database instance IDs. SourceIds []*string `type:"list"` // The type of AWS DMS resource that generates the events. For example, if you @@ -5579,17 +5718,16 @@ type CreateEventSubscriptionInput struct { // this parameter to replication-instance. If this value is not specified, all // events are returned. // - // Valid values: replication-instance | migration-task + // Valid values: replication-instance | replication-task SourceType *string `type:"string"` - // The name of the AWS DMS event notification subscription. - // - // Constraints: The name must be less than 255 characters. + // The name of the AWS DMS event notification subscription. This name must be + // less than 255 characters. // // SubscriptionName is a required field SubscriptionName *string `type:"string" required:"true"` - // A tag to be attached to the event subscription. + // One or more tags to be assigned to the event subscription. Tags []*Tag `type:"list"` } @@ -5691,17 +5829,16 @@ type CreateReplicationInstanceInput struct { // instance. AllocatedStorage *int64 `type:"integer"` - // Indicates that minor engine upgrades will be applied automatically to the - // replication instance during the maintenance window. + // Indicates whether minor engine upgrades will be applied automatically to + // the replication instance during the maintenance window. This parameter defaults + // to true. // // Default: true AutoMinorVersionUpgrade *bool `type:"boolean"` - // The EC2 Availability Zone that the replication instance will be created in. - // - // Default: A random, system-chosen Availability Zone in the endpoint's region. - // - // Example: us-east-1d + // The AWS Availability Zone where the replication instance will be created. + // The default value is a random, system-chosen Availability Zone in the endpoint's + // AWS Region, for example: us-east-1d AvailabilityZone *string `type:"string"` // A list of DNS name servers supported for the replication instance. @@ -5710,15 +5847,19 @@ type CreateReplicationInstanceInput struct { // The engine version number of the replication instance. EngineVersion *string `type:"string"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. Your AWS account has a different default encryption - // key for each AWS Region. + // An AWS KMS key identifier that is used to encrypt the data on the replication + // instance. + // + // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses + // your default encryption key. + // + // AWS KMS creates the default encryption key for your AWS account. Your AWS + // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` - // Specifies if the replication instance is a Multi-AZ deployment. 
You cannot - // set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. + // Specifies whether the replication instance is a Multi-AZ deployment. You + // cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set + // to true. MultiAZ *bool `type:"boolean"` // The weekly time range during which system maintenance can occur, in Universal @@ -5727,7 +5868,7 @@ type CreateReplicationInstanceInput struct { // Format: ddd:hh24:mi-ddd:hh24:mi // // Default: A 30-minute window selected at random from an 8-hour block of time - // per region, occurring on a random day of the week. + // per AWS Region, occurring on a random day of the week. // // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun // @@ -5767,7 +5908,7 @@ type CreateReplicationInstanceInput struct { // A subnet group to associate with the replication instance. ReplicationSubnetGroupIdentifier *string `type:"string"` - // Tags to be associated with the replication instance. + // One or more tags to be assigned to the replication instance. Tags []*Tag `type:"list"` // Specifies the VPC security group to be used with the replication instance. @@ -5928,12 +6069,12 @@ type CreateReplicationSubnetGroupInput struct { // ReplicationSubnetGroupIdentifier is a required field ReplicationSubnetGroupIdentifier *string `type:"string" required:"true"` - // The EC2 subnet IDs for the subnet group. + // One or more subnet IDs to be assigned to the subnet group. // // SubnetIds is a required field SubnetIds []*string `type:"list" required:"true"` - // The tag to be assigned to the subnet group. + // One or more tags to be assigned to the subnet group. Tags []*Tag `type:"list"` } @@ -6027,6 +6168,12 @@ type CreateReplicationTaskInput struct { // Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93" // // LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” + // + // When you use this task setting with a source PostgreSQL database, a logical + // replication slot should already be created and associated with the source + // endpoint. You can verify this by setting the slotName extra connection attribute + // to the name of this logical replication slot. For more information, see Extra + // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -6045,17 +6192,17 @@ type CreateReplicationTaskInput struct { // “ CdcStopPosition *string `type:"string"` - // The migration type. + // The migration type. Valid values: full-load | cdc | full-load-and-cdc // // MigrationType is a required field MigrationType *string `type:"string" required:"true" enum:"MigrationTypeValue"` - // The Amazon Resource Name (ARN) of the replication instance. + // The Amazon Resource Name (ARN) of a replication instance. // // ReplicationInstanceArn is a required field ReplicationInstanceArn *string `type:"string" required:"true"` - // The replication task identifier. + // An identifier for the replication task. // // Constraints: // @@ -6068,30 +6215,27 @@ type CreateReplicationTaskInput struct { // ReplicationTaskIdentifier is a required field ReplicationTaskIdentifier *string `type:"string" required:"true"` - // Settings for the task, such as target metadata settings. 
For a complete list - // of task settings, see Task Settings for AWS Database Migration Service Tasks - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) + // Overall settings for the task, in JSON format. For more information, see + // Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) // in the AWS Database Migration User Guide. ReplicationTaskSettings *string `type:"string"` - // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + // An Amazon Resource Name (ARN) that uniquely identifies the source endpoint. // // SourceEndpointArn is a required field SourceEndpointArn *string `type:"string" required:"true"` - // When using the AWS CLI or boto3, provide the path of the JSON file that contains - // the table mappings. Precede the path with "file://". When working with the - // DMS API, provide the JSON as the parameter value. - // - // For example, --table-mappings file://mappingfile.json + // The table mappings for the task, in JSON format. For more information, see + // Table Mapping (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) + // in the AWS Database Migration User Guide. // // TableMappings is a required field TableMappings *string `type:"string" required:"true"` - // Tags to be added to the replication instance. + // One or more tags to be assigned to the replication task. Tags []*Tag `type:"list"` - // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + // An Amazon Resource Name (ARN) that uniquely identifies the target endpoint. // // TargetEndpointArn is a required field TargetEndpointArn *string `type:"string" required:"true"` @@ -6285,6 +6429,81 @@ func (s *DeleteCertificateOutput) SetCertificate(v *Certificate) *DeleteCertific return s } +type DeleteConnectionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + // + // EndpointArn is a required field + EndpointArn *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the replication instance. + // + // ReplicationInstanceArn is a required field + ReplicationInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + if s.ReplicationInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointArn sets the EndpointArn field's value. +func (s *DeleteConnectionInput) SetEndpointArn(v string) *DeleteConnectionInput { + s.EndpointArn = &v + return s +} + +// SetReplicationInstanceArn sets the ReplicationInstanceArn field's value. 
+func (s *DeleteConnectionInput) SetReplicationInstanceArn(v string) *DeleteConnectionInput { + s.ReplicationInstanceArn = &v + return s +} + +type DeleteConnectionOutput struct { + _ struct{} `type:"structure"` + + // The connection that is being deleted. + Connection *Connection `type:"structure"` +} + +// String returns the string representation +func (s DeleteConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionOutput) GoString() string { + return s.String() +} + +// SetConnection sets the Connection field's value. +func (s *DeleteConnectionOutput) SetConnection(v *Connection) *DeleteConnectionOutput { + s.Connection = v + return s +} + type DeleteEndpointInput struct { _ struct{} `type:"structure"` @@ -6600,6 +6819,18 @@ type DescribeAccountAttributesOutput struct { // Account quota information. AccountQuotas []*AccountQuota `type:"list"` + + // A unique AWS DMS identifier for an account in a particular AWS Region. The + // value of this identifier has the following format: c99999999999. DMS uses + // this identifier to name artifacts. For example, DMS uses this identifier + // to name the default Amazon S3 bucket for storing task assessment reports + // in a given AWS Region. The format of this S3 bucket name is the following: + // dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this + // default S3 bucket: dms-111122223333-c44445555666. + // + // AWS DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 + // and later. + UniqueAccountIdentifier *string `type:"string"` } // String returns the string representation @@ -6618,6 +6849,12 @@ func (s *DescribeAccountAttributesOutput) SetAccountQuotas(v []*AccountQuota) *D return s } +// SetUniqueAccountIdentifier sets the UniqueAccountIdentifier field's value. +func (s *DescribeAccountAttributesOutput) SetUniqueAccountIdentifier(v string) *DescribeAccountAttributesOutput { + s.UniqueAccountIdentifier = &v + return s +} + type DescribeCertificatesInput struct { _ struct{} `type:"structure"` @@ -6626,7 +6863,7 @@ type DescribeCertificatesInput struct { // An optional pagination token provided by a previous request. If this parameter // is specified, the response includes only records beyond the marker, up to - // the value specified by MaxRecords. + // the value specified by MaxRecords. Marker *string `type:"string"` // The maximum number of records to include in the response. If more records @@ -6902,7 +7139,7 @@ type DescribeEndpointTypesOutput struct { // the value specified by MaxRecords. Marker *string `type:"string"` - // The type of endpoints that are supported. + // The types of endpoints that are supported. SupportedEndpointTypes []*SupportedEndpointType `type:"list"` } @@ -7041,7 +7278,7 @@ type DescribeEventCategoriesInput struct { // The type of AWS DMS resource that generates events. // - // Valid values: replication-instance | migration-task + // Valid values: replication-instance | replication-task SourceType *string `type:"string"` } @@ -7231,7 +7468,7 @@ type DescribeEventsInput struct { // The end time for the events to be listed. EndTime *time.Time `type:"timestamp"` - // A list of event categories for a source type that you want to subscribe to. + // A list of event categories for the source type that you've chosen. EventCategories []*string `type:"list"` // Filters applied to the action. @@ -7251,14 +7488,12 @@ type DescribeEventsInput struct { // Constraints: Minimum 20, maximum 100.
MaxRecords *int64 `type:"integer"` - // The identifier of the event source. An identifier must begin with a letter - // and must contain only ASCII letters, digits, and hyphens. It cannot end with - // a hyphen or contain two consecutive hyphens. + // The identifier of an event source. SourceIdentifier *string `type:"string"` // The type of AWS DMS resource that generates events. // - // Valid values: replication-instance | migration-task + // Valid values: replication-instance | replication-task SourceType *string `type:"string" enum:"SourceType"` // The start time for the events to be listed. @@ -7476,7 +7711,7 @@ type DescribePendingMaintenanceActionsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The ARN of the replication instance. + // The Amazon Resource Name (ARN) of the replication instance. ReplicationInstanceArn *string `type:"string"` } @@ -8063,9 +8298,9 @@ type DescribeReplicationTasksInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // Set this flag to avoid returning setting information. Use this to reduce - // overhead when settings are too large. Choose TRUE to use this flag, otherwise - // choose FALSE (default). + // An option to set to avoid returning information about settings. Use this + // to reduce overhead when setting information is too large. To use this option, + // choose true; otherwise, choose false (the default). WithoutSettings *bool `type:"boolean"` } @@ -8460,7 +8695,7 @@ func (s *DynamoDbSettings) SetServiceAccessRoleArn(v string) *DynamoDbSettings { type ElasticsearchSettings struct { _ struct{} `type:"structure"` - // The endpoint for the ElasticSearch cluster. + // The endpoint for the Elasticsearch cluster. // // EndpointUri is a required field EndpointUri *string `type:"string" required:"true"` @@ -8540,21 +8775,21 @@ type Endpoint struct { // The settings in JSON format for the DMS transfer type of source endpoint. // - // Possible attributes include the following: + // Possible settings include the following: // - // * serviceAccessRoleArn - The IAM role that has permission to access the + // * ServiceAccessRoleArn - The IAM role that has permission to access the // Amazon S3 bucket. // - // * bucketName - The name of the S3 bucket to use. + // * BucketName - The name of the S3 bucket to use. // - // * compressionType - An optional parameter to use GZIP to compress the + // * CompressionType - An optional parameter to use GZIP to compress the // target files. To use GZIP, set this value to NONE (the default). To keep // the files uncompressed, don't use this value. // - // Shorthand syntax for these attributes is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string + // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string // - // JSON syntax for these attributes is as follows: { "ServiceAccessRoleArn": - // "string", "BucketName": "string", "CompressionType": "none"|"gzip" } + // JSON syntax for these settings is as follows: { "ServiceAccessRoleArn": "string", + // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` // The settings for the target DynamoDB database. For more information, see @@ -8573,16 +8808,16 @@ type Endpoint struct { // hyphen or contain two consecutive hyphens. EndpointIdentifier *string `type:"string"` - // The type of endpoint. + // The type of endpoint. 
Valid values are source and target. EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"` // The expanded name for the engine name. For example, if the EngineName parameter // is "aurora," this value would be "Amazon Aurora MySQL." EngineDisplayName *string `type:"string"` - // The database engine name. Valid values, depending on the EndPointType, include + // The database engine name. Valid values, depending on the EndpointType, include // mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, - // db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver. + // db2, azuredb, sybase, dynamodb, mongodb, and sqlserver. EngineName *string `type:"string"` // Value returned by a call to CreateEndpoint that can be used for cross-account @@ -8600,11 +8835,14 @@ type Endpoint struct { // see the KinesisSettings structure. KinesisSettings *KinesisSettings `type:"structure"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. Your AWS account has a different default encryption - // key for each AWS Region. + // An AWS KMS key identifier that is used to encrypt the connection parameters + // for the endpoint. + // + // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses + // your default encryption key. + // + // AWS KMS creates the default encryption key for your AWS account. Your AWS + // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` // The settings for the MongoDB source endpoint. For more information, see the @@ -8614,7 +8852,7 @@ type Endpoint struct { // The port value used to access the endpoint. Port *int64 `type:"integer"` - // Settings for the Amazon Redshift endpoint + // Settings for the Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` // The settings for the S3 target endpoint. For more information, see the S3Settings @@ -8627,11 +8865,7 @@ type Endpoint struct { // The Amazon Resource Name (ARN) used by the service access IAM role. ServiceAccessRoleArn *string `type:"string"` - // The SSL mode used to connect to the endpoint. - // - // SSL mode can be one of four values: none, require, verify-ca, verify-full. - // - // The default value is none. + // The SSL mode used to connect to the endpoint. The default value is none. SslMode *string `type:"string" enum:"DmsSslModeValue"` // The status of the endpoint. @@ -8807,16 +9041,12 @@ type Event struct { // The event message. Message *string `type:"string"` - // The identifier of the event source. An identifier must begin with a letter - // and must contain only ASCII letters, digits, and hyphens; it cannot end with - // a hyphen or contain two consecutive hyphens. - // - // Constraints:replication instance, endpoint, migration task + // The identifier of an event source. SourceIdentifier *string `type:"string"` // The type of AWS DMS resource that generates events. 
// - // Valid values: replication-instance | endpoint | migration-task + // Valid values: replication-instance | endpoint | replication-task SourceType *string `type:"string" enum:"SourceType"` } @@ -8863,13 +9093,13 @@ func (s *Event) SetSourceType(v string) *Event { type EventCategoryGroup struct { _ struct{} `type:"structure"` - // A list of event categories for a SourceType that you want to subscribe to. + // A list of event categories from a source type that you've chosen. EventCategories []*string `type:"list"` // The type of AWS DMS resource that generates events. // // Valid values: replication-instance | replication-server | security-group - // | migration-task + // | replication-task SourceType *string `type:"string"` } @@ -8919,7 +9149,7 @@ type EventSubscription struct { // The type of AWS DMS resource that generates events. // // Valid values: replication-instance | replication-server | security-group - // | migration-task + // | replication-task SourceType *string `type:"string"` // The status of the AWS DMS event notification subscription. @@ -9057,16 +9287,17 @@ func (s *Filter) SetValues(v []*string) *Filter { type ImportCertificateInput struct { _ struct{} `type:"structure"` - // The customer-assigned name of the certificate. Valid characters are A-z and - // 0-9. + // A customer-assigned name for the certificate. Identifiers must begin with + // a letter; must contain only ASCII letters, digits, and hyphens; and must + // not end with a hyphen or contain two consecutive hyphens. // // CertificateIdentifier is a required field CertificateIdentifier *string `type:"string" required:"true"` - // The contents of the .pem X.509 certificate file for the certificate. + // The contents of a .pem file, which contains an X.509 certificate. CertificatePem *string `type:"string"` - // The location of the imported Oracle Wallet certificate for use with SSL. + // The location of an imported Oracle Wallet certificate for use with SSL. // // CertificateWallet is automatically base64 encoded/decoded by the SDK. CertificateWallet []byte `type:"blob"` @@ -9302,12 +9533,12 @@ type ModifyEndpointInput struct { // hyphen or contain two consecutive hyphens. EndpointIdentifier *string `type:"string"` - // The type of endpoint. + // The type of endpoint. Valid values are source and target. EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"` - // The type of engine for the endpoint. Valid values, depending on the EndPointType, + // The type of engine for the endpoint. Valid values, depending on the EndpointType, // include mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, - // s3, db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver. + // s3, db2, azuredb, sybase, dynamodb, mongodb, and sqlserver. EngineName *string `type:"string"` // The external table definition. @@ -9319,13 +9550,13 @@ type ModifyEndpointInput struct { // Settings in JSON format for the target Amazon Kinesis Data Streams endpoint. // For more information about the available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping - // ) in the AWS Database Migration User Guide. + // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // in the AWS Database Migration User Guide. 
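+	// A hedged editorial sketch (not part of the generated SDK): the settings
+	// are passed as a populated KinesisSettings structure; the ARNs below are
+	// placeholders, not real resources:
+	//
+	//    input.KinesisSettings = &databasemigrationservice.KinesisSettings{
+	//        MessageFormat:        aws.String("json"),
+	//        ServiceAccessRoleArn: aws.String("arn:aws:iam::111122223333:role/dms-kinesis-role"),
+	//        StreamArn:            aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"),
+	//    }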
KinesisSettings *KinesisSettings `type:"structure"` // Settings in JSON format for the source MongoDB endpoint. For more information // about the available settings, see the configuration properties section in - // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) + // Using MongoDB as a Source for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` @@ -9350,11 +9581,7 @@ type ModifyEndpointInput struct { // to modify the endpoint. ServiceAccessRoleArn *string `type:"string"` - // The SSL mode to be used. - // - // SSL mode can be one of four values: none, require, verify-ca, verify-full. - // - // The default value is none. + // The SSL mode used to connect to the endpoint. The default value is none. SslMode *string `type:"string" enum:"DmsSslModeValue"` // The user name to be used to login to the endpoint database. @@ -9561,7 +9788,7 @@ type ModifyEventSubscriptionInput struct { // The type of AWS DMS resource that generates the events you want to subscribe // to. // - // Valid values: replication-instance | migration-task + // Valid values: replication-instance | replication-task SourceType *string `type:"string"` // The name of the AWS DMS event notification subscription to be modified. @@ -9654,12 +9881,12 @@ type ModifyReplicationInstanceInput struct { AllocatedStorage *int64 `type:"integer"` // Indicates that major version upgrades are allowed. Changing this parameter - // does not result in an outage and the change is asynchronously applied as + // does not result in an outage, and the change is asynchronously applied as // soon as possible. // - // Constraints: This parameter must be set to true when specifying a value for - // the EngineVersion parameter that is a different major version than the replication - // instance's current version. + // This parameter must be set to true when specifying a value for the EngineVersion + // parameter that is a different major version than the replication instance's + // current version. AllowMajorVersionUpgrade *bool `type:"boolean"` // Indicates whether the changes should be applied immediately or during the @@ -9678,8 +9905,9 @@ type ModifyReplicationInstanceInput struct { // The engine version number of the replication instance. EngineVersion *string `type:"string"` - // Specifies if the replication instance is a Multi-AZ deployment. You cannot - // set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. + // Specifies whether the replication instance is a Multi-AZ deployment. You + // cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set + // to true. MultiAZ *bool `type:"boolean"` // The weekly time range (in UTC) during which system maintenance can occur, @@ -9834,7 +10062,7 @@ func (s *ModifyReplicationInstanceOutput) SetReplicationInstance(v *ReplicationI type ModifyReplicationSubnetGroupInput struct { _ struct{} `type:"structure"` - // The description of the replication instance subnet group. + // A description for the replication instance subnet group. ReplicationSubnetGroupDescription *string `type:"string"` // The name of the replication instance subnet group.
@@ -9929,6 +10157,12 @@ type ModifyReplicationTaskInput struct { // Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93" // // LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” + // + // When you use this task setting with a source PostgreSQL database, a logical + // replication slot should already be created and associated with the source + // endpoint. You can verify this by setting the slotName extra connection attribute + // to the name of this logical replication slot. For more information, see Extra + // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -9947,9 +10181,7 @@ type ModifyReplicationTaskInput struct { // “ CdcStopPosition *string `type:"string"` - // The migration type. - // - // Valid values: full-load | cdc | full-load-and-cdc + // The migration type. Valid values: full-load | cdc | full-load-and-cdc MigrationType *string `type:"string" enum:"MigrationTypeValue"` // The Amazon Resource Name (ARN) of the replication task. @@ -9972,10 +10204,9 @@ type ModifyReplicationTaskInput struct { ReplicationTaskSettings *string `type:"string"` // When using the AWS CLI or boto3, provide the path of the JSON file that contains - // the table mappings. Precede the path with "file://". When working with the - // DMS API, provide the JSON as the parameter value. - // - // For example, --table-mappings file://mappingfile.json + // the table mappings. Precede the path with file://. When working with the + // DMS API, provide the JSON as the parameter value, for example: --table-mappings + // file://mappingfile.json TableMappings *string `type:"string"` } @@ -10080,11 +10311,11 @@ type MongoDbSettings struct { // // Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1 // - // DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, - // use SCRAM_SHA_1. This attribute is not used when authType=No. + // DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version + // 3.x, use SCRAM_SHA_1. This setting is not used when authType=No. AuthMechanism *string `type:"string" enum:"AuthMechanismValue"` - // The MongoDB database name. This attribute is not used when authType=NO. + // The MongoDB database name. This setting is not used when authType=NO. // // The default is admin. AuthSource *string `type:"string"` @@ -10101,13 +10332,12 @@ type MongoDbSettings struct { DatabaseName *string `type:"string"` // Indicates the number of documents to preview to determine the document organization. - // Use this attribute when NestingLevel is set to ONE. + // Use this setting when NestingLevel is set to ONE. // // Must be a positive value greater than 0. Default value is 1000. DocsToInvestigate *string `type:"string"` - // Specifies the document ID. Use this attribute when NestingLevel is set to - // NONE. + // Specifies the document ID. Use this setting when NestingLevel is set to NONE. // // Default value is false. ExtractDocId *string `type:"string"` @@ -10225,7 +10455,7 @@ func (s *MongoDbSettings) SetUsername(v string) *MongoDbSettings { type OrderableReplicationInstance struct { _ struct{} `type:"structure"` - // List of availability zones for this replication instance. 
+ // List of Availability Zones for this replication instance. AvailabilityZones []*string `type:"list"` // The default amount of storage (in gigabytes) that is allocated for the replication @@ -10247,6 +10477,12 @@ type OrderableReplicationInstance struct { // replication instance. MinAllocatedStorage *int64 `type:"integer"` + // The value returned when the specified EngineVersion of the replication instance + // is in Beta or test mode. This indicates some features might not work as expected. + // + // AWS DMS supports the ReleaseStatus parameter in versions 3.1.4 and later. + ReleaseStatus *string `type:"string" enum:"ReleaseStatusValues"` + // The compute and memory capacity of the replication instance. // // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large @@ -10303,6 +10539,12 @@ func (s *OrderableReplicationInstance) SetMinAllocatedStorage(v int64) *Orderabl return s } +// SetReleaseStatus sets the ReleaseStatus field's value. +func (s *OrderableReplicationInstance) SetReleaseStatus(v string) *OrderableReplicationInstance { + s.ReleaseStatus = &v + return s +} + // SetReplicationInstanceClass sets the ReplicationInstanceClass field's value. func (s *OrderableReplicationInstance) SetReplicationInstanceClass(v string) *OrderableReplicationInstance { s.ReplicationInstanceClass = &v @@ -10467,65 +10709,69 @@ func (s *RebootReplicationInstanceOutput) SetReplicationInstance(v *ReplicationI type RedshiftSettings struct { _ struct{} `type:"structure"` - // Allows any date format, including invalid formats such as 00/00/00 00:00:00, - // to be loaded without generating an error. You can choose TRUE or FALSE (default). + // A value that indicates to allow any date format, including invalid formats + // such as 00/00/00 00:00:00, to be loaded without generating an error. You + // can choose true or false (the default). // // This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE - // with the DATEFORMAT parameter. If the date format for the data does not match + // with the DATEFORMAT parameter. If the date format for the data doesn't match // the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that // field. AcceptAnyDate *bool `type:"boolean"` - // Code to run after connecting. This should be the code, not a filename. + // Code to run after connecting. This parameter should contain the code itself, + // not the name of a file containing the code. AfterConnectScript *string `type:"string"` - // The location where the CSV files are stored before being uploaded to the - // S3 bucket. + // The location where the comma-separated value (.csv) files are stored before + // being uploaded to the S3 bucket. BucketFolder *string `type:"string"` // The name of the S3 bucket you want to use BucketName *string `type:"string"` - // Sets the amount of time to wait (in milliseconds) before timing out, beginning - // from when you initially establish a connection. + // A value that sets the amount of time to wait (in milliseconds) before timing + // out, beginning from when you initially establish a connection. ConnectionTimeout *int64 `type:"integer"` - // The name of the Amazon Redshift data warehouse (service) you are working + // The name of the Amazon Redshift data warehouse (service) that you are working // with. DatabaseName *string `type:"string"` - // The date format you are using. Valid values are auto (case-sensitive), your - // date format string enclosed in quotes, or NULL. 
If this is left unset (NULL), - // it defaults to a format of 'YYYY-MM-DD'. Using auto recognizes most strings, - // even some that are not supported when you use a date format string. + // The date format that you are using. Valid values are auto (case-sensitive), + // your date format string enclosed in quotes, or NULL. If this parameter is + // left unset (NULL), it defaults to a format of 'YYYY-MM-DD'. Using auto recognizes + // most strings, even some that aren't supported when you use a date format + // string. // // If your date and time values use formats different from each other, set this // to auto. DateFormat *string `type:"string"` - // Specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as - // NULL. A value of TRUE sets empty CHAR and VARCHAR fields to null. The default - // is FALSE. + // A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR + // fields as NULL. A value of true sets empty CHAR and VARCHAR fields to null. + // The default is false. EmptyAsNull *bool `type:"boolean"` - // The type of server side encryption you want to use for your data. This is - // part of the endpoint settings or the extra connections attributes for Amazon - // S3. You can choose either SSE_S3 (default) or SSE_KMS. To use SSE_S3, create - // an IAM role with a policy that allows "arn:aws:s3:::*" to use the following - // actions: "s3:PutObject", "s3:ListBucket". + // The type of server-side encryption that you want to use for your data. This + // encryption type is part of the endpoint settings or the extra connections + // attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS. + // To use SSE_S3, create an AWS Identity and Access Management (IAM) role with + // a policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", + // "s3:ListBucket" EncryptionMode *string `type:"string" enum:"EncryptionModeValue"` - // Specifies the number of threads used to upload a single file. This accepts - // a value between 1 and 64. It defaults to 10. + // The number of threads used to upload a single file. This parameter accepts + // a value from 1 through 64. It defaults to 10. FileTransferUploadStreams *int64 `type:"integer"` - // Sets the amount of time to wait (in milliseconds) before timing out, beginning + // The amount of time to wait (in milliseconds) before timing out, beginning // from when you begin loading. LoadTimeout *int64 `type:"integer"` - // Specifies the maximum size (in KB) of any CSV file used to transfer data - // to Amazon Redshift. This accepts a value between 1 and 1048576. It defaults - // to 32768 KB (32 MB). + // The maximum size (in KB) of any .csv file used to transfer data to Amazon + // Redshift. This accepts a value from 1 through 1,048,576. It defaults to 32,768 + // KB (32 MB). MaxFileSize *int64 `type:"integer"` // The password for the user named in the username property. @@ -10534,54 +10780,56 @@ type RedshiftSettings struct { // The port number for Amazon Redshift. The default value is 5439. Port *int64 `type:"integer"` - // Removes surrounding quotation marks from strings in the incoming data. All - // characters within the quotation marks, including delimiters, are retained. - // Choose TRUE to remove quotation marks. The default is FALSE. + // A value that specifies to remove surrounding quotation marks from strings + // in the incoming data. All characters within the quotation marks, including + // delimiters, are retained. Choose true to remove quotation marks. 
The default + // is false. RemoveQuotes *bool `type:"boolean"` - // Replaces invalid characters specified in ReplaceInvalidChars, substituting - // the specified value instead. The default is "?". + // A value that specifies to replace the invalid characters specified in ReplaceInvalidChars, + // substituting the specified characters instead. The default is "?". ReplaceChars *string `type:"string"` - // A list of chars you want to replace. Use with ReplaceChars. + // A list of characters that you want to replace. Use with ReplaceChars. ReplaceInvalidChars *string `type:"string"` // The name of the Amazon Redshift cluster you are using. ServerName *string `type:"string"` - // If you are using SSE_KMS for the EncryptionMode, provide the KMS Key ID. - // The key you use needs an attached policy that enables IAM user permissions - // and allows use of the key. + // The AWS KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide + // this key ID. The key that you use needs an attached policy that enables IAM + // user permissions and allows use of the key. ServerSideEncryptionKmsKeyId *string `type:"string"` - // The ARN of the role that has access to the Redshift service. + // The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon + // Redshift service. ServiceAccessRoleArn *string `type:"string"` - // The time format you want to use. Valid values are auto (case-sensitive), + // The time format that you want to use. Valid values are auto (case-sensitive), // 'timeformat_string', 'epochsecs', or 'epochmillisecs'. It defaults to 10. - // Using auto recognizes most strings, even some that are not supported when + // Using auto recognizes most strings, even some that aren't supported when // you use a time format string. // // If your date and time values use formats different from each other, set this - // to auto. + // parameter to auto. TimeFormat *string `type:"string"` - // Removes the trailing white space characters from a VARCHAR string. This parameter - // applies only to columns with a VARCHAR data type. Choose TRUE to remove unneeded - // white space. The default is FALSE. + // A value that specifies to remove the trailing white space characters from + // a VARCHAR string. This parameter applies only to columns with a VARCHAR data + // type. Choose true to remove unneeded white space. The default is false. TrimBlanks *bool `type:"boolean"` - // Truncates data in columns to the appropriate number of characters, so that - // it fits in the column. Applies only to columns with a VARCHAR or CHAR data - // type, and rows with a size of 4 MB or less. Choose TRUE to truncate data. - // The default is FALSE. + // A value that specifies to truncate data in columns to the appropriate number + // of characters, so that the data fits in the column. This parameter applies + // only to columns with a VARCHAR or CHAR data type, and rows with a size of + // 4 MB or less. Choose true to truncate data. The default is false. TruncateColumns *bool `type:"boolean"` // An Amazon Redshift user name for a registered user. Username *string `type:"string"` - // The size of the write buffer to use in rows. Valid values range from 1 to - // 2048. Defaults to 1024. Use this setting to tune performance. + // The size of the write buffer to use in rows. Valid values range from 1 through + // 2,048. The default is 1,024. Use this setting to tune performance.
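+	// An editorial aside, not generated code: each field above has a generated
+	// setter returning *RedshiftSettings, so tuning values chain naturally; the
+	// numbers here are illustrative assumptions, not recommendations:
+	//
+	//    rs := (&databasemigrationservice.RedshiftSettings{}).
+	//        SetMaxFileSize(65536).   // 64 MB .csv chunks
+	//        SetWriteBufferSize(2048) // largest valid write buffer
+	//    _ = rs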
WriteBufferSize *int64 `type:"integer"` } @@ -10969,11 +11217,12 @@ func (s *ReloadTablesOutput) SetReplicationTaskArn(v string) *ReloadTablesOutput return s } +// Removes one or more tags from an AWS DMS resource. type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` - // >The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be - // removed from. + // An AWS DMS resource from which you want to remove tag(s). The value for this + // parameter is an Amazon Resource Name (ARN). // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -11063,15 +11312,19 @@ type ReplicationInstance struct { // The time the replication instance was created. InstanceCreateTime *time.Time `type:"timestamp"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. Your AWS account has a different default encryption - // key for each AWS Region. + // An AWS KMS key identifier that is used to encrypt the data on the replication + // instance. + // + // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses + // your default encryption key. + // + // AWS KMS creates the default encryption key for your AWS account. Your AWS + // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` - // Specifies if the replication instance is a Multi-AZ deployment. You cannot - // set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. + // Specifies whether the replication instance is a Multi-AZ deployment. You + // cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set + // to true. MultiAZ *bool `type:"boolean"` // The pending modification values. @@ -11113,7 +11366,7 @@ type ReplicationInstance struct { // Deprecated: ReplicationInstancePrivateIpAddress has been deprecated ReplicationInstancePrivateIpAddress *string `deprecated:"true" type:"string"` - // The private IP address of the replication instance. + // One or more private IP addresses for the replication instance. ReplicationInstancePrivateIpAddresses []*string `type:"list"` // The public IP address of the replication instance. @@ -11121,7 +11374,7 @@ type ReplicationInstance struct { // Deprecated: ReplicationInstancePublicIpAddress has been deprecated ReplicationInstancePublicIpAddress *string `deprecated:"true" type:"string"` - // The public IP address of the replication instance. + // One or more public IP addresses for the replication instance. ReplicationInstancePublicIpAddresses []*string `type:"list"` // The status of the replication instance. @@ -11337,8 +11590,9 @@ type ReplicationPendingModifiedValues struct { // The engine version number of the replication instance. EngineVersion *string `type:"string"` - // Specifies if the replication instance is a Multi-AZ deployment. You cannot - // set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. + // Specifies whether the replication instance is a Multi-AZ deployment. You + // cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set + // to true. MultiAZ *bool `type:"boolean"` // The compute and memory capacity of the replication instance. 
@@ -11385,7 +11639,7 @@ func (s *ReplicationPendingModifiedValues) SetReplicationInstanceClass(v string) type ReplicationSubnetGroup struct { _ struct{} `type:"structure"` - // The description of the replication subnet group. + // A description for the replication subnet group. ReplicationSubnetGroupDescription *string `type:"string"` // The identifier of the replication instance subnet group. @@ -11445,8 +11699,8 @@ type ReplicationTask struct { _ struct{} `type:"structure"` // Indicates when you want a change data capture (CDC) operation to start. Use - // either CdcStartPosition or CdcStartTime to specify when you want a CDC operation - // to start. Specifying both values results in an error. + // either CdcStartPosition or CdcStartTime to specify when you want the CDC + // operation to start. Specifying both values results in an error. // // The value can be in date, checkpoint, or LSN/SCN format. // @@ -11720,9 +11974,26 @@ type ReplicationTaskStats struct { // The elapsed time of the task, in milliseconds. ElapsedTimeMillis *int64 `type:"long"` + // The date the replication task was started either with a fresh start or a + // target reload. + FreshStartDate *time.Time `type:"timestamp"` + + // The date the replication task full load was completed. + FullLoadFinishDate *time.Time `type:"timestamp"` + // The percent complete for the full load migration task. FullLoadProgressPercent *int64 `type:"integer"` + // The date the replication task full load was started. + FullLoadStartDate *time.Time `type:"timestamp"` + + // The date the replication task was started either with a fresh start or a + // resume. For more information, see StartReplicationTaskType (https://docs.aws.amazon.com/dms/latest/APIReference/API_StartReplicationTask.html#DMS-StartReplicationTask-request-StartReplicationTaskType). + StartDate *time.Time `type:"timestamp"` + + // The date the replication task was stopped. + StopDate *time.Time `type:"timestamp"` + // The number of errors that have occurred during this task. TablesErrored *int64 `type:"integer"` @@ -11752,12 +12023,42 @@ func (s *ReplicationTaskStats) SetElapsedTimeMillis(v int64) *ReplicationTaskSta return s } +// SetFreshStartDate sets the FreshStartDate field's value. +func (s *ReplicationTaskStats) SetFreshStartDate(v time.Time) *ReplicationTaskStats { + s.FreshStartDate = &v + return s +} + +// SetFullLoadFinishDate sets the FullLoadFinishDate field's value. +func (s *ReplicationTaskStats) SetFullLoadFinishDate(v time.Time) *ReplicationTaskStats { + s.FullLoadFinishDate = &v + return s +} + // SetFullLoadProgressPercent sets the FullLoadProgressPercent field's value. func (s *ReplicationTaskStats) SetFullLoadProgressPercent(v int64) *ReplicationTaskStats { s.FullLoadProgressPercent = &v return s } +// SetFullLoadStartDate sets the FullLoadStartDate field's value. +func (s *ReplicationTaskStats) SetFullLoadStartDate(v time.Time) *ReplicationTaskStats { + s.FullLoadStartDate = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *ReplicationTaskStats) SetStartDate(v time.Time) *ReplicationTaskStats { + s.StartDate = &v + return s +} + +// SetStopDate sets the StopDate field's value. +func (s *ReplicationTaskStats) SetStopDate(v time.Time) *ReplicationTaskStats { + s.StopDate = &v + return s +} + // SetTablesErrored sets the TablesErrored field's value.
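+// Editorial sketch, not generated code: the lifecycle timestamps added above
+// (StartDate, StopDate, and the full-load dates) are nullable *time.Time
+// values, so guard them before use; stats is assumed to be a previously
+// fetched *ReplicationTaskStats:
+//
+//    if stats.StartDate != nil && stats.StopDate != nil {
+//        fmt.Println("task ran for", stats.StopDate.Sub(*stats.StartDate))
+//    }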
func (s *ReplicationTaskStats) SetTablesErrored(v int64) *ReplicationTaskStats { s.TablesErrored = &v @@ -11789,8 +12090,8 @@ type ResourcePendingMaintenanceActions struct { PendingMaintenanceActionDetails []*PendingMaintenanceAction `type:"list"` // The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance - // action applies to. For information about creating an ARN, see Constructing - // an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/dms/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN) + // action applies to. For information about creating an ARN, see Constructing + // an Amazon Resource Name (ARN) for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.AWS.ARN.html) // in the DMS documentation. ResourceIdentifier *string `type:"string"` } @@ -11822,24 +12123,37 @@ type S3Settings struct { _ struct{} `type:"structure"` // An optional parameter to set a folder name in the S3 bucket. If provided, - // tables are created in the path ///. - // If this parameter is not specified, then the path used is //. + // tables are created in the path bucketFolder/schema_name/table_name/. If this + // parameter is not specified, then the path used is schema_name/table_name/. BucketFolder *string `type:"string"` // The name of the S3 bucket. BucketName *string `type:"string"` - // Option to write only INSERT operations to the comma-separated value (CSV) - // output files. By default, the first field in a CSV record contains the letter - // I (insert), U (update) or D (delete) to indicate whether the row was inserted, - // updated, or deleted at the source database. If cdcInsertsOnly is set to true, - // then only INSERTs are recorded in the CSV file, without the I annotation - // on each line. Valid values are TRUE and FALSE. + // A value that enables a change data capture (CDC) load to write only INSERT + // operations to .csv or columnar storage (.parquet) output files. By default + // (the false setting), the first field in a .csv or .parquet record contains + // the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether + // the row was inserted, updated, or deleted at the source database for a CDC + // load to the target. + // + // If CdcInsertsOnly is set to true or y, only INSERTs from the source database + // are migrated to the .csv or .parquet file. For .csv format only, how these + // INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad + // is set to true, the first field of every CDC record is set to I to indicate + // the INSERT operation at the source. If IncludeOpForFullLoad is set to false, + // every CDC record is written without a first field to indicate the INSERT + // operation at the source. For more information about how these settings work + // together, see Indicating Source DB Operations in Migrated S3 Data (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps) + // in the AWS Database Migration Service User Guide. + // + // AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad + // parameters in versions 3.1.4 and later. CdcInsertsOnly *bool `type:"boolean"` // An optional parameter to use GZIP to compress the target files. Set to GZIP // to compress the target files. Set to NONE (the default) or do not use to - // leave the files uncompressed. Applies to both CSV and PARQUET data formats. + // leave the files uncompressed. Applies to both .csv and .parquet file formats.
CompressionType *string `type:"string" enum:"CompressionTypeValue"` // The delimiter used to separate columns in the source files. The default is @@ -11850,47 +12164,49 @@ type S3Settings struct { // carriage return (\n). CsvRowDelimiter *string `type:"string"` - // The format of the data which you want to use for output. You can choose one + // The format of the data that you want to use for output. You can choose one // of the following: // - // * CSV : This is a row-based format with comma-separated values. + // * csv : This is a row-based file format with comma-separated values (.csv). // - // * PARQUET : Apache Parquet is a columnar storage format that features - // efficient compression and provides faster query response. + // * parquet : Apache Parquet (.parquet) is a columnar storage file format + // that features efficient compression and provides faster query response. DataFormat *string `type:"string" enum:"DataFormatValue"` - // The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1MiB). - // For PARQUET format only. + // The size of one data page in bytes. This parameter defaults to 1024 * 1024 + // bytes (1 MiB). This number is used for .parquet file format only. DataPageSize *int64 `type:"integer"` // The maximum size of an encoded dictionary page of a column. If the dictionary // page exceeds this, this column is stored using an encoding type of PLAIN. - // Defaults to 1024 * 1024 bytes (1MiB), the maximum size of a dictionary page - // before it reverts to PLAIN encoding. For PARQUET format only. + // This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of + // a dictionary page before it reverts to PLAIN encoding. This size is used + // for .parquet file format only. DictPageSizeLimit *int64 `type:"integer"` - // Enables statistics for Parquet pages and rowGroups. Choose TRUE to enable - // statistics, choose FALSE to disable. Statistics include NULL, DISTINCT, MAX, - // and MIN values. Defaults to TRUE. For PARQUET format only. + // A value that enables statistics for Parquet pages and row groups. Choose + // true to enable statistics, false to disable. Statistics include NULL, DISTINCT, + // MAX, and MIN values. This parameter defaults to true. This value is used + // for .parquet file format only. EnableStatistics *bool `type:"boolean"` - // The type of encoding you are using: RLE_DICTIONARY (default), PLAIN, or PLAIN_DICTIONARY. + // The type of encoding you are using: // // * RLE_DICTIONARY uses a combination of bit-packing and run-length encoding - // to store repeated values more efficiently. + // to store repeated values more efficiently. This is the default. // - // * PLAIN does not use encoding at all. Values are stored as they are. + // * PLAIN doesn't use encoding at all. Values are stored as they are. // // * PLAIN_DICTIONARY builds a dictionary of the values encountered in a // given column. The dictionary is stored in a dictionary page for each column // chunk. EncodingType *string `type:"string" enum:"EncodingTypeValue"` - // The type of server side encryption you want to use for your data. This is - // part of the endpoint settings or the extra connections attributes for Amazon - // S3. You can choose either SSE_S3 (default) or SSE_KMS. To use SSE_S3, you - // need an IAM role with permission to allow "arn:aws:s3:::dms-*" to use the - // following actions: + // The type of server-side encryption that you want to use for your data. 
This + // encryption type is part of the endpoint settings or the extra connections + // attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS. + // To use SSE_S3, you need an AWS Identity and Access Management (IAM) role + // with permission to allow "arn:aws:s3:::dms-*" to use the following actions: // // * s3:CreateBucket // @@ -11918,28 +12234,96 @@ type S3Settings struct { // The external table definition. ExternalTableDefinition *string `type:"string"` - // The version of Apache Parquet format you want to use: PARQUET_1_0 (default) - // or PARQUET_2_0. + // A value that enables a full load to write INSERT operations to the comma-separated + // value (.csv) output files only to indicate how the rows were added to the + // source database. + // + // AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and + // later. + // + // For full load, records can only be inserted. By default (the false setting), + // no information is recorded in these output files for a full load to indicate + // that the rows were inserted at the source database. If IncludeOpForFullLoad + // is set to true or y, the INSERT is recorded as an I annotation in the first + // field of the .csv file. This allows the format of your target records from + // a full load to be consistent with the target records from a CDC load. + // + // This setting works together with the CdcInsertsOnly parameter for output + // to .csv files only. For more information about how these settings work together, + // see Indicating Source DB Operations in Migrated S3 Data (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps) + // in the AWS Database Migration Service User Guide. + IncludeOpForFullLoad *bool `type:"boolean"` + + // A value that specifies the precision of any TIMESTAMP column values that + // are written to an Amazon S3 object file in .parquet format. + // + // AWS DMS supports the ParquetTimestampInMillisecond parameter in versions + // 3.1.4 and later. + // + // When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all + // TIMESTAMP columns in a .parquet formatted file with millisecond precision. + // Otherwise, DMS writes them with microsecond precision. + // + // Currently, Amazon Athena and AWS Glue can handle only millisecond precision + // for TIMESTAMP values. Set this parameter to true for S3 endpoint object files + // that are .parquet formatted only if you plan to query or process the data + // with Athena or AWS Glue. + // + // AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv + // format with microsecond precision. + // + // Setting ParquetTimestampInMillisecond has no effect on the string format + // of the timestamp column value that is inserted by setting the TimestampColumnName + // parameter. + ParquetTimestampInMillisecond *bool `type:"boolean"` + + // The version of the Apache Parquet format that you want to use: parquet_1_0 + // (the default) or parquet_2_0. ParquetVersion *string `type:"string" enum:"ParquetVersionValue"` // The number of rows in a row group. A smaller row group size provides faster - // reads. But as the number of row groups grows, the slower writes become. Defaults - // to 10,000 (ten thousand) rows. For PARQUET format only. + // reads. But as the number of row groups grows, writes become slower. This + // parameter defaults to 10,000 rows. This number is used for .parquet file + // format only.
// // If you choose a value larger than the maximum, RowGroupLength is set to the // max row group length in bytes (64 * 1024 * 1024). RowGroupLength *int64 `type:"integer"` - // If you are using SSE_KMS for the EncryptionMode, provide the KMS Key ID. - // The key you use needs an attached policy that enables IAM user permissions - // and allows use of the key. + // If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key + // ID. The key that you use needs an attached policy that enables AWS Identity + // and Access Management (IAM) user permissions and allows use of the key. // - // Here is a CLI example: aws dms create-endpoint --endpoint-identifier - // --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=,BucketFolder=,BucketName=,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId= + // Here is a CLI example: aws dms create-endpoint --endpoint-identifier value + // --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value ServerSideEncryptionKmsKeyId *string `type:"string"` // The Amazon Resource Name (ARN) used by the service access IAM role. ServiceAccessRoleArn *string `type:"string"` + + // A value that when nonblank causes AWS DMS to add a column with timestamp + // information to the endpoint data for an Amazon S3 target. + // + // AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and + // later. + // + // DMS includes an additional STRING column in the .csv or .parquet object files + // of your migrated data when you set TimestampColumnName to a nonblank value. + // + // For a full load, each row of this timestamp column contains a timestamp for + // when the data was transferred from the source to the target by DMS. + // + // For a change data capture (CDC) load, each row of the timestamp column contains + // the timestamp for the commit of that row in the source database. + // + // The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. + // By default, the precision of this value is in microseconds. For a CDC load, + // the rounding of the precision depends on the commit timestamp supported by + // DMS for the source database. + // + // When the AddColumnName parameter is set to true, DMS also includes a name + // for the timestamp column that you set with TimestampColumnName. + TimestampColumnName *string `type:"string"` } // String returns the string representation @@ -12030,6 +12414,18 @@ func (s *S3Settings) SetExternalTableDefinition(v string) *S3Settings { return s } +// SetIncludeOpForFullLoad sets the IncludeOpForFullLoad field's value. +func (s *S3Settings) SetIncludeOpForFullLoad(v bool) *S3Settings { + s.IncludeOpForFullLoad = &v + return s +} + +// SetParquetTimestampInMillisecond sets the ParquetTimestampInMillisecond field's value. +func (s *S3Settings) SetParquetTimestampInMillisecond(v bool) *S3Settings { + s.ParquetTimestampInMillisecond = &v + return s +} + // SetParquetVersion sets the ParquetVersion field's value. func (s *S3Settings) SetParquetVersion(v string) *S3Settings { s.ParquetVersion = &v @@ -12054,6 +12450,12 @@ func (s *S3Settings) SetServiceAccessRoleArn(v string) *S3Settings { return s } +// SetTimestampColumnName sets the TimestampColumnName field's value. 
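+// A hedged usage sketch (editorial, not generated): the new S3 settings
+// described above chain through their generated setters; "dms_commit_ts" is
+// a hypothetical column name:
+//
+//    s3 := (&databasemigrationservice.S3Settings{}).
+//        SetIncludeOpForFullLoad(true).
+//        SetParquetTimestampInMillisecond(true).
+//        SetTimestampColumnName("dms_commit_ts")
+//    _ = s3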
+func (s *S3Settings) SetTimestampColumnName(v string) *S3Settings { + s.TimestampColumnName = &v + return s +} + type StartReplicationTaskAssessmentInput struct { _ struct{} `type:"structure"` @@ -12129,6 +12531,12 @@ type StartReplicationTaskInput struct { // Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93" // // LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373” + // + // When you use this task setting with a source PostgreSQL database, a logical + // replication slot should already be created and associated with the source + // endpoint. You can verify this by setting the slotName extra connection attribute + // to the name of this logical replication slot. For more information, see Extra + // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -12342,16 +12750,16 @@ func (s *Subnet) SetSubnetStatus(v string) *Subnet { type SupportedEndpointType struct { _ struct{} `type:"structure"` - // The type of endpoint. + // The type of endpoint. Valid values are source and target. EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"` // The expanded name for the engine name. For example, if the EngineName parameter // is "aurora," this value would be "Amazon Aurora MySQL." EngineDisplayName *string `type:"string"` - // The database engine name. Valid values, depending on the EndPointType, include + // The database engine name. Valid values, depending on the EndpointType, include // mysql, oracle, postgres, mariadb, aurora, aurora-postgresql, redshift, s3, - // db2, azuredb, sybase, sybase, dynamodb, mongodb, and sqlserver. + // db2, azuredb, sybase, dynamodb, mongodb, and sqlserver. EngineName *string `type:"string"` // Indicates if Change Data Capture (CDC) is supported. @@ -12450,16 +12858,16 @@ type TableStatistics struct { // // * Pending records—Some records in the table are waiting for validation. // - // * Mismatched records—Some records in the table do not match between the - // source and target. + // * Mismatched records—Some records in the table do not match between + // the source and target. // // * Suspended records—Some records in the table could not be validated. // - // * No primary key—The table could not be validated because it had no primary - // key. + // * No primary key—The table could not be validated because it had no + // primary key. // - // * Table error—The table was not validated because it was in an error state - // and some data was not migrated. + // * Table error—The table was not validated because it was in an error + // state and some data was not migrated. // // * Validated—All rows in the table were validated. If the table is updated, // the status can change from Validated. 
@@ -12868,6 +13276,11 @@ const ( RefreshSchemasStatusTypeValueRefreshing = "refreshing" ) +const ( + // ReleaseStatusValuesBeta is a ReleaseStatusValues enum value + ReleaseStatusValuesBeta = "beta" +) + const ( // ReloadOptionValueDataReload is a ReloadOptionValue enum value ReloadOptionValueDataReload = "data-reload" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go index 9527c97f761..35ed259eb45 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go @@ -39,8 +39,8 @@ const ( // ErrCodeKMSAccessDeniedFault for service response error code // "KMSAccessDeniedFault". // - // The ciphertext references a key that doesn't exist or DMS account doesn't - // have an access to + // The ciphertext references a key that doesn't exist or that the DMS account + // doesn't have access to. ErrCodeKMSAccessDeniedFault = "KMSAccessDeniedFault" // ErrCodeKMSDisabledFault for service response error code @@ -52,25 +52,25 @@ const ( // ErrCodeKMSInvalidStateFault for service response error code // "KMSInvalidStateFault". // - // The state of the specified KMS resource isn't valid for this request. + // The state of the specified AWS KMS resource isn't valid for this request. ErrCodeKMSInvalidStateFault = "KMSInvalidStateFault" // ErrCodeKMSKeyNotAccessibleFault for service response error code // "KMSKeyNotAccessibleFault". // - // AWS DMS cannot access the KMS key. + // AWS DMS cannot access the AWS KMS key. ErrCodeKMSKeyNotAccessibleFault = "KMSKeyNotAccessibleFault" // ErrCodeKMSNotFoundFault for service response error code // "KMSNotFoundFault". // - // The specified KMS entity or resource can't be found. + // The specified AWS KMS entity or resource can't be found. ErrCodeKMSNotFoundFault = "KMSNotFoundFault" // ErrCodeKMSThrottlingFault for service response error code // "KMSThrottlingFault". // - // This request triggered KMS request throttling. + // This request triggered AWS KMS request throttling. ErrCodeKMSThrottlingFault = "KMSThrottlingFault" // ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go index 8ee775e03ca..ff3e35d659c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go @@ -46,11 +46,11 @@ const ( // svc := databasemigrationservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DatabaseMigrationService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DatabaseMigrationService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DatabaseMigrationService { svc := &DatabaseMigrationService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-01-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go index 1dac2334651..d31631d8264 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go @@ -592,7 +592,7 @@ func (c *DataPipeline) DescribeObjectsWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeObjects operation. // pageNum := 0 // err := client.DescribeObjectsPages(params, -// func(page *DescribeObjectsOutput, lastPage bool) bool { +// func(page *datapipeline.DescribeObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -624,10 +624,12 @@ func (c *DataPipeline) DescribeObjectsPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1020,7 +1022,7 @@ func (c *DataPipeline) ListPipelinesWithContext(ctx aws.Context, input *ListPipe // // Example iterating over at most 3 pages of a ListPipelines operation. // pageNum := 0 // err := client.ListPipelinesPages(params, -// func(page *ListPipelinesOutput, lastPage bool) bool { +// func(page *datapipeline.ListPipelinesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1052,10 +1054,12 @@ func (c *DataPipeline) ListPipelinesPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1209,12 +1213,9 @@ func (c *DataPipeline) PutPipelineDefinitionRequest(input *PutPipelineDefinition // PutPipelineDefinition also validates the configuration as it adds it to the // pipeline. Changes to the pipeline are saved unless one of the following three // validation errors exists in the pipeline. -// -// An object is missing a name or identifier field. -// A string or reference field is empty. -// The number of objects in the pipeline exceeds the maximum allowed objects. -// -// The pipeline is in a FINISHED state. +// +// * An object is missing a name or identifier field. +// +// * A string or reference field is empty. +// +// * The number of objects in the pipeline exceeds the maximum allowed objects. +// +// The pipeline is in a FINISHED state. // Pipeline object definitions are passed to the PutPipelineDefinition action // and returned by the GetPipelineDefinition action.
// @@ -1372,7 +1373,7 @@ func (c *DataPipeline) QueryObjectsWithContext(ctx aws.Context, input *QueryObje // // Example iterating over at most 3 pages of a QueryObjects operation. // pageNum := 0 // err := client.QueryObjectsPages(params, -// func(page *QueryObjectsOutput, lastPage bool) bool { +// func(page *datapipeline.QueryObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1404,10 +1405,12 @@ func (c *DataPipeline) QueryObjectsPagesWithContext(ctx aws.Context, input *Quer }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*QueryObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*QueryObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2983,21 +2986,35 @@ type Operator struct { // The comparison operators EQ and REF_EQ act on the following fields: // // * name + // // * @sphere + // // * parent + // // * @componentParent + // // * @instanceParent + // // * @status + // // * @scheduledStartTime + // // * @scheduledEndTime + // // * @actualStartTime + // // * @actualEndTime + // // The comparison operators GE, LE, and BETWEEN act on the following fields: // // * @scheduledStartTime + // // * @scheduledEndTime + // // * @actualStartTime + // // * @actualEndTime + // // Note that fields beginning with the at sign (@) are read-only and set by // the web service. When you name fields, you should choose names containing // only alpha-numeric values, as symbols may be reserved by AWS Data Pipeline. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go index ebd5c29b2fc..8f48358db5b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := datapipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataPipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataPipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataPipeline { svc := &DataPipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-29", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go index 3d4af4d2ac4..6a97a930fee 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go @@ -80,6 +80,9 @@ func (c *DataSync) CancelTaskExecutionRequest(input *CancelTaskExecutionInput) ( // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CancelTaskExecution func (c *DataSync) CancelTaskExecution(input *CancelTaskExecutionInput) (*CancelTaskExecutionOutput, error) { req, out := c.CancelTaskExecutionRequest(input) @@ -153,12 +156,14 @@ func (c *DataSync) CreateAgentRequest(input *CreateAgentInput) (req *request.Req // target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created // in this AWS Region. // +// You can activate the agent in a VPC (Virtual Private Cloud) or provide the +// agent access to a VPC endpoint so you can run tasks without going over the +// public Internet. +// // You can use an agent for more than one location. If a task uses multiple // agents, all of them need to have status AVAILABLE for the task to run. If // you use multiple agents for a source location, the status of all the agents -// must be AVAILABLE for the task to run. For more information, see Activating -// a Sync Agent (https://docs.aws.amazon.com/sync-service/latest/userguide/working-with-sync-agents.html#activating-sync-agent) -// in the AWS DataSync User Guide. +// must be AVAILABLE for the task to run. // // Agents are automatically updated by AWS on a regular basis, using a mechanism // that ensures minimal interruption to your tasks. @@ -174,6 +179,9 @@ func (c *DataSync) CreateAgentRequest(input *CreateAgentInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateAgent func (c *DataSync) CreateAgent(input *CreateAgentInput) (*CreateAgentOutput, error) { req, out := c.CreateAgentRequest(input) @@ -253,6 +261,9 @@ func (c *DataSync) CreateLocationEfsRequest(input *CreateLocationEfsInput) (req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request.
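The partitionID threading above (shown here for DataPipeline, and applied identically to the other regenerated clients) only changes how the client is assembled internally; callers still construct it through New. A sketch, assuming the v1.25.x metadata.ClientInfo carries the new PartitionID field populated by the default endpoint resolver:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datapipeline"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := datapipeline.New(sess)

	// The resolved partition now rides along on the client metadata.
	fmt.Println(svc.ClientInfo.PartitionID) // expected "aws" for commercial regions
}
```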
// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationEfs func (c *DataSync) CreateLocationEfs(input *CreateLocationEfsInput) (*CreateLocationEfsOutput, error) { req, out := c.CreateLocationEfsRequest(input) @@ -319,7 +330,8 @@ func (c *DataSync) CreateLocationNfsRequest(input *CreateLocationNfsInput) (req // CreateLocationNfs API operation for AWS DataSync. // -// Creates an endpoint for a Network File System (NFS) file system. +// Defines a file system on a Network File System (NFS) server that can be read +// from or written to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -332,6 +344,9 @@ func (c *DataSync) CreateLocationNfsRequest(input *CreateLocationNfsInput) (req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationNfs func (c *DataSync) CreateLocationNfs(input *CreateLocationNfsInput) (*CreateLocationNfsOutput, error) { req, out := c.CreateLocationNfsRequest(input) @@ -404,8 +419,9 @@ func (c *DataSync) CreateLocationS3Request(input *CreateLocationS3Input) (req *r // and Access Management (IAM) role that has the required permissions. You can // set up the required permissions by creating an IAM policy that grants the // required permissions and attaching the policy to the role. An example of -// such a policy is shown in the examples section. For more information, see -// Configuring Amazon S3 Location Settings (https://docs.aws.amazon.com/sync-service/latest/userguide/configuring-s3-locations.html) +// such a policy is shown in the examples section. +// +// For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location // in the AWS DataSync User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -419,6 +435,9 @@ func (c *DataSync) CreateLocationS3Request(input *CreateLocationS3Input) (req *r // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationS3 func (c *DataSync) CreateLocationS3(input *CreateLocationS3Input) (*CreateLocationS3Output, error) { req, out := c.CreateLocationS3Request(input) @@ -441,6 +460,89 @@ func (c *DataSync) CreateLocationS3WithContext(ctx aws.Context, input *CreateLoc return out, req.Send() } +const opCreateLocationSmb = "CreateLocationSmb" + +// CreateLocationSmbRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationSmb operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationSmb for more information on using the CreateLocationSmb +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationSmbRequest method. +// req, resp := client.CreateLocationSmbRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationSmb +func (c *DataSync) CreateLocationSmbRequest(input *CreateLocationSmbInput) (req *request.Request, output *CreateLocationSmbOutput) { + op := &request.Operation{ + Name: opCreateLocationSmb, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationSmbInput{} + } + + output = &CreateLocationSmbOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationSmb API operation for AWS DataSync. +// +// Defines a file system on an Server Message Block (SMB) server that can be +// read from or written to +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationSmb for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception is thrown when the client submits a malformed request. +// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationSmb +func (c *DataSync) CreateLocationSmb(input *CreateLocationSmbInput) (*CreateLocationSmbOutput, error) { + req, out := c.CreateLocationSmbRequest(input) + return out, req.Send() +} + +// CreateLocationSmbWithContext is the same as CreateLocationSmb with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationSmb for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) CreateLocationSmbWithContext(ctx aws.Context, input *CreateLocationSmbInput, opts ...request.Option) (*CreateLocationSmbOutput, error) { + req, out := c.CreateLocationSmbRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTask = "CreateTask" // CreateTaskRequest generates a "aws/request.Request" representing the @@ -486,16 +588,15 @@ func (c *DataSync) CreateTaskRequest(input *CreateTaskInput) (req *request.Reque // CreateTask API operation for AWS DataSync. // // Creates a task. A task is a set of two locations (source and destination) -// and a set of default OverrideOptions that you use to control the behavior -// of a task. If you don't specify default values for Options when you create -// a task, AWS DataSync populates them with safe service defaults. 
+// and a set of Options that you use to control the behavior of a task. If you +// don't specify Options when you create a task, AWS DataSync populates them +// with service defaults. // -// When you initially create a task, it enters the INITIALIZING status and then -// the CREATING status. In CREATING status, AWS DataSync attempts to mount the -// source Network File System (NFS) location. The task transitions to the AVAILABLE -// status without waiting for the destination location to mount. Instead, AWS -// DataSync mounts a destination before every task execution and then unmounts -// it after every task execution. +// When you create a task, it first enters the CREATING state. During CREATING +// AWS DataSync attempts to mount the on-premises Network File System (NFS) +// location. The task transitions to the AVAILABLE state without waiting for +// the AWS location to become mounted. If required, AWS DataSync mounts the +// AWS location before each task execution. // // If an agent that is associated with a source (NFS) location goes offline, // the task transitions to the UNAVAILABLE status. If the status of the task @@ -515,6 +616,9 @@ func (c *DataSync) CreateTaskRequest(input *CreateTaskInput) (req *request.Reque // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateTask func (c *DataSync) CreateTask(input *CreateTaskInput) (*CreateTaskOutput, error) { req, out := c.CreateTaskRequest(input) @@ -587,9 +691,6 @@ func (c *DataSync) DeleteAgentRequest(input *DeleteAgentInput) (req *request.Req // agent from your AWS account. However, it doesn't delete the agent virtual // machine (VM) from your on-premises environment. // -// After you delete an agent, you can't reactivate it and you longer pay software -// charges for it. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -601,6 +702,9 @@ func (c *DataSync) DeleteAgentRequest(input *DeleteAgentInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DeleteAgent func (c *DataSync) DeleteAgent(input *DeleteAgentInput) (*DeleteAgentOutput, error) { req, out := c.DeleteAgentRequest(input) @@ -681,6 +785,9 @@ func (c *DataSync) DeleteLocationRequest(input *DeleteLocationInput) (req *reque // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. 
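Per the revised CreateTask documentation above, a task created without Options gets service defaults. A sketch with placeholder location ARNs (the ARNs and task name are illustrative, not real resources):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Placeholder ARNs; Options is omitted so DataSync fills in service defaults.
	out, err := svc.CreateTask(&datasync.CreateTaskInput{
		SourceLocationArn:      aws.String("arn:aws:datasync:us-west-2:111122223333:location/loc-0f01451b140b2af49"),
		DestinationLocationArn: aws.String("arn:aws:datasync:us-west-2:111122223333:location/loc-aaa111bbb222ccc33"),
		Name:                   aws.String("example-task"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.TaskArn))
}
```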
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DeleteLocation func (c *DataSync) DeleteLocation(input *DeleteLocationInput) (*DeleteLocationOutput, error) { req, out := c.DeleteLocationRequest(input) @@ -761,6 +868,9 @@ func (c *DataSync) DeleteTaskRequest(input *DeleteTaskInput) (req *request.Reque // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DeleteTask func (c *DataSync) DeleteTask(input *DeleteTaskInput) (*DeleteTaskOutput, error) { req, out := c.DeleteTaskRequest(input) @@ -843,6 +953,9 @@ func (c *DataSync) DescribeAgentRequest(input *DescribeAgentInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeAgent func (c *DataSync) DescribeAgent(input *DescribeAgentInput) (*DescribeAgentOutput, error) { req, out := c.DescribeAgentRequest(input) @@ -922,6 +1035,9 @@ func (c *DataSync) DescribeLocationEfsRequest(input *DescribeLocationEfsInput) ( // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationEfs func (c *DataSync) DescribeLocationEfs(input *DescribeLocationEfsInput) (*DescribeLocationEfsOutput, error) { req, out := c.DescribeLocationEfsRequest(input) @@ -1001,6 +1117,9 @@ func (c *DataSync) DescribeLocationNfsRequest(input *DescribeLocationNfsInput) ( // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationNfs func (c *DataSync) DescribeLocationNfs(input *DescribeLocationNfsInput) (*DescribeLocationNfsOutput, error) { req, out := c.DescribeLocationNfsRequest(input) @@ -1080,6 +1199,9 @@ func (c *DataSync) DescribeLocationS3Request(input *DescribeLocationS3Input) (re // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationS3 func (c *DataSync) DescribeLocationS3(input *DescribeLocationS3Input) (*DescribeLocationS3Output, error) { req, out := c.DescribeLocationS3Request(input) @@ -1102,6 +1224,88 @@ func (c *DataSync) DescribeLocationS3WithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opDescribeLocationSmb = "DescribeLocationSmb" + +// DescribeLocationSmbRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationSmb operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationSmb for more information on using the DescribeLocationSmb +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationSmbRequest method. +// req, resp := client.DescribeLocationSmbRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationSmb +func (c *DataSync) DescribeLocationSmbRequest(input *DescribeLocationSmbInput) (req *request.Request, output *DescribeLocationSmbOutput) { + op := &request.Operation{ + Name: opDescribeLocationSmb, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationSmbInput{} + } + + output = &DescribeLocationSmbOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationSmb API operation for AWS DataSync. +// +// Returns metadata, such as the path and user information about a SMB location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationSmb for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception is thrown when the client submits a malformed request. +// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationSmb +func (c *DataSync) DescribeLocationSmb(input *DescribeLocationSmbInput) (*DescribeLocationSmbOutput, error) { + req, out := c.DescribeLocationSmbRequest(input) + return out, req.Send() +} + +// DescribeLocationSmbWithContext is the same as DescribeLocationSmb with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationSmb for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DataSync) DescribeLocationSmbWithContext(ctx aws.Context, input *DescribeLocationSmbInput, opts ...request.Option) (*DescribeLocationSmbOutput, error) { + req, out := c.DescribeLocationSmbRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeTask = "DescribeTask" // DescribeTaskRequest generates a "aws/request.Request" representing the @@ -1159,6 +1363,9 @@ func (c *DataSync) DescribeTaskRequest(input *DescribeTaskInput) (req *request.R // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeTask func (c *DataSync) DescribeTask(input *DescribeTaskInput) (*DescribeTaskOutput, error) { req, out := c.DescribeTaskRequest(input) @@ -1238,6 +1445,9 @@ func (c *DataSync) DescribeTaskExecutionRequest(input *DescribeTaskExecutionInpu // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeTaskExecution func (c *DataSync) DescribeTaskExecution(input *DescribeTaskExecutionInput) (*DescribeTaskExecutionOutput, error) { req, out := c.DescribeTaskExecutionRequest(input) @@ -1333,6 +1543,9 @@ func (c *DataSync) ListAgentsRequest(input *ListAgentsInput) (req *request.Reque // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/ListAgents func (c *DataSync) ListAgents(input *ListAgentsInput) (*ListAgentsOutput, error) { req, out := c.ListAgentsRequest(input) @@ -1366,7 +1579,7 @@ func (c *DataSync) ListAgentsWithContext(ctx aws.Context, input *ListAgentsInput // // Example iterating over at most 3 pages of a ListAgents operation. // pageNum := 0 // err := client.ListAgentsPages(params, -// func(page *ListAgentsOutput, lastPage bool) bool { +// func(page *datasync.ListAgentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1398,10 +1611,12 @@ func (c *DataSync) ListAgentsPagesWithContext(ctx aws.Context, input *ListAgents }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAgentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAgentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1473,6 +1688,9 @@ func (c *DataSync) ListLocationsRequest(input *ListLocationsInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. 
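All of the *WithContext variants generated here, DescribeLocationSmbWithContext included, panic on a nil context, so a deadline-bound context is the usual companion. A sketch with a placeholder location ARN:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// aws.Context is context.Context on modern Go, so stdlib contexts work here.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.DescribeLocationSmbWithContext(ctx, &datasync.DescribeLocationSmbInput{
		LocationArn: aws.String("arn:aws:datasync:us-west-2:111122223333:location/loc-0f01451b140b2af49"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LocationUri))
}
```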
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/ListLocations func (c *DataSync) ListLocations(input *ListLocationsInput) (*ListLocationsOutput, error) { req, out := c.ListLocationsRequest(input) @@ -1506,7 +1724,7 @@ func (c *DataSync) ListLocationsWithContext(ctx aws.Context, input *ListLocation // // Example iterating over at most 3 pages of a ListLocations operation. // pageNum := 0 // err := client.ListLocationsPages(params, -// func(page *ListLocationsOutput, lastPage bool) bool { +// func(page *datasync.ListLocationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1538,10 +1756,12 @@ func (c *DataSync) ListLocationsPagesWithContext(ctx aws.Context, input *ListLoc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListLocationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListLocationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1608,6 +1828,9 @@ func (c *DataSync) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/ListTagsForResource func (c *DataSync) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) @@ -1641,7 +1864,7 @@ func (c *DataSync) ListTagsForResourceWithContext(ctx aws.Context, input *ListTa // // Example iterating over at most 3 pages of a ListTagsForResource operation. // pageNum := 0 // err := client.ListTagsForResourcePages(params, -// func(page *ListTagsForResourceOutput, lastPage bool) bool { +// func(page *datasync.ListTagsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1673,10 +1896,12 @@ func (c *DataSync) ListTagsForResourcePagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1743,6 +1968,9 @@ func (c *DataSync) ListTaskExecutionsRequest(input *ListTaskExecutionsInput) (re // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/ListTaskExecutions func (c *DataSync) ListTaskExecutions(input *ListTaskExecutionsInput) (*ListTaskExecutionsOutput, error) { req, out := c.ListTaskExecutionsRequest(input) @@ -1776,7 +2004,7 @@ func (c *DataSync) ListTaskExecutionsWithContext(ctx aws.Context, input *ListTas // // Example iterating over at most 3 pages of a ListTaskExecutions operation. 
// pageNum := 0 // err := client.ListTaskExecutionsPages(params, -// func(page *ListTaskExecutionsOutput, lastPage bool) bool { +// func(page *datasync.ListTaskExecutionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1808,10 +2036,12 @@ func (c *DataSync) ListTaskExecutionsPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTaskExecutionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTaskExecutionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1878,6 +2108,9 @@ func (c *DataSync) ListTasksRequest(input *ListTasksInput) (req *request.Request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/ListTasks func (c *DataSync) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) { req, out := c.ListTasksRequest(input) @@ -1911,7 +2144,7 @@ func (c *DataSync) ListTasksWithContext(ctx aws.Context, input *ListTasksInput, // // Example iterating over at most 3 pages of a ListTasks operation. // pageNum := 0 // err := client.ListTasksPages(params, -// func(page *ListTasksOutput, lastPage bool) bool { +// func(page *datasync.ListTasksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1943,10 +2176,12 @@ func (c *DataSync) ListTasksPagesWithContext(ctx aws.Context, input *ListTasksIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTasksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTasksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2001,9 +2236,8 @@ func (c *DataSync) StartTaskExecutionRequest(input *StartTaskExecutionInput) (re // TaskExecution has the following transition phases: INITIALIZING | PREPARING // | TRANSFERRING | VERIFYING | SUCCESS/FAILURE. // -// For detailed information, see Task Execution in Components and Terminology -// (https://docs.aws.amazon.com/sync-service/latest/userguide/how-awssync-works.html#terminology) -// in the AWS DataSync User Guide. +// For detailed information, see the Task Execution section in the Components +// and Terminology topic in the AWS DataSync User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2016,6 +2250,9 @@ func (c *DataSync) StartTaskExecutionRequest(input *StartTaskExecutionInput) (re // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/StartTaskExecution func (c *DataSync) StartTaskExecution(input *StartTaskExecutionInput) (*StartTaskExecutionOutput, error) { req, out := c.StartTaskExecutionRequest(input) @@ -2096,6 +2333,9 @@ func (c *DataSync) TagResourceRequest(input *TagResourceInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. 
// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/TagResource func (c *DataSync) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -2176,6 +2416,9 @@ func (c *DataSync) UntagResourceRequest(input *UntagResourceInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UntagResource func (c *DataSync) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -2256,6 +2499,9 @@ func (c *DataSync) UpdateAgentRequest(input *UpdateAgentInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateAgent func (c *DataSync) UpdateAgent(input *UpdateAgentInput) (*UpdateAgentOutput, error) { req, out := c.UpdateAgentRequest(input) @@ -2336,6 +2582,9 @@ func (c *DataSync) UpdateTaskRequest(input *UpdateTaskInput) (req *request.Reque // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateTask func (c *DataSync) UpdateTask(input *UpdateTaskInput) (*UpdateTaskOutput, error) { req, out := c.UpdateTaskRequest(input) @@ -2466,8 +2715,9 @@ type CreateAgentInput struct { // for your agent in the query string parameter activationKey. It might also // include other activation-related parameters; however, these are merely defaults. // The arguments you pass to this API call determine the actual configuration - // of your agent. For more information, see Activating a Sync Agent (https://docs.aws.amazon.com/sync-service/latest/userguide/working-with-sync-agents.html#activating-sync-agent) - // in the AWS DataSync User Guide. + // of your agent. + // + // For more information, see Activating an Agent in the AWS DataSync User Guide. // // ActivationKey is a required field ActivationKey *string `type:"string" required:"true"` @@ -2476,13 +2726,35 @@ type CreateAgentInput struct { // is used to identify the agent in the console. AgentName *string `min:"1" type:"string"` - // The key-value pair that represents the tag you want to associate with the - // agent. The value can be an empty string. This value helps you manage, filter, - // and search for your agents. + // The ARNs of the security groups used to protect your data transfer task subnets. + // See CreateAgentRequest$SubnetArns. + SecurityGroupArns []*string `min:"1" type:"list"` + + // The Amazon Resource Names (ARNs) of the subnets in which DataSync will create + // elastic network interfaces for each data transfer task. The agent that runs + // a task must be private. 
When you start a task that is associated with an + // agent created in a VPC, or one that has access to an IP address in a VPC, + // then the task is also private. In this case, DataSync creates four network + // interfaces for each task in your subnet. For a data transfer to work, the + // agent must be able to route to all these four network interfaces. + SubnetArns []*string `min:"1" type:"list"` + + // The key-value pair that represents the tag that you want to associate with + // the agent. The value can be an empty string. This value helps you manage, + // filter, and search for your agents. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. Tags []*TagListEntry `type:"list"` + + // The ID of the VPC (Virtual Private Cloud) endpoint that the agent has access + // to. This is the client-side VPC endpoint, also called a PrivateLink. If you + // don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service + // Configuration (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html#create-endpoint-service) + // in the AWS VPC User Guide. + // + // VPC endpoint ID looks like this: vpce-01234d5aff67890e1. + VpcEndpointId *string `type:"string"` } // String returns the string representation @@ -2504,6 +2776,12 @@ func (s *CreateAgentInput) Validate() error { if s.AgentName != nil && len(*s.AgentName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AgentName", 1)) } + if s.SecurityGroupArns != nil && len(s.SecurityGroupArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityGroupArns", 1)) + } + if s.SubnetArns != nil && len(s.SubnetArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubnetArns", 1)) + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2533,12 +2811,30 @@ func (s *CreateAgentInput) SetAgentName(v string) *CreateAgentInput { return s } +// SetSecurityGroupArns sets the SecurityGroupArns field's value. +func (s *CreateAgentInput) SetSecurityGroupArns(v []*string) *CreateAgentInput { + s.SecurityGroupArns = v + return s +} + +// SetSubnetArns sets the SubnetArns field's value. +func (s *CreateAgentInput) SetSubnetArns(v []*string) *CreateAgentInput { + s.SubnetArns = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateAgentInput) SetTags(v []*TagListEntry) *CreateAgentInput { s.Tags = v return s } +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *CreateAgentInput) SetVpcEndpointId(v string) *CreateAgentInput { + s.VpcEndpointId = &v + return s +} + // CreateAgentResponse type CreateAgentOutput struct { _ struct{} `type:"structure"` @@ -2568,7 +2864,25 @@ func (s *CreateAgentOutput) SetAgentArn(v string) *CreateAgentOutput { type CreateLocationEfsInput struct { _ struct{} `type:"structure"` - // The subnet and security group that the Amazon EFS file system uses. + // The subnet and security group that the Amazon EFS file system uses. The security + // group that you provide needs to be able to communicate with the security + // group on the mount target in the subnet specified. + // + // The exact relationship between security group M (of the mount target) and + // security group S (which you provide for DataSync to use at this stage) is + // as follows: + // + // * Security group M (which you associate with the mount target) must allow + // inbound access for the Transmission Control Protocol (TCP) on the NFS + // port (2049) from security group S. 
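Taken together, the new SecurityGroupArns, SubnetArns, and VpcEndpointId fields above let CreateAgent express a VPC activation directly; the added validation only enforces that the lists are non-empty. A sketch in which every identifier is a placeholder (the vpce-... format is taken from the field documentation):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// All identifiers below are placeholders for illustration.
	out, err := svc.CreateAgent(&datasync.CreateAgentInput{
		ActivationKey: aws.String("ABCDE-12345-FGHIJ-67890-KLMNO"),
		AgentName:     aws.String("vpc-agent"),
		VpcEndpointId: aws.String("vpce-01234d5aff67890e1"),
		SubnetArns: []*string{
			aws.String("arn:aws:ec2:us-west-2:111122223333:subnet/subnet-0123456789abcdef0"),
		},
		SecurityGroupArns: []*string{
			aws.String("arn:aws:ec2:us-west-2:111122223333:security-group/sg-0123456789abcdef0"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AgentArn))
}
```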
You can enable inbound connections + // either by IP address (CIDR range) or security group. + // + // * Security group S (provided to DataSync to access EFS) should have a + // rule that enables outbound connections to the NFS port on one of the file + // system’s mount targets. You can enable outbound connections either by + // IP address (CIDR range) or security group. For information about security + // groups and mount targets, see Security Groups for Amazon EC2 Instances + // and Mount Targets in the Amazon EFS User Guide. // // Ec2Config is a required field Ec2Config *Ec2Config `type:"structure" required:"true"` @@ -2581,9 +2895,7 @@ type CreateLocationEfsInput struct { // A subdirectory in the location’s path. This subdirectory in the EFS file // system is used to read data from the EFS source location or write data to // the EFS destination. By default, AWS DataSync uses the root directory. - // - // Subdirectory is a required field - Subdirectory *string `type:"string" required:"true"` + Subdirectory *string `type:"string"` // The key-value pair that represents a tag that you want to add to the resource. // The value can be an empty string. This value helps you manage, filter, and @@ -2611,9 +2923,6 @@ func (s *CreateLocationEfsInput) Validate() error { if s.EfsFilesystemArn == nil { invalidParams.Add(request.NewErrParamRequired("EfsFilesystemArn")) } - if s.Subdirectory == nil { - invalidParams.Add(request.NewErrParamRequired("Subdirectory")) - } if s.Ec2Config != nil { if err := s.Ec2Config.Validate(); err != nil { invalidParams.AddNested("Ec2Config", err.(request.ErrInvalidParams)) @@ -2689,6 +2998,9 @@ func (s *CreateLocationEfsOutput) SetLocationArn(v string) *CreateLocationEfsOut type CreateLocationNfsInput struct { _ struct{} `type:"structure"` + // The NFS mount options that DataSync can use to mount your NFS share. + MountOptions *NfsMountOptions `type:"structure"` + // Contains a list of Amazon Resource Names (ARNs) of agents that are used to // connect to an NFS server. // @@ -2719,11 +3031,12 @@ type CreateLocationNfsInput struct { // To transfer all the data in the folder you specified, DataSync needs to have // permissions to read all the data. To ensure this, either configure the NFS // export with no_root_squash, or ensure that the permissions for all of the - // files that you want sync allow read access for all users. Doing either enables - // the agent to read the files. For the agent to access directories, you must - // additionally enable all execute access. For information about NFS export - // configuration, see 18.7. The /etc/exports Configuration File (https://www.centos.org/docs/5/html/Deployment_Guide-en-US/s1-nfs-server-config-exports.html) - // in the Centos documentation. + // files that you want DataSync to transfer allow read access for all users. Doing either + // enables the agent to read the files. For the agent to access directories, + // you must additionally enable all execute access. + // + // For information about NFS export configuration, see 18.7. The /etc/exports + // Configuration File in the Red Hat Enterprise Linux documentation. // // Subdirectory is a required field Subdirectory *string `type:"string" required:"true"` @@ -2777,6 +3090,12 @@ func (s *CreateLocationNfsInput) Validate() error { return nil } +// SetMountOptions sets the MountOptions field's value.
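With the MountOptions field introduced above, an NFS location can pin a protocol version rather than auto-negotiate. A sketch assuming an existing agent and export; the hostname, path, and ARN are placeholders, and NFS4_0 is assumed to be one of the generated NfsVersion enum values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Placeholder server, export path, and agent ARN.
	out, err := svc.CreateLocationNfs(&datasync.CreateLocationNfsInput{
		ServerHostname: aws.String("nfs.example.com"),
		Subdirectory:   aws.String("/exports/data"),
		OnPremConfig: &datasync.OnPremConfig{
			AgentArns: []*string{aws.String("arn:aws:datasync:us-west-2:111122223333:agent/agent-0123456789abcdef0")},
		},
		MountOptions: &datasync.NfsMountOptions{Version: aws.String("NFS4_0")}, // assumed enum value
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LocationArn))
}
```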
+func (s *CreateLocationNfsInput) SetMountOptions(v *NfsMountOptions) *CreateLocationNfsInput { + s.MountOptions = v + return s +} + // SetOnPremConfig sets the OnPremConfig field's value. func (s *CreateLocationNfsInput) SetOnPremConfig(v *OnPremConfig) *CreateLocationNfsInput { s.OnPremConfig = v @@ -2836,18 +3155,25 @@ type CreateLocationS3Input struct { S3BucketArn *string `type:"string" required:"true"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that is used to access an Amazon S3 bucket. For detailed information - // about using such a role, see Components and Terminology (https://alpha-aws-docs.aws.amazon.com/sync-service/latest/userguide/create-locations-cli.html#create-location-s3-cli) - // in the AWS DataSync User Guide. + // (IAM) role that is used to access an Amazon S3 bucket. + // + // For detailed information about using such a role, see Creating a Location + // for Amazon S3 in the AWS DataSync User Guide. // // S3Config is a required field S3Config *S3Config `type:"structure" required:"true"` + // The Amazon S3 storage class that you want to store your files in when this + // location is used as a task destination. For more information about S3 storage + // classes, see Amazon S3 Storage Classes (https://aws.amazon.com/s3/storage-classes/) + // in the Amazon Simple Storage Service Developer Guide. Some storage classes + // have behaviors that can affect your S3 storage cost. For detailed information, + // see using-storage-classes. + S3StorageClass *string `type:"string" enum:"S3StorageClass"` + // A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is // used to read data from the S3 source location or write data to the S3 destination. - // - // Subdirectory is a required field - Subdirectory *string `type:"string" required:"true"` + Subdirectory *string `type:"string"` // The key-value pair that represents the tag that you want to add to the location. // The value can be an empty string. We recommend using tags to name your resources. @@ -2873,9 +3199,6 @@ func (s *CreateLocationS3Input) Validate() error { if s.S3Config == nil { invalidParams.Add(request.NewErrParamRequired("S3Config")) } - if s.Subdirectory == nil { - invalidParams.Add(request.NewErrParamRequired("Subdirectory")) - } if s.S3Config != nil { if err := s.S3Config.Validate(); err != nil { invalidParams.AddNested("S3Config", err.(request.ErrInvalidParams)) @@ -2910,6 +3233,12 @@ func (s *CreateLocationS3Input) SetS3Config(v *S3Config) *CreateLocationS3Input return s } +// SetS3StorageClass sets the S3StorageClass field's value. +func (s *CreateLocationS3Input) SetS3StorageClass(v string) *CreateLocationS3Input { + s.S3StorageClass = &v + return s +} + // SetSubdirectory sets the Subdirectory field's value. func (s *CreateLocationS3Input) SetSubdirectory(v string) *CreateLocationS3Input { s.Subdirectory = &v @@ -2947,55 +3276,244 @@ func (s *CreateLocationS3Output) SetLocationArn(v string) *CreateLocationS3Outpu return s } -// CreateTaskRequest -type CreateTaskInput struct { +// CreateLocationSmbRequest +type CreateLocationSmbInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is - // used to monitor and log events in the task. For more information on these - // groups, see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) - // in the Amazon CloudWatch User Guide. 
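Two S3-location changes above are worth reading together: Subdirectory is no longer required, and the new S3StorageClass selects the destination storage class. A sketch with placeholder bucket and role ARNs; BucketAccessRoleArn is assumed from the S3Config shape, and STANDARD_IA is assumed to be one of the generated S3StorageClass enum values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Placeholder bucket and role; Subdirectory is omitted now that it is optional.
	out, err := svc.CreateLocationS3(&datasync.CreateLocationS3Input{
		S3BucketArn: aws.String("arn:aws:s3:::example-bucket"),
		S3Config: &datasync.S3Config{
			BucketAccessRoleArn: aws.String("arn:aws:iam::111122223333:role/datasync-s3-access"), // assumed field name
		},
		S3StorageClass: aws.String("STANDARD_IA"), // assumed enum value
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LocationArn))
}
```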
+ // The Amazon Resource Names (ARNs) of agents to use for a Server Message Block + // (SMB) location. // - // For more information about how to useCloudWatchLogs with DataSync, see Monitoring - // Your Task (https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html). - CloudWatchLogGroupArn *string `type:"string"` + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` - // The Amazon Resource Name (ARN) of an AWS storage resource's location. - // - // DestinationLocationArn is a required field - DestinationLocationArn *string `type:"string" required:"true"` + // The name of the Windows domain that the SMB server belongs to. + Domain *string `type:"string"` - // The name of a task. This value is a text reference that is used to identify - // the task in the console. - Name *string `min:"1" type:"string"` + // The mount options used by DataSync to access the SMB server. + MountOptions *SmbMountOptions `type:"structure"` - // The set of configuration options that control the behavior of a single execution - // of the task that occurs when you call StartTaskExecution. You can configure - // these options to preserve metadata such as user ID (UID) and group ID (GID), - // file permissions, data integrity verification, and so on. + // The password of the user who can mount the share, has the permissions to + // access files and folders in the SMB share. // - // For each individual task execution, you can override these options by specifying - // the OverrideOptions before starting a the task execution. For more information, - // see the operation. - Options *Options `type:"structure"` + // Password is a required field + Password *string `type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the source location for the task. + // The name of the SMB server. This value is the IP address or Domain Name Service + // (DNS) name of the SMB server. An agent that is installed on-premises uses + // this hostname to mount the SMB server in a network. // - // SourceLocationArn is a required field - SourceLocationArn *string `type:"string" required:"true"` + // This name must either be DNS-compliant or must be an IP version 4 (IPv4) + // address. + // + // ServerHostname is a required field + ServerHostname *string `type:"string" required:"true"` - // The key-value pair that represents the tag that you want to add to the resource. - // The value can be an empty string. + // The subdirectory in the SMB file system that is used to read data from the + // SMB source location or write data to the SMB destination. The SMB path should + // be a path that's exported by the SMB server, or a subdirectory of that path. + // The path should be such that it can be mounted by other SMB clients in your + // network. + // + // To transfer all the data in the folder you specified, DataSync needs to have + // permissions to mount the SMB share, as well as to access all the data in + // that share. To ensure this, either ensure that the user/password specified + // belongs to the user who can mount the share, and who has the appropriate + // permissions for all of the files and directories that you want DataSync to + // access, or use credentials of a member of the Backup Operators group to mount + // the share. Doing either enables the agent to access the data. For the agent + // to access directories, you must additionally enable all execute access.
+ // + // Subdirectory is a required field + Subdirectory *string `type:"string" required:"true"` + + // The key-value pair that represents the tag that you want to add to the location. + // The value can be an empty string. We recommend using tags to name your resources. Tags []*TagListEntry `type:"list"` + + // The user who can mount the share, has the permissions to access files and + // folders in the SMB share. + // + // User is a required field + User *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateTaskInput) String() string { +func (s CreateLocationSmbInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTaskInput) GoString() string { +func (s CreateLocationSmbInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocationSmbInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationSmbInput"} + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.ServerHostname == nil { + invalidParams.Add(request.NewErrParamRequired("ServerHostname")) + } + if s.Subdirectory == nil { + invalidParams.Add(request.NewErrParamRequired("Subdirectory")) + } + if s.User == nil { + invalidParams.Add(request.NewErrParamRequired("User")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationSmbInput) SetAgentArns(v []*string) *CreateLocationSmbInput { + s.AgentArns = v + return s +} + +// SetDomain sets the Domain field's value. +func (s *CreateLocationSmbInput) SetDomain(v string) *CreateLocationSmbInput { + s.Domain = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *CreateLocationSmbInput) SetMountOptions(v *SmbMountOptions) *CreateLocationSmbInput { + s.MountOptions = v + return s +} + +// SetPassword sets the Password field's value. +func (s *CreateLocationSmbInput) SetPassword(v string) *CreateLocationSmbInput { + s.Password = &v + return s +} + +// SetServerHostname sets the ServerHostname field's value. +func (s *CreateLocationSmbInput) SetServerHostname(v string) *CreateLocationSmbInput { + s.ServerHostname = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationSmbInput) SetSubdirectory(v string) *CreateLocationSmbInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationSmbInput) SetTags(v []*TagListEntry) *CreateLocationSmbInput { + s.Tags = v + return s +} + +// SetUser sets the User field's value. +func (s *CreateLocationSmbInput) SetUser(v string) *CreateLocationSmbInput { + s.User = &v + return s +} + +// CreateLocationSmbResponse +type CreateLocationSmbOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the source SMB file system location that + // is created. 
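The Validate method above spells out the required fields for an SMB location: AgentArns, Password, ServerHostname, Subdirectory, and User. A sketch with placeholder host, share, credentials, and agent ARN; Domain and MountOptions stay unset, which validation permits:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Placeholder hostname, share path, credentials, and agent ARN.
	out, err := svc.CreateLocationSmb(&datasync.CreateLocationSmbInput{
		ServerHostname: aws.String("fileserver.example.com"),
		Subdirectory:   aws.String("/exported/share"),
		User:           aws.String("smb-user"),
		Password:       aws.String("smb-password"),
		AgentArns: []*string{
			aws.String("arn:aws:datasync:us-west-2:111122223333:agent/agent-0123456789abcdef0"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LocationArn))
}
```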
+ LocationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLocationSmbOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationSmbOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationSmbOutput) SetLocationArn(v string) *CreateLocationSmbOutput { + s.LocationArn = &v + return s +} + +// CreateTaskRequest +type CreateTaskInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is + // used to monitor and log events in the task. + // + // For more information on these groups, see Working with Log Groups and Log + // Streams in the Amazon CloudWatch User Guide. + // + // For more information about how to use CloudWatch Logs with DataSync, see + // Monitoring Your Task in the AWS DataSync User Guide. + CloudWatchLogGroupArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of an AWS storage resource's location. + // + // DestinationLocationArn is a required field + DestinationLocationArn *string `type:"string" required:"true"` + + // A list of filter rules that determines which files to exclude from a task. + // The list should contain a single filter string that consists of the patterns + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example, + // "/folder1|/folder2" + Excludes []*FilterRule `type:"list"` + + // The name of a task. This value is a text reference that is used to identify + // the task in the console. + Name *string `min:"1" type:"string"` + + // The set of configuration options that control the behavior of a single execution + // of the task that occurs when you call StartTaskExecution. You can configure + // these options to preserve metadata such as user ID (UID) and group ID (GID), + // file permissions, data integrity verification, and so on. + // + // For each individual task execution, you can override these options by specifying + // the OverrideOptions before starting the task execution. For more information, + // see the operation. + Options *Options `type:"structure"` + + // The Amazon Resource Name (ARN) of the source location for the task. + // + // SourceLocationArn is a required field + SourceLocationArn *string `type:"string" required:"true"` + + // The key-value pair that represents the tag that you want to add to the resource. + // The value can be an empty string. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation +func (s CreateTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTaskInput) GoString() string { + return s.String() } @@ -3045,6 +3563,12 @@ func (s *CreateTaskInput) SetDestinationLocationArn(v string) *CreateTaskInput { return s } +// SetExcludes sets the Excludes field's value. +func (s *CreateTaskInput) SetExcludes(v []*FilterRule) *CreateTaskInput { + s.Excludes = v + return s +} + // SetName sets the Name field's value. func (s *CreateTaskInput) SetName(v string) *CreateTaskInput { s.Name = &v @@ -3302,12 +3826,19 @@ type DescribeAgentOutput struct { // The time that the agent was activated (that is, created in your account). CreationTime *time.Time `type:"timestamp"` - // The time that the agent was last connected. + // The type of endpoint that your agent is connected to.
If the endpoint is + // a VPC endpoint, the agent is not accessible over the public Internet. + EndpointType *string `type:"string" enum:"EndpointType"` + + // The time that the agent last connected to DataSync. + LastConnectionTime *time.Time `type:"timestamp"` // The name of the agent. Name *string `min:"1" type:"string"` + // The subnet and the security group that DataSync used to access a VPC endpoint. + PrivateLinkConfig *PrivateLinkConfig `type:"structure"` + // The status of the agent. If the status is ONLINE, then the agent is configured // properly and is available to use. The Running status is the normal running // status for an agent. If the status is OFFLINE, the agent's VM is turned off @@ -3338,6 +3869,12 @@ func (s *DescribeAgentOutput) SetCreationTime(v time.Time) *DescribeAgentOutput return s } +// SetEndpointType sets the EndpointType field's value. +func (s *DescribeAgentOutput) SetEndpointType(v string) *DescribeAgentOutput { + s.EndpointType = &v + return s +} + // SetLastConnectionTime sets the LastConnectionTime field's value. func (s *DescribeAgentOutput) SetLastConnectionTime(v time.Time) *DescribeAgentOutput { s.LastConnectionTime = &v @@ -3350,6 +3887,12 @@ func (s *DescribeAgentOutput) SetName(v string) *DescribeAgentOutput { return s } +// SetPrivateLinkConfig sets the PrivateLinkConfig field's value. +func (s *DescribeAgentOutput) SetPrivateLinkConfig(v *PrivateLinkConfig) *DescribeAgentOutput { + s.PrivateLinkConfig = v + return s +} + // SetStatus sets the Status field's value. func (s *DescribeAgentOutput) SetStatus(v string) *DescribeAgentOutput { s.Status = &v @@ -3402,27 +3945,10 @@ type DescribeLocationEfsOutput struct { // The time that the EFS location was created. CreationTime *time.Time `type:"timestamp"` - // The subnet and the security group that the target Amazon EFS file system - // uses. The subnet must have at least one mount target for that file system. - // The security group that you provide needs to be able to communicate with - // the security group on the mount target in the subnet specified. - // - // The exact relationship between security group M (of the mount target) and - // security group S (which you provide for DataSync to use at this stage) is - // as follows: - // - // * Security group M (which you associate with the mount target) must allow - // inbound access for the Transmission Control Protocol (TCP) on the NFS - // port (2049) from security group S. You can enable inbound connections - // either by IP address (CIDR range) or security group. - // - // * Security group S (provided to DataSync to access EFS) should have a - // rule that enables outbound connections to the NFS port on one of the file - // system’s mount targets. You can enable outbound connections either by - // IP address (CIDR range) or security group. For information about security - // groups and mount targets, see Security Groups for Amazon EC2 Instances - // and Mount Targets (https://docs.aws.amazon.com/efs/latest/ug/security-considerations.html#network-access) - // in the Amazon EFS User Guide. + // The subnet and the security group that DataSync uses to access the target EFS + // file system. The subnet must have at least one mount target for that file + // system. The security group that you provide needs to be able to communicate + // with the security group on the mount target in the subnet specified. Ec2Config *Ec2Config `type:"structure"` // The Amazon resource Name (ARN) of the EFS location that was described.
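The DescribeAgent response above now reports how an agent is reachable. A sketch that branches on the endpoint type; the PRIVATE_LINK literal and the VpcEndpointId field on PrivateLinkConfig are assumptions about the generated enum and struct, and the agent ARN is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	out, err := svc.DescribeAgent(&datasync.DescribeAgentInput{
		AgentArn: aws.String("arn:aws:datasync:us-west-2:111122223333:agent/agent-0123456789abcdef0"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Agents behind a VPC endpoint are not reachable over the public Internet.
	if aws.StringValue(out.EndpointType) == "PRIVATE_LINK" && out.PrivateLinkConfig != nil {
		fmt.Println(aws.StringValue(out.PrivateLinkConfig.VpcEndpointId)) // assumed field
	}
}
```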
@@ -3518,6 +4044,9 @@ type DescribeLocationNfsOutput struct { // The URL of the source NFS location that was described. LocationUri *string `type:"string"` + // The NFS mount options that DataSync used to mount your NFS share. + MountOptions *NfsMountOptions `type:"structure"` + // A list of Amazon Resource Names (ARNs) of agents to use for a Network File // System (NFS) location. OnPremConfig *OnPremConfig `type:"structure"` @@ -3551,6 +4080,12 @@ func (s *DescribeLocationNfsOutput) SetLocationUri(v string) *DescribeLocationNf return s } +// SetMountOptions sets the MountOptions field's value. +func (s *DescribeLocationNfsOutput) SetMountOptions(v *NfsMountOptions) *DescribeLocationNfsOutput { + s.MountOptions = v + return s +} + // SetOnPremConfig sets the OnPremConfig field's value. func (s *DescribeLocationNfsOutput) SetOnPremConfig(v *OnPremConfig) *DescribeLocationNfsOutput { s.OnPremConfig = v @@ -3610,10 +4145,19 @@ type DescribeLocationS3Output struct { LocationUri *string `type:"string"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that is used to access an Amazon S3 bucket. For detailed information - // about using such a role, see Components and Terminology (https://alpha-aws-docs.aws.amazon.com/sync-service/latest/userguide/create-locations-cli.html#create-location-s3-cli) - // in the AWS DataSync User Guide. + // (IAM) role that is used to access an Amazon S3 bucket. + // + // For detailed information about using such a role, see Creating a Location + // for Amazon S3 in the AWS DataSync User Guide. S3Config *S3Config `type:"structure"` + + // The Amazon S3 storage class that you chose to store your files in when this + // location is used as a task destination. For more information about S3 storage + // classes, see Amazon S3 Storage Classes (https://aws.amazon.com/s3/storage-classes/) + // in the Amazon Simple Storage Service Developer Guide. Some storage classes + // have behaviors that can affect your S3 storage cost. For detailed information, + // see using-storage-classes. + S3StorageClass *string `type:"string" enum:"S3StorageClass"` } // String returns the string representation @@ -3650,6 +4194,132 @@ func (s *DescribeLocationS3Output) SetS3Config(v *S3Config) *DescribeLocationS3O return s } +// SetS3StorageClass sets the S3StorageClass field's value. +func (s *DescribeLocationS3Output) SetS3StorageClass(v string) *DescribeLocationS3Output { + s.S3StorageClass = &v + return s +} + +// DescribeLocationSmbRequest +type DescribeLocationSmbInput struct { + _ struct{} `type:"structure"` + + // The Amazon resource Name (ARN) of the SMB location to describe. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLocationSmbInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationSmbInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationSmbInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationSmbInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. 
+func (s *DescribeLocationSmbInput) SetLocationArn(v string) *DescribeLocationSmbInput { + s.LocationArn = &v + return s +} + +// DescribeLocationSmbResponse +type DescribeLocationSmbOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARNs) of the agents that can connect to the + // source SMB file system location. + AgentArns []*string `min:"1" type:"list"` + + // The time that the SMB location was created. + CreationTime *time.Time `type:"timestamp"` + + // The name of the Windows domain that the SMB server belongs to. + Domain *string `type:"string"` + + // The Amazon resource Name (ARN) of the SMB location that was described. + LocationArn *string `type:"string"` + + // The URL of the source SMB location that was described. + LocationUri *string `type:"string"` + + // The mount options that are available for DataSync to use to access an SMB + // location. + MountOptions *SmbMountOptions `type:"structure"` + + // The user who can mount the share and has the permissions to access files + // and folders in the SMB share. + User *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocationSmbOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationSmbOutput) GoString() string { + return s.String() +} + +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationSmbOutput) SetAgentArns(v []*string) *DescribeLocationSmbOutput { + s.AgentArns = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationSmbOutput) SetCreationTime(v time.Time) *DescribeLocationSmbOutput { + s.CreationTime = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *DescribeLocationSmbOutput) SetDomain(v string) *DescribeLocationSmbOutput { + s.Domain = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationSmbOutput) SetLocationArn(v string) *DescribeLocationSmbOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationSmbOutput) SetLocationUri(v string) *DescribeLocationSmbOutput { + s.LocationUri = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *DescribeLocationSmbOutput) SetMountOptions(v *SmbMountOptions) *DescribeLocationSmbOutput { + s.MountOptions = v + return s +} + +// SetUser sets the User field's value. +func (s *DescribeLocationSmbOutput) SetUser(v string) *DescribeLocationSmbOutput { + s.User = &v + return s +} + // DescribeTaskExecutionRequest type DescribeTaskExecutionInput struct { _ struct{} `type:"structure"` @@ -3710,6 +4380,12 @@ type DescribeTaskExecutionOutput struct { // and finding the delta that needs to be transferred. EstimatedFilesToTransfer *int64 `type:"long"` + // A list of filter rules that determines which files to exclude from a task. + // The list should contain a single filter string that consists of the patterns + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example: + // "/folder1|/folder2" + Excludes []*FilterRule `type:"list"` + // The actual number of files that was transferred over the network. This value // is calculated and updated on an ongoing basis during the TRANSFERRING phase. // It's updated periodically when each file is read from the source and sent @@ -3722,6 +4398,12 @@ type DescribeTaskExecutionOutput struct { // execution.
FilesTransferred *int64 `type:"long"` + // A list of filter rules that determines which files to include when running + // a task. The list should contain a single filter string that consists of the + // patterns to include. The patterns are delimited by "|" (that is, a pipe), + // for example: "/folder1|/folder2" + Includes []*FilterRule `type:"list"` + // Represents the options that are available to control the behavior of a StartTaskExecution // operation. Behavior includes preserving metadata such as user ID (UID), group // ID (GID), and file permissions, and also overwriting files in the destination, @@ -3739,16 +4421,18 @@ type DescribeTaskExecutionOutput struct { // The time that the task execution was started. StartTime *time.Time `type:"timestamp"` - // The status of the task. For detailed information about sync statuses, see - // Understanding Sync Task Statuses (https://docs.aws.amazon.com/sync-service/latest/userguide/understand-sync-task-statuses.html). + // The status of the task execution. + // + // For detailed information about task execution statuses, see Understanding + // Task Statuses in the AWS DataSync User Guide. Status *string `type:"string" enum:"TaskExecutionStatus"` // The Amazon Resource Name (ARN) of the task execution that was described. // TaskExecutionArn is hierarchical and includes TaskArn for the task that was // executed. // - // For example, a TaskExecution value with the ARN arn:aws:sync:us-east-1:209870788375:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b - // executed the task with the ARN arn:aws:sync:us-east-1:209870788375:task/task-0208075f79cedf4a2. + // For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b + // executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2. TaskExecutionArn *string `type:"string"` } @@ -3786,12 +4470,24 @@ func (s *DescribeTaskExecutionOutput) SetEstimatedFilesToTransfer(v int64) *Desc return s } +// SetExcludes sets the Excludes field's value. +func (s *DescribeTaskExecutionOutput) SetExcludes(v []*FilterRule) *DescribeTaskExecutionOutput { + s.Excludes = v + return s +} + // SetFilesTransferred sets the FilesTransferred field's value. func (s *DescribeTaskExecutionOutput) SetFilesTransferred(v int64) *DescribeTaskExecutionOutput { s.FilesTransferred = &v return s } +// SetIncludes sets the Includes field's value. +func (s *DescribeTaskExecutionOutput) SetIncludes(v []*FilterRule) *DescribeTaskExecutionOutput { + s.Includes = v + return s +} + // SetOptions sets the Options field's value. func (s *DescribeTaskExecutionOutput) SetOptions(v *Options) *DescribeTaskExecutionOutput { s.Options = v @@ -3866,9 +4562,10 @@ type DescribeTaskOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was - // used to monitor and log events in the task. For more information on these - // groups, see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) - // in the Amazon CloudWatch User Guide. + // used to monitor and log events in the task. + // + // For more information on these groups, see Working with Log Groups and Log + // Streams in the Amazon CloudWatch User Guide. CloudWatchLogGroupArn *string `type:"string"` // The time that the task was created. 
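The Excludes and Includes fields introduced above are lists of the new FilterRule type (defined in a later hunk of this file), each carrying a single pipe-delimited pattern string. A hedged sketch of wiring one into CreateTask, reusing the aws and datasync imports from the previous example; the location ARNs and patterns are placeholders:

// excludeFolders creates a task that skips /folder1 and /folder2. The only
// rule type the service supports is SIMPLE_PATTERN, and all patterns live in
// one "|"-delimited string.
func excludeFolders(svc *datasync.DataSync, srcArn, dstArn string) (*datasync.CreateTaskOutput, error) {
	excludes := []*datasync.FilterRule{{
		FilterType: aws.String(datasync.FilterTypeSimplePattern),
		Value:      aws.String("/folder1|/folder2"),
	}}

	return svc.CreateTask(&datasync.CreateTaskInput{
		SourceLocationArn:      aws.String(srcArn),
		DestinationLocationArn: aws.String(dstArn),
		Excludes:               excludes,
	})
}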
@@ -3880,6 +4577,10 @@ type DescribeTaskOutput struct { // The Amazon Resource Name (ARN) of the AWS storage resource's location. DestinationLocationArn *string `type:"string"` + // The Amazon Resource Names (ARNs) of the destination ENIs (Elastic Network Interfaces) + // that were created for your subnet. + DestinationNetworkInterfaceArns []*string `type:"list"` + // Errors that AWS DataSync encountered during execution of the task. You can // use this error code to help troubleshoot issues. ErrorCode *string `type:"string"` @@ -3888,6 +4589,12 @@ type DescribeTaskOutput struct { // You can use this information to help troubleshoot issues. ErrorDetail *string `type:"string"` + // A list of filter rules that determines which files to exclude from a task. + // The list should contain a single filter string that consists of the patterns + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example: + // "/folder1|/folder2" + Excludes []*FilterRule `type:"list"` + // The name of the task that was described. Name *string `min:"1" type:"string"` @@ -3903,8 +4610,14 @@ type DescribeTaskOutput struct { // The Amazon Resource Name (ARN) of the source file system's location. SourceLocationArn *string `type:"string"` - // The status of the task that was described. For detailed information about - // sync statuses, see Understanding Sync Task Statuses (https://docs.aws.amazon.com/sync-service/latest/userguide/understand-sync-task-statuses.html). + // The Amazon Resource Names (ARNs) of the source ENIs (Elastic Network Interfaces) + // that were created for your subnet. + SourceNetworkInterfaceArns []*string `type:"list"` + + // The status of the task that was described. + // + // For detailed information about task execution statuses, see Understanding + // Task Statuses in the AWS DataSync User Guide. Status *string `type:"string" enum:"TaskStatus"` // The Amazon Resource Name (ARN) of the task that was described. @@ -3945,6 +4658,12 @@ func (s *DescribeTaskOutput) SetDestinationLocationArn(v string) *DescribeTaskOu return s } +// SetDestinationNetworkInterfaceArns sets the DestinationNetworkInterfaceArns field's value. +func (s *DescribeTaskOutput) SetDestinationNetworkInterfaceArns(v []*string) *DescribeTaskOutput { + s.DestinationNetworkInterfaceArns = v + return s +} + // SetErrorCode sets the ErrorCode field's value. func (s *DescribeTaskOutput) SetErrorCode(v string) *DescribeTaskOutput { s.ErrorCode = &v @@ -3957,6 +4676,12 @@ func (s *DescribeTaskOutput) SetErrorDetail(v string) *DescribeTaskOutput { return s } +// SetExcludes sets the Excludes field's value. +func (s *DescribeTaskOutput) SetExcludes(v []*FilterRule) *DescribeTaskOutput { + s.Excludes = v + return s +} + // SetName sets the Name field's value. func (s *DescribeTaskOutput) SetName(v string) *DescribeTaskOutput { s.Name = &v @@ -3975,6 +4700,12 @@ func (s *DescribeTaskOutput) SetSourceLocationArn(v string) *DescribeTaskOutput return s } +// SetSourceNetworkInterfaceArns sets the SourceNetworkInterfaceArns field's value. +func (s *DescribeTaskOutput) SetSourceNetworkInterfaceArns(v []*string) *DescribeTaskOutput { + s.SourceNetworkInterfaceArns = v + return s +} + // SetStatus sets the Status field's value. func (s *DescribeTaskOutput) SetStatus(v string) *DescribeTaskOutput { s.Status = &v @@ -3987,27 +4718,10 @@ func (s *DescribeTaskOutput) SetTaskArn(v string) *DescribeTaskOutput { return s } -// The subnet and the security group that the target Amazon EFS file system -// uses.
The subnet must have at least one mount target for that file system. -// The security group that you provide needs to be able to communicate with -// the security group on the mount target in the subnet specified. -// -// The exact relationship between security group M (of the mount target) and -// security group S (which you provide for DataSync to use at this stage) is -// as follows: -// -// * Security group M (which you associate with the mount target) must allow -// inbound access for the Transmission Control Protocol (TCP) on the NFS -// port (2049) from security group S. You can enable inbound connections -// either by IP address (CIDR range) or security group. -// -// * Security group S (provided to DataSync to access EFS) should have a -// rule that enables outbound connections to the NFS port on one of the file -// system’s mount targets. You can enable outbound connections either by -// IP address (CIDR range) or security group. For information about security -// groups and mount targets, see Security Groups for Amazon EC2 Instances -// and Mount Targets (https://docs.aws.amazon.com/efs/latest/ug/security-considerations.html#network-access) -// in the Amazon EFS User Guide. +// The subnet and the security group that DataSync uses to access the target +// EFS file system. The subnet must have at least one mount target for that +// file system. The security group that you provide needs to be able to communicate +// with the security group on the mount target in the subnet specified. type Ec2Config struct { _ struct{} `type:"structure"` @@ -4017,7 +4731,8 @@ type Ec2Config struct { // SecurityGroupArns is a required field SecurityGroupArns []*string `min:"1" type:"list" required:"true"` - // The ARN of the subnet that the Amazon EC2 resource belongs in. + // The ARN of the subnet and the security group that DataSync uses to access + // the target EFS file system. // // SubnetArn is a required field SubnetArn *string `type:"string" required:"true"` @@ -4064,6 +4779,42 @@ func (s *Ec2Config) SetSubnetArn(v string) *Ec2Config { return s } +// Specifies which files, folders, and objects to include or exclude when transferring +// files from source to destination. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The type of filter rule to apply. AWS DataSync only supports the SIMPLE_PATTERN + // rule type. + FilterType *string `type:"string" enum:"FilterType"` + + // A single filter string that consists of the patterns to include or exclude. + // The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetFilterType sets the FilterType field's value. +func (s *FilterRule) SetFilterType(v string) *FilterRule { + s.FilterType = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + // ListAgentsRequest type ListAgentsInput struct { _ struct{} `type:"structure"` @@ -4489,6 +5240,47 @@ func (s *LocationListEntry) SetLocationUri(v string) *LocationListEntry { return s } +// Represents the mount options that are available for DataSync to access an +// NFS location.
+type NfsMountOptions struct { + _ struct{} `type:"structure"` + + // The specific NFS version that you want DataSync to use to mount your NFS + // share. If the server refuses to use the version specified, the sync will + // fail. If you don't specify a version, DataSync defaults to AUTOMATIC. That + // is, DataSync automatically selects a version based on negotiation with the + // NFS server. + // + // You can specify the following NFS versions: + // + // * NFSv3 (https://tools.ietf.org/html/rfc1813) - stateless protocol version + // that allows for asynchronous writes on the server. + // + // * NFSv4.0 (https://tools.ietf.org/html/rfc3530) - stateful, firewall-friendly + // protocol version that supports delegations and pseudo filesystems. + // + // * NFSv4.1 (https://tools.ietf.org/html/rfc5661) - stateful protocol version + // that supports sessions, directory delegations, and parallel data processing. + // Version 4.1 also includes all features available in version 4.0. + Version *string `type:"string" enum:"NfsVersion"` +} + +// String returns the string representation +func (s NfsMountOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NfsMountOptions) GoString() string { + return s.String() +} + +// SetVersion sets the Version field's value. +func (s *NfsMountOptions) SetVersion(v string) *NfsMountOptions { + s.Version = &v + return s +} + // A list of Amazon Resource Names (ARNs) of agents to use for a Network File // System (NFS) location. type OnPremConfig struct { @@ -4589,6 +5381,17 @@ type Options struct { // If Mtime is set to NONE, Atime must also be set to NONE. Mtime *string `type:"string" enum:"Mtime"` + // A value that determines whether files at the destination should be overwritten + // or preserved when copying files. If set to NEVER, a destination file will + // not be replaced by a source file, even if the destination file differs from + // the source file. If you modify files in the destination and you sync the + // files, you can use this value to protect against overwriting those changes. + // + // Some storage classes have specific behaviors that can affect your S3 storage + // cost. For detailed information, see using-storage-classes in the AWS DataSync + // User Guide. + OverwriteMode *string `type:"string" enum:"OverwriteMode"` + // A value that determines which users or groups can access a file for a specific // purpose such as reading, writing, or execution of the file. // @@ -4602,7 +5405,10 @@ type Options struct { PosixPermissions *string `type:"string" enum:"PosixPermissions"` // A value that specifies whether files in the destination that don't exist - // in the source file system should be preserved. + // in the source file system should be preserved. This option can affect your + // storage cost. If your task deletes objects, you might incur minimum storage + // duration charges for certain storage classes. For detailed information, see + // using-storage-classes in the AWS DataSync User Guide. // // Default value: PRESERVE. // @@ -4626,6 +5432,13 @@ type Options struct { // currently supported for Amazon EFS. PreserveDevices *string `type:"string" enum:"PreserveDevices"` + // A value that determines whether tasks should be queued before executing the + // tasks. If set to Enabled, the tasks will be queued. The default is Enabled. + // + // If you use the same agent to run multiple tasks, you can enable the tasks + // to run in series. For more information, see task-queue.
+ TaskQueueing *string `type:"string" enum:"TaskQueueing"` + // The user ID (UID) of the file's owner. // // Default value: INT_VALUE. This preserves the integer value of the ID. @@ -4642,6 +5455,8 @@ type Options struct { // // POINT_IN_TIME_CONSISTENT: Perform verification (recommended). // + // ONLY_FILES_TRANSFERRED: Perform verification on only files that were transferred. + // // NONE: Skip verification. VerifyMode *string `type:"string" enum:"VerifyMode"` } @@ -4693,6 +5508,12 @@ func (s *Options) SetMtime(v string) *Options { return s } +// SetOverwriteMode sets the OverwriteMode field's value. +func (s *Options) SetOverwriteMode(v string) *Options { + s.OverwriteMode = &v + return s +} + // SetPosixPermissions sets the PosixPermissions field's value. func (s *Options) SetPosixPermissions(v string) *Options { s.PosixPermissions = &v @@ -4711,6 +5532,12 @@ func (s *Options) SetPreserveDevices(v string) *Options { return s } +// SetTaskQueueing sets the TaskQueueing field's value. +func (s *Options) SetTaskQueueing(v string) *Options { + s.TaskQueueing = &v + return s +} + // SetUid sets the Uid field's value. func (s *Options) SetUid(v string) *Options { s.Uid = &v @@ -4723,10 +5550,71 @@ func (s *Options) SetVerifyMode(v string) *Options { return s } +// The VPC endpoint, subnet and security group that an agent uses to access +// IP addresses in a VPC (Virtual Private Cloud). +type PrivateLinkConfig struct { + _ struct{} `type:"structure"` + + // The private endpoint that is configured for an agent that has access to IP + // addresses in a PrivateLink (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). + // An agent that is configured with this endpoint will not be accessible over + // the public Internet. + PrivateLinkEndpoint *string `min:"7" type:"string"` + + // The Amazon Resource Names (ARNs) of the security groups that are configured + // for the EC2 resource that hosts an agent activated in a VPC or an agent that + // has access to a VPC endpoint. + SecurityGroupArns []*string `min:"1" type:"list"` + + // The Amazon Resource Names (ARNs) of the subnets that are configured for an + // agent activated in a VPC or an agent that has access to a VPC endpoint. + SubnetArns []*string `min:"1" type:"list"` + + // The ID of the VPC endpoint that is configured for an agent. An agent that + // is configured with a VPC endpoint will not be accessible over the public + // Internet. + VpcEndpointId *string `type:"string"` +} + +// String returns the string representation +func (s PrivateLinkConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrivateLinkConfig) GoString() string { + return s.String() +} + +// SetPrivateLinkEndpoint sets the PrivateLinkEndpoint field's value. +func (s *PrivateLinkConfig) SetPrivateLinkEndpoint(v string) *PrivateLinkConfig { + s.PrivateLinkEndpoint = &v + return s +} + +// SetSecurityGroupArns sets the SecurityGroupArns field's value. +func (s *PrivateLinkConfig) SetSecurityGroupArns(v []*string) *PrivateLinkConfig { + s.SecurityGroupArns = v + return s +} + +// SetSubnetArns sets the SubnetArns field's value. +func (s *PrivateLinkConfig) SetSubnetArns(v []*string) *PrivateLinkConfig { + s.SubnetArns = v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. 
+func (s *PrivateLinkConfig) SetVpcEndpointId(v string) *PrivateLinkConfig { + s.VpcEndpointId = &v + return s +} + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management -// (IAM) role that is used to access an Amazon S3 bucket. For detailed information -// about using such a role, see Components and Terminology (https://alpha-aws-docs.aws.amazon.com/sync-service/latest/userguide/create-locations-cli.html#create-location-s3-cli) -// in the AWS DataSync User Guide. +// (IAM) role that is used to access an Amazon S3 bucket. +// +// For detailed information about using such a role, see Creating a Location +// for Amazon S3 in the AWS DataSync User Guide. type S3Config struct { _ struct{} `type:"structure"` @@ -4766,10 +5654,44 @@ func (s *S3Config) SetBucketAccessRoleArn(v string) *S3Config { return s } +// Represents the mount options that are available for DataSync to access an +// SMB location. +type SmbMountOptions struct { + _ struct{} `type:"structure"` + + // The specific SMB version that you want DataSync to use to mount your SMB + // share. If you don't specify a version, DataSync defaults to AUTOMATIC. That + // is, DataSync automatically selects a version based on negotiation with the + // SMB server. + Version *string `type:"string" enum:"SmbVersion"` +} + +// String returns the string representation +func (s SmbMountOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SmbMountOptions) GoString() string { + return s.String() +} + +// SetVersion sets the Version field's value. +func (s *SmbMountOptions) SetVersion(v string) *SmbMountOptions { + s.Version = &v + return s +} + // StartTaskExecutionRequest type StartTaskExecutionInput struct { _ struct{} `type:"structure"` + // A list of filter rules that determines which files to include when running + // a task. The pattern should contain a single filter string that consists of + // the patterns to include. The patterns are delimited by "|" (that is, a pipe). + // For example: "/folder1|/folder2" + Includes []*FilterRule `type:"list"` + // Represents the options that are available to control the behavior of a StartTaskExecution // operation. Behavior includes preserving metadata such as user ID (UID), group // ID (GID), and file permissions, and also overwriting files in the destination, @@ -4815,6 +5737,12 @@ func (s *StartTaskExecutionInput) Validate() error { return nil } +// SetIncludes sets the Includes field's value. +func (s *StartTaskExecutionInput) SetIncludes(v []*FilterRule) *StartTaskExecutionInput { + s.Includes = v + return s +} + // SetOverrideOptions sets the OverrideOptions field's value. func (s *StartTaskExecutionInput) SetOverrideOptions(v *Options) *StartTaskExecutionInput { s.OverrideOptions = v @@ -4858,7 +5786,9 @@ type TagListEntry struct { _ struct{} `type:"structure"` // The key for an AWS resource tag. - Key *string `min:"1" type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // The value for an AWS resource tag. Value *string `min:"1" type:"string"` @@ -4877,6 +5807,9 @@ func (s TagListEntry) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *TagListEntry) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TagListEntry"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } @@ -5290,6 +6223,15 @@ func (s UpdateAgentOutput) GoString() string { type UpdateTaskInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the CloudWatch log group. + CloudWatchLogGroupArn *string `type:"string"` + + // A list of filter rules that determines which files to exclude from a task. + // The list should contain a single filter string that consists of the patterns + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example: + // "/folder1|/folder2" + Excludes []*FilterRule `type:"list"` + // The name of the task to update. Name *string `min:"1" type:"string"` @@ -5341,6 +6283,18 @@ func (s *UpdateTaskInput) Validate() error { return nil } +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *UpdateTaskInput) SetCloudWatchLogGroupArn(v string) *UpdateTaskInput { + s.CloudWatchLogGroupArn = &v + return s +} + +// SetExcludes sets the Excludes field's value. +func (s *UpdateTaskInput) SetExcludes(v []*FilterRule) *UpdateTaskInput { + s.Excludes = v + return s +} + // SetName sets the Name field's value. func (s *UpdateTaskInput) SetName(v string) *UpdateTaskInput { s.Name = &v @@ -5389,6 +6343,19 @@ const ( AtimeBestEffort = "BEST_EFFORT" ) +const ( + // EndpointTypePublic is a EndpointType enum value + EndpointTypePublic = "PUBLIC" + + // EndpointTypePrivateLink is a EndpointType enum value + EndpointTypePrivateLink = "PRIVATE_LINK" +) + +const ( + // FilterTypeSimplePattern is a FilterType enum value + FilterTypeSimplePattern = "SIMPLE_PATTERN" +) + const ( // GidNone is a Gid enum value GidNone = "NONE" @@ -5411,6 +6378,28 @@ const ( MtimePreserve = "PRESERVE" ) +const ( + // NfsVersionAutomatic is a NfsVersion enum value + NfsVersionAutomatic = "AUTOMATIC" + + // NfsVersionNfs3 is a NfsVersion enum value + NfsVersionNfs3 = "NFS3" + + // NfsVersionNfs40 is a NfsVersion enum value + NfsVersionNfs40 = "NFS4_0" + + // NfsVersionNfs41 is a NfsVersion enum value + NfsVersionNfs41 = "NFS4_1" +) + +const ( + // OverwriteModeAlways is a OverwriteMode enum value + OverwriteModeAlways = "ALWAYS" + + // OverwriteModeNever is a OverwriteMode enum value + OverwriteModeNever = "NEVER" +) + const ( // PhaseStatusPending is a PhaseStatus enum value PhaseStatusPending = "PENDING" @@ -5450,6 +6439,40 @@ const ( ) const ( + // S3StorageClassStandard is a S3StorageClass enum value + S3StorageClassStandard = "STANDARD" + + // S3StorageClassStandardIa is a S3StorageClass enum value + S3StorageClassStandardIa = "STANDARD_IA" + + // S3StorageClassOnezoneIa is a S3StorageClass enum value + S3StorageClassOnezoneIa = "ONEZONE_IA" + + // S3StorageClassIntelligentTiering is a S3StorageClass enum value + S3StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // S3StorageClassGlacier is a S3StorageClass enum value + S3StorageClassGlacier = "GLACIER" + + // S3StorageClassDeepArchive is a S3StorageClass enum value + S3StorageClassDeepArchive = "DEEP_ARCHIVE" +) + +const ( + // SmbVersionAutomatic is a SmbVersion enum value + SmbVersionAutomatic = "AUTOMATIC" + + // SmbVersionSmb2 is a SmbVersion enum value + SmbVersionSmb2 = "SMB2" + + // SmbVersionSmb3 is a SmbVersion enum value + SmbVersionSmb3 = "SMB3" +) + +const ( + //
TaskExecutionStatusQueued is a TaskExecutionStatus enum value + TaskExecutionStatusQueued = "QUEUED" + // TaskExecutionStatusLaunching is a TaskExecutionStatus enum value TaskExecutionStatusLaunching = "LAUNCHING" @@ -5469,6 +6492,14 @@ const ( TaskExecutionStatusError = "ERROR" ) +const ( + // TaskQueueingEnabled is a TaskQueueing enum value + TaskQueueingEnabled = "ENABLED" + + // TaskQueueingDisabled is a TaskQueueing enum value + TaskQueueingDisabled = "DISABLED" +) + const ( // TaskStatusAvailable is a TaskStatus enum value TaskStatusAvailable = "AVAILABLE" @@ -5476,6 +6507,9 @@ const ( // TaskStatusCreating is a TaskStatus enum value TaskStatusCreating = "CREATING" + // TaskStatusQueued is a TaskStatus enum value + TaskStatusQueued = "QUEUED" + // TaskStatusRunning is a TaskStatus enum value TaskStatusRunning = "RUNNING" @@ -5501,6 +6535,9 @@ const ( // VerifyModePointInTimeConsistent is a VerifyMode enum value VerifyModePointInTimeConsistent = "POINT_IN_TIME_CONSISTENT" + // VerifyModeOnlyFilesTransferred is a VerifyMode enum value + VerifyModeOnlyFilesTransferred = "ONLY_FILES_TRANSFERRED" + // VerifyModeNone is a VerifyMode enum value VerifyModeNone = "NONE" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/errors.go index bd7164f2f79..0d709f459e5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/errors.go @@ -4,6 +4,12 @@ package datasync const ( + // ErrCodeInternalException for service response error code + // "InternalException". + // + // This exception is thrown when an error occurs in the AWS DataSync service. + ErrCodeInternalException = "InternalException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go index 22d8ddcb90c..9f883b9b883 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "datasync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataSync { svc := &DataSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-09", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/api.go index 4b836d462bd..d58a11c5464 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/api.go @@ -101,6 +101,7 @@ func (c *DAX) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ // You have exceeded the maximum number of tags for this DAX cluster. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -195,6 +196,7 @@ func (c *DAX) CreateParameterGroupRequest(input *CreateParameterGroupInput) (req // One or more parameters in a parameter group are in an invalid state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -293,6 +295,7 @@ func (c *DAX) CreateSubnetGroupRequest(input *CreateSubnetGroupInput) (req *requ // An invalid subnet identifier was specified. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/CreateSubnetGroup func (c *DAX) CreateSubnetGroup(input *CreateSubnetGroupInput) (*CreateSubnetGroupOutput, error) { @@ -383,6 +386,7 @@ func (c *DAX) DecreaseReplicationFactorRequest(input *DecreaseReplicationFactorI // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -476,6 +480,7 @@ func (c *DAX) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Requ // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -567,6 +572,7 @@ func (c *DAX) DeleteParameterGroupRequest(input *DeleteParameterGroupInput) (req // The specified parameter group does not exist. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. 
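Since the DAX operations above all surface failures as awserr.Error values, the newly documented ServiceLinkedRoleNotFoundFault can be matched with a runtime type assertion on the error's Code. A minimal sketch under that assumption; error handling is abbreviated:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dax"
)

func main() {
	svc := dax.New(session.Must(session.NewSession()))

	out, err := svc.DescribeClusters(&dax.DescribeClustersInput{})
	if err != nil {
		// Match the documented error code via the awserr.Error interface.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dax.ErrCodeServiceLinkedRoleNotFoundFault {
			fmt.Println("service linked role missing:", aerr.Message())
			return
		}
		fmt.Println("DescribeClusters:", err)
		return
	}
	fmt.Println("clusters described:", len(out.Clusters))
}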
@@ -659,6 +665,7 @@ func (c *DAX) DeleteSubnetGroupRequest(input *DeleteSubnetGroupInput) (req *requ // The requested subnet group name does not refer to an existing subnet group. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DeleteSubnetGroup func (c *DAX) DeleteSubnetGroup(input *DeleteSubnetGroupInput) (*DeleteSubnetGroupOutput, error) { @@ -756,6 +763,7 @@ func (c *DAX) DescribeClustersRequest(input *DescribeClustersInput) (req *reques // The requested cluster ID does not refer to an existing DAX cluster. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -840,6 +848,7 @@ func (c *DAX) DescribeDefaultParametersRequest(input *DescribeDefaultParametersI // // Returned Error Codes: // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -917,7 +926,7 @@ func (c *DAX) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Re // events specific to a particular DAX cluster or parameter group by providing // the name as a parameter. // -// By default, only the events occurring within the last hour are returned; +// By default, only the events occurring within the last 24 hours are returned; // however, you can retrieve up to 14 days' worth of events if necessary. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -929,6 +938,7 @@ func (c *DAX) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Re // // Returned Error Codes: // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1017,6 +1027,7 @@ func (c *DAX) DescribeParameterGroupsRequest(input *DescribeParameterGroupsInput // The specified parameter group does not exist. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1104,6 +1115,7 @@ func (c *DAX) DescribeParametersRequest(input *DescribeParametersInput) (req *re // The specified parameter group does not exist. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1192,6 +1204,7 @@ func (c *DAX) DescribeSubnetGroupsRequest(input *DescribeSubnetGroupsInput) (req // The requested subnet group name does not refer to an existing subnet group. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/DescribeSubnetGroups func (c *DAX) DescribeSubnetGroups(input *DescribeSubnetGroupsInput) (*DescribeSubnetGroupsOutput, error) { @@ -1289,6 +1302,7 @@ func (c *DAX) IncreaseReplicationFactorRequest(input *IncreaseReplicationFactorI // You have attempted to exceed the maximum number of nodes for your AWS account. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1383,6 +1397,7 @@ func (c *DAX) ListTagsRequest(input *ListTagsInput) (req *request.Request, outpu // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1459,6 +1474,9 @@ func (c *DAX) RebootNodeRequest(input *RebootNodeInput) (req *request.Request, o // Reboots a single node of a DAX cluster. The reboot action takes place as // soon as possible. During the reboot, the node status is set to REBOOTING. // +// RebootNode restarts the DAX engine process and does not remove the contents +// of the cache. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1477,6 +1495,7 @@ func (c *DAX) RebootNodeRequest(input *RebootNodeInput) (req *request.Request, o // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1574,6 +1593,7 @@ func (c *DAX) TagResourceRequest(input *TagResourceInput) (req *request.Request, // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1671,6 +1691,7 @@ func (c *DAX) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // The requested DAX cluster is not in the available state. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1769,6 +1790,7 @@ func (c *DAX) UpdateClusterRequest(input *UpdateClusterInput) (req *request.Requ // The specified parameter group does not exist. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1860,6 +1882,7 @@ func (c *DAX) UpdateParameterGroupRequest(input *UpdateParameterGroupInput) (req // The specified parameter group does not exist. 
// // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValueException" // The value for a parameter is invalid. @@ -1957,6 +1980,7 @@ func (c *DAX) UpdateSubnetGroupRequest(input *UpdateSubnetGroupInput) (req *requ // An invalid subnet identifier was specified. // // * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19/UpdateSubnetGroup func (c *DAX) UpdateSubnetGroup(input *UpdateSubnetGroupInput) (*UpdateSubnetGroupOutput, error) { @@ -2163,9 +2187,10 @@ func (s *Cluster) SetTotalNodes(v int64) *Cluster { type CreateClusterInput struct { _ struct{} `type:"structure"` - // The Availability Zones (AZs) in which the cluster nodes will be created. - // All nodes belonging to the cluster are placed in these Availability Zones. - // Use this parameter if you want to distribute the nodes across multiple AZs. + // The Availability Zones (AZs) in which the cluster nodes will reside after + // the cluster has been created or updated. If provided, the length of this + // list must equal the ReplicationFactor parameter. If you omit this parameter, + // DAX will spread the nodes across Availability Zones for the highest availability. AvailabilityZones []*string `type:"list"` // The cluster identifier. This parameter is stored as a lowercase string. @@ -2234,7 +2259,9 @@ type CreateClusterInput struct { // The number of nodes in the DAX cluster. A replication factor of 1 will create // a single-node cluster, without any read replicas. For additional fault tolerance, // you can create a multiple node cluster with one or more read replicas. To - // do this, set ReplicationFactor to 2 or more. + // do this, set ReplicationFactor to a number between 3 (one primary and two + // read replicas) and 10 (one primary and nine read replicas). If the AvailabilityZones + // parameter is provided, its length must equal the ReplicationFactor. // // AWS recommends that you have at least two read replicas per cluster. // @@ -4170,7 +4197,7 @@ func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership { type Subnet struct { _ struct{} `type:"structure"` - // The Availability Zone (AZ) for subnet subnet. + // The Availability Zone (AZ) for the subnet. SubnetAvailabilityZone *string `type:"string"` // The system-assigned identifier for the subnet. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go index f5ef87e87a0..20b67ab768a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/errors.go @@ -110,6 +110,8 @@ const ( // ErrCodeServiceLinkedRoleNotFoundFault for service response error code // "ServiceLinkedRoleNotFoundFault". + // + // The specified service linked role (SLR) was not found. 
ErrCodeServiceLinkedRoleNotFoundFault = "ServiceLinkedRoleNotFoundFault" // ErrCodeSubnetGroupAlreadyExistsFault for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/service.go index 545ea0312c3..9a7d90302f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dax/service.go @@ -46,11 +46,11 @@ const ( // svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DAX { svc := &DAX{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go index 3183204612f..2f7c56f935d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go @@ -3,6 +3,7 @@ package devicefarm import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -343,6 +344,9 @@ func (c *DeviceFarm) CreateProjectRequest(input *CreateProjectInput) (req *reque // * ErrCodeServiceAccountException "ServiceAccountException" // There was a problem with the service account. // +// * ErrCodeTagOperationException "TagOperationException" +// The operation was not successful. Try again. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/CreateProject func (c *DeviceFarm) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) { req, out := c.CreateProjectRequest(input) @@ -2165,7 +2169,7 @@ func (c *DeviceFarm) GetOfferingStatusWithContext(ctx aws.Context, input *GetOff // // Example iterating over at most 3 pages of a GetOfferingStatus operation. // pageNum := 0 // err := client.GetOfferingStatusPages(params, -// func(page *GetOfferingStatusOutput, lastPage bool) bool { +// func(page *devicefarm.GetOfferingStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2197,10 +2201,12 @@ func (c *DeviceFarm) GetOfferingStatusPagesWithContext(ctx aws.Context, input *G }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetOfferingStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetOfferingStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3013,7 +3019,7 @@ func (c *DeviceFarm) ListArtifactsWithContext(ctx aws.Context, input *ListArtifa // // Example iterating over at most 3 pages of a ListArtifacts operation. 
// pageNum := 0 // err := client.ListArtifactsPages(params, -// func(page *ListArtifactsOutput, lastPage bool) bool { +// func(page *devicefarm.ListArtifactsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3045,10 +3051,12 @@ func (c *DeviceFarm) ListArtifactsPagesWithContext(ctx aws.Context, input *ListA }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListArtifactsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListArtifactsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3246,7 +3254,7 @@ func (c *DeviceFarm) ListDevicePoolsWithContext(ctx aws.Context, input *ListDevi // // Example iterating over at most 3 pages of a ListDevicePools operation. // pageNum := 0 // err := client.ListDevicePoolsPages(params, -// func(page *ListDevicePoolsOutput, lastPage bool) bool { +// func(page *devicefarm.ListDevicePoolsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3278,10 +3286,12 @@ func (c *DeviceFarm) ListDevicePoolsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDevicePoolsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDevicePoolsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3390,7 +3400,7 @@ func (c *DeviceFarm) ListDevicesWithContext(ctx aws.Context, input *ListDevicesI // // Example iterating over at most 3 pages of a ListDevices operation. // pageNum := 0 // err := client.ListDevicesPages(params, -// func(page *ListDevicesOutput, lastPage bool) bool { +// func(page *devicefarm.ListDevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3422,10 +3432,12 @@ func (c *DeviceFarm) ListDevicesPagesWithContext(ctx aws.Context, input *ListDev }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDevicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDevicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3622,7 +3634,7 @@ func (c *DeviceFarm) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, // // Example iterating over at most 3 pages of a ListJobs operation. // pageNum := 0 // err := client.ListJobsPages(params, -// func(page *ListJobsOutput, lastPage bool) bool { +// func(page *devicefarm.ListJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3654,10 +3666,12 @@ func (c *DeviceFarm) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3959,7 +3973,7 @@ func (c *DeviceFarm) ListOfferingTransactionsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListOfferingTransactions operation. 
// pageNum := 0 // err := client.ListOfferingTransactionsPages(params, -// func(page *ListOfferingTransactionsOutput, lastPage bool) bool { +// func(page *devicefarm.ListOfferingTransactionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3991,10 +4005,12 @@ func (c *DeviceFarm) ListOfferingTransactionsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOfferingTransactionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOfferingTransactionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4112,7 +4128,7 @@ func (c *DeviceFarm) ListOfferingsWithContext(ctx aws.Context, input *ListOfferi // // Example iterating over at most 3 pages of a ListOfferings operation. // pageNum := 0 // err := client.ListOfferingsPages(params, -// func(page *ListOfferingsOutput, lastPage bool) bool { +// func(page *devicefarm.ListOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4144,10 +4160,12 @@ func (c *DeviceFarm) ListOfferingsPagesWithContext(ctx aws.Context, input *ListO }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4256,7 +4274,7 @@ func (c *DeviceFarm) ListProjectsWithContext(ctx aws.Context, input *ListProject // // Example iterating over at most 3 pages of a ListProjects operation. // pageNum := 0 // err := client.ListProjectsPages(params, -// func(page *ListProjectsOutput, lastPage bool) bool { +// func(page *devicefarm.ListProjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4288,10 +4306,12 @@ func (c *DeviceFarm) ListProjectsPagesWithContext(ctx aws.Context, input *ListPr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListProjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListProjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4488,7 +4508,7 @@ func (c *DeviceFarm) ListRunsWithContext(ctx aws.Context, input *ListRunsInput, // // Example iterating over at most 3 pages of a ListRuns operation. // pageNum := 0 // err := client.ListRunsPages(params, -// func(page *ListRunsOutput, lastPage bool) bool { +// func(page *devicefarm.ListRunsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4520,10 +4540,12 @@ func (c *DeviceFarm) ListRunsPagesWithContext(ctx aws.Context, input *ListRunsIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRunsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRunsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4632,7 +4654,7 @@ func (c *DeviceFarm) ListSamplesWithContext(ctx aws.Context, input *ListSamplesI // // Example iterating over at most 3 pages of a ListSamples operation. 
// pageNum := 0 // err := client.ListSamplesPages(params, -// func(page *ListSamplesOutput, lastPage bool) bool { +// func(page *devicefarm.ListSamplesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4664,10 +4686,12 @@ func (c *DeviceFarm) ListSamplesPagesWithContext(ctx aws.Context, input *ListSam }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSamplesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSamplesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4776,7 +4800,7 @@ func (c *DeviceFarm) ListSuitesWithContext(ctx aws.Context, input *ListSuitesInp // // Example iterating over at most 3 pages of a ListSuites operation. // pageNum := 0 // err := client.ListSuitesPages(params, -// func(page *ListSuitesOutput, lastPage bool) bool { +// func(page *devicefarm.ListSuitesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4808,13 +4832,97 @@ func (c *DeviceFarm) ListSuitesPagesWithContext(ctx aws.Context, input *ListSuit }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSuitesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSuitesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/ListTagsForResource +func (c *DeviceFarm) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Device Farm. +// +// List the tags for an AWS Device Farm resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Device Farm's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The specified entity was not found. +// +// * ErrCodeTagOperationException "TagOperationException" +// The operation was not successful. Try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/ListTagsForResource +func (c *DeviceFarm) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DeviceFarm) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTests = "ListTests" // ListTestsRequest generates a "aws/request.Request" representing the @@ -4920,7 +5028,7 @@ func (c *DeviceFarm) ListTestsWithContext(ctx aws.Context, input *ListTestsInput // // Example iterating over at most 3 pages of a ListTests operation. // pageNum := 0 // err := client.ListTestsPages(params, -// func(page *ListTestsOutput, lastPage bool) bool { +// func(page *devicefarm.ListTestsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4952,10 +5060,12 @@ func (c *DeviceFarm) ListTestsPagesWithContext(ctx aws.Context, input *ListTests }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTestsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTestsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5064,7 +5174,7 @@ func (c *DeviceFarm) ListUniqueProblemsWithContext(ctx aws.Context, input *ListU // // Example iterating over at most 3 pages of a ListUniqueProblems operation. // pageNum := 0 // err := client.ListUniqueProblemsPages(params, -// func(page *ListUniqueProblemsOutput, lastPage bool) bool { +// func(page *devicefarm.ListUniqueProblemsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5096,10 +5206,12 @@ func (c *DeviceFarm) ListUniqueProblemsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUniqueProblemsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUniqueProblemsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5208,7 +5320,7 @@ func (c *DeviceFarm) ListUploadsWithContext(ctx aws.Context, input *ListUploadsI // // Example iterating over at most 3 pages of a ListUploads operation. 
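ListTagsForResource is one of the three tagging operations this bump introduces. A usage sketch, reusing the client from the pagination example above; the project ARN is a made-up placeholder (ResourceARN is validated to be at least 32 characters):

arn := "arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE-1234"
out, err := client.ListTagsForResource(&devicefarm.ListTagsForResourceInput{
	ResourceARN: aws.String(arn),
})
if err != nil {
	log.Fatal(err)
}
for _, tag := range out.Tags {
	// Key and Value are both required fields, so StringValue is safe here.
	fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
}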
// pageNum := 0 // err := client.ListUploadsPages(params, -// func(page *ListUploadsOutput, lastPage bool) bool { +// func(page *devicefarm.ListUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5240,10 +5352,12 @@ func (c *DeviceFarm) ListUploadsPagesWithContext(ctx aws.Context, input *ListUpl }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5888,6 +6002,183 @@ func (c *DeviceFarm) StopRunWithContext(ctx aws.Context, input *StopRunInput, op return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/TagResource +func (c *DeviceFarm) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Device Farm. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are deleted as well. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Device Farm's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The specified entity was not found. +// +// * ErrCodeTagOperationException "TagOperationException" +// The operation was not successful. Try again. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The request doesn't comply with the AWS Identity and Access Management (IAM) +// tag policy. Correct your request and then retry it. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/TagResource +func (c *DeviceFarm) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DeviceFarm) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/UntagResource +func (c *DeviceFarm) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Device Farm. +// +// Deletes the specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Device Farm's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// The specified entity was not found. +// +// * ErrCodeTagOperationException "TagOperationException" +// The operation was not successful. Try again. 
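TagResource, defined above, responds with an empty structure, which is why its Request constructor swaps the JSON-RPC unmarshaler for protocol.UnmarshalDiscardBodyHandler. A sketch of tagging the same hypothetical project from the earlier example:

_, err = client.TagResource(&devicefarm.TagResourceInput{
	ResourceARN: aws.String(arn), // same placeholder ARN as above
	Tags: []*devicefarm.Tag{
		{Key: aws.String("team"), Value: aws.String("mobile")},
		{Key: aws.String("env"), Value: aws.String("ci")},
	},
})
if err != nil {
	log.Fatal(err)
}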
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/UntagResource +func (c *DeviceFarm) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DeviceFarm) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateDeviceInstance = "UpdateDeviceInstance" // UpdateDeviceInstanceRequest generates a "aws/request.Request" representing the @@ -6516,7 +6807,7 @@ type AccountSettings struct { AwsAccountNumber *string `locationName:"awsAccountNumber" min:"2" type:"string"` // The default number of minutes (at the account level) a test run will execute - // before it times out. Default value is 60 minutes. + // before it times out. The default value is 150 minutes. DefaultJobTimeoutMinutes *int64 `locationName:"defaultJobTimeoutMinutes" type:"integer"` // The maximum number of minutes a test run will execute before it times out. @@ -6669,7 +6960,7 @@ type Artifact struct { // // * APPLICATION_CRASH_REPORT: The application crash report output type. // - // * XCTEST_LOG: The XCode test output type. + // * XCTEST_LOG: The Xcode test output type. // // * VIDEO: The Video output type. // @@ -7357,6 +7648,8 @@ type CreateRemoteAccessSessionInput struct { // on the same client, you should pass the same clientId value in each call // to CreateRemoteAccessSession. This is required only if remoteDebugEnabled // is set to true. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). ClientId *string `locationName:"clientId" type:"string"` // The configuration information for the remote access session request. @@ -7397,6 +7690,8 @@ type CreateRemoteAccessSessionInput struct { // Set to true if you want to access devices remotely for debugging in your // remote access session. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). RemoteDebugEnabled *bool `locationName:"remoteDebugEnabled" type:"boolean"` // The Amazon Resource Name (ARN) for the app to be recorded in the remote access @@ -7415,9 +7710,11 @@ type CreateRemoteAccessSessionInput struct { // Farm FAQs. SkipAppResign *bool `locationName:"skipAppResign" type:"boolean"` - // The public key of the ssh key pair you want to use for connecting to remote - // devices in your remote debugging session. This is only required if remoteDebugEnabled - // is set to true. + // Ignored. The public key of the ssh key pair you want to use for connecting + // to remote devices in your remote debugging session. This is only required + // if remoteDebugEnabled is set to true. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). 
SshPublicKey *string `locationName:"sshPublicKey" type:"string"` } @@ -7622,9 +7919,9 @@ type CreateUploadInput struct { // // * UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. // - // * XCTEST_TEST_PACKAGE: An XCode test package upload. + // * XCTEST_TEST_PACKAGE: An Xcode test package upload. // - // * XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload. + // * XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload. // // * APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload. // @@ -7653,7 +7950,7 @@ type CreateUploadInput struct { // // * INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload. // - // * XCTEST_UI_TEST_SPEC: An XCode UI test spec upload. + // * XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload. // // Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws // an ArgumentException error. @@ -8124,7 +8421,7 @@ func (s DeleteProjectOutput) GoString() string { type DeleteRemoteAccessSessionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the sesssion for which you want to delete + // The Amazon Resource Name (ARN) of the session for which you want to delete // remote access. // // Arn is a required field @@ -8426,6 +8723,8 @@ type Device struct { RemoteAccessEnabled *bool `locationName:"remoteAccessEnabled" type:"boolean"` // This flag is set to true if remote debugging is enabled for the device. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). RemoteDebugEnabled *bool `locationName:"remoteDebugEnabled" type:"boolean"` // The resolution of the device. @@ -8584,55 +8883,81 @@ type DeviceFilter struct { // The supported operators for each attribute are provided in the following // list. // - // ARNThe Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // ARN + // + // The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". // // Supported operators: EQUALS, IN, NOT_IN // - // PLATFORMThe device platform. Valid values are "ANDROID" or "IOS". + // PLATFORM + // + // The device platform. Valid values are "ANDROID" or "IOS". // // Supported operators: EQUALS // - // OS_VERSIONThe operating system version. For example, "10.3.2". + // OS_VERSION + // + // The operating system version. For example, "10.3.2". // // Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, // LESS_THAN_OR_EQUALS, NOT_IN // - // MODELThe device model. For example, "iPad 5th Gen". + // MODEL + // + // The device model. For example, "iPad 5th Gen". // // Supported operators: CONTAINS, EQUALS, IN, NOT_IN // - // AVAILABILITYThe current availability of the device. Valid values are "AVAILABLE", - // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". + // AVAILABILITY + // + // The current availability of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", + // "BUSY", or "TEMPORARY_NOT_AVAILABLE". // // Supported operators: EQUALS // - // FORM_FACTORThe device form factor. Valid values are "PHONE" or "TABLET". + // FORM_FACTOR + // + // The device form factor. Valid values are "PHONE" or "TABLET". // // Supported operators: EQUALS // - // MANUFACTURERThe device manufacturer. For example, "Apple". + // MANUFACTURER + // + // The device manufacturer. For example, "Apple". 
// // Supported operators: EQUALS, IN, NOT_IN // - // REMOTE_ACCESS_ENABLEDWhether the device is enabled for remote access. Valid - // values are "TRUE" or "FALSE". + // REMOTE_ACCESS_ENABLED + // + // Whether the device is enabled for remote access. Valid values are "TRUE" + // or "FALSE". // // Supported operators: EQUALS // - // REMOTE_DEBUG_ENABLEDWhether the device is enabled for remote debugging. Valid - // values are "TRUE" or "FALSE". + // REMOTE_DEBUG_ENABLED + // + // Ignored. Whether the device is enabled for remote debugging. Valid values + // are "TRUE" or "FALSE". // // Supported operators: EQUALS // - // INSTANCE_ARNThe Amazon Resource Name (ARN) of the device instance. + // This filter will be ignored, as remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). + // + // INSTANCE_ARN + // + // The Amazon Resource Name (ARN) of the device instance. // // Supported operators: EQUALS, IN, NOT_IN // - // INSTANCE_LABELSThe label of the device instance. + // INSTANCE_LABELS + // + // The label of the device instance. // // Supported operators: CONTAINS // - // FLEET_TYPEThe fleet type. Valid values are "PUBLIC" or "PRIVATE". + // FLEET_TYPE + // + // The fleet type. Valid values are "PUBLIC" or "PRIVATE". // // Supported operators: EQUALS Attribute *string `locationName:"attribute" type:"string" enum:"DeviceFilterAttribute"` @@ -8940,75 +9265,38 @@ type DeviceSelectionConfiguration struct { // Used to dynamically select a set of devices for a test run. A filter is made // up of an attribute, an operator, and one or more values. // - // * Attribute // - // The aspect of a device such as platform or model used as the selection criteria - // in a device filter. - // - // Allowed values include: - // - // ARN: The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". - // - // PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". - // - // OS_VERSION: The operating system version. For example, "10.3.2". - // - // MODEL: The device model. For example, "iPad 5th Gen". - // - // AVAILABILITY: The current availability of the device. Valid values are "AVAILABLE", - // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". - // - // FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET". - // - // MANUFACTURER: The device manufacturer. For example, "Apple". - // - // REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid - // values are "TRUE" or "FALSE". - // - // REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. - // Valid values are "TRUE" or "FALSE". - // - // INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. - // - // INSTANCE_LABELS: The label of the device instance. - // - // FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE". - // - // * Operator - // - // The filter operator. - // - // The EQUALS operator is available for every attribute except INSTANCE_LABELS. - // - // The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes. - // - // The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, - // MANUFACTURER, and INSTANCE_ARN attributes. - // - // The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS + // * Attribute The aspect of a device such as platform or model used as the + // selection criteria in a device filter.
Allowed values include: ARN: The + // Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". OS_VERSION: + // The operating system version. For example, "10.3.2". MODEL: The device + // model. For example, "iPad 5th Gen". AVAILABILITY: The current availability + // of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", + // or "TEMPORARY_NOT_AVAILABLE". FORM_FACTOR: The device form factor. Valid + // values are "PHONE" or "TABLET". MANUFACTURER: The device manufacturer. + // For example, "Apple". REMOTE_ACCESS_ENABLED: Whether the device is enabled + // for remote access. Valid values are "TRUE" or "FALSE". REMOTE_DEBUG_ENABLED: + // Whether the device is enabled for remote debugging. Valid values are "TRUE" + // or "FALSE". This filter will be ignored, as remote debugging is no longer + // supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). + // INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. INSTANCE_LABELS: + // The label of the device instance. FLEET_TYPE: The fleet type. Valid values + // are "PUBLIC" or "PRIVATE". + // + // * Operator The filter operator. The EQUALS operator is available for every + // attribute except INSTANCE_LABELS. The CONTAINS operator is available for + // the INSTANCE_LABELS and MODEL attributes. The IN and NOT_IN operators + // are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN + // attributes. The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS // operators are also available for the OS_VERSION attribute. // - // * Values - // - // An array of one or more filter values. - // - // Operator Values - // - // The IN and NOT_IN operators can take a values array that has more than one - // element. - // - // The other operators require an array with a single element. - // - // Attribute Values - // - // The PLATFORM attribute can be set to "ANDROID" or "IOS". - // - // The AVAILABILITY attribute can be set to "AVAILABLE", "HIGHLY_AVAILABLE", - // "BUSY", or "TEMPORARY_NOT_AVAILABLE". - // - // The FORM_FACTOR attribute can be set to "PHONE" or "TABLET". - // - // The FLEET_TYPE attribute can be set to "PUBLIC" or "PRIVATE". + // * Values An array of one or more filter values. Operator Values The IN + // and NOT_IN operators can take a values array that has more than one element. + // The other operators require an array with a single element. Attribute + // Values The PLATFORM attribute can be set to "ANDROID" or "IOS". The AVAILABILITY + // attribute can be set to "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". + // The FORM_FACTOR attribute can be set to "PHONE" or "TABLET". The FLEET_TYPE + // attribute can be set to "PUBLIC" or "PRIVATE". // // Filters is a required field Filters []*DeviceFilter `locationName:"filters" type:"list" required:"true"` @@ -9399,9 +9687,9 @@ type GetDevicePoolCompatibilityInput struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. + // * XCTEST_UI: The Xcode UI test type. TestType *string `locationName:"testType" type:"string" enum:"TestType"` } @@ -10636,9 +10924,9 @@ type Job struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. 
+ // * XCTEST_UI: The Xcode UI test type. Type *string `locationName:"type" type:"string" enum:"TestType"` // This value is set to true if video capture is enabled; otherwise, it is set @@ -11058,57 +11346,35 @@ type ListDevicesInput struct { // operator, and one or more values. // // * Attribute: The aspect of a device such as platform or model used as - // the selction criteria in a device filter. - // - // Allowed values include: - // - // ARN: The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". - // - // PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". - // - // OS_VERSION: The operating system version. For example, "10.3.2". - // - // MODEL: The device model. For example, "iPad 5th Gen". - // - // AVAILABILITY: The current availability of the device. Valid values are "AVAILABLE", - // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". - // - // FORM_FACTOR: The device form factor. Valid values are "PHONE" or "TABLET". - // - // MANUFACTURER: The device manufacturer. For example, "Apple". - // - // REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid - // values are "TRUE" or "FALSE". - // - // REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. - // Valid values are "TRUE" or "FALSE". - // - // INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. - // - // INSTANCE_LABELS: The label of the device instance. - // - // FLEET_TYPE: The fleet type. Valid values are "PUBLIC" or "PRIVATE". - // - // * Operator: The filter operator. - // - // The EQUALS operator is available for every attribute except INSTANCE_LABELS. - // - // The CONTAINS operator is available for the INSTANCE_LABELS and MODEL attributes. - // - // The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, - // MANUFACTURER, and INSTANCE_ARN attributes. - // - // The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS + // the selection criteria in a device filter. Allowed values include: ARN: + // The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // PLATFORM: The device platform. Valid values are "ANDROID" or "IOS". OS_VERSION: + // The operating system version. For example, "10.3.2". MODEL: The device + // model. For example, "iPad 5th Gen". AVAILABILITY: The current availability + // of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", + // or "TEMPORARY_NOT_AVAILABLE". FORM_FACTOR: The device form factor. Valid + // values are "PHONE" or "TABLET". MANUFACTURER: The device manufacturer. + // For example, "Apple". REMOTE_ACCESS_ENABLED: Whether the device is enabled + // for remote access. Valid values are "TRUE" or "FALSE". REMOTE_DEBUG_ENABLED: + // Whether the device is enabled for remote debugging. Valid values are "TRUE" + // or "FALSE". This attribute will be ignored, as remote debugging is no + // longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). + // INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance. INSTANCE_LABELS: + // The label of the device instance. FLEET_TYPE: The fleet type. Valid values + // are "PUBLIC" or "PRIVATE". + // + // * Operator: The filter operator. The EQUALS operator is available for + // every attribute except INSTANCE_LABELS. The CONTAINS operator is available + // for the INSTANCE_LABELS and MODEL attributes. 
The IN and NOT_IN operators + // are available for the ARN, OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN + // attributes. The LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS // operators are also available for the OS_VERSION attribute. // - // * Values: An array of one or more filter values. - // - // The IN and NOT_IN operators take a values array that has one or more elements. - // - // The other operators require an array with a single element. - // - // In a request, the AVAILABILITY attribute takes "AVAILABLE", "HIGHLY_AVAILABLE", - // "BUSY", or "TEMPORARY_NOT_AVAILABLE" as values. + // * Values: An array of one or more filter values. The IN and NOT_IN operators + // take a values array that has one or more elements. The other operators + // require an array with a single element. In a request, the AVAILABILITY + // attribute takes "AVAILABLE", "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE" + // as values. Filters []*DeviceFilter `locationName:"filters" type:"list"` // An identifier that was returned from the previous call to this operation, @@ -11769,8 +12035,8 @@ func (s *ListProjectsOutput) SetProjects(v []*Project) *ListProjectsOutput { type ListRemoteAccessSessionsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the remote access session about which you - // are requesting information. + // The Amazon Resource Name (ARN) of the project about which you are requesting + // information. // // Arn is a required field Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` @@ -12128,6 +12394,75 @@ func (s *ListSuitesOutput) SetSuites(v []*Suite) *ListSuitesOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource(s) for which to list tags. + // You can associate tags with the following Device Farm resources: PROJECT, + // RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, + // DEVICE, and VPCE_CONFIGURATION. + // + // ResourceARN is a required field + ResourceARN *string `min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags to add to the resource. A tag is an array of key-value pairs. Tag + // keys can have a maximum character length of 128 characters, and tag values + // can have a maximum length of 256 characters. 
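The filter documentation reflowed earlier in this hunk (DeviceFilter, DeviceSelectionConfiguration, ListDevicesInput) all describes the same attribute/operator/values triple. A sketch composing filters for ListDevices, continuing the earlier examples; the attribute and operator strings are the documented enum values written as literals (the SDK also exposes them as generated constants):

filters := []*devicefarm.DeviceFilter{
	{
		Attribute: aws.String("PLATFORM"),
		Operator:  aws.String("EQUALS"),
		Values:    []*string{aws.String("ANDROID")},
	},
	{
		// OS_VERSION additionally supports the relational operators.
		Attribute: aws.String("OS_VERSION"),
		Operator:  aws.String("GREATER_THAN_OR_EQUALS"),
		Values:    []*string{aws.String("9")},
	},
}
resp, err := client.ListDevices(&devicefarm.ListDevicesInput{Filters: filters})
if err != nil {
	log.Fatal(err)
}
fmt.Println(len(resp.Devices), "matching devices")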
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // Represents a request to the list tests operation. type ListTestsInput struct { _ struct{} `type:"structure"` @@ -12346,7 +12681,7 @@ type ListUploadsInput struct { // // * IOS_APP: An iOS upload. // - // * WEB_APP: A web appliction upload. + // * WEB_APP: A web application upload. // // * EXTERNAL_DATA: An external data upload. // @@ -12384,9 +12719,9 @@ type ListUploadsInput struct { // // * UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. // - // * XCTEST_TEST_PACKAGE: An XCode test package upload. + // * XCTEST_TEST_PACKAGE: An Xcode test package upload. // - // * XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload. + // * XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload. // // * APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload. // @@ -12396,7 +12731,7 @@ type ListUploadsInput struct { // // * APPIUM_NODE_TEST_SPEC: An Appium Node.js test spec upload. // - // * APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload. + // * APPIUM_RUBY_TEST_SPEC: An Appium Ruby test spec upload. // // * APPIUM_WEB_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload // for a web app. @@ -12415,7 +12750,7 @@ type ListUploadsInput struct { // // * INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload. // - // * XCTEST_UI_TEST_SPEC: An XCode UI test spec upload. + // * XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload. Type *string `locationName:"type" type:"string" enum:"UploadType"` } @@ -13141,7 +13476,7 @@ type Project struct { Created *time.Time `locationName:"created" type:"timestamp"` // The default number of minutes (at the project level) a test run will execute - // before it times out. Default value is 60 minutes. + // before it times out. The default value is 150 minutes. DefaultJobTimeoutMinutes *int64 `locationName:"defaultJobTimeoutMinutes" type:"integer"` // The project's name. @@ -13363,6 +13698,8 @@ type RemoteAccessSession struct { // Unique identifier of your client for the remote access session. Only returned // if remote debugging is enabled for the remote access session. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). ClientId *string `locationName:"clientId" type:"string"` // The date and time the remote access session was created. @@ -13371,12 +13708,14 @@ type RemoteAccessSession struct { // The device (phone or tablet) used in the remote access session. Device *Device `locationName:"device" type:"structure"` - // The number of minutes a device is used in a remote access sesssion (including + // The number of minutes a device is used in a remote access session (including // setup and teardown minutes). DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` // Unique device identifier for the remote device. Only returned if remote debugging // is enabled for the remote access session. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). 
DeviceUdid *string `locationName:"deviceUdid" type:"string"` // The endpoint for the remote access sesssion. @@ -13384,6 +13723,8 @@ type RemoteAccessSession struct { // IP address of the EC2 host where you need to connect to remotely debug devices. // Only returned if remote debugging is enabled for the remote access session. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). HostAddress *string `locationName:"hostAddress" type:"string"` // The Amazon Resource Name (ARN) of the instance. @@ -13411,6 +13752,8 @@ type RemoteAccessSession struct { // This flag is set to true if remote debugging is enabled for the remote access // session. + // + // Remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). RemoteDebugEnabled *bool `locationName:"remoteDebugEnabled" type:"boolean"` // The Amazon Resource Name (ARN) for the app to be recorded in the remote access @@ -13724,61 +14067,89 @@ type Rule struct { // The supported operators for each attribute are provided in the following // list. // - // APPIUM_VERSIONThe Appium version for the test. + // APPIUM_VERSION + // + // The Appium version for the test. // // Supported operators: CONTAINS // - // ARNThe Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". + // ARN + // + // The Amazon Resource Name (ARN) of the device. For example, "arn:aws:devicefarm:us-west-2::device:12345Example". // // Supported operators: EQUALS, IN, NOT_IN // - // AVAILABILITYThe current availability of the device. Valid values are "AVAILABLE", - // "HIGHLY_AVAILABLE", "BUSY", or "TEMPORARY_NOT_AVAILABLE". + // AVAILABILITY + // + // The current availability of the device. Valid values are "AVAILABLE", "HIGHLY_AVAILABLE", + // "BUSY", or "TEMPORARY_NOT_AVAILABLE". // // Supported operators: EQUALS // - // FLEET_TYPEThe fleet type. Valid values are "PUBLIC" or "PRIVATE". + // FLEET_TYPE + // + // The fleet type. Valid values are "PUBLIC" or "PRIVATE". // // Supported operators: EQUALS // - // FORM_FACTORThe device form factor. Valid values are "PHONE" or "TABLET". + // FORM_FACTOR + // + // The device form factor. Valid values are "PHONE" or "TABLET". // // Supported operators: EQUALS, IN, NOT_IN // - // INSTANCE_ARNThe Amazon Resource Name (ARN) of the device instance. + // INSTANCE_ARN + // + // The Amazon Resource Name (ARN) of the device instance. // // Supported operators: IN, NOT_IN // - // INSTANCE_LABELSThe label of the device instance. + // INSTANCE_LABELS + // + // The label of the device instance. // // Supported operators: CONTAINS // - // MANUFACTURERThe device manufacturer. For example, "Apple". + // MANUFACTURER + // + // The device manufacturer. For example, "Apple". // // Supported operators: EQUALS, IN, NOT_IN // - // MODELThe device model, such as "Apple iPad Air 2" or "Google Pixel". + // MODEL + // + // The device model, such as "Apple iPad Air 2" or "Google Pixel". // // Supported operators: CONTAINS, EQUALS, IN, NOT_IN // - // OS_VERSIONThe operating system version. For example, "10.3.2". + // OS_VERSION + // + // The operating system version. For example, "10.3.2". // // Supported operators: EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, IN, LESS_THAN, // LESS_THAN_OR_EQUALS, NOT_IN // - // PLATFORMThe device platform. Valid values are "ANDROID" or "IOS". + // PLATFORM + // + // The device platform. Valid values are "ANDROID" or "IOS". 
// // Supported operators: EQUALS, IN, NOT_IN // - // REMOTE_ACCESS_ENABLEDWhether the device is enabled for remote access. Valid - // values are "TRUE" or "FALSE". + // REMOTE_ACCESS_ENABLED + // + // Whether the device is enabled for remote access. Valid values are "TRUE" + // or "FALSE". // // Supported operators: EQUALS // - // REMOTE_DEBUG_ENABLEDWhether the device is enabled for remote debugging. Valid - // values are "TRUE" or "FALSE". + // REMOTE_DEBUG_ENABLED + // + // Whether the device is enabled for remote debugging. Valid values are "TRUE" + // or "FALSE". // // Supported operators: EQUALS + // + // This filter will be ignored, as remote debugging is no longer supported (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html). Attribute *string `locationName:"attribute" type:"string" enum:"DeviceAttribute"` // Specifies how Device Farm compares the rule's attribute to the value. For @@ -14001,9 +14372,9 @@ type Run struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. + // * XCTEST_UI: The Xcode UI test type. Type *string `locationName:"type" type:"string" enum:"TestType"` // The Device Farm console URL for the recording of the run. @@ -14295,7 +14666,8 @@ func (s *Sample) SetUrl(v string) *Sample { type ScheduleRunConfiguration struct { _ struct{} `type:"structure"` - // A list of auxiliary apps for the run. + // A list of Upload ARNs for app packages that will be installed alongside your + // app. AuxiliaryApps []*string `locationName:"auxiliaryApps" type:"list"` // Specifies the billing method for a test run: metered or unmetered. If the @@ -14597,15 +14969,11 @@ type ScheduleRunTest struct { // For Appium tests (all types): // // * appium_version: The Appium version. Currently supported values are "1.6.5" - // (and higher), "latest", and "default". - // - // “latest” will run the latest Appium version supported by Device Farm (1.9.1). - // - // For “default”, Device Farm will choose a compatible version of Appium for - // the device. The current behavior is to run 1.7.2 on Android devices and - // iOS 9 and earlier, 1.7.2 for iOS 10 and later. - // - // This behavior is subject to change. + // (and higher), "latest", and "default". “latest” will run the latest + // Appium version supported by Device Farm (1.9.1). For “default”, Device + // Farm will choose a compatible version of Appium for the device. The current + // behavior is to run 1.7.2 on Android devices and iOS 9 and earlier, 1.7.2 + // for iOS 10 and later. This behavior is subject to change. // // For Fuzz tests (Android only): // @@ -14628,35 +14996,22 @@ type ScheduleRunTest struct { // // For Instrumentation: // - // * filter: A test filter string. Examples: - // - // Running a single test case: "com.android.abc.Test1" - // - // Running a single test: "com.android.abc.Test1#smoke" - // - // Running multiple tests: "com.android.abc.Test1,com.android.abc.Test2" + // * filter: A test filter string. Examples: Running a single test case: + // "com.android.abc.Test1" Running a single test: "com.android.abc.Test1#smoke" + // Running multiple tests: "com.android.abc.Test1,com.android.abc.Test2" // // For XCTest and XCTestUI: // - // * filter: A test filter string. 
Examples: - // - // Running a single test class: "LoginTests" - // - // Running a multiple test classes: "LoginTests,SmokeTests" - // - // Running a single test: "LoginTests/testValid" - // - // Running multiple tests: "LoginTests/testValid,LoginTests/testInvalid" + // * filter: A test filter string. Examples: Running a single test class: + // "LoginTests" Running multiple test classes: "LoginTests,SmokeTests" + // Running a single test: "LoginTests/testValid" Running multiple tests: + // "LoginTests/testValid,LoginTests/testInvalid" // // For UIAutomator: // - // * filter: A test filter string. Examples: - // - // Running a single test case: "com.android.abc.Test1" - // - // Running a single test: "com.android.abc.Test1#smoke" - // - // Running multiple tests: "com.android.abc.Test1,com.android.abc.Test2" + // * filter: A test filter string. Examples: Running a single test case: + // "com.android.abc.Test1" Running a single test: "com.android.abc.Test1#smoke" + // Running multiple tests: "com.android.abc.Test1,com.android.abc.Test2" Parameters map[string]*string `locationName:"parameters" type:"map"` // The ARN of the uploaded test that will be run. @@ -14703,9 +15058,9 @@ type ScheduleRunTest struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. + // * XCTEST_UI: The Xcode UI test type. // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"TestType"` @@ -15078,9 +15433,9 @@ type Suite struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. + // * XCTEST_UI: The Xcode UI test type. Type *string `locationName:"type" type:"string" enum:"TestType"` } @@ -15160,6 +15515,151 @@ func (s *Suite) SetType(v string) *Suite { return s } +// The metadata that you apply to a resource to help you categorize and organize +// it. Each tag consists of a key and an optional value, both of which you define. +// Tag keys can have a maximum character length of 128 characters, and tag values +// can have a maximum length of 256 characters. +type Tag struct { + _ struct{} `type:"structure"` + + // One part of a key-value pair that makes up a tag. A key is a general label + // that acts like a category for more specific tag values. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The optional part of a key-value pair that makes up a tag. A value acts as + // a descriptor within a tag category (key). + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource(s) to which to add tags. You + // can associate tags with the following Device Farm resources: PROJECT, RUN, + // NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, + // DEVICE, and VPCE_CONFIGURATION. + // + // ResourceARN is a required field + ResourceARN *string `min:"32" type:"string" required:"true"` + + // The tags to add to the resource. A tag is an array of key-value pairs. Tag + // keys can have a maximum character length of 128 characters, and tag values + // can have a maximum length of 256 characters. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 32)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Represents a condition that is evaluated. type Test struct { _ struct{} `type:"structure"` @@ -15268,9 +15768,9 @@ type Test struct { // // * UIAUTOMATOR: The uiautomator type. // - // * XCTEST: The XCode test type. + // * XCTEST: The Xcode test type. // - // * XCTEST_UI: The XCode UI test type. + // * XCTEST_UI: The Xcode UI test type. Type *string `locationName:"type" type:"string" enum:"TestType"` } @@ -15416,6 +15916,78 @@ func (s *UniqueProblem) SetProblems(v []*Problem) *UniqueProblem { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource(s) from which to delete tags. + // You can associate tags with the following Device Farm resources: PROJECT, + // RUN, NETWORK_PROFILE, INSTANCE_PROFILE, DEVICE_INSTANCE, SESSION, DEVICE_POOL, + // DEVICE, and VPCE_CONFIGURATION. 
+ // + // ResourceARN is a required field + ResourceARN *string `min:"32" type:"string" required:"true"` + + // The keys of the tags to be removed. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 32)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateDeviceInstanceInput struct { _ struct{} `type:"structure"` @@ -15506,7 +16078,7 @@ func (s *UpdateDeviceInstanceOutput) SetDeviceInstance(v *DeviceInstance) *Updat type UpdateDevicePoolInput struct { _ struct{} `type:"structure"` - // The Amazon Resourc Name (ARN) of the Device Farm device pool you wish to + // The Amazon Resource Name (ARN) of the Device Farm device pool you wish to // update. // // Arn is a required field @@ -15756,7 +16328,7 @@ type UpdateNetworkProfileInput struct { // Arn is a required field Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` - // The descriptoin of the network profile about which you are returning information. + // The description of the network profile about which you are returning information. Description *string `locationName:"description" type:"string"` // The data throughput rate in bits per second, as an integer from 0 to 104857600. @@ -16250,7 +16822,7 @@ type Upload struct { // // * IOS_APP: An iOS upload. // - // * WEB_APP: A web appliction upload. + // * WEB_APP: A web application upload. // // * EXTERNAL_DATA: An external data upload. // @@ -16288,9 +16860,9 @@ type Upload struct { // // * UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. // - // * XCTEST_TEST_PACKAGE: An XCode test package upload. + // * XCTEST_TEST_PACKAGE: An Xcode test package upload. // - // * XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload. + // * XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload. // // * APPIUM_JAVA_JUNIT_TEST_SPEC: An Appium Java JUnit test spec upload. // @@ -16319,7 +16891,7 @@ type Upload struct { // // * INSTRUMENTATION_TEST_SPEC: An instrumentation test spec upload. // - // * XCTEST_UI_TEST_SPEC: An XCode UI test spec upload. + // * XCTEST_UI_TEST_SPEC: An Xcode UI test spec upload. 
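UntagResourceInput, completed above, mirrors TagResourceInput but carries only key names. A sketch removing one of the tags applied in the earlier example, same placeholder ARN:

_, err = client.UntagResource(&devicefarm.UntagResourceInput{
	ResourceARN: aws.String(arn),
	TagKeys:     []*string{aws.String("env")},
})
if err != nil {
	log.Fatal(err)
}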
Type *string `locationName:"type" type:"string" enum:"UploadType"` // The pre-signed Amazon S3 URL that was used to store a file through a corresponding diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/errors.go index 2d9e5abdfbf..4a479520cbe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/errors.go @@ -47,4 +47,24 @@ const ( // // There was a problem with the service account. ErrCodeServiceAccountException = "ServiceAccountException" + + // ErrCodeTagOperationException for service response error code + // "TagOperationException". + // + // The operation was not successful. Try again. + ErrCodeTagOperationException = "TagOperationException" + + // ErrCodeTagPolicyException for service response error code + // "TagPolicyException". + // + // The request doesn't comply with the AWS Identity and Access Management (IAM) + // tag policy. Correct your request and then retry it. + ErrCodeTagPolicyException = "TagPolicyException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The list of tags on the repository is over the limit. The maximum number + // of tags that can be applied to a repository is 50. + ErrCodeTooManyTagsException = "TooManyTagsException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go index 0f2354a4e1c..4e10bf876dc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go @@ -46,11 +46,11 @@ const ( // svc := devicefarm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DeviceFarm { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DeviceFarm { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DeviceFarm { svc := &DeviceFarm{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-06-23", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go index 22c7465fbaa..100f224d179 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go @@ -257,6 +257,12 @@ func (c *DirectConnect) AllocateHostedConnectionRequest(input *AllocateHostedCon // API operation AllocateHostedConnection for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -343,6 +349,12 @@ func (c *DirectConnect) AllocatePrivateVirtualInterfaceRequest(input *AllocatePr // API operation AllocatePrivateVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -437,6 +449,12 @@ func (c *DirectConnect) AllocatePublicVirtualInterfaceRequest(input *AllocatePub // API operation AllocatePublicVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -529,6 +547,12 @@ func (c *DirectConnect) AllocateTransitVirtualInterfaceRequest(input *AllocateTr // API operation AllocateTransitVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -1343,6 +1367,12 @@ func (c *DirectConnect) CreateConnectionRequest(input *CreateConnectionInput) (r // API operation CreateConnection for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. 
+// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -1707,6 +1737,12 @@ func (c *DirectConnect) CreateInterconnectRequest(input *CreateInterconnectInput // API operation CreateInterconnect for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -1813,6 +1849,12 @@ func (c *DirectConnect) CreateLagRequest(input *CreateLagInput) (req *request.Re // API operation CreateLag for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -1901,6 +1943,12 @@ func (c *DirectConnect) CreatePrivateVirtualInterfaceRequest(input *CreatePrivat // API operation CreatePrivateVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -1989,6 +2037,12 @@ func (c *DirectConnect) CreatePublicVirtualInterfaceRequest(input *CreatePublicV // API operation CreatePublicVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -2061,11 +2115,17 @@ func (c *DirectConnect) CreateTransitVirtualInterfaceRequest(input *CreateTransi // CreateTransitVirtualInterface API operation for AWS Direct Connect. // -// Creates a transit virtual interface. A transit virtual interface is a VLAN -// that transports traffic from a Direct Connect gateway to one or more transit +// Creates a transit virtual interface. A transit virtual interface should be +// used to access one or more transit gateways associated with Direct Connect // gateways. A transit virtual interface enables the connection of multiple // VPCs attached to a transit gateway to a Direct Connect gateway. // +// If you associate your transit gateway with one or more Direct Connect gateways, +// the Autonomous System Number (ASN) used by the transit gateway and the Direct +// Connect gateway must be different. For example, if you use the default ASN +// 64512 for both your transit gateway and Direct Connect gateway, the association +// request fails. +// // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2074,6 +2134,12 @@ func (c *DirectConnect) CreateTransitVirtualInterfaceRequest(input *CreateTransi // API operation CreateTransitVirtualInterface for usage and error information. // // Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// You have reached the limit on the number of tags that can be assigned. +// // * ErrCodeServerException "DirectConnectServerException" // A server-side error occurred. // @@ -2320,8 +2386,7 @@ func (c *DirectConnect) DeleteDirectConnectGatewayRequest(input *DeleteDirectCon // // Deletes the specified Direct Connect gateway. You must first delete all virtual // interfaces that are attached to the Direct Connect gateway and disassociate -// all virtual private gateways that are associated with the Direct Connect -// gateway. +// all virtual private gateways associated with the Direct Connect gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2406,6 +2471,11 @@ func (c *DirectConnect) DeleteDirectConnectGatewayAssociationRequest(input *Dele // Deletes the association between the specified Direct Connect gateway and // virtual private gateway. // +// We recommend that you specify the associationID to delete the association. +// Alternatively, if you own a virtual gateway and a Direct Connect gateway association, +// you can specify the virtualGatewayId and directConnectGatewayId to delete +// an association. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4720,8 +4790,10 @@ type AcceptDirectConnectGatewayAssociationProposalInput struct { // DirectConnectGatewayId is a required field DirectConnectGatewayId *string `locationName:"directConnectGatewayId" type:"string" required:"true"` - // Overrides the existing Amazon VPC prefixes advertised to the Direct Connect - // gateway. + // Overrides the Amazon VPC prefixes advertised to the Direct Connect gateway. + // + // For information about how to set the prefixes, see Allowed Prefixes (https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html#allowed-prefixes) + // in the AWS Direct Connect User Guide. OverrideAllowedPrefixesToDirectConnectGateway []*RouteFilterPrefix `locationName:"overrideAllowedPrefixesToDirectConnectGateway" type:"list"` // The ID of the request proposal. @@ -4931,6 +5003,9 @@ type AllocateHostedConnectionInput struct { // OwnerAccount is a required field OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` + // The tags associated with the connection. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The dedicated VLAN provisioned to the hosted connection.
// // Vlan is a required field @@ -4962,9 +5037,22 @@ func (s *AllocateHostedConnectionInput) Validate() error { if s.OwnerAccount == nil { invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Vlan == nil { invalidParams.Add(request.NewErrParamRequired("Vlan")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4996,6 +5084,12 @@ func (s *AllocateHostedConnectionInput) SetOwnerAccount(v string) *AllocateHoste return s } +// SetTags sets the Tags field's value. +func (s *AllocateHostedConnectionInput) SetTags(v []*Tag) *AllocateHostedConnectionInput { + s.Tags = v + return s +} + // SetVlan sets the Vlan field's value. func (s *AllocateHostedConnectionInput) SetVlan(v int64) *AllocateHostedConnectionInput { s.Vlan = &v @@ -5185,6 +5279,11 @@ func (s *AllocateTransitVirtualInterfaceInput) Validate() error { if s.OwnerAccount == nil { invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) } + if s.NewTransitVirtualInterfaceAllocation != nil { + if err := s.NewTransitVirtualInterfaceAllocation.Validate(); err != nil { + invalidParams.AddNested("NewTransitVirtualInterfaceAllocation", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5454,7 +5553,8 @@ type BGPPeer struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The Direct Connect endpoint on which the BGP peer terminates. @@ -5649,7 +5749,7 @@ type ConfirmPrivateVirtualInterfaceInput struct { DirectConnectGatewayId *string `locationName:"directConnectGatewayId" type:"string"` // The ID of the virtual private gateway. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The ID of the virtual interface. // @@ -6011,9 +6111,15 @@ type Connection struct { // The name of the AWS Direct Connect service provider associated with the connection. PartnerName *string `locationName:"partnerName" type:"string"` + // The name of the service provider associated with the connection. + ProviderName *string `locationName:"providerName" type:"string"` + // The AWS Region where the connection is located. Region *string `locationName:"region" type:"string"` + // The tags associated with the connection. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The ID of the VLAN. Vlan *int64 `locationName:"vlan" type:"integer"` } @@ -6106,12 +6212,24 @@ func (s *Connection) SetPartnerName(v string) *Connection { return s } +// SetProviderName sets the ProviderName field's value. +func (s *Connection) SetProviderName(v string) *Connection { + s.ProviderName = &v + return s +} + // SetRegion sets the Region field's value. func (s *Connection) SetRegion(v string) *Connection { s.Region = &v return s } +// SetTags sets the Tags field's value.
+func (s *Connection) SetTags(v []*Tag) *Connection { + s.Tags = v + return s +} + // SetVlan sets the Vlan field's value. func (s *Connection) SetVlan(v int64) *Connection { s.Vlan = &v @@ -6216,6 +6334,12 @@ type CreateConnectionInput struct { // // Location is a required field Location *string `locationName:"location" type:"string" required:"true"` + + // The name of the service provider associated with the requested connection. + ProviderName *string `locationName:"providerName" type:"string"` + + // The tags to associate with the connection. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` } // String returns the string representation @@ -6240,6 +6364,19 @@ func (s *CreateConnectionInput) Validate() error { if s.Location == nil { invalidParams.Add(request.NewErrParamRequired("Location")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6271,10 +6408,27 @@ func (s *CreateConnectionInput) SetLocation(v string) *CreateConnectionInput { return s } +// SetProviderName sets the ProviderName field's value. +func (s *CreateConnectionInput) SetProviderName(v string) *CreateConnectionInput { + s.ProviderName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateConnectionInput) SetTags(v []*Tag) *CreateConnectionInput { + s.Tags = v + return s +} + type CreateDirectConnectGatewayAssociationInput struct { _ struct{} `type:"structure"` // The Amazon VPC prefixes to advertise to the Direct Connect gateway + // + // This parameter is required when you create an association to a transit gateway. + // + // For information about how to set the prefixes, see Allowed Prefixes (https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html#allowed-prefixes) + // in the AWS Direct Connect User Guide. AddAllowedPrefixesToDirectConnectGateway []*RouteFilterPrefix `locationName:"addAllowedPrefixesToDirectConnectGateway" type:"list"` // The ID of the Direct Connect gateway. @@ -6286,7 +6440,7 @@ type CreateDirectConnectGatewayAssociationInput struct { GatewayId *string `locationName:"gatewayId" type:"string"` // The ID of the virtual private gateway. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` } // String returns the string representation @@ -6559,6 +6713,12 @@ type CreateInterconnectInput struct { // // Location is a required field Location *string `locationName:"location" type:"string" required:"true"` + + // The name of the service provider associated with the interconnect. + ProviderName *string `locationName:"providerName" type:"string"` + + // The tags to associate with the interconnect.
+ Tags []*Tag `locationName:"tags" min:"1" type:"list"` } // String returns the string representation @@ -6583,6 +6743,19 @@ func (s *CreateInterconnectInput) Validate() error { if s.Location == nil { invalidParams.Add(request.NewErrParamRequired("Location")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6614,9 +6787,24 @@ func (s *CreateInterconnectInput) SetLocation(v string) *CreateInterconnectInput { return s } +// SetProviderName sets the ProviderName field's value. +func (s *CreateInterconnectInput) SetProviderName(v string) *CreateInterconnectInput { + s.ProviderName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateInterconnectInput) SetTags(v []*Tag) *CreateInterconnectInput { + s.Tags = v + return s +} + type CreateLagInput struct { _ struct{} `type:"structure"` + // The tags to associate with the automatically created LAGs. + ChildConnectionTags []*Tag `locationName:"childConnectionTags" min:"1" type:"list"` + // The ID of an existing connection to migrate to the LAG. ConnectionId *string `locationName:"connectionId" type:"string"` @@ -6642,6 +6830,12 @@ type CreateLagInput struct { // // NumberOfConnections is a required field NumberOfConnections *int64 `locationName:"numberOfConnections" type:"integer" required:"true"` + + // The name of the service provider associated with the LAG. + ProviderName *string `locationName:"providerName" type:"string"` + + // The tags to associate with the LAG. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` } // String returns the string representation @@ -6657,6 +6851,9 @@ func (s CreateLagInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateLagInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateLagInput"} + if s.ChildConnectionTags != nil && len(s.ChildConnectionTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChildConnectionTags", 1)) + } if s.ConnectionsBandwidth == nil { invalidParams.Add(request.NewErrParamRequired("ConnectionsBandwidth")) } @@ -6669,6 +6866,29 @@ func (s *CreateLagInput) Validate() error { if s.NumberOfConnections == nil { invalidParams.Add(request.NewErrParamRequired("NumberOfConnections")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.ChildConnectionTags != nil { + for i, v := range s.ChildConnectionTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ChildConnectionTags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6676,6 +6896,12 @@ func (s *CreateLagInput) Validate() error { return nil } +// SetChildConnectionTags sets the ChildConnectionTags field's value. +func (s *CreateLagInput) SetChildConnectionTags(v []*Tag) *CreateLagInput { + s.ChildConnectionTags = v + return s +} + // SetConnectionId sets the ConnectionId field's value.
func (s *CreateLagInput) SetConnectionId(v string) *CreateLagInput { s.ConnectionId = &v @@ -6706,6 +6932,18 @@ func (s *CreateLagInput) SetNumberOfConnections(v int64) *CreateLagInput { return s } +// SetProviderName sets the ProviderName field's value. +func (s *CreateLagInput) SetProviderName(v string) *CreateLagInput { + s.ProviderName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLagInput) SetTags(v []*Tag) *CreateLagInput { + s.Tags = v + return s +} + type CreatePrivateVirtualInterfaceInput struct { _ struct{} `type:"structure"` @@ -6853,6 +7091,11 @@ func (s *CreateTransitVirtualInterfaceInput) Validate() error { if s.NewTransitVirtualInterface == nil { invalidParams.Add(request.NewErrParamRequired("NewTransitVirtualInterface")) } + if s.NewTransitVirtualInterface != nil { + if err := s.NewTransitVirtualInterface.Validate(); err != nil { + invalidParams.AddNested("NewTransitVirtualInterface", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7016,7 +7259,7 @@ type DeleteDirectConnectGatewayAssociationInput struct { DirectConnectGatewayId *string `locationName:"directConnectGatewayId" type:"string"` // The ID of the virtual private gateway. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` } // String returns the string representation @@ -7658,7 +7901,7 @@ type DescribeDirectConnectGatewayAssociationsInput struct { NextToken *string `locationName:"nextToken" type:"string"` // The ID of the virtual private gateway. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` } // String returns the string representation @@ -8529,7 +8772,7 @@ type GatewayAssociation struct { StateChangeError *string `locationName:"stateChangeError" type:"string"` // The ID of the virtual private gateway. Applies only to private virtual interfaces. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The ID of the AWS account that owns the virtual private gateway. VirtualGatewayOwnerAccount *string `locationName:"virtualGatewayOwnerAccount" type:"string"` @@ -8842,8 +9085,14 @@ type Interconnect struct { // The location of the connection. Location *string `locationName:"location" type:"string"` + // The name of the service provider associated with the interconnect. + ProviderName *string `locationName:"providerName" type:"string"` + // The AWS Region where the connection is located. Region *string `locationName:"region" type:"string"` + + // The tags associated with the interconnect. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` } // String returns the string representation @@ -8922,12 +9171,24 @@ func (s *Interconnect) SetLocation(v string) *Interconnect { return s } +// SetProviderName sets the ProviderName field's value. +func (s *Interconnect) SetProviderName(v string) *Interconnect { + s.ProviderName = &v + return s +} + // SetRegion sets the Region field's value. func (s *Interconnect) SetRegion(v string) *Interconnect { s.Region = &v return s } +// SetTags sets the Tags field's value. +func (s *Interconnect) SetTags(v []*Tag) *Interconnect { + s.Tags = v + return s +} + // Information about a link aggregation group (LAG). 
type Lag struct { _ struct{} `type:"structure"` @@ -8994,8 +9255,14 @@ type Lag struct { // The ID of the AWS account that owns the LAG. OwnerAccount *string `locationName:"ownerAccount" type:"string"` + // The name of the service provider associated with the LAG. + ProviderName *string `locationName:"providerName" type:"string"` + // The AWS Region where the connection is located. Region *string `locationName:"region" type:"string"` + + // The tags associated with the LAG. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` } // String returns the string representation @@ -9092,12 +9359,24 @@ func (s *Lag) SetOwnerAccount(v string) *Lag { return s } +// SetProviderName sets the ProviderName field's value. +func (s *Lag) SetProviderName(v string) *Lag { + s.ProviderName = &v + return s +} + // SetRegion sets the Region field's value. func (s *Lag) SetRegion(v string) *Lag { s.Region = &v return s } +// SetTags sets the Tags field's value. +func (s *Lag) SetTags(v []*Tag) *Lag { + s.Tags = v + return s +} + // Information about a Letter of Authorization - Connecting Facility Assignment // (LOA-CFA) for a connection. type Loa struct { @@ -9142,6 +9421,9 @@ type Location struct { // The available port speeds for the location. AvailablePortSpeeds []*string `locationName:"availablePortSpeeds" type:"list"` + // The name of the service provider for the location. + AvailableProviders []*string `locationName:"availableProviders" type:"list"` + // The code for the location. LocationCode *string `locationName:"locationCode" type:"string"` @@ -9169,6 +9451,12 @@ func (s *Location) SetAvailablePortSpeeds(v []*string) *Location { return s } +// SetAvailableProviders sets the AvailableProviders field's value. +func (s *Location) SetAvailableProviders(v []*string) *Location { + s.AvailableProviders = v + return s +} + // SetLocationCode sets the LocationCode field's value. func (s *Location) SetLocationCode(v string) *Location { s.LocationCode = &v @@ -9200,7 +9488,8 @@ type NewBGPPeer struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9259,10 +9548,13 @@ type NewPrivateVirtualInterface struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. // + // The valid values are 1-2147483647. + // // Asn is a required field Asn *int64 `locationName:"asn" type:"integer" required:"true"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9275,8 +9567,11 @@ // and 9001. The default value is 1500. Mtu *int64 `locationName:"mtu" type:"integer"` + // The tags associated with the private virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The ID of the virtual private gateway.
- VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The name of the virtual interface assigned by the customer network. // @@ -9305,12 +9600,25 @@ func (s *NewPrivateVirtualInterface) Validate() error { if s.Asn == nil { invalidParams.Add(request.NewErrParamRequired("Asn")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.VirtualInterfaceName == nil { invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) } if s.Vlan == nil { invalidParams.Add(request.NewErrParamRequired("Vlan")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9360,6 +9668,12 @@ func (s *NewPrivateVirtualInterface) SetMtu(v int64) *NewPrivateVirtualInterface return s } +// SetTags sets the Tags field's value. +func (s *NewPrivateVirtualInterface) SetTags(v []*Tag) *NewPrivateVirtualInterface { + s.Tags = v + return s +} + // SetVirtualGatewayId sets the VirtualGatewayId field's value. func (s *NewPrivateVirtualInterface) SetVirtualGatewayId(v string) *NewPrivateVirtualInterface { s.VirtualGatewayId = &v @@ -9390,10 +9704,13 @@ type NewPrivateVirtualInterfaceAllocation struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. // + // The valid values are 1-2147483647. + // // Asn is a required field Asn *int64 `locationName:"asn" type:"integer" required:"true"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9403,6 +9720,9 @@ type NewPrivateVirtualInterfaceAllocation struct { // and 9001. The default value is 1500. Mtu *int64 `locationName:"mtu" type:"integer"` + // The tags associated with the private virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The name of the virtual interface assigned by the customer network. // // VirtualInterfaceName is a required field @@ -9430,12 +9750,25 @@ func (s *NewPrivateVirtualInterfaceAllocation) Validate() error { if s.Asn == nil { invalidParams.Add(request.NewErrParamRequired("Asn")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.VirtualInterfaceName == nil { invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) } if s.Vlan == nil { invalidParams.Add(request.NewErrParamRequired("Vlan")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9479,6 +9812,12 @@ func (s *NewPrivateVirtualInterfaceAllocation) SetMtu(v int64) *NewPrivateVirtua return s } +// SetTags sets the Tags field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetTags(v []*Tag) *NewPrivateVirtualInterfaceAllocation { + s.Tags = v + return s +} + // SetVirtualInterfaceName sets the VirtualInterfaceName field's value.
func (s *NewPrivateVirtualInterfaceAllocation) SetVirtualInterfaceName(v string) *NewPrivateVirtualInterfaceAllocation { s.VirtualInterfaceName = &v @@ -9503,10 +9842,13 @@ type NewPublicVirtualInterface struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. // + // The valid values are 1-2147483647. + // // Asn is a required field Asn *int64 `locationName:"asn" type:"integer" required:"true"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9516,6 +9858,9 @@ type NewPublicVirtualInterface struct { // public virtual interfaces. RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + // The tags associated with the public virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The name of the virtual interface assigned by the customer network. // // VirtualInterfaceName is a required field @@ -9543,12 +9888,25 @@ func (s *NewPublicVirtualInterface) Validate() error { if s.Asn == nil { invalidParams.Add(request.NewErrParamRequired("Asn")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.VirtualInterfaceName == nil { invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) } if s.Vlan == nil { invalidParams.Add(request.NewErrParamRequired("Vlan")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9592,6 +9950,12 @@ func (s *NewPublicVirtualInterface) SetRouteFilterPrefixes(v []*RouteFilterPrefi return s } +// SetTags sets the Tags field's value. +func (s *NewPublicVirtualInterface) SetTags(v []*Tag) *NewPublicVirtualInterface { + s.Tags = v + return s +} + // SetVirtualInterfaceName sets the VirtualInterfaceName field's value. func (s *NewPublicVirtualInterface) SetVirtualInterfaceName(v string) *NewPublicVirtualInterface { s.VirtualInterfaceName = &v @@ -9616,10 +9980,13 @@ type NewPublicVirtualInterfaceAllocation struct { // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. // + // The valid values are 1-2147483647. + // // Asn is a required field Asn *int64 `locationName:"asn" type:"integer" required:"true"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9629,6 +9996,9 @@ type NewPublicVirtualInterfaceAllocation struct { // public virtual interfaces. RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + // The tags associated with the public virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The name of the virtual interface assigned by the customer network.
// // VirtualInterfaceName is a required field @@ -9656,12 +10026,25 @@ func (s *NewPublicVirtualInterfaceAllocation) Validate() error { if s.Asn == nil { invalidParams.Add(request.NewErrParamRequired("Asn")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.VirtualInterfaceName == nil { invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) } if s.Vlan == nil { invalidParams.Add(request.NewErrParamRequired("Vlan")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9705,6 +10088,12 @@ func (s *NewPublicVirtualInterfaceAllocation) SetRouteFilterPrefixes(v []*RouteF return s } +// SetTags sets the Tags field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetTags(v []*Tag) *NewPublicVirtualInterfaceAllocation { + s.Tags = v + return s +} + // SetVirtualInterfaceName sets the VirtualInterfaceName field's value. func (s *NewPublicVirtualInterfaceAllocation) SetVirtualInterfaceName(v string) *NewPublicVirtualInterfaceAllocation { s.VirtualInterfaceName = &v @@ -9728,9 +10117,12 @@ type NewTransitVirtualInterface struct { AmazonAddress *string `locationName:"amazonAddress" type:"string"` // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // The valid values are 1-2147483647. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9743,6 +10135,9 @@ type NewTransitVirtualInterface struct { // and 9001. The default value is 1500. Mtu *int64 `locationName:"mtu" type:"integer"` + // The tags associated with the transit virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The name of the virtual interface assigned by the customer network. VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` @@ -9760,6 +10155,29 @@ func (s NewTransitVirtualInterface) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewTransitVirtualInterface) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewTransitVirtualInterface"} + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAddressFamily sets the AddressFamily field's value. func (s *NewTransitVirtualInterface) SetAddressFamily(v string) *NewTransitVirtualInterface { s.AddressFamily = &v @@ -9802,6 +10220,12 @@ func (s *NewTransitVirtualInterface) SetMtu(v int64) *NewTransitVirtualInterface return s } +// SetTags sets the Tags field's value.
+func (s *NewTransitVirtualInterface) SetTags(v []*Tag) *NewTransitVirtualInterface { + s.Tags = v + return s +} + // SetVirtualInterfaceName sets the VirtualInterfaceName field's value. func (s *NewTransitVirtualInterface) SetVirtualInterfaceName(v string) *NewTransitVirtualInterface { s.VirtualInterfaceName = &v @@ -9825,9 +10249,12 @@ type NewTransitVirtualInterfaceAllocation struct { AmazonAddress *string `locationName:"amazonAddress" type:"string"` // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // The valid values are 1-2147483647. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The IP address assigned to the customer interface. @@ -9837,6 +10264,9 @@ type NewTransitVirtualInterfaceAllocation struct { // and 9001. The default value is 1500. Mtu *int64 `locationName:"mtu" type:"integer"` + // The tags associated with the transit virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The name of the virtual interface assigned by the customer network. VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` @@ -9854,6 +10284,29 @@ func (s NewTransitVirtualInterfaceAllocation) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewTransitVirtualInterfaceAllocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewTransitVirtualInterfaceAllocation"} + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAddressFamily sets the AddressFamily field's value. func (s *NewTransitVirtualInterfaceAllocation) SetAddressFamily(v string) *NewTransitVirtualInterfaceAllocation { s.AddressFamily = &v @@ -9890,6 +10343,12 @@ func (s *NewTransitVirtualInterfaceAllocation) SetMtu(v int64) *NewTransitVirtua return s } +// SetTags sets the Tags field's value. +func (s *NewTransitVirtualInterfaceAllocation) SetTags(v []*Tag) *NewTransitVirtualInterfaceAllocation { + s.Tags = v + return s +} + // SetVirtualInterfaceName sets the VirtualInterfaceName field's value. func (s *NewTransitVirtualInterfaceAllocation) SetVirtualInterfaceName(v string) *NewTransitVirtualInterfaceAllocation { s.VirtualInterfaceName = &v @@ -10341,9 +10800,12 @@ type UpdateVirtualInterfaceAttributesOutput struct { AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"` // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // The valid values are 1-2147483647. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The Direct Connect endpoint on which the virtual interface terminates.
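Editor's note: with the Validate methods added above, tag problems on a transit virtual interface are now caught client-side before a request is sent. A sketch under assumptions (the fluent setters on CreateTransitVirtualInterfaceInput are presumed to follow the usual generated naming; the ASN and connection ID are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/directconnect"
)

func main() {
	// Tag the new transit virtual interface; a non-nil empty Tags slice would
	// trip the min-length check in the Validate methods added above.
	vif := (&directconnect.NewTransitVirtualInterface{}).
		SetAsn(64515). // placeholder private ASN
		SetVirtualInterfaceName("example-transit-vif").
		SetTags([]*directconnect.Tag{
			{Key: aws.String("team"), Value: aws.String("network")},
		})

	input := (&directconnect.CreateTransitVirtualInterfaceInput{}).
		SetConnectionId("dxcon-example"). // placeholder connection ID
		SetNewTransitVirtualInterface(vif)

	// Validate now recurses into the nested NewTransitVirtualInterface, so
	// malformed tags are rejected before any request is sent.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("input passes client-side validation")
}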
@@ -10384,8 +10846,11 @@ type UpdateVirtualInterfaceAttributesOutput struct { // public virtual interfaces. RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + // The tags associated with the virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The ID of the virtual private gateway. Applies only to private virtual interfaces. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The ID of the virtual interface. VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` @@ -10544,6 +11009,12 @@ func (s *UpdateVirtualInterfaceAttributesOutput) SetRouteFilterPrefixes(v []*Rou return s } +// SetTags sets the Tags field's value. +func (s *UpdateVirtualInterfaceAttributesOutput) SetTags(v []*Tag) *UpdateVirtualInterfaceAttributesOutput { + s.Tags = v + return s +} + // SetVirtualGatewayId sets the VirtualGatewayId field's value. func (s *UpdateVirtualInterfaceAttributesOutput) SetVirtualGatewayId(v string) *UpdateVirtualInterfaceAttributesOutput { s.VirtualGatewayId = &v @@ -10585,7 +11056,7 @@ type VirtualGateway struct { _ struct{} `type:"structure"` // The ID of the virtual private gateway. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The state of the virtual private gateway. The following are the possible // values: @@ -10637,9 +11108,12 @@ type VirtualInterface struct { AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"` // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // The valid values are 1-2147483647. Asn *int64 `locationName:"asn" type:"integer"` - // The authentication key for BGP configuration. + // The authentication key for BGP configuration. This string has a minimum length + // of 6 characters and a maximum length of 80 characters. AuthKey *string `locationName:"authKey" type:"string"` // The Direct Connect endpoint on which the virtual interface terminates. @@ -10680,8 +11154,11 @@ type VirtualInterface struct { // public virtual interfaces. RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + // The tags associated with the virtual interface. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + // The ID of the virtual private gateway. Applies only to private virtual interfaces. - VirtualGatewayId *string `locationName:"virtualGatewayId" deprecated:"true" type:"string"` + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` // The ID of the virtual interface. VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` @@ -10840,6 +11317,12 @@ func (s *VirtualInterface) SetRouteFilterPrefixes(v []*RouteFilterPrefix) *Virtu return s } +// SetTags sets the Tags field's value. +func (s *VirtualInterface) SetTags(v []*Tag) *VirtualInterface { + s.Tags = v + return s +} + // SetVirtualGatewayId sets the VirtualGatewayId field's value.
func (s *VirtualInterface) SetVirtualGatewayId(v string) *VirtualInterface { s.VirtualGatewayId = &v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go index bb182821c59..be5ad460a9f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go @@ -46,11 +46,11 @@ const ( // svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectConnect { svc := &DirectConnect{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-25", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index 72b454d6b97..5805c3fd22d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -2127,7 +2127,7 @@ func (c *DirectoryService) DescribeDomainControllersWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeDomainControllers operation. // pageNum := 0 // err := client.DescribeDomainControllersPages(params, -// func(page *DescribeDomainControllersOutput, lastPage bool) bool { +// func(page *directoryservice.DescribeDomainControllersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2159,10 +2159,12 @@ func (c *DirectoryService) DescribeDomainControllersPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDomainControllersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDomainControllersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4907,7 +4909,10 @@ type AddIpRoutesInput struct { // // Outbound: // - // Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 + // * Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 + // + // These security rules impact an internal network interface that is not exposed + // publicly. 
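Editor's note: the pagination rewrite above changes DescribeDomainControllersPages so that the callback's boolean return stops iteration immediately, rather than letting the pager fetch once more before the loop condition is re-checked. A usage sketch, assuming a standard session and a hypothetical directory ID:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/directoryservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := directoryservice.New(sess)

	// Returning false from the callback now breaks out of the loop at once;
	// the callback is never invoked for pages past the stopping point.
	pageNum := 0
	err := svc.DescribeDomainControllersPages(
		&directoryservice.DescribeDomainControllersInput{
			DirectoryId: aws.String("d-1234567890"), // hypothetical directory ID
		},
		func(page *directoryservice.DescribeDomainControllersOutput, lastPage bool) bool {
			pageNum++
			fmt.Printf("page %d: %d controllers\n", pageNum, len(page.DomainControllers))
			return pageNum < 3 // stop after at most three pages
		})
	if err != nil {
		fmt.Println("describe failed:", err)
	}
}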
UpdateSecurityGroupForDirectoryControllers *bool `type:"boolean"` } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go index 0743c9632ca..5c4c03db34f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go @@ -46,11 +46,11 @@ const ( // svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectoryService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectoryService { svc := &DirectoryService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-16", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go index 2c83090c38c..e3df1b68844 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go @@ -361,6 +361,263 @@ func (c *DLM) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecycle return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/ListTagsForResource +func (c *DLM) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Data Lifecycle Manager. +// +// Lists the tags for the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Data Lifecycle Manager's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The service failed in an unexpected way. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// Bad request. The request is missing required parameters or has invalid parameters. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A requested resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/ListTagsForResource +func (c *DLM) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DLM) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/TagResource +func (c *DLM) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Data Lifecycle Manager. +// +// Adds the specified tags to the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Data Lifecycle Manager's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The service failed in an unexpected way. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// Bad request. The request is missing required parameters or has invalid parameters. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A requested resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/TagResource +func (c *DLM) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DLM) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/UntagResource +func (c *DLM) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Data Lifecycle Manager. +// +// Removes the specified tags from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Data Lifecycle Manager's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// The service failed in an unexpected way. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// Bad request. The request is missing required parameters or has invalid parameters. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A requested resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/UntagResource +func (c *DLM) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DLM) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateLifecyclePolicy = "UpdateLifecyclePolicy" // UpdateLifecyclePolicyRequest generates a "aws/request.Request" representing the @@ -467,8 +724,6 @@ type CreateLifecyclePolicyInput struct { // The configuration details of the lifecycle policy. // - // Target tags cannot be re-used across lifecycle policies. - // // PolicyDetails is a required field PolicyDetails *PolicyDetails `type:"structure" required:"true"` @@ -476,6 +731,9 @@ type CreateLifecyclePolicyInput struct { // // State is a required field State *string `type:"string" required:"true" enum:"SettablePolicyStateValues"` + + // The tags to apply to the lifecycle policy during creation. 
+ Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -503,6 +761,9 @@ func (s *CreateLifecyclePolicyInput) Validate() error { if s.State == nil { invalidParams.Add(request.NewErrParamRequired("State")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.PolicyDetails != nil { if err := s.PolicyDetails.Validate(); err != nil { invalidParams.AddNested("PolicyDetails", err.(request.ErrInvalidParams)) @@ -539,6 +800,12 @@ func (s *CreateLifecyclePolicyInput) SetState(v string) *CreateLifecyclePolicyIn return s } +// SetTags sets the Tags field's value. +func (s *CreateLifecyclePolicyInput) SetTags(v map[string]*string) *CreateLifecyclePolicyInput { + s.Tags = v + return s +} + type CreateLifecyclePolicyOutput struct { _ struct{} `type:"structure"` @@ -566,7 +833,8 @@ func (s *CreateLifecyclePolicyOutput) SetPolicyId(v string) *CreateLifecyclePoli type CreateRule struct { _ struct{} `type:"structure"` - // The interval. The supported values are 12 and 24. + // The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, + // and 24. // // Interval is a required field Interval *int64 `min:"1" type:"integer" required:"true"` @@ -576,7 +844,7 @@ type CreateRule struct { // IntervalUnit is a required field IntervalUnit *string `type:"string" required:"true" enum:"IntervalUnitValues"` - // The time, in UTC, to start the operation. + // The time, in UTC, to start the operation. The supported format is hh:mm. // // The operation occurs within a one-hour window following the specified time. Times []*string `type:"list"` @@ -870,6 +1138,9 @@ type LifecyclePolicy struct { // specified by the lifecycle policy. ExecutionRoleArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the policy. + PolicyArn *string `type:"string"` + // The configuration of the lifecycle policy PolicyDetails *PolicyDetails `type:"structure"` @@ -878,6 +1149,12 @@ type LifecyclePolicy struct { // The activation state of the lifecycle policy. State *string `type:"string" enum:"GettablePolicyStateValues"` + + // The description of the status. + StatusMessage *string `type:"string"` + + // The tags. + Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -914,6 +1191,12 @@ func (s *LifecyclePolicy) SetExecutionRoleArn(v string) *LifecyclePolicy { return s } +// SetPolicyArn sets the PolicyArn field's value. +func (s *LifecyclePolicy) SetPolicyArn(v string) *LifecyclePolicy { + s.PolicyArn = &v + return s +} + // SetPolicyDetails sets the PolicyDetails field's value. func (s *LifecyclePolicy) SetPolicyDetails(v *PolicyDetails) *LifecyclePolicy { s.PolicyDetails = v @@ -932,6 +1215,18 @@ func (s *LifecyclePolicy) SetState(v string) *LifecyclePolicy { return s } +// SetStatusMessage sets the StatusMessage field's value. +func (s *LifecyclePolicy) SetStatusMessage(v string) *LifecyclePolicy { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecyclePolicy) SetTags(v map[string]*string) *LifecyclePolicy { + s.Tags = v + return s +} + // Summary information about a lifecycle policy. type LifecyclePolicySummary struct { _ struct{} `type:"structure"` @@ -944,6 +1239,9 @@ type LifecyclePolicySummary struct { // The activation state of the lifecycle policy. State *string `type:"string" enum:"GettablePolicyStateValues"` + + // The tags. 
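+	//
+	// A hedged reading sketch (illustrative, not part of the generated code),
+	// where summary is an assumed *dlm.LifecyclePolicySummary:
+	//
+	//    for k, v := range summary.Tags {
+	//        fmt.Printf("%s=%s\n", k, aws.StringValue(v))
+	//    }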
+ Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -974,10 +1272,114 @@ func (s *LifecyclePolicySummary) SetState(v string) *LifecyclePolicySummary { return s } +// SetTags sets the Tags field's value. +func (s *LifecyclePolicySummary) SetTags(v map[string]*string) *LifecyclePolicySummary { + s.Tags = v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + Tags map[string]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Optional parameters that can be added to the policy. The set of valid parameters +// depends on the combination of policyType and resourceType values. +type Parameters struct { + _ struct{} `type:"structure"` + + // When executing an EBS Snapshot Management – Instance policy, execute all + // CreateSnapshots calls with the excludeBootVolume set to the supplied field. + // Defaults to false. Only valid for EBS Snapshot Management – Instance policies. + ExcludeBootVolume *bool `type:"boolean"` +} + +// String returns the string representation +func (s Parameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameters) GoString() string { + return s.String() +} + +// SetExcludeBootVolume sets the ExcludeBootVolume field's value. +func (s *Parameters) SetExcludeBootVolume(v bool) *Parameters { + s.ExcludeBootVolume = &v + return s +} + // Specifies the configuration of a lifecycle policy. type PolicyDetails struct { _ struct{} `type:"structure"` + // A set of optional parameters that can be provided by the policy. + Parameters *Parameters `type:"structure"` + + // This field determines the valid target resource types and actions a policy + // can manage. This field defaults to EBS_SNAPSHOT_MANAGEMENT if not present. 
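+	//
+	// A hedged construction sketch (illustrative, not part of the generated
+	// code) combining the new PolicyType and Parameters fields added in this
+	// change:
+	//
+	//    details := &dlm.PolicyDetails{}
+	//    details.SetPolicyType(dlm.PolicyTypeValuesEbsSnapshotManagement)
+	//    details.SetParameters(&dlm.Parameters{ExcludeBootVolume: aws.Bool(true)})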
+ PolicyType *string `type:"string" enum:"PolicyTypeValues"` + // The resource type. ResourceTypes []*string `min:"1" type:"list"` @@ -1037,6 +1439,18 @@ func (s *PolicyDetails) Validate() error { return nil } +// SetParameters sets the Parameters field's value. +func (s *PolicyDetails) SetParameters(v *Parameters) *PolicyDetails { + s.Parameters = v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *PolicyDetails) SetPolicyType(v string) *PolicyDetails { + s.PolicyType = &v + return s +} + // SetResourceTypes sets the ResourceTypes field's value. func (s *PolicyDetails) SetResourceTypes(v []*string) *PolicyDetails { s.ResourceTypes = v @@ -1101,6 +1515,8 @@ func (s *RetainRule) SetCount(v int64) *RetainRule { type Schedule struct { _ struct{} `type:"structure"` + // Copy all user-defined tags on a source volume to snapshots of the volume + // created by this policy. CopyTags *bool `type:"boolean"` // The create rule. @@ -1115,6 +1531,12 @@ type Schedule struct { // The tags to apply to policy-created resources. These user-defined tags are // in addition to the AWS-added lifecycle tags. TagsToAdd []*Tag `type:"list"` + + // A collection of key/value pairs with values determined dynamically when the + // policy is executed. Keys may be any valid Amazon EC2 tag key. Values must + // be in one of the two following formats: $(instance-id) or $(timestamp). Variable + // tags are only valid for EBS Snapshot Management – Instance policies. + VariableTags []*Tag `type:"list"` } // String returns the string representation @@ -1150,6 +1572,16 @@ func (s *Schedule) Validate() error { } } } + if s.VariableTags != nil { + for i, v := range s.VariableTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VariableTags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1187,6 +1619,12 @@ func (s *Schedule) SetTagsToAdd(v []*Tag) *Schedule { return s } +// SetVariableTags sets the VariableTags field's value. +func (s *Schedule) SetVariableTags(v []*Tag) *Schedule { + s.VariableTags = v + return s +} + // Specifies a tag for a resource. type Tag struct { _ struct{} `type:"structure"` @@ -1240,6 +1678,150 @@ func (s *Tag) SetValue(v string) *Tag { return s } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // One or more tags. + // + // Tags is a required field + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
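+//
+// A hedged construction sketch (illustrative, not part of the generated code)
+// of an input that passes this validation; the ARN and tag values are assumed
+// placeholders:
+//
+//    input := &dlm.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:dlm:us-east-1:123456789012:policy/policy-0123456789abcdef0"), // assumed
+//        Tags:        aws.StringMap(map[string]string{"team": "storage"}),                              // assumed
+//    }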
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tag keys. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateLifecyclePolicyInput struct { _ struct{} `type:"structure"` @@ -1250,9 +1832,8 @@ type UpdateLifecyclePolicyInput struct { // specified by the lifecycle policy. ExecutionRoleArn *string `type:"string"` - // The configuration of the lifecycle policy. 
- // - // Target tags cannot be re-used across policies. + // The configuration of the lifecycle policy. You cannot update the policy type + // or the resource type. PolicyDetails *PolicyDetails `type:"structure"` // The identifier of the lifecycle policy. @@ -1355,9 +1936,17 @@ const ( IntervalUnitValuesHours = "HOURS" ) +const ( + // PolicyTypeValuesEbsSnapshotManagement is a PolicyTypeValues enum value + PolicyTypeValuesEbsSnapshotManagement = "EBS_SNAPSHOT_MANAGEMENT" +) + const ( // ResourceTypeValuesVolume is a ResourceTypeValues enum value ResourceTypeValuesVolume = "VOLUME" + + // ResourceTypeValuesInstance is a ResourceTypeValues enum value + ResourceTypeValuesInstance = "INSTANCE" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go index b062fe30d63..ada01c825d7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DLM { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "dlm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DLM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DLM { svc := &DLM{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-12", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go index 471a189f11e..8074f02c7a1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go @@ -160,6 +160,12 @@ func (c *DocDB) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintena // * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" // The specified resource ID was not found. // +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The DB cluster isn't in a valid state. +// +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The specified DB instance isn't in the available state. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/ApplyPendingMaintenanceAction func (c *DocDB) ApplyPendingMaintenanceAction(input *ApplyPendingMaintenanceActionInput) (*ApplyPendingMaintenanceActionOutput, error) { req, out := c.ApplyPendingMaintenanceActionRequest(input) @@ -1360,6 +1366,86 @@ func (c *DocDB) DeleteDBSubnetGroupWithContext(ctx aws.Context, input *DeleteDBS return out, req.Send() } +const opDescribeCertificates = "DescribeCertificates" + +// DescribeCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCertificates operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCertificates for more information on using the DescribeCertificates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCertificatesRequest method. +// req, resp := client.DescribeCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/DescribeCertificates +func (c *DocDB) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req *request.Request, output *DescribeCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificatesInput{} + } + + output = &DescribeCertificatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCertificates API operation for Amazon DocumentDB with MongoDB compatibility. +// +// Returns a list of certificate authority (CA) certificates provided by Amazon +// RDS for this AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DocumentDB with MongoDB compatibility's +// API operation DescribeCertificates for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCertificateNotFoundFault "CertificateNotFound" +// CertificateIdentifier doesn't refer to an existing certificate. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/DescribeCertificates +func (c *DocDB) DescribeCertificates(input *DescribeCertificatesInput) (*DescribeCertificatesOutput, error) { + req, out := c.DescribeCertificatesRequest(input) + return out, req.Send() +} + +// DescribeCertificatesWithContext is the same as DescribeCertificates with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCertificates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribeCertificatesWithContext(ctx aws.Context, input *DescribeCertificatesInput, opts ...request.Option) (*DescribeCertificatesOutput, error) { + req, out := c.DescribeCertificatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" // DescribeDBClusterParameterGroupsRequest generates a "aws/request.Request" representing the @@ -1784,7 +1870,7 @@ func (c *DocDB) DescribeDBClustersWithContext(ctx aws.Context, input *DescribeDB // // Example iterating over at most 3 pages of a DescribeDBClusters operation. 
// pageNum := 0 // err := client.DescribeDBClustersPages(params, -// func(page *DescribeDBClustersOutput, lastPage bool) bool { +// func(page *docdb.DescribeDBClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1816,10 +1902,12 @@ func (c *DocDB) DescribeDBClustersPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1914,7 +2002,7 @@ func (c *DocDB) DescribeDBEngineVersionsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeDBEngineVersions operation. // pageNum := 0 // err := client.DescribeDBEngineVersionsPages(params, -// func(page *DescribeDBEngineVersionsOutput, lastPage bool) bool { +// func(page *docdb.DescribeDBEngineVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1946,10 +2034,12 @@ func (c *DocDB) DescribeDBEngineVersionsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2050,7 +2140,7 @@ func (c *DocDB) DescribeDBInstancesWithContext(ctx aws.Context, input *DescribeD // // Example iterating over at most 3 pages of a DescribeDBInstances operation. // pageNum := 0 // err := client.DescribeDBInstancesPages(params, -// func(page *DescribeDBInstancesOutput, lastPage bool) bool { +// func(page *docdb.DescribeDBInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2082,10 +2172,12 @@ func (c *DocDB) DescribeDBInstancesPagesWithContext(ctx aws.Context, input *Desc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2186,7 +2278,7 @@ func (c *DocDB) DescribeDBSubnetGroupsWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeDBSubnetGroups operation. // pageNum := 0 // err := client.DescribeDBSubnetGroupsPages(params, -// func(page *DescribeDBSubnetGroupsOutput, lastPage bool) bool { +// func(page *docdb.DescribeDBSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2218,10 +2310,12 @@ func (c *DocDB) DescribeDBSubnetGroupsPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2470,7 +2564,7 @@ func (c *DocDB) DescribeEventsWithContext(ctx aws.Context, input *DescribeEvents // // Example iterating over at most 3 pages of a DescribeEvents operation. 
// pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *docdb.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2502,10 +2596,12 @@ func (c *DocDB) DescribeEventsPagesWithContext(ctx aws.Context, input *DescribeE }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2600,7 +2696,7 @@ func (c *DocDB) DescribeOrderableDBInstanceOptionsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeOrderableDBInstanceOptions operation. // pageNum := 0 // err := client.DescribeOrderableDBInstanceOptionsPages(params, -// func(page *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { +// func(page *docdb.DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2632,10 +2728,12 @@ func (c *DocDB) DescribeOrderableDBInstanceOptionsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3967,6 +4065,180 @@ func (c *DocDB) RestoreDBClusterToPointInTimeWithContext(ctx aws.Context, input return out, req.Send() } +const opStartDBCluster = "StartDBCluster" + +// StartDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the StartDBCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDBCluster for more information on using the StartDBCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartDBClusterRequest method. +// req, resp := client.StartDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/StartDBCluster +func (c *DocDB) StartDBClusterRequest(input *StartDBClusterInput) (req *request.Request, output *StartDBClusterOutput) { + op := &request.Operation{ + Name: opStartDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartDBClusterInput{} + } + + output = &StartDBClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartDBCluster API operation for Amazon DocumentDB with MongoDB compatibility. +// +// Restarts the stopped cluster that is specified by DBClusterIdentifier. For +// more information, see Stopping and Starting an Amazon DocumentDB Cluster +// (https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-stop-start.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
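+//
+// A hedged usage sketch (illustrative, not part of the generated code), where
+// sess is an assumed *session.Session and the cluster identifier follows the
+// documented example format:
+//
+//    svc := docdb.New(sess)
+//    out, err := svc.StartDBCluster(&docdb.StartDBClusterInput{
+//        DBClusterIdentifier: aws.String("docdb-2019-05-28-15-24-52"),
+//    })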
+// +// See the AWS API reference guide for Amazon DocumentDB with MongoDB compatibility's +// API operation StartDBCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The DB cluster isn't in a valid state. +// +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The specified DB instance isn't in the available state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/StartDBCluster +func (c *DocDB) StartDBCluster(input *StartDBClusterInput) (*StartDBClusterOutput, error) { + req, out := c.StartDBClusterRequest(input) + return out, req.Send() +} + +// StartDBClusterWithContext is the same as StartDBCluster with the addition of +// the ability to pass a context and additional request options. +// +// See StartDBCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) StartDBClusterWithContext(ctx aws.Context, input *StartDBClusterInput, opts ...request.Option) (*StartDBClusterOutput, error) { + req, out := c.StartDBClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopDBCluster = "StopDBCluster" + +// StopDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the StopDBCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopDBCluster for more information on using the StopDBCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopDBClusterRequest method. +// req, resp := client.StopDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/StopDBCluster +func (c *DocDB) StopDBClusterRequest(input *StopDBClusterInput) (req *request.Request, output *StopDBClusterOutput) { + op := &request.Operation{ + Name: opStopDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDBClusterInput{} + } + + output = &StopDBClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopDBCluster API operation for Amazon DocumentDB with MongoDB compatibility. +// +// Stops the running cluster that is specified by DBClusterIdentifier. The cluster +// must be in the available state. For more information, see Stopping and Starting +// an Amazon DocumentDB Cluster (https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-stop-start.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DocumentDB with MongoDB compatibility's +// API operation StopDBCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The DB cluster isn't in a valid state. +// +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The specified DB instance isn't in the available state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/StopDBCluster +func (c *DocDB) StopDBCluster(input *StopDBClusterInput) (*StopDBClusterOutput, error) { + req, out := c.StopDBClusterRequest(input) + return out, req.Send() +} + +// StopDBClusterWithContext is the same as StopDBCluster with the addition of +// the ability to pass a context and additional request options. +// +// See StopDBCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) StopDBClusterWithContext(ctx aws.Context, input *StopDBClusterInput, opts ...request.Option) (*StopDBClusterOutput, error) { + req, out := c.StopDBClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Represents the input to AddTagsToResource. type AddTagsToResourceInput struct { _ struct{} `type:"structure"` @@ -4162,6 +4434,85 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { return s } +// A certificate authority (CA) certificate for an AWS account. +type Certificate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the certificate. + // + // Example: arn:aws:rds:us-east-1::cert:rds-ca-2019 + CertificateArn *string `type:"string"` + + // The unique key that identifies a certificate. + // + // Example: rds-ca-2019 + CertificateIdentifier *string `type:"string"` + + // The type of the certificate. + // + // Example: CA + CertificateType *string `type:"string"` + + // The thumbprint of the certificate. + Thumbprint *string `type:"string"` + + // The starting date-time from which the certificate is valid. + // + // Example: 2019-07-31T17:57:09Z + ValidFrom *time.Time `type:"timestamp"` + + // The date-time after which the certificate is no longer valid. + // + // Example: 2024-07-31T17:57:09Z + ValidTill *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *Certificate) SetCertificateArn(v string) *Certificate { + s.CertificateArn = &v + return s +} + +// SetCertificateIdentifier sets the CertificateIdentifier field's value. +func (s *Certificate) SetCertificateIdentifier(v string) *Certificate { + s.CertificateIdentifier = &v + return s +} + +// SetCertificateType sets the CertificateType field's value. 
+func (s *Certificate) SetCertificateType(v string) *Certificate { + s.CertificateType = &v + return s +} + +// SetThumbprint sets the Thumbprint field's value. +func (s *Certificate) SetThumbprint(v string) *Certificate { + s.Thumbprint = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *Certificate) SetValidFrom(v time.Time) *Certificate { + s.ValidFrom = &v + return s +} + +// SetValidTill sets the ValidTill field's value. +func (s *Certificate) SetValidTill(v time.Time) *Certificate { + s.ValidTill = &v + return s +} + // The configuration setting for the log types to be enabled for export to Amazon // CloudWatch Logs for a specific DB instance or DB cluster. // @@ -4551,6 +4902,12 @@ type CreateDBClusterInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` + // Specifies whether this cluster can be deleted. If DeletionProtection is enabled, + // the cluster cannot be deleted unless it is modified and DeletionProtection + // is disabled. DeletionProtection protects clusters from being accidentally + // deleted. + DeletionProtection *bool `type:"boolean"` + // A list of log types that need to be enabled for exporting to Amazon CloudWatch // Logs. EnableCloudwatchLogsExports []*string `type:"list"` @@ -4579,7 +4936,6 @@ type CreateDBClusterInput struct { // Amazon DocumentDB uses the encryption key that is used to encrypt the // source. Otherwise, Amazon DocumentDB uses your default encryption key. // - // // * If the StorageEncrypted parameter is true and ReplicationSourceIdentifier // is not specified, Amazon DocumentDB uses your default encryption key. // @@ -4592,21 +4948,26 @@ type CreateDBClusterInput struct { KmsKeyId *string `type:"string"` // The password for the master database user. This password can contain any - // printable ASCII character except "/", """, or "@". + // printable ASCII character except forward slash (/), double quote ("), or + // the "at" symbol (@). // - // Constraints: Must contain from 8 to 41 characters. - MasterUserPassword *string `type:"string"` + // Constraints: Must contain from 8 to 100 characters. + // + // MasterUserPassword is a required field + MasterUserPassword *string `type:"string" required:"true"` // The name of the master user for the DB cluster. // // Constraints: // - // * Must be from 1 to 16 letters or numbers. + // * Must be from 1 to 63 letters or numbers. // // * The first character must be a letter. // // * Cannot be a reserved word for the chosen database engine. - MasterUsername *string `type:"string"` + // + // MasterUsername is a required field + MasterUsername *string `type:"string" required:"true"` // The port number on which the instances in the DB cluster accept connections. Port *int64 `type:"integer"` @@ -4670,6 +5031,12 @@ func (s *CreateDBClusterInput) Validate() error { if s.Engine == nil { invalidParams.Add(request.NewErrParamRequired("Engine")) } + if s.MasterUserPassword == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUserPassword")) + } + if s.MasterUsername == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUsername")) + } if invalidParams.Len() > 0 { return invalidParams @@ -4707,6 +5074,12 @@ func (s *CreateDBClusterInput) SetDBSubnetGroupName(v string) *CreateDBClusterIn return s } +// SetDeletionProtection sets the DeletionProtection field's value. 
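+//
+// A hedged usage sketch (illustrative, not part of the generated code), where
+// in is an assumed *docdb.CreateDBClusterInput:
+//
+//    in.SetDeletionProtection(true)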
+func (s *CreateDBClusterInput) SetDeletionProtection(v bool) *CreateDBClusterInput { + s.DeletionProtection = &v + return s +} + // SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. func (s *CreateDBClusterInput) SetEnableCloudwatchLogsExports(v []*string) *CreateDBClusterInput { s.EnableCloudwatchLogsExports = v @@ -5037,7 +5410,7 @@ type CreateDBInstanceInput struct { // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` - // The compute and memory capacity of the DB instance; for example, db.m4.large. + // The compute and memory capacity of the DB instance; for example, db.r5.large. // // DBInstanceClass is a required field DBInstanceClass *string `type:"string" required:"true"` @@ -5086,7 +5459,8 @@ type CreateDBInstanceInput struct { // Valid values: 0-15 PromotionTier *int64 `type:"integer"` - // The tags to be assigned to the DB instance. + // The tags to be assigned to the DB instance. You can assign up to 10 tags + // to an instance. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -5346,6 +5720,12 @@ type DBCluster struct { // cluster is accessed. DbClusterResourceId *string `type:"string"` + // Specifies whether this cluster can be deleted. If DeletionProtection is enabled, + // the cluster cannot be deleted unless it is modified and DeletionProtection + // is disabled. DeletionProtection protects clusters from being accidentally + // deleted. + DeletionProtection *bool `type:"boolean"` + // The earliest time to which a database can be restored with point-in-time // restore. EarliestRestorableTime *time.Time `type:"timestamp"` @@ -5489,6 +5869,12 @@ func (s *DBCluster) SetDbClusterResourceId(v string) *DBCluster { return s } +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *DBCluster) SetDeletionProtection(v bool) *DBCluster { + s.DeletionProtection = &v + return s +} + // SetEarliestRestorableTime sets the EarliestRestorableTime field's value. func (s *DBCluster) SetEarliestRestorableTime(v time.Time) *DBCluster { s.EarliestRestorableTime = &v @@ -6108,6 +6494,9 @@ type DBInstance struct { // Specifies the number of days for which automatic DB snapshots are retained. BackupRetentionPeriod *int64 `type:"integer"` + // The identifier of the CA certificate for this DB instance. + CACertificateIdentifier *string `type:"string"` + // Contains the name of the DB cluster that the DB instance is a member of if // the DB instance is a member of a DB cluster. DBClusterIdentifier *string `type:"string"` @@ -6175,17 +6564,15 @@ type DBInstance struct { // instance. PromotionTier *int64 `type:"integer"` - // Specifies the availability options for the DB instance. A value of true specifies - // an internet-facing instance with a publicly resolvable DNS name, which resolves - // to a public IP address. A value of false specifies an internal instance with - // a DNS name that resolves to a private IP address. + // Not supported. Amazon DocumentDB does not currently support public endpoints. + // The value of PubliclyAccessible is always false. PubliclyAccessible *bool `type:"boolean"` // The status of a read replica. If the instance is not a read replica, this // is blank. StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` - // Specifies whether the DB instance is encrypted. + // Specifies whether or not the DB instance is encrypted. 
StorageEncrypted *bool `type:"boolean"` // Provides a list of VPC security group elements that the DB instance belongs @@ -6221,6 +6608,12 @@ func (s *DBInstance) SetBackupRetentionPeriod(v int64) *DBInstance { return s } +// SetCACertificateIdentifier sets the CACertificateIdentifier field's value. +func (s *DBInstance) SetCACertificateIdentifier(v string) *DBInstance { + s.CACertificateIdentifier = &v + return s +} + // SetDBClusterIdentifier sets the DBClusterIdentifier field's value. func (s *DBInstance) SetDBClusterIdentifier(v string) *DBInstance { s.DBClusterIdentifier = &v @@ -6411,7 +6804,7 @@ func (s *DBInstanceStatusInfo) SetStatusType(v string) *DBInstanceStatusInfo { type DBSubnetGroup struct { _ struct{} `type:"structure"` - // The Amazon Resource Identifier (ARN) for the DB subnet group. + // The Amazon Resource Name (ARN) for the DB subnet group. DBSubnetGroupArn *string `type:"string"` // Provides the description of the DB subnet group. @@ -6834,6 +7227,130 @@ func (s DeleteDBSubnetGroupOutput) GoString() string { return s.String() } +type DescribeCertificatesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied certificate identifier. If this parameter is specified, + // information for only the specified certificate is returned. If this parameter + // is omitted, a list of up to MaxRecords certificates is returned. This parameter + // is not case sensitive. + // + // Constraints + // + // * Must match an existing CertificateIdentifier. + CertificateIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: + // + // * Minimum: 20 + // + // * Maximum: 100 + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCertificatesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateIdentifier sets the CertificateIdentifier field's value. +func (s *DescribeCertificatesInput) SetCertificateIdentifier(v string) *DescribeCertificatesInput { + s.CertificateIdentifier = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCertificatesInput) SetFilters(v []*Filter) *DescribeCertificatesInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. 
+func (s *DescribeCertificatesInput) SetMarker(v string) *DescribeCertificatesInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCertificatesInput) SetMaxRecords(v int64) *DescribeCertificatesInput { + s.MaxRecords = &v + return s +} + +type DescribeCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of certificates for this AWS account. + Certificates []*Certificate `locationNameList:"Certificate" type:"list"` + + // An optional pagination token provided if the number of records retrieved + // is greater than MaxRecords. If this parameter is specified, the marker specifies + // the next record in the list. Including the value of Marker in the next call + // to DescribeCertificates results in the next page of certificates. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesOutput) GoString() string { + return s.String() +} + +// SetCertificates sets the Certificates field's value. +func (s *DescribeCertificatesOutput) SetCertificates(v []*Certificate) *DescribeCertificatesOutput { + s.Certificates = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCertificatesOutput) SetMarker(v string) *DescribeCertificatesOutput { + s.Marker = &v + return s +} + // Represents the input to DescribeDBClusterParameterGroups. type DescribeDBClusterParameterGroupsInput struct { _ struct{} `type:"structure"` @@ -8983,15 +9500,22 @@ type ModifyDBClusterInput struct { // The name of the DB cluster parameter group to use for the DB cluster. DBClusterParameterGroupName *string `type:"string"` + // Specifies whether this cluster can be deleted. If DeletionProtection is enabled, + // the cluster cannot be deleted unless it is modified and DeletionProtection + // is disabled. DeletionProtection protects clusters from being accidentally + // deleted. + DeletionProtection *bool `type:"boolean"` + // The version number of the database engine to which you want to upgrade. Changing // this parameter results in an outage. The change is applied during the next // maintenance window unless the ApplyImmediately parameter is set to true. EngineVersion *string `type:"string"` - // The new password for the master database user. This password can contain - // any printable ASCII character except "/", """, or "@". + // The password for the master database user. This password can contain any + // printable ASCII character except forward slash (/), double quote ("), or + // the "at" symbol (@). // - // Constraints: Must contain from 8 to 41 characters. + // Constraints: Must contain from 8 to 100 characters. MasterUserPassword *string `type:"string"` // The new DB cluster identifier for the DB cluster when renaming a DB cluster. @@ -9103,6 +9627,12 @@ func (s *ModifyDBClusterInput) SetDBClusterParameterGroupName(v string) *ModifyD return s } +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *ModifyDBClusterInput) SetDeletionProtection(v bool) *ModifyDBClusterInput { + s.DeletionProtection = &v + return s +} + // SetEngineVersion sets the EngineVersion field's value. 
func (s *ModifyDBClusterInput) SetEngineVersion(v string) *ModifyDBClusterInput { s.EngineVersion = &v @@ -9391,7 +9921,10 @@ type ModifyDBInstanceInput struct { // and Amazon DocumentDB has enabled automatic patching for that engine version. AutoMinorVersionUpgrade *bool `type:"boolean"` - // The new compute and memory capacity of the DB instance; for example, db.m4.large. + // Indicates the certificate that needs to be associated with the instance. + CACertificateIdentifier *string `type:"string"` + + // The new compute and memory capacity of the DB instance; for example, db.r5.large. // Not all DB instance classes are available in all AWS Regions. // // If you modify the DB instance class, an outage occurs during the change. @@ -9490,6 +10023,12 @@ func (s *ModifyDBInstanceInput) SetAutoMinorVersionUpgrade(v bool) *ModifyDBInst return s } +// SetCACertificateIdentifier sets the CACertificateIdentifier field's value. +func (s *ModifyDBInstanceInput) SetCACertificateIdentifier(v string) *ModifyDBInstanceInput { + s.CACertificateIdentifier = &v + return s +} + // SetDBInstanceClass sets the DBInstanceClass field's value. func (s *ModifyDBInstanceInput) SetDBInstanceClass(v string) *ModifyDBInstanceInput { s.DBInstanceClass = &v @@ -10380,6 +10919,12 @@ type RestoreDBClusterFromSnapshotInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` + // Specifies whether this cluster can be deleted. If DeletionProtection is enabled, + // the cluster cannot be deleted unless it is modified and DeletionProtection + // is disabled. DeletionProtection protects clusters from being accidentally + // deleted. + DeletionProtection *bool `type:"boolean"` + // A list of log types that must be enabled for exporting to Amazon CloudWatch // Logs. EnableCloudwatchLogsExports []*string `type:"list"` @@ -10491,6 +11036,12 @@ func (s *RestoreDBClusterFromSnapshotInput) SetDBSubnetGroupName(v string) *Rest return s } +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *RestoreDBClusterFromSnapshotInput) SetDeletionProtection(v bool) *RestoreDBClusterFromSnapshotInput { + s.DeletionProtection = &v + return s +} + // SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. func (s *RestoreDBClusterFromSnapshotInput) SetEnableCloudwatchLogsExports(v []*string) *RestoreDBClusterFromSnapshotInput { s.EnableCloudwatchLogsExports = v @@ -10586,6 +11137,12 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` + // Specifies whether this cluster can be deleted. If DeletionProtection is enabled, + // the cluster cannot be deleted unless it is modified and DeletionProtection + // is disabled. DeletionProtection protects clusters from being accidentally + // deleted. + DeletionProtection *bool `type:"boolean"` + // A list of log types that must be enabled for exporting to Amazon CloudWatch // Logs. EnableCloudwatchLogsExports []*string `type:"list"` @@ -10703,6 +11260,12 @@ func (s *RestoreDBClusterToPointInTimeInput) SetDBSubnetGroupName(v string) *Res return s } +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *RestoreDBClusterToPointInTimeInput) SetDeletionProtection(v bool) *RestoreDBClusterToPointInTimeInput { + s.DeletionProtection = &v + return s +} + // SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. 
func (s *RestoreDBClusterToPointInTimeInput) SetEnableCloudwatchLogsExports(v []*string) *RestoreDBClusterToPointInTimeInput { s.EnableCloudwatchLogsExports = v @@ -10774,6 +11337,128 @@ func (s *RestoreDBClusterToPointInTimeOutput) SetDBCluster(v *DBCluster) *Restor return s } +type StartDBClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to restart. Example: docdb-2019-05-28-15-24-52 + // + // DBClusterIdentifier is a required field + DBClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBClusterIdentifier sets the DBClusterIdentifier field's value. +func (s *StartDBClusterInput) SetDBClusterIdentifier(v string) *StartDBClusterInput { + s.DBClusterIdentifier = &v + return s +} + +type StartDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Detailed information about a DB cluster. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s StartDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDBClusterOutput) GoString() string { + return s.String() +} + +// SetDBCluster sets the DBCluster field's value. +func (s *StartDBClusterOutput) SetDBCluster(v *DBCluster) *StartDBClusterOutput { + s.DBCluster = v + return s +} + +type StopDBClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to stop. Example: docdb-2019-05-28-15-24-52 + // + // DBClusterIdentifier is a required field + DBClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBClusterIdentifier sets the DBClusterIdentifier field's value. +func (s *StopDBClusterInput) SetDBClusterIdentifier(v string) *StopDBClusterInput { + s.DBClusterIdentifier = &v + return s +} + +type StopDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Detailed information about a DB cluster. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s StopDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDBClusterOutput) GoString() string { + return s.String() +} + +// SetDBCluster sets the DBCluster field's value. 
+func (s *StopDBClusterOutput) SetDBCluster(v *DBCluster) *StopDBClusterOutput { + s.DBCluster = v + return s +} + // Detailed information about a subnet. type Subnet struct { _ struct{} `type:"structure"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go index cd0f3d91dc5..c891f31c420 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DocDB { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DocDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DocDB { svc := &DocDB{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 4109ccce9b5..562870b35d1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -90,23 +90,23 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // one or more tables. You identify requested items by primary key. // // A single operation can retrieve up to 16 MB of data, which can contain as -// many as 100 items. BatchGetItem will return a partial result if the response +// many as 100 items. BatchGetItem returns a partial result if the response // size limit is exceeded, the table's provisioned throughput is exceeded, or // an internal processing failure occurs. If a partial result is returned, the // operation returns a value for UnprocessedKeys. You can use this value to // retry the operation starting with the next item to get. // -// If you request more than 100 items BatchGetItem will return a ValidationException -// with the message "Too many items requested for the BatchGetItem call". +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." // // For example, if you ask to retrieve 100 items, but each individual item is // 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB // limit). It also returns an appropriate UnprocessedKeys value so you can get // the next page of results. If desired, your application can include its own -// logic to assemble the pages of results into one data set. +// logic to assemble the pages of results into one dataset. 
// // If none of the items can be processed due to insufficient provisioned throughput -// on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. +// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. // If at least one of the items is successfully processed, then BatchGetItem // completes successfully, while returning the keys of the unread items in UnprocessedKeys. // @@ -133,7 +133,7 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // // If a requested item does not exist, it is not returned in the result. Requests // for nonexistent items consume the minimum read capacity units according to -// the type of read. For more information, see Capacity Units Calculations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) // in the Amazon DynamoDB Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -158,8 +158,8 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -197,7 +197,7 @@ func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemI // // Example iterating over at most 3 pages of a BatchGetItem operation. // pageNum := 0 // err := client.BatchGetItemPages(params, -// func(page *BatchGetItemOutput, lastPage bool) bool { +// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -229,10 +229,12 @@ func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGet }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -317,9 +319,8 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // check for unprocessed items and submit a new BatchWriteItem request with // those unprocessed items until all items have been processed. // -// Note that if none of the items can be processed due to insufficient provisioned -// throughput on all of the tables in the request, then BatchWriteItem will -// return a ProvisionedThroughputExceededException. +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. // // If DynamoDB returns any unprocessed items, you should retry the batch operation // on those items. However, we strongly recommend that you use an exponential @@ -328,16 +329,15 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // tables. 
If you delay the batch operation using exponential backoff, the individual // requests in the batch are much more likely to succeed. // -// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) // in the Amazon DynamoDB Developer Guide. // // With BatchWriteItem, you can efficiently write or delete large amounts of -// data, such as from Amazon Elastic MapReduce (EMR), or copy data from another -// database into DynamoDB. In order to improve performance with these large-scale -// operations, BatchWriteItem does not behave in the same way as individual -// PutItem and DeleteItem calls would. For example, you cannot specify conditions -// on individual put and delete requests, and BatchWriteItem does not return -// deleted items in the response. +// data, such as from Amazon EMR, or copy data from another database into DynamoDB. +// In order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. // // If you use a programming language that supports concurrency, you can use // threads to write items in parallel. Your application must include the necessary @@ -365,7 +365,7 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // BatchWriteItem request. For example, you cannot put and delete the same // item in the same BatchWriteItem request. // -// * Your request contains at least two items with identical hash and range +// * Your request contains at least two items with identical hash and range // keys (which essentially is two put operations). // // * There are more than 25 requests in the batch. @@ -400,8 +400,8 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -495,10 +495,10 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // // Creates a backup for an existing table. // -// Each time you create an On-Demand Backup, the entire table data is backed +// Each time you create an on-demand backup, the entire table data is backed // up. There is no limit to the number of on-demand backups that can be taken. // -// When you create an On-Demand Backup, a time marker of the request is cataloged, +// When you create an on-demand backup, a time marker of the request is cataloged, // and the backup is created asynchronously, by applying all changes until the // time of the request to the last full table snapshot. Backup requests are // processed instantaneously and become available for restore within minutes. 
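The BatchWriteItem documentation above recommends retrying UnprocessedItems with exponential backoff rather than resubmitting the whole batch immediately. A minimal Go sketch of that retry loop follows; the ExampleTable name, the PK attribute, and the batchWriteWithBackoff helper are illustrative assumptions, not part of this vendored SDK change.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// batchWriteWithBackoff resubmits whatever BatchWriteItem reports back in
// UnprocessedItems, doubling the delay between attempts as the doc comment
// above advises.
func batchWriteWithBackoff(svc *dynamodb.DynamoDB, items map[string][]*dynamodb.WriteRequest) error {
	backoff := 50 * time.Millisecond
	for len(items) > 0 {
		out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: items})
		if err != nil {
			return err
		}
		items = out.UnprocessedItems // empty once every request was accepted
		if len(items) > 0 {
			time.Sleep(backoff)
			backoff *= 2
		}
	}
	return nil
}

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	reqs := map[string][]*dynamodb.WriteRequest{
		"ExampleTable": { // hypothetical table name
			{PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
				"PK": {S: aws.String("example-key")}, // hypothetical key attribute
			}}},
		},
	}
	if err := batchWriteWithBackoff(svc, reqs); err != nil {
		fmt.Println("batch write failed:", err)
	}
}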
@@ -510,9 +510,8 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // // If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed // to contain all data committed to the table up to 14:24:00, and data committed -// after 14:26:00 will not be. The backup may or may not contain data modifications -// made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal -// consistency. +// after 14:26:00 will not be. The backup might contain data modifications made +// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. // // Along with data, the following are also included on the backups: // @@ -652,7 +651,7 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // // Creates a global table from an existing table. A global table creates a replication // relationship between two or more DynamoDB tables with the same table name -// in the provided regions. +// in the provided Regions. // // If you want to add a new replica table to a global table, each of the following // conditions must be true: @@ -669,9 +668,9 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // If global secondary indexes are specified, then the following conditions // must also be met: // -// * The global secondary indexes must have the same name. +// * The global secondary indexes must have the same name. // -// * The global secondary indexes must have the same hash key and sort key +// * The global secondary indexes must have the same hash key and sort key // (if present). // // Write capacity settings should be set consistently across your replica tables @@ -679,7 +678,7 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // to manage the write capacity settings for all of your global tables replicas // and indexes. // -// If you prefer to manage write capacity settings manually, you should provision +// If you prefer to manage write capacity settings manually, you should provision // equal replicated write capacity units to your replica tables. You should // also provision equal replicated write capacity units to matching secondary // indexes across your global table. @@ -804,8 +803,8 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // CreateTable API operation for Amazon DynamoDB. // // The CreateTable operation adds a new table to your account. In an AWS account, -// table names must be unique within each region. That is, you can have two -// tables with same name if you create the tables in different regions. +// table names must be unique within each Region. That is, you can have two +// tables with same name if you create the tables in different Regions. // // CreateTable is an asynchronous operation. Upon receiving a CreateTable request, // DynamoDB immediately returns a response with a TableStatus of CREATING. After @@ -1106,8 +1105,8 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. 
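The CreateTable documentation above stresses that the call is asynchronous and returns while TableStatus is still CREATING. A minimal sketch of the usual pattern, blocking on the SDK's WaitUntilTableExists waiter until the table is usable; the ExampleTable name and single-attribute key schema are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// CreateTable returns as soon as the table enters CREATING status.
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("ExampleTable"), // hypothetical table name
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("PK"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("PK"), KeyType: aws.String("HASH")},
		},
		BillingMode: aws.String("PAY_PER_REQUEST"),
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}

	// Poll DescribeTable until the table reports ACTIVE (or the waiter
	// gives up); only then start reading and writing.
	err = svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
		TableName: aws.String("ExampleTable"),
	})
	fmt.Println("wait result:", err)
}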
@@ -1449,8 +1448,8 @@ func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBac // If point in time recovery is enabled, PointInTimeRecoveryStatus will be set // to ENABLED. // -// Once continuous backups and point in time recovery are enabled, you can restore -// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. // // LatestRestorableDateTime is typically 5 minutes before the current time. // You can restore your table to any point in time during the last 35 days. @@ -1796,7 +1795,7 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable // DescribeGlobalTableSettings API operation for Amazon DynamoDB. // -// Describes region specific settings for a global table. +// Describes Region-specific settings for a global table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1900,12 +1899,12 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // DescribeLimits API operation for Amazon DynamoDB. // // Returns the current provisioned-capacity limits for your AWS account in a -// region, both for the region as a whole and for any one DynamoDB table that +// Region, both for the Region as a whole and for any one DynamoDB table that // you create there. // // When you establish an AWS account, the account has initial limits on the // maximum read capacity units and write capacity units that you can provision -// across all of your DynamoDB tables in a given region. Also, there are per-table +// across all of your DynamoDB tables in a given Region. Also, there are per-table // limits that apply when you create a table there. For more information, see // Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // page in the Amazon DynamoDB Developer Guide. @@ -1918,26 +1917,27 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // // For example, you could use one of the AWS SDKs to do the following: // -// Call DescribeLimits for a particular region to obtain your current account +// Call DescribeLimits for a particular Region to obtain your current account // limits on provisioned capacity there. // // Create a variable to hold the aggregate read capacity units provisioned for -// all your tables in that region, and one to hold the aggregate write capacity +// all your tables in that Region, and one to hold the aggregate write capacity // units. Zero them both. // // Call ListTables to obtain a list of all your DynamoDB tables. // // For each table name listed by ListTables, do the following: // -// Call DescribeTable with the table name. +// * Call DescribeTable with the table name. // -// Use the data returned by DescribeTable to add the read capacity units and -// write capacity units provisioned for the table itself to your variables. +// * Use the data returned by DescribeTable to add the read capacity units +// and write capacity units provisioned for the table itself to your variables. // -// If the table has one or more global secondary indexes (GSIs), loop over these -// GSIs and add their provisioned capacity values to your variables as well. 
+// * If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables +// as well. // -// Report the account limits for that region returned by DescribeLimits, along +// Report the account limits for that Region returned by DescribeLimits, along // with the total current provisioned capacity levels you have calculated. // // This will let you see whether you are getting close to your account-level @@ -1947,8 +1947,8 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // the sum of the provisioned capacity of the new table itself and all its global // secondary indexes. // -// For existing tables and their GSIs, DynamoDB will not let you increase provisioned -// capacity extremely rapidly, but the only upper limit that applies is that +// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned +// capacity extremely rapidly. But the only upper limit that applies is that // the aggregate provisioned capacity over all your tables and GSIs cannot exceed // either of the per-account limits. // @@ -2302,8 +2302,8 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -2397,13 +2397,13 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // // List backups associated with an AWS account. To list backups for a given // table, specify TableName. ListBackups returns a paginated list of results -// with at most 1MB worth of items in a page. You can also specify a limit for -// the maximum number of entries to be returned in a page. +// with at most 1 MB worth of items in a page. You can also specify a limit +// for the maximum number of entries to be returned in a page. // -// In the request, start time is inclusive but end time is exclusive. Note that -// these limits are for the time at which the original backup was requested. +// In the request, start time is inclusive, but end time is exclusive. Note +// that these limits are for the time at which the original backup was requested. // -// You can call ListBackups a maximum of 5 times per second. +// You can call ListBackups a maximum of five times per second. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2503,7 +2503,7 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r // ListGlobalTables API operation for Amazon DynamoDB. // -// Lists all global tables that have a replica in the specified region. +// Lists all global tables that have a replica in the specified Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2657,7 +2657,7 @@ func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput // // Example iterating over at most 3 pages of a ListTables operation. 
// pageNum := 0 // err := client.ListTablesPages(params, -// func(page *ListTablesOutput, lastPage bool) bool { +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2689,10 +2689,12 @@ func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTables }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2882,28 +2884,28 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // For information on how to call the PutItem API using the AWS SDK in specific // languages, see the following: // -// PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) // -// PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) +// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) // -// When you add an item, the primary key attribute(s) are the only required -// attributes. Attribute values cannot be null. String and Binary type attributes -// must have lengths greater than zero. Set type attributes cannot be empty. -// Requests with empty values will be rejected with a ValidationException exception. +// When you add an item, the primary key attributes are the only required attributes. +// Attribute values cannot be null. String and Binary type attributes must have +// lengths greater than zero. Set type attributes cannot be empty. Requests +// with empty values will be rejected with a ValidationException exception. 
// // To prevent a new item from replacing an existing item, use a conditional // expression that contains the attribute_not_exists function with the name @@ -2946,8 +2948,8 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3116,8 +3118,8 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3155,7 +3157,7 @@ func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ... // // Example iterating over at most 3 pages of a Query operation. // pageNum := 0 // err := client.QueryPages(params, -// func(page *QueryOutput, lastPage bool) bool { +// func(page *dynamodb.QueryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3187,10 +3189,12 @@ func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*QueryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3270,7 +3274,7 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // // * IAM policies // -// * Cloudwatch metrics and alarms +// * Amazon CloudWatch metrics and alarms // // * Tags // @@ -3421,10 +3425,8 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn // // * Provisioned read and write capacity // -// * Encryption settings -// -// All these settings come from the current settings of the source table at -// the time of restore. +// * Encryption settings All these settings come from the current settings +// of the source table at the time of restore. // // You must manually set up the following on the restored table: // @@ -3432,7 +3434,7 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn // // * IAM policies // -// * Cloudwatch metrics and alarms +// * Amazon CloudWatch metrics and alarms // // * Tags // @@ -3581,16 +3583,16 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // every item in a table or a secondary index. To have DynamoDB return fewer // items, you can provide a FilterExpression operation. // -// If the total number of scanned items exceeds the maximum data set size limit +// If the total number of scanned items exceeds the maximum dataset size limit // of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey // value to continue the scan in a subsequent operation. The results also include // the number of items exceeding the limit. 
A scan can result in no table data // meeting the filter criteria. // -// A single Scan operation will read up to the maximum number of items set (if -// using the Limit parameter) or a maximum of 1 MB of data and then apply any -// filtering to the results using FilterExpression. If LastEvaluatedKey is present -// in the response, you will need to paginate the result set. For more information, +// A single Scan operation reads up to the maximum number of items set (if using +// the Limit parameter) or a maximum of 1 MB of data and then applies any filtering +// to the results using FilterExpression. If LastEvaluatedKey is present in +// the response, you need to paginate the result set. For more information, // see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) // in the Amazon DynamoDB Developer Guide. // @@ -3628,8 +3630,8 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -3667,7 +3669,7 @@ func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...re // // Example iterating over at most 3 pages of a Scan operation. // pageNum := 0 // err := client.ScanPages(params, -// func(page *ScanOutput, lastPage bool) bool { +// func(page *dynamodb.ScanOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3699,10 +3701,12 @@ func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn fu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ScanOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3774,8 +3778,8 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req // // Associate a set of tags with an Amazon DynamoDB resource. You can then activate // these user-defined tags so that they appear on the Billing and Cost Management -// console for cost allocation tracking. You can call TagResource up to 5 times -// per second, per account. +// console for cost allocation tracking. You can call TagResource up to five +// times per second, per account. // // For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) // in the Amazon DynamoDB Developer Guide. @@ -3903,10 +3907,11 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // TransactGetItems is a synchronous operation that atomically retrieves multiple // items from one or more tables (but not from indexes) in a single account -// and region. A TransactGetItems call can contain up to 10 TransactGetItem +// and Region. A TransactGetItems call can contain up to 25 TransactGetItem // objects, each of which contains a Get structure that specifies an item to -// retrieve from a table in the account and region. A call to TransactGetItems -// cannot retrieve items from tables in more than one AWS account or region. +// retrieve from a table in the account and Region. 
A call to TransactGetItems +// cannot retrieve items from tables in more than one AWS account or Region. +// The aggregate size of the items in the transaction cannot exceed 4 MB. // // DynamoDB rejects the entire TransactGetItems request if any of the following // is true: @@ -3919,6 +3924,8 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // * There is a user error, such as an invalid data format. // +// * The aggregate size of the items in the transaction exceeds 4 MB. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3932,9 +3939,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // might not be specified correctly, or its status might not be ACTIVE. // // * ErrCodeTransactionCanceledException "TransactionCanceledException" -// The entire transaction request was rejected. +// The entire transaction request was canceled. // -// DynamoDB rejects a TransactWriteItems request under the following circumstances: +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // // * A condition in one of the condition expressions is not met. // @@ -3953,7 +3960,7 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // * There is a user error, such as an invalid data format. // -// DynamoDB rejects a TransactGetItems request under the following circumstances: +// DynamoDB cancels a TransactGetItems request under the following circumstances: // // * There is an ongoing TransactGetItems operation that conflicts with a // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. @@ -3967,6 +3974,57 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // * There is a user error, such as an invalid data format. // +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items; if an item has no error, +// it will have the NONE code and a null message. +// +// Cancellation reason codes and possible error messages: +// +// * No Errors: Code: NONE Message: null +// +// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. +// +// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded +// Message: Collection size exceeded. +// +// * Transaction Conflict: Code: TransactionConflict Message: Transaction +// is ongoing for the item. +// +// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded +// Messages: The level of configured provisioned throughput for the table +// was exceeded. Consider increasing your provisioning level with the UpdateTable +// API. This message is returned when provisioned throughput is exceeded +// on a provisioned DynamoDB table. The level of configured provisioned +// throughput for one or more global secondary indexes of the table was exceeded. +// Consider increasing your provisioning level for the under-provisioned +// global secondary indexes with the UpdateTable API. This message is returned +// when provisioned throughput is exceeded on a provisioned GSI. +// +// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds +// the current capacity of your table or index. 
DynamoDB is automatically +// scaling your table or index so please try again shortly. If exceptions +// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// This message is returned when writes get throttled on an On-Demand table +// as DynamoDB is automatically scaling the table. Throughput exceeds the +// current capacity for one or more global secondary indexes. DynamoDB is +// automatically scaling your index so please try again shortly. This message +// is returned when writes get throttled on an On-Demand GSI as DynamoDB +// is automatically scaling the GSI. +// +// * Validation Error: Code: ValidationError Messages: One or more parameter +// values were invalid. The update expression attempted to update the secondary +// index key beyond allowed size limits. The update expression attempted +// to update the secondary index key to unsupported type. An operand in the +// update expression has an incorrect data type. Item size to update has +// exceeded the maximum allowed size. Number overflow. Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // Your request rate is too high. The AWS SDKs for DynamoDB automatically retry // requests that receive this exception. Your request is eventually successful, @@ -3975,6 +4033,11 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. // @@ -4065,57 +4128,59 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // TransactWriteItems API operation for Amazon DynamoDB. // -// TransactWriteItems is a synchronous write operation that groups up to 10 +// TransactWriteItems is a synchronous write operation that groups up to 25 // action requests. These actions can target items in different tables, but -// not in different AWS accounts or regions, and no two actions can target the +// not in different AWS accounts or Regions, and no two actions can target the // same item. For example, you cannot both ConditionCheck and Update the same -// item. +// item. The aggregate size of the items in the transaction cannot exceed 4 +// MB. // // The actions are completed atomically so that either all of them succeed, // or all of them fail. They are defined by the following objects: // -// * Put  —   Initiates a PutItem operation to write a new item. This structure +// * Put — Initiates a PutItem operation to write a new item. 
This structure // specifies the primary key of the item to be written, the name of the table // to write it in, an optional condition expression that must be satisfied // for the write to succeed, a list of the item's attributes, and a field -// indicating whether or not to retrieve the item's attributes if the condition +// indicating whether to retrieve the item's attributes if the condition // is not met. // -// * Update  —   Initiates an UpdateItem operation to update an existing -// item. This structure specifies the primary key of the item to be updated, -// the name of the table where it resides, an optional condition expression -// that must be satisfied for the update to succeed, an expression that defines -// one or more attributes to be updated, and a field indicating whether or -// not to retrieve the item's attributes if the condition is not met. +// * Update — Initiates an UpdateItem operation to update an existing item. +// This structure specifies the primary key of the item to be updated, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the update to succeed, an expression that defines +// one or more attributes to be updated, and a field indicating whether to +// retrieve the item's attributes if the condition is not met. // -// * Delete  —   Initiates a DeleteItem operation to delete an existing item. +// * Delete — Initiates a DeleteItem operation to delete an existing item. // This structure specifies the primary key of the item to be deleted, the // name of the table where it resides, an optional condition expression that // must be satisfied for the deletion to succeed, and a field indicating -// whether or not to retrieve the item's attributes if the condition is not -// met. +// whether to retrieve the item's attributes if the condition is not met. // -// * ConditionCheck  —   Applies a condition to an item that is not being +// * ConditionCheck — Applies a condition to an item that is not being // modified by the transaction. This structure specifies the primary key // of the item to be checked, the name of the table where it resides, a condition // expression that must be satisfied for the transaction to succeed, and -// a field indicating whether or not to retrieve the item's attributes if -// the condition is not met. +// a field indicating whether to retrieve the item's attributes if the condition +// is not met. // // DynamoDB rejects the entire TransactWriteItems request if any of the following // is true: // // * A condition in one of the condition expressions is not met. // -// * A conflicting operation is in the process of updating the same item. +// * An ongoing operation is in the process of updating the same item. // // * There is insufficient provisioned capacity for the transaction to be // completed. // -// * An item size becomes too large (bigger than 400 KB), a Local Secondary -// Index (LSI) becomes too large, or a similar validation error occurs because +// * An item size becomes too large (bigger than 400 KB), a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because // of changes made by the transaction. // +// * The aggregate size of the items in the transaction exceeds 4 MB. +// // * There is a user error, such as an invalid data format. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -4131,9 +4196,9 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // might not be specified correctly, or its status might not be ACTIVE. // // * ErrCodeTransactionCanceledException "TransactionCanceledException" -// The entire transaction request was rejected. +// The entire transaction request was canceled. // -// DynamoDB rejects a TransactWriteItems request under the following circumstances: +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // // * A condition in one of the condition expressions is not met. // @@ -4152,7 +4217,7 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // // * There is a user error, such as an invalid data format. // -// DynamoDB rejects a TransactGetItems request under the following circumstances: +// DynamoDB cancels a TransactGetItems request under the following circumstances: // // * There is an ongoing TransactGetItems operation that conflicts with a // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. @@ -4166,6 +4231,57 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // // * There is a user error, such as an invalid data format. // +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items; if an item has no error, +// it will have the NONE code and a null message. +// +// Cancellation reason codes and possible error messages: +// +// * No Errors: Code: NONE Message: null +// +// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. +// +// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded +// Message: Collection size exceeded. +// +// * Transaction Conflict: Code: TransactionConflict Message: Transaction +// is ongoing for the item. +// +// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded +// Messages: The level of configured provisioned throughput for the table +// was exceeded. Consider increasing your provisioning level with the UpdateTable +// API. This message is returned when provisioned throughput is exceeded +// on a provisioned DynamoDB table. The level of configured provisioned +// throughput for one or more global secondary indexes of the table was exceeded. +// Consider increasing your provisioning level for the under-provisioned +// global secondary indexes with the UpdateTable API. This message is returned +// when provisioned throughput is exceeded on a provisioned GSI. +// +// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds +// the current capacity of your table or index. DynamoDB is automatically +// scaling your table or index so please try again shortly. If exceptions +// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// This message is returned when writes get throttled on an On-Demand table +// as DynamoDB is automatically scaling the table. Throughput exceeds the +// current capacity for one or more global secondary indexes. DynamoDB is +// automatically scaling your index so please try again shortly. This message +// is returned when writes get throttled on an On-Demand GSI as DynamoDB +// is automatically scaling the GSI. 
+// +// * Validation Error: Code: ValidationError Messages: One or more parameter +// values were invalid. The update expression attempted to update the secondary +// index key beyond allowed size limits. The update expression attempted +// to update the secondary index key to unsupported type. An operand in the +// update expression has an incorrect data type. Item size to update has +// exceeded the maximum allowed size. Number overflow. Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// // * ErrCodeTransactionInProgressException "TransactionInProgressException" // The transaction with the given request token is already in progress. // @@ -4181,6 +4297,11 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. // @@ -4273,7 +4394,7 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // UntagResource API operation for Amazon DynamoDB. // // Removes the association of tags from an Amazon DynamoDB resource. You can -// call UntagResource up to 5 times per second, per account. +// call UntagResource up to five times per second, per account. // // For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) // in the Amazon DynamoDB Developer Guide. @@ -4409,7 +4530,7 @@ func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackups // to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. // // LatestRestorableDateTime is typically 5 minutes before the current time. -// You can restore your table to any point in time during the last 35 days.. +// You can restore your table to any point in time during the last 35 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4518,9 +4639,9 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // // Adds or removes replicas in the specified global table. The global table // must already exist to be able to use this operation. Any replica to be added -// must be empty, must have the same name as the global table, must have the -// same key schema, and must have DynamoDB Streams enabled and must have same -// provisioned and maximum write capacity units. +// must be empty, have the same name as the global table, have the same key +// schema, have DynamoDB Streams enabled, and have the same provisioned and +// maximum write capacity units. 
// // Although you can use UpdateGlobalTable to add replicas and remove replicas // in a single request, for simplicity we recommend that you issue separate @@ -4529,12 +4650,12 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // If global secondary indexes are specified, then the following conditions // must also be met: // -// * The global secondary indexes must have the same name. +// * The global secondary indexes must have the same name. // -// * The global secondary indexes must have the same hash key and sort key +// * The global secondary indexes must have the same hash key and sort key // (if present). // -// * The global secondary indexes must have the same provisioned and maximum +// * The global secondary indexes must have the same provisioned and maximum // write capacity units. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4817,8 +4938,8 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque // // * ErrCodeRequestLimitExceeded "RequestLimitExceeded" // Throughput exceeds the current throughput limit for your account. Please -// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) -// to request a limit increase. +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. // // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. @@ -4917,11 +5038,11 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // // * Modify the provisioned throughput settings of the table. // -// * Enable or disable Streams on the table. +// * Enable or disable DynamoDB Streams on the table. // // * Remove a global secondary index from the table. // -// * Create a new global secondary index on the table. Once the index begins +// * Create a new global secondary index on the table. After the index begins // backfilling, you can use UpdateTable to perform other operations. // // UpdateTable is an asynchronous operation; while it is executing, the table @@ -5050,9 +5171,9 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // UpdateTimeToLive API operation for Amazon DynamoDB. // -// The UpdateTimeToLive method will enable or disable TTL for the specified -// table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; -// it may take up to one hour for the change to fully process. Any additional +// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the +// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. +// It can take up to one hour for the change to fully process. Any additional // UpdateTimeToLive calls for the same table during this one hour duration result // in a ValidationException. // @@ -5062,7 +5183,7 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // deleted. // // The epoch time format is the number of seconds elapsed since 12:00:00 AM -// January 1st, 1970 UTC. +// January 1, 1970 UTC. // // DynamoDB deletes expired items on a best-effort basis to ensure availability // of throughput for other data operations. @@ -5072,8 +5193,8 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // to the nature of the workload. Items that have expired and not been deleted // will still show up in reads, queries, and scans. 
// -// As items are deleted, they are removed from any Local Secondary Index and -// Global Secondary Index immediately in the same eventually consistent way +// As items are deleted, they are removed from any local secondary index and +// global secondary index immediately in the same eventually consistent way // as a standard delete operation. // // For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) @@ -5362,47 +5483,38 @@ type AttributeValueUpdate struct { // // * DELETE - If no value is specified, the attribute and its value are removed // from the item. The data type of the specified value must match the existing - // value's data type. - // - // If a set of values is specified, then those values are subtracted from the - // old set. For example, if the attribute value was the set [a,b,c] and the - // DELETE action specified [a,c], then the final attribute value would be - // [b]. Specifying an empty set is an error. + // value's data type. If a set of values is specified, then those values + // are subtracted from the old set. For example, if the attribute value was + // the set [a,b,c] and the DELETE action specified [a,c], then the final + // attribute value would be [b]. Specifying an empty set is an error. // // * ADD - If the attribute does not already exist, then the attribute and // its values are added to the item. If the attribute does exist, then the - // behavior of ADD depends on the data type of the attribute: - // - // If the existing attribute is a number, and if Value is also a number, then - // the Value is mathematically added to the existing attribute. If Value - // is a negative number, then it is subtracted from the existing attribute. - // - // If you use ADD to increment or decrement a number value for an item that - // doesn't exist before the update, DynamoDB uses 0 as the initial value. - // - // In addition, if you use ADD to update an existing item, and intend to increment - // or decrement an attribute value which does not yet exist, DynamoDB uses - // 0 as the initial value. For example, suppose that the item you want to - // update does not yet have an attribute named itemcount, but you decide - // to ADD the number 3 to this attribute anyway, even though it currently - // does not exist. DynamoDB will create the itemcount attribute, set its - // initial value to 0, and finally add 3 to it. The result will be a new - // itemcount attribute in the item, with a value of 3. - // - // If the existing data type is a set, and if the Value is also a set, then - // the Value is added to the existing set. (This is a set operation, not - // mathematical addition.) For example, if the attribute value was the set - // [1,2], and the ADD action specified [3], then the final attribute value - // would be [1,2,3]. An error occurs if an Add action is specified for a - // set attribute and the attribute type specified does not match the existing - // set type. - // - // Both sets must have the same primitive data type. For example, if the existing - // data type is a set of strings, the Value must also be a set of strings. - // The same holds true for number sets and binary sets. - // - // This action is only valid for an existing attribute whose data type is number - // or is a set. Do not use ADD for any other data types. 
+ // behavior of ADD depends on the data type of the attribute: If the existing + // attribute is a number, and if Value is also a number, then the Value is + // mathematically added to the existing attribute. If Value is a negative + // number, then it is subtracted from the existing attribute. If you use + // ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. In addition, + // if you use ADD to update an existing item, and intend to increment or + // decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD + // the number 3 to this attribute anyway, even though it currently does not + // exist. DynamoDB will create the itemcount attribute, set its initial value + // to 0, and finally add 3 to it. The result will be a new itemcount attribute + // in the item, with a value of 3. If the existing data type is a set, and + // if the Value is also a set, then the Value is added to the existing set. + // (This is a set operation, not mathematical addition.) For example, if + // the attribute value was the set [1,2], and the ADD action specified [3], + // then the final attribute value would be [1,2,3]. An error occurs if an + // Add action is specified for a set attribute and the attribute type specified + // does not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The same holds true for number + // sets and binary sets. This action is only valid for an existing attribute + // whose data type is number or is a set. Do not use ADD for any other data + // types. // // If no item with the specified Key is found: // @@ -5481,7 +5593,7 @@ func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfigurati return s } -// Represents the autoscaling policy to be modified. +// Represents the auto scaling policy to be modified. type AutoScalingPolicyUpdate struct { _ struct{} `type:"structure"` @@ -5537,15 +5649,15 @@ func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v return s } -// Represents the autoscaling settings for a global table or global secondary +// Represents the auto scaling settings for a global table or global secondary // index. type AutoScalingSettingsDescription struct { _ struct{} `type:"structure"` - // Disabled autoscaling for this global table or global secondary index. + // Disabled auto scaling for this global table or global secondary index. AutoScalingDisabled *bool `type:"boolean"` - // Role ARN used for configuring autoScaling policy. + // Role ARN used for configuring the auto scaling policy. AutoScalingRoleArn *string `type:"string"` // The maximum capacity units that a global table or global secondary index @@ -5600,15 +5712,15 @@ func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPoli return s } -// Represents the autoscaling settings to be modified for a global table or +// Represents the auto scaling settings to be modified for a global table or // global secondary index. type AutoScalingSettingsUpdate struct { _ struct{} `type:"structure"` - // Disabled autoscaling for this global table or global secondary index. + // Disabled auto scaling for this global table or global secondary index. 
AutoScalingDisabled *bool `type:"boolean"` - // Role ARN used for configuring autoscaling policy. + // Role ARN used for configuring auto scaling policy. AutoScalingRoleArn *string `min:"1" type:"string"` // The maximum capacity units that a global table or global secondary index @@ -5704,7 +5816,7 @@ type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { // subsequent scale in requests until it has expired. You should scale in conservatively // to protect your application's availability. However, if another alarm triggers // a scale out policy during the cooldown period after a scale-in, application - // autoscaling scales out your scalable target immediately. + // auto scaling scales out your scalable target immediately. ScaleInCooldown *int64 `type:"integer"` // The amount of time, in seconds, after a scale out activity completes before @@ -5772,7 +5884,7 @@ type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { // subsequent scale in requests until it has expired. You should scale in conservatively // to protect your application's availability. However, if another alarm triggers // a scale out policy during the cooldown period after a scale-in, application - // autoscaling scales out your scalable target immediately. + // auto scaling scales out your scalable target immediately. ScaleInCooldown *int64 `type:"integer"` // The amount of time, in seconds, after a scale out activity completes before @@ -6107,37 +6219,21 @@ type BatchGetItemInput struct { // // * ExpressionAttributeNames - One or more substitution tokens for attribute // names in the ProjectionExpression parameter. The following are some use - // cases for using ExpressionAttributeNames: - // - // To access an attribute whose name conflicts with a DynamoDB reserved word. - // - // To create a placeholder for repeating occurrences of an attribute name in - // an expression. - // - // To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be + // cases for using ExpressionAttributeNames: To access an attribute whose + // name conflicts with a DynamoDB reserved word. To create a placeholder + // for repeating occurrences of an attribute name in an expression. To prevent + // special characters in an attribute name from being misinterpreted in an + // expression. Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: Percentile The + // name of this attribute conflicts with a reserved word, so it cannot be // used directly in an expression. (For the complete list of reserved words, // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could - // specify the following for ExpressionAttributeNames: - // - // {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. 
- // - // For more information on expression attribute names, see Accessing Item Attributes + // specify the following for ExpressionAttributeNames: {"#P":"Percentile"} + // You could then use this substitution in an expression, as in this example: + // #P = :val Tokens that begin with the : character are expression attribute + // values, which are placeholders for the actual value at runtime. For more + // information about expression attribute names, see Accessing Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // @@ -6150,13 +6246,10 @@ type BatchGetItemInput struct { // * ProjectionExpression - A string that identifies one or more attributes // to retrieve from the table. These attributes can include scalars, sets, // or elements of a JSON document. The attributes in the expression must - // be separated by commas. - // - // If no attribute names are specified, then all attributes will be returned. - // If any of the requested attributes are not found, they will not appear - // in the result. - // - // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // be separated by commas. If no attribute names are specified, then all + // attributes are returned. If any of the requested attributes are not found, + // they do not appear in the result. For more information, see Accessing + // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression @@ -6171,11 +6264,9 @@ type BatchGetItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -6310,27 +6401,23 @@ type BatchWriteItemInput struct { // of the following: // // * DeleteRequest - Perform a DeleteItem operation on the specified item. - // The item to be deleted is identified by a Key subelement: - // - // Key - A map of primary key attribute values that uniquely identify the item. - // Each entry in this map consists of an attribute name and an attribute - // value. For each primary key, you must provide all of the key attributes. - // For example, with a simple primary key, you only need to provide a value - // for the partition key. For a composite primary key, you must provide values + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // For each primary key, you must provide all of the key attributes. 
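The #P placeholder and ProjectionExpression behavior described above map onto BatchGetItemInput like this. A sketch, assuming a hypothetical "Statistics" table whose items carry an attribute named Percentile (a DynamoDB reserved word):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{
		RequestItems: map[string]*dynamodb.KeysAndAttributes{
			"Statistics": { // hypothetical table
				Keys: []map[string]*dynamodb.AttributeValue{
					{"Id": {N: aws.String("1")}},
					{"Id": {N: aws.String("2")}},
				},
				// Percentile is a reserved word, so dereference it via #P.
				ProjectionExpression:     aws.String("Id, #P"),
				ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
			},
		},
		// INDEXES | TOTAL | NONE, per the ReturnConsumedCapacity doc above.
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Responses["Statistics"])
}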
For + // example, with a simple primary key, you only need to provide a value for + // the partition key. For a composite primary key, you must provide values // for both the partition key and the sort key. // // * PutRequest - Perform a PutItem operation on the specified item. The - // item to be put is identified by an Item subelement: - // - // Item - A map of attributes and their values. Each entry in this map consists - // of an attribute name and an attribute value. Attribute values must not - // be null; string and binary type attributes must have lengths greater than - // zero; and set type attributes must not be empty. Requests that contain - // empty values will be rejected with a ValidationException exception. - // - // If you specify any attributes that are part of an index key, then the data - // types for those attributes must match those of the schema in the table's - // attribute definition. + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values are rejected + // with a ValidationException exception. If you specify any attributes that + // are part of an index key, then the data types for those attributes must + // match those of the schema in the table's attribute definition. // // RequestItems is a required field RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"` @@ -6340,11 +6427,9 @@ type BatchWriteItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -6430,10 +6515,9 @@ type BatchWriteItemOutput struct { // bound for the estimate. The estimate includes the size of all the items // in the table, plus the size of all attributes projected into all of the // local secondary indexes on the table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` // A map of tables and requests against those tables that were not processed. @@ -6445,24 +6529,19 @@ type BatchWriteItemOutput struct { // a list of operations to perform (DeleteRequest or PutRequest). // // * DeleteRequest - Perform a DeleteItem operation on the specified item. 
- // The item to be deleted is identified by a Key subelement: - // - // Key - A map of primary key attribute values that uniquely identify the item. - // Each entry in this map consists of an attribute name and an attribute - // value. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. // // * PutRequest - Perform a PutItem operation on the specified item. The - // item to be put is identified by an Item subelement: - // - // Item - A map of attributes and their values. Each entry in this map consists - // of an attribute name and an attribute value. Attribute values must not - // be null; string and binary type attributes must have lengths greater than - // zero; and set type attributes must not be empty. Requests that contain - // empty values will be rejected with a ValidationException exception. - // - // If you specify any attributes that are part of an index key, then the data - // types for those attributes must match those of the schema in the table's - // attribute definition. + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values will + // be rejected with a ValidationException exception. If you specify any attributes + // that are part of an index key, then the data types for those attributes + // must match those of the schema in the table's attribute definition. // // If there are no unprocessed items remaining, the response contains an empty // UnprocessedItems map. @@ -6630,12 +6709,9 @@ func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { // // * For a Query operation, Condition is used for specifying the KeyConditions // to use when querying a table or an index. For KeyConditions, only the -// following comparison operators are supported: -// -// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN -// -// Condition is also used in a QueryFilter, which evaluates the query results -// and returns only the desired values. +// following comparison operators are supported: EQ | LE | LT | GE | GT | +// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates +// the query results and returns only the desired values. // // * For a Scan operation, Condition is used in a ScanFilter, which evaluates // the scan results and returns only the desired values. @@ -6667,36 +6743,109 @@ type Condition struct { // The following are descriptions of each comparison operator. // // * EQ : Equal. EQ is supported for all data types, including lists and - // maps. + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. 
If an item contains - // an AttributeValue element of a different type than the one provided in + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in // the request, the value does not match. For example, {"S":"6"} does not // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // * NE : Not equal. NE is supported for all data types, including lists - // and maps. - // - // * AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue of a different type than the one provided in the request, - // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. - // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. // - // * LE : Less than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue - // element of a different type than the one provided in the request, the value - // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // LT: Less than. - // - // AttributeValueListcan contain only one AttributeValueof type String, Number, or Binary (not a set type). If an item contains an - // AttributeValueelement of a different type than the one provided in the request, the value - // does not match. For example, {"S":"6"}does not equal {"N":"6"}. Also, {"N":"6"}does not compare to {"NS":["6", "2", "1"]} + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. 
NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. 
If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. // // ComparisonOperator is a required field ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` @@ -6738,7 +6887,7 @@ func (s *Condition) SetComparisonOperator(v string) *Condition { } // Represents a request to perform a check that an item exists or to check the -// condition of specific attributes of the item.. +// condition of specific attributes of the item. type ConditionCheck struct { _ struct{} `type:"structure"` @@ -7155,7 +7304,7 @@ type CreateGlobalTableInput struct { // GlobalTableName is a required field GlobalTableName *string `min:"3" type:"string" required:"true"` - // The regions where the global table needs to be created. + // The Regions where the global table needs to be created. // // ReplicationGroup is a required field ReplicationGroup []*Replica `type:"list" required:"true"` @@ -7229,7 +7378,7 @@ func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescri type CreateReplicaAction struct { _ struct{} `type:"structure"` - // The region of the replica to be added. + // The Region of the replica to be added. // // RegionName is a required field RegionName *string `type:"string" required:"true"` @@ -7276,11 +7425,11 @@ type CreateTableInput struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. // - // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using - // PROVISIONED for predictable workloads. + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). // - // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend - // using PAY_PER_REQUEST for unpredictable workloads. + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). BillingMode *string `type:"string" enum:"BillingMode"` // One or more global secondary indexes (the maximum is 20) to be created on @@ -7294,22 +7443,16 @@ type CreateTableInput struct { // * Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: - // - // * ProjectionType - One of the following: - // - // KEYS_ONLY - Only the index and primary keys are projected into the index. - // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. - // - // ALL - All of the table attributes are projected into the index. 
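A sketch of the legacy Condition shape that the comparison-operator descriptions above refer to, here as a Scan with a ScanFilter. The table and attribute names are hypothetical, and new code would normally prefer a FilterExpression over this legacy parameter:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.Scan(&dynamodb.ScanInput{
		TableName: aws.String("ProductCatalog"), // hypothetical
		ScanFilter: map[string]*dynamodb.Condition{
			// BETWEEN takes two AttributeValues of the same type and
			// matches when first <= Price <= second.
			"Price": {
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorBetween),
				AttributeValueList: []*dynamodb.AttributeValue{
					{N: aws.String("10")},
					{N: aws.String("20")},
				},
			},
			// BEGINS_WITH takes a single String (or Binary) operand.
			"Title": {
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorBeginsWith),
				AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("Book")}},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.Int64Value(out.Count), "matching items")
}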
- // - // NonKeyAttributes - A list of one or more non-key attribute names that are - // projected into the secondary index. The total count of attributes provided - // in NonKeyAttributes, summed across all of the secondary indexes, must - // not exceed 100. If you project the same attribute into two different indexes, - // this counts as two distinct attributes when determining the total. + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. // // * ProvisionedThroughput - The provisioned throughput settings for the // global secondary index, consisting of read and write capacity units. @@ -7324,14 +7467,11 @@ type CreateTableInput struct { // // * AttributeName - The name of this key attribute. // - // * KeyType - The role that the key attribute will assume: - // - // HASH - partition key - // - // RANGE - sort key + // * KeyType - The role that the key attribute will assume: HASH - partition + // key RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from the DynamoDB usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -7346,7 +7486,7 @@ type CreateTableInput struct { // exactly two elements, in this order: The first element must have a KeyType // of HASH, and the second element must have a KeyType of RANGE. // - // For more information, see Specifying the Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) // in the Amazon DynamoDB Developer Guide. // // KeySchema is a required field @@ -7368,22 +7508,16 @@ type CreateTableInput struct { // * Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: - // - // * ProjectionType - One of the following: - // - // KEYS_ONLY - Only the index and primary keys are projected into the index. - // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. - // - // ALL - All of the table attributes are projected into the index. - // - // NonKeyAttributes - A list of one or more non-key attribute names that are - // projected into the secondary index. The total count of attributes provided - // in NonKeyAttributes, summed across all of the secondary indexes, must - // not exceed 100. 
If you project the same attribute into two different indexes, - // this counts as two distinct attributes when determining the total. + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` // Represents the provisioned throughput settings for a specified table or index. @@ -7402,24 +7536,17 @@ type CreateTableInput struct { // The settings for DynamoDB Streams on the table. These settings consist of: // - // * StreamEnabled - Indicates whether Streams is to be enabled (true) or - // disabled (false). + // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled + // (true) or disabled (false). // // * StreamViewType - When an item in the table is modified, StreamViewType // determines what information is written to the table's stream. Valid values - // for StreamViewType are: - // - // KEYS_ONLY - Only the key attributes of the modified item are written to the - // stream. - // - // NEW_IMAGE - The entire item, as it appears after it was modified, is written - // to the stream. - // - // OLD_IMAGE - The entire item, as it appeared before it was modified, is written - // to the stream. - // - // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are - // written to the stream. + // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified + // item are written to the stream. NEW_IMAGE - The entire item, as it appears + // after it was modified, is written to the stream. OLD_IMAGE - The entire + // item, as it appeared before it was modified, is written to the stream. + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. StreamSpecification *StreamSpecification `type:"structure"` // The name of the table to create. @@ -7817,15 +7944,13 @@ type DeleteItemInput struct { // An expression can contain any of the following: // // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size - // - // These function names are case-sensitive. + // | contains | begins_with | size These function names are case-sensitive. // // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN // - // * Logical operators: AND | OR | NOT + // * Logical operators: AND | OR | NOT // - // For more information on condition expressions, see Specifying Conditions + // For more information about condition expressions, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ConditionExpression *string `type:"string"` @@ -7872,7 +7997,7 @@ type DeleteItemInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. 
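Pulling together the CreateTableInput pieces documented above — BillingMode, the HASH/RANGE key schema, a global secondary index with an INCLUDE projection, and a stream specification — a sketch with hypothetical table, index, and attribute names:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName:   aws.String("Thread"), // hypothetical
		BillingMode: aws.String(dynamodb.BillingModeProvisioned),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("ForumName"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("Subject"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("LastPostedBy"), AttributeType: aws.String("S")},
		},
		// HASH = partition key, RANGE = sort key, in that order.
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("ForumName"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			{AttributeName: aws.String("Subject"), KeyType: aws.String(dynamodb.KeyTypeRange)},
		},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(5),
			WriteCapacityUnits: aws.Int64(5),
		},
		GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{{
			IndexName: aws.String("LastPostedByIndex"),
			KeySchema: []*dynamodb.KeySchemaElement{
				{AttributeName: aws.String("LastPostedBy"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			},
			// INCLUDE projects the keys plus the listed non-key attributes.
			Projection: &dynamodb.Projection{
				ProjectionType:   aws.String(dynamodb.ProjectionTypeInclude),
				NonKeyAttributes: aws.StringSlice([]string{"Replies"}),
			},
			ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
				ReadCapacityUnits:  aws.Int64(5),
				WriteCapacityUnits: aws.Int64(5),
			},
		}},
		StreamSpecification: &dynamodb.StreamSpecification{
			StreamEnabled:  aws.Bool(true),
			StreamViewType: aws.String(dynamodb.StreamViewTypeNewAndOldImages),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}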
// - // For more information on expression attribute names, see Accessing Item Attributes + // For more information on expression attribute names, see Specifying Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -7894,7 +8019,7 @@ type DeleteItemInput struct { // // ProductStatus IN (:avail, :back, :disc) // - // For more information on expression attribute values, see Specifying Conditions + // For more information on expression attribute values, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` @@ -7915,11 +8040,9 @@ type DeleteItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -8053,7 +8176,7 @@ type DeleteItemOutput struct { // includes the total provisioned throughput consumed, along with statistics // for the table and any indexes involved in the operation. ConsumedCapacity // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -8072,10 +8195,9 @@ type DeleteItemOutput struct { // bound for the estimate. The estimate includes the size of all the items // in the table, plus the size of all attributes projected into all of the // local secondary indexes on that table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` } @@ -8111,7 +8233,7 @@ func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *D type DeleteReplicaAction struct { _ struct{} `type:"structure"` - // The region of the replica to be removed. + // The Region of the replica to be removed. 
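The ConditionExpression and ExpressionAttributeValues pieces documented for DeleteItemInput above fit together like this. A sketch with hypothetical names; the attribute_not_exists function name is case-sensitive, as the doc notes:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
		TableName: aws.String("ProductCatalog"), // hypothetical
		Key: map[string]*dynamodb.AttributeValue{
			"Id": {N: aws.String("456")},
		},
		// Delete only if the product is discontinued and has no stock
		// attribute; :disc is an expression attribute value placeholder.
		ConditionExpression: aws.String(
			"ProductStatus IN (:disc) AND attribute_not_exists(InStock)"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":disc": {S: aws.String("Discontinued")},
		},
	})
	if err != nil {
		// A failed condition surfaces as ConditionalCheckFailedException.
		log.Fatal(err)
	}
}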
// // RegionName is a required field RegionName *string `type:"string" required:"true"` @@ -8243,7 +8365,7 @@ func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTabl type DescribeBackupInput struct { _ struct{} `type:"structure"` - // The ARN associated with the backup. + // The Amazon Resource Name (ARN) associated with the backup. // // BackupArn is a required field BackupArn *string `min:"37" type:"string" required:"true"` @@ -8520,7 +8642,7 @@ type DescribeGlobalTableSettingsOutput struct { // The name of the global table. GlobalTableName *string `min:"3" type:"string"` - // The region specific settings for the global table. + // The Region-specific settings for the global table. ReplicaSettings []*ReplicaSettingsDescription `type:"list"` } @@ -8566,20 +8688,20 @@ type DescribeLimitsOutput struct { _ struct{} `type:"structure"` // The maximum total read capacity units that your account allows you to provision - // across all of your tables in this region. + // across all of your tables in this Region. AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` // The maximum total write capacity units that your account allows you to provision - // across all of your tables in this region. + // across all of your tables in this Region. AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` // The maximum read capacity units that your account allows you to provision - // for a new table that you are creating in this region, including the read + // for a new table that you are creating in this Region, including the read // capacity units provisioned for its global secondary indexes (GSIs). TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` // The maximum write capacity units that your account allows you to provision - // for a new table that you are creating in this region, including the write + // for a new table that you are creating in this Region, including the write // capacity units provisioned for its global secondary indexes (GSIs). TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` } @@ -8786,7 +8908,7 @@ func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint { } // Represents a condition to be compared with an attribute value. This condition -// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison +// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison // evaluates to true, the operation succeeds; if not, the operation fails. You // can use ExpectedAttributeValue in one of two different ways: // @@ -8836,36 +8958,105 @@ type ExpectedAttributeValue struct { // The following are descriptions of each comparison operator. // // * EQ : Equal. EQ is supported for all data types, including lists and - // maps. + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. 
AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in // the request, the value does not match. For example, {"S":"6"} does not // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // * NE : Not equal. NE is supported for all data types, including lists - // and maps. - // - // * AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue of a different type than the one provided in the request, - // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. - // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. // - // * LE : Less than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue - // element of a different type than the one provided in the request, the value - // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // LT: Less than. - // - // AttributeValueListcan contain only one AttributeValueof type String, Number, or Binary (not a set type). If an item contains an - // AttributeValueelement of a different type than the one provided in the request, the value - // does not match. For example, {"S":"6"}does not equal {"N":"6"}. Also, {"N":"6"}does not compare to {"NS":["6", "2", "1"]} + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. 
If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. 
Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` // Causes DynamoDB to evaluate the value before attempting a conditional operation: @@ -9063,7 +9254,7 @@ type GetItemInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes + // For more information on expression attribute names, see Specifying Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -9083,11 +9274,11 @@ type GetItemInput struct { // These attributes can include scalars, sets, or elements of a JSON document. // The attributes in the expression must be separated by commas. // - // If no attribute names are specified, then all attributes will be returned. - // If any of the requested attributes are not found, they will not appear in - // the result. + // If no attribute names are specified, then all attributes are returned. If + // any of the requested attributes are not found, they do not appear in the + // result. // - // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ProjectionExpression *string `type:"string"` @@ -9096,11 +9287,9 @@ type GetItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -9196,7 +9385,7 @@ type GetItemOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -9244,7 +9433,7 @@ type GlobalSecondaryIndex struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. 
The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -9361,6 +9550,11 @@ type GlobalSecondaryIndexDescription struct { // DynamoDB will do so. After all items have been processed, the backfilling // operation is complete and Backfilling is false. // + // You can delete an index that is being created during the Backfilling phase + // when IndexStatus is set to CREATING and Backfilling is true. You can't delete + // the index that is being created when IndexStatus is set to CREATING and Backfilling + // is false. + // // For indexes that were created during a CreateTable operation, the Backfilling // attribute does not appear in the DescribeTable output. Backfilling *bool `type:"boolean"` @@ -9399,7 +9593,7 @@ type GlobalSecondaryIndexDescription struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -9502,7 +9696,7 @@ type GlobalSecondaryIndexInfo struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -9648,7 +9842,7 @@ type GlobalTable struct { // The global table name. GlobalTableName *string `min:"3" type:"string"` - // The regions where the global table has replicas. + // The Regions where the global table has replicas. ReplicationGroup []*Replica `type:"list"` } @@ -9698,7 +9892,7 @@ type GlobalTableDescription struct { // * ACTIVE - The global table is ready for use. GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"` - // The regions where the global table has replicas. + // The Regions where the global table has replicas. ReplicationGroup []*ReplicaDescription `type:"list"` } @@ -9753,7 +9947,7 @@ type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` - // AutoScaling settings for managing a global secondary index's write capacity + // Auto scaling settings for managing a global secondary index's write capacity // units. ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` @@ -9909,7 +10103,7 @@ type KeySchemaElement struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -10110,10 +10304,10 @@ type ListBackupsInput struct { // * ALL - All types of on-demand backups (USER and SYSTEM). 
BackupType *string `type:"string" enum:"BackupTypeFilter"` - // LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current - // page of results was returned, inclusive of the current page of results. This - // value may be specified as the ExclusiveStartBackupArn of a new ListBackups - // operation in order to fetch the next page of results. + // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last + // evaluated when the current page of results was returned, inclusive of the + // current page of results. This value may be specified as the ExclusiveStartBackupArn + // of a new ListBackups operation in order to fetch the next page of results. ExclusiveStartBackupArn *string `min:"37" type:"string"` // Maximum number of backups to return at once. @@ -10209,9 +10403,9 @@ type ListBackupsOutput struct { // If LastEvaluatedBackupArn is empty, then the last page of results has been // processed and there are no more results to be retrieved. // - // If LastEvaluatedBackupArn is not empty, this may or may not indicate there - // is more data to be returned. All results are guaranteed to have been returned - // if and only if no value for LastEvaluatedBackupArn is returned. + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that + // there is more data to be returned. All results are guaranteed to have been + // returned if and only if no value for LastEvaluatedBackupArn is returned. LastEvaluatedBackupArn *string `min:"37" type:"string"` } @@ -10246,7 +10440,7 @@ type ListGlobalTablesInput struct { // The maximum number of table names to return. Limit *int64 `min:"1" type:"integer"` - // Lists the global tables in a specific region. + // Lists the global tables in a specific Region. RegionName *string `type:"string"` } @@ -10526,7 +10720,7 @@ type LocalSecondaryIndex struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -10640,7 +10834,7 @@ type LocalSecondaryIndexDescription struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -10717,7 +10911,7 @@ type LocalSecondaryIndexInfo struct { // * RANGE - sort key // // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function + // "hash attribute" derives from DynamoDB's usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. // @@ -10764,8 +10958,8 @@ func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIn type PointInTimeRecoveryDescription struct { _ struct{} `type:"structure"` - // Specifies the earliest point in time you can restore your table to. It You - // can restore your table to any point in time during the last 35 days. + // Specifies the earliest point in time you can restore your table to. 
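The ExclusiveStartBackupArn / LastEvaluatedBackupArn handshake described above is the usual cursor loop, and, as the doc stresses, only an absent LastEvaluatedBackupArn guarantees the listing is complete. A sketch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.ListBackupsInput{Limit: aws.Int64(25)}
	for {
		out, err := svc.ListBackups(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range out.BackupSummaries {
			fmt.Println(aws.StringValue(b.BackupArn))
		}
		// A non-empty LastEvaluatedBackupArn may or may not mean more
		// data; keep paging until it is absent.
		if out.LastEvaluatedBackupArn == nil {
			break
		}
		input.ExclusiveStartBackupArn = out.LastEvaluatedBackupArn
	}
}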
You can + // restore your table to any point in time during the last 35 days. EarliestRestorableDateTime *time.Time `type:"timestamp"` // LatestRestorableDateTime is typically 5 minutes before the current time. @@ -10868,7 +11062,7 @@ type Projection struct { // * KEYS_ONLY - Only the index and primary keys are projected into the index. // // * INCLUDE - Only the specified table attributes are projected into the - // index. The list of projected attributes are in NonKeyAttributes. + // index. The list of projected attributes is in NonKeyAttributes. // // * ALL - All of the table attributes are projected into the index. ProjectionType *string `type:"string" enum:"ProjectionType"` @@ -11159,15 +11353,13 @@ type PutItemInput struct { // An expression can contain any of the following: // // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size - // - // These function names are case-sensitive. + // | contains | begins_with | size These function names are case-sensitive. // // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN // - // * Logical operators: AND | OR | NOT + // * Logical operators: AND | OR | NOT // - // For more information on condition expressions, see Specifying Conditions + // For more information on condition expressions, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ConditionExpression *string `type:"string"` @@ -11214,7 +11406,7 @@ type PutItemInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes + // For more information on expression attribute names, see Specifying Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -11236,7 +11428,7 @@ type PutItemInput struct { // // ProductStatus IN (:avail, :back, :disc) // - // For more information on expression attribute values, see Specifying Conditions + // For more information on expression attribute values, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` @@ -11254,7 +11446,7 @@ type PutItemInput struct { // types for those attributes must match those of the schema in the table's // attribute definition. // - // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) // in the Amazon DynamoDB Developer Guide. // // Each element in the Item map is an AttributeValue object. @@ -11267,11 +11459,9 @@ type PutItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. 
- // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -11407,7 +11597,7 @@ type PutItemOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -11426,10 +11616,9 @@ type PutItemOutput struct { // bound for the estimate. The estimate includes the size of all the items // in the table, plus the size of all attributes projected into all of the // local secondary indexes on that table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` } @@ -11468,7 +11657,7 @@ type PutRequest struct { // A map of attribute name to attribute values, representing the primary key // of an item to be processed by PutItem. All of the table's primary key attributes // must be specified, and their data types must match those of the table's key - // schema. If any attributes are present in the item which are part of an index + // schema. If any attributes are present in the item that are part of an index // key schema for the table, their types must match the index key schema. // // Item is a required field @@ -11517,7 +11706,7 @@ type QueryInput struct { // The primary key of the first item that this operation will evaluate. Use // the value that was returned for LastEvaluatedKey in the previous operation. // - // The data type for ExclusiveStartKey must be String, Number or Binary. No + // The data type for ExclusiveStartKey must be String, Number, or Binary. No // set data types are allowed. ExclusiveStartKey map[string]*AttributeValue `type:"map"` @@ -11553,7 +11742,7 @@ type QueryInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes + // For more information on expression attribute names, see Specifying Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. 
ExpressionAttributeNames map[string]*string `type:"map"` @@ -11599,7 +11788,7 @@ type QueryInput struct { // parameter, you must also provide TableName. IndexName *string `min:"3" type:"string"` - // The condition that specifies the key value(s) for items to be retrieved by + // The condition that specifies the key values for items to be retrieved by // the Query action. // // The condition must perform an equality test on a single partition key value. @@ -11612,34 +11801,35 @@ type QueryInput struct { // The partition key equality test is required, and must be specified in the // following format: // - // partitionKeyName=:partitionkeyval + // partitionKeyName = :partitionkeyval // // If you also want to provide a condition for the sort key, it must be combined // using AND with the condition for the sort key. Following is an example, using // the = comparison operator for the sort key: // - // partitionKeyName=:partitionkeyvalANDsortKeyName=:sortkeyval + // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval // // Valid comparisons for the sort key condition are as follows: // - // * sortKeyName=:sortkeyval - true if the sort key value is equal to :sortkeyval. + // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval. // - // * sortKeyName<:sortkeyval - true if the sort key value is less than :sortkeyval. + // * sortKeyName < :sortkeyval - true if the sort key value is less than + // :sortkeyval. // - // * sortKeyName<=:sortkeyval - true if the sort key value is less than or - // equal to :sortkeyval. + // * sortKeyName <= :sortkeyval - true if the sort key value is less than + // or equal to :sortkeyval. // - // * sortKeyName>:sortkeyval - true if the sort key value is greater than + // * sortKeyName > :sortkeyval - true if the sort key value is greater than // :sortkeyval. // - // * sortKeyName>= :sortkeyval - true if the sort key value is greater than + // * sortKeyName >= :sortkeyval - true if the sort key value is greater than // or equal to :sortkeyval. // - // * sortKeyNameBETWEEN:sortkeyval1AND:sortkeyval2 - true if the sort key - // value is greater than or equal to :sortkeyval1, and less than or equal + // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort + // key value is greater than or equal to :sortkeyval1, and less than or equal // to :sortkeyval2. // - // * begins_with (sortKeyName, :sortkeyval) - true if the sort key value + // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value // begins with a particular operand. (You cannot use this function with a // sort key that is of type Number.) Note that the function name begins_with // is case-sensitive. @@ -11677,7 +11867,7 @@ type QueryInput struct { // items). If DynamoDB processes the number of items up to the limit while processing // the results, it stops the operation and returns the matching values up to // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, - // so that you can pick up where you left off. Also, if the processed data set + // so that you can pick up where you left off. Also, if the processed dataset // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation // and returns the matching values up to the limit, and a key in LastEvaluatedKey // to apply in a subsequent operation to continue the operation. 
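The KeyConditionExpression grammar spelled out above (partition-key equality plus an optional sort-key comparison such as BETWEEN) would be exercised roughly like this. A sketch assuming a hypothetical Movies table whose partition key Year is a DynamoDB reserved word, hence the #yr alias:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Partition-key equality plus a BETWEEN sort-key condition, exactly as the
	// grammar above describes. #yr aliases "Year", a reserved word.
	out, err := svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Movies"),
		KeyConditionExpression: aws.String("#yr = :y AND Title BETWEEN :a AND :z"),
		ExpressionAttributeNames: map[string]*string{
			"#yr": aws.String("Year"),
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":y": {N: aws.String("1985")},
			":a": {S: aws.String("A")},
			":z": {S: aws.String("M")},
		},
	})
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("matched items:", aws.Int64Value(out.Count))
}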
For more information, @@ -11707,11 +11897,9 @@ type QueryInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -11740,10 +11928,10 @@ type QueryInput struct { // // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified // table or index. If you query a local secondary index, then for each matching - // item in the index DynamoDB will fetch the entire item from the parent - // table. If the index is configured to project all item attributes, then - // all of the data can be obtained from the local secondary index, and no - // fetching is required. + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. // // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is @@ -11755,18 +11943,15 @@ type QueryInput struct { // // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. // This return value is equivalent to specifying AttributesToGet without - // specifying any value for Select. - // - // If you query or scan a local secondary index and request only attributes - // that are projected into that index, the operation will read only the index - // and not the table. If any of the requested attributes are not projected - // into the local secondary index, DynamoDB will fetch each of these attributes - // from the parent table. This extra fetching incurs additional throughput - // cost and latency. - // - // If you query or scan a global secondary index, you can only request attributes - // that are projected into the index. Global secondary index queries cannot - // fetch attributes from the parent table. + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation will read only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, + // DynamoDB fetches each of these attributes from the parent table. This + // extra fetching incurs additional throughput cost and latency. If you query + // or scan a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. // // If neither Select nor AttributesToGet are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when @@ -11950,8 +12135,8 @@ type QueryOutput struct { // The capacity units consumed by the Query operation. 
The data returned includes // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only - // returned if the ReturnConsumedCapacity parameter was specified For more information, - // see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -12035,7 +12220,7 @@ func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput { type Replica struct { _ struct{} `type:"structure"` - // The region where the replica needs to be created. + // The Region where the replica needs to be created. RegionName *string `type:"string"` } @@ -12059,7 +12244,7 @@ func (s *Replica) SetRegionName(v string) *Replica { type ReplicaDescription struct { _ struct{} `type:"structure"` - // The name of the region. + // The name of the Region. RegionName *string `type:"string"` } @@ -12100,7 +12285,7 @@ type ReplicaGlobalSecondaryIndexSettingsDescription struct { // * ACTIVE - The global secondary index is ready for use. IndexStatus *string `type:"string" enum:"IndexStatus"` - // Autoscaling settings for a global secondary index replica's read capacity + // Auto scaling settings for a global secondary index replica's read capacity // units. ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` @@ -12108,7 +12293,7 @@ type ReplicaGlobalSecondaryIndexSettingsDescription struct { // DynamoDB returns a ThrottlingException. ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` - // AutoScaling settings for a global secondary index replica's write capacity + // Auto scaling settings for a global secondary index replica's write capacity // units. ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` @@ -12174,7 +12359,7 @@ type ReplicaGlobalSecondaryIndexSettingsUpdate struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` - // Autoscaling settings for managing a global secondary index replica's read + // Auto scaling settings for managing a global secondary index replica's read // capacity units. ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` @@ -12239,7 +12424,7 @@ func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUn type ReplicaSettingsDescription struct { _ struct{} `type:"structure"` - // The region name of the replica. + // The Region name of the replica. // // RegionName is a required field RegionName *string `type:"string" required:"true"` @@ -12250,7 +12435,7 @@ type ReplicaSettingsDescription struct { // Replica global secondary index settings for the global table. ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"` - // Autoscaling settings for a global table replica's read capacity units. + // Auto scaling settings for a global table replica's read capacity units. 
ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` // The maximum number of strongly consistent reads consumed per second before @@ -12259,7 +12444,7 @@ type ReplicaSettingsDescription struct { // in the Amazon DynamoDB Developer Guide. ReplicaProvisionedReadCapacityUnits *int64 `type:"long"` - // AutoScaling settings for a global table replica's write capacity units. + // Auto scaling settings for a global table replica's write capacity units. ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` // The maximum number of writes consumed per second before DynamoDB returns @@ -12268,15 +12453,15 @@ type ReplicaSettingsDescription struct { // in the Amazon DynamoDB Developer Guide. ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"` - // The current state of the region: + // The current state of the Region: // - // * CREATING - The region is being created. + // * CREATING - The Region is being created. // - // * UPDATING - The region is being updated. + // * UPDATING - The Region is being updated. // - // * DELETING - The region is being deleted. + // * DELETING - The Region is being deleted. // - // * ACTIVE - The region is ready for use. + // * ACTIVE - The Region is ready for use. ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` } @@ -12338,11 +12523,11 @@ func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettings return s } -// Represents the settings for a global table in a region that will be modified. +// Represents the settings for a global table in a Region that will be modified. type ReplicaSettingsUpdate struct { _ struct{} `type:"structure"` - // The region of the replica to be added. + // The Region of the replica to be added. // // RegionName is a required field RegionName *string `type:"string" required:"true"` @@ -12351,7 +12536,7 @@ type ReplicaSettingsUpdate struct { // will be modified. ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` - // Autoscaling settings for managing a global table replica's read capacity + // Auto scaling settings for managing a global table replica's read capacity // units. ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` @@ -12503,10 +12688,10 @@ type RestoreSummary struct { // RestoreInProgress is a required field RestoreInProgress *bool `type:"boolean" required:"true"` - // ARN of the backup from which the table was restored. + // The Amazon Resource Name (ARN) of the backup from which the table was restored. SourceBackupArn *string `min:"37" type:"string"` - // ARN of the source table of the backup that is being restored. + // The ARN of the source table of the backup that is being restored. SourceTableArn *string `type:"string"` } @@ -12547,11 +12732,27 @@ func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary { type RestoreTableFromBackupInput struct { _ struct{} `type:"structure"` - // The ARN associated with the backup. + // The Amazon Resource Name (ARN) associated with the backup. // // BackupArn is a required field BackupArn *string `min:"37" type:"string" required:"true"` + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. 
You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` + // The name of the new table to which the backup must be restored. // // TargetTableName is a required field @@ -12583,6 +12784,31 @@ func (s *RestoreTableFromBackupInput) Validate() error { if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) } + if s.GlobalSecondaryIndexOverride != nil { + for i, v := range s.GlobalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexOverride != nil { + for i, v := range s.LocalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12596,6 +12822,30 @@ func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBa return s } +// SetBillingModeOverride sets the BillingModeOverride field's value. +func (s *RestoreTableFromBackupInput) SetBillingModeOverride(v string) *RestoreTableFromBackupInput { + s.BillingModeOverride = &v + return s +} + +// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableFromBackupInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableFromBackupInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { + s.ProvisionedThroughputOverride = v + return s +} + // SetTargetTableName sets the TargetTableName field's value. func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput { s.TargetTableName = &v @@ -12628,6 +12878,22 @@ func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) type RestoreTableToPointInTimeInput struct { _ struct{} `type:"structure"` + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. 
You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` + // Time in the past to restore the table to. RestoreDateTime *time.Time `type:"timestamp"` @@ -12671,6 +12937,31 @@ func (s *RestoreTableToPointInTimeInput) Validate() error { if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) } + if s.GlobalSecondaryIndexOverride != nil { + for i, v := range s.GlobalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexOverride != nil { + for i, v := range s.LocalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12678,6 +12969,30 @@ func (s *RestoreTableToPointInTimeInput) Validate() error { return nil } +// SetBillingModeOverride sets the BillingModeOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetBillingModeOverride(v string) *RestoreTableToPointInTimeInput { + s.BillingModeOverride = &v + return s +} + +// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput { + s.ProvisionedThroughputOverride = v + return s +} + // SetRestoreDateTime sets the RestoreDateTime field's value. func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput { s.RestoreDateTime = &v @@ -12729,28 +13044,21 @@ func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescriptio type SSEDescription struct { _ struct{} `type:"structure"` - // The KMS master key ARN used for the KMS encryption. + // The KMS customer master key (CMK) ARN used for the AWS KMS encryption. KMSMasterKeyArn *string `type:"string"` - // Server-side encryption type: - // - // * AES256 - Server-side encryption which uses the AES256 algorithm (not - // applicable). 
+ // Server-side encryption type. The only supported value is: // - // * KMS - Server-side encryption which uses AWS Key Management Service. - // Key is stored in your account and is managed by AWS KMS (KMS charges apply). + // * KMS - Server-side encryption that uses AWS Key Management Service. The + // key is stored in your account and is managed by AWS KMS (AWS KMS charges + // apply). SSEType *string `type:"string" enum:"SSEType"` - // The current state of server-side encryption: - // - // * ENABLING - Server-side encryption is being enabled. + // Represents the current state of server-side encryption. The only supported + // values are: // // * ENABLED - Server-side encryption is enabled. // - // * DISABLING - Server-side encryption is being disabled. - // - // * DISABLED - Server-side encryption is disabled. - // // * UPDATING - Server-side encryption is being updated. Status *string `type:"string" enum:"SSEStatus"` } @@ -12787,25 +13095,23 @@ func (s *SSEDescription) SetStatus(v string) *SSEDescription { type SSESpecification struct { _ struct{} `type:"structure"` - // Indicates whether server-side encryption is enabled (true) or disabled (false) - // on the table. If enabled (true), server-side encryption type is set to KMS. - // If disabled (false) or not specified, server-side encryption is set to AWS - // owned CMK. + // Indicates whether server-side encryption is done using an AWS managed CMK + // or an AWS owned CMK. If enabled (true), server-side encryption type is set + // to KMS and an AWS managed CMK is used (AWS KMS charges apply). If disabled + // (false) or not specified, server-side encryption is set to AWS owned CMK. Enabled *bool `type:"boolean"` - // The KMS Master Key (CMK) which should be used for the KMS encryption. To - // specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or - // alias ARN. Note that you should only provide this parameter if the key is - // different from the default DynamoDB KMS Master Key alias/aws/dynamodb. + // The KMS customer master key (CMK) that should be used for the AWS KMS encryption. + // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, + // or alias ARN. Note that you should only provide this parameter if the key + // is different from the default DynamoDB customer master key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` - // Server-side encryption type: + // Server-side encryption type. The only supported value is: // - // * AES256 - Server-side encryption which uses the AES256 algorithm (not - // applicable). - // - // * KMS - Server-side encryption which uses AWS Key Management Service. - // Key is stored in your account and is managed by AWS KMS (KMS charges apply). + // * KMS - Server-side encryption that uses AWS Key Management Service. The + // key is stored in your account and is managed by AWS KMS (AWS KMS charges + // apply). SSEType *string `type:"string" enum:"SSEType"` } @@ -12855,7 +13161,7 @@ type ScanInput struct { // // * If ConsistentRead is false, then the data returned from Scan might not // contain the results from other recently completed write operations (PutItem, - // UpdateItem or DeleteItem). + // UpdateItem, or DeleteItem). // // * If ConsistentRead is true, then all of the write operations that completed // before the Scan began are guaranteed to be contained in the Scan response. 
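To make the SSESpecification semantics above concrete (Enabled=true selects the KMS type with the AWS managed CMK), a minimal sketch; the Music table name is hypothetical, and KMSMasterKeyId is deliberately omitted so the default alias/aws/dynamodb key applies:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Enabled=true selects SSEType KMS with the AWS managed CMK, per the
	// comments above; KMSMasterKeyId would only be needed for a non-default CMK.
	_, _ = svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"), // hypothetical table name
		SSESpecification: &dynamodb.SSESpecification{
			Enabled: aws.Bool(true),
			SSEType: aws.String(dynamodb.SSETypeKms),
		},
	})
}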
@@ -12910,7 +13216,7 @@ type ScanInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes + // For more information on expression attribute names, see Specifying Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -12932,7 +13238,7 @@ type ScanInput struct { // // ProductStatus IN (:avail, :back, :disc) // - // For more information on expression attribute values, see Specifying Conditions + // For more information on expression attribute values, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` @@ -12957,11 +13263,11 @@ type ScanInput struct { // items). If DynamoDB processes the number of items up to the limit while processing // the results, it stops the operation and returns the matching values up to // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, - // so that you can pick up where you left off. Also, if the processed data set + // so that you can pick up where you left off. Also, if the processed dataset // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation // and returns the matching values up to the limit, and a key in LastEvaluatedKey // to apply in a subsequent operation to continue the operation. For more information, - // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) // in the Amazon DynamoDB Developer Guide. Limit *int64 `min:"1" type:"integer"` @@ -12973,7 +13279,7 @@ type ScanInput struct { // If any of the requested attributes are not found, they will not appear in // the result. // - // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ProjectionExpression *string `type:"string"` @@ -12982,11 +13288,9 @@ type ScanInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -13023,10 +13327,10 @@ type ScanInput struct { // // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified // table or index. 
If you query a local secondary index, then for each matching - // item in the index DynamoDB will fetch the entire item from the parent - // table. If the index is configured to project all item attributes, then - // all of the data can be obtained from the local secondary index, and no - // fetching is required. + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. // // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is @@ -13038,18 +13342,15 @@ type ScanInput struct { // // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. // This return value is equivalent to specifying AttributesToGet without - // specifying any value for Select. - // - // If you query or scan a local secondary index and request only attributes - // that are projected into that index, the operation will read only the index - // and not the table. If any of the requested attributes are not projected - // into the local secondary index, DynamoDB will fetch each of these attributes - // from the parent table. This extra fetching incurs additional throughput - // cost and latency. - // - // If you query or scan a global secondary index, you can only request attributes - // that are projected into the index. Global secondary index queries cannot - // fetch attributes from the parent table. + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation reads only the index and not the table. If any of the requested + // attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. This extra fetching + // incurs additional throughput cost and latency. If you query or scan a + // global secondary index, you can only request attributes that are projected + // into the index. Global secondary index queries cannot fetch attributes + // from the parent table. // // If neither Select nor AttributesToGet are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when @@ -13328,7 +13629,7 @@ type SourceTableDetails struct { // We recommend using PAY_PER_REQUEST for unpredictable workloads. BillingMode *string `type:"string" enum:"BillingMode"` - // Number of items in the table. Please note this is an approximate value. + // Number of items in the table. Note that this is an approximate value. ItemCount *int64 `type:"long"` // Schema of the table. @@ -13359,7 +13660,7 @@ type SourceTableDetails struct { // TableName is a required field TableName *string `min:"3" type:"string" required:"true"` - // Size of the table in bytes. Please note this is an approximate value. + // Size of the table in bytes. Note that this is an approximate value. TableSizeBytes *int64 `type:"long"` } @@ -13433,7 +13734,7 @@ type SourceTableFeatureDetails struct { _ struct{} `type:"structure"` // Represents the GSI properties for the table when the backup was created. - // It includes the IndexName, KeySchema, Projection and ProvisionedThroughput + // It includes the IndexName, KeySchema, Projection, and ProvisionedThroughput // for the GSIs on the table at the time of backup. 
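The Limit/LastEvaluatedKey contract described in the ScanInput comments above amounts to a simple pagination loop. A sketch with a hypothetical Music table, where #n aliases the reserved word Name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.ScanInput{
		TableName:            aws.String("Music"), // hypothetical
		Limit:                aws.Int64(100),
		ProjectionExpression: aws.String("#n, Artist"),
		ExpressionAttributeNames: map[string]*string{
			"#n": aws.String("Name"), // alias for a reserved word
		},
	}

	// Loop until LastEvaluatedKey is empty, feeding it back as ExclusiveStartKey;
	// this is the "pick up where you left off" behavior described above.
	for {
		out, err := svc.Scan(input)
		if err != nil {
			fmt.Println("scan failed:", err)
			return
		}
		fmt.Println("page items:", len(out.Items))
		if len(out.LastEvaluatedKey) == 0 {
			break
		}
		input.ExclusiveStartKey = out.LastEvaluatedKey
	}
}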
GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"` @@ -13567,9 +13868,14 @@ type TableDescription struct { // // * Backfilling - If true, then the index is currently in the backfilling // phase. Backfilling occurs only when a new global secondary index is added - // to the table; it is the process by which DynamoDB populates the new index + // to the table. It is the process by which DynamoDB populates the new index // with data from the table. (This attribute does not appear for indexes - // that were created during a CreateTable operation.) + // that were created during a CreateTable operation.) You can delete an index + // that is being created during the Backfilling phase when IndexStatus is + // set to CREATING and Backfilling is true. You can't delete the index that + // is being created when IndexStatus is set to CREATING and Backfilling is + // false. (This attribute does not appear for indexes that were created during + // a CreateTable operation.) // // * IndexName - The name of the global secondary index. // @@ -13577,15 +13883,9 @@ type TableDescription struct { // DynamoDB updates this value approximately every six hours. Recent changes // might not be reflected in this value. // - // * IndexStatus - The current status of the global secondary index: - // - // CREATING - The index is being created. - // - // UPDATING - The index is being updated. - // - // DELETING - The index is being deleted. - // - // ACTIVE - The index is ready for use. + // * IndexStatus - The current status of the global secondary index: CREATING + // - The index is being created. UPDATING - The index is being updated. DELETING + // - The index is being deleted. ACTIVE - The index is ready for use. // // * ItemCount - The number of items in the global secondary index. DynamoDB // updates this value approximately every six hours. Recent changes might @@ -13598,22 +13898,16 @@ type TableDescription struct { // * Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: - // - // ProjectionType - One of the following: - // - // KEYS_ONLY - Only the index and primary keys are projected into the index. - // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. - // - // ALL - All of the table attributes are projected into the index. - // - // NonKeyAttributes - A list of one or more non-key attribute names that are - // projected into the secondary index. The total count of attributes provided - // in NonKeyAttributes, summed across all of the secondary indexes, must - // not exceed 20. If you project the same attribute into two different indexes, - // this counts as two distinct attributes when determining the total. + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. 
The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. // // * ProvisionedThroughput - The provisioned throughput settings for the // global secondary index, consisting of read and write capacity units, along @@ -13631,20 +13925,14 @@ type TableDescription struct { // // * AttributeName - The name of the attribute. // - // * KeyType - The role of the attribute: - // - // HASH - partition key - // - // RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB' usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. + // * KeyType - The role of the attribute: HASH - partition key RANGE - sort + // key The partition key of an item is also known as its hash attribute. + // The term "hash attribute" derives from DynamoDB's usage of an internal + // hash function to evenly distribute data items across partitions, based + // on their partition key values. The sort key of an item is also known as + // its range attribute. The term "range attribute" derives from the way DynamoDB + // stores items with the same partition key physically close together, in + // sorted order by the sort key value. // // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) // in the Amazon DynamoDB Developer Guide. @@ -13661,11 +13949,11 @@ type TableDescription struct { // However, the combination of the following three elements is guaranteed to // be unique: // - // * the AWS customer ID. + // * AWS customer ID // - // * the table name. + // * Table name // - // * the StreamLabel. + // * StreamLabel LatestStreamLabel *string `type:"string"` // Represents one or more local secondary indexes on the table. Each index is @@ -13683,22 +13971,16 @@ type TableDescription struct { // * Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: - // - // ProjectionType - One of the following: - // - // KEYS_ONLY - Only the index and primary keys are projected into the index. - // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. - // - // ALL - All of the table attributes are projected into the index. - // - // NonKeyAttributes - A list of one or more non-key attribute names that are - // projected into the secondary index. The total count of attributes provided - // in NonKeyAttributes, summed across all of the secondary indexes, must - // not exceed 20. If you project the same attribute into two different indexes, - // this counts as two distinct attributes when determining the total. 
+ // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. // // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB // updates this value approximately every six hours. Recent changes might @@ -13882,9 +14164,9 @@ func (s *TableDescription) SetTableStatus(v string) *TableDescription { type Tag struct { _ struct{} `type:"structure"` - // The key of the tag.Tag keys are case sensitive. Each DynamoDB table can only - // have up to one tag with the same key. If you try to add an existing tag (same - // key), the existing tag value will be updated to the new value. + // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can + // only have up to one tag with the same key. If you try to add an existing + // tag (same key), the existing tag value will be updated to the new value. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -14020,10 +14302,10 @@ func (s TagResourceOutput) GoString() string { type TimeToLiveDescription struct { _ struct{} `type:"structure"` - // The name of the Time to Live attribute for items in the table. + // The name of the TTL attribute for items in the table. AttributeName *string `min:"1" type:"string"` - // The Time to Live status for the table. + // The TTL status for the table. TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"` } @@ -14049,19 +14331,19 @@ func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescrip return s } -// Represents the settings used to enable or disable Time to Live for the specified -// table. +// Represents the settings used to enable or disable Time to Live (TTL) for +// the specified table. type TimeToLiveSpecification struct { _ struct{} `type:"structure"` - // The name of the Time to Live attribute used to store the expiration time - // for items in the table. + // The name of the TTL attribute used to store the expiration time for items + // in the table. // // AttributeName is a required field AttributeName *string `min:"1" type:"string" required:"true"` - // Indicates whether Time To Live is to be enabled (true) or disabled (false) - // on the table. + // Indicates whether TTL is to be enabled (true) or disabled (false) on the + // table. // // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` @@ -14162,7 +14444,7 @@ type TransactGetItemsInput struct { // is valid. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - // An ordered array of up to 10 TransactGetItem objects, each of which contains + // An ordered array of up to 25 TransactGetItem objects, each of which contains // a Get structure. // // TransactItems is a required field @@ -14226,7 +14508,7 @@ type TransactGetItemsOutput struct { // consumed by the TransactGetItems call in that table. 
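As a usage sketch for the TimeToLiveSpecification wording above: enabling TTL just names the Number attribute that stores each item's expiration time in epoch seconds. The Sessions table and ExpiresAt attribute are hypothetical:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// The named attribute must hold the expiration time as an epoch-seconds Number.
	_, _ = svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
		TableName: aws.String("Sessions"), // hypothetical
		TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
			AttributeName: aws.String("ExpiresAt"), // hypothetical attribute
			Enabled:       aws.Bool(true),
		},
	})
}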
ConsumedCapacity []*ConsumedCapacity `type:"list"` - // An ordered array of up to 10 ItemResponse objects, each of which corresponds + // An ordered array of up to 25 ItemResponse objects, each of which corresponds // to the TransactGetItem object in the same position in the TransactItems array. // Each ItemResponse object contains a Map of the name-value pairs that are // the projected attributes of the requested item. @@ -14350,19 +14632,20 @@ type TransactWriteItemsInput struct { // // Although multiple identical calls using the same client request token produce // the same result on the server (no side effects), the responses to the calls - // may not be the same. If the ReturnConsumedCapacity> parameter is set, then + // might not be the same. If the ReturnConsumedCapacity parameter is set, then // the initial TransactWriteItems call returns the amount of write capacity - // units consumed in making the changes, and subsequent TransactWriteItems calls - // with the same client token return the amount of read capacity units consumed + // units consumed in making the changes. Subsequent TransactWriteItems calls + // with the same client token return the number of read capacity units consumed + // in reading the item. // // A client request token is valid for 10 minutes after the first request that - // uses it completes. After 10 minutes, any request with the same client token - // is treated as a new request. Do not resubmit the same request with the same - // client token for more than 10 minutes or the result may not be idempotent. + // uses it is completed. After 10 minutes, any request with the same client + // token is treated as a new request. Do not resubmit the same request with + // the same client token for more than 10 minutes, or the result might not be + // idempotent. // // If you submit a request with the same client token but a change in other - // parameters within the 10 minute idempotency window, DynamoDB returns an IdempotentParameterMismatch + // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch // exception. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -14371,11 +14654,9 @@ type TransactWriteItemsInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. - // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -14389,10 +14670,10 @@ type TransactWriteItemsInput struct { // NONE (the default), no statistics are returned. ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` - // An ordered array of up to 10 TransactWriteItem objects, each of which contains + // An ordered array of up to 25 TransactWriteItem objects, each of which contains // a ConditionCheck, Put, Update, or Delete object.
These can operate on items // in different tables, but the tables must reside in the same AWS account and - // region, and no two of them can operate on the same item. + // Region, and no two of them can operate on the same item. // // TransactItems is a required field TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` @@ -14471,7 +14752,7 @@ type TransactWriteItemsOutput struct { // A list of tables that were processed by TransactWriteItems and, for each // table, information about any item collections that were affected by individual - // UpdateItem, PutItem or DeleteItem operations. + // UpdateItem, PutItem, or DeleteItem operations. ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` } @@ -14500,14 +14781,14 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item type UntagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon DyanamoDB resource the tags will be removed from. This value is - // an Amazon Resource Name (ARN). + // The DynamoDB resource that the tags will be removed from. This value is an + // Amazon Resource Name (ARN). // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` // A list of tag keys. Existing tags of the resource whose keys are members - // of this list will be removed from the Amazon DynamoDB resource. + // of this list will be removed from the DynamoDB resource. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -14837,7 +15118,7 @@ type UpdateGlobalTableInput struct { // GlobalTableName is a required field GlobalTableName *string `min:"3" type:"string" required:"true"` - // A list of regions that should be added or removed from the global table. + // A list of Regions that should be added or removed from the global table. // // ReplicaUpdates is a required field ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"` @@ -14922,6 +15203,12 @@ type UpdateGlobalTableSettingsInput struct { // The billing mode of the global table. If GlobalTableBillingMode is not specified, // the global table defaults to PROVISIONED capacity billing mode. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). GlobalTableBillingMode *string `type:"string" enum:"BillingMode"` // Represents the settings of a global secondary index for a global table that @@ -14933,7 +15220,7 @@ type UpdateGlobalTableSettingsInput struct { // GlobalTableName is a required field GlobalTableName *string `min:"3" type:"string" required:"true"` - // AutoScaling settings for managing provisioned write capacity for the global + // Auto scaling settings for managing provisioned write capacity for the global // table. GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` @@ -14941,7 +15228,7 @@ type UpdateGlobalTableSettingsInput struct { // a ThrottlingException. 
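The ClientRequestToken semantics documented above (the same token within the 10-minute window makes retries an idempotent replay) pair naturally with the raised 25-item transaction limit. A sketch with hypothetical Orders/Customers tables and an illustrative token value:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Two writes in different tables committed atomically; up to 25 such items
	// are allowed per the updated docs above. Resending this exact request with
	// the same token within 10 minutes is treated as an idempotent replay.
	_, _ = svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		ClientRequestToken: aws.String("order-1234-attempt"), // hypothetical token
		TransactItems: []*dynamodb.TransactWriteItem{
			{
				Put: &dynamodb.Put{
					TableName: aws.String("Orders"), // hypothetical
					Item: map[string]*dynamodb.AttributeValue{
						"OrderId": {S: aws.String("1234")},
					},
					ConditionExpression: aws.String("attribute_not_exists(OrderId)"),
				},
			},
			{
				Update: &dynamodb.Update{
					TableName:        aws.String("Customers"), // hypothetical
					Key:              map[string]*dynamodb.AttributeValue{"CustomerId": {S: aws.String("c-1")}},
					UpdateExpression: aws.String("ADD OrderCount :one"),
					ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
						":one": {N: aws.String("1")},
					},
				},
			},
		},
	})
}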
GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` - // Represents the settings for a global table in a region that will be modified. + // Represents the settings for a global table in a Region that will be modified. ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"` } @@ -15047,7 +15334,7 @@ type UpdateGlobalTableSettingsOutput struct { // The name of the global table. GlobalTableName *string `min:"3" type:"string"` - // The region specific settings for the global table. + // The Region-specific settings for the global table. ReplicaSettings []*ReplicaSettingsDescription `type:"list"` } @@ -15087,15 +15374,13 @@ type UpdateItemInput struct { // An expression can contain any of the following: // // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size - // - // These function names are case-sensitive. + // | contains | begins_with | size These function names are case-sensitive. // // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN // - // * Logical operators: AND | OR | NOT + // * Logical operators: AND | OR | NOT // - // For more information on condition expressions, see Specifying Conditions + // For more information about condition expressions, see Specifying Conditions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ConditionExpression *string `type:"string"` @@ -15130,7 +15415,7 @@ type UpdateItemInput struct { // The name of this attribute conflicts with a reserved word, so it cannot be // used directly in an expression. (For the complete list of reserved words, // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify // the following for ExpressionAttributeNames: // // * {"#P":"Percentile"} @@ -15142,8 +15427,8 @@ type UpdateItemInput struct { // Tokens that begin with the : character are expression attribute values, which // are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information about expression attribute names, see Specifying Item + // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -15164,7 +15449,7 @@ type UpdateItemInput struct { // // ProductStatus IN (:avail, :back, :disc) // - // For more information on expression attribute values, see Specifying Conditions + // For more information on expression attribute values, see Condition Expressions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` @@ -15185,11 +15470,9 @@ type UpdateItemInput struct { // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. 
- // - // Note that some operations, such as GetItem and BatchGetItem, do not access - // any indexes at all. In these cases, specifying INDEXES will only return - // ConsumedCapacity information for table(s). + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. @@ -15234,71 +15517,55 @@ type UpdateItemInput struct { TableName *string `min:"3" type:"string" required:"true"` // An expression that defines one or more attributes to be updated, the action - // to be performed on them, and new value(s) for them. + // to be performed on them, and new values for them. // // The following action values are available for UpdateExpression. // // * SET - Adds one or more attributes and values to an item. If any of these - // attribute already exist, they are replaced by the new values. You can + // attributes already exist, they are replaced by the new values. You can // also use SET to add or subtract from an attribute that is of type Number. - // For example: SET myNum = myNum + :val - // - // SET supports the following functions: - // - // if_not_exists (path, operand) - if the item does not contain an attribute + // For example: SET myNum = myNum + :val SET supports the following functions: + // if_not_exists (path, operand) - if the item does not contain an attribute // at the specified path, then if_not_exists evaluates to operand; otherwise, // it evaluates to path. You can use this function to avoid overwriting an - // attribute that may already be present in the item. - // - // list_append (operand, operand) - evaluates to a list with a new element added - // to it. You can append the new element to the start or the end of the list - // by reversing the order of the operands. - // - // These function names are case-sensitive. + // attribute that may already be present in the item. list_append (operand, + // operand) - evaluates to a list with a new element added to it. You can + // append the new element to the start or the end of the list by reversing + // the order of the operands. These function names are case-sensitive. // // * REMOVE - Removes one or more attributes from an item. // // * ADD - Adds the specified value to the item, if the attribute does not // already exist. If the attribute does exist, then the behavior of ADD depends - // on the data type of the attribute: - // - // If the existing attribute is a number, and if Value is also a number, then - // Value is mathematically added to the existing attribute. If Value is a - // negative number, then it is subtracted from the existing attribute. - // - // If you use ADD to increment or decrement a number value for an item that - // doesn't exist before the update, DynamoDB uses 0 as the initial value. - // - // Similarly, if you use ADD for an existing item to increment or decrement - // an attribute value that doesn't exist before the update, DynamoDB uses - // 0 as the initial value. For example, suppose that the item you want to - // update doesn't have an attribute named itemcount, but you decide to ADD - // the number 3 to this attribute anyway. DynamoDB will create the itemcount - // attribute, set its initial value to 0, and finally add 3 to it. The result - // will be a new itemcount attribute in the item, with a value of 3. 
- // - // If the existing data type is a set and if Value is also a set, then Value - // is added to the existing set. For example, if the attribute value is the - // set [1,2], and the ADD action specified [3], then the final attribute - // value is [1,2,3]. An error occurs if an ADD action is specified for a - // set attribute and the attribute type specified does not match the existing - // set type. - // - // Both sets must have the same primitive data type. For example, if the existing - // data type is a set of strings, the Value must also be a set of strings. - // - // The ADD action only supports Number and set data types. In addition, ADD - // can only be used on top-level attributes, not nested attributes. - // - // * DELETE - Deletes an element from a set. - // - // If a set of values is specified, then those values are subtracted from the - // old set. For example, if the attribute value was the set [a,b,c] and the - // DELETE action specifies [a,c], then the final attribute value is [b]. - // Specifying an empty set is an error. - // - // The DELETE action only supports set data types. In addition, DELETE can only - // be used on top-level attributes, not nested attributes. + // on the data type of the attribute: If the existing attribute is a number, + // and if Value is also a number, then Value is mathematically added to the + // existing attribute. If Value is a negative number, then it is subtracted + // from the existing attribute. If you use ADD to increment or decrement + // a number value for an item that doesn't exist before the update, DynamoDB + // uses 0 as the initial value. Similarly, if you use ADD for an existing + // item to increment or decrement an attribute value that doesn't exist before + // the update, DynamoDB uses 0 as the initial value. For example, suppose + // that the item you want to update doesn't have an attribute named itemcount, + // but you decide to ADD the number 3 to this attribute anyway. DynamoDB + // will create the itemcount attribute, set its initial value to 0, and finally + // add 3 to it. The result will be a new itemcount attribute in the item, + // with a value of 3. If the existing data type is a set and if Value is + // also a set, then Value is added to the existing set. For example, if the + // attribute value is the set [1,2], and the ADD action specified [3], then + // the final attribute value is [1,2,3]. An error occurs if an ADD action + // is specified for a set attribute and the attribute type specified does + // not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The ADD action only supports + // Number and set data types. In addition, ADD can only be used on top-level + // attributes, not nested attributes. + // + // * DELETE - Deletes an element from a set. If a set of values is specified, + // then those values are subtracted from the old set. For example, if the + // attribute value was the set [a,b,c] and the DELETE action specifies [a,c], + // then the final attribute value is [b]. Specifying an empty set is an error. + // The DELETE action only supports set data types. In addition, DELETE can + // only be used on top-level attributes, not nested attributes. 
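Putting the reflowed UpdateExpression actions above into practice, a single expression can carry one SET clause and one ADD clause; this sketch mirrors the itemcount example from the comments (the Products table and key are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// SET overwrites (or creates) Price; ADD starts itemcount at 0 if it is
	// missing and adds 3, matching the worked example in the comments above.
	out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
		TableName:        aws.String("Products"), // hypothetical
		Key:              map[string]*dynamodb.AttributeValue{"Id": {S: aws.String("p-1")}},
		UpdateExpression: aws.String("SET Price = :p ADD itemcount :n"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":p": {N: aws.String("19")},
			":n": {N: aws.String("3")},
		},
		ReturnValues: aws.String(dynamodb.ReturnValueUpdatedNew),
	})
	if err == nil {
		fmt.Println(out.Attributes)
	}
}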
// // You can have many actions in a single expression, such as the following: // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 @@ -15444,10 +15711,9 @@ type UpdateItemOutput struct { // bound for the estimate. The estimate includes the size of all the items // in the table, plus the size of all attributes projected into all of the // local secondary indexes on that table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` } @@ -15494,11 +15760,11 @@ type UpdateTableInput struct { // values are estimated based on the consumed read and write capacity of your // table and global secondary indexes over the past 30 minutes. // - // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using - // PROVISIONED for predictable workloads. + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). // - // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend - // using PAY_PER_REQUEST for unpredictable workloads. + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). BillingMode *string `type:"string" enum:"BillingMode"` // An array of one or more global secondary indexes for the table. For each @@ -15511,6 +15777,9 @@ type UpdateTableInput struct { // // * Delete - remove a global secondary index from the table. // + // You can create or delete only one global secondary index per UpdateTable + // operation. + // // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) // in the Amazon DynamoDB Developer Guide. GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` @@ -15523,9 +15792,9 @@ type UpdateTableInput struct { // Represents the DynamoDB Streams configuration for the table. // - // You will receive a ResourceInUseException if you attempt to enable a stream - // on a table that already has a stream, or if you attempt to disable a stream - // on a table which does not have a stream. + // You receive a ResourceInUseException if you try to enable a stream on a table + // that already has a stream, or if you try to disable a stream on a table that + // doesn't have a stream. StreamSpecification *StreamSpecification `type:"structure"` // The name of the table to be updated. @@ -15738,8 +16007,8 @@ func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecifi // Represents an operation to perform - either DeleteItem or PutItem. You can // only request one of these operations, not both, in a single WriteRequest. -// If you do need to perform both of these operations, you will need to provide -// two separate WriteRequest objects. 
+// If you do need to perform both of these operations, you need to provide two +// separate WriteRequest objects. type WriteRequest struct { _ struct{} `type:"structure"` @@ -15958,11 +16227,9 @@ const ( // // * INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary -// index that was accessed. -// -// Note that some operations, such as GetItem and BatchGetItem, do not access -// any indexes at all. In these cases, specifying INDEXES will only return -// ConsumedCapacity information for table(s). +// index that was accessed. Note that some operations, such as GetItem and +// BatchGetItem, do not access any indexes at all. In these cases, specifying +// INDEXES will only return ConsumedCapacity information for table(s). // // * TOTAL - The response includes only the aggregate ConsumedCapacity for // the operation. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go index 333e61bfcb1..c019e63dfc8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go @@ -5,7 +5,6 @@ import ( "hash/crc32" "io" "io/ioutil" - "math" "strconv" "time" @@ -15,15 +14,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -type retryer struct { - client.DefaultRetryer -} - -func (d retryer) RetryRules(r *request.Request) time.Duration { - delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50 - return delay * time.Millisecond -} - func init() { initClient = func(c *client.Client) { if c.Config.Retryer == nil { @@ -43,10 +33,9 @@ func setCustomRetryer(c *client.Client) { maxRetries = 10 } - c.Retryer = retryer{ - DefaultRetryer: client.DefaultRetryer{ - NumMaxRetries: maxRetries, - }, + c.Retryer = client.DefaultRetryer{ + NumMaxRetries: maxRetries, + MinRetryDelay: 50 * time.Millisecond, } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go index 5ebc5807284..013e9b1d2a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go @@ -3,7 +3,7 @@ AttributeValue Marshaling and Unmarshaling Helpers Utility helpers to marshal and unmarshal AttributeValue to and from Go types can be found in the dynamodbattribute sub package. This package -provides has specialized functions for the common ways of working with +provides specialized functions for the common ways of working with AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and directly with *AttributeValue. This is helpful for marshaling Go types for API operations such as PutItem, and unmarshaling Query and Scan APIs' responses. 
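// Illustrative sketch (editor's addition, not part of this changeset): the
// customizations.go hunk above drops the hand-rolled exponential retryer in
// favor of client.DefaultRetryer with MinRetryDelay set to 50ms. An
// application could reproduce that behavior explicitly when building its own
// client; the retry count of 10 mirrors the DynamoDB-specific default kept
// by setCustomRetryer.
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession())

	// request.WithRetryer attaches any request.Retryer to an aws.Config.
	// client.DefaultRetryer now honors MinRetryDelay, which is what made the
	// custom RetryRules override removed above unnecessary.
	cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
		NumMaxRetries: 10,
		MinRetryDelay: 50 * time.Millisecond,
	})
	_ = dynamodb.New(sess, cfg)
}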
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 5485db7e4a4..e1b7931960d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -124,8 +124,8 @@ const ( // "RequestLimitExceeded". // // Throughput exceeds the current throughput limit for your account. Please - // contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support) - // to request a limit increase. + // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request + // a limit increase. ErrCodeRequestLimitExceeded = "RequestLimitExceeded" // ErrCodeResourceInUseException for service response error code @@ -165,9 +165,9 @@ const ( // ErrCodeTransactionCanceledException for service response error code // "TransactionCanceledException". // - // The entire transaction request was rejected. + // The entire transaction request was canceled. // - // DynamoDB rejects a TransactWriteItems request under the following circumstances: + // DynamoDB cancels a TransactWriteItems request under the following circumstances: // // * A condition in one of the condition expressions is not met. // @@ -186,7 +186,7 @@ const ( // // * There is a user error, such as an invalid data format. // - // DynamoDB rejects a TransactGetItems request under the following circumstances: + // DynamoDB cancels a TransactGetItems request under the following circumstances: // // * There is an ongoing TransactGetItems operation that conflicts with a // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. @@ -199,6 +199,57 @@ const ( // completed. // // * There is a user error, such as an invalid data format. + // + // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons + // property. This property is not set for other languages. Transaction cancellation + // reasons are ordered in the order of the requested items; if an item has + // no error, it will have the NONE code and a null message. + // + // Cancellation reason codes and possible error messages: + // + // * No Errors: Code: NONE Message: null + // + // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The + // conditional request failed. + // + // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded + // Message: Collection size exceeded. + // + // * Transaction Conflict: Code: TransactionConflict Message: Transaction + // is ongoing for the item. + // + // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded + // Messages: The level of configured provisioned throughput for the table + // was exceeded. Consider increasing your provisioning level with the UpdateTable + // API. This message is returned when provisioned throughput is exceeded + // on a provisioned DynamoDB table. The level of configured provisioned + // throughput for one or more global secondary indexes of the table was exceeded. + // Consider increasing your provisioning level for the under-provisioned + // global secondary indexes with the UpdateTable API. This message is returned + // when provisioned throughput is exceeded on a provisioned GSI. + // + // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds + // the current capacity of your table or index.
DynamoDB is automatically + // scaling your table or index so please try again shortly. If exceptions + // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. + // This message is returned when writes get throttled on an On-Demand table + // as DynamoDB is automatically scaling the table. Throughput exceeds the + // current capacity for one or more global secondary indexes. DynamoDB is + // automatically scaling your index so please try again shortly. This message + // is returned when writes get throttled on an On-Demand GSI as DynamoDB + // is automatically scaling the GSI. + // + // * Validation Error: Code: ValidationError Messages: One or more parameter + // values were invalid. The update expression attempted to update the secondary + // index key beyond allowed size limits. The update expression attempted + // to update the secondary index key to an unsupported type. An operand in the + // update expression has an incorrect data type. Item size to update has + // exceeded the maximum allowed size. Number overflow. Attempting to store + // a number with a magnitude larger than the supported range. Type mismatch for + // attribute to update. Nesting Levels have exceeded supported limits. The + // document path provided in the update expression is invalid for update. + // The provided expression refers to an attribute that does not exist in + // the item. ErrCodeTransactionCanceledException = "TransactionCanceledException" // ErrCodeTransactionConflictException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index edcb5b8598e..0400da631df 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -48,11 +48,11 @@ const ( // svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DynamoDB { svc := &DynamoDB{ Client: client.New( cfg, @@ -61,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-08-10", JSONVersion: "1.0", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 43da1f79af5..29e19cb10ab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -290,8 +290,8 @@ func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectio // of the peer VPC.
Use DescribeVpcPeeringConnections to view your outstanding // VPC peering connection requests. // -// For an inter-region VPC peering connection request, you must accept the VPC -// peering connection in the region of the accepter VPC. +// For an inter-Region VPC peering connection request, you must accept the VPC +// peering connection in the Region of the accepter VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -474,7 +474,7 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // // An Elastic IP address is for use either in the EC2-Classic platform or in // a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic -// per region and 5 Elastic IP addresses for EC2-VPC per region. +// per Region and 5 Elastic IP addresses for EC2-VPC per Region. // // For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -778,7 +778,6 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp output = &AssignPrivateIpAddressesOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1062,7 +1061,7 @@ func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req // its DHCP lease. You can explicitly renew the lease using the operating system // on the instance. // -// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1218,7 +1217,7 @@ func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req * // an association ID, which you need in order to disassociate the route table // from the subnet later. A route table can be associated with multiple subnets. // -// For more information, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1449,7 +1448,7 @@ func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (r // IPv6 CIDR block size is fixed at /56. // // For more information about associating CIDR blocks with your VPC and applicable -// restrictions, see VPC and Subnet Sizing (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html#VPC_Sizing) +// restrictions, see VPC and Subnet Sizing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1612,7 +1611,7 @@ func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (r // // Attaches an internet gateway to a VPC, enabling connectivity between the // internet and the VPC. 
For more information about your VPC and internet gateway, -// see the Amazon Virtual Private Cloud User Guide (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +// see the Amazon Virtual Private Cloud User Guide (https://docs.aws.amazon.com/vpc/latest/userguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1763,15 +1762,12 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // Attaches an EBS volume to a running or stopped instance and exposes it to // the instance with the specified device name. // -// Encrypted EBS volumes may only be attached to instances that support Amazon -// EBS encryption. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// Encrypted EBS volumes must be attached to instances that support Amazon EBS +// encryption. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For a list of supported device names, see Attaching an EBS Volume to an Instance -// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). -// Any device names that aren't reserved for instance store volumes can be used -// for EBS volumes. For more information, see Amazon EC2 Instance Store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) -// in the Amazon Elastic Compute Cloud User Guide. +// After you attach an EBS volume, you must make it available. For more information, +// see Making an EBS Volume Available For Use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). // // If a volume has an AWS Marketplace product code: // @@ -1785,8 +1781,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // the product. For example, you can't detach a volume from a Windows instance // and attach it to a Linux instance. // -// For more information about EBS volumes, see Attaching Amazon EBS Volumes -// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// For more information, see Attaching Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2020,9 +2015,9 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE // [VPC only] Adds the specified egress rules to a security group for use with // a VPC. // -// An outbound rule permits instances to send traffic to the specified destination -// IPv4 or IPv6 CIDR address ranges, or to the specified destination security -// groups for the same VPC. +// An outbound rule permits instances to send traffic to the specified IPv4 +// or IPv6 CIDR address ranges, or to the instances associated with the specified +// destination security groups. // // You specify a protocol for each rule (for example, TCP). For the TCP and // UDP protocols, you must also specify the destination port or port range. @@ -2110,9 +2105,9 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup // // Adds the specified ingress rules to a security group. 
// -// An inbound rule permits instances to receive traffic from the specified destination -// IPv4 or IPv6 CIDR address ranges, or from the specified destination security -// groups. +// An inbound rule permits instances to receive traffic from the specified IPv4 +// or IPv6 CIDR address ranges, or from the instances associated with the specified +// destination security groups. // // You specify a protocol for each rule (for example, TCP). For TCP and UDP, // you must also specify the destination port or port range. For ICMP/ICMPv6, @@ -2978,7 +2973,7 @@ func (c *EC2) CopyFpgaImageRequest(input *CopyFpgaImageInput) (req *request.Requ // CopyFpgaImage API operation for Amazon Elastic Compute Cloud. // -// Copies the specified Amazon FPGA Image (AFI) to the current region. +// Copies the specified Amazon FPGA Image (AFI) to the current Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3052,8 +3047,8 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out // CopyImage API operation for Amazon Elastic Compute Cloud. // -// Initiates the copy of an AMI from the specified source region to the current -// region. You specify the destination region by using its endpoint when making +// Initiates the copy of an AMI from the specified source Region to the current +// Region. You specify the destination Region by using its endpoint when making // the request. // // Copies of encrypted backing snapshots for the AMI are encrypted. Copies of @@ -3140,14 +3135,13 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // Copies a point-in-time snapshot of an EBS volume and stores it in Amazon // S3. You can copy the snapshot within the same Region or from one Region to // another. You can use the snapshot to create EBS volumes or Amazon Machine -// Images (AMIs). The snapshot is copied to the regional endpoint that you send -// the HTTP request to. +// Images (AMIs). // // Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted -// snapshots remain unencrypted, unless the Encrypted flag is specified during -// the snapshot copy operation. By default, encrypted snapshot copies use the -// default AWS Key Management Service (AWS KMS) customer master key (CMK); however, -// you can specify a non-default CMK with the KmsKeyId parameter. +// snapshots remain unencrypted, unless you enable encryption for the snapshot +// copy operation. By default, encrypted snapshot copies use the default AWS +// Key Management Service (AWS KMS) customer master key (CMK); however, you +// can specify a different CMK. // // To copy an encrypted snapshot that has been shared from another account, // you must have permissions for the CMK used to encrypt the snapshot. @@ -3405,8 +3399,8 @@ func (c *EC2) CreateClientVpnRouteRequest(input *CreateClientVpnRouteInput) (req // // Adds a route to a network to a Client VPN endpoint. Each Client VPN endpoint // has a route table that describes the available destination network routes. -// Each route in the route table specifies the path for traffic to specific resources -// or networks. +// Each route in the route table specifies the path for traffic to specific +// resources or networks. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3484,7 +3478,7 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // gateway is the appliance at your end of the VPN connection. (The device on // the AWS side of the VPN connection is the virtual private gateway.) You must // provide the Internet-routable IP address of the customer gateway's external -// interface. The IP address must be static and may be behind a device performing +// interface. The IP address must be static and can be behind a device performing // network address translation (NAT). // // For devices that use Border Gateway Protocol (BGP), you can also provide @@ -3493,8 +3487,8 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // a private ASN (in the 64512 - 65534 range). // // Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with -// the exception of 7224, which is reserved in the us-east-1 region, and 9059, -// which is reserved in the eu-west-1 region. +// the exception of 7224, which is reserved in the us-east-1 Region, and 9059, +// which is reserved in the eu-west-1 Region. // // For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) // in the AWS Site-to-Site VPN User Guide. @@ -3580,7 +3574,7 @@ func (c *EC2) CreateDefaultSubnetRequest(input *CreateDefaultSubnetInput) (req * // Creates a default subnet with a size /20 IPv4 CIDR block in the specified // Availability Zone in your default VPC. You can have only one default subnet // per Availability Zone. For more information, see Creating a Default Subnet -// (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html#create-default-subnet) +// (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html#create-default-subnet) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3657,7 +3651,7 @@ func (c *EC2) CreateDefaultVpcRequest(input *CreateDefaultVpcInput) (req *reques // // Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet // in each Availability Zone. For more information about the components of a -// default VPC, see Default VPC and Default Subnets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html) +// default VPC, see Default VPC and Default Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html) // in the Amazon Virtual Private Cloud User Guide. You cannot specify the components // of the default VPC yourself. // @@ -3750,12 +3744,12 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // * domain-name-servers - The IP addresses of up to four domain name servers, // or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. // If specifying more than one domain name server, specify the IP addresses -// in a single parameter, separated by commas. ITo have your instance to -// receive a custom DNS hostname as specified in domain-name, you must set -// domain-name-servers to a custom DNS server. +// in a single parameter, separated by commas. To have your instance receive +// a custom DNS hostname as specified in domain-name, you must set domain-name-servers +// to a custom DNS server. // // * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify -// ec2.internal. 
If you're using AmazonProvidedDNS in another region, specify +// ec2.internal. If you're using AmazonProvidedDNS in another Region, specify // region.compute.internal (for example, ap-northeast-1.compute.internal). // Otherwise, specify a domain name (for example, MyCompany.com). This value // is used to complete unqualified DNS hostnames. Important: Some Linux operating @@ -3778,7 +3772,7 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // only a DNS server that we provide (AmazonProvidedDNS). If you create a set // of options, and if your VPC has an internet gateway, make sure to set the // domain-name-servers option either to AmazonProvidedDNS or to a domain name -// server of your choice. For more information, see DHCP Options Sets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// server of your choice. For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4015,7 +4009,7 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // // Flow log data for a monitored network interface is recorded as flow log records, // which are log events consisting of fields that describe the traffic flow. -// For more information, see Flow Log Records (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html#flow-log-records) +// For more information, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) // in the Amazon Virtual Private Cloud User Guide. // // When publishing to CloudWatch Logs, flow log records are published to a log @@ -4024,7 +4018,7 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // interfaces are published to a single log file object that is stored in the // specified bucket. // -// For more information, see VPC Flow Logs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html) +// For more information, see VPC Flow Logs (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4346,7 +4340,7 @@ func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (r // gateway, you attach it to a VPC using AttachInternetGateway. // // For more information about your VPC and internet gateway, see the Amazon -// Virtual Private Cloud User Guide (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +// Virtual Private Cloud User Guide (https://docs.aws.amazon.com/vpc/latest/userguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4425,11 +4419,11 @@ func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Requ // private key is returned as an unencrypted PEM encoded PKCS#1 private key. // If a key with the specified name already exists, Amazon EC2 returns an error. // -// You can have up to five thousand key pairs per region. +// You can have up to five thousand key pairs per Region. // -// The key pair returned to you is available only in the region in which you +// The key pair returned to you is available only in the Region in which you // create it. 
If you prefer, you can create your own key pair using a third-party -// tool and upload it to any region using ImportKeyPair. +// tool and upload it to any Region using ImportKeyPair. // // For more information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -4665,7 +4659,7 @@ func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *reques // the IP address range of the subnet. Internet-bound traffic from a private // subnet can be routed to the NAT gateway, therefore enabling instances in // the private subnet to connect to the internet. For more information, see -// NAT Gateways (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) +// NAT Gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4743,7 +4737,7 @@ func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *reques // Creates a network ACL in a VPC. Network ACLs provide an optional layer of // security (in addition to security groups) for the instances in your VPC. // -// For more information, see Network ACLs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4834,7 +4828,7 @@ func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (r // After you add an entry, you can't modify it; you must either replace it, // or create an entry and delete the old one. // -// For more information about network ACLs, see Network ACLs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information about network ACLs, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5265,7 +5259,7 @@ func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, // route in the list covers a smaller number of IP addresses and is therefore // more specific, so we use that route to determine where to target the traffic. // -// For more information about route tables, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5343,7 +5337,7 @@ func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *reques // Creates a route table for the specified VPC. After you create a route table, // you can add routes and associate the table with a subnet. // -// For more information, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5578,6 +5572,83 @@ func (c *EC2) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotIn return out, req.Send() } +const opCreateSnapshots = "CreateSnapshots" + +// CreateSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSnapshots for more information on using the CreateSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSnapshotsRequest method. +// req, resp := client.CreateSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshotsRequest(input *CreateSnapshotsInput) (req *request.Request, output *CreateSnapshotsOutput) { + op := &request.Operation{ + Name: opCreateSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotsInput{} + } + + output = &CreateSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSnapshots API operation for Amazon Elastic Compute Cloud. +// +// Creates crash-consistent snapshots of multiple EBS volumes and stores the +// data in S3. Volumes are chosen by specifying an instance. Any attached volumes +// will produce one snapshot each that is crash-consistent across the instance. +// Boot volumes can be excluded by changing the parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSnapshots for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshots(input *CreateSnapshotsInput) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + return out, req.Send() +} + +// CreateSnapshotsWithContext is the same as CreateSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSnapshotsWithContext(ctx aws.Context, input *CreateSnapshotsInput, opts ...request.Option) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" // CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the @@ -5724,7 +5795,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // It's therefore possible to have a subnet with no running instances (they're // all stopped), but no remaining IP addresses available. // -// For more information about subnets, see Your VPC and Subnets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// For more information about subnets, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5838,6 +5909,336 @@ func (c *EC2) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput, opt return out, req.Send() } +const opCreateTrafficMirrorFilter = "CreateTrafficMirrorFilter" + +// CreateTrafficMirrorFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorFilter for more information on using the CreateTrafficMirrorFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorFilterRequest method. +// req, resp := client.CreateTrafficMirrorFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilter +func (c *EC2) CreateTrafficMirrorFilterRequest(input *CreateTrafficMirrorFilterInput) (req *request.Request, output *CreateTrafficMirrorFilterOutput) { + op := &request.Operation{ + Name: opCreateTrafficMirrorFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrafficMirrorFilterInput{} + } + + output = &CreateTrafficMirrorFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrafficMirrorFilter API operation for Amazon Elastic Compute Cloud. +// +// Creates a Traffic Mirror filter. +// +// A Traffic Mirror filter is a set of rules that defines the traffic to mirror. +// +// By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule +// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorFilterRule.htm) +// to add Traffic Mirror rules to the filter. The rules you add define what +// traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices +// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyTrafficMirrorFilterNetworkServices.html) +// to mirror supported network services. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTrafficMirrorFilter for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilter +func (c *EC2) CreateTrafficMirrorFilter(input *CreateTrafficMirrorFilterInput) (*CreateTrafficMirrorFilterOutput, error) { + req, out := c.CreateTrafficMirrorFilterRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorFilterWithContext is the same as CreateTrafficMirrorFilter with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorFilterWithContext(ctx aws.Context, input *CreateTrafficMirrorFilterInput, opts ...request.Option) (*CreateTrafficMirrorFilterOutput, error) { + req, out := c.CreateTrafficMirrorFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorFilterRule = "CreateTrafficMirrorFilterRule" + +// CreateTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorFilterRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorFilterRule for more information on using the CreateTrafficMirrorFilterRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorFilterRuleRequest method. +// req, resp := client.CreateTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilterRule +func (c *EC2) CreateTrafficMirrorFilterRuleRequest(input *CreateTrafficMirrorFilterRuleInput) (req *request.Request, output *CreateTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opCreateTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrafficMirrorFilterRuleInput{} + } + + output = &CreateTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Creates a Traffic Mirror filter rule. +// +// A Traffic Mirror rule defines the Traffic Mirror source traffic to mirror. +// +// You need the Traffic Mirror filter ID when you create the rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTrafficMirrorFilterRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilterRule +func (c *EC2) CreateTrafficMirrorFilterRule(input *CreateTrafficMirrorFilterRuleInput) (*CreateTrafficMirrorFilterRuleOutput, error) { + req, out := c.CreateTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorFilterRuleWithContext is the same as CreateTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *CreateTrafficMirrorFilterRuleInput, opts ...request.Option) (*CreateTrafficMirrorFilterRuleOutput, error) { + req, out := c.CreateTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorSession = "CreateTrafficMirrorSession" + +// CreateTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorSession for more information on using the CreateTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorSessionRequest method. +// req, resp := client.CreateTrafficMirrorSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorSession +func (c *EC2) CreateTrafficMirrorSessionRequest(input *CreateTrafficMirrorSessionInput) (req *request.Request, output *CreateTrafficMirrorSessionOutput) { + op := &request.Operation{ + Name: opCreateTrafficMirrorSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrafficMirrorSessionInput{} + } + + output = &CreateTrafficMirrorSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrafficMirrorSession API operation for Amazon Elastic Compute Cloud. +// +// Creates a Traffic Mirror session. +// +// A Traffic Mirror session actively copies packets from a Traffic Mirror source +// to a Traffic Mirror target. Create a filter, and then assign it to the session +// to define a subset of the traffic to mirror, for example all TCP traffic. +// +// The Traffic Mirror source and the Traffic Mirror target (monitoring appliances) +// can be in the same VPC, or in a different VPC connected via VPC peering or +// a transit gateway. 
+// +// By default, no traffic is mirrored. Use CreateTrafficMirrorFilter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorFilter.htm) +// to create filter rules that specify the traffic to mirror. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTrafficMirrorSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorSession +func (c *EC2) CreateTrafficMirrorSession(input *CreateTrafficMirrorSessionInput) (*CreateTrafficMirrorSessionOutput, error) { + req, out := c.CreateTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorSessionWithContext is the same as CreateTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorSessionWithContext(ctx aws.Context, input *CreateTrafficMirrorSessionInput, opts ...request.Option) (*CreateTrafficMirrorSessionOutput, error) { + req, out := c.CreateTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorTarget = "CreateTrafficMirrorTarget" + +// CreateTrafficMirrorTargetRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorTarget for more information on using the CreateTrafficMirrorTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorTargetRequest method. +// req, resp := client.CreateTrafficMirrorTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorTarget +func (c *EC2) CreateTrafficMirrorTargetRequest(input *CreateTrafficMirrorTargetInput) (req *request.Request, output *CreateTrafficMirrorTargetOutput) { + op := &request.Operation{ + Name: opCreateTrafficMirrorTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrafficMirrorTargetInput{} + } + + output = &CreateTrafficMirrorTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrafficMirrorTarget API operation for Amazon Elastic Compute Cloud. +// +// Creates a target for your Traffic Mirror session. +// +// A Traffic Mirror target is the destination for mirrored traffic. 
The Traffic +// Mirror source and the Traffic Mirror target (monitoring appliances) can be +// in the same VPC, or in different VPCs connected via VPC peering or a transit +// gateway. +// +// A Traffic Mirror target can be a network interface, or a Network Load Balancer. +// +// To use the target in a Traffic Mirror session, use CreateTrafficMirrorSession +// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorSession.htm). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTrafficMirrorTarget for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorTarget +func (c *EC2) CreateTrafficMirrorTarget(input *CreateTrafficMirrorTargetInput) (*CreateTrafficMirrorTargetOutput, error) { + req, out := c.CreateTrafficMirrorTargetRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorTargetWithContext is the same as CreateTrafficMirrorTarget with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorTargetWithContext(ctx aws.Context, input *CreateTrafficMirrorTargetInput, opts ...request.Option) (*CreateTrafficMirrorTargetOutput, error) { + req, out := c.CreateTrafficMirrorTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTransitGateway = "CreateTransitGateway" // CreateTransitGatewayRequest generates a "aws/request.Request" representing the @@ -6212,10 +6613,10 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // Any AWS Marketplace product codes from the snapshot are propagated to the // volume. // -// You can create encrypted volumes with the Encrypted parameter. Encrypted -// volumes may only be attached to instances that support Amazon EBS encryption. -// Volumes that are created from encrypted snapshots are also automatically -// encrypted. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// You can create encrypted volumes. Encrypted volumes must be attached to instances +// that support Amazon EBS encryption. Volumes that are created from encrypted +// snapshots are also automatically encrypted. For more information, see Amazon +// EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. // // You can tag your volumes during creation. For more information, see Tagging @@ -6300,7 +6701,7 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can // create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 // netmask (65,536 IPv4 addresses). 
For more information about how large to -// make your VPC, see Your VPC and Subnets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// make your VPC, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // // You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. @@ -6309,7 +6710,7 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // // By default, each instance you launch in the VPC has the default DHCP options, // which include only a default DNS server that we provide (AmazonProvidedDNS). -// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // You can specify the instance tenancy value for the VPC when you create it. @@ -6392,7 +6793,7 @@ func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *requ // Creates a VPC endpoint for a specified service. An endpoint enables you to // create a private connection between your VPC and the service. The service // may be provided by AWS, an AWS Marketplace partner, or another AWS account. -// For more information, see VPC Endpoints (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-endpoints.html) +// For more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) // in the Amazon Virtual Private Cloud User Guide. // // A gateway endpoint serves as a target for a route in your route table for @@ -6565,7 +6966,7 @@ func (c *EC2) CreateVpcEndpointServiceConfigurationRequest(input *CreateVpcEndpo // // To create an endpoint service configuration, you must first create a Network // Load Balancer for your service. For more information, see VPC Endpoint Services -// (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/endpoint-service.html) +// (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6647,7 +7048,7 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // CIDR blocks. // // Limitations and rules apply to a VPC peering connection. For more information, -// see the limitations (https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/vpc-peering-basics.html#vpc-peering-limitations) +// see the limitations (https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-limitations) // section in the VPC Peering Guide. // // The owner of the accepter VPC must accept the peering request to activate @@ -6730,7 +7131,7 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // CreateVpnConnection API operation for Amazon Elastic Compute Cloud. // // Creates a VPN connection between an existing virtual private gateway and -// a VPN customer gateway. The only supported connection type is ipsec.1. +// a VPN customer gateway. The supported connection type is ipsec.1. // // The response includes information that you need to give to your network administrator // to configure your customer gateway. 
@@ -8308,6 +8709,80 @@ func (c *EC2) DeletePlacementGroupWithContext(ctx aws.Context, input *DeletePlac return out, req.Send() } +const opDeleteQueuedReservedInstances = "DeleteQueuedReservedInstances" + +// DeleteQueuedReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueuedReservedInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteQueuedReservedInstances for more information on using the DeleteQueuedReservedInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteQueuedReservedInstancesRequest method. +// req, resp := client.DeleteQueuedReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteQueuedReservedInstances +func (c *EC2) DeleteQueuedReservedInstancesRequest(input *DeleteQueuedReservedInstancesInput) (req *request.Request, output *DeleteQueuedReservedInstancesOutput) { + op := &request.Operation{ + Name: opDeleteQueuedReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueuedReservedInstancesInput{} + } + + output = &DeleteQueuedReservedInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteQueuedReservedInstances API operation for Amazon Elastic Compute Cloud. +// +// Deletes the queued purchases for the specified Reserved Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteQueuedReservedInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteQueuedReservedInstances +func (c *EC2) DeleteQueuedReservedInstances(input *DeleteQueuedReservedInstancesInput) (*DeleteQueuedReservedInstancesOutput, error) { + req, out := c.DeleteQueuedReservedInstancesRequest(input) + return out, req.Send() +} + +// DeleteQueuedReservedInstancesWithContext is the same as DeleteQueuedReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteQueuedReservedInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteQueuedReservedInstancesWithContext(ctx aws.Context, input *DeleteQueuedReservedInstancesInput, opts ...request.Option) (*DeleteQueuedReservedInstancesOutput, error) { + req, out := c.DeleteQueuedReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteRoute = "DeleteRoute" // DeleteRouteRequest generates a "aws/request.Request" representing the @@ -8858,6 +9333,308 @@ func (c *EC2) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opt return out, req.Send() } +const opDeleteTrafficMirrorFilter = "DeleteTrafficMirrorFilter" + +// DeleteTrafficMirrorFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorFilter for more information on using the DeleteTrafficMirrorFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorFilterRequest method. +// req, resp := client.DeleteTrafficMirrorFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilter +func (c *EC2) DeleteTrafficMirrorFilterRequest(input *DeleteTrafficMirrorFilterInput) (req *request.Request, output *DeleteTrafficMirrorFilterOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorFilterInput{} + } + + output = &DeleteTrafficMirrorFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorFilter API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror filter. +// +// You cannot delete a Traffic Mirror filter that is in use by a Traffic Mirror +// session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorFilter for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilter +func (c *EC2) DeleteTrafficMirrorFilter(input *DeleteTrafficMirrorFilterInput) (*DeleteTrafficMirrorFilterOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorFilterWithContext is the same as DeleteTrafficMirrorFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorFilterWithContext(ctx aws.Context, input *DeleteTrafficMirrorFilterInput, opts ...request.Option) (*DeleteTrafficMirrorFilterOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteTrafficMirrorFilterRule = "DeleteTrafficMirrorFilterRule" + +// DeleteTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorFilterRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorFilterRule for more information on using the DeleteTrafficMirrorFilterRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorFilterRuleRequest method. +// req, resp := client.DeleteTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilterRule +func (c *EC2) DeleteTrafficMirrorFilterRuleRequest(input *DeleteTrafficMirrorFilterRuleInput) (req *request.Request, output *DeleteTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorFilterRuleInput{} + } + + output = &DeleteTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorFilterRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilterRule +func (c *EC2) DeleteTrafficMirrorFilterRule(input *DeleteTrafficMirrorFilterRuleInput) (*DeleteTrafficMirrorFilterRuleOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorFilterRuleWithContext is the same as DeleteTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *DeleteTrafficMirrorFilterRuleInput, opts ...request.Option) (*DeleteTrafficMirrorFilterRuleOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteTrafficMirrorSession = "DeleteTrafficMirrorSession" + +// DeleteTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorSession for more information on using the DeleteTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorSessionRequest method. +// req, resp := client.DeleteTrafficMirrorSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorSession +func (c *EC2) DeleteTrafficMirrorSessionRequest(input *DeleteTrafficMirrorSessionInput) (req *request.Request, output *DeleteTrafficMirrorSessionOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorSessionInput{} + } + + output = &DeleteTrafficMirrorSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorSession API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorSession +func (c *EC2) DeleteTrafficMirrorSession(input *DeleteTrafficMirrorSessionInput) (*DeleteTrafficMirrorSessionOutput, error) { + req, out := c.DeleteTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorSessionWithContext is the same as DeleteTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorSessionWithContext(ctx aws.Context, input *DeleteTrafficMirrorSessionInput, opts ...request.Option) (*DeleteTrafficMirrorSessionOutput, error) { + req, out := c.DeleteTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTrafficMirrorTarget = "DeleteTrafficMirrorTarget" + +// DeleteTrafficMirrorTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorTarget operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorTarget for more information on using the DeleteTrafficMirrorTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorTargetRequest method. +// req, resp := client.DeleteTrafficMirrorTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorTarget +func (c *EC2) DeleteTrafficMirrorTargetRequest(input *DeleteTrafficMirrorTargetInput) (req *request.Request, output *DeleteTrafficMirrorTargetOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorTargetInput{} + } + + output = &DeleteTrafficMirrorTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorTarget API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror target. +// +// You cannot delete a Traffic Mirror target that is in use by a Traffic Mirror +// session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorTarget for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorTarget +func (c *EC2) DeleteTrafficMirrorTarget(input *DeleteTrafficMirrorTargetInput) (*DeleteTrafficMirrorTargetOutput, error) { + req, out := c.DeleteTrafficMirrorTargetRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorTargetWithContext is the same as DeleteTrafficMirrorTarget with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorTargetWithContext(ctx aws.Context, input *DeleteTrafficMirrorTargetInput, opts ...request.Option) (*DeleteTrafficMirrorTargetOutput, error) { + req, out := c.DeleteTrafficMirrorTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteTransitGateway = "DeleteTransitGateway" // DeleteTransitGatewayRequest generates a "aws/request.Request" representing the @@ -10241,8 +11018,8 @@ func (c *EC2) DescribeAggregateIdFormatRequest(input *DescribeAggregateIdFormatI // DescribeAggregateIdFormat API operation for Amazon Elastic Compute Cloud. // // Describes the longer ID format settings for all resource types in a specific -// region. 
This request is useful for performing a quick audit to determine -// whether a specific region is fully opted in for longer IDs (17-character +// Region. This request is useful for performing a quick audit to determine +// whether a specific Region is fully opted in for longer IDs (17-character // IDs). // // This request only returns information about resource types that support longer @@ -10329,7 +11106,7 @@ func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesI // DescribeAvailabilityZones API operation for Amazon Elastic Compute Cloud. // // Describes the Availability Zones that are available to you. The results include -// zones only for the region you're currently using. If there is an event impacting +// zones only for the Region you're currently using. If there is an event impacting // an Availability Zone, you can use this request to view the state and any // provided message for that Availability Zone. // @@ -10537,7 +11314,7 @@ func (c *EC2) DescribeByoipCidrsWithContext(ctx aws.Context, input *DescribeByoi // // Example iterating over at most 3 pages of a DescribeByoipCidrs operation. // pageNum := 0 // err := client.DescribeByoipCidrsPages(params, -// func(page *DescribeByoipCidrsOutput, lastPage bool) bool { +// func(page *ec2.DescribeByoipCidrsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10569,10 +11346,12 @@ func (c *EC2) DescribeByoipCidrsPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeByoipCidrsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeByoipCidrsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10668,7 +11447,7 @@ func (c *EC2) DescribeCapacityReservationsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeCapacityReservations operation. // pageNum := 0 // err := client.DescribeCapacityReservationsPages(params, -// func(page *DescribeCapacityReservationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeCapacityReservationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10700,10 +11479,12 @@ func (c *EC2) DescribeCapacityReservationsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCapacityReservationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCapacityReservationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10801,7 +11582,7 @@ func (c *EC2) DescribeClassicLinkInstancesWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeClassicLinkInstances operation. 
// pageNum := 0 // err := client.DescribeClassicLinkInstancesPages(params, -// func(page *DescribeClassicLinkInstancesOutput, lastPage bool) bool { +// func(page *ec2.DescribeClassicLinkInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10833,10 +11614,12 @@ func (c *EC2) DescribeClassicLinkInstancesPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClassicLinkInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClassicLinkInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10931,7 +11714,7 @@ func (c *EC2) DescribeClientVpnAuthorizationRulesWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeClientVpnAuthorizationRules operation. // pageNum := 0 // err := client.DescribeClientVpnAuthorizationRulesPages(params, -// func(page *DescribeClientVpnAuthorizationRulesOutput, lastPage bool) bool { +// func(page *ec2.DescribeClientVpnAuthorizationRulesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10963,10 +11746,12 @@ func (c *EC2) DescribeClientVpnAuthorizationRulesPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClientVpnAuthorizationRulesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnAuthorizationRulesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11062,7 +11847,7 @@ func (c *EC2) DescribeClientVpnConnectionsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeClientVpnConnections operation. // pageNum := 0 // err := client.DescribeClientVpnConnectionsPages(params, -// func(page *DescribeClientVpnConnectionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeClientVpnConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11094,10 +11879,12 @@ func (c *EC2) DescribeClientVpnConnectionsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClientVpnConnectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnConnectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11192,7 +11979,7 @@ func (c *EC2) DescribeClientVpnEndpointsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeClientVpnEndpoints operation. // pageNum := 0 // err := client.DescribeClientVpnEndpointsPages(params, -// func(page *DescribeClientVpnEndpointsOutput, lastPage bool) bool { +// func(page *ec2.DescribeClientVpnEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11224,10 +12011,12 @@ func (c *EC2) DescribeClientVpnEndpointsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClientVpnEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11322,7 +12111,7 @@ func (c *EC2) DescribeClientVpnRoutesWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeClientVpnRoutes operation. 
// pageNum := 0 // err := client.DescribeClientVpnRoutesPages(params, -// func(page *DescribeClientVpnRoutesOutput, lastPage bool) bool { +// func(page *ec2.DescribeClientVpnRoutesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11354,10 +12143,12 @@ func (c *EC2) DescribeClientVpnRoutesPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClientVpnRoutesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnRoutesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11452,7 +12243,7 @@ func (c *EC2) DescribeClientVpnTargetNetworksWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeClientVpnTargetNetworks operation. // pageNum := 0 // err := client.DescribeClientVpnTargetNetworksPages(params, -// func(page *DescribeClientVpnTargetNetworksOutput, lastPage bool) bool { +// func(page *ec2.DescribeClientVpnTargetNetworksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11484,10 +12275,12 @@ func (c *EC2) DescribeClientVpnTargetNetworksPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClientVpnTargetNetworksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnTargetNetworksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11677,6 +12470,12 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req * Name: opDescribeDhcpOptions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -11692,7 +12491,7 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req * // // Describes one or more of your DHCP options sets. // -// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11723,6 +12522,58 @@ func (c *EC2) DescribeDhcpOptionsWithContext(ctx aws.Context, input *DescribeDhc return out, req.Send() } +// DescribeDhcpOptionsPages iterates over the pages of a DescribeDhcpOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDhcpOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDhcpOptions operation. +// pageNum := 0 +// err := client.DescribeDhcpOptionsPages(params, +// func(page *ec2.DescribeDhcpOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeDhcpOptionsPages(input *DescribeDhcpOptionsInput, fn func(*DescribeDhcpOptionsOutput, bool) bool) error { + return c.DescribeDhcpOptionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDhcpOptionsPagesWithContext same as DescribeDhcpOptionsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeDhcpOptionsPagesWithContext(ctx aws.Context, input *DescribeDhcpOptionsInput, fn func(*DescribeDhcpOptionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDhcpOptionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDhcpOptionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDhcpOptionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeEgressOnlyInternetGateways = "DescribeEgressOnlyInternetGateways" // DescribeEgressOnlyInternetGatewaysRequest generates a "aws/request.Request" representing the @@ -11814,7 +12665,7 @@ func (c *EC2) DescribeEgressOnlyInternetGatewaysWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeEgressOnlyInternetGateways operation. // pageNum := 0 // err := client.DescribeEgressOnlyInternetGatewaysPages(params, -// func(page *DescribeEgressOnlyInternetGatewaysOutput, lastPage bool) bool { +// func(page *ec2.DescribeEgressOnlyInternetGatewaysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11846,10 +12697,12 @@ func (c *EC2) DescribeEgressOnlyInternetGatewaysPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEgressOnlyInternetGatewaysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEgressOnlyInternetGatewaysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11929,6 +12782,80 @@ func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeEla return out, req.Send() } +const opDescribeExportImageTasks = "DescribeExportImageTasks" + +// DescribeExportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportImageTasks for more information on using the DescribeExportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportImageTasksRequest method. 
+// req, resp := client.DescribeExportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInput) (req *request.Request, output *DescribeExportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportImageTasksInput{} + } + + output = &DescribeExportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export image tasks or all your export image tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportImageTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasks(input *DescribeExportImageTasksInput) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeExportImageTasksWithContext is the same as DescribeExportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeExportImageTasksWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, opts ...request.Option) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the @@ -11973,7 +12900,8 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export tasks or all your export tasks. +// Describes the specified export instance tasks or all your export instance +// tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12242,7 +13170,7 @@ func (c *EC2) DescribeFleetsWithContext(ctx aws.Context, input *DescribeFleetsIn // // Example iterating over at most 3 pages of a DescribeFleets operation. 
// pageNum := 0 // err := client.DescribeFleetsPages(params, -// func(page *DescribeFleetsOutput, lastPage bool) bool { +// func(page *ec2.DescribeFleetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12274,10 +13202,12 @@ func (c *EC2) DescribeFleetsPagesWithContext(ctx aws.Context, input *DescribeFle }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeFleetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeFleetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12374,7 +13304,7 @@ func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLo // // Example iterating over at most 3 pages of a DescribeFlowLogs operation. // pageNum := 0 // err := client.DescribeFlowLogsPages(params, -// func(page *DescribeFlowLogsOutput, lastPage bool) bool { +// func(page *ec2.DescribeFlowLogsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12406,10 +13336,12 @@ func (c *EC2) DescribeFlowLogsPagesWithContext(ctx aws.Context, input *DescribeF }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeFlowLogsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeFlowLogsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12580,7 +13512,7 @@ func (c *EC2) DescribeFpgaImagesWithContext(ctx aws.Context, input *DescribeFpga // // Example iterating over at most 3 pages of a DescribeFpgaImages operation. // pageNum := 0 // err := client.DescribeFpgaImagesPages(params, -// func(page *DescribeFpgaImagesOutput, lastPage bool) bool { +// func(page *ec2.DescribeFpgaImagesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12612,10 +13544,12 @@ func (c *EC2) DescribeFpgaImagesPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeFpgaImagesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeFpgaImagesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12718,7 +13652,7 @@ func (c *EC2) DescribeHostReservationOfferingsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeHostReservationOfferings operation. // pageNum := 0 // err := client.DescribeHostReservationOfferingsPages(params, -// func(page *DescribeHostReservationOfferingsOutput, lastPage bool) bool { +// func(page *ec2.DescribeHostReservationOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12750,10 +13684,12 @@ func (c *EC2) DescribeHostReservationOfferingsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeHostReservationOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeHostReservationOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12848,7 +13784,7 @@ func (c *EC2) DescribeHostReservationsWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeHostReservations operation. 
// pageNum := 0 // err := client.DescribeHostReservationsPages(params, -// func(page *DescribeHostReservationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeHostReservationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12880,10 +13816,12 @@ func (c *EC2) DescribeHostReservationsPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeHostReservationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeHostReservationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12982,7 +13920,7 @@ func (c *EC2) DescribeHostsWithContext(ctx aws.Context, input *DescribeHostsInpu // // Example iterating over at most 3 pages of a DescribeHosts operation. // pageNum := 0 // err := client.DescribeHostsPages(params, -// func(page *DescribeHostsOutput, lastPage bool) bool { +// func(page *ec2.DescribeHostsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -13014,10 +13952,12 @@ func (c *EC2) DescribeHostsPagesWithContext(ctx aws.Context, input *DescribeHost }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeHostsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeHostsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -13112,7 +14052,7 @@ func (c *EC2) DescribeIamInstanceProfileAssociationsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeIamInstanceProfileAssociations operation. // pageNum := 0 // err := client.DescribeIamInstanceProfileAssociationsPages(params, -// func(page *DescribeIamInstanceProfileAssociationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeIamInstanceProfileAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -13144,10 +14084,12 @@ func (c *EC2) DescribeIamInstanceProfileAssociationsPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeIamInstanceProfileAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeIamInstanceProfileAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -13195,7 +14137,7 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques // DescribeIdFormat API operation for Amazon Elastic Compute Cloud. // -// Describes the ID format settings for your resources on a per-region basis, +// Describes the ID format settings for your resources on a per-Region basis, // for example, to view which resource types are enabled for longer IDs. This // request only returns information about resource types whose ID formats can // be modified; it does not return information about other resource types. @@ -13459,8 +14401,10 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // own, and private images owned by other AWS accounts for which you have explicit // launch permissions. // -// Recently deregistered images might appear in the returned results for a short -// interval. +// Recently deregistered images appear in the returned results for a short interval +// and then return empty results. After all instances that reference a deregistered +// AMI are terminated, specifying the ID of the image results in an error indicating +// that the AMI ID cannot be found. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13582,7 +14526,7 @@ func (c *EC2) DescribeImportImageTasksWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeImportImageTasks operation. // pageNum := 0 // err := client.DescribeImportImageTasksPages(params, -// func(page *DescribeImportImageTasksOutput, lastPage bool) bool { +// func(page *ec2.DescribeImportImageTasksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -13614,10 +14558,12 @@ func (c *EC2) DescribeImportImageTasksPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeImportImageTasksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeImportImageTasksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -13712,7 +14658,7 @@ func (c *EC2) DescribeImportSnapshotTasksWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeImportSnapshotTasks operation. // pageNum := 0 // err := client.DescribeImportSnapshotTasksPages(params, -// func(page *DescribeImportSnapshotTasksOutput, lastPage bool) bool { +// func(page *ec2.DescribeImportSnapshotTasksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -13744,10 +14690,12 @@ func (c *EC2) DescribeImportSnapshotTasksPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeImportSnapshotTasksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeImportSnapshotTasksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -13943,7 +14891,7 @@ func (c *EC2) DescribeInstanceCreditSpecificationsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeInstanceCreditSpecifications operation. // pageNum := 0 // err := client.DescribeInstanceCreditSpecificationsPages(params, -// func(page *DescribeInstanceCreditSpecificationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeInstanceCreditSpecificationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -13975,10 +14923,12 @@ func (c *EC2) DescribeInstanceCreditSpecificationsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeInstanceCreditSpecificationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeInstanceCreditSpecificationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -14094,7 +15044,7 @@ func (c *EC2) DescribeInstanceStatusWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeInstanceStatus operation. 
//    pageNum := 0
//    err := client.DescribeInstanceStatusPages(params,
-//        func(page *DescribeInstanceStatusOutput, lastPage bool) bool {
+//        func(page *ec2.DescribeInstanceStatusOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
@@ -14126,10 +15076,12 @@ func (c *EC2) DescribeInstanceStatusPagesWithContext(ctx aws.Context, input *Des
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*DescribeInstanceStatusOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*DescribeInstanceStatusOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
 
@@ -14183,7 +15135,7 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ
 
 // DescribeInstances API operation for Amazon Elastic Compute Cloud.
 //
-// Describes the specified instances or all of your instances.
+// Describes the specified instances or all of your AWS account's instances.
 //
 // If you specify one or more instance IDs, Amazon EC2 returns information for
 // those instances. If you do not specify instance IDs, Amazon EC2 returns information
@@ -14239,7 +15191,7 @@ func (c *EC2) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInsta
 //
 //    // Example iterating over at most 3 pages of a DescribeInstances operation.
 //    pageNum := 0
 //    err := client.DescribeInstancesPages(params,
-//        func(page *DescribeInstancesOutput, lastPage bool) bool {
+//        func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -14271,10 +15223,12 @@ func (c *EC2) DescribeInstancesPagesWithContext(ctx aws.Context, input *Describe
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*DescribeInstancesOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*DescribeInstancesOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
 
@@ -14369,7 +15323,7 @@ func (c *EC2) DescribeInternetGatewaysWithContext(ctx aws.Context, input *Descri
 //
 //    // Example iterating over at most 3 pages of a DescribeInternetGateways operation.
 //    pageNum := 0
 //    err := client.DescribeInternetGatewaysPages(params,
-//        func(page *DescribeInternetGatewaysOutput, lastPage bool) bool {
+//        func(page *ec2.DescribeInternetGatewaysOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -14401,10 +15355,12 @@ func (c *EC2) DescribeInternetGatewaysPagesWithContext(ctx aws.Context, input *D
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*DescribeInternetGatewaysOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*DescribeInternetGatewaysOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
 
@@ -14577,7 +15533,7 @@ func (c *EC2) DescribeLaunchTemplateVersionsWithContext(ctx aws.Context, input *
 //
 //    // Example iterating over at most 3 pages of a DescribeLaunchTemplateVersions operation.
// pageNum := 0 // err := client.DescribeLaunchTemplateVersionsPages(params, -// func(page *DescribeLaunchTemplateVersionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeLaunchTemplateVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -14609,10 +15565,12 @@ func (c *EC2) DescribeLaunchTemplateVersionsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLaunchTemplateVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLaunchTemplateVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -14707,7 +15665,7 @@ func (c *EC2) DescribeLaunchTemplatesWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeLaunchTemplates operation. // pageNum := 0 // err := client.DescribeLaunchTemplatesPages(params, -// func(page *DescribeLaunchTemplatesOutput, lastPage bool) bool { +// func(page *ec2.DescribeLaunchTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -14739,10 +15697,12 @@ func (c *EC2) DescribeLaunchTemplatesPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLaunchTemplatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLaunchTemplatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -14839,7 +15799,7 @@ func (c *EC2) DescribeMovingAddressesWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeMovingAddresses operation. // pageNum := 0 // err := client.DescribeMovingAddressesPages(params, -// func(page *DescribeMovingAddressesOutput, lastPage bool) bool { +// func(page *ec2.DescribeMovingAddressesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -14871,10 +15831,12 @@ func (c *EC2) DescribeMovingAddressesPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeMovingAddressesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeMovingAddressesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -14969,7 +15931,7 @@ func (c *EC2) DescribeNatGatewaysWithContext(ctx aws.Context, input *DescribeNat // // Example iterating over at most 3 pages of a DescribeNatGateways operation. // pageNum := 0 // err := client.DescribeNatGatewaysPages(params, -// func(page *DescribeNatGatewaysOutput, lastPage bool) bool { +// func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15001,10 +15963,12 @@ func (c *EC2) DescribeNatGatewaysPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeNatGatewaysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeNatGatewaysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15060,7 +16024,7 @@ func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req * // // Describes one or more of your network ACLs. // -// For more information, see Network ACLs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -15102,7 +16066,7 @@ func (c *EC2) DescribeNetworkAclsWithContext(ctx aws.Context, input *DescribeNet // // Example iterating over at most 3 pages of a DescribeNetworkAcls operation. // pageNum := 0 // err := client.DescribeNetworkAclsPages(params, -// func(page *DescribeNetworkAclsOutput, lastPage bool) bool { +// func(page *ec2.DescribeNetworkAclsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15134,10 +16098,12 @@ func (c *EC2) DescribeNetworkAclsPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeNetworkAclsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeNetworkAclsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15307,7 +16273,7 @@ func (c *EC2) DescribeNetworkInterfacePermissionsWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeNetworkInterfacePermissions operation. // pageNum := 0 // err := client.DescribeNetworkInterfacePermissionsPages(params, -// func(page *DescribeNetworkInterfacePermissionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeNetworkInterfacePermissionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15339,10 +16305,12 @@ func (c *EC2) DescribeNetworkInterfacePermissionsPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeNetworkInterfacePermissionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeNetworkInterfacePermissionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15437,7 +16405,7 @@ func (c *EC2) DescribeNetworkInterfacesWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribeNetworkInterfaces operation. // pageNum := 0 // err := client.DescribeNetworkInterfacesPages(params, -// func(page *DescribeNetworkInterfacesOutput, lastPage bool) bool { +// func(page *ec2.DescribeNetworkInterfacesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15469,10 +16437,12 @@ func (c *EC2) DescribeNetworkInterfacesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeNetworkInterfacesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeNetworkInterfacesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15648,7 +16618,7 @@ func (c *EC2) DescribePrefixListsWithContext(ctx aws.Context, input *DescribePre // // Example iterating over at most 3 pages of a DescribePrefixLists operation. // pageNum := 0 // err := client.DescribePrefixListsPages(params, -// func(page *DescribePrefixListsOutput, lastPage bool) bool { +// func(page *ec2.DescribePrefixListsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15680,10 +16650,12 @@ func (c *EC2) DescribePrefixListsPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePrefixListsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePrefixListsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15792,7 +16764,7 @@ func (c *EC2) DescribePrincipalIdFormatWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribePrincipalIdFormat operation. 
// pageNum := 0 // err := client.DescribePrincipalIdFormatPages(params, -// func(page *DescribePrincipalIdFormatOutput, lastPage bool) bool { +// func(page *ec2.DescribePrincipalIdFormatOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15824,10 +16796,12 @@ func (c *EC2) DescribePrincipalIdFormatPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePrincipalIdFormatOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePrincipalIdFormatOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -15922,7 +16896,7 @@ func (c *EC2) DescribePublicIpv4PoolsWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribePublicIpv4Pools operation. // pageNum := 0 // err := client.DescribePublicIpv4PoolsPages(params, -// func(page *DescribePublicIpv4PoolsOutput, lastPage bool) bool { +// func(page *ec2.DescribePublicIpv4PoolsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -15954,10 +16928,12 @@ func (c *EC2) DescribePublicIpv4PoolsPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribePublicIpv4PoolsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribePublicIpv4PoolsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -16005,11 +16981,15 @@ func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request. // DescribeRegions API operation for Amazon Elastic Compute Cloud. // -// Describes the regions that are currently available to you. +// Describes the Regions that are enabled for your account, or all Regions. // -// For a list of the regions supported by Amazon EC2, see Regions and Endpoints +// For a list of the Regions supported by Amazon EC2, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). // +// For information about enabling and disabling Regions for your account, see +// Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -16307,7 +17287,7 @@ func (c *EC2) DescribeReservedInstancesModificationsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeReservedInstancesModifications operation. // pageNum := 0 // err := client.DescribeReservedInstancesModificationsPages(params, -// func(page *DescribeReservedInstancesModificationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeReservedInstancesModificationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -16339,10 +17319,12 @@ func (c *EC2) DescribeReservedInstancesModificationsPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedInstancesModificationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedInstancesModificationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -16448,7 +17430,7 @@ func (c *EC2) DescribeReservedInstancesOfferingsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeReservedInstancesOfferings operation. 
// pageNum := 0 // err := client.DescribeReservedInstancesOfferingsPages(params, -// func(page *DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { +// func(page *ec2.DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -16480,10 +17462,12 @@ func (c *EC2) DescribeReservedInstancesOfferingsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedInstancesOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedInstancesOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -16544,7 +17528,7 @@ func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req * // with the main route table. This command does not return the subnet ID for // implicit associations. // -// For more information, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16586,7 +17570,7 @@ func (c *EC2) DescribeRouteTablesWithContext(ctx aws.Context, input *DescribeRou // // Example iterating over at most 3 pages of a DescribeRouteTables operation. // pageNum := 0 // err := client.DescribeRouteTablesPages(params, -// func(page *DescribeRouteTablesOutput, lastPage bool) bool { +// func(page *ec2.DescribeRouteTablesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -16618,10 +17602,12 @@ func (c *EC2) DescribeRouteTablesPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeRouteTablesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeRouteTablesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -16724,7 +17710,7 @@ func (c *EC2) DescribeScheduledInstanceAvailabilityWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeScheduledInstanceAvailability operation. // pageNum := 0 // err := client.DescribeScheduledInstanceAvailabilityPages(params, -// func(page *DescribeScheduledInstanceAvailabilityOutput, lastPage bool) bool { +// func(page *ec2.DescribeScheduledInstanceAvailabilityOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -16756,10 +17742,12 @@ func (c *EC2) DescribeScheduledInstanceAvailabilityPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScheduledInstanceAvailabilityOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScheduledInstanceAvailabilityOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -16854,7 +17842,7 @@ func (c *EC2) DescribeScheduledInstancesWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeScheduledInstances operation. 
// pageNum := 0 // err := client.DescribeScheduledInstancesPages(params, -// func(page *DescribeScheduledInstancesOutput, lastPage bool) bool { +// func(page *ec2.DescribeScheduledInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -16886,10 +17874,12 @@ func (c *EC2) DescribeScheduledInstancesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScheduledInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScheduledInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -17066,7 +18056,7 @@ func (c *EC2) DescribeSecurityGroupsWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeSecurityGroups operation. // pageNum := 0 // err := client.DescribeSecurityGroupsPages(params, -// func(page *DescribeSecurityGroupsOutput, lastPage bool) bool { +// func(page *ec2.DescribeSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -17098,10 +18088,12 @@ func (c *EC2) DescribeSecurityGroupsPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -17321,7 +18313,7 @@ func (c *EC2) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnaps // // Example iterating over at most 3 pages of a DescribeSnapshots operation. // pageNum := 0 // err := client.DescribeSnapshotsPages(params, -// func(page *DescribeSnapshotsOutput, lastPage bool) bool { +// func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -17353,10 +18345,12 @@ func (c *EC2) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *Describe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -17683,7 +18677,7 @@ func (c *EC2) DescribeSpotFleetRequestsWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribeSpotFleetRequests operation. // pageNum := 0 // err := client.DescribeSpotFleetRequestsPages(params, -// func(page *DescribeSpotFleetRequestsOutput, lastPage bool) bool { +// func(page *ec2.DescribeSpotFleetRequestsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -17715,10 +18709,12 @@ func (c *EC2) DescribeSpotFleetRequestsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSpotFleetRequestsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSpotFleetRequestsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -17829,7 +18825,7 @@ func (c *EC2) DescribeSpotInstanceRequestsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeSpotInstanceRequests operation. 
// pageNum := 0 // err := client.DescribeSpotInstanceRequestsPages(params, -// func(page *DescribeSpotInstanceRequestsOutput, lastPage bool) bool { +// func(page *ec2.DescribeSpotInstanceRequestsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -17861,10 +18857,12 @@ func (c *EC2) DescribeSpotInstanceRequestsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSpotInstanceRequestsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSpotInstanceRequestsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -17966,7 +18964,7 @@ func (c *EC2) DescribeSpotPriceHistoryWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeSpotPriceHistory operation. // pageNum := 0 // err := client.DescribeSpotPriceHistoryPages(params, -// func(page *DescribeSpotPriceHistoryOutput, lastPage bool) bool { +// func(page *ec2.DescribeSpotPriceHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -17998,10 +18996,12 @@ func (c *EC2) DescribeSpotPriceHistoryPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSpotPriceHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeSpotPriceHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -18099,7 +19099,7 @@ func (c *EC2) DescribeStaleSecurityGroupsWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeStaleSecurityGroups operation. // pageNum := 0 // err := client.DescribeStaleSecurityGroupsPages(params, -// func(page *DescribeStaleSecurityGroupsOutput, lastPage bool) bool { +// func(page *ec2.DescribeStaleSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -18131,10 +19131,12 @@ func (c *EC2) DescribeStaleSecurityGroupsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeStaleSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeStaleSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -18169,6 +19171,12 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request. Name: opDescribeSubnets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -18184,7 +19192,7 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request. // // Describes one or more of your subnets. // -// For more information, see Your VPC and Subnets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// For more information, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -18215,6 +19223,58 @@ func (c *EC2) DescribeSubnetsWithContext(ctx aws.Context, input *DescribeSubnets return out, req.Send() } +// DescribeSubnetsPages iterates over the pages of a DescribeSubnets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeSubnets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubnets operation. +// pageNum := 0 +// err := client.DescribeSubnetsPages(params, +// func(page *ec2.DescribeSubnetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSubnetsPages(input *DescribeSubnetsInput, fn func(*DescribeSubnetsOutput, bool) bool) error { + return c.DescribeSubnetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSubnetsPagesWithContext same as DescribeSubnetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSubnetsPagesWithContext(ctx aws.Context, input *DescribeSubnetsInput, fn func(*DescribeSubnetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSubnetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubnetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSubnetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the @@ -18309,7 +19369,7 @@ func (c *EC2) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, // // Example iterating over at most 3 pages of a DescribeTags operation. // pageNum := 0 // err := client.DescribeTagsPages(params, -// func(page *DescribeTagsOutput, lastPage bool) bool { +// func(page *ec2.DescribeTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -18341,42 +19401,44 @@ func (c *EC2) DescribeTagsPagesWithContext(ctx aws.Context, input *DescribeTagsI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeTransitGatewayAttachments = "DescribeTransitGatewayAttachments" +const opDescribeTrafficMirrorFilters = "DescribeTrafficMirrorFilters" -// DescribeTransitGatewayAttachmentsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTransitGatewayAttachments operation. The "output" return +// DescribeTrafficMirrorFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorFilters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTransitGatewayAttachments for more information on using the DescribeTransitGatewayAttachments +// See DescribeTrafficMirrorFilters for more information on using the DescribeTrafficMirrorFilters // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTransitGatewayAttachmentsRequest method. -// req, resp := client.DescribeTransitGatewayAttachmentsRequest(params) +// // Example sending a request using the DescribeTrafficMirrorFiltersRequest method. +// req, resp := client.DescribeTrafficMirrorFiltersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments -func (c *EC2) DescribeTransitGatewayAttachmentsRequest(input *DescribeTransitGatewayAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayAttachmentsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilters +func (c *EC2) DescribeTrafficMirrorFiltersRequest(input *DescribeTrafficMirrorFiltersInput) (req *request.Request, output *DescribeTrafficMirrorFiltersOutput) { op := &request.Operation{ - Name: opDescribeTransitGatewayAttachments, + Name: opDescribeTrafficMirrorFilters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -18388,128 +19450,127 @@ func (c *EC2) DescribeTransitGatewayAttachmentsRequest(input *DescribeTransitGat } if input == nil { - input = &DescribeTransitGatewayAttachmentsInput{} + input = &DescribeTrafficMirrorFiltersInput{} } - output = &DescribeTransitGatewayAttachmentsOutput{} + output = &DescribeTrafficMirrorFiltersOutput{} req = c.newRequest(op, input, output) return } -// DescribeTransitGatewayAttachments API operation for Amazon Elastic Compute Cloud. +// DescribeTrafficMirrorFilters API operation for Amazon Elastic Compute Cloud. // -// Describes one or more attachments between resources and transit gateways. -// By default, all attachments are described. Alternatively, you can filter -// the results by attachment ID, attachment state, resource ID, or resource -// owner. +// Describes one or more Traffic Mirror filters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic Compute Cloud's -// API operation DescribeTransitGatewayAttachments for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments -func (c *EC2) DescribeTransitGatewayAttachments(input *DescribeTransitGatewayAttachmentsInput) (*DescribeTransitGatewayAttachmentsOutput, error) { - req, out := c.DescribeTransitGatewayAttachmentsRequest(input) +// API operation DescribeTrafficMirrorFilters for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilters +func (c *EC2) DescribeTrafficMirrorFilters(input *DescribeTrafficMirrorFiltersInput) (*DescribeTrafficMirrorFiltersOutput, error) { + req, out := c.DescribeTrafficMirrorFiltersRequest(input) return out, req.Send() } -// DescribeTransitGatewayAttachmentsWithContext is the same as DescribeTransitGatewayAttachments with the addition of +// DescribeTrafficMirrorFiltersWithContext is the same as DescribeTrafficMirrorFilters with the addition of // the ability to pass a context and additional request options. 
// -// See DescribeTransitGatewayAttachments for details on how to use this API operation. +// See DescribeTrafficMirrorFilters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *EC2) DescribeTransitGatewayAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayAttachmentsOutput, error) { - req, out := c.DescribeTransitGatewayAttachmentsRequest(input) +func (c *EC2) DescribeTrafficMirrorFiltersWithContext(ctx aws.Context, input *DescribeTrafficMirrorFiltersInput, opts ...request.Option) (*DescribeTrafficMirrorFiltersOutput, error) { + req, out := c.DescribeTrafficMirrorFiltersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeTransitGatewayAttachmentsPages iterates over the pages of a DescribeTransitGatewayAttachments operation, +// DescribeTrafficMirrorFiltersPages iterates over the pages of a DescribeTrafficMirrorFilters operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeTransitGatewayAttachments method for more information on how to use this operation. +// See DescribeTrafficMirrorFilters method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeTransitGatewayAttachments operation. +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorFilters operation. // pageNum := 0 -// err := client.DescribeTransitGatewayAttachmentsPages(params, -// func(page *DescribeTransitGatewayAttachmentsOutput, lastPage bool) bool { +// err := client.DescribeTrafficMirrorFiltersPages(params, +// func(page *ec2.DescribeTrafficMirrorFiltersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *EC2) DescribeTransitGatewayAttachmentsPages(input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool) error { - return c.DescribeTransitGatewayAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *EC2) DescribeTrafficMirrorFiltersPages(input *DescribeTrafficMirrorFiltersInput, fn func(*DescribeTrafficMirrorFiltersOutput, bool) bool) error { + return c.DescribeTrafficMirrorFiltersPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeTransitGatewayAttachmentsPagesWithContext same as DescribeTransitGatewayAttachmentsPages except +// DescribeTrafficMirrorFiltersPagesWithContext same as DescribeTrafficMirrorFiltersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
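// The "Returns awserr.Error" boilerplate above never shows the type
// assertion it keeps referring to. A minimal sketch, assuming an
// *ec2.EC2 client named svc and the awserr package from
// github.com/aws/aws-sdk-go/aws/awserr (not part of the vendored code):
//
//    out, err := svc.DescribeTrafficMirrorFilters(&ec2.DescribeTrafficMirrorFiltersInput{})
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            // Code() is the stable error identifier; Message() carries the detail.
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//        return err
//    }
//    fmt.Println(len(out.TrafficMirrorFilters), "filters")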
-func (c *EC2) DescribeTransitGatewayAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool, opts ...request.Option) error { +func (c *EC2) DescribeTrafficMirrorFiltersPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorFiltersInput, fn func(*DescribeTrafficMirrorFiltersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeTransitGatewayAttachmentsInput + var inCpy *DescribeTrafficMirrorFiltersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeTransitGatewayAttachmentsRequest(inCpy) + req, _ := c.DescribeTrafficMirrorFiltersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTransitGatewayAttachmentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorFiltersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeTransitGatewayRouteTables = "DescribeTransitGatewayRouteTables" +const opDescribeTrafficMirrorSessions = "DescribeTrafficMirrorSessions" -// DescribeTransitGatewayRouteTablesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTransitGatewayRouteTables operation. The "output" return +// DescribeTrafficMirrorSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorSessions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTransitGatewayRouteTables for more information on using the DescribeTransitGatewayRouteTables +// See DescribeTrafficMirrorSessions for more information on using the DescribeTrafficMirrorSessions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTransitGatewayRouteTablesRequest method. -// req, resp := client.DescribeTransitGatewayRouteTablesRequest(params) +// // Example sending a request using the DescribeTrafficMirrorSessionsRequest method. 
+// req, resp := client.DescribeTrafficMirrorSessionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables -func (c *EC2) DescribeTransitGatewayRouteTablesRequest(input *DescribeTransitGatewayRouteTablesInput) (req *request.Request, output *DescribeTransitGatewayRouteTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorSessions +func (c *EC2) DescribeTrafficMirrorSessionsRequest(input *DescribeTrafficMirrorSessionsInput) (req *request.Request, output *DescribeTrafficMirrorSessionsOutput) { op := &request.Operation{ - Name: opDescribeTransitGatewayRouteTables, + Name: opDescribeTrafficMirrorSessions, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -18521,126 +19582,128 @@ func (c *EC2) DescribeTransitGatewayRouteTablesRequest(input *DescribeTransitGat } if input == nil { - input = &DescribeTransitGatewayRouteTablesInput{} + input = &DescribeTrafficMirrorSessionsInput{} } - output = &DescribeTransitGatewayRouteTablesOutput{} + output = &DescribeTrafficMirrorSessionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeTransitGatewayRouteTables API operation for Amazon Elastic Compute Cloud. +// DescribeTrafficMirrorSessions API operation for Amazon Elastic Compute Cloud. // -// Describes one or more transit gateway route tables. By default, all transit -// gateway route tables are described. Alternatively, you can filter the results. +// Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror +// sessions are described. Alternatively, you can filter the results. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic Compute Cloud's -// API operation DescribeTransitGatewayRouteTables for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables -func (c *EC2) DescribeTransitGatewayRouteTables(input *DescribeTransitGatewayRouteTablesInput) (*DescribeTransitGatewayRouteTablesOutput, error) { - req, out := c.DescribeTransitGatewayRouteTablesRequest(input) +// API operation DescribeTrafficMirrorSessions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorSessions +func (c *EC2) DescribeTrafficMirrorSessions(input *DescribeTrafficMirrorSessionsInput) (*DescribeTrafficMirrorSessionsOutput, error) { + req, out := c.DescribeTrafficMirrorSessionsRequest(input) return out, req.Send() } -// DescribeTransitGatewayRouteTablesWithContext is the same as DescribeTransitGatewayRouteTables with the addition of +// DescribeTrafficMirrorSessionsWithContext is the same as DescribeTrafficMirrorSessions with the addition of // the ability to pass a context and additional request options. // -// See DescribeTransitGatewayRouteTables for details on how to use this API operation. +// See DescribeTrafficMirrorSessions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
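// aws.Context is satisfied by the standard library's context.Context, so
// the *WithContext and *PagesWithContext variants in this file compose
// with deadlines. A minimal sketch, assuming svc is an *ec2.EC2 client;
// the 30-second timeout is an arbitrary illustrative choice:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    err := svc.DescribeTrafficMirrorSessionsPagesWithContext(ctx,
//        &ec2.DescribeTrafficMirrorSessionsInput{},
//        func(page *ec2.DescribeTrafficMirrorSessionsOutput, lastPage bool) bool {
//            fmt.Println(len(page.TrafficMirrorSessions), "sessions on this page")
//            return true // walk every page unless ctx expires first
//        })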
-func (c *EC2) DescribeTransitGatewayRouteTablesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, opts ...request.Option) (*DescribeTransitGatewayRouteTablesOutput, error) { - req, out := c.DescribeTransitGatewayRouteTablesRequest(input) +func (c *EC2) DescribeTrafficMirrorSessionsWithContext(ctx aws.Context, input *DescribeTrafficMirrorSessionsInput, opts ...request.Option) (*DescribeTrafficMirrorSessionsOutput, error) { + req, out := c.DescribeTrafficMirrorSessionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeTransitGatewayRouteTablesPages iterates over the pages of a DescribeTransitGatewayRouteTables operation, +// DescribeTrafficMirrorSessionsPages iterates over the pages of a DescribeTrafficMirrorSessions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeTransitGatewayRouteTables method for more information on how to use this operation. +// See DescribeTrafficMirrorSessions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeTransitGatewayRouteTables operation. +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorSessions operation. // pageNum := 0 -// err := client.DescribeTransitGatewayRouteTablesPages(params, -// func(page *DescribeTransitGatewayRouteTablesOutput, lastPage bool) bool { +// err := client.DescribeTrafficMirrorSessionsPages(params, +// func(page *ec2.DescribeTrafficMirrorSessionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *EC2) DescribeTransitGatewayRouteTablesPages(input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool) error { - return c.DescribeTransitGatewayRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *EC2) DescribeTrafficMirrorSessionsPages(input *DescribeTrafficMirrorSessionsInput, fn func(*DescribeTrafficMirrorSessionsOutput, bool) bool) error { + return c.DescribeTrafficMirrorSessionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeTransitGatewayRouteTablesPagesWithContext same as DescribeTransitGatewayRouteTablesPages except +// DescribeTrafficMirrorSessionsPagesWithContext same as DescribeTrafficMirrorSessionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
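// The function below carries the same mechanical rewrite applied to every
// Pages helper in this file. In the old shape, p.Next() sits to the left
// of && and is evaluated first, so one more page could be fetched even
// after fn had already returned false; the new shape breaks before calling
// p.Next() again, and the trailing return p.Err() is unchanged. Side by
// side, for any page type:
//
//    // before: p.Next() runs again (and may issue a request) even
//    // after fn asked to stop
//    cont := true
//    for p.Next() && cont {
//        cont = fn(p.Page().(*DescribeTrafficMirrorSessionsOutput), !p.HasNextPage())
//    }
//
//    // after: stop first, then report any pagination error
//    for p.Next() {
//        if !fn(p.Page().(*DescribeTrafficMirrorSessionsOutput), !p.HasNextPage()) {
//            break
//        }
//    }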
-func (c *EC2) DescribeTransitGatewayRouteTablesPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool, opts ...request.Option) error { +func (c *EC2) DescribeTrafficMirrorSessionsPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorSessionsInput, fn func(*DescribeTrafficMirrorSessionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeTransitGatewayRouteTablesInput + var inCpy *DescribeTrafficMirrorSessionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeTransitGatewayRouteTablesRequest(inCpy) + req, _ := c.DescribeTrafficMirrorSessionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTransitGatewayRouteTablesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorSessionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeTransitGatewayVpcAttachments = "DescribeTransitGatewayVpcAttachments" +const opDescribeTrafficMirrorTargets = "DescribeTrafficMirrorTargets" -// DescribeTransitGatewayVpcAttachmentsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTransitGatewayVpcAttachments operation. The "output" return +// DescribeTrafficMirrorTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorTargets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTransitGatewayVpcAttachments for more information on using the DescribeTransitGatewayVpcAttachments +// See DescribeTrafficMirrorTargets for more information on using the DescribeTrafficMirrorTargets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTransitGatewayVpcAttachmentsRequest method. -// req, resp := client.DescribeTransitGatewayVpcAttachmentsRequest(params) +// // Example sending a request using the DescribeTrafficMirrorTargetsRequest method. 
+// req, resp := client.DescribeTrafficMirrorTargetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments -func (c *EC2) DescribeTransitGatewayVpcAttachmentsRequest(input *DescribeTransitGatewayVpcAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayVpcAttachmentsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorTargets +func (c *EC2) DescribeTrafficMirrorTargetsRequest(input *DescribeTrafficMirrorTargetsInput) (req *request.Request, output *DescribeTrafficMirrorTargetsOutput) { op := &request.Operation{ - Name: opDescribeTransitGatewayVpcAttachments, + Name: opDescribeTrafficMirrorTargets, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -18652,126 +19715,127 @@ func (c *EC2) DescribeTransitGatewayVpcAttachmentsRequest(input *DescribeTransit } if input == nil { - input = &DescribeTransitGatewayVpcAttachmentsInput{} + input = &DescribeTrafficMirrorTargetsInput{} } - output = &DescribeTransitGatewayVpcAttachmentsOutput{} + output = &DescribeTrafficMirrorTargetsOutput{} req = c.newRequest(op, input, output) return } -// DescribeTransitGatewayVpcAttachments API operation for Amazon Elastic Compute Cloud. +// DescribeTrafficMirrorTargets API operation for Amazon Elastic Compute Cloud. // -// Describes one or more VPC attachments. By default, all VPC attachments are -// described. Alternatively, you can filter the results. +// Describes one or more Traffic Mirror targets. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic Compute Cloud's -// API operation DescribeTransitGatewayVpcAttachments for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments -func (c *EC2) DescribeTransitGatewayVpcAttachments(input *DescribeTransitGatewayVpcAttachmentsInput) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { - req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) +// API operation DescribeTrafficMirrorTargets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorTargets +func (c *EC2) DescribeTrafficMirrorTargets(input *DescribeTrafficMirrorTargetsInput) (*DescribeTrafficMirrorTargetsOutput, error) { + req, out := c.DescribeTrafficMirrorTargetsRequest(input) return out, req.Send() } -// DescribeTransitGatewayVpcAttachmentsWithContext is the same as DescribeTransitGatewayVpcAttachments with the addition of +// DescribeTrafficMirrorTargetsWithContext is the same as DescribeTrafficMirrorTargets with the addition of // the ability to pass a context and additional request options. // -// See DescribeTransitGatewayVpcAttachments for details on how to use this API operation. +// See DescribeTrafficMirrorTargets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts.
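// The request.Paginator blocks added in these hunks (InputTokens and
// OutputTokens of "NextToken", LimitToken of "MaxResults") are what the
// Pages helpers drive under the hood. The equivalent hand-rolled loop,
// sketched with DescribeTrafficMirrorTargets and an assumed svc client;
// the page size of 100 is arbitrary:
//
//    var targets []*ec2.TrafficMirrorTarget
//    input := &ec2.DescribeTrafficMirrorTargetsInput{MaxResults: aws.Int64(100)}
//    for {
//        out, err := svc.DescribeTrafficMirrorTargets(input)
//        if err != nil {
//            return err
//        }
//        targets = append(targets, out.TrafficMirrorTargets...)
//        if out.NextToken == nil {
//            break // no output token left: this was the last page
//        }
//        input.NextToken = out.NextToken // feed the token back into the next request
//    }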
-func (c *EC2) DescribeTransitGatewayVpcAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { - req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) +func (c *EC2) DescribeTrafficMirrorTargetsWithContext(ctx aws.Context, input *DescribeTrafficMirrorTargetsInput, opts ...request.Option) (*DescribeTrafficMirrorTargetsOutput, error) { + req, out := c.DescribeTrafficMirrorTargetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeTransitGatewayVpcAttachmentsPages iterates over the pages of a DescribeTransitGatewayVpcAttachments operation, +// DescribeTrafficMirrorTargetsPages iterates over the pages of a DescribeTrafficMirrorTargets operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeTransitGatewayVpcAttachments method for more information on how to use this operation. +// See DescribeTrafficMirrorTargets method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeTransitGatewayVpcAttachments operation. +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorTargets operation. // pageNum := 0 -// err := client.DescribeTransitGatewayVpcAttachmentsPages(params, -// func(page *DescribeTransitGatewayVpcAttachmentsOutput, lastPage bool) bool { +// err := client.DescribeTrafficMirrorTargetsPages(params, +// func(page *ec2.DescribeTrafficMirrorTargetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *EC2) DescribeTransitGatewayVpcAttachmentsPages(input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool) error { - return c.DescribeTransitGatewayVpcAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *EC2) DescribeTrafficMirrorTargetsPages(input *DescribeTrafficMirrorTargetsInput, fn func(*DescribeTrafficMirrorTargetsOutput, bool) bool) error { + return c.DescribeTrafficMirrorTargetsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeTransitGatewayVpcAttachmentsPagesWithContext same as DescribeTransitGatewayVpcAttachmentsPages except +// DescribeTrafficMirrorTargetsPagesWithContext same as DescribeTrafficMirrorTargetsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
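// For completeness, a self-contained sketch of the Pages pattern these doc
// comments keep describing, using the DescribeSubnetsPages helper added
// earlier in this diff; the region value is an illustrative assumption:
//
//    package main
//
//    import (
//        "fmt"
//
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/aws/session"
//        "github.com/aws/aws-sdk-go/service/ec2"
//    )
//
//    func main() {
//        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
//        svc := ec2.New(sess)
//
//        err := svc.DescribeSubnetsPages(&ec2.DescribeSubnetsInput{},
//            func(page *ec2.DescribeSubnetsOutput, lastPage bool) bool {
//                for _, s := range page.Subnets {
//                    fmt.Println(aws.StringValue(s.SubnetId), aws.StringValue(s.CidrBlock))
//                }
//                return true // keep paging through to the last page
//            })
//        if err != nil {
//            fmt.Println("DescribeSubnets failed:", err)
//        }
//    }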
-func (c *EC2) DescribeTransitGatewayVpcAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool, opts ...request.Option) error { +func (c *EC2) DescribeTrafficMirrorTargetsPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorTargetsInput, fn func(*DescribeTrafficMirrorTargetsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeTransitGatewayVpcAttachmentsInput + var inCpy *DescribeTrafficMirrorTargetsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeTransitGatewayVpcAttachmentsRequest(inCpy) + req, _ := c.DescribeTrafficMirrorTargetsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTransitGatewayVpcAttachmentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorTargetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeTransitGateways = "DescribeTransitGateways" +const opDescribeTransitGatewayAttachments = "DescribeTransitGatewayAttachments" -// DescribeTransitGatewaysRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTransitGateways operation. The "output" return +// DescribeTransitGatewayAttachmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayAttachments operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTransitGateways for more information on using the DescribeTransitGateways +// See DescribeTransitGatewayAttachments for more information on using the DescribeTransitGatewayAttachments // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTransitGatewaysRequest method. -// req, resp := client.DescribeTransitGatewaysRequest(params) +// // Example sending a request using the DescribeTransitGatewayAttachmentsRequest method. 
+// req, resp := client.DescribeTransitGatewayAttachmentsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways -func (c *EC2) DescribeTransitGatewaysRequest(input *DescribeTransitGatewaysInput) (req *request.Request, output *DescribeTransitGatewaysOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments +func (c *EC2) DescribeTransitGatewayAttachmentsRequest(input *DescribeTransitGatewayAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayAttachmentsOutput) { op := &request.Operation{ - Name: opDescribeTransitGateways, + Name: opDescribeTransitGatewayAttachments, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -18783,193 +19847,596 @@ func (c *EC2) DescribeTransitGatewaysRequest(input *DescribeTransitGatewaysInput } if input == nil { - input = &DescribeTransitGatewaysInput{} + input = &DescribeTransitGatewayAttachmentsInput{} } - output = &DescribeTransitGatewaysOutput{} + output = &DescribeTransitGatewayAttachmentsOutput{} req = c.newRequest(op, input, output) return } -// DescribeTransitGateways API operation for Amazon Elastic Compute Cloud. +// DescribeTransitGatewayAttachments API operation for Amazon Elastic Compute Cloud. // -// Describes one or more transit gateways. By default, all transit gateways -// are described. Alternatively, you can filter the results. +// Describes one or more attachments between resources and transit gateways. +// By default, all attachments are described. Alternatively, you can filter +// the results by attachment ID, attachment state, resource ID, or resource +// owner. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic Compute Cloud's -// API operation DescribeTransitGateways for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways -func (c *EC2) DescribeTransitGateways(input *DescribeTransitGatewaysInput) (*DescribeTransitGatewaysOutput, error) { - req, out := c.DescribeTransitGatewaysRequest(input) +// API operation DescribeTransitGatewayAttachments for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments +func (c *EC2) DescribeTransitGatewayAttachments(input *DescribeTransitGatewayAttachmentsInput) (*DescribeTransitGatewayAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayAttachmentsRequest(input) return out, req.Send() } -// DescribeTransitGatewaysWithContext is the same as DescribeTransitGateways with the addition of +// DescribeTransitGatewayAttachmentsWithContext is the same as DescribeTransitGatewayAttachments with the addition of // the ability to pass a context and additional request options. // -// See DescribeTransitGateways for details on how to use this API operation. +// See DescribeTransitGatewayAttachments for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
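// The DescribeTransitGatewayAttachments doc above says results can be
// filtered by attachment state or resource ID. A sketch using the generic
// ec2.Filter plumbing; the "state" filter name follows the usual EC2
// filter convention but is an assumption here, not taken from this diff:
//
//    out, err := svc.DescribeTransitGatewayAttachments(&ec2.DescribeTransitGatewayAttachmentsInput{
//        Filters: []*ec2.Filter{{
//            Name:   aws.String("state"), // assumed filter name
//            Values: []*string{aws.String("available")},
//        }},
//    })
//    if err == nil {
//        for _, att := range out.TransitGatewayAttachments {
//            fmt.Println(aws.StringValue(att.TransitGatewayAttachmentId))
//        }
//    }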
-func (c *EC2) DescribeTransitGatewaysWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, opts ...request.Option) (*DescribeTransitGatewaysOutput, error) { - req, out := c.DescribeTransitGatewaysRequest(input) +func (c *EC2) DescribeTransitGatewayAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayAttachmentsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeTransitGatewaysPages iterates over the pages of a DescribeTransitGateways operation, +// DescribeTransitGatewayAttachmentsPages iterates over the pages of a DescribeTransitGatewayAttachments operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeTransitGateways method for more information on how to use this operation. +// See DescribeTransitGatewayAttachments method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeTransitGateways operation. +// // Example iterating over at most 3 pages of a DescribeTransitGatewayAttachments operation. // pageNum := 0 -// err := client.DescribeTransitGatewaysPages(params, -// func(page *DescribeTransitGatewaysOutput, lastPage bool) bool { +// err := client.DescribeTransitGatewayAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayAttachmentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *EC2) DescribeTransitGatewaysPages(input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool) error { - return c.DescribeTransitGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *EC2) DescribeTransitGatewayAttachmentsPages(input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeTransitGatewaysPagesWithContext same as DescribeTransitGatewaysPages except +// DescribeTransitGatewayAttachmentsPagesWithContext same as DescribeTransitGatewayAttachmentsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *EC2) DescribeTransitGatewaysPagesWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool, opts ...request.Option) error { +func (c *EC2) DescribeTransitGatewayAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeTransitGatewaysInput + var inCpy *DescribeTransitGatewayAttachmentsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeTransitGatewaysRequest(inCpy) + req, _ := c.DescribeTransitGatewayAttachmentsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTransitGatewaysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayAttachmentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeVolumeAttribute = "DescribeVolumeAttribute" +const opDescribeTransitGatewayRouteTables = "DescribeTransitGatewayRouteTables" -// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the -// client's request for the DescribeVolumeAttribute operation. The "output" return +// DescribeTransitGatewayRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayRouteTables operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeVolumeAttribute for more information on using the DescribeVolumeAttribute +// See DescribeTransitGatewayRouteTables for more information on using the DescribeTransitGatewayRouteTables // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeVolumeAttributeRequest method. -// req, resp := client.DescribeVolumeAttributeRequest(params) +// // Example sending a request using the DescribeTransitGatewayRouteTablesRequest method. +// req, resp := client.DescribeTransitGatewayRouteTablesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute -func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables +func (c *EC2) DescribeTransitGatewayRouteTablesRequest(input *DescribeTransitGatewayRouteTablesInput) (req *request.Request, output *DescribeTransitGatewayRouteTablesOutput) { op := &request.Operation{ - Name: opDescribeVolumeAttribute, + Name: opDescribeTransitGatewayRouteTables, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DescribeVolumeAttributeInput{} + input = &DescribeTransitGatewayRouteTablesInput{} } - output = &DescribeVolumeAttributeOutput{} + output = &DescribeTransitGatewayRouteTablesOutput{} req = c.newRequest(op, input, output) return } -// DescribeVolumeAttribute API operation for Amazon Elastic Compute Cloud. -// -// Describes the specified attribute of the specified volume. You can specify -// only one attribute at a time. +// DescribeTransitGatewayRouteTables API operation for Amazon Elastic Compute Cloud. // -// For more information about EBS volumes, see Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) -// in the Amazon Elastic Compute Cloud User Guide. +// Describes one or more transit gateway route tables. By default, all transit +// gateway route tables are described. 
Alternatively, you can filter the results. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic Compute Cloud's -// API operation DescribeVolumeAttribute for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute -func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { - req, out := c.DescribeVolumeAttributeRequest(input) +// API operation DescribeTransitGatewayRouteTables for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables +func (c *EC2) DescribeTransitGatewayRouteTables(input *DescribeTransitGatewayRouteTablesInput) (*DescribeTransitGatewayRouteTablesOutput, error) { + req, out := c.DescribeTransitGatewayRouteTablesRequest(input) return out, req.Send() } -// DescribeVolumeAttributeWithContext is the same as DescribeVolumeAttribute with the addition of +// DescribeTransitGatewayRouteTablesWithContext is the same as DescribeTransitGatewayRouteTables with the addition of // the ability to pass a context and additional request options. // -// See DescribeVolumeAttribute for details on how to use this API operation. +// See DescribeTransitGatewayRouteTables for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *EC2) DescribeVolumeAttributeWithContext(ctx aws.Context, input *DescribeVolumeAttributeInput, opts ...request.Option) (*DescribeVolumeAttributeOutput, error) { - req, out := c.DescribeVolumeAttributeRequest(input) +func (c *EC2) DescribeTransitGatewayRouteTablesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, opts ...request.Option) (*DescribeTransitGatewayRouteTablesOutput, error) { + req, out := c.DescribeTransitGatewayRouteTablesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeVolumeStatus = "DescribeVolumeStatus" +// DescribeTransitGatewayRouteTablesPages iterates over the pages of a DescribeTransitGatewayRouteTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayRouteTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayRouteTables operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayRouteTablesPages(params, +// func(page *ec2.DescribeTransitGatewayRouteTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayRouteTablesPages(input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool) error { + return c.DescribeTransitGatewayRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the -// client's request for the DescribeVolumeStatus operation. The "output" return +// DescribeTransitGatewayRouteTablesPagesWithContext same as DescribeTransitGatewayRouteTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayRouteTablesPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayRouteTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayRouteTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayRouteTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayVpcAttachments = "DescribeTransitGatewayVpcAttachments" + +// DescribeTransitGatewayVpcAttachmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayVpcAttachments operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeVolumeStatus for more information on using the DescribeVolumeStatus +// See DescribeTransitGatewayVpcAttachments for more information on using the DescribeTransitGatewayVpcAttachments // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeVolumeStatusRequest method. +// // Example sending a request using the DescribeTransitGatewayVpcAttachmentsRequest method. 
+// req, resp := client.DescribeTransitGatewayVpcAttachmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments +func (c *EC2) DescribeTransitGatewayVpcAttachmentsRequest(input *DescribeTransitGatewayVpcAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayVpcAttachmentsOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayVpcAttachments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayVpcAttachmentsInput{} + } + + output = &DescribeTransitGatewayVpcAttachmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayVpcAttachments API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more VPC attachments. By default, all VPC attachments are +// described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayVpcAttachments for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments +func (c *EC2) DescribeTransitGatewayVpcAttachments(input *DescribeTransitGatewayVpcAttachmentsInput) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayVpcAttachmentsWithContext is the same as DescribeTransitGatewayVpcAttachments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayVpcAttachments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayVpcAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayVpcAttachmentsPages iterates over the pages of a DescribeTransitGatewayVpcAttachments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayVpcAttachments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayVpcAttachments operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayVpcAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayVpcAttachmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayVpcAttachmentsPages(input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayVpcAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayVpcAttachmentsPagesWithContext same as DescribeTransitGatewayVpcAttachmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayVpcAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayVpcAttachmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayVpcAttachmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayVpcAttachmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGateways = "DescribeTransitGateways" + +// DescribeTransitGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGateways for more information on using the DescribeTransitGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewaysRequest method. +// req, resp := client.DescribeTransitGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways +func (c *EC2) DescribeTransitGatewaysRequest(input *DescribeTransitGatewaysInput) (req *request.Request, output *DescribeTransitGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeTransitGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewaysInput{} + } + + output = &DescribeTransitGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more transit gateways. 
By default, all transit gateways +// are described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways +func (c *EC2) DescribeTransitGateways(input *DescribeTransitGatewaysInput) (*DescribeTransitGatewaysOutput, error) { + req, out := c.DescribeTransitGatewaysRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewaysWithContext is the same as DescribeTransitGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewaysWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, opts ...request.Option) (*DescribeTransitGatewaysOutput, error) { + req, out := c.DescribeTransitGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewaysPages iterates over the pages of a DescribeTransitGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGateways operation. +// pageNum := 0 +// err := client.DescribeTransitGatewaysPages(params, +// func(page *ec2.DescribeTransitGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewaysPages(input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool) error { + return c.DescribeTransitGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewaysPagesWithContext same as DescribeTransitGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewaysPagesWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVolumeAttribute = "DescribeVolumeAttribute" + +// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVolumeAttribute for more information on using the DescribeVolumeAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumeAttributeRequest method. +// req, resp := client.DescribeVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute +func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumeAttributeInput{} + } + + output = &DescribeVolumeAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumeAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified volume. You can specify +// only one attribute at a time. +// +// For more information about EBS volumes, see Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumeAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + return out, req.Send() +} + +// DescribeVolumeAttributeWithContext is the same as DescribeVolumeAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumeAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeAttributeWithContext(ctx aws.Context, input *DescribeVolumeAttributeInput, opts ...request.Option) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeVolumeStatus = "DescribeVolumeStatus" + +// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVolumeStatus for more information on using the DescribeVolumeStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumeStatusRequest method. // req, resp := client.DescribeVolumeStatusRequest(params) // // err := req.Send() @@ -19078,7 +20545,7 @@ func (c *EC2) DescribeVolumeStatusWithContext(ctx aws.Context, input *DescribeVo // // Example iterating over at most 3 pages of a DescribeVolumeStatus operation. // pageNum := 0 // err := client.DescribeVolumeStatusPages(params, -// func(page *DescribeVolumeStatusOutput, lastPage bool) bool { +// func(page *ec2.DescribeVolumeStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19110,10 +20577,12 @@ func (c *EC2) DescribeVolumeStatusPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVolumeStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVolumeStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -19218,7 +20687,7 @@ func (c *EC2) DescribeVolumesWithContext(ctx aws.Context, input *DescribeVolumes // // Example iterating over at most 3 pages of a DescribeVolumes operation. // pageNum := 0 // err := client.DescribeVolumesPages(params, -// func(page *DescribeVolumesOutput, lastPage bool) bool { +// func(page *ec2.DescribeVolumesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19250,10 +20719,12 @@ func (c *EC2) DescribeVolumesPagesWithContext(ctx aws.Context, input *DescribeVo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVolumesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVolumesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -19361,7 +20832,7 @@ func (c *EC2) DescribeVolumesModificationsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeVolumesModifications operation. // pageNum := 0 // err := client.DescribeVolumesModificationsPages(params, -// func(page *DescribeVolumesModificationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVolumesModificationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19393,10 +20864,12 @@ func (c *EC2) DescribeVolumesModificationsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVolumesModificationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVolumesModificationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -19646,7 +21119,7 @@ func (c *EC2) DescribeVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeVpcClassicLinkDnsSupport operation. 
// pageNum := 0 // err := client.DescribeVpcClassicLinkDnsSupportPages(params, -// func(page *DescribeVpcClassicLinkDnsSupportOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcClassicLinkDnsSupportOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19678,10 +21151,12 @@ func (c *EC2) DescribeVpcClassicLinkDnsSupportPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcClassicLinkDnsSupportOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcClassicLinkDnsSupportOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -19777,7 +21252,7 @@ func (c *EC2) DescribeVpcEndpointConnectionNotificationsWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a DescribeVpcEndpointConnectionNotifications operation. // pageNum := 0 // err := client.DescribeVpcEndpointConnectionNotificationsPages(params, -// func(page *DescribeVpcEndpointConnectionNotificationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcEndpointConnectionNotificationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19809,10 +21284,12 @@ func (c *EC2) DescribeVpcEndpointConnectionNotificationsPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcEndpointConnectionNotificationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointConnectionNotificationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -19908,7 +21385,7 @@ func (c *EC2) DescribeVpcEndpointConnectionsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a DescribeVpcEndpointConnections operation. // pageNum := 0 // err := client.DescribeVpcEndpointConnectionsPages(params, -// func(page *DescribeVpcEndpointConnectionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcEndpointConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -19940,10 +21417,12 @@ func (c *EC2) DescribeVpcEndpointConnectionsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcEndpointConnectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointConnectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -20038,7 +21517,7 @@ func (c *EC2) DescribeVpcEndpointServiceConfigurationsWithContext(ctx aws.Contex // // Example iterating over at most 3 pages of a DescribeVpcEndpointServiceConfigurations operation. // pageNum := 0 // err := client.DescribeVpcEndpointServiceConfigurationsPages(params, -// func(page *DescribeVpcEndpointServiceConfigurationsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcEndpointServiceConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -20070,10 +21549,12 @@ func (c *EC2) DescribeVpcEndpointServiceConfigurationsPagesWithContext(ctx aws.C }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcEndpointServiceConfigurationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointServiceConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -20169,7 +21650,7 @@ func (c *EC2) DescribeVpcEndpointServicePermissionsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeVpcEndpointServicePermissions operation. 
// pageNum := 0 // err := client.DescribeVpcEndpointServicePermissionsPages(params, -// func(page *DescribeVpcEndpointServicePermissionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcEndpointServicePermissionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -20201,10 +21682,12 @@ func (c *EC2) DescribeVpcEndpointServicePermissionsPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcEndpointServicePermissionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointServicePermissionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -20373,7 +21856,7 @@ func (c *EC2) DescribeVpcEndpointsWithContext(ctx aws.Context, input *DescribeVp // // Example iterating over at most 3 pages of a DescribeVpcEndpoints operation. // pageNum := 0 // err := client.DescribeVpcEndpointsPages(params, -// func(page *DescribeVpcEndpointsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -20405,10 +21888,12 @@ func (c *EC2) DescribeVpcEndpointsPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -20503,7 +21988,7 @@ func (c *EC2) DescribeVpcPeeringConnectionsWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeVpcPeeringConnections operation. // pageNum := 0 // err := client.DescribeVpcPeeringConnectionsPages(params, -// func(page *DescribeVpcPeeringConnectionsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcPeeringConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -20535,10 +22020,12 @@ func (c *EC2) DescribeVpcPeeringConnectionsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcPeeringConnectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcPeeringConnectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -20633,7 +22120,7 @@ func (c *EC2) DescribeVpcsWithContext(ctx aws.Context, input *DescribeVpcsInput, // // Example iterating over at most 3 pages of a DescribeVpcs operation. // pageNum := 0 // err := client.DescribeVpcsPages(params, -// func(page *DescribeVpcsOutput, lastPage bool) bool { +// func(page *ec2.DescribeVpcsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -20665,10 +22152,12 @@ func (c *EC2) DescribeVpcsPagesWithContext(ctx aws.Context, input *DescribeVpcsI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVpcsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVpcsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -21223,6 +22712,89 @@ func (c *EC2) DetachVpnGatewayWithContext(ctx aws.Context, input *DetachVpnGatew return out, req.Send() } +const opDisableEbsEncryptionByDefault = "DisableEbsEncryptionByDefault" + +// DisableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the DisableEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableEbsEncryptionByDefault for more information on using the DisableEbsEncryptionByDefault +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableEbsEncryptionByDefaultRequest method. +// req, resp := client.DisableEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableEbsEncryptionByDefault +func (c *EC2) DisableEbsEncryptionByDefaultRequest(input *DisableEbsEncryptionByDefaultInput) (req *request.Request, output *DisableEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opDisableEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableEbsEncryptionByDefaultInput{} + } + + output = &DisableEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Disables EBS encryption by default for your account in the current Region. +// +// After you disable encryption by default, you can still create encrypted volumes +// by enabling encryption when you create each volume. +// +// Disabling encryption by default does not change the encryption status of +// your existing volumes. +// +// For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableEbsEncryptionByDefault +func (c *EC2) DisableEbsEncryptionByDefault(input *DisableEbsEncryptionByDefaultInput) (*DisableEbsEncryptionByDefaultOutput, error) { + req, out := c.DisableEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// DisableEbsEncryptionByDefaultWithContext is the same as DisableEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See DisableEbsEncryptionByDefault for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableEbsEncryptionByDefaultWithContext(ctx aws.Context, input *DisableEbsEncryptionByDefaultInput, opts ...request.Option) (*DisableEbsEncryptionByDefaultOutput, error) { + req, out := c.DisableEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDisableTransitGatewayRouteTablePropagation = "DisableTransitGatewayRouteTablePropagation" // DisableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the @@ -21819,7 +23391,7 @@ func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) // // After you perform this action, the subnet no longer uses the routes in the // route table. Instead, it uses the routes in the VPC's main route table. For -// more information about route tables, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -22080,6 +23652,96 @@ func (c *EC2) DisassociateVpcCidrBlockWithContext(ctx aws.Context, input *Disass return out, req.Send() } +const opEnableEbsEncryptionByDefault = "EnableEbsEncryptionByDefault" + +// EnableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the EnableEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableEbsEncryptionByDefault for more information on using the EnableEbsEncryptionByDefault +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableEbsEncryptionByDefaultRequest method. +// req, resp := client.EnableEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableEbsEncryptionByDefault +func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDefaultInput) (req *request.Request, output *EnableEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opEnableEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableEbsEncryptionByDefaultInput{} + } + + output = &EnableEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Enables EBS encryption by default for your account in the current Region. +// +// After you enable encryption by default, the EBS volumes that you create are +// are always encrypted, either using the default CMK or the CMK that you specified +// when you created each volume. For more information, see Amazon EBS Encryption +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId +// or ResetEbsDefaultKmsKeyId. +// +// Enabling encryption by default has no effect on the encryption status of +// your existing volumes. 
+// +// After you enable encryption by default, you can no longer launch instances +// using instance types that do not support encryption. For more information, +// see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableEbsEncryptionByDefault +func (c *EC2) EnableEbsEncryptionByDefault(input *EnableEbsEncryptionByDefaultInput) (*EnableEbsEncryptionByDefaultOutput, error) { + req, out := c.EnableEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// EnableEbsEncryptionByDefaultWithContext is the same as EnableEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See EnableEbsEncryptionByDefault for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableEbsEncryptionByDefaultWithContext(ctx aws.Context, input *EnableEbsEncryptionByDefaultInput, opts ...request.Option) (*EnableEbsEncryptionByDefaultOutput, error) { + req, out := c.EnableEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opEnableTransitGatewayRouteTablePropagation = "EnableTransitGatewayRouteTablePropagation" // EnableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the @@ -22619,6 +24281,82 @@ func (c *EC2) ExportClientVpnClientConfigurationWithContext(ctx aws.Context, inp return out, req.Send() } +const opExportImage = "ExportImage" + +// ExportImageRequest generates a "aws/request.Request" representing the +// client's request for the ExportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportImage for more information on using the ExportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportImageRequest method. 
+// req, resp := client.ExportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, output *ExportImageOutput) { + op := &request.Operation{ + Name: opExportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportImageInput{} + } + + output = &ExportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportImage API operation for Amazon Elastic Compute Cloud. +// +// Exports an Amazon Machine Image (AMI) to a VM file. For more information, +// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImage(input *ExportImageInput) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + return out, req.Send() +} + +// ExportImageWithContext is the same as ExportImage with the addition of +// the ability to pass a context and additional request options. +// +// See ExportImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportImageWithContext(ctx aws.Context, input *ExportImageInput, opts ...request.Option) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opExportTransitGatewayRoutes = "ExportTransitGatewayRoutes" // ExportTransitGatewayRoutesRequest generates a "aws/request.Request" representing the @@ -22695,6 +24433,84 @@ func (c *EC2) ExportTransitGatewayRoutesWithContext(ctx aws.Context, input *Expo return out, req.Send() } +const opGetCapacityReservationUsage = "GetCapacityReservationUsage" + +// GetCapacityReservationUsageRequest generates a "aws/request.Request" representing the +// client's request for the GetCapacityReservationUsage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCapacityReservationUsage for more information on using the GetCapacityReservationUsage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCapacityReservationUsageRequest method. 
+// req, resp := client.GetCapacityReservationUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCapacityReservationUsage +func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUsageInput) (req *request.Request, output *GetCapacityReservationUsageOutput) { + op := &request.Operation{ + Name: opGetCapacityReservationUsage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCapacityReservationUsageInput{} + } + + output = &GetCapacityReservationUsageOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCapacityReservationUsage API operation for Amazon Elastic Compute Cloud. +// +// Gets usage information about a Capacity Reservation. If the Capacity Reservation +// is shared, it shows usage information for the Capacity Reservation owner +// and each AWS account that is currently using the shared capacity. If the +// Capacity Reservation is not shared, it shows only the Capacity Reservation +// owner's usage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetCapacityReservationUsage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCapacityReservationUsage +func (c *EC2) GetCapacityReservationUsage(input *GetCapacityReservationUsageInput) (*GetCapacityReservationUsageOutput, error) { + req, out := c.GetCapacityReservationUsageRequest(input) + return out, req.Send() +} + +// GetCapacityReservationUsageWithContext is the same as GetCapacityReservationUsage with the addition of +// the ability to pass a context and additional request options. +// +// See GetCapacityReservationUsage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetCapacityReservationUsageWithContext(ctx aws.Context, input *GetCapacityReservationUsageInput, opts ...request.Option) (*GetCapacityReservationUsageOutput, error) { + req, out := c.GetCapacityReservationUsageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetConsoleOutput = "GetConsoleOutput" // GetConsoleOutputRequest generates a "aws/request.Request" representing the @@ -22861,6 +24677,163 @@ func (c *EC2) GetConsoleScreenshotWithContext(ctx aws.Context, input *GetConsole return out, req.Send() } +const opGetEbsDefaultKmsKeyId = "GetEbsDefaultKmsKeyId" + +// GetEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the GetEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEbsDefaultKmsKeyId for more information on using the GetEbsDefaultKmsKeyId +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEbsDefaultKmsKeyIdRequest method. +// req, resp := client.GetEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsDefaultKmsKeyId +func (c *EC2) GetEbsDefaultKmsKeyIdRequest(input *GetEbsDefaultKmsKeyIdInput) (req *request.Request, output *GetEbsDefaultKmsKeyIdOutput) { + op := &request.Operation{ + Name: opGetEbsDefaultKmsKeyId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEbsDefaultKmsKeyIdInput{} + } + + output = &GetEbsDefaultKmsKeyIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Describes the default customer master key (CMK) for EBS encryption by default +// for your account in this Region. You can change the default CMK for encryption +// by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId. +// +// For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetEbsDefaultKmsKeyId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsDefaultKmsKeyId +func (c *EC2) GetEbsDefaultKmsKeyId(input *GetEbsDefaultKmsKeyIdInput) (*GetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.GetEbsDefaultKmsKeyIdRequest(input) + return out, req.Send() +} + +// GetEbsDefaultKmsKeyIdWithContext is the same as GetEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. +// +// See GetEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *GetEbsDefaultKmsKeyIdInput, opts ...request.Option) (*GetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.GetEbsDefaultKmsKeyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetEbsEncryptionByDefault = "GetEbsEncryptionByDefault" + +// GetEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the GetEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEbsEncryptionByDefault for more information on using the GetEbsEncryptionByDefault +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEbsEncryptionByDefaultRequest method. +// req, resp := client.GetEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsEncryptionByDefault +func (c *EC2) GetEbsEncryptionByDefaultRequest(input *GetEbsEncryptionByDefaultInput) (req *request.Request, output *GetEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opGetEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEbsEncryptionByDefaultInput{} + } + + output = &GetEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Describes whether EBS encryption by default is enabled for your account in +// the current Region. +// +// For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsEncryptionByDefault +func (c *EC2) GetEbsEncryptionByDefault(input *GetEbsEncryptionByDefaultInput) (*GetEbsEncryptionByDefaultOutput, error) { + req, out := c.GetEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// GetEbsEncryptionByDefaultWithContext is the same as GetEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See GetEbsEncryptionByDefault for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEbsEncryptionByDefaultInput, opts ...request.Option) (*GetEbsEncryptionByDefaultOutput, error) { + req, out := c.GetEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the @@ -23275,7 +25248,7 @@ func (c *EC2) GetTransitGatewayAttachmentPropagationsWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a GetTransitGatewayAttachmentPropagations operation. 
// pageNum := 0 // err := client.GetTransitGatewayAttachmentPropagationsPages(params, -// func(page *GetTransitGatewayAttachmentPropagationsOutput, lastPage bool) bool { +// func(page *ec2.GetTransitGatewayAttachmentPropagationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -23307,10 +25280,12 @@ func (c *EC2) GetTransitGatewayAttachmentPropagationsPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTransitGatewayAttachmentPropagationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayAttachmentPropagationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -23406,7 +25381,7 @@ func (c *EC2) GetTransitGatewayRouteTableAssociationsWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a GetTransitGatewayRouteTableAssociations operation. // pageNum := 0 // err := client.GetTransitGatewayRouteTableAssociationsPages(params, -// func(page *GetTransitGatewayRouteTableAssociationsOutput, lastPage bool) bool { +// func(page *ec2.GetTransitGatewayRouteTableAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -23438,10 +25413,12 @@ func (c *EC2) GetTransitGatewayRouteTableAssociationsPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTransitGatewayRouteTableAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayRouteTableAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -23537,7 +25514,7 @@ func (c *EC2) GetTransitGatewayRouteTablePropagationsWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a GetTransitGatewayRouteTablePropagations operation. // pageNum := 0 // err := client.GetTransitGatewayRouteTablePropagationsPages(params, -// func(page *GetTransitGatewayRouteTablePropagationsOutput, lastPage bool) bool { +// func(page *ec2.GetTransitGatewayRouteTablePropagationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -23569,10 +25546,12 @@ func (c *EC2) GetTransitGatewayRouteTablePropagationsPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTransitGatewayRouteTablePropagationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayRouteTablePropagationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -24200,6 +26179,92 @@ func (c *EC2) ModifyClientVpnEndpointWithContext(ctx aws.Context, input *ModifyC return out, req.Send() } +const opModifyEbsDefaultKmsKeyId = "ModifyEbsDefaultKmsKeyId" + +// ModifyEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyEbsDefaultKmsKeyId for more information on using the ModifyEbsDefaultKmsKeyId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyEbsDefaultKmsKeyIdRequest method. 
+// req, resp := client.ModifyEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyEbsDefaultKmsKeyId +func (c *EC2) ModifyEbsDefaultKmsKeyIdRequest(input *ModifyEbsDefaultKmsKeyIdInput) (req *request.Request, output *ModifyEbsDefaultKmsKeyIdOutput) { + op := &request.Operation{ + Name: opModifyEbsDefaultKmsKeyId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEbsDefaultKmsKeyIdInput{} + } + + output = &ModifyEbsDefaultKmsKeyIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Changes the default customer master key (CMK) for EBS encryption by default +// for your account in this Region. +// +// AWS creates a unique AWS managed CMK in each Region for use with encryption +// by default. If you change the default CMK to a customer managed CMK, it is +// used instead of the AWS managed CMK. To reset the default CMK to the AWS +// managed CMK for EBS, use ResetEbsDefaultKmsKeyId. +// +// If you delete or disable the customer managed CMK that you specified for +// use with encryption by default, your instances will fail to launch. +// +// For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyEbsDefaultKmsKeyId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyEbsDefaultKmsKeyId +func (c *EC2) ModifyEbsDefaultKmsKeyId(input *ModifyEbsDefaultKmsKeyIdInput) (*ModifyEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ModifyEbsDefaultKmsKeyIdRequest(input) + return out, req.Send() +} + +// ModifyEbsDefaultKmsKeyIdWithContext is the same as ModifyEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *ModifyEbsDefaultKmsKeyIdInput, opts ...request.Option) (*ModifyEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ModifyEbsDefaultKmsKeyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyFleet = "ModifyFleet" // ModifyFleetRequest generates a "aws/request.Request" representing the @@ -24246,8 +26311,35 @@ func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, // // Modifies the specified EC2 Fleet. // +// You can only modify an EC2 Fleet request of type maintain. +// // While the EC2 Fleet is being modified, it is in the modifying state. // +// To scale up your EC2 Fleet, increase its target capacity. 
The EC2 Fleet launches +// the additional Spot Instances according to the allocation strategy for the +// EC2 Fleet request. If the allocation strategy is lowest-price, the EC2 Fleet +// launches instances using the Spot Instance pool with the lowest price. If +// the allocation strategy is diversified, the EC2 Fleet distributes the instances +// across the Spot Instance pools. If the allocation strategy is capacity-optimized, +// EC2 Fleet launches instances from Spot Instance pools with optimal capacity +// for the number of instances that are launching. +// +// To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 +// Fleet cancels any open requests that exceed the new target capacity. You +// can request that the EC2 Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowest-price, the EC2 Fleet terminates the instances with the highest +// price per unit. If the allocation strategy is capacity-optimized, the EC2 +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the EC2 Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. +// +// If you are finished with your EC2 Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -24475,7 +26567,7 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re // ModifyIdFormat API operation for Amazon Elastic Compute Cloud. // -// Modifies the ID format for the specified resource on a per-region basis. +// Modifies the ID format for the specified resource on a per-Region basis. // You can specify that resources should receive longer IDs (17-character IDs) // when they are created. // @@ -25398,9 +27490,9 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // // Adds or removes permission settings for the specified snapshot. You may add // or remove specified AWS account IDs from a snapshot's list of create volume -// permissions, but you cannot do both in a single API call. If you need to -// both add and remove account IDs for a snapshot, you must use multiple API -// calls. +// permissions, but you cannot do both in a single operation. If you need to +// both add and remove account IDs for a snapshot, you must use multiple operations. +// You can make up to 500 modifications to a snapshot in a single operation. // // Encrypted snapshots and snapshots with AWS Marketplace product codes cannot // be made public. Snapshots encrypted with your default CMK cannot be shared @@ -25484,24 +27576,31 @@ func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) // // Modifies the specified Spot Fleet request. // +// You can only modify a Spot Fleet request of type maintain. +// // While the Spot Fleet request is being modified, it is in the modifying state. // // To scale up your Spot Fleet, increase its target capacity. The Spot Fleet // launches the additional Spot Instances according to the allocation strategy // for the Spot Fleet request. 
If the allocation strategy is lowestPrice, the -// Spot Fleet launches instances using the Spot pool with the lowest price. -// If the allocation strategy is diversified, the Spot Fleet distributes the -// instances across the Spot pools. +// Spot Fleet launches instances using the Spot Instance pool with the lowest +// price. If the allocation strategy is diversified, the Spot Fleet distributes +// the instances across the Spot Instance pools. If the allocation strategy +// is capacityOptimized, Spot Fleet launches instances from Spot Instance pools +// with optimal capacity for the number of instances that are launching. // // To scale down your Spot Fleet, decrease its target capacity. First, the Spot // Fleet cancels any open requests that exceed the new target capacity. You // can request that the Spot Fleet terminate Spot Instances until the size of // the fleet no longer exceeds the new target capacity. If the allocation strategy // is lowestPrice, the Spot Fleet terminates the instances with the highest -// price per unit. If the allocation strategy is diversified, the Spot Fleet -// terminates instances across the Spot pools. Alternatively, you can request -// that the Spot Fleet keep the fleet at its current size, but not replace any -// Spot Instances that are interrupted or that you terminate manually. +// price per unit. If the allocation strategy is capacityOptimized, the Spot +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the Spot Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. // // If you are finished with your Spot Fleet for now, but will use it again later, // you can set the target capacity to 0. @@ -25609,6 +27708,241 @@ func (c *EC2) ModifySubnetAttributeWithContext(ctx aws.Context, input *ModifySub return out, req.Send() } +const opModifyTrafficMirrorFilterNetworkServices = "ModifyTrafficMirrorFilterNetworkServices" + +// ModifyTrafficMirrorFilterNetworkServicesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorFilterNetworkServices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorFilterNetworkServices for more information on using the ModifyTrafficMirrorFilterNetworkServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorFilterNetworkServicesRequest method. 
+// req, resp := client.ModifyTrafficMirrorFilterNetworkServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterNetworkServices +func (c *EC2) ModifyTrafficMirrorFilterNetworkServicesRequest(input *ModifyTrafficMirrorFilterNetworkServicesInput) (req *request.Request, output *ModifyTrafficMirrorFilterNetworkServicesOutput) { +	op := &request.Operation{ +		Name:       opModifyTrafficMirrorFilterNetworkServices, +		HTTPMethod: "POST", +		HTTPPath:   "/", +	} + +	if input == nil { +		input = &ModifyTrafficMirrorFilterNetworkServicesInput{} +	} + +	output = &ModifyTrafficMirrorFilterNetworkServicesOutput{} +	req = c.newRequest(op, input, output) +	return +} + +// ModifyTrafficMirrorFilterNetworkServices API operation for Amazon Elastic Compute Cloud. +// +// Allows or restricts mirroring network services. +// +// By default, Amazon DNS network services are not eligible for Traffic Mirror. +// Use AddNetworkServices to add network services to a Traffic Mirror filter. +// When a network service is added to the Traffic Mirror filter, all traffic +// related to that network service will be mirrored. When you no longer want +// to mirror network services, use RemoveNetworkServices to remove the network +// services from the Traffic Mirror filter. +// +// For information about filter rule properties, see Network Services (https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) +// in the Traffic Mirroring User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorFilterNetworkServices for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterNetworkServices +func (c *EC2) ModifyTrafficMirrorFilterNetworkServices(input *ModifyTrafficMirrorFilterNetworkServicesInput) (*ModifyTrafficMirrorFilterNetworkServicesOutput, error) { +	req, out := c.ModifyTrafficMirrorFilterNetworkServicesRequest(input) +	return out, req.Send() +} + +// ModifyTrafficMirrorFilterNetworkServicesWithContext is the same as ModifyTrafficMirrorFilterNetworkServices with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorFilterNetworkServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorFilterNetworkServicesWithContext(ctx aws.Context, input *ModifyTrafficMirrorFilterNetworkServicesInput, opts ...request.Option) (*ModifyTrafficMirrorFilterNetworkServicesOutput, error) { +	req, out := c.ModifyTrafficMirrorFilterNetworkServicesRequest(input) +	req.SetContext(ctx) +	req.ApplyOptions(opts...) +	return out, req.Send() +} + +const opModifyTrafficMirrorFilterRule = "ModifyTrafficMirrorFilterRule" + +// ModifyTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorFilterRule operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorFilterRule for more information on using the ModifyTrafficMirrorFilterRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorFilterRuleRequest method. +// req, resp := client.ModifyTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterRule +func (c *EC2) ModifyTrafficMirrorFilterRuleRequest(input *ModifyTrafficMirrorFilterRuleInput) (req *request.Request, output *ModifyTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opModifyTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTrafficMirrorFilterRuleInput{} + } + + output = &ModifyTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified Traffic Mirror rule. +// +// DestinationCidrBlock and SourceCidrBlock must both be an IPv4 range or an +// IPv6 range. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorFilterRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterRule +func (c *EC2) ModifyTrafficMirrorFilterRule(input *ModifyTrafficMirrorFilterRuleInput) (*ModifyTrafficMirrorFilterRuleOutput, error) { + req, out := c.ModifyTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// ModifyTrafficMirrorFilterRuleWithContext is the same as ModifyTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *ModifyTrafficMirrorFilterRuleInput, opts ...request.Option) (*ModifyTrafficMirrorFilterRuleOutput, error) { + req, out := c.ModifyTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTrafficMirrorSession = "ModifyTrafficMirrorSession" + +// ModifyTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorSession for more information on using the ModifyTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorSessionRequest method. +// req, resp := client.ModifyTrafficMirrorSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorSession +func (c *EC2) ModifyTrafficMirrorSessionRequest(input *ModifyTrafficMirrorSessionInput) (req *request.Request, output *ModifyTrafficMirrorSessionOutput) { + op := &request.Operation{ + Name: opModifyTrafficMirrorSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTrafficMirrorSessionInput{} + } + + output = &ModifyTrafficMirrorSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTrafficMirrorSession API operation for Amazon Elastic Compute Cloud. +// +// Modifies a Traffic Mirror session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorSession +func (c *EC2) ModifyTrafficMirrorSession(input *ModifyTrafficMirrorSessionInput) (*ModifyTrafficMirrorSessionOutput, error) { + req, out := c.ModifyTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// ModifyTrafficMirrorSessionWithContext is the same as ModifyTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorSessionWithContext(ctx aws.Context, input *ModifyTrafficMirrorSessionInput, opts ...request.Option) (*ModifyTrafficMirrorSessionOutput, error) { + req, out := c.ModifyTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyTransitGatewayVpcAttachment = "ModifyTransitGatewayVpcAttachment" // ModifyTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the @@ -25746,9 +28080,9 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques // You can use CloudWatch Events to check the status of a modification to an // EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch // Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). -// You can also track the status of a modification using the DescribeVolumesModifications -// API. 
For information about tracking status changes using either method, see -// Monitoring Volume Modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// You can also track the status of a modification using DescribeVolumesModifications. +// For information about tracking status changes using either method, see Monitoring +// Volume Modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). // // With previous-generation instance types, resizing an EBS volume may require // detaching and reattaching the volume or stopping and restarting the instance. @@ -25993,7 +28327,7 @@ func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *requ // // Modifies attributes of a specified VPC endpoint. The attributes that you // can modify depend on the type of VPC endpoint (interface or gateway). For -// more information, see VPC Endpoints (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-endpoints.html) +// more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -26221,7 +28555,7 @@ func (c *EC2) ModifyVpcEndpointServicePermissionsRequest(input *ModifyVpcEndpoin // ModifyVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud. // -// Modifies the permissions for your VPC endpoint service (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/endpoint-service.html). +// Modifies the permissions for your VPC endpoint service (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). // You can add or remove permissions for service consumers (IAM users, IAM roles, // and AWS accounts) to connect to your endpoint service. // @@ -26318,11 +28652,11 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo // If the peered VPCs are in the same AWS account, you can enable DNS resolution // for queries from the local VPC. This ensures that queries from the local // VPC resolve to private IP addresses in the peer VPC. This option is not available -// if the peered VPCs are in different AWS accounts or different regions. For +// if the peered VPCs are in different AWS accounts or different Regions. For // peered VPCs in different AWS accounts, each AWS account owner must initiate // a separate request to modify the peering connection options. For inter-region -// peering connections, you must use the region for the requester VPC to modify -// the requester VPC peering options and the region for the accepter VPC to +// peering connections, you must use the Region for the requester VPC to modify +// the requester VPC peering options and the Region for the accepter VPC to // modify the accepter VPC peering options. To verify which VPCs are the accepter // and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections // command. @@ -26482,6 +28816,47 @@ func (c *EC2) ModifyVpnConnectionRequest(input *ModifyVpnConnectionInput) (req * // ModifyVpnConnection API operation for Amazon Elastic Compute Cloud. // +// Modifies the target gateway of an AWS Site-to-Site VPN connection. 
The following +// migration options are available: +// +// * An existing virtual private gateway to a new virtual private gateway +// +// * An existing virtual private gateway to a transit gateway +// +// * An existing transit gateway to a new transit gateway +// +// * An existing transit gateway to a virtual private gateway +// +// Before you perform the migration to the new gateway, you must configure the +// new gateway. Use CreateVpnGateway to create a virtual private gateway, or +// CreateTransitGateway to create a transit gateway. +// +// This step is required when you migrate from a virtual private gateway with +// static routes to a transit gateway. +// +// You must delete the static routes before you migrate to the new gateway. +// +// Keep a copy of the static route before you delete it. You will need to add +// back these routes to the transit gateway after the VPN connection migration +// is complete. +// +// After you migrate to the new gateway, you might need to modify your VPC route +// table. Use CreateRoute and DeleteRoute to make the changes described in VPN +// Gateway Target Modification Required VPC Route Table Updates (https://docs.aws.amazon.com/vpn/latest/s2svpn/modify-vpn-target.html#step-update-routing) +// in the AWS Site-to-Site VPN User Guide. +// +// When the new gateway is a transit gateway, modify the transit gateway route +// table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. +// Use CreateTransitGatewayRoute to add the routes. +// +// If you deleted VPN static routes, you must add the static routes to the transit +// gateway route table. +// +// After you perform this operation, the AWS VPN endpoint's IP addresses on +// the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN +// connection will be temporarily unavailable for approximately 10 minutes while +// we provision the new endpoints. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26510,6 +28885,158 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" + +// ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelCertificate for more information on using the ModifyVpnTunnelCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelCertificateRequest method.
+// req, resp := client.ModifyVpnTunnelCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificateRequest(input *ModifyVpnTunnelCertificateInput) (req *request.Request, output *ModifyVpnTunnelCertificateOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelCertificateInput{} + } + + output = &ModifyVpnTunnelCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelCertificate API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPN tunnel endpoint certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelCertificate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificate(input *ModifyVpnTunnelCertificateInput) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelCertificateWithContext is the same as ModifyVpnTunnelCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelCertificateWithContext(ctx aws.Context, input *ModifyVpnTunnelCertificateInput, opts ...request.Option) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpnTunnelOptions = "ModifyVpnTunnelOptions" + +// ModifyVpnTunnelOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelOptions for more information on using the ModifyVpnTunnelOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelOptionsRequest method. 
+// req, resp := client.ModifyVpnTunnelOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelOptions +func (c *EC2) ModifyVpnTunnelOptionsRequest(input *ModifyVpnTunnelOptionsInput) (req *request.Request, output *ModifyVpnTunnelOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelOptionsInput{} + } + + output = &ModifyVpnTunnelOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelOptions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the options for a VPN tunnel in an AWS Site-to-Site VPN connection. +// You can modify multiple options for a tunnel in a single request, but you +// can only modify one tunnel at a time. For more information, see Site-to-Site +// VPN Tunnel Options for Your Site-to-Site VPN Connection (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPNTunnels.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelOptions +func (c *EC2) ModifyVpnTunnelOptions(input *ModifyVpnTunnelOptionsInput) (*ModifyVpnTunnelOptionsOutput, error) { + req, out := c.ModifyVpnTunnelOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelOptionsWithContext is the same as ModifyVpnTunnelOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelOptionsWithContext(ctx aws.Context, input *ModifyVpnTunnelOptionsInput, opts ...request.Option) (*ModifyVpnTunnelOptionsOutput, error) { + req, out := c.ModifyVpnTunnelOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -26887,6 +29414,9 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn // offerings that match your specifications. After you've purchased a Reserved // Instance, you can check for your new Reserved Instance with DescribeReservedInstances. // +// To queue a purchase for a future date and time, specify a purchase time. +// If you do not specify a purchase time, the default is the current time. +// // For more information, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) // and Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon Elastic Compute Cloud User Guide. 
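// The preceding hunk documents queued Reserved Instance purchases via a
// purchase time on PurchaseReservedInstancesOffering. The sketch below shows
// roughly how a caller might queue such a purchase with this SDK revision; it
// is illustrative only, assuming the v1.25.x input shape vendored here, and
// the Region, offering ID, and purchase date are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Session from the default credential chain; the Region is a placeholder.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ec2.New(sess)

	// With no PurchaseTime the purchase happens now (the default is the
	// current time); a future PurchaseTime queues the purchase for that date.
	out, err := svc.PurchaseReservedInstancesOffering(&ec2.PurchaseReservedInstancesOfferingInput{
		ReservedInstancesOfferingId: aws.String("438012d3-example"), // placeholder offering ID
		InstanceCount:               aws.Int64(1),
		PurchaseTime:                aws.Time(time.Now().AddDate(0, 1, 0)),
	})
	if err != nil {
		fmt.Println("PurchaseReservedInstancesOffering failed:", err)
		return
	}
	// As the documentation above notes, DescribeReservedInstances can then
	// confirm the new or queued reservation.
	fmt.Println("Reserved Instances ID:", aws.StringValue(out.ReservedInstancesId))
}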
@@ -27148,16 +29678,24 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // You can't register an image where a secondary (non-root) snapshot has AWS // Marketplace product codes. // -// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE -// Linux Enterprise Server (SLES), use the EC2 billing product code associated -// with an AMI to verify the subscription status for package updates. Creating -// an AMI from an EBS snapshot does not maintain this billing code, and instances -// launched from such an AMI are not able to connect to package update infrastructure. -// If you purchase a Reserved Instance offering for one of these Linux distributions -// and launch instances using an AMI that does not contain the required billing -// code, your Reserved Instance is not applied to these instances. +// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) +// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code +// associated with an AMI to verify the subscription status for package updates. +// To create a new AMI for operating systems that require a billing product +// code, do the following: +// +// Launch an instance from an existing AMI with that billing product code. +// +// Customize the instance. // -// To create an AMI for operating systems that require a billing code, see CreateImage. +// Create a new AMI from the instance using CreateImage to preserve the billing +// product code association. +// +// If you purchase a Reserved Instance to apply to an On-Demand Instance that +// was launched from an AMI with a billing product code, make sure that the +// Reserved Instance has the matching billing product code. If you purchase +// a Reserved Instance without the matching billing product code, the Reserved +// Instance will not be applied to the On-Demand Instance. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. @@ -27724,7 +30262,7 @@ func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssoci // // Changes which network ACL a subnet is associated with. By default when you // create a subnet, it's automatically associated with the default network ACL. -// For more information, see Network ACLs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // This is an idempotent operation. @@ -27803,8 +30341,8 @@ func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) // ReplaceNetworkAclEntry API operation for Amazon Elastic Compute Cloud. // // Replaces an entry (rule) in a network ACL. For more information, see Network -// ACLs (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) -// in the Amazon Virtual Private Cloud User Guide. +// ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) in +// the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -27884,7 +30422,7 @@ func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Reques // instance, NAT gateway, VPC peering connection, network interface, or egress-only // internet gateway. 
// -// For more information, see Route Tables (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -27962,7 +30500,7 @@ func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssoci // Changes the route table associated with a given subnet in a VPC. After the // operation completes, the subnet uses the routes in the new route table it's // associated with. For more information about route tables, see Route Tables -// (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // You can also use ReplaceRouteTableAssociation to change which table is the @@ -28205,10 +30743,10 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // You can submit a single request that includes multiple launch specifications // that vary by instance type, AMI, Availability Zone, or subnet. // -// By default, the Spot Fleet requests Spot Instances in the Spot pool where -// the price per unit is the lowest. Each launch specification can include its -// own instance weighting that reflects the value of the instance type to your -// application workload. +// By default, the Spot Fleet requests Spot Instances in the Spot Instance pool +// where the price per unit is the lowest. Each launch specification can include +// its own instance weighting that reflects the value of the instance type to +// your application workload. // // Alternatively, you can specify that the Spot Fleet distribute the target // capacity across the Spot pools included in its launch specifications. By @@ -28327,6 +30865,86 @@ func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpo return out, req.Send() } +const opResetEbsDefaultKmsKeyId = "ResetEbsDefaultKmsKeyId" + +// ResetEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the ResetEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetEbsDefaultKmsKeyId for more information on using the ResetEbsDefaultKmsKeyId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetEbsDefaultKmsKeyIdRequest method. 
+// req, resp := client.ResetEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetEbsDefaultKmsKeyId +func (c *EC2) ResetEbsDefaultKmsKeyIdRequest(input *ResetEbsDefaultKmsKeyIdInput) (req *request.Request, output *ResetEbsDefaultKmsKeyIdOutput) { +	op := &request.Operation{ +		Name:       opResetEbsDefaultKmsKeyId, +		HTTPMethod: "POST", +		HTTPPath:   "/", +	} + +	if input == nil { +		input = &ResetEbsDefaultKmsKeyIdInput{} +	} + +	output = &ResetEbsDefaultKmsKeyIdOutput{} +	req = c.newRequest(op, input, output) +	return +} + +// ResetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Resets the default customer master key (CMK) for EBS encryption for your +// account in this Region to the AWS managed CMK for EBS. +// +// After resetting the default CMK to the AWS managed CMK, you can continue +// to encrypt using a customer managed CMK by specifying it when you create the +// volume. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetEbsDefaultKmsKeyId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetEbsDefaultKmsKeyId +func (c *EC2) ResetEbsDefaultKmsKeyId(input *ResetEbsDefaultKmsKeyIdInput) (*ResetEbsDefaultKmsKeyIdOutput, error) { +	req, out := c.ResetEbsDefaultKmsKeyIdRequest(input) +	return out, req.Send() +} + +// ResetEbsDefaultKmsKeyIdWithContext is the same as ResetEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. +// +// See ResetEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *ResetEbsDefaultKmsKeyIdInput, opts ...request.Option) (*ResetEbsDefaultKmsKeyIdOutput, error) { +	req, out := c.ResetEbsDefaultKmsKeyIdRequest(input) +	req.SetContext(ctx) +	req.ApplyOptions(opts...) +	return out, req.Send() +} + const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" // ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the @@ -29330,6 +31948,98 @@ func (c *EC2) SearchTransitGatewayRoutesWithContext(ctx aws.Context, input *Sear return out, req.Send() } +const opSendDiagnosticInterrupt = "SendDiagnosticInterrupt" + +// SendDiagnosticInterruptRequest generates a "aws/request.Request" representing the +// client's request for the SendDiagnosticInterrupt operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error.
+// +// See SendDiagnosticInterrupt for more information on using the SendDiagnosticInterrupt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendDiagnosticInterruptRequest method. +// req, resp := client.SendDiagnosticInterruptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput) (req *request.Request, output *SendDiagnosticInterruptOutput) { + op := &request.Operation{ + Name: opSendDiagnosticInterrupt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendDiagnosticInterruptInput{} + } + + output = &SendDiagnosticInterruptOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendDiagnosticInterrupt API operation for Amazon Elastic Compute Cloud. +// +// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger +// a kernel panic (on Linux instances), or a blue screen/stop error (on Windows +// instances). For instances based on Intel and AMD processors, the interrupt +// is received as a non-maskable interrupt (NMI). +// +// In general, the operating system crashes and reboots when a kernel panic +// or stop error is triggered. The operating system can also be configured to +// perform diagnostic tasks, such as generating a memory dump file, loading +// a secondary kernel, or obtaining a call trace. +// +// Before sending a diagnostic interrupt to your instance, ensure that its operating +// system is configured to perform the required diagnostic tasks. +// +// For more information about configuring your operating system to generate +// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic +// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) +// (Windows instances). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SendDiagnosticInterrupt for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterrupt(input *SendDiagnosticInterruptInput) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + return out, req.Send() +} + +// SendDiagnosticInterruptWithContext is the same as SendDiagnosticInterrupt with the addition of +// the ability to pass a context and additional request options. +// +// See SendDiagnosticInterrupt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) SendDiagnosticInterruptWithContext(ctx aws.Context, input *SendDiagnosticInterruptInput, opts ...request.Option) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the @@ -30905,7 +33615,7 @@ type AllocateHostsInput struct { // Indicates whether the host accepts any untargeted instance launches that // match its instance type configuration, or if it only accepts Host tenancy // instance launches that specify its unique host ID. For more information, - // see Understanding Instance Placement and Host Affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding) + // see Understanding Instance Placement and Host Affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding) // in the Amazon EC2 User Guide for Linux Instances. // // Default: on @@ -30917,10 +33627,17 @@ type AllocateHostsInput struct { AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` + // Indicates whether to enable or disable host recovery for the Dedicated Host. + // Host recovery is disabled by default. For more information, see Host Recovery + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: off + HostRecovery *string `type:"string" enum:"HostRecovery"` + // Specifies the instance type for which to configure your Dedicated Hosts. // When you specify the instance type, that is the only instance type that you // can launch onto that host. @@ -30984,6 +33701,12 @@ func (s *AllocateHostsInput) SetClientToken(v string) *AllocateHostsInput { return s } +// SetHostRecovery sets the HostRecovery field's value. +func (s *AllocateHostsInput) SetHostRecovery(v string) *AllocateHostsInput { + s.HostRecovery = &v + return s +} + // SetInstanceType sets the InstanceType field's value. func (s *AllocateHostsInput) SetInstanceType(v string) *AllocateHostsInput { s.InstanceType = &v @@ -31328,6 +34051,12 @@ func (s *AssignPrivateIpAddressesInput) SetSecondaryPrivateIpAddressCount(v int6 type AssignPrivateIpAddressesOutput struct { _ struct{} `type:"structure"` + + // The private IP addresses assigned to the network interface. + AssignedPrivateIpAddresses []*AssignedPrivateIpAddress `locationName:"assignedPrivateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` } // String returns the string representation @@ -31340,6 +34069,42 @@ func (s AssignPrivateIpAddressesOutput) GoString() string { return s.String() } +// SetAssignedPrivateIpAddresses sets the AssignedPrivateIpAddresses field's value. 
+func (s *AssignPrivateIpAddressesOutput) SetAssignedPrivateIpAddresses(v []*AssignedPrivateIpAddress) *AssignPrivateIpAddressesOutput { + s.AssignedPrivateIpAddresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AssignPrivateIpAddressesOutput) SetNetworkInterfaceId(v string) *AssignPrivateIpAddressesOutput { + s.NetworkInterfaceId = &v + return s +} + +// Describes the private IP addresses assigned to a network interface. +type AssignedPrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The private IP address assigned to the network interface. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s AssignedPrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignedPrivateIpAddress) GoString() string { + return s.String() +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *AssignedPrivateIpAddress) SetPrivateIpAddress(v string) *AssignedPrivateIpAddress { + s.PrivateIpAddress = &v + return s +} + type AssociateAddressInput struct { _ struct{} `type:"structure"` @@ -31462,6 +34227,10 @@ func (s *AssociateAddressOutput) SetAssociationId(v string) *AssociateAddressOut type AssociateClientVpnTargetNetworkInput struct { _ struct{} `type:"structure"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + // The ID of the Client VPN endpoint. // // ClientVpnEndpointId is a required field @@ -31505,6 +34274,12 @@ func (s *AssociateClientVpnTargetNetworkInput) Validate() error { return nil } +// SetClientToken sets the ClientToken field's value. +func (s *AssociateClientVpnTargetNetworkInput) SetClientToken(v string) *AssociateClientVpnTargetNetworkInput { + s.ClientToken = &v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *AssociateClientVpnTargetNetworkInput) SetClientVpnEndpointId(v string) *AssociateClientVpnTargetNetworkInput { s.ClientVpnEndpointId = &v @@ -32715,6 +35490,10 @@ type AuthorizeClientVpnIngressInput struct { // who successfully establish a VPN connection access to the network. AuthorizeAllGroups *bool `type:"boolean"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + // The ID of the Client VPN endpoint. // // ClientVpnEndpointId is a required field @@ -32774,6 +35553,12 @@ func (s *AuthorizeClientVpnIngressInput) SetAuthorizeAllGroups(v bool) *Authoriz return s } +// SetClientToken sets the ClientToken field's value. +func (s *AuthorizeClientVpnIngressInput) SetClientToken(v string) *AuthorizeClientVpnIngressInput { + s.ClientToken = &v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *AuthorizeClientVpnIngressInput) SetClientVpnEndpointId(v string) *AuthorizeClientVpnIngressInput { s.ClientVpnEndpointId = &v @@ -33117,7 +35902,7 @@ type AvailabilityZone struct { // Any messages about the Availability Zone. 
Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` - // The name of the region. + // The name of the Region. RegionName *string `locationName:"regionName" type:"string"` // The state of the Availability Zone. @@ -33733,7 +36518,6 @@ func (s *CancelCapacityReservationOutput) SetReturn(v bool) *CancelCapacityReser return s } -// Contains the parameters for CancelConversionTask. type CancelConversionTaskInput struct { _ struct{} `type:"structure"` @@ -33807,7 +36591,6 @@ func (s CancelConversionTaskOutput) GoString() string { return s.String() } -// Contains the parameters for CancelExportTask. type CancelExportTaskInput struct { _ struct{} `type:"structure"` @@ -33860,7 +36643,6 @@ func (s CancelExportTaskOutput) GoString() string { return s.String() } -// Contains the parameters for CancelImportTask. type CancelImportTaskInput struct { _ struct{} `type:"structure"` @@ -33905,7 +36687,6 @@ func (s *CancelImportTaskInput) SetImportTaskId(v string) *CancelImportTaskInput return s } -// Contains the output for CancelImportTask. type CancelImportTaskOutput struct { _ struct{} `type:"structure"` @@ -34332,10 +37113,16 @@ type CapacityReservation struct { // The Availability Zone in which the capacity is reserved. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The Availability Zone ID of the Capacity Reservation. + AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + // The remaining capacity. Indicates the number of instances that can be launched // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The Amazon Resource Name (ARN) of the Capacity Reservation. + CapacityReservationArn *string `locationName:"capacityReservationArn" type:"string"` + // The ID of the Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` @@ -34390,17 +37177,20 @@ type CapacityReservation struct { // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // The ID of the AWS account that owns the Capacity Reservation. + OwnerId *string `locationName:"ownerId" type:"string"` + // The current state of the Capacity Reservation. A Capacity Reservation can // be in one of the following states: // // * active - The Capacity Reservation is active and the capacity is available // for your use. // - // * cancelled - The Capacity Reservation expired automatically at the date + // * expired - The Capacity Reservation expired automatically at the date // and time specified in your request. The reserved capacity is no longer // available for your use. // - // * expired - The Capacity Reservation was manually cancelled. The reserved + // * cancelled - The Capacity Reservation was manually cancelled. The reserved // capacity is no longer available for your use. // // * pending - The Capacity Reservation request was successful but the capacity @@ -34424,7 +37214,8 @@ type CapacityReservation struct { // that is dedicated to a single AWS account. Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` - // The number of instances for which the Capacity Reservation reserves capacity. + // The total number of instances for which the Capacity Reservation reserves + // capacity. 
TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -34444,12 +37235,24 @@ func (s *CapacityReservation) SetAvailabilityZone(v string) *CapacityReservation return s } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *CapacityReservation) SetAvailabilityZoneId(v string) *CapacityReservation { + s.AvailabilityZoneId = &v + return s +} + // SetAvailableInstanceCount sets the AvailableInstanceCount field's value. func (s *CapacityReservation) SetAvailableInstanceCount(v int64) *CapacityReservation { s.AvailableInstanceCount = &v return s } +// SetCapacityReservationArn sets the CapacityReservationArn field's value. +func (s *CapacityReservation) SetCapacityReservationArn(v string) *CapacityReservation { + s.CapacityReservationArn = &v + return s +} + // SetCapacityReservationId sets the CapacityReservationId field's value. func (s *CapacityReservation) SetCapacityReservationId(v string) *CapacityReservation { s.CapacityReservationId = &v @@ -34504,6 +37307,12 @@ func (s *CapacityReservation) SetInstanceType(v string) *CapacityReservation { return s } +// SetOwnerId sets the OwnerId field's value. +func (s *CapacityReservation) SetOwnerId(v string) *CapacityReservation { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *CapacityReservation) SetState(v string) *CapacityReservation { s.State = &v @@ -35366,7 +38175,11 @@ type ClientVpnEndpoint struct { // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` - // Indicates whether VPN split tunneling is supported. + // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. SplitTunnel *bool `locationName:"splitTunnel" type:"boolean"` // The current state of the Client VPN endpoint. @@ -36019,7 +38832,7 @@ type CopyFpgaImageInput struct { // SourceFpgaImageId is a required field SourceFpgaImageId *string `type:"string" required:"true"` - // The region that contains the source AFI. + // The Region that contains the source AFI. // // SourceRegion is a required field SourceRegion *string `type:"string" required:"true"` @@ -36119,7 +38932,7 @@ type CopyImageInput struct { // in the Amazon Elastic Compute Cloud User Guide. ClientToken *string `type:"string"` - // A description for the new AMI in the destination region. + // A description for the new AMI in the destination Region. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -36143,33 +38956,26 @@ type CopyImageInput struct { // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted // flag must also be set. // - // The CMK identifier may be provided in any of the following formats: - // - // * Key ID - // - // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the alias - // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, + // or alias ARN. When using an alias name, prefix it with "alias/". For example: // - // * ARN using key ID. 
The ID ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the key - // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // - // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, - // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // * Alias name: alias/ExampleAlias // + // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // AWS parses KmsKeyId asynchronously, meaning that the action you call may // appear to complete even though you provided an invalid identifier. This action // will eventually report failure. // - // The specified CMK must exist in the region that the snapshot is being copied + // The specified CMK must exist in the Region that the snapshot is being copied // to. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // The name of the new AMI in the destination region. + // The name of the new AMI in the destination Region. // // Name is a required field Name *string `type:"string" required:"true"` @@ -36179,7 +38985,7 @@ type CopyImageInput struct { // SourceImageId is a required field SourceImageId *string `type:"string" required:"true"` - // The name of the region that contains the AMI to copy. + // The name of the Region that contains the AMI to copy. // // SourceRegion is a required field SourceRegion *string `type:"string" required:"true"` @@ -36309,41 +39115,32 @@ type CopySnapshotInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the destination snapshot should be encrypted. You can encrypt - // a copy of an unencrypted snapshot, but you cannot use it to create an unencrypted - // copy of an encrypted snapshot. Your default CMK for EBS is used unless you - // specify a non-default AWS Key Management Service (AWS KMS) CMK using KmsKeyId. - // For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // To encrypt a copy of an unencrypted snapshot if encryption by default is + // not enabled, enable encryption using this parameter. Otherwise, omit this + // parameter. Encrypted snapshots are encrypted, even if you omit this parameter + // and encryption by default is not enabled. You cannot set this parameter to + // false. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // An identifier for the AWS Key Management Service (AWS KMS) customer master - // key (CMK) to use when creating the encrypted volume. This parameter is only - // required if you want to use a non-default CMK; if this parameter is not specified, - // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted - // flag must also be set. - // - // The CMK identifier may be provided in any of the following formats: + // The identifier of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use for Amazon EBS encryption. 
If this parameter is not specified, + // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted + // state must be true. // - // * Key ID - // - // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the alias - // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // You can specify the CMK using any of the following: // - // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the key - // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. // + // * Key alias. For example, alias/ExampleAlias. // - // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, - // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS parses KmsKeyId asynchronously, meaning that the action you call may - // appear to complete even though you provided an invalid identifier. The action - // will eventually fail. + // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, + // alias, or ARN that is not valid, the action can appear to complete, but eventually + // fails. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // When you copy an encrypted source snapshot using the Amazon EC2 Query API, @@ -36511,9 +39308,8 @@ type CpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -36543,9 +39339,10 @@ type CreateCapacityReservationInput struct { _ struct{} `type:"structure"` // The Availability Zone in which to create the Capacity Reservation. - // - // AvailabilityZone is a required field - AvailabilityZone *string `type:"string" required:"true"` + AvailabilityZone *string `type:"string"` + + // The ID of the Availability Zone in which to create the Capacity Reservation. + AvailabilityZoneId *string `type:"string"` // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). @@ -36655,9 +39452,6 @@ func (s CreateCapacityReservationInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateCapacityReservationInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateCapacityReservationInput"} - if s.AvailabilityZone == nil { - invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) - } if s.InstanceCount == nil { invalidParams.Add(request.NewErrParamRequired("InstanceCount")) } @@ -36680,6 +39474,12 @@ func (s *CreateCapacityReservationInput) SetAvailabilityZone(v string) *CreateCa return s } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *CreateCapacityReservationInput) SetAvailabilityZoneId(v string) *CreateCapacityReservationInput { + s.AvailabilityZoneId = &v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreateCapacityReservationInput) SetClientToken(v string) *CreateCapacityReservationInput { s.ClientToken = &v @@ -36792,8 +39592,8 @@ type CreateClientVpnEndpointInput struct { // ClientCidrBlock is a required field ClientCidrBlock *string `type:"string" required:"true"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Information about the client connection logging options. @@ -36817,8 +39617,7 @@ type CreateClientVpnEndpointInput struct { // Information about the DNS servers to be used for DNS resolution. A Client // VPN endpoint can have up to two DNS servers. If no DNS server is specified, - // the DNS address of the VPC that is to be associated with Client VPN endpoint - // is used as the DNS server. + // the DNS address configured on the device is used for the DNS server. DnsServers []*string `locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -36833,6 +39632,15 @@ type CreateClientVpnEndpointInput struct { // ServerCertificateArn is a required field ServerCertificateArn *string `type:"string" required:"true"` + // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. + // + // By default, split-tunnel on a VPN endpoint is disabled. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. + SplitTunnel *bool `type:"boolean"` + // The tags to apply to the Client VPN endpoint during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` @@ -36922,6 +39730,12 @@ func (s *CreateClientVpnEndpointInput) SetServerCertificateArn(v string) *Create return s } +// SetSplitTunnel sets the SplitTunnel field's value. +func (s *CreateClientVpnEndpointInput) SetSplitTunnel(v bool) *CreateClientVpnEndpointInput { + s.SplitTunnel = &v + return s +} + // SetTagSpecifications sets the TagSpecifications field's value. 
func (s *CreateClientVpnEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateClientVpnEndpointInput { s.TagSpecifications = v @@ -36978,6 +39792,10 @@ func (s *CreateClientVpnEndpointOutput) SetStatus(v *ClientVpnEndpointStatus) *C type CreateClientVpnRouteInput struct { _ struct{} `type:"structure"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + // The ID of the Client VPN endpoint to which to add the route. // // ClientVpnEndpointId is a required field @@ -37043,6 +39861,12 @@ func (s *CreateClientVpnRouteInput) Validate() error { return nil } +// SetClientToken sets the ClientToken field's value. +func (s *CreateClientVpnRouteInput) SetClientToken(v string) *CreateClientVpnRouteInput { + s.ClientToken = &v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *CreateClientVpnRouteInput) SetClientVpnEndpointId(v string) *CreateClientVpnRouteInput { s.ClientVpnEndpointId = &v @@ -37107,6 +39931,9 @@ type CreateCustomerGatewayInput struct { // BgpAsn is a required field BgpAsn *int64 `type:"integer" required:"true"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -37115,9 +39942,7 @@ type CreateCustomerGatewayInput struct { // The Internet-routable IP address for the customer gateway's outside interface. // The address must be static. - // - // PublicIp is a required field - PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + PublicIp *string `locationName:"IpAddress" type:"string"` // The type of VPN connection that this customer gateway supports (ipsec.1). // @@ -37141,9 +39966,6 @@ func (s *CreateCustomerGatewayInput) Validate() error { if s.BgpAsn == nil { invalidParams.Add(request.NewErrParamRequired("BgpAsn")) } - if s.PublicIp == nil { - invalidParams.Add(request.NewErrParamRequired("PublicIp")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -37160,6 +39982,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { + s.CertificateArn = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput { s.DryRun = &v @@ -37569,7 +40397,7 @@ type CreateFleetInput struct { // LaunchTemplateConfigs is a required field LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationNameList:"item" type:"list" required:"true"` - // The allocation strategy of On-Demand Instances in an EC2 Fleet. + // Describes the configuration of On-Demand Instances in an EC2 Fleet. OnDemandOptions *OnDemandOptionsRequest `type:"structure"` // Indicates whether EC2 Fleet should replace unhealthy instances. 
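The customer gateway hunk above adds CertificateArn and relaxes PublicIp from required to optional, so a certificate-based gateway can omit the static IP. A minimal sketch; the ACM certificate ARN is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateCustomerGateway(&ec2.CreateCustomerGatewayInput{
		BgpAsn:         aws.Int64(65000),
		Type:           aws.String("ipsec.1"),
		CertificateArn: aws.String("arn:aws:acm:us-east-2:111122223333:certificate/12345678-1234-1234-1234-123456789012"), // placeholder
		// PublicIp may now be omitted when a certificate is used.
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.CustomerGateway)
}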
@@ -37584,8 +40412,7 @@ type CreateFleetInput struct { // For information about tagging after launch, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The TotalTargetCapacity, OnDemandTargetCapacity, SpotTargetCapacity, and - // DefaultCapacityType structure. + // The number of units to request. // // TargetCapacitySpecification is a required field TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` @@ -37886,6 +40713,17 @@ type CreateFlowLogsInput struct { // Default: cloud-watch-logs LogDestinationType *string `type:"string" enum:"LogDestinationType"` + // The fields to include in the flow log record, in the order in which they + // should appear. For a list of available fields, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records). + // If you omit this parameter, the flow log is created using the default format. + // If you specify this parameter, you must specify at least one field. + // + // Specify the fields using the ${field-id} format, separated by spaces. For + // the AWS CLI, use single quotation marks (' ') to surround the parameter value. + // + // Only applicable to flow logs that are published to an Amazon S3 bucket. + LogFormat *string `type:"string"` + // The name of a new or existing CloudWatch Logs log group where Amazon EC2 // publishes your flow logs. // @@ -37973,6 +40811,12 @@ func (s *CreateFlowLogsInput) SetLogDestinationType(v string) *CreateFlowLogsInp return s } +// SetLogFormat sets the LogFormat field's value. +func (s *CreateFlowLogsInput) SetLogFormat(v string) *CreateFlowLogsInput { + s.LogFormat = &v + return s +} + // SetLogGroupName sets the LogGroupName field's value. func (s *CreateFlowLogsInput) SetLogGroupName(v string) *CreateFlowLogsInput { s.LogGroupName = &v @@ -38066,6 +40910,9 @@ type CreateFpgaImageInput struct { // A name for the AFI. Name *string `type:"string"` + + // The tags to apply to the FPGA image during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -38127,6 +40974,12 @@ func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFpgaImageInput) SetTagSpecifications(v []*TagSpecification) *CreateFpgaImageInput { + s.TagSpecifications = v + return s +} + type CreateFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -38162,7 +41015,7 @@ func (s *CreateFpgaImageOutput) SetFpgaImageId(v string) *CreateFpgaImageOutput type CreateImageInput struct { _ struct{} `type:"structure"` - // Tthe block device mappings. This parameter cannot be used to modify the encryption + // The block device mappings. This parameter cannot be used to modify the encryption // status of existing volumes or snapshots. To create an AMI with encrypted // snapshots, use the CopyImage action. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` @@ -38282,7 +41135,6 @@ func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput { return s } -// Contains the parameters for CreateInstanceExportTask. 
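LogFormat, added to CreateFlowLogs above, selects and orders the fields of each flow log record and applies to logs published to S3. A sketch with a trimmed custom format; the VPC ID and bucket ARN are placeholders, and the ${field-id} names come from the Flow Log Records page the comment links to.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
		ResourceIds:        []*string{aws.String("vpc-0123456789abcdef0")}, // placeholder
		ResourceType:       aws.String("VPC"),
		TrafficType:        aws.String("ALL"),
		LogDestinationType: aws.String("s3"),
		LogDestination:     aws.String("arn:aws:s3:::example-flow-log-bucket"), // placeholder
		LogFormat:          aws.String("${version} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${action}"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.FlowLogIds)
}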
type CreateInstanceExportTaskInput struct { _ struct{} `type:"structure"` @@ -38349,7 +41201,6 @@ func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateIn return s } -// Contains the output for CreateInstanceExportTask. type CreateInstanceExportTaskOutput struct { _ struct{} `type:"structure"` @@ -38482,7 +41333,7 @@ type CreateKeyPairOutput struct { KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` // An unencrypted PEM encoded RSA private key. - KeyMaterial *string `locationName:"keyMaterial" type:"string"` + KeyMaterial *string `locationName:"keyMaterial" type:"string" sensitive:"true"` // The name of the key pair. KeyName *string `locationName:"keyName" type:"string"` @@ -38541,6 +41392,9 @@ type CreateLaunchTemplateInput struct { // LaunchTemplateName is a required field LaunchTemplateName *string `min:"3" type:"string" required:"true"` + // The tags to apply to the launch template during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // A description for the first version of the launch template. VersionDescription *string `type:"string"` } @@ -38603,6 +41457,12 @@ func (s *CreateLaunchTemplateInput) SetLaunchTemplateName(v string) *CreateLaunc return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateLaunchTemplateInput) SetTagSpecifications(v []*TagSpecification) *CreateLaunchTemplateInput { + s.TagSpecifications = v + return s +} + // SetVersionDescription sets the VersionDescription field's value. func (s *CreateLaunchTemplateInput) SetVersionDescription(v string) *CreateLaunchTemplateInput { s.VersionDescription = &v @@ -38662,7 +41522,9 @@ type CreateLaunchTemplateVersionInput struct { // The version number of the launch template version on which to base the new // version. The new version inherits the same launch parameters as the source - // version, except for parameters that you specify in LaunchTemplateData. + // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // applied to the block device mapping are ignored when creating a new version + // unless they are explicitly included. SourceVersion *string `type:"string"` // A description for the version of the launch template. @@ -39122,12 +41984,9 @@ type CreateNetworkInterfaceInput struct { // The IDs of one or more security groups. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // Indicates whether the network interface is an Elastic Fabric Adapter (EFA). - // Only specify this parameter to create an EFA. For more information, see Elastic - // Fabric Adapter (AWSEC2/latest/UserGuide/efa.html) in the Amazon Elastic Compute - // Cloud User Guide. - // - // If you are not creating an EFA ENI, omit this parameter. + // Indicates the type of network interface. To create an Elastic Fabric Adapter + // (EFA), specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // in the Amazon Elastic Compute Cloud User Guide. InterfaceType *string `type:"string" enum:"NetworkInterfaceCreationType"` // The number of IPv6 addresses to assign to a network interface. Amazon EC2 @@ -39971,6 +42830,105 @@ func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput { return s } +type CreateSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Copies the tags from the specified volume to corresponding snapshot. 
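Per the reworded InterfaceType documentation above, creating an Elastic Fabric Adapter now reads as simply passing efa. A minimal sketch; the subnet ID is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateNetworkInterface(&ec2.CreateNetworkInterfaceInput{
		SubnetId:      aws.String("subnet-0123456789abcdef0"), // placeholder
		InterfaceType: aws.String("efa"),
		Description:   aws.String("EFA for an HPC node"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.NetworkInterface)
}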
+ CopyTagsFromSource *string `type:"string" enum:"CopyTagsFromSource"` + + // A description propagated to every snapshot specified by the instance. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action without actually + // making the request. Provides an error response. If you have the required + // permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance to specify which volumes should be included in the snapshots. + // + // InstanceSpecification is a required field + InstanceSpecification *InstanceSpecification `type:"structure" required:"true"` + + // Tags to apply to every snapshot specified by the instance. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotsInput"} + if s.InstanceSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceSpecification")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCopyTagsFromSource sets the CopyTagsFromSource field's value. +func (s *CreateSnapshotsInput) SetCopyTagsFromSource(v string) *CreateSnapshotsInput { + s.CopyTagsFromSource = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateSnapshotsInput) SetDescription(v string) *CreateSnapshotsInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSnapshotsInput) SetDryRun(v bool) *CreateSnapshotsInput { + s.DryRun = &v + return s +} + +// SetInstanceSpecification sets the InstanceSpecification field's value. +func (s *CreateSnapshotsInput) SetInstanceSpecification(v *InstanceSpecification) *CreateSnapshotsInput { + s.InstanceSpecification = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSnapshotsInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotsInput { + s.TagSpecifications = v + return s +} + +type CreateSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // List of snapshots. + Snapshots []*SnapshotInfo `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsOutput) GoString() string { + return s.String() +} + +// SetSnapshots sets the Snapshots field's value. +func (s *CreateSnapshotsOutput) SetSnapshots(v []*SnapshotInfo) *CreateSnapshotsOutput { + s.Snapshots = v + return s +} + // Contains the parameters for CreateSpotDatafeedSubscription. type CreateSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` @@ -40256,6 +43214,588 @@ func (s CreateTagsOutput) GoString() string { return s.String() } +type CreateTrafficMirrorFilterInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. 
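The new CreateSnapshots types above take point-in-time snapshots of every volume attached to a single instance. A hedged sketch: the InstanceSpecification fields (InstanceId, ExcludeBootVolume) are not shown in this hunk and are assumed from the same SDK release, the instance ID is a placeholder, and "volume" is the CopyTagsFromSource enum value documented for this release.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateSnapshots(&ec2.CreateSnapshotsInput{
		InstanceSpecification: &ec2.InstanceSpecification{
			InstanceId:        aws.String("i-0123456789abcdef0"), // placeholder
			ExcludeBootVolume: aws.Bool(true),
		},
		CopyTagsFromSource: aws.String("volume"),
		Description:        aws.String("nightly multi-volume snapshot"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, snap := range out.Snapshots {
		log.Println(aws.StringValue(snap.SnapshotId))
	}
}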
For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror filter. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to assign to a Traffic Mirror filter. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterInput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterInput) SetClientToken(v string) *CreateTrafficMirrorFilterInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorFilterInput) SetDescription(v string) *CreateTrafficMirrorFilterInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorFilterInput) SetDryRun(v bool) *CreateTrafficMirrorFilterInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTrafficMirrorFilterInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorFilterInput { + s.TagSpecifications = v + return s +} + +type CreateTrafficMirrorFilterOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror filter. + TrafficMirrorFilter *TrafficMirrorFilter `locationName:"trafficMirrorFilter" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterOutput) SetClientToken(v string) *CreateTrafficMirrorFilterOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorFilter sets the TrafficMirrorFilter field's value. +func (s *CreateTrafficMirrorFilterOutput) SetTrafficMirrorFilter(v *TrafficMirrorFilter) *CreateTrafficMirrorFilterOutput { + s.TrafficMirrorFilter = v + return s +} + +type CreateTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror rule. 
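A Traffic Mirror filter is the container that the rules defined below attach to. A minimal sketch of creating one, assuming the matching CreateTrafficMirrorFilter client method is generated alongside these types elsewhere in this file.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateTrafficMirrorFilter(&ec2.CreateTrafficMirrorFilterInput{
		Description: aws.String("mirror inbound HTTPS only"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.TrafficMirrorFilter.TrafficMirrorFilterId))
}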
+ Description *string `type:"string"` + + // The destination CIDR block to assign to the Traffic Mirror rule. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // The destination port range. + DestinationPortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The protocol, for example UDP, to assign to the Traffic Mirror rule. + // + // For information about the protocol value, see Protocol Numbers (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + // on the Internet Assigned Numbers Authority (IANA) website. + Protocol *int64 `type:"integer"` + + // The action to take (accept | reject) on the filtered traffic. + // + // RuleAction is a required field + RuleAction *string `type:"string" required:"true" enum:"TrafficMirrorRuleAction"` + + // The number of the Traffic Mirror rule. This number must be unique for each + // Traffic Mirror rule in a given direction. The rules are processed in ascending + // order by rule number. + // + // RuleNumber is a required field + RuleNumber *int64 `type:"integer" required:"true"` + + // The source CIDR block to assign to the Traffic Mirror rule. + // + // SourceCidrBlock is a required field + SourceCidrBlock *string `type:"string" required:"true"` + + // The source port range. + SourcePortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // The type of traffic (ingress | egress). + // + // TrafficDirection is a required field + TrafficDirection *string `type:"string" required:"true" enum:"TrafficDirection"` + + // The ID of the filter that this rule is associated with. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficMirrorFilterRuleInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + if s.SourceCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCidrBlock")) + } + if s.TrafficDirection == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficDirection")) + } + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetClientToken(v string) *CreateTrafficMirrorFilterRuleInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. 
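A sketch of attaching an ingress rule to a filter, exercising the required fields that the Validate method above checks. The filter ID is a placeholder, protocol 6 is TCP per the IANA registry the comment links to, and the TrafficMirrorPortRangeRequest shape (FromPort/ToPort) is assumed from the same release.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	_, err := svc.CreateTrafficMirrorFilterRule(&ec2.CreateTrafficMirrorFilterRuleInput{
		TrafficMirrorFilterId: aws.String("tmf-0123456789abcdef0"), // placeholder
		TrafficDirection:      aws.String("ingress"),
		RuleAction:            aws.String("accept"),
		RuleNumber:            aws.Int64(100), // lower numbers are evaluated first
		Protocol:              aws.Int64(6),   // TCP
		SourceCidrBlock:       aws.String("0.0.0.0/0"),
		DestinationCidrBlock:  aws.String("10.0.0.0/16"),
		DestinationPortRange: &ec2.TrafficMirrorPortRangeRequest{
			FromPort: aws.Int64(443),
			ToPort:   aws.Int64(443),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}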
+func (s *CreateTrafficMirrorFilterRuleInput) SetDescription(v string) *CreateTrafficMirrorFilterRuleInput { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDestinationCidrBlock(v string) *CreateTrafficMirrorFilterRuleInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationPortRange sets the DestinationPortRange field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDestinationPortRange(v *TrafficMirrorPortRangeRequest) *CreateTrafficMirrorFilterRuleInput { + s.DestinationPortRange = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDryRun(v bool) *CreateTrafficMirrorFilterRuleInput { + s.DryRun = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetProtocol(v int64) *CreateTrafficMirrorFilterRuleInput { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetRuleAction(v string) *CreateTrafficMirrorFilterRuleInput { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetRuleNumber(v int64) *CreateTrafficMirrorFilterRuleInput { + s.RuleNumber = &v + return s +} + +// SetSourceCidrBlock sets the SourceCidrBlock field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetSourceCidrBlock(v string) *CreateTrafficMirrorFilterRuleInput { + s.SourceCidrBlock = &v + return s +} + +// SetSourcePortRange sets the SourcePortRange field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetSourcePortRange(v *TrafficMirrorPortRangeRequest) *CreateTrafficMirrorFilterRuleInput { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetTrafficDirection(v string) *CreateTrafficMirrorFilterRuleInput { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterId(v string) *CreateTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterId = &v + return s +} + +type CreateTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The Traffic Mirror rule. + TrafficMirrorFilterRule *TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRule" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterRuleOutput) SetClientToken(v string) *CreateTrafficMirrorFilterRuleOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorFilterRule sets the TrafficMirrorFilterRule field's value. 
+func (s *CreateTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRule(v *TrafficMirrorFilterRule) *CreateTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRule = v + return s +} + +type CreateTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror session. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the source network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `type:"string" required:"true"` + + // The number of bytes in each packet to mirror. These are bytes after the VXLAN + // header. Do not specify this parameter when you want to mirror the entire + // packet. To mirror a subset of the packet, set this to the length (in bytes) + // that you want to mirror. For example, if you set this value to 100, then + // the first 100 bytes that meet the filter criteria are copied to the target. + // + // If you do not want to mirror the entire packet, use the PacketLength parameter + // to specify the number of bytes in each packet to mirror. + PacketLength *int64 `type:"integer"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + // + // SessionNumber is a required field + SessionNumber *int64 `type:"integer" required:"true"` + + // The tags to assign to a Traffic Mirror session. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` + + // The ID of the Traffic Mirror target. + // + // TrafficMirrorTargetId is a required field + TrafficMirrorTargetId *string `type:"string" required:"true"` + + // The VXLAN ID for the Traffic Mirror session. For more information about the + // VXLAN protocol, see RFC 7348 (https://tools.ietf.org/html/rfc7348). If you + // do not specify a VirtualNetworkId, an account-wide unique id is chosen at + // random. + VirtualNetworkId *int64 `type:"integer"` +} + +// String returns the string representation +func (s CreateTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
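Tying the pieces together: a session mirrors traffic from a source ENI through a filter to a target. A minimal sketch with placeholder IDs; PacketLength trims each mirrored packet as the field documentation above describes.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	_, err := svc.CreateTrafficMirrorSession(&ec2.CreateTrafficMirrorSessionInput{
		NetworkInterfaceId:    aws.String("eni-0123456789abcdef0"), // source ENI, placeholder
		TrafficMirrorFilterId: aws.String("tmf-0123456789abcdef0"), // placeholder
		TrafficMirrorTargetId: aws.String("tmt-0123456789abcdef0"), // placeholder
		SessionNumber:         aws.Int64(1),   // 1-32766; lowest matching session wins
		PacketLength:          aws.Int64(100), // mirror only the first 100 bytes after the VXLAN header
	})
	if err != nil {
		log.Fatal(err)
	}
}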
+func (s *CreateTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficMirrorSessionInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.SessionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SessionNumber")) + } + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + if s.TrafficMirrorTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorTargetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorSessionInput) SetClientToken(v string) *CreateTrafficMirrorSessionInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorSessionInput) SetDescription(v string) *CreateTrafficMirrorSessionInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorSessionInput) SetDryRun(v bool) *CreateTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateTrafficMirrorSessionInput) SetNetworkInterfaceId(v string) *CreateTrafficMirrorSessionInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *CreateTrafficMirrorSessionInput) SetPacketLength(v int64) *CreateTrafficMirrorSessionInput { + s.PacketLength = &v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *CreateTrafficMirrorSessionInput) SetSessionNumber(v int64) *CreateTrafficMirrorSessionInput { + s.SessionNumber = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTrafficMirrorSessionInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorSessionInput { + s.TagSpecifications = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *CreateTrafficMirrorSessionInput) SetTrafficMirrorFilterId(v string) *CreateTrafficMirrorSessionInput { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *CreateTrafficMirrorSessionInput) SetTrafficMirrorTargetId(v string) *CreateTrafficMirrorSessionInput { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *CreateTrafficMirrorSessionInput) SetVirtualNetworkId(v int64) *CreateTrafficMirrorSessionInput { + s.VirtualNetworkId = &v + return s +} + +type CreateTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror session. 
+ TrafficMirrorSession *TrafficMirrorSession `locationName:"trafficMirrorSession" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorSessionOutput) SetClientToken(v string) *CreateTrafficMirrorSessionOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorSession sets the TrafficMirrorSession field's value. +func (s *CreateTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMirrorSession) *CreateTrafficMirrorSessionOutput { + s.TrafficMirrorSession = v + return s +} + +type CreateTrafficMirrorTargetInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror target. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The network interface ID that is associated with the target. + NetworkInterfaceId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Network Load Balancer that is associated + // with the target. + NetworkLoadBalancerArn *string `type:"string"` + + // The tags to assign to the Traffic Mirror target. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateTrafficMirrorTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorTargetInput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorTargetInput) SetClientToken(v string) *CreateTrafficMirrorTargetInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorTargetInput) SetDescription(v string) *CreateTrafficMirrorTargetInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorTargetInput) SetDryRun(v bool) *CreateTrafficMirrorTargetInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateTrafficMirrorTargetInput) SetNetworkInterfaceId(v string) *CreateTrafficMirrorTargetInput { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkLoadBalancerArn sets the NetworkLoadBalancerArn field's value. +func (s *CreateTrafficMirrorTargetInput) SetNetworkLoadBalancerArn(v string) *CreateTrafficMirrorTargetInput { + s.NetworkLoadBalancerArn = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
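A mirror target can be either a network interface or a Network Load Balancer; presumably exactly one of the two fields should be set. A sketch using a placeholder NLB ARN.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.CreateTrafficMirrorTarget(&ec2.CreateTrafficMirrorTargetInput{
		NetworkLoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-east-2:111122223333:loadbalancer/net/example-nlb/0123456789abcdef"), // placeholder
		Description:            aws.String("NLB in front of the monitoring appliances"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.TrafficMirrorTarget)
}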
+func (s *CreateTrafficMirrorTargetInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorTargetInput { + s.TagSpecifications = v + return s +} + +type CreateTrafficMirrorTargetOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror target. + TrafficMirrorTarget *TrafficMirrorTarget `locationName:"trafficMirrorTarget" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorTargetOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorTargetOutput) SetClientToken(v string) *CreateTrafficMirrorTargetOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorTarget sets the TrafficMirrorTarget field's value. +func (s *CreateTrafficMirrorTargetOutput) SetTrafficMirrorTarget(v *TrafficMirrorTarget) *CreateTrafficMirrorTargetOutput { + s.TrafficMirrorTarget = v + return s +} + type CreateTransitGatewayInput struct { _ struct{} `type:"structure"` @@ -40335,7 +43875,7 @@ func (s *CreateTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *Creat type CreateTransitGatewayRouteInput struct { _ struct{} `type:"structure"` - // Indicates whether to drop traffic if the target isn't available. + // Indicates whether to drop traffic that matches this route. Blackhole *bool `type:"boolean"` // The CIDR range used for destination matches. Routing decisions are based @@ -40679,8 +44219,7 @@ func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v strin type CreateVolumeInput struct { _ struct{} `type:"structure"` - // The Availability Zone in which to create the volume. Use DescribeAvailabilityZones - // to list the Availability Zones that are currently available to you. + // The Availability Zone in which to create the volume. // // AvailabilityZone is a required field AvailabilityZone *string `type:"string" required:"true"` @@ -40691,13 +44230,14 @@ type CreateVolumeInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes - // may only be attached to instances that support Amazon EBS encryption. Volumes - // that are created from encrypted snapshots are automatically encrypted. There - // is no way to create an encrypted volume from an unencrypted snapshot or vice - // versa. If your AMI uses encrypted volumes, you can only launch it on supported - // instance types. For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // Specifies whether the volume should be encrypted. The effect of setting the + // encryption state to true depends on the volume origin (new or from a snapshot), + // starting encryption state, ownership, and whether encryption by default is + // enabled. For more information, see Encryption by Default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) // in the Amazon Elastic Compute Cloud User Guide. 
+ // + // Encrypted Amazon EBS volumes must be attached to instances that support Amazon + // EBS encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) to provision for the volume, @@ -40711,33 +44251,24 @@ type CreateVolumeInput struct { // This parameter is valid only for Provisioned IOPS SSD (io1) volumes. Iops *int64 `type:"integer"` - // An identifier for the AWS Key Management Service (AWS KMS) customer master - // key (CMK) to use when creating the encrypted volume. This parameter is only - // required if you want to use a non-default CMK; if this parameter is not specified, - // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted - // flag must also be set. + // The identifier of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, + // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted + // state must be true. // - // The CMK identifier may be provided in any of the following formats: + // You can specify the CMK using any of the following: // - // * Key ID + // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the alias - // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // * Key alias. For example, alias/ExampleAlias. // - // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the key - // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. - // - // - // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, - // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS parses KmsKeyId asynchronously, meaning that the action you call may - // appear to complete even though you provided an invalid identifier. The action - // will eventually fail. + // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, + // alias, or ARN that is not valid, the action can appear to complete, but eventually + // fails. KmsKeyId *string `type:"string"` // The size of the volume, in GiBs. @@ -40749,7 +44280,7 @@ type CreateVolumeInput struct { // Default: If you're creating the volume from a snapshot and don't specify // a volume size, the default is the snapshot size. // - // At least one of Size or SnapshotId are required. + // At least one of Size or SnapshotId is required. Size *int64 `type:"integer"` // The snapshot from which to create the volume. @@ -40764,10 +44295,7 @@ type CreateVolumeInput struct { // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. 
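The CreateVolume hunks above rework the encryption story (deferring to encryption-by-default behavior and adopting the same CMK identifier formats as CopySnapshot), and the next hunk collapses the VolumeType default to gp2 everywhere. A sketch of an encrypted volume; the key alias is a placeholder, and CreateVolume in this SDK returns the Volume struct directly.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-2a"),
		Size:             aws.Int64(100), // GiB; at least one of Size or SnapshotId is required
		VolumeType:       aws.String("gp2"),
		Encrypted:        aws.Bool(true),
		KmsKeyId:         aws.String("alias/ExampleAlias"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(vol.VolumeId))
}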
// - // Defaults: If no volume type is specified, the default is standard in us-east-1, - // eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, - // ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, - // and cn-north-1. In all other Regions, EBS defaults to gp2. + // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` } @@ -41062,7 +44590,7 @@ type CreateVpcEndpointInput struct { // (Interface endpoint) Indicate whether to associate a private hosted zone // with the specified VPC. The private hosted zone contains a record set for - // the default public DNS name for the service for the region (for example, + // the default public DNS name for the service for the Region (for example, // kinesis.us-east-1.amazonaws.com) which resolves to the private IP addresses // of the endpoint network interfaces in the VPC. This enables you to make requests // to the default public DNS name for the service instead of the public DNS @@ -41072,7 +44600,7 @@ type CreateVpcEndpointInput struct { // true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to // set the VPC attributes. // - // Default: false + // Default: true PrivateDnsEnabled *bool `type:"boolean"` // (Gateway endpoint) One or more route table IDs. @@ -41443,10 +44971,10 @@ type CreateVpcPeeringConnectionInput struct { // Default: Your AWS account ID PeerOwnerId *string `locationName:"peerOwnerId" type:"string"` - // The region code for the accepter VPC, if the accepter VPC is located in a - // region other than the region in which you make the request. + // The Region code for the accepter VPC, if the accepter VPC is located in a + // Region other than the Region in which you make the request. // - // Default: The region in which you make the request. + // Default: The Region in which you make the request. PeerRegion *string `type:"string"` // The ID of the VPC with which you are creating the VPC peering connection. @@ -41875,6 +45403,9 @@ type CustomerGateway struct { // (ASN). BgpAsn *string `locationName:"bgpAsn" type:"string"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The ID of the customer gateway. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` @@ -41908,6 +45439,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { + s.CertificateArn = &v + return s +} + // SetCustomerGatewayId sets the CustomerGatewayId field's value. func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway { s.CustomerGatewayId = &v @@ -42525,6 +46062,8 @@ type DeleteFlowLogsInput struct { // One or more flow log IDs. // + // Constraint: Maximum of 1000 flow log IDs. + // // FlowLogIds is a required field FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` } @@ -43499,6 +47038,125 @@ func (s DeletePlacementGroupOutput) GoString() string { return s.String() } +// Describes the error for a Reserved Instance whose queued purchase could not +// be deleted. +type DeleteQueuedReservedInstancesError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" enum:"DeleteQueuedReservedInstancesErrorCode"` + + // The error message. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *DeleteQueuedReservedInstancesError) SetCode(v string) *DeleteQueuedReservedInstancesError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DeleteQueuedReservedInstancesError) SetMessage(v string) *DeleteQueuedReservedInstancesError { + s.Message = &v + return s +} + +type DeleteQueuedReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the Reserved Instances. + // + // ReservedInstancesIds is a required field + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"item" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteQueuedReservedInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteQueuedReservedInstancesInput"} + if s.ReservedInstancesIds == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesIds")) + } + if s.ReservedInstancesIds != nil && len(s.ReservedInstancesIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReservedInstancesIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteQueuedReservedInstancesInput) SetDryRun(v bool) *DeleteQueuedReservedInstancesInput { + s.DryRun = &v + return s +} + +// SetReservedInstancesIds sets the ReservedInstancesIds field's value. +func (s *DeleteQueuedReservedInstancesInput) SetReservedInstancesIds(v []*string) *DeleteQueuedReservedInstancesInput { + s.ReservedInstancesIds = v + return s +} + +type DeleteQueuedReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the queued purchases that could not be deleted. + FailedQueuedPurchaseDeletions []*FailedQueuedPurchaseDeletion `locationName:"failedQueuedPurchaseDeletionSet" locationNameList:"item" type:"list"` + + // Information about the queued purchases that were successfully deleted. + SuccessfulQueuedPurchaseDeletions []*SuccessfulQueuedPurchaseDeletion `locationName:"successfulQueuedPurchaseDeletionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesOutput) GoString() string { + return s.String() +} + +// SetFailedQueuedPurchaseDeletions sets the FailedQueuedPurchaseDeletions field's value. 
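The new queued-purchase deletion API above reports partial success, splitting per-ID results across two lists. A sketch; the Reserved Instance ID is a placeholder UUID, and the SuccessfulQueuedPurchaseDeletion/FailedQueuedPurchaseDeletion element types are referenced but not shown in this hunk.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))
	out, err := svc.DeleteQueuedReservedInstances(&ec2.DeleteQueuedReservedInstancesInput{
		ReservedInstancesIds: []*string{
			aws.String("2567e8b0-d6c8-474a-9254-ffdfa0d17a87"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ok := range out.SuccessfulQueuedPurchaseDeletions {
		log.Println("deleted:", aws.StringValue(ok.ReservedInstancesId))
	}
	for _, bad := range out.FailedQueuedPurchaseDeletions {
		log.Println("failed:", aws.StringValue(bad.Error.Message))
	}
}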
+func (s *DeleteQueuedReservedInstancesOutput) SetFailedQueuedPurchaseDeletions(v []*FailedQueuedPurchaseDeletion) *DeleteQueuedReservedInstancesOutput { + s.FailedQueuedPurchaseDeletions = v + return s +} + +// SetSuccessfulQueuedPurchaseDeletions sets the SuccessfulQueuedPurchaseDeletions field's value. +func (s *DeleteQueuedReservedInstancesOutput) SetSuccessfulQueuedPurchaseDeletions(v []*SuccessfulQueuedPurchaseDeletion) *DeleteQueuedReservedInstancesOutput { + s.SuccessfulQueuedPurchaseDeletions = v + return s +} + type DeleteRouteInput struct { _ struct{} `type:"structure"` @@ -43959,6 +47617,298 @@ func (s DeleteTagsOutput) GoString() string { return s.String() } +type DeleteTrafficMirrorFilterInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror filter. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorFilterInput"} + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorFilterInput) SetDryRun(v bool) *DeleteTrafficMirrorFilterInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *DeleteTrafficMirrorFilterInput) SetTrafficMirrorFilterId(v string) *DeleteTrafficMirrorFilterInput { + s.TrafficMirrorFilterId = &v + return s +} + +type DeleteTrafficMirrorFilterOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *DeleteTrafficMirrorFilterOutput) SetTrafficMirrorFilterId(v string) *DeleteTrafficMirrorFilterOutput { + s.TrafficMirrorFilterId = &v + return s +} + +type DeleteTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror rule. 
+ // + // TrafficMirrorFilterRuleId is a required field + TrafficMirrorFilterRuleId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorFilterRuleInput"} + if s.TrafficMirrorFilterRuleId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterRuleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorFilterRuleInput) SetDryRun(v bool) *DeleteTrafficMirrorFilterRuleInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *DeleteTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterRuleId(v string) *DeleteTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type DeleteTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror rule. + TrafficMirrorFilterRuleId *string `locationName:"trafficMirrorFilterRuleId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *DeleteTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRuleId(v string) *DeleteTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type DeleteTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror session. + // + // TrafficMirrorSessionId is a required field + TrafficMirrorSessionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorSessionInput"} + if s.TrafficMirrorSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorSessionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorSessionInput) SetDryRun(v bool) *DeleteTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. 
+func (s *DeleteTrafficMirrorSessionInput) SetTrafficMirrorSessionId(v string) *DeleteTrafficMirrorSessionInput { + s.TrafficMirrorSessionId = &v + return s +} + +type DeleteTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror session. + TrafficMirrorSessionId *string `locationName:"trafficMirrorSessionId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *DeleteTrafficMirrorSessionOutput) SetTrafficMirrorSessionId(v string) *DeleteTrafficMirrorSessionOutput { + s.TrafficMirrorSessionId = &v + return s +} + +type DeleteTrafficMirrorTargetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror target. + // + // TrafficMirrorTargetId is a required field + TrafficMirrorTargetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorTargetInput"} + if s.TrafficMirrorTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorTargetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorTargetInput) SetDryRun(v bool) *DeleteTrafficMirrorTargetInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *DeleteTrafficMirrorTargetInput) SetTrafficMirrorTargetId(v string) *DeleteTrafficMirrorTargetInput { + s.TrafficMirrorTargetId = &v + return s +} + +type DeleteTrafficMirrorTargetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror target. + TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorTargetOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *DeleteTrafficMirrorTargetOutput) SetTrafficMirrorTargetId(v string) *DeleteTrafficMirrorTargetOutput { + s.TrafficMirrorTargetId = &v + return s +} + type DeleteTransitGatewayInput struct { _ struct{} `type:"structure"` @@ -45222,9 +49172,9 @@ type DescribeAggregateIdFormatOutput struct { // Information about each resource's ID format. 
Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`

-	// Indicates whether all resource types in the region are configured to use
+	// Indicates whether all resource types in the Region are configured to use
 	// longer IDs. This value is only true if all users are configured to use longer
-	// IDs for all resources types in the region.
+	// IDs for all resource types in the Region.
 	UseLongIdsAggregated *bool `locationName:"useLongIdsAggregated" type:"boolean"`
 }

@@ -45263,7 +49213,7 @@ type DescribeAvailabilityZonesInput struct {
 	//
 	// * message - Information about the Availability Zone.
 	//
-	// * region-name - The name of the region for the Availability Zone (for
+	// * region-name - The name of the Region for the Availability Zone (for
 	// example, us-east-1).
 	//
 	// * state - The state of the Availability Zone (available | information
@@ -45442,10 +49392,10 @@ type DescribeByoipCidrsInput struct {
 	// remaining results, make another call with the returned nextToken value.
 	//
 	// MaxResults is a required field
-	MaxResults *int64 `min:"5" type:"integer" required:"true"`
+	MaxResults *int64 `min:"1" type:"integer" required:"true"`

 	// The token for the next page of results.
-	NextToken *string `min:"1" type:"string"`
+	NextToken *string `type:"string"`
 }

 // String returns the string representation
@@ -45464,11 +49414,8 @@ func (s *DescribeByoipCidrsInput) Validate() error {
 	if s.MaxResults == nil {
 		invalidParams.Add(request.NewErrParamRequired("MaxResults"))
 	}
-	if s.MaxResults != nil && *s.MaxResults < 5 {
-		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
-	}
-	if s.NextToken != nil && len(*s.NextToken) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
 	}

 	if invalidParams.Len() > 0 {
@@ -45546,7 +49493,7 @@ type DescribeCapacityReservationsInput struct {
 	// The maximum number of results to return for the request in a single page.
 	// The remaining results can be seen by sending another request with the returned
 	// nextToken value.
-	MaxResults *int64 `type:"integer"`
+	MaxResults *int64 `min:"1" type:"integer"`

 	// The token to retrieve the next page of results.
 	NextToken *string `type:"string"`
@@ -45562,6 +49509,19 @@ func (s DescribeCapacityReservationsInput) GoString() string {
 	return s.String()
 }

+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeCapacityReservationsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeCapacityReservationsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
 // SetCapacityReservationIds sets the CapacityReservationIds field's value.
 func (s *DescribeCapacityReservationsInput) SetCapacityReservationIds(v []*string) *DescribeCapacityReservationsInput {
 	s.CapacityReservationIds = v
@@ -45651,9 +49611,8 @@ type DescribeClassicLinkInstancesInput struct {
 	// to find all resources assigned a tag with a specific key, regardless of
 	// the tag value.
 	//
-	// * vpc-id - The ID of the VPC to which the instance is linked.
-	//
-	// vpc-id - The ID of the VPC that the instance is linked to.
+	// * vpc-id - The ID of the VPC to which the instance is linked, that is,
+	// the VPC that the instance is linked to.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. @@ -45663,7 +49622,7 @@ type DescribeClassicLinkInstancesInput struct { // remaining results, make another call with the returned nextToken value. // // Constraint: If the value is greater than 1000, we return only 1000 items. - MaxResults *int64 `locationName:"maxResults" type:"integer"` + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` // The token for the next page of results. NextToken *string `locationName:"nextToken" type:"string"` @@ -45679,6 +49638,19 @@ func (s DescribeClassicLinkInstancesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClassicLinkInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClassicLinkInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeClassicLinkInstancesInput) SetDryRun(v bool) *DescribeClassicLinkInstancesInput { s.DryRun = &v @@ -45765,7 +49737,7 @@ type DescribeClientVpnAuthorizationRulesInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -45787,9 +49759,6 @@ func (s *DescribeClientVpnAuthorizationRulesInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -45835,7 +49804,7 @@ type DescribeClientVpnAuthorizationRulesOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -45883,7 +49852,7 @@ type DescribeClientVpnConnectionsInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -45905,9 +49874,6 @@ func (s *DescribeClientVpnConnectionsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -45953,7 +49919,7 @@ type DescribeClientVpnConnectionsOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. 
- NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -45999,7 +49965,7 @@ type DescribeClientVpnEndpointsInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -46018,9 +49984,6 @@ func (s *DescribeClientVpnEndpointsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -46066,7 +50029,7 @@ type DescribeClientVpnEndpointsOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -46114,7 +50077,7 @@ type DescribeClientVpnRoutesInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -46136,9 +50099,6 @@ func (s *DescribeClientVpnRoutesInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -46181,7 +50141,7 @@ type DescribeClientVpnRoutesOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` // Information about the Client VPN endpoint routes. Routes []*ClientVpnRoute `locationName:"routes" locationNameList:"item" type:"list"` @@ -46235,7 +50195,7 @@ type DescribeClientVpnTargetNetworksInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -46257,9 +50217,6 @@ func (s *DescribeClientVpnTargetNetworksInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -46311,7 +50268,7 @@ type DescribeClientVpnTargetNetworksOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -46336,7 +50293,6 @@ func (s *DescribeClientVpnTargetNetworksOutput) SetNextToken(v string) *Describe return s } -// Contains the parameters for DescribeConversionTasks. 
type DescribeConversionTasksInput struct { _ struct{} `type:"structure"` @@ -46372,7 +50328,6 @@ func (s *DescribeConversionTasksInput) SetDryRun(v bool) *DescribeConversionTask return s } -// Contains the output for DescribeConversionTasks. type DescribeConversionTasksOutput struct { _ struct{} `type:"structure"` @@ -46525,6 +50480,13 @@ type DescribeDhcpOptionsInput struct { // to find all resources assigned a tag with a specific key, regardless of // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` } // String returns the string representation @@ -46537,6 +50499,19 @@ func (s DescribeDhcpOptionsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDhcpOptionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDhcpOptionsIds sets the DhcpOptionsIds field's value. func (s *DescribeDhcpOptionsInput) SetDhcpOptionsIds(v []*string) *DescribeDhcpOptionsInput { s.DhcpOptionsIds = v @@ -46555,11 +50530,27 @@ func (s *DescribeDhcpOptionsInput) SetFilters(v []*Filter) *DescribeDhcpOptionsI return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDhcpOptionsInput) SetMaxResults(v int64) *DescribeDhcpOptionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDhcpOptionsInput) SetNextToken(v string) *DescribeDhcpOptionsInput { + s.NextToken = &v + return s +} + type DescribeDhcpOptionsOutput struct { _ struct{} `type:"structure"` // Information about one or more DHCP options sets. DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -46578,6 +50569,12 @@ func (s *DescribeDhcpOptionsOutput) SetDhcpOptions(v []*DhcpOptions) *DescribeDh return s } +// SetNextToken sets the NextToken field's value. +func (s *DescribeDhcpOptionsOutput) SetNextToken(v string) *DescribeDhcpOptionsOutput { + s.NextToken = &v + return s +} + type DescribeEgressOnlyInternetGatewaysInput struct { _ struct{} `type:"structure"` @@ -46592,7 +50589,7 @@ type DescribeEgressOnlyInternetGatewaysInput struct { // The maximum number of results to return with a single call. To retrieve the // remaining results, make another call with the returned nextToken value. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // The token for the next page of results. NextToken *string `type:"string"` @@ -46608,6 +50605,19 @@ func (s DescribeEgressOnlyInternetGatewaysInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeEgressOnlyInternetGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEgressOnlyInternetGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeEgressOnlyInternetGatewaysInput) SetDryRun(v bool) *DescribeEgressOnlyInternetGatewaysInput { s.DryRun = &v @@ -46697,7 +50707,7 @@ type DescribeElasticGpusInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. This // value can be between 5 and 1000. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"10" type:"integer"` // The token to request the next page of results. NextToken *string `type:"string"` @@ -46713,6 +50723,19 @@ func (s DescribeElasticGpusInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticGpusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticGpusInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeElasticGpusInput) SetDryRun(v bool) *DescribeElasticGpusInput { s.DryRun = &v @@ -46787,7 +50810,115 @@ func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusO return s } -// Contains the parameters for DescribeExportTasks. +type DescribeExportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the export image tasks. + ExportImageTaskIds []*string `locationName:"ExportImageTaskId" locationNameList:"ExportImageTaskId" type:"list"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeExportImageTasksInput) SetDryRun(v bool) *DescribeExportImageTasksInput { + s.DryRun = &v + return s +} + +// SetExportImageTaskIds sets the ExportImageTaskIds field's value. +func (s *DescribeExportImageTasksInput) SetExportImageTaskIds(v []*string) *DescribeExportImageTasksInput { + s.ExportImageTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportImageTasksInput) SetFilters(v []*Filter) *DescribeExportImageTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeExportImageTasksInput) SetMaxResults(v int64) *DescribeExportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksInput) SetNextToken(v string) *DescribeExportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export image tasks. + ExportImageTasks []*ExportImageTask `locationName:"exportImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksOutput) GoString() string { + return s.String() +} + +// SetExportImageTasks sets the ExportImageTasks field's value. +func (s *DescribeExportImageTasksOutput) SetExportImageTasks(v []*ExportImageTask) *DescribeExportImageTasksOutput { + s.ExportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksOutput) SetNextToken(v string) *DescribeExportImageTasksOutput { + s.NextToken = &v + return s +} + type DescribeExportTasksInput struct { _ struct{} `type:"structure"` @@ -46811,7 +50942,6 @@ func (s *DescribeExportTasksInput) SetExportTaskIds(v []*string) *DescribeExport return s } -// Contains the output for DescribeExportTasks. type DescribeExportTasksOutput struct { _ struct{} `type:"structure"` @@ -47370,6 +51500,8 @@ type DescribeFlowLogsInput struct { Filter []*Filter `locationNameList:"Filter" type:"list"` // One or more flow log IDs. + // + // Constraint: Maximum of 1000 flow log IDs. FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` // The maximum number of results to return with a single call. To retrieve the @@ -47588,7 +51720,7 @@ type DescribeFpgaImagesInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to retrieve the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` // Filters the AFI by owner. Specify an AWS account ID, self (owner is the sender // of the request), or an AWS owner alias (valid values are amazon | aws-marketplace). @@ -47611,9 +51743,6 @@ func (s *DescribeFpgaImagesInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -47665,7 +51794,7 @@ type DescribeFpgaImagesOutput struct { // The token to use to retrieve the next page of results. 
This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -47955,11 +52084,13 @@ type DescribeHostsInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned // nextToken value. This value can be between 5 and 500. If maxResults is given - // a larger value than 500, you receive an error. You cannot specify this parameter - // and the host IDs parameter in the same request. + // a larger value than 500, you receive an error. + // + // You cannot specify this parameter and the host IDs parameter in the same + // request. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The token to retrieve the next page of results. + // The token to use to retrieve the next page of results. NextToken *string `locationName:"nextToken" type:"string"` } @@ -48049,7 +52180,7 @@ type DescribeIamInstanceProfileAssociationsInput struct { MaxResults *int64 `min:"5" type:"integer"` // The token to request the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation @@ -48068,9 +52199,6 @@ func (s *DescribeIamInstanceProfileAssociationsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 5 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -48110,7 +52238,7 @@ type DescribeIamInstanceProfileAssociationsOutput struct { // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation @@ -48436,7 +52564,7 @@ type DescribeImagesInput struct { // The filters. // - // * architecture - The image architecture (i386 | x86_64). + // * architecture - The image architecture (i386 | x86_64 | arm64). // // * block-device-mapping.delete-on-termination - A Boolean value that indicates // whether the Amazon EBS volume is deleted on instance termination. @@ -48591,7 +52719,6 @@ func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput { return s } -// Contains the parameters for DescribeImportImageTasks. type DescribeImportImageTasksInput struct { _ struct{} `type:"structure"` @@ -48602,14 +52729,13 @@ type DescribeImportImageTasksInput struct { DryRun *bool `type:"boolean"` // Filter tasks using the task-state filter and one of the following values: - // active, completed, deleting, deleted. + // active, completed, deleting, or deleted. Filters []*Filter `locationNameList:"Filter" type:"list"` - // A list of import image task IDs. + // The IDs of the import image tasks. ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` - // The maximum number of results to return in a single call. To retrieve the - // remaining results, make another call with the returned NextToken value. + // The maximum number of results to return in a single call. MaxResults *int64 `type:"integer"` // A token that indicates the next page of results. 
@@ -48656,7 +52782,6 @@ func (s *DescribeImportImageTasksInput) SetNextToken(v string) *DescribeImportIm return s } -// Contains the output for DescribeImportImageTasks. type DescribeImportImageTasksOutput struct { _ struct{} `type:"structure"` @@ -48691,7 +52816,6 @@ func (s *DescribeImportImageTasksOutput) SetNextToken(v string) *DescribeImportI return s } -// Contains the parameters for DescribeImportSnapshotTasks. type DescribeImportSnapshotTasksInput struct { _ struct{} `type:"structure"` @@ -48755,7 +52879,6 @@ func (s *DescribeImportSnapshotTasksInput) SetNextToken(v string) *DescribeImpor return s } -// Contains the output for DescribeImportSnapshotTasks. type DescribeImportSnapshotTasksOutput struct { _ struct{} `type:"structure"` @@ -49296,7 +53419,7 @@ type DescribeInstancesInput struct { // * affinity - The affinity setting for an instance running on a Dedicated // Host (default | host). // - // * architecture - The instance architecture (i386 | x86_64). + // * architecture - The instance architecture (i386 | x86_64 | arm64). // // * availability-zone - The Availability Zone of the instance. // @@ -49359,7 +53482,6 @@ type DescribeInstancesInput struct { // // * instance.group-name - The name of the security group for the instance. // - // // * ip-address - The public IPv4 address of the instance. // // * kernel-id - The kernel ID. @@ -49369,7 +53491,6 @@ type DescribeInstancesInput struct { // * launch-index - When launching multiple instances, this is the index // for the instance in the launch group (for example, 0, 1, 2, and so on). // - // // * launch-time - The time when the instance was launched. // // * monitoring-state - Indicates whether detailed monitoring is enabled @@ -50024,7 +54145,7 @@ type DescribeLaunchTemplatesInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. This // value can be between 1 and 200. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"1" type:"integer"` // The token to request the next page of results. NextToken *string `type:"string"` @@ -50040,6 +54161,19 @@ func (s DescribeLaunchTemplatesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLaunchTemplatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLaunchTemplatesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeLaunchTemplatesInput) SetDryRun(v bool) *DescribeLaunchTemplatesInput { s.DryRun = &v @@ -50130,7 +54264,7 @@ type DescribeMovingAddressesInput struct { // 1000; if MaxResults is given a value outside of this range, an error is returned. // // Default: If no value is provided, the default is 1000. - MaxResults *int64 `locationName:"maxResults" type:"integer"` + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` // The token for the next page of results. NextToken *string `locationName:"nextToken" type:"string"` @@ -50149,6 +54283,19 @@ func (s DescribeMovingAddressesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMovingAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMovingAddressesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeMovingAddressesInput) SetDryRun(v bool) *DescribeMovingAddressesInput { s.DryRun = &v @@ -50239,7 +54386,7 @@ type DescribeNatGatewaysInput struct { // The maximum number of results to return with a single call. To retrieve the // remaining results, make another call with the returned nextToken value. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // One or more NAT gateway IDs. NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"` @@ -50258,6 +54405,19 @@ func (s DescribeNatGatewaysInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNatGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNatGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetFilter sets the Filter field's value. func (s *DescribeNatGatewaysInput) SetFilter(v []*Filter) *DescribeNatGatewaysInput { s.Filter = v @@ -50349,7 +54509,6 @@ type DescribeNetworkAclsInput struct { // // * entry.port-range.to - The end of the port range specified in the entry. // - // // * entry.protocol - The protocol specified in the entry (tcp | udp | icmp // or a protocol number). // @@ -50617,7 +54776,7 @@ type DescribeNetworkInterfacePermissionsInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. If // this parameter is not specified, up to 50 results are returned by default. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // One or more network interface permission IDs. NetworkInterfacePermissionIds []*string `locationName:"NetworkInterfacePermissionId" type:"list"` @@ -50636,6 +54795,19 @@ func (s DescribeNetworkInterfacePermissionsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfacePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfacePermissionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetFilters sets the Filters field's value. func (s *DescribeNetworkInterfacePermissionsInput) SetFilters(v []*Filter) *DescribeNetworkInterfacePermissionsInput { s.Filters = v @@ -50734,7 +54906,7 @@ type DescribeNetworkInterfacesInput struct { // // * attachment.attachment-id - The ID of the interface attachment. // - // * attachment.attach.time - The time that the network interface was attached + // * attachment.attach-time - The time that the network interface was attached // to an instance. 
// // * attachment.delete-on-termination - Indicates whether the attachment @@ -50813,7 +54985,7 @@ type DescribeNetworkInterfacesInput struct { // The maximum number of items to return for this request. The request returns // a token that you can specify in a subsequent call to get the next set of // results. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // One or more network interface IDs. // @@ -50834,6 +55006,19 @@ func (s DescribeNetworkInterfacesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfacesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeNetworkInterfacesInput) SetDryRun(v bool) *DescribeNetworkInterfacesInput { s.DryRun = &v @@ -51085,7 +55270,7 @@ type DescribePrincipalIdFormatInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"1" type:"integer"` // The token to request the next page of results. NextToken *string `type:"string"` @@ -51110,6 +55295,19 @@ func (s DescribePrincipalIdFormatInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePrincipalIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePrincipalIdFormatInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribePrincipalIdFormatInput) SetDryRun(v bool) *DescribePrincipalIdFormatInput { s.DryRun = &v @@ -51175,7 +55373,7 @@ type DescribePublicIpv4PoolsInput struct { MaxResults *int64 `min:"1" type:"integer"` // The token for the next page of results. - NextToken *string `min:"1" type:"string"` + NextToken *string `type:"string"` // The IDs of the address pools. PoolIds []*string `locationName:"PoolId" locationNameList:"item" type:"list"` @@ -51197,9 +55395,6 @@ func (s *DescribePublicIpv4PoolsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -51261,6 +55456,10 @@ func (s *DescribePublicIpv4PoolsOutput) SetPublicIpv4Pools(v []*PublicIpv4Pool) type DescribeRegionsInput struct { _ struct{} `type:"structure"` + // Indicates whether to display all Regions, including Regions that are disabled + // for your account. + AllRegions *bool `type:"boolean"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -51269,12 +55468,16 @@ type DescribeRegionsInput struct { // The filters. 
//
-	// * endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).
+	// * endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com).
 	//
-	// * region-name - The name of the region (for example, us-east-1).
+	// * opt-in-status - The opt-in status of the Region (opt-in-not-required
+	// | opted-in | not-opted-in).
+	//
+	// * region-name - The name of the Region (for example, us-east-1).
 	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

-	// The names of the regions.
+	// The names of the Regions. You can specify any Regions, whether they are
+	// enabled or disabled for your account.
 	RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"`
 }

@@ -51288,6 +55491,12 @@ func (s DescribeRegionsInput) GoString() string {
 	return s.String()
 }

+// SetAllRegions sets the AllRegions field's value.
+func (s *DescribeRegionsInput) SetAllRegions(v bool) *DescribeRegionsInput {
+	s.AllRegions = &v
+	return s
+}
+
 // SetDryRun sets the DryRun field's value.
 func (s *DescribeRegionsInput) SetDryRun(v bool) *DescribeRegionsInput {
 	s.DryRun = &v
@@ -51309,7 +55518,7 @@ func (s *DescribeRegionsInput) SetRegionNames(v []*string) *DescribeRegionsInput
 type DescribeRegionsOutput struct {
 	_ struct{} `type:"structure"`

-	// Information about the regions.
+	// Information about the Regions.
 	Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"`
 }

@@ -51536,7 +55745,7 @@ type DescribeReservedInstancesModificationsInput struct {
 	// * modification-result.target-configuration.availability-zone - The Availability
 	// Zone for the new Reserved Instances.
 	//
-	// * modification-result.target-configuration.instance-count - The number
+	// * modification-result.target-configuration.instance-count - The number
 	// of new Reserved Instances.
 	//
 	// * modification-result.target-configuration.instance-type - The instance
@@ -51664,7 +55873,7 @@ type DescribeReservedInstancesOfferingsInput struct {
 	// SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise
 	// Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL
 	// Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows
-	// with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows
+	// with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows
 	// with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon
 	// VPC))
 	//
@@ -51972,7 +56181,7 @@ type DescribeRouteTablesInput struct {

 	// The maximum number of results to return with a single call. To retrieve the
 	// remaining results, make another call with the returned nextToken value.
-	MaxResults *int64 `type:"integer"`
+	MaxResults *int64 `min:"5" type:"integer"`

 	// The token for the next page of results.
 	NextToken *string `type:"string"`
@@ -51993,6 +56202,19 @@ func (s DescribeRouteTablesInput) GoString() string {
 	return s.String()
 }

+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeRouteTablesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeRouteTablesInput"}
+	if s.MaxResults != nil && *s.MaxResults < 5 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
 // SetDryRun sets the DryRun field's value.
func (s *DescribeRouteTablesInput) SetDryRun(v bool) *DescribeRouteTablesInput { s.DryRun = &v @@ -52086,7 +56308,7 @@ type DescribeScheduledInstanceAvailabilityInput struct { // The maximum number of results to return in a single call. This value can // be between 5 and 300. The default value is 300. To retrieve the remaining // results, make another call with the returned NextToken value. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // The maximum available duration, in hours. This value must be greater than // MinSlotDurationInHours and less than 1,720. @@ -52123,6 +56345,9 @@ func (s *DescribeScheduledInstanceAvailabilityInput) Validate() error { if s.FirstSlotStartTimeRange == nil { invalidParams.Add(request.NewErrParamRequired("FirstSlotStartTimeRange")) } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } if s.Recurrence == nil { invalidParams.Add(request.NewErrParamRequired("Recurrence")) } @@ -52515,7 +56740,7 @@ type DescribeSecurityGroupsInput struct { // remaining results, make another request with the returned NextToken value. // This value can be between 5 and 1000. If this parameter is not specified, // then all results are returned. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"5" type:"integer"` // The token to request the next page of results. NextToken *string `type:"string"` @@ -52531,6 +56756,19 @@ func (s DescribeSecurityGroupsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeSecurityGroupsInput) SetDryRun(v bool) *DescribeSecurityGroupsInput { s.DryRun = &v @@ -52935,7 +57173,7 @@ type DescribeSpotFleetInstancesInput struct { // The maximum number of results to return in a single call. Specify a value // between 1 and 1000. The default value is 1000. To retrieve the remaining // results, make another call with the returned NextToken value. - MaxResults *int64 `locationName:"maxResults" type:"integer"` + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` // The token for the next set of results. NextToken *string `locationName:"nextToken" type:"string"` @@ -52959,6 +57197,9 @@ func (s DescribeSpotFleetInstancesInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeSpotFleetInstancesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.SpotFleetRequestId == nil { invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) } @@ -53053,7 +57294,7 @@ type DescribeSpotFleetRequestHistoryInput struct { // The maximum number of results to return in a single call. Specify a value // between 1 and 1000. The default value is 1000. To retrieve the remaining // results, make another call with the returned NextToken value. 
- MaxResults *int64 `locationName:"maxResults" type:"integer"` + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` // The token for the next set of results. NextToken *string `locationName:"nextToken" type:"string"` @@ -53082,6 +57323,9 @@ func (s DescribeSpotFleetRequestHistoryInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeSpotFleetRequestHistoryInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetRequestHistoryInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.SpotFleetRequestId == nil { invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) } @@ -53810,6 +58054,13 @@ type DescribeSubnetsInput struct { // * vpc-id - The ID of the VPC for the subnet. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + // One or more subnet IDs. // // Default: Describes all your subnets. @@ -53826,6 +58077,19 @@ func (s DescribeSubnetsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSubnetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubnetsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DescribeSubnetsInput) SetDryRun(v bool) *DescribeSubnetsInput { s.DryRun = &v @@ -53838,6 +58102,18 @@ func (s *DescribeSubnetsInput) SetFilters(v []*Filter) *DescribeSubnetsInput { return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSubnetsInput) SetMaxResults(v int64) *DescribeSubnetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetsInput) SetNextToken(v string) *DescribeSubnetsInput { + s.NextToken = &v + return s +} + // SetSubnetIds sets the SubnetIds field's value. func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput { s.SubnetIds = v @@ -53847,6 +58123,10 @@ func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput { type DescribeSubnetsOutput struct { _ struct{} `type:"structure"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + // Information about one or more subnets. Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` } @@ -53861,6 +58141,12 @@ func (s DescribeSubnetsOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetsOutput) SetNextToken(v string) *DescribeSubnetsOutput { + s.NextToken = &v + return s +} + // SetSubnets sets the Subnets field's value. 
func (s *DescribeSubnetsOutput) SetSubnets(v []*Subnet) *DescribeSubnetsOutput { s.Subnets = v @@ -53972,6 +58258,367 @@ func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput { return s } +type DescribeTrafficMirrorFiltersInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror filter description. + // + // * traffic-mirror-filter-id: The ID of the Traffic Mirror filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterIds []*string `locationName:"TrafficMirrorFilterId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrafficMirrorFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorFiltersInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetDryRun(v bool) *DescribeTrafficMirrorFiltersInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetFilters(v []*Filter) *DescribeTrafficMirrorFiltersInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetMaxResults(v int64) *DescribeTrafficMirrorFiltersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetNextToken(v string) *DescribeTrafficMirrorFiltersInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilterIds sets the TrafficMirrorFilterIds field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetTrafficMirrorFilterIds(v []*string) *DescribeTrafficMirrorFiltersInput { + s.TrafficMirrorFilterIds = v + return s +} + +type DescribeTrafficMirrorFiltersOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more Traffic Mirror filters. 
+ TrafficMirrorFilters []*TrafficMirrorFilter `locationName:"trafficMirrorFilterSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorFiltersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFiltersOutput) SetNextToken(v string) *DescribeTrafficMirrorFiltersOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilters sets the TrafficMirrorFilters field's value. +func (s *DescribeTrafficMirrorFiltersOutput) SetTrafficMirrorFilters(v []*TrafficMirrorFilter) *DescribeTrafficMirrorFiltersOutput { + s.TrafficMirrorFilters = v + return s +} + +type DescribeTrafficMirrorSessionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror session description. + // + // * network-interface-id: The ID of the Traffic Mirror session network interface. + // + // * owner-id: The ID of the account that owns the Traffic Mirror session. + // + // * packet-length: The assigned number of packets to mirror. + // + // * session-number: The assigned session number. + // + // * traffic-mirror-filter-id: The ID of the Traffic Mirror filter. + // + // * traffic-mirror-session-id: The ID of the Traffic Mirror session. + // + // * traffic-mirror-target-id: The ID of the Traffic Mirror target. + // + // * virtual-network-id: The virtual network ID of the Traffic Mirror session. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror session. + TrafficMirrorSessionIds []*string `locationName:"TrafficMirrorSessionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrafficMirrorSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorSessionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetDryRun(v bool) *DescribeTrafficMirrorSessionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeTrafficMirrorSessionsInput) SetFilters(v []*Filter) *DescribeTrafficMirrorSessionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetMaxResults(v int64) *DescribeTrafficMirrorSessionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetNextToken(v string) *DescribeTrafficMirrorSessionsInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorSessionIds sets the TrafficMirrorSessionIds field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetTrafficMirrorSessionIds(v []*string) *DescribeTrafficMirrorSessionsInput { + s.TrafficMirrorSessionIds = v + return s +} + +type DescribeTrafficMirrorSessionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror + // sessions are described. Alternatively, you can filter the results. + TrafficMirrorSessions []*TrafficMirrorSession `locationName:"trafficMirrorSessionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorSessionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorSessionsOutput) SetNextToken(v string) *DescribeTrafficMirrorSessionsOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorSessions sets the TrafficMirrorSessions field's value. +func (s *DescribeTrafficMirrorSessionsOutput) SetTrafficMirrorSessions(v []*TrafficMirrorSession) *DescribeTrafficMirrorSessionsOutput { + s.TrafficMirrorSessions = v + return s +} + +type DescribeTrafficMirrorTargetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror target description. + // + // * network-interface-id: The ID of the Traffic Mirror session network interface. + // + // * network-load-balancer-arn: The Amazon Resource Name (ARN) of the Network + // Load Balancer that is associated with the session. + // + // * owner-id: The ID of the account that owns the Traffic Mirror session. + // + // * traffic-mirror-target-id: The ID of the Traffic Mirror target. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror targets. 
+ TrafficMirrorTargetIds []*string `locationName:"TrafficMirrorTargetId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrafficMirrorTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorTargetsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetDryRun(v bool) *DescribeTrafficMirrorTargetsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetFilters(v []*Filter) *DescribeTrafficMirrorTargetsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetMaxResults(v int64) *DescribeTrafficMirrorTargetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetNextToken(v string) *DescribeTrafficMirrorTargetsInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorTargetIds sets the TrafficMirrorTargetIds field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetTrafficMirrorTargetIds(v []*string) *DescribeTrafficMirrorTargetsInput { + s.TrafficMirrorTargetIds = v + return s +} + +type DescribeTrafficMirrorTargetsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more Traffic Mirror targets. + TrafficMirrorTargets []*TrafficMirrorTarget `locationName:"trafficMirrorTargetSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorTargetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorTargetsOutput) SetNextToken(v string) *DescribeTrafficMirrorTargetsOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorTargets sets the TrafficMirrorTargets field's value. +func (s *DescribeTrafficMirrorTargetsOutput) SetTrafficMirrorTargets(v []*TrafficMirrorTarget) *DescribeTrafficMirrorTargetsOutput { + s.TrafficMirrorTargets = v + return s +} + type DescribeTransitGatewayAttachmentsInput struct { _ struct{} `type:"structure"` @@ -55486,6 +60133,16 @@ type DescribeVpcEndpointServiceConfigurationsInput struct { // // * service-state - The state of the service (Pending | Available | Deleting // | Deleted | Failed). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -55707,6 +60364,16 @@ type DescribeVpcEndpointServicesInput struct { // One or more filters. // // * service-name: The name of the service. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. The request returns @@ -55825,8 +60492,18 @@ type DescribeVpcEndpointsInput struct { // // * vpc-endpoint-id: The ID of the endpoint. // - // * vpc-endpoint-state: The state of the endpoint. (pending | available - // | deleting | deleted) + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. The request returns @@ -56607,6 +61284,19 @@ type DetachNetworkInterfaceInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // Specifies whether to force a detachment. + // + // * Use the Force parameter only as a last resort to detach a network interface + // from a failed instance. + // + // * If you use the Force parameter to detach a network interface, you might + // not be able to attach a different network interface to the same index + // on the instance without first stopping and starting the instance. + // + // * If you force the detachment of a network interface, the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // might not get updated. This means that the attributes associated with + // the detached network interface might still be visible. The instance metadata + // will get updated when you stop and start the instance. 
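// The tag: and tag-key filters documented in the Describe* inputs above are
// passed as ordinary Filter values. A sketch of the Owner/TeamA example
// against DescribeVpcEndpoints; the tag key names are placeholders and svc
// is assumed to be a configured client.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func endpointsOwnedByTeamA(svc *ec2.EC2) ([]*ec2.VpcEndpoint, error) {
	out, err := svc.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{
		Filters: []*ec2.Filter{
			{
				// tag:<key> — the key goes in the filter name, the value in Values.
				Name:   aws.String("tag:Owner"),
				Values: []*string{aws.String("TeamA")},
			},
			{
				// tag-key — matches any resource tagged with this key, regardless of value.
				Name:   aws.String("tag-key"),
				Values: []*string{aws.String("CostCenter")},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.VpcEndpoints, nil
}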
Force *bool `locationName:"force" type:"boolean"` } @@ -56960,6 +61650,55 @@ func (s *DirectoryServiceAuthenticationRequest) SetDirectoryId(v string) *Direct return s } +type DisableEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DisableEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableEbsEncryptionByDefaultInput) SetDryRun(v bool) *DisableEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type DisableEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // The updated status of encryption by default. + EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s DisableEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. +func (s *DisableEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *DisableEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + type DisableTransitGatewayRouteTablePropagationInput struct { _ struct{} `type:"structure"` @@ -58049,18 +62788,23 @@ func (s *DnsServersOptionsModifyStructure) SetEnabled(v bool) *DnsServersOptions type EbsBlockDevice struct { _ struct{} `type:"structure"` - // Indicates whether the EBS volume is deleted on instance termination. + // Indicates whether the EBS volume is deleted on instance termination. For + // more information, see Preserving Amazon EBS Volumes on Instance Termination + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) + // in the Amazon Elastic Compute Cloud User Guide. DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` - // Indicates whether the EBS volume is encrypted. Encrypted volumes can only - // be attached to instances that support Amazon EBS encryption. - // - // If you are creating a volume from a snapshot, you cannot specify an encryption - // value. This is because only blank volumes can be encrypted on creation. If - // you are creating a snapshot from an existing EBS volume, you cannot specify - // an encryption value that differs from that of the EBS volume. We recommend - // that you omit the encryption value from the block device mappings when creating - // an image from an instance. + // Indicates whether the encryption state of an EBS volume is changed while + // being restored from a backing snapshot. The effect of setting the encryption + // state to true depends on the volume origin (new or from a snapshot), starting + // encryption state, ownership, and whether encryption by default is enabled. 
+ // For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-parameters) + // in the Amazon Elastic Compute Cloud User Guide. + // + // In no case can you remove encryption from an encrypted volume. + // + // Encrypted volumes can only be attached to instances that support Amazon EBS + // encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. @@ -58071,16 +62815,18 @@ type EbsBlockDevice struct { // in the Amazon Elastic Compute Cloud User Guide. // // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS - // for io1 volumes, in most Regions. The maximum IOPS for io1 of 64,000 is guaranteed + // for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed // only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). - // Other instance families guarantee performance up to 32,000 IOPS. + // Other instance families guarantee performance up to 32,000 IOPS. For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. // // Condition: This parameter is required for requests to create io1 volumes; // it is not used in requests to create gp2, st1, sc1, or standard volumes. Iops *int64 `locationName:"iops" type:"integer"` - // Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK - // under which the EBS volume is encrypted. + // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed + // CMK under which the EBS volume is encrypted. // // This parameter is only supported on BlockDeviceMapping objects called by // RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), @@ -58103,9 +62849,10 @@ type EbsBlockDevice struct { // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also set the Iops property. + // The volume type. If you set the type to io1, you must also specify the IOPS + // that the volume supports. // - // Default: standard + // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -58579,6 +63326,55 @@ func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorA return s } +type EnableEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnableEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
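// A short sketch pairing EnableEbsEncryptionByDefault with the read-back
// GetEbsEncryptionByDefault call that appears later in this diff. The
// setting is account- and Region-wide, so real code would gate this
// carefully; the helper name is illustrative.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func enableDefaultEncryption(svc *ec2.EC2) (bool, error) {
	if _, err := svc.EnableEbsEncryptionByDefault(&ec2.EnableEbsEncryptionByDefaultInput{}); err != nil {
		return false, err
	}
	// Confirm the account-wide flag actually took effect.
	out, err := svc.GetEbsEncryptionByDefault(&ec2.GetEbsEncryptionByDefaultInput{})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(out.EbsEncryptionByDefault), nil
}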
+func (s *EnableEbsEncryptionByDefaultInput) SetDryRun(v bool) *EnableEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type EnableEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // The updated status of encryption by default. + EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s EnableEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. +func (s *EnableEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *EnableEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + type EnableTransitGatewayRouteTablePropagationInput struct { _ struct{} `type:"structure"` @@ -59180,6 +63976,295 @@ func (s *ExportClientVpnClientConfigurationOutput) SetClientConfiguration(v stri return s } +type ExportImageInput struct { + _ struct{} `type:"structure"` + + // Token to enable idempotency for export image requests. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A description of the image being exported. The maximum length is 255 bytes. + Description *string `type:"string"` + + // The disk image format. + // + // DiskImageFormat is a required field + DiskImageFormat *string `type:"string" required:"true" enum:"DiskImageFormat"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. If this parameter is not specified, the default role is + // named 'vmimport'. + RoleName *string `type:"string"` + + // Information about the destination S3 bucket. The bucket must exist and grant + // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // + // S3ExportLocation is a required field + S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportImageInput"} + if s.DiskImageFormat == nil { + invalidParams.Add(request.NewErrParamRequired("DiskImageFormat")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.S3ExportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportLocation")) + } + if s.S3ExportLocation != nil { + if err := s.S3ExportLocation.Validate(); err != nil { + invalidParams.AddNested("S3ExportLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
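// Sketch of an ExportImage request supplying the three fields enforced by
// Validate above (DiskImageFormat, ImageId, and S3ExportLocation with its
// required bucket). The AMI ID and bucket name are placeholders.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func startImageExport(svc *ec2.EC2) (*ec2.ExportImageOutput, error) {
	return svc.ExportImage(&ec2.ExportImageInput{
		DiskImageFormat: aws.String("VMDK"), // one of the DiskImageFormat enum values
		ImageId:         aws.String("ami-0123456789abcdef0"),
		S3ExportLocation: &ec2.ExportTaskS3LocationRequest{
			S3Bucket: aws.String("example-export-bucket"),
			S3Prefix: aws.String("exports/"),
		},
	})
}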
+func (s *ExportImageInput) SetClientToken(v string) *ExportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ExportImageInput) SetDescription(v string) *ExportImageInput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageInput) SetDiskImageFormat(v string) *ExportImageInput { + s.DiskImageFormat = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportImageInput) SetDryRun(v bool) *ExportImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageInput) SetImageId(v string) *ExportImageInput { + s.ImageId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageInput) SetRoleName(v string) *ExportImageInput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) *ExportImageInput { + s.S3ExportLocation = v + return s +} + +type ExportImageOutput struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The disk image format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. + RoleName *string `locationName:"roleName" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageOutput) SetDescription(v string) *ExportImageOutput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageOutput) SetDiskImageFormat(v string) *ExportImageOutput { + s.DiskImageFormat = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageOutput) SetExportImageTaskId(v string) *ExportImageOutput { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageOutput) SetImageId(v string) *ExportImageOutput { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageOutput) SetProgress(v string) *ExportImageOutput { + s.Progress = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
+func (s *ExportImageOutput) SetRoleName(v string) *ExportImageOutput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageOutput) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageOutput { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageOutput) SetStatus(v string) *ExportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { + s.StatusMessage = &v + return s +} + +// Describes an export image task. +type ExportImageTask struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageTask) SetDescription(v string) *ExportImageTask { + s.Description = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageTask) SetExportImageTaskId(v string) *ExportImageTask { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageTask) SetImageId(v string) *ExportImageTask { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageTask) SetProgress(v string) *ExportImageTask { + s.Progress = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageTask) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageTask { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageTask) SetStatus(v string) *ExportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { + s.StatusMessage = &v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -59249,6 +64334,87 @@ func (s *ExportTask) SetStatusMessage(v string) *ExportTask { return s } +// Describes the destination for an export image task. +type ExportTaskS3Location struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The prefix (logical hierarchy) in the bucket. 
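// ExportImageTask above is what the polling side of an export sees. A
// sketch that waits for a task to leave the "active" state, assuming the
// companion DescribeExportImageTasks call shipped in this same SDK release;
// the poll interval is arbitrary.
package examples

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func waitForImageExport(svc *ec2.EC2, taskID string) (string, error) {
	for {
		out, err := svc.DescribeExportImageTasks(&ec2.DescribeExportImageTasksInput{
			ExportImageTaskIds: []*string{aws.String(taskID)},
		})
		if err != nil {
			return "", err
		}
		if len(out.ExportImageTasks) == 0 {
			return "", fmt.Errorf("export image task %s not found", taskID)
		}
		// Per the Status doc above: active | completed | deleting | deleted.
		status := aws.StringValue(out.ExportImageTasks[0].Status)
		if status != "active" {
			return status, nil
		}
		time.Sleep(15 * time.Second)
	}
}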
+ S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3Location) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3Location) SetS3Bucket(v string) *ExportTaskS3Location { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { + s.S3Prefix = &v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3LocationRequest struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3LocationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3LocationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTaskS3LocationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTaskS3LocationRequest"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3LocationRequest) SetS3Bucket(v string) *ExportTaskS3LocationRequest { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3LocationRequest { + s.S3Prefix = &v + return s +} + // Describes the format and location for an instance export task. type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -59367,7 +64533,7 @@ type ExportTransitGatewayRoutesInput struct { // One or more filters. The possible values are: // - // * attachment.transit-gateway-attachment-id- The id of the transit gateway + // * attachment.transit-gateway-attachment-id - The id of the transit gateway // attachment. // // * attachment.resource-id - The resource id of the transit gateway attachment. @@ -59391,7 +64557,7 @@ type ExportTransitGatewayRoutesInput struct { // // * transit-gateway-route-destination-cidr-block - The CIDR range. // - // * type - The type of roue (active | blackhole). + // * type - The type of route (active | blackhole). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The name of the S3 bucket. @@ -59478,6 +64644,39 @@ func (s *ExportTransitGatewayRoutesOutput) SetS3Location(v string) *ExportTransi return s } +// Describes a Reserved Instance whose queued purchase was not deleted. +type FailedQueuedPurchaseDeletion struct { + _ struct{} `type:"structure"` + + // The error. + Error *DeleteQueuedReservedInstancesError `locationName:"error" type:"structure"` + + // The ID of the Reserved Instance. 
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s FailedQueuedPurchaseDeletion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedQueuedPurchaseDeletion) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *FailedQueuedPurchaseDeletion) SetError(v *DeleteQueuedReservedInstancesError) *FailedQueuedPurchaseDeletion { + s.Error = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *FailedQueuedPurchaseDeletion) SetReservedInstancesId(v string) *FailedQueuedPurchaseDeletion { + s.ReservedInstancesId = &v + return s +} + // A filter name and value pair that is used to return a more specific list // of results from a describe operation. Filters can be used to match a set // of resources by specific criteria, such as tags, attributes, or IDs. The @@ -60075,7 +65274,8 @@ type FleetLaunchTemplateSpecificationRequest struct { // The name of the launch template. LaunchTemplateName *string `min:"3" type:"string"` - // The version number of the launch template. + // The version number of the launch template. Note: This is a required parameter + // and will be updated soon. Version *string `type:"string"` } @@ -60159,6 +65359,9 @@ type FlowLog struct { // Flow log data can be published to CloudWatch Logs or Amazon S3. LogDestinationType *string `locationName:"logDestinationType" type:"string" enum:"LogDestinationType"` + // The format of the flow log record. + LogFormat *string `locationName:"logFormat" type:"string"` + // The name of the flow log group. LogGroupName *string `locationName:"logGroupName" type:"string"` @@ -60227,6 +65430,12 @@ func (s *FlowLog) SetLogDestinationType(v string) *FlowLog { return s } +// SetLogFormat sets the LogFormat field's value. +func (s *FlowLog) SetLogFormat(v string) *FlowLog { + s.LogFormat = &v + return s +} + // SetLogGroupName sets the LogGroupName field's value. func (s *FlowLog) SetLogGroupName(v string) *FlowLog { s.LogGroupName = &v @@ -60497,6 +65706,178 @@ func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { return s } +type GetCapacityReservationUsageInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. + // + // Valid range: Minimum value of 1. Maximum value of 1000. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetCapacityReservationUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCapacityReservationUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetCapacityReservationUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCapacityReservationUsageInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetCapacityReservationUsageInput) SetCapacityReservationId(v string) *GetCapacityReservationUsageInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetCapacityReservationUsageInput) SetDryRun(v bool) *GetCapacityReservationUsageInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetCapacityReservationUsageInput) SetMaxResults(v int64) *GetCapacityReservationUsageInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCapacityReservationUsageInput) SetNextToken(v string) *GetCapacityReservationUsageInput { + s.NextToken = &v + return s +} + +type GetCapacityReservationUsageOutput struct { + _ struct{} `type:"structure"` + + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The ID of the Capacity Reservation. + CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The type of instance for which the Capacity Reservation reserves capacity. + InstanceType *string `locationName:"instanceType" type:"string"` + + // Information about the Capacity Reservation usage. + InstanceUsages []*InstanceUsage `locationName:"instanceUsageSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. + State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + + // The number of instances for which the Capacity Reservation reserves capacity. 
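// Sketch of reading the usage counters defined above for one reservation;
// the reservation ID is supplied by the caller and the helper name is
// illustrative.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func reservationHeadroom(svc *ec2.EC2, reservationID string) (int64, error) {
	out, err := svc.GetCapacityReservationUsage(&ec2.GetCapacityReservationUsageInput{
		CapacityReservationId: aws.String(reservationID),
	})
	if err != nil {
		return 0, err
	}
	// Remaining launchable instances, per the AvailableInstanceCount doc above.
	return aws.Int64Value(out.AvailableInstanceCount), nil
}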
+ TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` +} + +// String returns the string representation +func (s GetCapacityReservationUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCapacityReservationUsageOutput) GoString() string { + return s.String() +} + +// SetAvailableInstanceCount sets the AvailableInstanceCount field's value. +func (s *GetCapacityReservationUsageOutput) SetAvailableInstanceCount(v int64) *GetCapacityReservationUsageOutput { + s.AvailableInstanceCount = &v + return s +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetCapacityReservationUsageOutput) SetCapacityReservationId(v string) *GetCapacityReservationUsageOutput { + s.CapacityReservationId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *GetCapacityReservationUsageOutput) SetInstanceType(v string) *GetCapacityReservationUsageOutput { + s.InstanceType = &v + return s +} + +// SetInstanceUsages sets the InstanceUsages field's value. +func (s *GetCapacityReservationUsageOutput) SetInstanceUsages(v []*InstanceUsage) *GetCapacityReservationUsageOutput { + s.InstanceUsages = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCapacityReservationUsageOutput) SetNextToken(v string) *GetCapacityReservationUsageOutput { + s.NextToken = &v + return s +} + +// SetState sets the State field's value. +func (s *GetCapacityReservationUsageOutput) SetState(v string) *GetCapacityReservationUsageOutput { + s.State = &v + return s +} + +// SetTotalInstanceCount sets the TotalInstanceCount field's value. +func (s *GetCapacityReservationUsageOutput) SetTotalInstanceCount(v int64) *GetCapacityReservationUsageOutput { + s.TotalInstanceCount = &v + return s +} + type GetConsoleOutputInput struct { _ struct{} `type:"structure"` @@ -60692,6 +66073,104 @@ func (s *GetConsoleScreenshotOutput) SetInstanceId(v string) *GetConsoleScreensh return s } +type GetEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *GetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *GetEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +type GetEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for encryption by default. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s GetEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
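// The GetEbsDefaultKmsKeyId types above back a read-only account query; a
// sketch that returns the ARN of the CMK used for encryption by default.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func defaultEbsKey(svc *ec2.EC2) (string, error) {
	out, err := svc.GetEbsDefaultKmsKeyId(&ec2.GetEbsDefaultKmsKeyIdInput{})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.KmsKeyId), nil
}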
+func (s *GetEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *GetEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + +type GetEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *GetEbsEncryptionByDefaultInput) SetDryRun(v bool) *GetEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type GetEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether encryption by default is enabled. + EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s GetEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. +func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *GetEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + type GetHostReservationPurchasePreviewInput struct { _ struct{} `type:"structure"` @@ -61520,8 +66999,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// Hibernation is currently supported only for Amazon Linux. For more information, -// see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` @@ -61549,8 +67027,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// Hibernation is currently supported only for Amazon Linux. For more information, -// see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -61686,9 +67163,8 @@ type Host struct { // The number of new instances that can be launched onto the Dedicated Host. 
AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"` - // Unique, case-sensitive identifier that you provide to ensure idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // The ID of the Dedicated Host. @@ -61697,6 +67173,10 @@ type Host struct { // The hardware specifications of the Dedicated Host. HostProperties *HostProperties `locationName:"hostProperties" type:"structure"` + // Indicates whether host recovery is enabled or disabled for the Dedicated + // Host. + HostRecovery *string `locationName:"hostRecovery" type:"string" enum:"HostRecovery"` + // The reservation ID of the Dedicated Host. This returns a null response if // the Dedicated Host doesn't have an associated reservation. HostReservationId *string `locationName:"hostReservationId" type:"string"` @@ -61766,6 +67246,12 @@ func (s *Host) SetHostProperties(v *HostProperties) *Host { return s } +// SetHostRecovery sets the HostRecovery field's value. +func (s *Host) SetHostRecovery(v string) *Host { + s.HostRecovery = &v + return s +} + // SetHostReservationId sets the HostReservationId field's value. func (s *Host) SetHostReservationId(v string) *Host { s.HostReservationId = &v @@ -62104,6 +67590,54 @@ func (s *HostReservation) SetUpfrontPrice(v string) *HostReservation { return s } +// The internet key exchange (IKE) version permitted for the VPN tunnel. +type IKEVersionsListValue struct { + _ struct{} `type:"structure"` + + // The IKE version. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s IKEVersionsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IKEVersionsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *IKEVersionsListValue) SetValue(v string) *IKEVersionsListValue { + s.Value = &v + return s +} + +// The IKE version that is permitted for the VPN tunnel. +type IKEVersionsRequestListValue struct { + _ struct{} `type:"structure"` + + // The IKE version. + Value *string `type:"string"` +} + +// String returns the string representation +func (s IKEVersionsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IKEVersionsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *IKEVersionsRequestListValue) SetValue(v string) *IKEVersionsRequestListValue { + s.Value = &v + return s +} + // Describes an IAM instance profile. type IamInstanceProfile struct { _ struct{} `type:"structure"` @@ -62352,7 +67886,7 @@ type Image struct { // The AWS account ID of the image owner. OwnerId *string `locationName:"imageOwnerId" type:"string"` - // The value is Windows for Windows AMIs; otherwise blank. + // This value is set to windows for Windows AMIs; otherwise, it is blank. Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` // Any product codes associated with the AMI. 
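// HostRecovery is now surfaced on the Host type above; a sketch that lists
// Dedicated Hosts and reports each host's recovery setting via the existing
// DescribeHosts call.
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func printHostRecoveryStates(svc *ec2.EC2) error {
	out, err := svc.DescribeHosts(&ec2.DescribeHostsInput{})
	if err != nil {
		return err
	}
	for _, h := range out.Hosts {
		fmt.Printf("%s: host recovery %s\n",
			aws.StringValue(h.HostId), aws.StringValue(h.HostRecovery))
	}
	return nil
}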
@@ -62708,13 +68242,12 @@ func (s *ImportClientVpnClientCertificateRevocationListOutput) SetReturn(v bool) return s } -// Contains the parameters for ImportImage. type ImportImageInput struct { _ struct{} `type:"structure"` // The architecture of the virtual machine. // - // Valid values: i386 | x86_64 + // Valid values: i386 | x86_64 | arm64 Architecture *string `type:"string"` // The client-specific data. @@ -62758,42 +68291,35 @@ type ImportImageInput struct { // * Key ID // // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the alias + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the key + // by the Region of the CMK, the AWS account ID of the CMK owner, the key // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, + // followed by the Region of the CMK, the AWS account ID of the CMK owner, // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // // AWS parses KmsKeyId asynchronously, meaning that the action you call may // appear to complete even though you provided an invalid identifier. This action // will eventually report failure. // - // The specified CMK must exist in the region that the AMI is being copied to. + // The specified CMK must exist in the Region that the AMI is being copied to. KmsKeyId *string `type:"string"` // The license type to be used for the Amazon Machine Image (AMI) after importing. // - // Note: You may only use BYOL if you have existing licenses with rights to - // use these licenses in a third party cloud like AWS. For more information, - // see Prerequisites (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image) - // in the VM Import/Export User Guide. - // - // Valid values include: - // - // * Auto - Detects the source-system operating system (OS) and applies the - // appropriate license. - // - // * AWS - Replaces the source-system license with an AWS license, if appropriate. + // By default, we detect the source-system operating system (OS) and apply the + // appropriate license. Specify AWS to replace the source-system license with + // an AWS license, if appropriate. Specify BYOL to retain the source-system + // license, if appropriate. // - // * BYOL - Retains the source-system license, if appropriate. - // - // Default value: Auto + // To use BYOL, you must have existing licenses with rights to use these licenses + // in a third party cloud, such as AWS. For more information, see Prerequisites + // (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image) + // in the VM Import/Export User Guide. LicenseType *string `type:"string"` // The operating system of the virtual machine. @@ -62887,7 +68413,6 @@ func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput { return s } -// Contains the output for ImportImage. 
type ImportImageOutput struct { _ struct{} `type:"structure"` @@ -63026,7 +68551,7 @@ type ImportImageTask struct { // The architecture of the virtual machine. // - // Valid values: i386 | x86_64 + // Valid values: i386 | x86_64 | arm64 Architecture *string `locationName:"architecture" type:"string"` // A description of the import task. @@ -63157,7 +68682,6 @@ func (s *ImportImageTask) SetStatusMessage(v string) *ImportImageTask { return s } -// Contains the parameters for ImportInstance. type ImportInstanceInput struct { _ struct{} `type:"structure"` @@ -63283,7 +68807,7 @@ type ImportInstanceLaunchSpecification struct { SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data to make available to the instance. - UserData *UserData `locationName:"userData" type:"structure"` + UserData *UserData `locationName:"userData" type:"structure" sensitive:"true"` } // String returns the string representation @@ -63362,7 +68886,6 @@ func (s *ImportInstanceLaunchSpecification) SetUserData(v *UserData) *ImportInst return s } -// Contains the output for ImportInstance. type ImportInstanceOutput struct { _ struct{} `type:"structure"` @@ -63614,7 +69137,6 @@ func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput { return s } -// Contains the parameters for ImportSnapshot. type ImportSnapshotInput struct { _ struct{} `type:"structure"` @@ -63654,23 +69176,22 @@ type ImportSnapshotInput struct { // * Key ID // // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the alias + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed - // by the region of the CMK, the AWS account ID of the CMK owner, the key + // by the Region of the CMK, the AWS account ID of the CMK owner, the key // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, + // followed by the Region of the CMK, the AWS account ID of the CMK owner, // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // // AWS parses KmsKeyId asynchronously, meaning that the action you call may // appear to complete even though you provided an invalid identifier. This action // will eventually report failure. // - // The specified CMK must exist in the region that the snapshot is being copied + // The specified CMK must exist in the Region that the snapshot is being copied // to. KmsKeyId *string `type:"string"` @@ -63736,7 +69257,6 @@ func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { return s } -// Contains the output for ImportSnapshot. type ImportSnapshotOutput struct { _ struct{} `type:"structure"` @@ -63820,7 +69340,6 @@ func (s *ImportSnapshotTask) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Impor return s } -// Contains the parameters for ImportVolume. type ImportVolumeInput struct { _ struct{} `type:"structure"` @@ -63918,7 +69437,6 @@ func (s *ImportVolumeInput) SetVolume(v *VolumeDetail) *ImportVolumeInput { return s } -// Contains the output for ImportVolume. 
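// The CMK identifier forms listed above (key ID, alias, key ARN, alias ARN)
// plug straight into KmsKeyId. A sketch of an encrypted snapshot import
// using a key alias; the bucket, object key, and alias are placeholders.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func importEncryptedSnapshot(svc *ec2.EC2) (*ec2.ImportSnapshotOutput, error) {
	return svc.ImportSnapshot(&ec2.ImportSnapshotInput{
		Description: aws.String("imported from S3"),
		Encrypted:   aws.Bool(true),
		KmsKeyId:    aws.String("alias/ExampleAlias"), // any of the forms above works
		DiskContainer: &ec2.SnapshotDiskContainer{
			Format: aws.String("VMDK"),
			UserBucket: &ec2.UserBucket{
				S3Bucket: aws.String("example-import-bucket"),
				S3Key:    aws.String("disks/example.vmdk"),
			},
		},
	})
}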
type ImportVolumeOutput struct { _ struct{} `type:"structure"` @@ -64838,6 +70356,8 @@ type InstanceNetworkInterface struct { Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // Describes the type of network interface. + // + // Valid values: interface | efa InterfaceType *string `locationName:"interfaceType" type:"string"` // One or more IPv6 addresses associated with the network interface. @@ -65102,16 +70622,24 @@ type InstanceNetworkInterfaceSpecification struct { // interface when launching an instance. Description *string `locationName:"description" type:"string"` - // The index of the device on the instance for the network interface attachment. - // If you are specifying a network interface in a RunInstances request, you - // must provide the device index. + // The position of the network interface in the attachment order. A primary + // network interface has a device index of 0. + // + // If you specify a network interface when launching an instance, you must specify + // the device index. DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` // The IDs of the security groups for the network interface. Applies only if // creating a network interface when launching an instance. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // The type of interface. + // The type of network interface. To create an Elastic Fabric Adapter (EFA), + // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // If you are not creating an EFA, specify interface or omit this parameter. + // + // Valid values: interface | efa InterfaceType *string `type:"string"` // A number of IPv6 addresses to assign to the network interface. Amazon EC2 @@ -65132,23 +70660,26 @@ type InstanceNetworkInterfaceSpecification struct { // The private IPv4 address of the network interface. Applies only if creating // a network interface when launching an instance. You cannot specify this option - // if you're launching more than one instance in a RunInstances request. + // if you're launching more than one instance in a RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + // request. PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` // One or more private IPv4 addresses to assign to the network interface. Only // one private IPv4 address can be designated as primary. You cannot specify // this option if you're launching more than one instance in a RunInstances + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) // request. PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"` // The number of secondary private IPv4 addresses. You can't specify this option // and specify more than one private IP address using the private IP addresses // option. You cannot specify this option if you're launching more than one - // instance in a RunInstances request. + // instance in a RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + // request. SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an instance. 
+ // The ID of the subnet associated with the network interface. Applies only + // if creating a network interface when launching an instance. SubnetId *string `locationName:"subnetId" type:"string"` } @@ -65292,6 +70823,39 @@ func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivat return s } +// The instance details to specify which volumes should be snapshotted. +type InstanceSpecification struct { + _ struct{} `type:"structure"` + + // Excludes the root volume from being snapshotted. + ExcludeBootVolume *bool `type:"boolean"` + + // The instance to specify which volumes should be snapshotted. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s InstanceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceSpecification) GoString() string { + return s.String() +} + +// SetExcludeBootVolume sets the ExcludeBootVolume field's value. +func (s *InstanceSpecification) SetExcludeBootVolume(v bool) *InstanceSpecification { + s.ExcludeBootVolume = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceSpecification) SetInstanceId(v string) *InstanceSpecification { + s.InstanceId = &v + return s +} + // Describes the current state of an instance. type InstanceState struct { _ struct{} `type:"structure"` @@ -65614,6 +71178,39 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { return s } +// Information about the Capacity Reservation usage. +type InstanceUsage struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account that is making use of the Capacity Reservation. + AccountId *string `locationName:"accountId" type:"string"` + + // The number of instances the AWS account currently has in the Capacity Reservation. + UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` +} + +// String returns the string representation +func (s InstanceUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceUsage) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *InstanceUsage) SetAccountId(v string) *InstanceUsage { + s.AccountId = &v + return s +} + +// SetUsedInstanceCount sets the UsedInstanceCount field's value. +func (s *InstanceUsage) SetUsedInstanceCount(v int64) *InstanceUsage { + s.UsedInstanceCount = &v + return s +} + // Describes an internet gateway. type InternetGateway struct { _ struct{} `type:"structure"` @@ -66550,9 +72147,8 @@ type LaunchTemplateCpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -66840,7 +72436,6 @@ func (s *LaunchTemplateHibernationOptions) SetConfigured(v bool) *LaunchTemplate // Indicates whether the instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// Hibernation is currently supported only for Amazon Linux. 
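// The InterfaceType documentation above says to pass "efa" to create an
// Elastic Fabric Adapter. A sketch of a RunInstances call doing that on the
// primary interface (device index 0, per the doc above); the AMI, subnet,
// and security group IDs are placeholders, and EFA is only valid on
// supported instance types.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func runEfaInstance(svc *ec2.EC2) (*ec2.Reservation, error) {
	return svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"),
		InstanceType: aws.String("c5n.18xlarge"),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
		NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{{
			DeviceIndex:   aws.Int64(0), // primary network interface
			InterfaceType: aws.String("efa"),
			SubnetId:      aws.String("subnet-0123456789abcdef0"),
			Groups:        []*string{aws.String("sg-0123456789abcdef0")},
		}},
	})
}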
type LaunchTemplateHibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -67150,7 +72745,13 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // The IDs of one or more security groups. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // The type of networking interface. + // The type of network interface. To create an Elastic Fabric Adapter (EFA), + // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // If you are not creating an EFA, specify interface or omit this parameter. + // + // Valid values: interface | efa InterfaceType *string `type:"string"` // The number of IPv6 addresses to assign to a network interface. Amazon EC2 @@ -67743,7 +73344,7 @@ type LaunchTemplateTagSpecificationRequest struct { // The type of resource to tag. Currently, the resource types that support tagging // on creation are instance and volume. To tag a resource after it has been - // created, see CreateTags. + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `type:"string" enum:"ResourceType"` // The tags to apply to the resource. @@ -68207,7 +73808,7 @@ func (s *ModifyCapacityReservationInput) SetInstanceCount(v int64) *ModifyCapaci type ModifyCapacityReservationOutput struct { _ struct{} `type:"structure"` - // Information about the Capacity Reservation. + // Returns true if the request succeeds; otherwise, it returns an error. Return *bool `locationName:"return" type:"boolean"` } @@ -68265,6 +73866,13 @@ type ModifyClientVpnEndpointInput struct { // The ARN of the server certificate to be used. The server certificate must // be provisioned in AWS Certificate Manager (ACM). ServerCertificateArn *string `type:"string"` + + // Indicates whether the VPN is split-tunnel. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. + SplitTunnel *bool `type:"boolean"` } // String returns the string representation @@ -68326,6 +73934,12 @@ func (s *ModifyClientVpnEndpointInput) SetServerCertificateArn(v string) *Modify return s } +// SetSplitTunnel sets the SplitTunnel field's value. +func (s *ModifyClientVpnEndpointInput) SetSplitTunnel(v bool) *ModifyClientVpnEndpointInput { + s.SplitTunnel = &v + return s +} + type ModifyClientVpnEndpointOutput struct { _ struct{} `type:"structure"` @@ -68349,6 +73963,96 @@ func (s *ModifyClientVpnEndpointOutput) SetReturn(v bool) *ModifyClientVpnEndpoi return s } +type ModifyEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The identifier of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, + // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted + // state must be true. + // + // You can specify the CMK using any of the following: + // + // * Key ID. 
For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Key alias. For example, alias/ExampleAlias. + // + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, + // alias, or ARN that is not valid, the action can appear to complete, but eventually + // fails. + // + // KmsKeyId is a required field + KmsKeyId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyEbsDefaultKmsKeyIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyEbsDefaultKmsKeyIdInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *ModifyEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ModifyEbsDefaultKmsKeyIdInput) SetKmsKeyId(v string) *ModifyEbsDefaultKmsKeyIdInput { + s.KmsKeyId = &v + return s +} + +type ModifyEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for encryption by default. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s ModifyEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ModifyEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ModifyEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + type ModifyFleetInput struct { _ struct{} `type:"structure"` @@ -68604,14 +74308,17 @@ type ModifyHostsInput struct { _ struct{} `type:"structure"` // Specify whether to enable or disable auto-placement. - // - // AutoPlacement is a required field - AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"` + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` // The IDs of the Dedicated Hosts to modify. // // HostIds is a required field HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to enable or disable host recovery for the Dedicated Host. + // For more information, see Host Recovery (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html) + // in the Amazon Elastic Compute Cloud User Guide. + HostRecovery *string `type:"string" enum:"HostRecovery"` } // String returns the string representation @@ -68627,9 +74334,6 @@ func (s ModifyHostsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ModifyHostsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ModifyHostsInput"} - if s.AutoPlacement == nil { - invalidParams.Add(request.NewErrParamRequired("AutoPlacement")) - } if s.HostIds == nil { invalidParams.Add(request.NewErrParamRequired("HostIds")) } @@ -68652,6 +74356,12 @@ func (s *ModifyHostsInput) SetHostIds(v []*string) *ModifyHostsInput { return s } +// SetHostRecovery sets the HostRecovery field's value. +func (s *ModifyHostsInput) SetHostRecovery(v string) *ModifyHostsInput { + s.HostRecovery = &v + return s +} + type ModifyHostsOutput struct { _ struct{} `type:"structure"` @@ -70036,6 +75746,9 @@ type ModifySpotFleetRequestInput struct { // the Spot Fleet. ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + // The number of On-Demand Instances in the fleet. + OnDemandTargetCapacity *int64 `type:"integer"` + // The ID of the Spot Fleet request. // // SpotFleetRequestId is a required field @@ -70074,6 +75787,12 @@ func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v strin return s } +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *ModifySpotFleetRequestInput) SetOnDemandTargetCapacity(v int64) *ModifySpotFleetRequestInput { + s.OnDemandTargetCapacity = &v + return s +} + // SetSpotFleetRequestId sets the SpotFleetRequestId field's value. func (s *ModifySpotFleetRequestInput) SetSpotFleetRequestId(v string) *ModifySpotFleetRequestInput { s.SpotFleetRequestId = &v @@ -70123,10 +75842,8 @@ type ModifySubnetAttributeInput struct { // or later of the Amazon EC2 API. AssignIpv6AddressOnCreation *AttributeBooleanValue `type:"structure"` - // Specify true to indicate that network interfaces created in the specified - // subnet should be assigned a public IPv4 address. This includes a network - // interface that's created when launching an instance into the subnet (the - // instance therefore receives a public IPv4 address). + // Specify true to indicate that ENIs attached to instances created in the specified + // subnet should be assigned a public IPv4 address. MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` // The ID of the subnet. @@ -70190,6 +75907,413 @@ func (s ModifySubnetAttributeOutput) GoString() string { return s.String() } +type ModifyTrafficMirrorFilterNetworkServicesInput struct { + _ struct{} `type:"structure"` + + // The network service, for example Amazon DNS, that you want to mirror. + AddNetworkServices []*string `locationName:"AddNetworkService" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The network service, for example Amazon DNS, that you no longer want to mirror. + RemoveNetworkServices []*string `locationName:"RemoveNetworkService" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. 
+ // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorFilterNetworkServicesInput"} + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddNetworkServices sets the AddNetworkServices field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetAddNetworkServices(v []*string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.AddNetworkServices = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetDryRun(v bool) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.DryRun = &v + return s +} + +// SetRemoveNetworkServices sets the RemoveNetworkServices field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetRemoveNetworkServices(v []*string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.RemoveNetworkServices = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetTrafficMirrorFilterId(v string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.TrafficMirrorFilterId = &v + return s +} + +type ModifyTrafficMirrorFilterNetworkServicesOutput struct { + _ struct{} `type:"structure"` + + // The Traffic Mirror filter that the network service is associated with. + TrafficMirrorFilter *TrafficMirrorFilter `locationName:"trafficMirrorFilter" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilter sets the TrafficMirrorFilter field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesOutput) SetTrafficMirrorFilter(v *TrafficMirrorFilter) *ModifyTrafficMirrorFilterNetworkServicesOutput { + s.TrafficMirrorFilter = v + return s +} + +type ModifyTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // The description to assign to the Traffic Mirror rule. + Description *string `type:"string"` + + // The destination CIDR block to assign to the Traffic Mirror rule. + DestinationCidrBlock *string `type:"string"` + + // The destination ports that are associated with the Traffic Mirror rule. + DestinationPortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The protocol, for example TCP, to assign to the Traffic Mirror rule. 
+ Protocol *int64 `type:"integer"` + + // The properties that you want to remove from the Traffic Mirror filter rule. + // + // When you remove a property from a Traffic Mirror filter rule, the property + // is set to the default. + RemoveFields []*string `locationName:"RemoveField" type:"list"` + + // The action to assign to the rule. + RuleAction *string `type:"string" enum:"TrafficMirrorRuleAction"` + + // The number of the Traffic Mirror rule. This number must be unique for each + // Traffic Mirror rule in a given direction. The rules are processed in ascending + // order by rule number. + RuleNumber *int64 `type:"integer"` + + // The source CIDR block to assign to the Traffic Mirror rule. + SourceCidrBlock *string `type:"string"` + + // The port range to assign to the Traffic Mirror rule. + SourcePortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // The type of traffic (ingress | egress) to assign to the rule. + TrafficDirection *string `type:"string" enum:"TrafficDirection"` + + // The ID of the Traffic Mirror rule. + // + // TrafficMirrorFilterRuleId is a required field + TrafficMirrorFilterRuleId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorFilterRuleInput"} + if s.TrafficMirrorFilterRuleId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterRuleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetDescription(v string) *ModifyTrafficMirrorFilterRuleInput { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetDestinationCidrBlock(v string) *ModifyTrafficMirrorFilterRuleInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationPortRange sets the DestinationPortRange field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetDestinationPortRange(v *TrafficMirrorPortRangeRequest) *ModifyTrafficMirrorFilterRuleInput { + s.DestinationPortRange = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetDryRun(v bool) *ModifyTrafficMirrorFilterRuleInput { + s.DryRun = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetProtocol(v int64) *ModifyTrafficMirrorFilterRuleInput { + s.Protocol = &v + return s +} + +// SetRemoveFields sets the RemoveFields field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetRemoveFields(v []*string) *ModifyTrafficMirrorFilterRuleInput { + s.RemoveFields = v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetRuleAction(v string) *ModifyTrafficMirrorFilterRuleInput { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. 
+func (s *ModifyTrafficMirrorFilterRuleInput) SetRuleNumber(v int64) *ModifyTrafficMirrorFilterRuleInput { + s.RuleNumber = &v + return s +} + +// SetSourceCidrBlock sets the SourceCidrBlock field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetSourceCidrBlock(v string) *ModifyTrafficMirrorFilterRuleInput { + s.SourceCidrBlock = &v + return s +} + +// SetSourcePortRange sets the SourcePortRange field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetSourcePortRange(v *TrafficMirrorPortRangeRequest) *ModifyTrafficMirrorFilterRuleInput { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetTrafficDirection(v string) *ModifyTrafficMirrorFilterRuleInput { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterRuleId(v string) *ModifyTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type ModifyTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // Modifies a Traffic Mirror rule. + TrafficMirrorFilterRule *TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRule" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterRule sets the TrafficMirrorFilterRule field's value. +func (s *ModifyTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRule(v *TrafficMirrorFilterRule) *ModifyTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRule = v + return s +} + +type ModifyTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // The description to assign to the Traffic Mirror session. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of bytes in each packet to mirror. These are bytes after the VXLAN + // header. To mirror a subset, set this to the length (in bytes) to mirror. + // For example, if you set this value to 100, then the first 100 bytes that + // meet the filter criteria are copied to the target. Do not specify this parameter + // when you want to mirror the entire packet. + PacketLength *int64 `type:"integer"` + + // The properties that you want to remove from the Traffic Mirror session. + // + // When you remove a property from a Traffic Mirror session, the property is + // set to the default. + RemoveFields []*string `locationName:"RemoveField" type:"list"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + SessionNumber *int64 `type:"integer"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `type:"string"` + + // The ID of the Traffic Mirror session. 
+ // + // TrafficMirrorSessionId is a required field + TrafficMirrorSessionId *string `type:"string" required:"true"` + + // The Traffic Mirror target. The target must be in the same VPC as the source, + // or have a VPC peering connection with the source. + TrafficMirrorTargetId *string `type:"string"` + + // The virtual network ID of the Traffic Mirror session. + VirtualNetworkId *int64 `type:"integer"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorSessionInput"} + if s.TrafficMirrorSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorSessionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTrafficMirrorSessionInput) SetDescription(v string) *ModifyTrafficMirrorSessionInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTrafficMirrorSessionInput) SetDryRun(v bool) *ModifyTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *ModifyTrafficMirrorSessionInput) SetPacketLength(v int64) *ModifyTrafficMirrorSessionInput { + s.PacketLength = &v + return s +} + +// SetRemoveFields sets the RemoveFields field's value. +func (s *ModifyTrafficMirrorSessionInput) SetRemoveFields(v []*string) *ModifyTrafficMirrorSessionInput { + s.RemoveFields = v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *ModifyTrafficMirrorSessionInput) SetSessionNumber(v int64) *ModifyTrafficMirrorSessionInput { + s.SessionNumber = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorFilterId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorSessionId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorSessionId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorTargetId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetVirtualNetworkId(v int64) *ModifyTrafficMirrorSessionInput { + s.VirtualNetworkId = &v + return s +} + +type ModifyTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // Information about the Traffic Mirror session. 
+ TrafficMirrorSession *TrafficMirrorSession `locationName:"trafficMirrorSession" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorSession sets the TrafficMirrorSession field's value. +func (s *ModifyTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMirrorSession) *ModifyTrafficMirrorSessionOutput { + s.TrafficMirrorSession = v + return s +} + type ModifyTransitGatewayVpcAttachmentInput struct { _ struct{} `type:"structure"` @@ -70704,8 +76828,7 @@ type ModifyVpcEndpointInput struct { DryRun *bool `type:"boolean"` // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. If this parameter is not specified, - // we attach a default policy that allows full access to the service. + // policy must be in valid JSON format. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicate whether a private hosted zone is associated @@ -71230,13 +77353,24 @@ func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput type ModifyVpnConnectionInput struct { _ struct{} `type:"structure"` + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The ID of the transit gateway. TransitGatewayId *string `type:"string"` + // The ID of the VPN connection. + // // VpnConnectionId is a required field VpnConnectionId *string `type:"string" required:"true"` + // The ID of the virtual private gateway at the AWS side of the VPN connection. VpnGatewayId *string `type:"string"` } @@ -71263,6 +77397,12 @@ func (s *ModifyVpnConnectionInput) Validate() error { return nil } +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetCustomerGatewayId(v string) *ModifyVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *ModifyVpnConnectionInput) SetDryRun(v bool) *ModifyVpnConnectionInput { s.DryRun = &v @@ -71310,6 +77450,416 @@ func (s *ModifyVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *ModifyVp return s } +type ModifyVpnTunnelCertificateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. 
+ // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnTunnelCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelCertificateInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelCertificateInput) SetDryRun(v bool) *ModifyVpnTunnelCertificateInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnConnectionId(v string) *ModifyVpnTunnelCertificateInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelCertificateInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelCertificateOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelCertificateOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelCertificateOutput { + s.VpnConnection = v + return s +} + +type ModifyVpnTunnelOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tunnel options to modify. + // + // TunnelOptions is a required field + TunnelOptions *ModifyVpnTunnelOptionsSpecification `type:"structure" required:"true"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyVpnTunnelOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelOptionsInput"} + if s.TunnelOptions == nil { + invalidParams.Add(request.NewErrParamRequired("TunnelOptions")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelOptionsInput) SetDryRun(v bool) *ModifyVpnTunnelOptionsInput { + s.DryRun = &v + return s +} + +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *ModifyVpnTunnelOptionsInput) SetTunnelOptions(v *ModifyVpnTunnelOptionsSpecification) *ModifyVpnTunnelOptionsInput { + s.TunnelOptions = v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelOptionsInput) SetVpnConnectionId(v string) *ModifyVpnTunnelOptionsInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelOptionsInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelOptionsInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelOptionsOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelOptionsOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelOptionsOutput { + s.VpnConnection = v + return s +} + +// The AWS Site-to-Site VPN tunnel options to modify. +type ModifyVpnTunnelOptionsSpecification struct { + _ struct{} `type:"structure"` + + // The number of seconds after which a DPD timeout occurs. + // + // Constraints: A value between 0 and 30. + // + // Default: 30 + DPDTimeoutSeconds *int64 `type:"integer"` + + // The IKE versions that are permitted for the VPN tunnel. + // + // Valid values: ikev1 | ikev2 + IKEVersions []*IKEVersionsRequestListValue `locationName:"IKEVersion" locationNameList:"item" type:"list"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 1 IKE negotiations. + // + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. + // + // Valid values: AES128 | AES256 + Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. 
+ // + // Valid values: SHA1 | SHA2-256 + Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 1 of the IKE negotiation, in seconds. + // + // Constraints: A value between 900 and 28,800. + // + // Default: 28800 + Phase1LifetimeSeconds *int64 `type:"integer"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 2 IKE negotiations. + // + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: AES128 | AES256 + Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: SHA1 | SHA2-256 + Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 2 of the IKE negotiation, in seconds. + // + // Constraints: A value between 900 and 3,600. The value must be less than the + // value for Phase1LifetimeSeconds. + // + // Default: 3600 + Phase2LifetimeSeconds *int64 `type:"integer"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and the customer gateway. + // + // Constraints: Allowed characters are alphanumeric characters, periods (.), + // and underscores (_). Must be between 8 and 64 characters in length and cannot + // start with zero (0). + PreSharedKey *string `type:"string"` + + // The percentage of the rekey window (determined by RekeyMarginTimeSeconds) + // during which the rekey time is randomly selected. + // + // Constraints: A value between 0 and 100. + // + // Default: 100 + RekeyFuzzPercentage *int64 `type:"integer"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during + // which the AWS side of the VPN connection performs an IKE rekey. The exact + // time of the rekey is randomly selected based on the value for RekeyFuzzPercentage. + // + // Constraints: A value between 60 and half of Phase2LifetimeSeconds. + // + // Default: 540 + RekeyMarginTimeSeconds *int64 `type:"integer"` + + // The number of packets in an IKE replay window. + // + // Constraints: A value between 64 and 2048. + // + // Default: 1024 + ReplayWindowSize *int64 `type:"integer"` + + // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same virtual private + // gateway. + // + // Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. 
The following + // CIDR blocks are reserved and cannot be used: + // + // * 169.254.0.0/30 + // + // * 169.254.1.0/30 + // + // * 169.254.2.0/30 + // + // * 169.254.3.0/30 + // + // * 169.254.4.0/30 + // + // * 169.254.5.0/30 + // + // * 169.254.169.252/30 + TunnelInsideCidr *string `type:"string"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsSpecification) GoString() string { + return s.String() +} + +// SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.DPDTimeoutSeconds = &v + return s +} + +// SetIKEVersions sets the IKEVersions field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.IKEVersions = v + return s +} + +// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1DHGroupNumbers = v + return s +} + +// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1EncryptionAlgorithms = v + return s +} + +// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1IntegrityAlgorithms = v + return s +} + +// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1LifetimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.Phase1LifetimeSeconds = &v + return s +} + +// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2DHGroupNumbers = v + return s +} + +// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2EncryptionAlgorithms = v + return s +} + +// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2IntegrityAlgorithms = v + return s +} + +// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2LifetimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.Phase2LifetimeSeconds = &v + return s +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPreSharedKey(v string) *ModifyVpnTunnelOptionsSpecification { + s.PreSharedKey = &v + return s +} + +// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value. 
+func (s *ModifyVpnTunnelOptionsSpecification) SetRekeyFuzzPercentage(v int64) *ModifyVpnTunnelOptionsSpecification { + s.RekeyFuzzPercentage = &v + return s +} + +// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetRekeyMarginTimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.RekeyMarginTimeSeconds = &v + return s +} + +// SetReplayWindowSize sets the ReplayWindowSize field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *ModifyVpnTunnelOptionsSpecification { + s.ReplayWindowSize = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *ModifyVpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -71569,7 +78119,7 @@ type NatGateway struct { NatGatewayId *string `locationName:"natGatewayId" type:"string"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"` @@ -72516,7 +79066,7 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration { return s } -// The allocation strategy of On-Demand Instances in an EC2 Fleet. +// Describes the configuration of On-Demand Instances in an EC2 Fleet. type OnDemandOptions struct { _ struct{} `type:"structure"` @@ -72528,6 +79078,10 @@ type OnDemandOptions struct { // Fleet defaults to lowest-price. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"FleetOnDemandAllocationStrategy"` + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. + MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` + // The minimum target capacity for On-Demand Instances in the fleet. If the // minimum target capacity is not reached, the fleet launches no instances. MinTargetCapacity *int64 `locationName:"minTargetCapacity" type:"integer"` @@ -72557,6 +79111,12 @@ func (s *OnDemandOptions) SetAllocationStrategy(v string) *OnDemandOptions { return s } +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *OnDemandOptions) SetMaxTotalPrice(v string) *OnDemandOptions { + s.MaxTotalPrice = &v + return s +} + // SetMinTargetCapacity sets the MinTargetCapacity field's value. func (s *OnDemandOptions) SetMinTargetCapacity(v int64) *OnDemandOptions { s.MinTargetCapacity = &v @@ -72575,7 +79135,7 @@ func (s *OnDemandOptions) SetSingleInstanceType(v bool) *OnDemandOptions { return s } -// The allocation strategy of On-Demand Instances in an EC2 Fleet. +// Describes the configuration of On-Demand Instances in an EC2 Fleet. type OnDemandOptionsRequest struct { _ struct{} `type:"structure"` @@ -72587,6 +79147,10 @@ type OnDemandOptionsRequest struct { // Fleet defaults to lowest-price. AllocationStrategy *string `type:"string" enum:"FleetOnDemandAllocationStrategy"` + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. + MaxTotalPrice *string `type:"string"` + // The minimum target capacity for On-Demand Instances in the fleet. 
If the
 	// minimum target capacity is not reached, the fleet launches no instances.
 	MinTargetCapacity *int64 `type:"integer"`
@@ -72616,6 +79180,12 @@ func (s *OnDemandOptionsRequest) SetAllocationStrategy(v string) *OnDemandOption
 	return s
 }
 
+// SetMaxTotalPrice sets the MaxTotalPrice field's value.
+func (s *OnDemandOptionsRequest) SetMaxTotalPrice(v string) *OnDemandOptionsRequest {
+	s.MaxTotalPrice = &v
+	return s
+}
+
 // SetMinTargetCapacity sets the MinTargetCapacity field's value.
 func (s *OnDemandOptionsRequest) SetMinTargetCapacity(v int64) *OnDemandOptionsRequest {
 	s.MinTargetCapacity = &v
@@ -72776,6 +79346,296 @@ func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalVpcToRemoteClas
 	return s
 }
 
+// The Diffie-Hellman group number for phase 1 IKE negotiations.
+type Phase1DHGroupNumbersListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The Diffie-Hellman group number.
+	Value *int64 `locationName:"value" type:"integer"`
+}
+
+// String returns the string representation
+func (s Phase1DHGroupNumbersListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1DHGroupNumbersListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1DHGroupNumbersListValue) SetValue(v int64) *Phase1DHGroupNumbersListValue {
+	s.Value = &v
+	return s
+}
+
+// Specifies a Diffie-Hellman group number for the VPN tunnel for phase 1 IKE
+// negotiations.
+type Phase1DHGroupNumbersRequestListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The Diffie-Hellman group number.
+	Value *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Phase1DHGroupNumbersRequestListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1DHGroupNumbersRequestListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1DHGroupNumbersRequestListValue) SetValue(v int64) *Phase1DHGroupNumbersRequestListValue {
+	s.Value = &v
+	return s
+}
+
+// The encryption algorithm for phase 1 IKE negotiations.
+type Phase1EncryptionAlgorithmsListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The value for the encryption algorithm.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s Phase1EncryptionAlgorithmsListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1EncryptionAlgorithmsListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1EncryptionAlgorithmsListValue) SetValue(v string) *Phase1EncryptionAlgorithmsListValue {
+	s.Value = &v
+	return s
+}
+
+// Specifies the encryption algorithm for the VPN tunnel for phase 1 IKE negotiations.
+type Phase1EncryptionAlgorithmsRequestListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The value for the encryption algorithm.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Phase1EncryptionAlgorithmsRequestListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1EncryptionAlgorithmsRequestListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1EncryptionAlgorithmsRequestListValue) SetValue(v string) *Phase1EncryptionAlgorithmsRequestListValue {
+	s.Value = &v
+	return s
+}
+
+// The integrity algorithm for phase 1 IKE negotiations.
+type Phase1IntegrityAlgorithmsListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The value for the integrity algorithm.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s Phase1IntegrityAlgorithmsListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1IntegrityAlgorithmsListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1IntegrityAlgorithmsListValue) SetValue(v string) *Phase1IntegrityAlgorithmsListValue {
+	s.Value = &v
+	return s
+}
+
+// Specifies the integrity algorithm for the VPN tunnel for phase 1 IKE negotiations.
+type Phase1IntegrityAlgorithmsRequestListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The value for the integrity algorithm.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Phase1IntegrityAlgorithmsRequestListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase1IntegrityAlgorithmsRequestListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase1IntegrityAlgorithmsRequestListValue) SetValue(v string) *Phase1IntegrityAlgorithmsRequestListValue {
+	s.Value = &v
+	return s
+}
+
+// The Diffie-Hellman group number for phase 2 IKE negotiations.
+type Phase2DHGroupNumbersListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The Diffie-Hellman group number.
+	Value *int64 `locationName:"value" type:"integer"`
+}
+
+// String returns the string representation
+func (s Phase2DHGroupNumbersListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase2DHGroupNumbersListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase2DHGroupNumbersListValue) SetValue(v int64) *Phase2DHGroupNumbersListValue {
+	s.Value = &v
+	return s
+}
+
+// Specifies a Diffie-Hellman group number for the VPN tunnel for phase 2 IKE
+// negotiations.
+type Phase2DHGroupNumbersRequestListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The Diffie-Hellman group number.
+	Value *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Phase2DHGroupNumbersRequestListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase2DHGroupNumbersRequestListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase2DHGroupNumbersRequestListValue) SetValue(v int64) *Phase2DHGroupNumbersRequestListValue {
+	s.Value = &v
+	return s
+}
+
+// The encryption algorithm for phase 2 IKE negotiations.
+type Phase2EncryptionAlgorithmsListValue struct {
+	_ struct{} `type:"structure"`
+
+	// The encryption algorithm.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s Phase2EncryptionAlgorithmsListValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Phase2EncryptionAlgorithmsListValue) GoString() string {
+	return s.String()
+}
+
+// SetValue sets the Value field's value.
+func (s *Phase2EncryptionAlgorithmsListValue) SetValue(v string) *Phase2EncryptionAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the encryption algorithm for the VPN tunnel for phase 2 IKE negotiations. +type Phase2EncryptionAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The encryption algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase2EncryptionAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2EncryptionAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2EncryptionAlgorithmsRequestListValue) SetValue(v string) *Phase2EncryptionAlgorithmsRequestListValue { + s.Value = &v + return s +} + +// The integrity algorithm for phase 2 IKE negotiations. +type Phase2IntegrityAlgorithmsListValue struct { + _ struct{} `type:"structure"` + + // The integrity algorithm. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Phase2IntegrityAlgorithmsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2IntegrityAlgorithmsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2IntegrityAlgorithmsListValue) SetValue(v string) *Phase2IntegrityAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the integrity algorithm for the VPN tunnel for phase 2 IKE negotiations. +type Phase2IntegrityAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The integrity algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase2IntegrityAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2IntegrityAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2IntegrityAlgorithmsRequestListValue) SetValue(v string) *Phase2IntegrityAlgorithmsRequestListValue { + s.Value = &v + return s +} + // Describes the placement of an instance. type Placement struct { _ struct{} `type:"structure"` @@ -72787,7 +79647,7 @@ type Placement struct { // The Availability Zone of the instance. // // If not specified, an Availability Zone will be automatically chosen for you - // based on the load balancing criteria for the region. + // based on the load balancing criteria for the Region. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The name of the placement group the instance is in. @@ -73317,7 +80177,7 @@ type ProvisionByoipCidrInput struct { // The public IPv4 address range, in CIDR notation. The most specific prefix // that you can specify is /24. The address range cannot overlap with another - // address range that you've brought to this or another region. + // address range that you've brought to this or another Region. // // Cidr is a required field Cidr *string `type:"string" required:"true"` @@ -73412,33 +80272,33 @@ func (s *ProvisionByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *ProvisionByoipCid } // Reserved. 
If you need to sustain traffic greater than the documented limits -// (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), +// (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). type ProvisionedBandwidth struct { _ struct{} `type:"structure"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). Provisioned *string `locationName:"provisioned" type:"string"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). RequestTime *time.Time `locationName:"requestTime" type:"timestamp"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). Requested *string `locationName:"requested" type:"string"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). Status *string `locationName:"status" type:"string"` } @@ -73686,9 +80546,8 @@ func (s *Purchase) SetUpfrontPrice(v string) *Purchase { type PurchaseHostReservationInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string"` // The currency in which the totalUpfrontPrice, LimitPrice, and totalHourlyPrice @@ -73773,9 +80632,8 @@ func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostRese type PurchaseHostReservationOutput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. 
For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // The currency in which the totalUpfrontPrice and totalHourlyPrice amounts @@ -73905,6 +80763,10 @@ type PurchaseReservedInstancesOfferingInput struct { // prices. LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + // The time at which to purchase the Reserved Instance, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + PurchaseTime *time.Time `type:"timestamp"` + // The ID of the Reserved Instance offering to purchase. // // ReservedInstancesOfferingId is a required field @@ -73955,6 +80817,12 @@ func (s *PurchaseReservedInstancesOfferingInput) SetLimitPrice(v *ReservedInstan return s } +// SetPurchaseTime sets the PurchaseTime field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetPurchaseTime(v time.Time) *PurchaseReservedInstancesOfferingInput { + s.PurchaseTime = &v + return s +} + // SetReservedInstancesOfferingId sets the ReservedInstancesOfferingId field's value. func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId(v string) *PurchaseReservedInstancesOfferingInput { s.ReservedInstancesOfferingId = &v @@ -74180,14 +81048,18 @@ func (s *RecurringCharge) SetFrequency(v string) *RecurringCharge { return s } -// Describes a region. +// Describes a Region. type Region struct { _ struct{} `type:"structure"` - // The region service endpoint. + // The Region service endpoint. Endpoint *string `locationName:"regionEndpoint" type:"string"` - // The name of the region. + // The Region opt-in status. The possible values are opt-in-not-required, opted-in, + // and not-opted-in. + OptInStatus *string `locationName:"optInStatus" type:"string"` + + // The name of the Region. RegionName *string `locationName:"regionName" type:"string"` } @@ -74207,6 +81079,12 @@ func (s *Region) SetEndpoint(v string) *Region { return s } +// SetOptInStatus sets the OptInStatus field's value. +func (s *Region) SetOptInStatus(v string) *Region { + s.OptInStatus = &v + return s +} + // SetRegionName sets the RegionName field's value. func (s *Region) SetRegionName(v string) *Region { s.RegionName = &v @@ -74247,7 +81125,10 @@ type RegisterImageInput struct { // PV AMI can make instances launched from the AMI unreachable. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` - // The full path to your AMI manifest in Amazon S3 storage. + // The full path to your AMI manifest in Amazon S3 storage. The specified bucket + // must have the aws-exec-read canned access control list (ACL) to ensure that + // it can be accessed by Amazon EC2. For more information, see Canned ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + // in the Amazon S3 Service Developer Guide. ImageLocation *string `type:"string"` // The ID of the kernel. @@ -75608,9 +82489,11 @@ type RequestLaunchTemplateData struct { // only. CreditSpecification *CreditSpecificationRequest `type:"structure"` - // If set to true, you can't terminate the instance using the Amazon EC2 console, - // CLI, or API. 
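// [Editor's note] The new PurchaseTime field above lets callers queue a Reserved
// Instance purchase for a future time. A hedged sketch of client-side usage,
// assuming the usual v1 SDK imports (time, aws, aws/session, and this ec2
// package); the offering ID and timestamp are placeholders:

func queueReservedInstancePurchase() error {
    svc := ec2.New(session.Must(session.NewSession()))
    // UTC, YYYY-MM-DDTHH:MM:SSZ, per the PurchaseTime doc comment above.
    purchaseAt, err := time.Parse(time.RFC3339, "2020-01-01T00:00:00Z")
    if err != nil {
        return err
    }
    input := &ec2.PurchaseReservedInstancesOfferingInput{
        InstanceCount:               aws.Int64(1),
        ReservedInstancesOfferingId: aws.String("offering-id-placeholder"),
    }
    input.SetPurchaseTime(purchaseAt)
    _, err = svc.PurchaseReservedInstancesOffering(input)
    return err
}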
To change this attribute to false after launch, use ModifyInstanceAttribute - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute + // after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + // Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, + // you can terminate the instance by running the shutdown command from the instance. DisableApiTermination *bool `type:"boolean"` // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization @@ -75628,15 +82511,14 @@ type RequestLaunchTemplateData struct { // Indicates whether an instance is enabled for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). - // Hibernation is currently supported only for Amazon Linux. For more information, - // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. HibernationOptions *LaunchTemplateHibernationOptionsRequest `type:"structure"` // The IAM instance profile. IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"` - // The ID of the AMI, which you can get by using DescribeImages (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html). + // The ID of the AMI. ImageId *string `type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -75672,7 +82554,8 @@ type RequestLaunchTemplateData struct { // The monitoring for the instance. Monitoring *LaunchTemplatesMonitoringRequest `type:"structure"` - // One or more network interfaces. + // One or more network interfaces. If you specify a network interface, you must + // specify any security groups and subnets as part of the network interface. NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest `locationName:"NetworkInterface" locationNameList:"InstanceNetworkInterfaceSpecification" type:"list"` // The placement for the instance. @@ -75685,7 +82568,8 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. RamDiskId *string `type:"string"` - // One or more security group IDs. You can create a security group using CreateSecurityGroup. + // One or more security group IDs. You can create a security group using CreateSecurityGroup + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). // You cannot specify both a security group ID and security name in the same // request. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` @@ -75698,7 +82582,7 @@ type RequestLaunchTemplateData struct { // The tags to apply to the resources during launch. You can only tag instances // and volumes on launch. The specified tags are applied to all instances or // volumes that are created during launch. To tag a resource after it has been - // created, see CreateTags. 
+ // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"` // The Base64-encoded user data to make available to the instance. For more @@ -76063,6 +82947,10 @@ type RequestSpotInstancesInput struct { // launch, the request expires, or the request is canceled. If the request is // persistent, the request becomes active at this date and time and remains // active until it expires or is canceled. + // + // The specified start date and time cannot be equal to the current date and + // time. You must specify a start date and time that occurs after the current + // date and time. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` // The end date of the request. If this is a one-time request, the request remains @@ -76254,7 +83142,9 @@ type RequestSpotLaunchSpecification struct { // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` - // The ID of the subnet in which to launch the instance. + // The IDs of the subnets in which to launch the instance. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data for the instance. User data is limited to 16 @@ -76752,7 +83642,7 @@ type ReservedInstancesConfiguration struct { // EC2-Classic or EC2-VPC. Platform *string `locationName:"platform" type:"string"` - // Whether the Reserved Instance is applied to instances in a region or instances + // Whether the Reserved Instance is applied to instances in a Region or instances // in a specific Availability Zone. Scope *string `locationName:"scope" type:"string" enum:"scope"` } @@ -77111,7 +84001,7 @@ type ReservedInstancesOffering struct { // GetReservedInstancesExchangeQuote to confirm that an exchange can be made. ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` - // Whether the Reserved Instance is applied to instances in a region or an Availability + // Whether the Reserved Instance is applied to instances in a Region or an Availability // Zone. Scope *string `locationName:"scope" type:"string" enum:"scope"` @@ -77219,6 +84109,55 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO return s } +type ResetEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ResetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *ResetEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +type ResetEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for EBS encryption by default. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s ResetEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ResetEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ResetEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + type ResetFpgaImageAttributeInput struct { _ struct{} `type:"structure"` @@ -78630,10 +85569,7 @@ type RunInstancesInput struct { // Reserved. AdditionalInfo *string `locationName:"additionalInfo" type:"string"` - // The block device mapping entries. You can't specify both a snapshot ID and - // an encryption value. This is because only blank volumes can be encrypted - // on creation. If a snapshot is the basis for a volume, it is not blank and - // its encryption status is used for the volume encryption status. + // The block device mapping entries. BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` // Information about the Capacity Reservation targeting option. If you do not @@ -78654,7 +85590,8 @@ type RunInstancesInput struct { CpuOptions *CpuOptionsRequest `type:"structure"` // The credit option for CPU usage of the T2 or T3 instance. Valid values are - // standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. + // standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). // For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -78663,9 +85600,9 @@ type RunInstancesInput struct { // If you set this parameter to true, you can't terminate the instance using // the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute - // to false after launch, use ModifyInstanceAttribute. Alternatively, if you - // set InstanceInitiatedShutdownBehavior to terminate, you can terminate the - // instance by running the shutdown command from the instance. + // after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + // Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, + // you can terminate the instance by running the shutdown command from the instance. // // Default: false DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` @@ -78687,8 +85624,7 @@ type RunInstancesInput struct { // An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource // that you can attach to your Windows instance to accelerate the graphics performance - // of your applications. For more information, see Amazon EC2 Elastic GPUs - // (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) + // of your applications. 
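// [Editor's note] The ResetEbsDefaultKmsKeyId input/output pair above resets the
// account's default CMK for EBS encryption by default back to the AWS-managed
// key. A minimal sketch, assuming the corresponding ResetEbsDefaultKmsKeyId
// operation that this SDK version generates elsewhere in this file:

func resetDefaultEbsCmk(svc *ec2.EC2) (string, error) {
    out, err := svc.ResetEbsDefaultKmsKeyId(&ec2.ResetEbsDefaultKmsKeyIdInput{
        DryRun: aws.Bool(false), // set to true to check permissions without resetting
    })
    if err != nil {
        return "", err
    }
    // The output's KmsKeyId reports the ARN now in effect as the default.
    return aws.StringValue(out.KmsKeyId), nil
}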
For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) // in the Amazon Elastic Compute Cloud User Guide. ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"` @@ -78705,8 +85641,8 @@ type RunInstancesInput struct { // The IAM instance profile. IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` - // The ID of the AMI. An AMI is required to launch an instance and must be specified - // here or in a launch template. + // The ID of the AMI. An AMI ID is required to launch an instance and must be + // specified here or in a launch template. ImageId *string `type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -78749,12 +85685,12 @@ type RunInstancesInput struct { // The ID of the kernel. // // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more - // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) // in the Amazon Elastic Compute Cloud User Guide. KernelId *string `type:"string"` - // The name of the key pair. You can create a key pair using CreateKeyPair or - // ImportKeyPair. + // The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) + // or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). // // If you do not specify a key pair, you can't connect to the instance unless // you choose an AMI that is configured to allow users another way to log in. @@ -78795,7 +85731,9 @@ type RunInstancesInput struct { // Specifies whether detailed monitoring is enabled for the instance. Monitoring *RunInstancesMonitoringEnabled `type:"structure"` - // The network interfaces to associate with the instance. + // The network interfaces to associate with the instance. If you specify a network + // interface, you must specify any security groups and subnets as part of the + // network interface. NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` // The placement for the instance. @@ -78819,37 +85757,36 @@ type RunInstancesInput struct { // Center and search for the kernel ID. // // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more - // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) // in the Amazon Elastic Compute Cloud User Guide. RamdiskId *string `type:"string"` - // The IDs of the security groups. You can create a security group using CreateSecurityGroup. + // The IDs of the security groups. You can create a security group using CreateSecurityGroup + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). // - // Default: Amazon EC2 uses the default security group. - // - // You cannot specify this option and the network interfaces option in the same - // request. + // If you specify a network interface, you must specify any security groups + // as part of the network interface. 
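// [Editor's note] The rewritten comments above tighten a real constraint: when a
// NetworkInterfaces entry is supplied, the security groups and subnet move inside
// the interface specification instead of the top-level SecurityGroupIds/SubnetId
// fields. A sketch using only types from this file; all IDs are placeholders:

func exampleRunWithInterface() *ec2.RunInstancesInput {
    return &ec2.RunInstancesInput{
        ImageId:      aws.String("ami-placeholder"),
        InstanceType: aws.String("t3.micro"),
        MinCount:     aws.Int64(1),
        MaxCount:     aws.Int64(1),
        // SubnetId and SecurityGroupIds are intentionally left unset here;
        // per the comments above, they ride along on the interface instead.
        NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{{
            DeviceIndex: aws.Int64(0),
            SubnetId:    aws.String("subnet-placeholder"),
            Groups:      aws.StringSlice([]string{"sg-placeholder"}),
        }},
    }
}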
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` // [EC2-Classic, default VPC] The names of the security groups. For a nondefault // VPC, you must use security group IDs instead. // - // You cannot specify this option and the network interfaces option in the same - // request. + // If you specify a network interface, you must specify any security groups + // as part of the network interface. // // Default: Amazon EC2 uses the default security group. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` // [EC2-VPC] The ID of the subnet to launch the instance into. // - // You cannot specify this option and the network interfaces option in the same - // request. + // If you specify a network interface, you must specify any subnets as part + // of the network interface. SubnetId *string `type:"string"` // The tags to apply to the resources during launch. You can only tag instances // and volumes on launch. The specified tags are applied to all instances or // volumes that are created during launch. To tag a resource after it has been - // created, see CreateTags. + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The user data to make available to the instance. For more information, see @@ -79865,7 +86802,7 @@ type ScheduledInstancesEbs struct { // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. // - // Default: standard + // Default: gp2 VolumeType *string `type:"string"` } @@ -80388,11 +87325,9 @@ type SearchTransitGatewayRoutesInput struct { // routes in your route table and you specify supernet-of-match as 10.0.1.0/30, // then the result returns 10.0.1.0/29. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the route (active | blackhole). // - // * type - The type of roue (active | blackhole). + // * type - The type of route (propagated | static). // // Filters is a required field Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` @@ -80653,6 +87588,70 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr return s } +type SendDiagnosticInterruptInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SendDiagnosticInterruptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendDiagnosticInterruptInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SendDiagnosticInterruptInput) SetDryRun(v bool) *SendDiagnosticInterruptInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *SendDiagnosticInterruptInput) SetInstanceId(v string) *SendDiagnosticInterruptInput { + s.InstanceId = &v + return s +} + +type SendDiagnosticInterruptOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptOutput) GoString() string { + return s.String() +} + // Describes a service configuration for a VPC endpoint service. type ServiceConfiguration struct { _ struct{} `type:"structure"` @@ -80688,6 +87687,9 @@ type ServiceConfiguration struct { // The type of service. ServiceType []*ServiceTypeDetail `locationName:"serviceType" locationNameList:"item" type:"list"` + + // Any tags assigned to the service. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -80760,6 +87762,12 @@ func (s *ServiceConfiguration) SetServiceType(v []*ServiceTypeDetail) *ServiceCo return s } +// SetTags sets the Tags field's value. +func (s *ServiceConfiguration) SetTags(v []*Tag) *ServiceConfiguration { + s.Tags = v + return s +} + // Describes a VPC endpoint service. type ServiceDetail struct { _ struct{} `type:"structure"` @@ -80784,12 +87792,18 @@ type ServiceDetail struct { // The private DNS name for the service. PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + // The ID of the endpoint service. + ServiceId *string `locationName:"serviceId" type:"string"` + // The Amazon Resource Name (ARN) of the service. ServiceName *string `locationName:"serviceName" type:"string"` // The type of service. ServiceType []*ServiceTypeDetail `locationName:"serviceType" locationNameList:"item" type:"list"` + // Any tags assigned to the service. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // Indicates whether the service supports endpoint policies. VpcEndpointPolicySupported *bool `locationName:"vpcEndpointPolicySupported" type:"boolean"` } @@ -80840,6 +87854,12 @@ func (s *ServiceDetail) SetPrivateDnsName(v string) *ServiceDetail { return s } +// SetServiceId sets the ServiceId field's value. +func (s *ServiceDetail) SetServiceId(v string) *ServiceDetail { + s.ServiceId = &v + return s +} + // SetServiceName sets the ServiceName field's value. func (s *ServiceDetail) SetServiceName(v string) *ServiceDetail { s.ServiceName = &v @@ -80852,6 +87872,12 @@ func (s *ServiceDetail) SetServiceType(v []*ServiceTypeDetail) *ServiceDetail { return s } +// SetTags sets the Tags field's value. +func (s *ServiceDetail) SetTags(v []*Tag) *ServiceDetail { + s.Tags = v + return s +} + // SetVpcEndpointPolicySupported sets the VpcEndpointPolicySupported field's value. func (s *ServiceDetail) SetVpcEndpointPolicySupported(v bool) *ServiceDetail { s.VpcEndpointPolicySupported = &v @@ -80980,7 +88006,7 @@ type Snapshot struct { // the original volume or snapshot copy. 
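// [Editor's note] SendDiagnosticInterrupt, added above, delivers a diagnostic
// interrupt to an instance, which the guest OS is typically configured to turn
// into a kernel panic (Linux) or bugcheck (Windows) for debugging. A sketch with
// a placeholder instance ID; note that client-side Validate runs before any
// request is sent:

func sendDiagnosticInterrupt(svc *ec2.EC2, instanceID string) error {
    input := &ec2.SendDiagnosticInterruptInput{
        InstanceId: aws.String(instanceID), // required; Validate rejects a nil value
    }
    if err := input.Validate(); err != nil {
        return err // fails locally, no API call is made
    }
    // The output struct carries no fields; success is the absence of an error.
    _, err := svc.SendDiagnosticInterrupt(input)
    return err
}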
Because data encryption keys are inherited // by volumes created from snapshots, and vice versa, if snapshots share the // same data encryption key identifier, then they belong to the same volume/snapshot - // lineage. This parameter is only returned by the DescribeSnapshots API operation. + // lineage. This parameter is only returned by DescribeSnapshots. DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` // The description for the snapshot. @@ -80989,9 +88015,9 @@ type Snapshot struct { // Indicates whether the snapshot is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The full ARN of the AWS Key Management Service (AWS KMS) customer master - // key (CMK) that was used to protect the volume encryption key for the parent - // volume. + // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) + // customer master key (CMK) that was used to protect the volume encryption + // key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // Value from an Amazon-maintained list (amazon | self | all | aws-marketplace @@ -81019,7 +88045,7 @@ type Snapshot struct { // operation fails (for example, if the proper AWS Key Management Service (AWS // KMS) permissions are not obtained) this field displays error state details // to help you diagnose why the error occurred. This parameter is only returned - // by the DescribeSnapshots API operation. + // by DescribeSnapshots. StateMessage *string `locationName:"statusMessage" type:"string"` // Any tags assigned to the snapshot. @@ -81287,6 +88313,113 @@ func (s *SnapshotDiskContainer) SetUserBucket(v *UserBucket) *SnapshotDiskContai return s } +// Information about a snapshot. +type SnapshotInfo struct { + _ struct{} `type:"structure"` + + // Description specified by the CreateSnapshotRequest that has been applied + // to all snapshots. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Account id used when creating this snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Progress this snapshot has made towards completing. + Progress *string `locationName:"progress" type:"string"` + + // Snapshot id that can be used to describe this snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // Time this snapshot was started. This is the same for all snapshots initiated + // by the same request. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // Current state of the snapshot. + State *string `locationName:"state" type:"string" enum:"SnapshotState"` + + // Tags associated with this snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Source volume from which this snapshot was created. + VolumeId *string `locationName:"volumeId" type:"string"` + + // Size of the volume from which this snapshot was created. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s SnapshotInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotInfo) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SnapshotInfo) SetDescription(v string) *SnapshotInfo { + s.Description = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. 
+func (s *SnapshotInfo) SetEncrypted(v bool) *SnapshotInfo { + s.Encrypted = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *SnapshotInfo) SetOwnerId(v string) *SnapshotInfo { + s.OwnerId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *SnapshotInfo) SetProgress(v string) *SnapshotInfo { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *SnapshotInfo) SetSnapshotId(v string) *SnapshotInfo { + s.SnapshotId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *SnapshotInfo) SetStartTime(v time.Time) *SnapshotInfo { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *SnapshotInfo) SetState(v string) *SnapshotInfo { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SnapshotInfo) SetTags(v []*Tag) *SnapshotInfo { + s.Tags = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *SnapshotInfo) SetVolumeId(v string) *SnapshotInfo { + s.VolumeId = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *SnapshotInfo) SetVolumeSize(v int64) *SnapshotInfo { + s.VolumeSize = &v + return s +} + // Details about the import snapshot task. type SnapshotTaskDetail struct { _ struct{} `type:"structure"` @@ -81462,17 +88595,20 @@ func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription return s } -// Describes the launch specification for one or more Spot Instances. +// Describes the launch specification for one or more Spot Instances. If you +// include On-Demand capacity in your fleet request, you can't use SpotFleetLaunchSpecification; +// you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). type SpotFleetLaunchSpecification struct { _ struct{} `type:"structure"` // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block device mapping entries. You can't specify both a snapshot - // ID and an encryption value. This is because only blank volumes can be encrypted - // on creation. If a snapshot is the basis for a volume, it is not blank and - // its encryption status is used for the volume encryption status. + // One or more block devices that are mapped to the Spot Instances. You can't + // specify both a snapshot ID and an encryption value. This is because only + // blank volumes can be encrypted on creation. If a snapshot is the basis for + // a volume, it is not blank and its encryption status is used for the volume + // encryption status. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instances are optimized for EBS I/O. This optimization @@ -81509,7 +88645,10 @@ type SpotFleetLaunchSpecification struct { // The placement information. Placement *SpotPlacement `locationName:"placement" type:"structure"` - // The ID of the RAM disk. + // The ID of the RAM disk. Some kernels require additional drivers at launch. + // Check the kernel requirements for information about whether you need to specify + // a RAM disk. To find kernel requirements, refer to the AWS Resource Center + // and search for the kernel ID. RamdiskId *string `locationName:"ramdiskId" type:"string"` // One or more security groups. 
When requesting instances in a VPC, you must @@ -81523,23 +88662,24 @@ type SpotFleetLaunchSpecification struct { // by the value of WeightedCapacity. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The ID of the subnet in which to launch the instances. To specify multiple - // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + // The IDs of the subnets in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The tags to apply during creation. TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"` - // The Base64-encoded user data to make available to the instances. + // The Base64-encoded user data that instances use when starting up. UserData *string `locationName:"userData" type:"string"` // The number of units provided by the specified instance type. These are the - // same units that you chose to set the target capacity in terms (instances - // or a performance characteristic such as vCPUs, memory, or I/O). + // same units that you chose to set the target capacity in terms of instances, + // or a performance characteristic such as vCPUs, memory, or I/O. // - // If the target capacity divided by this value is not a whole number, we round - // the number of instances to the next whole number. If this value is not specified, - // the default is 1. + // If the target capacity divided by this value is not a whole number, Amazon + // EC2 rounds the number of instances to the next whole number. If this value + // is not specified, the default is 1. WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` } @@ -81755,8 +88895,19 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the Spot Fleet request. + // + // If the allocation strategy is lowestPrice, Spot Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, Spot Fleet launches instances + // from all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, Spot Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` // A unique, case-sensitive identifier that you provide to ensure the idempotency @@ -81764,18 +88915,23 @@ type SpotFleetRequestConfigData struct { // see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` - // Indicates whether running Spot Instances should be terminated if the target - // capacity of the Spot Fleet request is decreased below the current size of - // the Spot Fleet. 
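// [Editor's note] The WeightedCapacity comment above pins down the rounding rule:
// instances launched = ceil(TargetCapacity / WeightedCapacity), with a default
// weight of 1 when unspecified. Worked example (numbers illustrative): a target
// of 10 units at a per-instance weight of 3 launches ceil(10/3) = 4 instances,
// fulfilling 12 units. A small helper showing the same arithmetic (assumes the
// standard library "math" import):

func instancesForTargetCapacity(targetCapacity int64, weightedCapacity float64) int64 {
    if weightedCapacity <= 0 {
        weightedCapacity = 1 // documented default when the value is not specified
    }
    return int64(math.Ceil(float64(targetCapacity) / weightedCapacity))
}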
+ // Indicates whether running Spot Instances should be terminated if you decrease + // the target capacity of the Spot Fleet request below the current size of the + // Spot Fleet. ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` // The number of units fulfilled by this request compared to the set target // capacity. You cannot set this value. FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` - // Grants the Spot Fleet permission to terminate Spot Instances on your behalf - // when you cancel its Spot Fleet request using CancelSpotFleetRequests or when - // the Spot Fleet request expires, if you set terminateInstancesWithExpiration. + // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) + // role that grants the Spot Fleet the permission to request, launch, terminate, + // and tag instances on your behalf. For more information, see Spot Fleet Prerequisites + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) + // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate + // Spot Instances on your behalf when you cancel its Spot Fleet request using + // CancelSpotFleetRequests or when the Spot Fleet request expires, if you set + // TerminateInstancesWithExpiration. // // IamFleetRole is a required field IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` @@ -81790,11 +88946,13 @@ type SpotFleetRequestConfigData struct { InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` // The launch specifications for the Spot Fleet request. If you specify LaunchSpecifications, - // you can't specify LaunchTemplateConfigs. + // you can't specify LaunchTemplateConfigs. If you include On-Demand capacity + // in your request, you must use LaunchTemplateConfigs. LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" type:"list"` // The launch template and overrides. If you specify LaunchTemplateConfigs, - // you can't specify LaunchSpecifications. + // you can't specify LaunchSpecifications. If you include On-Demand capacity + // in your request, you must use LaunchTemplateConfigs. LaunchTemplateConfigs []*LaunchTemplateConfig `locationName:"launchTemplateConfigs" locationNameList:"item" type:"list"` // One or more Classic Load Balancers and target groups to attach to the Spot @@ -81818,6 +88976,16 @@ type SpotFleetRequestConfigData struct { // target On-Demand capacity. OnDemandFulfilledCapacity *float64 `locationName:"onDemandFulfilledCapacity" type:"double"` + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice + // parameter, or both parameters to ensure that your fleet cost does not exceed + // your budget. If you set a maximum price per hour for the On-Demand Instances + // and Spot Instances in your request, Spot Fleet will launch instances until + // it reaches the maximum amount you're willing to pay. When the maximum amount + // you're willing to pay is reached, the fleet stops launching instances even + // if it hasn’t met the target capacity. + OnDemandMaxTotalPrice *string `locationName:"onDemandMaxTotalPrice" type:"string"` + // The number of On-Demand units to request. 
You can choose to set the target // capacity in terms of instances or a performance characteristic that is important // to your application workload, such as vCPUs, memory, or I/O. If the request @@ -81828,21 +88996,31 @@ type SpotFleetRequestConfigData struct { // Indicates whether Spot Fleet should replace unhealthy instances. ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + // The maximum amount per hour for Spot Instances that you're willing to pay. + // You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, + // or both parameters to ensure that your fleet cost does not exceed your budget. + // If you set a maximum price per hour for the On-Demand Instances and Spot + // Instances in your request, Spot Fleet will launch instances until it reaches + // the maximum amount you're willing to pay. When the maximum amount you're + // willing to pay is reached, the fleet stops launching instances even if it + // hasn’t met the target capacity. + SpotMaxTotalPrice *string `locationName:"spotMaxTotalPrice" type:"string"` + // The maximum price per unit hour that you are willing to pay for a Spot Instance. // The default is the On-Demand price. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The number of units to request. You can choose to set the target capacity - // in terms of instances or a performance characteristic that is important to - // your application workload, such as vCPUs, memory, or I/O. If the request - // type is maintain, you can specify a target capacity of 0 and add capacity - // later. + // The number of units to request for the Spot Fleet. You can choose to set + // the target capacity in terms of instances or a performance characteristic + // that is important to your application workload, such as vCPUs, memory, or + // I/O. If the request type is maintain, you can specify a target capacity of + // 0 and add capacity later. // // TargetCapacity is a required field TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` - // Indicates whether running Spot Instances should be terminated when the Spot - // Fleet request expires. + // Indicates whether running Spot Instances are terminated when the Spot Fleet + // request expires. TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` // The type of request. Indicates whether the Spot Fleet only requests the target @@ -81855,14 +89033,14 @@ type SpotFleetRequestConfigData struct { // Default: maintain. instant is listed but is not used by Spot Fleet. Type *string `locationName:"type" type:"string" enum:"FleetType"` - // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - // The default is to start fulfilling the request immediately. + // The start date and time of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // By default, Amazon EC2 starts fulfilling the request immediately. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` - // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - // At this point, no new Spot Instance requests are placed or able to fulfill - // the request. If no value is specified, the Spot Fleet request remains until - // you cancel it. + // The end date and time of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // After the end date and time, no new Spot Instance requests are placed or + // able to fulfill the request.
If no value is specified, the Spot Fleet request + // remains until you cancel it. ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } @@ -81979,6 +89157,12 @@ func (s *SpotFleetRequestConfigData) SetOnDemandFulfilledCapacity(v float64) *Sp return s } +// SetOnDemandMaxTotalPrice sets the OnDemandMaxTotalPrice field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandMaxTotalPrice(v string) *SpotFleetRequestConfigData { + s.OnDemandMaxTotalPrice = &v + return s +} + // SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. func (s *SpotFleetRequestConfigData) SetOnDemandTargetCapacity(v int64) *SpotFleetRequestConfigData { s.OnDemandTargetCapacity = &v @@ -81991,6 +89175,12 @@ func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotF return s } +// SetSpotMaxTotalPrice sets the SpotMaxTotalPrice field's value. +func (s *SpotFleetRequestConfigData) SetSpotMaxTotalPrice(v string) *SpotFleetRequestConfigData { + s.SpotMaxTotalPrice = &v + return s +} + // SetSpotPrice sets the SpotPrice field's value. func (s *SpotFleetRequestConfigData) SetSpotPrice(v string) *SpotFleetRequestConfigData { s.SpotPrice = &v @@ -82410,19 +89600,33 @@ func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { type SpotOptions struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowest-price. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowest-price, EC2 Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"SpotInstanceInterruptionBehavior"` // The number of Spot pools across which to allocate your target Spot capacity. - // Valid only when AllocationStrategy is set to lowestPrice. EC2 Fleet selects + // Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects // the cheapest Spot pools and evenly allocates your target Spot capacity across // the number of Spot pools that you specify. InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` + // The minimum target capacity for Spot Instances in the fleet. If the minimum // target capacity is not reached, the fleet launches no instances. MinTargetCapacity *int64 `locationName:"minTargetCapacity" type:"integer"` @@ -82464,6 +89668,12 @@ func (s *SpotOptions) SetInstancePoolsToUseCount(v int64) *SpotOptions { return s } +// SetMaxTotalPrice sets the MaxTotalPrice field's value. 
+func (s *SpotOptions) SetMaxTotalPrice(v string) *SpotOptions { + s.MaxTotalPrice = &v + return s +} + // SetMinTargetCapacity sets the MinTargetCapacity field's value. func (s *SpotOptions) SetMinTargetCapacity(v int64) *SpotOptions { s.MinTargetCapacity = &v @@ -82486,8 +89696,19 @@ func (s *SpotOptions) SetSingleInstanceType(v bool) *SpotOptions { type SpotOptionsRequest struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowest-price, EC2 Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -82499,6 +89720,9 @@ type SpotOptionsRequest struct { // across the number of Spot pools that you specify. InstancePoolsToUseCount *int64 `type:"integer"` + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice *string `type:"string"` + // The minimum target capacity for Spot Instances in the fleet. If the minimum // target capacity is not reached, the fleet launches no instances. MinTargetCapacity *int64 `type:"integer"` @@ -82540,6 +89764,12 @@ func (s *SpotOptionsRequest) SetInstancePoolsToUseCount(v int64) *SpotOptionsReq return s } +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *SpotOptionsRequest) SetMaxTotalPrice(v string) *SpotOptionsRequest { + s.MaxTotalPrice = &v + return s +} + // SetMinTargetCapacity sets the MinTargetCapacity field's value. func (s *SpotOptionsRequest) SetMinTargetCapacity(v int64) *SpotOptionsRequest { s.MinTargetCapacity = &v @@ -83364,6 +90594,30 @@ func (s *SuccessfulInstanceCreditSpecificationItem) SetInstanceId(v string) *Suc return s } +// Describes a Reserved Instance whose queued purchase was successfully deleted. +type SuccessfulQueuedPurchaseDeletion struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s SuccessfulQueuedPurchaseDeletion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuccessfulQueuedPurchaseDeletion) GoString() string { + return s.String() +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *SuccessfulQueuedPurchaseDeletion) SetReservedInstancesId(v string) *SuccessfulQueuedPurchaseDeletion { + s.ReservedInstancesId = &v + return s +} + // Describes a tag. type Tag struct { _ struct{} `type:"structure"` @@ -83459,8 +90713,12 @@ type TagSpecification struct { _ struct{} `type:"structure"` // The type of resource to tag. Currently, the resource types that support tagging - // on creation are fleet, dedicated-host, instance, snapshot, and volume. 
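// [Editor's note] MaxTotalPrice, new above on SpotOptions/SpotOptionsRequest,
// caps total hourly spend for the fleet's Spot side, unlike the per-unit
// SpotPrice cap. A sketch of an EC2 Fleet Spot section built only from fields
// visible in this diff; the dollar amount and pool count are illustrative:

func exampleSpotOptions() *ec2.SpotOptionsRequest {
    return &ec2.SpotOptionsRequest{
        AllocationStrategy:      aws.String("lowest-price"),
        InstancePoolsToUseCount: aws.Int64(2),       // only honored with lowest-price
        MaxTotalPrice:           aws.String("5.00"), // max USD per hour across Spot Instances
        MinTargetCapacity:       aws.Int64(1),       // launch nothing if this can't be met
    }
}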
To - // tag a resource after it has been created, see CreateTags. + // on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host + // | fleet | fpga-image | instance | launch-template | snapshot | traffic-mirror-filter + // | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment + // | transit-gateway-route-table | volume. + // + // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` // The tags to apply to the resource. @@ -83494,16 +90752,27 @@ func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification { // your application workload, such as vCPUs, memory, or I/O. If the request // type is maintain, you can specify a target capacity of 0 and add capacity // later. +// +// You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance +// MaxTotalPrice, or both to ensure your fleet cost does not exceed your budget. +// If you set a maximum price per hour for the On-Demand Instances and Spot +// Instances in your request, EC2 Fleet will launch instances until it reaches +// the maximum amount you're willing to pay. When the maximum amount you're +// willing to pay is reached, the fleet stops launching instances even if it +// hasn’t met the target capacity. The MaxTotalPrice parameters are located +// in OnDemandOptions and SpotOptions. type TargetCapacitySpecification struct { _ struct{} `type:"structure"` // The default TotalTargetCapacity, which is either Spot or On-Demand. DefaultTargetCapacityType *string `locationName:"defaultTargetCapacityType" type:"string" enum:"DefaultTargetCapacityType"` - // The number of On-Demand units to request. + // The number of On-Demand units to request. If you specify a target capacity + // for Spot units, you cannot specify a target capacity for On-Demand units. OnDemandTargetCapacity *int64 `locationName:"onDemandTargetCapacity" type:"integer"` - // The maximum number of Spot units to launch. + // The maximum number of Spot units to launch. If you specify a target capacity + // for On-Demand units, you cannot specify a target capacity for Spot units. SpotTargetCapacity *int64 `locationName:"spotTargetCapacity" type:"integer"` // The number of units to request, filled using DefaultTargetCapacityType. @@ -83545,10 +90814,19 @@ func (s *TargetCapacitySpecification) SetTotalTargetCapacity(v int64) *TargetCap } // The number of units to request. You can choose to set the target capacity -// in terms of instances or a performance characteristic that is important to -// your application workload, such as vCPUs, memory, or I/O. If the request -// type is maintain, you can specify a target capacity of 0 and add capacity -// later. +// as the number of instances. Or you can set the target capacity to a performance +// characteristic that is important to your application workload, such as vCPUs, +// memory, or I/O. If the request type is maintain, you can specify a target +// capacity of 0 and add capacity later. +// +// You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance +// MaxTotalPrice parameter, or both parameters to ensure that your fleet cost +// does not exceed your budget. If you set a maximum price per hour for the +// On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch
// instances until it reaches the maximum amount you're willing to pay. When + // the maximum amount you're willing to pay is reached, the fleet stops launching + // instances even if it hasn’t met the target capacity. The MaxTotalPrice + // parameters are located in OnDemandOptionsRequest and SpotOptionsRequest. type TargetCapacitySpecificationRequest struct { _ struct{} `type:"structure"` @@ -84093,6 +91371,452 @@ func (s *TerminateInstancesOutput) SetTerminatingInstances(v []*InstanceStateCha return s } +// Describes the Traffic Mirror filter. +type TrafficMirrorFilter struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror filter. + Description *string `locationName:"description" type:"string"` + + // Information about the egress rules that are associated with the Traffic Mirror + // filter. + EgressFilterRules []*TrafficMirrorFilterRule `locationName:"egressFilterRuleSet" locationNameList:"item" type:"list"` + + // Information about the ingress rules that are associated with the Traffic + // Mirror filter. + IngressFilterRules []*TrafficMirrorFilterRule `locationName:"ingressFilterRuleSet" locationNameList:"item" type:"list"` + + // The network service traffic that is associated with the Traffic Mirror filter. + NetworkServices []*string `locationName:"networkServiceSet" locationNameList:"item" type:"list"` + + // The tags assigned to the Traffic Mirror filter. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` +} + +// String returns the string representation +func (s TrafficMirrorFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorFilter) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorFilter) SetDescription(v string) *TrafficMirrorFilter { + s.Description = &v + return s +} + +// SetEgressFilterRules sets the EgressFilterRules field's value. +func (s *TrafficMirrorFilter) SetEgressFilterRules(v []*TrafficMirrorFilterRule) *TrafficMirrorFilter { + s.EgressFilterRules = v + return s +} + +// SetIngressFilterRules sets the IngressFilterRules field's value. +func (s *TrafficMirrorFilter) SetIngressFilterRules(v []*TrafficMirrorFilterRule) *TrafficMirrorFilter { + s.IngressFilterRules = v + return s +} + +// SetNetworkServices sets the NetworkServices field's value. +func (s *TrafficMirrorFilter) SetNetworkServices(v []*string) *TrafficMirrorFilter { + s.NetworkServices = v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorFilter) SetTags(v []*Tag) *TrafficMirrorFilter { + s.Tags = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorFilter) SetTrafficMirrorFilterId(v string) *TrafficMirrorFilter { + s.TrafficMirrorFilterId = &v + return s +} + +// Describes the Traffic Mirror rule. +type TrafficMirrorFilterRule struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror rule. + Description *string `locationName:"description" type:"string"` + + // The destination CIDR block assigned to the Traffic Mirror rule. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The destination port range assigned to the Traffic Mirror rule. + DestinationPortRange *TrafficMirrorPortRange `locationName:"destinationPortRange" type:"structure"` + + // The protocol assigned to the Traffic Mirror rule.
+ Protocol *int64 `locationName:"protocol" type:"integer"` + + // The action assigned to the Traffic Mirror rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"TrafficMirrorRuleAction"` + + // The rule number of the Traffic Mirror rule. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` + + // The source CIDR block assigned to the Traffic Mirror rule. + SourceCidrBlock *string `locationName:"sourceCidrBlock" type:"string"` + + // The source port range assigned to the Traffic Mirror rule. + SourcePortRange *TrafficMirrorPortRange `locationName:"sourcePortRange" type:"structure"` + + // The traffic direction assigned to the Traffic Mirror rule. + TrafficDirection *string `locationName:"trafficDirection" type:"string" enum:"TrafficDirection"` + + // The ID of the Traffic Mirror filter that the rule is associated with. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` + + // The ID of the Traffic Mirror rule. + TrafficMirrorFilterRuleId *string `locationName:"trafficMirrorFilterRuleId" type:"string"` +} + +// String returns the string representation +func (s TrafficMirrorFilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorFilterRule) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorFilterRule) SetDescription(v string) *TrafficMirrorFilterRule { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *TrafficMirrorFilterRule) SetDestinationCidrBlock(v string) *TrafficMirrorFilterRule { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationPortRange sets the DestinationPortRange field's value. +func (s *TrafficMirrorFilterRule) SetDestinationPortRange(v *TrafficMirrorPortRange) *TrafficMirrorFilterRule { + s.DestinationPortRange = v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *TrafficMirrorFilterRule) SetProtocol(v int64) *TrafficMirrorFilterRule { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *TrafficMirrorFilterRule) SetRuleAction(v string) *TrafficMirrorFilterRule { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *TrafficMirrorFilterRule) SetRuleNumber(v int64) *TrafficMirrorFilterRule { + s.RuleNumber = &v + return s +} + +// SetSourceCidrBlock sets the SourceCidrBlock field's value. +func (s *TrafficMirrorFilterRule) SetSourceCidrBlock(v string) *TrafficMirrorFilterRule { + s.SourceCidrBlock = &v + return s +} + +// SetSourcePortRange sets the SourcePortRange field's value. +func (s *TrafficMirrorFilterRule) SetSourcePortRange(v *TrafficMirrorPortRange) *TrafficMirrorFilterRule { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *TrafficMirrorFilterRule) SetTrafficDirection(v string) *TrafficMirrorFilterRule { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorFilterRule) SetTrafficMirrorFilterId(v string) *TrafficMirrorFilterRule { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. 
+func (s *TrafficMirrorFilterRule) SetTrafficMirrorFilterRuleId(v string) *TrafficMirrorFilterRule { + s.TrafficMirrorFilterRuleId = &v + return s +} + +// Describes the Traffic Mirror port range. +type TrafficMirrorPortRange struct { + _ struct{} `type:"structure"` + + // The start of the Traffic Mirror port range. This applies to the TCP and UDP + // protocols. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The end of the Traffic Mirror port range. This applies to the TCP and UDP + // protocols. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorPortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorPortRange) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *TrafficMirrorPortRange) SetFromPort(v int64) *TrafficMirrorPortRange { + s.FromPort = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *TrafficMirrorPortRange) SetToPort(v int64) *TrafficMirrorPortRange { + s.ToPort = &v + return s +} + +// Information about the Traffic Mirror filter rule port range. +type TrafficMirrorPortRangeRequest struct { + _ struct{} `type:"structure"` + + // The first port in the Traffic Mirror port range. This applies to the TCP + // and UDP protocols. + FromPort *int64 `type:"integer"` + + // The last port in the Traffic Mirror port range. This applies to the TCP and + // UDP protocols. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorPortRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorPortRangeRequest) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *TrafficMirrorPortRangeRequest) SetFromPort(v int64) *TrafficMirrorPortRangeRequest { + s.FromPort = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *TrafficMirrorPortRangeRequest) SetToPort(v int64) *TrafficMirrorPortRangeRequest { + s.ToPort = &v + return s +} + +// Describes a Traffic Mirror session. +type TrafficMirrorSession struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror session. + Description *string `locationName:"description" type:"string"` + + // The ID of the Traffic Mirror session's network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the account that owns the Traffic Mirror session. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The number of bytes in each packet to mirror. These are the bytes after the + // VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. + // For example, if you set this value to 100, then the first 100 bytes that + // meet the filter criteria are copied to the target. Do not specify this parameter + // when you want to mirror the entire packet. + PacketLength *int64 `locationName:"packetLength" type:"integer"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + SessionNumber *int64 `locationName:"sessionNumber" type:"integer"` + + // The tags assigned to the Traffic Mirror session.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` + + // The ID for the Traffic Mirror session. + TrafficMirrorSessionId *string `locationName:"trafficMirrorSessionId" type:"string"` + + // The ID of the Traffic Mirror target. + TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` + + // The virtual network ID associated with the Traffic Mirror session. + VirtualNetworkId *int64 `locationName:"virtualNetworkId" type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorSession) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorSession) SetDescription(v string) *TrafficMirrorSession { + s.Description = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *TrafficMirrorSession) SetNetworkInterfaceId(v string) *TrafficMirrorSession { + s.NetworkInterfaceId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *TrafficMirrorSession) SetOwnerId(v string) *TrafficMirrorSession { + s.OwnerId = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *TrafficMirrorSession) SetPacketLength(v int64) *TrafficMirrorSession { + s.PacketLength = &v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *TrafficMirrorSession) SetSessionNumber(v int64) *TrafficMirrorSession { + s.SessionNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorSession) SetTags(v []*Tag) *TrafficMirrorSession { + s.Tags = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorSession) SetTrafficMirrorFilterId(v string) *TrafficMirrorSession { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *TrafficMirrorSession) SetTrafficMirrorSessionId(v string) *TrafficMirrorSession { + s.TrafficMirrorSessionId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *TrafficMirrorSession) SetTrafficMirrorTargetId(v string) *TrafficMirrorSession { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *TrafficMirrorSession) SetVirtualNetworkId(v int64) *TrafficMirrorSession { + s.VirtualNetworkId = &v + return s +} + +// Describes a Traffic Mirror target. +type TrafficMirrorTarget struct { + _ struct{} `type:"structure"` + + // Information about the Traffic Mirror target. + Description *string `locationName:"description" type:"string"` + + // The network interface ID that is attached to the target. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The Amazon Resource Name (ARN) of the Network Load Balancer. + NetworkLoadBalancerArn *string `locationName:"networkLoadBalancerArn" type:"string"` + + // The ID of the account that owns the Traffic Mirror target. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The tags assigned to the Traffic Mirror target. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror target. 
+ TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` + + // The type of Traffic Mirror target. + Type *string `locationName:"type" type:"string" enum:"TrafficMirrorTargetType"` +} + +// String returns the string representation +func (s TrafficMirrorTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorTarget) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorTarget) SetDescription(v string) *TrafficMirrorTarget { + s.Description = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *TrafficMirrorTarget) SetNetworkInterfaceId(v string) *TrafficMirrorTarget { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkLoadBalancerArn sets the NetworkLoadBalancerArn field's value. +func (s *TrafficMirrorTarget) SetNetworkLoadBalancerArn(v string) *TrafficMirrorTarget { + s.NetworkLoadBalancerArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *TrafficMirrorTarget) SetOwnerId(v string) *TrafficMirrorTarget { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorTarget) SetTags(v []*Tag) *TrafficMirrorTarget { + s.Tags = v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *TrafficMirrorTarget) SetTrafficMirrorTargetId(v string) *TrafficMirrorTarget { + s.TrafficMirrorTargetId = &v + return s +} + +// SetType sets the Type field's value. +func (s *TrafficMirrorTarget) SetType(v string) *TrafficMirrorTarget { + s.Type = &v + return s +} + // Describes a transit gateway. type TransitGateway struct { _ struct{} `type:"structure"` @@ -85041,6 +92765,170 @@ func (s *TransitGatewayVpcAttachmentOptions) SetIpv6Support(v string) *TransitGa return s } +// The VPN tunnel options. +type TunnelOption struct { + _ struct{} `type:"structure"` + + // The number of seconds after which a DPD timeout occurs. + DpdTimeoutSeconds *int64 `locationName:"dpdTimeoutSeconds" type:"integer"` + + // The IKE versions that are permitted for the VPN tunnel. + IkeVersions []*IKEVersionsListValue `locationName:"ikeVersionSet" locationNameList:"item" type:"list"` + + // The external IP address of the VPN tunnel. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 1 + // IKE negotiations. + Phase1DHGroupNumbers []*Phase1DHGroupNumbersListValue `locationName:"phase1DHGroupNumberSet" locationNameList:"item" type:"list"` + + // The permitted encryption algorithms for the VPN tunnel for phase 1 IKE negotiations. + Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsListValue `locationName:"phase1EncryptionAlgorithmSet" locationNameList:"item" type:"list"` + + // The permitted integrity algorithms for the VPN tunnel for phase 1 IKE negotiations. + Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsListValue `locationName:"phase1IntegrityAlgorithmSet" locationNameList:"item" type:"list"` + + // The lifetime for phase 1 of the IKE negotiation, in seconds. + Phase1LifetimeSeconds *int64 `locationName:"phase1LifetimeSeconds" type:"integer"` + + // The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 2 + // IKE negotiations. 
+ Phase2DHGroupNumbers []*Phase2DHGroupNumbersListValue `locationName:"phase2DHGroupNumberSet" locationNameList:"item" type:"list"` + + // The permitted encryption algorithms for the VPN tunnel for phase 2 IKE negotiations. + Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsListValue `locationName:"phase2EncryptionAlgorithmSet" locationNameList:"item" type:"list"` + + // The permitted integrity algorithms for the VPN tunnel for phase 2 IKE negotiations. + Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsListValue `locationName:"phase2IntegrityAlgorithmSet" locationNameList:"item" type:"list"` + + // The lifetime for phase 2 of the IKE negotiation, in seconds. + Phase2LifetimeSeconds *int64 `locationName:"phase2LifetimeSeconds" type:"integer"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and the customer gateway. + PreSharedKey *string `locationName:"preSharedKey" type:"string"` + + // The percentage of the rekey window determined by RekeyMarginTimeSeconds during + // which the rekey time is randomly selected. + RekeyFuzzPercentage *int64 `locationName:"rekeyFuzzPercentage" type:"integer"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during + // which the AWS side of the VPN connection performs an IKE rekey. + RekeyMarginTimeSeconds *int64 `locationName:"rekeyMarginTimeSeconds" type:"integer"` + + // The number of packets in an IKE replay window. + ReplayWindowSize *int64 `locationName:"replayWindowSize" type:"integer"` + + // The range of inside IP addresses for the tunnel. + TunnelInsideCidr *string `locationName:"tunnelInsideCidr" type:"string"` +} + +// String returns the string representation +func (s TunnelOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TunnelOption) GoString() string { + return s.String() +} + +// SetDpdTimeoutSeconds sets the DpdTimeoutSeconds field's value. +func (s *TunnelOption) SetDpdTimeoutSeconds(v int64) *TunnelOption { + s.DpdTimeoutSeconds = &v + return s +} + +// SetIkeVersions sets the IkeVersions field's value. +func (s *TunnelOption) SetIkeVersions(v []*IKEVersionsListValue) *TunnelOption { + s.IkeVersions = v + return s +} + +// SetOutsideIpAddress sets the OutsideIpAddress field's value. +func (s *TunnelOption) SetOutsideIpAddress(v string) *TunnelOption { + s.OutsideIpAddress = &v + return s +} + +// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value. +func (s *TunnelOption) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersListValue) *TunnelOption { + s.Phase1DHGroupNumbers = v + return s +} + +// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value. +func (s *TunnelOption) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsListValue) *TunnelOption { + s.Phase1EncryptionAlgorithms = v + return s +} + +// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value. +func (s *TunnelOption) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsListValue) *TunnelOption { + s.Phase1IntegrityAlgorithms = v + return s +} + +// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value. +func (s *TunnelOption) SetPhase1LifetimeSeconds(v int64) *TunnelOption { + s.Phase1LifetimeSeconds = &v + return s +} + +// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value. 
+func (s *TunnelOption) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersListValue) *TunnelOption { + s.Phase2DHGroupNumbers = v + return s +} + +// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value. +func (s *TunnelOption) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsListValue) *TunnelOption { + s.Phase2EncryptionAlgorithms = v + return s +} + +// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value. +func (s *TunnelOption) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsListValue) *TunnelOption { + s.Phase2IntegrityAlgorithms = v + return s +} + +// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value. +func (s *TunnelOption) SetPhase2LifetimeSeconds(v int64) *TunnelOption { + s.Phase2LifetimeSeconds = &v + return s +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *TunnelOption) SetPreSharedKey(v string) *TunnelOption { + s.PreSharedKey = &v + return s +} + +// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value. +func (s *TunnelOption) SetRekeyFuzzPercentage(v int64) *TunnelOption { + s.RekeyFuzzPercentage = &v + return s +} + +// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value. +func (s *TunnelOption) SetRekeyMarginTimeSeconds(v int64) *TunnelOption { + s.RekeyMarginTimeSeconds = &v + return s +} + +// SetReplayWindowSize sets the ReplayWindowSize field's value. +func (s *TunnelOption) SetReplayWindowSize(v int64) *TunnelOption { + s.ReplayWindowSize = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. +func (s *TunnelOption) SetTunnelInsideCidr(v string) *TunnelOption { + s.TunnelInsideCidr = &v + return s +} + type UnassignIpv6AddressesInput struct { _ struct{} `type:"structure"` @@ -85658,7 +93546,7 @@ func (s *UserBucketDetails) SetS3Key(v string) *UserBucketDetails { // Describes the user data for an instance. type UserData struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" sensitive:"true"` // The user data. If you are using an AWS SDK or command line tool, Base64-encoding // is performed for you, and you can load the text from a file. Otherwise, you @@ -85783,6 +93671,9 @@ type VgwTelemetry struct { // The number of accepted routes. AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + // The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The date and time of the last change in status. LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"` @@ -85813,6 +93704,12 @@ func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *VgwTelemetry) SetCertificateArn(v string) *VgwTelemetry { + s.CertificateArn = &v + return s +} + // SetLastStatusChange sets the LastStatusChange field's value. func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry { s.LastStatusChange = &v @@ -85850,7 +93747,7 @@ type Volume struct { // The time stamp when volume creation was initiated. CreateTime *time.Time `locationName:"createTime" type:"timestamp"` - // Indicates whether the volume will be encrypted. + // Indicates whether the volume is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. 
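// Editor's note - illustrative example, not part of the vendored patch. The generated
// Traffic Mirror types added above all follow the SDK's fluent-setter pattern: each SetX
// method stores the value (taking the address for scalar fields) and returns the receiver,
// so calls can be chained. A minimal sketch using only types and enum values introduced in
// this change; the rule number, protocol, CIDR block, and ports are hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/service/ec2"
//	)
//
//	func main() {
//		rule := (&ec2.TrafficMirrorFilterRule{}).
//			SetRuleNumber(100).
//			SetRuleAction(ec2.TrafficMirrorRuleActionAccept).
//			SetTrafficDirection(ec2.TrafficDirectionIngress).
//			SetProtocol(6). // IANA protocol number for TCP
//			SetDestinationCidrBlock("10.0.0.0/16").
//			SetDestinationPortRange((&ec2.TrafficMirrorPortRange{}).
//				SetFromPort(443).
//				SetToPort(443))
//
//		// String() pretty-prints the struct via awsutil.Prettify.
//		fmt.Println(rule)
//	}
//
// Chaining works because every setter returns the enclosing struct pointer, which is also
// why the setters accept plain values rather than pointers.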
@@ -85870,8 +93767,9 @@ type Volume struct { // it is not used in requests to create gp2, st1, sc1, or standard volumes. Iops *int64 `locationName:"iops" type:"integer"` - // The full ARN of the AWS Key Management Service (AWS KMS) customer master - // key (CMK) that was used to protect the volume encryption key for the volume. + // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) + // customer master key (CMK) that was used to protect the volume encryption + // key for the volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // The size of the volume, in GiBs. @@ -86721,6 +94619,9 @@ type VpcEndpoint struct { // (Interface endpoint) One or more network interfaces for the endpoint. NetworkInterfaceIds []*string `locationName:"networkInterfaceIdSet" locationNameList:"item" type:"list"` + // The ID of the AWS account that owns the VPC endpoint. + OwnerId *string `locationName:"ownerId" type:"string"` + // The policy document associated with the endpoint, if applicable. PolicyDocument *string `locationName:"policyDocument" type:"string"` @@ -86743,6 +94644,9 @@ type VpcEndpoint struct { // (Interface endpoint) One or more subnets in which the endpoint is located. SubnetIds []*string `locationName:"subnetIdSet" locationNameList:"item" type:"list"` + // Any tags assigned to the VPC endpoint. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The ID of the VPC endpoint. VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` @@ -86787,6 +94691,12 @@ func (s *VpcEndpoint) SetNetworkInterfaceIds(v []*string) *VpcEndpoint { return s } +// SetOwnerId sets the OwnerId field's value. +func (s *VpcEndpoint) SetOwnerId(v string) *VpcEndpoint { + s.OwnerId = &v + return s +} + // SetPolicyDocument sets the PolicyDocument field's value. func (s *VpcEndpoint) SetPolicyDocument(v string) *VpcEndpoint { s.PolicyDocument = &v @@ -86829,6 +94739,12 @@ func (s *VpcEndpoint) SetSubnetIds(v []*string) *VpcEndpoint { return s } +// SetTags sets the Tags field's value. +func (s *VpcEndpoint) SetTags(v []*Tag) *VpcEndpoint { + s.Tags = v + return s +} + // SetVpcEndpointId sets the VpcEndpointId field's value. func (s *VpcEndpoint) SetVpcEndpointId(v string) *VpcEndpoint { s.VpcEndpointId = &v @@ -86854,6 +94770,12 @@ type VpcEndpointConnection struct { // The date and time the VPC endpoint was created. CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp"` + // The DNS entries for the VPC endpoint. + DnsEntries []*DnsEntry `locationName:"dnsEntrySet" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of the network load balancers for the service. + NetworkLoadBalancerArns []*string `locationName:"networkLoadBalancerArnSet" locationNameList:"item" type:"list"` + // The ID of the service to which the endpoint is connected. ServiceId *string `locationName:"serviceId" type:"string"` @@ -86883,6 +94805,18 @@ func (s *VpcEndpointConnection) SetCreationTimestamp(v time.Time) *VpcEndpointCo return s } +// SetDnsEntries sets the DnsEntries field's value. +func (s *VpcEndpointConnection) SetDnsEntries(v []*DnsEntry) *VpcEndpointConnection { + s.DnsEntries = v + return s +} + +// SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. +func (s *VpcEndpointConnection) SetNetworkLoadBalancerArns(v []*string) *VpcEndpointConnection { + s.NetworkLoadBalancerArns = v + return s +} + // SetServiceId sets the ServiceId field's value. 
func (s *VpcEndpointConnection) SetServiceId(v string) *VpcEndpointConnection { s.ServiceId = &v @@ -87118,7 +95052,7 @@ type VpcPeeringConnectionVpcInfo struct { // requester VPC. PeeringOptions *VpcPeeringConnectionOptionsDescription `locationName:"peeringOptions" type:"structure"` - // The region in which the VPC is located. + // The Region in which the VPC is located. Region *string `locationName:"region" type:"string"` // The ID of the VPC. @@ -87311,6 +95245,9 @@ type VpnConnectionOptions struct { // Indicates whether the VPN connection uses static routes only. Static routes // must be used for devices that don't support BGP. StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + // Indicates the VPN tunnel options. + TunnelOptions []*TunnelOption `locationName:"tunnelOptionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -87329,6 +95266,12 @@ func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions return s } +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *VpnConnectionOptions) SetTunnelOptions(v []*TunnelOption) *VpnConnectionOptions { + s.TunnelOptions = v + return s +} + // Describes VPN connection options. type VpnConnectionOptionsSpecification struct { _ struct{} `type:"structure"` @@ -87341,7 +95284,7 @@ type VpnConnectionOptionsSpecification struct { StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` // The tunnel options for the VPN connection. - TunnelOptions []*VpnTunnelOptionsSpecification `locationNameList:"item" type:"list"` + TunnelOptions []*VpnTunnelOptionsSpecification `type:"list"` } // String returns the string representation @@ -87491,13 +95434,101 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { type VpnTunnelOptionsSpecification struct { _ struct{} `type:"structure"` + // The number of seconds after which a DPD timeout occurs. + // + // Constraints: A value between 0 and 30. + // + // Default: 30 + DPDTimeoutSeconds *int64 `type:"integer"` + + // The IKE versions that are permitted for the VPN tunnel. + // + // Valid values: ikev1 | ikev2 + IKEVersions []*IKEVersionsRequestListValue `locationName:"IKEVersion" locationNameList:"item" type:"list"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 1 IKE negotiations. + // + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. + // + // Valid values: AES128 | AES256 + Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. + // + // Valid values: SHA1 | SHA2-256 + Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 1 of the IKE negotiation, in seconds. + // + // Constraints: A value between 900 and 28,800. + // + // Default: 28800 + Phase1LifetimeSeconds *int64 `type:"integer"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 2 IKE negotiations. 
+ // + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: AES128 | AES256 + Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: SHA1 | SHA2-256 + Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 2 of the IKE negotiation, in seconds. + // + // Constraints: A value between 900 and 3,600. The value must be less than the + // value for Phase1LifetimeSeconds. + // + // Default: 3600 + Phase2LifetimeSeconds *int64 `type:"integer"` + // The pre-shared key (PSK) to establish initial authentication between the // virtual private gateway and customer gateway. // - // Constraints: Allowed characters are alphanumeric characters and ._. Must - // be between 8 and 64 characters in length and cannot start with zero (0). + // Constraints: Allowed characters are alphanumeric characters, periods (.), + // and underscores (_). Must be between 8 and 64 characters in length and cannot + // start with zero (0). PreSharedKey *string `type:"string"` + // The percentage of the rekey window (determined by RekeyMarginTimeSeconds) + // during which the rekey time is randomly selected. + // + // Constraints: A value between 0 and 100. + // + // Default: 100 + RekeyFuzzPercentage *int64 `type:"integer"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during + // which the AWS side of the VPN connection performs an IKE rekey. The exact + // time of the rekey is randomly selected based on the value for RekeyFuzzPercentage. + // + // Constraints: A value between 60 and half of Phase2LifetimeSeconds. + // + // Default: 540 + RekeyMarginTimeSeconds *int64 `type:"integer"` + + // The number of packets in an IKE replay window. + // + // Constraints: A value between 64 and 2048. + // + // Default: 1024 + ReplayWindowSize *int64 `type:"integer"` + // The range of inside IP addresses for the tunnel. Any specified CIDR blocks // must be unique across all VPN connections that use the same virtual private // gateway. @@ -87531,12 +95562,90 @@ func (s VpnTunnelOptionsSpecification) GoString() string { return s.String() } +// SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. +func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *VpnTunnelOptionsSpecification { + s.DPDTimeoutSeconds = &v + return s +} + +// SetIKEVersions sets the IKEVersions field's value. +func (s *VpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *VpnTunnelOptionsSpecification { + s.IKEVersions = v + return s +} + +// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase1DHGroupNumbers = v + return s +} + +// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value. 
+func (s *VpnTunnelOptionsSpecification) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase1EncryptionAlgorithms = v + return s +} + +// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase1IntegrityAlgorithms = v + return s +} + +// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase1LifetimeSeconds(v int64) *VpnTunnelOptionsSpecification { + s.Phase1LifetimeSeconds = &v + return s +} + +// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase2DHGroupNumbers = v + return s +} + +// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase2EncryptionAlgorithms = v + return s +} + +// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification { + s.Phase2IntegrityAlgorithms = v + return s +} + +// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value. +func (s *VpnTunnelOptionsSpecification) SetPhase2LifetimeSeconds(v int64) *VpnTunnelOptionsSpecification { + s.Phase2LifetimeSeconds = &v + return s +} + // SetPreSharedKey sets the PreSharedKey field's value. func (s *VpnTunnelOptionsSpecification) SetPreSharedKey(v string) *VpnTunnelOptionsSpecification { s.PreSharedKey = &v return s } +// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value. +func (s *VpnTunnelOptionsSpecification) SetRekeyFuzzPercentage(v int64) *VpnTunnelOptionsSpecification { + s.RekeyFuzzPercentage = &v + return s +} + +// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value. +func (s *VpnTunnelOptionsSpecification) SetRekeyMarginTimeSeconds(v int64) *VpnTunnelOptionsSpecification { + s.RekeyMarginTimeSeconds = &v + return s +} + +// SetReplayWindowSize sets the ReplayWindowSize field's value. +func (s *VpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *VpnTunnelOptionsSpecification { + s.ReplayWindowSize = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value. 
func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { s.TunnelInsideCidr = &v @@ -87661,6 +95770,9 @@ const ( // AllocationStateReleasedPermanentFailure is a AllocationState enum value AllocationStateReleasedPermanentFailure = "released-permanent-failure" + + // AllocationStatePending is a AllocationState enum value + AllocationStatePending = "pending" ) const ( @@ -87669,6 +95781,9 @@ const ( // AllocationStrategyDiversified is a AllocationStrategy enum value AllocationStrategyDiversified = "diversified" + + // AllocationStrategyCapacityOptimized is a AllocationStrategy enum value + AllocationStrategyCapacityOptimized = "capacityOptimized" ) const ( @@ -88020,6 +96135,11 @@ const ( ConversionTaskStateCompleted = "completed" ) +const ( + // CopyTagsFromSourceVolume is a CopyTagsFromSource enum value + CopyTagsFromSourceVolume = "volume" +) + const ( // CurrencyCodeValuesUsd is a CurrencyCodeValues enum value CurrencyCodeValuesUsd = "USD" @@ -88071,6 +96191,17 @@ const ( DeleteFleetErrorCodeUnexpectedError = "unexpectedError" ) +const ( + // DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid = "reserved-instances-id-invalid" + + // DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState = "reserved-instances-not-in-queued-state" + + // DeleteQueuedReservedInstancesErrorCodeUnexpectedError is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeUnexpectedError = "unexpected-error" +) + const ( // DeviceTypeEbs is a DeviceType enum value DeviceTypeEbs = "ebs" @@ -88153,6 +96284,9 @@ const ( // EventTypeError is a EventType enum value EventTypeError = "error" + + // EventTypeInformation is a EventType enum value + EventTypeInformation = "information" ) const ( @@ -88193,10 +96327,10 @@ const ( FleetActivityStatusError = "error" // FleetActivityStatusPendingFulfillment is a FleetActivityStatus enum value - FleetActivityStatusPendingFulfillment = "pending-fulfillment" + FleetActivityStatusPendingFulfillment = "pending_fulfillment" // FleetActivityStatusPendingTermination is a FleetActivityStatus enum value - FleetActivityStatusPendingTermination = "pending-termination" + FleetActivityStatusPendingTermination = "pending_termination" // FleetActivityStatusFulfilled is a FleetActivityStatus enum value FleetActivityStatusFulfilled = "fulfilled" @@ -88243,10 +96377,10 @@ const ( FleetStateCodeFailed = "failed" // FleetStateCodeDeletedRunning is a FleetStateCode enum value - FleetStateCodeDeletedRunning = "deleted-running" + FleetStateCodeDeletedRunning = "deleted_running" // FleetStateCodeDeletedTerminating is a FleetStateCode enum value - FleetStateCodeDeletedTerminating = "deleted-terminating" + FleetStateCodeDeletedTerminating = "deleted_terminating" // FleetStateCodeModifying is a FleetStateCode enum value FleetStateCodeModifying = "modifying" @@ -88307,6 +96441,14 @@ const ( GatewayTypeIpsec1 = "ipsec.1" ) +const ( + // HostRecoveryOn is a HostRecovery enum value + HostRecoveryOn = "on" + + // HostRecoveryOff is a HostRecovery enum value + HostRecoveryOff = "off" +) + const ( // HostTenancyDedicated is a HostTenancy enum value HostTenancyDedicated = "dedicated" @@ -88667,9 +96809,15 @@ const ( // InstanceTypeR54xlarge is a 
InstanceType enum value InstanceTypeR54xlarge = "r5.4xlarge" + // InstanceTypeR58xlarge is a InstanceType enum value + InstanceTypeR58xlarge = "r5.8xlarge" + // InstanceTypeR512xlarge is a InstanceType enum value InstanceTypeR512xlarge = "r5.12xlarge" + // InstanceTypeR516xlarge is a InstanceType enum value + InstanceTypeR516xlarge = "r5.16xlarge" + // InstanceTypeR524xlarge is a InstanceType enum value InstanceTypeR524xlarge = "r5.24xlarge" @@ -88688,9 +96836,15 @@ const ( // InstanceTypeR5a4xlarge is a InstanceType enum value InstanceTypeR5a4xlarge = "r5a.4xlarge" + // InstanceTypeR5a8xlarge is a InstanceType enum value + InstanceTypeR5a8xlarge = "r5a.8xlarge" + // InstanceTypeR5a12xlarge is a InstanceType enum value InstanceTypeR5a12xlarge = "r5a.12xlarge" + // InstanceTypeR5a16xlarge is a InstanceType enum value + InstanceTypeR5a16xlarge = "r5a.16xlarge" + // InstanceTypeR5a24xlarge is a InstanceType enum value InstanceTypeR5a24xlarge = "r5a.24xlarge" @@ -88706,9 +96860,15 @@ const ( // InstanceTypeR5d4xlarge is a InstanceType enum value InstanceTypeR5d4xlarge = "r5d.4xlarge" + // InstanceTypeR5d8xlarge is a InstanceType enum value + InstanceTypeR5d8xlarge = "r5d.8xlarge" + // InstanceTypeR5d12xlarge is a InstanceType enum value InstanceTypeR5d12xlarge = "r5d.12xlarge" + // InstanceTypeR5d16xlarge is a InstanceType enum value + InstanceTypeR5d16xlarge = "r5d.16xlarge" + // InstanceTypeR5d24xlarge is a InstanceType enum value InstanceTypeR5d24xlarge = "r5d.24xlarge" @@ -88796,6 +96956,30 @@ const ( // InstanceTypeI3Metal is a InstanceType enum value InstanceTypeI3Metal = "i3.metal" + // InstanceTypeI3enLarge is a InstanceType enum value + InstanceTypeI3enLarge = "i3en.large" + + // InstanceTypeI3enXlarge is a InstanceType enum value + InstanceTypeI3enXlarge = "i3en.xlarge" + + // InstanceTypeI3en2xlarge is a InstanceType enum value + InstanceTypeI3en2xlarge = "i3en.2xlarge" + + // InstanceTypeI3en3xlarge is a InstanceType enum value + InstanceTypeI3en3xlarge = "i3en.3xlarge" + + // InstanceTypeI3en6xlarge is a InstanceType enum value + InstanceTypeI3en6xlarge = "i3en.6xlarge" + + // InstanceTypeI3en12xlarge is a InstanceType enum value + InstanceTypeI3en12xlarge = "i3en.12xlarge" + + // InstanceTypeI3en24xlarge is a InstanceType enum value + InstanceTypeI3en24xlarge = "i3en.24xlarge" + + // InstanceTypeI3enMetal is a InstanceType enum value + InstanceTypeI3enMetal = "i3en.metal" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" @@ -88853,9 +97037,18 @@ const ( // InstanceTypeC59xlarge is a InstanceType enum value InstanceTypeC59xlarge = "c5.9xlarge" + // InstanceTypeC512xlarge is a InstanceType enum value + InstanceTypeC512xlarge = "c5.12xlarge" + // InstanceTypeC518xlarge is a InstanceType enum value InstanceTypeC518xlarge = "c5.18xlarge" + // InstanceTypeC524xlarge is a InstanceType enum value + InstanceTypeC524xlarge = "c5.24xlarge" + + // InstanceTypeC5Metal is a InstanceType enum value + InstanceTypeC5Metal = "c5.metal" + // InstanceTypeC5dLarge is a InstanceType enum value InstanceTypeC5dLarge = "c5d.large" @@ -88916,6 +97109,24 @@ const ( // InstanceTypeG3sXlarge is a InstanceType enum value InstanceTypeG3sXlarge = "g3s.xlarge" + // InstanceTypeG4dnXlarge is a InstanceType enum value + InstanceTypeG4dnXlarge = "g4dn.xlarge" + + // InstanceTypeG4dn2xlarge is a InstanceType enum value + InstanceTypeG4dn2xlarge = "g4dn.2xlarge" + + // InstanceTypeG4dn4xlarge is a InstanceType enum value + InstanceTypeG4dn4xlarge = "g4dn.4xlarge" + + // 
InstanceTypeG4dn8xlarge is a InstanceType enum value + InstanceTypeG4dn8xlarge = "g4dn.8xlarge" + + // InstanceTypeG4dn12xlarge is a InstanceType enum value + InstanceTypeG4dn12xlarge = "g4dn.12xlarge" + + // InstanceTypeG4dn16xlarge is a InstanceType enum value + InstanceTypeG4dn16xlarge = "g4dn.16xlarge" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -88973,9 +97184,15 @@ const ( // InstanceTypeM54xlarge is a InstanceType enum value InstanceTypeM54xlarge = "m5.4xlarge" + // InstanceTypeM58xlarge is a InstanceType enum value + InstanceTypeM58xlarge = "m5.8xlarge" + // InstanceTypeM512xlarge is a InstanceType enum value InstanceTypeM512xlarge = "m5.12xlarge" + // InstanceTypeM516xlarge is a InstanceType enum value + InstanceTypeM516xlarge = "m5.16xlarge" + // InstanceTypeM524xlarge is a InstanceType enum value InstanceTypeM524xlarge = "m5.24xlarge" @@ -88994,9 +97211,15 @@ const ( // InstanceTypeM5a4xlarge is a InstanceType enum value InstanceTypeM5a4xlarge = "m5a.4xlarge" + // InstanceTypeM5a8xlarge is a InstanceType enum value + InstanceTypeM5a8xlarge = "m5a.8xlarge" + // InstanceTypeM5a12xlarge is a InstanceType enum value InstanceTypeM5a12xlarge = "m5a.12xlarge" + // InstanceTypeM5a16xlarge is a InstanceType enum value + InstanceTypeM5a16xlarge = "m5a.16xlarge" + // InstanceTypeM5a24xlarge is a InstanceType enum value InstanceTypeM5a24xlarge = "m5a.24xlarge" @@ -89012,9 +97235,15 @@ const ( // InstanceTypeM5d4xlarge is a InstanceType enum value InstanceTypeM5d4xlarge = "m5d.4xlarge" + // InstanceTypeM5d8xlarge is a InstanceType enum value + InstanceTypeM5d8xlarge = "m5d.8xlarge" + // InstanceTypeM5d12xlarge is a InstanceType enum value InstanceTypeM5d12xlarge = "m5d.12xlarge" + // InstanceTypeM5d16xlarge is a InstanceType enum value + InstanceTypeM5d16xlarge = "m5d.16xlarge" + // InstanceTypeM5d24xlarge is a InstanceType enum value InstanceTypeM5d24xlarge = "m5d.24xlarge" @@ -89087,6 +97316,12 @@ const ( // InstanceTypeU12tb1Metal is a InstanceType enum value InstanceTypeU12tb1Metal = "u-12tb1.metal" + // InstanceTypeU18tb1Metal is a InstanceType enum value + InstanceTypeU18tb1Metal = "u-18tb1.metal" + + // InstanceTypeU24tb1Metal is a InstanceType enum value + InstanceTypeU24tb1Metal = "u-24tb1.metal" + // InstanceTypeA1Medium is a InstanceType enum value InstanceTypeA1Medium = "a1.medium" @@ -89101,6 +97336,105 @@ const ( // InstanceTypeA14xlarge is a InstanceType enum value InstanceTypeA14xlarge = "a1.4xlarge" + + // InstanceTypeA1Metal is a InstanceType enum value + InstanceTypeA1Metal = "a1.metal" + + // InstanceTypeM5dnLarge is a InstanceType enum value + InstanceTypeM5dnLarge = "m5dn.large" + + // InstanceTypeM5dnXlarge is a InstanceType enum value + InstanceTypeM5dnXlarge = "m5dn.xlarge" + + // InstanceTypeM5dn2xlarge is a InstanceType enum value + InstanceTypeM5dn2xlarge = "m5dn.2xlarge" + + // InstanceTypeM5dn4xlarge is a InstanceType enum value + InstanceTypeM5dn4xlarge = "m5dn.4xlarge" + + // InstanceTypeM5dn8xlarge is a InstanceType enum value + InstanceTypeM5dn8xlarge = "m5dn.8xlarge" + + // InstanceTypeM5dn12xlarge is a InstanceType enum value + InstanceTypeM5dn12xlarge = "m5dn.12xlarge" + + // InstanceTypeM5dn16xlarge is a InstanceType enum value + InstanceTypeM5dn16xlarge = "m5dn.16xlarge" + + // InstanceTypeM5dn24xlarge is a InstanceType enum value + InstanceTypeM5dn24xlarge = "m5dn.24xlarge" + + // InstanceTypeM5nLarge is a InstanceType enum value + InstanceTypeM5nLarge = "m5n.large" + + // InstanceTypeM5nXlarge is 
a InstanceType enum value + InstanceTypeM5nXlarge = "m5n.xlarge" + + // InstanceTypeM5n2xlarge is a InstanceType enum value + InstanceTypeM5n2xlarge = "m5n.2xlarge" + + // InstanceTypeM5n4xlarge is a InstanceType enum value + InstanceTypeM5n4xlarge = "m5n.4xlarge" + + // InstanceTypeM5n8xlarge is a InstanceType enum value + InstanceTypeM5n8xlarge = "m5n.8xlarge" + + // InstanceTypeM5n12xlarge is a InstanceType enum value + InstanceTypeM5n12xlarge = "m5n.12xlarge" + + // InstanceTypeM5n16xlarge is a InstanceType enum value + InstanceTypeM5n16xlarge = "m5n.16xlarge" + + // InstanceTypeM5n24xlarge is a InstanceType enum value + InstanceTypeM5n24xlarge = "m5n.24xlarge" + + // InstanceTypeR5dnLarge is a InstanceType enum value + InstanceTypeR5dnLarge = "r5dn.large" + + // InstanceTypeR5dnXlarge is a InstanceType enum value + InstanceTypeR5dnXlarge = "r5dn.xlarge" + + // InstanceTypeR5dn2xlarge is a InstanceType enum value + InstanceTypeR5dn2xlarge = "r5dn.2xlarge" + + // InstanceTypeR5dn4xlarge is a InstanceType enum value + InstanceTypeR5dn4xlarge = "r5dn.4xlarge" + + // InstanceTypeR5dn8xlarge is a InstanceType enum value + InstanceTypeR5dn8xlarge = "r5dn.8xlarge" + + // InstanceTypeR5dn12xlarge is a InstanceType enum value + InstanceTypeR5dn12xlarge = "r5dn.12xlarge" + + // InstanceTypeR5dn16xlarge is a InstanceType enum value + InstanceTypeR5dn16xlarge = "r5dn.16xlarge" + + // InstanceTypeR5dn24xlarge is a InstanceType enum value + InstanceTypeR5dn24xlarge = "r5dn.24xlarge" + + // InstanceTypeR5nLarge is a InstanceType enum value + InstanceTypeR5nLarge = "r5n.large" + + // InstanceTypeR5nXlarge is a InstanceType enum value + InstanceTypeR5nXlarge = "r5n.xlarge" + + // InstanceTypeR5n2xlarge is a InstanceType enum value + InstanceTypeR5n2xlarge = "r5n.2xlarge" + + // InstanceTypeR5n4xlarge is a InstanceType enum value + InstanceTypeR5n4xlarge = "r5n.4xlarge" + + // InstanceTypeR5n8xlarge is a InstanceType enum value + InstanceTypeR5n8xlarge = "r5n.8xlarge" + + // InstanceTypeR5n12xlarge is a InstanceType enum value + InstanceTypeR5n12xlarge = "r5n.12xlarge" + + // InstanceTypeR5n16xlarge is a InstanceType enum value + InstanceTypeR5n16xlarge = "r5n.16xlarge" + + // InstanceTypeR5n24xlarge is a InstanceType enum value + InstanceTypeR5n24xlarge = "r5n.24xlarge" ) const ( @@ -89480,6 +97814,12 @@ const ( // ReservedInstanceStateRetired is a ReservedInstanceState enum value ReservedInstanceStateRetired = "retired" + + // ReservedInstanceStateQueued is a ReservedInstanceState enum value + ReservedInstanceStateQueued = "queued" + + // ReservedInstanceStateQueuedDeleted is a ReservedInstanceState enum value + ReservedInstanceStateQueuedDeleted = "queued-deleted" ) const ( @@ -89556,6 +97896,15 @@ const ( // ResourceTypeSubnet is a ResourceType enum value ResourceTypeSubnet = "subnet" + // ResourceTypeTrafficMirrorFilter is a ResourceType enum value + ResourceTypeTrafficMirrorFilter = "traffic-mirror-filter" + + // ResourceTypeTrafficMirrorSession is a ResourceType enum value + ResourceTypeTrafficMirrorSession = "traffic-mirror-session" + + // ResourceTypeTrafficMirrorTarget is a ResourceType enum value + ResourceTypeTrafficMirrorTarget = "traffic-mirror-target" + // ResourceTypeTransitGateway is a ResourceType enum value ResourceTypeTransitGateway = "transit-gateway" @@ -89666,6 +98015,9 @@ const ( // SpotAllocationStrategyDiversified is a SpotAllocationStrategy enum value SpotAllocationStrategyDiversified = "diversified" + + // SpotAllocationStrategyCapacityOptimized is a SpotAllocationStrategy 
enum value + SpotAllocationStrategyCapacityOptimized = "capacity-optimized" ) const ( @@ -89824,6 +98176,60 @@ const ( TenancyHost = "host" ) +const ( + // TrafficDirectionIngress is a TrafficDirection enum value + TrafficDirectionIngress = "ingress" + + // TrafficDirectionEgress is a TrafficDirection enum value + TrafficDirectionEgress = "egress" +) + +const ( + // TrafficMirrorFilterRuleFieldDestinationPortRange is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldDestinationPortRange = "destination-port-range" + + // TrafficMirrorFilterRuleFieldSourcePortRange is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldSourcePortRange = "source-port-range" + + // TrafficMirrorFilterRuleFieldProtocol is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldProtocol = "protocol" + + // TrafficMirrorFilterRuleFieldDescription is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldDescription = "description" +) + +const ( + // TrafficMirrorNetworkServiceAmazonDns is a TrafficMirrorNetworkService enum value + TrafficMirrorNetworkServiceAmazonDns = "amazon-dns" +) + +const ( + // TrafficMirrorRuleActionAccept is a TrafficMirrorRuleAction enum value + TrafficMirrorRuleActionAccept = "accept" + + // TrafficMirrorRuleActionReject is a TrafficMirrorRuleAction enum value + TrafficMirrorRuleActionReject = "reject" +) + +const ( + // TrafficMirrorSessionFieldPacketLength is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldPacketLength = "packet-length" + + // TrafficMirrorSessionFieldDescription is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldDescription = "description" + + // TrafficMirrorSessionFieldVirtualNetworkId is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldVirtualNetworkId = "virtual-network-id" +) + +const ( + // TrafficMirrorTargetTypeNetworkInterface is a TrafficMirrorTargetType enum value + TrafficMirrorTargetTypeNetworkInterface = "network-interface" + + // TrafficMirrorTargetTypeNetworkLoadBalancer is a TrafficMirrorTargetType enum value + TrafficMirrorTargetTypeNetworkLoadBalancer = "network-load-balancer" +) + const ( // TrafficTypeAccept is a TrafficType enum value TrafficTypeAccept = "ACCEPT" @@ -89855,6 +98261,9 @@ const ( // TransitGatewayAttachmentResourceTypeVpn is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeVpn = "vpn" + + // TransitGatewayAttachmentResourceTypeDirectConnectGateway is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypeDirectConnectGateway = "direct-connect-gateway" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 7b42719d653..efec8d8a94e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -8,65 +8,32 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" ) -type retryer struct { - client.DefaultRetryer -} - -func (d retryer) RetryRules(r *request.Request) time.Duration { - switch r.Operation.Name { - case opModifyNetworkInterfaceAttribute: - fallthrough - case opAssignPrivateIpAddresses: - return customRetryRule(r) - 
default: - return d.DefaultRetryer.RetryRules(r) - } -} - -func customRetryRule(r *request.Request) time.Duration { - retryTimes := []time.Duration{ - time.Second, - 3 * time.Second, - 5 * time.Second, - } - - count := r.RetryCount - if count >= len(retryTimes) { - count = len(retryTimes) - 1 - } - - minTime := int(retryTimes[count]) - return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime) -} +const ( + // customRetryerMinRetryDelay sets min retry delay + customRetryerMinRetryDelay = 1 * time.Second -func setCustomRetryer(c *client.Client) { - maxRetries := aws.IntValue(c.Config.MaxRetries) - if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 - } - - c.Retryer = retryer{ - DefaultRetryer: client.DefaultRetryer{ - NumMaxRetries: maxRetries, - }, - } -} + // customRetryerMaxRetryDelay sets max retry delay + customRetryerMaxRetryDelay = 8 * time.Second +) func init() { - initClient = func(c *client.Client) { - if c.Config.Retryer == nil { - // Only override the retryer with a custom one if the config - // does not already contain a retryer - setCustomRetryer(c) - } - } initRequest = func(r *request.Request) { if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter r.Handlers.Build.PushFront(fillPresignedURL) } + + // only set the retryer on request if config doesn't have a retryer + if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || r.Operation.Name == opAssignPrivateIpAddresses) { + r.Retryer = client.DefaultRetryer{ + NumMaxRetries: client.DefaultRetryerMaxNumRetries, + MinRetryDelay: customRetryerMinRetryDelay, + MinThrottleDelay: customRetryerMinRetryDelay, + MaxRetryDelay: customRetryerMaxRetryDelay, + MaxThrottleDelay: customRetryerMaxRetryDelay, + } + } } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index c258e0e85c0..31c314e0e5f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -7,18 +7,19 @@ // capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest // in hardware up front, so you can develop and deploy applications faster. // -// To learn more about Amazon EC2, Amazon EBS, and Amazon VPC, see the following -// resources: +// To learn more, see the following resources: // -// * Amazon EC2 product page (http://aws.amazon.com/ec2) +// * Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon +// EC2 documentation (http://aws.amazon.com/documentation/ec2) // -// * Amazon EC2 documentation (http://aws.amazon.com/documentation/ec2) +// * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon +// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) // -// * Amazon EBS product page (http://aws.amazon.com/ebs) +// * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon +// VPC documentation (http://aws.amazon.com/documentation/vpc) // -// * Amazon VPC product page (http://aws.amazon.com/vpc) -// -// * Amazon VPC documentation (http://aws.amazon.com/documentation/vpc) +// * AWS VPN: AWS VPN product page (http://aws.amazon.com/vpn), AWS VPN documentation +// (http://aws.amazon.com/documentation/vpn) // // See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index 6acbc43fe3d..b2b9fb8c564 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -46,11 +46,11 @@ const ( // svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EC2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EC2 { svc := &EC2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-15", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index e3284308c8a..83be08f0f77 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -3,6 +3,7 @@ package ecr import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -441,7 +442,10 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // CreateRepository API operation for Amazon EC2 Container Registry. // -// Creates an image repository. +// Creates an Amazon Elastic Container Registry (Amazon ECR) repository, where +// users can push and pull Docker images. For more information, see Amazon ECR +// Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -473,7 +477,7 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits -// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon Elastic Container Registry User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository @@ -771,6 +775,158 @@ func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRe return out, req.Send() } +const opDescribeImageScanFindings = "DescribeImageScanFindings" + +// DescribeImageScanFindingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageScanFindings operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageScanFindings for more information on using the DescribeImageScanFindings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageScanFindingsRequest method. +// req, resp := client.DescribeImageScanFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsInput) (req *request.Request, output *DescribeImageScanFindingsOutput) { + op := &request.Operation{ + Name: opDescribeImageScanFindings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImageScanFindingsInput{} + } + + output = &DescribeImageScanFindingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageScanFindings API operation for Amazon EC2 Container Registry. +// +// Describes the image scan findings for the specified image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeImageScanFindings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. +// +// * ErrCodeScanNotFoundException "ScanNotFoundException" +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindings(input *DescribeImageScanFindingsInput) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + return out, req.Send() +} + +// DescribeImageScanFindingsWithContext is the same as DescribeImageScanFindings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageScanFindings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.Option) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImageScanFindingsPages iterates over the pages of a DescribeImageScanFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImageScanFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImageScanFindings operation. +// pageNum := 0 +// err := client.DescribeImageScanFindingsPages(params, +// func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeImageScanFindingsPages(input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool) error { + return c.DescribeImageScanFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImageScanFindingsPagesWithContext same as DescribeImageScanFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsPagesWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImageScanFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImageScanFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImageScanFindingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the @@ -884,7 +1040,7 @@ func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesIn // // Example iterating over at most 3 pages of a DescribeImages operation. 
// pageNum := 0 // err := client.DescribeImagesPages(params, -// func(page *DescribeImagesOutput, lastPage bool) bool { +// func(page *ecr.DescribeImagesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -916,10 +1072,12 @@ func (c *ECR) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeIma }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1027,7 +1185,7 @@ func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRe // // Example iterating over at most 3 pages of a DescribeRepositories operation. // pageNum := 0 // err := client.DescribeRepositoriesPages(params, -// func(page *DescribeRepositoriesOutput, lastPage bool) bool { +// func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1059,10 +1217,12 @@ func (c *ECR) DescribeRepositoriesPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1377,6 +1537,12 @@ func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewI Name: opGetLifecyclePolicyPreview, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1436,6 +1602,58 @@ func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLi return out, req.Send() } +// GetLifecyclePolicyPreviewPages iterates over the pages of a GetLifecyclePolicyPreview operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLifecyclePolicyPreview method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLifecyclePolicyPreview operation. +// pageNum := 0 +// err := client.GetLifecyclePolicyPreviewPages(params, +// func(page *ecr.GetLifecyclePolicyPreviewOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) GetLifecyclePolicyPreviewPages(input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool) error { + return c.GetLifecyclePolicyPreviewPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetLifecyclePolicyPreviewPagesWithContext same as GetLifecyclePolicyPreviewPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) GetLifecyclePolicyPreviewPagesWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetLifecyclePolicyPreviewInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetLifecyclePolicyPreviewOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1728,7 +1946,7 @@ func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opt // // Example iterating over at most 3 pages of a ListImages operation. // pageNum := 0 // err := client.ListImagesPages(params, -// func(page *ListImagesOutput, lastPage bool) bool { +// func(page *ecr.ListImagesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1760,10 +1978,12 @@ func (c *ECR) ListImagesPagesWithContext(ctx aws.Context, input *ListImagesInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListImagesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListImagesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1934,9 +2154,13 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits -// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon Elastic Container Registry User Guide. // +// * ErrCodeImageTagAlreadyExistsException "ImageTagAlreadyExistsException" +// The specified image is tagged with a tag that already exists. The repository +// is configured for tag immutability. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { req, out := c.PutImageRequest(input) @@ -1959,59 +2183,58 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } -const opPutLifecyclePolicy = "PutLifecyclePolicy" +const opPutImageScanningConfiguration = "PutImageScanningConfiguration" -// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutLifecyclePolicy operation. The "output" return +// PutImageScanningConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutImageScanningConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy +// See PutImageScanningConfiguration for more information on using the PutImageScanningConfiguration // API call, and error handling. 
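// The pagination rewrite above (for ListImages and the other Pages helpers)
// stops requesting pages as soon as the callback returns false, rather than
// draining the paginator behind a trailing flag. A minimal sketch of the
// callback contract, collecting roughly 500 image IDs at most:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	var ids []*ecr.ImageIdentifier
	err := svc.ListImagesPages(
		&ecr.ListImagesInput{RepositoryName: aws.String("project-a/nginx-web-app")},
		func(page *ecr.ListImagesOutput, lastPage bool) bool {
			ids = append(ids, page.ImageIds...)
			// Returning false ends the iteration; with the change above, no
			// further pages are fetched once we have enough.
			return len(ids) < 500
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("collected %d image IDs\n", len(ids))
}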
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutLifecyclePolicyRequest method. -// req, resp := client.PutLifecyclePolicyRequest(params) +// // Example sending a request using the PutImageScanningConfigurationRequest method. +// req, resp := client.PutImageScanningConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy -func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfigurationInput) (req *request.Request, output *PutImageScanningConfigurationOutput) { op := &request.Operation{ - Name: opPutLifecyclePolicy, + Name: opPutImageScanningConfiguration, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutLifecyclePolicyInput{} + input = &PutImageScanningConfigurationInput{} } - output = &PutLifecyclePolicyOutput{} + output = &PutImageScanningConfigurationOutput{} req = c.newRequest(op, input, output) return } -// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. +// PutImageScanningConfiguration API operation for Amazon EC2 Container Registry. // -// Creates or updates a lifecycle policy. For information about lifecycle policy -// syntax, see Lifecycle Policy Template (http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). +// Updates the image scanning configuration for a repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation PutLifecyclePolicy for usage and error information. +// API operation PutImageScanningConfiguration for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -2025,80 +2248,84 @@ func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *re // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy -func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { - req, out := c.PutLifecyclePolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfiguration(input *PutImageScanningConfigurationInput) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) return out, req.Send() } -// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of +// PutImageScanningConfigurationWithContext is the same as PutImageScanningConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutLifecyclePolicy for details on how to use this API operation. 
+// See PutImageScanningConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { - req, out := c.PutLifecyclePolicyRequest(input) +func (c *ECR) PutImageScanningConfigurationWithContext(ctx aws.Context, input *PutImageScanningConfigurationInput, opts ...request.Option) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSetRepositoryPolicy = "SetRepositoryPolicy" +const opPutImageTagMutability = "PutImageTagMutability" -// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the -// client's request for the SetRepositoryPolicy operation. The "output" return +// PutImageTagMutabilityRequest generates a "aws/request.Request" representing the +// client's request for the PutImageTagMutability operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// See PutImageTagMutability for more information on using the PutImageTagMutability // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SetRepositoryPolicyRequest method. -// req, resp := client.SetRepositoryPolicyRequest(params) +// // Example sending a request using the PutImageTagMutabilityRequest method. +// req, resp := client.PutImageTagMutabilityRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability +func (c *ECR) PutImageTagMutabilityRequest(input *PutImageTagMutabilityInput) (req *request.Request, output *PutImageTagMutabilityOutput) { op := &request.Operation{ - Name: opSetRepositoryPolicy, + Name: opPutImageTagMutability, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SetRepositoryPolicyInput{} + input = &PutImageTagMutabilityInput{} } - output = &SetRepositoryPolicyOutput{} + output = &PutImageTagMutabilityOutput{} req = c.newRequest(op, input, output) return } -// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. +// PutImageTagMutability API operation for Amazon EC2 Container Registry. // -// Applies a repository policy on a specified repository to control access permissions. +// Updates the image tag mutability settings for a repository. 
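// Before the tag mutability description continues below, a minimal sketch of
// the PutImageScanningConfiguration call completed just above. Its input shape
// sits outside this hunk and is assumed to carry the repository name plus an
// ImageScanningConfiguration, matching the CreateRepositoryInput fields added
// later in this diff:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// Enable scan-on-push for an existing repository.
	_, err := svc.PutImageScanningConfiguration(&ecr.PutImageScanningConfigurationInput{
		RepositoryName: aws.String("project-a/nginx-web-app"),
		ImageScanningConfiguration: &ecr.ImageScanningConfiguration{
			ScanOnPush: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}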
When a repository +// is configured with tag immutability, all image tags within the repository +// will be prevented from being overwritten. For more information, see +// Image Tag Mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation SetRepositoryPolicy for usage and error information. +// API operation PutImageTagMutability for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -2112,81 +2339,81 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability +func (c *ECR) PutImageTagMutability(input *PutImageTagMutabilityInput) (*PutImageTagMutabilityOutput, error) { + req, out := c.PutImageTagMutabilityRequest(input) return out, req.Send() } -// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// PutImageTagMutabilityWithContext is the same as PutImageTagMutability with the addition of // the ability to pass a context and additional request options. // -// See SetRepositoryPolicy for details on how to use this API operation. +// See PutImageTagMutability for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +func (c *ECR) PutImageTagMutabilityWithContext(ctx aws.Context, input *PutImageTagMutabilityInput, opts ...request.Option) (*PutImageTagMutabilityOutput, error) { - req, out := c.PutImageTagMutabilityRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" +const opPutLifecyclePolicy = "PutLifecyclePolicy" -// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the -// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecyclePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error.
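// A matching sketch for the PutImageTagMutability operation defined above,
// with the input shape (outside this hunk) assumed to carry the repository
// name and the new setting. Once a repository is IMMUTABLE, PutImage fails
// with the ImageTagAlreadyExistsException code added earlier in this diff
// whenever a push would overwrite an existing tag:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	_, err := svc.PutImageTagMutability(&ecr.PutImageTagMutabilityInput{
		RepositoryName:     aws.String("project-a/nginx-web-app"),
		ImageTagMutability: aws.String("IMMUTABLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
}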
// -// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. -// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// // Example sending a request using the PutLifecyclePolicyRequest method. +// req, resp := client.PutLifecyclePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { op := &request.Operation{ - Name: opStartLifecyclePolicyPreview, + Name: opPutLifecyclePolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartLifecyclePolicyPreviewInput{} + input = &PutLifecyclePolicyInput{} } - output = &StartLifecyclePolicyPreviewOutput{} + output = &PutLifecyclePolicyOutput{} req = c.newRequest(op, input, output) return } -// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Starts a preview of the specified lifecycle policy. This allows you to see -// the results before creating the lifecycle policy. +// Creates or updates a lifecycle policy. For information about lifecycle policy +// syntax, see Lifecycle Policy Template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation StartLifecyclePolicyPreview for usage and error information. +// API operation PutLifecyclePolicy for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -2200,39 +2427,309 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" -// The lifecycle policy could not be found, and no policy is set to the repository. -// -// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" -// The previous lifecycle policy preview request has not completed. Please try -// again later. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) return out, req.Send() } -// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of // the ability to pass a context and additional request options. // -// See StartLifecyclePolicyPreview for details on how to use this API operation. +// See PutLifecyclePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opSetRepositoryPolicy = "SetRepositoryPolicy" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetRepositoryPolicyRequest method. +// req, resp := client.SetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opSetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRepositoryPolicyInput{} + } + + output = &SetRepositoryPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. 
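// SetRepositoryPolicy, described just below, takes the policy as a JSON
// document. A minimal sketch granting pull access to a second account; the
// PolicyText field name is assumed from the input shape outside this hunk,
// and the account ID is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowPull",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
	    "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"]
	  }]
	}`

	_, err := svc.SetRepositoryPolicy(&ecr.SetRepositoryPolicyInput{
		RepositoryName: aws.String("project-a/nginx-web-app"),
		PolicyText:     aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}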
+// +// Applies a repository policy on a specified repository to control access permissions. +// For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicies.html) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation SetRepositoryPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + return out, req.Send() +} + +// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See SetRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartImageScan = "StartImageScan" + +// StartImageScanRequest generates a "aws/request.Request" representing the +// client's request for the StartImageScan operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartImageScan for more information on using the StartImageScan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartImageScanRequest method. 
+// req, resp := client.StartImageScanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Request, output *StartImageScanOutput) { + op := &request.Operation{ + Name: opStartImageScan, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartImageScanInput{} + } + + output = &StartImageScanOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartImageScan API operation for Amazon EC2 Container Registry. +// +// Starts an image vulnerability scan. An image scan can only be started once +// per day on an individual image. This limit includes any scan performed +// on initial push. For more information, see Image Scanning (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) +// in the Amazon Elastic Container Registry User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartImageScan for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScan(input *StartImageScanInput) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) + return out, req.Send() +} + +// StartImageScanWithContext is the same as StartImageScan with the addition of +// the ability to pass a context and additional request options. +// +// See StartImageScan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartImageScanWithContext(ctx aws.Context, input *StartImageScanInput, opts ...request.Option) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service.
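// A minimal sketch of StartImageScan as documented above. The input and output
// shapes sit outside this hunk; ImageScanStatus is assumed to expose a Status
// string, per the full file:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	out, err := svc.StartImageScan(&ecr.StartImageScanInput{
		RepositoryName: aws.String("project-a/nginx-web-app"),
		ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The scan runs asynchronously; poll DescribeImageScanFindings for results.
	fmt.Println(aws.StringValue(out.ImageScanStatus.Status))
}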
+// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. +// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // @@ -2504,7 +3001,7 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits -// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon Elastic Container Registry User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart @@ -2529,6 +3026,41 @@ func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPart return out, req.Send() } +// This data type is used in the ImageScanFinding data type. +type Attribute struct { + _ struct{} `type:"structure"` + + // The attribute key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The value assigned to the attribute key. + Value *string `locationName:"value" min:"1" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Attribute) SetKey(v string) *Attribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Attribute) SetValue(v string) *Attribute { + s.Value = &v + return s +} + // An object representing authorization data for an Amazon ECR registry. type AuthorizationData struct { _ struct{} `type:"structure"` @@ -2723,6 +3255,16 @@ func (s *BatchDeleteImageInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2833,6 +3375,16 @@ func (s *BatchGetImageInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3032,6 +3584,17 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreateRepositoryInput struct { _ struct{} `type:"structure"` + // The image scanning configuration for the repository. 
This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The tag mutability setting for the repository. If this parameter is omitted, + // the default setting of MUTABLE will be used which will allow image tags to + // be overwritten. If IMMUTABLE is specified, all image tags within the repository + // will be immutable which will prevent them from being overwritten. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + // The name to use for the repository. The repository name may be specified // on its own (such as nginx-web-app) or it can be prepended with a namespace // to group the repository into a category (such as project-a/nginx-web-app). @@ -3039,6 +3602,10 @@ type CreateRepositoryInput struct { // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + // The metadata that you apply to the repository to help you categorize and + // organize them. Each tag consists of a key and an optional value, both of + // which you define. Tag keys can have a maximum character length of 128 characters, + // and tag values can have a maximum length of 256 characters. Tags []*Tag `locationName:"tags" type:"list"` } @@ -3068,6 +3635,18 @@ func (s *CreateRepositoryInput) Validate() error { return nil } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { + s.ImageScanningConfiguration = v + return s +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *CreateRepositoryInput) SetImageTagMutability(v string) *CreateRepositoryInput { + s.ImageTagMutability = &v + return s +} + // SetRepositoryName sets the RepositoryName field's value. func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput { s.RepositoryName = &v @@ -3291,36 +3870,162 @@ type DeleteRepositoryPolicyInput struct { _ struct{} `type:"structure"` // The AWS account ID associated with the registry that contains the repository - // policy to delete. If you do not specify a registry, the default registry - // is assumed. + // policy to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteRepositoryPolicyInput) SetRegistryId(v string) *DeleteRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +type DeleteRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy that was deleted from the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. +func (s *DeleteRepositoryPolicyOutput) SetPolicyText(v string) *DeleteRepositoryPolicyOutput { + s.PolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteRepositoryPolicyOutput) SetRegistryId(v string) *DeleteRepositoryPolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteRepositoryPolicyOutput { + s.RepositoryName = &v + return s +} + +type DescribeImageScanFindingsInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, + // if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeImageScanFindings + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to describe the image scan findings for. If you do not specify a + // registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that is associated with the repository policy - // to delete. + // The repository for the image for which to describe the scan findings. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s DeleteRepositoryPolicyInput) String() string { +func (s DescribeImageScanFindingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRepositoryPolicyInput) GoString() string { +func (s DescribeImageScanFindingsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRepositoryPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"} +func (s *DescribeImageScanFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageScanFindingsInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3328,23 +4033,53 @@ func (s *DeleteRepositoryPolicyInput) Validate() error { return nil } +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsInput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsInput { + s.ImageId = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImageScanFindingsInput) SetMaxResults(v int64) *DescribeImageScanFindingsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsInput) SetNextToken(v string) *DescribeImageScanFindingsInput { + s.NextToken = &v + return s +} + // SetRegistryId sets the RegistryId field's value. -func (s *DeleteRepositoryPolicyInput) SetRegistryId(v string) *DeleteRepositoryPolicyInput { +func (s *DescribeImageScanFindingsInput) SetRegistryId(v string) *DescribeImageScanFindingsInput { s.RegistryId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteRepositoryPolicyInput { +func (s *DescribeImageScanFindingsInput) SetRepositoryName(v string) *DescribeImageScanFindingsInput { s.RepositoryName = &v return s } -type DeleteRepositoryPolicyOutput struct { +type DescribeImageScanFindingsOutput struct { _ struct{} `type:"structure"` - // The JSON repository policy that was deleted from the repository. - PolicyText *string `locationName:"policyText" type:"string"` + // An object with identifying information for an Amazon ECR image. 
+ ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The information contained in the image scan findings. + ImageScanFindings *ImageScanFindings `locationName:"imageScanFindings" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The nextToken value to include in a future DescribeImageScanFindings request. + // When the results of a DescribeImageScanFindings request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` // The registry ID associated with the request. RegistryId *string `locationName:"registryId" type:"string"` @@ -3354,29 +4089,47 @@ type DeleteRepositoryPolicyOutput struct { } // String returns the string representation -func (s DeleteRepositoryPolicyOutput) String() string { +func (s DescribeImageScanFindingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRepositoryPolicyOutput) GoString() string { +func (s DescribeImageScanFindingsOutput) GoString() string { return s.String() } -// SetPolicyText sets the PolicyText field's value. -func (s *DeleteRepositoryPolicyOutput) SetPolicyText(v string) *DeleteRepositoryPolicyOutput { - s.PolicyText = &v +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsOutput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsOutput { + s.ImageId = v + return s +} + +// SetImageScanFindings sets the ImageScanFindings field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanFindings(v *ImageScanFindings) *DescribeImageScanFindingsOutput { + s.ImageScanFindings = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanStatus(v *ImageScanStatus) *DescribeImageScanFindingsOutput { + s.ImageScanStatus = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsOutput) SetNextToken(v string) *DescribeImageScanFindingsOutput { + s.NextToken = &v return s } // SetRegistryId sets the RegistryId field's value. -func (s *DeleteRepositoryPolicyOutput) SetRegistryId(v string) *DeleteRepositoryPolicyOutput { +func (s *DescribeImageScanFindingsOutput) SetRegistryId(v string) *DescribeImageScanFindingsOutput { s.RegistryId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteRepositoryPolicyOutput { +func (s *DescribeImageScanFindingsOutput) SetRepositoryName(v string) *DescribeImageScanFindingsOutput { s.RepositoryName = &v return s } @@ -3437,7 +4190,7 @@ type DescribeImagesInput struct { // registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // A list of repositories to describe. + // The repository that contains the images to describe. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3468,6 +4221,16 @@ func (s *DescribeImagesInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3936,22 +4699,21 @@ type GetLifecyclePolicyPreviewInput struct { ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest - // in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest - // only returns
 maxResults results in a single page along with a nextToken - // response element. The remaining results of the initial request can be seen - // by sending
 another GetLifecyclePolicyPreviewRequest request with the returned - // nextToken
 value. This value can be between 1 and 1000. If this
 parameter - // is not used, then GetLifecyclePolicyPreviewRequest returns up to
 100 results - // and a nextToken value, if
 applicable. This option cannot be used when you - // specify images with imageIds. + // in paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another GetLifecyclePolicyPreviewRequest request with the returned nextToken + // value. This value can be between 1 and 1000. If this parameter is not used, + // then GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken + // value, if applicable. This option cannot be used when you specify images + // with imageIds. MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - // The nextToken value returned from a previous paginated
 GetLifecyclePolicyPreviewRequest - // request where maxResults was used and the
 results exceeded the value of - // that parameter. Pagination continues from the end of the
 previous results - // that returned the nextToken value. This value is
 null when there are no - // more results to return. This option cannot be used when you specify images - // with imageIds. + // The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This option cannot be used when you specify images with imageIds. NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repository. @@ -3989,6 +4751,16 @@ func (s *GetLifecyclePolicyPreviewInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.ImageIds != nil { + for i, v := range s.ImageIds { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4212,7 +4984,7 @@ type Image struct { ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` // The image manifest associated with the image. - ImageManifest *string `locationName:"imageManifest" type:"string"` + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` // The AWS account ID associated with the registry containing the image. RegistryId *string `locationName:"registryId" type:"string"` @@ -4266,6 +5038,12 @@ type ImageDetail struct { // the current image was pushed to the repository. ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` + // A summary of the last completed image scan. + ImageScanFindingsSummary *ImageScanFindingsSummary `locationName:"imageScanFindingsSummary" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + // The size, in bytes, of the image in the repository. // // Beginning with Docker version 1.9, the Docker client compresses image layers @@ -4274,134 +5052,373 @@ type ImageDetail struct { // size than the image sizes returned by DescribeImages. ImageSizeInBytes *int64 `locationName:"imageSizeInBytes" type:"long"` - // The list of tags associated with this image. - ImageTags []*string `locationName:"imageTags" type:"list"` + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` + + // The AWS account ID associated with the registry to which this image belongs. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to which this image belongs. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s ImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDetail) GoString() string { + return s.String() +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. +func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { + s.ImagePushedAt = &v + return s +} + +// SetImageScanFindingsSummary sets the ImageScanFindingsSummary field's value. 
+func (s *ImageDetail) SetImageScanFindingsSummary(v *ImageScanFindingsSummary) *ImageDetail { + s.ImageScanFindingsSummary = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *ImageDetail) SetImageScanStatus(v *ImageScanStatus) *ImageDetail { + s.ImageScanStatus = v + return s +} + +// SetImageSizeInBytes sets the ImageSizeInBytes field's value. +func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { + s.ImageSizeInBytes = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail { + s.ImageTags = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *ImageDetail) SetRegistryId(v string) *ImageDetail { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { + s.RepositoryName = &v + return s +} + +// An object representing an Amazon ECR image failure. +type ImageFailure struct { + _ struct{} `type:"structure"` + + // The code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The image ID associated with the failure. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` +} + +// String returns the string representation +func (s ImageFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageFailure) GoString() string { + return s.String() +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ImageFailure) SetFailureCode(v string) *ImageFailure { + s.FailureCode = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *ImageFailure) SetFailureReason(v string) *ImageFailure { + s.FailureReason = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure { + s.ImageId = v + return s +} + +// An object with identifying information for an Amazon ECR image. +type ImageIdentifier struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The tag used for the image. + ImageTag *string `locationName:"imageTag" min:"1" type:"string"` +} + +// String returns the string representation +func (s ImageIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImageIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImageIdentifier"} + if s.ImageTag != nil && len(*s.ImageTag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *ImageIdentifier) SetImageDigest(v string) *ImageIdentifier { + s.ImageDigest = &v + return s +} + +// SetImageTag sets the ImageTag field's value. +func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { + s.ImageTag = &v + return s +} + +// Contains information about an image scan finding. 
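//
// A minimal sketch (not part of the generated code) of building the
// ImageIdentifier defined above, which the scan operations added in this
// revision take as input; the tag value is hypothetical.
//
//    id := &ecr.ImageIdentifier{
//        ImageTag: aws.String("latest"), // must be at least 1 character
//    }
//    if err := id.Validate(); err != nil {
//        fmt.Println(err) // an empty tag fails the min-length check
//    }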
+type ImageScanFinding struct { + _ struct{} `type:"structure"` + + // A collection of attributes of the host from which the finding is generated. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The description of the finding. + Description *string `locationName:"description" type:"string"` + + // The name associated with the finding, usually a CVE number. + Name *string `locationName:"name" type:"string"` + + // The finding severity. + Severity *string `locationName:"severity" type:"string" enum:"FindingSeverity"` + + // A link containing additional details about the security vulnerability. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s ImageScanFinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFinding) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ImageScanFinding) SetAttributes(v []*Attribute) *ImageScanFinding { + s.Attributes = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImageScanFinding) SetDescription(v string) *ImageScanFinding { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *ImageScanFinding) SetName(v string) *ImageScanFinding { + s.Name = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *ImageScanFinding) SetSeverity(v string) *ImageScanFinding { + s.Severity = &v + return s +} + +// SetUri sets the Uri field's value. +func (s *ImageScanFinding) SetUri(v string) *ImageScanFinding { + s.Uri = &v + return s +} + +// The details of an image scan. +type ImageScanFindings struct { + _ struct{} `type:"structure"` - // The AWS account ID associated with the registry to which this image belongs. - RegistryId *string `locationName:"registryId" type:"string"` + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` - // The name of the repository to which this image belongs. - RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + // The findings from the image scan. + Findings []*ImageScanFinding `locationName:"findings" type:"list"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` } // String returns the string representation -func (s ImageDetail) String() string { +func (s ImageScanFindings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImageDetail) GoString() string { +func (s ImageScanFindings) GoString() string { return s.String() } -// SetImageDigest sets the ImageDigest field's value. -func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { - s.ImageDigest = &v - return s -} - -// SetImagePushedAt sets the ImagePushedAt field's value. -func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { - s.ImagePushedAt = &v - return s -} - -// SetImageSizeInBytes sets the ImageSizeInBytes field's value. -func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { - s.ImageSizeInBytes = &v +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. 
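//
// A hedged sketch (not part of the generated code) of walking an
// ImageScanFindings value; "findings" is a hypothetical *ecr.ImageScanFindings,
// and the severity keys use the FindingSeverity enum values declared later
// in this file.
//
//    if n := findings.FindingSeverityCounts[ecr.FindingSeverityCritical]; n != nil && *n > 0 {
//        for _, f := range findings.Findings {
//            fmt.Println(aws.StringValue(f.Name), aws.StringValue(f.Severity))
//        }
//    }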
+func (s *ImageScanFindings) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindings { + s.FindingSeverityCounts = v return s } -// SetImageTags sets the ImageTags field's value. -func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail { - s.ImageTags = v +// SetFindings sets the Findings field's value. +func (s *ImageScanFindings) SetFindings(v []*ImageScanFinding) *ImageScanFindings { + s.Findings = v return s } -// SetRegistryId sets the RegistryId field's value. -func (s *ImageDetail) SetRegistryId(v string) *ImageDetail { - s.RegistryId = &v +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindings) SetImageScanCompletedAt(v time.Time) *ImageScanFindings { + s.ImageScanCompletedAt = &v return s } -// SetRepositoryName sets the RepositoryName field's value. -func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { - s.RepositoryName = &v +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. +func (s *ImageScanFindings) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindings { + s.VulnerabilitySourceUpdatedAt = &v return s } -// An object representing an Amazon ECR image failure. -type ImageFailure struct { +// A summary of the last completed image scan. +type ImageScanFindingsSummary struct { _ struct{} `type:"structure"` - // The code associated with the failure. - FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` - // The reason for the failure. - FailureReason *string `locationName:"failureReason" type:"string"` + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` - // The image ID associated with the failure. - ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` } // String returns the string representation -func (s ImageFailure) String() string { +func (s ImageScanFindingsSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImageFailure) GoString() string { +func (s ImageScanFindingsSummary) GoString() string { return s.String() } -// SetFailureCode sets the FailureCode field's value. -func (s *ImageFailure) SetFailureCode(v string) *ImageFailure { - s.FailureCode = &v +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindingsSummary) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindingsSummary { + s.FindingSeverityCounts = v return s } -// SetFailureReason sets the FailureReason field's value. -func (s *ImageFailure) SetFailureReason(v string) *ImageFailure { - s.FailureReason = &v +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindingsSummary) SetImageScanCompletedAt(v time.Time) *ImageScanFindingsSummary { + s.ImageScanCompletedAt = &v return s } -// SetImageId sets the ImageId field's value. -func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure { - s.ImageId = v +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. 
+func (s *ImageScanFindingsSummary) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindingsSummary { + s.VulnerabilitySourceUpdatedAt = &v return s } -// An object with identifying information for an Amazon ECR image. -type ImageIdentifier struct { +// The current status of an image scan. +type ImageScanStatus struct { _ struct{} `type:"structure"` - // The sha256 digest of the image manifest. - ImageDigest *string `locationName:"imageDigest" type:"string"` + // The description of the image scan status. + Description *string `locationName:"description" type:"string"` - // The tag used for the image. - ImageTag *string `locationName:"imageTag" type:"string"` + // The current state of an image scan. + Status *string `locationName:"status" type:"string" enum:"ScanStatus"` } // String returns the string representation -func (s ImageIdentifier) String() string { +func (s ImageScanStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImageIdentifier) GoString() string { +func (s ImageScanStatus) GoString() string { return s.String() } -// SetImageDigest sets the ImageDigest field's value. -func (s *ImageIdentifier) SetImageDigest(v string) *ImageIdentifier { - s.ImageDigest = &v +// SetDescription sets the Description field's value. +func (s *ImageScanStatus) SetDescription(v string) *ImageScanStatus { + s.Description = &v return s } -// SetImageTag sets the ImageTag field's value. -func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { - s.ImageTag = &v +// SetStatus sets the Status field's value. +func (s *ImageScanStatus) SetStatus(v string) *ImageScanStatus { + s.Status = &v + return s +} + +// The image scanning configuration for a repository. +type ImageScanningConfiguration struct { + _ struct{} `type:"structure"` + + // The setting that determines whether images are scanned after being pushed + // to a repository. If set to true, images will be scanned after being pushed. + // If this parameter is not specified, it will default to false and images will + // not be scanned unless a scan is manually started with the StartImageScan + // API. + ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"` +} + +// String returns the string representation +func (s ImageScanningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanningConfiguration) GoString() string { + return s.String() +} + +// SetScanOnPush sets the ScanOnPush field's value. +func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfiguration { + s.ScanOnPush = &v return s } @@ -4836,66 +5853,251 @@ func (s *ListImagesInput) SetRepositoryName(v string) *ListImagesInput { return s } -type ListImagesOutput struct { +type ListImagesOutput struct { + _ struct{} `type:"structure"` + + // The list of image IDs for the requested repository. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesOutput) GoString() string { + return s.String() +} + +// SetImageIds sets the ImageIds field's value. +func (s *ListImagesOutput) SetImageIds(v []*ImageIdentifier) *ListImagesOutput { + s.ImageIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the only supported resource is an Amazon ECR repository. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +type PutImageInput struct { + _ struct{} `type:"structure"` + + // The image manifest corresponding to the image to be uploaded. + // + // ImageManifest is a required field + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` + + // The tag to associate with the image. This parameter is required for images + // that use the Docker Image Manifest V2 Schema 2 or OCI formats. + ImageTag *string `locationName:"imageTag" min:"1" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to put the image. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to put the image. 
+ // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageInput"} + if s.ImageManifest == nil { + invalidParams.Add(request.NewErrParamRequired("ImageManifest")) + } + if s.ImageManifest != nil && len(*s.ImageManifest) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageManifest", 1)) + } + if s.ImageTag != nil && len(*s.ImageTag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageManifest sets the ImageManifest field's value. +func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { + s.ImageManifest = &v + return s +} + +// SetImageTag sets the ImageTag field's value. +func (s *PutImageInput) SetImageTag(v string) *PutImageInput { + s.ImageTag = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageInput) SetRegistryId(v string) *PutImageInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput { + s.RepositoryName = &v + return s +} + +type PutImageOutput struct { _ struct{} `type:"structure"` - // The list of image IDs for the requested repository. - ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` - - // The nextToken value to include in a future ListImages request. When the results - // of a ListImages request exceed maxResults, this value can be used to retrieve - // the next page of results. This value is null when there are no more results - // to return. - NextToken *string `locationName:"nextToken" type:"string"` + // Details of the image uploaded. + Image *Image `locationName:"image" type:"structure"` } // String returns the string representation -func (s ListImagesOutput) String() string { +func (s PutImageOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListImagesOutput) GoString() string { +func (s PutImageOutput) GoString() string { return s.String() } -// SetImageIds sets the ImageIds field's value. -func (s *ListImagesOutput) SetImageIds(v []*ImageIdentifier) *ListImagesOutput { - s.ImageIds = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput { - s.NextToken = &v +// SetImage sets the Image field's value. +func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { + s.Image = v return s } -type ListTagsForResourceInput struct { +type PutImageScanningConfigurationInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. Currently, the only supported resource is an Amazon ECR repository. 
+ // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. // - // ResourceArn is a required field - ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + // ImageScanningConfiguration is a required field + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to update the image scanning configuration setting. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to update the image scanning configuration + // setting. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s PutImageScanningConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s PutImageScanningConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *PutImageScanningConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageScanningConfigurationInput"} + if s.ImageScanningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ImageScanningConfiguration")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } if invalidParams.Len() > 0 { @@ -4904,73 +6106,101 @@ func (s *ListTagsForResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationInput { + s.ImageScanningConfiguration = v return s } -type ListTagsForResourceOutput struct { +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationInput) SetRegistryId(v string) *PutImageScanningConfigurationInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationInput) SetRepositoryName(v string) *PutImageScanningConfigurationInput { + s.RepositoryName = &v + return s +} + +type PutImageScanningConfigurationOutput struct { _ struct{} `type:"structure"` - // The tags for the resource. - Tags []*Tag `locationName:"tags" type:"list"` + // The image scanning configuration setting for the repository. 
+ ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` } // String returns the string representation -func (s ListTagsForResourceOutput) String() string { +func (s PutImageScanningConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { +func (s PutImageScanningConfigurationOutput) GoString() string { return s.String() } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationOutput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationOutput { + s.ImageScanningConfiguration = v return s } -type PutImageInput struct { +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationOutput) SetRegistryId(v string) *PutImageScanningConfigurationOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationOutput) SetRepositoryName(v string) *PutImageScanningConfigurationOutput { + s.RepositoryName = &v + return s +} + +type PutImageTagMutabilityInput struct { _ struct{} `type:"structure"` - // The image manifest corresponding to the image to be uploaded. + // The tag mutability setting for the repository. If MUTABLE is specified, image + // tags can be overwritten. If IMMUTABLE is specified, all image tags within + // the repository will be immutable which will prevent them from being overwritten. // - // ImageManifest is a required field - ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` - - // The tag to associate with the image. This parameter is required for images - // that use the Docker Image Manifest V2 Schema 2 or OCI formats. - ImageTag *string `locationName:"imageTag" type:"string"` + // ImageTagMutability is a required field + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" required:"true" enum:"ImageTagMutability"` // The AWS account ID associated with the registry that contains the repository - // in which to put the image. If you do not specify a registry, the default - // registry is assumed. + // in which to update the image tag mutability settings. If you do not specify + // a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository in which to put the image. + // The name of the repository in which to update the image tag mutability settings. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s PutImageInput) String() string { +func (s PutImageTagMutabilityInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutImageInput) GoString() string { +func (s PutImageTagMutabilityInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutImageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutImageInput"} - if s.ImageManifest == nil { - invalidParams.Add(request.NewErrParamRequired("ImageManifest")) +func (s *PutImageTagMutabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageTagMutabilityInput"} + if s.ImageTagMutability == nil { + invalidParams.Add(request.NewErrParamRequired("ImageTagMutability")) } if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) @@ -4985,50 +6215,62 @@ func (s *PutImageInput) Validate() error { return nil } -// SetImageManifest sets the ImageManifest field's value. -func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { - s.ImageManifest = &v - return s -} - -// SetImageTag sets the ImageTag field's value. -func (s *PutImageInput) SetImageTag(v string) *PutImageInput { - s.ImageTag = &v +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *PutImageTagMutabilityInput) SetImageTagMutability(v string) *PutImageTagMutabilityInput { + s.ImageTagMutability = &v return s } // SetRegistryId sets the RegistryId field's value. -func (s *PutImageInput) SetRegistryId(v string) *PutImageInput { +func (s *PutImageTagMutabilityInput) SetRegistryId(v string) *PutImageTagMutabilityInput { s.RegistryId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput { +func (s *PutImageTagMutabilityInput) SetRepositoryName(v string) *PutImageTagMutabilityInput { s.RepositoryName = &v return s } -type PutImageOutput struct { +type PutImageTagMutabilityOutput struct { _ struct{} `type:"structure"` - // Details of the image uploaded. - Image *Image `locationName:"image" type:"structure"` + // The image tag mutability setting for the repository. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` } // String returns the string representation -func (s PutImageOutput) String() string { +func (s PutImageTagMutabilityOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutImageOutput) GoString() string { +func (s PutImageTagMutabilityOutput) GoString() string { return s.String() } -// SetImage sets the Image field's value. -func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { - s.Image = v +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *PutImageTagMutabilityOutput) SetImageTagMutability(v string) *PutImageTagMutabilityOutput { + s.ImageTagMutability = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. 
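//
// A minimal usage sketch (not part of the generated code) for the new tag
// mutability setting, assuming a configured *ecr.ECR client named svc and a
// hypothetical repository name.
//
//    _, err := svc.PutImageTagMutability(&ecr.PutImageTagMutabilityInput{
//        RepositoryName:     aws.String("my-repo"),
//        ImageTagMutability: aws.String(ecr.ImageTagMutabilityImmutable),
//    })
//
// Once IMMUTABLE is set, pushing an existing tag again is rejected with
// ImageTagAlreadyExistsException (see errors.go in this diff).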
+func (s *PutImageTagMutabilityOutput) SetRegistryId(v string) *PutImageTagMutabilityOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageTagMutabilityOutput) SetRepositoryName(v string) *PutImageTagMutabilityOutput { + s.RepositoryName = &v return s } @@ -5041,7 +6283,7 @@ type PutLifecyclePolicyInput struct { LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"` // The AWS account ID associated with the registry that contains the repository. - // If you do
 not specify a registry, the default registry is assumed. + // If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` // The name of the repository to receive the policy. @@ -5148,6 +6390,12 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The image scanning configuration for a repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The tag mutability setting for the repository. + ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` + // The AWS account ID associated with the registry that contains the repository. RegistryId *string `locationName:"registryId" type:"string"` @@ -5181,6 +6429,18 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository { return s } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { + s.ImageScanningConfiguration = v + return s +} + +// SetImageTagMutability sets the ImageTagMutability field's value. +func (s *Repository) SetImageTagMutability(v string) *Repository { + s.ImageTagMutability = &v + return s +} + // SetRegistryId sets the RegistryId field's value. func (s *Repository) SetRegistryId(v string) *Repository { s.RegistryId = &v @@ -5213,7 +6473,9 @@ type SetRepositoryPolicyInput struct { // operation. This is intended to prevent accidental repository lock outs. Force *bool `locationName:"force" type:"boolean"` - // The JSON repository policy text to apply to the repository. + // The JSON repository policy text to apply to the repository. For more information, + // see Amazon ECR Repository Policy Examples (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicyExamples.html) + // in the Amazon Elastic Container Registry User Guide. // // PolicyText is a required field PolicyText *string `locationName:"policyText" type:"string" required:"true"` @@ -5322,6 +6584,127 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } +type StartImageScanInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to start an image scan request. If you do not specify a registry, + // the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that contains the images to scan. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartImageScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
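//
// A hedged usage sketch (not part of the generated code) for the new
// StartImageScan operation, assuming a configured *ecr.ECR client named svc;
// repository and tag names are hypothetical.
//
//    out, err := svc.StartImageScan(&ecr.StartImageScanInput{
//        RepositoryName: aws.String("my-repo"),
//        ImageId: &ecr.ImageIdentifier{
//            ImageTag: aws.String("latest"),
//        },
//    })
//    if err == nil && out.ImageScanStatus != nil {
//        fmt.Println(aws.StringValue(out.ImageScanStatus.Status)) // e.g. IN_PROGRESS
//    }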
+func (s *StartImageScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImageScanInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanInput) SetImageId(v *ImageIdentifier) *StartImageScanInput { + s.ImageId = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanInput) SetRegistryId(v string) *StartImageScanInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanInput) SetRepositoryName(v string) *StartImageScanInput { + s.RepositoryName = &v + return s +} + +type StartImageScanOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s StartImageScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanOutput) SetImageId(v *ImageIdentifier) *StartImageScanOutput { + s.ImageId = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *StartImageScanOutput) SetImageScanStatus(v *ImageScanStatus) *StartImageScanOutput { + s.ImageScanStatus = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanOutput) SetRegistryId(v string) *StartImageScanOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
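//
// A sketch (not part of the generated code) of distinguishing the new
// scan-related error codes with the awserr type assertions that the doc
// comments in this file describe; err is assumed to come from an ECR call.
//
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case ecr.ErrCodeScanNotFoundException:
//            // image scanning is not enabled, or no scan exists yet
//        case ecr.ErrCodeImageNotFoundException:
//            // the requested image is missing from the repository
//        }
//    }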
+func (s *StartImageScanOutput) SetRepositoryName(v string) *StartImageScanOutput { + s.RepositoryName = &v + return s +} + type StartLifecyclePolicyPreviewInput struct { _ struct{} `type:"structure"` @@ -5770,6 +7153,26 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // FindingSeverityInformational is a FindingSeverity enum value + FindingSeverityInformational = "INFORMATIONAL" + + // FindingSeverityLow is a FindingSeverity enum value + FindingSeverityLow = "LOW" + + // FindingSeverityMedium is a FindingSeverity enum value + FindingSeverityMedium = "MEDIUM" + + // FindingSeverityHigh is a FindingSeverity enum value + FindingSeverityHigh = "HIGH" + + // FindingSeverityCritical is a FindingSeverity enum value + FindingSeverityCritical = "CRITICAL" + + // FindingSeverityUndefined is a FindingSeverity enum value + FindingSeverityUndefined = "UNDEFINED" +) + const ( // ImageActionTypeExpire is a ImageActionType enum value ImageActionTypeExpire = "EXPIRE" @@ -5792,6 +7195,14 @@ const ( ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" ) +const ( + // ImageTagMutabilityMutable is a ImageTagMutability enum value + ImageTagMutabilityMutable = "MUTABLE" + + // ImageTagMutabilityImmutable is a ImageTagMutability enum value + ImageTagMutabilityImmutable = "IMMUTABLE" +) + const ( // LayerAvailabilityAvailable is a LayerAvailability enum value LayerAvailabilityAvailable = "AVAILABLE" @@ -5822,6 +7233,17 @@ const ( LifecyclePolicyPreviewStatusFailed = "FAILED" ) +const ( + // ScanStatusInProgress is a ScanStatus enum value + ScanStatusInProgress = "IN_PROGRESS" + + // ScanStatusComplete is a ScanStatus enum value + ScanStatusComplete = "COMPLETE" + + // ScanStatusFailed is a ScanStatus enum value + ScanStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 834905106ab..786759af0fd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -23,6 +23,13 @@ const ( // The image requested does not exist in the specified repository. ErrCodeImageNotFoundException = "ImageNotFoundException" + // ErrCodeImageTagAlreadyExistsException for service response error code + // "ImageTagAlreadyExistsException". + // + // The specified image is tagged with a tag that already exists. The repository + // is configured for tag immutability. + ErrCodeImageTagAlreadyExistsException = "ImageTagAlreadyExistsException" + // ErrCodeInvalidLayerException for service response error code // "InvalidLayerException". // @@ -102,7 +109,7 @@ const ( // // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits - // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) + // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) // in the Amazon Elastic Container Registry User Guide. ErrCodeLimitExceededException = "LimitExceededException" @@ -133,6 +140,13 @@ const ( // repository policy. ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException" + // ErrCodeScanNotFoundException for service response error code + // "ScanNotFoundException". 
+ // + // The specified image scan could not be found. Ensure that image scanning is + // enabled on the repository and try again. + ErrCodeScanNotFoundException = "ScanNotFoundException" + // ErrCodeServerException for service response error code // "ServerException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 3eba7f696b6..b1ee8a2c723 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ecr" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECR { svc := &ECR{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-09-21", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index d15267f9ae0..384737bcf2a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -66,7 +66,7 @@ func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ // AWS services can be managed on your behalf. However, if the IAM user that // makes the call does not have permissions to create the service-linked role, // it is not created. For more information, see Using Service-Linked Roles for -// Amazon ECS (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) +// Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -157,13 +157,13 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // // Runs and maintains a desired number of tasks from a specified task definition. // If the number of tasks running in a service drops below the desiredCount, -// Amazon ECS spawns another copy of the task in the specified cluster. To update +// Amazon ECS runs another copy of the task in the specified cluster. To update // an existing service, see UpdateService. // // In addition to maintaining the desired count of tasks in your service, you -// can optionally run your service behind a load balancer. The load balancer -// distributes traffic across the tasks that are associated with the service. 
-// For more information, see Service Load Balancing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) +// can optionally run your service behind one or more load balancers. The load +// balancers distribute traffic across the tasks that are associated with the +// service. For more information, see Service Load Balancing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) // in the Amazon Elastic Container Service Developer Guide. // // Tasks for services that do not use a load balancer are considered healthy @@ -246,17 +246,14 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // // * By default, the service scheduler attempts to balance tasks across Availability // Zones in this manner (although you can choose a different placement strategy) -// with the placementStrategy parameter): -// -// Sort the valid container instances, giving priority to instances that have -// the fewest number of running tasks for this service in their respective -// Availability Zone. For example, if zone A has one running service task -// and zones B and C each have zero, valid container instances in either -// zone B or C are considered optimal for placement. -// -// Place the new service task on a valid container instance in an optimal Availability -// Zone (based on the previous steps), favoring container instances with -// the fewest number of running tasks for this service. +// with the placementStrategy parameter): Sort the valid container instances, +// giving priority to instances that have the fewest number of running tasks +// for this service in their respective Availability Zone. For example, if +// zone A has one running service task and zones B and C each have zero, +// valid container instances in either zone B or C are considered optimal +// for placement. Place the new service task on a valid container instance +// in an optimal Availability Zone (based on the previous steps), favoring +// container instances with the fewest number of running tasks for this service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -363,7 +360,7 @@ func (c *ECS) CreateTaskSetRequest(input *CreateTaskSetInput) (req *request.Requ // // Create a task set in the specified cluster and service. This is used when // a service uses the EXTERNAL deployment controller type. For more information, -// see Amazon ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// see Amazon ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -477,10 +474,8 @@ func (c *ECS) DeleteAccountSettingRequest(input *DeleteAccountSettingInput) (req // DeleteAccountSetting API operation for Amazon EC2 Container Service. // -// Modifies the ARN and resource ID format of a resource for a specified IAM -// user, IAM role, or the root user for an account. You can specify whether -// the new ARN and resource ID format are disabled for new resources that are -// created. +// Disables an account setting for a specified IAM user, IAM role, or the root +// user for an account. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -773,12 +768,12 @@ func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Requ // When you delete a service, if there are still running tasks that require // cleanup, the service status moves from ACTIVE to DRAINING, and the service // is no longer visible in the console or in the ListServices API operation. -// After the tasks have stopped, then the service status moves from DRAINING -// to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed -// with the DescribeServices API operation. However, in the future, INACTIVE -// services may be cleaned up and purged from Amazon ECS record keeping, and -// DescribeServices calls on those services return a ServiceNotFoundException -// error. +// After all tasks have transitioned to either STOPPING or STOPPED status, the +// service status moves from DRAINING to INACTIVE. Services in the DRAINING +// or INACTIVE status can still be viewed with the DescribeServices API operation. +// However, in the future, INACTIVE services may be cleaned up and purged from +// Amazon ECS record keeping, and DescribeServices calls on those services return +// a ServiceNotFoundException error. // // If you attempt to create a new service with the same name as an existing // service in either ACTIVE or DRAINING status, you receive an error. @@ -879,7 +874,7 @@ func (c *ECS) DeleteTaskSetRequest(input *DeleteTaskSetInput) (req *request.Requ // // Deletes a specified task set within a service. This is used when a service // uses the EXTERNAL deployment controller type. For more information, see Amazon -// ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1567,7 +1562,7 @@ func (c *ECS) DescribeTaskSetsRequest(input *DescribeTaskSetsInput) (req *reques // // Describes the task sets in the specified cluster and service. This is used // when a service uses the EXTERNAL deployment controller type. For more information, -// see Amazon ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// see Amazon ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1766,6 +1761,7 @@ func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req // DiscoverPollEndpoint API operation for Amazon EC2 Container Service. // +// // This action is only used by the Amazon ECS agent, and it is not intended // for use outside of the agent. // @@ -1853,7 +1849,7 @@ func (c *ECS) ListAccountSettingsRequest(input *ListAccountSettingsInput) (req * // ListAccountSettings API operation for Amazon EC2 Container Service. // -// Lists the account settings for an Amazon ECS resource for a specified principal. +// Lists the account settings for a specified principal. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2092,7 +2088,7 @@ func (c *ECS) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, // // Example iterating over at most 3 pages of a ListClusters operation. // pageNum := 0 // err := client.ListClustersPages(params, -// func(page *ListClustersOutput, lastPage bool) bool { +// func(page *ecs.ListClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2124,10 +2120,12 @@ func (c *ECS) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2244,7 +2242,7 @@ func (c *ECS) ListContainerInstancesWithContext(ctx aws.Context, input *ListCont // // Example iterating over at most 3 pages of a ListContainerInstances operation. // pageNum := 0 // err := client.ListContainerInstancesPages(params, -// func(page *ListContainerInstancesOutput, lastPage bool) bool { +// func(page *ecs.ListContainerInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2276,10 +2274,12 @@ func (c *ECS) ListContainerInstancesPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListContainerInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListContainerInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2392,7 +2392,7 @@ func (c *ECS) ListServicesWithContext(ctx aws.Context, input *ListServicesInput, // // Example iterating over at most 3 pages of a ListServices operation. // pageNum := 0 // err := client.ListServicesPages(params, -// func(page *ListServicesOutput, lastPage bool) bool { +// func(page *ecs.ListServicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2424,10 +2424,12 @@ func (c *ECS) ListServicesPagesWithContext(ctx aws.Context, input *ListServicesI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2634,7 +2636,7 @@ func (c *ECS) ListTaskDefinitionFamiliesWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListTaskDefinitionFamilies operation. // pageNum := 0 // err := client.ListTaskDefinitionFamiliesPages(params, -// func(page *ListTaskDefinitionFamiliesOutput, lastPage bool) bool { +// func(page *ecs.ListTaskDefinitionFamiliesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2666,10 +2668,12 @@ func (c *ECS) ListTaskDefinitionFamiliesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTaskDefinitionFamiliesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTaskDefinitionFamiliesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2780,7 +2784,7 @@ func (c *ECS) ListTaskDefinitionsWithContext(ctx aws.Context, input *ListTaskDef // // Example iterating over at most 3 pages of a ListTaskDefinitions operation. 
// pageNum := 0 // err := client.ListTaskDefinitionsPages(params, -// func(page *ListTaskDefinitionsOutput, lastPage bool) bool { +// func(page *ecs.ListTaskDefinitionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2812,10 +2816,12 @@ func (c *ECS) ListTaskDefinitionsPagesWithContext(ctx aws.Context, input *ListTa }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTaskDefinitionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTaskDefinitionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2937,7 +2943,7 @@ func (c *ECS) ListTasksWithContext(ctx aws.Context, input *ListTasksInput, opts // // Example iterating over at most 3 pages of a ListTasks operation. // pageNum := 0 // err := client.ListTasksPages(params, -// func(page *ListTasksOutput, lastPage bool) bool { +// func(page *ecs.ListTasksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2969,10 +2975,12 @@ func (c *ECS) ListTasksPagesWithContext(ctx aws.Context, input *ListTasksInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTasksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTasksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3020,15 +3028,35 @@ func (c *ECS) PutAccountSettingRequest(input *PutAccountSettingInput) (req *requ // PutAccountSetting API operation for Amazon EC2 Container Service. // -// Modifies the ARN and resource ID format of a resource type for a specified -// IAM user, IAM role, or the root user for an account. If the account setting -// for the root user is changed, it sets the default setting for all of the -// IAM users and roles for which no individual account setting has been set. -// The opt-in and opt-out account setting can be set for each Amazon ECS resource -// separately. The ARN and resource ID format of a resource will be defined -// by the opt-in status of the IAM user or role that created the resource. Enabling -// this setting is required to use new Amazon ECS features such as resource -// tagging. For more information, see Amazon Resource Names (ARNs) and IDs (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-resource-ids.html) +// Modifies an account setting. Account settings are set on a per-Region basis. +// +// If you change the account setting for the root user, the default settings +// for all of the IAM users and roles for which no individual account setting +// has been specified are reset. For more information, see Account Settings +// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat +// are specified, the Amazon Resource Name (ARN) and resource ID format of the +// resource type for a specified IAM user, IAM role, or the root user for an +// account is affected. The opt-in and opt-out account setting must be set for +// each Amazon ECS resource separately. The ARN and resource ID format of a +// resource will be defined by the opt-in status of the IAM user or role that +// created the resource. You must enable this setting to use Amazon ECS features +// such as resource tagging. +// +// When awsvpcTrunking is specified, the elastic network interface (ENI) limit +// for any new container instances that support the feature is changed. 
If awsvpcTrunking +// is enabled, any new container instances that support the feature are launched +// with the increased ENI limits available to them. For more information, see +// Elastic Network Interface Trunking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-eni.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// When containerInsights is specified, the default setting indicating whether +// CloudWatch Container Insights is enabled for your clusters is changed. If +// containerInsights is enabled, any new clusters that are created will have +// Container Insights enabled unless you disable it during cluster creation. +// For more information, see CloudWatch Container Insights (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3117,10 +3145,9 @@ func (c *ECS) PutAccountSettingDefaultRequest(input *PutAccountSettingDefaultInp // PutAccountSettingDefault API operation for Amazon EC2 Container Service. // -// Modifies the ARN and resource ID format of a resource type for all IAM users -// on an account for which no individual account setting has been set. Enabling -// this setting is required to use new Amazon ECS features such as resource -// tagging. +// Modifies an account setting for all IAM users on an account for whom no individual +// account setting has been specified. Account settings are set on a per-Region +// basis. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3306,6 +3333,7 @@ func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceI // RegisterContainerInstance API operation for Amazon EC2 Container Service. // +// // This action is only used by the Amazon ECS agent, and it is not intended // for use outside of the agent. // @@ -3417,7 +3445,7 @@ func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) // in the Docker run reference. If you specify the awsvpc network mode, the // task is allocated an elastic network interface, and you must specify a NetworkConfiguration // when you create a service or run a task with the task definition. For more -// information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) +// information, see Task Networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3800,6 +3828,101 @@ func (c *ECS) StopTaskWithContext(ctx aws.Context, input *StopTaskInput, opts .. return out, req.Send() } +const opSubmitAttachmentStateChanges = "SubmitAttachmentStateChanges" + +// SubmitAttachmentStateChangesRequest generates a "aws/request.Request" representing the +// client's request for the SubmitAttachmentStateChanges operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error.
+// +// See SubmitAttachmentStateChanges for more information on using the SubmitAttachmentStateChanges +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SubmitAttachmentStateChangesRequest method. +// req, resp := client.SubmitAttachmentStateChangesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/SubmitAttachmentStateChanges +func (c *ECS) SubmitAttachmentStateChangesRequest(input *SubmitAttachmentStateChangesInput) (req *request.Request, output *SubmitAttachmentStateChangesOutput) { + op := &request.Operation{ + Name: opSubmitAttachmentStateChanges, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitAttachmentStateChangesInput{} + } + + output = &SubmitAttachmentStateChangesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SubmitAttachmentStateChanges API operation for Amazon EC2 Container Service. +// +// +// This action is only used by the Amazon ECS agent, and it is not intended +// for use outside of the agent. +// +// Sent to acknowledge that an attachment changed states. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Service's +// API operation SubmitAttachmentStateChanges for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server issue. +// +// * ErrCodeClientException "ClientException" +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have authorization to perform the requested action. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/SubmitAttachmentStateChanges +func (c *ECS) SubmitAttachmentStateChanges(input *SubmitAttachmentStateChangesInput) (*SubmitAttachmentStateChangesOutput, error) { + req, out := c.SubmitAttachmentStateChangesRequest(input) + return out, req.Send() +} + +// SubmitAttachmentStateChangesWithContext is the same as SubmitAttachmentStateChanges with the addition of +// the ability to pass a context and additional request options. +// +// See SubmitAttachmentStateChanges for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
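The WithContext variants documented above all share this shape. A short sketch of bounding a call with a timeout, using ListClusters rather than the agent-only SubmitAttachmentStateChanges (the deadline is illustrative):

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/service/ecs"
)

// listClustersWithDeadline bounds the call; per the generated docs the
// context must be non-nil or the SDK panics.
func listClustersWithDeadline(client *ecs.ECS) (*ecs.ListClustersOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return client.ListClustersWithContext(ctx, &ecs.ListClustersInput{})
}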
+func (c *ECS) SubmitAttachmentStateChangesWithContext(ctx aws.Context, input *SubmitAttachmentStateChangesInput, opts ...request.Option) (*SubmitAttachmentStateChangesOutput, error) { + req, out := c.SubmitAttachmentStateChangesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSubmitContainerStateChange = "SubmitContainerStateChange" // SubmitContainerStateChangeRequest generates a "aws/request.Request" representing the @@ -3844,6 +3967,7 @@ func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChang // SubmitContainerStateChange API operation for Amazon EC2 Container Service. // +// // This action is only used by the Amazon ECS agent, and it is not intended // for use outside of the agent. // @@ -3934,6 +4058,7 @@ func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (r // SubmitTaskStateChange API operation for Amazon EC2 Container Service. // +// // This action is only used by the Amazon ECS agent, and it is not intended // for use outside of the agent. // @@ -3958,6 +4083,10 @@ func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (r // * ErrCodeAccessDeniedException "AccessDeniedException" // You do not have authorization to perform the requested action. // +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/SubmitTaskStateChange func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) { req, out := c.SubmitTaskStateChangeRequest(input) @@ -4175,6 +4304,98 @@ func (c *ECS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInpu return out, req.Send() } +const opUpdateClusterSettings = "UpdateClusterSettings" + +// UpdateClusterSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClusterSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateClusterSettings for more information on using the UpdateClusterSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateClusterSettingsRequest method. +// req, resp := client.UpdateClusterSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/UpdateClusterSettings +func (c *ECS) UpdateClusterSettingsRequest(input *UpdateClusterSettingsInput) (req *request.Request, output *UpdateClusterSettingsOutput) { + op := &request.Operation{ + Name: opUpdateClusterSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateClusterSettingsInput{} + } + + output = &UpdateClusterSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateClusterSettings API operation for Amazon EC2 Container Service. +// +// Modifies the settings to use for a cluster. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Service's +// API operation UpdateClusterSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server issue. +// +// * ErrCodeClientException "ClientException" +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ErrCodeClusterNotFoundException "ClusterNotFoundException" +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are Region-specific. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/UpdateClusterSettings +func (c *ECS) UpdateClusterSettings(input *UpdateClusterSettingsInput) (*UpdateClusterSettingsOutput, error) { + req, out := c.UpdateClusterSettingsRequest(input) + return out, req.Send() +} + +// UpdateClusterSettingsWithContext is the same as UpdateClusterSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateClusterSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) UpdateClusterSettingsWithContext(ctx aws.Context, input *UpdateClusterSettingsInput, opts ...request.Option) (*UpdateClusterSettingsOutput, error) { + req, out := c.UpdateClusterSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateContainerAgent = "UpdateContainerAgent" // UpdateContainerAgentRequest generates a "aws/request.Request" representing the @@ -4341,9 +4562,13 @@ func (c *ECS) UpdateContainerInstancesStateRequest(input *UpdateContainerInstanc // // Modifies the status of an Amazon ECS container instance. // -// You can change the status of a container instance to DRAINING to manually -// remove an instance from a cluster, for example to perform system updates, -// update the Docker daemon, or scale down the cluster size. +// Once a container instance has reached an ACTIVE state, you can change the +// status of a container instance to DRAINING to manually remove an instance +// from a cluster, for example to perform system updates, update the Docker +// daemon, or scale down the cluster size. +// +// A container instance cannot be changed to DRAINING until it has reached an +// ACTIVE status. If the instance is in any other status, an error will be received. // // When you set a container instance to DRAINING, Amazon ECS prevents new tasks // from being scheduled for placement on the container instance and replacement @@ -4381,8 +4606,9 @@ func (c *ECS) UpdateContainerInstancesStateRequest(input *UpdateContainerInstanc // A container instance has completed draining when it has no more RUNNING tasks. 
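The new UpdateClusterSettings operation pairs with the ClusterSetting type added later in this diff. A hedged sketch of enabling Container Insights on an existing cluster (the cluster name is a placeholder):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func enableContainerInsights(client *ecs.ECS) error {
	_, err := client.UpdateClusterSettings(&ecs.UpdateClusterSettingsInput{
		Cluster: aws.String("default"), // placeholder cluster name
		Settings: []*ecs.ClusterSetting{{
			// containerInsights is the only setting name the API supports.
			Name:  aws.String(ecs.ClusterSettingNameContainerInsights),
			Value: aws.String("enabled"),
		}},
	})
	return err
}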
// You can verify this using ListTasks. // -// When you set a container instance to ACTIVE, the Amazon ECS scheduler can -// begin scheduling tasks on the instance again. +// When a container instance has been drained, you can set a container instance +// to ACTIVE status and once it has reached that status the Amazon ECS scheduler +// can begin scheduling tasks on the instance again. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4544,16 +4770,13 @@ func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Requ // // * By default, the service scheduler attempts to balance tasks across Availability // Zones in this manner (although you can choose a different placement strategy): -// -// Sort the valid container instances by the fewest number of running tasks +// Sort the valid container instances by the fewest number of running tasks // for this service in the same Availability Zone as the instance. For example, // if zone A has one running service task and zones B and C each have zero, // valid container instances in either zone B or C are considered optimal -// for placement. -// -// Place the new service task on a valid container instance in an optimal Availability -// Zone (based on the previous steps), favoring container instances with -// the fewest number of running tasks for this service. +// for placement. Place the new service task on a valid container instance +// in an optimal Availability Zone (based on the previous steps), favoring +// container instances with the fewest number of running tasks for this service. // // When the service scheduler stops running tasks, it attempts to maintain balance // across the Availability Zones in your cluster using the following logic: @@ -4678,7 +4901,7 @@ func (c *ECS) UpdateServicePrimaryTaskSetRequest(input *UpdateServicePrimaryTask // Modifies which task set in a service is the primary task set. Any parameters // that are updated on the primary task set in a service will transition to // the service. This is used when a service uses the EXTERNAL deployment controller -// type. For more information, see Amazon ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// type. For more information, see Amazon ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4794,7 +5017,7 @@ func (c *ECS) UpdateTaskSetRequest(input *UpdateTaskSetInput) (req *request.Requ // UpdateTaskSet API operation for Amazon EC2 Container Service. // // Modifies a task set. This is used when a service uses the EXTERNAL deployment -// controller type. For more information, see Amazon ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// controller type. For more information, see Amazon ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5123,7 +5346,7 @@ type Cluster struct { // The Amazon Resource Name (ARN) that identifies the cluster. 
The ARN contains // the arn:aws:ecs namespace, followed by the Region of the cluster, the AWS // account ID of the cluster owner, the cluster namespace, and then the cluster - // name. For example, arn:aws:ecs:region:012345678910:cluster/test.. + // name. For example, arn:aws:ecs:region:012345678910:cluster/test. ClusterArn *string `locationName:"clusterArn" type:"string"` // A user-generated string that you use to identify your cluster. @@ -5139,6 +5362,10 @@ type Cluster struct { // The number of tasks in the cluster that are in the RUNNING state. RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + // The settings for the cluster. This parameter indicates whether CloudWatch + // Container Insights is enabled or disabled for a cluster. + Settings []*ClusterSetting `locationName:"settings" type:"list"` + // Additional information about your clusters that are separated by launch type, // including: // @@ -5166,8 +5393,30 @@ type Cluster struct { // The metadata that you apply to the cluster to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` } @@ -5217,6 +5466,12 @@ func (s *Cluster) SetRunningTasksCount(v int64) *Cluster { return s } +// SetSettings sets the Settings field's value. +func (s *Cluster) SetSettings(v []*ClusterSetting) *Cluster { + s.Settings = v + return s +} + // SetStatistics sets the Statistics field's value. func (s *Cluster) SetStatistics(v []*KeyValuePair) *Cluster { s.Statistics = v @@ -5235,6 +5490,44 @@ func (s *Cluster) SetTags(v []*Tag) *Cluster { return s } +// The settings to use when creating a cluster. This parameter is used to enable +// CloudWatch Container Insights for a cluster. +type ClusterSetting struct { + _ struct{} `type:"structure"` + + // The name of the cluster setting. The only supported value is containerInsights. + Name *string `locationName:"name" type:"string" enum:"ClusterSettingName"` + + // The value to set for the cluster setting. The supported values are enabled + // and disabled. If enabled is specified, CloudWatch Container Insights will + // be enabled for the cluster, otherwise it will be disabled unless the containerInsights + // account setting is enabled. 
If a cluster value is specified, it will override + // the containerInsights value set with PutAccountSetting or PutAccountSettingDefault. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ClusterSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSetting) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ClusterSetting) SetName(v string) *ClusterSetting { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ClusterSetting) SetValue(v string) *ClusterSetting { + s.Value = &v + return s +} + // A Docker container that is part of a task. type Container struct { _ struct{} `type:"structure"` @@ -5258,6 +5551,15 @@ type Container struct { // as UNKNOWN. HealthStatus *string `locationName:"healthStatus" type:"string" enum:"HealthStatus"` + // The image used for the container. + Image *string `locationName:"image" type:"string"` + + // The container image manifest digest. + // + // The imageDigest is only returned if the container is using an image hosted + // in Amazon ECR, otherwise it is omitted. + ImageDigest *string `locationName:"imageDigest" type:"string"` + // The last known status of the container. LastStatus *string `locationName:"lastStatus" type:"string"` @@ -5280,6 +5582,9 @@ type Container struct { // details about a running or stopped container. Reason *string `locationName:"reason" type:"string"` + // The ID of the Docker container. + RuntimeId *string `locationName:"runtimeId" type:"string"` + // The ARN of the task. TaskArn *string `locationName:"taskArn" type:"string"` } @@ -5324,6 +5629,18 @@ func (s *Container) SetHealthStatus(v string) *Container { return s } +// SetImage sets the Image field's value. +func (s *Container) SetImage(v string) *Container { + s.Image = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *Container) SetImageDigest(v string) *Container { + s.ImageDigest = &v + return s +} + // SetLastStatus sets the LastStatus field's value. func (s *Container) SetLastStatus(v string) *Container { s.LastStatus = &v @@ -5366,6 +5683,12 @@ func (s *Container) SetReason(v string) *Container { return s } +// SetRuntimeId sets the RuntimeId field's value. +func (s *Container) SetRuntimeId(v string) *Container { + s.RuntimeId = &v + return s +} + // SetTaskArn sets the TaskArn field's value. func (s *Container) SetTaskArn(v string) *Container { s.TaskArn = &v @@ -5450,13 +5773,13 @@ type ContainerDefinition struct { // version 1.26.0 of the container agent to enable container dependencies. However, // we recommend using the latest container agent version. For information about // checking your agent version and updating to the latest version, see Updating - // the Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) + // the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 // of the ecs-init package. If your container instances are launched from version // 20190301 or later, then they contain the required versions of the container // agent and ecs-init. 
For more information, see Amazon ECS-optimized Linux - // AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // // This parameter is available for tasks using the Fargate launch type in the @@ -5515,6 +5838,7 @@ type ContainerDefinition struct { // This parameter is not supported for Windows containers. DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` + // // Early versions of the Amazon ECS container agent do not properly handle entryPoint // parameters. If you have problems using entryPoint, update your container // agent or enter your commands and arguments as command array items instead. @@ -5560,6 +5884,12 @@ type ContainerDefinition struct { // the awsvpc network mode. ExtraHosts []*HostEntry `locationName:"extraHosts" type:"list"` + // The FireLens configuration for the container. This is used to specify and + // configure a log router for container logs. For more information, see Custom + // Log Routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) + // in the Amazon Elastic Container Service Developer Guide. + FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` + // The health check command and associated configuration parameters for the // container. This parameter maps to HealthCheck in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) @@ -5578,7 +5908,7 @@ type ContainerDefinition struct { // The image used to start a container. This string is passed directly to the // Docker daemon. Images in the Docker Hub registry are available by default. // Other repositories are specified with either repository-url/image:tag or - // repository-url/image@digest. Up to 255 letters (uppercase and lowercase), + // repository-url/image@digest . Up to 255 letters (uppercase and lowercase), // numbers, hyphens, underscores, colons, periods, forward slashes, and number // signs are allowed. This parameter maps to Image in the Create a container // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section @@ -5595,7 +5925,6 @@ type ContainerDefinition struct { // 012345678910.dkr.ecr..amazonaws.com/:latest // or 012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE. // - // // * Images in official repositories on Docker Hub use a single name (for // example, ubuntu or mongo). // @@ -5613,16 +5942,16 @@ type ContainerDefinition struct { // and the --interactive option to docker run (https://docs.docker.com/engine/reference/run/). Interactive *bool `locationName:"interactive" type:"boolean"` - // The link parameter allows containers to communicate with each other without - // the need for port mappings. Only supported if the network mode of a task - // definition is set to bridge. The name:internalName construct is analogous + // The links parameter allows containers to communicate with each other without + // the need for port mappings. This parameter is only supported if the network + // mode of a task definition is bridge. The name:internalName construct is analogous // to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), - // numbers, hyphens, and underscores are allowed. 
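The firelensConfiguration field added above attaches a log-router container to a task definition. A sketch assuming the Fluent Bit flavor (the image and option shown are illustrative, not mandated by this API):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// newLogRouter sketches a FireLens sidecar definition; Type is the only
// required FirelensConfiguration field.
func newLogRouter() *ecs.ContainerDefinition {
	return &ecs.ContainerDefinition{
		Name:      aws.String("log_router"),
		Image:     aws.String("amazon/aws-for-fluent-bit:latest"), // illustrative image
		Essential: aws.Bool(true),
		FirelensConfiguration: &ecs.FirelensConfiguration{
			Type:    aws.String(ecs.FirelensConfigurationTypeFluentbit),
			Options: map[string]*string{"enable-ecs-log-metadata": aws.String("true")},
		},
	}
}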
For more information about - // linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/ - // (https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/). - // This parameter maps to Links in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + // numbers, and hyphens are allowed. For more information about linking Docker + // containers, go to Legacy container links (https://docs.docker.com/network/links/) + // in the Docker documentation. This parameter maps to Links in the Create a + // container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --link option to docker run (https://docs.docker.com/engine/reference/commandline/run/). + // and the --link option to docker run (https://docs.docker.com/engine/reference/run/). // // This parameter is not supported for Windows containers. // @@ -5633,19 +5962,13 @@ type ContainerDefinition struct { Links []*string `locationName:"links" type:"list"` // Linux-specific modifications that are applied to the container, such as Linux - // KernelCapabilities. + // kernel capabilities. For more information see KernelCapabilities. // // This parameter is not supported for Windows containers. LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` // The log configuration specification for the container. // - // For tasks using the Fargate launch type, the supported log drivers are awslogs - // and splunk. - // - // For tasks using the EC2 launch type, the supported log drivers are awslogs, - // syslog, gelf, fluentd, splunk, journald, and json-file. - // // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). @@ -5675,22 +5998,22 @@ type ContainerDefinition struct { // in the Amazon Elastic Container Service Developer Guide. LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` - // The hard limit (in MiB) of memory to present to the container. If your container - // attempts to exceed the memory specified here, the container is killed. This - // parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + // The amount (in MiB) of memory to present to the container. If your container + // attempts to exceed the memory specified here, the container is killed. The + // total amount of memory reserved for all containers within a task must be + // lower than the task memory value, if one is specified. This parameter maps + // to Memory in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/). // - // If your containers are part of a task using the Fargate launch type, this - // field is optional and the only requirement is that the total amount of memory - // reserved for all containers within a task be lower than the task memory value. + // If using the Fargate launch type, this parameter is optional. 
// - // For containers that are part of a task using the EC2 launch type, you must - // specify a non-zero integer for one or both of memory or memoryReservation - // in container definitions. If you specify both, memory must be greater than - // memoryReservation. If you specify memoryReservation, then that value is subtracted - // from the available memory resources for the container instance on which the - // container is placed. Otherwise, the value of memory is used. + // If using the EC2 launch type, you must specify either a task-level memory + // value or a container-level memory value. If you specify both a container-level + // memory and memoryReservation value, memory must be greater than memoryReservation. + // If you specify memoryReservation, then that value is subtracted from the + // available memory resources for the container instance on which the container + // is placed. Otherwise, the value of memory is used. // // The Docker daemon reserves a minimum of 4 MiB of memory for a container, // so you should not specify fewer than 4 MiB of memory for your containers. @@ -5706,11 +6029,12 @@ type ContainerDefinition struct { // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --memory-reservation option to docker run (https://docs.docker.com/engine/reference/run/). // - // You must specify a non-zero integer for one or both of memory or memoryReservation - // in container definitions. If you specify both, memory must be greater than - // memoryReservation. If you specify memoryReservation, then that value is subtracted - // from the available memory resources for the container instance on which the - // container is placed. Otherwise, the value of memory is used. + // If a task-level memory value is not specified, you must specify a non-zero + // integer for one or both of memory or memoryReservation in a container definition. + // If you specify both, memory must be greater than memoryReservation. If you + // specify memoryReservation, then that value is subtracted from the available + // memory resources for the container instance on which the container is placed. + // Otherwise, the value of memory is used. // // For example, if your container normally uses 128 MiB of memory, but occasionally // bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation @@ -5737,8 +6061,8 @@ type ContainerDefinition struct { // The name of a container. If you are linking multiple containers together // in a task definition, the name of one container can be entered in the links // of another container to connect the containers. Up to 255 letters (uppercase - // and lowercase), numbers, hyphens, and underscores are allowed. This parameter - // maps to name in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + // and lowercase), numbers, and hyphens are allowed. This parameter maps to + // name in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --name option to docker run (https://docs.docker.com/engine/reference/run/). Name *string `locationName:"name" type:"string"` @@ -5801,28 +6125,29 @@ type ContainerDefinition struct { ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` // The secrets to pass to the container. 
For more information, see Specifying - // Sensitive Data (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) + // Sensitive Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) // in the Amazon Elastic Container Service Developer Guide. Secrets []*Secret `locationName:"secrets" type:"list"` - // Time duration to wait before giving up on resolving dependencies for a container. - // For example, you specify two containers in a task definition with containerA - // having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY - // status. If a startTimeout value is specified for containerB and it does not - // reach the desired status within that time then containerA will give up and - // not start. This results in the task transitioning to a STOPPED state. + // Time duration (in seconds) to wait before giving up on resolving dependencies + // for a container. For example, you specify two containers in a task definition + // with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, + // or HEALTHY status. If a startTimeout value is specified for containerB and + // it does not reach the desired status within that time then containerA will + // give up and not start. This results in the task transitioning to a STOPPED + // state. // // For tasks using the EC2 launch type, the container instances require at least // version 1.26.0 of the container agent to enable a container start timeout // value. However, we recommend using the latest container agent version. For // information about checking your agent version and updating to the latest - // version, see Updating the Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) + // version, see Updating the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 // of the ecs-init package. If your container instances are launched from version // 20190301 or later, then they contain the required versions of the container // agent and ecs-init. For more information, see Amazon ECS-optimized Linux - // AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // // This parameter is available for tasks using the Fargate launch type in the @@ -5830,11 +6155,11 @@ type ContainerDefinition struct { // 1.3.0 or later. StartTimeout *int64 `locationName:"startTimeout" type:"integer"` - // Time duration to wait before the container is forcefully killed if it doesn't - // exit normally on its own. For tasks using the Fargate launch type, the max - // stopTimeout value is 2 minutes. This parameter is available for tasks using - // the Fargate launch type in the Ohio (us-east-2) region only and the task - // or service requires platform version 1.3.0 or later. + // Time duration (in seconds) to wait before the container is forcefully killed + // if it doesn't exit normally on its own. For tasks using the Fargate launch + // type, the max stopTimeout value is 2 minutes. 
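The reworded memory, memoryReservation, startTimeout, and stopTimeout documentation above fits together as follows: both timeouts are expressed in seconds, and when both memory values are set the hard limit must exceed the reservation. A sketch with illustrative values:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func webContainer() *ecs.ContainerDefinition {
	return &ecs.ContainerDefinition{
		Name:  aws.String("web"),
		Image: aws.String("nginx:alpine"),
		// Hard limit (MiB); must be greater than MemoryReservation.
		Memory: aws.Int64(256),
		// Soft reservation (MiB) subtracted from the instance's resources.
		MemoryReservation: aws.Int64(128),
		// Both timeouts are in seconds.
		StartTimeout: aws.Int64(30),
		StopTimeout:  aws.Int64(60),
	}
}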
This parameter is available + // for tasks using the Fargate launch type in the Ohio (us-east-2) region only + // and the task or service requires platform version 1.3.0 or later. // // For tasks using the EC2 launch type, the stop timeout value for the container // takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration @@ -5842,13 +6167,13 @@ type ContainerDefinition struct { // the container agent to enable a container stop timeout value. However, we // recommend using the latest container agent version. For information about // checking your agent version and updating to the latest version, see Updating - // the Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) + // the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 // of the ecs-init package. If your container instances are launched from version // 20190301 or later, then they contain the required versions of the container // agent and ecs-init. For more information, see Amazon ECS-optimized Linux - // AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. StopTimeout *int64 `locationName:"stopTimeout" type:"integer"` @@ -5947,6 +6272,11 @@ func (s *ContainerDefinition) Validate() error { } } } + if s.FirelensConfiguration != nil { + if err := s.FirelensConfiguration.Validate(); err != nil { + invalidParams.AddNested("FirelensConfiguration", err.(request.ErrInvalidParams)) + } + } if s.HealthCheck != nil { if err := s.HealthCheck.Validate(); err != nil { invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) @@ -6076,6 +6406,12 @@ func (s *ContainerDefinition) SetExtraHosts(v []*HostEntry) *ContainerDefinition return s } +// SetFirelensConfiguration sets the FirelensConfiguration field's value. +func (s *ContainerDefinition) SetFirelensConfiguration(v *FirelensConfiguration) *ContainerDefinition { + s.FirelensConfiguration = v + return s +} + // SetHealthCheck sets the HealthCheck field's value. func (s *ContainerDefinition) SetHealthCheck(v *HealthCheck) *ContainerDefinition { s.HealthCheck = v @@ -6234,13 +6570,13 @@ func (s *ContainerDefinition) SetWorkingDirectory(v string) *ContainerDefinition // container agent to enable container dependencies. However, we recommend using // the latest container agent version. For information about checking your agent // version and updating to the latest version, see Updating the Amazon ECS Container -// Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) +// Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 // of the ecs-init package. If your container instances are launched from version // 20190301 or later, then they contain the required versions of the container // agent and ecs-init. 
For more information, see Amazon ECS-optimized Linux -// AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) +// AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // // If you are using tasks that use the Fargate launch type, container dependency @@ -6328,7 +6664,7 @@ type ContainerInstance struct { // this value is NULL. AgentUpdateStatus *string `locationName:"agentUpdateStatus" type:"string" enum:"AgentUpdateStatus"` - // The elastic network interfaces associated with the container instance. + // The resources attached to a container instance, such as elastic network interfaces. Attachments []*Attachment `locationName:"attachments" type:"list"` // The attributes set for the container instance, either by the Amazon ECS container @@ -6371,18 +6707,55 @@ type ContainerInstance struct { // The number of tasks on the container instance that are in the RUNNING status. RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` - // The status of the container instance. The valid values are ACTIVE, INACTIVE, - // or DRAINING. ACTIVE indicates that the container instance can accept tasks. - // DRAINING indicates that new tasks are not placed on the container instance + // The status of the container instance. The valid values are REGISTERING, REGISTRATION_FAILED, + // ACTIVE, INACTIVE, DEREGISTERING, or DRAINING. + // + // If your account has opted in to the awsvpcTrunking account setting, then + // any newly registered container instance will transition to a REGISTERING + // status while the trunk elastic network interface is provisioned for the instance. + // If the registration fails, the instance will transition to a REGISTRATION_FAILED + // status. You can describe the container instance and see the reason for failure + // in the statusReason parameter. Once the container instance is terminated, + // the instance transitions to a DEREGISTERING status while the trunk elastic + // network interface is deprovisioned. The instance then transitions to an INACTIVE + // status. + // + // The ACTIVE status indicates that the container instance can accept tasks. + // The DRAINING status indicates that new tasks are not placed on the container instance // and any service tasks running on the container instance are removed if possible. // For more information, see Container Instance Draining (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-draining.html) // in the Amazon Elastic Container Service Developer Guide. Status *string `locationName:"status" type:"string"` + // The reason that the container instance reached its current status. + StatusReason *string `locationName:"statusReason" type:"string"` + // The metadata that you apply to the container instance to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value.
+ // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The version counter for the container instance. Every time a container instance @@ -6481,6 +6854,12 @@ func (s *ContainerInstance) SetStatus(v string) *ContainerInstance { return s } +// SetStatusReason sets the StatusReason field's value. +func (s *ContainerInstance) SetStatusReason(v string) *ContainerInstance { + s.StatusReason = &v + return s +} + // SetTags sets the Tags field's value. func (s *ContainerInstance) SetTags(v []*Tag) *ContainerInstance { s.Tags = v @@ -6499,7 +6878,10 @@ func (s *ContainerInstance) SetVersionInfo(v *VersionInfo) *ContainerInstance { return s } -// The overrides that should be sent to a container. +// The overrides that should be sent to a container. An empty container override +// can be passed in. An example of an empty container override would be {"containerOverrides": +// [ ] }. If a non-empty container override is specified, the name parameter +// must be included. type ContainerOverride struct { _ struct{} `type:"structure"` @@ -6622,12 +7004,18 @@ type ContainerStateChange struct { // exiting. ExitCode *int64 `locationName:"exitCode" type:"integer"` + // The container image SHA 256 digest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + // Any network bindings associated with the container. NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` // The reason for the state change. Reason *string `locationName:"reason" type:"string"` + // The ID of the Docker container. + RuntimeId *string `locationName:"runtimeId" type:"string"` + // The status of the container. Status *string `locationName:"status" type:"string"` } @@ -6654,6 +7042,12 @@ func (s *ContainerStateChange) SetExitCode(v int64) *ContainerStateChange { return s } +// SetImageDigest sets the ImageDigest field's value. +func (s *ContainerStateChange) SetImageDigest(v string) *ContainerStateChange { + s.ImageDigest = &v + return s +} + // SetNetworkBindings sets the NetworkBindings field's value. func (s *ContainerStateChange) SetNetworkBindings(v []*NetworkBinding) *ContainerStateChange { s.NetworkBindings = v @@ -6666,6 +7060,12 @@ func (s *ContainerStateChange) SetReason(v string) *ContainerStateChange { return s } +// SetRuntimeId sets the RuntimeId field's value. +func (s *ContainerStateChange) SetRuntimeId(v string) *ContainerStateChange { + s.RuntimeId = &v + return s +} + // SetStatus sets the Status field's value. func (s *ContainerStateChange) SetStatus(v string) *ContainerStateChange { s.Status = &v @@ -6677,13 +7077,41 @@ type CreateClusterInput struct { // The name of your cluster. If you do not specify a name for your cluster, // you create a cluster named default. 
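Per the clarified ContainerOverride documentation above, an empty override list is legal, but every non-empty entry must carry the container name. A sketch of a RunTask call with one override (the cluster, task definition, and container names are placeholders):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func runWithOverride(client *ecs.ECS) (*ecs.RunTaskOutput, error) {
	return client.RunTask(&ecs.RunTaskInput{
		Cluster:        aws.String("default"),
		TaskDefinition: aws.String("my-task:1"),
		Overrides: &ecs.TaskOverride{
			ContainerOverrides: []*ecs.ContainerOverride{{
				Name:    aws.String("web"), // required once the list is non-empty
				Command: []*string{aws.String("echo"), aws.String("hello")},
			}},
		},
	})
}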
Up to 255 letters (uppercase and lowercase), - // numbers, hyphens, and underscores are allowed. + // numbers, and hyphens are allowed. ClusterName *string `locationName:"clusterName" type:"string"` + // The setting to use when creating a cluster. This parameter is used to enable + // CloudWatch Container Insights for a cluster. If this value is specified, + // it will override the containerInsights value set with PutAccountSetting or + // PutAccountSettingDefault. + Settings []*ClusterSetting `locationName:"settings" type:"list"` + // The metadata that you apply to the cluster to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` } @@ -6723,6 +7151,12 @@ func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { return s } +// SetSettings sets the Settings field's value. +func (s *CreateClusterInput) SetSettings(v []*ClusterSetting) *CreateClusterInput { + s.Settings = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateClusterInput) SetTags(v []*Tag) *CreateClusterInput { s.Tags = v @@ -6773,11 +7207,14 @@ type CreateServiceInput struct { // The number of instantiations of the specified task definition to place and // keep running on your cluster. + // + // This is required if schedulingStrategy is REPLICA or is not specified. If + // schedulingStrategy is DAEMON then this is not required. DesiredCount *int64 `locationName:"desiredCount" type:"integer"` // Specifies whether to enable Amazon ECS managed tags for the tasks within // the service. For more information, see Tagging Your Amazon ECS Resources - // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) // in the Amazon Elastic Container Service Developer Guide. EnableECSManagedTags *bool `locationName:"enableECSManagedTags" type:"boolean"` @@ -6797,10 +7234,16 @@ type CreateServiceInput struct { // in the Amazon Elastic Container Service Developer Guide. LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` - // A load balancer object representing the load balancer to use with your service. + // A load balancer object representing the load balancers to use with your service. 
+ // For more information, see Service Load Balancing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) + // in the Amazon Elastic Container Service Developer Guide. // - // If the service is using the ECS deployment controller, you are limited to - // one load balancer or target group. + // If the service is using the rolling update (ECS) deployment controller and + // using either an Application Load Balancer or Network Load Balancer, you can + // specify multiple target groups to attach to the service. The service-linked + // role is required for services that make use of multiple target groups. For + // more information, see Using Service-Linked Roles for Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) + // in the Amazon Elastic Container Service Developer Guide. // // If the service is using the CODE_DEPLOY deployment controller, the service // is required to use either an Application Load Balancer or Network Load Balancer. @@ -6818,12 +7261,6 @@ type CreateServiceInput struct { // in the service definition are immutable. If you are using the CODE_DEPLOY // deployment controller, these values can be changed when updating the service. // - // For Classic Load Balancers, this object must contain the load balancer name, - // the container name (as it appears in a container definition), and the container - // port to access from the load balancer. When a task from this service is placed - // on a container instance, the container instance is registered with the load - // balancer specified here. - // // For Application Load Balancers and Network Load Balancers, this object must // contain the load balancer target group ARN, the container name (as it appears // in a container definition), and the container port to access from the load @@ -6831,6 +7268,12 @@ type CreateServiceInput struct { // the container instance and port combination is registered as a target in // the target group specified here. // + // For Classic Load Balancers, this object must contain the load balancer name, + // the container name (as it appears in a container definition), and the container + // port to access from the load balancer. When a task from this service is placed + // on a container instance, the container instance is registered with the load + // balancer specified here. + // // Services with tasks that use the awsvpc network mode (for example, those // with the Fargate launch type) only support Application Load Balancers and // Network Load Balancers. Classic Load Balancers are not supported. Also, when @@ -6842,7 +7285,7 @@ type CreateServiceInput struct { // The network configuration for the service. This parameter is required for // task definitions that use the awsvpc network mode to receive their own elastic // network interface, and it is not supported for other network modes. For more - // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // information, see Task Networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` @@ -6879,8 +7322,10 @@ type CreateServiceInput struct { // If your account has already created the Amazon ECS service-linked role, that // role is used by default for your service unless you specify a role here. 
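The expanded load balancer documentation above now permits multiple target groups for services that use the rolling-update (ECS) controller. A sketch registering one container with two target groups (the ARNs and names are placeholders):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func createMultiTargetGroupService(client *ecs.ECS) (*ecs.CreateServiceOutput, error) {
	return client.CreateService(&ecs.CreateServiceInput{
		ServiceName:    aws.String("web"),
		TaskDefinition: aws.String("my-task:1"),
		DesiredCount:   aws.Int64(2),
		LoadBalancers: []*ecs.LoadBalancer{
			{
				TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-2:111111111111:targetgroup/tg-a/0123456789abcdef"),
				ContainerName:  aws.String("web"),
				ContainerPort:  aws.Int64(80),
			},
			{
				TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-2:111111111111:targetgroup/tg-b/fedcba9876543210"),
				ContainerName:  aws.String("web"),
				ContainerPort:  aws.Int64(8080),
			},
		},
	})
}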
// The service-linked role is required if your task definition uses the awsvpc - // network mode, in which case you should not specify a role here. For more - // information, see Using Service-Linked Roles for Amazon ECS (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) + // network mode or if the service is configured to use service discovery, an + // external deployment controller, or multiple target groups in which case you + // should not specify a role here. For more information, see Using Service-Linked + // Roles for Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) // in the Amazon Elastic Container Service Developer Guide. // // If your specified role has a path other than /, then you must either specify @@ -6892,7 +7337,7 @@ type CreateServiceInput struct { Role *string `locationName:"role" type:"string"` // The scheduling strategy to use for the service. For more information, see - // Services (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). + // Services (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). // // There are two service scheduler strategies available: // @@ -6907,33 +7352,53 @@ type CreateServiceInput struct { // active container instance that meets all of the task placement constraints // that you specify in your cluster. When you're using this strategy, you // don't need to specify a desired number of tasks, a task placement strategy, - // or use Service Auto Scaling policies. - // - // Tasks using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment - // controller types don't support the DAEMON scheduling strategy. + // or use Service Auto Scaling policies. Tasks using the Fargate launch type + // or the CODE_DEPLOY or EXTERNAL deployment controller types don't support + // the DAEMON scheduling strategy. SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, - // hyphens, and underscores are allowed. Service names must be unique within - // a cluster, but you can have similarly named services in multiple clusters - // within a Region or across multiple Regions. + // and hyphens are allowed. Service names must be unique within a cluster, but + // you can have similarly named services in multiple clusters within a Region + // or across multiple Regions. // // ServiceName is a required field ServiceName *string `locationName:"serviceName" type:"string" required:"true"` // The details of the service discovery registries to assign to this service. - // For more information, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). + // For more information, see Service Discovery (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). // // Service discovery is supported for Fargate tasks if you are using platform // version v1.1.0 or later. For more information, see AWS Fargate Platform Versions - // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"` // The metadata that you apply to the service to help you categorize and organize // them. 
Each tag consists of a key and an optional value, both of which you - // define. When a service is deleted, the tags are deleted as well. Tag keys - // can have a maximum character length of 128 characters, and tag values can - // have a maximum length of 256 characters. + // define. When a service is deleted, the tags are deleted as well. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -7186,7 +7651,7 @@ type CreateTaskSetInput struct { Service *string `locationName:"service" type:"string" required:"true"` // The details of the service discovery registries to assign to this task set. - // For more information, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). + // For more information, see Service Discovery (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"` // The task definition for the tasks in the task set to use. @@ -7324,20 +7789,21 @@ func (s *CreateTaskSetOutput) SetTaskSet(v *TaskSet) *CreateTaskSetOutput { type DeleteAccountSettingInput struct { _ struct{} `type:"structure"` - // The resource name for which to disable the new format. If serviceLongArnFormat + // The resource name for which to disable the account setting. If serviceLongArnFormat // is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat // is specified, the ARN and resource ID for your Amazon ECS tasks is affected. // If containerInstanceLongArnFormat is specified, the ARN and resource ID for - // your Amazon ECS container instances is affected. + // your Amazon ECS container instances is affected. If awsvpcTrunking is specified, + // the ENI limit for your Amazon ECS container instances is affected. // // Name is a required field Name *string `locationName:"name" type:"string" required:"true" enum:"SettingName"` // The ARN of the principal, which can be an IAM user, IAM role, or the root - // user. If you specify the root user, it modifies the ARN and resource ID format - // for all IAM users, IAM roles, and the root user of the account unless an - // IAM user or role explicitly overrides these settings for themselves. If this - // field is omitted, the setting are changed only for the authenticated user. + // user. 
If you specify the root user, it disables the account setting for all + // IAM users, IAM roles, and the root user of the account unless an IAM user + // or role explicitly overrides these settings. If this field is omitted, the + // setting is changed only for the authenticated user. PrincipalArn *string `locationName:"principalArn" type:"string"` } @@ -7771,12 +8237,18 @@ type Deployment struct { // The status of the deployment. The following describes each state: // - // PRIMARYThe most recent deployment of a service. + // PRIMARY + // + // The most recent deployment of a service. + // + // ACTIVE // - // ACTIVEA service deployment that still has running tasks, but are in the process + // A service deployment that still has running tasks, but are in the process // of being replaced with a new PRIMARY deployment. // - // INACTIVEA deployment that has been completely replaced. + // INACTIVE + // + // A deployment that has been completely replaced. Status *string `locationName:"status" type:"string"` // The most recent task definition that was specified for the tasks in the service @@ -7937,7 +8409,7 @@ func (s *DeploymentConfiguration) SetMinimumHealthyPercent(v int64) *DeploymentC } // The deployment controller to use for the service. For more information, see -// Amazon ECS Deployment Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) +// Amazon ECS Deployment Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) // in the Amazon Elastic Container Service Developer Guide. type DeploymentController struct { _ struct{} `type:"structure"` @@ -7946,17 +8418,23 @@ type DeploymentController struct { // // There are three deployment controller types available: // - // ECSThe rolling update (ECS) deployment type involves replacing the current - // running version of the container with the latest version. The number of containers + // ECS + // + // The rolling update (ECS) deployment type involves replacing the current running + // version of the container with the latest version. The number of containers // Amazon ECS adds or removes from the service during a rolling update is controlled // by adjusting the minimum and maximum number of healthy tasks allowed during // a service deployment, as specified in the DeploymentConfiguration. // - // CODE_DEPLOYThe blue/green (CODE_DEPLOY) deployment type uses the blue/green - // deployment model powered by AWS CodeDeploy, which allows you to verify a - // new deployment of a service before sending production traffic to it. + // CODE_DEPLOY + // + // The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment + // model powered by AWS CodeDeploy, which allows you to verify a new deployment + // of a service before sending production traffic to it. // - // EXTERNALThe external (EXTERNAL) deployment type enables you to use any third-party + // EXTERNAL + // + // The external (EXTERNAL) deployment type enables you to use any third-party // deployment controller for full control over the deployment process for an // Amazon ECS service. // @@ -8238,7 +8716,9 @@ type DescribeContainerInstancesInput struct { // The short name or full Amazon Resource Name (ARN) of the cluster that hosts // the container instances to describe. If you do not specify a cluster, the - // default cluster is assumed. + // default cluster is assumed. 
This parameter is required if the container instance + // or container instances you are describing were launched in any cluster other + // than the default cluster. Cluster *string `locationName:"cluster" type:"string"` // A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) @@ -8331,7 +8811,8 @@ type DescribeServicesInput struct { // The short name or full Amazon Resource Name (ARN)the cluster that hosts the // service to describe. If you do not specify a cluster, the default cluster - // is assumed. + // is assumed. This parameter is required if the service or services you are + // describing were launched in any cluster other than the default cluster. Cluster *string `locationName:"cluster" type:"string"` // Specifies whether you want to see the resource tags for the service. If TAGS @@ -8475,8 +8956,30 @@ type DescribeTaskDefinitionOutput struct { // The metadata that is applied to the task definition to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The full task definition description. @@ -8604,8 +9107,9 @@ type DescribeTasksInput struct { _ struct{} `type:"structure"` // The short name or full Amazon Resource Name (ARN) of the cluster that hosts - // the task to describe. If you do not specify a cluster, the default cluster - // is assumed. + // the task or tasks to describe. If you do not specify a cluster, the default + // cluster is assumed. This parameter is required if the task or tasks you are + // describing were launched in any cluster other than the default cluster. Cluster *string `locationName:"cluster" type:"string"` // Specifies whether you want to see the resource tags for the task. If TAGS @@ -8934,6 +9438,60 @@ func (s *Failure) SetReason(v string) *Failure { return s } +// The FireLens configuration for the container. This is used to specify and +// configure a log router for container logs. For more information, see Custom +// Log Routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) +// in the Amazon Elastic Container Service Developer Guide. +type FirelensConfiguration struct { + _ struct{} `type:"structure"` + + // The options to use when configuring the log router. 
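Illustrative example (editorial addition, not part of the vendored patch): a minimal sketch of the new FirelensConfiguration type being introduced in this hunk, wired into a Fluent Bit log-router sidecar. The container name and image are hypothetical, the "fluentbit" value comes from the valid values listed just below, and it is assumed that this SDK version also exposes a FirelensConfiguration field on ContainerDefinition.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Log-router sidecar: runs Fluent Bit and asks ECS to inject task,
	// task definition, cluster, and container instance metadata.
	router := &ecs.ContainerDefinition{
		Name:      aws.String("log_router"), // hypothetical
		Image:     aws.String("amazon/aws-for-fluent-bit:latest"),
		Essential: aws.Bool(true),
		FirelensConfiguration: &ecs.FirelensConfiguration{
			Type: aws.String("fluentbit"), // valid values: fluentd or fluentbit
			Options: map[string]*string{
				"enable-ecs-log-metadata": aws.String("true"),
			},
		},
	}
	fmt.Println(router.String())
}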
This field is optional + // and can be used to add additional metadata, such as the task, task definition, + // cluster, and container instance details to the log event. If specified, the + // syntax to use is "options":{"enable-ecs-log-metadata":"true|false"}. + Options map[string]*string `locationName:"options" type:"map"` + + // The log router to use. The valid values are fluentd or fluentbit. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"FirelensConfigurationType"` +} + +// String returns the string representation +func (s FirelensConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirelensConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FirelensConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FirelensConfiguration"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOptions sets the Options field's value. +func (s *FirelensConfiguration) SetOptions(v map[string]*string) *FirelensConfiguration { + s.Options = v + return s +} + +// SetType sets the Type field's value. +func (s *FirelensConfiguration) SetType(v string) *FirelensConfiguration { + s.Type = &v + return s +} + // An object representing a container health check. Health check parameters // that are specified in a container definition override any Docker health checks // that exist in the container image (such as those specified in a parent image @@ -9134,6 +9692,100 @@ func (s *HostVolumeProperties) SetSourcePath(v string) *HostVolumeProperties { return s } +// Details on an Elastic Inference accelerator. For more information, see Working +// with Amazon Elastic Inference on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-eia.html) +// in the Amazon Elastic Container Service Developer Guide. +type InferenceAccelerator struct { + _ struct{} `type:"structure"` + + // The Elastic Inference accelerator device name. The deviceName must also be + // referenced in a container definition as a ResourceRequirement. + // + // DeviceName is a required field + DeviceName *string `locationName:"deviceName" type:"string" required:"true"` + + // The Elastic Inference accelerator type to use. + // + // DeviceType is a required field + DeviceType *string `locationName:"deviceType" type:"string" required:"true"` +} + +// String returns the string representation +func (s InferenceAccelerator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceAccelerator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InferenceAccelerator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InferenceAccelerator"} + if s.DeviceName == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceName")) + } + if s.DeviceType == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeviceName sets the DeviceName field's value. 
+func (s *InferenceAccelerator) SetDeviceName(v string) *InferenceAccelerator { + s.DeviceName = &v + return s +} + +// SetDeviceType sets the DeviceType field's value. +func (s *InferenceAccelerator) SetDeviceType(v string) *InferenceAccelerator { + s.DeviceType = &v + return s +} + +// Details on an Elastic Inference accelerator task override. This parameter +// is used to override the Elastic Inference accelerator specified in the task +// definition. For more information, see Working with Amazon Elastic Inference +// on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-eia.html) +// in the Amazon Elastic Container Service Developer Guide. +type InferenceAcceleratorOverride struct { + _ struct{} `type:"structure"` + + // The Elastic Inference accelerator device name to override for the task. This + // parameter must match a deviceName specified in the task definition. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The Elastic Inference accelerator type to use. + DeviceType *string `locationName:"deviceType" type:"string"` +} + +// String returns the string representation +func (s InferenceAcceleratorOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceAcceleratorOverride) GoString() string { + return s.String() +} + +// SetDeviceName sets the DeviceName field's value. +func (s *InferenceAcceleratorOverride) SetDeviceName(v string) *InferenceAcceleratorOverride { + s.DeviceName = &v + return s +} + +// SetDeviceType sets the DeviceType field's value. +func (s *InferenceAcceleratorOverride) SetDeviceType(v string) *InferenceAcceleratorOverride { + s.DeviceType = &v + return s +} + // The Linux capabilities for the container that are added to or dropped from // the default configuration provided by Docker. For more information on the // default capabilities and the non-default available capabilities, see Runtime @@ -9265,6 +9917,21 @@ type LinuxParameters struct { // command: sudo docker version --format '{{.Server.APIVersion}}' InitProcessEnabled *bool `locationName:"initProcessEnabled" type:"boolean"` + // The total amount of swap memory (in MiB) a container can use. This parameter + // will be translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/) + // where the value would be the sum of the container memory plus the maxSwap + // value. + // + // If a maxSwap value of 0 is specified, the container will not use swap. Accepted + // values are 0 or any positive integer. If the maxSwap parameter is omitted, + // the container will use the swap configuration for the container instance + // it is running on. A maxSwap value must be set for the swappiness parameter + // to be used. + // + // If you are using tasks that use the Fargate launch type, the maxSwap parameter + // is not supported. + MaxSwap *int64 `locationName:"maxSwap" type:"integer"` + // The value for the size (in MiB) of the /dev/shm volume. This parameter maps // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/). // @@ -9272,6 +9939,18 @@ type LinuxParameters struct { // parameter is not supported. SharedMemorySize *int64 `locationName:"sharedMemorySize" type:"integer"` + // This allows you to tune a container's memory swappiness behavior. A swappiness + // value of 0 will cause swapping to not happen unless absolutely necessary. + // A swappiness value of 100 will cause pages to be swapped very aggressively. 
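Illustrative example (editorial addition, not part of the vendored patch): a small sketch of the two new LinuxParameters swap fields added in this hunk. Per the doc comments, maxSwap feeds --memory-swap as container memory plus maxSwap, and swappiness is ignored unless maxSwap is set; the values below are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	lp := &ecs.LinuxParameters{
		// --memory-swap becomes container memory + 1024 MiB; 0 disables swap.
		MaxSwap: aws.Int64(1024),
		// 0-100; default 60; ignored when MaxSwap is unset. Neither field
		// is supported on the Fargate launch type.
		Swappiness: aws.Int64(10),
	}
	fmt.Println(lp.String())
}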
+ // Accepted values are whole numbers between 0 and 100. If the swappiness parameter + // is not specified, a default value of 60 is used. If a value is not specified + // for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness + // option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the swappiness parameter + // is not supported. + Swappiness *int64 `locationName:"swappiness" type:"integer"` + // The container path, mount options, and size (in MiB) of the tmpfs mount. // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/). // @@ -9338,12 +10017,24 @@ func (s *LinuxParameters) SetInitProcessEnabled(v bool) *LinuxParameters { return s } +// SetMaxSwap sets the MaxSwap field's value. +func (s *LinuxParameters) SetMaxSwap(v int64) *LinuxParameters { + s.MaxSwap = &v + return s +} + // SetSharedMemorySize sets the SharedMemorySize field's value. func (s *LinuxParameters) SetSharedMemorySize(v int64) *LinuxParameters { s.SharedMemorySize = &v return s } +// SetSwappiness sets the Swappiness field's value. +func (s *LinuxParameters) SetSwappiness(v int64) *LinuxParameters { + s.Swappiness = &v + return s +} + // SetTmpfs sets the Tmpfs field's value. func (s *LinuxParameters) SetTmpfs(v []*Tmpfs) *LinuxParameters { s.Tmpfs = v @@ -9354,9 +10045,9 @@ type ListAccountSettingsInput struct { _ struct{} `type:"structure"` // Specifies whether to return the effective settings. If true, the account - // settings for the root user or the default setting for the principalArn. If - // false, the account settings for the principalArn are returned if they are - // set. Otherwise, no account settings are returned. + // settings for the root user or the default setting for the principalArn are + // returned. If false, the account settings for the principalArn are returned + // if they are set. Otherwise, no account settings are returned. EffectiveSettings *bool `locationName:"effectiveSettings" type:"boolean"` // The maximum number of account setting results returned by ListAccountSettings @@ -9719,8 +10410,8 @@ type ListContainerInstancesInput struct { // Filters the container instances by status. For example, if you specify the // DRAINING status, the results include only container instances that have been // set to DRAINING using UpdateContainerInstancesState. If you do not specify - // this parameter, the default is to include container instances set to ACTIVE - // and DRAINING. + // this parameter, the default is to include container instances set to all + // states other than INACTIVE. Status *string `locationName:"status" type:"string" enum:"ContainerInstanceStatus"` } @@ -10355,26 +11046,8 @@ func (s *ListTasksOutput) SetTaskArns(v []*string) *ListTasksOutput { return s } -// Details on a load balancer that is used with a service. -// -// If the service is using the ECS deployment controller, you are limited to -// one load balancer or target group. -// -// If the service is using the CODE_DEPLOY deployment controller, the service -// is required to use either an Application Load Balancer or Network Load Balancer. -// When you are creating an AWS CodeDeploy deployment group, you specify two -// target groups (referred to as a targetGroupPair). Each target group binds -// to a separate task set in the deployment. 
The load balancer can also have -// up to two listeners, a required listener for production traffic and an optional -// listener that allows you to test new revisions of the service before routing -// production traffic to it. -// -// Services with tasks that use the awsvpc network mode (for example, those -// with the Fargate launch type) only support Application Load Balancers and -// Network Load Balancers. Classic Load Balancers are not supported. Also, when -// you create any target groups for these services, you must choose ip as the -// target type, not instance. Tasks that use the awsvpc network mode are associated -// with an elastic network interface, not an Amazon EC2 instance. +// Details on the load balancer or load balancers to use with a service or task +// set. type LoadBalancer struct { _ struct{} `type:"structure"` @@ -10383,24 +11056,42 @@ type LoadBalancer struct { ContainerName *string `locationName:"containerName" type:"string"` // The port on the container to associate with the load balancer. This port - // must correspond to a containerPort in the service's task definition. Your - // container instances must allow ingress traffic on the hostPort of the port - // mapping. + // must correspond to a containerPort in the task definition the tasks in the + // service are using. For tasks that use the EC2 launch type, the container + // instance they are launched on must allow ingress traffic on the hostPort + // of the port mapping. ContainerPort *int64 `locationName:"containerPort" type:"integer"` - // The name of a load balancer. + // The name of the load balancer to associate with the Amazon ECS service or + // task set. + // + // A load balancer name is only specified when using a Classic Load Balancer. + // If you are using an Application Load Balancer or a Network Load Balancer + // this should be omitted. LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` // The full Amazon Resource Name (ARN) of the Elastic Load Balancing target - // group or groups associated with a service. For services using the ECS deployment - // controller, you are limited to one target group. For services using the CODE_DEPLOY - // deployment controller, you are required to define two target groups for the - // load balancer. + // group or groups associated with a service or task set. + // + // A target group ARN is only specified when using an Application Load Balancer + // or Network Load Balancer. If you are using a Classic Load Balancer this should + // be omitted. + // + // For services using the ECS deployment controller, you can specify one or + // multiple target groups. For more information, see Registering Multiple Target + // Groups with a Service (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // For services using the CODE_DEPLOY deployment controller, you are required + // to define two target groups for the load balancer. For more information, + // see Blue/Green Deployment with CodeDeploy (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html) + // in the Amazon Elastic Container Service Developer Guide. 
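Illustrative example (editorial addition, not part of the vendored patch): the two mutually exclusive identification styles described in the LoadBalancer doc comments, target group ARN for an Application/Network Load Balancer versus load balancer name for a Classic Load Balancer. All names and ARNs are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Application/Network Load Balancer: specify the target group ARN only.
	alb := &ecs.LoadBalancer{
		TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/0123456789abcdef"), // hypothetical
		ContainerName:  aws.String("web"),
		ContainerPort:  aws.Int64(80),
	}
	// Classic Load Balancer: specify the name only; omit the target group ARN.
	clb := &ecs.LoadBalancer{
		LoadBalancerName: aws.String("example-classic-elb"), // hypothetical
		ContainerName:    aws.String("web"),
		ContainerPort:    aws.Int64(80),
	}
	fmt.Println(alb.String())
	fmt.Println(clb.String())
}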
// // If your service's task definition uses the awsvpc network mode (which is // required for the Fargate launch type), you must choose ip as the target type, - // not instance, because tasks that use the awsvpc network mode are associated - // with an elastic network interface, not an Amazon EC2 instance. + // not instance, when creating your target groups because tasks that use the + // awsvpc network mode are associated with an elastic network interface, not + // an Amazon EC2 instance. TargetGroupArn *string `locationName:"targetGroupArn" type:"string"` } @@ -10446,16 +11137,20 @@ type LogConfiguration struct { // parameter are log drivers that the Amazon ECS container agent can communicate // with by default. // - // For tasks using the Fargate launch type, the supported log drivers are awslogs - // and splunk. + // For tasks using the Fargate launch type, the supported log drivers are awslogs, + // splunk, and awsfirelens. // // For tasks using the EC2 launch type, the supported log drivers are awslogs, - // syslog, gelf, fluentd, splunk, journald, and json-file. + // fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. // // For more information about using the awslogs log driver, see Using the awslogs // Log Driver (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) // in the Amazon Elastic Container Service Developer Guide. // + // For more information about using the awsfirelens log driver, see Custom Log + // Routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) + // in the Amazon Elastic Container Service Developer Guide. + // // If you have a custom driver that is not listed above that you would like // to work with the Amazon ECS container agent, you can fork the Amazon ECS // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) @@ -10478,7 +11173,9 @@ type LogConfiguration struct { // --format '{{.Server.APIVersion}}' Options map[string]*string `locationName:"options" type:"map"` - // The secrets to pass to the log configuration. + // The secrets to pass to the log configuration. For more information, see Specifying + // Sensitive Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) + // in the Amazon Elastic Container Service Developer Guide. SecretOptions []*Secret `locationName:"secretOptions" type:"list"` } @@ -10719,6 +11416,9 @@ func (s *NetworkInterface) SetPrivateIpv4Address(v string) *NetworkInterface { // An object representing a constraint on task placement. For more information, // see Task Placement Constraints (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) // in the Amazon Elastic Container Service Developer Guide. +// +// If you are using the Fargate launch type, task placement constraints are +// not supported. type PlacementConstraint struct { _ struct{} `type:"structure"` @@ -10730,8 +11430,7 @@ type PlacementConstraint struct { // The type of constraint. Use distinctInstance to ensure that each task in // a particular group is running on a different container instance. Use memberOf - // to restrict the selection to a group of valid candidates. The value distinctInstance - // is not supported in task definitions. + // to restrict the selection to a group of valid candidates. 
Type *string `locationName:"type" type:"string" enum:"PlacementConstraintType"` } @@ -10885,6 +11584,9 @@ type PortMapping struct { // receives a host port in the ephemeral port range. For more information, see // hostPort. Port mappings that are automatically assigned in this way do not // count toward the 100 reserved ports limit of a container instance. + // + // You cannot expose the same container port for multiple protocols. An error + // will be returned if this is attempted. ContainerPort *int64 `locationName:"containerPort" type:"integer"` // The port number on the container instance to reserve for your container. @@ -10958,12 +11660,11 @@ func (s *PortMapping) SetProtocol(v string) *PortMapping { // ecs-init package to enable a proxy configuration. If your container instances // are launched from the Amazon ECS-optimized AMI version 20190301 or later, // then they contain the required versions of the container agent and ecs-init. -// For more information, see Amazon ECS-optimized Linux AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) +// For more information, see Amazon ECS-optimized Linux AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // -// This parameter is available for tasks using the Fargate launch type in the -// Ohio (us-east-2) region only and the task or service requires platform version -// 1.3.0 or later. +// For tasks using the Fargate launch type, the task or service requires platform +// version 1.3.0 or later. type ProxyConfiguration struct { _ struct{} `type:"structure"` @@ -10982,7 +11683,7 @@ type ProxyConfiguration struct { // // * IgnoredGID - (Required) The group ID (GID) of the proxy container as // defined by the user parameter in a container definition. This is used - // to ensure the proxy ignores its own traffic. If IgnoredGID is specified, + // to ensure the proxy ignores its own traffic. If IgnoredUID is specified, // this field can be empty. // // * AppPorts - (Required) The list of ports that the application uses. Network @@ -11051,11 +11752,14 @@ func (s *ProxyConfiguration) SetType(v string) *ProxyConfiguration { type PutAccountSettingDefaultInput struct { _ struct{} `type:"structure"` - // The resource type to enable the new format for. If serviceLongArnFormat is - // specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat - // is specified, the ARN and resource ID for your Amazon ECS tasks are affected. + // The resource name for which to modify the account setting. If serviceLongArnFormat + // is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat + // is specified, the ARN and resource ID for your Amazon ECS tasks is affected. // If containerInstanceLongArnFormat is specified, the ARN and resource ID for - // your Amazon ECS container instances are affected. + // your Amazon ECS container instances is affected. If awsvpcTrunking is specified, + // the ENI limit for your Amazon ECS container instances is affected. If containerInsights + // is specified, the default setting for CloudWatch Container Insights for your + // clusters is affected. 
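Illustrative example (editorial addition, not part of the vendored patch): the account-setting defaults documented above, used to opt a whole account into CloudWatch Container Insights. The "containerInsights" name comes from the doc comment; "enabled" as the accepted value is an assumption based on how these setting APIs are conventionally used.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Assumes AWS credentials and region come from the environment.
	svc := ecs.New(session.Must(session.NewSession()))
	out, err := svc.PutAccountSettingDefault(&ecs.PutAccountSettingDefaultInput{
		Name:  aws.String("containerInsights"),
		Value: aws.String("enabled"), // assumed accepted values: "enabled" / "disabled"
	})
	if err != nil {
		fmt.Println("PutAccountSettingDefault failed:", err)
		return
	}
	fmt.Println(out.Setting)
}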
// // Name is a required field Name *string `locationName:"name" type:"string" required:"true" enum:"SettingName"` @@ -11131,20 +11835,24 @@ func (s *PutAccountSettingDefaultOutput) SetSetting(v *Setting) *PutAccountSetti type PutAccountSettingInput struct { _ struct{} `type:"structure"` - // The resource name for which to enable the new format. If serviceLongArnFormat - // is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat - // is specified, the ARN and resource ID for your Amazon ECS tasks is affected. - // If containerInstanceLongArnFormat is specified, the ARN and resource ID for - // your Amazon ECS container instances is affected. + // The Amazon ECS resource name for which to modify the account setting. If + // serviceLongArnFormat is specified, the ARN for your Amazon ECS services is + // affected. If taskLongArnFormat is specified, the ARN and resource ID for + // your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, + // the ARN and resource ID for your Amazon ECS container instances is affected. + // If awsvpcTrunking is specified, the elastic network interface (ENI) limit + // for your Amazon ECS container instances is affected. If containerInsights + // is specified, the default setting for CloudWatch Container Insights for your + // clusters is affected. // // Name is a required field Name *string `locationName:"name" type:"string" required:"true" enum:"SettingName"` // The ARN of the principal, which can be an IAM user, IAM role, or the root - // user. If you specify the root user, it modifies the ARN and resource ID format - // for all IAM users, IAM roles, and the root user of the account unless an - // IAM user or role explicitly overrides these settings for themselves. If this - // field is omitted, the settings are changed only for the authenticated user. + // user. If you specify the root user, it modifies the account setting for all + // IAM users, IAM roles, and the root user of the account unless an IAM user + // or role explicitly overrides these settings. If this field is omitted, the + // setting is changed only for the authenticated user. PrincipalArn *string `locationName:"principalArn" type:"string"` // The account setting value for the specified principal ARN. Accepted values @@ -11334,8 +12042,30 @@ type RegisterContainerInstanceInput struct { // The metadata that you apply to the container instance to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. 
+ // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The resources available on the instance. @@ -11520,11 +12250,14 @@ type RegisterTaskDefinitionInput struct { // You must specify a family for a task definition, which allows you to track // multiple versions of the same task definition. The family is used as a name // for your task definition. Up to 255 letters (uppercase and lowercase), numbers, - // hyphens, and underscores are allowed. + // and hyphens are allowed. // // Family is a required field Family *string `locationName:"family" type:"string" required:"true"` + // The Elastic Inference accelerators to use for the containers in the task. + InferenceAccelerators []*InferenceAccelerator `locationName:"inferenceAccelerators" type:"list"` + // The IPC resource namespace to use for the containers in the task. The valid // values are host, task, or none. If host is specified, then all containers // within the tasks that specified the host IPC mode on the same container instance @@ -11543,7 +12276,7 @@ type RegisterTaskDefinitionInput struct { // // If you are setting namespaced kernel parameters using systemControls for // the containers in the task, the following will apply to your IPC resource - // namespace. For more information, see System Controls (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) + // namespace. For more information, see System Controls (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) // in the Amazon Elastic Container Service Developer Guide. // // * For tasks that use the host IPC mode, IPC namespace related systemControls @@ -11604,7 +12337,7 @@ type RegisterTaskDefinitionInput struct { // If the network mode is awsvpc, the task is allocated an elastic network interface, // and you must specify a NetworkConfiguration value when you create a service // or run a task with the task definition. For more information, see Task Networking - // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. // // Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with @@ -11626,7 +12359,7 @@ type RegisterTaskDefinitionInput struct { // The process namespace to use for the containers in the task. The valid values // are host or task. If host is specified, then all containers within the tasks // that specified the host PID mode on the same container instance share the - // same IPC resources with the host Amazon EC2 instance. If task is specified, + // same process namespace with the host Amazon EC2 instance. If task is specified, // all containers within the specified task share the same process namespace. // If no value is specified, the default is a private namespace. For more information, // see PID settings (https://docs.docker.com/engine/reference/run/#pid-settings---pid) @@ -11652,12 +12385,11 @@ type RegisterTaskDefinitionInput struct { // ecs-init package to enable a proxy configuration. 
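Illustrative example (editorial addition, not part of the vendored patch): registering a task definition that uses the new InferenceAccelerators field from this hunk, pairing the accelerator's deviceName with a matching container ResourceRequirement as the doc comments require. Family, image, and device names are hypothetical, and "eia1.medium" is an assumed accelerator type.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Assumes AWS credentials and region come from the environment.
	svc := ecs.New(session.Must(session.NewSession()))
	out, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("example-inference"), // hypothetical
		InferenceAccelerators: []*ecs.InferenceAccelerator{{
			DeviceName: aws.String("device_1"),     // referenced below
			DeviceType: aws.String("eia1.medium"),  // assumed type
		}},
		ContainerDefinitions: []*ecs.ContainerDefinition{{
			Name:   aws.String("model-server"),        // hypothetical
			Image:  aws.String("example/image:latest"), // hypothetical
			Memory: aws.Int64(512),
			// Value must match the deviceName declared above.
			ResourceRequirements: []*ecs.ResourceRequirement{{
				Type:  aws.String("InferenceAccelerator"),
				Value: aws.String("device_1"),
			}},
		}},
	})
	if err != nil {
		fmt.Println("RegisterTaskDefinition failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.TaskDefinition.TaskDefinitionArn))
}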
If your container instances // are launched from the Amazon ECS-optimized AMI version 20190301 or later, // then they contain the required versions of the container agent and ecs-init. - // For more information, see Amazon ECS-optimized Linux AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // For more information, see Amazon ECS-optimized Linux AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // - // This parameter is available for tasks using the Fargate launch type in the - // Ohio (us-east-2) region only and the task or service requires platform version - // 1.3.0 or later. + // For tasks using the Fargate launch type, the task or service requires platform + // version 1.3.0 or later. ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` // The launch type required by the task. If no value is specified, it defaults @@ -11666,8 +12398,30 @@ type RegisterTaskDefinitionInput struct { // The metadata that you apply to the task definition to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The short name or full Amazon Resource Name (ARN) of the IAM role that containers @@ -11711,6 +12465,16 @@ func (s *RegisterTaskDefinitionInput) Validate() error { } } } + if s.InferenceAccelerators != nil { + for i, v := range s.InferenceAccelerators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InferenceAccelerators", i), err.(request.ErrInvalidParams)) + } + } + } if s.ProxyConfiguration != nil { if err := s.ProxyConfiguration.Validate(); err != nil { invalidParams.AddNested("ProxyConfiguration", err.(request.ErrInvalidParams)) @@ -11757,6 +12521,12 @@ func (s *RegisterTaskDefinitionInput) SetFamily(v string) *RegisterTaskDefinitio return s } +// SetInferenceAccelerators sets the InferenceAccelerators field's value. +func (s *RegisterTaskDefinitionInput) SetInferenceAccelerators(v []*InferenceAccelerator) *RegisterTaskDefinitionInput { + s.InferenceAccelerators = v + return s +} + // SetIpcMode sets the IpcMode field's value. 
func (s *RegisterTaskDefinitionInput) SetIpcMode(v string) *RegisterTaskDefinitionInput { s.IpcMode = &v @@ -11967,23 +12737,29 @@ func (s *Resource) SetType(v string) *Resource { return s } -// The type and amount of a resource to assign to a container. The only supported -// resource is a GPU. For more information, see Working with GPUs on Amazon -// ECS (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) +// The type and amount of a resource to assign to a container. The supported +// resource types are GPUs and Elastic Inference accelerators. For more information, +// see Working with GPUs on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) +// or Working with Amazon Elastic Inference on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-eia.html) // in the Amazon Elastic Container Service Developer Guide type ResourceRequirement struct { _ struct{} `type:"structure"` - // The type of resource to assign to a container. The only supported value is - // GPU. + // The type of resource to assign to a container. The supported values are GPU + // or InferenceAccelerator. // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"ResourceType"` - // The number of physical GPUs the Amazon ECS container agent will reserve for - // the container. The number of GPUs reserved for all containers in a task should - // not exceed the number of available GPUs on the container instance the task - // is launched on. + // The value for the specified resource type. + // + // If the GPU type is used, the value is the number of physical GPUs the Amazon + // ECS container agent will reserve for the container. The number of GPUs reserved + // for all containers in a task should not exceed the number of available GPUs + // on the container instance the task is launched on. + // + // If the InferenceAccelerator type is used, the value should match the deviceName + // for an InferenceAccelerator specified in a task definition. // // Value is a required field Value *string `locationName:"value" type:"string" required:"true"` @@ -12040,7 +12816,7 @@ type RunTaskInput struct { Count *int64 `locationName:"count" type:"integer"` // Specifies whether to enable Amazon ECS managed tags for the task. For more - // information, see Tagging Your Amazon ECS Resources (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) + // information, see Tagging Your Amazon ECS Resources (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) // in the Amazon Elastic Container Service Developer Guide. EnableECSManagedTags *bool `locationName:"enableECSManagedTags" type:"boolean"` @@ -12056,7 +12832,7 @@ type RunTaskInput struct { // The network configuration for the task. This parameter is required for task // definitions that use the awsvpc network mode to receive their own elastic // network interface, and it is not supported for other network modes. For more - // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // information, see Task Networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. 
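Illustrative example (editorial addition, not part of the vendored patch): the GPU branch of the expanded ResourceRequirement documented above, where Value is a GPU count rather than a device name. Container name and image are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	cd := &ecs.ContainerDefinition{
		Name:  aws.String("trainer"),                 // hypothetical
		Image: aws.String("example/gpu-image:latest"), // hypothetical
		// Reserve one physical GPU; the total reserved across all containers
		// in a task must not exceed the GPUs on the container instance.
		ResourceRequirements: []*ecs.ResourceRequirement{{
			Type:  aws.String("GPU"),
			Value: aws.String("1"),
		}},
	}
	fmt.Println(cd.String())
}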
NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` @@ -12110,8 +12886,30 @@ type RunTaskInput struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -12325,7 +13123,7 @@ func (s *Scale) SetValue(v float64) *Scale { // * To reference sensitive information in the log configuration of a container, // use the secretOptions container definition parameter. // -// For more information, see Specifying Sensitive Data (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) +// For more information, see Specifying Sensitive Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) // in the Amazon Elastic Container Service Developer Guide. type Secret struct { _ struct{} `type:"structure"` @@ -12403,7 +13201,9 @@ type Service struct { // deployment and the ordering of stopping and starting tasks. DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` - // The deployment controller type the service is using. + // The deployment controller type the service is using. When using the DescribeServices + // API, this field is omitted if the service is using the ECS deployment controller + // type. DeploymentController *DeploymentController `locationName:"deploymentController" type:"structure"` // The current state of deployments for the service. @@ -12415,7 +13215,7 @@ type Service struct { DesiredCount *int64 `locationName:"desiredCount" type:"integer"` // Specifies whether to enable Amazon ECS managed tags for the tasks in the - // service. For more information, see Tagging Your Amazon ECS Resources (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) + // service. For more information, see Tagging Your Amazon ECS Resources (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) // in the Amazon Elastic Container Service Developer Guide. EnableECSManagedTags *bool `locationName:"enableECSManagedTags" type:"boolean"` @@ -12428,21 +13228,15 @@ type Service struct { // started. 
HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` - // The launch type on which your service is running. For more information, see - // Amazon ECS Launch Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // The launch type on which your service is running. If no value is specified, + // it will default to EC2. Valid values include EC2 and FARGATE. For more information, + // see Amazon ECS Launch Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // in the Amazon Elastic Container Service Developer Guide. LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` // A list of Elastic Load Balancing load balancer objects, containing the load // balancer name, the container name (as it appears in a container definition), // and the container port to access from the load balancer. - // - // Services with tasks that use the awsvpc network mode (for example, those - // with the Fargate launch type) only support Application Load Balancers and - // Network Load Balancers. Classic Load Balancers are not supported. Also, when - // you create any target groups for these services, you must choose ip as the - // target type, not instance. Tasks that use the awsvpc network mode are associated - // with an elastic network interface, not an Amazon EC2 instance. LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` // The VPC subnet and security group configuration for tasks that receive their @@ -12458,10 +13252,10 @@ type Service struct { // The placement strategy that determines how tasks for the service are placed. PlacementStrategy []*PlacementStrategy `locationName:"placementStrategy" type:"list"` - // The platform version on which your tasks in the service are running. A platform - // version is only specified for tasks using the Fargate launch type. If one - // is not specified, the LATEST platform version is used by default. For more - // information, see AWS Fargate Platform Versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) + // The platform version on which to run your service. A platform version is + // only specified for tasks using the Fargate launch type. If one is not specified, + // the LATEST platform version is used by default. For more information, see + // AWS Fargate Platform Versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) // in the Amazon Elastic Container Service Developer Guide. PlatformVersion *string `locationName:"platformVersion" type:"string"` @@ -12478,7 +13272,7 @@ type Service struct { RunningCount *int64 `locationName:"runningCount" type:"integer"` // The scheduling strategy to use for the service. For more information, see - // Services (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). + // Services (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). // // There are two service scheduler strategies available: // @@ -12490,8 +13284,7 @@ type Service struct { // * DAEMON-The daemon scheduling strategy deploys exactly one task on each // container instance in your cluster. When you are using this strategy, // do not specify a desired number of tasks or any task placement strategies. - // - // Fargate tasks do not support the DAEMON scheduling strategy. + // Fargate tasks do not support the DAEMON scheduling strategy. 
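Illustrative example (editorial addition, not part of the vendored patch): a DAEMON-strategy service as described in the scheduler notes above, with no desired count or placement strategy and the EC2 launch type, since Fargate does not support DAEMON. Cluster and names are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Assumes AWS credentials and region come from the environment.
	svc := ecs.New(session.Must(session.NewSession()))
	// DAEMON places exactly one task on each active container instance,
	// so DesiredCount is deliberately omitted.
	out, err := svc.CreateService(&ecs.CreateServiceInput{
		Cluster:            aws.String("example-cluster"), // hypothetical
		ServiceName:        aws.String("node-agent"),      // hypothetical
		TaskDefinition:     aws.String("node-agent:1"),    // hypothetical
		SchedulingStrategy: aws.String("DAEMON"),
		LaunchType:         aws.String("EC2"),
	})
	if err != nil {
		fmt.Println("CreateService failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.Service.Status))
}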
SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` // The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, @@ -12500,13 +13293,13 @@ type Service struct { ServiceArn *string `locationName:"serviceArn" type:"string"` // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, - // hyphens, and underscores are allowed. Service names must be unique within - // a cluster, but you can have similarly named services in multiple clusters - // within a Region or across multiple Regions. + // and hyphens are allowed. Service names must be unique within a cluster, but + // you can have similarly named services in multiple clusters within a Region + // or across multiple Regions. ServiceName *string `locationName:"serviceName" type:"string"` // The details of the service discovery registries to assign to this service. - // For more information, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). + // For more information, see Service Discovery (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"` // The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE. @@ -12514,8 +13307,30 @@ type Service struct { // The metadata that you apply to the service to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The task definition to use for tasks in the service. This value is specified @@ -12821,17 +13636,14 @@ func (s *ServiceRegistry) SetRegistryArn(v string) *ServiceRegistry { type Setting struct { _ struct{} `type:"structure"` - // The account resource name. + // The Amazon ECS resource name. Name *string `locationName:"name" type:"string" enum:"SettingName"` // The ARN of the principal, which can be an IAM user, IAM role, or the root // user. If this field is omitted, the authenticated user is assumed. PrincipalArn *string `locationName:"principalArn" type:"string"` - // The current account setting for the resource name. If enabled, the resource - // receives the new Amazon Resource Name (ARN) and resource identifier (ID) - // format. 
If disabled, the resource receives the old Amazon Resource Name (ARN) - // and resource identifier (ID) format. + // Whether the account setting is enabled or disabled for the specified resource. Value *string `locationName:"value" type:"string"` } @@ -12879,7 +13691,7 @@ type StartTaskInput struct { ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` // Specifies whether to enable Amazon ECS managed tags for the task. For more - // information, see Tagging Your Amazon ECS Resources (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) + // information, see Tagging Your Amazon ECS Resources (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) // in the Amazon Elastic Container Service Developer Guide. EnableECSManagedTags *bool `locationName:"enableECSManagedTags" type:"boolean"` @@ -12920,8 +13732,30 @@ type StartTaskInput struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -13154,6 +13988,87 @@ func (s *StopTaskOutput) SetTask(v *Task) *StopTaskOutput { return s } +type SubmitAttachmentStateChangesInput struct { + _ struct{} `type:"structure"` + + // Any attachments associated with the state change request. + // + // Attachments is a required field + Attachments []*AttachmentStateChange `locationName:"attachments" type:"list" required:"true"` + + // The short name or full ARN of the cluster that hosts the container instance + // the attachment belongs to. + Cluster *string `locationName:"cluster" type:"string"` +} + +// String returns the string representation +func (s SubmitAttachmentStateChangesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitAttachmentStateChangesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SubmitAttachmentStateChangesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SubmitAttachmentStateChangesInput"} + if s.Attachments == nil { + invalidParams.Add(request.NewErrParamRequired("Attachments")) + } + if s.Attachments != nil { + for i, v := range s.Attachments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attachments", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachments sets the Attachments field's value. +func (s *SubmitAttachmentStateChangesInput) SetAttachments(v []*AttachmentStateChange) *SubmitAttachmentStateChangesInput { + s.Attachments = v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *SubmitAttachmentStateChangesInput) SetCluster(v string) *SubmitAttachmentStateChangesInput { + s.Cluster = &v + return s +} + +type SubmitAttachmentStateChangesOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. + Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitAttachmentStateChangesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitAttachmentStateChangesOutput) GoString() string { + return s.String() +} + +// SetAcknowledgment sets the Acknowledgment field's value. +func (s *SubmitAttachmentStateChangesOutput) SetAcknowledgment(v string) *SubmitAttachmentStateChangesOutput { + s.Acknowledgment = &v + return s +} + type SubmitContainerStateChangeInput struct { _ struct{} `type:"structure"` @@ -13172,6 +14087,9 @@ type SubmitContainerStateChangeInput struct { // The reason for the state change request. Reason *string `locationName:"reason" type:"string"` + // The ID of the Docker container. + RuntimeId *string `locationName:"runtimeId" type:"string"` + // The status of the state change request. Status *string `locationName:"status" type:"string"` @@ -13220,6 +14138,12 @@ func (s *SubmitContainerStateChangeInput) SetReason(v string) *SubmitContainerSt return s } +// SetRuntimeId sets the RuntimeId field's value. +func (s *SubmitContainerStateChangeInput) SetRuntimeId(v string) *SubmitContainerStateChangeInput { + s.RuntimeId = &v + return s +} + // SetStatus sets the Status field's value. func (s *SubmitContainerStateChangeInput) SetStatus(v string) *SubmitContainerStateChangeInput { s.Status = &v @@ -13445,8 +14369,30 @@ func (s *SystemControl) SetValue(v string) *SystemControl { // The metadata that you apply to a resource to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you -// define. Tag keys can have a maximum character length of 128 characters, and -// tag values can have a maximum length of 256 characters. +// define. +// +// The following basic restrictions apply to tags: +// +// * Maximum number of tags per resource - 50 +// +// * For each resource, each tag key must be unique, and each tag key can +// have only one value. +// +// * Maximum key length - 128 Unicode characters in UTF-8 +// +// * Maximum value length - 256 Unicode characters in UTF-8 +// +// * If your tagging schema is used across multiple services and resources, +// remember that other services may have restrictions on allowed characters. 
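The Validate method added above follows the SDK-wide pattern: every parameter problem is collected into a request.ErrInvalidParams before any request is signed or sent. A minimal sketch of exercising it client-side (the empty input is deliberate, and no call to ECS is made):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Attachments is a required field, so Validate reports it
	// locally without any network round trip.
	input := &ecs.SubmitAttachmentStateChangesInput{}
	if err := input.Validate(); err != nil {
		fmt.Println(err)
	}
}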
+// Generally allowed characters are: letters, numbers, and spaces representable +// in UTF-8, and the following characters: + - = . _ : / @. +// +// * Tag keys and values are case-sensitive. +// +// * Do not use aws:, AWS:, or any upper or lowercase combination of such +// as a prefix for either keys or values as it is reserved for AWS use. You +// cannot edit or delete tag keys or values with this prefix. Tags with this +// prefix do not count against your tags per resource limit. type Tag struct { _ struct{} `type:"structure"` @@ -13504,9 +14450,30 @@ type TagResourceInput struct { // ResourceArn is a required field ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` - // The tags to add to the resource. A tag is an array of key-value pairs. Tag - // keys can have a maximum character length of 128 characters, and tag values - // can have a maximum length of 256 characters. + // The tags to add to the resource. A tag is an array of key-value pairs. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. // // Tags is a required field Tags []*Tag `locationName:"tags" type:"list" required:"true"` @@ -13631,7 +14598,7 @@ type Task struct { CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` // The desired status of the task. For more information, see Task Lifecycle - // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_life_cycle.html). + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-lifecycle.html). DesiredStatus *string `locationName:"desiredStatus" type:"string"` // The Unix timestamp for when the task execution stopped. @@ -13653,8 +14620,11 @@ type Task struct { // override any Docker health checks that exist in the container image. HealthStatus *string `locationName:"healthStatus" type:"string" enum:"HealthStatus"` + // The Elastic Inference accelerator associated with the task. + InferenceAccelerators []*InferenceAccelerator `locationName:"inferenceAccelerators" type:"list"` + // The last known status of the task. For more information, see Task Lifecycle - // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_life_cycle.html). + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-lifecycle.html). LastStatus *string `locationName:"lastStatus" type:"string"` // The launch type on which your task is running. For more information, see @@ -13732,8 +14702,30 @@ type Task struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. 
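The tag restrictions spelled out in these expanded doc comments apply to every taggable ECS resource. A minimal sketch of attaching a compliant tag via the existing TagResource operation; the session setup and the cluster ARN are hypothetical, not taken from this change:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))
	// Key <= 128 characters, value <= 256, no aws: prefix,
	// and at most 50 tags per resource.
	_, err := svc.TagResource(&ecs.TagResourceInput{
		ResourceArn: aws.String("arn:aws:ecs:us-west-2:123456789012:cluster/example"), // hypothetical ARN
		Tags: []*ecs.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	})
	if err != nil {
		fmt.Println(err)
	}
}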
Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The Amazon Resource Name (ARN) of the task. @@ -13745,7 +14737,7 @@ type Task struct { // The version counter for the task. Every time a task experiences a change // that triggers a CloudWatch event, the version counter is incremented. If // you are replicating your Amazon ECS task state with CloudWatch Events, you - // can compare the version of a task reported by the Amazon ECS API actionss + // can compare the version of a task reported by the Amazon ECS API actions // with the version reported in CloudWatch Events for the task (inside the detail // object) to verify that the version in your event stream is current. Version *int64 `locationName:"version" type:"long"` @@ -13833,6 +14825,12 @@ func (s *Task) SetHealthStatus(v string) *Task { return s } +// SetInferenceAccelerators sets the InferenceAccelerators field's value. +func (s *Task) SetInferenceAccelerators(v []*InferenceAccelerator) *Task { + s.InferenceAccelerators = v + return s +} + // SetLastStatus sets the LastStatus field's value. func (s *Task) SetLastStatus(v string) *Task { s.LastStatus = &v @@ -13935,7 +14933,11 @@ func (s *Task) SetVersion(v int64) *Task { return s } -// Details of a task definition. +// The details of a task definition which describes the container and volume +// definitions of an Amazon Elastic Container Service task. You can specify +// which Docker images to use, the required resources, and other configurations +// related to launching the task definition through an Amazon ECS service or +// task. type TaskDefinition struct { _ struct{} `type:"structure"` @@ -13972,11 +14974,15 @@ type TaskDefinition struct { // (30 GB) in increments of 1024 (1 GB) Cpu *string `locationName:"cpu" type:"string"` - // The Amazon Resource Name (ARN) of the task execution role that the Amazon - // ECS container agent and the Docker daemon can assume. + // The Amazon Resource Name (ARN) of the task execution role that containers + // in this task can assume. All containers in this task are granted the permissions + // that are specified in this role. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` - // The family of your task definition, used as the definition name. + // The name of a family that this task definition is registered to. A family + // groups multiple versions of a task definition. 
Amazon ECS gives the first + // task definition that you registered to a family a revision number of 1. Amazon + // ECS gives sequential revision numbers to each task definition that you add. Family *string `locationName:"family" type:"string"` // The IPC resource namespace to use for the containers in the task. The valid @@ -13997,7 +15003,7 @@ type TaskDefinition struct { // // If you are setting namespaced kernel parameters using systemControls for // the containers in the task, the following will apply to your IPC resource - // namespace. For more information, see System Controls (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) + // namespace. For more information, see System Controls (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) // in the Amazon Elastic Container Service Developer Guide. // // * For tasks that use the host IPC mode, IPC namespace related systemControls @@ -14010,10 +15016,15 @@ type TaskDefinition struct { // Fargate launch type. IpcMode *string `locationName:"ipcMode" type:"string" enum:"IpcMode"` - // The amount (in MiB) of memory used by the task. If using the EC2 launch type, - // this field is optional and any value can be used. If using the Fargate launch - // type, this field is required and you must use one of the following values, - // which determines your range of valid values for the cpu parameter: + // The amount (in MiB) of memory used by the task. + // + // If using the EC2 launch type, this field is optional and any value can be + // used. If a task-level memory value is specified then the container-level + // memory value is optional. + // + // If using the Fargate launch type, this field is required and you must use + // one of the following values, which determines your range of valid values + // for the cpu parameter: // // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 // vCPU) @@ -14049,7 +15060,7 @@ type TaskDefinition struct { // If the network mode is awsvpc, the task is allocated an elastic network interface, // and you must specify a NetworkConfiguration value when you create a service // or run a task with the task definition. For more information, see Task Networking - // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. // // Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with @@ -14071,7 +15082,7 @@ type TaskDefinition struct { // The process namespace to use for the containers in the task. The valid values // are host or task. If host is specified, then all containers within the tasks // that specified the host PID mode on the same container instance share the - // same IPC resources with the host Amazon EC2 instance. If task is specified, + // same process namespace with the host Amazon EC2 instance. If task is specified, // all containers within the specified task share the same process namespace. // If no value is specified, the default is a private namespace. For more information, // see PID settings (https://docs.docker.com/engine/reference/run/#pid-settings---pid) @@ -14096,7 +15107,7 @@ type TaskDefinition struct { // enable a proxy configuration. 
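The cpu/memory pairings documented above are the Fargate-specific constraint; EC2 launch type tasks accept any value. A sketch of registering a task definition that satisfies one valid pairing, assuming an *ecs.ECS client built as in the earlier snippet (family name and image are hypothetical):

// assumes: import "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/ecs"
func registerFargateTaskDef(svc *ecs.ECS) error {
	_, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
		Family:                  aws.String("web"), // hypothetical family name
		RequiresCompatibilities: aws.StringSlice([]string{ecs.CompatibilityFargate}),
		NetworkMode:             aws.String(ecs.NetworkModeAwsvpc), // required for Fargate
		Cpu:                     aws.String("256"),                 // .25 vCPU ...
		Memory:                  aws.String("512"),                 // ... permits 512 MiB
		ContainerDefinitions: []*ecs.ContainerDefinition{{
			Name:      aws.String("app"),
			Image:     aws.String("nginx:latest"),
			Essential: aws.Bool(true),
		}},
	})
	return err
}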
If your container instances are launched from // the Amazon ECS-optimized AMI version 20190301 or later, then they contain // the required versions of the container agent and ecs-init. For more information, - // see Amazon ECS-optimized Linux AMI (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // see Amazon ECS-optimized Linux AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` @@ -14104,7 +15115,8 @@ type TaskDefinition struct { // valid if you are using the Fargate launch type for your task. RequiresAttributes []*Attribute `locationName:"requiresAttributes" type:"list"` - // The launch type that the task is using. + // The launch type the task requires. If no value is specified, it will default + // to EC2. Valid values include EC2 and FARGATE. RequiresCompatibilities []*string `locationName:"requiresCompatibilities" type:"list"` // The revision of the task in a particular family. The revision is a version @@ -14121,8 +15133,11 @@ type TaskDefinition struct { // The full Amazon Resource Name (ARN) of the task definition. TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` - // The ARN of the IAM role that containers in this task can assume. All containers - // in this task are granted the permissions that are specified in this role. + // The short name or full Amazon Resource Name (ARN) of the AWS Identity and + // Access Management (IAM) role that grants containers in the task permission + // to call AWS APIs on your behalf. For more information, see Amazon ECS Task + // Role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_IAM_role.html) + // in the Amazon Elastic Container Service Developer Guide. // // IAM roles for tasks on Windows require that the -EnableTaskIAMRole option // is set when you launch the Amazon ECS-optimized Windows AMI. Your containers @@ -14131,10 +15146,10 @@ type TaskDefinition struct { // in the Amazon Elastic Container Service Developer Guide. TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` - // The list of volumes in a task. + // The list of volume definitions for the task. // - // If you are using the Fargate launch type, the host and sourcePath parameters - // are not supported. + // If your tasks are using the Fargate launch type, the host and sourcePath + // parameters are not supported. // // For more information about volume definition parameters and defaults, see // Amazon ECS Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) @@ -14261,12 +15276,11 @@ func (s *TaskDefinition) SetVolumes(v []*Volume) *TaskDefinition { } // An object representing a constraint on task placement in the task definition. +// For more information, see Task Placement Constraints (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) +// in the Amazon Elastic Container Service Developer Guide. // // If you are using the Fargate launch type, task placement constraints are // not supported. -// -// For more information, see Task Placement Constraints (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) -// in the Amazon Elastic Container Service Developer Guide. 
type TaskDefinitionPlacementConstraint struct { _ struct{} `type:"structure"` @@ -14275,9 +15289,8 @@ type TaskDefinitionPlacementConstraint struct { // in the Amazon Elastic Container Service Developer Guide. Expression *string `locationName:"expression" type:"string"` - // The type of constraint. The DistinctInstance constraint ensures that each - // task in a particular group is running on a different container instance. - // The MemberOf constraint restricts selection to be from a group of valid candidates. + // The type of constraint. The MemberOf constraint restricts selection to be + // from a group of valid candidates. Type *string `locationName:"type" type:"string" enum:"TaskDefinitionPlacementConstraintType"` } @@ -14314,6 +15327,9 @@ type TaskOverride struct { // ECS container agent and the Docker daemon can assume. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + // The Elastic Inference accelerator override for the task. + InferenceAcceleratorOverrides []*InferenceAcceleratorOverride `locationName:"inferenceAcceleratorOverrides" type:"list"` + // The Amazon Resource Name (ARN) of the IAM role that containers in this task // can assume. All containers in this task are granted the permissions that // are specified in this role. @@ -14362,6 +15378,12 @@ func (s *TaskOverride) SetExecutionRoleArn(v string) *TaskOverride { return s } +// SetInferenceAcceleratorOverrides sets the InferenceAcceleratorOverrides field's value. +func (s *TaskOverride) SetInferenceAcceleratorOverrides(v []*InferenceAcceleratorOverride) *TaskOverride { + s.InferenceAcceleratorOverrides = v + return s +} + // SetTaskRoleArn sets the TaskRoleArn field's value. func (s *TaskOverride) SetTaskRoleArn(v string) *TaskOverride { s.TaskRoleArn = &v @@ -14437,7 +15459,7 @@ type TaskSet struct { ServiceArn *string `locationName:"serviceArn" type:"string"` // The details of the service discovery registries to assign to this task set. - // For more information, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). + // For more information, see Service Discovery (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"` // The stability status, which indicates whether the task set has reached a @@ -14467,12 +15489,18 @@ type TaskSet struct { // The status of the task set. The following describes each state: // - // PRIMARYThe task set is serving production traffic. + // PRIMARY + // + // The task set is serving production traffic. + // + // ACTIVE + // + // The task set is not serving production traffic. // - // ACTIVEThe task set is not serving production traffic. + // DRAINING // - // DRAININGThe tasks in the task set are being stopped and their corresponding - // targets are being deregistered from their target group. + // The tasks in the task set are being stopped and their corresponding targets + // are being deregistered from their target group. Status *string `locationName:"status" type:"string"` // The task definition the task set is using. @@ -14825,6 +15853,87 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +type UpdateClusterSettingsInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster to modify the settings for. + // + // Cluster is a required field + Cluster *string `locationName:"cluster" type:"string" required:"true"` + + // The setting to use by default for a cluster. 
This parameter is used to enable + // CloudWatch Container Insights for a cluster. If this value is specified, + // it will override the containerInsights value set with PutAccountSetting or + // PutAccountSettingDefault. + // + // Settings is a required field + Settings []*ClusterSetting `locationName:"settings" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateClusterSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateClusterSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateClusterSettingsInput"} + if s.Cluster == nil { + invalidParams.Add(request.NewErrParamRequired("Cluster")) + } + if s.Settings == nil { + invalidParams.Add(request.NewErrParamRequired("Settings")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *UpdateClusterSettingsInput) SetCluster(v string) *UpdateClusterSettingsInput { + s.Cluster = &v + return s +} + +// SetSettings sets the Settings field's value. +func (s *UpdateClusterSettingsInput) SetSettings(v []*ClusterSetting) *UpdateClusterSettingsInput { + s.Settings = v + return s +} + +type UpdateClusterSettingsOutput struct { + _ struct{} `type:"structure"` + + // A regional grouping of one or more container instances on which you can run + // task requests. Each account receives a default cluster the first time you + // use the Amazon ECS service, but you may also create other clusters. Clusters + // may contain more than one instance type simultaneously. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s UpdateClusterSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterSettingsOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *UpdateClusterSettingsOutput) SetCluster(v *Cluster) *UpdateClusterSettingsOutput { + s.Cluster = v + return s +} + type UpdateContainerAgentInput struct { _ struct{} `type:"structure"` @@ -14912,6 +16021,11 @@ type UpdateContainerInstancesStateInput struct { ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` // The container instance state with which to update the container instance. + // The only valid values for this action are ACTIVE and DRAINING. A container + // instance can only be updated to DRAINING status once it has reached an ACTIVE + // state. If a container instance is in REGISTERING, DEREGISTERING, or REGISTRATION_FAILED + // state you can describe the container instance but will be unable to update + // the container instance state. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"ContainerInstanceStatus"` @@ -15021,16 +16135,16 @@ type UpdateServiceInput struct { // has first started. This is only valid if your service is configured to use // a load balancer. If your service's tasks take a while to start and respond // to Elastic Load Balancing health checks, you can specify a health check grace - // period of up to 1,800 seconds. 
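A sketch of the UpdateClusterSettings operation introduced above, enabling CloudWatch Container Insights for one cluster (the cluster name is hypothetical); per the doc comment, this overrides any containerInsights value set through PutAccountSetting or PutAccountSettingDefault:

// assumes the same aws/ecs imports as the earlier snippets
func enableContainerInsights(svc *ecs.ECS) error {
	_, err := svc.UpdateClusterSettings(&ecs.UpdateClusterSettingsInput{
		Cluster: aws.String("default"),
		Settings: []*ecs.ClusterSetting{{
			Name:  aws.String(ecs.ClusterSettingNameContainerInsights),
			Value: aws.String("enabled"),
		}},
	})
	return err
}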
During that time, the ECS service scheduler - // ignores the Elastic Load Balancing health check status. This grace period - // can prevent the ECS service scheduler from marking tasks as unhealthy and - // stopping them before they have time to come up. + // period of up to 2,147,483,647 seconds. During that time, the ECS service + // scheduler ignores the Elastic Load Balancing health check status. This grace + // period can prevent the ECS service scheduler from marking tasks as unhealthy + // and stopping them before they have time to come up. HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` // The network configuration for the service. This parameter is required for // task definitions that use the awsvpc network mode to receive their own elastic // network interface, and it is not supported for other network modes. For more - // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // information, see Task Networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) // in the Amazon Elastic Container Service Developer Guide. // // Updating a service to add a subnet to a list of existing subnets does not @@ -15374,7 +16488,7 @@ type VersionInfo struct { _ struct{} `type:"structure"` // The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent - // (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository. + // (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository. AgentHash *string `locationName:"agentHash" type:"string"` // The version number of the Amazon ECS container agent. @@ -15415,7 +16529,7 @@ func (s *VersionInfo) SetDockerVersion(v string) *VersionInfo { // A data volume used in a task definition. For tasks that use a Docker volume, // specify a DockerVolumeConfiguration. For tasks that use a bind mount host // volume, specify a host and optional sourcePath. For more information, see -// Using Data Volumes in Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). +// Using Data Volumes in Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). type Volume struct { _ struct{} `type:"structure"` @@ -15440,8 +16554,8 @@ type Volume struct { Host *HostVolumeProperties `locationName:"host" type:"structure"` // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, - // hyphens, and underscores are allowed. This name is referenced in the sourceVolume - // parameter of container definition mountPoints. + // and hyphens are allowed. This name is referenced in the sourceVolume parameter + // of container definition mountPoints. 
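The grace-period ceiling documented above rises from 1,800 to 2,147,483,647 seconds in this revision. A sketch of setting it on an existing load-balanced service (cluster and service names hypothetical):

// assumes the same aws/ecs imports as the earlier snippets
func extendGracePeriod(svc *ecs.ECS) error {
	_, err := svc.UpdateService(&ecs.UpdateServiceInput{
		Cluster:                       aws.String("default"),
		Service:                       aws.String("web"),
		HealthCheckGracePeriodSeconds: aws.Int64(300), // seconds to ignore ELB health checks after task start
	})
	return err
}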
Name *string `locationName:"name" type:"string"` } @@ -15545,6 +16659,11 @@ const ( ClusterFieldTags = "TAGS" ) +const ( + // ClusterSettingNameContainerInsights is a ClusterSettingName enum value + ClusterSettingNameContainerInsights = "containerInsights" +) + const ( // CompatibilityEc2 is a Compatibility enum value CompatibilityEc2 = "EC2" @@ -15586,6 +16705,15 @@ const ( // ContainerInstanceStatusDraining is a ContainerInstanceStatus enum value ContainerInstanceStatusDraining = "DRAINING" + + // ContainerInstanceStatusRegistering is a ContainerInstanceStatus enum value + ContainerInstanceStatusRegistering = "REGISTERING" + + // ContainerInstanceStatusDeregistering is a ContainerInstanceStatus enum value + ContainerInstanceStatusDeregistering = "DEREGISTERING" + + // ContainerInstanceStatusRegistrationFailed is a ContainerInstanceStatus enum value + ContainerInstanceStatusRegistrationFailed = "REGISTRATION_FAILED" ) const ( @@ -15621,6 +16749,14 @@ const ( DeviceCgroupPermissionMknod = "mknod" ) +const ( + // FirelensConfigurationTypeFluentd is a FirelensConfigurationType enum value + FirelensConfigurationTypeFluentd = "fluentd" + + // FirelensConfigurationTypeFluentbit is a FirelensConfigurationType enum value + FirelensConfigurationTypeFluentbit = "fluentbit" +) + const ( // HealthStatusHealthy is a HealthStatus enum value HealthStatusHealthy = "HEALTHY" @@ -15672,6 +16808,9 @@ const ( // LogDriverSplunk is a LogDriver enum value LogDriverSplunk = "splunk" + + // LogDriverAwsfirelens is a LogDriver enum value + LogDriverAwsfirelens = "awsfirelens" ) const ( @@ -15736,6 +16875,9 @@ const ( const ( // ResourceTypeGpu is a ResourceType enum value ResourceTypeGpu = "GPU" + + // ResourceTypeInferenceAccelerator is a ResourceType enum value + ResourceTypeInferenceAccelerator = "InferenceAccelerator" ) const ( @@ -15773,6 +16915,12 @@ const ( // SettingNameContainerInstanceLongArnFormat is a SettingName enum value SettingNameContainerInstanceLongArnFormat = "containerInstanceLongArnFormat" + + // SettingNameAwsvpcTrunking is a SettingName enum value + SettingNameAwsvpcTrunking = "awsvpcTrunking" + + // SettingNameContainerInsights is a SettingName enum value + SettingNameContainerInsights = "containerInsights" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go index 1d59f5b640e..5582142122a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/doc.go @@ -10,7 +10,7 @@ // tasks using the Fargate launch type. For more control, you can host your // tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances // that you manage by using the EC2 launch type. For more information about -// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html). +// launch types, see Amazon ECS Launch Types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html). 
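The new SettingName enum values above pair with the existing PutAccountSetting operation. A sketch of opting the calling principal into awsvpc trunking (containerInsights works the same way); whether the account is eligible for trunking is a service-side concern, not shown here:

// assumes the same aws/ecs imports as the earlier snippets
func optInToTrunking(svc *ecs.ECS) error {
	_, err := svc.PutAccountSetting(&ecs.PutAccountSettingInput{
		Name:  aws.String(ecs.SettingNameAwsvpcTrunking),
		Value: aws.String("enabled"),
	})
	return err
}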
// // Amazon ECS lets you launch and stop container-based applications with simple // API calls, allows you to get the state of your cluster from a centralized diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go index c268614ecb9..a2eef295918 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go @@ -46,11 +46,11 @@ const ( // svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECS { svc := &ECS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-13", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/api.go index 73af29445db..47c2ce00664 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/api.go @@ -251,28 +251,22 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // // * Creates a new mount target in the specified subnet. // -// * Also creates a new network interface in the subnet as follows: -// -// If the request provides an IpAddress, Amazon EFS assigns that IP address -// to the network interface. Otherwise, Amazon EFS assigns a free address -// in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface -// call does when a request does not specify a primary private IP address). -// -// If the request provides SecurityGroups, this network interface is associated +// * Also creates a new network interface in the subnet as follows: If the +// request provides an IpAddress, Amazon EFS assigns that IP address to the +// network interface. Otherwise, Amazon EFS assigns a free address in the +// subnet (in the same way that the Amazon EC2 CreateNetworkInterface call +// does when a request does not specify a primary private IP address). If +// the request provides SecurityGroups, this network interface is associated // with those security groups. Otherwise, it belongs to the default security -// group for the subnet's VPC. -// -// Assigns the description Mount target fsmt-id for file system fs-id where -// fsmt-id is the mount target ID, and fs-id is the FileSystemId. -// -// Sets the requesterManaged property of the network interface to true, and -// the requesterId value to EFS. -// -// Each Amazon EFS mount target has one corresponding requester-managed EC2 -// network interface. 
After the network interface is created, Amazon EFS -// sets the NetworkInterfaceId field in the mount target's description to -// the network interface ID, and the IpAddress field to its address. If network -// interface creation fails, the entire CreateMountTarget operation fails. +// group for the subnet's VPC. Assigns the description Mount target fsmt-id +// for file system fs-id where fsmt-id is the mount target ID, and fs-id +// is the FileSystemId. Sets the requesterManaged property of the network +// interface to true, and the requesterId value to EFS. Each Amazon EFS mount +// target has one corresponding requester-managed EC2 network interface. +// After the network interface is created, Amazon EFS sets the NetworkInterfaceId +// field in the mount target's description to the network interface ID, and +// the IpAddress field to its address. If network interface creation fails, +// the entire CreateMountTarget operation fails. // // The CreateMountTarget call returns only after creating the network interface, // but while the mount target state is still creating, you can check the mount @@ -343,7 +337,7 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // for the specific AWS Region. The client should try to delete some elastic // network interfaces or get the account limit raised. For more information, // see Amazon VPC Limits (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html) -// in the Amazon VPC User Guide (see the Network interfaces per VPC entry in +// in the Amazon VPC User Guide (see the Network interfaces per VPC entry in // the table). // // * ErrCodeSecurityGroupLimitExceeded "SecurityGroupLimitExceeded" @@ -1453,17 +1447,14 @@ func (c *EFS) PutLifecycleConfigurationRequest(input *PutLifecycleConfigurationI // LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration // and disables lifecycle management. // -// You can enable lifecycle management only for EFS file systems created after -// the release of EFS infrequent access. -// // In the request, specify the following: // -// * The ID for the file system for which you are creating a lifecycle management -// configuration. +// * The ID for the file system for which you are enabling, disabling, or +// modifying lifecycle management. // // * A LifecyclePolicies array of LifecyclePolicy objects that define when // files are moved to the IA storage class. The array can contain only one -// "TransitionToIA": "AFTER_30_DAYS"LifecyclePolicy item. +// LifecyclePolicy item. // // This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration // operation. @@ -1664,11 +1655,12 @@ type CreateFileSystemInput struct { PerformanceMode *string `type:"string" enum:"PerformanceMode"` // The throughput, measured in MiB/s, that you want to provision for a file - // system that you're creating. The limit on throughput is 1024 MiB/s. You can - // get these limits increased by contacting AWS Support. For more information, + // system that you're creating. Valid values are 1-1024. Required if ThroughputMode + // is set to provisioned. The upper limit for throughput is 1024 MiB/s. You + // can get this limit increased by contacting AWS Support. For more information, // see Amazon EFS Limits That You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) // in the Amazon EFS User Guide. 
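The reflowed CreateMountTarget doc above describes the network interface EFS creates on your behalf. A sketch of the minimal call, leaving IpAddress and SecurityGroups to the defaults the doc describes (both IDs are hypothetical):

// assumes: import "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/efs"
func addMountTarget(svc *efs.EFS) error {
	_, err := svc.CreateMountTarget(&efs.CreateMountTargetInput{
		FileSystemId: aws.String("fs-12345678"),
		SubnetId:     aws.String("subnet-0123456789abcdef0"),
		// IpAddress and SecurityGroups omitted: EFS assigns a free address
		// in the subnet and uses the subnet VPC's default security group.
	})
	return err
}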
- ProvisionedThroughputInMibps *float64 `type:"double"` + ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // A value that specifies to create one or more tags associated with the file // system. Each tag is a user-defined key-value pair. Name your file system @@ -1676,10 +1668,13 @@ type CreateFileSystemInput struct { Tags []*Tag `type:"list"` // The throughput mode for the file system to be created. There are two throughput - // modes to choose from for your file system: bursting and provisioned. You - // can decrease your file system's throughput in Provisioned Throughput mode - // or change between the throughput modes as long as it’s been more than 24 - // hours since the last decrease or throughput mode change. + // modes to choose from for your file system: bursting and provisioned. If you + // set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. + // You can decrease your file system's throughput in Provisioned Throughput + // mode or change between the throughput modes as long as it’s been more than + // 24 hours since the last decrease or throughput mode change. For more, see + // Specifying Throughput with Provisioned Mode (https://docs.aws.amazon.com/efs/latest/ug/performance.html#provisioned-throughput) + // in the Amazon EFS User Guide. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -1705,6 +1700,9 @@ func (s *CreateFileSystemInput) Validate() error { if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) } + if s.ProvisionedThroughputInMibps != nil && *s.ProvisionedThroughputInMibps < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedThroughputInMibps", 1)) + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2111,7 +2109,9 @@ type DescribeFileSystemsInput struct { Marker *string `location:"querystring" locationName:"Marker" type:"string"` // (Optional) Specifies the maximum number of file systems to return in the - // response (integer). Currently, this number is automatically set to 10. + // response (integer). Currently, this number is automatically set to 10, and + // other values are ignored. The response is paginated at 10 per page if you + // have more than 10 file systems. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` } @@ -2352,7 +2352,8 @@ type DescribeMountTargetsInput struct { Marker *string `location:"querystring" locationName:"Marker" type:"string"` // (Optional) Maximum number of mount targets to return in the response. Currently, - // this number is automatically set to 10. + // this number is automatically set to 10, and other values are ignored. The + // response is paginated at 10 per page if you have more than 10 mount targets. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` // (Optional) ID of the mount target that you want to have described (String). @@ -2466,7 +2467,8 @@ type DescribeTagsInput struct { Marker *string `location:"querystring" locationName:"Marker" type:"string"` // (Optional) The maximum number of file system tags to return in the response. - // Currently, this number is automatically set to 10. + // Currently, this number is automatically set to 10, and other values are ignored. + // The response is paginated at 10 per page if you have more than 10 tags. 
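This revision tightens ProvisionedThroughputInMibps to a minimum of 1 and documents that it is required with provisioned mode. A sketch of creating a file system that satisfies both constraints (the creation token is a hypothetical idempotency value):

// assumes the same aws/efs imports as the earlier snippet
func createProvisionedFileSystem(svc *efs.EFS) error {
	_, err := svc.CreateFileSystem(&efs.CreateFileSystemInput{
		CreationToken:                aws.String("example-token"),
		ThroughputMode:               aws.String(efs.ThroughputModeProvisioned),
		ProvisionedThroughputInMibps: aws.Float64(64), // must be in 1-1024
	})
	return err
}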
MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` } @@ -2617,11 +2619,12 @@ type FileSystemDescription struct { PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` // The throughput, measured in MiB/s, that you want to provision for a file - // system. The limit on throughput is 1024 MiB/s. You can get these limits increased + // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. + // The limit on throughput is 1024 MiB/s. You can get these limits increased // by contacting AWS Support. For more information, see Amazon EFS Limits That // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) // in the Amazon EFS User Guide. - ProvisionedThroughputInMibps *float64 `type:"double"` + ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // The latest known metered size (in bytes) of data stored in the file system, // in its Value field, and the time at which that size was determined in its @@ -2642,10 +2645,11 @@ type FileSystemDescription struct { Tags []*Tag `type:"list" required:"true"` // The throughput mode for a file system. There are two throughput modes to - // choose from for your file system: bursting and provisioned. You can decrease - // your file system's throughput in Provisioned Throughput mode or change between - // the throughput modes as long as it’s been more than 24 hours since the last - // decrease or throughput mode change. + // choose from for your file system: bursting and provisioned. If you set ThroughputMode + // to provisioned, you must also set a value for ProvisionedThroughPutInMibps. + // You can decrease your file system's throughput in Provisioned Throughput + // mode or change between the throughput modes as long as it’s been more than + // 24 hours since the last decrease or throughput mode change. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -2811,13 +2815,9 @@ func (s *FileSystemSize) SetValueInStandard(v int64) *FileSystemSize { type LifecyclePolicy struct { _ struct{} `type:"structure"` - // A value that indicates how long it takes to transition files to the IA storage - // class. Currently, the only valid value is AFTER_30_DAYS. - // - // AFTER_30_DAYS indicates files that have not been read from or written to - // for 30 days are transitioned from the Standard storage class to the IA storage - // class. Metadata operations such as listing the contents of a directory don't - // count as a file access event. + // A value that describes the period of time that a file is not accessed, after + // which it transitions to the IA storage class. Metadata operations such as + // listing the contents of a directory don't count as file access events. TransitionToIA *string `type:"string" enum:"TransitionToIARules"` } @@ -3072,7 +3072,7 @@ func (s *PutLifecycleConfigurationOutput) SetLifecyclePolicies(v []*LifecyclePol } // A tag is a key-value pair. Allowed characters are letters, white space, and -// numbers that can be represented in UTF-8, and the following characters: + +// numbers that can be represented in UTF-8, and the following characters:+ // - = . _ : / type Tag struct { _ struct{} `type:"structure"` @@ -3138,13 +3138,16 @@ type UpdateFileSystemInput struct { FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` // (Optional) The amount of throughput, in MiB/s, that you want to provision - // for your file system. 
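Since MaxItems is pinned to 10, callers with more than 10 file systems must walk the Marker/NextMarker chain themselves. A sketch of that loop; only FileSystemId is printed:

// assumes: import "fmt", "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/efs"
func listAllFileSystems(svc *efs.EFS) error {
	var marker *string
	for {
		out, err := svc.DescribeFileSystems(&efs.DescribeFileSystemsInput{Marker: marker})
		if err != nil {
			return err
		}
		for _, fs := range out.FileSystems {
			fmt.Println(aws.StringValue(fs.FileSystemId))
		}
		if out.NextMarker == nil {
			break // last page
		}
		marker = out.NextMarker
	}
	return nil
}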
If you're not updating the amount of provisioned throughput - // for your file system, you don't need to provide this value in your request. - ProvisionedThroughputInMibps *float64 `type:"double"` + // for your file system. Valid values are 1-1024. Required if ThroughputMode + // is changed to provisioned on update. If you're not updating the amount of + // provisioned throughput for your file system, you don't need to provide this + // value in your request. + ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // (Optional) The throughput mode that you want your file system to use. If // you're not updating your throughput mode, you don't need to provide this - // value in your request. + // value in your request. If you are changing the ThroughputMode to provisioned, + // you must also set a value for ProvisionedThroughputInMibps. ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -3167,6 +3170,9 @@ func (s *UpdateFileSystemInput) Validate() error { if s.FileSystemId != nil && len(*s.FileSystemId) < 1 { invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1)) } + if s.ProvisionedThroughputInMibps != nil && *s.ProvisionedThroughputInMibps < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedThroughputInMibps", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3246,11 +3252,12 @@ type UpdateFileSystemOutput struct { PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` // The throughput, measured in MiB/s, that you want to provision for a file - // system. The limit on throughput is 1024 MiB/s. You can get these limits increased + // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. + // The limit on throughput is 1024 MiB/s. You can get these limits increased // by contacting AWS Support. For more information, see Amazon EFS Limits That // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) // in the Amazon EFS User Guide. - ProvisionedThroughputInMibps *float64 `type:"double"` + ProvisionedThroughputInMibps *float64 `min:"1" type:"double"` // The latest known metered size (in bytes) of data stored in the file system, // in its Value field, and the time at which that size was determined in its @@ -3271,10 +3278,11 @@ type UpdateFileSystemOutput struct { Tags []*Tag `type:"list" required:"true"` // The throughput mode for a file system. There are two throughput modes to - // choose from for your file system: bursting and provisioned. You can decrease - // your file system's throughput in Provisioned Throughput mode or change between - // the throughput modes as long as it’s been more than 24 hours since the last - // decrease or throughput mode change. + // choose from for your file system: bursting and provisioned. If you set ThroughputMode + // to provisioned, you must also set a value for ProvisionedThroughPutInMibps. + // You can decrease your file system's throughput in Provisioned Throughput + // mode or change between the throughput modes as long as it’s been more than + // 24 hours since the last decrease or throughput mode change. 
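The UpdateFileSystem changes above make the pairing explicit: switching ThroughputMode to provisioned requires ProvisionedThroughputInMibps in the same request. A sketch (file system ID hypothetical):

// assumes the same aws/efs imports as the earlier snippets
func switchToProvisioned(svc *efs.EFS) error {
	_, err := svc.UpdateFileSystem(&efs.UpdateFileSystemInput{
		FileSystemId:                 aws.String("fs-12345678"),
		ThroughputMode:               aws.String(efs.ThroughputModeProvisioned),
		ProvisionedThroughputInMibps: aws.Float64(128), // required when changing to provisioned
	})
	return err
}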
ThroughputMode *string `type:"string" enum:"ThroughputMode"` } @@ -3406,6 +3414,18 @@ const ( ) const ( + // TransitionToIARulesAfter7Days is a TransitionToIARules enum value + TransitionToIARulesAfter7Days = "AFTER_7_DAYS" + + // TransitionToIARulesAfter14Days is a TransitionToIARules enum value + TransitionToIARulesAfter14Days = "AFTER_14_DAYS" + // TransitionToIARulesAfter30Days is a TransitionToIARules enum value TransitionToIARulesAfter30Days = "AFTER_30_DAYS" + + // TransitionToIARulesAfter60Days is a TransitionToIARules enum value + TransitionToIARulesAfter60Days = "AFTER_60_DAYS" + + // TransitionToIARulesAfter90Days is a TransitionToIARules enum value + TransitionToIARulesAfter90Days = "AFTER_90_DAYS" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go index 85406d92d5f..7a7eb3b1bf7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go @@ -101,7 +101,7 @@ const ( // for the specific AWS Region. The client should try to delete some elastic // network interfaces or get the account limit raised. For more information, // see Amazon VPC Limits (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html) - // in the Amazon VPC User Guide (see the Network interfaces per VPC entry in + // in the Amazon VPC User Guide (see the Network interfaces per VPC entry in // the table). ErrCodeNetworkInterfaceLimitExceeded = "NetworkInterfaceLimitExceeded" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/service.go index 6b1a11c900a..3b6336d6224 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/efs/service.go @@ -46,11 +46,11 @@ const ( // svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
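With the TransitionToIARules enum expanded above from the single AFTER_30_DAYS value to 7/14/30/60/90 days, lifecycle management is configured as before; the LifecyclePolicies array still takes only one item. A sketch (file system ID hypothetical):

// assumes the same aws/efs imports as the earlier snippets
func enableLifecycleManagement(svc *efs.EFS) error {
	_, err := svc.PutLifecycleConfiguration(&efs.PutLifecycleConfigurationInput{
		FileSystemId: aws.String("fs-12345678"),
		LifecyclePolicies: []*efs.LifecyclePolicy{
			{TransitionToIA: aws.String(efs.TransitionToIARulesAfter30Days)},
		},
	})
	return err
}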
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EFS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EFS { svc := &EFS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/api.go index 2856011408c..28140fc874e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/api.go @@ -8,6 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" ) const opCreateCluster = "CreateCluster" @@ -52,15 +54,15 @@ func (c *EKS) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ return } -// CreateCluster API operation for Amazon Elastic Container Service for Kubernetes. +// CreateCluster API operation for Amazon Elastic Kubernetes Service. // // Creates an Amazon EKS control plane. // // The Amazon EKS control plane consists of control plane instances that run -// the Kubernetes software, like etcd and the API server. The control plane +// the Kubernetes software, such as etcd and the API server. The control plane // runs in an account managed by AWS, and the Kubernetes API is exposed via // the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane -// is single-tenant and unique, and runs on its own set of Amazon EC2 instances. +// is single-tenant and unique and runs on its own set of Amazon EC2 instances. // // The cluster control plane is provisioned across multiple Availability Zones // and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS @@ -74,16 +76,16 @@ func (c *EKS) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ // // You can use the endpointPublicAccess and endpointPrivateAccess parameters // to enable or disable public and private access to your cluster's Kubernetes -// API server endpoint. By default, public access is enabled and private access +// API server endpoint. By default, public access is enabled, and private access // is disabled. For more information, see Amazon EKS Cluster Endpoint Access // Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) -// in the Amazon EKS User Guide. +// in the Amazon EKS User Guide . // // You can use the logging parameter to enable or disable exporting the Kubernetes // control plane logs for your cluster to CloudWatch Logs. By default, cluster -// control plane logs are not exported to CloudWatch Logs. For more information, +// control plane logs aren't exported to CloudWatch Logs. For more information, // see Amazon EKS Cluster Control Plane Logs (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) -// in the Amazon EKS User Guide. +// in the Amazon EKS User Guide . // // CloudWatch Logs ingestion, archive storage, and data scanning rates apply // to exported control plane logs. 
For more information, see Amazon CloudWatch @@ -100,7 +102,7 @@ func (c *EKS) CreateClusterRequest(input *CreateClusterInput) (req *request.Requ // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation CreateCluster for usage and error information. // // Returned Error Codes: @@ -195,7 +197,7 @@ func (c *EKS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Requ return } -// DeleteCluster API operation for Amazon Elastic Container Service for Kubernetes. +// DeleteCluster API operation for Amazon Elastic Kubernetes Service. // // Deletes the Amazon EKS cluster control plane. // @@ -210,7 +212,7 @@ func (c *EKS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Requ // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation DeleteCluster for usage and error information. // // Returned Error Codes: @@ -296,7 +298,7 @@ func (c *EKS) DescribeClusterRequest(input *DescribeClusterInput) (req *request. return } -// DescribeCluster API operation for Amazon Elastic Container Service for Kubernetes. +// DescribeCluster API operation for Amazon Elastic Kubernetes Service. // // Returns descriptive information about an Amazon EKS cluster. // @@ -305,14 +307,14 @@ func (c *EKS) DescribeClusterRequest(input *DescribeClusterInput) (req *request. // API server. For more information, see Create a kubeconfig for Amazon EKS // (https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html). // -// The API server endpoint and certificate authority data are not available -// until the cluster reaches the ACTIVE state. +// The API server endpoint and certificate authority data aren't available until +// the cluster reaches the ACTIVE state. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation DescribeCluster for usage and error information. // // Returned Error Codes: @@ -395,7 +397,7 @@ func (c *EKS) DescribeUpdateRequest(input *DescribeUpdateInput) (req *request.Re return } -// DescribeUpdate API operation for Amazon Elastic Container Service for Kubernetes. +// DescribeUpdate API operation for Amazon Elastic Kubernetes Service. // // Returns descriptive information about an update against your Amazon EKS cluster. // @@ -407,7 +409,7 @@ func (c *EKS) DescribeUpdateRequest(input *DescribeUpdateInput) (req *request.Re // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation DescribeUpdate for usage and error information. 
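The CreateCluster doc above calls out the endpoint-access and logging parameters. A sketch that sets both explicitly to the documented defaults (public access on, private off, control plane logs off unless enabled); the name, role ARN, and subnet IDs are hypothetical, and the VpcConfigRequest/Logging field names are as this SDK revision exposes them:

// assumes: import "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/eks"
func createCluster(svc *eks.EKS) error {
	_, err := svc.CreateCluster(&eks.CreateClusterInput{
		Name:    aws.String("example"),
		RoleArn: aws.String("arn:aws:iam::123456789012:role/eks-service-role"),
		ResourcesVpcConfig: &eks.VpcConfigRequest{
			SubnetIds:             aws.StringSlice([]string{"subnet-aaaa", "subnet-bbbb"}),
			EndpointPublicAccess:  aws.Bool(true),
			EndpointPrivateAccess: aws.Bool(false),
		},
		Logging: &eks.Logging{ClusterLogging: []*eks.LogSetup{{
			Enabled: aws.Bool(true), // opt in; logs aren't exported by default
			Types:   aws.StringSlice([]string{eks.LogTypeApi, eks.LogTypeAudit}),
		}}},
	})
	return err
}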
// // Returned Error Codes: @@ -480,6 +482,12 @@ func (c *EKS) ListClustersRequest(input *ListClustersInput) (req *request.Reques Name: opListClusters, HTTPMethod: "GET", HTTPPath: "/clusters", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -491,7 +499,7 @@ func (c *EKS) ListClustersRequest(input *ListClustersInput) (req *request.Reques return } -// ListClusters API operation for Amazon Elastic Container Service for Kubernetes. +// ListClusters API operation for Amazon Elastic Kubernetes Service. // // Lists the Amazon EKS clusters in your AWS account in the specified Region. // @@ -499,7 +507,7 @@ func (c *EKS) ListClustersRequest(input *ListClustersInput) (req *request.Reques // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation ListClusters for usage and error information. // // Returned Error Codes: @@ -540,6 +548,142 @@ func (c *EKS) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, return out, req.Send() } +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *eks.ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EKS) ListClustersPages(input *ListClustersInput, fn func(*ListClustersOutput, bool) bool) error { + return c.ListClustersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListClustersPagesWithContext same as ListClustersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersInput, fn func(*ListClustersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListClustersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListClustersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListTagsForResource +func (c *EKS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Elastic Kubernetes Service. +// +// List the tags for an Amazon EKS resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// This exception is thrown if the request contains a semantic error. The precise +// meaning will depend on the API, and will be documented in the error message. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListTagsForResource +func (c *EKS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opListUpdates = "ListUpdates" // ListUpdatesRequest generates a "aws/request.Request" representing the @@ -571,6 +715,12 @@ func (c *EKS) ListUpdatesRequest(input *ListUpdatesInput) (req *request.Request, Name: opListUpdates, HTTPMethod: "GET", HTTPPath: "/clusters/{name}/updates", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -582,7 +732,7 @@ func (c *EKS) ListUpdatesRequest(input *ListUpdatesInput) (req *request.Request, return } -// ListUpdates API operation for Amazon Elastic Container Service for Kubernetes. +// ListUpdates API operation for Amazon Elastic Kubernetes Service. // // Lists the updates associated with an Amazon EKS cluster in your AWS account, // in the specified Region. @@ -591,7 +741,7 @@ func (c *EKS) ListUpdatesRequest(input *ListUpdatesInput) (req *request.Request, // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation ListUpdates for usage and error information. // // Returned Error Codes: @@ -633,6 +783,231 @@ func (c *EKS) ListUpdatesWithContext(ctx aws.Context, input *ListUpdatesInput, o return out, req.Send() } +// ListUpdatesPages iterates over the pages of a ListUpdates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUpdates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUpdates operation. +// pageNum := 0 +// err := client.ListUpdatesPages(params, +// func(page *eks.ListUpdatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EKS) ListUpdatesPages(input *ListUpdatesInput, fn func(*ListUpdatesOutput, bool) bool) error { + return c.ListUpdatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListUpdatesPagesWithContext same as ListUpdatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) ListUpdatesPagesWithContext(ctx aws.Context, input *ListUpdatesInput, fn func(*ListUpdatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListUpdatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListUpdatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListUpdatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/TagResource +func (c *EKS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Elastic Kubernetes Service. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are deleted as well. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// This exception is thrown if the request contains a semantic error. The precise +// meaning will depend on the API, and will be documented in the error message. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/TagResource +func (c *EKS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UntagResource +func (c *EKS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Elastic Kubernetes Service. +// +// Deletes specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// This exception is thrown if the request contains a semantic error. The precise +// meaning will depend on the API, and will be documented in the error message. +// +// * ErrCodeNotFoundException "NotFoundException" +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UntagResource +func (c *EKS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EKS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
const opUpdateClusterConfig = "UpdateClusterConfig"

// UpdateClusterConfigRequest generates a "aws/request.Request" representing the
@@ -675,29 +1050,32 @@ func (c *EKS) UpdateClusterConfigRequest(input *UpdateClusterConfigInput) (req *
 	return
 }

-// UpdateClusterConfig API operation for Amazon Elastic Container Service for Kubernetes.
+// UpdateClusterConfig API operation for Amazon Elastic Kubernetes Service.
 //
 // Updates an Amazon EKS cluster configuration. Your cluster continues to function
 // during the update. The response output includes an update ID that you can
 // use to track the status of your cluster update with the DescribeUpdate API
 // operation.
 //
-// You can use this API operation to enable or disable public and private access
-// to your cluster's Kubernetes API server endpoint. By default, public access
-// is enabled and private access is disabled. For more information, see Amazon
-// EKS Cluster Endpoint Access Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)
-// in the Amazon EKS User Guide.
-//
-// You can also use this API operation to enable or disable exporting the Kubernetes
+// You can use this API operation to enable or disable exporting the Kubernetes
 // control plane logs for your cluster to CloudWatch Logs. By default, cluster
-// control plane logs are not exported to CloudWatch Logs. For more information,
+// control plane logs aren't exported to CloudWatch Logs. For more information,
 // see Amazon EKS Cluster Control Plane Logs (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)
-// in the Amazon EKS User Guide.
+// in the Amazon EKS User Guide .
 //
 // CloudWatch Logs ingestion, archive storage, and data scanning rates apply
 // to exported control plane logs. For more information, see Amazon CloudWatch
 // Pricing (http://aws.amazon.com/cloudwatch/pricing/).
 //
+// You can also use this API operation to enable or disable public and private
+// access to your cluster's Kubernetes API server endpoint. By default, public
+// access is enabled, and private access is disabled. For more information,
+// see Amazon EKS Cluster Endpoint Access Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)
+// in the Amazon EKS User Guide .
+//
+// At this time, you cannot update the subnets or security group IDs for an
+// existing cluster.
+//
 // Cluster updates are asynchronous, and they should finish within a few minutes.
 // During an update, the cluster status moves to UPDATING (this status transition
 // is eventually consistent). When the update is complete (either Failed or
@@ -707,7 +1085,7 @@ func (c *EKS) UpdateClusterConfigRequest(input *UpdateClusterConfigInput) (req *
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
-// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's
+// See the AWS API reference guide for Amazon Elastic Kubernetes Service's
 // API operation UpdateClusterConfig for usage and error information.
 //
 // Returned Error Codes:
@@ -798,7 +1176,7 @@ func (c *EKS) UpdateClusterVersionRequest(input *UpdateClusterVersionInput) (req
 	return
 }

-// UpdateClusterVersion API operation for Amazon Elastic Container Service for Kubernetes.
+// UpdateClusterVersion API operation for Amazon Elastic Kubernetes Service.
 //
 // Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster
 // continues to function during the update.
The response output includes an @@ -814,7 +1192,7 @@ func (c *EKS) UpdateClusterVersionRequest(input *UpdateClusterVersionInput) (req // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for Amazon Elastic Container Service for Kubernetes's +// See the AWS API reference guide for Amazon Elastic Kubernetes Service's // API operation UpdateClusterVersion for usage and error information. // // Returned Error Codes: @@ -867,7 +1245,7 @@ func (c *EKS) UpdateClusterVersionWithContext(ctx aws.Context, input *UpdateClus type Certificate struct { _ struct{} `type:"structure"` - // The base64 encoded certificate data required to communicate with your cluster. + // The Base64-encoded certificate data required to communicate with your cluster. // Add this to the certificate-authority-data section of the kubeconfig file // for your cluster. Data *string `locationName:"data" type:"string"` @@ -909,6 +1287,9 @@ type Cluster struct { // The endpoint for your Kubernetes API server. Endpoint *string `locationName:"endpoint" type:"string"` + // The identity provider information for the cluster. + Identity *Identity `locationName:"identity" type:"structure"` + // The logging configuration for your cluster. Logging *Logging `locationName:"logging" type:"structure"` @@ -917,7 +1298,7 @@ type Cluster struct { // The platform version of your Amazon EKS cluster. For more information, see // Platform Versions (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . PlatformVersion *string `locationName:"platformVersion" type:"string"` // The VPC configuration used by the cluster control plane. Amazon EKS VPC resources @@ -935,6 +1316,11 @@ type Cluster struct { // The current status of the cluster. Status *string `locationName:"status" type:"string" enum:"ClusterStatus"` + // The metadata that you apply to the cluster to assist with categorization + // and organization. Each tag consists of a key and an optional value, both + // of which you define. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The Kubernetes server version for the cluster. Version *string `locationName:"version" type:"string"` } @@ -979,6 +1365,12 @@ func (s *Cluster) SetEndpoint(v string) *Cluster { return s } +// SetIdentity sets the Identity field's value. +func (s *Cluster) SetIdentity(v *Identity) *Cluster { + s.Identity = v + return s +} + // SetLogging sets the Logging field's value. func (s *Cluster) SetLogging(v *Logging) *Cluster { s.Logging = v @@ -1015,6 +1407,12 @@ func (s *Cluster) SetStatus(v string) *Cluster { return s } +// SetTags sets the Tags field's value. +func (s *Cluster) SetTags(v map[string]*string) *Cluster { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *Cluster) SetVersion(v string) *Cluster { s.Version = &v @@ -1029,10 +1427,10 @@ type CreateClusterInput struct { ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // Enable or disable exporting the Kubernetes control plane logs for your cluster - // to CloudWatch Logs. By default, cluster control plane logs are not exported + // to CloudWatch Logs. By default, cluster control plane logs aren't exported // to CloudWatch Logs. 
For more information, see Amazon EKS Cluster Control // Plane Logs (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . // // CloudWatch Logs ingestion, archive storage, and data scanning rates apply // to exported control plane logs. For more information, see Amazon CloudWatch @@ -1049,7 +1447,7 @@ type CreateClusterInput struct { // see Cluster VPC Considerations (https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) // and Cluster Security Group Considerations (https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) // in the Amazon EKS User Guide. You must specify at least two subnets. You - // may specify up to five security groups, but we recommend that you use a dedicated + // can specify up to five security groups, but we recommend that you use a dedicated // security group for your cluster control plane. // // ResourcesVpcConfig is a required field @@ -1058,13 +1456,17 @@ type CreateClusterInput struct { // The Amazon Resource Name (ARN) of the IAM role that provides permissions // for Amazon EKS to make calls to other AWS API operations on your behalf. // For more information, see Amazon EKS Service IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . // // RoleArn is a required field RoleArn *string `locationName:"roleArn" type:"string" required:"true"` - // The desired Kubernetes version for your cluster. If you do not specify a - // value here, the latest version available in Amazon EKS is used. + // The metadata to apply to the cluster to assist with categorization and organization. + // Each tag consists of a key and an optional value, both of which you define. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + + // The desired Kubernetes version for your cluster. If you don't specify a value + // here, the latest version available in Amazon EKS is used. Version *string `locationName:"version" type:"string"` } @@ -1093,6 +1495,9 @@ func (s *CreateClusterInput) Validate() error { if s.RoleArn == nil { invalidParams.Add(request.NewErrParamRequired("RoleArn")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -1130,6 +1535,12 @@ func (s *CreateClusterInput) SetRoleArn(v string) *CreateClusterInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateClusterInput) SetTags(v map[string]*string) *CreateClusterInput { + s.Tags = v + return s +} + // SetVersion sets the Version field's value. func (s *CreateClusterInput) SetVersion(v string) *CreateClusterInput { s.Version = &v @@ -1374,24 +1785,24 @@ type ErrorDetail struct { // A brief description of the error. // - // * SubnetNotFound: One of the subnets associated with the cluster could - // not be found. + // * SubnetNotFound: We couldn't find one of the subnets associated with + // the cluster. // - // * SecurityGroupNotFound: One of the security groups associated with the - // cluster could not be found. + // * SecurityGroupNotFound: We couldn't find one of the security groups associated + // with the cluster. // // * EniLimitReached: You have reached the elastic network interface limit // for your account. 
// - // * IpNotAvailable: A subnet associated with the cluster does not have any + // * IpNotAvailable: A subnet associated with the cluster doesn't have any // free IP addresses. // - // * AccessDenied: You do not have permissions to perform the specified operation. + // * AccessDenied: You don't have permissions to perform the specified operation. // // * OperationNotPermitted: The service role associated with the cluster - // does not have the required access permissions for Amazon EKS. + // doesn't have the required access permissions for Amazon EKS. // - // * VpcIdNotFound: The VPC associated with the cluster could not be found. + // * VpcIdNotFound: We couldn't find the VPC associated with the cluster. ErrorCode *string `locationName:"errorCode" type:"string" enum:"ErrorCode"` // A more complete description of the error. @@ -1429,15 +1840,40 @@ func (s *ErrorDetail) SetResourceIds(v []*string) *ErrorDetail { return s } +// An object representing an identity provider for authentication credentials. +type Identity struct { + _ struct{} `type:"structure"` + + // The OpenID Connect (https://openid.net/connect/) identity provider information + // for the cluster. + Oidc *OIDC `locationName:"oidc" type:"structure"` +} + +// String returns the string representation +func (s Identity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Identity) GoString() string { + return s.String() +} + +// SetOidc sets the Oidc field's value. +func (s *Identity) SetOidc(v *OIDC) *Identity { + s.Oidc = v + return s +} + type ListClustersInput struct { _ struct{} `type:"structure"` // The maximum number of cluster results returned by ListClusters in paginated - // output. When this parameter is used, ListClusters only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another ListClusters + // output. When you use this parameter, ListClusters returns only maxResults + // results in a single page along with a nextToken response element. You can + // see the remaining results of the initial request by sending another ListClusters // request with the returned nextToken value. This value can be between 1 and - // 100. If this parameter is not used, then ListClusters returns up to 100 results + // 100. If you don't use this parameter, ListClusters returns up to 100 results // and a nextToken value if applicable. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` @@ -1446,7 +1882,7 @@ type ListClustersInput struct { // Pagination continues from the end of the previous results that returned the // nextToken value. // - // This token should be treated as an opaque identifier that is only used to + // This token should be treated as an opaque identifier that is used only to // retrieve the next items in a list and not for other programmatic purposes. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -1493,7 +1929,7 @@ type ListClustersOutput struct { Clusters []*string `locationName:"clusters" type:"list"` // The nextToken value to include in a future ListClusters request. When the - // results of a ListClusters request exceed maxResults, this value can be used + // results of a ListClusters request exceed maxResults, you can use this value // to retrieve the next page of results. This value is null when there are no // more results to return. 
NextToken *string `locationName:"nextToken" type:"string"` @@ -1521,19 +1957,84 @@ func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are Amazon EKS clusters. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type ListUpdatesInput struct { _ struct{} `type:"structure"` // The maximum number of update results returned by ListUpdates in paginated - // output. When this parameter is used, ListUpdates only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another ListUpdates + // output. When you use this parameter, ListUpdates returns only maxResults + // results in a single page along with a nextToken response element. You can + // see the remaining results of the initial request by sending another ListUpdates // request with the returned nextToken value. This value can be between 1 and - // 100. If this parameter is not used, then ListUpdates returns up to 100 results + // 100. If you don't use this parameter, ListUpdates returns up to 100 results // and a nextToken value if applicable. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // The name of the Amazon EKS cluster for which to list updates. + // The name of the Amazon EKS cluster to list updates for. // // Name is a required field Name *string `location:"uri" locationName:"name" type:"string" required:"true"` @@ -1596,7 +2097,7 @@ type ListUpdatesOutput struct { _ struct{} `type:"structure"` // The nextToken value to include in a future ListUpdates request. 
When the - // results of a ListUpdates request exceed maxResults, this value can be used + // results of a ListUpdates request exceed maxResults, you can use this value // to retrieve the next page of results. This value is null when there are no // more results to return. NextToken *string `locationName:"nextToken" type:"string"` @@ -1632,10 +2133,10 @@ func (s *ListUpdatesOutput) SetUpdateIds(v []*string) *ListUpdatesOutput { type LogSetup struct { _ struct{} `type:"structure"` - // If a log type is enabled, then that log type exports its control plane logs - // to CloudWatch Logs. If a log type is not enabled, then that log type does - // not export its control plane logs. Each individual log type can be enabled - // or disabled independently. + // If a log type is enabled, that log type exports its control plane logs to + // CloudWatch Logs. If a log type isn't enabled, that log type doesn't export + // its control plane logs. Each individual log type can be enabled or disabled + // independently. Enabled *bool `locationName:"enabled" type:"boolean"` // The available cluster control plane log types. @@ -1688,6 +2189,177 @@ func (s *Logging) SetClusterLogging(v []*LogSetup) *Logging { return s } +// An object representing the OpenID Connect (https://openid.net/connect/) identity +// provider information for the cluster. +type OIDC struct { + _ struct{} `type:"structure"` + + // The issuer URL for the OpenID Connect identity provider. + Issuer *string `locationName:"issuer" type:"string"` +} + +// String returns the string representation +func (s OIDC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OIDC) GoString() string { + return s.String() +} + +// SetIssuer sets the Issuer field's value. +func (s *OIDC) SetIssuer(v string) *OIDC { + s.Issuer = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, + // the supported resources are Amazon EKS clusters. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tags to add to the resource. A tag is an array of key-value pairs. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource from which to delete tags. + // Currently, the supported resources are Amazon EKS clusters. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The keys of the tags to be removed. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // An object representing an asynchronous update. type Update struct { _ struct{} `type:"structure"` @@ -1765,10 +2437,10 @@ type UpdateClusterConfigInput struct { ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // Enable or disable exporting the Kubernetes control plane logs for your cluster - // to CloudWatch Logs. By default, cluster control plane logs are not exported + // to CloudWatch Logs. By default, cluster control plane logs aren't exported // to CloudWatch Logs. For more information, see Amazon EKS Cluster Control // Plane Logs (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . // // CloudWatch Logs ingestion, archive storage, and data scanning rates apply // to exported control plane logs. 
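// A minimal sketch of enabling one log type through this input; svc is assumed
// to exist and the cluster name "demo" is invented:
//
//    _, err := svc.UpdateClusterConfig(&eks.UpdateClusterConfigInput{
//        Name: aws.String("demo"),
//        Logging: &eks.Logging{ClusterLogging: []*eks.LogSetup{{
//            Enabled: aws.Bool(true),
//            Types:   []*string{aws.String("api")},
//        }}},
//    })
//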
For more information, see Amazon CloudWatch @@ -1984,25 +2656,25 @@ type VpcConfigRequest struct { // Set this value to true to enable private access for your cluster's Kubernetes // API server endpoint. If you enable private access, Kubernetes API requests - // from within your cluster's VPC will use the private VPC endpoint. The default + // from within your cluster's VPC use the private VPC endpoint. The default // value for this parameter is false, which disables private access for your // Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint // Access Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . EndpointPrivateAccess *bool `locationName:"endpointPrivateAccess" type:"boolean"` // Set this value to false to disable public access for your cluster's Kubernetes // API server endpoint. If you disable public access, your cluster's Kubernetes - // API server can only receive requests from within the cluster VPC. The default + // API server can receive only requests from within the cluster VPC. The default // value for this parameter is true, which enables public access for your Kubernetes // API server. For more information, see Amazon EKS Cluster Endpoint Access // Control (https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - // in the Amazon EKS User Guide. + // in the Amazon EKS User Guide . EndpointPublicAccess *bool `locationName:"endpointPublicAccess" type:"boolean"` // Specify one or more security groups for the cross-account elastic network // interfaces that Amazon EKS creates to use to allow communication between - // your worker nodes and the Kubernetes control plane. If you do not specify + // your worker nodes and the Kubernetes control plane. If you don't specify // a security group, the default security group for your VPC is used. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` @@ -2052,13 +2724,13 @@ type VpcConfigResponse struct { // This parameter indicates whether the Amazon EKS private API server endpoint // is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes - // API requests that originate from within your cluster's VPC will use the private + // API requests that originate from within your cluster's VPC use the private // VPC endpoint instead of traversing the internet. EndpointPrivateAccess *bool `locationName:"endpointPrivateAccess" type:"boolean"` // This parameter indicates whether the Amazon EKS public API server endpoint // is enabled. If the Amazon EKS public API server endpoint is disabled, your - // cluster's Kubernetes API server can only receive requests that originate + // cluster's Kubernetes API server can receive only requests that originate // from within the cluster VPC. EndpointPublicAccess *bool `locationName:"endpointPublicAccess" type:"boolean"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/doc.go index 0f194107613..d30d7ad088a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/doc.go @@ -1,13 +1,13 @@ // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // Package eks provides the client and types for making API -// requests to Amazon Elastic Container Service for Kubernetes. 
+// requests to Amazon Elastic Kubernetes Service. // -// Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed -// service that makes it easy for you to run Kubernetes on AWS without needing -// to stand up or maintain your own Kubernetes control plane. Kubernetes is -// an open-source system for automating the deployment, scaling, and management -// of containerized applications. +// Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that +// makes it easy for you to run Kubernetes on AWS without needing to stand up +// or maintain your own Kubernetes control plane. Kubernetes is an open-source +// system for automating the deployment, scaling, and management of containerized +// applications. // // Amazon EKS runs up-to-date versions of the open-source Kubernetes software, // so you can use all the existing plugins and tooling from the Kubernetes community. @@ -24,7 +24,7 @@ // // Using the Client // -// To contact Amazon Elastic Container Service for Kubernetes with the SDK use the New function to create +// To contact Amazon Elastic Kubernetes Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. // @@ -34,7 +34,7 @@ // See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // -// See the Amazon Elastic Container Service for Kubernetes client EKS for more +// See the Amazon Elastic Kubernetes Service client EKS for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/eks/#New package eks diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/errors.go index b343c7e0b63..6fd3dd1d8c5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/errors.go @@ -4,6 +4,13 @@ package eks const ( + // ErrCodeBadRequestException for service response error code + // "BadRequestException". + // + // This exception is thrown if the request contains a semantic error. The precise + // meaning will depend on the API, and will be documented in the error message. + ErrCodeBadRequestException = "BadRequestException" + // ErrCodeClientException for service response error code // "ClientException". // @@ -26,6 +33,13 @@ const ( // the cluster and the associated operations. ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeNotFoundException for service response error code + // "NotFoundException". + // + // A service resource associated with the request could not be found. Clients + // should not retry such requests. + ErrCodeNotFoundException = "NotFoundException" + // ErrCodeResourceInUseException for service response error code // "ResourceInUseException". 
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/service.go index 185214633f5..9c3ae450660 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/eks/service.go @@ -12,7 +12,7 @@ import ( ) // EKS provides the API operation methods for making requests to -// Amazon Elastic Container Service for Kubernetes. See this package's package overview docs +// Amazon Elastic Kubernetes Service. See this package's package overview docs // for details on the service. // // EKS methods are safe to use concurrently. It is not safe to @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EKS { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "eks" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EKS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EKS { svc := &EKS{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index f0b65981d9c..d72e03e5ed4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -66,7 +66,7 @@ func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (r // by your tags. You can apply tags that represent business categories (such // as cost centers, application names, or owners) to organize your costs across // multiple services. For more information, see Using Cost Allocation Tags in -// Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html) +// Amazon ElastiCache (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html) // in the ElastiCache User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -211,6 +211,257 @@ func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressWithContext(ctx aws.Cont return out, req.Send() } +const opBatchApplyUpdateAction = "BatchApplyUpdateAction" + +// BatchApplyUpdateActionRequest generates a "aws/request.Request" representing the +// client's request for the BatchApplyUpdateAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchApplyUpdateAction for more information on using the BatchApplyUpdateAction +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchApplyUpdateActionRequest method. +// req, resp := client.BatchApplyUpdateActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/BatchApplyUpdateAction +func (c *ElastiCache) BatchApplyUpdateActionRequest(input *BatchApplyUpdateActionInput) (req *request.Request, output *BatchApplyUpdateActionOutput) { + op := &request.Operation{ + Name: opBatchApplyUpdateAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchApplyUpdateActionInput{} + } + + output = &BatchApplyUpdateActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchApplyUpdateAction API operation for Amazon ElastiCache. +// +// Apply the service update. For more information on service updates and applying +// them, see Applying Service Updates (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/applying-updates.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation BatchApplyUpdateAction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUpdateNotFoundFault "ServiceUpdateNotFoundFault" +// The service update doesn't exist +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/BatchApplyUpdateAction +func (c *ElastiCache) BatchApplyUpdateAction(input *BatchApplyUpdateActionInput) (*BatchApplyUpdateActionOutput, error) { + req, out := c.BatchApplyUpdateActionRequest(input) + return out, req.Send() +} + +// BatchApplyUpdateActionWithContext is the same as BatchApplyUpdateAction with the addition of +// the ability to pass a context and additional request options. +// +// See BatchApplyUpdateAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) BatchApplyUpdateActionWithContext(ctx aws.Context, input *BatchApplyUpdateActionInput, opts ...request.Option) (*BatchApplyUpdateActionOutput, error) { + req, out := c.BatchApplyUpdateActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchStopUpdateAction = "BatchStopUpdateAction" + +// BatchStopUpdateActionRequest generates a "aws/request.Request" representing the +// client's request for the BatchStopUpdateAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchStopUpdateAction for more information on using the BatchStopUpdateAction +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchStopUpdateActionRequest method. +// req, resp := client.BatchStopUpdateActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/BatchStopUpdateAction +func (c *ElastiCache) BatchStopUpdateActionRequest(input *BatchStopUpdateActionInput) (req *request.Request, output *BatchStopUpdateActionOutput) { + op := &request.Operation{ + Name: opBatchStopUpdateAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchStopUpdateActionInput{} + } + + output = &BatchStopUpdateActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchStopUpdateAction API operation for Amazon ElastiCache. +// +// Stop the service update. For more information on service updates and stopping +// them, see Stopping Service Updates (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/stopping-self-service-updates.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation BatchStopUpdateAction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUpdateNotFoundFault "ServiceUpdateNotFoundFault" +// The service update doesn't exist +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/BatchStopUpdateAction +func (c *ElastiCache) BatchStopUpdateAction(input *BatchStopUpdateActionInput) (*BatchStopUpdateActionOutput, error) { + req, out := c.BatchStopUpdateActionRequest(input) + return out, req.Send() +} + +// BatchStopUpdateActionWithContext is the same as BatchStopUpdateAction with the addition of +// the ability to pass a context and additional request options. +// +// See BatchStopUpdateAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) BatchStopUpdateActionWithContext(ctx aws.Context, input *BatchStopUpdateActionInput, opts ...request.Option) (*BatchStopUpdateActionOutput, error) { + req, out := c.BatchStopUpdateActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMigration = "CompleteMigration" + +// CompleteMigrationRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMigration for more information on using the CompleteMigration +// API call, and error handling. 
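+// An illustrative sketch of the batch operations defined above; svc, the
+// service update name, and the replication group ID are all assumed:
+//
+//    _, err := svc.BatchApplyUpdateAction(&elasticache.BatchApplyUpdateActionInput{
+//        ServiceUpdateName:   aws.String("elc-20191007-001"),
+//        ReplicationGroupIds: []*string{aws.String("my-group")},
+//    })
+//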
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMigrationRequest method. +// req, resp := client.CompleteMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigrationRequest(input *CompleteMigrationInput) (req *request.Request, output *CompleteMigrationOutput) { + op := &request.Operation{ + Name: opCompleteMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteMigrationInput{} + } + + output = &CompleteMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMigration API operation for Amazon ElastiCache. +// +// Complete the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation CompleteMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupNotUnderMigrationFault "ReplicationGroupNotUnderMigrationFault" +// The designated replication group is not available for data migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigration(input *CompleteMigrationInput) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + return out, req.Send() +} + +// CompleteMigrationWithContext is the same as CompleteMigration with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) CompleteMigrationWithContext(ctx aws.Context, input *CompleteMigrationInput, opts ...request.Option) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the @@ -263,62 +514,53 @@ func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *reques // create their own Amazon S3 buckets and copy snapshots to it. To control access // to your snapshots, use an IAM policy to control who has the ability to use // the CopySnapshot operation. 
For more information about using IAM to control -// the use of ElastiCache operations, see Exporting Snapshots (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) -// and Authentication & Access Control (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/IAM.html). +// the use of ElastiCache operations, see Exporting Snapshots (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html) +// and Authentication & Access Control (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/IAM.html). // // You could receive the following error messages. // // Error Messages // -// * Error Message: The S3 bucket %s is outside of the region. -// -// Solution: Create an Amazon S3 bucket in the same region as your snapshot. -// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket) +// * Error Message: The S3 bucket %s is outside of the region. Solution: +// Create an Amazon S3 bucket in the same region as your snapshot. For more +// information, see Step 1: Create an Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // -// * Error Message: The S3 bucket %s does not exist. -// -// Solution: Create an Amazon S3 bucket in the same region as your snapshot. -// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket) +// * Error Message: The S3 bucket %s does not exist. Solution: Create an +// Amazon S3 bucket in the same region as your snapshot. For more information, +// see Step 1: Create an Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // // * Error Message: The S3 bucket %s is not owned by the authenticated user. -// -// Solution: Create an Amazon S3 bucket in the same region as your snapshot. -// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket) +// Solution: Create an Amazon S3 bucket in the same region as your snapshot. +// For more information, see Step 1: Create an Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // // * Error Message: The authenticated user does not have sufficient permissions -// to perform the desired activity. -// -// Solution: Contact your system administrator to get the needed permissions. +// to perform the desired activity. Solution: Contact your system administrator +// to get the needed permissions. // // * Error Message: The S3 bucket %s already contains an object with key -// %s. -// -// Solution: Give the TargetSnapshotName a new and unique value. If exporting +// %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting // a snapshot, you could alternatively create a new Amazon S3 bucket and // use this same value for TargetSnapshotName. // -// * Error Message: ElastiCache has not been granted READ permissions %s -// on the S3 Bucket. -// -// Solution: Add List and Read permissions on the bucket. 
For more information, -// see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess) +// * Error Message: ElastiCache has not been granted READ permissions %s +// on the S3 Bucket. Solution: Add List and Read permissions on the bucket. +// For more information, see Step 2: Grant ElastiCache Access to Your Amazon +// S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the ElastiCache User Guide. // -// * Error Message: ElastiCache has not been granted WRITE permissions %s -// on the S3 Bucket. -// -// Solution: Add Upload/Delete permissions on the bucket. For more information, -// see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess) +// * Error Message: ElastiCache has not been granted WRITE permissions %s +// on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. +// For more information, see Step 2: Grant ElastiCache Access to Your Amazon +// S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the ElastiCache User Guide. // -// * Error Message: ElastiCache has not been granted READ_ACP permissions -// %s on the S3 Bucket. -// -// Solution: Add View Permissions on the bucket. For more information, see Step -// 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess) +// * Error Message: ElastiCache has not been granted READ_ACP permissions +// %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For +// more information, see Step 2: Grant ElastiCache Access to Your Amazon +// S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the ElastiCache User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -439,7 +681,8 @@ func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" // The requested cache security group name does not refer to an existing cache @@ -554,10 +797,10 @@ func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParamet // created CacheParameterGroup you can change the values of specific parameters. // For more information, see: // -// * ModifyCacheParameterGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html) +// * ModifyCacheParameterGroup (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html) // in the ElastiCache API Reference. 
// -// * Parameters and Parameter Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.html) +// * Parameters and Parameter Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.html) // in the ElastiCache User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -657,7 +900,7 @@ func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurity // Cache security groups are only used when you are creating a cluster outside // of an Amazon Virtual Private Cloud (Amazon VPC). If you are creating a cluster // inside of a VPC, use a cache subnet group instead. For more information, -// see CreateCacheSubnetGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html). +// see CreateCacheSubnetGroup (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -848,7 +1091,7 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // replicas. Writes to the primary are asynchronously propagated to the replicas. // // A Redis (cluster mode enabled) replication group is a collection of 1 to -// 15 node groups (shards). Each node group (shard) has one read/write primary +// 90 node groups (shards). Each node group (shard) has one read/write primary // node and up to 5 read-only replica nodes. Writes to the primary are asynchronously // propagated to the replicas. Redis (cluster mode enabled) replication groups // partition the data across node groups (shards). @@ -859,7 +1102,7 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // group after it has been created. However, if you need to increase or decrease // the number of node groups (console: shards), you can avail yourself of ElastiCache // for Redis' enhanced backup and restore. For more information, see Restoring -// From a Backup with Cluster Resizing (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-restoring.html) +// From a Backup with Cluster Resizing (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-restoring.html) // in the ElastiCache User Guide. // // This operation is valid for Redis only. @@ -883,7 +1126,8 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" // The requested cache security group name does not refer to an existing cache @@ -920,7 +1164,7 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" // The request cannot be processed because it would exceed the maximum allowed // number of node groups (shards) in a single replication group. 
The default -// maximum is 15 +// maximum is 90 // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -1136,7 +1380,8 @@ func (c *ElastiCache) DecreaseReplicaCountRequest(input *DecreaseReplicaCountInp // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" // The request cannot be processed because it would exceed the allowed number @@ -1145,7 +1390,7 @@ func (c *ElastiCache) DecreaseReplicaCountRequest(input *DecreaseReplicaCountInp // * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" // The request cannot be processed because it would exceed the maximum allowed // number of node groups (shards) in a single replication group. The default -// maximum is 15 +// maximum is 90 // // * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" // The request cannot be processed because it would exceed the allowed number @@ -1234,11 +1479,17 @@ func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) // a successful response from this operation, Amazon ElastiCache immediately // begins deleting the cluster; you cannot cancel or revert this operation. // -// This operation cannot be used to delete a cluster that is the last read replica -// of a replication group or node group (shard) that has Multi-AZ mode enabled -// or a cluster from a Redis (cluster mode enabled) replication group. +// This operation is not valid for: +// +// * Redis (cluster mode enabled) clusters +// +// * A cluster that is the last read replica of a replication group +// +// * A node group (shard) that has Multi-AZ mode enabled +// +// * A cluster from a Redis (cluster mode enabled) replication group // -// This operation is not valid for Redis (cluster mode enabled) clusters. +// * A cluster that is not in the available state // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1902,7 +2153,7 @@ func (c *ElastiCache) DescribeCacheClustersWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeCacheClusters operation. // pageNum := 0 // err := client.DescribeCacheClustersPages(params, -// func(page *DescribeCacheClustersOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1934,10 +2185,12 @@ func (c *ElastiCache) DescribeCacheClustersPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2032,7 +2285,7 @@ func (c *ElastiCache) DescribeCacheEngineVersionsWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation. 
// pageNum := 0 // err := client.DescribeCacheEngineVersionsPages(params, -// func(page *DescribeCacheEngineVersionsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheEngineVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2064,10 +2317,12 @@ func (c *ElastiCache) DescribeCacheEngineVersionsPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheEngineVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheEngineVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2176,7 +2431,7 @@ func (c *ElastiCache) DescribeCacheParameterGroupsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation. // pageNum := 0 // err := client.DescribeCacheParameterGroupsPages(params, -// func(page *DescribeCacheParameterGroupsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2208,10 +2463,12 @@ func (c *ElastiCache) DescribeCacheParameterGroupsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheParameterGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheParameterGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2318,7 +2575,7 @@ func (c *ElastiCache) DescribeCacheParametersWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeCacheParameters operation. // pageNum := 0 // err := client.DescribeCacheParametersPages(params, -// func(page *DescribeCacheParametersOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2350,10 +2607,12 @@ func (c *ElastiCache) DescribeCacheParametersPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2409,6 +2668,7 @@ func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSec // // Returns a list of cache security group descriptions. If a cache security // group name is specified, the list contains only the description of that group. +// This is applicable only when you have ElastiCache in Classic setup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2461,7 +2721,7 @@ func (c *ElastiCache) DescribeCacheSecurityGroupsWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation.
// pageNum := 0 // err := client.DescribeCacheSecurityGroupsPages(params, -// func(page *DescribeCacheSecurityGroupsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2493,10 +2753,12 @@ func (c *ElastiCache) DescribeCacheSecurityGroupsPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2551,7 +2813,9 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubne // DescribeCacheSubnetGroups API operation for Amazon ElastiCache. // // Returns a list of cache subnet group descriptions. If a subnet group name -// is specified, the list contains only the description of that group. +// is specified, the list contains only the description of that group. This +// is applicable only when you have ElastiCache in VPC setup. All ElastiCache +// clusters now launch in VPC by default. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2598,7 +2862,7 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation. // pageNum := 0 // err := client.DescribeCacheSubnetGroupsPages(params, -// func(page *DescribeCacheSubnetGroupsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeCacheSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2630,10 +2894,12 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeCacheSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeCacheSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2737,7 +3003,7 @@ func (c *ElastiCache) DescribeEngineDefaultParametersWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. // pageNum := 0 // err := client.DescribeEngineDefaultParametersPages(params, -// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// func(page *elasticache.DescribeEngineDefaultParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2769,10 +3035,12 @@ func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2880,7 +3148,7 @@ func (c *ElastiCache) DescribeEventsWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeEvents operation. 
// pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2912,10 +3180,12 @@ func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3025,7 +3295,7 @@ func (c *ElastiCache) DescribeReplicationGroupsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a DescribeReplicationGroups operation. // pageNum := 0 // err := client.DescribeReplicationGroupsPages(params, -// func(page *DescribeReplicationGroupsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3057,10 +3327,12 @@ func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReplicationGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReplicationGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3167,7 +3439,7 @@ func (c *ElastiCache) DescribeReservedCacheNodesWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation. // pageNum := 0 // err := client.DescribeReservedCacheNodesPages(params, -// func(page *DescribeReservedCacheNodesOutput, lastPage bool) bool { +// func(page *elasticache.DescribeReservedCacheNodesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3199,10 +3471,12 @@ func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedCacheNodesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedCacheNodesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3308,7 +3582,7 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsWithContext(ctx aws.Con // // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation. // pageNum := 0 // err := client.DescribeReservedCacheNodesOfferingsPages(params, -// func(page *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { +// func(page *elasticache.DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3340,42 +3614,44 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aw }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedCacheNodesOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedCacheNodesOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opDescribeSnapshots = "DescribeSnapshots" +const opDescribeServiceUpdates = "DescribeServiceUpdates" -// DescribeSnapshotsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSnapshots operation. The "output" return +// DescribeServiceUpdatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServiceUpdates operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeSnapshots for more information on using the DescribeSnapshots +// See DescribeServiceUpdates for more information on using the DescribeServiceUpdates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeSnapshotsRequest method. -// req, resp := client.DescribeSnapshotsRequest(params) +// // Example sending a request using the DescribeServiceUpdatesRequest method. +// req, resp := client.DescribeServiceUpdatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots -func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates +func (c *ElastiCache) DescribeServiceUpdatesRequest(input *DescribeServiceUpdatesInput) (req *request.Request, output *DescribeServiceUpdatesOutput) { op := &request.Operation{ - Name: opDescribeSnapshots, + Name: opDescribeServiceUpdates, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3387,36 +3663,28 @@ func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (r } if input == nil { - input = &DescribeSnapshotsInput{} + input = &DescribeServiceUpdatesInput{} } - output = &DescribeSnapshotsOutput{} + output = &DescribeServiceUpdatesOutput{} req = c.newRequest(op, input, output) return } -// DescribeSnapshots API operation for Amazon ElastiCache. -// -// Returns information about cluster or replication group snapshots. By default, -// DescribeSnapshots lists all of your snapshots; it can optionally describe -// a single snapshot, or just the snapshots associated with a particular cache -// cluster. +// DescribeServiceUpdates API operation for Amazon ElastiCache. // -// This operation is valid for Redis only. +// Returns details of the service updates. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeSnapshots for usage and error information. +// API operation DescribeServiceUpdates for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" -// The requested snapshot name does not refer to an existing snapshot. +// * ErrCodeServiceUpdateNotFoundFault "ServiceUpdateNotFoundFault" +// The service update doesn't exist // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid.
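The renamed DescribeServiceUpdates operation above is wired with the SDK's standard Marker/MaxRecords paginator, so callers would normally drive it through the generated Pages helper instead of threading the marker token by hand. A minimal usage sketch against this vendored revision follows; the session setup and the "available" status filter are illustrative assumptions on my part, not part of the generated code.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	// Assumes credentials and region come from the default chain
	// (environment variables or shared config).
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	input := &elasticache.DescribeServiceUpdatesInput{
		// Hypothetical filter: restrict results to updates still available.
		ServiceUpdateStatus: []*string{aws.String("available")},
		MaxRecords:          aws.Int64(20),
	}

	// The Pages helper re-issues the request with the returned Marker
	// until the last page is reached or the callback returns false.
	err := svc.DescribeServiceUpdatesPages(input,
		func(page *elasticache.DescribeServiceUpdatesOutput, lastPage bool) bool {
			for _, su := range page.ServiceUpdates {
				fmt.Printf("%s\t%s\n",
					aws.StringValue(su.ServiceUpdateName),
					aws.StringValue(su.ServiceUpdateStatus))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}

Note that the loop rewrite repeated throughout these hunks (for p.Next() { if !fn(...) { break } }) is also a small behavioral fix for such callers: the old cont flag was only checked after p.Next() had already run, so returning false from the callback could still trigger one extra page request, whereas the new form breaks out before the next fetch.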
@@ -3424,75 +3692,368 @@ func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (r // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots -func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { - req, out := c.DescribeSnapshotsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates +func (c *ElastiCache) DescribeServiceUpdates(input *DescribeServiceUpdatesInput) (*DescribeServiceUpdatesOutput, error) { + req, out := c.DescribeServiceUpdatesRequest(input) return out, req.Send() } -// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of +// DescribeServiceUpdatesWithContext is the same as DescribeServiceUpdates with the addition of // the ability to pass a context and additional request options. // -// See DescribeSnapshots for details on how to use this API operation. +// See DescribeServiceUpdates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { - req, out := c.DescribeSnapshotsRequest(input) +func (c *ElastiCache) DescribeServiceUpdatesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, opts ...request.Option) (*DescribeServiceUpdatesOutput, error) { + req, out := c.DescribeServiceUpdatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// DescribeServiceUpdatesPages iterates over the pages of a DescribeServiceUpdates operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeSnapshots method for more information on how to use this operation. +// See DescribeServiceUpdates method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// // Example iterating over at most 3 pages of a DescribeServiceUpdates operation. 
// pageNum := 0 -// err := client.DescribeSnapshotsPages(params, -// func(page *DescribeSnapshotsOutput, lastPage bool) bool { +// err := client.DescribeServiceUpdatesPages(params, +// func(page *elasticache.DescribeServiceUpdatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { - return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeServiceUpdatesPages(input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool) error { + return c.DescribeServiceUpdatesPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// DescribeServiceUpdatesPagesWithContext same as DescribeServiceUpdatesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeServiceUpdatesPagesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeSnapshotsInput + var inCpy *DescribeServiceUpdatesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeSnapshotsRequest(inCpy) + req, _ := c.DescribeServiceUpdatesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeServiceUpdatesOutput), !p.HasNextPage()) { + break + } } + + return p.Err() +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSnapshots for more information on using the DescribeSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSnapshotsRequest method. 
+// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots +func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + output = &DescribeSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSnapshots API operation for Amazon ElastiCache. +// +// Returns information about cluster or replication group snapshots. By default, +// DescribeSnapshots lists all of your snapshots; it can optionally describe +// a single snapshot, or just the snapshots associated with a particular cache +// cluster. +// +// This operation is valid for Redis only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation DescribeSnapshots for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. +// +// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" +// The requested snapshot name does not refer to an existing snapshot. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots +func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + return out, req.Send() +} + +// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *elasticache.DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { + return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeUpdateActions = "DescribeUpdateActions" + +// DescribeUpdateActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUpdateActions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeUpdateActions for more information on using the DescribeUpdateActions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeUpdateActionsRequest method. +// req, resp := client.DescribeUpdateActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions +func (c *ElastiCache) DescribeUpdateActionsRequest(input *DescribeUpdateActionsInput) (req *request.Request, output *DescribeUpdateActionsOutput) { + op := &request.Operation{ + Name: opDescribeUpdateActions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeUpdateActionsInput{} + } + + output = &DescribeUpdateActionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeUpdateActions API operation for Amazon ElastiCache. +// +// Returns details of the update actions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error.
+// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation DescribeUpdateActions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions +func (c *ElastiCache) DescribeUpdateActions(input *DescribeUpdateActionsInput) (*DescribeUpdateActionsOutput, error) { + req, out := c.DescribeUpdateActionsRequest(input) + return out, req.Send() +} + +// DescribeUpdateActionsWithContext is the same as DescribeUpdateActions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeUpdateActions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeUpdateActionsWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, opts ...request.Option) (*DescribeUpdateActionsOutput, error) { + req, out := c.DescribeUpdateActionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeUpdateActionsPages iterates over the pages of a DescribeUpdateActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeUpdateActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeUpdateActions operation. +// pageNum := 0 +// err := client.DescribeUpdateActionsPages(params, +// func(page *elasticache.DescribeUpdateActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeUpdateActionsPages(input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool) error { + return c.DescribeUpdateActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeUpdateActionsPagesWithContext same as DescribeUpdateActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeUpdateActionsPagesWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeUpdateActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeUpdateActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeUpdateActionsOutput), !p.HasNextPage()) { + break + } + } + return p.Err() } @@ -3567,7 +4128,8 @@ func (c *ElastiCache) IncreaseReplicaCountRequest(input *IncreaseReplicaCountInp // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" // The request cannot be processed because it would exceed the allowed number @@ -3576,7 +4138,7 @@ func (c *ElastiCache) IncreaseReplicaCountRequest(input *IncreaseReplicaCountInp // * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" // The request cannot be processed because it would exceed the maximum allowed // number of node groups (shards) in a single replication group. The default -// maximum is 15 +// maximum is 90 // // * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" // The request cannot be processed because it would exceed the allowed number @@ -3585,6 +4147,9 @@ func (c *ElastiCache) IncreaseReplicaCountRequest(input *IncreaseReplicaCountInp // * ErrCodeNoOperationFault "NoOperationFault" // The operation was not performed because no changes were required. // +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // @@ -3658,10 +4223,10 @@ func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowed // ListAllowedNodeTypeModifications API operation for Amazon ElastiCache. // // Lists all available node types that you can scale your Redis cluster's or -// replication group's current node type up to. +// replication group's current node type to. // // When you use the ModifyCacheCluster or ModifyReplicationGroup operations -// to scale up your cluster or replication group, the value of the CacheNodeType +// to scale your cluster or replication group, the value of the CacheNodeType // parameter must be one of the node types returned by this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3759,7 +4324,7 @@ func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput // an error. // // You can have a maximum of 50 cost allocation tags on an ElastiCache resource. -// For more information, see Monitoring Costs with Tags (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html). +// For more information, see Monitoring Costs with Tags (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3864,7 +4429,8 @@ func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone.
For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" // The requested cluster ID does not refer to an existing cluster. @@ -4148,11 +4714,10 @@ func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGrou // to change a cluster's node type or engine version. For more information, // see: // -// * Scaling for Amazon ElastiCache for Redis—Redis (cluster mode enabled) -// (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html) +// * Scaling for Amazon ElastiCache for Redis (cluster mode enabled) (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html) // in the ElastiCache User Guide // -// * ModifyReplicationGroupShardConfiguration (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html) +// * ModifyReplicationGroupShardConfiguration (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html) // in the ElastiCache API Reference // // This operation is valid for Redis only. @@ -4179,7 +4744,8 @@ func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGrou // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" // The requested cluster ID does not refer to an existing cluster. @@ -4203,6 +4769,9 @@ func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGrou // * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" // The VPC network is in an invalid state. // +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // @@ -4300,17 +4869,21 @@ func (c *ElastiCache) ModifyReplicationGroupShardConfigurationRequest(input *Mod // // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability -// Zone. +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // // * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" // The request cannot be processed because it would exceed the maximum allowed // number of node groups (shards) in a single replication group. The default -// maximum is 15 +// maximum is 90 // // * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" // The request cannot be processed because it would exceed the allowed number // of cache nodes per customer. // +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. 
+// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // @@ -4804,6 +5377,94 @@ func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context return out, req.Send() } +const opStartMigration = "StartMigration" + +// StartMigrationRequest generates a "aws/request.Request" representing the +// client's request for the StartMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMigration for more information on using the StartMigration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartMigrationRequest method. +// req, resp := client.StartMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigrationRequest(input *StartMigrationInput) (req *request.Request, output *StartMigrationOutput) { + op := &request.Operation{ + Name: opStartMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMigrationInput{} + } + + output = &StartMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMigration API operation for Amazon ElastiCache. +// +// Start the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation StartMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupAlreadyUnderMigrationFault "ReplicationGroupAlreadyUnderMigrationFault" +// The targeted replication group is not available. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigration(input *StartMigrationInput) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + return out, req.Send() +} + +// StartMigrationWithContext is the same as StartMigration with the addition of +// the ability to pass a context and additional request options. +// +// See StartMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ElastiCache) StartMigrationWithContext(ctx aws.Context, input *StartMigrationInput, opts ...request.Option) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestFailover = "TestFailover" // TestFailoverRequest generates a "aws/request.Request" representing the @@ -4868,29 +5529,17 @@ func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *reques // * To determine whether the node replacement is complete you can check // Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache // API. Look for the following automatic failover related events, listed -// here in order of occurrance: -// -// Replication group message: Test Failover API called for node group <node-group-id> -// -// Cache cluster message: Failover from master node <primary-node-id> to replica -// node <node-id> completed -// -// Replication group message: Failover from master node <primary-node-id> to -// replica node <node-id> completed -// -// Cache cluster message: Recovering cache nodes <node-id> -// -// Cache cluster message: Finished recovery for cache nodes <node-id> -// -// For more information see: -// -// Viewing ElastiCache Events (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html) -// in the ElastiCache User Guide -// -// DescribeEvents (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html) +// here in order of occurrence: Replication group message: Test Failover +// API called for node group <node-group-id> Cache cluster message: Failover +// from master node <primary-node-id> to replica node <node-id> completed +// Replication group message: Failover from master node <primary-node-id> +// to replica node <node-id> completed Cache cluster message: Recovering +// cache nodes <node-id> Cache cluster message: Finished recovery for cache +// nodes <node-id> For more information see: Viewing ElastiCache Events (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html) +// in the ElastiCache User Guide DescribeEvents (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html) // in the ElastiCache API Reference // -// Also see, Testing Multi-AZ with Automatic Failover (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html#auto-failover-test) +// Also see, Testing Multi-AZ with Automatic Failover (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html#auto-failover-test) // in the ElastiCache User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4921,6 +5570,9 @@ func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *reques // * ErrCodeTestFailoverNotAvailableFault "TestFailoverNotAvailableFault" // The TestFailover action is not available. // +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // @@ -4959,7 +5611,7 @@ type AddTagsToResourceInput struct { // resources are cluster and snapshot. // // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
// // ResourceName is a required field ResourceName *string `type:"string" required:"true"` @@ -5132,6 +5784,182 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { return s } +type BatchApplyUpdateActionInput struct { + _ struct{} `type:"structure"` + + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + // + // ServiceUpdateName is a required field + ServiceUpdateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchApplyUpdateActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchApplyUpdateActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchApplyUpdateActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchApplyUpdateActionInput"} + if s.ServiceUpdateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchApplyUpdateActionInput) SetCacheClusterIds(v []*string) *BatchApplyUpdateActionInput { + s.CacheClusterIds = v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *BatchApplyUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchApplyUpdateActionInput { + s.ReplicationGroupIds = v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *BatchApplyUpdateActionInput) SetServiceUpdateName(v string) *BatchApplyUpdateActionInput { + s.ServiceUpdateName = &v + return s +} + +type BatchApplyUpdateActionOutput struct { + _ struct{} `type:"structure"` + + // Update actions that have been processed successfully + ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` + + // Update actions that haven't been processed successfully + UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` +} + +// String returns the string representation +func (s BatchApplyUpdateActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchApplyUpdateActionOutput) GoString() string { + return s.String() +} + +// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. +func (s *BatchApplyUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchApplyUpdateActionOutput { + s.ProcessedUpdateActions = v + return s +} + +// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. 
+func (s *BatchApplyUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchApplyUpdateActionOutput { + s.UnprocessedUpdateActions = v + return s +} + +type BatchStopUpdateActionInput struct { + _ struct{} `type:"structure"` + + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + // + // ServiceUpdateName is a required field + ServiceUpdateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchStopUpdateActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopUpdateActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchStopUpdateActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchStopUpdateActionInput"} + if s.ServiceUpdateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchStopUpdateActionInput) SetCacheClusterIds(v []*string) *BatchStopUpdateActionInput { + s.CacheClusterIds = v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *BatchStopUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchStopUpdateActionInput { + s.ReplicationGroupIds = v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *BatchStopUpdateActionInput) SetServiceUpdateName(v string) *BatchStopUpdateActionInput { + s.ServiceUpdateName = &v + return s +} + +type BatchStopUpdateActionOutput struct { + _ struct{} `type:"structure"` + + // Update actions that have been processed successfully + ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` + + // Update actions that haven't been processed successfully + UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` +} + +// String returns the string representation +func (s BatchStopUpdateActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopUpdateActionOutput) GoString() string { + return s.String() +} + +// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. +func (s *BatchStopUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchStopUpdateActionOutput { + s.ProcessedUpdateActions = v + return s +} + +// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. +func (s *BatchStopUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchStopUpdateActionOutput { + s.UnprocessedUpdateActions = v + return s +} + // Contains all of the attributes of a specific cluster. type CacheCluster struct { _ struct{} `type:"structure"` @@ -5143,7 +5971,7 @@ type CacheCluster struct { // to true when you create a cluster. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. 
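// Editor's note: a minimal, hypothetical sketch (not part of the vendored diff)
// of the BatchApplyUpdateAction input/output types defined above. The service
// update name and replication group IDs are placeholders; only ServiceUpdateName
// is required, which Validate enforces before the request is sent.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.BatchApplyUpdateAction(&elasticache.BatchApplyUpdateActionInput{
		ServiceUpdateName:   aws.String("elc-20191007-001"), // required; placeholder
		ReplicationGroupIds: aws.StringSlice([]string{"my-repl-group-1", "my-repl-group-2"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Partial failures surface as UnprocessedUpdateActions rather than an error.
	fmt.Printf("processed=%d unprocessed=%d\n",
		len(out.ProcessedUpdateActions), len(out.UnprocessedUpdateActions))
}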
// // Default: false AtRestEncryptionEnabled *bool `type:"boolean"` @@ -5153,6 +5981,9 @@ type CacheCluster struct { // Default: false AuthTokenEnabled *bool `type:"boolean"` + // The date the auth token was last modified + AuthTokenLastModifiedDate *time.Time `type:"timestamp"` + // This parameter is currently disabled. AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -5174,64 +6005,36 @@ type CacheCluster struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: - // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). - // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. - // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. - // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. + // Additional node type info // - // For a complete listing of node types and specifications, see: + // * All current generation instance types are created in Amazon VPC by default. 
// - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // A list of cache nodes that are members of the cluster. @@ -5333,7 +6136,7 @@ type CacheCluster struct { // to true when you create a cluster. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. // // Default: false TransitEncryptionEnabled *bool `type:"boolean"` @@ -5361,6 +6164,12 @@ func (s *CacheCluster) SetAuthTokenEnabled(v bool) *CacheCluster { return s } +// SetAuthTokenLastModifiedDate sets the AuthTokenLastModifiedDate field's value. +func (s *CacheCluster) SetAuthTokenLastModifiedDate(v time.Time) *CacheCluster { + s.AuthTokenLastModifiedDate = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *CacheCluster) SetAutoMinorVersionUpgrade(v bool) *CacheCluster { s.AutoMinorVersionUpgrade = &v @@ -5511,7 +6320,8 @@ type CacheEngineVersion struct { // The name of the cache parameter group family associated with this cache engine. // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | CacheParameterGroupFamily *string `type:"string"` // The name of the cache engine. @@ -5569,64 +6379,36 @@ func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// -// * General purpose: -// -// Current generation: -// -// T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium -// -// M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge -// -// M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, -// cache.m4.10xlarge -// -// Previous generation: (not recommended) -// -// T1 node types:cache.t1.micro -// -// M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge -// -// * Compute optimized: -// -// Previous generation: (not recommended) -// -// C1 node types:cache.c1.xlarge -// -// * Memory optimized: -// -// Current generation: -// -// R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// * General purpose: Current generation: M5 node types: cache.m5.large, +// cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, +// cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, +// cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, +// cache.t2.medium Previous generation: (not recommended) T1 node types: +// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, +// cache.m3.2xlarge +// +// * Compute optimized: Previous generation: (not recommended) C1 node types: +// cache.c1.xlarge +// +// * Memory optimized: Current generation: R5 node types: cache.r5.large, +// cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, +// cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, +// cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: +// (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // -// R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, -// cache.r4.8xlarge, cache.r4.16xlarge +// Additional node type info // -// Previous generation: (not recommended) +// * All current generation instance types are created in Amazon VPC by default. // -// M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// * Redis append-only files (AOF) are not supported for T1 or T2 instances. // -// Notes: +// * Redis Multi-AZ with automatic failover is not supported on T1 instances. // -// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon -// VPC). -// -// * Redis (cluster mode disabled): Redis backup/restore is not supported -// on T1 and T2 instances. -// -// * Redis (cluster mode enabled): Backup/restore is not supported on T1 -// instances. -// -// * Redis Append-only files (AOF) functionality is not supported for T1 -// or T2 instances. -// -// For a complete listing of node types and specifications, see: -// -// * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) -// -// * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) -// -// * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) +// * Redis configuration variables appendonly and appendfsync are not supported +// on Redis version 2.8.22 and later. 
type CacheNode struct { _ struct{} `type:"structure"` @@ -5722,7 +6504,7 @@ type CacheNodeTypeSpecificParameter struct { // Indicates whether a change to the parameter is applied immediately or requires // a reboot for the change to be applied. You can force a reboot or wait until // the next maintenance window's reboot. For more information, see Rebooting - // a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). + // a Cluster (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). ChangeType *string `type:"string" enum:"ChangeType"` // The valid data type for the parameter. @@ -5843,55 +6625,144 @@ func (s *CacheNodeTypeSpecificValue) SetValue(v string) *CacheNodeTypeSpecificVa return s } -// Represents the output of a CreateCacheParameterGroup operation. -type CacheParameterGroup struct { +// The status of the service update on the cache node +type CacheNodeUpdateStatus struct { _ struct{} `type:"structure"` - // The name of the cache parameter group family that this cache parameter group - // is compatible with. - // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 - CacheParameterGroupFamily *string `type:"string"` + // The node ID of the cache cluster + CacheNodeId *string `type:"string"` - // The name of the cache parameter group. - CacheParameterGroupName *string `type:"string"` + // The deletion date of the node + NodeDeletionDate *time.Time `type:"timestamp"` - // The description for this cache parameter group. - Description *string `type:"string"` + // The end date of the update for a node + NodeUpdateEndDate *time.Time `type:"timestamp"` + + // Reflects whether the update was initiated by the customer or automatically + // applied + NodeUpdateInitiatedBy *string `type:"string" enum:"NodeUpdateInitiatedBy"` + + // The date when the update is triggered + NodeUpdateInitiatedDate *time.Time `type:"timestamp"` + + // The start date of the update for a node + NodeUpdateStartDate *time.Time `type:"timestamp"` + + // The update status of the node + NodeUpdateStatus *string `type:"string" enum:"NodeUpdateStatus"` + + // The date when the NodeUpdateStatus was last modified + NodeUpdateStatusModifiedDate *time.Time `type:"timestamp"` } // String returns the string representation -func (s CacheParameterGroup) String() string { +func (s CacheNodeUpdateStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheParameterGroup) GoString() string { +func (s CacheNodeUpdateStatus) GoString() string { return s.String() } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *CacheParameterGroup) SetCacheParameterGroupFamily(v string) *CacheParameterGroup { - s.CacheParameterGroupFamily = &v +// SetCacheNodeId sets the CacheNodeId field's value. +func (s *CacheNodeUpdateStatus) SetCacheNodeId(v string) *CacheNodeUpdateStatus { + s.CacheNodeId = &v return s } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CacheParameterGroup) SetCacheParameterGroupName(v string) *CacheParameterGroup { - s.CacheParameterGroupName = &v +// SetNodeDeletionDate sets the NodeDeletionDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeDeletionDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeDeletionDate = &v return s } -// SetDescription sets the Description field's value.
-func (s *CacheParameterGroup) SetDescription(v string) *CacheParameterGroup { - s.Description = &v +// SetNodeUpdateEndDate sets the NodeUpdateEndDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateEndDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateEndDate = &v return s } -// Represents the output of one of the following operations: -// -// * ModifyCacheParameterGroup -// +// SetNodeUpdateInitiatedBy sets the NodeUpdateInitiatedBy field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedBy(v string) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedBy = &v + return s +} + +// SetNodeUpdateInitiatedDate sets the NodeUpdateInitiatedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedDate = &v + return s +} + +// SetNodeUpdateStartDate sets the NodeUpdateStartDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStartDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStartDate = &v + return s +} + +// SetNodeUpdateStatus sets the NodeUpdateStatus field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatus(v string) *CacheNodeUpdateStatus { + s.NodeUpdateStatus = &v + return s +} + +// SetNodeUpdateStatusModifiedDate sets the NodeUpdateStatusModifiedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatusModifiedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStatusModifiedDate = &v + return s +} + +// Represents the output of a CreateCacheParameterGroup operation. +type CacheParameterGroup struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family that this cache parameter group + // is compatible with. + // + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The description for this cache parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroup) GoString() string { + return s.String() +} + +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *CacheParameterGroup) SetCacheParameterGroupFamily(v string) *CacheParameterGroup { + s.CacheParameterGroupFamily = &v + return s +} + +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CacheParameterGroup) SetCacheParameterGroupName(v string) *CacheParameterGroup { + s.CacheParameterGroupName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CacheParameterGroup) SetDescription(v string) *CacheParameterGroup { + s.Description = &v + return s +} + +// Represents the output of one of the following operations: +// +// * ModifyCacheParameterGroup +// // * ResetCacheParameterGroup type CacheParameterGroupNameMessage struct { _ struct{} `type:"structure"` @@ -6108,6 +6979,78 @@ func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup { return s } +type CompleteMigrationInput struct { + _ struct{} `type:"structure"` + + // Forces the migration to stop without ensuring that data is in sync. 
Use this option only to abort the migration; + // it is not recommended when the application wants to continue migrating data + // to ElastiCache. + Force *bool `type:"boolean"` + + // The ID of the replication group to which data is being migrated. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMigrationInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. +func (s *CompleteMigrationInput) SetForce(v bool) *CompleteMigrationInput { + s.Force = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CompleteMigrationInput) SetReplicationGroupId(v string) *CompleteMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type CompleteMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CompleteMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CompleteMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *CompleteMigrationOutput { + s.ReplicationGroup = v + return s +} + // Node group (shard) configuration options when adding or removing replicas. // Each node group (shard) configuration has the following members: NodeGroupId, // NewReplicaCount, and PreferredAvailabilityZones. @@ -6120,11 +7063,8 @@ type ConfigureShard struct { // // The minimum number of replicas in a shard or replication group is: // - // * Redis (cluster mode disabled) - // - // If Multi-AZ with Automatic Failover is enabled: 1 - // - // If Multi-AZ with Automatic Failover is not enable: 0 + // * Redis (cluster mode disabled) If Multi-AZ with Automatic Failover is + // enabled: 1 If Multi-AZ with Automatic Failover is not enabled: 0 // // * Redis (cluster mode enabled): 0 (though you will not be able to failover // to a replica if your primary node fails) @@ -6135,7 +7075,7 @@ type ConfigureShard struct { // The 4-digit id for the node group you are configuring. For Redis (cluster // mode disabled) replication groups, the node group id is always 0001. To find // a Redis (cluster mode enabled)'s node group's (shard's) id, see Finding a - // Shard's Id (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/shard-find-id.html). + // Shard's Id (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/shard-find-id.html).
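// Editor's note: a minimal, hypothetical sketch (not part of the vendored diff)
// of completing a migration with the CompleteMigrationInput type defined above.
// The replication group ID is a placeholder; Force is left unset because forcing
// completion skips the data-sync check and is only recommended to abort a
// migration.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.CompleteMigration(&elasticache.CompleteMigrationInput{
		ReplicationGroupId: aws.String("my-repl-group"), // required; placeholder
		// Force: aws.Bool(true) would stop without ensuring data is in sync.
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.ReplicationGroup != nil {
		fmt.Println("replication group status:", aws.StringValue(out.ReplicationGroup.Status))
	}
}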
// // NodeGroupId is a required field NodeGroupId *string `min:"1" type:"string" required:"true"` @@ -6199,6 +7139,9 @@ func (s *ConfigureShard) SetPreferredAvailabilityZones(v []*string) *ConfigureSh type CopySnapshotInput struct { _ struct{} `type:"structure"` + // The ID of the KMS key used to encrypt the target snapshot. + KmsKeyId *string `type:"string"` + // The name of an existing snapshot from which to make a copy. // // SourceSnapshotName is a required field @@ -6209,10 +7152,10 @@ type CopySnapshotInput struct { // // When using this parameter to export a snapshot, be sure Amazon ElastiCache // has the needed permissions to this S3 bucket. For more information, see Step - // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess) + // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the Amazon ElastiCache User Guide. // - // For more information, see Exporting a Snapshot (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) + // For more information, see Exporting a Snapshot (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) // in the Amazon ElastiCache User Guide. TargetBucket *string `type:"string"` @@ -6250,6 +7193,12 @@ func (s *CopySnapshotInput) Validate() error { return nil } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CopySnapshotInput) SetKmsKeyId(v string) *CopySnapshotInput { + s.KmsKeyId = &v + return s +} + // SetSourceSnapshotName sets the SourceSnapshotName field's value. func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { s.SourceSnapshotName = &v @@ -6314,7 +7263,9 @@ type CreateCacheClusterInput struct { // // * Must be at least 16 characters and no more than 128 characters in length. // - // * Cannot contain any of the following characters: '/', '"', or '@'. + // * The only permitted printable special characters are !, &, #, $, ^, <, + // >, and -. Other printable special characters cannot be used in the AUTH + // token. // // For more information, see AUTH password (http://redis.io/commands/AUTH) at // http://redis.io/commands/AUTH. @@ -6328,7 +7279,7 @@ type CreateCacheClusterInput struct { // // Constraints: // - // * A name must contain from 1 to 20 alphanumeric characters or hyphens. + // * A name must contain from 1 to 50 alphanumeric characters or hyphens. // // * The first character must be a letter. // @@ -6343,64 +7294,36 @@ type CreateCacheClusterInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge + // Additional node type info // - // Previous generation: (not recommended) + // * All current generation instance types are created in Amazon VPC by default. // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // Notes: + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). - // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. - // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. - // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. 
- // - // For a complete listing of node types and specifications, see: - // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) - // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) - // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The name of the parameter group to associate with this cluster. If this argument @@ -6422,7 +7345,7 @@ type CreateCacheClusterInput struct { // // If you're going to launch your cluster in an Amazon VPC, you need to create // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). CacheSubnetGroupName *string `type:"string"` // The name of the cache engine to be used for this cluster. @@ -6435,7 +7358,7 @@ type CreateCacheClusterInput struct { // operation. // // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing cluster or replication // group and create it anew with the earlier engine version. @@ -6764,7 +7687,8 @@ type CreateCacheParameterGroupInput struct { // The name of the cache parameter group family that the cache parameter group // can be used with. // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | // // CacheParameterGroupFamily is a required field CacheParameterGroupFamily *string `type:"string" required:"true"` @@ -7048,7 +7972,7 @@ type CreateReplicationGroupInput struct { // group. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. // // Default: false AtRestEncryptionEnabled *bool `type:"boolean"` @@ -7067,7 +7991,9 @@ type CreateReplicationGroupInput struct { // // * Must be at least 16 characters and no more than 128 characters in length. // - // * Cannot contain any of the following characters: '/', '"', or '@'. + // * The only permitted printable special characters are !, &, #, $, ^, <, + // >, and -. Other printable special characters cannot be used in the AUTH + // token. // // For more information, see AUTH password (http://redis.io/commands/AUTH) at // http://redis.io/commands/AUTH. @@ -7092,7 +8018,7 @@ type CreateReplicationGroupInput struct { // // * Redis versions earlier than 2.8.6. // - // * Redis (cluster mode disabled): T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 node types. // // * Redis (cluster mode enabled): T1 node types. 
AutomaticFailoverEnabled *bool `type:"boolean"` @@ -7103,70 +8029,45 @@ type CreateReplicationGroupInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: - // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). + // Additional node type info // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. + // * All current generation instance types are created in Amazon VPC by default. // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. 
// - // For a complete listing of node types and specifications, see: - // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) - // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) - // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The name of the parameter group to associate with this replication group. // If this argument is omitted, the default cache parameter group for the specified // engine is used. // + // If you are restoring to an engine version that is different from the original, + // you must specify the default parameter group for that version. For example, CacheParameterGroupName=default.redis4.0. + // // If you are running Redis version 3.2.4 or later, only one node group (shard), // and want to use a default parameter group, we recommend that you specify // the parameter group by name. @@ -7183,7 +8084,7 @@ type CreateReplicationGroupInput struct { // // If you're going to launch your cluster in an Amazon VPC, you need to create // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). CacheSubnetGroupName *string `type:"string"` // The name of the cache engine to be used for the clusters in this replication @@ -7195,13 +8096,16 @@ type CreateReplicationGroupInput struct { // operation. // // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine // version. If you want to use an earlier engine version, you must delete the // existing cluster or replication group and create it anew with the earlier // engine version. EngineVersion *string `type:"string"` + // The ID of the KMS key used to encrypt the disk on the cluster. + KmsKeyId *string `type:"string"` + // A list of node group (shard) configuration options. Each node group (shard) // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, // ReplicaCount, and Slots. @@ -7309,7 +8213,7 @@ type CreateReplicationGroupInput struct { // // Constraints: // - // * A name must contain from 1 to 20 alphanumeric characters or hyphens. + // * A name must contain from 1 to 40 alphanumeric characters or hyphens. // // * The first character must be a letter. // @@ -7356,8 +8260,9 @@ type CreateReplicationGroupInput struct { // appropriate time range. SnapshotWindow *string `type:"string"` - // A list of cost allocation tags to be added to this resource. A tag is a key-value - // pair. + // A list of cost allocation tags to be added to this resource. Tags are comma-separated + // key,value pairs (e.g. Key=myKey, Value=myKeyValue). You can include multiple + // tags, for example: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue.
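// Editor's note: a hypothetical sketch (not part of the vendored diff) tying
// together several CreateReplicationGroupInput fields documented above: at-rest
// encryption with a customer KMS key, in-transit encryption with an AUTH token,
// and a default parameter group matching the engine version. All IDs, the key
// ARN, and the token are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.CreateReplicationGroup(&elasticache.CreateReplicationGroupInput{
		ReplicationGroupId:          aws.String("my-encrypted-group"), // placeholder
		ReplicationGroupDescription: aws.String("encrypted redis group"),
		Engine:                      aws.String("redis"),
		EngineVersion:               aws.String("5.0.6"),
		CacheParameterGroupName:     aws.String("default.redis5.0"),
		CacheNodeType:               aws.String("cache.m5.large"),
		NumCacheClusters:            aws.Int64(2),
		CacheSubnetGroupName:        aws.String("my-subnet-group"), // placeholder
		AtRestEncryptionEnabled:     aws.Bool(true),
		KmsKeyId:                    aws.String("arn:aws:kms:us-west-2:111122223333:key/placeholder"),
		TransitEncryptionEnabled:    aws.Bool(true),
		// 16-128 characters; per the constraints above, only !, &, #, $, ^, <,
		// > and - are permitted as special characters in the AUTH token.
		AuthToken: aws.String("a-sufficiently-long-placeholder-token"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.ReplicationGroup != nil {
		fmt.Println("creating:", aws.StringValue(out.ReplicationGroup.ReplicationGroupId))
	}
}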
Tags []*Tag `locationNameList:"Tag" type:"list"` // A flag that enables in-transit encryption when set to true. @@ -7367,13 +8272,13 @@ type CreateReplicationGroupInput struct { // to true when you create a cluster. // // This parameter is valid only if the Engine parameter is redis, the EngineVersion - // parameter is 3.2.6 or 4.x, and the cluster is being created in an Amazon - // VPC. + // parameter is 3.2.6, 4.x or later, and the cluster is being created in an + // Amazon VPC. // // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. // // Default: false // @@ -7478,6 +8383,12 @@ func (s *CreateReplicationGroupInput) SetEngineVersion(v string) *CreateReplicat return s } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateReplicationGroupInput) SetKmsKeyId(v string) *CreateReplicationGroupInput { + s.KmsKeyId = &v + return s +} + // SetNodeGroupConfiguration sets the NodeGroupConfiguration field's value. func (s *CreateReplicationGroupInput) SetNodeGroupConfiguration(v []*NodeGroupConfiguration) *CreateReplicationGroupInput { s.NodeGroupConfiguration = v @@ -7617,6 +8528,9 @@ type CreateSnapshotInput struct { // cluster. CacheClusterId *string `type:"string"` + // The ID of the KMS key used to encrypt the snapshot. + KmsKeyId *string `type:"string"` + // The identifier of an existing replication group. The snapshot is created // from this replication group. ReplicationGroupId *string `type:"string"` @@ -7656,6 +8570,12 @@ func (s *CreateSnapshotInput) SetCacheClusterId(v string) *CreateSnapshotInput { return s } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateSnapshotInput) SetKmsKeyId(v string) *CreateSnapshotInput { + s.KmsKeyId = &v + return s +} + // SetReplicationGroupId sets the ReplicationGroupId field's value. func (s *CreateSnapshotInput) SetReplicationGroupId(v string) *CreateSnapshotInput { s.ReplicationGroupId = &v @@ -7692,11 +8612,44 @@ func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput { return s } +// The endpoint from which data should be migrated. +type CustomerNodeEndpoint struct { + _ struct{} `type:"structure"` + + // The address of the node endpoint + Address *string `type:"string"` + + // The port of the node endpoint + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s CustomerNodeEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerNodeEndpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *CustomerNodeEndpoint) SetAddress(v string) *CustomerNodeEndpoint { + s.Address = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CustomerNodeEndpoint) SetPort(v int64) *CustomerNodeEndpoint { + s.Port = &v + return s +} + type DecreaseReplicaCountInput struct { _ struct{} `type:"structure"` - // If True, the number of replica nodes is decreased immediately. If False, - // the number of replica nodes is decreased during the next maintenance window. + // If True, the number of replica nodes is decreased immediately. ApplyImmediately=False + // is not currently supported.
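// Editor's note: per the doc comment above, ApplyImmediately=False is not
// currently supported for DecreaseReplicaCount, so a caller always sets it to
// true. A minimal, hypothetical sketch follows (not part of the vendored diff);
// the group ID is a placeholder, and NewReplicaCount must respect the minimums
// listed for ConfigureShard (at least 1 when Multi-AZ automatic failover is
// enabled).
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.DecreaseReplicaCount(&elasticache.DecreaseReplicaCountInput{
		ReplicationGroupId: aws.String("my-repl-group"), // placeholder
		NewReplicaCount:    aws.Int64(1),                // >= 1 with Multi-AZ auto-failover
		ApplyImmediately:   aws.Bool(true),              // required; false is not supported
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.ReplicationGroup != nil {
		fmt.Println("status:", aws.StringValue(out.ReplicationGroup.Status))
	}
}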
// // ApplyImmediately is a required field ApplyImmediately *bool `type:"boolean" required:"true"` @@ -7709,11 +8662,8 @@ type DecreaseReplicaCountInput struct { // // The minimum number of replicas in a shard or replication group is: // - // * Redis (cluster mode disabled) - // - // If Multi-AZ with Automatic Failover is enabled: 1 - // - // If Multi-AZ with Automatic Failover is not enabled: 0 + // * Redis (cluster mode disabled) If Multi-AZ with Automatic Failover is + // enabled: 1 If Multi-AZ with Automatic Failover is not enabled: 0 // // * Redis (cluster mode enabled): 0 (though you will not be able to failover // to a replica if your primary node fails) @@ -8323,7 +9273,8 @@ type DescribeCacheEngineVersionsInput struct { // The name of a specific cache parameter group family to return details for. // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | // // Constraints: // @@ -8818,7 +9769,8 @@ type DescribeEngineDefaultParametersInput struct { // The name of the cache parameter group family. // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | // // CacheParameterGroupFamily is a required field CacheParameterGroupFamily *string `type:"string" required:"true"` @@ -9129,64 +10081,36 @@ type DescribeReservedCacheNodesInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node 
types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: + // Additional node type info // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). + // * All current generation instance types are created in Amazon VPC by default. // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. - // - // For a complete listing of node types and specifications, see: - // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) - // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) - // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The duration filter value, specified in years or seconds. Use this parameter @@ -9297,64 +10221,36 @@ type DescribeReservedCacheNodesOfferingsInput struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: - // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). - // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. - // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. - // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. + // Additional node type info // - // For a complete listing of node types and specifications, see: + // * All current generation instance types are created in Amazon VPC by default. // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. 
// - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // Duration filter value, specified in years or seconds. Use this parameter @@ -9514,31 +10410,117 @@ func (s *DescribeReservedCacheNodesOutput) SetReservedCacheNodes(v []*ReservedCa return s } -// Represents the input of a DescribeSnapshotsMessage operation. -type DescribeSnapshotsInput struct { +type DescribeServiceUpdatesInput struct { _ struct{} `type:"structure"` - // A user-supplied cluster identifier. If this parameter is specified, only - // snapshots associated with that specific cluster are described. - CacheClusterId *string `type:"string"` - // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a marker is included in the response - // so that the remaining results can be retrieved. - // - // Default: 50 - // - // Constraints: minimum 20; maximum 50. + // The maximum number of records to include in the response MaxRecords *int64 `type:"integer"` - // A user-supplied replication group identifier. If this parameter is specified, - // only snapshots associated with that specific replication group are described. - ReplicationGroupId *string `type:"string"` + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The status of the service update + ServiceUpdateStatus []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeServiceUpdatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceUpdatesInput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeServiceUpdatesInput) SetMarker(v string) *DescribeServiceUpdatesInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeServiceUpdatesInput) SetMaxRecords(v int64) *DescribeServiceUpdatesInput { + s.MaxRecords = &v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *DescribeServiceUpdatesInput) SetServiceUpdateName(v string) *DescribeServiceUpdatesInput { + s.ServiceUpdateName = &v + return s +} + +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *DescribeServiceUpdatesInput) SetServiceUpdateStatus(v []*string) *DescribeServiceUpdatesInput { + s.ServiceUpdateStatus = v + return s +} + +type DescribeServiceUpdatesOutput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. 
+ Marker *string `type:"string"` + + // A list of service updates + ServiceUpdates []*ServiceUpdate `locationNameList:"ServiceUpdate" type:"list"` +} + +// String returns the string representation +func (s DescribeServiceUpdatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceUpdatesOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeServiceUpdatesOutput) SetMarker(v string) *DescribeServiceUpdatesOutput { + s.Marker = &v + return s +} + +// SetServiceUpdates sets the ServiceUpdates field's value. +func (s *DescribeServiceUpdatesOutput) SetServiceUpdates(v []*ServiceUpdate) *DescribeServiceUpdatesOutput { + s.ServiceUpdates = v + return s +} + +// Represents the input of a DescribeSnapshotsMessage operation. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // A user-supplied cluster identifier. If this parameter is specified, only + // snapshots associated with that specific cluster are described. + CacheClusterId *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 50 + // + // Constraints: minimum 20; maximum 50. + MaxRecords *int64 `type:"integer"` + + // A user-supplied replication group identifier. If this parameter is specified, + // only snapshots associated with that specific replication group are described. + ReplicationGroupId *string `type:"string"` // A Boolean value which if true, the node group (shard) configuration is included // in the snapshot description. @@ -9643,6 +10625,147 @@ func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshots return s } +type DescribeUpdateActionsInput struct { + _ struct{} `type:"structure"` + + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The ElastiCache engine to which the update applies, either Redis or Memcached + Engine *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response + MaxRecords *int64 `type:"integer"` + + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The status of the service update + ServiceUpdateStatus []*string `type:"list"` + + // The range of time specified to search for service updates that are in available + // status + ServiceUpdateTimeRange *TimeRangeFilter `type:"structure"` + + // Dictates whether to include node level update status in the response + ShowNodeLevelUpdateStatus *bool `type:"boolean"` + + // The status of the update action.
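// Editor's note: a minimal, hypothetical sketch (not part of the vendored diff)
// of paging through DescribeServiceUpdates with the Marker field documented
// above: each response's Marker is fed back into the next request until it is
// empty. The "available" status filter matches the service-update status values
// this revision introduces.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	input := &elasticache.DescribeServiceUpdatesInput{
		ServiceUpdateStatus: aws.StringSlice([]string{"available"}),
	}
	for {
		out, err := svc.DescribeServiceUpdates(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, su := range out.ServiceUpdates {
			fmt.Println(aws.StringValue(su.ServiceUpdateName),
				aws.StringValue(su.ServiceUpdateSeverity))
		}
		// An empty Marker means the last page has been reached.
		if aws.StringValue(out.Marker) == "" {
			break
		}
		input.Marker = out.Marker
	}
}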
+ UpdateActionStatus []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeUpdateActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUpdateActionsInput) GoString() string { + return s.String() +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *DescribeUpdateActionsInput) SetCacheClusterIds(v []*string) *DescribeUpdateActionsInput { + s.CacheClusterIds = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *DescribeUpdateActionsInput) SetEngine(v string) *DescribeUpdateActionsInput { + s.Engine = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeUpdateActionsInput) SetMarker(v string) *DescribeUpdateActionsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeUpdateActionsInput) SetMaxRecords(v int64) *DescribeUpdateActionsInput { + s.MaxRecords = &v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *DescribeUpdateActionsInput) SetReplicationGroupIds(v []*string) *DescribeUpdateActionsInput { + s.ReplicationGroupIds = v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateName(v string) *DescribeUpdateActionsInput { + s.ServiceUpdateName = &v + return s +} + +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateStatus(v []*string) *DescribeUpdateActionsInput { + s.ServiceUpdateStatus = v + return s +} + +// SetServiceUpdateTimeRange sets the ServiceUpdateTimeRange field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateTimeRange(v *TimeRangeFilter) *DescribeUpdateActionsInput { + s.ServiceUpdateTimeRange = v + return s +} + +// SetShowNodeLevelUpdateStatus sets the ShowNodeLevelUpdateStatus field's value. +func (s *DescribeUpdateActionsInput) SetShowNodeLevelUpdateStatus(v bool) *DescribeUpdateActionsInput { + s.ShowNodeLevelUpdateStatus = &v + return s +} + +// SetUpdateActionStatus sets the UpdateActionStatus field's value. +func (s *DescribeUpdateActionsInput) SetUpdateActionStatus(v []*string) *DescribeUpdateActionsInput { + s.UpdateActionStatus = v + return s +} + +type DescribeUpdateActionsOutput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // Returns a list of update actions + UpdateActions []*UpdateAction `locationNameList:"UpdateAction" type:"list"` +} + +// String returns the string representation +func (s DescribeUpdateActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUpdateActionsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeUpdateActionsOutput) SetMarker(v string) *DescribeUpdateActionsOutput { + s.Marker = &v + return s +} + +// SetUpdateActions sets the UpdateActions field's value. 
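The DescribeUpdateActionsInput shape above supports server-side filtering by engine, resource IDs, and status. A short single-page sketch, assuming the generated DescribeUpdateActions client method; "my-replication-group" is a hypothetical ID, and Marker pagination works exactly as in the DescribeServiceUpdates sketch earlier:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))
	out, err := svc.DescribeUpdateActions(&elasticache.DescribeUpdateActionsInput{
		Engine:                    aws.String("redis"),
		ReplicationGroupIds:       aws.StringSlice([]string{"my-replication-group"}), // hypothetical ID
		ServiceUpdateStatus:       aws.StringSlice([]string{elasticache.ServiceUpdateStatusAvailable}),
		ShowNodeLevelUpdateStatus: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ua := range out.UpdateActions {
		fmt.Printf("%s on %s: status=%s nodes updated=%s\n",
			aws.StringValue(ua.ServiceUpdateName),
			aws.StringValue(ua.ReplicationGroupId),
			aws.StringValue(ua.UpdateActionStatus),
			aws.StringValue(ua.NodesUpdated))
	}
}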
+func (s *DescribeUpdateActionsOutput) SetUpdateActions(v []*UpdateAction) *DescribeUpdateActionsOutput { + s.UpdateActions = v + return s +} + // Provides ownership and status information for an Amazon EC2 security group. type EC2SecurityGroup struct { _ struct{} `type:"structure"` @@ -9730,7 +10853,8 @@ type EngineDefaults struct { // Specifies the name of the cache parameter group family to which the engine // default parameters apply. // - // Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2 | redis4.0 + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | CacheParameterGroupFamily *string `type:"string"` // Provides an identifier to allow retrieval of paginated results. @@ -9832,8 +10956,8 @@ func (s *Event) SetSourceType(v string) *Event { type IncreaseReplicaCountInput struct { _ struct{} `type:"structure"` - // If True, the number of replica nodes is increased immediately. If False, - // the number of replica nodes is increased during the next maintenance window. + // If True, the number of replica nodes is increased immediately. ApplyImmediately=False + // is not currently supported. // // ApplyImmediately is a required field ApplyImmediately *bool `type:"boolean" required:"true"` @@ -9986,6 +11110,14 @@ func (s *ListAllowedNodeTypeModificationsInput) SetReplicationGroupId(v string) type ListAllowedNodeTypeModificationsOutput struct { _ struct{} `type:"structure"` + // A string list, each element of which specifies a cache node type which you + // can use to scale your cluster or replication group. + // + // When scaling down on a Redis cluster or replication group using ModifyCacheCluster + // or ModifyReplicationGroup, use a value from this list for the CacheNodeType + // parameter. + ScaleDownModifications []*string `type:"list"` + // A string list, each element of which specifies a cache node type which you // can use to scale your cluster or replication group. // @@ -10005,6 +11137,12 @@ func (s ListAllowedNodeTypeModificationsOutput) GoString() string { return s.String() } +// SetScaleDownModifications sets the ScaleDownModifications field's value. +func (s *ListAllowedNodeTypeModificationsOutput) SetScaleDownModifications(v []*string) *ListAllowedNodeTypeModificationsOutput { + s.ScaleDownModifications = v + return s +} + // SetScaleUpModifications sets the ScaleUpModifications field's value. func (s *ListAllowedNodeTypeModificationsOutput) SetScaleUpModifications(v []*string) *ListAllowedNodeTypeModificationsOutput { s.ScaleUpModifications = v @@ -10020,7 +11158,7 @@ type ListTagsForResourceInput struct { // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. // // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // // ResourceName is a required field ResourceName *string `type:"string" required:"true"` @@ -10070,10 +11208,7 @@ type ModifyCacheClusterInput struct { // in different Availability Zones. If cross-az is specified, existing Memcached // nodes remain in their current Availability Zone. // - // Only newly created nodes are located in different Availability Zones. 
For - // instructions on how to move existing Memcached nodes to different Availability - // Zones, see the Availability Zone Considerations section of Cache Node Considerations - // for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNode.Memcached.html). + // Only newly created nodes are located in different Availability Zones. AZMode *string `type:"string" enum:"AZMode"` // If true, this parameter causes the modifications in this request and any @@ -10091,6 +11226,29 @@ type ModifyCacheClusterInput struct { // Default: false ApplyImmediately *bool `type:"boolean"` + // Reserved parameter. The password used to access a password protected server. + // This parameter must be specified with the auth-token-update parameter. Password + // constraints: + // + // * Must be only printable ASCII characters + // + // * Must be at least 16 characters and no more than 128 characters in length + // + // * Cannot contain any of the following characters: '/', '"', or '@', '%' + // + // For more information, see AUTH password at AUTH (http://redis.io/commands/AUTH). + AuthToken *string `type:"string"` + + // Specifies the strategy to use to update the AUTH token. This parameter must + // be specified with the auth-token parameter. Possible values: + // + // * Rotate + // + // * Set + // + // For more information, see Authenticating Users with Redis AUTH (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) + AuthTokenUpdateStrategy *string `type:"string" enum:"AuthTokenUpdateStrategyType"` + // This parameter is currently disabled. AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -10132,7 +11290,7 @@ type ModifyCacheClusterInput struct { // The upgraded version of the cache engine to be run on the cache nodes. // // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing cluster and create // it anew with the earlier engine version. @@ -10169,45 +11327,23 @@ type ModifyCacheClusterInput struct { // Availability Zone. Only newly created nodes can be located in different Availability // Zones. For guidance on how to move existing Memcached nodes to different // Availability Zones, see the Availability Zone Considerations section of Cache - // Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNode.Memcached.html). + // Node Considerations for Memcached (https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html). // // Impact of new add/remove requests upon pending requests // - // * Scenario-1 - // - // Pending Action: Delete - // - // New Request: Delete - // - // Result: The new delete, pending or immediate, replaces the pending delete. - // - // * Scenario-2 - // - // Pending Action: Delete - // - // New Request: Create - // - // Result: The new create, pending or immediate, replaces the pending delete. + // * Scenario-1 Pending Action: Delete New Request: Delete Result: The new + // delete, pending or immediate, replaces the pending delete. 
// - // * Scenario-3 + // * Scenario-2 Pending Action: Delete New Request: Create Result: The new + // create, pending or immediate, replaces the pending delete. // - // Pending Action: Create + // * Scenario-3 Pending Action: Create New Request: Delete Result: The new + // delete, pending or immediate, replaces the pending create. // - // New Request: Delete - // - // Result: The new delete, pending or immediate, replaces the pending create. - // - // * Scenario-4 - // - // Pending Action: Create - // - // New Request: Create - // - // Result: The new create is added to the pending create. - // - // Important: If the new create request is Apply Immediately - Yes, all creates - // are performed immediately. If the new create request is Apply Immediately - // - No, all creates are pending. + // * Scenario-4 Pending Action: Create New Request: Create Result: The new + // create is added to the pending create. Important: If the new create request + // is Apply Immediately - Yes, all creates are performed immediately. If + // the new create request is Apply Immediately - No, all creates are pending. NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications @@ -10332,6 +11468,18 @@ func (s *ModifyCacheClusterInput) SetApplyImmediately(v bool) *ModifyCacheCluste return s } +// SetAuthToken sets the AuthToken field's value. +func (s *ModifyCacheClusterInput) SetAuthToken(v string) *ModifyCacheClusterInput { + s.AuthToken = &v + return s +} + +// SetAuthTokenUpdateStrategy sets the AuthTokenUpdateStrategy field's value. +func (s *ModifyCacheClusterInput) SetAuthTokenUpdateStrategy(v string) *ModifyCacheClusterInput { + s.AuthTokenUpdateStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *ModifyCacheClusterInput) SetAutoMinorVersionUpgrade(v bool) *ModifyCacheClusterInput { s.AutoMinorVersionUpgrade = &v @@ -10606,6 +11754,29 @@ type ModifyReplicationGroupInput struct { // Default: false ApplyImmediately *bool `type:"boolean"` + // Reserved parameter. The password used to access a password protected server. + // This parameter must be specified with the auth-token-update-strategy parameter. + // Password constraints: + // + // * Must be only printable ASCII characters + // + // * Must be at least 16 characters and no more than 128 characters in length + // + // * Cannot contain any of the following characters: '/', '"', or '@', '%' + // + // For more information, see AUTH password at AUTH (http://redis.io/commands/AUTH). + AuthToken *string `type:"string"` + + // Specifies the strategy to use to update the AUTH token. This parameter must + // be specified with the auth-token parameter. Possible values: + // + // * Rotate + // + // * Set + // + // For more information, see Authenticating Users with Redis AUTH (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) + AuthTokenUpdateStrategy *string `type:"string" enum:"AuthTokenUpdateStrategyType"` + // This parameter is currently disabled. AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -10619,7 +11790,7 @@ type ModifyReplicationGroupInput struct { // // * Redis versions earlier than 2.8.6. // - // * Redis (cluster mode disabled): T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 node types. // // * Redis (cluster mode enabled): T1 node types. 
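The new AuthToken/AuthTokenUpdateStrategy pair on ModifyReplicationGroupInput enables token rotation without a hard cutover. A sketch of one plausible two-phase flow; the ordering (ROTATE first, SET only after clients have been repointed) is an inference from the Rotate/Set enum values and the linked AUTH documentation, not something this diff states:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// rotateAuthToken swaps the AUTH token on a replication group in two phases.
func rotateAuthToken(svc *elasticache.ElastiCache, groupID, newToken string) error {
	// Phase 1: ROTATE makes newToken valid alongside the old token, so
	// clients can be repointed without downtime.
	if _, err := svc.ModifyReplicationGroup(&elasticache.ModifyReplicationGroupInput{
		ReplicationGroupId:      aws.String(groupID),
		AuthToken:               aws.String(newToken),
		AuthTokenUpdateStrategy: aws.String(elasticache.AuthTokenUpdateStrategyTypeRotate),
		ApplyImmediately:        aws.Bool(true),
	}); err != nil {
		return err
	}
	// Phase 2: once every client presents newToken and the rotation has
	// finished propagating, SET retires the old token. In practice this
	// call happens later, after verifying AuthTokenStatus is clear.
	_, err := svc.ModifyReplicationGroup(&elasticache.ModifyReplicationGroupInput{
		ReplicationGroupId:      aws.String(groupID),
		AuthToken:               aws.String(newToken),
		AuthTokenUpdateStrategy: aws.String(elasticache.AuthTokenUpdateStrategyTypeSet),
		ApplyImmediately:        aws.Bool(true),
	})
	return err
}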
AutomaticFailoverEnabled *bool `type:"boolean"` @@ -10647,7 +11818,7 @@ type ModifyReplicationGroupInput struct { // replication group. // // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing replication group // and create it anew with the earlier engine version. @@ -10767,6 +11938,18 @@ func (s *ModifyReplicationGroupInput) SetApplyImmediately(v bool) *ModifyReplica return s } +// SetAuthToken sets the AuthToken field's value. +func (s *ModifyReplicationGroupInput) SetAuthToken(v string) *ModifyReplicationGroupInput { + s.AuthToken = &v + return s +} + +// SetAuthTokenUpdateStrategy sets the AuthTokenUpdateStrategy field's value. +func (s *ModifyReplicationGroupInput) SetAuthTokenUpdateStrategy(v string) *ModifyReplicationGroupInput { + s.AuthTokenUpdateStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *ModifyReplicationGroupInput) SetAutoMinorVersionUpgrade(v bool) *ModifyReplicationGroupInput { s.AutoMinorVersionUpgrade = &v @@ -10911,16 +12094,16 @@ type ModifyReplicationGroupShardConfigurationInput struct { NodeGroupCount *int64 `type:"integer" required:"true"` // If the value of NodeGroupCount is less than the current number of node groups - // (shards), the NodeGroupsToRemove or NodeGroupsToRetain is a required list - // of node group ids to remove from or retain in the cluster. + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. // // ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove // from the cluster. NodeGroupsToRemove []*string `locationNameList:"NodeGroupToRemove" type:"list"` // If the value of NodeGroupCount is less than the current number of node groups - // (shards), the NodeGroupsToRemove or NodeGroupsToRetain is a required list - // of node group ids to remove from or retain in the cluster. + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. // // ElastiCache for Redis will attempt to remove all node groups except those // listed by NodeGroupsToRetain from the cluster. @@ -11049,8 +12232,9 @@ type NodeGroup struct { // The identifier for the node group (shard). A Redis (cluster mode disabled) // replication group contains only 1 node group; therefore, the node group ID - // is 0001. A Redis (cluster mode enabled) replication group contains 1 to 15 - // node groups numbered 0001 to 0015. + // is 0001. A Redis (cluster mode enabled) replication group contains 1 to 90 + // node groups numbered 0001 to 0090. Optionally, the user can provide the id + // for a node group. NodeGroupId *string `type:"string"` // A list containing information about individual nodes within the node group @@ -11060,6 +12244,9 @@ type NodeGroup struct { // The endpoint of the primary node in this node group (shard). PrimaryEndpoint *Endpoint `type:"structure"` + // The endpoint of the replica nodes in this node group (shard). 
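The reworded NodeGroupsToRetain/NodeGroupsToRemove comments above pin down the contract: when NodeGroupCount shrinks, exactly one of the two lists is supplied. A sketch of shrinking by retention, assuming ReplicationGroupId and ApplyImmediately are required as documented elsewhere in this input:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// shrinkToNodeGroups reduces a cluster-mode-enabled group to exactly the
// shards named in retain, e.g. []string{"0001", "0002"}.
func shrinkToNodeGroups(svc *elasticache.ElastiCache, groupID string, retain []string) error {
	_, err := svc.ModifyReplicationGroupShardConfiguration(&elasticache.ModifyReplicationGroupShardConfigurationInput{
		ReplicationGroupId: aws.String(groupID), // assumed required, as elsewhere in this file
		ApplyImmediately:   aws.Bool(true),
		NodeGroupCount:     aws.Int64(int64(len(retain))),
		// When NodeGroupCount is lower than the current shard count, exactly
		// one of NodeGroupsToRetain / NodeGroupsToRemove must be supplied.
		NodeGroupsToRetain: aws.StringSlice(retain),
	})
	return err
}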
+ ReaderEndpoint *Endpoint `type:"structure"` + // The keyspace for this node group (shard). Slots *string `type:"string"` @@ -11095,6 +12282,12 @@ func (s *NodeGroup) SetPrimaryEndpoint(v *Endpoint) *NodeGroup { return s } +// SetReaderEndpoint sets the ReaderEndpoint field's value. +func (s *NodeGroup) SetReaderEndpoint(v *Endpoint) *NodeGroup { + s.ReaderEndpoint = v + return s +} + // SetSlots sets the Slots field's value. func (s *NodeGroup) SetSlots(v string) *NodeGroup { s.Slots = &v @@ -11113,7 +12306,8 @@ func (s *NodeGroup) SetStatus(v string) *NodeGroup { type NodeGroupConfiguration struct { _ struct{} `type:"structure"` - // The 4-digit id for the node group these configuration values apply to. + // Either the ElastiCache for Redis supplied 4-digit id or a user supplied id + // for the node group these configuration values apply to. NodeGroupId *string `min:"1" type:"string"` // The Availability Zone where the primary node of this node group (shard) is @@ -11252,53 +12446,183 @@ func (s *NodeGroupMember) SetReadEndpoint(v *Endpoint) *NodeGroupMember { return s } -// Represents an individual cache node in a snapshot of a cluster. -type NodeSnapshot struct { +// The status of the service update on the node group member +type NodeGroupMemberUpdateStatus struct { _ struct{} `type:"structure"` - // A unique identifier for the source cluster. + // The cache cluster ID CacheClusterId *string `type:"string"` - // The date and time when the cache node was created in the source cluster. - CacheNodeCreateTime *time.Time `type:"timestamp"` - - // The cache node identifier for the node in the source cluster. + // The node ID of the cache cluster CacheNodeId *string `type:"string"` - // The size of the cache on the source cache node. - CacheSize *string `type:"string"` + // The deletion date of the node + NodeDeletionDate *time.Time `type:"timestamp"` - // The configuration for the source node group (shard). - NodeGroupConfiguration *NodeGroupConfiguration `type:"structure"` + // The end date of the update for a node + NodeUpdateEndDate *time.Time `type:"timestamp"` - // A unique identifier for the source node group (shard). - NodeGroupId *string `type:"string"` + // Reflects whether the update was initiated by the customer or automatically + // applied + NodeUpdateInitiatedBy *string `type:"string" enum:"NodeUpdateInitiatedBy"` - // The date and time when the source node's metadata and cache data set was - // obtained for the snapshot. - SnapshotCreateTime *time.Time `type:"timestamp"` + // The date when the update is triggered + NodeUpdateInitiatedDate *time.Time `type:"timestamp"` + + // The start date of the update for a node + NodeUpdateStartDate *time.Time `type:"timestamp"` + + // The update status of the node + NodeUpdateStatus *string `type:"string" enum:"NodeUpdateStatus"` + + // The date when the NodeUpdateStatus was last modified + NodeUpdateStatusModifiedDate *time.Time `type:"timestamp"` } // String returns the string representation -func (s NodeSnapshot) String() string { +func (s NodeGroupMemberUpdateStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NodeSnapshot) GoString() string { +func (s NodeGroupMemberUpdateStatus) GoString() string { return s.String() } // SetCacheClusterId sets the CacheClusterId field's value. 
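The new ReaderEndpoint field gives each shard a single read endpoint alongside PrimaryEndpoint. A sketch that surfaces it, assuming the DescribeReplicationGroups operation and the Address/Port fields on Endpoint defined elsewhere in this file:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// printReaderEndpoints lists the per-shard reader endpoints of a group.
func printReaderEndpoints(svc *elasticache.ElastiCache, groupID string) error {
	out, err := svc.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
		ReplicationGroupId: aws.String(groupID),
	})
	if err != nil {
		return err
	}
	for _, rg := range out.ReplicationGroups {
		for _, ng := range rg.NodeGroups {
			if ng.ReaderEndpoint == nil {
				continue
			}
			fmt.Printf("shard %s reader: %s:%d\n",
				aws.StringValue(ng.NodeGroupId),
				aws.StringValue(ng.ReaderEndpoint.Address),
				aws.Int64Value(ng.ReaderEndpoint.Port))
		}
	}
	return nil
}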
-func (s *NodeSnapshot) SetCacheClusterId(v string) *NodeSnapshot { +func (s *NodeGroupMemberUpdateStatus) SetCacheClusterId(v string) *NodeGroupMemberUpdateStatus { s.CacheClusterId = &v return s } -// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value. -func (s *NodeSnapshot) SetCacheNodeCreateTime(v time.Time) *NodeSnapshot { - s.CacheNodeCreateTime = &v - return s +// SetCacheNodeId sets the CacheNodeId field's value. +func (s *NodeGroupMemberUpdateStatus) SetCacheNodeId(v string) *NodeGroupMemberUpdateStatus { + s.CacheNodeId = &v + return s +} + +// SetNodeDeletionDate sets the NodeDeletionDate field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeDeletionDate(v time.Time) *NodeGroupMemberUpdateStatus { + s.NodeDeletionDate = &v + return s +} + +// SetNodeUpdateEndDate sets the NodeUpdateEndDate field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateEndDate(v time.Time) *NodeGroupMemberUpdateStatus { + s.NodeUpdateEndDate = &v + return s +} + +// SetNodeUpdateInitiatedBy sets the NodeUpdateInitiatedBy field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateInitiatedBy(v string) *NodeGroupMemberUpdateStatus { + s.NodeUpdateInitiatedBy = &v + return s +} + +// SetNodeUpdateInitiatedDate sets the NodeUpdateInitiatedDate field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateInitiatedDate(v time.Time) *NodeGroupMemberUpdateStatus { + s.NodeUpdateInitiatedDate = &v + return s +} + +// SetNodeUpdateStartDate sets the NodeUpdateStartDate field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateStartDate(v time.Time) *NodeGroupMemberUpdateStatus { + s.NodeUpdateStartDate = &v + return s +} + +// SetNodeUpdateStatus sets the NodeUpdateStatus field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateStatus(v string) *NodeGroupMemberUpdateStatus { + s.NodeUpdateStatus = &v + return s +} + +// SetNodeUpdateStatusModifiedDate sets the NodeUpdateStatusModifiedDate field's value. +func (s *NodeGroupMemberUpdateStatus) SetNodeUpdateStatusModifiedDate(v time.Time) *NodeGroupMemberUpdateStatus { + s.NodeUpdateStatusModifiedDate = &v + return s +} + +// The status of the service update on the node group +type NodeGroupUpdateStatus struct { + _ struct{} `type:"structure"` + + // The ID of the node group + NodeGroupId *string `type:"string"` + + // The status of the service update on the node group member + NodeGroupMemberUpdateStatus []*NodeGroupMemberUpdateStatus `locationNameList:"NodeGroupMemberUpdateStatus" type:"list"` +} + +// String returns the string representation +func (s NodeGroupUpdateStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroupUpdateStatus) GoString() string { + return s.String() +} + +// SetNodeGroupId sets the NodeGroupId field's value. +func (s *NodeGroupUpdateStatus) SetNodeGroupId(v string) *NodeGroupUpdateStatus { + s.NodeGroupId = &v + return s +} + +// SetNodeGroupMemberUpdateStatus sets the NodeGroupMemberUpdateStatus field's value. +func (s *NodeGroupUpdateStatus) SetNodeGroupMemberUpdateStatus(v []*NodeGroupMemberUpdateStatus) *NodeGroupUpdateStatus { + s.NodeGroupMemberUpdateStatus = v + return s +} + +// Represents an individual cache node in a snapshot of a cluster. +type NodeSnapshot struct { + _ struct{} `type:"structure"` + + // A unique identifier for the source cluster. + CacheClusterId *string `type:"string"` + + // The date and time when the cache node was created in the source cluster. 
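NodeGroupUpdateStatus and NodeGroupMemberUpdateStatus above form a two-level tree hanging off an UpdateAction (defined later in this section) when ShowNodeLevelUpdateStatus is requested. A traversal sketch over those shapes as declared here:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// summarizeNodeStatus walks the shard -> member status tree on one action.
func summarizeNodeStatus(ua *elasticache.UpdateAction) {
	for _, ng := range ua.NodeGroupUpdateStatus {
		for _, m := range ng.NodeGroupMemberUpdateStatus {
			fmt.Printf("shard %s node %s/%s: %s (initiated by %s on %s)\n",
				aws.StringValue(ng.NodeGroupId),
				aws.StringValue(m.CacheClusterId),
				aws.StringValue(m.CacheNodeId),
				aws.StringValue(m.NodeUpdateStatus),
				aws.StringValue(m.NodeUpdateInitiatedBy),
				aws.TimeValue(m.NodeUpdateInitiatedDate))
		}
	}
}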
+ CacheNodeCreateTime *time.Time `type:"timestamp"` + + // The cache node identifier for the node in the source cluster. + CacheNodeId *string `type:"string"` + + // The size of the cache on the source cache node. + CacheSize *string `type:"string"` + + // The configuration for the source node group (shard). + NodeGroupConfiguration *NodeGroupConfiguration `type:"structure"` + + // A unique identifier for the source node group (shard). + NodeGroupId *string `type:"string"` + + // The date and time when the source node's metadata and cache data set was + // obtained for the snapshot. + SnapshotCreateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s NodeSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeSnapshot) GoString() string { + return s.String() +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *NodeSnapshot) SetCacheClusterId(v string) *NodeSnapshot { + s.CacheClusterId = &v + return s +} + +// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value. +func (s *NodeSnapshot) SetCacheNodeCreateTime(v time.Time) *NodeSnapshot { + s.CacheNodeCreateTime = &v + return s } // SetCacheNodeId sets the CacheNodeId field's value. @@ -11377,7 +12701,7 @@ type Parameter struct { // Indicates whether a change to the parameter is applied immediately or requires // a reboot for the change to be applied. You can force a reboot or wait until // the next maintenance window's reboot. For more information, see Rebooting - // a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). + // a Cluster (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). ChangeType *string `type:"string" enum:"ChangeType"` // The valid data type for the parameter. @@ -11506,6 +12830,9 @@ func (s *ParameterNameValue) SetParameterValue(v string) *ParameterNameValue { type PendingModifiedValues struct { _ struct{} `type:"structure"` + // The auth token status + AuthTokenStatus *string `type:"string" enum:"AuthTokenUpdateStatus"` + // A list of cache node IDs that are being removed (or will be removed) from // the cluster. A node ID is a 4-digit numeric identifier (0001, 0002, etc.). CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"` @@ -11533,6 +12860,12 @@ func (s PendingModifiedValues) GoString() string { return s.String() } +// SetAuthTokenStatus sets the AuthTokenStatus field's value. +func (s *PendingModifiedValues) SetAuthTokenStatus(v string) *PendingModifiedValues { + s.AuthTokenStatus = &v + return s +} + // SetCacheNodeIdsToRemove sets the CacheNodeIdsToRemove field's value. 
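The new AuthTokenStatus on PendingModifiedValues is how an in-flight token change (SETTING or ROTATING) shows up on a cluster. A polling sketch, assuming the DescribeCacheClusters operation and the PendingModifiedValues field on CacheCluster defined elsewhere in this file:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// authTokenUpdatePending reports whether a token change (SETTING or
// ROTATING) is still in flight on the named cluster.
func authTokenUpdatePending(svc *elasticache.ElastiCache, clusterID string) (bool, error) {
	out, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		CacheClusterId: aws.String(clusterID),
	})
	if err != nil {
		return false, err
	}
	for _, cc := range out.CacheClusters {
		if pmv := cc.PendingModifiedValues; pmv != nil && pmv.AuthTokenStatus != nil {
			return true, nil
		}
	}
	return false, nil
}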
func (s *PendingModifiedValues) SetCacheNodeIdsToRemove(v []*string) *PendingModifiedValues { s.CacheNodeIdsToRemove = v @@ -11557,6 +12890,57 @@ func (s *PendingModifiedValues) SetNumCacheNodes(v int64) *PendingModifiedValues return s } +// Update action that has been processed for the corresponding apply/stop request +type ProcessedUpdateAction struct { + _ struct{} `type:"structure"` + + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + + // The ID of the replication group + ReplicationGroupId *string `type:"string"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The status of the update action on the Redis cluster + UpdateActionStatus *string `type:"string" enum:"UpdateActionStatus"` +} + +// String returns the string representation +func (s ProcessedUpdateAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProcessedUpdateAction) GoString() string { + return s.String() +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *ProcessedUpdateAction) SetCacheClusterId(v string) *ProcessedUpdateAction { + s.CacheClusterId = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *ProcessedUpdateAction) SetReplicationGroupId(v string) *ProcessedUpdateAction { + s.ReplicationGroupId = &v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *ProcessedUpdateAction) SetServiceUpdateName(v string) *ProcessedUpdateAction { + s.ServiceUpdateName = &v + return s +} + +// SetUpdateActionStatus sets the UpdateActionStatus field's value. +func (s *ProcessedUpdateAction) SetUpdateActionStatus(v string) *ProcessedUpdateAction { + s.UpdateActionStatus = &v + return s +} + // Represents the input of a PurchaseReservedCacheNodesOffering operation. type PurchaseReservedCacheNodesOfferingInput struct { _ struct{} `type:"structure"` @@ -11767,7 +13151,7 @@ type RemoveTagsFromResourceInput struct { // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. // // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // // ResourceName is a required field ResourceName *string `type:"string" required:"true"` @@ -11827,7 +13211,7 @@ type ReplicationGroup struct { // to true when you create a cluster. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. // // Default: false AtRestEncryptionEnabled *bool `type:"boolean"` @@ -11837,6 +13221,9 @@ type ReplicationGroup struct { // Default: false AuthTokenEnabled *bool `type:"boolean"` + // The date the auth token was last modified + AuthTokenLastModifiedDate *time.Time `type:"timestamp"` + // Indicates the status of Multi-AZ with automatic failover for this Redis replication // group. // @@ -11845,7 +13232,7 @@ type ReplicationGroup struct { // // * Redis versions earlier than 2.8.6. // - // * Redis (cluster mode disabled): T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 node types. // // * Redis (cluster mode enabled): T1 node types. 
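ProcessedUpdateAction above (and its UnprocessedUpdateAction counterpart further down in this section) are the per-resource results of the batch apply/stop operations. A sketch, assuming a generated BatchApplyUpdateAction method whose output carries ProcessedUpdateActions and UnprocessedUpdateActions lists; that operation is not part of this hunk, so treat the input/output shape as an assumption:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// applyServiceUpdate queues a service update on the given replication
// groups and reports which resources were (and were not) accepted.
func applyServiceUpdate(svc *elasticache.ElastiCache, updateName string, groupIDs []string) error {
	out, err := svc.BatchApplyUpdateAction(&elasticache.BatchApplyUpdateActionInput{
		ServiceUpdateName:   aws.String(updateName), // assumed input shape
		ReplicationGroupIds: aws.StringSlice(groupIDs),
	})
	if err != nil {
		return err
	}
	for _, p := range out.ProcessedUpdateActions {
		fmt.Printf("queued %s: %s\n",
			aws.StringValue(p.ReplicationGroupId),
			aws.StringValue(p.UpdateActionStatus))
	}
	for _, u := range out.UnprocessedUpdateActions {
		fmt.Printf("rejected %s: %s (%s)\n",
			aws.StringValue(u.ReplicationGroupId),
			aws.StringValue(u.ErrorMessage),
			aws.StringValue(u.ErrorType))
	}
	return nil
}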
AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` @@ -11868,6 +13255,9 @@ type ReplicationGroup struct { // The user supplied description of the replication group. Description *string `type:"string"` + // The ID of the KMS key used to encrypt the disk in the cluster. + KmsKeyId *string `type:"string"` + // The names of all the cache clusters that are part of this replication group. MemberClusters []*string `locationNameList:"ClusterId" type:"list"` @@ -11918,7 +13308,7 @@ type ReplicationGroup struct { // to true when you create a cluster. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6 or 4.x. + // using redis version 3.2.6, 4.x or later. // // Default: false TransitEncryptionEnabled *bool `type:"boolean"` @@ -11946,6 +13336,12 @@ func (s *ReplicationGroup) SetAuthTokenEnabled(v bool) *ReplicationGroup { return s } +// SetAuthTokenLastModifiedDate sets the AuthTokenLastModifiedDate field's value. +func (s *ReplicationGroup) SetAuthTokenLastModifiedDate(v time.Time) *ReplicationGroup { + s.AuthTokenLastModifiedDate = &v + return s +} + // SetAutomaticFailover sets the AutomaticFailover field's value. func (s *ReplicationGroup) SetAutomaticFailover(v string) *ReplicationGroup { s.AutomaticFailover = &v @@ -11976,6 +13372,12 @@ func (s *ReplicationGroup) SetDescription(v string) *ReplicationGroup { return s } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ReplicationGroup) SetKmsKeyId(v string) *ReplicationGroup { + s.KmsKeyId = &v + return s +} + // SetMemberClusters sets the MemberClusters field's value. func (s *ReplicationGroup) SetMemberClusters(v []*string) *ReplicationGroup { s.MemberClusters = v @@ -12035,6 +13437,9 @@ func (s *ReplicationGroup) SetTransitEncryptionEnabled(v bool) *ReplicationGroup type ReplicationGroupPendingModifiedValues struct { _ struct{} `type:"structure"` + // The auth token status + AuthTokenStatus *string `type:"string" enum:"AuthTokenUpdateStatus"` + // Indicates the status of Multi-AZ with automatic failover for this Redis replication // group. // @@ -12043,7 +13448,7 @@ type ReplicationGroupPendingModifiedValues struct { // // * Redis versions earlier than 2.8.6. // - // * Redis (cluster mode disabled): T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 node types. // // * Redis (cluster mode enabled): T1 node types. AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` @@ -12066,6 +13471,12 @@ func (s ReplicationGroupPendingModifiedValues) GoString() string { return s.String() } +// SetAuthTokenStatus sets the AuthTokenStatus field's value. +func (s *ReplicationGroupPendingModifiedValues) SetAuthTokenStatus(v string) *ReplicationGroupPendingModifiedValues { + s.AuthTokenStatus = &v + return s +} + // SetAutomaticFailoverStatus sets the AutomaticFailoverStatus field's value. func (s *ReplicationGroupPendingModifiedValues) SetAutomaticFailoverStatus(v string) *ReplicationGroupPendingModifiedValues { s.AutomaticFailoverStatus = &v @@ -12097,64 +13508,36 @@ type ReservedCacheNode struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: + // Additional node type info // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). + // * All current generation instance types are created in Amazon VPC by default. // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. 
- // - // For a complete listing of node types and specifications, see: - // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) - // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) - // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The duration of the reservation in seconds. @@ -12291,64 +13674,36 @@ type ReservedCacheNodesOffering struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro - // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // - // * Compute optimized: - // - // Previous generation: (not recommended) - // - // C1 node types:cache.c1.xlarge - // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: - // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). - // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. 
- // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. - // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. + // Additional node type info // - // For a complete listing of node types and specifications, see: + // * All current generation instance types are created in Amazon VPC by default. // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The duration of the offering. in seconds. @@ -12499,7 +13854,8 @@ func (s *ResetCacheParameterGroupInput) SetResetAllParameters(v bool) *ResetCach type ReshardingConfiguration struct { _ struct{} `type:"structure"` - // The 4-digit id for the node group these configuration values apply to. + // Either the ElastiCache for Redis supplied 4-digit id or a user supplied id + // for the node group these configuration values apply to. NodeGroupId *string `min:"1" type:"string"` // A list of preferred availability zones for the nodes in this cluster. @@ -12698,6 +14054,133 @@ func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership { return s } +// An update that you can apply to your Redis clusters. +type ServiceUpdate struct { + _ struct{} `type:"structure"` + + // Indicates whether the service update will be automatically applied once the + // recommended apply-by date has expired. + AutoUpdateAfterRecommendedApplyByDate *bool `type:"boolean"` + + // The Elasticache engine to which the update applies. Either Redis or Memcached + Engine *string `type:"string"` + + // The Elasticache engine version to which the update applies. Either Redis + // or Memcached engine version + EngineVersion *string `type:"string"` + + // The estimated length of time the service update will take + EstimatedUpdateTime *string `type:"string"` + + // Provides details of the service update + ServiceUpdateDescription *string `type:"string"` + + // The date after which the service update is no longer available + ServiceUpdateEndDate *time.Time `type:"timestamp"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The recommendend date to apply the service update in order to ensure compliance. + // For information on compliance, see Self-Service Security Updates for Compliance + // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/elasticache-compliance.html#elasticache-compliance-self-service). 
+ ServiceUpdateRecommendedApplyByDate *time.Time `type:"timestamp"` + + // The date when the service update is initially available + ServiceUpdateReleaseDate *time.Time `type:"timestamp"` + + // The severity of the service update + ServiceUpdateSeverity *string `type:"string" enum:"ServiceUpdateSeverity"` + + // The status of the service update + ServiceUpdateStatus *string `type:"string" enum:"ServiceUpdateStatus"` + + // Reflects the nature of the service update + ServiceUpdateType *string `type:"string" enum:"ServiceUpdateType"` +} + +// String returns the string representation +func (s ServiceUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceUpdate) GoString() string { + return s.String() +} + +// SetAutoUpdateAfterRecommendedApplyByDate sets the AutoUpdateAfterRecommendedApplyByDate field's value. +func (s *ServiceUpdate) SetAutoUpdateAfterRecommendedApplyByDate(v bool) *ServiceUpdate { + s.AutoUpdateAfterRecommendedApplyByDate = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ServiceUpdate) SetEngine(v string) *ServiceUpdate { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ServiceUpdate) SetEngineVersion(v string) *ServiceUpdate { + s.EngineVersion = &v + return s +} + +// SetEstimatedUpdateTime sets the EstimatedUpdateTime field's value. +func (s *ServiceUpdate) SetEstimatedUpdateTime(v string) *ServiceUpdate { + s.EstimatedUpdateTime = &v + return s +} + +// SetServiceUpdateDescription sets the ServiceUpdateDescription field's value. +func (s *ServiceUpdate) SetServiceUpdateDescription(v string) *ServiceUpdate { + s.ServiceUpdateDescription = &v + return s +} + +// SetServiceUpdateEndDate sets the ServiceUpdateEndDate field's value. +func (s *ServiceUpdate) SetServiceUpdateEndDate(v time.Time) *ServiceUpdate { + s.ServiceUpdateEndDate = &v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *ServiceUpdate) SetServiceUpdateName(v string) *ServiceUpdate { + s.ServiceUpdateName = &v + return s +} + +// SetServiceUpdateRecommendedApplyByDate sets the ServiceUpdateRecommendedApplyByDate field's value. +func (s *ServiceUpdate) SetServiceUpdateRecommendedApplyByDate(v time.Time) *ServiceUpdate { + s.ServiceUpdateRecommendedApplyByDate = &v + return s +} + +// SetServiceUpdateReleaseDate sets the ServiceUpdateReleaseDate field's value. +func (s *ServiceUpdate) SetServiceUpdateReleaseDate(v time.Time) *ServiceUpdate { + s.ServiceUpdateReleaseDate = &v + return s +} + +// SetServiceUpdateSeverity sets the ServiceUpdateSeverity field's value. +func (s *ServiceUpdate) SetServiceUpdateSeverity(v string) *ServiceUpdate { + s.ServiceUpdateSeverity = &v + return s +} + +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *ServiceUpdate) SetServiceUpdateStatus(v string) *ServiceUpdate { + s.ServiceUpdateStatus = &v + return s +} + +// SetServiceUpdateType sets the ServiceUpdateType field's value. +func (s *ServiceUpdate) SetServiceUpdateType(v string) *ServiceUpdate { + s.ServiceUpdateType = &v + return s +} + // Represents the progress of an online resharding operation. type SlotMigration struct { _ struct{} `type:"structure"` @@ -12738,7 +14221,7 @@ type Snapshot struct { // // * Redis versions earlier than 2.8.6. // - // * Redis (cluster mode disabled): T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 node types. 
// // * Redis (cluster mode enabled): T1 node types. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` @@ -12755,64 +14238,36 @@ type Snapshot struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: - // - // Current generation: - // - // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium - // - // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - // cache.m4.10xlarge - // - // Previous generation: (not recommended) - // - // T1 node types:cache.t1.micro + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, + // cache.t2.medium Previous generation: (not recommended) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge // - // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // Additional node type info // - // * Compute optimized: + // * All current generation instance types are created in Amazon VPC by default. // - // Previous generation: (not recommended) + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. // - // C1 node types:cache.c1.xlarge + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. // - // * Memory optimized: - // - // Current generation: - // - // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // R4 node types;cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge - // - // Previous generation: (not recommended) - // - // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // - // Notes: - // - // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon - // VPC). - // - // * Redis (cluster mode disabled): Redis backup/restore is not supported - // on T1 and T2 instances. - // - // * Redis (cluster mode enabled): Backup/restore is not supported on T1 - // instances. - // - // * Redis Append-only files (AOF) functionality is not supported for T1 - // or T2 instances. 
- // - // For a complete listing of node types and specifications, see: - // - // * Amazon ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) - // - // * Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) - // - // * Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific) + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. CacheNodeType *string `type:"string"` // The cache parameter group that is associated with the source cluster. @@ -12827,6 +14282,9 @@ type Snapshot struct { // The version of the cache engine version that is used by the source cluster. EngineVersion *string `type:"string"` + // The ID of the KMS key used to encrypt the snapshot. + KmsKeyId *string `type:"string"` + // A list of the cache nodes in the source cluster. NodeSnapshots []*NodeSnapshot `locationNameList:"NodeSnapshot" type:"list"` @@ -12977,6 +14435,12 @@ func (s *Snapshot) SetEngineVersion(v string) *Snapshot { return s } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *Snapshot) SetKmsKeyId(v string) *Snapshot { + s.KmsKeyId = &v + return s +} + // SetNodeSnapshots sets the NodeSnapshots field's value. func (s *Snapshot) SetNodeSnapshots(v []*NodeSnapshot) *Snapshot { s.NodeSnapshots = v @@ -13067,6 +14531,82 @@ func (s *Snapshot) SetVpcId(v string) *Snapshot { return s } +type StartMigrationInput struct { + _ struct{} `type:"structure"` + + // List of endpoints from which data should be migrated. For Redis (cluster + // mode disabled), list should have only one element. + // + // CustomerNodeEndpointList is a required field + CustomerNodeEndpointList []*CustomerNodeEndpoint `type:"list" required:"true"` + + // The ID of the replication group to which data should be migrated. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMigrationInput"} + if s.CustomerNodeEndpointList == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerNodeEndpointList")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerNodeEndpointList sets the CustomerNodeEndpointList field's value. +func (s *StartMigrationInput) SetCustomerNodeEndpointList(v []*CustomerNodeEndpoint) *StartMigrationInput { + s.CustomerNodeEndpointList = v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *StartMigrationInput) SetReplicationGroupId(v string) *StartMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type StartMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. 
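StartMigrationInput above validates its two required fields before the call. A sketch of initiating an online migration from a self-managed Redis endpoint, assuming CustomerNodeEndpoint (defined elsewhere in this file) exposes Address and Port fields:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// migrateFromSelfHosted kicks off online migration from a self-managed
// Redis endpoint into an existing replication group.
func migrateFromSelfHosted(svc *elasticache.ElastiCache, groupID, host string, port int64) error {
	in := &elasticache.StartMigrationInput{
		ReplicationGroupId: aws.String(groupID),
		// For Redis (cluster mode disabled) the list holds one endpoint.
		// Address/Port are assumed field names on CustomerNodeEndpoint.
		CustomerNodeEndpointList: []*elasticache.CustomerNodeEndpoint{
			{Address: aws.String(host), Port: aws.Int64(port)},
		},
	}
	// Validate catches the two required-field errors client-side.
	if err := in.Validate(); err != nil {
		return err
	}
	_, err := svc.StartMigration(in)
	return err
}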
+ ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s StartMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *StartMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *StartMigrationOutput { + s.ReplicationGroup = v + return s +} + // Represents the subnet associated with a cluster. This parameter refers to // subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with // ElastiCache. @@ -13243,6 +14783,274 @@ func (s *TestFailoverOutput) SetReplicationGroup(v *ReplicationGroup) *TestFailo return s } +// Filters update actions from the service updates that are in available status +// during the time range. +type TimeRangeFilter struct { + _ struct{} `type:"structure"` + + // The end time of the time range filter + EndTime *time.Time `type:"timestamp"` + + // The start time of the time range filter + StartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s TimeRangeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeRangeFilter) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *TimeRangeFilter) SetEndTime(v time.Time) *TimeRangeFilter { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *TimeRangeFilter) SetStartTime(v time.Time) *TimeRangeFilter { + s.StartTime = &v + return s +} + +// Update action that has failed to be processed for the corresponding apply/stop +// request +type UnprocessedUpdateAction struct { + _ struct{} `type:"structure"` + + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + + // The error message that describes the reason the request was not processed + ErrorMessage *string `type:"string"` + + // The error type for requests that are not processed + ErrorType *string `type:"string"` + + // The replication group ID + ReplicationGroupId *string `type:"string"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` +} + +// String returns the string representation +func (s UnprocessedUpdateAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnprocessedUpdateAction) GoString() string { + return s.String() +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *UnprocessedUpdateAction) SetCacheClusterId(v string) *UnprocessedUpdateAction { + s.CacheClusterId = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UnprocessedUpdateAction) SetErrorMessage(v string) *UnprocessedUpdateAction { + s.ErrorMessage = &v + return s +} + +// SetErrorType sets the ErrorType field's value. +func (s *UnprocessedUpdateAction) SetErrorType(v string) *UnprocessedUpdateAction { + s.ErrorType = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *UnprocessedUpdateAction) SetReplicationGroupId(v string) *UnprocessedUpdateAction { + s.ReplicationGroupId = &v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. 
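TimeRangeFilter above scopes DescribeUpdateActions to service updates that were available within a window, via the ServiceUpdateTimeRange field documented earlier in this section. A look-back sketch:

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// updateActionsSince returns update actions whose service updates were
// available between `since` and now.
func updateActionsSince(svc *elasticache.ElastiCache, since time.Time) (*elasticache.DescribeUpdateActionsOutput, error) {
	return svc.DescribeUpdateActions(&elasticache.DescribeUpdateActionsInput{
		ServiceUpdateTimeRange: &elasticache.TimeRangeFilter{
			StartTime: aws.Time(since),
			EndTime:   aws.Time(time.Now()),
		},
	})
}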
+func (s *UnprocessedUpdateAction) SetServiceUpdateName(v string) *UnprocessedUpdateAction { + s.ServiceUpdateName = &v + return s +} + +// The status of the service update for a specific replication group +type UpdateAction struct { + _ struct{} `type:"structure"` + + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + + // The status of the service update on the cache node + CacheNodeUpdateStatus []*CacheNodeUpdateStatus `locationNameList:"CacheNodeUpdateStatus" type:"list"` + + // The Elasticache engine to which the update applies. Either Redis or Memcached + Engine *string `type:"string"` + + // The estimated length of time for the update to complete + EstimatedUpdateTime *string `type:"string"` + + // The status of the service update on the node group + NodeGroupUpdateStatus []*NodeGroupUpdateStatus `locationNameList:"NodeGroupUpdateStatus" type:"list"` + + // The progress of the service update on the replication group + NodesUpdated *string `type:"string"` + + // The ID of the replication group + ReplicationGroupId *string `type:"string"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The recommended date to apply the service update to ensure compliance. For + // information on compliance, see Self-Service Security Updates for Compliance + // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/elasticache-compliance.html#elasticache-compliance-self-service). + ServiceUpdateRecommendedApplyByDate *time.Time `type:"timestamp"` + + // The date the update is first available + ServiceUpdateReleaseDate *time.Time `type:"timestamp"` + + // The severity of the service update + ServiceUpdateSeverity *string `type:"string" enum:"ServiceUpdateSeverity"` + + // The status of the service update + ServiceUpdateStatus *string `type:"string" enum:"ServiceUpdateStatus"` + + // Reflects the nature of the service update + ServiceUpdateType *string `type:"string" enum:"ServiceUpdateType"` + + // If yes, all nodes in the replication group have been updated by the recommended + // apply-by date. If no, at least one node in the replication group have not + // been updated by the recommended apply-by date. If N/A, the replication group + // was created after the recommended apply-by date. + SlaMet *string `type:"string" enum:"SlaMet"` + + // The date that the service update is available to a replication group + UpdateActionAvailableDate *time.Time `type:"timestamp"` + + // The status of the update action + UpdateActionStatus *string `type:"string" enum:"UpdateActionStatus"` + + // The date when the UpdateActionStatus was last modified + UpdateActionStatusModifiedDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s UpdateAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAction) GoString() string { + return s.String() +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *UpdateAction) SetCacheClusterId(v string) *UpdateAction { + s.CacheClusterId = &v + return s +} + +// SetCacheNodeUpdateStatus sets the CacheNodeUpdateStatus field's value. +func (s *UpdateAction) SetCacheNodeUpdateStatus(v []*CacheNodeUpdateStatus) *UpdateAction { + s.CacheNodeUpdateStatus = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *UpdateAction) SetEngine(v string) *UpdateAction { + s.Engine = &v + return s +} + +// SetEstimatedUpdateTime sets the EstimatedUpdateTime field's value. 
+func (s *UpdateAction) SetEstimatedUpdateTime(v string) *UpdateAction { + s.EstimatedUpdateTime = &v + return s +} + +// SetNodeGroupUpdateStatus sets the NodeGroupUpdateStatus field's value. +func (s *UpdateAction) SetNodeGroupUpdateStatus(v []*NodeGroupUpdateStatus) *UpdateAction { + s.NodeGroupUpdateStatus = v + return s +} + +// SetNodesUpdated sets the NodesUpdated field's value. +func (s *UpdateAction) SetNodesUpdated(v string) *UpdateAction { + s.NodesUpdated = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *UpdateAction) SetReplicationGroupId(v string) *UpdateAction { + s.ReplicationGroupId = &v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *UpdateAction) SetServiceUpdateName(v string) *UpdateAction { + s.ServiceUpdateName = &v + return s +} + +// SetServiceUpdateRecommendedApplyByDate sets the ServiceUpdateRecommendedApplyByDate field's value. +func (s *UpdateAction) SetServiceUpdateRecommendedApplyByDate(v time.Time) *UpdateAction { + s.ServiceUpdateRecommendedApplyByDate = &v + return s +} + +// SetServiceUpdateReleaseDate sets the ServiceUpdateReleaseDate field's value. +func (s *UpdateAction) SetServiceUpdateReleaseDate(v time.Time) *UpdateAction { + s.ServiceUpdateReleaseDate = &v + return s +} + +// SetServiceUpdateSeverity sets the ServiceUpdateSeverity field's value. +func (s *UpdateAction) SetServiceUpdateSeverity(v string) *UpdateAction { + s.ServiceUpdateSeverity = &v + return s +} + +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *UpdateAction) SetServiceUpdateStatus(v string) *UpdateAction { + s.ServiceUpdateStatus = &v + return s +} + +// SetServiceUpdateType sets the ServiceUpdateType field's value. +func (s *UpdateAction) SetServiceUpdateType(v string) *UpdateAction { + s.ServiceUpdateType = &v + return s +} + +// SetSlaMet sets the SlaMet field's value. +func (s *UpdateAction) SetSlaMet(v string) *UpdateAction { + s.SlaMet = &v + return s +} + +// SetUpdateActionAvailableDate sets the UpdateActionAvailableDate field's value. +func (s *UpdateAction) SetUpdateActionAvailableDate(v time.Time) *UpdateAction { + s.UpdateActionAvailableDate = &v + return s +} + +// SetUpdateActionStatus sets the UpdateActionStatus field's value. +func (s *UpdateAction) SetUpdateActionStatus(v string) *UpdateAction { + s.UpdateActionStatus = &v + return s +} + +// SetUpdateActionStatusModifiedDate sets the UpdateActionStatusModifiedDate field's value. 
+func (s *UpdateAction) SetUpdateActionStatusModifiedDate(v time.Time) *UpdateAction { + s.UpdateActionStatusModifiedDate = &v + return s +} + const ( // AZModeSingleAz is a AZMode enum value AZModeSingleAz = "single-az" @@ -13251,6 +15059,22 @@ const ( AZModeCrossAz = "cross-az" ) +const ( + // AuthTokenUpdateStatusSetting is a AuthTokenUpdateStatus enum value + AuthTokenUpdateStatusSetting = "SETTING" + + // AuthTokenUpdateStatusRotating is a AuthTokenUpdateStatus enum value + AuthTokenUpdateStatusRotating = "ROTATING" +) + +const ( + // AuthTokenUpdateStrategyTypeSet is a AuthTokenUpdateStrategyType enum value + AuthTokenUpdateStrategyTypeSet = "SET" + + // AuthTokenUpdateStrategyTypeRotate is a AuthTokenUpdateStrategyType enum value + AuthTokenUpdateStrategyTypeRotate = "ROTATE" +) + const ( // AutomaticFailoverStatusEnabled is a AutomaticFailoverStatus enum value AutomaticFailoverStatusEnabled = "enabled" @@ -13273,6 +15097,34 @@ const ( ChangeTypeRequiresReboot = "requires-reboot" ) +const ( + // NodeUpdateInitiatedBySystem is a NodeUpdateInitiatedBy enum value + NodeUpdateInitiatedBySystem = "system" + + // NodeUpdateInitiatedByCustomer is a NodeUpdateInitiatedBy enum value + NodeUpdateInitiatedByCustomer = "customer" +) + +const ( + // NodeUpdateStatusNotApplied is a NodeUpdateStatus enum value + NodeUpdateStatusNotApplied = "not-applied" + + // NodeUpdateStatusWaitingToStart is a NodeUpdateStatus enum value + NodeUpdateStatusWaitingToStart = "waiting-to-start" + + // NodeUpdateStatusInProgress is a NodeUpdateStatus enum value + NodeUpdateStatusInProgress = "in-progress" + + // NodeUpdateStatusStopping is a NodeUpdateStatus enum value + NodeUpdateStatusStopping = "stopping" + + // NodeUpdateStatusStopped is a NodeUpdateStatus enum value + NodeUpdateStatusStopped = "stopped" + + // NodeUpdateStatusComplete is a NodeUpdateStatus enum value + NodeUpdateStatusComplete = "complete" +) + const ( // PendingAutomaticFailoverStatusEnabled is a PendingAutomaticFailoverStatus enum value PendingAutomaticFailoverStatusEnabled = "enabled" @@ -13281,6 +15133,47 @@ const ( PendingAutomaticFailoverStatusDisabled = "disabled" ) +const ( + // ServiceUpdateSeverityCritical is a ServiceUpdateSeverity enum value + ServiceUpdateSeverityCritical = "critical" + + // ServiceUpdateSeverityImportant is a ServiceUpdateSeverity enum value + ServiceUpdateSeverityImportant = "important" + + // ServiceUpdateSeverityMedium is a ServiceUpdateSeverity enum value + ServiceUpdateSeverityMedium = "medium" + + // ServiceUpdateSeverityLow is a ServiceUpdateSeverity enum value + ServiceUpdateSeverityLow = "low" +) + +const ( + // ServiceUpdateStatusAvailable is a ServiceUpdateStatus enum value + ServiceUpdateStatusAvailable = "available" + + // ServiceUpdateStatusCancelled is a ServiceUpdateStatus enum value + ServiceUpdateStatusCancelled = "cancelled" + + // ServiceUpdateStatusExpired is a ServiceUpdateStatus enum value + ServiceUpdateStatusExpired = "expired" +) + +const ( + // ServiceUpdateTypeSecurityUpdate is a ServiceUpdateType enum value + ServiceUpdateTypeSecurityUpdate = "security-update" +) + +const ( + // SlaMetYes is a SlaMet enum value + SlaMetYes = "yes" + + // SlaMetNo is a SlaMet enum value + SlaMetNo = "no" + + // SlaMetNA is a SlaMet enum value + SlaMetNA = "n/a" +) + const ( // SourceTypeCacheCluster is a SourceType enum value SourceTypeCacheCluster = "cache-cluster" @@ -13297,3 +15190,23 @@ const ( // SourceTypeReplicationGroup is a SourceType enum value SourceTypeReplicationGroup = "replication-group" 
) + +const ( + // UpdateActionStatusNotApplied is a UpdateActionStatus enum value + UpdateActionStatusNotApplied = "not-applied" + + // UpdateActionStatusWaitingToStart is a UpdateActionStatus enum value + UpdateActionStatusWaitingToStart = "waiting-to-start" + + // UpdateActionStatusInProgress is a UpdateActionStatus enum value + UpdateActionStatusInProgress = "in-progress" + + // UpdateActionStatusStopping is a UpdateActionStatus enum value + UpdateActionStatusStopping = "stopping" + + // UpdateActionStatusStopped is a UpdateActionStatus enum value + UpdateActionStatusStopped = "stopped" + + // UpdateActionStatusComplete is a UpdateActionStatus enum value + UpdateActionStatusComplete = "complete" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go index e35a9ece81a..25579b1d5d6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go @@ -121,7 +121,8 @@ const ( // "InsufficientCacheClusterCapacity". // // The requested cache node type is not available in the specified Availability - // Zone. + // Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) + // in the ElastiCache User Guide. ErrCodeInsufficientCacheClusterCapacityFault = "InsufficientCacheClusterCapacity" // ErrCodeInvalidARNFault for service response error code @@ -149,6 +150,12 @@ const ( // The current state of the cache security group does not allow deletion. ErrCodeInvalidCacheSecurityGroupStateFault = "InvalidCacheSecurityGroupState" + // ErrCodeInvalidKMSKeyFault for service response error code + // "InvalidKMSKeyFault". + // + // The KMS key supplied is not valid. + ErrCodeInvalidKMSKeyFault = "InvalidKMSKeyFault" + // ErrCodeInvalidParameterCombinationException for service response error code // "InvalidParameterCombination". // @@ -205,7 +212,7 @@ const ( // // The request cannot be processed because it would exceed the maximum allowed // number of node groups (shards) in a single replication group. The default - // maximum is 15 + // maximum is 90 ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault = "NodeGroupsPerReplicationGroupQuotaExceeded" // ErrCodeNodeQuotaForClusterExceededFault for service response error code @@ -228,12 +235,24 @@ const ( // The specified replication group already exists. ErrCodeReplicationGroupAlreadyExistsFault = "ReplicationGroupAlreadyExists" + // ErrCodeReplicationGroupAlreadyUnderMigrationFault for service response error code + // "ReplicationGroupAlreadyUnderMigrationFault". + // + // The targeted replication group is not available. + ErrCodeReplicationGroupAlreadyUnderMigrationFault = "ReplicationGroupAlreadyUnderMigrationFault" + // ErrCodeReplicationGroupNotFoundFault for service response error code // "ReplicationGroupNotFoundFault". // // The specified replication group does not exist. ErrCodeReplicationGroupNotFoundFault = "ReplicationGroupNotFoundFault" + // ErrCodeReplicationGroupNotUnderMigrationFault for service response error code + // "ReplicationGroupNotUnderMigrationFault". + // + // The designated replication group is not available for data migration. 
+ ErrCodeReplicationGroupNotUnderMigrationFault = "ReplicationGroupNotUnderMigrationFault" + // ErrCodeReservedCacheNodeAlreadyExistsFault for service response error code // "ReservedCacheNodeAlreadyExists". // @@ -265,6 +284,12 @@ const ( // The specified service linked role (SLR) was not found. ErrCodeServiceLinkedRoleNotFoundFault = "ServiceLinkedRoleNotFoundFault" + // ErrCodeServiceUpdateNotFoundFault for service response error code + // "ServiceUpdateNotFoundFault". + // + // The service update doesn't exist + ErrCodeServiceUpdateNotFoundFault = "ServiceUpdateNotFoundFault" + // ErrCodeSnapshotAlreadyExistsFault for service response error code // "SnapshotAlreadyExistsFault". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go index fd5f8c51707..2ad929dca32 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElastiCache { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElastiCache { svc := &ElastiCache{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-02", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go index 87538248898..2e91780c444 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go @@ -2237,7 +2237,7 @@ func (c *ElasticBeanstalk) DescribeEventsWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeEvents operation. 
// pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *elasticbeanstalk.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2269,10 +2269,12 @@ func (c *ElasticBeanstalk) DescribeEventsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3647,11 +3649,15 @@ func (c *ElasticBeanstalk) UpdateTagsForResourceRequest(input *UpdateTagsForReso // specify one of the following two virtual actions (or both) instead of the // API operation name: // -// elasticbeanstalk:AddTagsControls permission to call UpdateTagsForResource -// and pass a list of tags to add in the TagsToAdd parameter. +// elasticbeanstalk:AddTags +// +// Controls permission to call UpdateTagsForResource and pass a list of tags +// to add in the TagsToAdd parameter. +// +// elasticbeanstalk:RemoveTags // -// elasticbeanstalk:RemoveTagsControls permission to call UpdateTagsForResource -// and pass a list of tag keys to remove in the TagsToRemove parameter. +// Controls permission to call UpdateTagsForResource and pass a list of tag +// keys to remove in the TagsToRemove parameter. // // For details about creating a custom user policy, see Creating a Custom User // Policy (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/AWSHowTo.iam.managed-policies.html#AWSHowTo.iam.policies). @@ -9464,7 +9470,7 @@ func (s *PlatformDescription) SetSupportedTierList(v []*string) *PlatformDescrip // // The filter is evaluated as the expression: // -// TypeOperatorValues[i] +// Type Operator Values[i] type PlatformFilter struct { _ struct{} `type:"structure"` @@ -10683,7 +10689,7 @@ type TerminateEnvironmentInput struct { // * false: AWS Elastic Beanstalk resource management is removed from the // environment, but the AWS resources continue to operate. // - // For more information, see the AWS Elastic Beanstalk User Guide. (https://docs.aws.amazon.com/elasticbeanstalk/latest/ug/) + // For more information, see the AWS Elastic Beanstalk User Guide. (https://docs.aws.amazon.com/elasticbeanstalk/latest/ug/) // // Default: true // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go index 12e8b1c819a..841587452ab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticbeanstalk.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticBeanstalk { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticBeanstalk { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticBeanstalk { svc := &ElasticBeanstalk{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go index b3954643d53..5208c41a15f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go @@ -57,7 +57,7 @@ func (c *ElasticsearchService) AddTagsRequest(input *AddTagsInput) (req *request // AddTags API operation for Amazon Elasticsearch Service. // // Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive -// key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging +// key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging // Amazon Elasticsearch Service Domains for more information. (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-awsresorcetagging) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -952,7 +952,7 @@ func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsWit // // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstanceOfferings operation. // pageNum := 0 // err := client.DescribeReservedElasticsearchInstanceOfferingsPages(params, -// func(page *DescribeReservedElasticsearchInstanceOfferingsOutput, lastPage bool) bool { +// func(page *elasticsearchservice.DescribeReservedElasticsearchInstanceOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -984,10 +984,12 @@ func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsPag }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedElasticsearchInstanceOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedElasticsearchInstanceOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1098,7 +1100,7 @@ func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesWithContext // // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstances operation. 
// pageNum := 0 // err := client.DescribeReservedElasticsearchInstancesPages(params, -// func(page *DescribeReservedElasticsearchInstancesOutput, lastPage bool) bool { +// func(page *elasticsearchservice.DescribeReservedElasticsearchInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1130,10 +1132,12 @@ func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesPagesWithCo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedElasticsearchInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedElasticsearchInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1343,7 +1347,7 @@ func (c *ElasticsearchService) GetUpgradeHistoryWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a GetUpgradeHistory operation. // pageNum := 0 // err := client.GetUpgradeHistoryPages(params, -// func(page *GetUpgradeHistoryOutput, lastPage bool) bool { +// func(page *elasticsearchservice.GetUpgradeHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1375,10 +1379,12 @@ func (c *ElasticsearchService) GetUpgradeHistoryPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetUpgradeHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetUpgradeHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1663,7 +1669,7 @@ func (c *ElasticsearchService) ListElasticsearchInstanceTypesWithContext(ctx aws // // Example iterating over at most 3 pages of a ListElasticsearchInstanceTypes operation. // pageNum := 0 // err := client.ListElasticsearchInstanceTypesPages(params, -// func(page *ListElasticsearchInstanceTypesOutput, lastPage bool) bool { +// func(page *elasticsearchservice.ListElasticsearchInstanceTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1695,10 +1701,12 @@ func (c *ElasticsearchService) ListElasticsearchInstanceTypesPagesWithContext(ct }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListElasticsearchInstanceTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListElasticsearchInstanceTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1808,7 +1816,7 @@ func (c *ElasticsearchService) ListElasticsearchVersionsWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a ListElasticsearchVersions operation. // pageNum := 0 // err := client.ListElasticsearchVersionsPages(params, -// func(page *ListElasticsearchVersionsOutput, lastPage bool) bool { +// func(page *elasticsearchservice.ListElasticsearchVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1840,10 +1848,12 @@ func (c *ElasticsearchService) ListElasticsearchVersionsPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListElasticsearchVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListElasticsearchVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2411,7 +2421,7 @@ type AccessPoliciesStatus struct { _ struct{} `type:"structure"` // The access policy configured for the Elasticsearch domain. Access policies - // may be resource-based, IP-based, or IAM-based. See Configuring Access Policies + // may be resource-based, IP-based, or IAM-based. 
See Configuring Access Policies // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies)for // more information. // @@ -2531,11 +2541,10 @@ type AdditionalLimit struct { _ struct{} `type:"structure"` // Name of Additional Limit is specific to a given InstanceType and for each - // of it's InstanceRole etc. Attributes and their details: MaximumNumberOfDataNodesSupported - // This attribute will be present in Master node only to specify how much data - // nodes upto which given ESPartitionInstanceTypecan support as master node. MaximumNumberOfDataNodesWithoutMasterNode - // This attribute will be present in Data node only to specify how much data - // nodes of given ESPartitionInstanceType + // of it's InstanceRole etc. Attributes and their details: + // * MaximumNumberOfDataNodesSupported + // + // * MaximumNumberOfDataNodesWithoutMasterNode LimitName *string `type:"string"` // Value for given AdditionalLimit$LimitName . @@ -2569,10 +2578,11 @@ func (s *AdditionalLimit) SetLimitValues(v []*string) *AdditionalLimit { // // * Option to allow references to indices in an HTTP request body. Must // be false when configuring access to individual sub-resources. By default, -// the value is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options) -// for more information. +// the value is true. See Configuration Advanced Options for more information. +// // * Option to specify the percentage of heap space that is allocated to // field data. By default, this setting is unbounded. +// // For more information, see Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options). type AdvancedOptionsStatus struct { _ struct{} `type:"structure"` @@ -2842,9 +2852,12 @@ type CreateElasticsearchDomainInput struct { // For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). CognitoOptions *CognitoOptions `type:"structure"` + // Options to specify configuration that will be applied to the domain endpoint. + DomainEndpointOptions *DomainEndpointOptions `type:"structure"` + // The name of the Elasticsearch domain that you are creating. Domain names // are unique across the domains owned by an account within an AWS region. Domain - // names must start with a letter or number and can contain the following characters: + // names must start with a lowercase letter and can contain the following characters: // a-z (lowercase), 0-9, and - (hyphen). // // DomainName is a required field @@ -2937,6 +2950,12 @@ func (s *CreateElasticsearchDomainInput) SetCognitoOptions(v *CognitoOptions) *C return s } +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *CreateElasticsearchDomainInput) SetDomainEndpointOptions(v *DomainEndpointOptions) *CreateElasticsearchDomainInput { + s.DomainEndpointOptions = v + return s +} + // SetDomainName sets the DomainName field's value. func (s *CreateElasticsearchDomainInput) SetDomainName(v string) *CreateElasticsearchDomainInput { s.DomainName = &v @@ -3400,9 +3419,10 @@ type DescribeElasticsearchInstanceTypeLimitsOutput struct { _ struct{} `type:"structure"` // Map of Role of the Instance and Limits that are applicable. 
Role performed - // by given Instance in Elasticsearch can be one of the following: Data: If - // the given InstanceType is used as Data node - // Master: If the given InstanceType is used as Master node + // by given Instance in Elasticsearch can be one of the following: + // * Data: If the given InstanceType is used as Data node + // + // * Master: If the given InstanceType is used as Master node LimitsByRole map[string]*Limits `type:"map"` } @@ -3579,6 +3599,83 @@ func (s *DescribeReservedElasticsearchInstancesOutput) SetReservedElasticsearchI return s } +// Options to configure endpoint for the Elasticsearch domain. +type DomainEndpointOptions struct { + _ struct{} `type:"structure"` + + // Specify if only HTTPS endpoint should be enabled for the Elasticsearch domain. + EnforceHTTPS *bool `type:"boolean"` + + // Specify the TLS security policy that needs to be applied to the HTTPS endpoint + // of Elasticsearch domain. It can be one of the following values: + // * Policy-Min-TLS-1-0-2019-07: TLS security policy which supports TLSv1.0 + // and higher. + // + // * Policy-Min-TLS-1-2-2019-07: TLS security policy which supports only + // TLSv1.2 + TLSSecurityPolicy *string `type:"string" enum:"TLSSecurityPolicy"` +} + +// String returns the string representation +func (s DomainEndpointOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainEndpointOptions) GoString() string { + return s.String() +} + +// SetEnforceHTTPS sets the EnforceHTTPS field's value. +func (s *DomainEndpointOptions) SetEnforceHTTPS(v bool) *DomainEndpointOptions { + s.EnforceHTTPS = &v + return s +} + +// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. +func (s *DomainEndpointOptions) SetTLSSecurityPolicy(v string) *DomainEndpointOptions { + s.TLSSecurityPolicy = &v + return s +} + +// The configured endpoint options for the domain and their current status. +type DomainEndpointOptionsStatus struct { + _ struct{} `type:"structure"` + + // Options to configure endpoint for the Elasticsearch domain. + // + // Options is a required field + Options *DomainEndpointOptions `type:"structure" required:"true"` + + // The status of the endpoint options for the Elasticsearch domain. See OptionStatus + // for the status information that's included. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainEndpointOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *DomainEndpointOptionsStatus) SetOptions(v *DomainEndpointOptions) *DomainEndpointOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DomainEndpointOptionsStatus) SetStatus(v *OptionStatus) *DomainEndpointOptionsStatus { + s.Status = v + return s +} + type DomainInfo struct { _ struct{} `type:"structure"` @@ -3603,7 +3700,7 @@ func (s *DomainInfo) SetDomainName(v string) *DomainInfo { } // Options to enable, disable, and specify the properties of EBS storage volumes. -// For more information, see Configuring EBS-based Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs). 
+// For more information, see Configuring EBS-based Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs). type EBSOptions struct { _ struct{} `type:"structure"` @@ -3829,6 +3926,9 @@ type ElasticsearchDomainConfig struct { // Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). CognitoOptions *CognitoOptionsStatus `type:"structure"` + // Specifies the DomainEndpointOptions for the Elasticsearch domain. + DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` + // Specifies the EBSOptions for the Elasticsearch domain. EBSOptions *EBSOptionsStatus `type:"structure"` @@ -3883,6 +3983,12 @@ func (s *ElasticsearchDomainConfig) SetCognitoOptions(v *CognitoOptionsStatus) * return s } +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *ElasticsearchDomainConfig) SetDomainEndpointOptions(v *DomainEndpointOptionsStatus) *ElasticsearchDomainConfig { + s.DomainEndpointOptions = v + return s +} + // SetEBSOptions sets the EBSOptions field's value. func (s *ElasticsearchDomainConfig) SetEBSOptions(v *EBSOptionsStatus) *ElasticsearchDomainConfig { s.EBSOptions = v @@ -3962,6 +4068,9 @@ type ElasticsearchDomainStatus struct { // domain is no longer returned. Deleted *bool `type:"boolean"` + // The current status of the Elasticsearch domain's endpoint options. + DomainEndpointOptions *DomainEndpointOptions `type:"structure"` + // The unique identifier for the specified Elasticsearch domain. // // DomainId is a required field @@ -4070,6 +4179,12 @@ func (s *ElasticsearchDomainStatus) SetDeleted(v bool) *ElasticsearchDomainStatu return s } +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *ElasticsearchDomainStatus) SetDomainEndpointOptions(v *DomainEndpointOptions) *ElasticsearchDomainStatus { + s.DomainEndpointOptions = v + return s +} + // SetDomainId sets the DomainId field's value. func (s *ElasticsearchDomainStatus) SetDomainId(v string) *ElasticsearchDomainStatus { s.DomainId = &v @@ -4501,19 +4616,26 @@ type GetUpgradeStatusOutput struct { _ struct{} `type:"structure"` // One of 4 statuses that a step can go through returned as part of the GetUpgradeStatusResponse - // object. The status can take one of the following values: In Progress - // Succeeded - // Succeeded with Issues - // Failed + // object. The status can take one of the following values: + // * In Progress + // + // * Succeeded + // + // * Succeeded with Issues + // + // * Failed StepStatus *string `type:"string" enum:"UpgradeStatus"` // A string that describes the update briefly UpgradeName *string `type:"string"` // Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check does - // through: PreUpgradeCheck - // Snapshot - // Upgrade + // through: + // * PreUpgradeCheck + // + // * Snapshot + // + // * Upgrade UpgradeStep *string `type:"string" enum:"UpgradeStep"` } @@ -4605,7 +4727,8 @@ func (s *InstanceLimits) SetInstanceCountLimits(v *InstanceCountLimits) *Instanc return s } -// Limits for given InstanceType and for each of it's role. Limits contains following StorageTypes, InstanceLimitsand AdditionalLimits +// Limits for given InstanceType and for each of it's role. 
Limits contains +// following StorageTypes, InstanceLimits and AdditionalLimits type Limits struct { _ struct{} `type:"structure"` @@ -4803,10 +4926,10 @@ func (s *ListElasticsearchInstanceTypesOutput) SetNextToken(v string) *ListElast } // Container for the parameters to the ListElasticsearchVersions operation. -// Use MaxResults to control the maximum number of results to retrieve in a +// Use MaxResults to control the maximum number of results to retrieve in a // single call. // -// Use NextToken in response to retrieve more results. If the received response +// Use NextToken in response to retrieve more results. If the received response // does not contain a NextToken, then there are no more results to retrieve. type ListElasticsearchVersionsInput struct { _ struct{} `type:"structure"` @@ -4946,9 +5069,13 @@ func (s *ListTagsOutput) SetTagList(v []*Tag) *ListTagsOutput { return s } -// Log Publishing option that is set for given domain. Attributes and their details: CloudWatchLogsLogGroupArn: ARN of the Cloudwatch -// log group to which log needs to be published. -// Enabled: Whether the log publishing for given log type is enabled or not +// Log Publishing option that is set for given domain. Attributes and their +// details: +// * CloudWatchLogsLogGroupArn: ARN of the Cloudwatch log group to which +// log needs to be published. +// +// * Enabled: Whether the log publishing for given log type is enabled or +// not type LogPublishingOption struct { _ struct{} `type:"structure"` @@ -5799,18 +5926,16 @@ type StorageType struct { // SubType of the given storage type. List of available sub-storage options: // For "instance" storageType we wont have any storageSubType, in case of "ebs" - // storageType we will have following valid storageSubTypes standard - // gp2 - // io1 - // Refer VolumeType for more information regarding above EBS storage options. + // storageType we will have following valid storageSubTypes standard gp2 io1 + // Refer VolumeType for more information regarding above EBS storage options. StorageSubTypeName *string `type:"string"` // List of limits that are applicable for given storage type. StorageTypeLimits []*StorageTypeLimit `type:"list"` - // Type of the storage. List of available storage options: instance - // Inbuilt storage available for the given Instance ebs - // Elastic block storage that would be attached to the given Instance + // Type of the storage. List of available storage options: instance Inbuilt + // storage available for the given Instance ebs Elastic block storage that would + // be attached to the given Instance StorageTypeName *string `type:"string"` } @@ -5847,15 +5972,14 @@ type StorageTypeLimit struct { _ struct{} `type:"structure"` // Name of storage limits that are applicable for given storage type. If StorageType - // is ebs, following storage options are applicable MinimumVolumeSize - // Minimum amount of volume size that is applicable for given storage type.It - // can be empty if it is not applicable. MaximumVolumeSize - // Maximum amount of volume size that is applicable for given storage type.It - // can be empty if it is not applicable. MaximumIops - // Maximum amount of Iops that is applicable for given storage type.It can - // be empty if it is not applicable. MinimumIops - // Minimum amount of Iops that is applicable for given storage type.It can - // be empty if it is not applicable. 
+ // is ebs, following storage options are applicable MinimumVolumeSize Minimum + // amount of volume size that is applicable for given storage type.It can be + // empty if it is not applicable. MaximumVolumeSize Maximum amount of volume + // size that is applicable for given storage type.It can be empty if it is not + // applicable. MaximumIops Maximum amount of Iops that is applicable for given + // storage type.It can be empty if it is not applicable. MinimumIops Minimum + // amount of Iops that is applicable for given storage type.It can be empty + // if it is not applicable. LimitName *string `type:"string"` // Values for the StorageTypeLimit$LimitName . @@ -5962,6 +6086,9 @@ type UpdateElasticsearchDomainConfigInput struct { // For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). CognitoOptions *CognitoOptions `type:"structure"` + // Options to specify configuration that will be applied to the domain endpoint. + DomainEndpointOptions *DomainEndpointOptions `type:"structure"` + // The name of the Elasticsearch domain that you are updating. // // DomainName is a required field @@ -6036,6 +6163,12 @@ func (s *UpdateElasticsearchDomainConfigInput) SetCognitoOptions(v *CognitoOptio return s } +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *UpdateElasticsearchDomainConfigInput) SetDomainEndpointOptions(v *DomainEndpointOptions) *UpdateElasticsearchDomainConfigInput { + s.DomainEndpointOptions = v + return s +} + // SetDomainName sets the DomainName field's value. func (s *UpdateElasticsearchDomainConfigInput) SetDomainName(v string) *UpdateElasticsearchDomainConfigInput { s.DomainName = &v @@ -6230,10 +6363,14 @@ type UpgradeHistory struct { UpgradeName *string `type:"string"` // The overall status of the update. The status can take one of the following - // values: In Progress - // Succeeded - // Succeeded with Issues - // Failed + // values: + // * In Progress + // + // * Succeeded + // + // * Succeeded with Issues + // + // * Failed UpgradeStatus *string `type:"string" enum:"UpgradeStatus"` } @@ -6284,16 +6421,23 @@ type UpgradeStepItem struct { ProgressPercent *float64 `type:"double"` // Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check does - // through: PreUpgradeCheck - // Snapshot - // Upgrade + // through: + // * PreUpgradeCheck + // + // * Snapshot + // + // * Upgrade UpgradeStep *string `type:"string" enum:"UpgradeStep"` // The status of a particular step during an upgrade. The status can take one - // of the following values: In Progress - // Succeeded - // Succeeded with Issues - // Failed + // of the following values: + // * In Progress + // + // * Succeeded + // + // * Succeeded with Issues + // + // * Failed UpgradeStepStatus *string `type:"string" enum:"UpgradeStatus"` } @@ -6332,7 +6476,7 @@ func (s *UpgradeStepItem) SetUpgradeStepStatus(v string) *UpgradeStepItem { } // Options to specify the subnets and security groups for VPC endpoint. For -// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains +// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). 
type VPCDerivedInfo struct { _ struct{} `type:"structure"` @@ -6424,7 +6568,7 @@ func (s *VPCDerivedInfoStatus) SetStatus(v *OptionStatus) *VPCDerivedInfoStatus } // Options to specify the subnets and security groups for VPC endpoint. For -// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains +// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). type VPCOptions struct { _ struct{} `type:"structure"` @@ -6530,6 +6674,54 @@ const ( // ESPartitionInstanceTypeM410xlargeElasticsearch is a ESPartitionInstanceType enum value ESPartitionInstanceTypeM410xlargeElasticsearch = "m4.10xlarge.elasticsearch" + // ESPartitionInstanceTypeM5LargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeM5LargeElasticsearch = "m5.large.elasticsearch" + + // ESPartitionInstanceTypeM5XlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeM5XlargeElasticsearch = "m5.xlarge.elasticsearch" + + // ESPartitionInstanceTypeM52xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeM52xlargeElasticsearch = "m5.2xlarge.elasticsearch" + + // ESPartitionInstanceTypeM54xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeM54xlargeElasticsearch = "m5.4xlarge.elasticsearch" + + // ESPartitionInstanceTypeM512xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeM512xlargeElasticsearch = "m5.12xlarge.elasticsearch" + + // ESPartitionInstanceTypeR5LargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeR5LargeElasticsearch = "r5.large.elasticsearch" + + // ESPartitionInstanceTypeR5XlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeR5XlargeElasticsearch = "r5.xlarge.elasticsearch" + + // ESPartitionInstanceTypeR52xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeR52xlargeElasticsearch = "r5.2xlarge.elasticsearch" + + // ESPartitionInstanceTypeR54xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeR54xlargeElasticsearch = "r5.4xlarge.elasticsearch" + + // ESPartitionInstanceTypeR512xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeR512xlargeElasticsearch = "r5.12xlarge.elasticsearch" + + // ESPartitionInstanceTypeC5LargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC5LargeElasticsearch = "c5.large.elasticsearch" + + // ESPartitionInstanceTypeC5XlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC5XlargeElasticsearch = "c5.xlarge.elasticsearch" + + // ESPartitionInstanceTypeC52xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC52xlargeElasticsearch = "c5.2xlarge.elasticsearch" + + // ESPartitionInstanceTypeC54xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC54xlargeElasticsearch = "c5.4xlarge.elasticsearch" + + // ESPartitionInstanceTypeC59xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC59xlargeElasticsearch = "c5.9xlarge.elasticsearch" + + // ESPartitionInstanceTypeC518xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeC518xlargeElasticsearch = "c5.18xlarge.elasticsearch" + // ESPartitionInstanceTypeT2MicroElasticsearch is a ESPartitionInstanceType enum value 
ESPartitionInstanceTypeT2MicroElasticsearch = "t2.micro.elasticsearch" @@ -6624,14 +6816,16 @@ const ( ESPartitionInstanceTypeI316xlargeElasticsearch = "i3.16xlarge.elasticsearch" ) -// Type of Log File, it can be one of the following: INDEX_SLOW_LOGS: Index -// slow logs contain insert requests that took more time than configured index -// query log threshold to execute. -// SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more -// time than configured search query log threshold to execute. -// ES_APPLICATION_LOGS: Elasticsearch application logs contain information about -// errors and warnings raised during the operation of the service and can be -// useful for troubleshooting. +// Type of Log File, it can be one of the following: +// * INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more +// time than configured index query log threshold to execute. +// +// * SEARCH_SLOW_LOGS: Search slow logs contain search queries that took +// more time than configured search query log threshold to execute. +// +// * ES_APPLICATION_LOGS: Elasticsearch application logs contain information +// about errors and warnings raised during the operation of the service and +// can be useful for troubleshooting. const ( // LogTypeIndexSlowLogs is a LogType enum value LogTypeIndexSlowLogs = "INDEX_SLOW_LOGS" @@ -6646,6 +6840,7 @@ const ( // The state of a requested change. One of the following: // // * Processing: The request change is still in-process. +// // * Active: The request change is processed and deployed to the Elasticsearch // domain. const ( @@ -6670,6 +6865,14 @@ const ( ReservedElasticsearchInstancePaymentOptionNoUpfront = "NO_UPFRONT" ) +const ( + // TLSSecurityPolicyPolicyMinTls10201907 is a TLSSecurityPolicy enum value + TLSSecurityPolicyPolicyMinTls10201907 = "Policy-Min-TLS-1-0-2019-07" + + // TLSSecurityPolicyPolicyMinTls12201907 is a TLSSecurityPolicy enum value + TLSSecurityPolicyPolicyMinTls12201907 = "Policy-Min-TLS-1-2-2019-07" +) + const ( // UpgradeStatusInProgress is a UpgradeStatus enum value UpgradeStatusInProgress = "IN_PROGRESS" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/doc.go index 944fadc200d..fa8988a22d2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/doc.go @@ -3,9 +3,14 @@ // Package elasticsearchservice provides the client and types for making API // requests to Amazon Elasticsearch Service. // -// Use the Amazon Elasticsearch configuration API to create, configure, and +// Use the Amazon Elasticsearch Configuration API to create, configure, and // manage Elasticsearch domains. // +// For sample code that uses the Configuration API, see the Amazon Elasticsearch +// Service Developer Guide (https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-configuration-samples.html). +// The guide also contains sample code for sending signed HTTP requests to the +// Elasticsearch APIs (https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-request-signing.html). +// // The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. // For example, es.us-east-1.amazonaws.com. 
For a current list of supported // regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticsearch-service-regions). diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index d2f8f382733..9f309cdb009 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticsearchService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticsearchService { svc := &ElasticsearchService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-01-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go index ec140ae0bdb..49363f1934f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go @@ -711,7 +711,7 @@ func (c *ElasticTranscoder) ListJobsByPipelineWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListJobsByPipeline operation. // pageNum := 0 // err := client.ListJobsByPipelinePages(params, -// func(page *ListJobsByPipelineOutput, lastPage bool) bool { +// func(page *elastictranscoder.ListJobsByPipelineOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -743,10 +743,12 @@ func (c *ElasticTranscoder) ListJobsByPipelinePagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsByPipelineOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsByPipelineOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -859,7 +861,7 @@ func (c *ElasticTranscoder) ListJobsByStatusWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a ListJobsByStatus operation. 
// pageNum := 0 // err := client.ListJobsByStatusPages(params, -// func(page *ListJobsByStatusOutput, lastPage bool) bool { +// func(page *elastictranscoder.ListJobsByStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -891,10 +893,12 @@ func (c *ElasticTranscoder) ListJobsByStatusPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsByStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsByStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1001,7 +1005,7 @@ func (c *ElasticTranscoder) ListPipelinesWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListPipelines operation. // pageNum := 0 // err := client.ListPipelinesPages(params, -// func(page *ListPipelinesOutput, lastPage bool) bool { +// func(page *elastictranscoder.ListPipelinesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1033,10 +1037,12 @@ func (c *ElasticTranscoder) ListPipelinesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1143,7 +1149,7 @@ func (c *ElasticTranscoder) ListPresetsWithContext(ctx aws.Context, input *ListP // // Example iterating over at most 3 pages of a ListPresets operation. // pageNum := 0 // err := client.ListPresetsPages(params, -// func(page *ListPresetsOutput, lastPage bool) bool { +// func(page *elastictranscoder.ListPresetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1175,10 +1181,12 @@ func (c *ElasticTranscoder) ListPresetsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2373,29 +2381,14 @@ type CaptionFormat struct { // The format you specify determines whether Elastic Transcoder generates an // embedded or sidecar caption for this output. // - // * Valid Embedded Caption Formats: - // - // for FLAC: None - // - // For MP3: None - // - // For MP4: mov-text - // - // For MPEG-TS: None - // - // For ogg: None - // - // For webm: None + // * Valid Embedded Caption Formats: for FLAC: None For MP3: None For MP4: + // mov-text For MPEG-TS: None For ogg: None For webm: None // // * Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first // div element only), scc, srt, and webvtt. If you want ttml or smpte-tt - // compatible captions, specify dfxp as your output format. - // - // For FMP4: dfxp - // - // Non-FMP4 outputs: All sidecar types - // - // fmp4 captions have an extension of .ismt + // compatible captions, specify dfxp as your output format. For FMP4: dfxp + // Non-FMP4 outputs: All sidecar types fmp4 captions have an extension of + // .ismt Format *string `type:"string"` // The prefix for caption filenames, in the form description-{language}, where: @@ -2835,25 +2828,19 @@ type CreateJobOutput struct { // // * Embedded: Embedded captions are included in the same file as the audio // and video. Elastic Transcoder supports only one embedded caption per language, - // to a maximum of 300 embedded captions per file. 
- // - // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), - // CEA-708 (EIA-708, first non-empty channel only), and mov-text - // - // Valid outputs include: mov-text - // - // Elastic Transcoder supports a maximum of one embedded format per output. + // to a maximum of 300 embedded captions per file. Valid input values include: + // CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first + // non-empty channel only), and mov-text Valid outputs include: mov-text + // Elastic Transcoder supports a maximum of one embedded format per output. // // * Sidecar: Sidecar captions are kept in a separate metadata file from // the audio and video data. Sidecar captions require a player that is capable // of understanding the relationship between the video file and the sidecar // file. Elastic Transcoder supports only one sidecar caption per language, - // to a maximum of 20 sidecar captions per file. - // - // Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, - // srt, ttml (first div element only), and webvtt - // - // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // to a maximum of 20 sidecar captions per file. Valid input values include: + // dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div + // element only), and webvtt Valid outputs include: dfxp (first div element + // only), scc, srt, and webvtt. // // If you want ttml or smpte-tt compatible captions, specify dfxp as your output // format. @@ -2904,6 +2891,7 @@ type CreateJobOutput struct { // transcoding contains rotation metadata. Rotate *string `type:"string"` + // // (Outputs in Fragmented MP4 or MPEG-TS format only. // // If you specify a preset in PresetId for which the value of Container is fmp4 @@ -2940,11 +2928,9 @@ type CreateJobOutput struct { // {count} in the ThumbnailPattern object. Wherever you specify {count}, // Elastic Transcoder adds a five-digit sequence number (beginning with 00001) // to thumbnail file names. The number indicates where a given thumbnail - // appears in the sequence of thumbnails for a transcoded file. - // - // If you specify a literal value and/or {resolution} but you omit {count}, - // Elastic Transcoder returns a validation error and does not create the - // job. + // appears in the sequence of thumbnails for a transcoded file. If you specify + // a literal value and/or {resolution} but you omit {count}, Elastic Transcoder + // returns a validation error and does not create the job. // // * Literal values (Optional): You can specify literal values anywhere in // the ThumbnailPattern object. 
For example, you can include them as a file @@ -3105,28 +3091,19 @@ type CreateJobPlaylist struct { // // * If your output is not HLS or does not have a segment duration set, the // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: - // - // OutputKeyPrefixOutputs:Key + // OutputKeyPrefixOutputs:Key // // * If your output is HLSv3 and has a segment duration set, or is not included // in a playlist, Elastic Transcoder creates an output playlist file with // a file extension of .m3u8, and a series of .ts files that include a five-digit - // sequential counter beginning with 00000: - // - // OutputKeyPrefixOutputs:Key.m3u8 - // - // OutputKeyPrefixOutputs:Key00000.ts + // sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key.m3u8 + // OutputKeyPrefixOutputs:Key00000.ts // // * If your output is HLSv4, has a segment duration set, and is included // in an HLSv4 playlist, Elastic Transcoder creates an output playlist file // with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder - // also creates an output file with an extension of _iframe.m3u8: - // - // OutputKeyPrefixOutputs:Key_v4.m3u8 - // - // OutputKeyPrefixOutputs:Key_iframe.m3u8 - // - // OutputKeyPrefixOutputs:Key.ts + // also creates an output file with an extension of _iframe.m3u8: OutputKeyPrefixOutputs:Key_v4.m3u8 + // OutputKeyPrefixOutputs:Key_iframe.m3u8 OutputKeyPrefixOutputs:Key.ts // // Elastic Transcoder automatically appends the relevant file extension to the // file name. If you include a file extension in Output Key, the file name will @@ -3261,23 +3238,18 @@ type CreatePipelineInput struct { // and/or predefined Amazon S3 groups. // // * Grantee Type: Specify the type of value that appears in the Grantee - // object: - // - // Canonical: The value in the Grantee object is either the canonical user ID - // for an AWS account or an origin access identity for an Amazon CloudFront - // distribution. For more information about canonical user IDs, see Access - // Control List (ACL) Overview in the Amazon Simple Storage Service Developer - // Guide. For more information about using CloudFront origin access identities - // to require that users use CloudFront URLs instead of Amazon S3 URLs, see - // Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. - // - // A canonical user ID is not the same as an AWS account number. - // - // Email: The value in the Grantee object is the registered email address of - // an AWS account. - // - // Group: The value in the Grantee object is one of the following predefined - // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // object: Canonical: The value in the Grantee object is either the canonical + // user ID for an AWS account or an origin access identity for an Amazon + // CloudFront distribution. For more information about canonical user IDs, + // see Access Control List (ACL) Overview in the Amazon Simple Storage Service + // Developer Guide. For more information about using CloudFront origin access + // identities to require that users use CloudFront URLs instead of Amazon + // S3 URLs, see Using an Origin Access Identity to Restrict Access to Your + // Amazon S3 Content. A canonical user ID is not the same as an AWS account + // number. Email: The value in the Grantee object is the registered email + // address of an AWS account. Group: The value in the Grantee object is one + // of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. 
// // * Grantee: The AWS user or group that you want to have access to transcoded // files and playlists. To identify the user or group, you can specify the @@ -3288,19 +3260,13 @@ type CreatePipelineInput struct { // * Access: The permission that you want to give to the AWS user that you // specified in Grantee. Permissions are granted on the files that Elastic // Transcoder adds to the bucket, including playlists and video files. Valid - // values include: - // - // READ: The grantee can read the objects and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the objects that Elastic Transcoder adds to the Amazon S3 bucket. + // values include: READ: The grantee can read the objects and metadata for + // objects that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL + // for the objects that Elastic Transcoder adds to the Amazon S3 bucket. + // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions + // for the objects that Elastic Transcoder adds to the Amazon S3 bucket. // // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the video files and playlists @@ -3360,11 +3326,9 @@ type CreatePipelineInput struct { // transcoded files, thumbnails, and playlists. // // * You do not want to specify the permissions that Elastic Transcoder grants - // to the files. - // - // When Elastic Transcoder saves files in OutputBucket, it grants full control - // over the files only to the AWS account that owns the role that is specified - // by Role. + // to the files. When Elastic Transcoder saves files in OutputBucket, it + // grants full control over the files only to the AWS account that owns the + // role that is specified by Role. // // * You want to associate the transcoded files and thumbnails with the Amazon // S3 Standard storage class. @@ -3401,19 +3365,13 @@ type CreatePipelineInput struct { // to a maximum of 30 users and/or predefined Amazon S3 groups. // // * GranteeType: Specify the type of value that appears in the Grantee object: - // - // - // Canonical: The value in the Grantee object is either the canonical user ID - // for an AWS account or an origin access identity for an Amazon CloudFront - // distribution. - // - // A canonical user ID is not the same as an AWS account number. - // - // Email: The value in the Grantee object is the registered email address of - // an AWS account. - // - // Group: The value in the Grantee object is one of the following predefined - // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Canonical: The value in the Grantee object is either the canonical user + // ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. + // Email: The value in the Grantee object is the registered email address + // of an AWS account. 
Group: The value in the Grantee object is one of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or + // LogDelivery. // // * Grantee: The AWS user or group that you want to have access to thumbnail // files. To identify the user or group, you can specify the canonical user @@ -3423,19 +3381,14 @@ type CreatePipelineInput struct { // // * Access: The permission that you want to give to the AWS user that you // specified in Grantee. Permissions are granted on the thumbnail files that - // Elastic Transcoder adds to the bucket. Valid values include: - // - // READ: The grantee can read the thumbnails and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // READ_ACP: The grantee can read the object ACL for thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // Elastic Transcoder adds to the bucket. Valid values include: READ: The + // grantee can read the thumbnails and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read + // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon + // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. // // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the thumbnails that it stores @@ -4210,6 +4163,7 @@ type Job struct { // into which Elastic Transcoder puts the transcoded files. PipelineId *string `type:"string"` + // // Outputs in Fragmented MP4 or MPEG-TS format only. // // If you specify a preset in PresetId for which the value of Container is fmp4 @@ -4437,25 +4391,19 @@ type JobInput struct { // // * Embedded: Embedded captions are included in the same file as the audio // and video. Elastic Transcoder supports only one embedded caption per language, - // to a maximum of 300 embedded captions per file. - // - // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), - // CEA-708 (EIA-708, first non-empty channel only), and mov-text - // - // Valid outputs include: mov-text - // - // Elastic Transcoder supports a maximum of one embedded format per output. + // to a maximum of 300 embedded captions per file. Valid input values include: + // CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first + // non-empty channel only), and mov-text Valid outputs include: mov-text + // Elastic Transcoder supports a maximum of one embedded format per output. // // * Sidecar: Sidecar captions are kept in a separate metadata file from // the audio and video data. Sidecar captions require a player that is capable // of understanding the relationship between the video file and the sidecar // file. Elastic Transcoder supports only one sidecar caption per language, - // to a maximum of 20 sidecar captions per file. 
- // - // Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, - // srt, ttml (first div element only), and webvtt - // - // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // to a maximum of 20 sidecar captions per file. Valid input values include: + // dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div + // element only), and webvtt Valid outputs include: dfxp (first div element + // only), scc, srt, and webvtt. // // If you want ttml or smpte-tt compatible captions, specify dfxp as your output // format. @@ -4592,6 +4540,7 @@ func (s *JobInput) SetTimeSpan(v *TimeSpan) *JobInput { return s } +// // Outputs recommended instead. // // If you specified one output for a job, information about that output. If @@ -4616,25 +4565,19 @@ type JobOutput struct { // // * Embedded: Embedded captions are included in the same file as the audio // and video. Elastic Transcoder supports only one embedded caption per language, - // to a maximum of 300 embedded captions per file. - // - // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), - // CEA-708 (EIA-708, first non-empty channel only), and mov-text - // - // Valid outputs include: mov-text - // - // Elastic Transcoder supports a maximum of one embedded format per output. + // to a maximum of 300 embedded captions per file. Valid input values include: + // CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first + // non-empty channel only), and mov-text Valid outputs include: mov-text + // Elastic Transcoder supports a maximum of one embedded format per output. // // * Sidecar: Sidecar captions are kept in a separate metadata file from // the audio and video data. Sidecar captions require a player that is capable // of understanding the relationship between the video file and the sidecar // file. Elastic Transcoder supports only one sidecar caption per language, - // to a maximum of 20 sidecar captions per file. - // - // Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, - // srt, ttml (first div element only), and webvtt - // - // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // to a maximum of 20 sidecar captions per file. Valid input values include: + // dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div + // element only), and webvtt Valid outputs include: dfxp (first div element + // only), scc, srt, and webvtt. // // If you want ttml or smpte-tt compatible captions, specify dfxp as your output // format. @@ -4711,6 +4654,7 @@ type JobOutput struct { // rotation metadata. Rotate *string `type:"string"` + // // (Outputs in Fragmented MP4 or MPEG-TS format only. // // If you specify a preset in PresetId for which the value of Container is fmp4 @@ -4774,11 +4718,9 @@ type JobOutput struct { // {count} in the ThumbnailPattern object. Wherever you specify {count}, // Elastic Transcoder adds a five-digit sequence number (beginning with 00001) // to thumbnail file names. The number indicates where a given thumbnail - // appears in the sequence of thumbnails for a transcoded file. - // - // If you specify a literal value and/or {resolution} but you omit {count}, - // Elastic Transcoder returns a validation error and does not create the - // job. + // appears in the sequence of thumbnails for a transcoded file. 
If you specify + // a literal value and/or {resolution} but you omit {count}, Elastic Transcoder + // returns a validation error and does not create the job. // // * Literal values (Optional): You can specify literal values anywhere in // the ThumbnailPattern object. For example, you can include them as a file @@ -4800,12 +4742,12 @@ type JobOutput struct { // you specify in Preset for the current output. // // Watermarks are added to the output video in the sequence in which you list - // them in the job output—the first watermark in the list is added to the output - // video first, the second watermark in the list is added next, and so on. As - // a result, if the settings in a preset cause Elastic Transcoder to place all - // watermarks in the same location, the second watermark that you add covers - // the first one, the third one covers the second, and the fourth one covers - // the third. + // them in the job output—the first watermark in the list is added to the + // output video first, the second watermark in the list is added next, and so + // on. As a result, if the settings in a preset cause Elastic Transcoder to + // place all watermarks in the same location, the second watermark that you + // add covers the first one, the third one covers the second, and the fourth + // one covers the third. Watermarks []*JobWatermark `type:"list"` // Specifies the width of the output file in pixels. @@ -5445,9 +5387,8 @@ type Permission struct { // The type of value that appears in the Grantee object: // // * Canonical: Either the canonical user ID for an AWS account or an origin - // access identity for an Amazon CloudFront distribution. - // - // A canonical user ID is not the same as an AWS account number. + // access identity for an Amazon CloudFront distribution. A canonical user + // ID is not the same as an AWS account number. // // * Email: The registered email address of an AWS account. // @@ -5523,36 +5464,22 @@ type Pipeline struct { // // * Permissions: A list of the users and/or predefined Amazon S3 groups // you want to have access to transcoded files and playlists, and the type - // of access that you want them to have. - // - // GranteeType: The type of value that appears in the Grantee object: - // - // Canonical: Either the canonical user ID for an AWS account or an origin access - // identity for an Amazon CloudFront distribution. - // - // Email: The registered email address of an AWS account. - // - // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, - // or LogDelivery. - // - // Grantee: The AWS user or group that you want to have access to transcoded - // files and playlists. - // - // Access: The permission that you want to give to the AWS user that is listed - // in Grantee. Valid values include: - // - // READ: The grantee can read the objects and metadata for objects that Elastic + // of access that you want them to have. GranteeType: The type of value that + // appears in the Grantee object: Canonical: Either the canonical user ID + // for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. Email: The registered email address of an AWS account. Group: + // One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. Grantee: The AWS user or group that you want to have access + // to transcoded files and playlists. Access: The permission that you want + // to give to the AWS user that is listed in Grantee. 
Valid values include: + // READ: The grantee can read the objects and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read + // the object ACL for objects that Elastic Transcoder adds to the Amazon + // S3 bucket. WRITE_ACP: The grantee can write the ACL for the objects that + // Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The grantee + // has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic // Transcoder adds to the Amazon S3 bucket. // - // READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the objects that Elastic Transcoder adds to the Amazon S3 bucket. - // // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the video files and playlists // that it stores in your Amazon S3 bucket. @@ -5618,37 +5545,22 @@ type Pipeline struct { // // * Permissions: A list of the users and/or predefined Amazon S3 groups // you want to have access to thumbnail files, and the type of access that - // you want them to have. - // - // GranteeType: The type of value that appears in the Grantee object: - // - // Canonical: Either the canonical user ID for an AWS account or an origin access - // identity for an Amazon CloudFront distribution. - // - // A canonical user ID is not the same as an AWS account number. - // - // Email: The registered email address of an AWS account. - // - // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, - // or LogDelivery. - // - // Grantee: The AWS user or group that you want to have access to thumbnail - // files. - // - // Access: The permission that you want to give to the AWS user that is listed - // in Grantee. Valid values include: - // - // READ: The grantee can read the thumbnails and metadata for thumbnails that - // Elastic Transcoder adds to the Amazon S3 bucket. - // - // READ_ACP: The grantee can read the object ACL for thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // you want them to have. GranteeType: The type of value that appears in + // the Grantee object: Canonical: Either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // A canonical user ID is not the same as an AWS account number. Email: The + // registered email address of an AWS account. Group: One of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. Access: The permission that you want to give to the AWS user that + // is listed in Grantee. Valid values include: READ: The grantee can read + // the thumbnails and metadata for thumbnails that Elastic Transcoder adds + // to the Amazon S3 bucket. 
READ_ACP: The grantee can read the object ACL + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, + // and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds + // to the Amazon S3 bucket. // // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the thumbnails that it stores @@ -5970,28 +5882,19 @@ type Playlist struct { // // * If your output is not HLS or does not have a segment duration set, the // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: - // - // OutputKeyPrefixOutputs:Key + // OutputKeyPrefixOutputs:Key // // * If your output is HLSv3 and has a segment duration set, or is not included // in a playlist, Elastic Transcoder creates an output playlist file with // a file extension of .m3u8, and a series of .ts files that include a five-digit - // sequential counter beginning with 00000: - // - // OutputKeyPrefixOutputs:Key.m3u8 - // - // OutputKeyPrefixOutputs:Key00000.ts + // sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key.m3u8 + // OutputKeyPrefixOutputs:Key00000.ts // // * If your output is HLSv4, has a segment duration set, and is included // in an HLSv4 playlist, Elastic Transcoder creates an output playlist file // with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder - // also creates an output file with an extension of _iframe.m3u8: - // - // OutputKeyPrefixOutputs:Key_v4.m3u8 - // - // OutputKeyPrefixOutputs:Key_iframe.m3u8 - // - // OutputKeyPrefixOutputs:Key.ts + // also creates an output file with an extension of _iframe.m3u8: OutputKeyPrefixOutputs:Key_v4.m3u8 + // OutputKeyPrefixOutputs:Key_iframe.m3u8 OutputKeyPrefixOutputs:Key.ts // // Elastic Transcoder automatically appends the relevant file extension to the // file name. If you include a file extension in Output Key, the file name will @@ -6250,8 +6153,7 @@ type PresetWatermark struct { // * integer percentage (%): The range of valid values is 0 to 100. Use the // value of Target to specify whether you want Elastic Transcoder to include // the black bars that are added by Elastic Transcoder, if any, in the calculation. - // - // If you specify the value in pixels, it must be less than or equal to the + // If you specify the value in pixels, it must be less than or equal to the // value of MaxWidth. MaxWidth *string `type:"string"` @@ -6761,6 +6663,7 @@ func (s *TestRoleOutput) SetSuccess(v string) *TestRoleOutput { type Thumbnails struct { _ struct{} `type:"structure"` + // // To better control resolution and aspect ratio of thumbnails, we recommend // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy // instead of Resolution and AspectRatio. The two groups of settings are mutually @@ -6800,6 +6703,7 @@ type Thumbnails struct { // MaxWidth and MaxHeight settings. PaddingPolicy *string `type:"string"` + // // To better control resolution and aspect ratio of thumbnails, we recommend // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy // instead of Resolution and AspectRatio. The two groups of settings are mutually @@ -7019,23 +6923,18 @@ type UpdatePipelineInput struct { // and/or predefined Amazon S3 groups. 
// // * Grantee Type: Specify the type of value that appears in the Grantee - // object: - // - // Canonical: The value in the Grantee object is either the canonical user ID - // for an AWS account or an origin access identity for an Amazon CloudFront - // distribution. For more information about canonical user IDs, see Access - // Control List (ACL) Overview in the Amazon Simple Storage Service Developer - // Guide. For more information about using CloudFront origin access identities - // to require that users use CloudFront URLs instead of Amazon S3 URLs, see - // Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. - // - // A canonical user ID is not the same as an AWS account number. - // - // Email: The value in the Grantee object is the registered email address of - // an AWS account. - // - // Group: The value in the Grantee object is one of the following predefined - // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // object: Canonical: The value in the Grantee object is either the canonical + // user ID for an AWS account or an origin access identity for an Amazon + // CloudFront distribution. For more information about canonical user IDs, + // see Access Control List (ACL) Overview in the Amazon Simple Storage Service + // Developer Guide. For more information about using CloudFront origin access + // identities to require that users use CloudFront URLs instead of Amazon + // S3 URLs, see Using an Origin Access Identity to Restrict Access to Your + // Amazon S3 Content. A canonical user ID is not the same as an AWS account + // number. Email: The value in the Grantee object is the registered email + // address of an AWS account. Group: The value in the Grantee object is one + // of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. // // * Grantee: The AWS user or group that you want to have access to transcoded // files and playlists. To identify the user or group, you can specify the @@ -7046,19 +6945,13 @@ type UpdatePipelineInput struct { // * Access: The permission that you want to give to the AWS user that you // specified in Grantee. Permissions are granted on the files that Elastic // Transcoder adds to the bucket, including playlists and video files. Valid - // values include: - // - // READ: The grantee can read the objects and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder - // adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the objects that Elastic Transcoder adds to the Amazon S3 bucket. + // values include: READ: The grantee can read the objects and metadata for + // objects that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL + // for the objects that Elastic Transcoder adds to the Amazon S3 bucket. + // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions + // for the objects that Elastic Transcoder adds to the Amazon S3 bucket. 
// // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the video files and playlists @@ -7128,18 +7021,13 @@ type UpdatePipelineInput struct { // to a maximum of 30 users and/or predefined Amazon S3 groups. // // * GranteeType: Specify the type of value that appears in the Grantee object: - // - // Canonical: The value in the Grantee object is either the canonical user ID - // for an AWS account or an origin access identity for an Amazon CloudFront - // distribution. - // - // A canonical user ID is not the same as an AWS account number. - // - // Email: The value in the Grantee object is the registered email address of - // an AWS account. - // - // Group: The value in the Grantee object is one of the following predefined - // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Canonical: The value in the Grantee object is either the canonical user + // ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. + // Email: The value in the Grantee object is the registered email address + // of an AWS account. Group: The value in the Grantee object is one of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or + // LogDelivery. // // * Grantee: The AWS user or group that you want to have access to thumbnail // files. To identify the user or group, you can specify the canonical user @@ -7149,19 +7037,14 @@ type UpdatePipelineInput struct { // // * Access: The permission that you want to give to the AWS user that you // specified in Grantee. Permissions are granted on the thumbnail files that - // Elastic Transcoder adds to the bucket. Valid values include: - // - // READ: The grantee can read the thumbnails and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // READ_ACP: The grantee can read the object ACL for thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // Elastic Transcoder adds to the bucket. Valid values include: READ: The + // grantee can read the thumbnails and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read + // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon + // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. // // * StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, // that you want Elastic Transcoder to assign to the thumbnails that it stores @@ -7487,6 +7370,7 @@ func (s *UpdatePipelineStatusOutput) SetPipeline(v *Pipeline) *UpdatePipelineSta type VideoParameters struct { _ struct{} `type:"structure"` + // // To better control resolution and aspect ratio of output videos, we recommend // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, // and DisplayAspectRatio instead of Resolution and AspectRatio. 
The two groups @@ -7658,8 +7542,8 @@ type VideoParameters struct { // // * HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 // - // Elastic Transcoder may change the behavior of the ColorspaceConversionModeAuto - // mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode. + // Elastic Transcoder may change the behavior of the ColorspaceConversionMode + // Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode. // // If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does // not change the color space of a file. If you are unsure what ColorSpaceConversionMode @@ -7796,6 +7680,7 @@ type VideoParameters struct { // for MaxWidth and MaxHeight. PaddingPolicy *string `type:"string"` + // // To better control resolution and aspect ratio of output videos, we recommend // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, // and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups @@ -7807,7 +7692,7 @@ type VideoParameters struct { // * auto: Elastic Transcoder attempts to preserve the width and height of // the input file, subject to the following rules. // - // * width x height: The width and height of the output video in pixels. + // * width x height : The width and height of the output video in pixels. // // Note the following about specifying the width and height: // @@ -7823,33 +7708,9 @@ type VideoParameters struct { // // * We recommend that you specify a resolution for which the product of // width and height is less than or equal to the applicable value in the - // following list (List - Max width x height value): - // - // 1 - 25344 - // - // 1b - 25344 - // - // 1.1 - 101376 - // - // 1.2 - 101376 - // - // 1.3 - 101376 - // - // 2 - 101376 - // - // 2.1 - 202752 - // - // 2.2 - 404720 - // - // 3 - 404720 - // - // 3.1 - 921600 - // - // 3.2 - 1310720 - // - // 4 - 2097152 - // - // 4.1 - 2097152 + // following list (List - Max width x height value): 1 - 25344 1b - 25344 + // 1.1 - 101376 1.2 - 101376 1.3 - 101376 2 - 101376 2.1 - 202752 2.2 - 404720 + // 3 - 404720 3.1 - 921600 3.2 - 1310720 4 - 2097152 4.1 - 2097152 Resolution *string `type:"string"` // Specify one of the following values to control scaling of the output video: diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go index 30acb8d1bc0..9a33ddbf610 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go @@ -46,11 +46,11 @@ const ( // svc := elastictranscoder.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticTranscoder { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticTranscoder { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticTranscoder { svc := &ElasticTranscoder{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-09-25", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index c1028cae5dc..c2e93fa72d3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -1775,7 +1775,7 @@ func (c *ELB) DescribeLoadBalancersWithContext(ctx aws.Context, input *DescribeL // // Example iterating over at most 3 pages of a DescribeLoadBalancers operation. // pageNum := 0 // err := client.DescribeLoadBalancersPages(params, -// func(page *DescribeLoadBalancersOutput, lastPage bool) bool { +// func(page *elb.DescribeLoadBalancersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1807,10 +1807,12 @@ func (c *ELB) DescribeLoadBalancersPagesWithContext(ctx aws.Context, input *Desc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 5dfdd322c9b..73e40b747c9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -46,11 +46,11 @@ const ( // svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
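The DescribeLoadBalancersPages hunks above do two things: the doc comment now names the package-qualified page type (*elb.DescribeLoadBalancersOutput), and the loop is rewritten so that returning false from the callback breaks out before Next() is called again, instead of fetching one more page. A minimal call-site sketch, assuming default credentials and a hypothetical region:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	svc := elb.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Iterate over at most three pages; returning false stops paging
	// immediately under the rewritten loop.
	pageNum := 0
	err := svc.DescribeLoadBalancersPages(&elb.DescribeLoadBalancersInput{},
		func(page *elb.DescribeLoadBalancersOutput, lastPage bool) bool {
			pageNum++
			for _, lb := range page.LoadBalancerDescriptions {
				fmt.Println(aws.StringValue(lb.LoadBalancerName))
			}
			return pageNum <= 3
		})
	if err != nil {
		fmt.Println("describe load balancers:", err)
	}
}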
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELB { svc := &ELB{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go index 13503617465..72ed9cd050b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -57,14 +57,18 @@ func (c *ELBV2) AddListenerCertificatesRequest(input *AddListenerCertificatesInp // AddListenerCertificates API operation for Elastic Load Balancing. // -// Adds the specified certificate to the specified HTTPS listener. +// Adds the specified SSL server certificate to the certificate list for the +// specified HTTPS or TLS listener. // -// If the certificate was already added, the call is successful but the certificate -// is not added again. +// If the certificate is already in the certificate list, the call is successful +// but the certificate is not added again. // -// To list the certificates for your listener, use DescribeListenerCertificates. -// To remove certificates from your listener, use RemoveListenerCertificates. -// To specify the default SSL server certificate, use ModifyListener. +// To get the certificate list for a listener, use DescribeListenerCertificates. +// To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. +// To replace the default certificate for a listener, use ModifyListener. +// +// For more information, see SSL Certificates (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#https-listener-certificates) +// in the Application Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1259,7 +1263,15 @@ func (c *ELBV2) DescribeListenerCertificatesRequest(input *DescribeListenerCerti // DescribeListenerCertificates API operation for Elastic Load Balancing. // -// Describes the certificates for the specified HTTPS listener. +// Describes the default certificate and the certificate list for the specified +// HTTPS or TLS listener. +// +// If the default certificate is also in the certificate list, it appears twice +// in the results (once with IsDefault set to true and once with IsDefault set +// to false). +// +// For more information, see SSL Certificates (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#https-listener-certificates) +// in the Application Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1348,6 +1360,10 @@ func (c *ELBV2) DescribeListenersRequest(input *DescribeListenersInput) (req *re // Load Balancer or Network Load Balancer. You must specify either a load balancer // or one or more listeners. 
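The reworded AddListenerCertificates documentation above distinguishes the listener's certificate list from its default certificate, and notes that IsDefault is output-only. A usage sketch with hypothetical listener and certificate ARNs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Hypothetical ARNs; set CertificateArn only, never IsDefault, when
	// adding to the certificate list.
	listenerArn := "arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/app/my-lb/1234567890123456/1234567890123456"
	certArn := "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"

	out, err := svc.AddListenerCertificates(&elbv2.AddListenerCertificatesInput{
		ListenerArn:  aws.String(listenerArn),
		Certificates: []*elbv2.Certificate{{CertificateArn: aws.String(certArn)}},
	})
	if err != nil {
		fmt.Println("add listener certificates:", err)
		return
	}
	// When describing the list, the default certificate appears with
	// IsDefault set to true, per the DescribeListenerCertificates docs above.
	for _, c := range out.Certificates {
		fmt.Println(aws.StringValue(c.CertificateArn), aws.BoolValue(c.IsDefault))
	}
}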
// +// For an HTTPS or TLS listener, the output includes the default certificate +// for the listener. To describe the certificate list for the listener, use +// DescribeListenerCertificates. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1398,7 +1414,7 @@ func (c *ELBV2) DescribeListenersWithContext(ctx aws.Context, input *DescribeLis // // Example iterating over at most 3 pages of a DescribeListeners operation. // pageNum := 0 // err := client.DescribeListenersPages(params, -// func(page *DescribeListenersOutput, lastPage bool) bool { +// func(page *elbv2.DescribeListenersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1430,10 +1446,12 @@ func (c *ELBV2) DescribeListenersPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeListenersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeListenersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1620,7 +1638,7 @@ func (c *ELBV2) DescribeLoadBalancersWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeLoadBalancers operation. // pageNum := 0 // err := client.DescribeLoadBalancersPages(params, -// func(page *DescribeLoadBalancersOutput, lastPage bool) bool { +// func(page *elbv2.DescribeLoadBalancersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1652,10 +1670,12 @@ func (c *ELBV2) DescribeLoadBalancersPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2105,7 +2125,7 @@ func (c *ELBV2) DescribeTargetGroupsWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeTargetGroups operation. // pageNum := 0 // err := client.DescribeTargetGroupsPages(params, -// func(page *DescribeTargetGroupsOutput, lastPage bool) bool { +// func(page *elbv2.DescribeTargetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2137,10 +2157,12 @@ func (c *ELBV2) DescribeTargetGroupsPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTargetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTargetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2279,9 +2301,9 @@ func (c *ELBV2) ModifyListenerRequest(input *ModifyListenerInput) (req *request. // // Any properties that you do not specify retain their current values. However, // changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the -// security policy and server certificate properties. If you change the protocol +// security policy and default certificate properties. If you change the protocol // from HTTP to HTTPS, or from TCP to TLS, you must add the security policy -// and server certificate properties. +// and default certificate properties. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2875,7 +2897,8 @@ func (c *ELBV2) RemoveListenerCertificatesRequest(input *RemoveListenerCertifica // RemoveListenerCertificates API operation for Elastic Load Balancing. // -// Removes the specified certificate from the specified HTTPS listener. +// Removes the specified certificate from the certificate list for the specified +// HTTPS or TLS listener. // // You can't remove the default certificate for a listener. To replace the default // certificate, call ModifyListener. @@ -3059,8 +3082,6 @@ func (c *ELBV2) SetIpAddressTypeRequest(input *SetIpAddressTypeInput) (req *requ // Sets the type of IP addresses used by the subnets of the specified Application // Load Balancer or Network Load Balancer. // -// Network Load Balancers must use ipv4. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3394,7 +3415,8 @@ type Action struct { // The order for the action. This value is required for rules with multiple // actions. The action with the lowest value for order is performed first. The - // final action to be performed must be a forward or a fixed-response action. + // last action to be performed must be one of the following types of actions: + // a forward, fixed-response, or redirect. Order *int64 `min:"1" type:"integer"` // [Application Load Balancer] Information for creating a redirect action. Specify @@ -3405,8 +3427,7 @@ type Action struct { // is forward. TargetGroupArn *string `type:"string"` - // The type of action. Each rule must include exactly one of the following types - // of actions: forward, fixed-response, or redirect. + // The type of action. // // Type is a required field Type *string `type:"string" required:"true" enum:"ActionTypeEnum"` @@ -3556,7 +3577,7 @@ func (s *AddListenerCertificatesInput) SetListenerArn(v string) *AddListenerCert type AddListenerCertificatesOutput struct { _ struct{} `type:"structure"` - // Information about the certificates. + // Information about the certificates in the certificate list. Certificates []*Certificate `type:"list"` } @@ -3671,7 +3692,8 @@ type AuthenticateCognitoActionConfig struct { // // * allow - Allow the request to be forwarded to the target. // - // authenticate + // * authenticate - Redirect the request to the IdP authorization endpoint. + // This is the default value. OnUnauthenticatedRequest *string `type:"string" enum:"AuthenticateCognitoActionConditionalBehaviorEnum"` // The set of user claims to be requested from the IdP. The default is openid. @@ -3820,7 +3842,8 @@ type AuthenticateOidcActionConfig struct { // // * allow - Allow the request to be forwarded to the target. // - // authenticate + // * authenticate - Redirect the request to the IdP authorization endpoint. + // This is the default value. OnUnauthenticatedRequest *string `type:"string" enum:"AuthenticateOidcActionConditionalBehaviorEnum"` // The set of user claims to be requested from the IdP. The default is openid. @@ -3965,10 +3988,12 @@ func (s *AuthenticateOidcActionConfig) SetUserInfoEndpoint(v string) *Authentica type AvailabilityZone struct { _ struct{} `type:"structure"` - // [Network Load Balancers] The static IP address. 
+ // [Network Load Balancers] If you need static IP addresses for your load balancer, + // you can specify one Elastic IP address per Availability Zone when you create + // the load balancer. LoadBalancerAddresses []*LoadBalancerAddress `type:"list"` - // The ID of the subnet. + // The ID of the subnet. You can specify one subnet per Availability Zone. SubnetId *string `type:"string"` // The name of the Availability Zone. @@ -4011,7 +4036,9 @@ type Certificate struct { CertificateArn *string `type:"string"` // Indicates whether the certificate is the default certificate. Do not set - // IsDefault when specifying a certificate as an input parameter. + // this value when specifying a certificate as an input. This value is not included + // in the output when describing a listener, but is included when describing + // listener certificates. IsDefault *bool `type:"boolean"` } @@ -4073,11 +4100,11 @@ func (s *Cipher) SetPriority(v int64) *Cipher { type CreateListenerInput struct { _ struct{} `type:"structure"` - // [HTTPS and TLS listeners] The default SSL server certificate. You must provide - // exactly one certificate. Set CertificateArn to the certificate ARN but do - // not set IsDefault. + // [HTTPS and TLS listeners] The default certificate for the listener. You must + // provide exactly one certificate. Set CertificateArn to the certificate ARN + // but do not set IsDefault. // - // To create a certificate list, use AddListenerCertificates. + // To create a certificate list for the listener, use AddListenerCertificates. Certificates []*Certificate `type:"list"` // The actions for the default rule. The rule must include one forward action @@ -4085,7 +4112,8 @@ type CreateListenerInput struct { // // If the action type is forward, you specify a target group. The protocol of // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP or TLS for a Network Load Balancer. + // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a + // Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. @@ -4114,7 +4142,7 @@ type CreateListenerInput struct { // The protocol for connections from clients to the load balancer. For Application // Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load - // Balancers, the supported protocols are TCP and TLS. + // Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. // // Protocol is a required field Protocol *string `type:"string" required:"true" enum:"ProtocolEnum"` @@ -4254,8 +4282,8 @@ type CreateLoadBalancerInput struct { // // The nodes of an internal load balancer have only private IP addresses. The // DNS name of an internal load balancer is publicly resolvable to the private - // IP addresses of the nodes. Therefore, internal load balancers can only route - // requests from clients with access to the VPC for the load balancer. + // IP addresses of the nodes. Therefore, internal load balancers can route requests + // only from clients with access to the VPC for the load balancer. // // The default is an Internet-facing load balancer. Scheme *string `type:"string" enum:"LoadBalancerSchemeEnum"` @@ -4271,7 +4299,8 @@ type CreateLoadBalancerInput struct { // Zones. You cannot specify Elastic IP addresses for your subnets. 
// // [Network Load Balancers] You can specify subnets from one or more Availability - // Zones. You can specify one Elastic IP address per subnet. + // Zones. You can specify one Elastic IP address per subnet if you need static + // IP addresses for your load balancer. SubnetMappings []*SubnetMapping `type:"list"` // The IDs of the public subnets. You can specify only one subnet per Availability @@ -4402,11 +4431,13 @@ type CreateRuleInput struct { _ struct{} `type:"structure"` // The actions. Each rule must include exactly one of the following types of - // actions: forward, fixed-response, or redirect. + // actions: forward, fixed-response, or redirect, and it must be the last action + // to be performed. // // If the action type is forward, you specify a target group. The protocol of // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP or TLS for a Network Load Balancer. + // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a + // Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. @@ -4423,35 +4454,9 @@ type CreateRuleInput struct { // Actions is a required field Actions []*Action `type:"list" required:"true"` - // The conditions. Each condition specifies a field name and a single value. - // - // If the field name is host-header, you can specify a single host name (for - // example, my.example.com). A host name is case insensitive, can be up to 128 - // characters in length, and can contain any of the following characters. You - // can include up to three wildcard characters. - // - // * A-Z, a-z, 0-9 - // - // * - . - // - // * * (matches 0 or more characters) - // - // * ? (matches exactly 1 character) - // - // If the field name is path-pattern, you can specify a single path pattern. - // A path pattern is case-sensitive, can be up to 128 characters in length, - // and can contain any of the following characters. You can include up to three - // wildcard characters. - // - // * A-Z, a-z, 0-9 - // - // * _ - . $ / ~ " ' @ : + - // - // * & (using &) - // - // * * (matches 0 or more characters) - // - // * ? (matches exactly 1 character) + // The conditions. Each rule can include zero or one of the following conditions: + // http-request-method, host-header, path-pattern, and source-ip, and zero or + // more of the following conditions: http-header and query-string. // // Conditions is a required field Conditions []*RuleCondition `type:"list" required:"true"` @@ -4562,14 +4567,14 @@ func (s *CreateRuleOutput) SetRules(v []*Rule) *CreateRuleOutput { type CreateTargetGroupInput struct { _ struct{} `type:"structure"` - // Indicates whether health checks are enabled. If the target type is instance - // or ip, the default is true. If the target type is lambda, the default is - // false. + // Indicates whether health checks are enabled. If the target type is lambda, + // health checks are disabled by default but can be enabled. If the target type + // is instance or ip, health checks are always enabled and cannot be disabled. HealthCheckEnabled *bool `type:"boolean"` // The approximate amount of time, in seconds, between health checks of an individual - // target. For Application Load Balancers, the range is 5–300 seconds. For Network - // Load Balancers, the supported values are 10 or 30 seconds. If the target + // target. 
For HTTP and HTTPS health checks, the range is 5–300 seconds. For + // TCP health checks, the supported values are 10 and 30 seconds. If the target // type is instance or ip, the default is 30 seconds. If the target type is // lambda, the default is 35 seconds. HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` @@ -4586,21 +4591,22 @@ type CreateTargetGroupInput struct { // The protocol the load balancer uses when performing health checks on targets. // For Application Load Balancers, the default is HTTP. For Network Load Balancers, // the default is TCP. The TCP protocol is supported for health checks only - // if the protocol of the target group is TCP or TLS. The TLS protocol is not - // supported for health checks. + // if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The TLS, + // UDP, and TCP_UDP protocols are not supported for health checks. HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` // The amount of time, in seconds, during which no response from a target means - // a failed health check. For Application Load Balancers, the range is 2–120 - // seconds and the default is 5 seconds if the target type is instance or ip - // and 30 seconds if the target type is lambda. For Network Load Balancers, - // this is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP - // health checks. + // a failed health check. For target groups with a protocol of HTTP or HTTPS, + // the default is 5 seconds. For target groups with a protocol of TCP or TLS, + // this value must be 6 seconds for HTTP health checks and 10 seconds for TCP + // and HTTPS health checks. If the target type is lambda, the default is 30 + // seconds. HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` // The number of consecutive health checks successes required before considering - // an unhealthy target healthy. For Application Load Balancers, the default - // is 5. For Network Load Balancers, the default is 3. + // an unhealthy target healthy. For target groups with a protocol of HTTP or + // HTTPS, the default is 5. For target groups with a protocol of TCP or TLS, + // the default is 3. If the target type is lambda, the default is 5. HealthyThresholdCount *int64 `min:"2" type:"integer"` // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful @@ -4623,8 +4629,9 @@ type CreateTargetGroupInput struct { // The protocol to use for routing traffic to the targets. For Application Load // Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, - // the supported protocols are TCP and TLS. If the target is a Lambda function, - // this parameter does not apply. + // the supported protocols are TCP, TLS, UDP, or TCP_UDP. A TCP_UDP listener + // must be associated with a TCP_UDP target group. If the target is a Lambda + // function, this parameter does not apply. Protocol *string `type:"string" enum:"ProtocolEnum"` // The type of target that you must specify when registering targets with this @@ -4632,7 +4639,8 @@ type CreateTargetGroupInput struct { // one target type. // // * instance - Targets are specified by instance ID. This is the default - // value. + // value. If the target group protocol is UDP or TCP_UDP, the target type + // must be instance. // // * ip - Targets are specified by IP address. 
You can specify IP addresses // from the subnets of the virtual private cloud (VPC) for the target group, @@ -4644,13 +4652,14 @@ type CreateTargetGroupInput struct { TargetType *string `type:"string" enum:"TargetTypeEnum"` // The number of consecutive health check failures required before considering - // a target unhealthy. For Application Load Balancers, the default is 2. For - // Network Load Balancers, this value must be the same as the healthy threshold - // count. + // a target unhealthy. For target groups with a protocol of HTTP or HTTPS, the + // default is 2. For target groups with a protocol of TCP or TLS, this value + // must be the same as the healthy threshold count. If the target type is lambda, + // the default is 2. UnhealthyThresholdCount *int64 `min:"2" type:"integer"` // The identifier of the virtual private cloud (VPC). If the target is a Lambda - // function, this parameter does not apply. + // function, this parameter does not apply. Otherwise, this parameter is required. VpcId *string `type:"string"` } @@ -5144,8 +5153,8 @@ type DescribeAccountLimitsOutput struct { // Information about the limits. Limits []*Limit `type:"list"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` } @@ -5237,8 +5246,8 @@ type DescribeListenerCertificatesOutput struct { // Information about the certificates. Certificates []*Certificate `type:"list"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` } @@ -5334,8 +5343,8 @@ type DescribeListenersOutput struct { // Information about the listeners. Listeners []*Listener `type:"list"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` } @@ -5493,8 +5502,8 @@ type DescribeLoadBalancersOutput struct { // Information about the load balancers. LoadBalancers []*LoadBalancer `type:"list"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` } @@ -5587,8 +5596,8 @@ func (s *DescribeRulesInput) SetRuleArns(v []*string) *DescribeRulesInput { type DescribeRulesOutput struct { _ struct{} `type:"structure"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` // Information about the rules. @@ -5675,8 +5684,8 @@ func (s *DescribeSSLPoliciesInput) SetPageSize(v int64) *DescribeSSLPoliciesInpu type DescribeSSLPoliciesOutput struct { _ struct{} `type:"structure"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. 
+ // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` // Information about the policies. @@ -5903,8 +5912,8 @@ func (s *DescribeTargetGroupsInput) SetTargetGroupArns(v []*string) *DescribeTar type DescribeTargetGroupsOutput struct { _ struct{} `type:"structure"` - // The marker to use when requesting the next set of results. If there are no - // additional results, the string is empty. + // If there are additional results, this is the marker for the next set of results. + // Otherwise, this is null. NextMarker *string `type:"string"` // Information about the target groups. @@ -6073,9 +6082,16 @@ func (s *FixedResponseActionConfig) SetStatusCode(v string) *FixedResponseAction return s } +// Information about a host header condition. type HostHeaderConditionConfig struct { _ struct{} `type:"structure"` + // One or more host names. The maximum size of each name is 128 characters. + // The comparison is case insensitive. The following wildcard characters are + // supported: * (matches 0 or more characters) and ? (matches exactly 1 character). + // + // If you specify multiple strings, the condition is satisfied if one of the + // strings matches the host name. Values []*string `type:"list"` } @@ -6095,11 +6111,32 @@ func (s *HostHeaderConditionConfig) SetValues(v []*string) *HostHeaderConditionC return s } +// Information about an HTTP header condition. +// +// There is a set of standard HTTP header fields. You can also define custom +// HTTP header fields. type HttpHeaderConditionConfig struct { _ struct{} `type:"structure"` + // The name of the HTTP header field. The maximum size is 40 characters. The + // header name is case insensitive. The allowed characters are specified by + // RFC 7230. Wildcards are not supported. + // + // You can't use an HTTP header condition to specify the host header. Use HostHeaderConditionConfig + // to specify a host header condition. HttpHeaderName *string `type:"string"` + // One or more strings to compare against the value of the HTTP header. The + // maximum size of each string is 128 characters. The comparison strings are + // case insensitive. The following wildcard characters are supported: * (matches + // 0 or more characters) and ? (matches exactly 1 character). + // + // If the same header appears multiple times in the request, we search them + // in order until a match is found. + // + // If you specify multiple strings, the condition is satisfied if one of the + // strings matches the value of the HTTP header. To require that all of the + // strings are a match, create one condition per string. Values []*string `type:"list"` } @@ -6125,9 +6162,23 @@ func (s *HttpHeaderConditionConfig) SetValues(v []*string) *HttpHeaderConditionC return s } +// Information about an HTTP method condition. +// +// HTTP defines a set of request methods, also referred to as HTTP verbs. For +// more information, see the HTTP Method Registry (https://www.iana.org/assignments/http-methods/http-methods.xhtml). +// You can also define custom HTTP methods. type HttpRequestMethodConditionConfig struct { _ struct{} `type:"structure"` + // The name of the request method. The maximum size is 40 characters. The allowed + // characters are A-Z, hyphen (-), and underscore (_). The comparison is case + // sensitive. Wildcards are not supported; therefore, the method name must be + // an exact match. 
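The condition config structs introduced in these hunks attach to listener rules through RuleCondition (documented further down in this diff). As a rough sketch against this SDK version, with placeholder ARNs, a rule that combines a host-header and an http-header condition might be created like this:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Both ARNs below are placeholders; substitute real listener and
	// target group ARNs.
	out, err := svc.CreateRule(&elbv2.CreateRuleInput{
		ListenerArn: aws.String("arn:aws:elasticloadbalancing:...:listener/app/..."),
		Priority:    aws.Int64(10),
		Conditions: []*elbv2.RuleCondition{
			{
				// Compared case-insensitively against the request's host name.
				Field: aws.String("host-header"),
				HostHeaderConfig: &elbv2.HostHeaderConditionConfig{
					Values: aws.StringSlice([]string{"api.example.com"}),
				},
			},
			{
				// Satisfied if any value matches; '*' and '?' are wildcards.
				Field: aws.String("http-header"),
				HttpHeaderConfig: &elbv2.HttpHeaderConditionConfig{
					HttpHeaderName: aws.String("User-Agent"),
					Values:         aws.StringSlice([]string{"*Mobile*"}),
				},
			},
		},
		Actions: []*elbv2.Action{{
			Type:           aws.String(elbv2.ActionTypeEnumForward),
			TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:...:targetgroup/..."),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Rules)
}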
+ // + // If you specify multiple strings, the condition is satisfied if one of the + // strings matches the HTTP request method. We recommend that you route GET + // and HEAD requests in the same way, because the response to a HEAD request + // may be cached. Values []*string `type:"list"` } @@ -6202,8 +6253,7 @@ func (s *Limit) SetName(v string) *Limit { type Listener struct { _ struct{} `type:"structure"` - // The SSL server certificate. You must provide a certificate if the protocol - // is HTTPS or TLS. + // [HTTPS or TLS listener] The default certificate for the listener. Certificates []*Certificate `type:"list"` // The default actions for the listener. @@ -6221,8 +6271,8 @@ type Listener struct { // The protocol for connections from clients to the load balancer. Protocol *string `type:"string" enum:"ProtocolEnum"` - // The security policy that defines which ciphers and protocols are supported. - // The default is the current predefined security policy. + // [HTTPS or TLS listener] The security policy that defines which ciphers and + // protocols are supported. The default is the current predefined security policy. SslPolicy *string `type:"string"` } @@ -6312,8 +6362,8 @@ type LoadBalancer struct { // // The nodes of an internal load balancer have only private IP addresses. The // DNS name of an internal load balancer is publicly resolvable to the private - // IP addresses of the nodes. Therefore, internal load balancers can only route - // requests from clients with access to the VPC for the load balancer. + // IP addresses of the nodes. Therefore, internal load balancers can route requests + // only from clients with access to the VPC for the load balancer. Scheme *string `type:"string" enum:"LoadBalancerSchemeEnum"` // The IDs of the security groups for the load balancer. @@ -6472,6 +6522,10 @@ type LoadBalancerAttribute struct { // * idle_timeout.timeout_seconds - The idle timeout value, in seconds. The // valid range is 1-4000 seconds. The default is 60 seconds. // + // * routing.http.drop_invalid_header_fields.enabled - Indicates whether + // HTTP headers with invalid header fields are removed by the load balancer + // (true) or routed to targets (false). The default is true. + // // * routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value // is true or false. The default is true. // @@ -6590,9 +6644,9 @@ func (s *Matcher) SetHttpCode(v string) *Matcher { type ModifyListenerInput struct { _ struct{} `type:"structure"` - // [HTTPS and TLS listeners] The default SSL server certificate. You must provide - // exactly one certificate. Set CertificateArn to the certificate ARN but do - // not set IsDefault. + // [HTTPS and TLS listeners] The default certificate for the listener. You must + // provide exactly one certificate. Set CertificateArn to the certificate ARN + // but do not set IsDefault. // // To create a certificate list, use AddListenerCertificates. Certificates []*Certificate `type:"list"` @@ -6602,7 +6656,8 @@ type ModifyListenerInput struct { // // If the action type is forward, you specify a target group. The protocol of // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP or TLS for a Network Load Balancer. + // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a + // Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. 
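The UDP and TCP_UDP support threaded through these hunks also surfaces as new ProtocolEnum constants near the end of this file's diff. A minimal sketch, assuming a placeholder VPC ID, of creating a UDP target group for a Network Load Balancer under the constraints the updated docs describe (instance target type, non-UDP health check protocol):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Per the doc changes above: UDP target groups require the instance
	// target type, and UDP/TCP_UDP cannot be used as the health check
	// protocol, so TCP is used here. The VPC ID is a placeholder.
	out, err := svc.CreateTargetGroup(&elbv2.CreateTargetGroupInput{
		Name:                aws.String("udp-dns"),
		Protocol:            aws.String(elbv2.ProtocolEnumUdp),
		Port:                aws.Int64(53),
		VpcId:               aws.String("vpc-0123456789abcdef0"),
		TargetType:          aws.String(elbv2.TargetTypeEnumInstance),
		HealthCheckProtocol: aws.String(elbv2.ProtocolEnumTcp),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.TargetGroups[0].TargetGroupArn))
}

A UDP listener would then forward to this target group; per the docs above, a TCP_UDP listener must instead be paired with a TCP_UDP target group.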
@@ -6627,7 +6682,7 @@ type ModifyListenerInput struct { // The protocol for connections from clients to the load balancer. Application // Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers - // support the TCP and TLS protocols. + // support the TCP, TLS, UDP, and TCP_UDP protocols. Protocol *string `type:"string" enum:"ProtocolEnum"` // [HTTPS and TLS listeners] The security policy that defines which protocols @@ -6809,11 +6864,14 @@ func (s *ModifyLoadBalancerAttributesOutput) SetAttributes(v []*LoadBalancerAttr type ModifyRuleInput struct { _ struct{} `type:"structure"` - // The actions. + // The actions. Each rule must include exactly one of the following types of + // actions: forward, fixed-response, or redirect, and it must be the last action + // to be performed. // // If the action type is forward, you specify a target group. The protocol of // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP or TLS for a Network Load Balancer. + // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a + // Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. @@ -6828,35 +6886,9 @@ type ModifyRuleInput struct { // specified client requests and return a custom HTTP response. Actions []*Action `type:"list"` - // The conditions. Each condition specifies a field name and a single value. - // - // If the field name is host-header, you can specify a single host name (for - // example, my.example.com). A host name is case insensitive, can be up to 128 - // characters in length, and can contain any of the following characters. You - // can include up to three wildcard characters. - // - // * A-Z, a-z, 0-9 - // - // * - . - // - // * * (matches 0 or more characters) - // - // * ? (matches exactly 1 character) - // - // If the field name is path-pattern, you can specify a single path pattern. - // A path pattern is case-sensitive, can be up to 128 characters in length, - // and can contain any of the following characters. You can include up to three - // wildcard characters. - // - // * A-Z, a-z, 0-9 - // - // * _ - . $ / ~ " ' @ : + - // - // * & (using &) - // - // * * (matches 0 or more characters) - // - // * ? (matches exactly 1 character) + // The conditions. Each rule can include zero or one of the following conditions: + // http-request-method, host-header, path-pattern, and source-ip, and zero or + // more of the following conditions: http-header and query-string. Conditions []*RuleCondition `type:"list"` // The Amazon Resource Name (ARN) of the rule. @@ -7021,10 +7053,10 @@ type ModifyTargetGroupInput struct { HealthCheckEnabled *bool `type:"boolean"` // The approximate amount of time, in seconds, between health checks of an individual - // target. For Application Load Balancers, the range is 5–300 seconds. For Network - // Load Balancers, the supported values are 10 or 30 seconds. + // target. For Application Load Balancers, the range is 5 to 300 seconds. For + // Network Load Balancers, the supported values are 10 or 30 seconds. // - // If the protocol of the target group is TCP, you can't modify this setting. + // With Network Load Balancers, you can't modify this setting. 
HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` // [HTTP/HTTPS health checks] The ping path that is the destination for the @@ -7036,16 +7068,16 @@ type ModifyTargetGroupInput struct { // The protocol the load balancer uses when performing health checks on targets. // The TCP protocol is supported for health checks only if the protocol of the - // target group is TCP or TLS. The TLS protocol is not supported for health - // checks. + // target group is TCP, TLS, UDP, or TCP_UDP. The TLS, UDP, and TCP_UDP protocols + // are not supported for health checks. // - // If the protocol of the target group is TCP, you can't modify this setting. + // With Network Load Balancers, you can't modify this setting. HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` // [HTTP/HTTPS health checks] The amount of time, in seconds, during which no // response means a failed health check. // - // If the protocol of the target group is TCP, you can't modify this setting. + // With Network Load Balancers, you can't modify this setting. HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` // The number of consecutive health checks successes required before considering @@ -7055,7 +7087,7 @@ type ModifyTargetGroupInput struct { // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful // response from a target. // - // If the protocol of the target group is TCP, you can't modify this setting. + // With Network Load Balancers, you can't modify this setting. Matcher *Matcher `type:"structure"` // The Amazon Resource Name (ARN) of the target group. @@ -7195,9 +7227,19 @@ func (s *ModifyTargetGroupOutput) SetTargetGroups(v []*TargetGroup) *ModifyTarge return s } +// Information about a path pattern condition. type PathPatternConditionConfig struct { _ struct{} `type:"structure"` + // One or more path patterns to compare against the request URL. The maximum + // size of each string is 128 characters. The comparison is case sensitive. + // The following wildcard characters are supported: * (matches 0 or more characters) + // and ? (matches exactly 1 character). + // + // If you specify multiple strings, the condition is satisfied if one of them + // matches the request URL. The path pattern is compared only to the path of + // the URL, not to its query string. To compare against the query string, use + // QueryStringConditionConfig. Values []*string `type:"list"` } @@ -7217,9 +7259,24 @@ func (s *PathPatternConditionConfig) SetValues(v []*string) *PathPatternConditio return s } +// Information about a query string condition. +// +// The query string component of a URI starts after the first '?' character +// and is terminated by either a '#' character or the end of the URI. A typical +// query string contains key/value pairs separated by '&' characters. The allowed +// characters are specified by RFC 3986. Any character can be percentage encoded. type QueryStringConditionConfig struct { _ struct{} `type:"structure"` + // One or more key/value pairs or values to find in the query string. The maximum + // size of each string is 128 characters. The comparison is case insensitive. + // The following wildcard characters are supported: * (matches 0 or more characters) + // and ? (matches exactly 1 character). To search for a literal '*' or '?' character + // in a query string, you must escape these characters in Values using a '\' + // character. 
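A hypothetical ModifyRule sketch of the query-string condition described above (the rule ARN is a placeholder); it matches version=v2* as a key/value pair and a literal 'debug*' value, with the wildcard escaped as the docs direct:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyRule(&elbv2.ModifyRuleInput{
		RuleArn: aws.String("arn:aws:elasticloadbalancing:...:listener-rule/app/..."), // placeholder
		Conditions: []*elbv2.RuleCondition{{
			Field: aws.String("query-string"),
			QueryStringConfig: &elbv2.QueryStringConditionConfig{
				Values: []*elbv2.QueryStringKeyValuePair{
					{Key: aws.String("version"), Value: aws.String("v2*")}, // '*' is a wildcard here
					{Value: aws.String(`debug\*`)},                         // key omitted; '\*' is a literal '*'
				},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Rules)
}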
+ // + // If you specify multiple key/value pairs or values, the condition is satisfied + // if one of them is found in the query string. Values []*QueryStringKeyValuePair `type:"list"` } @@ -7239,11 +7296,14 @@ func (s *QueryStringConditionConfig) SetValues(v []*QueryStringKeyValuePair) *Qu return s } +// Information about a key/value pair. type QueryStringKeyValuePair struct { _ struct{} `type:"structure"` + // The key. You can omit the key. Key *string `type:"string"` + // The value. Value *string `type:"string"` } @@ -7603,10 +7663,14 @@ func (s RemoveTagsOutput) GoString() string { type Rule struct { _ struct{} `type:"structure"` - // The actions. + // The actions. Each rule must include exactly one of the following types of + // actions: forward, redirect, or fixed-response, and it must be the last action + // to be performed. Actions []*Action `type:"list"` - // The conditions. + // The conditions. Each rule can include zero or one of the following conditions: + // http-request-method, host-header, path-pattern, and source-ip, and zero or + // more of the following conditions: http-header and query-string. Conditions []*RuleCondition `type:"list"` // Indicates whether this is the default rule. @@ -7663,27 +7727,46 @@ func (s *Rule) SetRuleArn(v string) *Rule { type RuleCondition struct { _ struct{} `type:"structure"` - // The name of the field. The possible values are host-header and path-pattern. + // The field in the HTTP request. The following are the possible values: + // + // * http-header + // + // * http-request-method + // + // * host-header + // + // * path-pattern + // + // * query-string + // + // * source-ip Field *string `type:"string"` + // Information for a host header condition. Specify only when Field is host-header. HostHeaderConfig *HostHeaderConditionConfig `type:"structure"` + // Information for an HTTP header condition. Specify only when Field is http-header. HttpHeaderConfig *HttpHeaderConditionConfig `type:"structure"` + // Information for an HTTP method condition. Specify only when Field is http-request-method. HttpRequestMethodConfig *HttpRequestMethodConditionConfig `type:"structure"` + // Information for a path pattern condition. Specify only when Field is path-pattern. PathPatternConfig *PathPatternConditionConfig `type:"structure"` + // Information for a query string condition. Specify only when Field is query-string. QueryStringConfig *QueryStringConditionConfig `type:"structure"` + // Information for a source IP condition. Specify only when Field is source-ip. SourceIpConfig *SourceIpConditionConfig `type:"structure"` - // The condition value. + // The condition value. You can use Values if the rule contains only host-header + // and path-pattern conditions. Otherwise, you can use HostHeaderConfig for + // host-header conditions and PathPatternConfig for path-pattern conditions. // - // If the field name is host-header, you can specify a single host name (for - // example, my.example.com). A host name is case insensitive, can be up to 128 - // characters in length, and can contain any of the following characters. You - // can include up to three wildcard characters. + // If Field is host-header, you can specify a single host name (for example, + // my.example.com). A host name is case insensitive, can be up to 128 characters + // in length, and can contain any of the following characters. // // * A-Z, a-z, 0-9 // @@ -7693,10 +7776,9 @@ type RuleCondition struct { // // * ? 
(matches exactly 1 character) // - // If the field name is path-pattern, you can specify a single path pattern - // (for example, /img/*). A path pattern is case-sensitive, can be up to 128 - // characters in length, and can contain any of the following characters. You - // can include up to three wildcard characters. + // If Field is path-pattern, you can specify a single path pattern (for example, + // /img/*). A path pattern is case-sensitive, can be up to 128 characters in + // length, and can contain any of the following characters. // // * A-Z, a-z, 0-9 // @@ -7819,7 +7901,7 @@ type SetIpAddressTypeInput struct { // The IP address type. The possible values are ipv4 (for IPv4 addresses) and // dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use - // ipv4. + // ipv4. Network Load Balancers must use ipv4. // // IpAddressType is a required field IpAddressType *string `type:"string" required:"true" enum:"IpAddressType"` @@ -8122,9 +8204,21 @@ func (s *SetSubnetsOutput) SetAvailabilityZones(v []*AvailabilityZone) *SetSubne return s } +// Information about a source IP condition. +// +// You can use this condition to route based on the IP address of the source +// that connects to the load balancer. If a client is behind a proxy, this is +// the IP address of the proxy not the IP address of the client. type SourceIpConditionConfig struct { _ struct{} `type:"structure"` + // One or more source IP addresses, in CIDR format. You can use both IPv4 and + // IPv6 addresses. Wildcards are not supported. + // + // If you specify multiple addresses, the condition is satisfied if the source + // IP address of the request matches one of the CIDR blocks. This condition + // is not satisfied by the addresses in the X-Forwarded-For header. To search + // for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig. Values []*string `type:"list"` } @@ -8333,7 +8427,8 @@ type TargetDescription struct { // Id is a required field Id *string `type:"string" required:"true"` - // The port on which the target is listening. + // The port on which the target is listening. Not used if the target is a Lambda + // function. Port *int64 `min:"1" type:"integer"` } @@ -8416,7 +8511,8 @@ type TargetGroup struct { // The HTTP codes to use when checking for a successful response from a target. Matcher *Matcher `type:"structure"` - // The port on which the targets are listening. + // The port on which the targets are listening. Not used if the target is a + // Lambda function. Port *int64 `min:"1" type:"integer"` // The protocol to use for routing traffic to the targets. @@ -8649,15 +8745,16 @@ type TargetHealth struct { // values: // // * Target.ResponseCodeMismatch - The health checks did not return an expected - // HTTP code. + // HTTP code. Applies only to Application Load Balancers. // - // * Target.Timeout - The health check requests timed out. + // * Target.Timeout - The health check requests timed out. Applies only to + // Application Load Balancers. // - // * Target.FailedHealthChecks - The health checks failed because the connection - // to the target timed out, the target response was malformed, or the target - // failed the health check for an unknown reason. + // * Target.FailedHealthChecks - The load balancer received an error while + // establishing a connection to the target or the target response was malformed. // // * Elb.InternalError - The health checks failed due to an internal error. + // Applies only to Application Load Balancers. 
// // If the target state is unused, the reason code can be one of the following // values: @@ -8669,11 +8766,11 @@ type TargetHealth struct { // or the target is in an Availability Zone that is not enabled for its load // balancer. // + // * Target.InvalidState - The target is in the stopped or terminated state. + // // * Target.IpUnusable - The target IP address is reserved for use by a load // balancer. // - // * Target.InvalidState - The target is in the stopped or terminated state. - // // If the target state is draining, the reason code can be the following value: // // * Target.DeregistrationInProgress - The target is in the process of being @@ -8683,7 +8780,10 @@ type TargetHealth struct { // value: // // * Target.HealthCheckDisabled - Health checks are disabled for the target - // group. + // group. Applies only to Application Load Balancers. + // + // * Elb.InternalError - Target health is unavailable due to an internal + // error. Applies only to Network Load Balancers. Reason *string `type:"string" enum:"TargetHealthReasonEnum"` // The state of the target. @@ -8849,6 +8949,12 @@ const ( // ProtocolEnumTls is a ProtocolEnum enum value ProtocolEnumTls = "TLS" + + // ProtocolEnumUdp is a ProtocolEnum enum value + ProtocolEnumUdp = "UDP" + + // ProtocolEnumTcpUdp is a ProtocolEnum enum value + ProtocolEnumTcpUdp = "TCP_UDP" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go index ef3e971c654..9a67fe29932 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go @@ -15,40 +15,16 @@ // the targets. // // Elastic Load Balancing supports the following types of load balancers: Application -// Load Balancers, Network Load Balancers, and Classic Load Balancers. +// Load Balancers, Network Load Balancers, and Classic Load Balancers. This +// reference covers Application Load Balancers and Network Load Balancers. // // An Application Load Balancer makes routing and load balancing decisions at // the application layer (HTTP/HTTPS). A Network Load Balancer makes routing // and load balancing decisions at the transport layer (TCP/TLS). Both Application // Load Balancers and Network Load Balancers can route requests to one or more // ports on each EC2 instance or container instance in your virtual private -// cloud (VPC). -// -// A Classic Load Balancer makes routing and load balancing decisions either -// at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and -// supports either EC2-Classic or a VPC. For more information, see the Elastic -// Load Balancing User Guide (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). -// -// This reference covers the 2015-12-01 API, which supports Application Load -// Balancers and Network Load Balancers. The 2012-06-01 API supports Classic -// Load Balancers. -// -// To get started, complete the following tasks: -// -// Create a load balancer using CreateLoadBalancer. -// -// Create a target group using CreateTargetGroup. -// -// Register targets for the target group using RegisterTargets. -// -// Create one or more listeners for your load balancer using CreateListener. -// -// To delete a load balancer and its related resources, complete the following -// tasks: -// -// Delete the load balancer using DeleteLoadBalancer. 
-// -// Delete the target group using DeleteTargetGroup. +// cloud (VPC). For more information, see the Elastic Load Balancing User Guide +// (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). // // All Elastic Load Balancing operations are idempotent, which means that they // complete at most one time. If you repeat an operation, it succeeds. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go index ad97e8df885..1fcdb5bf44c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go @@ -46,11 +46,11 @@ const ( // svc := elbv2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELBV2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELBV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELBV2 { svc := &ELBV2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index 4b8bd2c8130..d966767a739 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -766,7 +766,7 @@ func (c *EMR) DescribeJobFlowsRequest(input *DescribeJobFlowsInput) (req *reques // // * Job flows created and completed in the last two weeks // -// * Job flows created within the last two months that are in one of the +// * Job flows created within the last two months that are in one of the // following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING // // Amazon EMR can return a maximum of 512 job flow descriptions. @@ -974,6 +974,91 @@ func (c *EMR) DescribeStepWithContext(ctx aws.Context, input *DescribeStepInput, return out, req.Send() } +const opGetBlockPublicAccessConfiguration = "GetBlockPublicAccessConfiguration" + +// GetBlockPublicAccessConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBlockPublicAccessConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBlockPublicAccessConfiguration for more information on using the GetBlockPublicAccessConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBlockPublicAccessConfigurationRequest method. +// req, resp := client.GetBlockPublicAccessConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetBlockPublicAccessConfiguration +func (c *EMR) GetBlockPublicAccessConfigurationRequest(input *GetBlockPublicAccessConfigurationInput) (req *request.Request, output *GetBlockPublicAccessConfigurationOutput) { + op := &request.Operation{ + Name: opGetBlockPublicAccessConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBlockPublicAccessConfigurationInput{} + } + + output = &GetBlockPublicAccessConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. +// +// Returns the Amazon EMR block public access configuration for your AWS account +// in the current Region. For more information see Configure Block Public Access +// for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) +// in the Amazon EMR Management Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation GetBlockPublicAccessConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// This exception occurs when there is an internal failure in the EMR service. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetBlockPublicAccessConfiguration +func (c *EMR) GetBlockPublicAccessConfiguration(input *GetBlockPublicAccessConfigurationInput) (*GetBlockPublicAccessConfigurationOutput, error) { + req, out := c.GetBlockPublicAccessConfigurationRequest(input) + return out, req.Send() +} + +// GetBlockPublicAccessConfigurationWithContext is the same as GetBlockPublicAccessConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBlockPublicAccessConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) GetBlockPublicAccessConfigurationWithContext(ctx aws.Context, input *GetBlockPublicAccessConfigurationInput, opts ...request.Option) (*GetBlockPublicAccessConfigurationOutput, error) { + req, out := c.GetBlockPublicAccessConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBootstrapActions = "ListBootstrapActions" // ListBootstrapActionsRequest generates a "aws/request.Request" representing the @@ -1073,7 +1158,7 @@ func (c *EMR) ListBootstrapActionsWithContext(ctx aws.Context, input *ListBootst // // Example iterating over at most 3 pages of a ListBootstrapActions operation. 
// pageNum := 0 // err := client.ListBootstrapActionsPages(params, -// func(page *ListBootstrapActionsOutput, lastPage bool) bool { +// func(page *emr.ListBootstrapActionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1105,10 +1190,12 @@ func (c *EMR) ListBootstrapActionsPagesWithContext(ctx aws.Context, input *ListB }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBootstrapActionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBootstrapActionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1215,7 +1302,7 @@ func (c *EMR) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, // // Example iterating over at most 3 pages of a ListClusters operation. // pageNum := 0 // err := client.ListClustersPages(params, -// func(page *ListClustersOutput, lastPage bool) bool { +// func(page *emr.ListClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1247,10 +1334,12 @@ func (c *EMR) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1356,7 +1445,7 @@ func (c *EMR) ListInstanceFleetsWithContext(ctx aws.Context, input *ListInstance // // Example iterating over at most 3 pages of a ListInstanceFleets operation. // pageNum := 0 // err := client.ListInstanceFleetsPages(params, -// func(page *ListInstanceFleetsOutput, lastPage bool) bool { +// func(page *emr.ListInstanceFleetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1388,10 +1477,12 @@ func (c *EMR) ListInstanceFleetsPagesWithContext(ctx aws.Context, input *ListIns }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstanceFleetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstanceFleetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1494,7 +1585,7 @@ func (c *EMR) ListInstanceGroupsWithContext(ctx aws.Context, input *ListInstance // // Example iterating over at most 3 pages of a ListInstanceGroups operation. // pageNum := 0 // err := client.ListInstanceGroupsPages(params, -// func(page *ListInstanceGroupsOutput, lastPage bool) bool { +// func(page *emr.ListInstanceGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1526,10 +1617,12 @@ func (c *EMR) ListInstanceGroupsPagesWithContext(ctx aws.Context, input *ListIns }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstanceGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstanceGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1635,7 +1728,7 @@ func (c *EMR) ListInstancesWithContext(ctx aws.Context, input *ListInstancesInpu // // Example iterating over at most 3 pages of a ListInstances operation. 
// pageNum := 0 // err := client.ListInstancesPages(params, -// func(page *ListInstancesOutput, lastPage bool) bool { +// func(page *emr.ListInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1667,10 +1760,12 @@ func (c *EMR) ListInstancesPagesWithContext(ctx aws.Context, input *ListInstance }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1776,7 +1871,7 @@ func (c *EMR) ListSecurityConfigurationsWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListSecurityConfigurations operation. // pageNum := 0 // err := client.ListSecurityConfigurationsPages(params, -// func(page *ListSecurityConfigurationsOutput, lastPage bool) bool { +// func(page *emr.ListSecurityConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1808,10 +1903,12 @@ func (c *EMR) ListSecurityConfigurationsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSecurityConfigurationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSecurityConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1915,7 +2012,7 @@ func (c *EMR) ListStepsWithContext(ctx aws.Context, input *ListStepsInput, opts // // Example iterating over at most 3 pages of a ListSteps operation. // pageNum := 0 // err := client.ListStepsPages(params, -// func(page *ListStepsOutput, lastPage bool) bool { +// func(page *emr.ListStepsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1947,10 +2044,12 @@ func (c *EMR) ListStepsPagesWithContext(ctx aws.Context, input *ListStepsInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStepsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStepsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2203,6 +2302,92 @@ func (c *EMR) PutAutoScalingPolicyWithContext(ctx aws.Context, input *PutAutoSca return out, req.Send() } +const opPutBlockPublicAccessConfiguration = "PutBlockPublicAccessConfiguration" + +// PutBlockPublicAccessConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBlockPublicAccessConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBlockPublicAccessConfiguration for more information on using the PutBlockPublicAccessConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBlockPublicAccessConfigurationRequest method. 
+// req, resp := client.PutBlockPublicAccessConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutBlockPublicAccessConfiguration +func (c *EMR) PutBlockPublicAccessConfigurationRequest(input *PutBlockPublicAccessConfigurationInput) (req *request.Request, output *PutBlockPublicAccessConfigurationOutput) { + op := &request.Operation{ + Name: opPutBlockPublicAccessConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutBlockPublicAccessConfigurationInput{} + } + + output = &PutBlockPublicAccessConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. +// +// Creates or updates an Amazon EMR block public access configuration for your +// AWS account in the current Region. For more information see Configure Block +// Public Access for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) +// in the Amazon EMR Management Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation PutBlockPublicAccessConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// This exception occurs when there is an internal failure in the EMR service. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutBlockPublicAccessConfiguration +func (c *EMR) PutBlockPublicAccessConfiguration(input *PutBlockPublicAccessConfigurationInput) (*PutBlockPublicAccessConfigurationOutput, error) { + req, out := c.PutBlockPublicAccessConfigurationRequest(input) + return out, req.Send() +} + +// PutBlockPublicAccessConfigurationWithContext is the same as PutBlockPublicAccessConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBlockPublicAccessConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) PutBlockPublicAccessConfigurationWithContext(ctx aws.Context, input *PutBlockPublicAccessConfigurationInput, opts ...request.Option) (*PutBlockPublicAccessConfigurationOutput, error) { + req, out := c.PutBlockPublicAccessConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRemoveAutoScalingPolicy = "RemoveAutoScalingPolicy" // RemoveAutoScalingPolicyRequest generates a "aws/request.Request" representing the @@ -2413,11 +2598,11 @@ func (c *EMR) RunJobFlowRequest(input *RunJobFlowInput) (req *request.Request, o // RunJobFlow creates and starts running a new cluster (job flow). The cluster // runs the steps specified. 
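Pairing the new GetBlockPublicAccessConfiguration and PutBlockPublicAccessConfiguration operations gives the usual read-modify-write flow. A minimal sketch, assuming only that Port 22 (SSH) should remain publicly reachable:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession()))

	// Read the current account/Region configuration and its metadata.
	got, err := svc.GetBlockPublicAccessConfiguration(&emr.GetBlockPublicAccessConfigurationInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("last modified by:", aws.StringValue(got.BlockPublicAccessConfigurationMetadata.CreatedByArn))

	// Put creates or updates the configuration as a whole, so include every
	// exception you want to keep. Here only Port 22 stays permitted.
	_, err = svc.PutBlockPublicAccessConfiguration(&emr.PutBlockPublicAccessConfigurationInput{
		BlockPublicAccessConfiguration: &emr.BlockPublicAccessConfiguration{
			BlockPublicSecurityGroupRules: aws.Bool(true),
			PermittedPublicSecurityGroupRuleRanges: []*emr.PortRange{
				{MinRange: aws.Int64(22), MaxRange: aws.Int64(22)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}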
After the steps complete, the cluster stops and // the HDFS partition is lost. To prevent loss of data, configure the last step -// of the job flow to store results in Amazon S3. If the JobFlowInstancesConfigKeepJobFlowAliveWhenNoSteps -// parameter is set to TRUE, the cluster transitions to the WAITING state rather -// than shutting down after the steps have completed. +// of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig +// KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions +// to the WAITING state rather than shutting down after the steps have completed. // -// For additional protection, you can set the JobFlowInstancesConfigTerminationProtected +// For additional protection, you can set the JobFlowInstancesConfig TerminationProtected // parameter to TRUE to lock the cluster and prevent it from being terminated // by API call, user intervention, or in the event of a job flow error. // @@ -2614,6 +2799,8 @@ func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req // SetVisibleToAllUsers API operation for Amazon Elastic MapReduce. // +// This member will be deprecated. +// // Sets whether all AWS Identity and Access Management (IAM) users under your // account can access the specified clusters (job flows). This action works // on running clusters. You can also set the visibility of a cluster when you @@ -3341,6 +3528,117 @@ func (s *AutoScalingPolicyStatus) SetStateChangeReason(v *AutoScalingPolicyState return s } +// A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules +// is set to true, Amazon EMR prevents cluster creation if one of the cluster's +// security groups has a rule that allows inbound traffic from 0.0.0.0/0 or +// ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges. +type BlockPublicAccessConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether EMR block public access is enabled (true) or disabled (false). + // By default, the value is false for accounts that have created EMR clusters + // before July 2019. For accounts created after this, the default is true. + // + // BlockPublicSecurityGroupRules is a required field + BlockPublicSecurityGroupRules *bool `type:"boolean" required:"true"` + + // Specifies ports and port ranges that are permitted to have security group + // rules that allow inbound traffic from all public sources. For example, if + // Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, + // Amazon EMR allows cluster creation if a security group associated with the + // cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 + // or IPv6 port ::/0 as the source. + // + // By default, Port 22, which is used for SSH access to the cluster EC2 instances, + // is in the list of PermittedPublicSecurityGroupRuleRanges. + PermittedPublicSecurityGroupRuleRanges []*PortRange `type:"list"` +} + +// String returns the string representation +func (s BlockPublicAccessConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockPublicAccessConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BlockPublicAccessConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BlockPublicAccessConfiguration"} + if s.BlockPublicSecurityGroupRules == nil { + invalidParams.Add(request.NewErrParamRequired("BlockPublicSecurityGroupRules")) + } + if s.PermittedPublicSecurityGroupRuleRanges != nil { + for i, v := range s.PermittedPublicSecurityGroupRuleRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PermittedPublicSecurityGroupRuleRanges", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockPublicSecurityGroupRules sets the BlockPublicSecurityGroupRules field's value. +func (s *BlockPublicAccessConfiguration) SetBlockPublicSecurityGroupRules(v bool) *BlockPublicAccessConfiguration { + s.BlockPublicSecurityGroupRules = &v + return s +} + +// SetPermittedPublicSecurityGroupRuleRanges sets the PermittedPublicSecurityGroupRuleRanges field's value. +func (s *BlockPublicAccessConfiguration) SetPermittedPublicSecurityGroupRuleRanges(v []*PortRange) *BlockPublicAccessConfiguration { + s.PermittedPublicSecurityGroupRuleRanges = v + return s +} + +// Properties that describe the AWS principal that created the BlockPublicAccessConfiguration +// using the PutBlockPublicAccessConfiguration action as well as the date and +// time that the configuration was created. Each time a configuration for block +// public access is updated, Amazon EMR updates this metadata. +type BlockPublicAccessConfigurationMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name that created or last modified the configuration. + // + // CreatedByArn is a required field + CreatedByArn *string `min:"20" type:"string" required:"true"` + + // The date and time that the configuration was created. + // + // CreationDateTime is a required field + CreationDateTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s BlockPublicAccessConfigurationMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockPublicAccessConfigurationMetadata) GoString() string { + return s.String() +} + +// SetCreatedByArn sets the CreatedByArn field's value. +func (s *BlockPublicAccessConfigurationMetadata) SetCreatedByArn(v string) *BlockPublicAccessConfigurationMetadata { + s.CreatedByArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *BlockPublicAccessConfigurationMetadata) SetCreationDateTime(v time.Time) *BlockPublicAccessConfigurationMetadata { + s.CreationDateTime = &v + return s +} + // Configuration of a bootstrap action. type BootstrapActionConfig struct { _ struct{} `type:"structure"` @@ -3696,6 +3994,7 @@ type Cluster struct { // The unique identifier for the cluster. Id *string `type:"string"` + // // The instance fleet configuration is available only in Amazon EMR versions // 4.8.0 and later, excluding 5.0.x versions. // @@ -3730,11 +4029,11 @@ type Cluster struct { // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. Release labels are in the - // form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, - // emr-5.14.0. 
For more information about Amazon EMR release versions and included - // application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ + // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. + // For more information about Amazon EMR release versions and included application + // versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ // (https://docs.aws.amazon.com/emr/latest/ReleaseGuide/). The release label - // applies only to Amazon EMR releases versions 4.x and later. Earlier versions + // applies only to Amazon EMR releases version 4.0 and later. Earlier versions // use AmiVersion. ReleaseLabel *string `type:"string"` @@ -3781,6 +4080,8 @@ type Cluster struct { // of a cluster error. TerminationProtected *bool `type:"boolean"` + // This member will be deprecated. + // // Indicates whether the cluster is visible to all IAM users of the AWS account // associated with the cluster. If this value is set to true, all IAM users // of that AWS account can view and manage the cluster if they have the proper @@ -4166,6 +4467,7 @@ func (s *Command) SetScriptPath(v string) *Command { return s } +// // Amazon EMR releases 4.x or later. // // An optional configuration specification to be used when provisioning cluster @@ -4847,14 +5149,9 @@ type Ec2InstanceAttributes struct { // the master node as a user named "hadoop". Ec2KeyName *string `type:"string"` - // To launch the cluster in Amazon VPC, set this parameter to the identifier - // of the Amazon VPC subnet where you want the cluster to launch. If you do - // not specify this value, the cluster is launched in the normal AWS cloud, - // outside of a VPC. - // - // Amazon VPC currently does not support cluster compute quadruple extra large - // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance - // type for nodes of a cluster launched in a VPC. + // Set this parameter to the identifier of the Amazon VPC subnet where you want + // the cluster to launch. If you do not specify this value, and your account + // supports EC2-Classic, the cluster launches in EC2-Classic. Ec2SubnetId *string `type:"string"` // The identifier of the Amazon EC2 security group for the master node. @@ -4882,7 +5179,7 @@ type Ec2InstanceAttributes struct { // EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR // chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, // and then launches all cluster instances within that Subnet. If this value - // is not specified, and the account and region support EC2-Classic networks, + // is not specified, and the account and Region support EC2-Classic networks, // the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones // instead of this setting. If EC2-Classic is not supported, and no Subnet is // specified, Amazon EMR chooses the subnet for you. 
RequestedEc2SubnetIDs and @@ -5018,6 +5315,67 @@ func (s *FailureDetails) SetReason(v string) *FailureDetails { return s } +type GetBlockPublicAccessConfigurationInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetBlockPublicAccessConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockPublicAccessConfigurationInput) GoString() string { + return s.String() +} + +type GetBlockPublicAccessConfigurationOutput struct { + _ struct{} `type:"structure"` + + // A configuration for Amazon EMR block public access. The configuration applies + // to all clusters created in your account for the current Region. The configuration + // specifies whether block public access is enabled. If block public access + // is enabled, security groups associated with the cluster cannot have rules + // that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port + // is specified as an exception using PermittedPublicSecurityGroupRuleRanges + // in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, + // and public access is allowed on this port. You can change this by updating + // the block public access configuration to remove the exception. + // + // BlockPublicAccessConfiguration is a required field + BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` + + // Properties that describe the AWS principal that created the BlockPublicAccessConfiguration + // using the PutBlockPublicAccessConfiguration action as well as the date and + // time that the configuration was created. Each time a configuration for block + // public access is updated, Amazon EMR updates this metadata. + // + // BlockPublicAccessConfigurationMetadata is a required field + BlockPublicAccessConfigurationMetadata *BlockPublicAccessConfigurationMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetBlockPublicAccessConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockPublicAccessConfigurationOutput) GoString() string { + return s.String() +} + +// SetBlockPublicAccessConfiguration sets the BlockPublicAccessConfiguration field's value. +func (s *GetBlockPublicAccessConfigurationOutput) SetBlockPublicAccessConfiguration(v *BlockPublicAccessConfiguration) *GetBlockPublicAccessConfigurationOutput { + s.BlockPublicAccessConfiguration = v + return s +} + +// SetBlockPublicAccessConfigurationMetadata sets the BlockPublicAccessConfigurationMetadata field's value. +func (s *GetBlockPublicAccessConfigurationOutput) SetBlockPublicAccessConfigurationMetadata(v *BlockPublicAccessConfigurationMetadata) *GetBlockPublicAccessConfigurationOutput { + s.BlockPublicAccessConfigurationMetadata = v + return s +} + // A job flow step consisting of a JAR file whose main function will be executed. // The main function submits a job for Hadoop to execute and waits for the job // to finish or fail. @@ -5708,8 +6066,8 @@ type InstanceFleetStatus struct { // A code representing the instance fleet status. // - // * PROVISIONING—The instance fleet is provisioning EC2 resources and is - // not yet ready to run jobs. + // * PROVISIONING—The instance fleet is provisioning EC2 resources and + // is not yet ready to run jobs. 
//
// * BOOTSTRAPPING—EC2 instances and other resources have been provisioned
// and the bootstrap actions specified for the instances are underway.
@@ -5717,8 +6075,8 @@
// * RUNNING—EC2 instances and other resources are running. They are either
// executing jobs or waiting to execute jobs.
//
- // * RESIZING—A resize operation is underway. EC2 instances are either being
- // added or removed.
+ // * RESIZING—A resize operation is underway. EC2 instances are either
+ // being added or removed.
//
// * SUSPENDED—A resize operation could not complete. Existing EC2 instances
// are running, but instances can't be added or removed.
@@ -5830,6 +6188,7 @@ type InstanceGroup struct {
// to the On-Demand price.
BidPrice *string `type:"string"`
+ //
// Amazon EMR releases 4.x or later.
//
// The list of configurations supplied for an EMR cluster instance group. You
@@ -6016,6 +6375,7 @@ type InstanceGroupConfig struct {
// to the On-Demand price.
BidPrice *string `type:"string"`
+ //
// Amazon EMR releases 4.x or later.
//
// The list of configurations supplied for an EMR cluster instance group. You
@@ -6928,7 +7288,8 @@ type JobFlowDetail struct {
// is empty.
SupportedProducts []*string `type:"list"`
+ // This member will be deprecated.
+ //
// Specifies whether the cluster is visible to all IAM users of the AWS account
// associated with the cluster. If this value is set to true, all IAM users
// of that AWS account can view and (if they have the proper policy permissions
@@ -7127,14 +7489,8 @@ type JobFlowInstancesConfig struct {
// Applies to clusters that use the uniform instance group configuration. To
// launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this
// parameter to the identifier of the Amazon VPC subnet where you want the cluster
- // to launch. If you do not specify this value, the cluster launches in the
- // normal Amazon Web Services cloud, outside of an Amazon VPC, if the account
- // launching the cluster supports EC2 Classic networks in the region where the
- // cluster launches.
- //
- // Amazon VPC currently does not support cluster compute quadruple extra large
- // (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance
- // type for clusters launched in an Amazon VPC.
+ // to launch. If you do not specify this value and your account supports EC2-Classic,
+ // the cluster launches in EC2-Classic.
Ec2SubnetId *string `type:"string"`
// Applies to clusters that use the instance fleet configuration. When multiple
@@ -7162,6 +7518,7 @@
// The number of EC2 instances in the cluster.
InstanceCount *int64 `type:"integer"`
+ //
// The instance fleet configuration is available only in Amazon EMR versions
// 4.8.0 and later, excluding 5.0.x versions.
//
@@ -8445,6 +8802,56 @@ func (s *PlacementType) SetAvailabilityZones(v []*string) *PlacementType {
return s
}
+// A list of port ranges that are permitted to allow inbound traffic from all
+// public IP addresses. To specify a single port, use the same value for MinRange
+// and MaxRange.
+type PortRange struct {
+ _ struct{} `type:"structure"`
+
+ // The largest port number in a specified range of port numbers.
+ MaxRange *int64 `type:"integer"`
+
+ // The smallest port number in a specified range of port numbers.
+ // + // MinRange is a required field + MinRange *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PortRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PortRange"} + if s.MinRange == nil { + invalidParams.Add(request.NewErrParamRequired("MinRange")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxRange sets the MaxRange field's value. +func (s *PortRange) SetMaxRange(v int64) *PortRange { + s.MaxRange = &v + return s +} + +// SetMinRange sets the MinRange field's value. +func (s *PortRange) SetMinRange(v int64) *PortRange { + s.MinRange = &v + return s +} + type PutAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -8560,6 +8967,71 @@ func (s *PutAutoScalingPolicyOutput) SetInstanceGroupId(v string) *PutAutoScalin return s } +type PutBlockPublicAccessConfigurationInput struct { + _ struct{} `type:"structure"` + + // A configuration for Amazon EMR block public access. The configuration applies + // to all clusters created in your account for the current Region. The configuration + // specifies whether block public access is enabled. If block public access + // is enabled, security groups associated with the cluster cannot have rules + // that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port + // is specified as an exception using PermittedPublicSecurityGroupRuleRanges + // in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, + // and public access is allowed on this port. You can change this by updating + // BlockPublicSecurityGroupRules to remove the exception. + // + // BlockPublicAccessConfiguration is a required field + BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBlockPublicAccessConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBlockPublicAccessConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBlockPublicAccessConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBlockPublicAccessConfigurationInput"} + if s.BlockPublicAccessConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("BlockPublicAccessConfiguration")) + } + if s.BlockPublicAccessConfiguration != nil { + if err := s.BlockPublicAccessConfiguration.Validate(); err != nil { + invalidParams.AddNested("BlockPublicAccessConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockPublicAccessConfiguration sets the BlockPublicAccessConfiguration field's value. 
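Taken together, the new GetBlockPublicAccessConfiguration and PutBlockPublicAccessConfiguration operations and the PortRange type give callers the whole block public access surface; a minimal usage sketch follows before the setter that continues below. The BlockPublicSecurityGroupRules and PermittedPublicSecurityGroupRuleRanges field names follow the doc comments above; the session, region, and the exact field set for this SDK version are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	// Shared session; the region is chosen for illustration only.
	svc := emr.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Enable block public access while keeping the documented default
	// exception for SSH explicit, as a single-port PortRange
	// (MinRange == MaxRange, per the PortRange doc comment).
	_, err := svc.PutBlockPublicAccessConfiguration(&emr.PutBlockPublicAccessConfigurationInput{
		BlockPublicAccessConfiguration: &emr.BlockPublicAccessConfiguration{
			BlockPublicSecurityGroupRules: aws.Bool(true),
			PermittedPublicSecurityGroupRuleRanges: []*emr.PortRange{
				{MinRange: aws.Int64(22), MaxRange: aws.Int64(22)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the configuration back, as described for
	// GetBlockPublicAccessConfigurationOutput above.
	out, err := svc.GetBlockPublicAccessConfiguration(&emr.GetBlockPublicAccessConfigurationInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.BlockPublicAccessConfiguration)
}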
+func (s *PutBlockPublicAccessConfigurationInput) SetBlockPublicAccessConfiguration(v *BlockPublicAccessConfiguration) *PutBlockPublicAccessConfigurationInput { + s.BlockPublicAccessConfiguration = v + return s +} + +type PutBlockPublicAccessConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBlockPublicAccessConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBlockPublicAccessConfigurationOutput) GoString() string { + return s.String() +} + type RemoveAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -8769,6 +9241,7 @@ type RunJobFlowInput struct { // Name is a required field Name *string `type:"string" required:"true"` + // // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, // use Applications. // @@ -8799,11 +9272,11 @@ type RunJobFlowInput struct { // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. Release labels are in the - // form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, - // emr-5.14.0. For more information about Amazon EMR release versions and included - // application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ + // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. + // For more information about Amazon EMR release versions and included application + // versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ // (https://docs.aws.amazon.com/emr/latest/ReleaseGuide/). The release label - // applies only to Amazon EMR releases versions 4.x and later. Earlier versions + // applies only to Amazon EMR releases version 4.0 and later. Earlier versions // use AmiVersion. ReleaseLabel *string `type:"string"` @@ -8838,6 +9311,7 @@ type RunJobFlowInput struct { // A list of steps to run. Steps []*StepConfig `type:"list"` + // // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, // use Applications. // @@ -8853,6 +9327,8 @@ type RunJobFlowInput struct { // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags []*Tag `type:"list"` + // This member will be deprecated. + // // Whether the cluster is visible to all IAM users of the AWS account associated // with the cluster. If this value is set to true, all IAM users of that AWS // account can view and (if they have the proper policy permissions set) manage @@ -9417,8 +9893,7 @@ type SetTerminationProtectionInput struct { _ struct{} `type:"structure"` // A list of strings that uniquely identify the clusters to protect. This identifier - // is returned by RunJobFlow and can also be obtained from DescribeJobFlows - // . + // is returned by RunJobFlow and can also be obtained from DescribeJobFlows . // // JobFlowIds is a required field JobFlowIds []*string `type:"list" required:"true"` @@ -9483,6 +9958,8 @@ func (s SetTerminationProtectionOutput) GoString() string { return s.String() } +// This member will be deprecated. +// // The input to the SetVisibleToAllUsers action. type SetVisibleToAllUsersInput struct { _ struct{} `type:"structure"` @@ -9492,6 +9969,8 @@ type SetVisibleToAllUsersInput struct { // JobFlowIds is a required field JobFlowIds []*string `type:"list" required:"true"` + // This member will be deprecated. 
+ // // Whether the specified clusters are visible to all IAM users of the AWS account // associated with the cluster. If this value is set to True, all IAM users // of that AWS account can view and, if they have the proper IAM policy permissions @@ -10213,7 +10692,7 @@ type Tag struct { _ struct{} `type:"structure"` // A user-defined key, which is the minimum required information for a valid - // tag. For more information, see Tag (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html). + // tag. For more information, see Tag (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html). Key *string `type:"string"` // A user-defined value, which is optional in a tag. For more information, see diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/service.go index 92735a793d4..40af82bfabc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -46,11 +46,11 @@ const ( // svc := emr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EMR { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EMR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EMR { svc := &EMR{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-03-31", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go index 50916817484..e5afc32e61a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go @@ -109,7 +109,7 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // the destination. The role should allow the Kinesis Data Firehose principal // to assume the role, and the role should have permissions that allow the service // to deliver the data. For more information, see Grant Kinesis Data Firehose -// Access to an Amazon S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // in the Amazon Kinesis Data Firehose Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -547,7 +547,7 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // second, 5,000 records per second, or 5 MB per second. 
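The PutRecord discussion continues below; since it and PutRecordBatch share the same aggregate limit language, here is a brief sketch of a batched put. The delivery stream name and region are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// Batch up to 500 records per request, per the PutRecordBatch doc
	// comment; "example-stream" is a placeholder delivery stream name.
	records := make([]*firehose.Record, 0, 3)
	for i := 0; i < 3; i++ {
		records = append(records, &firehose.Record{
			Data: []byte(fmt.Sprintf("event-%d\n", i)),
		})
	}

	out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
		DeliveryStreamName: aws.String("example-stream"),
		Records:            records,
	})
	if err != nil {
		log.Fatal(err)
	}
	// FailedPutCount reports per-record failures even when the call
	// itself succeeds, so callers should inspect it and retry.
	fmt.Println("failed records:", aws.Int64Value(out.FailedPutCount))
}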
If you use PutRecord // and PutRecordBatch, the limits are an aggregate across these two operations // for each delivery stream. For more information about limits and how to request -// an increase, see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// an increase, see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // You must specify the name of the delivery stream and the data record when // using PutRecord. The data record consists of a data blob that can be up to @@ -595,7 +595,7 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, -// see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecord func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { @@ -672,7 +672,7 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // second, 5,000 records per second, or 5 MB per second. If you use PutRecord // and PutRecordBatch, the limits are an aggregate across these two operations // for each delivery stream. For more information about limits, see Amazon Kinesis -// Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // Each PutRecordBatch request supports up to 500 records. Each record in the // request can be as large as 1,000 KB (before base64 encoding), up to a limit @@ -745,7 +745,7 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, -// see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) { @@ -1299,20 +1299,27 @@ func (c *Firehose) UpdateDestinationWithContext(ctx aws.Context, input *UpdateDe // Describes hints for the buffering to perform before delivering data to the // destination. These options are treated as hints, and therefore Kinesis Data -// Firehose might choose to use different values when it is optimal. +// Firehose might choose to use different values when it is optimal. The SizeInMBs +// and IntervalInSeconds parameters are optional. However, if you specify a value +// for one of them, you must also provide a value for the other. type BufferingHints struct { _ struct{} `type:"structure"` // Buffer incoming data for the specified period of time, in seconds, before - // delivering it to the destination. The default value is 300.
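The pairing rule just stated (set both SizeInMBs and IntervalInSeconds, or neither) is easy to honor at construction time; the field-level descriptions continue below. A minimal sketch, with the interval and size values chosen arbitrarily:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// newHints returns buffering hints that respect the documented pairing
// rule: IntervalInSeconds and SizeInMBs are set together or not at all.
func newHints(seconds, mbs int64) *firehose.BufferingHints {
	if seconds == 0 && mbs == 0 {
		return nil // let the service apply its defaults (300 s / 5 MiB)
	}
	return &firehose.BufferingHints{
		IntervalInSeconds: aws.Int64(seconds),
		SizeInMBs:         aws.Int64(mbs),
	}
}

func main() {
	fmt.Println(newHints(300, 64))
}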
This parameter + // is optional but if you specify a value for it, you must also specify a value + // for SizeInMBs, and vice versa. IntervalInSeconds *int64 `min:"60" type:"integer"` - // Buffer incoming data to the specified size, in MBs, before delivering it - // to the destination. The default value is 5. + // Buffer incoming data to the specified size, in MiBs, before delivering it + // to the destination. The default value is 5. This parameter is optional but + // if you specify a value for it, you must also specify a value for IntervalInSeconds, + // and vice versa. // // We recommend setting this parameter to a value greater than the amount of // data you typically ingest into the delivery stream in 10 seconds. For example, - // if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher. + // if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or + // higher. SizeInMBs *int64 `min:"1" type:"integer"` } @@ -1404,7 +1411,7 @@ type CopyCommand struct { // Optional parameters to use with the Amazon Redshift COPY command. For more // information, see the "Optional Parameters" section of Amazon Redshift COPY - // command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some + // command (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some // possible examples that would apply to Kinesis Data Firehose are as follows: // // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and @@ -1421,7 +1428,7 @@ type CopyCommand struct { // JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path // specified is the format of the data. // - // For more examples, see Amazon Redshift COPY command examples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html). + // For more examples, see Amazon Redshift COPY command examples (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html). CopyOptions *string `type:"string"` // A comma-separated list of column names. @@ -2242,13 +2249,17 @@ type ElasticsearchDestinationConfiguration struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, - // DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after + // The endpoint to use when communicating with the cluster. Specify either this + // ClusterEndpoint or the DomainARN field. + ClusterEndpoint *string `min:"1" type:"string"` + + // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, + // DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after // assuming the role specified in RoleARN. For more information, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // - // DomainARN is a required field - DomainARN *string `min:"1" type:"string" required:"true"` + // Specify either ClusterEndpoint or DomainARN. + DomainARN *string `min:"1" type:"string"` // The Elasticsearch index name. // @@ -2257,8 +2268,8 @@ type ElasticsearchDestinationConfiguration struct { // The Elasticsearch index rotation period. Index rotation appends a timestamp // to the IndexName to facilitate the expiration of old data. 
For more information, - // see Index Rotation for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). - // The default value is OneDay. + // see Index Rotation for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). + // The default value is OneDay. IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"` // The data processing configuration. @@ -2271,7 +2282,7 @@ type ElasticsearchDestinationConfiguration struct { // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data // Firehose for calling the Amazon ES Configuration API and for indexing documents. // For more information, see Grant Kinesis Data Firehose Access to an Amazon - // S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // // RoleARN is a required field @@ -2283,7 +2294,7 @@ type ElasticsearchDestinationConfiguration struct { // appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose // delivers all incoming records to Amazon S3, and also writes failed documents // with elasticsearch-failed/ appended to the prefix. For more information, - // see Amazon S3 Backup for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). + // see Amazon S3 Backup for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). // Default value is FailedDocumentsOnly. S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"` @@ -2297,8 +2308,8 @@ type ElasticsearchDestinationConfiguration struct { // already has another type, Kinesis Data Firehose returns an error during run // time. // - // TypeName is a required field - TypeName *string `min:"1" type:"string" required:"true"` + // For Elasticsearch 7.x, don't specify a TypeName. + TypeName *string `type:"string"` } // String returns the string representation @@ -2314,8 +2325,8 @@ func (s ElasticsearchDestinationConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ElasticsearchDestinationConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationConfiguration"} - if s.DomainARN == nil { - invalidParams.Add(request.NewErrParamRequired("DomainARN")) + if s.ClusterEndpoint != nil && len(*s.ClusterEndpoint) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterEndpoint", 1)) } if s.DomainARN != nil && len(*s.DomainARN) < 1 { invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1)) @@ -2335,12 +2346,6 @@ func (s *ElasticsearchDestinationConfiguration) Validate() error { if s.S3Configuration == nil { invalidParams.Add(request.NewErrParamRequired("S3Configuration")) } - if s.TypeName == nil { - invalidParams.Add(request.NewErrParamRequired("TypeName")) - } - if s.TypeName != nil && len(*s.TypeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) - } if s.BufferingHints != nil { if err := s.BufferingHints.Validate(); err != nil { invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) @@ -2375,6 +2380,12 @@ func (s *ElasticsearchDestinationConfiguration) SetCloudWatchLoggingOptions(v *C return s } +// SetClusterEndpoint sets the ClusterEndpoint field's value. +func (s *ElasticsearchDestinationConfiguration) SetClusterEndpoint(v string) *ElasticsearchDestinationConfiguration { + s.ClusterEndpoint = &v + return s +} + // SetDomainARN sets the DomainARN field's value. func (s *ElasticsearchDestinationConfiguration) SetDomainARN(v string) *ElasticsearchDestinationConfiguration { s.DomainARN = &v @@ -2439,8 +2450,16 @@ type ElasticsearchDestinationDescription struct { // The Amazon CloudWatch logging options. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + // The endpoint to use when communicating with the cluster. Kinesis Data Firehose + // uses either this ClusterEndpoint or the DomainARN field to send data to Amazon + // ES. + ClusterEndpoint *string `min:"1" type:"string"` + // The ARN of the Amazon ES domain. For more information, see Amazon Resource // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // + // Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data + // to Amazon ES. DomainARN *string `min:"1" type:"string"` // The Elasticsearch index name. @@ -2465,8 +2484,9 @@ type ElasticsearchDestinationDescription struct { // The Amazon S3 destination. S3DestinationDescription *S3DestinationDescription `type:"structure"` - // The Elasticsearch type name. - TypeName *string `min:"1" type:"string"` + // The Elasticsearch type name. This applies to Elasticsearch 6.x and lower + // versions. For Elasticsearch 7.x, there's no value for TypeName. + TypeName *string `type:"string"` } // String returns the string representation @@ -2491,6 +2511,12 @@ func (s *ElasticsearchDestinationDescription) SetCloudWatchLoggingOptions(v *Clo return s } +// SetClusterEndpoint sets the ClusterEndpoint field's value. +func (s *ElasticsearchDestinationDescription) SetClusterEndpoint(v string) *ElasticsearchDestinationDescription { + s.ClusterEndpoint = &v + return s +} + // SetDomainARN sets the DomainARN field's value. func (s *ElasticsearchDestinationDescription) SetDomainARN(v string) *ElasticsearchDestinationDescription { s.DomainARN = &v @@ -2556,10 +2582,16 @@ type ElasticsearchDestinationUpdate struct { // The CloudWatch logging options for your delivery stream. 
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, - // DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after + // The endpoint to use when communicating with the cluster. Specify either this + // ClusterEndpoint or the DomainARN field. + ClusterEndpoint *string `min:"1" type:"string"` + + // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, + // DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after // assuming the IAM role specified in RoleARN. For more information, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // + // Specify either ClusterEndpoint or DomainARN. DomainARN *string `min:"1" type:"string"` // The Elasticsearch index name. @@ -2567,8 +2599,8 @@ type ElasticsearchDestinationUpdate struct { // The Elasticsearch index rotation period. Index rotation appends a timestamp // to IndexName to facilitate the expiration of old data. For more information, - // see Index Rotation for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). - // Default value is OneDay. + // see Index Rotation for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). + // Default value is OneDay. IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"` // The data processing configuration. @@ -2581,7 +2613,7 @@ type ElasticsearchDestinationUpdate struct { // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data // Firehose for calling the Amazon ES Configuration API and for indexing documents. // For more information, see Grant Kinesis Data Firehose Access to an Amazon - // S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). RoleARN *string `min:"1" type:"string"` @@ -2591,7 +2623,12 @@ type ElasticsearchDestinationUpdate struct { // The Elasticsearch type name. For Elasticsearch 6.x, there can be only one // type per index. If you try to specify a new type for an existing index that // already has another type, Kinesis Data Firehose returns an error during runtime. - TypeName *string `min:"1" type:"string"` + // + // If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery + // stream, Kinesis Data Firehose still delivers data to Elasticsearch with the + // old index name and type name. If you want to update your delivery stream + // with a new index name, provide an empty string for TypeName. + TypeName *string `type:"string"` } // String returns the string representation @@ -2607,6 +2644,9 @@ func (s ElasticsearchDestinationUpdate) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
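The TypeName note above (provide an empty string after a 6.x to 7.x upgrade) is worth a usage sketch before the Validate method that continues below. The stream name, version ID, and destination ID are placeholders that real code would obtain from DescribeDeliveryStream.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// After upgrading the domain from Elasticsearch 6.x to 7.x, clear the
	// old type name with an empty string, per the TypeName doc comment.
	_, err := svc.UpdateDestination(&firehose.UpdateDestinationInput{
		DeliveryStreamName:             aws.String("example-es-stream"),
		CurrentDeliveryStreamVersionId: aws.String("1"),
		DestinationId:                  aws.String("destinationId-000000000001"),
		ElasticsearchDestinationUpdate: &firehose.ElasticsearchDestinationUpdate{
			IndexName: aws.String("events-v7"),
			TypeName:  aws.String(""), // empty string clears the 6.x type
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}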
func (s *ElasticsearchDestinationUpdate) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationUpdate"} + if s.ClusterEndpoint != nil && len(*s.ClusterEndpoint) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterEndpoint", 1)) + } if s.DomainARN != nil && len(*s.DomainARN) < 1 { invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1)) } @@ -2616,9 +2656,6 @@ func (s *ElasticsearchDestinationUpdate) Validate() error { if s.RoleARN != nil && len(*s.RoleARN) < 1 { invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) } - if s.TypeName != nil && len(*s.TypeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) - } if s.BufferingHints != nil { if err := s.BufferingHints.Validate(); err != nil { invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) @@ -2653,6 +2690,12 @@ func (s *ElasticsearchDestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWat return s } +// SetClusterEndpoint sets the ClusterEndpoint field's value. +func (s *ElasticsearchDestinationUpdate) SetClusterEndpoint(v string) *ElasticsearchDestinationUpdate { + s.ClusterEndpoint = &v + return s +} + // SetDomainARN sets the DomainARN field's value. func (s *ElasticsearchDestinationUpdate) SetDomainARN(v string) *ElasticsearchDestinationUpdate { s.DomainARN = &v @@ -2808,15 +2851,13 @@ type ExtendedS3DestinationConfiguration struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The data processing configuration. @@ -2999,15 +3040,13 @@ type ExtendedS3DestinationDescription struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. 
You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The data processing configuration. @@ -3135,15 +3174,13 @@ type ExtendedS3DestinationUpdate struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The data processing configuration. @@ -5046,15 +5083,13 @@ type S3DestinationConfiguration struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The Amazon Resource Name (ARN) of the AWS credentials. For more information, @@ -5186,15 +5221,13 @@ type S3DestinationDescription struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. 
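The recurring Prefix and ErrorOutputPrefix rewrites in these hunks all point to the custom-prefixes guide; a sketch of what such a configuration might look like. The prefix expressions are of the kind the linked s3-prefixes.html page describes, and the ARNs are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// Date-partitioned data prefix plus a distinct prefix for failed
	// records, evaluated by Kinesis Data Firehose at delivery time.
	cfg := &firehose.ExtendedS3DestinationConfiguration{
		BucketARN:         aws.String("arn:aws:s3:::example-bucket"),
		RoleARN:           aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		Prefix:            aws.String("logs/!{timestamp:yyyy/MM/dd}/"),
		ErrorOutputPrefix: aws.String("errors/!{firehose:error-output-type}/"),
	}
	fmt.Println(cfg)
}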
For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The Amazon Resource Name (ARN) of the AWS credentials. For more information, @@ -5290,15 +5323,13 @@ type S3DestinationUpdate struct { // A prefix that Kinesis Data Firehose evaluates and adds to failed records // before writing them to S3. This prefix appears immediately following the - // bucket name. + // bucket name. For information about how to specify this prefix, see Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered - // Amazon S3 files. You can specify an extra prefix to be added in front of - // the time format prefix. If the prefix ends with a slash, it appears as a - // folder in the S3 bucket. For more information, see Amazon S3 Object Name - // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name) - // in the Amazon Kinesis Data Firehose Developer Guide. + // Amazon S3 files. You can also specify a custom prefix, as described in Custom + // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). Prefix *string `type:"string"` // The Amazon Resource Name (ARN) of the AWS credentials. For more information, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/errors.go index d70656e3e3e..762ed0eb390 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/errors.go @@ -41,6 +41,6 @@ const ( // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, - // see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). + // see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). ErrCodeServiceUnavailableException = "ServiceUnavailableException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go index bcdf23dffb9..6e234b3c9e6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -46,11 +46,11 @@ const ( // svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Firehose { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Firehose { svc := &Firehose{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-04", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/api.go index 52cacb83550..400e17a7f72 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/api.go @@ -59,7 +59,7 @@ func (c *FMS) AssociateAdminAccountRequest(input *AssociateAdminAccountInput) (r // AssociateAdminAccount API operation for Firewall Management Service. // // Sets the AWS Firewall Manager administrator account. AWS Firewall Manager -// must be associated with the master account your AWS organization or associated +// must be associated with the master account of your AWS organization or associated // with a member account that has the appropriate permissions. If the account // ID that you submit is not an AWS Organizations master account, AWS Firewall // Manager will set the appropriate permissions for the given member account. @@ -339,7 +339,7 @@ func (c *FMS) DisassociateAdminAccountRequest(input *DisassociateAdminAccountInp // // Disassociates the account that has been set as the AWS Firewall Manager administrator // account. To set a different account as the administrator account, you must -// submit an AssociateAdminAccount request . +// submit an AssociateAdminAccount request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -518,8 +518,11 @@ func (c *FMS) GetComplianceDetailRequest(input *GetComplianceDetailInput) (req * // // Returns detailed compliance information about the specified member account. // Details include resources that are in and out of compliance with the specified -// policy. Resources are considered non-compliant if the specified policy has -// not been applied to them. +// policy. Resources are considered noncompliant for AWS WAF and Shield Advanced +// policies if the specified policy has not been applied to them. Resources +// are considered noncompliant for security group policies if they are in scope +// of the policy, they violate one or more of the policy rules, and remediation +// is disabled or not possible. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -602,8 +605,8 @@ func (c *FMS) GetNotificationChannelRequest(input *GetNotificationChannelInput) // GetNotificationChannel API operation for Firewall Management Service. 
// -// Returns information about the Amazon Simple Notification Service (SNS) topic -// that is used to record AWS Firewall Manager SNS logs. +// Information about the Amazon Simple Notification Service (SNS) topic that +// is used to record AWS Firewall Manager SNS logs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -783,7 +786,8 @@ func (c *FMS) GetProtectionStatusRequest(input *GetProtectionStatusInput) (req * // GetProtectionStatus API operation for Firewall Management Service. // // If you created a Shield Advanced policy, returns policy-level attack summary -// information in the event of a potential DDoS attack. +// information in the event of a potential DDoS attack. Other policy types are +// currently unsupported. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -927,7 +931,7 @@ func (c *FMS) ListComplianceStatusWithContext(ctx aws.Context, input *ListCompli // // Example iterating over at most 3 pages of a ListComplianceStatus operation. // pageNum := 0 // err := client.ListComplianceStatusPages(params, -// func(page *ListComplianceStatusOutput, lastPage bool) bool { +// func(page *fms.ListComplianceStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -959,10 +963,12 @@ func (c *FMS) ListComplianceStatusPagesWithContext(ctx aws.Context, input *ListC }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListComplianceStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListComplianceStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1070,7 +1076,7 @@ func (c *FMS) ListMemberAccountsWithContext(ctx aws.Context, input *ListMemberAc // // Example iterating over at most 3 pages of a ListMemberAccounts operation. // pageNum := 0 // err := client.ListMemberAccountsPages(params, -// func(page *ListMemberAccountsOutput, lastPage bool) bool { +// func(page *fms.ListMemberAccountsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1102,10 +1108,12 @@ func (c *FMS) ListMemberAccountsPagesWithContext(ctx aws.Context, input *ListMem }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMemberAccountsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMemberAccountsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1220,7 +1228,7 @@ func (c *FMS) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, // // Example iterating over at most 3 pages of a ListPolicies operation. // pageNum := 0 // err := client.ListPoliciesPages(params, -// func(page *ListPoliciesOutput, lastPage bool) bool { +// func(page *fms.ListPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1252,10 +1260,12 @@ func (c *FMS) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1395,16 +1405,23 @@ func (c *FMS) PutPolicyRequest(input *PutPolicyInput) (req *request.Request, out // // Creates an AWS Firewall Manager policy. 
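Before the PutPolicy description continues, the pagination rewrite above (ListComplianceStatusPages, ListMemberAccountsPages, and ListPoliciesPages now honor an early false return from the callback) deserves a usage sketch; it matches the corrected fms.-qualified callback signatures in the doc examples. The region is an assumption.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fms"
)

func main() {
	svc := fms.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Stop after three pages, mirroring the updated doc example; returning
	// false from the callback now reliably ends iteration.
	pageNum := 0
	err := svc.ListPoliciesPages(&fms.ListPoliciesInput{},
		func(page *fms.ListPoliciesOutput, lastPage bool) bool {
			pageNum++
			fmt.Println(page)
			return pageNum <= 3
		})
	if err != nil {
		log.Fatal(err)
	}
}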
// -// Firewall Manager provides two types of policies: A Shield Advanced policy, -// which applies Shield Advanced protection to specified accounts and resources, -// or a WAF policy, which contains a rule group and defines which resources -// are to be protected by that rule group. A policy is specific to either WAF -// or Shield Advanced. If you want to enforce both WAF rules and Shield Advanced -// protection across accounts, you can create multiple policies. You can create -// one or more policies for WAF rules, and one or more policies for Shield Advanced. +// Firewall Manager provides the following types of policies: +// +// * A Shield Advanced policy, which applies Shield Advanced protection to +// specified accounts and resources +// +// * An AWS WAF policy, which contains a rule group and defines which resources +// are to be protected by that rule group +// +// * A security group policy, which manages VPC security groups across your +// AWS organization. +// +// Each policy is specific to one of the three types. If you want to enforce +// more than one policy type across accounts, you can create multiple policies. +// You can create multiple policies for each type. // // You must be subscribed to Shield Advanced to create a Shield Advanced policy. -// For more information on subscribing to Shield Advanced, see CreateSubscription +// For more information about subscribing to Shield Advanced, see CreateSubscription // (https://docs.aws.amazon.com/waf/latest/DDOSAPIReference/API_CreateSubscription.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1526,8 +1543,8 @@ type ComplianceViolator struct { // The resource ID. ResourceId *string `min:"1" type:"string"` - // The resource type. This is in the format shown in AWS Resource Types Reference - // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). + // The resource type. This is in the format shown in the AWS Resource Types + // Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). // For example: AWS::ElasticLoadBalancingV2::LoadBalancer or AWS::CloudFront::Distribution. ResourceType *string `min:"1" type:"string"` @@ -1594,21 +1611,31 @@ func (s DeleteNotificationChannelOutput) GoString() string { type DeletePolicyInput struct { _ struct{} `type:"structure"` - // If True, the request will also perform a clean-up process that will: + // If True, the request performs cleanup according to the policy type. + // + // For AWS WAF and Shield Advanced policies, the cleanup does the following: + // + // * Deletes rule groups created by AWS Firewall Manager // - // * Delete rule groups created by AWS Firewall Manager + // * Removes web ACLs from in-scope resources // - // * Remove web ACLs from in-scope resources + // * Deletes web ACLs that contain no rules or rule groups // - // * Delete web ACLs that contain no rules or rule groups + // For security group policies, the cleanup does the following for each security + // group in the policy: // - // After the cleanup, in-scope resources will no longer be protected by web - // ACLs in this policy. Protection of out-of-scope resources will remain unchanged. - // Scope is determined by tags and accounts associated with the policy. When - // creating the policy, if you specified that only resources in specific accounts - // or with specific tags be protected by the policy, those resources are in-scope. - // All others are out of scope. 
If you did not specify tags or accounts, all - // resources are in-scope. + // * Disassociates the security group from in-scope resources + // + // * Deletes the security group if it was created through Firewall Manager + // and if it's no longer associated with any resources through another policy + // + // After the cleanup, in-scope resources are no longer protected by web ACLs + // in this policy. Protection of out-of-scope resources remains unchanged. Scope + // is determined by tags that you create and accounts that you associate with + // the policy. When creating the policy, if you specify that only resources + // in specific accounts or with specific tags are in scope of the policy, those + // accounts and resources are handled by the policy. All others are out of scope. + // If you don't specify tags or accounts, all resources are in scope. DeleteAllPolicyResources *bool `type:"boolean"` // The ID of the policy that you want to delete. PolicyId is returned by PutPolicy @@ -1699,20 +1726,23 @@ func (s DisassociateAdminAccountOutput) GoString() string { } // Describes the compliance status for the account. An account is considered -// non-compliant if it includes resources that are not protected by the specified -// policy. +// noncompliant if it includes resources that are not protected by the specified +// policy or that don't comply with the policy. type EvaluationResult struct { _ struct{} `type:"structure"` // Describes an AWS account's compliance with the AWS Firewall Manager policy. ComplianceStatus *string `type:"string" enum:"PolicyComplianceStatusType"` - // Indicates that over 100 resources are non-compliant with the AWS Firewall + // Indicates that over 100 resources are noncompliant with the AWS Firewall // Manager policy. EvaluationLimitExceeded *bool `type:"boolean"` - // Number of resources that are non-compliant with the specified policy. A resource - // is considered non-compliant if it is not associated with the specified policy. + // The number of resources that are noncompliant with the specified policy. + // For AWS WAF and Shield Advanced policies, a resource is considered noncompliant + // if it is not associated with the policy. For security group policies, a resource + // is considered noncompliant if it doesn't comply with the rules of the policy + // and remediation is disabled or not possible. ViolatorCount *int64 `type:"long"` } @@ -1997,7 +2027,7 @@ type GetProtectionStatusInput struct { _ struct{} `type:"structure"` // The end of the time period to query for the attacks. This is a timestamp - // type. The sample request above indicates a number type because the default + // type. The request syntax listing indicates a number type because the default // used by AWS Firewall Manager is Unix time in seconds. However, any valid // timestamp format is allowed. EndTime *time.Time `type:"timestamp"` @@ -2014,8 +2044,8 @@ type GetProtectionStatusInput struct { // If you specify a value for MaxResults and you have more objects than the // number that you specify for MaxResults, AWS Firewall Manager returns a NextToken - // value in the response that allows you to list another group of objects. For - // the second and subsequent GetProtectionStatus requests, specify the value + // value in the response, which you can use to retrieve another group of objects. + // For the second and subsequent GetProtectionStatus requests, specify the value // of NextToken from the previous response to get information about another // batch of objects. 
NextToken *string `min:"1" type:"string"` @@ -2026,7 +2056,7 @@ type GetProtectionStatusInput struct { PolicyId *string `min:"36" type:"string" required:"true"` // The start of the time period to query for the attacks. This is a timestamp - // type. The sample request above indicates a number type because the default + // type. The request syntax listing indicates a number type because the default // used by AWS Firewall Manager is Unix time in seconds. However, any valid // timestamp format is allowed. StartTime *time.Time `type:"timestamp"` @@ -2121,8 +2151,7 @@ type GetProtectionStatusOutput struct { // // * End time of the attack (ongoing attacks will not have an end time) // - // The details are in JSON format. An example is shown in the Examples section - // below. + // The details are in JSON format. Data *string `type:"string"` // If you have more objects than the number that you specified for MaxResults @@ -2482,9 +2511,9 @@ type Policy struct { ExcludeMap map[string][]*string `type:"map"` // If set to True, resources with the tags that are specified in the ResourceTag - // array are not protected by the policy. If set to False, and the ResourceTag - // array is not null, only resources with the specified tags are associated - // with the policy. + // array are not in scope of the policy. If set to False, and the ResourceTag + // array is not null, only resources with the specified tags are in scope of + // the policy. // // ExcludeResourceTags is a required field ExcludeResourceTags *bool `type:"boolean" required:"true"` @@ -2520,9 +2549,14 @@ type Policy struct { // An array of ResourceTag objects. ResourceTags []*ResourceTag `type:"list"` - // The type of resource to protect with the policy. This is in the format shown - // in AWS Resource Types Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). - // For example: AWS::ElasticLoadBalancingV2::LoadBalancer or AWS::CloudFront::Distribution. + // The type of resource protected by or in scope of the policy. This is in the + // format shown in the AWS Resource Types Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). + // For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer + // and AWS::CloudFront::Distribution. For a security group common policy, valid + // values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security + // group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, + // and AWS::EC2::Instance. For a security group usage audit policy, the value + // is AWS::EC2::SecurityGroup. // // ResourceType is a required field ResourceType *string `min:"1" type:"string" required:"true"` @@ -2664,24 +2698,24 @@ func (s *Policy) SetSecurityServicePolicyData(v *SecurityServicePolicyData) *Pol return s } -// Describes the non-compliant resources in a member account for a specific -// AWS Firewall Manager policy. A maximum of 100 entries are displayed. If more -// than 100 resources are non-compliant, EvaluationLimitExceeded is set to True. +// Describes the noncompliant resources in a member account for a specific AWS +// Firewall Manager policy. A maximum of 100 entries are displayed. If more +// than 100 resources are noncompliant, EvaluationLimitExceeded is set to True. 
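The NextToken contract described for GetProtectionStatus above lends itself to a simple loop; a sketch follows before the PolicyComplianceDetail type that continues below. The policy ID and region are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fms"
)

func main() {
	svc := fms.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Page through attack data by feeding each response's NextToken back
	// into the next request, as the NextToken doc comment describes.
	input := &fms.GetProtectionStatusInput{
		PolicyId:   aws.String("12345678-90ab-cdef-1234-567890abcdef"),
		MaxResults: aws.Int64(50),
	}
	for {
		out, err := svc.GetProtectionStatus(input)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(aws.StringValue(out.Data)) // JSON attack details
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}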
type PolicyComplianceDetail struct { _ struct{} `type:"structure"` - // Indicates if over 100 resources are non-compliant with the AWS Firewall Manager + // Indicates if over 100 resources are noncompliant with the AWS Firewall Manager // policy. EvaluationLimitExceeded *bool `type:"boolean"` - // A time stamp that indicates when the returned information should be considered - // out-of-date. + // A timestamp that indicates when the returned information should be considered + // out of date. ExpiredAt *time.Time `type:"timestamp"` // Details about problems with dependent services, such as AWS WAF or AWS Config, - // that are causing a resource to be non-compliant. The details include the - // name of the dependent service and the error message received that indicates - // the problem with the service. + // that are causing a resource to be noncompliant. The details include the name + // of the dependent service and the error message received that indicates the + // problem with the service. IssueInfoMap map[string]*string `type:"map"` // The AWS account ID. @@ -2693,7 +2727,8 @@ type PolicyComplianceDetail struct { // The AWS account that created the AWS Firewall Manager policy. PolicyOwner *string `min:"1" type:"string"` - // An array of resources that are not protected by the policy. + // An array of resources that aren't protected by the AWS WAF or Shield Advanced + // policy or that aren't in compliance with the security group policy. Violators []*ComplianceViolator `type:"list"` } @@ -2750,8 +2785,9 @@ func (s *PolicyComplianceDetail) SetViolators(v []*ComplianceViolator) *PolicyCo } // Indicates whether the account is compliant with the specified policy. An -// account is considered non-compliant if it includes resources that are not -// protected by the policy. +// account is considered noncompliant if it includes resources that are not +// protected by the policy, for AWS WAF and Shield Advanced policies, or that +// are noncompliant with the policy, for security group policies. type PolicyComplianceStatus struct { _ struct{} `type:"structure"` @@ -2759,12 +2795,12 @@ type PolicyComplianceStatus struct { EvaluationResults []*EvaluationResult `type:"list"` // Details about problems with dependent services, such as AWS WAF or AWS Config, - // that are causing a resource to be non-compliant. The details include the - // name of the dependent service and the error message received that indicates - // the problem with the service. + // that are causing a resource to be noncompliant. The details include the name + // of the dependent service and the error message received that indicates the + // problem with the service. IssueInfoMap map[string]*string `type:"map"` - // Time stamp of the last update to the EvaluationResult objects. + // Timestamp of the last update to the EvaluationResult objects. LastUpdated *time.Time `type:"timestamp"` // The member account ID. @@ -2848,14 +2884,19 @@ type PolicySummary struct { // Indicates if the policy should be automatically applied to new resources. RemediationEnabled *bool `type:"boolean"` - // The type of resource to protect with the policy. This is in the format shown - // in AWS Resource Types Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). - // For example: AWS::ElasticLoadBalancingV2::LoadBalancer or AWS::CloudFront::Distribution. + // The type of resource protected by or in scope of the policy. 
This is in the + // format shown in the AWS Resource Types Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). + // For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer + // and AWS::CloudFront::Distribution. For a security group common policy, valid + // values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security + // group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, + // and AWS::EC2::Instance. For a security group usage audit policy, the value + // is AWS::EC2::SecurityGroup. ResourceType *string `min:"1" type:"string"` // The service that the policy is using to protect the resources. This specifies - // the type of policy that is created, either a WAF policy or Shield Advanced - // policy. + // the type of policy that is created, either an AWS WAF policy, a Shield Advanced + // policy, or a security group policy. SecurityServiceType *string `type:"string" enum:"SecurityServiceType"` } @@ -3055,13 +3096,13 @@ func (s *PutPolicyOutput) SetPolicyArn(v string) *PutPolicyOutput { } // The resource tags that AWS Firewall Manager uses to determine if a particular -// resource should be included or excluded from protection by the AWS Firewall -// Manager policy. Tags enable you to categorize your AWS resources in different -// ways, for example, by purpose, owner, or environment. Each tag consists of -// a key and an optional value, both of which you define. Tags are combined -// with an "OR." That is, if you add more than one tag, if any of the tags matches, -// the resource is considered a match for the include or exclude. Working with -// Tag Editor (https://docs.aws.amazon.com/awsconsolehelpdocs/latest/gsg/tag-editor.html). +// resource should be included or excluded from the AWS Firewall Manager policy. +// Tags enable you to categorize your AWS resources in different ways, for example, +// by purpose, owner, or environment. Each tag consists of a key and an optional +// value. Firewall Manager combines the tags with "AND" so that, if you add +// more than one tag to a policy scope, a resource must have all the specified +// tags to be included or excluded. For more information, see Working with Tag +// Editor (https://docs.aws.amazon.com/awsconsolehelpdocs/latest/gsg/tag-editor.html). type ResourceTag struct { _ struct{} `type:"structure"` @@ -3116,19 +3157,34 @@ func (s *ResourceTag) SetValue(v string) *ResourceTag { type SecurityServicePolicyData struct { _ struct{} `type:"structure"` - // Details about the service. This contains WAF data in JSON format, as shown - // in the following example: + // Details about the service that are specific to the service type, in JSON + // format. For service type SHIELD_ADVANCED, this is an empty string. 
+	//
+	//    * Example: WAF "ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\":
+	//    [{\"id\": \"12345678-1bcd-9012-efga-0987654321ab\", \"overrideAction\"
+	//    : {\"type\": \"COUNT\"}}], \"defaultAction\": {\"type\": \"BLOCK\"}}
 	//
-	// ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\": \"12345678-1bcd-9012-efga-0987654321ab\",
-	// \"overrideAction\" : {\"type\": \"COUNT\"}}], \"defaultAction\": {\"type\":
-	// \"BLOCK\"}}
+	//    * Example: SECURITY_GROUPS_COMMON "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false,\"securityGroups\":[{\"id\":\"sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"}
+	//
-	// If this is a Shield Advanced policy, this string will be empty.
+	//    * Example: SECURITY_GROUPS_CONTENT_AUDIT "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_CONTENT_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"securityGroups\":[{\"id\":\"sg-000e55995d61a06bd\"}],\"securityGroupAction\":{\"type\":\"ALLOW\"}}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"}
+	//    The security group action for content audit can be ALLOW or DENY. For
+	//    ALLOW, all in-scope security group rules must be within the allowed range
+	//    of the policy's security group rules. For DENY, all in-scope security
+	//    group rules must not contain a value or a range that matches a rule value
+	//    or range in the policy security group.
+	//
+	//    * Example: SECURITY_GROUPS_USAGE_AUDIT "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_USAGE_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_USAGE_AUDIT\",\"deleteUnusedSecurityGroups\":true,\"coalesceRedundantSecurityGroups\":true}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::SecurityGroup"}
 	ManagedServiceData *string `min:"1" type:"string"`

 	// The service that the policy is using to protect the resources. This specifies
-	// the type of policy that is created, either a WAF policy or Shield Advanced
-	// policy.
+	// the type of policy that is created, either an AWS WAF policy, a Shield Advanced
+	// policy, or a security group policy. For security group policies, Firewall
+	// Manager supports one security group for each common policy and for each content
+	// audit policy. This is an adjustable limit that you can increase by contacting
+	// AWS Support.
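// Editor's example (not part of the vendored file): a minimal sketch of putting
// a SECURITY_GROUPS_USAGE_AUDIT policy, reusing the ManagedServiceData JSON
// shape shown above. The policy name is invented and error handling is elided.
//
//    import (
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/aws/session"
//        "github.com/aws/aws-sdk-go/service/fms"
//    )
//
//    svc := fms.New(session.Must(session.NewSession()))
//    _, err := svc.PutPolicy(&fms.PutPolicyInput{
//        Policy: &fms.Policy{
//            PolicyName:          aws.String("example-sg-usage-audit"),
//            ResourceType:        aws.String("AWS::EC2::SecurityGroup"),
//            ExcludeResourceTags: aws.Bool(false),
//            RemediationEnabled:  aws.Bool(false),
//            SecurityServicePolicyData: &fms.SecurityServicePolicyData{
//                Type:               aws.String(fms.SecurityServiceTypeSecurityGroupsUsageAudit),
//                ManagedServiceData: aws.String(`{"type":"SECURITY_GROUPS_USAGE_AUDIT","deleteUnusedSecurityGroups":true,"coalesceRedundantSecurityGroups":true}`),
//            },
//        },
//    })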
// // Type is a required field Type *string `type:"string" required:"true" enum:"SecurityServiceType"` @@ -3203,6 +3259,9 @@ const ( // DependentServiceNameAwsshieldAdvanced is a DependentServiceName enum value DependentServiceNameAwsshieldAdvanced = "AWSSHIELD_ADVANCED" + + // DependentServiceNameAwsvpc is a DependentServiceName enum value + DependentServiceNameAwsvpc = "AWSVPC" ) const ( @@ -3219,6 +3278,15 @@ const ( // SecurityServiceTypeShieldAdvanced is a SecurityServiceType enum value SecurityServiceTypeShieldAdvanced = "SHIELD_ADVANCED" + + // SecurityServiceTypeSecurityGroupsCommon is a SecurityServiceType enum value + SecurityServiceTypeSecurityGroupsCommon = "SECURITY_GROUPS_COMMON" + + // SecurityServiceTypeSecurityGroupsContentAudit is a SecurityServiceType enum value + SecurityServiceTypeSecurityGroupsContentAudit = "SECURITY_GROUPS_CONTENT_AUDIT" + + // SecurityServiceTypeSecurityGroupsUsageAudit is a SecurityServiceType enum value + SecurityServiceTypeSecurityGroupsUsageAudit = "SECURITY_GROUPS_USAGE_AUDIT" ) const ( @@ -3233,4 +3301,19 @@ const ( // ViolationReasonResourceMissingShieldProtection is a ViolationReason enum value ViolationReasonResourceMissingShieldProtection = "RESOURCE_MISSING_SHIELD_PROTECTION" + + // ViolationReasonResourceMissingWebAclOrShieldProtection is a ViolationReason enum value + ViolationReasonResourceMissingWebAclOrShieldProtection = "RESOURCE_MISSING_WEB_ACL_OR_SHIELD_PROTECTION" + + // ViolationReasonResourceMissingSecurityGroup is a ViolationReason enum value + ViolationReasonResourceMissingSecurityGroup = "RESOURCE_MISSING_SECURITY_GROUP" + + // ViolationReasonResourceViolatesAuditSecurityGroup is a ViolationReason enum value + ViolationReasonResourceViolatesAuditSecurityGroup = "RESOURCE_VIOLATES_AUDIT_SECURITY_GROUP" + + // ViolationReasonSecurityGroupUnused is a ViolationReason enum value + ViolationReasonSecurityGroupUnused = "SECURITY_GROUP_UNUSED" + + // ViolationReasonSecurityGroupRedundant is a ViolationReason enum value + ViolationReasonSecurityGroupRedundant = "SECURITY_GROUP_REDUNDANT" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/service.go index 6103e57fd82..e39fa854236 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fms/service.go @@ -46,11 +46,11 @@ const ( // svc := fms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FMS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FMS { svc := &FMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go new file mode 100644 index 00000000000..1321d9b9ab3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go @@ -0,0 +1,7813 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package forecastservice + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateDataset = "CreateDataset" + +// CreateDatasetRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataset for more information on using the CreateDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetRequest method. +// req, resp := client.CreateDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDataset +func (c *ForecastService) CreateDatasetRequest(input *CreateDatasetInput) (req *request.Request, output *CreateDatasetOutput) { + op := &request.Operation{ + Name: opCreateDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDatasetInput{} + } + + output = &CreateDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataset API operation for Amazon Forecast Service. +// +// Creates an Amazon Forecast dataset. The information about the dataset that +// you provide helps Forecast understand how to consume the data for model training. +// This includes the following: +// +// * DataFrequency - How frequently your historical time-series data is collected. +// Amazon Forecast uses this information when training the model and generating +// a forecast. +// +// * Domain and DatasetType - Each dataset has an associated dataset domain +// and a type within the domain. Amazon Forecast provides a list of predefined +// domains and types within each domain. For each unique dataset domain and +// type within the domain, Amazon Forecast requires your data to include +// a minimum set of predefined fields. 
+// +// * Schema - A schema specifies the fields of the dataset, including the +// field name and data type. +// +// After creating a dataset, you import your training data into the dataset +// and add the dataset to a dataset group. You then use the dataset group to +// create a predictor. For more information, see howitworks-datasets-groups. +// +// To get a list of all your datasets, use the ListDatasets operation. +// +// The Status of a dataset must be ACTIVE before you can import training data. +// Use the DescribeDataset operation to get the status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation CreateDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// There is already a resource with that Amazon Resource Name (ARN). Try again +// with a different ARN. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDataset +func (c *ForecastService) CreateDataset(input *CreateDatasetInput) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + return out, req.Send() +} + +// CreateDatasetWithContext is the same as CreateDataset with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) CreateDatasetWithContext(ctx aws.Context, input *CreateDatasetInput, opts ...request.Option) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDatasetGroup = "CreateDatasetGroup" + +// CreateDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDatasetGroup for more information on using the CreateDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetGroupRequest method. 
+//    req, resp := client.CreateDatasetGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetGroup
+func (c *ForecastService) CreateDatasetGroupRequest(input *CreateDatasetGroupInput) (req *request.Request, output *CreateDatasetGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateDatasetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateDatasetGroupInput{}
+	}
+
+	output = &CreateDatasetGroupOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateDatasetGroup API operation for Amazon Forecast Service.
+//
+// Creates an Amazon Forecast dataset group, which holds a collection of related
+// datasets. You can add datasets to the dataset group when you create the dataset
+// group, or you can add datasets later with the UpdateDatasetGroup operation.
+//
+// After creating a dataset group and adding datasets, you use the dataset group
+// when you create a predictor. For more information, see howitworks-datasets-groups.
+//
+// To get a list of all your dataset groups, use the ListDatasetGroups operation.
+//
+// The Status of a dataset group must be ACTIVE before you can create a predictor
+// using the dataset group. Use the DescribeDatasetGroup operation to get the
+// status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Forecast Service's
+// API operation CreateDatasetGroup for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   We can't process the request because it includes an invalid value or a value
+//   that exceeds the valid range.
+//
+//   * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+//   There is already a resource with that Amazon Resource Name (ARN). Try again
+//   with a different ARN.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   We can't find a resource with that Amazon Resource Name (ARN). Check the
+//   ARN and try again.
+//
+//   * ErrCodeResourceInUseException "ResourceInUseException"
+//   The specified resource is in use.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The limit on the number of requests per second has been exceeded.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetGroup
+func (c *ForecastService) CreateDatasetGroup(input *CreateDatasetGroupInput) (*CreateDatasetGroupOutput, error) {
+	req, out := c.CreateDatasetGroupRequest(input)
+	return out, req.Send()
+}
+
+// CreateDatasetGroupWithContext is the same as CreateDatasetGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateDatasetGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
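// Editor's example (not part of the vendored file): a minimal sketch of the flow
// described above: create a dataset with a schema, then place it in a dataset
// group so a predictor can be trained from it. The names, RETAIL domain, and
// schema attributes are invented; error handling is elided.
//
//    import (
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/aws/session"
//        "github.com/aws/aws-sdk-go/service/forecastservice"
//    )
//
//    svc := forecastservice.New(session.Must(session.NewSession()))
//    ds, _ := svc.CreateDataset(&forecastservice.CreateDatasetInput{
//        DatasetName:   aws.String("example_demand"),
//        Domain:        aws.String("RETAIL"),
//        DatasetType:   aws.String("TARGET_TIME_SERIES"),
//        DataFrequency: aws.String("D"), // collected daily
//        Schema: &forecastservice.Schema{
//            Attributes: []*forecastservice.SchemaAttribute{
//                {AttributeName: aws.String("timestamp"), AttributeType: aws.String("timestamp")},
//                {AttributeName: aws.String("item_id"), AttributeType: aws.String("string")},
//                {AttributeName: aws.String("demand"), AttributeType: aws.String("float")},
//            },
//        },
//    })
//    _, _ = svc.CreateDatasetGroup(&forecastservice.CreateDatasetGroupInput{
//        DatasetGroupName: aws.String("example_group"),
//        Domain:           aws.String("RETAIL"),
//        DatasetArns:      []*string{ds.DatasetArn},
//    })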
+func (c *ForecastService) CreateDatasetGroupWithContext(ctx aws.Context, input *CreateDatasetGroupInput, opts ...request.Option) (*CreateDatasetGroupOutput, error) {
+	req, out := c.CreateDatasetGroupRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateDatasetImportJob = "CreateDatasetImportJob"
+
+// CreateDatasetImportJobRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDatasetImportJob operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateDatasetImportJob for more information on using the CreateDatasetImportJob
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the CreateDatasetImportJobRequest method.
+//    req, resp := client.CreateDatasetImportJobRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob
+func (c *ForecastService) CreateDatasetImportJobRequest(input *CreateDatasetImportJobInput) (req *request.Request, output *CreateDatasetImportJobOutput) {
+	op := &request.Operation{
+		Name:       opCreateDatasetImportJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateDatasetImportJobInput{}
+	}
+
+	output = &CreateDatasetImportJobOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateDatasetImportJob API operation for Amazon Forecast Service.
+//
+// Imports your training data to an Amazon Forecast dataset. You provide the
+// location of your training data in an Amazon Simple Storage Service (Amazon
+// S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want
+// to import the data to.
+//
+// You must specify a DataSource object that includes an AWS Identity and Access
+// Management (IAM) role that Amazon Forecast can assume to access the data.
+// For more information, see aws-forecast-iam-roles.
+//
+// Two properties of the training data are optionally specified:
+//
+//    * The delimiter that separates the data fields. The default delimiter
+//    is a comma (,), which is the only supported delimiter in this release.
+//
+//    * The format of timestamps. If the format is not specified, Amazon Forecast
+//    expects the format to be "yyyy-MM-dd HH:mm:ss".
+//
+// When Amazon Forecast uploads your training data, it verifies that the data
+// was collected at the DataFrequency specified when the target dataset was
+// created. For more information, see CreateDataset and howitworks-datasets-groups.
+// Amazon Forecast also verifies the delimiter and timestamp format.
+//
+// You can use the ListDatasetImportJobs operation to get a list of all your
+// dataset import jobs, filtered by specified criteria.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
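// Editor's example (not part of the vendored file): a minimal sketch of starting
// the dataset import job described above, reusing the svc client and imports
// from the earlier sketch. The S3 path, role ARN, and dataset ARN are
// placeholders; error handling is elided.
//
//    _, err := svc.CreateDatasetImportJob(&forecastservice.CreateDatasetImportJobInput{
//        DatasetImportJobName: aws.String("example_import"),
//        DatasetArn:           aws.String("arn:aws:forecast:us-east-1:123456789012:dataset/example_demand"),
//        DataSource: &forecastservice.DataSource{
//            S3Config: &forecastservice.S3Config{
//                Path:    aws.String("s3://example-bucket/demand.csv"),
//                RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleForecastRole"),
//            },
//        },
//        // Optional; "yyyy-MM-dd HH:mm:ss" is the default noted above.
//        TimestampFormat: aws.String("yyyy-MM-dd HH:mm:ss"),
//    })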
+// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation CreateDatasetImportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// There is already a resource with that Amazon Resource Name (ARN). Try again +// with a different ARN. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob +func (c *ForecastService) CreateDatasetImportJob(input *CreateDatasetImportJobInput) (*CreateDatasetImportJobOutput, error) { + req, out := c.CreateDatasetImportJobRequest(input) + return out, req.Send() +} + +// CreateDatasetImportJobWithContext is the same as CreateDatasetImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDatasetImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) CreateDatasetImportJobWithContext(ctx aws.Context, input *CreateDatasetImportJobInput, opts ...request.Option) (*CreateDatasetImportJobOutput, error) { + req, out := c.CreateDatasetImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateForecast = "CreateForecast" + +// CreateForecastRequest generates a "aws/request.Request" representing the +// client's request for the CreateForecast operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateForecast for more information on using the CreateForecast +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateForecastRequest method. 
+//    req, resp := client.CreateForecastRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecast
+func (c *ForecastService) CreateForecastRequest(input *CreateForecastInput) (req *request.Request, output *CreateForecastOutput) {
+	op := &request.Operation{
+		Name:       opCreateForecast,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateForecastInput{}
+	}
+
+	output = &CreateForecastOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateForecast API operation for Amazon Forecast Service.
+//
+// Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was
+// used to train the predictor. This is known as inference. To retrieve the
+// forecast for a single item at low latency, use the QueryForecast operation.
+// To export the complete forecast into your Amazon Simple Storage Service
+// (Amazon S3) bucket, use the CreateForecastExportJob operation.
+//
+// The range of the forecast is determined by the ForecastHorizon, specified
+// in the CreatePredictor request, multiplied by the DataFrequency, specified
+// in the CreateDataset request. When you query a forecast, you can request
+// a specific date range within the complete forecast.
+//
+// To get a list of all your forecasts, use the ListForecasts operation.
+//
+// The forecasts generated by Amazon Forecast are in the same timezone as the
+// dataset that was used to create the predictor.
+//
+// For more information, see howitworks-forecast.
+//
+// The Status of the forecast must be ACTIVE before you can query or export
+// the forecast. Use the DescribeForecast operation to get the status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Forecast Service's
+// API operation CreateForecast for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   We can't process the request because it includes an invalid value or a value
+//   that exceeds the valid range.
+//
+//   * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+//   There is already a resource with that Amazon Resource Name (ARN). Try again
+//   with a different ARN.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   We can't find a resource with that Amazon Resource Name (ARN). Check the
+//   ARN and try again.
+//
+//   * ErrCodeResourceInUseException "ResourceInUseException"
+//   The specified resource is in use.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The limit on the number of requests per second has been exceeded.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecast
+func (c *ForecastService) CreateForecast(input *CreateForecastInput) (*CreateForecastOutput, error) {
+	req, out := c.CreateForecastRequest(input)
+	return out, req.Send()
+}
+
+// CreateForecastWithContext is the same as CreateForecast with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateForecast for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
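// Editor's example (not part of the vendored file): a minimal sketch of the
// CreateForecast call documented above, reusing the svc client from the earlier
// sketch. The predictor ARN is a placeholder; error handling is elided. Poll
// DescribeForecast until the status is ACTIVE before querying or exporting.
//
//    _, err := svc.CreateForecast(&forecastservice.CreateForecastInput{
//        ForecastName: aws.String("example_forecast"),
//        PredictorArn: aws.String("arn:aws:forecast:us-east-1:123456789012:predictor/example_predictor"),
//    })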
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) CreateForecastWithContext(ctx aws.Context, input *CreateForecastInput, opts ...request.Option) (*CreateForecastOutput, error) { + req, out := c.CreateForecastRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateForecastExportJob = "CreateForecastExportJob" + +// CreateForecastExportJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateForecastExportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateForecastExportJob for more information on using the CreateForecastExportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateForecastExportJobRequest method. +// req, resp := client.CreateForecastExportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecastExportJob +func (c *ForecastService) CreateForecastExportJobRequest(input *CreateForecastExportJobInput) (req *request.Request, output *CreateForecastExportJobOutput) { + op := &request.Operation{ + Name: opCreateForecastExportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateForecastExportJobInput{} + } + + output = &CreateForecastExportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateForecastExportJob API operation for Amazon Forecast Service. +// +// Exports a forecast created by the CreateForecast operation to your Amazon +// Simple Storage Service (Amazon S3) bucket. +// +// You must specify a DataDestination object that includes an AWS Identity and +// Access Management (IAM) role that Amazon Forecast can assume to access the +// Amazon S3 bucket. For more information, see aws-forecast-iam-roles. +// +// For more information, see howitworks-forecast. +// +// To get a list of all your forecast export jobs, use the ListForecastExportJobs +// operation. +// +// The Status of the forecast export job must be ACTIVE before you can access +// the forecast in your Amazon S3 bucket. Use the DescribeForecastExportJob +// operation to get the status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation CreateForecastExportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// There is already a resource with that Amazon Resource Name (ARN). Try again +// with a different ARN. 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateForecastExportJob +func (c *ForecastService) CreateForecastExportJob(input *CreateForecastExportJobInput) (*CreateForecastExportJobOutput, error) { + req, out := c.CreateForecastExportJobRequest(input) + return out, req.Send() +} + +// CreateForecastExportJobWithContext is the same as CreateForecastExportJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateForecastExportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) CreateForecastExportJobWithContext(ctx aws.Context, input *CreateForecastExportJobInput, opts ...request.Option) (*CreateForecastExportJobOutput, error) { + req, out := c.CreateForecastExportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePredictor = "CreatePredictor" + +// CreatePredictorRequest generates a "aws/request.Request" representing the +// client's request for the CreatePredictor operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePredictor for more information on using the CreatePredictor +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePredictorRequest method. +// req, resp := client.CreatePredictorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictor +func (c *ForecastService) CreatePredictorRequest(input *CreatePredictorInput) (req *request.Request, output *CreatePredictorOutput) { + op := &request.Operation{ + Name: opCreatePredictor, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePredictorInput{} + } + + output = &CreatePredictorOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePredictor API operation for Amazon Forecast Service. +// +// Creates an Amazon Forecast predictor. +// +// In the request, you provide a dataset group and either specify an algorithm +// or let Amazon Forecast choose the algorithm for you using AutoML. If you +// specify an algorithm, you also can override algorithm-specific hyperparameters. +// +// Amazon Forecast uses the chosen algorithm to train a model using the latest +// version of the datasets in the specified dataset group. 
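// Editor's example (not part of the vendored file): a minimal sketch of the
// CreateForecastExportJob operation documented just above (before the
// CreatePredictor section), writing a completed forecast to S3 with the same
// svc client. The ARNs and bucket path are placeholders; error handling is
// elided.
//
//    _, err := svc.CreateForecastExportJob(&forecastservice.CreateForecastExportJobInput{
//        ForecastExportJobName: aws.String("example_export"),
//        ForecastArn:           aws.String("arn:aws:forecast:us-east-1:123456789012:forecast/example_forecast"),
//        Destination: &forecastservice.DataDestination{
//            S3Config: &forecastservice.S3Config{
//                Path:    aws.String("s3://example-bucket/exports/"),
//                RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleForecastRole"),
//            },
//        },
//    })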
The result is called
+// a predictor. You then generate a forecast using the CreateForecast operation.
+//
+// After training a model, the CreatePredictor operation also evaluates it.
+// To see the evaluation metrics, use the GetAccuracyMetrics operation. Always
+// review the evaluation metrics before deciding to use the predictor to generate
+// a forecast.
+//
+// Optionally, you can specify a featurization configuration to fill and aggregate
+// the data fields in the TARGET_TIME_SERIES dataset to improve model training.
+// For more information, see FeaturizationConfig.
+//
+// AutoML
+//
+// If you set PerformAutoML to true, Amazon Forecast evaluates each algorithm
+// and chooses the one that minimizes the objective function. The objective
+// function is defined as the mean of the weighted p10, p50, and p90 quantile
+// losses. For more information, see EvaluationResult.
+//
+// When AutoML is enabled, the following properties are disallowed:
+//
+//    * AlgorithmArn
+//
+//    * HPOConfig
+//
+//    * PerformHPO
+//
+//    * TrainingParameters
+//
+// To get a list of all your predictors, use the ListPredictors operation.
+//
+// The Status of the predictor must be ACTIVE, signifying that training has
+// completed, before you can use the predictor to create a forecast. Use the
+// DescribePredictor operation to get the status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Forecast Service's
+// API operation CreatePredictor for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   We can't process the request because it includes an invalid value or a value
+//   that exceeds the valid range.
+//
+//   * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+//   There is already a resource with that Amazon Resource Name (ARN). Try again
+//   with a different ARN.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   We can't find a resource with that Amazon Resource Name (ARN). Check the
+//   ARN and try again.
+//
+//   * ErrCodeResourceInUseException "ResourceInUseException"
+//   The specified resource is in use.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The limit on the number of requests per second has been exceeded.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreatePredictor
+func (c *ForecastService) CreatePredictor(input *CreatePredictorInput) (*CreatePredictorOutput, error) {
+	req, out := c.CreatePredictorRequest(input)
+	return out, req.Send()
+}
+
+// CreatePredictorWithContext is the same as CreatePredictor with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreatePredictor for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ForecastService) CreatePredictorWithContext(ctx aws.Context, input *CreatePredictorInput, opts ...request.Option) (*CreatePredictorOutput, error) {
+	req, out := c.CreatePredictorRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opDeleteDataset = "DeleteDataset" + +// DeleteDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataset for more information on using the DeleteDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetRequest method. +// req, resp := client.DeleteDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDataset +func (c *ForecastService) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) { + op := &request.Operation{ + Name: opDeleteDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetInput{} + } + + output = &DeleteDatasetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataset API operation for Amazon Forecast Service. +// +// Deletes an Amazon Forecast dataset created using the CreateDataset operation. +// To be deleted, the dataset must have a status of ACTIVE or CREATE_FAILED. +// Use the DescribeDataset operation to get the status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeleteDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDataset +func (c *ForecastService) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + return out, req.Send() +} + +// DeleteDatasetWithContext is the same as DeleteDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
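// Editor's example (not part of the vendored file): a minimal sketch of the
// CreatePredictor call documented in the preceding section, letting AutoML
// choose the algorithm. The horizon, frequency, and dataset group ARN are
// placeholders; error handling is elided.
//
//    _, err := svc.CreatePredictor(&forecastservice.CreatePredictorInput{
//        PredictorName:   aws.String("example_predictor"),
//        ForecastHorizon: aws.Int64(14), // 14 periods at the dataset's DataFrequency
//        // With PerformAutoML set, AlgorithmArn, HPOConfig, PerformHPO, and
//        // TrainingParameters must be omitted, per the documentation above.
//        PerformAutoML: aws.Bool(true),
//        InputDataConfig: &forecastservice.InputDataConfig{
//            DatasetGroupArn: aws.String("arn:aws:forecast:us-east-1:123456789012:dataset-group/example_group"),
//        },
//        FeaturizationConfig: &forecastservice.FeaturizationConfig{
//            ForecastFrequency: aws.String("D"),
//        },
//    })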
+func (c *ForecastService) DeleteDatasetWithContext(ctx aws.Context, input *DeleteDatasetInput, opts ...request.Option) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDatasetGroup = "DeleteDatasetGroup" + +// DeleteDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDatasetGroup for more information on using the DeleteDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetGroupRequest method. +// req, resp := client.DeleteDatasetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetGroup +func (c *ForecastService) DeleteDatasetGroupRequest(input *DeleteDatasetGroupInput) (req *request.Request, output *DeleteDatasetGroupOutput) { + op := &request.Operation{ + Name: opDeleteDatasetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetGroupInput{} + } + + output = &DeleteDatasetGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDatasetGroup API operation for Amazon Forecast Service. +// +// Deletes a dataset group created using the CreateDatasetGroup operation. To +// be deleted, the dataset group must have a status of ACTIVE, CREATE_FAILED, +// or UPDATE_FAILED. Use the DescribeDatasetGroup operation to get the status. +// +// The operation deletes only the dataset group, not the datasets in the group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeleteDatasetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetGroup +func (c *ForecastService) DeleteDatasetGroup(input *DeleteDatasetGroupInput) (*DeleteDatasetGroupOutput, error) { + req, out := c.DeleteDatasetGroupRequest(input) + return out, req.Send() +} + +// DeleteDatasetGroupWithContext is the same as DeleteDatasetGroup with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteDatasetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DeleteDatasetGroupWithContext(ctx aws.Context, input *DeleteDatasetGroupInput, opts ...request.Option) (*DeleteDatasetGroupOutput, error) { + req, out := c.DeleteDatasetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDatasetImportJob = "DeleteDatasetImportJob" + +// DeleteDatasetImportJobRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDatasetImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDatasetImportJob for more information on using the DeleteDatasetImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetImportJobRequest method. +// req, resp := client.DeleteDatasetImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetImportJob +func (c *ForecastService) DeleteDatasetImportJobRequest(input *DeleteDatasetImportJobInput) (req *request.Request, output *DeleteDatasetImportJobOutput) { + op := &request.Operation{ + Name: opDeleteDatasetImportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetImportJobInput{} + } + + output = &DeleteDatasetImportJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDatasetImportJob API operation for Amazon Forecast Service. +// +// Deletes a dataset import job created using the CreateDatasetImportJob operation. +// To be deleted, the import job must have a status of ACTIVE or CREATE_FAILED. +// Use the DescribeDatasetImportJob operation to get the status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeleteDatasetImportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. 
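// Editor's example (not part of the vendored file): the Delete* operations in
// this file share the same precondition (status ACTIVE or CREATE_FAILED), so
// here is a minimal sketch of the describe-then-delete pattern for a dataset.
// datasetArn is a placeholder; the same shape applies to dataset import jobs,
// forecasts, export jobs, and predictors.
//
//    datasetArn := "arn:aws:forecast:us-east-1:123456789012:dataset/example_demand" // placeholder
//    desc, err := svc.DescribeDataset(&forecastservice.DescribeDatasetInput{
//        DatasetArn: aws.String(datasetArn),
//    })
//    if err == nil {
//        status := aws.StringValue(desc.Status)
//        if status == "ACTIVE" || status == "CREATE_FAILED" {
//            _, err = svc.DeleteDataset(&forecastservice.DeleteDatasetInput{
//                DatasetArn: aws.String(datasetArn),
//            })
//        }
//    }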
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteDatasetImportJob +func (c *ForecastService) DeleteDatasetImportJob(input *DeleteDatasetImportJobInput) (*DeleteDatasetImportJobOutput, error) { + req, out := c.DeleteDatasetImportJobRequest(input) + return out, req.Send() +} + +// DeleteDatasetImportJobWithContext is the same as DeleteDatasetImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDatasetImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DeleteDatasetImportJobWithContext(ctx aws.Context, input *DeleteDatasetImportJobInput, opts ...request.Option) (*DeleteDatasetImportJobOutput, error) { + req, out := c.DeleteDatasetImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteForecast = "DeleteForecast" + +// DeleteForecastRequest generates a "aws/request.Request" representing the +// client's request for the DeleteForecast operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteForecast for more information on using the DeleteForecast +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteForecastRequest method. +// req, resp := client.DeleteForecastRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecast +func (c *ForecastService) DeleteForecastRequest(input *DeleteForecastInput) (req *request.Request, output *DeleteForecastOutput) { + op := &request.Operation{ + Name: opDeleteForecast, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteForecastInput{} + } + + output = &DeleteForecastOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteForecast API operation for Amazon Forecast Service. +// +// Deletes a forecast created using the CreateForecast operation. To be deleted, +// the forecast must have a status of ACTIVE or CREATE_FAILED. Use the DescribeForecast +// operation to get the status. +// +// You can't delete a forecast while it is being exported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeleteForecast for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecast +func (c *ForecastService) DeleteForecast(input *DeleteForecastInput) (*DeleteForecastOutput, error) { + req, out := c.DeleteForecastRequest(input) + return out, req.Send() +} + +// DeleteForecastWithContext is the same as DeleteForecast with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteForecast for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DeleteForecastWithContext(ctx aws.Context, input *DeleteForecastInput, opts ...request.Option) (*DeleteForecastOutput, error) { + req, out := c.DeleteForecastRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteForecastExportJob = "DeleteForecastExportJob" + +// DeleteForecastExportJobRequest generates a "aws/request.Request" representing the +// client's request for the DeleteForecastExportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteForecastExportJob for more information on using the DeleteForecastExportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteForecastExportJobRequest method. +// req, resp := client.DeleteForecastExportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecastExportJob +func (c *ForecastService) DeleteForecastExportJobRequest(input *DeleteForecastExportJobInput) (req *request.Request, output *DeleteForecastExportJobOutput) { + op := &request.Operation{ + Name: opDeleteForecastExportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteForecastExportJobInput{} + } + + output = &DeleteForecastExportJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteForecastExportJob API operation for Amazon Forecast Service. +// +// Deletes a forecast export job created using the CreateForecastExportJob operation. +// To be deleted, the export job must have a status of ACTIVE or CREATE_FAILED. +// Use the DescribeForecastExportJob operation to get the status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeleteForecastExportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeleteForecastExportJob +func (c *ForecastService) DeleteForecastExportJob(input *DeleteForecastExportJobInput) (*DeleteForecastExportJobOutput, error) { + req, out := c.DeleteForecastExportJobRequest(input) + return out, req.Send() +} + +// DeleteForecastExportJobWithContext is the same as DeleteForecastExportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteForecastExportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DeleteForecastExportJobWithContext(ctx aws.Context, input *DeleteForecastExportJobInput, opts ...request.Option) (*DeleteForecastExportJobOutput, error) { + req, out := c.DeleteForecastExportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePredictor = "DeletePredictor" + +// DeletePredictorRequest generates a "aws/request.Request" representing the +// client's request for the DeletePredictor operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePredictor for more information on using the DeletePredictor +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePredictorRequest method. +// req, resp := client.DeletePredictorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictor +func (c *ForecastService) DeletePredictorRequest(input *DeletePredictorInput) (req *request.Request, output *DeletePredictorOutput) { + op := &request.Operation{ + Name: opDeletePredictor, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePredictorInput{} + } + + output = &DeletePredictorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePredictor API operation for Amazon Forecast Service. +// +// Deletes a predictor created using the CreatePredictor operation. To be deleted, +// the predictor must have a status of ACTIVE or CREATE_FAILED. 
Use the DescribePredictor +// operation to get the status. +// +// Any forecasts generated by the predictor will no longer be available. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DeletePredictor for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DeletePredictor +func (c *ForecastService) DeletePredictor(input *DeletePredictorInput) (*DeletePredictorOutput, error) { + req, out := c.DeletePredictorRequest(input) + return out, req.Send() +} + +// DeletePredictorWithContext is the same as DeletePredictor with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePredictor for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DeletePredictorWithContext(ctx aws.Context, input *DeletePredictorInput, opts ...request.Option) (*DeletePredictorOutput, error) { + req, out := c.DeletePredictorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataset = "DescribeDataset" + +// DescribeDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataset for more information on using the DescribeDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetRequest method. +// req, resp := client.DescribeDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDataset +func (c *ForecastService) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + output = &DescribeDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataset API operation for Amazon Forecast Service. 
+// +// Describes an Amazon Forecast dataset created using the CreateDataset operation. +// +// In addition to listing the properties provided by the user in the CreateDataset +// request, this operation includes the following properties: +// +// * CreationTime +// +// * LastModificationTime +// +// * Status +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribeDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDataset +func (c *ForecastService) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + return out, req.Send() +} + +// DescribeDatasetWithContext is the same as DescribeDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DescribeDatasetWithContext(ctx aws.Context, input *DescribeDatasetInput, opts ...request.Option) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDatasetGroup = "DescribeDatasetGroup" + +// DescribeDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDatasetGroup for more information on using the DescribeDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetGroupRequest method. 
+// req, resp := client.DescribeDatasetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroup +func (c *ForecastService) DescribeDatasetGroupRequest(input *DescribeDatasetGroupInput) (req *request.Request, output *DescribeDatasetGroupOutput) { + op := &request.Operation{ + Name: opDescribeDatasetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetGroupInput{} + } + + output = &DescribeDatasetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDatasetGroup API operation for Amazon Forecast Service. +// +// Describes a dataset group created using the CreateDatasetGroup operation. +// +// In addition to listing the properties provided by the user in the CreateDatasetGroup +// request, this operation includes the following properties: +// +// * DatasetArns - The datasets belonging to the group. +// +// * CreationTime +// +// * LastModificationTime +// +// * Status +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribeDatasetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetGroup +func (c *ForecastService) DescribeDatasetGroup(input *DescribeDatasetGroupInput) (*DescribeDatasetGroupOutput, error) { + req, out := c.DescribeDatasetGroupRequest(input) + return out, req.Send() +} + +// DescribeDatasetGroupWithContext is the same as DescribeDatasetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDatasetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DescribeDatasetGroupWithContext(ctx aws.Context, input *DescribeDatasetGroupInput, opts ...request.Option) (*DescribeDatasetGroupOutput, error) { + req, out := c.DescribeDatasetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDatasetImportJob = "DescribeDatasetImportJob" + +// DescribeDatasetImportJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDatasetImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
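+//
+// A minimal sketch of that two-step Request/Send pattern, assuming a
+// configured client; the ARN is a hypothetical placeholder:
+//
+//    req, out := client.DescribeDatasetImportJobRequest(&forecastservice.DescribeDatasetImportJobInput{
+//        DatasetImportJobArn: aws.String("arn:aws:forecast:us-west-2:111122223333:dataset-import-job/example"),
+//    })
+//    if err := req.Send(); err == nil {
+//        // "out" is valid only after Send returns without error.
+//        fmt.Println(aws.StringValue(out.Status))
+//    }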
+// +// See DescribeDatasetImportJob for more information on using the DescribeDatasetImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetImportJobRequest method. +// req, resp := client.DescribeDatasetImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetImportJob +func (c *ForecastService) DescribeDatasetImportJobRequest(input *DescribeDatasetImportJobInput) (req *request.Request, output *DescribeDatasetImportJobOutput) { + op := &request.Operation{ + Name: opDescribeDatasetImportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetImportJobInput{} + } + + output = &DescribeDatasetImportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDatasetImportJob API operation for Amazon Forecast Service. +// +// Describes a dataset import job created using the CreateDatasetImportJob operation. +// +// In addition to listing the properties provided by the user in the CreateDatasetImportJob +// request, this operation includes the following properties: +// +// * CreationTime +// +// * LastModificationTime +// +// * DataSize +// +// * FieldStatistics +// +// * Status +// +// * Message - If an error occurred, information about the error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribeDatasetImportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeDatasetImportJob +func (c *ForecastService) DescribeDatasetImportJob(input *DescribeDatasetImportJobInput) (*DescribeDatasetImportJobOutput, error) { + req, out := c.DescribeDatasetImportJobRequest(input) + return out, req.Send() +} + +// DescribeDatasetImportJobWithContext is the same as DescribeDatasetImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDatasetImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DescribeDatasetImportJobWithContext(ctx aws.Context, input *DescribeDatasetImportJobInput, opts ...request.Option) (*DescribeDatasetImportJobOutput, error) { + req, out := c.DescribeDatasetImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeForecast = "DescribeForecast" + +// DescribeForecastRequest generates a "aws/request.Request" representing the +// client's request for the DescribeForecast operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeForecast for more information on using the DescribeForecast +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeForecastRequest method. +// req, resp := client.DescribeForecastRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecast +func (c *ForecastService) DescribeForecastRequest(input *DescribeForecastInput) (req *request.Request, output *DescribeForecastOutput) { + op := &request.Operation{ + Name: opDescribeForecast, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeForecastInput{} + } + + output = &DescribeForecastOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeForecast API operation for Amazon Forecast Service. +// +// Describes a forecast created using the CreateForecast operation. +// +// In addition to listing the properties provided by the user in the CreateForecast +// request, this operation includes the following properties: +// +// * DatasetGroupArn - The dataset group that provided the training data. +// +// * CreationTime +// +// * LastModificationTime +// +// * Status +// +// * Message - If an error occurred, information about the error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribeForecast for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecast +func (c *ForecastService) DescribeForecast(input *DescribeForecastInput) (*DescribeForecastOutput, error) { + req, out := c.DescribeForecastRequest(input) + return out, req.Send() +} + +// DescribeForecastWithContext is the same as DescribeForecast with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeForecast for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
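+//
+// A minimal sketch, assuming a configured client and a hypothetical forecast
+// ARN; the deadline cancels a slow Describe call automatically:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := client.DescribeForecastWithContext(ctx, &forecastservice.DescribeForecastInput{
+//        ForecastArn: aws.String("arn:aws:forecast:us-west-2:111122223333:forecast/example"),
+//    })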
+func (c *ForecastService) DescribeForecastWithContext(ctx aws.Context, input *DescribeForecastInput, opts ...request.Option) (*DescribeForecastOutput, error) { + req, out := c.DescribeForecastRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeForecastExportJob = "DescribeForecastExportJob" + +// DescribeForecastExportJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeForecastExportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeForecastExportJob for more information on using the DescribeForecastExportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeForecastExportJobRequest method. +// req, resp := client.DescribeForecastExportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecastExportJob +func (c *ForecastService) DescribeForecastExportJobRequest(input *DescribeForecastExportJobInput) (req *request.Request, output *DescribeForecastExportJobOutput) { + op := &request.Operation{ + Name: opDescribeForecastExportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeForecastExportJobInput{} + } + + output = &DescribeForecastExportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeForecastExportJob API operation for Amazon Forecast Service. +// +// Describes a forecast export job created using the CreateForecastExportJob +// operation. +// +// In addition to listing the properties provided by the user in the CreateForecastExportJob +// request, this operation includes the following properties: +// +// * CreationTime +// +// * LastModificationTime +// +// * Status +// +// * Message - If an error occurred, information about the error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribeForecastExportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribeForecastExportJob +func (c *ForecastService) DescribeForecastExportJob(input *DescribeForecastExportJobInput) (*DescribeForecastExportJobOutput, error) { + req, out := c.DescribeForecastExportJobRequest(input) + return out, req.Send() +} + +// DescribeForecastExportJobWithContext is the same as DescribeForecastExportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeForecastExportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DescribeForecastExportJobWithContext(ctx aws.Context, input *DescribeForecastExportJobInput, opts ...request.Option) (*DescribeForecastExportJobOutput, error) { + req, out := c.DescribeForecastExportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribePredictor = "DescribePredictor" + +// DescribePredictorRequest generates a "aws/request.Request" representing the +// client's request for the DescribePredictor operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePredictor for more information on using the DescribePredictor +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePredictorRequest method. +// req, resp := client.DescribePredictorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictor +func (c *ForecastService) DescribePredictorRequest(input *DescribePredictorInput) (req *request.Request, output *DescribePredictorOutput) { + op := &request.Operation{ + Name: opDescribePredictor, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePredictorInput{} + } + + output = &DescribePredictorOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePredictor API operation for Amazon Forecast Service. +// +// Describes a predictor created using the CreatePredictor operation. +// +// In addition to listing the properties provided by the user in the CreatePredictor +// request, this operation includes the following properties: +// +// * DatasetImportJobArns - The dataset import jobs used to import training +// data. +// +// * AutoMLAlgorithmArns - If AutoML is performed, the algorithms evaluated. +// +// * CreationTime +// +// * LastModificationTime +// +// * Status +// +// * Message - If an error occurred, information about the error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
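+//
+// A minimal sketch of reading the properties listed above, assuming a
+// configured client and a hypothetical predictor ARN:
+//
+//    out, err := client.DescribePredictor(&forecastservice.DescribePredictorInput{
+//        PredictorArn: aws.String("arn:aws:forecast:us-west-2:111122223333:predictor/example"),
+//    })
+//    if err == nil {
+//        // Status, plus Message if an error occurred during training.
+//        fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.Message))
+//    }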
+// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation DescribePredictor for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/DescribePredictor +func (c *ForecastService) DescribePredictor(input *DescribePredictorInput) (*DescribePredictorOutput, error) { + req, out := c.DescribePredictorRequest(input) + return out, req.Send() +} + +// DescribePredictorWithContext is the same as DescribePredictor with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePredictor for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) DescribePredictorWithContext(ctx aws.Context, input *DescribePredictorInput, opts ...request.Option) (*DescribePredictorOutput, error) { + req, out := c.DescribePredictorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccuracyMetrics = "GetAccuracyMetrics" + +// GetAccuracyMetricsRequest generates a "aws/request.Request" representing the +// client's request for the GetAccuracyMetrics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccuracyMetrics for more information on using the GetAccuracyMetrics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccuracyMetricsRequest method. +// req, resp := client.GetAccuracyMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/GetAccuracyMetrics +func (c *ForecastService) GetAccuracyMetricsRequest(input *GetAccuracyMetricsInput) (req *request.Request, output *GetAccuracyMetricsOutput) { + op := &request.Operation{ + Name: opGetAccuracyMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccuracyMetricsInput{} + } + + output = &GetAccuracyMetricsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccuracyMetrics API operation for Amazon Forecast Service. +// +// Provides metrics on the accuracy of the models that were trained by the CreatePredictor +// operation. Use metrics to see how well the model performed and to decide +// whether to use the predictor to generate a forecast. +// +// Metrics are generated for each backtest window evaluated. For more information, +// see EvaluationParameters. 
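+//
+// A minimal sketch of retrieving those metrics, assuming a configured client
+// and a hypothetical predictor ARN:
+//
+//    metrics, err := client.GetAccuracyMetrics(&forecastservice.GetAccuracyMetricsInput{
+//        PredictorArn: aws.String("arn:aws:forecast:us-west-2:111122223333:predictor/example"),
+//    })
+//    if err == nil {
+//        // One evaluation result per backtest window.
+//        fmt.Println(metrics.PredictorEvaluationResults)
+//    }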
+// +// The parameters of the filling method determine which items contribute to +// the metrics. If zero is specified, all items contribute. If nan is specified, +// only those items that have complete data in the range being evaluated contribute. +// For more information, see FeaturizationMethod. +// +// For an example of how to train a model and review metrics, see getting-started. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation GetAccuracyMetrics for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/GetAccuracyMetrics +func (c *ForecastService) GetAccuracyMetrics(input *GetAccuracyMetricsInput) (*GetAccuracyMetricsOutput, error) { + req, out := c.GetAccuracyMetricsRequest(input) + return out, req.Send() +} + +// GetAccuracyMetricsWithContext is the same as GetAccuracyMetrics with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccuracyMetrics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) GetAccuracyMetricsWithContext(ctx aws.Context, input *GetAccuracyMetricsInput, opts ...request.Option) (*GetAccuracyMetricsOutput, error) { + req, out := c.GetAccuracyMetricsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDatasetGroups = "ListDatasetGroups" + +// ListDatasetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasetGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasetGroups for more information on using the ListDatasetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetGroupsRequest method. 
+// req, resp := client.ListDatasetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetGroups +func (c *ForecastService) ListDatasetGroupsRequest(input *ListDatasetGroupsInput) (req *request.Request, output *ListDatasetGroupsOutput) { + op := &request.Operation{ + Name: opListDatasetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetGroupsInput{} + } + + output = &ListDatasetGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasetGroups API operation for Amazon Forecast Service. +// +// Returns a list of dataset groups created using the CreateDatasetGroup operation. +// For each dataset group, a summary of its properties, including its Amazon +// Resource Name (ARN), is returned. You can retrieve the complete set of properties +// by using the ARN with the DescribeDatasetGroup operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListDatasetGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetGroups +func (c *ForecastService) ListDatasetGroups(input *ListDatasetGroupsInput) (*ListDatasetGroupsOutput, error) { + req, out := c.ListDatasetGroupsRequest(input) + return out, req.Send() +} + +// ListDatasetGroupsWithContext is the same as ListDatasetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListDatasetGroupsWithContext(ctx aws.Context, input *ListDatasetGroupsInput, opts ...request.Option) (*ListDatasetGroupsOutput, error) { + req, out := c.ListDatasetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetGroupsPages iterates over the pages of a ListDatasetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasetGroups operation. 
+// pageNum := 0 +// err := client.ListDatasetGroupsPages(params, +// func(page *forecastservice.ListDatasetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListDatasetGroupsPages(input *ListDatasetGroupsInput, fn func(*ListDatasetGroupsOutput, bool) bool) error { + return c.ListDatasetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetGroupsPagesWithContext same as ListDatasetGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListDatasetGroupsPagesWithContext(ctx aws.Context, input *ListDatasetGroupsInput, fn func(*ListDatasetGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasetImportJobs = "ListDatasetImportJobs" + +// ListDatasetImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasetImportJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasetImportJobs for more information on using the ListDatasetImportJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetImportJobsRequest method. +// req, resp := client.ListDatasetImportJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetImportJobs +func (c *ForecastService) ListDatasetImportJobsRequest(input *ListDatasetImportJobsInput) (req *request.Request, output *ListDatasetImportJobsOutput) { + op := &request.Operation{ + Name: opListDatasetImportJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetImportJobsInput{} + } + + output = &ListDatasetImportJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasetImportJobs API operation for Amazon Forecast Service. +// +// Returns a list of dataset import jobs created using the CreateDatasetImportJob +// operation. For each import job, a summary of its properties, including its +// Amazon Resource Name (ARN), is returned. 
You can retrieve the complete set +// of properties by using the ARN with the DescribeDatasetImportJob operation. +// You can filter the list by providing an array of Filter objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListDatasetImportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasetImportJobs +func (c *ForecastService) ListDatasetImportJobs(input *ListDatasetImportJobsInput) (*ListDatasetImportJobsOutput, error) { + req, out := c.ListDatasetImportJobsRequest(input) + return out, req.Send() +} + +// ListDatasetImportJobsWithContext is the same as ListDatasetImportJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasetImportJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListDatasetImportJobsWithContext(ctx aws.Context, input *ListDatasetImportJobsInput, opts ...request.Option) (*ListDatasetImportJobsOutput, error) { + req, out := c.ListDatasetImportJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetImportJobsPages iterates over the pages of a ListDatasetImportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasetImportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasetImportJobs operation. +// pageNum := 0 +// err := client.ListDatasetImportJobsPages(params, +// func(page *forecastservice.ListDatasetImportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListDatasetImportJobsPages(input *ListDatasetImportJobsInput, fn func(*ListDatasetImportJobsOutput, bool) bool) error { + return c.ListDatasetImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetImportJobsPagesWithContext same as ListDatasetImportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
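+//
+// A minimal sketch of collecting every page, assuming a configured client:
+//
+//    var jobs []*forecastservice.DatasetImportJobSummary
+//    err := client.ListDatasetImportJobsPages(&forecastservice.ListDatasetImportJobsInput{},
+//        func(page *forecastservice.ListDatasetImportJobsOutput, lastPage bool) bool {
+//            jobs = append(jobs, page.DatasetImportJobs...)
+//            return true // keep paginating through the final page
+//        })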
+func (c *ForecastService) ListDatasetImportJobsPagesWithContext(ctx aws.Context, input *ListDatasetImportJobsInput, fn func(*ListDatasetImportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetImportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetImportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetImportJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasets for more information on using the ListDatasets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetsRequest method. +// req, resp := client.ListDatasetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasets +func (c *ForecastService) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetsInput{} + } + + output = &ListDatasetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasets API operation for Amazon Forecast Service. +// +// Returns a list of datasets created using the CreateDataset operation. For +// each dataset, a summary of its properties, including its Amazon Resource +// Name (ARN), is returned. You can retrieve the complete set of properties +// by using the ARN with the DescribeDataset operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListDatasets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListDatasets +func (c *ForecastService) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + return out, req.Send() +} + +// ListDatasetsWithContext is the same as ListDatasets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasets for details on how to use this API operation. 
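+//
+// A minimal sketch of passing request options through this variant, here
+// enabling wire-level logging for a single call:
+//
+//    out, err := client.ListDatasetsWithContext(aws.BackgroundContext(),
+//        &forecastservice.ListDatasetsInput{},
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody))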
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetsPages iterates over the pages of a ListDatasets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasets operation. +// pageNum := 0 +// err := client.ListDatasetsPages(params, +// func(page *forecastservice.ListDatasetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListDatasetsPages(input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool) error { + return c.ListDatasetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetsPagesWithContext same as ListDatasetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListDatasetsPagesWithContext(ctx aws.Context, input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListForecastExportJobs = "ListForecastExportJobs" + +// ListForecastExportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListForecastExportJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListForecastExportJobs for more information on using the ListForecastExportJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListForecastExportJobsRequest method. 
+// req, resp := client.ListForecastExportJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecastExportJobs +func (c *ForecastService) ListForecastExportJobsRequest(input *ListForecastExportJobsInput) (req *request.Request, output *ListForecastExportJobsOutput) { + op := &request.Operation{ + Name: opListForecastExportJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListForecastExportJobsInput{} + } + + output = &ListForecastExportJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListForecastExportJobs API operation for Amazon Forecast Service. +// +// Returns a list of forecast export jobs created using the CreateForecastExportJob +// operation. For each forecast export job, a summary of its properties, including +// its Amazon Resource Name (ARN), is returned. You can retrieve the complete +// set of properties by using the ARN with the DescribeForecastExportJob operation. +// The list can be filtered using an array of Filter objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListForecastExportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecastExportJobs +func (c *ForecastService) ListForecastExportJobs(input *ListForecastExportJobsInput) (*ListForecastExportJobsOutput, error) { + req, out := c.ListForecastExportJobsRequest(input) + return out, req.Send() +} + +// ListForecastExportJobsWithContext is the same as ListForecastExportJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListForecastExportJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListForecastExportJobsWithContext(ctx aws.Context, input *ListForecastExportJobsInput, opts ...request.Option) (*ListForecastExportJobsOutput, error) { + req, out := c.ListForecastExportJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListForecastExportJobsPages iterates over the pages of a ListForecastExportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListForecastExportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListForecastExportJobs operation. +// pageNum := 0 +// err := client.ListForecastExportJobsPages(params, +// func(page *forecastservice.ListForecastExportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListForecastExportJobsPages(input *ListForecastExportJobsInput, fn func(*ListForecastExportJobsOutput, bool) bool) error { + return c.ListForecastExportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListForecastExportJobsPagesWithContext same as ListForecastExportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListForecastExportJobsPagesWithContext(ctx aws.Context, input *ListForecastExportJobsInput, fn func(*ListForecastExportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListForecastExportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListForecastExportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListForecastExportJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListForecasts = "ListForecasts" + +// ListForecastsRequest generates a "aws/request.Request" representing the +// client's request for the ListForecasts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListForecasts for more information on using the ListForecasts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListForecastsRequest method. +// req, resp := client.ListForecastsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecasts +func (c *ForecastService) ListForecastsRequest(input *ListForecastsInput) (req *request.Request, output *ListForecastsOutput) { + op := &request.Operation{ + Name: opListForecasts, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListForecastsInput{} + } + + output = &ListForecastsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListForecasts API operation for Amazon Forecast Service. +// +// Returns a list of forecasts created using the CreateForecast operation. For +// each forecast, a summary of its properties, including its Amazon Resource +// Name (ARN), is returned. 
You can retrieve the complete set of properties +// by using the ARN with the DescribeForecast operation. The list can be filtered +// using an array of Filter objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListForecasts for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListForecasts +func (c *ForecastService) ListForecasts(input *ListForecastsInput) (*ListForecastsOutput, error) { + req, out := c.ListForecastsRequest(input) + return out, req.Send() +} + +// ListForecastsWithContext is the same as ListForecasts with the addition of +// the ability to pass a context and additional request options. +// +// See ListForecasts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListForecastsWithContext(ctx aws.Context, input *ListForecastsInput, opts ...request.Option) (*ListForecastsOutput, error) { + req, out := c.ListForecastsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListForecastsPages iterates over the pages of a ListForecasts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListForecasts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListForecasts operation. +// pageNum := 0 +// err := client.ListForecastsPages(params, +// func(page *forecastservice.ListForecastsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListForecastsPages(input *ListForecastsInput, fn func(*ListForecastsOutput, bool) bool) error { + return c.ListForecastsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListForecastsPagesWithContext same as ListForecastsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListForecastsPagesWithContext(ctx aws.Context, input *ListForecastsInput, fn func(*ListForecastsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListForecastsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListForecastsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListForecastsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListPredictors = "ListPredictors" + +// ListPredictorsRequest generates a "aws/request.Request" representing the +// client's request for the ListPredictors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPredictors for more information on using the ListPredictors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPredictorsRequest method. +// req, resp := client.ListPredictorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictors +func (c *ForecastService) ListPredictorsRequest(input *ListPredictorsInput) (req *request.Request, output *ListPredictorsOutput) { + op := &request.Operation{ + Name: opListPredictors, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPredictorsInput{} + } + + output = &ListPredictorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPredictors API operation for Amazon Forecast Service. +// +// Returns a list of predictors created using the CreatePredictor operation. +// For each predictor, a summary of its properties, including its Amazon Resource +// Name (ARN), is returned. You can retrieve the complete set of properties +// by using the ARN with the DescribePredictor operation. The list can be filtered +// using an array of Filter objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListPredictors for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. Tokens expire after 24 hours. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListPredictors +func (c *ForecastService) ListPredictors(input *ListPredictorsInput) (*ListPredictorsOutput, error) { + req, out := c.ListPredictorsRequest(input) + return out, req.Send() +} + +// ListPredictorsWithContext is the same as ListPredictors with the addition of +// the ability to pass a context and additional request options. +// +// See ListPredictors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListPredictorsWithContext(ctx aws.Context, input *ListPredictorsInput, opts ...request.Option) (*ListPredictorsOutput, error) { + req, out := c.ListPredictorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPredictorsPages iterates over the pages of a ListPredictors operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPredictors method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPredictors operation. +// pageNum := 0 +// err := client.ListPredictorsPages(params, +// func(page *forecastservice.ListPredictorsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ForecastService) ListPredictorsPages(input *ListPredictorsInput, fn func(*ListPredictorsOutput, bool) bool) error { + return c.ListPredictorsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPredictorsPagesWithContext same as ListPredictorsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListPredictorsPagesWithContext(ctx aws.Context, input *ListPredictorsInput, fn func(*ListPredictorsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPredictorsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPredictorsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPredictorsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opUpdateDatasetGroup = "UpdateDatasetGroup" + +// UpdateDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDatasetGroup for more information on using the UpdateDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDatasetGroupRequest method. 
+//    req, resp := client.UpdateDatasetGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UpdateDatasetGroup
+func (c *ForecastService) UpdateDatasetGroupRequest(input *UpdateDatasetGroupInput) (req *request.Request, output *UpdateDatasetGroupOutput) {
+    op := &request.Operation{
+        Name:       opUpdateDatasetGroup,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &UpdateDatasetGroupInput{}
+    }
+
+    output = &UpdateDatasetGroupOutput{}
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+    return
+}
+
+// UpdateDatasetGroup API operation for Amazon Forecast Service.
+//
+// Replaces any existing datasets in the dataset group with the specified datasets.
+//
+// The Status of the dataset group must be ACTIVE before creating a predictor
+// using the dataset group. Use the DescribeDatasetGroup operation to get the
+// status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Forecast Service's
+// API operation UpdateDatasetGroup for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   We can't process the request because it includes an invalid value or a value
+//   that exceeds the valid range.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   We can't find a resource with that Amazon Resource Name (ARN). Check the
+//   ARN and try again.
+//
+//   * ErrCodeResourceInUseException "ResourceInUseException"
+//   The specified resource is in use.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UpdateDatasetGroup
+func (c *ForecastService) UpdateDatasetGroup(input *UpdateDatasetGroupInput) (*UpdateDatasetGroupOutput, error) {
+    req, out := c.UpdateDatasetGroupRequest(input)
+    return out, req.Send()
+}
+
+// UpdateDatasetGroupWithContext is the same as UpdateDatasetGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDatasetGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ForecastService) UpdateDatasetGroupWithContext(ctx aws.Context, input *UpdateDatasetGroupInput, opts ...request.Option) (*UpdateDatasetGroupOutput, error) {
+    req, out := c.UpdateDatasetGroupRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+// Specifies a categorical hyperparameter and its range of tunable values.
+// This object is part of the ParameterRanges object.
+type CategoricalParameterRange struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the categorical hyperparameter to tune.
+    //
+    // Name is a required field
+    Name *string `min:"1" type:"string" required:"true"`
+
+    // A list of the tunable categories for the hyperparameter.
+    //
+    // Values is a required field
+    Values []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CategoricalParameterRange) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CategoricalParameterRange) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CategoricalParameterRange) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CategoricalParameterRange"}
+    if s.Name == nil {
+        invalidParams.Add(request.NewErrParamRequired("Name"))
+    }
+    if s.Name != nil && len(*s.Name) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+    }
+    if s.Values == nil {
+        invalidParams.Add(request.NewErrParamRequired("Values"))
+    }
+    if s.Values != nil && len(s.Values) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Values", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// SetName sets the Name field's value.
+func (s *CategoricalParameterRange) SetName(v string) *CategoricalParameterRange {
+    s.Name = &v
+    return s
+}
+
+// SetValues sets the Values field's value.
+func (s *CategoricalParameterRange) SetValues(v []*string) *CategoricalParameterRange {
+    s.Values = v
+    return s
+}
+
+// Specifies a continuous hyperparameter and its range of tunable values. This
+// object is part of the ParameterRanges object.
+type ContinuousParameterRange struct {
+    _ struct{} `type:"structure"`
+
+    // The maximum tunable value of the hyperparameter.
+    //
+    // MaxValue is a required field
+    MaxValue *float64 `type:"double" required:"true"`
+
+    // The minimum tunable value of the hyperparameter.
+    //
+    // MinValue is a required field
+    MinValue *float64 `type:"double" required:"true"`
+
+    // The name of the hyperparameter to tune.
+    //
+    // Name is a required field
+    Name *string `min:"1" type:"string" required:"true"`
+
+    // The scale that hyperparameter tuning uses to search the hyperparameter range.
+    // For information about choosing a hyperparameter scale, see Hyperparameter
+    // Scaling (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type).
+    // One of the following values:
+    //
+    // Auto
+    //
+    // Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.
+    //
+    // Linear
+    //
+    // Hyperparameter tuning searches the values in the hyperparameter range by
+    // using a linear scale.
+    //
+    // Logarithmic
+    //
+    // Hyperparameter tuning searches the values in the hyperparameter range by
+    // using a logarithmic scale.
+    //
+    // Logarithmic scaling works only for ranges that have values greater than
+    // 0.
+    //
+    // ReverseLogarithmic
+    //
+    // Hyperparameter tuning searches the values in the hyperparameter range by
+    // using a reverse logarithmic scale.
+    //
+    // Reverse logarithmic scaling works only for ranges that are entirely within
+    // the range 0 <= x < 1.0.
+    ScalingType *string `type:"string" enum:"ScalingType"`
+}
+
+// String returns the string representation
+func (s ContinuousParameterRange) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuousParameterRange) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
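+//
+// Illustrative sketch (editorial, not generated code): a range for a
+// hypothetical "learning_rate" hyperparameter, assuming the package's
+// generated ScalingType constants and the aws value helpers:
+//
+//    lr := &forecastservice.ContinuousParameterRange{
+//        Name:        aws.String("learning_rate"),
+//        MinValue:    aws.Float64(0.0001),
+//        MaxValue:    aws.Float64(0.1),
+//        ScalingType: aws.String(forecastservice.ScalingTypeLogarithmic),
+//    }
+//
+// Validate (below) would flag a missing Name, MinValue, or MaxValue before
+// the request is ever sent.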
+func (s *ContinuousParameterRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContinuousParameterRange"} + if s.MaxValue == nil { + invalidParams.Add(request.NewErrParamRequired("MaxValue")) + } + if s.MinValue == nil { + invalidParams.Add(request.NewErrParamRequired("MinValue")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxValue sets the MaxValue field's value. +func (s *ContinuousParameterRange) SetMaxValue(v float64) *ContinuousParameterRange { + s.MaxValue = &v + return s +} + +// SetMinValue sets the MinValue field's value. +func (s *ContinuousParameterRange) SetMinValue(v float64) *ContinuousParameterRange { + s.MinValue = &v + return s +} + +// SetName sets the Name field's value. +func (s *ContinuousParameterRange) SetName(v string) *ContinuousParameterRange { + s.Name = &v + return s +} + +// SetScalingType sets the ScalingType field's value. +func (s *ContinuousParameterRange) SetScalingType(v string) *ContinuousParameterRange { + s.ScalingType = &v + return s +} + +type CreateDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // An array of Amazon Resource Names (ARNs) of the datasets that you want to + // include in the dataset group. + DatasetArns []*string `type:"list"` + + // A name for the dataset group. + // + // DatasetGroupName is a required field + DatasetGroupName *string `min:"1" type:"string" required:"true"` + + // The domain associated with the dataset group. The Domain and DatasetType + // that you choose determine the fields that must be present in the training + // data that you import to the dataset. For example, if you choose the RETAIL + // domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires + // item_id, timestamp, and demand fields to be present in your data. For more + // information, see howitworks-datasets-groups. + // + // Domain is a required field + Domain *string `type:"string" required:"true" enum:"Domain"` +} + +// String returns the string representation +func (s CreateDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetGroupInput"} + if s.DatasetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupName")) + } + if s.DatasetGroupName != nil && len(*s.DatasetGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetGroupName", 1)) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArns sets the DatasetArns field's value. +func (s *CreateDatasetGroupInput) SetDatasetArns(v []*string) *CreateDatasetGroupInput { + s.DatasetArns = v + return s +} + +// SetDatasetGroupName sets the DatasetGroupName field's value. +func (s *CreateDatasetGroupInput) SetDatasetGroupName(v string) *CreateDatasetGroupInput { + s.DatasetGroupName = &v + return s +} + +// SetDomain sets the Domain field's value. 
+func (s *CreateDatasetGroupInput) SetDomain(v string) *CreateDatasetGroupInput { + s.Domain = &v + return s +} + +type CreateDatasetGroupOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group. + DatasetGroupArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetGroupOutput) GoString() string { + return s.String() +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateDatasetGroupOutput) SetDatasetGroupArn(v string) *CreateDatasetGroupOutput { + s.DatasetGroupArn = &v + return s +} + +type CreateDatasetImportJobInput struct { + _ struct{} `type:"structure"` + + // The location of the training data to import and an AWS Identity and Access + // Management (IAM) role that Amazon Forecast can assume to access the data. + // + // DataSource is a required field + DataSource *DataSource `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want + // to import data to. + // + // DatasetArn is a required field + DatasetArn *string `type:"string" required:"true"` + + // The name for the dataset import job. It is recommended to include the current + // timestamp in the name to guard against getting a ResourceAlreadyExistsException + // exception, for example, 20190721DatasetImport. + // + // DatasetImportJobName is a required field + DatasetImportJobName *string `min:"1" type:"string" required:"true"` + + // The format of timestamps in the dataset. Two formats are supported, dependent + // on the DataFrequency specified when the dataset was created. + // + // * "yyyy-MM-dd" For data frequencies: Y, M, W, and D + // + // * "yyyy-MM-dd HH:mm:ss" For data frequencies: H, 30min, 15min, and 1min; + // and optionally, for: Y, M, W, and D + TimestampFormat *string `type:"string"` +} + +// String returns the string representation +func (s CreateDatasetImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetImportJobInput"} + if s.DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("DataSource")) + } + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + if s.DatasetImportJobName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetImportJobName")) + } + if s.DatasetImportJobName != nil && len(*s.DatasetImportJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetImportJobName", 1)) + } + if s.DataSource != nil { + if err := s.DataSource.Validate(); err != nil { + invalidParams.AddNested("DataSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataSource sets the DataSource field's value. +func (s *CreateDatasetImportJobInput) SetDataSource(v *DataSource) *CreateDatasetImportJobInput { + s.DataSource = v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. 
+func (s *CreateDatasetImportJobInput) SetDatasetArn(v string) *CreateDatasetImportJobInput { + s.DatasetArn = &v + return s +} + +// SetDatasetImportJobName sets the DatasetImportJobName field's value. +func (s *CreateDatasetImportJobInput) SetDatasetImportJobName(v string) *CreateDatasetImportJobInput { + s.DatasetImportJobName = &v + return s +} + +// SetTimestampFormat sets the TimestampFormat field's value. +func (s *CreateDatasetImportJobInput) SetTimestampFormat(v string) *CreateDatasetImportJobInput { + s.TimestampFormat = &v + return s +} + +type CreateDatasetImportJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset import job. + DatasetImportJobArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateDatasetImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetImportJobOutput) GoString() string { + return s.String() +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *CreateDatasetImportJobOutput) SetDatasetImportJobArn(v string) *CreateDatasetImportJobOutput { + s.DatasetImportJobArn = &v + return s +} + +type CreateDatasetInput struct { + _ struct{} `type:"structure"` + + // The frequency of data collection. + // + // Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min + // (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and + // 1min (1 minute). For example, "D" indicates every day and "15min" indicates + // every 15 minutes. + DataFrequency *string `type:"string"` + + // A name for the dataset. + // + // DatasetName is a required field + DatasetName *string `min:"1" type:"string" required:"true"` + + // The dataset type. Valid values depend on the chosen Domain. + // + // DatasetType is a required field + DatasetType *string `type:"string" required:"true" enum:"DatasetType"` + + // The domain associated with the dataset. The Domain and DatasetType that you + // choose determine the fields that must be present in the training data that + // you import to the dataset. For example, if you choose the RETAIL domain and + // TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, + // timestamp, and demand fields to be present in your data. For more information, + // see howitworks-datasets-groups. + // + // Domain is a required field + Domain *string `type:"string" required:"true" enum:"Domain"` + + // An AWS Key Management Service (KMS) key and the AWS Identity and Access Management + // (IAM) role that Amazon Forecast can assume to access the key. + EncryptionConfig *EncryptionConfig `type:"structure"` + + // The schema for the dataset. The schema attributes and their order must match + // the fields in your data. The dataset Domain and DatasetType that you choose + // determine the minimum required fields in your training data. For information + // about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types. + // + // Schema is a required field + Schema *Schema `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.DatasetType == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetType")) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Schema == nil { + invalidParams.Add(request.NewErrParamRequired("Schema")) + } + if s.EncryptionConfig != nil { + if err := s.EncryptionConfig.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfig", err.(request.ErrInvalidParams)) + } + } + if s.Schema != nil { + if err := s.Schema.Validate(); err != nil { + invalidParams.AddNested("Schema", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataFrequency sets the DataFrequency field's value. +func (s *CreateDatasetInput) SetDataFrequency(v string) *CreateDatasetInput { + s.DataFrequency = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateDatasetInput) SetDatasetName(v string) *CreateDatasetInput { + s.DatasetName = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *CreateDatasetInput) SetDatasetType(v string) *CreateDatasetInput { + s.DatasetType = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *CreateDatasetInput) SetDomain(v string) *CreateDatasetInput { + s.Domain = &v + return s +} + +// SetEncryptionConfig sets the EncryptionConfig field's value. +func (s *CreateDatasetInput) SetEncryptionConfig(v *EncryptionConfig) *CreateDatasetInput { + s.EncryptionConfig = v + return s +} + +// SetSchema sets the Schema field's value. +func (s *CreateDatasetInput) SetSchema(v *Schema) *CreateDatasetInput { + s.Schema = v + return s +} + +type CreateDatasetOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset. + DatasetArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetOutput) GoString() string { + return s.String() +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetOutput) SetDatasetArn(v string) *CreateDatasetOutput { + s.DatasetArn = &v + return s +} + +type CreateForecastExportJobInput struct { + _ struct{} `type:"structure"` + + // The path to the Amazon S3 bucket where you want to save the forecast and + // an AWS Identity and Access Management (IAM) role that Amazon Forecast can + // assume to access the bucket. + // + // Destination is a required field + Destination *DataDestination `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the forecast that you want to export. + // + // ForecastArn is a required field + ForecastArn *string `type:"string" required:"true"` + + // The name for the forecast export job. 
+ // + // ForecastExportJobName is a required field + ForecastExportJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateForecastExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateForecastExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateForecastExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateForecastExportJobInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.ForecastArn == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastArn")) + } + if s.ForecastExportJobName == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastExportJobName")) + } + if s.ForecastExportJobName != nil && len(*s.ForecastExportJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ForecastExportJobName", 1)) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *CreateForecastExportJobInput) SetDestination(v *DataDestination) *CreateForecastExportJobInput { + s.Destination = v + return s +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *CreateForecastExportJobInput) SetForecastArn(v string) *CreateForecastExportJobInput { + s.ForecastArn = &v + return s +} + +// SetForecastExportJobName sets the ForecastExportJobName field's value. +func (s *CreateForecastExportJobInput) SetForecastExportJobName(v string) *CreateForecastExportJobInput { + s.ForecastExportJobName = &v + return s +} + +type CreateForecastExportJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the export job. + ForecastExportJobArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateForecastExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateForecastExportJobOutput) GoString() string { + return s.String() +} + +// SetForecastExportJobArn sets the ForecastExportJobArn field's value. +func (s *CreateForecastExportJobOutput) SetForecastExportJobArn(v string) *CreateForecastExportJobOutput { + s.ForecastExportJobArn = &v + return s +} + +type CreateForecastInput struct { + _ struct{} `type:"structure"` + + // The name for the forecast. + // + // ForecastName is a required field + ForecastName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the predictor to use to generate the forecast. + // + // PredictorArn is a required field + PredictorArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateForecastInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateForecastInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
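+//
+// Illustrative sketch (editorial, not generated code): both fields are
+// required; the predictor ARN below is hypothetical:
+//
+//    in := &forecastservice.CreateForecastInput{
+//        ForecastName: aws.String("electricity_forecast"),
+//        PredictorArn: aws.String("arn:aws:forecast:us-west-2:123456789012:predictor/example"),
+//    }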
+func (s *CreateForecastInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateForecastInput"} + if s.ForecastName == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastName")) + } + if s.ForecastName != nil && len(*s.ForecastName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ForecastName", 1)) + } + if s.PredictorArn == nil { + invalidParams.Add(request.NewErrParamRequired("PredictorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForecastName sets the ForecastName field's value. +func (s *CreateForecastInput) SetForecastName(v string) *CreateForecastInput { + s.ForecastName = &v + return s +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *CreateForecastInput) SetPredictorArn(v string) *CreateForecastInput { + s.PredictorArn = &v + return s +} + +type CreateForecastOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast. + ForecastArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateForecastOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateForecastOutput) GoString() string { + return s.String() +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *CreateForecastOutput) SetForecastArn(v string) *CreateForecastOutput { + s.ForecastArn = &v + return s +} + +type CreatePredictorInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the algorithm to use for model training. + // Required if PerformAutoML is not set to true. + // + // Supported algorithms + // + // * arn:aws:forecast:::algorithm/ARIMA + // + // * arn:aws:forecast:::algorithm/Deep_AR_Plus - supports hyperparameter + // optimization (HPO) + // + // * arn:aws:forecast:::algorithm/ETS + // + // * arn:aws:forecast:::algorithm/NPTS + // + // * arn:aws:forecast:::algorithm/Prophet + AlgorithmArn *string `type:"string"` + + // An AWS Key Management Service (KMS) key and the AWS Identity and Access Management + // (IAM) role that Amazon Forecast can assume to access the key. + EncryptionConfig *EncryptionConfig `type:"structure"` + + // Used to override the default evaluation parameters of the specified algorithm. + // Amazon Forecast evaluates a predictor by splitting a dataset into training + // data and testing data. The evaluation parameters define how to perform the + // split and the number of iterations. + EvaluationParameters *EvaluationParameters `type:"structure"` + + // The featurization configuration. + // + // FeaturizationConfig is a required field + FeaturizationConfig *FeaturizationConfig `type:"structure" required:"true"` + + // Specifies the number of time-steps that the model is trained to predict. + // The forecast horizon is also called the prediction length. + // + // For example, if you configure a dataset for daily data collection (using + // the DataFrequency parameter of the CreateDataset operation) and set the forecast + // horizon to 10, the model returns predictions for 10 days. + // + // ForecastHorizon is a required field + ForecastHorizon *int64 `type:"integer" required:"true"` + + // Provides hyperparameter override values for the algorithm. If you don't provide + // this parameter, Amazon Forecast uses default values. The individual algorithms + // specify which hyperparameters support hyperparameter optimization (HPO). + // For more information, see aws-forecast-choosing-recipes. 
+ HPOConfig *HyperParameterTuningJobConfig `type:"structure"` + + // Describes the dataset group that contains the data to use to train the predictor. + // + // InputDataConfig is a required field + InputDataConfig *InputDataConfig `type:"structure" required:"true"` + + // Whether to perform AutoML. The default value is false. In this case, you + // are required to specify an algorithm. + // + // If you want Amazon Forecast to evaluate the algorithms it provides and choose + // the best algorithm and configuration for your training dataset, set PerformAutoML + // to true. This is a good option if you aren't sure which algorithm is suitable + // for your application. + PerformAutoML *bool `type:"boolean"` + + // Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter + // values for your training data. The process of performing HPO is known as + // a hyperparameter tuning job. + // + // The default value is false. In this case, Amazon Forecast uses default hyperparameter + // values from the chosen algorithm. + // + // To override the default values, set PerformHPO to true and supply the HyperParameterTuningJobConfig + // object. The tuning job specifies an objective metric, the hyperparameters + // to optimize, and the valid range for each hyperparameter. + // + // The following algorithms support HPO: + // + // * DeepAR+ + PerformHPO *bool `type:"boolean"` + + // A name for the predictor. + // + // PredictorName is a required field + PredictorName *string `min:"1" type:"string" required:"true"` + + // The training parameters to override for model training. The parameters that + // you can override are listed in the individual algorithms in aws-forecast-choosing-recipes. + TrainingParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s CreatePredictorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePredictorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
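+//
+// Illustrative sketch (editorial, not generated code): an AutoML predictor,
+// so no AlgorithmArn is given. The dataset group ARN is hypothetical, and
+// FeaturizationConfig and InputDataConfig are assumed to be the package's
+// generated types:
+//
+//    in := &forecastservice.CreatePredictorInput{
+//        PredictorName:   aws.String("electricity_predictor"),
+//        ForecastHorizon: aws.Int64(10),
+//        PerformAutoML:   aws.Bool(true),
+//        FeaturizationConfig: &forecastservice.FeaturizationConfig{
+//            ForecastFrequency: aws.String("D"),
+//        },
+//        InputDataConfig: &forecastservice.InputDataConfig{
+//            DatasetGroupArn: aws.String("arn:aws:forecast:us-west-2:123456789012:dataset-group/example"),
+//        },
+//    }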
+func (s *CreatePredictorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePredictorInput"} + if s.FeaturizationConfig == nil { + invalidParams.Add(request.NewErrParamRequired("FeaturizationConfig")) + } + if s.ForecastHorizon == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastHorizon")) + } + if s.InputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputDataConfig")) + } + if s.PredictorName == nil { + invalidParams.Add(request.NewErrParamRequired("PredictorName")) + } + if s.PredictorName != nil && len(*s.PredictorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PredictorName", 1)) + } + if s.EncryptionConfig != nil { + if err := s.EncryptionConfig.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfig", err.(request.ErrInvalidParams)) + } + } + if s.FeaturizationConfig != nil { + if err := s.FeaturizationConfig.Validate(); err != nil { + invalidParams.AddNested("FeaturizationConfig", err.(request.ErrInvalidParams)) + } + } + if s.HPOConfig != nil { + if err := s.HPOConfig.Validate(); err != nil { + invalidParams.AddNested("HPOConfig", err.(request.ErrInvalidParams)) + } + } + if s.InputDataConfig != nil { + if err := s.InputDataConfig.Validate(); err != nil { + invalidParams.AddNested("InputDataConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlgorithmArn sets the AlgorithmArn field's value. +func (s *CreatePredictorInput) SetAlgorithmArn(v string) *CreatePredictorInput { + s.AlgorithmArn = &v + return s +} + +// SetEncryptionConfig sets the EncryptionConfig field's value. +func (s *CreatePredictorInput) SetEncryptionConfig(v *EncryptionConfig) *CreatePredictorInput { + s.EncryptionConfig = v + return s +} + +// SetEvaluationParameters sets the EvaluationParameters field's value. +func (s *CreatePredictorInput) SetEvaluationParameters(v *EvaluationParameters) *CreatePredictorInput { + s.EvaluationParameters = v + return s +} + +// SetFeaturizationConfig sets the FeaturizationConfig field's value. +func (s *CreatePredictorInput) SetFeaturizationConfig(v *FeaturizationConfig) *CreatePredictorInput { + s.FeaturizationConfig = v + return s +} + +// SetForecastHorizon sets the ForecastHorizon field's value. +func (s *CreatePredictorInput) SetForecastHorizon(v int64) *CreatePredictorInput { + s.ForecastHorizon = &v + return s +} + +// SetHPOConfig sets the HPOConfig field's value. +func (s *CreatePredictorInput) SetHPOConfig(v *HyperParameterTuningJobConfig) *CreatePredictorInput { + s.HPOConfig = v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *CreatePredictorInput) SetInputDataConfig(v *InputDataConfig) *CreatePredictorInput { + s.InputDataConfig = v + return s +} + +// SetPerformAutoML sets the PerformAutoML field's value. +func (s *CreatePredictorInput) SetPerformAutoML(v bool) *CreatePredictorInput { + s.PerformAutoML = &v + return s +} + +// SetPerformHPO sets the PerformHPO field's value. +func (s *CreatePredictorInput) SetPerformHPO(v bool) *CreatePredictorInput { + s.PerformHPO = &v + return s +} + +// SetPredictorName sets the PredictorName field's value. +func (s *CreatePredictorInput) SetPredictorName(v string) *CreatePredictorInput { + s.PredictorName = &v + return s +} + +// SetTrainingParameters sets the TrainingParameters field's value. 
+func (s *CreatePredictorInput) SetTrainingParameters(v map[string]*string) *CreatePredictorInput { + s.TrainingParameters = v + return s +} + +type CreatePredictorOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the predictor. + PredictorArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePredictorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePredictorOutput) GoString() string { + return s.String() +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *CreatePredictorOutput) SetPredictorArn(v string) *CreatePredictorOutput { + s.PredictorArn = &v + return s +} + +// The destination of an exported forecast and credentials to access the location. +// This object is submitted in the CreateForecastExportJob request. +type DataDestination struct { + _ struct{} `type:"structure"` + + // The path to an Amazon Simple Storage Service (Amazon S3) bucket along with + // the credentials to access the bucket. + // + // S3Config is a required field + S3Config *S3Config `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DataDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataDestination"} + if s.S3Config == nil { + invalidParams.Add(request.NewErrParamRequired("S3Config")) + } + if s.S3Config != nil { + if err := s.S3Config.Validate(); err != nil { + invalidParams.AddNested("S3Config", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Config sets the S3Config field's value. +func (s *DataDestination) SetS3Config(v *S3Config) *DataDestination { + s.S3Config = v + return s +} + +// The source of your training data and credentials to access the data. This +// object is submitted in the CreateDatasetImportJob request. +type DataSource struct { + _ struct{} `type:"structure"` + + // The path to the training data stored in an Amazon Simple Storage Service + // (Amazon S3) bucket along with the credentials to access the data. + // + // S3Config is a required field + S3Config *S3Config `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSource"} + if s.S3Config == nil { + invalidParams.Add(request.NewErrParamRequired("S3Config")) + } + if s.S3Config != nil { + if err := s.S3Config.Validate(); err != nil { + invalidParams.AddNested("S3Config", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Config sets the S3Config field's value. +func (s *DataSource) SetS3Config(v *S3Config) *DataSource { + s.S3Config = v + return s +} + +// Provides a summary of the dataset group properties used in the ListDatasetGroups +// operation. 
To get the complete set of properties, call the DescribeDatasetGroup
+// operation, and provide the listed DatasetGroupArn.
+type DatasetGroupSummary struct {
+    _ struct{} `type:"structure"`
+
+    // When the dataset group was created.
+    CreationTime *time.Time `type:"timestamp"`
+
+    // The Amazon Resource Name (ARN) of the dataset group.
+    DatasetGroupArn *string `type:"string"`
+
+    // The name of the dataset group.
+    DatasetGroupName *string `min:"1" type:"string"`
+
+    // When the dataset group was created or last updated from a call to the UpdateDatasetGroup
+    // operation. While the dataset group is being updated, LastModificationTime
+    // is the current query time.
+    LastModificationTime *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s DatasetGroupSummary) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DatasetGroupSummary) GoString() string {
+    return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *DatasetGroupSummary) SetCreationTime(v time.Time) *DatasetGroupSummary {
+    s.CreationTime = &v
+    return s
+}
+
+// SetDatasetGroupArn sets the DatasetGroupArn field's value.
+func (s *DatasetGroupSummary) SetDatasetGroupArn(v string) *DatasetGroupSummary {
+    s.DatasetGroupArn = &v
+    return s
+}
+
+// SetDatasetGroupName sets the DatasetGroupName field's value.
+func (s *DatasetGroupSummary) SetDatasetGroupName(v string) *DatasetGroupSummary {
+    s.DatasetGroupName = &v
+    return s
+}
+
+// SetLastModificationTime sets the LastModificationTime field's value.
+func (s *DatasetGroupSummary) SetLastModificationTime(v time.Time) *DatasetGroupSummary {
+    s.LastModificationTime = &v
+    return s
+}
+
+// Provides a summary of the dataset import job properties used in the ListDatasetImportJobs
+// operation. To get the complete set of properties, call the DescribeDatasetImportJob
+// operation, and provide the listed DatasetImportJobArn.
+type DatasetImportJobSummary struct {
+    _ struct{} `type:"structure"`
+
+    // When the dataset import job was created.
+    CreationTime *time.Time `type:"timestamp"`
+
+    // The location of the Amazon S3 bucket that contains the training data.
+    DataSource *DataSource `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the dataset import job.
+    DatasetImportJobArn *string `type:"string"`
+
+    // The name of the dataset import job.
+    DatasetImportJobName *string `min:"1" type:"string"`
+
+    // Dependent on the status as follows:
+    //
+    //    * CREATE_PENDING - same as CreationTime
+    //
+    //    * CREATE_IN_PROGRESS - the current timestamp
+    //
+    //    * ACTIVE or CREATE_FAILED - when the job finished or failed
+    LastModificationTime *time.Time `type:"timestamp"`
+
+    // If an error occurred, an informational message about the error.
+    Message *string `type:"string"`
+
+    // The status of the dataset import job. The status is reflected in the status
+    // of the dataset. For example, when the import job status is CREATE_IN_PROGRESS,
+    // the status of the dataset is UPDATE_IN_PROGRESS.
States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + Status *string `type:"string"` +} + +// String returns the string representation +func (s DatasetImportJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetImportJobSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DatasetImportJobSummary) SetCreationTime(v time.Time) *DatasetImportJobSummary { + s.CreationTime = &v + return s +} + +// SetDataSource sets the DataSource field's value. +func (s *DatasetImportJobSummary) SetDataSource(v *DataSource) *DatasetImportJobSummary { + s.DataSource = v + return s +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *DatasetImportJobSummary) SetDatasetImportJobArn(v string) *DatasetImportJobSummary { + s.DatasetImportJobArn = &v + return s +} + +// SetDatasetImportJobName sets the DatasetImportJobName field's value. +func (s *DatasetImportJobSummary) SetDatasetImportJobName(v string) *DatasetImportJobSummary { + s.DatasetImportJobName = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DatasetImportJobSummary) SetLastModificationTime(v time.Time) *DatasetImportJobSummary { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DatasetImportJobSummary) SetMessage(v string) *DatasetImportJobSummary { + s.Message = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetImportJobSummary) SetStatus(v string) *DatasetImportJobSummary { + s.Status = &v + return s +} + +// Provides a summary of the dataset properties used in the ListDatasets operation. +// To get the complete set of properties, call the DescribeDataset operation, +// and provide the listed DatasetArn. +type DatasetSummary struct { + _ struct{} `type:"structure"` + + // When the dataset was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset. + DatasetArn *string `type:"string"` + + // The name of the dataset. + DatasetName *string `min:"1" type:"string"` + + // The dataset type. + DatasetType *string `type:"string" enum:"DatasetType"` + + // The domain associated with the dataset. + Domain *string `type:"string" enum:"Domain"` + + // When the dataset is created, LastModificationTime is the same as CreationTime. + // After a CreateDatasetImportJob operation is called, LastModificationTime + // is when the import job finished or failed. While data is being imported to + // the dataset, LastModificationTime is the current query time. + LastModificationTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s DatasetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DatasetSummary) SetCreationTime(v time.Time) *DatasetSummary { + s.CreationTime = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DatasetSummary) SetDatasetArn(v string) *DatasetSummary { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. 
+func (s *DatasetSummary) SetDatasetName(v string) *DatasetSummary { + s.DatasetName = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *DatasetSummary) SetDatasetType(v string) *DatasetSummary { + s.DatasetType = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *DatasetSummary) SetDomain(v string) *DatasetSummary { + s.Domain = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DatasetSummary) SetLastModificationTime(v time.Time) *DatasetSummary { + s.LastModificationTime = &v + return s +} + +type DeleteDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group to delete. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetGroupInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DeleteDatasetGroupInput) SetDatasetGroupArn(v string) *DeleteDatasetGroupInput { + s.DatasetGroupArn = &v + return s +} + +type DeleteDatasetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetGroupOutput) GoString() string { + return s.String() +} + +type DeleteDatasetImportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset import job to delete. + // + // DatasetImportJobArn is a required field + DatasetImportJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetImportJobInput"} + if s.DatasetImportJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetImportJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. 
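+//
+// Editorial note: delete operations return an empty output struct on success.
+// A minimal sketch, with a hypothetical ARN and an assumed client svc:
+//
+//    _, err := svc.DeleteDatasetImportJob((&forecastservice.DeleteDatasetImportJobInput{}).
+//        SetDatasetImportJobArn("arn:aws:forecast:us-west-2:123456789012:dataset-import-job/example"))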
+func (s *DeleteDatasetImportJobInput) SetDatasetImportJobArn(v string) *DeleteDatasetImportJobInput { + s.DatasetImportJobArn = &v + return s +} + +type DeleteDatasetImportJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetImportJobOutput) GoString() string { + return s.String() +} + +type DeleteDatasetInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset to delete. + // + // DatasetArn is a required field + DatasetArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetInput"} + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DeleteDatasetInput) SetDatasetArn(v string) *DeleteDatasetInput { + s.DatasetArn = &v + return s +} + +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +type DeleteForecastExportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast export job to delete. + // + // ForecastExportJobArn is a required field + ForecastExportJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteForecastExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteForecastExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteForecastExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteForecastExportJobInput"} + if s.ForecastExportJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastExportJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForecastExportJobArn sets the ForecastExportJobArn field's value. +func (s *DeleteForecastExportJobInput) SetForecastExportJobArn(v string) *DeleteForecastExportJobInput { + s.ForecastExportJobArn = &v + return s +} + +type DeleteForecastExportJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteForecastExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteForecastExportJobOutput) GoString() string { + return s.String() +} + +type DeleteForecastInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast to delete. 
+ // + // ForecastArn is a required field + ForecastArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteForecastInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteForecastInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteForecastInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteForecastInput"} + if s.ForecastArn == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *DeleteForecastInput) SetForecastArn(v string) *DeleteForecastInput { + s.ForecastArn = &v + return s +} + +type DeleteForecastOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteForecastOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteForecastOutput) GoString() string { + return s.String() +} + +type DeletePredictorInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the predictor to delete. + // + // PredictorArn is a required field + PredictorArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePredictorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePredictorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePredictorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePredictorInput"} + if s.PredictorArn == nil { + invalidParams.Add(request.NewErrParamRequired("PredictorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *DeletePredictorInput) SetPredictorArn(v string) *DeletePredictorInput { + s.PredictorArn = &v + return s +} + +type DeletePredictorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePredictorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePredictorOutput) GoString() string { + return s.String() +} + +type DescribeDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
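+//
+// Illustrative sketch (editorial, not generated code): callers commonly check
+// that Status is ACTIVE before creating a predictor from the dataset group;
+// svc and arn are assumed:
+//
+//    out, err := svc.DescribeDatasetGroup(&forecastservice.DescribeDatasetGroupInput{
+//        DatasetGroupArn: aws.String(arn),
+//    })
+//    if err == nil && aws.StringValue(out.Status) == "ACTIVE" {
+//        // safe to call CreatePredictor with this dataset group
+//    }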
+func (s *DescribeDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetGroupInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DescribeDatasetGroupInput) SetDatasetGroupArn(v string) *DescribeDatasetGroupInput { + s.DatasetGroupArn = &v + return s +} + +type DescribeDatasetGroupOutput struct { + _ struct{} `type:"structure"` + + // When the dataset group was created. + CreationTime *time.Time `type:"timestamp"` + + // An array of Amazon Resource Names (ARNs) of the datasets contained in the + // dataset group. + DatasetArns []*string `type:"list"` + + // The ARN of the dataset group. + DatasetGroupArn *string `type:"string"` + + // The name of the dataset group. + DatasetGroupName *string `min:"1" type:"string"` + + // The domain associated with the dataset group. The Domain and DatasetType + // that you choose determine the fields that must be present in the training + // data that you import to the dataset. For example, if you choose the RETAIL + // domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires + // item_id, timestamp, and demand fields to be present in your data. For more + // information, see howitworks-datasets-groups. + Domain *string `type:"string" enum:"Domain"` + + // When the dataset group was created or last updated from a call to the UpdateDatasetGroup + // operation. While the dataset group is being updated, LastModificationTime + // is the current query time. + LastModificationTime *time.Time `type:"timestamp"` + + // The status of the dataset group. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED + // + // The UPDATE states apply when the UpdateDatasetGroup operation is called. + // + // The Status of the dataset group must be ACTIVE before creating a predictor + // using the dataset group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetGroupOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeDatasetGroupOutput) SetCreationTime(v time.Time) *DescribeDatasetGroupOutput { + s.CreationTime = &v + return s +} + +// SetDatasetArns sets the DatasetArns field's value. +func (s *DescribeDatasetGroupOutput) SetDatasetArns(v []*string) *DescribeDatasetGroupOutput { + s.DatasetArns = v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DescribeDatasetGroupOutput) SetDatasetGroupArn(v string) *DescribeDatasetGroupOutput { + s.DatasetGroupArn = &v + return s +} + +// SetDatasetGroupName sets the DatasetGroupName field's value. +func (s *DescribeDatasetGroupOutput) SetDatasetGroupName(v string) *DescribeDatasetGroupOutput { + s.DatasetGroupName = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *DescribeDatasetGroupOutput) SetDomain(v string) *DescribeDatasetGroupOutput { + s.Domain = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. 
+func (s *DescribeDatasetGroupOutput) SetLastModificationTime(v time.Time) *DescribeDatasetGroupOutput { + s.LastModificationTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDatasetGroupOutput) SetStatus(v string) *DescribeDatasetGroupOutput { + s.Status = &v + return s +} + +type DescribeDatasetImportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset import job. + // + // DatasetImportJobArn is a required field + DatasetImportJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetImportJobInput"} + if s.DatasetImportJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetImportJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *DescribeDatasetImportJobInput) SetDatasetImportJobArn(v string) *DescribeDatasetImportJobInput { + s.DatasetImportJobArn = &v + return s +} + +type DescribeDatasetImportJobOutput struct { + _ struct{} `type:"structure"` + + // When the dataset import job was created. + CreationTime *time.Time `type:"timestamp"` + + // The size of the dataset in gigabytes (GB) after completion of the import + // job. + DataSize *float64 `type:"double"` + + // The location of the training data to import. The training data must be stored + // in an Amazon S3 bucket. + DataSource *DataSource `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset that the training data was + // imported to. + DatasetArn *string `type:"string"` + + // The ARN of the dataset import job. + DatasetImportJobArn *string `type:"string"` + + // The name of the dataset import job. + DatasetImportJobName *string `min:"1" type:"string"` + + // Statistical information about each field in the input data. + FieldStatistics map[string]*Statistics `type:"map"` + + // Dependent on the status as follows: + // + // * CREATE_PENDING - same as CreationTime + // + // * CREATE_IN_PROGRESS - the current timestamp + // + // * ACTIVE or CREATE_FAILED - when the job finished or failed + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The status of the dataset import job. The status is reflected in the status + // of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, + // the status of the dataset is UPDATE_IN_PROGRESS. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + Status *string `type:"string"` + + // The format of timestamps in the dataset. Two formats are supported dependent + // on the DataFrequency specified when the dataset was created. 
+ // + // * "yyyy-MM-dd" For data frequencies: Y, M, W, and D + // + // * "yyyy-MM-dd HH:mm:ss" For data frequencies: H, 30min, 15min, and 1min; + // and optionally, for: Y, M, W, and D + TimestampFormat *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDatasetImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetImportJobOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeDatasetImportJobOutput) SetCreationTime(v time.Time) *DescribeDatasetImportJobOutput { + s.CreationTime = &v + return s +} + +// SetDataSize sets the DataSize field's value. +func (s *DescribeDatasetImportJobOutput) SetDataSize(v float64) *DescribeDatasetImportJobOutput { + s.DataSize = &v + return s +} + +// SetDataSource sets the DataSource field's value. +func (s *DescribeDatasetImportJobOutput) SetDataSource(v *DataSource) *DescribeDatasetImportJobOutput { + s.DataSource = v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DescribeDatasetImportJobOutput) SetDatasetArn(v string) *DescribeDatasetImportJobOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *DescribeDatasetImportJobOutput) SetDatasetImportJobArn(v string) *DescribeDatasetImportJobOutput { + s.DatasetImportJobArn = &v + return s +} + +// SetDatasetImportJobName sets the DatasetImportJobName field's value. +func (s *DescribeDatasetImportJobOutput) SetDatasetImportJobName(v string) *DescribeDatasetImportJobOutput { + s.DatasetImportJobName = &v + return s +} + +// SetFieldStatistics sets the FieldStatistics field's value. +func (s *DescribeDatasetImportJobOutput) SetFieldStatistics(v map[string]*Statistics) *DescribeDatasetImportJobOutput { + s.FieldStatistics = v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DescribeDatasetImportJobOutput) SetLastModificationTime(v time.Time) *DescribeDatasetImportJobOutput { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DescribeDatasetImportJobOutput) SetMessage(v string) *DescribeDatasetImportJobOutput { + s.Message = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDatasetImportJobOutput) SetStatus(v string) *DescribeDatasetImportJobOutput { + s.Status = &v + return s +} + +// SetTimestampFormat sets the TimestampFormat field's value. +func (s *DescribeDatasetImportJobOutput) SetTimestampFormat(v string) *DescribeDatasetImportJobOutput { + s.TimestampFormat = &v + return s +} + +type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset. + // + // DatasetArn is a required field + DatasetArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetInput"} + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DescribeDatasetInput) SetDatasetArn(v string) *DescribeDatasetInput { + s.DatasetArn = &v + return s +} + +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // When the dataset was created. + CreationTime *time.Time `type:"timestamp"` + + // The frequency of data collection. + // + // Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min + // (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and + // 1min (1 minute). For example, "M" indicates every month and "30min" indicates + // every 30 minutes. + DataFrequency *string `type:"string"` + + // The Amazon Resource Name (ARN) of the dataset. + DatasetArn *string `type:"string"` + + // The name of the dataset. + DatasetName *string `min:"1" type:"string"` + + // The dataset type. + DatasetType *string `type:"string" enum:"DatasetType"` + + // The dataset domain. + Domain *string `type:"string" enum:"Domain"` + + // An AWS Key Management Service (KMS) key and the AWS Identity and Access Management + // (IAM) role that Amazon Forecast can assume to access the key. + EncryptionConfig *EncryptionConfig `type:"structure"` + + // When the dataset is created, LastModificationTime is the same as CreationTime. + // After a CreateDatasetImportJob operation is called, LastModificationTime + // is when the import job finished or failed. While data is being imported to + // the dataset, LastModificationTime is the current query time. + LastModificationTime *time.Time `type:"timestamp"` + + // An array of SchemaAttribute objects that specify the dataset fields. Each + // SchemaAttribute specifies the name and data type of a field. + Schema *Schema `type:"structure"` + + // The status of the dataset. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED + // + // The UPDATE states apply while data is imported to the dataset from a call + // to the CreateDatasetImportJob operation. During this time, the status reflects + // the status of the dataset import job. For example, when the import job status + // is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. + // + // The Status of the dataset must be ACTIVE before you can import training data. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeDatasetOutput) SetCreationTime(v time.Time) *DescribeDatasetOutput { + s.CreationTime = &v + return s +} + +// SetDataFrequency sets the DataFrequency field's value. +func (s *DescribeDatasetOutput) SetDataFrequency(v string) *DescribeDatasetOutput { + s.DataFrequency = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. 
+func (s *DescribeDatasetOutput) SetDatasetArn(v string) *DescribeDatasetOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DescribeDatasetOutput) SetDatasetName(v string) *DescribeDatasetOutput { + s.DatasetName = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *DescribeDatasetOutput) SetDatasetType(v string) *DescribeDatasetOutput { + s.DatasetType = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *DescribeDatasetOutput) SetDomain(v string) *DescribeDatasetOutput { + s.Domain = &v + return s +} + +// SetEncryptionConfig sets the EncryptionConfig field's value. +func (s *DescribeDatasetOutput) SetEncryptionConfig(v *EncryptionConfig) *DescribeDatasetOutput { + s.EncryptionConfig = v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DescribeDatasetOutput) SetLastModificationTime(v time.Time) *DescribeDatasetOutput { + s.LastModificationTime = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *DescribeDatasetOutput) SetSchema(v *Schema) *DescribeDatasetOutput { + s.Schema = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDatasetOutput) SetStatus(v string) *DescribeDatasetOutput { + s.Status = &v + return s +} + +type DescribeForecastExportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast export job. + // + // ForecastExportJobArn is a required field + ForecastExportJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeForecastExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeForecastExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeForecastExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeForecastExportJobInput"} + if s.ForecastExportJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastExportJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForecastExportJobArn sets the ForecastExportJobArn field's value. +func (s *DescribeForecastExportJobInput) SetForecastExportJobArn(v string) *DescribeForecastExportJobInput { + s.ForecastExportJobArn = &v + return s +} + +type DescribeForecastExportJobOutput struct { + _ struct{} `type:"structure"` + + // When the forecast export job was created. + CreationTime *time.Time `type:"timestamp"` + + // The path to the AWS S3 bucket where the forecast is exported. + Destination *DataDestination `type:"structure"` + + // The Amazon Resource Name (ARN) of the exported forecast. + ForecastArn *string `type:"string"` + + // The ARN of the forecast export job. + ForecastExportJobArn *string `type:"string"` + + // The name of the forecast export job. + ForecastExportJobName *string `min:"1" type:"string"` + + // When the last successful export job finished. + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The status of the forecast export job. 
One of the following states: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // The Status of the forecast export job must be ACTIVE before you can access + // the forecast in your Amazon S3 bucket. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DescribeForecastExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeForecastExportJobOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeForecastExportJobOutput) SetCreationTime(v time.Time) *DescribeForecastExportJobOutput { + s.CreationTime = &v + return s +} + +// SetDestination sets the Destination field's value. +func (s *DescribeForecastExportJobOutput) SetDestination(v *DataDestination) *DescribeForecastExportJobOutput { + s.Destination = v + return s +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *DescribeForecastExportJobOutput) SetForecastArn(v string) *DescribeForecastExportJobOutput { + s.ForecastArn = &v + return s +} + +// SetForecastExportJobArn sets the ForecastExportJobArn field's value. +func (s *DescribeForecastExportJobOutput) SetForecastExportJobArn(v string) *DescribeForecastExportJobOutput { + s.ForecastExportJobArn = &v + return s +} + +// SetForecastExportJobName sets the ForecastExportJobName field's value. +func (s *DescribeForecastExportJobOutput) SetForecastExportJobName(v string) *DescribeForecastExportJobOutput { + s.ForecastExportJobName = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DescribeForecastExportJobOutput) SetLastModificationTime(v time.Time) *DescribeForecastExportJobOutput { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DescribeForecastExportJobOutput) SetMessage(v string) *DescribeForecastExportJobOutput { + s.Message = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeForecastExportJobOutput) SetStatus(v string) *DescribeForecastExportJobOutput { + s.Status = &v + return s +} + +type DescribeForecastInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast. + // + // ForecastArn is a required field + ForecastArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeForecastInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeForecastInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeForecastInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeForecastInput"} + if s.ForecastArn == nil { + invalidParams.Add(request.NewErrParamRequired("ForecastArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *DescribeForecastInput) SetForecastArn(v string) *DescribeForecastInput { + s.ForecastArn = &v + return s +} + +type DescribeForecastOutput struct { + _ struct{} `type:"structure"` + + // When the forecast creation task was created. 
+ CreationTime *time.Time `type:"timestamp"` + + // The ARN of the dataset group that provided the data used to train the predictor. + DatasetGroupArn *string `type:"string"` + + // The same forecast ARN as given in the request. + ForecastArn *string `type:"string"` + + // The name of the forecast. + ForecastName *string `min:"1" type:"string"` + + // Initially, the same as CreationTime (status is CREATE_PENDING). Updated when + // inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), + // and when inference is complete (status changed to ACTIVE) or fails (status + // changed to CREATE_FAILED). + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The ARN of the predictor used to generate the forecast. + PredictorArn *string `type:"string"` + + // The status of the forecast. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // The Status of the forecast must be ACTIVE before you can query or export + // the forecast. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DescribeForecastOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeForecastOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeForecastOutput) SetCreationTime(v time.Time) *DescribeForecastOutput { + s.CreationTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DescribeForecastOutput) SetDatasetGroupArn(v string) *DescribeForecastOutput { + s.DatasetGroupArn = &v + return s +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *DescribeForecastOutput) SetForecastArn(v string) *DescribeForecastOutput { + s.ForecastArn = &v + return s +} + +// SetForecastName sets the ForecastName field's value. +func (s *DescribeForecastOutput) SetForecastName(v string) *DescribeForecastOutput { + s.ForecastName = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DescribeForecastOutput) SetLastModificationTime(v time.Time) *DescribeForecastOutput { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DescribeForecastOutput) SetMessage(v string) *DescribeForecastOutput { + s.Message = &v + return s +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *DescribeForecastOutput) SetPredictorArn(v string) *DescribeForecastOutput { + s.PredictorArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeForecastOutput) SetStatus(v string) *DescribeForecastOutput { + s.Status = &v + return s +} + +type DescribePredictorInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the predictor that you want information + // about. + // + // PredictorArn is a required field + PredictorArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribePredictorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePredictorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePredictorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePredictorInput"} + if s.PredictorArn == nil { + invalidParams.Add(request.NewErrParamRequired("PredictorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *DescribePredictorInput) SetPredictorArn(v string) *DescribePredictorInput { + s.PredictorArn = &v + return s +} + +type DescribePredictorOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the algorithm used for model training. + AlgorithmArn *string `type:"string"` + + // When PerformAutoML is specified, the ARN of the chosen algorithm. + AutoMLAlgorithmArns []*string `type:"list"` + + // When the model training task was created. + CreationTime *time.Time `type:"timestamp"` + + // An array of ARNs of the dataset import jobs used to import training data + // for the predictor. + DatasetImportJobArns []*string `type:"list"` + + // An AWS Key Management Service (KMS) key and the AWS Identity and Access Management + // (IAM) role that Amazon Forecast can assume to access the key. + EncryptionConfig *EncryptionConfig `type:"structure"` + + // Used to override the default evaluation parameters of the specified algorithm. + // Amazon Forecast evaluates a predictor by splitting a dataset into training + // data and testing data. The evaluation parameters define how to perform the + // split and the number of iterations. + EvaluationParameters *EvaluationParameters `type:"structure"` + + // The featurization configuration. + FeaturizationConfig *FeaturizationConfig `type:"structure"` + + // The number of time-steps of the forecast. The forecast horizon is also called + // the prediction length. + ForecastHorizon *int64 `type:"integer"` + + // The hyperparameter override values for the algorithm. + HPOConfig *HyperParameterTuningJobConfig `type:"structure"` + + // Describes the dataset group that contains the data to use to train the predictor. + InputDataConfig *InputDataConfig `type:"structure"` + + // Initially, the same as CreationTime (status is CREATE_PENDING). Updated when + // training starts (status changed to CREATE_IN_PROGRESS), and when training + // is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED). + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // Whether the predictor is set to perform AutoML. + PerformAutoML *bool `type:"boolean"` + + // Whether the predictor is set to perform HPO. + PerformHPO *bool `type:"boolean"` + + // The ARN of the predictor. + PredictorArn *string `min:"1" type:"string"` + + // The name of the predictor. + PredictorName *string `min:"1" type:"string"` + + // The status of the predictor. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED + // + // The Status of the predictor must be ACTIVE before using the predictor to + // create a forecast. + Status *string `type:"string"` + + // The training parameters to override for model training. The parameters that + // you can override are listed in the individual algorithms in aws-forecast-choosing-recipes. 
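+	//
+	// For example, a returned map might look as follows (a sketch; the keys and
+	// values shown are illustrative assumptions, and the valid parameters depend
+	// on the algorithm):
+	//
+	//    map[string]*string{
+	//        "epochs":        aws.String("500"),
+	//        "learning_rate": aws.String("1e-3"),
+	//    }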
+ TrainingParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s DescribePredictorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePredictorOutput) GoString() string { + return s.String() +} + +// SetAlgorithmArn sets the AlgorithmArn field's value. +func (s *DescribePredictorOutput) SetAlgorithmArn(v string) *DescribePredictorOutput { + s.AlgorithmArn = &v + return s +} + +// SetAutoMLAlgorithmArns sets the AutoMLAlgorithmArns field's value. +func (s *DescribePredictorOutput) SetAutoMLAlgorithmArns(v []*string) *DescribePredictorOutput { + s.AutoMLAlgorithmArns = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribePredictorOutput) SetCreationTime(v time.Time) *DescribePredictorOutput { + s.CreationTime = &v + return s +} + +// SetDatasetImportJobArns sets the DatasetImportJobArns field's value. +func (s *DescribePredictorOutput) SetDatasetImportJobArns(v []*string) *DescribePredictorOutput { + s.DatasetImportJobArns = v + return s +} + +// SetEncryptionConfig sets the EncryptionConfig field's value. +func (s *DescribePredictorOutput) SetEncryptionConfig(v *EncryptionConfig) *DescribePredictorOutput { + s.EncryptionConfig = v + return s +} + +// SetEvaluationParameters sets the EvaluationParameters field's value. +func (s *DescribePredictorOutput) SetEvaluationParameters(v *EvaluationParameters) *DescribePredictorOutput { + s.EvaluationParameters = v + return s +} + +// SetFeaturizationConfig sets the FeaturizationConfig field's value. +func (s *DescribePredictorOutput) SetFeaturizationConfig(v *FeaturizationConfig) *DescribePredictorOutput { + s.FeaturizationConfig = v + return s +} + +// SetForecastHorizon sets the ForecastHorizon field's value. +func (s *DescribePredictorOutput) SetForecastHorizon(v int64) *DescribePredictorOutput { + s.ForecastHorizon = &v + return s +} + +// SetHPOConfig sets the HPOConfig field's value. +func (s *DescribePredictorOutput) SetHPOConfig(v *HyperParameterTuningJobConfig) *DescribePredictorOutput { + s.HPOConfig = v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *DescribePredictorOutput) SetInputDataConfig(v *InputDataConfig) *DescribePredictorOutput { + s.InputDataConfig = v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *DescribePredictorOutput) SetLastModificationTime(v time.Time) *DescribePredictorOutput { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DescribePredictorOutput) SetMessage(v string) *DescribePredictorOutput { + s.Message = &v + return s +} + +// SetPerformAutoML sets the PerformAutoML field's value. +func (s *DescribePredictorOutput) SetPerformAutoML(v bool) *DescribePredictorOutput { + s.PerformAutoML = &v + return s +} + +// SetPerformHPO sets the PerformHPO field's value. +func (s *DescribePredictorOutput) SetPerformHPO(v bool) *DescribePredictorOutput { + s.PerformHPO = &v + return s +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *DescribePredictorOutput) SetPredictorArn(v string) *DescribePredictorOutput { + s.PredictorArn = &v + return s +} + +// SetPredictorName sets the PredictorName field's value. +func (s *DescribePredictorOutput) SetPredictorName(v string) *DescribePredictorOutput { + s.PredictorName = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *DescribePredictorOutput) SetStatus(v string) *DescribePredictorOutput {
+	s.Status = &v
+	return s
+}
+
+// SetTrainingParameters sets the TrainingParameters field's value.
+func (s *DescribePredictorOutput) SetTrainingParameters(v map[string]*string) *DescribePredictorOutput {
+	s.TrainingParameters = v
+	return s
+}
+
+// An AWS Key Management Service (KMS) key and an AWS Identity and Access Management
+// (IAM) role that Amazon Forecast can assume to access the key. This object
+// is optionally submitted in the CreateDataset and CreatePredictor requests.
+type EncryptionConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.
+	//
+	// KMSKeyArn is a required field
+	KMSKeyArn *string `type:"string" required:"true"`
+
+	// The ARN of the AWS Identity and Access Management (IAM) role that Amazon
+	// Forecast can assume to access the AWS KMS key.
+	//
+	// Cross-account pass role is not allowed. If you pass a role that doesn't belong
+	// to your account, an InvalidInputException is thrown.
+	//
+	// RoleArn is a required field
+	RoleArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EncryptionConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EncryptionConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EncryptionConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EncryptionConfig"}
+	if s.KMSKeyArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("KMSKeyArn"))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKMSKeyArn sets the KMSKeyArn field's value.
+func (s *EncryptionConfig) SetKMSKeyArn(v string) *EncryptionConfig {
+	s.KMSKeyArn = &v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *EncryptionConfig) SetRoleArn(v string) *EncryptionConfig {
+	s.RoleArn = &v
+	return s
+}
+
+// Parameters that define how to split a dataset into training data and testing
+// data, and the number of iterations to perform. These parameters are specified
+// in the predefined algorithms and can be overridden in the CreatePredictor
+// request.
+//
+// For example, suppose that you have a dataset with data collection frequency
+// set to every day and you have 200 days' worth of data (that is, 200 data points).
+// Now suppose that you set the NumberOfBacktestWindows to 2 and the BackTestWindowOffset
+// parameter to 20. The algorithm splits the data twice. The first time, the
+// algorithm trains the model using the first 180 data points and uses the last
+// 20 data points for evaluation. The second time, the algorithm trains the
+// model using the first 160 data points and uses the last 40 data points for
+// evaluation.
+type EvaluationParameters struct {
+	_ struct{} `type:"structure"`
+
+	// The point from the end of the dataset where you want to split the data for
+	// model training and evaluation. The value is specified as the number of data
+	// points.
+	BackTestWindowOffset *int64 `type:"integer"`
+
+	// The number of times to split the input data. The default is 1. The range
+	// is 1 through 5.
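+	//
+	// For example, a minimal sketch of the two-window split described above
+	// (NumberOfBacktestWindows of 2, BackTestWindowOffset of 20), as it could be
+	// passed along with a CreatePredictor request:
+	//
+	//    params := &forecastservice.EvaluationParameters{
+	//        BackTestWindowOffset:    aws.Int64(20),
+	//        NumberOfBacktestWindows: aws.Int64(2),
+	//    }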
+	NumberOfBacktestWindows *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s EvaluationParameters) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EvaluationParameters) GoString() string {
+	return s.String()
+}
+
+// SetBackTestWindowOffset sets the BackTestWindowOffset field's value.
+func (s *EvaluationParameters) SetBackTestWindowOffset(v int64) *EvaluationParameters {
+	s.BackTestWindowOffset = &v
+	return s
+}
+
+// SetNumberOfBacktestWindows sets the NumberOfBacktestWindows field's value.
+func (s *EvaluationParameters) SetNumberOfBacktestWindows(v int64) *EvaluationParameters {
+	s.NumberOfBacktestWindows = &v
+	return s
+}
+
+// The results of evaluating an algorithm. Returned as part of the GetAccuracyMetrics
+// response.
+type EvaluationResult struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the algorithm that was evaluated.
+	AlgorithmArn *string `type:"string"`
+
+	// The array of test windows used for evaluating the algorithm. The NumberOfBacktestWindows
+	// from the EvaluationParameters object determines the number of windows in
+	// the array.
+	TestWindows []*WindowSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s EvaluationResult) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EvaluationResult) GoString() string {
+	return s.String()
+}
+
+// SetAlgorithmArn sets the AlgorithmArn field's value.
+func (s *EvaluationResult) SetAlgorithmArn(v string) *EvaluationResult {
+	s.AlgorithmArn = &v
+	return s
+}
+
+// SetTestWindows sets the TestWindows field's value.
+func (s *EvaluationResult) SetTestWindows(v []*WindowSummary) *EvaluationResult {
+	s.TestWindows = v
+	return s
+}
+
+// Provides featurization (transformation) information for a dataset field.
+// This object is part of the FeaturizationConfig object.
+//
+// For example:
+//
+// {
+//
+// "AttributeName": "demand",
+//
+// "FeaturizationPipeline": [ {
+//
+// "FeaturizationMethodName": "filling",
+//
+// "FeaturizationMethodParameters": {"aggregation": "avg", "backfill": "nan"}
+//
+// } ]
+//
+// }
+type Featurization struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the schema attribute specifying the data field to be featurized.
+	// In this release, only the target field of the TARGET_TIME_SERIES dataset
+	// type is supported. For example, for the RETAIL domain, the target is demand,
+	// and for the CUSTOM domain, the target is target_value.
+	//
+	// AttributeName is a required field
+	AttributeName *string `min:"1" type:"string" required:"true"`
+
+	// An array of FeaturizationMethod objects that specifies the feature transformation
+	// methods. For this release, the number of methods is limited to one.
+	FeaturizationPipeline []*FeaturizationMethod `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s Featurization) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Featurization) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Featurization) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Featurization"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.FeaturizationPipeline != nil && len(s.FeaturizationPipeline) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FeaturizationPipeline", 1)) + } + if s.FeaturizationPipeline != nil { + for i, v := range s.FeaturizationPipeline { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FeaturizationPipeline", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *Featurization) SetAttributeName(v string) *Featurization { + s.AttributeName = &v + return s +} + +// SetFeaturizationPipeline sets the FeaturizationPipeline field's value. +func (s *Featurization) SetFeaturizationPipeline(v []*FeaturizationMethod) *Featurization { + s.FeaturizationPipeline = v + return s +} + +// In a CreatePredictor operation, the specified algorithm trains a model using +// the specified dataset group. You can optionally tell the operation to modify +// data fields prior to training a model. These modifications are referred to +// as featurization. +// +// You define featurization using the FeaturizationConfig object. You specify +// an array of transformations, one for each field that you want to featurize. +// You then include the FeaturizationConfig in your CreatePredictor request. +// Amazon Forecast applies the featurization to the TARGET_TIME_SERIES dataset +// before model training. +// +// You can create multiple featurization configurations. For example, you might +// call the CreatePredictor operation twice by specifying different featurization +// configurations. +type FeaturizationConfig struct { + _ struct{} `type:"structure"` + + // An array of featurization (transformation) information for the fields of + // a dataset. In this release, only a single featurization is supported. + Featurizations []*Featurization `min:"1" type:"list"` + + // An array of dimension (field) names that specify how to group the generated + // forecast. + // + // For example, suppose that you are generating a forecast for item sales across + // all of your stores, and your dataset contains a store_id field. If you want + // the sales forecast for each item by store, you would specify store_id as + // the dimension. + ForecastDimensions []*string `min:"1" type:"list"` + + // The frequency of predictions in a forecast. + // + // Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min + // (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and + // 1min (1 minute). For example, "Y" indicates every year and "5min" indicates + // every five minutes. + // + // ForecastFrequency is a required field + ForecastFrequency *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s FeaturizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FeaturizationConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FeaturizationConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "FeaturizationConfig"}
+	if s.Featurizations != nil && len(s.Featurizations) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Featurizations", 1))
+	}
+	if s.ForecastDimensions != nil && len(s.ForecastDimensions) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ForecastDimensions", 1))
+	}
+	if s.ForecastFrequency == nil {
+		invalidParams.Add(request.NewErrParamRequired("ForecastFrequency"))
+	}
+	if s.Featurizations != nil {
+		for i, v := range s.Featurizations {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Featurizations", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFeaturizations sets the Featurizations field's value.
+func (s *FeaturizationConfig) SetFeaturizations(v []*Featurization) *FeaturizationConfig {
+	s.Featurizations = v
+	return s
+}
+
+// SetForecastDimensions sets the ForecastDimensions field's value.
+func (s *FeaturizationConfig) SetForecastDimensions(v []*string) *FeaturizationConfig {
+	s.ForecastDimensions = v
+	return s
+}
+
+// SetForecastFrequency sets the ForecastFrequency field's value.
+func (s *FeaturizationConfig) SetForecastFrequency(v string) *FeaturizationConfig {
+	s.ForecastFrequency = &v
+	return s
+}
+
+// Provides information about a method that featurizes (transforms) a dataset
+// field. The method is part of the FeaturizationPipeline of the Featurization
+// object. If FeaturizationMethodParameters isn't specified, Amazon Forecast
+// uses default parameters.
+//
+// For example:
+//
+// {
+//
+// "FeaturizationMethodName": "filling",
+//
+// "FeaturizationMethodParameters": {"aggregation": "avg", "backfill": "nan"}
+//
+// }
+type FeaturizationMethod struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the method. In this release, "filling" is the only supported
+	// method.
+	//
+	// FeaturizationMethodName is a required field
+	FeaturizationMethodName *string `type:"string" required:"true" enum:"FeaturizationMethodName"`
+
+	// The method parameters (key-value pairs). Specify these to override the default
+	// values. The following list shows the parameters and their valid values. In
+	// each case, the default value is listed first.
+	//
+	// * aggregation: sum, avg, first, min, max
+	//
+	// * frontfill: none
+	//
+	// * middlefill: zero, nan (not a number)
+	//
+	// * backfill: zero, nan
+	FeaturizationMethodParameters map[string]*string `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s FeaturizationMethod) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FeaturizationMethod) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *FeaturizationMethod) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "FeaturizationMethod"}
+	if s.FeaturizationMethodName == nil {
+		invalidParams.Add(request.NewErrParamRequired("FeaturizationMethodName"))
+	}
+	if s.FeaturizationMethodParameters != nil && len(s.FeaturizationMethodParameters) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("FeaturizationMethodParameters", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFeaturizationMethodName sets the FeaturizationMethodName field's value.
+func (s *FeaturizationMethod) SetFeaturizationMethodName(v string) *FeaturizationMethod { + s.FeaturizationMethodName = &v + return s +} + +// SetFeaturizationMethodParameters sets the FeaturizationMethodParameters field's value. +func (s *FeaturizationMethod) SetFeaturizationMethodParameters(v map[string]*string) *FeaturizationMethod { + s.FeaturizationMethodParameters = v + return s +} + +// Describes a filter for choosing a subset of objects. Each filter consists +// of a condition and a match statement. The condition is either IS or IS_NOT, +// which specifies whether to include or exclude, respectively, the objects +// that match the statement. The match statement consists of a key and a value. +type Filter struct { + _ struct{} `type:"structure"` + + // The condition to apply. + // + // Condition is a required field + Condition *string `type:"string" required:"true" enum:"FilterConditionString"` + + // The name of the parameter to filter on. + // + // Key is a required field + Key *string `type:"string" required:"true"` + + // A valid value for Key. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Condition == nil { + invalidParams.Add(request.NewErrParamRequired("Condition")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *Filter) SetCondition(v string) *Filter { + s.Condition = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Filter) SetKey(v string) *Filter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Filter) SetValue(v string) *Filter { + s.Value = &v + return s +} + +// Provides a summary of the forecast export job properties used in the ListForecastExportJobs +// operation. To get the complete set of properties, call the DescribeForecastExportJob +// operation, and provide the listed ForecastExportJobArn. +type ForecastExportJobSummary struct { + _ struct{} `type:"structure"` + + // When the forecast export job was created. + CreationTime *time.Time `type:"timestamp"` + + // The path to the S3 bucket where the forecast is stored. + Destination *DataDestination `type:"structure"` + + // The Amazon Resource Name (ARN) of the forecast export job. + ForecastExportJobArn *string `type:"string"` + + // The name of the forecast export job. + ForecastExportJobName *string `min:"1" type:"string"` + + // When the last successful export job finished. + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The status of the forecast export job. 
One of the following states: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // The Status of the forecast export job must be ACTIVE before you can access + // the forecast in your Amazon S3 bucket. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ForecastExportJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ForecastExportJobSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ForecastExportJobSummary) SetCreationTime(v time.Time) *ForecastExportJobSummary { + s.CreationTime = &v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ForecastExportJobSummary) SetDestination(v *DataDestination) *ForecastExportJobSummary { + s.Destination = v + return s +} + +// SetForecastExportJobArn sets the ForecastExportJobArn field's value. +func (s *ForecastExportJobSummary) SetForecastExportJobArn(v string) *ForecastExportJobSummary { + s.ForecastExportJobArn = &v + return s +} + +// SetForecastExportJobName sets the ForecastExportJobName field's value. +func (s *ForecastExportJobSummary) SetForecastExportJobName(v string) *ForecastExportJobSummary { + s.ForecastExportJobName = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *ForecastExportJobSummary) SetLastModificationTime(v time.Time) *ForecastExportJobSummary { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ForecastExportJobSummary) SetMessage(v string) *ForecastExportJobSummary { + s.Message = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ForecastExportJobSummary) SetStatus(v string) *ForecastExportJobSummary { + s.Status = &v + return s +} + +// Provides a summary of the forecast properties used in the ListForecasts operation. +// To get the complete set of properties, call the DescribeForecast operation, +// and provide the listed ForecastArn. +type ForecastSummary struct { + _ struct{} `type:"structure"` + + // When the forecast creation task was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group that provided the data + // used to train the predictor. + DatasetGroupArn *string `type:"string"` + + // The ARN of the forecast. + ForecastArn *string `type:"string"` + + // The name of the forecast. + ForecastName *string `min:"1" type:"string"` + + // Initially, the same as CreationTime (status is CREATE_PENDING). Updated when + // inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), + // and when inference is complete (status changed to ACTIVE) or fails (status + // changed to CREATE_FAILED). + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The ARN of the predictor used to generate the forecast. + PredictorArn *string `type:"string"` + + // The status of the forecast. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // The Status of the forecast must be ACTIVE before you can query or export + // the forecast. 
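+	//
+	// For example, a minimal sketch that waits for this state before querying
+	// the forecast (the session setup and the forecastArn variable are
+	// illustrative assumptions):
+	//
+	//    svc := forecastservice.New(session.Must(session.NewSession()))
+	//    for {
+	//        out, err := svc.DescribeForecast(&forecastservice.DescribeForecastInput{
+	//            ForecastArn: aws.String(forecastArn),
+	//        })
+	//        if err != nil || aws.StringValue(out.Status) == "ACTIVE" {
+	//            break
+	//        }
+	//        time.Sleep(30 * time.Second)
+	//    }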
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s ForecastSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ForecastSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ForecastSummary) SetCreationTime(v time.Time) *ForecastSummary { + s.CreationTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ForecastSummary) SetDatasetGroupArn(v string) *ForecastSummary { + s.DatasetGroupArn = &v + return s +} + +// SetForecastArn sets the ForecastArn field's value. +func (s *ForecastSummary) SetForecastArn(v string) *ForecastSummary { + s.ForecastArn = &v + return s +} + +// SetForecastName sets the ForecastName field's value. +func (s *ForecastSummary) SetForecastName(v string) *ForecastSummary { + s.ForecastName = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *ForecastSummary) SetLastModificationTime(v time.Time) *ForecastSummary { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ForecastSummary) SetMessage(v string) *ForecastSummary { + s.Message = &v + return s +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *ForecastSummary) SetPredictorArn(v string) *ForecastSummary { + s.PredictorArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ForecastSummary) SetStatus(v string) *ForecastSummary { + s.Status = &v + return s +} + +type GetAccuracyMetricsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the predictor to get metrics for. + // + // PredictorArn is a required field + PredictorArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccuracyMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccuracyMetricsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccuracyMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccuracyMetricsInput"} + if s.PredictorArn == nil { + invalidParams.Add(request.NewErrParamRequired("PredictorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *GetAccuracyMetricsInput) SetPredictorArn(v string) *GetAccuracyMetricsInput { + s.PredictorArn = &v + return s +} + +type GetAccuracyMetricsOutput struct { + _ struct{} `type:"structure"` + + // An array of results from evaluating the predictor. + PredictorEvaluationResults []*EvaluationResult `type:"list"` +} + +// String returns the string representation +func (s GetAccuracyMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccuracyMetricsOutput) GoString() string { + return s.String() +} + +// SetPredictorEvaluationResults sets the PredictorEvaluationResults field's value. +func (s *GetAccuracyMetricsOutput) SetPredictorEvaluationResults(v []*EvaluationResult) *GetAccuracyMetricsOutput { + s.PredictorEvaluationResults = v + return s +} + +// Configuration information for a hyperparameter tuning job. This object is +// specified in the CreatePredictor request. 
+//
+// A hyperparameter is a parameter that governs the model training process and
+// is set before training starts, as opposed to a model parameter, which is
+// determined during training. The values of the hyperparameters have an
+// effect on the chosen model parameters.
+//
+// A hyperparameter tuning job is the process of choosing the optimum set of
+// hyperparameter values that optimize a specified metric. This is accomplished
+// by running many training jobs over a range of hyperparameter values. The
+// optimum set of values depends on the algorithm, the training data, and
+// the given metric objective.
+type HyperParameterTuningJobConfig struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the ranges of valid values for the hyperparameters.
+	ParameterRanges *ParameterRanges `type:"structure"`
+}
+
+// String returns the string representation
+func (s HyperParameterTuningJobConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HyperParameterTuningJobConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HyperParameterTuningJobConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HyperParameterTuningJobConfig"}
+	if s.ParameterRanges != nil {
+		if err := s.ParameterRanges.Validate(); err != nil {
+			invalidParams.AddNested("ParameterRanges", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetParameterRanges sets the ParameterRanges field's value.
+func (s *HyperParameterTuningJobConfig) SetParameterRanges(v *ParameterRanges) *HyperParameterTuningJobConfig {
+	s.ParameterRanges = v
+	return s
+}
+
+// The data used to train a predictor. The data includes a dataset group and
+// any supplementary features. This object is specified in the CreatePredictor
+// request.
+type InputDataConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the dataset group.
+	//
+	// DatasetGroupArn is a required field
+	DatasetGroupArn *string `type:"string" required:"true"`
+
+	// An array of supplementary features. For this release, the only supported
+	// feature is a holiday calendar.
+	SupplementaryFeatures []*SupplementaryFeature `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s InputDataConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputDataConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InputDataConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InputDataConfig"}
+	if s.DatasetGroupArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn"))
+	}
+	if s.SupplementaryFeatures != nil && len(s.SupplementaryFeatures) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SupplementaryFeatures", 1))
+	}
+	if s.SupplementaryFeatures != nil {
+		for i, v := range s.SupplementaryFeatures {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SupplementaryFeatures", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDatasetGroupArn sets the DatasetGroupArn field's value.
+func (s *InputDataConfig) SetDatasetGroupArn(v string) *InputDataConfig {
+ s.DatasetGroupArn = &v
+ return s
+}
+
+// SetSupplementaryFeatures sets the SupplementaryFeatures field's value.
+func (s *InputDataConfig) SetSupplementaryFeatures(v []*SupplementaryFeature) *InputDataConfig {
+ s.SupplementaryFeatures = v
+ return s
+}
+
+// Specifies an integer hyperparameter and its range of tunable values. This
+// object is part of the ParameterRanges object.
+type IntegerParameterRange struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum tunable value of the hyperparameter.
+ //
+ // MaxValue is a required field
+ MaxValue *int64 `type:"integer" required:"true"`
+
+ // The minimum tunable value of the hyperparameter.
+ //
+ // MinValue is a required field
+ MinValue *int64 `type:"integer" required:"true"`
+
+ // The name of the hyperparameter to tune.
+ //
+ // Name is a required field
+ Name *string `min:"1" type:"string" required:"true"`
+
+ // The scale that hyperparameter tuning uses to search the hyperparameter range.
+ // For information about choosing a hyperparameter scale, see Hyperparameter
+ // Scaling (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type).
+ // One of the following values:
+ //
+ // Auto
+ //
+ // Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.
+ //
+ // Linear
+ //
+ // Hyperparameter tuning searches the values in the hyperparameter range by
+ // using a linear scale.
+ //
+ // Logarithmic
+ //
+ // Hyperparameter tuning searches the values in the hyperparameter range by
+ // using a logarithmic scale.
+ //
+ // Logarithmic scaling works only for ranges that have values greater than
+ // 0.
+ //
+ // ReverseLogarithmic
+ //
+ // Not supported for IntegerParameterRange.
+ //
+ // Reverse logarithmic scaling works only for ranges that are entirely within
+ // the range 0 <= x < 1.0.
+ ScalingType *string `type:"string" enum:"ScalingType"`
+}
+
+// String returns the string representation
+func (s IntegerParameterRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IntegerParameterRange) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IntegerParameterRange) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IntegerParameterRange"}
+ if s.MaxValue == nil {
+ invalidParams.Add(request.NewErrParamRequired("MaxValue"))
+ }
+ if s.MinValue == nil {
+ invalidParams.Add(request.NewErrParamRequired("MinValue"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxValue sets the MaxValue field's value.
+func (s *IntegerParameterRange) SetMaxValue(v int64) *IntegerParameterRange {
+ s.MaxValue = &v
+ return s
+}
+
+// SetMinValue sets the MinValue field's value.
+func (s *IntegerParameterRange) SetMinValue(v int64) *IntegerParameterRange {
+ s.MinValue = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *IntegerParameterRange) SetName(v string) *IntegerParameterRange {
+ s.Name = &v
+ return s
+}
+
+// SetScalingType sets the ScalingType field's value.
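+
+// Illustrative sketch (not generated code): declaring a tunable integer
+// hyperparameter and wiring it into a HyperParameterTuningJobConfig. The
+// hyperparameter name "context_length" is a placeholder assumption.
+//
+//    r := &forecastservice.IntegerParameterRange{}
+//    r.SetName("context_length").SetMinValue(1).SetMaxValue(64)
+//    r.SetScalingType(forecastservice.ScalingTypeAuto)
+//    ranges := (&forecastservice.ParameterRanges{}).
+//        SetIntegerParameterRanges([]*forecastservice.IntegerParameterRange{r})
+//    cfg := (&forecastservice.HyperParameterTuningJobConfig{}).SetParameterRanges(ranges)
+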
+func (s *IntegerParameterRange) SetScalingType(v string) *IntegerParameterRange {
+ s.ScalingType = &v
+ return s
+}
+
+type ListDatasetGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of items to return in the response.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // If the result of the previous request was truncated, the response includes
+ // a NextToken. To retrieve the next set of results, use the token in the next
+ // request. Tokens expire after 24 hours.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDatasetGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDatasetGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDatasetGroupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDatasetGroupsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListDatasetGroupsInput) SetMaxResults(v int64) *ListDatasetGroupsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDatasetGroupsInput) SetNextToken(v string) *ListDatasetGroupsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListDatasetGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that summarize each dataset group's properties.
+ DatasetGroups []*DatasetGroupSummary `type:"list"`
+
+ // If the response is truncated, Amazon Forecast returns this token. To retrieve
+ // the next set of results, use the token in the next request.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDatasetGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDatasetGroupsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDatasetGroups sets the DatasetGroups field's value.
+func (s *ListDatasetGroupsOutput) SetDatasetGroups(v []*DatasetGroupSummary) *ListDatasetGroupsOutput {
+ s.DatasetGroups = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDatasetGroupsOutput) SetNextToken(v string) *ListDatasetGroupsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type ListDatasetImportJobsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of filters. For each filter, you provide a condition and a match
+ // statement. The condition is either IS or IS_NOT, which specifies whether
+ // to include or exclude, respectively, from the list, the dataset import jobs
+ // that match the statement. The match statement consists of a key and a value.
+ // In this release, Name is the only valid key, which filters on the DatasetImportJobName
+ // property.
+ // + // * Condition - IS or IS_NOT + // + // * Key - Name + // + // * Value - the value to match + // + // For example, to list all dataset import jobs named my_dataset_import_job, + // you would specify: + // + // "Filters": [ { "Condition": "IS", "Key": "Name", "Value": "my_dataset_import_job" + // } ] + Filters []*Filter `type:"list"` + + // The number of items to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // If the result of the previous request was truncated, the response includes + // a NextToken. To retrieve the next set of results, use the token in the next + // request. Tokens expire after 24 hours. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDatasetImportJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetImportJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetImportJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListDatasetImportJobsInput) SetFilters(v []*Filter) *ListDatasetImportJobsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetImportJobsInput) SetMaxResults(v int64) *ListDatasetImportJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetImportJobsInput) SetNextToken(v string) *ListDatasetImportJobsInput { + s.NextToken = &v + return s +} + +type ListDatasetImportJobsOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that summarize each dataset import job's properties. + DatasetImportJobs []*DatasetImportJobSummary `type:"list"` + + // If the response is truncated, Amazon Forecast returns this token. To retrieve + // the next set of results, use the token in the next request. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDatasetImportJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetImportJobsOutput) GoString() string { + return s.String() +} + +// SetDatasetImportJobs sets the DatasetImportJobs field's value. +func (s *ListDatasetImportJobsOutput) SetDatasetImportJobs(v []*DatasetImportJobSummary) *ListDatasetImportJobsOutput { + s.DatasetImportJobs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetImportJobsOutput) SetNextToken(v string) *ListDatasetImportJobsOutput { + s.NextToken = &v + return s +} + +type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // The number of items to return in the response. 
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // If the result of the previous request was truncated, the response includes
+ // a NextToken. To retrieve the next set of results, use the token in the next
+ // request. Tokens expire after 24 hours.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDatasetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDatasetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDatasetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDatasetsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListDatasetsInput) SetMaxResults(v int64) *ListDatasetsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDatasetsInput) SetNextToken(v string) *ListDatasetsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListDatasetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that summarize each dataset's properties.
+ Datasets []*DatasetSummary `type:"list"`
+
+ // If the response is truncated, Amazon Forecast returns this token. To retrieve
+ // the next set of results, use the token in the next request.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDatasetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDatasetsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDatasets sets the Datasets field's value.
+func (s *ListDatasetsOutput) SetDatasets(v []*DatasetSummary) *ListDatasetsOutput {
+ s.Datasets = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDatasetsOutput) SetNextToken(v string) *ListDatasetsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type ListForecastExportJobsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of filters. For each filter, you provide a condition and a match
+ // statement. The condition is either IS or IS_NOT, which specifies whether
+ // to include or exclude, respectively, from the list, the forecast export
+ // jobs that match the statement. The match statement consists of a key and
+ // a value. In this release, Name is the only valid key, which filters on the
+ // ForecastExportJobName property.
+ //
+ // * Condition - IS or IS_NOT
+ //
+ // * Key - Name
+ //
+ // * Value - the value to match
+ //
+ // For example, to list all forecast export jobs named my_forecast_export_job,
+ // you would specify:
+ //
+ // "Filters": [ { "Condition": "IS", "Key": "Name", "Value": "my_forecast_export_job"
+ // } ]
+ Filters []*Filter `type:"list"`
+
+ // The number of items to return in the response.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // If the result of the previous request was truncated, the response includes
+ // a NextToken. To retrieve the next set of results, use the token in the next
+ // request. Tokens expire after 24 hours.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListForecastExportJobsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListForecastExportJobsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListForecastExportJobsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListForecastExportJobsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilters sets the Filters field's value.
+func (s *ListForecastExportJobsInput) SetFilters(v []*Filter) *ListForecastExportJobsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListForecastExportJobsInput) SetMaxResults(v int64) *ListForecastExportJobsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListForecastExportJobsInput) SetNextToken(v string) *ListForecastExportJobsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListForecastExportJobsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of objects that summarize each export job's properties.
+ ForecastExportJobs []*ForecastExportJobSummary `type:"list"`
+
+ // If the response is truncated, Amazon Forecast returns this token. To retrieve
+ // the next set of results, use the token in the next request.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListForecastExportJobsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListForecastExportJobsOutput) GoString() string {
+ return s.String()
+}
+
+// SetForecastExportJobs sets the ForecastExportJobs field's value.
+func (s *ListForecastExportJobsOutput) SetForecastExportJobs(v []*ForecastExportJobSummary) *ListForecastExportJobsOutput {
+ s.ForecastExportJobs = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListForecastExportJobsOutput) SetNextToken(v string) *ListForecastExportJobsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type ListForecastsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of filters. For each filter, you provide a condition and a match
+ // statement. The condition is either IS or IS_NOT, which specifies whether
+ // to include or exclude, respectively, from the list, the forecasts that match
+ // the statement. The match statement consists of a key and a value. In this
+ // release, Name is the only valid key, which filters on the ForecastName property.
+ //
+ // * Condition - IS or IS_NOT
+ //
+ // * Key - Name
+ //
+ // * Value - the value to match
+ //
+ // For example, to list all forecasts named my_forecast, you would specify:
+ //
+ // "Filters": [ { "Condition": "IS", "Key": "Name", "Value": "my_forecast" }
+ // ]
+ Filters []*Filter `type:"list"`
+
+ // The number of items to return in the response.
+ MaxResults *int64 `min:"1" type:"integer"` + + // If the result of the previous request was truncated, the response includes + // a NextToken. To retrieve the next set of results, use the token in the next + // request. Tokens expire after 24 hours. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListForecastsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListForecastsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListForecastsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListForecastsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListForecastsInput) SetFilters(v []*Filter) *ListForecastsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListForecastsInput) SetMaxResults(v int64) *ListForecastsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListForecastsInput) SetNextToken(v string) *ListForecastsInput { + s.NextToken = &v + return s +} + +type ListForecastsOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that summarize each forecast's properties. + Forecasts []*ForecastSummary `type:"list"` + + // If the response is truncated, Amazon Forecast returns this token. To retrieve + // the next set of results, use the token in the next request. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListForecastsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListForecastsOutput) GoString() string { + return s.String() +} + +// SetForecasts sets the Forecasts field's value. +func (s *ListForecastsOutput) SetForecasts(v []*ForecastSummary) *ListForecastsOutput { + s.Forecasts = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListForecastsOutput) SetNextToken(v string) *ListForecastsOutput { + s.NextToken = &v + return s +} + +type ListPredictorsInput struct { + _ struct{} `type:"structure"` + + // An array of filters. For each filter, you provide a condition and a match + // statement. The condition is either IS or IS_NOT, which specifies whether + // to include or exclude, respectively, from the list, the predictors that match + // the statement. The match statement consists of a key and a value. In this + // release, Name is the only valid key, which filters on the PredictorName property. 
+ // + // * Condition - IS or IS_NOT + // + // * Key - Name + // + // * Value - the value to match + // + // For example, to list all predictors named my_predictor, you would specify: + // + // "Filters": [ { "Condition": "IS", "Key": "Name", "Value": "my_predictor" + // } ] + Filters []*Filter `type:"list"` + + // The number of items to return in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // If the result of the previous request was truncated, the response includes + // a NextToken. To retrieve the next set of results, use the token in the next + // request. Tokens expire after 24 hours. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListPredictorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPredictorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPredictorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPredictorsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListPredictorsInput) SetFilters(v []*Filter) *ListPredictorsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListPredictorsInput) SetMaxResults(v int64) *ListPredictorsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPredictorsInput) SetNextToken(v string) *ListPredictorsInput { + s.NextToken = &v + return s +} + +type ListPredictorsOutput struct { + _ struct{} `type:"structure"` + + // If the response is truncated, Amazon Forecast returns this token. To retrieve + // the next set of results, use the token in the next request. + NextToken *string `min:"1" type:"string"` + + // An array of objects that summarize each predictor's properties. + Predictors []*PredictorSummary `type:"list"` +} + +// String returns the string representation +func (s ListPredictorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPredictorsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPredictorsOutput) SetNextToken(v string) *ListPredictorsOutput { + s.NextToken = &v + return s +} + +// SetPredictors sets the Predictors field's value. +func (s *ListPredictorsOutput) SetPredictors(v []*PredictorSummary) *ListPredictorsOutput { + s.Predictors = v + return s +} + +// Provides metrics used to evaluate the performance of a predictor. This object +// is part of the WindowSummary object. +type Metrics struct { + _ struct{} `type:"structure"` + + // The root mean square error (RMSE). + RMSE *float64 `type:"double"` + + // An array of weighted quantile losses. Quantiles divide a probability distribution + // into regions of equal probability. 
The distribution in this case is the loss
+ // function.
+ WeightedQuantileLosses []*WeightedQuantileLoss `type:"list"`
+}
+
+// String returns the string representation
+func (s Metrics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Metrics) GoString() string {
+ return s.String()
+}
+
+// SetRMSE sets the RMSE field's value.
+func (s *Metrics) SetRMSE(v float64) *Metrics {
+ s.RMSE = &v
+ return s
+}
+
+// SetWeightedQuantileLosses sets the WeightedQuantileLosses field's value.
+func (s *Metrics) SetWeightedQuantileLosses(v []*WeightedQuantileLoss) *Metrics {
+ s.WeightedQuantileLosses = v
+ return s
+}
+
+// Specifies the categorical, continuous, and integer hyperparameters, and their
+// ranges of tunable values. The range of tunable values determines which values
+// a hyperparameter tuning job can choose for the specified hyperparameter.
+// This object is part of the HyperParameterTuningJobConfig object.
+type ParameterRanges struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the tunable range for each categorical hyperparameter.
+ CategoricalParameterRanges []*CategoricalParameterRange `min:"1" type:"list"`
+
+ // Specifies the tunable range for each continuous hyperparameter.
+ ContinuousParameterRanges []*ContinuousParameterRange `min:"1" type:"list"`
+
+ // Specifies the tunable range for each integer hyperparameter.
+ IntegerParameterRanges []*IntegerParameterRange `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s ParameterRanges) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterRanges) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ParameterRanges) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ParameterRanges"}
+ if s.CategoricalParameterRanges != nil && len(s.CategoricalParameterRanges) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CategoricalParameterRanges", 1))
+ }
+ if s.ContinuousParameterRanges != nil && len(s.ContinuousParameterRanges) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ContinuousParameterRanges", 1))
+ }
+ if s.IntegerParameterRanges != nil && len(s.IntegerParameterRanges) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IntegerParameterRanges", 1))
+ }
+ if s.CategoricalParameterRanges != nil {
+ for i, v := range s.CategoricalParameterRanges {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CategoricalParameterRanges", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ContinuousParameterRanges != nil {
+ for i, v := range s.ContinuousParameterRanges {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContinuousParameterRanges", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.IntegerParameterRanges != nil {
+ for i, v := range s.IntegerParameterRanges {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IntegerParameterRanges", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCategoricalParameterRanges sets the CategoricalParameterRanges field's value.
+func (s *ParameterRanges) SetCategoricalParameterRanges(v []*CategoricalParameterRange) *ParameterRanges { + s.CategoricalParameterRanges = v + return s +} + +// SetContinuousParameterRanges sets the ContinuousParameterRanges field's value. +func (s *ParameterRanges) SetContinuousParameterRanges(v []*ContinuousParameterRange) *ParameterRanges { + s.ContinuousParameterRanges = v + return s +} + +// SetIntegerParameterRanges sets the IntegerParameterRanges field's value. +func (s *ParameterRanges) SetIntegerParameterRanges(v []*IntegerParameterRange) *ParameterRanges { + s.IntegerParameterRanges = v + return s +} + +// Provides a summary of the predictor properties used in the ListPredictors +// operation. To get the complete set of properties, call the DescribePredictor +// operation, and provide the listed PredictorArn. +type PredictorSummary struct { + _ struct{} `type:"structure"` + + // When the model training task was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group that contains the data + // used to train the predictor. + DatasetGroupArn *string `type:"string"` + + // Initially, the same as CreationTime (status is CREATE_PENDING). Updated when + // training starts (status changed to CREATE_IN_PROGRESS), and when training + // is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED). + LastModificationTime *time.Time `type:"timestamp"` + + // If an error occurred, an informational message about the error. + Message *string `type:"string"` + + // The ARN of the predictor. + PredictorArn *string `type:"string"` + + // The name of the predictor. + PredictorName *string `min:"1" type:"string"` + + // The status of the predictor. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED + // + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED + // + // The Status of the predictor must be ACTIVE before using the predictor to + // create a forecast. + Status *string `type:"string"` +} + +// String returns the string representation +func (s PredictorSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredictorSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *PredictorSummary) SetCreationTime(v time.Time) *PredictorSummary { + s.CreationTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *PredictorSummary) SetDatasetGroupArn(v string) *PredictorSummary { + s.DatasetGroupArn = &v + return s +} + +// SetLastModificationTime sets the LastModificationTime field's value. +func (s *PredictorSummary) SetLastModificationTime(v time.Time) *PredictorSummary { + s.LastModificationTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *PredictorSummary) SetMessage(v string) *PredictorSummary { + s.Message = &v + return s +} + +// SetPredictorArn sets the PredictorArn field's value. +func (s *PredictorSummary) SetPredictorArn(v string) *PredictorSummary { + s.PredictorArn = &v + return s +} + +// SetPredictorName sets the PredictorName field's value. +func (s *PredictorSummary) SetPredictorName(v string) *PredictorSummary { + s.PredictorName = &v + return s +} + +// SetStatus sets the Status field's value. 
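+
+// Illustrative sketch (not generated code): paging through ListPredictors with
+// NextToken, as the field docs above describe. svc is assumed to be a configured
+// *ForecastService client.
+//
+//    in := &forecastservice.ListPredictorsInput{MaxResults: aws.Int64(25)}
+//    for {
+//        out, err := svc.ListPredictors(in)
+//        if err != nil {
+//            // handle the error and stop paging
+//            break
+//        }
+//        for _, p := range out.Predictors {
+//            fmt.Println(aws.StringValue(p.PredictorArn), aws.StringValue(p.Status))
+//        }
+//        if out.NextToken == nil {
+//            break
+//        }
+//        in.NextToken = out.NextToken
+//    }
+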
+func (s *PredictorSummary) SetStatus(v string) *PredictorSummary { + s.Status = &v + return s +} + +// The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, +// and an AWS Identity and Access Management (IAM) role that Amazon Forecast +// can assume to access the file(s). Optionally, includes an AWS Key Management +// Service (KMS) key. This object is submitted in the CreateDatasetImportJob +// and CreateForecastExportJob requests. +type S3Config struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key. + KMSKeyArn *string `type:"string"` + + // The path to an Amazon Simple Storage Service (Amazon S3) bucket or file(s) + // in an Amazon S3 bucket. + // + // Path is a required field + Path *string `type:"string" required:"true"` + + // The ARN of the AWS Identity and Access Management (IAM) role that Amazon + // Forecast can assume to access the Amazon S3 bucket or file(s). + // + // Cross-account pass role is not allowed. If you pass a role that doesn't belong + // to your account, an InvalidInputException is thrown. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s S3Config) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Config) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Config) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Config"} + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSKeyArn sets the KMSKeyArn field's value. +func (s *S3Config) SetKMSKeyArn(v string) *S3Config { + s.KMSKeyArn = &v + return s +} + +// SetPath sets the Path field's value. +func (s *S3Config) SetPath(v string) *S3Config { + s.Path = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *S3Config) SetRoleArn(v string) *S3Config { + s.RoleArn = &v + return s +} + +// Defines the fields of a dataset. This object is specified in the CreateDataset +// request. +type Schema struct { + _ struct{} `type:"structure"` + + // An array of attributes specifying the name and type of each field in a dataset. + Attributes []*SchemaAttribute `type:"list"` +} + +// String returns the string representation +func (s Schema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Schema) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Schema) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Schema"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *Schema) SetAttributes(v []*SchemaAttribute) *Schema { + s.Attributes = v + return s +} + +// An attribute of a schema, which defines a field of a dataset. 
A schema attribute +// is required for every field in a dataset. The Schema object contains an array +// of SchemaAttribute objects. +type SchemaAttribute struct { + _ struct{} `type:"structure"` + + // The name of the dataset field. + AttributeName *string `min:"1" type:"string"` + + // The data type of the field. + AttributeType *string `type:"string" enum:"AttributeType"` +} + +// String returns the string representation +func (s SchemaAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SchemaAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SchemaAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SchemaAttribute"} + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *SchemaAttribute) SetAttributeName(v string) *SchemaAttribute { + s.AttributeName = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *SchemaAttribute) SetAttributeType(v string) *SchemaAttribute { + s.AttributeType = &v + return s +} + +// Provides statistics for each data field imported to an Amazon Forecast dataset +// with the CreateDatasetImportJob operation. +type Statistics struct { + _ struct{} `type:"structure"` + + // For a numeric field, the average value in the field. + Avg *float64 `type:"double"` + + // The number of values in the field. + Count *int64 `type:"integer"` + + // The number of distinct values in the field. + CountDistinct *int64 `type:"integer"` + + // The number of NAN (not a number) values in the field. + CountNan *int64 `type:"integer"` + + // The number of null values in the field. + CountNull *int64 `type:"integer"` + + // For a numeric field, the maximum value in the field. + Max *string `type:"string"` + + // For a numeric field, the minimum value in the field. + Min *string `type:"string"` + + // For a numeric field, the standard deviation. + Stddev *float64 `type:"double"` +} + +// String returns the string representation +func (s Statistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Statistics) GoString() string { + return s.String() +} + +// SetAvg sets the Avg field's value. +func (s *Statistics) SetAvg(v float64) *Statistics { + s.Avg = &v + return s +} + +// SetCount sets the Count field's value. +func (s *Statistics) SetCount(v int64) *Statistics { + s.Count = &v + return s +} + +// SetCountDistinct sets the CountDistinct field's value. +func (s *Statistics) SetCountDistinct(v int64) *Statistics { + s.CountDistinct = &v + return s +} + +// SetCountNan sets the CountNan field's value. +func (s *Statistics) SetCountNan(v int64) *Statistics { + s.CountNan = &v + return s +} + +// SetCountNull sets the CountNull field's value. +func (s *Statistics) SetCountNull(v int64) *Statistics { + s.CountNull = &v + return s +} + +// SetMax sets the Max field's value. +func (s *Statistics) SetMax(v string) *Statistics { + s.Max = &v + return s +} + +// SetMin sets the Min field's value. +func (s *Statistics) SetMin(v string) *Statistics { + s.Min = &v + return s +} + +// SetStddev sets the Stddev field's value. 
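+
+// Illustrative sketch (not generated code): a minimal schema built from the
+// Schema and SchemaAttribute types above. The field names are placeholder
+// assumptions for a target time series dataset.
+//
+//    schema := (&forecastservice.Schema{}).SetAttributes([]*forecastservice.SchemaAttribute{
+//        (&forecastservice.SchemaAttribute{}).
+//            SetAttributeName("timestamp").
+//            SetAttributeType(forecastservice.AttributeTypeTimestamp),
+//        (&forecastservice.SchemaAttribute{}).
+//            SetAttributeName("demand").
+//            SetAttributeType(forecastservice.AttributeTypeFloat),
+//    })
+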
+func (s *Statistics) SetStddev(v float64) *Statistics { + s.Stddev = &v + return s +} + +// Describes a supplementary feature of a dataset group. This object is part +// of the InputDataConfig object. +// +// For this release, the only supported feature is a holiday calendar. If the +// calendar is used, all data should belong to the same country as the calendar. +// For the calendar data, see http://jollyday.sourceforge.net/data.html (http://jollyday.sourceforge.net/data.html). +type SupplementaryFeature struct { + _ struct{} `type:"structure"` + + // The name of the feature. This must be "holiday". + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // One of the following 2 letter country codes: + // + // * "AU" - AUSTRALIA + // + // * "DE" - GERMANY + // + // * "JP" - JAPAN + // + // * "US" - UNITED_STATES + // + // * "UK" - UNITED_KINGDOM + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SupplementaryFeature) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SupplementaryFeature) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SupplementaryFeature) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SupplementaryFeature"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *SupplementaryFeature) SetName(v string) *SupplementaryFeature { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *SupplementaryFeature) SetValue(v string) *SupplementaryFeature { + s.Value = &v + return s +} + +type UpdateDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // An array of Amazon Resource Names (ARNs) of the datasets to add to the dataset + // group. + // + // DatasetArns is a required field + DatasetArns []*string `type:"list" required:"true"` + + // The ARN of the dataset group. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDatasetGroupInput"} + if s.DatasetArns == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArns")) + } + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArns sets the DatasetArns field's value. +func (s *UpdateDatasetGroupInput) SetDatasetArns(v []*string) *UpdateDatasetGroupInput { + s.DatasetArns = v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. 
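+
+// Illustrative sketch (not generated code): adding a dataset to a dataset group
+// with UpdateDatasetGroup. The ARNs are placeholders, and svc is assumed to be
+// a configured *ForecastService client.
+//
+//    _, err := svc.UpdateDatasetGroup(&forecastservice.UpdateDatasetGroupInput{
+//        DatasetGroupArn: aws.String("arn:aws:forecast:us-west-2:123456789012:dataset-group/my_group"),
+//        DatasetArns: []*string{
+//            aws.String("arn:aws:forecast:us-west-2:123456789012:dataset/my_dataset"),
+//        },
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+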
+func (s *UpdateDatasetGroupInput) SetDatasetGroupArn(v string) *UpdateDatasetGroupInput { + s.DatasetGroupArn = &v + return s +} + +type UpdateDatasetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDatasetGroupOutput) GoString() string { + return s.String() +} + +// The weighted loss value for a quantile. This object is part of the Metrics +// object. +type WeightedQuantileLoss struct { + _ struct{} `type:"structure"` + + // The difference between the predicted value and actual value over the quantile, + // weighted (normalized) by dividing by the sum over all quantiles. + LossValue *float64 `type:"double"` + + // The quantile. Quantiles divide a probability distribution into regions of + // equal probability. For example, if the distribution was divided into 5 regions + // of equal probability, the quantiles would be 0.2, 0.4, 0.6, and 0.8. + Quantile *float64 `type:"double"` +} + +// String returns the string representation +func (s WeightedQuantileLoss) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WeightedQuantileLoss) GoString() string { + return s.String() +} + +// SetLossValue sets the LossValue field's value. +func (s *WeightedQuantileLoss) SetLossValue(v float64) *WeightedQuantileLoss { + s.LossValue = &v + return s +} + +// SetQuantile sets the Quantile field's value. +func (s *WeightedQuantileLoss) SetQuantile(v float64) *WeightedQuantileLoss { + s.Quantile = &v + return s +} + +// The metrics for a time range within the evaluation portion of a dataset. +// This object is part of the EvaluationResult object. +// +// The TestWindowStart and TestWindowEnd parameters are determined by the BackTestWindowOffset +// parameter of the EvaluationParameters object. +type WindowSummary struct { + _ struct{} `type:"structure"` + + // The type of evaluation. + // + // * SUMMARY - The average metrics across all windows. + // + // * COMPUTED - The metrics for the specified window. + EvaluationType *string `type:"string" enum:"EvaluationType"` + + // The number of data points within the window. + ItemCount *int64 `type:"integer"` + + // Provides metrics used to evaluate the performance of a predictor. This object + // is part of the WindowSummary object. + Metrics *Metrics `type:"structure"` + + // The timestamp that defines the end of the window. + TestWindowEnd *time.Time `type:"timestamp"` + + // The timestamp that defines the start of the window. + TestWindowStart *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s WindowSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WindowSummary) GoString() string { + return s.String() +} + +// SetEvaluationType sets the EvaluationType field's value. +func (s *WindowSummary) SetEvaluationType(v string) *WindowSummary { + s.EvaluationType = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *WindowSummary) SetItemCount(v int64) *WindowSummary { + s.ItemCount = &v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *WindowSummary) SetMetrics(v *Metrics) *WindowSummary { + s.Metrics = v + return s +} + +// SetTestWindowEnd sets the TestWindowEnd field's value. 
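+
+// Illustrative sketch (not generated code): walking the evaluation windows
+// returned by GetAccuracyMetrics. It assumes res is a *GetAccuracyMetricsOutput
+// and that EvaluationResult (defined earlier in this file) exposes its windows
+// as TestWindows.
+//
+//    for _, er := range res.PredictorEvaluationResults {
+//        for _, w := range er.TestWindows {
+//            if w.Metrics == nil {
+//                continue
+//            }
+//            fmt.Println("RMSE:", aws.Float64Value(w.Metrics.RMSE))
+//            for _, q := range w.Metrics.WeightedQuantileLosses {
+//                fmt.Println(aws.Float64Value(q.Quantile), aws.Float64Value(q.LossValue))
+//            }
+//        }
+//    }
+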
+func (s *WindowSummary) SetTestWindowEnd(v time.Time) *WindowSummary {
+ s.TestWindowEnd = &v
+ return s
+}
+
+// SetTestWindowStart sets the TestWindowStart field's value.
+func (s *WindowSummary) SetTestWindowStart(v time.Time) *WindowSummary {
+ s.TestWindowStart = &v
+ return s
+}
+
+const (
+ // AttributeTypeString is an AttributeType enum value
+ AttributeTypeString = "string"
+
+ // AttributeTypeInteger is an AttributeType enum value
+ AttributeTypeInteger = "integer"
+
+ // AttributeTypeFloat is an AttributeType enum value
+ AttributeTypeFloat = "float"
+
+ // AttributeTypeTimestamp is an AttributeType enum value
+ AttributeTypeTimestamp = "timestamp"
+)
+
+const (
+ // DatasetTypeTargetTimeSeries is a DatasetType enum value
+ DatasetTypeTargetTimeSeries = "TARGET_TIME_SERIES"
+
+ // DatasetTypeRelatedTimeSeries is a DatasetType enum value
+ DatasetTypeRelatedTimeSeries = "RELATED_TIME_SERIES"
+
+ // DatasetTypeItemMetadata is a DatasetType enum value
+ DatasetTypeItemMetadata = "ITEM_METADATA"
+)
+
+const (
+ // DomainRetail is a Domain enum value
+ DomainRetail = "RETAIL"
+
+ // DomainCustom is a Domain enum value
+ DomainCustom = "CUSTOM"
+
+ // DomainInventoryPlanning is a Domain enum value
+ DomainInventoryPlanning = "INVENTORY_PLANNING"
+
+ // DomainEc2Capacity is a Domain enum value
+ DomainEc2Capacity = "EC2_CAPACITY"
+
+ // DomainWorkForce is a Domain enum value
+ DomainWorkForce = "WORK_FORCE"
+
+ // DomainWebTraffic is a Domain enum value
+ DomainWebTraffic = "WEB_TRAFFIC"
+
+ // DomainMetrics is a Domain enum value
+ DomainMetrics = "METRICS"
+)
+
+const (
+ // EvaluationTypeSummary is an EvaluationType enum value
+ EvaluationTypeSummary = "SUMMARY"
+
+ // EvaluationTypeComputed is an EvaluationType enum value
+ EvaluationTypeComputed = "COMPUTED"
+)
+
+const (
+ // FeaturizationMethodNameFilling is a FeaturizationMethodName enum value
+ FeaturizationMethodNameFilling = "filling"
+)
+
+const (
+ // FilterConditionStringIs is a FilterConditionString enum value
+ FilterConditionStringIs = "IS"
+
+ // FilterConditionStringIsNot is a FilterConditionString enum value
+ FilterConditionStringIsNot = "IS_NOT"
+)
+
+const (
+ // ScalingTypeAuto is a ScalingType enum value
+ ScalingTypeAuto = "Auto"
+
+ // ScalingTypeLinear is a ScalingType enum value
+ ScalingTypeLinear = "Linear"
+
+ // ScalingTypeLogarithmic is a ScalingType enum value
+ ScalingTypeLogarithmic = "Logarithmic"
+
+ // ScalingTypeReverseLogarithmic is a ScalingType enum value
+ ScalingTypeReverseLogarithmic = "ReverseLogarithmic"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/doc.go
new file mode 100644
index 00000000000..fe4d4b24c59
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/doc.go
@@ -0,0 +1,28 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package forecastservice provides the client and types for making API
+// requests to Amazon Forecast Service.
+//
+// Provides APIs for creating and managing Amazon Forecast resources.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26 for more information on this service.
+//
+// See forecastservice package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/forecastservice/
+//
+// Using the Client
+//
+// To contact Amazon Forecast Service with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Forecast Service client ForecastService for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/forecastservice/#New
+package forecastservice
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/errors.go
new file mode 100644
index 00000000000..19eb82f7388
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/errors.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package forecastservice
+
+const (
+
+ // ErrCodeInvalidInputException for service response error code
+ // "InvalidInputException".
+ //
+ // We can't process the request because it includes an invalid value or a value
+ // that exceeds the valid range.
+ ErrCodeInvalidInputException = "InvalidInputException"
+
+ // ErrCodeInvalidNextTokenException for service response error code
+ // "InvalidNextTokenException".
+ //
+ // The token is not valid. Tokens expire after 24 hours.
+ ErrCodeInvalidNextTokenException = "InvalidNextTokenException"
+
+ // ErrCodeLimitExceededException for service response error code
+ // "LimitExceededException".
+ //
+ // The limit on the number of requests per second has been exceeded.
+ ErrCodeLimitExceededException = "LimitExceededException"
+
+ // ErrCodeResourceAlreadyExistsException for service response error code
+ // "ResourceAlreadyExistsException".
+ //
+ // There is already a resource with that Amazon Resource Name (ARN). Try again
+ // with a different ARN.
+ ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"
+
+ // ErrCodeResourceInUseException for service response error code
+ // "ResourceInUseException".
+ //
+ // The specified resource is in use.
+ ErrCodeResourceInUseException = "ResourceInUseException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // We can't find a resource with that Amazon Resource Name (ARN). Check the
+ // ARN and try again.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go
new file mode 100644
index 00000000000..f7b4cda112f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go
@@ -0,0 +1,101 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package forecastservice
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// ForecastService provides the API operation methods for making requests to
+// Amazon Forecast Service. See this package's package overview docs
+// for details on the service.
+//
+// ForecastService methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type ForecastService struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "forecast" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "forecast" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the ForecastService client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a ForecastService client from just a session.
+// svc := forecastservice.New(mySession)
+//
+// // Create a ForecastService client with additional configuration
+// svc := forecastservice.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ForecastService {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "forecast"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ForecastService {
+ svc := &ForecastService{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2018-06-26",
+ JSONVersion: "1.1",
+ TargetPrefix: "AmazonForecast",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a ForecastService operation and runs any
+// custom request initialization.
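+
+// Illustrative sketch (not generated code): mapping a failed call to the ErrCode
+// constants defined in errors.go. svc and the predictor ARN are placeholder
+// assumptions.
+//
+//    _, err := svc.DescribePredictor(&forecastservice.DescribePredictorInput{
+//        PredictorArn: aws.String("arn:aws:forecast:us-west-2:123456789012:predictor/my_predictor"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case forecastservice.ErrCodeResourceNotFoundException:
+//            // no predictor with that ARN
+//        case forecastservice.ErrCodeLimitExceededException:
+//            // throttled; retry with backoff
+//        }
+//    }
+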
+func (c *ForecastService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go
index 19af266ad00..99bc03c8f12 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go
@@ -96,6 +96,9 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques
// * ErrCodeBadRequest "BadRequest"
// A generic error indicating a failure with a client request.
//
+// * ErrCodeUnsupportedOperation "UnsupportedOperation"
+// An error occurred.
+//
// * ErrCodeFileSystemNotFound "FileSystemNotFound"
// No Amazon FSx file systems were found based upon supplied parameters.
//
@@ -324,11 +327,11 @@ func (c *FSx) CreateFileSystemFromBackupRequest(input *CreateFileSystemFromBacku
// File Server backup.
//
// If a file system with the specified client request token exists and the parameters
-// match, this call returns the description of the existing file system. If
-// a client request token specified by the file system exists and the parameters
-// don't match, this call returns IncompatibleParameterError. If a file system
-// with the specified client request token doesn't exist, this operation does
-// the following:
+// match, this operation returns the description of the file system. If a client
+// request token specified by the file system exists and the parameters don't
+// match, this call returns IncompatibleParameterError. If a file system with
+// the specified client request token doesn't exist, this operation does the
+// following:
//
// * Creates a new Amazon FSx file system from backup with an assigned ID,
// and an initial lifecycle state of CREATING.
@@ -755,7 +758,7 @@ func (c *FSx) DescribeBackupsWithContext(ctx aws.Context, input *DescribeBackups
//
// Example iterating over at most 3 pages of a DescribeBackups operation.
// pageNum := 0
// err := client.DescribeBackupsPages(params,
-// func(page *DescribeBackupsOutput, lastPage bool) bool {
+// func(page *fsx.DescribeBackupsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
@@ -787,10 +790,12 @@ func (c *FSx) DescribeBackupsPagesWithContext(ctx aws.Context, input *DescribeBa
},
}

- cont := true
- for p.Next() && cont {
- cont = fn(p.Page().(*DescribeBackupsOutput), !p.HasNextPage())
+ for p.Next() {
+ if !fn(p.Page().(*DescribeBackupsOutput), !p.HasNextPage()) {
+ break
+ }
}
+
return p.Err()
}

@@ -919,7 +924,7 @@ func (c *FSx) DescribeFileSystemsWithContext(ctx aws.Context, input *DescribeFil
//
// Example iterating over at most 3 pages of a DescribeFileSystems operation.
//    pageNum := 0
//    err := client.DescribeFileSystemsPages(params,
-//        func(page *DescribeFileSystemsOutput, lastPage bool) bool {
+//        func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
@@ -951,10 +956,12 @@ func (c *FSx) DescribeFileSystemsPagesWithContext(ctx aws.Context, input *Descri
		},
	}

-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*DescribeFileSystemsOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*DescribeFileSystemsOutput), !p.HasNextPage()) {
+			break
+		}
	}
+
	return p.Err()
}

@@ -1314,6 +1321,9 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques
//   * ErrCodeBadRequest "BadRequest"
//   A generic error indicating a failure with a client request.
//
+//   * ErrCodeUnsupportedOperation "UnsupportedOperation"
+//   An error occurred.
+//
//   * ErrCodeIncompatibleParameterError "IncompatibleParameterError"
//   The error returned when a second request is received with the same client
//   request token but different parameters settings. A client request token should
@@ -1350,6 +1360,41 @@ func (c *FSx) UpdateFileSystemWithContext(ctx aws.Context, input *UpdateFileSyst
	return out, req.Send()
}

+// The Microsoft AD attributes of the Amazon FSx for Windows File Server file
+// system.
+type ActiveDirectoryBackupAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the AWS Managed Microsoft Active Directory instance to which the
+	// file system is joined.
+	ActiveDirectoryId *string `min:"12" type:"string"`
+
+	// The fully qualified domain name of the self-managed AD directory.
+	DomainName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ActiveDirectoryBackupAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActiveDirectoryBackupAttributes) GoString() string {
+	return s.String()
+}
+
+// SetActiveDirectoryId sets the ActiveDirectoryId field's value.
+func (s *ActiveDirectoryBackupAttributes) SetActiveDirectoryId(v string) *ActiveDirectoryBackupAttributes {
+	s.ActiveDirectoryId = &v
+	return s
+}
+
+// SetDomainName sets the DomainName field's value.
+func (s *ActiveDirectoryBackupAttributes) SetDomainName(v string) *ActiveDirectoryBackupAttributes {
+	s.DomainName = &v
+	return s
+}
+
// A backup of an Amazon FSx for Windows File Server file system. You can create
// a new file system from a backup to protect against data loss.
type Backup struct {
@@ -1365,6 +1410,10 @@ type Backup struct {
	// CreationTime is a required field
	CreationTime *time.Time `type:"timestamp" required:"true"`

+	// The configuration of the self-managed Microsoft Active Directory (AD) to
+	// which the Windows File Server instance is joined.
+	DirectoryInformation *ActiveDirectoryBackupAttributes `type:"structure"`
+
	// Details explaining any failures that occur when creating a backup.
	FailureDetails *BackupFailureDetails `type:"structure"`

@@ -1420,6 +1469,12 @@ func (s *Backup) SetCreationTime(v time.Time) *Backup {
	return s
}

+// SetDirectoryInformation sets the DirectoryInformation field's value.
+func (s *Backup) SetDirectoryInformation(v *ActiveDirectoryBackupAttributes) *Backup {
+	s.DirectoryInformation = v
+	return s
+}
+
// SetFailureDetails sets the FailureDetails field's value.
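//
// (Editor's note, illustration only: a hypothetical read of the new
// DirectoryInformation field; b is assumed to be a *fsx.Backup returned by
// DescribeBackups, and aws.StringValue is the SDK's nil-safe dereference.)
//
//	if b.DirectoryInformation != nil {
//	    fmt.Println("source domain:", aws.StringValue(b.DirectoryInformation.DomainName))
//	}
//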
func (s *Backup) SetFailureDetails(v *BackupFailureDetails) *Backup { s.FailureDetails = v @@ -1599,7 +1654,8 @@ func (s *CreateBackupOutput) SetBackup(v *Backup) *CreateBackupOutput { type CreateFileSystemFromBackupInput struct { _ struct{} `type:"structure"` - // The ID of the backup. + // The ID of the backup. Specifies the backup to use if you're creating a file + // system from an existing backup. // // BackupId is a required field BackupId *string `min:"12" type:"string" required:"true"` @@ -1739,7 +1795,7 @@ func (s *CreateFileSystemFromBackupOutput) SetFileSystem(v *FileSystem) *CreateF return s } -// The request object for the CreateFileSystem operation. +// The request object used to create a new Amazon FSx file system. type CreateFileSystemInput struct { _ struct{} `type:"structure"` @@ -1748,7 +1804,7 @@ type CreateFileSystemInput struct { // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The type of file system. + // The type of Amazon FSx file system to create. // // FileSystemType is a required field FileSystemType *string `type:"string" required:"true" enum:"FileSystemType"` @@ -1759,16 +1815,16 @@ type CreateFileSystemInput struct { // in the AWS Key Management Service API Reference. KmsKeyId *string `min:"1" type:"string"` - // The configuration object for Lustre file systems used in the CreateFileSystem - // operation. + // The Lustre configuration for the file system being created. This value is + // required if FileSystemType is set to LUSTRE. LustreConfiguration *CreateFileSystemLustreConfiguration `type:"structure"` - // A list of IDs for the security groups that apply to the specified network - // interfaces created for file system access. These security groups will apply - // to all network interfaces. This list isn't returned in later describe requests. + // A list of IDs specifying the security groups to apply to all network interfaces + // created for file system access. This list isn't returned in later requests + // to describe the file system. SecurityGroupIds []*string `type:"list"` - // The storage capacity of the file system. + // The storage capacity of the file system being created. // // For Windows file systems, the storage capacity has a minimum of 300 GiB, // and a maximum of 65,536 GiB. @@ -1779,18 +1835,19 @@ type CreateFileSystemInput struct { // StorageCapacity is a required field StorageCapacity *int64 `min:"1" type:"integer" required:"true"` - // A list of IDs for the subnets that the file system will be accessible from. - // File systems support only one subnet. The file server is also launched in - // that subnet's Availability Zone. + // The IDs of the subnets that the file system will be accessible from. File + // systems support only one subnet. The file server is also launched in that + // subnet's Availability Zone. // // SubnetIds is a required field SubnetIds []*string `type:"list" required:"true"` - // The tags to be applied to the file system at file system creation. The key - // value of the Name tag appears in the console as the file system name. + // The tags to apply to the file system being created. The key value of the + // Name tag appears in the console as the file system name. Tags []*Tag `min:"1" type:"list"` - // The configuration for this Microsoft Windows file system. + // The Microsoft Windows configuration for the file system being created. This + // value is required if FileSystemType is set to WINDOWS. 
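+	//
+	// (Editor's note, illustration only: a hypothetical minimal WINDOWS create
+	// request; the subnet ID is a placeholder and FileSystemTypeWindows is the
+	// package's enum constant for "WINDOWS".)
+	//
+	//	input := &fsx.CreateFileSystemInput{
+	//	    FileSystemType:  aws.String(fsx.FileSystemTypeWindows),
+	//	    StorageCapacity: aws.Int64(300), // Windows minimum, in GiB
+	//	    SubnetIds:       []*string{aws.String("subnet-0123456789abcdef0")},
+	//	    WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{
+	//	        ThroughputCapacity: aws.Int64(8), // 2^3 MB/s, the minimum
+	//	    },
+	//	}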
WindowsConfiguration *CreateFileSystemWindowsConfiguration `type:"structure"` } @@ -1909,8 +1966,8 @@ func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindo return s } -// The configuration object for Lustre file systems used in the CreateFileSystem -// operation. +// The Lustre configuration for the file system being created. This value is +// required if FileSystemType is set to LUSTRE. type CreateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` @@ -2008,11 +2065,11 @@ func (s *CreateFileSystemLustreConfiguration) SetWeeklyMaintenanceStartTime(v st return s } -// The response object for the CreateFileSystem operation. +// The response object returned after the file system is created. type CreateFileSystemOutput struct { _ struct{} `type:"structure"` - // A description of the file system. + // The configuration of the file system that was created. FileSystem *FileSystem `type:"structure"` } @@ -2037,8 +2094,8 @@ func (s *CreateFileSystemOutput) SetFileSystem(v *FileSystem) *CreateFileSystemO type CreateFileSystemWindowsConfiguration struct { _ struct{} `type:"structure"` - // The ID for an existing Microsoft Active Directory instance that the file - // system should join when it's created. + // The ID for an existing AWS Managed Microsoft Active Directory (AD) instance + // that the file system should join when it's created. ActiveDirectoryId *string `min:"12" type:"string"` // The number of days to retain automatic backups. The default is to retain @@ -2046,22 +2103,30 @@ type CreateFileSystemWindowsConfiguration struct { // backups. The maximum retention period for backups is 35 days. AutomaticBackupRetentionDays *int64 `type:"integer"` - // A boolean flag indicating whether tags on the file system should be copied - // to backups. This value defaults to false. If it's set to true, all tags on - // the file system are copied to all automatic backups and any user-initiated - // backups where the user doesn't specify any tags. If this value is true, and - // you specify one or more tags, only the specified tags are copied to backups. + // A boolean flag indicating whether tags for the file system should be copied + // to backups. This value defaults to false. If it's set to true, all tags for + // the file system are copied to all automatic and user-initiated backups where + // the user doesn't specify tags. If this value is true, and you specify one + // or more tags, only the specified tags are copied to backups. CopyTagsToBackups *bool `type:"boolean"` - // The preferred time to take daily automatic backups, in the UTC time zone. + // The preferred time to take daily automatic backups, formatted HH:MM in the + // UTC time zone. DailyAutomaticBackupStartTime *string `min:"5" type:"string"` - // The throughput of an Amazon FSx file system, measured in megabytes per second. + // The configuration that Amazon FSx uses to join the Windows File Server instance + // to your self-managed (including on-premises) Microsoft Active Directory (AD) + // directory. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfiguration `type:"structure"` + + // The throughput of an Amazon FSx file system, measured in megabytes per second, + // in 2 to the nth increments, between 2^3 (8) and 2^11 (2048). // // ThroughputCapacity is a required field ThroughputCapacity *int64 `min:"8" type:"integer" required:"true"` - // The preferred start time to perform weekly maintenance, in the UTC time zone. 
+ // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -2093,6 +2158,11 @@ func (s *CreateFileSystemWindowsConfiguration) Validate() error { if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) } + if s.SelfManagedActiveDirectoryConfiguration != nil { + if err := s.SelfManagedActiveDirectoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("SelfManagedActiveDirectoryConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2124,6 +2194,12 @@ func (s *CreateFileSystemWindowsConfiguration) SetDailyAutomaticBackupStartTime( return s } +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *CreateFileSystemWindowsConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryConfiguration) *CreateFileSystemWindowsConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + // SetThroughputCapacity sets the ThroughputCapacity field's value. func (s *CreateFileSystemWindowsConfiguration) SetThroughputCapacity(v int64) *CreateFileSystemWindowsConfiguration { s.ThroughputCapacity = &v @@ -2703,22 +2779,38 @@ type FileSystem struct { // The DNS name for the file system. DNSName *string `min:"16" type:"string"` - // Structure providing details of any failures that occur when creating the + // A structure providing details of any failures that occur when creating the // file system has failed. FailureDetails *FileSystemFailureDetails `type:"structure"` - // The eight-digit ID of the file system that was automatically assigned by - // Amazon FSx. + // The system-generated, unique 17-digit ID of the file system. FileSystemId *string `min:"11" type:"string"` - // Type of file system. Currently the only supported type is WINDOWS. + // The type of Amazon FSx file system, either LUSTRE or WINDOWS. FileSystemType *string `type:"string" enum:"FileSystemType"` // The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the // file system's data for an Amazon FSx for Windows File Server file system. KmsKeyId *string `min:"1" type:"string"` - // The lifecycle status of the file system. + // The lifecycle status of the file system: + // + // * AVAILABLE indicates that the file system is reachable and available + // for use. + // + // * CREATING indicates that Amazon FSx is in the process of creating the + // new file system. + // + // * DELETING indicates that Amazon FSx is in the process of deleting the + // file system. + // + // * FAILED indicates that Amazon FSx was not able to create the file system. + // + // * MISCONFIGURED indicates that the file system is in a failed but recoverable + // state. + // + // * UPDATING indicates that the file system is undergoing a customer initiated + // update. Lifecycle *string `type:"string" enum:"FileSystemLifecycle"` // The configuration for the Amazon FSx for Lustre file system. @@ -2731,21 +2823,22 @@ type FileSystem struct { // in the Amazon EC2 User Guide. // // For an Amazon FSx for Windows File Server file system, you can have one network - // interface Id. For an Amazon FSx for Lustre file system, you can have more + // interface ID. For an Amazon FSx for Lustre file system, you can have more // than one. 
NetworkInterfaceIds []*string `type:"list"` // The AWS account that created the file system. If the file system was created - // by an IAM user, the AWS account to which the IAM user belongs is the owner. + // by an AWS Identity and Access Management (IAM) user, the AWS account to which + // the IAM user belongs is the owner. OwnerId *string `min:"12" type:"string"` - // The resource ARN of the file system. + // The Amazon Resource Name (ARN) for the file system resource. ResourceARN *string `min:"8" type:"string"` - // The storage capacity of the file system in gigabytes. + // The storage capacity of the file system in gigabytes (GB). StorageCapacity *int64 `min:"1" type:"integer"` - // The IDs of the subnets to contain the endpoint for the file system. One and + // The ID of the subnet to contain the endpoint for the file system. One and // only one is supported. The file system is launched in the Availability Zone // associated with this subnet. SubnetIds []*string `type:"list"` @@ -2868,12 +2961,12 @@ func (s *FileSystem) SetWindowsConfiguration(v *WindowsFileSystemConfiguration) return s } -// Structure providing details of any failures that occur when creating the +// A structure providing details of any failures that occur when creating the // file system has failed. type FileSystemFailureDetails struct { _ struct{} `type:"structure"` - // Message describing the failures that occurred during file system creation. + // A message describing any failures that occurred during file system creation. Message *string `min:"1" type:"string"` } @@ -3067,6 +3160,284 @@ func (s *LustreFileSystemConfiguration) SetWeeklyMaintenanceStartTime(v string) return s } +// The configuration of the self-managed Microsoft Active Directory (AD) directory +// to which the Windows File Server instance is joined. +type SelfManagedActiveDirectoryAttributes struct { + _ struct{} `type:"structure"` + + // A list of up to two IP addresses of DNS servers or domain controllers in + // the self-managed AD directory. + DnsIps []*string `min:"1" type:"list"` + + // The fully qualified domain name of the self-managed AD directory. + DomainName *string `type:"string"` + + // The name of the domain group whose members have administrative privileges + // for the FSx file system. + FileSystemAdministratorsGroup *string `min:"1" type:"string"` + + // The fully qualified distinguished name of the organizational unit within + // the self-managed AD directory to which the Windows File Server instance is + // joined. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // The user name for the service account on your self-managed AD domain that + // FSx uses to join to your AD domain. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryAttributes) GoString() string { + return s.String() +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetDnsIps(v []*string) *SelfManagedActiveDirectoryAttributes { + s.DnsIps = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetDomainName(v string) *SelfManagedActiveDirectoryAttributes { + s.DomainName = &v + return s +} + +// SetFileSystemAdministratorsGroup sets the FileSystemAdministratorsGroup field's value. 
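+//
+// (Editor's note, illustration only: these attributes are read-only and surface
+// on describe output; fs is assumed to be a *fsx.FileSystem whose Windows
+// configuration carries the SelfManagedActiveDirectoryConfiguration attributes
+// added further below.)
+//
+//	if wc := fs.WindowsConfiguration; wc != nil && wc.SelfManagedActiveDirectoryConfiguration != nil {
+//	    fmt.Println("joined to:", aws.StringValue(wc.SelfManagedActiveDirectoryConfiguration.DomainName))
+//	}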
+func (s *SelfManagedActiveDirectoryAttributes) SetFileSystemAdministratorsGroup(v string) *SelfManagedActiveDirectoryAttributes {
+	s.FileSystemAdministratorsGroup = &v
+	return s
+}
+
+// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value.
+func (s *SelfManagedActiveDirectoryAttributes) SetOrganizationalUnitDistinguishedName(v string) *SelfManagedActiveDirectoryAttributes {
+	s.OrganizationalUnitDistinguishedName = &v
+	return s
+}
+
+// SetUserName sets the UserName field's value.
+func (s *SelfManagedActiveDirectoryAttributes) SetUserName(v string) *SelfManagedActiveDirectoryAttributes {
+	s.UserName = &v
+	return s
+}
+
+// The configuration that Amazon FSx uses to join the Windows File Server instance
+// to your self-managed (including on-premises) Microsoft Active Directory (AD)
+// directory.
+type SelfManagedActiveDirectoryConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// A list of up to two IP addresses of DNS servers or domain controllers in
+	// the self-managed AD directory. The IP addresses need to be either in the
+	// same VPC CIDR range as the one in which your Amazon FSx file system is being
+	// created, or in the private IP version 4 (IPv4) address ranges, as specified
+	// in RFC 1918 (http://www.faqs.org/rfcs/rfc1918.html):
+	//
+	//    * 10.0.0.0 - 10.255.255.255 (10/8 prefix)
+	//
+	//    * 172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
+	//
+	//    * 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
+	//
+	// DnsIps is a required field
+	DnsIps []*string `min:"1" type:"list" required:"true"`
+
+	// The fully qualified domain name of the self-managed AD directory, such as
+	// corp.example.com.
+	//
+	// DomainName is a required field
+	DomainName *string `type:"string" required:"true"`
+
+	// (Optional) The name of the domain group whose members are granted administrative
+	// privileges for the file system. Administrative privileges include taking
+	// ownership of files and folders, and setting audit controls (audit ACLs) on
+	// files and folders. The group that you specify must already exist in your
+	// domain. If you don't provide one, your AD domain's Domain Admins group is
+	// used.
+	FileSystemAdministratorsGroup *string `min:"1" type:"string"`
+
+	// (Optional) The fully qualified distinguished name of the organizational unit
+	// within your self-managed AD directory that the Windows File Server instance
+	// will join. Amazon FSx only accepts OU as the direct parent of the file system.
+	// An example is OU=FSx,DC=yourdomain,DC=corp,DC=com. To learn more, see RFC
+	// 2253 (https://tools.ietf.org/html/rfc2253). If none is provided, the FSx
+	// file system is created in the default location of your self-managed AD directory.
+	//
+	// Only Organizational Unit (OU) objects can be the direct parent of the file
+	// system that you're creating.
+	OrganizationalUnitDistinguishedName *string `min:"1" type:"string"`
+
+	// The password for the service account on your self-managed AD domain that
+	// Amazon FSx will use to join to your AD domain.
+	//
+	// Password is a required field
+	Password *string `min:"1" type:"string" required:"true" sensitive:"true"`
+
+	// The user name for the service account on your self-managed AD domain that
+	// Amazon FSx will use to join to your AD domain. This account must have the
+	// permission to join computers to the domain in the organizational unit provided
+	// in OrganizationalUnitDistinguishedName, or in the default location of your
+	// AD domain.
+ // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelfManagedActiveDirectoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelfManagedActiveDirectoryConfiguration"} + if s.DnsIps == nil { + invalidParams.Add(request.NewErrParamRequired("DnsIps")) + } + if s.DnsIps != nil && len(s.DnsIps) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DnsIps", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.FileSystemAdministratorsGroup != nil && len(*s.FileSystemAdministratorsGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemAdministratorsGroup", 1)) + } + if s.OrganizationalUnitDistinguishedName != nil && len(*s.OrganizationalUnitDistinguishedName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnitDistinguishedName", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetDnsIps(v []*string) *SelfManagedActiveDirectoryConfiguration { + s.DnsIps = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetDomainName(v string) *SelfManagedActiveDirectoryConfiguration { + s.DomainName = &v + return s +} + +// SetFileSystemAdministratorsGroup sets the FileSystemAdministratorsGroup field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetFileSystemAdministratorsGroup(v string) *SelfManagedActiveDirectoryConfiguration { + s.FileSystemAdministratorsGroup = &v + return s +} + +// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetOrganizationalUnitDistinguishedName(v string) *SelfManagedActiveDirectoryConfiguration { + s.OrganizationalUnitDistinguishedName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetPassword(v string) *SelfManagedActiveDirectoryConfiguration { + s.Password = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetUserName(v string) *SelfManagedActiveDirectoryConfiguration { + s.UserName = &v + return s +} + +// The configuration that Amazon FSx uses to join the Windows File Server instance +// to the self-managed Microsoft Active Directory (AD) directory. +type SelfManagedActiveDirectoryConfigurationUpdates struct { + _ struct{} `type:"structure"` + + // A list of up to two IP addresses of DNS servers or domain controllers in + // the self-managed AD directory. 
+ DnsIps []*string `min:"1" type:"list"` + + // The password for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. + Password *string `min:"1" type:"string" sensitive:"true"` + + // The user name for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. This account must have the + // permission to join computers to the domain in the organizational unit provided + // in OrganizationalUnitDistinguishedName. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryConfigurationUpdates) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryConfigurationUpdates) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelfManagedActiveDirectoryConfigurationUpdates"} + if s.DnsIps != nil && len(s.DnsIps) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DnsIps", 1)) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetDnsIps(v []*string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.DnsIps = v + return s +} + +// SetPassword sets the Password field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetPassword(v string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.Password = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetUserName(v string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.UserName = &v + return s +} + // Specifies a key-value pair for a resource tag. type Tag struct { _ struct{} `type:"structure"` @@ -3296,8 +3667,9 @@ type UpdateFileSystemInput struct { // UpdateFileSystem operation. LustreConfiguration *UpdateFileSystemLustreConfiguration `type:"structure"` - // The configuration for this Microsoft Windows file system. The only supported - // options are for backup and maintenance. + // The configuration update for this Microsoft Windows file system. The only + // supported options are for backup and maintenance and for self-managed Active + // Directory configuration. WindowsConfiguration *UpdateFileSystemWindowsConfiguration `type:"structure"` } @@ -3406,7 +3778,7 @@ func (s *UpdateFileSystemLustreConfiguration) SetWeeklyMaintenanceStartTime(v st type UpdateFileSystemOutput struct { _ struct{} `type:"structure"` - // A description of the file system. + // A description of the file system that was updated. FileSystem *FileSystem `type:"structure"` } @@ -3426,8 +3798,10 @@ func (s *UpdateFileSystemOutput) SetFileSystem(v *FileSystem) *UpdateFileSystemO return s } -// The configuration object for the Microsoft Windows file system used in the -// UpdateFileSystem operation. +// Updates the Microsoft Windows configuration for an existing Amazon FSx for +// Windows File Server file system. 
Amazon FSx overwrites existing properties +// with non-null values provided in the request. If you don't specify a non-null +// value for a property, that property is not updated. type UpdateFileSystemWindowsConfiguration struct { _ struct{} `type:"structure"` @@ -3438,6 +3812,10 @@ type UpdateFileSystemWindowsConfiguration struct { // The preferred time to take daily automatic backups, in the UTC time zone. DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + // The configuration Amazon FSx uses to join the Windows File Server instance + // to the self-managed Microsoft AD directory. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationUpdates `type:"structure"` + // The preferred time to perform weekly maintenance, in the UTC time zone. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -3461,6 +3839,11 @@ func (s *UpdateFileSystemWindowsConfiguration) Validate() error { if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) } + if s.SelfManagedActiveDirectoryConfiguration != nil { + if err := s.SelfManagedActiveDirectoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("SelfManagedActiveDirectoryConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3480,6 +3863,12 @@ func (s *UpdateFileSystemWindowsConfiguration) SetDailyAutomaticBackupStartTime( return s } +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryConfigurationUpdates) *UpdateFileSystemWindowsConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + // SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. func (s *UpdateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v string) *UpdateFileSystemWindowsConfiguration { s.WeeklyMaintenanceStartTime = &v @@ -3511,6 +3900,10 @@ type WindowsFileSystemConfiguration struct { // The list of maintenance operations in progress for this file system. MaintenanceOperationsInProgress []*string `type:"list"` + // The configuration of the self-managed Microsoft Active Directory (AD) directory + // to which the Windows File Server instance is joined. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryAttributes `type:"structure"` + // The throughput of an Amazon FSx file system, measured in megabytes per second. ThroughputCapacity *int64 `min:"8" type:"integer"` @@ -3558,6 +3951,12 @@ func (s *WindowsFileSystemConfiguration) SetMaintenanceOperationsInProgress(v [] return s } +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *WindowsFileSystemConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryAttributes) *WindowsFileSystemConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + // SetThroughputCapacity sets the ThroughputCapacity field's value. 
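//
// (Editor's note, illustration only: a hypothetical rotation of the self-managed
// AD service-account password on an existing file system; svc is an *fsx.FSx
// client and the file system ID is a placeholder.)
//
//	_, err := svc.UpdateFileSystem(&fsx.UpdateFileSystemInput{
//	    FileSystemId: aws.String("fs-0123456789abcdef0"),
//	    WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{
//	        SelfManagedActiveDirectoryConfiguration: &fsx.SelfManagedActiveDirectoryConfigurationUpdates{
//	            Password: aws.String("example-password"),
//	        },
//	    },
//	})
//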
func (s *WindowsFileSystemConfiguration) SetThroughputCapacity(v int64) *WindowsFileSystemConfiguration {
	s.ThroughputCapacity = &v
@@ -3627,6 +4026,12 @@ const (

	// FileSystemLifecycleDeleting is a FileSystemLifecycle enum value
	FileSystemLifecycleDeleting = "DELETING"
+
+	// FileSystemLifecycleMisconfigured is a FileSystemLifecycle enum value
+	FileSystemLifecycleMisconfigured = "MISCONFIGURED"
+
+	// FileSystemLifecycleUpdating is a FileSystemLifecycle enum value
+	FileSystemLifecycleUpdating = "UPDATING"
)

// An enumeration specifying the currently ongoing maintenance operation.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go
index d207da38f0e..42a1e80601b 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go
@@ -109,4 +109,10 @@ const (
	// An error indicating that a particular service limit was exceeded. You can
	// increase some service limits by contacting AWS Support.
	ErrCodeServiceLimitExceeded = "ServiceLimitExceeded"
+
+	// ErrCodeUnsupportedOperation for service response error code
+	// "UnsupportedOperation".
+	//
+	// An error occurred.
+	ErrCodeUnsupportedOperation = "UnsupportedOperation"
)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go
index 544a82e1b07..0afe7ddcb61 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go
@@ -46,11 +46,11 @@ const (
//     svc := fsx.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *FSx {
	c := p.ClientConfig(EndpointsID, cfgs...)
-	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FSx {
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FSx {
	svc := &FSx{
		Client: client.New(
			cfg,
@@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2018-03-01",
				JSONVersion:   "1.1",
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go
index 08d00aba78e..c29096f3f1b 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go
@@ -75,12 +75,20 @@ func (c *GameLift) AcceptMatchRequest(input *AcceptMatchInput) (req *request.Req
//
// If any player rejects the match, or if acceptances are not received before
// a specified timeout, the proposed match is dropped.
The matchmaking tickets -// are then handled in one of two ways: For tickets where all players accepted -// the match, the ticket status is returned to SEARCHING to find a new match. -// For tickets where one or more players failed to accept the match, the ticket -// status is set to FAILED, and processing is terminated. A new matchmaking +// are then handled in one of two ways: For tickets where one or more players +// rejected the match, the ticket status is returned to SEARCHING to find a +// new match. For tickets where one or more players failed to respond, the ticket +// status is set to CANCELLED, and processing is terminated. A new matchmaking // request for these players can be submitted as needed. // +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// FlexMatch Events Reference (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-events.html) +// +// Related operations +// // * StartMatchmaking // // * DescribeMatchmaking @@ -316,8 +324,8 @@ func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Req // GameLift. // // To create new builds quickly and easily, use the AWS CLI command upload-build -// (https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html). -// This helper command uploads your build and creates a new build record in +// (https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html) +// . This helper command uploads your build and creates a new build record in // one step, and automatically handles the necessary permissions. // // The CreateBuild operation should be used only when you need to manually upload @@ -346,7 +354,7 @@ func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Req // // Uploading Your Game (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // -// Create a Build with Files in Amazon S3 (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) +// Create a Build with Files in Amazon S3 (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) // // Related operations // @@ -465,7 +473,7 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // instance in the fleet. // // When creating a Realtime Servers fleet, we recommend using a minimal version -// of the Realtime script (see this working code example (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-script.html#realtime-script-examples)). +// of the Realtime script (see this working code example (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-script.html#realtime-script-examples)). // This will make it much easier to troubleshoot any fleet creation issues. // Once the fleet is active, you can update your Realtime script as needed. // @@ -476,10 +484,9 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // * Creates a fleet record. Status: NEW. // // * Begins writing events to the fleet event log, which can be accessed -// in the Amazon GameLift console. -// -// Sets the fleet's target capacity to 1 (desired instances), which triggers -// Amazon GameLift to start one new EC2 instance. +// in the Amazon GameLift console. 
Sets the fleet's target capacity to 1 +// (desired instances), which triggers Amazon GameLift to start one new EC2 +// instance. // // * Downloads the game build or Realtime script to the new instance and // installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING. @@ -493,9 +500,9 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // -// Debug Fleet Creation Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) +// Debug Fleet Creation Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) // // Related operations // @@ -505,37 +512,14 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// * Manage fleet actions: +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -679,13 +663,8 @@ func (c *GameLift) CreateGameSessionRequest(input *CreateGameSessionInput) (req // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -948,21 +927,21 @@ func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakin // game session for the match; and the maximum time allowed for a matchmaking // attempt. // -// Player acceptance -- In each configuration, you have the option to require -// that all players accept participation in a proposed match. To enable this -// feature, set AcceptanceRequired to true and specify a time limit for player -// acceptance. Players have the option to accept or reject a proposed match, -// and a match does not move ahead to game session placement unless all matched -// players accept. -// -// Matchmaking status notification -- There are two ways to track the progress -// of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; -// or (2) receiving notifications with Amazon Simple Notification Service (SNS). 
-// To use notifications, you first need to set up an SNS topic to receive the -// notifications, and provide the topic ARN in the matchmaking configuration -// (see Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)). -// Since notifications promise only "best effort" delivery, we recommend calling -// DescribeMatchmaking if no notifications are received within 30 seconds. +// There are two ways to track the progress of matchmaking tickets: (1) polling +// ticket status with DescribeMatchmaking; or (2) receiving notifications with +// Amazon Simple Notification Service (SNS). To use notifications, you first +// need to set up an SNS topic to receive the notifications, and provide the +// topic ARN in the matchmaking configuration. Since notifications promise only +// "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications +// are received within 30 seconds. +// +// Learn more +// +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// +// Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// +// Related operations // // * CreateMatchmakingConfiguration // @@ -1081,7 +1060,7 @@ func (c *GameLift) CreateMatchmakingRuleSetRequest(input *CreateMatchmakingRuleS // // To create a matchmaking rule set, provide unique rule set name and the rule // set body in JSON format. Rule sets must be defined in the same region as -// the matchmaking configuration they will be used with. +// the matchmaking configuration they are used with. // // Since matchmaking rule sets cannot be edited, it is a good idea to check // the rule set syntax using ValidateMatchmakingRuleSet before creating a new @@ -1220,13 +1199,8 @@ func (c *GameLift) CreatePlayerSessionRequest(input *CreatePlayerSessionInput) ( // // * DescribePlayerSessions // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1355,13 +1329,8 @@ func (c *GameLift) CreatePlayerSessionsRequest(input *CreatePlayerSessionsInput) // // * DescribePlayerSessions // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1976,7 +1945,7 @@ func (c *GameLift) DeleteBuildRequest(input *DeleteBuildInput) (req *request.Req // // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) // // Related operations // @@ -2084,12 +2053,17 @@ func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Req // Deletes everything related to a fleet. Before deleting a fleet, you must // set the fleet's desired capacity to zero. 
See UpdateFleetCapacity. // +// If the fleet being deleted has a VPC peering connection, you first need to +// get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. +// You do not need to explicitly delete the VPC peering connection--this is +// done as part of the delete fleet process. +// // This action removes the fleet's resources and the fleet record. Once a fleet // is deleted, you can no longer use that fleet. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -2099,37 +2073,14 @@ func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Req // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetPortSettings +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2334,6 +2285,8 @@ func (c *GameLift) DeleteMatchmakingConfigurationRequest(input *DeleteMatchmakin // the configuration name. A matchmaking configuration cannot be deleted if // it is being used in any active matchmaking tickets. // +// Related operations +// // * CreateMatchmakingConfiguration // // * DescribeMatchmakingConfigurations @@ -2571,19 +2524,10 @@ func (c *GameLift) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) ( // // * DescribeEC2InstanceLimits // -// * Manage scaling policies: -// -// PutScalingPolicy (auto-scaling) -// -// DescribeScalingPolicies (auto-scaling) +// * Manage scaling policies: PutScalingPolicy (auto-scaling) DescribeScalingPolicies +// (auto-scaling) DeleteScalingPolicy (auto-scaling) // -// DeleteScalingPolicy (auto-scaling) -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2793,9 +2737,8 @@ func (c *GameLift) DeleteVpcPeeringAuthorizationRequest(input *DeleteVpcPeeringA // DeleteVpcPeeringAuthorization API operation for Amazon GameLift. // -// Cancels a pending VPC peering authorization for the specified VPC. If the -// authorization has already been used to create a peering connection, call -// DeleteVpcPeeringConnection to remove the connection. +// Cancels a pending VPC peering authorization for the specified VPC. If you +// need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection. 
// // * CreateVpcPeeringAuthorization // @@ -3125,7 +3068,7 @@ func (c *GameLift) DescribeBuildRequest(input *DescribeBuildInput) (req *request // // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) // // Related operations // @@ -3241,7 +3184,7 @@ func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLi // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -3251,37 +3194,14 @@ func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLi // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetCapacity +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3383,7 +3303,7 @@ func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributes // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -3393,37 +3313,14 @@ func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributes // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetEvents +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3530,7 +3427,7 @@ func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInpu // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -3540,37 +3437,14 @@ func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInpu // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// * Manage fleet actions: +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3669,7 +3543,7 @@ func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) ( // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -3679,37 +3553,14 @@ func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) ( // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateFleetAttributes +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3810,7 +3661,7 @@ func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSett // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
// // Related operations // @@ -3820,37 +3671,14 @@ func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSett // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeRuntimeConfiguration +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3955,7 +3783,7 @@ func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizati // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -3965,37 +3793,14 @@ func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizati // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateFleetPortSettings +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4111,13 +3916,8 @@ func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionD // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4232,13 +4032,8 @@ func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessio // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4459,13 +4254,8 @@ func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4676,6 +4466,14 @@ func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) ( // the request is successful, a ticket object is returned for each requested // ID that currently exists. // +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// +// Related operations +// // * StartMatchmaking // // * DescribeMatchmaking @@ -4772,7 +4570,7 @@ func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatch // DescribeMatchmakingConfigurations API operation for Amazon GameLift. // -// Retrieves the details of FlexMatch matchmaking configurations. with this +// Retrieves the details of FlexMatch matchmaking configurations. With this +// operation, you have the following options: (1) retrieve all existing configurations, // (2) provide the names of one or more configurations to retrieve, or (3) retrieve // all configurations that use a specified rule set name. When requesting multiple @@ -4781,6 +4579,12 @@ func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatch // When specifying a list of names, only configurations that currently exist // are returned. // +// Learn more +// +// Setting Up FlexMatch Matchmakers (https://docs.aws.amazon.com/gamelift/latest/developerguide/matchmaker-build.html) +// +// Related operations +// // * CreateMatchmakingConfiguration // // * DescribeMatchmakingConfigurations @@ -5021,13 +4825,8 @@ func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsIn // // * DescribePlayerSessions // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors.
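For illustration only: the DescribeMatchmaking hunk above says a ticket object comes back for each requested ID that currently exists. A minimal polling sketch under that contract, assuming a hypothetical ticket ID from an earlier StartMatchmaking call and a *gamelift.GameLift client constructed as in the previous sketch; the event-notification setup linked above is the recommended alternative to tight polling:

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func waitForTicket(svc *gamelift.GameLift, ticketID string) (*gamelift.MatchmakingTicket, error) {
	for {
		out, err := svc.DescribeMatchmaking(&gamelift.DescribeMatchmakingInput{
			TicketIds: []*string{aws.String(ticketID)},
		})
		if err != nil {
			return nil, err
		}
		if len(out.TicketList) == 0 {
			// Only currently existing ticket IDs are returned.
			return nil, fmt.Errorf("ticket %s does not exist", ticketID)
		}
		t := out.TicketList[0]
		switch aws.StringValue(t.Status) {
		case gamelift.MatchmakingConfigurationStatusCompleted:
			return t, nil // GameSessionConnectionInfo is populated here
		case gamelift.MatchmakingConfigurationStatusFailed,
			gamelift.MatchmakingConfigurationStatusCancelled,
			gamelift.MatchmakingConfigurationStatusTimedOut:
			return nil, fmt.Errorf("matchmaking ended: %s", aws.StringValue(t.StatusReason))
		}
		time.Sleep(10 * time.Second)
	}
}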
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5125,7 +4924,7 @@ func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeCon // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -5135,37 +4934,14 @@ func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeCon // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateRuntimeConfiguration +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5275,19 +5051,10 @@ func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPolicies // // * DescribeEC2InstanceLimits // -// * Manage scaling policies: -// -// PutScalingPolicy (auto-scaling) -// -// DescribeScalingPolicies (auto-scaling) -// -// DeleteScalingPolicy (auto-scaling) +// * Manage scaling policies: PutScalingPolicy (auto-scaling) DescribeScalingPolicies +// (auto-scaling) DeleteScalingPolicy (auto-scaling) // -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5724,13 +5491,8 @@ func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6043,7 +5805,7 @@ func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Reque // // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) // // Related operations // @@ -6152,7 +5914,7 @@ func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Reque // // Learn more // -// Set Up Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
+// Set Up Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -6162,37 +5924,14 @@ func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Reque // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateFleetCapacity +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6449,12 +6188,12 @@ func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *r // // A policy's rule statement has the following structure: // -// If [MetricName] is [ComparisonOperator][Threshold] for [EvaluationPeriods] +// If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] // minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment]. // // To implement the example, the rule statement would look like this: // -// If [PercentIdleInstances] is [GreaterThanThreshold][20] for [15] minutes, +// If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, // then [PercentChangeInCapacity] to/by [10]. // // To create or update a scaling policy, specify a unique combination of name @@ -6470,19 +6209,10 @@ func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *r // // * DescribeEC2InstanceLimits // -// * Manage scaling policies: -// -// PutScalingPolicy (auto-scaling) +// * Manage scaling policies: PutScalingPolicy (auto-scaling) DescribeScalingPolicies +// (auto-scaling) DeleteScalingPolicy (auto-scaling) // -// DescribeScalingPolicies (auto-scaling) -// -// DeleteScalingPolicy (auto-scaling) -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6861,13 +6591,8 @@ func (c *GameLift) SearchGameSessionsRequest(input *SearchGameSessionsInput) (re // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. 
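For illustration only: the PutScalingPolicy hunk above spells out the rule-statement structure and gives a concrete example. The quoted example translated into a request, assuming a placeholder fleet ID and a hypothetical policy name; a negative ScalingAdjustment would reduce capacity instead of raising it:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

// If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes,
// then [PercentChangeInCapacity] to/by [10].
func putIdleInstancesPolicy(svc *gamelift.GameLift, fleetID string) error {
	_, err := svc.PutScalingPolicy(&gamelift.PutScalingPolicyInput{
		Name:                  aws.String("idle-instances-policy"), // hypothetical name
		FleetId:               aws.String(fleetID),
		MetricName:            aws.String(gamelift.MetricNamePercentIdleInstances),
		ComparisonOperator:    aws.String(gamelift.ComparisonOperatorTypeGreaterThanThreshold),
		Threshold:             aws.Float64(20),
		EvaluationPeriods:     aws.Int64(15),
		ScalingAdjustmentType: aws.String(gamelift.ScalingAdjustmentTypePercentChangeInCapacity),
		ScalingAdjustment:     aws.Int64(10),
	})
	return err
}

A policy is identified by the unique combination of name and fleet ID, so re-sending the same request updates the existing policy rather than creating a duplicate.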
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6978,7 +6703,7 @@ func (c *GameLift) StartFleetActionsRequest(input *StartFleetActionsInput) (req // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -6988,37 +6713,14 @@ func (c *GameLift) StartFleetActionsRequest(input *StartFleetActionsInput) (req // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// * Update fleets: +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7165,13 +6867,8 @@ func (c *GameLift) StartGameSessionPlacementRequest(input *StartGameSessionPlace // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7276,8 +6973,7 @@ func (c *GameLift) StartMatchBackfillRequest(input *StartMatchBackfillInput) (re // all current players in the game session. If successful, a match backfill // ticket is created and returned with status set to QUEUED. The ticket is placed // in the matchmaker's ticket pool and processed. Track the status of the ticket -// to respond as needed. For more detail how to set up backfilling, see Backfill -// Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). +// to respond as needed. // // The process of finding backfill matches is essentially identical to the initial // matchmaking process. The matchmaker searches the pool and groups tickets @@ -7286,8 +6982,16 @@ func (c *GameLift) StartMatchBackfillRequest(input *StartMatchBackfillInput) (re // sessions for the new players. All tickets in the match are updated with the // game session's connection information, and the GameSession object is updated // to include matchmaker data on the new players. For more detail on how match -// backfill requests are processed, see How Amazon GameLift FlexMatch Works -// (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html). 
+// backfill requests are processed, see How Amazon GameLift FlexMatch Works +// (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html). +// +// Learn more +// +// Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html) +// +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// +// Related operations // // * StartMatchmaking // @@ -7397,9 +7101,7 @@ func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *r // A matchmaking request might start with a single player or a group of players // who want to play together. FlexMatch finds additional players as needed to // fill the match. Match type, rules, and the queue used to place a new game -// session are defined in a MatchmakingConfiguration. For complete information -// on setting up and using FlexMatch, see the topic Adding FlexMatch to Your -// Game (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html). +// session are defined in a MatchmakingConfiguration. // // To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, // and include the players to be matched. You must also include a set of player @@ -7451,6 +7153,18 @@ func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *r // and player session) is added to the matchmaking tickets. Matched players // can use the connection information to join the game. // +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// +// FlexMatch Integration Roadmap (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-tasks.html) +// +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// +// Related operations +// // * StartMatchmaking // // * DescribeMatchmaking @@ -7565,7 +7279,7 @@ func (c *GameLift) StopFleetActionsRequest(input *StopFleetActionsInput) (req *r // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -7575,37 +7289,14 @@ func (c *GameLift) StopFleetActionsRequest(input *StopFleetActionsInput) (req *r // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeEC2InstanceLimits +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. 
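For illustration only: the StartMatchmaking documentation above requires a matchmaking configuration name plus the players to match, with player attributes corresponding to the matchmaker's rule set. A hedged sketch; the configuration name "my-matchmaker" and the attribute key "skill" are assumptions, not values from the diff:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func requestMatch(svc *gamelift.GameLift) (*gamelift.MatchmakingTicket, error) {
	out, err := svc.StartMatchmaking(&gamelift.StartMatchmakingInput{
		// TicketId is optional; GameLift generates one when omitted.
		ConfigurationName: aws.String("my-matchmaker"), // hypothetical matchmaker
		Players: []*gamelift.Player{{
			PlayerId: aws.String("player-1"),
			// Attribute keys must match what the rule set evaluates;
			// "skill" is used here purely for illustration.
			PlayerAttributes: map[string]*gamelift.AttributeValue{
				"skill": {N: aws.Float64(42)},
			},
		}},
	})
	if err != nil {
		return nil, err
	}
	// Track the returned ticket with DescribeMatchmaking or, preferably,
	// the SNS notification target configured on the matchmaker.
	return out.MatchmakingTicket, nil
}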
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7713,13 +7404,8 @@ func (c *GameLift) StopGameSessionPlacementRequest(input *StopGameSessionPlaceme // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7812,9 +7498,23 @@ func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *req // StopMatchmaking API operation for Amazon GameLift. // -// Cancels a matchmaking ticket that is currently being processed. To stop the -// matchmaking operation, specify the ticket ID. If successful, work on the -// ticket is stopped, and the ticket status is changed to CANCELLED. +// Cancels a matchmaking ticket or match backfill ticket that is currently being +// processed. To stop the matchmaking operation, specify the ticket ID. If successful, +// work on the ticket is stopped, and the ticket status is changed to CANCELLED. +// +// This call is also used to turn off automatic backfill for an individual game +// session. This is for game sessions that are created with a matchmaking configuration +// that has automatic backfill enabled. The ticket ID is included in the MatchmakerData +// of an updated game session object, which is provided to the game server. +// +// If the action is successful, the service sends back an empty JSON struct +// with the HTTP 200 response (not an empty HTTP body). +// +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// Related operations // // * StartMatchmaking // @@ -8030,7 +7730,7 @@ func (c *GameLift) UpdateBuildRequest(input *UpdateBuildInput) (req *request.Req // // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) // // Related operations // @@ -8140,7 +7840,7 @@ func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInpu // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
// // Related operations // @@ -8150,37 +7850,14 @@ func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInpu // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateFleetCapacity +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8305,7 +7982,7 @@ func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) ( // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). // // Related operations // @@ -8315,37 +7992,14 @@ func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) ( // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetEvents +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8460,7 +8114,7 @@ func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettings // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
// // Related operations // @@ -8470,37 +8124,14 @@ func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettings // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetPortSettings +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8626,13 +8257,8 @@ func (c *GameLift) UpdateGameSessionRequest(input *UpdateGameSessionInput) (req // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8837,8 +8463,16 @@ func (c *GameLift) UpdateMatchmakingConfigurationRequest(input *UpdateMatchmakin // UpdateMatchmakingConfiguration API operation for Amazon GameLift. // -// Updates settings for a FlexMatch matchmaking configuration. To update settings, -// specify the configuration name to be updated and provide the new settings. +// Updates settings for a FlexMatch matchmaking configuration. These changes +// affect all matches and game sessions that are created after the update. To +// update settings, specify the configuration name to be updated and provide +// the new settings. +// +// Learn more +// +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// +// Related operations // // * CreateMatchmakingConfiguration // @@ -8963,7 +8597,7 @@ func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigu // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
// // Related operations // @@ -8973,37 +8607,14 @@ func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigu // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// * Update fleets: +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9309,7 +8920,7 @@ type AcceptMatchInput struct { // REQUIRES_ACCEPTANCE; otherwise this request will fail. // // TicketId is a required field - TicketId *string `min:"1" type:"string" required:"true"` + TicketId *string `type:"string" required:"true"` } // String returns the string representation @@ -9334,9 +8945,6 @@ func (s *AcceptMatchInput) Validate() error { if s.TicketId == nil { invalidParams.Add(request.NewErrParamRequired("TicketId")) } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9471,7 +9079,7 @@ func (s *Alias) SetRoutingStrategy(v *RoutingStrategy) *Alias { // Values for use in Player attribute key:value pairs. This object lets you // specify an attribute value using any of the valid data types: string, number, -// string array or data map. Each AttributeValue object can use only one of +// string array, or data map. Each AttributeValue object can use only one of // the available properties. type AttributeValue struct { _ struct{} `type:"structure"` @@ -9692,6 +9300,42 @@ func (s *Build) SetVersion(v string) *Build { return s } +type CertificateConfiguration struct { + _ struct{} `type:"structure"` + + // CertificateType is a required field + CertificateType *string `type:"string" required:"true" enum:"CertificateType"` +} + +// String returns the string representation +func (s CertificateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CertificateConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CertificateConfiguration"} + if s.CertificateType == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateType sets the CertificateType field's value. +func (s *CertificateConfiguration) SetCertificateType(v string) *CertificateConfiguration { + s.CertificateType = &v + return s +} + // Represents the input for a request action. 
type CreateAliasInput struct { _ struct{} `type:"structure"` @@ -9924,6 +9568,8 @@ type CreateFleetInput struct { // is created. BuildId *string `type:"string"` + CertificateConfiguration *CertificateConfiguration `type:"structure"` + // Human-readable description of a fleet. Description *string `min:"1" type:"string"` @@ -9947,7 +9593,7 @@ type CreateFleetInput struct { // Indicates whether to use on-demand instances or spot instances for this fleet. // If empty, the default is ON_DEMAND. Both categories of instances use identical // hardware and configurations based on the instance type selected for this - // fleet. Learn more about On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot). + // fleet. Learn more about On-Demand versus Spot Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot). FleetType *string `type:"string" enum:"FleetType"` // Unique identifier for an AWS IAM role that manages access to your AWS services. @@ -9956,7 +9602,7 @@ type CreateFleetInput struct { // daemons (background processes). Create a role or look up a role's ARN using // the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management // Console. Learn more about using on-box credentials for your game servers - // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). + // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). InstanceRoleArn *string `min:"1" type:"string"` // This parameter is no longer used. Instead, to specify where Amazon GameLift @@ -10073,6 +9719,11 @@ func (s *CreateFleetInput) Validate() error { if s.ServerLaunchPath != nil && len(*s.ServerLaunchPath) < 1 { invalidParams.Add(request.NewErrParamMinLen("ServerLaunchPath", 1)) } + if s.CertificateConfiguration != nil { + if err := s.CertificateConfiguration.Validate(); err != nil { + invalidParams.AddNested("CertificateConfiguration", err.(request.ErrInvalidParams)) + } + } if s.EC2InboundPermissions != nil { for i, v := range s.EC2InboundPermissions { if v == nil { @@ -10101,6 +9752,12 @@ func (s *CreateFleetInput) SetBuildId(v string) *CreateFleetInput { return s } +// SetCertificateConfiguration sets the CertificateConfiguration field's value. +func (s *CreateFleetInput) SetCertificateConfiguration(v *CertificateConfiguration) *CreateFleetInput { + s.CertificateConfiguration = v + return s +} + // SetDescription sets the Description field's value. func (s *CreateFleetInput) SetDescription(v string) *CreateFleetInput { s.Description = &v @@ -10524,7 +10181,7 @@ func (s *CreateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) type CreateMatchmakingConfigurationInput struct { _ struct{} `type:"structure"` - // Flag that determines whether or not a match that was created with this configuration + // Flag that determines whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. // // AcceptanceRequired is a required field @@ -10541,7 +10198,15 @@ type CreateMatchmakingConfigurationInput struct { // for the match. AdditionalPlayerCount *int64 `type:"integer"` - // Information to attached to all events related to the matchmaking configuration. 
+ // Method used to backfill game sessions created with this matchmaking configuration. + // Specify MANUAL when your game manages backfill requests manually or does + // not use the match backfill feature. Specify AUTOMATIC to have GameLift create + // a StartMatchBackfill request whenever a game session has one or more open + // slots. Learn more about manual and automatic backfill in Backfill Existing + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + BackfillMode *string `type:"string" enum:"BackfillMode"` + + // Information to be added to all events related to this matchmaking configuration. CustomEventData *string `type:"string"` // Meaningful description of the matchmaking configuration. @@ -10563,7 +10228,7 @@ type CreateMatchmakingConfigurationInput struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // is arn:aws:gamelift:::gamesessionqueue/. // These queues are used when placing game sessions for matches that are created // with this matchmaking configuration. Queues can be located in any region. // @@ -10574,13 +10239,14 @@ type CreateMatchmakingConfigurationInput struct { // the configuration associated with a matchmaking request or ticket. // // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + Name *string `type:"string" required:"true"` // SNS topic ARN that is set up to receive matchmaking notifications. NotificationTarget *string `type:"string"` // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. + // before timing out. Requests that fail due to timing out can be resubmitted + // as needed. // // RequestTimeoutSeconds is a required field RequestTimeoutSeconds *int64 `min:"1" type:"integer" required:"true"` @@ -10590,7 +10256,7 @@ type CreateMatchmakingConfigurationInput struct { // same region. // // RuleSetName is a required field - RuleSetName *string `min:"1" type:"string" required:"true"` + RuleSetName *string `type:"string" required:"true"` } // String returns the string representation @@ -10624,9 +10290,6 @@ func (s *CreateMatchmakingConfigurationInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } if s.RequestTimeoutSeconds == nil { invalidParams.Add(request.NewErrParamRequired("RequestTimeoutSeconds")) } @@ -10636,9 +10299,6 @@ func (s *CreateMatchmakingConfigurationInput) Validate() error { if s.RuleSetName == nil { invalidParams.Add(request.NewErrParamRequired("RuleSetName")) } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } if s.GameProperties != nil { for i, v := range s.GameProperties { if v == nil { @@ -10674,6 +10334,12 @@ func (s *CreateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) return s } +// SetBackfillMode sets the BackfillMode field's value. +func (s *CreateMatchmakingConfigurationInput) SetBackfillMode(v string) *CreateMatchmakingConfigurationInput { + s.BackfillMode = &v + return s +} + // SetCustomEventData sets the CustomEventData field's value. 
func (s *CreateMatchmakingConfigurationInput) SetCustomEventData(v string) *CreateMatchmakingConfigurationInput { s.CustomEventData = &v @@ -10761,10 +10427,10 @@ type CreateMatchmakingRuleSetInput struct { // is different from the optional "name" field in the rule set body.) // // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + Name *string `type:"string" required:"true"` - // Collection of matchmaking rules, formatted as a JSON string. Note that comments - // are not allowed in JSON, but most elements support a description field. + // Collection of matchmaking rules, formatted as a JSON string. Comments are + // not allowed in JSON, but most elements support a description field. // // RuleSetBody is a required field RuleSetBody *string `min:"1" type:"string" required:"true"` @@ -10786,9 +10452,6 @@ func (s *CreateMatchmakingRuleSetInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } if s.RuleSetBody == nil { invalidParams.Add(request.NewErrParamRequired("RuleSetBody")) } @@ -11553,7 +11216,7 @@ type DeleteMatchmakingConfigurationInput struct { // Unique identifier for a matchmaking configuration // // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + Name *string `type:"string" required:"true"` } // String returns the string representation @@ -11572,9 +11235,6 @@ func (s *DeleteMatchmakingConfigurationInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -11610,7 +11270,7 @@ type DeleteMatchmakingRuleSetInput struct { // set name is different from the optional "name" field in the rule set body.) // // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + Name *string `type:"string" required:"true"` } // String returns the string representation @@ -11629,9 +11289,6 @@ func (s *DeleteMatchmakingRuleSetInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -13172,7 +12829,7 @@ type DescribeMatchmakingConfigurationsInput struct { // Unique identifier for a matchmaking rule set. Use this parameter to retrieve // all matchmaking configurations that use this rule set. 
- RuleSetName *string `min:"1" type:"string"` + RuleSetName *string `type:"string"` } // String returns the string representation @@ -13194,9 +12851,6 @@ func (s *DescribeMatchmakingConfigurationsInput) Validate() error { if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -13977,37 +13631,14 @@ func (s *DesiredPlayerSession) SetPlayerId(v string) *DesiredPlayerSession { // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// StartFleetActions +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type EC2InstanceCounts struct { _ struct{} `type:"structure"` @@ -14151,7 +13782,7 @@ type Event struct { // * FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. // The compressed build has started downloading to a fleet instance for installation. // - // * FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the + // * FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the // fleet instance. // // * FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully @@ -14179,7 +13810,7 @@ type Event struct { // // * FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. // - // * FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time + // * FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time // configuration failed because the executable specified in a launch path // does not exist on the instance. // @@ -14191,8 +13822,7 @@ type Event struct { // // * FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. // - // - // * FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete + // * FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete // one of the steps in the fleet activation process. This event code indicates // that the game build was successfully downloaded to a fleet instance, built, // and validated, but was not able to start a server process. A possible @@ -14220,7 +13850,7 @@ type Event struct { // // Spot instance events: // - // * INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with + // * INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with // a two-minute notification. // // Other fleet events: @@ -14235,7 +13865,7 @@ type Event struct { // // * FLEET_DELETED -- A request to delete a fleet was initiated. // - // * GENERIC_EVENT -- An unspecified event has occurred. + // * GENERIC_EVENT -- An unspecified event has occurred. EventCode *string `type:"string" enum:"EventCode"` // Unique identifier for a fleet event. 
@@ -14311,43 +13941,22 @@ func (s *Event) SetResourceId(v string) *Event { // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// * Update fleets: +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type FleetAttributes struct { _ struct{} `type:"structure"` // Unique identifier for a build. BuildId *string `type:"string"` + CertificateConfiguration *CertificateConfiguration `type:"structure"` + // Time stamp indicating when this data object was created. Format is a number // expressed in Unix time as milliseconds (for example "1469498468.057"). CreationTime *time.Time `type:"timestamp"` @@ -14371,7 +13980,7 @@ type FleetAttributes struct { // daemons (background processes). Create a role or look up a role's ARN using // the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management // Console. Learn more about using on-box credentials for your game servers - // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). + // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). InstanceRoleArn *string `min:"1" type:"string"` // EC2 instance type indicating the computing resources of each instance in @@ -14478,6 +14087,12 @@ func (s *FleetAttributes) SetBuildId(v string) *FleetAttributes { return s } +// SetCertificateConfiguration sets the CertificateConfiguration field's value. +func (s *FleetAttributes) SetCertificateConfiguration(v *CertificateConfiguration) *FleetAttributes { + s.CertificateConfiguration = v + return s +} + // SetCreationTime sets the CreationTime field's value. 
func (s *FleetAttributes) SetCreationTime(v time.Time) *FleetAttributes { s.CreationTime = &v @@ -14603,37 +14218,14 @@ func (s *FleetAttributes) SetTerminationTime(v time.Time) *FleetAttributes { // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetEvents +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type FleetCapacity struct { _ struct{} `type:"structure"` @@ -14688,37 +14280,14 @@ func (s *FleetCapacity) SetInstanceType(v string) *FleetCapacity { // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// DescribeFleetCapacity +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity -// -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type FleetUtilization struct { _ struct{} `type:"structure"` @@ -14787,8 +14356,8 @@ func (s *FleetUtilization) SetMaximumPlayerSessionCount(v int64) *FleetUtilizati // to be used when setting up the new game session, such as to specify a game // mode, level, or map. Game properties are passed to the game server process // when initiating a new game session; the server process uses the properties -// as appropriate. For more information, see the Amazon GameLift Developer -// Guide (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). +// as appropriate. For more information, see the Amazon GameLift Developer Guide +// (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). type GameProperty struct { _ struct{} `type:"structure"` @@ -14862,13 +14431,8 @@ func (s *GameProperty) SetValue(v string) *GameProperty { // // * GetGameSessionLogUrl // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement type GameSession struct { _ struct{} `type:"structure"` @@ -14884,6 +14448,8 @@ type GameSession struct { // Number of players currently in the game session. 
CurrentPlayerSessionCount *int64 `type:"integer"` + DnsName *string `type:"string"` + // Unique identifier for a fleet that the game session is running on. FleetId *string `type:"string"` @@ -14973,6 +14539,12 @@ func (s *GameSession) SetCurrentPlayerSessionCount(v int64) *GameSession { return s } +// SetDnsName sets the DnsName field's value. +func (s *GameSession) SetDnsName(v string) *GameSession { + s.DnsName = &v + return s +} + // SetFleetId sets the FleetId field's value. func (s *GameSession) SetFleetId(v string) *GameSession { s.FleetId = &v @@ -15060,6 +14632,8 @@ func (s *GameSession) SetTerminationTime(v time.Time) *GameSession { type GameSessionConnectionInfo struct { _ struct{} `type:"structure"` + DnsName *string `type:"string"` + // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session and uniquely identifies it. GameSessionArn *string `min:"1" type:"string"` @@ -15087,6 +14661,12 @@ func (s GameSessionConnectionInfo) GoString() string { return s.String() } +// SetDnsName sets the DnsName field's value. +func (s *GameSessionConnectionInfo) SetDnsName(v string) *GameSessionConnectionInfo { + s.DnsName = &v + return s +} + // SetGameSessionArn sets the GameSessionArn field's value. func (s *GameSessionConnectionInfo) SetGameSessionArn(v string) *GameSessionConnectionInfo { s.GameSessionArn = &v @@ -15164,6 +14744,8 @@ func (s *GameSessionDetail) SetProtectionPolicy(v string) *GameSessionDetail { type GameSessionPlacement struct { _ struct{} `type:"structure"` + DnsName *string `type:"string"` + // Time stamp indicating when this request was completed, canceled, or timed // out. EndTime *time.Time `type:"timestamp"` @@ -15267,6 +14849,12 @@ func (s GameSessionPlacement) GoString() string { return s.String() } +// SetDnsName sets the DnsName field's value. +func (s *GameSessionPlacement) SetDnsName(v string) *GameSessionPlacement { + s.DnsName = &v + return s +} + // SetEndTime sets the EndTime field's value. func (s *GameSessionPlacement) SetEndTime(v time.Time) *GameSessionPlacement { s.EndTime = &v @@ -15403,7 +14991,7 @@ type GameSessionQueue struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // is arn:aws:gamelift:::gamesessionqueue/. GameSessionQueueArn *string `min:"1" type:"string"` // Descriptive label that is associated with game session queue. Queue names @@ -15675,6 +15263,8 @@ type Instance struct { // expressed in Unix time as milliseconds (for example "1469498468.057"). CreationTime *time.Time `type:"timestamp"` + DnsName *string `type:"string"` + // Unique identifier for a fleet that the instance is in. FleetId *string `type:"string"` @@ -15722,6 +15312,12 @@ func (s *Instance) SetCreationTime(v time.Time) *Instance { return s } +// SetDnsName sets the DnsName field's value. +func (s *Instance) SetDnsName(v string) *Instance { + s.DnsName = &v + return s +} + // SetFleetId sets the FleetId field's value. 
func (s *Instance) SetFleetId(v string) *Instance { s.FleetId = &v @@ -16421,7 +16017,7 @@ func (s *MatchedPlayerSession) SetPlayerSessionId(v string) *MatchedPlayerSessio type MatchmakingConfiguration struct { _ struct{} `type:"structure"` - // Flag that determines whether or not a match that was created with this configuration + // Flag that determines whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. AcceptanceRequired *bool `type:"boolean"` @@ -16436,11 +16032,18 @@ type MatchmakingConfiguration struct { // for the match. AdditionalPlayerCount *int64 `type:"integer"` + // Method used to backfill game sessions created with this matchmaking configuration. + // MANUAL indicates that the game makes backfill requests or does not use the + // match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill + // requests whenever a game session has one or more open slots. Learn more about + // manual and automatic backfill in Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + BackfillMode *string `type:"string" enum:"BackfillMode"` + // Time stamp indicating when this data object was created. Format is a number // expressed in Unix time as milliseconds (for example "1469498468.057"). CreationTime *time.Time `type:"timestamp"` - // Information to attached to all events related to the matchmaking configuration. + // Information to attach to all events related to the matchmaking configuration. CustomEventData *string `type:"string"` // Descriptive label that is associated with matchmaking configuration. @@ -16462,26 +16065,27 @@ type MatchmakingConfiguration struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // is arn:aws:gamelift:::gamesessionqueue/. // These queues are used when placing game sessions for matches that are created // with this matchmaking configuration. Queues can be located in any region. GameSessionQueueArns []*string `type:"list"` // Unique identifier for a matchmaking configuration. This name is used to identify // the configuration associated with a matchmaking request or ticket. - Name *string `min:"1" type:"string"` + Name *string `type:"string"` // SNS topic ARN that is set up to receive matchmaking notifications. NotificationTarget *string `type:"string"` // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. + // before timing out. Requests that fail due to timing out can be resubmitted + // as needed. RequestTimeoutSeconds *int64 `min:"1" type:"integer"` // Unique identifier for a matchmaking rule set to use with this configuration. // A matchmaking configuration can only use rule sets that are defined in the // same region. - RuleSetName *string `min:"1" type:"string"` + RuleSetName *string `type:"string"` } // String returns the string representation @@ -16512,6 +16116,12 @@ func (s *MatchmakingConfiguration) SetAdditionalPlayerCount(v int64) *Matchmakin return s } +// SetBackfillMode sets the BackfillMode field's value. 
+func (s *MatchmakingConfiguration) SetBackfillMode(v string) *MatchmakingConfiguration { + s.BackfillMode = &v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *MatchmakingConfiguration) SetCreationTime(v time.Time) *MatchmakingConfiguration { s.CreationTime = &v @@ -16573,9 +16183,9 @@ func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfigur } // Set of rule statements, used with FlexMatch, that determine how to build -// a certain kind of player match. Each rule set describes a type of group to -// be created and defines the parameters for acceptable player matches. Rule -// sets are used in MatchmakingConfiguration objects. +// your player matches. Each rule set describes a type of group to be created +// and defines the parameters for acceptable player matches. Rule sets are used +// in MatchmakingConfiguration objects. // // A rule set may define the following elements for a match. For detailed information // and examples showing how to construct a rule set, see Build a FlexMatch Rule @@ -16585,7 +16195,6 @@ func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfigur // the match and set minimum and maximum team sizes. For example, a rule // set might describe a 4x4 match that requires all eight slots to be filled. // -// // * Player attributes -- Optional. These attributes specify a set of player // characteristics to evaluate when looking for a match. Matchmaking requests // that use a rule set with player attributes must provide the corresponding @@ -16600,7 +16209,6 @@ func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfigur // average skill level. or may describe an entire group--such as all teams // must be evenly matched or have at least one player in a certain role. // -// // * Expansions -- Optional. Expansions allow you to relax the rules after // a period of time when no acceptable matches are found. This feature lets // you balance getting players into games in a reasonable amount of time @@ -16614,14 +16222,14 @@ type MatchmakingRuleSet struct { // expressed in Unix time as milliseconds (for example "1469498468.057"). CreationTime *time.Time `type:"timestamp"` - // Collection of matchmaking rules, formatted as a JSON string. (Note that comments14 - // are not allowed in JSON, but most elements support a description field.) + // Collection of matchmaking rules, formatted as a JSON string. Comments are + // not allowed in JSON, but most elements support a description field. // // RuleSetBody is a required field RuleSetBody *string `min:"1" type:"string" required:"true"` // Unique identifier for a matchmaking rule set - RuleSetName *string `min:"1" type:"string"` + RuleSetName *string `type:"string"` } // String returns the string representation @@ -16662,7 +16270,7 @@ type MatchmakingTicket struct { // Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking // configurations determine how players are grouped into a match and how a new // game session is created for the match. - ConfigurationName *string `min:"1" type:"string"` + ConfigurationName *string `type:"string"` // Time stamp indicating when this matchmaking request stopped being processed // due to success, failure, or cancellation. Format is a number expressed in @@ -16706,10 +16314,11 @@ type MatchmakingTicket struct { // host the players. A ticket in this state contains the necessary connection // information for players. // - // * FAILED -- The matchmaking request was not completed. 
Tickets with players - // who fail to accept a proposed match are placed in FAILED status. + // * FAILED -- The matchmaking request was not completed. // - // * CANCELLED -- The matchmaking request was canceled with a call to StopMatchmaking. + // * CANCELLED -- The matchmaking request was canceled. This may be the result + // of a call to StopMatchmaking or a proposed match that one or more players + // failed to accept. // // * TIMED_OUT -- The matchmaking request was not successful within the duration // specified in the matchmaking configuration. @@ -16728,7 +16337,7 @@ type MatchmakingTicket struct { StatusReason *string `type:"string"` // Unique identifier for a matchmaking ticket. - TicketId *string `min:"1" type:"string"` + TicketId *string `type:"string"` } // String returns the string representation @@ -16812,13 +16421,8 @@ func (s *MatchmakingTicket) SetTicketId(v string) *MatchmakingTicket { // // * DescribePlayerSessions // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement type PlacedPlayerSession struct { _ struct{} `type:"structure"` @@ -17072,13 +16676,8 @@ func (s *PlayerLatencyPolicy) SetPolicyDurationSeconds(v int64) *PlayerLatencyPo // // * DescribePlayerSessions // -// * Game session placements -// -// StartGameSessionPlacement -// -// DescribeGameSessionPlacement -// -// StopGameSessionPlacement +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement type PlayerSession struct { _ struct{} `type:"structure"` @@ -17086,6 +16685,8 @@ type PlayerSession struct { // expressed in Unix time as milliseconds (for example "1469498468.057"). CreationTime *time.Time `type:"timestamp"` + DnsName *string `type:"string"` + // Unique identifier for a fleet that the player's game session is running on. FleetId *string `type:"string"` @@ -17148,6 +16749,12 @@ func (s *PlayerSession) SetCreationTime(v time.Time) *PlayerSession { return s } +// SetDnsName sets the DnsName field's value. +func (s *PlayerSession) SetDnsName(v string) *PlayerSession { + s.DnsName = &v + return s +} + // SetFleetId sets the FleetId field's value. func (s *PlayerSession) SetFleetId(v string) *PlayerSession { s.FleetId = &v @@ -17605,43 +17212,17 @@ func (s *ResourceCreationLimitPolicy) SetPolicyPeriodInMinutes(v int64) *Resourc // Routing configuration for a fleet alias. // -// * CreateFleet -// -// * ListFleets -// -// * DeleteFleet -// -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes -// -// UpdateFleetCapacity +// * CreateAlias // -// UpdateFleetPortSettings +// * ListAliases // -// UpdateRuntimeConfiguration +// * DescribeAlias // -// * Manage fleet actions: +// * UpdateAlias // -// StartFleetActions +// * DeleteAlias // -// StopFleetActions +// * ResolveAlias type RoutingStrategy struct { _ struct{} `type:"structure"` @@ -17700,8 +17281,8 @@ func (s *RoutingStrategy) SetType(v string) *RoutingStrategy { // configuration. // // The run-time configuration enables the instances in a fleet to run multiple -// processes simultaneously. 
Learn more about Running Multiple Processes on -// a Fleet (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html). +// processes simultaneously. Learn more about Running Multiple Processes on +// a Fleet (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html). // // A Amazon GameLift instance is limited to 50 processes running simultaneously. // To calculate the total number of processes in a run-time configuration, add @@ -17713,37 +17294,14 @@ func (s *RoutingStrategy) SetType(v string) *RoutingStrategy { // // * DeleteFleet // -// * Describe fleets: -// -// DescribeFleetAttributes -// -// DescribeFleetCapacity -// -// DescribeFleetPortSettings -// -// DescribeFleetUtilization -// -// DescribeRuntimeConfiguration -// -// DescribeEC2InstanceLimits -// -// DescribeFleetEvents -// -// * Update fleets: -// -// UpdateFleetAttributes +// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings +// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits +// DescribeFleetEvents // -// UpdateFleetCapacity +// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings +// UpdateRuntimeConfiguration // -// UpdateFleetPortSettings -// -// UpdateRuntimeConfiguration -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type RuntimeConfiguration struct { _ struct{} `type:"structure"` @@ -17907,19 +17465,10 @@ func (s *S3Location) SetRoleArn(v string) *S3Location { // // * DescribeEC2InstanceLimits // -// * Manage scaling policies: -// -// PutScalingPolicy (auto-scaling) -// -// DescribeScalingPolicies (auto-scaling) -// -// DeleteScalingPolicy (auto-scaling) -// -// * Manage fleet actions: +// * Manage scaling policies: PutScalingPolicy (auto-scaling) DescribeScalingPolicies +// (auto-scaling) DeleteScalingPolicy (auto-scaling) // -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type ScalingPolicy struct { _ struct{} `type:"structure"` @@ -18381,7 +17930,7 @@ func (s *SearchGameSessionsOutput) SetNextToken(v string) *SearchGameSessionsOut // the custom game build executable or Realtime launch script, optional launch // parameters, and the number of server processes with this configuration to // maintain concurrently on the instance. Server process configurations make -// up a fleet's RuntimeConfiguration. +// up a fleet's RuntimeConfiguration . type ServerProcess struct { _ struct{} `type:"structure"` @@ -18397,7 +17946,6 @@ type ServerProcess struct { // // * Windows (for custom game builds only): C:\game. Example: "C:\game\MyGame\server.exe" // - // // * Linux: /local/game. Examples: "/local/game/MyGame/server.exe" or "/local/game/MyRealtimeScript.js" // // LaunchPath is a required field @@ -18733,7 +18281,7 @@ type StartMatchBackfillInput struct { // parameter. // // ConfigurationName is a required field - ConfigurationName *string `min:"1" type:"string" required:"true"` + ConfigurationName *string `type:"string" required:"true"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session and uniquely identifies it. 
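// The RuntimeConfiguration and ServerProcess hunks above describe fleets that
// run several server processes per instance, with the per-instance total of
// ConcurrentExecutions capped at 50 and Linux launch paths rooted at
// /local/game. A minimal sketch of pushing such a configuration with this SDK
// version; the fleet ID and executable paths are hypothetical placeholders,
// not values taken from this diff.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := gamelift.New(sess)

	// Two process types: 40 + 10 = 50 concurrent executions, the documented
	// per-instance ceiling.
	out, err := svc.UpdateRuntimeConfiguration(&gamelift.UpdateRuntimeConfigurationInput{
		FleetId: aws.String("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912"), // hypothetical
		RuntimeConfiguration: &gamelift.RuntimeConfiguration{
			ServerProcesses: []*gamelift.ServerProcess{
				{
					LaunchPath:           aws.String("/local/game/MyGame/server.exe"),
					ConcurrentExecutions: aws.Int64(40),
				},
				{
					LaunchPath:           aws.String("/local/game/MyRealtimeScript.js"),
					ConcurrentExecutions: aws.Int64(10),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.RuntimeConfiguration)
}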
@@ -18748,9 +18296,7 @@ type StartMatchBackfillInput struct { // * PlayerID, PlayerAttributes, Team -\\- This information is maintained // in the GameSession object, MatchmakerData property, for all players who // are currently assigned to the game session. The matchmaker data is in - // JSON syntax, formatted as a string. For more details, see Match Data - // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). - // + // JSON syntax, formatted as a string. For more details, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). // // * LatencyInMs -\\- If the matchmaker uses player latency, include a latency // value, in milliseconds, for the region that the game session is currently @@ -18762,7 +18308,7 @@ type StartMatchBackfillInput struct { // Unique identifier for a matchmaking ticket. If no ticket ID is specified // here, Amazon GameLift will generate one in the form of a UUID. Use this identifier // to track the match backfill ticket status and retrieve match results. - TicketId *string `min:"1" type:"string"` + TicketId *string `type:"string"` } // String returns the string representation @@ -18781,9 +18327,6 @@ func (s *StartMatchBackfillInput) Validate() error { if s.ConfigurationName == nil { invalidParams.Add(request.NewErrParamRequired("ConfigurationName")) } - if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1)) - } if s.GameSessionArn == nil { invalidParams.Add(request.NewErrParamRequired("GameSessionArn")) } @@ -18793,9 +18336,6 @@ func (s *StartMatchBackfillInput) Validate() error { if s.Players == nil { invalidParams.Add(request.NewErrParamRequired("Players")) } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } if s.Players != nil { for i, v := range s.Players { if v == nil { @@ -18871,7 +18411,7 @@ type StartMatchmakingInput struct { // configurations must exist in the same region as this request. // // ConfigurationName is a required field - ConfigurationName *string `min:"1" type:"string" required:"true"` + ConfigurationName *string `type:"string" required:"true"` // Information on each player to be matched. This information must include a // player ID, and may contain player attributes and latency data to be used @@ -18884,7 +18424,7 @@ type StartMatchmakingInput struct { // Unique identifier for a matchmaking ticket. If no ticket ID is specified // here, Amazon GameLift will generate one in the form of a UUID. Use this identifier // to track the matchmaking ticket status and retrieve match results. - TicketId *string `min:"1" type:"string"` + TicketId *string `type:"string"` } // String returns the string representation @@ -18903,15 +18443,9 @@ func (s *StartMatchmakingInput) Validate() error { if s.ConfigurationName == nil { invalidParams.Add(request.NewErrParamRequired("ConfigurationName")) } - if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1)) - } if s.Players == nil { invalidParams.Add(request.NewErrParamRequired("Players")) } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } if s.Players != nil { for i, v := range s.Players { if v == nil { @@ -19116,7 +18650,7 @@ type StopMatchmakingInput struct { // Unique identifier for a matchmaking ticket. 
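// The StartMatchBackfillInput docs above note that PlayerID, PlayerAttributes,
// and Team come from the game session's MatchmakerData, that LatencyInMs is a
// per-region value, and that TicketId may now be omitted so GameLift generates
// a UUID. A hedged sketch of a backfill request under those rules; the
// configuration name, session ARN, and player values are invented for
// illustration.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := gamelift.New(sess)

	out, err := svc.StartMatchBackfill(&gamelift.StartMatchBackfillInput{
		ConfigurationName: aws.String("my-flexmatch-config"),                                        // hypothetical
		GameSessionArn:    aws.String("arn:aws:gamelift:us-west-2::gamesession/fleet-xxx/gsess-xxx"), // hypothetical
		Players: []*gamelift.Player{
			{
				PlayerId: aws.String("player-1"),
				Team:     aws.String("red"),
				// Latency for the region currently hosting the session.
				LatencyInMs: map[string]*int64{"us-west-2": aws.Int64(50)},
			},
		},
		// TicketId omitted: the service generates one in the form of a UUID.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.MatchmakingTicket.Status))
}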
// // TicketId is a required field - TicketId *string `min:"1" type:"string" required:"true"` + TicketId *string `type:"string" required:"true"` } // String returns the string representation @@ -19135,9 +18669,6 @@ func (s *StopMatchmakingInput) Validate() error { if s.TicketId == nil { invalidParams.Add(request.NewErrParamRequired("TicketId")) } - if s.TicketId != nil && len(*s.TicketId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TicketId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -19178,19 +18709,10 @@ func (s StopMatchmakingOutput) GoString() string { // // * DescribeEC2InstanceLimits // -// * Manage scaling policies: +// * Manage scaling policies: PutScalingPolicy (auto-scaling) DescribeScalingPolicies +// (auto-scaling) DeleteScalingPolicy (auto-scaling) // -// PutScalingPolicy (auto-scaling) -// -// DescribeScalingPolicies (auto-scaling) -// -// DeleteScalingPolicy (auto-scaling) -// -// * Manage fleet actions: -// -// StartFleetActions -// -// StopFleetActions +// * Manage fleet actions: StartFleetActions StopFleetActions type TargetConfiguration struct { _ struct{} `type:"structure"` @@ -19974,7 +19496,7 @@ func (s *UpdateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) type UpdateMatchmakingConfigurationInput struct { _ struct{} `type:"structure"` - // Flag that determines whether or not a match that was created with this configuration + // Flag that determines whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. AcceptanceRequired *bool `type:"boolean"` @@ -19989,7 +19511,15 @@ type UpdateMatchmakingConfigurationInput struct { // for the match. AdditionalPlayerCount *int64 `type:"integer"` - // Information to attached to all events related to the matchmaking configuration. + // Method used to backfill game sessions created with this matchmaking configuration. + // Specify MANUAL when your game manages backfill requests manually or does + // not use the match backfill feature. Specify AUTOMATIC to have GameLift create + // a StartMatchBackfill request whenever a game session has one or more open + // slots. Learn more about manual and automatic backfill in Backfill Existing + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + BackfillMode *string `type:"string" enum:"BackfillMode"` + + // Information to add to all events related to the matchmaking configuration. CustomEventData *string `type:"string"` // Descriptive label that is associated with matchmaking configuration. @@ -20011,7 +19541,7 @@ type UpdateMatchmakingConfigurationInput struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to a game session queue and uniquely identifies it. Format - // is arn:aws:gamelift:::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. + // is arn:aws:gamelift:::gamesessionqueue/. // These queues are used when placing game sessions for matches that are created // with this matchmaking configuration. Queues can be located in any region. GameSessionQueueArns []*string `type:"list"` @@ -20019,21 +19549,22 @@ type UpdateMatchmakingConfigurationInput struct { // Unique identifier for a matchmaking configuration to update. // // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + Name *string `type:"string" required:"true"` - // SNS topic ARN that is set up to receive matchmaking notifications. 
See Setting + // SNS topic ARN that is set up to receive matchmaking notifications. See Setting // up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) // for more information. NotificationTarget *string `type:"string"` // Maximum duration, in seconds, that a matchmaking ticket can remain in process - // before timing out. Requests that time out can be resubmitted as needed. + // before timing out. Requests that fail due to timing out can be resubmitted + // as needed. RequestTimeoutSeconds *int64 `min:"1" type:"integer"` // Unique identifier for a matchmaking rule set to use with this configuration. // A matchmaking configuration can only use rule sets that are defined in the // same region. - RuleSetName *string `min:"1" type:"string"` + RuleSetName *string `type:"string"` } // String returns the string representation @@ -20061,15 +19592,9 @@ func (s *UpdateMatchmakingConfigurationInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } if s.RequestTimeoutSeconds != nil && *s.RequestTimeoutSeconds < 1 { invalidParams.Add(request.NewErrParamMinValue("RequestTimeoutSeconds", 1)) } - if s.RuleSetName != nil && len(*s.RuleSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleSetName", 1)) - } if s.GameProperties != nil { for i, v := range s.GameProperties { if v == nil { @@ -20105,6 +19630,12 @@ func (s *UpdateMatchmakingConfigurationInput) SetAdditionalPlayerCount(v int64) return s } +// SetBackfillMode sets the BackfillMode field's value. +func (s *UpdateMatchmakingConfigurationInput) SetBackfillMode(v string) *UpdateMatchmakingConfigurationInput { + s.BackfillMode = &v + return s +} + // SetCustomEventData sets the CustomEventData field's value. func (s *UpdateMatchmakingConfigurationInput) SetCustomEventData(v string) *UpdateMatchmakingConfigurationInput { s.CustomEventData = &v @@ -20448,7 +19979,7 @@ func (s *ValidateMatchmakingRuleSetInput) SetRuleSetBody(v string) *ValidateMatc type ValidateMatchmakingRuleSetOutput struct { _ struct{} `type:"structure"` - // Response indicating whether or not the rule set is valid. + // Response indicating whether the rule set is valid. 
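// Since the Valid flag noted above is the whole payload of
// ValidateMatchmakingRuleSetOutput, a quick sketch of syntax-checking a rule
// set body before wiring it into a configuration; the JSON is a trimmed,
// hypothetical rule set, not one from this diff.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := gamelift.New(sess)

	// Comments are not allowed in the JSON, but a description field is.
	body := `{"name":"simple-1v1","ruleLanguageVersion":"1.0",
	          "teams":[{"name":"players","minPlayers":2,"maxPlayers":2}]}`

	out, err := svc.ValidateMatchmakingRuleSet(&gamelift.ValidateMatchmakingRuleSetInput{
		RuleSetBody: aws.String(body),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rule set valid:", aws.BoolValue(out.Valid))
}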
Valid *bool `type:"boolean"` } @@ -20689,6 +20220,14 @@ const ( AcceptanceTypeReject = "REJECT" ) +const ( + // BackfillModeAutomatic is a BackfillMode enum value + BackfillModeAutomatic = "AUTOMATIC" + + // BackfillModeManual is a BackfillMode enum value + BackfillModeManual = "MANUAL" +) + const ( // BuildStatusInitialized is a BuildStatus enum value BuildStatusInitialized = "INITIALIZED" @@ -20700,6 +20239,14 @@ const ( BuildStatusFailed = "FAILED" ) +const ( + // CertificateTypeDisabled is a CertificateType enum value + CertificateTypeDisabled = "DISABLED" + + // CertificateTypeGenerated is a CertificateType enum value + CertificateTypeGenerated = "GENERATED" +) + const ( // ComparisonOperatorTypeGreaterThanOrEqualToThreshold is a ComparisonOperatorType enum value ComparisonOperatorTypeGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" @@ -20757,6 +20304,30 @@ const ( // EC2InstanceTypeC48xlarge is a EC2InstanceType enum value EC2InstanceTypeC48xlarge = "c4.8xlarge" + // EC2InstanceTypeC5Large is a EC2InstanceType enum value + EC2InstanceTypeC5Large = "c5.large" + + // EC2InstanceTypeC5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5Xlarge = "c5.xlarge" + + // EC2InstanceTypeC52xlarge is a EC2InstanceType enum value + EC2InstanceTypeC52xlarge = "c5.2xlarge" + + // EC2InstanceTypeC54xlarge is a EC2InstanceType enum value + EC2InstanceTypeC54xlarge = "c5.4xlarge" + + // EC2InstanceTypeC59xlarge is a EC2InstanceType enum value + EC2InstanceTypeC59xlarge = "c5.9xlarge" + + // EC2InstanceTypeC512xlarge is a EC2InstanceType enum value + EC2InstanceTypeC512xlarge = "c5.12xlarge" + + // EC2InstanceTypeC518xlarge is a EC2InstanceType enum value + EC2InstanceTypeC518xlarge = "c5.18xlarge" + + // EC2InstanceTypeC524xlarge is a EC2InstanceType enum value + EC2InstanceTypeC524xlarge = "c5.24xlarge" + // EC2InstanceTypeR3Large is a EC2InstanceType enum value EC2InstanceTypeR3Large = "r3.large" @@ -20790,6 +20361,30 @@ const ( // EC2InstanceTypeR416xlarge is a EC2InstanceType enum value EC2InstanceTypeR416xlarge = "r4.16xlarge" + // EC2InstanceTypeR5Large is a EC2InstanceType enum value + EC2InstanceTypeR5Large = "r5.large" + + // EC2InstanceTypeR5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5Xlarge = "r5.xlarge" + + // EC2InstanceTypeR52xlarge is a EC2InstanceType enum value + EC2InstanceTypeR52xlarge = "r5.2xlarge" + + // EC2InstanceTypeR54xlarge is a EC2InstanceType enum value + EC2InstanceTypeR54xlarge = "r5.4xlarge" + + // EC2InstanceTypeR58xlarge is a EC2InstanceType enum value + EC2InstanceTypeR58xlarge = "r5.8xlarge" + + // EC2InstanceTypeR512xlarge is a EC2InstanceType enum value + EC2InstanceTypeR512xlarge = "r5.12xlarge" + + // EC2InstanceTypeR516xlarge is a EC2InstanceType enum value + EC2InstanceTypeR516xlarge = "r5.16xlarge" + + // EC2InstanceTypeR524xlarge is a EC2InstanceType enum value + EC2InstanceTypeR524xlarge = "r5.24xlarge" + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value EC2InstanceTypeM3Medium = "m3.medium" @@ -20816,6 +20411,30 @@ const ( // EC2InstanceTypeM410xlarge is a EC2InstanceType enum value EC2InstanceTypeM410xlarge = "m4.10xlarge" + + // EC2InstanceTypeM5Large is a EC2InstanceType enum value + EC2InstanceTypeM5Large = "m5.large" + + // EC2InstanceTypeM5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5Xlarge = "m5.xlarge" + + // EC2InstanceTypeM52xlarge is a EC2InstanceType enum value + EC2InstanceTypeM52xlarge = "m5.2xlarge" + + // EC2InstanceTypeM54xlarge is a EC2InstanceType enum value + 
EC2InstanceTypeM54xlarge = "m5.4xlarge" + + // EC2InstanceTypeM58xlarge is a EC2InstanceType enum value + EC2InstanceTypeM58xlarge = "m5.8xlarge" + + // EC2InstanceTypeM512xlarge is a EC2InstanceType enum value + EC2InstanceTypeM512xlarge = "m5.12xlarge" + + // EC2InstanceTypeM516xlarge is a EC2InstanceType enum value + EC2InstanceTypeM516xlarge = "m5.16xlarge" + + // EC2InstanceTypeM524xlarge is a EC2InstanceType enum value + EC2InstanceTypeM524xlarge = "m5.24xlarge" ) const ( @@ -20973,6 +20592,9 @@ const ( // GameSessionPlacementStateTimedOut is a GameSessionPlacementState enum value GameSessionPlacementStateTimedOut = "TIMED_OUT" + + // GameSessionPlacementStateFailed is a GameSessionPlacementState enum value + GameSessionPlacementStateFailed = "FAILED" ) const ( @@ -21083,6 +20705,9 @@ const ( // OperatingSystemAmazonLinux is a OperatingSystem enum value OperatingSystemAmazonLinux = "AMAZON_LINUX" + + // OperatingSystemAmazonLinux2 is a OperatingSystem enum value + OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go index 97836a89d32..9c054334111 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go @@ -18,7 +18,7 @@ // Get Amazon GameLift Tools and Resources // // This reference guide describes the low-level service API for Amazon GameLift -// and provides links to language-specific SDK reference topics. See also Amazon +// and provides links to language-specific SDK reference topics. See also Amazon // GameLift Tools and Resources (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html). // // API Summary @@ -35,7 +35,7 @@ // Set up matchmakers, configure auto-scaling, retrieve game logs, and get // hosting and game metrics. // -// Task-based list of API actions (https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html) +// Task-based list of API actions (https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html) // // See https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go index a2361e47690..2d5f27330a5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go @@ -46,11 +46,11 @@ const ( // svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
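// The service.go hunk above threads the resolved PartitionID into the client
// metadata; construction at the call site is unchanged. This repeats the
// usage pattern the New docs already show, as a runnable sketch (the region
// is only an example).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	mySession := session.Must(session.NewSession())

	// Optional aws.Config values override the session defaults.
	svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2"))

	// ServiceID and SigningRegion are promoted from the embedded ClientInfo
	// that now also carries PartitionID.
	fmt.Println(svc.ServiceID, svc.SigningRegion)
}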
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GameLift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GameLift { svc := &GameLift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go index f6199352f7b..fc7c251dad6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go @@ -70,11 +70,11 @@ func (c *Glacier) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Working with Archives -// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) -// and Abort Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) +// in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and Abort Multipart Upload (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -171,9 +171,9 @@ func (c *Glacier) AbortVaultLockRequest(input *AbortVaultLockInput) (req *reques // A vault lock is put into the InProgress state by calling InitiateVaultLock. // A vault lock is put into the Locked state by calling CompleteVaultLock. You // can get the state of a vault lock by calling GetVaultLock. For more information -// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// about the vault locking process, see Amazon Glacier Vault Lock (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). // For more information about vault lock policies, see Amazon Glacier Access -// Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// Control with Vault Lock Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). // // This operation is idempotent. You can successfully invoke this operation // multiple times, if the vault lock is in the InProgress state or if there @@ -269,7 +269,7 @@ func (c *Glacier) AddTagsToVaultRequest(input *AddTagsToVaultInput) (req *reques // cause the tag limit for the vault to be exceeded, the operation throws the // LimitExceededException error. 
If a tag already exists on the vault under // a specified key, the existing key value will be overwritten. For more information -// about tags, see Tagging Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). +// about tags, see Tagging Amazon S3 Glacier Resources (https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -358,28 +358,28 @@ func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadI // CompleteMultipartUpload API operation for Amazon Glacier. // -// You call this operation to inform Amazon Glacier that all the archive parts -// have been uploaded and that Amazon Glacier can now assemble the archive from -// the uploaded parts. After assembling and saving the archive to the vault, -// Amazon Glacier returns the URI path of the newly created archive resource. -// Using the URI path, you can then access the archive. After you upload an -// archive, you should save the archive ID returned to retrieve the archive -// at a later point. You can also get the vault inventory to obtain a list of -// archive IDs in a vault. For more information, see InitiateJob. +// You call this operation to inform Amazon S3 Glacier (Glacier) that all the +// archive parts have been uploaded and that Glacier can now assemble the archive +// from the uploaded parts. After assembling and saving the archive to the vault, +// Glacier returns the URI path of the newly created archive resource. Using +// the URI path, you can then access the archive. After you upload an archive, +// you should save the archive ID returned to retrieve the archive at a later +// point. You can also get the vault inventory to obtain a list of archive IDs +// in a vault. For more information, see InitiateJob. // // In the request, you must include the computed SHA256 tree hash of the entire // archive you have uploaded. For information about computing a SHA256 tree -// hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). -// On the server side, Amazon Glacier also constructs the SHA256 tree hash of -// the assembled archive. If the values match, Amazon Glacier saves the archive -// to the vault; otherwise, it returns an error, and the operation fails. The -// ListParts operation returns a list of parts uploaded for a specific multipart -// upload. It includes checksum information for each uploaded part that can -// be used to debug a bad checksum issue. -// -// Additionally, Amazon Glacier also checks for any missing content ranges when -// assembling the archive, if missing content ranges are found, Amazon Glacier -// returns an error and the operation fails. +// hash, see Computing Checksums (https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// On the server side, Glacier also constructs the SHA256 tree hash of the assembled +// archive. If the values match, Glacier saves the archive to the vault; otherwise, +// it returns an error, and the operation fails. The ListParts operation returns +// a list of parts uploaded for a specific multipart upload. It includes checksum +// information for each uploaded part that can be used to debug a bad checksum +// issue. 
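// The CompleteMultipartUpload docs above require the SHA256 tree hash of the
// entire archive. The glacier package ships a helper for this; a sketch under
// the assumption that the full archive is available locally as a seekable
// reader (the file name, vault name, and upload ID are placeholders).
package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glacier"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := glacier.New(sess)

	f, err := os.Open("archive.bin") // hypothetical local copy of the archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ComputeHashes folds SHA256 hashes of 1 MB chunks into the tree hash
	// the service compares against its own computation.
	hashes := glacier.ComputeHashes(f)
	size, _ := f.Seek(0, io.SeekEnd)

	out, err := svc.CompleteMultipartUpload(&glacier.CompleteMultipartUploadInput{
		AccountId:   aws.String("-"), // "-" means the credential owner's account
		VaultName:   aws.String("examplevault"),
		UploadId:    aws.String("upload-id-from-InitiateMultipartUpload"),
		ArchiveSize: aws.String(fmt.Sprint(size)),
		Checksum:    aws.String(hex.EncodeToString(hashes.TreeHash)),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive:", aws.StringValue(out.ArchiveId))
}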
+// +// Additionally, Glacier also checks for any missing content ranges when assembling +// the archive, if missing content ranges are found, Glacier returns an error +// and the operation fails. // // Complete Multipart Upload is an idempotent operation. After your first successful // complete multipart upload, if you call the operation again within a short @@ -396,11 +396,11 @@ func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadI // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Uploading Large Archives -// in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) -// and Complete Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html) +// in Parts (Multipart Upload) (https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Complete Multipart Upload (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -493,7 +493,7 @@ func (c *Glacier) CompleteVaultLockRequest(input *CompleteVaultLockInput) (req * // lock policy to become unchangeable. A vault lock is put into the InProgress // state by calling InitiateVaultLock. You can obtain the state of the vault // lock by calling GetVaultLock. For more information about the vault locking -// process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// process, Amazon Glacier Vault Lock (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). // // This operation is idempotent. This request is always successful if the vault // lock is in the Locked state and the provided lock ID matches the lock ID @@ -591,7 +591,7 @@ func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Requ // This operation creates a new vault with the specified name. The name of the // vault must be unique within a region for an AWS account. You can create up // to 1,000 vaults per account. If you need to create more vaults, contact Amazon -// Glacier. +// S3 Glacier. // // You must use the following guidelines when naming a vault. // @@ -606,11 +606,11 @@ func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Requ // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
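// Following the CreateVault guidelines above (name unique per region, up to
// 1,000 vaults per account), a minimal creation sketch; the vault name is an
// invented example that stays within the allowed character set.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glacier"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := glacier.New(sess)

	out, err := svc.CreateVault(&glacier.CreateVaultInput{
		AccountId: aws.String("-"),                        // "-" selects the requester's own account
		VaultName: aws.String("my-archive-vault_2019"), // letters, digits, '_', '-', '.'
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("vault URI:", aws.StringValue(out.Location))
}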
// // For conceptual information and underlying REST API, see Creating a Vault -// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html) -// and Create Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) +// in Amazon Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html) +// and Create Vault (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -702,11 +702,11 @@ func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request. // for this archive ID may or may not succeed according to the following scenarios: // // * If the archive retrieval job is actively preparing the data for download -// when Amazon Glacier receives the delete archive request, the archival +// when Amazon S3 Glacier receives the delete archive request, the archival // retrieval operation might fail. // // * If the archive retrieval job has successfully prepared the archive for -// download when Amazon Glacier receives the delete archive request, you +// download when Amazon S3 Glacier receives the delete archive request, you // will be able to download the output. // // This operation is idempotent. Attempting to delete an already-deleted archive @@ -716,11 +716,11 @@ func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request. // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Deleting an Archive -// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html) -// and Delete Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html) +// in Amazon Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html) +// and Delete Archive (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -808,16 +808,16 @@ func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Requ // DeleteVault API operation for Amazon Glacier. // -// This operation deletes a vault. Amazon Glacier will delete a vault only if -// there are no archives in the vault as of the last inventory and there have -// been no writes to the vault since the last inventory. If either of these +// This operation deletes a vault. Amazon S3 Glacier will delete a vault only +// if there are no archives in the vault as of the last inventory and there +// have been no writes to the vault since the last inventory. If either of these // conditions is not satisfied, the vault deletion fails (that is, the vault -// is not removed) and Amazon Glacier returns an error. You can use DescribeVault +// is not removed) and Amazon S3 Glacier returns an error. 
You can use DescribeVault // to return the number of archives in a vault, and you can use Initiate a Job -// (POST jobs) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) +// (POST jobs) (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) // to initiate a new inventory retrieval for a vault. The inventory contains // the archive IDs you use to delete archives using Delete Archive (DELETE archive) -// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html). +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html). // // This operation is idempotent. // @@ -825,12 +825,12 @@ func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Requ // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Deleting a Vault -// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) -// and Delete Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) -// in the Amazon Glacier Developer Guide. +// in Amazon Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) +// and Delete Vault (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) +// in the Amazon S3 Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -919,14 +919,14 @@ func (c *Glacier) DeleteVaultAccessPolicyRequest(input *DeleteVaultAccessPolicyI // // This operation deletes the access policy associated with the specified vault. // The operation is eventually consistent; that is, it might take some time -// for Amazon Glacier to completely remove the access policy, and you might +// for Amazon S3 Glacier to completely remove the access policy, and you might // still see the effect of the policy for a short time after you send the delete // request. // // This operation is idempotent. You can invoke delete multiple times, even // if there is no policy associated with the vault. For more information about // vault access policies, see Amazon Glacier Access Control with Vault Access -// Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +// Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1015,19 +1015,19 @@ func (c *Glacier) DeleteVaultNotificationsRequest(input *DeleteVaultNotification // // This operation deletes the notification configuration set for a vault. The // operation is eventually consistent; that is, it might take some time for -// Amazon Glacier to completely disable the notifications and you might still +// Amazon S3 Glacier to completely disable the notifications and you might still // receive some notifications for a short time after you send the delete request. 
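// Per the DeleteVault docs above, deletion only succeeds when the last
// inventory shows no archives and nothing was written since. A sketch that
// checks the cached archive count via DescribeVault before attempting the
// delete; as the diff notes, that count can lag by roughly a day. The vault
// name is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glacier"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := glacier.New(sess)

	desc, err := svc.DescribeVault(&glacier.DescribeVaultInput{
		AccountId: aws.String("-"),
		VaultName: aws.String("examplevault"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	if n := aws.Int64Value(desc.NumberOfArchives); n != 0 {
		log.Fatalf("last inventory still reports %d archives", n)
	}

	if _, err := svc.DeleteVault(&glacier.DeleteVaultInput{
		AccountId: aws.String("-"),
		VaultName: aws.String("examplevault"),
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("vault deleted")
}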
// // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Configuring Vault -// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) -// and Delete Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html) -// in the Amazon Glacier Developer Guide. +// Notifications in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Delete Vault Notification Configuration (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html) +// in the Amazon S3 Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1115,25 +1115,26 @@ func (c *Glacier) DescribeJobRequest(input *DescribeJobInput) (req *request.Requ // // This operation returns information about a job you previously initiated, // including the job initiation date, the user who initiated the job, the job -// status code/message and the Amazon SNS topic to notify after Amazon Glacier -// completes the job. For more information about initiating a job, see InitiateJob. +// status code/message and the Amazon SNS topic to notify after Amazon S3 Glacier +// (Glacier) completes the job. For more information about initiating a job, +// see InitiateJob. // // This operation enables you to check the status of your job. However, it is // strongly recommended that you set up an Amazon SNS topic and specify it in -// your initiate job request so that Amazon Glacier can notify the topic after -// it completes the job. -// -// A job ID will not expire for at least 24 hours after Amazon Glacier completes +// your initiate job request so that Glacier can notify the topic after it completes // the job. // +// A job ID will not expire for at least 24 hours after Glacier completes the +// job. +// // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For more information about using this operation, see the documentation for -// the underlying REST API Describe Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html) +// the underlying REST API Describe Job (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1227,18 +1228,18 @@ func (c *Glacier) DescribeVaultRequest(input *DescribeVaultInput) (req *request. // This means that if you add or remove an archive from a vault, and then immediately // use Describe Vault, the change in contents will not be immediately reflected. // If you want to retrieve the latest inventory of the vault, use InitiateJob. -// Amazon Glacier generates vault inventories approximately daily. For more -// information, see Downloading a Vault Inventory in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html). +// Amazon S3 Glacier generates vault inventories approximately daily. For more +// information, see Downloading a Vault Inventory in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html). // // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Retrieving Vault -// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) -// and Describe Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) +// Metadata in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and Describe Vault (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1327,7 +1328,7 @@ func (c *Glacier) GetDataRetrievalPolicyRequest(input *GetDataRetrievalPolicyInp // // This operation returns the current data retrieval policy for the account // and region specified in the GET request. For more information about data -// retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +// retrieval policies, see Amazon Glacier Data Retrieval Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1415,14 +1416,14 @@ func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Re // // You can download all the job output or download a portion of the output by // specifying a byte range. In the case of an archive retrieval job, depending -// on the byte range you specify, Amazon Glacier returns the checksum for the -// portion of the data. You can compute the checksum on the client and verify -// that the values match to ensure the portion you downloaded is the correct -// data. -// -// A job ID will not expire for at least 24 hours after Amazon Glacier completes -// the job. That a byte range. 
For both archive and inventory retrieval jobs, -// you should verify the downloaded size against the size returned in the headers +// on the byte range you specify, Amazon S3 Glacier (Glacier) returns the checksum +// for the portion of the data. You can compute the checksum on the client and +// verify that the values match to ensure the portion you downloaded is the +// correct data. +// +// A job ID will not expire for at least 24 hours after Glacier completes the +// job. That a byte range. For both archive and inventory retrieval jobs, you +// should verify the downloaded size against the size returned in the headers // from the Get Job Output response. // // For archive retrieval jobs, you should also verify that the size is what @@ -1430,29 +1431,29 @@ func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Re // is based on the range of bytes you specified. For example, if you specify // a range of bytes=0-1048575, you should verify your download size is 1,048,576 // bytes. If you download an entire archive, the expected size is the size of -// the archive when you uploaded it to Amazon Glacier The expected size is also -// returned in the headers from the Get Job Output response. +// the archive when you uploaded it to Amazon S3 Glacier The expected size is +// also returned in the headers from the Get Job Output response. // // In the case of an archive retrieval job, depending on the byte range you -// specify, Amazon Glacier returns the checksum for the portion of the data. -// To ensure the portion you downloaded is the correct data, compute the checksum -// on the client, verify that the values match, and verify that the size is -// what you expected. +// specify, Glacier returns the checksum for the portion of the data. To ensure +// the portion you downloaded is the correct data, compute the checksum on the +// client, verify that the values match, and verify that the size is what you +// expected. // -// A job ID does not expire for at least 24 hours after Amazon Glacier completes -// the job. That is, you can download the job output within the 24 hours period +// A job ID does not expire for at least 24 hours after Glacier completes the +// job. That is, you can download the job output within the 24 hours period // after Amazon Glacier completes the job. // // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
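// The GetJobOutput guidance above says to verify both the downloaded size and
// the returned checksum for ranged archive retrievals. A sketch for one
// megabyte-aligned range: per the docs' example, bytes=0-1048575 should yield
// exactly 1,048,576 bytes. The vault name and job ID are placeholders, and
// the checksum comparison assumes a tree-hash-aligned range.
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glacier"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := glacier.New(sess)

	out, err := svc.GetJobOutput(&glacier.GetJobOutputInput{
		AccountId: aws.String("-"),
		VaultName: aws.String("examplevault"),            // hypothetical
		JobId:     aws.String("job-id-from-InitiateJob"), // hypothetical
		Range:     aws.String("bytes=0-1048575"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	data, err := io.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	if len(data) != 1048576 {
		log.Fatalf("expected 1,048,576 bytes, got %d", len(data))
	}

	// Recompute the tree hash locally and compare it with what the service
	// returned for this portion of the data.
	if out.Checksum != nil {
		local := hex.EncodeToString(glacier.ComputeHashes(bytes.NewReader(data)).TreeHash)
		fmt.Println("checksum match:", local == aws.StringValue(out.Checksum))
	}
}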
// // For conceptual information and the underlying REST API, see Downloading a -// Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html), -// Downloading an Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html), -// and Get Job Output (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html) +// Vault Inventory (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html), +// Downloading an Archive (https://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html), +// and Get Job Output (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1540,10 +1541,10 @@ func (c *Glacier) GetVaultAccessPolicyRequest(input *GetVaultAccessPolicyInput) // // This operation retrieves the access-policy subresource set on the vault; // for more information on setting this subresource, see Set Vault Access Policy -// (PUT access-policy) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html). +// (PUT access-policy) (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html). // If there is no access policy set on the vault, the operation returns a 404 // Not found error. For more information about vault access policies, see Amazon -// Glacier Access Control with Vault Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +// Glacier Access Control with Vault Access Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1644,11 +1645,11 @@ func (c *Glacier) GetVaultLockRequest(input *GetVaultLockInput) (req *request.Re // A vault lock is put into the InProgress state by calling InitiateVaultLock. // A vault lock is put into the Locked state by calling CompleteVaultLock. You // can abort the vault locking process by calling AbortVaultLock. For more information -// about the vault locking process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// about the vault locking process, Amazon Glacier Vault Lock (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). // // If there is no vault lock policy set on the vault, the operation returns // a 404 Not found error. For more information about vault lock policies, Amazon -// Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// Glacier Access Control with Vault Lock Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1740,18 +1741,18 @@ func (c *Glacier) GetVaultNotificationsRequest(input *GetVaultNotificationsInput // For information about setting a notification configuration on a vault, see // SetVaultNotifications. If a notification configuration for a vault is not // set, the operation returns a 404 Not Found error. 
For more information about -// vault notifications, see Configuring Vault Notifications in Amazon Glacier -// (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html). +// vault notifications, see Configuring Vault Notifications in Amazon S3 Glacier +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html). // // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Configuring Vault -// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) -// and Get Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html) +// Notifications in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Get Vault Notification Configuration (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1841,7 +1842,7 @@ func (c *Glacier) InitiateJobRequest(input *InitiateJobInput) (req *request.Requ // This operation initiates a job of the specified type, which can be a select, // an archival retrieval, or a vault retrieval. For more information about using // this operation, see the documentation for the underlying REST API Initiate -// a Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html). +// a Job (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1936,9 +1937,10 @@ func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadI // InitiateMultipartUpload API operation for Amazon Glacier. // -// This operation initiates a multipart upload. Amazon Glacier creates a multipart -// upload resource and returns its ID in the response. The multipart upload -// ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart). +// This operation initiates a multipart upload. Amazon S3 Glacier creates a +// multipart upload resource and returns its ID in the response. The multipart +// upload ID is used in subsequent requests to upload parts of an archive (see +// UploadMultipartPart). // // When you initiate a multipart upload, you specify the part size in number // of bytes. The part size must be a megabyte (1024 KB) multiplied by a power @@ -1953,23 +1955,23 @@ func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadI // parts of 4 MB each and one part of 0.2 MB. 
// // You don't need to know the size of the archive when you start a multipart -// upload because Amazon Glacier does not require you to specify the overall +// upload because Amazon S3 Glacier does not require you to specify the overall // archive size. // -// After you complete the multipart upload, Amazon Glacier removes the multipart -// upload resource referenced by the ID. Amazon Glacier also removes the multipart -// upload resource if you cancel the multipart upload or it may be removed if -// there is no activity for a period of 24 hours. +// After you complete the multipart upload, Amazon S3 Glacier (Glacier) removes +// the multipart upload resource referenced by the ID. Glacier also removes +// the multipart upload resource if you cancel the multipart upload or it may +// be removed if there is no activity for a period of 24 hours. // // An AWS account has full permission to perform all operations (actions). However, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Uploading Large Archives -// in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) -// and Initiate Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html) +// in Parts (Multipart Upload) (https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Initiate Multipart Upload (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2066,7 +2068,7 @@ func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req * // // You can set one vault lock policy for each vault and this policy can be up // to 20 KB in size. For more information about vault lock policies, see Amazon -// Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// Glacier Access Control with Vault Lock Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). // // You must complete the vault locking process within 24 hours after the vault // lock enters the InProgress state. After the 24 hour window ends, the lock @@ -2079,7 +2081,7 @@ func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req * // // You can abort the vault locking process by calling AbortVaultLock. You can // get the state of the vault lock by calling GetVaultLock. For more information -// about the vault locking process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// about the vault locking process, Amazon Glacier Vault Lock (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). // // If this operation is called when the vault lock is in the InProgress state, // the operation returns an AccessDeniedException error. 
When the vault lock @@ -2211,7 +2213,7 @@ func (c *Glacier) ListJobsRequest(input *ListJobsInput) (req *request.Request, o // (false). // // For more information about using this operation, see the documentation for -// the underlying REST API List Jobs (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html). +// the underlying REST API List Jobs (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2266,7 +2268,7 @@ func (c *Glacier) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opt // // Example iterating over at most 3 pages of a ListJobs operation. // pageNum := 0 // err := client.ListJobsPages(params, -// func(page *ListJobsOutput, lastPage bool) bool { +// func(page *glacier.ListJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2298,10 +2300,12 @@ func (c *Glacier) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2377,11 +2381,11 @@ func (c *Glacier) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and the underlying REST API, see Working with -// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) -// and List Multipart Uploads (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html) +// Archives in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and List Multipart Uploads (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2437,7 +2441,7 @@ func (c *Glacier) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMu // // Example iterating over at most 3 pages of a ListMultipartUploads operation. // pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *glacier.ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2469,10 +2473,12 @@ func (c *Glacier) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2542,11 +2548,11 @@ func (c *Glacier) ListPartsRequest(input *ListPartsInput) (req *request.Request, // AWS Identity and Access Management (IAM) users don't have any permissions // by default. 
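The pager rewrite in the ListJobsPages hunk above fixes a subtle issue: the old `for p.Next() && cont` loop evaluated Next() (which fetches a page) before checking cont, so one extra page was requested after the callback asked to stop; the new form breaks immediately and still surfaces p.Err(). Usage is unchanged apart from the now package-qualified callback type shown in the updated doc example. A sketch, with a hypothetical vault name:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        pageNum := 0
        err := svc.ListJobsPages(
            &glacier.ListJobsInput{
                AccountId: aws.String("-"),            // "-" means the caller's own account
                VaultName: aws.String("examplevault"), // hypothetical vault name
            },
            func(page *glacier.ListJobsOutput, lastPage bool) bool {
                pageNum++
                fmt.Printf("page %d: %d jobs (last=%v)\n", pageNum, len(page.JobList), lastPage)
                return pageNum < 3 // false now stops before another page is fetched
            })
        if err != nil {
            log.Fatal(err)
        }
    }
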
You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and the underlying REST API, see Working with -// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) -// and List Parts (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html) +// Archives in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and List Parts (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2602,7 +2608,7 @@ func (c *Glacier) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, o // // Example iterating over at most 3 pages of a ListParts operation. // pageNum := 0 // err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { +// func(page *glacier.ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2634,10 +2640,12 @@ func (c *Glacier) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2768,7 +2776,7 @@ func (c *Glacier) ListTagsForVaultRequest(input *ListTagsForVaultInput) (req *re // // This operation lists all the tags attached to a vault. The operation returns // an empty map if there are no tags. For more information about tags, see Tagging -// Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). +// Amazon S3 Glacier Resources (https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2875,11 +2883,11 @@ func (c *Glacier) ListVaultsRequest(input *ListVaultsInput) (req *request.Reques // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Retrieving Vault -// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) -// and List Vaults (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) +// Metadata in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and List Vaults (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -2935,7 +2943,7 @@ func (c *Glacier) ListVaultsWithContext(ctx aws.Context, input *ListVaultsInput, // // Example iterating over at most 3 pages of a ListVaults operation. // pageNum := 0 // err := client.ListVaultsPages(params, -// func(page *ListVaultsOutput, lastPage bool) bool { +// func(page *glacier.ListVaultsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2967,10 +2975,12 @@ func (c *Glacier) ListVaultsPagesWithContext(ctx aws.Context, input *ListVaultsI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVaultsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVaultsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3103,8 +3113,8 @@ func (c *Glacier) RemoveTagsFromVaultRequest(input *RemoveTagsFromVaultInput) (r // RemoveTagsFromVault API operation for Amazon Glacier. // // This operation removes one or more tags from the set of tags attached to -// a vault. For more information about tags, see Tagging Amazon Glacier Resources -// (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This +// a vault. For more information about tags, see Tagging Amazon S3 Glacier Resources +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This // operation is idempotent. The operation will be successful, even if there // are no tags attached to the vault. // @@ -3199,7 +3209,7 @@ func (c *Glacier) SetDataRetrievalPolicyRequest(input *SetDataRetrievalPolicyInp // // The set policy operation does not affect retrieval jobs that were in progress // before the policy was enacted. For more information about data retrieval -// policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +// policies, see Amazon Glacier Data Retrieval Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3288,7 +3298,7 @@ func (c *Glacier) SetVaultAccessPolicyRequest(input *SetVaultAccessPolicyInput) // to a vault and is also called a vault subresource. You can set one access // policy per vault and the policy can be up to 20 KB in size. For more information // about vault access policies, see Amazon Glacier Access Control with Vault -// Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +// Access Policies (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3381,7 +3391,7 @@ func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput // To configure vault notifications, send a PUT request to the notification-configuration // subresource of the vault. The request should include a JSON document that // provides an Amazon SNS topic and specific events for which you want Amazon -// Glacier to send notifications to the topic. +// S3 Glacier to send notifications to the topic. // // Amazon SNS topics must grant permission to the vault to be allowed to publish // notifications to the topic. 
You can configure a vault to publish a notification @@ -3401,11 +3411,11 @@ func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Configuring Vault -// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) -// and Set Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html) +// Notifications in Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Set Vault Notification Configuration (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3493,10 +3503,10 @@ func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request. // UploadArchive API operation for Amazon Glacier. // // This operation adds an archive to a vault. This is a synchronous operation, -// and for a successful upload, your data is durably persisted. Amazon Glacier +// and for a successful upload, your data is durably persisted. Amazon S3 Glacier // returns the archive ID in the x-amz-archive-id header of the response. // -// You must use the archive ID to access your data in Amazon Glacier. After +// You must use the archive ID to access your data in Amazon S3 Glacier. After // you upload an archive, you should save the archive ID returned so that you // can retrieve or delete the archive later. Besides saving the archive ID, // you can also index it and give it a friendly name to allow for better searching. @@ -3506,7 +3516,7 @@ func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request. // a list of archive IDs in a vault. For more information, see InitiateJob. // // You must provide a SHA256 tree hash of the data you are uploading. For information -// about computing a SHA256 tree hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// about computing a SHA256 tree hash, see Computing Checksums (https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). // // You can optionally specify an archive description of up to 1,024 printable // ASCII characters. You can get the archive description when you either retrieve @@ -3522,11 +3532,11 @@ func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request. // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
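The SHA256 tree hash that UploadArchive requires (per the Computing Checksums link above) is built by hashing the payload in 1 MiB chunks and folding adjacent hashes pairwise up to a single root; an odd node at any level is promoted unchanged. A standalone sketch of that algorithm follows — in practice the vendored SDK ships helpers such as glacier.ComputeHashes in this same package, so you would rarely hand-roll it:

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    // treeHash computes the Glacier SHA256 tree hash of data: hash each 1 MiB
    // chunk, then repeatedly hash concatenated adjacent pairs until one root remains.
    func treeHash(data []byte) []byte {
        const mib = 1 << 20
        var level [][]byte
        for off := 0; off < len(data) || off == 0; off += mib {
            end := off + mib
            if end > len(data) {
                end = len(data)
            }
            h := sha256.Sum256(data[off:end])
            level = append(level, h[:])
            if end == len(data) {
                break
            }
        }
        for len(level) > 1 {
            var next [][]byte
            for i := 0; i < len(level); i += 2 {
                if i+1 < len(level) {
                    h := sha256.Sum256(append(append([]byte{}, level[i]...), level[i+1]...))
                    next = append(next, h[:])
                } else {
                    next = append(next, level[i]) // odd node is promoted unchanged
                }
            }
            level = next
        }
        return level[0]
    }

    func main() {
        fmt.Println(hex.EncodeToString(treeHash(make([]byte, 3<<20)))) // 3 MiB of zeros
    }
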
// // For conceptual information and underlying REST API, see Uploading an Archive -// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html) -// and Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html) +// in Amazon Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html) +// and Upload Archive (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3548,8 +3558,8 @@ func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request. // Returned if a required header or parameter is missing from the request. // // * ErrCodeRequestTimeoutException "RequestTimeoutException" -// Returned if, when uploading an archive, Amazon Glacier times out while receiving -// the upload. +// Returned if, when uploading an archive, Amazon S3 Glacier times out while +// receiving the upload. // // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // Returned if the service cannot complete the request. @@ -3626,20 +3636,18 @@ func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (r // // * SHA256 tree hash does not matchTo ensure that part data is not corrupted // in transmission, you compute a SHA256 tree hash of the part and include -// it in your request. Upon receiving the part data, Amazon Glacier also +// it in your request. Upon receiving the part data, Amazon S3 Glacier also // computes a SHA256 tree hash. If these hash values don't match, the operation // fails. For information about computing a SHA256 tree hash, see Computing -// Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// Checksums (https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). // // * Part size does not matchThe size of each part except the last must match // the size specified in the corresponding InitiateMultipartUpload request. // The size of the last part must be the same size as, or smaller than, the -// specified size. -// -// If you upload a part whose size is smaller than the part size you specified -// in your initiate multipart upload request and that part is not the last -// part, then the upload part request will succeed. However, the subsequent -// Complete Multipart Upload request will fail. +// specified size. If you upload a part whose size is smaller than the part +// size you specified in your initiate multipart upload request and that +// part is not the last part, then the upload part request will succeed. +// However, the subsequent Complete Multipart Upload request will fail. // // * Range does not alignThe byte range value in the request does not align // with the part size specified in the corresponding initiate request. For @@ -3648,7 +3656,6 @@ func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (r // valid part ranges. However, if you set a range value of 2 MB to 6 MB, // the range does not align with the part size and the upload will fail. // -// // This operation is idempotent. If you upload the same part multiple times, // the data included in the most recent request overwrites the previously uploaded // data. 
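The "Range does not align" bullet above is the usual stumbling block with UploadMultipartPart: every part except the last must start on a multiple of the declared part size, and the Content-Range total is written as "*". A small helper that derives the header the way the doc comment describes (the part size and archive size here are illustrative):

    package main

    import "fmt"

    // contentRange returns the Content-Range header value for part i of an
    // archive of totalSize bytes uploaded in partSize-byte parts, e.g.
    // "bytes 0-4194303/*" for the first 4 MiB part.
    func contentRange(i int, partSize, totalSize int64) string {
        start := int64(i) * partSize
        end := start + partSize - 1
        if end > totalSize-1 {
            end = totalSize - 1 // only the last part may be shorter
        }
        return fmt.Sprintf("bytes %d-%d/*", start, end)
    }

    func main() {
        fmt.Println(contentRange(0, 4<<20, 10<<20)) // bytes 0-4194303/*
        fmt.Println(contentRange(2, 4<<20, 10<<20)) // bytes 8388608-10485759/*
    }
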
@@ -3657,11 +3664,11 @@ func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (r // AWS Identity and Access Management (IAM) users don't have any permissions // by default. You must grant them explicit permission to perform specific actions. // For more information, see Access Control Using AWS Identity and Access Management -// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// (IAM) (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // // For conceptual information and underlying REST API, see Uploading Large Archives -// in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) -// and Upload Part (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html) +// in Parts (Multipart Upload) (https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Upload Part (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html) // in the Amazon Glacier Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3683,8 +3690,8 @@ func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (r // Returned if a required header or parameter is missing from the request. // // * ErrCodeRequestTimeoutException "RequestTimeoutException" -// Returned if, when uploading an archive, Amazon Glacier times out while receiving -// the upload. +// Returned if, when uploading an archive, Amazon S3 Glacier times out while +// receiving the upload. // // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // Returned if the service cannot complete the request. @@ -3713,14 +3720,15 @@ func (c *Glacier) UploadMultipartPartWithContext(ctx aws.Context, input *UploadM // Provides options to abort a multipart upload identified by the upload ID. // // For information about the underlying REST API, see Abort Multipart Upload -// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html). -// For conceptual information, see Working with Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html). +// For conceptual information, see Working with Archives in Amazon S3 Glacier +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -3892,7 +3900,7 @@ type AddTagsToVaultInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. 
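The AccountId wording above recurs on nearly every input struct in this file: a single "-" stands in for the account behind the request's signing credentials, and an explicit ID must be written without hyphens. For example, with a hypothetical vault and upload ID:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        // "-" tells Glacier to use the account that signed the request.
        _, err := svc.AbortMultipartUpload(&glacier.AbortMultipartUploadInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),                       // hypothetical
            UploadId:  aws.String("upload-id-from-InitiateMultipartUpload"), // hypothetical
        })
        if err != nil {
            log.Fatal(err)
        }
    }
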
// @@ -3973,17 +3981,18 @@ func (s AddTagsToVaultOutput) GoString() string { return s.String() } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. // -// For information about the underlying REST API, see Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html). -// For conceptual information, see Working with Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). +// For information about the underlying REST API, see Upload Archive (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html). +// For conceptual information, see Working with Archives in Amazon S3 Glacier +// (https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). type ArchiveCreationOutput struct { _ struct{} `type:"structure"` // The ID of the archive. This value is also included as part of the location. ArchiveId *string `location:"header" locationName:"x-amz-archive-id" type:"string"` - // The checksum of the archive computed by Amazon Glacier. + // The checksum of the archive computed by Amazon S3 Glacier. Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` // The relative URI path of the newly added archive resource. @@ -4156,16 +4165,16 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { } // Provides options to complete a multipart upload operation. This informs Amazon -// Glacier that all the archive parts have been uploaded and Amazon Glacier -// can now assemble the archive from the uploaded parts. After assembling and -// saving the archive to the vault, Amazon Glacier returns the URI path of the +// Glacier that all the archive parts have been uploaded and Amazon S3 Glacier +// (Glacier) can now assemble the archive from the uploaded parts. After assembling +// and saving the archive to the vault, Glacier returns the URI path of the // newly created archive resource. type CompleteMultipartUploadInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4179,7 +4188,8 @@ type CompleteMultipartUploadInput struct { // The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 // tree hash of the individual parts. If the value you specify in the request // does not match the SHA256 tree hash of the final assembled archive as computed - // by Amazon Glacier, Amazon Glacier returns an error and the request fails. + // by Amazon S3 Glacier (Glacier), Glacier returns an error and the request + // fails. Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` // The upload ID of the multipart upload. @@ -4363,7 +4373,7 @@ type CreateVaultInput struct { // The AccountId value is the AWS account ID. This value must match the AWS // account ID associated with the credentials used to sign the request. 
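As the CompleteMultipartUpload hunk above notes, the final x-amz-sha256-tree-hash is the tree hash over the individual parts' tree hashes, not a fresh hash of the concatenated bytes; because part sizes are powers of two MiB, that fold reaches the same root. A sketch of the closing call, assuming partRoot was produced by such a fold (vault, upload ID, and size are illustrative):

    package main

    import (
        "encoding/hex"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        var partRoot []byte // assumed: root folded from the recorded part tree hashes
        out, err := svc.CompleteMultipartUpload(&glacier.CompleteMultipartUploadInput{
            AccountId:   aws.String("-"),
            VaultName:   aws.String("examplevault"), // hypothetical
            UploadId:    aws.String("upload-id"),    // from InitiateMultipartUpload
            ArchiveSize: aws.String("10485760"),     // total bytes, passed as a string header
            Checksum:    aws.String(hex.EncodeToString(partRoot)),
        })
        if err != nil {
            log.Fatal(err) // a mismatched checksum surfaces here as an error
        }
        fmt.Println(aws.StringValue(out.ArchiveId))
    }
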
You // can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you specify your account ID, do // not include any hyphens ('-') in the ID. // @@ -4420,7 +4430,7 @@ func (s *CreateVaultInput) SetVaultName(v string) *CreateVaultInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type CreateVaultOutput struct { _ struct{} `type:"structure"` @@ -4508,13 +4518,13 @@ func (s *DataRetrievalRule) SetStrategy(v string) *DataRetrievalRule { return s } -// Provides options for deleting an archive from an Amazon Glacier vault. +// Provides options for deleting an archive from an Amazon S3 Glacier vault. type DeleteArchiveInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4608,7 +4618,7 @@ type DeleteVaultAccessPolicyInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4679,13 +4689,13 @@ func (s DeleteVaultAccessPolicyOutput) GoString() string { return s.String() } -// Provides options for deleting a vault from Amazon Glacier. +// Provides options for deleting a vault from Amazon S3 Glacier. type DeleteVaultInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4749,7 +4759,7 @@ type DeleteVaultNotificationsInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4840,7 +4850,7 @@ type DescribeJobInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. 
// You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4920,7 +4930,7 @@ type DescribeVaultInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -4977,7 +4987,7 @@ func (s *DescribeVaultInput) SetVaultName(v string) *DescribeVaultInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type DescribeVaultOutput struct { _ struct{} `type:"structure"` @@ -4985,8 +4995,8 @@ type DescribeVaultOutput struct { // value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z. CreationDate *string `type:"string"` - // The Universal Coordinated Time (UTC) date when Amazon Glacier completed the - // last vault inventory. This value should be a string in the ISO 8601 date + // The Universal Coordinated Time (UTC) date when Amazon S3 Glacier completed + // the last vault inventory. This value should be a string in the ISO 8601 date // format, for example 2012-03-20T17:03:43.221Z. LastInventoryDate *string `type:"string"` @@ -5147,7 +5157,7 @@ func (s *GetDataRetrievalPolicyInput) SetAccountId(v string) *GetDataRetrievalPo return s } -// Contains the Amazon Glacier response to the GetDataRetrievalPolicy request. +// Contains the Amazon S3 Glacier response to the GetDataRetrievalPolicy request. type GetDataRetrievalPolicyOutput struct { _ struct{} `type:"structure"` @@ -5171,13 +5181,13 @@ func (s *GetDataRetrievalPolicyOutput) SetPolicy(v *DataRetrievalPolicy) *GetDat return s } -// Provides options for downloading output of an Amazon Glacier job. +// Provides options for downloading output of an Amazon S3 Glacier job. type GetJobOutputInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -5215,8 +5225,8 @@ type GetJobOutputInput struct { // checksum values. Compute the tree hash of these values to find the checksum // of the entire output. Using the DescribeJob API, obtain job information of // the job that provided you the output. The response includes the checksum - // of the entire archive stored in Amazon Glacier. You compare this value with - // the checksum you computed to ensure you have downloaded the entire archive + // of the entire archive stored in Amazon S3 Glacier. You compare this value + // with the checksum you computed to ensure you have downloaded the entire archive // content with no errors. 
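The ranged-download description above maps directly onto a Range header for GetJobOutput; each downloaded chunk's tree hash can then be folded and compared against the checksum DescribeJob reports for the whole archive. A sketch fetching only the first megabyte (vault and job ID are illustrative):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        out, err := svc.GetJobOutput(&glacier.GetJobOutputInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),    // hypothetical
            JobId:     aws.String("job-id"),          // from InitiateJob
            Range:     aws.String("bytes=0-1048575"), // first 1 MiB only
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()
        n, _ := io.Copy(ioutil.Discard, out.Body)
        // Checksum is only populated when the range is tree-hash aligned.
        fmt.Println(n, "bytes,", aws.StringValue(out.Checksum))
    }
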
Range *string `location:"header" locationName:"Range" type:"string"` @@ -5288,7 +5298,7 @@ func (s *GetJobOutputInput) SetVaultName(v string) *GetJobOutputInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type GetJobOutputOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -5319,9 +5329,10 @@ type GetJobOutputOutput struct { // as a response header. Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` - // The range of bytes returned by Amazon Glacier. If only partial output is - // downloaded, the response provides the range of bytes Amazon Glacier returned. - // For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB. + // The range of bytes returned by Amazon S3 Glacier. If only partial output + // is downloaded, the response provides the range of bytes Amazon S3 Glacier + // returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from + // 8 MB. ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` // The Content-Type depends on whether the job output is an archive or a vault @@ -5394,7 +5405,7 @@ type GetVaultAccessPolicyInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -5481,7 +5492,7 @@ type GetVaultLockInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -5538,7 +5549,7 @@ func (s *GetVaultLockInput) SetVaultName(v string) *GetVaultLockInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type GetVaultLockOutput struct { _ struct{} `type:"structure"` @@ -5598,7 +5609,7 @@ type GetVaultNotificationsInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -5655,7 +5666,7 @@ func (s *GetVaultNotificationsInput) SetVaultName(v string) *GetVaultNotificatio return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type GetVaultNotificationsOutput struct { _ struct{} `type:"structure" payload:"VaultNotificationConfig"` @@ -5802,13 +5813,13 @@ func (s *Grantee) SetURI(v string) *Grantee { return s } -// Provides options for initiating an Amazon Glacier job. +// Provides options for initiating an Amazon S3 Glacier job. 
type InitiateJobInput struct { _ struct{} `type:"structure" payload:"JobParameters"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -5879,7 +5890,7 @@ func (s *InitiateJobInput) SetVaultName(v string) *InitiateJobInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type InitiateJobOutput struct { _ struct{} `type:"structure"` @@ -5921,13 +5932,14 @@ func (s *InitiateJobOutput) SetLocation(v string) *InitiateJobOutput { return s } -// Provides options for initiating a multipart upload to an Amazon Glacier vault. +// Provides options for initiating a multipart upload to an Amazon S3 Glacier +// vault. type InitiateMultipartUploadInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -6008,11 +6020,11 @@ func (s *InitiateMultipartUploadInput) SetVaultName(v string) *InitiateMultipart return s } -// The Amazon Glacier response to your request. +// The Amazon S3 Glacier response to your request. type InitiateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // The relative URI path of the multipart upload ID Amazon Glacier created. + // The relative URI path of the multipart upload ID Amazon S3 Glacier created. Location *string `location:"header" locationName:"Location" type:"string"` // The ID of the multipart upload. This value is also included as part of the @@ -6115,7 +6127,7 @@ func (s *InitiateVaultLockInput) SetVaultName(v string) *InitiateVaultLockInput return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type InitiateVaultLockOutput struct { _ struct{} `type:"structure"` @@ -6185,8 +6197,7 @@ type InventoryRetrievalJobDescription struct { // An opaque string that represents where to continue pagination of the vault // inventory retrieval results. You use the marker in a new InitiateJob request // to obtain additional inventory items. If there are no more inventory items, - // this value is null. For more information, see Range Inventory Retrieval - // (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html#api-initiate-job-post-vault-inventory-list-filtering). + // this value is null. For more information, see Range Inventory Retrieval (https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html#api-initiate-job-post-vault-inventory-list-filtering). Marker *string `type:"string"` // The start of the date range in Universal Coordinated Time (UTC) for vault @@ -6294,7 +6305,7 @@ func (s *InventoryRetrievalJobInput) SetStartDate(v string) *InventoryRetrievalJ return s } -// Contains the description of an Amazon Glacier job. 
+// Contains the description of an Amazon S3 Glacier job. type JobDescription struct { _ struct{} `type:"structure"` @@ -6338,7 +6349,7 @@ type JobDescription struct { // The job description provided when initiating the job. JobDescription *string `type:"string"` - // An opaque string that identifies an Amazon Glacier job. + // An opaque string that identifies an Amazon S3 Glacier job. JobId *string `type:"string"` // Contains the job output location. @@ -6573,9 +6584,10 @@ type JobParameters struct { // request. RetrievalByteRange *string `type:"string"` - // The Amazon SNS topic ARN to which Amazon Glacier sends a notification when - // the job is completed and the output is ready for you to download. The specified - // topic publishes the notification to its subscribers. The SNS topic must exist. + // The Amazon SNS topic ARN to which Amazon S3 Glacier sends a notification + // when the job is completed and the output is ready for you to download. The + // specified topic publishes the notification to its subscribers. The SNS topic + // must exist. SNSTopic *string `type:"string"` // Contains the parameters that define a job. @@ -6676,13 +6688,13 @@ func (s *JobParameters) SetType(v string) *JobParameters { return s } -// Provides options for retrieving a job list for an Amazon Glacier vault. +// Provides options for retrieving a job list for an Amazon S3 Glacier vault. type ListJobsInput struct { _ struct{} `type:"structure"` // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -6781,7 +6793,7 @@ func (s *ListJobsInput) SetVaultName(v string) *ListJobsInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type ListJobsOutput struct { _ struct{} `type:"structure"` @@ -6824,7 +6836,7 @@ type ListMultipartUploadsInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -6904,7 +6916,7 @@ func (s *ListMultipartUploadsInput) SetVaultName(v string) *ListMultipartUploads return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -6946,7 +6958,7 @@ type ListPartsInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. 
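The SNSTopic field documented in the JobParameters hunk above is how a long-running retrieval signals completion. A sketch of starting an inventory retrieval with a completion notification (the topic ARN and vault name are hypothetical, and the topic must already exist):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        out, err := svc.InitiateJob(&glacier.InitiateJobInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"), // hypothetical
            JobParameters: &glacier.JobParameters{
                Type:     aws.String("inventory-retrieval"),
                Format:   aws.String("JSON"),
                SNSTopic: aws.String("arn:aws:sns:us-west-2:111122223333:glacier-jobs"), // hypothetical topic
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("job:", aws.StringValue(out.JobId))
    }
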
// @@ -7044,7 +7056,7 @@ func (s *ListPartsInput) SetVaultName(v string) *ListPartsInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type ListPartsOutput struct { _ struct{} `type:"structure"` @@ -7133,9 +7145,9 @@ type ListProvisionedCapacityInput struct { // The AWS account ID of the account that owns the vault. You can either specify // an AWS account ID or optionally a single '-' (hyphen), in which case Amazon - // Glacier uses the AWS account ID associated with the credentials used to sign - // the request. If you use an account ID, don't include any hyphens ('-') in - // the ID. + // S3 Glacier uses the AWS account ID associated with the credentials used to + // sign the request. If you use an account ID, don't include any hyphens ('-') + // in the ID. // // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` @@ -7202,7 +7214,7 @@ type ListTagsForVaultInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -7259,7 +7271,7 @@ func (s *ListTagsForVaultInput) SetVaultName(v string) *ListTagsForVaultInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type ListTagsForVaultOutput struct { _ struct{} `type:"structure"` @@ -7352,7 +7364,7 @@ func (s *ListVaultsInput) SetMarker(v string) *ListVaultsInput { return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type ListVaultsOutput struct { _ struct{} `type:"structure"` @@ -7457,8 +7469,8 @@ type PartListElement struct { // The byte range of a part, inclusive of the upper value of the range. RangeInBytes *string `type:"string"` - // The SHA256 tree hash value that Amazon Glacier calculated for the part. This - // field is never null. + // The SHA256 tree hash value that Amazon S3 Glacier calculated for the part. + // This field is never null. SHA256TreeHash *string `type:"string"` } @@ -7533,9 +7545,9 @@ type PurchaseProvisionedCapacityInput struct { // The AWS account ID of the account that owns the vault. You can either specify // an AWS account ID or optionally a single '-' (hyphen), in which case Amazon - // Glacier uses the AWS account ID associated with the credentials used to sign - // the request. If you use an account ID, don't include any hyphens ('-') in - // the ID. + // S3 Glacier uses the AWS account ID associated with the credentials used to + // sign the request. If you use an account ID, don't include any hyphens ('-') + // in the ID. // // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` @@ -7602,7 +7614,7 @@ type RemoveTagsFromVaultInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. 
// You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -7918,7 +7930,7 @@ type SetVaultAccessPolicyInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -8005,7 +8017,7 @@ type SetVaultNotificationsInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -8091,7 +8103,7 @@ type UploadArchiveInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -8244,7 +8256,7 @@ type UploadMultipartPartInput struct { // The AccountId value is the AWS account ID of the account that owns the vault. // You can either specify an AWS account ID or optionally a single '-' (hyphen), - // in which case Amazon Glacier uses the AWS account ID associated with the + // in which case Amazon S3 Glacier uses the AWS account ID associated with the // credentials used to sign the request. If you use an account ID, do not include // any hyphens ('-') in the ID. // @@ -8258,7 +8270,7 @@ type UploadMultipartPartInput struct { Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` // Identifies the range of bytes in the assembled archive that will be uploaded - // in this part. Amazon Glacier uses this information to assemble the archive + // in this part. Amazon S3 Glacier uses this information to assemble the archive // in the proper sequence. The format of this header follows RFC 2616. An example // header is Content-Range:bytes 0-4194303/*. Range *string `location:"header" locationName:"Content-Range" type:"string"` @@ -8348,11 +8360,11 @@ func (s *UploadMultipartPartInput) SetVaultName(v string) *UploadMultipartPartIn return s } -// Contains the Amazon Glacier response to your request. +// Contains the Amazon S3 Glacier response to your request. type UploadMultipartPartOutput struct { _ struct{} `type:"structure"` - // The SHA256 tree hash that Amazon Glacier computed for the uploaded part. + // The SHA256 tree hash that Amazon S3 Glacier computed for the uploaded part. 
Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` } @@ -8424,7 +8436,7 @@ func (s *VaultLockPolicy) SetPolicy(v string) *VaultLockPolicy { type VaultNotificationConfig struct { _ struct{} `type:"structure"` - // A list of one or more events for which Amazon Glacier will send a notification + // A list of one or more events for which Amazon S3 Glacier will send a notification // to the specified Amazon SNS topic. Events []*string `type:"list"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/doc.go index 80c74d84891..98090522466 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/doc.go @@ -3,33 +3,33 @@ // Package glacier provides the client and types for making API // requests to Amazon Glacier. // -// Amazon Glacier is a storage solution for "cold data." -// -// Amazon Glacier is an extremely low-cost storage service that provides secure, -// durable, and easy-to-use storage for data backup and archival. With Amazon -// Glacier, customers can store their data cost effectively for months, years, -// or decades. Amazon Glacier also enables customers to offload the administrative -// burdens of operating and scaling storage to AWS, so they don't have to worry -// about capacity planning, hardware provisioning, data replication, hardware -// failure and recovery, or time-consuming hardware migrations. -// -// Amazon Glacier is a great storage choice when low storage cost is paramount -// and your data is rarely retrieved. If your application requires fast or frequent +// Amazon S3 Glacier (Glacier) is a storage solution for "cold data." +// +// Glacier is an extremely low-cost storage service that provides secure, durable, +// and easy-to-use storage for data backup and archival. With Glacier, customers +// can store their data cost effectively for months, years, or decades. Glacier +// also enables customers to offload the administrative burdens of operating +// and scaling storage to AWS, so they don't have to worry about capacity planning, +// hardware provisioning, data replication, hardware failure and recovery, or +// time-consuming hardware migrations. +// +// Glacier is a great storage choice when low storage cost is paramount and +// your data is rarely retrieved. If your application requires fast or frequent // access to your data, consider using Amazon S3. For more information, see // Amazon Simple Storage Service (Amazon S3) (http://aws.amazon.com/s3/). // // You can store any kind of data in any format. There is no maximum limit on -// the total amount of data you can store in Amazon Glacier. +// the total amount of data you can store in Glacier. 
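Tying the VaultNotificationConfig struct above back to the SetVaultNotifications operation earlier in this file: the PUT body is simply an SNS topic plus the events to publish. A sketch, using the two event names Glacier documents (the topic ARN and vault name are illustrative, and the topic must grant Glacier publish permission):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        svc := glacier.New(session.Must(session.NewSession()))
        _, err := svc.SetVaultNotifications(&glacier.SetVaultNotificationsInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"), // hypothetical
            VaultNotificationConfig: &glacier.VaultNotificationConfig{
                SNSTopic: aws.String("arn:aws:sns:us-west-2:111122223333:glacier-events"), // hypothetical
                Events: aws.StringSlice([]string{
                    "ArchiveRetrievalCompleted",
                    "InventoryRetrievalCompleted",
                }),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }
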
// -// If you are a first-time user of Amazon Glacier, we recommend that you begin -// by reading the following sections in the Amazon Glacier Developer Guide: +// If you are a first-time user of Glacier, we recommend that you begin by reading +// the following sections in the Amazon S3 Glacier Developer Guide: // -// * What is Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) +// * What is Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) // - This section of the Developer Guide describes the underlying data model, // the operations it supports, and the AWS SDKs that you can use to interact // with the service. // -// * Getting Started with Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html) +// * Getting Started with Amazon S3 Glacier (https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html) // - The Getting Started section walks you through the process of creating // a vault, uploading archives, creating jobs to download archives, retrieving // the job output, and deleting archives. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/errors.go index c47e3bb305c..b3e0922d747 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/errors.go @@ -40,8 +40,8 @@ const ( // ErrCodeRequestTimeoutException for service response error code // "RequestTimeoutException". // - // Returned if, when uploading an archive, Amazon Glacier times out while receiving - // the upload. + // Returned if, when uploading an archive, Amazon S3 Glacier times out while + // receiving the upload. ErrCodeRequestTimeoutException = "RequestTimeoutException" // ErrCodeResourceNotFoundException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go index 85e6e367b20..b8e0cffc6cb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -46,11 +46,11 @@ const ( // svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glacier { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glacier { svc := &Glacier{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go index ac7d30d2d6e..d86736d9665 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go @@ -62,6 +62,8 @@ func (c *GlobalAccelerator) CreateAcceleratorRequest(input *CreateAcceleratorInp // each of which includes endpoints, such as Network Load Balancers. To see // an AWS CLI example of creating an accelerator, scroll down to Example. // +// You must specify the US-West-2 (Oregon) Region to create or update accelerators. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -177,6 +179,9 @@ func (c *GlobalAccelerator) CreateEndpointGroupRequest(input *CreateEndpointGrou // Processing your request would cause you to exceed an AWS Global Accelerator // limit. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have access permission. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/globalaccelerator-2018-08-08/CreateEndpointGroup func (c *GlobalAccelerator) CreateEndpointGroup(input *CreateEndpointGroupInput) (*CreateEndpointGroupOutput, error) { req, out := c.CreateEndpointGroupRequest(input) @@ -445,6 +450,9 @@ func (c *GlobalAccelerator) DeleteEndpointGroupRequest(input *DeleteEndpointGrou // API operation DeleteEndpointGroup for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// An argument that you specified is invalid. +// // * ErrCodeEndpointGroupNotFoundException "EndpointGroupNotFoundException" // The endpoint group that you specified doesn't exist. // @@ -528,6 +536,9 @@ func (c *GlobalAccelerator) DeleteListenerRequest(input *DeleteListenerInput) (r // API operation DeleteListener for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// An argument that you specified is invalid. +// // * ErrCodeListenerNotFoundException "ListenerNotFoundException" // The listener that you specified doesn't exist. // @@ -786,6 +797,9 @@ func (c *GlobalAccelerator) DescribeEndpointGroupRequest(input *DescribeEndpoint // API operation DescribeEndpointGroup for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// An argument that you specified is invalid. +// // * ErrCodeEndpointGroupNotFoundException "EndpointGroupNotFoundException" // The endpoint group that you specified doesn't exist. 
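Every operation in these files carries the same guidance: inspect failures through awserr.Error's Code and Message methods. The error codes added in the Global Accelerator hunks (InvalidArgumentException, AccessDeniedException, and so on) are matched exactly the same way. A generic sketch, with a hypothetical endpoint group ARN used only to provoke an error:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/globalaccelerator"
    )

    func main() {
        svc := globalaccelerator.New(session.Must(session.NewSession()))
        _, err := svc.DescribeEndpointGroup(&globalaccelerator.DescribeEndpointGroupInput{
            // Hypothetical ARN; in this sketch it exists only to trigger an error path.
            EndpointGroupArn: aws.String("arn:aws:globalaccelerator::111122223333:accelerator/a/listener/l/endpoint-group/g"),
        })
        if err == nil {
            return
        }
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case globalaccelerator.ErrCodeEndpointGroupNotFoundException,
                globalaccelerator.ErrCodeInvalidArgumentException:
                fmt.Println("request problem:", aerr.Message())
            default:
                log.Fatal(aerr)
            }
        }
    }
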
// @@ -953,6 +967,9 @@ func (c *GlobalAccelerator) ListAcceleratorsRequest(input *ListAcceleratorsInput // API operation ListAccelerators for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// An argument that you specified is invalid. +// // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // There isn't another item to return. // @@ -1123,6 +1140,9 @@ func (c *GlobalAccelerator) ListListenersRequest(input *ListListenersInput) (req // API operation ListListeners for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// An argument that you specified is invalid. +// // * ErrCodeAcceleratorNotFoundException "AcceleratorNotFoundException" // The accelerator that you specified doesn't exist. // @@ -1198,7 +1218,10 @@ func (c *GlobalAccelerator) UpdateAcceleratorRequest(input *UpdateAcceleratorInp // UpdateAccelerator API operation for AWS Global Accelerator. // -// Update an accelerator. +// Update an accelerator. To see an AWS CLI example of updating an accelerator, +// scroll down to Example. +// +// You must specify the US-West-2 (Oregon) Region to create or update accelerators. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1303,6 +1326,9 @@ func (c *GlobalAccelerator) UpdateAcceleratorAttributesRequest(input *UpdateAcce // * ErrCodeInvalidArgumentException "InvalidArgumentException" // An argument that you specified is invalid. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have access permission. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/globalaccelerator-2018-08-08/UpdateAcceleratorAttributes func (c *GlobalAccelerator) UpdateAcceleratorAttributes(input *UpdateAcceleratorAttributesInput) (*UpdateAcceleratorAttributesOutput, error) { req, out := c.UpdateAcceleratorAttributesRequest(input) @@ -1393,6 +1419,9 @@ func (c *GlobalAccelerator) UpdateEndpointGroupRequest(input *UpdateEndpointGrou // Processing your request would cause you to exceed an AWS Global Accelerator // limit. // +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have access permission. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/globalaccelerator-2018-08-08/UpdateEndpointGroup func (c *GlobalAccelerator) UpdateEndpointGroup(input *UpdateEndpointGroupInput) (*UpdateEndpointGroupOutput, error) { req, out := c.UpdateEndpointGroupRequest(input) @@ -1520,7 +1549,19 @@ type Accelerator struct { // The date and time that the accelerator was created. CreatedTime *time.Time `type:"timestamp"` - // Indicates whether theaccelerator is enabled. The value is true or false. + // The Domain Name System (DNS) name that Global Accelerator creates that points + // to your accelerator's static IP addresses. + // + // The naming convention for the DNS name is: a lower case letter a, followed + // by a 16-bit random hex string, followed by .awsglobalaccelerator.com. For + // example: a1234567890abcdef.awsglobalaccelerator.com. + // + // For more information about the default DNS name, see Support for DNS Addressing + // in Global Accelerator (https://docs.aws.amazon.com/global-accelerator/latest/dg/about-accelerators.html#about-accelerators.dns-addressing) + // in the AWS Global Accelerator Developer Guide. 
+ DnsName *string `type:"string"` + + // Indicates whether the accelerator is enabled. The value is true or false. // The default value is true. // // If the value is set to true, the accelerator cannot be deleted. If set to @@ -1530,15 +1571,14 @@ type Accelerator struct { // The value for the address type must be IPv4. IpAddressType *string `type:"string" enum:"IpAddressType"` - // IP address set associated with the accelerator. + // The static IP addresses that Global Accelerator associates with the accelerator. IpSets []*IpSet `type:"list"` // The date and time that the accelerator was last modified. LastModifiedTime *time.Time `type:"timestamp"` - // The name of the accelerator. The name can have a maximum of 32 characters, - // must contain only alphanumeric characters or hyphens (-), and must not begin - // or end with a hyphen. + // The name of the accelerator. The name must contain only alphanumeric characters + // or hyphens (-), and must not begin or end with a hyphen. Name *string `type:"string"` // Describes the deployment status of the accelerator. @@ -1567,6 +1607,12 @@ func (s *Accelerator) SetCreatedTime(v time.Time) *Accelerator { return s } +// SetDnsName sets the DnsName field's value. +func (s *Accelerator) SetDnsName(v string) *Accelerator { + s.DnsName = &v + return s +} + // SetEnabled sets the Enabled field's value. func (s *Accelerator) SetEnabled(v bool) *Accelerator { s.Enabled = &v @@ -1620,8 +1666,8 @@ type AcceleratorAttributes struct { FlowLogsS3Bucket *string `type:"string"` // The prefix for the location in the Amazon S3 bucket for the flow logs. Attribute - // is required if FlowLogsEnabled is true. If you don’t specify a prefix, the - // flow logs are stored in the root of the bucket. + // is required if FlowLogsEnabled is true. If you don’t specify a prefix, + // the flow logs are stored in the root of the bucket. FlowLogsS3Prefix *string `type:"string"` } @@ -1948,10 +1994,11 @@ type CreateListenerInput struct { // AWS Global Accelerator uses a consistent-flow hashing algorithm to choose // the optimal endpoint for a connection. If client affinity is NONE, Global // Accelerator uses the "five-tuple" (5-tuple) properties—source IP address, - // source port, destination IP address, destination port, and protocol—to select - // the hash value, and then chooses the best endpoint. However, with this setting, - // if someone uses different ports to connect to Global Accelerator, their connections - // might not be always routed to the same endpoint because the hash value changes. + // source port, destination IP address, destination port, and protocol—to + // select the hash value, and then chooses the best endpoint. However, with + // this setting, if someone uses different ports to connect to Global Accelerator, + // their connections might not be always routed to the same endpoint because + // the hash value changes. // // If you want a given client to always be routed to the same endpoint, set // client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, @@ -2236,8 +2283,10 @@ type DescribeAcceleratorAttributesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the accelerator with the attributes that - // you want to describe. Value is required. - AcceleratorArn *string `type:"string"` + // you want to describe. 
+ // + // AcceleratorArn is a required field + AcceleratorArn *string `type:"string" required:"true"` } // String returns the string representation @@ -2250,6 +2299,19 @@ func (s DescribeAcceleratorAttributesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAcceleratorAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAcceleratorAttributesInput"} + if s.AcceleratorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AcceleratorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAcceleratorArn sets the AcceleratorArn field's value. func (s *DescribeAcceleratorAttributesInput) SetAcceleratorArn(v string) *DescribeAcceleratorAttributesInput { s.AcceleratorArn = &v @@ -2466,6 +2528,19 @@ func (s *DescribeListenerOutput) SetListener(v *Listener) *DescribeListenerOutpu type EndpointConfiguration struct { _ struct{} `type:"structure"` + // Indicates whether client IP address preservation is enabled for an Application + // Load Balancer endpoint. The value is true or false. The default value is + // true for new accelerators. + // + // If the value is set to true, the client's IP address is preserved in the + // X-Forwarded-For request header as traffic travels to applications on the + // Application Load Balancer endpoint fronted by the accelerator. + // + // For more information, see Viewing Client IP Addresses in AWS Global Accelerator + // (https://docs.aws.amazon.com/global-accelerator/latest/dg/introduction-how-it-works-client-ip.html) + // in the AWS Global Accelerator Developer Guide. + ClientIPPreservationEnabled *bool `type:"boolean"` + // An ID for the endpoint. If the endpoint is a Network Load Balancer or Application // Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If // the endpoint is an Elastic IP address, this is the Elastic IP address allocation @@ -2493,6 +2568,12 @@ func (s EndpointConfiguration) GoString() string { return s.String() } +// SetClientIPPreservationEnabled sets the ClientIPPreservationEnabled field's value. +func (s *EndpointConfiguration) SetClientIPPreservationEnabled(v bool) *EndpointConfiguration { + s.ClientIPPreservationEnabled = &v + return s +} + // SetEndpointId sets the EndpointId field's value. func (s *EndpointConfiguration) SetEndpointId(v string) *EndpointConfiguration { s.EndpointId = &v @@ -2510,10 +2591,23 @@ func (s *EndpointConfiguration) SetWeight(v int64) *EndpointConfiguration { type EndpointDescription struct { _ struct{} `type:"structure"` + // Indicates whether client IP address preservation is enabled for an Application + // Load Balancer endpoint. The value is true or false. The default value is + // true for new accelerators. + // + // If the value is set to true, the client's IP address is preserved in the + // X-Forwarded-For request header as traffic travels to applications on the + // Application Load Balancer endpoint fronted by the accelerator. + // + // For more information, see Viewing Client IP Addresses in AWS Global Accelerator + // (https://docs.aws.amazon.com/global-accelerator/latest/dg/introduction-how-it-works-client-ip.html) + // in the AWS Global Accelerator Developer Guide. + ClientIPPreservationEnabled *bool `type:"boolean"` + // An ID for the endpoint. If the endpoint is a Network Load Balancer or Application // Load Balancer, this is the Amazon Resource Name (ARN) of the resource. 
If // the endpoint is an Elastic IP address, this is the Elastic IP address allocation - // ID. + // ID. An Application Load Balancer can be either internal or internet-facing. EndpointId *string `type:"string"` // The reason code associated with why the endpoint is not healthy. If the endpoint @@ -2562,6 +2656,12 @@ func (s EndpointDescription) GoString() string { return s.String() } +// SetClientIPPreservationEnabled sets the ClientIPPreservationEnabled field's value. +func (s *EndpointDescription) SetClientIPPreservationEnabled(v bool) *EndpointDescription { + s.ClientIPPreservationEnabled = &v + return s +} + // SetEndpointId sets the EndpointId field's value. func (s *EndpointDescription) SetEndpointId(v string) *EndpointDescription { s.EndpointId = &v @@ -3016,10 +3116,11 @@ type Listener struct { // AWS Global Accelerator uses a consistent-flow hashing algorithm to choose // the optimal endpoint for a connection. If client affinity is NONE, Global // Accelerator uses the "five-tuple" (5-tuple) properties—source IP address, - // source port, destination IP address, destination port, and protocol—to select - // the hash value, and then chooses the best endpoint. However, with this setting, - // if someone uses different ports to connect to Global Accelerator, their connections - // might not be always routed to the same endpoint because the hash value changes. + // source port, destination IP address, destination port, and protocol—to + // select the hash value, and then chooses the best endpoint. However, with + // this setting, if someone uses different ports to connect to Global Accelerator, + // their connections might not be always routed to the same endpoint because + // the hash value changes. // // If you want a given client to always be routed to the same endpoint, set // client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, @@ -3126,8 +3227,9 @@ type UpdateAcceleratorAttributesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the accelerator that you want to update. - // Attribute is required. - AcceleratorArn *string `type:"string"` + // + // AcceleratorArn is a required field + AcceleratorArn *string `type:"string" required:"true"` // Update whether flow logs are enabled. The default value is false. If the // value is true, FlowLogsS3Bucket and FlowLogsS3Prefix must be specified. @@ -3142,8 +3244,8 @@ type UpdateAcceleratorAttributesInput struct { FlowLogsS3Bucket *string `type:"string"` // Update the prefix for the location in the Amazon S3 bucket for the flow logs. - // Attribute is required if FlowLogsEnabled is true. If you don’t specify a - // prefix, the flow logs are stored in the root of the bucket. + // Attribute is required if FlowLogsEnabled is true. If you don’t specify + // a prefix, the flow logs are stored in the root of the bucket. FlowLogsS3Prefix *string `type:"string"` } @@ -3157,6 +3259,19 @@ func (s UpdateAcceleratorAttributesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAcceleratorAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAcceleratorAttributesInput"} + if s.AcceleratorArn == nil { + invalidParams.Add(request.NewErrParamRequired("AcceleratorArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAcceleratorArn sets the AcceleratorArn field's value. 
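Editor's note: two of the Global Accelerator changes above are caller-visible: AcceleratorArn is now a required field with client-side Validate support, and endpoint configurations gain ClientIPPreservationEnabled with a generated setter. A sketch under those assumptions (all ARNs hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/globalaccelerator"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := globalaccelerator.New(sess)

	// The newly added Validate rejects a missing AcceleratorArn before any
	// HTTP request is made.
	bad := &globalaccelerator.UpdateAcceleratorAttributesInput{}
	fmt.Println(bad.Validate()) // reports the missing required field

	// ClientIPPreservationEnabled is set per endpoint configuration.
	cfg := (&globalaccelerator.EndpointConfiguration{}).
		SetEndpointId("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/example/0123456789abcdef").
		SetWeight(128).
		SetClientIPPreservationEnabled(true)

	_, err := svc.UpdateEndpointGroup(&globalaccelerator.UpdateEndpointGroupInput{
		EndpointGroupArn:       aws.String("arn:aws:globalaccelerator::123456789012:accelerator/example/listener/abc/endpoint-group/def"),
		EndpointConfigurations: []*globalaccelerator.EndpointConfiguration{cfg},
	})
	if err != nil {
		log.Print(err)
	}
}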
func (s *UpdateAcceleratorAttributesInput) SetAcceleratorArn(v string) *UpdateAcceleratorAttributesInput { s.AcceleratorArn = &v @@ -3457,10 +3572,11 @@ type UpdateListenerInput struct { // AWS Global Accelerator uses a consistent-flow hashing algorithm to choose // the optimal endpoint for a connection. If client affinity is NONE, Global // Accelerator uses the "five-tuple" (5-tuple) properties—source IP address, - // source port, destination IP address, destination port, and protocol—to select - // the hash value, and then chooses the best endpoint. However, with this setting, - // if someone uses different ports to connect to Global Accelerator, their connections - // might not be always routed to the same endpoint because the hash value changes. + // source port, destination IP address, destination port, and protocol—to + // select the hash value, and then chooses the best endpoint. However, with + // this setting, if someone uses different ports to connect to Global Accelerator, + // their connections might not be always routed to the same endpoint because + // the hash value changes. // // If you want a given client to always be routed to the same endpoint, set // client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/doc.go index f75f98cf14e..eda86021620 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/doc.go @@ -12,6 +12,8 @@ // to improve availability and performance for internet applications used by // a global audience. // +// You must specify the US-West-2 (Oregon) Region to create or update accelerators. +// // Global Accelerator provides you with static IP addresses that you associate // with your accelerator. These IP addresses are anycast from the AWS edge network // and distribute incoming application traffic across multiple endpoint resources @@ -28,42 +30,53 @@ // Global Accelerator includes components that work together to help you improve // performance and availability for your applications: // -// Static IP addressAWS Global Accelerator provides you with a set of static -// IP addresses which are anycast from the AWS edge network and serve as the -// single fixed entry points for your clients. If you already have Elastic Load -// Balancing or Elastic IP address resources set up for your applications, you -// can easily add those to Global Accelerator to allow the resources to be accessed -// by a Global Accelerator static IP address. -// -// AcceleratorAn accelerator directs traffic to optimal endpoints over the AWS -// global network to improve availability and performance for your internet -// applications that have a global audience. Each accelerator includes one or -// more listeners. -// -// Network zoneA network zone services the static IP addresses for your accelerator -// from a unique IP subnet. Similar to an AWS Availability Zone, a network zone -// is an isolated unit with its own set of physical infrastructure. When you -// configure an accelerator, Global Accelerator allocates two IPv4 addresses -// for it. 
If one IP address from a network zone becomes unavailable due to -// IP address blocking by certain client networks, or network disruptions, then -// client applications can retry on the healthy static IP address from the other -// isolated network zone. -// -// ListenerA listener processes inbound connections from clients to Global Accelerator, +// Static IP address +// +// AWS Global Accelerator provides you with a set of static IP addresses which +// are anycast from the AWS edge network and serve as the single fixed entry +// points for your clients. If you already have Elastic Load Balancing or Elastic +// IP address resources set up for your applications, you can easily add those +// to Global Accelerator to allow the resources to be accessed by a Global Accelerator +// static IP address. +// +// Accelerator +// +// An accelerator directs traffic to optimal endpoints over the AWS global network +// to improve availability and performance for your internet applications that +// have a global audience. Each accelerator includes one or more listeners. +// +// Network zone +// +// A network zone services the static IP addresses for your accelerator from +// a unique IP subnet. Similar to an AWS Availability Zone, a network zone is +// an isolated unit with its own set of physical infrastructure. When you configure +// an accelerator, Global Accelerator allocates two IPv4 addresses for it. If +// one IP address from a network zone becomes unavailable due to IP address +// blocking by certain client networks, or network disruptions, then client +// applications can retry on the healthy static IP address from the other isolated +// network zone. +// +// Listener +// +// A listener processes inbound connections from clients to Global Accelerator, // based on the protocol and port that you configure. Each listener has one // or more endpoint groups associated with it, and traffic is forwarded to endpoints // in one of the groups. You associate endpoint groups with listeners by specifying // the Regions that you want to distribute traffic to. Traffic is distributed // to optimal endpoints within the endpoint groups associated with a listener. // -// Endpoint groupEach endpoint group is associated with a specific AWS Region. -// Endpoint groups include one or more endpoints in the Region. You can increase -// or reduce the percentage of traffic that would be otherwise directed to an -// endpoint group by adjusting a setting called a traffic dial. The traffic -// dial lets you easily do performance testing or blue/green deployment testing -// for new releases across different AWS Regions, for example. +// Endpoint group +// +// Each endpoint group is associated with a specific AWS Region. Endpoint groups +// include one or more endpoints in the Region. You can increase or reduce the +// percentage of traffic that would be otherwise directed to an endpoint group +// by adjusting a setting called a traffic dial. The traffic dial lets you easily +// do performance testing or blue/green deployment testing for new releases +// across different AWS Regions, for example. +// +// Endpoint // -// EndpointAn endpoint is an Elastic IP address, Network Load Balancer, or Application +// An endpoint is an Elastic IP address, Network Load Balancer, or Application // Load Balancer. Traffic is routed to endpoints based on several factors, including // the geo-proximity to the user, the health of the endpoint, and the configuration // options that you choose, such as endpoint weights. 
For each endpoint, you diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/errors.go index 13aea5e363d..8c1866ba2d4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/errors.go @@ -16,6 +16,12 @@ const ( // The accelerator that you specified doesn't exist. ErrCodeAcceleratorNotFoundException = "AcceleratorNotFoundException" + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You don't have access permission. + ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeAssociatedEndpointGroupFoundException for service response error code // "AssociatedEndpointGroupFoundException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go index 31552ab8c29..7750c412f61 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GlobalAccelerator { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "globalaccelerator" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GlobalAccelerator { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GlobalAccelerator { svc := &GlobalAccelerator{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-08", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/api.go index e30b6047252..a35163defca 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/api.go @@ -326,14 +326,14 @@ func (c *Glue) BatchDeleteTableRequest(input *BatchDeleteTableInput) (req *reque // // Deletes multiple tables at once. // -// After completing this operation, you will no longer have access to the table -// versions and partitions that belong to the deleted table. AWS Glue deletes -// these "orphaned" resources asynchronously in a timely manner, at the discretion -// of the service. +// After completing this operation, you no longer have access to the table versions +// and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" +// resources asynchronously in a timely manner, at the discretion of the service. 
// -// To ensure immediate deletion of all related resources, before calling BatchDeleteTable, -// use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or -// BatchDeletePartition, to delete any resources that belong to the table. +// To ensure the immediate deletion of all related resources, before calling +// BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and +// DeletePartition or BatchDeletePartition, to delete any resources that belong +// to the table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -594,8 +594,8 @@ func (c *Glue) BatchGetDevEndpointsRequest(input *BatchGetDevEndpointsInput) (re // BatchGetDevEndpoints API operation for AWS Glue. // -// Returns a list of resource metadata for a given list of DevEndpoint names. -// After calling the ListDevEndpoints operation, you can call this operation +// Returns a list of resource metadata for a given list of development endpoint +// names. After calling the ListDevEndpoints operation, you can call this operation // to access the data to which you have been granted permissions. This operation // supports all IAM permissions, including permission conditions that uses tags. // @@ -908,6 +908,94 @@ func (c *Glue) BatchGetTriggersWithContext(ctx aws.Context, input *BatchGetTrigg return out, req.Send() } +const opBatchGetWorkflows = "BatchGetWorkflows" + +// BatchGetWorkflowsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetWorkflows operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetWorkflows for more information on using the BatchGetWorkflows +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetWorkflowsRequest method. +// req, resp := client.BatchGetWorkflowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetWorkflows +func (c *Glue) BatchGetWorkflowsRequest(input *BatchGetWorkflowsInput) (req *request.Request, output *BatchGetWorkflowsOutput) { + op := &request.Operation{ + Name: opBatchGetWorkflows, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetWorkflowsInput{} + } + + output = &BatchGetWorkflowsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetWorkflows API operation for AWS Glue. +// +// Returns a list of resource metadata for a given list of workflow names. After +// calling the ListWorkflows operation, you can call this operation to access +// the data to which you have been granted permissions. This operation supports +// all IAM permissions, including permission conditions that uses tags. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
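Editor's note: the new BatchGetWorkflows operation uses the same generated Request/Send pattern shown above. A minimal sketch; the workflow name is hypothetical, and the Names/IncludeGraph fields are assumed from the Glue API shape defined later in this file:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Build the request first, then Send, mirroring the generated example.
	req, resp := svc.BatchGetWorkflowsRequest(&glue.BatchGetWorkflowsInput{
		Names:        []*string{aws.String("example-workflow")},
		IncludeGraph: aws.Bool(false),
	})
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Workflows, resp.MissingWorkflows)
}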
+// +// See the AWS API reference guide for AWS Glue's +// API operation BatchGetWorkflows for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchGetWorkflows +func (c *Glue) BatchGetWorkflows(input *BatchGetWorkflowsInput) (*BatchGetWorkflowsOutput, error) { + req, out := c.BatchGetWorkflowsRequest(input) + return out, req.Send() +} + +// BatchGetWorkflowsWithContext is the same as BatchGetWorkflows with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetWorkflows for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) BatchGetWorkflowsWithContext(ctx aws.Context, input *BatchGetWorkflowsInput, opts ...request.Option) (*BatchGetWorkflowsOutput, error) { + req, out := c.BatchGetWorkflowsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opBatchStopJobRun = "BatchStopJobRun" // BatchStopJobRunRequest generates a "aws/request.Request" representing the @@ -993,6 +1081,97 @@ func (c *Glue) BatchStopJobRunWithContext(ctx aws.Context, input *BatchStopJobRu return out, req.Send() } +const opCancelMLTaskRun = "CancelMLTaskRun" + +// CancelMLTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the CancelMLTaskRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelMLTaskRun for more information on using the CancelMLTaskRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelMLTaskRunRequest method. +// req, resp := client.CancelMLTaskRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CancelMLTaskRun +func (c *Glue) CancelMLTaskRunRequest(input *CancelMLTaskRunInput) (req *request.Request, output *CancelMLTaskRunOutput) { + op := &request.Operation{ + Name: opCancelMLTaskRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelMLTaskRunInput{} + } + + output = &CancelMLTaskRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelMLTaskRun API operation for AWS Glue. +// +// Cancels (stops) a task run. Machine learning task runs are asynchronous tasks +// that AWS Glue runs on your behalf as part of various machine learning workflows. 
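Editor's note: the WithContext variants added above (BatchGetWorkflowsWithContext and the rest) take an aws.Context, which a standard context.Context satisfies. A sketch wiring in a deadline:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// A plain context.Context satisfies aws.Context; the request is
	// cancelled if the deadline passes before Send completes.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.BatchGetWorkflowsWithContext(ctx, &glue.BatchGetWorkflowsInput{
		Names: []*string{aws.String("example-workflow")}, // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}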
+// You can cancel a machine learning task run at any time by calling CancelMLTaskRun +// with a task run's parent transform's TransformID and the task run's TaskRunId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation CancelMLTaskRun for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CancelMLTaskRun +func (c *Glue) CancelMLTaskRun(input *CancelMLTaskRunInput) (*CancelMLTaskRunOutput, error) { + req, out := c.CancelMLTaskRunRequest(input) + return out, req.Send() +} + +// CancelMLTaskRunWithContext is the same as CancelMLTaskRun with the addition of +// the ability to pass a context and additional request options. +// +// See CancelMLTaskRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) CancelMLTaskRunWithContext(ctx aws.Context, input *CancelMLTaskRunInput, opts ...request.Option) (*CancelMLTaskRunOutput, error) { + req, out := c.CancelMLTaskRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateClassifier = "CreateClassifier" // CreateClassifierRequest generates a "aws/request.Request" representing the @@ -1038,7 +1217,7 @@ func (c *Glue) CreateClassifierRequest(input *CreateClassifierInput) (req *reque // CreateClassifier API operation for AWS Glue. // -// Creates a classifier in the user's account. This may be a GrokClassifier, +// Creates a classifier in the user's account. This can be a GrokClassifier, // an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which // field of the request is present. // @@ -1403,7 +1582,7 @@ func (c *Glue) CreateDevEndpointRequest(input *CreateDevEndpointInput) (req *req // CreateDevEndpoint API operation for AWS Glue. // -// Creates a new DevEndpoint. +// Creates a new development endpoint. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1556,6 +1735,115 @@ func (c *Glue) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts return out, req.Send() } +const opCreateMLTransform = "CreateMLTransform" + +// CreateMLTransformRequest generates a "aws/request.Request" representing the +// client's request for the CreateMLTransform operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
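Editor's note: per the CancelMLTaskRun doc above, a cancellation needs the parent transform's ID plus the task run's ID. A sketch with hypothetical identifiers:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Both identifiers are required; the values here are hypothetical.
	out, err := svc.CancelMLTaskRun(&glue.CancelMLTaskRunInput{
		TransformId: aws.String("tfm-0123456789abcdef"),
		TaskRunId:   aws.String("tsk-0123456789abcdef"),
	})
	if err != nil {
		log.Fatal(err) // e.g. EntityNotFoundException for an unknown task run
	}
	log.Println(aws.StringValue(out.Status)) // the output echoes the run's status
}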
+// +// See CreateMLTransform for more information on using the CreateMLTransform +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMLTransformRequest method. +// req, resp := client.CreateMLTransformRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateMLTransform +func (c *Glue) CreateMLTransformRequest(input *CreateMLTransformInput) (req *request.Request, output *CreateMLTransformOutput) { + op := &request.Operation{ + Name: opCreateMLTransform, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMLTransformInput{} + } + + output = &CreateMLTransformOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMLTransform API operation for AWS Glue. +// +// Creates an AWS Glue machine learning transform. This operation creates the +// transform and all the necessary parameters to train it. +// +// Call this operation as the first step in the process of using a machine learning +// transform (such as the FindMatches transform) for deduplicating data. You +// can provide an optional Description, in addition to the parameters that you +// want to use for your algorithm. +// +// You must also specify certain parameters for the tasks that AWS Glue runs +// on your behalf as part of learning from your data and creating a high-quality +// machine learning transform. These parameters include Role, and optionally, +// AllocatedCapacity, Timeout, and MaxRetries. For more information, see Jobs +// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-jobs-job.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation CreateMLTransform for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAlreadyExistsException "AlreadyExistsException" +// A resource to be created or added already exists. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Access to a resource was denied. +// +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. +// +// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" +// The same unique identifier was associated with two different records. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateMLTransform +func (c *Glue) CreateMLTransform(input *CreateMLTransformInput) (*CreateMLTransformOutput, error) { + req, out := c.CreateMLTransformRequest(input) + return out, req.Send() +} + +// CreateMLTransformWithContext is the same as CreateMLTransform with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMLTransform for details on how to use this API operation. 
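Editor's note: the CreateMLTransform doc above lists the required pieces (a name, input tables, transform parameters, and a Role, plus optional Timeout and MaxRetries). A FindMatches sketch; every name and the IAM role are hypothetical, and the parameter structs are assumed from the Glue API shapes defined later in this file:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Database, table, role, and key column are hypothetical placeholders.
	out, err := svc.CreateMLTransform(&glue.CreateMLTransformInput{
		Name: aws.String("example-dedupe"),
		Role: aws.String("arn:aws:iam::123456789012:role/GlueMLRole"),
		InputRecordTables: []*glue.GlueTable{{
			DatabaseName: aws.String("example_db"),
			TableName:    aws.String("customers"),
		}},
		Parameters: &glue.TransformParameters{
			TransformType: aws.String(glue.TransformTypeFindMatches),
			FindMatchesParameters: &glue.FindMatchesParameters{
				PrimaryKeyColumnName: aws.String("customer_id"),
			},
		},
		MaxRetries: aws.Int64(1),
		Timeout:    aws.Int64(60), // minutes
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.TransformId))
}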
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) CreateMLTransformWithContext(ctx aws.Context, input *CreateMLTransformInput, opts ...request.Option) (*CreateMLTransformOutput, error) { + req, out := c.CreateMLTransformRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreatePartition = "CreatePartition" // CreatePartitionRequest generates a "aws/request.Request" representing the @@ -1783,7 +2071,11 @@ func (c *Glue) CreateSecurityConfigurationRequest(input *CreateSecurityConfigura // CreateSecurityConfiguration API operation for AWS Glue. // -// Creates a new security configuration. +// Creates a new security configuration. A security configuration is a set of +// security properties that can be used by AWS Glue. You can use a security +// configuration to encrypt data at rest. For information about using security +// configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, +// and Development Endpoints (https://docs.aws.amazon.com/glue/latest/dg/encryption-security-configuration.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1985,6 +2277,9 @@ func (c *Glue) CreateTriggerRequest(input *CreateTriggerInput) (req *request.Req // * ErrCodeAlreadyExistsException "AlreadyExistsException" // A resource to be created or added already exists. // +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // @@ -2123,92 +2418,186 @@ func (c *Glue) CreateUserDefinedFunctionWithContext(ctx aws.Context, input *Crea return out, req.Send() } -const opDeleteClassifier = "DeleteClassifier" +const opCreateWorkflow = "CreateWorkflow" -// DeleteClassifierRequest generates a "aws/request.Request" representing the -// client's request for the DeleteClassifier operation. The "output" return +// CreateWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkflow operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteClassifier for more information on using the DeleteClassifier +// See CreateWorkflow for more information on using the CreateWorkflow // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteClassifierRequest method. -// req, resp := client.DeleteClassifierRequest(params) +// // Example sending a request using the CreateWorkflowRequest method. 
+// req, resp := client.CreateWorkflowRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier -func (c *Glue) DeleteClassifierRequest(input *DeleteClassifierInput) (req *request.Request, output *DeleteClassifierOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateWorkflow +func (c *Glue) CreateWorkflowRequest(input *CreateWorkflowInput) (req *request.Request, output *CreateWorkflowOutput) { op := &request.Operation{ - Name: opDeleteClassifier, + Name: opCreateWorkflow, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteClassifierInput{} + input = &CreateWorkflowInput{} } - output = &DeleteClassifierOutput{} + output = &CreateWorkflowOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteClassifier API operation for AWS Glue. +// CreateWorkflow API operation for AWS Glue. // -// Removes a classifier from the Data Catalog. +// Creates a new workflow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation DeleteClassifier for usage and error information. +// API operation CreateWorkflow for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist +// * ErrCodeAlreadyExistsException "AlreadyExistsException" +// A resource to be created or added already exists. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier -func (c *Glue) DeleteClassifier(input *DeleteClassifierInput) (*DeleteClassifierOutput, error) { - req, out := c.DeleteClassifierRequest(input) +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateWorkflow +func (c *Glue) CreateWorkflow(input *CreateWorkflowInput) (*CreateWorkflowOutput, error) { + req, out := c.CreateWorkflowRequest(input) return out, req.Send() } -// DeleteClassifierWithContext is the same as DeleteClassifier with the addition of +// CreateWorkflowWithContext is the same as CreateWorkflow with the addition of // the ability to pass a context and additional request options. // -// See DeleteClassifier for details on how to use this API operation. +// See CreateWorkflow for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
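Editor's note: a small sketch of the new CreateWorkflow operation. The name and run properties are hypothetical; per the Glue API, Name is the only required field:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	out, err := svc.CreateWorkflow(&glue.CreateWorkflowInput{
		Name:        aws.String("nightly-etl"), // hypothetical
		Description: aws.String("Orchestrates the nightly crawl-then-transform run."),
		DefaultRunProperties: map[string]*string{
			"environment": aws.String("staging"),
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. AlreadyExistsException if the name is taken
	}
	log.Println(aws.StringValue(out.Name))
}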
-func (c *Glue) DeleteClassifierWithContext(ctx aws.Context, input *DeleteClassifierInput, opts ...request.Option) (*DeleteClassifierOutput, error) { - req, out := c.DeleteClassifierRequest(input) +func (c *Glue) CreateWorkflowWithContext(ctx aws.Context, input *CreateWorkflowInput, opts ...request.Option) (*CreateWorkflowOutput, error) { + req, out := c.CreateWorkflowRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteConnection = "DeleteConnection" +const opDeleteClassifier = "DeleteClassifier" -// DeleteConnectionRequest generates a "aws/request.Request" representing the +// DeleteClassifierRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClassifier operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteClassifier for more information on using the DeleteClassifier +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteClassifierRequest method. +// req, resp := client.DeleteClassifierRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier +func (c *Glue) DeleteClassifierRequest(input *DeleteClassifierInput) (req *request.Request, output *DeleteClassifierOutput) { + op := &request.Operation{ + Name: opDeleteClassifier, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClassifierInput{} + } + + output = &DeleteClassifierOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteClassifier API operation for AWS Glue. +// +// Removes a classifier from the Data Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation DeleteClassifier for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteClassifier +func (c *Glue) DeleteClassifier(input *DeleteClassifierInput) (*DeleteClassifierOutput, error) { + req, out := c.DeleteClassifierRequest(input) + return out, req.Send() +} + +// DeleteClassifierWithContext is the same as DeleteClassifier with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteClassifier for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Glue) DeleteClassifierWithContext(ctx aws.Context, input *DeleteClassifierInput, opts ...request.Option) (*DeleteClassifierOutput, error) { + req, out := c.DeleteClassifierRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteConnection = "DeleteConnection" + +// DeleteConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteConnection operation. The "output" return // value will be populated with the request's response once the request completes // successfully. @@ -2334,8 +2723,8 @@ func (c *Glue) DeleteCrawlerRequest(input *DeleteCrawlerInput) (req *request.Req // DeleteCrawler API operation for AWS Glue. // -// Removes a specified crawler from the Data Catalog, unless the crawler state -// is RUNNING. +// Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler +// state is RUNNING. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2424,18 +2813,18 @@ func (c *Glue) DeleteDatabaseRequest(input *DeleteDatabaseInput) (req *request.R // DeleteDatabase API operation for AWS Glue. // -// Removes a specified Database from a Data Catalog. +// Removes a specified database from a Data Catalog. // -// After completing this operation, you will no longer have access to the tables +// After completing this operation, you no longer have access to the tables // (and all table versions and partitions that might belong to the tables) and // the user-defined functions in the deleted database. AWS Glue deletes these // "orphaned" resources asynchronously in a timely manner, at the discretion // of the service. // -// To ensure immediate deletion of all related resources, before calling DeleteDatabase, -// use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, -// DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete -// any resources that belong to the database. +// To ensure the immediate deletion of all related resources, before calling +// DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition +// or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, +// to delete any resources that belong to the database. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2524,7 +2913,7 @@ func (c *Glue) DeleteDevEndpointRequest(input *DeleteDevEndpointInput) (req *req // DeleteDevEndpoint API operation for AWS Glue. // -// Deletes a specified DevEndpoint. +// Deletes a specified development endpoint. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2654,6 +3043,100 @@ func (c *Glue) DeleteJobWithContext(ctx aws.Context, input *DeleteJobInput, opts return out, req.Send() } +const opDeleteMLTransform = "DeleteMLTransform" + +// DeleteMLTransformRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMLTransform operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
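Editor's note: the DeleteDatabase guidance above amounts to a bottom-up teardown. A sketch of that order for a database with one table; identifiers are hypothetical, and partition and user-defined-function cleanup is elided for a database that has none:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
	db, table := aws.String("example_db"), aws.String("events") // hypothetical

	// Bottom-up: table versions first, then the table, then the database,
	// so nothing is left behind for asynchronous "orphan" cleanup.
	if _, err := svc.BatchDeleteTableVersion(&glue.BatchDeleteTableVersionInput{
		DatabaseName: db,
		TableName:    table,
		VersionIds:   []*string{aws.String("1")}, // hypothetical version
	}); err != nil {
		log.Print(err)
	}
	if _, err := svc.BatchDeleteTable(&glue.BatchDeleteTableInput{
		DatabaseName:   db,
		TablesToDelete: []*string{table},
	}); err != nil {
		log.Print(err)
	}
	if _, err := svc.DeleteDatabase(&glue.DeleteDatabaseInput{Name: db}); err != nil {
		log.Fatal(err)
	}
}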
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMLTransform for more information on using the DeleteMLTransform +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteMLTransformRequest method. +// req, resp := client.DeleteMLTransformRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteMLTransform +func (c *Glue) DeleteMLTransformRequest(input *DeleteMLTransformInput) (req *request.Request, output *DeleteMLTransformOutput) { + op := &request.Operation{ + Name: opDeleteMLTransform, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMLTransformInput{} + } + + output = &DeleteMLTransformOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteMLTransform API operation for AWS Glue. +// +// Deletes an AWS Glue machine learning transform. Machine learning transforms +// are a special type of transform that use machine learning to learn the details +// of the transformation to be performed by learning from examples provided +// by humans. These transformations are then saved by AWS Glue. If you no longer +// need a transform, you can delete it by calling DeleteMLTransforms. However, +// any AWS Glue jobs that still reference the deleted transform will no longer +// succeed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation DeleteMLTransform for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteMLTransform +func (c *Glue) DeleteMLTransform(input *DeleteMLTransformInput) (*DeleteMLTransformOutput, error) { + req, out := c.DeleteMLTransformRequest(input) + return out, req.Send() +} + +// DeleteMLTransformWithContext is the same as DeleteMLTransform with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMLTransform for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) DeleteMLTransformWithContext(ctx aws.Context, input *DeleteMLTransformInput, opts ...request.Option) (*DeleteMLTransformOutput, error) { + req, out := c.DeleteMLTransformRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeletePartition = "DeletePartition" // DeletePartitionRequest generates a "aws/request.Request" representing the @@ -2971,14 +3454,13 @@ func (c *Glue) DeleteTableRequest(input *DeleteTableInput) (req *request.Request // // Removes a table definition from the Data Catalog. // -// After completing this operation, you will no longer have access to the table -// versions and partitions that belong to the deleted table. AWS Glue deletes -// these "orphaned" resources asynchronously in a timely manner, at the discretion -// of the service. +// After completing this operation, you no longer have access to the table versions +// and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" +// resources asynchronously in a timely manner, at the discretion of the service. // -// To ensure immediate deletion of all related resources, before calling DeleteTable, -// use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or -// BatchDeletePartition, to delete any resources that belong to the table. +// To ensure the immediate deletion of all related resources, before calling +// DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition +// or BatchDeletePartition, to delete any resources that belong to the table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3289,6 +3771,94 @@ func (c *Glue) DeleteUserDefinedFunctionWithContext(ctx aws.Context, input *Dele return out, req.Send() } +const opDeleteWorkflow = "DeleteWorkflow" + +// DeleteWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWorkflow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWorkflow for more information on using the DeleteWorkflow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteWorkflowRequest method. +// req, resp := client.DeleteWorkflowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteWorkflow +func (c *Glue) DeleteWorkflowRequest(input *DeleteWorkflowInput) (req *request.Request, output *DeleteWorkflowOutput) { + op := &request.Operation{ + Name: opDeleteWorkflow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWorkflowInput{} + } + + output = &DeleteWorkflowOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteWorkflow API operation for AWS Glue. +// +// Deletes a workflow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation DeleteWorkflow for usage and error information. 
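Editor's note: the converse of the CreateWorkflow sketch earlier; the workflow name is hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Deletion is by name; the output echoes the deleted workflow's name.
	out, err := svc.DeleteWorkflow(&glue.DeleteWorkflowInput{
		Name: aws.String("nightly-etl"), // hypothetical
	})
	if err != nil {
		log.Fatal(err) // e.g. ConcurrentModificationException on racing edits
	}
	log.Println(aws.StringValue(out.Name))
}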
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteWorkflow +func (c *Glue) DeleteWorkflow(input *DeleteWorkflowInput) (*DeleteWorkflowOutput, error) { + req, out := c.DeleteWorkflowRequest(input) + return out, req.Send() +} + +// DeleteWorkflowWithContext is the same as DeleteWorkflow with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWorkflow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) DeleteWorkflowWithContext(ctx aws.Context, input *DeleteWorkflowInput, opts ...request.Option) (*DeleteWorkflowOutput, error) { + req, out := c.DeleteWorkflowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCatalogImportStatus = "GetCatalogImportStatus" // GetCatalogImportStatusRequest generates a "aws/request.Request" representing the @@ -3549,7 +4119,7 @@ func (c *Glue) GetClassifiersWithContext(ctx aws.Context, input *GetClassifiersI // // Example iterating over at most 3 pages of a GetClassifiers operation. // pageNum := 0 // err := client.GetClassifiersPages(params, -// func(page *GetClassifiersOutput, lastPage bool) bool { +// func(page *glue.GetClassifiersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3581,10 +4151,12 @@ func (c *Glue) GetClassifiersPagesWithContext(ctx aws.Context, input *GetClassif }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetClassifiersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetClassifiersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3781,7 +4353,7 @@ func (c *Glue) GetConnectionsWithContext(ctx aws.Context, input *GetConnectionsI // // Example iterating over at most 3 pages of a GetConnections operation. // pageNum := 0 // err := client.GetConnectionsPages(params, -// func(page *GetConnectionsOutput, lastPage bool) bool { +// func(page *glue.GetConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3813,10 +4385,12 @@ func (c *Glue) GetConnectionsPagesWithContext(ctx aws.Context, input *GetConnect }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetConnectionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetConnectionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3998,7 +4572,7 @@ func (c *Glue) GetCrawlerMetricsWithContext(ctx aws.Context, input *GetCrawlerMe // // Example iterating over at most 3 pages of a GetCrawlerMetrics operation. 
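+// The callback receives each page of results plus a flag marking the final
+// page; returning false stops the iteration early.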
// pageNum := 0 // err := client.GetCrawlerMetricsPages(params, -// func(page *GetCrawlerMetricsOutput, lastPage bool) bool { +// func(page *glue.GetCrawlerMetricsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4030,10 +4604,12 @@ func (c *Glue) GetCrawlerMetricsPagesWithContext(ctx aws.Context, input *GetCraw }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCrawlerMetricsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetCrawlerMetricsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4133,7 +4709,7 @@ func (c *Glue) GetCrawlersWithContext(ctx aws.Context, input *GetCrawlersInput, // // Example iterating over at most 3 pages of a GetCrawlers operation. // pageNum := 0 // err := client.GetCrawlersPages(params, -// func(page *GetCrawlersOutput, lastPage bool) bool { +// func(page *glue.GetCrawlersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4165,10 +4741,12 @@ func (c *Glue) GetCrawlersPagesWithContext(ctx aws.Context, input *GetCrawlersIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetCrawlersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetCrawlersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4398,7 +4976,7 @@ func (c *Glue) GetDatabasesRequest(input *GetDatabasesInput) (req *request.Reque // GetDatabases API operation for AWS Glue. // -// Retrieves all Databases defined in a given Data Catalog. +// Retrieves all databases defined in a given Data Catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4453,7 +5031,7 @@ func (c *Glue) GetDatabasesWithContext(ctx aws.Context, input *GetDatabasesInput // // Example iterating over at most 3 pages of a GetDatabases operation. // pageNum := 0 // err := client.GetDatabasesPages(params, -// func(page *GetDatabasesOutput, lastPage bool) bool { +// func(page *glue.GetDatabasesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4485,10 +5063,12 @@ func (c *Glue) GetDatabasesPagesWithContext(ctx aws.Context, input *GetDatabases }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDatabasesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetDatabasesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4621,7 +5201,7 @@ func (c *Glue) GetDevEndpointRequest(input *GetDevEndpointInput) (req *request.R // GetDevEndpoint API operation for AWS Glue. // -// Retrieves information about a specified DevEndpoint. +// Retrieves information about a specified development endpoint. // // When you create a development endpoint in a virtual private cloud (VPC), // AWS Glue returns only a private IP address, and the public IP address field @@ -4720,7 +5300,7 @@ func (c *Glue) GetDevEndpointsRequest(input *GetDevEndpointsInput) (req *request // GetDevEndpoints API operation for AWS Glue. // -// Retrieves all the DevEndpoints in this AWS account. +// Retrieves all the development endpoints in this AWS account. // // When you create a development endpoint in a virtual private cloud (VPC), // AWS Glue returns only a private IP address and the public IP address field @@ -4780,7 +5360,7 @@ func (c *Glue) GetDevEndpointsWithContext(ctx aws.Context, input *GetDevEndpoint // // Example iterating over at most 3 pages of a GetDevEndpoints operation. 
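+// (The page type is package-qualified, so the snippet compiles as-is when
+// copied into code outside the glue package.)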
// pageNum := 0 // err := client.GetDevEndpointsPages(params, -// func(page *GetDevEndpointsOutput, lastPage bool) bool { +// func(page *glue.GetDevEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4812,10 +5392,12 @@ func (c *Glue) GetDevEndpointsPagesWithContext(ctx aws.Context, input *GetDevEnd }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetDevEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetDevEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4907,77 +5489,168 @@ func (c *Glue) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...re return out, req.Send() } -const opGetJobRun = "GetJobRun" +const opGetJobBookmark = "GetJobBookmark" -// GetJobRunRequest generates a "aws/request.Request" representing the -// client's request for the GetJobRun operation. The "output" return +// GetJobBookmarkRequest generates a "aws/request.Request" representing the +// client's request for the GetJobBookmark operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetJobRun for more information on using the GetJobRun +// See GetJobBookmark for more information on using the GetJobBookmark // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetJobRunRequest method. -// req, resp := client.GetJobRunRequest(params) +// // Example sending a request using the GetJobBookmarkRequest method. +// req, resp := client.GetJobBookmarkRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun -func (c *Glue) GetJobRunRequest(input *GetJobRunInput) (req *request.Request, output *GetJobRunOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobBookmark +func (c *Glue) GetJobBookmarkRequest(input *GetJobBookmarkInput) (req *request.Request, output *GetJobBookmarkOutput) { op := &request.Operation{ - Name: opGetJobRun, + Name: opGetJobBookmark, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetJobRunInput{} + input = &GetJobBookmarkInput{} } - output = &GetJobRunOutput{} + output = &GetJobBookmarkOutput{} req = c.newRequest(op, input, output) return } -// GetJobRun API operation for AWS Glue. +// GetJobBookmark API operation for AWS Glue. // -// Retrieves the metadata for a given job run. +// Returns information on a job bookmark entry. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetJobRun for usage and error information. +// API operation GetJobBookmark for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. 
-// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun -func (c *Glue) GetJobRun(input *GetJobRunInput) (*GetJobRunOutput, error) { - req, out := c.GetJobRunRequest(input) - return out, req.Send() -} +// * ErrCodeValidationException "ValidationException" +// A value could not be validated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobBookmark +func (c *Glue) GetJobBookmark(input *GetJobBookmarkInput) (*GetJobBookmarkOutput, error) { + req, out := c.GetJobBookmarkRequest(input) + return out, req.Send() +} + +// GetJobBookmarkWithContext is the same as GetJobBookmark with the addition of +// the ability to pass a context and additional request options. +// +// See GetJobBookmark for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetJobBookmarkWithContext(ctx aws.Context, input *GetJobBookmarkInput, opts ...request.Option) (*GetJobBookmarkOutput, error) { + req, out := c.GetJobBookmarkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetJobRun = "GetJobRun" + +// GetJobRunRequest generates a "aws/request.Request" representing the +// client's request for the GetJobRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetJobRun for more information on using the GetJobRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetJobRunRequest method. +// req, resp := client.GetJobRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun +func (c *Glue) GetJobRunRequest(input *GetJobRunInput) (req *request.Request, output *GetJobRunOutput) { + op := &request.Operation{ + Name: opGetJobRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetJobRunInput{} + } + + output = &GetJobRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetJobRun API operation for AWS Glue. +// +// Retrieves the metadata for a given job run. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetJobRun for usage and error information. 
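+//
+// A minimal usage sketch, assuming placeholder job and run identifiers:
+//
+//    out, err := svc.GetJobRun(&glue.GetJobRunInput{
+//        JobName: aws.String("my-job"),
+//        RunId:   aws.String("jr_0123456789abcdef"),
+//    })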
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobRun +func (c *Glue) GetJobRun(input *GetJobRunInput) (*GetJobRunOutput, error) { + req, out := c.GetJobRunRequest(input) + return out, req.Send() +} // GetJobRunWithContext is the same as GetJobRun with the addition of // the ability to pass a context and additional request options. @@ -5100,7 +5773,7 @@ func (c *Glue) GetJobRunsWithContext(ctx aws.Context, input *GetJobRunsInput, op // // Example iterating over at most 3 pages of a GetJobRuns operation. // pageNum := 0 // err := client.GetJobRunsPages(params, -// func(page *GetJobRunsOutput, lastPage bool) bool { +// func(page *glue.GetJobRunsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5132,10 +5805,12 @@ func (c *Glue) GetJobRunsPagesWithContext(ctx aws.Context, input *GetJobRunsInpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetJobRunsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetJobRunsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5244,7 +5919,7 @@ func (c *Glue) GetJobsWithContext(ctx aws.Context, input *GetJobsInput, opts ... // // Example iterating over at most 3 pages of a GetJobs operation. // pageNum := 0 // err := client.GetJobsPages(params, -// func(page *GetJobsOutput, lastPage bool) bool { +// func(page *glue.GetJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5276,153 +5951,71 @@ func (c *Glue) GetJobsPagesWithContext(ctx aws.Context, input *GetJobsInput, fn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetJobsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetMapping = "GetMapping" - -// GetMappingRequest generates a "aws/request.Request" representing the -// client's request for the GetMapping operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetMapping for more information on using the GetMapping -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetMappingRequest method. 
-// req, resp := client.GetMappingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping -func (c *Glue) GetMappingRequest(input *GetMappingInput) (req *request.Request, output *GetMappingOutput) { - op := &request.Operation{ - Name: opGetMapping, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetMappingInput{} + for p.Next() { + if !fn(p.Page().(*GetJobsOutput), !p.HasNextPage()) { + break + } } - output = &GetMappingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetMapping API operation for AWS Glue. -// -// Creates mappings. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetMapping for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping -func (c *Glue) GetMapping(input *GetMappingInput) (*GetMappingOutput, error) { - req, out := c.GetMappingRequest(input) - return out, req.Send() -} - -// GetMappingWithContext is the same as GetMapping with the addition of -// the ability to pass a context and additional request options. -// -// See GetMapping for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetMappingWithContext(ctx aws.Context, input *GetMappingInput, opts ...request.Option) (*GetMappingOutput, error) { - req, out := c.GetMappingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() + return p.Err() } -const opGetPartition = "GetPartition" +const opGetMLTaskRun = "GetMLTaskRun" -// GetPartitionRequest generates a "aws/request.Request" representing the -// client's request for the GetPartition operation. The "output" return +// GetMLTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the GetMLTaskRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPartition for more information on using the GetPartition +// See GetMLTaskRun for more information on using the GetMLTaskRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPartitionRequest method. 
-// req, resp := client.GetPartitionRequest(params) +// // Example sending a request using the GetMLTaskRunRequest method. +// req, resp := client.GetMLTaskRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition -func (c *Glue) GetPartitionRequest(input *GetPartitionInput) (req *request.Request, output *GetPartitionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTaskRun +func (c *Glue) GetMLTaskRunRequest(input *GetMLTaskRunInput) (req *request.Request, output *GetMLTaskRunOutput) { op := &request.Operation{ - Name: opGetPartition, + Name: opGetMLTaskRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetPartitionInput{} + input = &GetMLTaskRunInput{} } - output = &GetPartitionOutput{} + output = &GetMLTaskRunOutput{} req = c.newRequest(op, input, output) return } -// GetPartition API operation for AWS Glue. +// GetMLTaskRun API operation for AWS Glue. // -// Retrieves information about a specified partition. +// Gets details for a specific task run on a machine learning transform. Machine +// learning task runs are asynchronous tasks that AWS Glue runs on your behalf +// as part of various machine learning workflows. You can check the stats of +// any task run by calling GetMLTaskRun with the TaskRunID and its parent transform's +// TransformID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetPartition for usage and error information. +// API operation GetMLTaskRun for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -5431,66 +6024,63 @@ func (c *Glue) GetPartitionRequest(input *GetPartitionInput) (req *request.Reque // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition -func (c *Glue) GetPartition(input *GetPartitionInput) (*GetPartitionOutput, error) { - req, out := c.GetPartitionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTaskRun +func (c *Glue) GetMLTaskRun(input *GetMLTaskRunInput) (*GetMLTaskRunOutput, error) { + req, out := c.GetMLTaskRunRequest(input) return out, req.Send() } -// GetPartitionWithContext is the same as GetPartition with the addition of +// GetMLTaskRunWithContext is the same as GetMLTaskRun with the addition of // the ability to pass a context and additional request options. // -// See GetPartition for details on how to use this API operation. +// See GetMLTaskRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetPartitionWithContext(ctx aws.Context, input *GetPartitionInput, opts ...request.Option) (*GetPartitionOutput, error) { - req, out := c.GetPartitionRequest(input) +func (c *Glue) GetMLTaskRunWithContext(ctx aws.Context, input *GetMLTaskRunInput, opts ...request.Option) (*GetMLTaskRunOutput, error) { + req, out := c.GetMLTaskRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPartitions = "GetPartitions" +const opGetMLTaskRuns = "GetMLTaskRuns" -// GetPartitionsRequest generates a "aws/request.Request" representing the -// client's request for the GetPartitions operation. The "output" return +// GetMLTaskRunsRequest generates a "aws/request.Request" representing the +// client's request for the GetMLTaskRuns operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPartitions for more information on using the GetPartitions +// See GetMLTaskRuns for more information on using the GetMLTaskRuns // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPartitionsRequest method. -// req, resp := client.GetPartitionsRequest(params) +// // Example sending a request using the GetMLTaskRunsRequest method. +// req, resp := client.GetMLTaskRunsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions -func (c *Glue) GetPartitionsRequest(input *GetPartitionsInput) (req *request.Request, output *GetPartitionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTaskRuns +func (c *Glue) GetMLTaskRunsRequest(input *GetMLTaskRunsInput) (req *request.Request, output *GetMLTaskRunsOutput) { op := &request.Operation{ - Name: opGetPartitions, + Name: opGetMLTaskRuns, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -5502,24 +6092,31 @@ func (c *Glue) GetPartitionsRequest(input *GetPartitionsInput) (req *request.Req } if input == nil { - input = &GetPartitionsInput{} + input = &GetMLTaskRunsInput{} } - output = &GetPartitionsOutput{} + output = &GetMLTaskRunsOutput{} req = c.newRequest(op, input, output) return } -// GetPartitions API operation for AWS Glue. +// GetMLTaskRuns API operation for AWS Glue. // -// Retrieves information about the partitions in a table. +// Gets a list of runs for a machine learning transform. Machine learning task +// runs are asynchronous tasks that AWS Glue runs on your behalf as part of +// various machine learning workflows. You can get a sortable, filterable list +// of machine learning task runs by calling GetMLTaskRuns with their parent +// transform's TransformID and other optional parameters as documented in this +// section. +// +// This operation returns a list of historic runs and must be paginated. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
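+//
+// A minimal pagination sketch, assuming a placeholder transform ID:
+//
+//    err := svc.GetMLTaskRunsPages(&glue.GetMLTaskRunsInput{
+//        TransformId: aws.String("tfm-0123456789abcdef"),
+//    }, func(page *glue.GetMLTaskRunsOutput, lastPage bool) bool {
+//        fmt.Println(page.TaskRuns)
+//        return true // keep paging until the last page
+//    })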
// // See the AWS API reference guide for AWS Glue's -// API operation GetPartitions for usage and error information. +// API operation GetMLTaskRuns for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -5534,311 +6131,376 @@ func (c *Glue) GetPartitionsRequest(input *GetPartitionsInput) (req *request.Req // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions -func (c *Glue) GetPartitions(input *GetPartitionsInput) (*GetPartitionsOutput, error) { - req, out := c.GetPartitionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTaskRuns +func (c *Glue) GetMLTaskRuns(input *GetMLTaskRunsInput) (*GetMLTaskRunsOutput, error) { + req, out := c.GetMLTaskRunsRequest(input) return out, req.Send() } -// GetPartitionsWithContext is the same as GetPartitions with the addition of +// GetMLTaskRunsWithContext is the same as GetMLTaskRuns with the addition of // the ability to pass a context and additional request options. // -// See GetPartitions for details on how to use this API operation. +// See GetMLTaskRuns for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetPartitionsWithContext(ctx aws.Context, input *GetPartitionsInput, opts ...request.Option) (*GetPartitionsOutput, error) { - req, out := c.GetPartitionsRequest(input) +func (c *Glue) GetMLTaskRunsWithContext(ctx aws.Context, input *GetMLTaskRunsInput, opts ...request.Option) (*GetMLTaskRunsOutput, error) { + req, out := c.GetMLTaskRunsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetPartitionsPages iterates over the pages of a GetPartitions operation, +// GetMLTaskRunsPages iterates over the pages of a GetMLTaskRuns operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See GetPartitions method for more information on how to use this operation. +// See GetMLTaskRuns method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a GetPartitions operation. +// // Example iterating over at most 3 pages of a GetMLTaskRuns operation. 
// pageNum := 0 -// err := client.GetPartitionsPages(params, -// func(page *GetPartitionsOutput, lastPage bool) bool { +// err := client.GetMLTaskRunsPages(params, +// func(page *glue.GetMLTaskRunsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) GetPartitionsPages(input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool) error { - return c.GetPartitionsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetMLTaskRunsPages(input *GetMLTaskRunsInput, fn func(*GetMLTaskRunsOutput, bool) bool) error { + return c.GetMLTaskRunsPagesWithContext(aws.BackgroundContext(), input, fn) } -// GetPartitionsPagesWithContext same as GetPartitionsPages except +// GetMLTaskRunsPagesWithContext same as GetMLTaskRunsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetPartitionsPagesWithContext(ctx aws.Context, input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetMLTaskRunsPagesWithContext(ctx aws.Context, input *GetMLTaskRunsInput, fn func(*GetMLTaskRunsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *GetPartitionsInput + var inCpy *GetMLTaskRunsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.GetPartitionsRequest(inCpy) + req, _ := c.GetMLTaskRunsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetPartitionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetMLTaskRunsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opGetPlan = "GetPlan" +const opGetMLTransform = "GetMLTransform" -// GetPlanRequest generates a "aws/request.Request" representing the -// client's request for the GetPlan operation. The "output" return +// GetMLTransformRequest generates a "aws/request.Request" representing the +// client's request for the GetMLTransform operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPlan for more information on using the GetPlan +// See GetMLTransform for more information on using the GetMLTransform // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPlanRequest method. -// req, resp := client.GetPlanRequest(params) +// // Example sending a request using the GetMLTransformRequest method. 
+// req, resp := client.GetMLTransformRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan -func (c *Glue) GetPlanRequest(input *GetPlanInput) (req *request.Request, output *GetPlanOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTransform +func (c *Glue) GetMLTransformRequest(input *GetMLTransformInput) (req *request.Request, output *GetMLTransformOutput) { op := &request.Operation{ - Name: opGetPlan, + Name: opGetMLTransform, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetPlanInput{} + input = &GetMLTransformInput{} } - output = &GetPlanOutput{} + output = &GetMLTransformOutput{} req = c.newRequest(op, input, output) return } -// GetPlan API operation for AWS Glue. +// GetMLTransform API operation for AWS Glue. // -// Gets code to perform a specified mapping. +// Gets an AWS Glue machine learning transform artifact and all its corresponding +// metadata. Machine learning transforms are a special type of transform that +// use machine learning to learn the details of the transformation to be performed +// by learning from examples provided by humans. These transformations are then +// saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetPlan for usage and error information. +// API operation GetMLTransform for usage and error information. // // Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan -func (c *Glue) GetPlan(input *GetPlanInput) (*GetPlanOutput, error) { - req, out := c.GetPlanRequest(input) +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTransform +func (c *Glue) GetMLTransform(input *GetMLTransformInput) (*GetMLTransformOutput, error) { + req, out := c.GetMLTransformRequest(input) return out, req.Send() } -// GetPlanWithContext is the same as GetPlan with the addition of +// GetMLTransformWithContext is the same as GetMLTransform with the addition of // the ability to pass a context and additional request options. // -// See GetPlan for details on how to use this API operation. +// See GetMLTransform for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
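+//
+// A minimal sketch with a request deadline, assuming a placeholder transform
+// ID (a context.Context satisfies aws.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.GetMLTransformWithContext(ctx, &glue.GetMLTransformInput{
+//        TransformId: aws.String("tfm-0123456789abcdef"),
+//    })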
-func (c *Glue) GetPlanWithContext(ctx aws.Context, input *GetPlanInput, opts ...request.Option) (*GetPlanOutput, error) { - req, out := c.GetPlanRequest(input) +func (c *Glue) GetMLTransformWithContext(ctx aws.Context, input *GetMLTransformInput, opts ...request.Option) (*GetMLTransformOutput, error) { + req, out := c.GetMLTransformRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetResourcePolicy = "GetResourcePolicy" +const opGetMLTransforms = "GetMLTransforms" -// GetResourcePolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetResourcePolicy operation. The "output" return +// GetMLTransformsRequest generates a "aws/request.Request" representing the +// client's request for the GetMLTransforms operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetResourcePolicy for more information on using the GetResourcePolicy +// See GetMLTransforms for more information on using the GetMLTransforms // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetResourcePolicyRequest method. -// req, resp := client.GetResourcePolicyRequest(params) +// // Example sending a request using the GetMLTransformsRequest method. +// req, resp := client.GetMLTransformsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicy -func (c *Glue) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTransforms +func (c *Glue) GetMLTransformsRequest(input *GetMLTransformsInput) (req *request.Request, output *GetMLTransformsOutput) { op := &request.Operation{ - Name: opGetResourcePolicy, + Name: opGetMLTransforms, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &GetResourcePolicyInput{} + input = &GetMLTransformsInput{} } - output = &GetResourcePolicyOutput{} + output = &GetMLTransformsOutput{} req = c.newRequest(op, input, output) return } -// GetResourcePolicy API operation for AWS Glue. +// GetMLTransforms API operation for AWS Glue. // -// Retrieves a specified resource policy. +// Gets a sortable, filterable list of existing AWS Glue machine learning transforms. +// Machine learning transforms are a special type of transform that use machine +// learning to learn the details of the transformation to be performed by learning +// from examples provided by humans. These transformations are then saved by +// AWS Glue, and you can retrieve their metadata by calling GetMLTransforms. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
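+//
+// A minimal sketch that narrows the listing with a filter; the READY status
+// value is one example, not the only valid criterion:
+//
+//    out, err := svc.GetMLTransforms(&glue.GetMLTransformsInput{
+//        Filter: &glue.TransformFilterCriteria{
+//            Status: aws.String("READY"),
+//        },
+//    })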
// // See the AWS API reference guide for AWS Glue's -// API operation GetResourcePolicy for usage and error information. +// API operation GetMLTransforms for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicy -func (c *Glue) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) { - req, out := c.GetResourcePolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMLTransforms +func (c *Glue) GetMLTransforms(input *GetMLTransformsInput) (*GetMLTransformsOutput, error) { + req, out := c.GetMLTransformsRequest(input) return out, req.Send() } -// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of +// GetMLTransformsWithContext is the same as GetMLTransforms with the addition of // the ability to pass a context and additional request options. // -// See GetResourcePolicy for details on how to use this API operation. +// See GetMLTransforms for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) { - req, out := c.GetResourcePolicyRequest(input) +func (c *Glue) GetMLTransformsWithContext(ctx aws.Context, input *GetMLTransformsInput, opts ...request.Option) (*GetMLTransformsOutput, error) { + req, out := c.GetMLTransformsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSecurityConfiguration = "GetSecurityConfiguration" +// GetMLTransformsPages iterates over the pages of a GetMLTransforms operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetMLTransforms method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetMLTransforms operation. +// pageNum := 0 +// err := client.GetMLTransformsPages(params, +// func(page *glue.GetMLTransformsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) GetMLTransformsPages(input *GetMLTransformsInput, fn func(*GetMLTransformsOutput, bool) bool) error { + return c.GetMLTransformsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetSecurityConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetSecurityConfiguration operation. 
The "output" return +// GetMLTransformsPagesWithContext same as GetMLTransformsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetMLTransformsPagesWithContext(ctx aws.Context, input *GetMLTransformsInput, fn func(*GetMLTransformsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetMLTransformsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetMLTransformsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetMLTransformsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetMapping = "GetMapping" + +// GetMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetMapping operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSecurityConfiguration for more information on using the GetSecurityConfiguration +// See GetMapping for more information on using the GetMapping // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSecurityConfigurationRequest method. -// req, resp := client.GetSecurityConfigurationRequest(params) +// // Example sending a request using the GetMappingRequest method. +// req, resp := client.GetMappingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfiguration -func (c *Glue) GetSecurityConfigurationRequest(input *GetSecurityConfigurationInput) (req *request.Request, output *GetSecurityConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping +func (c *Glue) GetMappingRequest(input *GetMappingInput) (req *request.Request, output *GetMappingOutput) { op := &request.Operation{ - Name: opGetSecurityConfiguration, + Name: opGetMapping, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetSecurityConfigurationInput{} + input = &GetMappingInput{} } - output = &GetSecurityConfigurationOutput{} + output = &GetMappingOutput{} req = c.newRequest(op, input, output) return } -// GetSecurityConfiguration API operation for AWS Glue. +// GetMapping API operation for AWS Glue. // -// Retrieves a specified security configuration. +// Creates mappings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetSecurityConfiguration for usage and error information. +// API operation GetMapping for usage and error information. 
// // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // @@ -5848,86 +6510,83 @@ func (c *Glue) GetSecurityConfigurationRequest(input *GetSecurityConfigurationIn // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfiguration -func (c *Glue) GetSecurityConfiguration(input *GetSecurityConfigurationInput) (*GetSecurityConfigurationOutput, error) { - req, out := c.GetSecurityConfigurationRequest(input) +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetMapping +func (c *Glue) GetMapping(input *GetMappingInput) (*GetMappingOutput, error) { + req, out := c.GetMappingRequest(input) return out, req.Send() } -// GetSecurityConfigurationWithContext is the same as GetSecurityConfiguration with the addition of +// GetMappingWithContext is the same as GetMapping with the addition of // the ability to pass a context and additional request options. // -// See GetSecurityConfiguration for details on how to use this API operation. +// See GetMapping for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetSecurityConfigurationWithContext(ctx aws.Context, input *GetSecurityConfigurationInput, opts ...request.Option) (*GetSecurityConfigurationOutput, error) { - req, out := c.GetSecurityConfigurationRequest(input) +func (c *Glue) GetMappingWithContext(ctx aws.Context, input *GetMappingInput, opts ...request.Option) (*GetMappingOutput, error) { + req, out := c.GetMappingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSecurityConfigurations = "GetSecurityConfigurations" +const opGetPartition = "GetPartition" -// GetSecurityConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the GetSecurityConfigurations operation. The "output" return +// GetPartitionRequest generates a "aws/request.Request" representing the +// client's request for the GetPartition operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSecurityConfigurations for more information on using the GetSecurityConfigurations +// See GetPartition for more information on using the GetPartition // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSecurityConfigurationsRequest method. -// req, resp := client.GetSecurityConfigurationsRequest(params) +// // Example sending a request using the GetPartitionRequest method. 
+// req, resp := client.GetPartitionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations -func (c *Glue) GetSecurityConfigurationsRequest(input *GetSecurityConfigurationsInput) (req *request.Request, output *GetSecurityConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition +func (c *Glue) GetPartitionRequest(input *GetPartitionInput) (req *request.Request, output *GetPartitionOutput) { op := &request.Operation{ - Name: opGetSecurityConfigurations, + Name: opGetPartition, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &GetSecurityConfigurationsInput{} + input = &GetPartitionInput{} } - output = &GetSecurityConfigurationsOutput{} + output = &GetPartitionOutput{} req = c.newRequest(op, input, output) return } -// GetSecurityConfigurations API operation for AWS Glue. +// GetPartition API operation for AWS Glue. // -// Retrieves a list of all security configurations. +// Retrieves information about a specified partition. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetSecurityConfigurations for usage and error information. +// API operation GetPartition for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -5942,130 +6601,89 @@ func (c *Glue) GetSecurityConfigurationsRequest(input *GetSecurityConfigurations // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations -func (c *Glue) GetSecurityConfigurations(input *GetSecurityConfigurationsInput) (*GetSecurityConfigurationsOutput, error) { - req, out := c.GetSecurityConfigurationsRequest(input) +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartition +func (c *Glue) GetPartition(input *GetPartitionInput) (*GetPartitionOutput, error) { + req, out := c.GetPartitionRequest(input) return out, req.Send() } -// GetSecurityConfigurationsWithContext is the same as GetSecurityConfigurations with the addition of +// GetPartitionWithContext is the same as GetPartition with the addition of // the ability to pass a context and additional request options. // -// See GetSecurityConfigurations for details on how to use this API operation. +// See GetPartition for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
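+//
+// A minimal usage sketch, assuming placeholder names and partition values:
+//
+//    out, err := svc.GetPartitionWithContext(ctx, &glue.GetPartitionInput{
+//        DatabaseName:    aws.String("mydb"),
+//        TableName:       aws.String("mytable"),
+//        PartitionValues: aws.StringSlice([]string{"2019", "11"}),
+//    })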
-func (c *Glue) GetSecurityConfigurationsWithContext(ctx aws.Context, input *GetSecurityConfigurationsInput, opts ...request.Option) (*GetSecurityConfigurationsOutput, error) { - req, out := c.GetSecurityConfigurationsRequest(input) +func (c *Glue) GetPartitionWithContext(ctx aws.Context, input *GetPartitionInput, opts ...request.Option) (*GetPartitionOutput, error) { + req, out := c.GetPartitionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetSecurityConfigurationsPages iterates over the pages of a GetSecurityConfigurations operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetSecurityConfigurations method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetSecurityConfigurations operation. -// pageNum := 0 -// err := client.GetSecurityConfigurationsPages(params, -// func(page *GetSecurityConfigurationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetSecurityConfigurationsPages(input *GetSecurityConfigurationsInput, fn func(*GetSecurityConfigurationsOutput, bool) bool) error { - return c.GetSecurityConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetSecurityConfigurationsPagesWithContext same as GetSecurityConfigurationsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetSecurityConfigurationsPagesWithContext(ctx aws.Context, input *GetSecurityConfigurationsInput, fn func(*GetSecurityConfigurationsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetSecurityConfigurationsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetSecurityConfigurationsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSecurityConfigurationsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetTable = "GetTable" +const opGetPartitions = "GetPartitions" -// GetTableRequest generates a "aws/request.Request" representing the -// client's request for the GetTable operation. The "output" return +// GetPartitionsRequest generates a "aws/request.Request" representing the +// client's request for the GetPartitions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTable for more information on using the GetTable +// See GetPartitions for more information on using the GetPartitions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTableRequest method. 
-// req, resp := client.GetTableRequest(params) +// // Example sending a request using the GetPartitionsRequest method. +// req, resp := client.GetPartitionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable -func (c *Glue) GetTableRequest(input *GetTableInput) (req *request.Request, output *GetTableOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions +func (c *Glue) GetPartitionsRequest(input *GetPartitionsInput) (req *request.Request, output *GetPartitionsOutput) { op := &request.Operation{ - Name: opGetTable, + Name: opGetPartitions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &GetTableInput{} + input = &GetPartitionsInput{} } - output = &GetTableOutput{} + output = &GetPartitionsOutput{} req = c.newRequest(op, input, output) return } -// GetTable API operation for AWS Glue. +// GetPartitions API operation for AWS Glue. // -// Retrieves the Table definition in a Data Catalog for a specified table. +// Retrieves information about the partitions in a table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTable for usage and error information. +// API operation GetPartitions for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -6074,94 +6692,143 @@ func (c *Glue) GetTableRequest(input *GetTableInput) (req *request.Request, outp // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeEncryptionException "GlueEncryptionException" // An encryption operation failed. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable -func (c *Glue) GetTable(input *GetTableInput) (*GetTableOutput, error) { - req, out := c.GetTableRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitions +func (c *Glue) GetPartitions(input *GetPartitionsInput) (*GetPartitionsOutput, error) { + req, out := c.GetPartitionsRequest(input) return out, req.Send() } -// GetTableWithContext is the same as GetTable with the addition of +// GetPartitionsWithContext is the same as GetPartitions with the addition of // the ability to pass a context and additional request options. // -// See GetTable for details on how to use this API operation. +// See GetPartitions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Glue) GetTableWithContext(ctx aws.Context, input *GetTableInput, opts ...request.Option) (*GetTableOutput, error) { - req, out := c.GetTableRequest(input) +func (c *Glue) GetPartitionsWithContext(ctx aws.Context, input *GetPartitionsInput, opts ...request.Option) (*GetPartitionsOutput, error) { + req, out := c.GetPartitionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetTableVersion = "GetTableVersion" +// GetPartitionsPages iterates over the pages of a GetPartitions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetPartitions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetPartitions operation. +// pageNum := 0 +// err := client.GetPartitionsPages(params, +// func(page *glue.GetPartitionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) GetPartitionsPages(input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool) error { + return c.GetPartitionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetTableVersionRequest generates a "aws/request.Request" representing the -// client's request for the GetTableVersion operation. The "output" return +// GetPartitionsPagesWithContext same as GetPartitionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetPartitionsPagesWithContext(ctx aws.Context, input *GetPartitionsInput, fn func(*GetPartitionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetPartitionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPartitionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetPartitionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetPlan = "GetPlan" + +// GetPlanRequest generates a "aws/request.Request" representing the +// client's request for the GetPlan operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTableVersion for more information on using the GetTableVersion +// See GetPlan for more information on using the GetPlan // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTableVersionRequest method. -// req, resp := client.GetTableVersionRequest(params) +// // Example sending a request using the GetPlanRequest method. 
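The Pages helpers generated above wrap request.Pagination so the caller never touches NextToken directly. A minimal sketch with placeholder names: count partitions across all pages, letting the SDK re-issue requests until the token is exhausted.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	total := 0
	err := svc.GetPartitionsPages(&glue.GetPartitionsInput{
		DatabaseName: aws.String("mydb"),    // placeholder
		TableName:    aws.String("mytable"), // placeholder
		MaxResults:   aws.Int64(100),        // page size; corresponds to the LimitToken above
	}, func(page *glue.GetPartitionsOutput, lastPage bool) bool {
		total += len(page.Partitions)
		return true // keep paging; return false to stop early
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("partitions:", total)
}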
+// req, resp := client.GetPlanRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersion -func (c *Glue) GetTableVersionRequest(input *GetTableVersionInput) (req *request.Request, output *GetTableVersionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan +func (c *Glue) GetPlanRequest(input *GetPlanInput) (req *request.Request, output *GetPlanOutput) { op := &request.Operation{ - Name: opGetTableVersion, + Name: opGetPlan, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetTableVersionInput{} + input = &GetPlanInput{} } - output = &GetTableVersionOutput{} + output = &GetPlanOutput{} req = c.newRequest(op, input, output) return } -// GetTableVersion API operation for AWS Glue. +// GetPlan API operation for AWS Glue. // -// Retrieves a specified version of a table. +// Gets code to perform a specified mapping. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTableVersion for usage and error information. +// API operation GetPlan for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // @@ -6171,237 +6838,168 @@ func (c *Glue) GetTableVersionRequest(input *GetTableVersionInput) (req *request // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersion -func (c *Glue) GetTableVersion(input *GetTableVersionInput) (*GetTableVersionOutput, error) { - req, out := c.GetTableVersionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPlan +func (c *Glue) GetPlan(input *GetPlanInput) (*GetPlanOutput, error) { + req, out := c.GetPlanRequest(input) return out, req.Send() } -// GetTableVersionWithContext is the same as GetTableVersion with the addition of +// GetPlanWithContext is the same as GetPlan with the addition of // the ability to pass a context and additional request options. // -// See GetTableVersion for details on how to use this API operation. +// See GetPlan for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTableVersionWithContext(ctx aws.Context, input *GetTableVersionInput, opts ...request.Option) (*GetTableVersionOutput, error) { - req, out := c.GetTableVersionRequest(input) +func (c *Glue) GetPlanWithContext(ctx aws.Context, input *GetPlanInput, opts ...request.Option) (*GetPlanOutput, error) { + req, out := c.GetPlanRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetTableVersions = "GetTableVersions" +const opGetResourcePolicy = "GetResourcePolicy" -// GetTableVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetTableVersions operation. The "output" return +// GetResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetResourcePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTableVersions for more information on using the GetTableVersions +// See GetResourcePolicy for more information on using the GetResourcePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTableVersionsRequest method. -// req, resp := client.GetTableVersionsRequest(params) +// // Example sending a request using the GetResourcePolicyRequest method. +// req, resp := client.GetResourcePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions -func (c *Glue) GetTableVersionsRequest(input *GetTableVersionsInput) (req *request.Request, output *GetTableVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicy +func (c *Glue) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) { op := &request.Operation{ - Name: opGetTableVersions, + Name: opGetResourcePolicy, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &GetTableVersionsInput{} + input = &GetResourcePolicyInput{} } - output = &GetTableVersionsOutput{} + output = &GetResourcePolicyOutput{} req = c.newRequest(op, input, output) return } -// GetTableVersions API operation for AWS Glue. +// GetResourcePolicy API operation for AWS Glue. // -// Retrieves a list of strings that identify available versions of a specified -// table. +// Retrieves a specified resource policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTableVersions for usage and error information. +// API operation GetResourcePolicy for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. 
+// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions -func (c *Glue) GetTableVersions(input *GetTableVersionsInput) (*GetTableVersionsOutput, error) { - req, out := c.GetTableVersionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicy +func (c *Glue) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) return out, req.Send() } -// GetTableVersionsWithContext is the same as GetTableVersions with the addition of +// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of // the ability to pass a context and additional request options. // -// See GetTableVersions for details on how to use this API operation. +// See GetResourcePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTableVersionsWithContext(ctx aws.Context, input *GetTableVersionsInput, opts ...request.Option) (*GetTableVersionsOutput, error) { - req, out := c.GetTableVersionsRequest(input) +func (c *Glue) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetTableVersionsPages iterates over the pages of a GetTableVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetTableVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetTableVersions operation. -// pageNum := 0 -// err := client.GetTableVersionsPages(params, -// func(page *GetTableVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetTableVersionsPages(input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool) error { - return c.GetTableVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetTableVersionsPagesWithContext same as GetTableVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetTableVersionsPagesWithContext(ctx aws.Context, input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetTableVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetTableVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
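The Request/Send split documented for these operations exists so callers can hook the request lifecycle before it is sent. A sketch under stated assumptions: GetResourcePolicyInput takes no required fields at this SDK version, the X-Example-Trace-Id header is purely hypothetical, and PolicyInJson is the output field holding the policy document.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	req, out := svc.GetResourcePolicyRequest(&glue.GetResourcePolicyInput{})
	// Inject custom logic before sending, as the doc comment suggests
	// (custom headers, retry tweaks, and similar). Hypothetical header:
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123")
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	// "out" is only valid once Send has returned without error.
	fmt.Println(aws.StringValue(out.PolicyInJson))
}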
- return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTableVersionsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetTables = "GetTables" +const opGetSecurityConfiguration = "GetSecurityConfiguration" -// GetTablesRequest generates a "aws/request.Request" representing the -// client's request for the GetTables operation. The "output" return +// GetSecurityConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetSecurityConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTables for more information on using the GetTables +// See GetSecurityConfiguration for more information on using the GetSecurityConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTablesRequest method. -// req, resp := client.GetTablesRequest(params) +// // Example sending a request using the GetSecurityConfigurationRequest method. +// req, resp := client.GetSecurityConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables -func (c *Glue) GetTablesRequest(input *GetTablesInput) (req *request.Request, output *GetTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfiguration +func (c *Glue) GetSecurityConfigurationRequest(input *GetSecurityConfigurationInput) (req *request.Request, output *GetSecurityConfigurationOutput) { op := &request.Operation{ - Name: opGetTables, + Name: opGetSecurityConfiguration, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &GetTablesInput{} + input = &GetSecurityConfigurationInput{} } - output = &GetTablesOutput{} + output = &GetSecurityConfigurationOutput{} req = c.newRequest(op, input, output) return } -// GetTables API operation for AWS Glue. +// GetSecurityConfiguration API operation for AWS Glue. // -// Retrieves the definitions of some or all of the tables in a given Database. +// Retrieves a specified security configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTables for usage and error information. +// API operation GetSecurityConfiguration for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -6410,141 +7008,237 @@ func (c *Glue) GetTablesRequest(input *GetTablesInput) (req *request.Request, ou // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. 
+// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfiguration +func (c *Glue) GetSecurityConfiguration(input *GetSecurityConfigurationInput) (*GetSecurityConfigurationOutput, error) { + req, out := c.GetSecurityConfigurationRequest(input) + return out, req.Send() +} + +// GetSecurityConfigurationWithContext is the same as GetSecurityConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetSecurityConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetSecurityConfigurationWithContext(ctx aws.Context, input *GetSecurityConfigurationInput, opts ...request.Option) (*GetSecurityConfigurationOutput, error) { + req, out := c.GetSecurityConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSecurityConfigurations = "GetSecurityConfigurations" + +// GetSecurityConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the GetSecurityConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSecurityConfigurations for more information on using the GetSecurityConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSecurityConfigurationsRequest method. +// req, resp := client.GetSecurityConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations +func (c *Glue) GetSecurityConfigurationsRequest(input *GetSecurityConfigurationsInput) (req *request.Request, output *GetSecurityConfigurationsOutput) { + op := &request.Operation{ + Name: opGetSecurityConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetSecurityConfigurationsInput{} + } + + output = &GetSecurityConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSecurityConfigurations API operation for AWS Glue. +// +// Retrieves a list of all security configurations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetSecurityConfigurations for usage and error information. 
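The WithContext variants above accept any context.Context for cancellation, since the standard interface satisfies aws.Context. A minimal sketch, with a placeholder configuration name: bound the call with a ten-second deadline.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	out, err := svc.GetSecurityConfigurationWithContext(ctx, &glue.GetSecurityConfigurationInput{
		Name: aws.String("my-security-configuration"), // placeholder
	})
	if err != nil {
		log.Fatal(err) // also surfaces context deadline errors
	}
	fmt.Println(out.SecurityConfiguration)
}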
+// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables -func (c *Glue) GetTables(input *GetTablesInput) (*GetTablesOutput, error) { - req, out := c.GetTablesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations +func (c *Glue) GetSecurityConfigurations(input *GetSecurityConfigurationsInput) (*GetSecurityConfigurationsOutput, error) { + req, out := c.GetSecurityConfigurationsRequest(input) return out, req.Send() } -// GetTablesWithContext is the same as GetTables with the addition of +// GetSecurityConfigurationsWithContext is the same as GetSecurityConfigurations with the addition of // the ability to pass a context and additional request options. // -// See GetTables for details on how to use this API operation. +// See GetSecurityConfigurations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTablesWithContext(ctx aws.Context, input *GetTablesInput, opts ...request.Option) (*GetTablesOutput, error) { - req, out := c.GetTablesRequest(input) +func (c *Glue) GetSecurityConfigurationsWithContext(ctx aws.Context, input *GetSecurityConfigurationsInput, opts ...request.Option) (*GetSecurityConfigurationsOutput, error) { + req, out := c.GetSecurityConfigurationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetTablesPages iterates over the pages of a GetTables operation, +// GetSecurityConfigurationsPages iterates over the pages of a GetSecurityConfigurations operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See GetTables method for more information on how to use this operation. +// See GetSecurityConfigurations method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a GetTables operation. +// // Example iterating over at most 3 pages of a GetSecurityConfigurations operation. 
// pageNum := 0 -// err := client.GetTablesPages(params, -// func(page *GetTablesOutput, lastPage bool) bool { +// err := client.GetSecurityConfigurationsPages(params, +// func(page *glue.GetSecurityConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) GetTablesPages(input *GetTablesInput, fn func(*GetTablesOutput, bool) bool) error { - return c.GetTablesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetSecurityConfigurationsPages(input *GetSecurityConfigurationsInput, fn func(*GetSecurityConfigurationsOutput, bool) bool) error { + return c.GetSecurityConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) } -// GetTablesPagesWithContext same as GetTablesPages except +// GetSecurityConfigurationsPagesWithContext same as GetSecurityConfigurationsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTablesPagesWithContext(ctx aws.Context, input *GetTablesInput, fn func(*GetTablesOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetSecurityConfigurationsPagesWithContext(ctx aws.Context, input *GetSecurityConfigurationsInput, fn func(*GetSecurityConfigurationsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *GetTablesInput + var inCpy *GetSecurityConfigurationsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.GetTablesRequest(inCpy) + req, _ := c.GetSecurityConfigurationsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTablesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetSecurityConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opGetTags = "GetTags" +const opGetTable = "GetTable" -// GetTagsRequest generates a "aws/request.Request" representing the -// client's request for the GetTags operation. The "output" return +// GetTableRequest generates a "aws/request.Request" representing the +// client's request for the GetTable operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTags for more information on using the GetTags +// See GetTable for more information on using the GetTable // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTagsRequest method. -// req, resp := client.GetTagsRequest(params) +// // Example sending a request using the GetTableRequest method. 
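The rewritten pagination loop above breaks as soon as fn returns false. A minimal sketch of early termination: stop after at most three pages, mirroring the doc-comment example.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	pages := 0
	err := svc.GetSecurityConfigurationsPages(&glue.GetSecurityConfigurationsInput{},
		func(page *glue.GetSecurityConfigurationsOutput, lastPage bool) bool {
			pages++
			fmt.Println(len(page.SecurityConfigurations), "configurations on page", pages)
			return pages < 3 // returning false stops the paginator
		})
	if err != nil {
		log.Fatal(err)
	}
}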
+// req, resp := client.GetTableRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTags -func (c *Glue) GetTagsRequest(input *GetTagsInput) (req *request.Request, output *GetTagsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable +func (c *Glue) GetTableRequest(input *GetTableInput) (req *request.Request, output *GetTableOutput) { op := &request.Operation{ - Name: opGetTags, + Name: opGetTable, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetTagsInput{} + input = &GetTableInput{} } - output = &GetTagsOutput{} + output = &GetTableOutput{} req = c.newRequest(op, input, output) return } -// GetTags API operation for AWS Glue. +// GetTable API operation for AWS Glue. // -// Retrieves a list of tags associated with a resource. +// Retrieves the Table definition in a Data Catalog for a specified table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTags for usage and error information. +// API operation GetTable for usage and error information. // // Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // @@ -6554,83 +7248,83 @@ func (c *Glue) GetTagsRequest(input *GetTagsInput) (req *request.Request, output // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTags -func (c *Glue) GetTags(input *GetTagsInput) (*GetTagsOutput, error) { - req, out := c.GetTagsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTable +func (c *Glue) GetTable(input *GetTableInput) (*GetTableOutput, error) { + req, out := c.GetTableRequest(input) return out, req.Send() } -// GetTagsWithContext is the same as GetTags with the addition of +// GetTableWithContext is the same as GetTable with the addition of // the ability to pass a context and additional request options. // -// See GetTags for details on how to use this API operation. +// See GetTable for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTagsWithContext(ctx aws.Context, input *GetTagsInput, opts ...request.Option) (*GetTagsOutput, error) { - req, out := c.GetTagsRequest(input) +func (c *Glue) GetTableWithContext(ctx aws.Context, input *GetTableInput, opts ...request.Option) (*GetTableOutput, error) { + req, out := c.GetTableRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetTrigger = "GetTrigger" +const opGetTableVersion = "GetTableVersion" -// GetTriggerRequest generates a "aws/request.Request" representing the -// client's request for the GetTrigger operation. The "output" return +// GetTableVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetTableVersion operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTrigger for more information on using the GetTrigger +// See GetTableVersion for more information on using the GetTableVersion // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTriggerRequest method. -// req, resp := client.GetTriggerRequest(params) +// // Example sending a request using the GetTableVersionRequest method. +// req, resp := client.GetTableVersionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger -func (c *Glue) GetTriggerRequest(input *GetTriggerInput) (req *request.Request, output *GetTriggerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersion +func (c *Glue) GetTableVersionRequest(input *GetTableVersionInput) (req *request.Request, output *GetTableVersionOutput) { op := &request.Operation{ - Name: opGetTrigger, + Name: opGetTableVersion, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetTriggerInput{} + input = &GetTableVersionInput{} } - output = &GetTriggerOutput{} + output = &GetTableVersionOutput{} req = c.newRequest(op, input, output) return } -// GetTrigger API operation for AWS Glue. +// GetTableVersion API operation for AWS Glue. // -// Retrieves the definition of a trigger. +// Retrieves a specified version of a table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTrigger for usage and error information. +// API operation GetTableVersion for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -6645,57 +7339,60 @@ func (c *Glue) GetTriggerRequest(input *GetTriggerInput) (req *request.Request, // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger -func (c *Glue) GetTrigger(input *GetTriggerInput) (*GetTriggerOutput, error) { - req, out := c.GetTriggerRequest(input) +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. 
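A minimal sketch combining the two lookups documented above, with placeholder names: GetTable returns the current definition, and GetTableVersion returns one archived revision by its string VersionId.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	tbl, err := svc.GetTable(&glue.GetTableInput{
		DatabaseName: aws.String("mydb"),    // placeholder
		Name:         aws.String("mytable"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("table:", aws.StringValue(tbl.Table.Name))

	ver, err := svc.GetTableVersion(&glue.GetTableVersionInput{
		DatabaseName: aws.String("mydb"),
		TableName:    aws.String("mytable"),
		VersionId:    aws.String("1"), // placeholder version id
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("version:", aws.StringValue(ver.TableVersion.VersionId))
}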
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersion +func (c *Glue) GetTableVersion(input *GetTableVersionInput) (*GetTableVersionOutput, error) { + req, out := c.GetTableVersionRequest(input) return out, req.Send() } -// GetTriggerWithContext is the same as GetTrigger with the addition of +// GetTableVersionWithContext is the same as GetTableVersion with the addition of // the ability to pass a context and additional request options. // -// See GetTrigger for details on how to use this API operation. +// See GetTableVersion for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTriggerWithContext(ctx aws.Context, input *GetTriggerInput, opts ...request.Option) (*GetTriggerOutput, error) { - req, out := c.GetTriggerRequest(input) +func (c *Glue) GetTableVersionWithContext(ctx aws.Context, input *GetTableVersionInput, opts ...request.Option) (*GetTableVersionOutput, error) { + req, out := c.GetTableVersionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetTriggers = "GetTriggers" +const opGetTableVersions = "GetTableVersions" -// GetTriggersRequest generates a "aws/request.Request" representing the -// client's request for the GetTriggers operation. The "output" return +// GetTableVersionsRequest generates a "aws/request.Request" representing the +// client's request for the GetTableVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTriggers for more information on using the GetTriggers +// See GetTableVersions for more information on using the GetTableVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetTriggersRequest method. -// req, resp := client.GetTriggersRequest(params) +// // Example sending a request using the GetTableVersionsRequest method. 
+// req, resp := client.GetTableVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers -func (c *Glue) GetTriggersRequest(input *GetTriggersInput) (req *request.Request, output *GetTriggersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions +func (c *Glue) GetTableVersionsRequest(input *GetTableVersionsInput) (req *request.Request, output *GetTableVersionsOutput) { op := &request.Operation{ - Name: opGetTriggers, + Name: opGetTableVersions, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -6707,24 +7404,25 @@ func (c *Glue) GetTriggersRequest(input *GetTriggersInput) (req *request.Request } if input == nil { - input = &GetTriggersInput{} + input = &GetTableVersionsInput{} } - output = &GetTriggersOutput{} + output = &GetTableVersionsOutput{} req = c.newRequest(op, input, output) return } -// GetTriggers API operation for AWS Glue. +// GetTableVersions API operation for AWS Glue. // -// Gets all the triggers associated with a job. +// Retrieves a list of strings that identify available versions of a specified +// table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetTriggers for usage and error information. +// API operation GetTableVersions for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -6739,198 +7437,112 @@ func (c *Glue) GetTriggersRequest(input *GetTriggersInput) (req *request.Request // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers -func (c *Glue) GetTriggers(input *GetTriggersInput) (*GetTriggersOutput, error) { - req, out := c.GetTriggersRequest(input) +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableVersions +func (c *Glue) GetTableVersions(input *GetTableVersionsInput) (*GetTableVersionsOutput, error) { + req, out := c.GetTableVersionsRequest(input) return out, req.Send() } -// GetTriggersWithContext is the same as GetTriggers with the addition of +// GetTableVersionsWithContext is the same as GetTableVersions with the addition of // the ability to pass a context and additional request options. // -// See GetTriggers for details on how to use this API operation. +// See GetTableVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTriggersWithContext(ctx aws.Context, input *GetTriggersInput, opts ...request.Option) (*GetTriggersOutput, error) { - req, out := c.GetTriggersRequest(input) +func (c *Glue) GetTableVersionsWithContext(ctx aws.Context, input *GetTableVersionsInput, opts ...request.Option) (*GetTableVersionsOutput, error) { + req, out := c.GetTableVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -// GetTriggersPages iterates over the pages of a GetTriggers operation, +// GetTableVersionsPages iterates over the pages of a GetTableVersions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See GetTriggers method for more information on how to use this operation. +// See GetTableVersions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a GetTriggers operation. +// // Example iterating over at most 3 pages of a GetTableVersions operation. // pageNum := 0 -// err := client.GetTriggersPages(params, -// func(page *GetTriggersOutput, lastPage bool) bool { +// err := client.GetTableVersionsPages(params, +// func(page *glue.GetTableVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) GetTriggersPages(input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool) error { - return c.GetTriggersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetTableVersionsPages(input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool) error { + return c.GetTableVersionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// GetTriggersPagesWithContext same as GetTriggersPages except +// GetTableVersionsPagesWithContext same as GetTableVersionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetTriggersPagesWithContext(ctx aws.Context, input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetTableVersionsPagesWithContext(ctx aws.Context, input *GetTableVersionsInput, fn func(*GetTableVersionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *GetTriggersInput + var inCpy *GetTableVersionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.GetTriggersRequest(inCpy) + req, _ := c.GetTableVersionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTriggersOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opGetUserDefinedFunction = "GetUserDefinedFunction" - -// GetUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the GetUserDefinedFunction operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetUserDefinedFunction for more information on using the GetUserDefinedFunction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetUserDefinedFunctionRequest method. 
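A minimal sketch of the table-version paginator defined above, with placeholder names: collect every VersionId across pages.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	var ids []string
	err := svc.GetTableVersionsPages(&glue.GetTableVersionsInput{
		DatabaseName: aws.String("mydb"),    // placeholder
		TableName:    aws.String("mytable"), // placeholder
	}, func(page *glue.GetTableVersionsOutput, lastPage bool) bool {
		for _, tv := range page.TableVersions {
			ids = append(ids, aws.StringValue(tv.VersionId))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("versions:", ids)
}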
-// req, resp := client.GetUserDefinedFunctionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction -func (c *Glue) GetUserDefinedFunctionRequest(input *GetUserDefinedFunctionInput) (req *request.Request, output *GetUserDefinedFunctionOutput) { - op := &request.Operation{ - Name: opGetUserDefinedFunction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetUserDefinedFunctionInput{} + for p.Next() { + if !fn(p.Page().(*GetTableVersionsOutput), !p.HasNextPage()) { + break + } } - output = &GetUserDefinedFunctionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetUserDefinedFunction API operation for AWS Glue. -// -// Retrieves a specified function definition from the Data Catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetUserDefinedFunction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction -func (c *Glue) GetUserDefinedFunction(input *GetUserDefinedFunctionInput) (*GetUserDefinedFunctionOutput, error) { - req, out := c.GetUserDefinedFunctionRequest(input) - return out, req.Send() -} - -// GetUserDefinedFunctionWithContext is the same as GetUserDefinedFunction with the addition of -// the ability to pass a context and additional request options. -// -// See GetUserDefinedFunction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionWithContext(ctx aws.Context, input *GetUserDefinedFunctionInput, opts ...request.Option) (*GetUserDefinedFunctionOutput, error) { - req, out := c.GetUserDefinedFunctionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() + return p.Err() } -const opGetUserDefinedFunctions = "GetUserDefinedFunctions" +const opGetTables = "GetTables" -// GetUserDefinedFunctionsRequest generates a "aws/request.Request" representing the -// client's request for the GetUserDefinedFunctions operation. The "output" return +// GetTablesRequest generates a "aws/request.Request" representing the +// client's request for the GetTables operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See GetUserDefinedFunctions for more information on using the GetUserDefinedFunctions +// See GetTables for more information on using the GetTables // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetUserDefinedFunctionsRequest method. -// req, resp := client.GetUserDefinedFunctionsRequest(params) +// // Example sending a request using the GetTablesRequest method. +// req, resp := client.GetTablesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions -func (c *Glue) GetUserDefinedFunctionsRequest(input *GetUserDefinedFunctionsInput) (req *request.Request, output *GetUserDefinedFunctionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables +func (c *Glue) GetTablesRequest(input *GetTablesInput) (req *request.Request, output *GetTablesOutput) { op := &request.Operation{ - Name: opGetUserDefinedFunctions, + Name: opGetTables, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -6942,24 +7554,24 @@ func (c *Glue) GetUserDefinedFunctionsRequest(input *GetUserDefinedFunctionsInpu } if input == nil { - input = &GetUserDefinedFunctionsInput{} + input = &GetTablesInput{} } - output = &GetUserDefinedFunctionsOutput{} + output = &GetTablesOutput{} req = c.newRequest(op, input, output) return } -// GetUserDefinedFunctions API operation for AWS Glue. +// GetTables API operation for AWS Glue. // -// Retrieves a multiple function definitions from the Data Catalog. +// Retrieves the definitions of some or all of the tables in a given Database. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation GetUserDefinedFunctions for usage and error information. +// API operation GetTables for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -6977,331 +7589,285 @@ func (c *Glue) GetUserDefinedFunctionsRequest(input *GetUserDefinedFunctionsInpu // * ErrCodeEncryptionException "GlueEncryptionException" // An encryption operation failed. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions -func (c *Glue) GetUserDefinedFunctions(input *GetUserDefinedFunctionsInput) (*GetUserDefinedFunctionsOutput, error) { - req, out := c.GetUserDefinedFunctionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables +func (c *Glue) GetTables(input *GetTablesInput) (*GetTablesOutput, error) { + req, out := c.GetTablesRequest(input) return out, req.Send() } -// GetUserDefinedFunctionsWithContext is the same as GetUserDefinedFunctions with the addition of +// GetTablesWithContext is the same as GetTables with the addition of // the ability to pass a context and additional request options. // -// See GetUserDefinedFunctions for details on how to use this API operation. +// See GetTables for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionsWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, opts ...request.Option) (*GetUserDefinedFunctionsOutput, error) { - req, out := c.GetUserDefinedFunctionsRequest(input) +func (c *Glue) GetTablesWithContext(ctx aws.Context, input *GetTablesInput, opts ...request.Option) (*GetTablesOutput, error) { + req, out := c.GetTablesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetUserDefinedFunctionsPages iterates over the pages of a GetUserDefinedFunctions operation, +// GetTablesPages iterates over the pages of a GetTables operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See GetUserDefinedFunctions method for more information on how to use this operation. +// See GetTables method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a GetUserDefinedFunctions operation. +// // Example iterating over at most 3 pages of a GetTables operation. // pageNum := 0 -// err := client.GetUserDefinedFunctionsPages(params, -// func(page *GetUserDefinedFunctionsOutput, lastPage bool) bool { +// err := client.GetTablesPages(params, +// func(page *glue.GetTablesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) GetUserDefinedFunctionsPages(input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool) error { - return c.GetUserDefinedFunctionsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetTablesPages(input *GetTablesInput, fn func(*GetTablesOutput, bool) bool) error { + return c.GetTablesPagesWithContext(aws.BackgroundContext(), input, fn) } -// GetUserDefinedFunctionsPagesWithContext same as GetUserDefinedFunctionsPages except +// GetTablesPagesWithContext same as GetTablesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) GetUserDefinedFunctionsPagesWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetTablesPagesWithContext(ctx aws.Context, input *GetTablesInput, fn func(*GetTablesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *GetUserDefinedFunctionsInput + var inCpy *GetTablesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.GetUserDefinedFunctionsRequest(inCpy) + req, _ := c.GetTablesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetUserDefinedFunctionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTablesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opImportCatalogToGlue = "ImportCatalogToGlue" +const opGetTags = "GetTags" -// ImportCatalogToGlueRequest generates a "aws/request.Request" representing the -// client's request for the ImportCatalogToGlue operation. The "output" return +// GetTagsRequest generates a "aws/request.Request" representing the +// client's request for the GetTags operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ImportCatalogToGlue for more information on using the ImportCatalogToGlue +// See GetTags for more information on using the GetTags // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ImportCatalogToGlueRequest method. -// req, resp := client.ImportCatalogToGlueRequest(params) +// // Example sending a request using the GetTagsRequest method. +// req, resp := client.GetTagsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue -func (c *Glue) ImportCatalogToGlueRequest(input *ImportCatalogToGlueInput) (req *request.Request, output *ImportCatalogToGlueOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTags +func (c *Glue) GetTagsRequest(input *GetTagsInput) (req *request.Request, output *GetTagsOutput) { op := &request.Operation{ - Name: opImportCatalogToGlue, + Name: opGetTags, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ImportCatalogToGlueInput{} + input = &GetTagsInput{} } - output = &ImportCatalogToGlueOutput{} + output = &GetTagsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ImportCatalogToGlue API operation for AWS Glue. +// GetTags API operation for AWS Glue. // -// Imports an existing Athena Data Catalog to AWS Glue +// Retrieves a list of tags associated with a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation ImportCatalogToGlue for usage and error information. +// API operation GetTags for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. 
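The context-aware paginator completed above composes with a deadline. A sketch with a placeholder database name: print table names from TableList, aborting cleanly if the context expires between pages.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	err := svc.GetTablesPagesWithContext(ctx, &glue.GetTablesInput{
		DatabaseName: aws.String("mydb"), // placeholder
	}, func(page *glue.GetTablesOutput, lastPage bool) bool {
		for _, t := range page.TableList {
			fmt.Println(aws.StringValue(t.Name))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}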
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue -func (c *Glue) ImportCatalogToGlue(input *ImportCatalogToGlueInput) (*ImportCatalogToGlueOutput, error) { - req, out := c.ImportCatalogToGlueRequest(input) +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTags +func (c *Glue) GetTags(input *GetTagsInput) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) return out, req.Send() } -// ImportCatalogToGlueWithContext is the same as ImportCatalogToGlue with the addition of +// GetTagsWithContext is the same as GetTags with the addition of // the ability to pass a context and additional request options. // -// See ImportCatalogToGlue for details on how to use this API operation. +// See GetTags for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ImportCatalogToGlueWithContext(ctx aws.Context, input *ImportCatalogToGlueInput, opts ...request.Option) (*ImportCatalogToGlueOutput, error) { - req, out := c.ImportCatalogToGlueRequest(input) +func (c *Glue) GetTagsWithContext(ctx aws.Context, input *GetTagsInput, opts ...request.Option) (*GetTagsOutput, error) { + req, out := c.GetTagsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListCrawlers = "ListCrawlers" +const opGetTrigger = "GetTrigger" -// ListCrawlersRequest generates a "aws/request.Request" representing the -// client's request for the ListCrawlers operation. The "output" return -// value will be populated with the request's response once the request completes +// GetTriggerRequest generates a "aws/request.Request" representing the +// client's request for the GetTrigger operation. The "output" return +// value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListCrawlers for more information on using the ListCrawlers +// See GetTrigger for more information on using the GetTrigger // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListCrawlersRequest method. -// req, resp := client.ListCrawlersRequest(params) +// // Example sending a request using the GetTriggerRequest method. 
+// req, resp := client.GetTriggerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlers -func (c *Glue) ListCrawlersRequest(input *ListCrawlersInput) (req *request.Request, output *ListCrawlersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger +func (c *Glue) GetTriggerRequest(input *GetTriggerInput) (req *request.Request, output *GetTriggerOutput) { op := &request.Operation{ - Name: opListCrawlers, + Name: opGetTrigger, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListCrawlersInput{} + input = &GetTriggerInput{} } - output = &ListCrawlersOutput{} + output = &GetTriggerOutput{} req = c.newRequest(op, input, output) return } -// ListCrawlers API operation for AWS Glue. -// -// Retrieves the names of all crawler resources in this AWS account, or the -// resources with the specified tag. This operation allows you to see which -// resources are available in your account, and their names. +// GetTrigger API operation for AWS Glue. // -// This operation takes the optional Tags field which you can use as a filter -// on the response so that tagged resources can be retrieved as a group. If -// you choose to use tags filtering, only resources with the tag will be retrieved. +// Retrieves the definition of a trigger. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation ListCrawlers for usage and error information. +// API operation GetTrigger for usage and error information. // // Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlers -func (c *Glue) ListCrawlers(input *ListCrawlersInput) (*ListCrawlersOutput, error) { - req, out := c.ListCrawlersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTrigger +func (c *Glue) GetTrigger(input *GetTriggerInput) (*GetTriggerOutput, error) { + req, out := c.GetTriggerRequest(input) return out, req.Send() } -// ListCrawlersWithContext is the same as ListCrawlers with the addition of +// GetTriggerWithContext is the same as GetTrigger with the addition of // the ability to pass a context and additional request options. // -// See ListCrawlers for details on how to use this API operation. +// See GetTrigger for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
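// Illustrative usage sketch, not part of the vendored diff: calling the new
// GetTrigger operation with a deadline through its *WithContext variant, as
// the surrounding comments describe. The trigger name is a hypothetical
// placeholder.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// The context must be non-nil; cancellation aborts the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetTriggerWithContext(ctx, &glue.GetTriggerInput{
		Name: aws.String("my-trigger"),
	})
	if err != nil {
		log.Fatal(err) // a timed-out call surfaces as a request-canceled error
	}
	fmt.Println(out.Trigger)
}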
-func (c *Glue) ListCrawlersWithContext(ctx aws.Context, input *ListCrawlersInput, opts ...request.Option) (*ListCrawlersOutput, error) { - req, out := c.ListCrawlersRequest(input) +func (c *Glue) GetTriggerWithContext(ctx aws.Context, input *GetTriggerInput, opts ...request.Option) (*GetTriggerOutput, error) { + req, out := c.GetTriggerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListCrawlersPages iterates over the pages of a ListCrawlers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListCrawlers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListCrawlers operation. -// pageNum := 0 -// err := client.ListCrawlersPages(params, -// func(page *ListCrawlersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) ListCrawlersPages(input *ListCrawlersInput, fn func(*ListCrawlersOutput, bool) bool) error { - return c.ListCrawlersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListCrawlersPagesWithContext same as ListCrawlersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) ListCrawlersPagesWithContext(ctx aws.Context, input *ListCrawlersInput, fn func(*ListCrawlersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListCrawlersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListCrawlersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCrawlersOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListDevEndpoints = "ListDevEndpoints" +const opGetTriggers = "GetTriggers" -// ListDevEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the ListDevEndpoints operation. The "output" return +// GetTriggersRequest generates a "aws/request.Request" representing the +// client's request for the GetTriggers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDevEndpoints for more information on using the ListDevEndpoints +// See GetTriggers for more information on using the GetTriggers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDevEndpointsRequest method. -// req, resp := client.ListDevEndpointsRequest(params) +// // Example sending a request using the GetTriggersRequest method. 
+// req, resp := client.GetTriggersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpoints -func (c *Glue) ListDevEndpointsRequest(input *ListDevEndpointsInput) (req *request.Request, output *ListDevEndpointsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers +func (c *Glue) GetTriggersRequest(input *GetTriggersInput) (req *request.Request, output *GetTriggersOutput) { op := &request.Operation{ - Name: opListDevEndpoints, + Name: opGetTriggers, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -7313,295 +7879,232 @@ func (c *Glue) ListDevEndpointsRequest(input *ListDevEndpointsInput) (req *reque } if input == nil { - input = &ListDevEndpointsInput{} + input = &GetTriggersInput{} } - output = &ListDevEndpointsOutput{} + output = &GetTriggersOutput{} req = c.newRequest(op, input, output) return } -// ListDevEndpoints API operation for AWS Glue. -// -// Retrieves the names of all DevEndpoint resources in this AWS account, or -// the resources with the specified tag. This operation allows you to see which -// resources are available in your account, and their names. +// GetTriggers API operation for AWS Glue. // -// This operation takes the optional Tags field which you can use as a filter -// on the response so that tagged resources can be retrieved as a group. If -// you choose to use tags filtering, only resources with the tag will be retrieved. +// Gets all the triggers associated with a job. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation ListDevEndpoints for usage and error information. +// API operation GetTriggers for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpoints -func (c *Glue) ListDevEndpoints(input *ListDevEndpointsInput) (*ListDevEndpointsOutput, error) { - req, out := c.ListDevEndpointsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers +func (c *Glue) GetTriggers(input *GetTriggersInput) (*GetTriggersOutput, error) { + req, out := c.GetTriggersRequest(input) return out, req.Send() } -// ListDevEndpointsWithContext is the same as ListDevEndpoints with the addition of +// GetTriggersWithContext is the same as GetTriggers with the addition of // the ability to pass a context and additional request options. // -// See ListDevEndpoints for details on how to use this API operation. +// See GetTriggers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ListDevEndpointsWithContext(ctx aws.Context, input *ListDevEndpointsInput, opts ...request.Option) (*ListDevEndpointsOutput, error) { - req, out := c.ListDevEndpointsRequest(input) +func (c *Glue) GetTriggersWithContext(ctx aws.Context, input *GetTriggersInput, opts ...request.Option) (*GetTriggersOutput, error) { + req, out := c.GetTriggersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDevEndpointsPages iterates over the pages of a ListDevEndpoints operation, +// GetTriggersPages iterates over the pages of a GetTriggers operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListDevEndpoints method for more information on how to use this operation. +// See GetTriggers method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListDevEndpoints operation. +// // Example iterating over at most 3 pages of a GetTriggers operation. // pageNum := 0 -// err := client.ListDevEndpointsPages(params, -// func(page *ListDevEndpointsOutput, lastPage bool) bool { +// err := client.GetTriggersPages(params, +// func(page *glue.GetTriggersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) ListDevEndpointsPages(input *ListDevEndpointsInput, fn func(*ListDevEndpointsOutput, bool) bool) error { - return c.ListDevEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetTriggersPages(input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool) error { + return c.GetTriggersPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListDevEndpointsPagesWithContext same as ListDevEndpointsPages except +// GetTriggersPagesWithContext same as GetTriggersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ListDevEndpointsPagesWithContext(ctx aws.Context, input *ListDevEndpointsInput, fn func(*ListDevEndpointsOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetTriggersPagesWithContext(ctx aws.Context, input *GetTriggersInput, fn func(*GetTriggersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListDevEndpointsInput + var inCpy *GetTriggersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListDevEndpointsRequest(inCpy) + req, _ := c.GetTriggersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDevEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTriggersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opListJobs = "ListJobs" +const opGetUserDefinedFunction = "GetUserDefinedFunction" -// ListJobsRequest generates a "aws/request.Request" representing the -// client's request for the ListJobs operation. 
The "output" return +// GetUserDefinedFunctionRequest generates a "aws/request.Request" representing the +// client's request for the GetUserDefinedFunction operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListJobs for more information on using the ListJobs +// See GetUserDefinedFunction for more information on using the GetUserDefinedFunction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListJobsRequest method. -// req, resp := client.ListJobsRequest(params) +// // Example sending a request using the GetUserDefinedFunctionRequest method. +// req, resp := client.GetUserDefinedFunctionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobs -func (c *Glue) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction +func (c *Glue) GetUserDefinedFunctionRequest(input *GetUserDefinedFunctionInput) (req *request.Request, output *GetUserDefinedFunctionOutput) { op := &request.Operation{ - Name: opListJobs, + Name: opGetUserDefinedFunction, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListJobsInput{} + input = &GetUserDefinedFunctionInput{} } - output = &ListJobsOutput{} + output = &GetUserDefinedFunctionOutput{} req = c.newRequest(op, input, output) return } -// ListJobs API operation for AWS Glue. -// -// Retrieves the names of all job resources in this AWS account, or the resources -// with the specified tag. This operation allows you to see which resources -// are available in your account, and their names. +// GetUserDefinedFunction API operation for AWS Glue. // -// This operation takes the optional Tags field which you can use as a filter -// on the response so that tagged resources can be retrieved as a group. If -// you choose to use tags filtering, only resources with the tag will be retrieved. +// Retrieves a specified function definition from the Data Catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation ListJobs for usage and error information. +// API operation GetUserDefinedFunction for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. 
// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobs -func (c *Glue) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunction +func (c *Glue) GetUserDefinedFunction(input *GetUserDefinedFunctionInput) (*GetUserDefinedFunctionOutput, error) { + req, out := c.GetUserDefinedFunctionRequest(input) return out, req.Send() } -// ListJobsWithContext is the same as ListJobs with the addition of +// GetUserDefinedFunctionWithContext is the same as GetUserDefinedFunction with the addition of // the ability to pass a context and additional request options. // -// See ListJobs for details on how to use this API operation. +// See GetUserDefinedFunction for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) +func (c *Glue) GetUserDefinedFunctionWithContext(ctx aws.Context, input *GetUserDefinedFunctionInput, opts ...request.Option) (*GetUserDefinedFunctionOutput, error) { + req, out := c.GetUserDefinedFunctionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListJobsPages iterates over the pages of a ListJobs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGetUserDefinedFunctions = "GetUserDefinedFunctions" + +// GetUserDefinedFunctionsRequest generates a "aws/request.Request" representing the +// client's request for the GetUserDefinedFunctions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListJobs method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListJobs operation. -// pageNum := 0 -// err := client.ListJobsPages(params, -// func(page *ListJobsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { - return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListJobsPagesWithContext same as ListJobsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Glue) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListJobsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListJobsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) - } - return p.Err() -} - -const opListTriggers = "ListTriggers" - -// ListTriggersRequest generates a "aws/request.Request" representing the -// client's request for the ListTriggers operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTriggers for more information on using the ListTriggers -// API call, and error handling. +// See GetUserDefinedFunctions for more information on using the GetUserDefinedFunctions +// API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTriggersRequest method. -// req, resp := client.ListTriggersRequest(params) +// // Example sending a request using the GetUserDefinedFunctionsRequest method. +// req, resp := client.GetUserDefinedFunctionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggers -func (c *Glue) ListTriggersRequest(input *ListTriggersInput) (req *request.Request, output *ListTriggersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions +func (c *Glue) GetUserDefinedFunctionsRequest(input *GetUserDefinedFunctionsInput) (req *request.Request, output *GetUserDefinedFunctionsOutput) { op := &request.Operation{ - Name: opListTriggers, + Name: opGetUserDefinedFunctions, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -7613,30 +8116,24 @@ func (c *Glue) ListTriggersRequest(input *ListTriggersInput) (req *request.Reque } if input == nil { - input = &ListTriggersInput{} + input = &GetUserDefinedFunctionsInput{} } - output = &ListTriggersOutput{} + output = &GetUserDefinedFunctionsOutput{} req = c.newRequest(op, input, output) return } -// ListTriggers API operation for AWS Glue. -// -// Retrieves the names of all trigger resources in this AWS account, or the -// resources with the specified tag. This operation allows you to see which -// resources are available in your account, and their names. +// GetUserDefinedFunctions API operation for AWS Glue. // -// This operation takes the optional Tags field which you can use as a filter -// on the response so that tagged resources can be retrieved as a group. If -// you choose to use tags filtering, only resources with the tag will be retrieved. +// Retrieves multiple function definitions from the Data Catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
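// Illustrative usage sketch, not part of the vendored diff: the awserr
// type-assertion pattern the generated comments recommend, applied to
// GetUserDefinedFunction, whose error codes appear above. The database and
// function names are hypothetical placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetUserDefinedFunction(&glue.GetUserDefinedFunctionInput{
		DatabaseName: aws.String("my_database"),
		FunctionName: aws.String("my_udf"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case glue.ErrCodeEntityNotFoundException:
				fmt.Println("function not found:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println(out.UserDefinedFunction)
}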
// // See the AWS API reference guide for AWS Glue's -// API operation ListTriggers for usage and error information. +// API operation GetUserDefinedFunctions for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -7645,226 +8142,234 @@ func (c *Glue) ListTriggersRequest(input *ListTriggersInput) (req *request.Reque // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggers -func (c *Glue) ListTriggers(input *ListTriggersInput) (*ListTriggersOutput, error) { - req, out := c.ListTriggersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetUserDefinedFunctions +func (c *Glue) GetUserDefinedFunctions(input *GetUserDefinedFunctionsInput) (*GetUserDefinedFunctionsOutput, error) { + req, out := c.GetUserDefinedFunctionsRequest(input) return out, req.Send() } -// ListTriggersWithContext is the same as ListTriggers with the addition of +// GetUserDefinedFunctionsWithContext is the same as GetUserDefinedFunctions with the addition of // the ability to pass a context and additional request options. // -// See ListTriggers for details on how to use this API operation. +// See GetUserDefinedFunctions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ListTriggersWithContext(ctx aws.Context, input *ListTriggersInput, opts ...request.Option) (*ListTriggersOutput, error) { - req, out := c.ListTriggersRequest(input) +func (c *Glue) GetUserDefinedFunctionsWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, opts ...request.Option) (*GetUserDefinedFunctionsOutput, error) { + req, out := c.GetUserDefinedFunctionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTriggersPages iterates over the pages of a ListTriggers operation, +// GetUserDefinedFunctionsPages iterates over the pages of a GetUserDefinedFunctions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListTriggers method for more information on how to use this operation. +// See GetUserDefinedFunctions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTriggers operation. +// // Example iterating over at most 3 pages of a GetUserDefinedFunctions operation. 
// pageNum := 0 -// err := client.ListTriggersPages(params, -// func(page *ListTriggersOutput, lastPage bool) bool { +// err := client.GetUserDefinedFunctionsPages(params, +// func(page *glue.GetUserDefinedFunctionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Glue) ListTriggersPages(input *ListTriggersInput, fn func(*ListTriggersOutput, bool) bool) error { - return c.ListTriggersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Glue) GetUserDefinedFunctionsPages(input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool) error { + return c.GetUserDefinedFunctionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListTriggersPagesWithContext same as ListTriggersPages except +// GetUserDefinedFunctionsPagesWithContext same as GetUserDefinedFunctionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) ListTriggersPagesWithContext(ctx aws.Context, input *ListTriggersInput, fn func(*ListTriggersOutput, bool) bool, opts ...request.Option) error { +func (c *Glue) GetUserDefinedFunctionsPagesWithContext(ctx aws.Context, input *GetUserDefinedFunctionsInput, fn func(*GetUserDefinedFunctionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListTriggersInput + var inCpy *GetUserDefinedFunctionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListTriggersRequest(inCpy) + req, _ := c.GetUserDefinedFunctionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTriggersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetUserDefinedFunctionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } -const opPutDataCatalogEncryptionSettings = "PutDataCatalogEncryptionSettings" +const opGetWorkflow = "GetWorkflow" -// PutDataCatalogEncryptionSettingsRequest generates a "aws/request.Request" representing the -// client's request for the PutDataCatalogEncryptionSettings operation. The "output" return +// GetWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkflow operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutDataCatalogEncryptionSettings for more information on using the PutDataCatalogEncryptionSettings +// See GetWorkflow for more information on using the GetWorkflow // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutDataCatalogEncryptionSettingsRequest method. -// req, resp := client.PutDataCatalogEncryptionSettingsRequest(params) +// // Example sending a request using the GetWorkflowRequest method. 
+// req, resp := client.GetWorkflowRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettings -func (c *Glue) PutDataCatalogEncryptionSettingsRequest(input *PutDataCatalogEncryptionSettingsInput) (req *request.Request, output *PutDataCatalogEncryptionSettingsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflow +func (c *Glue) GetWorkflowRequest(input *GetWorkflowInput) (req *request.Request, output *GetWorkflowOutput) { op := &request.Operation{ - Name: opPutDataCatalogEncryptionSettings, + Name: opGetWorkflow, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutDataCatalogEncryptionSettingsInput{} + input = &GetWorkflowInput{} } - output = &PutDataCatalogEncryptionSettingsOutput{} + output = &GetWorkflowOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutDataCatalogEncryptionSettings API operation for AWS Glue. +// GetWorkflow API operation for AWS Glue. // -// Sets the security configuration for a specified catalog. After the configuration -// has been set, the specified encryption is applied to every catalog write -// thereafter. +// Retrieves resource metadata for a workflow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation PutDataCatalogEncryptionSettings for usage and error information. +// API operation GetWorkflow for usage and error information. // // Returned Error Codes: -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettings -func (c *Glue) PutDataCatalogEncryptionSettings(input *PutDataCatalogEncryptionSettingsInput) (*PutDataCatalogEncryptionSettingsOutput, error) { - req, out := c.PutDataCatalogEncryptionSettingsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflow +func (c *Glue) GetWorkflow(input *GetWorkflowInput) (*GetWorkflowOutput, error) { + req, out := c.GetWorkflowRequest(input) return out, req.Send() } -// PutDataCatalogEncryptionSettingsWithContext is the same as PutDataCatalogEncryptionSettings with the addition of +// GetWorkflowWithContext is the same as GetWorkflow with the addition of // the ability to pass a context and additional request options. // -// See PutDataCatalogEncryptionSettings for details on how to use this API operation. +// See GetWorkflow for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) PutDataCatalogEncryptionSettingsWithContext(ctx aws.Context, input *PutDataCatalogEncryptionSettingsInput, opts ...request.Option) (*PutDataCatalogEncryptionSettingsOutput, error) { - req, out := c.PutDataCatalogEncryptionSettingsRequest(input) +func (c *Glue) GetWorkflowWithContext(ctx aws.Context, input *GetWorkflowInput, opts ...request.Option) (*GetWorkflowOutput, error) { + req, out := c.GetWorkflowRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutResourcePolicy = "PutResourcePolicy" +const opGetWorkflowRun = "GetWorkflowRun" -// PutResourcePolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutResourcePolicy operation. The "output" return +// GetWorkflowRunRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkflowRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutResourcePolicy for more information on using the PutResourcePolicy +// See GetWorkflowRun for more information on using the GetWorkflowRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutResourcePolicyRequest method. -// req, resp := client.PutResourcePolicyRequest(params) +// // Example sending a request using the GetWorkflowRunRequest method. +// req, resp := client.GetWorkflowRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicy -func (c *Glue) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRun +func (c *Glue) GetWorkflowRunRequest(input *GetWorkflowRunInput) (req *request.Request, output *GetWorkflowRunOutput) { op := &request.Operation{ - Name: opPutResourcePolicy, + Name: opGetWorkflowRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutResourcePolicyInput{} + input = &GetWorkflowRunInput{} } - output = &PutResourcePolicyOutput{} + output = &GetWorkflowRunOutput{} req = c.newRequest(op, input, output) return } -// PutResourcePolicy API operation for AWS Glue. +// GetWorkflowRun API operation for AWS Glue. // -// Sets the Data Catalog resource policy for access control. +// Retrieves the metadata for a given workflow run. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation PutResourcePolicy for usage and error information. +// API operation GetWorkflowRun for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. 
+// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // @@ -7874,722 +8379,1002 @@ func (c *Glue) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *req // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeConditionCheckFailureException "ConditionCheckFailureException" -// A specified condition was not satisfied. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicy -func (c *Glue) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { - req, out := c.PutResourcePolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRun +func (c *Glue) GetWorkflowRun(input *GetWorkflowRunInput) (*GetWorkflowRunOutput, error) { + req, out := c.GetWorkflowRunRequest(input) return out, req.Send() } -// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of +// GetWorkflowRunWithContext is the same as GetWorkflowRun with the addition of // the ability to pass a context and additional request options. // -// See PutResourcePolicy for details on how to use this API operation. +// See GetWorkflowRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { - req, out := c.PutResourcePolicyRequest(input) +func (c *Glue) GetWorkflowRunWithContext(ctx aws.Context, input *GetWorkflowRunInput, opts ...request.Option) (*GetWorkflowRunOutput, error) { + req, out := c.GetWorkflowRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opResetJobBookmark = "ResetJobBookmark" +const opGetWorkflowRunProperties = "GetWorkflowRunProperties" -// ResetJobBookmarkRequest generates a "aws/request.Request" representing the -// client's request for the ResetJobBookmark operation. The "output" return +// GetWorkflowRunPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkflowRunProperties operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ResetJobBookmark for more information on using the ResetJobBookmark +// See GetWorkflowRunProperties for more information on using the GetWorkflowRunProperties // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ResetJobBookmarkRequest method. -// req, resp := client.ResetJobBookmarkRequest(params) +// // Example sending a request using the GetWorkflowRunPropertiesRequest method. 
+// req, resp := client.GetWorkflowRunPropertiesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark -func (c *Glue) ResetJobBookmarkRequest(input *ResetJobBookmarkInput) (req *request.Request, output *ResetJobBookmarkOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRunProperties +func (c *Glue) GetWorkflowRunPropertiesRequest(input *GetWorkflowRunPropertiesInput) (req *request.Request, output *GetWorkflowRunPropertiesOutput) { op := &request.Operation{ - Name: opResetJobBookmark, + Name: opGetWorkflowRunProperties, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ResetJobBookmarkInput{} + input = &GetWorkflowRunPropertiesInput{} } - output = &ResetJobBookmarkOutput{} + output = &GetWorkflowRunPropertiesOutput{} req = c.newRequest(op, input, output) return } -// ResetJobBookmark API operation for AWS Glue. +// GetWorkflowRunProperties API operation for AWS Glue. // -// Resets a bookmark entry. +// Retrieves the workflow run properties which were set during the run. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation ResetJobBookmark for usage and error information. +// API operation GetWorkflowRunProperties for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark -func (c *Glue) ResetJobBookmark(input *ResetJobBookmarkInput) (*ResetJobBookmarkOutput, error) { - req, out := c.ResetJobBookmarkRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRunProperties +func (c *Glue) GetWorkflowRunProperties(input *GetWorkflowRunPropertiesInput) (*GetWorkflowRunPropertiesOutput, error) { + req, out := c.GetWorkflowRunPropertiesRequest(input) return out, req.Send() } -// ResetJobBookmarkWithContext is the same as ResetJobBookmark with the addition of +// GetWorkflowRunPropertiesWithContext is the same as GetWorkflowRunProperties with the addition of // the ability to pass a context and additional request options. // -// See ResetJobBookmark for details on how to use this API operation. +// See GetWorkflowRunProperties for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
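// Illustrative usage sketch, not part of the vendored diff: reading the
// run-property map returned by the new GetWorkflowRunProperties operation
// described above. The workflow name and run ID are hypothetical placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetWorkflowRunProperties(&glue.GetWorkflowRunPropertiesInput{
		Name:  aws.String("my-workflow"),
		RunId: aws.String("wr_0123456789abcdef"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range out.RunProperties {
		fmt.Printf("%s=%s\n", k, aws.StringValue(v))
	}
}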
-func (c *Glue) ResetJobBookmarkWithContext(ctx aws.Context, input *ResetJobBookmarkInput, opts ...request.Option) (*ResetJobBookmarkOutput, error) { - req, out := c.ResetJobBookmarkRequest(input) +func (c *Glue) GetWorkflowRunPropertiesWithContext(ctx aws.Context, input *GetWorkflowRunPropertiesInput, opts ...request.Option) (*GetWorkflowRunPropertiesOutput, error) { + req, out := c.GetWorkflowRunPropertiesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartCrawler = "StartCrawler" +const opGetWorkflowRuns = "GetWorkflowRuns" -// StartCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the StartCrawler operation. The "output" return +// GetWorkflowRunsRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkflowRuns operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartCrawler for more information on using the StartCrawler +// See GetWorkflowRuns for more information on using the GetWorkflowRuns // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartCrawlerRequest method. -// req, resp := client.StartCrawlerRequest(params) +// // Example sending a request using the GetWorkflowRunsRequest method. +// req, resp := client.GetWorkflowRunsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler -func (c *Glue) StartCrawlerRequest(input *StartCrawlerInput) (req *request.Request, output *StartCrawlerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRuns +func (c *Glue) GetWorkflowRunsRequest(input *GetWorkflowRunsInput) (req *request.Request, output *GetWorkflowRunsOutput) { op := &request.Operation{ - Name: opStartCrawler, + Name: opGetWorkflowRuns, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StartCrawlerInput{} + input = &GetWorkflowRunsInput{} } - output = &StartCrawlerOutput{} + output = &GetWorkflowRunsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StartCrawler API operation for AWS Glue. +// GetWorkflowRuns API operation for AWS Glue. // -// Starts a crawl using the specified crawler, regardless of what is scheduled. -// If the crawler is already running, returns a CrawlerRunningException (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-exceptions.html#aws-glue-api-exceptions-CrawlerRunningException). +// Retrieves metadata for all runs of a given workflow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
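// Illustrative usage sketch, not part of the vendored diff: combining a
// context with pagination via the new GetWorkflowRunsPagesWithContext helper
// added below. The workflow name is a hypothetical placeholder.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	input := &glue.GetWorkflowRunsInput{Name: aws.String("my-workflow")}
	err := svc.GetWorkflowRunsPagesWithContext(ctx, input,
		func(page *glue.GetWorkflowRunsOutput, lastPage bool) bool {
			for _, run := range page.Runs {
				fmt.Println(aws.StringValue(run.WorkflowRunId), aws.StringValue(run.Status))
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
}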
// // See the AWS API reference guide for AWS Glue's -// API operation StartCrawler for usage and error information. +// API operation GetWorkflowRuns for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeCrawlerRunningException "CrawlerRunningException" -// The operation cannot be performed because the crawler is already running. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler -func (c *Glue) StartCrawler(input *StartCrawlerInput) (*StartCrawlerOutput, error) { - req, out := c.StartCrawlerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetWorkflowRuns +func (c *Glue) GetWorkflowRuns(input *GetWorkflowRunsInput) (*GetWorkflowRunsOutput, error) { + req, out := c.GetWorkflowRunsRequest(input) return out, req.Send() } -// StartCrawlerWithContext is the same as StartCrawler with the addition of +// GetWorkflowRunsWithContext is the same as GetWorkflowRuns with the addition of // the ability to pass a context and additional request options. // -// See StartCrawler for details on how to use this API operation. +// See GetWorkflowRuns for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StartCrawlerWithContext(ctx aws.Context, input *StartCrawlerInput, opts ...request.Option) (*StartCrawlerOutput, error) { - req, out := c.StartCrawlerRequest(input) +func (c *Glue) GetWorkflowRunsWithContext(ctx aws.Context, input *GetWorkflowRunsInput, opts ...request.Option) (*GetWorkflowRunsOutput, error) { + req, out := c.GetWorkflowRunsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartCrawlerSchedule = "StartCrawlerSchedule" - -// StartCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the StartCrawlerSchedule operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +// GetWorkflowRunsPages iterates over the pages of a GetWorkflowRuns operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// See GetWorkflowRuns method for more information on how to use this operation. // -// See StartCrawlerSchedule for more information on using the StartCrawlerSchedule -// API call, and error handling. +// Note: This operation can generate multiple requests to a service. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// // Example iterating over at most 3 pages of a GetWorkflowRuns operation. 
+// pageNum := 0 +// err := client.GetWorkflowRunsPages(params, +// func(page *glue.GetWorkflowRunsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // +func (c *Glue) GetWorkflowRunsPages(input *GetWorkflowRunsInput, fn func(*GetWorkflowRunsOutput, bool) bool) error { + return c.GetWorkflowRunsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetWorkflowRunsPagesWithContext same as GetWorkflowRunsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetWorkflowRunsPagesWithContext(ctx aws.Context, input *GetWorkflowRunsInput, fn func(*GetWorkflowRunsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetWorkflowRunsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetWorkflowRunsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetWorkflowRunsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opImportCatalogToGlue = "ImportCatalogToGlue" + +// ImportCatalogToGlueRequest generates a "aws/request.Request" representing the +// client's request for the ImportCatalogToGlue operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportCatalogToGlue for more information on using the ImportCatalogToGlue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the StartCrawlerScheduleRequest method. -// req, resp := client.StartCrawlerScheduleRequest(params) +// +// // Example sending a request using the ImportCatalogToGlueRequest method. +// req, resp := client.ImportCatalogToGlueRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule -func (c *Glue) StartCrawlerScheduleRequest(input *StartCrawlerScheduleInput) (req *request.Request, output *StartCrawlerScheduleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue +func (c *Glue) ImportCatalogToGlueRequest(input *ImportCatalogToGlueInput) (req *request.Request, output *ImportCatalogToGlueOutput) { op := &request.Operation{ - Name: opStartCrawlerSchedule, + Name: opImportCatalogToGlue, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartCrawlerScheduleInput{} + input = &ImportCatalogToGlueInput{} } - output = &StartCrawlerScheduleOutput{} + output = &ImportCatalogToGlueOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StartCrawlerSchedule API operation for AWS Glue. +// ImportCatalogToGlue API operation for AWS Glue. 
// -// Changes the schedule state of the specified crawler to SCHEDULED, unless -// the crawler is already running or the schedule state is already SCHEDULED. +// Imports an existing Amazon Athena Data Catalog to AWS Glue. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation StartCrawlerSchedule for usage and error information. +// API operation ImportCatalogToGlue for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeSchedulerRunningException "SchedulerRunningException" -// The specified scheduler is already running. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. -// -// * ErrCodeNoScheduleException "NoScheduleException" -// There is no applicable schedule. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule -func (c *Glue) StartCrawlerSchedule(input *StartCrawlerScheduleInput) (*StartCrawlerScheduleOutput, error) { - req, out := c.StartCrawlerScheduleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlue +func (c *Glue) ImportCatalogToGlue(input *ImportCatalogToGlueInput) (*ImportCatalogToGlueOutput, error) { + req, out := c.ImportCatalogToGlueRequest(input) return out, req.Send() } -// StartCrawlerScheduleWithContext is the same as StartCrawlerSchedule with the addition of +// ImportCatalogToGlueWithContext is the same as ImportCatalogToGlue with the addition of // the ability to pass a context and additional request options. // -// See StartCrawlerSchedule for details on how to use this API operation. +// See ImportCatalogToGlue for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StartCrawlerScheduleWithContext(ctx aws.Context, input *StartCrawlerScheduleInput, opts ...request.Option) (*StartCrawlerScheduleOutput, error) { - req, out := c.StartCrawlerScheduleRequest(input) +func (c *Glue) ImportCatalogToGlueWithContext(ctx aws.Context, input *ImportCatalogToGlueInput, opts ...request.Option) (*ImportCatalogToGlueOutput, error) { + req, out := c.ImportCatalogToGlueRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartJobRun = "StartJobRun" +const opListCrawlers = "ListCrawlers" -// StartJobRunRequest generates a "aws/request.Request" representing the -// client's request for the StartJobRun operation. The "output" return +// ListCrawlersRequest generates a "aws/request.Request" representing the +// client's request for the ListCrawlers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error. // -// See StartJobRun for more information on using the StartJobRun +// See ListCrawlers for more information on using the ListCrawlers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartJobRunRequest method. -// req, resp := client.StartJobRunRequest(params) +// // Example sending a request using the ListCrawlersRequest method. +// req, resp := client.ListCrawlersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun -func (c *Glue) StartJobRunRequest(input *StartJobRunInput) (req *request.Request, output *StartJobRunOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlers +func (c *Glue) ListCrawlersRequest(input *ListCrawlersInput) (req *request.Request, output *ListCrawlersOutput) { op := &request.Operation{ - Name: opStartJobRun, + Name: opListCrawlers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StartJobRunInput{} + input = &ListCrawlersInput{} } - output = &StartJobRunOutput{} + output = &ListCrawlersOutput{} req = c.newRequest(op, input, output) return } -// StartJobRun API operation for AWS Glue. +// ListCrawlers API operation for AWS Glue. // -// Starts a job run using a job definition. +// Retrieves the names of all crawler resources in this AWS account, or the +// resources with the specified tag. This operation allows you to see which +// resources are available in your account, and their names. +// +// This operation takes the optional Tags field, which you can use as a filter +// on the response so that tagged resources can be retrieved as a group. If +// you choose to use tags filtering, only resources with the tag are retrieved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation StartJobRun for usage and error information. +// API operation ListCrawlers for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" -// Too many jobs are being run concurrently. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun -func (c *Glue) StartJobRun(input *StartJobRunInput) (*StartJobRunOutput, error) { - req, out := c.StartJobRunRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlers +func (c *Glue) ListCrawlers(input *ListCrawlersInput) (*ListCrawlersOutput, error) { + req, out := c.ListCrawlersRequest(input) return out, req.Send() } -// StartJobRunWithContext is the same as StartJobRun with the addition of +// ListCrawlersWithContext is the same as ListCrawlers with the addition of // the ability to pass a context and additional request options. // -// See StartJobRun for details on how to use this API operation. +// See ListCrawlers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StartJobRunWithContext(ctx aws.Context, input *StartJobRunInput, opts ...request.Option) (*StartJobRunOutput, error) { - req, out := c.StartJobRunRequest(input) +func (c *Glue) ListCrawlersWithContext(ctx aws.Context, input *ListCrawlersInput, opts ...request.Option) (*ListCrawlersOutput, error) { + req, out := c.ListCrawlersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartTrigger = "StartTrigger" +// ListCrawlersPages iterates over the pages of a ListCrawlers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCrawlers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCrawlers operation. +// pageNum := 0 +// err := client.ListCrawlersPages(params, +// func(page *glue.ListCrawlersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) ListCrawlersPages(input *ListCrawlersInput, fn func(*ListCrawlersOutput, bool) bool) error { + return c.ListCrawlersPagesWithContext(aws.BackgroundContext(), input, fn) +} -// StartTriggerRequest generates a "aws/request.Request" representing the -// client's request for the StartTrigger operation. The "output" return +// ListCrawlersPagesWithContext same as ListCrawlersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListCrawlersPagesWithContext(ctx aws.Context, input *ListCrawlersInput, fn func(*ListCrawlersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCrawlersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCrawlersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCrawlersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDevEndpoints = "ListDevEndpoints" + +// ListDevEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the ListDevEndpoints operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartTrigger for more information on using the StartTrigger +// See ListDevEndpoints for more information on using the ListDevEndpoints // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartTriggerRequest method. -// req, resp := client.StartTriggerRequest(params) +// // Example sending a request using the ListDevEndpointsRequest method. +// req, resp := client.ListDevEndpointsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger -func (c *Glue) StartTriggerRequest(input *StartTriggerInput) (req *request.Request, output *StartTriggerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpoints +func (c *Glue) ListDevEndpointsRequest(input *ListDevEndpointsInput) (req *request.Request, output *ListDevEndpointsOutput) { op := &request.Operation{ - Name: opStartTrigger, + Name: opListDevEndpoints, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StartTriggerInput{} + input = &ListDevEndpointsInput{} } - output = &StartTriggerOutput{} + output = &ListDevEndpointsOutput{} req = c.newRequest(op, input, output) return } -// StartTrigger API operation for AWS Glue. +// ListDevEndpoints API operation for AWS Glue. // -// Starts an existing trigger. See Triggering Jobs (http://docs.aws.amazon.com/glue/latest/dg/trigger-job.html) -// for information about how different types of trigger are started. +// Retrieves the names of all DevEndpoint resources in this AWS account, or +// the resources with the specified tag. This operation allows you to see which +// resources are available in your account, and their names. +// +// This operation takes the optional Tags field, which you can use as a filter +// on the response so that tagged resources can be retrieved as a group. If +// you choose to use tags filtering, only resources with the tag are retrieved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation StartTrigger for usage and error information. +// API operation ListDevEndpoints for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
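The "return false from fn to stop iterating" contract that the ListCrawlersPages comments describe can be exercised as below. This is an illustrative sketch only: the client construction and the crawler name are hypothetical, not part of this diff.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        found := false
        err := svc.ListCrawlersPages(&glue.ListCrawlersInput{MaxResults: aws.Int64(50)},
            func(page *glue.ListCrawlersOutput, lastPage bool) bool {
                for _, name := range page.CrawlerNames {
                    if aws.StringValue(name) == "orders-crawler" { // hypothetical name
                        found = true
                        return false // stop: the paginator issues no further requests
                    }
                }
                return true // continue to the next page, if any
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("crawler found:", found)
    }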
-// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. -// -// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" -// Too many jobs are being run concurrently. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger -func (c *Glue) StartTrigger(input *StartTriggerInput) (*StartTriggerOutput, error) { - req, out := c.StartTriggerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpoints +func (c *Glue) ListDevEndpoints(input *ListDevEndpointsInput) (*ListDevEndpointsOutput, error) { + req, out := c.ListDevEndpointsRequest(input) return out, req.Send() } -// StartTriggerWithContext is the same as StartTrigger with the addition of +// ListDevEndpointsWithContext is the same as ListDevEndpoints with the addition of // the ability to pass a context and additional request options. // -// See StartTrigger for details on how to use this API operation. +// See ListDevEndpoints for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StartTriggerWithContext(ctx aws.Context, input *StartTriggerInput, opts ...request.Option) (*StartTriggerOutput, error) { - req, out := c.StartTriggerRequest(input) +func (c *Glue) ListDevEndpointsWithContext(ctx aws.Context, input *ListDevEndpointsInput, opts ...request.Option) (*ListDevEndpointsOutput, error) { + req, out := c.ListDevEndpointsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopCrawler = "StopCrawler" +// ListDevEndpointsPages iterates over the pages of a ListDevEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDevEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDevEndpoints operation. +// pageNum := 0 +// err := client.ListDevEndpointsPages(params, +// func(page *glue.ListDevEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) ListDevEndpointsPages(input *ListDevEndpointsInput, fn func(*ListDevEndpointsOutput, bool) bool) error { + return c.ListDevEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// StopCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the StopCrawler operation. The "output" return +// ListDevEndpointsPagesWithContext same as ListDevEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListDevEndpointsPagesWithContext(ctx aws.Context, input *ListDevEndpointsInput, fn func(*ListDevEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDevEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDevEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDevEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopCrawler for more information on using the StopCrawler +// See ListJobs for more information on using the ListJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopCrawlerRequest method. -// req, resp := client.StopCrawlerRequest(params) +// // Example sending a request using the ListJobsRequest method. +// req, resp := client.ListJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler -func (c *Glue) StopCrawlerRequest(input *StopCrawlerInput) (req *request.Request, output *StopCrawlerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobs +func (c *Glue) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { op := &request.Operation{ - Name: opStopCrawler, + Name: opListJobs, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StopCrawlerInput{} + input = &ListJobsInput{} } - output = &StopCrawlerOutput{} + output = &ListJobsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StopCrawler API operation for AWS Glue. +// ListJobs API operation for AWS Glue. // -// If the specified crawler is running, stops the crawl. +// Retrieves the names of all job resources in this AWS account, or the resources +// with the specified tag. This operation allows you to see which resources +// are available in your account, and their names. +// +// This operation takes the optional Tags field, which you can use as a filter +// on the response so that tagged resources can be retrieved as a group. If +// you choose to use tags filtering, only resources with the tag are retrieved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
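The Paginator block added to ListJobsRequest wires NextToken through InputTokens/OutputTokens and MaxResults through LimitToken; request.Pagination drives exactly that wiring. To make the wiring concrete, here is a hand-rolled loop that threads the same tokens without the helper. The client setup and page size are assumptions.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        // Manual equivalent of the generated paginator: copy the output's
        // NextToken into the next input until it comes back empty.
        input := &glue.ListJobsInput{MaxResults: aws.Int64(100)}
        for {
            out, err := svc.ListJobs(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, name := range out.JobNames {
                fmt.Println(aws.StringValue(name))
            }
            if aws.StringValue(out.NextToken) == "" {
                break
            }
            input.NextToken = out.NextToken
        }
    }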
// // See the AWS API reference guide for AWS Glue's -// API operation StopCrawler for usage and error information. +// API operation ListJobs for usage and error information. // // Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeCrawlerNotRunningException "CrawlerNotRunningException" -// The specified crawler is not running. -// -// * ErrCodeCrawlerStoppingException "CrawlerStoppingException" -// The specified crawler is stopping. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler -func (c *Glue) StopCrawler(input *StopCrawlerInput) (*StopCrawlerOutput, error) { - req, out := c.StopCrawlerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobs +func (c *Glue) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) return out, req.Send() } -// StopCrawlerWithContext is the same as StopCrawler with the addition of +// ListJobsWithContext is the same as ListJobs with the addition of // the ability to pass a context and additional request options. // -// See StopCrawler for details on how to use this API operation. +// See ListJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StopCrawlerWithContext(ctx aws.Context, input *StopCrawlerInput, opts ...request.Option) (*StopCrawlerOutput, error) { - req, out := c.StopCrawlerRequest(input) +func (c *Glue) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopCrawlerSchedule = "StopCrawlerSchedule" +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. +// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *glue.ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { + return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// StopCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the StopCrawlerSchedule operation. The "output" return +// ListJobsPagesWithContext same as ListJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTriggers = "ListTriggers" + +// ListTriggersRequest generates a "aws/request.Request" representing the +// client's request for the ListTriggers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopCrawlerSchedule for more information on using the StopCrawlerSchedule +// See ListTriggers for more information on using the ListTriggers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopCrawlerScheduleRequest method. -// req, resp := client.StopCrawlerScheduleRequest(params) +// // Example sending a request using the ListTriggersRequest method. +// req, resp := client.ListTriggersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule -func (c *Glue) StopCrawlerScheduleRequest(input *StopCrawlerScheduleInput) (req *request.Request, output *StopCrawlerScheduleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggers +func (c *Glue) ListTriggersRequest(input *ListTriggersInput) (req *request.Request, output *ListTriggersOutput) { op := &request.Operation{ - Name: opStopCrawlerSchedule, + Name: opListTriggers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StopCrawlerScheduleInput{} + input = &ListTriggersInput{} } - output = &StopCrawlerScheduleOutput{} + output = &ListTriggersOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StopCrawlerSchedule API operation for AWS Glue. +// ListTriggers API operation for AWS Glue. // -// Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does -// not stop the crawler if it is already running. +// Retrieves the names of all trigger resources in this AWS account, or the +// resources with the specified tag. This operation allows you to see which +// resources are available in your account, and their names. +// +// This operation takes the optional Tags field, which you can use as a filter +// on the response so that tagged resources can be retrieved as a group. If +// you choose to use tags filtering, only resources with the tag are retrieved. 
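The tags-as-filter behavior described above is shared by all of the new List operations. A hedged sketch against ListTriggers, with a made-up tag key and value and assumed client boilerplate:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        // Only triggers carrying team=data-eng are returned (the tag is hypothetical).
        out, err := svc.ListTriggers(&glue.ListTriggersInput{
            Tags: map[string]*string{"team": aws.String("data-eng")},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValueSlice(out.TriggerNames))
    }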
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation StopCrawlerSchedule for usage and error information. +// API operation ListTriggers for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeSchedulerNotRunningException "SchedulerNotRunningException" -// The specified scheduler is not running. +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule -func (c *Glue) StopCrawlerSchedule(input *StopCrawlerScheduleInput) (*StopCrawlerScheduleOutput, error) { - req, out := c.StopCrawlerScheduleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggers +func (c *Glue) ListTriggers(input *ListTriggersInput) (*ListTriggersOutput, error) { + req, out := c.ListTriggersRequest(input) return out, req.Send() } -// StopCrawlerScheduleWithContext is the same as StopCrawlerSchedule with the addition of +// ListTriggersWithContext is the same as ListTriggers with the addition of // the ability to pass a context and additional request options. // -// See StopCrawlerSchedule for details on how to use this API operation. +// See ListTriggers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) StopCrawlerScheduleWithContext(ctx aws.Context, input *StopCrawlerScheduleInput, opts ...request.Option) (*StopCrawlerScheduleOutput, error) { - req, out := c.StopCrawlerScheduleRequest(input) +func (c *Glue) ListTriggersWithContext(ctx aws.Context, input *ListTriggersInput, opts ...request.Option) (*ListTriggersOutput, error) { + req, out := c.ListTriggersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopTrigger = "StopTrigger" - -// StopTriggerRequest generates a "aws/request.Request" representing the -// client's request for the StopTrigger operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// ListTriggersPages iterates over the pages of a ListTriggers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// See StopTrigger for more information on using the StopTrigger -// API call, and error handling. +// See ListTriggers method for more information on how to use this operation. 
// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// Note: This operation can generate multiple requests to a service. // +// // Example iterating over at most 3 pages of a ListTriggers operation. +// pageNum := 0 +// err := client.ListTriggersPages(params, +// func(page *glue.ListTriggersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// // Example sending a request using the StopTriggerRequest method. -// req, resp := client.StopTriggerRequest(params) +func (c *Glue) ListTriggersPages(input *ListTriggersInput, fn func(*ListTriggersOutput, bool) bool) error { + return c.ListTriggersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTriggersPagesWithContext same as ListTriggersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListTriggersPagesWithContext(ctx aws.Context, input *ListTriggersInput, fn func(*ListTriggersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTriggersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTriggersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTriggersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListWorkflows = "ListWorkflows" + +// ListWorkflowsRequest generates a "aws/request.Request" representing the +// client's request for the ListWorkflows operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWorkflows for more information on using the ListWorkflows +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWorkflowsRequest method. 
+// req, resp := client.ListWorkflowsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger -func (c *Glue) StopTriggerRequest(input *StopTriggerInput) (req *request.Request, output *StopTriggerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflows +func (c *Glue) ListWorkflowsRequest(input *ListWorkflowsInput) (req *request.Request, output *ListWorkflowsOutput) { op := &request.Operation{ - Name: opStopTrigger, + Name: opListWorkflows, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StopTriggerInput{} + input = &ListWorkflowsInput{} } - output = &StopTriggerOutput{} + output = &ListWorkflowsOutput{} req = c.newRequest(op, input, output) return } -// StopTrigger API operation for AWS Glue. +// ListWorkflows API operation for AWS Glue. // -// Stops a specified trigger. +// Lists names of workflows created in the account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation StopTrigger for usage and error information. +// API operation ListWorkflows for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" @@ -8598,185 +9383,227 @@ func (c *Glue) StopTriggerRequest(input *StopTriggerInput) (req *request.Request // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Two processes are trying to modify a resource simultaneously. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger -func (c *Glue) StopTrigger(input *StopTriggerInput) (*StopTriggerOutput, error) { - req, out := c.StopTriggerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflows +func (c *Glue) ListWorkflows(input *ListWorkflowsInput) (*ListWorkflowsOutput, error) { + req, out := c.ListWorkflowsRequest(input) return out, req.Send() } -// StopTriggerWithContext is the same as StopTrigger with the addition of +// ListWorkflowsWithContext is the same as ListWorkflows with the addition of // the ability to pass a context and additional request options. // -// See StopTrigger for details on how to use this API operation. +// See ListWorkflows for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
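The Request/Send split these generated comments keep referring to looks like the following from a caller. request.WithLogLevel stands in for any request.Option; the setup is assumed boilerplate, and the page size is arbitrary.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        // Build the request without sending it, then customize before Send.
        req, resp := svc.ListWorkflowsRequest(&glue.ListWorkflowsInput{
            MaxResults: aws.Int64(25),
        })
        req.ApplyOptions(request.WithLogLevel(aws.LogDebugWithHTTPBody))

        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
        // resp is only valid after Send returns without error.
        fmt.Println(aws.StringValueSlice(resp.Workflows))
    }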
-func (c *Glue) StopTriggerWithContext(ctx aws.Context, input *StopTriggerInput, opts ...request.Option) (*StopTriggerOutput, error) { - req, out := c.StopTriggerRequest(input) +func (c *Glue) ListWorkflowsWithContext(ctx aws.Context, input *ListWorkflowsInput, opts ...request.Option) (*ListWorkflowsOutput, error) { + req, out := c.ListWorkflowsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +// ListWorkflowsPages iterates over the pages of a ListWorkflows operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWorkflows method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWorkflows operation. +// pageNum := 0 +// err := client.ListWorkflowsPages(params, +// func(page *glue.ListWorkflowsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) ListWorkflowsPages(input *ListWorkflowsInput, fn func(*ListWorkflowsOutput, bool) bool) error { + return c.ListWorkflowsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListWorkflowsPagesWithContext same as ListWorkflowsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListWorkflowsPagesWithContext(ctx aws.Context, input *ListWorkflowsInput, fn func(*ListWorkflowsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListWorkflowsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListWorkflowsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListWorkflowsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutDataCatalogEncryptionSettings = "PutDataCatalogEncryptionSettings" + +// PutDataCatalogEncryptionSettingsRequest generates a "aws/request.Request" representing the +// client's request for the PutDataCatalogEncryptionSettings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See PutDataCatalogEncryptionSettings for more information on using the PutDataCatalogEncryptionSettings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the PutDataCatalogEncryptionSettingsRequest method. 
+// req, resp := client.PutDataCatalogEncryptionSettingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TagResource -func (c *Glue) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettings +func (c *Glue) PutDataCatalogEncryptionSettingsRequest(input *PutDataCatalogEncryptionSettingsInput) (req *request.Request, output *PutDataCatalogEncryptionSettingsOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opPutDataCatalogEncryptionSettings, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &PutDataCatalogEncryptionSettingsInput{} } - output = &TagResourceOutput{} + output = &PutDataCatalogEncryptionSettingsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for AWS Glue. +// PutDataCatalogEncryptionSettings API operation for AWS Glue. // -// Adds tags to a resource. A tag is a label you can assign to an AWS resource. -// In AWS Glue, you can tag only certain resources. For information about what -// resources you can tag, see AWS Tags in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html). +// Sets the security configuration for a specified catalog. After the configuration +// has been set, the specified encryption is applied to every catalog write +// thereafter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation TagResource for usage and error information. +// API operation PutDataCatalogEncryptionSettings for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TagResource -func (c *Glue) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettings +func (c *Glue) PutDataCatalogEncryptionSettings(input *PutDataCatalogEncryptionSettingsInput) (*PutDataCatalogEncryptionSettingsOutput, error) { + req, out := c.PutDataCatalogEncryptionSettingsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// PutDataCatalogEncryptionSettingsWithContext is the same as PutDataCatalogEncryptionSettings with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See PutDataCatalogEncryptionSettings for details on how to use this API operation. 
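A sketch of the catalog-encryption call described above, assuming the DataCatalogEncryptionSettings and EncryptionAtRest shapes generated in this SDK version; the KMS key alias is hypothetical.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        // Turn on SSE-KMS for every subsequent catalog write.
        _, err := svc.PutDataCatalogEncryptionSettings(&glue.PutDataCatalogEncryptionSettingsInput{
            DataCatalogEncryptionSettings: &glue.DataCatalogEncryptionSettings{
                EncryptionAtRest: &glue.EncryptionAtRest{
                    CatalogEncryptionMode: aws.String(glue.CatalogEncryptionModeSseKms),
                    SseAwsKmsKeyId:        aws.String("alias/glue-catalog"), // hypothetical key
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }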
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Glue) PutDataCatalogEncryptionSettingsWithContext(ctx aws.Context, input *PutDataCatalogEncryptionSettingsInput, opts ...request.Option) (*PutDataCatalogEncryptionSettingsOutput, error) { + req, out := c.PutDataCatalogEncryptionSettingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opPutResourcePolicy = "PutResourcePolicy" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// PutResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResourcePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See PutResourcePolicy for more information on using the PutResourcePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the PutResourcePolicyRequest method. +// req, resp := client.PutResourcePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UntagResource -func (c *Glue) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicy +func (c *Glue) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opPutResourcePolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &PutResourcePolicyInput{} } - output = &UntagResourceOutput{} + output = &PutResourcePolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for AWS Glue. +// PutResourcePolicy API operation for AWS Glue. // -// Removes tags from a resource. +// Sets the Data Catalog resource policy for access control. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UntagResource for usage and error information. +// API operation PutResourcePolicy for usage and error information. 
// // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist // // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. @@ -8784,818 +9611,919 @@ func (c *Glue) UntagResourceRequest(input *UntagResourceInput) (req *request.Req // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UntagResource -func (c *Glue) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// * ErrCodeConditionCheckFailureException "ConditionCheckFailureException" +// A specified condition was not satisfied. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicy +func (c *Glue) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See PutResourcePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Glue) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateClassifier = "UpdateClassifier" +const opPutWorkflowRunProperties = "PutWorkflowRunProperties" -// UpdateClassifierRequest generates a "aws/request.Request" representing the -// client's request for the UpdateClassifier operation. The "output" return +// PutWorkflowRunPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the PutWorkflowRunProperties operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateClassifier for more information on using the UpdateClassifier +// See PutWorkflowRunProperties for more information on using the PutWorkflowRunProperties // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the UpdateClassifierRequest method. -// req, resp := client.UpdateClassifierRequest(params) +// // Example sending a request using the PutWorkflowRunPropertiesRequest method. +// req, resp := client.PutWorkflowRunPropertiesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier -func (c *Glue) UpdateClassifierRequest(input *UpdateClassifierInput) (req *request.Request, output *UpdateClassifierOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunProperties +func (c *Glue) PutWorkflowRunPropertiesRequest(input *PutWorkflowRunPropertiesInput) (req *request.Request, output *PutWorkflowRunPropertiesOutput) { op := &request.Operation{ - Name: opUpdateClassifier, + Name: opPutWorkflowRunProperties, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateClassifierInput{} + input = &PutWorkflowRunPropertiesInput{} } - output = &UpdateClassifierOutput{} + output = &PutWorkflowRunPropertiesOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateClassifier API operation for AWS Glue. +// PutWorkflowRunProperties API operation for AWS Glue. // -// Modifies an existing classifier (a GrokClassifier, an XMLClassifier, a JsonClassifier, -// or a CsvClassifier, depending on which field is present). +// Puts the specified workflow run properties for the given workflow run. If +// a property already exists for the specified run, then it overrides the value; +// otherwise, it adds the property to the existing properties. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateClassifier for usage and error information. +// API operation PutWorkflowRunProperties for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. +// * ErrCodeAlreadyExistsException "AlreadyExistsException" +// A resource to be created or added already exists. // // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier -func (c *Glue) UpdateClassifier(input *UpdateClassifierInput) (*UpdateClassifierOutput, error) { - req, out := c.UpdateClassifierRequest(input) +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously.
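Each operation's doc comment lists its error codes and points at awserr.Error type assertions. For the new PutWorkflowRunProperties operation, that pattern would look roughly like the sketch below; the workflow name, run ID, and property values are hypothetical.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glue"
    )

    func main() {
        svc := glue.New(session.Must(session.NewSession()))

        _, err := svc.PutWorkflowRunProperties(&glue.PutWorkflowRunPropertiesInput{
            Name:          aws.String("etl-workflow"),        // hypothetical workflow
            RunId:         aws.String("wr_0123456789abcdef"), // hypothetical run ID
            RunProperties: map[string]*string{"stage": aws.String("load")},
        })
        if aerr, ok := err.(awserr.Error); ok {
            // Switch on the service error codes the doc comment enumerates.
            switch aerr.Code() {
            case glue.ErrCodeEntityNotFoundException:
                log.Println("workflow or run does not exist:", aerr.Message())
            case glue.ErrCodeConcurrentModificationException:
                log.Println("lost a write race, consider retrying:", aerr.Message())
            default:
                log.Println(aerr.Code(), aerr.Message())
            }
        } else if err != nil {
            log.Fatal(err)
        }
    }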
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunProperties +func (c *Glue) PutWorkflowRunProperties(input *PutWorkflowRunPropertiesInput) (*PutWorkflowRunPropertiesOutput, error) { + req, out := c.PutWorkflowRunPropertiesRequest(input) return out, req.Send() } -// UpdateClassifierWithContext is the same as UpdateClassifier with the addition of +// PutWorkflowRunPropertiesWithContext is the same as PutWorkflowRunProperties with the addition of // the ability to pass a context and additional request options. // -// See UpdateClassifier for details on how to use this API operation. +// See PutWorkflowRunProperties for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateClassifierWithContext(ctx aws.Context, input *UpdateClassifierInput, opts ...request.Option) (*UpdateClassifierOutput, error) { - req, out := c.UpdateClassifierRequest(input) +func (c *Glue) PutWorkflowRunPropertiesWithContext(ctx aws.Context, input *PutWorkflowRunPropertiesInput, opts ...request.Option) (*PutWorkflowRunPropertiesOutput, error) { + req, out := c.PutWorkflowRunPropertiesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateConnection = "UpdateConnection" +const opResetJobBookmark = "ResetJobBookmark" -// UpdateConnectionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateConnection operation. The "output" return +// ResetJobBookmarkRequest generates a "aws/request.Request" representing the +// client's request for the ResetJobBookmark operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateConnection for more information on using the UpdateConnection +// See ResetJobBookmark for more information on using the ResetJobBookmark // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateConnectionRequest method. -// req, resp := client.UpdateConnectionRequest(params) +// // Example sending a request using the ResetJobBookmarkRequest method. 
+// req, resp := client.ResetJobBookmarkRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection -func (c *Glue) UpdateConnectionRequest(input *UpdateConnectionInput) (req *request.Request, output *UpdateConnectionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark +func (c *Glue) ResetJobBookmarkRequest(input *ResetJobBookmarkInput) (req *request.Request, output *ResetJobBookmarkOutput) { op := &request.Operation{ - Name: opUpdateConnection, + Name: opResetJobBookmark, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateConnectionInput{} + input = &ResetJobBookmarkInput{} } - output = &UpdateConnectionOutput{} + output = &ResetJobBookmarkOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateConnection API operation for AWS Glue. +// ResetJobBookmark API operation for AWS Glue. // -// Updates a connection definition in the Data Catalog. +// Resets a bookmark entry. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateConnection for usage and error information. +// API operation ResetJobBookmark for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection -func (c *Glue) UpdateConnection(input *UpdateConnectionInput) (*UpdateConnectionOutput, error) { - req, out := c.UpdateConnectionRequest(input) +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmark +func (c *Glue) ResetJobBookmark(input *ResetJobBookmarkInput) (*ResetJobBookmarkOutput, error) { + req, out := c.ResetJobBookmarkRequest(input) return out, req.Send() } -// UpdateConnectionWithContext is the same as UpdateConnection with the addition of +// ResetJobBookmarkWithContext is the same as ResetJobBookmark with the addition of // the ability to pass a context and additional request options. // -// See UpdateConnection for details on how to use this API operation. +// See ResetJobBookmark for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
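A sketch of the context pattern the comment above describes, using a bounded timeout; the job name and the 30-second deadline are illustrative. aws.Context is satisfied by a standard context.Context:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Abandon the call if the service has not answered within 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if _, err := svc.ResetJobBookmarkWithContext(ctx, &glue.ResetJobBookmarkInput{
		JobName: aws.String("example-job"), // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}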
-func (c *Glue) UpdateConnectionWithContext(ctx aws.Context, input *UpdateConnectionInput, opts ...request.Option) (*UpdateConnectionOutput, error) { - req, out := c.UpdateConnectionRequest(input) +func (c *Glue) ResetJobBookmarkWithContext(ctx aws.Context, input *ResetJobBookmarkInput, opts ...request.Option) (*ResetJobBookmarkOutput, error) { + req, out := c.ResetJobBookmarkRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateCrawler = "UpdateCrawler" +const opSearchTables = "SearchTables" -// UpdateCrawlerRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCrawler operation. The "output" return +// SearchTablesRequest generates a "aws/request.Request" representing the +// client's request for the SearchTables operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateCrawler for more information on using the UpdateCrawler +// See SearchTables for more information on using the SearchTables // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateCrawlerRequest method. -// req, resp := client.UpdateCrawlerRequest(params) +// // Example sending a request using the SearchTablesRequest method. +// req, resp := client.SearchTablesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler -func (c *Glue) UpdateCrawlerRequest(input *UpdateCrawlerInput) (req *request.Request, output *UpdateCrawlerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SearchTables +func (c *Glue) SearchTablesRequest(input *SearchTablesInput) (req *request.Request, output *SearchTablesOutput) { op := &request.Operation{ - Name: opUpdateCrawler, + Name: opSearchTables, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateCrawlerInput{} + input = &SearchTablesInput{} } - output = &UpdateCrawlerOutput{} + output = &SearchTablesOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateCrawler API operation for AWS Glue. +// SearchTables API operation for AWS Glue. // -// Updates a crawler. If a crawler is running, you must stop it using StopCrawler -// before updating it. +// Searches a set of tables based on properties in the table metadata as well +// as on the parent database. You can search against text or filter conditions. +// +// You can only get tables that you have access to based on the security policies +// defined in Lake Formation. You need at least read-only access to the table +// for it to be returned. If you do not have access to all the columns in the +// table, these columns will not be searched against when returning the list +// of tables to you.
If you have access to the columns but not the data +// in the columns, those columns and the associated metadata for those columns +// will be included in the search. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateCrawler for usage and error information. +// API operation SearchTables for usage and error information. // // Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeCrawlerRunningException "CrawlerRunningException" -// The operation cannot be performed because the crawler is already running. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler -func (c *Glue) UpdateCrawler(input *UpdateCrawlerInput) (*UpdateCrawlerOutput, error) { - req, out := c.UpdateCrawlerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SearchTables +func (c *Glue) SearchTables(input *SearchTablesInput) (*SearchTablesOutput, error) { + req, out := c.SearchTablesRequest(input) return out, req.Send() } -// UpdateCrawlerWithContext is the same as UpdateCrawler with the addition of +// SearchTablesWithContext is the same as SearchTables with the addition of // the ability to pass a context and additional request options. // -// See UpdateCrawler for details on how to use this API operation. +// See SearchTables for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateCrawlerWithContext(ctx aws.Context, input *UpdateCrawlerInput, opts ...request.Option) (*UpdateCrawlerOutput, error) { - req, out := c.UpdateCrawlerRequest(input) +func (c *Glue) SearchTablesWithContext(ctx aws.Context, input *SearchTablesInput, opts ...request.Option) (*SearchTablesOutput, error) { + req, out := c.SearchTablesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateCrawlerSchedule = "UpdateCrawlerSchedule" +// SearchTablesPages iterates over the pages of a SearchTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchTables operation. 
+// pageNum := 0 +// err := client.SearchTablesPages(params, +// func(page *glue.SearchTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) SearchTablesPages(input *SearchTablesInput, fn func(*SearchTablesOutput, bool) bool) error { + return c.SearchTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateCrawlerScheduleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCrawlerSchedule operation. The "output" return +// SearchTablesPagesWithContext same as SearchTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) SearchTablesPagesWithContext(ctx aws.Context, input *SearchTablesInput, fn func(*SearchTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opStartCrawler = "StartCrawler" + +// StartCrawlerRequest generates a "aws/request.Request" representing the +// client's request for the StartCrawler operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateCrawlerSchedule for more information on using the UpdateCrawlerSchedule +// See StartCrawler for more information on using the StartCrawler // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateCrawlerScheduleRequest method. -// req, resp := client.UpdateCrawlerScheduleRequest(params) +// // Example sending a request using the StartCrawlerRequest method. 
+// req, resp := client.StartCrawlerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule -func (c *Glue) UpdateCrawlerScheduleRequest(input *UpdateCrawlerScheduleInput) (req *request.Request, output *UpdateCrawlerScheduleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler +func (c *Glue) StartCrawlerRequest(input *StartCrawlerInput) (req *request.Request, output *StartCrawlerOutput) { op := &request.Operation{ - Name: opUpdateCrawlerSchedule, + Name: opStartCrawler, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateCrawlerScheduleInput{} + input = &StartCrawlerInput{} } - output = &UpdateCrawlerScheduleOutput{} + output = &StartCrawlerOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateCrawlerSchedule API operation for AWS Glue. +// StartCrawler API operation for AWS Glue. // -// Updates the schedule of a crawler using a cron expression. +// Starts a crawl using the specified crawler, regardless of what is scheduled. +// If the crawler is already running, returns a CrawlerRunningException (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-exceptions.html#aws-glue-api-exceptions-CrawlerRunningException). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateCrawlerSchedule for usage and error information. +// API operation StartCrawler for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeVersionMismatchException "VersionMismatchException" -// There was a version conflict. -// -// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" -// The specified scheduler is transitioning. +// * ErrCodeCrawlerRunningException "CrawlerRunningException" +// The operation cannot be performed because the crawler is already running. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule -func (c *Glue) UpdateCrawlerSchedule(input *UpdateCrawlerScheduleInput) (*UpdateCrawlerScheduleOutput, error) { - req, out := c.UpdateCrawlerScheduleRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawler +func (c *Glue) StartCrawler(input *StartCrawlerInput) (*StartCrawlerOutput, error) { + req, out := c.StartCrawlerRequest(input) return out, req.Send() } -// UpdateCrawlerScheduleWithContext is the same as UpdateCrawlerSchedule with the addition of +// StartCrawlerWithContext is the same as StartCrawler with the addition of // the ability to pass a context and additional request options. // -// See UpdateCrawlerSchedule for details on how to use this API operation. +// See StartCrawler for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateCrawlerScheduleWithContext(ctx aws.Context, input *UpdateCrawlerScheduleInput, opts ...request.Option) (*UpdateCrawlerScheduleOutput, error) { - req, out := c.UpdateCrawlerScheduleRequest(input) +func (c *Glue) StartCrawlerWithContext(ctx aws.Context, input *StartCrawlerInput, opts ...request.Option) (*StartCrawlerOutput, error) { + req, out := c.StartCrawlerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDatabase = "UpdateDatabase" +const opStartCrawlerSchedule = "StartCrawlerSchedule" -// UpdateDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDatabase operation. The "output" return +// StartCrawlerScheduleRequest generates a "aws/request.Request" representing the +// client's request for the StartCrawlerSchedule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDatabase for more information on using the UpdateDatabase +// See StartCrawlerSchedule for more information on using the StartCrawlerSchedule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDatabaseRequest method. -// req, resp := client.UpdateDatabaseRequest(params) +// // Example sending a request using the StartCrawlerScheduleRequest method. +// req, resp := client.StartCrawlerScheduleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase -func (c *Glue) UpdateDatabaseRequest(input *UpdateDatabaseInput) (req *request.Request, output *UpdateDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule +func (c *Glue) StartCrawlerScheduleRequest(input *StartCrawlerScheduleInput) (req *request.Request, output *StartCrawlerScheduleOutput) { op := &request.Operation{ - Name: opUpdateDatabase, + Name: opStartCrawlerSchedule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDatabaseInput{} + input = &StartCrawlerScheduleInput{} } - output = &UpdateDatabaseOutput{} + output = &StartCrawlerScheduleOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateDatabase API operation for AWS Glue. +// StartCrawlerSchedule API operation for AWS Glue. // -// Updates an existing database definition in a Data Catalog. +// Changes the schedule state of the specified crawler to SCHEDULED, unless +// the crawler is already running or the schedule state is already SCHEDULED. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateDatabase for usage and error information. 
+// API operation StartCrawlerSchedule for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. +// * ErrCodeSchedulerRunningException "SchedulerRunningException" +// The specified scheduler is already running. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. +// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" +// The specified scheduler is transitioning. +// +// * ErrCodeNoScheduleException "NoScheduleException" +// There is no applicable schedule. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase -func (c *Glue) UpdateDatabase(input *UpdateDatabaseInput) (*UpdateDatabaseOutput, error) { - req, out := c.UpdateDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartCrawlerSchedule +func (c *Glue) StartCrawlerSchedule(input *StartCrawlerScheduleInput) (*StartCrawlerScheduleOutput, error) { + req, out := c.StartCrawlerScheduleRequest(input) return out, req.Send() } -// UpdateDatabaseWithContext is the same as UpdateDatabase with the addition of +// StartCrawlerScheduleWithContext is the same as StartCrawlerSchedule with the addition of // the ability to pass a context and additional request options. // -// See UpdateDatabase for details on how to use this API operation. +// See StartCrawlerSchedule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateDatabaseWithContext(ctx aws.Context, input *UpdateDatabaseInput, opts ...request.Option) (*UpdateDatabaseOutput, error) { - req, out := c.UpdateDatabaseRequest(input) +func (c *Glue) StartCrawlerScheduleWithContext(ctx aws.Context, input *StartCrawlerScheduleInput, opts ...request.Option) (*StartCrawlerScheduleOutput, error) { + req, out := c.StartCrawlerScheduleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDevEndpoint = "UpdateDevEndpoint" +const opStartExportLabelsTaskRun = "StartExportLabelsTaskRun" -// UpdateDevEndpointRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDevEndpoint operation. The "output" return +// StartExportLabelsTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the StartExportLabelsTaskRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDevEndpoint for more information on using the UpdateDevEndpoint +// See StartExportLabelsTaskRun for more information on using the StartExportLabelsTaskRun // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDevEndpointRequest method. -// req, resp := client.UpdateDevEndpointRequest(params) +// // Example sending a request using the StartExportLabelsTaskRunRequest method. +// req, resp := client.StartExportLabelsTaskRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint -func (c *Glue) UpdateDevEndpointRequest(input *UpdateDevEndpointInput) (req *request.Request, output *UpdateDevEndpointOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartExportLabelsTaskRun +func (c *Glue) StartExportLabelsTaskRunRequest(input *StartExportLabelsTaskRunInput) (req *request.Request, output *StartExportLabelsTaskRunOutput) { op := &request.Operation{ - Name: opUpdateDevEndpoint, + Name: opStartExportLabelsTaskRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDevEndpointInput{} + input = &StartExportLabelsTaskRunInput{} } - output = &UpdateDevEndpointOutput{} + output = &StartExportLabelsTaskRunOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateDevEndpoint API operation for AWS Glue. +// StartExportLabelsTaskRun API operation for AWS Glue. // -// Updates a specified DevEndpoint. +// Begins an asynchronous task to export all labeled data for a particular transform. +// This task is the only label-related API call that is not part of the typical +// active learning workflow. You typically use StartExportLabelsTaskRun when +// you want to work with all of your existing labels at the same time, such +// as when you want to remove or change labels that were previously submitted +// as truth. This API operation accepts the TransformId whose labels you want +// to export and an Amazon Simple Storage Service (Amazon S3) path to export +// the labels to. The operation returns a TaskRunId. You can check on the status +// of your task run by calling the GetMLTaskRun API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateDevEndpoint for usage and error information. +// API operation StartExportLabelsTaskRun for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeValidationException "ValidationException" -// A value could not be validated. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. 
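A sketch of the export flow described above: pass the transform ID and an S3 destination, then keep the returned TaskRunId for a later GetMLTaskRun poll. The transform ID and bucket below are placeholders:

package glueexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// exportLabels starts the asynchronous label export and returns the task-run
// ID that a caller would poll with GetMLTaskRun.
func exportLabels(svc *glue.Glue) (string, error) {
	out, err := svc.StartExportLabelsTaskRun(&glue.StartExportLabelsTaskRunInput{
		TransformId:  aws.String("tfm-0123456789abcdef"),        // placeholder
		OutputS3Path: aws.String("s3://example-bucket/labels/"), // placeholder
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.TaskRunId), nil
}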
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint -func (c *Glue) UpdateDevEndpoint(input *UpdateDevEndpointInput) (*UpdateDevEndpointOutput, error) { - req, out := c.UpdateDevEndpointRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartExportLabelsTaskRun +func (c *Glue) StartExportLabelsTaskRun(input *StartExportLabelsTaskRunInput) (*StartExportLabelsTaskRunOutput, error) { + req, out := c.StartExportLabelsTaskRunRequest(input) return out, req.Send() } -// UpdateDevEndpointWithContext is the same as UpdateDevEndpoint with the addition of +// StartExportLabelsTaskRunWithContext is the same as StartExportLabelsTaskRun with the addition of // the ability to pass a context and additional request options. // -// See UpdateDevEndpoint for details on how to use this API operation. +// See StartExportLabelsTaskRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateDevEndpointWithContext(ctx aws.Context, input *UpdateDevEndpointInput, opts ...request.Option) (*UpdateDevEndpointOutput, error) { - req, out := c.UpdateDevEndpointRequest(input) +func (c *Glue) StartExportLabelsTaskRunWithContext(ctx aws.Context, input *StartExportLabelsTaskRunInput, opts ...request.Option) (*StartExportLabelsTaskRunOutput, error) { + req, out := c.StartExportLabelsTaskRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateJob = "UpdateJob" +const opStartImportLabelsTaskRun = "StartImportLabelsTaskRun" -// UpdateJobRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJob operation. The "output" return +// StartImportLabelsTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the StartImportLabelsTaskRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateJob for more information on using the UpdateJob +// See StartImportLabelsTaskRun for more information on using the StartImportLabelsTaskRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateJobRequest method. -// req, resp := client.UpdateJobRequest(params) +// // Example sending a request using the StartImportLabelsTaskRunRequest method. 
+// req, resp := client.StartImportLabelsTaskRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob -func (c *Glue) UpdateJobRequest(input *UpdateJobInput) (req *request.Request, output *UpdateJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartImportLabelsTaskRun +func (c *Glue) StartImportLabelsTaskRunRequest(input *StartImportLabelsTaskRunInput) (req *request.Request, output *StartImportLabelsTaskRunOutput) { op := &request.Operation{ - Name: opUpdateJob, + Name: opStartImportLabelsTaskRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateJobInput{} + input = &StartImportLabelsTaskRunInput{} } - output = &UpdateJobOutput{} + output = &StartImportLabelsTaskRunOutput{} req = c.newRequest(op, input, output) return } -// UpdateJob API operation for AWS Glue. +// StartImportLabelsTaskRun API operation for AWS Glue. // -// Updates an existing job definition. +// Enables you to provide additional labels (examples of truth) to be used to +// teach the machine learning transform and improve its quality. This API operation +// is generally used as part of the active learning workflow that starts with +// the StartMLLabelingSetGenerationTaskRun call and that ultimately results +// in improving the quality of your machine learning transform. +// +// After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine +// learning will have generated a series of questions for humans to answer. +// (Answering these questions is often called 'labeling' in the machine learning +// workflows). In the case of the FindMatches transform, these questions are +// of the form, “What is the correct way to group these rows together into +// groups composed entirely of matching records?” After the labeling process +// is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun. +// After StartImportLabelsTaskRun finishes, all future runs of the machine learning +// transform use the new and improved labels and perform a higher-quality transformation. +// +// By default, StartMLLabelingSetGenerationTaskRun continually learns from and +// combines all labels that you upload unless you set Replace to true. If you +// set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously +// uploaded labels and learns only from the exact set that you upload. Replacing +// labels can be helpful if you realize that you previously uploaded incorrect +// labels, and you believe that they are having a negative effect on your transform +// quality. +// +// You can check on the status of your task run by calling the GetMLTaskRun +// operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateJob for usage and error information. +// API operation StartImportLabelsTaskRun for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
+// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Two processes are trying to modify a resource simultaneously. +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob -func (c *Glue) UpdateJob(input *UpdateJobInput) (*UpdateJobOutput, error) { - req, out := c.UpdateJobRequest(input) +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartImportLabelsTaskRun +func (c *Glue) StartImportLabelsTaskRun(input *StartImportLabelsTaskRunInput) (*StartImportLabelsTaskRunOutput, error) { + req, out := c.StartImportLabelsTaskRunRequest(input) return out, req.Send() } -// UpdateJobWithContext is the same as UpdateJob with the addition of +// StartImportLabelsTaskRunWithContext is the same as StartImportLabelsTaskRun with the addition of // the ability to pass a context and additional request options. // -// See UpdateJob for details on how to use this API operation. +// See StartImportLabelsTaskRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateJobWithContext(ctx aws.Context, input *UpdateJobInput, opts ...request.Option) (*UpdateJobOutput, error) { - req, out := c.UpdateJobRequest(input) +func (c *Glue) StartImportLabelsTaskRunWithContext(ctx aws.Context, input *StartImportLabelsTaskRunInput, opts ...request.Option) (*StartImportLabelsTaskRunOutput, error) { + req, out := c.StartImportLabelsTaskRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdatePartition = "UpdatePartition" +const opStartJobRun = "StartJobRun" -// UpdatePartitionRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePartition operation. The "output" return +// StartJobRunRequest generates a "aws/request.Request" representing the +// client's request for the StartJobRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePartition for more information on using the UpdatePartition +// See StartJobRun for more information on using the StartJobRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdatePartitionRequest method. -// req, resp := client.UpdatePartitionRequest(params) +// // Example sending a request using the StartJobRunRequest method. 
+// req, resp := client.StartJobRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition -func (c *Glue) UpdatePartitionRequest(input *UpdatePartitionInput) (req *request.Request, output *UpdatePartitionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun +func (c *Glue) StartJobRunRequest(input *StartJobRunInput) (req *request.Request, output *StartJobRunOutput) { op := &request.Operation{ - Name: opUpdatePartition, + Name: opStartJobRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdatePartitionInput{} + input = &StartJobRunInput{} } - output = &UpdatePartitionOutput{} + output = &StartJobRunOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdatePartition API operation for AWS Glue. +// StartJobRun API operation for AWS Glue. // -// Updates a partition. +// Starts a job run using a job definition. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdatePartition for usage and error information. +// API operation StartJobRun for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition -func (c *Glue) UpdatePartition(input *UpdatePartitionInput) (*UpdatePartitionOutput, error) { - req, out := c.UpdatePartitionRequest(input) +// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" +// Too many jobs are being run concurrently. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartJobRun +func (c *Glue) StartJobRun(input *StartJobRunInput) (*StartJobRunOutput, error) { + req, out := c.StartJobRunRequest(input) return out, req.Send() } -// UpdatePartitionWithContext is the same as UpdatePartition with the addition of +// StartJobRunWithContext is the same as StartJobRun with the addition of // the ability to pass a context and additional request options. // -// See UpdatePartition for details on how to use this API operation. +// See StartJobRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
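A sketch tying the StartJobRun pieces above together: job arguments travel as a string-pointer map, and the new run's ID comes back on the output. The job name and argument key are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.StartJobRunWithContext(context.Background(), &glue.StartJobRunInput{
		JobName: aws.String("example-etl-job"), // placeholder
		Arguments: map[string]*string{
			// Glue passes job arguments through using this "--name" convention.
			"--source_path": aws.String("s3://example-bucket/in/"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started run:", aws.StringValue(out.JobRunId))
}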
-func (c *Glue) UpdatePartitionWithContext(ctx aws.Context, input *UpdatePartitionInput, opts ...request.Option) (*UpdatePartitionOutput, error) { - req, out := c.UpdatePartitionRequest(input) +func (c *Glue) StartJobRunWithContext(ctx aws.Context, input *StartJobRunInput, opts ...request.Option) (*StartJobRunOutput, error) { + req, out := c.StartJobRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTable = "UpdateTable" +const opStartMLEvaluationTaskRun = "StartMLEvaluationTaskRun" -// UpdateTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTable operation. The "output" return +// StartMLEvaluationTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the StartMLEvaluationTaskRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTable for more information on using the UpdateTable +// See StartMLEvaluationTaskRun for more information on using the StartMLEvaluationTaskRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTableRequest method. -// req, resp := client.UpdateTableRequest(params) +// // Example sending a request using the StartMLEvaluationTaskRunRequest method. +// req, resp := client.StartMLEvaluationTaskRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable -func (c *Glue) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartMLEvaluationTaskRun +func (c *Glue) StartMLEvaluationTaskRunRequest(input *StartMLEvaluationTaskRunInput) (req *request.Request, output *StartMLEvaluationTaskRunOutput) { op := &request.Operation{ - Name: opUpdateTable, + Name: opStartMLEvaluationTaskRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateTableInput{} + input = &StartMLEvaluationTaskRunInput{} } - output = &UpdateTableOutput{} + output = &StartMLEvaluationTaskRunOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateTable API operation for AWS Glue. +// StartMLEvaluationTaskRun API operation for AWS Glue. // -// Updates a metadata table in the Data Catalog. +// Starts a task to estimate the quality of the transform. +// +// When you provide label sets as examples of truth, AWS Glue machine learning +// uses some of those examples to learn from them. The rest of the labels are +// used as a test to estimate quality. +// +// Returns a unique identifier for the run. You can call GetMLTaskRun to get +// more information about the stats of the EvaluationTaskRun. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Glue's -// API operation UpdateTable for usage and error information. +// API operation StartMLEvaluationTaskRun for usage and error information. // // Returned Error Codes: // * ErrCodeEntityNotFoundException "EntityNotFoundException" @@ -9604,287 +10532,5234 @@ func (c *Glue) UpdateTableRequest(input *UpdateTableInput) (req *request.Request // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Two processes are trying to modify a resource simultaneously. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // -// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" -// A resource numerical limit was exceeded. +// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" +// Too many jobs are being run concurrently. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeMLTransformNotReadyException "MLTransformNotReadyException" +// The machine learning transform is not ready to run. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable -func (c *Glue) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartMLEvaluationTaskRun +func (c *Glue) StartMLEvaluationTaskRun(input *StartMLEvaluationTaskRunInput) (*StartMLEvaluationTaskRunOutput, error) { + req, out := c.StartMLEvaluationTaskRunRequest(input) return out, req.Send() } -// UpdateTableWithContext is the same as UpdateTable with the addition of +// StartMLEvaluationTaskRunWithContext is the same as StartMLEvaluationTaskRun with the addition of // the ability to pass a context and additional request options. // -// See UpdateTable for details on how to use this API operation. +// See StartMLEvaluationTaskRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) +func (c *Glue) StartMLEvaluationTaskRunWithContext(ctx aws.Context, input *StartMLEvaluationTaskRunInput, opts ...request.Option) (*StartMLEvaluationTaskRunOutput, error) { + req, out := c.StartMLEvaluationTaskRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTrigger = "UpdateTrigger" +const opStartMLLabelingSetGenerationTaskRun = "StartMLLabelingSetGenerationTaskRun" -// UpdateTriggerRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTrigger operation. The "output" return +// StartMLLabelingSetGenerationTaskRunRequest generates a "aws/request.Request" representing the +// client's request for the StartMLLabelingSetGenerationTaskRun operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTrigger for more information on using the UpdateTrigger +// See StartMLLabelingSetGenerationTaskRun for more information on using the StartMLLabelingSetGenerationTaskRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTriggerRequest method. -// req, resp := client.UpdateTriggerRequest(params) +// // Example sending a request using the StartMLLabelingSetGenerationTaskRunRequest method. +// req, resp := client.StartMLLabelingSetGenerationTaskRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger -func (c *Glue) UpdateTriggerRequest(input *UpdateTriggerInput) (req *request.Request, output *UpdateTriggerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartMLLabelingSetGenerationTaskRun +func (c *Glue) StartMLLabelingSetGenerationTaskRunRequest(input *StartMLLabelingSetGenerationTaskRunInput) (req *request.Request, output *StartMLLabelingSetGenerationTaskRunOutput) { op := &request.Operation{ - Name: opUpdateTrigger, + Name: opStartMLLabelingSetGenerationTaskRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateTriggerInput{} + input = &StartMLLabelingSetGenerationTaskRunInput{} } - output = &UpdateTriggerOutput{} + output = &StartMLLabelingSetGenerationTaskRunOutput{} req = c.newRequest(op, input, output) return } -// UpdateTrigger API operation for AWS Glue. +// StartMLLabelingSetGenerationTaskRun API operation for AWS Glue. // -// Updates a trigger definition. +// Starts the active learning workflow for your machine learning transform to +// improve the transform's quality by generating label sets and adding labels. +// +// When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have +// generated a "labeling set" or a set of questions for humans to answer. +// +// In the case of the FindMatches transform, these questions are of the form, +// “What is the correct way to group these rows together into groups composed +// entirely of matching records?” +// +// After the labeling process is finished, you can upload your labels with a +// call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, +// all future runs of the machine learning transform will use the new and improved +// labels and perform a higher-quality transformation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateTrigger for usage and error information. +// API operation StartMLLabelingSetGenerationTaskRun for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. 
-// // * ErrCodeEntityNotFoundException "EntityNotFoundException" // A specified entity does not exist // +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// Two processes are trying to modify a resource simultaneously. +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger -func (c *Glue) UpdateTrigger(input *UpdateTriggerInput) (*UpdateTriggerOutput, error) { - req, out := c.UpdateTriggerRequest(input) +// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" +// Too many jobs are being run concurrently. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartMLLabelingSetGenerationTaskRun +func (c *Glue) StartMLLabelingSetGenerationTaskRun(input *StartMLLabelingSetGenerationTaskRunInput) (*StartMLLabelingSetGenerationTaskRunOutput, error) { + req, out := c.StartMLLabelingSetGenerationTaskRunRequest(input) return out, req.Send() } -// UpdateTriggerWithContext is the same as UpdateTrigger with the addition of +// StartMLLabelingSetGenerationTaskRunWithContext is the same as StartMLLabelingSetGenerationTaskRun with the addition of // the ability to pass a context and additional request options. // -// See UpdateTrigger for details on how to use this API operation. +// See StartMLLabelingSetGenerationTaskRun for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateTriggerWithContext(ctx aws.Context, input *UpdateTriggerInput, opts ...request.Option) (*UpdateTriggerOutput, error) { - req, out := c.UpdateTriggerRequest(input) +func (c *Glue) StartMLLabelingSetGenerationTaskRunWithContext(ctx aws.Context, input *StartMLLabelingSetGenerationTaskRunInput, opts ...request.Option) (*StartMLLabelingSetGenerationTaskRunOutput, error) { + req, out := c.StartMLLabelingSetGenerationTaskRunRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateUserDefinedFunction = "UpdateUserDefinedFunction" +const opStartTrigger = "StartTrigger" -// UpdateUserDefinedFunctionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateUserDefinedFunction operation. The "output" return +// StartTriggerRequest generates a "aws/request.Request" representing the +// client's request for the StartTrigger operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateUserDefinedFunction for more information on using the UpdateUserDefinedFunction +// See StartTrigger for more information on using the StartTrigger // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
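A sketch of the request-lifecycle hook this boilerplate describes: build the request without sending it, mutate it (here, a custom header), then Send. The trigger name and header are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	req, out := svc.StartTriggerRequest(&glue.StartTriggerInput{
		Name: aws.String("example-trigger"), // placeholder
	})
	// Inject custom configuration before the request goes on the wire.
	req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // placeholder header
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	_ = out // populated only after Send returns without error
}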
// // -// // Example sending a request using the UpdateUserDefinedFunctionRequest method. -// req, resp := client.UpdateUserDefinedFunctionRequest(params) +// // Example sending a request using the StartTriggerRequest method. +// req, resp := client.StartTriggerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction -func (c *Glue) UpdateUserDefinedFunctionRequest(input *UpdateUserDefinedFunctionInput) (req *request.Request, output *UpdateUserDefinedFunctionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger +func (c *Glue) StartTriggerRequest(input *StartTriggerInput) (req *request.Request, output *StartTriggerOutput) { op := &request.Operation{ - Name: opUpdateUserDefinedFunction, + Name: opStartTrigger, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateUserDefinedFunctionInput{} + input = &StartTriggerInput{} } - output = &UpdateUserDefinedFunctionOutput{} + output = &StartTriggerOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateUserDefinedFunction API operation for AWS Glue. +// StartTrigger API operation for AWS Glue. // -// Updates an existing function definition in the Data Catalog. +// Starts an existing trigger. See Triggering Jobs (https://docs.aws.amazon.com/glue/latest/dg/trigger-job.html) +// for information about how different types of trigger are started. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Glue's -// API operation UpdateUserDefinedFunction for usage and error information. +// API operation StartTrigger for usage and error information. // // Returned Error Codes: -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// // * ErrCodeInvalidInputException "InvalidInputException" // The input provided was not valid. // // * ErrCodeInternalServiceException "InternalServiceException" // An internal service error occurred. // +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// // * ErrCodeOperationTimeoutException "OperationTimeoutException" // The operation timed out. // -// * ErrCodeEncryptionException "GlueEncryptionException" -// An encryption operation failed. +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction -func (c *Glue) UpdateUserDefinedFunction(input *UpdateUserDefinedFunctionInput) (*UpdateUserDefinedFunctionOutput, error) { - req, out := c.UpdateUserDefinedFunctionRequest(input) +// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" +// Too many jobs are being run concurrently. 
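The codes listed above surface as awserr.Error values, as the boilerplate notes; a sketch of branching on them (the single backoff-and-retry is illustrative, not SDK behavior):

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	input := &glue.StartTriggerInput{Name: aws.String("example-trigger")} // placeholder

	_, err := svc.StartTrigger(input)
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case glue.ErrCodeConcurrentRunsExceededException:
			time.Sleep(30 * time.Second) // illustrative single retry
			_, err = svc.StartTrigger(input)
		case glue.ErrCodeEntityNotFoundException:
			log.Fatalf("no such trigger: %s", aerr.Message())
		}
	}
	if err != nil {
		log.Fatal(err)
	}
}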
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartTrigger +func (c *Glue) StartTrigger(input *StartTriggerInput) (*StartTriggerOutput, error) { + req, out := c.StartTriggerRequest(input) return out, req.Send() } -// UpdateUserDefinedFunctionWithContext is the same as UpdateUserDefinedFunction with the addition of +// StartTriggerWithContext is the same as StartTrigger with the addition of // the ability to pass a context and additional request options. // -// See UpdateUserDefinedFunction for details on how to use this API operation. +// See StartTrigger for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Glue) UpdateUserDefinedFunctionWithContext(ctx aws.Context, input *UpdateUserDefinedFunctionInput, opts ...request.Option) (*UpdateUserDefinedFunctionOutput, error) { - req, out := c.UpdateUserDefinedFunctionRequest(input) +func (c *Glue) StartTriggerWithContext(ctx aws.Context, input *StartTriggerInput, opts ...request.Option) (*StartTriggerOutput, error) { + req, out := c.StartTriggerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartWorkflowRun = "StartWorkflowRun" + +// StartWorkflowRunRequest generates a "aws/request.Request" representing the +// client's request for the StartWorkflowRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartWorkflowRun for more information on using the StartWorkflowRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartWorkflowRunRequest method. +// req, resp := client.StartWorkflowRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartWorkflowRun +func (c *Glue) StartWorkflowRunRequest(input *StartWorkflowRunInput) (req *request.Request, output *StartWorkflowRunOutput) { + op := &request.Operation{ + Name: opStartWorkflowRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartWorkflowRunInput{} + } + + output = &StartWorkflowRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartWorkflowRun API operation for AWS Glue. +// +// Starts a new run of the specified workflow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation StartWorkflowRun for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. 
+// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. +// +// * ErrCodeConcurrentRunsExceededException "ConcurrentRunsExceededException" +// Too many jobs are being run concurrently. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartWorkflowRun +func (c *Glue) StartWorkflowRun(input *StartWorkflowRunInput) (*StartWorkflowRunOutput, error) { + req, out := c.StartWorkflowRunRequest(input) + return out, req.Send() +} + +// StartWorkflowRunWithContext is the same as StartWorkflowRun with the addition of +// the ability to pass a context and additional request options. +// +// See StartWorkflowRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) StartWorkflowRunWithContext(ctx aws.Context, input *StartWorkflowRunInput, opts ...request.Option) (*StartWorkflowRunOutput, error) { + req, out := c.StartWorkflowRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopCrawler = "StopCrawler" + +// StopCrawlerRequest generates a "aws/request.Request" representing the +// client's request for the StopCrawler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopCrawler for more information on using the StopCrawler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopCrawlerRequest method. +// req, resp := client.StopCrawlerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler +func (c *Glue) StopCrawlerRequest(input *StopCrawlerInput) (req *request.Request, output *StopCrawlerOutput) { + op := &request.Operation{ + Name: opStopCrawler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopCrawlerInput{} + } + + output = &StopCrawlerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopCrawler API operation for AWS Glue. +// +// If the specified crawler is running, stops the crawl. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation StopCrawler for usage and error information. 
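+//
+// For illustration only (an editor's sketch; `svc` and the crawler name are
+// hypothetical), stopping a crawler while tolerating the already-stopped case:
+//
+//    _, err := svc.StopCrawler(&glue.StopCrawlerInput{Name: aws.String("my-crawler")})
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == glue.ErrCodeCrawlerNotRunningException {
+//        err = nil // the crawler was not running; treat as success
+//    }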
+// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeCrawlerNotRunningException "CrawlerNotRunningException" +// The specified crawler is not running. +// +// * ErrCodeCrawlerStoppingException "CrawlerStoppingException" +// The specified crawler is stopping. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawler +func (c *Glue) StopCrawler(input *StopCrawlerInput) (*StopCrawlerOutput, error) { + req, out := c.StopCrawlerRequest(input) + return out, req.Send() +} + +// StopCrawlerWithContext is the same as StopCrawler with the addition of +// the ability to pass a context and additional request options. +// +// See StopCrawler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) StopCrawlerWithContext(ctx aws.Context, input *StopCrawlerInput, opts ...request.Option) (*StopCrawlerOutput, error) { + req, out := c.StopCrawlerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Defines an action to be initiated by a trigger. -type Action struct { +const opStopCrawlerSchedule = "StopCrawlerSchedule" + +// StopCrawlerScheduleRequest generates a "aws/request.Request" representing the +// client's request for the StopCrawlerSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopCrawlerSchedule for more information on using the StopCrawlerSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopCrawlerScheduleRequest method. +// req, resp := client.StopCrawlerScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule +func (c *Glue) StopCrawlerScheduleRequest(input *StopCrawlerScheduleInput) (req *request.Request, output *StopCrawlerScheduleOutput) { + op := &request.Operation{ + Name: opStopCrawlerSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopCrawlerScheduleInput{} + } + + output = &StopCrawlerScheduleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopCrawlerSchedule API operation for AWS Glue. +// +// Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does +// not stop the crawler if it is already running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
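+//
+// For illustration only (an editor's sketch): the runtime type assertion
+// described above, applied to an `err` returned by this operation:
+//
+//    if aerr, ok := err.(awserr.Error); ok {
+//        log.Printf("glue error: code=%s message=%s", aerr.Code(), aerr.Message())
+//    }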
+// +// See the AWS API reference guide for AWS Glue's +// API operation StopCrawlerSchedule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeSchedulerNotRunningException "SchedulerNotRunningException" +// The specified scheduler is not running. +// +// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" +// The specified scheduler is transitioning. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopCrawlerSchedule +func (c *Glue) StopCrawlerSchedule(input *StopCrawlerScheduleInput) (*StopCrawlerScheduleOutput, error) { + req, out := c.StopCrawlerScheduleRequest(input) + return out, req.Send() +} + +// StopCrawlerScheduleWithContext is the same as StopCrawlerSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See StopCrawlerSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) StopCrawlerScheduleWithContext(ctx aws.Context, input *StopCrawlerScheduleInput, opts ...request.Option) (*StopCrawlerScheduleOutput, error) { + req, out := c.StopCrawlerScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopTrigger = "StopTrigger" + +// StopTriggerRequest generates a "aws/request.Request" representing the +// client's request for the StopTrigger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopTrigger for more information on using the StopTrigger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopTriggerRequest method. +// req, resp := client.StopTriggerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger +func (c *Glue) StopTriggerRequest(input *StopTriggerInput) (req *request.Request, output *StopTriggerOutput) { + op := &request.Operation{ + Name: opStopTrigger, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopTriggerInput{} + } + + output = &StopTriggerOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopTrigger API operation for AWS Glue. +// +// Stops a specified trigger. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation StopTrigger for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopTrigger +func (c *Glue) StopTrigger(input *StopTriggerInput) (*StopTriggerOutput, error) { + req, out := c.StopTriggerRequest(input) + return out, req.Send() +} + +// StopTriggerWithContext is the same as StopTrigger with the addition of +// the ability to pass a context and additional request options. +// +// See StopTrigger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) StopTriggerWithContext(ctx aws.Context, input *StopTriggerInput, opts ...request.Option) (*StopTriggerOutput, error) { + req, out := c.StopTriggerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TagResource +func (c *Glue) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Glue. +// +// Adds tags to a resource. A tag is a label you can assign to an AWS resource. +// In AWS Glue, you can tag only certain resources. For information about what +// resources you can tag, see AWS Tags in AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TagResource +func (c *Glue) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UntagResource +func (c *Glue) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Glue. +// +// Removes tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
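+//
+// For illustration only (an editor's sketch; the ARN and tag key are
+// hypothetical), adding a tag with TagResource and removing it again:
+//
+//    arn := aws.String("arn:aws:glue:us-east-1:123456789012:trigger/my-trigger")
+//    _, _ = svc.TagResource(&glue.TagResourceInput{
+//        ResourceArn: arn,
+//        TagsToAdd:   map[string]*string{"team": aws.String("data")},
+//    })
+//    _, _ = svc.UntagResource(&glue.UntagResourceInput{
+//        ResourceArn:  arn,
+//        TagsToRemove: []*string{aws.String("team")},
+//    })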
+// +// See the AWS API reference guide for AWS Glue's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UntagResource +func (c *Glue) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateClassifier = "UpdateClassifier" + +// UpdateClassifierRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClassifier operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateClassifier for more information on using the UpdateClassifier +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateClassifierRequest method. +// req, resp := client.UpdateClassifierRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier +func (c *Glue) UpdateClassifierRequest(input *UpdateClassifierInput) (req *request.Request, output *UpdateClassifierOutput) { + op := &request.Operation{ + Name: opUpdateClassifier, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateClassifierInput{} + } + + output = &UpdateClassifierOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateClassifier API operation for AWS Glue. +// +// Modifies an existing classifier (a GrokClassifier, an XMLClassifier, a JsonClassifier, +// or a CsvClassifier, depending on which field is present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
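+//
+// For illustration only (an editor's sketch; names and delimiter are
+// hypothetical, and exactly one classifier field should be set to match the
+// classifier's existing type):
+//
+//    _, err := svc.UpdateClassifier(&glue.UpdateClassifierInput{
+//        CsvClassifier: &glue.UpdateCsvClassifierRequest{
+//            Name:      aws.String("my-csv-classifier"),
+//            Delimiter: aws.String(";"),
+//        },
+//    })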
+// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateClassifier for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeVersionMismatchException "VersionMismatchException" +// There was a version conflict. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateClassifier +func (c *Glue) UpdateClassifier(input *UpdateClassifierInput) (*UpdateClassifierOutput, error) { + req, out := c.UpdateClassifierRequest(input) + return out, req.Send() +} + +// UpdateClassifierWithContext is the same as UpdateClassifier with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateClassifier for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateClassifierWithContext(ctx aws.Context, input *UpdateClassifierInput, opts ...request.Option) (*UpdateClassifierOutput, error) { + req, out := c.UpdateClassifierRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConnection = "UpdateConnection" + +// UpdateConnectionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConnection for more information on using the UpdateConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConnectionRequest method. +// req, resp := client.UpdateConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection +func (c *Glue) UpdateConnectionRequest(input *UpdateConnectionInput) (req *request.Request, output *UpdateConnectionOutput) { + op := &request.Operation{ + Name: opUpdateConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConnectionInput{} + } + + output = &UpdateConnectionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConnection API operation for AWS Glue. +// +// Updates a connection definition in the Data Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
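+//
+// For illustration only (an editor's sketch; `input` is an assumed
+// *glue.UpdateConnectionInput prepared by the caller): the WithContext variant
+// pairs naturally with a deadline, per the context guidance repeated in this file:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.UpdateConnectionWithContext(ctx, input)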
+// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateConnection for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateConnection +func (c *Glue) UpdateConnection(input *UpdateConnectionInput) (*UpdateConnectionOutput, error) { + req, out := c.UpdateConnectionRequest(input) + return out, req.Send() +} + +// UpdateConnectionWithContext is the same as UpdateConnection with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateConnectionWithContext(ctx aws.Context, input *UpdateConnectionInput, opts ...request.Option) (*UpdateConnectionOutput, error) { + req, out := c.UpdateConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCrawler = "UpdateCrawler" + +// UpdateCrawlerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCrawler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCrawler for more information on using the UpdateCrawler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCrawlerRequest method. +// req, resp := client.UpdateCrawlerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler +func (c *Glue) UpdateCrawlerRequest(input *UpdateCrawlerInput) (req *request.Request, output *UpdateCrawlerOutput) { + op := &request.Operation{ + Name: opUpdateCrawler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCrawlerInput{} + } + + output = &UpdateCrawlerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateCrawler API operation for AWS Glue. +// +// Updates a crawler. If a crawler is running, you must stop it using StopCrawler +// before updating it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error.
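+//
+// For illustration only (an editor's sketch; the crawler name and schedule are
+// hypothetical), honoring the stop-before-update requirement stated above:
+//
+//    name := aws.String("my-crawler")
+//    _, _ = svc.StopCrawler(&glue.StopCrawlerInput{Name: name}) // CrawlerNotRunningException can be ignored here
+//    // a real caller would poll GetCrawler until the crawler has finished stopping
+//    _, err := svc.UpdateCrawler(&glue.UpdateCrawlerInput{
+//        Name:     name,
+//        Schedule: aws.String("cron(0 12 * * ? *)"),
+//    })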
+// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateCrawler for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeVersionMismatchException "VersionMismatchException" +// There was a version conflict. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeCrawlerRunningException "CrawlerRunningException" +// The operation cannot be performed because the crawler is already running. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawler +func (c *Glue) UpdateCrawler(input *UpdateCrawlerInput) (*UpdateCrawlerOutput, error) { + req, out := c.UpdateCrawlerRequest(input) + return out, req.Send() +} + +// UpdateCrawlerWithContext is the same as UpdateCrawler with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCrawler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateCrawlerWithContext(ctx aws.Context, input *UpdateCrawlerInput, opts ...request.Option) (*UpdateCrawlerOutput, error) { + req, out := c.UpdateCrawlerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCrawlerSchedule = "UpdateCrawlerSchedule" + +// UpdateCrawlerScheduleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCrawlerSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCrawlerSchedule for more information on using the UpdateCrawlerSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCrawlerScheduleRequest method. +// req, resp := client.UpdateCrawlerScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule +func (c *Glue) UpdateCrawlerScheduleRequest(input *UpdateCrawlerScheduleInput) (req *request.Request, output *UpdateCrawlerScheduleOutput) { + op := &request.Operation{ + Name: opUpdateCrawlerSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCrawlerScheduleInput{} + } + + output = &UpdateCrawlerScheduleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateCrawlerSchedule API operation for AWS Glue. +// +// Updates the schedule of a crawler using a cron expression. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateCrawlerSchedule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeVersionMismatchException "VersionMismatchException" +// There was a version conflict. +// +// * ErrCodeSchedulerTransitioningException "SchedulerTransitioningException" +// The specified scheduler is transitioning. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateCrawlerSchedule +func (c *Glue) UpdateCrawlerSchedule(input *UpdateCrawlerScheduleInput) (*UpdateCrawlerScheduleOutput, error) { + req, out := c.UpdateCrawlerScheduleRequest(input) + return out, req.Send() +} + +// UpdateCrawlerScheduleWithContext is the same as UpdateCrawlerSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCrawlerSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateCrawlerScheduleWithContext(ctx aws.Context, input *UpdateCrawlerScheduleInput, opts ...request.Option) (*UpdateCrawlerScheduleOutput, error) { + req, out := c.UpdateCrawlerScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDatabase = "UpdateDatabase" + +// UpdateDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDatabase operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDatabase for more information on using the UpdateDatabase +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDatabaseRequest method. +// req, resp := client.UpdateDatabaseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase +func (c *Glue) UpdateDatabaseRequest(input *UpdateDatabaseInput) (req *request.Request, output *UpdateDatabaseOutput) { + op := &request.Operation{ + Name: opUpdateDatabase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDatabaseInput{} + } + + output = &UpdateDatabaseOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateDatabase API operation for AWS Glue. 
+// +// Updates an existing database definition in a Data Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateDatabase for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDatabase +func (c *Glue) UpdateDatabase(input *UpdateDatabaseInput) (*UpdateDatabaseOutput, error) { + req, out := c.UpdateDatabaseRequest(input) + return out, req.Send() +} + +// UpdateDatabaseWithContext is the same as UpdateDatabase with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDatabase for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateDatabaseWithContext(ctx aws.Context, input *UpdateDatabaseInput, opts ...request.Option) (*UpdateDatabaseOutput, error) { + req, out := c.UpdateDatabaseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDevEndpoint = "UpdateDevEndpoint" + +// UpdateDevEndpointRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDevEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDevEndpoint for more information on using the UpdateDevEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDevEndpointRequest method. 
+// req, resp := client.UpdateDevEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint +func (c *Glue) UpdateDevEndpointRequest(input *UpdateDevEndpointInput) (req *request.Request, output *UpdateDevEndpointOutput) { + op := &request.Operation{ + Name: opUpdateDevEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDevEndpointInput{} + } + + output = &UpdateDevEndpointOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateDevEndpoint API operation for AWS Glue. +// +// Updates a specified development endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateDevEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeValidationException "ValidationException" +// A value could not be validated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateDevEndpoint +func (c *Glue) UpdateDevEndpoint(input *UpdateDevEndpointInput) (*UpdateDevEndpointOutput, error) { + req, out := c.UpdateDevEndpointRequest(input) + return out, req.Send() +} + +// UpdateDevEndpointWithContext is the same as UpdateDevEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDevEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateDevEndpointWithContext(ctx aws.Context, input *UpdateDevEndpointInput, opts ...request.Option) (*UpdateDevEndpointOutput, error) { + req, out := c.UpdateDevEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJob = "UpdateJob" + +// UpdateJobRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateJob for more information on using the UpdateJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateJobRequest method. 
+// req, resp := client.UpdateJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob +func (c *Glue) UpdateJobRequest(input *UpdateJobInput) (req *request.Request, output *UpdateJobOutput) { + op := &request.Operation{ + Name: opUpdateJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateJobInput{} + } + + output = &UpdateJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateJob API operation for AWS Glue. +// +// Updates an existing job definition. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateJob +func (c *Glue) UpdateJob(input *UpdateJobInput) (*UpdateJobOutput, error) { + req, out := c.UpdateJobRequest(input) + return out, req.Send() +} + +// UpdateJobWithContext is the same as UpdateJob with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateJobWithContext(ctx aws.Context, input *UpdateJobInput, opts ...request.Option) (*UpdateJobOutput, error) { + req, out := c.UpdateJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateMLTransform = "UpdateMLTransform" + +// UpdateMLTransformRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMLTransform operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMLTransform for more information on using the UpdateMLTransform +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateMLTransformRequest method. 
+// req, resp := client.UpdateMLTransformRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateMLTransform +func (c *Glue) UpdateMLTransformRequest(input *UpdateMLTransformInput) (req *request.Request, output *UpdateMLTransformOutput) { + op := &request.Operation{ + Name: opUpdateMLTransform, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMLTransformInput{} + } + + output = &UpdateMLTransformOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMLTransform API operation for AWS Glue. +// +// Updates an existing machine learning transform. Call this operation to tune +// the algorithm parameters to achieve better results. +// +// After calling this operation, you can call the StartMLEvaluationTaskRun operation +// to assess how well your new parameters achieved your goals (such as improving +// the quality of your machine learning transform, or making it more cost-effective). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateMLTransform for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Access to a resource was denied. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateMLTransform +func (c *Glue) UpdateMLTransform(input *UpdateMLTransformInput) (*UpdateMLTransformOutput, error) { + req, out := c.UpdateMLTransformRequest(input) + return out, req.Send() +} + +// UpdateMLTransformWithContext is the same as UpdateMLTransform with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMLTransform for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateMLTransformWithContext(ctx aws.Context, input *UpdateMLTransformInput, opts ...request.Option) (*UpdateMLTransformOutput, error) { + req, out := c.UpdateMLTransformRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePartition = "UpdatePartition" + +// UpdatePartitionRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
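+//
+// For illustration only (an editor's sketch; database, table, and partition
+// values are hypothetical), a typical UpdatePartition call:
+//
+//    _, err := svc.UpdatePartition(&glue.UpdatePartitionInput{
+//        DatabaseName:       aws.String("sales_db"),
+//        TableName:          aws.String("events"),
+//        PartitionValueList: []*string{aws.String("2019"), aws.String("11")},
+//        PartitionInput:     partitionInput, // assumed *glue.PartitionInput prepared by the caller
+//    })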
+// +// See UpdatePartition for more information on using the UpdatePartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePartitionRequest method. +// req, resp := client.UpdatePartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition +func (c *Glue) UpdatePartitionRequest(input *UpdatePartitionInput) (req *request.Request, output *UpdatePartitionOutput) { + op := &request.Operation{ + Name: opUpdatePartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePartitionInput{} + } + + output = &UpdatePartitionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdatePartition API operation for AWS Glue. +// +// Updates a partition. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdatePartition for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdatePartition +func (c *Glue) UpdatePartition(input *UpdatePartitionInput) (*UpdatePartitionOutput, error) { + req, out := c.UpdatePartitionRequest(input) + return out, req.Send() +} + +// UpdatePartitionWithContext is the same as UpdatePartition with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdatePartitionWithContext(ctx aws.Context, input *UpdatePartitionInput, opts ...request.Option) (*UpdatePartitionOutput, error) { + req, out := c.UpdatePartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateTable for more information on using the UpdateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable +func (c *Glue) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + output = &UpdateTableOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateTable API operation for AWS Glue. +// +// Updates a metadata table in the Data Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Two processes are trying to modify a resource simultaneously. +// +// * ErrCodeResourceNumberLimitExceededException "ResourceNumberLimitExceededException" +// A resource numerical limit was exceeded. +// +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTable +func (c *Glue) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + return out, req.Send() +} + +// UpdateTableWithContext is the same as UpdateTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTrigger = "UpdateTrigger" + +// UpdateTriggerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrigger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTrigger for more information on using the UpdateTrigger
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateTriggerRequest method.
+// req, resp := client.UpdateTriggerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger
+func (c *Glue) UpdateTriggerRequest(input *UpdateTriggerInput) (req *request.Request, output *UpdateTriggerOutput) {
+ op := &request.Operation{
+ Name: opUpdateTrigger,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTriggerInput{}
+ }
+
+ output = &UpdateTriggerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateTrigger API operation for AWS Glue.
+//
+// Updates a trigger definition.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Glue's
+// API operation UpdateTrigger for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeEntityNotFoundException "EntityNotFoundException"
+// A specified entity does not exist.
+//
+// * ErrCodeOperationTimeoutException "OperationTimeoutException"
+// The operation timed out.
+//
+// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+// Two processes are trying to modify a resource simultaneously.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateTrigger
+func (c *Glue) UpdateTrigger(input *UpdateTriggerInput) (*UpdateTriggerOutput, error) {
+ req, out := c.UpdateTriggerRequest(input)
+ return out, req.Send()
+}
+
+// UpdateTriggerWithContext is the same as UpdateTrigger with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTrigger for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Glue) UpdateTriggerWithContext(ctx aws.Context, input *UpdateTriggerInput, opts ...request.Option) (*UpdateTriggerOutput, error) {
+ req, out := c.UpdateTriggerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateUserDefinedFunction = "UpdateUserDefinedFunction"
+
+// UpdateUserDefinedFunctionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateUserDefinedFunction operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateUserDefinedFunction for more information on using the UpdateUserDefinedFunction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateUserDefinedFunctionRequest method. +// req, resp := client.UpdateUserDefinedFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction +func (c *Glue) UpdateUserDefinedFunctionRequest(input *UpdateUserDefinedFunctionInput) (req *request.Request, output *UpdateUserDefinedFunctionOutput) { + op := &request.Operation{ + Name: opUpdateUserDefinedFunction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserDefinedFunctionInput{} + } + + output = &UpdateUserDefinedFunctionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateUserDefinedFunction API operation for AWS Glue. +// +// Updates an existing function definition in the Data Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateUserDefinedFunction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEncryptionException "GlueEncryptionException" +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateUserDefinedFunction +func (c *Glue) UpdateUserDefinedFunction(input *UpdateUserDefinedFunctionInput) (*UpdateUserDefinedFunctionOutput, error) { + req, out := c.UpdateUserDefinedFunctionRequest(input) + return out, req.Send() +} + +// UpdateUserDefinedFunctionWithContext is the same as UpdateUserDefinedFunction with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateUserDefinedFunction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateUserDefinedFunctionWithContext(ctx aws.Context, input *UpdateUserDefinedFunctionInput, opts ...request.Option) (*UpdateUserDefinedFunctionOutput, error) { + req, out := c.UpdateUserDefinedFunctionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send()
+}
+
+const opUpdateWorkflow = "UpdateWorkflow"
+
+// UpdateWorkflowRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateWorkflow operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateWorkflow for more information on using the UpdateWorkflow
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateWorkflowRequest method.
+// req, resp := client.UpdateWorkflowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateWorkflow
+func (c *Glue) UpdateWorkflowRequest(input *UpdateWorkflowInput) (req *request.Request, output *UpdateWorkflowOutput) {
+ op := &request.Operation{
+ Name: opUpdateWorkflow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateWorkflowInput{}
+ }
+
+ output = &UpdateWorkflowOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateWorkflow API operation for AWS Glue.
+//
+// Updates an existing workflow.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Glue's
+// API operation UpdateWorkflow for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// * ErrCodeEntityNotFoundException "EntityNotFoundException"
+// A specified entity does not exist.
+//
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeOperationTimeoutException "OperationTimeoutException"
+// The operation timed out.
+//
+// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+// Two processes are trying to modify a resource simultaneously.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateWorkflow
+func (c *Glue) UpdateWorkflow(input *UpdateWorkflowInput) (*UpdateWorkflowOutput, error) {
+ req, out := c.UpdateWorkflowRequest(input)
+ return out, req.Send()
+}
+
+// UpdateWorkflowWithContext is the same as UpdateWorkflow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateWorkflow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Glue) UpdateWorkflowWithContext(ctx aws.Context, input *UpdateWorkflowInput, opts ...request.Option) (*UpdateWorkflowOutput, error) {
+ req, out := c.UpdateWorkflowRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Defines an action to be initiated by a trigger.
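+//
+// As a minimal sketch from a caller's package (the job name and timeout value
+// here are illustrative, not defaults), an Action that starts a job can be
+// assembled with the chainable setters below:
+//
+// action := (&glue.Action{}).
+// SetJobName("example-etl-job").
+// SetTimeout(60)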
+type Action struct { + _ struct{} `type:"structure"` + + // The job arguments used when this trigger fires. For this job run, they replace + // the default arguments set in the job definition itself. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + Arguments map[string]*string `type:"map"` + + // The name of the crawler to be used with this action. + CrawlerName *string `min:"1" type:"string"` + + // The name of a job to be executed. + JobName *string `min:"1" type:"string"` + + // Specifies configuration properties of a job run notification. + NotificationProperty *NotificationProperty `type:"structure"` + + // The name of the SecurityConfiguration structure to be used with this action. + SecurityConfiguration *string `min:"1" type:"string"` + + // The JobRun timeout in minutes. This is the maximum time that a job run can + // consume resources before it is terminated and enters TIMEOUT status. The + // default is 2,880 minutes (48 hours). This overrides the timeout value set + // in the parent job. + Timeout *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Action"} + if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.NotificationProperty != nil { + if err := s.NotificationProperty.Validate(); err != nil { + invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArguments sets the Arguments field's value. +func (s *Action) SetArguments(v map[string]*string) *Action { + s.Arguments = v + return s +} + +// SetCrawlerName sets the CrawlerName field's value. +func (s *Action) SetCrawlerName(v string) *Action { + s.CrawlerName = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *Action) SetJobName(v string) *Action { + s.JobName = &v + return s +} + +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *Action) SetNotificationProperty(v *NotificationProperty) *Action { + s.NotificationProperty = v + return s +} + +// SetSecurityConfiguration sets the SecurityConfiguration field's value. 
+func (s *Action) SetSecurityConfiguration(v string) *Action { + s.SecurityConfiguration = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *Action) SetTimeout(v int64) *Action { + s.Timeout = &v + return s +} + +type BatchCreatePartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the catalog in which the partition is to be created. Currently, + // this should be the AWS account ID. + CatalogId *string `min:"1" type:"string"` + + // The name of the metadata database in which the partition is to be created. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of PartitionInput structures that define the partitions to be created. + // + // PartitionInputList is a required field + PartitionInputList []*PartitionInput `type:"list" required:"true"` + + // The name of the metadata table in which the partition is to be created. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCreatePartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCreatePartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchCreatePartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchCreatePartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionInputList == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionInputList")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.PartitionInputList != nil { + for i, v := range s.PartitionInputList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionInputList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchCreatePartitionInput) SetCatalogId(v string) *BatchCreatePartitionInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *BatchCreatePartitionInput) SetDatabaseName(v string) *BatchCreatePartitionInput { + s.DatabaseName = &v + return s +} + +// SetPartitionInputList sets the PartitionInputList field's value. +func (s *BatchCreatePartitionInput) SetPartitionInputList(v []*PartitionInput) *BatchCreatePartitionInput { + s.PartitionInputList = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchCreatePartitionInput) SetTableName(v string) *BatchCreatePartitionInput { + s.TableName = &v + return s +} + +type BatchCreatePartitionOutput struct { + _ struct{} `type:"structure"` + + // The errors encountered when trying to create the requested partitions. 
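+ // Partitions are created independently, so an entry here does not cause the
+ // API call itself to fail; this list is worth checking even when the returned
+ // error is nil.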
+ Errors []*PartitionError `type:"list"` +} + +// String returns the string representation +func (s BatchCreatePartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCreatePartitionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchCreatePartitionOutput) SetErrors(v []*PartitionError) *BatchCreatePartitionOutput { + s.Errors = v + return s +} + +type BatchDeleteConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which the connections reside. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // A list of names of the connections to delete. + // + // ConnectionNameList is a required field + ConnectionNameList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteConnectionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ConnectionNameList == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionNameList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchDeleteConnectionInput) SetCatalogId(v string) *BatchDeleteConnectionInput { + s.CatalogId = &v + return s +} + +// SetConnectionNameList sets the ConnectionNameList field's value. +func (s *BatchDeleteConnectionInput) SetConnectionNameList(v []*string) *BatchDeleteConnectionInput { + s.ConnectionNameList = v + return s +} + +type BatchDeleteConnectionOutput struct { + _ struct{} `type:"structure"` + + // A map of the names of connections that were not successfully deleted to error + // details. + Errors map[string]*ErrorDetail `type:"map"` + + // A list of names of the connection definitions that were successfully deleted. + Succeeded []*string `type:"list"` +} + +// String returns the string representation +func (s BatchDeleteConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteConnectionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchDeleteConnectionOutput) SetErrors(v map[string]*ErrorDetail) *BatchDeleteConnectionOutput { + s.Errors = v + return s +} + +// SetSucceeded sets the Succeeded field's value. +func (s *BatchDeleteConnectionOutput) SetSucceeded(v []*string) *BatchDeleteConnectionOutput { + s.Succeeded = v + return s +} + +type BatchDeletePartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partition to be deleted resides. If + // none is provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database in which the table in question resides. 
+ //
+ // DatabaseName is a required field
+ DatabaseName *string `min:"1" type:"string" required:"true"`
+
+ // A list of PartitionValueList structures that define the partitions to be
+ // deleted.
+ //
+ // PartitionsToDelete is a required field
+ PartitionsToDelete []*PartitionValueList `type:"list" required:"true"`
+
+ // The name of the table that contains the partitions to be deleted.
+ //
+ // TableName is a required field
+ TableName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchDeletePartitionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeletePartitionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchDeletePartitionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchDeletePartitionInput"}
+ if s.CatalogId != nil && len(*s.CatalogId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1))
+ }
+ if s.DatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatabaseName"))
+ }
+ if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1))
+ }
+ if s.PartitionsToDelete == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartitionsToDelete"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
+ }
+ if s.PartitionsToDelete != nil {
+ for i, v := range s.PartitionsToDelete {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToDelete", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCatalogId sets the CatalogId field's value.
+func (s *BatchDeletePartitionInput) SetCatalogId(v string) *BatchDeletePartitionInput {
+ s.CatalogId = &v
+ return s
+}
+
+// SetDatabaseName sets the DatabaseName field's value.
+func (s *BatchDeletePartitionInput) SetDatabaseName(v string) *BatchDeletePartitionInput {
+ s.DatabaseName = &v
+ return s
+}
+
+// SetPartitionsToDelete sets the PartitionsToDelete field's value.
+func (s *BatchDeletePartitionInput) SetPartitionsToDelete(v []*PartitionValueList) *BatchDeletePartitionInput {
+ s.PartitionsToDelete = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *BatchDeletePartitionInput) SetTableName(v string) *BatchDeletePartitionInput {
+ s.TableName = &v
+ return s
+}
+
+type BatchDeletePartitionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The errors encountered when trying to delete the requested partitions.
+ Errors []*PartitionError `type:"list"`
+}
+
+// String returns the string representation
+func (s BatchDeletePartitionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeletePartitionOutput) GoString() string {
+ return s.String()
+}
+
+// SetErrors sets the Errors field's value.
+func (s *BatchDeletePartitionOutput) SetErrors(v []*PartitionError) *BatchDeletePartitionOutput {
+ s.Errors = v
+ return s
+}
+
+type BatchDeleteTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Data Catalog where the table resides.
If none is provided,
+ // the AWS account ID is used by default.
+ CatalogId *string `min:"1" type:"string"`
+
+ // The name of the catalog database in which the tables to delete reside. For
+ // Hive compatibility, this name is entirely lowercase.
+ //
+ // DatabaseName is a required field
+ DatabaseName *string `min:"1" type:"string" required:"true"`
+
+ // A list of the tables to delete.
+ //
+ // TablesToDelete is a required field
+ TablesToDelete []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchDeleteTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeleteTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchDeleteTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchDeleteTableInput"}
+ if s.CatalogId != nil && len(*s.CatalogId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1))
+ }
+ if s.DatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatabaseName"))
+ }
+ if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1))
+ }
+ if s.TablesToDelete == nil {
+ invalidParams.Add(request.NewErrParamRequired("TablesToDelete"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCatalogId sets the CatalogId field's value.
+func (s *BatchDeleteTableInput) SetCatalogId(v string) *BatchDeleteTableInput {
+ s.CatalogId = &v
+ return s
+}
+
+// SetDatabaseName sets the DatabaseName field's value.
+func (s *BatchDeleteTableInput) SetDatabaseName(v string) *BatchDeleteTableInput {
+ s.DatabaseName = &v
+ return s
+}
+
+// SetTablesToDelete sets the TablesToDelete field's value.
+func (s *BatchDeleteTableInput) SetTablesToDelete(v []*string) *BatchDeleteTableInput {
+ s.TablesToDelete = v
+ return s
+}
+
+type BatchDeleteTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of errors encountered in attempting to delete the specified tables.
+ Errors []*TableError `type:"list"`
+}
+
+// String returns the string representation
+func (s BatchDeleteTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeleteTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetErrors sets the Errors field's value.
+func (s *BatchDeleteTableOutput) SetErrors(v []*TableError) *BatchDeleteTableOutput {
+ s.Errors = v
+ return s
+}
+
+type BatchDeleteTableVersionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Data Catalog where the tables reside. If none is provided,
+ // the AWS account ID is used by default.
+ CatalogId *string `min:"1" type:"string"`
+
+ // The database in the catalog in which the table resides. For Hive compatibility,
+ // this name is entirely lowercase.
+ //
+ // DatabaseName is a required field
+ DatabaseName *string `min:"1" type:"string" required:"true"`
+
+ // The name of the table. For Hive compatibility, this name is entirely lowercase.
+ //
+ // TableName is a required field
+ TableName *string `min:"1" type:"string" required:"true"`
+
+ // A list of the IDs of versions to be deleted. A VersionId is a string representation
+ // of an integer. Each version is incremented by 1.
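+ // For example, successive versions of a table might carry the VersionIds
+ // "1", "2", and "3".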
+ // + // VersionIds is a required field + VersionIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteTableVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteTableVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteTableVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteTableVersionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.VersionIds == nil { + invalidParams.Add(request.NewErrParamRequired("VersionIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchDeleteTableVersionInput) SetCatalogId(v string) *BatchDeleteTableVersionInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *BatchDeleteTableVersionInput) SetDatabaseName(v string) *BatchDeleteTableVersionInput { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchDeleteTableVersionInput) SetTableName(v string) *BatchDeleteTableVersionInput { + s.TableName = &v + return s +} + +// SetVersionIds sets the VersionIds field's value. +func (s *BatchDeleteTableVersionInput) SetVersionIds(v []*string) *BatchDeleteTableVersionInput { + s.VersionIds = v + return s +} + +type BatchDeleteTableVersionOutput struct { + _ struct{} `type:"structure"` + + // A list of errors encountered while trying to delete the specified table versions. + Errors []*TableVersionError `type:"list"` +} + +// String returns the string representation +func (s BatchDeleteTableVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteTableVersionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchDeleteTableVersionOutput) SetErrors(v []*TableVersionError) *BatchDeleteTableVersionOutput { + s.Errors = v + return s +} + +type BatchGetCrawlersInput struct { + _ struct{} `type:"structure"` + + // A list of crawler names, which might be the names returned from the ListCrawlers + // operation. + // + // CrawlerNames is a required field + CrawlerNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetCrawlersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCrawlersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
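+// The SDK runs this validation automatically before the request is sent, so
+// an explicit call is only needed to check parameters ahead of time.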
+func (s *BatchGetCrawlersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchGetCrawlersInput"}
+ if s.CrawlerNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("CrawlerNames"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCrawlerNames sets the CrawlerNames field's value.
+func (s *BatchGetCrawlersInput) SetCrawlerNames(v []*string) *BatchGetCrawlersInput {
+ s.CrawlerNames = v
+ return s
+}
+
+type BatchGetCrawlersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of crawler definitions.
+ Crawlers []*Crawler `type:"list"`
+
+ // A list of names of crawlers that were not found.
+ CrawlersNotFound []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetCrawlersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetCrawlersOutput) GoString() string {
+ return s.String()
+}
+
+// SetCrawlers sets the Crawlers field's value.
+func (s *BatchGetCrawlersOutput) SetCrawlers(v []*Crawler) *BatchGetCrawlersOutput {
+ s.Crawlers = v
+ return s
+}
+
+// SetCrawlersNotFound sets the CrawlersNotFound field's value.
+func (s *BatchGetCrawlersOutput) SetCrawlersNotFound(v []*string) *BatchGetCrawlersOutput {
+ s.CrawlersNotFound = v
+ return s
+}
+
+type BatchGetDevEndpointsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of DevEndpoint names, which might be the names returned from the
+ // ListDevEndpoints operation.
+ //
+ // DevEndpointNames is a required field
+ DevEndpointNames []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchGetDevEndpointsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetDevEndpointsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetDevEndpointsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchGetDevEndpointsInput"}
+ if s.DevEndpointNames == nil {
+ invalidParams.Add(request.NewErrParamRequired("DevEndpointNames"))
+ }
+ if s.DevEndpointNames != nil && len(s.DevEndpointNames) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DevEndpointNames", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDevEndpointNames sets the DevEndpointNames field's value.
+func (s *BatchGetDevEndpointsInput) SetDevEndpointNames(v []*string) *BatchGetDevEndpointsInput {
+ s.DevEndpointNames = v
+ return s
+}
+
+type BatchGetDevEndpointsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of DevEndpoint definitions.
+ DevEndpoints []*DevEndpoint `type:"list"`
+
+ // A list of DevEndpoints not found.
+ DevEndpointsNotFound []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetDevEndpointsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetDevEndpointsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDevEndpoints sets the DevEndpoints field's value.
+func (s *BatchGetDevEndpointsOutput) SetDevEndpoints(v []*DevEndpoint) *BatchGetDevEndpointsOutput {
+ s.DevEndpoints = v
+ return s
+}
+
+// SetDevEndpointsNotFound sets the DevEndpointsNotFound field's value.
+func (s *BatchGetDevEndpointsOutput) SetDevEndpointsNotFound(v []*string) *BatchGetDevEndpointsOutput { + s.DevEndpointsNotFound = v + return s +} + +type BatchGetJobsInput struct { + _ struct{} `type:"structure"` + + // A list of job names, which might be the names returned from the ListJobs + // operation. + // + // JobNames is a required field + JobNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetJobsInput"} + if s.JobNames == nil { + invalidParams.Add(request.NewErrParamRequired("JobNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobNames sets the JobNames field's value. +func (s *BatchGetJobsInput) SetJobNames(v []*string) *BatchGetJobsInput { + s.JobNames = v + return s +} + +type BatchGetJobsOutput struct { + _ struct{} `type:"structure"` + + // A list of job definitions. + Jobs []*Job `type:"list"` + + // A list of names of jobs not found. + JobsNotFound []*string `type:"list"` +} + +// String returns the string representation +func (s BatchGetJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetJobsOutput) GoString() string { + return s.String() +} + +// SetJobs sets the Jobs field's value. +func (s *BatchGetJobsOutput) SetJobs(v []*Job) *BatchGetJobsOutput { + s.Jobs = v + return s +} + +// SetJobsNotFound sets the JobsNotFound field's value. +func (s *BatchGetJobsOutput) SetJobsNotFound(v []*string) *BatchGetJobsOutput { + s.JobsNotFound = v + return s +} + +type BatchGetPartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of partition values identifying the partitions to retrieve. + // + // PartitionsToGet is a required field + PartitionsToGet []*PartitionValueList `type:"list" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetPartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetPartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
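+// A direct call can reject bad parameters before a request is built, as in
+// this sketch from a caller's package (the database, table, and variable
+// names are placeholders):
+//
+// in := (&glue.BatchGetPartitionInput{}).
+// SetDatabaseName("exampledb").
+// SetTableName("exampletable").
+// SetPartitionsToGet(partitionsToGet)
+// if err := in.Validate(); err != nil {
+// // handle the invalid input before calling client.BatchGetPartition(in)
+// }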
+func (s *BatchGetPartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetPartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionsToGet == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionsToGet")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.PartitionsToGet != nil { + for i, v := range s.PartitionsToGet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToGet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchGetPartitionInput) SetCatalogId(v string) *BatchGetPartitionInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *BatchGetPartitionInput) SetDatabaseName(v string) *BatchGetPartitionInput { + s.DatabaseName = &v + return s +} + +// SetPartitionsToGet sets the PartitionsToGet field's value. +func (s *BatchGetPartitionInput) SetPartitionsToGet(v []*PartitionValueList) *BatchGetPartitionInput { + s.PartitionsToGet = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchGetPartitionInput) SetTableName(v string) *BatchGetPartitionInput { + s.TableName = &v + return s +} + +type BatchGetPartitionOutput struct { + _ struct{} `type:"structure"` + + // A list of the requested partitions. + Partitions []*Partition `type:"list"` + + // A list of the partition values in the request for which partitions were not + // returned. + UnprocessedKeys []*PartitionValueList `type:"list"` +} + +// String returns the string representation +func (s BatchGetPartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetPartitionOutput) GoString() string { + return s.String() +} + +// SetPartitions sets the Partitions field's value. +func (s *BatchGetPartitionOutput) SetPartitions(v []*Partition) *BatchGetPartitionOutput { + s.Partitions = v + return s +} + +// SetUnprocessedKeys sets the UnprocessedKeys field's value. +func (s *BatchGetPartitionOutput) SetUnprocessedKeys(v []*PartitionValueList) *BatchGetPartitionOutput { + s.UnprocessedKeys = v + return s +} + +type BatchGetTriggersInput struct { + _ struct{} `type:"structure"` + + // A list of trigger names, which may be the names returned from the ListTriggers + // operation. + // + // TriggerNames is a required field + TriggerNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetTriggersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetTriggersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchGetTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetTriggersInput"} + if s.TriggerNames == nil { + invalidParams.Add(request.NewErrParamRequired("TriggerNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTriggerNames sets the TriggerNames field's value. +func (s *BatchGetTriggersInput) SetTriggerNames(v []*string) *BatchGetTriggersInput { + s.TriggerNames = v + return s +} + +type BatchGetTriggersOutput struct { + _ struct{} `type:"structure"` + + // A list of trigger definitions. + Triggers []*Trigger `type:"list"` + + // A list of names of triggers not found. + TriggersNotFound []*string `type:"list"` +} + +// String returns the string representation +func (s BatchGetTriggersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetTriggersOutput) GoString() string { + return s.String() +} + +// SetTriggers sets the Triggers field's value. +func (s *BatchGetTriggersOutput) SetTriggers(v []*Trigger) *BatchGetTriggersOutput { + s.Triggers = v + return s +} + +// SetTriggersNotFound sets the TriggersNotFound field's value. +func (s *BatchGetTriggersOutput) SetTriggersNotFound(v []*string) *BatchGetTriggersOutput { + s.TriggersNotFound = v + return s +} + +type BatchGetWorkflowsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to include a graph when returning the workflow resource + // metadata. + IncludeGraph *bool `type:"boolean"` + + // A list of workflow names, which may be the names returned from the ListWorkflows + // operation. + // + // Names is a required field + Names []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetWorkflowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetWorkflowsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetWorkflowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetWorkflowsInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + if s.Names != nil && len(s.Names) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Names", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIncludeGraph sets the IncludeGraph field's value. +func (s *BatchGetWorkflowsInput) SetIncludeGraph(v bool) *BatchGetWorkflowsInput { + s.IncludeGraph = &v + return s +} + +// SetNames sets the Names field's value. +func (s *BatchGetWorkflowsInput) SetNames(v []*string) *BatchGetWorkflowsInput { + s.Names = v + return s +} + +type BatchGetWorkflowsOutput struct { + _ struct{} `type:"structure"` + + // A list of names of workflows not found. + MissingWorkflows []*string `min:"1" type:"list"` + + // A list of workflow resource metadata. + Workflows []*Workflow `min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchGetWorkflowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetWorkflowsOutput) GoString() string { + return s.String() +} + +// SetMissingWorkflows sets the MissingWorkflows field's value. 
+func (s *BatchGetWorkflowsOutput) SetMissingWorkflows(v []*string) *BatchGetWorkflowsOutput { + s.MissingWorkflows = v + return s +} + +// SetWorkflows sets the Workflows field's value. +func (s *BatchGetWorkflowsOutput) SetWorkflows(v []*Workflow) *BatchGetWorkflowsOutput { + s.Workflows = v + return s +} + +// Records an error that occurred when attempting to stop a specified job run. +type BatchStopJobRunError struct { + _ struct{} `type:"structure"` + + // Specifies details about the error that was encountered. + ErrorDetail *ErrorDetail `type:"structure"` + + // The name of the job definition that is used in the job run in question. + JobName *string `min:"1" type:"string"` + + // The JobRunId of the job run in question. + JobRunId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchStopJobRunError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopJobRunError) GoString() string { + return s.String() +} + +// SetErrorDetail sets the ErrorDetail field's value. +func (s *BatchStopJobRunError) SetErrorDetail(v *ErrorDetail) *BatchStopJobRunError { + s.ErrorDetail = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *BatchStopJobRunError) SetJobName(v string) *BatchStopJobRunError { + s.JobName = &v + return s +} + +// SetJobRunId sets the JobRunId field's value. +func (s *BatchStopJobRunError) SetJobRunId(v string) *BatchStopJobRunError { + s.JobRunId = &v + return s +} + +type BatchStopJobRunInput struct { + _ struct{} `type:"structure"` + + // The name of the job definition for which to stop job runs. + // + // JobName is a required field + JobName *string `min:"1" type:"string" required:"true"` + + // A list of the JobRunIds that should be stopped for that job definition. + // + // JobRunIds is a required field + JobRunIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchStopJobRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopJobRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchStopJobRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchStopJobRunInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.JobRunIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobRunIds")) + } + if s.JobRunIds != nil && len(s.JobRunIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobRunIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobName sets the JobName field's value. +func (s *BatchStopJobRunInput) SetJobName(v string) *BatchStopJobRunInput { + s.JobName = &v + return s +} + +// SetJobRunIds sets the JobRunIds field's value. +func (s *BatchStopJobRunInput) SetJobRunIds(v []*string) *BatchStopJobRunInput { + s.JobRunIds = v + return s +} + +type BatchStopJobRunOutput struct { + _ struct{} `type:"structure"` + + // A list of the errors that were encountered in trying to stop JobRuns, including + // the JobRunId for which each error was encountered and details about the error. 
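+ // An individual run can fail to stop (for example, if it has already
+ // finished), so this list is worth checking alongside SuccessfulSubmissions.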
+ Errors []*BatchStopJobRunError `type:"list"` + + // A list of the JobRuns that were successfully submitted for stopping. + SuccessfulSubmissions []*BatchStopJobRunSuccessfulSubmission `type:"list"` +} + +// String returns the string representation +func (s BatchStopJobRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopJobRunOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchStopJobRunOutput) SetErrors(v []*BatchStopJobRunError) *BatchStopJobRunOutput { + s.Errors = v + return s +} + +// SetSuccessfulSubmissions sets the SuccessfulSubmissions field's value. +func (s *BatchStopJobRunOutput) SetSuccessfulSubmissions(v []*BatchStopJobRunSuccessfulSubmission) *BatchStopJobRunOutput { + s.SuccessfulSubmissions = v + return s +} + +// Records a successful request to stop a specified JobRun. +type BatchStopJobRunSuccessfulSubmission struct { + _ struct{} `type:"structure"` + + // The name of the job definition used in the job run that was stopped. + JobName *string `min:"1" type:"string"` + + // The JobRunId of the job run that was stopped. + JobRunId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchStopJobRunSuccessfulSubmission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopJobRunSuccessfulSubmission) GoString() string { + return s.String() +} + +// SetJobName sets the JobName field's value. +func (s *BatchStopJobRunSuccessfulSubmission) SetJobName(v string) *BatchStopJobRunSuccessfulSubmission { + s.JobName = &v + return s +} + +// SetJobRunId sets the JobRunId field's value. +func (s *BatchStopJobRunSuccessfulSubmission) SetJobRunId(v string) *BatchStopJobRunSuccessfulSubmission { + s.JobRunId = &v + return s +} + +type CancelMLTaskRunInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the task run. + // + // TaskRunId is a required field + TaskRunId *string `min:"1" type:"string" required:"true"` + + // The unique identifier of the machine learning transform. + // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelMLTaskRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMLTaskRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelMLTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelMLTaskRunInput"} + if s.TaskRunId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskRunId")) + } + if s.TaskRunId != nil && len(*s.TaskRunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskRunId", 1)) + } + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskRunId sets the TaskRunId field's value. +func (s *CancelMLTaskRunInput) SetTaskRunId(v string) *CancelMLTaskRunInput { + s.TaskRunId = &v + return s +} + +// SetTransformId sets the TransformId field's value. 
+func (s *CancelMLTaskRunInput) SetTransformId(v string) *CancelMLTaskRunInput { + s.TransformId = &v + return s +} + +type CancelMLTaskRunOutput struct { + _ struct{} `type:"structure"` + + // The status for this run. + Status *string `type:"string" enum:"TaskStatusType"` + + // The unique identifier for the task run. + TaskRunId *string `min:"1" type:"string"` + + // The unique identifier of the machine learning transform. + TransformId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CancelMLTaskRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMLTaskRunOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *CancelMLTaskRunOutput) SetStatus(v string) *CancelMLTaskRunOutput { + s.Status = &v + return s +} + +// SetTaskRunId sets the TaskRunId field's value. +func (s *CancelMLTaskRunOutput) SetTaskRunId(v string) *CancelMLTaskRunOutput { + s.TaskRunId = &v + return s +} + +// SetTransformId sets the TransformId field's value. +func (s *CancelMLTaskRunOutput) SetTransformId(v string) *CancelMLTaskRunOutput { + s.TransformId = &v + return s +} + +// Specifies a table definition in the AWS Glue Data Catalog. +type CatalogEntry struct { + _ struct{} `type:"structure"` + + // The database in which the table metadata resides. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table in question. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CatalogEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CatalogEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CatalogEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CatalogEntry"} + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *CatalogEntry) SetDatabaseName(v string) *CatalogEntry { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CatalogEntry) SetTableName(v string) *CatalogEntry { + s.TableName = &v + return s +} + +// A structure containing migration status information. +type CatalogImportStatus struct { + _ struct{} `type:"structure"` + + // True if the migration has completed, or False otherwise. + ImportCompleted *bool `type:"boolean"` + + // The time that the migration was started. + ImportTime *time.Time `type:"timestamp"` + + // The name of the person who initiated the migration. 
+ ImportedBy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CatalogImportStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CatalogImportStatus) GoString() string { + return s.String() +} + +// SetImportCompleted sets the ImportCompleted field's value. +func (s *CatalogImportStatus) SetImportCompleted(v bool) *CatalogImportStatus { + s.ImportCompleted = &v + return s +} + +// SetImportTime sets the ImportTime field's value. +func (s *CatalogImportStatus) SetImportTime(v time.Time) *CatalogImportStatus { + s.ImportTime = &v + return s +} + +// SetImportedBy sets the ImportedBy field's value. +func (s *CatalogImportStatus) SetImportedBy(v string) *CatalogImportStatus { + s.ImportedBy = &v + return s +} + +// Specifies an AWS Glue Data Catalog target. +type CatalogTarget struct { + _ struct{} `type:"structure"` + + // The name of the database to be synchronized. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of the tables to be synchronized. + // + // Tables is a required field + Tables []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CatalogTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CatalogTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CatalogTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CatalogTarget"} + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Tables == nil { + invalidParams.Add(request.NewErrParamRequired("Tables")) + } + if s.Tables != nil && len(s.Tables) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tables", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *CatalogTarget) SetDatabaseName(v string) *CatalogTarget { + s.DatabaseName = &v + return s +} + +// SetTables sets the Tables field's value. +func (s *CatalogTarget) SetTables(v []*string) *CatalogTarget { + s.Tables = v + return s +} + +// Classifiers are triggered during a crawl task. A classifier checks whether +// a given file is in a format it can handle. If it is, the classifier creates +// a schema in the form of a StructType object that matches that data format. +// +// You can use the standard classifiers that AWS Glue provides, or you can write +// your own classifiers to best categorize your data sources and specify the +// appropriate schemas to use for them. A classifier can be a grok classifier, +// an XML classifier, a JSON classifier, or a custom CSV classifier, as specified +// in one of the fields in the Classifier object. +type Classifier struct { + _ struct{} `type:"structure"` + + // A classifier for comma-separated values (CSV). + CsvClassifier *CsvClassifier `type:"structure"` + + // A classifier that uses grok. + GrokClassifier *GrokClassifier `type:"structure"` + + // A classifier for JSON content. + JsonClassifier *JsonClassifier `type:"structure"` + + // A classifier for XML content. 
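+ // As with the grok, JSON, and CSV fields above, this field is populated
+ // only when the classifier is of the corresponding type.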
+ XMLClassifier *XMLClassifier `type:"structure"` +} + +// String returns the string representation +func (s Classifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Classifier) GoString() string { + return s.String() +} + +// SetCsvClassifier sets the CsvClassifier field's value. +func (s *Classifier) SetCsvClassifier(v *CsvClassifier) *Classifier { + s.CsvClassifier = v + return s +} + +// SetGrokClassifier sets the GrokClassifier field's value. +func (s *Classifier) SetGrokClassifier(v *GrokClassifier) *Classifier { + s.GrokClassifier = v + return s +} + +// SetJsonClassifier sets the JsonClassifier field's value. +func (s *Classifier) SetJsonClassifier(v *JsonClassifier) *Classifier { + s.JsonClassifier = v + return s +} + +// SetXMLClassifier sets the XMLClassifier field's value. +func (s *Classifier) SetXMLClassifier(v *XMLClassifier) *Classifier { + s.XMLClassifier = v + return s +} + +// Specifies how Amazon CloudWatch data should be encrypted. +type CloudWatchEncryption struct { + _ struct{} `type:"structure"` + + // The encryption mode to use for CloudWatch data. + CloudWatchEncryptionMode *string `type:"string" enum:"CloudWatchEncryptionMode"` + + // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KmsKeyArn *string `type:"string"` +} + +// String returns the string representation +func (s CloudWatchEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchEncryption) GoString() string { + return s.String() +} + +// SetCloudWatchEncryptionMode sets the CloudWatchEncryptionMode field's value. +func (s *CloudWatchEncryption) SetCloudWatchEncryptionMode(v string) *CloudWatchEncryption { + s.CloudWatchEncryptionMode = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *CloudWatchEncryption) SetKmsKeyArn(v string) *CloudWatchEncryption { + s.KmsKeyArn = &v + return s +} + +// Represents a directional edge in a directed acyclic graph (DAG). +type CodeGenEdge struct { + _ struct{} `type:"structure"` + + // The ID of the node at which the edge starts. + // + // Source is a required field + Source *string `min:"1" type:"string" required:"true"` + + // The ID of the node at which the edge ends. + // + // Target is a required field + Target *string `min:"1" type:"string" required:"true"` + + // The target of the edge. + TargetParameter *string `type:"string"` +} + +// String returns the string representation +func (s CodeGenEdge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeGenEdge) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeGenEdge) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeGenEdge"} + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Source != nil && len(*s.Source) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Source", 1)) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil && len(*s.Target) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Target", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSource sets the Source field's value. 
+func (s *CodeGenEdge) SetSource(v string) *CodeGenEdge { + s.Source = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *CodeGenEdge) SetTarget(v string) *CodeGenEdge { + s.Target = &v + return s +} + +// SetTargetParameter sets the TargetParameter field's value. +func (s *CodeGenEdge) SetTargetParameter(v string) *CodeGenEdge { + s.TargetParameter = &v + return s +} + +// Represents a node in a directed acyclic graph (DAG) +type CodeGenNode struct { + _ struct{} `type:"structure"` + + // Properties of the node, in the form of name-value pairs. + // + // Args is a required field + Args []*CodeGenNodeArg `type:"list" required:"true"` + + // A node identifier that is unique within the node's graph. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // The line number of the node. + LineNumber *int64 `type:"integer"` + + // The type of node that this is. + // + // NodeType is a required field + NodeType *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CodeGenNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeGenNode) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeGenNode) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeGenNode"} + if s.Args == nil { + invalidParams.Add(request.NewErrParamRequired("Args")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.NodeType == nil { + invalidParams.Add(request.NewErrParamRequired("NodeType")) + } + if s.Args != nil { + for i, v := range s.Args { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Args", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArgs sets the Args field's value. +func (s *CodeGenNode) SetArgs(v []*CodeGenNodeArg) *CodeGenNode { + s.Args = v + return s +} + +// SetId sets the Id field's value. +func (s *CodeGenNode) SetId(v string) *CodeGenNode { + s.Id = &v + return s +} + +// SetLineNumber sets the LineNumber field's value. +func (s *CodeGenNode) SetLineNumber(v int64) *CodeGenNode { + s.LineNumber = &v + return s +} + +// SetNodeType sets the NodeType field's value. +func (s *CodeGenNode) SetNodeType(v string) *CodeGenNode { + s.NodeType = &v + return s +} + +// An argument or property of a node. +type CodeGenNodeArg struct { + _ struct{} `type:"structure"` + + // The name of the argument or property. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // True if the value is used as a parameter. + Param *bool `type:"boolean"` + + // The value of the argument or property. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CodeGenNodeArg) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeGenNodeArg) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
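+//
+// For example (an illustrative sketch, not generated SDK documentation: the
+// argument name and value below are placeholders, and aws.String is the usual
+// aws-sdk-go helper), an argument passes validation when both required fields
+// are set:
+//
+//	arg := &glue.CodeGenNodeArg{
+//		Name:  aws.String("database"), // placeholder argument name
+//		Value: aws.String("mydb"),     // placeholder value
+//	}
+//	if err := arg.Validate(); err != nil {
+//		// handle the invalid-parameter error
+//	}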
+func (s *CodeGenNodeArg) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeGenNodeArg"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CodeGenNodeArg) SetName(v string) *CodeGenNodeArg { + s.Name = &v + return s +} + +// SetParam sets the Param field's value. +func (s *CodeGenNodeArg) SetParam(v bool) *CodeGenNodeArg { + s.Param = &v + return s +} + +// SetValue sets the Value field's value. +func (s *CodeGenNodeArg) SetValue(v string) *CodeGenNodeArg { + s.Value = &v + return s +} + +// A column in a Table. +type Column struct { + _ struct{} `type:"structure"` + + // A free-form text comment. + Comment *string `type:"string"` + + // The name of the Column. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // These key-value pairs define properties associated with the column. + Parameters map[string]*string `type:"map"` + + // The data type of the Column. + Type *string `type:"string"` +} + +// String returns the string representation +func (s Column) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Column) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Column) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Column"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComment sets the Comment field's value. +func (s *Column) SetComment(v string) *Column { + s.Comment = &v + return s +} + +// SetName sets the Name field's value. +func (s *Column) SetName(v string) *Column { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *Column) SetParameters(v map[string]*string) *Column { + s.Parameters = v + return s +} + +// SetType sets the Type field's value. +func (s *Column) SetType(v string) *Column { + s.Type = &v + return s +} + +// Defines a condition under which a trigger fires. +type Condition struct { + _ struct{} `type:"structure"` + + // The state of the crawler to which this condition applies. + CrawlState *string `type:"string" enum:"CrawlState"` + + // The name of the crawler to which this condition applies. + CrawlerName *string `min:"1" type:"string"` + + // The name of the job whose JobRuns this condition applies to, and on which + // this trigger waits. + JobName *string `min:"1" type:"string"` + + // A logical operator. + LogicalOperator *string `type:"string" enum:"LogicalOperator"` + + // The condition state. Currently, the values supported are SUCCEEDED, STOPPED, + // TIMEOUT, and FAILED. + State *string `type:"string" enum:"JobRunState"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
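+//
+// For example (an illustrative sketch; the job name is a placeholder), a
+// condition that waits for a job run to finish successfully could be built
+// with the generated enum constants:
+//
+//	cond := &glue.Condition{
+//		JobName:         aws.String("my-etl-job"), // placeholder job name
+//		LogicalOperator: aws.String(glue.LogicalOperatorEquals),
+//		State:           aws.String(glue.JobRunStateSucceeded),
+//	}
+//	if err := cond.Validate(); err != nil {
+//		// handle the invalid-parameter error
+//	}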
+func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCrawlState sets the CrawlState field's value. +func (s *Condition) SetCrawlState(v string) *Condition { + s.CrawlState = &v + return s +} + +// SetCrawlerName sets the CrawlerName field's value. +func (s *Condition) SetCrawlerName(v string) *Condition { + s.CrawlerName = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *Condition) SetJobName(v string) *Condition { + s.JobName = &v + return s +} + +// SetLogicalOperator sets the LogicalOperator field's value. +func (s *Condition) SetLogicalOperator(v string) *Condition { + s.LogicalOperator = &v + return s +} + +// SetState sets the State field's value. +func (s *Condition) SetState(v string) *Condition { + s.State = &v + return s +} + +// The confusion matrix shows you what your transform is predicting accurately +// and what types of errors it is making. +// +// For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix) +// in Wikipedia. +type ConfusionMatrix struct { + _ struct{} `type:"structure"` + + // The number of matches in the data that the transform didn't find, in the + // confusion matrix for your transform. + NumFalseNegatives *int64 `type:"long"` + + // The number of nonmatches in the data that the transform incorrectly classified + // as a match, in the confusion matrix for your transform. + NumFalsePositives *int64 `type:"long"` + + // The number of nonmatches in the data that the transform correctly rejected, + // in the confusion matrix for your transform. + NumTrueNegatives *int64 `type:"long"` + + // The number of matches in the data that the transform correctly found, in + // the confusion matrix for your transform. + NumTruePositives *int64 `type:"long"` +} + +// String returns the string representation +func (s ConfusionMatrix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfusionMatrix) GoString() string { + return s.String() +} + +// SetNumFalseNegatives sets the NumFalseNegatives field's value. +func (s *ConfusionMatrix) SetNumFalseNegatives(v int64) *ConfusionMatrix { + s.NumFalseNegatives = &v + return s +} + +// SetNumFalsePositives sets the NumFalsePositives field's value. +func (s *ConfusionMatrix) SetNumFalsePositives(v int64) *ConfusionMatrix { + s.NumFalsePositives = &v + return s +} + +// SetNumTrueNegatives sets the NumTrueNegatives field's value. +func (s *ConfusionMatrix) SetNumTrueNegatives(v int64) *ConfusionMatrix { + s.NumTrueNegatives = &v + return s +} + +// SetNumTruePositives sets the NumTruePositives field's value. +func (s *ConfusionMatrix) SetNumTruePositives(v int64) *ConfusionMatrix { + s.NumTruePositives = &v + return s +} + +// Defines a connection to a data source. +type Connection struct { + _ struct{} `type:"structure"` + + // These key-value pairs define parameters for the connection: + // + // * HOST - The host URI: either the fully qualified domain name (FQDN) or + // the IPv4 address of the database host. 
+ //
+ // * PORT - The port number, between 1024 and 65535, of the port on which
+ // the database host is listening for database connections.
+ //
+ // * USER_NAME - The name under which to log in to the database. The value
+ // string for USER_NAME is "USERNAME".
+ //
+ // * PASSWORD - A password, if one is used, for the user name.
+ //
+ // * ENCRYPTED_PASSWORD - When you enable connection password protection
+ // by setting ConnectionPasswordEncryption in the Data Catalog encryption
+ // settings, this field stores the encrypted password.
+ //
+ // * JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3)
+ // path of the JAR file that contains the JDBC driver to use.
+ //
+ // * JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
+ //
+ // * JDBC_ENGINE - The name of the JDBC engine to use.
+ //
+ // * JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
+ //
+ // * CONFIG_FILES - (Reserved for future use.)
+ //
+ // * INSTANCE_ID - The instance ID to use.
+ //
+ // * JDBC_CONNECTION_URL - The URL for the JDBC connection.
+ //
+ // * JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether
+ // Secure Sockets Layer (SSL) with hostname matching is enforced for the
+ // JDBC connection on the client. The default is false.
+ //
+ // * CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root
+ // certificate. AWS Glue uses this root certificate to validate the customer’s
+ // certificate when connecting to the customer database. AWS Glue only handles
+ // X.509 certificates. The certificate provided must be DER-encoded and supplied
+ // in base64-encoded PEM format.
+ //
+ // * SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. AWS Glue
+ // validates the Signature algorithm and Subject Public Key Algorithm for
+ // the customer certificate. The only permitted algorithms for the Signature
+ // algorithm are SHA256withRSA, SHA384withRSA, or SHA512withRSA. For the Subject
+ // Public Key Algorithm, the key length must be at least 2048. You can set
+ // the value of this property to true to skip AWS Glue’s validation of
+ // the customer certificate.
+ //
+ // * CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string that is
+ // used for domain match or distinguished name match to prevent a man-in-the-middle
+ // attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in
+ // Microsoft SQL Server, this is used as the hostNameInCertificate.
+ ConnectionProperties map[string]*string `type:"map"`
+
+ // The type of the connection. Currently, only JDBC is supported; SFTP is not
+ // supported.
+ ConnectionType *string `type:"string" enum:"ConnectionType"`
+
+ // The time that this connection definition was created.
+ CreationTime *time.Time `type:"timestamp"`
+
+ // The description of the connection.
+ Description *string `type:"string"`
+
+ // The user, group, or role that last updated this connection definition.
+ LastUpdatedBy *string `min:"1" type:"string"`
+
+ // The last time that this connection definition was updated.
+ LastUpdatedTime *time.Time `type:"timestamp"`
+
+ // A list of criteria that can be used in selecting this connection.
+ MatchCriteria []*string `type:"list"`
+
+ // The name of the connection definition.
+ Name *string `min:"1" type:"string"`
+
+ // A map of physical connection requirements, such as virtual private cloud
+ // (VPC) and SecurityGroup, that are needed to make this connection successfully.
+ PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` +} + +// String returns the string representation +func (s Connection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connection) GoString() string { + return s.String() +} + +// SetConnectionProperties sets the ConnectionProperties field's value. +func (s *Connection) SetConnectionProperties(v map[string]*string) *Connection { + s.ConnectionProperties = v + return s +} + +// SetConnectionType sets the ConnectionType field's value. +func (s *Connection) SetConnectionType(v string) *Connection { + s.ConnectionType = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Connection) SetCreationTime(v time.Time) *Connection { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Connection) SetDescription(v string) *Connection { + s.Description = &v + return s +} + +// SetLastUpdatedBy sets the LastUpdatedBy field's value. +func (s *Connection) SetLastUpdatedBy(v string) *Connection { + s.LastUpdatedBy = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Connection) SetLastUpdatedTime(v time.Time) *Connection { + s.LastUpdatedTime = &v + return s +} + +// SetMatchCriteria sets the MatchCriteria field's value. +func (s *Connection) SetMatchCriteria(v []*string) *Connection { + s.MatchCriteria = v + return s +} + +// SetName sets the Name field's value. +func (s *Connection) SetName(v string) *Connection { + s.Name = &v + return s +} + +// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value. +func (s *Connection) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *Connection { + s.PhysicalConnectionRequirements = v + return s +} + +// A structure that is used to specify a connection to create or update. +type ConnectionInput struct { + _ struct{} `type:"structure"` + + // These key-value pairs define parameters for the connection. + // + // ConnectionProperties is a required field + ConnectionProperties map[string]*string `type:"map" required:"true"` + + // The type of the connection. Currently, only JDBC is supported; SFTP is not + // supported. + // + // ConnectionType is a required field + ConnectionType *string `type:"string" required:"true" enum:"ConnectionType"` + + // The description of the connection. + Description *string `type:"string"` + + // A list of criteria that can be used in selecting this connection. + MatchCriteria []*string `type:"list"` + + // The name of the connection. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A map of physical connection requirements, such as virtual private cloud + // (VPC) and SecurityGroup, that are needed to successfully make this connection. + PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` +} + +// String returns the string representation +func (s ConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
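+//
+// For example (an illustrative sketch; the connection name, URL, and
+// credentials are placeholders), a minimal JDBC input that satisfies the
+// required fields looks like:
+//
+//	input := &glue.ConnectionInput{
+//		Name:           aws.String("my-jdbc-connection"),
+//		ConnectionType: aws.String(glue.ConnectionTypeJdbc),
+//		ConnectionProperties: map[string]*string{
+//			"JDBC_CONNECTION_URL": aws.String("jdbc:mysql://host:3306/mydb"),
+//			"USERNAME":            aws.String("admin"),  // placeholder
+//			"PASSWORD":            aws.String("secret"), // placeholder
+//		},
+//	}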
+func (s *ConnectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ConnectionInput"}
+ if s.ConnectionProperties == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionProperties"))
+ }
+ if s.ConnectionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionType"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.PhysicalConnectionRequirements != nil {
+ if err := s.PhysicalConnectionRequirements.Validate(); err != nil {
+ invalidParams.AddNested("PhysicalConnectionRequirements", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConnectionProperties sets the ConnectionProperties field's value.
+func (s *ConnectionInput) SetConnectionProperties(v map[string]*string) *ConnectionInput {
+ s.ConnectionProperties = v
+ return s
+}
+
+// SetConnectionType sets the ConnectionType field's value.
+func (s *ConnectionInput) SetConnectionType(v string) *ConnectionInput {
+ s.ConnectionType = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ConnectionInput) SetDescription(v string) *ConnectionInput {
+ s.Description = &v
+ return s
+}
+
+// SetMatchCriteria sets the MatchCriteria field's value.
+func (s *ConnectionInput) SetMatchCriteria(v []*string) *ConnectionInput {
+ s.MatchCriteria = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ConnectionInput) SetName(v string) *ConnectionInput {
+ s.Name = &v
+ return s
+}
+
+// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value.
+func (s *ConnectionInput) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *ConnectionInput {
+ s.PhysicalConnectionRequirements = v
+ return s
+}
+
+// The data structure used by the Data Catalog to encrypt the password as part
+// of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD
+// field in the connection properties. You can enable catalog encryption or
+// only password encryption.
+//
+// When a CreateConnection request arrives containing a password, the Data
+// Catalog first encrypts the password using your AWS KMS key. It then encrypts
+// the whole connection object again if catalog encryption is also enabled.
+//
+// This encryption requires that you set AWS KMS key permissions to enable or
+// restrict access on the password key according to your security requirements.
+// For example, you might want only administrators to have decrypt permission
+// on the password key.
+type ConnectionPasswordEncryption struct {
+ _ struct{} `type:"structure"`
+
+ // An AWS KMS key that is used to encrypt the connection password.
+ //
+ // If connection password protection is enabled, the caller of CreateConnection
+ // and UpdateConnection needs at least kms:Encrypt permission on the specified
+ // AWS KMS key, to encrypt passwords before storing them in the Data Catalog.
+ //
+ // You can set the decrypt permission to enable or restrict access on the password
+ // key according to your security requirements.
+ AwsKmsKeyId *string `min:"1" type:"string"`
+
+ // When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords
+ // remain encrypted in the responses of GetConnection and GetConnections. This
+ // encryption takes effect independently of catalog encryption.
+ // + // ReturnConnectionPasswordEncrypted is a required field + ReturnConnectionPasswordEncrypted *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ConnectionPasswordEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionPasswordEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConnectionPasswordEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConnectionPasswordEncryption"} + if s.AwsKmsKeyId != nil && len(*s.AwsKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsKmsKeyId", 1)) + } + if s.ReturnConnectionPasswordEncrypted == nil { + invalidParams.Add(request.NewErrParamRequired("ReturnConnectionPasswordEncrypted")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsKmsKeyId sets the AwsKmsKeyId field's value. +func (s *ConnectionPasswordEncryption) SetAwsKmsKeyId(v string) *ConnectionPasswordEncryption { + s.AwsKmsKeyId = &v + return s +} + +// SetReturnConnectionPasswordEncrypted sets the ReturnConnectionPasswordEncrypted field's value. +func (s *ConnectionPasswordEncryption) SetReturnConnectionPasswordEncrypted(v bool) *ConnectionPasswordEncryption { + s.ReturnConnectionPasswordEncrypted = &v + return s +} + +// Specifies the connections used by a job. +type ConnectionsList struct { + _ struct{} `type:"structure"` + + // A list of connections used by the job. + Connections []*string `type:"list"` +} + +// String returns the string representation +func (s ConnectionsList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionsList) GoString() string { + return s.String() +} + +// SetConnections sets the Connections field's value. +func (s *ConnectionsList) SetConnections(v []*string) *ConnectionsList { + s.Connections = v + return s +} + +// The details of a crawl in the workflow. +type Crawl struct { + _ struct{} `type:"structure"` + + // The date and time on which the crawl completed. + CompletedOn *time.Time `type:"timestamp"` + + // The error message associated with the crawl. + ErrorMessage *string `type:"string"` + + // The log group associated with the crawl. + LogGroup *string `min:"1" type:"string"` + + // The log stream associated with the crawl. + LogStream *string `min:"1" type:"string"` + + // The date and time on which the crawl started. + StartedOn *time.Time `type:"timestamp"` + + // The state of the crawler. + State *string `type:"string" enum:"CrawlState"` +} + +// String returns the string representation +func (s Crawl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Crawl) GoString() string { + return s.String() +} + +// SetCompletedOn sets the CompletedOn field's value. +func (s *Crawl) SetCompletedOn(v time.Time) *Crawl { + s.CompletedOn = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *Crawl) SetErrorMessage(v string) *Crawl { + s.ErrorMessage = &v + return s +} + +// SetLogGroup sets the LogGroup field's value. +func (s *Crawl) SetLogGroup(v string) *Crawl { + s.LogGroup = &v + return s +} + +// SetLogStream sets the LogStream field's value. +func (s *Crawl) SetLogStream(v string) *Crawl { + s.LogStream = &v + return s +} + +// SetStartedOn sets the StartedOn field's value. 
+func (s *Crawl) SetStartedOn(v time.Time) *Crawl { + s.StartedOn = &v + return s +} + +// SetState sets the State field's value. +func (s *Crawl) SetState(v string) *Crawl { + s.State = &v + return s +} + +// Specifies a crawler program that examines a data source and uses classifiers +// to try to determine its schema. If successful, the crawler records metadata +// concerning the data source in the AWS Glue Data Catalog. +type Crawler struct { + _ struct{} `type:"structure"` + + // A list of UTF-8 strings that specify the custom classifiers that are associated + // with the crawler. + Classifiers []*string `type:"list"` + + // Crawler configuration information. This versioned JSON string allows users + // to specify aspects of a crawler's behavior. For more information, see Configuring + // a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + Configuration *string `type:"string"` + + // If the crawler is running, contains the total time elapsed since the last + // crawl began. + CrawlElapsedTime *int64 `type:"long"` + + // The name of the SecurityConfiguration structure to be used by this crawler. + CrawlerSecurityConfiguration *string `type:"string"` + + // The time that the crawler was created. + CreationTime *time.Time `type:"timestamp"` + + // The name of the database in which the crawler's output is stored. + DatabaseName *string `type:"string"` + + // A description of the crawler. + Description *string `type:"string"` + + // The status of the last crawl, and potentially error information if an error + // occurred. + LastCrawl *LastCrawlInfo `type:"structure"` + + // The time that the crawler was last updated. + LastUpdated *time.Time `type:"timestamp"` + + // The name of the crawler. + Name *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of an IAM role that's used to access customer + // resources, such as Amazon Simple Storage Service (Amazon S3) data. + Role *string `type:"string"` + + // For scheduled crawlers, the schedule when the crawler runs. + Schedule *Schedule `type:"structure"` + + // The policy that specifies update and delete behaviors for the crawler. + SchemaChangePolicy *SchemaChangePolicy `type:"structure"` + + // Indicates whether the crawler is running, or whether a run is pending. + State *string `type:"string" enum:"CrawlerState"` + + // The prefix added to the names of tables that are created. + TablePrefix *string `type:"string"` + + // A collection of targets to crawl. + Targets *CrawlerTargets `type:"structure"` + + // The version of the crawler. + Version *int64 `type:"long"` +} + +// String returns the string representation +func (s Crawler) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Crawler) GoString() string { + return s.String() +} + +// SetClassifiers sets the Classifiers field's value. +func (s *Crawler) SetClassifiers(v []*string) *Crawler { + s.Classifiers = v + return s +} + +// SetConfiguration sets the Configuration field's value. +func (s *Crawler) SetConfiguration(v string) *Crawler { + s.Configuration = &v + return s +} + +// SetCrawlElapsedTime sets the CrawlElapsedTime field's value. +func (s *Crawler) SetCrawlElapsedTime(v int64) *Crawler { + s.CrawlElapsedTime = &v + return s +} + +// SetCrawlerSecurityConfiguration sets the CrawlerSecurityConfiguration field's value. 
+func (s *Crawler) SetCrawlerSecurityConfiguration(v string) *Crawler { + s.CrawlerSecurityConfiguration = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Crawler) SetCreationTime(v time.Time) *Crawler { + s.CreationTime = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *Crawler) SetDatabaseName(v string) *Crawler { + s.DatabaseName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Crawler) SetDescription(v string) *Crawler { + s.Description = &v + return s +} + +// SetLastCrawl sets the LastCrawl field's value. +func (s *Crawler) SetLastCrawl(v *LastCrawlInfo) *Crawler { + s.LastCrawl = v + return s +} + +// SetLastUpdated sets the LastUpdated field's value. +func (s *Crawler) SetLastUpdated(v time.Time) *Crawler { + s.LastUpdated = &v + return s +} + +// SetName sets the Name field's value. +func (s *Crawler) SetName(v string) *Crawler { + s.Name = &v + return s +} + +// SetRole sets the Role field's value. +func (s *Crawler) SetRole(v string) *Crawler { + s.Role = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *Crawler) SetSchedule(v *Schedule) *Crawler { + s.Schedule = v + return s +} + +// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. +func (s *Crawler) SetSchemaChangePolicy(v *SchemaChangePolicy) *Crawler { + s.SchemaChangePolicy = v + return s +} + +// SetState sets the State field's value. +func (s *Crawler) SetState(v string) *Crawler { + s.State = &v + return s +} + +// SetTablePrefix sets the TablePrefix field's value. +func (s *Crawler) SetTablePrefix(v string) *Crawler { + s.TablePrefix = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *Crawler) SetTargets(v *CrawlerTargets) *Crawler { + s.Targets = v + return s +} + +// SetVersion sets the Version field's value. +func (s *Crawler) SetVersion(v int64) *Crawler { + s.Version = &v + return s +} + +// Metrics for a specified crawler. +type CrawlerMetrics struct { + _ struct{} `type:"structure"` + + // The name of the crawler. + CrawlerName *string `min:"1" type:"string"` + + // The duration of the crawler's most recent run, in seconds. + LastRuntimeSeconds *float64 `type:"double"` + + // The median duration of this crawler's runs, in seconds. + MedianRuntimeSeconds *float64 `type:"double"` + + // True if the crawler is still estimating how long it will take to complete + // this run. + StillEstimating *bool `type:"boolean"` + + // The number of tables created by this crawler. + TablesCreated *int64 `type:"integer"` + + // The number of tables deleted by this crawler. + TablesDeleted *int64 `type:"integer"` + + // The number of tables updated by this crawler. + TablesUpdated *int64 `type:"integer"` + + // The estimated time left to complete a running crawl. + TimeLeftSeconds *float64 `type:"double"` +} + +// String returns the string representation +func (s CrawlerMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CrawlerMetrics) GoString() string { + return s.String() +} + +// SetCrawlerName sets the CrawlerName field's value. +func (s *CrawlerMetrics) SetCrawlerName(v string) *CrawlerMetrics { + s.CrawlerName = &v + return s +} + +// SetLastRuntimeSeconds sets the LastRuntimeSeconds field's value. 
+func (s *CrawlerMetrics) SetLastRuntimeSeconds(v float64) *CrawlerMetrics { + s.LastRuntimeSeconds = &v + return s +} + +// SetMedianRuntimeSeconds sets the MedianRuntimeSeconds field's value. +func (s *CrawlerMetrics) SetMedianRuntimeSeconds(v float64) *CrawlerMetrics { + s.MedianRuntimeSeconds = &v + return s +} + +// SetStillEstimating sets the StillEstimating field's value. +func (s *CrawlerMetrics) SetStillEstimating(v bool) *CrawlerMetrics { + s.StillEstimating = &v + return s +} + +// SetTablesCreated sets the TablesCreated field's value. +func (s *CrawlerMetrics) SetTablesCreated(v int64) *CrawlerMetrics { + s.TablesCreated = &v + return s +} + +// SetTablesDeleted sets the TablesDeleted field's value. +func (s *CrawlerMetrics) SetTablesDeleted(v int64) *CrawlerMetrics { + s.TablesDeleted = &v + return s +} + +// SetTablesUpdated sets the TablesUpdated field's value. +func (s *CrawlerMetrics) SetTablesUpdated(v int64) *CrawlerMetrics { + s.TablesUpdated = &v + return s +} + +// SetTimeLeftSeconds sets the TimeLeftSeconds field's value. +func (s *CrawlerMetrics) SetTimeLeftSeconds(v float64) *CrawlerMetrics { + s.TimeLeftSeconds = &v + return s +} + +// The details of a Crawler node present in the workflow. +type CrawlerNodeDetails struct { + _ struct{} `type:"structure"` + + // A list of crawls represented by the crawl node. + Crawls []*Crawl `type:"list"` +} + +// String returns the string representation +func (s CrawlerNodeDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CrawlerNodeDetails) GoString() string { + return s.String() +} + +// SetCrawls sets the Crawls field's value. +func (s *CrawlerNodeDetails) SetCrawls(v []*Crawl) *CrawlerNodeDetails { + s.Crawls = v + return s +} + +// Specifies data stores to crawl. +type CrawlerTargets struct { + _ struct{} `type:"structure"` + + // Specifies AWS Glue Data Catalog targets. + CatalogTargets []*CatalogTarget `type:"list"` + + // Specifies Amazon DynamoDB targets. + DynamoDBTargets []*DynamoDBTarget `type:"list"` + + // Specifies JDBC targets. + JdbcTargets []*JdbcTarget `type:"list"` + + // Specifies Amazon Simple Storage Service (Amazon S3) targets. + S3Targets []*S3Target `type:"list"` +} + +// String returns the string representation +func (s CrawlerTargets) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CrawlerTargets) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CrawlerTargets) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CrawlerTargets"} + if s.CatalogTargets != nil { + for i, v := range s.CatalogTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CatalogTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogTargets sets the CatalogTargets field's value. +func (s *CrawlerTargets) SetCatalogTargets(v []*CatalogTarget) *CrawlerTargets { + s.CatalogTargets = v + return s +} + +// SetDynamoDBTargets sets the DynamoDBTargets field's value. +func (s *CrawlerTargets) SetDynamoDBTargets(v []*DynamoDBTarget) *CrawlerTargets { + s.DynamoDBTargets = v + return s +} + +// SetJdbcTargets sets the JdbcTargets field's value. 
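+//
+// For example (an illustrative sketch; the connection name and paths are
+// placeholders), the fluent setters can be chained to combine several target
+// lists in one CrawlerTargets value:
+//
+//	targets := (&glue.CrawlerTargets{}).
+//		SetJdbcTargets([]*glue.JdbcTarget{{
+//			ConnectionName: aws.String("my-jdbc-connection"),
+//			Path:           aws.String("mydb/%"),
+//		}}).
+//		SetS3Targets([]*glue.S3Target{{
+//			Path: aws.String("s3://my-bucket/my-prefix"),
+//		}})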
+func (s *CrawlerTargets) SetJdbcTargets(v []*JdbcTarget) *CrawlerTargets { + s.JdbcTargets = v + return s +} + +// SetS3Targets sets the S3Targets field's value. +func (s *CrawlerTargets) SetS3Targets(v []*S3Target) *CrawlerTargets { + s.S3Targets = v + return s +} + +type CreateClassifierInput struct { + _ struct{} `type:"structure"` + + // A CsvClassifier object specifying the classifier to create. + CsvClassifier *CreateCsvClassifierRequest `type:"structure"` + + // A GrokClassifier object specifying the classifier to create. + GrokClassifier *CreateGrokClassifierRequest `type:"structure"` + + // A JsonClassifier object specifying the classifier to create. + JsonClassifier *CreateJsonClassifierRequest `type:"structure"` + + // An XMLClassifier object specifying the classifier to create. + XMLClassifier *CreateXMLClassifierRequest `type:"structure"` +} + +// String returns the string representation +func (s CreateClassifierInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClassifierInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClassifierInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClassifierInput"} + if s.CsvClassifier != nil { + if err := s.CsvClassifier.Validate(); err != nil { + invalidParams.AddNested("CsvClassifier", err.(request.ErrInvalidParams)) + } + } + if s.GrokClassifier != nil { + if err := s.GrokClassifier.Validate(); err != nil { + invalidParams.AddNested("GrokClassifier", err.(request.ErrInvalidParams)) + } + } + if s.JsonClassifier != nil { + if err := s.JsonClassifier.Validate(); err != nil { + invalidParams.AddNested("JsonClassifier", err.(request.ErrInvalidParams)) + } + } + if s.XMLClassifier != nil { + if err := s.XMLClassifier.Validate(); err != nil { + invalidParams.AddNested("XMLClassifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCsvClassifier sets the CsvClassifier field's value. +func (s *CreateClassifierInput) SetCsvClassifier(v *CreateCsvClassifierRequest) *CreateClassifierInput { + s.CsvClassifier = v + return s +} + +// SetGrokClassifier sets the GrokClassifier field's value. +func (s *CreateClassifierInput) SetGrokClassifier(v *CreateGrokClassifierRequest) *CreateClassifierInput { + s.GrokClassifier = v + return s +} + +// SetJsonClassifier sets the JsonClassifier field's value. +func (s *CreateClassifierInput) SetJsonClassifier(v *CreateJsonClassifierRequest) *CreateClassifierInput { + s.JsonClassifier = v + return s +} + +// SetXMLClassifier sets the XMLClassifier field's value. +func (s *CreateClassifierInput) SetXMLClassifier(v *CreateXMLClassifierRequest) *CreateClassifierInput { + s.XMLClassifier = v + return s +} + +type CreateClassifierOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateClassifierOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClassifierOutput) GoString() string { + return s.String() +} + +type CreateConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which to create the connection. If none is + // provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // A ConnectionInput object defining the connection to create. 
+ // + // ConnectionInput is a required field + ConnectionInput *ConnectionInput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConnectionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ConnectionInput == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionInput")) + } + if s.ConnectionInput != nil { + if err := s.ConnectionInput.Validate(); err != nil { + invalidParams.AddNested("ConnectionInput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *CreateConnectionInput) SetCatalogId(v string) *CreateConnectionInput { + s.CatalogId = &v + return s +} + +// SetConnectionInput sets the ConnectionInput field's value. +func (s *CreateConnectionInput) SetConnectionInput(v *ConnectionInput) *CreateConnectionInput { + s.ConnectionInput = v + return s +} + +type CreateConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionOutput) GoString() string { + return s.String() +} + +type CreateCrawlerInput struct { + _ struct{} `type:"structure"` + + // A list of custom classifiers that the user has registered. By default, all + // built-in classifiers are included in a crawl, but these custom classifiers + // always override the default classifiers for a given classification. + Classifiers []*string `type:"list"` + + // The crawler configuration information. This versioned JSON string allows + // users to specify aspects of a crawler's behavior. For more information, see + // Configuring a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + Configuration *string `type:"string"` + + // The name of the SecurityConfiguration structure to be used by this crawler. + CrawlerSecurityConfiguration *string `type:"string"` + + // The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*. + DatabaseName *string `type:"string"` + + // A description of the new crawler. + Description *string `type:"string"` + + // Name of the new crawler. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new + // crawler to access customer resources. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A cron expression used to specify the schedule. For more information, see + // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, specify cron(15 12 + // * * ? *). + Schedule *string `type:"string"` + + // The policy for the crawler's update and deletion behavior. 
+ SchemaChangePolicy *SchemaChangePolicy `type:"structure"`
+
+ // The table prefix used for catalog tables that are created.
+ TablePrefix *string `type:"string"`
+
+ // The tags to use with this crawler request. You can use tags to limit access
+ // to the crawler. For more information, see AWS Tags in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html).
+ Tags map[string]*string `type:"map"`
+
+ // A list of collections of targets to crawl.
+ //
+ // Targets is a required field
+ Targets *CrawlerTargets `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateCrawlerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateCrawlerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateCrawlerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateCrawlerInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.Role == nil {
+ invalidParams.Add(request.NewErrParamRequired("Role"))
+ }
+ if s.Targets == nil {
+ invalidParams.Add(request.NewErrParamRequired("Targets"))
+ }
+ if s.Targets != nil {
+ if err := s.Targets.Validate(); err != nil {
+ invalidParams.AddNested("Targets", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClassifiers sets the Classifiers field's value.
+func (s *CreateCrawlerInput) SetClassifiers(v []*string) *CreateCrawlerInput {
+ s.Classifiers = v
+ return s
+}
+
+// SetConfiguration sets the Configuration field's value.
+func (s *CreateCrawlerInput) SetConfiguration(v string) *CreateCrawlerInput {
+ s.Configuration = &v
+ return s
+}
+
+// SetCrawlerSecurityConfiguration sets the CrawlerSecurityConfiguration field's value.
+func (s *CreateCrawlerInput) SetCrawlerSecurityConfiguration(v string) *CreateCrawlerInput {
+ s.CrawlerSecurityConfiguration = &v
+ return s
+}
+
+// SetDatabaseName sets the DatabaseName field's value.
+func (s *CreateCrawlerInput) SetDatabaseName(v string) *CreateCrawlerInput {
+ s.DatabaseName = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateCrawlerInput) SetDescription(v string) *CreateCrawlerInput {
+ s.Description = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *CreateCrawlerInput) SetName(v string) *CreateCrawlerInput {
+ s.Name = &v
+ return s
+}
+
+// SetRole sets the Role field's value.
+func (s *CreateCrawlerInput) SetRole(v string) *CreateCrawlerInput {
+ s.Role = &v
+ return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *CreateCrawlerInput) SetSchedule(v string) *CreateCrawlerInput {
+ s.Schedule = &v
+ return s
+}
+
+// SetSchemaChangePolicy sets the SchemaChangePolicy field's value.
+func (s *CreateCrawlerInput) SetSchemaChangePolicy(v *SchemaChangePolicy) *CreateCrawlerInput {
+ s.SchemaChangePolicy = v
+ return s
+}
+
+// SetTablePrefix sets the TablePrefix field's value.
+func (s *CreateCrawlerInput) SetTablePrefix(v string) *CreateCrawlerInput {
+ s.TablePrefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateCrawlerInput) SetTags(v map[string]*string) *CreateCrawlerInput {
+ s.Tags = v
+ return s
+}
+
+// SetTargets sets the Targets field's value.
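+//
+// For example (an illustrative sketch; the crawler name, role ARN, and
+// schedule are placeholders, and targets is a *CrawlerTargets value built
+// elsewhere), a minimal request assembled with the fluent setters:
+//
+//	input := (&glue.CreateCrawlerInput{}).
+//		SetName("my-crawler").
+//		SetRole("arn:aws:iam::123456789012:role/MyGlueCrawlerRole").
+//		SetSchedule("cron(15 12 * * ? *)").
+//		SetTargets(targets)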
+func (s *CreateCrawlerInput) SetTargets(v *CrawlerTargets) *CreateCrawlerInput { + s.Targets = v + return s +} + +type CreateCrawlerOutput struct { _ struct{} `type:"structure"` +} - // The job arguments used when this trigger fires. For this job run, they replace - // the default arguments set in the job definition itself. - // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. - // - // For information about how to specify and consume your own Job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. - // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - Arguments map[string]*string `type:"map"` +// String returns the string representation +func (s CreateCrawlerOutput) String() string { + return awsutil.Prettify(s) +} - // The name of a job to be executed. - JobName *string `min:"1" type:"string"` +// GoString returns the string representation +func (s CreateCrawlerOutput) GoString() string { + return s.String() +} - // Specifies configuration properties of a job run notification. - NotificationProperty *NotificationProperty `type:"structure"` +// Specifies a custom CSV classifier for CreateClassifier to create. +type CreateCsvClassifierRequest struct { + _ struct{} `type:"structure"` - // The name of the SecurityConfiguration structure to be used with this action. - SecurityConfiguration *string `min:"1" type:"string"` + // Enables the processing of files that contain only one column. + AllowSingleColumn *bool `type:"boolean"` - // The JobRun timeout in minutes. This is the maximum time that a job run can - // consume resources before it is terminated and enters TIMEOUT status. The - // default is 2,880 minutes (48 hours). This overrides the timeout value set - // in the parent job. - Timeout *int64 `min:"1" type:"integer"` + // Indicates whether the CSV file contains a header. + ContainsHeader *string `type:"string" enum:"CsvHeaderOption"` + + // A custom symbol to denote what separates each column entry in the row. + Delimiter *string `min:"1" type:"string"` + + // Specifies not to trim values before identifying the type of column values. + // The default value is true. + DisableValueTrimming *bool `type:"boolean"` + + // A list of strings representing column names. + Header []*string `type:"list"` + + // The name of the classifier. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A custom symbol to denote what combines content into a single column value. + // Must be different from the column delimiter. + QuoteSymbol *string `min:"1" type:"string"` } // String returns the string representation -func (s Action) String() string { +func (s CreateCsvClassifierRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Action) GoString() string { +func (s CreateCsvClassifierRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
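+//
+// For example (an illustrative sketch; the classifier name is a placeholder),
+// a CSV classifier request that satisfies the required fields:
+//
+//	csv := &glue.CreateCsvClassifierRequest{
+//		Name:           aws.String("my-csv-classifier"),
+//		Delimiter:      aws.String(","),
+//		QuoteSymbol:    aws.String(`"`),
+//		ContainsHeader: aws.String(glue.CsvHeaderOptionPresent),
+//	}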
-func (s *Action) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Action"} - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) +func (s *CreateCsvClassifierRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCsvClassifierRequest"} + if s.Delimiter != nil && len(*s.Delimiter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1)) } - if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.NotificationProperty != nil { - if err := s.NotificationProperty.Validate(); err != nil { - invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) - } + if s.QuoteSymbol != nil && len(*s.QuoteSymbol) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuoteSymbol", 1)) } if invalidParams.Len() > 0 { @@ -9893,99 +15768,247 @@ func (s *Action) Validate() error { return nil } -// SetArguments sets the Arguments field's value. -func (s *Action) SetArguments(v map[string]*string) *Action { - s.Arguments = v +// SetAllowSingleColumn sets the AllowSingleColumn field's value. +func (s *CreateCsvClassifierRequest) SetAllowSingleColumn(v bool) *CreateCsvClassifierRequest { + s.AllowSingleColumn = &v return s } -// SetJobName sets the JobName field's value. -func (s *Action) SetJobName(v string) *Action { - s.JobName = &v +// SetContainsHeader sets the ContainsHeader field's value. +func (s *CreateCsvClassifierRequest) SetContainsHeader(v string) *CreateCsvClassifierRequest { + s.ContainsHeader = &v return s } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *Action) SetNotificationProperty(v *NotificationProperty) *Action { - s.NotificationProperty = v +// SetDelimiter sets the Delimiter field's value. +func (s *CreateCsvClassifierRequest) SetDelimiter(v string) *CreateCsvClassifierRequest { + s.Delimiter = &v return s } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *Action) SetSecurityConfiguration(v string) *Action { - s.SecurityConfiguration = &v +// SetDisableValueTrimming sets the DisableValueTrimming field's value. +func (s *CreateCsvClassifierRequest) SetDisableValueTrimming(v bool) *CreateCsvClassifierRequest { + s.DisableValueTrimming = &v + return s +} + +// SetHeader sets the Header field's value. +func (s *CreateCsvClassifierRequest) SetHeader(v []*string) *CreateCsvClassifierRequest { + s.Header = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateCsvClassifierRequest) SetName(v string) *CreateCsvClassifierRequest { + s.Name = &v + return s +} + +// SetQuoteSymbol sets the QuoteSymbol field's value. +func (s *CreateCsvClassifierRequest) SetQuoteSymbol(v string) *CreateCsvClassifierRequest { + s.QuoteSymbol = &v + return s +} + +type CreateDatabaseInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which to create the database. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The metadata for the database. 
+ // + // DatabaseInput is a required field + DatabaseInput *DatabaseInput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDatabaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatabaseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatabaseInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseInput == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseInput")) + } + if s.DatabaseInput != nil { + if err := s.DatabaseInput.Validate(); err != nil { + invalidParams.AddNested("DatabaseInput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *CreateDatabaseInput) SetCatalogId(v string) *CreateDatabaseInput { + s.CatalogId = &v return s } -// SetTimeout sets the Timeout field's value. -func (s *Action) SetTimeout(v int64) *Action { - s.Timeout = &v - return s -} +// SetDatabaseInput sets the DatabaseInput field's value. +func (s *CreateDatabaseInput) SetDatabaseInput(v *DatabaseInput) *CreateDatabaseInput { + s.DatabaseInput = v + return s +} + +type CreateDatabaseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDatabaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatabaseOutput) GoString() string { + return s.String() +} + +type CreateDevEndpointInput struct { + _ struct{} `type:"structure"` + + // A map of arguments used to configure the DevEndpoint. + Arguments map[string]*string `type:"map"` + + // The name to be assigned to the new DevEndpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` + + // The path to one or more Java .jar files in an S3 bucket that should be loaded + // in your DevEndpoint. + ExtraJarsS3Path *string `type:"string"` + + // The paths to one or more Python libraries in an Amazon S3 bucket that should + // be loaded in your DevEndpoint. Multiple values must be complete paths separated + // by a comma. + // + // You can only use pure Python libraries with a DevEndpoint. Libraries that + // rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python + // data analysis library, are not yet supported. + ExtraPythonLibsS3Path *string `type:"string"` + + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. The Python version indicates the version supported for running + // your ETL scripts on development endpoints. + // + // For more information about the available AWS Glue versions and corresponding + // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + // in the developer guide. + // + // Development endpoints that are created without specifying a Glue version + // default to Glue 0.9. + // + // You can specify a version of Python support for development endpoints by + // using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint + // APIs. If no arguments are provided, the version defaults to Python 2. 
+ GlueVersion *string `min:"1" type:"string"` + + // The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint. + NumberOfNodes *int64 `type:"integer"` + + // The number of workers of a defined workerType that are allocated to the development + // endpoint. + // + // The maximum number of workers you can define are 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` + + // The public key to be used by this DevEndpoint for authentication. This attribute + // is provided for backward compatibility because the recommended attribute + // to use is public keys. + PublicKey *string `type:"string"` + + // A list of public keys to be used by the development endpoints for authentication. + // The use of this attribute is preferred over a single public key because the + // public keys allow you to have a different private key per client. + // + // If you previously created an endpoint with a public key, you must remove + // that key to be able to set a list of public keys. Call the UpdateDevEndpoint + // API with the public key content in the deletePublicKeys attribute, and the + // list of new keys in the addPublicKeys attribute. + PublicKeys []*string `type:"list"` + + // The IAM role for the DevEndpoint. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` + + // The name of the SecurityConfiguration structure to be used with this DevEndpoint. + SecurityConfiguration *string `min:"1" type:"string"` -type BatchCreatePartitionInput struct { - _ struct{} `type:"structure"` + // Security group IDs for the security groups to be used by the new DevEndpoint. + SecurityGroupIds []*string `type:"list"` - // The ID of the catalog in which the partion is to be created. Currently, this - // should be the AWS account ID. - CatalogId *string `min:"1" type:"string"` + // The subnet ID for the new DevEndpoint to use. + SubnetId *string `type:"string"` - // The name of the metadata database in which the partition is to be created. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The tags to use with this DevEndpoint. You may use tags to limit access to + // the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags + // in AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) + // in the developer guide. + Tags map[string]*string `type:"map"` - // A list of PartitionInput structures that define the partitions to be created. + // The type of predefined worker that is allocated to the development endpoint. + // Accepts a value of Standard, G.1X, or G.2X. // - // PartitionInputList is a required field - PartitionInputList []*PartitionInput `type:"list" required:"true"` - - // The name of the metadata table in which the partition is to be created. + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of + // memory, 64 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of + // memory, 128 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. 
+ // + // Known issue: when a development endpoint is created with the G.2X WorkerType + // configuration, the Spark drivers for the development endpoint will run on + // 4 vCPU, 16 GB of memory, and a 64 GB disk. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s BatchCreatePartitionInput) String() string { +func (s CreateDevEndpointInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchCreatePartitionInput) GoString() string { +func (s CreateDevEndpointInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchCreatePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchCreatePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionInputList == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionInputList")) +func (s *CreateDevEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDevEndpointInput"} + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) + if s.GlueVersion != nil && len(*s.GlueVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlueVersion", 1)) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) } - if s.PartitionInputList != nil { - for i, v := range s.PartitionInputList { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionInputList", i), err.(request.ErrInvalidParams)) - } - } + if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) } if invalidParams.Len() > 0 { @@ -9994,402 +16017,350 @@ func (s *BatchCreatePartitionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *BatchCreatePartitionInput) SetCatalogId(v string) *BatchCreatePartitionInput { - s.CatalogId = &v +// SetArguments sets the Arguments field's value. +func (s *CreateDevEndpointInput) SetArguments(v map[string]*string) *CreateDevEndpointInput { + s.Arguments = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchCreatePartitionInput) SetDatabaseName(v string) *BatchCreatePartitionInput { - s.DatabaseName = &v +// SetEndpointName sets the EndpointName field's value. +func (s *CreateDevEndpointInput) SetEndpointName(v string) *CreateDevEndpointInput { + s.EndpointName = &v return s } -// SetPartitionInputList sets the PartitionInputList field's value. -func (s *BatchCreatePartitionInput) SetPartitionInputList(v []*PartitionInput) *BatchCreatePartitionInput { - s.PartitionInputList = v +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. 
+func (s *CreateDevEndpointInput) SetExtraJarsS3Path(v string) *CreateDevEndpointInput { + s.ExtraJarsS3Path = &v return s } -// SetTableName sets the TableName field's value. -func (s *BatchCreatePartitionInput) SetTableName(v string) *BatchCreatePartitionInput { - s.TableName = &v +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. +func (s *CreateDevEndpointInput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointInput { + s.ExtraPythonLibsS3Path = &v return s } -type BatchCreatePartitionOutput struct { - _ struct{} `type:"structure"` - - // Errors encountered when trying to create the requested partitions. - Errors []*PartitionError `type:"list"` +// SetGlueVersion sets the GlueVersion field's value. +func (s *CreateDevEndpointInput) SetGlueVersion(v string) *CreateDevEndpointInput { + s.GlueVersion = &v + return s } -// String returns the string representation -func (s BatchCreatePartitionOutput) String() string { - return awsutil.Prettify(s) +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *CreateDevEndpointInput) SetNumberOfNodes(v int64) *CreateDevEndpointInput { + s.NumberOfNodes = &v + return s } -// GoString returns the string representation -func (s BatchCreatePartitionOutput) GoString() string { - return s.String() +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *CreateDevEndpointInput) SetNumberOfWorkers(v int64) *CreateDevEndpointInput { + s.NumberOfWorkers = &v + return s } -// SetErrors sets the Errors field's value. -func (s *BatchCreatePartitionOutput) SetErrors(v []*PartitionError) *BatchCreatePartitionOutput { - s.Errors = v +// SetPublicKey sets the PublicKey field's value. +func (s *CreateDevEndpointInput) SetPublicKey(v string) *CreateDevEndpointInput { + s.PublicKey = &v return s } -type BatchDeleteConnectionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog in which the connections reside. If none is provided, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A list of names of the connections to delete. - // - // ConnectionNameList is a required field - ConnectionNameList []*string `type:"list" required:"true"` +// SetPublicKeys sets the PublicKeys field's value. +func (s *CreateDevEndpointInput) SetPublicKeys(v []*string) *CreateDevEndpointInput { + s.PublicKeys = v + return s } -// String returns the string representation -func (s BatchDeleteConnectionInput) String() string { - return awsutil.Prettify(s) +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDevEndpointInput) SetRoleArn(v string) *CreateDevEndpointInput { + s.RoleArn = &v + return s } -// GoString returns the string representation -func (s BatchDeleteConnectionInput) GoString() string { - return s.String() +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *CreateDevEndpointInput) SetSecurityConfiguration(v string) *CreateDevEndpointInput { + s.SecurityConfiguration = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchDeleteConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionNameList == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionNameList")) - } +// SetSecurityGroupIds sets the SecurityGroupIds field's value. 
+func (s *CreateDevEndpointInput) SetSecurityGroupIds(v []*string) *CreateDevEndpointInput { + s.SecurityGroupIds = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSubnetId sets the SubnetId field's value. +func (s *CreateDevEndpointInput) SetSubnetId(v string) *CreateDevEndpointInput { + s.SubnetId = &v + return s } -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeleteConnectionInput) SetCatalogId(v string) *BatchDeleteConnectionInput { - s.CatalogId = &v +// SetTags sets the Tags field's value. +func (s *CreateDevEndpointInput) SetTags(v map[string]*string) *CreateDevEndpointInput { + s.Tags = v return s } -// SetConnectionNameList sets the ConnectionNameList field's value. -func (s *BatchDeleteConnectionInput) SetConnectionNameList(v []*string) *BatchDeleteConnectionInput { - s.ConnectionNameList = v +// SetWorkerType sets the WorkerType field's value. +func (s *CreateDevEndpointInput) SetWorkerType(v string) *CreateDevEndpointInput { + s.WorkerType = &v return s } -type BatchDeleteConnectionOutput struct { +type CreateDevEndpointOutput struct { _ struct{} `type:"structure"` - // A map of the names of connections that were not successfully deleted to error - // details. - Errors map[string]*ErrorDetail `type:"map"` + // The map of arguments used to configure this DevEndpoint. + // + // Valid arguments are: + // + // * "--enable-glue-datacatalog": "" + // + // * "GLUE_PYTHON_VERSION": "3" + // + // * "GLUE_PYTHON_VERSION": "2" + // + // You can specify a version of Python support for development endpoints by + // using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint + // APIs. If no arguments are provided, the version defaults to Python 2. + Arguments map[string]*string `type:"map"` - // A list of names of the connection definitions that were successfully deleted. - Succeeded []*string `type:"list"` -} + // The AWS Availability Zone where this DevEndpoint is located. + AvailabilityZone *string `type:"string"` -// String returns the string representation -func (s BatchDeleteConnectionOutput) String() string { - return awsutil.Prettify(s) -} + // The point in time at which this DevEndpoint was created. + CreatedTimestamp *time.Time `type:"timestamp"` -// GoString returns the string representation -func (s BatchDeleteConnectionOutput) GoString() string { - return s.String() -} + // The name assigned to the new DevEndpoint. + EndpointName *string `type:"string"` -// SetErrors sets the Errors field's value. -func (s *BatchDeleteConnectionOutput) SetErrors(v map[string]*ErrorDetail) *BatchDeleteConnectionOutput { - s.Errors = v - return s -} + // Path to one or more Java .jar files in an S3 bucket that will be loaded in + // your DevEndpoint. + ExtraJarsS3Path *string `type:"string"` -// SetSucceeded sets the Succeeded field's value. -func (s *BatchDeleteConnectionOutput) SetSucceeded(v []*string) *BatchDeleteConnectionOutput { - s.Succeeded = v - return s -} + // The paths to one or more Python libraries in an S3 bucket that will be loaded + // in your DevEndpoint. + ExtraPythonLibsS3Path *string `type:"string"` -type BatchDeletePartitionInput struct { - _ struct{} `type:"structure"` + // The reason for a current failure in this DevEndpoint. + FailureReason *string `type:"string"` - // The ID of the Data Catalog where the partition to be deleted resides. If - // none is supplied, the AWS account ID is used by default. 
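Because every generated setter above returns the receiver, CreateDevEndpointInput can be built by chaining. A sketch under that pattern; the endpoint name, role ARN, and public key are placeholders, and createDevEndpoint is a hypothetical helper:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// createDevEndpoint builds the input with the chainable setters and lets
// Validate enforce the required EndpointName/RoleArn and min-length fields.
func createDevEndpoint(svc *glue.Glue) {
	input := (&glue.CreateDevEndpointInput{}).
		SetEndpointName("example-endpoint").
		SetRoleArn("arn:aws:iam::123456789012:role/GlueDevEndpointRole").
		SetGlueVersion("1.0").
		SetWorkerType("G.1X").
		SetNumberOfWorkers(2).
		SetPublicKeys([]*string{aws.String("ssh-rsa AAAA... placeholder")})

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.CreateDevEndpoint(input); err != nil {
		log.Fatal(err)
	}
}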
- CatalogId *string `min:"1" type:"string"` + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. The Python version indicates the version supported for running + // your ETL scripts on development endpoints. + GlueVersion *string `min:"1" type:"string"` - // The name of the catalog database in which the table in question resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint. + NumberOfNodes *int64 `type:"integer"` + + // The number of workers of a defined workerType that are allocated to the development + // endpoint. + NumberOfWorkers *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint. + RoleArn *string `type:"string"` + + // The name of the SecurityConfiguration structure being used with this DevEndpoint. + SecurityConfiguration *string `min:"1" type:"string"` + + // The security groups assigned to the new DevEndpoint. + SecurityGroupIds []*string `type:"list"` + + // The current status of the new DevEndpoint. + Status *string `type:"string"` + + // The subnet ID assigned to the new DevEndpoint. + SubnetId *string `type:"string"` + + // The ID of the virtual private cloud (VPC) used by this DevEndpoint. + VpcId *string `type:"string"` - // A list of PartitionInput structures that define the partitions to be deleted. - // - // PartitionsToDelete is a required field - PartitionsToDelete []*PartitionValueList `type:"list" required:"true"` + // The type of predefined worker that is allocated to the development endpoint. + // May be a value of Standard, G.1X, or G.2X. + WorkerType *string `type:"string" enum:"WorkerType"` - // The name of the table where the partitions to be deleted is located. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // The address of the YARN endpoint used by this DevEndpoint. + YarnEndpointAddress *string `type:"string"` + + // The Apache Zeppelin port for the remote Apache Spark interpreter. + ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"` } // String returns the string representation -func (s BatchDeletePartitionInput) String() string { +func (s CreateDevEndpointOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchDeletePartitionInput) GoString() string { +func (s CreateDevEndpointOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchDeletePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeletePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionsToDelete == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionsToDelete")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionsToDelete != nil { - for i, v := range s.PartitionsToDelete { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToDelete", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeletePartitionInput) SetCatalogId(v string) *BatchDeletePartitionInput { - s.CatalogId = &v +// SetArguments sets the Arguments field's value. +func (s *CreateDevEndpointOutput) SetArguments(v map[string]*string) *CreateDevEndpointOutput { + s.Arguments = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchDeletePartitionInput) SetDatabaseName(v string) *BatchDeletePartitionInput { - s.DatabaseName = &v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateDevEndpointOutput) SetAvailabilityZone(v string) *CreateDevEndpointOutput { + s.AvailabilityZone = &v return s } -// SetPartitionsToDelete sets the PartitionsToDelete field's value. -func (s *BatchDeletePartitionInput) SetPartitionsToDelete(v []*PartitionValueList) *BatchDeletePartitionInput { - s.PartitionsToDelete = v +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *CreateDevEndpointOutput) SetCreatedTimestamp(v time.Time) *CreateDevEndpointOutput { + s.CreatedTimestamp = &v return s } -// SetTableName sets the TableName field's value. -func (s *BatchDeletePartitionInput) SetTableName(v string) *BatchDeletePartitionInput { - s.TableName = &v +// SetEndpointName sets the EndpointName field's value. +func (s *CreateDevEndpointOutput) SetEndpointName(v string) *CreateDevEndpointOutput { + s.EndpointName = &v return s } -type BatchDeletePartitionOutput struct { - _ struct{} `type:"structure"` - - // Errors encountered when trying to delete the requested partitions. - Errors []*PartitionError `type:"list"` +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. +func (s *CreateDevEndpointOutput) SetExtraJarsS3Path(v string) *CreateDevEndpointOutput { + s.ExtraJarsS3Path = &v + return s } -// String returns the string representation -func (s BatchDeletePartitionOutput) String() string { - return awsutil.Prettify(s) +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. +func (s *CreateDevEndpointOutput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointOutput { + s.ExtraPythonLibsS3Path = &v + return s } -// GoString returns the string representation -func (s BatchDeletePartitionOutput) GoString() string { - return s.String() +// SetFailureReason sets the FailureReason field's value. 
+func (s *CreateDevEndpointOutput) SetFailureReason(v string) *CreateDevEndpointOutput { + s.FailureReason = &v + return s } -// SetErrors sets the Errors field's value. -func (s *BatchDeletePartitionOutput) SetErrors(v []*PartitionError) *BatchDeletePartitionOutput { - s.Errors = v +// SetGlueVersion sets the GlueVersion field's value. +func (s *CreateDevEndpointOutput) SetGlueVersion(v string) *CreateDevEndpointOutput { + s.GlueVersion = &v return s } -type BatchDeleteTableInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the tables to delete reside. For Hive - // compatibility, this name is entirely lowercase. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of the table to delete. - // - // TablesToDelete is a required field - TablesToDelete []*string `type:"list" required:"true"` +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *CreateDevEndpointOutput) SetNumberOfNodes(v int64) *CreateDevEndpointOutput { + s.NumberOfNodes = &v + return s } -// String returns the string representation -func (s BatchDeleteTableInput) String() string { - return awsutil.Prettify(s) +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *CreateDevEndpointOutput) SetNumberOfWorkers(v int64) *CreateDevEndpointOutput { + s.NumberOfWorkers = &v + return s } -// GoString returns the string representation -func (s BatchDeleteTableInput) GoString() string { - return s.String() +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDevEndpointOutput) SetRoleArn(v string) *CreateDevEndpointOutput { + s.RoleArn = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchDeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TablesToDelete == nil { - invalidParams.Add(request.NewErrParamRequired("TablesToDelete")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *CreateDevEndpointOutput) SetSecurityConfiguration(v string) *CreateDevEndpointOutput { + s.SecurityConfiguration = &v + return s } -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeleteTableInput) SetCatalogId(v string) *BatchDeleteTableInput { - s.CatalogId = &v +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateDevEndpointOutput) SetSecurityGroupIds(v []*string) *CreateDevEndpointOutput { + s.SecurityGroupIds = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchDeleteTableInput) SetDatabaseName(v string) *BatchDeleteTableInput { - s.DatabaseName = &v +// SetStatus sets the Status field's value. +func (s *CreateDevEndpointOutput) SetStatus(v string) *CreateDevEndpointOutput { + s.Status = &v return s } -// SetTablesToDelete sets the TablesToDelete field's value. 
-func (s *BatchDeleteTableInput) SetTablesToDelete(v []*string) *BatchDeleteTableInput { - s.TablesToDelete = v +// SetSubnetId sets the SubnetId field's value. +func (s *CreateDevEndpointOutput) SetSubnetId(v string) *CreateDevEndpointOutput { + s.SubnetId = &v return s } -type BatchDeleteTableOutput struct { - _ struct{} `type:"structure"` - - // A list of errors encountered in attempting to delete the specified tables. - Errors []*TableError `type:"list"` +// SetVpcId sets the VpcId field's value. +func (s *CreateDevEndpointOutput) SetVpcId(v string) *CreateDevEndpointOutput { + s.VpcId = &v + return s } -// String returns the string representation -func (s BatchDeleteTableOutput) String() string { - return awsutil.Prettify(s) +// SetWorkerType sets the WorkerType field's value. +func (s *CreateDevEndpointOutput) SetWorkerType(v string) *CreateDevEndpointOutput { + s.WorkerType = &v + return s } -// GoString returns the string representation -func (s BatchDeleteTableOutput) GoString() string { - return s.String() +// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. +func (s *CreateDevEndpointOutput) SetYarnEndpointAddress(v string) *CreateDevEndpointOutput { + s.YarnEndpointAddress = &v + return s } -// SetErrors sets the Errors field's value. -func (s *BatchDeleteTableOutput) SetErrors(v []*TableError) *BatchDeleteTableOutput { - s.Errors = v +// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. +func (s *CreateDevEndpointOutput) SetZeppelinRemoteSparkInterpreterPort(v int64) *CreateDevEndpointOutput { + s.ZeppelinRemoteSparkInterpreterPort = &v return s } -type BatchDeleteTableVersionInput struct { +// Specifies a grok classifier for CreateClassifier to create. +type CreateGrokClassifierRequest struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The database in the catalog in which the table resides. For Hive compatibility, - // this name is entirely lowercase. + // An identifier of the data format that the classifier matches, such as Twitter, + // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // Classification is a required field + Classification *string `type:"string" required:"true"` - // The name of the table. For Hive compatibility, this name is entirely lowercase. + // Optional custom grok patterns used by this classifier. + CustomPatterns *string `type:"string"` + + // The grok pattern used by this classifier. // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // GrokPattern is a required field + GrokPattern *string `min:"1" type:"string" required:"true"` - // A list of the IDs of versions to be deleted. A VersionId is a string representation - // of an integer. Each version is incremented by 1. + // The name of the new classifier. 
// - // VersionIds is a required field - VersionIds []*string `type:"list" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s BatchDeleteTableVersionInput) String() string { +func (s CreateGrokClassifierRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchDeleteTableVersionInput) GoString() string { +func (s CreateGrokClassifierRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchDeleteTableVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteTableVersionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) +func (s *CreateGrokClassifierRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGrokClassifierRequest"} + if s.Classification == nil { + invalidParams.Add(request.NewErrParamRequired("Classification")) } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + if s.GrokPattern == nil { + invalidParams.Add(request.NewErrParamRequired("GrokPattern")) } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) + if s.GrokPattern != nil && len(*s.GrokPattern) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrokPattern", 1)) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.VersionIds == nil { - invalidParams.Add(request.NewErrParamRequired("VersionIds")) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -10398,152 +16369,196 @@ func (s *BatchDeleteTableVersionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *BatchDeleteTableVersionInput) SetCatalogId(v string) *BatchDeleteTableVersionInput { - s.CatalogId = &v +// SetClassification sets the Classification field's value. +func (s *CreateGrokClassifierRequest) SetClassification(v string) *CreateGrokClassifierRequest { + s.Classification = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchDeleteTableVersionInput) SetDatabaseName(v string) *BatchDeleteTableVersionInput { - s.DatabaseName = &v +// SetCustomPatterns sets the CustomPatterns field's value. +func (s *CreateGrokClassifierRequest) SetCustomPatterns(v string) *CreateGrokClassifierRequest { + s.CustomPatterns = &v return s } -// SetTableName sets the TableName field's value. -func (s *BatchDeleteTableVersionInput) SetTableName(v string) *BatchDeleteTableVersionInput { - s.TableName = &v +// SetGrokPattern sets the GrokPattern field's value. +func (s *CreateGrokClassifierRequest) SetGrokPattern(v string) *CreateGrokClassifierRequest { + s.GrokPattern = &v return s } -// SetVersionIds sets the VersionIds field's value. -func (s *BatchDeleteTableVersionInput) SetVersionIds(v []*string) *BatchDeleteTableVersionInput { - s.VersionIds = v +// SetName sets the Name field's value. 
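CreateGrokClassifierRequest is not sent on its own; it rides inside a CreateClassifierInput, one variant per classifier kind (grok, CSV, JSON, XML). A sketch of that wrapping; the classification, name, and pattern are placeholders, and createGrokClassifier is a hypothetical helper:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// createGrokClassifier wraps the request struct in CreateClassifierInput;
// Classification, GrokPattern, and Name are the fields Validate requires.
func createGrokClassifier(svc *glue.Glue) {
	_, err := svc.CreateClassifier(&glue.CreateClassifierInput{
		GrokClassifier: &glue.CreateGrokClassifierRequest{
			Classification: aws.String("apache-access"),
			Name:           aws.String("example-grok-classifier"),
			GrokPattern:    aws.String("%{COMBINEDAPACHELOG}"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}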
+func (s *CreateGrokClassifierRequest) SetName(v string) *CreateGrokClassifierRequest { + s.Name = &v return s } -type BatchDeleteTableVersionOutput struct { +type CreateJobInput struct { _ struct{} `type:"structure"` - // A list of errors encountered while trying to delete the specified table versions. - Errors []*TableVersionError `type:"list"` -} - -// String returns the string representation -func (s BatchDeleteTableVersionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteTableVersionOutput) GoString() string { - return s.String() -} + // This parameter is deprecated. Use MaxCapacity instead. + // + // The number of AWS Glue data processing units (DPUs) to allocate to this Job. + // You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). + // + // Deprecated: This property is deprecated, use MaxCapacity instead. + AllocatedCapacity *int64 `deprecated:"true" type:"integer"` -// SetErrors sets the Errors field's value. -func (s *BatchDeleteTableVersionOutput) SetErrors(v []*TableVersionError) *BatchDeleteTableVersionOutput { - s.Errors = v - return s -} + // The JobCommand that executes this job. + // + // Command is a required field + Command *JobCommand `type:"structure" required:"true"` -type BatchGetCrawlersInput struct { - _ struct{} `type:"structure"` + // The connections used for this job. + Connections *ConnectionsList `type:"structure"` - // A list of crawler names, which may be the names returned from the ListCrawlers - // operation. + // The default arguments for this job. // - // CrawlerNames is a required field - CrawlerNames []*string `type:"list" required:"true"` -} + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + DefaultArguments map[string]*string `type:"map"` -// String returns the string representation -func (s BatchGetCrawlersInput) String() string { - return awsutil.Prettify(s) -} + // Description of the job being defined. + Description *string `type:"string"` -// GoString returns the string representation -func (s BatchGetCrawlersInput) GoString() string { - return s.String() -} + // An ExecutionProperty specifying the maximum number of concurrent runs allowed + // for this job. + ExecutionProperty *ExecutionProperty `type:"structure"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetCrawlersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetCrawlersInput"} - if s.CrawlerNames == nil { - invalidParams.Add(request.NewErrParamRequired("CrawlerNames")) - } + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. 
The Python version indicates the version supported for jobs + // of type Spark. + // + // For more information about the available AWS Glue versions and corresponding + // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + // in the developer guide. + // + // Jobs that are created without specifying a Glue version default to Glue 0.9. + GlueVersion *string `min:"1" type:"string"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // This field is reserved for future use. + LogUri *string `type:"string"` -// SetCrawlerNames sets the CrawlerNames field's value. -func (s *BatchGetCrawlersInput) SetCrawlerNames(v []*string) *BatchGetCrawlersInput { - s.CrawlerNames = v - return s -} + // The number of AWS Glue data processing units (DPUs) that can be allocated + // when this job runs. A DPU is a relative measure of processing power that + // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, + // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // + // The value that can be allocated for MaxCapacity depends on whether you are + // running a Python shell job or an Apache Spark ETL job: + // + // * When you specify a Python shell job (JobCommand.Name="pythonshell"), + // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), + // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job + // type cannot have a fractional DPU allocation. + MaxCapacity *float64 `type:"double"` -type BatchGetCrawlersOutput struct { - _ struct{} `type:"structure"` + // The maximum number of times to retry this job if it fails. + MaxRetries *int64 `type:"integer"` - // A list of crawler definitions. - Crawlers []*Crawler `type:"list"` + // The name you assign to this job definition. It must be unique in your account. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // A list of names of crawlers not found. - CrawlersNotFound []*string `type:"list"` -} + // Specifies configuration properties of a job notification. + NotificationProperty *NotificationProperty `type:"structure"` -// String returns the string representation -func (s BatchGetCrawlersOutput) String() string { - return awsutil.Prettify(s) -} + // The number of workers of a defined workerType that are allocated when a job + // runs. + // + // The maximum number of workers you can define are 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` -// GoString returns the string representation -func (s BatchGetCrawlersOutput) GoString() string { - return s.String() -} + // The name or Amazon Resource Name (ARN) of the IAM role associated with this + // job. + // + // Role is a required field + Role *string `type:"string" required:"true"` -// SetCrawlers sets the Crawlers field's value. -func (s *BatchGetCrawlersOutput) SetCrawlers(v []*Crawler) *BatchGetCrawlersOutput { - s.Crawlers = v - return s -} + // The name of the SecurityConfiguration structure to be used with this job. + SecurityConfiguration *string `min:"1" type:"string"` -// SetCrawlersNotFound sets the CrawlersNotFound field's value. -func (s *BatchGetCrawlersOutput) SetCrawlersNotFound(v []*string) *BatchGetCrawlersOutput { - s.CrawlersNotFound = v - return s -} + // The tags to use with this job. 
You may use tags to limit access to the job. + // For more information about tags in AWS Glue, see AWS Tags in AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) + // in the developer guide. + Tags map[string]*string `type:"map"` -type BatchGetDevEndpointsInput struct { - _ struct{} `type:"structure"` + // The job timeout in minutes. This is the maximum time that a job run can consume + // resources before it is terminated and enters TIMEOUT status. The default + // is 2,880 minutes (48 hours). + Timeout *int64 `min:"1" type:"integer"` - // The list of DevEndpoint names, which may be the names returned from the ListDevEndpoint - // operation. + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of Standard, G.1X, or G.2X. // - // DevEndpointNames is a required field - DevEndpointNames []*string `min:"1" type:"list" required:"true"` + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of + // memory, 64 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of + // memory, 128 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s BatchGetDevEndpointsInput) String() string { +func (s CreateJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetDevEndpointsInput) GoString() string { +func (s CreateJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetDevEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetDevEndpointsInput"} - if s.DevEndpointNames == nil { - invalidParams.Add(request.NewErrParamRequired("DevEndpointNames")) +func (s *CreateJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} + if s.Command == nil { + invalidParams.Add(request.NewErrParamRequired("Command")) } - if s.DevEndpointNames != nil && len(s.DevEndpointNames) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DevEndpointNames", 1)) + if s.GlueVersion != nil && len(*s.GlueVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlueVersion", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.NotificationProperty != nil { + if err := s.NotificationProperty.Validate(); err != nil { + invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -10552,177 +16567,175 @@ func (s *BatchGetDevEndpointsInput) Validate() error { return nil } -// SetDevEndpointNames sets the DevEndpointNames field's value. 
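The MaxCapacity documentation above carries the one non-obvious rule: do not combine MaxCapacity with WorkerType and NumberOfWorkers. A sketch of a CreateJob call that follows it, sizing the job with workers and leaving MaxCapacity unset; the job name, role, and script location are placeholders, and createEtlJob is a hypothetical helper:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// createEtlJob sizes the job via WorkerType/NumberOfWorkers rather than
// MaxCapacity, per the field documentation for CreateJobInput.
func createEtlJob(svc *glue.Glue) {
	out, err := svc.CreateJob(&glue.CreateJobInput{
		Name: aws.String("example-etl-job"),
		Role: aws.String("arn:aws:iam::123456789012:role/GlueJobRole"),
		Command: &glue.JobCommand{
			Name:           aws.String("glueetl"),
			ScriptLocation: aws.String("s3://example-bucket/scripts/job.py"),
		},
		GlueVersion:     aws.String("1.0"),
		WorkerType:      aws.String("G.1X"),
		NumberOfWorkers: aws.Int64(10),
		Timeout:         aws.Int64(120), // minutes; Validate enforces >= 1
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created job:", aws.StringValue(out.Name))
}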
-func (s *BatchGetDevEndpointsInput) SetDevEndpointNames(v []*string) *BatchGetDevEndpointsInput { - s.DevEndpointNames = v +// SetAllocatedCapacity sets the AllocatedCapacity field's value. +func (s *CreateJobInput) SetAllocatedCapacity(v int64) *CreateJobInput { + s.AllocatedCapacity = &v + return s +} + +// SetCommand sets the Command field's value. +func (s *CreateJobInput) SetCommand(v *JobCommand) *CreateJobInput { + s.Command = v + return s +} + +// SetConnections sets the Connections field's value. +func (s *CreateJobInput) SetConnections(v *ConnectionsList) *CreateJobInput { + s.Connections = v + return s +} + +// SetDefaultArguments sets the DefaultArguments field's value. +func (s *CreateJobInput) SetDefaultArguments(v map[string]*string) *CreateJobInput { + s.DefaultArguments = v return s } -type BatchGetDevEndpointsOutput struct { - _ struct{} `type:"structure"` +// SetDescription sets the Description field's value. +func (s *CreateJobInput) SetDescription(v string) *CreateJobInput { + s.Description = &v + return s +} - // A list of DevEndpoint definitions. - DevEndpoints []*DevEndpoint `type:"list"` +// SetExecutionProperty sets the ExecutionProperty field's value. +func (s *CreateJobInput) SetExecutionProperty(v *ExecutionProperty) *CreateJobInput { + s.ExecutionProperty = v + return s +} - // A list of DevEndpoints not found. - DevEndpointsNotFound []*string `min:"1" type:"list"` +// SetGlueVersion sets the GlueVersion field's value. +func (s *CreateJobInput) SetGlueVersion(v string) *CreateJobInput { + s.GlueVersion = &v + return s } -// String returns the string representation -func (s BatchGetDevEndpointsOutput) String() string { - return awsutil.Prettify(s) +// SetLogUri sets the LogUri field's value. +func (s *CreateJobInput) SetLogUri(v string) *CreateJobInput { + s.LogUri = &v + return s } -// GoString returns the string representation -func (s BatchGetDevEndpointsOutput) GoString() string { - return s.String() +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *CreateJobInput) SetMaxCapacity(v float64) *CreateJobInput { + s.MaxCapacity = &v + return s } -// SetDevEndpoints sets the DevEndpoints field's value. -func (s *BatchGetDevEndpointsOutput) SetDevEndpoints(v []*DevEndpoint) *BatchGetDevEndpointsOutput { - s.DevEndpoints = v +// SetMaxRetries sets the MaxRetries field's value. +func (s *CreateJobInput) SetMaxRetries(v int64) *CreateJobInput { + s.MaxRetries = &v return s } -// SetDevEndpointsNotFound sets the DevEndpointsNotFound field's value. -func (s *BatchGetDevEndpointsOutput) SetDevEndpointsNotFound(v []*string) *BatchGetDevEndpointsOutput { - s.DevEndpointsNotFound = v +// SetName sets the Name field's value. +func (s *CreateJobInput) SetName(v string) *CreateJobInput { + s.Name = &v return s } -type BatchGetJobsInput struct { - _ struct{} `type:"structure"` +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *CreateJobInput) SetNotificationProperty(v *NotificationProperty) *CreateJobInput { + s.NotificationProperty = v + return s +} - // A list of job names, which may be the names returned from the ListJobs operation. - // - // JobNames is a required field - JobNames []*string `type:"list" required:"true"` +// SetNumberOfWorkers sets the NumberOfWorkers field's value. 
+func (s *CreateJobInput) SetNumberOfWorkers(v int64) *CreateJobInput { + s.NumberOfWorkers = &v + return s } -// String returns the string representation -func (s BatchGetJobsInput) String() string { - return awsutil.Prettify(s) +// SetRole sets the Role field's value. +func (s *CreateJobInput) SetRole(v string) *CreateJobInput { + s.Role = &v + return s } -// GoString returns the string representation -func (s BatchGetJobsInput) GoString() string { - return s.String() +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *CreateJobInput) SetSecurityConfiguration(v string) *CreateJobInput { + s.SecurityConfiguration = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetJobsInput"} - if s.JobNames == nil { - invalidParams.Add(request.NewErrParamRequired("JobNames")) - } +// SetTags sets the Tags field's value. +func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput { + s.Tags = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetTimeout sets the Timeout field's value. +func (s *CreateJobInput) SetTimeout(v int64) *CreateJobInput { + s.Timeout = &v + return s } -// SetJobNames sets the JobNames field's value. -func (s *BatchGetJobsInput) SetJobNames(v []*string) *BatchGetJobsInput { - s.JobNames = v +// SetWorkerType sets the WorkerType field's value. +func (s *CreateJobInput) SetWorkerType(v string) *CreateJobInput { + s.WorkerType = &v return s } -type BatchGetJobsOutput struct { +type CreateJobOutput struct { _ struct{} `type:"structure"` - // A list of job definitions. - Jobs []*Job `type:"list"` - - // A list of names of jobs not found. - JobsNotFound []*string `type:"list"` + // The unique name that was provided for this job definition. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s BatchGetJobsOutput) String() string { +func (s CreateJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetJobsOutput) GoString() string { +func (s CreateJobOutput) GoString() string { return s.String() } -// SetJobs sets the Jobs field's value. -func (s *BatchGetJobsOutput) SetJobs(v []*Job) *BatchGetJobsOutput { - s.Jobs = v - return s -} - -// SetJobsNotFound sets the JobsNotFound field's value. -func (s *BatchGetJobsOutput) SetJobsNotFound(v []*string) *BatchGetJobsOutput { - s.JobsNotFound = v +// SetName sets the Name field's value. +func (s *CreateJobOutput) SetName(v string) *CreateJobOutput { + s.Name = &v return s } -type BatchGetPartitionInput struct { +// Specifies a JSON classifier for CreateClassifier to create. +type CreateJsonClassifierRequest struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the partitions in question reside. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database where the partitions reside. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A list of partition values identifying the partitions to retrieve. + // A JsonPath string defining the JSON data for the classifier to classify. 
+ // AWS Glue supports a subset of JsonPath, as described in Writing JsonPath + // Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). // - // PartitionsToGet is a required field - PartitionsToGet []*PartitionValueList `type:"list" required:"true"` + // JsonPath is a required field + JsonPath *string `type:"string" required:"true"` - // The name of the partitions' table. + // The name of the classifier. // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s BatchGetPartitionInput) String() string { +func (s CreateJsonClassifierRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetPartitionInput) GoString() string { +func (s CreateJsonClassifierRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetPartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetPartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionsToGet == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionsToGet")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *CreateJsonClassifierRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJsonClassifierRequest"} + if s.JsonPath == nil { + invalidParams.Add(request.NewErrParamRequired("JsonPath")) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.PartitionsToGet != nil { - for i, v := range s.PartitionsToGet { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionsToGet", i), err.(request.ErrInvalidParams)) - } - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -10731,88 +16744,131 @@ func (s *BatchGetPartitionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *BatchGetPartitionInput) SetCatalogId(v string) *BatchGetPartitionInput { - s.CatalogId = &v +// SetJsonPath sets the JsonPath field's value. +func (s *CreateJsonClassifierRequest) SetJsonPath(v string) *CreateJsonClassifierRequest { + s.JsonPath = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *BatchGetPartitionInput) SetDatabaseName(v string) *BatchGetPartitionInput { - s.DatabaseName = &v +// SetName sets the Name field's value. +func (s *CreateJsonClassifierRequest) SetName(v string) *CreateJsonClassifierRequest { + s.Name = &v return s } -// SetPartitionsToGet sets the PartitionsToGet field's value. 
-func (s *BatchGetPartitionInput) SetPartitionsToGet(v []*PartitionValueList) *BatchGetPartitionInput { - s.PartitionsToGet = v - return s -} +type CreateMLTransformInput struct { + _ struct{} `type:"structure"` -// SetTableName sets the TableName field's value. -func (s *BatchGetPartitionInput) SetTableName(v string) *BatchGetPartitionInput { - s.TableName = &v - return s -} + // A description of the machine learning transform that is being defined. The + // default is an empty string. + Description *string `type:"string"` -type BatchGetPartitionOutput struct { - _ struct{} `type:"structure"` + // A list of AWS Glue table definitions used by the transform. + // + // InputRecordTables is a required field + InputRecordTables []*Table `type:"list" required:"true"` - // A list of the requested partitions. - Partitions []*Partition `type:"list"` + // The number of AWS Glue data processing units (DPUs) that are allocated to + // task runs for this transform. You can allocate from 2 to 100 DPUs; the default + // is 10. A DPU is a relative measure of processing power that consists of 4 + // vCPUs of compute capacity and 16 GB of memory. For more information, see + // the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // When the WorkerType field is set to a value other than Standard, the MaxCapacity + // field is set automatically and becomes read-only. + MaxCapacity *float64 `type:"double"` - // A list of the partition values in the request for which partions were not - // returned. - UnprocessedKeys []*PartitionValueList `type:"list"` -} + // The maximum number of times to retry a task for this transform after a task + // run fails. + MaxRetries *int64 `type:"integer"` -// String returns the string representation -func (s BatchGetPartitionOutput) String() string { - return awsutil.Prettify(s) -} + // The unique name that you give the transform when you create it. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` -// GoString returns the string representation -func (s BatchGetPartitionOutput) GoString() string { - return s.String() -} + // The number of workers of a defined workerType that are allocated when this + // task runs. + NumberOfWorkers *int64 `type:"integer"` -// SetPartitions sets the Partitions field's value. -func (s *BatchGetPartitionOutput) SetPartitions(v []*Partition) *BatchGetPartitionOutput { - s.Partitions = v - return s -} + // The algorithmic parameters that are specific to the transform type used. + // Conditionally dependent on the transform type. + // + // Parameters is a required field + Parameters *TransformParameters `type:"structure" required:"true"` -// SetUnprocessedKeys sets the UnprocessedKeys field's value. -func (s *BatchGetPartitionOutput) SetUnprocessedKeys(v []*PartitionValueList) *BatchGetPartitionOutput { - s.UnprocessedKeys = v - return s -} + // The name or Amazon Resource Name (ARN) of the IAM role with the required + // permissions. Ensure that this role has permission to your Amazon Simple Storage + // Service (Amazon S3) sources, targets, temporary directory, scripts, and any + // libraries that are used by the task run for this transform. + // + // Role is a required field + Role *string `type:"string" required:"true"` -type BatchGetTriggersInput struct { - _ struct{} `type:"structure"` + // The timeout of the task run for this transform in minutes. 
This is the maximum + // time that a task run for this transform can consume resources before it is + // terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). + Timeout *int64 `min:"1" type:"integer"` - // A list of trigger names, which may be the names returned from the ListTriggers - // operation. + // The type of predefined worker that is allocated when this task runs. Accepts + // a value of Standard, G.1X, or G.2X. + // + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. // - // TriggerNames is a required field - TriggerNames []*string `type:"list" required:"true"` + // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory + // and a 64GB disk, and 1 executor per worker. + // + // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory + // and a 128GB disk, and 1 executor per worker. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s BatchGetTriggersInput) String() string { +func (s CreateMLTransformInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetTriggersInput) GoString() string { +func (s CreateMLTransformInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetTriggersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetTriggersInput"} - if s.TriggerNames == nil { - invalidParams.Add(request.NewErrParamRequired("TriggerNames")) +func (s *CreateMLTransformInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMLTransformInput"} + if s.InputRecordTables == nil { + invalidParams.Add(request.NewErrParamRequired("InputRecordTables")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Parameters == nil { + invalidParams.Add(request.NewErrParamRequired("Parameters")) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.InputRecordTables != nil { + for i, v := range s.InputRecordTables { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputRecordTables", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -10821,124 +16877,146 @@ func (s *BatchGetTriggersInput) Validate() error { return nil } -// SetTriggerNames sets the TriggerNames field's value. -func (s *BatchGetTriggersInput) SetTriggerNames(v []*string) *BatchGetTriggersInput { - s.TriggerNames = v +// SetDescription sets the Description field's value. +func (s *CreateMLTransformInput) SetDescription(v string) *CreateMLTransformInput { + s.Description = &v return s } -type BatchGetTriggersOutput struct { - _ struct{} `type:"structure"` +// SetInputRecordTables sets the InputRecordTables field's value. +func (s *CreateMLTransformInput) SetInputRecordTables(v []*Table) *CreateMLTransformInput { + s.InputRecordTables = v + return s +} - // A list of trigger definitions. 
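CreateMLTransformInput.Validate above aggregates every failed constraint into a single ErrInvalidParams rather than stopping at the first. A sketch that provokes that behavior locally, assuming the same import path; the SDK runs the identical check inside the CreateMLTransform request path before anything is sent:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// InputRecordTables, Name, Parameters, and Role are all required, and
	// Timeout has a minimum value of 1, so five errors come back at once.
	input := &glue.CreateMLTransformInput{
		Timeout: aws.Int64(0),
	}
	fmt.Println(input.Validate())
}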
- Triggers []*Trigger `type:"list"` +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *CreateMLTransformInput) SetMaxCapacity(v float64) *CreateMLTransformInput { + s.MaxCapacity = &v + return s +} - // A list of names of triggers not found. - TriggersNotFound []*string `type:"list"` +// SetMaxRetries sets the MaxRetries field's value. +func (s *CreateMLTransformInput) SetMaxRetries(v int64) *CreateMLTransformInput { + s.MaxRetries = &v + return s } -// String returns the string representation -func (s BatchGetTriggersOutput) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *CreateMLTransformInput) SetName(v string) *CreateMLTransformInput { + s.Name = &v + return s } -// GoString returns the string representation -func (s BatchGetTriggersOutput) GoString() string { - return s.String() +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *CreateMLTransformInput) SetNumberOfWorkers(v int64) *CreateMLTransformInput { + s.NumberOfWorkers = &v + return s } -// SetTriggers sets the Triggers field's value. -func (s *BatchGetTriggersOutput) SetTriggers(v []*Trigger) *BatchGetTriggersOutput { - s.Triggers = v +// SetParameters sets the Parameters field's value. +func (s *CreateMLTransformInput) SetParameters(v *TransformParameters) *CreateMLTransformInput { + s.Parameters = v return s } -// SetTriggersNotFound sets the TriggersNotFound field's value. -func (s *BatchGetTriggersOutput) SetTriggersNotFound(v []*string) *BatchGetTriggersOutput { - s.TriggersNotFound = v +// SetRole sets the Role field's value. +func (s *CreateMLTransformInput) SetRole(v string) *CreateMLTransformInput { + s.Role = &v return s } -// Records an error that occurred when attempting to stop a specified job run. -type BatchStopJobRunError struct { - _ struct{} `type:"structure"` +// SetTimeout sets the Timeout field's value. +func (s *CreateMLTransformInput) SetTimeout(v int64) *CreateMLTransformInput { + s.Timeout = &v + return s +} - // Specifies details about the error that was encountered. - ErrorDetail *ErrorDetail `type:"structure"` +// SetWorkerType sets the WorkerType field's value. +func (s *CreateMLTransformInput) SetWorkerType(v string) *CreateMLTransformInput { + s.WorkerType = &v + return s +} - // The name of the job definition used in the job run in question. - JobName *string `min:"1" type:"string"` +type CreateMLTransformOutput struct { + _ struct{} `type:"structure"` - // The JobRunId of the job run in question. - JobRunId *string `min:"1" type:"string"` + // A unique identifier that is generated for the transform. + TransformId *string `min:"1" type:"string"` } // String returns the string representation -func (s BatchStopJobRunError) String() string { +func (s CreateMLTransformOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchStopJobRunError) GoString() string { +func (s CreateMLTransformOutput) GoString() string { return s.String() } -// SetErrorDetail sets the ErrorDetail field's value. -func (s *BatchStopJobRunError) SetErrorDetail(v *ErrorDetail) *BatchStopJobRunError { - s.ErrorDetail = v +// SetTransformId sets the TransformId field's value. +func (s *CreateMLTransformOutput) SetTransformId(v string) *CreateMLTransformOutput { + s.TransformId = &v return s } -// SetJobName sets the JobName field's value. 
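
Aside for reviewers of this hunk: the new CreateMLTransformInput follows the generated-SDK pattern of pointer fields, chainable setters, and a client-side Validate. A hypothetical usage sketch, not part of the vendored file, assuming the aws and glue packages from this SDK are imported; the transform name, role ARN, and worker settings are illustrative, and the required tables and transform parameters are taken as given:

	func exampleCreateMLTransform(tables []*glue.Table, params *glue.TransformParameters) error {
		// Name, Role, InputRecordTables, and Parameters are the required fields.
		in := &glue.CreateMLTransformInput{
			Name:              aws.String("dedupe-orders"),
			Role:              aws.String("arn:aws:iam::123456789012:role/AWSGlueServiceRole"),
			InputRecordTables: tables,
			Parameters:        params,
		}
		// Setters chain, so optional fields read naturally; Timeout is in minutes, min 1.
		in.SetWorkerType("G.1X").SetNumberOfWorkers(5).SetTimeout(120)
		return in.Validate() // local check only; no API call is made
	}
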
-func (s *BatchStopJobRunError) SetJobName(v string) *BatchStopJobRunError { - s.JobName = &v - return s -} +type CreatePartitionInput struct { + _ struct{} `type:"structure"` -// SetJobRunId sets the JobRunId field's value. -func (s *BatchStopJobRunError) SetJobRunId(v string) *BatchStopJobRunError { - s.JobRunId = &v - return s -} + // The AWS account ID of the catalog in which the partition is to be created. + CatalogId *string `min:"1" type:"string"` -type BatchStopJobRunInput struct { - _ struct{} `type:"structure"` + // The name of the metadata database in which the partition is to be created. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The name of the job definition for which to stop job runs. + // A PartitionInput structure defining the partition to be created. // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` + // PartitionInput is a required field + PartitionInput *PartitionInput `type:"structure" required:"true"` - // A list of the JobRunIds that should be stopped for that job definition. + // The name of the metadata table in which the partition is to be created. // - // JobRunIds is a required field - JobRunIds []*string `min:"1" type:"list" required:"true"` + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s BatchStopJobRunInput) String() string { +func (s CreatePartitionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchStopJobRunInput) GoString() string { +func (s CreatePartitionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchStopJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchStopJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) +func (s *CreatePartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.JobRunIds == nil { - invalidParams.Add(request.NewErrParamRequired("JobRunIds")) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.JobRunIds != nil && len(s.JobRunIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobRunIds", 1)) + if s.PartitionInput == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionInput")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.PartitionInput != nil { + if err := s.PartitionInput.Validate(); err != nil { + invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -10947,123 +17025,89 @@ func (s *BatchStopJobRunInput) Validate() error { return nil } -// SetJobName sets the JobName field's value. 
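
The CreatePartitionInput added above mirrors the catalog hierarchy (catalog, database, table, partition). A hypothetical sketch under the same import assumptions; PartitionInput is defined elsewhere in this file, its Values field is an assumption here, and all names are illustrative:

	func exampleCreatePartition() error {
		in := &glue.CreatePartitionInput{
			DatabaseName: aws.String("sales_db"),
			TableName:    aws.String("orders"),
			// One value per partition key of the table (for example, year and month).
			PartitionInput: &glue.PartitionInput{
				Values: aws.StringSlice([]string{"2019", "11"}),
			},
		}
		// DatabaseName, TableName, and PartitionInput are required; Validate
		// also recurses into PartitionInput, as in the generated code above.
		return in.Validate()
	}
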
-func (s *BatchStopJobRunInput) SetJobName(v string) *BatchStopJobRunInput { - s.JobName = &v +// SetCatalogId sets the CatalogId field's value. +func (s *CreatePartitionInput) SetCatalogId(v string) *CreatePartitionInput { + s.CatalogId = &v return s } -// SetJobRunIds sets the JobRunIds field's value. -func (s *BatchStopJobRunInput) SetJobRunIds(v []*string) *BatchStopJobRunInput { - s.JobRunIds = v +// SetDatabaseName sets the DatabaseName field's value. +func (s *CreatePartitionInput) SetDatabaseName(v string) *CreatePartitionInput { + s.DatabaseName = &v return s } -type BatchStopJobRunOutput struct { - _ struct{} `type:"structure"` - - // A list of the errors that were encountered in trying to stop JobRuns, including - // the JobRunId for which each error was encountered and details about the error. - Errors []*BatchStopJobRunError `type:"list"` - - // A list of the JobRuns that were successfully submitted for stopping. - SuccessfulSubmissions []*BatchStopJobRunSuccessfulSubmission `type:"list"` -} - -// String returns the string representation -func (s BatchStopJobRunOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchStopJobRunOutput) GoString() string { - return s.String() -} - -// SetErrors sets the Errors field's value. -func (s *BatchStopJobRunOutput) SetErrors(v []*BatchStopJobRunError) *BatchStopJobRunOutput { - s.Errors = v +// SetPartitionInput sets the PartitionInput field's value. +func (s *CreatePartitionInput) SetPartitionInput(v *PartitionInput) *CreatePartitionInput { + s.PartitionInput = v return s } -// SetSuccessfulSubmissions sets the SuccessfulSubmissions field's value. -func (s *BatchStopJobRunOutput) SetSuccessfulSubmissions(v []*BatchStopJobRunSuccessfulSubmission) *BatchStopJobRunOutput { - s.SuccessfulSubmissions = v +// SetTableName sets the TableName field's value. +func (s *CreatePartitionInput) SetTableName(v string) *CreatePartitionInput { + s.TableName = &v return s } -// Records a successful request to stop a specified JobRun. -type BatchStopJobRunSuccessfulSubmission struct { +type CreatePartitionOutput struct { _ struct{} `type:"structure"` - - // The name of the job definition used in the job run that was stopped. - JobName *string `min:"1" type:"string"` - - // The JobRunId of the job run that was stopped. - JobRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s BatchStopJobRunSuccessfulSubmission) String() string { +func (s CreatePartitionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchStopJobRunSuccessfulSubmission) GoString() string { +func (s CreatePartitionOutput) GoString() string { return s.String() } -// SetJobName sets the JobName field's value. -func (s *BatchStopJobRunSuccessfulSubmission) SetJobName(v string) *BatchStopJobRunSuccessfulSubmission { - s.JobName = &v - return s -} - -// SetJobRunId sets the JobRunId field's value. -func (s *BatchStopJobRunSuccessfulSubmission) SetJobRunId(v string) *BatchStopJobRunSuccessfulSubmission { - s.JobRunId = &v - return s -} - -// Specifies a table definition in the Data Catalog. -type CatalogEntry struct { +type CreateScriptInput struct { _ struct{} `type:"structure"` - // The database in which the table metadata resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // A list of the edges in the DAG.
+ DagEdges []*CodeGenEdge `type:"list"` - // The name of the table in question. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // A list of the nodes in the DAG. + DagNodes []*CodeGenNode `type:"list"` + + // The programming language of the resulting code from the DAG. + Language *string `type:"string" enum:"Language"` } // String returns the string representation -func (s CatalogEntry) String() string { +func (s CreateScriptInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CatalogEntry) GoString() string { +func (s CreateScriptInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CatalogEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CatalogEntry"} - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *CreateScriptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateScriptInput"} + if s.DagEdges != nil { + for i, v := range s.DagEdges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagEdges", i), err.(request.ErrInvalidParams)) + } + } } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.DagNodes != nil { + for i, v := range s.DagNodes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagNodes", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11072,194 +17116,191 @@ func (s *CatalogEntry) Validate() error { return nil } -// SetDatabaseName sets the DatabaseName field's value. -func (s *CatalogEntry) SetDatabaseName(v string) *CatalogEntry { - s.DatabaseName = &v +// SetDagEdges sets the DagEdges field's value. +func (s *CreateScriptInput) SetDagEdges(v []*CodeGenEdge) *CreateScriptInput { + s.DagEdges = v return s } -// SetTableName sets the TableName field's value. -func (s *CatalogEntry) SetTableName(v string) *CatalogEntry { - s.TableName = &v +// SetDagNodes sets the DagNodes field's value. +func (s *CreateScriptInput) SetDagNodes(v []*CodeGenNode) *CreateScriptInput { + s.DagNodes = v return s } -// A structure containing migration status information. -type CatalogImportStatus struct { - _ struct{} `type:"structure"` +// SetLanguage sets the Language field's value. +func (s *CreateScriptInput) SetLanguage(v string) *CreateScriptInput { + s.Language = &v + return s +} - // True if the migration has completed, or False otherwise. - ImportCompleted *bool `type:"boolean"` +type CreateScriptOutput struct { + _ struct{} `type:"structure"` - // The time that the migration was started. - ImportTime *time.Time `type:"timestamp"` + // The Python script generated from the DAG. + PythonScript *string `type:"string"` - // The name of the person who initiated the migration. - ImportedBy *string `min:"1" type:"string"` + // The Scala code generated from the DAG. 
+ ScalaCode *string `type:"string"` } // String returns the string representation -func (s CatalogImportStatus) String() string { +func (s CreateScriptOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CatalogImportStatus) GoString() string { +func (s CreateScriptOutput) GoString() string { return s.String() } -// SetImportCompleted sets the ImportCompleted field's value. -func (s *CatalogImportStatus) SetImportCompleted(v bool) *CatalogImportStatus { - s.ImportCompleted = &v - return s -} - -// SetImportTime sets the ImportTime field's value. -func (s *CatalogImportStatus) SetImportTime(v time.Time) *CatalogImportStatus { - s.ImportTime = &v +// SetPythonScript sets the PythonScript field's value. +func (s *CreateScriptOutput) SetPythonScript(v string) *CreateScriptOutput { + s.PythonScript = &v return s } -// SetImportedBy sets the ImportedBy field's value. -func (s *CatalogImportStatus) SetImportedBy(v string) *CatalogImportStatus { - s.ImportedBy = &v +// SetScalaCode sets the ScalaCode field's value. +func (s *CreateScriptOutput) SetScalaCode(v string) *CreateScriptOutput { + s.ScalaCode = &v return s } -// Classifiers are triggered during a crawl task. A classifier checks whether -// a given file is in a format it can handle, and if it is, the classifier creates -// a schema in the form of a StructType object that matches that data format. -// -// You can use the standard classifiers that AWS Glue supplies, or you can write -// your own classifiers to best categorize your data sources and specify the -// appropriate schemas to use for them. A classifier can be a grok classifier, -// an XML classifier, a JSON classifier, or a custom CSV classifier as specified -// in one of the fields in the Classifier object. -type Classifier struct { +type CreateSecurityConfigurationInput struct { _ struct{} `type:"structure"` - // A CSVClassifier object. - CsvClassifier *CsvClassifier `type:"structure"` - - // A GrokClassifier object. - GrokClassifier *GrokClassifier `type:"structure"` - - // A JsonClassifier object. - JsonClassifier *JsonClassifier `type:"structure"` + // The encryption configuration for the new security configuration. + // + // EncryptionConfiguration is a required field + EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` - // An XMLClassifier object. - XMLClassifier *XMLClassifier `type:"structure"` + // The name for the new security configuration. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Classifier) String() string { +func (s CreateSecurityConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Classifier) GoString() string { +func (s CreateSecurityConfigurationInput) GoString() string { return s.String() } -// SetCsvClassifier sets the CsvClassifier field's value. -func (s *Classifier) SetCsvClassifier(v *CsvClassifier) *Classifier { - s.CsvClassifier = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
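
CreateScriptInput consumes the same CodeGenNode, CodeGenNodeArg, and CodeGenEdge shapes rewritten in this hunk, and returns generated Python or Scala via CreateScriptOutput. A hypothetical sketch of generating a script from a two-node DAG, assuming the vendored aws and glue imports; the node types, argument names, and quoted values are assumptions, not taken from this diff:

	func exampleCreateScript() error {
		// Two illustrative nodes joined by one edge.
		src := &glue.CodeGenNode{
			Id:       aws.String("node_1"),
			NodeType: aws.String("DataSource"), // assumed node type
			Args: []*glue.CodeGenNodeArg{
				{Name: aws.String("database"), Value: aws.String(`"sales_db"`)},
				{Name: aws.String("table_name"), Value: aws.String(`"orders"`)},
			},
		}
		sink := &glue.CodeGenNode{
			Id:       aws.String("node_2"),
			NodeType: aws.String("DataSink"), // assumed node type
			Args:     []*glue.CodeGenNodeArg{},
		}
		in := &glue.CreateScriptInput{
			DagNodes: []*glue.CodeGenNode{src, sink},
			DagEdges: []*glue.CodeGenEdge{{Source: aws.String("node_1"), Target: aws.String("node_2")}},
			Language: aws.String("PYTHON"), // Language enum value
		}
		return in.Validate() // recurses into every node and edge, as above
	}
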
+func (s *CreateSecurityConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSecurityConfigurationInput"} + if s.EncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionConfiguration")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } -// SetGrokClassifier sets the GrokClassifier field's value. -func (s *Classifier) SetGrokClassifier(v *GrokClassifier) *Classifier { - s.GrokClassifier = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetJsonClassifier sets the JsonClassifier field's value. -func (s *Classifier) SetJsonClassifier(v *JsonClassifier) *Classifier { - s.JsonClassifier = v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *CreateSecurityConfigurationInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateSecurityConfigurationInput { + s.EncryptionConfiguration = v return s } -// SetXMLClassifier sets the XMLClassifier field's value. -func (s *Classifier) SetXMLClassifier(v *XMLClassifier) *Classifier { - s.XMLClassifier = v +// SetName sets the Name field's value. +func (s *CreateSecurityConfigurationInput) SetName(v string) *CreateSecurityConfigurationInput { + s.Name = &v return s } -// Specifies how CloudWatch data should be encrypted. -type CloudWatchEncryption struct { +type CreateSecurityConfigurationOutput struct { _ struct{} `type:"structure"` - // The encryption mode to use for CloudWatch data. - CloudWatchEncryptionMode *string `type:"string" enum:"CloudWatchEncryptionMode"` + // The time at which the new security configuration was created. + CreatedTimestamp *time.Time `type:"timestamp"` - // The AWS ARN of the KMS key to be used to encrypt the data. - KmsKeyArn *string `type:"string"` + // The name assigned to the new security configuration. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s CloudWatchEncryption) String() string { +func (s CreateSecurityConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloudWatchEncryption) GoString() string { +func (s CreateSecurityConfigurationOutput) GoString() string { return s.String() } -// SetCloudWatchEncryptionMode sets the CloudWatchEncryptionMode field's value. -func (s *CloudWatchEncryption) SetCloudWatchEncryptionMode(v string) *CloudWatchEncryption { - s.CloudWatchEncryptionMode = &v +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *CreateSecurityConfigurationOutput) SetCreatedTimestamp(v time.Time) *CreateSecurityConfigurationOutput { + s.CreatedTimestamp = &v return s } -// SetKmsKeyArn sets the KmsKeyArn field's value. -func (s *CloudWatchEncryption) SetKmsKeyArn(v string) *CloudWatchEncryption { - s.KmsKeyArn = &v +// SetName sets the Name field's value. +func (s *CreateSecurityConfigurationOutput) SetName(v string) *CreateSecurityConfigurationOutput { + s.Name = &v return s } -// Represents a directional edge in a directed acyclic graph (DAG). -type CodeGenEdge struct { +type CreateTableInput struct { _ struct{} `type:"structure"` - // The ID of the node at which the edge starts. - // - // Source is a required field - Source *string `min:"1" type:"string" required:"true"` + // The ID of the Data Catalog in which to create the Table. 
If none is supplied, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // The ID of the node at which the edge ends. + // The catalog database in which to create the new table. For Hive compatibility, + // this name is entirely lowercase. // - // Target is a required field - Target *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The target of the edge. - TargetParameter *string `type:"string"` + // The TableInput object that defines the metadata table to create in the catalog. + // + // TableInput is a required field + TableInput *TableInput `type:"structure" required:"true"` } // String returns the string representation -func (s CodeGenEdge) String() string { +func (s CreateTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CodeGenEdge) GoString() string { +func (s CreateTableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CodeGenEdge) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenEdge"} - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) +func (s *CreateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Source != nil && len(*s.Source) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Source", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.Target == nil { - invalidParams.Add(request.NewErrParamRequired("Target")) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.Target != nil && len(*s.Target) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Target", 1)) + if s.TableInput == nil { + invalidParams.Add(request.NewErrParamRequired("TableInput")) + } + if s.TableInput != nil { + if err := s.TableInput.Validate(); err != nil { + invalidParams.AddNested("TableInput", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11268,82 +17309,129 @@ func (s *CodeGenEdge) Validate() error { return nil } -// SetSource sets the Source field's value. -func (s *CodeGenEdge) SetSource(v string) *CodeGenEdge { - s.Source = &v +// SetCatalogId sets the CatalogId field's value. +func (s *CreateTableInput) SetCatalogId(v string) *CreateTableInput { + s.CatalogId = &v return s } -// SetTarget sets the Target field's value. -func (s *CodeGenEdge) SetTarget(v string) *CodeGenEdge { - s.Target = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *CreateTableInput) SetDatabaseName(v string) *CreateTableInput { + s.DatabaseName = &v return s } -// SetTargetParameter sets the TargetParameter field's value. -func (s *CodeGenEdge) SetTargetParameter(v string) *CodeGenEdge { - s.TargetParameter = &v +// SetTableInput sets the TableInput field's value. 
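
CreateTableInput nests the actual table definition in a TableInput, which is defined elsewhere in this file. A hypothetical sketch under the same import assumptions; the database and table names are illustrative, and treating Name as TableInput's required field is an assumption here:

	func exampleCreateTable() error {
		in := &glue.CreateTableInput{
			// CatalogId is optional; the caller's AWS account ID is the default.
			DatabaseName: aws.String("sales_db"),
			TableInput: &glue.TableInput{
				Name: aws.String("orders"), // assumed required in TableInput
			},
		}
		// Validate recurses into TableInput via AddNested, as in the code above.
		return in.Validate()
	}
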
+func (s *CreateTableInput) SetTableInput(v *TableInput) *CreateTableInput { + s.TableInput = v return s } -// Represents a node in a directed acyclic graph (DAG) -type CodeGenNode struct { +type CreateTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableOutput) GoString() string { + return s.String() +} + +type CreateTriggerInput struct { _ struct{} `type:"structure"` - // Properties of the node, in the form of name-value pairs. + // The actions initiated by this trigger when it fires. // - // Args is a required field - Args []*CodeGenNodeArg `type:"list" required:"true"` + // Actions is a required field + Actions []*Action `type:"list" required:"true"` - // A node identifier that is unique within the node's graph. + // A description of the new trigger. + Description *string `type:"string"` + + // The name of the trigger. // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The line number of the node. - LineNumber *int64 `type:"integer"` + // A predicate to specify when the new trigger should fire. + // + // This field is required when the trigger type is CONDITIONAL. + Predicate *Predicate `type:"structure"` - // The type of node this is. + // A cron expression used to specify the schedule (see Time-Based Schedules + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)). + // For example, to run something every day at 12:15 UTC, you would specify: + // cron(15 12 * * ? *). // - // NodeType is a required field - NodeType *string `type:"string" required:"true"` + // This field is required when the trigger type is SCHEDULED. + Schedule *string `type:"string"` + + // Set to true to start SCHEDULED and CONDITIONAL triggers when created. True + // is not supported for ON_DEMAND triggers. + StartOnCreation *bool `type:"boolean"` + + // The tags to use with this trigger. You may use tags to limit access to the + // trigger. For more information about tags in AWS Glue, see AWS Tags in AWS + // Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the + // developer guide. + Tags map[string]*string `type:"map"` + + // The type of the new trigger. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"TriggerType"` + + // The name of the workflow associated with the trigger. + WorkflowName *string `min:"1" type:"string"` } // String returns the string representation -func (s CodeGenNode) String() string { +func (s CreateTriggerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CodeGenNode) GoString() string { +func (s CreateTriggerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
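
The Schedule documentation above doubles as a working example. A hypothetical sketch of a scheduled trigger that starts one job, assuming the vendored aws and glue imports; Action is defined elsewhere in this file and its JobName field is an assumption here:

	func exampleCreateTrigger() error {
		in := &glue.CreateTriggerInput{
			Name:     aws.String("nightly-etl"),
			Type:     aws.String("SCHEDULED"),           // TriggerType enum value
			Schedule: aws.String("cron(15 12 * * ? *)"), // daily at 12:15 UTC, per the doc above
			Actions: []*glue.Action{
				{JobName: aws.String("orders-etl")}, // assumed Action field
			},
			StartOnCreation: aws.Bool(true), // not supported for ON_DEMAND triggers
		}
		// Name, Actions, and Type are required; Predicate matters only for CONDITIONAL.
		return in.Validate()
	}
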
-func (s *CodeGenNode) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenNode"} - if s.Args == nil { - invalidParams.Add(request.NewErrParamRequired("Args")) +func (s *CreateTriggerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTriggerInput"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.NodeType == nil { - invalidParams.Add(request.NewErrParamRequired("NodeType")) + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } - if s.Args != nil { - for i, v := range s.Args { + if s.WorkflowName != nil && len(*s.WorkflowName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Args", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) } } } + if s.Predicate != nil { + if err := s.Predicate.Validate(); err != nil { + invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11351,126 +17439,130 @@ func (s *CodeGenNode) Validate() error { return nil } -// SetArgs sets the Args field's value. -func (s *CodeGenNode) SetArgs(v []*CodeGenNodeArg) *CodeGenNode { - s.Args = v +// SetActions sets the Actions field's value. +func (s *CreateTriggerInput) SetActions(v []*Action) *CreateTriggerInput { + s.Actions = v return s } -// SetId sets the Id field's value. -func (s *CodeGenNode) SetId(v string) *CodeGenNode { - s.Id = &v +// SetDescription sets the Description field's value. +func (s *CreateTriggerInput) SetDescription(v string) *CreateTriggerInput { + s.Description = &v return s } -// SetLineNumber sets the LineNumber field's value. -func (s *CodeGenNode) SetLineNumber(v int64) *CodeGenNode { - s.LineNumber = &v +// SetName sets the Name field's value. +func (s *CreateTriggerInput) SetName(v string) *CreateTriggerInput { + s.Name = &v return s } -// SetNodeType sets the NodeType field's value. -func (s *CodeGenNode) SetNodeType(v string) *CodeGenNode { - s.NodeType = &v +// SetPredicate sets the Predicate field's value. +func (s *CreateTriggerInput) SetPredicate(v *Predicate) *CreateTriggerInput { + s.Predicate = v return s } -// An argument or property of a node. -type CodeGenNodeArg struct { - _ struct{} `type:"structure"` - - // The name of the argument or property. - // - // Name is a required field - Name *string `type:"string" required:"true"` +// SetSchedule sets the Schedule field's value. +func (s *CreateTriggerInput) SetSchedule(v string) *CreateTriggerInput { + s.Schedule = &v + return s +} - // True if the value is used as a parameter. - Param *bool `type:"boolean"` +// SetStartOnCreation sets the StartOnCreation field's value. +func (s *CreateTriggerInput) SetStartOnCreation(v bool) *CreateTriggerInput { + s.StartOnCreation = &v + return s +} - // The value of the argument or property. 
- // - // Value is a required field - Value *string `type:"string" required:"true"` +// SetTags sets the Tags field's value. +func (s *CreateTriggerInput) SetTags(v map[string]*string) *CreateTriggerInput { + s.Tags = v + return s } -// String returns the string representation -func (s CodeGenNodeArg) String() string { - return awsutil.Prettify(s) +// SetType sets the Type field's value. +func (s *CreateTriggerInput) SetType(v string) *CreateTriggerInput { + s.Type = &v + return s } -// GoString returns the string representation -func (s CodeGenNodeArg) GoString() string { - return s.String() +// SetWorkflowName sets the WorkflowName field's value. +func (s *CreateTriggerInput) SetWorkflowName(v string) *CreateTriggerInput { + s.WorkflowName = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CodeGenNodeArg) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CodeGenNodeArg"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } +type CreateTriggerOutput struct { + _ struct{} `type:"structure"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil + // The name of the trigger. + Name *string `min:"1" type:"string"` } -// SetName sets the Name field's value. -func (s *CodeGenNodeArg) SetName(v string) *CodeGenNodeArg { - s.Name = &v - return s +// String returns the string representation +func (s CreateTriggerOutput) String() string { + return awsutil.Prettify(s) } -// SetParam sets the Param field's value. -func (s *CodeGenNodeArg) SetParam(v bool) *CodeGenNodeArg { - s.Param = &v - return s +// GoString returns the string representation +func (s CreateTriggerOutput) GoString() string { + return s.String() } -// SetValue sets the Value field's value. -func (s *CodeGenNodeArg) SetValue(v string) *CodeGenNodeArg { - s.Value = &v +// SetName sets the Name field's value. +func (s *CreateTriggerOutput) SetName(v string) *CreateTriggerOutput { + s.Name = &v return s } -// A column in a Table. -type Column struct { +type CreateUserDefinedFunctionInput struct { _ struct{} `type:"structure"` - // Free-form text comment. - Comment *string `type:"string"` + // The ID of the Data Catalog in which to create the function. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // The name of the Column. + // The name of the catalog database in which to create the function. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The datatype of data in the Column. - Type *string `type:"string"` + // A FunctionInput object that defines the function to create in the Data Catalog. + // + // FunctionInput is a required field + FunctionInput *UserDefinedFunctionInput `type:"structure" required:"true"` } // String returns the string representation -func (s Column) String() string { +func (s CreateUserDefinedFunctionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Column) GoString() string { +func (s CreateUserDefinedFunctionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
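
CreateUserDefinedFunctionInput wraps a UserDefinedFunctionInput that is defined elsewhere in this file. A hypothetical sketch under the same import assumptions; every FunctionInput field shown here (FunctionName, ClassName, OwnerName, OwnerType) is an assumption about that type, and the values are illustrative:

	func exampleCreateUserDefinedFunction() error {
		in := &glue.CreateUserDefinedFunctionInput{
			DatabaseName: aws.String("sales_db"),
			FunctionInput: &glue.UserDefinedFunctionInput{
				FunctionName: aws.String("normalize_sku"),
				ClassName:    aws.String("com.example.hive.NormalizeSKU"),
				OwnerName:    aws.String("data-eng"),
				OwnerType:    aws.String("GROUP"), // assumed PrincipalType enum value
			},
		}
		// DatabaseName and FunctionInput are required; FunctionInput is validated recursively.
		return in.Validate()
	}
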
-func (s *Column) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Column"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *CreateUserDefinedFunctionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserDefinedFunctionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.FunctionInput == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionInput")) + } + if s.FunctionInput != nil { + if err := s.FunctionInput.Validate(); err != nil { + invalidParams.AddNested("FunctionInput", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11479,55 +17571,75 @@ func (s *Column) Validate() error { return nil } -// SetComment sets the Comment field's value. -func (s *Column) SetComment(v string) *Column { - s.Comment = &v +// SetCatalogId sets the CatalogId field's value. +func (s *CreateUserDefinedFunctionInput) SetCatalogId(v string) *CreateUserDefinedFunctionInput { + s.CatalogId = &v return s } -// SetName sets the Name field's value. -func (s *Column) SetName(v string) *Column { - s.Name = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *CreateUserDefinedFunctionInput) SetDatabaseName(v string) *CreateUserDefinedFunctionInput { + s.DatabaseName = &v return s } -// SetType sets the Type field's value. -func (s *Column) SetType(v string) *Column { - s.Type = &v +// SetFunctionInput sets the FunctionInput field's value. +func (s *CreateUserDefinedFunctionInput) SetFunctionInput(v *UserDefinedFunctionInput) *CreateUserDefinedFunctionInput { + s.FunctionInput = v return s } -// Defines a condition under which a trigger fires. -type Condition struct { +type CreateUserDefinedFunctionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateUserDefinedFunctionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserDefinedFunctionOutput) GoString() string { + return s.String() +} + +type CreateWorkflowInput struct { _ struct{} `type:"structure"` - // The name of the Job to whose JobRuns this condition applies and on which - // this trigger waits. - JobName *string `min:"1" type:"string"` + // A collection of properties to be used as part of each execution of the workflow. + DefaultRunProperties map[string]*string `type:"map"` - // A logical operator. - LogicalOperator *string `type:"string" enum:"LogicalOperator"` + // A description of the workflow. + Description *string `type:"string"` - // The condition state. Currently, the values supported are SUCCEEDED, STOPPED, - // TIMEOUT and FAILED. - State *string `type:"string" enum:"JobRunState"` + // The name to be assigned to the workflow. It should be unique within your + // account. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The tags to be used with this workflow. 
+ Tags map[string]*string `type:"map"` } // String returns the string representation -func (s Condition) String() string { +func (s CreateWorkflowInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Condition) GoString() string { +func (s CreateWorkflowInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Condition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Condition"} - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) +func (s *CreateWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkflowInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -11536,312 +17648,267 @@ func (s *Condition) Validate() error { return nil } -// SetJobName sets the JobName field's value. -func (s *Condition) SetJobName(v string) *Condition { - s.JobName = &v +// SetDefaultRunProperties sets the DefaultRunProperties field's value. +func (s *CreateWorkflowInput) SetDefaultRunProperties(v map[string]*string) *CreateWorkflowInput { + s.DefaultRunProperties = v return s } -// SetLogicalOperator sets the LogicalOperator field's value. -func (s *Condition) SetLogicalOperator(v string) *Condition { - s.LogicalOperator = &v +// SetDescription sets the Description field's value. +func (s *CreateWorkflowInput) SetDescription(v string) *CreateWorkflowInput { + s.Description = &v return s } -// SetState sets the State field's value. -func (s *Condition) SetState(v string) *Condition { - s.State = &v +// SetName sets the Name field's value. +func (s *CreateWorkflowInput) SetName(v string) *CreateWorkflowInput { + s.Name = &v return s } -// Defines a connection to a data source. -type Connection struct { - _ struct{} `type:"structure"` - - // These key-value pairs define parameters for the connection: - // - // * HOST - The host URI: either the fully qualified domain name (FQDN) or - // the IPv4 address of the database host. - // - // * PORT - The port number, between 1024 and 65535, of the port on which - // the database host is listening for database connections. - // - // * USER_NAME - The name under which to log in to the database. The value - // string for USER_NAME is "USERNAME". - // - // * PASSWORD - A password, if one is used, for the user name. - // - // * ENCRYPTED_PASSWORD - When you enable connection password protection - // by setting ConnectionPasswordEncryption in the Data Catalog encryption - // settings, this field stores the encrypted password. - // - // * JDBC_DRIVER_JAR_URI - The Amazon S3 path of the JAR file that contains - // the JDBC driver to use. - // - // * JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. - // - // * JDBC_ENGINE - The name of the JDBC engine to use. - // - // * JDBC_ENGINE_VERSION - The version of the JDBC engine to use. - // - // * CONFIG_FILES - (Reserved for future use). - // - // * INSTANCE_ID - The instance ID to use. - // - // * JDBC_CONNECTION_URL - The URL for the JDBC connection. - // - // * JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether - // Secure Sockets Layer (SSL) with hostname matching will be enforced for - // the JDBC connection on the client. The default is false. 
- ConnectionProperties map[string]*string `type:"map"` - - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. - ConnectionType *string `type:"string" enum:"ConnectionType"` - - // The time that this connection definition was created. - CreationTime *time.Time `type:"timestamp"` - - // The description of the connection. - Description *string `type:"string"` - - // The user, group, or role that last updated this connection definition. - LastUpdatedBy *string `min:"1" type:"string"` - - // The last time that this connection definition was updated. - LastUpdatedTime *time.Time `type:"timestamp"` +// SetTags sets the Tags field's value. +func (s *CreateWorkflowInput) SetTags(v map[string]*string) *CreateWorkflowInput { + s.Tags = v + return s +} - // A list of criteria that can be used in selecting this connection. - MatchCriteria []*string `type:"list"` +type CreateWorkflowOutput struct { + _ struct{} `type:"structure"` - // The name of the connection definition. + // The name of the workflow which was provided as part of the request. Name *string `min:"1" type:"string"` - - // A map of physical connection requirements, such as virtual private cloud - // (VPC) and SecurityGroup, that are needed to make this connection successfully. - PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` } // String returns the string representation -func (s Connection) String() string { +func (s CreateWorkflowOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Connection) GoString() string { +func (s CreateWorkflowOutput) GoString() string { return s.String() } -// SetConnectionProperties sets the ConnectionProperties field's value. -func (s *Connection) SetConnectionProperties(v map[string]*string) *Connection { - s.ConnectionProperties = v +// SetName sets the Name field's value. +func (s *CreateWorkflowOutput) SetName(v string) *CreateWorkflowOutput { + s.Name = &v return s } -// SetConnectionType sets the ConnectionType field's value. -func (s *Connection) SetConnectionType(v string) *Connection { - s.ConnectionType = &v - return s -} +// Specifies an XML classifier for CreateClassifier to create. +type CreateXMLClassifierRequest struct { + _ struct{} `type:"structure"` -// SetCreationTime sets the CreationTime field's value. -func (s *Connection) SetCreationTime(v time.Time) *Connection { - s.CreationTime = &v - return s + // An identifier of the data format that the classifier matches. + // + // Classification is a required field + Classification *string `type:"string" required:"true"` + + // The name of the classifier. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The XML tag designating the element that contains each record in an XML document + // being parsed. This can't identify a self-closing element (closed by />). + // An empty row element that contains only attributes can be parsed as long + // as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> + // is okay, but <row item_a="A" item_b="B" /> is not). + RowTag *string `type:"string"` } -// SetDescription sets the Description field's value. -func (s *Connection) SetDescription(v string) *Connection { - s.Description = &v - return s +// String returns the string representation +func (s CreateXMLClassifierRequest) String() string { + return awsutil.Prettify(s) } -// SetLastUpdatedBy sets the LastUpdatedBy field's value.
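
CreateWorkflowInput is deliberately small: a unique Name plus an optional description, default run properties, and tags. A hypothetical sketch, assuming the vendored aws and glue imports, with illustrative values:

	func exampleCreateWorkflow() error {
		in := &glue.CreateWorkflowInput{
			Name:        aws.String("orders-pipeline"),
			Description: aws.String("crawl, transform, and load order data"),
			// DefaultRunProperties are available to every run of the workflow.
			DefaultRunProperties: map[string]*string{
				"environment": aws.String("staging"),
			},
		}
		return in.Validate() // only Name is required (min length 1)
	}
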
-func (s *Connection) SetLastUpdatedBy(v string) *Connection { - s.LastUpdatedBy = &v - return s +// GoString returns the string representation +func (s CreateXMLClassifierRequest) GoString() string { + return s.String() } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Connection) SetLastUpdatedTime(v time.Time) *Connection { - s.LastUpdatedTime = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateXMLClassifierRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateXMLClassifierRequest"} + if s.Classification == nil { + invalidParams.Add(request.NewErrParamRequired("Classification")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *Connection) SetMatchCriteria(v []*string) *Connection { - s.MatchCriteria = v +// SetClassification sets the Classification field's value. +func (s *CreateXMLClassifierRequest) SetClassification(v string) *CreateXMLClassifierRequest { + s.Classification = &v return s } // SetName sets the Name field's value. -func (s *Connection) SetName(v string) *Connection { +func (s *CreateXMLClassifierRequest) SetName(v string) *CreateXMLClassifierRequest { s.Name = &v return s } -// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value. -func (s *Connection) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *Connection { - s.PhysicalConnectionRequirements = v +// SetRowTag sets the RowTag field's value. +func (s *CreateXMLClassifierRequest) SetRowTag(v string) *CreateXMLClassifierRequest { + s.RowTag = &v return s } -// A structure that is used to specify a connection to create or update. -type ConnectionInput struct { +// A classifier for custom CSV content. +type CsvClassifier struct { _ struct{} `type:"structure"` - // These key-value pairs define parameters for the connection. - // - // ConnectionProperties is a required field - ConnectionProperties map[string]*string `type:"map" required:"true"` + // Enables the processing of files that contain only one column. + AllowSingleColumn *bool `type:"boolean"` - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. - // - // ConnectionType is a required field - ConnectionType *string `type:"string" required:"true" enum:"ConnectionType"` + // Indicates whether the CSV file contains a header. + ContainsHeader *string `type:"string" enum:"CsvHeaderOption"` - // The description of the connection. - Description *string `type:"string"` + // The time that this classifier was registered. + CreationTime *time.Time `type:"timestamp"` - // A list of criteria that can be used in selecting this connection. - MatchCriteria []*string `type:"list"` + // A custom symbol to denote what separates each column entry in the row. + Delimiter *string `min:"1" type:"string"` - // The name of the connection. + // Specifies not to trim values before identifying the type of column values. + // The default value is true. + DisableValueTrimming *bool `type:"boolean"` + + // A list of strings representing column names. + Header []*string `type:"list"` + + // The time that this classifier was last updated. 
+ LastUpdated *time.Time `type:"timestamp"` + + // The name of the classifier. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // A map of physical connection requirements, such as virtual private cloud - // (VPC) and SecurityGroup, that are needed to successfully make this connection. - PhysicalConnectionRequirements *PhysicalConnectionRequirements `type:"structure"` + // A custom symbol to denote what combines content into a single column value. + // It must be different from the column delimiter. + QuoteSymbol *string `min:"1" type:"string"` + + // The version of this classifier. + Version *int64 `type:"long"` } // String returns the string representation -func (s ConnectionInput) String() string { +func (s CsvClassifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConnectionInput) GoString() string { +func (s CsvClassifier) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConnectionInput"} - if s.ConnectionProperties == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionProperties")) - } - if s.ConnectionType == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionType")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.PhysicalConnectionRequirements != nil { - if err := s.PhysicalConnectionRequirements.Validate(); err != nil { - invalidParams.AddNested("PhysicalConnectionRequirements", err.(request.ErrInvalidParams)) - } - } +// SetAllowSingleColumn sets the AllowSingleColumn field's value. +func (s *CsvClassifier) SetAllowSingleColumn(v bool) *CsvClassifier { + s.AllowSingleColumn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetContainsHeader sets the ContainsHeader field's value. +func (s *CsvClassifier) SetContainsHeader(v string) *CsvClassifier { + s.ContainsHeader = &v + return s } -// SetConnectionProperties sets the ConnectionProperties field's value. -func (s *ConnectionInput) SetConnectionProperties(v map[string]*string) *ConnectionInput { - s.ConnectionProperties = v +// SetCreationTime sets the CreationTime field's value. +func (s *CsvClassifier) SetCreationTime(v time.Time) *CsvClassifier { + s.CreationTime = &v return s } -// SetConnectionType sets the ConnectionType field's value. -func (s *ConnectionInput) SetConnectionType(v string) *ConnectionInput { - s.ConnectionType = &v +// SetDelimiter sets the Delimiter field's value. +func (s *CsvClassifier) SetDelimiter(v string) *CsvClassifier { + s.Delimiter = &v return s } -// SetDescription sets the Description field's value. -func (s *ConnectionInput) SetDescription(v string) *ConnectionInput { - s.Description = &v +// SetDisableValueTrimming sets the DisableValueTrimming field's value. +func (s *CsvClassifier) SetDisableValueTrimming(v bool) *CsvClassifier { + s.DisableValueTrimming = &v return s } -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *ConnectionInput) SetMatchCriteria(v []*string) *ConnectionInput { - s.MatchCriteria = v +// SetHeader sets the Header field's value. +func (s *CsvClassifier) SetHeader(v []*string) *CsvClassifier { + s.Header = v + return s +} + +// SetLastUpdated sets the LastUpdated field's value. 
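
Connecting the RowTag documentation above to code: a hypothetical sketch of an XML classifier request, assuming the vendored aws and glue imports; upstream this struct is normally embedded in a CreateClassifier call, which sits outside this hunk, and the names here are illustrative:

	func exampleXMLClassifier() error {
		req := &glue.CreateXMLClassifierRequest{
			Classification: aws.String("orders-xml"), // identifier for the matched format
			Name:           aws.String("orders-xml-classifier"),
			// Each <row>...</row> element is one record; self-closing rows do not match.
			RowTag: aws.String("row"),
		}
		return req.Validate() // Classification and Name are required; RowTag is optional
	}
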
+func (s *CsvClassifier) SetLastUpdated(v time.Time) *CsvClassifier { + s.LastUpdated = &v return s } // SetName sets the Name field's value. -func (s *ConnectionInput) SetName(v string) *ConnectionInput { +func (s *CsvClassifier) SetName(v string) *CsvClassifier { s.Name = &v return s } -// SetPhysicalConnectionRequirements sets the PhysicalConnectionRequirements field's value. -func (s *ConnectionInput) SetPhysicalConnectionRequirements(v *PhysicalConnectionRequirements) *ConnectionInput { - s.PhysicalConnectionRequirements = v +// SetQuoteSymbol sets the QuoteSymbol field's value. +func (s *CsvClassifier) SetQuoteSymbol(v string) *CsvClassifier { + s.QuoteSymbol = &v return s } -// The data structure used by the Data Catalog to encrypt the password as part -// of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD -// field in the connection properties. You can enable catalog encryption or -// only password encryption. -// -// When a CreationConnection request arrives containing a password, the Data -// Catalog first encrypts the password using your AWS KMS key. It then encrypts -// the whole connection object again if catalog encryption is also enabled. -// -// This encryption requires that you set AWS KMS key permissions to enable or -// restrict access on the password key according to your security requirements. -// For example, you might want only admin users to have decrypt permission on -// the password key. -type ConnectionPasswordEncryption struct { +// SetVersion sets the Version field's value. +func (s *CsvClassifier) SetVersion(v int64) *CsvClassifier { + s.Version = &v + return s +} + +// Contains configuration information for maintaining Data Catalog security. +type DataCatalogEncryptionSettings struct { _ struct{} `type:"structure"` - // An AWS KMS key that is used to encrypt the connection password. - // - // If connection password protection is enabled, the caller of CreateConnection - // and UpdateConnection needs at least kms:Encrypt permission on the specified - // AWS KMS key, to encrypt passwords before storing them in the Data Catalog. - // - // You can set the decrypt permission to enable or restrict access on the password - // key according to your security requirements. - AwsKmsKeyId *string `min:"1" type:"string"` + // When connection password protection is enabled, the Data Catalog uses a customer-provided + // key to encrypt the password as part of CreateConnection or UpdateConnection + // and store it in the ENCRYPTED_PASSWORD field in the connection properties. + // You can enable catalog encryption or only password encryption. + ConnectionPasswordEncryption *ConnectionPasswordEncryption `type:"structure"` - // When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords - // remain encrypted in the responses of GetConnection and GetConnections. This - // encryption takes effect independently from catalog encryption. - // - // ReturnConnectionPasswordEncrypted is a required field - ReturnConnectionPasswordEncrypted *bool `type:"boolean" required:"true"` + // Specifies the encryption-at-rest configuration for the Data Catalog. 
+ EncryptionAtRest *EncryptionAtRest `type:"structure"` } // String returns the string representation -func (s ConnectionPasswordEncryption) String() string { +func (s DataCatalogEncryptionSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConnectionPasswordEncryption) GoString() string { +func (s DataCatalogEncryptionSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ConnectionPasswordEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConnectionPasswordEncryption"} - if s.AwsKmsKeyId != nil && len(*s.AwsKmsKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AwsKmsKeyId", 1)) +func (s *DataCatalogEncryptionSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataCatalogEncryptionSettings"} + if s.ConnectionPasswordEncryption != nil { + if err := s.ConnectionPasswordEncryption.Validate(); err != nil { + invalidParams.AddNested("ConnectionPasswordEncryption", err.(request.ErrInvalidParams)) + } } - if s.ReturnConnectionPasswordEncrypted == nil { - invalidParams.Add(request.NewErrParamRequired("ReturnConnectionPasswordEncrypted")) + if s.EncryptionAtRest != nil { + if err := s.EncryptionAtRest.Validate(); err != nil { + invalidParams.AddNested("EncryptionAtRest", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11850,395 +17917,493 @@ func (s *ConnectionPasswordEncryption) Validate() error { return nil } -// SetAwsKmsKeyId sets the AwsKmsKeyId field's value. -func (s *ConnectionPasswordEncryption) SetAwsKmsKeyId(v string) *ConnectionPasswordEncryption { - s.AwsKmsKeyId = &v +// SetConnectionPasswordEncryption sets the ConnectionPasswordEncryption field's value. +func (s *DataCatalogEncryptionSettings) SetConnectionPasswordEncryption(v *ConnectionPasswordEncryption) *DataCatalogEncryptionSettings { + s.ConnectionPasswordEncryption = v return s } -// SetReturnConnectionPasswordEncrypted sets the ReturnConnectionPasswordEncrypted field's value. -func (s *ConnectionPasswordEncryption) SetReturnConnectionPasswordEncrypted(v bool) *ConnectionPasswordEncryption { - s.ReturnConnectionPasswordEncrypted = &v +// SetEncryptionAtRest sets the EncryptionAtRest field's value. +func (s *DataCatalogEncryptionSettings) SetEncryptionAtRest(v *EncryptionAtRest) *DataCatalogEncryptionSettings { + s.EncryptionAtRest = v return s } -// Specifies the connections used by a job. -type ConnectionsList struct { +// The AWS Lake Formation principal. +type DataLakePrincipal struct { _ struct{} `type:"structure"` - // A list of connections used by the job. - Connections []*string `type:"list"` + // An identifier for the AWS Lake Formation principal. + DataLakePrincipalIdentifier *string `min:"1" type:"string"` } // String returns the string representation -func (s ConnectionsList) String() string { +func (s DataLakePrincipal) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConnectionsList) GoString() string { +func (s DataLakePrincipal) GoString() string { return s.String() } -// SetConnections sets the Connections field's value. -func (s *ConnectionsList) SetConnections(v []*string) *ConnectionsList { - s.Connections = v +// Validate inspects the fields of the type to determine if they are valid. 
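
DataCatalogEncryptionSettings composes the two knobs documented above; the ConnectionPasswordEncryption shape it references is the one this diff relocates within the file. A hypothetical sketch, assuming the vendored aws and glue imports; the KMS key alias is illustrative:

	func exampleEncryptionSettings() error {
		settings := &glue.DataCatalogEncryptionSettings{
			ConnectionPasswordEncryption: &glue.ConnectionPasswordEncryption{
				// Required flag: keep passwords encrypted in GetConnection responses.
				ReturnConnectionPasswordEncrypted: aws.Bool(true),
				AwsKmsKeyId:                       aws.String("alias/glue-connection-passwords"),
			},
		}
		// Both nested shapes are validated recursively, as in the code above.
		return settings.Validate()
	}
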
+func (s *DataLakePrincipal) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataLakePrincipal"} + if s.DataLakePrincipalIdentifier != nil && len(*s.DataLakePrincipalIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataLakePrincipalIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataLakePrincipalIdentifier sets the DataLakePrincipalIdentifier field's value. +func (s *DataLakePrincipal) SetDataLakePrincipalIdentifier(v string) *DataLakePrincipal { + s.DataLakePrincipalIdentifier = &v return s } -// Specifies a crawler program that examines a data source and uses classifiers -// to try to determine its schema. If successful, the crawler records metadata -// concerning the data source in the AWS Glue Data Catalog. -type Crawler struct { +// The Database object represents a logical grouping of tables that might reside +// in a Hive metastore or an RDBMS. +type Database struct { _ struct{} `type:"structure"` - // A list of custom classifiers associated with the crawler. - Classifiers []*string `type:"list"` + // Creates a set of default permissions on the table for principals. + CreateTableDefaultPermissions []*PrincipalPermissions `type:"list"` - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a crawler's behavior. For more information, see Configuring - // a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). - Configuration *string `type:"string"` + // The time at which the metadata database was created in the catalog. + CreateTime *time.Time `type:"timestamp"` - // If the crawler is running, contains the total time elapsed since the last - // crawl began. - CrawlElapsedTime *int64 `type:"long"` + // A description of the database. + Description *string `type:"string"` - // The name of the SecurityConfiguration structure to be used by this Crawler. - CrawlerSecurityConfiguration *string `type:"string"` + // The location of the database (for example, an HDFS path). + LocationUri *string `min:"1" type:"string"` - // The time when the crawler was created. - CreationTime *time.Time `type:"timestamp"` + // The name of the database. For Hive compatibility, this is folded to lowercase + // when it is stored. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The database where metadata is written by this crawler. - DatabaseName *string `type:"string"` + // These key-value pairs define parameters and properties of the database. + Parameters map[string]*string `type:"map"` +} - // A description of the crawler. - Description *string `type:"string"` +// String returns the string representation +func (s Database) String() string { + return awsutil.Prettify(s) +} - // The status of the last crawl, and potentially error information if an error - // occurred. - LastCrawl *LastCrawlInfo `type:"structure"` +// GoString returns the string representation +func (s Database) GoString() string { + return s.String() +} - // The time the crawler was last updated. - LastUpdated *time.Time `type:"timestamp"` +// SetCreateTableDefaultPermissions sets the CreateTableDefaultPermissions field's value. +func (s *Database) SetCreateTableDefaultPermissions(v []*PrincipalPermissions) *Database { + s.CreateTableDefaultPermissions = v + return s +} - // The crawler name. - Name *string `min:"1" type:"string"` +// SetCreateTime sets the CreateTime field's value. 
+func (s *Database) SetCreateTime(v time.Time) *Database { + s.CreateTime = &v + return s +} - // The IAM role (or ARN of an IAM role) used to access customer resources, such - // as data in Amazon S3. - Role *string `type:"string"` +// SetDescription sets the Description field's value. +func (s *Database) SetDescription(v string) *Database { + s.Description = &v + return s +} - // For scheduled crawlers, the schedule when the crawler runs. - Schedule *Schedule `type:"structure"` +// SetLocationUri sets the LocationUri field's value. +func (s *Database) SetLocationUri(v string) *Database { + s.LocationUri = &v + return s +} - // Sets the behavior when the crawler finds a changed or deleted object. - SchemaChangePolicy *SchemaChangePolicy `type:"structure"` +// SetName sets the Name field's value. +func (s *Database) SetName(v string) *Database { + s.Name = &v + return s +} - // Indicates whether the crawler is running, or whether a run is pending. - State *string `type:"string" enum:"CrawlerState"` +// SetParameters sets the Parameters field's value. +func (s *Database) SetParameters(v map[string]*string) *Database { + s.Parameters = v + return s +} - // The prefix added to the names of tables that are created. - TablePrefix *string `type:"string"` +// The structure used to create or update a database. +type DatabaseInput struct { + _ struct{} `type:"structure"` - // A collection of targets to crawl. - Targets *CrawlerTargets `type:"structure"` + // Creates a set of default permissions on the table for principals. + CreateTableDefaultPermissions []*PrincipalPermissions `type:"list"` - // The version of the crawler. - Version *int64 `type:"long"` -} + // A description of the database. + Description *string `type:"string"` -// String returns the string representation -func (s Crawler) String() string { - return awsutil.Prettify(s) -} + // The location of the database (for example, an HDFS path). + LocationUri *string `min:"1" type:"string"` -// GoString returns the string representation -func (s Crawler) GoString() string { - return s.String() -} + // The name of the database. For Hive compatibility, this is folded to lowercase + // when it is stored. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` -// SetClassifiers sets the Classifiers field's value. -func (s *Crawler) SetClassifiers(v []*string) *Crawler { - s.Classifiers = v - return s + // These key-value pairs define parameters and properties of the database. + // + // These key-value pairs define parameters and properties of the database. + Parameters map[string]*string `type:"map"` } -// SetConfiguration sets the Configuration field's value. -func (s *Crawler) SetConfiguration(v string) *Crawler { - s.Configuration = &v - return s +// String returns the string representation +func (s DatabaseInput) String() string { + return awsutil.Prettify(s) } -// SetCrawlElapsedTime sets the CrawlElapsedTime field's value. -func (s *Crawler) SetCrawlElapsedTime(v int64) *Crawler { - s.CrawlElapsedTime = &v - return s +// GoString returns the string representation +func (s DatabaseInput) GoString() string { + return s.String() } -// SetCrawlerSecurityConfiguration sets the CrawlerSecurityConfiguration field's value. -func (s *Crawler) SetCrawlerSecurityConfiguration(v string) *Crawler { - s.CrawlerSecurityConfiguration = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
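// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// DatabaseInput above is the payload for CreateDatabase (and UpdateDatabase).
// A minimal sketch, reusing the svc client from the first editor's example;
// the database name is a placeholder and is stored lowercase per the doc
// comment on Name:
//
//	_, err := svc.CreateDatabase(&glue.CreateDatabaseInput{
//		DatabaseInput: &glue.DatabaseInput{
//			Name:        aws.String("example_db"),
//			Description: aws.String("created via the Go SDK"),
//		},
//	})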
+func (s *DatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatabaseInput"} + if s.LocationUri != nil && len(*s.LocationUri) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LocationUri", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.CreateTableDefaultPermissions != nil { + for i, v := range s.CreateTableDefaultPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CreateTableDefaultPermissions", i), err.(request.ErrInvalidParams)) + } + } + } -// SetCreationTime sets the CreationTime field's value. -func (s *Crawler) SetCreationTime(v time.Time) *Crawler { - s.CreationTime = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetDatabaseName sets the DatabaseName field's value. -func (s *Crawler) SetDatabaseName(v string) *Crawler { - s.DatabaseName = &v +// SetCreateTableDefaultPermissions sets the CreateTableDefaultPermissions field's value. +func (s *DatabaseInput) SetCreateTableDefaultPermissions(v []*PrincipalPermissions) *DatabaseInput { + s.CreateTableDefaultPermissions = v return s } // SetDescription sets the Description field's value. -func (s *Crawler) SetDescription(v string) *Crawler { +func (s *DatabaseInput) SetDescription(v string) *DatabaseInput { s.Description = &v return s } -// SetLastCrawl sets the LastCrawl field's value. -func (s *Crawler) SetLastCrawl(v *LastCrawlInfo) *Crawler { - s.LastCrawl = v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *Crawler) SetLastUpdated(v time.Time) *Crawler { - s.LastUpdated = &v +// SetLocationUri sets the LocationUri field's value. +func (s *DatabaseInput) SetLocationUri(v string) *DatabaseInput { + s.LocationUri = &v return s } // SetName sets the Name field's value. -func (s *Crawler) SetName(v string) *Crawler { +func (s *DatabaseInput) SetName(v string) *DatabaseInput { s.Name = &v return s } -// SetRole sets the Role field's value. -func (s *Crawler) SetRole(v string) *Crawler { - s.Role = &v +// SetParameters sets the Parameters field's value. +func (s *DatabaseInput) SetParameters(v map[string]*string) *DatabaseInput { + s.Parameters = v return s } -// SetSchedule sets the Schedule field's value. -func (s *Crawler) SetSchedule(v *Schedule) *Crawler { - s.Schedule = v - return s -} +type DeleteClassifierInput struct { + _ struct{} `type:"structure"` -// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. -func (s *Crawler) SetSchemaChangePolicy(v *SchemaChangePolicy) *Crawler { - s.SchemaChangePolicy = v - return s + // Name of the classifier to remove. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } -// SetState sets the State field's value. -func (s *Crawler) SetState(v string) *Crawler { - s.State = &v - return s +// String returns the string representation +func (s DeleteClassifierInput) String() string { + return awsutil.Prettify(s) } -// SetTablePrefix sets the TablePrefix field's value. -func (s *Crawler) SetTablePrefix(v string) *Crawler { - s.TablePrefix = &v - return s +// GoString returns the string representation +func (s DeleteClassifierInput) GoString() string { + return s.String() } -// SetTargets sets the Targets field's value. 
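// [editor's note - illustrative sketch, not part of the vendored SDK diff]
// The generated Validate methods above run client-side, before any request
// is signed or sent. A hedged sketch of what a caller would observe when a
// required field is missing:
//
//	in := &glue.DatabaseInput{LocationUri: aws.String("hdfs://warehouse/example")}
//	if err := in.Validate(); err != nil {
//		fmt.Println(err) // a request.ErrInvalidParams naming the missing Name field
//	}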
-func (s *Crawler) SetTargets(v *CrawlerTargets) *Crawler { - s.Targets = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClassifierInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClassifierInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVersion sets the Version field's value. -func (s *Crawler) SetVersion(v int64) *Crawler { - s.Version = &v +// SetName sets the Name field's value. +func (s *DeleteClassifierInput) SetName(v string) *DeleteClassifierInput { + s.Name = &v return s } -// Metrics for a specified crawler. -type CrawlerMetrics struct { +type DeleteClassifierOutput struct { _ struct{} `type:"structure"` +} - // The name of the crawler. - CrawlerName *string `min:"1" type:"string"` - - // The duration of the crawler's most recent run, in seconds. - LastRuntimeSeconds *float64 `type:"double"` - - // The median duration of this crawler's runs, in seconds. - MedianRuntimeSeconds *float64 `type:"double"` - - // True if the crawler is still estimating how long it will take to complete - // this run. - StillEstimating *bool `type:"boolean"` +// String returns the string representation +func (s DeleteClassifierOutput) String() string { + return awsutil.Prettify(s) +} - // The number of tables created by this crawler. - TablesCreated *int64 `type:"integer"` +// GoString returns the string representation +func (s DeleteClassifierOutput) GoString() string { + return s.String() +} - // The number of tables deleted by this crawler. - TablesDeleted *int64 `type:"integer"` +type DeleteConnectionInput struct { + _ struct{} `type:"structure"` - // The number of tables updated by this crawler. - TablesUpdated *int64 `type:"integer"` + // The ID of the Data Catalog in which the connection resides. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // The estimated time left to complete a running crawl. - TimeLeftSeconds *float64 `type:"double"` + // The name of the connection to delete. + // + // ConnectionName is a required field + ConnectionName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CrawlerMetrics) String() string { +func (s DeleteConnectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CrawlerMetrics) GoString() string { +func (s DeleteConnectionInput) GoString() string { return s.String() } -// SetCrawlerName sets the CrawlerName field's value. -func (s *CrawlerMetrics) SetCrawlerName(v string) *CrawlerMetrics { - s.CrawlerName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.ConnectionName != nil && len(*s.ConnectionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConnectionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastRuntimeSeconds sets the LastRuntimeSeconds field's value. -func (s *CrawlerMetrics) SetLastRuntimeSeconds(v float64) *CrawlerMetrics { - s.LastRuntimeSeconds = &v +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteConnectionInput) SetCatalogId(v string) *DeleteConnectionInput { + s.CatalogId = &v return s } -// SetMedianRuntimeSeconds sets the MedianRuntimeSeconds field's value. -func (s *CrawlerMetrics) SetMedianRuntimeSeconds(v float64) *CrawlerMetrics { - s.MedianRuntimeSeconds = &v +// SetConnectionName sets the ConnectionName field's value. +func (s *DeleteConnectionInput) SetConnectionName(v string) *DeleteConnectionInput { + s.ConnectionName = &v return s } -// SetStillEstimating sets the StillEstimating field's value. -func (s *CrawlerMetrics) SetStillEstimating(v bool) *CrawlerMetrics { - s.StillEstimating = &v - return s +type DeleteConnectionOutput struct { + _ struct{} `type:"structure"` } -// SetTablesCreated sets the TablesCreated field's value. -func (s *CrawlerMetrics) SetTablesCreated(v int64) *CrawlerMetrics { - s.TablesCreated = &v - return s +// String returns the string representation +func (s DeleteConnectionOutput) String() string { + return awsutil.Prettify(s) } -// SetTablesDeleted sets the TablesDeleted field's value. -func (s *CrawlerMetrics) SetTablesDeleted(v int64) *CrawlerMetrics { - s.TablesDeleted = &v - return s +// GoString returns the string representation +func (s DeleteConnectionOutput) GoString() string { + return s.String() } -// SetTablesUpdated sets the TablesUpdated field's value. -func (s *CrawlerMetrics) SetTablesUpdated(v int64) *CrawlerMetrics { - s.TablesUpdated = &v - return s +type DeleteCrawlerInput struct { + _ struct{} `type:"structure"` + + // The name of the crawler to remove. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } -// SetTimeLeftSeconds sets the TimeLeftSeconds field's value. -func (s *CrawlerMetrics) SetTimeLeftSeconds(v float64) *CrawlerMetrics { - s.TimeLeftSeconds = &v +// String returns the string representation +func (s DeleteCrawlerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCrawlerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCrawlerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCrawlerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteCrawlerInput) SetName(v string) *DeleteCrawlerInput { + s.Name = &v return s } -// Specifies data stores to crawl. 
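// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of the DeleteConnection and DeleteCrawler inputs above in use,
// reusing the svc client from the first editor's example; names are
// placeholders:
//
//	if _, err := svc.DeleteConnection(&glue.DeleteConnectionInput{
//		ConnectionName: aws.String("example-connection"),
//	}); err != nil {
//		log.Printf("DeleteConnection: %v", err)
//	}
//	if _, err := svc.DeleteCrawler(&glue.DeleteCrawlerInput{
//		Name: aws.String("example-crawler"),
//	}); err != nil {
//		log.Printf("DeleteCrawler: %v", err)
//	}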
-type CrawlerTargets struct { +type DeleteCrawlerOutput struct { _ struct{} `type:"structure"` +} - // Specifies DynamoDB targets. - DynamoDBTargets []*DynamoDBTarget `type:"list"` +// String returns the string representation +func (s DeleteCrawlerOutput) String() string { + return awsutil.Prettify(s) +} - // Specifies JDBC targets. - JdbcTargets []*JdbcTarget `type:"list"` +// GoString returns the string representation +func (s DeleteCrawlerOutput) GoString() string { + return s.String() +} - // Specifies Amazon S3 targets. - S3Targets []*S3Target `type:"list"` +type DeleteDatabaseInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which the database resides. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the database to delete. For Hive compatibility, this must be + // all lowercase. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CrawlerTargets) String() string { +func (s DeleteDatabaseInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CrawlerTargets) GoString() string { +func (s DeleteDatabaseInput) GoString() string { return s.String() } -// SetDynamoDBTargets sets the DynamoDBTargets field's value. -func (s *CrawlerTargets) SetDynamoDBTargets(v []*DynamoDBTarget) *CrawlerTargets { - s.DynamoDBTargets = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatabaseInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetJdbcTargets sets the JdbcTargets field's value. -func (s *CrawlerTargets) SetJdbcTargets(v []*JdbcTarget) *CrawlerTargets { - s.JdbcTargets = v +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteDatabaseInput) SetCatalogId(v string) *DeleteDatabaseInput { + s.CatalogId = &v return s } -// SetS3Targets sets the S3Targets field's value. -func (s *CrawlerTargets) SetS3Targets(v []*S3Target) *CrawlerTargets { - s.S3Targets = v +// SetName sets the Name field's value. +func (s *DeleteDatabaseInput) SetName(v string) *DeleteDatabaseInput { + s.Name = &v return s } -type CreateClassifierInput struct { +type DeleteDatabaseOutput struct { _ struct{} `type:"structure"` +} - // A CsvClassifier object specifying the classifier to create. - CsvClassifier *CreateCsvClassifierRequest `type:"structure"` +// String returns the string representation +func (s DeleteDatabaseOutput) String() string { + return awsutil.Prettify(s) +} - // A GrokClassifier object specifying the classifier to create. - GrokClassifier *CreateGrokClassifierRequest `type:"structure"` +// GoString returns the string representation +func (s DeleteDatabaseOutput) GoString() string { + return s.String() +} - // A JsonClassifier object specifying the classifier to create. 
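// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteDatabase with the input above, reusing the svc client
// from the first editor's example:
//
//	if _, err := svc.DeleteDatabase(&glue.DeleteDatabaseInput{
//		Name: aws.String("example_db"), // Hive compatibility: must be all lowercase
//	}); err != nil {
//		log.Printf("DeleteDatabase: %v", err)
//	}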
- JsonClassifier *CreateJsonClassifierRequest `type:"structure"` +type DeleteDevEndpointInput struct { + _ struct{} `type:"structure"` - // An XMLClassifier object specifying the classifier to create. - XMLClassifier *CreateXMLClassifierRequest `type:"structure"` + // The name of the DevEndpoint. + // + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateClassifierInput) String() string { +func (s DeleteDevEndpointInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateClassifierInput) GoString() string { +func (s DeleteDevEndpointInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateClassifierInput"} - if s.CsvClassifier != nil { - if err := s.CsvClassifier.Validate(); err != nil { - invalidParams.AddNested("CsvClassifier", err.(request.ErrInvalidParams)) - } - } - if s.GrokClassifier != nil { - if err := s.GrokClassifier.Validate(); err != nil { - invalidParams.AddNested("GrokClassifier", err.(request.ErrInvalidParams)) - } - } - if s.JsonClassifier != nil { - if err := s.JsonClassifier.Validate(); err != nil { - invalidParams.AddNested("JsonClassifier", err.(request.ErrInvalidParams)) - } - } - if s.XMLClassifier != nil { - if err := s.XMLClassifier.Validate(); err != nil { - invalidParams.AddNested("XMLClassifier", err.(request.ErrInvalidParams)) - } +func (s *DeleteDevEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDevEndpointInput"} + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) } if invalidParams.Len() > 0 { @@ -12247,80 +18412,53 @@ func (s *CreateClassifierInput) Validate() error { return nil } -// SetCsvClassifier sets the CsvClassifier field's value. -func (s *CreateClassifierInput) SetCsvClassifier(v *CreateCsvClassifierRequest) *CreateClassifierInput { - s.CsvClassifier = v - return s -} - -// SetGrokClassifier sets the GrokClassifier field's value. -func (s *CreateClassifierInput) SetGrokClassifier(v *CreateGrokClassifierRequest) *CreateClassifierInput { - s.GrokClassifier = v - return s -} - -// SetJsonClassifier sets the JsonClassifier field's value. -func (s *CreateClassifierInput) SetJsonClassifier(v *CreateJsonClassifierRequest) *CreateClassifierInput { - s.JsonClassifier = v - return s -} - -// SetXMLClassifier sets the XMLClassifier field's value. -func (s *CreateClassifierInput) SetXMLClassifier(v *CreateXMLClassifierRequest) *CreateClassifierInput { - s.XMLClassifier = v +// SetEndpointName sets the EndpointName field's value. 
+func (s *DeleteDevEndpointInput) SetEndpointName(v string) *DeleteDevEndpointInput { + s.EndpointName = &v return s } -type CreateClassifierOutput struct { +type DeleteDevEndpointOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateClassifierOutput) String() string { +func (s DeleteDevEndpointOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateClassifierOutput) GoString() string { +func (s DeleteDevEndpointOutput) GoString() string { return s.String() } -type CreateConnectionInput struct { +type DeleteJobInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which to create the connection. If none is - // provided, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // A ConnectionInput object defining the connection to create. + // The name of the job definition to delete. // - // ConnectionInput is a required field - ConnectionInput *ConnectionInput `type:"structure" required:"true"` + // JobName is a required field + JobName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateConnectionInput) String() string { +func (s DeleteJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateConnectionInput) GoString() string { +func (s DeleteJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateConnectionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.ConnectionInput == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionInput")) +func (s *DeleteJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteJobInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } - if s.ConnectionInput != nil { - if err := s.ConnectionInput.Validate(); err != nil { - invalidParams.AddNested("ConnectionInput", err.(request.ErrInvalidParams)) - } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } if invalidParams.Len() > 0 { @@ -12329,118 +18467,152 @@ func (s *CreateConnectionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *CreateConnectionInput) SetCatalogId(v string) *CreateConnectionInput { - s.CatalogId = &v +// SetJobName sets the JobName field's value. +func (s *DeleteJobInput) SetJobName(v string) *DeleteJobInput { + s.JobName = &v return s } -// SetConnectionInput sets the ConnectionInput field's value. -func (s *CreateConnectionInput) SetConnectionInput(v *ConnectionInput) *CreateConnectionInput { - s.ConnectionInput = v +type DeleteJobOutput struct { + _ struct{} `type:"structure"` + + // The name of the job definition that was deleted. + JobName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteJobOutput) GoString() string { + return s.String() +} + +// SetJobName sets the JobName field's value. 
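// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteJob with the input above; on success the output echoes
// the deleted job definition's name. Reuses the svc client from the first
// editor's example:
//
//	out, err := svc.DeleteJob(&glue.DeleteJobInput{JobName: aws.String("example-job")})
//	if err == nil {
//		fmt.Println("deleted job:", aws.StringValue(out.JobName))
//	}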
+func (s *DeleteJobOutput) SetJobName(v string) *DeleteJobOutput { + s.JobName = &v return s } -type CreateConnectionOutput struct { +type DeleteMLTransformInput struct { _ struct{} `type:"structure"` + + // The unique identifier of the transform to delete. + // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateConnectionOutput) String() string { +func (s DeleteMLTransformInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateConnectionOutput) GoString() string { +func (s DeleteMLTransformInput) GoString() string { return s.String() } -type CreateCrawlerInput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMLTransformInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMLTransformInput"} + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } - // A list of custom classifiers that the user has registered. By default, all - // built-in classifiers are included in a crawl, but these custom classifiers - // always override the default classifiers for a given classification. - Classifiers []*string `type:"list"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a crawler's behavior. For more information, see Configuring - // a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). - Configuration *string `type:"string"` +// SetTransformId sets the TransformId field's value. +func (s *DeleteMLTransformInput) SetTransformId(v string) *DeleteMLTransformInput { + s.TransformId = &v + return s +} - // The name of the SecurityConfiguration structure to be used by this Crawler. - CrawlerSecurityConfiguration *string `type:"string"` +type DeleteMLTransformOutput struct { + _ struct{} `type:"structure"` - // The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*. - // - // DatabaseName is a required field - DatabaseName *string `type:"string" required:"true"` + // The unique identifier of the transform that was deleted. + TransformId *string `min:"1" type:"string"` +} - // A description of the new crawler. - Description *string `type:"string"` +// String returns the string representation +func (s DeleteMLTransformOutput) String() string { + return awsutil.Prettify(s) +} - // Name of the new crawler. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +// GoString returns the string representation +func (s DeleteMLTransformOutput) GoString() string { + return s.String() +} - // The IAM role (or ARN of an IAM role) used by the new crawler to access customer - // resources. - // - // Role is a required field - Role *string `type:"string" required:"true"` +// SetTransformId sets the TransformId field's value. +func (s *DeleteMLTransformOutput) SetTransformId(v string) *DeleteMLTransformOutput { + s.TransformId = &v + return s +} - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). 
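// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteMLTransform; the transform ID is a placeholder, and svc
// is the client from the first editor's example:
//
//	_, err := svc.DeleteMLTransform(&glue.DeleteMLTransformInput{
//		TransformId: aws.String("tfm-0123456789abcdef"), // placeholder ID
//	})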
- // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - Schedule *string `type:"string"` +type DeletePartitionInput struct { + _ struct{} `type:"structure"` - // Policy for the crawler's update and deletion behavior. - SchemaChangePolicy *SchemaChangePolicy `type:"structure"` + // The ID of the Data Catalog where the partition to be deleted resides. If + // none is provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // The table prefix used for catalog tables that are created. - TablePrefix *string `type:"string"` + // The name of the catalog database in which the table in question resides. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The tags to use with this crawler request. You may use tags to limit access - // to the crawler. For more information about tags in AWS Glue, see AWS Tags - // in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) - // in the developer guide. - Tags map[string]*string `type:"map"` + // The values that define the partition. + // + // PartitionValues is a required field + PartitionValues []*string `type:"list" required:"true"` - // A list of collection of targets to crawl. + // The name of the table that contains the partition to be deleted. // - // Targets is a required field - Targets *CrawlerTargets `type:"structure" required:"true"` + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateCrawlerInput) String() string { +func (s DeletePartitionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCrawlerInput) GoString() string { +func (s DeletePartitionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCrawlerInput"} +func (s *DeletePartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } if s.DatabaseName == nil { invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.PartitionValues == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValues")) } - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.Targets == nil { - invalidParams.Add(request.NewErrParamRequired("Targets")) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12449,147 +18621,122 @@ func (s *CreateCrawlerInput) Validate() error { return nil } -// SetClassifiers sets the Classifiers field's value. -func (s *CreateCrawlerInput) SetClassifiers(v []*string) *CreateCrawlerInput { - s.Classifiers = v +// SetCatalogId sets the CatalogId field's value. 
+func (s *DeletePartitionInput) SetCatalogId(v string) *DeletePartitionInput { + s.CatalogId = &v return s } -// SetConfiguration sets the Configuration field's value. -func (s *CreateCrawlerInput) SetConfiguration(v string) *CreateCrawlerInput { - s.Configuration = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeletePartitionInput) SetDatabaseName(v string) *DeletePartitionInput { + s.DatabaseName = &v return s } -// SetCrawlerSecurityConfiguration sets the CrawlerSecurityConfiguration field's value. -func (s *CreateCrawlerInput) SetCrawlerSecurityConfiguration(v string) *CreateCrawlerInput { - s.CrawlerSecurityConfiguration = &v +// SetPartitionValues sets the PartitionValues field's value. +func (s *DeletePartitionInput) SetPartitionValues(v []*string) *DeletePartitionInput { + s.PartitionValues = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateCrawlerInput) SetDatabaseName(v string) *CreateCrawlerInput { - s.DatabaseName = &v +// SetTableName sets the TableName field's value. +func (s *DeletePartitionInput) SetTableName(v string) *DeletePartitionInput { + s.TableName = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateCrawlerInput) SetDescription(v string) *CreateCrawlerInput { - s.Description = &v - return s +type DeletePartitionOutput struct { + _ struct{} `type:"structure"` } -// SetName sets the Name field's value. -func (s *CreateCrawlerInput) SetName(v string) *CreateCrawlerInput { - s.Name = &v - return s +// String returns the string representation +func (s DeletePartitionOutput) String() string { + return awsutil.Prettify(s) } -// SetRole sets the Role field's value. -func (s *CreateCrawlerInput) SetRole(v string) *CreateCrawlerInput { - s.Role = &v - return s +// GoString returns the string representation +func (s DeletePartitionOutput) GoString() string { + return s.String() } -// SetSchedule sets the Schedule field's value. -func (s *CreateCrawlerInput) SetSchedule(v string) *CreateCrawlerInput { - s.Schedule = &v - return s +type DeleteResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // The hash value returned when this policy was set. + PolicyHashCondition *string `min:"1" type:"string"` } -// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. -func (s *CreateCrawlerInput) SetSchemaChangePolicy(v *SchemaChangePolicy) *CreateCrawlerInput { - s.SchemaChangePolicy = v - return s +// String returns the string representation +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) } -// SetTablePrefix sets the TablePrefix field's value. -func (s *CreateCrawlerInput) SetTablePrefix(v string) *CreateCrawlerInput { - s.TablePrefix = &v - return s +// GoString returns the string representation +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() } -// SetTags sets the Tags field's value. -func (s *CreateCrawlerInput) SetTags(v map[string]*string) *CreateCrawlerInput { - s.Tags = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} + if s.PolicyHashCondition != nil && len(*s.PolicyHashCondition) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyHashCondition", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTargets sets the Targets field's value. 
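// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeletePartition; PartitionValues lists the partition key
// values, in order. Names and values are placeholders; svc is the client
// from the first editor's example:
//
//	_, err := svc.DeletePartition(&glue.DeletePartitionInput{
//		DatabaseName:    aws.String("example_db"),
//		TableName:       aws.String("events"),
//		PartitionValues: aws.StringSlice([]string{"2019", "11"}),
//	})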
-func (s *CreateCrawlerInput) SetTargets(v *CrawlerTargets) *CreateCrawlerInput { - s.Targets = v +// SetPolicyHashCondition sets the PolicyHashCondition field's value. +func (s *DeleteResourcePolicyInput) SetPolicyHashCondition(v string) *DeleteResourcePolicyInput { + s.PolicyHashCondition = &v return s } -type CreateCrawlerOutput struct { +type DeleteResourcePolicyOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateCrawlerOutput) String() string { +func (s DeleteResourcePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCrawlerOutput) GoString() string { +func (s DeleteResourcePolicyOutput) GoString() string { return s.String() } -// Specifies a custom CSV classifier for CreateClassifier to create. -type CreateCsvClassifierRequest struct { +type DeleteSecurityConfigurationInput struct { _ struct{} `type:"structure"` - // Enables the processing of files that contain only one column. - AllowSingleColumn *bool `type:"boolean"` - - // Indicates whether the CSV file contains a header. - ContainsHeader *string `type:"string" enum:"CsvHeaderOption"` - - // A custom symbol to denote what separates each column entry in the row. - Delimiter *string `min:"1" type:"string"` - - // Specifies not to trim values before identifying the type of column values. - // The default value is true. - DisableValueTrimming *bool `type:"boolean"` - - // A list of strings representing column names. - Header []*string `type:"list"` - - // The name of the classifier. + // The name of the security configuration to delete. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - - // A custom symbol to denote what combines content into a single column value. - // Must be different from the column delimiter. - QuoteSymbol *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateCsvClassifierRequest) String() string { +func (s DeleteSecurityConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCsvClassifierRequest) GoString() string { +func (s DeleteSecurityConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCsvClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCsvClassifierRequest"} - if s.Delimiter != nil && len(*s.Delimiter) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1)) - } +func (s *DeleteSecurityConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSecurityConfigurationInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.QuoteSymbol != nil && len(*s.QuoteSymbol) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QuoteSymbol", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12597,84 +18744,73 @@ func (s *CreateCsvClassifierRequest) Validate() error { return nil } -// SetAllowSingleColumn sets the AllowSingleColumn field's value. -func (s *CreateCsvClassifierRequest) SetAllowSingleColumn(v bool) *CreateCsvClassifierRequest { - s.AllowSingleColumn = &v - return s -} - -// SetContainsHeader sets the ContainsHeader field's value. 
-func (s *CreateCsvClassifierRequest) SetContainsHeader(v string) *CreateCsvClassifierRequest { - s.ContainsHeader = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *CreateCsvClassifierRequest) SetDelimiter(v string) *CreateCsvClassifierRequest { - s.Delimiter = &v - return s -} - -// SetDisableValueTrimming sets the DisableValueTrimming field's value. -func (s *CreateCsvClassifierRequest) SetDisableValueTrimming(v bool) *CreateCsvClassifierRequest { - s.DisableValueTrimming = &v +// SetName sets the Name field's value. +func (s *DeleteSecurityConfigurationInput) SetName(v string) *DeleteSecurityConfigurationInput { + s.Name = &v return s } -// SetHeader sets the Header field's value. -func (s *CreateCsvClassifierRequest) SetHeader(v []*string) *CreateCsvClassifierRequest { - s.Header = v - return s +type DeleteSecurityConfigurationOutput struct { + _ struct{} `type:"structure"` } -// SetName sets the Name field's value. -func (s *CreateCsvClassifierRequest) SetName(v string) *CreateCsvClassifierRequest { - s.Name = &v - return s +// String returns the string representation +func (s DeleteSecurityConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetQuoteSymbol sets the QuoteSymbol field's value. -func (s *CreateCsvClassifierRequest) SetQuoteSymbol(v string) *CreateCsvClassifierRequest { - s.QuoteSymbol = &v - return s +// GoString returns the string representation +func (s DeleteSecurityConfigurationOutput) GoString() string { + return s.String() } -type CreateDatabaseInput struct { +type DeleteTableInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which to create the database. If none is supplied, + // The ID of the Data Catalog where the table resides. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // A DatabaseInput object defining the metadata database to create in the catalog. + // The name of the catalog database in which the table resides. For Hive compatibility, + // this name is entirely lowercase. // - // DatabaseInput is a required field - DatabaseInput *DatabaseInput `type:"structure" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table to be deleted. For Hive compatibility, this name is + // entirely lowercase. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateDatabaseInput) String() string { +func (s DeleteTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDatabaseInput) GoString() string { +func (s DeleteTableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDatabaseInput"} +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.DatabaseInput == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseInput")) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.DatabaseInput != nil { - if err := s.DatabaseInput.Validate(); err != nil { - invalidParams.AddNested("DatabaseInput", err.(request.ErrInvalidParams)) - } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -12684,115 +18820,95 @@ func (s *CreateDatabaseInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *CreateDatabaseInput) SetCatalogId(v string) *CreateDatabaseInput { +func (s *DeleteTableInput) SetCatalogId(v string) *DeleteTableInput { s.CatalogId = &v return s } -// SetDatabaseInput sets the DatabaseInput field's value. -func (s *CreateDatabaseInput) SetDatabaseInput(v *DatabaseInput) *CreateDatabaseInput { - s.DatabaseInput = v +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeleteTableInput) SetDatabaseName(v string) *DeleteTableInput { + s.DatabaseName = &v return s } -type CreateDatabaseOutput struct { +// SetName sets the Name field's value. +func (s *DeleteTableInput) SetName(v string) *DeleteTableInput { + s.Name = &v + return s +} + +type DeleteTableOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateDatabaseOutput) String() string { +func (s DeleteTableOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDatabaseOutput) GoString() string { +func (s DeleteTableOutput) GoString() string { return s.String() } -type CreateDevEndpointInput struct { +type DeleteTableVersionInput struct { _ struct{} `type:"structure"` - // A map of arguments used to configure the DevEndpoint. - Arguments map[string]*string `type:"map"` - - // The name to be assigned to the new DevEndpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` - - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - ExtraJarsS3Path *string `type:"string"` + // The ID of the Data Catalog where the tables reside. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. + // The database in the catalog in which the table resides. For Hive compatibility, + // this name is entirely lowercase. // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. 
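// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteTable with idempotent-style error handling; assumes the
// svc client from the first editor's example plus the
// github.com/aws/aws-sdk-go/aws/awserr import. Names are placeholders:
//
//	_, err := svc.DeleteTable(&glue.DeleteTableInput{
//		DatabaseName: aws.String("example_db"),
//		Name:         aws.String("events"),
//	})
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == glue.ErrCodeEntityNotFoundException {
//		err = nil // the table was already gone; often acceptable for deletes
//	}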
- ExtraPythonLibsS3Path *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // The public key to be used by this DevEndpoint for authentication. This attribute - // is provided for backward compatibility, as the recommended attribute to use - // is public keys. - PublicKey *string `type:"string"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // A list of public keys to be used by the DevEndpoints for authentication. - // The use of this attribute is preferred over a single public key because the - // public keys allow you to have a different private key per client. + // The name of the table. For Hive compatibility, this name is entirely lowercase. // - // If you previously created an endpoint with a public key, you must remove - // that key to be able to set a list of public keys: call the UpdateDevEndpoint - // API with the public key content in the deletePublicKeys attribute, and the - // list of new keys in the addPublicKeys attribute. - PublicKeys []*string `type:"list"` + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` - // The IAM role for the DevEndpoint. + // The ID of the table version to be deleted. A VersionID is a string representation + // of an integer. Each version is incremented by 1. // - // RoleArn is a required field - RoleArn *string `type:"string" required:"true"` - - // The name of the SecurityConfiguration structure to be used with this DevEndpoint. - SecurityConfiguration *string `min:"1" type:"string"` - - // Security group IDs for the security groups to be used by the new DevEndpoint. - SecurityGroupIds []*string `type:"list"` - - // The subnet ID for the new DevEndpoint to use. - SubnetId *string `type:"string"` - - // The tags to use with this DevEndpoint. You may use tags to limit access to - // the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags - // in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) - // in the developer guide. - Tags map[string]*string `type:"map"` + // VersionId is a required field + VersionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateDevEndpointInput) String() string { +func (s DeleteTableVersionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDevEndpointInput) GoString() string { +func (s DeleteTableVersionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) +func (s *DeleteTableVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableVersionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 1)) } if invalidParams.Len() > 0 { @@ -12801,284 +18917,215 @@ func (s *CreateDevEndpointInput) Validate() error { return nil } -// SetArguments sets the Arguments field's value. -func (s *CreateDevEndpointInput) SetArguments(v map[string]*string) *CreateDevEndpointInput { - s.Arguments = v - return s -} - -// SetEndpointName sets the EndpointName field's value. -func (s *CreateDevEndpointInput) SetEndpointName(v string) *CreateDevEndpointInput { - s.EndpointName = &v +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteTableVersionInput) SetCatalogId(v string) *DeleteTableVersionInput { + s.CatalogId = &v return s } -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *CreateDevEndpointInput) SetExtraJarsS3Path(v string) *CreateDevEndpointInput { - s.ExtraJarsS3Path = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeleteTableVersionInput) SetDatabaseName(v string) *DeleteTableVersionInput { + s.DatabaseName = &v return s } -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *CreateDevEndpointInput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointInput { - s.ExtraPythonLibsS3Path = &v +// SetTableName sets the TableName field's value. +func (s *DeleteTableVersionInput) SetTableName(v string) *DeleteTableVersionInput { + s.TableName = &v return s } -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *CreateDevEndpointInput) SetNumberOfNodes(v int64) *CreateDevEndpointInput { - s.NumberOfNodes = &v +// SetVersionId sets the VersionId field's value. +func (s *DeleteTableVersionInput) SetVersionId(v string) *DeleteTableVersionInput { + s.VersionId = &v return s } -// SetPublicKey sets the PublicKey field's value. -func (s *CreateDevEndpointInput) SetPublicKey(v string) *CreateDevEndpointInput { - s.PublicKey = &v - return s +type DeleteTableVersionOutput struct { + _ struct{} `type:"structure"` } -// SetPublicKeys sets the PublicKeys field's value. 
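// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteTableVersion; per the field doc above, VersionId is the
// string form of an integer. Placeholder names; svc as in the first example:
//
//	_, err := svc.DeleteTableVersion(&glue.DeleteTableVersionInput{
//		DatabaseName: aws.String("example_db"),
//		TableName:    aws.String("events"),
//		VersionId:    aws.String("2"),
//	})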
-func (s *CreateDevEndpointInput) SetPublicKeys(v []*string) *CreateDevEndpointInput { - s.PublicKeys = v - return s +// String returns the string representation +func (s DeleteTableVersionOutput) String() string { + return awsutil.Prettify(s) } -// SetRoleArn sets the RoleArn field's value. -func (s *CreateDevEndpointInput) SetRoleArn(v string) *CreateDevEndpointInput { - s.RoleArn = &v - return s +// GoString returns the string representation +func (s DeleteTableVersionOutput) GoString() string { + return s.String() } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *CreateDevEndpointInput) SetSecurityConfiguration(v string) *CreateDevEndpointInput { - s.SecurityConfiguration = &v - return s -} +type DeleteTriggerInput struct { + _ struct{} `type:"structure"` -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateDevEndpointInput) SetSecurityGroupIds(v []*string) *CreateDevEndpointInput { - s.SecurityGroupIds = v - return s + // The name of the trigger to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } -// SetSubnetId sets the SubnetId field's value. -func (s *CreateDevEndpointInput) SetSubnetId(v string) *CreateDevEndpointInput { - s.SubnetId = &v - return s +// String returns the string representation +func (s DeleteTriggerInput) String() string { + return awsutil.Prettify(s) } -// SetTags sets the Tags field's value. -func (s *CreateDevEndpointInput) SetTags(v map[string]*string) *CreateDevEndpointInput { - s.Tags = v - return s +// GoString returns the string representation +func (s DeleteTriggerInput) GoString() string { + return s.String() } -type CreateDevEndpointOutput struct { - _ struct{} `type:"structure"` - - // The map of arguments used to configure this DevEndpoint. - Arguments map[string]*string `type:"map"` - - // The AWS availability zone where this DevEndpoint is located. - AvailabilityZone *string `type:"string"` - - // The point in time at which this DevEndpoint was created. - CreatedTimestamp *time.Time `type:"timestamp"` - - // The name assigned to the new DevEndpoint. - EndpointName *string `type:"string"` - - // Path to one or more Java Jars in an S3 bucket that will be loaded in your - // DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that will be loaded - // in your DevEndpoint. - ExtraPythonLibsS3Path *string `type:"string"` - - // The reason for a current failure in this DevEndpoint. - FailureReason *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // The AWS ARN of the role assigned to the new DevEndpoint. - RoleArn *string `type:"string"` - - // The name of the SecurityConfiguration structure being used with this DevEndpoint. - SecurityConfiguration *string `min:"1" type:"string"` - - // The security groups assigned to the new DevEndpoint. - SecurityGroupIds []*string `type:"list"` - - // The current status of the new DevEndpoint. - Status *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteTriggerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTriggerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } - // The subnet ID assigned to the new DevEndpoint. - SubnetId *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The ID of the VPC used by this DevEndpoint. - VpcId *string `type:"string"` +// SetName sets the Name field's value. +func (s *DeleteTriggerInput) SetName(v string) *DeleteTriggerInput { + s.Name = &v + return s +} - // The address of the YARN endpoint used by this DevEndpoint. - YarnEndpointAddress *string `type:"string"` +type DeleteTriggerOutput struct { + _ struct{} `type:"structure"` - // The Apache Zeppelin port for the remote Apache Spark interpreter. - ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"` + // The name of the trigger that was deleted. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateDevEndpointOutput) String() string { +func (s DeleteTriggerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDevEndpointOutput) GoString() string { +func (s DeleteTriggerOutput) GoString() string { return s.String() } -// SetArguments sets the Arguments field's value. -func (s *CreateDevEndpointOutput) SetArguments(v map[string]*string) *CreateDevEndpointOutput { - s.Arguments = v - return s -} - -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateDevEndpointOutput) SetAvailabilityZone(v string) *CreateDevEndpointOutput { - s.AvailabilityZone = &v +// SetName sets the Name field's value. +func (s *DeleteTriggerOutput) SetName(v string) *DeleteTriggerOutput { + s.Name = &v return s } -// SetCreatedTimestamp sets the CreatedTimestamp field's value. -func (s *CreateDevEndpointOutput) SetCreatedTimestamp(v time.Time) *CreateDevEndpointOutput { - s.CreatedTimestamp = &v - return s -} +type DeleteUserDefinedFunctionInput struct { + _ struct{} `type:"structure"` -// SetEndpointName sets the EndpointName field's value. -func (s *CreateDevEndpointOutput) SetEndpointName(v string) *CreateDevEndpointOutput { - s.EndpointName = &v - return s -} + // The ID of the Data Catalog where the function to be deleted is located. If + // none is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *CreateDevEndpointOutput) SetExtraJarsS3Path(v string) *CreateDevEndpointOutput { - s.ExtraJarsS3Path = &v - return s -} + // The name of the catalog database where the function is located. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *CreateDevEndpointOutput) SetExtraPythonLibsS3Path(v string) *CreateDevEndpointOutput { - s.ExtraPythonLibsS3Path = &v - return s + // The name of the function definition to be deleted. + // + // FunctionName is a required field + FunctionName *string `min:"1" type:"string" required:"true"` } -// SetFailureReason sets the FailureReason field's value. 
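// [editor's example - illustrative sketch, not part of the vendored SDK diff]
// A sketch of DeleteTrigger and DeleteUserDefinedFunction with the inputs
// above; placeholder names, svc as in the first editor's example:
//
//	if _, err := svc.DeleteTrigger(&glue.DeleteTriggerInput{
//		Name: aws.String("example-trigger"),
//	}); err != nil {
//		log.Printf("DeleteTrigger: %v", err)
//	}
//	if _, err := svc.DeleteUserDefinedFunction(&glue.DeleteUserDefinedFunctionInput{
//		DatabaseName: aws.String("example_db"),
//		FunctionName: aws.String("example_udf"),
//	}); err != nil {
//		log.Printf("DeleteUserDefinedFunction: %v", err)
//	}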
-func (s *CreateDevEndpointOutput) SetFailureReason(v string) *CreateDevEndpointOutput { - s.FailureReason = &v - return s +// String returns the string representation +func (s DeleteUserDefinedFunctionInput) String() string { + return awsutil.Prettify(s) } -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *CreateDevEndpointOutput) SetNumberOfNodes(v int64) *CreateDevEndpointOutput { - s.NumberOfNodes = &v - return s +// GoString returns the string representation +func (s DeleteUserDefinedFunctionInput) GoString() string { + return s.String() } -// SetRoleArn sets the RoleArn field's value. -func (s *CreateDevEndpointOutput) SetRoleArn(v string) *CreateDevEndpointOutput { - s.RoleArn = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserDefinedFunctionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserDefinedFunctionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *CreateDevEndpointOutput) SetSecurityConfiguration(v string) *CreateDevEndpointOutput { - s.SecurityConfiguration = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateDevEndpointOutput) SetSecurityGroupIds(v []*string) *CreateDevEndpointOutput { - s.SecurityGroupIds = v +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteUserDefinedFunctionInput) SetCatalogId(v string) *DeleteUserDefinedFunctionInput { + s.CatalogId = &v return s } -// SetStatus sets the Status field's value. -func (s *CreateDevEndpointOutput) SetStatus(v string) *CreateDevEndpointOutput { - s.Status = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeleteUserDefinedFunctionInput) SetDatabaseName(v string) *DeleteUserDefinedFunctionInput { + s.DatabaseName = &v return s } -// SetSubnetId sets the SubnetId field's value. -func (s *CreateDevEndpointOutput) SetSubnetId(v string) *CreateDevEndpointOutput { - s.SubnetId = &v +// SetFunctionName sets the FunctionName field's value. +func (s *DeleteUserDefinedFunctionInput) SetFunctionName(v string) *DeleteUserDefinedFunctionInput { + s.FunctionName = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *CreateDevEndpointOutput) SetVpcId(v string) *CreateDevEndpointOutput { - s.VpcId = &v - return s +type DeleteUserDefinedFunctionOutput struct { + _ struct{} `type:"structure"` } -// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. -func (s *CreateDevEndpointOutput) SetYarnEndpointAddress(v string) *CreateDevEndpointOutput { - s.YarnEndpointAddress = &v - return s +// String returns the string representation +func (s DeleteUserDefinedFunctionOutput) String() string { + return awsutil.Prettify(s) } -// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. 
-func (s *CreateDevEndpointOutput) SetZeppelinRemoteSparkInterpreterPort(v int64) *CreateDevEndpointOutput { - s.ZeppelinRemoteSparkInterpreterPort = &v - return s +// GoString returns the string representation +func (s DeleteUserDefinedFunctionOutput) GoString() string { + return s.String() } -// Specifies a grok classifier for CreateClassifier to create. -type CreateGrokClassifierRequest struct { +type DeleteWorkflowInput struct { _ struct{} `type:"structure"` - // An identifier of the data format that the classifier matches, such as Twitter, - // JSON, Omniture logs, Amazon CloudWatch Logs, and so on. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // Optional custom grok patterns used by this classifier. - CustomPatterns *string `type:"string"` - - // The grok pattern used by this classifier. - // - // GrokPattern is a required field - GrokPattern *string `min:"1" type:"string" required:"true"` - - // The name of the new classifier. + // Name of the workflow to be deleted. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateGrokClassifierRequest) String() string { +func (s DeleteWorkflowInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateGrokClassifierRequest) GoString() string { +func (s DeleteWorkflowInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGrokClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGrokClassifierRequest"} - if s.Classification == nil { - invalidParams.Add(request.NewErrParamRequired("Classification")) - } - if s.GrokPattern == nil { - invalidParams.Add(request.NewErrParamRequired("GrokPattern")) - } - if s.GrokPattern != nil && len(*s.GrokPattern) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrokPattern", 1)) - } +func (s *DeleteWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWorkflowInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -13092,350 +19139,473 @@ func (s *CreateGrokClassifierRequest) Validate() error { return nil } -// SetClassification sets the Classification field's value. -func (s *CreateGrokClassifierRequest) SetClassification(v string) *CreateGrokClassifierRequest { - s.Classification = &v +// SetName sets the Name field's value. +func (s *DeleteWorkflowInput) SetName(v string) *DeleteWorkflowInput { + s.Name = &v return s } -// SetCustomPatterns sets the CustomPatterns field's value. -func (s *CreateGrokClassifierRequest) SetCustomPatterns(v string) *CreateGrokClassifierRequest { - s.CustomPatterns = &v - return s +type DeleteWorkflowOutput struct { + _ struct{} `type:"structure"` + + // Name of the workflow specified in input. + Name *string `min:"1" type:"string"` } -// SetGrokPattern sets the GrokPattern field's value. -func (s *CreateGrokClassifierRequest) SetGrokPattern(v string) *CreateGrokClassifierRequest { - s.GrokPattern = &v - return s +// String returns the string representation +func (s DeleteWorkflowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWorkflowOutput) GoString() string { + return s.String() } // SetName sets the Name field's value. 
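The setters return their receiver, so multi-field inputs such as DeleteUserDefinedFunctionInput above chain naturally. A sketch under the same client assumptions (database and function names are hypothetical):

	input := (&glue.DeleteUserDefinedFunctionInput{}).
		SetDatabaseName("my_db").      // hypothetical; required, min length 1
		SetFunctionName("my_function") // hypothetical; required, min length 1
	// CatalogId is optional; the AWS account ID is used when it is omitted.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.DeleteUserDefinedFunction(input); err != nil {
		log.Fatal(err)
	}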
-func (s *CreateGrokClassifierRequest) SetName(v string) *CreateGrokClassifierRequest { +func (s *DeleteWorkflowOutput) SetName(v string) *DeleteWorkflowOutput { s.Name = &v return s } -type CreateJobInput struct { +// A development endpoint where a developer can remotely debug extract, transform, +// and load (ETL) scripts. +type DevEndpoint struct { _ struct{} `type:"structure"` - // This parameter is deprecated. Use MaxCapacity instead. - // - // The number of AWS Glue data processing units (DPUs) to allocate to this Job. - // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative - // measure of processing power that consists of 4 vCPUs of compute capacity - // and 16 GB of memory. For more information, see the AWS Glue pricing page - // (https://aws.amazon.com/glue/pricing/). + // A map of arguments used to configure the DevEndpoint. // - // Deprecated: This property is deprecated, use MaxCapacity instead. - AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - - // The JobCommand that executes this job. + // Valid arguments are: // - // Command is a required field - Command *JobCommand `type:"structure" required:"true"` - - // The connections used for this job. - Connections *ConnectionsList `type:"structure"` - - // The default arguments for this job. + // * "--enable-glue-datacatalog": "" // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. + // * "GLUE_PYTHON_VERSION": "3" // - // For information about how to specify and consume your own Job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. + // * "GLUE_PYTHON_VERSION": "2" // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - DefaultArguments map[string]*string `type:"map"` + // You can specify a version of Python support for development endpoints by + // using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint + // APIs. If no arguments are provided, the version defaults to Python 2. + Arguments map[string]*string `type:"map"` - // Description of the job being defined. - Description *string `type:"string"` + // The AWS Availability Zone where this DevEndpoint is located. + AvailabilityZone *string `type:"string"` - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` + // The point in time at which this DevEndpoint was created. + CreatedTimestamp *time.Time `type:"timestamp"` - // This field is reserved for future use. - LogUri *string `type:"string"` + // The name of the DevEndpoint. + EndpointName *string `type:"string"` - // The number of AWS Glue data processing units (DPUs) that can be allocated - // when this job runs. A DPU is a relative measure of processing power that - // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, - // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // The path to one or more Java .jar files in an S3 bucket that should be loaded + // in your DevEndpoint. // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. 
+	// You can only use pure Java/Scala libraries with a DevEndpoint.
+	ExtraJarsS3Path *string `type:"string"`
+
+	// The paths to one or more Python libraries in an Amazon S3 bucket that should
+	// be loaded in your DevEndpoint. Multiple values must be complete paths separated
+	// by a comma.
	//
-	// The value that can be allocated for MaxCapacity depends on whether you are
-	// running a python shell job, or an Apache Spark ETL job:
+	// You can only use pure Python libraries with a DevEndpoint. Libraries that
+	// rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python
+	// data analysis library, are not currently supported.
+	ExtraPythonLibsS3Path *string `type:"string"`
+
+	// The reason for a current failure in this DevEndpoint.
+	FailureReason *string `type:"string"`
+
+	// Glue version determines the versions of Apache Spark and Python that AWS
+	// Glue supports. The Python version indicates the version supported for running
+	// your ETL scripts on development endpoints.
	//
-	// * When you specify a python shell job (JobCommand.Name="pythonshell"),
-	// you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
+	// For more information about the available AWS Glue versions and corresponding
+	// Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html)
+	// in the developer guide.
	//
-	// * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"),
-	// you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job
-	// type cannot have a fractional DPU allocation.
-	MaxCapacity *float64 `type:"double"`
+	// Development endpoints that are created without specifying a Glue version
+	// default to Glue 0.9.
+	//
+	// You can specify a version of Python support for development endpoints by
+	// using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint
+	// APIs. If no arguments are provided, the version defaults to Python 2.
+	GlueVersion *string `min:"1" type:"string"`

-	// The maximum number of times to retry this job if it fails.
-	MaxRetries *int64 `type:"integer"`
+	// The point in time at which this DevEndpoint was last modified.
+	LastModifiedTimestamp *time.Time `type:"timestamp"`

-	// The name you assign to this job definition. It must be unique in your account.
-	//
-	// Name is a required field
-	Name *string `min:"1" type:"string" required:"true"`
+	// The status of the last update.
+	LastUpdateStatus *string `type:"string"`

-	// Specifies configuration properties of a job notification.
-	NotificationProperty *NotificationProperty `type:"structure"`
+	// The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.
+	NumberOfNodes *int64 `type:"integer"`

-	// The number of workers of a defined workerType that are allocated when a job
-	// runs.
+	// The number of workers of a defined workerType that are allocated to the development
+	// endpoint.
	//
	// The maximum number of workers you can define is 299 for G.1X, and 149 for
	// G.2X.
	NumberOfWorkers *int64 `type:"integer"`

-	// The name or ARN of the IAM role associated with this job.
+	// A private IP address to access the DevEndpoint within a VPC if the DevEndpoint
+	// is created within one. The PrivateAddress field is present only when you
+	// create the DevEndpoint within your VPC.
+	PrivateAddress *string `type:"string"`
+
+	// The public IP address used by this DevEndpoint. The PublicAddress field is
+	// present only when you create a non-virtual private cloud (VPC) DevEndpoint.
+ PublicAddress *string `type:"string"` + + // The public key to be used by this DevEndpoint for authentication. This attribute + // is provided for backward compatibility because the recommended attribute + // to use is public keys. + PublicKey *string `type:"string"` + + // A list of public keys to be used by the DevEndpoints for authentication. + // Using this attribute is preferred over a single public key because the public + // keys allow you to have a different private key per client. // - // Role is a required field - Role *string `type:"string" required:"true"` + // If you previously created an endpoint with a public key, you must remove + // that key to be able to set a list of public keys. Call the UpdateDevEndpoint + // API operation with the public key content in the deletePublicKeys attribute, + // and the list of new keys in the addPublicKeys attribute. + PublicKeys []*string `type:"list"` - // The name of the SecurityConfiguration structure to be used with this job. + // The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint. + RoleArn *string `type:"string"` + + // The name of the SecurityConfiguration structure to be used with this DevEndpoint. SecurityConfiguration *string `min:"1" type:"string"` - // The tags to use with this job. You may use tags to limit access to the job. - // For more information about tags in AWS Glue, see AWS Tags in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) - // in the developer guide. - Tags map[string]*string `type:"map"` + // A list of security group identifiers used in this DevEndpoint. + SecurityGroupIds []*string `type:"list"` - // The job timeout in minutes. This is the maximum time that a job run can consume - // resources before it is terminated and enters TIMEOUT status. The default - // is 2,880 minutes (48 hours). - Timeout *int64 `min:"1" type:"integer"` + // The current status of this DevEndpoint. + Status *string `type:"string"` - // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, or G.2X. + // The subnet ID for this DevEndpoint. + SubnetId *string `type:"string"` + + // The ID of the virtual private cloud (VPC) used by this DevEndpoint. + VpcId *string `type:"string"` + + // The type of predefined worker that is allocated to the development endpoint. + // Accepts a value of Standard, G.1X, or G.2X. // // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of // memory and a 50GB disk, and 2 executors per worker. // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory - // and a 64GB disk, and 1 executor per worker. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of + // memory, 64 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. // - // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory - // and a 128GB disk, and 1 executor per worker. + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of + // memory, 128 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + // + // Known issue: when a development endpoint is created with the G.2X WorkerType + // configuration, the Spark drivers for the development endpoint will run on + // 4 vCPU, 16 GB of memory, and a 64 GB disk. WorkerType *string `type:"string" enum:"WorkerType"` + + // The YARN endpoint address used by this DevEndpoint. 
+ YarnEndpointAddress *string `type:"string"` + + // The Apache Zeppelin port for the remote Apache Spark interpreter. + ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"` } // String returns the string representation -func (s CreateJobInput) String() string { +func (s DevEndpoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateJobInput) GoString() string { +func (s DevEndpoint) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} - if s.Command == nil { - invalidParams.Add(request.NewErrParamRequired("Command")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) - } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) - } - if s.NotificationProperty != nil { - if err := s.NotificationProperty.Validate(); err != nil { - invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) - } - } +// SetArguments sets the Arguments field's value. +func (s *DevEndpoint) SetArguments(v map[string]*string) *DevEndpoint { + s.Arguments = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DevEndpoint) SetAvailabilityZone(v string) *DevEndpoint { + s.AvailabilityZone = &v + return s } -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *CreateJobInput) SetAllocatedCapacity(v int64) *CreateJobInput { - s.AllocatedCapacity = &v +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *DevEndpoint) SetCreatedTimestamp(v time.Time) *DevEndpoint { + s.CreatedTimestamp = &v return s } -// SetCommand sets the Command field's value. -func (s *CreateJobInput) SetCommand(v *JobCommand) *CreateJobInput { - s.Command = v +// SetEndpointName sets the EndpointName field's value. +func (s *DevEndpoint) SetEndpointName(v string) *DevEndpoint { + s.EndpointName = &v return s } -// SetConnections sets the Connections field's value. -func (s *CreateJobInput) SetConnections(v *ConnectionsList) *CreateJobInput { - s.Connections = v +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. +func (s *DevEndpoint) SetExtraJarsS3Path(v string) *DevEndpoint { + s.ExtraJarsS3Path = &v return s } -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *CreateJobInput) SetDefaultArguments(v map[string]*string) *CreateJobInput { - s.DefaultArguments = v +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. +func (s *DevEndpoint) SetExtraPythonLibsS3Path(v string) *DevEndpoint { + s.ExtraPythonLibsS3Path = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateJobInput) SetDescription(v string) *CreateJobInput { - s.Description = &v +// SetFailureReason sets the FailureReason field's value. 
+func (s *DevEndpoint) SetFailureReason(v string) *DevEndpoint { + s.FailureReason = &v return s } -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *CreateJobInput) SetExecutionProperty(v *ExecutionProperty) *CreateJobInput { - s.ExecutionProperty = v +// SetGlueVersion sets the GlueVersion field's value. +func (s *DevEndpoint) SetGlueVersion(v string) *DevEndpoint { + s.GlueVersion = &v return s } -// SetLogUri sets the LogUri field's value. -func (s *CreateJobInput) SetLogUri(v string) *CreateJobInput { - s.LogUri = &v +// SetLastModifiedTimestamp sets the LastModifiedTimestamp field's value. +func (s *DevEndpoint) SetLastModifiedTimestamp(v time.Time) *DevEndpoint { + s.LastModifiedTimestamp = &v return s } -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *CreateJobInput) SetMaxCapacity(v float64) *CreateJobInput { - s.MaxCapacity = &v +// SetLastUpdateStatus sets the LastUpdateStatus field's value. +func (s *DevEndpoint) SetLastUpdateStatus(v string) *DevEndpoint { + s.LastUpdateStatus = &v return s } -// SetMaxRetries sets the MaxRetries field's value. -func (s *CreateJobInput) SetMaxRetries(v int64) *CreateJobInput { - s.MaxRetries = &v +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *DevEndpoint) SetNumberOfNodes(v int64) *DevEndpoint { + s.NumberOfNodes = &v return s } -// SetName sets the Name field's value. -func (s *CreateJobInput) SetName(v string) *CreateJobInput { - s.Name = &v +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *DevEndpoint) SetNumberOfWorkers(v int64) *DevEndpoint { + s.NumberOfWorkers = &v + return s +} + +// SetPrivateAddress sets the PrivateAddress field's value. +func (s *DevEndpoint) SetPrivateAddress(v string) *DevEndpoint { + s.PrivateAddress = &v + return s +} + +// SetPublicAddress sets the PublicAddress field's value. +func (s *DevEndpoint) SetPublicAddress(v string) *DevEndpoint { + s.PublicAddress = &v + return s +} + +// SetPublicKey sets the PublicKey field's value. +func (s *DevEndpoint) SetPublicKey(v string) *DevEndpoint { + s.PublicKey = &v + return s +} + +// SetPublicKeys sets the PublicKeys field's value. +func (s *DevEndpoint) SetPublicKeys(v []*string) *DevEndpoint { + s.PublicKeys = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DevEndpoint) SetRoleArn(v string) *DevEndpoint { + s.RoleArn = &v + return s +} + +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *DevEndpoint) SetSecurityConfiguration(v string) *DevEndpoint { + s.SecurityConfiguration = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *DevEndpoint) SetSecurityGroupIds(v []*string) *DevEndpoint { + s.SecurityGroupIds = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DevEndpoint) SetStatus(v string) *DevEndpoint { + s.Status = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *DevEndpoint) SetSubnetId(v string) *DevEndpoint { + s.SubnetId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DevEndpoint) SetVpcId(v string) *DevEndpoint { + s.VpcId = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *DevEndpoint) SetWorkerType(v string) *DevEndpoint { + s.WorkerType = &v + return s +} + +// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. 
+func (s *DevEndpoint) SetYarnEndpointAddress(v string) *DevEndpoint { + s.YarnEndpointAddress = &v + return s +} + +// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. +func (s *DevEndpoint) SetZeppelinRemoteSparkInterpreterPort(v int64) *DevEndpoint { + s.ZeppelinRemoteSparkInterpreterPort = &v return s } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *CreateJobInput) SetNotificationProperty(v *NotificationProperty) *CreateJobInput { - s.NotificationProperty = v - return s +// Custom libraries to be loaded into a development endpoint. +type DevEndpointCustomLibraries struct { + _ struct{} `type:"structure"` + + // The path to one or more Java .jar files in an S3 bucket that should be loaded + // in your DevEndpoint. + // + // You can only use pure Java/Scala libraries with a DevEndpoint. + ExtraJarsS3Path *string `type:"string"` + + // The paths to one or more Python libraries in an Amazon Simple Storage Service + // (Amazon S3) bucket that should be loaded in your DevEndpoint. Multiple values + // must be complete paths separated by a comma. + // + // You can only use pure Python libraries with a DevEndpoint. Libraries that + // rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python + // data analysis library, are not currently supported. + ExtraPythonLibsS3Path *string `type:"string"` +} + +// String returns the string representation +func (s DevEndpointCustomLibraries) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DevEndpointCustomLibraries) GoString() string { + return s.String() } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. -func (s *CreateJobInput) SetNumberOfWorkers(v int64) *CreateJobInput { - s.NumberOfWorkers = &v +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. +func (s *DevEndpointCustomLibraries) SetExtraJarsS3Path(v string) *DevEndpointCustomLibraries { + s.ExtraJarsS3Path = &v return s } -// SetRole sets the Role field's value. -func (s *CreateJobInput) SetRole(v string) *CreateJobInput { - s.Role = &v +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. +func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndpointCustomLibraries { + s.ExtraPythonLibsS3Path = &v return s } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *CreateJobInput) SetSecurityConfiguration(v string) *CreateJobInput { - s.SecurityConfiguration = &v - return s +// Specifies an Amazon DynamoDB table to crawl. +type DynamoDBTarget struct { + _ struct{} `type:"structure"` + + // The name of the DynamoDB table to crawl. + Path *string `type:"string"` } -// SetTags sets the Tags field's value. -func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput { - s.Tags = v - return s +// String returns the string representation +func (s DynamoDBTarget) String() string { + return awsutil.Prettify(s) } -// SetTimeout sets the Timeout field's value. -func (s *CreateJobInput) SetTimeout(v int64) *CreateJobInput { - s.Timeout = &v - return s +// GoString returns the string representation +func (s DynamoDBTarget) GoString() string { + return s.String() } -// SetWorkerType sets the WorkerType field's value. -func (s *CreateJobInput) SetWorkerType(v string) *CreateJobInput { - s.WorkerType = &v +// SetPath sets the Path field's value. 
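DevEndpointCustomLibraries above is the payload for swapping the libraries on an existing endpoint via UpdateDevEndpoint. A sketch under the same client assumptions (endpoint name and S3 paths are hypothetical; note the comma-separated multi-value convention documented above):

	libs := &glue.DevEndpointCustomLibraries{
		ExtraJarsS3Path: aws.String("s3://my-bucket/jars/deps.jar"), // hypothetical; pure Java/Scala only
		ExtraPythonLibsS3Path: aws.String(
			"s3://my-bucket/libs/a.zip,s3://my-bucket/libs/b.zip"), // hypothetical; pure Python only
	}
	_, err := svc.UpdateDevEndpoint(&glue.UpdateDevEndpointInput{
		EndpointName:       aws.String("my-endpoint"), // hypothetical
		CustomLibraries:    libs,
		UpdateEtlLibraries: aws.Bool(true), // ask the endpoint to reload the libraries
	})
	if err != nil {
		log.Fatal(err)
	}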
+func (s *DynamoDBTarget) SetPath(v string) *DynamoDBTarget {
+	s.Path = &v
+	return s
+}

-type CreateJobOutput struct {
+// An edge represents a directed connection between two AWS Glue components
+// which are part of the workflow the edge belongs to.
+type Edge struct {
	_ struct{} `type:"structure"`

-	// The unique name that was provided for this job definition.
-	Name *string `min:"1" type:"string"`
+	// The unique ID of the node within the workflow where the edge ends.
+	DestinationId *string `min:"1" type:"string"`
+
+	// The unique ID of the node within the workflow where the edge starts.
+	SourceId *string `min:"1" type:"string"`
 }

 // String returns the string representation
-func (s CreateJobOutput) String() string {
+func (s Edge) String() string {
	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateJobOutput) GoString() string {
+func (s Edge) GoString() string {
	return s.String()
 }

-// SetName sets the Name field's value.
-func (s *CreateJobOutput) SetName(v string) *CreateJobOutput {
-	s.Name = &v
+// SetDestinationId sets the DestinationId field's value.
+func (s *Edge) SetDestinationId(v string) *Edge {
+	s.DestinationId = &v
	return s
 }

-// Specifies a JSON classifier for CreateClassifier to create.
-type CreateJsonClassifierRequest struct {
+// SetSourceId sets the SourceId field's value.
+func (s *Edge) SetSourceId(v string) *Edge {
+	s.SourceId = &v
+	return s
+}
+
+// Specifies the encryption-at-rest configuration for the Data Catalog.
+type EncryptionAtRest struct {
	_ struct{} `type:"structure"`

-	// A JsonPath string defining the JSON data for the classifier to classify.
-	// AWS Glue supports a subset of JsonPath, as described in Writing JsonPath
-	// Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json).
+	// The encryption-at-rest mode for encrypting Data Catalog data.
	//
-	// JsonPath is a required field
-	JsonPath *string `type:"string" required:"true"`
+	// CatalogEncryptionMode is a required field
+	CatalogEncryptionMode *string `type:"string" required:"true" enum:"CatalogEncryptionMode"`

-	// The name of the classifier.
-	//
-	// Name is a required field
-	Name *string `min:"1" type:"string" required:"true"`
+	// The ID of the AWS KMS key to use for encryption at rest.
+	SseAwsKmsKeyId *string `min:"1" type:"string"`
 }

 // String returns the string representation
-func (s CreateJsonClassifierRequest) String() string {
+func (s EncryptionAtRest) String() string {
	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateJsonClassifierRequest) GoString() string {
+func (s EncryptionAtRest) GoString() string {
	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateJsonClassifierRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateJsonClassifierRequest"} - if s.JsonPath == nil { - invalidParams.Add(request.NewErrParamRequired("JsonPath")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *EncryptionAtRest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionAtRest"} + if s.CatalogEncryptionMode == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogEncryptionMode")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.SseAwsKmsKeyId != nil && len(*s.SseAwsKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SseAwsKmsKeyId", 1)) } if invalidParams.Len() > 0 { @@ -13444,260 +19614,331 @@ func (s *CreateJsonClassifierRequest) Validate() error { return nil } -// SetJsonPath sets the JsonPath field's value. -func (s *CreateJsonClassifierRequest) SetJsonPath(v string) *CreateJsonClassifierRequest { - s.JsonPath = &v +// SetCatalogEncryptionMode sets the CatalogEncryptionMode field's value. +func (s *EncryptionAtRest) SetCatalogEncryptionMode(v string) *EncryptionAtRest { + s.CatalogEncryptionMode = &v return s } -// SetName sets the Name field's value. -func (s *CreateJsonClassifierRequest) SetName(v string) *CreateJsonClassifierRequest { - s.Name = &v +// SetSseAwsKmsKeyId sets the SseAwsKmsKeyId field's value. +func (s *EncryptionAtRest) SetSseAwsKmsKeyId(v string) *EncryptionAtRest { + s.SseAwsKmsKeyId = &v return s } -type CreatePartitionInput struct { +// Specifies an encryption configuration. +type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The ID of the catalog in which the partion is to be created. Currently, this - // should be the AWS account ID. - CatalogId *string `min:"1" type:"string"` - - // The name of the metadata database in which the partition is to be created. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The encryption configuration for Amazon CloudWatch. + CloudWatchEncryption *CloudWatchEncryption `type:"structure"` - // A PartitionInput structure defining the partition to be created. - // - // PartitionInput is a required field - PartitionInput *PartitionInput `type:"structure" required:"true"` + // The encryption configuration for job bookmarks. + JobBookmarksEncryption *JobBookmarksEncryption `type:"structure"` - // The name of the metadata table in which the partition is to be created. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // The encryption configuration for Amazon Simple Storage Service (Amazon S3) + // data. + S3Encryption []*S3Encryption `type:"list"` } // String returns the string representation -func (s CreatePartitionInput) String() string { +func (s EncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreatePartitionInput) GoString() string { +func (s EncryptionConfiguration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
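EncryptionAtRest above feeds the Data Catalog settings, while the EncryptionConfiguration type above is the body of a security configuration. A sketch of the latter under the same client assumptions (the configuration name is hypothetical; the mode string follows the S3EncryptionMode enum):

	cfg := &glue.EncryptionConfiguration{
		S3Encryption: []*glue.S3Encryption{{
			S3EncryptionMode: aws.String("SSE-S3"), // or "SSE-KMS" together with a KmsKeyArn
		}},
	}
	_, err := svc.CreateSecurityConfiguration(&glue.CreateSecurityConfigurationInput{
		Name:                    aws.String("my-security-config"), // hypothetical
		EncryptionConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}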
-func (s *CreatePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionInput == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionInput")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PartitionInput != nil { - if err := s.PartitionInput.Validate(); err != nil { - invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) - } - } +// SetCloudWatchEncryption sets the CloudWatchEncryption field's value. +func (s *EncryptionConfiguration) SetCloudWatchEncryption(v *CloudWatchEncryption) *EncryptionConfiguration { + s.CloudWatchEncryption = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetJobBookmarksEncryption sets the JobBookmarksEncryption field's value. +func (s *EncryptionConfiguration) SetJobBookmarksEncryption(v *JobBookmarksEncryption) *EncryptionConfiguration { + s.JobBookmarksEncryption = v + return s } -// SetCatalogId sets the CatalogId field's value. -func (s *CreatePartitionInput) SetCatalogId(v string) *CreatePartitionInput { - s.CatalogId = &v +// SetS3Encryption sets the S3Encryption field's value. +func (s *EncryptionConfiguration) SetS3Encryption(v []*S3Encryption) *EncryptionConfiguration { + s.S3Encryption = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreatePartitionInput) SetDatabaseName(v string) *CreatePartitionInput { - s.DatabaseName = &v +// Contains details about an error. +type ErrorDetail struct { + _ struct{} `type:"structure"` + + // The code associated with this error. + ErrorCode *string `min:"1" type:"string"` + + // A message describing the error. + ErrorMessage *string `type:"string"` +} + +// String returns the string representation +func (s ErrorDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDetail) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ErrorDetail) SetErrorCode(v string) *ErrorDetail { + s.ErrorCode = &v return s } -// SetPartitionInput sets the PartitionInput field's value. -func (s *CreatePartitionInput) SetPartitionInput(v *PartitionInput) *CreatePartitionInput { - s.PartitionInput = v +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { + s.ErrorMessage = &v return s } -// SetTableName sets the TableName field's value. -func (s *CreatePartitionInput) SetTableName(v string) *CreatePartitionInput { - s.TableName = &v +// Evaluation metrics provide an estimate of the quality of your machine learning +// transform. +type EvaluationMetrics struct { + _ struct{} `type:"structure"` + + // The evaluation metrics for the find matches algorithm. + FindMatchesMetrics *FindMatchesMetrics `type:"structure"` + + // The type of machine learning transform. 
+ // + // TransformType is a required field + TransformType *string `type:"string" required:"true" enum:"TransformType"` +} + +// String returns the string representation +func (s EvaluationMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationMetrics) GoString() string { + return s.String() +} + +// SetFindMatchesMetrics sets the FindMatchesMetrics field's value. +func (s *EvaluationMetrics) SetFindMatchesMetrics(v *FindMatchesMetrics) *EvaluationMetrics { + s.FindMatchesMetrics = v + return s +} + +// SetTransformType sets the TransformType field's value. +func (s *EvaluationMetrics) SetTransformType(v string) *EvaluationMetrics { + s.TransformType = &v + return s +} + +// An execution property of a job. +type ExecutionProperty struct { + _ struct{} `type:"structure"` + + // The maximum number of concurrent runs allowed for the job. The default is + // 1. An error is returned when this threshold is reached. The maximum value + // you can specify is controlled by a service limit. + MaxConcurrentRuns *int64 `type:"integer"` +} + +// String returns the string representation +func (s ExecutionProperty) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionProperty) GoString() string { + return s.String() +} + +// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. +func (s *ExecutionProperty) SetMaxConcurrentRuns(v int64) *ExecutionProperty { + s.MaxConcurrentRuns = &v return s } -type CreatePartitionOutput struct { +// Specifies configuration properties for an exporting labels task run. +type ExportLabelsTaskRunProperties struct { _ struct{} `type:"structure"` + + // The Amazon Simple Storage Service (Amazon S3) path where you will export + // the labels. + OutputS3Path *string `type:"string"` } // String returns the string representation -func (s CreatePartitionOutput) String() string { +func (s ExportLabelsTaskRunProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreatePartitionOutput) GoString() string { +func (s ExportLabelsTaskRunProperties) GoString() string { return s.String() } -type CreateScriptInput struct { +// SetOutputS3Path sets the OutputS3Path field's value. +func (s *ExportLabelsTaskRunProperties) SetOutputS3Path(v string) *ExportLabelsTaskRunProperties { + s.OutputS3Path = &v + return s +} + +// The evaluation metrics for the find matches algorithm. The quality of your +// machine learning transform is measured by getting your transform to predict +// some matches and comparing the results to known matches from the same dataset. +// The quality metrics are based on a subset of your data, so they are not precise. +type FindMatchesMetrics struct { _ struct{} `type:"structure"` - // A list of the edges in the DAG. - DagEdges []*CodeGenEdge `type:"list"` + // The area under the precision/recall curve (AUPRC) is a single number measuring + // the overall quality of the transform, that is independent of the choice made + // for precision vs. recall. Higher values indicate that you have a more attractive + // precision vs. recall tradeoff. + // + // For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) + // in Wikipedia. + AreaUnderPRCurve *float64 `type:"double"` - // A list of the nodes in the DAG. 
-	DagNodes []*CodeGenNode `type:"list"`
+	// The confusion matrix shows you what your transform is predicting accurately
+	// and what types of errors it is making.
+	//
+	// For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix)
+	// in Wikipedia.
+	ConfusionMatrix *ConfusionMatrix `type:"structure"`

-	// The programming language of the resulting code from the DAG.
-	Language *string `type:"string" enum:"Language"`
+	// The maximum F1 metric indicates the transform's accuracy between 0 and 1,
+	// where 1 is the best accuracy.
+	//
+	// For more information, see F1 score (https://en.wikipedia.org/wiki/F1_score)
+	// in Wikipedia.
+	F1 *float64 `type:"double"`
+
+	// The precision metric indicates how often your transform is correct when
+	// it predicts a match. Specifically, it measures how well the transform finds
+	// true positives from the total true positives possible.
+	//
+	// For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall)
+	// in Wikipedia.
+	Precision *float64 `type:"double"`
+
+	// The recall metric indicates, for an actual match, how often your transform
+	// predicts the match. Specifically, it measures how well the transform finds
+	// true positives from the total records in the source data.
+	//
+	// For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall)
+	// in Wikipedia.
+	Recall *float64 `type:"double"`
 }

 // String returns the string representation
-func (s CreateScriptInput) String() string {
+func (s FindMatchesMetrics) String() string {
	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateScriptInput) GoString() string {
+func (s FindMatchesMetrics) GoString() string {
	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateScriptInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreateScriptInput"}
-	if s.DagEdges != nil {
-		for i, v := range s.DagEdges {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagEdges", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-	if s.DagNodes != nil {
-		for i, v := range s.DagNodes {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DagNodes", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetDagEdges sets the DagEdges field's value.
-func (s *CreateScriptInput) SetDagEdges(v []*CodeGenEdge) *CreateScriptInput {
-	s.DagEdges = v
+// SetAreaUnderPRCurve sets the AreaUnderPRCurve field's value.
+func (s *FindMatchesMetrics) SetAreaUnderPRCurve(v float64) *FindMatchesMetrics {
+	s.AreaUnderPRCurve = &v
	return s
 }

-// SetDagNodes sets the DagNodes field's value.
-func (s *CreateScriptInput) SetDagNodes(v []*CodeGenNode) *CreateScriptInput {
-	s.DagNodes = v
+// SetConfusionMatrix sets the ConfusionMatrix field's value.
+func (s *FindMatchesMetrics) SetConfusionMatrix(v *ConfusionMatrix) *FindMatchesMetrics {
+	s.ConfusionMatrix = v
	return s
 }

-// SetLanguage sets the Language field's value.
-func (s *CreateScriptInput) SetLanguage(v string) *CreateScriptInput {
-	s.Language = &v
+// SetF1 sets the F1 field's value.
+func (s *FindMatchesMetrics) SetF1(v float64) *FindMatchesMetrics { + s.F1 = &v return s } -type CreateScriptOutput struct { - _ struct{} `type:"structure"` - - // The Python script generated from the DAG. - PythonScript *string `type:"string"` - - // The Scala code generated from the DAG. - ScalaCode *string `type:"string"` -} - -// String returns the string representation -func (s CreateScriptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateScriptOutput) GoString() string { - return s.String() -} - -// SetPythonScript sets the PythonScript field's value. -func (s *CreateScriptOutput) SetPythonScript(v string) *CreateScriptOutput { - s.PythonScript = &v +// SetPrecision sets the Precision field's value. +func (s *FindMatchesMetrics) SetPrecision(v float64) *FindMatchesMetrics { + s.Precision = &v return s } -// SetScalaCode sets the ScalaCode field's value. -func (s *CreateScriptOutput) SetScalaCode(v string) *CreateScriptOutput { - s.ScalaCode = &v +// SetRecall sets the Recall field's value. +func (s *FindMatchesMetrics) SetRecall(v float64) *FindMatchesMetrics { + s.Recall = &v return s } -type CreateSecurityConfigurationInput struct { +// The parameters to configure the find matches transform. +type FindMatchesParameters struct { _ struct{} `type:"structure"` - // The encryption configuration for the new security configuration. + // The value that is selected when tuning your transform for a balance between + // accuracy and cost. A value of 0.5 means that the system balances accuracy + // and cost concerns. A value of 1.0 means a bias purely for accuracy, which + // typically results in a higher cost, sometimes substantially higher. A value + // of 0.0 means a bias purely for cost, which results in a less accurate FindMatches + // transform, sometimes with unacceptable accuracy. // - // EncryptionConfiguration is a required field - EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` + // Accuracy measures how well the transform finds true positives and true negatives. + // Increasing accuracy requires more machine resources and cost. But it also + // results in increased recall. + // + // Cost measures how many compute resources, and thus money, are consumed to + // run the transform. + AccuracyCostTradeoff *float64 `type:"double"` - // The name for the new security configuration. + // The value to switch on or off to force the output to match the provided labels + // from users. If the value is True, the find matches transform forces the output + // to match the provided labels. The results override the normal conflation + // results. If the value is False, the find matches transform does not ensure + // all the labels provided are respected, and the results rely on the trained + // model. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // Note that setting this value to true may increase the conflation execution + // time. + EnforceProvidedLabels *bool `type:"boolean"` + + // The value selected when tuning your transform for a balance between precision + // and recall. A value of 0.5 means no preference; a value of 1.0 means a bias + // purely for precision, and a value of 0.0 means a bias for recall. Because + // this is a tradeoff, choosing values close to 1.0 means very low recall, and + // choosing values close to 0.0 results in very low precision. 
+ // + // The precision metric indicates how often your model is correct when it predicts + // a match. + // + // The recall metric indicates that for an actual match, how often your model + // predicts the match. + PrecisionRecallTradeoff *float64 `type:"double"` + + // The name of a column that uniquely identifies rows in the source table. Used + // to help identify matching records. + PrimaryKeyColumnName *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateSecurityConfigurationInput) String() string { +func (s FindMatchesParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSecurityConfigurationInput) GoString() string { +func (s FindMatchesParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSecurityConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSecurityConfigurationInput"} - if s.EncryptionConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionConfiguration")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *FindMatchesParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FindMatchesParameters"} + if s.PrimaryKeyColumnName != nil && len(*s.PrimaryKeyColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrimaryKeyColumnName", 1)) } if invalidParams.Len() > 0 { @@ -13706,99 +19947,96 @@ func (s *CreateSecurityConfigurationInput) Validate() error { return nil } -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *CreateSecurityConfigurationInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateSecurityConfigurationInput { - s.EncryptionConfiguration = v +// SetAccuracyCostTradeoff sets the AccuracyCostTradeoff field's value. +func (s *FindMatchesParameters) SetAccuracyCostTradeoff(v float64) *FindMatchesParameters { + s.AccuracyCostTradeoff = &v return s } -// SetName sets the Name field's value. -func (s *CreateSecurityConfigurationInput) SetName(v string) *CreateSecurityConfigurationInput { - s.Name = &v +// SetEnforceProvidedLabels sets the EnforceProvidedLabels field's value. +func (s *FindMatchesParameters) SetEnforceProvidedLabels(v bool) *FindMatchesParameters { + s.EnforceProvidedLabels = &v return s } -type CreateSecurityConfigurationOutput struct { +// SetPrecisionRecallTradeoff sets the PrecisionRecallTradeoff field's value. +func (s *FindMatchesParameters) SetPrecisionRecallTradeoff(v float64) *FindMatchesParameters { + s.PrecisionRecallTradeoff = &v + return s +} + +// SetPrimaryKeyColumnName sets the PrimaryKeyColumnName field's value. +func (s *FindMatchesParameters) SetPrimaryKeyColumnName(v string) *FindMatchesParameters { + s.PrimaryKeyColumnName = &v + return s +} + +// Specifies configuration properties for a Find Matches task run. +type FindMatchesTaskRunProperties struct { _ struct{} `type:"structure"` - // The time at which the new security configuration was created. - CreatedTimestamp *time.Time `type:"timestamp"` + // The job ID for the Find Matches task run. + JobId *string `min:"1" type:"string"` - // The name assigned to the new security configuration. - Name *string `min:"1" type:"string"` + // The name assigned to the job for the Find Matches task run. 
+ JobName *string `min:"1" type:"string"` + + // The job run ID for the Find Matches task run. + JobRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateSecurityConfigurationOutput) String() string { +func (s FindMatchesTaskRunProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSecurityConfigurationOutput) GoString() string { +func (s FindMatchesTaskRunProperties) GoString() string { return s.String() } -// SetCreatedTimestamp sets the CreatedTimestamp field's value. -func (s *CreateSecurityConfigurationOutput) SetCreatedTimestamp(v time.Time) *CreateSecurityConfigurationOutput { - s.CreatedTimestamp = &v +// SetJobId sets the JobId field's value. +func (s *FindMatchesTaskRunProperties) SetJobId(v string) *FindMatchesTaskRunProperties { + s.JobId = &v return s } -// SetName sets the Name field's value. -func (s *CreateSecurityConfigurationOutput) SetName(v string) *CreateSecurityConfigurationOutput { - s.Name = &v +// SetJobName sets the JobName field's value. +func (s *FindMatchesTaskRunProperties) SetJobName(v string) *FindMatchesTaskRunProperties { + s.JobName = &v return s } -type CreateTableInput struct { +// SetJobRunId sets the JobRunId field's value. +func (s *FindMatchesTaskRunProperties) SetJobRunId(v string) *FindMatchesTaskRunProperties { + s.JobRunId = &v + return s +} + +type GetCatalogImportStatusInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which to create the Table. If none is supplied, - // the AWS account ID is used by default. + // The ID of the catalog to migrate. Currently, this should be the AWS account + // ID. CatalogId *string `min:"1" type:"string"` - - // The catalog database in which to create the new table. For Hive compatibility, - // this name is entirely lowercase. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The TableInput object that defines the metadata table to create in the catalog. - // - // TableInput is a required field - TableInput *TableInput `type:"structure" required:"true"` } // String returns the string representation -func (s CreateTableInput) String() string { +func (s GetCatalogImportStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTableInput) GoString() string { +func (s GetCatalogImportStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
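FindMatchesParameters above is consumed through the package's TransformParameters shape when a machine learning transform is created or updated. A sketch of tuning the two tradeoff knobs (column name and values are hypothetical):

	params := &glue.TransformParameters{
		TransformType: aws.String("FIND_MATCHES"),
		FindMatchesParameters: &glue.FindMatchesParameters{
			PrimaryKeyColumnName:    aws.String("record_id"), // hypothetical unique-row column
			PrecisionRecallTradeoff: aws.Float64(0.9),        // bias toward precision
			AccuracyCostTradeoff:    aws.Float64(0.5),        // balance accuracy and cost
			EnforceProvidedLabels:   aws.Bool(false),         // let the trained model override labels
		},
	}
	if err := params.FindMatchesParameters.Validate(); err != nil {
		log.Fatal(err) // e.g. PrimaryKeyColumnName shorter than 1 character
	}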
-func (s *CreateTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} +func (s *GetCatalogImportStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCatalogImportStatusInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableInput == nil { - invalidParams.Add(request.NewErrParamRequired("TableInput")) - } - if s.TableInput != nil { - if err := s.TableInput.Validate(); err != nil { - invalidParams.AddNested("TableInput", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -13807,122 +20045,62 @@ func (s *CreateTableInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *CreateTableInput) SetCatalogId(v string) *CreateTableInput { +func (s *GetCatalogImportStatusInput) SetCatalogId(v string) *GetCatalogImportStatusInput { s.CatalogId = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateTableInput) SetDatabaseName(v string) *CreateTableInput { - s.DatabaseName = &v - return s -} - -// SetTableInput sets the TableInput field's value. -func (s *CreateTableInput) SetTableInput(v *TableInput) *CreateTableInput { - s.TableInput = v - return s -} - -type CreateTableOutput struct { +type GetCatalogImportStatusOutput struct { _ struct{} `type:"structure"` + + // The status of the specified catalog migration. + ImportStatus *CatalogImportStatus `type:"structure"` } // String returns the string representation -func (s CreateTableOutput) String() string { +func (s GetCatalogImportStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTableOutput) GoString() string { +func (s GetCatalogImportStatusOutput) GoString() string { return s.String() } -type CreateTriggerInput struct { - _ struct{} `type:"structure"` - - // The actions initiated by this trigger when it fires. - // - // Actions is a required field - Actions []*Action `type:"list" required:"true"` +// SetImportStatus sets the ImportStatus field's value. +func (s *GetCatalogImportStatusOutput) SetImportStatus(v *CatalogImportStatus) *GetCatalogImportStatusOutput { + s.ImportStatus = v + return s +} - // A description of the new trigger. - Description *string `type:"string"` +type GetClassifierInput struct { + _ struct{} `type:"structure"` - // The name of the trigger. + // Name of the classifier to retrieve. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - - // A predicate to specify when the new trigger should fire. - // - // This field is required when the trigger type is CONDITIONAL. - Predicate *Predicate `type:"structure"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - // - // This field is required when the trigger type is SCHEDULED. - Schedule *string `type:"string"` - - // Set to true to start SCHEDULED and CONDITIONAL triggers when created. True - // not supported for ON_DEMAND triggers. 
- StartOnCreation *bool `type:"boolean"` - - // The tags to use with this trigger. You may use tags to limit access to the - // trigger. For more information about tags in AWS Glue, see AWS Tags in AWS - // Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the - // developer guide. - Tags map[string]*string `type:"map"` - - // The type of the new trigger. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"TriggerType"` } // String returns the string representation -func (s CreateTriggerInput) String() string { +func (s GetClassifierInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTriggerInput) GoString() string { +func (s GetClassifierInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTriggerInput"} - if s.Actions == nil { - invalidParams.Add(request.NewErrParamRequired("Actions")) - } +func (s *GetClassifierInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetClassifierInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Actions != nil { - for i, v := range s.Actions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Predicate != nil { - if err := s.Predicate.Validate(); err != nil { - invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -13930,124 +20108,153 @@ func (s *CreateTriggerInput) Validate() error { return nil } -// SetActions sets the Actions field's value. -func (s *CreateTriggerInput) SetActions(v []*Action) *CreateTriggerInput { - s.Actions = v +// SetName sets the Name field's value. +func (s *GetClassifierInput) SetName(v string) *GetClassifierInput { + s.Name = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateTriggerInput) SetDescription(v string) *CreateTriggerInput { - s.Description = &v - return s +type GetClassifierOutput struct { + _ struct{} `type:"structure"` + + // The requested classifier. + Classifier *Classifier `type:"structure"` } -// SetName sets the Name field's value. -func (s *CreateTriggerInput) SetName(v string) *CreateTriggerInput { - s.Name = &v - return s +// String returns the string representation +func (s GetClassifierOutput) String() string { + return awsutil.Prettify(s) } -// SetPredicate sets the Predicate field's value. -func (s *CreateTriggerInput) SetPredicate(v *Predicate) *CreateTriggerInput { - s.Predicate = v - return s +// GoString returns the string representation +func (s GetClassifierOutput) GoString() string { + return s.String() } -// SetSchedule sets the Schedule field's value. -func (s *CreateTriggerInput) SetSchedule(v string) *CreateTriggerInput { - s.Schedule = &v +// SetClassifier sets the Classifier field's value. +func (s *GetClassifierOutput) SetClassifier(v *Classifier) *GetClassifierOutput { + s.Classifier = v return s } -// SetStartOnCreation sets the StartOnCreation field's value. 
-func (s *CreateTriggerInput) SetStartOnCreation(v bool) *CreateTriggerInput { - s.StartOnCreation = &v - return s +type GetClassifiersInput struct { + _ struct{} `type:"structure"` + + // The size of the list to return (optional). + MaxResults *int64 `min:"1" type:"integer"` + + // An optional continuation token. + NextToken *string `type:"string"` } -// SetTags sets the Tags field's value. -func (s *CreateTriggerInput) SetTags(v map[string]*string) *CreateTriggerInput { - s.Tags = v +// String returns the string representation +func (s GetClassifiersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClassifiersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetClassifiersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetClassifiersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetClassifiersInput) SetMaxResults(v int64) *GetClassifiersInput { + s.MaxResults = &v return s } -// SetType sets the Type field's value. -func (s *CreateTriggerInput) SetType(v string) *CreateTriggerInput { - s.Type = &v +// SetNextToken sets the NextToken field's value. +func (s *GetClassifiersInput) SetNextToken(v string) *GetClassifiersInput { + s.NextToken = &v return s } -type CreateTriggerOutput struct { +type GetClassifiersOutput struct { _ struct{} `type:"structure"` - // The name of the trigger. - Name *string `min:"1" type:"string"` + // The requested list of classifier objects. + Classifiers []*Classifier `type:"list"` + + // A continuation token. + NextToken *string `type:"string"` } // String returns the string representation -func (s CreateTriggerOutput) String() string { +func (s GetClassifiersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTriggerOutput) GoString() string { +func (s GetClassifiersOutput) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *CreateTriggerOutput) SetName(v string) *CreateTriggerOutput { - s.Name = &v +// SetClassifiers sets the Classifiers field's value. +func (s *GetClassifiersOutput) SetClassifiers(v []*Classifier) *GetClassifiersOutput { + s.Classifiers = v return s } -type CreateUserDefinedFunctionInput struct { +// SetNextToken sets the NextToken field's value. +func (s *GetClassifiersOutput) SetNextToken(v string) *GetClassifiersOutput { + s.NextToken = &v + return s +} + +type GetConnectionInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which to create the function. If none is supplied, + // The ID of the Data Catalog in which the connection resides. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // The name of the catalog database in which to create the function. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // Allows you to retrieve the connection metadata without returning the password. + // For instance, the AWS Glue console uses this flag to retrieve the connection, + // and does not display the password. 
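
The MaxResults/NextToken pair on GetClassifiersInput follows the usual Glue pagination contract: keep calling until the continuation token comes back empty. A minimal hand-rolled pager, assuming the generated GetClassifiers client method and a default session:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Page through all classifiers by hand using the MaxResults/NextToken
	// fields defined above; an absent/empty NextToken ends the listing.
	input := &glue.GetClassifiersInput{MaxResults: aws.Int64(25)}
	for {
		out, err := svc.GetClassifiers(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range out.Classifiers {
			fmt.Println(c)
		}
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}
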
Set this parameter when the caller might + // not have permission to use the AWS KMS key to decrypt the password, but it + // does have permission to access the rest of the connection properties. + HidePassword *bool `type:"boolean"` - // A FunctionInput object that defines the function to create in the Data Catalog. + // The name of the connection definition to retrieve. // - // FunctionInput is a required field - FunctionInput *UserDefinedFunctionInput `type:"structure" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateUserDefinedFunctionInput) String() string { +func (s GetConnectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateUserDefinedFunctionInput) GoString() string { +func (s GetConnectionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateUserDefinedFunctionInput"} +func (s *GetConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConnectionInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionInput == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionInput")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.FunctionInput != nil { - if err := s.FunctionInput.Validate(); err != nil { - invalidParams.AddNested("FunctionInput", err.(request.ErrInvalidParams)) - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -14057,251 +20264,222 @@ func (s *CreateUserDefinedFunctionInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *CreateUserDefinedFunctionInput) SetCatalogId(v string) *CreateUserDefinedFunctionInput { +func (s *GetConnectionInput) SetCatalogId(v string) *GetConnectionInput { s.CatalogId = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *CreateUserDefinedFunctionInput) SetDatabaseName(v string) *CreateUserDefinedFunctionInput { - s.DatabaseName = &v +// SetHidePassword sets the HidePassword field's value. +func (s *GetConnectionInput) SetHidePassword(v bool) *GetConnectionInput { + s.HidePassword = &v return s } -// SetFunctionInput sets the FunctionInput field's value. -func (s *CreateUserDefinedFunctionInput) SetFunctionInput(v *UserDefinedFunctionInput) *CreateUserDefinedFunctionInput { - s.FunctionInput = v +// SetName sets the Name field's value. +func (s *GetConnectionInput) SetName(v string) *GetConnectionInput { + s.Name = &v return s } -type CreateUserDefinedFunctionOutput struct { +type GetConnectionOutput struct { _ struct{} `type:"structure"` + + // The requested connection definition. 
+	Connection *Connection `type:"structure"`
 }
 
 // String returns the string representation
-func (s CreateUserDefinedFunctionOutput) String() string {
+func (s GetConnectionOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s CreateUserDefinedFunctionOutput) GoString() string {
+func (s GetConnectionOutput) GoString() string {
 	return s.String()
 }
 
-// Specifies an XML classifier for CreateClassifier to create.
-type CreateXMLClassifierRequest struct {
-	_ struct{} `type:"structure"`
+// SetConnection sets the Connection field's value.
+func (s *GetConnectionOutput) SetConnection(v *Connection) *GetConnectionOutput {
+	s.Connection = v
+	return s
+}
 
-	// An identifier of the data format that the classifier matches.
-	//
-	// Classification is a required field
-	Classification *string `type:"string" required:"true"`
+// Filters the connection definitions that are returned by the GetConnections
+// API operation.
+type GetConnectionsFilter struct {
+	_ struct{} `type:"structure"`
 
-	// The name of the classifier.
-	//
-	// Name is a required field
-	Name *string `min:"1" type:"string" required:"true"`
+	// The type of connections to return. Currently, only JDBC is supported; SFTP
+	// is not supported.
+	ConnectionType *string `type:"string" enum:"ConnectionType"`
 
-	// The XML tag designating the element that contains each record in an XML document
-	// being parsed. Note that this cannot identify a self-closing element (closed
-	// by />). An empty row element that contains only attributes can be parsed
-	// as long as it ends with a closing tag (for example, <row item_a="A"
-	// item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
-	RowTag *string `type:"string"`
+	// A criteria string that must match the criteria recorded in the connection
+	// definition for that connection definition to be returned.
+	MatchCriteria []*string `type:"list"`
 }
 
 // String returns the string representation
-func (s CreateXMLClassifierRequest) String() string {
+func (s GetConnectionsFilter) String() string {
 	return awsutil.Prettify(s)
 }
-
-// GoString returns the string representation
-func (s CreateXMLClassifierRequest) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateXMLClassifierRequest) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreateXMLClassifierRequest"}
-	if s.Classification == nil {
-		invalidParams.Add(request.NewErrParamRequired("Classification"))
-	}
-	if s.Name == nil {
-		invalidParams.Add(request.NewErrParamRequired("Name"))
-	}
-	if s.Name != nil && len(*s.Name) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetClassification sets the Classification field's value.
-func (s *CreateXMLClassifierRequest) SetClassification(v string) *CreateXMLClassifierRequest {
-	s.Classification = &v
-	return s
+
+// GoString returns the string representation
+func (s GetConnectionsFilter) GoString() string {
+	return s.String()
 }
 
-// SetName sets the Name field's value.
-func (s *CreateXMLClassifierRequest) SetName(v string) *CreateXMLClassifierRequest {
-	s.Name = &v
+// SetConnectionType sets the ConnectionType field's value.
+func (s *GetConnectionsFilter) SetConnectionType(v string) *GetConnectionsFilter {
+	s.ConnectionType = &v
 	return s
 }
 
-// SetRowTag sets the RowTag field's value.
-func (s *CreateXMLClassifierRequest) SetRowTag(v string) *CreateXMLClassifierRequest { - s.RowTag = &v +// SetMatchCriteria sets the MatchCriteria field's value. +func (s *GetConnectionsFilter) SetMatchCriteria(v []*string) *GetConnectionsFilter { + s.MatchCriteria = v return s } -// A classifier for custom CSV content. -type CsvClassifier struct { +type GetConnectionsInput struct { _ struct{} `type:"structure"` - // Enables the processing of files that contain only one column. - AllowSingleColumn *bool `type:"boolean"` - - // Indicates whether the CSV file contains a header. - ContainsHeader *string `type:"string" enum:"CsvHeaderOption"` - - // The time this classifier was registered. - CreationTime *time.Time `type:"timestamp"` - - // A custom symbol to denote what separates each column entry in the row. - Delimiter *string `min:"1" type:"string"` - - // Specifies not to trim values before identifying the type of column values. - // The default value is true. - DisableValueTrimming *bool `type:"boolean"` - - // A list of strings representing column names. - Header []*string `type:"list"` + // The ID of the Data Catalog in which the connections reside. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // The time this classifier was last updated. - LastUpdated *time.Time `type:"timestamp"` + // A filter that controls which connections are returned. + Filter *GetConnectionsFilter `type:"structure"` - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // Allows you to retrieve the connection metadata without returning the password. + // For instance, the AWS Glue console uses this flag to retrieve the connection, + // and does not display the password. Set this parameter when the caller might + // not have permission to use the AWS KMS key to decrypt the password, but it + // does have permission to access the rest of the connection properties. + HidePassword *bool `type:"boolean"` - // A custom symbol to denote what combines content into a single column value. - // Must be different from the column delimiter. - QuoteSymbol *string `min:"1" type:"string"` + // The maximum number of connections to return in one response. + MaxResults *int64 `min:"1" type:"integer"` - // The version of this classifier. - Version *int64 `type:"long"` + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s CsvClassifier) String() string { +func (s GetConnectionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CsvClassifier) GoString() string { +func (s GetConnectionsInput) GoString() string { return s.String() } -// SetAllowSingleColumn sets the AllowSingleColumn field's value. -func (s *CsvClassifier) SetAllowSingleColumn(v bool) *CsvClassifier { - s.AllowSingleColumn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConnectionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetContainsHeader sets the ContainsHeader field's value. -func (s *CsvClassifier) SetContainsHeader(v string) *CsvClassifier { - s.ContainsHeader = &v +// SetCatalogId sets the CatalogId field's value. +func (s *GetConnectionsInput) SetCatalogId(v string) *GetConnectionsInput { + s.CatalogId = &v return s } -// SetCreationTime sets the CreationTime field's value. -func (s *CsvClassifier) SetCreationTime(v time.Time) *CsvClassifier { - s.CreationTime = &v +// SetFilter sets the Filter field's value. +func (s *GetConnectionsInput) SetFilter(v *GetConnectionsFilter) *GetConnectionsInput { + s.Filter = v return s } -// SetDelimiter sets the Delimiter field's value. -func (s *CsvClassifier) SetDelimiter(v string) *CsvClassifier { - s.Delimiter = &v +// SetHidePassword sets the HidePassword field's value. +func (s *GetConnectionsInput) SetHidePassword(v bool) *GetConnectionsInput { + s.HidePassword = &v return s } -// SetDisableValueTrimming sets the DisableValueTrimming field's value. -func (s *CsvClassifier) SetDisableValueTrimming(v bool) *CsvClassifier { - s.DisableValueTrimming = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetConnectionsInput) SetMaxResults(v int64) *GetConnectionsInput { + s.MaxResults = &v return s } -// SetHeader sets the Header field's value. -func (s *CsvClassifier) SetHeader(v []*string) *CsvClassifier { - s.Header = v +// SetNextToken sets the NextToken field's value. +func (s *GetConnectionsInput) SetNextToken(v string) *GetConnectionsInput { + s.NextToken = &v return s } -// SetLastUpdated sets the LastUpdated field's value. -func (s *CsvClassifier) SetLastUpdated(v time.Time) *CsvClassifier { - s.LastUpdated = &v - return s +type GetConnectionsOutput struct { + _ struct{} `type:"structure"` + + // A list of requested connection definitions. + ConnectionList []*Connection `type:"list"` + + // A continuation token, if the list of connections returned does not include + // the last of the filtered connections. + NextToken *string `type:"string"` } -// SetName sets the Name field's value. -func (s *CsvClassifier) SetName(v string) *CsvClassifier { - s.Name = &v - return s +// String returns the string representation +func (s GetConnectionsOutput) String() string { + return awsutil.Prettify(s) } -// SetQuoteSymbol sets the QuoteSymbol field's value. -func (s *CsvClassifier) SetQuoteSymbol(v string) *CsvClassifier { - s.QuoteSymbol = &v +// GoString returns the string representation +func (s GetConnectionsOutput) GoString() string { + return s.String() +} + +// SetConnectionList sets the ConnectionList field's value. +func (s *GetConnectionsOutput) SetConnectionList(v []*Connection) *GetConnectionsOutput { + s.ConnectionList = v return s } -// SetVersion sets the Version field's value. -func (s *CsvClassifier) SetVersion(v int64) *CsvClassifier { - s.Version = &v +// SetNextToken sets the NextToken field's value. +func (s *GetConnectionsOutput) SetNextToken(v string) *GetConnectionsOutput { + s.NextToken = &v return s } -// Contains configuration information for maintaining Data Catalog security. 
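
GetConnectionsFilter and HidePassword combine as documented above to list connections without returning secrets. A sketch under the same assumptions (generated GetConnections method, default session); the "JDBC" literal mirrors the ConnectionType doc comment:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// List JDBC connections with passwords withheld, per the HidePassword
	// semantics documented on GetConnectionsInput.
	out, err := svc.GetConnections(&glue.GetConnectionsInput{
		Filter:       &glue.GetConnectionsFilter{ConnectionType: aws.String("JDBC")},
		HidePassword: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, conn := range out.ConnectionList {
		fmt.Println(conn)
	}
}
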
-type DataCatalogEncryptionSettings struct { +type GetCrawlerInput struct { _ struct{} `type:"structure"` - // When connection password protection is enabled, the Data Catalog uses a customer-provided - // key to encrypt the password as part of CreateConnection or UpdateConnection - // and store it in the ENCRYPTED_PASSWORD field in the connection properties. - // You can enable catalog encryption or only password encryption. - ConnectionPasswordEncryption *ConnectionPasswordEncryption `type:"structure"` - - // Specifies the encryption-at-rest configuration for the Data Catalog. - EncryptionAtRest *EncryptionAtRest `type:"structure"` + // The name of the crawler to retrieve metadata for. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DataCatalogEncryptionSettings) String() string { +func (s GetCrawlerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataCatalogEncryptionSettings) GoString() string { +func (s GetCrawlerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DataCatalogEncryptionSettings) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DataCatalogEncryptionSettings"} - if s.ConnectionPasswordEncryption != nil { - if err := s.ConnectionPasswordEncryption.Validate(); err != nil { - invalidParams.AddNested("ConnectionPasswordEncryption", err.(request.ErrInvalidParams)) - } +func (s *GetCrawlerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCrawlerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.EncryptionAtRest != nil { - if err := s.EncryptionAtRest.Validate(); err != nil { - invalidParams.AddNested("EncryptionAtRest", err.(request.ErrInvalidParams)) - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -14310,182 +20488,147 @@ func (s *DataCatalogEncryptionSettings) Validate() error { return nil } -// SetConnectionPasswordEncryption sets the ConnectionPasswordEncryption field's value. -func (s *DataCatalogEncryptionSettings) SetConnectionPasswordEncryption(v *ConnectionPasswordEncryption) *DataCatalogEncryptionSettings { - s.ConnectionPasswordEncryption = v - return s -} - -// SetEncryptionAtRest sets the EncryptionAtRest field's value. -func (s *DataCatalogEncryptionSettings) SetEncryptionAtRest(v *EncryptionAtRest) *DataCatalogEncryptionSettings { - s.EncryptionAtRest = v +// SetName sets the Name field's value. +func (s *GetCrawlerInput) SetName(v string) *GetCrawlerInput { + s.Name = &v return s } -// The Database object represents a logical grouping of tables that may reside -// in a Hive metastore or an RDBMS. -type Database struct { +type GetCrawlerMetricsInput struct { _ struct{} `type:"structure"` - // The time at which the metadata database was created in the catalog. - CreateTime *time.Time `type:"timestamp"` - - // Description of the database. - Description *string `type:"string"` - - // The location of the database (for example, an HDFS path). - LocationUri *string `min:"1" type:"string"` + // A list of the names of crawlers about which to retrieve metrics. + CrawlerNameList []*string `type:"list"` - // Name of the database. For Hive compatibility, this is folded to lowercase - // when it is stored. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The maximum size of a list to return. + MaxResults *int64 `min:"1" type:"integer"` - // These key-value pairs define parameters and properties of the database. - Parameters map[string]*string `type:"map"` + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s Database) String() string { +func (s GetCrawlerMetricsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Database) GoString() string { +func (s GetCrawlerMetricsInput) GoString() string { return s.String() } -// SetCreateTime sets the CreateTime field's value. -func (s *Database) SetCreateTime(v time.Time) *Database { - s.CreateTime = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCrawlerMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCrawlerMetricsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } -// SetDescription sets the Description field's value. -func (s *Database) SetDescription(v string) *Database { - s.Description = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLocationUri sets the LocationUri field's value. -func (s *Database) SetLocationUri(v string) *Database { - s.LocationUri = &v +// SetCrawlerNameList sets the CrawlerNameList field's value. +func (s *GetCrawlerMetricsInput) SetCrawlerNameList(v []*string) *GetCrawlerMetricsInput { + s.CrawlerNameList = v return s } -// SetName sets the Name field's value. -func (s *Database) SetName(v string) *Database { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetCrawlerMetricsInput) SetMaxResults(v int64) *GetCrawlerMetricsInput { + s.MaxResults = &v return s } -// SetParameters sets the Parameters field's value. -func (s *Database) SetParameters(v map[string]*string) *Database { - s.Parameters = v +// SetNextToken sets the NextToken field's value. +func (s *GetCrawlerMetricsInput) SetNextToken(v string) *GetCrawlerMetricsInput { + s.NextToken = &v return s } -// The structure used to create or update a database. -type DatabaseInput struct { +type GetCrawlerMetricsOutput struct { _ struct{} `type:"structure"` - // Description of the database - Description *string `type:"string"` - - // The location of the database (for example, an HDFS path). - LocationUri *string `min:"1" type:"string"` - - // Name of the database. For Hive compatibility, this is folded to lowercase - // when it is stored. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // A list of metrics for the specified crawler. + CrawlerMetricsList []*CrawlerMetrics `type:"list"` - // Thes key-value pairs define parameters and properties of the database. - Parameters map[string]*string `type:"map"` + // A continuation token, if the returned list does not contain the last metric + // available. 
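
CrawlerNameList scopes GetCrawlerMetrics to specific crawlers. A minimal sketch; "my-crawler" is a placeholder name, and the GetCrawlerMetrics client method is the standard generated one:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Restrict the metrics to the named crawlers; omit CrawlerNameList
	// to retrieve metrics for all crawlers in the account.
	out, err := svc.GetCrawlerMetrics(&glue.GetCrawlerMetricsInput{
		CrawlerNameList: aws.StringSlice([]string{"my-crawler"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.CrawlerMetricsList {
		fmt.Println(m)
	}
}
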
+ NextToken *string `type:"string"` } // String returns the string representation -func (s DatabaseInput) String() string { +func (s GetCrawlerMetricsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DatabaseInput) GoString() string { +func (s GetCrawlerMetricsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DatabaseInput"} - if s.LocationUri != nil && len(*s.LocationUri) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LocationUri", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCrawlerMetricsList sets the CrawlerMetricsList field's value. +func (s *GetCrawlerMetricsOutput) SetCrawlerMetricsList(v []*CrawlerMetrics) *GetCrawlerMetricsOutput { + s.CrawlerMetricsList = v + return s } -// SetDescription sets the Description field's value. -func (s *DatabaseInput) SetDescription(v string) *DatabaseInput { - s.Description = &v +// SetNextToken sets the NextToken field's value. +func (s *GetCrawlerMetricsOutput) SetNextToken(v string) *GetCrawlerMetricsOutput { + s.NextToken = &v return s } -// SetLocationUri sets the LocationUri field's value. -func (s *DatabaseInput) SetLocationUri(v string) *DatabaseInput { - s.LocationUri = &v - return s +type GetCrawlerOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the specified crawler. + Crawler *Crawler `type:"structure"` } -// SetName sets the Name field's value. -func (s *DatabaseInput) SetName(v string) *DatabaseInput { - s.Name = &v - return s +// String returns the string representation +func (s GetCrawlerOutput) String() string { + return awsutil.Prettify(s) } -// SetParameters sets the Parameters field's value. -func (s *DatabaseInput) SetParameters(v map[string]*string) *DatabaseInput { - s.Parameters = v +// GoString returns the string representation +func (s GetCrawlerOutput) GoString() string { + return s.String() +} + +// SetCrawler sets the Crawler field's value. +func (s *GetCrawlerOutput) SetCrawler(v *Crawler) *GetCrawlerOutput { + s.Crawler = v return s } -type DeleteClassifierInput struct { +type GetCrawlersInput struct { _ struct{} `type:"structure"` - // Name of the classifier to remove. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The number of crawlers to return on each call. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation request. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteClassifierInput) String() string { +func (s GetCrawlersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteClassifierInput) GoString() string { +func (s GetCrawlersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteClassifierInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *GetCrawlersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCrawlersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -14494,61 +20637,75 @@ func (s *DeleteClassifierInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DeleteClassifierInput) SetName(v string) *DeleteClassifierInput { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetCrawlersInput) SetMaxResults(v int64) *GetCrawlersInput { + s.MaxResults = &v return s } -type DeleteClassifierOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *GetCrawlersInput) SetNextToken(v string) *GetCrawlersInput { + s.NextToken = &v + return s +} + +type GetCrawlersOutput struct { _ struct{} `type:"structure"` + + // A list of crawler metadata. + Crawlers []*Crawler `type:"list"` + + // A continuation token, if the returned list has not reached the end of those + // defined in this customer account. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteClassifierOutput) String() string { +func (s GetCrawlersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteClassifierOutput) GoString() string { +func (s GetCrawlersOutput) GoString() string { return s.String() } -type DeleteConnectionInput struct { +// SetCrawlers sets the Crawlers field's value. +func (s *GetCrawlersOutput) SetCrawlers(v []*Crawler) *GetCrawlersOutput { + s.Crawlers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCrawlersOutput) SetNextToken(v string) *GetCrawlersOutput { + s.NextToken = &v + return s +} + +type GetDataCatalogEncryptionSettingsInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which the connection resides. If none is provided, - // the AWS account ID is used by default. + // The ID of the Data Catalog to retrieve the security configuration for. If + // none is provided, the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - - // The name of the connection to delete. - // - // ConnectionName is a required field - ConnectionName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteConnectionInput) String() string { +func (s GetDataCatalogEncryptionSettingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConnectionInput) GoString() string { +func (s GetDataCatalogEncryptionSettingsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"} +func (s *GetDataCatalogEncryptionSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogEncryptionSettingsInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.ConnectionName == nil { - invalidParams.Add(request.NewErrParamRequired("ConnectionName")) - } - if s.ConnectionName != nil && len(*s.ConnectionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConnectionName", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -14557,53 +20714,64 @@ func (s *DeleteConnectionInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *DeleteConnectionInput) SetCatalogId(v string) *DeleteConnectionInput { +func (s *GetDataCatalogEncryptionSettingsInput) SetCatalogId(v string) *GetDataCatalogEncryptionSettingsInput { s.CatalogId = &v return s } -// SetConnectionName sets the ConnectionName field's value. -func (s *DeleteConnectionInput) SetConnectionName(v string) *DeleteConnectionInput { - s.ConnectionName = &v - return s -} - -type DeleteConnectionOutput struct { +type GetDataCatalogEncryptionSettingsOutput struct { _ struct{} `type:"structure"` + + // The requested security configuration. + DataCatalogEncryptionSettings *DataCatalogEncryptionSettings `type:"structure"` } // String returns the string representation -func (s DeleteConnectionOutput) String() string { +func (s GetDataCatalogEncryptionSettingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteConnectionOutput) GoString() string { +func (s GetDataCatalogEncryptionSettingsOutput) GoString() string { return s.String() } -type DeleteCrawlerInput struct { +// SetDataCatalogEncryptionSettings sets the DataCatalogEncryptionSettings field's value. +func (s *GetDataCatalogEncryptionSettingsOutput) SetDataCatalogEncryptionSettings(v *DataCatalogEncryptionSettings) *GetDataCatalogEncryptionSettingsOutput { + s.DataCatalogEncryptionSettings = v + return s +} + +type GetDatabaseInput struct { _ struct{} `type:"structure"` - // Name of the crawler to remove. + // The ID of the Data Catalog in which the database resides. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the database to retrieve. For Hive compatibility, this should + // be all lowercase. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteCrawlerInput) String() string { +func (s GetDatabaseInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCrawlerInput) GoString() string { +func (s GetDatabaseInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
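
GetDataCatalogEncryptionSettings takes only an optional CatalogId, which defaults to the caller's account ID. A minimal sketch under the same assumptions as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// With no CatalogId set, the caller's AWS account ID is used by default.
	out, err := svc.GetDataCatalogEncryptionSettings(
		&glue.GetDataCatalogEncryptionSettingsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.DataCatalogEncryptionSettings)
}
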
-func (s *DeleteCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCrawlerInput"} +func (s *GetDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -14617,61 +20785,73 @@ func (s *DeleteCrawlerInput) Validate() error { return nil } +// SetCatalogId sets the CatalogId field's value. +func (s *GetDatabaseInput) SetCatalogId(v string) *GetDatabaseInput { + s.CatalogId = &v + return s +} + // SetName sets the Name field's value. -func (s *DeleteCrawlerInput) SetName(v string) *DeleteCrawlerInput { +func (s *GetDatabaseInput) SetName(v string) *GetDatabaseInput { s.Name = &v return s } -type DeleteCrawlerOutput struct { +type GetDatabaseOutput struct { _ struct{} `type:"structure"` + + // The definition of the specified database in the Data Catalog. + Database *Database `type:"structure"` } // String returns the string representation -func (s DeleteCrawlerOutput) String() string { +func (s GetDatabaseOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCrawlerOutput) GoString() string { +func (s GetDatabaseOutput) GoString() string { return s.String() } -type DeleteDatabaseInput struct { +// SetDatabase sets the Database field's value. +func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { + s.Database = v + return s +} + +type GetDatabasesInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which the database resides. If none is supplied, + // The ID of the Data Catalog from which to retrieve Databases. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // The name of the Database to delete. For Hive compatibility, this must be - // all lowercase. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The maximum number of databases to return in one response. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteDatabaseInput) String() string { +func (s GetDatabasesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDatabaseInput) GoString() string { +func (s GetDatabasesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDatabaseInput"} +func (s *GetDatabasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDatabasesInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -14681,110 +20861,137 @@ func (s *DeleteDatabaseInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. 
-func (s *DeleteDatabaseInput) SetCatalogId(v string) *DeleteDatabaseInput { +func (s *GetDatabasesInput) SetCatalogId(v string) *GetDatabasesInput { s.CatalogId = &v return s } -// SetName sets the Name field's value. -func (s *DeleteDatabaseInput) SetName(v string) *DeleteDatabaseInput { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetDatabasesInput) SetMaxResults(v int64) *GetDatabasesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetDatabasesInput) SetNextToken(v string) *GetDatabasesInput { + s.NextToken = &v return s } -type DeleteDatabaseOutput struct { +type GetDatabasesOutput struct { _ struct{} `type:"structure"` + + // A list of Database objects from the specified catalog. + // + // DatabaseList is a required field + DatabaseList []*Database `type:"list" required:"true"` + + // A continuation token for paginating the returned list of tokens, returned + // if the current segment of the list is not the last. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteDatabaseOutput) String() string { +func (s GetDatabasesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDatabaseOutput) GoString() string { +func (s GetDatabasesOutput) GoString() string { return s.String() } -type DeleteDevEndpointInput struct { +// SetDatabaseList sets the DatabaseList field's value. +func (s *GetDatabasesOutput) SetDatabaseList(v []*Database) *GetDatabasesOutput { + s.DatabaseList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetDatabasesOutput) SetNextToken(v string) *GetDatabasesOutput { + s.NextToken = &v + return s +} + +type GetDataflowGraphInput struct { _ struct{} `type:"structure"` - // The name of the DevEndpoint. - // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` + // The Python script to transform. + PythonScript *string `type:"string"` } // String returns the string representation -func (s DeleteDevEndpointInput) String() string { +func (s GetDataflowGraphInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDevEndpointInput) GoString() string { +func (s GetDataflowGraphInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointName sets the EndpointName field's value. -func (s *DeleteDevEndpointInput) SetEndpointName(v string) *DeleteDevEndpointInput { - s.EndpointName = &v +// SetPythonScript sets the PythonScript field's value. +func (s *GetDataflowGraphInput) SetPythonScript(v string) *GetDataflowGraphInput { + s.PythonScript = &v return s } -type DeleteDevEndpointOutput struct { +type GetDataflowGraphOutput struct { _ struct{} `type:"structure"` + + // A list of the edges in the resulting DAG. + DagEdges []*CodeGenEdge `type:"list"` + + // A list of the nodes in the resulting DAG. 
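
DatabaseList is the one required output field in this group, and GetDatabasesInput carries the usual MaxResults/NextToken pagination pair. A hand-rolled pager sketch; the Database.Name field is assumed from the Database shape shown earlier in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	input := &glue.GetDatabasesInput{MaxResults: aws.Int64(100)}
	for {
		out, err := svc.GetDatabases(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, db := range out.DatabaseList {
			fmt.Println(aws.StringValue(db.Name))
		}
		// An empty continuation token means this segment was the last.
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}
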
+ DagNodes []*CodeGenNode `type:"list"` } // String returns the string representation -func (s DeleteDevEndpointOutput) String() string { +func (s GetDataflowGraphOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDevEndpointOutput) GoString() string { +func (s GetDataflowGraphOutput) GoString() string { return s.String() } -type DeleteJobInput struct { +// SetDagEdges sets the DagEdges field's value. +func (s *GetDataflowGraphOutput) SetDagEdges(v []*CodeGenEdge) *GetDataflowGraphOutput { + s.DagEdges = v + return s +} + +// SetDagNodes sets the DagNodes field's value. +func (s *GetDataflowGraphOutput) SetDagNodes(v []*CodeGenNode) *GetDataflowGraphOutput { + s.DagNodes = v + return s +} + +type GetDevEndpointInput struct { _ struct{} `type:"structure"` - // The name of the job definition to delete. + // Name of the DevEndpoint to retrieve information for. // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` + // EndpointName is a required field + EndpointName *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteJobInput) String() string { +func (s GetDevEndpointInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteJobInput) GoString() string { +func (s GetDevEndpointInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteJobInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) +func (s *GetDevEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointInput"} + if s.EndpointName == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointName")) } if invalidParams.Len() > 0 { @@ -14793,88 +21000,60 @@ func (s *DeleteJobInput) Validate() error { return nil } -// SetJobName sets the JobName field's value. -func (s *DeleteJobInput) SetJobName(v string) *DeleteJobInput { - s.JobName = &v +// SetEndpointName sets the EndpointName field's value. +func (s *GetDevEndpointInput) SetEndpointName(v string) *GetDevEndpointInput { + s.EndpointName = &v return s } -type DeleteJobOutput struct { +type GetDevEndpointOutput struct { _ struct{} `type:"structure"` - // The name of the job definition that was deleted. - JobName *string `min:"1" type:"string"` + // A DevEndpoint definition. + DevEndpoint *DevEndpoint `type:"structure"` } // String returns the string representation -func (s DeleteJobOutput) String() string { +func (s GetDevEndpointOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteJobOutput) GoString() string { +func (s GetDevEndpointOutput) GoString() string { return s.String() } -// SetJobName sets the JobName field's value. -func (s *DeleteJobOutput) SetJobName(v string) *DeleteJobOutput { - s.JobName = &v +// SetDevEndpoint sets the DevEndpoint field's value. +func (s *GetDevEndpointOutput) SetDevEndpoint(v *DevEndpoint) *GetDevEndpointOutput { + s.DevEndpoint = v return s } -type DeletePartitionInput struct { +type GetDevEndpointsInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the partition to be deleted resides. 
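
GetDataflowGraph turns an ETL script back into the DAG of CodeGenNode/CodeGenEdge values defined above. A sketch; the script literal is a placeholder, not a runnable Glue script:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// script stands in for a generated AWS Glue ETL script; the service
	// parses it back into a dataflow graph.
	script := "..." // placeholder Python source
	out, err := svc.GetDataflowGraph(&glue.GetDataflowGraphInput{
		PythonScript: aws.String(script),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d nodes, %d edges\n", len(out.DagNodes), len(out.DagEdges))
}
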
If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The name of the catalog database in which the table in question resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The values that define the partition. - // - // PartitionValues is a required field - PartitionValues []*string `type:"list" required:"true"` + // The maximum size of information to return. + MaxResults *int64 `min:"1" type:"integer"` - // The name of the table where the partition to be deleted is located. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeletePartitionInput) String() string { +func (s GetDevEndpointsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePartitionInput) GoString() string { +func (s GetDevEndpointsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionValues == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionValues")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) +func (s *GetDevEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -14883,66 +21062,77 @@ func (s *DeletePartitionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *DeletePartitionInput) SetCatalogId(v string) *DeletePartitionInput { - s.CatalogId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetDevEndpointsInput) SetMaxResults(v int64) *GetDevEndpointsInput { + s.MaxResults = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeletePartitionInput) SetDatabaseName(v string) *DeletePartitionInput { - s.DatabaseName = &v +// SetNextToken sets the NextToken field's value. +func (s *GetDevEndpointsInput) SetNextToken(v string) *GetDevEndpointsInput { + s.NextToken = &v return s } -// SetPartitionValues sets the PartitionValues field's value. -func (s *DeletePartitionInput) SetPartitionValues(v []*string) *DeletePartitionInput { - s.PartitionValues = v - return s -} +type GetDevEndpointsOutput struct { + _ struct{} `type:"structure"` -// SetTableName sets the TableName field's value. -func (s *DeletePartitionInput) SetTableName(v string) *DeletePartitionInput { - s.TableName = &v - return s -} + // A list of DevEndpoint definitions. 
+ DevEndpoints []*DevEndpoint `type:"list"` -type DeletePartitionOutput struct { - _ struct{} `type:"structure"` + // A continuation token, if not all DevEndpoint definitions have yet been returned. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeletePartitionOutput) String() string { +func (s GetDevEndpointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePartitionOutput) GoString() string { +func (s GetDevEndpointsOutput) GoString() string { return s.String() } -type DeleteResourcePolicyInput struct { +// SetDevEndpoints sets the DevEndpoints field's value. +func (s *GetDevEndpointsOutput) SetDevEndpoints(v []*DevEndpoint) *GetDevEndpointsOutput { + s.DevEndpoints = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetDevEndpointsOutput) SetNextToken(v string) *GetDevEndpointsOutput { + s.NextToken = &v + return s +} + +type GetJobBookmarkInput struct { _ struct{} `type:"structure"` - // The hash value returned when this policy was set. - PolicyHashCondition *string `min:"1" type:"string"` + // The name of the job in question. + // + // JobName is a required field + JobName *string `type:"string" required:"true"` + + // The unique run identifier associated with this job run. + RunId *string `type:"string"` } // String returns the string representation -func (s DeleteResourcePolicyInput) String() string { +func (s GetJobBookmarkInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteResourcePolicyInput) GoString() string { +func (s GetJobBookmarkInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} - if s.PolicyHashCondition != nil && len(*s.PolicyHashCondition) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyHashCondition", 1)) +func (s *GetJobBookmarkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobBookmarkInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } if invalidParams.Len() > 0 { @@ -14951,53 +21141,68 @@ func (s *DeleteResourcePolicyInput) Validate() error { return nil } -// SetPolicyHashCondition sets the PolicyHashCondition field's value. -func (s *DeleteResourcePolicyInput) SetPolicyHashCondition(v string) *DeleteResourcePolicyInput { - s.PolicyHashCondition = &v +// SetJobName sets the JobName field's value. +func (s *GetJobBookmarkInput) SetJobName(v string) *GetJobBookmarkInput { + s.JobName = &v + return s +} + +// SetRunId sets the RunId field's value. +func (s *GetJobBookmarkInput) SetRunId(v string) *GetJobBookmarkInput { + s.RunId = &v return s } -type DeleteResourcePolicyOutput struct { +type GetJobBookmarkOutput struct { _ struct{} `type:"structure"` + + // A structure that defines a point that a job can resume processing. 
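
GetDevEndpointsInput/Output follow the same pagination contract as the other list operations here. A minimal pager sketch; the EndpointName field on DevEndpoint is assumed from the GetDevEndpointInput doc above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	input := &glue.GetDevEndpointsInput{MaxResults: aws.Int64(25)}
	for {
		out, err := svc.GetDevEndpoints(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, ep := range out.DevEndpoints {
			fmt.Println(aws.StringValue(ep.EndpointName))
		}
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}
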
+ JobBookmarkEntry *JobBookmarkEntry `type:"structure"` } // String returns the string representation -func (s DeleteResourcePolicyOutput) String() string { +func (s GetJobBookmarkOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteResourcePolicyOutput) GoString() string { +func (s GetJobBookmarkOutput) GoString() string { return s.String() } -type DeleteSecurityConfigurationInput struct { +// SetJobBookmarkEntry sets the JobBookmarkEntry field's value. +func (s *GetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *GetJobBookmarkOutput { + s.JobBookmarkEntry = v + return s +} + +type GetJobInput struct { _ struct{} `type:"structure"` - // The name of the security configuration to delete. + // The name of the job definition to retrieve. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // JobName is a required field + JobName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteSecurityConfigurationInput) String() string { +func (s GetJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSecurityConfigurationInput) GoString() string { +func (s GetJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSecurityConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSecurityConfigurationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GetJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } if invalidParams.Len() > 0 { @@ -15006,73 +21211,76 @@ func (s *DeleteSecurityConfigurationInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DeleteSecurityConfigurationInput) SetName(v string) *DeleteSecurityConfigurationInput { - s.Name = &v +// SetJobName sets the JobName field's value. +func (s *GetJobInput) SetJobName(v string) *GetJobInput { + s.JobName = &v return s } -type DeleteSecurityConfigurationOutput struct { +type GetJobOutput struct { _ struct{} `type:"structure"` + + // The requested job definition. + Job *Job `type:"structure"` } // String returns the string representation -func (s DeleteSecurityConfigurationOutput) String() string { +func (s GetJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSecurityConfigurationOutput) GoString() string { +func (s GetJobOutput) GoString() string { return s.String() } -type DeleteTableInput struct { - _ struct{} `type:"structure"` +// SetJob sets the Job field's value. +func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { + s.Job = v + return s +} - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` +type GetJobRunInput struct { + _ struct{} `type:"structure"` - // The name of the catalog database in which the table resides. For Hive compatibility, - // this name is entirely lowercase. 
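
GetJobBookmark needs only JobName; RunId optionally narrows the lookup to one run, per the input shape above. A sketch with a placeholder job name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// "my-job" is a placeholder; add RunId to scope the bookmark to a run.
	out, err := svc.GetJobBookmark(&glue.GetJobBookmarkInput{
		JobName: aws.String("my-job"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.JobBookmarkEntry)
}
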
+ // Name of the job definition being run. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // JobName is a required field + JobName *string `min:"1" type:"string" required:"true"` - // The name of the table to be deleted. For Hive compatibility, this name is - // entirely lowercase. + // True if a list of predecessor runs should be returned. + PredecessorsIncluded *bool `type:"boolean"` + + // The ID of the job run. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteTableInput) String() string { +func (s GetJobRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableInput) GoString() string { +func (s GetJobRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) +func (s *GetJobRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobRunInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) } if invalidParams.Len() > 0 { @@ -15081,96 +21289,83 @@ func (s *DeleteTableInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteTableInput) SetCatalogId(v string) *DeleteTableInput { - s.CatalogId = &v +// SetJobName sets the JobName field's value. +func (s *GetJobRunInput) SetJobName(v string) *GetJobRunInput { + s.JobName = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeleteTableInput) SetDatabaseName(v string) *DeleteTableInput { - s.DatabaseName = &v +// SetPredecessorsIncluded sets the PredecessorsIncluded field's value. +func (s *GetJobRunInput) SetPredecessorsIncluded(v bool) *GetJobRunInput { + s.PredecessorsIncluded = &v return s } -// SetName sets the Name field's value. -func (s *DeleteTableInput) SetName(v string) *DeleteTableInput { - s.Name = &v +// SetRunId sets the RunId field's value. +func (s *GetJobRunInput) SetRunId(v string) *GetJobRunInput { + s.RunId = &v return s } -type DeleteTableOutput struct { +type GetJobRunOutput struct { _ struct{} `type:"structure"` + + // The requested job-run metadata. 
+ JobRun *JobRun `type:"structure"` } // String returns the string representation -func (s DeleteTableOutput) String() string { +func (s GetJobRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableOutput) GoString() string { +func (s GetJobRunOutput) GoString() string { return s.String() } -type DeleteTableVersionInput struct { - _ struct{} `type:"structure"` +// SetJobRun sets the JobRun field's value. +func (s *GetJobRunOutput) SetJobRun(v *JobRun) *GetJobRunOutput { + s.JobRun = v + return s +} - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` +type GetJobRunsInput struct { + _ struct{} `type:"structure"` - // The database in the catalog in which the table resides. For Hive compatibility, - // this name is entirely lowercase. + // The name of the job definition for which to retrieve all job runs. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // JobName is a required field + JobName *string `min:"1" type:"string" required:"true"` - // The name of the table. For Hive compatibility, this name is entirely lowercase. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // The maximum size of the response. + MaxResults *int64 `min:"1" type:"integer"` - // The ID of the table version to be deleted. A VersionID is a string representation - // of an integer. Each version is incremented by 1. - // - // VersionId is a required field - VersionId *string `min:"1" type:"string" required:"true"` + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteTableVersionInput) String() string { +func (s GetJobRunsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableVersionInput) GoString() string { +func (s GetJobRunsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteTableVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTableVersionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) +func (s *GetJobRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobRunsInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } - if s.VersionId == nil { - invalidParams.Add(request.NewErrParamRequired("VersionId")) + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } - if s.VersionId != nil && len(*s.VersionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VersionId", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -15179,71 +21374,81 @@ func (s *DeleteTableVersionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteTableVersionInput) SetCatalogId(v string) *DeleteTableVersionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeleteTableVersionInput) SetDatabaseName(v string) *DeleteTableVersionInput { - s.DatabaseName = &v +// SetJobName sets the JobName field's value. +func (s *GetJobRunsInput) SetJobName(v string) *GetJobRunsInput { + s.JobName = &v return s } -// SetTableName sets the TableName field's value. -func (s *DeleteTableVersionInput) SetTableName(v string) *DeleteTableVersionInput { - s.TableName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetJobRunsInput) SetMaxResults(v int64) *GetJobRunsInput { + s.MaxResults = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *DeleteTableVersionInput) SetVersionId(v string) *DeleteTableVersionInput { - s.VersionId = &v +// SetNextToken sets the NextToken field's value. +func (s *GetJobRunsInput) SetNextToken(v string) *GetJobRunsInput { + s.NextToken = &v return s } -type DeleteTableVersionOutput struct { +type GetJobRunsOutput struct { _ struct{} `type:"structure"` + + // A list of job-run metadata objects. + JobRuns []*JobRun `type:"list"` + + // A continuation token, if not all requested job runs have been returned. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteTableVersionOutput) String() string { +func (s GetJobRunsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableVersionOutput) GoString() string { +func (s GetJobRunsOutput) GoString() string { return s.String() } -type DeleteTriggerInput struct { +// SetJobRuns sets the JobRuns field's value. +func (s *GetJobRunsOutput) SetJobRuns(v []*JobRun) *GetJobRunsOutput { + s.JobRuns = v + return s +} + +// SetNextToken sets the NextToken field's value. 
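
// A minimal usage sketch (not part of the vendored SDK code): paginating
// GetJobRuns with the MaxResults/NextToken fields defined above. Pass the
// returned NextToken back in until it comes back nil. Assumes imports
// "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/glue"; the client and job name are
// hypothetical placeholders.
func listAllJobRuns(svc *glue.Glue, jobName string) ([]*glue.JobRun, error) {
	input := &glue.GetJobRunsInput{
		JobName:    aws.String(jobName),
		MaxResults: aws.Int64(50), // Validate above requires a value >= 1
	}
	var runs []*glue.JobRun
	for {
		out, err := svc.GetJobRuns(input)
		if err != nil {
			return nil, err
		}
		runs = append(runs, out.JobRuns...)
		if out.NextToken == nil {
			return runs, nil // no continuation token: all runs returned
		}
		input.NextToken = out.NextToken // continuation call
	}
}
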
+func (s *GetJobRunsOutput) SetNextToken(v string) *GetJobRunsOutput { + s.NextToken = &v + return s +} + +type GetJobsInput struct { _ struct{} `type:"structure"` - // The name of the trigger to delete. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The maximum size of the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteTriggerInput) String() string { +func (s GetJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTriggerInput) GoString() string { +func (s GetJobsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *GetJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -15252,80 +21457,88 @@ func (s *DeleteTriggerInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DeleteTriggerInput) SetName(v string) *DeleteTriggerInput { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetJobsInput) SetMaxResults(v int64) *GetJobsInput { + s.MaxResults = &v return s } -type DeleteTriggerOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *GetJobsInput) SetNextToken(v string) *GetJobsInput { + s.NextToken = &v + return s +} + +type GetJobsOutput struct { _ struct{} `type:"structure"` - // The name of the trigger that was deleted. - Name *string `min:"1" type:"string"` + // A list of job definitions. + Jobs []*Job `type:"list"` + + // A continuation token, if not all job definitions have yet been returned. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteTriggerOutput) String() string { +func (s GetJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTriggerOutput) GoString() string { +func (s GetJobsOutput) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *DeleteTriggerOutput) SetName(v string) *DeleteTriggerOutput { - s.Name = &v +// SetJobs sets the Jobs field's value. +func (s *GetJobsOutput) SetJobs(v []*Job) *GetJobsOutput { + s.Jobs = v return s } -type DeleteUserDefinedFunctionInput struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *GetJobsOutput) SetNextToken(v string) *GetJobsOutput { + s.NextToken = &v + return s +} - // The ID of the Data Catalog where the function to be deleted is located. If - // none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` +type GetMLTaskRunInput struct { + _ struct{} `type:"structure"` - // The name of the catalog database where the function is located. + // The unique identifier of the task run. 
// - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // TaskRunId is a required field + TaskRunId *string `min:"1" type:"string" required:"true"` - // The name of the function definition to be deleted. + // The unique identifier of the machine learning transform. // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteUserDefinedFunctionInput) String() string { +func (s GetMLTaskRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteUserDefinedFunctionInput) GoString() string { +func (s GetMLTaskRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) +func (s *GetMLTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMLTaskRunInput"} + if s.TaskRunId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskRunId")) } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + if s.TaskRunId != nil && len(*s.TaskRunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskRunId", 1)) } - if s.FunctionName == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionName")) + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) } if invalidParams.Len() > 0 { @@ -15334,518 +21547,632 @@ func (s *DeleteUserDefinedFunctionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *DeleteUserDefinedFunctionInput) SetCatalogId(v string) *DeleteUserDefinedFunctionInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *DeleteUserDefinedFunctionInput) SetDatabaseName(v string) *DeleteUserDefinedFunctionInput { - s.DatabaseName = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *GetMLTaskRunInput) SetTaskRunId(v string) *GetMLTaskRunInput { + s.TaskRunId = &v return s } -// SetFunctionName sets the FunctionName field's value. -func (s *DeleteUserDefinedFunctionInput) SetFunctionName(v string) *DeleteUserDefinedFunctionInput { - s.FunctionName = &v +// SetTransformId sets the TransformId field's value. 
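
// A minimal sketch (not part of the vendored SDK code) of calling the
// GetMLTaskRun API defined above; both TransformId and TaskRunId are
// required, per Validate. Assumes the aws and glue imports from the earlier
// sketch; the identifiers are hypothetical placeholders.
func taskRunStatus(svc *glue.Glue, transformID, taskRunID string) (string, error) {
	out, err := svc.GetMLTaskRun(&glue.GetMLTaskRunInput{
		TransformId: aws.String(transformID),
		TaskRunId:   aws.String(taskRunID),
	})
	if err != nil {
		return "", err
	}
	// Status holds a TaskStatusType enum value such as "RUNNING".
	return aws.StringValue(out.Status), nil
}
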
+func (s *GetMLTaskRunInput) SetTransformId(v string) *GetMLTaskRunInput { + s.TransformId = &v return s } -type DeleteUserDefinedFunctionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteUserDefinedFunctionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUserDefinedFunctionOutput) GoString() string { - return s.String() -} - -// A development endpoint where a developer can remotely debug ETL scripts. -type DevEndpoint struct { +type GetMLTaskRunOutput struct { _ struct{} `type:"structure"` - // A map of arguments used to configure the DevEndpoint. - // - // Note that currently, we only support "--enable-glue-datacatalog": "" as a - // valid argument. - Arguments map[string]*string `type:"map"` - - // The AWS availability zone where this DevEndpoint is located. - AvailabilityZone *string `type:"string"` - - // The point in time at which this DevEndpoint was created. - CreatedTimestamp *time.Time `type:"timestamp"` - - // The name of the DevEndpoint. - EndpointName *string `type:"string"` - - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - // - // Please note that only pure Java/Scala libraries can currently be used on - // a DevEndpoint. - ExtraJarsS3Path *string `type:"string"` - - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. - // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. - ExtraPythonLibsS3Path *string `type:"string"` - - // The reason for a current failure in this DevEndpoint. - FailureReason *string `type:"string"` - - // The point in time at which this DevEndpoint was last modified. - LastModifiedTimestamp *time.Time `type:"timestamp"` - - // The status of the last update. - LastUpdateStatus *string `type:"string"` - - // The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint. - NumberOfNodes *int64 `type:"integer"` - - // A private IP address to access the DevEndpoint within a VPC, if the DevEndpoint - // is created within one. The PrivateAddress field is present only when you - // create the DevEndpoint within your virtual private cloud (VPC). - PrivateAddress *string `type:"string"` - - // The public IP address used by this DevEndpoint. The PublicAddress field is - // present only when you create a non-VPC (virtual private cloud) DevEndpoint. - PublicAddress *string `type:"string"` - - // The public key to be used by this DevEndpoint for authentication. This attribute - // is provided for backward compatibility, as the recommended attribute to use - // is public keys. - PublicKey *string `type:"string"` + // The date and time when this task run was completed. + CompletedOn *time.Time `type:"timestamp"` - // A list of public keys to be used by the DevEndpoints for authentication. - // The use of this attribute is preferred over a single public key because the - // public keys allow you to have a different private key per client. 
- //
- // If you previously created an endpoint with a public key, you must remove
- // that key to be able to set a list of public keys: call the UpdateDevEndpoint
- // API with the public key content in the deletePublicKeys attribute, and the
- // list of new keys in the addPublicKeys attribute.
- PublicKeys []*string `type:"list"`
+ // The error string that is associated with the task run.
+ ErrorString *string `type:"string"`

- // The AWS ARN of the IAM role used in this DevEndpoint.
- RoleArn *string `type:"string"`
+ // The amount of time (in seconds) that the task run consumed resources.
+ ExecutionTime *int64 `type:"integer"`

- // The name of the SecurityConfiguration structure to be used with this DevEndpoint.
- SecurityConfiguration *string `min:"1" type:"string"`
+ // The date and time when this task run was last modified.
+ LastModifiedOn *time.Time `type:"timestamp"`

- // A list of security group identifiers used in this DevEndpoint.
- SecurityGroupIds []*string `type:"list"`
+ // The name of the log group that is associated with the task run.
+ LogGroupName *string `type:"string"`

- // The current status of this DevEndpoint.
- Status *string `type:"string"`
+ // The list of properties that are associated with the task run.
+ Properties *TaskRunProperties `type:"structure"`

- // The subnet ID for this DevEndpoint.
- SubnetId *string `type:"string"`
+ // The date and time when this task run started.
+ StartedOn *time.Time `type:"timestamp"`

- // The ID of the virtual private cloud (VPC) used by this DevEndpoint.
- VpcId *string `type:"string"`
+ // The status for this task run.
+ Status *string `type:"string" enum:"TaskStatusType"`

- // The YARN endpoint address used by this DevEndpoint.
- YarnEndpointAddress *string `type:"string"`
+ // The unique run identifier associated with this run.
+ TaskRunId *string `min:"1" type:"string"`

- // The Apache Zeppelin port for the remote Apache Spark interpreter.
- ZeppelinRemoteSparkInterpreterPort *int64 `type:"integer"`
+ // The unique identifier of the transform.
+ TransformId *string `min:"1" type:"string"`
}

// String returns the string representation
-func (s DevEndpoint) String() string {
+func (s GetMLTaskRunOutput) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s DevEndpoint) GoString() string {
+func (s GetMLTaskRunOutput) GoString() string {
 return s.String()
}

-// SetArguments sets the Arguments field's value.
-func (s *DevEndpoint) SetArguments(v map[string]*string) *DevEndpoint {
- s.Arguments = v
+// SetCompletedOn sets the CompletedOn field's value.
+func (s *GetMLTaskRunOutput) SetCompletedOn(v time.Time) *GetMLTaskRunOutput {
+ s.CompletedOn = &v
 return s
}

-// SetAvailabilityZone sets the AvailabilityZone field's value.
-func (s *DevEndpoint) SetAvailabilityZone(v string) *DevEndpoint {
- s.AvailabilityZone = &v
+// SetErrorString sets the ErrorString field's value.
+func (s *GetMLTaskRunOutput) SetErrorString(v string) *GetMLTaskRunOutput {
+ s.ErrorString = &v
 return s
}

-// SetCreatedTimestamp sets the CreatedTimestamp field's value.
-func (s *DevEndpoint) SetCreatedTimestamp(v time.Time) *DevEndpoint {
- s.CreatedTimestamp = &v
+// SetExecutionTime sets the ExecutionTime field's value.
+func (s *GetMLTaskRunOutput) SetExecutionTime(v int64) *GetMLTaskRunOutput {
+ s.ExecutionTime = &v
 return s
}

-// SetEndpointName sets the EndpointName field's value.
-func (s *DevEndpoint) SetEndpointName(v string) *DevEndpoint { - s.EndpointName = &v +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *GetMLTaskRunOutput) SetLastModifiedOn(v time.Time) *GetMLTaskRunOutput { + s.LastModifiedOn = &v return s } -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *DevEndpoint) SetExtraJarsS3Path(v string) *DevEndpoint { - s.ExtraJarsS3Path = &v +// SetLogGroupName sets the LogGroupName field's value. +func (s *GetMLTaskRunOutput) SetLogGroupName(v string) *GetMLTaskRunOutput { + s.LogGroupName = &v return s } -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpoint) SetExtraPythonLibsS3Path(v string) *DevEndpoint { - s.ExtraPythonLibsS3Path = &v +// SetProperties sets the Properties field's value. +func (s *GetMLTaskRunOutput) SetProperties(v *TaskRunProperties) *GetMLTaskRunOutput { + s.Properties = v return s } -// SetFailureReason sets the FailureReason field's value. -func (s *DevEndpoint) SetFailureReason(v string) *DevEndpoint { - s.FailureReason = &v +// SetStartedOn sets the StartedOn field's value. +func (s *GetMLTaskRunOutput) SetStartedOn(v time.Time) *GetMLTaskRunOutput { + s.StartedOn = &v return s } -// SetLastModifiedTimestamp sets the LastModifiedTimestamp field's value. -func (s *DevEndpoint) SetLastModifiedTimestamp(v time.Time) *DevEndpoint { - s.LastModifiedTimestamp = &v +// SetStatus sets the Status field's value. +func (s *GetMLTaskRunOutput) SetStatus(v string) *GetMLTaskRunOutput { + s.Status = &v return s } -// SetLastUpdateStatus sets the LastUpdateStatus field's value. -func (s *DevEndpoint) SetLastUpdateStatus(v string) *DevEndpoint { - s.LastUpdateStatus = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *GetMLTaskRunOutput) SetTaskRunId(v string) *GetMLTaskRunOutput { + s.TaskRunId = &v return s } -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *DevEndpoint) SetNumberOfNodes(v int64) *DevEndpoint { - s.NumberOfNodes = &v +// SetTransformId sets the TransformId field's value. +func (s *GetMLTaskRunOutput) SetTransformId(v string) *GetMLTaskRunOutput { + s.TransformId = &v return s } -// SetPrivateAddress sets the PrivateAddress field's value. -func (s *DevEndpoint) SetPrivateAddress(v string) *DevEndpoint { - s.PrivateAddress = &v - return s -} +type GetMLTaskRunsInput struct { + _ struct{} `type:"structure"` -// SetPublicAddress sets the PublicAddress field's value. -func (s *DevEndpoint) SetPublicAddress(v string) *DevEndpoint { - s.PublicAddress = &v - return s -} + // The filter criteria, in the TaskRunFilterCriteria structure, for the task + // run. + Filter *TaskRunFilterCriteria `type:"structure"` -// SetPublicKey sets the PublicKey field's value. -func (s *DevEndpoint) SetPublicKey(v string) *DevEndpoint { - s.PublicKey = &v - return s -} + // The maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` -// SetPublicKeys sets the PublicKeys field's value. -func (s *DevEndpoint) SetPublicKeys(v []*string) *DevEndpoint { - s.PublicKeys = v - return s + // A token for pagination of the results. The default is empty. + NextToken *string `type:"string"` + + // The sorting criteria, in the TaskRunSortCriteria structure, for the task + // run. + Sort *TaskRunSortCriteria `type:"structure"` + + // The unique identifier of the machine learning transform. 
+ // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } -// SetRoleArn sets the RoleArn field's value. -func (s *DevEndpoint) SetRoleArn(v string) *DevEndpoint { - s.RoleArn = &v - return s +// String returns the string representation +func (s GetMLTaskRunsInput) String() string { + return awsutil.Prettify(s) } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *DevEndpoint) SetSecurityConfiguration(v string) *DevEndpoint { - s.SecurityConfiguration = &v - return s +// GoString returns the string representation +func (s GetMLTaskRunsInput) GoString() string { + return s.String() } -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *DevEndpoint) SetSecurityGroupIds(v []*string) *DevEndpoint { - s.SecurityGroupIds = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMLTaskRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMLTaskRunsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } + if s.Sort != nil { + if err := s.Sort.Validate(); err != nil { + invalidParams.AddNested("Sort", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatus sets the Status field's value. -func (s *DevEndpoint) SetStatus(v string) *DevEndpoint { - s.Status = &v +// SetFilter sets the Filter field's value. +func (s *GetMLTaskRunsInput) SetFilter(v *TaskRunFilterCriteria) *GetMLTaskRunsInput { + s.Filter = v return s } -// SetSubnetId sets the SubnetId field's value. -func (s *DevEndpoint) SetSubnetId(v string) *DevEndpoint { - s.SubnetId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetMLTaskRunsInput) SetMaxResults(v int64) *GetMLTaskRunsInput { + s.MaxResults = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *DevEndpoint) SetVpcId(v string) *DevEndpoint { - s.VpcId = &v +// SetNextToken sets the NextToken field's value. +func (s *GetMLTaskRunsInput) SetNextToken(v string) *GetMLTaskRunsInput { + s.NextToken = &v return s } -// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. -func (s *DevEndpoint) SetYarnEndpointAddress(v string) *DevEndpoint { - s.YarnEndpointAddress = &v +// SetSort sets the Sort field's value. +func (s *GetMLTaskRunsInput) SetSort(v *TaskRunSortCriteria) *GetMLTaskRunsInput { + s.Sort = v return s } -// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. -func (s *DevEndpoint) SetZeppelinRemoteSparkInterpreterPort(v int64) *DevEndpoint { - s.ZeppelinRemoteSparkInterpreterPort = &v +// SetTransformId sets the TransformId field's value. +func (s *GetMLTaskRunsInput) SetTransformId(v string) *GetMLTaskRunsInput { + s.TransformId = &v return s } -// Custom libraries to be loaded into a DevEndpoint. -type DevEndpointCustomLibraries struct { +type GetMLTaskRunsOutput struct { _ struct{} `type:"structure"` - // Path to one or more Java Jars in an S3 bucket that should be loaded in your - // DevEndpoint. - // - // Please note that only pure Java/Scala libraries can currently be used on - // a DevEndpoint. 
- ExtraJarsS3Path *string `type:"string"` + // A pagination token, if more results are available. + NextToken *string `type:"string"` - // Path(s) to one or more Python libraries in an S3 bucket that should be loaded - // in your DevEndpoint. Multiple values must be complete paths separated by - // a comma. - // - // Please note that only pure Python libraries can currently be used on a DevEndpoint. - // Libraries that rely on C extensions, such as the pandas (http://pandas.pydata.org/) - // Python data analysis library, are not yet supported. - ExtraPythonLibsS3Path *string `type:"string"` + // A list of task runs that are associated with the transform. + TaskRuns []*TaskRun `type:"list"` } // String returns the string representation -func (s DevEndpointCustomLibraries) String() string { +func (s GetMLTaskRunsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DevEndpointCustomLibraries) GoString() string { +func (s GetMLTaskRunsOutput) GoString() string { return s.String() } -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *DevEndpointCustomLibraries) SetExtraJarsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraJarsS3Path = &v +// SetNextToken sets the NextToken field's value. +func (s *GetMLTaskRunsOutput) SetNextToken(v string) *GetMLTaskRunsOutput { + s.NextToken = &v return s } -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraPythonLibsS3Path = &v +// SetTaskRuns sets the TaskRuns field's value. +func (s *GetMLTaskRunsOutput) SetTaskRuns(v []*TaskRun) *GetMLTaskRunsOutput { + s.TaskRuns = v return s } -// Specifies a DynamoDB table to crawl. -type DynamoDBTarget struct { +type GetMLTransformInput struct { _ struct{} `type:"structure"` - // The name of the DynamoDB table to crawl. - Path *string `type:"string"` + // The unique identifier of the transform, generated at the time that the transform + // was created. + // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DynamoDBTarget) String() string { +func (s GetMLTransformInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DynamoDBTarget) GoString() string { +func (s GetMLTransformInput) GoString() string { return s.String() } -// SetPath sets the Path field's value. -func (s *DynamoDBTarget) SetPath(v string) *DynamoDBTarget { - s.Path = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMLTransformInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMLTransformInput"} + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTransformId sets the TransformId field's value. +func (s *GetMLTransformInput) SetTransformId(v string) *GetMLTransformInput { + s.TransformId = &v return s } -// Specifies the encryption-at-rest configuration for the Data Catalog. -type EncryptionAtRest struct { +type GetMLTransformOutput struct { _ struct{} `type:"structure"` - // The encryption-at-rest mode for encrypting Data Catalog data. 
+ // The date and time when the transform was created.
+ CreatedOn *time.Time `type:"timestamp"`
+
+ // A description of the transform.
+ Description *string `type:"string"`
+
+ // The latest evaluation metrics.
+ EvaluationMetrics *EvaluationMetrics `type:"structure"`
+
+ // A list of AWS Glue table definitions used by the transform.
+ InputRecordTables []*Table `type:"list"`
+
+ // The number of labels available for this transform.
+ LabelCount *int64 `type:"integer"`
+
+ // The date and time when the transform was last modified.
+ LastModifiedOn *time.Time `type:"timestamp"`
+
+ // The number of AWS Glue data processing units (DPUs) that are allocated to
+ // task runs for this transform. You can allocate from 2 to 100 DPUs; the default
+ // is 10. A DPU is a relative measure of processing power that consists of 4
+ // vCPUs of compute capacity and 16 GB of memory. For more information, see
+ // the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/).
+ //
+ // When the WorkerType field is set to a value other than Standard, the MaxCapacity
+ // field is set automatically and becomes read-only.
+ MaxCapacity *float64 `type:"double"`
+
+ // The maximum number of times to retry a task for this transform after a task
+ // run fails.
+ MaxRetries *int64 `type:"integer"`
+
+ // The unique name given to the transform when it was created.
+ Name *string `min:"1" type:"string"`
+
+ // The number of workers of a defined workerType that are allocated when this
+ // task runs.
+ NumberOfWorkers *int64 `type:"integer"`
+
+ // The configuration parameters that are specific to the algorithm used.
+ Parameters *TransformParameters `type:"structure"`
+
+ // The name or Amazon Resource Name (ARN) of the IAM role with the required
+ // permissions.
+ Role *string `type:"string"`
+
+ // The Map<Column, Type> object that represents the schema that this transform
+ // accepts. Has an upper bound of 100 columns.
+ Schema []*SchemaColumn `type:"list"`
+
+ // The last known status of the transform (to indicate whether it can be used
+ // or not). One of "NOT_READY", "READY", or "DELETING".
+ Status *string `type:"string" enum:"TransformStatusType"`
+
+ // The timeout for a task run for this transform in minutes. This is the maximum
+ // time that a task run for this transform can consume resources before it is
+ // terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
+ Timeout *int64 `min:"1" type:"integer"`
+
+ // The unique identifier of the transform, generated at the time that the transform
+ // was created.
+ TransformId *string `min:"1" type:"string"`
+
+ // The type of predefined worker that is allocated when this task runs. Accepts
+ // a value of Standard, G.1X, or G.2X.
+ //
+ // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of
+ // memory and a 50GB disk, and 2 executors per worker.
+ //
+ // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory
+ // and a 64GB disk, and 1 executor per worker.
+ //
+ // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory
+ // and a 128GB disk, and 1 executor per worker.
+ WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s EncryptionAtRest) String() string { +func (s GetMLTransformOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EncryptionAtRest) GoString() string { +func (s GetMLTransformOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionAtRest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionAtRest"} - if s.CatalogEncryptionMode == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogEncryptionMode")) - } - if s.SseAwsKmsKeyId != nil && len(*s.SseAwsKmsKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SseAwsKmsKeyId", 1)) - } +// SetCreatedOn sets the CreatedOn field's value. +func (s *GetMLTransformOutput) SetCreatedOn(v time.Time) *GetMLTransformOutput { + s.CreatedOn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDescription sets the Description field's value. +func (s *GetMLTransformOutput) SetDescription(v string) *GetMLTransformOutput { + s.Description = &v + return s } -// SetCatalogEncryptionMode sets the CatalogEncryptionMode field's value. -func (s *EncryptionAtRest) SetCatalogEncryptionMode(v string) *EncryptionAtRest { - s.CatalogEncryptionMode = &v +// SetEvaluationMetrics sets the EvaluationMetrics field's value. +func (s *GetMLTransformOutput) SetEvaluationMetrics(v *EvaluationMetrics) *GetMLTransformOutput { + s.EvaluationMetrics = v return s } -// SetSseAwsKmsKeyId sets the SseAwsKmsKeyId field's value. -func (s *EncryptionAtRest) SetSseAwsKmsKeyId(v string) *EncryptionAtRest { - s.SseAwsKmsKeyId = &v +// SetInputRecordTables sets the InputRecordTables field's value. +func (s *GetMLTransformOutput) SetInputRecordTables(v []*Table) *GetMLTransformOutput { + s.InputRecordTables = v return s } -// Specifies an encryption configuration. -type EncryptionConfiguration struct { - _ struct{} `type:"structure"` +// SetLabelCount sets the LabelCount field's value. +func (s *GetMLTransformOutput) SetLabelCount(v int64) *GetMLTransformOutput { + s.LabelCount = &v + return s +} - // The encryption configuration for CloudWatch. - CloudWatchEncryption *CloudWatchEncryption `type:"structure"` +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *GetMLTransformOutput) SetLastModifiedOn(v time.Time) *GetMLTransformOutput { + s.LastModifiedOn = &v + return s +} - // The encryption configuration for Job Bookmarks. - JobBookmarksEncryption *JobBookmarksEncryption `type:"structure"` +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *GetMLTransformOutput) SetMaxCapacity(v float64) *GetMLTransformOutput { + s.MaxCapacity = &v + return s +} - // The encryption configuration for S3 data. - S3Encryption []*S3Encryption `type:"list"` +// SetMaxRetries sets the MaxRetries field's value. +func (s *GetMLTransformOutput) SetMaxRetries(v int64) *GetMLTransformOutput { + s.MaxRetries = &v + return s } -// String returns the string representation -func (s EncryptionConfiguration) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. 
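
// An illustrative sketch (not part of the vendored SDK code) of reading the
// capacity fields documented above: when WorkerType is set to a value other
// than Standard, MaxCapacity is derived and read-only, so capacity is the
// worker count of that worker type; otherwise MaxCapacity carries the DPU
// allocation directly. Assumes the aws/glue imports plus "fmt"; the
// transform ID is a hypothetical placeholder.
func describeTransformCapacity(svc *glue.Glue, transformID string) error {
	out, err := svc.GetMLTransform(&glue.GetMLTransformInput{
		TransformId: aws.String(transformID),
	})
	if err != nil {
		return err
	}
	if wt := aws.StringValue(out.WorkerType); wt != "" && wt != glue.WorkerTypeStandard {
		fmt.Printf("%s: %d x %s workers\n",
			aws.StringValue(out.Name), aws.Int64Value(out.NumberOfWorkers), wt)
	} else {
		fmt.Printf("%s: %.1f DPUs\n",
			aws.StringValue(out.Name), aws.Float64Value(out.MaxCapacity))
	}
	return nil
}
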
+func (s *GetMLTransformOutput) SetName(v string) *GetMLTransformOutput { + s.Name = &v + return s } -// GoString returns the string representation -func (s EncryptionConfiguration) GoString() string { - return s.String() +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *GetMLTransformOutput) SetNumberOfWorkers(v int64) *GetMLTransformOutput { + s.NumberOfWorkers = &v + return s } -// SetCloudWatchEncryption sets the CloudWatchEncryption field's value. -func (s *EncryptionConfiguration) SetCloudWatchEncryption(v *CloudWatchEncryption) *EncryptionConfiguration { - s.CloudWatchEncryption = v +// SetParameters sets the Parameters field's value. +func (s *GetMLTransformOutput) SetParameters(v *TransformParameters) *GetMLTransformOutput { + s.Parameters = v return s } -// SetJobBookmarksEncryption sets the JobBookmarksEncryption field's value. -func (s *EncryptionConfiguration) SetJobBookmarksEncryption(v *JobBookmarksEncryption) *EncryptionConfiguration { - s.JobBookmarksEncryption = v +// SetRole sets the Role field's value. +func (s *GetMLTransformOutput) SetRole(v string) *GetMLTransformOutput { + s.Role = &v return s } -// SetS3Encryption sets the S3Encryption field's value. -func (s *EncryptionConfiguration) SetS3Encryption(v []*S3Encryption) *EncryptionConfiguration { - s.S3Encryption = v +// SetSchema sets the Schema field's value. +func (s *GetMLTransformOutput) SetSchema(v []*SchemaColumn) *GetMLTransformOutput { + s.Schema = v return s } -// Contains details about an error. -type ErrorDetail struct { +// SetStatus sets the Status field's value. +func (s *GetMLTransformOutput) SetStatus(v string) *GetMLTransformOutput { + s.Status = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *GetMLTransformOutput) SetTimeout(v int64) *GetMLTransformOutput { + s.Timeout = &v + return s +} + +// SetTransformId sets the TransformId field's value. +func (s *GetMLTransformOutput) SetTransformId(v string) *GetMLTransformOutput { + s.TransformId = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *GetMLTransformOutput) SetWorkerType(v string) *GetMLTransformOutput { + s.WorkerType = &v + return s +} + +type GetMLTransformsInput struct { _ struct{} `type:"structure"` - // The code associated with this error. - ErrorCode *string `min:"1" type:"string"` + // The filter transformation criteria. + Filter *TransformFilterCriteria `type:"structure"` - // A message describing the error. - ErrorMessage *string `type:"string"` + // The maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A paginated token to offset the results. + NextToken *string `type:"string"` + + // The sorting criteria. + Sort *TransformSortCriteria `type:"structure"` } // String returns the string representation -func (s ErrorDetail) String() string { +func (s GetMLTransformsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorDetail) GoString() string { +func (s GetMLTransformsInput) GoString() string { return s.String() } -// SetErrorCode sets the ErrorCode field's value. -func (s *ErrorDetail) SetErrorCode(v string) *ErrorDetail { - s.ErrorCode = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetMLTransformsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMLTransformsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Sort != nil { + if err := s.Sort.Validate(); err != nil { + invalidParams.AddNested("Sort", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *GetMLTransformsInput) SetFilter(v *TransformFilterCriteria) *GetMLTransformsInput { + s.Filter = v return s } -// SetErrorMessage sets the ErrorMessage field's value. -func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { - s.ErrorMessage = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetMLTransformsInput) SetMaxResults(v int64) *GetMLTransformsInput { + s.MaxResults = &v return s } -// An execution property of a job. -type ExecutionProperty struct { +// SetNextToken sets the NextToken field's value. +func (s *GetMLTransformsInput) SetNextToken(v string) *GetMLTransformsInput { + s.NextToken = &v + return s +} + +// SetSort sets the Sort field's value. +func (s *GetMLTransformsInput) SetSort(v *TransformSortCriteria) *GetMLTransformsInput { + s.Sort = v + return s +} + +type GetMLTransformsOutput struct { _ struct{} `type:"structure"` - // The maximum number of concurrent runs allowed for the job. The default is - // 1. An error is returned when this threshold is reached. The maximum value - // you can specify is controlled by a service limit. - MaxConcurrentRuns *int64 `type:"integer"` + // A pagination token, if more results are available. + NextToken *string `type:"string"` + + // A list of machine learning transforms. + // + // Transforms is a required field + Transforms []*MLTransform `type:"list" required:"true"` } // String returns the string representation -func (s ExecutionProperty) String() string { +func (s GetMLTransformsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExecutionProperty) GoString() string { +func (s GetMLTransformsOutput) GoString() string { return s.String() } -// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. -func (s *ExecutionProperty) SetMaxConcurrentRuns(v int64) *ExecutionProperty { - s.MaxConcurrentRuns = &v +// SetNextToken sets the NextToken field's value. +func (s *GetMLTransformsOutput) SetNextToken(v string) *GetMLTransformsOutput { + s.NextToken = &v return s } -type GetCatalogImportStatusInput struct { +// SetTransforms sets the Transforms field's value. +func (s *GetMLTransformsOutput) SetTransforms(v []*MLTransform) *GetMLTransformsOutput { + s.Transforms = v + return s +} + +type GetMappingInput struct { _ struct{} `type:"structure"` - // The ID of the catalog to migrate. Currently, this should be the AWS account - // ID. - CatalogId *string `min:"1" type:"string"` + // Parameters for the mapping. + Location *Location `type:"structure"` + + // A list of target tables. + Sinks []*CatalogEntry `type:"list"` + + // Specifies the source table. 
+ // + // Source is a required field + Source *CatalogEntry `type:"structure" required:"true"` } // String returns the string representation -func (s GetCatalogImportStatusInput) String() string { +func (s GetMappingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCatalogImportStatusInput) GoString() string { +func (s GetMappingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCatalogImportStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCatalogImportStatusInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) +func (s *GetMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMappingInput"} + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Location != nil { + if err := s.Location.Validate(); err != nil { + invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) + } + } + if s.Sinks != nil { + for i, v := range s.Sinks { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -15854,62 +22181,102 @@ func (s *GetCatalogImportStatusInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetCatalogImportStatusInput) SetCatalogId(v string) *GetCatalogImportStatusInput { - s.CatalogId = &v +// SetLocation sets the Location field's value. +func (s *GetMappingInput) SetLocation(v *Location) *GetMappingInput { + s.Location = v return s } -type GetCatalogImportStatusOutput struct { +// SetSinks sets the Sinks field's value. +func (s *GetMappingInput) SetSinks(v []*CatalogEntry) *GetMappingInput { + s.Sinks = v + return s +} + +// SetSource sets the Source field's value. +func (s *GetMappingInput) SetSource(v *CatalogEntry) *GetMappingInput { + s.Source = v + return s +} + +type GetMappingOutput struct { _ struct{} `type:"structure"` - // The status of the specified catalog migration. - ImportStatus *CatalogImportStatus `type:"structure"` + // A list of mappings to the specified targets. + // + // Mapping is a required field + Mapping []*MappingEntry `type:"list" required:"true"` } // String returns the string representation -func (s GetCatalogImportStatusOutput) String() string { +func (s GetMappingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCatalogImportStatusOutput) GoString() string { +func (s GetMappingOutput) GoString() string { return s.String() } -// SetImportStatus sets the ImportStatus field's value. -func (s *GetCatalogImportStatusOutput) SetImportStatus(v *CatalogImportStatus) *GetCatalogImportStatusOutput { - s.ImportStatus = v +// SetMapping sets the Mapping field's value. +func (s *GetMappingOutput) SetMapping(v []*MappingEntry) *GetMappingOutput { + s.Mapping = v return s } -type GetClassifierInput struct { +type GetPartitionInput struct { _ struct{} `type:"structure"` - // Name of the classifier to retrieve. + // The ID of the Data Catalog where the partition in question resides. If none + // is provided, the AWS account ID is used by default. 
+ CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database where the partition resides. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The values that define the partition. + // + // PartitionValues is a required field + PartitionValues []*string `type:"list" required:"true"` + + // The name of the partition's table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetClassifierInput) String() string { +func (s GetPartitionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetClassifierInput) GoString() string { +func (s GetPartitionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetClassifierInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetClassifierInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GetPartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionValues == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValues")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -15918,153 +22285,202 @@ func (s *GetClassifierInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *GetClassifierInput) SetName(v string) *GetClassifierInput { - s.Name = &v +// SetCatalogId sets the CatalogId field's value. +func (s *GetPartitionInput) SetCatalogId(v string) *GetPartitionInput { + s.CatalogId = &v return s } -type GetClassifierOutput struct { - _ struct{} `type:"structure"` - - // The requested classifier. - Classifier *Classifier `type:"structure"` -} - -// String returns the string representation -func (s GetClassifierOutput) String() string { - return awsutil.Prettify(s) +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetPartitionInput) SetDatabaseName(v string) *GetPartitionInput { + s.DatabaseName = &v + return s } -// GoString returns the string representation -func (s GetClassifierOutput) GoString() string { - return s.String() +// SetPartitionValues sets the PartitionValues field's value. +func (s *GetPartitionInput) SetPartitionValues(v []*string) *GetPartitionInput { + s.PartitionValues = v + return s } -// SetClassifier sets the Classifier field's value. -func (s *GetClassifierOutput) SetClassifier(v *Classifier) *GetClassifierOutput { - s.Classifier = v +// SetTableName sets the TableName field's value. 
+func (s *GetPartitionInput) SetTableName(v string) *GetPartitionInput {
+ s.TableName = &v
 return s
}

-type GetClassifiersInput struct {
+type GetPartitionOutput struct {
 _ struct{} `type:"structure"`

- // Size of the list to return (optional).
- MaxResults *int64 `min:"1" type:"integer"`
-
- // An optional continuation token.
- NextToken *string `type:"string"`
+ // The requested information, in the form of a Partition object.
+ Partition *Partition `type:"structure"`
}

// String returns the string representation
-func (s GetClassifiersInput) String() string {
+func (s GetPartitionOutput) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s GetClassifiersInput) GoString() string {
+func (s GetPartitionOutput) GoString() string {
 return s.String()
}

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetClassifiersInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetClassifiersInput"}
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *GetClassifiersInput) SetMaxResults(v int64) *GetClassifiersInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *GetClassifiersInput) SetNextToken(v string) *GetClassifiersInput {
- s.NextToken = &v
+// SetPartition sets the Partition field's value.
+func (s *GetPartitionOutput) SetPartition(v *Partition) *GetPartitionOutput {
+ s.Partition = v
 return s
}

-type GetClassifiersOutput struct {
+type GetPartitionsInput struct {
 _ struct{} `type:"structure"`

- // The requested list of classifier objects.
- Classifiers []*Classifier `type:"list"`
+ // The ID of the Data Catalog where the partitions in question reside. If none
+ // is provided, the AWS account ID is used by default.
+ CatalogId *string `min:"1" type:"string"`
+
+ // The name of the catalog database where the partitions reside.
+ //
+ // DatabaseName is a required field
+ DatabaseName *string `min:"1" type:"string" required:"true"`
+
+ // An expression that filters the partitions to be returned.
+ //
+ // The expression uses SQL syntax similar to the SQL WHERE filter clause. The
+ // SQL statement parser JSQLParser (http://jsqlparser.sourceforge.net/home.php)
+ // parses the expression.
+ //
+ // Operators: The following are the operators that you can use in the Expression
+ // API call:
+ //
+ // =
+ //
+ // Checks whether the values of the two operands are equal; if yes, then the
+ // condition becomes true.
+ //
+ // Example: Assume 'variable a' holds 10 and 'variable b' holds 20.
+ //
+ // (a = b) is not true.
+ //
+ // < >
+ //
+ // Checks whether the values of two operands are equal or not; if the values
+ // are not equal, then the condition becomes true.
+ //
+ // Example: (a < > b) is true.
+ //
+ // >
+ //
+ // Checks whether the value of the left operand is greater than the value of
+ // the right operand; if yes, then the condition becomes true.
+ //
+ // Example: (a > b) is not true.
+ //
+ // <
+ //
+ // Checks whether the value of the left operand is less than the value of the
+ // right operand; if yes, then the condition becomes true.
+ //
+ // Example: (a < b) is true.
+ // + // >= + // + // Checks whether the value of the left operand is greater than or equal to + // the value of the right operand; if yes, then the condition becomes true. + // + // Example: (a >= b) is not true. + // + // <= + // + // Checks whether the value of the left operand is less than or equal to the + // value of the right operand; if yes, then the condition becomes true. + // + // Example: (a <= b) is true. + // + // AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL + // + // Logical operators. + // + // Supported Partition Key Types: The following are the supported partition + // keys. + // + // * string + // + // * date + // + // * timestamp + // + // * int + // + // * bigint + // + // * long + // + // * tinyint + // + // * smallint + // + // * decimal + // + // If an invalid type is encountered, an exception is thrown. + // + // The following list shows the valid operators on each type. When you define + // a crawler, the partitionKey type is created as a STRING, to be compatible + // with the catalog partitions. + // + // Sample API Call: + Expression *string `type:"string"` + + // The maximum number of partitions to return in a single response. + MaxResults *int64 `min:"1" type:"integer"` - // A continuation token. + // A continuation token, if this is not the first call to retrieve these partitions. NextToken *string `type:"string"` -} - -// String returns the string representation -func (s GetClassifiersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetClassifiersOutput) GoString() string { - return s.String() -} - -// SetClassifiers sets the Classifiers field's value. -func (s *GetClassifiersOutput) SetClassifiers(v []*Classifier) *GetClassifiersOutput { - s.Classifiers = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetClassifiersOutput) SetNextToken(v string) *GetClassifiersOutput { - s.NextToken = &v - return s -} - -type GetConnectionInput struct { - _ struct{} `type:"structure"` - // The ID of the Data Catalog in which the connection resides. If none is provided, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // Allows you to retrieve the connection metadata without returning the password. - // For instance, the AWS Glue console uses this flag to retrieve the connection, - // and does not display the password. Set this parameter when the caller might - // not have permission to use the AWS KMS key to decrypt the password, but does - // have permission to access the rest of the connection properties. - HidePassword *bool `type:"boolean"` + // The segment of the table's partitions to scan in this request. + Segment *Segment `type:"structure"` - // The name of the connection definition to retrieve. + // The name of the partitions' table. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetConnectionInput) String() string { +func (s GetPartitionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetConnectionInput) GoString() string { +func (s GetPartitionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
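
// A minimal sketch (not part of the vendored SDK code) of the Expression
// filter documented above: SQL-style comparisons over partition keys joined
// with the logical operators listed. Crawled partition keys are typed as
// STRING, so values are compared as quoted strings. The database and table
// names are hypothetical placeholders; further pages follow the same
// NextToken loop sketched earlier.
func partitionsSince(svc *glue.Glue) ([]*glue.Partition, error) {
	out, err := svc.GetPartitions(&glue.GetPartitionsInput{
		DatabaseName: aws.String("sales_db"),
		TableName:    aws.String("events"),
		Expression:   aws.String("year = '2019' AND month >= '06'"),
		MaxResults:   aws.Int64(100), // Validate below requires >= 1
	})
	if err != nil {
		return nil, err
	}
	return out.Partitions, nil
}
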
-func (s *GetConnectionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetConnectionInput"} +func (s *GetPartitionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPartitionsInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.Segment != nil { + if err := s.Segment.Validate(); err != nil { + invalidParams.AddNested("Segment", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -16074,124 +22490,141 @@ func (s *GetConnectionInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *GetConnectionInput) SetCatalogId(v string) *GetConnectionInput { +func (s *GetPartitionsInput) SetCatalogId(v string) *GetPartitionsInput { s.CatalogId = &v return s } -// SetHidePassword sets the HidePassword field's value. -func (s *GetConnectionInput) SetHidePassword(v bool) *GetConnectionInput { - s.HidePassword = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetPartitionsInput) SetDatabaseName(v string) *GetPartitionsInput { + s.DatabaseName = &v return s } -// SetName sets the Name field's value. -func (s *GetConnectionInput) SetName(v string) *GetConnectionInput { - s.Name = &v +// SetExpression sets the Expression field's value. +func (s *GetPartitionsInput) SetExpression(v string) *GetPartitionsInput { + s.Expression = &v return s } -type GetConnectionOutput struct { - _ struct{} `type:"structure"` - - // The requested connection definition. - Connection *Connection `type:"structure"` +// SetMaxResults sets the MaxResults field's value. +func (s *GetPartitionsInput) SetMaxResults(v int64) *GetPartitionsInput { + s.MaxResults = &v + return s } -// String returns the string representation -func (s GetConnectionOutput) String() string { - return awsutil.Prettify(s) +// SetNextToken sets the NextToken field's value. +func (s *GetPartitionsInput) SetNextToken(v string) *GetPartitionsInput { + s.NextToken = &v + return s } -// GoString returns the string representation -func (s GetConnectionOutput) GoString() string { - return s.String() +// SetSegment sets the Segment field's value. +func (s *GetPartitionsInput) SetSegment(v *Segment) *GetPartitionsInput { + s.Segment = v + return s } -// SetConnection sets the Connection field's value. -func (s *GetConnectionOutput) SetConnection(v *Connection) *GetConnectionOutput { - s.Connection = v +// SetTableName sets the TableName field's value. +func (s *GetPartitionsInput) SetTableName(v string) *GetPartitionsInput { + s.TableName = &v return s } -// Filters the connection definitions that are returned by the GetConnections -// API operation. 
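For context on the Expression field documented above: it takes a SQL WHERE-style predicate over partition keys, not a regex. Below is a minimal consumer-side sketch of calling the new GetPartitions API through this SDK version; the database, table, and partition-key names are hypothetical, and the session setup assumes the default credential chain.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// Client from the default credential/region chain.
	svc := glue.New(session.Must(session.NewSession()))

	input := &glue.GetPartitionsInput{
		DatabaseName: aws.String("sales_db"), // hypothetical database
		TableName:    aws.String("events"),   // hypothetical table
		// SQL WHERE-style predicate over partition keys (see doc comment above).
		Expression: aws.String("year = '2019' AND month > '06'"),
		MaxResults: aws.Int64(100),
	}

	// The generated paginator follows NextToken until the last page.
	err := svc.GetPartitionsPages(input, func(page *glue.GetPartitionsOutput, lastPage bool) bool {
		for _, p := range page.Partitions {
			fmt.Println(aws.StringValueSlice(p.Values))
		}
		return true // continue paging
	})
	if err != nil {
		log.Fatal(err)
	}
}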
-type GetConnectionsFilter struct { +type GetPartitionsOutput struct { _ struct{} `type:"structure"` - // The type of connections to return. Currently, only JDBC is supported; SFTP - // is not supported. - ConnectionType *string `type:"string" enum:"ConnectionType"` + // A continuation token, if the returned list of partitions does not include + // the last one. + NextToken *string `type:"string"` - // A criteria string that must match the criteria recorded in the connection - // definition for that connection definition to be returned. - MatchCriteria []*string `type:"list"` + // A list of requested partitions. + Partitions []*Partition `type:"list"` } // String returns the string representation -func (s GetConnectionsFilter) String() string { +func (s GetPartitionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetConnectionsFilter) GoString() string { +func (s GetPartitionsOutput) GoString() string { return s.String() } -// SetConnectionType sets the ConnectionType field's value. -func (s *GetConnectionsFilter) SetConnectionType(v string) *GetConnectionsFilter { - s.ConnectionType = &v +// SetNextToken sets the NextToken field's value. +func (s *GetPartitionsOutput) SetNextToken(v string) *GetPartitionsOutput { + s.NextToken = &v return s } -// SetMatchCriteria sets the MatchCriteria field's value. -func (s *GetConnectionsFilter) SetMatchCriteria(v []*string) *GetConnectionsFilter { - s.MatchCriteria = v +// SetPartitions sets the Partitions field's value. +func (s *GetPartitionsOutput) SetPartitions(v []*Partition) *GetPartitionsOutput { + s.Partitions = v return s } -type GetConnectionsInput struct { +type GetPlanInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which the connections reside. If none is provided, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // The programming language of the code to perform the mapping. + Language *string `type:"string" enum:"Language"` - // A filter that controls which connections will be returned. - Filter *GetConnectionsFilter `type:"structure"` + // The parameters for the mapping. + Location *Location `type:"structure"` - // Allows you to retrieve the connection metadata without returning the password. - // For instance, the AWS Glue console uses this flag to retrieve the connection, - // and does not display the password. Set this parameter when the caller might - // not have permission to use the AWS KMS key to decrypt the password, but does - // have permission to access the rest of the connection properties. - HidePassword *bool `type:"boolean"` + // The list of mappings from a source table to target tables. + // + // Mapping is a required field + Mapping []*MappingEntry `type:"list" required:"true"` - // The maximum number of connections to return in one response. - MaxResults *int64 `min:"1" type:"integer"` + // The target tables. + Sinks []*CatalogEntry `type:"list"` - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` + // The source table. 
+ // + // Source is a required field + Source *CatalogEntry `type:"structure" required:"true"` } // String returns the string representation -func (s GetConnectionsInput) String() string { +func (s GetPlanInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetConnectionsInput) GoString() string { +func (s GetPlanInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetConnectionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetConnectionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) +func (s *GetPlanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPlanInput"} + if s.Mapping == nil { + invalidParams.Add(request.NewErrParamRequired("Mapping")) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Location != nil { + if err := s.Location.Validate(); err != nil { + invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) + } + } + if s.Sinks != nil { + for i, v := range s.Sinks { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -16200,243 +22633,219 @@ func (s *GetConnectionsInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetConnectionsInput) SetCatalogId(v string) *GetConnectionsInput { - s.CatalogId = &v +// SetLanguage sets the Language field's value. +func (s *GetPlanInput) SetLanguage(v string) *GetPlanInput { + s.Language = &v return s } -// SetFilter sets the Filter field's value. -func (s *GetConnectionsInput) SetFilter(v *GetConnectionsFilter) *GetConnectionsInput { - s.Filter = v +// SetLocation sets the Location field's value. +func (s *GetPlanInput) SetLocation(v *Location) *GetPlanInput { + s.Location = v return s } -// SetHidePassword sets the HidePassword field's value. -func (s *GetConnectionsInput) SetHidePassword(v bool) *GetConnectionsInput { - s.HidePassword = &v +// SetMapping sets the Mapping field's value. +func (s *GetPlanInput) SetMapping(v []*MappingEntry) *GetPlanInput { + s.Mapping = v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetConnectionsInput) SetMaxResults(v int64) *GetConnectionsInput { - s.MaxResults = &v +// SetSinks sets the Sinks field's value. +func (s *GetPlanInput) SetSinks(v []*CatalogEntry) *GetPlanInput { + s.Sinks = v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetConnectionsInput) SetNextToken(v string) *GetConnectionsInput { - s.NextToken = &v +// SetSource sets the Source field's value. +func (s *GetPlanInput) SetSource(v *CatalogEntry) *GetPlanInput { + s.Source = v return s } -type GetConnectionsOutput struct { +type GetPlanOutput struct { _ struct{} `type:"structure"` - // A list of requested connection definitions. - ConnectionList []*Connection `type:"list"` + // A Python script to perform the mapping. 
+ PythonScript *string `type:"string"` - // A continuation token, if the list of connections returned does not include - // the last of the filtered connections. - NextToken *string `type:"string"` + // The Scala code to perform the mapping. + ScalaCode *string `type:"string"` } // String returns the string representation -func (s GetConnectionsOutput) String() string { +func (s GetPlanOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetConnectionsOutput) GoString() string { +func (s GetPlanOutput) GoString() string { return s.String() } -// SetConnectionList sets the ConnectionList field's value. -func (s *GetConnectionsOutput) SetConnectionList(v []*Connection) *GetConnectionsOutput { - s.ConnectionList = v +// SetPythonScript sets the PythonScript field's value. +func (s *GetPlanOutput) SetPythonScript(v string) *GetPlanOutput { + s.PythonScript = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetConnectionsOutput) SetNextToken(v string) *GetConnectionsOutput { - s.NextToken = &v +// SetScalaCode sets the ScalaCode field's value. +func (s *GetPlanOutput) SetScalaCode(v string) *GetPlanOutput { + s.ScalaCode = &v return s } -type GetCrawlerInput struct { +type GetResourcePolicyInput struct { _ struct{} `type:"structure"` - - // Name of the crawler to retrieve metadata for. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetCrawlerInput) String() string { +func (s GetResourcePolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlerInput) GoString() string { +func (s GetResourcePolicyInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetCrawlerInput) SetName(v string) *GetCrawlerInput { - s.Name = &v - return s -} - -type GetCrawlerMetricsInput struct { +type GetResourcePolicyOutput struct { _ struct{} `type:"structure"` - // A list of the names of crawlers about which to retrieve metrics. - CrawlerNameList []*string `type:"list"` + // The date and time at which the policy was created. + CreateTime *time.Time `type:"timestamp"` - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` + // Contains the hash value associated with this policy. + PolicyHash *string `min:"1" type:"string"` - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` + // Contains the requested policy document, in JSON format. + PolicyInJson *string `min:"2" type:"string"` + + // The date and time at which the policy was last updated. 
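GetPlan, shown above, turns a source-to-target column mapping into generated ETL code in the requested language. A rough sketch under assumed catalog names (not part of this diff); the mapping fields mirror the MappingEntry shape vendored here.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetPlan(&glue.GetPlanInput{
		Language: aws.String(glue.LanguagePython), // or glue.LanguageScala
		Source: &glue.CatalogEntry{ // hypothetical source table
			DatabaseName: aws.String("sales_db"),
			TableName:    aws.String("events"),
		},
		Mapping: []*glue.MappingEntry{{
			SourceTable: aws.String("events"),
			SourcePath:  aws.String("event_id"),
			SourceType:  aws.String("string"),
			TargetTable: aws.String("events_copy"),
			TargetPath:  aws.String("event_id"),
			TargetType:  aws.String("string"),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// PythonScript is populated for LanguagePython; ScalaCode for LanguageScala.
	fmt.Println(aws.StringValue(out.PythonScript))
}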
+ UpdateTime *time.Time `type:"timestamp"` } // String returns the string representation -func (s GetCrawlerMetricsInput) String() string { +func (s GetResourcePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlerMetricsInput) GoString() string { +func (s GetResourcePolicyOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCrawlerMetricsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlerMetricsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreateTime sets the CreateTime field's value. +func (s *GetResourcePolicyOutput) SetCreateTime(v time.Time) *GetResourcePolicyOutput { + s.CreateTime = &v + return s } -// SetCrawlerNameList sets the CrawlerNameList field's value. -func (s *GetCrawlerMetricsInput) SetCrawlerNameList(v []*string) *GetCrawlerMetricsInput { - s.CrawlerNameList = v +// SetPolicyHash sets the PolicyHash field's value. +func (s *GetResourcePolicyOutput) SetPolicyHash(v string) *GetResourcePolicyOutput { + s.PolicyHash = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetCrawlerMetricsInput) SetMaxResults(v int64) *GetCrawlerMetricsInput { - s.MaxResults = &v +// SetPolicyInJson sets the PolicyInJson field's value. +func (s *GetResourcePolicyOutput) SetPolicyInJson(v string) *GetResourcePolicyOutput { + s.PolicyInJson = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlerMetricsInput) SetNextToken(v string) *GetCrawlerMetricsInput { - s.NextToken = &v +// SetUpdateTime sets the UpdateTime field's value. +func (s *GetResourcePolicyOutput) SetUpdateTime(v time.Time) *GetResourcePolicyOutput { + s.UpdateTime = &v return s } -type GetCrawlerMetricsOutput struct { +type GetSecurityConfigurationInput struct { _ struct{} `type:"structure"` - // A list of metrics for the specified crawler. - CrawlerMetricsList []*CrawlerMetrics `type:"list"` - - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` + // The name of the security configuration to retrieve. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetCrawlerMetricsOutput) String() string { +func (s GetSecurityConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlerMetricsOutput) GoString() string { +func (s GetSecurityConfigurationInput) GoString() string { return s.String() } -// SetCrawlerMetricsList sets the CrawlerMetricsList field's value. -func (s *GetCrawlerMetricsOutput) SetCrawlerMetricsList(v []*CrawlerMetrics) *GetCrawlerMetricsOutput { - s.CrawlerMetricsList = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetSecurityConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSecurityConfigurationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlerMetricsOutput) SetNextToken(v string) *GetCrawlerMetricsOutput { - s.NextToken = &v +// SetName sets the Name field's value. +func (s *GetSecurityConfigurationInput) SetName(v string) *GetSecurityConfigurationInput { + s.Name = &v return s } -type GetCrawlerOutput struct { +type GetSecurityConfigurationOutput struct { _ struct{} `type:"structure"` - // The metadata for the specified crawler. - Crawler *Crawler `type:"structure"` + // The requested security configuration. + SecurityConfiguration *SecurityConfiguration `type:"structure"` } // String returns the string representation -func (s GetCrawlerOutput) String() string { +func (s GetSecurityConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlerOutput) GoString() string { +func (s GetSecurityConfigurationOutput) GoString() string { return s.String() } -// SetCrawler sets the Crawler field's value. -func (s *GetCrawlerOutput) SetCrawler(v *Crawler) *GetCrawlerOutput { - s.Crawler = v +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *GetSecurityConfigurationOutput) SetSecurityConfiguration(v *SecurityConfiguration) *GetSecurityConfigurationOutput { + s.SecurityConfiguration = v return s } -type GetCrawlersInput struct { +type GetSecurityConfigurationsInput struct { _ struct{} `type:"structure"` - // The number of crawlers to return on each call. + // The maximum number of results to return. MaxResults *int64 `min:"1" type:"integer"` - // A continuation token, if this is a continuation request. + // A continuation token, if this is a continuation call. NextToken *string `type:"string"` } // String returns the string representation -func (s GetCrawlersInput) String() string { +func (s GetSecurityConfigurationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlersInput) GoString() string { +func (s GetSecurityConfigurationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCrawlersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCrawlersInput"} +func (s *GetSecurityConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSecurityConfigurationsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -16448,74 +22857,97 @@ func (s *GetCrawlersInput) Validate() error { } // SetMaxResults sets the MaxResults field's value. -func (s *GetCrawlersInput) SetMaxResults(v int64) *GetCrawlersInput { +func (s *GetSecurityConfigurationsInput) SetMaxResults(v int64) *GetSecurityConfigurationsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. 
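Listing security configurations follows the same MaxResults/NextToken contract as the other paginated calls in this file. A small sketch with manual token handling (client setup as in the earlier sketches):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	var token *string // nil on the first call
	for {
		out, err := svc.GetSecurityConfigurations(&glue.GetSecurityConfigurationsInput{
			MaxResults: aws.Int64(25),
			NextToken:  token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, sc := range out.SecurityConfigurations {
			fmt.Println(aws.StringValue(sc.Name))
		}
		if out.NextToken == nil {
			break // last page reached
		}
		token = out.NextToken
	}
}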
-func (s *GetCrawlersInput) SetNextToken(v string) *GetCrawlersInput { +func (s *GetSecurityConfigurationsInput) SetNextToken(v string) *GetSecurityConfigurationsInput { s.NextToken = &v return s } -type GetCrawlersOutput struct { +type GetSecurityConfigurationsOutput struct { _ struct{} `type:"structure"` - // A list of crawler metadata. - Crawlers []*Crawler `type:"list"` - - // A continuation token, if the returned list has not reached the end of those - // defined in this customer account. + // A continuation token, if there are more security configurations to return. NextToken *string `type:"string"` + + // A list of security configurations. + SecurityConfigurations []*SecurityConfiguration `type:"list"` } // String returns the string representation -func (s GetCrawlersOutput) String() string { +func (s GetSecurityConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCrawlersOutput) GoString() string { +func (s GetSecurityConfigurationsOutput) GoString() string { return s.String() } -// SetCrawlers sets the Crawlers field's value. -func (s *GetCrawlersOutput) SetCrawlers(v []*Crawler) *GetCrawlersOutput { - s.Crawlers = v +// SetNextToken sets the NextToken field's value. +func (s *GetSecurityConfigurationsOutput) SetNextToken(v string) *GetSecurityConfigurationsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetCrawlersOutput) SetNextToken(v string) *GetCrawlersOutput { - s.NextToken = &v +// SetSecurityConfigurations sets the SecurityConfigurations field's value. +func (s *GetSecurityConfigurationsOutput) SetSecurityConfigurations(v []*SecurityConfiguration) *GetSecurityConfigurationsOutput { + s.SecurityConfigurations = v return s } -type GetDataCatalogEncryptionSettingsInput struct { +type GetTableInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog for which to retrieve the security configuration. - // If none is provided, the AWS account ID is used by default. + // The ID of the Data Catalog where the table resides. If none is provided, + // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` + + // The name of the database in the catalog in which the table resides. For Hive + // compatibility, this name is entirely lowercase. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table for which to retrieve the definition. For Hive compatibility, + // this name is entirely lowercase. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDataCatalogEncryptionSettingsInput) String() string { +func (s GetTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDataCatalogEncryptionSettingsInput) GoString() string { +func (s GetTableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetDataCatalogEncryptionSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogEncryptionSettingsInput"} +func (s *GetTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTableInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -16524,69 +22956,99 @@ func (s *GetDataCatalogEncryptionSettingsInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *GetDataCatalogEncryptionSettingsInput) SetCatalogId(v string) *GetDataCatalogEncryptionSettingsInput { +func (s *GetTableInput) SetCatalogId(v string) *GetTableInput { s.CatalogId = &v return s } -type GetDataCatalogEncryptionSettingsOutput struct { +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTableInput) SetDatabaseName(v string) *GetTableInput { + s.DatabaseName = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetTableInput) SetName(v string) *GetTableInput { + s.Name = &v + return s +} + +type GetTableOutput struct { _ struct{} `type:"structure"` - // The requested security configuration. - DataCatalogEncryptionSettings *DataCatalogEncryptionSettings `type:"structure"` + // The Table object that defines the specified table. + Table *TableData `type:"structure"` } // String returns the string representation -func (s GetDataCatalogEncryptionSettingsOutput) String() string { +func (s GetTableOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDataCatalogEncryptionSettingsOutput) GoString() string { +func (s GetTableOutput) GoString() string { return s.String() } -// SetDataCatalogEncryptionSettings sets the DataCatalogEncryptionSettings field's value. -func (s *GetDataCatalogEncryptionSettingsOutput) SetDataCatalogEncryptionSettings(v *DataCatalogEncryptionSettings) *GetDataCatalogEncryptionSettingsOutput { - s.DataCatalogEncryptionSettings = v +// SetTable sets the Table field's value. +func (s *GetTableOutput) SetTable(v *TableData) *GetTableOutput { + s.Table = v return s } -type GetDatabaseInput struct { +type GetTableVersionInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog in which the database resides. If none is supplied, + // The ID of the Data Catalog where the tables reside. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // The name of the database to retrieve. For Hive compatibility, this should - // be all lowercase. + // The database in the catalog in which the table resides. For Hive compatibility, + // this name is entirely lowercase. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table. For Hive compatibility, this name is entirely lowercase. 
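GetTable is a point lookup; as the doc comments note, both names are treated as lowercase for Hive compatibility. A minimal sketch with hypothetical names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetTable(&glue.GetTableInput{
		DatabaseName: aws.String("sales_db"), // hypothetical; lowercase for Hive
		Name:         aws.String("events"),   // hypothetical; lowercase for Hive
	})
	if err != nil {
		log.Fatal(err)
	}
	// out.Table is the TableData definition for the requested table.
	fmt.Println(aws.StringValue(out.Table.Name))
}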
+ // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` + + // The ID value of the table version to be retrieved. A VersionID is a string + // representation of an integer. Each version is incremented by 1. + VersionId *string `min:"1" type:"string"` } // String returns the string representation -func (s GetDatabaseInput) String() string { +func (s GetTableVersionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDatabaseInput) GoString() string { +func (s GetTableVersionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} +func (s *GetTableVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTableVersionInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 1)) } if invalidParams.Len() > 0 { @@ -16596,73 +23058,108 @@ func (s *GetDatabaseInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *GetDatabaseInput) SetCatalogId(v string) *GetDatabaseInput { +func (s *GetTableVersionInput) SetCatalogId(v string) *GetTableVersionInput { s.CatalogId = &v return s } -// SetName sets the Name field's value. -func (s *GetDatabaseInput) SetName(v string) *GetDatabaseInput { - s.Name = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTableVersionInput) SetDatabaseName(v string) *GetTableVersionInput { + s.DatabaseName = &v return s } -type GetDatabaseOutput struct { +// SetTableName sets the TableName field's value. +func (s *GetTableVersionInput) SetTableName(v string) *GetTableVersionInput { + s.TableName = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetTableVersionInput) SetVersionId(v string) *GetTableVersionInput { + s.VersionId = &v + return s +} + +type GetTableVersionOutput struct { _ struct{} `type:"structure"` - // The definition of the specified database in the catalog. - Database *Database `type:"structure"` + // The requested table version. + TableVersion *TableVersion `type:"structure"` } // String returns the string representation -func (s GetDatabaseOutput) String() string { +func (s GetTableVersionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDatabaseOutput) GoString() string { +func (s GetTableVersionOutput) GoString() string { return s.String() } -// SetDatabase sets the Database field's value. 
-func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { - s.Database = v +// SetTableVersion sets the TableVersion field's value. +func (s *GetTableVersionOutput) SetTableVersion(v *TableVersion) *GetTableVersionOutput { + s.TableVersion = v return s } -type GetDatabasesInput struct { +type GetTableVersionsInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog from which to retrieve Databases. If none is supplied, + // The ID of the Data Catalog where the tables reside. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // The maximum number of databases to return in one response. + // The database in the catalog in which the table resides. For Hive compatibility, + // this name is entirely lowercase. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The maximum number of table versions to return in one response. MaxResults *int64 `min:"1" type:"integer"` - // A continuation token, if this is a continuation call. + // A continuation token, if this is not the first call. NextToken *string `type:"string"` + + // The name of the table. For Hive compatibility, this name is entirely lowercase. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDatabasesInput) String() string { +func (s GetTableVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDatabasesInput) GoString() string { +func (s GetTableVersionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDatabasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabasesInput"} +func (s *GetTableVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTableVersionsInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -16671,137 +23168,213 @@ func (s *GetDatabasesInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *GetDatabasesInput) SetCatalogId(v string) *GetDatabasesInput { +func (s *GetTableVersionsInput) SetCatalogId(v string) *GetTableVersionsInput { s.CatalogId = &v return s } +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTableVersionsInput) SetDatabaseName(v string) *GetTableVersionsInput { + s.DatabaseName = &v + return s +} + // SetMaxResults sets the MaxResults field's value. -func (s *GetDatabasesInput) SetMaxResults(v int64) *GetDatabasesInput { +func (s *GetTableVersionsInput) SetMaxResults(v int64) *GetTableVersionsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. 
-func (s *GetDatabasesInput) SetNextToken(v string) *GetDatabasesInput { +func (s *GetTableVersionsInput) SetNextToken(v string) *GetTableVersionsInput { s.NextToken = &v return s } -type GetDatabasesOutput struct { +// SetTableName sets the TableName field's value. +func (s *GetTableVersionsInput) SetTableName(v string) *GetTableVersionsInput { + s.TableName = &v + return s +} + +type GetTableVersionsOutput struct { _ struct{} `type:"structure"` - // A list of Database objects from the specified catalog. + // A continuation token, if the list of available versions does not include + // the last one. + NextToken *string `type:"string"` + + // A list of strings identifying available versions of the specified table. + TableVersions []*TableVersion `type:"list"` +} + +// String returns the string representation +func (s GetTableVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTableVersionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTableVersionsOutput) SetNextToken(v string) *GetTableVersionsOutput { + s.NextToken = &v + return s +} + +// SetTableVersions sets the TableVersions field's value. +func (s *GetTableVersionsOutput) SetTableVersions(v []*TableVersion) *GetTableVersionsOutput { + s.TableVersions = v + return s +} + +type GetTablesInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the tables reside. If none is provided, + // the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The database in the catalog whose tables to list. For Hive compatibility, + // this name is entirely lowercase. // - // DatabaseList is a required field - DatabaseList []*Database `type:"list" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // A continuation token for paginating the returned list of tokens, returned - // if the current segment of the list is not the last. + // A regular expression pattern. If present, only those tables whose names match + // the pattern are returned. + Expression *string `type:"string"` + + // The maximum number of tables to return in a single response. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, included if this is a continuation call. NextToken *string `type:"string"` } // String returns the string representation -func (s GetDatabasesOutput) String() string { +func (s GetTablesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDatabasesOutput) GoString() string { +func (s GetTablesInput) GoString() string { return s.String() } -// SetDatabaseList sets the DatabaseList field's value. -func (s *GetDatabasesOutput) SetDatabaseList(v []*Database) *GetDatabasesOutput { - s.DatabaseList = v +// Validate inspects the fields of the type to determine if they are valid. 
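As documented above, VersionId is the string form of an integer that increments by one with each new table version. A sketch fetching a specific version (names and version number hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetTableVersion(&glue.GetTableVersionInput{
		DatabaseName: aws.String("sales_db"), // hypothetical
		TableName:    aws.String("events"),   // hypothetical
		VersionId:    aws.String("2"),        // string form of an integer version
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.TableVersion.VersionId))
}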
+func (s *GetTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTablesInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GetTablesInput) SetCatalogId(v string) *GetTablesInput { + s.CatalogId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetDatabasesOutput) SetNextToken(v string) *GetDatabasesOutput { - s.NextToken = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTablesInput) SetDatabaseName(v string) *GetTablesInput { + s.DatabaseName = &v return s } -type GetDataflowGraphInput struct { - _ struct{} `type:"structure"` - - // The Python script to transform. - PythonScript *string `type:"string"` -} - -// String returns the string representation -func (s GetDataflowGraphInput) String() string { - return awsutil.Prettify(s) +// SetExpression sets the Expression field's value. +func (s *GetTablesInput) SetExpression(v string) *GetTablesInput { + s.Expression = &v + return s } -// GoString returns the string representation -func (s GetDataflowGraphInput) GoString() string { - return s.String() +// SetMaxResults sets the MaxResults field's value. +func (s *GetTablesInput) SetMaxResults(v int64) *GetTablesInput { + s.MaxResults = &v + return s } -// SetPythonScript sets the PythonScript field's value. -func (s *GetDataflowGraphInput) SetPythonScript(v string) *GetDataflowGraphInput { - s.PythonScript = &v +// SetNextToken sets the NextToken field's value. +func (s *GetTablesInput) SetNextToken(v string) *GetTablesInput { + s.NextToken = &v return s } -type GetDataflowGraphOutput struct { +type GetTablesOutput struct { _ struct{} `type:"structure"` - // A list of the edges in the resulting DAG. - DagEdges []*CodeGenEdge `type:"list"` + // A continuation token, present if the current list segment is not the last. + NextToken *string `type:"string"` - // A list of the nodes in the resulting DAG. - DagNodes []*CodeGenNode `type:"list"` + // A list of the requested Table objects. + TableList []*TableData `type:"list"` } // String returns the string representation -func (s GetDataflowGraphOutput) String() string { +func (s GetTablesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDataflowGraphOutput) GoString() string { +func (s GetTablesOutput) GoString() string { return s.String() } -// SetDagEdges sets the DagEdges field's value. -func (s *GetDataflowGraphOutput) SetDagEdges(v []*CodeGenEdge) *GetDataflowGraphOutput { - s.DagEdges = v +// SetNextToken sets the NextToken field's value. +func (s *GetTablesOutput) SetNextToken(v string) *GetTablesOutput { + s.NextToken = &v return s } -// SetDagNodes sets the DagNodes field's value. -func (s *GetDataflowGraphOutput) SetDagNodes(v []*CodeGenNode) *GetDataflowGraphOutput { - s.DagNodes = v +// SetTableList sets the TableList field's value. 
+func (s *GetTablesOutput) SetTableList(v []*TableData) *GetTablesOutput { + s.TableList = v return s } -type GetDevEndpointInput struct { +type GetTagsInput struct { _ struct{} `type:"structure"` - // Name of the DevEndpoint for which to retrieve information. + // The Amazon Resource Name (ARN) of the resource for which to retrieve tags. // - // EndpointName is a required field - EndpointName *string `type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDevEndpointInput) String() string { +func (s GetTagsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDevEndpointInput) GoString() string { +func (s GetTagsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDevEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointInput"} - if s.EndpointName == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointName")) +func (s *GetTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTagsInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) } if invalidParams.Len() > 0 { @@ -16810,60 +23383,62 @@ func (s *GetDevEndpointInput) Validate() error { return nil } -// SetEndpointName sets the EndpointName field's value. -func (s *GetDevEndpointInput) SetEndpointName(v string) *GetDevEndpointInput { - s.EndpointName = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *GetTagsInput) SetResourceArn(v string) *GetTagsInput { + s.ResourceArn = &v return s } -type GetDevEndpointOutput struct { +type GetTagsOutput struct { _ struct{} `type:"structure"` - // A DevEndpoint definition. - DevEndpoint *DevEndpoint `type:"structure"` + // The requested tags. + Tags map[string]*string `type:"map"` } // String returns the string representation -func (s GetDevEndpointOutput) String() string { +func (s GetTagsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDevEndpointOutput) GoString() string { +func (s GetTagsOutput) GoString() string { return s.String() } -// SetDevEndpoint sets the DevEndpoint field's value. -func (s *GetDevEndpointOutput) SetDevEndpoint(v *DevEndpoint) *GetDevEndpointOutput { - s.DevEndpoint = v +// SetTags sets the Tags field's value. +func (s *GetTagsOutput) SetTags(v map[string]*string) *GetTagsOutput { + s.Tags = v return s } -type GetDevEndpointsInput struct { +type GetTriggerInput struct { _ struct{} `type:"structure"` - // The maximum size of information to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` + // The name of the trigger to retrieve. 
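Note the contrast with GetPartitions: here Expression is a regular-expression pattern over table names rather than a SQL predicate. A sketch using the generated paginator (names hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	err := svc.GetTablesPages(&glue.GetTablesInput{
		DatabaseName: aws.String("sales_db"), // hypothetical
		Expression:   aws.String("raw_.*"),   // regex over table names
	}, func(page *glue.GetTablesOutput, lastPage bool) bool {
		for _, t := range page.TableList {
			fmt.Println(aws.StringValue(t.Name))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}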
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDevEndpointsInput) String() string { +func (s GetTriggerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDevEndpointsInput) GoString() string { +func (s GetTriggerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDevEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDevEndpointsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *GetTriggerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTriggerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -16872,77 +23447,67 @@ func (s *GetDevEndpointsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *GetDevEndpointsInput) SetMaxResults(v int64) *GetDevEndpointsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetDevEndpointsInput) SetNextToken(v string) *GetDevEndpointsInput { - s.NextToken = &v +// SetName sets the Name field's value. +func (s *GetTriggerInput) SetName(v string) *GetTriggerInput { + s.Name = &v return s } -type GetDevEndpointsOutput struct { +type GetTriggerOutput struct { _ struct{} `type:"structure"` - // A list of DevEndpoint definitions. - DevEndpoints []*DevEndpoint `type:"list"` - - // A continuation token, if not all DevEndpoint definitions have yet been returned. - NextToken *string `type:"string"` + // The requested trigger definition. + Trigger *Trigger `type:"structure"` } // String returns the string representation -func (s GetDevEndpointsOutput) String() string { +func (s GetTriggerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDevEndpointsOutput) GoString() string { +func (s GetTriggerOutput) GoString() string { return s.String() } -// SetDevEndpoints sets the DevEndpoints field's value. -func (s *GetDevEndpointsOutput) SetDevEndpoints(v []*DevEndpoint) *GetDevEndpointsOutput { - s.DevEndpoints = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetDevEndpointsOutput) SetNextToken(v string) *GetDevEndpointsOutput { - s.NextToken = &v +// SetTrigger sets the Trigger field's value. +func (s *GetTriggerOutput) SetTrigger(v *Trigger) *GetTriggerOutput { + s.Trigger = v return s } -type GetJobInput struct { +type GetTriggersInput struct { _ struct{} `type:"structure"` - // The name of the job definition to retrieve. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` + // The name of the job to retrieve triggers for. The trigger that can start + // this job is returned, and if there is no such trigger, all triggers are returned. + DependentJobName *string `min:"1" type:"string"` + + // The maximum size of the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation call. 
+ NextToken *string `type:"string"` } // String returns the string representation -func (s GetJobInput) String() string { +func (s GetTriggersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobInput) GoString() string { +func (s GetTriggersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) +func (s *GetTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTriggersInput"} + if s.DependentJobName != nil && len(*s.DependentJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DependentJobName", 1)) } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -16951,76 +23516,101 @@ func (s *GetJobInput) Validate() error { return nil } -// SetJobName sets the JobName field's value. -func (s *GetJobInput) SetJobName(v string) *GetJobInput { - s.JobName = &v +// SetDependentJobName sets the DependentJobName field's value. +func (s *GetTriggersInput) SetDependentJobName(v string) *GetTriggersInput { + s.DependentJobName = &v return s } -type GetJobOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *GetTriggersInput) SetMaxResults(v int64) *GetTriggersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTriggersInput) SetNextToken(v string) *GetTriggersInput { + s.NextToken = &v + return s +} + +type GetTriggersOutput struct { _ struct{} `type:"structure"` - // The requested job definition. - Job *Job `type:"structure"` + // A continuation token, if not all the requested triggers have yet been returned. + NextToken *string `type:"string"` + + // A list of triggers for the specified job. + Triggers []*Trigger `type:"list"` } // String returns the string representation -func (s GetJobOutput) String() string { +func (s GetTriggersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobOutput) GoString() string { +func (s GetTriggersOutput) GoString() string { return s.String() } -// SetJob sets the Job field's value. -func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { - s.Job = v +// SetNextToken sets the NextToken field's value. +func (s *GetTriggersOutput) SetNextToken(v string) *GetTriggersOutput { + s.NextToken = &v return s } -type GetJobRunInput struct { +// SetTriggers sets the Triggers field's value. +func (s *GetTriggersOutput) SetTriggers(v []*Trigger) *GetTriggersOutput { + s.Triggers = v + return s +} + +type GetUserDefinedFunctionInput struct { _ struct{} `type:"structure"` - // Name of the job definition being run. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` + // The ID of the Data Catalog where the function to be retrieved is located. + // If none is provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // True if a list of predecessor runs should be returned. - PredecessorsIncluded *bool `type:"boolean"` + // The name of the catalog database where the function is located. 
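Per the DependentJobName doc above, naming a job narrows the listing to the trigger that can start that job; leaving it unset returns all triggers. A sketch (job name hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	err := svc.GetTriggersPages(&glue.GetTriggersInput{
		DependentJobName: aws.String("nightly-etl"), // hypothetical job
	}, func(page *glue.GetTriggersOutput, lastPage bool) bool {
		for _, t := range page.Triggers {
			fmt.Printf("%s (%s)\n", aws.StringValue(t.Name), aws.StringValue(t.Type))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}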
+ // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The ID of the job run. + // The name of the function. // - // RunId is a required field - RunId *string `min:"1" type:"string" required:"true"` + // FunctionName is a required field + FunctionName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetJobRunInput) String() string { +func (s GetUserDefinedFunctionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobRunInput) GoString() string { +func (s GetUserDefinedFunctionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) +func (s *GetUserDefinedFunctionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } - if s.RunId == nil { - invalidParams.Add(request.NewErrParamRequired("RunId")) + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.RunId != nil && len(*s.RunId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) } if invalidParams.Len() > 0 { @@ -17029,84 +23619,103 @@ func (s *GetJobRunInput) Validate() error { return nil } -// SetJobName sets the JobName field's value. -func (s *GetJobRunInput) SetJobName(v string) *GetJobRunInput { - s.JobName = &v +// SetCatalogId sets the CatalogId field's value. +func (s *GetUserDefinedFunctionInput) SetCatalogId(v string) *GetUserDefinedFunctionInput { + s.CatalogId = &v return s } -// SetPredecessorsIncluded sets the PredecessorsIncluded field's value. -func (s *GetJobRunInput) SetPredecessorsIncluded(v bool) *GetJobRunInput { - s.PredecessorsIncluded = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetUserDefinedFunctionInput) SetDatabaseName(v string) *GetUserDefinedFunctionInput { + s.DatabaseName = &v return s } -// SetRunId sets the RunId field's value. -func (s *GetJobRunInput) SetRunId(v string) *GetJobRunInput { - s.RunId = &v +// SetFunctionName sets the FunctionName field's value. +func (s *GetUserDefinedFunctionInput) SetFunctionName(v string) *GetUserDefinedFunctionInput { + s.FunctionName = &v return s } -type GetJobRunOutput struct { +type GetUserDefinedFunctionOutput struct { _ struct{} `type:"structure"` - // The requested job-run metadata. - JobRun *JobRun `type:"structure"` + // The requested function definition. 
+ UserDefinedFunction *UserDefinedFunction `type:"structure"` } // String returns the string representation -func (s GetJobRunOutput) String() string { +func (s GetUserDefinedFunctionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobRunOutput) GoString() string { +func (s GetUserDefinedFunctionOutput) GoString() string { return s.String() } -// SetJobRun sets the JobRun field's value. -func (s *GetJobRunOutput) SetJobRun(v *JobRun) *GetJobRunOutput { - s.JobRun = v +// SetUserDefinedFunction sets the UserDefinedFunction field's value. +func (s *GetUserDefinedFunctionOutput) SetUserDefinedFunction(v *UserDefinedFunction) *GetUserDefinedFunctionOutput { + s.UserDefinedFunction = v return s } -type GetJobRunsInput struct { +type GetUserDefinedFunctionsInput struct { _ struct{} `type:"structure"` - // The name of the job definition for which to retrieve all job runs. + // The ID of the Data Catalog where the functions to be retrieved are located. + // If none is provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database where the functions are located. // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The maximum size of the response. + // The maximum number of functions to return in one response. MaxResults *int64 `min:"1" type:"integer"` // A continuation token, if this is a continuation call. NextToken *string `type:"string"` + + // An optional function-name pattern string that filters the function definitions + // returned. + // + // Pattern is a required field + Pattern *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetJobRunsInput) String() string { +func (s GetUserDefinedFunctionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobRunsInput) GoString() string { +func (s GetUserDefinedFunctionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobRunsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobRunsInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) +func (s *GetUserDefinedFunctionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.Pattern == nil { + invalidParams.Add(request.NewErrParamRequired("Pattern")) + } + if s.Pattern != nil && len(*s.Pattern) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Pattern", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -17114,81 +23723,100 @@ func (s *GetJobRunsInput) Validate() error { return nil } -// SetJobName sets the JobName field's value. 
-func (s *GetJobRunsInput) SetJobName(v string) *GetJobRunsInput {
-	s.JobName = &v
+// SetCatalogId sets the CatalogId field's value.
+func (s *GetUserDefinedFunctionsInput) SetCatalogId(v string) *GetUserDefinedFunctionsInput {
+	s.CatalogId = &v
+	return s
+}
+
+// SetDatabaseName sets the DatabaseName field's value.
+func (s *GetUserDefinedFunctionsInput) SetDatabaseName(v string) *GetUserDefinedFunctionsInput {
+	s.DatabaseName = &v
 	return s
 }

 // SetMaxResults sets the MaxResults field's value.
-func (s *GetJobRunsInput) SetMaxResults(v int64) *GetJobRunsInput {
+func (s *GetUserDefinedFunctionsInput) SetMaxResults(v int64) *GetUserDefinedFunctionsInput {
 	s.MaxResults = &v
 	return s
 }

 // SetNextToken sets the NextToken field's value.
-func (s *GetJobRunsInput) SetNextToken(v string) *GetJobRunsInput {
+func (s *GetUserDefinedFunctionsInput) SetNextToken(v string) *GetUserDefinedFunctionsInput {
 	s.NextToken = &v
 	return s
 }

-type GetJobRunsOutput struct {
-	_ struct{} `type:"structure"`
+// SetPattern sets the Pattern field's value.
+func (s *GetUserDefinedFunctionsInput) SetPattern(v string) *GetUserDefinedFunctionsInput {
+	s.Pattern = &v
+	return s
+}

-	// A list of job-run metadata objects.
-	JobRuns []*JobRun `type:"list"`
+type GetUserDefinedFunctionsOutput struct {
+	_ struct{} `type:"structure"`

-	// A continuation token, if not all requested job runs have been returned.
+	// A continuation token, if the list of functions returned does not include
+	// the last requested function.
 	NextToken *string `type:"string"`
+
+	// A list of requested function definitions.
+	UserDefinedFunctions []*UserDefinedFunction `type:"list"`
 }

 // String returns the string representation
-func (s GetJobRunsOutput) String() string {
+func (s GetUserDefinedFunctionsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetJobRunsOutput) GoString() string {
+func (s GetUserDefinedFunctionsOutput) GoString() string {
 	return s.String()
 }

-// SetJobRuns sets the JobRuns field's value.
-func (s *GetJobRunsOutput) SetJobRuns(v []*JobRun) *GetJobRunsOutput {
-	s.JobRuns = v
+// SetNextToken sets the NextToken field's value.
+func (s *GetUserDefinedFunctionsOutput) SetNextToken(v string) *GetUserDefinedFunctionsOutput {
+	s.NextToken = &v
 	return s
 }

-// SetNextToken sets the NextToken field's value.
-func (s *GetJobRunsOutput) SetNextToken(v string) *GetJobRunsOutput {
-	s.NextToken = &v
+// SetUserDefinedFunctions sets the UserDefinedFunctions field's value.
+func (s *GetUserDefinedFunctionsOutput) SetUserDefinedFunctions(v []*UserDefinedFunction) *GetUserDefinedFunctionsOutput {
+	s.UserDefinedFunctions = v
 	return s
 }

-type GetJobsInput struct {
+type GetWorkflowInput struct {
 	_ struct{} `type:"structure"`

-	// The maximum size of the response.
-	MaxResults *int64 `min:"1" type:"integer"`
+	// Specifies whether to include a graph when returning the workflow resource
+	// metadata.
+	IncludeGraph *bool `type:"boolean"`

-	// A continuation token, if this is a continuation call.
-	NextToken *string `type:"string"`
+	// The name of the workflow to retrieve.
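One quirk worth flagging in the types above: Pattern is a required field on GetUserDefinedFunctionsInput even though it is described as an optional filter, so callers must pass something. A sketch follows; the database name is hypothetical, and using "*" as a match-all wildcard is an assumption, not confirmed by this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	err := svc.GetUserDefinedFunctionsPages(&glue.GetUserDefinedFunctionsInput{
		DatabaseName: aws.String("sales_db"), // hypothetical
		Pattern:      aws.String("*"),        // required; "*" as match-all is an assumption
	}, func(page *glue.GetUserDefinedFunctionsOutput, lastPage bool) bool {
		for _, f := range page.UserDefinedFunctions {
			fmt.Println(aws.StringValue(f.FunctionName))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}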
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetJobsInput) String() string { +func (s GetWorkflowInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobsInput) GoString() string { +func (s GetWorkflowInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *GetWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkflowInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -17197,100 +23825,169 @@ func (s *GetJobsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *GetJobsInput) SetMaxResults(v int64) *GetJobsInput { - s.MaxResults = &v +// SetIncludeGraph sets the IncludeGraph field's value. +func (s *GetWorkflowInput) SetIncludeGraph(v bool) *GetWorkflowInput { + s.IncludeGraph = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetJobsInput) SetNextToken(v string) *GetJobsInput { - s.NextToken = &v +// SetName sets the Name field's value. +func (s *GetWorkflowInput) SetName(v string) *GetWorkflowInput { + s.Name = &v return s } -type GetJobsOutput struct { +type GetWorkflowOutput struct { _ struct{} `type:"structure"` - // A list of job definitions. - Jobs []*Job `type:"list"` + // The resource metadata for the workflow. + Workflow *Workflow `type:"structure"` +} - // A continuation token, if not all job definitions have yet been returned. - NextToken *string `type:"string"` +// String returns the string representation +func (s GetWorkflowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWorkflowOutput) GoString() string { + return s.String() +} + +// SetWorkflow sets the Workflow field's value. +func (s *GetWorkflowOutput) SetWorkflow(v *Workflow) *GetWorkflowOutput { + s.Workflow = v + return s +} + +type GetWorkflowRunInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to include the workflow graph in response or not. + IncludeGraph *bool `type:"boolean"` + + // Name of the workflow being run. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The ID of the workflow run. + // + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetJobsOutput) String() string { +func (s GetWorkflowRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetJobsOutput) GoString() string { +func (s GetWorkflowRunInput) GoString() string { return s.String() } -// SetJobs sets the Jobs field's value. -func (s *GetJobsOutput) SetJobs(v []*Job) *GetJobsOutput { - s.Jobs = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetWorkflowRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkflowRunInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) + } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIncludeGraph sets the IncludeGraph field's value. +func (s *GetWorkflowRunInput) SetIncludeGraph(v bool) *GetWorkflowRunInput { + s.IncludeGraph = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetWorkflowRunInput) SetName(v string) *GetWorkflowRunInput { + s.Name = &v + return s +} + +// SetRunId sets the RunId field's value. +func (s *GetWorkflowRunInput) SetRunId(v string) *GetWorkflowRunInput { + s.RunId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetJobsOutput) SetNextToken(v string) *GetJobsOutput { - s.NextToken = &v +type GetWorkflowRunOutput struct { + _ struct{} `type:"structure"` + + // The requested workflow run metadata. + Run *WorkflowRun `type:"structure"` +} + +// String returns the string representation +func (s GetWorkflowRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWorkflowRunOutput) GoString() string { + return s.String() +} + +// SetRun sets the Run field's value. +func (s *GetWorkflowRunOutput) SetRun(v *WorkflowRun) *GetWorkflowRunOutput { + s.Run = v return s } -type GetMappingInput struct { +type GetWorkflowRunPropertiesInput struct { _ struct{} `type:"structure"` - // Parameters for the mapping. - Location *Location `type:"structure"` - - // A list of target tables. - Sinks []*CatalogEntry `type:"list"` + // Name of the workflow which was run. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // Specifies the source table. + // The ID of the workflow run whose run properties should be returned. // - // Source is a required field - Source *CatalogEntry `type:"structure" required:"true"` + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetMappingInput) String() string { +func (s GetWorkflowRunPropertiesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetMappingInput) GoString() string { +func (s GetWorkflowRunPropertiesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
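GetWorkflowInput and GetWorkflowRunInput differ only in that a run lookup also requires RunId; both Validate methods enforce the min-length-1 rules shown above. A hedged sketch of the two calls, assuming the imports from the previous sketch plus "log"; the workflow name and run ID are placeholders, and Workflow.Name and WorkflowRun.Status are fields defined elsewhere in this file:

func showWorkflowRun(svc *glue.Glue) {
	wf, err := svc.GetWorkflow((&glue.GetWorkflowInput{}).
		SetName("etl-workflow"). // hypothetical workflow name
		SetIncludeGraph(true))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("workflow:", aws.StringValue(wf.Workflow.Name))

	run, err := svc.GetWorkflowRun((&glue.GetWorkflowRunInput{}).
		SetName("etl-workflow").
		SetRunId("wr_0123456789abcdef")) // hypothetical run ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("run status:", aws.StringValue(run.Run.Status))
}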
-func (s *GetMappingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetMappingInput"} - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) +func (s *GetWorkflowRunPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkflowRunPropertiesInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Location != nil { - if err := s.Location.Validate(); err != nil { - invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.Sinks != nil { - for i, v := range s.Sinks { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) - } - } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) } if invalidParams.Len() > 0 { @@ -17299,102 +23996,80 @@ func (s *GetMappingInput) Validate() error { return nil } -// SetLocation sets the Location field's value. -func (s *GetMappingInput) SetLocation(v *Location) *GetMappingInput { - s.Location = v - return s -} - -// SetSinks sets the Sinks field's value. -func (s *GetMappingInput) SetSinks(v []*CatalogEntry) *GetMappingInput { - s.Sinks = v +// SetName sets the Name field's value. +func (s *GetWorkflowRunPropertiesInput) SetName(v string) *GetWorkflowRunPropertiesInput { + s.Name = &v return s } -// SetSource sets the Source field's value. -func (s *GetMappingInput) SetSource(v *CatalogEntry) *GetMappingInput { - s.Source = v +// SetRunId sets the RunId field's value. +func (s *GetWorkflowRunPropertiesInput) SetRunId(v string) *GetWorkflowRunPropertiesInput { + s.RunId = &v return s } -type GetMappingOutput struct { +type GetWorkflowRunPropertiesOutput struct { _ struct{} `type:"structure"` - // A list of mappings to the specified targets. - // - // Mapping is a required field - Mapping []*MappingEntry `type:"list" required:"true"` + // The workflow run properties which were set during the specified run. + RunProperties map[string]*string `type:"map"` } // String returns the string representation -func (s GetMappingOutput) String() string { +func (s GetWorkflowRunPropertiesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetMappingOutput) GoString() string { +func (s GetWorkflowRunPropertiesOutput) GoString() string { return s.String() } -// SetMapping sets the Mapping field's value. -func (s *GetMappingOutput) SetMapping(v []*MappingEntry) *GetMappingOutput { - s.Mapping = v +// SetRunProperties sets the RunProperties field's value. +func (s *GetWorkflowRunPropertiesOutput) SetRunProperties(v map[string]*string) *GetWorkflowRunPropertiesOutput { + s.RunProperties = v return s } -type GetPartitionInput struct { +type GetWorkflowRunsInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the partition in question resides. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // Specifies whether to include the workflow graph in response or not. 
+ IncludeGraph *bool `type:"boolean"` - // The name of the catalog database where the partition resides. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The maximum number of workflow runs to be included in the response. + MaxResults *int64 `min:"1" type:"integer"` - // The values that define the partition. + // The name of the workflow whose run metadata should be returned. // - // PartitionValues is a required field - PartitionValues []*string `type:"list" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The name of the partition's table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` } // String returns the string representation -func (s GetPartitionInput) String() string { +func (s GetWorkflowRunsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPartitionInput) GoString() string { +func (s GetWorkflowRunsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetPartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPartitionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.PartitionValues == nil { - invalidParams.Add(request.NewErrParamRequired("PartitionValues")) +func (s *GetWorkflowRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkflowRunsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -17403,189 +24078,173 @@ func (s *GetPartitionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetPartitionInput) SetCatalogId(v string) *GetPartitionInput { - s.CatalogId = &v +// SetIncludeGraph sets the IncludeGraph field's value. +func (s *GetWorkflowRunsInput) SetIncludeGraph(v bool) *GetWorkflowRunsInput { + s.IncludeGraph = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetPartitionInput) SetDatabaseName(v string) *GetPartitionInput { - s.DatabaseName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *GetWorkflowRunsInput) SetMaxResults(v int64) *GetWorkflowRunsInput { + s.MaxResults = &v return s } -// SetPartitionValues sets the PartitionValues field's value. -func (s *GetPartitionInput) SetPartitionValues(v []*string) *GetPartitionInput { - s.PartitionValues = v +// SetName sets the Name field's value. +func (s *GetWorkflowRunsInput) SetName(v string) *GetWorkflowRunsInput { + s.Name = &v return s } -// SetTableName sets the TableName field's value. 
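The run-listing and run-properties shapes combine naturally: page through GetWorkflowRuns, then fetch each run's property map. A sketch under the same import assumptions; WorkflowRunId is a field of the WorkflowRun structure defined elsewhere in this file:

func dumpWorkflowRuns(svc *glue.Glue, workflow string) error {
	runs, err := svc.GetWorkflowRuns((&glue.GetWorkflowRunsInput{}).
		SetName(workflow).
		SetMaxResults(10))
	if err != nil {
		return err
	}
	for _, r := range runs.Runs {
		props, err := svc.GetWorkflowRunProperties((&glue.GetWorkflowRunPropertiesInput{}).
			SetName(workflow).
			SetRunId(aws.StringValue(r.WorkflowRunId)))
		if err != nil {
			return err
		}
		// RunProperties is the map[string]*string documented above.
		fmt.Println(aws.StringValue(r.WorkflowRunId), aws.StringValueMap(props.RunProperties))
	}
	return nil
}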
-func (s *GetPartitionInput) SetTableName(v string) *GetPartitionInput { - s.TableName = &v +// SetNextToken sets the NextToken field's value. +func (s *GetWorkflowRunsInput) SetNextToken(v string) *GetWorkflowRunsInput { + s.NextToken = &v return s } -type GetPartitionOutput struct { +type GetWorkflowRunsOutput struct { _ struct{} `type:"structure"` - // The requested information, in the form of a Partition object. - Partition *Partition `type:"structure"` + // A continuation token, if not all requested workflow runs have been returned. + NextToken *string `type:"string"` + + // A list of workflow run metadata objects. + Runs []*WorkflowRun `min:"1" type:"list"` } // String returns the string representation -func (s GetPartitionOutput) String() string { +func (s GetWorkflowRunsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPartitionOutput) GoString() string { +func (s GetWorkflowRunsOutput) GoString() string { return s.String() } -// SetPartition sets the Partition field's value. -func (s *GetPartitionOutput) SetPartition(v *Partition) *GetPartitionOutput { - s.Partition = v +// SetNextToken sets the NextToken field's value. +func (s *GetWorkflowRunsOutput) SetNextToken(v string) *GetWorkflowRunsOutput { + s.NextToken = &v return s } -type GetPartitionsInput struct { - _ struct{} `type:"structure"` +// SetRuns sets the Runs field's value. +func (s *GetWorkflowRunsOutput) SetRuns(v []*WorkflowRun) *GetWorkflowRunsOutput { + s.Runs = v + return s +} - // The ID of the Data Catalog where the partitions in question reside. If none - // is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` +// A classifier that uses grok patterns. +type GrokClassifier struct { + _ struct{} `type:"structure"` - // The name of the catalog database where the partitions reside. + // An identifier of the data format that the classifier matches, such as Twitter, + // JSON, Omniture logs, and so on. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // Classification is a required field + Classification *string `type:"string" required:"true"` - // An expression filtering the partitions to be returned. - // - // The expression uses SQL syntax similar to the SQL WHERE filter clause. The - // SQL statement parser JSQLParser (http://jsqlparser.sourceforge.net/home.php) - // parses the expression. - // - // Operators: The following are the operators that you can use in the Expression - // API call: - // - // =Checks if the values of the two operands are equal or not; if yes, then - // the condition becomes true. - // - // Example: Assume 'variable a' holds 10 and 'variable b' holds 20. - // - // (a = b) is not true. - // - // < >Checks if the values of two operands are equal or not; if the values are - // not equal, then the condition becomes true. - // - // Example: (a < > b) is true. - // - // >Checks if the value of the left operand is greater than the value of the - // right operand; if yes, then the condition becomes true. - // - // Example: (a > b) is not true. - // - // =Checks if the value of the left operand is greater than or equal to the - // value of the right operand; if yes, then the condition becomes true. - // - // Example: (a >= b) is not true. - // - // <=Checks if the value of the left operand is less than or equal to the value - // of the right operand; if yes, then the condition becomes true. - // - // Example: (a <= b) is true. 
- // - // AND, OR, IN, BETWEEN, LIKE, NOT, IS NULLLogical operators. - // - // Supported Partition Key Types: The following are the the supported partition - // keys. - // - // * string - // - // * date - // - // * timestamp - // - // * int - // - // * bigint - // - // * long - // - // * tinyint - // - // * smallint - // - // * decimal - // - // If an invalid type is encountered, an exception is thrown. + // The time that this classifier was registered. + CreationTime *time.Time `type:"timestamp"` + + // Optional custom grok patterns defined by this classifier. For more information, + // see custom patterns in Writing Custom Classifiers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). + CustomPatterns *string `type:"string"` + + // The grok pattern applied to a data store by this classifier. For more information, + // see built-in patterns in Writing Custom Classifiers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). // - // The following list shows the valid operators on each type. When you define - // a crawler, the partitionKey type is created as a STRING, to be compatible - // with the catalog partitions. + // GrokPattern is a required field + GrokPattern *string `min:"1" type:"string" required:"true"` + + // The time that this classifier was last updated. + LastUpdated *time.Time `type:"timestamp"` + + // The name of the classifier. // - // Sample API Call: - Expression *string `type:"string"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The version of this classifier. + Version *int64 `type:"long"` +} + +// String returns the string representation +func (s GrokClassifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrokClassifier) GoString() string { + return s.String() +} - // The maximum number of partitions to return in a single response. - MaxResults *int64 `min:"1" type:"integer"` +// SetClassification sets the Classification field's value. +func (s *GrokClassifier) SetClassification(v string) *GrokClassifier { + s.Classification = &v + return s +} - // A continuation token, if this is not the first call to retrieve these partitions. - NextToken *string `type:"string"` +// SetCreationTime sets the CreationTime field's value. +func (s *GrokClassifier) SetCreationTime(v time.Time) *GrokClassifier { + s.CreationTime = &v + return s +} - // The segment of the table's partitions to scan in this request. - Segment *Segment `type:"structure"` +// SetCustomPatterns sets the CustomPatterns field's value. +func (s *GrokClassifier) SetCustomPatterns(v string) *GrokClassifier { + s.CustomPatterns = &v + return s +} - // The name of the partitions' table. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` +// SetGrokPattern sets the GrokPattern field's value. +func (s *GrokClassifier) SetGrokPattern(v string) *GrokClassifier { + s.GrokPattern = &v + return s +} + +// SetLastUpdated sets the LastUpdated field's value. +func (s *GrokClassifier) SetLastUpdated(v time.Time) *GrokClassifier { + s.LastUpdated = &v + return s +} + +// SetName sets the Name field's value. +func (s *GrokClassifier) SetName(v string) *GrokClassifier { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. 
+func (s *GrokClassifier) SetVersion(v int64) *GrokClassifier { + s.Version = &v + return s +} + +type ImportCatalogToGlueInput struct { + _ struct{} `type:"structure"` + + // The ID of the catalog to import. Currently, this should be the AWS account + // ID. + CatalogId *string `min:"1" type:"string"` } // String returns the string representation -func (s GetPartitionsInput) String() string { +func (s ImportCatalogToGlueInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPartitionsInput) GoString() string { +func (s ImportCatalogToGlueInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetPartitionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPartitionsInput"} +func (s *ImportCatalogToGlueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportCatalogToGlueInput"} if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.Segment != nil { - if err := s.Segment.Validate(); err != nil { - invalidParams.AddNested("Segment", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -17594,675 +24253,949 @@ func (s *GetPartitionsInput) Validate() error { } // SetCatalogId sets the CatalogId field's value. -func (s *GetPartitionsInput) SetCatalogId(v string) *GetPartitionsInput { +func (s *ImportCatalogToGlueInput) SetCatalogId(v string) *ImportCatalogToGlueInput { s.CatalogId = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetPartitionsInput) SetDatabaseName(v string) *GetPartitionsInput { - s.DatabaseName = &v - return s +type ImportCatalogToGlueOutput struct { + _ struct{} `type:"structure"` } -// SetExpression sets the Expression field's value. -func (s *GetPartitionsInput) SetExpression(v string) *GetPartitionsInput { - s.Expression = &v +// String returns the string representation +func (s ImportCatalogToGlueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportCatalogToGlueOutput) GoString() string { + return s.String() +} + +// Specifies configuration properties for an importing labels task run. +type ImportLabelsTaskRunProperties struct { + _ struct{} `type:"structure"` + + // The Amazon Simple Storage Service (Amazon S3) path from where you will import + // the labels. + InputS3Path *string `type:"string"` + + // Indicates whether to overwrite your existing labels. + Replace *bool `type:"boolean"` +} + +// String returns the string representation +func (s ImportLabelsTaskRunProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportLabelsTaskRunProperties) GoString() string { + return s.String() +} + +// SetInputS3Path sets the InputS3Path field's value. 
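A GrokClassifier like the structure above is created through CreateClassifier, whose input wraps a CreateGrokClassifierRequest carrying the same Classification and GrokPattern fields. A hedged sketch; the classifier name, classification string, and pattern are illustrative:

func registerClassifier(svc *glue.Glue) error {
	_, err := svc.CreateClassifier(&glue.CreateClassifierInput{
		GrokClassifier: (&glue.CreateGrokClassifierRequest{}).
			SetName("apache-access").               // hypothetical classifier name
			SetClassification("apache_access_log"). // free-form format identifier
			SetGrokPattern("%{COMMONAPACHELOG}"),
	})
	return err
}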
+func (s *ImportLabelsTaskRunProperties) SetInputS3Path(v string) *ImportLabelsTaskRunProperties { + s.InputS3Path = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetPartitionsInput) SetMaxResults(v int64) *GetPartitionsInput { - s.MaxResults = &v +// SetReplace sets the Replace field's value. +func (s *ImportLabelsTaskRunProperties) SetReplace(v bool) *ImportLabelsTaskRunProperties { + s.Replace = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetPartitionsInput) SetNextToken(v string) *GetPartitionsInput { - s.NextToken = &v +// Specifies a JDBC data store to crawl. +type JdbcTarget struct { + _ struct{} `type:"structure"` + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `type:"string"` + + // A list of glob patterns used to exclude from the crawl. For more information, + // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). + Exclusions []*string `type:"list"` + + // The path of the JDBC target. + Path *string `type:"string"` +} + +// String returns the string representation +func (s JdbcTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JdbcTarget) GoString() string { + return s.String() +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *JdbcTarget) SetConnectionName(v string) *JdbcTarget { + s.ConnectionName = &v return s } -// SetSegment sets the Segment field's value. -func (s *GetPartitionsInput) SetSegment(v *Segment) *GetPartitionsInput { - s.Segment = v +// SetExclusions sets the Exclusions field's value. +func (s *JdbcTarget) SetExclusions(v []*string) *JdbcTarget { + s.Exclusions = v return s } -// SetTableName sets the TableName field's value. -func (s *GetPartitionsInput) SetTableName(v string) *GetPartitionsInput { - s.TableName = &v +// SetPath sets the Path field's value. +func (s *JdbcTarget) SetPath(v string) *JdbcTarget { + s.Path = &v return s } -type GetPartitionsOutput struct { +// Specifies a job definition. +type Job struct { _ struct{} `type:"structure"` - // A continuation token, if the returned list of partitions does not does not - // include the last one. - NextToken *string `type:"string"` + // This field is deprecated. Use MaxCapacity instead. + // + // The number of AWS Glue data processing units (DPUs) allocated to runs of + // this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is + // a relative measure of processing power that consists of 4 vCPUs of compute + // capacity and 16 GB of memory. For more information, see the AWS Glue pricing + // page (https://aws.amazon.com/glue/pricing/). + // + // Deprecated: This property is deprecated, use MaxCapacity instead. + AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - // A list of requested partitions. - Partitions []*Partition `type:"list"` + // The JobCommand that executes this job. + Command *JobCommand `type:"structure"` + + // The connections used for this job. + Connections *ConnectionsList `type:"structure"` + + // The time and date that this job definition was created. + CreatedOn *time.Time `type:"timestamp"` + + // The default arguments for this job, specified as name-value pairs. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. 
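JdbcTarget is consumed as one entry of a crawler's CrawlerTargets. A small sketch of building one with an exclusion glob as described above; the connection name and paths are placeholders:

func jdbcTargets() *glue.CrawlerTargets {
	target := (&glue.JdbcTarget{}).
		SetConnectionName("my-jdbc-connection"). // hypothetical Glue connection
		SetPath("mydb/%").                       // include path, placeholder
		SetExclusions(aws.StringSlice([]string{"mydb/staging_*"}))
	// The result would be passed to CreateCrawler or UpdateCrawler.
	return &glue.CrawlerTargets{JdbcTargets: []*glue.JdbcTarget{target}}
}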
+ // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + DefaultArguments map[string]*string `type:"map"` + + // A description of the job. + Description *string `type:"string"` + + // An ExecutionProperty specifying the maximum number of concurrent runs allowed + // for this job. + ExecutionProperty *ExecutionProperty `type:"structure"` + + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. The Python version indicates the version supported for jobs + // of type Spark. + // + // For more information about the available AWS Glue versions and corresponding + // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + // in the developer guide. + // + // Jobs that are created without specifying a Glue version default to Glue 0.9. + GlueVersion *string `min:"1" type:"string"` + + // The last point in time when this job definition was modified. + LastModifiedOn *time.Time `type:"timestamp"` + + // This field is reserved for future use. + LogUri *string `type:"string"` + + // The number of AWS Glue data processing units (DPUs) that can be allocated + // when this job runs. A DPU is a relative measure of processing power that + // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, + // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // + // The value that can be allocated for MaxCapacity depends on whether you are + // running a Python shell job or an Apache Spark ETL job: + // + // * When you specify a Python shell job (JobCommand.Name="pythonshell"), + // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), + // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job + // type cannot have a fractional DPU allocation. + MaxCapacity *float64 `type:"double"` + + // The maximum number of times to retry this job after a JobRun fails. + MaxRetries *int64 `type:"integer"` + + // The name you assign to this job definition. + Name *string `min:"1" type:"string"` + + // Specifies configuration properties of a job notification. + NotificationProperty *NotificationProperty `type:"structure"` + + // The number of workers of a defined workerType that are allocated when a job + // runs. + // + // The maximum number of workers you can define are 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` + + // The name or Amazon Resource Name (ARN) of the IAM role associated with this + // job. + Role *string `type:"string"` + + // The name of the SecurityConfiguration structure to be used with this job. + SecurityConfiguration *string `min:"1" type:"string"` + + // The job timeout in minutes. This is the maximum time that a job run can consume + // resources before it is terminated and enters TIMEOUT status. The default + // is 2,880 minutes (48 hours). 
+ Timeout *int64 `min:"1" type:"integer"` + + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of Standard, G.1X, or G.2X. + // + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of + // memory, 64 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of + // memory, 128 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s GetPartitionsOutput) String() string { +func (s Job) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPartitionsOutput) GoString() string { +func (s Job) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetPartitionsOutput) SetNextToken(v string) *GetPartitionsOutput { - s.NextToken = &v +// SetAllocatedCapacity sets the AllocatedCapacity field's value. +func (s *Job) SetAllocatedCapacity(v int64) *Job { + s.AllocatedCapacity = &v return s } -// SetPartitions sets the Partitions field's value. -func (s *GetPartitionsOutput) SetPartitions(v []*Partition) *GetPartitionsOutput { - s.Partitions = v +// SetCommand sets the Command field's value. +func (s *Job) SetCommand(v *JobCommand) *Job { + s.Command = v return s } -type GetPlanInput struct { - _ struct{} `type:"structure"` - - // The programming language of the code to perform the mapping. - Language *string `type:"string" enum:"Language"` - - // Parameters for the mapping. - Location *Location `type:"structure"` - - // The list of mappings from a source table to target tables. - // - // Mapping is a required field - Mapping []*MappingEntry `type:"list" required:"true"` - - // The target tables. - Sinks []*CatalogEntry `type:"list"` - - // The source table. - // - // Source is a required field - Source *CatalogEntry `type:"structure" required:"true"` +// SetConnections sets the Connections field's value. +func (s *Job) SetConnections(v *ConnectionsList) *Job { + s.Connections = v + return s } -// String returns the string representation -func (s GetPlanInput) String() string { - return awsutil.Prettify(s) +// SetCreatedOn sets the CreatedOn field's value. +func (s *Job) SetCreatedOn(v time.Time) *Job { + s.CreatedOn = &v + return s } -// GoString returns the string representation -func (s GetPlanInput) GoString() string { - return s.String() +// SetDefaultArguments sets the DefaultArguments field's value. +func (s *Job) SetDefaultArguments(v map[string]*string) *Job { + s.DefaultArguments = v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPlanInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPlanInput"} - if s.Mapping == nil { - invalidParams.Add(request.NewErrParamRequired("Mapping")) - } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Location != nil { - if err := s.Location.Validate(); err != nil { - invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) - } - } - if s.Sinks != nil { - for i, v := range s.Sinks { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sinks", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDescription sets the Description field's value. +func (s *Job) SetDescription(v string) *Job { + s.Description = &v + return s } -// SetLanguage sets the Language field's value. -func (s *GetPlanInput) SetLanguage(v string) *GetPlanInput { - s.Language = &v +// SetExecutionProperty sets the ExecutionProperty field's value. +func (s *Job) SetExecutionProperty(v *ExecutionProperty) *Job { + s.ExecutionProperty = v return s } -// SetLocation sets the Location field's value. -func (s *GetPlanInput) SetLocation(v *Location) *GetPlanInput { - s.Location = v +// SetGlueVersion sets the GlueVersion field's value. +func (s *Job) SetGlueVersion(v string) *Job { + s.GlueVersion = &v return s } -// SetMapping sets the Mapping field's value. -func (s *GetPlanInput) SetMapping(v []*MappingEntry) *GetPlanInput { - s.Mapping = v +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *Job) SetLastModifiedOn(v time.Time) *Job { + s.LastModifiedOn = &v return s } -// SetSinks sets the Sinks field's value. -func (s *GetPlanInput) SetSinks(v []*CatalogEntry) *GetPlanInput { - s.Sinks = v +// SetLogUri sets the LogUri field's value. +func (s *Job) SetLogUri(v string) *Job { + s.LogUri = &v return s } -// SetSource sets the Source field's value. -func (s *GetPlanInput) SetSource(v *CatalogEntry) *GetPlanInput { - s.Source = v +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *Job) SetMaxCapacity(v float64) *Job { + s.MaxCapacity = &v return s } -type GetPlanOutput struct { - _ struct{} `type:"structure"` - - // A Python script to perform the mapping. - PythonScript *string `type:"string"` - - // Scala code to perform the mapping. - ScalaCode *string `type:"string"` +// SetMaxRetries sets the MaxRetries field's value. +func (s *Job) SetMaxRetries(v int64) *Job { + s.MaxRetries = &v + return s } -// String returns the string representation -func (s GetPlanOutput) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *Job) SetName(v string) *Job { + s.Name = &v + return s } -// GoString returns the string representation -func (s GetPlanOutput) GoString() string { - return s.String() +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *Job) SetNotificationProperty(v *NotificationProperty) *Job { + s.NotificationProperty = v + return s } -// SetPythonScript sets the PythonScript field's value. -func (s *GetPlanOutput) SetPythonScript(v string) *GetPlanOutput { - s.PythonScript = &v +// SetNumberOfWorkers sets the NumberOfWorkers field's value. 
+func (s *Job) SetNumberOfWorkers(v int64) *Job { + s.NumberOfWorkers = &v return s } -// SetScalaCode sets the ScalaCode field's value. -func (s *GetPlanOutput) SetScalaCode(v string) *GetPlanOutput { - s.ScalaCode = &v +// SetRole sets the Role field's value. +func (s *Job) SetRole(v string) *Job { + s.Role = &v return s } -type GetResourcePolicyInput struct { - _ struct{} `type:"structure"` +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *Job) SetSecurityConfiguration(v string) *Job { + s.SecurityConfiguration = &v + return s } -// String returns the string representation -func (s GetResourcePolicyInput) String() string { - return awsutil.Prettify(s) +// SetTimeout sets the Timeout field's value. +func (s *Job) SetTimeout(v int64) *Job { + s.Timeout = &v + return s } -// GoString returns the string representation -func (s GetResourcePolicyInput) GoString() string { - return s.String() +// SetWorkerType sets the WorkerType field's value. +func (s *Job) SetWorkerType(v string) *Job { + s.WorkerType = &v + return s } -type GetResourcePolicyOutput struct { +// Defines a point that a job can resume processing. +type JobBookmarkEntry struct { _ struct{} `type:"structure"` - // The date and time at which the policy was created. - CreateTime *time.Time `type:"timestamp"` + // The attempt ID number. + Attempt *int64 `type:"integer"` - // Contains the hash value associated with this policy. - PolicyHash *string `min:"1" type:"string"` + // The bookmark itself. + JobBookmark *string `type:"string"` - // Contains the requested policy document, in JSON format. - PolicyInJson *string `min:"2" type:"string"` + // The name of the job in question. + JobName *string `type:"string"` - // The date and time at which the policy was last updated. - UpdateTime *time.Time `type:"timestamp"` + // The unique run identifier associated with the previous job run. + PreviousRunId *string `type:"string"` + + // The run ID number. + Run *int64 `type:"integer"` + + // The run ID number. + RunId *string `type:"string"` + + // The version of the job. + Version *int64 `type:"integer"` } // String returns the string representation -func (s GetResourcePolicyOutput) String() string { +func (s JobBookmarkEntry) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetResourcePolicyOutput) GoString() string { +func (s JobBookmarkEntry) GoString() string { return s.String() } -// SetCreateTime sets the CreateTime field's value. -func (s *GetResourcePolicyOutput) SetCreateTime(v time.Time) *GetResourcePolicyOutput { - s.CreateTime = &v - return s -} - -// SetPolicyHash sets the PolicyHash field's value. -func (s *GetResourcePolicyOutput) SetPolicyHash(v string) *GetResourcePolicyOutput { - s.PolicyHash = &v +// SetAttempt sets the Attempt field's value. +func (s *JobBookmarkEntry) SetAttempt(v int64) *JobBookmarkEntry { + s.Attempt = &v return s } -// SetPolicyInJson sets the PolicyInJson field's value. -func (s *GetResourcePolicyOutput) SetPolicyInJson(v string) *GetResourcePolicyOutput { - s.PolicyInJson = &v +// SetJobBookmark sets the JobBookmark field's value. +func (s *JobBookmarkEntry) SetJobBookmark(v string) *JobBookmarkEntry { + s.JobBookmark = &v return s } -// SetUpdateTime sets the UpdateTime field's value. -func (s *GetResourcePolicyOutput) SetUpdateTime(v time.Time) *GetResourcePolicyOutput { - s.UpdateTime = &v +// SetJobName sets the JobName field's value. 
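The MaxCapacity and WorkerType/NumberOfWorkers rules documented on Job apply when a job is defined; CreateJobInput, defined elsewhere in this file, mirrors these fields. A sketch of a Spark ETL definition that opts for worker-based sizing; the job name, role, and script path are placeholders:

func createEtlJob(svc *glue.Glue) error {
	etl := (&glue.CreateJobInput{}).
		SetName("nightly-etl"). // hypothetical job name
		SetRole("GlueJobRole"). // hypothetical IAM role
		SetGlueVersion("1.0").
		SetCommand((&glue.JobCommand{}).
			SetName("glueetl").
			SetScriptLocation("s3://example-bucket/scripts/etl.py")).
		// Per the docs above, set either MaxCapacity or WorkerType plus
		// NumberOfWorkers, never both; worker-based sizing is used here.
		SetWorkerType(glue.WorkerTypeG1x).
		SetNumberOfWorkers(10).
		SetTimeout(120)
	_, err := svc.CreateJob(etl)
	return err
}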
+func (s *JobBookmarkEntry) SetJobName(v string) *JobBookmarkEntry { + s.JobName = &v return s } -type GetSecurityConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the security configuration to retrieve. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetSecurityConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSecurityConfigurationInput) GoString() string { - return s.String() +// SetPreviousRunId sets the PreviousRunId field's value. +func (s *JobBookmarkEntry) SetPreviousRunId(v string) *JobBookmarkEntry { + s.PreviousRunId = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSecurityConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSecurityConfigurationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } +// SetRun sets the Run field's value. +func (s *JobBookmarkEntry) SetRun(v int64) *JobBookmarkEntry { + s.Run = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRunId sets the RunId field's value. +func (s *JobBookmarkEntry) SetRunId(v string) *JobBookmarkEntry { + s.RunId = &v + return s } -// SetName sets the Name field's value. -func (s *GetSecurityConfigurationInput) SetName(v string) *GetSecurityConfigurationInput { - s.Name = &v +// SetVersion sets the Version field's value. +func (s *JobBookmarkEntry) SetVersion(v int64) *JobBookmarkEntry { + s.Version = &v return s } -type GetSecurityConfigurationOutput struct { +// Specifies how job bookmark data should be encrypted. +type JobBookmarksEncryption struct { _ struct{} `type:"structure"` - // The requested security configuration - SecurityConfiguration *SecurityConfiguration `type:"structure"` + // The encryption mode to use for job bookmarks data. + JobBookmarksEncryptionMode *string `type:"string" enum:"JobBookmarksEncryptionMode"` + + // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KmsKeyArn *string `type:"string"` } // String returns the string representation -func (s GetSecurityConfigurationOutput) String() string { +func (s JobBookmarksEncryption) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSecurityConfigurationOutput) GoString() string { +func (s JobBookmarksEncryption) GoString() string { return s.String() } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *GetSecurityConfigurationOutput) SetSecurityConfiguration(v *SecurityConfiguration) *GetSecurityConfigurationOutput { - s.SecurityConfiguration = v +// SetJobBookmarksEncryptionMode sets the JobBookmarksEncryptionMode field's value. +func (s *JobBookmarksEncryption) SetJobBookmarksEncryptionMode(v string) *JobBookmarksEncryption { + s.JobBookmarksEncryptionMode = &v return s } -type GetSecurityConfigurationsInput struct { +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *JobBookmarksEncryption) SetKmsKeyArn(v string) *JobBookmarksEncryption { + s.KmsKeyArn = &v + return s +} + +// Specifies code executed when a job is run. +type JobCommand struct { _ struct{} `type:"structure"` - // The maximum number of results to return. 
- MaxResults *int64 `min:"1" type:"integer"` + // The name of the job command. For an Apache Spark ETL job, this must be glueetl. + // For a Python shell job, it must be pythonshell. + Name *string `type:"string"` - // A continuation token, if this is a continuation call. - NextToken *string `type:"string"` + // The Python version being used to execute a Python shell job. Allowed values + // are 2 or 3. + PythonVersion *string `type:"string"` + + // Specifies the Amazon Simple Storage Service (Amazon S3) path to a script + // that executes a job. + ScriptLocation *string `type:"string"` } // String returns the string representation -func (s GetSecurityConfigurationsInput) String() string { +func (s JobCommand) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSecurityConfigurationsInput) GoString() string { +func (s JobCommand) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSecurityConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSecurityConfigurationsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetName sets the Name field's value. +func (s *JobCommand) SetName(v string) *JobCommand { + s.Name = &v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetSecurityConfigurationsInput) SetMaxResults(v int64) *GetSecurityConfigurationsInput { - s.MaxResults = &v +// SetPythonVersion sets the PythonVersion field's value. +func (s *JobCommand) SetPythonVersion(v string) *JobCommand { + s.PythonVersion = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetSecurityConfigurationsInput) SetNextToken(v string) *GetSecurityConfigurationsInput { - s.NextToken = &v +// SetScriptLocation sets the ScriptLocation field's value. +func (s *JobCommand) SetScriptLocation(v string) *JobCommand { + s.ScriptLocation = &v return s } -type GetSecurityConfigurationsOutput struct { +// The details of a Job node present in the workflow. +type JobNodeDetails struct { _ struct{} `type:"structure"` - // A continuation token, if there are more security configurations to return. - NextToken *string `type:"string"` - - // A list of security configurations. - SecurityConfigurations []*SecurityConfiguration `type:"list"` + // The information for the job runs represented by the job node. + JobRuns []*JobRun `type:"list"` } // String returns the string representation -func (s GetSecurityConfigurationsOutput) String() string { +func (s JobNodeDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSecurityConfigurationsOutput) GoString() string { +func (s JobNodeDetails) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetSecurityConfigurationsOutput) SetNextToken(v string) *GetSecurityConfigurationsOutput { - s.NextToken = &v - return s -} - -// SetSecurityConfigurations sets the SecurityConfigurations field's value. -func (s *GetSecurityConfigurationsOutput) SetSecurityConfigurations(v []*SecurityConfiguration) *GetSecurityConfigurationsOutput { - s.SecurityConfigurations = v +// SetJobRuns sets the JobRuns field's value. 
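JobCommand.Name selects the runtime, and per the capacity notes above a pythonshell job is the one case where a fractional 0.0625 DPU is valid. A sketch pairing a Python shell command with that minimum capacity; the names and script location are placeholders:

func createShellJob(svc *glue.Glue) error {
	shell := (&glue.CreateJobInput{}).
		SetName("daily-report"). // hypothetical job name
		SetRole("GlueJobRole").  // hypothetical IAM role
		SetCommand((&glue.JobCommand{}).
			SetName("pythonshell").
			SetPythonVersion("3").
			SetScriptLocation("s3://example-bucket/scripts/report.py")).
		SetMaxCapacity(0.0625) // fractional DPU is only valid for pythonshell
	_, err := svc.CreateJob(shell)
	return err
}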
+func (s *JobNodeDetails) SetJobRuns(v []*JobRun) *JobNodeDetails { + s.JobRuns = v return s } -type GetTableInput struct { +// Contains information about a job run. +type JobRun struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the table resides. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // This field is deprecated. Use MaxCapacity instead. + // + // The number of AWS Glue data processing units (DPUs) allocated to this JobRun. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). + // + // Deprecated: This property is deprecated, use MaxCapacity instead. + AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - // The name of the database in the catalog in which the table resides. For Hive - // compatibility, this name is entirely lowercase. + // The job arguments associated with this run. For this job run, they replace + // the default arguments set in the job definition itself. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + Arguments map[string]*string `type:"map"` - // The name of the table for which to retrieve the definition. For Hive compatibility, - // this name is entirely lowercase. + // The number of the attempt to run this job. + Attempt *int64 `type:"integer"` + + // The date and time that this job run completed. + CompletedOn *time.Time `type:"timestamp"` + + // An error message associated with this job run. + ErrorMessage *string `type:"string"` + + // The amount of time (in seconds) that the job run consumed resources. + ExecutionTime *int64 `type:"integer"` + + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. The Python version indicates the version supported for jobs + // of type Spark. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} + // For more information about the available AWS Glue versions and corresponding + // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + // in the developer guide. + // + // Jobs that are created without specifying a Glue version default to Glue 0.9. + GlueVersion *string `min:"1" type:"string"` -// String returns the string representation -func (s GetTableInput) String() string { - return awsutil.Prettify(s) -} + // The ID of this job run. + Id *string `min:"1" type:"string"` -// GoString returns the string representation -func (s GetTableInput) GoString() string { - return s.String() + // The name of the job definition being used in this run. 
+ JobName *string `min:"1" type:"string"` + + // The current state of the job run. + JobRunState *string `type:"string" enum:"JobRunState"` + + // The last time that this job run was modified. + LastModifiedOn *time.Time `type:"timestamp"` + + // The name of the log group for secure logging that can be server-side encrypted + // in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/, in + // which case the default encryption is NONE. If you add a role name and SecurityConfiguration + // name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), + // then that security configuration is used to encrypt the log group. + LogGroupName *string `type:"string"` + + // The number of AWS Glue data processing units (DPUs) that can be allocated + // when this job runs. A DPU is a relative measure of processing power that + // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, + // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // + // The value that can be allocated for MaxCapacity depends on whether you are + // running a Python shell job or an Apache Spark ETL job: + // + // * When you specify a Python shell job (JobCommand.Name="pythonshell"), + // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), + // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job + // type cannot have a fractional DPU allocation. + MaxCapacity *float64 `type:"double"` + + // Specifies configuration properties of a job run notification. + NotificationProperty *NotificationProperty `type:"structure"` + + // The number of workers of a defined workerType that are allocated when a job + // runs. + // + // The maximum number of workers you can define is 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` + + // A list of predecessors to this job run. + PredecessorRuns []*Predecessor `type:"list"` + + // The ID of the previous run of this job. For example, the JobRunId specified + // in the StartJobRun action. + PreviousRunId *string `min:"1" type:"string"` + + // The name of the SecurityConfiguration structure to be used with this job + // run. + SecurityConfiguration *string `min:"1" type:"string"` + + // The date and time at which this job run was started. + StartedOn *time.Time `type:"timestamp"` + + // The JobRun timeout in minutes. This is the maximum time that a job run can + // consume resources before it is terminated and enters TIMEOUT status. The + // default is 2,880 minutes (48 hours). This overrides the timeout value set + // in the parent job. + Timeout *int64 `min:"1" type:"integer"` + + // The name of the trigger that started this job run. + TriggerName *string `min:"1" type:"string"` + + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of Standard, G.1X, or G.2X. + // + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory + // and a 64GB disk, and 1 executor per worker. + // + // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory + // and a 128GB disk, and 1 executor per worker. 
+ WorkerType *string `type:"string" enum:"WorkerType"` } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } +// String returns the string representation +func (s JobRun) String() string { + return awsutil.Prettify(s) +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// GoString returns the string representation +func (s JobRun) GoString() string { + return s.String() } -// SetCatalogId sets the CatalogId field's value. -func (s *GetTableInput) SetCatalogId(v string) *GetTableInput { - s.CatalogId = &v +// SetAllocatedCapacity sets the AllocatedCapacity field's value. +func (s *JobRun) SetAllocatedCapacity(v int64) *JobRun { + s.AllocatedCapacity = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableInput) SetDatabaseName(v string) *GetTableInput { - s.DatabaseName = &v +// SetArguments sets the Arguments field's value. +func (s *JobRun) SetArguments(v map[string]*string) *JobRun { + s.Arguments = v return s } -// SetName sets the Name field's value. -func (s *GetTableInput) SetName(v string) *GetTableInput { - s.Name = &v +// SetAttempt sets the Attempt field's value. +func (s *JobRun) SetAttempt(v int64) *JobRun { + s.Attempt = &v return s } -type GetTableOutput struct { - _ struct{} `type:"structure"` - - // The Table object that defines the specified table. - Table *Table `type:"structure"` +// SetCompletedOn sets the CompletedOn field's value. +func (s *JobRun) SetCompletedOn(v time.Time) *JobRun { + s.CompletedOn = &v + return s } -// String returns the string representation -func (s GetTableOutput) String() string { - return awsutil.Prettify(s) +// SetErrorMessage sets the ErrorMessage field's value. +func (s *JobRun) SetErrorMessage(v string) *JobRun { + s.ErrorMessage = &v + return s } -// GoString returns the string representation -func (s GetTableOutput) GoString() string { - return s.String() +// SetExecutionTime sets the ExecutionTime field's value. +func (s *JobRun) SetExecutionTime(v int64) *JobRun { + s.ExecutionTime = &v + return s } -// SetTable sets the Table field's value. -func (s *GetTableOutput) SetTable(v *Table) *GetTableOutput { - s.Table = v +// SetGlueVersion sets the GlueVersion field's value. +func (s *JobRun) SetGlueVersion(v string) *JobRun { + s.GlueVersion = &v return s } -type GetTableVersionInput struct { - _ struct{} `type:"structure"` - - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` - - // The database in the catalog in which the table resides. For Hive compatibility, - // this name is entirely lowercase. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the table. For Hive compatibility, this name is entirely lowercase. 
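JobRunState drives the usual polling loop around GetJobRun, whose shapes this change also touches. A sketch that waits for a terminal state, assuming the earlier imports plus "time"; the 15-second interval is an arbitrary choice:

func waitForRun(svc *glue.Glue, jobName, runID string) (string, error) {
	for {
		out, err := svc.GetJobRun((&glue.GetJobRunInput{}).
			SetJobName(jobName).
			SetRunId(runID))
		if err != nil {
			return "", err
		}
		state := aws.StringValue(out.JobRun.JobRunState)
		switch state {
		case glue.JobRunStateStarting, glue.JobRunStateRunning, glue.JobRunStateStopping:
			time.Sleep(15 * time.Second) // still in flight; poll again
		default:
			return state, nil // SUCCEEDED, FAILED, STOPPED, or TIMEOUT
		}
	}
}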
- // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` +// SetId sets the Id field's value. +func (s *JobRun) SetId(v string) *JobRun { + s.Id = &v + return s +} - // The ID value of the table version to be retrieved. A VersionID is a string - // representation of an integer. Each version is incremented by 1. - VersionId *string `min:"1" type:"string"` +// SetJobName sets the JobName field's value. +func (s *JobRun) SetJobName(v string) *JobRun { + s.JobName = &v + return s } -// String returns the string representation -func (s GetTableVersionInput) String() string { - return awsutil.Prettify(s) +// SetJobRunState sets the JobRunState field's value. +func (s *JobRun) SetJobRunState(v string) *JobRun { + s.JobRunState = &v + return s } -// GoString returns the string representation -func (s GetTableVersionInput) GoString() string { - return s.String() +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *JobRun) SetLastModifiedOn(v time.Time) *JobRun { + s.LastModifiedOn = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTableVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableVersionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.VersionId != nil && len(*s.VersionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VersionId", 1)) - } +// SetLogGroupName sets the LogGroupName field's value. +func (s *JobRun) SetLogGroupName(v string) *JobRun { + s.LogGroupName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *JobRun) SetMaxCapacity(v float64) *JobRun { + s.MaxCapacity = &v + return s } -// SetCatalogId sets the CatalogId field's value. -func (s *GetTableVersionInput) SetCatalogId(v string) *GetTableVersionInput { - s.CatalogId = &v +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *JobRun) SetNotificationProperty(v *NotificationProperty) *JobRun { + s.NotificationProperty = v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableVersionInput) SetDatabaseName(v string) *GetTableVersionInput { - s.DatabaseName = &v +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *JobRun) SetNumberOfWorkers(v int64) *JobRun { + s.NumberOfWorkers = &v return s } -// SetTableName sets the TableName field's value. -func (s *GetTableVersionInput) SetTableName(v string) *GetTableVersionInput { - s.TableName = &v +// SetPredecessorRuns sets the PredecessorRuns field's value. +func (s *JobRun) SetPredecessorRuns(v []*Predecessor) *JobRun { + s.PredecessorRuns = v return s } -// SetVersionId sets the VersionId field's value. -func (s *GetTableVersionInput) SetVersionId(v string) *GetTableVersionInput { - s.VersionId = &v +// SetPreviousRunId sets the PreviousRunId field's value. 
+func (s *JobRun) SetPreviousRunId(v string) *JobRun { + s.PreviousRunId = &v return s } -type GetTableVersionOutput struct { - _ struct{} `type:"structure"` +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *JobRun) SetSecurityConfiguration(v string) *JobRun { + s.SecurityConfiguration = &v + return s +} - // The requested table version. - TableVersion *TableVersion `type:"structure"` +// SetStartedOn sets the StartedOn field's value. +func (s *JobRun) SetStartedOn(v time.Time) *JobRun { + s.StartedOn = &v + return s } -// String returns the string representation -func (s GetTableVersionOutput) String() string { - return awsutil.Prettify(s) +// SetTimeout sets the Timeout field's value. +func (s *JobRun) SetTimeout(v int64) *JobRun { + s.Timeout = &v + return s } -// GoString returns the string representation -func (s GetTableVersionOutput) GoString() string { - return s.String() +// SetTriggerName sets the TriggerName field's value. +func (s *JobRun) SetTriggerName(v string) *JobRun { + s.TriggerName = &v + return s } -// SetTableVersion sets the TableVersion field's value. -func (s *GetTableVersionOutput) SetTableVersion(v *TableVersion) *GetTableVersionOutput { - s.TableVersion = v +// SetWorkerType sets the WorkerType field's value. +func (s *JobRun) SetWorkerType(v string) *JobRun { + s.WorkerType = &v return s } -type GetTableVersionsInput struct { +// Specifies information used to update an existing job definition. The previous +// job definition is completely overwritten by this information. +type JobUpdate struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // This field is deprecated. Use MaxCapacity instead. + // + // The number of AWS Glue data processing units (DPUs) to allocate to this job. + // You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). + // + // Deprecated: This property is deprecated, use MaxCapacity instead. + AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - // The database in the catalog in which the table resides. For Hive compatibility, - // this name is entirely lowercase. + // The JobCommand that executes this job (required). + Command *JobCommand `type:"structure"` + + // The connections used for this job. + Connections *ConnectionsList `type:"structure"` + + // The default arguments for this job. // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + DefaultArguments map[string]*string `type:"map"` + + // Description of the job being defined. 
+ Description *string `type:"string"` + + // An ExecutionProperty specifying the maximum number of concurrent runs allowed + // for this job. + ExecutionProperty *ExecutionProperty `type:"structure"` + + // Glue version determines the versions of Apache Spark and Python that AWS + // Glue supports. The Python version indicates the version supported for jobs + // of type Spark. + // + // For more information about the available AWS Glue versions and corresponding + // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + // in the developer guide. + GlueVersion *string `min:"1" type:"string"` + + // This field is reserved for future use. + LogUri *string `type:"string"` + + // The number of AWS Glue data processing units (DPUs) that can be allocated + // when this job runs. A DPU is a relative measure of processing power that + // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, + // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // + // The value that can be allocated for MaxCapacity depends on whether you are + // running a Python shell job or an Apache Spark ETL job: + // + // * When you specify a Python shell job (JobCommand.Name="pythonshell"), + // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), + // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job + // type cannot have a fractional DPU allocation. + MaxCapacity *float64 `type:"double"` + + // The maximum number of times to retry this job if it fails. + MaxRetries *int64 `type:"integer"` + + // Specifies the configuration properties of a job notification. + NotificationProperty *NotificationProperty `type:"structure"` + + // The number of workers of a defined workerType that are allocated when a job + // runs. + // + // The maximum number of workers you can define is 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` - // The maximum number of table versions to return in one response. - MaxResults *int64 `min:"1" type:"integer"` + // The name or Amazon Resource Name (ARN) of the IAM role associated with this + // job (required). + Role *string `type:"string"` - // A continuation token, if this is not the first call. - NextToken *string `type:"string"` + // The name of the SecurityConfiguration structure to be used with this job. + SecurityConfiguration *string `min:"1" type:"string"` - // The name of the table. For Hive compatibility, this name is entirely lowercase. + // The job timeout in minutes. This is the maximum time that a job run can consume + // resources before it is terminated and enters TIMEOUT status. The default + // is 2,880 minutes (48 hours). + Timeout *int64 `min:"1" type:"integer"` + + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of Standard, G.1X, or G.2X. // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of + // memory, 64 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs.
+ // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of + // memory, 128 GB disk), and provides 1 executor per worker. We recommend + // this worker type for memory-intensive jobs. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s GetTableVersionsInput) String() string { +func (s JobUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTableVersionsInput) GoString() string { +func (s JobUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetTableVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableVersionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) +func (s *JobUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobUpdate"} + if s.GlueVersion != nil && len(*s.GlueVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlueVersion", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.NotificationProperty != nil { + if err := s.NotificationProperty.Validate(); err != nil { + invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -18271,278 +25204,290 @@ func (s *GetTableVersionsInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetTableVersionsInput) SetCatalogId(v string) *GetTableVersionsInput { - s.CatalogId = &v +// SetAllocatedCapacity sets the AllocatedCapacity field's value. +func (s *JobUpdate) SetAllocatedCapacity(v int64) *JobUpdate { + s.AllocatedCapacity = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableVersionsInput) SetDatabaseName(v string) *GetTableVersionsInput { - s.DatabaseName = &v +// SetCommand sets the Command field's value. +func (s *JobUpdate) SetCommand(v *JobCommand) *JobUpdate { + s.Command = v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetTableVersionsInput) SetMaxResults(v int64) *GetTableVersionsInput { - s.MaxResults = &v +// SetConnections sets the Connections field's value. +func (s *JobUpdate) SetConnections(v *ConnectionsList) *JobUpdate { + s.Connections = v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetTableVersionsInput) SetNextToken(v string) *GetTableVersionsInput { - s.NextToken = &v +// SetDefaultArguments sets the DefaultArguments field's value. +func (s *JobUpdate) SetDefaultArguments(v map[string]*string) *JobUpdate { + s.DefaultArguments = v return s } -// SetTableName sets the TableName field's value. 
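// ----------------------------------------------------------------------------
// Editor's sketch (not part of the vendored aws-sdk-go source): per the
// JobUpdate documentation above, an update completely overwrites the previous
// job definition, so callers must resend every field they want to keep. A
// hedged example with hypothetical names; following the MaxCapacity notes,
// WorkerType and NumberOfWorkers are left unset when MaxCapacity is given.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	_, err := svc.UpdateJob(&glue.UpdateJobInput{
		JobName: aws.String("my-etl-job"), // hypothetical job name
		JobUpdate: &glue.JobUpdate{
			Command: &glue.JobCommand{
				Name:           aws.String("glueetl"),
				ScriptLocation: aws.String("s3://my-bucket/scripts/job.py"), // hypothetical
			},
			Role:        aws.String("arn:aws:iam::123456789012:role/GlueJobRole"), // hypothetical
			MaxCapacity: aws.Float64(10),
			Timeout:     aws.Int64(120),
		},
	})
	if err != nil {
		panic(err)
	}
}
// ----------------------------------------------------------------------------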
-func (s *GetTableVersionsInput) SetTableName(v string) *GetTableVersionsInput { - s.TableName = &v +// SetDescription sets the Description field's value. +func (s *JobUpdate) SetDescription(v string) *JobUpdate { + s.Description = &v return s } -type GetTableVersionsOutput struct { - _ struct{} `type:"structure"` +// SetExecutionProperty sets the ExecutionProperty field's value. +func (s *JobUpdate) SetExecutionProperty(v *ExecutionProperty) *JobUpdate { + s.ExecutionProperty = v + return s +} - // A continuation token, if the list of available versions does not include - // the last one. - NextToken *string `type:"string"` +// SetGlueVersion sets the GlueVersion field's value. +func (s *JobUpdate) SetGlueVersion(v string) *JobUpdate { + s.GlueVersion = &v + return s +} - // A list of strings identifying available versions of the specified table. - TableVersions []*TableVersion `type:"list"` +// SetLogUri sets the LogUri field's value. +func (s *JobUpdate) SetLogUri(v string) *JobUpdate { + s.LogUri = &v + return s } -// String returns the string representation -func (s GetTableVersionsOutput) String() string { - return awsutil.Prettify(s) +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *JobUpdate) SetMaxCapacity(v float64) *JobUpdate { + s.MaxCapacity = &v + return s } -// GoString returns the string representation -func (s GetTableVersionsOutput) GoString() string { - return s.String() +// SetMaxRetries sets the MaxRetries field's value. +func (s *JobUpdate) SetMaxRetries(v int64) *JobUpdate { + s.MaxRetries = &v + return s } -// SetNextToken sets the NextToken field's value. -func (s *GetTableVersionsOutput) SetNextToken(v string) *GetTableVersionsOutput { - s.NextToken = &v +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *JobUpdate) SetNotificationProperty(v *NotificationProperty) *JobUpdate { + s.NotificationProperty = v return s } -// SetTableVersions sets the TableVersions field's value. -func (s *GetTableVersionsOutput) SetTableVersions(v []*TableVersion) *GetTableVersionsOutput { - s.TableVersions = v +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *JobUpdate) SetNumberOfWorkers(v int64) *JobUpdate { + s.NumberOfWorkers = &v return s } -type GetTablesInput struct { +// SetRole sets the Role field's value. +func (s *JobUpdate) SetRole(v string) *JobUpdate { + s.Role = &v + return s +} + +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *JobUpdate) SetSecurityConfiguration(v string) *JobUpdate { + s.SecurityConfiguration = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *JobUpdate) SetTimeout(v int64) *JobUpdate { + s.Timeout = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *JobUpdate) SetWorkerType(v string) *JobUpdate { + s.WorkerType = &v + return s +} + +// A classifier for JSON content. +type JsonClassifier struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the tables reside. If none is supplied, - // the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // The time that this classifier was registered. + CreationTime *time.Time `type:"timestamp"` - // The database in the catalog whose tables to list. For Hive compatibility, - // this name is entirely lowercase. + // A JsonPath string defining the JSON data for the classifier to classify. 
+ // AWS Glue supports a subset of JsonPath, as described in Writing JsonPath + // Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // JsonPath is a required field + JsonPath *string `type:"string" required:"true"` - // A regular expression pattern. If present, only those tables whose names match - // the pattern are returned. - Expression *string `type:"string"` + // The time that this classifier was last updated. + LastUpdated *time.Time `type:"timestamp"` - // The maximum number of tables to return in a single response. - MaxResults *int64 `min:"1" type:"integer"` + // The name of the classifier. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // A continuation token, included if this is a continuation call. - NextToken *string `type:"string"` + // The version of this classifier. + Version *int64 `type:"long"` } // String returns the string representation -func (s GetTablesInput) String() string { +func (s JsonClassifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTablesInput) GoString() string { +func (s JsonClassifier) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTablesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTablesInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *GetTablesInput) SetCatalogId(v string) *GetTablesInput { - s.CatalogId = &v +// SetCreationTime sets the CreationTime field's value. +func (s *JsonClassifier) SetCreationTime(v time.Time) *JsonClassifier { + s.CreationTime = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTablesInput) SetDatabaseName(v string) *GetTablesInput { - s.DatabaseName = &v +// SetJsonPath sets the JsonPath field's value. +func (s *JsonClassifier) SetJsonPath(v string) *JsonClassifier { + s.JsonPath = &v return s } -// SetExpression sets the Expression field's value. -func (s *GetTablesInput) SetExpression(v string) *GetTablesInput { - s.Expression = &v +// SetLastUpdated sets the LastUpdated field's value. +func (s *JsonClassifier) SetLastUpdated(v time.Time) *JsonClassifier { + s.LastUpdated = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetTablesInput) SetMaxResults(v int64) *GetTablesInput { - s.MaxResults = &v +// SetName sets the Name field's value. +func (s *JsonClassifier) SetName(v string) *JsonClassifier { + s.Name = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetTablesInput) SetNextToken(v string) *GetTablesInput { - s.NextToken = &v +// SetVersion sets the Version field's value. 
+func (s *JsonClassifier) SetVersion(v int64) *JsonClassifier { + s.Version = &v return s } -type GetTablesOutput struct { +// Specifies configuration properties for a labeling set generation task run. +type LabelingSetGenerationTaskRunProperties struct { _ struct{} `type:"structure"` - // A continuation token, present if the current list segment is not the last. - NextToken *string `type:"string"` - - // A list of the requested Table objects. - TableList []*Table `type:"list"` + // The Amazon Simple Storage Service (Amazon S3) path where you will generate + // the labeling set. + OutputS3Path *string `type:"string"` } // String returns the string representation -func (s GetTablesOutput) String() string { +func (s LabelingSetGenerationTaskRunProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTablesOutput) GoString() string { +func (s LabelingSetGenerationTaskRunProperties) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetTablesOutput) SetNextToken(v string) *GetTablesOutput { - s.NextToken = &v +// SetOutputS3Path sets the OutputS3Path field's value. +func (s *LabelingSetGenerationTaskRunProperties) SetOutputS3Path(v string) *LabelingSetGenerationTaskRunProperties { + s.OutputS3Path = &v return s } -// SetTableList sets the TableList field's value. -func (s *GetTablesOutput) SetTableList(v []*Table) *GetTablesOutput { - s.TableList = v - return s -} +// Status and error information about the most recent crawl. +type LastCrawlInfo struct { + _ struct{} `type:"structure"` + + // If an error occurred, the error information about the last crawl. + ErrorMessage *string `type:"string"` + + // The log group for the last crawl. + LogGroup *string `min:"1" type:"string"` + + // The log stream for the last crawl. + LogStream *string `min:"1" type:"string"` -type GetTagsInput struct { - _ struct{} `type:"structure"` + // The prefix for a message about this crawl. + MessagePrefix *string `min:"1" type:"string"` - // The Amazon ARN of the resource for which to retrieve tags. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` + // The time at which the crawl started. + StartTime *time.Time `type:"timestamp"` + + // Status of the last crawl. + Status *string `type:"string" enum:"LastCrawlStatus"` } // String returns the string representation -func (s GetTagsInput) String() string { +func (s LastCrawlInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTagsInput) GoString() string { +func (s LastCrawlInfo) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTagsInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetErrorMessage sets the ErrorMessage field's value. +func (s *LastCrawlInfo) SetErrorMessage(v string) *LastCrawlInfo { + s.ErrorMessage = &v + return s } -// SetResourceArn sets the ResourceArn field's value. -func (s *GetTagsInput) SetResourceArn(v string) *GetTagsInput { - s.ResourceArn = &v +// SetLogGroup sets the LogGroup field's value. 
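// ----------------------------------------------------------------------------
// Editor's sketch (not part of the vendored aws-sdk-go source): JsonClassifier
// above is the read-side shape returned by the Get* APIs; creating one goes
// through CreateClassifier with a CreateJsonClassifierRequest. The classifier
// name and JsonPath are hypothetical; Glue accepts only the JsonPath subset
// referenced in the JsonPath field docs above.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	_, err := svc.CreateClassifier(&glue.CreateClassifierInput{
		JsonClassifier: &glue.CreateJsonClassifierRequest{
			Name:     aws.String("orders-json"), // hypothetical classifier name
			JsonPath: aws.String("$.orders[*]"), // hypothetical JsonPath
		},
	})
	if err != nil {
		panic(err)
	}
}
// ----------------------------------------------------------------------------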
+func (s *LastCrawlInfo) SetLogGroup(v string) *LastCrawlInfo { + s.LogGroup = &v return s } -type GetTagsOutput struct { - _ struct{} `type:"structure"` - - // The requested tags. - Tags map[string]*string `type:"map"` +// SetLogStream sets the LogStream field's value. +func (s *LastCrawlInfo) SetLogStream(v string) *LastCrawlInfo { + s.LogStream = &v + return s } -// String returns the string representation -func (s GetTagsOutput) String() string { - return awsutil.Prettify(s) +// SetMessagePrefix sets the MessagePrefix field's value. +func (s *LastCrawlInfo) SetMessagePrefix(v string) *LastCrawlInfo { + s.MessagePrefix = &v + return s } -// GoString returns the string representation -func (s GetTagsOutput) GoString() string { - return s.String() +// SetStartTime sets the StartTime field's value. +func (s *LastCrawlInfo) SetStartTime(v time.Time) *LastCrawlInfo { + s.StartTime = &v + return s } -// SetTags sets the Tags field's value. -func (s *GetTagsOutput) SetTags(v map[string]*string) *GetTagsOutput { - s.Tags = v +// SetStatus sets the Status field's value. +func (s *LastCrawlInfo) SetStatus(v string) *LastCrawlInfo { + s.Status = &v return s } -type GetTriggerInput struct { +type ListCrawlersInput struct { _ struct{} `type:"structure"` - // The name of the trigger to retrieve. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The maximum size of a list to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation request. + NextToken *string `type:"string"` + + // Specifies to return only these tagged resources. + Tags map[string]*string `type:"map"` } // String returns the string representation -func (s GetTriggerInput) String() string { +func (s ListCrawlersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTriggerInput) GoString() string { +func (s ListCrawlersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *ListCrawlersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCrawlersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -18551,66 +25496,84 @@ func (s *GetTriggerInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *GetTriggerInput) SetName(v string) *GetTriggerInput { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListCrawlersInput) SetMaxResults(v int64) *ListCrawlersInput { + s.MaxResults = &v return s } -type GetTriggerOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListCrawlersInput) SetNextToken(v string) *ListCrawlersInput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListCrawlersInput) SetTags(v map[string]*string) *ListCrawlersInput { + s.Tags = v + return s +} + +type ListCrawlersOutput struct { _ struct{} `type:"structure"` - // The requested trigger definition. 
- Trigger *Trigger `type:"structure"` + // The names of all crawlers in the account, or the crawlers with the specified + // tags. + CrawlerNames []*string `type:"list"` + + // A continuation token, if the returned list does not contain the last metric + // available. + NextToken *string `type:"string"` } // String returns the string representation -func (s GetTriggerOutput) String() string { +func (s ListCrawlersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTriggerOutput) GoString() string { +func (s ListCrawlersOutput) GoString() string { return s.String() } -// SetTrigger sets the Trigger field's value. -func (s *GetTriggerOutput) SetTrigger(v *Trigger) *GetTriggerOutput { - s.Trigger = v +// SetCrawlerNames sets the CrawlerNames field's value. +func (s *ListCrawlersOutput) SetCrawlerNames(v []*string) *ListCrawlersOutput { + s.CrawlerNames = v return s } -type GetTriggersInput struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListCrawlersOutput) SetNextToken(v string) *ListCrawlersOutput { + s.NextToken = &v + return s +} - // The name of the job for which to retrieve triggers. The trigger that can - // start this job will be returned, and if there is no such trigger, all triggers - // will be returned. - DependentJobName *string `min:"1" type:"string"` +type ListDevEndpointsInput struct { + _ struct{} `type:"structure"` - // The maximum size of the response. + // The maximum size of a list to return. MaxResults *int64 `min:"1" type:"integer"` - // A continuation token, if this is a continuation call. + // A continuation token, if this is a continuation request. NextToken *string `type:"string"` + + // Specifies to return only these tagged resources. + Tags map[string]*string `type:"map"` } // String returns the string representation -func (s GetTriggersInput) String() string { +func (s ListDevEndpointsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTriggersInput) GoString() string { +func (s ListDevEndpointsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetTriggersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTriggersInput"} - if s.DependentJobName != nil && len(*s.DependentJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DependentJobName", 1)) - } +func (s *ListDevEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDevEndpointsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -18621,101 +25584,86 @@ func (s *GetTriggersInput) Validate() error { return nil } -// SetDependentJobName sets the DependentJobName field's value. -func (s *GetTriggersInput) SetDependentJobName(v string) *GetTriggersInput { - s.DependentJobName = &v - return s -} - // SetMaxResults sets the MaxResults field's value. -func (s *GetTriggersInput) SetMaxResults(v int64) *GetTriggersInput { +func (s *ListDevEndpointsInput) SetMaxResults(v int64) *ListDevEndpointsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. 
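// ----------------------------------------------------------------------------
// Editor's sketch (not part of the vendored aws-sdk-go source): LastCrawlInfo
// above is surfaced on the Crawler returned by GetCrawler. A hedged example
// with a hypothetical crawler name.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	out, err := svc.GetCrawler(&glue.GetCrawlerInput{
		Name: aws.String("my-crawler"), // hypothetical crawler name
	})
	if err != nil {
		panic(err)
	}
	if c := out.Crawler; c != nil && c.LastCrawl != nil {
		// Status is a LastCrawlStatus enum; ErrorMessage is set only on failure.
		fmt.Println(aws.StringValue(c.LastCrawl.Status), aws.StringValue(c.LastCrawl.ErrorMessage))
	}
}
// ----------------------------------------------------------------------------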
-func (s *GetTriggersInput) SetNextToken(v string) *GetTriggersInput { +func (s *ListDevEndpointsInput) SetNextToken(v string) *ListDevEndpointsInput { s.NextToken = &v return s } -type GetTriggersOutput struct { +// SetTags sets the Tags field's value. +func (s *ListDevEndpointsInput) SetTags(v map[string]*string) *ListDevEndpointsInput { + s.Tags = v + return s +} + +type ListDevEndpointsOutput struct { _ struct{} `type:"structure"` - // A continuation token, if not all the requested triggers have yet been returned. - NextToken *string `type:"string"` + // The names of all the DevEndpoints in the account, or the DevEndpoints with + // the specified tags. + DevEndpointNames []*string `type:"list"` - // A list of triggers for the specified job. - Triggers []*Trigger `type:"list"` + // A continuation token, if the returned list does not contain the last metric + // available. + NextToken *string `type:"string"` } // String returns the string representation -func (s GetTriggersOutput) String() string { +func (s ListDevEndpointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetTriggersOutput) GoString() string { +func (s ListDevEndpointsOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetTriggersOutput) SetNextToken(v string) *GetTriggersOutput { - s.NextToken = &v +// SetDevEndpointNames sets the DevEndpointNames field's value. +func (s *ListDevEndpointsOutput) SetDevEndpointNames(v []*string) *ListDevEndpointsOutput { + s.DevEndpointNames = v return s } -// SetTriggers sets the Triggers field's value. -func (s *GetTriggersOutput) SetTriggers(v []*Trigger) *GetTriggersOutput { - s.Triggers = v +// SetNextToken sets the NextToken field's value. +func (s *ListDevEndpointsOutput) SetNextToken(v string) *ListDevEndpointsOutput { + s.NextToken = &v return s } -type GetUserDefinedFunctionInput struct { +type ListJobsInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the function to be retrieved is located. - // If none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // The maximum size of a list to return. + MaxResults *int64 `min:"1" type:"integer"` - // The name of the catalog database where the function is located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // A continuation token, if this is a continuation request. + NextToken *string `type:"string"` - // The name of the function. - // - // FunctionName is a required field - FunctionName *string `min:"1" type:"string" required:"true"` + // Specifies to return only these tagged resources. + Tags map[string]*string `type:"map"` } // String returns the string representation -func (s GetUserDefinedFunctionInput) String() string { +func (s ListJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetUserDefinedFunctionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetUserDefinedFunctionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.FunctionName == nil { - invalidParams.Add(request.NewErrParamRequired("FunctionName")) - } - if s.FunctionName != nil && len(*s.FunctionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) +func (s ListJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -18724,103 +25672,94 @@ func (s *GetUserDefinedFunctionInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetUserDefinedFunctionInput) SetCatalogId(v string) *GetUserDefinedFunctionInput { - s.CatalogId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { + s.MaxResults = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetUserDefinedFunctionInput) SetDatabaseName(v string) *GetUserDefinedFunctionInput { - s.DatabaseName = &v +// SetNextToken sets the NextToken field's value. +func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { + s.NextToken = &v return s } -// SetFunctionName sets the FunctionName field's value. -func (s *GetUserDefinedFunctionInput) SetFunctionName(v string) *GetUserDefinedFunctionInput { - s.FunctionName = &v +// SetTags sets the Tags field's value. +func (s *ListJobsInput) SetTags(v map[string]*string) *ListJobsInput { + s.Tags = v return s } -type GetUserDefinedFunctionOutput struct { +type ListJobsOutput struct { _ struct{} `type:"structure"` - // The requested function definition. - UserDefinedFunction *UserDefinedFunction `type:"structure"` + // The names of all jobs in the account, or the jobs with the specified tags. + JobNames []*string `type:"list"` + + // A continuation token, if the returned list does not contain the last metric + // available. + NextToken *string `type:"string"` } // String returns the string representation -func (s GetUserDefinedFunctionOutput) String() string { +func (s ListJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetUserDefinedFunctionOutput) GoString() string { +func (s ListJobsOutput) GoString() string { return s.String() } -// SetUserDefinedFunction sets the UserDefinedFunction field's value. -func (s *GetUserDefinedFunctionOutput) SetUserDefinedFunction(v *UserDefinedFunction) *GetUserDefinedFunctionOutput { - s.UserDefinedFunction = v +// SetJobNames sets the JobNames field's value. +func (s *ListJobsOutput) SetJobNames(v []*string) *ListJobsOutput { + s.JobNames = v return s } -type GetUserDefinedFunctionsInput struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. 
+func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { + s.NextToken = &v + return s +} - // The ID of the Data Catalog where the functions to be retrieved are located. - // If none is supplied, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` +type ListTriggersInput struct { + _ struct{} `type:"structure"` - // The name of the catalog database where the functions are located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The name of the job for which to retrieve triggers. The trigger that can + // start this job is returned. If there is no such trigger, all triggers are + // returned. + DependentJobName *string `min:"1" type:"string"` - // The maximum number of functions to return in one response. + // The maximum size of a list to return. MaxResults *int64 `min:"1" type:"integer"` - // A continuation token, if this is a continuation call. + // A continuation token, if this is a continuation request. NextToken *string `type:"string"` - // An optional function-name pattern string that filters the function definitions - // returned. - // - // Pattern is a required field - Pattern *string `min:"1" type:"string" required:"true"` + // Specifies to return only these tagged resources. + Tags map[string]*string `type:"map"` } // String returns the string representation -func (s GetUserDefinedFunctionsInput) String() string { +func (s ListTriggersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetUserDefinedFunctionsInput) GoString() string { +func (s ListTriggersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetUserDefinedFunctionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUserDefinedFunctionsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) +func (s *ListTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTriggersInput"} + if s.DependentJobName != nil && len(*s.DependentJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DependentJobName", 1)) } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Pattern == nil { - invalidParams.Add(request.NewErrParamRequired("Pattern")) - } - if s.Pattern != nil && len(*s.Pattern) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Pattern", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -18828,348 +25767,300 @@ func (s *GetUserDefinedFunctionsInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *GetUserDefinedFunctionsInput) SetCatalogId(v string) *GetUserDefinedFunctionsInput { - s.CatalogId = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetUserDefinedFunctionsInput) SetDatabaseName(v string) *GetUserDefinedFunctionsInput { - s.DatabaseName = &v +// SetDependentJobName sets the DependentJobName field's value. 
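// ----------------------------------------------------------------------------
// Editor's sketch (not part of the vendored aws-sdk-go source): the List*
// operations added in this diff all paginate the same way, returning a page
// of names plus a NextToken that is nil once the last page has been returned.
// A hedged ListJobs loop:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	in := &glue.ListJobsInput{MaxResults: aws.Int64(50)}
	var names []*string
	for {
		out, err := svc.ListJobs(in)
		if err != nil {
			panic(err)
		}
		names = append(names, out.JobNames...)
		if out.NextToken == nil {
			break // last page reached
		}
		in.NextToken = out.NextToken
	}
	fmt.Println(len(names), "jobs")
}
// ----------------------------------------------------------------------------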
+func (s *ListTriggersInput) SetDependentJobName(v string) *ListTriggersInput { + s.DependentJobName = &v return s } // SetMaxResults sets the MaxResults field's value. -func (s *GetUserDefinedFunctionsInput) SetMaxResults(v int64) *GetUserDefinedFunctionsInput { +func (s *ListTriggersInput) SetMaxResults(v int64) *ListTriggersInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *GetUserDefinedFunctionsInput) SetNextToken(v string) *GetUserDefinedFunctionsInput { +func (s *ListTriggersInput) SetNextToken(v string) *ListTriggersInput { s.NextToken = &v return s } -// SetPattern sets the Pattern field's value. -func (s *GetUserDefinedFunctionsInput) SetPattern(v string) *GetUserDefinedFunctionsInput { - s.Pattern = &v +// SetTags sets the Tags field's value. +func (s *ListTriggersInput) SetTags(v map[string]*string) *ListTriggersInput { + s.Tags = v return s } -type GetUserDefinedFunctionsOutput struct { +type ListTriggersOutput struct { _ struct{} `type:"structure"` - // A continuation token, if the list of functions returned does not include - // the last requested function. + // A continuation token, if the returned list does not contain the last metric + // available. NextToken *string `type:"string"` - // A list of requested function definitions. - UserDefinedFunctions []*UserDefinedFunction `type:"list"` + // The names of all triggers in the account, or the triggers with the specified + // tags. + TriggerNames []*string `type:"list"` } // String returns the string representation -func (s GetUserDefinedFunctionsOutput) String() string { +func (s ListTriggersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetUserDefinedFunctionsOutput) GoString() string { +func (s ListTriggersOutput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. -func (s *GetUserDefinedFunctionsOutput) SetNextToken(v string) *GetUserDefinedFunctionsOutput { +func (s *ListTriggersOutput) SetNextToken(v string) *ListTriggersOutput { s.NextToken = &v return s } -// SetUserDefinedFunctions sets the UserDefinedFunctions field's value. -func (s *GetUserDefinedFunctionsOutput) SetUserDefinedFunctions(v []*UserDefinedFunction) *GetUserDefinedFunctionsOutput { - s.UserDefinedFunctions = v +// SetTriggerNames sets the TriggerNames field's value. +func (s *ListTriggersOutput) SetTriggerNames(v []*string) *ListTriggersOutput { + s.TriggerNames = v return s } -// A classifier that uses grok patterns. -type GrokClassifier struct { +type ListWorkflowsInput struct { _ struct{} `type:"structure"` - // An identifier of the data format that the classifier matches, such as Twitter, - // JSON, Omniture logs, and so on. - // - // Classification is a required field - Classification *string `type:"string" required:"true"` - - // The time this classifier was registered. - CreationTime *time.Time `type:"timestamp"` - - // Optional custom grok patterns defined by this classifier. For more information, - // see custom patterns in Writing Custom Classifers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). - CustomPatterns *string `type:"string"` - - // The grok pattern applied to a data store by this classifier. For more information, - // see built-in patterns in Writing Custom Classifers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). 
- // - // GrokPattern is a required field - GrokPattern *string `min:"1" type:"string" required:"true"` - - // The time this classifier was last updated. - LastUpdated *time.Time `type:"timestamp"` - - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The maximum size of a list to return. + MaxResults *int64 `min:"1" type:"integer"` - // The version of this classifier. - Version *int64 `type:"long"` + // A continuation token, if this is a continuation request. + NextToken *string `type:"string"` } // String returns the string representation -func (s GrokClassifier) String() string { +func (s ListWorkflowsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrokClassifier) GoString() string { +func (s ListWorkflowsInput) GoString() string { return s.String() } -// SetClassification sets the Classification field's value. -func (s *GrokClassifier) SetClassification(v string) *GrokClassifier { - s.Classification = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *GrokClassifier) SetCreationTime(v time.Time) *GrokClassifier { - s.CreationTime = &v - return s -} - -// SetCustomPatterns sets the CustomPatterns field's value. -func (s *GrokClassifier) SetCustomPatterns(v string) *GrokClassifier { - s.CustomPatterns = &v - return s -} - -// SetGrokPattern sets the GrokPattern field's value. -func (s *GrokClassifier) SetGrokPattern(v string) *GrokClassifier { - s.GrokPattern = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWorkflowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWorkflowsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } -// SetLastUpdated sets the LastUpdated field's value. -func (s *GrokClassifier) SetLastUpdated(v time.Time) *GrokClassifier { - s.LastUpdated = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *GrokClassifier) SetName(v string) *GrokClassifier { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListWorkflowsInput) SetMaxResults(v int64) *ListWorkflowsInput { + s.MaxResults = &v return s } -// SetVersion sets the Version field's value. -func (s *GrokClassifier) SetVersion(v int64) *GrokClassifier { - s.Version = &v +// SetNextToken sets the NextToken field's value. +func (s *ListWorkflowsInput) SetNextToken(v string) *ListWorkflowsInput { + s.NextToken = &v return s } -type ImportCatalogToGlueInput struct { +type ListWorkflowsOutput struct { _ struct{} `type:"structure"` - // The ID of the catalog to import. Currently, this should be the AWS account - // ID. - CatalogId *string `min:"1" type:"string"` + // A continuation token, if not all workflow names have been returned. + NextToken *string `type:"string"` + + // List of names of workflows in the account. + Workflows []*string `min:"1" type:"list"` } // String returns the string representation -func (s ImportCatalogToGlueInput) String() string { +func (s ListWorkflowsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportCatalogToGlueInput) GoString() string { +func (s ListWorkflowsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ImportCatalogToGlueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportCatalogToGlueInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogId sets the CatalogId field's value. -func (s *ImportCatalogToGlueInput) SetCatalogId(v string) *ImportCatalogToGlueInput { - s.CatalogId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListWorkflowsOutput) SetNextToken(v string) *ListWorkflowsOutput { + s.NextToken = &v return s } -type ImportCatalogToGlueOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ImportCatalogToGlueOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ImportCatalogToGlueOutput) GoString() string { - return s.String() +// SetWorkflows sets the Workflows field's value. +func (s *ListWorkflowsOutput) SetWorkflows(v []*string) *ListWorkflowsOutput { + s.Workflows = v + return s } -// Specifies a JDBC data store to crawl. -type JdbcTarget struct { +// The location of resources. +type Location struct { _ struct{} `type:"structure"` - // The name of the connection to use to connect to the JDBC target. - ConnectionName *string `type:"string"` + // An Amazon DynamoDB table location. + DynamoDB []*CodeGenNodeArg `type:"list"` - // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). - Exclusions []*string `type:"list"` + // A JDBC location. + Jdbc []*CodeGenNodeArg `type:"list"` - // The path of the JDBC target. - Path *string `type:"string"` + // An Amazon Simple Storage Service (Amazon S3) location. + S3 []*CodeGenNodeArg `type:"list"` } // String returns the string representation -func (s JdbcTarget) String() string { +func (s Location) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JdbcTarget) GoString() string { +func (s Location) GoString() string { return s.String() } -// SetConnectionName sets the ConnectionName field's value. -func (s *JdbcTarget) SetConnectionName(v string) *JdbcTarget { - s.ConnectionName = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.DynamoDB != nil { + for i, v := range s.DynamoDB { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DynamoDB", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Jdbc != nil { + for i, v := range s.Jdbc { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Jdbc", i), err.(request.ErrInvalidParams)) + } + } + } + if s.S3 != nil { + for i, v := range s.S3 { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "S3", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDynamoDB sets the DynamoDB field's value. +func (s *Location) SetDynamoDB(v []*CodeGenNodeArg) *Location { + s.DynamoDB = v return s } -// SetExclusions sets the Exclusions field's value. 
-func (s *JdbcTarget) SetExclusions(v []*string) *JdbcTarget { - s.Exclusions = v +// SetJdbc sets the Jdbc field's value. +func (s *Location) SetJdbc(v []*CodeGenNodeArg) *Location { + s.Jdbc = v return s } -// SetPath sets the Path field's value. -func (s *JdbcTarget) SetPath(v string) *JdbcTarget { - s.Path = &v +// SetS3 sets the S3 field's value. +func (s *Location) SetS3(v []*CodeGenNodeArg) *Location { + s.S3 = v return s } -// Specifies a job definition. -type Job struct { +// A structure for a machine learning transform. +type MLTransform struct { _ struct{} `type:"structure"` - // This field is deprecated, use MaxCapacity instead. - // - // The number of AWS Glue data processing units (DPUs) allocated to runs of - // this job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is - // a relative measure of processing power that consists of 4 vCPUs of compute - // capacity and 16 GB of memory. For more information, see the AWS Glue pricing - // page (https://aws.amazon.com/glue/pricing/). - // - // Deprecated: This property is deprecated, use MaxCapacity instead. - AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - - // The JobCommand that executes this job. - Command *JobCommand `type:"structure"` - - // The connections used for this job. - Connections *ConnectionsList `type:"structure"` - - // The time and date that this job definition was created. + // A timestamp. The time and date that this machine learning transform was created. CreatedOn *time.Time `type:"timestamp"` - // The default arguments for this job, specified as name-value pairs. - // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. - // - // For information about how to specify and consume your own Job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. - // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - DefaultArguments map[string]*string `type:"map"` - - // Description of the job being defined. + // A user-defined, long-form description text for the machine learning transform. + // Descriptions are not guaranteed to be unique and can be changed at any time. Description *string `type:"string"` - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` + // An EvaluationMetrics object. Evaluation metrics provide an estimate of the + // quality of your machine learning transform. + EvaluationMetrics *EvaluationMetrics `type:"structure"` - // The last point in time when this job definition was modified. - LastModifiedOn *time.Time `type:"timestamp"` + // A list of AWS Glue table definitions used by the transform. + InputRecordTables []*Table `type:"list"` - // This field is reserved for future use. - LogUri *string `type:"string"` + // A count identifier for the labeling files generated by AWS Glue for this + // transform. As you create a better transform, you can iteratively download, + // label, and upload the labeling file. + LabelCount *int64 `type:"integer"` - // The number of AWS Glue data processing units (DPUs) that can be allocated - // when this job runs. 
A DPU is a relative measure of processing power that - // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, - // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). - // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. - // - // The value that can be allocated for MaxCapacity depends on whether you are - // running a python shell job, or an Apache Spark ETL job: - // - // * When you specify a python shell job (JobCommand.Name="pythonshell"), - // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // A timestamp. The last point in time when this machine learning transform + // was modified. + LastModifiedOn *time.Time `type:"timestamp"` + + // The number of AWS Glue data processing units (DPUs) that are allocated to + // task runs for this transform. You can allocate from 2 to 100 DPUs; the default + // is 10. A DPU is a relative measure of processing power that consists of 4 + // vCPUs of compute capacity and 16 GB of memory. For more information, see + // the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // When the WorkerType field is set to a value other than Standard, the MaxCapacity + // field is set automatically and becomes read-only. MaxCapacity *float64 `type:"double"` - // The maximum number of times to retry this job after a JobRun fails. + // The maximum number of times to retry after an MLTaskRun of the machine learning + // transform fails. MaxRetries *int64 `type:"integer"` - // The name you assign to this job definition. + // A user-defined name for the machine learning transform. Names are not guaranteed + // unique and can be changed at any time. Name *string `min:"1" type:"string"` - // Specifies configuration properties of a job notification. - NotificationProperty *NotificationProperty `type:"structure"` - - // The number of workers of a defined workerType that are allocated when a job - // runs. - // - // The maximum number of workers you can define are 299 for G.1X, and 149 for - // G.2X. + // The number of workers of a defined workerType that are allocated when a task + // of the transform runs. NumberOfWorkers *int64 `type:"integer"` - // The name or ARN of the IAM role associated with this job. + // A TransformParameters object. You can use parameters to tune (customize) + // the behavior of the machine learning transform by specifying what data it + // learns from and your preference on various tradeoffs (such as precision vs. + // recall, or accuracy vs. cost). + Parameters *TransformParameters `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the IAM role with the required + // permissions. This role needs permission to your Amazon Simple Storage Service + // (Amazon S3) sources, targets, temporary directory, scripts, and any libraries + // used by the task run for this transform. Role *string `type:"string"` - // The name of the SecurityConfiguration structure to be used with this job. - SecurityConfiguration *string `min:"1" type:"string"` + // A map of key-value pairs representing the columns and data types that this + // transform can run against. Has an upper bound of 100 columns. + Schema []*SchemaColumn `type:"list"` - // The job timeout in minutes.
This is the maximum time that a job run can consume - // resources before it is terminated and enters TIMEOUT status. The default - // is 2,880 minutes (48 hours). + // The current status of the machine learning transform. + Status *string `type:"string" enum:"TransformStatusType"` + + // The timeout in minutes of the machine learning transform. Timeout *int64 `min:"1" type:"integer"` - // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, or G.2X. + // The unique transform ID that is generated for the machine learning transform. + // The ID is guaranteed to be unique and does not change. + TransformId *string `min:"1" type:"string"` + + // The type of predefined worker that is allocated when a task of this transform + // runs. Accepts a value of Standard, G.1X, or G.2X. // // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of // memory and a 50GB disk, and 2 executors per worker. @@ -19183,637 +26074,625 @@ type Job struct { } // String returns the string representation -func (s Job) String() string { +func (s MLTransform) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Job) GoString() string { +func (s MLTransform) GoString() string { return s.String() } -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *Job) SetAllocatedCapacity(v int64) *Job { - s.AllocatedCapacity = &v +// SetCreatedOn sets the CreatedOn field's value. +func (s *MLTransform) SetCreatedOn(v time.Time) *MLTransform { + s.CreatedOn = &v return s } -// SetCommand sets the Command field's value. -func (s *Job) SetCommand(v *JobCommand) *Job { - s.Command = v +// SetDescription sets the Description field's value. +func (s *MLTransform) SetDescription(v string) *MLTransform { + s.Description = &v return s } -// SetConnections sets the Connections field's value. -func (s *Job) SetConnections(v *ConnectionsList) *Job { - s.Connections = v +// SetEvaluationMetrics sets the EvaluationMetrics field's value. +func (s *MLTransform) SetEvaluationMetrics(v *EvaluationMetrics) *MLTransform { + s.EvaluationMetrics = v return s } -// SetCreatedOn sets the CreatedOn field's value. -func (s *Job) SetCreatedOn(v time.Time) *Job { - s.CreatedOn = &v +// SetInputRecordTables sets the InputRecordTables field's value. +func (s *MLTransform) SetInputRecordTables(v []*Table) *MLTransform { + s.InputRecordTables = v return s } -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *Job) SetDefaultArguments(v map[string]*string) *Job { - s.DefaultArguments = v +// SetLabelCount sets the LabelCount field's value. +func (s *MLTransform) SetLabelCount(v int64) *MLTransform { + s.LabelCount = &v return s } -// SetDescription sets the Description field's value. -func (s *Job) SetDescription(v string) *Job { - s.Description = &v +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *MLTransform) SetLastModifiedOn(v time.Time) *MLTransform { + s.LastModifiedOn = &v return s } -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *Job) SetExecutionProperty(v *ExecutionProperty) *Job { - s.ExecutionProperty = v +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *MLTransform) SetMaxCapacity(v float64) *MLTransform { + s.MaxCapacity = &v return s } -// SetLastModifiedOn sets the LastModifiedOn field's value. -func (s *Job) SetLastModifiedOn(v time.Time) *Job { - s.LastModifiedOn = &v +// SetMaxRetries sets the MaxRetries field's value. 
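// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// The generated Set* methods that follow all return their receiver, so an
// MLTransform value can be populated by chaining them. The transform name and
// sizing below are made-up examples; the import path is this package's public one.
package example

import "github.com/aws/aws-sdk-go/service/glue"

func buildExampleTransform() *glue.MLTransform {
	return (&glue.MLTransform{}).
		SetName("example-dedupe-transform"). // hypothetical name
		SetWorkerType("G.1X").               // one of Standard, G.1X, or G.2X
		SetNumberOfWorkers(5).
		SetMaxRetries(2)
}
// ----- end sketch -----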
+func (s *MLTransform) SetMaxRetries(v int64) *MLTransform { + s.MaxRetries = &v return s } -// SetLogUri sets the LogUri field's value. -func (s *Job) SetLogUri(v string) *Job { - s.LogUri = &v +// SetName sets the Name field's value. +func (s *MLTransform) SetName(v string) *MLTransform { + s.Name = &v return s } -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *Job) SetMaxCapacity(v float64) *Job { - s.MaxCapacity = &v +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *MLTransform) SetNumberOfWorkers(v int64) *MLTransform { + s.NumberOfWorkers = &v return s } -// SetMaxRetries sets the MaxRetries field's value. -func (s *Job) SetMaxRetries(v int64) *Job { - s.MaxRetries = &v +// SetParameters sets the Parameters field's value. +func (s *MLTransform) SetParameters(v *TransformParameters) *MLTransform { + s.Parameters = v + return s +} + +// SetRole sets the Role field's value. +func (s *MLTransform) SetRole(v string) *MLTransform { + s.Role = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *MLTransform) SetSchema(v []*SchemaColumn) *MLTransform { + s.Schema = v + return s +} + +// SetStatus sets the Status field's value. +func (s *MLTransform) SetStatus(v string) *MLTransform { + s.Status = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *MLTransform) SetTimeout(v int64) *MLTransform { + s.Timeout = &v + return s +} + +// SetTransformId sets the TransformId field's value. +func (s *MLTransform) SetTransformId(v string) *MLTransform { + s.TransformId = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *MLTransform) SetWorkerType(v string) *MLTransform { + s.WorkerType = &v return s } -// SetName sets the Name field's value. -func (s *Job) SetName(v string) *Job { - s.Name = &v - return s +// Defines a mapping. +type MappingEntry struct { + _ struct{} `type:"structure"` + + // The source path. + SourcePath *string `type:"string"` + + // The name of the source table. + SourceTable *string `type:"string"` + + // The source type. + SourceType *string `type:"string"` + + // The target path. + TargetPath *string `type:"string"` + + // The target table. + TargetTable *string `type:"string"` + + // The target type. + TargetType *string `type:"string"` +} + +// String returns the string representation +func (s MappingEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MappingEntry) GoString() string { + return s.String() } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *Job) SetNotificationProperty(v *NotificationProperty) *Job { - s.NotificationProperty = v +// SetSourcePath sets the SourcePath field's value. +func (s *MappingEntry) SetSourcePath(v string) *MappingEntry { + s.SourcePath = &v return s } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. -func (s *Job) SetNumberOfWorkers(v int64) *Job { - s.NumberOfWorkers = &v +// SetSourceTable sets the SourceTable field's value. +func (s *MappingEntry) SetSourceTable(v string) *MappingEntry { + s.SourceTable = &v return s } -// SetRole sets the Role field's value. -func (s *Job) SetRole(v string) *Job { - s.Role = &v +// SetSourceType sets the SourceType field's value. +func (s *MappingEntry) SetSourceType(v string) *MappingEntry { + s.SourceType = &v return s } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. 
-func (s *Job) SetSecurityConfiguration(v string) *Job { - s.SecurityConfiguration = &v +// SetTargetPath sets the TargetPath field's value. +func (s *MappingEntry) SetTargetPath(v string) *MappingEntry { + s.TargetPath = &v return s } -// SetTimeout sets the Timeout field's value. -func (s *Job) SetTimeout(v int64) *Job { - s.Timeout = &v +// SetTargetTable sets the TargetTable field's value. +func (s *MappingEntry) SetTargetTable(v string) *MappingEntry { + s.TargetTable = &v return s } -// SetWorkerType sets the WorkerType field's value. -func (s *Job) SetWorkerType(v string) *Job { - s.WorkerType = &v +// SetTargetType sets the TargetType field's value. +func (s *MappingEntry) SetTargetType(v string) *MappingEntry { + s.TargetType = &v return s } -// Defines a point which a job can resume processing. -type JobBookmarkEntry struct { +// A node represents an AWS Glue component like Trigger, Job etc. which is part +// of a workflow. +type Node struct { _ struct{} `type:"structure"` - // The attempt ID number. - Attempt *int64 `type:"integer"` + // Details of the crawler when the node represents a crawler. + CrawlerDetails *CrawlerNodeDetails `type:"structure"` - // The bookmark itself. - JobBookmark *string `type:"string"` + // Details of the Job when the node represents a Job. + JobDetails *JobNodeDetails `type:"structure"` - // Name of the job in question. - JobName *string `type:"string"` + // The name of the AWS Glue component represented by the node. + Name *string `min:"1" type:"string"` - // The run ID number. - Run *int64 `type:"integer"` + // Details of the Trigger when the node represents a Trigger. + TriggerDetails *TriggerNodeDetails `type:"structure"` - // Version of the job. - Version *int64 `type:"integer"` + // The type of AWS Glue component represented by the node. + Type *string `type:"string" enum:"NodeType"` + + // The unique Id assigned to the node within the workflow. + UniqueId *string `min:"1" type:"string"` } // String returns the string representation -func (s JobBookmarkEntry) String() string { +func (s Node) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobBookmarkEntry) GoString() string { +func (s Node) GoString() string { return s.String() } -// SetAttempt sets the Attempt field's value. -func (s *JobBookmarkEntry) SetAttempt(v int64) *JobBookmarkEntry { - s.Attempt = &v +// SetCrawlerDetails sets the CrawlerDetails field's value. +func (s *Node) SetCrawlerDetails(v *CrawlerNodeDetails) *Node { + s.CrawlerDetails = v return s } -// SetJobBookmark sets the JobBookmark field's value. -func (s *JobBookmarkEntry) SetJobBookmark(v string) *JobBookmarkEntry { - s.JobBookmark = &v +// SetJobDetails sets the JobDetails field's value. +func (s *Node) SetJobDetails(v *JobNodeDetails) *Node { + s.JobDetails = v return s } -// SetJobName sets the JobName field's value. -func (s *JobBookmarkEntry) SetJobName(v string) *JobBookmarkEntry { - s.JobName = &v +// SetName sets the Name field's value. +func (s *Node) SetName(v string) *Node { + s.Name = &v return s } -// SetRun sets the Run field's value. -func (s *JobBookmarkEntry) SetRun(v int64) *JobBookmarkEntry { - s.Run = &v +// SetTriggerDetails sets the TriggerDetails field's value. +func (s *Node) SetTriggerDetails(v *TriggerNodeDetails) *Node { + s.TriggerDetails = v return s } -// SetVersion sets the Version field's value. -func (s *JobBookmarkEntry) SetVersion(v int64) *JobBookmarkEntry { - s.Version = &v +// SetType sets the Type field's value. 
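// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// A workflow graph is a list of Node values, and each node carries exactly one
// of the *Details structures according to its Type. The enum strings below
// ("CRAWLER", "JOB", "TRIGGER") are assumed from the NodeType enum defined
// elsewhere in this file.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func describeNode(n *glue.Node) {
	switch aws.StringValue(n.Type) {
	case "CRAWLER":
		fmt.Println("crawler node:", aws.StringValue(n.Name), n.CrawlerDetails)
	case "JOB":
		fmt.Println("job node:", aws.StringValue(n.Name), n.JobDetails)
	case "TRIGGER":
		fmt.Println("trigger node:", aws.StringValue(n.Name), n.TriggerDetails)
	}
}
// ----- end sketch -----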
+func (s *Node) SetType(v string) *Node { + s.Type = &v return s } -// Specifies how Job bookmark data should be encrypted. -type JobBookmarksEncryption struct { - _ struct{} `type:"structure"` +// SetUniqueId sets the UniqueId field's value. +func (s *Node) SetUniqueId(v string) *Node { + s.UniqueId = &v + return s +} - // The encryption mode to use for Job bookmarks data. - JobBookmarksEncryptionMode *string `type:"string" enum:"JobBookmarksEncryptionMode"` +// Specifies configuration properties of a notification. +type NotificationProperty struct { + _ struct{} `type:"structure"` - // The AWS ARN of the KMS key to be used to encrypt the data. - KmsKeyArn *string `type:"string"` + // After a job run starts, the number of minutes to wait before sending a job + // run delay notification. + NotifyDelayAfter *int64 `min:"1" type:"integer"` } // String returns the string representation -func (s JobBookmarksEncryption) String() string { +func (s NotificationProperty) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobBookmarksEncryption) GoString() string { +func (s NotificationProperty) GoString() string { return s.String() } -// SetJobBookmarksEncryptionMode sets the JobBookmarksEncryptionMode field's value. -func (s *JobBookmarksEncryption) SetJobBookmarksEncryptionMode(v string) *JobBookmarksEncryption { - s.JobBookmarksEncryptionMode = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationProperty) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationProperty"} + if s.NotifyDelayAfter != nil && *s.NotifyDelayAfter < 1 { + invalidParams.Add(request.NewErrParamMinValue("NotifyDelayAfter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetKmsKeyArn sets the KmsKeyArn field's value. -func (s *JobBookmarksEncryption) SetKmsKeyArn(v string) *JobBookmarksEncryption { - s.KmsKeyArn = &v +// SetNotifyDelayAfter sets the NotifyDelayAfter field's value. +func (s *NotificationProperty) SetNotifyDelayAfter(v int64) *NotificationProperty { + s.NotifyDelayAfter = &v return s } -// Specifies code executed when a job is run. -type JobCommand struct { +// Specifies the sort order of a sorted column. +type Order struct { _ struct{} `type:"structure"` - // The name of the job command: this must be glueetl, for an Apache Spark ETL - // job, or pythonshell, for a Python shell job. - Name *string `type:"string"` + // The name of the column. + // + // Column is a required field + Column *string `min:"1" type:"string" required:"true"` - // Specifies the S3 path to a script that executes a job (required). - ScriptLocation *string `type:"string"` + // Indicates that the column is sorted in ascending order (== 1), or in descending + // order (==0). + // + // SortOrder is a required field + SortOrder *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s JobCommand) String() string { +func (s Order) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobCommand) GoString() string { +func (s Order) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *JobCommand) SetName(v string) *JobCommand { - s.Name = &v +// Validate inspects the fields of the type to determine if they are valid. 
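// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// The generated Validate methods, like NotificationProperty.Validate above,
// collect every violated constraint into a single request.ErrInvalidParams
// before any request is sent, so client-side parameter errors surface together.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/glue"
)

func exampleValidate() {
	np := (&glue.NotificationProperty{}).SetNotifyDelayAfter(0)
	if err := np.Validate(); err != nil {
		// Reports the NotifyDelayAfter minimum-value (1) violation.
		fmt.Println("invalid parameters:", err)
	}
}
// ----- end sketch -----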
+func (s *Order) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Order"} + if s.Column == nil { + invalidParams.Add(request.NewErrParamRequired("Column")) + } + if s.Column != nil && len(*s.Column) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Column", 1)) + } + if s.SortOrder == nil { + invalidParams.Add(request.NewErrParamRequired("SortOrder")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumn sets the Column field's value. +func (s *Order) SetColumn(v string) *Order { + s.Column = &v return s } -// SetScriptLocation sets the ScriptLocation field's value. -func (s *JobCommand) SetScriptLocation(v string) *JobCommand { - s.ScriptLocation = &v +// SetSortOrder sets the SortOrder field's value. +func (s *Order) SetSortOrder(v int64) *Order { + s.SortOrder = &v return s } -// Contains information about a job run. -type JobRun struct { +// Represents a slice of table data. +type Partition struct { _ struct{} `type:"structure"` - // This field is deprecated, use MaxCapacity instead. - // - // The number of AWS Glue data processing units (DPUs) allocated to this JobRun. - // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative - // measure of processing power that consists of 4 vCPUs of compute capacity - // and 16 GB of memory. For more information, see the AWS Glue pricing page - // (https://aws.amazon.com/glue/pricing/). - // - // Deprecated: This property is deprecated, use MaxCapacity instead. - AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - - // The job arguments associated with this run. For this job run, they replace - // the default arguments set in the job definition itself. - // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. - // - // For information about how to specify and consume your own job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. - // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - Arguments map[string]*string `type:"map"` - - // The number of the attempt to run this job. - Attempt *int64 `type:"integer"` - - // The date and time this job run completed. - CompletedOn *time.Time `type:"timestamp"` - - // An error message associated with this job run. - ErrorMessage *string `type:"string"` - - // The amount of time (in seconds) that the job run consumed resources. - ExecutionTime *int64 `type:"integer"` - - // The ID of this job run. - Id *string `min:"1" type:"string"` - - // The name of the job definition being used in this run. - JobName *string `min:"1" type:"string"` - - // The current state of the job run. - JobRunState *string `type:"string" enum:"JobRunState"` - - // The last time this job run was modified. - LastModifiedOn *time.Time `type:"timestamp"` - - // The name of the log group for secure logging, that can be server-side encrypted - // in CloudWatch using KMS. This name can be /aws-glue/jobs/, in which case - // the default encryption is NONE. 
If you add a role name and SecurityConfiguration - // name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), - // then that security configuration will be used to encrypt the log group. - LogGroupName *string `type:"string"` - - // The number of AWS Glue data processing units (DPUs) that can be allocated - // when this job runs. A DPU is a relative measure of processing power that - // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, - // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). - // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. - // - // The value that can be allocated for MaxCapacity depends on whether you are - // running a python shell job, or an Apache Spark ETL job: - // - // * When you specify a python shell job (JobCommand.Name="pythonshell"), - // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. - MaxCapacity *float64 `type:"double"` - - // Specifies configuration properties of a job run notification. - NotificationProperty *NotificationProperty `type:"structure"` - - // The number of workers of a defined workerType that are allocated when a job - // runs. - // - // The maximum number of workers you can define are 299 for G.1X, and 149 for - // G.2X. - NumberOfWorkers *int64 `type:"integer"` - - // A list of predecessors to this job run. - PredecessorRuns []*Predecessor `type:"list"` + // The time at which the partition was created. + CreationTime *time.Time `type:"timestamp"` - // The ID of the previous run of this job. For example, the JobRunId specified - // in the StartJobRun action. - PreviousRunId *string `min:"1" type:"string"` + // The name of the catalog database in which to create the partition. + DatabaseName *string `min:"1" type:"string"` - // The name of the SecurityConfiguration structure to be used with this job - // run. - SecurityConfiguration *string `min:"1" type:"string"` + // The last time at which the partition was accessed. + LastAccessTime *time.Time `type:"timestamp"` - // The date and time at which this job run was started. - StartedOn *time.Time `type:"timestamp"` + // The last time at which column statistics were computed for this partition. + LastAnalyzedTime *time.Time `type:"timestamp"` - // The JobRun timeout in minutes. This is the maximum time that a job run can - // consume resources before it is terminated and enters TIMEOUT status. The - // default is 2,880 minutes (48 hours). This overrides the timeout value set - // in the parent job. - Timeout *int64 `min:"1" type:"integer"` + // These key-value pairs define partition parameters. + Parameters map[string]*string `type:"map"` - // The name of the trigger that started this job run. - TriggerName *string `min:"1" type:"string"` + // Provides information about the physical location where the partition is stored. + StorageDescriptor *StorageDescriptor `type:"structure"` - // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, or G.2X. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory - // and a 64GB disk, and 1 executor per worker. 
- // - // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory - // and a 128GB disk, and 1 executor per worker. - WorkerType *string `type:"string" enum:"WorkerType"` + // The name of the database table in which to create the partition. + TableName *string `min:"1" type:"string"` + + // The values of the partition. + Values []*string `type:"list"` } // String returns the string representation -func (s JobRun) String() string { +func (s Partition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobRun) GoString() string { +func (s Partition) GoString() string { return s.String() } -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *JobRun) SetAllocatedCapacity(v int64) *JobRun { - s.AllocatedCapacity = &v +// SetCreationTime sets the CreationTime field's value. +func (s *Partition) SetCreationTime(v time.Time) *Partition { + s.CreationTime = &v return s } -// SetArguments sets the Arguments field's value. -func (s *JobRun) SetArguments(v map[string]*string) *JobRun { - s.Arguments = v +// SetDatabaseName sets the DatabaseName field's value. +func (s *Partition) SetDatabaseName(v string) *Partition { + s.DatabaseName = &v return s } -// SetAttempt sets the Attempt field's value. -func (s *JobRun) SetAttempt(v int64) *JobRun { - s.Attempt = &v +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *Partition) SetLastAccessTime(v time.Time) *Partition { + s.LastAccessTime = &v return s } -// SetCompletedOn sets the CompletedOn field's value. -func (s *JobRun) SetCompletedOn(v time.Time) *JobRun { - s.CompletedOn = &v +// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. +func (s *Partition) SetLastAnalyzedTime(v time.Time) *Partition { + s.LastAnalyzedTime = &v return s } -// SetErrorMessage sets the ErrorMessage field's value. -func (s *JobRun) SetErrorMessage(v string) *JobRun { - s.ErrorMessage = &v +// SetParameters sets the Parameters field's value. +func (s *Partition) SetParameters(v map[string]*string) *Partition { + s.Parameters = v return s } -// SetExecutionTime sets the ExecutionTime field's value. -func (s *JobRun) SetExecutionTime(v int64) *JobRun { - s.ExecutionTime = &v +// SetStorageDescriptor sets the StorageDescriptor field's value. +func (s *Partition) SetStorageDescriptor(v *StorageDescriptor) *Partition { + s.StorageDescriptor = v return s } -// SetId sets the Id field's value. -func (s *JobRun) SetId(v string) *JobRun { - s.Id = &v +// SetTableName sets the TableName field's value. +func (s *Partition) SetTableName(v string) *Partition { + s.TableName = &v return s } -// SetJobName sets the JobName field's value. -func (s *JobRun) SetJobName(v string) *JobRun { - s.JobName = &v +// SetValues sets the Values field's value. +func (s *Partition) SetValues(v []*string) *Partition { + s.Values = v return s } -// SetJobRunState sets the JobRunState field's value. -func (s *JobRun) SetJobRunState(v string) *JobRun { - s.JobRunState = &v - return s +// Contains information about a partition error. +type PartitionError struct { + _ struct{} `type:"structure"` + + // The details about the partition error. + ErrorDetail *ErrorDetail `type:"structure"` + + // The values that define the partition. + PartitionValues []*string `type:"list"` } -// SetLastModifiedOn sets the LastModifiedOn field's value. 
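// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// A Partition is addressed by its parent database and table plus its ordered
// value list, and its StorageDescriptor carries the physical location of that
// slice of data. This sketch assumes the GetPartition operation defined
// elsewhere in this file; the database and table names are made up.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func showPartitionLocation() error {
	svc := glue.New(session.Must(session.NewSession()))
	out, err := svc.GetPartition(&glue.GetPartitionInput{
		DatabaseName:    aws.String("example_db"),
		TableName:       aws.String("example_events"),
		PartitionValues: aws.StringSlice([]string{"2019", "11"}),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Partition.StorageDescriptor.Location))
	return nil
}
// ----- end sketch -----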
-func (s *JobRun) SetLastModifiedOn(v time.Time) *JobRun { - s.LastModifiedOn = &v - return s +// String returns the string representation +func (s PartitionError) String() string { + return awsutil.Prettify(s) } -// SetLogGroupName sets the LogGroupName field's value. -func (s *JobRun) SetLogGroupName(v string) *JobRun { - s.LogGroupName = &v - return s +// GoString returns the string representation +func (s PartitionError) GoString() string { + return s.String() } -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *JobRun) SetMaxCapacity(v float64) *JobRun { - s.MaxCapacity = &v +// SetErrorDetail sets the ErrorDetail field's value. +func (s *PartitionError) SetErrorDetail(v *ErrorDetail) *PartitionError { + s.ErrorDetail = v return s } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *JobRun) SetNotificationProperty(v *NotificationProperty) *JobRun { - s.NotificationProperty = v +// SetPartitionValues sets the PartitionValues field's value. +func (s *PartitionError) SetPartitionValues(v []*string) *PartitionError { + s.PartitionValues = v return s } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. -func (s *JobRun) SetNumberOfWorkers(v int64) *JobRun { - s.NumberOfWorkers = &v - return s +// The structure used to create and update a partition. +type PartitionInput struct { + _ struct{} `type:"structure"` + + // The last time at which the partition was accessed. + LastAccessTime *time.Time `type:"timestamp"` + + // The last time at which column statistics were computed for this partition. + LastAnalyzedTime *time.Time `type:"timestamp"` + + // These key-value pairs define partition parameters. + Parameters map[string]*string `type:"map"` + + // Provides information about the physical location where the partition is stored. + StorageDescriptor *StorageDescriptor `type:"structure"` + + // The values of the partition. Although this parameter is not required by the + // SDK, you must specify this parameter for a valid input. + // + // The values for the keys for the new partition must be passed as an array + // of String objects that must be ordered in the same order as the partition + // keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values + // to the wrong keys. + Values []*string `type:"list"` } -// SetPredecessorRuns sets the PredecessorRuns field's value. -func (s *JobRun) SetPredecessorRuns(v []*Predecessor) *JobRun { - s.PredecessorRuns = v - return s +// String returns the string representation +func (s PartitionInput) String() string { + return awsutil.Prettify(s) } -// SetPreviousRunId sets the PreviousRunId field's value. -func (s *JobRun) SetPreviousRunId(v string) *JobRun { - s.PreviousRunId = &v - return s +// GoString returns the string representation +func (s PartitionInput) GoString() string { + return s.String() } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *JobRun) SetSecurityConfiguration(v string) *JobRun { - s.SecurityConfiguration = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *PartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PartitionInput"} + if s.StorageDescriptor != nil { + if err := s.StorageDescriptor.Validate(); err != nil { + invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStartedOn sets the StartedOn field's value. 
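// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// As the PartitionInput.Values documentation above stresses, the value list
// must follow the same order as the table's partition keys. For a hypothetical
// table partitioned by (year, month, day):
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func examplePartitionInput() *glue.PartitionInput {
	return (&glue.PartitionInput{}).
		// year, month, day -- in partition-key order; otherwise AWS Glue
		// attaches the values to the wrong keys.
		SetValues(aws.StringSlice([]string{"2019", "11", "07"}))
}
// ----- end sketch -----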
-func (s *JobRun) SetStartedOn(v time.Time) *JobRun { - s.StartedOn = &v +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *PartitionInput) SetLastAccessTime(v time.Time) *PartitionInput { + s.LastAccessTime = &v return s } -// SetTimeout sets the Timeout field's value. -func (s *JobRun) SetTimeout(v int64) *JobRun { - s.Timeout = &v +// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. +func (s *PartitionInput) SetLastAnalyzedTime(v time.Time) *PartitionInput { + s.LastAnalyzedTime = &v return s } -// SetTriggerName sets the TriggerName field's value. -func (s *JobRun) SetTriggerName(v string) *JobRun { - s.TriggerName = &v +// SetParameters sets the Parameters field's value. +func (s *PartitionInput) SetParameters(v map[string]*string) *PartitionInput { + s.Parameters = v return s } -// SetWorkerType sets the WorkerType field's value. -func (s *JobRun) SetWorkerType(v string) *JobRun { - s.WorkerType = &v +// SetStorageDescriptor sets the StorageDescriptor field's value. +func (s *PartitionInput) SetStorageDescriptor(v *StorageDescriptor) *PartitionInput { + s.StorageDescriptor = v return s } -// Specifies information used to update an existing job definition. Note that -// the previous job definition will be completely overwritten by this information. -type JobUpdate struct { - _ struct{} `type:"structure"` - - // This field is deprecated. Use MaxCapacity instead. - // - // The number of AWS Glue data processing units (DPUs) to allocate to this Job. - // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative - // measure of processing power that consists of 4 vCPUs of compute capacity - // and 16 GB of memory. For more information, see the AWS Glue pricing page - // (https://aws.amazon.com/glue/pricing/). - // - // Deprecated: This property is deprecated, use MaxCapacity instead. - AllocatedCapacity *int64 `deprecated:"true" type:"integer"` - - // The JobCommand that executes this job (required). - Command *JobCommand `type:"structure"` - - // The connections used for this job. - Connections *ConnectionsList `type:"structure"` - - // The default arguments for this job. - // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. - // - // For information about how to specify and consume your own Job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. - // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - DefaultArguments map[string]*string `type:"map"` - - // Description of the job being defined. - Description *string `type:"string"` - - // An ExecutionProperty specifying the maximum number of concurrent runs allowed - // for this job. - ExecutionProperty *ExecutionProperty `type:"structure"` - - // This field is reserved for future use. - LogUri *string `type:"string"` - - // The number of AWS Glue data processing units (DPUs) that can be allocated - // when this job runs. A DPU is a relative measure of processing power that - // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, - // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). 
- // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. - // - // The value that can be allocated for MaxCapacity depends on whether you are - // running a python shell job, or an Apache Spark ETL job: - // - // * When you specify a python shell job (JobCommand.Name="pythonshell"), - // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. +// SetValues sets the Values field's value. +func (s *PartitionInput) SetValues(v []*string) *PartitionInput { + s.Values = v + return s +} + +// Contains a list of values defining partitions. +type PartitionValueList struct { + _ struct{} `type:"structure"` + + // The list of values. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. - MaxCapacity *float64 `type:"double"` + // Values is a required field + Values []*string `type:"list" required:"true"` +} - // The maximum number of times to retry this job if it fails. - MaxRetries *int64 `type:"integer"` +// String returns the string representation +func (s PartitionValueList) String() string { + return awsutil.Prettify(s) +} - // Specifies configuration properties of a job notification. - NotificationProperty *NotificationProperty `type:"structure"` +// GoString returns the string representation +func (s PartitionValueList) GoString() string { + return s.String() +} - // The number of workers of a defined workerType that are allocated when a job - // runs. - // - // The maximum number of workers you can define are 299 for G.1X, and 149 for - // G.2X. - NumberOfWorkers *int64 `type:"integer"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *PartitionValueList) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PartitionValueList"} + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } - // The name or ARN of the IAM role associated with this job (required). - Role *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The name of the SecurityConfiguration structure to be used with this job. - SecurityConfiguration *string `min:"1" type:"string"` +// SetValues sets the Values field's value. +func (s *PartitionValueList) SetValues(v []*string) *PartitionValueList { + s.Values = v + return s +} - // The job timeout in minutes. This is the maximum time that a job run can consume - // resources before it is terminated and enters TIMEOUT status. The default - // is 2,880 minutes (48 hours). - Timeout *int64 `min:"1" type:"integer"` +// Specifies the physical requirements for a connection. +type PhysicalConnectionRequirements struct { + _ struct{} `type:"structure"` - // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, or G.2X. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory - // and a 64GB disk, and 1 executor per worker. - // - // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory - // and a 128GB disk, and 1 executor per worker. - WorkerType *string `type:"string" enum:"WorkerType"` + // The connection's Availability Zone. This field is redundant because the specified + // subnet implies the Availability Zone to be used. 
Currently the field must + // be populated, but it will be deprecated in the future. + AvailabilityZone *string `min:"1" type:"string"` + + // The security group ID list used by the connection. + SecurityGroupIdList []*string `type:"list"` + + // The subnet ID used by the connection. + SubnetId *string `min:"1" type:"string"` } // String returns the string representation -func (s JobUpdate) String() string { +func (s PhysicalConnectionRequirements) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobUpdate) GoString() string { +func (s PhysicalConnectionRequirements) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *JobUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobUpdate"} - if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) - } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) +func (s *PhysicalConnectionRequirements) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PhysicalConnectionRequirements"} + if s.AvailabilityZone != nil && len(*s.AvailabilityZone) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZone", 1)) } - if s.NotificationProperty != nil { - if err := s.NotificationProperty.Validate(); err != nil { - invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) - } + if s.SubnetId != nil && len(*s.SubnetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1)) } if invalidParams.Len() > 0 { @@ -19822,259 +26701,312 @@ func (s *JobUpdate) Validate() error { return nil } -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *JobUpdate) SetAllocatedCapacity(v int64) *JobUpdate { - s.AllocatedCapacity = &v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *PhysicalConnectionRequirements) SetAvailabilityZone(v string) *PhysicalConnectionRequirements { + s.AvailabilityZone = &v return s } -// SetCommand sets the Command field's value. -func (s *JobUpdate) SetCommand(v *JobCommand) *JobUpdate { - s.Command = v +// SetSecurityGroupIdList sets the SecurityGroupIdList field's value. +func (s *PhysicalConnectionRequirements) SetSecurityGroupIdList(v []*string) *PhysicalConnectionRequirements { + s.SecurityGroupIdList = v return s } -// SetConnections sets the Connections field's value. -func (s *JobUpdate) SetConnections(v *ConnectionsList) *JobUpdate { - s.Connections = v +// SetSubnetId sets the SubnetId field's value. +func (s *PhysicalConnectionRequirements) SetSubnetId(v string) *PhysicalConnectionRequirements { + s.SubnetId = &v return s } -// SetDefaultArguments sets the DefaultArguments field's value. -func (s *JobUpdate) SetDefaultArguments(v map[string]*string) *JobUpdate { - s.DefaultArguments = v - return s -} +// A job run that was used in the predicate of a conditional trigger that triggered +// this job run. +type Predecessor struct { + _ struct{} `type:"structure"` -// SetDescription sets the Description field's value. -func (s *JobUpdate) SetDescription(v string) *JobUpdate { - s.Description = &v - return s + // The name of the job definition used by the predecessor job run. + JobName *string `min:"1" type:"string"` + + // The job-run ID of the predecessor job run. 
+ RunId *string `min:"1" type:"string"` } -// SetExecutionProperty sets the ExecutionProperty field's value. -func (s *JobUpdate) SetExecutionProperty(v *ExecutionProperty) *JobUpdate { - s.ExecutionProperty = v - return s +// String returns the string representation +func (s Predecessor) String() string { + return awsutil.Prettify(s) } -// SetLogUri sets the LogUri field's value. -func (s *JobUpdate) SetLogUri(v string) *JobUpdate { - s.LogUri = &v - return s +// GoString returns the string representation +func (s Predecessor) GoString() string { + return s.String() } -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *JobUpdate) SetMaxCapacity(v float64) *JobUpdate { - s.MaxCapacity = &v +// SetJobName sets the JobName field's value. +func (s *Predecessor) SetJobName(v string) *Predecessor { + s.JobName = &v return s } -// SetMaxRetries sets the MaxRetries field's value. -func (s *JobUpdate) SetMaxRetries(v int64) *JobUpdate { - s.MaxRetries = &v +// SetRunId sets the RunId field's value. +func (s *Predecessor) SetRunId(v string) *Predecessor { + s.RunId = &v return s } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *JobUpdate) SetNotificationProperty(v *NotificationProperty) *JobUpdate { - s.NotificationProperty = v - return s +// Defines the predicate of the trigger, which determines when it fires. +type Predicate struct { + _ struct{} `type:"structure"` + + // A list of the conditions that determine when the trigger will fire. + Conditions []*Condition `type:"list"` + + // An optional field if only one condition is listed. If multiple conditions + // are listed, then this field is required. + Logical *string `type:"string" enum:"Logical"` } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. -func (s *JobUpdate) SetNumberOfWorkers(v int64) *JobUpdate { - s.NumberOfWorkers = &v - return s +// String returns the string representation +func (s Predicate) String() string { + return awsutil.Prettify(s) } -// SetRole sets the Role field's value. -func (s *JobUpdate) SetRole(v string) *JobUpdate { - s.Role = &v - return s +// GoString returns the string representation +func (s Predicate) GoString() string { + return s.String() } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *JobUpdate) SetSecurityConfiguration(v string) *JobUpdate { - s.SecurityConfiguration = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *Predicate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Predicate"} + if s.Conditions != nil { + for i, v := range s.Conditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Conditions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTimeout sets the Timeout field's value. -func (s *JobUpdate) SetTimeout(v int64) *JobUpdate { - s.Timeout = &v +// SetConditions sets the Conditions field's value. +func (s *Predicate) SetConditions(v []*Condition) *Predicate { + s.Conditions = v return s } -// SetWorkerType sets the WorkerType field's value. -func (s *JobUpdate) SetWorkerType(v string) *JobUpdate { - s.WorkerType = &v +// SetLogical sets the Logical field's value. +func (s *Predicate) SetLogical(v string) *Predicate { + s.Logical = &v return s } -// A classifier for JSON content. -type JsonClassifier struct { +// Permissions granted to a principal. 
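// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// Per the Logical documentation above, a Predicate listing more than one
// Condition must say how the conditions combine. Condition's fields and the
// "AND" Logical value are assumed from elsewhere in this file; the job names
// are made up.
package example

import "github.com/aws/aws-sdk-go/service/glue"

func examplePredicate() (*glue.Predicate, error) {
	p := (&glue.Predicate{}).
		SetLogical("AND"). // required here because two conditions are listed
		SetConditions([]*glue.Condition{
			(&glue.Condition{}).SetJobName("example-extract").
				SetState("SUCCEEDED").SetLogicalOperator("EQUALS"),
			(&glue.Condition{}).SetJobName("example-load").
				SetState("SUCCEEDED").SetLogicalOperator("EQUALS"),
		})
	// Validate walks the nested conditions, as shown above.
	return p, p.Validate()
}
// ----- end sketch -----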
+type PrincipalPermissions struct { _ struct{} `type:"structure"` - // The time this classifier was registered. - CreationTime *time.Time `type:"timestamp"` + // The permissions that are granted to the principal. + Permissions []*string `type:"list"` - // A JsonPath string defining the JSON data for the classifier to classify. - // AWS Glue supports a subset of JsonPath, as described in Writing JsonPath - // Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json). - // - // JsonPath is a required field - JsonPath *string `type:"string" required:"true"` + // The principal who is granted permissions. + Principal *DataLakePrincipal `type:"structure"` +} + +// String returns the string representation +func (s PrincipalPermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrincipalPermissions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PrincipalPermissions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PrincipalPermissions"} + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPermissions sets the Permissions field's value. +func (s *PrincipalPermissions) SetPermissions(v []*string) *PrincipalPermissions { + s.Permissions = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *PrincipalPermissions) SetPrincipal(v *DataLakePrincipal) *PrincipalPermissions { + s.Principal = v + return s +} - // The time this classifier was last updated. - LastUpdated *time.Time `type:"timestamp"` +// Defines a property predicate. +type PropertyPredicate struct { + _ struct{} `type:"structure"` - // The name of the classifier. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The comparator used to compare this property to others. + Comparator *string `type:"string" enum:"Comparator"` - // The version of this classifier. - Version *int64 `type:"long"` + // The key of the property. + Key *string `type:"string"` + + // The value of the property. + Value *string `type:"string"` } // String returns the string representation -func (s JsonClassifier) String() string { +func (s PropertyPredicate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JsonClassifier) GoString() string { +func (s PropertyPredicate) GoString() string { return s.String() } -// SetCreationTime sets the CreationTime field's value. -func (s *JsonClassifier) SetCreationTime(v time.Time) *JsonClassifier { - s.CreationTime = &v - return s -} - -// SetJsonPath sets the JsonPath field's value. -func (s *JsonClassifier) SetJsonPath(v string) *JsonClassifier { - s.JsonPath = &v - return s -} - -// SetLastUpdated sets the LastUpdated field's value. -func (s *JsonClassifier) SetLastUpdated(v time.Time) *JsonClassifier { - s.LastUpdated = &v +// SetComparator sets the Comparator field's value. +func (s *PropertyPredicate) SetComparator(v string) *PropertyPredicate { + s.Comparator = &v return s } -// SetName sets the Name field's value. -func (s *JsonClassifier) SetName(v string) *JsonClassifier { - s.Name = &v +// SetKey sets the Key field's value. 
+func (s *PropertyPredicate) SetKey(v string) *PropertyPredicate { + s.Key = &v return s } -// SetVersion sets the Version field's value. -func (s *JsonClassifier) SetVersion(v int64) *JsonClassifier { - s.Version = &v +// SetValue sets the Value field's value. +func (s *PropertyPredicate) SetValue(v string) *PropertyPredicate { + s.Value = &v return s } -// Status and error information about the most recent crawl. -type LastCrawlInfo struct { +type PutDataCatalogEncryptionSettingsInput struct { _ struct{} `type:"structure"` - // If an error occurred, the error information about the last crawl. - ErrorMessage *string `type:"string"` - - // The log group for the last crawl. - LogGroup *string `min:"1" type:"string"` - - // The log stream for the last crawl. - LogStream *string `min:"1" type:"string"` - - // The prefix for a message about this crawl. - MessagePrefix *string `min:"1" type:"string"` - - // The time at which the crawl started. - StartTime *time.Time `type:"timestamp"` + // The ID of the Data Catalog to set the security configuration for. If none + // is provided, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` - // Status of the last crawl. - Status *string `type:"string" enum:"LastCrawlStatus"` + // The security configuration to set. + // + // DataCatalogEncryptionSettings is a required field + DataCatalogEncryptionSettings *DataCatalogEncryptionSettings `type:"structure" required:"true"` } // String returns the string representation -func (s LastCrawlInfo) String() string { +func (s PutDataCatalogEncryptionSettingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LastCrawlInfo) GoString() string { +func (s PutDataCatalogEncryptionSettingsInput) GoString() string { return s.String() } -// SetErrorMessage sets the ErrorMessage field's value. -func (s *LastCrawlInfo) SetErrorMessage(v string) *LastCrawlInfo { - s.ErrorMessage = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDataCatalogEncryptionSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDataCatalogEncryptionSettingsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DataCatalogEncryptionSettings == nil { + invalidParams.Add(request.NewErrParamRequired("DataCatalogEncryptionSettings")) + } + if s.DataCatalogEncryptionSettings != nil { + if err := s.DataCatalogEncryptionSettings.Validate(); err != nil { + invalidParams.AddNested("DataCatalogEncryptionSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLogGroup sets the LogGroup field's value. -func (s *LastCrawlInfo) SetLogGroup(v string) *LastCrawlInfo { - s.LogGroup = &v +// SetCatalogId sets the CatalogId field's value. +func (s *PutDataCatalogEncryptionSettingsInput) SetCatalogId(v string) *PutDataCatalogEncryptionSettingsInput { + s.CatalogId = &v return s } -// SetLogStream sets the LogStream field's value. -func (s *LastCrawlInfo) SetLogStream(v string) *LastCrawlInfo { - s.LogStream = &v +// SetDataCatalogEncryptionSettings sets the DataCatalogEncryptionSettings field's value. 
+func (s *PutDataCatalogEncryptionSettingsInput) SetDataCatalogEncryptionSettings(v *DataCatalogEncryptionSettings) *PutDataCatalogEncryptionSettingsInput { + s.DataCatalogEncryptionSettings = v return s } -// SetMessagePrefix sets the MessagePrefix field's value. -func (s *LastCrawlInfo) SetMessagePrefix(v string) *LastCrawlInfo { - s.MessagePrefix = &v - return s +type PutDataCatalogEncryptionSettingsOutput struct { + _ struct{} `type:"structure"` } -// SetStartTime sets the StartTime field's value. -func (s *LastCrawlInfo) SetStartTime(v time.Time) *LastCrawlInfo { - s.StartTime = &v - return s +// String returns the string representation +func (s PutDataCatalogEncryptionSettingsOutput) String() string { + return awsutil.Prettify(s) } -// SetStatus sets the Status field's value. -func (s *LastCrawlInfo) SetStatus(v string) *LastCrawlInfo { - s.Status = &v - return s +// GoString returns the string representation +func (s PutDataCatalogEncryptionSettingsOutput) GoString() string { + return s.String() } -type ListCrawlersInput struct { +type PutResourcePolicyInput struct { _ struct{} `type:"structure"` - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` + // A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is + // used to create a new policy. If a value of NONE or a null value is used, + // the call will not depend on the existence of a policy. + PolicyExistsCondition *string `type:"string" enum:"ExistCondition"` - // A continuation token, if this is a continuation request. - NextToken *string `type:"string"` + // The hash value returned when the previous policy was set using PutResourcePolicy. + // Its purpose is to prevent concurrent modifications of a policy. Do not use + // this parameter if no previous policy has been set. + PolicyHashCondition *string `min:"1" type:"string"` - // Specifies to return only these tagged resources. - Tags map[string]*string `type:"map"` + // Contains the policy document to set, in JSON format. + // + // PolicyInJson is a required field + PolicyInJson *string `min:"2" type:"string" required:"true"` } // String returns the string representation -func (s ListCrawlersInput) String() string { +func (s PutResourcePolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCrawlersInput) GoString() string { +func (s PutResourcePolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListCrawlersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListCrawlersInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *PutResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} + if s.PolicyHashCondition != nil && len(*s.PolicyHashCondition) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyHashCondition", 1)) + } + if s.PolicyInJson == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyInJson")) + } + if s.PolicyInJson != nil && len(*s.PolicyInJson) < 2 { + invalidParams.Add(request.NewErrParamMinLen("PolicyInJson", 2)) } if invalidParams.Len() > 0 { @@ -20083,86 +27015,94 @@ func (s *ListCrawlersInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. 
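// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// PolicyHashCondition gives optimistic concurrency control: the hash returned
// by one PutResourcePolicy call is passed back on the next, so a concurrent
// modification makes the update fail instead of being silently overwritten.
// The policy documents here are placeholders supplied by the caller.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func examplePolicyUpdate(initial, updated string) error {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.PutResourcePolicy(&glue.PutResourcePolicyInput{
		PolicyInJson: aws.String(initial),
	})
	if err != nil {
		return err
	}

	// Guard the follow-up write with the hash from the first call.
	_, err = svc.PutResourcePolicy((&glue.PutResourcePolicyInput{}).
		SetPolicyInJson(updated).
		SetPolicyHashCondition(aws.StringValue(out.PolicyHash)))
	return err
}
// ----- end sketch -----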
-func (s *ListCrawlersInput) SetMaxResults(v int64) *ListCrawlersInput { - s.MaxResults = &v +// SetPolicyExistsCondition sets the PolicyExistsCondition field's value. +func (s *PutResourcePolicyInput) SetPolicyExistsCondition(v string) *PutResourcePolicyInput { + s.PolicyExistsCondition = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListCrawlersInput) SetNextToken(v string) *ListCrawlersInput { - s.NextToken = &v +// SetPolicyHashCondition sets the PolicyHashCondition field's value. +func (s *PutResourcePolicyInput) SetPolicyHashCondition(v string) *PutResourcePolicyInput { + s.PolicyHashCondition = &v return s } -// SetTags sets the Tags field's value. -func (s *ListCrawlersInput) SetTags(v map[string]*string) *ListCrawlersInput { - s.Tags = v +// SetPolicyInJson sets the PolicyInJson field's value. +func (s *PutResourcePolicyInput) SetPolicyInJson(v string) *PutResourcePolicyInput { + s.PolicyInJson = &v return s } -type ListCrawlersOutput struct { +type PutResourcePolicyOutput struct { _ struct{} `type:"structure"` - // The names of all crawlers in the account, or the crawlers with the specified - // tags. - CrawlerNames []*string `type:"list"` - - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` + // A hash of the policy that has just been set. This must be included in a subsequent + // call that overwrites or updates this policy. + PolicyHash *string `min:"1" type:"string"` } // String returns the string representation -func (s ListCrawlersOutput) String() string { +func (s PutResourcePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCrawlersOutput) GoString() string { +func (s PutResourcePolicyOutput) GoString() string { return s.String() } -// SetCrawlerNames sets the CrawlerNames field's value. -func (s *ListCrawlersOutput) SetCrawlerNames(v []*string) *ListCrawlersOutput { - s.CrawlerNames = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListCrawlersOutput) SetNextToken(v string) *ListCrawlersOutput { - s.NextToken = &v +// SetPolicyHash sets the PolicyHash field's value. +func (s *PutResourcePolicyOutput) SetPolicyHash(v string) *PutResourcePolicyOutput { + s.PolicyHash = &v return s } -type ListDevEndpointsInput struct { +type PutWorkflowRunPropertiesInput struct { _ struct{} `type:"structure"` - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` + // Name of the workflow which was run. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // A continuation token, if this is a continuation request. - NextToken *string `type:"string"` + // The ID of the workflow run for which the run properties should be updated. + // + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` - // Specifies to return only these tagged resources. - Tags map[string]*string `type:"map"` + // The properties to put for the specified run. 
+ // + // RunProperties is a required field + RunProperties map[string]*string `type:"map" required:"true"` } // String returns the string representation -func (s ListDevEndpointsInput) String() string { +func (s PutWorkflowRunPropertiesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDevEndpointsInput) GoString() string { +func (s PutWorkflowRunPropertiesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDevEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDevEndpointsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *PutWorkflowRunPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutWorkflowRunPropertiesInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) + } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + } + if s.RunProperties == nil { + invalidParams.Add(request.NewErrParamRequired("RunProperties")) } if invalidParams.Len() > 0 { @@ -20171,86 +27111,65 @@ func (s *ListDevEndpointsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListDevEndpointsInput) SetMaxResults(v int64) *ListDevEndpointsInput { - s.MaxResults = &v +// SetName sets the Name field's value. +func (s *PutWorkflowRunPropertiesInput) SetName(v string) *PutWorkflowRunPropertiesInput { + s.Name = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDevEndpointsInput) SetNextToken(v string) *ListDevEndpointsInput { - s.NextToken = &v +// SetRunId sets the RunId field's value. +func (s *PutWorkflowRunPropertiesInput) SetRunId(v string) *PutWorkflowRunPropertiesInput { + s.RunId = &v return s } -// SetTags sets the Tags field's value. -func (s *ListDevEndpointsInput) SetTags(v map[string]*string) *ListDevEndpointsInput { - s.Tags = v +// SetRunProperties sets the RunProperties field's value. +func (s *PutWorkflowRunPropertiesInput) SetRunProperties(v map[string]*string) *PutWorkflowRunPropertiesInput { + s.RunProperties = v return s } -type ListDevEndpointsOutput struct { - _ struct{} `type:"structure"` - - // The names of all DevEndpoints in the account, or the DevEndpoints with the - // specified tags. - DevEndpointNames []*string `type:"list"` - - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` -} - +type PutWorkflowRunPropertiesOutput struct { + _ struct{} `type:"structure"` +} + // String returns the string representation -func (s ListDevEndpointsOutput) String() string { +func (s PutWorkflowRunPropertiesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDevEndpointsOutput) GoString() string { +func (s PutWorkflowRunPropertiesOutput) GoString() string { return s.String() } -// SetDevEndpointNames sets the DevEndpointNames field's value. -func (s *ListDevEndpointsOutput) SetDevEndpointNames(v []*string) *ListDevEndpointsOutput { - s.DevEndpointNames = v - return s -} - -// SetNextToken sets the NextToken field's value. 
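// ----- editor's sketch (illustrative only; not part of the vendored patch) -----
// All three fields of PutWorkflowRunPropertiesInput are required, as the
// Validate method above enforces. The workflow name and run properties below
// are made-up examples.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func tagWorkflowRun(runID string) error {
	svc := glue.New(session.Must(session.NewSession()))
	_, err := svc.PutWorkflowRunProperties(&glue.PutWorkflowRunPropertiesInput{
		Name:          aws.String("example-nightly-etl"),
		RunId:         aws.String(runID),
		RunProperties: aws.StringMap(map[string]string{"stage": "validated"}),
	})
	return err
}
// ----- end sketch -----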
-func (s *ListDevEndpointsOutput) SetNextToken(v string) *ListDevEndpointsOutput { - s.NextToken = &v - return s -} - -type ListJobsInput struct { +type ResetJobBookmarkInput struct { _ struct{} `type:"structure"` - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation request. - NextToken *string `type:"string"` + // The name of the job in question. + // + // JobName is a required field + JobName *string `type:"string" required:"true"` - // Specifies to return only these tagged resources. - Tags map[string]*string `type:"map"` + // The unique run identifier associated with this job run. + RunId *string `type:"string"` } // String returns the string representation -func (s ListJobsInput) String() string { +func (s ResetJobBookmarkInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListJobsInput) GoString() string { +func (s ResetJobBookmarkInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *ResetJobBookmarkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetJobBookmarkInput"} + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) } if invalidParams.Len() > 0 { @@ -20259,93 +27178,67 @@ func (s *ListJobsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { - s.NextToken = &v +// SetJobName sets the JobName field's value. +func (s *ResetJobBookmarkInput) SetJobName(v string) *ResetJobBookmarkInput { + s.JobName = &v return s } -// SetTags sets the Tags field's value. -func (s *ListJobsInput) SetTags(v map[string]*string) *ListJobsInput { - s.Tags = v +// SetRunId sets the RunId field's value. +func (s *ResetJobBookmarkInput) SetRunId(v string) *ResetJobBookmarkInput { + s.RunId = &v return s } -type ListJobsOutput struct { +type ResetJobBookmarkOutput struct { _ struct{} `type:"structure"` - // The names of all jobs in the account, or the jobs with the specified tags. - JobNames []*string `type:"list"` - - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` + // The reset bookmark entry. + JobBookmarkEntry *JobBookmarkEntry `type:"structure"` } // String returns the string representation -func (s ListJobsOutput) String() string { +func (s ResetJobBookmarkOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListJobsOutput) GoString() string { +func (s ResetJobBookmarkOutput) GoString() string { return s.String() } -// SetJobNames sets the JobNames field's value. -func (s *ListJobsOutput) SetJobNames(v []*string) *ListJobsOutput { - s.JobNames = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { - s.NextToken = &v +// SetJobBookmarkEntry sets the JobBookmarkEntry field's value. 
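ResetJobBookmark needs only JobName; the added RunId field optionally pins the reset to a specific run. A hedged sketch with an invented job name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Reset the bookmark so the next run of the job reprocesses data from the
	// beginning instead of resuming where the last run stopped.
	out, err := svc.ResetJobBookmark(&glue.ResetJobBookmarkInput{
		JobName: aws.String("orders-to-parquet"), // hypothetical job name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // String() prettifies the reset JobBookmarkEntry
}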
+func (s *ResetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *ResetJobBookmarkOutput { + s.JobBookmarkEntry = v return s } -type ListTriggersInput struct { +// The URIs for function resources. +type ResourceUri struct { _ struct{} `type:"structure"` - // The name of the job for which to retrieve triggers. The trigger that can - // start this job will be returned, and if there is no such trigger, all triggers - // will be returned. - DependentJobName *string `min:"1" type:"string"` - - // The maximum size of a list to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A continuation token, if this is a continuation request. - NextToken *string `type:"string"` + // The type of the resource. + ResourceType *string `type:"string" enum:"ResourceType"` - // Specifies to return only these tagged resources. - Tags map[string]*string `type:"map"` + // The URI for accessing the resource. + Uri *string `min:"1" type:"string"` } // String returns the string representation -func (s ListTriggersInput) String() string { +func (s ResourceUri) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTriggersInput) GoString() string { +func (s ResourceUri) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTriggersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTriggersInput"} - if s.DependentJobName != nil && len(*s.DependentJobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DependentJobName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *ResourceUri) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceUri"} + if s.Uri != nil && len(*s.Uri) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Uri", 1)) } if invalidParams.Len() > 0 { @@ -20354,239 +27247,182 @@ func (s *ListTriggersInput) Validate() error { return nil } -// SetDependentJobName sets the DependentJobName field's value. -func (s *ListTriggersInput) SetDependentJobName(v string) *ListTriggersInput { - s.DependentJobName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTriggersInput) SetMaxResults(v int64) *ListTriggersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTriggersInput) SetNextToken(v string) *ListTriggersInput { - s.NextToken = &v +// SetResourceType sets the ResourceType field's value. +func (s *ResourceUri) SetResourceType(v string) *ResourceUri { + s.ResourceType = &v return s } -// SetTags sets the Tags field's value. -func (s *ListTriggersInput) SetTags(v map[string]*string) *ListTriggersInput { - s.Tags = v +// SetUri sets the Uri field's value. +func (s *ResourceUri) SetUri(v string) *ResourceUri { + s.Uri = &v return s } -type ListTriggersOutput struct { +// Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted. +type S3Encryption struct { _ struct{} `type:"structure"` - // A continuation token, if the returned list does not contain the last metric - // available. - NextToken *string `type:"string"` + // The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KmsKeyArn *string `type:"string"` - // The names of all triggers in the account, or the triggers with the specified - // tags. 
- TriggerNames []*string `type:"list"` + // The encryption mode to use for Amazon S3 data. + S3EncryptionMode *string `type:"string" enum:"S3EncryptionMode"` } // String returns the string representation -func (s ListTriggersOutput) String() string { +func (s S3Encryption) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTriggersOutput) GoString() string { +func (s S3Encryption) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTriggersOutput) SetNextToken(v string) *ListTriggersOutput { - s.NextToken = &v +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *S3Encryption) SetKmsKeyArn(v string) *S3Encryption { + s.KmsKeyArn = &v return s } -// SetTriggerNames sets the TriggerNames field's value. -func (s *ListTriggersOutput) SetTriggerNames(v []*string) *ListTriggersOutput { - s.TriggerNames = v +// SetS3EncryptionMode sets the S3EncryptionMode field's value. +func (s *S3Encryption) SetS3EncryptionMode(v string) *S3Encryption { + s.S3EncryptionMode = &v return s } -// The location of resources. -type Location struct { +// Specifies a data store in Amazon Simple Storage Service (Amazon S3). +type S3Target struct { _ struct{} `type:"structure"` - // A DynamoDB Table location. - DynamoDB []*CodeGenNodeArg `type:"list"` - - // A JDBC location. - Jdbc []*CodeGenNodeArg `type:"list"` + // A list of glob patterns used to exclude from the crawl. For more information, + // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). + Exclusions []*string `type:"list"` - // An Amazon S3 location. - S3 []*CodeGenNodeArg `type:"list"` + // The path to the Amazon S3 target. + Path *string `type:"string"` } // String returns the string representation -func (s Location) String() string { +func (s S3Target) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Location) GoString() string { +func (s S3Target) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Location) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Location"} - if s.DynamoDB != nil { - for i, v := range s.DynamoDB { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DynamoDB", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Jdbc != nil { - for i, v := range s.Jdbc { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Jdbc", i), err.(request.ErrInvalidParams)) - } - } - } - if s.S3 != nil { - for i, v := range s.S3 { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "S3", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDynamoDB sets the DynamoDB field's value. -func (s *Location) SetDynamoDB(v []*CodeGenNodeArg) *Location { - s.DynamoDB = v - return s -} - -// SetJdbc sets the Jdbc field's value. -func (s *Location) SetJdbc(v []*CodeGenNodeArg) *Location { - s.Jdbc = v +// SetExclusions sets the Exclusions field's value. +func (s *S3Target) SetExclusions(v []*string) *S3Target { + s.Exclusions = v return s } -// SetS3 sets the S3 field's value. 
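An S3Encryption entry is normally wrapped in an EncryptionConfiguration and persisted under a name with CreateSecurityConfiguration; a sketch under that assumption. The configuration name and KMS key ARN are placeholders, and SSE-KMS is one of the S3EncryptionMode enum values (the others are SSE-S3 and DISABLED):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Encrypt everything jobs write to Amazon S3 with a customer-managed KMS key.
	_, err := svc.CreateSecurityConfiguration(&glue.CreateSecurityConfigurationInput{
		Name: aws.String("etl-sse-kms"), // hypothetical configuration name
		EncryptionConfiguration: &glue.EncryptionConfiguration{
			S3Encryption: []*glue.S3Encryption{{
				S3EncryptionMode: aws.String("SSE-KMS"),
				KmsKeyArn:        aws.String("arn:aws:kms:us-east-1:123456789012:key/placeholder"),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}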
-func (s *Location) SetS3(v []*CodeGenNodeArg) *Location { - s.S3 = v +// SetPath sets the Path field's value. +func (s *S3Target) SetPath(v string) *S3Target { + s.Path = &v return s } -// Defines a mapping. -type MappingEntry struct { - _ struct{} `type:"structure"` - - // The source path. - SourcePath *string `type:"string"` - - // The name of the source table. - SourceTable *string `type:"string"` - - // The source type. - SourceType *string `type:"string"` - - // The target path. - TargetPath *string `type:"string"` +// A scheduling object using a cron statement to schedule an event. +type Schedule struct { + _ struct{} `type:"structure"` - // The target table. - TargetTable *string `type:"string"` + // A cron expression used to specify the schedule. For more information, see + // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, specify cron(15 12 + // * * ? *). + ScheduleExpression *string `type:"string"` - // The target type. - TargetType *string `type:"string"` + // The state of the schedule. + State *string `type:"string" enum:"ScheduleState"` } // String returns the string representation -func (s MappingEntry) String() string { +func (s Schedule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MappingEntry) GoString() string { +func (s Schedule) GoString() string { return s.String() } -// SetSourcePath sets the SourcePath field's value. -func (s *MappingEntry) SetSourcePath(v string) *MappingEntry { - s.SourcePath = &v +// SetScheduleExpression sets the ScheduleExpression field's value. +func (s *Schedule) SetScheduleExpression(v string) *Schedule { + s.ScheduleExpression = &v return s } -// SetSourceTable sets the SourceTable field's value. -func (s *MappingEntry) SetSourceTable(v string) *MappingEntry { - s.SourceTable = &v +// SetState sets the State field's value. +func (s *Schedule) SetState(v string) *Schedule { + s.State = &v return s } -// SetSourceType sets the SourceType field's value. -func (s *MappingEntry) SetSourceType(v string) *MappingEntry { - s.SourceType = &v - return s +// A policy that specifies update and deletion behaviors for the crawler. +type SchemaChangePolicy struct { + _ struct{} `type:"structure"` + + // The deletion behavior when the crawler finds a deleted object. + DeleteBehavior *string `type:"string" enum:"DeleteBehavior"` + + // The update behavior when the crawler finds a changed schema. + UpdateBehavior *string `type:"string" enum:"UpdateBehavior"` } -// SetTargetPath sets the TargetPath field's value. -func (s *MappingEntry) SetTargetPath(v string) *MappingEntry { - s.TargetPath = &v - return s +// String returns the string representation +func (s SchemaChangePolicy) String() string { + return awsutil.Prettify(s) } -// SetTargetTable sets the TargetTable field's value. -func (s *MappingEntry) SetTargetTable(v string) *MappingEntry { - s.TargetTable = &v +// GoString returns the string representation +func (s SchemaChangePolicy) GoString() string { + return s.String() +} + +// SetDeleteBehavior sets the DeleteBehavior field's value. +func (s *SchemaChangePolicy) SetDeleteBehavior(v string) *SchemaChangePolicy { + s.DeleteBehavior = &v return s } -// SetTargetType sets the TargetType field's value. -func (s *MappingEntry) SetTargetType(v string) *MappingEntry { - s.TargetType = &v +// SetUpdateBehavior sets the UpdateBehavior field's value. 
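S3Target, Schedule, and SchemaChangePolicy usually travel together in a CreateCrawler call: the target carries the glob exclusions described above, and the schedule uses the cron(...) form from the Schedule doc comment. A sketch with invented names and paths:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	_, err := svc.CreateCrawler(&glue.CreateCrawlerInput{
		Name:         aws.String("sales-crawler"),                       // hypothetical
		Role:         aws.String("arn:aws:iam::123456789012:role/glue"), // hypothetical
		DatabaseName: aws.String("sales"),
		Targets: &glue.CrawlerTargets{
			S3Targets: []*glue.S3Target{{
				Path:       aws.String("s3://example-bucket/sales/"),
				Exclusions: []*string{aws.String("**/_temporary/**")},
			}},
		},
		// Daily at 12:15 UTC, in the cron form shown in the Schedule doc above.
		Schedule: aws.String("cron(15 12 * * ? *)"),
		SchemaChangePolicy: &glue.SchemaChangePolicy{
			UpdateBehavior: aws.String("UPDATE_IN_DATABASE"),
			DeleteBehavior: aws.String("LOG"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}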
+func (s *SchemaChangePolicy) SetUpdateBehavior(v string) *SchemaChangePolicy { + s.UpdateBehavior = &v return s } -// Specifies configuration properties of a notification. -type NotificationProperty struct { +// A key-value pair representing a column and data type that this transform +// can run against. The Schema parameter of the MLTransform may contain up to +// 100 of these structures. +type SchemaColumn struct { _ struct{} `type:"structure"` - // After a job run starts, the number of minutes to wait before sending a job - // run delay notification. - NotifyDelayAfter *int64 `min:"1" type:"integer"` + // The type of data in the column. + DataType *string `type:"string"` + + // The name of the column. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s NotificationProperty) String() string { +func (s SchemaColumn) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotificationProperty) GoString() string { +func (s SchemaColumn) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationProperty) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationProperty"} - if s.NotifyDelayAfter != nil && *s.NotifyDelayAfter < 1 { - invalidParams.Add(request.NewErrParamMinValue("NotifyDelayAfter", 1)) +func (s *SchemaColumn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SchemaColumn"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -20595,49 +27431,62 @@ func (s *NotificationProperty) Validate() error { return nil } -// SetNotifyDelayAfter sets the NotifyDelayAfter field's value. -func (s *NotificationProperty) SetNotifyDelayAfter(v int64) *NotificationProperty { - s.NotifyDelayAfter = &v +// SetDataType sets the DataType field's value. +func (s *SchemaColumn) SetDataType(v string) *SchemaColumn { + s.DataType = &v return s } -// Specifies the sort order of a sorted column. -type Order struct { +// SetName sets the Name field's value. +func (s *SchemaColumn) SetName(v string) *SchemaColumn { + s.Name = &v + return s +} + +type SearchTablesInput struct { _ struct{} `type:"structure"` - // The name of the column. - // - // Column is a required field - Column *string `min:"1" type:"string" required:"true"` + // A unique identifier, consisting of account_id/datalake. + CatalogId *string `min:"1" type:"string"` - // Indicates that the column is sorted in ascending order (== 1), or in descending - // order (==0). + // A list of key-value pairs, and a comparator used to filter the search results. + // Returns all entities matching the predicate. + Filters []*PropertyPredicate `type:"list"` + + // The maximum number of tables to return in a single response. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, included if this is a continuation call. + NextToken *string `type:"string"` + + // A string used for a text search. // - // SortOrder is a required field - SortOrder *int64 `type:"integer" required:"true"` + // Specifying a value in quotes filters based on an exact match to the value. + SearchText *string `type:"string"` + + // A list of criteria for sorting the results by a field name, in an ascending + // or descending order. 
+ SortCriteria []*SortCriterion `type:"list"` } // String returns the string representation -func (s Order) String() string { +func (s SearchTablesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Order) GoString() string { +func (s SearchTablesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Order) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Order"} - if s.Column == nil { - invalidParams.Add(request.NewErrParamRequired("Column")) - } - if s.Column != nil && len(*s.Column) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Column", 1)) +func (s *SearchTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchTablesInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.SortOrder == nil { - invalidParams.Add(request.NewErrParamRequired("SortOrder")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -20646,176 +27495,208 @@ func (s *Order) Validate() error { return nil } -// SetColumn sets the Column field's value. -func (s *Order) SetColumn(v string) *Order { - s.Column = &v +// SetCatalogId sets the CatalogId field's value. +func (s *SearchTablesInput) SetCatalogId(v string) *SearchTablesInput { + s.CatalogId = &v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *Order) SetSortOrder(v int64) *Order { - s.SortOrder = &v +// SetFilters sets the Filters field's value. +func (s *SearchTablesInput) SetFilters(v []*PropertyPredicate) *SearchTablesInput { + s.Filters = v return s } -// Represents a slice of table data. -type Partition struct { - _ struct{} `type:"structure"` - - // The time at which the partition was created. - CreationTime *time.Time `type:"timestamp"` - - // The name of the catalog database where the table in question is located. - DatabaseName *string `min:"1" type:"string"` +// SetMaxResults sets the MaxResults field's value. +func (s *SearchTablesInput) SetMaxResults(v int64) *SearchTablesInput { + s.MaxResults = &v + return s +} - // The last time at which the partition was accessed. - LastAccessTime *time.Time `type:"timestamp"` +// SetNextToken sets the NextToken field's value. +func (s *SearchTablesInput) SetNextToken(v string) *SearchTablesInput { + s.NextToken = &v + return s +} - // The last time at which column statistics were computed for this partition. - LastAnalyzedTime *time.Time `type:"timestamp"` +// SetSearchText sets the SearchText field's value. +func (s *SearchTablesInput) SetSearchText(v string) *SearchTablesInput { + s.SearchText = &v + return s +} - // These key-value pairs define partition parameters. - Parameters map[string]*string `type:"map"` +// SetSortCriteria sets the SortCriteria field's value. +func (s *SearchTablesInput) SetSortCriteria(v []*SortCriterion) *SearchTablesInput { + s.SortCriteria = v + return s +} - // Provides information about the physical location where the partition is stored. - StorageDescriptor *StorageDescriptor `type:"structure"` +type SearchTablesOutput struct { + _ struct{} `type:"structure"` - // The name of the table in question. - TableName *string `min:"1" type:"string"` + // A continuation token, present if the current list segment is not the last. + NextToken *string `type:"string"` - // The values of the partition. 
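SearchTables pages through results with NextToken like the other catalog calls. A sketch of a paged query; the UpdateTime sort field is an assumption, and the raw "DESC" string is one of the two Sort enum values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	input := &glue.SearchTablesInput{
		SearchText: aws.String("orders"),
		SortCriteria: []*glue.SortCriterion{{
			FieldName: aws.String("UpdateTime"), // assumed sortable field name
			Sort:      aws.String("DESC"),
		}},
		MaxResults: aws.Int64(50),
	}

	// Page until NextToken comes back empty.
	for {
		out, err := svc.SearchTables(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range out.TableList {
			fmt.Println(aws.StringValue(t.DatabaseName), aws.StringValue(t.Name))
		}
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}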
- Values []*string `type:"list"` + // A list of the requested Table objects. The SearchTables response returns + // only the tables that you have access to. + TableList []*TableData `type:"list"` } // String returns the string representation -func (s Partition) String() string { +func (s SearchTablesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Partition) GoString() string { +func (s SearchTablesOutput) GoString() string { return s.String() } -// SetCreationTime sets the CreationTime field's value. -func (s *Partition) SetCreationTime(v time.Time) *Partition { - s.CreationTime = &v +// SetNextToken sets the NextToken field's value. +func (s *SearchTablesOutput) SetNextToken(v string) *SearchTablesOutput { + s.NextToken = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *Partition) SetDatabaseName(v string) *Partition { - s.DatabaseName = &v +// SetTableList sets the TableList field's value. +func (s *SearchTablesOutput) SetTableList(v []*TableData) *SearchTablesOutput { + s.TableList = v return s } -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *Partition) SetLastAccessTime(v time.Time) *Partition { - s.LastAccessTime = &v - return s +// Specifies a security configuration. +type SecurityConfiguration struct { + _ struct{} `type:"structure"` + + // The time at which this security configuration was created. + CreatedTimeStamp *time.Time `type:"timestamp"` + + // The encryption configuration associated with this security configuration. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The name of the security configuration. + Name *string `min:"1" type:"string"` } -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *Partition) SetLastAnalyzedTime(v time.Time) *Partition { - s.LastAnalyzedTime = &v - return s +// String returns the string representation +func (s SecurityConfiguration) String() string { + return awsutil.Prettify(s) } -// SetParameters sets the Parameters field's value. -func (s *Partition) SetParameters(v map[string]*string) *Partition { - s.Parameters = v - return s +// GoString returns the string representation +func (s SecurityConfiguration) GoString() string { + return s.String() } -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *Partition) SetStorageDescriptor(v *StorageDescriptor) *Partition { - s.StorageDescriptor = v +// SetCreatedTimeStamp sets the CreatedTimeStamp field's value. +func (s *SecurityConfiguration) SetCreatedTimeStamp(v time.Time) *SecurityConfiguration { + s.CreatedTimeStamp = &v return s } -// SetTableName sets the TableName field's value. -func (s *Partition) SetTableName(v string) *Partition { - s.TableName = &v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *SecurityConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *SecurityConfiguration { + s.EncryptionConfiguration = v return s } -// SetValues sets the Values field's value. -func (s *Partition) SetValues(v []*string) *Partition { - s.Values = v +// SetName sets the Name field's value. +func (s *SecurityConfiguration) SetName(v string) *SecurityConfiguration { + s.Name = &v return s } -// Contains information about a partition error. -type PartitionError struct { +// Defines a non-overlapping region of a table's partitions, allowing multiple +// requests to be executed in parallel. 
+type Segment struct { _ struct{} `type:"structure"` - // Details about the partition error. - ErrorDetail *ErrorDetail `type:"structure"` + // The zero-based index number of the segment. For example, if the total number + // of segments is 4, SegmentNumber values range from 0 through 3. + // + // SegmentNumber is a required field + SegmentNumber *int64 `type:"integer" required:"true"` - // The values that define the partition. - PartitionValues []*string `type:"list"` + // The total number of segments. + // + // TotalSegments is a required field + TotalSegments *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s PartitionError) String() string { +func (s Segment) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PartitionError) GoString() string { +func (s Segment) GoString() string { return s.String() } -// SetErrorDetail sets the ErrorDetail field's value. -func (s *PartitionError) SetErrorDetail(v *ErrorDetail) *PartitionError { - s.ErrorDetail = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *Segment) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Segment"} + if s.SegmentNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentNumber")) + } + if s.TotalSegments == nil { + invalidParams.Add(request.NewErrParamRequired("TotalSegments")) + } + if s.TotalSegments != nil && *s.TotalSegments < 1 { + invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSegmentNumber sets the SegmentNumber field's value. +func (s *Segment) SetSegmentNumber(v int64) *Segment { + s.SegmentNumber = &v return s } -// SetPartitionValues sets the PartitionValues field's value. -func (s *PartitionError) SetPartitionValues(v []*string) *PartitionError { - s.PartitionValues = v +// SetTotalSegments sets the TotalSegments field's value. +func (s *Segment) SetTotalSegments(v int64) *Segment { + s.TotalSegments = &v return s } -// The structure used to create and update a partion. -type PartitionInput struct { +// Information about a serialization/deserialization program (SerDe) that serves +// as an extractor and loader. +type SerDeInfo struct { _ struct{} `type:"structure"` - // The last time at which the partition was accessed. - LastAccessTime *time.Time `type:"timestamp"` - - // The last time at which column statistics were computed for this partition. - LastAnalyzedTime *time.Time `type:"timestamp"` + // Name of the SerDe. + Name *string `min:"1" type:"string"` - // These key-value pairs define partition parameters. + // These key-value pairs define initialization parameters for the SerDe. Parameters map[string]*string `type:"map"` - // Provides information about the physical location where the partition is stored. - StorageDescriptor *StorageDescriptor `type:"structure"` - - // The values of the partition. Although this parameter is not required by the - // SDK, you must specify this parameter for a valid input. - Values []*string `type:"list"` + // Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. 
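Segment is consumed by calls such as GetPartitions: N workers each pass TotalSegments = N and a distinct SegmentNumber so that every worker scans a disjoint slice of the table's partitions. A sketch with four goroutines; the database and table names are invented:

package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	const total = 4
	var wg sync.WaitGroup
	for n := int64(0); n < total; n++ {
		wg.Add(1)
		go func(segment int64) {
			defer wg.Done()
			// Each worker sees a non-overlapping region of the partitions.
			err := svc.GetPartitionsPages(&glue.GetPartitionsInput{
				DatabaseName: aws.String("sales"),
				TableName:    aws.String("orders"),
				Segment: &glue.Segment{
					SegmentNumber: aws.Int64(segment),
					TotalSegments: aws.Int64(total),
				},
			}, func(page *glue.GetPartitionsOutput, lastPage bool) bool {
				fmt.Printf("segment %d: %d partitions\n", segment, len(page.Partitions))
				return true // keep paging
			})
			if err != nil {
				log.Println("segment", segment, "failed:", err)
			}
		}(n)
	}
	wg.Wait()
}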
+ SerializationLibrary *string `min:"1" type:"string"` } // String returns the string representation -func (s PartitionInput) String() string { +func (s SerDeInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PartitionInput) GoString() string { +func (s SerDeInfo) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PartitionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PartitionInput"} - if s.StorageDescriptor != nil { - if err := s.StorageDescriptor.Validate(); err != nil { - invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) - } +func (s *SerDeInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SerDeInfo"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SerializationLibrary != nil && len(*s.SerializationLibrary) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SerializationLibrary", 1)) } if invalidParams.Len() > 0 { @@ -20824,109 +27705,127 @@ func (s *PartitionInput) Validate() error { return nil } -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *PartitionInput) SetLastAccessTime(v time.Time) *PartitionInput { - s.LastAccessTime = &v +// SetName sets the Name field's value. +func (s *SerDeInfo) SetName(v string) *SerDeInfo { + s.Name = &v return s } -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *PartitionInput) SetLastAnalyzedTime(v time.Time) *PartitionInput { - s.LastAnalyzedTime = &v +// SetParameters sets the Parameters field's value. +func (s *SerDeInfo) SetParameters(v map[string]*string) *SerDeInfo { + s.Parameters = v return s } -// SetParameters sets the Parameters field's value. -func (s *PartitionInput) SetParameters(v map[string]*string) *PartitionInput { - s.Parameters = v +// SetSerializationLibrary sets the SerializationLibrary field's value. +func (s *SerDeInfo) SetSerializationLibrary(v string) *SerDeInfo { + s.SerializationLibrary = &v return s } -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *PartitionInput) SetStorageDescriptor(v *StorageDescriptor) *PartitionInput { - s.StorageDescriptor = v +// Specifies skewed values in a table. Skewed values are those that occur with +// very high frequency. +type SkewedInfo struct { + _ struct{} `type:"structure"` + + // A list of names of columns that contain skewed values. + SkewedColumnNames []*string `type:"list"` + + // A mapping of skewed values to the columns that contain them. + SkewedColumnValueLocationMaps map[string]*string `type:"map"` + + // A list of values that appear so frequently as to be considered skewed. + SkewedColumnValues []*string `type:"list"` +} + +// String returns the string representation +func (s SkewedInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SkewedInfo) GoString() string { + return s.String() +} + +// SetSkewedColumnNames sets the SkewedColumnNames field's value. +func (s *SkewedInfo) SetSkewedColumnNames(v []*string) *SkewedInfo { + s.SkewedColumnNames = v return s } -// SetValues sets the Values field's value. -func (s *PartitionInput) SetValues(v []*string) *PartitionInput { - s.Values = v +// SetSkewedColumnValueLocationMaps sets the SkewedColumnValueLocationMaps field's value. 
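A SerDeInfo normally rides inside a table's StorageDescriptor, pairing the serialization library class named above with its initialization parameters. A sketch that only builds the value; wiring it into a CreateTable call is omitted, and the S3 path is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// A storage descriptor using the ColumnarSerDe class cited in the
	// SerializationLibrary doc comment above.
	sd := &glue.StorageDescriptor{
		Location: aws.String("s3://example-bucket/events/"), // placeholder path
		SerdeInfo: &glue.SerDeInfo{
			Name:                 aws.String("columnar"),
			SerializationLibrary: aws.String("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"),
			Parameters: map[string]*string{
				"serialization.format": aws.String("1"),
			},
		},
	}
	fmt.Println(sd)
}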
+func (s *SkewedInfo) SetSkewedColumnValueLocationMaps(v map[string]*string) *SkewedInfo { + s.SkewedColumnValueLocationMaps = v return s } -// Contains a list of values defining partitions. -type PartitionValueList struct { +// SetSkewedColumnValues sets the SkewedColumnValues field's value. +func (s *SkewedInfo) SetSkewedColumnValues(v []*string) *SkewedInfo { + s.SkewedColumnValues = v + return s +} + +// Specifies a field to sort by and a sort order. +type SortCriterion struct { _ struct{} `type:"structure"` - // The list of values. - // - // Values is a required field - Values []*string `type:"list" required:"true"` + // The name of the field on which to sort. + FieldName *string `type:"string"` + + // An ascending or descending sort. + Sort *string `type:"string" enum:"Sort"` } // String returns the string representation -func (s PartitionValueList) String() string { +func (s SortCriterion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PartitionValueList) GoString() string { +func (s SortCriterion) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PartitionValueList) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PartitionValueList"} - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetFieldName sets the FieldName field's value. +func (s *SortCriterion) SetFieldName(v string) *SortCriterion { + s.FieldName = &v + return s } -// SetValues sets the Values field's value. -func (s *PartitionValueList) SetValues(v []*string) *PartitionValueList { - s.Values = v +// SetSort sets the Sort field's value. +func (s *SortCriterion) SetSort(v string) *SortCriterion { + s.Sort = &v return s } -// Specifies the physical requirements for a connection. -type PhysicalConnectionRequirements struct { +type StartCrawlerInput struct { _ struct{} `type:"structure"` - // The connection's Availability Zone. This field is redundant because the specified - // subnet implies the Availability Zone to be used. Currently the field must - // be populated, but it will be deprecated in the future. - AvailabilityZone *string `min:"1" type:"string"` - - // The security group ID list used by the connection. - SecurityGroupIdList []*string `type:"list"` - - // The subnet ID used by the connection. - SubnetId *string `min:"1" type:"string"` + // Name of the crawler to start. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s PhysicalConnectionRequirements) String() string { +func (s StartCrawlerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PhysicalConnectionRequirements) GoString() string { +func (s StartCrawlerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PhysicalConnectionRequirements) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PhysicalConnectionRequirements"} - if s.AvailabilityZone != nil && len(*s.AvailabilityZone) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AvailabilityZone", 1)) +func (s *StartCrawlerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartCrawlerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.SubnetId != nil && len(*s.SubnetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -20935,148 +27834,116 @@ func (s *PhysicalConnectionRequirements) Validate() error { return nil } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *PhysicalConnectionRequirements) SetAvailabilityZone(v string) *PhysicalConnectionRequirements { - s.AvailabilityZone = &v - return s -} - -// SetSecurityGroupIdList sets the SecurityGroupIdList field's value. -func (s *PhysicalConnectionRequirements) SetSecurityGroupIdList(v []*string) *PhysicalConnectionRequirements { - s.SecurityGroupIdList = v - return s -} - -// SetSubnetId sets the SubnetId field's value. -func (s *PhysicalConnectionRequirements) SetSubnetId(v string) *PhysicalConnectionRequirements { - s.SubnetId = &v +// SetName sets the Name field's value. +func (s *StartCrawlerInput) SetName(v string) *StartCrawlerInput { + s.Name = &v return s } -// A job run that was used in the predicate of a conditional trigger that triggered -// this job run. -type Predecessor struct { +type StartCrawlerOutput struct { _ struct{} `type:"structure"` - - // The name of the job definition used by the predecessor job run. - JobName *string `min:"1" type:"string"` - - // The job-run ID of the predecessor job run. - RunId *string `min:"1" type:"string"` } // String returns the string representation -func (s Predecessor) String() string { +func (s StartCrawlerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Predecessor) GoString() string { +func (s StartCrawlerOutput) GoString() string { return s.String() } -// SetJobName sets the JobName field's value. -func (s *Predecessor) SetJobName(v string) *Predecessor { - s.JobName = &v - return s -} - -// SetRunId sets the RunId field's value. -func (s *Predecessor) SetRunId(v string) *Predecessor { - s.RunId = &v - return s -} - -// Defines the predicate of the trigger, which determines when it fires. -type Predicate struct { +type StartCrawlerScheduleInput struct { _ struct{} `type:"structure"` - // A list of the conditions that determine when the trigger will fire. - Conditions []*Condition `type:"list"` - - // Optional field if only one condition is listed. If multiple conditions are - // listed, then this field is required. - Logical *string `type:"string" enum:"Logical"` + // Name of the crawler to schedule. + // + // CrawlerName is a required field + CrawlerName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Predicate) String() string { +func (s StartCrawlerScheduleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Predicate) GoString() string { +func (s StartCrawlerScheduleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
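StartCrawler validates nothing beyond a non-empty name, so the interesting runtime failure is a crawler that is already running, which surfaces as a coded awserr. A sketch of an idempotent trigger; the crawler name is invented:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	_, err := svc.StartCrawler(&glue.StartCrawlerInput{
		Name: aws.String("sales-crawler"), // hypothetical crawler name
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == glue.ErrCodeCrawlerRunningException {
		// Already running: treat as success so repeated triggers are harmless.
		log.Println("crawler already running")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}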
-func (s *Predicate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Predicate"} - if s.Conditions != nil { - for i, v := range s.Conditions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Conditions", i), err.(request.ErrInvalidParams)) - } - } +func (s *StartCrawlerScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartCrawlerScheduleInput"} + if s.CrawlerName == nil { + invalidParams.Add(request.NewErrParamRequired("CrawlerName")) + } + if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil +} + +// SetCrawlerName sets the CrawlerName field's value. +func (s *StartCrawlerScheduleInput) SetCrawlerName(v string) *StartCrawlerScheduleInput { + s.CrawlerName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +type StartCrawlerScheduleOutput struct { + _ struct{} `type:"structure"` } -// SetConditions sets the Conditions field's value. -func (s *Predicate) SetConditions(v []*Condition) *Predicate { - s.Conditions = v - return s +// String returns the string representation +func (s StartCrawlerScheduleOutput) String() string { + return awsutil.Prettify(s) } -// SetLogical sets the Logical field's value. -func (s *Predicate) SetLogical(v string) *Predicate { - s.Logical = &v - return s +// GoString returns the string representation +func (s StartCrawlerScheduleOutput) GoString() string { + return s.String() } -type PutDataCatalogEncryptionSettingsInput struct { +type StartExportLabelsTaskRunInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog for which to set the security configuration. If - // none is provided, the AWS account ID is used by default. - CatalogId *string `min:"1" type:"string"` + // The Amazon S3 path where you export the labels. + // + // OutputS3Path is a required field + OutputS3Path *string `type:"string" required:"true"` - // The security configuration to set. + // The unique identifier of the machine learning transform. // - // DataCatalogEncryptionSettings is a required field - DataCatalogEncryptionSettings *DataCatalogEncryptionSettings `type:"structure" required:"true"` + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s PutDataCatalogEncryptionSettingsInput) String() string { +func (s StartExportLabelsTaskRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutDataCatalogEncryptionSettingsInput) GoString() string { +func (s StartExportLabelsTaskRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutDataCatalogEncryptionSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutDataCatalogEncryptionSettingsInput"} - if s.CatalogId != nil && len(*s.CatalogId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) +func (s *StartExportLabelsTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartExportLabelsTaskRunInput"} + if s.OutputS3Path == nil { + invalidParams.Add(request.NewErrParamRequired("OutputS3Path")) } - if s.DataCatalogEncryptionSettings == nil { - invalidParams.Add(request.NewErrParamRequired("DataCatalogEncryptionSettings")) + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) } - if s.DataCatalogEncryptionSettings != nil { - if err := s.DataCatalogEncryptionSettings.Validate(); err != nil { - invalidParams.AddNested("DataCatalogEncryptionSettings", err.(request.ErrInvalidParams)) - } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) } if invalidParams.Len() > 0 { @@ -21085,72 +27952,80 @@ func (s *PutDataCatalogEncryptionSettingsInput) Validate() error { return nil } -// SetCatalogId sets the CatalogId field's value. -func (s *PutDataCatalogEncryptionSettingsInput) SetCatalogId(v string) *PutDataCatalogEncryptionSettingsInput { - s.CatalogId = &v +// SetOutputS3Path sets the OutputS3Path field's value. +func (s *StartExportLabelsTaskRunInput) SetOutputS3Path(v string) *StartExportLabelsTaskRunInput { + s.OutputS3Path = &v return s } -// SetDataCatalogEncryptionSettings sets the DataCatalogEncryptionSettings field's value. -func (s *PutDataCatalogEncryptionSettingsInput) SetDataCatalogEncryptionSettings(v *DataCatalogEncryptionSettings) *PutDataCatalogEncryptionSettingsInput { - s.DataCatalogEncryptionSettings = v +// SetTransformId sets the TransformId field's value. +func (s *StartExportLabelsTaskRunInput) SetTransformId(v string) *StartExportLabelsTaskRunInput { + s.TransformId = &v return s } -type PutDataCatalogEncryptionSettingsOutput struct { +type StartExportLabelsTaskRunOutput struct { _ struct{} `type:"structure"` + + // The unique identifier for the task run. + TaskRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s PutDataCatalogEncryptionSettingsOutput) String() string { +func (s StartExportLabelsTaskRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutDataCatalogEncryptionSettingsOutput) GoString() string { +func (s StartExportLabelsTaskRunOutput) GoString() string { return s.String() } -type PutResourcePolicyInput struct { +// SetTaskRunId sets the TaskRunId field's value. +func (s *StartExportLabelsTaskRunOutput) SetTaskRunId(v string) *StartExportLabelsTaskRunOutput { + s.TaskRunId = &v + return s +} + +type StartImportLabelsTaskRunInput struct { _ struct{} `type:"structure"` - // A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is - // used to create a new policy. If a value of NONE or a null value is used, - // the call will not depend on the existence of a policy. - PolicyExistsCondition *string `type:"string" enum:"ExistCondition"` + // The Amazon Simple Storage Service (Amazon S3) path from where you import + // the labels. + // + // InputS3Path is a required field + InputS3Path *string `type:"string" required:"true"` - // The hash value returned when the previous policy was set using PutResourcePolicy. 
- // Its purpose is to prevent concurrent modifications of a policy. Do not use - // this parameter if no previous policy has been set. - PolicyHashCondition *string `min:"1" type:"string"` + // Indicates whether to overwrite your existing labels. + ReplaceAllLabels *bool `type:"boolean"` - // Contains the policy document to set, in JSON format. + // The unique identifier of the machine learning transform. // - // PolicyInJson is a required field - PolicyInJson *string `min:"2" type:"string" required:"true"` + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s PutResourcePolicyInput) String() string { +func (s StartImportLabelsTaskRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutResourcePolicyInput) GoString() string { +func (s StartImportLabelsTaskRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} - if s.PolicyHashCondition != nil && len(*s.PolicyHashCondition) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyHashCondition", 1)) +func (s *StartImportLabelsTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImportLabelsTaskRunInput"} + if s.InputS3Path == nil { + invalidParams.Add(request.NewErrParamRequired("InputS3Path")) } - if s.PolicyInJson == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyInJson")) + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) } - if s.PolicyInJson != nil && len(*s.PolicyInJson) < 2 { - invalidParams.Add(request.NewErrParamMinLen("PolicyInJson", 2)) + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) } if invalidParams.Len() > 0 { @@ -21159,73 +28034,169 @@ func (s *PutResourcePolicyInput) Validate() error { return nil } -// SetPolicyExistsCondition sets the PolicyExistsCondition field's value. -func (s *PutResourcePolicyInput) SetPolicyExistsCondition(v string) *PutResourcePolicyInput { - s.PolicyExistsCondition = &v +// SetInputS3Path sets the InputS3Path field's value. +func (s *StartImportLabelsTaskRunInput) SetInputS3Path(v string) *StartImportLabelsTaskRunInput { + s.InputS3Path = &v return s } -// SetPolicyHashCondition sets the PolicyHashCondition field's value. -func (s *PutResourcePolicyInput) SetPolicyHashCondition(v string) *PutResourcePolicyInput { - s.PolicyHashCondition = &v +// SetReplaceAllLabels sets the ReplaceAllLabels field's value. +func (s *StartImportLabelsTaskRunInput) SetReplaceAllLabels(v bool) *StartImportLabelsTaskRunInput { + s.ReplaceAllLabels = &v return s } -// SetPolicyInJson sets the PolicyInJson field's value. -func (s *PutResourcePolicyInput) SetPolicyInJson(v string) *PutResourcePolicyInput { - s.PolicyInJson = &v +// SetTransformId sets the TransformId field's value. +func (s *StartImportLabelsTaskRunInput) SetTransformId(v string) *StartImportLabelsTaskRunInput { + s.TransformId = &v return s } -type PutResourcePolicyOutput struct { +type StartImportLabelsTaskRunOutput struct { _ struct{} `type:"structure"` - // A hash of the policy that has just been set. This must be included in a subsequent - // call that overwrites or updates this policy. 
- PolicyHash *string `min:"1" type:"string"` + // The unique identifier for the task run. + TaskRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s PutResourcePolicyOutput) String() string { +func (s StartImportLabelsTaskRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutResourcePolicyOutput) GoString() string { +func (s StartImportLabelsTaskRunOutput) GoString() string { return s.String() } -// SetPolicyHash sets the PolicyHash field's value. -func (s *PutResourcePolicyOutput) SetPolicyHash(v string) *PutResourcePolicyOutput { - s.PolicyHash = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *StartImportLabelsTaskRunOutput) SetTaskRunId(v string) *StartImportLabelsTaskRunOutput { + s.TaskRunId = &v return s } -type ResetJobBookmarkInput struct { +type StartJobRunInput struct { _ struct{} `type:"structure"` - // The name of the job in question. + // This field is deprecated. Use MaxCapacity instead. + // + // The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). + // + // Deprecated: This property is deprecated, use MaxCapacity instead. + AllocatedCapacity *int64 `deprecated:"true" type:"integer"` + + // The job arguments specifically for this run. For this job run, they replace + // the default arguments set in the job definition itself. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // topic in the developer guide. + Arguments map[string]*string `type:"map"` + + // The name of the job definition to use. // // JobName is a required field - JobName *string `type:"string" required:"true"` + JobName *string `min:"1" type:"string" required:"true"` + + // The ID of a previous JobRun to retry. + JobRunId *string `min:"1" type:"string"` + + // The number of AWS Glue data processing units (DPUs) that can be allocated + // when this job runs. A DPU is a relative measure of processing power that + // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, + // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // + // The value that can be allocated for MaxCapacity depends on whether you are + // running a Python shell job, or an Apache Spark ETL job: + // + // * When you specify a Python shell job (JobCommand.Name="pythonshell"), + // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. + // + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), + // you can allocate from 2 to 100 DPUs. 
The default is 10 DPUs. This job + // type cannot have a fractional DPU allocation. + MaxCapacity *float64 `type:"double"` + + // Specifies configuration properties of a job run notification. + NotificationProperty *NotificationProperty `type:"structure"` + + // The number of workers of a defined workerType that are allocated when a job + // runs. + // + // The maximum number of workers you can define is 299 for G.1X, and 149 for + // G.2X. + NumberOfWorkers *int64 `type:"integer"` + + // The name of the SecurityConfiguration structure to be used with this job + // run. + SecurityConfiguration *string `min:"1" type:"string"` + + // The JobRun timeout in minutes. This is the maximum time that a job run can + // consume resources before it is terminated and enters TIMEOUT status. The + // default is 2,880 minutes (48 hours). This overrides the timeout value set + // in the parent job. + Timeout *int64 `min:"1" type:"integer"` + + // The type of predefined worker that is allocated when a job runs. Accepts + // a value of Standard, G.1X, or G.2X. + // + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory + // and a 64GB disk, and 1 executor per worker. + // + // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory + // and a 128GB disk, and 1 executor per worker. + WorkerType *string `type:"string" enum:"WorkerType"` } // String returns the string representation -func (s ResetJobBookmarkInput) String() string { +func (s StartJobRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResetJobBookmarkInput) GoString() string { +func (s StartJobRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ResetJobBookmarkInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResetJobBookmarkInput"} +func (s *StartJobRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartJobRunInput"} if s.JobName == nil { invalidParams.Add(request.NewErrParamRequired("JobName")) } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.JobRunId != nil && len(*s.JobRunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobRunId", 1)) + } + if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.NotificationProperty != nil { + if err := s.NotificationProperty.Validate(); err != nil { + invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -21233,61 +28204,116 @@ func (s *ResetJobBookmarkInput) Validate() error { return nil } +// SetAllocatedCapacity sets the AllocatedCapacity field's value. +func (s *StartJobRunInput) SetAllocatedCapacity(v int64) *StartJobRunInput { + s.AllocatedCapacity = &v + return s +} + +// SetArguments sets the Arguments field's value. +func (s *StartJobRunInput) SetArguments(v map[string]*string) *StartJobRunInput { + s.Arguments = v + return s +} + // SetJobName sets the JobName field's value. 
-func (s *ResetJobBookmarkInput) SetJobName(v string) *ResetJobBookmarkInput { +func (s *StartJobRunInput) SetJobName(v string) *StartJobRunInput { s.JobName = &v return s } -type ResetJobBookmarkOutput struct { +// SetJobRunId sets the JobRunId field's value. +func (s *StartJobRunInput) SetJobRunId(v string) *StartJobRunInput { + s.JobRunId = &v + return s +} + +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *StartJobRunInput) SetMaxCapacity(v float64) *StartJobRunInput { + s.MaxCapacity = &v + return s +} + +// SetNotificationProperty sets the NotificationProperty field's value. +func (s *StartJobRunInput) SetNotificationProperty(v *NotificationProperty) *StartJobRunInput { + s.NotificationProperty = v + return s +} + +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *StartJobRunInput) SetNumberOfWorkers(v int64) *StartJobRunInput { + s.NumberOfWorkers = &v + return s +} + +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *StartJobRunInput) SetSecurityConfiguration(v string) *StartJobRunInput { + s.SecurityConfiguration = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *StartJobRunInput) SetTimeout(v int64) *StartJobRunInput { + s.Timeout = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *StartJobRunInput) SetWorkerType(v string) *StartJobRunInput { + s.WorkerType = &v + return s +} + +type StartJobRunOutput struct { _ struct{} `type:"structure"` - // The reset bookmark entry. - JobBookmarkEntry *JobBookmarkEntry `type:"structure"` + // The ID assigned to this job run. + JobRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s ResetJobBookmarkOutput) String() string { +func (s StartJobRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResetJobBookmarkOutput) GoString() string { +func (s StartJobRunOutput) GoString() string { return s.String() } -// SetJobBookmarkEntry sets the JobBookmarkEntry field's value. -func (s *ResetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *ResetJobBookmarkOutput { - s.JobBookmarkEntry = v +// SetJobRunId sets the JobRunId field's value. +func (s *StartJobRunOutput) SetJobRunId(v string) *StartJobRunOutput { + s.JobRunId = &v return s } -// URIs for function resources. -type ResourceUri struct { +type StartMLEvaluationTaskRunInput struct { _ struct{} `type:"structure"` - // The type of the resource. - ResourceType *string `type:"string" enum:"ResourceType"` - - // The URI for accessing the resource. - Uri *string `min:"1" type:"string"` + // The unique identifier of the machine learning transform. + // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ResourceUri) String() string { +func (s StartMLEvaluationTaskRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceUri) GoString() string { +func (s StartMLEvaluationTaskRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
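The StartJobRunInput doc comments above make MaxCapacity mutually exclusive with the WorkerType and NumberOfWorkers pair, so a caller picks one sizing model. A sketch using worker-based sizing; the job name and arguments are invented:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.StartJobRun(&glue.StartJobRunInput{
		JobName: aws.String("orders-to-parquet"), // hypothetical job name
		// Script-visible arguments; keys follow the --name convention that
		// Glue job scripts read.
		Arguments: map[string]*string{
			"--input_date": aws.String("2019-11-20"),
		},
		// Worker-based sizing: do not also set MaxCapacity.
		WorkerType:      aws.String("G.1X"),
		NumberOfWorkers: aws.Int64(10),
		Timeout:         aws.Int64(60), // minutes
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started run:", aws.StringValue(out.JobRunId))
}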
-func (s *ResourceUri) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResourceUri"} - if s.Uri != nil && len(*s.Uri) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Uri", 1)) +func (s *StartMLEvaluationTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMLEvaluationTaskRunInput"} + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) } if invalidParams.Len() > 0 { @@ -21296,379 +28322,264 @@ func (s *ResourceUri) Validate() error { return nil } -// SetResourceType sets the ResourceType field's value. -func (s *ResourceUri) SetResourceType(v string) *ResourceUri { - s.ResourceType = &v - return s -} - -// SetUri sets the Uri field's value. -func (s *ResourceUri) SetUri(v string) *ResourceUri { - s.Uri = &v +// SetTransformId sets the TransformId field's value. +func (s *StartMLEvaluationTaskRunInput) SetTransformId(v string) *StartMLEvaluationTaskRunInput { + s.TransformId = &v return s } -// Specifies how S3 data should be encrypted. -type S3Encryption struct { +type StartMLEvaluationTaskRunOutput struct { _ struct{} `type:"structure"` - // The AWS ARN of the KMS key to be used to encrypt the data. - KmsKeyArn *string `type:"string"` - - // The encryption mode to use for S3 data. - S3EncryptionMode *string `type:"string" enum:"S3EncryptionMode"` + // The unique identifier associated with this run. + TaskRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s S3Encryption) String() string { +func (s StartMLEvaluationTaskRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s S3Encryption) GoString() string { +func (s StartMLEvaluationTaskRunOutput) GoString() string { return s.String() } -// SetKmsKeyArn sets the KmsKeyArn field's value. -func (s *S3Encryption) SetKmsKeyArn(v string) *S3Encryption { - s.KmsKeyArn = &v - return s -} - -// SetS3EncryptionMode sets the S3EncryptionMode field's value. -func (s *S3Encryption) SetS3EncryptionMode(v string) *S3Encryption { - s.S3EncryptionMode = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *StartMLEvaluationTaskRunOutput) SetTaskRunId(v string) *StartMLEvaluationTaskRunOutput { + s.TaskRunId = &v return s } -// Specifies a data store in Amazon S3. -type S3Target struct { +type StartMLLabelingSetGenerationTaskRunInput struct { _ struct{} `type:"structure"` - // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). - Exclusions []*string `type:"list"` + // The Amazon Simple Storage Service (Amazon S3) path where you generate the + // labeling set. + // + // OutputS3Path is a required field + OutputS3Path *string `type:"string" required:"true"` - // The path to the Amazon S3 target. - Path *string `type:"string"` + // The unique identifier of the machine learning transform. 
+ // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s S3Target) String() string { +func (s StartMLLabelingSetGenerationTaskRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s S3Target) GoString() string { +func (s StartMLLabelingSetGenerationTaskRunInput) GoString() string { return s.String() } -// SetExclusions sets the Exclusions field's value. -func (s *S3Target) SetExclusions(v []*string) *S3Target { - s.Exclusions = v - return s -} - -// SetPath sets the Path field's value. -func (s *S3Target) SetPath(v string) *S3Target { - s.Path = &v - return s -} - -// A scheduling object using a cron statement to schedule an event. -type Schedule struct { - _ struct{} `type:"structure"` - - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). - ScheduleExpression *string `type:"string"` - - // The state of the schedule. - State *string `type:"string" enum:"ScheduleState"` -} - -// String returns the string representation -func (s Schedule) String() string { - return awsutil.Prettify(s) -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartMLLabelingSetGenerationTaskRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMLLabelingSetGenerationTaskRunInput"} + if s.OutputS3Path == nil { + invalidParams.Add(request.NewErrParamRequired("OutputS3Path")) + } + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } -// GoString returns the string representation -func (s Schedule) GoString() string { - return s.String() + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetScheduleExpression sets the ScheduleExpression field's value. -func (s *Schedule) SetScheduleExpression(v string) *Schedule { - s.ScheduleExpression = &v +// SetOutputS3Path sets the OutputS3Path field's value. +func (s *StartMLLabelingSetGenerationTaskRunInput) SetOutputS3Path(v string) *StartMLLabelingSetGenerationTaskRunInput { + s.OutputS3Path = &v return s } -// SetState sets the State field's value. -func (s *Schedule) SetState(v string) *Schedule { - s.State = &v +// SetTransformId sets the TransformId field's value. +func (s *StartMLLabelingSetGenerationTaskRunInput) SetTransformId(v string) *StartMLLabelingSetGenerationTaskRunInput { + s.TransformId = &v return s } -// Crawler policy for update and deletion behavior. -type SchemaChangePolicy struct { +type StartMLLabelingSetGenerationTaskRunOutput struct { _ struct{} `type:"structure"` - // The deletion behavior when the crawler finds a deleted object. - DeleteBehavior *string `type:"string" enum:"DeleteBehavior"` - - // The update behavior when the crawler finds a changed schema. - UpdateBehavior *string `type:"string" enum:"UpdateBehavior"` + // The unique run identifier that is associated with this task run. 
+ TaskRunId *string `min:"1" type:"string"` } // String returns the string representation -func (s SchemaChangePolicy) String() string { +func (s StartMLLabelingSetGenerationTaskRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SchemaChangePolicy) GoString() string { +func (s StartMLLabelingSetGenerationTaskRunOutput) GoString() string { return s.String() } -// SetDeleteBehavior sets the DeleteBehavior field's value. -func (s *SchemaChangePolicy) SetDeleteBehavior(v string) *SchemaChangePolicy { - s.DeleteBehavior = &v - return s -} - -// SetUpdateBehavior sets the UpdateBehavior field's value. -func (s *SchemaChangePolicy) SetUpdateBehavior(v string) *SchemaChangePolicy { - s.UpdateBehavior = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *StartMLLabelingSetGenerationTaskRunOutput) SetTaskRunId(v string) *StartMLLabelingSetGenerationTaskRunOutput { + s.TaskRunId = &v return s } -// Specifies a security configuration. -type SecurityConfiguration struct { +type StartTriggerInput struct { _ struct{} `type:"structure"` - // The time at which this security configuration was created. - CreatedTimeStamp *time.Time `type:"timestamp"` - - // The encryption configuration associated with this security configuration. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // The name of the security configuration. - Name *string `min:"1" type:"string"` + // The name of the trigger to start. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s SecurityConfiguration) String() string { +func (s StartTriggerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SecurityConfiguration) GoString() string { +func (s StartTriggerInput) GoString() string { return s.String() } -// SetCreatedTimeStamp sets the CreatedTimeStamp field's value. -func (s *SecurityConfiguration) SetCreatedTimeStamp(v time.Time) *SecurityConfiguration { - s.CreatedTimeStamp = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartTriggerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartTriggerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *SecurityConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *SecurityConfiguration { - s.EncryptionConfiguration = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetName sets the Name field's value. -func (s *SecurityConfiguration) SetName(v string) *SecurityConfiguration { +func (s *StartTriggerInput) SetName(v string) *StartTriggerInput { s.Name = &v return s } -// Defines a non-overlapping region of a table's partitions, allowing multiple -// requests to be executed in parallel. -type Segment struct { +type StartTriggerOutput struct { _ struct{} `type:"structure"` - // The zero-based index number of this segment. For example, if the total - // number of segments is 4, SegmentNumber values will range from zero through - // three. - // - // SegmentNumber is a required field - SegmentNumber *int64 `type:"integer" required:"true"` - - // The total number of segments.
- // - // TotalSegments is a required field - TotalSegments *int64 `min:"1" type:"integer" required:"true"` + // The name of the trigger that was started. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s Segment) String() string { +func (s StartTriggerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Segment) GoString() string { +func (s StartTriggerOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Segment) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Segment"} - if s.SegmentNumber == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentNumber")) - } - if s.TotalSegments == nil { - invalidParams.Add(request.NewErrParamRequired("TotalSegments")) - } - if s.TotalSegments != nil && *s.TotalSegments < 1 { - invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSegmentNumber sets the SegmentNumber field's value. -func (s *Segment) SetSegmentNumber(v int64) *Segment { - s.SegmentNumber = &v - return s -} - -// SetTotalSegments sets the TotalSegments field's value. -func (s *Segment) SetTotalSegments(v int64) *Segment { - s.TotalSegments = &v +// SetName sets the Name field's value. +func (s *StartTriggerOutput) SetName(v string) *StartTriggerOutput { + s.Name = &v return s } -// Information about a serialization/deserialization program (SerDe) which serves -// as an extractor and loader. -type SerDeInfo struct { +type StartWorkflowRunInput struct { _ struct{} `type:"structure"` - // Name of the SerDe. - Name *string `min:"1" type:"string"` - - // These key-value pairs define initialization parameters for the SerDe. - Parameters map[string]*string `type:"map"` - - // Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. - SerializationLibrary *string `min:"1" type:"string"` + // The name of the workflow to start. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s SerDeInfo) String() string { +func (s StartWorkflowRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SerDeInfo) GoString() string { +func (s StartWorkflowRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SerDeInfo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SerDeInfo"} +func (s *StartWorkflowRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartWorkflowRunInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.SerializationLibrary != nil && len(*s.SerializationLibrary) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SerializationLibrary", 1)) - } if invalidParams.Len() > 0 { return invalidParams } return nil -} - -// SetName sets the Name field's value. -func (s *SerDeInfo) SetName(v string) *SerDeInfo { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. 
-func (s *SerDeInfo) SetParameters(v map[string]*string) *SerDeInfo { - s.Parameters = v - return s -} - -// SetSerializationLibrary sets the SerializationLibrary field's value. -func (s *SerDeInfo) SetSerializationLibrary(v string) *SerDeInfo { - s.SerializationLibrary = &v +} + +// SetName sets the Name field's value. +func (s *StartWorkflowRunInput) SetName(v string) *StartWorkflowRunInput { + s.Name = &v return s } -// Specifies skewed values in a table. Skewed values are ones that occur with very -// high frequency. -type SkewedInfo struct { +type StartWorkflowRunOutput struct { _ struct{} `type:"structure"` - // A list of names of columns that contain skewed values. - SkewedColumnNames []*string `type:"list"` - - // A mapping of skewed values to the columns that contain them. - SkewedColumnValueLocationMaps map[string]*string `type:"map"` - - // A list of values that appear so frequently as to be considered skewed. - SkewedColumnValues []*string `type:"list"` + // An Id for the new run. + RunId *string `min:"1" type:"string"` } // String returns the string representation -func (s SkewedInfo) String() string { +func (s StartWorkflowRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SkewedInfo) GoString() string { +func (s StartWorkflowRunOutput) GoString() string { return s.String() } -// SetSkewedColumnNames sets the SkewedColumnNames field's value. -func (s *SkewedInfo) SetSkewedColumnNames(v []*string) *SkewedInfo { - s.SkewedColumnNames = v - return s -} - -// SetSkewedColumnValueLocationMaps sets the SkewedColumnValueLocationMaps field's value. -func (s *SkewedInfo) SetSkewedColumnValueLocationMaps(v map[string]*string) *SkewedInfo { - s.SkewedColumnValueLocationMaps = v - return s -} - -// SetSkewedColumnValues sets the SkewedColumnValues field's value. -func (s *SkewedInfo) SetSkewedColumnValues(v []*string) *SkewedInfo { - s.SkewedColumnValues = v +// SetRunId sets the RunId field's value. +func (s *StartWorkflowRunOutput) SetRunId(v string) *StartWorkflowRunOutput { + s.RunId = &v return s } -type StartCrawlerInput struct { +type StopCrawlerInput struct { _ struct{} `type:"structure"` - // Name of the crawler to start. + // Name of the crawler to stop. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s StartCrawlerInput) String() string { +func (s StopCrawlerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartCrawlerInput) GoString() string { +func (s StopCrawlerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *StartCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartCrawlerInput"} +func (s *StopCrawlerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopCrawlerInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -21683,47 +28594,47 @@ func (s *StartCrawlerInput) Validate() error { } // SetName sets the Name field's value.
-func (s *StartCrawlerInput) SetName(v string) *StartCrawlerInput { +func (s *StopCrawlerInput) SetName(v string) *StopCrawlerInput { s.Name = &v return s } -type StartCrawlerOutput struct { +type StopCrawlerOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s StartCrawlerOutput) String() string { +func (s StopCrawlerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartCrawlerOutput) GoString() string { +func (s StopCrawlerOutput) GoString() string { return s.String() } -type StartCrawlerScheduleInput struct { +type StopCrawlerScheduleInput struct { _ struct{} `type:"structure"` - // Name of the crawler to schedule. + // Name of the crawler whose schedule state to set. // // CrawlerName is a required field CrawlerName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s StartCrawlerScheduleInput) String() string { +func (s StopCrawlerScheduleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartCrawlerScheduleInput) GoString() string { +func (s StopCrawlerScheduleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *StartCrawlerScheduleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartCrawlerScheduleInput"} +func (s *StopCrawlerScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopCrawlerScheduleInput"} if s.CrawlerName == nil { invalidParams.Add(request.NewErrParamRequired("CrawlerName")) } @@ -21738,145 +28649,171 @@ func (s *StartCrawlerScheduleInput) Validate() error { } // SetCrawlerName sets the CrawlerName field's value. -func (s *StartCrawlerScheduleInput) SetCrawlerName(v string) *StartCrawlerScheduleInput { +func (s *StopCrawlerScheduleInput) SetCrawlerName(v string) *StopCrawlerScheduleInput { s.CrawlerName = &v return s } -type StartCrawlerScheduleOutput struct { +type StopCrawlerScheduleOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s StartCrawlerScheduleOutput) String() string { +func (s StopCrawlerScheduleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartCrawlerScheduleOutput) GoString() string { +func (s StopCrawlerScheduleOutput) GoString() string { return s.String() } -type StartJobRunInput struct { +type StopTriggerInput struct { _ struct{} `type:"structure"` - // This field is deprecated, use MaxCapacity instead. - // - // The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. - // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative - // measure of processing power that consists of 4 vCPUs of compute capacity - // and 16 GB of memory. For more information, see the AWS Glue pricing page - // (https://aws.amazon.com/glue/pricing/). + // The name of the trigger to stop. // - // Deprecated: This property is deprecated, use MaxCapacity instead. - AllocatedCapacity *int64 `deprecated:"true" type:"integer"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} - // The job arguments specifically for this run. For this job run, they replace - // the default arguments set in the job definition itself. 
- // - // You can specify arguments here that your own job-execution script consumes, - // as well as arguments that AWS Glue itself consumes. - // - // For information about how to specify and consume your own Job arguments, - // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - // topic in the developer guide. - // - // For information about the key-value pairs that AWS Glue consumes to set up - // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - // topic in the developer guide. - Arguments map[string]*string `type:"map"` +// String returns the string representation +func (s StopTriggerInput) String() string { + return awsutil.Prettify(s) +} - // The name of the job definition to use. - // - // JobName is a required field - JobName *string `min:"1" type:"string" required:"true"` +// GoString returns the string representation +func (s StopTriggerInput) GoString() string { + return s.String() +} - // The ID of a previous JobRun to retry. - JobRunId *string `min:"1" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopTriggerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopTriggerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } - // The number of AWS Glue data processing units (DPUs) that can be allocated - // when this job runs. A DPU is a relative measure of processing power that - // consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, - // see the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). - // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. - // - // The value that can be allocated for MaxCapacity depends on whether you are - // running a python shell job, or an Apache Spark ETL job: - // - // * When you specify a python shell job (JobCommand.Name="pythonshell"), - // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. - MaxCapacity *float64 `type:"double"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies configuration properties of a job run notification. - NotificationProperty *NotificationProperty `type:"structure"` +// SetName sets the Name field's value. +func (s *StopTriggerInput) SetName(v string) *StopTriggerInput { + s.Name = &v + return s +} - // The number of workers of a defined workerType that are allocated when a job - // runs. - // - // The maximum number of workers you can define are 299 for G.1X, and 149 for - // G.2X. - NumberOfWorkers *int64 `type:"integer"` +type StopTriggerOutput struct { + _ struct{} `type:"structure"` - // The name of the SecurityConfiguration structure to be used with this job - // run. - SecurityConfiguration *string `min:"1" type:"string"` + // The name of the trigger that was stopped. + Name *string `min:"1" type:"string"` +} - // The JobRun timeout in minutes. This is the maximum time that a job run can - // consume resources before it is terminated and enters TIMEOUT status. 
The - // default is 2,880 minutes (48 hours). This overrides the timeout value set - // in the parent job. - Timeout *int64 `min:"1" type:"integer"` +// String returns the string representation +func (s StopTriggerOutput) String() string { + return awsutil.Prettify(s) +} - // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, or G.2X. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory - // and a 64GB disk, and 1 executor per worker. - // - // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory - // and a 128GB disk, and 1 executor per worker. - WorkerType *string `type:"string" enum:"WorkerType"` +// GoString returns the string representation +func (s StopTriggerOutput) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *StopTriggerOutput) SetName(v string) *StopTriggerOutput { + s.Name = &v + return s +} + +// Describes the physical storage of table data. +type StorageDescriptor struct { + _ struct{} `type:"structure"` + + // A list of reducer grouping columns, clustering columns, and bucketing columns + // in the table. + BucketColumns []*string `type:"list"` + + // A list of the Columns in the table. + Columns []*Column `type:"list"` + + // True if the data in the table is compressed, or False if not. + Compressed *bool `type:"boolean"` + + // The input format: SequenceFileInputFormat (binary), or TextInputFormat, or + // a custom format. + InputFormat *string `type:"string"` + + // The physical location of the table. By default, this takes the form of the + // warehouse location, followed by the database location in the warehouse, followed + // by the table name. + Location *string `type:"string"` + + // Must be specified if the table contains any dimension columns. + NumberOfBuckets *int64 `type:"integer"` + + // The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, + // or a custom format. + OutputFormat *string `type:"string"` + + // The user-supplied properties in key-value form. + Parameters map[string]*string `type:"map"` + + // The serialization/deserialization (SerDe) information. + SerdeInfo *SerDeInfo `type:"structure"` + + // The information about values that appear frequently in a column (skewed values). + SkewedInfo *SkewedInfo `type:"structure"` + + // A list specifying the sort order of each bucket in the table. + SortColumns []*Order `type:"list"` + + // True if the table data is stored in subdirectories, or False if not. + StoredAsSubDirectories *bool `type:"boolean"` } // String returns the string representation -func (s StartJobRunInput) String() string { +func (s StorageDescriptor) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartJobRunInput) GoString() string { +func (s StorageDescriptor) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartJobRunInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartJobRunInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - if s.JobName != nil && len(*s.JobName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) - } - if s.JobRunId != nil && len(*s.JobRunId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobRunId", 1)) - } - if s.SecurityConfiguration != nil && len(*s.SecurityConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SecurityConfiguration", 1)) +func (s *StorageDescriptor) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageDescriptor"} + if s.Columns != nil { + for i, v := range s.Columns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) + } + } } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + if s.SerdeInfo != nil { + if err := s.SerdeInfo.Validate(); err != nil { + invalidParams.AddNested("SerdeInfo", err.(request.ErrInvalidParams)) + } } - if s.NotificationProperty != nil { - if err := s.NotificationProperty.Validate(); err != nil { - invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) + if s.SortColumns != nil { + for i, v := range s.SortColumns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SortColumns", i), err.(request.ErrInvalidParams)) + } } } @@ -21886,116 +28823,130 @@ func (s *StartJobRunInput) Validate() error { return nil } -// SetAllocatedCapacity sets the AllocatedCapacity field's value. -func (s *StartJobRunInput) SetAllocatedCapacity(v int64) *StartJobRunInput { - s.AllocatedCapacity = &v +// SetBucketColumns sets the BucketColumns field's value. +func (s *StorageDescriptor) SetBucketColumns(v []*string) *StorageDescriptor { + s.BucketColumns = v return s } -// SetArguments sets the Arguments field's value. -func (s *StartJobRunInput) SetArguments(v map[string]*string) *StartJobRunInput { - s.Arguments = v +// SetColumns sets the Columns field's value. +func (s *StorageDescriptor) SetColumns(v []*Column) *StorageDescriptor { + s.Columns = v return s } -// SetJobName sets the JobName field's value. -func (s *StartJobRunInput) SetJobName(v string) *StartJobRunInput { - s.JobName = &v +// SetCompressed sets the Compressed field's value. +func (s *StorageDescriptor) SetCompressed(v bool) *StorageDescriptor { + s.Compressed = &v return s } -// SetJobRunId sets the JobRunId field's value. -func (s *StartJobRunInput) SetJobRunId(v string) *StartJobRunInput { - s.JobRunId = &v +// SetInputFormat sets the InputFormat field's value. +func (s *StorageDescriptor) SetInputFormat(v string) *StorageDescriptor { + s.InputFormat = &v return s } -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *StartJobRunInput) SetMaxCapacity(v float64) *StartJobRunInput { - s.MaxCapacity = &v +// SetLocation sets the Location field's value. +func (s *StorageDescriptor) SetLocation(v string) *StorageDescriptor { + s.Location = &v return s } -// SetNotificationProperty sets the NotificationProperty field's value. -func (s *StartJobRunInput) SetNotificationProperty(v *NotificationProperty) *StartJobRunInput { - s.NotificationProperty = v +// SetNumberOfBuckets sets the NumberOfBuckets field's value. 
+func (s *StorageDescriptor) SetNumberOfBuckets(v int64) *StorageDescriptor { + s.NumberOfBuckets = &v return s } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. -func (s *StartJobRunInput) SetNumberOfWorkers(v int64) *StartJobRunInput { - s.NumberOfWorkers = &v +// SetOutputFormat sets the OutputFormat field's value. +func (s *StorageDescriptor) SetOutputFormat(v string) *StorageDescriptor { + s.OutputFormat = &v return s } -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *StartJobRunInput) SetSecurityConfiguration(v string) *StartJobRunInput { - s.SecurityConfiguration = &v +// SetParameters sets the Parameters field's value. +func (s *StorageDescriptor) SetParameters(v map[string]*string) *StorageDescriptor { + s.Parameters = v return s } -// SetTimeout sets the Timeout field's value. -func (s *StartJobRunInput) SetTimeout(v int64) *StartJobRunInput { - s.Timeout = &v +// SetSerdeInfo sets the SerdeInfo field's value. +func (s *StorageDescriptor) SetSerdeInfo(v *SerDeInfo) *StorageDescriptor { + s.SerdeInfo = v return s } -// SetWorkerType sets the WorkerType field's value. -func (s *StartJobRunInput) SetWorkerType(v string) *StartJobRunInput { - s.WorkerType = &v +// SetSkewedInfo sets the SkewedInfo field's value. +func (s *StorageDescriptor) SetSkewedInfo(v *SkewedInfo) *StorageDescriptor { + s.SkewedInfo = v return s } -type StartJobRunOutput struct { - _ struct{} `type:"structure"` - - // The ID assigned to this job run. - JobRunId *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartJobRunOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StartJobRunOutput) GoString() string { - return s.String() +// SetSortColumns sets the SortColumns field's value. +func (s *StorageDescriptor) SetSortColumns(v []*Order) *StorageDescriptor { + s.SortColumns = v + return s } -// SetJobRunId sets the JobRunId field's value. -func (s *StartJobRunOutput) SetJobRunId(v string) *StartJobRunOutput { - s.JobRunId = &v +// SetStoredAsSubDirectories sets the StoredAsSubDirectories field's value. +func (s *StorageDescriptor) SetStoredAsSubDirectories(v bool) *StorageDescriptor { + s.StoredAsSubDirectories = &v return s } -type StartTriggerInput struct { +// The database and table in the AWS Glue Data Catalog that is used for input +// or output data. +type Table struct { _ struct{} `type:"structure"` - // The name of the trigger to start. + // A unique identifier for the AWS Glue Data Catalog. + CatalogId *string `min:"1" type:"string"` + + // The name of the connection to the AWS Glue Data Catalog. + ConnectionName *string `min:"1" type:"string"` + + // A database name in the AWS Glue Data Catalog. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A table name in the AWS Glue Data Catalog. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s StartTriggerInput) String() string { +func (s Table) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StartTriggerInput) GoString() string { +func (s Table) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *Table) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Table"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.ConnectionName != nil && len(*s.ConnectionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConnectionName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -22004,294 +28955,332 @@ func (s *StartTriggerInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *StartTriggerInput) SetName(v string) *StartTriggerInput { - s.Name = &v +// SetCatalogId sets the CatalogId field's value. +func (s *Table) SetCatalogId(v string) *Table { + s.CatalogId = &v return s } -type StartTriggerOutput struct { - _ struct{} `type:"structure"` - - // The name of the trigger that was started. - Name *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s StartTriggerOutput) String() string { - return awsutil.Prettify(s) +// SetConnectionName sets the ConnectionName field's value. +func (s *Table) SetConnectionName(v string) *Table { + s.ConnectionName = &v + return s } -// GoString returns the string representation -func (s StartTriggerOutput) GoString() string { - return s.String() +// SetDatabaseName sets the DatabaseName field's value. +func (s *Table) SetDatabaseName(v string) *Table { + s.DatabaseName = &v + return s } -// SetName sets the Name field's value. -func (s *StartTriggerOutput) SetName(v string) *StartTriggerOutput { - s.Name = &v +// SetTableName sets the TableName field's value. +func (s *Table) SetTableName(v string) *Table { + s.TableName = &v return s } -type StopCrawlerInput struct { +// Represents a collection of related data organized in columns and rows. +type TableData struct { _ struct{} `type:"structure"` - // Name of the crawler to stop. + // The time when the table definition was created in the Data Catalog. + CreateTime *time.Time `type:"timestamp"` + + // The person or entity who created the table. + CreatedBy *string `min:"1" type:"string"` + + // The name of the database where the table metadata resides. For Hive compatibility, + // this must be all lowercase. + DatabaseName *string `min:"1" type:"string"` + + // A description of the table. + Description *string `type:"string"` + + // Indicates whether the table has been registered with AWS Lake Formation. + IsRegisteredWithLakeFormation *bool `type:"boolean"` + + // The last time that the table was accessed. This is usually taken from HDFS, + // and might not be reliable. + LastAccessTime *time.Time `type:"timestamp"` + + // The last time that column statistics were computed for this table. + LastAnalyzedTime *time.Time `type:"timestamp"` + + // The table name. For Hive compatibility, this must be entirely lowercase. 
// // Name is a required field Name *string `min:"1" type:"string" required:"true"` -} -// String returns the string representation -func (s StopCrawlerInput) String() string { - return awsutil.Prettify(s) -} + // The owner of the table. + Owner *string `min:"1" type:"string"` -// GoString returns the string representation -func (s StopCrawlerInput) GoString() string { - return s.String() -} + // These key-value pairs define properties associated with the table. + Parameters map[string]*string `type:"map"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopCrawlerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopCrawlerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } + // A list of columns by which the table is partitioned. Only primitive types + // are supported as partition keys. + // + // When you create a table used by Amazon Athena, and you do not specify any + // partitionKeys, you must at least set the value of partitionKeys to an empty + // list. For example: + // + // "PartitionKeys": [] + PartitionKeys []*Column `type:"list"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // The retention time for this table. + Retention *int64 `type:"integer"` -// SetName sets the Name field's value. -func (s *StopCrawlerInput) SetName(v string) *StopCrawlerInput { - s.Name = &v - return s -} + // A storage descriptor containing information about the physical storage of + // this table. + StorageDescriptor *StorageDescriptor `type:"structure"` -type StopCrawlerOutput struct { - _ struct{} `type:"structure"` + // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). + TableType *string `type:"string"` + + // The last time that the table was updated. + UpdateTime *time.Time `type:"timestamp"` + + // If the table is a view, the expanded text of the view; otherwise null. + ViewExpandedText *string `type:"string"` + + // If the table is a view, the original text of the view; otherwise null. + ViewOriginalText *string `type:"string"` } // String returns the string representation -func (s StopCrawlerOutput) String() string { +func (s TableData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StopCrawlerOutput) GoString() string { +func (s TableData) GoString() string { return s.String() } -type StopCrawlerScheduleInput struct { - _ struct{} `type:"structure"` +// SetCreateTime sets the CreateTime field's value. +func (s *TableData) SetCreateTime(v time.Time) *TableData { + s.CreateTime = &v + return s +} - // Name of the crawler whose schedule state to set. - // - // CrawlerName is a required field - CrawlerName *string `min:"1" type:"string" required:"true"` +// SetCreatedBy sets the CreatedBy field's value. +func (s *TableData) SetCreatedBy(v string) *TableData { + s.CreatedBy = &v + return s } -// String returns the string representation -func (s StopCrawlerScheduleInput) String() string { - return awsutil.Prettify(s) +// SetDatabaseName sets the DatabaseName field's value. +func (s *TableData) SetDatabaseName(v string) *TableData { + s.DatabaseName = &v + return s } -// GoString returns the string representation -func (s StopCrawlerScheduleInput) GoString() string { - return s.String() +// SetDescription sets the Description field's value. 
+func (s *TableData) SetDescription(v string) *TableData { + s.Description = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopCrawlerScheduleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopCrawlerScheduleInput"} - if s.CrawlerName == nil { - invalidParams.Add(request.NewErrParamRequired("CrawlerName")) - } - if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) - } +// SetIsRegisteredWithLakeFormation sets the IsRegisteredWithLakeFormation field's value. +func (s *TableData) SetIsRegisteredWithLakeFormation(v bool) *TableData { + s.IsRegisteredWithLakeFormation = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *TableData) SetLastAccessTime(v time.Time) *TableData { + s.LastAccessTime = &v + return s } -// SetCrawlerName sets the CrawlerName field's value. -func (s *StopCrawlerScheduleInput) SetCrawlerName(v string) *StopCrawlerScheduleInput { - s.CrawlerName = &v +// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. +func (s *TableData) SetLastAnalyzedTime(v time.Time) *TableData { + s.LastAnalyzedTime = &v return s } -type StopCrawlerScheduleOutput struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *TableData) SetName(v string) *TableData { + s.Name = &v + return s } -// String returns the string representation -func (s StopCrawlerScheduleOutput) String() string { - return awsutil.Prettify(s) +// SetOwner sets the Owner field's value. +func (s *TableData) SetOwner(v string) *TableData { + s.Owner = &v + return s } -// GoString returns the string representation -func (s StopCrawlerScheduleOutput) GoString() string { - return s.String() +// SetParameters sets the Parameters field's value. +func (s *TableData) SetParameters(v map[string]*string) *TableData { + s.Parameters = v + return s } -type StopTriggerInput struct { - _ struct{} `type:"structure"` +// SetPartitionKeys sets the PartitionKeys field's value. +func (s *TableData) SetPartitionKeys(v []*Column) *TableData { + s.PartitionKeys = v + return s +} - // The name of the trigger to stop. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +// SetRetention sets the Retention field's value. +func (s *TableData) SetRetention(v int64) *TableData { + s.Retention = &v + return s } -// String returns the string representation -func (s StopTriggerInput) String() string { - return awsutil.Prettify(s) +// SetStorageDescriptor sets the StorageDescriptor field's value. +func (s *TableData) SetStorageDescriptor(v *StorageDescriptor) *TableData { + s.StorageDescriptor = v + return s } -// GoString returns the string representation -func (s StopTriggerInput) GoString() string { - return s.String() +// SetTableType sets the TableType field's value. +func (s *TableData) SetTableType(v string) *TableData { + s.TableType = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopTriggerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopTriggerInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } +// SetUpdateTime sets the UpdateTime field's value. 
+func (s *TableData) SetUpdateTime(v time.Time) *TableData { + s.UpdateTime = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetViewExpandedText sets the ViewExpandedText field's value. +func (s *TableData) SetViewExpandedText(v string) *TableData { + s.ViewExpandedText = &v + return s } -// SetName sets the Name field's value. -func (s *StopTriggerInput) SetName(v string) *StopTriggerInput { - s.Name = &v +// SetViewOriginalText sets the ViewOriginalText field's value. +func (s *TableData) SetViewOriginalText(v string) *TableData { + s.ViewOriginalText = &v return s } -type StopTriggerOutput struct { +// An error record for table operations. +type TableError struct { _ struct{} `type:"structure"` - // The name of the trigger that was stopped. - Name *string `min:"1" type:"string"` + // The details about the error. + ErrorDetail *ErrorDetail `type:"structure"` + + // The name of the table. For Hive compatibility, this must be entirely lowercase. + TableName *string `min:"1" type:"string"` } // String returns the string representation -func (s StopTriggerOutput) String() string { +func (s TableError) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StopTriggerOutput) GoString() string { +func (s TableError) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *StopTriggerOutput) SetName(v string) *StopTriggerOutput { - s.Name = &v +// SetErrorDetail sets the ErrorDetail field's value. +func (s *TableError) SetErrorDetail(v *ErrorDetail) *TableError { + s.ErrorDetail = v return s } -// Describes the physical storage of table data. -type StorageDescriptor struct { +// SetTableName sets the TableName field's value. +func (s *TableError) SetTableName(v string) *TableError { + s.TableName = &v + return s +} + +// A structure used to define a table. +type TableInput struct { _ struct{} `type:"structure"` - // A list of reducer grouping columns, clustering columns, and bucketing columns - // in the table. - BucketColumns []*string `type:"list"` + // A description of the table. + Description *string `type:"string"` - // A list of the Columns in the table. - Columns []*Column `type:"list"` + // The last time that the table was accessed. + LastAccessTime *time.Time `type:"timestamp"` - // True if the data in the table is compressed, or False if not. - Compressed *bool `type:"boolean"` + // The last time that column statistics were computed for this table. + LastAnalyzedTime *time.Time `type:"timestamp"` - // The input format: SequenceFileInputFormat (binary), or TextInputFormat, or - // a custom format. - InputFormat *string `type:"string"` + // The table name. For Hive compatibility, this is folded to lowercase when + // it is stored. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The physical location of the table. By default this takes the form of the - // warehouse location, followed by the database location in the warehouse, followed - // by the table name. - Location *string `type:"string"` + // The table owner. + Owner *string `min:"1" type:"string"` - // Must be specified if the table contains any dimension columns. - NumberOfBuckets *int64 `type:"integer"` + // These key-value pairs define properties associated with the table. + Parameters map[string]*string `type:"map"` - // The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, - // or a custom format. 
- OutputFormat *string `type:"string"` + // A list of columns by which the table is partitioned. Only primitive types + // are supported as partition keys. + // + // When you create a table used by Amazon Athena, and you do not specify any + // partitionKeys, you must at least set the value of partitionKeys to an empty + // list. For example: + // + // "PartitionKeys": [] + PartitionKeys []*Column `type:"list"` - // User-supplied properties in key-value form. - Parameters map[string]*string `type:"map"` + // The retention time for this table. + Retention *int64 `type:"integer"` - // Serialization/deserialization (SerDe) information. - SerdeInfo *SerDeInfo `type:"structure"` + // A storage descriptor containing information about the physical storage of + // this table. + StorageDescriptor *StorageDescriptor `type:"structure"` - // Information about values that appear very frequently in a column (skewed - // values). - SkewedInfo *SkewedInfo `type:"structure"` + // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). + TableType *string `type:"string"` - // A list specifying the sort order of each bucket in the table. - SortColumns []*Order `type:"list"` + // If the table is a view, the expanded text of the view; otherwise null. + ViewExpandedText *string `type:"string"` - // True if the table data is stored in subdirectories, or False if not. - StoredAsSubDirectories *bool `type:"boolean"` + // If the table is a view, the original text of the view; otherwise null. + ViewOriginalText *string `type:"string"` } // String returns the string representation -func (s StorageDescriptor) String() string { +func (s TableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s StorageDescriptor) GoString() string { +func (s TableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *StorageDescriptor) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageDescriptor"} - if s.Columns != nil { - for i, v := range s.Columns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) - } - } +func (s *TableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.SerdeInfo != nil { - if err := s.SerdeInfo.Validate(); err != nil { - invalidParams.AddNested("SerdeInfo", err.(request.ErrInvalidParams)) - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.SortColumns != nil { - for i, v := range s.SortColumns { + if s.Owner != nil && len(*s.Owner) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Owner", 1)) + } + if s.PartitionKeys != nil { + for i, v := range s.PartitionKeys { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SortColumns", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionKeys", i), err.(request.ErrInvalidParams)) } } } + if s.StorageDescriptor != nil { + if err := s.StorageDescriptor.Validate(); err != nil { + invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -22299,561 +29288,652 @@ func (s *StorageDescriptor) Validate() error { return nil } -// SetBucketColumns sets the BucketColumns field's value. -func (s *StorageDescriptor) SetBucketColumns(v []*string) *StorageDescriptor { - s.BucketColumns = v +// SetDescription sets the Description field's value. +func (s *TableInput) SetDescription(v string) *TableInput { + s.Description = &v + return s +} + +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *TableInput) SetLastAccessTime(v time.Time) *TableInput { + s.LastAccessTime = &v + return s +} + +// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. +func (s *TableInput) SetLastAnalyzedTime(v time.Time) *TableInput { + s.LastAnalyzedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *TableInput) SetName(v string) *TableInput { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *TableInput) SetOwner(v string) *TableInput { + s.Owner = &v return s } -// SetColumns sets the Columns field's value. -func (s *StorageDescriptor) SetColumns(v []*Column) *StorageDescriptor { - s.Columns = v +// SetParameters sets the Parameters field's value. +func (s *TableInput) SetParameters(v map[string]*string) *TableInput { + s.Parameters = v return s } -// SetCompressed sets the Compressed field's value. -func (s *StorageDescriptor) SetCompressed(v bool) *StorageDescriptor { - s.Compressed = &v +// SetPartitionKeys sets the PartitionKeys field's value. +func (s *TableInput) SetPartitionKeys(v []*Column) *TableInput { + s.PartitionKeys = v return s } -// SetInputFormat sets the InputFormat field's value. -func (s *StorageDescriptor) SetInputFormat(v string) *StorageDescriptor { - s.InputFormat = &v +// SetRetention sets the Retention field's value. +func (s *TableInput) SetRetention(v int64) *TableInput { + s.Retention = &v return s } -// SetLocation sets the Location field's value. 
-func (s *StorageDescriptor) SetLocation(v string) *StorageDescriptor { - s.Location = &v +// SetStorageDescriptor sets the StorageDescriptor field's value. +func (s *TableInput) SetStorageDescriptor(v *StorageDescriptor) *TableInput { + s.StorageDescriptor = v return s } -// SetNumberOfBuckets sets the NumberOfBuckets field's value. -func (s *StorageDescriptor) SetNumberOfBuckets(v int64) *StorageDescriptor { - s.NumberOfBuckets = &v +// SetTableType sets the TableType field's value. +func (s *TableInput) SetTableType(v string) *TableInput { + s.TableType = &v return s } -// SetOutputFormat sets the OutputFormat field's value. -func (s *StorageDescriptor) SetOutputFormat(v string) *StorageDescriptor { - s.OutputFormat = &v +// SetViewExpandedText sets the ViewExpandedText field's value. +func (s *TableInput) SetViewExpandedText(v string) *TableInput { + s.ViewExpandedText = &v return s } -// SetParameters sets the Parameters field's value. -func (s *StorageDescriptor) SetParameters(v map[string]*string) *StorageDescriptor { - s.Parameters = v +// SetViewOriginalText sets the ViewOriginalText field's value. +func (s *TableInput) SetViewOriginalText(v string) *TableInput { + s.ViewOriginalText = &v return s } -// SetSerdeInfo sets the SerdeInfo field's value. -func (s *StorageDescriptor) SetSerdeInfo(v *SerDeInfo) *StorageDescriptor { - s.SerdeInfo = v - return s +// Specifies a version of a table. +type TableVersion struct { + _ struct{} `type:"structure"` + + // The table in question. + Table *TableData `type:"structure"` + + // The ID value that identifies this table version. A VersionId is a string + // representation of an integer. Each version is incremented by 1. + VersionId *string `min:"1" type:"string"` } -// SetSkewedInfo sets the SkewedInfo field's value. -func (s *StorageDescriptor) SetSkewedInfo(v *SkewedInfo) *StorageDescriptor { - s.SkewedInfo = v - return s +// String returns the string representation +func (s TableVersion) String() string { + return awsutil.Prettify(s) } -// SetSortColumns sets the SortColumns field's value. -func (s *StorageDescriptor) SetSortColumns(v []*Order) *StorageDescriptor { - s.SortColumns = v +// GoString returns the string representation +func (s TableVersion) GoString() string { + return s.String() +} + +// SetTable sets the Table field's value. +func (s *TableVersion) SetTable(v *TableData) *TableVersion { + s.Table = v return s } -// SetStoredAsSubDirectories sets the StoredAsSubDirectories field's value. -func (s *StorageDescriptor) SetStoredAsSubDirectories(v bool) *StorageDescriptor { - s.StoredAsSubDirectories = &v +// SetVersionId sets the VersionId field's value. +func (s *TableVersion) SetVersionId(v string) *TableVersion { + s.VersionId = &v return s } -// Represents a collection of related data organized in columns and rows. -type Table struct { +// An error record for table-version operations. +type TableVersionError struct { _ struct{} `type:"structure"` - // Time when the table definition was created in the Data Catalog. - CreateTime *time.Time `type:"timestamp"` + // The details about the error. + ErrorDetail *ErrorDetail `type:"structure"` - // Person or entity who created the table. - CreatedBy *string `min:"1" type:"string"` + // The name of the table in question. + TableName *string `min:"1" type:"string"` - // Name of the metadata database where the table metadata resides. For Hive - // compatibility, this must be all lowercase. - DatabaseName *string `min:"1" type:"string"` + // The ID value of the version in question. 
A VersionID is a string representation + // of an integer. Each version is incremented by 1. + VersionId *string `min:"1" type:"string"` +} - // Description of the table. - Description *string `type:"string"` +// String returns the string representation +func (s TableVersionError) String() string { + return awsutil.Prettify(s) +} - // Last time the table was accessed. This is usually taken from HDFS, and may - // not be reliable. - LastAccessTime *time.Time `type:"timestamp"` +// GoString returns the string representation +func (s TableVersionError) GoString() string { + return s.String() +} - // Last time column statistics were computed for this table. - LastAnalyzedTime *time.Time `type:"timestamp"` +// SetErrorDetail sets the ErrorDetail field's value. +func (s *TableVersionError) SetErrorDetail(v *ErrorDetail) *TableVersionError { + s.ErrorDetail = v + return s +} - // Name of the table. For Hive compatibility, this must be entirely lowercase. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +// SetTableName sets the TableName field's value. +func (s *TableVersionError) SetTableName(v string) *TableVersionError { + s.TableName = &v + return s +} - // Owner of the table. - Owner *string `min:"1" type:"string"` +// SetVersionId sets the VersionId field's value. +func (s *TableVersionError) SetVersionId(v string) *TableVersionError { + s.VersionId = &v + return s +} - // These key-value pairs define properties associated with the table. - Parameters map[string]*string `type:"map"` +type TagResourceInput struct { + _ struct{} `type:"structure"` - // A list of columns by which the table is partitioned. Only primitive types - // are supported as partition keys. + // The ARN of the AWS Glue resource to which to add the tags. For more information + // about AWS Glue resource ARNs, see the AWS Glue ARN string pattern (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id). // - // When creating a table used by Athena, and you do not specify any partitionKeys, - // you must at least set the value of partitionKeys to an empty list. For example: + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // Tags to add to this resource. // - // "PartitionKeys": [] - PartitionKeys []*Column `type:"list"` + // TagsToAdd is a required field + TagsToAdd map[string]*string `type:"map" required:"true"` +} - // Retention time for this table. - Retention *int64 `type:"integer"` +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} - // A storage descriptor containing information about the physical storage of - // this table. - StorageDescriptor *StorageDescriptor `type:"structure"` +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} - // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). - TableType *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagsToAdd == nil { + invalidParams.Add(request.NewErrParamRequired("TagsToAdd")) + } - // Last time the table was updated. - UpdateTime *time.Time `type:"timestamp"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // If the table is a view, the expanded text of the view; otherwise null. - ViewExpandedText *string `type:"string"` +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} - // If the table is a view, the original text of the view; otherwise null. - ViewOriginalText *string `type:"string"` +// SetTagsToAdd sets the TagsToAdd field's value. +func (s *TagResourceInput) SetTagsToAdd(v map[string]*string) *TagResourceInput { + s.TagsToAdd = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s Table) String() string { +func (s TagResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Table) GoString() string { +func (s TagResourceOutput) GoString() string { return s.String() } -// SetCreateTime sets the CreateTime field's value. -func (s *Table) SetCreateTime(v time.Time) *Table { - s.CreateTime = &v - return s -} +// The sampling parameters that are associated with the machine learning transform. +type TaskRun struct { + _ struct{} `type:"structure"` -// SetCreatedBy sets the CreatedBy field's value. -func (s *Table) SetCreatedBy(v string) *Table { - s.CreatedBy = &v - return s -} + // The last point in time that the requested task run was completed. + CompletedOn *time.Time `type:"timestamp"` -// SetDatabaseName sets the DatabaseName field's value. -func (s *Table) SetDatabaseName(v string) *Table { - s.DatabaseName = &v - return s -} + // The list of error strings associated with this task run. + ErrorString *string `type:"string"` -// SetDescription sets the Description field's value. -func (s *Table) SetDescription(v string) *Table { - s.Description = &v - return s + // The amount of time (in seconds) that the task run consumed resources. + ExecutionTime *int64 `type:"integer"` + + // The last point in time that the requested task run was updated. + LastModifiedOn *time.Time `type:"timestamp"` + + // The names of the log group for secure logging, associated with this task + // run. + LogGroupName *string `type:"string"` + + // Specifies configuration properties associated with this task run. + Properties *TaskRunProperties `type:"structure"` + + // The date and time that this task run started. + StartedOn *time.Time `type:"timestamp"` + + // The current status of the requested task run. + Status *string `type:"string" enum:"TaskStatusType"` + + // The unique identifier for this task run. + TaskRunId *string `min:"1" type:"string"` + + // The unique identifier for the transform. + TransformId *string `min:"1" type:"string"` } -// SetLastAccessTime sets the LastAccessTime field's value. 
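// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: how the TagResourceInput
// type added above might be used. The ARN, tag key, and value are
// hypothetical; Validate() performs the client-side checks shown in the
// surrounding hunk (ResourceArn and TagsToAdd are both required).
// ---------------------------------------------------------------------------
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func tagCrawler() error {
	svc := glue.New(session.Must(session.NewSession()))
	input := &glue.TagResourceInput{
		// Hypothetical crawler ARN following the AWS Glue ARN string pattern.
		ResourceArn: aws.String("arn:aws:glue:us-east-1:123456789012:crawler/example-crawler"),
		TagsToAdd:   map[string]*string{"team": aws.String("data-platform")},
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("invalid TagResourceInput: %v", err)
	}
	_, err := svc.TagResource(input)
	return err
}
// ------------------------- end of editor's sketch -------------------------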
-func (s *Table) SetLastAccessTime(v time.Time) *Table { - s.LastAccessTime = &v - return s +// String returns the string representation +func (s TaskRun) String() string { + return awsutil.Prettify(s) } -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *Table) SetLastAnalyzedTime(v time.Time) *Table { - s.LastAnalyzedTime = &v - return s +// GoString returns the string representation +func (s TaskRun) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *Table) SetName(v string) *Table { - s.Name = &v +// SetCompletedOn sets the CompletedOn field's value. +func (s *TaskRun) SetCompletedOn(v time.Time) *TaskRun { + s.CompletedOn = &v return s } -// SetOwner sets the Owner field's value. -func (s *Table) SetOwner(v string) *Table { - s.Owner = &v +// SetErrorString sets the ErrorString field's value. +func (s *TaskRun) SetErrorString(v string) *TaskRun { + s.ErrorString = &v return s } -// SetParameters sets the Parameters field's value. -func (s *Table) SetParameters(v map[string]*string) *Table { - s.Parameters = v +// SetExecutionTime sets the ExecutionTime field's value. +func (s *TaskRun) SetExecutionTime(v int64) *TaskRun { + s.ExecutionTime = &v return s } -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *Table) SetPartitionKeys(v []*Column) *Table { - s.PartitionKeys = v +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *TaskRun) SetLastModifiedOn(v time.Time) *TaskRun { + s.LastModifiedOn = &v return s } -// SetRetention sets the Retention field's value. -func (s *Table) SetRetention(v int64) *Table { - s.Retention = &v +// SetLogGroupName sets the LogGroupName field's value. +func (s *TaskRun) SetLogGroupName(v string) *TaskRun { + s.LogGroupName = &v return s } -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *Table) SetStorageDescriptor(v *StorageDescriptor) *Table { - s.StorageDescriptor = v +// SetProperties sets the Properties field's value. +func (s *TaskRun) SetProperties(v *TaskRunProperties) *TaskRun { + s.Properties = v return s } -// SetTableType sets the TableType field's value. -func (s *Table) SetTableType(v string) *Table { - s.TableType = &v +// SetStartedOn sets the StartedOn field's value. +func (s *TaskRun) SetStartedOn(v time.Time) *TaskRun { + s.StartedOn = &v return s } -// SetUpdateTime sets the UpdateTime field's value. -func (s *Table) SetUpdateTime(v time.Time) *Table { - s.UpdateTime = &v +// SetStatus sets the Status field's value. +func (s *TaskRun) SetStatus(v string) *TaskRun { + s.Status = &v return s } -// SetViewExpandedText sets the ViewExpandedText field's value. -func (s *Table) SetViewExpandedText(v string) *Table { - s.ViewExpandedText = &v +// SetTaskRunId sets the TaskRunId field's value. +func (s *TaskRun) SetTaskRunId(v string) *TaskRun { + s.TaskRunId = &v return s } -// SetViewOriginalText sets the ViewOriginalText field's value. -func (s *Table) SetViewOriginalText(v string) *Table { - s.ViewOriginalText = &v +// SetTransformId sets the TransformId field's value. +func (s *TaskRun) SetTransformId(v string) *TaskRun { + s.TransformId = &v return s } -// An error record for table operations. -type TableError struct { +// The criteria that are used to filter the task runs for the machine learning +// transform. +type TaskRunFilterCriteria struct { _ struct{} `type:"structure"` - // Detail about the error. - ErrorDetail *ErrorDetail `type:"structure"` + // Filter on task runs started after this date. 
+ StartedAfter *time.Time `type:"timestamp"` - // Name of the table. For Hive compatibility, this must be entirely lowercase. - TableName *string `min:"1" type:"string"` + // Filter on task runs started before this date. + StartedBefore *time.Time `type:"timestamp"` + + // The current status of the task run. + Status *string `type:"string" enum:"TaskStatusType"` + + // The type of task run. + TaskRunType *string `type:"string" enum:"TaskType"` } // String returns the string representation -func (s TableError) String() string { +func (s TaskRunFilterCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TableError) GoString() string { +func (s TaskRunFilterCriteria) GoString() string { return s.String() } -// SetErrorDetail sets the ErrorDetail field's value. -func (s *TableError) SetErrorDetail(v *ErrorDetail) *TableError { - s.ErrorDetail = v +// SetStartedAfter sets the StartedAfter field's value. +func (s *TaskRunFilterCriteria) SetStartedAfter(v time.Time) *TaskRunFilterCriteria { + s.StartedAfter = &v return s } -// SetTableName sets the TableName field's value. -func (s *TableError) SetTableName(v string) *TableError { - s.TableName = &v +// SetStartedBefore sets the StartedBefore field's value. +func (s *TaskRunFilterCriteria) SetStartedBefore(v time.Time) *TaskRunFilterCriteria { + s.StartedBefore = &v return s } -// Structure used to create or update the table. -type TableInput struct { - _ struct{} `type:"structure"` - - // Description of the table. - Description *string `type:"string"` - - // Last time the table was accessed. - LastAccessTime *time.Time `type:"timestamp"` - - // Last time column statistics were computed for this table. - LastAnalyzedTime *time.Time `type:"timestamp"` - - // Name of the table. For Hive compatibility, this is folded to lowercase when - // it is stored. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Owner of the table. - Owner *string `min:"1" type:"string"` +// SetStatus sets the Status field's value. +func (s *TaskRunFilterCriteria) SetStatus(v string) *TaskRunFilterCriteria { + s.Status = &v + return s +} - // These key-value pairs define properties associated with the table. - Parameters map[string]*string `type:"map"` +// SetTaskRunType sets the TaskRunType field's value. +func (s *TaskRunFilterCriteria) SetTaskRunType(v string) *TaskRunFilterCriteria { + s.TaskRunType = &v + return s +} - // A list of columns by which the table is partitioned. Only primitive types - // are supported as partition keys. - // - // When creating a table used by Athena, and you do not specify any partitionKeys, - // you must at least set the value of partitionKeys to an empty list. For example: - // - // "PartitionKeys": [] - PartitionKeys []*Column `type:"list"` +// The configuration properties for the task run. +type TaskRunProperties struct { + _ struct{} `type:"structure"` - // Retention time for this table. - Retention *int64 `type:"integer"` + // The configuration properties for an exporting labels task run. + ExportLabelsTaskRunProperties *ExportLabelsTaskRunProperties `type:"structure"` - // A storage descriptor containing information about the physical storage of - // this table. - StorageDescriptor *StorageDescriptor `type:"structure"` + // The configuration properties for a find matches task run. + FindMatchesTaskRunProperties *FindMatchesTaskRunProperties `type:"structure"` - // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). 
- TableType *string `type:"string"` + // The configuration properties for an importing labels task run. + ImportLabelsTaskRunProperties *ImportLabelsTaskRunProperties `type:"structure"` - // If the table is a view, the expanded text of the view; otherwise null. - ViewExpandedText *string `type:"string"` + // The configuration properties for a labeling set generation task run. + LabelingSetGenerationTaskRunProperties *LabelingSetGenerationTaskRunProperties `type:"structure"` - // If the table is a view, the original text of the view; otherwise null. - ViewOriginalText *string `type:"string"` + // The type of task run. + TaskType *string `type:"string" enum:"TaskType"` } // String returns the string representation -func (s TableInput) String() string { +func (s TaskRunProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TableInput) GoString() string { +func (s TaskRunProperties) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *TableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TableInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Owner != nil && len(*s.Owner) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Owner", 1)) - } - if s.PartitionKeys != nil { - for i, v := range s.PartitionKeys { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionKeys", i), err.(request.ErrInvalidParams)) - } - } - } - if s.StorageDescriptor != nil { - if err := s.StorageDescriptor.Validate(); err != nil { - invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *TableInput) SetDescription(v string) *TableInput { - s.Description = &v +// SetExportLabelsTaskRunProperties sets the ExportLabelsTaskRunProperties field's value. +func (s *TaskRunProperties) SetExportLabelsTaskRunProperties(v *ExportLabelsTaskRunProperties) *TaskRunProperties { + s.ExportLabelsTaskRunProperties = v return s } -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *TableInput) SetLastAccessTime(v time.Time) *TableInput { - s.LastAccessTime = &v +// SetFindMatchesTaskRunProperties sets the FindMatchesTaskRunProperties field's value. +func (s *TaskRunProperties) SetFindMatchesTaskRunProperties(v *FindMatchesTaskRunProperties) *TaskRunProperties { + s.FindMatchesTaskRunProperties = v return s } -// SetLastAnalyzedTime sets the LastAnalyzedTime field's value. -func (s *TableInput) SetLastAnalyzedTime(v time.Time) *TableInput { - s.LastAnalyzedTime = &v +// SetImportLabelsTaskRunProperties sets the ImportLabelsTaskRunProperties field's value. +func (s *TaskRunProperties) SetImportLabelsTaskRunProperties(v *ImportLabelsTaskRunProperties) *TaskRunProperties { + s.ImportLabelsTaskRunProperties = v return s } -// SetName sets the Name field's value. -func (s *TableInput) SetName(v string) *TableInput { - s.Name = &v +// SetLabelingSetGenerationTaskRunProperties sets the LabelingSetGenerationTaskRunProperties field's value. 
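// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: combining the task-run
// filter criteria above with the GetMLTaskRuns operation on the glue client.
// The transform ID is hypothetical, and TaskRunSortCriteria is the sort type
// defined just below.
// ---------------------------------------------------------------------------
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func recentSucceededTaskRuns() ([]*glue.TaskRun, error) {
	svc := glue.New(session.Must(session.NewSession()))
	out, err := svc.GetMLTaskRuns(&glue.GetMLTaskRunsInput{
		TransformId: aws.String("tfm-0123456789abcdef"), // hypothetical
		Filter: &glue.TaskRunFilterCriteria{
			StartedAfter: aws.Time(time.Now().Add(-24 * time.Hour)),
			Status:       aws.String(glue.TaskStatusTypeSucceeded),
		},
		Sort: &glue.TaskRunSortCriteria{
			Column:        aws.String(glue.TaskRunSortColumnTypeStarted),
			SortDirection: aws.String(glue.SortDirectionTypeDescending),
		},
	})
	if err != nil {
		return nil, err
	}
	return out.TaskRuns, nil
}
// ------------------------- end of editor's sketch -------------------------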
+func (s *TaskRunProperties) SetLabelingSetGenerationTaskRunProperties(v *LabelingSetGenerationTaskRunProperties) *TaskRunProperties { + s.LabelingSetGenerationTaskRunProperties = v return s } -// SetOwner sets the Owner field's value. -func (s *TableInput) SetOwner(v string) *TableInput { - s.Owner = &v +// SetTaskType sets the TaskType field's value. +func (s *TaskRunProperties) SetTaskType(v string) *TaskRunProperties { + s.TaskType = &v return s } -// SetParameters sets the Parameters field's value. -func (s *TableInput) SetParameters(v map[string]*string) *TableInput { - s.Parameters = v - return s -} +// The sorting criteria that are used to sort the list of task runs for the +// machine learning transform. +type TaskRunSortCriteria struct { + _ struct{} `type:"structure"` -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *TableInput) SetPartitionKeys(v []*Column) *TableInput { - s.PartitionKeys = v - return s + // The column to be used to sort the list of task runs for the machine learning + // transform. + // + // Column is a required field + Column *string `type:"string" required:"true" enum:"TaskRunSortColumnType"` + + // The sort direction to be used to sort the list of task runs for the machine + // learning transform. + // + // SortDirection is a required field + SortDirection *string `type:"string" required:"true" enum:"SortDirectionType"` } -// SetRetention sets the Retention field's value. -func (s *TableInput) SetRetention(v int64) *TableInput { - s.Retention = &v - return s +// String returns the string representation +func (s TaskRunSortCriteria) String() string { + return awsutil.Prettify(s) } -// SetStorageDescriptor sets the StorageDescriptor field's value. -func (s *TableInput) SetStorageDescriptor(v *StorageDescriptor) *TableInput { - s.StorageDescriptor = v - return s +// GoString returns the string representation +func (s TaskRunSortCriteria) GoString() string { + return s.String() } -// SetTableType sets the TableType field's value. -func (s *TableInput) SetTableType(v string) *TableInput { - s.TableType = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *TaskRunSortCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TaskRunSortCriteria"} + if s.Column == nil { + invalidParams.Add(request.NewErrParamRequired("Column")) + } + if s.SortDirection == nil { + invalidParams.Add(request.NewErrParamRequired("SortDirection")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetViewExpandedText sets the ViewExpandedText field's value. -func (s *TableInput) SetViewExpandedText(v string) *TableInput { - s.ViewExpandedText = &v +// SetColumn sets the Column field's value. +func (s *TaskRunSortCriteria) SetColumn(v string) *TaskRunSortCriteria { + s.Column = &v return s } -// SetViewOriginalText sets the ViewOriginalText field's value. -func (s *TableInput) SetViewOriginalText(v string) *TableInput { - s.ViewOriginalText = &v +// SetSortDirection sets the SortDirection field's value. +func (s *TaskRunSortCriteria) SetSortDirection(v string) *TaskRunSortCriteria { + s.SortDirection = &v return s } -// Specifies a version of a table. -type TableVersion struct { +// The criteria used to filter the machine learning transforms. +type TransformFilterCriteria struct { _ struct{} `type:"structure"` - // The table in question - Table *Table `type:"structure"` + // The time and date after which the transforms were created. 
+ CreatedAfter *time.Time `type:"timestamp"` - // The ID value that identifies this table version. A VersionId is a string - // representation of an integer. Each version is incremented by 1. - VersionId *string `min:"1" type:"string"` + // The time and date before which the transforms were created. + CreatedBefore *time.Time `type:"timestamp"` + + // Filter on transforms last modified after this date. + LastModifiedAfter *time.Time `type:"timestamp"` + + // Filter on transforms last modified before this date. + LastModifiedBefore *time.Time `type:"timestamp"` + + // A unique transform name that is used to filter the machine learning transforms. + Name *string `min:"1" type:"string"` + + // Filters on datasets with a specific schema. The Map<Column, Type> object + // is an array of key-value pairs representing the schema this transform accepts, + // where Column is the name of a column, and Type is the type of the data such + // as an integer or string. Has an upper bound of 100 columns. + Schema []*SchemaColumn `type:"list"` + + // Filters the list of machine learning transforms by the last known status + // of the transforms (to indicate whether a transform can be used or not). One + // of "NOT_READY", "READY", or "DELETING". + Status *string `type:"string" enum:"TransformStatusType"` + + // The type of machine learning transform that is used to filter the machine + // learning transforms. + TransformType *string `type:"string" enum:"TransformType"` } // String returns the string representation -func (s TableVersion) String() string { +func (s TransformFilterCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TableVersion) GoString() string { +func (s TransformFilterCriteria) GoString() string { return s.String() } -// SetTable sets the Table field's value. -func (s *TableVersion) SetTable(v *Table) *TableVersion { - s.Table = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransformFilterCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransformFilterCriteria"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Schema != nil { + for i, v := range s.Schema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Schema", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVersionId sets the VersionId field's value. -func (s *TableVersion) SetVersionId(v string) *TableVersion { - s.VersionId = &v +// SetCreatedAfter sets the CreatedAfter field's value. +func (s *TransformFilterCriteria) SetCreatedAfter(v time.Time) *TransformFilterCriteria { + s.CreatedAfter = &v return s } -// An error record for table-version operations. -type TableVersionError struct { - _ struct{} `type:"structure"` - - // Detail about the error. - ErrorDetail *ErrorDetail `type:"structure"` - - // The name of the table in question. - TableName *string `min:"1" type:"string"` +// SetCreatedBefore sets the CreatedBefore field's value. +func (s *TransformFilterCriteria) SetCreatedBefore(v time.Time) *TransformFilterCriteria { + s.CreatedBefore = &v + return s +} - // The ID value of the version in question. A VersionID is a string representation - // of an integer. Each version is incremented by 1.
- VersionId *string `min:"1" type:"string"` +// SetLastModifiedAfter sets the LastModifiedAfter field's value. +func (s *TransformFilterCriteria) SetLastModifiedAfter(v time.Time) *TransformFilterCriteria { + s.LastModifiedAfter = &v + return s } -// String returns the string representation -func (s TableVersionError) String() string { - return awsutil.Prettify(s) +// SetLastModifiedBefore sets the LastModifiedBefore field's value. +func (s *TransformFilterCriteria) SetLastModifiedBefore(v time.Time) *TransformFilterCriteria { + s.LastModifiedBefore = &v + return s } -// GoString returns the string representation -func (s TableVersionError) GoString() string { - return s.String() +// SetName sets the Name field's value. +func (s *TransformFilterCriteria) SetName(v string) *TransformFilterCriteria { + s.Name = &v + return s } -// SetErrorDetail sets the ErrorDetail field's value. -func (s *TableVersionError) SetErrorDetail(v *ErrorDetail) *TableVersionError { - s.ErrorDetail = v +// SetSchema sets the Schema field's value. +func (s *TransformFilterCriteria) SetSchema(v []*SchemaColumn) *TransformFilterCriteria { + s.Schema = v return s } -// SetTableName sets the TableName field's value. -func (s *TableVersionError) SetTableName(v string) *TableVersionError { - s.TableName = &v +// SetStatus sets the Status field's value. +func (s *TransformFilterCriteria) SetStatus(v string) *TransformFilterCriteria { + s.Status = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *TableVersionError) SetVersionId(v string) *TableVersionError { - s.VersionId = &v +// SetTransformType sets the TransformType field's value. +func (s *TransformFilterCriteria) SetTransformType(v string) *TransformFilterCriteria { + s.TransformType = &v return s } -type TagResourceInput struct { +// The algorithm-specific parameters that are associated with the machine learning +// transform. +type TransformParameters struct { _ struct{} `type:"structure"` - // The ARN of the AWS Glue resource to which to add the tags. For more information - // about AWS Glue resource ARNs, see the AWS Glue ARN string pattern (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id). - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` + // The parameters for the find matches algorithm. + FindMatchesParameters *FindMatchesParameters `type:"structure"` - // Tags to add to this resource. + // The type of machine learning transform. // - // TagsToAdd is a required field - TagsToAdd map[string]*string `type:"map" required:"true"` + // For information about the types of machine learning transforms, see Creating + // Machine Learning Transforms (http://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html). + // + // TransformType is a required field + TransformType *string `type:"string" required:"true" enum:"TransformType"` } // String returns the string representation -func (s TagResourceInput) String() string { +func (s TransformParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagResourceInput) GoString() string { +func (s TransformParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
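// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: assembling the
// TransformParameters type above for a find-matches transform. The column
// name and tradeoff value are illustrative; Validate() is the method defined
// in the hunk that follows, and FIND_MATCHES is currently the only
// TransformType value defined in this file.
// ---------------------------------------------------------------------------
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func findMatchesParams() (*glue.TransformParameters, error) {
	p := &glue.TransformParameters{
		TransformType: aws.String(glue.TransformTypeFindMatches),
		FindMatchesParameters: &glue.FindMatchesParameters{
			PrimaryKeyColumnName:    aws.String("record_id"), // hypothetical column
			PrecisionRecallTradeoff: aws.Float64(0.5),
		},
	}
	if err := p.Validate(); err != nil {
		return nil, err
	}
	return p, nil
}
// ------------------------- end of editor's sketch -------------------------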
-func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.TagsToAdd == nil { - invalidParams.Add(request.NewErrParamRequired("TagsToAdd")) +func (s *TransformParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransformParameters"} + if s.TransformType == nil { + invalidParams.Add(request.NewErrParamRequired("TransformType")) + } + if s.FindMatchesParameters != nil { + if err := s.FindMatchesParameters.Validate(); err != nil { + invalidParams.AddNested("FindMatchesParameters", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -22861,33 +29941,74 @@ func (s *TagResourceInput) Validate() error { } return nil } - -// SetResourceArn sets the ResourceArn field's value. -func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { - s.ResourceArn = &v + +// SetFindMatchesParameters sets the FindMatchesParameters field's value. +func (s *TransformParameters) SetFindMatchesParameters(v *FindMatchesParameters) *TransformParameters { + s.FindMatchesParameters = v return s } -// SetTagsToAdd sets the TagsToAdd field's value. -func (s *TagResourceInput) SetTagsToAdd(v map[string]*string) *TagResourceInput { - s.TagsToAdd = v +// SetTransformType sets the TransformType field's value. +func (s *TransformParameters) SetTransformType(v string) *TransformParameters { + s.TransformType = &v return s } -type TagResourceOutput struct { +// The sorting criteria that are associated with the machine learning transform. +type TransformSortCriteria struct { _ struct{} `type:"structure"` + + // The column to be used in the sorting criteria that are associated with the + // machine learning transform. + // + // Column is a required field + Column *string `type:"string" required:"true" enum:"TransformSortColumnType"` + + // The sort direction to be used in the sorting criteria that are associated + // with the machine learning transform. + // + // SortDirection is a required field + SortDirection *string `type:"string" required:"true" enum:"SortDirectionType"` } // String returns the string representation -func (s TagResourceOutput) String() string { +func (s TransformSortCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagResourceOutput) GoString() string { +func (s TransformSortCriteria) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransformSortCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransformSortCriteria"} + if s.Column == nil { + invalidParams.Add(request.NewErrParamRequired("Column")) + } + if s.SortDirection == nil { + invalidParams.Add(request.NewErrParamRequired("SortDirection")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumn sets the Column field's value. +func (s *TransformSortCriteria) SetColumn(v string) *TransformSortCriteria { + s.Column = &v + return s +} + +// SetSortDirection sets the SortDirection field's value. +func (s *TransformSortCriteria) SetSortDirection(v string) *TransformSortCriteria { + s.SortDirection = &v + return s +} + // Information about a specific trigger. 
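// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: the filter and sort
// criteria above feed the GetMLTransforms operation on the glue client; all
// values shown are illustrative.
// ---------------------------------------------------------------------------
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func readyTransforms() ([]*glue.MLTransform, error) {
	svc := glue.New(session.Must(session.NewSession()))
	out, err := svc.GetMLTransforms(&glue.GetMLTransformsInput{
		Filter: &glue.TransformFilterCriteria{
			Status:        aws.String(glue.TransformStatusTypeReady),
			TransformType: aws.String(glue.TransformTypeFindMatches),
		},
		Sort: &glue.TransformSortCriteria{
			Column:        aws.String(glue.TransformSortColumnTypeLastModified),
			SortDirection: aws.String(glue.SortDirectionTypeDescending),
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Transforms, nil
}
// ------------------------- end of editor's sketch -------------------------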
type Trigger struct { _ struct{} `type:"structure"` @@ -22901,14 +30022,14 @@ type Trigger struct { // Reserved for future use. Id *string `min:"1" type:"string"` - // Name of the trigger. + // The name of the trigger. Name *string `min:"1" type:"string"` // The predicate of this trigger, which defines when it will fire. Predicate *Predicate `type:"structure"` // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). // For example, to run something every day at 12:15 UTC, you would specify: // cron(15 12 * * ? *). Schedule *string `type:"string"` @@ -22918,6 +30039,9 @@ type Trigger struct { // The type of trigger that this is. Type *string `type:"string" enum:"TriggerType"` + + // The name of the workflow associated with the trigger. + WorkflowName *string `min:"1" type:"string"` } // String returns the string representation @@ -22978,8 +30102,38 @@ func (s *Trigger) SetType(v string) *Trigger { return s } +// SetWorkflowName sets the WorkflowName field's value. +func (s *Trigger) SetWorkflowName(v string) *Trigger { + s.WorkflowName = &v + return s +} + +// The details of a Trigger node present in the workflow. +type TriggerNodeDetails struct { + _ struct{} `type:"structure"` + + // The information of the trigger represented by the trigger node. + Trigger *Trigger `type:"structure"` +} + +// String returns the string representation +func (s TriggerNodeDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TriggerNodeDetails) GoString() string { + return s.String() +} + +// SetTrigger sets the Trigger field's value. +func (s *TriggerNodeDetails) SetTrigger(v *Trigger) *TriggerNodeDetails { + s.Trigger = v + return s +} + // A structure used to provide information used to update a trigger. This object -// will update the the previous trigger definition by overwriting it completely. +// updates the previous trigger definition by overwriting it completely. type TriggerUpdate struct { _ struct{} `type:"structure"` @@ -22996,7 +30150,7 @@ type TriggerUpdate struct { Predicate *Predicate `type:"structure"` // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). // For example, to run something every day at 12:15 UTC, you would specify: // cron(15 12 * * ? *). Schedule *string `type:"string"` @@ -23073,7 +30227,7 @@ func (s *TriggerUpdate) SetSchedule(v string) *TriggerUpdate { type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the resource from which to remove the tags. + // The Amazon Resource Name (ARN) of the resource from which to remove the tags. // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -23328,12 +30482,12 @@ type UpdateCrawlerInput struct { // always override the default classifiers for a given classification. Classifiers []*string `type:"list"` - // Crawler configuration information. This versioned JSON string allows users - // to specify aspects of a crawler's behavior. 
For more information, see Configuring - // a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + // The crawler configuration information. This versioned JSON string allows + // users to specify aspects of a crawler's behavior. For more information, see + // Configuring a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). Configuration *string `type:"string"` - // The name of the SecurityConfiguration structure to be used by this Crawler. + // The name of the SecurityConfiguration structure to be used by this crawler. CrawlerSecurityConfiguration *string `type:"string"` // The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/*. @@ -23347,17 +30501,17 @@ type UpdateCrawlerInput struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The IAM role (or ARN of an IAM role) used by the new crawler to access customer - // resources. + // The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by + // the new crawler to access customer resources. Role *string `type:"string"` - // A cron expression used to specify the schedule (see Time-Based Schedules - // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). + // A cron expression used to specify the schedule. For more information, see + // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, specify cron(15 12 + // * * ? *). Schedule *string `type:"string"` - // Policy for the crawler's update and deletion behavior. + // The policy for the crawler's update and deletion behavior. SchemaChangePolicy *SchemaChangePolicy `type:"structure"` // The table prefix used for catalog tables that are created. @@ -23386,6 +30540,11 @@ func (s *UpdateCrawlerInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Targets != nil { + if err := s.Targets.Validate(); err != nil { + invalidParams.AddNested("Targets", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23476,15 +30635,15 @@ func (s UpdateCrawlerOutput) GoString() string { type UpdateCrawlerScheduleInput struct { _ struct{} `type:"structure"` - // Name of the crawler whose schedule to update. + // The name of the crawler whose schedule to update. // // CrawlerName is a required field CrawlerName *string `min:"1" type:"string" required:"true"` - // The updated cron expression used to specify the schedule (see Time-Based - // Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, you would specify: - // cron(15 12 * * ? *). + // The updated cron expression used to specify the schedule. For more information, + // see Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, specify cron(15 12 + // * * ? *). 
Schedule *string `type:"string"` } @@ -23566,7 +30725,7 @@ type UpdateCsvClassifierRequest struct { Name *string `min:"1" type:"string" required:"true"` // A custom symbol to denote what combines content into a single column value. - // Must be different from the column delimiter. + // It must be different from the column delimiter. QuoteSymbol *string `min:"1" type:"string"` } @@ -23648,7 +30807,7 @@ type UpdateDatabaseInput struct { _ struct{} `type:"structure"` // The ID of the Data Catalog in which the metadata database resides. If none - // is supplied, the AWS account ID is used by default. + // is provided, the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` // A DatabaseInput object specifying the new definition of the metadata database @@ -23737,6 +30896,18 @@ type UpdateDevEndpointInput struct { _ struct{} `type:"structure"` // The map of arguments to add the map of arguments used to configure the DevEndpoint. + // + // Valid arguments are: + // + // * "--enable-glue-datacatalog": "" + // + // * "GLUE_PYTHON_VERSION": "3" + // + // * "GLUE_PYTHON_VERSION": "2" + // + // You can specify a version of Python support for development endpoints by + // using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint + // APIs. If no arguments are provided, the version defaults to Python 2. AddArguments map[string]*string `type:"map"` // The list of public keys for the DevEndpoint to use. @@ -23761,7 +30932,7 @@ type UpdateDevEndpointInput struct { PublicKey *string `type:"string"` // True if the list of custom libraries to be loaded in the development endpoint - // needs to be updated, or False otherwise. + // needs to be updated, or False if otherwise. UpdateEtlLibraries *bool `type:"boolean"` } @@ -23926,7 +31097,7 @@ func (s *UpdateGrokClassifierRequest) SetName(v string) *UpdateGrokClassifierReq type UpdateJobInput struct { _ struct{} `type:"structure"` - // Name of the job definition to update. + // The name of the job definition to update. // // JobName is a required field JobName *string `min:"1" type:"string" required:"true"` @@ -24059,11 +31230,190 @@ func (s *UpdateJsonClassifierRequest) SetName(v string) *UpdateJsonClassifierReq return s } +type UpdateMLTransformInput struct { + _ struct{} `type:"structure"` + + // A description of the transform. The default is an empty string. + Description *string `type:"string"` + + // The number of AWS Glue data processing units (DPUs) that are allocated to + // task runs for this transform. You can allocate from 2 to 100 DPUs; the default + // is 10. A DPU is a relative measure of processing power that consists of 4 + // vCPUs of compute capacity and 16 GB of memory. For more information, see + // the AWS Glue pricing page (https://aws.amazon.com/glue/pricing/). + // + // When the WorkerType field is set to a value other than Standard, the MaxCapacity + // field is set automatically and becomes read-only. + MaxCapacity *float64 `type:"double"` + + // The maximum number of times to retry a task for this transform after a task + // run fails. + MaxRetries *int64 `type:"integer"` + + // The unique name that you gave the transform when you created it. + Name *string `min:"1" type:"string"` + + // The number of workers of a defined workerType that are allocated when this + // task runs. + NumberOfWorkers *int64 `type:"integer"` + + // The configuration parameters that are specific to the transform type (algorithm) + // used. Conditionally dependent on the transform type. 
+ Parameters *TransformParameters `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the IAM role with the required + // permissions. + Role *string `type:"string"` + + // The timeout for a task run for this transform in minutes. This is the maximum + // time that a task run for this transform can consume resources before it is + // terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). + Timeout *int64 `min:"1" type:"integer"` + + // A unique identifier that was generated when the transform was created. + // + // TransformId is a required field + TransformId *string `min:"1" type:"string" required:"true"` + + // The type of predefined worker that is allocated when this task runs. Accepts + // a value of Standard, G.1X, or G.2X. + // + // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of + // memory and a 50GB disk, and 2 executors per worker. + // + // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory + // and a 64GB disk, and 1 executor per worker. + // + // * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory + // and a 128GB disk, and 1 executor per worker. + WorkerType *string `type:"string" enum:"WorkerType"` +} + +// String returns the string representation +func (s UpdateMLTransformInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMLTransformInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMLTransformInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMLTransformInput"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.TransformId == nil { + invalidParams.Add(request.NewErrParamRequired("TransformId")) + } + if s.TransformId != nil && len(*s.TransformId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransformId", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateMLTransformInput) SetDescription(v string) *UpdateMLTransformInput { + s.Description = &v + return s +} + +// SetMaxCapacity sets the MaxCapacity field's value. +func (s *UpdateMLTransformInput) SetMaxCapacity(v float64) *UpdateMLTransformInput { + s.MaxCapacity = &v + return s +} + +// SetMaxRetries sets the MaxRetries field's value. +func (s *UpdateMLTransformInput) SetMaxRetries(v int64) *UpdateMLTransformInput { + s.MaxRetries = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateMLTransformInput) SetName(v string) *UpdateMLTransformInput { + s.Name = &v + return s +} + +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *UpdateMLTransformInput) SetNumberOfWorkers(v int64) *UpdateMLTransformInput { + s.NumberOfWorkers = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *UpdateMLTransformInput) SetParameters(v *TransformParameters) *UpdateMLTransformInput { + s.Parameters = v + return s +} + +// SetRole sets the Role field's value. 
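// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: resizing a transform with
// UpdateMLTransformInput. Per the field docs above, setting WorkerType makes
// MaxCapacity read-only, so this sketch sizes the transform with WorkerType
// and NumberOfWorkers instead. The transform ID and numbers are hypothetical.
// ---------------------------------------------------------------------------
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func resizeTransform() error {
	svc := glue.New(session.Must(session.NewSession()))
	input := &glue.UpdateMLTransformInput{
		TransformId:     aws.String("tfm-0123456789abcdef"), // hypothetical
		WorkerType:      aws.String(glue.WorkerTypeG1x),
		NumberOfWorkers: aws.Int64(10),
		Timeout:         aws.Int64(120), // minutes
	}
	if err := input.Validate(); err != nil {
		return err
	}
	_, err := svc.UpdateMLTransform(input)
	return err
}
// ------------------------- end of editor's sketch -------------------------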
+func (s *UpdateMLTransformInput) SetRole(v string) *UpdateMLTransformInput { + s.Role = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *UpdateMLTransformInput) SetTimeout(v int64) *UpdateMLTransformInput { + s.Timeout = &v + return s +} + +// SetTransformId sets the TransformId field's value. +func (s *UpdateMLTransformInput) SetTransformId(v string) *UpdateMLTransformInput { + s.TransformId = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *UpdateMLTransformInput) SetWorkerType(v string) *UpdateMLTransformInput { + s.WorkerType = &v + return s +} + +type UpdateMLTransformOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the transform that was updated. + TransformId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateMLTransformOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMLTransformOutput) GoString() string { + return s.String() +} + +// SetTransformId sets the TransformId field's value. +func (s *UpdateMLTransformOutput) SetTransformId(v string) *UpdateMLTransformOutput { + s.TransformId = &v + return s +} + type UpdatePartitionInput struct { _ struct{} `type:"structure"` // The ID of the Data Catalog where the partition to be updated resides. If - // none is supplied, the AWS account ID is used by default. + // none is provided, the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` // The name of the catalog database in which the table in question resides. @@ -24071,7 +31421,7 @@ type UpdatePartitionInput struct { // DatabaseName is a required field DatabaseName *string `min:"1" type:"string" required:"true"` - // The new partition object to which to update the partition. + // The new partition object to update the partition to. // // PartitionInput is a required field PartitionInput *PartitionInput `type:"structure" required:"true"` @@ -24081,7 +31431,7 @@ type UpdatePartitionInput struct { // PartitionValueList is a required field PartitionValueList []*string `type:"list" required:"true"` - // The name of the table where the partition to be updated is located. + // The name of the table in which the partition to be updated is located. // // TableName is a required field TableName *string `min:"1" type:"string" required:"true"` @@ -24180,7 +31530,7 @@ func (s UpdatePartitionOutput) GoString() string { type UpdateTableInput struct { _ struct{} `type:"structure"` - // The ID of the Data Catalog where the table resides. If none is supplied, + // The ID of the Data Catalog where the table resides. If none is provided, // the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` @@ -24191,7 +31541,7 @@ type UpdateTableInput struct { DatabaseName *string `min:"1" type:"string" required:"true"` // By default, UpdateTable always creates an archived version of the table before - // updating it. If skipArchive is set to true, however, UpdateTable does not + // updating it. However, if skipArchive is set to true, UpdateTable does not // create the archived version. SkipArchive *bool `type:"boolean"` @@ -24363,7 +31713,7 @@ type UpdateUserDefinedFunctionInput struct { _ struct{} `type:"structure"` // The ID of the Data Catalog where the function to be updated is located. If - // none is supplied, the AWS account ID is used by default. + // none is provided, the AWS account ID is used by default. 
CatalogId *string `min:"1" type:"string"` // The name of the catalog database where the function to be updated is located. @@ -24371,7 +31721,7 @@ type UpdateUserDefinedFunctionInput struct { // DatabaseName is a required field DatabaseName *string `min:"1" type:"string" required:"true"` - // A FunctionInput object that re-defines the function in the Data Catalog. + // A FunctionInput object that redefines the function in the Data Catalog. // // FunctionInput is a required field FunctionInput *UserDefinedFunctionInput `type:"structure" required:"true"` @@ -24443,26 +31793,108 @@ func (s *UpdateUserDefinedFunctionInput) SetFunctionInput(v *UserDefinedFunction return s } -// SetFunctionName sets the FunctionName field's value. -func (s *UpdateUserDefinedFunctionInput) SetFunctionName(v string) *UpdateUserDefinedFunctionInput { - s.FunctionName = &v +// SetFunctionName sets the FunctionName field's value. +func (s *UpdateUserDefinedFunctionInput) SetFunctionName(v string) *UpdateUserDefinedFunctionInput { + s.FunctionName = &v + return s +} + +type UpdateUserDefinedFunctionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserDefinedFunctionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserDefinedFunctionOutput) GoString() string { + return s.String() +} + +type UpdateWorkflowInput struct { + _ struct{} `type:"structure"` + + // A collection of properties to be used as part of each execution of the workflow. + DefaultRunProperties map[string]*string `type:"map"` + + // The description of the workflow. + Description *string `type:"string"` + + // Name of the workflow to be updated. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateWorkflowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWorkflowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateWorkflowInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultRunProperties sets the DefaultRunProperties field's value. +func (s *UpdateWorkflowInput) SetDefaultRunProperties(v map[string]*string) *UpdateWorkflowInput { + s.DefaultRunProperties = v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateWorkflowInput) SetDescription(v string) *UpdateWorkflowInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateWorkflowInput) SetName(v string) *UpdateWorkflowInput { + s.Name = &v return s } -type UpdateUserDefinedFunctionOutput struct { +type UpdateWorkflowOutput struct { _ struct{} `type:"structure"` + + // The name of the workflow which was specified in input. 
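// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: attaching default run
// properties to a workflow with the UpdateWorkflowInput type above. The
// workflow name and property values are hypothetical.
// ---------------------------------------------------------------------------
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func setWorkflowDefaults() error {
	svc := glue.New(session.Must(session.NewSession()))
	_, err := svc.UpdateWorkflow(&glue.UpdateWorkflowInput{
		Name:                 aws.String("nightly-etl"), // hypothetical
		DefaultRunProperties: map[string]*string{"env": aws.String("prod")},
	})
	return err
}
// ------------------------- end of editor's sketch -------------------------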
+ Name *string `min:"1" type:"string"` } // String returns the string representation -func (s UpdateUserDefinedFunctionOutput) String() string { +func (s UpdateWorkflowOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateUserDefinedFunctionOutput) GoString() string { +func (s UpdateWorkflowOutput) GoString() string { return s.String() } +// SetName sets the Name field's value. +func (s *UpdateWorkflowOutput) SetName(v string) *UpdateWorkflowOutput { + s.Name = &v + return s +} + // Specifies an XML classifier to be updated. type UpdateXMLClassifierRequest struct { _ struct{} `type:"structure"` @@ -24476,9 +31908,9 @@ type UpdateXMLClassifierRequest struct { Name *string `min:"1" type:"string" required:"true"` // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot identify a self-closing element (closed - // by />). An empty row element that contains only attributes can be parsed - // as long as it ends with a closing tag (for example, + // being parsed. This cannot identify a self-closing element (closed by />). + // An empty row element that contains only attributes can be parsed as long + // as it ends with a closing tag (for example, // <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string `type:"string"` } @@ -24596,7 +32028,7 @@ func (s *UserDefinedFunction) SetResourceUris(v []*ResourceUri) *UserDefinedFunc return s } -// A structure used to create or updata a user-defined function. +// A structure used to create or update a user-defined function. type UserDefinedFunctionInput struct { _ struct{} `type:"structure"` @@ -24685,6 +32117,280 @@ func (s *UserDefinedFunctionInput) SetResourceUris(v []*ResourceUri) *UserDefine return s } +// A workflow represents a flow in which AWS Glue components should be executed +// to complete a logical task. +type Workflow struct { + _ struct{} `type:"structure"` + + // The date and time when the workflow was created. + CreatedOn *time.Time `type:"timestamp"` + + // A collection of properties to be used as part of each execution of the workflow. + DefaultRunProperties map[string]*string `type:"map"` + + // A description of the workflow. + Description *string `type:"string"` + + // The graph representing all the AWS Glue components that belong to the workflow + // as nodes and directed connections between them as edges. + Graph *WorkflowGraph `type:"structure"` + + // The date and time when the workflow was last modified. + LastModifiedOn *time.Time `type:"timestamp"` + + // The information about the last execution of the workflow. + LastRun *WorkflowRun `type:"structure"` + + // The name of the workflow representing the flow. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Workflow) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Workflow) GoString() string { + return s.String() +} + +// SetCreatedOn sets the CreatedOn field's value. +func (s *Workflow) SetCreatedOn(v time.Time) *Workflow { + s.CreatedOn = &v + return s +} + +// SetDefaultRunProperties sets the DefaultRunProperties field's value. +func (s *Workflow) SetDefaultRunProperties(v map[string]*string) *Workflow { + s.DefaultRunProperties = v + return s +} + +// SetDescription sets the Description field's value. +func (s *Workflow) SetDescription(v string) *Workflow { + s.Description = &v + return s +} + +// SetGraph sets the Graph field's value.
+func (s *Workflow) SetGraph(v *WorkflowGraph) *Workflow { + s.Graph = v + return s +} + +// SetLastModifiedOn sets the LastModifiedOn field's value. +func (s *Workflow) SetLastModifiedOn(v time.Time) *Workflow { + s.LastModifiedOn = &v + return s +} + +// SetLastRun sets the LastRun field's value. +func (s *Workflow) SetLastRun(v *WorkflowRun) *Workflow { + s.LastRun = v + return s +} + +// SetName sets the Name field's value. +func (s *Workflow) SetName(v string) *Workflow { + s.Name = &v + return s +} + +// A workflow graph represents the complete workflow containing all the AWS +// Glue components present in the workflow and all the directed connections +// between them. +type WorkflowGraph struct { + _ struct{} `type:"structure"` + + // A list of all the directed connections between the nodes belonging to the + // workflow. + Edges []*Edge `type:"list"` + + // A list of the AWS Glue components that belong to the workflow, represented + // as nodes. + Nodes []*Node `type:"list"` +} + +// String returns the string representation +func (s WorkflowGraph) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowGraph) GoString() string { + return s.String() +} + +// SetEdges sets the Edges field's value. +func (s *WorkflowGraph) SetEdges(v []*Edge) *WorkflowGraph { + s.Edges = v + return s +} + +// SetNodes sets the Nodes field's value. +func (s *WorkflowGraph) SetNodes(v []*Node) *WorkflowGraph { + s.Nodes = v + return s +} + +// A workflow run is an execution of a workflow providing all the runtime information. +type WorkflowRun struct { + _ struct{} `type:"structure"` + + // The date and time when the workflow run completed. + CompletedOn *time.Time `type:"timestamp"` + + // The graph representing all the AWS Glue components that belong to the workflow + // as nodes and directed connections between them as edges. + Graph *WorkflowGraph `type:"structure"` + + // The name of the workflow that was executed. + Name *string `min:"1" type:"string"` + + // The date and time when the workflow run was started. + StartedOn *time.Time `type:"timestamp"` + + // The statistics of the run. + Statistics *WorkflowRunStatistics `type:"structure"` + + // The status of the workflow run. + Status *string `type:"string" enum:"WorkflowRunStatus"` + + // The ID of this workflow run. + WorkflowRunId *string `min:"1" type:"string"` + + // The workflow run properties which were set during the run. + WorkflowRunProperties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s WorkflowRun) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowRun) GoString() string { + return s.String() +} + +// SetCompletedOn sets the CompletedOn field's value. +func (s *WorkflowRun) SetCompletedOn(v time.Time) *WorkflowRun { + s.CompletedOn = &v + return s +} + +// SetGraph sets the Graph field's value. +func (s *WorkflowRun) SetGraph(v *WorkflowGraph) *WorkflowRun { + s.Graph = v + return s +} + +// SetName sets the Name field's value. +func (s *WorkflowRun) SetName(v string) *WorkflowRun { + s.Name = &v + return s +} + +// SetStartedOn sets the StartedOn field's value. +func (s *WorkflowRun) SetStartedOn(v time.Time) *WorkflowRun { + s.StartedOn = &v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *WorkflowRun) SetStatistics(v *WorkflowRunStatistics) *WorkflowRun { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value.
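// ---------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: consuming the WorkflowRun
// and WorkflowGraph types above via the GetWorkflowRun operation. The
// workflow name and run ID are hypothetical; node types come from the
// NodeType enum added at the end of this change.
// ---------------------------------------------------------------------------
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func describeRun() error {
	svc := glue.New(session.Must(session.NewSession()))
	out, err := svc.GetWorkflowRun(&glue.GetWorkflowRunInput{
		Name:         aws.String("nightly-etl"),         // hypothetical
		RunId:        aws.String("wr-0123456789abcdef"), // hypothetical
		IncludeGraph: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	run := out.Run
	fmt.Printf("status=%s started=%v\n", aws.StringValue(run.Status), aws.TimeValue(run.StartedOn))
	if run.Graph != nil {
		for _, n := range run.Graph.Nodes {
			// Each node is a CRAWLER, JOB, or TRIGGER in the workflow graph.
			fmt.Printf("node %s (%s)\n", aws.StringValue(n.Name), aws.StringValue(n.Type))
		}
	}
	return nil
}
// ------------------------- end of editor's sketch -------------------------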
+func (s *WorkflowRun) SetStatus(v string) *WorkflowRun { + s.Status = &v + return s +} + +// SetWorkflowRunId sets the WorkflowRunId field's value. +func (s *WorkflowRun) SetWorkflowRunId(v string) *WorkflowRun { + s.WorkflowRunId = &v + return s +} + +// SetWorkflowRunProperties sets the WorkflowRunProperties field's value. +func (s *WorkflowRun) SetWorkflowRunProperties(v map[string]*string) *WorkflowRun { + s.WorkflowRunProperties = v + return s +} + +// Workflow run statistics provides statistics about the workflow run. +type WorkflowRunStatistics struct { + _ struct{} `type:"structure"` + + // Total number of Actions which have failed. + FailedActions *int64 `type:"integer"` + + // Total number of Actions in running state. + RunningActions *int64 `type:"integer"` + + // Total number of Actions which have stopped. + StoppedActions *int64 `type:"integer"` + + // Total number of Actions which have succeeded. + SucceededActions *int64 `type:"integer"` + + // Total number of Actions which timed out. + TimeoutActions *int64 `type:"integer"` + + // Total number of Actions in the workflow run. + TotalActions *int64 `type:"integer"` +} + +// String returns the string representation +func (s WorkflowRunStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowRunStatistics) GoString() string { + return s.String() +} + +// SetFailedActions sets the FailedActions field's value. +func (s *WorkflowRunStatistics) SetFailedActions(v int64) *WorkflowRunStatistics { + s.FailedActions = &v + return s +} + +// SetRunningActions sets the RunningActions field's value. +func (s *WorkflowRunStatistics) SetRunningActions(v int64) *WorkflowRunStatistics { + s.RunningActions = &v + return s +} + +// SetStoppedActions sets the StoppedActions field's value. +func (s *WorkflowRunStatistics) SetStoppedActions(v int64) *WorkflowRunStatistics { + s.StoppedActions = &v + return s +} + +// SetSucceededActions sets the SucceededActions field's value. +func (s *WorkflowRunStatistics) SetSucceededActions(v int64) *WorkflowRunStatistics { + s.SucceededActions = &v + return s +} + +// SetTimeoutActions sets the TimeoutActions field's value. +func (s *WorkflowRunStatistics) SetTimeoutActions(v int64) *WorkflowRunStatistics { + s.TimeoutActions = &v + return s +} + +// SetTotalActions sets the TotalActions field's value. +func (s *WorkflowRunStatistics) SetTotalActions(v int64) *WorkflowRunStatistics { + s.TotalActions = &v + return s +} + // A classifier for XML content. type XMLClassifier struct { _ struct{} `type:"structure"` @@ -24694,10 +32400,10 @@ type XMLClassifier struct { // Classification is a required field Classification *string `type:"string" required:"true"` - // The time this classifier was registered. + // The time that this classifier was registered. CreationTime *time.Time `type:"timestamp"` - // The time this classifier was last updated. + // The time that this classifier was last updated. LastUpdated *time.Time `type:"timestamp"` // The name of the classifier. @@ -24706,9 +32412,9 @@ type XMLClassifier struct { Name *string `min:"1" type:"string" required:"true"` // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot identify a self-closing element (closed - // by />). An empty row element that contains only attributes can be parsed - // as long as it ends with a closing tag (for example, + // being parsed. This can't identify a self-closing element (closed by />).
+ // An empty row element that contains only attributes can be parsed as long + // as it ends with a closing tag (for example, // <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string `type:"string"` @@ -24778,6 +32484,23 @@ const ( CloudWatchEncryptionModeSseKms = "SSE-KMS" ) +const ( + // ComparatorEquals is a Comparator enum value + ComparatorEquals = "EQUALS" + + // ComparatorGreaterThan is a Comparator enum value + ComparatorGreaterThan = "GREATER_THAN" + + // ComparatorLessThan is a Comparator enum value + ComparatorLessThan = "LESS_THAN" + + // ComparatorGreaterThanEquals is a Comparator enum value + ComparatorGreaterThanEquals = "GREATER_THAN_EQUALS" + + // ComparatorLessThanEquals is a Comparator enum value + ComparatorLessThanEquals = "LESS_THAN_EQUALS" +) + const ( // ConnectionPropertyKeyHost is a ConnectionPropertyKey enum value ConnectionPropertyKeyHost = "HOST" @@ -24817,6 +32540,15 @@ const ( // ConnectionPropertyKeyJdbcEnforceSsl is a ConnectionPropertyKey enum value ConnectionPropertyKeyJdbcEnforceSsl = "JDBC_ENFORCE_SSL" + + // ConnectionPropertyKeyCustomJdbcCert is a ConnectionPropertyKey enum value + ConnectionPropertyKeyCustomJdbcCert = "CUSTOM_JDBC_CERT" + + // ConnectionPropertyKeySkipCustomJdbcCertValidation is a ConnectionPropertyKey enum value + ConnectionPropertyKeySkipCustomJdbcCertValidation = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" + + // ConnectionPropertyKeyCustomJdbcCertString is a ConnectionPropertyKey enum value + ConnectionPropertyKeyCustomJdbcCertString = "CUSTOM_JDBC_CERT_STRING" ) const ( @@ -24827,6 +32559,20 @@ const ( ConnectionTypeSftp = "SFTP" ) +const ( + // CrawlStateRunning is a CrawlState enum value + CrawlStateRunning = "RUNNING" + + // CrawlStateSucceeded is a CrawlState enum value + CrawlStateSucceeded = "SUCCEEDED" + + // CrawlStateCancelled is a CrawlState enum value + CrawlStateCancelled = "CANCELLED" + + // CrawlStateFailed is a CrawlState enum value + CrawlStateFailed = "FAILED" +) + const ( // CrawlerStateReady is a CrawlerState enum value CrawlerStateReady = "READY" @@ -24934,6 +32680,46 @@ const ( LogicalOperatorEquals = "EQUALS" ) +const ( + // NodeTypeCrawler is a NodeType enum value + NodeTypeCrawler = "CRAWLER" + + // NodeTypeJob is a NodeType enum value + NodeTypeJob = "JOB" + + // NodeTypeTrigger is a NodeType enum value + NodeTypeTrigger = "TRIGGER" +) + +const ( + // PermissionAll is a Permission enum value + PermissionAll = "ALL" + + // PermissionSelect is a Permission enum value + PermissionSelect = "SELECT" + + // PermissionAlter is a Permission enum value + PermissionAlter = "ALTER" + + // PermissionDrop is a Permission enum value + PermissionDrop = "DROP" + + // PermissionDelete is a Permission enum value + PermissionDelete = "DELETE" + + // PermissionInsert is a Permission enum value + PermissionInsert = "INSERT" + + // PermissionCreateDatabase is a Permission enum value + PermissionCreateDatabase = "CREATE_DATABASE" + + // PermissionCreateTable is a Permission enum value + PermissionCreateTable = "CREATE_TABLE" + + // PermissionDataLocationAccess is a Permission enum value + PermissionDataLocationAccess = "DATA_LOCATION_ACCESS" +) + const ( // PrincipalTypeUser is a PrincipalType enum value PrincipalTypeUser = "USER" @@ -24978,6 +32764,106 @@ const ( ScheduleStateTransitioning = "TRANSITIONING" ) +const ( + // SortAsc is a Sort enum value + SortAsc = "ASC" + + // SortDesc is a Sort enum value + SortDesc = "DESC" +) + +const ( + // SortDirectionTypeDescending is a SortDirectionType enum value + SortDirectionTypeDescending = "DESCENDING" + 
+ // SortDirectionTypeAscending is a SortDirectionType enum value + SortDirectionTypeAscending = "ASCENDING" +) + +const ( + // TaskRunSortColumnTypeTaskRunType is a TaskRunSortColumnType enum value + TaskRunSortColumnTypeTaskRunType = "TASK_RUN_TYPE" + + // TaskRunSortColumnTypeStatus is a TaskRunSortColumnType enum value + TaskRunSortColumnTypeStatus = "STATUS" + + // TaskRunSortColumnTypeStarted is a TaskRunSortColumnType enum value + TaskRunSortColumnTypeStarted = "STARTED" +) + +const ( + // TaskStatusTypeStarting is a TaskStatusType enum value + TaskStatusTypeStarting = "STARTING" + + // TaskStatusTypeRunning is a TaskStatusType enum value + TaskStatusTypeRunning = "RUNNING" + + // TaskStatusTypeStopping is a TaskStatusType enum value + TaskStatusTypeStopping = "STOPPING" + + // TaskStatusTypeStopped is a TaskStatusType enum value + TaskStatusTypeStopped = "STOPPED" + + // TaskStatusTypeSucceeded is a TaskStatusType enum value + TaskStatusTypeSucceeded = "SUCCEEDED" + + // TaskStatusTypeFailed is a TaskStatusType enum value + TaskStatusTypeFailed = "FAILED" + + // TaskStatusTypeTimeout is a TaskStatusType enum value + TaskStatusTypeTimeout = "TIMEOUT" +) + +const ( + // TaskTypeEvaluation is a TaskType enum value + TaskTypeEvaluation = "EVALUATION" + + // TaskTypeLabelingSetGeneration is a TaskType enum value + TaskTypeLabelingSetGeneration = "LABELING_SET_GENERATION" + + // TaskTypeImportLabels is a TaskType enum value + TaskTypeImportLabels = "IMPORT_LABELS" + + // TaskTypeExportLabels is a TaskType enum value + TaskTypeExportLabels = "EXPORT_LABELS" + + // TaskTypeFindMatches is a TaskType enum value + TaskTypeFindMatches = "FIND_MATCHES" +) + +const ( + // TransformSortColumnTypeName is a TransformSortColumnType enum value + TransformSortColumnTypeName = "NAME" + + // TransformSortColumnTypeTransformType is a TransformSortColumnType enum value + TransformSortColumnTypeTransformType = "TRANSFORM_TYPE" + + // TransformSortColumnTypeStatus is a TransformSortColumnType enum value + TransformSortColumnTypeStatus = "STATUS" + + // TransformSortColumnTypeCreated is a TransformSortColumnType enum value + TransformSortColumnTypeCreated = "CREATED" + + // TransformSortColumnTypeLastModified is a TransformSortColumnType enum value + TransformSortColumnTypeLastModified = "LAST_MODIFIED" +) + +const ( + // TransformStatusTypeNotReady is a TransformStatusType enum value + TransformStatusTypeNotReady = "NOT_READY" + + // TransformStatusTypeReady is a TransformStatusType enum value + TransformStatusTypeReady = "READY" + + // TransformStatusTypeDeleting is a TransformStatusType enum value + TransformStatusTypeDeleting = "DELETING" +) + +const ( + // TransformTypeFindMatches is a TransformType enum value + TransformTypeFindMatches = "FIND_MATCHES" +) + const ( // TriggerStateCreating is a TriggerState enum value TriggerStateCreating = "CREATING" @@ -25033,3 +32919,11 @@ const ( // WorkerTypeG2x is a WorkerType enum value WorkerTypeG2x = "G.2X" ) + +const ( + // WorkflowRunStatusRunning is a WorkflowRunStatus enum value + WorkflowRunStatusRunning = "RUNNING" + + // WorkflowRunStatusCompleted is a WorkflowRunStatus enum value + WorkflowRunStatusCompleted = "COMPLETED" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go index a1e9b4d4d6b..b9588ea38b8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go @@ -82,6 +82,12 @@ const ( // The input provided was not valid. ErrCodeInvalidInputException = "InvalidInputException" + // ErrCodeMLTransformNotReadyException for service response error code + // "MLTransformNotReadyException". + // + // The machine learning transform is not ready to run. + ErrCodeMLTransformNotReadyException = "MLTransformNotReadyException" + // ErrCodeNoScheduleException for service response error code // "NoScheduleException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/service.go index 075b0a1df6c..25770968a17 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/glue/service.go @@ -46,11 +46,11 @@ const ( // svc := glue.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glue { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glue { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glue { svc := &Glue{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-31", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go index 59239f04d8d..0760c6ca003 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go @@ -68,10 +68,10 @@ func (c *GuardDuty) AcceptInvitationRequest(input *AcceptInvitationInput) (req * // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/AcceptInvitation func (c *GuardDuty) AcceptInvitation(input *AcceptInvitationInput) (*AcceptInvitationOutput, error) { @@ -140,7 +140,10 @@ func (c *GuardDuty) ArchiveFindingsRequest(input *ArchiveFindingsInput) (req *re // ArchiveFindings API operation for Amazon GuardDuty. // -// Archives Amazon GuardDuty findings specified by the list of finding IDs. +// Archives GuardDuty findings specified by the list of finding IDs. +// +// Only the master account can archive findings. Member accounts do not have +// permission to archive findings from their accounts. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -151,10 +154,10 @@ func (c *GuardDuty) ArchiveFindingsRequest(input *ArchiveFindingsInput) (req *re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ArchiveFindings func (c *GuardDuty) ArchiveFindings(input *ArchiveFindingsInput) (*ArchiveFindingsOutput, error) { @@ -222,9 +225,10 @@ func (c *GuardDuty) CreateDetectorRequest(input *CreateDetectorInput) (req *requ // CreateDetector API operation for Amazon GuardDuty. // -// Creates a single Amazon GuardDuty detector. A detector is an object that -// represents the GuardDuty service. A detector must be created in order for -// GuardDuty to become operational. +// Creates a single Amazon GuardDuty detector. A detector is a resource that +// represents the GuardDuty service. To start using GuardDuty, you must create +// a detector in each region where you enable the service. You can have only +// one detector per account per region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -235,10 +239,10 @@ func (c *GuardDuty) CreateDetectorRequest(input *CreateDetectorInput) (req *requ // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateDetector func (c *GuardDuty) CreateDetector(input *CreateDetectorInput) (*CreateDetectorOutput, error) { @@ -317,10 +321,10 @@ func (c *GuardDuty) CreateFilterRequest(input *CreateFilterInput) (req *request. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateFilter func (c *GuardDuty) CreateFilter(input *CreateFilterInput) (*CreateFilterOutput, error) { @@ -400,10 +404,10 @@ func (c *GuardDuty) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateIPSet func (c *GuardDuty) CreateIPSet(input *CreateIPSetInput) (*CreateIPSetOutput, error) { @@ -484,10 +488,10 @@ func (c *GuardDuty) CreateMembersRequest(input *CreateMembersInput) (req *reques // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. 
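Every operation in this file documents the same two error codes and the same awserr.Error assertion pattern, so a single concrete sketch may help; the region is illustrative and the handling is the pattern the generated comments describe:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := guardduty.New(sess)

	_, err := svc.CreateDetector(&guardduty.CreateDetectorInput{
		Enable: aws.Bool(true),
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as the docs suggest.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case guardduty.ErrCodeBadRequestException:
				fmt.Println("bad request:", aerr.Message())
			case guardduty.ErrCodeInternalServerErrorException:
				fmt.Println("server error:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
	}
}
```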
// // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateMembers func (c *GuardDuty) CreateMembers(input *CreateMembersInput) (*CreateMembersOutput, error) { @@ -569,10 +573,10 @@ func (c *GuardDuty) CreateSampleFindingsRequest(input *CreateSampleFindingsInput // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateSampleFindings func (c *GuardDuty) CreateSampleFindings(input *CreateSampleFindingsInput) (*CreateSampleFindingsOutput, error) { @@ -652,10 +656,10 @@ func (c *GuardDuty) CreateThreatIntelSetRequest(input *CreateThreatIntelSetInput // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateThreatIntelSet func (c *GuardDuty) CreateThreatIntelSet(input *CreateThreatIntelSetInput) (*CreateThreatIntelSetOutput, error) { @@ -735,10 +739,10 @@ func (c *GuardDuty) DeclineInvitationsRequest(input *DeclineInvitationsInput) (r // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeclineInvitations func (c *GuardDuty) DeclineInvitations(input *DeclineInvitationsInput) (*DeclineInvitationsOutput, error) { @@ -818,10 +822,10 @@ func (c *GuardDuty) DeleteDetectorRequest(input *DeleteDetectorInput) (req *requ // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteDetector func (c *GuardDuty) DeleteDetector(input *DeleteDetectorInput) (*DeleteDetectorOutput, error) { @@ -901,10 +905,10 @@ func (c *GuardDuty) DeleteFilterRequest(input *DeleteFilterInput) (req *request. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteFilter func (c *GuardDuty) DeleteFilter(input *DeleteFilterInput) (*DeleteFilterOutput, error) { @@ -984,10 +988,10 @@ func (c *GuardDuty) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. 
// // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteIPSet func (c *GuardDuty) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { @@ -1067,10 +1071,10 @@ func (c *GuardDuty) DeleteInvitationsRequest(input *DeleteInvitationsInput) (req // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteInvitations func (c *GuardDuty) DeleteInvitations(input *DeleteInvitationsInput) (*DeleteInvitationsOutput, error) { @@ -1150,10 +1154,10 @@ func (c *GuardDuty) DeleteMembersRequest(input *DeleteMembersInput) (req *reques // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteMembers func (c *GuardDuty) DeleteMembers(input *DeleteMembersInput) (*DeleteMembersOutput, error) { @@ -1233,10 +1237,10 @@ func (c *GuardDuty) DeleteThreatIntelSetRequest(input *DeleteThreatIntelSetInput // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteThreatIntelSet func (c *GuardDuty) DeleteThreatIntelSet(input *DeleteThreatIntelSetInput) (*DeleteThreatIntelSetOutput, error) { @@ -1316,10 +1320,10 @@ func (c *GuardDuty) DisassociateFromMasterAccountRequest(input *DisassociateFrom // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateFromMasterAccount func (c *GuardDuty) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) { @@ -1399,10 +1403,10 @@ func (c *GuardDuty) DisassociateMembersRequest(input *DisassociateMembersInput) // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateMembers func (c *GuardDuty) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) { @@ -1481,10 +1485,10 @@ func (c *GuardDuty) GetDetectorRequest(input *GetDetectorInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetDetector func (c *GuardDuty) GetDetector(input *GetDetectorInput) (*GetDetectorOutput, error) { @@ -1563,10 +1567,10 @@ func (c *GuardDuty) GetFilterRequest(input *GetFilterInput) (req *request.Reques // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFilter func (c *GuardDuty) GetFilter(input *GetFilterInput) (*GetFilterOutput, error) { @@ -1645,10 +1649,10 @@ func (c *GuardDuty) GetFindingsRequest(input *GetFindingsInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindings func (c *GuardDuty) GetFindings(input *GetFindingsInput) (*GetFindingsOutput, error) { @@ -1727,10 +1731,10 @@ func (c *GuardDuty) GetFindingsStatisticsRequest(input *GetFindingsStatisticsInp // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindingsStatistics func (c *GuardDuty) GetFindingsStatistics(input *GetFindingsStatisticsInput) (*GetFindingsStatisticsOutput, error) { @@ -1809,10 +1813,10 @@ func (c *GuardDuty) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetIPSet func (c *GuardDuty) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { @@ -1892,10 +1896,10 @@ func (c *GuardDuty) GetInvitationsCountRequest(input *GetInvitationsCountInput) // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetInvitationsCount func (c *GuardDuty) GetInvitationsCount(input *GetInvitationsCountInput) (*GetInvitationsCountOutput, error) { @@ -1963,8 +1967,8 @@ func (c *GuardDuty) GetMasterAccountRequest(input *GetMasterAccountInput) (req * // GetMasterAccount API operation for Amazon GuardDuty. // -// Provides the details for the GuardDuty master account to the current GuardDuty -// member account. +// Provides the details for the GuardDuty master account associated with the +// current GuardDuty member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1975,10 +1979,10 @@ func (c *GuardDuty) GetMasterAccountRequest(input *GetMasterAccountInput) (req * // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMasterAccount func (c *GuardDuty) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) { @@ -2058,10 +2062,10 @@ func (c *GuardDuty) GetMembersRequest(input *GetMembersInput) (req *request.Requ // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMembers func (c *GuardDuty) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) { @@ -2140,10 +2144,10 @@ func (c *GuardDuty) GetThreatIntelSetRequest(input *GetThreatIntelSetInput) (req // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetThreatIntelSet func (c *GuardDuty) GetThreatIntelSet(input *GetThreatIntelSetInput) (*GetThreatIntelSetOutput, error) { @@ -2225,10 +2229,10 @@ func (c *GuardDuty) InviteMembersRequest(input *InviteMembersInput) (req *reques // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/InviteMembers func (c *GuardDuty) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) { @@ -2313,10 +2317,10 @@ func (c *GuardDuty) ListDetectorsRequest(input *ListDetectorsInput) (req *reques // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListDetectors func (c *GuardDuty) ListDetectors(input *ListDetectorsInput) (*ListDetectorsOutput, error) { @@ -2351,7 +2355,7 @@ func (c *GuardDuty) ListDetectorsWithContext(ctx aws.Context, input *ListDetecto // // Example iterating over at most 3 pages of a ListDetectors operation. // pageNum := 0 // err := client.ListDetectorsPages(params, -// func(page *ListDetectorsOutput, lastPage bool) bool { +// func(page *guardduty.ListDetectorsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2383,10 +2387,12 @@ func (c *GuardDuty) ListDetectorsPagesWithContext(ctx aws.Context, input *ListDe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDetectorsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDetectorsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2451,10 +2457,10 @@ func (c *GuardDuty) ListFiltersRequest(input *ListFiltersInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFilters func (c *GuardDuty) ListFilters(input *ListFiltersInput) (*ListFiltersOutput, error) { @@ -2489,7 +2495,7 @@ func (c *GuardDuty) ListFiltersWithContext(ctx aws.Context, input *ListFiltersIn // // Example iterating over at most 3 pages of a ListFilters operation. // pageNum := 0 // err := client.ListFiltersPages(params, -// func(page *ListFiltersOutput, lastPage bool) bool { +// func(page *guardduty.ListFiltersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2521,10 +2527,12 @@ func (c *GuardDuty) ListFiltersPagesWithContext(ctx aws.Context, input *ListFilt }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFiltersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFiltersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2589,10 +2597,10 @@ func (c *GuardDuty) ListFindingsRequest(input *ListFindingsInput) (req *request. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFindings func (c *GuardDuty) ListFindings(input *ListFindingsInput) (*ListFindingsOutput, error) { @@ -2627,7 +2635,7 @@ func (c *GuardDuty) ListFindingsWithContext(ctx aws.Context, input *ListFindings // // Example iterating over at most 3 pages of a ListFindings operation. 
// pageNum := 0 // err := client.ListFindingsPages(params, -// func(page *ListFindingsOutput, lastPage bool) bool { +// func(page *guardduty.ListFindingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2659,10 +2667,12 @@ func (c *GuardDuty) ListFindingsPagesWithContext(ctx aws.Context, input *ListFin }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFindingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFindingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2727,10 +2737,10 @@ func (c *GuardDuty) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Requ // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListIPSets func (c *GuardDuty) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { @@ -2765,7 +2775,7 @@ func (c *GuardDuty) ListIPSetsWithContext(ctx aws.Context, input *ListIPSetsInpu // // Example iterating over at most 3 pages of a ListIPSets operation. // pageNum := 0 // err := client.ListIPSetsPages(params, -// func(page *ListIPSetsOutput, lastPage bool) bool { +// func(page *guardduty.ListIPSetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2797,10 +2807,12 @@ func (c *GuardDuty) ListIPSetsPagesWithContext(ctx aws.Context, input *ListIPSet }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListIPSetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListIPSetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2866,10 +2878,10 @@ func (c *GuardDuty) ListInvitationsRequest(input *ListInvitationsInput) (req *re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListInvitations func (c *GuardDuty) ListInvitations(input *ListInvitationsInput) (*ListInvitationsOutput, error) { @@ -2904,7 +2916,7 @@ func (c *GuardDuty) ListInvitationsWithContext(ctx aws.Context, input *ListInvit // // Example iterating over at most 3 pages of a ListInvitations operation. // pageNum := 0 // err := client.ListInvitationsPages(params, -// func(page *ListInvitationsOutput, lastPage bool) bool { +// func(page *guardduty.ListInvitationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2936,10 +2948,12 @@ func (c *GuardDuty) ListInvitationsPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInvitationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInvitationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3005,10 +3019,10 @@ func (c *GuardDuty) ListMembersRequest(input *ListMembersInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. 
+// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListMembers func (c *GuardDuty) ListMembers(input *ListMembersInput) (*ListMembersOutput, error) { @@ -3043,7 +3057,7 @@ func (c *GuardDuty) ListMembersWithContext(ctx aws.Context, input *ListMembersIn // // Example iterating over at most 3 pages of a ListMembers operation. // pageNum := 0 // err := client.ListMembersPages(params, -// func(page *ListMembersOutput, lastPage bool) bool { +// func(page *guardduty.ListMembersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3075,13 +3089,100 @@ func (c *GuardDuty) ListMembersPagesWithContext(ctx aws.Context, input *ListMemb }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMembersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMembersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListTagsForResource +func (c *GuardDuty) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon GuardDuty. +// +// Lists tags for a resource. Tagging is currently supported for detectors, +// finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags +// per resource. When invoked, this operation returns all assigned tags for +// a given resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Bad request exception object. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Internal server error exception object. 
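The ...PagesWithContext rewrites above (ListDetectors through ListMembers) all fix the same subtle over-fetch: in the old loop, the condition evaluated p.Next() before checking the cont flag, so after the callback returned false the paginator still issued one more page request; the new loop breaks immediately. A small sketch of driving one of these paginators, assuming an already-constructed client as in the earlier example (the page cap is illustrative):

```go
// svc is a *guardduty.GuardDuty built from a session, as shown earlier.
pageNum := 0
err := svc.ListDetectorsPages(&guardduty.ListDetectorsInput{},
	func(page *guardduty.ListDetectorsOutput, lastPage bool) bool {
		for _, id := range page.DetectorIds {
			fmt.Println(aws.StringValue(id))
		}
		pageNum++
		// Returning false stops here; with the old cont flag the
		// paginator would still have fetched one extra page first.
		return pageNum < 3
	})
if err != nil {
	fmt.Println("paging failed:", err)
}
```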
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListTagsForResource +func (c *GuardDuty) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListThreatIntelSets = "ListThreatIntelSets" // ListThreatIntelSetsRequest generates a "aws/request.Request" representing the @@ -3144,10 +3245,10 @@ func (c *GuardDuty) ListThreatIntelSetsRequest(input *ListThreatIntelSetsInput) // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListThreatIntelSets func (c *GuardDuty) ListThreatIntelSets(input *ListThreatIntelSetsInput) (*ListThreatIntelSetsOutput, error) { @@ -3182,7 +3283,7 @@ func (c *GuardDuty) ListThreatIntelSetsWithContext(ctx aws.Context, input *ListT // // Example iterating over at most 3 pages of a ListThreatIntelSets operation. // pageNum := 0 // err := client.ListThreatIntelSetsPages(params, -// func(page *ListThreatIntelSetsOutput, lastPage bool) bool { +// func(page *guardduty.ListThreatIntelSetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3214,10 +3315,12 @@ func (c *GuardDuty) ListThreatIntelSetsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListThreatIntelSetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListThreatIntelSetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3278,10 +3381,10 @@ func (c *GuardDuty) StartMonitoringMembersRequest(input *StartMonitoringMembersI // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StartMonitoringMembers func (c *GuardDuty) StartMonitoringMembers(input *StartMonitoringMembersInput) (*StartMonitoringMembersOutput, error) { @@ -3363,10 +3466,10 @@ func (c *GuardDuty) StopMonitoringMembersRequest(input *StopMonitoringMembersInp // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. 
// // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StopMonitoringMembers func (c *GuardDuty) StopMonitoringMembers(input *StopMonitoringMembersInput) (*StopMonitoringMembersOutput, error) { @@ -3390,6 +3493,89 @@ func (c *GuardDuty) StopMonitoringMembersWithContext(ctx aws.Context, input *Sto return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/TagResource +func (c *GuardDuty) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon GuardDuty. +// +// Adds tags to a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Bad request exception object. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/TagResource +func (c *GuardDuty) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GuardDuty) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUnarchiveFindings = "UnarchiveFindings" // UnarchiveFindingsRequest generates a "aws/request.Request" representing the @@ -3446,10 +3632,10 @@ func (c *GuardDuty) UnarchiveFindingsRequest(input *UnarchiveFindingsInput) (req // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UnarchiveFindings func (c *GuardDuty) UnarchiveFindings(input *UnarchiveFindingsInput) (*UnarchiveFindingsOutput, error) { @@ -3473,6 +3659,89 @@ func (c *GuardDuty) UnarchiveFindingsWithContext(ctx aws.Context, input *Unarchi return out, req.Send() } +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UntagResource +func (c *GuardDuty) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon GuardDuty. +// +// Removes tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Bad request exception object. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Internal server error exception object. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UntagResource +func (c *GuardDuty) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateDetector = "UpdateDetector" // UpdateDetectorRequest generates a "aws/request.Request" representing the @@ -3529,10 +3798,10 @@ func (c *GuardDuty) UpdateDetectorRequest(input *UpdateDetectorInput) (req *requ // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateDetector func (c *GuardDuty) UpdateDetector(input *UpdateDetectorInput) (*UpdateDetectorOutput, error) { @@ -3611,10 +3880,10 @@ func (c *GuardDuty) UpdateFilterRequest(input *UpdateFilterInput) (req *request. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFilter func (c *GuardDuty) UpdateFilter(input *UpdateFilterInput) (*UpdateFilterOutput, error) { @@ -3694,10 +3963,10 @@ func (c *GuardDuty) UpdateFindingsFeedbackRequest(input *UpdateFindingsFeedbackI // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFindingsFeedback func (c *GuardDuty) UpdateFindingsFeedback(input *UpdateFindingsFeedbackInput) (*UpdateFindingsFeedbackOutput, error) { @@ -3777,10 +4046,10 @@ func (c *GuardDuty) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Re // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. 
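The tagging operations added above (ListTagsForResource, TagResource, UntagResource) all share the /tags/{resourceArn} route. A minimal sketch of exercising all three against a detector, assuming a client as before; the ARN and tag values are hypothetical:

```go
// svc is a *guardduty.GuardDuty; the detector ARN below is made up.
arn := aws.String("arn:aws:guardduty:us-west-2:123456789012:detector/12abc34d567e8fa901bc2d34e56789f0")

// Add a tag (detectors, filters, IP sets, and threat intel sets
// are taggable, with a 50-tag-per-resource limit per the docs).
if _, err := svc.TagResource(&guardduty.TagResourceInput{
	ResourceArn: arn,
	Tags:        map[string]*string{"team": aws.String("security")},
}); err != nil {
	fmt.Println("tag failed:", err)
	return
}

// Read the tags back.
out, err := svc.ListTagsForResource(&guardduty.ListTagsForResourceInput{ResourceArn: arn})
if err != nil {
	fmt.Println("list failed:", err)
	return
}
fmt.Println(aws.StringValueMap(out.Tags))

// Remove the tag again by key.
if _, err := svc.UntagResource(&guardduty.UntagResourceInput{
	ResourceArn: arn,
	TagKeys:     []*string{aws.String("team")},
}); err != nil {
	fmt.Println("untag failed:", err)
}
```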
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateIPSet func (c *GuardDuty) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { @@ -3860,10 +4129,10 @@ func (c *GuardDuty) UpdateThreatIntelSetRequest(input *UpdateThreatIntelSetInput // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Error response object. +// Bad request exception object. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Error response object. +// Internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet func (c *GuardDuty) UpdateThreatIntelSet(input *UpdateThreatIntelSetInput) (*UpdateThreatIntelSetOutput, error) { @@ -3887,12 +4156,13 @@ func (c *GuardDuty) UpdateThreatIntelSetWithContext(ctx aws.Context, input *Upda return out, req.Send() } -// AcceptInvitation request body. type AcceptInvitationInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector of the GuardDuty member account. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // This value is used to validate the master account to the member account. // @@ -3969,8 +4239,7 @@ func (s AcceptInvitationOutput) GoString() string { return s.String() } -// The IAM access key details (IAM user information) of a user that engaged -// in the activity that prompted GuardDuty to generate a finding. +// Contains information about the access keys. type AccessKeyDetails struct { _ struct{} `type:"structure"` @@ -4021,19 +4290,19 @@ func (s *AccessKeyDetails) SetUserType(v string) *AccessKeyDetails { return s } -// An object containing the member's accountId and email address. +// Contains information about the account. type AccountDetail struct { _ struct{} `type:"structure"` // Member account ID. // // AccountId is a required field - AccountId *string `locationName:"accountId" type:"string" required:"true"` + AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` // Member account's email address. // // Email is a required field - Email *string `locationName:"email" type:"string" required:"true"` + Email *string `locationName:"email" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -4052,9 +4321,15 @@ func (s *AccountDetail) Validate() error { if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } if s.Email == nil { invalidParams.Add(request.NewErrParamRequired("Email")) } + if s.Email != nil && len(*s.Email) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Email", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4074,7 +4349,7 @@ func (s *AccountDetail) SetEmail(v string) *AccountDetail { return s } -// Information about the activity described in a finding. +// Contains information about the action. type Action struct { _ struct{} `type:"structure"` @@ -4134,12 +4409,14 @@ func (s *Action) SetPortProbeAction(v *PortProbeAction) *Action { return s } -// Archive Findings Request type ArchiveFindingsInput struct { _ struct{} `type:"structure"` + // The ID of the detector that specifies the GuardDuty service whose findings + // you want to archive. 
+ // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // IDs of the findings that you want to archive. // @@ -4202,7 +4479,7 @@ func (s ArchiveFindingsOutput) GoString() string { return s.String() } -// Information about the AWS_API_CALL action described in this finding. +// Contains information about the API operation. type AwsApiCallAction struct { _ struct{} `type:"structure"` @@ -4262,7 +4539,7 @@ func (s *AwsApiCallAction) SetServiceName(v string) *AwsApiCallAction { return s } -// City information of the remote IP address. +// Contains information about the city associated with the IP address. type City struct { _ struct{} `type:"structure"` @@ -4286,34 +4563,69 @@ func (s *City) SetCityName(v string) *City { return s } -// Finding attribute (for example, accountId) for which conditions and values -// must be specified when querying findings. +// Contains information about the condition. type Condition struct { _ struct{} `type:"structure"` - // Represents the equal condition to be applied to a single field when querying + // Deprecated. Represents the equal condition to be applied to a single field + // when querying for findings. + // + // Deprecated: Eq has been deprecated + Eq []*string `locationName:"eq" deprecated:"true" type:"list"` + + // Represents an equal condition to be applied to a single field when querying // for findings. - Eq []*string `locationName:"eq" type:"list"` + Equals []*string `locationName:"equals" type:"list"` - // Represents the greater than condition to be applied to a single field when + // Represents a greater than condition to be applied to a single field when // querying for findings. - Gt *int64 `locationName:"gt" type:"integer"` + GreaterThan *int64 `locationName:"greaterThan" type:"long"` - // Represents the greater than equal condition to be applied to a single field + // Represents a greater than equal condition to be applied to a single field // when querying for findings. - Gte *int64 `locationName:"gte" type:"integer"` + GreaterThanOrEqual *int64 `locationName:"greaterThanOrEqual" type:"long"` + + // Deprecated. Represents a greater than condition to be applied to a single + // field when querying for findings. + // + // Deprecated: Gt has been deprecated + Gt *int64 `locationName:"gt" deprecated:"true" type:"integer"` + + // Deprecated. Represents a greater than equal condition to be applied to a + // single field when querying for findings. + // + // Deprecated: Gte has been deprecated + Gte *int64 `locationName:"gte" deprecated:"true" type:"integer"` - // Represents the less than condition to be applied to a single field when querying + // Represents a less than condition to be applied to a single field when querying // for findings. - Lt *int64 `locationName:"lt" type:"integer"` + LessThan *int64 `locationName:"lessThan" type:"long"` + + // Represents a less than equal condition to be applied to a single field when + // querying for findings. + LessThanOrEqual *int64 `locationName:"lessThanOrEqual" type:"long"` - // Represents the less than equal condition to be applied to a single field + // Deprecated. Represents a less than condition to be applied to a single field // when querying for findings. 
- Lte *int64 `locationName:"lte" type:"integer"` + // + // Deprecated: Lt has been deprecated + Lt *int64 `locationName:"lt" deprecated:"true" type:"integer"` + + // Deprecated. Represents a less than equal condition to be applied to a single + // field when querying for findings. + // + // Deprecated: Lte has been deprecated + Lte *int64 `locationName:"lte" deprecated:"true" type:"integer"` - // Represents the not equal condition to be applied to a single field when querying + // Deprecated. Represents the not equal condition to be applied to a single + // field when querying for findings. + // + // Deprecated: Neq has been deprecated + Neq []*string `locationName:"neq" deprecated:"true" type:"list"` + + // Represents a not equal condition to be applied to a single field when querying // for findings. - Neq []*string `locationName:"neq" type:"list"` + NotEquals []*string `locationName:"notEquals" type:"list"` } // String returns the string representation @@ -4332,6 +4644,24 @@ func (s *Condition) SetEq(v []*string) *Condition { return s } +// SetEquals sets the Equals field's value. +func (s *Condition) SetEquals(v []*string) *Condition { + s.Equals = v + return s +} + +// SetGreaterThan sets the GreaterThan field's value. +func (s *Condition) SetGreaterThan(v int64) *Condition { + s.GreaterThan = &v + return s +} + +// SetGreaterThanOrEqual sets the GreaterThanOrEqual field's value. +func (s *Condition) SetGreaterThanOrEqual(v int64) *Condition { + s.GreaterThanOrEqual = &v + return s +} + // SetGt sets the Gt field's value. func (s *Condition) SetGt(v int64) *Condition { s.Gt = &v @@ -4344,6 +4674,18 @@ func (s *Condition) SetGte(v int64) *Condition { return s } +// SetLessThan sets the LessThan field's value. +func (s *Condition) SetLessThan(v int64) *Condition { + s.LessThan = &v + return s +} + +// SetLessThanOrEqual sets the LessThanOrEqual field's value. +func (s *Condition) SetLessThanOrEqual(v int64) *Condition { + s.LessThanOrEqual = &v + return s +} + // SetLt sets the Lt field's value. func (s *Condition) SetLt(v int64) *Condition { s.Lt = &v @@ -4362,7 +4704,13 @@ func (s *Condition) SetNeq(v []*string) *Condition { return s } -// Country information of the remote IP address. +// SetNotEquals sets the NotEquals field's value. +func (s *Condition) SetNotEquals(v []*string) *Condition { + s.NotEquals = v + return s +} + +// Contains information about the country. type Country struct { _ struct{} `type:"structure"` @@ -4395,7 +4743,6 @@ func (s *Country) SetCountryName(v string) *Country { return s } -// Create Detector Request type CreateDetectorInput struct { _ struct{} `type:"structure"` @@ -4409,6 +4756,9 @@ type CreateDetectorInput struct { // A enum value that specifies how frequently customer got Finding updates published. FindingPublishingFrequency *string `locationName:"findingPublishingFrequency" type:"string" enum:"FindingPublishingFrequency"` + + // The tags to be added to a new detector resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -4427,6 +4777,9 @@ func (s *CreateDetectorInput) Validate() error { if s.Enable == nil { invalidParams.Add(request.NewErrParamRequired("Enable")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4452,12 +4805,17 @@ func (s *CreateDetectorInput) SetFindingPublishingFrequency(v string) *CreateDet return s } -// CreateDetector response object.
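The hunks above rename Condition's terse comparison fields to spelled-out forms (Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual) and mark the old single-letter ones deprecated. A minimal sketch of querying findings against the new fields, assuming ambient AWS credentials; the detector ID and the "severity" criterion key are illustrative, not taken from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	svc := guardduty.New(session.Must(session.NewSession()))

	// Build criteria with the new long-form fields; the deprecated
	// single-letter fields (Gte, Neq, ...) still marshal but should be avoided.
	criteria := &guardduty.FindingCriteria{
		Criterion: map[string]*guardduty.Condition{
			"severity": {GreaterThanOrEqual: aws.Int64(7)},
		},
	}

	out, err := svc.ListFindings(&guardduty.ListFindingsInput{
		DetectorId:      aws.String("12abc34d567e8fa901bc2d34e56789f0"), // hypothetical detector ID
		FindingCriteria: criteria,
	})
	if err != nil {
		fmt.Println("ListFindings failed:", err)
		return
	}
	fmt.Println("matching finding IDs:", aws.StringValueSlice(out.FindingIds))
}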
+// SetTags sets the Tags field's value. +func (s *CreateDetectorInput) SetTags(v map[string]*string) *CreateDetectorInput { + s.Tags = v + return s +} + type CreateDetectorOutput struct { _ struct{} `type:"structure"` // The unique ID of the created detector. - DetectorId *string `locationName:"detectorId" type:"string"` + DetectorId *string `locationName:"detectorId" min:"1" type:"string"` } // String returns the string representation @@ -4476,13 +4834,12 @@ func (s *CreateDetectorOutput) SetDetectorId(v string) *CreateDetectorOutput { return s } -// CreateFilter request object. type CreateFilterInput struct { _ struct{} `type:"structure"` // Specifies the action that is to be applied to the findings that match the // filter. - Action *string `locationName:"action" type:"string" enum:"FilterAction"` + Action *string `locationName:"action" min:"1" type:"string" enum:"FilterAction"` // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` @@ -4490,8 +4847,11 @@ type CreateFilterInput struct { // The description of the filter. Description *string `locationName:"description" type:"string"` + // The unique ID of the detector of the GuardDuty account for which you want + // to create a filter. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Represents the criteria to be used in the filter for querying findings. // @@ -4501,11 +4861,14 @@ type CreateFilterInput struct { // The name of the filter. // // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` + Name *string `locationName:"name" min:"3" type:"string" required:"true"` // Specifies the position of the filter in the list of current filters. Also // specifies the order in which this filter is applied to the findings. - Rank *int64 `locationName:"rank" type:"integer"` + Rank *int64 `locationName:"rank" min:"1" type:"integer"` + + // The tags to be added to a new filter resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -4521,6 +4884,9 @@ func (s CreateFilterInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateFilterInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateFilterInput"} + if s.Action != nil && len(*s.Action) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Action", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -4533,6 +4899,15 @@ func (s *CreateFilterInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.Rank != nil && *s.Rank < 1 { + invalidParams.Add(request.NewErrParamMinValue("Rank", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4582,12 +4957,19 @@ func (s *CreateFilterInput) SetRank(v int64) *CreateFilterInput { return s } -// CreateFilter response object. +// SetTags sets the Tags field's value. 
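CreateDetectorInput gains an optional Tags map, and the Validate additions above reject an empty map client-side (min:"1"). A short sketch, assuming a *guardduty.GuardDuty client constructed as in the earlier example; the tag pair is illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

// createTaggedDetector enables GuardDuty and attaches a tag at creation time.
func createTaggedDetector(svc *guardduty.GuardDuty) (string, error) {
	input := &guardduty.CreateDetectorInput{
		Enable: aws.Bool(true),
		Tags: map[string]*string{
			"team": aws.String("security"), // hypothetical tag
		},
	}
	// Validate enforces the new min:"1" constraint on Tags before any request is sent.
	if err := input.Validate(); err != nil {
		return "", err
	}
	out, err := svc.CreateDetector(input)
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DetectorId), nil
}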
+func (s *CreateFilterInput) SetTags(v map[string]*string) *CreateFilterInput { + s.Tags = v + return s +} + type CreateFilterOutput struct { _ struct{} `type:"structure"` // The name of the successfully created filter. - Name *string `locationName:"name" type:"string"` + // + // Name is a required field + Name *string `locationName:"name" min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4606,7 +4988,6 @@ func (s *CreateFilterOutput) SetName(v string) *CreateFilterOutput { return s } -// Create IP Set Request type CreateIPSetInput struct { _ struct{} `type:"structure"` @@ -4619,25 +5000,31 @@ type CreateIPSetInput struct { // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + // The unique ID of the detector of the GuardDuty account for which you want + // to create an IPSet. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // The format of the file that contains the IPSet. // // Format is a required field - Format *string `locationName:"format" type:"string" required:"true" enum:"IpSetFormat"` + Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"IpSetFormat"` // The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) // // Location is a required field - Location *string `locationName:"location" type:"string" required:"true"` + Location *string `locationName:"location" min:"1" type:"string" required:"true"` // The user friendly name to identify the IPSet. This name is displayed in all // findings that are triggered by activity that involves IP addresses included // in this IPSet. // // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The tags to be added to a new IP set resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -4665,12 +5052,24 @@ func (s *CreateIPSetInput) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Format != nil && len(*s.Format) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Format", 1)) + } if s.Location == nil { invalidParams.Add(request.NewErrParamRequired("Location")) } + if s.Location != nil && len(*s.Location) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Location", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4714,12 +5113,19 @@ func (s *CreateIPSetInput) SetName(v string) *CreateIPSetInput { return s } -// CreateIPSet response object. -type CreateIPSetOutput struct { - _ struct{} `type:"structure"` +// SetTags sets the Tags field's value. 
+func (s *CreateIPSetInput) SetTags(v map[string]*string) *CreateIPSetInput { + s.Tags = v + return s +} + +type CreateIPSetOutput struct { + _ struct{} `type:"structure"` - // The unique identifier for an IP Set - IpSetId *string `locationName:"ipSetId" type:"string"` + // The ID of the IPSet resource. + // + // IpSetId is a required field + IpSetId *string `locationName:"ipSetId" type:"string" required:"true"` } // String returns the string representation @@ -4738,7 +5144,6 @@ func (s *CreateIPSetOutput) SetIpSetId(v string) *CreateIPSetOutput { return s } -// CreateMembers body type CreateMembersInput struct { _ struct{} `type:"structure"` @@ -4746,10 +5151,13 @@ type CreateMembersInput struct { // to associate with the master GuardDuty account. // // AccountDetails is a required field - AccountDetails []*AccountDetail `locationName:"accountDetails" type:"list" required:"true"` + AccountDetails []*AccountDetail `locationName:"accountDetails" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account with which you want + // to associate member accounts. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -4768,6 +5176,9 @@ func (s *CreateMembersInput) Validate() error { if s.AccountDetails == nil { invalidParams.Add(request.NewErrParamRequired("AccountDetails")) } + if s.AccountDetails != nil && len(s.AccountDetails) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountDetails", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -4803,13 +5214,14 @@ func (s *CreateMembersInput) SetDetectorId(v string) *CreateMembersInput { return s } -// CreateMembers response object. type CreateMembersOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -4828,12 +5240,13 @@ func (s *CreateMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *C return s } -// Create Sample Findings Request type CreateSampleFindingsInput struct { _ struct{} `type:"structure"` + // The ID of the detector to create sample findings for. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Types of sample findings that you want to generate. FindingTypes []*string `locationName:"findingTypes" type:"list"` @@ -4891,7 +5304,6 @@ func (s CreateSampleFindingsOutput) GoString() string { return s.String() } -// Create Threat Intel Set Request type CreateThreatIntelSetInput struct { _ struct{} `type:"structure"` @@ -4904,24 +5316,30 @@ type CreateThreatIntelSetInput struct { // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + // The unique ID of the detector of the GuardDuty account for which you want + // to create a threatIntelSet. 
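With the AccountDetail hunks above, member account IDs must now be 12 characters and the email non-empty, and CreateMembersInput must carry at least one AccountDetail; Validate catches all three before a request is signed. A sketch under the same client assumption, with hypothetical account values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

// addMember invites one account as a GuardDuty member of the given detector.
func addMember(svc *guardduty.GuardDuty, detectorID string) error {
	input := &guardduty.CreateMembersInput{
		DetectorId: aws.String(detectorID),
		AccountDetails: []*guardduty.AccountDetail{{
			AccountId: aws.String("111122223333"),       // must be 12 characters (min:"12")
			Email:     aws.String("member@example.com"), // must be non-empty (min:"1")
		}},
	}
	if err := input.Validate(); err != nil {
		return err // e.g. a min-length violation on AccountId
	}
	out, err := svc.CreateMembers(input)
	if err != nil {
		return err
	}
	for _, ua := range out.UnprocessedAccounts {
		fmt.Printf("not processed %s: %s\n", aws.StringValue(ua.AccountId), aws.StringValue(ua.Result))
	}
	return nil
}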
+ // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // The format of the file that contains the ThreatIntelSet. // // Format is a required field - Format *string `locationName:"format" type:"string" required:"true" enum:"ThreatIntelSetFormat"` + Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"ThreatIntelSetFormat"` // The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). // // Location is a required field - Location *string `locationName:"location" type:"string" required:"true"` + Location *string `locationName:"location" min:"1" type:"string" required:"true"` // A user-friendly ThreatIntelSet name that is displayed in all finding generated // by activity that involves IP addresses included in this ThreatIntelSet. // // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The tags to be added to a new Threat List resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -4949,12 +5367,24 @@ func (s *CreateThreatIntelSetInput) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Format != nil && len(*s.Format) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Format", 1)) + } if s.Location == nil { invalidParams.Add(request.NewErrParamRequired("Location")) } + if s.Location != nil && len(*s.Location) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Location", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4998,12 +5428,19 @@ func (s *CreateThreatIntelSetInput) SetName(v string) *CreateThreatIntelSetInput return s } -// CreateThreatIntelSet response object. +// SetTags sets the Tags field's value. +func (s *CreateThreatIntelSetInput) SetTags(v map[string]*string) *CreateThreatIntelSetInput { + s.Tags = v + return s +} + type CreateThreatIntelSetOutput struct { _ struct{} `type:"structure"` - // The unique identifier for an threat intel set - ThreatIntelSetId *string `locationName:"threatIntelSetId" type:"string"` + // The ID of the ThreatIntelSet resource. + // + // ThreatIntelSetId is a required field + ThreatIntelSetId *string `locationName:"threatIntelSetId" type:"string" required:"true"` } // String returns the string representation @@ -5022,7 +5459,6 @@ func (s *CreateThreatIntelSetOutput) SetThreatIntelSetId(v string) *CreateThreat return s } -// DeclineInvitations request body. type DeclineInvitationsInput struct { _ struct{} `type:"structure"` @@ -5030,7 +5466,7 @@ type DeclineInvitationsInput struct { // member account that you want to decline invitations from. 
// // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` } // String returns the string representation @@ -5049,6 +5485,9 @@ func (s *DeclineInvitationsInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5062,13 +5501,14 @@ func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitations return s } -// DeclineInvitations response object. type DeclineInvitationsOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -5090,8 +5530,10 @@ func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*UnprocessedAccoun type DeleteDetectorInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector that you want to delete. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -5143,9 +5585,13 @@ func (s DeleteDetectorOutput) GoString() string { type DeleteFilterInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the filter is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The name of the filter you want to delete. + // // FilterName is a required field FilterName *string `location:"uri" locationName:"filterName" type:"string" required:"true"` } @@ -5211,9 +5657,13 @@ func (s DeleteFilterOutput) GoString() string { type DeleteIPSetInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the ipSet is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The unique ID of the ipSet you want to delete. + // // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` } @@ -5276,7 +5726,6 @@ func (s DeleteIPSetOutput) GoString() string { return s.String() } -// DeleteInvitations request body. type DeleteInvitationsInput struct { _ struct{} `type:"structure"` @@ -5284,7 +5733,7 @@ type DeleteInvitationsInput struct { // member account that you want to delete invitations from. 
// // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` } // String returns the string representation @@ -5303,6 +5752,9 @@ func (s *DeleteInvitationsInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5316,13 +5768,14 @@ func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsIn return s } -// DeleteInvitations response object. type DeleteInvitationsOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -5341,17 +5794,19 @@ func (s *DeleteInvitationsOutput) SetUnprocessedAccounts(v []*UnprocessedAccount return s } -// DeleteMembers request body. type DeleteMembersInput struct { _ struct{} `type:"structure"` // A list of account IDs of the GuardDuty member accounts that you want to delete. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account whose members you + // want to delete. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -5370,6 +5825,9 @@ func (s *DeleteMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -5395,13 +5853,13 @@ func (s *DeleteMembersInput) SetDetectorId(v string) *DeleteMembersInput { return s } -// DeleteMembers response object. type DeleteMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // The accounts that could not be processed. + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -5423,9 +5881,13 @@ func (s *DeleteMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *D type DeleteThreatIntelSetInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the threatIntelSet is associated with. 
+ // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The unique ID of the threatIntelSet you want to delete. + // // ThreatIntelSetId is a required field ThreatIntelSetId *string `location:"uri" locationName:"threatIntelSetId" type:"string" required:"true"` } @@ -5491,8 +5953,10 @@ func (s DeleteThreatIntelSetOutput) GoString() string { type DisassociateFromMasterAccountInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector of the GuardDuty member account. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -5541,7 +6005,6 @@ func (s DisassociateFromMasterAccountOutput) GoString() string { return s.String() } -// DisassociateMembers request body. type DisassociateMembersInput struct { _ struct{} `type:"structure"` @@ -5549,10 +6012,13 @@ type DisassociateMembersInput struct { // from master. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account whose members you + // want to disassociate from master. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -5571,6 +6037,9 @@ func (s *DisassociateMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -5596,13 +6065,14 @@ func (s *DisassociateMembersInput) SetDetectorId(v string) *DisassociateMembersI return s } -// DisassociateMembers response object. type DisassociateMembersOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -5621,7 +6091,7 @@ func (s *DisassociateMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccou return s } -// Information about the DNS_REQUEST action described in this finding. +// Contains information about the DNS request. type DnsRequestAction struct { _ struct{} `type:"structure"` @@ -5645,9 +6115,12 @@ func (s *DnsRequestAction) SetDomain(v string) *DnsRequestAction { return s } -// Domain information for the AWS API call. +// Contains information about the domain. type DomainDetails struct { _ struct{} `type:"structure"` + + // Domain information for the AWS API call. 
+ Domain *string `locationName:"domain" type:"string"` } // String returns the string representation @@ -5660,74 +6133,101 @@ func (s DomainDetails) GoString() string { return s.String() } -// Representation of a abnormal or suspicious activity. +// SetDomain sets the Domain field's value. +func (s *DomainDetails) SetDomain(v string) *DomainDetails { + s.Domain = &v + return s +} + +// Contains information about the reason that the finding was generated. +type Evidence struct { + _ struct{} `type:"structure"` + + // A list of threat intelligence details related to the evidence. + ThreatIntelligenceDetails []*ThreatIntelligenceDetail `locationName:"threatIntelligenceDetails" type:"list"` +} + +// String returns the string representation +func (s Evidence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evidence) GoString() string { + return s.String() +} + +// SetThreatIntelligenceDetails sets the ThreatIntelligenceDetails field's value. +func (s *Evidence) SetThreatIntelligenceDetails(v []*ThreatIntelligenceDetail) *Evidence { + s.ThreatIntelligenceDetails = v + return s +} + +// Contains information about the finding. type Finding struct { _ struct{} `type:"structure"` - // AWS account ID where the activity occurred that prompted GuardDuty to generate - // a finding. + // The ID of the account in which the finding was generated. // // AccountId is a required field AccountId *string `locationName:"accountId" type:"string" required:"true"` - // The ARN of a finding described by the action. + // The ARN for the finding. // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` - // The confidence level of a finding. + // The confidence score for the finding. Confidence *float64 `locationName:"confidence" type:"double"` - // The time stamp at which a finding was generated. + // The time and date at which the finding was created. // // CreatedAt is a required field CreatedAt *string `locationName:"createdAt" type:"string" required:"true"` - // The description of a finding. + // The description of the finding. Description *string `locationName:"description" type:"string"` - // The identifier that corresponds to a finding described by the action. + // The ID of the finding. // // Id is a required field Id *string `locationName:"id" type:"string" required:"true"` - // The AWS resource partition. + // The partition associated with the finding. Partition *string `locationName:"partition" type:"string"` - // The AWS region where the activity occurred that prompted GuardDuty to generate - // a finding. + // The Region in which the finding was generated. // // Region is a required field Region *string `locationName:"region" type:"string" required:"true"` - // The AWS resource associated with the activity that prompted GuardDuty to - // generate a finding. + // Contains information about the resource. // // Resource is a required field Resource *Resource `locationName:"resource" type:"structure" required:"true"` - // Findings' schema version. + // The version of the schema used for the finding. // // SchemaVersion is a required field SchemaVersion *string `locationName:"schemaVersion" type:"string" required:"true"` - // Additional information assigned to the generated finding by GuardDuty. + // Contains information about the service. Service *Service `locationName:"service" type:"structure"` - // The severity of a finding. + // The severity of the finding. 
// // Severity is a required field Severity *float64 `locationName:"severity" type:"double" required:"true"` - // The title of a finding. + // The title for the finding. Title *string `locationName:"title" type:"string"` - // The type of a finding described by the action. + // The type of the finding. // // Type is a required field - Type *string `locationName:"type" type:"string" required:"true"` + Type *string `locationName:"type" min:"1" type:"string" required:"true"` - // The time stamp at which a finding was last updated. + // The time and date at which the finding was last updated. // // UpdatedAt is a required field UpdatedAt *string `locationName:"updatedAt" type:"string" required:"true"` @@ -5833,7 +6333,7 @@ func (s *Finding) SetUpdatedAt(v string) *Finding { return s } -// Represents the criteria used for querying findings. +// Contains finding criteria information. type FindingCriteria struct { _ struct{} `type:"structure"` @@ -5858,7 +6358,7 @@ func (s *FindingCriteria) SetCriterion(v map[string]*Condition) *FindingCriteria return s } -// Finding statistics object. +// Contains information about finding statistics. type FindingStatistics struct { _ struct{} `type:"structure"` @@ -5882,7 +6382,7 @@ func (s *FindingStatistics) SetCountBySeverity(v map[string]*int64) *FindingStat return s } -// Location information of the remote IP address. +// Contains information about the location of the remote IP address. type GeoLocation struct { _ struct{} `type:"structure"` @@ -5918,8 +6418,10 @@ func (s *GeoLocation) SetLon(v float64) *GeoLocation { type GetDetectorInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector that you want to get. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -5954,23 +6456,29 @@ func (s *GetDetectorInput) SetDetectorId(v string) *GetDetectorInput { return s } -// GetDetector response object. type GetDetectorOutput struct { _ struct{} `type:"structure"` - // The first time a resource was created. The format will be ISO-8601. + // Detector creation timestamp. CreatedAt *string `locationName:"createdAt" type:"string"` - // A enum value that specifies how frequently customer got Finding updates published. + // Finding publishing frequency. FindingPublishingFrequency *string `locationName:"findingPublishingFrequency" type:"string" enum:"FindingPublishingFrequency"` - // Customer serviceRole name or ARN for accessing customer resources - ServiceRole *string `locationName:"serviceRole" type:"string"` + // The GuardDuty service role. + // + // ServiceRole is a required field + ServiceRole *string `locationName:"serviceRole" type:"string" required:"true"` + + // The detector status. + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"DetectorStatus"` - // The status of detector. - Status *string `locationName:"status" type:"string" enum:"DetectorStatus"` + // The tags of the detector resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` - // The first time a resource was created. The format will be ISO-8601. + // Detector last update timestamp. UpdatedAt *string `locationName:"updatedAt" type:"string"` } @@ -6008,6 +6516,12 @@ func (s *GetDetectorOutput) SetStatus(v string) *GetDetectorOutput { return s } +// SetTags sets the Tags field's value.
+func (s *GetDetectorOutput) SetTags(v map[string]*string) *GetDetectorOutput { + s.Tags = v + return s +} + // SetUpdatedAt sets the UpdatedAt field's value. func (s *GetDetectorOutput) SetUpdatedAt(v string) *GetDetectorOutput { s.UpdatedAt = &v @@ -6017,9 +6531,13 @@ func (s *GetDetectorOutput) SetUpdatedAt(v string) *GetDetectorOutput { type GetFilterInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the filter is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The name of the filter you want to get. + // // FilterName is a required field FilterName *string `location:"uri" locationName:"filterName" type:"string" required:"true"` } @@ -6068,26 +6586,34 @@ func (s *GetFilterInput) SetFilterName(v string) *GetFilterInput { return s } -// GetFilter response object. type GetFilterOutput struct { _ struct{} `type:"structure"` // Specifies the action that is to be applied to the findings that match the // filter. - Action *string `locationName:"action" type:"string" enum:"FilterAction"` + // + // Action is a required field + Action *string `locationName:"action" min:"1" type:"string" required:"true" enum:"FilterAction"` // The description of the filter. Description *string `locationName:"description" type:"string"` // Represents the criteria to be used in the filter for querying findings. - FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure"` + // + // FindingCriteria is a required field + FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure" required:"true"` // The name of the filter. - Name *string `locationName:"name" type:"string"` + // + // Name is a required field + Name *string `locationName:"name" min:"3" type:"string" required:"true"` // Specifies the position of the filter in the list of current filters. Also // specifies the order in which this filter is applied to the findings. - Rank *int64 `locationName:"rank" type:"integer"` + Rank *int64 `locationName:"rank" min:"1" type:"integer"` + + // The tags of the filter resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -6130,12 +6656,20 @@ func (s *GetFilterOutput) SetRank(v int64) *GetFilterOutput { return s } -// Get Findings Request +// SetTags sets the Tags field's value. +func (s *GetFilterOutput) SetTags(v map[string]*string) *GetFilterOutput { + s.Tags = v + return s +} + type GetFindingsInput struct { _ struct{} `type:"structure"` + // The ID of the detector that specifies the GuardDuty service whose findings + // you want to retrieve. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // IDs of the findings that you want to retrieve. // @@ -6193,12 +6727,13 @@ func (s *GetFindingsInput) SetSortCriteria(v *SortCriteria) *GetFindingsInput { return s } -// GetFindings response object. type GetFindingsOutput struct { _ struct{} `type:"structure"` // A list of findings. 
- Findings []*Finding `locationName:"findings" type:"list"` + // + // Findings is a required field + Findings []*Finding `locationName:"findings" type:"list" required:"true"` } // String returns the string representation @@ -6217,12 +6752,14 @@ func (s *GetFindingsOutput) SetFindings(v []*Finding) *GetFindingsOutput { return s } -// Get Findings Statistics Request type GetFindingsStatisticsInput struct { _ struct{} `type:"structure"` + // The ID of the detector that specifies the GuardDuty service whose findings' + // statistics you want to retrieve. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Represents the criteria used for querying findings. FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure"` @@ -6280,12 +6817,13 @@ func (s *GetFindingsStatisticsInput) SetFindingStatisticTypes(v []*string) *GetF return s } -// GetFindingsStatistics response object. type GetFindingsStatisticsOutput struct { _ struct{} `type:"structure"` // Finding statistics object. - FindingStatistics *FindingStatistics `locationName:"findingStatistics" type:"structure"` + // + // FindingStatistics is a required field + FindingStatistics *FindingStatistics `locationName:"findingStatistics" type:"structure" required:"true"` } // String returns the string representation @@ -6307,9 +6845,13 @@ func (s *GetFindingsStatisticsOutput) SetFindingStatistics(v *FindingStatistics) type GetIPSetInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the ipSet is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The unique ID of the ipSet you want to get. + // // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` } @@ -6358,23 +6900,33 @@ func (s *GetIPSetInput) SetIpSetId(v string) *GetIPSetInput { return s } -// GetIPSet response object. type GetIPSetOutput struct { _ struct{} `type:"structure"` // The format of the file that contains the IPSet. - Format *string `locationName:"format" type:"string" enum:"IpSetFormat"` + // + // Format is a required field + Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"IpSetFormat"` // The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) - Location *string `locationName:"location" type:"string"` + // + // Location is a required field + Location *string `locationName:"location" min:"1" type:"string" required:"true"` // The user friendly name to identify the IPSet. This name is displayed in all // findings that are triggered by activity that involves IP addresses included // in this IPSet. - Name *string `locationName:"name" type:"string"` + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` // The status of ipSet file uploaded. - Status *string `locationName:"status" type:"string" enum:"IpSetStatus"` + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"IpSetStatus"` + + // The tags of the IP set resource. 
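GetFindings (above) resolves finding IDs into full Finding values, whose documented fields were rewritten earlier in this diff. A minimal sketch that prints a few of those fields; the client and inputs are assumptions, not part of the diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

// describeFindings fetches full findings for the given IDs and summarizes them.
func describeFindings(svc *guardduty.GuardDuty, detectorID string, ids []string) error {
	out, err := svc.GetFindings(&guardduty.GetFindingsInput{
		DetectorId: aws.String(detectorID),
		FindingIds: aws.StringSlice(ids),
	})
	if err != nil {
		return err
	}
	for _, f := range out.Findings {
		fmt.Printf("%s severity=%.1f type=%s\n",
			aws.StringValue(f.Id), aws.Float64Value(f.Severity), aws.StringValue(f.Type))
	}
	return nil
}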
+ Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -6411,6 +6963,12 @@ func (s *GetIPSetOutput) SetStatus(v string) *GetIPSetOutput { return s } +// SetTags sets the Tags field's value. +func (s *GetIPSetOutput) SetTags(v map[string]*string) *GetIPSetOutput { + s.Tags = v + return s +} + type GetInvitationsCountInput struct { _ struct{} `type:"structure"` } @@ -6425,7 +6983,6 @@ func (s GetInvitationsCountInput) GoString() string { return s.String() } -// GetInvitationsCount response object. type GetInvitationsCountOutput struct { _ struct{} `type:"structure"` @@ -6452,8 +7009,10 @@ func (s *GetInvitationsCountOutput) SetInvitationsCount(v int64) *GetInvitations type GetMasterAccountInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector of the GuardDuty member account. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -6488,12 +7047,13 @@ func (s *GetMasterAccountInput) SetDetectorId(v string) *GetMasterAccountInput { return s } -// GetMasterAccount response object. type GetMasterAccountOutput struct { _ struct{} `type:"structure"` - // Contains details about the master account. - Master *Master `locationName:"master" type:"structure"` + // Master account details. + // + // Master is a required field + Master *Master `locationName:"master" type:"structure" required:"true"` } // String returns the string representation @@ -6512,17 +7072,19 @@ func (s *GetMasterAccountOutput) SetMaster(v *Master) *GetMasterAccountOutput { return s } -// GetMembers request body. type GetMembersInput struct { _ struct{} `type:"structure"` // A list of account IDs of the GuardDuty member accounts that you want to describe. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account whose members you + // want to retrieve. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -6541,6 +7103,9 @@ func (s *GetMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -6566,16 +7131,19 @@ func (s *GetMembersInput) SetDetectorId(v string) *GetMembersInput { return s } -// GetMembers response object. type GetMembersOutput struct { _ struct{} `type:"structure"` - // A list of member descriptions. - Members []*Member `locationName:"members" type:"list"` + // A list of members. + // + // Members is a required field + Members []*Member `locationName:"members" type:"list" required:"true"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. 
- UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -6603,9 +7171,13 @@ func (s *GetMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *GetM type GetThreatIntelSetInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the threatIntelSet is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The unique ID of the threatIntelSet you want to get. + // // ThreatIntelSetId is a required field ThreatIntelSetId *string `location:"uri" locationName:"threatIntelSetId" type:"string" required:"true"` } @@ -6654,22 +7226,32 @@ func (s *GetThreatIntelSetInput) SetThreatIntelSetId(v string) *GetThreatIntelSe return s } -// GetThreatIntelSet response object type GetThreatIntelSetOutput struct { _ struct{} `type:"structure"` // The format of the threatIntelSet. - Format *string `locationName:"format" type:"string" enum:"ThreatIntelSetFormat"` + // + // Format is a required field + Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"ThreatIntelSetFormat"` // The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). - Location *string `locationName:"location" type:"string"` + // + // Location is a required field + Location *string `locationName:"location" min:"1" type:"string" required:"true"` // A user-friendly ThreatIntelSet name that is displayed in all finding generated // by activity that involves IP addresses included in this ThreatIntelSet. - Name *string `locationName:"name" type:"string"` + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` // The status of threatIntelSet file uploaded. - Status *string `locationName:"status" type:"string" enum:"ThreatIntelSetStatus"` + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"ThreatIntelSetStatus"` + + // The tags of the Threat List resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -6706,7 +7288,13 @@ func (s *GetThreatIntelSetOutput) SetStatus(v string) *GetThreatIntelSetOutput { return s } -// The profile information of the EC2 instance. +// SetTags sets the Tags field's value. +func (s *GetThreatIntelSetOutput) SetTags(v map[string]*string) *GetThreatIntelSetOutput { + s.Tags = v + return s +} + +// Contains information about the instance profile. type IamInstanceProfile struct { _ struct{} `type:"structure"` @@ -6739,8 +7327,7 @@ func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile { return s } -// The information about the EC2 instance associated with the activity that -// prompted GuardDuty to generate a finding. +// Contains information about the details of an instance. type InstanceDetails struct { _ struct{} `type:"structure"` @@ -6863,12 +7450,12 @@ func (s *InstanceDetails) SetTags(v []*Tag) *InstanceDetails { return s } -// Invitation from an AWS account to become the current account's master. +// Contains information about the invitation. 
type Invitation struct { _ struct{} `type:"structure"` // Inviter account ID - AccountId *string `locationName:"accountId" type:"string"` + AccountId *string `locationName:"accountId" min:"12" type:"string"` // This value is used to validate the inviter account to the member account. InvitationId *string `locationName:"invitationId" type:"string"` @@ -6914,7 +7501,6 @@ func (s *Invitation) SetRelationshipStatus(v string) *Invitation { return s } -// InviteMembers request body. type InviteMembersInput struct { _ struct{} `type:"structure"` @@ -6922,10 +7508,13 @@ type InviteMembersInput struct { // as members. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account with which you want + // to invite members. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // A boolean value that specifies whether you want to disable email notification // to the accounts that you’re inviting to GuardDuty as members. @@ -6952,6 +7541,9 @@ func (s *InviteMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -6989,13 +7581,14 @@ func (s *InviteMembersInput) SetMessage(v string) *InviteMembersInput { return s } -// InviteMembers response object. type InviteMembersOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -7017,10 +7610,14 @@ func (s *InviteMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *I type ListDetectorsInput struct { _ struct{} `type:"structure"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7059,17 +7656,16 @@ func (s *ListDetectorsInput) SetNextToken(v string) *ListDetectorsInput { return s } -// ListDetectors response object. type ListDetectorsOutput struct { _ struct{} `type:"structure"` // A list of detector Ids. 
- DetectorIds []*string `locationName:"detectorIds" type:"list"` + // + // DetectorIds is a required field + DetectorIds []*string `locationName:"detectorIds" type:"list" required:"true"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7098,13 +7694,19 @@ func (s *ListDetectorsOutput) SetNextToken(v string) *ListDetectorsOutput { type ListFiltersInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the filter is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7155,17 +7757,16 @@ func (s *ListFiltersInput) SetNextToken(v string) *ListFiltersInput { return s } -// ListFilters response object. type ListFiltersOutput struct { _ struct{} `type:"structure"` // A list of filter names - FilterNames []*string `locationName:"filterNames" type:"list"` + // + // FilterNames is a required field + FilterNames []*string `locationName:"filterNames" type:"list" required:"true"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7191,12 +7792,14 @@ func (s *ListFiltersOutput) SetNextToken(v string) *ListFiltersOutput { return s } -// List Findings Request type ListFindingsInput struct { _ struct{} `type:"structure"` + // The ID of the detector that specifies the GuardDuty service whose findings + // you want to list. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Represents the criteria used for querying findings. FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure"` @@ -7206,9 +7809,9 @@ type ListFindingsInput struct { MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` // You can use this parameter when paginating results. 
Set the value of this - // parameter to null on your first call to the ListFindings action. For subsequent - // calls to the action fill nextToken in the request with the value of nextToken - // from the previous response to continue listing data. + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `locationName:"nextToken" type:"string"` // Represents the criteria used for sorting findings. @@ -7274,17 +7877,16 @@ func (s *ListFindingsInput) SetSortCriteria(v *SortCriteria) *ListFindingsInput return s } -// ListFindings response object. type ListFindingsOutput struct { _ struct{} `type:"structure"` - // The list of the Findings. - FindingIds []*string `locationName:"findingIds" type:"list"` + // The IDs of the findings you are listing. + // + // FindingIds is a required field + FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7313,13 +7915,19 @@ func (s *ListFindingsOutput) SetNextToken(v string) *ListFindingsOutput { type ListIPSetsInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the ipSet is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7370,17 +7978,16 @@ func (s *ListIPSetsInput) SetNextToken(v string) *ListIPSetsInput { return s } -// ListIPSets response object. type ListIPSetsOutput struct { _ struct{} `type:"structure"` - // A list of the IP set IDs - IpSetIds []*string `locationName:"ipSetIds" type:"list"` + // The IDs of the IPSet resources. + // + // IpSetIds is a required field + IpSetIds []*string `locationName:"ipSetIds" type:"list" required:"true"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. 
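All of the List* inputs in this diff share the NextToken contract spelled out in the doc comments: leave the token unset on the first call, then echo each response's NextToken back until it comes back empty. A sketch of that loop for ListFindings, under the same client assumption:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

// allFindingIDs pages through ListFindings until the token is exhausted.
func allFindingIDs(svc *guardduty.GuardDuty, detectorID string) ([]string, error) {
	var ids []string
	var token *string // nil on the first call, per the doc comment
	for {
		out, err := svc.ListFindings(&guardduty.ListFindingsInput{
			DetectorId: aws.String(detectorID),
			MaxResults: aws.Int64(50), // the documented maximum
			NextToken:  token,
		})
		if err != nil {
			return nil, err
		}
		ids = append(ids, aws.StringValueSlice(out.FindingIds)...)
		token = out.NextToken
		if aws.StringValue(token) == "" { // no more pages
			return ids, nil
		}
	}
}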
NextToken *string `locationName:"nextToken" type:"string"` } @@ -7409,10 +8016,14 @@ func (s *ListIPSetsOutput) SetNextToken(v string) *ListIPSetsOutput { type ListInvitationsInput struct { _ struct{} `type:"structure"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7451,17 +8062,14 @@ func (s *ListInvitationsInput) SetNextToken(v string) *ListInvitationsInput { return s } -// ListInvitations response object. type ListInvitationsOutput struct { _ struct{} `type:"structure"` // A list of invitation descriptions. Invitations []*Invitation `locationName:"invitations" type:"list"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7490,15 +8098,23 @@ func (s *ListInvitationsOutput) SetNextToken(v string) *ListInvitationsOutput { type ListMembersInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the member is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // Specifies whether to only return associated members or to return all members + // (including members which haven't been invited yet or have been disassociated). OnlyAssociated *string `location:"querystring" locationName:"onlyAssociated" type:"string"` } @@ -7555,17 +8171,14 @@ func (s *ListMembersInput) SetOnlyAssociated(v string) *ListMembersInput { return s } -// ListMembers response object. type ListMembersOutput struct { _ struct{} `type:"structure"` - // A list of member descriptions. + // A list of members. 
Members []*Member `locationName:"members" type:"list"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7591,16 +8204,86 @@ func (s *ListMembersOutput) SetNextToken(v string) *ListMembersOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the given GuardDuty resource + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags associated with the resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type ListThreatIntelSetsInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector the threatIntelSet is associated with. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items that you - // want in the response. + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the list action. For subsequent calls + // to the action fill nextToken in the request with the value of NextToken from + // the previous response to continue listing data. 
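The NextToken/MaxResults contract spelled out in the comments above is shared by every GuardDuty list operation. A minimal pagination sketch against ListFilters (not part of the vendored diff; the detector ID is a placeholder and errors are simply panicked):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	svc := guardduty.New(session.Must(session.NewSession()))
	input := &guardduty.ListFiltersInput{
		DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"), // placeholder detector ID
		MaxResults: aws.Int64(50),                                  // the documented service maximum
	}
	for {
		page, err := svc.ListFilters(input)
		if err != nil {
			panic(err)
		}
		for _, name := range page.FilterNames {
			fmt.Println(aws.StringValue(name))
		}
		// An empty NextToken signals the final page.
		if aws.StringValue(page.NextToken) == "" {
			break
		}
		input.NextToken = page.NextToken
	}
}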
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7651,18 +8334,17 @@ func (s *ListThreatIntelSetsInput) SetNextToken(v string) *ListThreatIntelSetsIn return s } -// ListThreatIntelSets response object. type ListThreatIntelSetsOutput struct { _ struct{} `type:"structure"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // Pagination parameter to be used on the next list operation to retrieve more + // items. NextToken *string `locationName:"nextToken" type:"string"` - // The list of the threat intel set IDs - ThreatIntelSetIds []*string `locationName:"threatIntelSetIds" type:"list"` + // The IDs of the ThreatIntelSet resources. + // + // ThreatIntelSetIds is a required field + ThreatIntelSetIds []*string `locationName:"threatIntelSetIds" type:"list" required:"true"` } // String returns the string representation @@ -7687,7 +8369,7 @@ func (s *ListThreatIntelSetsOutput) SetThreatIntelSetIds(v []*string) *ListThrea return s } -// Local port information of the connection. +// Contains information about the port for the local connection. type LocalPortDetails struct { _ struct{} `type:"structure"` @@ -7720,17 +8402,17 @@ func (s *LocalPortDetails) SetPortName(v string) *LocalPortDetails { return s } -// Contains details about the master account. +// Contains information about the Master account and invitation. type Master struct { _ struct{} `type:"structure"` - // Master account ID - AccountId *string `locationName:"accountId" type:"string"` + // The ID of the account used as the Master account. + AccountId *string `locationName:"accountId" min:"12" type:"string"` // This value is used to validate the master account to the member account. InvitationId *string `locationName:"invitationId" type:"string"` - // Timestamp at which the invitation was sent + // Timestamp at which the invitation was sent. InvitedAt *string `locationName:"invitedAt" type:"string"` // The status of the relationship between the master and member accounts. @@ -7771,27 +8453,27 @@ func (s *Master) SetRelationshipStatus(v string) *Master { return s } -// Contains details about the member account. +// Contains information about the member account. type Member struct { _ struct{} `type:"structure"` - // AWS account ID. + // Member account ID. // // AccountId is a required field - AccountId *string `locationName:"accountId" type:"string" required:"true"` + AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` - // The unique identifier for a detector. - DetectorId *string `locationName:"detectorId" type:"string"` + // Member account's detector ID. + DetectorId *string `locationName:"detectorId" min:"1" type:"string"` // Member account's email address. // // Email is a required field - Email *string `locationName:"email" type:"string" required:"true"` + Email *string `locationName:"email" min:"1" type:"string" required:"true"` // Timestamp at which the invitation was sent InvitedAt *string `locationName:"invitedAt" type:"string"` - // The master account ID. + // Master account ID.
// // MasterId is a required field MasterId *string `locationName:"masterId" type:"string" required:"true"` @@ -7801,7 +8483,7 @@ type Member struct { // RelationshipStatus is a required field RelationshipStatus *string `locationName:"relationshipStatus" type:"string" required:"true"` - // The first time a resource was created. The format will be ISO-8601. + // Member last updated timestamp. // // UpdatedAt is a required field UpdatedAt *string `locationName:"updatedAt" type:"string" required:"true"` @@ -7859,7 +8541,7 @@ func (s *Member) SetUpdatedAt(v string) *Member { return s } -// Information about the NETWORK_CONNECTION action described in this finding. +// Contains information about the network connection. type NetworkConnectionAction struct { _ struct{} `type:"structure"` @@ -7928,7 +8610,7 @@ func (s *NetworkConnectionAction) SetRemotePortDetails(v *RemotePortDetails) *Ne return s } -// The network interface information of the EC2 instance. +// Contains information about the network interface. type NetworkInterface struct { _ struct{} `type:"structure"` @@ -8033,7 +8715,7 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { return s } -// ISP Organization information of the remote IP address. +// Contains information about the organization. type Organization struct { _ struct{} `type:"structure"` @@ -8084,7 +8766,7 @@ func (s *Organization) SetOrg(v string) *Organization { return s } -// Information about the PORT_PROBE action described in this finding. +// Contains information about the port probe. type PortProbeAction struct { _ struct{} `type:"structure"` @@ -8117,7 +8799,7 @@ func (s *PortProbeAction) SetPortProbeDetails(v []*PortProbeDetail) *PortProbeAc return s } -// Details about the port probe finding. +// Contains information about the port probe details. type PortProbeDetail struct { _ struct{} `type:"structure"` @@ -8150,7 +8832,7 @@ func (s *PortProbeDetail) SetRemoteIpDetails(v *RemoteIpDetails) *PortProbeDetai return s } -// Other private IP address information of the EC2 instance. +// Contains information about the private IP address. type PrivateIpAddressDetails struct { _ struct{} `type:"structure"` @@ -8183,7 +8865,7 @@ func (s *PrivateIpAddressDetails) SetPrivateIpAddress(v string) *PrivateIpAddres return s } -// The product code of the EC2 instance. +// Contains information about the product code. type ProductCode struct { _ struct{} `type:"structure"` @@ -8216,7 +8898,7 @@ func (s *ProductCode) SetProductType(v string) *ProductCode { return s } -// Remote IP information of the connection. +// Contains information about the remote IP address. type RemoteIpDetails struct { _ struct{} `type:"structure"` @@ -8276,7 +8958,7 @@ func (s *RemoteIpDetails) SetOrganization(v *Organization) *RemoteIpDetails { return s } -// Remote port information of the connection. +// Contains information about the remote port. type RemotePortDetails struct { _ struct{} `type:"structure"` @@ -8309,8 +8991,7 @@ func (s *RemotePortDetails) SetPortName(v string) *RemotePortDetails { return s } -// The AWS resource associated with the activity that prompted GuardDuty to -// generate a finding. +// Contains information about the resource. type Resource struct { _ struct{} `type:"structure"` @@ -8354,7 +9035,7 @@ func (s *Resource) SetResourceType(v string) *Resource { return s } -// Security groups associated with the EC2 instance. +// Contains information about the security group.
type SecurityGroup struct { _ struct{} `type:"structure"` @@ -8387,7 +9068,7 @@ func (s *SecurityGroup) SetGroupName(v string) *SecurityGroup { return s } -// Additional information assigned to the generated finding by GuardDuty. +// Contains information about the service. type Service struct { _ struct{} `type:"structure"` @@ -8401,7 +9082,7 @@ type Service struct { Count *int64 `locationName:"count" type:"integer"` // Detector ID for the GuardDuty service. - DetectorId *string `locationName:"detectorId" type:"string"` + DetectorId *string `locationName:"detectorId" min:"1" type:"string"` // First seen timestamp of the activity that prompted GuardDuty to generate // this finding. @@ -8411,6 +9092,9 @@ type Service struct { // finding. EventLastSeen *string `locationName:"eventLastSeen" type:"string"` + // An evidence object associated with the service. + Evidence *Evidence `locationName:"evidence" type:"structure"` + // Resource role information for this finding. ResourceRole *string `locationName:"resourceRole" type:"string"` @@ -8467,6 +9151,12 @@ func (s *Service) SetEventLastSeen(v string) *Service { return s } +// SetEvidence sets the Evidence field's value. +func (s *Service) SetEvidence(v *Evidence) *Service { + s.Evidence = v + return s +} + // SetResourceRole sets the ResourceRole field's value. func (s *Service) SetResourceRole(v string) *Service { s.ResourceRole = &v @@ -8485,7 +9175,7 @@ func (s *Service) SetUserFeedback(v string) *Service { return s } -// Represents the criteria used for sorting findings. +// Contains information about the criteria for sorting. type SortCriteria struct { _ struct{} `type:"structure"` @@ -8519,7 +9209,6 @@ func (s *SortCriteria) SetOrderBy(v string) *SortCriteria { return s } -// StartMonitoringMembers request body. type StartMonitoringMembersInput struct { _ struct{} `type:"structure"` @@ -8527,10 +9216,13 @@ type StartMonitoringMembersInput struct { // want the master account to monitor. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account that you want to re-enable + // to monitor members' findings. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -8549,6 +9241,9 @@ func (s *StartMonitoringMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -8574,13 +9269,14 @@ func (s *StartMonitoringMembersInput) SetDetectorId(v string) *StartMonitoringMe return s } -// StartMonitoringMembers response object. type StartMonitoringMembersOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed.
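With the Evidence field now hanging off Service, GetFindings consumers can surface threat-intelligence context. A rough sketch, assuming the Evidence type added elsewhere in this diff exposes a ThreatIntelligenceDetails slice (the ThreatIntelligenceDetail type appears further below) and that svc, detectorID, and findingID already exist:

out, err := svc.GetFindings(&guardduty.GetFindingsInput{
	DetectorId: aws.String(detectorID),
	FindingIds: aws.StringSlice([]string{findingID}),
})
if err != nil {
	panic(err)
}
for _, f := range out.Findings {
	// Evidence is optional and may be nil on findings without threat-intel matches.
	if f.Service == nil || f.Service.Evidence == nil {
		continue
	}
	for _, d := range f.Service.Evidence.ThreatIntelligenceDetails { // field name is an assumption
		fmt.Printf("threat list %q matched: %v\n",
			aws.StringValue(d.ThreatListName), aws.StringValueSlice(d.ThreatNames))
	}
}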
- UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -8599,7 +9295,6 @@ func (s *StartMonitoringMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAc return s } -// StopMonitoringMembers request body. type StopMonitoringMembersInput struct { _ struct{} `type:"structure"` @@ -8607,10 +9302,13 @@ type StopMonitoringMembersInput struct { // want the master account to stop monitoring. // // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" type:"list" required:"true"` + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // The unique ID of the detector of the GuardDuty account that you want to stop + // from monitoring members' findings. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -8629,6 +9327,9 @@ func (s *StopMonitoringMembersInput) Validate() error { if s.AccountIds == nil { invalidParams.Add(request.NewErrParamRequired("AccountIds")) } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -8654,13 +9355,14 @@ func (s *StopMonitoringMembersInput) SetDetectorId(v string) *StopMonitoringMemb return s } -// StopMonitoringMembers response object. type StopMonitoringMembersOutput struct { _ struct{} `type:"structure"` // A list of objects containing the unprocessed account and a result string // explaining why it was unprocessed. - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list"` + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` } // String returns the string representation @@ -8679,7 +9381,7 @@ func (s *StopMonitoringMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAcc return s } -// A tag of the EC2 instance. +// Contains information about the tag associated with the resource. type Tag struct { _ struct{} `type:"structure"` @@ -8712,12 +9414,121 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Unrchive Findings Request +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the given GuardDuty resource + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tags to be added to a resource. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// An instance of a threat intelligence detail that constitutes evidence for +// the finding. +type ThreatIntelligenceDetail struct { + _ struct{} `type:"structure"` + + // The name of the threat intelligence list that triggered the finding. + ThreatListName *string `locationName:"threatListName" type:"string"` + + // A list of names of the threats in the threat intelligence list that triggered + // the finding. + ThreatNames []*string `locationName:"threatNames" type:"list"` +} + +// String returns the string representation +func (s ThreatIntelligenceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThreatIntelligenceDetail) GoString() string { + return s.String() +} + +// SetThreatListName sets the ThreatListName field's value. +func (s *ThreatIntelligenceDetail) SetThreatListName(v string) *ThreatIntelligenceDetail { + s.ThreatListName = &v + return s +} + +// SetThreatNames sets the ThreatNames field's value. +func (s *ThreatIntelligenceDetail) SetThreatNames(v []*string) *ThreatIntelligenceDetail { + s.ThreatNames = v + return s +} + type UnarchiveFindingsInput struct { _ struct{} `type:"structure"` + // The ID of the detector that specifies the GuardDuty service whose findings + // you want to unarchive. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // IDs of the findings that you want to unarchive. // @@ -8780,15 +9591,14 @@ func (s UnarchiveFindingsOutput) GoString() string { return s.String() } -// An object containing the unprocessed account and a result string explaining -// why it was unprocessed. +// Contains information about the accounts that were not processed. type UnprocessedAccount struct { _ struct{} `type:"structure"` // AWS Account ID. // // AccountId is a required field - AccountId *string `locationName:"accountId" type:"string" required:"true"` + AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` // A reason why the account hasn't been processed. 
// @@ -8818,12 +9628,85 @@ func (s *UnprocessedAccount) SetResult(v string) *UnprocessedAccount { return s } -// Update Detector Request +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the given GuardDuty resource + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tag keys to remove from a resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateDetectorInput struct { _ struct{} `type:"structure"` + // The unique ID of the detector that you want to update. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Updated boolean value for the detector that specifies whether the detector // is enabled. @@ -8891,20 +9774,24 @@ func (s UpdateDetectorOutput) GoString() string { return s.String() } -// UpdateFilter request object. type UpdateFilterInput struct { _ struct{} `type:"structure"` // Specifies the action that is to be applied to the findings that match the // filter. - Action *string `locationName:"action" type:"string" enum:"FilterAction"` + Action *string `locationName:"action" min:"1" type:"string" enum:"FilterAction"` // The description of the filter. Description *string `locationName:"description" type:"string"` + // The unique ID of the detector that specifies the GuardDuty service where + // you want to update a filter. 
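With TagResource, ListTagsForResource, and UntagResource all in place, the new tagging surface can be exercised end to end. A minimal sketch, assuming an existing svc GuardDuty client; the detector ARN and tag values are placeholders:

arn := "arn:aws:guardduty:us-west-2:123456789012:detector/12abc34d567e8fa901bc2d34e56789f0"
if _, err := svc.TagResource(&guardduty.TagResourceInput{
	ResourceArn: aws.String(arn),
	Tags:        map[string]*string{"team": aws.String("security")},
}); err != nil {
	panic(err)
}
listed, err := svc.ListTagsForResource(&guardduty.ListTagsForResourceInput{
	ResourceArn: aws.String(arn),
})
if err != nil {
	panic(err)
}
fmt.Printf("%d tag(s) on the detector\n", len(listed.Tags))
// Tags are removed by key only.
if _, err := svc.UntagResource(&guardduty.UntagResourceInput{
	ResourceArn: aws.String(arn),
	TagKeys:     []*string{aws.String("team")},
}); err != nil {
	panic(err)
}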
+ // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The name of the filter. + // // FilterName is a required field FilterName *string `location:"uri" locationName:"filterName" type:"string" required:"true"` @@ -8913,7 +9800,7 @@ type UpdateFilterInput struct { // Specifies the position of the filter in the list of current filters. Also // specifies the order in which this filter is applied to the findings. - Rank *int64 `locationName:"rank" type:"integer"` + Rank *int64 `locationName:"rank" min:"1" type:"integer"` } // String returns the string representation @@ -8929,6 +9816,9 @@ func (s UpdateFilterInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateFilterInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateFilterInput"} + if s.Action != nil && len(*s.Action) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Action", 1)) + } if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } @@ -8941,6 +9831,9 @@ func (s *UpdateFilterInput) Validate() error { if s.FilterName != nil && len(*s.FilterName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) } + if s.Rank != nil && *s.Rank < 1 { + invalidParams.Add(request.NewErrParamMinValue("Rank", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8984,12 +9877,13 @@ func (s *UpdateFilterInput) SetRank(v int64) *UpdateFilterInput { return s } -// UpdateFilter response object. type UpdateFilterOutput struct { _ struct{} `type:"structure"` // The name of the filter. - Name *string `locationName:"name" type:"string"` + // + // Name is a required field + Name *string `locationName:"name" min:"3" type:"string" required:"true"` } // String returns the string representation @@ -9008,15 +9902,17 @@ func (s *UpdateFilterOutput) SetName(v string) *UpdateFilterOutput { return s } -// Update findings feedback body type UpdateFindingsFeedbackInput struct { _ struct{} `type:"structure"` // Additional feedback about the GuardDuty findings. Comments *string `locationName:"comments" type:"string"` + // The ID of the detector that specifies the GuardDuty service whose findings + // you want to mark as useful or not useful. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Valid values: USEFUL | NOT_USEFUL // @@ -9099,24 +9995,28 @@ func (s UpdateFindingsFeedbackOutput) GoString() string { return s.String() } -// Update IP Set Request type UpdateIPSetInput struct { _ struct{} `type:"structure"` // The updated boolean value that specifies whether the IPSet is active or not. Activate *bool `locationName:"activate" type:"boolean"` + // The detectorID that specifies the GuardDuty service whose IPSet you want + // to update. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + // The unique ID that specifies the IPSet that you want to update. 
+ // // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` // The updated URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). - Location *string `locationName:"location" type:"string"` + Location *string `locationName:"location" min:"1" type:"string"` // The unique name of the IPSet that you want to update. - Name *string `locationName:"name" type:"string"` + Name *string `locationName:"name" min:"1" type:"string"` } // String returns the string representation @@ -9144,6 +10044,12 @@ func (s *UpdateIPSetInput) Validate() error { if s.IpSetId != nil && len(*s.IpSetId) < 1 { invalidParams.Add(request.NewErrParamMinLen("IpSetId", 1)) } + if s.Location != nil && len(*s.Location) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Location", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9195,7 +10101,6 @@ func (s UpdateIPSetOutput) GoString() string { return s.String() } -// Update Threat Intel Set Request type UpdateThreatIntelSetInput struct { _ struct{} `type:"structure"` @@ -9203,16 +10108,21 @@ type UpdateThreatIntelSetInput struct { // or not. Activate *bool `locationName:"activate" type:"boolean"` + // The detectorID that specifies the GuardDuty service whose ThreatIntelSet + // you want to update. + // // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" type:"string" required:"true"` + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // The updated URI of the file that contains the ThreatIntelSet. For example // (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) - Location *string `locationName:"location" type:"string"` + Location *string `locationName:"location" min:"1" type:"string"` // The unique name of the ThreatIntelSet that you want to update. - Name *string `locationName:"name" type:"string"` + Name *string `locationName:"name" min:"1" type:"string"` + // The unique ID that specifies the ThreatIntelSet that you want to update. + // + // ThreatIntelSetId is a required field ThreatIntelSetId *string `location:"uri" locationName:"threatIntelSetId" type:"string" required:"true"` } @@ -9236,6 +10146,12 @@ func (s *UpdateThreatIntelSetInput) Validate() error { if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } if s.DetectorId != nil && len(*s.DetectorId) < 1 { invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) } + if s.Location != nil && len(*s.Location) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Location", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } if s.ThreatIntelSetId == nil { invalidParams.Add(request.NewErrParamRequired("ThreatIntelSetId")) } @@ -9293,7 +10209,6 @@ func (s UpdateThreatIntelSetOutput) GoString() string { return s.String() } -// The status of detector. const ( // DetectorStatusEnabled is a DetectorStatus enum value DetectorStatusEnabled = "ENABLED" @@ -9302,7 +10217,6 @@ const ( DetectorStatusDisabled = "DISABLED" ) -// Finding Feedback Value const ( // FeedbackUseful is a Feedback enum value FeedbackUseful = "USEFUL" @@ -9311,7 +10225,6 @@ const ( FeedbackNotUseful = "NOT_USEFUL" ) -// The action associated with a filter.
const ( // FilterActionNoop is a FilterAction enum value FilterActionNoop = "NOOP" @@ -9320,7 +10233,6 @@ const ( FilterActionArchive = "ARCHIVE" ) -// A enum value that specifies how frequently customer got Finding updates published. const ( // FindingPublishingFrequencyFifteenMinutes is a FindingPublishingFrequency enum value FindingPublishingFrequencyFifteenMinutes = "FIFTEEN_MINUTES" @@ -9332,13 +10244,11 @@ const ( FindingPublishingFrequencySixHours = "SIX_HOURS" ) -// The types of finding statistics. const ( // FindingStatisticTypeCountBySeverity is a FindingStatisticType enum value FindingStatisticTypeCountBySeverity = "COUNT_BY_SEVERITY" ) -// The format of the ipSet. const ( // IpSetFormatTxt is a IpSetFormat enum value IpSetFormatTxt = "TXT" @@ -9359,7 +10269,6 @@ const ( IpSetFormatFireEye = "FIRE_EYE" ) -// The status of ipSet file uploaded. const ( // IpSetStatusInactive is a IpSetStatus enum value IpSetStatusInactive = "INACTIVE" @@ -9391,7 +10300,6 @@ const ( OrderByDesc = "DESC" ) -// The format of the threatIntelSet. const ( // ThreatIntelSetFormatTxt is a ThreatIntelSetFormat enum value ThreatIntelSetFormatTxt = "TXT" @@ -9412,7 +10320,6 @@ const ( ThreatIntelSetFormatFireEye = "FIRE_EYE" ) -// The status of threatIntelSet file uploaded. const ( // ThreatIntelSetStatusInactive is a ThreatIntelSetStatus enum value ThreatIntelSetStatusInactive = "INACTIVE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go index 91bb16a2ed3..fcc8098dfda 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go @@ -3,8 +3,21 @@ // Package guardduty provides the client and types for making API // requests to Amazon GuardDuty. // -// Assess, monitor, manage, and remediate security issues across your AWS infrastructure, -// applications, and data. +// Amazon GuardDuty is a continuous security monitoring service that analyzes +// and processes the following data sources: VPC Flow Logs, AWS CloudTrail event +// logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious +// IPs and domains, and machine learning to identify unexpected and potentially +// unauthorized and malicious activity within your AWS environment. This can +// include issues like escalations of privileges, uses of exposed credentials, +// or communication with malicious IPs, URLs, or domains. For example, GuardDuty +// can detect compromised EC2 instances serving malware or mining bitcoin. It +// also monitors AWS account access behavior for signs of compromise, such as +// unauthorized infrastructure deployments, like instances deployed in a region +// that has never been used, or unusual API calls, like a password policy change +// to reduce password strength. GuardDuty informs you of the status of your +// AWS environment by producing security findings that you can view in the GuardDuty +// console or through Amazon CloudWatch events. For more information, see Amazon +// GuardDuty User Guide (https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html). // // See https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28 for more information on this service. 
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go index 8f0473b6318..6d2c4ea610a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go @@ -7,12 +7,12 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // Error response object. + // Bad request exception object. ErrCodeBadRequestException = "BadRequestException" // ErrCodeInternalServerErrorException for service response error code // "InternalServerErrorException". // - // Error response object. + // Internal server error exception object. ErrCodeInternalServerErrorException = "InternalServerErrorException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go index 1c9835a9161..eec5c772aed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GuardDuty { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "guardduty" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GuardDuty { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GuardDuty { svc := &GuardDuty{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-28", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index b9ef5f44b56..353421f0817 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -5040,6 +5040,199 @@ func (c *IAM) GenerateCredentialReportWithContext(ctx aws.Context, input *Genera return out, req.Send() } +const opGenerateOrganizationsAccessReport = "GenerateOrganizationsAccessReport" + +// GenerateOrganizationsAccessReportRequest generates a "aws/request.Request" representing the +// client's request for the GenerateOrganizationsAccessReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GenerateOrganizationsAccessReport for more information on using the GenerateOrganizationsAccessReport +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GenerateOrganizationsAccessReportRequest method. +// req, resp := client.GenerateOrganizationsAccessReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateOrganizationsAccessReport +func (c *IAM) GenerateOrganizationsAccessReportRequest(input *GenerateOrganizationsAccessReportInput) (req *request.Request, output *GenerateOrganizationsAccessReportOutput) { + op := &request.Operation{ + Name: opGenerateOrganizationsAccessReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateOrganizationsAccessReportInput{} + } + + output = &GenerateOrganizationsAccessReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GenerateOrganizationsAccessReport API operation for AWS Identity and Access Management. +// +// Generates a report for service last accessed data for AWS Organizations. +// You can generate a report for any entities (organization root, organizational +// unit, or account) or policies in your organization. +// +// To call this operation, you must be signed in using your AWS Organizations +// master account credentials. You can use your long-term IAM user or root user +// credentials, or temporary credentials from assuming an IAM role. SCPs must +// be enabled for your organization root. You must have the required IAM and +// AWS Organizations permissions. For more information, see Refining Permissions +// Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// You can generate a service last accessed data report for entities by specifying +// only the entity's path. This data includes a list of services that are allowed +// by any service control policies (SCPs) that apply to the entity. +// +// You can generate a service last accessed data report for a policy by specifying +// an entity's path and an optional AWS Organizations policy ID. This data includes +// a list of services that are allowed by the specified SCP. +// +// For each service in both report types, the data includes the most recent +// account activity that the policy allows to account principals in the entity +// or the entity's children. For important information about the data, reporting +// period, permissions required, troubleshooting, and supported Regions see +// Reducing Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// The data includes all attempts to access AWS, not just the successful ones. +// This includes all attempts that were made using the AWS Management Console, +// the AWS API through any of the SDKs, or any of the command line tools. An +// unexpected entry in the service last accessed data does not mean that an +// account has been compromised, because the request might have been denied. +// Refer to your CloudTrail logs as the authoritative source for information +// about all API calls and whether they were successful or denied access. 
For +// more information, see Logging IAM Events with CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation returns a JobId. Use this parameter in the GetOrganizationsAccessReport +// operation to check the status of the report generation. To check the status +// of this request, use the JobId parameter in the GetOrganizationsAccessReport +// operation and test the JobStatus response parameter. When the job is complete, +// you can retrieve the report. +// +// To generate a service last accessed data report for entities, specify an +// entity path without specifying the optional AWS Organizations policy ID. +// The type of entity that you specify determines the data returned in the report. +// +// * Root – When you specify the organizations root as the entity, the +// resulting report lists all of the services allowed by SCPs that are attached +// to your root. For each service, the report includes data for all accounts +// in your organization except the master account, because the master account +// is not limited by SCPs. +// +// * OU – When you specify an organizational unit (OU) as the entity, the +// resulting report lists all of the services allowed by SCPs that are attached +// to the OU and its parents. For each service, the report includes data +// for all accounts in the OU or its children. This data excludes the master +// account, because the master account is not limited by SCPs. +// +// * Master account – When you specify the master account, the resulting +// report lists all AWS services, because the master account is not limited +// by SCPs. For each service, the report includes data for only the master +// account. +// +// * Account – When you specify another account as the entity, the resulting +// report lists all of the services allowed by SCPs that are attached to +// the account and its parents. For each service, the report includes data +// for only the specified account. +// +// To generate a service last accessed data report for policies, specify an +// entity path and the optional AWS Organizations policy ID. The type of entity +// that you specify determines the data returned for each service. +// +// * Root – When you specify the root entity and a policy ID, the resulting +// report lists all of the services that are allowed by the specified SCP. +// For each service, the report includes data for all accounts in your organization +// to which the SCP applies. This data excludes the master account, because +// the master account is not limited by SCPs. If the SCP is not attached +// to any entities in the organization, then the report will return a list +// of services with no data. +// +// * OU – When you specify an OU entity and a policy ID, the resulting +// report lists all of the services that are allowed by the specified SCP. +// For each service, the report includes data for all accounts in the OU +// or its children to which the SCP applies. This means that other accounts +// outside the OU that are affected by the SCP might not be included in the +// data. This data excludes the master account, because the master account +// is not limited by SCPs. If the SCP is not attached to the OU or one of +// its children, the report will return a list of services with no data. +// +// * Master account – When you specify the master account, the resulting +// report lists all AWS services, because the master account is not limited +// by SCPs. 
If you specify a policy ID in the CLI or API, the policy is ignored. +// For each service, the report includes data for only the master account. +// +// * Account – When you specify another account entity and a policy ID, +// the resulting report lists all of the services that are allowed by the +// specified SCP. For each service, the report includes data for only the +// specified account. This means that other accounts in the organization +// that are affected by the SCP might not be included in the data. If the +// SCP is not attached to the account, the report will return a list of services +// with no data. +// +// Service last accessed data does not use other policy types when determining +// whether a principal could access a service. These other policy types include +// identity-based policies, resource-based policies, access control lists, IAM +// permissions boundaries, and STS assume role policies. It only applies SCP +// logic. For more about the evaluation of policy types, see Evaluating Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For more information about service last accessed data, see Reducing Policy +// Scope by Viewing User Activity (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GenerateOrganizationsAccessReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReportGenerationLimitExceededException "ReportGenerationLimitExceeded" +// The request failed because the maximum number of concurrent requests for +// this account are already running. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateOrganizationsAccessReport +func (c *IAM) GenerateOrganizationsAccessReport(input *GenerateOrganizationsAccessReportInput) (*GenerateOrganizationsAccessReportOutput, error) { + req, out := c.GenerateOrganizationsAccessReportRequest(input) + return out, req.Send() +} + +// GenerateOrganizationsAccessReportWithContext is the same as GenerateOrganizationsAccessReport with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateOrganizationsAccessReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GenerateOrganizationsAccessReportWithContext(ctx aws.Context, input *GenerateOrganizationsAccessReportInput, opts ...request.Option) (*GenerateOrganizationsAccessReportOutput, error) { + req, out := c.GenerateOrganizationsAccessReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGenerateServiceLastAccessedDetails = "GenerateServiceLastAccessedDetails" // GenerateServiceLastAccessedDetailsRequest generates a "aws/request.Request" representing the @@ -5084,21 +5277,20 @@ func (c *IAM) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLa // GenerateServiceLastAccessedDetails API operation for AWS Identity and Access Management. // -// Generates a request for a report that includes details about when an IAM -// resource (user, group, role, or policy) was last used in an attempt to access -// AWS services. Recent activity usually appears within four hours. IAM reports -// activity for the last 365 days, or less if your Region began supporting this -// feature within the last year. For more information, see Regions Where Data -// Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period). +// Generates a report that includes details about when an IAM resource (user, +// group, role, or policy) was last used in an attempt to access AWS services. +// Recent activity usually appears within four hours. IAM reports activity for +// the last 365 days, or less if your Region began supporting this feature within +// the last year. For more information, see Regions Where Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period). // -// The service last accessed data includes all attempts to access an AWS API, +// The service last accessed data includes all attempts to access an AWS API, // not just the successful ones. This includes all attempts that were made using // the AWS Management Console, the AWS API through any of the SDKs, or any of // the command line tools. An unexpected entry in the service last accessed // data does not mean that your account has been compromised, because the request // might have been denied. Refer to your CloudTrail logs as the authoritative // source for information about all API calls and whether they were successful -// or denied access. For more information, see Logging IAM Events with CloudTrail +// or denied access. For more information, see Logging IAM Events with CloudTrail // (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) // in the IAM User Guide. // @@ -5111,9 +5303,9 @@ func (c *IAM) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLa // using permissions policies. For each service, the response includes information // about the most recent access attempt. // -// * GetServiceLastAccessedDetailsWithEntities – Use this operation for groups -// and policies to list information about the associated entities (users -// or roles) that attempted to access a specific AWS service. +// * GetServiceLastAccessedDetailsWithEntities – Use this operation for +// groups and policies to list information about the associated entities +// (users or roles) that attempted to access a specific AWS service. // // To check the status of the GenerateServiceLastAccessedDetails request, use // the JobId parameter in the same operations and test the JobStatus response @@ -5228,12 +5420,6 @@ func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req // // See the AWS API reference guide for AWS Identity and Access Management's // API operation GetAccessKeyLastUsed for usage and error information. 
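The generate-then-poll hand-off described in the GenerateOrganizationsAccessReport documentation above (its GetOrganizationsAccessReport counterpart is added further below) could be driven roughly as follows; the entity path is a placeholder, the polling interval is arbitrary, and imports of aws, session, iam, fmt, and time are assumed:

svc := iam.New(session.Must(session.NewSession()))
gen, err := svc.GenerateOrganizationsAccessReport(&iam.GenerateOrganizationsAccessReportInput{
	EntityPath: aws.String("o-a1b2c3d4e5/r-f6g7h8i9j0"), // placeholder organizations root path
})
if err != nil {
	panic(err)
}
for {
	report, err := svc.GetOrganizationsAccessReport(&iam.GetOrganizationsAccessReportInput{
		JobId: gen.JobId,
	})
	if err != nil {
		panic(err)
	}
	if aws.StringValue(report.JobStatus) != iam.JobStatusTypeInProgress {
		fmt.Println("report job finished:", aws.StringValue(report.JobStatus))
		for _, d := range report.AccessDetails {
			fmt.Println(aws.StringValue(d.ServiceNamespace), aws.StringValue(d.ServiceName))
		}
		break
	}
	time.Sleep(5 * time.Second) // arbitrary backoff
}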
-// -// Returned Error Codes: -// * ErrCodeNoSuchEntityException "NoSuchEntity" -// The request was rejected because it referenced a resource entity that does -// not exist. The error message describes the resource. -// // See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsed func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { req, out := c.GetAccessKeyLastUsedRequest(input) @@ -5365,7 +5551,7 @@ func (c *IAM) GetAccountAuthorizationDetailsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a GetAccountAuthorizationDetails operation. // pageNum := 0 // err := client.GetAccountAuthorizationDetailsPages(params, -// func(page *GetAccountAuthorizationDetailsOutput, lastPage bool) bool { +// func(page *iam.GetAccountAuthorizationDetailsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5397,10 +5583,12 @@ func (c *IAM) GetAccountAuthorizationDetailsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetAccountAuthorizationDetailsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetAccountAuthorizationDetailsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5960,7 +6148,7 @@ func (c *IAM) GetGroupWithContext(ctx aws.Context, input *GetGroupInput, opts .. // // Example iterating over at most 3 pages of a GetGroup operation. // pageNum := 0 // err := client.GetGroupPages(params, -// func(page *GetGroupOutput, lastPage bool) bool { +// func(page *iam.GetGroupOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5992,10 +6180,12 @@ func (c *IAM) GetGroupPagesWithContext(ctx aws.Context, input *GetGroupInput, fn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetGroupOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetGroupOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6361,6 +6551,105 @@ func (c *IAM) GetOpenIDConnectProviderWithContext(ctx aws.Context, input *GetOpe return out, req.Send() } +const opGetOrganizationsAccessReport = "GetOrganizationsAccessReport" + +// GetOrganizationsAccessReportRequest generates a "aws/request.Request" representing the +// client's request for the GetOrganizationsAccessReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOrganizationsAccessReport for more information on using the GetOrganizationsAccessReport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOrganizationsAccessReportRequest method. 
+// req, resp := client.GetOrganizationsAccessReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOrganizationsAccessReport +func (c *IAM) GetOrganizationsAccessReportRequest(input *GetOrganizationsAccessReportInput) (req *request.Request, output *GetOrganizationsAccessReportOutput) { + op := &request.Operation{ + Name: opGetOrganizationsAccessReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOrganizationsAccessReportInput{} + } + + output = &GetOrganizationsAccessReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOrganizationsAccessReport API operation for AWS Identity and Access Management. +// +// Retrieves the service last accessed data report for AWS Organizations that +// was previously generated using the GenerateOrganizationsAccessReport operation. +// This operation retrieves the status of your report job and the report contents. +// +// Depending on the parameters that you passed when you generated the report, +// the data returned could include different information. For details, see GenerateOrganizationsAccessReport. +// +// To call this operation, you must be signed in to the master account in your +// organization. SCPs must be enabled for your organization root. You must have +// permissions to perform this operation. For more information, see Refining +// Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// For each service that principals in an account (root users, IAM users, or +// IAM roles) could access using SCPs, the operation returns details about the +// most recent access attempt. If there was no attempt, the service is listed +// without details about the most recent attempt to access the service. If the +// operation fails, it returns the reason that it failed. +// +// By default, the list is sorted by service namespace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetOrganizationsAccessReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOrganizationsAccessReport +func (c *IAM) GetOrganizationsAccessReport(input *GetOrganizationsAccessReportInput) (*GetOrganizationsAccessReportOutput, error) { + req, out := c.GetOrganizationsAccessReportRequest(input) + return out, req.Send() +} + +// GetOrganizationsAccessReportWithContext is the same as GetOrganizationsAccessReport with the addition of +// the ability to pass a context and additional request options. +// +// See GetOrganizationsAccessReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
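[Reviewer note: the documentation above covers both the new GetOrganizationsAccessReport operation and the context semantics of its WithContext variant. A minimal sketch of the end-to-end flow, assuming credentials for an organization's master account with SCPs enabled; the entity path is the example value from the API documentation later in this diff, and the timeout and polling interval are arbitrary choices.]

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.Must(session.NewSession()))

	// Kick off the report for an Organizations entity. The path below is the
	// documentation's example value, not a real organization.
	gen, err := svc.GenerateOrganizationsAccessReport(&iam.GenerateOrganizationsAccessReportInput{
		EntityPath: aws.String("o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-rge0-awsabcde/123456789012"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Bound the polling loop with a context; per the doc comment above,
	// WithContext cancels the in-flight request when the deadline passes.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	for {
		out, err := svc.GetOrganizationsAccessReportWithContext(ctx, &iam.GetOrganizationsAccessReportInput{
			JobId: gen.JobId,
		})
		if err != nil {
			log.Fatal(err)
		}
		if aws.StringValue(out.JobStatus) != iam.JobStatusTypeInProgress {
			fmt.Printf("%d services accessible\n", aws.Int64Value(out.NumberOfServicesAccessible))
			break
		}
		time.Sleep(5 * time.Second)
	}
}
```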
+func (c *IAM) GetOrganizationsAccessReportWithContext(ctx aws.Context, input *GetOrganizationsAccessReportInput, opts ...request.Option) (*GetOrganizationsAccessReportOutput, error) { + req, out := c.GetOrganizationsAccessReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetPolicy = "GetPolicy" // GetPolicyRequest generates a "aws/request.Request" representing the @@ -7080,10 +7369,11 @@ func (c *IAM) GetServiceLastAccessedDetailsRequest(input *GetServiceLastAccessed // GetServiceLastAccessedDetails API operation for AWS Identity and Access Management. // -// After you generate a user, group, role, or policy report using the GenerateServiceLastAccessedDetails -// operation, you can use the JobId parameter in GetServiceLastAccessedDetails. -// This operation retrieves the status of your report job and a list of AWS -// services that the resource (user, group, role, or managed policy) can access. +// Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails +// operation. You can use the JobId parameter in GetServiceLastAccessedDetails +// to retrieve the status of your report job. When the report is complete, you +// can retrieve the generated report. The report includes a list of AWS services +// that the resource (user, group, role, or managed policy) can access. // // Service last accessed data does not use other policy types when determining // whether a resource could access a service. These other policy types include @@ -7205,9 +7495,9 @@ func (c *IAM) GetServiceLastAccessedDetailsWithEntitiesRequest(input *GetService // that could have used group or policy permissions to access the specified // service. // -// * Group – For a group report, this operation returns a list of users in -// the group that could have used the group’s policies in an attempt to access -// the service. +// * Group – For a group report, this operation returns a list of users +// in the group that could have used the group’s policies in an attempt +// to access the service. // // * Policy – For a policy report, this operation returns a list of entities // (users or roles) that could have used the policy in an attempt to access @@ -7655,7 +7945,7 @@ func (c *IAM) ListAccessKeysWithContext(ctx aws.Context, input *ListAccessKeysIn // // Example iterating over at most 3 pages of a ListAccessKeys operation. // pageNum := 0 // err := client.ListAccessKeysPages(params, -// func(page *ListAccessKeysOutput, lastPage bool) bool { +// func(page *iam.ListAccessKeysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7687,10 +7977,12 @@ func (c *IAM) ListAccessKeysPagesWithContext(ctx aws.Context, input *ListAccessK }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAccessKeysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAccessKeysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7794,7 +8086,7 @@ func (c *IAM) ListAccountAliasesWithContext(ctx aws.Context, input *ListAccountA // // Example iterating over at most 3 pages of a ListAccountAliases operation. 
// pageNum := 0 // err := client.ListAccountAliasesPages(params, -// func(page *ListAccountAliasesOutput, lastPage bool) bool { +// func(page *iam.ListAccountAliasesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7826,10 +8118,12 @@ func (c *IAM) ListAccountAliasesPagesWithContext(ctx aws.Context, input *ListAcc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAccountAliasesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAccountAliasesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7949,7 +8243,7 @@ func (c *IAM) ListAttachedGroupPoliciesWithContext(ctx aws.Context, input *ListA // // Example iterating over at most 3 pages of a ListAttachedGroupPolicies operation. // pageNum := 0 // err := client.ListAttachedGroupPoliciesPages(params, -// func(page *ListAttachedGroupPoliciesOutput, lastPage bool) bool { +// func(page *iam.ListAttachedGroupPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7981,10 +8275,12 @@ func (c *IAM) ListAttachedGroupPoliciesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAttachedGroupPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAttachedGroupPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8104,7 +8400,7 @@ func (c *IAM) ListAttachedRolePoliciesWithContext(ctx aws.Context, input *ListAt // // Example iterating over at most 3 pages of a ListAttachedRolePolicies operation. // pageNum := 0 // err := client.ListAttachedRolePoliciesPages(params, -// func(page *ListAttachedRolePoliciesOutput, lastPage bool) bool { +// func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8136,10 +8432,12 @@ func (c *IAM) ListAttachedRolePoliciesPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAttachedRolePoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAttachedRolePoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8259,7 +8557,7 @@ func (c *IAM) ListAttachedUserPoliciesWithContext(ctx aws.Context, input *ListAt // // Example iterating over at most 3 pages of a ListAttachedUserPolicies operation. // pageNum := 0 // err := client.ListAttachedUserPoliciesPages(params, -// func(page *ListAttachedUserPoliciesOutput, lastPage bool) bool { +// func(page *iam.ListAttachedUserPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8291,10 +8589,12 @@ func (c *IAM) ListAttachedUserPoliciesPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAttachedUserPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAttachedUserPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8411,7 +8711,7 @@ func (c *IAM) ListEntitiesForPolicyWithContext(ctx aws.Context, input *ListEntit // // Example iterating over at most 3 pages of a ListEntitiesForPolicy operation. 
// pageNum := 0 // err := client.ListEntitiesForPolicyPages(params, -// func(page *ListEntitiesForPolicyOutput, lastPage bool) bool { +// func(page *iam.ListEntitiesForPolicyOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8443,10 +8743,12 @@ func (c *IAM) ListEntitiesForPolicyPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEntitiesForPolicyOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEntitiesForPolicyOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8562,7 +8864,7 @@ func (c *IAM) ListGroupPoliciesWithContext(ctx aws.Context, input *ListGroupPoli // // Example iterating over at most 3 pages of a ListGroupPolicies operation. // pageNum := 0 // err := client.ListGroupPoliciesPages(params, -// func(page *ListGroupPoliciesOutput, lastPage bool) bool { +// func(page *iam.ListGroupPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8594,10 +8896,12 @@ func (c *IAM) ListGroupPoliciesPagesWithContext(ctx aws.Context, input *ListGrou }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8700,7 +9004,7 @@ func (c *IAM) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opt // // Example iterating over at most 3 pages of a ListGroups operation. // pageNum := 0 // err := client.ListGroupsPages(params, -// func(page *ListGroupsOutput, lastPage bool) bool { +// func(page *iam.ListGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8732,10 +9036,12 @@ func (c *IAM) ListGroupsPagesWithContext(ctx aws.Context, input *ListGroupsInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8842,7 +9148,7 @@ func (c *IAM) ListGroupsForUserWithContext(ctx aws.Context, input *ListGroupsFor // // Example iterating over at most 3 pages of a ListGroupsForUser operation. // pageNum := 0 // err := client.ListGroupsForUserPages(params, -// func(page *ListGroupsForUserOutput, lastPage bool) bool { +// func(page *iam.ListGroupsForUserOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8874,10 +9180,12 @@ func (c *IAM) ListGroupsForUserPagesWithContext(ctx aws.Context, input *ListGrou }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupsForUserOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupsForUserOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8982,7 +9290,7 @@ func (c *IAM) ListInstanceProfilesWithContext(ctx aws.Context, input *ListInstan // // Example iterating over at most 3 pages of a ListInstanceProfiles operation. 
// pageNum := 0 // err := client.ListInstanceProfilesPages(params, -// func(page *ListInstanceProfilesOutput, lastPage bool) bool { +// func(page *iam.ListInstanceProfilesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9014,10 +9322,12 @@ func (c *IAM) ListInstanceProfilesPagesWithContext(ctx aws.Context, input *ListI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstanceProfilesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstanceProfilesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9126,7 +9436,7 @@ func (c *IAM) ListInstanceProfilesForRoleWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListInstanceProfilesForRole operation. // pageNum := 0 // err := client.ListInstanceProfilesForRolePages(params, -// func(page *ListInstanceProfilesForRoleOutput, lastPage bool) bool { +// func(page *iam.ListInstanceProfilesForRoleOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9158,10 +9468,12 @@ func (c *IAM) ListInstanceProfilesForRolePagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstanceProfilesForRoleOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstanceProfilesForRoleOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9271,7 +9583,7 @@ func (c *IAM) ListMFADevicesWithContext(ctx aws.Context, input *ListMFADevicesIn // // Example iterating over at most 3 pages of a ListMFADevices operation. // pageNum := 0 // err := client.ListMFADevicesPages(params, -// func(page *ListMFADevicesOutput, lastPage bool) bool { +// func(page *iam.ListMFADevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9303,10 +9615,12 @@ func (c *IAM) ListMFADevicesPagesWithContext(ctx aws.Context, input *ListMFADevi }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMFADevicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMFADevicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9500,7 +9814,7 @@ func (c *IAM) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, // // Example iterating over at most 3 pages of a ListPolicies operation. // pageNum := 0 // err := client.ListPoliciesPages(params, -// func(page *ListPoliciesOutput, lastPage bool) bool { +// func(page *iam.ListPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9532,10 +9846,12 @@ func (c *IAM) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9602,9 +9918,9 @@ func (c *IAM) ListPoliciesGrantingServiceAccessRequest(input *ListPoliciesGranti // managed and inline policies that are attached to the group to which the // user belongs. // -// * Group – The list of policies includes only the managed and inline policies -// that are attached to the group directly. Policies that are attached to -// the group’s user are not included. +// * Group – The list of policies includes only the managed and inline +// policies that are attached to the group directly. Policies that are attached +// to the group’s user are not included. 
// // * Role – The list of policies includes only the managed and inline policies // that are attached to the role. @@ -9767,7 +10083,7 @@ func (c *IAM) ListPolicyVersionsWithContext(ctx aws.Context, input *ListPolicyVe // // Example iterating over at most 3 pages of a ListPolicyVersions operation. // pageNum := 0 // err := client.ListPolicyVersionsPages(params, -// func(page *ListPolicyVersionsOutput, lastPage bool) bool { +// func(page *iam.ListPolicyVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9799,10 +10115,12 @@ func (c *IAM) ListPolicyVersionsPagesWithContext(ctx aws.Context, input *ListPol }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPolicyVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPolicyVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9917,7 +10235,7 @@ func (c *IAM) ListRolePoliciesWithContext(ctx aws.Context, input *ListRolePolici // // Example iterating over at most 3 pages of a ListRolePolicies operation. // pageNum := 0 // err := client.ListRolePoliciesPages(params, -// func(page *ListRolePoliciesOutput, lastPage bool) bool { +// func(page *iam.ListRolePoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9949,10 +10267,12 @@ func (c *IAM) ListRolePoliciesPagesWithContext(ctx aws.Context, input *ListRoleP }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRolePoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRolePoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10144,7 +10464,7 @@ func (c *IAM) ListRolesWithContext(ctx aws.Context, input *ListRolesInput, opts // // Example iterating over at most 3 pages of a ListRoles operation. // pageNum := 0 // err := client.ListRolesPages(params, -// func(page *ListRolesOutput, lastPage bool) bool { +// func(page *iam.ListRolesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10176,10 +10496,12 @@ func (c *IAM) ListRolesPagesWithContext(ctx aws.Context, input *ListRolesInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRolesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRolesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10372,7 +10694,7 @@ func (c *IAM) ListSSHPublicKeysWithContext(ctx aws.Context, input *ListSSHPublic // // Example iterating over at most 3 pages of a ListSSHPublicKeys operation. // pageNum := 0 // err := client.ListSSHPublicKeysPages(params, -// func(page *ListSSHPublicKeysOutput, lastPage bool) bool { +// func(page *iam.ListSSHPublicKeysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10404,10 +10726,12 @@ func (c *IAM) ListSSHPublicKeysPagesWithContext(ctx aws.Context, input *ListSSHP }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSSHPublicKeysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSSHPublicKeysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10516,7 +10840,7 @@ func (c *IAM) ListServerCertificatesWithContext(ctx aws.Context, input *ListServ // // Example iterating over at most 3 pages of a ListServerCertificates operation. 
// pageNum := 0 // err := client.ListServerCertificatesPages(params, -// func(page *ListServerCertificatesOutput, lastPage bool) bool { +// func(page *iam.ListServerCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10548,10 +10872,12 @@ func (c *IAM) ListServerCertificatesPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServerCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServerCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10755,7 +11081,7 @@ func (c *IAM) ListSigningCertificatesWithContext(ctx aws.Context, input *ListSig // // Example iterating over at most 3 pages of a ListSigningCertificates operation. // pageNum := 0 // err := client.ListSigningCertificatesPages(params, -// func(page *ListSigningCertificatesOutput, lastPage bool) bool { +// func(page *iam.ListSigningCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10787,10 +11113,12 @@ func (c *IAM) ListSigningCertificatesPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSigningCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSigningCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -10904,7 +11232,7 @@ func (c *IAM) ListUserPoliciesWithContext(ctx aws.Context, input *ListUserPolici // // Example iterating over at most 3 pages of a ListUserPolicies operation. // pageNum := 0 // err := client.ListUserPoliciesPages(params, -// func(page *ListUserPoliciesOutput, lastPage bool) bool { +// func(page *iam.ListUserPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -10936,10 +11264,12 @@ func (c *IAM) ListUserPoliciesPagesWithContext(ctx aws.Context, input *ListUserP }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUserPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUserPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11131,7 +11461,7 @@ func (c *IAM) ListUsersWithContext(ctx aws.Context, input *ListUsersInput, opts // // Example iterating over at most 3 pages of a ListUsers operation. // pageNum := 0 // err := client.ListUsersPages(params, -// func(page *ListUsersOutput, lastPage bool) bool { +// func(page *iam.ListUsersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11163,10 +11493,12 @@ func (c *IAM) ListUsersPagesWithContext(ctx aws.Context, input *ListUsersInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -11266,7 +11598,7 @@ func (c *IAM) ListVirtualMFADevicesWithContext(ctx aws.Context, input *ListVirtu // // Example iterating over at most 3 pages of a ListVirtualMFADevices operation. 
// pageNum := 0 // err := client.ListVirtualMFADevicesPages(params, -// func(page *ListVirtualMFADevicesOutput, lastPage bool) bool { +// func(page *iam.ListVirtualMFADevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -11298,10 +11630,12 @@ func (c *IAM) ListVirtualMFADevicesPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVirtualMFADevicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVirtualMFADevicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12647,7 +12981,7 @@ func (c *IAM) SimulateCustomPolicyWithContext(ctx aws.Context, input *SimulateCu // // Example iterating over at most 3 pages of a SimulateCustomPolicy operation. // pageNum := 0 // err := client.SimulateCustomPolicyPages(params, -// func(page *SimulatePolicyResponse, lastPage bool) bool { +// func(page *iam.SimulatePolicyResponse, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12679,10 +13013,12 @@ func (c *IAM) SimulateCustomPolicyPagesWithContext(ctx aws.Context, input *Simul }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12817,7 +13153,7 @@ func (c *IAM) SimulatePrincipalPolicyWithContext(ctx aws.Context, input *Simulat // // Example iterating over at most 3 pages of a SimulatePrincipalPolicy operation. // pageNum := 0 // err := client.SimulatePrincipalPolicyPages(params, -// func(page *SimulatePolicyResponse, lastPage bool) bool { +// func(page *iam.SimulatePolicyResponse, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -12849,10 +13185,12 @@ func (c *IAM) SimulatePrincipalPolicyPagesWithContext(ctx aws.Context, input *Si }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -12925,13 +13263,13 @@ func (c *IAM) TagRoleRequest(input *TagRoleInput) (req *request.Request, output // * Cost allocation - Use tags to help track which individuals and teams // are using which AWS resources. // -// Make sure that you have no invalid tags and that you do not exceed the allowed -// number of tags per role. In either case, the entire request fails and no -// tags are added to the role. +// * Make sure that you have no invalid tags and that you do not exceed the +// allowed number of tags per role. In either case, the entire request fails +// and no tags are added to the role. // -// AWS always interprets the tag Value as a single string. If you need to store -// an array, you can store comma-separated values in the string. However, you -// must interpret the value in your code. +// * AWS always interprets the tag Value as a single string. If you need +// to store an array, you can store comma-separated values in the string. +// However, you must interpret the value in your code. // // For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) // in the IAM User Guide. @@ -13055,13 +13393,13 @@ func (c *IAM) TagUserRequest(input *TagUserInput) (req *request.Request, output // * Cost allocation - Use tags to help track which individuals and teams // are using which AWS resources. 
// -// Make sure that you have no invalid tags and that you do not exceed the allowed -// number of tags per role. In either case, the entire request fails and no -// tags are added to the role. +// * Make sure that you have no invalid tags and that you do not exceed the +// allowed number of tags per role. In either case, the entire request fails +// and no tags are added to the role. // -// AWS always interprets the tag Value as a single string. If you need to store -// an array, you can store comma-separated values in the string. However, you -// must interpret the value in your code. +// * AWS always interprets the tag Value as a single string. If you need +// to store an array, you can store comma-separated values in the string. +// However, you must interpret the value in your code. // // For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) // in the IAM User Guide. @@ -13448,12 +13786,12 @@ func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPol // // Updates the password policy settings for the AWS account. // -// This operation does not support partial updates. No parameters are required, -// but if you do not specify a parameter, that parameter's value reverts to -// its default value. See the Request Parameters section for each parameter's -// default value. Also note that some parameters do not allow the default parameter -// to be explicitly set. Instead, to invoke the default value, do not include -// that parameter when you invoke the operation. +// * This operation does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts +// to its default value. See the Request Parameters section for each parameter's +// default value. Also note that some parameters do not allow the default +// parameter to be explicitly set. Instead, to invoke the default value, +// do not include that parameter when you invoke the operation. // // For more information about using a password policy, see Managing an IAM Password // Policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) @@ -15036,6 +15374,108 @@ func (c *IAM) UploadSigningCertificateWithContext(ctx aws.Context, input *Upload return out, req.Send() } +// An object that contains details about when a principal in the reported AWS +// Organizations entity last attempted to access an AWS service. A principal +// can be an IAM user, an IAM role, or the AWS account root user within the +// reported Organizations entity. +// +// This data type is a response element in the GetOrganizationsAccessReport +// operation. +type AccessDetail struct { + _ struct{} `type:"structure"` + + // The path of the Organizations entity (root, organizational unit, or account) + // from which an authenticated principal last attempted to access the service. + // AWS does not report unauthenticated requests. + // + // This field is null if no principals (IAM users, IAM roles, or root users) + // in the reported Organizations entity attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + EntityPath *string `min:"19" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when an authenticated principal most recently attempted to access the service. 
+ // AWS does not report unauthenticated requests. + // + // This field is null if no principals in the reported Organizations entity + // attempted to access the service within the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticatedTime *time.Time `type:"timestamp"` + + // The Region where the last service access attempt occurred. + // + // This field is null if no principals in the reported Organizations entity + // attempted to access the service within the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + Region *string `type:"string"` + + // The name of the service in which access was attempted. + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` + + // The namespace of the service in which access was attempted. + // + // To learn the service namespace of a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + // + // ServiceNamespace is a required field + ServiceNamespace *string `min:"1" type:"string" required:"true"` + + // The number of accounts with authenticated principals (root users, IAM users, + // and IAM roles) that attempted to access the service in the reporting period. + TotalAuthenticatedEntities *int64 `type:"integer"` +} + +// String returns the string representation +func (s AccessDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDetail) GoString() string { + return s.String() +} + +// SetEntityPath sets the EntityPath field's value. +func (s *AccessDetail) SetEntityPath(v string) *AccessDetail { + s.EntityPath = &v + return s +} + +// SetLastAuthenticatedTime sets the LastAuthenticatedTime field's value. +func (s *AccessDetail) SetLastAuthenticatedTime(v time.Time) *AccessDetail { + s.LastAuthenticatedTime = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *AccessDetail) SetRegion(v string) *AccessDetail { + s.Region = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *AccessDetail) SetServiceName(v string) *AccessDetail { + s.ServiceName = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *AccessDetail) SetServiceNamespace(v string) *AccessDetail { + s.ServiceNamespace = &v + return s +} + +// SetTotalAuthenticatedEntities sets the TotalAuthenticatedEntities field's value. +func (s *AccessDetail) SetTotalAuthenticatedEntities(v int64) *AccessDetail { + s.TotalAuthenticatedEntities = &v + return s +} + // Contains information about an AWS access key. // // This data type is used as a response element in the CreateAccessKey and ListAccessKeys @@ -15130,12 +15570,12 @@ type AccessKeyLastUsed struct { // * An access key exists but has not been used since IAM began tracking // this information. 
// - // * There is no sign-in data associated with the user + // * There is no sign-in data associated with the user. // // LastUsedDate is a required field LastUsedDate *time.Time `type:"timestamp" required:"true"` - // The AWS region where this access key was most recently used. The value for + // The AWS Region where this access key was most recently used. The value for // this field is "N/A" in the following situations: // // * The user does not have an access key. @@ -15143,9 +15583,9 @@ type AccessKeyLastUsed struct { // * An access key exists but has not been used since IAM began tracking // this information. // - // * There is no sign-in data associated with the user + // * There is no sign-in data associated with the user. // - // For more information about AWS regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html) + // For more information about AWS Regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html) // in the Amazon Web Services General Reference. // // Region is a required field @@ -15159,7 +15599,7 @@ type AccessKeyLastUsed struct { // * An access key exists but has not been used since IAM started tracking // this information. // - // * There is no sign-in data associated with the user + // * There is no sign-in data associated with the user. // // ServiceName is a required field ServiceName *string `type:"string" required:"true"` @@ -15728,7 +16168,7 @@ func (s AttachUserPolicyOutput) GoString() string { // to a user or role to set the permissions boundary. // // For more information about permissions boundaries, see Permissions Boundaries -// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. type AttachedPermissionsBoundary struct { _ struct{} `type:"structure"` @@ -15899,7 +16339,7 @@ func (s ChangePasswordOutput) GoString() string { // evaluating the Condition elements of the input policies. // // This data type is used as an input parameter to SimulateCustomPolicy and -// SimulateCustomPolicy. +// SimulatePrincipalPolicy . type ContextEntry struct { _ struct{} `type:"structure"` @@ -16532,7 +16972,7 @@ type CreatePolicyInput struct { // can contain any ASCII character from the ! (\u0021) through the DEL character // (\u007F), including most punctuation characters, digits, and upper and lowercased // letters. - Path *string `type:"string"` + Path *string `min:"1" type:"string"` // The JSON policy document that you want to use as the content for the new // policy. @@ -16580,6 +17020,9 @@ func (s CreatePolicyInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreatePolicyInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreatePolicyInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } if s.PolicyDocument == nil { invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) } @@ -16775,10 +17218,10 @@ type CreateRoleInput struct { // The trust relationship policy document that grants an entity permission to // assume the role. // - // You must provide policies in JSON format in IAM. However, for AWS CloudFormation - // templates formatted in YAML, you can provide the policy in JSON or YAML format. 
- // AWS CloudFormation always converts a YAML policy to JSON format before submitting - // it to IAM. + // In IAM, you must provide a JSON policy that has been converted to a string. + // However, for AWS CloudFormation templates formatted in YAML, you can provide + // the policy in JSON or YAML format. AWS CloudFormation always converts a YAML + // policy to JSON format before submitting it to IAM. // // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this // parameter is a string of characters consisting of the following: @@ -16792,6 +17235,8 @@ type CreateRoleInput struct { // * The special characters tab (\u0009), line feed (\u000A), and carriage // return (\u000D) // + // Upon success, the response includes the same trust policy in JSON format. + // // AssumeRolePolicyDocument is a required field AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` @@ -17083,10 +17528,13 @@ type CreateServiceLinkedRoleInput struct { // A string that you provide, which is combined with the service-provided prefix // to form the complete role name. If you make multiple requests for the same - // service, then you must supply a different CustomSuffixfor each request. Otherwise the request fails with a duplicate role name - // error. For example, you could add -1or -debugto the suffix. + // service, then you must supply a different CustomSuffix for each request. + // Otherwise the request fails with a duplicate role name error. For example, + // you could add -1 or -debug to the suffix. // - // Some services do not support the CustomSuffix + // Some services do not support the CustomSuffix parameter. If you provide an + // optional suffix and the operation fails, try the operation again without + // the suffix. CustomSuffix *string `min:"1" type:"string"` // The description of the role. @@ -19063,7 +19511,7 @@ type DeletionTaskFailureReasonType struct { // role has active sessions or if any resources that were used by the role have // not been deleted from the linked service, the role can't be deleted. This // parameter includes a list of the resources that are associated with the role - // and the region in which the resources are being used. + // and the Region in which the resources are being used. RoleUsageList []*RoleUsageType `type:"list"` } @@ -19470,12 +19918,12 @@ func (s EnableMFADeviceOutput) GoString() string { type EntityDetails struct { _ struct{} `type:"structure"` - // The EntityInfo object that contains details about the entity (user or role). + // The EntityInfo object that contains details about the entity (user or role). // // EntityInfo is a required field EntityInfo *EntityInfo `type:"structure" required:"true"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when the authenticated entity last attempted to access AWS. AWS does not // report unauthenticated requests. // @@ -19584,8 +20032,9 @@ func (s *EntityInfo) SetType(v string) *EntityInfo { // Contains information about the reason that the operation failed. // -// This data type is used as a response element in the GetServiceLastAccessedDetails -// operation and the GetServiceLastAccessedDetailsWithEntities operation. +// This data type is used as a response element in the GetOrganizationsAccessReport, +// GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities +// operations. 
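[Reviewer note: per the ErrorDetails documentation above, a failed report job surfaces its reason in the response body (a JobStatus of FAILED plus an ErrorDetails value) rather than as a Go error. A short sketch of checking for that case; the JobId is a placeholder for a value returned by GenerateServiceLastAccessedDetails.]

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.Must(session.NewSession()))

	// Placeholder JobId, standing in for a GenerateServiceLastAccessedDetails result.
	jobID := aws.String("examplef-1305-c245-eba4-71fe298bcda7")

	out, err := svc.GetServiceLastAccessedDetails(&iam.GetServiceLastAccessedDetailsInput{
		JobId: jobID,
	})
	if err != nil {
		// Transport- and request-level failures still arrive as Go errors.
		log.Fatal(err)
	}

	// A failed report job reports its reason through the Error field, not err.
	if aws.StringValue(out.JobStatus) == iam.JobStatusTypeFailed && out.Error != nil {
		fmt.Printf("report failed: %s: %s\n",
			aws.StringValue(out.Error.Code),
			aws.StringValue(out.Error.Message))
	}
}
```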
type ErrorDetails struct { _ struct{} `type:"structure"` @@ -19625,7 +20074,7 @@ func (s *ErrorDetails) SetMessage(v string) *ErrorDetails { // Contains the results of a simulation. // // This data type is used by the return parameter of SimulateCustomPolicy and -// SimulatePrincipalPolicy. +// SimulatePrincipalPolicy . type EvaluationResult struct { _ struct{} `type:"structure"` @@ -19653,7 +20102,7 @@ type EvaluationResult struct { // A list of the statements in the input policies that determine the result // for this scenario. Remember that even if multiple statements allow the operation // on the resource, if only one statement denies that operation, then the explicit - // deny overrides any allow. Inaddition, the deny statement is the only entry + // deny overrides any allow. In addition, the deny statement is the only entry // included in the result. MatchedStatements []*Statement `type:"list"` @@ -19666,7 +20115,7 @@ type EvaluationResult struct { // call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy. MissingContextValues []*string `type:"list"` - // A structure that details how AWS Organizations and its service control policies + // A structure that details how Organizations and its service control policies // affect the results of the simulation. Only applies if the simulated user's // account is part of an organization. OrganizationsDecisionDetail *OrganizationsDecisionDetail `type:"structure"` @@ -19734,50 +20183,131 @@ func (s *EvaluationResult) SetResourceSpecificResults(v []*ResourceSpecificResul return s } -type GenerateCredentialReportInput struct { +type GenerateCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GenerateCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GenerateCredentialReport request. +type GenerateCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Information about the credential report. + Description *string `type:"string"` + + // Information about the state of the credential report. + State *string `type:"string" enum:"ReportStateType"` +} + +// String returns the string representation +func (s GenerateCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *GenerateCredentialReportOutput) SetDescription(v string) *GenerateCredentialReportOutput { + s.Description = &v + return s +} + +// SetState sets the State field's value. +func (s *GenerateCredentialReportOutput) SetState(v string) *GenerateCredentialReportOutput { + s.State = &v + return s +} + +type GenerateOrganizationsAccessReportInput struct { _ struct{} `type:"structure"` + + // The path of the AWS Organizations entity (root, OU, or account). You can + // build an entity path using the known structure of your organization. For + // example, assume that your account ID is 123456789012 and its parent OU ID + // is ou-rge0-awsabcde. The organization root ID is r-f6g7h8i9j0example and + // your organization ID is o-a1b2c3d4e5. Your entity path is o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-rge0-awsabcde/123456789012. 
+ // + // EntityPath is a required field + EntityPath *string `min:"19" type:"string" required:"true"` + + // The identifier of the AWS Organizations service control policy (SCP). This + // parameter is optional. + // + // This ID is used to generate information about when an account principal that + // is limited by the SCP attempted to access an AWS service. + OrganizationsPolicyId *string `type:"string"` } // String returns the string representation -func (s GenerateCredentialReportInput) String() string { +func (s GenerateOrganizationsAccessReportInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GenerateCredentialReportInput) GoString() string { +func (s GenerateOrganizationsAccessReportInput) GoString() string { return s.String() } -// Contains the response to a successful GenerateCredentialReport request. -type GenerateCredentialReportOutput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateOrganizationsAccessReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateOrganizationsAccessReportInput"} + if s.EntityPath == nil { + invalidParams.Add(request.NewErrParamRequired("EntityPath")) + } + if s.EntityPath != nil && len(*s.EntityPath) < 19 { + invalidParams.Add(request.NewErrParamMinLen("EntityPath", 19)) + } - // Information about the credential report. - Description *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Information about the state of the credential report. - State *string `type:"string" enum:"ReportStateType"` +// SetEntityPath sets the EntityPath field's value. +func (s *GenerateOrganizationsAccessReportInput) SetEntityPath(v string) *GenerateOrganizationsAccessReportInput { + s.EntityPath = &v + return s +} + +// SetOrganizationsPolicyId sets the OrganizationsPolicyId field's value. +func (s *GenerateOrganizationsAccessReportInput) SetOrganizationsPolicyId(v string) *GenerateOrganizationsAccessReportInput { + s.OrganizationsPolicyId = &v + return s +} + +type GenerateOrganizationsAccessReportOutput struct { + _ struct{} `type:"structure"` + + // The job identifier that you can use in the GetOrganizationsAccessReport operation. + JobId *string `min:"36" type:"string"` } // String returns the string representation -func (s GenerateCredentialReportOutput) String() string { +func (s GenerateOrganizationsAccessReportOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GenerateCredentialReportOutput) GoString() string { +func (s GenerateOrganizationsAccessReportOutput) GoString() string { return s.String() } -// SetDescription sets the Description field's value. -func (s *GenerateCredentialReportOutput) SetDescription(v string) *GenerateCredentialReportOutput { - s.Description = &v - return s -} - -// SetState sets the State field's value. -func (s *GenerateCredentialReportOutput) SetState(v string) *GenerateCredentialReportOutput { - s.State = &v +// SetJobId sets the JobId field's value. +func (s *GenerateOrganizationsAccessReportOutput) SetJobId(v string) *GenerateOrganizationsAccessReportOutput { + s.JobId = &v return s } @@ -20135,8 +20665,8 @@ func (s GetAccountSummaryInput) GoString() string { type GetAccountSummaryOutput struct { _ struct{} `type:"structure"` - // A set of key–value pairs containing information about IAM entity usage and - // IAM quotas. 
+ // A set of key–value pairs containing information about IAM entity usage + // and IAM quotas. SummaryMap map[string]*int64 `type:"map"` } @@ -20869,6 +21399,211 @@ func (s *GetOpenIDConnectProviderOutput) SetUrl(v string) *GetOpenIDConnectProvi return s } +type GetOrganizationsAccessReportInput struct { + _ struct{} `type:"structure"` + + // The identifier of the request generated by the GenerateOrganizationsAccessReport + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The key that is used to sort the results. If you choose the namespace key, + // the results are returned in alphabetical order. If you choose the time key, + // the results are sorted numerically by the date and time. + SortKey *string `type:"string" enum:"sortKeyType"` +} + +// String returns the string representation +func (s GetOrganizationsAccessReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOrganizationsAccessReportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOrganizationsAccessReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOrganizationsAccessReportInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *GetOrganizationsAccessReportInput) SetJobId(v string) *GetOrganizationsAccessReportInput { + s.JobId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetOrganizationsAccessReportInput) SetMarker(v string) *GetOrganizationsAccessReportInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *GetOrganizationsAccessReportInput) SetMaxItems(v int64) *GetOrganizationsAccessReportInput { + s.MaxItems = &v + return s +} + +// SetSortKey sets the SortKey field's value. 
+func (s *GetOrganizationsAccessReportInput) SetSortKey(v string) *GetOrganizationsAccessReportInput { + s.SortKey = &v + return s +} + +type GetOrganizationsAccessReportOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the most recent attempt to access the + // service. + AccessDetails []*AccessDetail `type:"list"` + + // Contains information about the reason that the operation failed. + // + // This data type is used as a response element in the GetOrganizationsAccessReport, + // GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities + // operations. + ErrorDetails *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. + JobCompletionDate *time.Time `type:"timestamp"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"jobStatusType"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // The number of services that the applicable SCPs allow account principals + // to access. + NumberOfServicesAccessible *int64 `type:"integer"` + + // The number of services that account principals are allowed but did not attempt + // to access. + NumberOfServicesNotAccessed *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetOrganizationsAccessReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOrganizationsAccessReportOutput) GoString() string { + return s.String() +} + +// SetAccessDetails sets the AccessDetails field's value. +func (s *GetOrganizationsAccessReportOutput) SetAccessDetails(v []*AccessDetail) *GetOrganizationsAccessReportOutput { + s.AccessDetails = v + return s +} + +// SetErrorDetails sets the ErrorDetails field's value. +func (s *GetOrganizationsAccessReportOutput) SetErrorDetails(v *ErrorDetails) *GetOrganizationsAccessReportOutput { + s.ErrorDetails = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetOrganizationsAccessReportOutput) SetIsTruncated(v bool) *GetOrganizationsAccessReportOutput { + s.IsTruncated = &v + return s +} + +// SetJobCompletionDate sets the JobCompletionDate field's value. +func (s *GetOrganizationsAccessReportOutput) SetJobCompletionDate(v time.Time) *GetOrganizationsAccessReportOutput { + s.JobCompletionDate = &v + return s +} + +// SetJobCreationDate sets the JobCreationDate field's value. 
+func (s *GetOrganizationsAccessReportOutput) SetJobCreationDate(v time.Time) *GetOrganizationsAccessReportOutput { + s.JobCreationDate = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *GetOrganizationsAccessReportOutput) SetJobStatus(v string) *GetOrganizationsAccessReportOutput { + s.JobStatus = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetOrganizationsAccessReportOutput) SetMarker(v string) *GetOrganizationsAccessReportOutput { + s.Marker = &v + return s +} + +// SetNumberOfServicesAccessible sets the NumberOfServicesAccessible field's value. +func (s *GetOrganizationsAccessReportOutput) SetNumberOfServicesAccessible(v int64) *GetOrganizationsAccessReportOutput { + s.NumberOfServicesAccessible = &v + return s +} + +// SetNumberOfServicesNotAccessed sets the NumberOfServicesNotAccessed field's value. +func (s *GetOrganizationsAccessReportOutput) SetNumberOfServicesNotAccessed(v int64) *GetOrganizationsAccessReportOutput { + s.NumberOfServicesNotAccessed = &v + return s +} + type GetPolicyInput struct { _ struct{} `type:"structure"` @@ -21566,21 +22301,23 @@ type GetServiceLastAccessedDetailsOutput struct { Error *ErrorDetails `type:"structure"` // A flag that indicates whether there are more items to return. If your results - // were truncated, you can make a subsequent pagination request using the Markerrequest parameter to retrieve more items. Note that IAM might return fewer - // than the MaxItemsnumber of results even when there are more results available. We recommend - // that you check IsTruncated + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. IsTruncated *bool `type:"boolean"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when the generated report job was completed or failed. // - // This field is null if the job is still in progress, as indicated by a JobStatus - // value of IN_PROGRESS. + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. // // JobCompletionDate is a required field JobCompletionDate *time.Time `type:"timestamp" required:"true"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when the report job was created. // // JobCreationDate is a required field @@ -21595,7 +22332,7 @@ type GetServiceLastAccessedDetailsOutput struct { // to use for the Marker parameter in a subsequent pagination request. Marker *string `type:"string"` - // A ServiceLastAccessed object that contains details about the most recent + // A ServiceLastAccessed object that contains details about the most recent // attempt to access the service. // // ServicesLastAccessed is a required field @@ -21689,7 +22426,7 @@ type GetServiceLastAccessedDetailsWithEntitiesInput struct { // that service. In the first paragraph, find the service prefix. For example, // (service prefix: a4b). 
For more information about service namespaces, see // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the AWS General Reference. + // in the AWS General Reference. // // ServiceNamespace is a required field ServiceNamespace *string `min:"1" type:"string" required:"true"` @@ -21760,7 +22497,7 @@ func (s *GetServiceLastAccessedDetailsWithEntitiesInput) SetServiceNamespace(v s type GetServiceLastAccessedDetailsWithEntitiesOutput struct { _ struct{} `type:"structure"` - // An EntityDetailsList object that contains details about when an IAM entity + // An EntityDetailsList object that contains details about when an IAM entity // (user or role) used group or policy permissions in an attempt to access the // specified AWS service. // @@ -21778,13 +22515,16 @@ type GetServiceLastAccessedDetailsWithEntitiesOutput struct { // receive all your results. IsTruncated *bool `type:"boolean"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when the generated report job was completed or failed. // + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. + // // JobCompletionDate is a required field JobCompletionDate *time.Time `type:"timestamp" required:"true"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when the report job was created. // // JobCreationDate is a required field @@ -22677,7 +23417,7 @@ type ListAttachedGroupPoliciesInput struct { // can contain any ASCII character from the ! (\u0021) through the DEL character // (\u007F), including most punctuation characters, digits, and upper and lowercased // letters. - PathPrefix *string `type:"string"` + PathPrefix *string `min:"1" type:"string"` } // String returns the string representation @@ -22705,6 +23445,9 @@ func (s *ListAttachedGroupPoliciesInput) Validate() error { if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -22813,7 +23556,7 @@ type ListAttachedRolePoliciesInput struct { // can contain any ASCII character from the ! (\u0021) through the DEL character // (\u007F), including most punctuation characters, digits, and upper and lowercased // letters. - PathPrefix *string `type:"string"` + PathPrefix *string `min:"1" type:"string"` // The name (friendly name, not ARN) of the role to list attached policies for. // @@ -22844,6 +23587,9 @@ func (s *ListAttachedRolePoliciesInput) Validate() error { if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } if s.RoleName == nil { invalidParams.Add(request.NewErrParamRequired("RoleName")) } @@ -22958,7 +23704,7 @@ type ListAttachedUserPoliciesInput struct { // can contain any ASCII character from the ! (\u0021) through the DEL character // (\u007F), including most punctuation characters, digits, and upper and lowercased // letters. 
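As a usage sketch (not part of this diff): the min:"1" tag and the matching Validate() branches added in the hunks above mean an empty PathPrefix is now rejected client-side, before any request is signed or sent. The group name below is a hypothetical placeholder.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        // A zero-length PathPrefix violates the new min:"1" constraint, so
        // Validate() fails locally instead of costing a round trip to IAM.
        input := &iam.ListAttachedGroupPoliciesInput{
            GroupName:  aws.String("Admins"), // hypothetical group name
            PathPrefix: aws.String(""),
        }
        if err := input.Validate(); err != nil {
            fmt.Println(err) // reports the PathPrefix minimum-length violation
        }
    }
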
- PathPrefix *string `type:"string"` + PathPrefix *string `min:"1" type:"string"` // The name (friendly name, not ARN) of the user to list attached policies for. // @@ -22989,6 +23735,9 @@ func (s *ListAttachedUserPoliciesInput) Validate() error { if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } @@ -23124,9 +23873,9 @@ type ListEntitiesForPolicyInput struct { // The policy usage method to use for filtering the results. // - // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. - // To list only the policies used to set permissions boundaries, set the value - // to PermissionsBoundary. + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. + // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. // // This parameter is optional. If it is not included, all policies are returned. PolicyUsageFilter *string `type:"string" enum:"PolicyUsageType"` @@ -24094,7 +24843,7 @@ func (s *ListOpenIDConnectProvidersOutput) SetOpenIDConnectProviderList(v []*Ope type ListPoliciesGrantingServiceAccessEntry struct { _ struct{} `type:"structure"` - // The PoliciesGrantingServiceAccess object that contains details about the + // The PoliciesGrantingServiceAccess object that contains details about the // policy. Policies []*PolicyGrantingServiceAccess `type:"list"` @@ -24106,7 +24855,7 @@ type ListPoliciesGrantingServiceAccessEntry struct { // that service. In the first paragraph, find the service prefix. For example, // (service prefix: a4b). For more information about service namespaces, see // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the AWS General Reference. + // in the AWS General Reference. ServiceNamespace *string `min:"1" type:"string"` } @@ -24155,7 +24904,7 @@ type ListPoliciesGrantingServiceAccessInput struct { // that service. In the first paragraph, find the service prefix. For example, // (service prefix: a4b). For more information about service namespaces, see // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the AWS General Reference. + // in the AWS General Reference. // // ServiceNamespaces is a required field ServiceNamespaces []*string `min:"1" type:"list" required:"true"` @@ -24227,7 +24976,7 @@ type ListPoliciesGrantingServiceAccessOutput struct { // to use for the Marker parameter in a subsequent pagination request. Marker *string `type:"string"` - // A ListPoliciesGrantingServiceAccess object that contains details about the + // A ListPoliciesGrantingServiceAccess object that contains details about the // permissions policies attached to the specified identity (user, group, or // role). // @@ -24298,13 +25047,13 @@ type ListPoliciesInput struct { // can contain any ASCII character from the ! (\u0021) through the DEL character // (\u007F), including most punctuation characters, digits, and upper and lowercased // letters. - PathPrefix *string `type:"string"` + PathPrefix *string `min:"1" type:"string"` // The policy usage method to use for filtering the results. 
// - // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. - // To list only the policies used to set permissions boundaries, set the value - // to PermissionsBoundary. + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. + // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. // // This parameter is optional. If it is not included, all policies are returned. PolicyUsageFilter *string `type:"string" enum:"PolicyUsageType"` @@ -24338,6 +25087,9 @@ func (s *ListPoliciesInput) Validate() error { if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -26106,13 +26858,13 @@ type ManagedPolicyDetail struct { // // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) // in the Using IAM guide. - Path *string `type:"string"` + Path *string `min:"1" type:"string"` // The number of entities (users and roles) for which the policy is used as // the permissions boundary. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundaryUsageCount *int64 `type:"integer"` @@ -26248,11 +27000,12 @@ func (s *OpenIDConnectProviderListEntry) SetArn(v string) *OpenIDConnectProvider return s } -// Contains information about AWS Organizations's effect on a policy simulation. +// Contains information about the effect that Organizations has on a policy +// simulation. type OrganizationsDecisionDetail struct { _ struct{} `type:"structure"` - // Specifies whether the simulated operation is allowed by the AWS Organizations + // Specifies whether the simulated operation is allowed by the Organizations // service control policies that impact the simulated user's account. AllowedByOrganizations *bool `type:"boolean"` } @@ -26427,13 +27180,13 @@ type Policy struct { // // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) // in the Using IAM guide. - Path *string `type:"string"` + Path *string `min:"1" type:"string"` // The number of entities (users and roles) for which the policy is used to // set the permissions boundary. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundaryUsageCount *int64 `type:"integer"` @@ -26891,7 +27644,9 @@ type PutGroupPolicyInput struct { // The name of the group to associate the policy with. // - // ®ex-name;. + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@-. 
// // GroupName is a required field GroupName *string `min:"1" type:"string" required:"true"` @@ -27944,7 +28699,7 @@ type Role struct { // The ARN of the policy used to set the permissions boundary for the role. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` @@ -28072,7 +28827,7 @@ type RoleDetail struct { // The ARN of the policy used to set the permissions boundary for the role. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` @@ -28178,7 +28933,7 @@ func (s *RoleDetail) SetTags(v []*Tag) *RoleDetail { type RoleUsageType struct { _ struct{} `type:"structure"` - // The name of the region where the service-linked role is being used. + // The name of the Region where the service-linked role is being used. Region *string `min:"1" type:"string"` // The name of the resource that is using the service-linked role. @@ -28543,7 +29298,7 @@ func (s *ServerCertificateMetadata) SetUploadDate(v time.Time) *ServerCertificat type ServiceLastAccessed struct { _ struct{} `type:"structure"` - // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), // when an authenticated entity most recently attempted to access the service. // AWS does not report unauthenticated requests. // @@ -28571,15 +29326,15 @@ type ServiceLastAccessed struct { // that service. In the first paragraph, find the service prefix. For example, // (service prefix: a4b). For more information about service namespaces, see // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the AWS General Reference. + // in the AWS General Reference. // // ServiceNamespace is a required field ServiceNamespace *string `min:"1" type:"string" required:"true"` - // The total number of authenticated entities that have attempted to access - // the service. + // The total number of authenticated principals (root user, IAM users, or IAM + // roles) that have attempted to access the service. // - // This field is null if no IAM entities attempted to access the service within + // This field is null if no principals attempted to access the service within // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). TotalAuthenticatedEntities *int64 `type:"integer"` } @@ -29114,29 +29869,19 @@ type SimulateCustomPolicyInput struct { // the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the Amazon EC2 User Guide. 
// - // * EC2-Classic-InstanceStore - // - // instance, image, security-group - // - // * EC2-Classic-EBS - // - // instance, image, security-group, volume + // * EC2-Classic-InstanceStore instance, image, security-group // - // * EC2-VPC-InstanceStore + // * EC2-Classic-EBS instance, image, security-group, volume // - // instance, image, security-group, network-interface + // * EC2-VPC-InstanceStore instance, image, security-group, network-interface // - // * EC2-VPC-InstanceStore-Subnet + // * EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, + // subnet // - // instance, image, security-group, network-interface, subnet + // * EC2-VPC-EBS instance, image, security-group, network-interface, volume // - // * EC2-VPC-EBS - // - // instance, image, security-group, network-interface, volume - // - // * EC2-VPC-EBS-Subnet - // - // instance, image, security-group, network-interface, subnet, volume + // * EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, + // subnet, volume ResourceHandlingOption *string `min:"1" type:"string"` // An ARN representing the AWS account ID that specifies the owner of any simulated @@ -29366,7 +30111,7 @@ type SimulatePrincipalPolicyInput struct { CallerArn *string `min:"1" type:"string"` // A list of context keys and corresponding values for the simulation to use. - // Whenever a context key is evaluated in one of the simulated IAM permission + // Whenever a context key is evaluated in one of the simulated IAM permissions // policies, the corresponding value is supplied. ContextEntries []*ContextEntry `type:"list"` @@ -29448,29 +30193,19 @@ type SimulatePrincipalPolicyInput struct { // the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the Amazon EC2 User Guide. // - // * EC2-Classic-InstanceStore - // - // instance, image, security group - // - // * EC2-Classic-EBS - // - // instance, image, security group, volume - // - // * EC2-VPC-InstanceStore + // * EC2-Classic-InstanceStore instance, image, security group // - // instance, image, security group, network interface + // * EC2-Classic-EBS instance, image, security group, volume // - // * EC2-VPC-InstanceStore-Subnet + // * EC2-VPC-InstanceStore instance, image, security group, network interface // - // instance, image, security group, network interface, subnet + // * EC2-VPC-InstanceStore-Subnet instance, image, security group, network + // interface, subnet // - // * EC2-VPC-EBS + // * EC2-VPC-EBS instance, image, security group, network interface, volume // - // instance, image, security group, network interface, volume - // - // * EC2-VPC-EBS-Subnet - // - // instance, image, security group, network interface, subnet, volume + // * EC2-VPC-EBS-Subnet instance, image, security group, network interface, + // subnet, volume ResourceHandlingOption *string `min:"1" type:"string"` // An AWS account ID that specifies the owner of any simulated resource that @@ -31834,7 +32569,7 @@ type User struct { // * A password exists but has not been used since IAM started tracking this // information on October 20, 2014. // - // A null valuedoes not mean that the user never had a password. Also, if the + // A null value does not mean that the user never had a password. Also, if the // user does not currently have a password, but had one in the past, then this // field contains the date and time the most recent password was used. 
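A hedged sketch of how the simulation scenarios reformatted above are selected. The policy document and action name are illustrative only; SimulateCustomPolicy evaluates the supplied policy without touching anything attached to real principals.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Hypothetical policy document used only for the simulation.
        policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"ec2:*","Resource":"*"}]}`

        out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
            ActionNames:     []*string{aws.String("ec2:RunInstances")},
            PolicyInputList: []*string{aws.String(policy)},
            // One of the scenario values documented above; it tells the
            // simulator which mandatory EC2 resources to assume.
            ResourceHandlingOption: aws.String("EC2-VPC-InstanceStore"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range out.EvaluationResults {
            fmt.Println(aws.StringValue(r.EvalActionName), aws.StringValue(r.EvalDecision))
        }
    }
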
// @@ -31851,7 +32586,7 @@ type User struct { // The ARN of the policy used to set the permissions boundary for the user. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` @@ -31964,7 +32699,7 @@ type UserDetail struct { // The ARN of the policy used to set the permissions boundary for the user. // // For more information about permissions boundaries, see Permissions Boundaries - // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` @@ -32237,7 +32972,7 @@ const ( // policy or as the permissions boundary for an entity. // // For more information about permissions boundaries, see Permissions Boundaries -// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. const ( // PolicyUsageTypePermissionsPolicy is a PolicyUsageType enum value @@ -32331,6 +33066,20 @@ const ( PolicyTypeManaged = "MANAGED" ) +const ( + // SortKeyTypeServiceNamespaceAscending is a sortKeyType enum value + SortKeyTypeServiceNamespaceAscending = "SERVICE_NAMESPACE_ASCENDING" + + // SortKeyTypeServiceNamespaceDescending is a sortKeyType enum value + SortKeyTypeServiceNamespaceDescending = "SERVICE_NAMESPACE_DESCENDING" + + // SortKeyTypeLastAuthenticatedTimeAscending is a sortKeyType enum value + SortKeyTypeLastAuthenticatedTimeAscending = "LAST_AUTHENTICATED_TIME_ASCENDING" + + // SortKeyTypeLastAuthenticatedTimeDescending is a sortKeyType enum value + SortKeyTypeLastAuthenticatedTimeDescending = "LAST_AUTHENTICATED_TIME_DESCENDING" +) + const ( // StatusTypeActive is a statusType enum value StatusTypeActive = "Active" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go index 403317b87f4..30a85b3b44d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go @@ -162,6 +162,13 @@ const ( // to the service-linked role for that service. ErrCodePolicyNotAttachableException = "PolicyNotAttachable" + // ErrCodeReportGenerationLimitExceededException for service response error code + // "ReportGenerationLimitExceeded". + // + // The request failed because the maximum number of concurrent requests for + // this account are already running. + ErrCodeReportGenerationLimitExceededException = "ReportGenerationLimitExceeded" + // ErrCodeServiceFailureException for service response error code // "ServiceFailure". 
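A minimal pagination sketch tying together the pieces this diff adds: the new sortKeyType constants, the Marker/IsTruncated contract documented on GetOrganizationsAccessReportOutput, and the ReportGenerationLimitExceeded error code. The job ID below is a placeholder; it would normally come from a prior GenerateOrganizationsAccessReport call.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        input := &iam.GetOrganizationsAccessReportInput{
            JobId:   aws.String("example-job-id"), // hypothetical job ID
            SortKey: aws.String(iam.SortKeyTypeLastAuthenticatedTimeDescending),
        }

        // Follow IsTruncated/Marker until all AccessDetails pages are
        // consumed, as the field docs above recommend.
        for {
            out, err := svc.GetOrganizationsAccessReport(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, d := range out.AccessDetails {
                fmt.Println(aws.StringValue(d.ServiceNamespace))
            }
            if !aws.BoolValue(out.IsTruncated) {
                break
            }
            input.Marker = out.Marker
        }
    }
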
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index 940b4ce3283..62228c482e3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -46,11 +46,11 @@ const ( // svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IAM { svc := &IAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-08", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go index ac37e4b9ca4..19ddc93f877 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go @@ -159,7 +159,7 @@ func (c *Inspector) CreateAssessmentTargetRequest(input *CreateAssessmentTargetI // role to grant Amazon Inspector access to AWS Services needed to perform security // assessments. You can create up to 50 assessment targets per AWS account. // You can run up to 500 concurrent agents per AWS account. For more information, -// see Amazon Inspector Assessment Targets (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). +// see Amazon Inspector Assessment Targets (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1688,7 +1688,7 @@ func (c *Inspector) GetExclusionsPreviewWithContext(ctx aws.Context, input *GetE // // Example iterating over at most 3 pages of a GetExclusionsPreview operation. // pageNum := 0 // err := client.GetExclusionsPreviewPages(params, -// func(page *GetExclusionsPreviewOutput, lastPage bool) bool { +// func(page *inspector.GetExclusionsPreviewOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1720,10 +1720,12 @@ func (c *Inspector) GetExclusionsPreviewPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetExclusionsPreviewOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetExclusionsPreviewOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1926,7 +1928,7 @@ func (c *Inspector) ListAssessmentRunAgentsWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListAssessmentRunAgents operation. 
// pageNum := 0 // err := client.ListAssessmentRunAgentsPages(params, -// func(page *ListAssessmentRunAgentsOutput, lastPage bool) bool { +// func(page *inspector.ListAssessmentRunAgentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1958,10 +1960,12 @@ func (c *Inspector) ListAssessmentRunAgentsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAssessmentRunAgentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAssessmentRunAgentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2073,7 +2077,7 @@ func (c *Inspector) ListAssessmentRunsWithContext(ctx aws.Context, input *ListAs // // Example iterating over at most 3 pages of a ListAssessmentRuns operation. // pageNum := 0 // err := client.ListAssessmentRunsPages(params, -// func(page *ListAssessmentRunsOutput, lastPage bool) bool { +// func(page *inspector.ListAssessmentRunsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2105,10 +2109,12 @@ func (c *Inspector) ListAssessmentRunsPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAssessmentRunsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAssessmentRunsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2217,7 +2223,7 @@ func (c *Inspector) ListAssessmentTargetsWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListAssessmentTargets operation. // pageNum := 0 // err := client.ListAssessmentTargetsPages(params, -// func(page *ListAssessmentTargetsOutput, lastPage bool) bool { +// func(page *inspector.ListAssessmentTargetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2249,10 +2255,12 @@ func (c *Inspector) ListAssessmentTargetsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAssessmentTargetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAssessmentTargetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2364,7 +2372,7 @@ func (c *Inspector) ListAssessmentTemplatesWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListAssessmentTemplates operation. // pageNum := 0 // err := client.ListAssessmentTemplatesPages(params, -// func(page *ListAssessmentTemplatesOutput, lastPage bool) bool { +// func(page *inspector.ListAssessmentTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2396,10 +2404,12 @@ func (c *Inspector) ListAssessmentTemplatesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAssessmentTemplatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAssessmentTemplatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2512,7 +2522,7 @@ func (c *Inspector) ListEventSubscriptionsWithContext(ctx aws.Context, input *Li // // Example iterating over at most 3 pages of a ListEventSubscriptions operation. 
// pageNum := 0 // err := client.ListEventSubscriptionsPages(params, -// func(page *ListEventSubscriptionsOutput, lastPage bool) bool { +// func(page *inspector.ListEventSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2544,10 +2554,12 @@ func (c *Inspector) ListEventSubscriptionsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEventSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEventSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2658,7 +2670,7 @@ func (c *Inspector) ListExclusionsWithContext(ctx aws.Context, input *ListExclus // // Example iterating over at most 3 pages of a ListExclusions operation. // pageNum := 0 // err := client.ListExclusionsPages(params, -// func(page *ListExclusionsOutput, lastPage bool) bool { +// func(page *inspector.ListExclusionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2690,10 +2702,12 @@ func (c *Inspector) ListExclusionsPagesWithContext(ctx aws.Context, input *ListE }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListExclusionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListExclusionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2805,7 +2819,7 @@ func (c *Inspector) ListFindingsWithContext(ctx aws.Context, input *ListFindings // // Example iterating over at most 3 pages of a ListFindings operation. // pageNum := 0 // err := client.ListFindingsPages(params, -// func(page *ListFindingsOutput, lastPage bool) bool { +// func(page *inspector.ListFindingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2837,10 +2851,12 @@ func (c *Inspector) ListFindingsPagesWithContext(ctx aws.Context, input *ListFin }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFindingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFindingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2947,7 +2963,7 @@ func (c *Inspector) ListRulesPackagesWithContext(ctx aws.Context, input *ListRul // // Example iterating over at most 3 pages of a ListRulesPackages operation. // pageNum := 0 // err := client.ListRulesPackagesPages(params, -// func(page *ListRulesPackagesOutput, lastPage bool) bool { +// func(page *inspector.ListRulesPackagesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2979,10 +2995,12 @@ func (c *Inspector) ListRulesPackagesPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRulesPackagesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRulesPackagesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3188,7 +3206,7 @@ func (c *Inspector) PreviewAgentsWithContext(ctx aws.Context, input *PreviewAgen // // Example iterating over at most 3 pages of a PreviewAgents operation. 
// pageNum := 0 // err := client.PreviewAgentsPages(params, -// func(page *PreviewAgentsOutput, lastPage bool) bool { +// func(page *inspector.PreviewAgentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3220,10 +3238,12 @@ func (c *Inspector) PreviewAgentsPagesWithContext(ctx aws.Context, input *Previe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*PreviewAgentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*PreviewAgentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/doc.go index 0e660cc1984..7b0b15bd172 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/doc.go @@ -4,7 +4,7 @@ // requests to Amazon Inspector. // // Amazon Inspector enables you to analyze the behavior of your AWS resources -// and to identify potential security issues. For more information, see Amazon +// and to identify potential security issues. For more information, see Amazon // Inspector User Guide (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_introduction.html). // // See https://docs.aws.amazon.com/goto/WebAPI/inspector-2016-02-16 for more information on this service. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go index 2e68b4e4d23..aae2fa86571 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go @@ -46,11 +46,11 @@ const ( // svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Inspector { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Inspector { svc := &Inspector{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-16", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/api.go index c00ab4c4c7c..1632b7b9cc8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/api.go @@ -626,8 +626,8 @@ func (c *IoT) AttachSecurityProfileRequest(input *AttachSecurityProfileInput) (r // AttachSecurityProfile API operation for AWS IoT. 
// -// Associates a Device Defender security profile with a thing group or with -// this account. Each thing group or account can have up to five security profiles +// Associates a Device Defender security profile with a thing group or this +// account. Each thing group or account can have up to five security profiles // associated with it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -772,6 +772,93 @@ func (c *IoT) AttachThingPrincipalWithContext(ctx aws.Context, input *AttachThin return out, req.Send() } +const opCancelAuditMitigationActionsTask = "CancelAuditMitigationActionsTask" + +// CancelAuditMitigationActionsTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelAuditMitigationActionsTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelAuditMitigationActionsTask for more information on using the CancelAuditMitigationActionsTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelAuditMitigationActionsTaskRequest method. +// req, resp := client.CancelAuditMitigationActionsTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CancelAuditMitigationActionsTaskRequest(input *CancelAuditMitigationActionsTaskInput) (req *request.Request, output *CancelAuditMitigationActionsTaskOutput) { + op := &request.Operation{ + Name: opCancelAuditMitigationActionsTask, + HTTPMethod: "PUT", + HTTPPath: "/audit/mitigationactions/tasks/{taskId}/cancel", + } + + if input == nil { + input = &CancelAuditMitigationActionsTaskInput{} + } + + output = &CancelAuditMitigationActionsTaskOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelAuditMitigationActionsTask API operation for AWS IoT. +// +// Cancels a mitigation action task that is in progress. If the task is not +// in progress, an InvalidRequestException occurs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CancelAuditMitigationActionsTask for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. 
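A minimal sketch of calling the new operation, assuming a mitigation task ID obtained elsewhere (per the docs, from a StartAuditMitigationActionsTask run); the ID below is a placeholder.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))

        // TaskId is a placeholder; per the docs above, cancelling a task
        // that is not in progress yields an InvalidRequestException.
        _, err := svc.CancelAuditMitigationActionsTask(&iot.CancelAuditMitigationActionsTaskInput{
            TaskId: aws.String("example-mitigation-task"),
        })
        if err != nil {
            log.Println(err)
        }
    }
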
+// +func (c *IoT) CancelAuditMitigationActionsTask(input *CancelAuditMitigationActionsTaskInput) (*CancelAuditMitigationActionsTaskOutput, error) { + req, out := c.CancelAuditMitigationActionsTaskRequest(input) + return out, req.Send() +} + +// CancelAuditMitigationActionsTaskWithContext is the same as CancelAuditMitigationActionsTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelAuditMitigationActionsTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) CancelAuditMitigationActionsTaskWithContext(ctx aws.Context, input *CancelAuditMitigationActionsTaskInput, opts ...request.Option) (*CancelAuditMitigationActionsTaskOutput, error) { + req, out := c.CancelAuditMitigationActionsTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCancelAuditTask = "CancelAuditTask" // CancelAuditTaskRequest generates a "aws/request.Request" representing the @@ -1822,6 +1909,95 @@ func (c *IoT) CreateKeysAndCertificateWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateMitigationAction = "CreateMitigationAction" + +// CreateMitigationActionRequest generates a "aws/request.Request" representing the +// client's request for the CreateMitigationAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMitigationAction for more information on using the CreateMitigationAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMitigationActionRequest method. +// req, resp := client.CreateMitigationActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CreateMitigationActionRequest(input *CreateMitigationActionInput) (req *request.Request, output *CreateMitigationActionOutput) { + op := &request.Operation{ + Name: opCreateMitigationAction, + HTTPMethod: "POST", + HTTPPath: "/mitigationactions/actions/{actionName}", + } + + if input == nil { + input = &CreateMitigationActionInput{} + } + + output = &CreateMitigationActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMitigationAction API operation for AWS IoT. +// +// Defines an action that can be applied to audit findings by using StartAuditMitigationActionsTask. +// Each mitigation action can apply only one type of change. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CreateMitigationAction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. 
+// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// A limit has been exceeded. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) CreateMitigationAction(input *CreateMitigationActionInput) (*CreateMitigationActionOutput, error) { + req, out := c.CreateMitigationActionRequest(input) + return out, req.Send() +} + +// CreateMitigationActionWithContext is the same as CreateMitigationAction with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMitigationAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) CreateMitigationActionWithContext(ctx aws.Context, input *CreateMitigationActionInput, opts ...request.Option) (*CreateMitigationActionOutput, error) { + req, out := c.CreateMitigationActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateOTAUpdate = "CreateOTAUpdate" // CreateOTAUpdateRequest generates a "aws/request.Request" representing the @@ -2270,6 +2446,9 @@ func (c *IoT) CreateScheduledAuditRequest(input *CreateScheduledAuditInput) (req // * ErrCodeInvalidRequestException "InvalidRequestException" // The request is not valid. // +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// // * ErrCodeThrottlingException "ThrottlingException" // The rate exceeds the limit. // @@ -2430,10 +2609,6 @@ func (c *IoT) CreateStreamRequest(input *CreateStreamInput) (req *request.Reques // Creates a stream for delivering one or more large files in chunks over MQTT. // A stream transports data bytes in chunks or blocks packaged as MQTT messages // from a source like S3. You can have one or more files associated with a stream. -// The total size of a file associated with the stream cannot exceed more than -// 2 MB. The stream will be created with version 0. If a stream is created with -// the same streamID as a stream that existed and was deleted within last 90 -// days, we will resurrect that old stream by incrementing the version by 1. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3272,10 +3447,10 @@ func (c *IoT) DeleteCertificateRequest(input *DeleteCertificateInput) (req *requ // // Deletes the specified certificate. // -// A certificate cannot be deleted if it has a policy attached to it or if its -// status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy -// API to detach all policies. Next, use the UpdateCertificate API to set the -// certificate to the INACTIVE status. +// A certificate cannot be deleted if it has a policy or IoT thing attached +// to it or if its status is set to ACTIVE. To delete a certificate, first use +// the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate +// API to set the certificate to the INACTIVE status. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3611,261 +3786,344 @@ func (c *IoT) DeleteJobExecutionWithContext(ctx aws.Context, input *DeleteJobExe return out, req.Send() } -const opDeleteOTAUpdate = "DeleteOTAUpdate" +const opDeleteMitigationAction = "DeleteMitigationAction" -// DeleteOTAUpdateRequest generates a "aws/request.Request" representing the -// client's request for the DeleteOTAUpdate operation. The "output" return +// DeleteMitigationActionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMitigationAction operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteOTAUpdate for more information on using the DeleteOTAUpdate +// See DeleteMitigationAction for more information on using the DeleteMitigationAction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteOTAUpdateRequest method. -// req, resp := client.DeleteOTAUpdateRequest(params) +// // Example sending a request using the DeleteMitigationActionRequest method. +// req, resp := client.DeleteMitigationActionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *IoT) DeleteOTAUpdateRequest(input *DeleteOTAUpdateInput) (req *request.Request, output *DeleteOTAUpdateOutput) { +func (c *IoT) DeleteMitigationActionRequest(input *DeleteMitigationActionInput) (req *request.Request, output *DeleteMitigationActionOutput) { op := &request.Operation{ - Name: opDeleteOTAUpdate, + Name: opDeleteMitigationAction, HTTPMethod: "DELETE", - HTTPPath: "/otaUpdates/{otaUpdateId}", + HTTPPath: "/mitigationactions/actions/{actionName}", } if input == nil { - input = &DeleteOTAUpdateInput{} + input = &DeleteMitigationActionInput{} } - output = &DeleteOTAUpdateOutput{} + output = &DeleteMitigationActionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteOTAUpdate API operation for AWS IoT. +// DeleteMitigationAction API operation for AWS IoT. // -// Delete an OTA update. +// Deletes a defined mitigation action from your AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT's -// API operation DeleteOTAUpdate for usage and error information. +// API operation DeleteMitigationAction for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidRequestException "InvalidRequestException" // The request is not valid. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The specified resource does not exist. -// // * ErrCodeThrottlingException "ThrottlingException" // The rate exceeds the limit. // -// * ErrCodeUnauthorizedException "UnauthorizedException" -// You are not authorized to perform this operation. 
-// // * ErrCodeInternalFailureException "InternalFailureException" // An unexpected error has occurred. // -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// The service is temporarily unavailable. -// -// * ErrCodeVersionConflictException "VersionConflictException" -// An exception thrown when the version of an entity specified with the expectedVersion -// parameter does not match the latest version in the system. -// -func (c *IoT) DeleteOTAUpdate(input *DeleteOTAUpdateInput) (*DeleteOTAUpdateOutput, error) { - req, out := c.DeleteOTAUpdateRequest(input) +func (c *IoT) DeleteMitigationAction(input *DeleteMitigationActionInput) (*DeleteMitigationActionOutput, error) { + req, out := c.DeleteMitigationActionRequest(input) return out, req.Send() } -// DeleteOTAUpdateWithContext is the same as DeleteOTAUpdate with the addition of +// DeleteMitigationActionWithContext is the same as DeleteMitigationAction with the addition of // the ability to pass a context and additional request options. // -// See DeleteOTAUpdate for details on how to use this API operation. +// See DeleteMitigationAction for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IoT) DeleteOTAUpdateWithContext(ctx aws.Context, input *DeleteOTAUpdateInput, opts ...request.Option) (*DeleteOTAUpdateOutput, error) { - req, out := c.DeleteOTAUpdateRequest(input) +func (c *IoT) DeleteMitigationActionWithContext(ctx aws.Context, input *DeleteMitigationActionInput, opts ...request.Option) (*DeleteMitigationActionOutput, error) { + req, out := c.DeleteMitigationActionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeletePolicy = "DeletePolicy" +const opDeleteOTAUpdate = "DeleteOTAUpdate" -// DeletePolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeletePolicy operation. The "output" return +// DeleteOTAUpdateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOTAUpdate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeletePolicy for more information on using the DeletePolicy +// See DeleteOTAUpdate for more information on using the DeleteOTAUpdate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeletePolicyRequest method. -// req, resp := client.DeletePolicyRequest(params) +// // Example sending a request using the DeleteOTAUpdateRequest method. 
+// req, resp := client.DeleteOTAUpdateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *IoT) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { +func (c *IoT) DeleteOTAUpdateRequest(input *DeleteOTAUpdateInput) (req *request.Request, output *DeleteOTAUpdateOutput) { op := &request.Operation{ - Name: opDeletePolicy, + Name: opDeleteOTAUpdate, HTTPMethod: "DELETE", - HTTPPath: "/policies/{policyName}", + HTTPPath: "/otaUpdates/{otaUpdateId}", } if input == nil { - input = &DeletePolicyInput{} + input = &DeleteOTAUpdateInput{} } - output = &DeletePolicyOutput{} + output = &DeleteOTAUpdateOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeletePolicy API operation for AWS IoT. -// -// Deletes the specified policy. -// -// A policy cannot be deleted if it has non-default versions or it is attached -// to any certificate. -// -// To delete a policy, use the DeletePolicyVersion API to delete all non-default -// versions of the policy; use the DetachPrincipalPolicy API to detach the policy -// from any certificate; and then use the DeletePolicy API to delete the policy. +// DeleteOTAUpdate API operation for AWS IoT. // -// When a policy is deleted using DeletePolicy, its default version is deleted -// with it. +// Delete an OTA update. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT's -// API operation DeletePolicy for usage and error information. +// API operation DeleteOTAUpdate for usage and error information. // // Returned Error Codes: -// * ErrCodeDeleteConflictException "DeleteConflictException" -// You can't delete the resource because it is attached to one or more resources. +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // The specified resource does not exist. // -// * ErrCodeInvalidRequestException "InvalidRequestException" -// The request is not valid. -// // * ErrCodeThrottlingException "ThrottlingException" // The rate exceeds the limit. // // * ErrCodeUnauthorizedException "UnauthorizedException" // You are not authorized to perform this operation. // +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The service is temporarily unavailable. // -// * ErrCodeInternalFailureException "InternalFailureException" -// An unexpected error has occurred. +// * ErrCodeVersionConflictException "VersionConflictException" +// An exception thrown when the version of an entity specified with the expectedVersion +// parameter does not match the latest version in the system. 
// -func (c *IoT) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { - req, out := c.DeletePolicyRequest(input) +func (c *IoT) DeleteOTAUpdate(input *DeleteOTAUpdateInput) (*DeleteOTAUpdateOutput, error) { + req, out := c.DeleteOTAUpdateRequest(input) return out, req.Send() } -// DeletePolicyWithContext is the same as DeletePolicy with the addition of +// DeleteOTAUpdateWithContext is the same as DeleteOTAUpdate with the addition of // the ability to pass a context and additional request options. // -// See DeletePolicy for details on how to use this API operation. +// See DeleteOTAUpdate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IoT) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, opts ...request.Option) (*DeletePolicyOutput, error) { - req, out := c.DeletePolicyRequest(input) +func (c *IoT) DeleteOTAUpdateWithContext(ctx aws.Context, input *DeleteOTAUpdateInput, opts ...request.Option) (*DeleteOTAUpdateOutput, error) { + req, out := c.DeleteOTAUpdateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeletePolicyVersion = "DeletePolicyVersion" +const opDeletePolicy = "DeletePolicy" -// DeletePolicyVersionRequest generates a "aws/request.Request" representing the -// client's request for the DeletePolicyVersion operation. The "output" return +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeletePolicyVersion for more information on using the DeletePolicyVersion +// See DeletePolicy for more information on using the DeletePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeletePolicyVersionRequest method. -// req, resp := client.DeletePolicyVersionRequest(params) +// // Example sending a request using the DeletePolicyRequest method. 
+// req, resp := client.DeletePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *IoT) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { +func (c *IoT) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { op := &request.Operation{ - Name: opDeletePolicyVersion, + Name: opDeletePolicy, HTTPMethod: "DELETE", - HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + HTTPPath: "/policies/{policyName}", } if input == nil { - input = &DeletePolicyVersionInput{} + input = &DeletePolicyInput{} } - output = &DeletePolicyVersionOutput{} + output = &DeletePolicyOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeletePolicyVersion API operation for AWS IoT. +// DeletePolicy API operation for AWS IoT. // -// Deletes the specified version of the specified policy. You cannot delete -// the default version of a policy using this API. To delete the default version -// of a policy, use DeletePolicy. To find out which version of a policy is marked -// as the default version, use ListPolicyVersions. +// Deletes the specified policy. +// +// A policy cannot be deleted if it has non-default versions or it is attached +// to any certificate. +// +// To delete a policy, use the DeletePolicyVersion API to delete all non-default +// versions of the policy; use the DetachPrincipalPolicy API to detach the policy +// from any certificate; and then use the DeletePolicy API to delete the policy. +// +// When a policy is deleted using DeletePolicy, its default version is deleted +// with it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT's -// API operation DeletePolicyVersion for usage and error information. +// API operation DeletePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDeleteConflictException "DeleteConflictException" +// You can't delete the resource because it is attached to one or more resources. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + return out, req.Send() +} + +// DeletePolicyWithContext is the same as DeletePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, opts ...request.Option) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicyVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePolicyVersion for more information on using the DeletePolicyVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePolicyVersionRequest method. +// req, resp := client.DeletePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "DELETE", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + output = &DeletePolicyVersionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePolicyVersion API operation for AWS IoT. +// +// Deletes the specified version of the specified policy. You cannot delete +// the default version of a policy using this API. To delete the default version +// of a policy, use DeletePolicy. To find out which version of a policy is marked +// as the default version, use ListPolicyVersions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DeletePolicyVersion for usage and error information. // // Returned Error Codes: // * ErrCodeDeleteConflictException "DeleteConflictException" @@ -4989,6 +5247,181 @@ func (c *IoT) DescribeAccountAuditConfigurationWithContext(ctx aws.Context, inpu return out, req.Send() } +const opDescribeAuditFinding = "DescribeAuditFinding" + +// DescribeAuditFindingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAuditFinding operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAuditFinding for more information on using the DescribeAuditFinding +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
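+//
+// If no request customization is needed, the plain DescribeAuditFinding
+// wrapper defined below is simpler. A minimal sketch (the finding ID is a
+// placeholder value):
+//
+//    out, err := client.DescribeAuditFinding(&iot.DescribeAuditFindingInput{
+//        FindingId: aws.String("exampleFindingId"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }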
+// +// +// // Example sending a request using the DescribeAuditFindingRequest method. +// req, resp := client.DescribeAuditFindingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeAuditFindingRequest(input *DescribeAuditFindingInput) (req *request.Request, output *DescribeAuditFindingOutput) { + op := &request.Operation{ + Name: opDescribeAuditFinding, + HTTPMethod: "GET", + HTTPPath: "/audit/findings/{findingId}", + } + + if input == nil { + input = &DescribeAuditFindingInput{} + } + + output = &DescribeAuditFindingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAuditFinding API operation for AWS IoT. +// +// Gets information about a single audit finding. Properties include the reason +// for noncompliance, the severity of the issue, and when the audit that returned +// the finding was started. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeAuditFinding for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DescribeAuditFinding(input *DescribeAuditFindingInput) (*DescribeAuditFindingOutput, error) { + req, out := c.DescribeAuditFindingRequest(input) + return out, req.Send() +} + +// DescribeAuditFindingWithContext is the same as DescribeAuditFinding with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAuditFinding for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeAuditFindingWithContext(ctx aws.Context, input *DescribeAuditFindingInput, opts ...request.Option) (*DescribeAuditFindingOutput, error) { + req, out := c.DescribeAuditFindingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAuditMitigationActionsTask = "DescribeAuditMitigationActionsTask" + +// DescribeAuditMitigationActionsTaskRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAuditMitigationActionsTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAuditMitigationActionsTask for more information on using the DescribeAuditMitigationActionsTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
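+//
+// A minimal one-shot sketch, analogous to the request example below (the
+// task ID is a placeholder value):
+//
+//    out, err := client.DescribeAuditMitigationActionsTask(
+//        &iot.DescribeAuditMitigationActionsTaskInput{
+//            TaskId: aws.String("exampleTaskId"),
+//        })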
+// +// +// // Example sending a request using the DescribeAuditMitigationActionsTaskRequest method. +// req, resp := client.DescribeAuditMitigationActionsTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeAuditMitigationActionsTaskRequest(input *DescribeAuditMitigationActionsTaskInput) (req *request.Request, output *DescribeAuditMitigationActionsTaskOutput) { + op := &request.Operation{ + Name: opDescribeAuditMitigationActionsTask, + HTTPMethod: "GET", + HTTPPath: "/audit/mitigationactions/tasks/{taskId}", + } + + if input == nil { + input = &DescribeAuditMitigationActionsTaskInput{} + } + + output = &DescribeAuditMitigationActionsTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAuditMitigationActionsTask API operation for AWS IoT. +// +// Gets information about an audit mitigation task that is used to apply mitigation +// actions to a set of audit findings. Properties include the actions being +// applied, the audit checks to which they're being applied, the task status, +// and aggregated task statistics. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeAuditMitigationActionsTask for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DescribeAuditMitigationActionsTask(input *DescribeAuditMitigationActionsTaskInput) (*DescribeAuditMitigationActionsTaskOutput, error) { + req, out := c.DescribeAuditMitigationActionsTaskRequest(input) + return out, req.Send() +} + +// DescribeAuditMitigationActionsTaskWithContext is the same as DescribeAuditMitigationActionsTask with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAuditMitigationActionsTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeAuditMitigationActionsTaskWithContext(ctx aws.Context, input *DescribeAuditMitigationActionsTaskInput, opts ...request.Option) (*DescribeAuditMitigationActionsTaskOutput, error) { + req, out := c.DescribeAuditMitigationActionsTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAuditTask = "DescribeAuditTask" // DescribeAuditTaskRequest generates a "aws/request.Request" representing the @@ -5948,6 +6381,91 @@ func (c *IoT) DescribeJobExecutionWithContext(ctx aws.Context, input *DescribeJo return out, req.Send() } +const opDescribeMitigationAction = "DescribeMitigationAction" + +// DescribeMitigationActionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMitigationAction operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMitigationAction for more information on using the DescribeMitigationAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMitigationActionRequest method. +// req, resp := client.DescribeMitigationActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeMitigationActionRequest(input *DescribeMitigationActionInput) (req *request.Request, output *DescribeMitigationActionOutput) { + op := &request.Operation{ + Name: opDescribeMitigationAction, + HTTPMethod: "GET", + HTTPPath: "/mitigationactions/actions/{actionName}", + } + + if input == nil { + input = &DescribeMitigationActionInput{} + } + + output = &DescribeMitigationActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMitigationAction API operation for AWS IoT. +// +// Gets information about a mitigation action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeMitigationAction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) DescribeMitigationAction(input *DescribeMitigationActionInput) (*DescribeMitigationActionOutput, error) { + req, out := c.DescribeMitigationActionRequest(input) + return out, req.Send() +} + +// DescribeMitigationActionWithContext is the same as DescribeMitigationAction with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMitigationAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeMitigationActionWithContext(ctx aws.Context, input *DescribeMitigationActionInput, opts ...request.Option) (*DescribeMitigationActionOutput, error) { + req, out := c.DescribeMitigationActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
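+	// Send executes the call with the attached context; cancelling ctx aborts
+	// the in-flight HTTP request.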
+ return out, req.Send() +} + const opDescribeRoleAlias = "DescribeRoleAlias" // DescribeRoleAliasRequest generates a "aws/request.Request" representing the @@ -7214,6 +7732,106 @@ func (c *IoT) EnableTopicRuleWithContext(ctx aws.Context, input *EnableTopicRule return out, req.Send() } +const opGetCardinality = "GetCardinality" + +// GetCardinalityRequest generates a "aws/request.Request" representing the +// client's request for the GetCardinality operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCardinality for more information on using the GetCardinality +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCardinalityRequest method. +// req, resp := client.GetCardinalityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) GetCardinalityRequest(input *GetCardinalityInput) (req *request.Request, output *GetCardinalityOutput) { + op := &request.Operation{ + Name: opGetCardinality, + HTTPMethod: "POST", + HTTPPath: "/indices/cardinality", + } + + if input == nil { + input = &GetCardinalityInput{} + } + + output = &GetCardinalityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCardinality API operation for AWS IoT. +// +// Returns the number of things with distinct values for the aggregation field. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation GetCardinality for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeInvalidQueryException "InvalidQueryException" +// The query is invalid. +// +// * ErrCodeInvalidAggregationException "InvalidAggregationException" +// The aggregation is invalid. +// +// * ErrCodeIndexNotReadyException "IndexNotReadyException" +// The index is not ready. +// +func (c *IoT) GetCardinality(input *GetCardinalityInput) (*GetCardinalityOutput, error) { + req, out := c.GetCardinalityRequest(input) + return out, req.Send() +} + +// GetCardinalityWithContext is the same as GetCardinality with the addition of +// the ability to pass a context and additional request options. +// +// See GetCardinality for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) GetCardinalityWithContext(ctx aws.Context, input *GetCardinalityInput, opts ...request.Option) (*GetCardinalityOutput, error) { + req, out := c.GetCardinalityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetEffectivePolicies = "GetEffectivePolicies" // GetEffectivePoliciesRequest generates a "aws/request.Request" representing the @@ -7657,6 +8275,109 @@ func (c *IoT) GetOTAUpdateWithContext(ctx aws.Context, input *GetOTAUpdateInput, return out, req.Send() } +const opGetPercentiles = "GetPercentiles" + +// GetPercentilesRequest generates a "aws/request.Request" representing the +// client's request for the GetPercentiles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPercentiles for more information on using the GetPercentiles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPercentilesRequest method. +// req, resp := client.GetPercentilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) GetPercentilesRequest(input *GetPercentilesInput) (req *request.Request, output *GetPercentilesOutput) { + op := &request.Operation{ + Name: opGetPercentiles, + HTTPMethod: "POST", + HTTPPath: "/indices/percentiles", + } + + if input == nil { + input = &GetPercentilesInput{} + } + + output = &GetPercentilesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPercentiles API operation for AWS IoT. +// +// Returns the percentile values for the aggregation field. The results from +// GetPercentiles is an approximation. The default percentile groupings are: +// 1,5,25,50,75,95,99. You can specify custom percentile grouping using the +// percents argument to the GetPercentiles API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation GetPercentiles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this operation. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. +// +// * ErrCodeInvalidQueryException "InvalidQueryException" +// The query is invalid. 
+// +// * ErrCodeInvalidAggregationException "InvalidAggregationException" +// The aggregation is invalid. +// +// * ErrCodeIndexNotReadyException "IndexNotReadyException" +// The index is not ready. +// +func (c *IoT) GetPercentiles(input *GetPercentilesInput) (*GetPercentilesOutput, error) { + req, out := c.GetPercentilesRequest(input) + return out, req.Send() +} + +// GetPercentilesWithContext is the same as GetPercentiles with the addition of +// the ability to pass a context and additional request options. +// +// See GetPercentiles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) GetPercentilesWithContext(ctx aws.Context, input *GetPercentilesInput, opts ...request.Option) (*GetPercentilesOutput, error) { + req, out := c.GetPercentilesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetPolicy = "GetPolicy" // GetPolicyRequest generates a "aws/request.Request" representing the @@ -7970,7 +8691,9 @@ func (c *IoT) GetStatisticsRequest(input *GetStatisticsInput) (req *request.Requ // GetStatistics API operation for AWS IoT. // -// Gets statistics about things that match the specified query. +// Gets statistics returns the count, average, sum, minimum, maximum, sumOfSquares, +// variance, and standard deviation for the specified aggregated field. If the +// aggregation field is of type String, only the count statistic is returned. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8458,6 +9181,170 @@ func (c *IoT) ListAuditFindingsWithContext(ctx aws.Context, input *ListAuditFind return out, req.Send() } +const opListAuditMitigationActionsExecutions = "ListAuditMitigationActionsExecutions" + +// ListAuditMitigationActionsExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListAuditMitigationActionsExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAuditMitigationActionsExecutions for more information on using the ListAuditMitigationActionsExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAuditMitigationActionsExecutionsRequest method. 
+// req, resp := client.ListAuditMitigationActionsExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListAuditMitigationActionsExecutionsRequest(input *ListAuditMitigationActionsExecutionsInput) (req *request.Request, output *ListAuditMitigationActionsExecutionsOutput) { + op := &request.Operation{ + Name: opListAuditMitigationActionsExecutions, + HTTPMethod: "GET", + HTTPPath: "/audit/mitigationactions/executions", + } + + if input == nil { + input = &ListAuditMitigationActionsExecutionsInput{} + } + + output = &ListAuditMitigationActionsExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAuditMitigationActionsExecutions API operation for AWS IoT. +// +// Gets the status of audit mitigation action tasks that were executed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListAuditMitigationActionsExecutions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) ListAuditMitigationActionsExecutions(input *ListAuditMitigationActionsExecutionsInput) (*ListAuditMitigationActionsExecutionsOutput, error) { + req, out := c.ListAuditMitigationActionsExecutionsRequest(input) + return out, req.Send() +} + +// ListAuditMitigationActionsExecutionsWithContext is the same as ListAuditMitigationActionsExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListAuditMitigationActionsExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditMitigationActionsExecutionsWithContext(ctx aws.Context, input *ListAuditMitigationActionsExecutionsInput, opts ...request.Option) (*ListAuditMitigationActionsExecutionsOutput, error) { + req, out := c.ListAuditMitigationActionsExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAuditMitigationActionsTasks = "ListAuditMitigationActionsTasks" + +// ListAuditMitigationActionsTasksRequest generates a "aws/request.Request" representing the +// client's request for the ListAuditMitigationActionsTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAuditMitigationActionsTasks for more information on using the ListAuditMitigationActionsTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
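+//
+// A minimal sketch of listing tasks from the last day (StartTime and EndTime
+// are assumed filter fields on the input; check the input struct for the
+// exact names):
+//
+//    out, err := client.ListAuditMitigationActionsTasks(
+//        &iot.ListAuditMitigationActionsTasksInput{
+//            StartTime: aws.Time(time.Now().Add(-24 * time.Hour)),
+//            EndTime:   aws.Time(time.Now()),
+//        })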
+// +// +// // Example sending a request using the ListAuditMitigationActionsTasksRequest method. +// req, resp := client.ListAuditMitigationActionsTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListAuditMitigationActionsTasksRequest(input *ListAuditMitigationActionsTasksInput) (req *request.Request, output *ListAuditMitigationActionsTasksOutput) { + op := &request.Operation{ + Name: opListAuditMitigationActionsTasks, + HTTPMethod: "GET", + HTTPPath: "/audit/mitigationactions/tasks", + } + + if input == nil { + input = &ListAuditMitigationActionsTasksInput{} + } + + output = &ListAuditMitigationActionsTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAuditMitigationActionsTasks API operation for AWS IoT. +// +// Gets a list of audit mitigation action tasks that match the specified filters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListAuditMitigationActionsTasks for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) ListAuditMitigationActionsTasks(input *ListAuditMitigationActionsTasksInput) (*ListAuditMitigationActionsTasksOutput, error) { + req, out := c.ListAuditMitigationActionsTasksRequest(input) + return out, req.Send() +} + +// ListAuditMitigationActionsTasksWithContext is the same as ListAuditMitigationActionsTasks with the addition of +// the ability to pass a context and additional request options. +// +// See ListAuditMitigationActionsTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditMitigationActionsTasksWithContext(ctx aws.Context, input *ListAuditMitigationActionsTasksInput, opts ...request.Option) (*ListAuditMitigationActionsTasksOutput, error) { + req, out := c.ListAuditMitigationActionsTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListAuditTasks = "ListAuditTasks" // ListAuditTasksRequest generates a "aws/request.Request" representing the @@ -9327,6 +10214,88 @@ func (c *IoT) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts .. return out, req.Send() } +const opListMitigationActions = "ListMitigationActions" + +// ListMitigationActionsRequest generates a "aws/request.Request" representing the +// client's request for the ListMitigationActions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMitigationActions for more information on using the ListMitigationActions +// API call, and error handling. 
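+//
+// For simple listings, the plain wrapper is usually enough. A minimal sketch
+// (the ActionType filter and its value here are illustrative assumptions):
+//
+//    out, err := client.ListMitigationActions(&iot.ListMitigationActionsInput{
+//        ActionType: aws.String("ENABLE_IOT_LOGGING"),
+//    })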
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMitigationActionsRequest method. +// req, resp := client.ListMitigationActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListMitigationActionsRequest(input *ListMitigationActionsInput) (req *request.Request, output *ListMitigationActionsOutput) { + op := &request.Operation{ + Name: opListMitigationActions, + HTTPMethod: "GET", + HTTPPath: "/mitigationactions/actions", + } + + if input == nil { + input = &ListMitigationActionsInput{} + } + + output = &ListMitigationActionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMitigationActions API operation for AWS IoT. +// +// Gets a list of all mitigation actions that match the specified filter criteria. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListMitigationActions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) ListMitigationActions(input *ListMitigationActionsInput) (*ListMitigationActionsOutput, error) { + req, out := c.ListMitigationActionsRequest(input) + return out, req.Send() +} + +// ListMitigationActionsWithContext is the same as ListMitigationActions with the addition of +// the ability to pass a context and additional request options. +// +// See ListMitigationActions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListMitigationActionsWithContext(ctx aws.Context, input *ListMitigationActionsInput, opts ...request.Option) (*ListMitigationActionsOutput, error) { + req, out := c.ListMitigationActionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListOTAUpdates = "ListOTAUpdates" // ListOTAUpdatesRequest generates a "aws/request.Request" representing the @@ -11656,7 +12625,7 @@ func (c *IoT) ListViolationEventsRequest(input *ListViolationEventsInput) (req * // // Lists the Device Defender security profile violations discovered during the // given time period. You can use filters to limit the results to those alerts -// issued for a particular security profile, behavior or thing (device). +// issued for a particular security profile, behavior, or thing (device). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11948,7 +12917,11 @@ func (c *IoT) RegisterThingRequest(input *RegisterThingInput) (req *request.Requ // RegisterThing API operation for AWS IoT. // -// Provisions a thing. +// Provisions a thing in the device registry. 
RegisterThing calls other AWS +// IoT control plane APIs. These calls might exceed your account level AWS IoT +// Throttling Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_iot) +// and cause throttle errors. Please contact AWS Customer Support (https://console.aws.amazon.com/support/home) +// to raise your throttling limits if necessary. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12913,6 +13886,95 @@ func (c *IoT) SetV2LoggingOptionsWithContext(ctx aws.Context, input *SetV2Loggin return out, req.Send() } +const opStartAuditMitigationActionsTask = "StartAuditMitigationActionsTask" + +// StartAuditMitigationActionsTaskRequest generates a "aws/request.Request" representing the +// client's request for the StartAuditMitigationActionsTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartAuditMitigationActionsTask for more information on using the StartAuditMitigationActionsTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartAuditMitigationActionsTaskRequest method. +// req, resp := client.StartAuditMitigationActionsTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) StartAuditMitigationActionsTaskRequest(input *StartAuditMitigationActionsTaskInput) (req *request.Request, output *StartAuditMitigationActionsTaskOutput) { + op := &request.Operation{ + Name: opStartAuditMitigationActionsTask, + HTTPMethod: "POST", + HTTPPath: "/audit/mitigationactions/tasks/{taskId}", + } + + if input == nil { + input = &StartAuditMitigationActionsTaskInput{} + } + + output = &StartAuditMitigationActionsTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartAuditMitigationActionsTask API operation for AWS IoT. +// +// Starts a task that applies a set of mitigation actions to the specified target. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation StartAuditMitigationActionsTask for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeTaskAlreadyExistsException "TaskAlreadyExistsException" +// This exception occurs if you attempt to start a task with the same task-id +// as an existing task but with a different clientRequestToken. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// A limit has been exceeded. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. 
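+//
+// A minimal sketch of starting a task against specific findings (the field
+// names on the input are assumptions based on the request shape, and all
+// identifier values are placeholders):
+//
+//    out, err := client.StartAuditMitigationActionsTask(
+//        &iot.StartAuditMitigationActionsTaskInput{
+//            TaskId: aws.String("myMitigationTaskId"),
+//            Target: &iot.AuditMitigationActionsTaskTarget{
+//                FindingIds: []*string{aws.String("exampleFindingId")},
+//            },
+//            AuditCheckToActionsMapping: map[string][]*string{
+//                "LOGGING_DISABLED_CHECK": {aws.String("myActionName")},
+//            },
+//        })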
+// +func (c *IoT) StartAuditMitigationActionsTask(input *StartAuditMitigationActionsTaskInput) (*StartAuditMitigationActionsTaskOutput, error) { + req, out := c.StartAuditMitigationActionsTaskRequest(input) + return out, req.Send() +} + +// StartAuditMitigationActionsTaskWithContext is the same as StartAuditMitigationActionsTask with the addition of +// the ability to pass a context and additional request options. +// +// See StartAuditMitigationActionsTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) StartAuditMitigationActionsTaskWithContext(ctx aws.Context, input *StartAuditMitigationActionsTaskInput, opts ...request.Option) (*StartAuditMitigationActionsTaskOutput, error) { + req, out := c.StartAuditMitigationActionsTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartOnDemandAuditTask = "StartOnDemandAuditTask" // StartOnDemandAuditTaskRequest generates a "aws/request.Request" representing the @@ -14460,6 +15522,91 @@ func (c *IoT) UpdateJobWithContext(ctx aws.Context, input *UpdateJobInput, opts return out, req.Send() } +const opUpdateMitigationAction = "UpdateMitigationAction" + +// UpdateMitigationActionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMitigationAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMitigationAction for more information on using the UpdateMitigationAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateMitigationActionRequest method. +// req, resp := client.UpdateMitigationActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) UpdateMitigationActionRequest(input *UpdateMitigationActionInput) (req *request.Request, output *UpdateMitigationActionOutput) { + op := &request.Operation{ + Name: opUpdateMitigationAction, + HTTPMethod: "PATCH", + HTTPPath: "/mitigationactions/actions/{actionName}", + } + + if input == nil { + input = &UpdateMitigationActionInput{} + } + + output = &UpdateMitigationActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMitigationAction API operation for AWS IoT. +// +// Updates the definition for the specified mitigation action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation UpdateMitigationAction for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource does not exist. 
+// +// * ErrCodeThrottlingException "ThrottlingException" +// The rate exceeds the limit. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An unexpected error has occurred. +// +func (c *IoT) UpdateMitigationAction(input *UpdateMitigationActionInput) (*UpdateMitigationActionOutput, error) { + req, out := c.UpdateMitigationActionRequest(input) + return out, req.Send() +} + +// UpdateMitigationActionWithContext is the same as UpdateMitigationAction with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMitigationAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) UpdateMitigationActionWithContext(ctx aws.Context, input *UpdateMitigationActionInput, opts ...request.Option) (*UpdateMitigationActionOutput, error) { + req, out := c.UpdateMitigationActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateRoleAlias = "UpdateRoleAlias" // UpdateRoleAliasRequest generates a "aws/request.Request" representing the @@ -14593,7 +15740,7 @@ func (c *IoT) UpdateScheduledAuditRequest(input *UpdateScheduledAuditInput) (req // UpdateScheduledAudit API operation for AWS IoT. // -// Updates a scheduled audit, including what checks are performed and how often +// Updates a scheduled audit, including which checks are performed and how often // the audit takes place. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -15872,6 +17019,61 @@ func (s AddThingToThingGroupOutput) GoString() string { return s.String() } +// Parameters used when defining a mitigation action that move a set of things +// to a thing group. +type AddThingsToThingGroupParams struct { + _ struct{} `type:"structure"` + + // Specifies if this mitigation action can move the things that triggered the + // mitigation action even if they are part of one or more dynamic things groups. + OverrideDynamicGroups *bool `locationName:"overrideDynamicGroups" type:"boolean"` + + // The list of groups to which you want to add the things that triggered the + // mitigation action. You can add a thing to a maximum of 10 groups, but you + // cannot add a thing to more than one group in the same hierarchy. + // + // ThingGroupNames is a required field + ThingGroupNames []*string `locationName:"thingGroupNames" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddThingsToThingGroupParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddThingsToThingGroupParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddThingsToThingGroupParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddThingsToThingGroupParams"} + if s.ThingGroupNames == nil { + invalidParams.Add(request.NewErrParamRequired("ThingGroupNames")) + } + if s.ThingGroupNames != nil && len(s.ThingGroupNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingGroupNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOverrideDynamicGroups sets the OverrideDynamicGroups field's value. 
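+// A true value permits the action to move things even when they belong to one
+// or more dynamic thing groups (see the OverrideDynamicGroups field above).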
+func (s *AddThingsToThingGroupParams) SetOverrideDynamicGroups(v bool) *AddThingsToThingGroupParams { + s.OverrideDynamicGroups = &v + return s +} + +// SetThingGroupNames sets the ThingGroupNames field's value. +func (s *AddThingsToThingGroupParams) SetThingGroupNames(v []*string) *AddThingsToThingGroupParams { + s.ThingGroupNames = v + return s +} + // A structure containing the alert target ARN and the role ARN. type AlertTarget struct { _ struct{} `type:"structure"` @@ -16276,7 +17478,8 @@ func (s AttachSecurityProfileOutput) GoString() string { type AttachThingPrincipalInput struct { _ struct{} `type:"structure"` - // The principal, such as a certificate or other credential. + // The principal, which can be a certificate ARN (as returned from the CreateCertificate + // operation) or an Amazon Cognito ID. // // Principal is a required field Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` @@ -16358,7 +17561,7 @@ type AttributePayload struct { // // To remove an attribute, call UpdateThing with an empty attribute value. // - // The merge attribute is only valid when calling UpdateThing. + // The merge attribute is only valid when calling UpdateThing or UpdateThingGroup. Merge *bool `locationName:"merge" type:"boolean"` } @@ -16412,22 +17615,22 @@ func (s *AuditCheckConfiguration) SetEnabled(v bool) *AuditCheckConfiguration { type AuditCheckDetails struct { _ struct{} `type:"structure"` - // True if the check completed and found all resources compliant. + // True if the check is complete and found all resources compliant. CheckCompliant *bool `locationName:"checkCompliant" type:"boolean"` - // The completion status of this check, one of "IN_PROGRESS", "WAITING_FOR_DATA_COLLECTION", + // The completion status of this check. One of "IN_PROGRESS", "WAITING_FOR_DATA_COLLECTION", // "CANCELED", "COMPLETED_COMPLIANT", "COMPLETED_NON_COMPLIANT", or "FAILED". CheckRunStatus *string `locationName:"checkRunStatus" type:"string" enum:"AuditCheckRunStatus"` - // The code of any error encountered when performing this check during this - // audit. One of "INSUFFICIENT_PERMISSIONS", or "AUDIT_CHECK_DISABLED". + // The code of any error encountered when this check is performed during this + // audit. One of "INSUFFICIENT_PERMISSIONS" or "AUDIT_CHECK_DISABLED". ErrorCode *string `locationName:"errorCode" type:"string"` - // The message associated with any error encountered when performing this check + // The message associated with any error encountered when this check is performed // during this audit. Message *string `locationName:"message" type:"string"` - // The number of resources that the check found non-compliant. + // The number of resources that were found noncompliant during the check. NonCompliantResourcesCount *int64 `locationName:"nonCompliantResourcesCount" type:"long"` // The number of resources on which the check was performed. @@ -16487,16 +17690,20 @@ type AuditFinding struct { // The audit check that generated this result. CheckName *string `locationName:"checkName" type:"string"` + // A unique identifier for this set of audit findings. This identifier is used + // to apply mitigation tasks to one or more sets of findings. + FindingId *string `locationName:"findingId" min:"1" type:"string"` + // The time the result (finding) was discovered. FindingTime *time.Time `locationName:"findingTime" type:"timestamp"` - // The resource that was found to be non-compliant with the audit check. 
+ // The resource that was found to be noncompliant with the audit check. NonCompliantResource *NonCompliantResource `locationName:"nonCompliantResource" type:"structure"` - // The reason the resource was non-compliant. + // The reason the resource was noncompliant. ReasonForNonCompliance *string `locationName:"reasonForNonCompliance" type:"string"` - // A code which indicates the reason that the resource was non-compliant. + // A code that indicates the reason that the resource was noncompliant. ReasonForNonComplianceCode *string `locationName:"reasonForNonComplianceCode" type:"string"` // The list of related resources. @@ -16505,7 +17712,7 @@ type AuditFinding struct { // The severity of the result (finding). Severity *string `locationName:"severity" type:"string" enum:"AuditFindingSeverity"` - // The ID of the audit that generated this result (finding) + // The ID of the audit that generated this result (finding). TaskId *string `locationName:"taskId" min:"1" type:"string"` // The time the audit started. @@ -16528,6 +17735,12 @@ func (s *AuditFinding) SetCheckName(v string) *AuditFinding { return s } +// SetFindingId sets the FindingId field's value. +func (s *AuditFinding) SetFindingId(v string) *AuditFinding { + s.FindingId = &v + return s +} + // SetFindingTime sets the FindingTime field's value. func (s *AuditFinding) SetFindingTime(v time.Time) *AuditFinding { s.FindingTime = &v @@ -16576,6 +17789,210 @@ func (s *AuditFinding) SetTaskStartTime(v time.Time) *AuditFinding { return s } +// Returned by ListAuditMitigationActionsTask, this object contains information +// that describes a mitigation action that has been started. +type AuditMitigationActionExecutionMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the mitigation action being applied by the task. + ActionId *string `locationName:"actionId" type:"string"` + + // The friendly name of the mitigation action being applied by the task. + ActionName *string `locationName:"actionName" type:"string"` + + // The date and time when the task was completed or canceled. Blank if the task + // is still running. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // If an error occurred, the code that indicates which type of error occurred. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // The unique identifier for the findings to which the task and associated mitigation + // action are applied. + FindingId *string `locationName:"findingId" min:"1" type:"string"` + + // If an error occurred, a message that describes the error. + Message *string `locationName:"message" type:"string"` + + // The date and time when the task was started. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // The current status of the task being executed. + Status *string `locationName:"status" type:"string" enum:"AuditMitigationActionsExecutionStatus"` + + // The unique identifier for the task that applies the mitigation action. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s AuditMitigationActionExecutionMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuditMitigationActionExecutionMetadata) GoString() string { + return s.String() +} + +// SetActionId sets the ActionId field's value. 
+func (s *AuditMitigationActionExecutionMetadata) SetActionId(v string) *AuditMitigationActionExecutionMetadata { + s.ActionId = &v + return s +} + +// SetActionName sets the ActionName field's value. +func (s *AuditMitigationActionExecutionMetadata) SetActionName(v string) *AuditMitigationActionExecutionMetadata { + s.ActionName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *AuditMitigationActionExecutionMetadata) SetEndTime(v time.Time) *AuditMitigationActionExecutionMetadata { + s.EndTime = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *AuditMitigationActionExecutionMetadata) SetErrorCode(v string) *AuditMitigationActionExecutionMetadata { + s.ErrorCode = &v + return s +} + +// SetFindingId sets the FindingId field's value. +func (s *AuditMitigationActionExecutionMetadata) SetFindingId(v string) *AuditMitigationActionExecutionMetadata { + s.FindingId = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *AuditMitigationActionExecutionMetadata) SetMessage(v string) *AuditMitigationActionExecutionMetadata { + s.Message = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *AuditMitigationActionExecutionMetadata) SetStartTime(v time.Time) *AuditMitigationActionExecutionMetadata { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AuditMitigationActionExecutionMetadata) SetStatus(v string) *AuditMitigationActionExecutionMetadata { + s.Status = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *AuditMitigationActionExecutionMetadata) SetTaskId(v string) *AuditMitigationActionExecutionMetadata { + s.TaskId = &v + return s +} + +// Information about an audit mitigation actions task that is returned by ListAuditMitigationActionsTasks. +type AuditMitigationActionsTaskMetadata struct { + _ struct{} `type:"structure"` + + // The time at which the audit mitigation actions task was started. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // The unique identifier for the task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` + + // The current state of the audit mitigation actions task. + TaskStatus *string `locationName:"taskStatus" type:"string" enum:"AuditMitigationActionsTaskStatus"` +} + +// String returns the string representation +func (s AuditMitigationActionsTaskMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuditMitigationActionsTaskMetadata) GoString() string { + return s.String() +} + +// SetStartTime sets the StartTime field's value. +func (s *AuditMitigationActionsTaskMetadata) SetStartTime(v time.Time) *AuditMitigationActionsTaskMetadata { + s.StartTime = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *AuditMitigationActionsTaskMetadata) SetTaskId(v string) *AuditMitigationActionsTaskMetadata { + s.TaskId = &v + return s +} + +// SetTaskStatus sets the TaskStatus field's value. +func (s *AuditMitigationActionsTaskMetadata) SetTaskStatus(v string) *AuditMitigationActionsTaskMetadata { + s.TaskStatus = &v + return s +} + +// Used in MitigationActionParams, this information identifies the target findings +// to which the mitigation actions are applied. Only one entry appears. 
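+// For example, to target two specific findings, a sketch using this struct's
+// own fields (the IDs are placeholders):
+//
+//    target := &iot.AuditMitigationActionsTaskTarget{
+//        FindingIds: []*string{aws.String("findingId1"), aws.String("findingId2")},
+//    }
+//
+// To target every finding from one audit run, set AuditTaskId instead.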
+type AuditMitigationActionsTaskTarget struct { + _ struct{} `type:"structure"` + + // Specifies a filter in the form of an audit check and set of reason codes + // that identify the findings from the audit to which the audit mitigation actions + // task apply. + AuditCheckToReasonCodeFilter map[string][]*string `locationName:"auditCheckToReasonCodeFilter" type:"map"` + + // If the task will apply a mitigation action to findings from a specific audit, + // this value uniquely identifies the audit. + AuditTaskId *string `locationName:"auditTaskId" min:"1" type:"string"` + + // If the task will apply a mitigation action to one or more listed findings, + // this value uniquely identifies those findings. + FindingIds []*string `locationName:"findingIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s AuditMitigationActionsTaskTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuditMitigationActionsTaskTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuditMitigationActionsTaskTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuditMitigationActionsTaskTarget"} + if s.AuditTaskId != nil && len(*s.AuditTaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AuditTaskId", 1)) + } + if s.FindingIds != nil && len(s.FindingIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuditCheckToReasonCodeFilter sets the AuditCheckToReasonCodeFilter field's value. +func (s *AuditMitigationActionsTaskTarget) SetAuditCheckToReasonCodeFilter(v map[string][]*string) *AuditMitigationActionsTaskTarget { + s.AuditCheckToReasonCodeFilter = v + return s +} + +// SetAuditTaskId sets the AuditTaskId field's value. +func (s *AuditMitigationActionsTaskTarget) SetAuditTaskId(v string) *AuditMitigationActionsTaskTarget { + s.AuditTaskId = &v + return s +} + +// SetFindingIds sets the FindingIds field's value. +func (s *AuditMitigationActionsTaskTarget) SetFindingIds(v []*string) *AuditMitigationActionsTaskTarget { + s.FindingIds = v + return s +} + // Information about the targets to which audit notifications are sent. type AuditNotificationTarget struct { _ struct{} `type:"structure"` @@ -16638,11 +18055,11 @@ type AuditTaskMetadata struct { // The ID of this audit. TaskId *string `locationName:"taskId" min:"1" type:"string"` - // The status of this audit: one of "IN_PROGRESS", "COMPLETED", "FAILED" or + // The status of this audit. One of "IN_PROGRESS", "COMPLETED", "FAILED", or // "CANCELED". TaskStatus *string `locationName:"taskStatus" type:"string" enum:"AuditTaskStatus"` - // The type of this audit: one of "ON_DEMAND_AUDIT_TASK" or "SCHEDULED_AUDIT_TASK". + // The type of this audit. One of "ON_DEMAND_AUDIT_TASK" or "SCHEDULED_AUDIT_TASK". TaskType *string `locationName:"taskType" type:"string" enum:"AuditTaskType"` } @@ -17305,6 +18722,61 @@ func (s *CACertificateDescription) SetValidity(v *CertificateValidity) *CACertif return s } +type CancelAuditMitigationActionsTaskInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the task that you want to cancel. 
+ // + // TaskId is a required field + TaskId *string `location:"uri" locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelAuditMitigationActionsTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelAuditMitigationActionsTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelAuditMitigationActionsTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelAuditMitigationActionsTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. +func (s *CancelAuditMitigationActionsTaskInput) SetTaskId(v string) *CancelAuditMitigationActionsTaskInput { + s.TaskId = &v + return s +} + +type CancelAuditMitigationActionsTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelAuditMitigationActionsTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelAuditMitigationActionsTaskOutput) GoString() string { + return s.String() +} + type CancelAuditTaskInput struct { _ struct{} `type:"structure"` @@ -19011,6 +20483,125 @@ func (s *CreateKeysAndCertificateOutput) SetKeyPair(v *KeyPair) *CreateKeysAndCe return s } +type CreateMitigationActionInput struct { + _ struct{} `type:"structure"` + + // A friendly name for the action. Choose a friendly name that accurately describes + // the action (for example, EnableLoggingAction). + // + // ActionName is a required field + ActionName *string `location:"uri" locationName:"actionName" type:"string" required:"true"` + + // Defines the type of action and the parameters for that action. + // + // ActionParams is a required field + ActionParams *MitigationActionParams `locationName:"actionParams" type:"structure" required:"true"` + + // The ARN of the IAM role that is used to apply the mitigation action. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // Metadata that can be used to manage the mitigation action. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateMitigationActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMitigationActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
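// Illustrative sketch (not generated SDK code): creating a mitigation action
// from the CreateMitigationActionInput defined above. The client setup, the
// action name, and both role ARNs are hypothetical placeholders.
//
//	import (
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/iot"
//	)
//
//	svc := iot.New(session.Must(session.NewSession()))
//	out, err := svc.CreateMitigationAction(&iot.CreateMitigationActionInput{
//		ActionName: aws.String("EnableErrorLoggingAction"), // hypothetical name
//		RoleArn:    aws.String("arn:aws:iam::123456789012:role/IoTMitigationActionRole"), // hypothetical ARN
//		ActionParams: &iot.MitigationActionParams{
//			EnableIoTLoggingParams: &iot.EnableIoTLoggingParams{
//				LogLevel:          aws.String(iot.LogLevelError),
//				RoleArnForLogging: aws.String("arn:aws:iam::123456789012:role/IoTLoggingRole"), // hypothetical ARN
//			},
//		},
//	})
//
// On success, out.ActionId and out.ActionArn identify the new action. The
// later sketches below reuse svc and these imports (plus "fmt" and "time").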
+func (s *CreateMitigationActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMitigationActionInput"} + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + if s.ActionParams == nil { + invalidParams.Add(request.NewErrParamRequired("ActionParams")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ActionParams != nil { + if err := s.ActionParams.Validate(); err != nil { + invalidParams.AddNested("ActionParams", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionName sets the ActionName field's value. +func (s *CreateMitigationActionInput) SetActionName(v string) *CreateMitigationActionInput { + s.ActionName = &v + return s +} + +// SetActionParams sets the ActionParams field's value. +func (s *CreateMitigationActionInput) SetActionParams(v *MitigationActionParams) *CreateMitigationActionInput { + s.ActionParams = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateMitigationActionInput) SetRoleArn(v string) *CreateMitigationActionInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateMitigationActionInput) SetTags(v []*Tag) *CreateMitigationActionInput { + s.Tags = v + return s +} + +type CreateMitigationActionOutput struct { + _ struct{} `type:"structure"` + + // The ARN for the new mitigation action. + ActionArn *string `locationName:"actionArn" type:"string"` + + // A unique identifier for the new mitigation action. + ActionId *string `locationName:"actionId" type:"string"` +} + +// String returns the string representation +func (s CreateMitigationActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMitigationActionOutput) GoString() string { + return s.String() +} + +// SetActionArn sets the ActionArn field's value. +func (s *CreateMitigationActionOutput) SetActionArn(v string) *CreateMitigationActionOutput { + s.ActionArn = &v + return s +} + +// SetActionId sets the ActionId field's value. +func (s *CreateMitigationActionOutput) SetActionId(v string) *CreateMitigationActionOutput { + s.ActionId = &v + return s +} + type CreateOTAUpdateInput struct { _ struct{} `type:"structure"` @@ -19567,13 +21158,13 @@ type CreateScheduledAuditInput struct { DayOfMonth *string `locationName:"dayOfMonth" type:"string"` // The day of the week on which the scheduled audit takes place. Can be one - // of "SUN", "MON", "TUE", "WED", "THU", "FRI" or "SAT". This field is required + // of "SUN", "MON", "TUE", "WED", "THU", "FRI", or "SAT". This field is required // if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY". DayOfWeek *string `locationName:"dayOfWeek" type:"string" enum:"DayOfWeek"` // How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", - // "BIWEEKLY" or "MONTHLY". The actual start time of each audit is determined - // by the system. + // "BIWEEKLY" or "MONTHLY". The start time of each audit is determined by the + // system. 
// // Frequency is a required field Frequency *string `locationName:"frequency" type:"string" required:"true" enum:"AuditFrequency"` @@ -19583,12 +21174,12 @@ type CreateScheduledAuditInput struct { // ScheduledAuditName is a required field ScheduledAuditName *string `location:"uri" locationName:"scheduledAuditName" min:"1" type:"string" required:"true"` - // Metadata which can be used to manage the scheduled audit. + // Metadata that can be used to manage the scheduled audit. Tags []*Tag `locationName:"tags" type:"list"` // Which checks are performed during the scheduled audit. Checks must be enabled // for your account. (Use DescribeAccountAuditConfiguration to see the list - // of all checks including those that are enabled or UpdateAccountAuditConfiguration + // of all checks, including those that are enabled or use UpdateAccountAuditConfiguration // to select which checks are enabled.) // // TargetCheckNames is a required field @@ -19690,7 +21281,7 @@ type CreateSecurityProfileInput struct { _ struct{} `type:"structure"` // A list of metrics whose data is retained (stored). By default, data is retained - // for any metric used in the profile's behaviors but it is also retained for + // for any metric used in the profile's behaviors, but it is also retained for // any metric specified here. AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` @@ -19710,7 +21301,7 @@ type CreateSecurityProfileInput struct { // SecurityProfileName is a required field SecurityProfileName *string `location:"uri" locationName:"securityProfileName" min:"1" type:"string" required:"true"` - // Metadata which can be used to manage the security profile. + // Metadata that can be used to manage the security profile. Tags []*Tag `locationName:"tags" type:"list"` } @@ -20685,7 +22276,8 @@ type DeleteCertificateInput struct { // CertificateId is a required field CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` - // Forces a certificate request to be deleted. + // Forces the deletion of a certificate if it is inactive and is not attached + // to an IoT thing. ForceDelete *bool `location:"querystring" locationName:"forceDelete" type:"boolean"` } @@ -20986,6 +22578,61 @@ func (s DeleteJobOutput) GoString() string { return s.String() } +type DeleteMitigationActionInput struct { + _ struct{} `type:"structure"` + + // The name of the mitigation action that you want to delete. + // + // ActionName is a required field + ActionName *string `location:"uri" locationName:"actionName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMitigationActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMitigationActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMitigationActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMitigationActionInput"} + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionName sets the ActionName field's value. 
+func (s *DeleteMitigationActionInput) SetActionName(v string) *DeleteMitigationActionInput { + s.ActionName = &v + return s +} + +type DeleteMitigationActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMitigationActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMitigationActionOutput) GoString() string { + return s.String() +} + type DeleteOTAUpdateInput struct { _ struct{} `type:"structure"` @@ -21335,7 +22982,7 @@ type DeleteSecurityProfileInput struct { // The expected version of the security profile. A new version is generated // whenever the security profile is updated. If you specify a value that is - // different than the actual version, a VersionConflictException is thrown. + // different from the actual version, a VersionConflictException is thrown. ExpectedVersion *int64 `location:"querystring" locationName:"expectedVersion" type:"long"` // The name of the security profile to be deleted. @@ -21889,10 +23536,10 @@ type DescribeAccountAuditConfigurationOutput struct { AuditNotificationTargetConfigurations map[string]*AuditNotificationTarget `locationName:"auditNotificationTargetConfigurations" type:"map"` // The ARN of the role that grants permission to AWS IoT to access information - // about your devices, policies, certificates and other items as necessary when + // about your devices, policies, certificates, and other items as required when // performing an audit. // - // On the first call to UpdateAccountAuditConfiguration this parameter is required. + // On the first call to UpdateAccountAuditConfiguration, this parameter is required. RoleArn *string `locationName:"roleArn" min:"20" type:"string"` } @@ -21924,6 +23571,193 @@ func (s *DescribeAccountAuditConfigurationOutput) SetRoleArn(v string) *Describe return s } +type DescribeAuditFindingInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for a single audit finding. You can use this identifier + // to apply mitigation actions to the finding. + // + // FindingId is a required field + FindingId *string `location:"uri" locationName:"findingId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAuditFindingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditFindingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAuditFindingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAuditFindingInput"} + if s.FindingId == nil { + invalidParams.Add(request.NewErrParamRequired("FindingId")) + } + if s.FindingId != nil && len(*s.FindingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFindingId sets the FindingId field's value. +func (s *DescribeAuditFindingInput) SetFindingId(v string) *DescribeAuditFindingInput { + s.FindingId = &v + return s +} + +type DescribeAuditFindingOutput struct { + _ struct{} `type:"structure"` + + // The findings (results) of the audit. 
+ Finding *AuditFinding `locationName:"finding" type:"structure"` +} + +// String returns the string representation +func (s DescribeAuditFindingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditFindingOutput) GoString() string { + return s.String() +} + +// SetFinding sets the Finding field's value. +func (s *DescribeAuditFindingOutput) SetFinding(v *AuditFinding) *DescribeAuditFindingOutput { + s.Finding = v + return s +} + +type DescribeAuditMitigationActionsTaskInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the audit mitigation task. + // + // TaskId is a required field + TaskId *string `location:"uri" locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAuditMitigationActionsTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditMitigationActionsTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAuditMitigationActionsTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAuditMitigationActionsTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. +func (s *DescribeAuditMitigationActionsTaskInput) SetTaskId(v string) *DescribeAuditMitigationActionsTaskInput { + s.TaskId = &v + return s +} + +type DescribeAuditMitigationActionsTaskOutput struct { + _ struct{} `type:"structure"` + + // Specifies the mitigation actions and their parameters that are applied as + // part of this task. + ActionsDefinition []*MitigationAction `locationName:"actionsDefinition" type:"list"` + + // Specifies the mitigation actions that should be applied to specific audit + // checks. + AuditCheckToActionsMapping map[string][]*string `locationName:"auditCheckToActionsMapping" type:"map"` + + // The date and time when the task was completed or canceled. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The date and time when the task was started. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // Identifies the findings to which the mitigation actions are applied. This + // can be by audit checks, by audit task, or a set of findings. + Target *AuditMitigationActionsTaskTarget `locationName:"target" type:"structure"` + + // Aggregate counts of the results when the mitigation tasks were applied to + // the findings for this audit mitigation actions task. + TaskStatistics map[string]*TaskStatisticsForAuditCheck `locationName:"taskStatistics" type:"map"` + + // The current status of the task. + TaskStatus *string `locationName:"taskStatus" type:"string" enum:"AuditMitigationActionsTaskStatus"` +} + +// String returns the string representation +func (s DescribeAuditMitigationActionsTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditMitigationActionsTaskOutput) GoString() string { + return s.String() +} + +// SetActionsDefinition sets the ActionsDefinition field's value. 
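// Illustrative sketch (not generated SDK code): checking on an audit
// mitigation actions task with the DescribeAuditMitigationActionsTask types
// above. The task ID is a hypothetical placeholder; svc and imports are as in
// the CreateMitigationAction sketch, plus "fmt".
//
//	out, err := svc.DescribeAuditMitigationActionsTask(&iot.DescribeAuditMitigationActionsTaskInput{
//		TaskId: aws.String("example-mitigation-task"), // hypothetical ID
//	})
//	if err != nil {
//		return err
//	}
//	if aws.StringValue(out.TaskStatus) == iot.AuditMitigationActionsTaskStatusCompleted {
//		// TaskStatistics is keyed by audit check name.
//		for check, stats := range out.TaskStatistics {
//			fmt.Println(check, stats.String())
//		}
//	}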
+func (s *DescribeAuditMitigationActionsTaskOutput) SetActionsDefinition(v []*MitigationAction) *DescribeAuditMitigationActionsTaskOutput {
+	s.ActionsDefinition = v
+	return s
+}
+
+// SetAuditCheckToActionsMapping sets the AuditCheckToActionsMapping field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetAuditCheckToActionsMapping(v map[string][]*string) *DescribeAuditMitigationActionsTaskOutput {
+	s.AuditCheckToActionsMapping = v
+	return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetEndTime(v time.Time) *DescribeAuditMitigationActionsTaskOutput {
+	s.EndTime = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetStartTime(v time.Time) *DescribeAuditMitigationActionsTaskOutput {
+	s.StartTime = &v
+	return s
+}
+
+// SetTarget sets the Target field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetTarget(v *AuditMitigationActionsTaskTarget) *DescribeAuditMitigationActionsTaskOutput {
+	s.Target = v
+	return s
+}
+
+// SetTaskStatistics sets the TaskStatistics field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetTaskStatistics(v map[string]*TaskStatisticsForAuditCheck) *DescribeAuditMitigationActionsTaskOutput {
+	s.TaskStatistics = v
+	return s
+}
+
+// SetTaskStatus sets the TaskStatus field's value.
+func (s *DescribeAuditMitigationActionsTaskOutput) SetTaskStatus(v string) *DescribeAuditMitigationActionsTaskOutput {
+	s.TaskStatus = &v
+	return s
+}
+
 type DescribeAuditTaskInput struct {
 	_ struct{} `type:"structure"`
 
@@ -22756,6 +24590,134 @@ func (s *DescribeJobOutput) SetJob(v *Job) *DescribeJobOutput {
 	return s
 }
 
+type DescribeMitigationActionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The friendly name that uniquely identifies the mitigation action.
+	//
+	// ActionName is a required field
+	ActionName *string `location:"uri" locationName:"actionName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeMitigationActionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMitigationActionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeMitigationActionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeMitigationActionInput"}
+	if s.ActionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ActionName"))
+	}
+	if s.ActionName != nil && len(*s.ActionName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ActionName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetActionName sets the ActionName field's value.
+func (s *DescribeMitigationActionInput) SetActionName(v string) *DescribeMitigationActionInput {
+	s.ActionName = &v
+	return s
+}
+
+type DescribeMitigationActionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN that identifies this mitigation action.
+	ActionArn *string `locationName:"actionArn" type:"string"`
+
+	// A unique identifier for this action.
+	ActionId *string `locationName:"actionId" type:"string"`
+
+	// The friendly name that uniquely identifies the mitigation action.
+	ActionName *string `locationName:"actionName" type:"string"`
+
+	// Parameters that control how the mitigation action is applied, specific to
+	// the type of mitigation action.
+ ActionParams *MitigationActionParams `locationName:"actionParams" type:"structure"` + + // The type of mitigation action. + ActionType *string `locationName:"actionType" type:"string" enum:"MitigationActionType"` + + // The date and time when the mitigation action was added to your AWS account. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date and time when the mitigation action was last changed. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The ARN of the IAM role used to apply this action. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` +} + +// String returns the string representation +func (s DescribeMitigationActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMitigationActionOutput) GoString() string { + return s.String() +} + +// SetActionArn sets the ActionArn field's value. +func (s *DescribeMitigationActionOutput) SetActionArn(v string) *DescribeMitigationActionOutput { + s.ActionArn = &v + return s +} + +// SetActionId sets the ActionId field's value. +func (s *DescribeMitigationActionOutput) SetActionId(v string) *DescribeMitigationActionOutput { + s.ActionId = &v + return s +} + +// SetActionName sets the ActionName field's value. +func (s *DescribeMitigationActionOutput) SetActionName(v string) *DescribeMitigationActionOutput { + s.ActionName = &v + return s +} + +// SetActionParams sets the ActionParams field's value. +func (s *DescribeMitigationActionOutput) SetActionParams(v *MitigationActionParams) *DescribeMitigationActionOutput { + s.ActionParams = v + return s +} + +// SetActionType sets the ActionType field's value. +func (s *DescribeMitigationActionOutput) SetActionType(v string) *DescribeMitigationActionOutput { + s.ActionType = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *DescribeMitigationActionOutput) SetCreationDate(v time.Time) *DescribeMitigationActionOutput { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *DescribeMitigationActionOutput) SetLastModifiedDate(v time.Time) *DescribeMitigationActionOutput { + s.LastModifiedDate = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeMitigationActionOutput) SetRoleArn(v string) *DescribeMitigationActionOutput { + s.RoleArn = &v + return s +} + type DescribeRoleAliasInput struct { _ struct{} `type:"structure"` @@ -22870,11 +24832,11 @@ type DescribeScheduledAuditOutput struct { DayOfMonth *string `locationName:"dayOfMonth" type:"string"` // The day of the week on which the scheduled audit takes place. One of "SUN", - // "MON", "TUE", "WED", "THU", "FRI" or "SAT". + // "MON", "TUE", "WED", "THU", "FRI", or "SAT". DayOfWeek *string `locationName:"dayOfWeek" type:"string" enum:"DayOfWeek"` - // How often the scheduled audit takes place. One of "DAILY", "WEEKLY", "BIWEEKLY" - // or "MONTHLY". The actual start time of each audit is determined by the system. + // How often the scheduled audit takes place. One of "DAILY", "WEEKLY", "BIWEEKLY", + // or "MONTHLY". The start time of each audit is determined by the system. Frequency *string `locationName:"frequency" type:"string" enum:"AuditFrequency"` // The ARN of the scheduled audit. @@ -22883,9 +24845,9 @@ type DescribeScheduledAuditOutput struct { // The name of the scheduled audit. 
 	ScheduledAuditName *string `locationName:"scheduledAuditName" min:"1" type:"string"`
 
-	// Which checks are performed during the scheduled audit. (Note that checks
-	// must be enabled for your account. (Use DescribeAccountAuditConfiguration
-	// to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration
+	// Which checks are performed during the scheduled audit. Checks must be enabled
+	// for your account. (Use DescribeAccountAuditConfiguration to see the list
+	// of all checks, including those that are enabled or use UpdateAccountAuditConfiguration
 	// to select which checks are enabled.)
 	TargetCheckNames []*string `locationName:"targetCheckNames" type:"list"`
 }
@@ -22981,7 +24943,7 @@ type DescribeSecurityProfileOutput struct {
 	_ struct{} `type:"structure"`
 
 	// A list of metrics whose data is retained (stored). By default, data is retained
-	// for any metric used in the profile's behaviors but it is also retained for
+	// for any metric used in the profile's behaviors, but it is also retained for
 	// any metric specified here.
 	AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"`
 
@@ -24424,6 +26386,62 @@ func (s *ElasticsearchAction) SetType(v string) *ElasticsearchAction {
 	return s
 }
 
+// Parameters used when defining a mitigation action that enables AWS IoT logging.
+type EnableIoTLoggingParams struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the types of information to be logged.
+	//
+	// LogLevel is a required field
+	LogLevel *string `locationName:"logLevel" type:"string" required:"true" enum:"LogLevel"`
+
+	// The ARN of the IAM role used for logging.
+	//
+	// RoleArnForLogging is a required field
+	RoleArnForLogging *string `locationName:"roleArnForLogging" min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableIoTLoggingParams) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableIoTLoggingParams) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableIoTLoggingParams) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableIoTLoggingParams"}
+	if s.LogLevel == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogLevel"))
+	}
+	if s.RoleArnForLogging == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArnForLogging"))
+	}
+	if s.RoleArnForLogging != nil && len(*s.RoleArnForLogging) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArnForLogging", 20))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetLogLevel sets the LogLevel field's value.
+func (s *EnableIoTLoggingParams) SetLogLevel(v string) *EnableIoTLoggingParams {
+	s.LogLevel = &v
+	return s
+}
+
+// SetRoleArnForLogging sets the RoleArnForLogging field's value.
+func (s *EnableIoTLoggingParams) SetRoleArnForLogging(v string) *EnableIoTLoggingParams {
+	s.RoleArnForLogging = &v
+	return s
+}
+
 // The input for the EnableTopicRuleRequest operation.
 type EnableTopicRuleInput struct {
 	_ struct{} `type:"structure"`
@@ -24620,6 +26638,39 @@ func (s *ExponentialRolloutRate) SetRateIncreaseCriteria(v *RateIncreaseCriteria
 	return s
 }
 
+// Describes the name and data type of a field.
+type Field struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the field.
+	Name *string `locationName:"name" type:"string"`
+
+	// The data type of the field.
+ Type *string `locationName:"type" type:"string" enum:"FieldType"` +} + +// String returns the string representation +func (s Field) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Field) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Field) SetName(v string) *Field { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *Field) SetType(v string) *Field { + s.Type = &v + return s +} + // The location of the OTA update. type FileLocation struct { _ struct{} `type:"structure"` @@ -24737,6 +26788,103 @@ func (s *FirehoseAction) SetSeparator(v string) *FirehoseAction { return s } +type GetCardinalityInput struct { + _ struct{} `type:"structure"` + + // The field to aggregate. + AggregationField *string `locationName:"aggregationField" min:"1" type:"string"` + + // The name of the index to search. + IndexName *string `locationName:"indexName" min:"1" type:"string"` + + // The search query. + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` + + // The query version. + QueryVersion *string `locationName:"queryVersion" type:"string"` +} + +// String returns the string representation +func (s GetCardinalityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCardinalityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCardinalityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCardinalityInput"} + if s.AggregationField != nil && len(*s.AggregationField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AggregationField", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregationField sets the AggregationField field's value. +func (s *GetCardinalityInput) SetAggregationField(v string) *GetCardinalityInput { + s.AggregationField = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *GetCardinalityInput) SetIndexName(v string) *GetCardinalityInput { + s.IndexName = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *GetCardinalityInput) SetQueryString(v string) *GetCardinalityInput { + s.QueryString = &v + return s +} + +// SetQueryVersion sets the QueryVersion field's value. +func (s *GetCardinalityInput) SetQueryVersion(v string) *GetCardinalityInput { + s.QueryVersion = &v + return s +} + +type GetCardinalityOutput struct { + _ struct{} `type:"structure"` + + // The number of things that match the query. + Cardinality *int64 `locationName:"cardinality" type:"integer"` +} + +// String returns the string representation +func (s GetCardinalityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCardinalityOutput) GoString() string { + return s.String() +} + +// SetCardinality sets the Cardinality field's value. 
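// Illustrative sketch (not generated SDK code): GetCardinality runs a fleet
// indexing query and aggregates over a field; per the comments above, the
// result is the count of things matching the query. The field and query
// strings are hypothetical.
//
//	out, err := svc.GetCardinality(&iot.GetCardinalityInput{
//		AggregationField: aws.String("attributes.deviceType"), // hypothetical field
//		QueryString:      aws.String("connectivity.connected:true"),
//	})
//	if err == nil {
//		fmt.Println("cardinality:", aws.Int64Value(out.Cardinality))
//	}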
+func (s *GetCardinalityOutput) SetCardinality(v int64) *GetCardinalityOutput { + s.Cardinality = &v + return s +} + type GetEffectivePoliciesInput struct { _ struct{} `type:"structure"` @@ -25036,6 +27184,112 @@ func (s *GetOTAUpdateOutput) SetOtaUpdateInfo(v *OTAUpdateInfo) *GetOTAUpdateOut return s } +type GetPercentilesInput struct { + _ struct{} `type:"structure"` + + // The field to aggregate. + AggregationField *string `locationName:"aggregationField" min:"1" type:"string"` + + // The name of the index to search. + IndexName *string `locationName:"indexName" min:"1" type:"string"` + + // The percentile groups returned. + Percents []*float64 `locationName:"percents" type:"list"` + + // The query string. + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` + + // The query version. + QueryVersion *string `locationName:"queryVersion" type:"string"` +} + +// String returns the string representation +func (s GetPercentilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPercentilesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPercentilesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPercentilesInput"} + if s.AggregationField != nil && len(*s.AggregationField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AggregationField", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregationField sets the AggregationField field's value. +func (s *GetPercentilesInput) SetAggregationField(v string) *GetPercentilesInput { + s.AggregationField = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *GetPercentilesInput) SetIndexName(v string) *GetPercentilesInput { + s.IndexName = &v + return s +} + +// SetPercents sets the Percents field's value. +func (s *GetPercentilesInput) SetPercents(v []*float64) *GetPercentilesInput { + s.Percents = v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *GetPercentilesInput) SetQueryString(v string) *GetPercentilesInput { + s.QueryString = &v + return s +} + +// SetQueryVersion sets the QueryVersion field's value. +func (s *GetPercentilesInput) SetQueryVersion(v string) *GetPercentilesInput { + s.QueryVersion = &v + return s +} + +type GetPercentilesOutput struct { + _ struct{} `type:"structure"` + + // The percentile values of the aggregated fields. + Percentiles []*PercentPair `locationName:"percentiles" type:"list"` +} + +// String returns the string representation +func (s GetPercentilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPercentilesOutput) GoString() string { + return s.String() +} + +// SetPercentiles sets the Percentiles field's value. +func (s *GetPercentilesOutput) SetPercentiles(v []*PercentPair) *GetPercentilesOutput { + s.Percentiles = v + return s +} + // The input for the GetPolicy operation. 
type GetPolicyInput struct { _ struct{} `type:"structure"` @@ -25219,7 +27473,7 @@ func (s *GetPolicyVersionInput) SetPolicyVersionId(v string) *GetPolicyVersionIn type GetPolicyVersionOutput struct { _ struct{} `type:"structure"` - // The date the policy version was created. + // The date the policy was created. CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` // The generation ID of the policy version. @@ -25228,7 +27482,7 @@ type GetPolicyVersionOutput struct { // Specifies whether the policy version is the default. IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"` - // The date the policy version was last modified. + // The date the policy was last modified. LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` // The policy ARN. @@ -25344,7 +27598,7 @@ func (s *GetRegistrationCodeOutput) SetRegistrationCode(v string) *GetRegistrati type GetStatisticsInput struct { _ struct{} `type:"structure"` - // The aggregation field name. Currently not supported. + // The aggregation field name. AggregationField *string `locationName:"aggregationField" min:"1" type:"string"` // The name of the index to search. The default value is AWS_Things. @@ -25629,7 +27883,7 @@ func (s *ImplicitDeny) SetPolicies(v []*Policy) *ImplicitDeny { return s } -// Sends messge data to an AWS IoT Analytics channel. +// Sends message data to an AWS IoT Analytics channel. type IotAnalyticsAction struct { _ struct{} `type:"structure"` @@ -26688,7 +28942,7 @@ type ListAttachedPoliciesInput struct { // When true, recursively list attached policies. Recursive *bool `location:"querystring" locationName:"recursive" type:"boolean"` - // The group for which the policies will be listed. + // The group or principal for which the policies will be listed. // // Target is a required field Target *string `location:"uri" locationName:"target" type:"string" required:"true"` @@ -26796,7 +29050,7 @@ type ListAuditFindingsInput struct { // The token for the next set of results. NextToken *string `locationName:"nextToken" type:"string"` - // Information identifying the non-compliant resource. + // Information identifying the noncompliant resource. ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure"` // A filter to limit results to those found after the specified time. You must @@ -26914,6 +29168,271 @@ func (s *ListAuditFindingsOutput) SetNextToken(v string) *ListAuditFindingsOutpu return s } +type ListAuditMitigationActionsExecutionsInput struct { + _ struct{} `type:"structure"` + + // Specify this filter to limit results to those with a specific status. + ActionStatus *string `location:"querystring" locationName:"actionStatus" type:"string" enum:"AuditMitigationActionsExecutionStatus"` + + // Specify this filter to limit results to those that were applied to a specific + // audit finding. + // + // FindingId is a required field + FindingId *string `location:"querystring" locationName:"findingId" min:"1" type:"string" required:"true"` + + // The maximum number of results to return at one time. The default is 25. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // Specify this filter to limit results to actions for a specific audit mitigation + // actions task. 
+ // + // TaskId is a required field + TaskId *string `location:"querystring" locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAuditMitigationActionsExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditMitigationActionsExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAuditMitigationActionsExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAuditMitigationActionsExecutionsInput"} + if s.FindingId == nil { + invalidParams.Add(request.NewErrParamRequired("FindingId")) + } + if s.FindingId != nil && len(*s.FindingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionStatus sets the ActionStatus field's value. +func (s *ListAuditMitigationActionsExecutionsInput) SetActionStatus(v string) *ListAuditMitigationActionsExecutionsInput { + s.ActionStatus = &v + return s +} + +// SetFindingId sets the FindingId field's value. +func (s *ListAuditMitigationActionsExecutionsInput) SetFindingId(v string) *ListAuditMitigationActionsExecutionsInput { + s.FindingId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAuditMitigationActionsExecutionsInput) SetMaxResults(v int64) *ListAuditMitigationActionsExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAuditMitigationActionsExecutionsInput) SetNextToken(v string) *ListAuditMitigationActionsExecutionsInput { + s.NextToken = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *ListAuditMitigationActionsExecutionsInput) SetTaskId(v string) *ListAuditMitigationActionsExecutionsInput { + s.TaskId = &v + return s +} + +type ListAuditMitigationActionsExecutionsOutput struct { + _ struct{} `type:"structure"` + + // A set of task execution results based on the input parameters. Details include + // the mitigation action applied, start time, and task status. + ActionsExecutions []*AuditMitigationActionExecutionMetadata `locationName:"actionsExecutions" type:"list"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAuditMitigationActionsExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditMitigationActionsExecutionsOutput) GoString() string { + return s.String() +} + +// SetActionsExecutions sets the ActionsExecutions field's value. +func (s *ListAuditMitigationActionsExecutionsOutput) SetActionsExecutions(v []*AuditMitigationActionExecutionMetadata) *ListAuditMitigationActionsExecutionsOutput { + s.ActionsExecutions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
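// Illustrative sketch (not generated SDK code): paging through
// ListAuditMitigationActionsExecutions with a manual NextToken loop, which
// works whether or not a Pages helper is generated for this operation. TaskId
// and FindingId are both required; the IDs are hypothetical.
//
//	input := &iot.ListAuditMitigationActionsExecutionsInput{
//		TaskId:    aws.String("example-mitigation-task"), // hypothetical ID
//		FindingId: aws.String("example-finding-id"),      // hypothetical ID
//	}
//	for {
//		page, err := svc.ListAuditMitigationActionsExecutions(input)
//		if err != nil {
//			return err
//		}
//		for _, exec := range page.ActionsExecutions {
//			fmt.Println(aws.StringValue(exec.ActionName), aws.StringValue(exec.Status))
//		}
//		if page.NextToken == nil {
//			break
//		}
//		input.NextToken = page.NextToken
//	}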
+func (s *ListAuditMitigationActionsExecutionsOutput) SetNextToken(v string) *ListAuditMitigationActionsExecutionsOutput { + s.NextToken = &v + return s +} + +type ListAuditMitigationActionsTasksInput struct { + _ struct{} `type:"structure"` + + // Specify this filter to limit results to tasks that were applied to results + // for a specific audit. + AuditTaskId *string `location:"querystring" locationName:"auditTaskId" min:"1" type:"string"` + + // Specify this filter to limit results to tasks that were completed or canceled + // on or before a specific date and time. + // + // EndTime is a required field + EndTime *time.Time `location:"querystring" locationName:"endTime" type:"timestamp" required:"true"` + + // Specify this filter to limit results to tasks that were applied to a specific + // audit finding. + FindingId *string `location:"querystring" locationName:"findingId" min:"1" type:"string"` + + // The maximum number of results to return at one time. The default is 25. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // Specify this filter to limit results to tasks that began on or after a specific + // date and time. + // + // StartTime is a required field + StartTime *time.Time `location:"querystring" locationName:"startTime" type:"timestamp" required:"true"` + + // Specify this filter to limit results to tasks that are in a specific state. + TaskStatus *string `location:"querystring" locationName:"taskStatus" type:"string" enum:"AuditMitigationActionsTaskStatus"` +} + +// String returns the string representation +func (s ListAuditMitigationActionsTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditMitigationActionsTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAuditMitigationActionsTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAuditMitigationActionsTasksInput"} + if s.AuditTaskId != nil && len(*s.AuditTaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AuditTaskId", 1)) + } + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.FindingId != nil && len(*s.FindingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuditTaskId sets the AuditTaskId field's value. +func (s *ListAuditMitigationActionsTasksInput) SetAuditTaskId(v string) *ListAuditMitigationActionsTasksInput { + s.AuditTaskId = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ListAuditMitigationActionsTasksInput) SetEndTime(v time.Time) *ListAuditMitigationActionsTasksInput { + s.EndTime = &v + return s +} + +// SetFindingId sets the FindingId field's value. +func (s *ListAuditMitigationActionsTasksInput) SetFindingId(v string) *ListAuditMitigationActionsTasksInput { + s.FindingId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
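// Illustrative sketch (not generated SDK code): ListAuditMitigationActionsTasks
// requires a StartTime/EndTime window, as the Validate method above enforces;
// here, the last seven days. The output type follows below; imports as in the
// earlier sketches, plus "time".
//
//	out, err := svc.ListAuditMitigationActionsTasks(&iot.ListAuditMitigationActionsTasksInput{
//		StartTime: aws.Time(time.Now().AddDate(0, 0, -7)),
//		EndTime:   aws.Time(time.Now()),
//	})
//	if err == nil {
//		for _, t := range out.Tasks {
//			fmt.Println(aws.StringValue(t.TaskId), aws.StringValue(t.TaskStatus))
//		}
//	}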
+func (s *ListAuditMitigationActionsTasksInput) SetMaxResults(v int64) *ListAuditMitigationActionsTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAuditMitigationActionsTasksInput) SetNextToken(v string) *ListAuditMitigationActionsTasksInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ListAuditMitigationActionsTasksInput) SetStartTime(v time.Time) *ListAuditMitigationActionsTasksInput { + s.StartTime = &v + return s +} + +// SetTaskStatus sets the TaskStatus field's value. +func (s *ListAuditMitigationActionsTasksInput) SetTaskStatus(v string) *ListAuditMitigationActionsTasksInput { + s.TaskStatus = &v + return s +} + +type ListAuditMitigationActionsTasksOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The collection of audit mitigation tasks that matched the filter criteria. + Tasks []*AuditMitigationActionsTaskMetadata `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s ListAuditMitigationActionsTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditMitigationActionsTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAuditMitigationActionsTasksOutput) SetNextToken(v string) *ListAuditMitigationActionsTasksOutput { + s.NextToken = &v + return s +} + +// SetTasks sets the Tasks field's value. +func (s *ListAuditMitigationActionsTasksOutput) SetTasks(v []*AuditMitigationActionsTaskMetadata) *ListAuditMitigationActionsTasksOutput { + s.Tasks = v + return s +} + type ListAuditTasksInput struct { _ struct{} `type:"structure"` @@ -26928,15 +29447,15 @@ type ListAuditTasksInput struct { // The token for the next set of results. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // The beginning of the time period. Note that audit information is retained - // for a limited time (180 days). Requesting a start time prior to what is retained - // results in an "InvalidRequestException". + // The beginning of the time period. Audit information is retained for a limited + // time (180 days). Requesting a start time prior to what is retained results + // in an "InvalidRequestException". // // StartTime is a required field StartTime *time.Time `location:"querystring" locationName:"startTime" type:"timestamp" required:"true"` // A filter to limit the output to audits with the specified completion status: - // can be one of "IN_PROGRESS", "COMPLETED", "FAILED" or "CANCELED". + // can be one of "IN_PROGRESS", "COMPLETED", "FAILED", or "CANCELED". TaskStatus *string `location:"querystring" locationName:"taskStatus" type:"string" enum:"AuditTaskStatus"` // A filter to limit the output to the specified type of audit: can be one of @@ -27927,6 +30446,93 @@ func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { return s } +type ListMitigationActionsInput struct { + _ struct{} `type:"structure"` + + // Specify a value to limit the result to mitigation actions with a specific + // action type. + ActionType *string `location:"querystring" locationName:"actionType" type:"string" enum:"MitigationActionType"` + + // The maximum number of results to return at one time. The default is 25. 
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListMitigationActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMitigationActionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMitigationActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMitigationActionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionType sets the ActionType field's value. +func (s *ListMitigationActionsInput) SetActionType(v string) *ListMitigationActionsInput { + s.ActionType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListMitigationActionsInput) SetMaxResults(v int64) *ListMitigationActionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMitigationActionsInput) SetNextToken(v string) *ListMitigationActionsInput { + s.NextToken = &v + return s +} + +type ListMitigationActionsOutput struct { + _ struct{} `type:"structure"` + + // A set of actions that matched the specified filter criteria. + ActionIdentifiers []*MitigationActionIdentifier `locationName:"actionIdentifiers" type:"list"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListMitigationActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMitigationActionsOutput) GoString() string { + return s.String() +} + +// SetActionIdentifiers sets the ActionIdentifiers field's value. +func (s *ListMitigationActionsOutput) SetActionIdentifiers(v []*MitigationActionIdentifier) *ListMitigationActionsOutput { + s.ActionIdentifiers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMitigationActionsOutput) SetNextToken(v string) *ListMitigationActionsOutput { + s.NextToken = &v + return s +} + type ListOTAUpdatesInput struct { _ struct{} `type:"structure"` @@ -28736,7 +31342,7 @@ type ListSecurityProfilesForTargetInput struct { // The token for the next set of results. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // If true, return child groups as well. + // If true, return child groups too. Recursive *bool `location:"querystring" locationName:"recursive" type:"boolean"` // The ARN of the target (thing group) whose attached security profiles you @@ -30431,7 +33037,7 @@ type ListViolationEventsOutput struct { NextToken *string `locationName:"nextToken" type:"string"` // The security profile violation alerts issued for this account during the - // given time frame, potentially filtered by security profile, behavior violated, + // given time period, potentially filtered by security profile, behavior violated, // or thing (device) violating. 
ViolationEvents []*ViolationEvent `locationName:"violationEvents" type:"list"` } @@ -30632,17 +33238,230 @@ func (s *MetricValue) SetPorts(v []*int64) *MetricValue { return s } -// Information about the resource that was non-compliant with the audit check. +// Describes which changes should be applied as part of a mitigation action. +type MitigationAction struct { + _ struct{} `type:"structure"` + + // The set of parameters for this mitigation action. The parameters vary, depending + // on the kind of action you apply. + ActionParams *MitigationActionParams `locationName:"actionParams" type:"structure"` + + // A unique identifier for the mitigation action. + Id *string `locationName:"id" type:"string"` + + // A user-friendly name for the mitigation action. + Name *string `locationName:"name" type:"string"` + + // The IAM role ARN used to apply this mitigation action. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` +} + +// String returns the string representation +func (s MitigationAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MitigationAction) GoString() string { + return s.String() +} + +// SetActionParams sets the ActionParams field's value. +func (s *MitigationAction) SetActionParams(v *MitigationActionParams) *MitigationAction { + s.ActionParams = v + return s +} + +// SetId sets the Id field's value. +func (s *MitigationAction) SetId(v string) *MitigationAction { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *MitigationAction) SetName(v string) *MitigationAction { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *MitigationAction) SetRoleArn(v string) *MitigationAction { + s.RoleArn = &v + return s +} + +// Information that identifies a mitigation action. This information is returned +// by ListMitigationActions. +type MitigationActionIdentifier struct { + _ struct{} `type:"structure"` + + // The IAM role ARN used to apply this mitigation action. + ActionArn *string `locationName:"actionArn" type:"string"` + + // The friendly name of the mitigation action. + ActionName *string `locationName:"actionName" type:"string"` + + // The date when this mitigation action was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` +} + +// String returns the string representation +func (s MitigationActionIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MitigationActionIdentifier) GoString() string { + return s.String() +} + +// SetActionArn sets the ActionArn field's value. +func (s *MitigationActionIdentifier) SetActionArn(v string) *MitigationActionIdentifier { + s.ActionArn = &v + return s +} + +// SetActionName sets the ActionName field's value. +func (s *MitigationActionIdentifier) SetActionName(v string) *MitigationActionIdentifier { + s.ActionName = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *MitigationActionIdentifier) SetCreationDate(v time.Time) *MitigationActionIdentifier { + s.CreationDate = &v + return s +} + +// The set of parameters for this mitigation action. You can specify only one +// type of parameter (in other words, you can apply only one action for each +// defined mitigation action). 
+type MitigationActionParams struct { + _ struct{} `type:"structure"` + + // Parameters to define a mitigation action that moves devices associated with + // a certificate to one or more specified thing groups, typically for quarantine. + AddThingsToThingGroupParams *AddThingsToThingGroupParams `locationName:"addThingsToThingGroupParams" type:"structure"` + + // Parameters to define a mitigation action that enables AWS IoT logging at + // a specified level of detail. + EnableIoTLoggingParams *EnableIoTLoggingParams `locationName:"enableIoTLoggingParams" type:"structure"` + + // Parameters to define a mitigation action that publishes findings to Amazon + // SNS. You can implement your own custom actions in response to the Amazon + // SNS messages. + PublishFindingToSnsParams *PublishFindingToSnsParams `locationName:"publishFindingToSnsParams" type:"structure"` + + // Parameters to define a mitigation action that adds a blank policy to restrict + // permissions. + ReplaceDefaultPolicyVersionParams *ReplaceDefaultPolicyVersionParams `locationName:"replaceDefaultPolicyVersionParams" type:"structure"` + + // Parameters to define a mitigation action that changes the state of the CA + // certificate to inactive. + UpdateCACertificateParams *UpdateCACertificateParams `locationName:"updateCACertificateParams" type:"structure"` + + // Parameters to define a mitigation action that changes the state of the device + // certificate to inactive. + UpdateDeviceCertificateParams *UpdateDeviceCertificateParams `locationName:"updateDeviceCertificateParams" type:"structure"` +} + +// String returns the string representation +func (s MitigationActionParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MitigationActionParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MitigationActionParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MitigationActionParams"} + if s.AddThingsToThingGroupParams != nil { + if err := s.AddThingsToThingGroupParams.Validate(); err != nil { + invalidParams.AddNested("AddThingsToThingGroupParams", err.(request.ErrInvalidParams)) + } + } + if s.EnableIoTLoggingParams != nil { + if err := s.EnableIoTLoggingParams.Validate(); err != nil { + invalidParams.AddNested("EnableIoTLoggingParams", err.(request.ErrInvalidParams)) + } + } + if s.PublishFindingToSnsParams != nil { + if err := s.PublishFindingToSnsParams.Validate(); err != nil { + invalidParams.AddNested("PublishFindingToSnsParams", err.(request.ErrInvalidParams)) + } + } + if s.ReplaceDefaultPolicyVersionParams != nil { + if err := s.ReplaceDefaultPolicyVersionParams.Validate(); err != nil { + invalidParams.AddNested("ReplaceDefaultPolicyVersionParams", err.(request.ErrInvalidParams)) + } + } + if s.UpdateCACertificateParams != nil { + if err := s.UpdateCACertificateParams.Validate(); err != nil { + invalidParams.AddNested("UpdateCACertificateParams", err.(request.ErrInvalidParams)) + } + } + if s.UpdateDeviceCertificateParams != nil { + if err := s.UpdateDeviceCertificateParams.Validate(); err != nil { + invalidParams.AddNested("UpdateDeviceCertificateParams", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddThingsToThingGroupParams sets the AddThingsToThingGroupParams field's value. 
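// Illustrative sketch (not generated SDK code): per the comment above,
// MitigationActionParams should carry exactly one member. Note that the
// Validate method above only checks whichever nested params are set; the
// one-member rule itself is enforced service-side. This example assumes the
// UpdateDeviceCertificateParams shape defined elsewhere in this file, whose
// only supported action value is DEACTIVATE.
//
//	params := &iot.MitigationActionParams{
//		UpdateDeviceCertificateParams: &iot.UpdateDeviceCertificateParams{
//			Action: aws.String(iot.DeviceCertificateUpdateActionDeactivate),
//		},
//	}
//	if err := params.Validate(); err != nil {
//		return err // surfaces nested field-level problems
//	}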
+func (s *MitigationActionParams) SetAddThingsToThingGroupParams(v *AddThingsToThingGroupParams) *MitigationActionParams { + s.AddThingsToThingGroupParams = v + return s +} + +// SetEnableIoTLoggingParams sets the EnableIoTLoggingParams field's value. +func (s *MitigationActionParams) SetEnableIoTLoggingParams(v *EnableIoTLoggingParams) *MitigationActionParams { + s.EnableIoTLoggingParams = v + return s +} + +// SetPublishFindingToSnsParams sets the PublishFindingToSnsParams field's value. +func (s *MitigationActionParams) SetPublishFindingToSnsParams(v *PublishFindingToSnsParams) *MitigationActionParams { + s.PublishFindingToSnsParams = v + return s +} + +// SetReplaceDefaultPolicyVersionParams sets the ReplaceDefaultPolicyVersionParams field's value. +func (s *MitigationActionParams) SetReplaceDefaultPolicyVersionParams(v *ReplaceDefaultPolicyVersionParams) *MitigationActionParams { + s.ReplaceDefaultPolicyVersionParams = v + return s +} + +// SetUpdateCACertificateParams sets the UpdateCACertificateParams field's value. +func (s *MitigationActionParams) SetUpdateCACertificateParams(v *UpdateCACertificateParams) *MitigationActionParams { + s.UpdateCACertificateParams = v + return s +} + +// SetUpdateDeviceCertificateParams sets the UpdateDeviceCertificateParams field's value. +func (s *MitigationActionParams) SetUpdateDeviceCertificateParams(v *UpdateDeviceCertificateParams) *MitigationActionParams { + s.UpdateDeviceCertificateParams = v + return s +} + +// Information about the resource that was noncompliant with the audit check. type NonCompliantResource struct { _ struct{} `type:"structure"` - // Additional information about the non-compliant resource. + // Other information about the noncompliant resource. AdditionalInfo map[string]*string `locationName:"additionalInfo" type:"map"` - // Information identifying the non-compliant resource. + // Information that identifies the noncompliant resource. ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure"` - // The type of the non-compliant resource. + // The type of the noncompliant resource. ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` } @@ -31011,6 +33830,39 @@ func (s *OutgoingCertificate) SetTransferredTo(v string) *OutgoingCertificate { return s } +// Describes the percentile and percentile value. +type PercentPair struct { + _ struct{} `type:"structure"` + + // The percentile. + Percent *float64 `locationName:"percent" type:"double"` + + // The value. + Value *float64 `locationName:"value" type:"double"` +} + +// String returns the string representation +func (s PercentPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PercentPair) GoString() string { + return s.String() +} + +// SetPercent sets the Percent field's value. +func (s *PercentPair) SetPercent(v float64) *PercentPair { + s.Percent = &v + return s +} + +// SetValue sets the Value field's value. +func (s *PercentPair) SetValue(v float64) *PercentPair { + s.Value = &v + return s +} + // Describes an AWS IoT policy. type Policy struct { _ struct{} `type:"structure"` @@ -31185,12 +34037,53 @@ func (s *PresignedUrlConfig) SetRoleArn(v string) *PresignedUrlConfig { return s } +// Parameters to define a mitigation action that publishes findings to Amazon +// SNS. You can implement your own custom actions in response to the Amazon +// SNS messages. 
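Before the PublishFindingToSnsParams definition that the comment above introduces, a brief editorial aside (not part of the vendored diff): the MitigationActionParams doc comment says exactly one action type may be set per mitigation action. A minimal sketch of what honoring that constraint looks like, using the PublishFindingToSnsParams type defined next; the topic ARN is a placeholder assumption:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// Exactly one of the *Params fields is populated; setting more than one
	// would violate the constraint described in the doc comment above.
	params := &iot.MitigationActionParams{
		PublishFindingToSnsParams: &iot.PublishFindingToSnsParams{
			TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:audit-findings"), // placeholder ARN
		},
	}
	// Validate walks the nested params and reports missing required fields
	// (here, TopicArn) as an ErrInvalidParams error.
	if err := params.Validate(); err != nil {
		fmt.Println("invalid params:", err)
	}
}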
+type PublishFindingToSnsParams struct { + _ struct{} `type:"structure"` + + // The ARN of the topic to which you want to publish the findings. + // + // TopicArn is a required field + TopicArn *string `locationName:"topicArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s PublishFindingToSnsParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishFindingToSnsParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PublishFindingToSnsParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PublishFindingToSnsParams"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTopicArn sets the TopicArn field's value. +func (s *PublishFindingToSnsParams) SetTopicArn(v string) *PublishFindingToSnsParams { + s.TopicArn = &v + return s +} + // The input for the DynamoActionVS action that specifies the DynamoDB table // to which the message data will be written. type PutItemInput struct { _ struct{} `type:"structure"` - // The table where the message data will be written + // The table where the message data will be written. // // TableName is a required field TableName *string `locationName:"tableName" type:"string" required:"true"` @@ -31705,10 +34598,10 @@ func (s RejectCertificateTransferOutput) GoString() string { type RelatedResource struct { _ struct{} `type:"structure"` - // Additional information about the resource. + // Other information about the resource. AdditionalInfo map[string]*string `locationName:"additionalInfo" type:"map"` - // Information identifying the resource. + // Information that identifies the resource. ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure"` // The type of resource. @@ -31903,6 +34796,46 @@ func (s RemoveThingFromThingGroupOutput) GoString() string { return s.String() } +// Parameters to define a mitigation action that adds a blank policy to restrict +// permissions. +type ReplaceDefaultPolicyVersionParams struct { + _ struct{} `type:"structure"` + + // The name of the template to be applied. The only supported value is BLANK_POLICY. + // + // TemplateName is a required field + TemplateName *string `locationName:"templateName" type:"string" required:"true" enum:"PolicyTemplateName"` +} + +// String returns the string representation +func (s ReplaceDefaultPolicyVersionParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceDefaultPolicyVersionParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceDefaultPolicyVersionParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceDefaultPolicyVersionParams"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *ReplaceDefaultPolicyVersionParams) SetTemplateName(v string) *ReplaceDefaultPolicyVersionParams { + s.TemplateName = &v + return s +} + // The input for the ReplaceTopicRule operation. 
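Before the ReplaceTopicRuleInput definition that the comment above introduces, one more editorial aside on the ReplaceDefaultPolicyVersionParams type defined earlier in this hunk: its TemplateName field only accepts BLANK_POLICY, which the diff later exposes as the PolicyTemplateNameBlankPolicy constant. A hedged sketch pairing the two (the constant comes from further down in this same file; the rest is illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// TemplateName is required; the PolicyTemplateName enum currently
	// defines only "BLANK_POLICY".
	p := &iot.ReplaceDefaultPolicyVersionParams{
		TemplateName: aws.String(iot.PolicyTemplateNameBlankPolicy),
	}
	if err := p.Validate(); err != nil {
		fmt.Println("invalid params:", err) // not reached: TemplateName is set
	}
}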
type ReplaceTopicRuleInput struct { _ struct{} `type:"structure" payload:"TopicRulePayload"` @@ -31982,6 +34915,10 @@ func (s ReplaceTopicRuleOutput) GoString() string { type RepublishAction struct { _ struct{} `type:"structure"` + // The Quality of Service (QoS) level to use when republishing messages. The + // default value is 0. + Qos *int64 `locationName:"qos" type:"integer"` + // The ARN of the IAM role that grants access. // // RoleArn is a required field @@ -32019,6 +34956,12 @@ func (s *RepublishAction) Validate() error { return nil } +// SetQos sets the Qos field's value. +func (s *RepublishAction) SetQos(v int64) *RepublishAction { + s.Qos = &v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *RepublishAction) SetRoleArn(v string) *RepublishAction { s.RoleArn = &v @@ -32031,7 +34974,7 @@ func (s *RepublishAction) SetTopic(v string) *RepublishAction { return s } -// Information identifying the non-compliant resource. +// Information that identifies the noncompliant resource. type ResourceIdentifier struct { _ struct{} `type:"structure"` @@ -32044,7 +34987,7 @@ type ResourceIdentifier struct { // The client ID. ClientId *string `locationName:"clientId" type:"string"` - // The ID of the Cognito Identity Pool. + // The ID of the Amazon Cognito identity pool. CognitoIdentityPoolId *string `locationName:"cognitoIdentityPoolId" type:"string"` // The ID of the certificate attached to the resource. @@ -32455,7 +35398,7 @@ type ScheduledAuditMetadata struct { // is "WEEKLY" or "BIWEEKLY"). DayOfWeek *string `locationName:"dayOfWeek" type:"string" enum:"DayOfWeek"` - // How often the scheduled audit takes place. + // How often the scheduled audit occurs. Frequency *string `locationName:"frequency" type:"string" enum:"AuditFrequency"` // The ARN of the scheduled audit. @@ -33228,12 +36171,129 @@ func (s *SqsAction) SetUseBase64(v bool) *SqsAction { return s } +type StartAuditMitigationActionsTaskInput struct { + _ struct{} `type:"structure"` + + // For an audit check, specifies which mitigation actions to apply. Those actions + // must be defined in your AWS account. + // + // AuditCheckToActionsMapping is a required field + AuditCheckToActionsMapping map[string][]*string `locationName:"auditCheckToActionsMapping" type:"map" required:"true"` + + // Each audit mitigation task must have a unique client request token. If you + // try to start a new task with the same token as a task that already exists, + // an exception occurs. If you omit this value, a unique client request token + // is generated automatically. + ClientRequestToken *string `locationName:"clientRequestToken" min:"1" type:"string" idempotencyToken:"true"` + + // Specifies the audit findings to which the mitigation actions are applied. + // You can apply them to a type of audit check, to all findings from an audit, + // or to a specific set of findings. + // + // Target is a required field + Target *AuditMitigationActionsTaskTarget `locationName:"target" type:"structure" required:"true"` + + // A unique identifier for the task. You can use this identifier to check the + // status of the task or to cancel it.
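// (Editor's aside, not vendored code: a minimal, hypothetical sketch of how
// these fields fit together when starting a task. The client setup, IDs, check
// name, and action name below are placeholders, not values from this diff.)
//
//    svc := iot.New(sess)
//    _, err := svc.StartAuditMitigationActionsTask(&iot.StartAuditMitigationActionsTaskInput{
//        TaskId: aws.String("exampleMitigationTaskId"),
//        // Reusing the same ClientRequestToken makes a retried call idempotent
//        // instead of raising TaskAlreadyExistsException.
//        ClientRequestToken: aws.String("example-client-token"),
//        Target: &iot.AuditMitigationActionsTaskTarget{
//            AuditTaskId: aws.String("exampleAuditTaskId"),
//        },
//        AuditCheckToActionsMapping: map[string][]*string{
//            "DEVICE_CERTIFICATE_EXPIRING_CHECK": {aws.String("exampleActionName")},
//        },
//    })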
+ // + // TaskId is a required field + TaskId *string `location:"uri" locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartAuditMitigationActionsTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAuditMitigationActionsTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartAuditMitigationActionsTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartAuditMitigationActionsTaskInput"} + if s.AuditCheckToActionsMapping == nil { + invalidParams.Add(request.NewErrParamRequired("AuditCheckToActionsMapping")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + if s.Target != nil { + if err := s.Target.Validate(); err != nil { + invalidParams.AddNested("Target", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuditCheckToActionsMapping sets the AuditCheckToActionsMapping field's value. +func (s *StartAuditMitigationActionsTaskInput) SetAuditCheckToActionsMapping(v map[string][]*string) *StartAuditMitigationActionsTaskInput { + s.AuditCheckToActionsMapping = v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartAuditMitigationActionsTaskInput) SetClientRequestToken(v string) *StartAuditMitigationActionsTaskInput { + s.ClientRequestToken = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *StartAuditMitigationActionsTaskInput) SetTarget(v *AuditMitigationActionsTaskTarget) *StartAuditMitigationActionsTaskInput { + s.Target = v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *StartAuditMitigationActionsTaskInput) SetTaskId(v string) *StartAuditMitigationActionsTaskInput { + s.TaskId = &v + return s +} + +type StartAuditMitigationActionsTaskOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the audit mitigation task. This matches the taskId + // that you specified in the request. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s StartAuditMitigationActionsTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAuditMitigationActionsTaskOutput) GoString() string { + return s.String() +} + +// SetTaskId sets the TaskId field's value. +func (s *StartAuditMitigationActionsTaskOutput) SetTaskId(v string) *StartAuditMitigationActionsTaskOutput { + s.TaskId = &v + return s +} + type StartOnDemandAuditTaskInput struct { _ struct{} `type:"structure"` // Which checks are performed during the audit. The checks you specify must // be enabled for your account or an exception occurs. 
Use DescribeAccountAuditConfiguration - // to see the list of all checks including those that are enabled or UpdateAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration // to select which checks are enabled. // // TargetCheckNames is a required field @@ -33500,8 +36560,29 @@ func (s *StatisticalThreshold) SetStatistic(v string) *StatisticalThreshold { type Statistics struct { _ struct{} `type:"structure"` + // The average of the aggregated field values. + Average *float64 `locationName:"average" type:"double"` + // The count of things that match the query. Count *int64 `locationName:"count" type:"integer"` + + // The maximum aggregated field value. + Maximum *float64 `locationName:"maximum" type:"double"` + + // The minimum aggregated field value. + Minimum *float64 `locationName:"minimum" type:"double"` + + // The standard deviation of the aggregated field values. + StdDeviation *float64 `locationName:"stdDeviation" type:"double"` + + // The sum of the aggregated field values. + Sum *float64 `locationName:"sum" type:"double"` + + // The sum of the squares of the aggregated field values. + SumOfSquares *float64 `locationName:"sumOfSquares" type:"double"` + + // The variance of the aggregated field values. + Variance *float64 `locationName:"variance" type:"double"` } // String returns the string representation @@ -33514,12 +36595,54 @@ func (s Statistics) GoString() string { return s.String() } +// SetAverage sets the Average field's value. +func (s *Statistics) SetAverage(v float64) *Statistics { + s.Average = &v + return s +} + // SetCount sets the Count field's value. func (s *Statistics) SetCount(v int64) *Statistics { s.Count = &v return s } +// SetMaximum sets the Maximum field's value. +func (s *Statistics) SetMaximum(v float64) *Statistics { + s.Maximum = &v + return s +} + +// SetMinimum sets the Minimum field's value. +func (s *Statistics) SetMinimum(v float64) *Statistics { + s.Minimum = &v + return s +} + +// SetStdDeviation sets the StdDeviation field's value. +func (s *Statistics) SetStdDeviation(v float64) *Statistics { + s.StdDeviation = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *Statistics) SetSum(v float64) *Statistics { + s.Sum = &v + return s +} + +// SetSumOfSquares sets the SumOfSquares field's value. +func (s *Statistics) SetSumOfSquares(v float64) *Statistics { + s.SumOfSquares = &v + return s +} + +// SetVariance sets the Variance field's value. +func (s *Statistics) SetVariance(v float64) *Statistics { + s.Variance = &v + return s +} + // Starts execution of a Step Functions state machine. type StepFunctionsAction struct { _ struct{} `type:"structure"` @@ -33981,13 +37104,13 @@ type TaskStatistics struct { // The number of checks that found compliant resources. CompliantChecks *int64 `locationName:"compliantChecks" type:"integer"` - // The number of checks + // The number of checks. FailedChecks *int64 `locationName:"failedChecks" type:"integer"` // The number of checks in progress. InProgressChecks *int64 `locationName:"inProgressChecks" type:"integer"` - // The number of checks that found non-compliant resources. + // The number of checks that found noncompliant resources. NonCompliantChecks *int64 `locationName:"nonCompliantChecks" type:"integer"` // The number of checks in this audit.
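Between these hunks, a brief editorial sketch of the new aggregate fields on Statistics. They are populated by the GetStatistics operation, which belongs to this same service package even though its hunk is not shown in this excerpt; the region, index name, query, and aggregation field below are assumptions for illustration, not values from the diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := iot.New(sess)

	// Aggregate a numeric field across all things matching the query.
	out, err := svc.GetStatistics(&iot.GetStatisticsInput{
		IndexName:        aws.String("AWS_Things"),         // assumed index name
		QueryString:      aws.String("thingName:example*"), // assumed query
		AggregationField: aws.String("registry.version"),   // assumed numeric field
	})
	if err != nil {
		fmt.Println("GetStatistics failed:", err)
		return
	}
	// Count was the only field before this change; the aggregates are new.
	fmt.Println("count:  ", aws.Int64Value(out.Statistics.Count))
	fmt.Println("average:", aws.Float64Value(out.Statistics.Average))
	fmt.Println("stddev: ", aws.Float64Value(out.Statistics.StdDeviation))
}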
@@ -34049,6 +37172,70 @@ func (s *TaskStatistics) SetWaitingForDataCollectionChecks(v int64) *TaskStatist return s } +// Provides summary counts of how many tasks for findings are in a particular +// state. This information is included in the response from DescribeAuditMitigationActionsTask. +type TaskStatisticsForAuditCheck struct { + _ struct{} `type:"structure"` + + // The number of findings for which the mitigation action task was canceled when + // applied. + CanceledFindingsCount *int64 `locationName:"canceledFindingsCount" type:"long"` + + // The number of findings for which at least one of the actions failed when + // applied. + FailedFindingsCount *int64 `locationName:"failedFindingsCount" type:"long"` + + // The number of findings skipped because of filter conditions provided in the + // parameters to the command. + SkippedFindingsCount *int64 `locationName:"skippedFindingsCount" type:"long"` + + // The number of findings for which all mitigation actions succeeded when applied. + SucceededFindingsCount *int64 `locationName:"succeededFindingsCount" type:"long"` + + // The total number of findings to which a task is being applied. + TotalFindingsCount *int64 `locationName:"totalFindingsCount" type:"long"` +} + +// String returns the string representation +func (s TaskStatisticsForAuditCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskStatisticsForAuditCheck) GoString() string { + return s.String() +} + +// SetCanceledFindingsCount sets the CanceledFindingsCount field's value. +func (s *TaskStatisticsForAuditCheck) SetCanceledFindingsCount(v int64) *TaskStatisticsForAuditCheck { + s.CanceledFindingsCount = &v + return s +} + +// SetFailedFindingsCount sets the FailedFindingsCount field's value. +func (s *TaskStatisticsForAuditCheck) SetFailedFindingsCount(v int64) *TaskStatisticsForAuditCheck { + s.FailedFindingsCount = &v + return s +} + +// SetSkippedFindingsCount sets the SkippedFindingsCount field's value. +func (s *TaskStatisticsForAuditCheck) SetSkippedFindingsCount(v int64) *TaskStatisticsForAuditCheck { + s.SkippedFindingsCount = &v + return s +} + +// SetSucceededFindingsCount sets the SucceededFindingsCount field's value. +func (s *TaskStatisticsForAuditCheck) SetSucceededFindingsCount(v int64) *TaskStatisticsForAuditCheck { + s.SucceededFindingsCount = &v + return s +} + +// SetTotalFindingsCount sets the TotalFindingsCount field's value. +func (s *TaskStatisticsForAuditCheck) SetTotalFindingsCount(v int64) *TaskStatisticsForAuditCheck { + s.TotalFindingsCount = &v + return s +} + type TestAuthorizationInput struct { _ struct{} `type:"structure"` @@ -34535,6 +37722,13 @@ func (s *ThingGroupDocument) SetThingGroupName(v string) *ThingGroupDocument { type ThingGroupIndexingConfiguration struct { _ struct{} `type:"structure"` + // Contains custom field names and their data type. + CustomFields []*Field `locationName:"customFields" type:"list"` + + // Contains fields that are indexed and whose types are already known by the + // Fleet Indexing service. + ManagedFields []*Field `locationName:"managedFields" type:"list"` + // Thing group indexing mode. // // ThingGroupIndexingMode is a required field @@ -34564,6 +37758,18 @@ func (s *ThingGroupIndexingConfiguration) Validate() error { return nil } +// SetCustomFields sets the CustomFields field's value.
+func (s *ThingGroupIndexingConfiguration) SetCustomFields(v []*Field) *ThingGroupIndexingConfiguration { + s.CustomFields = v + return s +} + +// SetManagedFields sets the ManagedFields field's value. +func (s *ThingGroupIndexingConfiguration) SetManagedFields(v []*Field) *ThingGroupIndexingConfiguration { + s.ManagedFields = v + return s +} + // SetThingGroupIndexingMode sets the ThingGroupIndexingMode field's value. func (s *ThingGroupIndexingConfiguration) SetThingGroupIndexingMode(v string) *ThingGroupIndexingConfiguration { s.ThingGroupIndexingMode = &v @@ -34650,10 +37856,17 @@ func (s *ThingGroupProperties) SetThingGroupDescription(v string) *ThingGroupPro type ThingIndexingConfiguration struct { _ struct{} `type:"structure"` + // Contains custom field names and their data type. + CustomFields []*Field `locationName:"customFields" type:"list"` + + // Contains fields that are indexed and whose types are already known by the + // Fleet Indexing service. + ManagedFields []*Field `locationName:"managedFields" type:"list"` + // Thing connectivity indexing mode. Valid values are: // - // * STATUS – Your thing index contains connectivity status. To enable thing - // connectivity indexing, thingIndexMode must not be set to OFF. + // * STATUS – Your thing index contains connectivity status. To enable + // thing connectivity indexing, thingIndexMode must not be set to OFF. // // * OFF - Thing connectivity status indexing is disabled. ThingConnectivityIndexingMode *string `locationName:"thingConnectivityIndexingMode" type:"string" enum:"ThingConnectivityIndexingMode"` @@ -34694,6 +37907,18 @@ func (s *ThingIndexingConfiguration) Validate() error { return nil } +// SetCustomFields sets the CustomFields field's value. +func (s *ThingIndexingConfiguration) SetCustomFields(v []*Field) *ThingIndexingConfiguration { + s.CustomFields = v + return s +} + +// SetManagedFields sets the ManagedFields field's value. +func (s *ThingIndexingConfiguration) SetManagedFields(v []*Field) *ThingIndexingConfiguration { + s.ManagedFields = v + return s +} + // SetThingConnectivityIndexingMode sets the ThingConnectivityIndexingMode field's value. func (s *ThingIndexingConfiguration) SetThingConnectivityIndexingMode(v string) *ThingIndexingConfiguration { s.ThingConnectivityIndexingMode = &v @@ -35347,18 +38572,18 @@ type UpdateAccountAuditConfigurationInput struct { _ struct{} `type:"structure"` // Specifies which audit checks are enabled and disabled for this account. Use - // DescribeAccountAuditConfiguration to see the list of all checks including + // DescribeAccountAuditConfiguration to see the list of all checks, including // those that are currently enabled. // - // Note that some data collection may begin immediately when certain checks - // are enabled. When a check is disabled, any data collected so far in relation - // to the check is deleted. + // Some data collection might start immediately when certain checks are enabled. + // When a check is disabled, any data collected so far in relation to the check + // is deleted. // // You cannot disable a check if it is used by any scheduled audit. You must // first delete the check from the scheduled audit or delete the scheduled audit // itself. // - // On the first call to UpdateAccountAuditConfiguration this parameter is required + // On the first call to UpdateAccountAuditConfiguration, this parameter is required // and must specify at least one enabled check. 
AuditCheckConfigurations map[string]*AuditCheckConfiguration `locationName:"auditCheckConfigurations" type:"map"` @@ -35366,7 +38591,7 @@ type UpdateAccountAuditConfigurationInput struct { AuditNotificationTargetConfigurations map[string]*AuditNotificationTarget `locationName:"auditNotificationTargetConfigurations" type:"map"` // The ARN of the role that grants permission to AWS IoT to access information - // about your devices, policies, certificates and other items as necessary when + // about your devices, policies, certificates and other items as required when // performing an audit. RoleArn *string `locationName:"roleArn" min:"20" type:"string"` } @@ -35659,7 +38884,7 @@ type UpdateCACertificateInput struct { // Information about the registration configuration. RegistrationConfig *RegistrationConfig `locationName:"registrationConfig" type:"structure"` - // If true, remove auto registration. + // If true, removes auto registration. RemoveAutoRegistration *bool `locationName:"removeAutoRegistration" type:"boolean"` } @@ -35738,6 +38963,47 @@ func (s UpdateCACertificateOutput) GoString() string { return s.String() } +// Parameters to define a mitigation action that changes the state of the CA +// certificate to inactive. +type UpdateCACertificateParams struct { + _ struct{} `type:"structure"` + + // The action that you want to apply to the CA certificate. The only supported + // value is DEACTIVATE. + // + // Action is a required field + Action *string `locationName:"action" type:"string" required:"true" enum:"CACertificateUpdateAction"` +} + +// String returns the string representation +func (s UpdateCACertificateParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCACertificateParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateCACertificateParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCACertificateParams"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *UpdateCACertificateParams) SetAction(v string) *UpdateCACertificateParams { + s.Action = &v + return s +} + // The input for the UpdateCertificate operation. type UpdateCertificateInput struct { _ struct{} `type:"structure"` @@ -35816,6 +39082,47 @@ func (s UpdateCertificateOutput) GoString() string { return s.String() } +// Parameters to define a mitigation action that changes the state of the device +// certificate to inactive. +type UpdateDeviceCertificateParams struct { + _ struct{} `type:"structure"` + + // The action that you want to apply to the device certificate. The only supported + // value is DEACTIVATE. + // + // Action is a required field + Action *string `locationName:"action" type:"string" required:"true" enum:"DeviceCertificateUpdateAction"` +} + +// String returns the string representation +func (s UpdateDeviceCertificateParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeviceCertificateParams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDeviceCertificateParams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDeviceCertificateParams"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *UpdateDeviceCertificateParams) SetAction(v string) *UpdateDeviceCertificateParams { + s.Action = &v + return s +} + type UpdateDynamicThingGroupInput struct { _ struct{} `type:"structure"` @@ -36162,6 +39469,107 @@ func (s UpdateJobOutput) GoString() string { return s.String() } +type UpdateMitigationActionInput struct { + _ struct{} `type:"structure"` + + // The friendly name for the mitigation action. You can't change the name by + // using UpdateMitigationAction. Instead, you must delete and re-create the + // mitigation action with the new name. + // + // ActionName is a required field + ActionName *string `location:"uri" locationName:"actionName" type:"string" required:"true"` + + // Defines the type of action and the parameters for that action. + ActionParams *MitigationActionParams `locationName:"actionParams" type:"structure"` + + // The ARN of the IAM role that is used to apply the mitigation action. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateMitigationActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMitigationActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMitigationActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMitigationActionInput"} + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ActionParams != nil { + if err := s.ActionParams.Validate(); err != nil { + invalidParams.AddNested("ActionParams", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionName sets the ActionName field's value. +func (s *UpdateMitigationActionInput) SetActionName(v string) *UpdateMitigationActionInput { + s.ActionName = &v + return s +} + +// SetActionParams sets the ActionParams field's value. +func (s *UpdateMitigationActionInput) SetActionParams(v *MitigationActionParams) *UpdateMitigationActionInput { + s.ActionParams = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateMitigationActionInput) SetRoleArn(v string) *UpdateMitigationActionInput { + s.RoleArn = &v + return s +} + +type UpdateMitigationActionOutput struct { + _ struct{} `type:"structure"` + + // The ARN for the new mitigation action. + ActionArn *string `locationName:"actionArn" type:"string"` + + // A unique identifier for the mitigation action. 
+ ActionId *string `locationName:"actionId" type:"string"` +} + +// String returns the string representation +func (s UpdateMitigationActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMitigationActionOutput) GoString() string { + return s.String() +} + +// SetActionArn sets the ActionArn field's value. +func (s *UpdateMitigationActionOutput) SetActionArn(v string) *UpdateMitigationActionOutput { + s.ActionArn = &v + return s +} + +// SetActionId sets the ActionId field's value. +func (s *UpdateMitigationActionOutput) SetActionId(v string) *UpdateMitigationActionOutput { + s.ActionId = &v + return s +} + type UpdateRoleAliasInput struct { _ struct{} `type:"structure"` @@ -36269,13 +39677,13 @@ type UpdateScheduledAuditInput struct { DayOfMonth *string `locationName:"dayOfMonth" type:"string"` // The day of the week on which the scheduled audit takes place. Can be one - // of "SUN", "MON", "TUE", "WED", "THU", "FRI" or "SAT". This field is required + // of "SUN", "MON", "TUE", "WED", "THU", "FRI", or "SAT". This field is required // if the "frequency" parameter is set to "WEEKLY" or "BIWEEKLY". DayOfWeek *string `locationName:"dayOfWeek" type:"string" enum:"DayOfWeek"` // How often the scheduled audit takes place. Can be one of "DAILY", "WEEKLY", - // "BIWEEKLY" or "MONTHLY". The actual start time of each audit is determined - // by the system. + // "BIWEEKLY", or "MONTHLY". The start time of each audit is determined by the + // system. Frequency *string `locationName:"frequency" type:"string" enum:"AuditFrequency"` // The name of the scheduled audit. (Max. 128 chars) @@ -36285,7 +39693,7 @@ type UpdateScheduledAuditInput struct { // Which checks are performed during the scheduled audit. Checks must be enabled // for your account. (Use DescribeAccountAuditConfiguration to see the list - // of all checks including those that are enabled or UpdateAccountAuditConfiguration + // of all checks, including those that are enabled or use UpdateAccountAuditConfiguration // to select which checks are enabled.) TargetCheckNames []*string `locationName:"targetCheckNames" type:"list"` } @@ -36373,7 +39781,7 @@ type UpdateSecurityProfileInput struct { _ struct{} `type:"structure"` // A list of metrics whose data is retained (stored). By default, data is retained - // for any metric used in the profile's behaviors but it is also retained for + // for any metric used in the profile's behaviors, but it is also retained for // any metric specified here. AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` @@ -36385,21 +39793,21 @@ type UpdateSecurityProfileInput struct { Behaviors []*Behavior `locationName:"behaviors" type:"list"` // If true, delete all additionalMetricsToRetain defined for this security profile. - // If any additionalMetricsToRetain are defined in the current invocation an + // If any additionalMetricsToRetain are defined in the current invocation, an // exception occurs. DeleteAdditionalMetricsToRetain *bool `locationName:"deleteAdditionalMetricsToRetain" type:"boolean"` // If true, delete all alertTargets defined for this security profile. If any - // alertTargets are defined in the current invocation an exception occurs. + // alertTargets are defined in the current invocation, an exception occurs. DeleteAlertTargets *bool `locationName:"deleteAlertTargets" type:"boolean"` // If true, delete all behaviors defined for this security profile. 
If any behaviors - // are defined in the current invocation an exception occurs. + // are defined in the current invocation, an exception occurs. DeleteBehaviors *bool `locationName:"deleteBehaviors" type:"boolean"` // The expected version of the security profile. A new version is generated // whenever the security profile is updated. If you specify a value that is - // different than the actual version, a VersionConflictException is thrown. + // different from the actual version, a VersionConflictException is thrown. ExpectedVersion *int64 `location:"querystring" locationName:"expectedVersion" type:"long"` // A description of the security profile. @@ -36515,7 +39923,7 @@ type UpdateSecurityProfileOutput struct { _ struct{} `type:"structure"` // A list of metrics whose data is retained (stored). By default, data is retained - // for any metric used in the security profile's behaviors but it is also retained + // for any metric used in the security profile's behaviors, but it is also retained // for any metric specified here. AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` @@ -37271,6 +40679,40 @@ const ( AuditFrequencyMonthly = "MONTHLY" ) +const ( + // AuditMitigationActionsExecutionStatusInProgress is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusInProgress = "IN_PROGRESS" + + // AuditMitigationActionsExecutionStatusCompleted is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusCompleted = "COMPLETED" + + // AuditMitigationActionsExecutionStatusFailed is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusFailed = "FAILED" + + // AuditMitigationActionsExecutionStatusCanceled is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusCanceled = "CANCELED" + + // AuditMitigationActionsExecutionStatusSkipped is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusSkipped = "SKIPPED" + + // AuditMitigationActionsExecutionStatusPending is a AuditMitigationActionsExecutionStatus enum value + AuditMitigationActionsExecutionStatusPending = "PENDING" +) + +const ( + // AuditMitigationActionsTaskStatusInProgress is a AuditMitigationActionsTaskStatus enum value + AuditMitigationActionsTaskStatusInProgress = "IN_PROGRESS" + + // AuditMitigationActionsTaskStatusCompleted is a AuditMitigationActionsTaskStatus enum value + AuditMitigationActionsTaskStatusCompleted = "COMPLETED" + + // AuditMitigationActionsTaskStatusFailed is a AuditMitigationActionsTaskStatus enum value + AuditMitigationActionsTaskStatusFailed = "FAILED" + + // AuditMitigationActionsTaskStatusCanceled is a AuditMitigationActionsTaskStatus enum value + AuditMitigationActionsTaskStatusCanceled = "CANCELED" +) + const ( // AuditNotificationTypeSns is a AuditNotificationType enum value AuditNotificationTypeSns = "SNS" @@ -37333,6 +40775,11 @@ const ( CACertificateStatusInactive = "INACTIVE" ) +const ( + // CACertificateUpdateActionDeactivate is a CACertificateUpdateAction enum value + CACertificateUpdateActionDeactivate = "DEACTIVATE" +) + const ( // CannedAccessControlListPrivate is a CannedAccessControlList enum value CannedAccessControlListPrivate = "private" @@ -37428,6 +40875,11 @@ const ( DayOfWeekSat = "SAT" ) +const ( + // DeviceCertificateUpdateActionDeactivate is a DeviceCertificateUpdateAction enum value + DeviceCertificateUpdateActionDeactivate = "DEACTIVATE" +) + const ( // 
DynamicGroupStatusActive is a DynamicGroupStatus enum value DynamicGroupStatusActive = "ACTIVE" @@ -37482,6 +40934,17 @@ const ( EventTypeCaCertificate = "CA_CERTIFICATE" ) +const ( + // FieldTypeNumber is a FieldType enum value + FieldTypeNumber = "Number" + + // FieldTypeString is a FieldType enum value + FieldTypeString = "String" + + // FieldTypeBoolean is a FieldType enum value + FieldTypeBoolean = "Boolean" +) + const ( // IndexStatusActive is a IndexStatus enum value IndexStatusActive = "ACTIVE" @@ -37580,6 +41043,26 @@ const ( MessageFormatJson = "JSON" ) +const ( + // MitigationActionTypeUpdateDeviceCertificate is a MitigationActionType enum value + MitigationActionTypeUpdateDeviceCertificate = "UPDATE_DEVICE_CERTIFICATE" + + // MitigationActionTypeUpdateCaCertificate is a MitigationActionType enum value + MitigationActionTypeUpdateCaCertificate = "UPDATE_CA_CERTIFICATE" + + // MitigationActionTypeAddThingsToThingGroup is a MitigationActionType enum value + MitigationActionTypeAddThingsToThingGroup = "ADD_THINGS_TO_THING_GROUP" + + // MitigationActionTypeReplaceDefaultPolicyVersion is a MitigationActionType enum value + MitigationActionTypeReplaceDefaultPolicyVersion = "REPLACE_DEFAULT_POLICY_VERSION" + + // MitigationActionTypeEnableIotLogging is a MitigationActionType enum value + MitigationActionTypeEnableIotLogging = "ENABLE_IOT_LOGGING" + + // MitigationActionTypePublishFindingToSns is a MitigationActionType enum value + MitigationActionTypePublishFindingToSns = "PUBLISH_FINDING_TO_SNS" +) + const ( // OTAUpdateStatusCreatePending is a OTAUpdateStatus enum value OTAUpdateStatusCreatePending = "CREATE_PENDING" @@ -37594,6 +41077,11 @@ const ( OTAUpdateStatusCreateFailed = "CREATE_FAILED" ) +const ( + // PolicyTemplateNameBlankPolicy is a PolicyTemplateName enum value + PolicyTemplateNameBlankPolicy = "BLANK_POLICY" +) + const ( // ReportTypeErrors is a ReportType enum value ReportTypeErrors = "ERRORS" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/errors.go index 3c613eb8bbc..b4e8556c217 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/errors.go @@ -141,6 +141,13 @@ const ( // The Rule-SQL expression can't be parsed correctly. ErrCodeSqlParseException = "SqlParseException" + // ErrCodeTaskAlreadyExistsException for service response error code + // "TaskAlreadyExistsException". + // + // This exception occurs if you attempt to start a task with the same task-id + // as an existing task but with a different clientRequestToken. + ErrCodeTaskAlreadyExistsException = "TaskAlreadyExistsException" + // ErrCodeThrottlingException for service response error code // "ThrottlingException". 
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/service.go index 10a95d5607c..0391dde5474 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iot/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "execute-api" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoT { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoT { svc := &IoT{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-05-28", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go new file mode 100644 index 00000000000..2192689007d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go @@ -0,0 +1,10192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iotanalytics + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opBatchPutMessage = "BatchPutMessage" + +// BatchPutMessageRequest generates a "aws/request.Request" representing the +// client's request for the BatchPutMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchPutMessage for more information on using the BatchPutMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchPutMessageRequest method. 
+// req, resp := client.BatchPutMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/BatchPutMessage +func (c *IoTAnalytics) BatchPutMessageRequest(input *BatchPutMessageInput) (req *request.Request, output *BatchPutMessageOutput) { + op := &request.Operation{ + Name: opBatchPutMessage, + HTTPMethod: "POST", + HTTPPath: "/messages/batch", + } + + if input == nil { + input = &BatchPutMessageInput{} + } + + output = &BatchPutMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchPutMessage API operation for AWS IoT Analytics. +// +// Sends messages to a channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation BatchPutMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/BatchPutMessage +func (c *IoTAnalytics) BatchPutMessage(input *BatchPutMessageInput) (*BatchPutMessageOutput, error) { + req, out := c.BatchPutMessageRequest(input) + return out, req.Send() +} + +// BatchPutMessageWithContext is the same as BatchPutMessage with the addition of +// the ability to pass a context and additional request options. +// +// See BatchPutMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) BatchPutMessageWithContext(ctx aws.Context, input *BatchPutMessageInput, opts ...request.Option) (*BatchPutMessageOutput, error) { + req, out := c.BatchPutMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelPipelineReprocessing = "CancelPipelineReprocessing" + +// CancelPipelineReprocessingRequest generates a "aws/request.Request" representing the +// client's request for the CancelPipelineReprocessing operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelPipelineReprocessing for more information on using the CancelPipelineReprocessing +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CancelPipelineReprocessingRequest method. +// req, resp := client.CancelPipelineReprocessingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CancelPipelineReprocessing +func (c *IoTAnalytics) CancelPipelineReprocessingRequest(input *CancelPipelineReprocessingInput) (req *request.Request, output *CancelPipelineReprocessingOutput) { + op := &request.Operation{ + Name: opCancelPipelineReprocessing, + HTTPMethod: "DELETE", + HTTPPath: "/pipelines/{pipelineName}/reprocessing/{reprocessingId}", + } + + if input == nil { + input = &CancelPipelineReprocessingInput{} + } + + output = &CancelPipelineReprocessingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelPipelineReprocessing API operation for AWS IoT Analytics. +// +// Cancels the reprocessing of data through the pipeline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CancelPipelineReprocessing for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CancelPipelineReprocessing +func (c *IoTAnalytics) CancelPipelineReprocessing(input *CancelPipelineReprocessingInput) (*CancelPipelineReprocessingOutput, error) { + req, out := c.CancelPipelineReprocessingRequest(input) + return out, req.Send() +} + +// CancelPipelineReprocessingWithContext is the same as CancelPipelineReprocessing with the addition of +// the ability to pass a context and additional request options. +// +// See CancelPipelineReprocessing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CancelPipelineReprocessingWithContext(ctx aws.Context, input *CancelPipelineReprocessingInput, opts ...request.Option) (*CancelPipelineReprocessingOutput, error) { + req, out := c.CancelPipelineReprocessingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateChannel = "CreateChannel" + +// CreateChannelRequest generates a "aws/request.Request" representing the +// client's request for the CreateChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateChannel for more information on using the CreateChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateChannelRequest method. +// req, resp := client.CreateChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateChannel +func (c *IoTAnalytics) CreateChannelRequest(input *CreateChannelInput) (req *request.Request, output *CreateChannelOutput) { + op := &request.Operation{ + Name: opCreateChannel, + HTTPMethod: "POST", + HTTPPath: "/channels", + } + + if input == nil { + input = &CreateChannelInput{} + } + + output = &CreateChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateChannel API operation for AWS IoT Analytics. +// +// Creates a channel. A channel collects data from an MQTT topic and archives +// the raw, unprocessed messages before publishing the data to a pipeline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CreateChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// A resource with the same name already exists. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateChannel +func (c *IoTAnalytics) CreateChannel(input *CreateChannelInput) (*CreateChannelOutput, error) { + req, out := c.CreateChannelRequest(input) + return out, req.Send() +} + +// CreateChannelWithContext is the same as CreateChannel with the addition of +// the ability to pass a context and additional request options. +// +// See CreateChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CreateChannelWithContext(ctx aws.Context, input *CreateChannelInput, opts ...request.Option) (*CreateChannelOutput, error) { + req, out := c.CreateChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateDataset = "CreateDataset" + +// CreateDatasetRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataset for more information on using the CreateDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetRequest method. +// req, resp := client.CreateDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDataset +func (c *IoTAnalytics) CreateDatasetRequest(input *CreateDatasetInput) (req *request.Request, output *CreateDatasetOutput) { + op := &request.Operation{ + Name: opCreateDataset, + HTTPMethod: "POST", + HTTPPath: "/datasets", + } + + if input == nil { + input = &CreateDatasetInput{} + } + + output = &CreateDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataset API operation for AWS IoT Analytics. +// +// Creates a data set. A data set stores data retrieved from a data store by +// applying a "queryAction" (a SQL query) or a "containerAction" (executing +// a containerized application). This operation creates the skeleton of a data +// set. The data set can be populated manually by calling "CreateDatasetContent" +// or automatically according to a "trigger" you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CreateDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// A resource with the same name already exists. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDataset +func (c *IoTAnalytics) CreateDataset(input *CreateDatasetInput) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + return out, req.Send() +} + +// CreateDatasetWithContext is the same as CreateDataset with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CreateDatasetWithContext(ctx aws.Context, input *CreateDatasetInput, opts ...request.Option) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDatasetContent = "CreateDatasetContent" + +// CreateDatasetContentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDatasetContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDatasetContent for more information on using the CreateDatasetContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetContentRequest method. +// req, resp := client.CreateDatasetContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDatasetContent +func (c *IoTAnalytics) CreateDatasetContentRequest(input *CreateDatasetContentInput) (req *request.Request, output *CreateDatasetContentOutput) { + op := &request.Operation{ + Name: opCreateDatasetContent, + HTTPMethod: "POST", + HTTPPath: "/datasets/{datasetName}/content", + } + + if input == nil { + input = &CreateDatasetContentInput{} + } + + output = &CreateDatasetContentOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDatasetContent API operation for AWS IoT Analytics. +// +// Creates the content of a data set by applying a "queryAction" (a SQL query) +// or a "containerAction" (executing a containerized application). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CreateDatasetContent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDatasetContent +func (c *IoTAnalytics) CreateDatasetContent(input *CreateDatasetContentInput) (*CreateDatasetContentOutput, error) { + req, out := c.CreateDatasetContentRequest(input) + return out, req.Send() +} + +// CreateDatasetContentWithContext is the same as CreateDatasetContent with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDatasetContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CreateDatasetContentWithContext(ctx aws.Context, input *CreateDatasetContentInput, opts ...request.Option) (*CreateDatasetContentOutput, error) { + req, out := c.CreateDatasetContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDatastore = "CreateDatastore" + +// CreateDatastoreRequest generates a "aws/request.Request" representing the +// client's request for the CreateDatastore operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDatastore for more information on using the CreateDatastore +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatastoreRequest method. +// req, resp := client.CreateDatastoreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDatastore +func (c *IoTAnalytics) CreateDatastoreRequest(input *CreateDatastoreInput) (req *request.Request, output *CreateDatastoreOutput) { + op := &request.Operation{ + Name: opCreateDatastore, + HTTPMethod: "POST", + HTTPPath: "/datastores", + } + + if input == nil { + input = &CreateDatastoreInput{} + } + + output = &CreateDatastoreOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDatastore API operation for AWS IoT Analytics. +// +// Creates a data store, which is a repository for messages. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CreateDatastore for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// A resource with the same name already exists. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. 
+// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreateDatastore +func (c *IoTAnalytics) CreateDatastore(input *CreateDatastoreInput) (*CreateDatastoreOutput, error) { + req, out := c.CreateDatastoreRequest(input) + return out, req.Send() +} + +// CreateDatastoreWithContext is the same as CreateDatastore with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDatastore for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CreateDatastoreWithContext(ctx aws.Context, input *CreateDatastoreInput, opts ...request.Option) (*CreateDatastoreOutput, error) { + req, out := c.CreateDatastoreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePipeline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePipeline for more information on using the CreatePipeline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePipelineRequest method. +// req, resp := client.CreatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreatePipeline +func (c *IoTAnalytics) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/pipelines", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + output = &CreatePipelineOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePipeline API operation for AWS IoT Analytics. +// +// Creates a pipeline. A pipeline consumes messages from a channel and allows +// you to process the messages before storing them in a data store. You must +// specify both a channel and a datastore activity and, optionally, as many +// as 23 additional activities in the pipelineActivities array. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation CreatePipeline for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. 
+// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// A resource with the same name already exists. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/CreatePipeline +func (c *IoTAnalytics) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + return out, req.Send() +} + +// CreatePipelineWithContext is the same as CreatePipeline with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePipeline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) CreatePipelineWithContext(ctx aws.Context, input *CreatePipelineInput, opts ...request.Option) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteChannel = "DeleteChannel" + +// DeleteChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteChannel for more information on using the DeleteChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteChannelRequest method. +// req, resp := client.DeleteChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteChannel +func (c *IoTAnalytics) DeleteChannelRequest(input *DeleteChannelInput) (req *request.Request, output *DeleteChannelOutput) { + op := &request.Operation{ + Name: opDeleteChannel, + HTTPMethod: "DELETE", + HTTPPath: "/channels/{channelName}", + } + + if input == nil { + input = &DeleteChannelInput{} + } + + output = &DeleteChannelOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteChannel API operation for AWS IoT Analytics. +// +// Deletes the specified channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
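+//
+// A minimal error-handling sketch for the scheme above ("svc" is an assumed
+// *IoTAnalytics client, the channel name is a placeholder, and the aws,
+// awserr, and log packages are assumed imported):
+//
+//    _, err := svc.DeleteChannel(&iotanalytics.DeleteChannelInput{
+//        ChannelName: aws.String("example-channel"), // placeholder name
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case iotanalytics.ErrCodeResourceNotFoundException:
+//            // already deleted; often safe to treat as success
+//        default:
+//            log.Println(aerr.Code(), aerr.Message())
+//        }
+//    }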
+// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DeleteChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteChannel +func (c *IoTAnalytics) DeleteChannel(input *DeleteChannelInput) (*DeleteChannelOutput, error) { + req, out := c.DeleteChannelRequest(input) + return out, req.Send() +} + +// DeleteChannelWithContext is the same as DeleteChannel with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DeleteChannelWithContext(ctx aws.Context, input *DeleteChannelInput, opts ...request.Option) (*DeleteChannelOutput, error) { + req, out := c.DeleteChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDataset = "DeleteDataset" + +// DeleteDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataset for more information on using the DeleteDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetRequest method. +// req, resp := client.DeleteDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDataset +func (c *IoTAnalytics) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) { + op := &request.Operation{ + Name: opDeleteDataset, + HTTPMethod: "DELETE", + HTTPPath: "/datasets/{datasetName}", + } + + if input == nil { + input = &DeleteDatasetInput{} + } + + output = &DeleteDatasetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataset API operation for AWS IoT Analytics. +// +// Deletes the specified data set. +// +// You do not have to delete the content of the data set before you perform +// this operation. 
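+//
+// A minimal sketch of a deadline-bounded delete via the WithContext variant
+// below ("svc" and the data set name are placeholder assumptions; the
+// standard context and time packages are assumed imported):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteDatasetWithContext(ctx, &iotanalytics.DeleteDatasetInput{
+//        DatasetName: aws.String("example-dataset"), // placeholder name
+//    })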
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DeleteDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDataset +func (c *IoTAnalytics) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + return out, req.Send() +} + +// DeleteDatasetWithContext is the same as DeleteDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DeleteDatasetWithContext(ctx aws.Context, input *DeleteDatasetInput, opts ...request.Option) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDatasetContent = "DeleteDatasetContent" + +// DeleteDatasetContentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDatasetContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDatasetContent for more information on using the DeleteDatasetContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetContentRequest method. 
+// req, resp := client.DeleteDatasetContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDatasetContent +func (c *IoTAnalytics) DeleteDatasetContentRequest(input *DeleteDatasetContentInput) (req *request.Request, output *DeleteDatasetContentOutput) { + op := &request.Operation{ + Name: opDeleteDatasetContent, + HTTPMethod: "DELETE", + HTTPPath: "/datasets/{datasetName}/content", + } + + if input == nil { + input = &DeleteDatasetContentInput{} + } + + output = &DeleteDatasetContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDatasetContent API operation for AWS IoT Analytics. +// +// Deletes the content of the specified data set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DeleteDatasetContent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDatasetContent +func (c *IoTAnalytics) DeleteDatasetContent(input *DeleteDatasetContentInput) (*DeleteDatasetContentOutput, error) { + req, out := c.DeleteDatasetContentRequest(input) + return out, req.Send() +} + +// DeleteDatasetContentWithContext is the same as DeleteDatasetContent with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDatasetContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DeleteDatasetContentWithContext(ctx aws.Context, input *DeleteDatasetContentInput, opts ...request.Option) (*DeleteDatasetContentOutput, error) { + req, out := c.DeleteDatasetContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDatastore = "DeleteDatastore" + +// DeleteDatastoreRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDatastore operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDatastore for more information on using the DeleteDatastore +// API call, and error handling. 
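+//
+// One concrete form of the per-request customization described below: a
+// request.Option closure adjusts just this call (a sketch; "svc", ctx,
+// "input", and the header name are placeholder assumptions):
+//
+//    _, err := svc.DeleteDatastoreWithContext(ctx, input,
+//        func(r *request.Request) {
+//            r.HTTPRequest.Header.Set("X-Example-Trace", "1") // placeholder header
+//        })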
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatastoreRequest method. +// req, resp := client.DeleteDatastoreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDatastore +func (c *IoTAnalytics) DeleteDatastoreRequest(input *DeleteDatastoreInput) (req *request.Request, output *DeleteDatastoreOutput) { + op := &request.Operation{ + Name: opDeleteDatastore, + HTTPMethod: "DELETE", + HTTPPath: "/datastores/{datastoreName}", + } + + if input == nil { + input = &DeleteDatastoreInput{} + } + + output = &DeleteDatastoreOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDatastore API operation for AWS IoT Analytics. +// +// Deletes the specified data store. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DeleteDatastore for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeleteDatastore +func (c *IoTAnalytics) DeleteDatastore(input *DeleteDatastoreInput) (*DeleteDatastoreOutput, error) { + req, out := c.DeleteDatastoreRequest(input) + return out, req.Send() +} + +// DeleteDatastoreWithContext is the same as DeleteDatastore with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDatastore for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DeleteDatastoreWithContext(ctx aws.Context, input *DeleteDatastoreInput, opts ...request.Option) (*DeleteDatastoreOutput, error) { + req, out := c.DeleteDatastoreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePipeline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
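+//
+// A sketch of that two-step pattern with a custom header injected before
+// Send ("svc", the pipeline name, and the header name are placeholder
+// assumptions):
+//
+//    req, out := svc.DeletePipelineRequest(&iotanalytics.DeletePipelineInput{
+//        PipelineName: aws.String("example-pipeline"), // placeholder name
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Trace", "debug") // placeholder header
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }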
+// +// See DeletePipeline for more information on using the DeletePipeline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePipelineRequest method. +// req, resp := client.DeletePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeletePipeline +func (c *IoTAnalytics) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "DELETE", + HTTPPath: "/pipelines/{pipelineName}", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + output = &DeletePipelineOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePipeline API operation for AWS IoT Analytics. +// +// Deletes the specified pipeline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DeletePipeline for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DeletePipeline +func (c *IoTAnalytics) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + return out, req.Send() +} + +// DeletePipelineWithContext is the same as DeletePipeline with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePipeline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DeletePipelineWithContext(ctx aws.Context, input *DeletePipelineInput, opts ...request.Option) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeChannel = "DescribeChannel" + +// DescribeChannelRequest generates a "aws/request.Request" representing the +// client's request for the DescribeChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeChannel for more information on using the DescribeChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeChannelRequest method. +// req, resp := client.DescribeChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeChannel +func (c *IoTAnalytics) DescribeChannelRequest(input *DescribeChannelInput) (req *request.Request, output *DescribeChannelOutput) { + op := &request.Operation{ + Name: opDescribeChannel, + HTTPMethod: "GET", + HTTPPath: "/channels/{channelName}", + } + + if input == nil { + input = &DescribeChannelInput{} + } + + output = &DescribeChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeChannel API operation for AWS IoT Analytics. +// +// Retrieves information about a channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DescribeChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeChannel +func (c *IoTAnalytics) DescribeChannel(input *DescribeChannelInput) (*DescribeChannelOutput, error) { + req, out := c.DescribeChannelRequest(input) + return out, req.Send() +} + +// DescribeChannelWithContext is the same as DescribeChannel with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DescribeChannelWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.Option) (*DescribeChannelOutput, error) { + req, out := c.DescribeChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataset = "DescribeDataset" + +// DescribeDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataset for more information on using the DescribeDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetRequest method. +// req, resp := client.DescribeDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeDataset +func (c *IoTAnalytics) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "GET", + HTTPPath: "/datasets/{datasetName}", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + output = &DescribeDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataset API operation for AWS IoT Analytics. +// +// Retrieves information about a data set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DescribeDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeDataset +func (c *IoTAnalytics) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + return out, req.Send() +} + +// DescribeDatasetWithContext is the same as DescribeDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DescribeDatasetWithContext(ctx aws.Context, input *DescribeDatasetInput, opts ...request.Option) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDatastore = "DescribeDatastore" + +// DescribeDatastoreRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDatastore operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDatastore for more information on using the DescribeDatastore +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatastoreRequest method. +// req, resp := client.DescribeDatastoreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeDatastore +func (c *IoTAnalytics) DescribeDatastoreRequest(input *DescribeDatastoreInput) (req *request.Request, output *DescribeDatastoreOutput) { + op := &request.Operation{ + Name: opDescribeDatastore, + HTTPMethod: "GET", + HTTPPath: "/datastores/{datastoreName}", + } + + if input == nil { + input = &DescribeDatastoreInput{} + } + + output = &DescribeDatastoreOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDatastore API operation for AWS IoT Analytics. +// +// Retrieves information about a data store. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DescribeDatastore for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeDatastore +func (c *IoTAnalytics) DescribeDatastore(input *DescribeDatastoreInput) (*DescribeDatastoreOutput, error) { + req, out := c.DescribeDatastoreRequest(input) + return out, req.Send() +} + +// DescribeDatastoreWithContext is the same as DescribeDatastore with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDatastore for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DescribeDatastoreWithContext(ctx aws.Context, input *DescribeDatastoreInput, opts ...request.Option) (*DescribeDatastoreOutput, error) { + req, out := c.DescribeDatastoreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLoggingOptions = "DescribeLoggingOptions" + +// DescribeLoggingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoggingOptions operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLoggingOptions for more information on using the DescribeLoggingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLoggingOptionsRequest method. +// req, resp := client.DescribeLoggingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeLoggingOptions +func (c *IoTAnalytics) DescribeLoggingOptionsRequest(input *DescribeLoggingOptionsInput) (req *request.Request, output *DescribeLoggingOptionsOutput) { + op := &request.Operation{ + Name: opDescribeLoggingOptions, + HTTPMethod: "GET", + HTTPPath: "/logging", + } + + if input == nil { + input = &DescribeLoggingOptionsInput{} + } + + output = &DescribeLoggingOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLoggingOptions API operation for AWS IoT Analytics. +// +// Retrieves the current settings of the AWS IoT Analytics logging options. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DescribeLoggingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribeLoggingOptions +func (c *IoTAnalytics) DescribeLoggingOptions(input *DescribeLoggingOptionsInput) (*DescribeLoggingOptionsOutput, error) { + req, out := c.DescribeLoggingOptionsRequest(input) + return out, req.Send() +} + +// DescribeLoggingOptionsWithContext is the same as DescribeLoggingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoggingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) DescribeLoggingOptionsWithContext(ctx aws.Context, input *DescribeLoggingOptionsInput, opts ...request.Option) (*DescribeLoggingOptionsOutput, error) { + req, out := c.DescribeLoggingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribePipeline = "DescribePipeline" + +// DescribePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DescribePipeline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePipeline for more information on using the DescribePipeline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePipelineRequest method. +// req, resp := client.DescribePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribePipeline +func (c *IoTAnalytics) DescribePipelineRequest(input *DescribePipelineInput) (req *request.Request, output *DescribePipelineOutput) { + op := &request.Operation{ + Name: opDescribePipeline, + HTTPMethod: "GET", + HTTPPath: "/pipelines/{pipelineName}", + } + + if input == nil { + input = &DescribePipelineInput{} + } + + output = &DescribePipelineOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePipeline API operation for AWS IoT Analytics. +// +// Retrieves information about a pipeline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation DescribePipeline for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DescribePipeline +func (c *IoTAnalytics) DescribePipeline(input *DescribePipelineInput) (*DescribePipelineOutput, error) { + req, out := c.DescribePipelineRequest(input) + return out, req.Send() +} + +// DescribePipelineWithContext is the same as DescribePipeline with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePipeline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
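+//
+// A sketch of a cancellable describe call ("svc" and the pipeline name are
+// placeholder assumptions; context is the standard library package):
+//
+//    ctx, cancel := context.WithCancel(context.Background())
+//    defer cancel()
+//    out, err := svc.DescribePipelineWithContext(ctx, &iotanalytics.DescribePipelineInput{
+//        PipelineName: aws.String("example-pipeline"), // placeholder name
+//    })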
+func (c *IoTAnalytics) DescribePipelineWithContext(ctx aws.Context, input *DescribePipelineInput, opts ...request.Option) (*DescribePipelineOutput, error) { + req, out := c.DescribePipelineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDatasetContent = "GetDatasetContent" + +// GetDatasetContentRequest generates a "aws/request.Request" representing the +// client's request for the GetDatasetContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDatasetContent for more information on using the GetDatasetContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDatasetContentRequest method. +// req, resp := client.GetDatasetContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/GetDatasetContent +func (c *IoTAnalytics) GetDatasetContentRequest(input *GetDatasetContentInput) (req *request.Request, output *GetDatasetContentOutput) { + op := &request.Operation{ + Name: opGetDatasetContent, + HTTPMethod: "GET", + HTTPPath: "/datasets/{datasetName}/content", + } + + if input == nil { + input = &GetDatasetContentInput{} + } + + output = &GetDatasetContentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDatasetContent API operation for AWS IoT Analytics. +// +// Retrieves the contents of a data set as pre-signed URIs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation GetDatasetContent for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/GetDatasetContent +func (c *IoTAnalytics) GetDatasetContent(input *GetDatasetContentInput) (*GetDatasetContentOutput, error) { + req, out := c.GetDatasetContentRequest(input) + return out, req.Send() +} + +// GetDatasetContentWithContext is the same as GetDatasetContent with the addition of +// the ability to pass a context and additional request options. +// +// See GetDatasetContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) GetDatasetContentWithContext(ctx aws.Context, input *GetDatasetContentInput, opts ...request.Option) (*GetDatasetContentOutput, error) { + req, out := c.GetDatasetContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListChannels = "ListChannels" + +// ListChannelsRequest generates a "aws/request.Request" representing the +// client's request for the ListChannels operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListChannels for more information on using the ListChannels +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListChannelsRequest method. +// req, resp := client.ListChannelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListChannels +func (c *IoTAnalytics) ListChannelsRequest(input *ListChannelsInput) (req *request.Request, output *ListChannelsOutput) { + op := &request.Operation{ + Name: opListChannels, + HTTPMethod: "GET", + HTTPPath: "/channels", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListChannelsInput{} + } + + output = &ListChannelsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListChannels API operation for AWS IoT Analytics. +// +// Retrieves a list of channels. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListChannels for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListChannels +func (c *IoTAnalytics) ListChannels(input *ListChannelsInput) (*ListChannelsOutput, error) { + req, out := c.ListChannelsRequest(input) + return out, req.Send() +} + +// ListChannelsWithContext is the same as ListChannels with the addition of +// the ability to pass a context and additional request options. +// +// See ListChannels for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListChannelsWithContext(ctx aws.Context, input *ListChannelsInput, opts ...request.Option) (*ListChannelsOutput, error) { + req, out := c.ListChannelsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListChannelsPages iterates over the pages of a ListChannels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListChannels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListChannels operation. +// pageNum := 0 +// err := client.ListChannelsPages(params, +// func(page *iotanalytics.ListChannelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTAnalytics) ListChannelsPages(input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool) error { + return c.ListChannelsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListChannelsPagesWithContext same as ListChannelsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListChannelsPagesWithContext(ctx aws.Context, input *ListChannelsInput, fn func(*ListChannelsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListChannelsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListChannelsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasetContents = "ListDatasetContents" + +// ListDatasetContentsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasetContents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasetContents for more information on using the ListDatasetContents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetContentsRequest method. 
+// req, resp := client.ListDatasetContentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatasetContents +func (c *IoTAnalytics) ListDatasetContentsRequest(input *ListDatasetContentsInput) (req *request.Request, output *ListDatasetContentsOutput) { + op := &request.Operation{ + Name: opListDatasetContents, + HTTPMethod: "GET", + HTTPPath: "/datasets/{datasetName}/contents", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetContentsInput{} + } + + output = &ListDatasetContentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasetContents API operation for AWS IoT Analytics. +// +// Lists information about data set contents that have been created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListDatasetContents for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatasetContents +func (c *IoTAnalytics) ListDatasetContents(input *ListDatasetContentsInput) (*ListDatasetContentsOutput, error) { + req, out := c.ListDatasetContentsRequest(input) + return out, req.Send() +} + +// ListDatasetContentsWithContext is the same as ListDatasetContents with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasetContents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatasetContentsWithContext(ctx aws.Context, input *ListDatasetContentsInput, opts ...request.Option) (*ListDatasetContentsOutput, error) { + req, out := c.ListDatasetContentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetContentsPages iterates over the pages of a ListDatasetContents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasetContents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasetContents operation. 
+// pageNum := 0 +// err := client.ListDatasetContentsPages(params, +// func(page *iotanalytics.ListDatasetContentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTAnalytics) ListDatasetContentsPages(input *ListDatasetContentsInput, fn func(*ListDatasetContentsOutput, bool) bool) error { + return c.ListDatasetContentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetContentsPagesWithContext same as ListDatasetContentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatasetContentsPagesWithContext(ctx aws.Context, input *ListDatasetContentsInput, fn func(*ListDatasetContentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetContentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetContentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetContentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasets for more information on using the ListDatasets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetsRequest method. +// req, resp := client.ListDatasetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatasets +func (c *IoTAnalytics) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "GET", + HTTPPath: "/datasets", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetsInput{} + } + + output = &ListDatasetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasets API operation for AWS IoT Analytics. +// +// Retrieves information about data sets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListDatasets for usage and error information. 
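+//
+//    // Editor's note: illustrative sketch, not generated SDK text. A single
+//    // ListDatasets call capped at 10 results; "svc" is assumed to be an
+//    // *iotanalytics.IoTAnalytics client built from a session.
+//    out, err := svc.ListDatasets(&iotanalytics.ListDatasetsInput{
+//        MaxResults: aws.Int64(10),
+//    })
+//    if err == nil {
+//        for _, d := range out.DatasetSummaries {
+//            fmt.Println(aws.StringValue(d.DatasetName))
+//        }
+//    }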
+// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatasets +func (c *IoTAnalytics) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + return out, req.Send() +} + +// ListDatasetsWithContext is the same as ListDatasets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetsPages iterates over the pages of a ListDatasets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasets operation. +// pageNum := 0 +// err := client.ListDatasetsPages(params, +// func(page *iotanalytics.ListDatasetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTAnalytics) ListDatasetsPages(input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool) error { + return c.ListDatasetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetsPagesWithContext same as ListDatasetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatasetsPagesWithContext(ctx aws.Context, input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatastores = "ListDatastores" + +// ListDatastoresRequest generates a "aws/request.Request" representing the +// client's request for the ListDatastores operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatastores for more information on using the ListDatastores +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatastoresRequest method. +// req, resp := client.ListDatastoresRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatastores +func (c *IoTAnalytics) ListDatastoresRequest(input *ListDatastoresInput) (req *request.Request, output *ListDatastoresOutput) { + op := &request.Operation{ + Name: opListDatastores, + HTTPMethod: "GET", + HTTPPath: "/datastores", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatastoresInput{} + } + + output = &ListDatastoresOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatastores API operation for AWS IoT Analytics. +// +// Retrieves a list of data stores. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListDatastores for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListDatastores +func (c *IoTAnalytics) ListDatastores(input *ListDatastoresInput) (*ListDatastoresOutput, error) { + req, out := c.ListDatastoresRequest(input) + return out, req.Send() +} + +// ListDatastoresWithContext is the same as ListDatastores with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatastores for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatastoresWithContext(ctx aws.Context, input *ListDatastoresInput, opts ...request.Option) (*ListDatastoresOutput, error) { + req, out := c.ListDatastoresRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatastoresPages iterates over the pages of a ListDatastores operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListDatastores method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatastores operation. +// pageNum := 0 +// err := client.ListDatastoresPages(params, +// func(page *iotanalytics.ListDatastoresOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTAnalytics) ListDatastoresPages(input *ListDatastoresInput, fn func(*ListDatastoresOutput, bool) bool) error { + return c.ListDatastoresPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatastoresPagesWithContext same as ListDatastoresPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListDatastoresPagesWithContext(ctx aws.Context, input *ListDatastoresInput, fn func(*ListDatastoresOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatastoresInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatastoresRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatastoresOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a "aws/request.Request" representing the +// client's request for the ListPipelines operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPipelines for more information on using the ListPipelines +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPipelinesRequest method. +// req, resp := client.ListPipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListPipelines +func (c *IoTAnalytics) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "GET", + HTTPPath: "/pipelines", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPipelinesInput{} + } + + output = &ListPipelinesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPipelines API operation for AWS IoT Analytics. +// +// Retrieves a list of pipelines. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListPipelines for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListPipelines +func (c *IoTAnalytics) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + return out, req.Send() +} + +// ListPipelinesWithContext is the same as ListPipelines with the addition of +// the ability to pass a context and additional request options. +// +// See ListPipelines for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListPipelinesWithContext(ctx aws.Context, input *ListPipelinesInput, opts ...request.Option) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPipelinesPages iterates over the pages of a ListPipelines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPipelines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPipelines operation. +// pageNum := 0 +// err := client.ListPipelinesPages(params, +// func(page *iotanalytics.ListPipelinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTAnalytics) ListPipelinesPages(input *ListPipelinesInput, fn func(*ListPipelinesOutput, bool) bool) error { + return c.ListPipelinesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPipelinesPagesWithContext same as ListPipelinesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListPipelinesPagesWithContext(ctx aws.Context, input *ListPipelinesInput, fn func(*ListPipelinesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPipelinesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPipelinesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
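+			// NewRequest builds each page's request from a shallow copy of the
+			// caller's input; the SDK's request.Pagination helper threads the
+			// "nextToken" value between pages and stops when none is returned.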
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPipelinesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListTagsForResource +func (c *IoTAnalytics) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS IoT Analytics. +// +// Lists the tags (metadata) which you have assigned to the resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/ListTagsForResource +func (c *IoTAnalytics) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutLoggingOptions = "PutLoggingOptions" + +// PutLoggingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the PutLoggingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLoggingOptions for more information on using the PutLoggingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLoggingOptionsRequest method. +// req, resp := client.PutLoggingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/PutLoggingOptions +func (c *IoTAnalytics) PutLoggingOptionsRequest(input *PutLoggingOptionsInput) (req *request.Request, output *PutLoggingOptionsOutput) { + op := &request.Operation{ + Name: opPutLoggingOptions, + HTTPMethod: "PUT", + HTTPPath: "/logging", + } + + if input == nil { + input = &PutLoggingOptionsInput{} + } + + output = &PutLoggingOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutLoggingOptions API operation for AWS IoT Analytics. +// +// Sets or updates the AWS IoT Analytics logging options. +// +// Note that if you update the value of any loggingOptions field, it takes up +// to one minute for the change to take effect. Also, if you change the policy +// attached to the role you specified in the roleArn field (for example, to +// correct an invalid policy) it takes up to 5 minutes for that change to take +// effect. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation PutLoggingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
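+//
+//    // Editor's note: illustrative sketch, not generated SDK text. Enabling
+//    // ERROR-level logging; the role ARN below is a placeholder.
+//    _, err := svc.PutLoggingOptions(&iotanalytics.PutLoggingOptionsInput{
+//        LoggingOptions: &iotanalytics.LoggingOptions{
+//            Enabled: aws.Bool(true),
+//            Level:   aws.String("ERROR"),
+//            RoleArn: aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        },
+//    })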
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/PutLoggingOptions +func (c *IoTAnalytics) PutLoggingOptions(input *PutLoggingOptionsInput) (*PutLoggingOptionsOutput, error) { + req, out := c.PutLoggingOptionsRequest(input) + return out, req.Send() +} + +// PutLoggingOptionsWithContext is the same as PutLoggingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See PutLoggingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) PutLoggingOptionsWithContext(ctx aws.Context, input *PutLoggingOptionsInput, opts ...request.Option) (*PutLoggingOptionsOutput, error) { + req, out := c.PutLoggingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRunPipelineActivity = "RunPipelineActivity" + +// RunPipelineActivityRequest generates a "aws/request.Request" representing the +// client's request for the RunPipelineActivity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RunPipelineActivity for more information on using the RunPipelineActivity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RunPipelineActivityRequest method. +// req, resp := client.RunPipelineActivityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/RunPipelineActivity +func (c *IoTAnalytics) RunPipelineActivityRequest(input *RunPipelineActivityInput) (req *request.Request, output *RunPipelineActivityOutput) { + op := &request.Operation{ + Name: opRunPipelineActivity, + HTTPMethod: "POST", + HTTPPath: "/pipelineactivities/run", + } + + if input == nil { + input = &RunPipelineActivityInput{} + } + + output = &RunPipelineActivityOutput{} + req = c.newRequest(op, input, output) + return +} + +// RunPipelineActivity API operation for AWS IoT Analytics. +// +// Simulates the results of running a pipeline activity on a message payload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation RunPipelineActivity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
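+//
+//    // Editor's note: illustrative sketch, not generated SDK text. Simulating
+//    // a removeAttributes activity against a single JSON payload.
+//    out, err := svc.RunPipelineActivity(&iotanalytics.RunPipelineActivityInput{
+//        PipelineActivity: &iotanalytics.PipelineActivity{
+//            RemoveAttributes: &iotanalytics.RemoveAttributesActivity{
+//                Name:       aws.String("removeTemp"),
+//                Attributes: []*string{aws.String("temp")},
+//            },
+//        },
+//        Payloads: [][]byte{[]byte(`{"temp": 21, "humidity": 40}`)},
+//    })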
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/RunPipelineActivity +func (c *IoTAnalytics) RunPipelineActivity(input *RunPipelineActivityInput) (*RunPipelineActivityOutput, error) { + req, out := c.RunPipelineActivityRequest(input) + return out, req.Send() +} + +// RunPipelineActivityWithContext is the same as RunPipelineActivity with the addition of +// the ability to pass a context and additional request options. +// +// See RunPipelineActivity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) RunPipelineActivityWithContext(ctx aws.Context, input *RunPipelineActivityInput, opts ...request.Option) (*RunPipelineActivityOutput, error) { + req, out := c.RunPipelineActivityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSampleChannelData = "SampleChannelData" + +// SampleChannelDataRequest generates a "aws/request.Request" representing the +// client's request for the SampleChannelData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SampleChannelData for more information on using the SampleChannelData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SampleChannelDataRequest method. +// req, resp := client.SampleChannelDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/SampleChannelData +func (c *IoTAnalytics) SampleChannelDataRequest(input *SampleChannelDataInput) (req *request.Request, output *SampleChannelDataOutput) { + op := &request.Operation{ + Name: opSampleChannelData, + HTTPMethod: "GET", + HTTPPath: "/channels/{channelName}/sample", + } + + if input == nil { + input = &SampleChannelDataInput{} + } + + output = &SampleChannelDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// SampleChannelData API operation for AWS IoT Analytics. +// +// Retrieves a sample of messages from the specified channel ingested during +// the specified timeframe. Up to 10 messages can be retrieved. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation SampleChannelData for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. 
+// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/SampleChannelData +func (c *IoTAnalytics) SampleChannelData(input *SampleChannelDataInput) (*SampleChannelDataOutput, error) { + req, out := c.SampleChannelDataRequest(input) + return out, req.Send() +} + +// SampleChannelDataWithContext is the same as SampleChannelData with the addition of +// the ability to pass a context and additional request options. +// +// See SampleChannelData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) SampleChannelDataWithContext(ctx aws.Context, input *SampleChannelDataInput, opts ...request.Option) (*SampleChannelDataOutput, error) { + req, out := c.SampleChannelDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartPipelineReprocessing = "StartPipelineReprocessing" + +// StartPipelineReprocessingRequest generates a "aws/request.Request" representing the +// client's request for the StartPipelineReprocessing operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartPipelineReprocessing for more information on using the StartPipelineReprocessing +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartPipelineReprocessingRequest method. +// req, resp := client.StartPipelineReprocessingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/StartPipelineReprocessing +func (c *IoTAnalytics) StartPipelineReprocessingRequest(input *StartPipelineReprocessingInput) (req *request.Request, output *StartPipelineReprocessingOutput) { + op := &request.Operation{ + Name: opStartPipelineReprocessing, + HTTPMethod: "POST", + HTTPPath: "/pipelines/{pipelineName}/reprocessing", + } + + if input == nil { + input = &StartPipelineReprocessingInput{} + } + + output = &StartPipelineReprocessingOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartPipelineReprocessing API operation for AWS IoT Analytics. +// +// Starts the reprocessing of raw message data through the pipeline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation StartPipelineReprocessing for usage and error information. 
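+//
+//    // Editor's note: illustrative sketch, not generated SDK text. Reprocessing
+//    // the last 24 hours of raw data through a hypothetical "myPipeline".
+//    _, err := svc.StartPipelineReprocessing(&iotanalytics.StartPipelineReprocessingInput{
+//        PipelineName: aws.String("myPipeline"),
+//        StartTime:    aws.Time(time.Now().Add(-24 * time.Hour)),
+//        EndTime:      aws.Time(time.Now()),
+//    })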
+// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// A resource with the same name already exists. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/StartPipelineReprocessing +func (c *IoTAnalytics) StartPipelineReprocessing(input *StartPipelineReprocessingInput) (*StartPipelineReprocessingOutput, error) { + req, out := c.StartPipelineReprocessingRequest(input) + return out, req.Send() +} + +// StartPipelineReprocessingWithContext is the same as StartPipelineReprocessing with the addition of +// the ability to pass a context and additional request options. +// +// See StartPipelineReprocessing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) StartPipelineReprocessingWithContext(ctx aws.Context, input *StartPipelineReprocessingInput, opts ...request.Option) (*StartPipelineReprocessingOutput, error) { + req, out := c.StartPipelineReprocessingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/TagResource +func (c *IoTAnalytics) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS IoT Analytics. +// +// Adds to or modifies the tags of the given resource. 
Tags are metadata which +// can be used to manage a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/TagResource +func (c *IoTAnalytics) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UntagResource +func (c *IoTAnalytics) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS IoT Analytics. +// +// Removes the given tags (metadata) from the resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UntagResource +func (c *IoTAnalytics) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateChannel = "UpdateChannel" + +// UpdateChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateChannel for more information on using the UpdateChannel +// API call, and error handling. 
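+//
+//    // Editor's note: illustrative sketch, not generated SDK text. Setting a
+//    // 30-day retention period on a hypothetical channel.
+//    _, err := svc.UpdateChannel(&iotanalytics.UpdateChannelInput{
+//        ChannelName: aws.String("myChannel"),
+//        RetentionPeriod: &iotanalytics.RetentionPeriod{
+//            NumberOfDays: aws.Int64(30),
+//        },
+//    })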
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateChannelRequest method. +// req, resp := client.UpdateChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateChannel +func (c *IoTAnalytics) UpdateChannelRequest(input *UpdateChannelInput) (req *request.Request, output *UpdateChannelOutput) { + op := &request.Operation{ + Name: opUpdateChannel, + HTTPMethod: "PUT", + HTTPPath: "/channels/{channelName}", + } + + if input == nil { + input = &UpdateChannelInput{} + } + + output = &UpdateChannelOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateChannel API operation for AWS IoT Analytics. +// +// Updates the settings of a channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation UpdateChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateChannel +func (c *IoTAnalytics) UpdateChannel(input *UpdateChannelInput) (*UpdateChannelOutput, error) { + req, out := c.UpdateChannelRequest(input) + return out, req.Send() +} + +// UpdateChannelWithContext is the same as UpdateChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) UpdateChannelWithContext(ctx aws.Context, input *UpdateChannelInput, opts ...request.Option) (*UpdateChannelOutput, error) { + req, out := c.UpdateChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataset = "UpdateDataset" + +// UpdateDatasetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
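+//
+//    // Editor's note: illustrative sketch, not generated SDK text. Replacing a
+//    // data set's action with a hypothetical SQL query action.
+//    _, err := svc.UpdateDataset(&iotanalytics.UpdateDatasetInput{
+//        DatasetName: aws.String("myDataset"),
+//        Actions: []*iotanalytics.DatasetAction{{
+//            ActionName: aws.String("sqlAction"),
+//            QueryAction: &iotanalytics.SqlQueryDatasetAction{
+//                SqlQuery: aws.String("SELECT * FROM myDatastore"),
+//            },
+//        }},
+//    })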
+// +// See UpdateDataset for more information on using the UpdateDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDatasetRequest method. +// req, resp := client.UpdateDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateDataset +func (c *IoTAnalytics) UpdateDatasetRequest(input *UpdateDatasetInput) (req *request.Request, output *UpdateDatasetOutput) { + op := &request.Operation{ + Name: opUpdateDataset, + HTTPMethod: "PUT", + HTTPPath: "/datasets/{datasetName}", + } + + if input == nil { + input = &UpdateDatasetInput{} + } + + output = &UpdateDatasetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateDataset API operation for AWS IoT Analytics. +// +// Updates the settings of a data set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation UpdateDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateDataset +func (c *IoTAnalytics) UpdateDataset(input *UpdateDatasetInput) (*UpdateDatasetOutput, error) { + req, out := c.UpdateDatasetRequest(input) + return out, req.Send() +} + +// UpdateDatasetWithContext is the same as UpdateDataset with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) UpdateDatasetWithContext(ctx aws.Context, input *UpdateDatasetInput, opts ...request.Option) (*UpdateDatasetOutput, error) { + req, out := c.UpdateDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDatastore = "UpdateDatastore" + +// UpdateDatastoreRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDatastore operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDatastore for more information on using the UpdateDatastore +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDatastoreRequest method. +// req, resp := client.UpdateDatastoreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateDatastore +func (c *IoTAnalytics) UpdateDatastoreRequest(input *UpdateDatastoreInput) (req *request.Request, output *UpdateDatastoreOutput) { + op := &request.Operation{ + Name: opUpdateDatastore, + HTTPMethod: "PUT", + HTTPPath: "/datastores/{datastoreName}", + } + + if input == nil { + input = &UpdateDatastoreInput{} + } + + output = &UpdateDatastoreOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateDatastore API operation for AWS IoT Analytics. +// +// Updates the settings of a data store. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation UpdateDatastore for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdateDatastore +func (c *IoTAnalytics) UpdateDatastore(input *UpdateDatastoreInput) (*UpdateDatastoreOutput, error) { + req, out := c.UpdateDatastoreRequest(input) + return out, req.Send() +} + +// UpdateDatastoreWithContext is the same as UpdateDatastore with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDatastore for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTAnalytics) UpdateDatastoreWithContext(ctx aws.Context, input *UpdateDatastoreInput, opts ...request.Option) (*UpdateDatastoreOutput, error) { + req, out := c.UpdateDatastoreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePipeline = "UpdatePipeline" + +// UpdatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipeline operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePipeline for more information on using the UpdatePipeline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePipelineRequest method. +// req, resp := client.UpdatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdatePipeline +func (c *IoTAnalytics) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { + op := &request.Operation{ + Name: opUpdatePipeline, + HTTPMethod: "PUT", + HTTPPath: "/pipelines/{pipelineName}", + } + + if input == nil { + input = &UpdatePipelineInput{} + } + + output = &UpdatePipelineOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdatePipeline API operation for AWS IoT Analytics. +// +// Updates the settings of a pipeline. You must specify both a channel and a +// datastore activity and, optionally, as many as 23 additional activities in +// the pipelineActivities array. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Analytics's +// API operation UpdatePipeline for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource with the specified name could not be found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// There was an internal failure. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is temporarily unavailable. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The command caused an internal limit to be exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/UpdatePipeline +func (c *IoTAnalytics) UpdatePipeline(input *UpdatePipelineInput) (*UpdatePipelineOutput, error) { + req, out := c.UpdatePipelineRequest(input) + return out, req.Send() +} + +// UpdatePipelineWithContext is the same as UpdatePipeline with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePipeline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoTAnalytics) UpdatePipelineWithContext(ctx aws.Context, input *UpdatePipelineInput, opts ...request.Option) (*UpdatePipelineOutput, error) {
+	req, out := c.UpdatePipelineRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// An activity that adds other attributes based on existing attributes in the
+// message.
+type AddAttributesActivity struct {
+	_ struct{} `type:"structure"`
+
+	// A list of 1-50 "AttributeNameMapping" objects that map an existing attribute
+	// to a new attribute.
+	//
+	// The existing attributes remain in the message, so if you want to remove the
+	// originals, use "RemoveAttributeActivity".
+	//
+	// Attributes is a required field
+	Attributes map[string]*string `locationName:"attributes" min:"1" type:"map" required:"true"`
+
+	// The name of the 'addAttributes' activity.
+	//
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The next activity in the pipeline.
+	Next *string `locationName:"next" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AddAttributesActivity) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddAttributesActivity) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddAttributesActivity) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AddAttributesActivity"}
+	if s.Attributes == nil {
+		invalidParams.Add(request.NewErrParamRequired("Attributes"))
+	}
+	if s.Attributes != nil && len(s.Attributes) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Attributes", 1))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.Next != nil && len(*s.Next) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Next", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *AddAttributesActivity) SetAttributes(v map[string]*string) *AddAttributesActivity {
+	s.Attributes = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *AddAttributesActivity) SetName(v string) *AddAttributesActivity {
+	s.Name = &v
+	return s
+}
+
+// SetNext sets the Next field's value.
+func (s *AddAttributesActivity) SetNext(v string) *AddAttributesActivity {
+	s.Next = &v
+	return s
+}
+
+// Contains information about errors.
+type BatchPutMessageErrorEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The code associated with the error.
+	ErrorCode *string `locationName:"errorCode" type:"string"`
+
+	// The message associated with the error.
+	ErrorMessage *string `locationName:"errorMessage" type:"string"`
+
+	// The ID of the message that caused the error. (See the value corresponding
+	// to the "messageId" key in the message object.)
+	MessageId *string `locationName:"messageId" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s BatchPutMessageErrorEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchPutMessageErrorEntry) GoString() string {
+	return s.String()
+}
+
+// SetErrorCode sets the ErrorCode field's value.
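+// A sketch of constructing the AddAttributesActivity defined above; the
+// activity and attribute names are assumptions for illustration:
+//
+//    add := &iotanalytics.AddAttributesActivity{
+//        Name: aws.String("addMeta"),
+//        Attributes: map[string]*string{
+//            // map the existing "device" attribute to a new "device_id" attribute;
+//            // "device" stays in the message unless a RemoveAttributesActivity drops it
+//            "device": aws.String("device_id"),
+//        },
+//        Next: aws.String("store"),
+//    }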
+func (s *BatchPutMessageErrorEntry) SetErrorCode(v string) *BatchPutMessageErrorEntry {
+	s.ErrorCode = &v
+	return s
+}
+
+// SetErrorMessage sets the ErrorMessage field's value.
+func (s *BatchPutMessageErrorEntry) SetErrorMessage(v string) *BatchPutMessageErrorEntry {
+	s.ErrorMessage = &v
+	return s
+}
+
+// SetMessageId sets the MessageId field's value.
+func (s *BatchPutMessageErrorEntry) SetMessageId(v string) *BatchPutMessageErrorEntry {
+	s.MessageId = &v
+	return s
+}
+
+type BatchPutMessageInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the channel where the messages are sent.
+	//
+	// ChannelName is a required field
+	ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"`
+
+	// The list of messages to be sent. Each message has format: '{ "messageId":
+	// "string", "payload": "string"}'.
+	//
+	// Note that the field names of message payloads (data) that you send to AWS
+	// IoT Analytics:
+	//
+	//    * Must contain only alphanumeric characters and underscores (_); no other
+	//    special characters are allowed.
+	//
+	//    * Must begin with an alphabetic character or single underscore (_).
+	//
+	//    * Cannot contain hyphens (-).
+	//
+	//    * In regular expression terms: "^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$".
+	//
+	//    * Cannot be greater than 255 characters.
+	//
+	//    * Are case-insensitive. (Fields named "foo" and "FOO" in the same payload
+	//    are considered duplicates.)
+	//
+	// For example, {"temp_01": 29} or {"_temp_01": 29} are valid, but {"temp-01":
+	// 29}, {"01_temp": 29} or {"__temp_01": 29} are invalid in message payloads.
+	//
+	// Messages is a required field
+	Messages []*Message `locationName:"messages" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchPutMessageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchPutMessageInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchPutMessageInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchPutMessageInput"}
+	if s.ChannelName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ChannelName"))
+	}
+	if s.ChannelName != nil && len(*s.ChannelName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1))
+	}
+	if s.Messages == nil {
+		invalidParams.Add(request.NewErrParamRequired("Messages"))
+	}
+	if s.Messages != nil {
+		for i, v := range s.Messages {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Messages", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetChannelName sets the ChannelName field's value.
+func (s *BatchPutMessageInput) SetChannelName(v string) *BatchPutMessageInput {
+	s.ChannelName = &v
+	return s
+}
+
+// SetMessages sets the Messages field's value.
+func (s *BatchPutMessageInput) SetMessages(v []*Message) *BatchPutMessageInput {
+	s.Messages = v
+	return s
+}
+
+type BatchPutMessageOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of any errors encountered when sending the messages to the channel.
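+// A sketch of a BatchPutMessage call that honors the payload naming rules
+// above; the client value "svc" and the channel name are assumptions. The
+// Message type's Payload is a raw byte slice, so the JSON document is passed
+// as []byte:
+//
+//    out, err := svc.BatchPutMessage(&iotanalytics.BatchPutMessageInput{
+//        ChannelName: aws.String("mychannel"),
+//        Messages: []*iotanalytics.Message{{
+//            MessageId: aws.String("msg-0001"),
+//            Payload:   []byte(`{"temp_01": 29}`), // valid; {"temp-01": 29} would be rejected
+//        }},
+//    })
+//    if err == nil {
+//        // per-message failures, if any, arrive in the error entries list
+//        fmt.Println(out.BatchPutMessageErrorEntries)
+//    }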
+ BatchPutMessageErrorEntries []*BatchPutMessageErrorEntry `locationName:"batchPutMessageErrorEntries" type:"list"` +} + +// String returns the string representation +func (s BatchPutMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutMessageOutput) GoString() string { + return s.String() +} + +// SetBatchPutMessageErrorEntries sets the BatchPutMessageErrorEntries field's value. +func (s *BatchPutMessageOutput) SetBatchPutMessageErrorEntries(v []*BatchPutMessageErrorEntry) *BatchPutMessageOutput { + s.BatchPutMessageErrorEntries = v + return s +} + +type CancelPipelineReprocessingInput struct { + _ struct{} `type:"structure"` + + // The name of pipeline for which data reprocessing is canceled. + // + // PipelineName is a required field + PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The ID of the reprocessing task (returned by "StartPipelineReprocessing"). + // + // ReprocessingId is a required field + ReprocessingId *string `location:"uri" locationName:"reprocessingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelPipelineReprocessingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelPipelineReprocessingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelPipelineReprocessingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelPipelineReprocessingInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.ReprocessingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReprocessingId")) + } + if s.ReprocessingId != nil && len(*s.ReprocessingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReprocessingId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPipelineName sets the PipelineName field's value. +func (s *CancelPipelineReprocessingInput) SetPipelineName(v string) *CancelPipelineReprocessingInput { + s.PipelineName = &v + return s +} + +// SetReprocessingId sets the ReprocessingId field's value. +func (s *CancelPipelineReprocessingInput) SetReprocessingId(v string) *CancelPipelineReprocessingInput { + s.ReprocessingId = &v + return s +} + +type CancelPipelineReprocessingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelPipelineReprocessingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelPipelineReprocessingOutput) GoString() string { + return s.String() +} + +// A collection of data from an MQTT topic. Channels archive the raw, unprocessed +// messages before publishing the data to a pipeline. +type Channel struct { + _ struct{} `type:"structure"` + + // The ARN of the channel. + Arn *string `locationName:"arn" type:"string"` + + // When the channel was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // When the channel was last updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The name of the channel. 
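+// A short sketch of canceling a reprocessing task with the input type above
+// (assumed client "svc"; the ID value would come from an earlier
+// StartPipelineReprocessing response):
+//
+//    _, err := svc.CancelPipelineReprocessing(&iotanalytics.CancelPipelineReprocessingInput{
+//        PipelineName:   aws.String("mypipeline"),
+//        ReprocessingId: aws.String(reprocessingID),
+//    })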
+ Name *string `locationName:"name" min:"1" type:"string"` + + // How long, in days, message data is kept for the channel. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // The status of the channel. + Status *string `locationName:"status" type:"string" enum:"ChannelStatus"` + + // Where channel data is stored. You may choose one of "serviceManagedS3" or + // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after creation of the channel. + Storage *ChannelStorage `locationName:"storage" type:"structure"` +} + +// String returns the string representation +func (s Channel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Channel) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Channel) SetArn(v string) *Channel { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Channel) SetCreationTime(v time.Time) *Channel { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *Channel) SetLastUpdateTime(v time.Time) *Channel { + s.LastUpdateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Channel) SetName(v string) *Channel { + s.Name = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *Channel) SetRetentionPeriod(v *RetentionPeriod) *Channel { + s.RetentionPeriod = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Channel) SetStatus(v string) *Channel { + s.Status = &v + return s +} + +// SetStorage sets the Storage field's value. +func (s *Channel) SetStorage(v *ChannelStorage) *Channel { + s.Storage = v + return s +} + +// The activity that determines the source of the messages to be processed. +type ChannelActivity struct { + _ struct{} `type:"structure"` + + // The name of the channel from which the messages are processed. + // + // ChannelName is a required field + ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"` + + // The name of the 'channel' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. + Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s ChannelActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChannelActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChannelActivity"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. 
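+// Validate above follows the SDK-wide client-side check pattern: each missing
+// required field or violated minimum length is collected into a
+// request.ErrInvalidParams value. A hypothetical sketch of invoking it
+// directly, before any request is built:
+//
+//    act := &iotanalytics.ChannelActivity{Name: aws.String("source")}
+//    if err := act.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing required field ChannelName
+//    }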
+func (s *ChannelActivity) SetChannelName(v string) *ChannelActivity { + s.ChannelName = &v + return s +} + +// SetName sets the Name field's value. +func (s *ChannelActivity) SetName(v string) *ChannelActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *ChannelActivity) SetNext(v string) *ChannelActivity { + s.Next = &v + return s +} + +// Statistics information about the channel. +type ChannelStatistics struct { + _ struct{} `type:"structure"` + + // The estimated size of the channel. + Size *EstimatedResourceSize `locationName:"size" type:"structure"` +} + +// String returns the string representation +func (s ChannelStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelStatistics) GoString() string { + return s.String() +} + +// SetSize sets the Size field's value. +func (s *ChannelStatistics) SetSize(v *EstimatedResourceSize) *ChannelStatistics { + s.Size = v + return s +} + +// Where channel data is stored. You may choose one of "serviceManagedS3" or +// "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". +// This cannot be changed after creation of the channel. +type ChannelStorage struct { + _ struct{} `type:"structure"` + + // Use this to store channel data in an S3 bucket that you manage. If customer + // managed storage is selected, the "retentionPeriod" parameter is ignored. + // The choice of service-managed or customer-managed S3 storage cannot be changed + // after creation of the channel. + CustomerManagedS3 *CustomerManagedChannelS3Storage `locationName:"customerManagedS3" type:"structure"` + + // Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics + // service. The choice of service-managed or customer-managed S3 storage cannot + // be changed after creation of the channel. + ServiceManagedS3 *ServiceManagedChannelS3Storage `locationName:"serviceManagedS3" type:"structure"` +} + +// String returns the string representation +func (s ChannelStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelStorage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChannelStorage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChannelStorage"} + if s.CustomerManagedS3 != nil { + if err := s.CustomerManagedS3.Validate(); err != nil { + invalidParams.AddNested("CustomerManagedS3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerManagedS3 sets the CustomerManagedS3 field's value. +func (s *ChannelStorage) SetCustomerManagedS3(v *CustomerManagedChannelS3Storage) *ChannelStorage { + s.CustomerManagedS3 = v + return s +} + +// SetServiceManagedS3 sets the ServiceManagedS3 field's value. +func (s *ChannelStorage) SetServiceManagedS3(v *ServiceManagedChannelS3Storage) *ChannelStorage { + s.ServiceManagedS3 = v + return s +} + +// Where channel data is stored. +type ChannelStorageSummary struct { + _ struct{} `type:"structure"` + + // Used to store channel data in an S3 bucket that you manage. + CustomerManagedS3 *CustomerManagedChannelS3StorageSummary `locationName:"customerManagedS3" type:"structure"` + + // Used to store channel data in an S3 bucket managed by the AWS IoT Analytics + // service. 
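+// A sketch of the customer-managed variant of the ChannelStorage type above;
+// the bucket, prefix, and role ARN are placeholders. Per the field docs, the
+// key prefix must end with '/':
+//
+//    storage := &iotanalytics.ChannelStorage{
+//        CustomerManagedS3: &iotanalytics.CustomerManagedChannelS3Storage{
+//            Bucket:    aws.String("my-channel-bucket"),
+//            KeyPrefix: aws.String("raw/"),
+//            RoleArn:   aws.String("arn:aws:iam::123456789012:role/my-iot-analytics-role"),
+//        },
+//    }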
+ ServiceManagedS3 *ServiceManagedChannelS3StorageSummary `locationName:"serviceManagedS3" type:"structure"` +} + +// String returns the string representation +func (s ChannelStorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelStorageSummary) GoString() string { + return s.String() +} + +// SetCustomerManagedS3 sets the CustomerManagedS3 field's value. +func (s *ChannelStorageSummary) SetCustomerManagedS3(v *CustomerManagedChannelS3StorageSummary) *ChannelStorageSummary { + s.CustomerManagedS3 = v + return s +} + +// SetServiceManagedS3 sets the ServiceManagedS3 field's value. +func (s *ChannelStorageSummary) SetServiceManagedS3(v *ServiceManagedChannelS3StorageSummary) *ChannelStorageSummary { + s.ServiceManagedS3 = v + return s +} + +// A summary of information about a channel. +type ChannelSummary struct { + _ struct{} `type:"structure"` + + // The name of the channel. + ChannelName *string `locationName:"channelName" min:"1" type:"string"` + + // Where channel data is stored. + ChannelStorage *ChannelStorageSummary `locationName:"channelStorage" type:"structure"` + + // When the channel was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The last time the channel was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The status of the channel. + Status *string `locationName:"status" type:"string" enum:"ChannelStatus"` +} + +// String returns the string representation +func (s ChannelSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelSummary) GoString() string { + return s.String() +} + +// SetChannelName sets the ChannelName field's value. +func (s *ChannelSummary) SetChannelName(v string) *ChannelSummary { + s.ChannelName = &v + return s +} + +// SetChannelStorage sets the ChannelStorage field's value. +func (s *ChannelSummary) SetChannelStorage(v *ChannelStorageSummary) *ChannelSummary { + s.ChannelStorage = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ChannelSummary) SetCreationTime(v time.Time) *ChannelSummary { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *ChannelSummary) SetLastUpdateTime(v time.Time) *ChannelSummary { + s.LastUpdateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ChannelSummary) SetStatus(v string) *ChannelSummary { + s.Status = &v + return s +} + +// Information needed to run the "containerAction" to produce data set contents. +type ContainerDatasetAction struct { + _ struct{} `type:"structure"` + + // The ARN of the role which gives permission to the system to access needed + // resources in order to run the "containerAction". This includes, at minimum, + // permission to retrieve the data set contents which are the input to the containerized + // application. + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `locationName:"executionRoleArn" min:"20" type:"string" required:"true"` + + // The ARN of the Docker container stored in your account. The Docker container + // contains an application and needed support libraries and is used to generate + // data set contents. + // + // Image is a required field + Image *string `locationName:"image" type:"string" required:"true"` + + // Configuration of the resource which executes the "containerAction". 
+ // + // ResourceConfiguration is a required field + ResourceConfiguration *ResourceConfiguration `locationName:"resourceConfiguration" type:"structure" required:"true"` + + // The values of variables used within the context of the execution of the containerized + // application (basically, parameters passed to the application). Each variable + // must have a name and a value given by one of "stringValue", "datasetContentVersionValue", + // or "outputFileUriValue". + Variables []*Variable `locationName:"variables" type:"list"` +} + +// String returns the string representation +func (s ContainerDatasetAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDatasetAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContainerDatasetAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerDatasetAction"} + if s.ExecutionRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionRoleArn")) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 20)) + } + if s.Image == nil { + invalidParams.Add(request.NewErrParamRequired("Image")) + } + if s.ResourceConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceConfiguration")) + } + if s.ResourceConfiguration != nil { + if err := s.ResourceConfiguration.Validate(); err != nil { + invalidParams.AddNested("ResourceConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Variables != nil { + for i, v := range s.Variables { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Variables", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *ContainerDatasetAction) SetExecutionRoleArn(v string) *ContainerDatasetAction { + s.ExecutionRoleArn = &v + return s +} + +// SetImage sets the Image field's value. +func (s *ContainerDatasetAction) SetImage(v string) *ContainerDatasetAction { + s.Image = &v + return s +} + +// SetResourceConfiguration sets the ResourceConfiguration field's value. +func (s *ContainerDatasetAction) SetResourceConfiguration(v *ResourceConfiguration) *ContainerDatasetAction { + s.ResourceConfiguration = v + return s +} + +// SetVariables sets the Variables field's value. +func (s *ContainerDatasetAction) SetVariables(v []*Variable) *ContainerDatasetAction { + s.Variables = v + return s +} + +type CreateChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the channel. + // + // ChannelName is a required field + ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"` + + // Where channel data is stored. You may choose one of "serviceManagedS3" or + // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after creation of the channel. + ChannelStorage *ChannelStorage `locationName:"channelStorage" type:"structure"` + + // How long, in days, message data is kept for the channel. When "customerManagedS3" + // storage is selected, this parameter is ignored. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // Metadata which can be used to manage the channel. 
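+// A sketch of the ContainerDatasetAction defined above; the image URI and
+// role ARN are placeholders, and the compute type and volume size follow the
+// ResourceConfiguration fields defined later in this file:
+//
+//    action := &iotanalytics.ContainerDatasetAction{
+//        ExecutionRoleArn: aws.String("arn:aws:iam::123456789012:role/dataset-container-role"),
+//        Image:            aws.String("123456789012.dkr.ecr.us-east-1.amazonaws.com/dataset-app:latest"),
+//        ResourceConfiguration: &iotanalytics.ResourceConfiguration{
+//            ComputeType:    aws.String("ACU_1"),
+//            VolumeSizeInGB: aws.Int64(2),
+//        },
+//    }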
+ Tags []*Tag `locationName:"tags" min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateChannelInput"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.ChannelStorage != nil { + if err := s.ChannelStorage.Validate(); err != nil { + invalidParams.AddNested("ChannelStorage", err.(request.ErrInvalidParams)) + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *CreateChannelInput) SetChannelName(v string) *CreateChannelInput { + s.ChannelName = &v + return s +} + +// SetChannelStorage sets the ChannelStorage field's value. +func (s *CreateChannelInput) SetChannelStorage(v *ChannelStorage) *CreateChannelInput { + s.ChannelStorage = v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CreateChannelInput) SetRetentionPeriod(v *RetentionPeriod) *CreateChannelInput { + s.RetentionPeriod = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateChannelInput) SetTags(v []*Tag) *CreateChannelInput { + s.Tags = v + return s +} + +type CreateChannelOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the channel. + ChannelArn *string `locationName:"channelArn" type:"string"` + + // The name of the channel. + ChannelName *string `locationName:"channelName" min:"1" type:"string"` + + // How long, in days, message data is kept for the channel. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` +} + +// String returns the string representation +func (s CreateChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateChannelOutput) GoString() string { + return s.String() +} + +// SetChannelArn sets the ChannelArn field's value. +func (s *CreateChannelOutput) SetChannelArn(v string) *CreateChannelOutput { + s.ChannelArn = &v + return s +} + +// SetChannelName sets the ChannelName field's value. +func (s *CreateChannelOutput) SetChannelName(v string) *CreateChannelOutput { + s.ChannelName = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CreateChannelOutput) SetRetentionPeriod(v *RetentionPeriod) *CreateChannelOutput { + s.RetentionPeriod = v + return s +} + +type CreateDatasetContentInput struct { + _ struct{} `type:"structure"` + + // The name of the data set. 
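+// A minimal sketch of creating a channel with the CreateChannelInput type
+// above (assumed client "svc"; thirty days of retention is an arbitrary
+// illustration):
+//
+//    out, err := svc.CreateChannel(&iotanalytics.CreateChannelInput{
+//        ChannelName: aws.String("mychannel"),
+//        RetentionPeriod: &iotanalytics.RetentionPeriod{
+//            NumberOfDays: aws.Int64(30),
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.ChannelArn))
+//    }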
+ // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDatasetContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetContentInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateDatasetContentInput) SetDatasetName(v string) *CreateDatasetContentInput { + s.DatasetName = &v + return s +} + +type CreateDatasetContentOutput struct { + _ struct{} `type:"structure"` + + // The version ID of the data set contents which are being created. + VersionId *string `locationName:"versionId" min:"7" type:"string"` +} + +// String returns the string representation +func (s CreateDatasetContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetContentOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *CreateDatasetContentOutput) SetVersionId(v string) *CreateDatasetContentOutput { + s.VersionId = &v + return s +} + +type CreateDatasetInput struct { + _ struct{} `type:"structure"` + + // A list of actions that create the data set contents. + // + // Actions is a required field + Actions []*DatasetAction `locationName:"actions" min:"1" type:"list" required:"true"` + + // When data set contents are created they are delivered to destinations specified + // here. + ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"` + + // The name of the data set. + // + // DatasetName is a required field + DatasetName *string `locationName:"datasetName" min:"1" type:"string" required:"true"` + + // [Optional] How long, in days, versions of data set contents are kept for + // the data set. If not specified or set to null, versions of data set contents + // are retained for at most 90 days. The number of versions of data set contents + // retained is determined by the versioningConfiguration parameter. (For more + // information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // Metadata which can be used to manage the data set. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` + + // A list of triggers. A trigger causes data set contents to be populated at + // a specified time interval or when another data set's contents are created. + // The list of triggers can be empty or contain up to five DataSetTrigger objects. + Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` + + // [Optional] How many versions of data set contents are kept. 
If not specified + // or set to null, only the latest version plus the latest succeeded version + // (if they are different) are kept for the time period specified by the "retentionPeriod" + // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"` +} + +// String returns the string representation +func (s CreateDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetInput"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) + } + if s.Actions != nil && len(s.Actions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Actions", 1)) + } + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ContentDeliveryRules != nil { + for i, v := range s.ContentDeliveryRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContentDeliveryRules", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Triggers != nil { + for i, v := range s.Triggers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Triggers", i), err.(request.ErrInvalidParams)) + } + } + } + if s.VersioningConfiguration != nil { + if err := s.VersioningConfiguration.Validate(); err != nil { + invalidParams.AddNested("VersioningConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *CreateDatasetInput) SetActions(v []*DatasetAction) *CreateDatasetInput { + s.Actions = v + return s +} + +// SetContentDeliveryRules sets the ContentDeliveryRules field's value. +func (s *CreateDatasetInput) SetContentDeliveryRules(v []*DatasetContentDeliveryRule) *CreateDatasetInput { + s.ContentDeliveryRules = v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateDatasetInput) SetDatasetName(v string) *CreateDatasetInput { + s.DatasetName = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. 
+func (s *CreateDatasetInput) SetRetentionPeriod(v *RetentionPeriod) *CreateDatasetInput { + s.RetentionPeriod = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDatasetInput) SetTags(v []*Tag) *CreateDatasetInput { + s.Tags = v + return s +} + +// SetTriggers sets the Triggers field's value. +func (s *CreateDatasetInput) SetTriggers(v []*DatasetTrigger) *CreateDatasetInput { + s.Triggers = v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *CreateDatasetInput) SetVersioningConfiguration(v *VersioningConfiguration) *CreateDatasetInput { + s.VersioningConfiguration = v + return s +} + +type CreateDatasetOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data set. + DatasetArn *string `locationName:"datasetArn" type:"string"` + + // The name of the data set. + DatasetName *string `locationName:"datasetName" min:"1" type:"string"` + + // How long, in days, data set contents are kept for the data set. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` +} + +// String returns the string representation +func (s CreateDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetOutput) GoString() string { + return s.String() +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetOutput) SetDatasetArn(v string) *CreateDatasetOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateDatasetOutput) SetDatasetName(v string) *CreateDatasetOutput { + s.DatasetName = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CreateDatasetOutput) SetRetentionPeriod(v *RetentionPeriod) *CreateDatasetOutput { + s.RetentionPeriod = v + return s +} + +type CreateDatastoreInput struct { + _ struct{} `type:"structure"` + + // The name of the data store. + // + // DatastoreName is a required field + DatastoreName *string `locationName:"datastoreName" min:"1" type:"string" required:"true"` + + // Where data store data is stored. You may choose one of "serviceManagedS3" + // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after the data store is created. + DatastoreStorage *DatastoreStorage `locationName:"datastoreStorage" type:"structure"` + + // How long, in days, message data is kept for the data store. When "customerManagedS3" + // storage is selected, this parameter is ignored. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // Metadata which can be used to manage the data store. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
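+// A sketch of the CreateDataset call built from the CreateDatasetInput fields
+// described above, combining one SQL query action with a daily schedule
+// trigger; the query text and cron expression are illustrative only:
+//
+//    _, err := svc.CreateDataset(&iotanalytics.CreateDatasetInput{
+//        DatasetName: aws.String("mydataset"),
+//        Actions: []*iotanalytics.DatasetAction{{
+//            ActionName: aws.String("query"),
+//            QueryAction: &iotanalytics.SqlQueryDatasetAction{
+//                SqlQuery: aws.String("SELECT * FROM mydatastore"),
+//            },
+//        }},
+//        Triggers: []*iotanalytics.DatasetTrigger{{
+//            Schedule: &iotanalytics.Schedule{Expression: aws.String("cron(0 12 * * ? *)")},
+//        }},
+//    })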
+func (s *CreateDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatastoreInput"} + if s.DatastoreName == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreName")) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.DatastoreStorage != nil { + if err := s.DatastoreStorage.Validate(); err != nil { + invalidParams.AddNested("DatastoreStorage", err.(request.ErrInvalidParams)) + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *CreateDatastoreInput) SetDatastoreName(v string) *CreateDatastoreInput { + s.DatastoreName = &v + return s +} + +// SetDatastoreStorage sets the DatastoreStorage field's value. +func (s *CreateDatastoreInput) SetDatastoreStorage(v *DatastoreStorage) *CreateDatastoreInput { + s.DatastoreStorage = v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CreateDatastoreInput) SetRetentionPeriod(v *RetentionPeriod) *CreateDatastoreInput { + s.RetentionPeriod = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDatastoreInput) SetTags(v []*Tag) *CreateDatastoreInput { + s.Tags = v + return s +} + +type CreateDatastoreOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data store. + DatastoreArn *string `locationName:"datastoreArn" type:"string"` + + // The name of the data store. + DatastoreName *string `locationName:"datastoreName" min:"1" type:"string"` + + // How long, in days, message data is kept for the data store. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` +} + +// String returns the string representation +func (s CreateDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatastoreOutput) GoString() string { + return s.String() +} + +// SetDatastoreArn sets the DatastoreArn field's value. +func (s *CreateDatastoreOutput) SetDatastoreArn(v string) *CreateDatastoreOutput { + s.DatastoreArn = &v + return s +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *CreateDatastoreOutput) SetDatastoreName(v string) *CreateDatastoreOutput { + s.DatastoreName = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *CreateDatastoreOutput) SetRetentionPeriod(v *RetentionPeriod) *CreateDatastoreOutput { + s.RetentionPeriod = v + return s +} + +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // A list of "PipelineActivity" objects. Activities perform transformations + // on your messages, such as removing, renaming or adding message attributes; + // filtering messages based on attribute values; invoking your Lambda functions + // on messages for advanced processing; or performing mathematical transformations + // to normalize device data. 
+ // + // The list can be 2-25 PipelineActivity objects and must contain both a channel + // and a datastore activity. Each entry in the list must contain only one activity, + // for example: + // + // pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } }, ... + // ] + // + // PipelineActivities is a required field + PipelineActivities []*PipelineActivity `locationName:"pipelineActivities" min:"1" type:"list" required:"true"` + + // The name of the pipeline. + // + // PipelineName is a required field + PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // Metadata which can be used to manage the pipeline. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePipelineInput"} + if s.PipelineActivities == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineActivities")) + } + if s.PipelineActivities != nil && len(s.PipelineActivities) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineActivities", 1)) + } + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.PipelineActivities != nil { + for i, v := range s.PipelineActivities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PipelineActivities", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPipelineActivities sets the PipelineActivities field's value. +func (s *CreatePipelineInput) SetPipelineActivities(v []*PipelineActivity) *CreatePipelineInput { + s.PipelineActivities = v + return s +} + +// SetPipelineName sets the PipelineName field's value. +func (s *CreatePipelineInput) SetPipelineName(v string) *CreatePipelineInput { + s.PipelineName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreatePipelineInput) SetTags(v []*Tag) *CreatePipelineInput { + s.Tags = v + return s +} + +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the pipeline. + PipelineArn *string `locationName:"pipelineArn" type:"string"` + + // The name of the pipeline. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// SetPipelineArn sets the PipelineArn field's value. 
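+// A sketch matching the pipelineActivities contract above: one channel source
+// and one datastore sink, chained via Next. All names are placeholders and
+// the client value "svc" is assumed:
+//
+//    acts := []*iotanalytics.PipelineActivity{
+//        {Channel: &iotanalytics.ChannelActivity{
+//            Name:        aws.String("source"),
+//            ChannelName: aws.String("mychannel"),
+//            Next:        aws.String("store"),
+//        }},
+//        {Datastore: &iotanalytics.DatastoreActivity{
+//            Name:          aws.String("store"),
+//            DatastoreName: aws.String("mydatastore"),
+//        }},
+//    }
+//    _, err := svc.CreatePipeline(&iotanalytics.CreatePipelineInput{
+//        PipelineName:       aws.String("mypipeline"),
+//        PipelineActivities: acts,
+//        Tags:               []*iotanalytics.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//    })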
+func (s *CreatePipelineOutput) SetPipelineArn(v string) *CreatePipelineOutput { + s.PipelineArn = &v + return s +} + +// SetPipelineName sets the PipelineName field's value. +func (s *CreatePipelineOutput) SetPipelineName(v string) *CreatePipelineOutput { + s.PipelineName = &v + return s +} + +// Use this to store channel data in an S3 bucket that you manage. If customer +// managed storage is selected, the "retentionPeriod" parameter is ignored. +// The choice of service-managed or customer-managed S3 storage cannot be changed +// after creation of the channel. +type CustomerManagedChannelS3Storage struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket in which channel data is stored. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` + + // [Optional] The prefix used to create the keys of the channel data objects. + // Each object in an Amazon S3 bucket has a key that is its unique identifier + // within the bucket (each object in a bucket has exactly one key). The prefix + // must end with a '/'. + KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` + + // The ARN of the role which grants AWS IoT Analytics permission to interact + // with your Amazon S3 resources. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s CustomerManagedChannelS3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerManagedChannelS3Storage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomerManagedChannelS3Storage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomerManagedChannelS3Storage"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.KeyPrefix != nil && len(*s.KeyPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyPrefix", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CustomerManagedChannelS3Storage) SetBucket(v string) *CustomerManagedChannelS3Storage { + s.Bucket = &v + return s +} + +// SetKeyPrefix sets the KeyPrefix field's value. +func (s *CustomerManagedChannelS3Storage) SetKeyPrefix(v string) *CustomerManagedChannelS3Storage { + s.KeyPrefix = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CustomerManagedChannelS3Storage) SetRoleArn(v string) *CustomerManagedChannelS3Storage { + s.RoleArn = &v + return s +} + +// Used to store channel data in an S3 bucket that you manage. +type CustomerManagedChannelS3StorageSummary struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket in which channel data is stored. + Bucket *string `locationName:"bucket" min:"3" type:"string"` + + // [Optional] The prefix used to create the keys of the channel data objects. 
+ // Each object in an Amazon S3 bucket has a key that is its unique identifier + // within the bucket (each object in a bucket has exactly one key). The prefix + // must end with a '/'. + KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` + + // The ARN of the role which grants AWS IoT Analytics permission to interact + // with your Amazon S3 resources. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` +} + +// String returns the string representation +func (s CustomerManagedChannelS3StorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerManagedChannelS3StorageSummary) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CustomerManagedChannelS3StorageSummary) SetBucket(v string) *CustomerManagedChannelS3StorageSummary { + s.Bucket = &v + return s +} + +// SetKeyPrefix sets the KeyPrefix field's value. +func (s *CustomerManagedChannelS3StorageSummary) SetKeyPrefix(v string) *CustomerManagedChannelS3StorageSummary { + s.KeyPrefix = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CustomerManagedChannelS3StorageSummary) SetRoleArn(v string) *CustomerManagedChannelS3StorageSummary { + s.RoleArn = &v + return s +} + +// Use this to store data store data in an S3 bucket that you manage. When customer +// managed storage is selected, the "retentionPeriod" parameter is ignored. +// The choice of service-managed or customer-managed S3 storage cannot be changed +// after creation of the data store. +type CustomerManagedDatastoreS3Storage struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket in which data store data is stored. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` + + // [Optional] The prefix used to create the keys of the data store data objects. + // Each object in an Amazon S3 bucket has a key that is its unique identifier + // within the bucket (each object in a bucket has exactly one key). The prefix + // must end with a '/'. + KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` + + // The ARN of the role which grants AWS IoT Analytics permission to interact + // with your Amazon S3 resources. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s CustomerManagedDatastoreS3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerManagedDatastoreS3Storage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CustomerManagedDatastoreS3Storage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomerManagedDatastoreS3Storage"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.KeyPrefix != nil && len(*s.KeyPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyPrefix", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CustomerManagedDatastoreS3Storage) SetBucket(v string) *CustomerManagedDatastoreS3Storage { + s.Bucket = &v + return s +} + +// SetKeyPrefix sets the KeyPrefix field's value. +func (s *CustomerManagedDatastoreS3Storage) SetKeyPrefix(v string) *CustomerManagedDatastoreS3Storage { + s.KeyPrefix = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CustomerManagedDatastoreS3Storage) SetRoleArn(v string) *CustomerManagedDatastoreS3Storage { + s.RoleArn = &v + return s +} + +// Used to store data store data in an S3 bucket that you manage. +type CustomerManagedDatastoreS3StorageSummary struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket in which data store data is stored. + Bucket *string `locationName:"bucket" min:"3" type:"string"` + + // [Optional] The prefix used to create the keys of the data store data objects. + // Each object in an Amazon S3 bucket has a key that is its unique identifier + // within the bucket (each object in a bucket has exactly one key). The prefix + // must end with a '/'. + KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` + + // The ARN of the role which grants AWS IoT Analytics permission to interact + // with your Amazon S3 resources. + RoleArn *string `locationName:"roleArn" min:"20" type:"string"` +} + +// String returns the string representation +func (s CustomerManagedDatastoreS3StorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerManagedDatastoreS3StorageSummary) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CustomerManagedDatastoreS3StorageSummary) SetBucket(v string) *CustomerManagedDatastoreS3StorageSummary { + s.Bucket = &v + return s +} + +// SetKeyPrefix sets the KeyPrefix field's value. +func (s *CustomerManagedDatastoreS3StorageSummary) SetKeyPrefix(v string) *CustomerManagedDatastoreS3StorageSummary { + s.KeyPrefix = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CustomerManagedDatastoreS3StorageSummary) SetRoleArn(v string) *CustomerManagedDatastoreS3StorageSummary { + s.RoleArn = &v + return s +} + +// Information about a data set. +type Dataset struct { + _ struct{} `type:"structure"` + + // The "DatasetAction" objects that automatically create the data set contents. + Actions []*DatasetAction `locationName:"actions" min:"1" type:"list"` + + // The ARN of the data set. + Arn *string `locationName:"arn" type:"string"` + + // When data set contents are created they are delivered to destinations specified + // here. 
+ ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"` + + // When the data set was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The last time the data set was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The name of the data set. + Name *string `locationName:"name" min:"1" type:"string"` + + // [Optional] How long, in days, message data is kept for the data set. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // The status of the data set. + Status *string `locationName:"status" type:"string" enum:"DatasetStatus"` + + // The "DatasetTrigger" objects that specify when the data set is automatically + // updated. + Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` + + // [Optional] How many versions of data set contents are kept. If not specified + // or set to null, only the latest version plus the latest succeeded version + // (if they are different) are kept for the time period specified by the "retentionPeriod" + // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"` +} + +// String returns the string representation +func (s Dataset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dataset) GoString() string { + return s.String() +} + +// SetActions sets the Actions field's value. +func (s *Dataset) SetActions(v []*DatasetAction) *Dataset { + s.Actions = v + return s +} + +// SetArn sets the Arn field's value. +func (s *Dataset) SetArn(v string) *Dataset { + s.Arn = &v + return s +} + +// SetContentDeliveryRules sets the ContentDeliveryRules field's value. +func (s *Dataset) SetContentDeliveryRules(v []*DatasetContentDeliveryRule) *Dataset { + s.ContentDeliveryRules = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Dataset) SetCreationTime(v time.Time) *Dataset { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *Dataset) SetLastUpdateTime(v time.Time) *Dataset { + s.LastUpdateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Dataset) SetName(v string) *Dataset { + s.Name = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *Dataset) SetRetentionPeriod(v *RetentionPeriod) *Dataset { + s.RetentionPeriod = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Dataset) SetStatus(v string) *Dataset { + s.Status = &v + return s +} + +// SetTriggers sets the Triggers field's value. +func (s *Dataset) SetTriggers(v []*DatasetTrigger) *Dataset { + s.Triggers = v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *Dataset) SetVersioningConfiguration(v *VersioningConfiguration) *Dataset { + s.VersioningConfiguration = v + return s +} + +// A "DatasetAction" object that specifies how data set contents are automatically +// created. +type DatasetAction struct { + _ struct{} `type:"structure"` + + // The name of the data set action by which data set contents are automatically + // created. 
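+	//
+	// A hedged sketch of a query-based action (the action name and SQL are
+	// hypothetical, and SetSqlQuery is assumed from the generated setter
+	// pattern for SqlQueryDatasetAction):
+	//
+	//	action := (&DatasetAction{}).
+	//		SetActionName("sqlAction").
+	//		SetQueryAction((&SqlQueryDatasetAction{}).
+	//			SetSqlQuery("SELECT * FROM example_datastore"))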
+ ActionName *string `locationName:"actionName" min:"1" type:"string"` + + // Information which allows the system to run a containerized application in + // order to create the data set contents. The application must be in a Docker + // container along with any needed support libraries. + ContainerAction *ContainerDatasetAction `locationName:"containerAction" type:"structure"` + + // An "SqlQueryDatasetAction" object that uses an SQL query to automatically + // create data set contents. + QueryAction *SqlQueryDatasetAction `locationName:"queryAction" type:"structure"` +} + +// String returns the string representation +func (s DatasetAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatasetAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatasetAction"} + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + if s.ContainerAction != nil { + if err := s.ContainerAction.Validate(); err != nil { + invalidParams.AddNested("ContainerAction", err.(request.ErrInvalidParams)) + } + } + if s.QueryAction != nil { + if err := s.QueryAction.Validate(); err != nil { + invalidParams.AddNested("QueryAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionName sets the ActionName field's value. +func (s *DatasetAction) SetActionName(v string) *DatasetAction { + s.ActionName = &v + return s +} + +// SetContainerAction sets the ContainerAction field's value. +func (s *DatasetAction) SetContainerAction(v *ContainerDatasetAction) *DatasetAction { + s.ContainerAction = v + return s +} + +// SetQueryAction sets the QueryAction field's value. +func (s *DatasetAction) SetQueryAction(v *SqlQueryDatasetAction) *DatasetAction { + s.QueryAction = v + return s +} + +// Information about the action which automatically creates the data set's contents. +type DatasetActionSummary struct { + _ struct{} `type:"structure"` + + // The name of the action which automatically creates the data set's contents. + ActionName *string `locationName:"actionName" min:"1" type:"string"` + + // The type of action by which the data set's contents are automatically created. + ActionType *string `locationName:"actionType" type:"string" enum:"DatasetActionType"` +} + +// String returns the string representation +func (s DatasetActionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetActionSummary) GoString() string { + return s.String() +} + +// SetActionName sets the ActionName field's value. +func (s *DatasetActionSummary) SetActionName(v string) *DatasetActionSummary { + s.ActionName = &v + return s +} + +// SetActionType sets the ActionType field's value. +func (s *DatasetActionSummary) SetActionType(v string) *DatasetActionSummary { + s.ActionType = &v + return s +} + +// The destination to which data set contents are delivered. +type DatasetContentDeliveryDestination struct { + _ struct{} `type:"structure"` + + // Configuration information for delivery of data set contents to AWS IoT Events. 
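+	//
+	// A hedged sketch (the input name and role ARN are hypothetical, and the
+	// setters are assumed from the generated pattern for this shape):
+	//
+	//	dest := (&DatasetContentDeliveryDestination{}).
+	//		SetIotEventsDestinationConfiguration((&IotEventsDestinationConfiguration{}).
+	//			SetInputName("example_input").
+	//			SetRoleArn("arn:aws:iam::123456789012:role/ExampleRole"))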
+ IotEventsDestinationConfiguration *IotEventsDestinationConfiguration `locationName:"iotEventsDestinationConfiguration" type:"structure"` + + // Configuration information for delivery of data set contents to Amazon S3. + S3DestinationConfiguration *S3DestinationConfiguration `locationName:"s3DestinationConfiguration" type:"structure"` +} + +// String returns the string representation +func (s DatasetContentDeliveryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetContentDeliveryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatasetContentDeliveryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatasetContentDeliveryDestination"} + if s.IotEventsDestinationConfiguration != nil { + if err := s.IotEventsDestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("IotEventsDestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.S3DestinationConfiguration != nil { + if err := s.S3DestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("S3DestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIotEventsDestinationConfiguration sets the IotEventsDestinationConfiguration field's value. +func (s *DatasetContentDeliveryDestination) SetIotEventsDestinationConfiguration(v *IotEventsDestinationConfiguration) *DatasetContentDeliveryDestination { + s.IotEventsDestinationConfiguration = v + return s +} + +// SetS3DestinationConfiguration sets the S3DestinationConfiguration field's value. +func (s *DatasetContentDeliveryDestination) SetS3DestinationConfiguration(v *S3DestinationConfiguration) *DatasetContentDeliveryDestination { + s.S3DestinationConfiguration = v + return s +} + +// When data set contents are created they are delivered to destination specified +// here. +type DatasetContentDeliveryRule struct { + _ struct{} `type:"structure"` + + // The destination to which data set contents are delivered. + // + // Destination is a required field + Destination *DatasetContentDeliveryDestination `locationName:"destination" type:"structure" required:"true"` + + // The name of the data set content delivery rules entry. + EntryName *string `locationName:"entryName" type:"string"` +} + +// String returns the string representation +func (s DatasetContentDeliveryRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetContentDeliveryRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatasetContentDeliveryRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatasetContentDeliveryRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *DatasetContentDeliveryRule) SetDestination(v *DatasetContentDeliveryDestination) *DatasetContentDeliveryRule { + s.Destination = v + return s +} + +// SetEntryName sets the EntryName field's value. 
+func (s *DatasetContentDeliveryRule) SetEntryName(v string) *DatasetContentDeliveryRule { + s.EntryName = &v + return s +} + +// The state of the data set contents and the reason they are in this state. +type DatasetContentStatus struct { + _ struct{} `type:"structure"` + + // The reason the data set contents are in this state. + Reason *string `locationName:"reason" type:"string"` + + // The state of the data set contents. Can be one of "READY", "CREATING", "SUCCEEDED" + // or "FAILED". + State *string `locationName:"state" type:"string" enum:"DatasetContentState"` +} + +// String returns the string representation +func (s DatasetContentStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetContentStatus) GoString() string { + return s.String() +} + +// SetReason sets the Reason field's value. +func (s *DatasetContentStatus) SetReason(v string) *DatasetContentStatus { + s.Reason = &v + return s +} + +// SetState sets the State field's value. +func (s *DatasetContentStatus) SetState(v string) *DatasetContentStatus { + s.State = &v + return s +} + +// Summary information about data set contents. +type DatasetContentSummary struct { + _ struct{} `type:"structure"` + + // The time the dataset content status was updated to SUCCEEDED or FAILED. + CompletionTime *time.Time `locationName:"completionTime" type:"timestamp"` + + // The actual time the creation of the data set contents was started. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The time the creation of the data set contents was scheduled to start. + ScheduleTime *time.Time `locationName:"scheduleTime" type:"timestamp"` + + // The status of the data set contents. + Status *DatasetContentStatus `locationName:"status" type:"structure"` + + // The version of the data set contents. + Version *string `locationName:"version" min:"7" type:"string"` +} + +// String returns the string representation +func (s DatasetContentSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetContentSummary) GoString() string { + return s.String() +} + +// SetCompletionTime sets the CompletionTime field's value. +func (s *DatasetContentSummary) SetCompletionTime(v time.Time) *DatasetContentSummary { + s.CompletionTime = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DatasetContentSummary) SetCreationTime(v time.Time) *DatasetContentSummary { + s.CreationTime = &v + return s +} + +// SetScheduleTime sets the ScheduleTime field's value. +func (s *DatasetContentSummary) SetScheduleTime(v time.Time) *DatasetContentSummary { + s.ScheduleTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetContentSummary) SetStatus(v *DatasetContentStatus) *DatasetContentSummary { + s.Status = v + return s +} + +// SetVersion sets the Version field's value. +func (s *DatasetContentSummary) SetVersion(v string) *DatasetContentSummary { + s.Version = &v + return s +} + +// The data set whose latest contents are used as input to the notebook or application. +type DatasetContentVersionValue struct { + _ struct{} `type:"structure"` + + // The name of the data set whose latest contents are used as input to the notebook + // or application. 
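+	//
+	// A hedged sketch (the data set name is hypothetical):
+	//
+	//	v := (&DatasetContentVersionValue{}).SetDatasetName("example_dataset")
+	//	if err := v.Validate(); err != nil {
+	//		// handle a missing or too-short name
+	//	}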
+ // + // DatasetName is a required field + DatasetName *string `locationName:"datasetName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DatasetContentVersionValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetContentVersionValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatasetContentVersionValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatasetContentVersionValue"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DatasetContentVersionValue) SetDatasetName(v string) *DatasetContentVersionValue { + s.DatasetName = &v + return s +} + +// The reference to a data set entry. +type DatasetEntry struct { + _ struct{} `type:"structure"` + + // The pre-signed URI of the data set item. + DataURI *string `locationName:"dataURI" type:"string"` + + // The name of the data set item. + EntryName *string `locationName:"entryName" type:"string"` +} + +// String returns the string representation +func (s DatasetEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetEntry) GoString() string { + return s.String() +} + +// SetDataURI sets the DataURI field's value. +func (s *DatasetEntry) SetDataURI(v string) *DatasetEntry { + s.DataURI = &v + return s +} + +// SetEntryName sets the EntryName field's value. +func (s *DatasetEntry) SetEntryName(v string) *DatasetEntry { + s.EntryName = &v + return s +} + +// A summary of information about a data set. +type DatasetSummary struct { + _ struct{} `type:"structure"` + + // A list of "DataActionSummary" objects. + Actions []*DatasetActionSummary `locationName:"actions" min:"1" type:"list"` + + // The time the data set was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The name of the data set. + DatasetName *string `locationName:"datasetName" min:"1" type:"string"` + + // The last time the data set was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The status of the data set. + Status *string `locationName:"status" type:"string" enum:"DatasetStatus"` + + // A list of triggers. A trigger causes data set content to be populated at + // a specified time interval or when another data set is populated. The list + // of triggers can be empty or contain up to five DataSetTrigger objects + Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` +} + +// String returns the string representation +func (s DatasetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetSummary) GoString() string { + return s.String() +} + +// SetActions sets the Actions field's value. +func (s *DatasetSummary) SetActions(v []*DatasetActionSummary) *DatasetSummary { + s.Actions = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DatasetSummary) SetCreationTime(v time.Time) *DatasetSummary { + s.CreationTime = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. 
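+//
+// As with the other setters here, the receiver is returned so calls can be
+// chained. A hedged sketch of populating the Triggers field (the schedule
+// expression is hypothetical, and Schedule.SetExpression is assumed from the
+// generated setter pattern):
+//
+//	sum := (&DatasetSummary{}).SetDatasetName("example_dataset").
+//		SetTriggers([]*DatasetTrigger{
+//			(&DatasetTrigger{}).SetSchedule((&Schedule{}).SetExpression("rate(1 hour)")),
+//		})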
+func (s *DatasetSummary) SetDatasetName(v string) *DatasetSummary { + s.DatasetName = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *DatasetSummary) SetLastUpdateTime(v time.Time) *DatasetSummary { + s.LastUpdateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetSummary) SetStatus(v string) *DatasetSummary { + s.Status = &v + return s +} + +// SetTriggers sets the Triggers field's value. +func (s *DatasetSummary) SetTriggers(v []*DatasetTrigger) *DatasetSummary { + s.Triggers = v + return s +} + +// The "DatasetTrigger" that specifies when the data set is automatically updated. +type DatasetTrigger struct { + _ struct{} `type:"structure"` + + // The data set whose content creation triggers the creation of this data set's + // contents. + Dataset *TriggeringDataset `locationName:"dataset" type:"structure"` + + // The "Schedule" when the trigger is initiated. + Schedule *Schedule `locationName:"schedule" type:"structure"` +} + +// String returns the string representation +func (s DatasetTrigger) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetTrigger) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatasetTrigger) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatasetTrigger"} + if s.Dataset != nil { + if err := s.Dataset.Validate(); err != nil { + invalidParams.AddNested("Dataset", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataset sets the Dataset field's value. +func (s *DatasetTrigger) SetDataset(v *TriggeringDataset) *DatasetTrigger { + s.Dataset = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *DatasetTrigger) SetSchedule(v *Schedule) *DatasetTrigger { + s.Schedule = v + return s +} + +// Information about a data store. +type Datastore struct { + _ struct{} `type:"structure"` + + // The ARN of the data store. + Arn *string `locationName:"arn" type:"string"` + + // When the data store was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The last time the data store was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The name of the data store. + Name *string `locationName:"name" min:"1" type:"string"` + + // How long, in days, message data is kept for the data store. When "customerManagedS3" + // storage is selected, this parameter is ignored. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` + + // The status of a data store: + // + // CREATING + // + // The data store is being created. + // + // ACTIVE + // + // The data store has been created and can be used. + // + // DELETING + // + // The data store is being deleted. + Status *string `locationName:"status" type:"string" enum:"DatastoreStatus"` + + // Where data store data is stored. You may choose one of "serviceManagedS3" + // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after the data store is created. 
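+	//
+	// A hedged sketch of making the service-managed default explicit:
+	//
+	//	ds := (&Datastore{}).SetStorage((&DatastoreStorage{}).
+	//		SetServiceManagedS3(&ServiceManagedDatastoreS3Storage{}))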
+ Storage *DatastoreStorage `locationName:"storage" type:"structure"` +} + +// String returns the string representation +func (s Datastore) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datastore) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Datastore) SetArn(v string) *Datastore { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Datastore) SetCreationTime(v time.Time) *Datastore { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *Datastore) SetLastUpdateTime(v time.Time) *Datastore { + s.LastUpdateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Datastore) SetName(v string) *Datastore { + s.Name = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *Datastore) SetRetentionPeriod(v *RetentionPeriod) *Datastore { + s.RetentionPeriod = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Datastore) SetStatus(v string) *Datastore { + s.Status = &v + return s +} + +// SetStorage sets the Storage field's value. +func (s *Datastore) SetStorage(v *DatastoreStorage) *Datastore { + s.Storage = v + return s +} + +// The 'datastore' activity that specifies where to store the processed data. +type DatastoreActivity struct { + _ struct{} `type:"structure"` + + // The name of the data store where processed messages are stored. + // + // DatastoreName is a required field + DatastoreName *string `locationName:"datastoreName" min:"1" type:"string" required:"true"` + + // The name of the 'datastore' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DatastoreActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatastoreActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatastoreActivity"} + if s.DatastoreName == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreName")) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *DatastoreActivity) SetDatastoreName(v string) *DatastoreActivity { + s.DatastoreName = &v + return s +} + +// SetName sets the Name field's value. +func (s *DatastoreActivity) SetName(v string) *DatastoreActivity { + s.Name = &v + return s +} + +// Statistical information about the data store. +type DatastoreStatistics struct { + _ struct{} `type:"structure"` + + // The estimated size of the data store. 
+ Size *EstimatedResourceSize `locationName:"size" type:"structure"` +} + +// String returns the string representation +func (s DatastoreStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreStatistics) GoString() string { + return s.String() +} + +// SetSize sets the Size field's value. +func (s *DatastoreStatistics) SetSize(v *EstimatedResourceSize) *DatastoreStatistics { + s.Size = v + return s +} + +// Where data store data is stored. You may choose one of "serviceManagedS3" +// or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". +// This cannot be changed after the data store is created. +type DatastoreStorage struct { + _ struct{} `type:"structure"` + + // Use this to store data store data in an S3 bucket that you manage. When customer + // managed storage is selected, the "retentionPeriod" parameter is ignored. + // The choice of service-managed or customer-managed S3 storage cannot be changed + // after creation of the data store. + CustomerManagedS3 *CustomerManagedDatastoreS3Storage `locationName:"customerManagedS3" type:"structure"` + + // Use this to store data store data in an S3 bucket managed by the AWS IoT + // Analytics service. The choice of service-managed or customer-managed S3 storage + // cannot be changed after creation of the data store. + ServiceManagedS3 *ServiceManagedDatastoreS3Storage `locationName:"serviceManagedS3" type:"structure"` +} + +// String returns the string representation +func (s DatastoreStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreStorage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatastoreStorage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatastoreStorage"} + if s.CustomerManagedS3 != nil { + if err := s.CustomerManagedS3.Validate(); err != nil { + invalidParams.AddNested("CustomerManagedS3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerManagedS3 sets the CustomerManagedS3 field's value. +func (s *DatastoreStorage) SetCustomerManagedS3(v *CustomerManagedDatastoreS3Storage) *DatastoreStorage { + s.CustomerManagedS3 = v + return s +} + +// SetServiceManagedS3 sets the ServiceManagedS3 field's value. +func (s *DatastoreStorage) SetServiceManagedS3(v *ServiceManagedDatastoreS3Storage) *DatastoreStorage { + s.ServiceManagedS3 = v + return s +} + +// Where data store data is stored. +type DatastoreStorageSummary struct { + _ struct{} `type:"structure"` + + // Used to store data store data in an S3 bucket that you manage. + CustomerManagedS3 *CustomerManagedDatastoreS3StorageSummary `locationName:"customerManagedS3" type:"structure"` + + // Used to store data store data in an S3 bucket managed by the AWS IoT Analytics + // service. + ServiceManagedS3 *ServiceManagedDatastoreS3StorageSummary `locationName:"serviceManagedS3" type:"structure"` +} + +// String returns the string representation +func (s DatastoreStorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreStorageSummary) GoString() string { + return s.String() +} + +// SetCustomerManagedS3 sets the CustomerManagedS3 field's value. 
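+//
+// Exactly one of the two storage summaries is expected to be populated; a
+// hedged sketch of inspecting which one (sum is a hypothetical
+// *DatastoreStorageSummary):
+//
+//	switch {
+//	case sum.CustomerManagedS3 != nil:
+//		// customer-managed bucket; see sum.CustomerManagedS3.Bucket
+//	case sum.ServiceManagedS3 != nil:
+//		// service-managed storage
+//	}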
+func (s *DatastoreStorageSummary) SetCustomerManagedS3(v *CustomerManagedDatastoreS3StorageSummary) *DatastoreStorageSummary { + s.CustomerManagedS3 = v + return s +} + +// SetServiceManagedS3 sets the ServiceManagedS3 field's value. +func (s *DatastoreStorageSummary) SetServiceManagedS3(v *ServiceManagedDatastoreS3StorageSummary) *DatastoreStorageSummary { + s.ServiceManagedS3 = v + return s +} + +// A summary of information about a data store. +type DatastoreSummary struct { + _ struct{} `type:"structure"` + + // When the data store was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The name of the data store. + DatastoreName *string `locationName:"datastoreName" min:"1" type:"string"` + + // Where data store data is stored. + DatastoreStorage *DatastoreStorageSummary `locationName:"datastoreStorage" type:"structure"` + + // The last time the data store was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The status of the data store. + Status *string `locationName:"status" type:"string" enum:"DatastoreStatus"` +} + +// String returns the string representation +func (s DatastoreSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DatastoreSummary) SetCreationTime(v time.Time) *DatastoreSummary { + s.CreationTime = &v + return s +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *DatastoreSummary) SetDatastoreName(v string) *DatastoreSummary { + s.DatastoreName = &v + return s +} + +// SetDatastoreStorage sets the DatastoreStorage field's value. +func (s *DatastoreSummary) SetDatastoreStorage(v *DatastoreStorageSummary) *DatastoreSummary { + s.DatastoreStorage = v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *DatastoreSummary) SetLastUpdateTime(v time.Time) *DatastoreSummary { + s.LastUpdateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatastoreSummary) SetStatus(v string) *DatastoreSummary { + s.Status = &v + return s +} + +type DeleteChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the channel to delete. + // + // ChannelName is a required field + ChannelName *string `location:"uri" locationName:"channelName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteChannelInput"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. 
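+//
+// A hedged end-to-end sketch (client is a hypothetical *IoTAnalytics service
+// client; the channel name is a placeholder):
+//
+//	in := (&DeleteChannelInput{}).SetChannelName("example_channel")
+//	if err := in.Validate(); err != nil {
+//		return err
+//	}
+//	_, err := client.DeleteChannel(in)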
+func (s *DeleteChannelInput) SetChannelName(v string) *DeleteChannelInput { + s.ChannelName = &v + return s +} + +type DeleteChannelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChannelOutput) GoString() string { + return s.String() +} + +type DeleteDatasetContentInput struct { + _ struct{} `type:"structure"` + + // The name of the data set whose content is deleted. + // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` + + // The version of the data set whose content is deleted. You can also use the + // strings "$LATEST" or "$LATEST_SUCCEEDED" to delete the latest or latest successfully + // completed data set. If not specified, "$LATEST_SUCCEEDED" is the default. + VersionId *string `location:"querystring" locationName:"versionId" min:"7" type:"string"` +} + +// String returns the string representation +func (s DeleteDatasetContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetContentInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.VersionId != nil && len(*s.VersionId) < 7 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 7)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DeleteDatasetContentInput) SetDatasetName(v string) *DeleteDatasetContentInput { + s.DatasetName = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteDatasetContentInput) SetVersionId(v string) *DeleteDatasetContentInput { + s.VersionId = &v + return s +} + +type DeleteDatasetContentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetContentOutput) GoString() string { + return s.String() +} + +type DeleteDatasetInput struct { + _ struct{} `type:"structure"` + + // The name of the data set to delete. + // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
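+//
+// A hedged sketch; the same pre-flight check applies to the content-deletion
+// input above, where the version may be given symbolically:
+//
+//	in := (&DeleteDatasetContentInput{}).
+//		SetDatasetName("example_dataset").
+//		SetVersionId("$LATEST_SUCCEEDED")
+//	err := in.Validate() // nil: both fields satisfy the minimum lengths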
+func (s *DeleteDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DeleteDatasetInput) SetDatasetName(v string) *DeleteDatasetInput { + s.DatasetName = &v + return s +} + +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +type DeleteDatastoreInput struct { + _ struct{} `type:"structure"` + + // The name of the data store to delete. + // + // DatastoreName is a required field + DatastoreName *string `location:"uri" locationName:"datastoreName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatastoreInput"} + if s.DatastoreName == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreName")) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *DeleteDatastoreInput) SetDatastoreName(v string) *DeleteDatastoreInput { + s.DatastoreName = &v + return s +} + +type DeleteDatastoreOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatastoreOutput) GoString() string { + return s.String() +} + +type DeletePipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to delete. + // + // PipelineName is a required field + PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePipelineInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPipelineName sets the PipelineName field's value. 
+func (s *DeletePipelineInput) SetPipelineName(v string) *DeletePipelineInput { + s.PipelineName = &v + return s +} + +type DeletePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineOutput) GoString() string { + return s.String() +} + +// Used to limit data to that which has arrived since the last execution of +// the action. +type DeltaTime struct { + _ struct{} `type:"structure"` + + // The number of seconds of estimated "in flight" lag time of message data. + // When you create data set contents using message data from a specified time + // frame, some message data may still be "in flight" when processing begins, + // and so will not arrive in time to be processed. Use this field to make allowances + // for the "in flight" time of your message data, so that data not processed + // from a previous time frame will be included with the next time frame. Without + // this, missed message data would be excluded from processing during the next + // time frame as well, because its timestamp places it within the previous time + // frame. + // + // OffsetSeconds is a required field + OffsetSeconds *int64 `locationName:"offsetSeconds" type:"integer" required:"true"` + + // An expression by which the time of the message data may be determined. This + // may be the name of a timestamp field, or a SQL expression which is used to + // derive the time the message data was generated. + // + // TimeExpression is a required field + TimeExpression *string `locationName:"timeExpression" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeltaTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeltaTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeltaTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeltaTime"} + if s.OffsetSeconds == nil { + invalidParams.Add(request.NewErrParamRequired("OffsetSeconds")) + } + if s.TimeExpression == nil { + invalidParams.Add(request.NewErrParamRequired("TimeExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOffsetSeconds sets the OffsetSeconds field's value. +func (s *DeltaTime) SetOffsetSeconds(v int64) *DeltaTime { + s.OffsetSeconds = &v + return s +} + +// SetTimeExpression sets the TimeExpression field's value. +func (s *DeltaTime) SetTimeExpression(v string) *DeltaTime { + s.TimeExpression = &v + return s +} + +type DescribeChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the channel whose information is retrieved. + // + // ChannelName is a required field + ChannelName *string `location:"uri" locationName:"channelName" min:"1" type:"string" required:"true"` + + // If true, additional statistical information about the channel is included + // in the response. This feature cannot be used with a channel whose S3 storage + // is customer-managed. 
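+	//
+	// A hedged sketch (the channel name is hypothetical):
+	//
+	//	in := (&DescribeChannelInput{}).
+	//		SetChannelName("example_channel").
+	//		SetIncludeStatistics(true)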
+ IncludeStatistics *bool `location:"querystring" locationName:"includeStatistics" type:"boolean"` +} + +// String returns the string representation +func (s DescribeChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeChannelInput"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *DescribeChannelInput) SetChannelName(v string) *DescribeChannelInput { + s.ChannelName = &v + return s +} + +// SetIncludeStatistics sets the IncludeStatistics field's value. +func (s *DescribeChannelInput) SetIncludeStatistics(v bool) *DescribeChannelInput { + s.IncludeStatistics = &v + return s +} + +type DescribeChannelOutput struct { + _ struct{} `type:"structure"` + + // An object that contains information about the channel. + Channel *Channel `locationName:"channel" type:"structure"` + + // Statistics about the channel. Included if the 'includeStatistics' parameter + // is set to true in the request. + Statistics *ChannelStatistics `locationName:"statistics" type:"structure"` +} + +// String returns the string representation +func (s DescribeChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChannelOutput) GoString() string { + return s.String() +} + +// SetChannel sets the Channel field's value. +func (s *DescribeChannelOutput) SetChannel(v *Channel) *DescribeChannelOutput { + s.Channel = v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *DescribeChannelOutput) SetStatistics(v *ChannelStatistics) *DescribeChannelOutput { + s.Statistics = v + return s +} + +type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // The name of the data set whose information is retrieved. + // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. 
+func (s *DescribeDatasetInput) SetDatasetName(v string) *DescribeDatasetInput { + s.DatasetName = &v + return s +} + +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // An object that contains information about the data set. + Dataset *Dataset `locationName:"dataset" type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// SetDataset sets the Dataset field's value. +func (s *DescribeDatasetOutput) SetDataset(v *Dataset) *DescribeDatasetOutput { + s.Dataset = v + return s +} + +type DescribeDatastoreInput struct { + _ struct{} `type:"structure"` + + // The name of the data store + // + // DatastoreName is a required field + DatastoreName *string `location:"uri" locationName:"datastoreName" min:"1" type:"string" required:"true"` + + // If true, additional statistical information about the data store is included + // in the response. This feature cannot be used with a data store whose S3 storage + // is customer-managed. + IncludeStatistics *bool `location:"querystring" locationName:"includeStatistics" type:"boolean"` +} + +// String returns the string representation +func (s DescribeDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatastoreInput"} + if s.DatastoreName == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreName")) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *DescribeDatastoreInput) SetDatastoreName(v string) *DescribeDatastoreInput { + s.DatastoreName = &v + return s +} + +// SetIncludeStatistics sets the IncludeStatistics field's value. +func (s *DescribeDatastoreInput) SetIncludeStatistics(v bool) *DescribeDatastoreInput { + s.IncludeStatistics = &v + return s +} + +type DescribeDatastoreOutput struct { + _ struct{} `type:"structure"` + + // Information about the data store. + Datastore *Datastore `locationName:"datastore" type:"structure"` + + // Additional statistical information about the data store. Included if the + // 'includeStatistics' parameter is set to true in the request. + Statistics *DatastoreStatistics `locationName:"statistics" type:"structure"` +} + +// String returns the string representation +func (s DescribeDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatastoreOutput) GoString() string { + return s.String() +} + +// SetDatastore sets the Datastore field's value. +func (s *DescribeDatastoreOutput) SetDatastore(v *Datastore) *DescribeDatastoreOutput { + s.Datastore = v + return s +} + +// SetStatistics sets the Statistics field's value. 
+func (s *DescribeDatastoreOutput) SetStatistics(v *DatastoreStatistics) *DescribeDatastoreOutput { + s.Statistics = v + return s +} + +type DescribeLoggingOptionsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingOptionsInput) GoString() string { + return s.String() +} + +type DescribeLoggingOptionsOutput struct { + _ struct{} `type:"structure"` + + // The current settings of the AWS IoT Analytics logging options. + LoggingOptions *LoggingOptions `locationName:"loggingOptions" type:"structure"` +} + +// String returns the string representation +func (s DescribeLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingOptionsOutput) GoString() string { + return s.String() +} + +// SetLoggingOptions sets the LoggingOptions field's value. +func (s *DescribeLoggingOptionsOutput) SetLoggingOptions(v *LoggingOptions) *DescribeLoggingOptionsOutput { + s.LoggingOptions = v + return s +} + +type DescribePipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline whose information is retrieved. + // + // PipelineName is a required field + PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePipelineInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPipelineName sets the PipelineName field's value. +func (s *DescribePipelineInput) SetPipelineName(v string) *DescribePipelineInput { + s.PipelineName = &v + return s +} + +type DescribePipelineOutput struct { + _ struct{} `type:"structure"` + + // A "Pipeline" object that contains information about the pipeline. + Pipeline *Pipeline `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s DescribePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelineOutput) GoString() string { + return s.String() +} + +// SetPipeline sets the Pipeline field's value. +func (s *DescribePipelineOutput) SetPipeline(v *Pipeline) *DescribePipelineOutput { + s.Pipeline = v + return s +} + +// An activity that adds data from the AWS IoT device registry to your message. +type DeviceRegistryEnrichActivity struct { + _ struct{} `type:"structure"` + + // The name of the attribute that is added to the message. + // + // Attribute is a required field + Attribute *string `locationName:"attribute" min:"1" type:"string" required:"true"` + + // The name of the 'deviceRegistryEnrich' activity. 
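+	//
+	// A hedged sketch of a complete activity (all values are hypothetical):
+	//
+	//	act := (&DeviceRegistryEnrichActivity{}).
+	//		SetName("enrichWithRegistry").
+	//		SetAttribute("registryInfo").
+	//		SetThingName("example_thing").
+	//		SetRoleArn("arn:aws:iam::123456789012:role/ExampleRole")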
+ // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. + Next *string `locationName:"next" min:"1" type:"string"` + + // The ARN of the role that allows access to the device's registry information. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // The name of the IoT device whose registry information is added to the message. + // + // ThingName is a required field + ThingName *string `locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeviceRegistryEnrichActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeviceRegistryEnrichActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeviceRegistryEnrichActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeviceRegistryEnrichActivity"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.Attribute != nil && len(*s.Attribute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attribute", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DeviceRegistryEnrichActivity) SetAttribute(v string) *DeviceRegistryEnrichActivity { + s.Attribute = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeviceRegistryEnrichActivity) SetName(v string) *DeviceRegistryEnrichActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *DeviceRegistryEnrichActivity) SetNext(v string) *DeviceRegistryEnrichActivity { + s.Next = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DeviceRegistryEnrichActivity) SetRoleArn(v string) *DeviceRegistryEnrichActivity { + s.RoleArn = &v + return s +} + +// SetThingName sets the ThingName field's value. +func (s *DeviceRegistryEnrichActivity) SetThingName(v string) *DeviceRegistryEnrichActivity { + s.ThingName = &v + return s +} + +// An activity that adds information from the AWS IoT Device Shadows service +// to a message. +type DeviceShadowEnrichActivity struct { + _ struct{} `type:"structure"` + + // The name of the attribute that is added to the message. + // + // Attribute is a required field + Attribute *string `locationName:"attribute" min:"1" type:"string" required:"true"` + + // The name of the 'deviceShadowEnrich' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. 
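+	//
+	// Activities are linked to one another by name; a hedged sketch (act is a
+	// hypothetical activity, and the target name is a placeholder):
+	//
+	//	act.SetNext("filterHighTemperature")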
+ Next *string `locationName:"next" min:"1" type:"string"` + + // The ARN of the role that allows access to the device's shadow. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // The name of the IoT device whose shadow information is added to the message. + // + // ThingName is a required field + ThingName *string `locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeviceShadowEnrichActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeviceShadowEnrichActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeviceShadowEnrichActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeviceShadowEnrichActivity"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.Attribute != nil && len(*s.Attribute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attribute", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DeviceShadowEnrichActivity) SetAttribute(v string) *DeviceShadowEnrichActivity { + s.Attribute = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeviceShadowEnrichActivity) SetName(v string) *DeviceShadowEnrichActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *DeviceShadowEnrichActivity) SetNext(v string) *DeviceShadowEnrichActivity { + s.Next = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DeviceShadowEnrichActivity) SetRoleArn(v string) *DeviceShadowEnrichActivity { + s.RoleArn = &v + return s +} + +// SetThingName sets the ThingName field's value. +func (s *DeviceShadowEnrichActivity) SetThingName(v string) *DeviceShadowEnrichActivity { + s.ThingName = &v + return s +} + +// The estimated size of the resource. +type EstimatedResourceSize struct { + _ struct{} `type:"structure"` + + // The time when the estimate of the size of the resource was made. + EstimatedOn *time.Time `locationName:"estimatedOn" type:"timestamp"` + + // The estimated size of the resource in bytes. + EstimatedSizeInBytes *float64 `locationName:"estimatedSizeInBytes" type:"double"` +} + +// String returns the string representation +func (s EstimatedResourceSize) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EstimatedResourceSize) GoString() string { + return s.String() +} + +// SetEstimatedOn sets the EstimatedOn field's value. 
+func (s *EstimatedResourceSize) SetEstimatedOn(v time.Time) *EstimatedResourceSize { + s.EstimatedOn = &v + return s +} + +// SetEstimatedSizeInBytes sets the EstimatedSizeInBytes field's value. +func (s *EstimatedResourceSize) SetEstimatedSizeInBytes(v float64) *EstimatedResourceSize { + s.EstimatedSizeInBytes = &v + return s +} + +// An activity that filters a message based on its attributes. +type FilterActivity struct { + _ struct{} `type:"structure"` + + // An expression that looks like a SQL WHERE clause that must return a Boolean + // value. + // + // Filter is a required field + Filter *string `locationName:"filter" min:"1" type:"string" required:"true"` + + // The name of the 'filter' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. + Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s FilterActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterActivity"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Filter != nil && len(*s.Filter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filter", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *FilterActivity) SetFilter(v string) *FilterActivity { + s.Filter = &v + return s +} + +// SetName sets the Name field's value. +func (s *FilterActivity) SetName(v string) *FilterActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *FilterActivity) SetNext(v string) *FilterActivity { + s.Next = &v + return s +} + +type GetDatasetContentInput struct { + _ struct{} `type:"structure"` + + // The name of the data set whose contents are retrieved. + // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` + + // The version of the data set whose contents are retrieved. You can also use + // the strings "$LATEST" or "$LATEST_SUCCEEDED" to retrieve the contents of + // the latest or latest successfully completed data set. If not specified, "$LATEST_SUCCEEDED" + // is the default. + VersionId *string `location:"querystring" locationName:"versionId" min:"7" type:"string"` +} + +// String returns the string representation +func (s GetDatasetContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDatasetContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
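+//
+// The SDK also invokes Validate before sending the request, so calling it
+// directly is just a way to fail fast. A sketch (the data set name is
+// illustrative):
+//
+//    in := &GetDatasetContentInput{
+//        DatasetName: aws.String("my_dataset"),
+//        VersionId:   aws.String("$LATEST_SUCCEEDED"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing each violation
+//    }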
+func (s *GetDatasetContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDatasetContentInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.VersionId != nil && len(*s.VersionId) < 7 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 7)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *GetDatasetContentInput) SetDatasetName(v string) *GetDatasetContentInput { + s.DatasetName = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetDatasetContentInput) SetVersionId(v string) *GetDatasetContentInput { + s.VersionId = &v + return s +} + +type GetDatasetContentOutput struct { + _ struct{} `type:"structure"` + + // A list of "DatasetEntry" objects. + Entries []*DatasetEntry `locationName:"entries" type:"list"` + + // The status of the data set content. + Status *DatasetContentStatus `locationName:"status" type:"structure"` + + // The time when the request was made. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s GetDatasetContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDatasetContentOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *GetDatasetContentOutput) SetEntries(v []*DatasetEntry) *GetDatasetContentOutput { + s.Entries = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetDatasetContentOutput) SetStatus(v *DatasetContentStatus) *GetDatasetContentOutput { + s.Status = v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *GetDatasetContentOutput) SetTimestamp(v time.Time) *GetDatasetContentOutput { + s.Timestamp = &v + return s +} + +// Configuration information for coordination with the AWS Glue ETL (extract, +// transform and load) service. +type GlueConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the database in your AWS Glue Data Catalog in which the table + // is located. (An AWS Glue Data Catalog database contains Glue Data tables.) + // + // DatabaseName is a required field + DatabaseName *string `locationName:"databaseName" min:"1" type:"string" required:"true"` + + // The name of the table in your AWS Glue Data Catalog which is used to perform + // the ETL (extract, transform and load) operations. (An AWS Glue Data Catalog + // table contains partitioned data and descriptions of data sources and targets.) + // + // TableName is a required field + TableName *string `locationName:"tableName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GlueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
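+//
+// For illustration, a configuration that passes Validate (both names are
+// placeholders):
+//
+//    glue := &GlueConfiguration{
+//        DatabaseName: aws.String("my_glue_database"),
+//        TableName:    aws.String("my_glue_table"),
+//    }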
+func (s *GlueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlueConfiguration"} + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GlueConfiguration) SetDatabaseName(v string) *GlueConfiguration { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GlueConfiguration) SetTableName(v string) *GlueConfiguration { + s.TableName = &v + return s +} + +// Configuration information for delivery of data set contents to AWS IoT Events. +type IotEventsDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the AWS IoT Events input to which data set contents are delivered. + // + // InputName is a required field + InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` + + // The ARN of the role which grants AWS IoT Analytics permission to deliver + // data set contents to an AWS IoT Events input. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s IotEventsDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IotEventsDestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IotEventsDestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IotEventsDestinationConfiguration"} + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputName sets the InputName field's value. +func (s *IotEventsDestinationConfiguration) SetInputName(v string) *IotEventsDestinationConfiguration { + s.InputName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *IotEventsDestinationConfiguration) SetRoleArn(v string) *IotEventsDestinationConfiguration { + s.RoleArn = &v + return s +} + +// An activity that runs a Lambda function to modify the message. +type LambdaActivity struct { + _ struct{} `type:"structure"` + + // The number of messages passed to the Lambda function for processing. + // + // The AWS Lambda function must be able to process all of these messages within + // five minutes, which is the maximum timeout duration for Lambda functions. + // + // BatchSize is a required field + BatchSize *int64 `locationName:"batchSize" min:"1" type:"integer" required:"true"` + + // The name of the Lambda function that is run on the message. 
+ // + // LambdaName is a required field + LambdaName *string `locationName:"lambdaName" min:"1" type:"string" required:"true"` + + // The name of the 'lambda' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. + Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s LambdaActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaActivity"} + if s.BatchSize == nil { + invalidParams.Add(request.NewErrParamRequired("BatchSize")) + } + if s.BatchSize != nil && *s.BatchSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("BatchSize", 1)) + } + if s.LambdaName == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaName")) + } + if s.LambdaName != nil && len(*s.LambdaName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LambdaName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBatchSize sets the BatchSize field's value. +func (s *LambdaActivity) SetBatchSize(v int64) *LambdaActivity { + s.BatchSize = &v + return s +} + +// SetLambdaName sets the LambdaName field's value. +func (s *LambdaActivity) SetLambdaName(v string) *LambdaActivity { + s.LambdaName = &v + return s +} + +// SetName sets the Name field's value. +func (s *LambdaActivity) SetName(v string) *LambdaActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *LambdaActivity) SetNext(v string) *LambdaActivity { + s.Next = &v + return s +} + +type ListChannelsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in this request. + // + // The default value is 100. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListChannelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChannelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListChannelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListChannelsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListChannelsInput) SetMaxResults(v int64) *ListChannelsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
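+//
+// A typical pagination loop feeds each response's NextToken back into the
+// input until it is nil (sketch only; "svc" is an assumed, already
+// initialized *IoTAnalytics client):
+//
+//    in := &ListChannelsInput{MaxResults: aws.Int64(50)}
+//    for {
+//        page, err := svc.ListChannels(in)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        // ... consume page.ChannelSummaries ...
+//        if page.NextToken == nil {
+//            break
+//        }
+//        in.NextToken = page.NextToken
+//    }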
+func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { + s.NextToken = &v + return s +} + +type ListChannelsOutput struct { + _ struct{} `type:"structure"` + + // A list of "ChannelSummary" objects. + ChannelSummaries []*ChannelSummary `locationName:"channelSummaries" type:"list"` + + // The token to retrieve the next set of results, or null if there are no more + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListChannelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChannelsOutput) GoString() string { + return s.String() +} + +// SetChannelSummaries sets the ChannelSummaries field's value. +func (s *ListChannelsOutput) SetChannelSummaries(v []*ChannelSummary) *ListChannelsOutput { + s.ChannelSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput { + s.NextToken = &v + return s +} + +type ListDatasetContentsInput struct { + _ struct{} `type:"structure"` + + // The name of the data set whose contents information you want to list. + // + // DatasetName is a required field + DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` + + // The maximum number of results to return in this request. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // A filter to limit results to those data set contents whose creation is scheduled + // before the given time. See the field triggers.schedule in the CreateDataset + // request. (timestamp) + ScheduledBefore *time.Time `location:"querystring" locationName:"scheduledBefore" type:"timestamp"` + + // A filter to limit results to those data set contents whose creation is scheduled + // on or after the given time. See the field triggers.schedule in the CreateDataset + // request. (timestamp) + ScheduledOnOrAfter *time.Time `location:"querystring" locationName:"scheduledOnOrAfter" type:"timestamp"` +} + +// String returns the string representation +func (s ListDatasetContentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetContentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetContentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetContentsInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *ListDatasetContentsInput) SetDatasetName(v string) *ListDatasetContentsInput { + s.DatasetName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
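+//
+// The two schedule filters can be combined to select a window, for example
+// contents scheduled within the last 24 hours (sketch; "svc" is an assumed
+// client):
+//
+//    out, err := svc.ListDatasetContents(&ListDatasetContentsInput{
+//        DatasetName:        aws.String("my_dataset"),
+//        ScheduledOnOrAfter: aws.Time(time.Now().Add(-24 * time.Hour)),
+//        ScheduledBefore:    aws.Time(time.Now()),
+//    })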
+func (s *ListDatasetContentsInput) SetMaxResults(v int64) *ListDatasetContentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetContentsInput) SetNextToken(v string) *ListDatasetContentsInput { + s.NextToken = &v + return s +} + +// SetScheduledBefore sets the ScheduledBefore field's value. +func (s *ListDatasetContentsInput) SetScheduledBefore(v time.Time) *ListDatasetContentsInput { + s.ScheduledBefore = &v + return s +} + +// SetScheduledOnOrAfter sets the ScheduledOnOrAfter field's value. +func (s *ListDatasetContentsInput) SetScheduledOnOrAfter(v time.Time) *ListDatasetContentsInput { + s.ScheduledOnOrAfter = &v + return s +} + +type ListDatasetContentsOutput struct { + _ struct{} `type:"structure"` + + // Summary information about data set contents that have been created. + DatasetContentSummaries []*DatasetContentSummary `locationName:"datasetContentSummaries" type:"list"` + + // The token to retrieve the next set of results, or null if there are no more + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetContentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetContentsOutput) GoString() string { + return s.String() +} + +// SetDatasetContentSummaries sets the DatasetContentSummaries field's value. +func (s *ListDatasetContentsOutput) SetDatasetContentSummaries(v []*DatasetContentSummary) *ListDatasetContentsOutput { + s.DatasetContentSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetContentsOutput) SetNextToken(v string) *ListDatasetContentsOutput { + s.NextToken = &v + return s +} + +type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in this request. + // + // The default value is 100. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetsInput) SetMaxResults(v int64) *ListDatasetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsInput) SetNextToken(v string) *ListDatasetsInput { + s.NextToken = &v + return s +} + +type ListDatasetsOutput struct { + _ struct{} `type:"structure"` + + // A list of "DatasetSummary" objects. + DatasetSummaries []*DatasetSummary `locationName:"datasetSummaries" type:"list"` + + // The token to retrieve the next set of results, or null if there are no more + // results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsOutput) GoString() string { + return s.String() +} + +// SetDatasetSummaries sets the DatasetSummaries field's value. +func (s *ListDatasetsOutput) SetDatasetSummaries(v []*DatasetSummary) *ListDatasetsOutput { + s.DatasetSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsOutput) SetNextToken(v string) *ListDatasetsOutput { + s.NextToken = &v + return s +} + +type ListDatastoresInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in this request. + // + // The default value is 100. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatastoresInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatastoresInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatastoresInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatastoresInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatastoresInput) SetMaxResults(v int64) *ListDatastoresInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatastoresInput) SetNextToken(v string) *ListDatastoresInput { + s.NextToken = &v + return s +} + +type ListDatastoresOutput struct { + _ struct{} `type:"structure"` + + // A list of "DatastoreSummary" objects. + DatastoreSummaries []*DatastoreSummary `locationName:"datastoreSummaries" type:"list"` + + // The token to retrieve the next set of results, or null if there are no more + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatastoresOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatastoresOutput) GoString() string { + return s.String() +} + +// SetDatastoreSummaries sets the DatastoreSummaries field's value. +func (s *ListDatastoresOutput) SetDatastoreSummaries(v []*DatastoreSummary) *ListDatastoresOutput { + s.DatastoreSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatastoresOutput) SetNextToken(v string) *ListDatastoresOutput { + s.NextToken = &v + return s +} + +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in this request. + // + // The default value is 100. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPipelinesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPipelinesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListPipelinesInput) SetMaxResults(v int64) *ListPipelinesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPipelinesInput) SetNextToken(v string) *ListPipelinesInput { + s.NextToken = &v + return s +} + +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // The token to retrieve the next set of results, or null if there are no more + // results. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of "PipelineSummary" objects. + PipelineSummaries []*PipelineSummary `locationName:"pipelineSummaries" type:"list"` +} + +// String returns the string representation +func (s ListPipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPipelinesOutput) SetNextToken(v string) *ListPipelinesOutput { + s.NextToken = &v + return s +} + +// SetPipelineSummaries sets the PipelineSummaries field's value. +func (s *ListPipelinesOutput) SetPipelineSummaries(v []*PipelineSummary) *ListPipelinesOutput { + s.PipelineSummaries = v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource whose tags you want to list. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags (metadata) which you have assigned to the resource. 
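+	//
+	// For example, to print the returned tags (sketch; "out" is a value
+	// returned by ListTagsForResource):
+	//
+	//    for _, t := range out.Tags {
+	//        fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+	//    }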
+ Tags []*Tag `locationName:"tags" min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Information about logging options. +type LoggingOptions struct { + _ struct{} `type:"structure"` + + // If true, logging is enabled for AWS IoT Analytics. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + // The logging level. Currently, only "ERROR" is supported. + // + // Level is a required field + Level *string `locationName:"level" type:"string" required:"true" enum:"LoggingLevel"` + + // The ARN of the role that grants permission to AWS IoT Analytics to perform + // logging. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingOptions"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Level == nil { + invalidParams.Add(request.NewErrParamRequired("Level")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *LoggingOptions) SetEnabled(v bool) *LoggingOptions { + s.Enabled = &v + return s +} + +// SetLevel sets the Level field's value. +func (s *LoggingOptions) SetLevel(v string) *LoggingOptions { + s.Level = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *LoggingOptions) SetRoleArn(v string) *LoggingOptions { + s.RoleArn = &v + return s +} + +// An activity that computes an arithmetic expression using the message's attributes. +type MathActivity struct { + _ struct{} `type:"structure"` + + // The name of the attribute that contains the result of the math operation. + // + // Attribute is a required field + Attribute *string `locationName:"attribute" min:"1" type:"string" required:"true"` + + // An expression that uses one or more existing attributes and must return an + // integer value. + // + // Math is a required field + Math *string `locationName:"math" min:"1" type:"string" required:"true"` + + // The name of the 'math' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. 
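+	//
+	// A sketch of a complete 'math' activity (every value is illustrative):
+	//
+	//    math := &MathActivity{
+	//        Name:      aws.String("incrementCount"),
+	//        Attribute: aws.String("count"),
+	//        Math:      aws.String("count + 1"),
+	//        Next:      aws.String("storeActivity"),
+	//    }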
+ Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s MathActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MathActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MathActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MathActivity"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.Attribute != nil && len(*s.Attribute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attribute", 1)) + } + if s.Math == nil { + invalidParams.Add(request.NewErrParamRequired("Math")) + } + if s.Math != nil && len(*s.Math) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Math", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *MathActivity) SetAttribute(v string) *MathActivity { + s.Attribute = &v + return s +} + +// SetMath sets the Math field's value. +func (s *MathActivity) SetMath(v string) *MathActivity { + s.Math = &v + return s +} + +// SetName sets the Name field's value. +func (s *MathActivity) SetName(v string) *MathActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *MathActivity) SetNext(v string) *MathActivity { + s.Next = &v + return s +} + +// Information about a message. +type Message struct { + _ struct{} `type:"structure"` + + // The ID you wish to assign to the message. Each "messageId" must be unique + // within each batch sent. + // + // MessageId is a required field + MessageId *string `locationName:"messageId" min:"1" type:"string" required:"true"` + + // The payload of the message. This may be a JSON string or a Base64-encoded + // string representing binary data (in which case you must decode it by means + // of a pipeline activity). + // + // Payload is automatically base64 encoded/decoded by the SDK. + // + // Payload is a required field + Payload []byte `locationName:"payload" type:"blob" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Message) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Message"} + if s.MessageId == nil { + invalidParams.Add(request.NewErrParamRequired("MessageId")) + } + if s.MessageId != nil && len(*s.MessageId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MessageId", 1)) + } + if s.Payload == nil { + invalidParams.Add(request.NewErrParamRequired("Payload")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMessageId sets the MessageId field's value. +func (s *Message) SetMessageId(v string) *Message { + s.MessageId = &v + return s +} + +// SetPayload sets the Payload field's value. 
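+//
+// Because the SDK base64-encodes Payload automatically, callers pass the raw
+// bytes. A sketch (the message ID and JSON body are illustrative):
+//
+//    msg := &Message{
+//        MessageId: aws.String("msg-0001"),
+//        Payload:   []byte(`{"temperature": 21.5}`),
+//    }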
+func (s *Message) SetPayload(v []byte) *Message { + s.Payload = v + return s +} + +// The value of the variable as a structure that specifies an output file URI. +type OutputFileUriValue struct { + _ struct{} `type:"structure"` + + // The URI of the location where data set contents are stored, usually the URI + // of a file in an S3 bucket. + // + // FileName is a required field + FileName *string `locationName:"fileName" type:"string" required:"true"` +} + +// String returns the string representation +func (s OutputFileUriValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputFileUriValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputFileUriValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputFileUriValue"} + if s.FileName == nil { + invalidParams.Add(request.NewErrParamRequired("FileName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFileName sets the FileName field's value. +func (s *OutputFileUriValue) SetFileName(v string) *OutputFileUriValue { + s.FileName = &v + return s +} + +// Contains information about a pipeline. +type Pipeline struct { + _ struct{} `type:"structure"` + + // The activities that perform transformations on the messages. + Activities []*PipelineActivity `locationName:"activities" min:"1" type:"list"` + + // The ARN of the pipeline. + Arn *string `locationName:"arn" type:"string"` + + // When the pipeline was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The last time the pipeline was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The name of the pipeline. + Name *string `locationName:"name" min:"1" type:"string"` + + // A summary of information about the pipeline reprocessing. + ReprocessingSummaries []*ReprocessingSummary `locationName:"reprocessingSummaries" type:"list"` +} + +// String returns the string representation +func (s Pipeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Pipeline) GoString() string { + return s.String() +} + +// SetActivities sets the Activities field's value. +func (s *Pipeline) SetActivities(v []*PipelineActivity) *Pipeline { + s.Activities = v + return s +} + +// SetArn sets the Arn field's value. +func (s *Pipeline) SetArn(v string) *Pipeline { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Pipeline) SetCreationTime(v time.Time) *Pipeline { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *Pipeline) SetLastUpdateTime(v time.Time) *Pipeline { + s.LastUpdateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Pipeline) SetName(v string) *Pipeline { + s.Name = &v + return s +} + +// SetReprocessingSummaries sets the ReprocessingSummaries field's value. +func (s *Pipeline) SetReprocessingSummaries(v []*ReprocessingSummary) *Pipeline { + s.ReprocessingSummaries = v + return s +} + +// An activity that performs a transformation on a message. +type PipelineActivity struct { + _ struct{} `type:"structure"` + + // Adds other attributes based on existing attributes in the message. + AddAttributes *AddAttributesActivity `locationName:"addAttributes" type:"structure"` + + // Determines the source of the messages to be processed. 
+ Channel *ChannelActivity `locationName:"channel" type:"structure"` + + // Specifies where to store the processed message data. + Datastore *DatastoreActivity `locationName:"datastore" type:"structure"` + + // Adds data from the AWS IoT device registry to your message. + DeviceRegistryEnrich *DeviceRegistryEnrichActivity `locationName:"deviceRegistryEnrich" type:"structure"` + + // Adds information from the AWS IoT Device Shadows service to a message. + DeviceShadowEnrich *DeviceShadowEnrichActivity `locationName:"deviceShadowEnrich" type:"structure"` + + // Filters a message based on its attributes. + Filter *FilterActivity `locationName:"filter" type:"structure"` + + // Runs a Lambda function to modify the message. + Lambda *LambdaActivity `locationName:"lambda" type:"structure"` + + // Computes an arithmetic expression using the message's attributes and adds + // it to the message. + Math *MathActivity `locationName:"math" type:"structure"` + + // Removes attributes from a message. + RemoveAttributes *RemoveAttributesActivity `locationName:"removeAttributes" type:"structure"` + + // Creates a new message using only the specified attributes from the original + // message. + SelectAttributes *SelectAttributesActivity `locationName:"selectAttributes" type:"structure"` +} + +// String returns the string representation +func (s PipelineActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PipelineActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PipelineActivity"} + if s.AddAttributes != nil { + if err := s.AddAttributes.Validate(); err != nil { + invalidParams.AddNested("AddAttributes", err.(request.ErrInvalidParams)) + } + } + if s.Channel != nil { + if err := s.Channel.Validate(); err != nil { + invalidParams.AddNested("Channel", err.(request.ErrInvalidParams)) + } + } + if s.Datastore != nil { + if err := s.Datastore.Validate(); err != nil { + invalidParams.AddNested("Datastore", err.(request.ErrInvalidParams)) + } + } + if s.DeviceRegistryEnrich != nil { + if err := s.DeviceRegistryEnrich.Validate(); err != nil { + invalidParams.AddNested("DeviceRegistryEnrich", err.(request.ErrInvalidParams)) + } + } + if s.DeviceShadowEnrich != nil { + if err := s.DeviceShadowEnrich.Validate(); err != nil { + invalidParams.AddNested("DeviceShadowEnrich", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Lambda != nil { + if err := s.Lambda.Validate(); err != nil { + invalidParams.AddNested("Lambda", err.(request.ErrInvalidParams)) + } + } + if s.Math != nil { + if err := s.Math.Validate(); err != nil { + invalidParams.AddNested("Math", err.(request.ErrInvalidParams)) + } + } + if s.RemoveAttributes != nil { + if err := s.RemoveAttributes.Validate(); err != nil { + invalidParams.AddNested("RemoveAttributes", err.(request.ErrInvalidParams)) + } + } + if s.SelectAttributes != nil { + if err := s.SelectAttributes.Validate(); err != nil { + invalidParams.AddNested("SelectAttributes", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddAttributes sets the AddAttributes field's value. 
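+//
+// Each PipelineActivity is expected to carry one of the concrete activity
+// types, and Validate recurses into whichever member is set. A sketch (names
+// and the filter expression are illustrative):
+//
+//    act := &PipelineActivity{
+//        Filter: &FilterActivity{
+//            Name:   aws.String("highTempOnly"),
+//            Filter: aws.String("temperature > 100"),
+//            Next:   aws.String("storeActivity"),
+//        },
+//    }
+//    err := act.Validate() // also runs FilterActivity.Validate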
+func (s *PipelineActivity) SetAddAttributes(v *AddAttributesActivity) *PipelineActivity { + s.AddAttributes = v + return s +} + +// SetChannel sets the Channel field's value. +func (s *PipelineActivity) SetChannel(v *ChannelActivity) *PipelineActivity { + s.Channel = v + return s +} + +// SetDatastore sets the Datastore field's value. +func (s *PipelineActivity) SetDatastore(v *DatastoreActivity) *PipelineActivity { + s.Datastore = v + return s +} + +// SetDeviceRegistryEnrich sets the DeviceRegistryEnrich field's value. +func (s *PipelineActivity) SetDeviceRegistryEnrich(v *DeviceRegistryEnrichActivity) *PipelineActivity { + s.DeviceRegistryEnrich = v + return s +} + +// SetDeviceShadowEnrich sets the DeviceShadowEnrich field's value. +func (s *PipelineActivity) SetDeviceShadowEnrich(v *DeviceShadowEnrichActivity) *PipelineActivity { + s.DeviceShadowEnrich = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *PipelineActivity) SetFilter(v *FilterActivity) *PipelineActivity { + s.Filter = v + return s +} + +// SetLambda sets the Lambda field's value. +func (s *PipelineActivity) SetLambda(v *LambdaActivity) *PipelineActivity { + s.Lambda = v + return s +} + +// SetMath sets the Math field's value. +func (s *PipelineActivity) SetMath(v *MathActivity) *PipelineActivity { + s.Math = v + return s +} + +// SetRemoveAttributes sets the RemoveAttributes field's value. +func (s *PipelineActivity) SetRemoveAttributes(v *RemoveAttributesActivity) *PipelineActivity { + s.RemoveAttributes = v + return s +} + +// SetSelectAttributes sets the SelectAttributes field's value. +func (s *PipelineActivity) SetSelectAttributes(v *SelectAttributesActivity) *PipelineActivity { + s.SelectAttributes = v + return s +} + +// A summary of information about a pipeline. +type PipelineSummary struct { + _ struct{} `type:"structure"` + + // When the pipeline was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // When the pipeline was last updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The name of the pipeline. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string"` + + // A summary of information about the pipeline reprocessing. + ReprocessingSummaries []*ReprocessingSummary `locationName:"reprocessingSummaries" type:"list"` +} + +// String returns the string representation +func (s PipelineSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *PipelineSummary) SetCreationTime(v time.Time) *PipelineSummary { + s.CreationTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *PipelineSummary) SetLastUpdateTime(v time.Time) *PipelineSummary { + s.LastUpdateTime = &v + return s +} + +// SetPipelineName sets the PipelineName field's value. +func (s *PipelineSummary) SetPipelineName(v string) *PipelineSummary { + s.PipelineName = &v + return s +} + +// SetReprocessingSummaries sets the ReprocessingSummaries field's value. +func (s *PipelineSummary) SetReprocessingSummaries(v []*ReprocessingSummary) *PipelineSummary { + s.ReprocessingSummaries = v + return s +} + +type PutLoggingOptionsInput struct { + _ struct{} `type:"structure"` + + // The new values of the AWS IoT Analytics logging options. 
+ // + // LoggingOptions is a required field + LoggingOptions *LoggingOptions `locationName:"loggingOptions" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLoggingOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLoggingOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLoggingOptionsInput"} + if s.LoggingOptions == nil { + invalidParams.Add(request.NewErrParamRequired("LoggingOptions")) + } + if s.LoggingOptions != nil { + if err := s.LoggingOptions.Validate(); err != nil { + invalidParams.AddNested("LoggingOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingOptions sets the LoggingOptions field's value. +func (s *PutLoggingOptionsInput) SetLoggingOptions(v *LoggingOptions) *PutLoggingOptionsInput { + s.LoggingOptions = v + return s +} + +type PutLoggingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLoggingOptionsOutput) GoString() string { + return s.String() +} + +// Information which is used to filter message data, to segregate it according +// to the time frame in which it arrives. +type QueryFilter struct { + _ struct{} `type:"structure"` + + // Used to limit data to that which has arrived since the last execution of + // the action. + DeltaTime *DeltaTime `locationName:"deltaTime" type:"structure"` +} + +// String returns the string representation +func (s QueryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryFilter"} + if s.DeltaTime != nil { + if err := s.DeltaTime.Validate(); err != nil { + invalidParams.AddNested("DeltaTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeltaTime sets the DeltaTime field's value. +func (s *QueryFilter) SetDeltaTime(v *DeltaTime) *QueryFilter { + s.DeltaTime = v + return s +} + +// An activity that removes attributes from a message. +type RemoveAttributesActivity struct { + _ struct{} `type:"structure"` + + // A list of 1-50 attributes to remove from the message. + // + // Attributes is a required field + Attributes []*string `locationName:"attributes" min:"1" type:"list" required:"true"` + + // The name of the 'removeAttributes' activity. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. 
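+	//
+	// Sketch of a 'removeAttributes' activity that strips two attributes
+	// (all names are illustrative):
+	//
+	//    rm := &RemoveAttributesActivity{
+	//        Name:       aws.String("stripDebugFields"),
+	//        Attributes: []*string{aws.String("debug"), aws.String("trace")},
+	//    }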
+ Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s RemoveAttributesActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveAttributesActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveAttributesActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveAttributesActivity"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Attributes != nil && len(s.Attributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attributes", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *RemoveAttributesActivity) SetAttributes(v []*string) *RemoveAttributesActivity { + s.Attributes = v + return s +} + +// SetName sets the Name field's value. +func (s *RemoveAttributesActivity) SetName(v string) *RemoveAttributesActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *RemoveAttributesActivity) SetNext(v string) *RemoveAttributesActivity { + s.Next = &v + return s +} + +// Information about pipeline reprocessing. +type ReprocessingSummary struct { + _ struct{} `type:"structure"` + + // The time the pipeline reprocessing was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The 'reprocessingId' returned by "StartPipelineReprocessing". + Id *string `locationName:"id" type:"string"` + + // The status of the pipeline reprocessing. + Status *string `locationName:"status" type:"string" enum:"ReprocessingStatus"` +} + +// String returns the string representation +func (s ReprocessingSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReprocessingSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ReprocessingSummary) SetCreationTime(v time.Time) *ReprocessingSummary { + s.CreationTime = &v + return s +} + +// SetId sets the Id field's value. +func (s *ReprocessingSummary) SetId(v string) *ReprocessingSummary { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReprocessingSummary) SetStatus(v string) *ReprocessingSummary { + s.Status = &v + return s +} + +// The configuration of the resource used to execute the "containerAction". +type ResourceConfiguration struct { + _ struct{} `type:"structure"` + + // The type of the compute resource used to execute the "containerAction". Possible + // values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB). + // + // ComputeType is a required field + ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"` + + // The size (in GB) of the persistent storage available to the resource instance + // used to execute the "containerAction" (min: 1, max: 50). 
+ // + // VolumeSizeInGB is a required field + VolumeSizeInGB *int64 `locationName:"volumeSizeInGB" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ResourceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceConfiguration"} + if s.ComputeType == nil { + invalidParams.Add(request.NewErrParamRequired("ComputeType")) + } + if s.VolumeSizeInGB == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSizeInGB")) + } + if s.VolumeSizeInGB != nil && *s.VolumeSizeInGB < 1 { + invalidParams.Add(request.NewErrParamMinValue("VolumeSizeInGB", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComputeType sets the ComputeType field's value. +func (s *ResourceConfiguration) SetComputeType(v string) *ResourceConfiguration { + s.ComputeType = &v + return s +} + +// SetVolumeSizeInGB sets the VolumeSizeInGB field's value. +func (s *ResourceConfiguration) SetVolumeSizeInGB(v int64) *ResourceConfiguration { + s.VolumeSizeInGB = &v + return s +} + +// How long, in days, message data is kept. +type RetentionPeriod struct { + _ struct{} `type:"structure"` + + // The number of days that message data is kept. The "unlimited" parameter must + // be false. + NumberOfDays *int64 `locationName:"numberOfDays" min:"1" type:"integer"` + + // If true, message data is kept indefinitely. + Unlimited *bool `locationName:"unlimited" type:"boolean"` +} + +// String returns the string representation +func (s RetentionPeriod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetentionPeriod) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetentionPeriod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetentionPeriod"} + if s.NumberOfDays != nil && *s.NumberOfDays < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfDays", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNumberOfDays sets the NumberOfDays field's value. +func (s *RetentionPeriod) SetNumberOfDays(v int64) *RetentionPeriod { + s.NumberOfDays = &v + return s +} + +// SetUnlimited sets the Unlimited field's value. +func (s *RetentionPeriod) SetUnlimited(v bool) *RetentionPeriod { + s.Unlimited = &v + return s +} + +type RunPipelineActivityInput struct { + _ struct{} `type:"structure"` + + // The sample message payloads on which the pipeline activity is run. + // + // Payloads is a required field + Payloads [][]byte `locationName:"payloads" min:"1" type:"list" required:"true"` + + // The pipeline activity that is run. This must not be a 'channel' activity + // or a 'datastore' activity because these activities are used in a pipeline + // only to load the original message and to store the (possibly) transformed + // message. If a 'lambda' activity is specified, only short-running Lambda functions + // (those with a timeout of 30 seconds or less) can be used. 
+ //
+ // PipelineActivity is a required field
+ PipelineActivity *PipelineActivity `locationName:"pipelineActivity" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RunPipelineActivityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunPipelineActivityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RunPipelineActivityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RunPipelineActivityInput"}
+ if s.Payloads == nil {
+ invalidParams.Add(request.NewErrParamRequired("Payloads"))
+ }
+ if s.Payloads != nil && len(s.Payloads) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Payloads", 1))
+ }
+ if s.PipelineActivity == nil {
+ invalidParams.Add(request.NewErrParamRequired("PipelineActivity"))
+ }
+ if s.PipelineActivity != nil {
+ if err := s.PipelineActivity.Validate(); err != nil {
+ invalidParams.AddNested("PipelineActivity", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPayloads sets the Payloads field's value.
+func (s *RunPipelineActivityInput) SetPayloads(v [][]byte) *RunPipelineActivityInput {
+ s.Payloads = v
+ return s
+}
+
+// SetPipelineActivity sets the PipelineActivity field's value.
+func (s *RunPipelineActivityInput) SetPipelineActivity(v *PipelineActivity) *RunPipelineActivityInput {
+ s.PipelineActivity = v
+ return s
+}
+
+type RunPipelineActivityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The log message that is generated if the pipeline activity fails.
+ LogResult *string `locationName:"logResult" type:"string"`
+
+ // The enriched or transformed sample message payloads as base64-encoded strings.
+ // (The results of running the pipeline activity on each input sample message
+ // payload, encoded in base64.)
+ Payloads [][]byte `locationName:"payloads" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s RunPipelineActivityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunPipelineActivityOutput) GoString() string {
+ return s.String()
+}
+
+// SetLogResult sets the LogResult field's value.
+func (s *RunPipelineActivityOutput) SetLogResult(v string) *RunPipelineActivityOutput {
+ s.LogResult = &v
+ return s
+}
+
+// SetPayloads sets the Payloads field's value.
+func (s *RunPipelineActivityOutput) SetPayloads(v [][]byte) *RunPipelineActivityOutput {
+ s.Payloads = v
+ return s
+}
+
+// Configuration information for delivery of data set contents to Amazon S3.
+type S3DestinationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Amazon S3 bucket to which data set contents are delivered.
+ //
+ // Bucket is a required field
+ Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"`
+
+ // Configuration information for coordination with the AWS Glue ETL (extract,
+ // transform, and load) service.
+ GlueConfiguration *GlueConfiguration `locationName:"glueConfiguration" type:"structure"`
+
+ // The key of the data set contents object. Each object in an Amazon S3 bucket
+ // has a key that is its unique identifier within the bucket (each object in
+ // a bucket has exactly one key). To produce a unique key, you can use "!{iotanalytics:scheduledTime}"
+ // to insert the time of the scheduled SQL query run, or "!{iotanalytics:versioned}"
+ // to insert a unique hash identifying the data set, for example: "/DataSet/!{iotanalytics:scheduledTime}/!{iotanalytics:versioned}.csv".
+ //
+ // Key is a required field
+ Key *string `locationName:"key" min:"1" type:"string" required:"true"`
+
+ // The ARN of the role which grants AWS IoT Analytics permission to interact
+ // with your Amazon S3 and AWS Glue resources.
+ //
+ // RoleArn is a required field
+ RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3DestinationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3DestinationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3DestinationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3DestinationConfiguration"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.GlueConfiguration != nil {
+ if err := s.GlueConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("GlueConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *S3DestinationConfiguration) SetBucket(v string) *S3DestinationConfiguration {
+ s.Bucket = &v
+ return s
+}
+
+// SetGlueConfiguration sets the GlueConfiguration field's value.
+func (s *S3DestinationConfiguration) SetGlueConfiguration(v *GlueConfiguration) *S3DestinationConfiguration {
+ s.GlueConfiguration = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *S3DestinationConfiguration) SetKey(v string) *S3DestinationConfiguration {
+ s.Key = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *S3DestinationConfiguration) SetRoleArn(v string) *S3DestinationConfiguration {
+ s.RoleArn = &v
+ return s
+}
+
+type SampleChannelDataInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the channel whose message samples are retrieved.
+ //
+ // ChannelName is a required field
+ ChannelName *string `location:"uri" locationName:"channelName" min:"1" type:"string" required:"true"`
+
+ // The end of the time window from which sample messages are retrieved.
+ EndTime *time.Time `location:"querystring" locationName:"endTime" type:"timestamp"`
+
+ // The number of sample messages to be retrieved. The limit is 10; the default
+ // is also 10.
+ MaxMessages *int64 `location:"querystring" locationName:"maxMessages" min:"1" type:"integer"`
+
+ // The start of the time window from which sample messages are retrieved.
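+ // For example (an illustrative sketch), a caller sampling the last hour of
+ // messages might set, on a *SampleChannelDataInput named input:
+ //
+ //    input.SetStartTime(time.Now().Add(-1 * time.Hour))
+ //    input.SetEndTime(time.Now())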
+ StartTime *time.Time `location:"querystring" locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s SampleChannelDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SampleChannelDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SampleChannelDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SampleChannelDataInput"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.MaxMessages != nil && *s.MaxMessages < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxMessages", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *SampleChannelDataInput) SetChannelName(v string) *SampleChannelDataInput { + s.ChannelName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *SampleChannelDataInput) SetEndTime(v time.Time) *SampleChannelDataInput { + s.EndTime = &v + return s +} + +// SetMaxMessages sets the MaxMessages field's value. +func (s *SampleChannelDataInput) SetMaxMessages(v int64) *SampleChannelDataInput { + s.MaxMessages = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *SampleChannelDataInput) SetStartTime(v time.Time) *SampleChannelDataInput { + s.StartTime = &v + return s +} + +type SampleChannelDataOutput struct { + _ struct{} `type:"structure"` + + // The list of message samples. Each sample message is returned as a base64-encoded + // string. + Payloads [][]byte `locationName:"payloads" min:"1" type:"list"` +} + +// String returns the string representation +func (s SampleChannelDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SampleChannelDataOutput) GoString() string { + return s.String() +} + +// SetPayloads sets the Payloads field's value. +func (s *SampleChannelDataOutput) SetPayloads(v [][]byte) *SampleChannelDataOutput { + s.Payloads = v + return s +} + +// The schedule for when to trigger an update. +type Schedule struct { + _ struct{} `type:"structure"` + + // The expression that defines when to trigger an update. For more information, + // see Schedule Expressions for Rules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html) + // in the Amazon CloudWatch Events User Guide. + Expression *string `locationName:"expression" type:"string"` +} + +// String returns the string representation +func (s Schedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Schedule) GoString() string { + return s.String() +} + +// SetExpression sets the Expression field's value. +func (s *Schedule) SetExpression(v string) *Schedule { + s.Expression = &v + return s +} + +// Creates a new message using only the specified attributes from the original +// message. +type SelectAttributesActivity struct { + _ struct{} `type:"structure"` + + // A list of the attributes to select from the message. + // + // Attributes is a required field + Attributes []*string `locationName:"attributes" min:"1" type:"list" required:"true"` + + // The name of the 'selectAttributes' activity. 
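+ // For example (an illustrative sketch using made-up activity names), an
+ // activity that keeps only two attributes can be built with the generated
+ // setters:
+ //
+ //    act := (&iotanalytics.SelectAttributesActivity{}).
+ //        SetName("filterAttrs").
+ //        SetAttributes([]*string{aws.String("temperature"), aws.String("deviceId")}).
+ //        SetNext("storeInDatastore")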
+ // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The next activity in the pipeline. + Next *string `locationName:"next" min:"1" type:"string"` +} + +// String returns the string representation +func (s SelectAttributesActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectAttributesActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectAttributesActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectAttributesActivity"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Attributes != nil && len(s.Attributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attributes", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Next != nil && len(*s.Next) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Next", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *SelectAttributesActivity) SetAttributes(v []*string) *SelectAttributesActivity { + s.Attributes = v + return s +} + +// SetName sets the Name field's value. +func (s *SelectAttributesActivity) SetName(v string) *SelectAttributesActivity { + s.Name = &v + return s +} + +// SetNext sets the Next field's value. +func (s *SelectAttributesActivity) SetNext(v string) *SelectAttributesActivity { + s.Next = &v + return s +} + +// Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics +// service. The choice of service-managed or customer-managed S3 storage cannot +// be changed after creation of the channel. +type ServiceManagedChannelS3Storage struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ServiceManagedChannelS3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceManagedChannelS3Storage) GoString() string { + return s.String() +} + +// Used to store channel data in an S3 bucket managed by the AWS IoT Analytics +// service. +type ServiceManagedChannelS3StorageSummary struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ServiceManagedChannelS3StorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceManagedChannelS3StorageSummary) GoString() string { + return s.String() +} + +// Use this to store data store data in an S3 bucket managed by the AWS IoT +// Analytics service. The choice of service-managed or customer-managed S3 storage +// cannot be changed after creation of the data store. +type ServiceManagedDatastoreS3Storage struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ServiceManagedDatastoreS3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceManagedDatastoreS3Storage) GoString() string { + return s.String() +} + +// Used to store data store data in an S3 bucket managed by the AWS IoT Analytics +// service. 
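+//
+// As an illustrative sketch (assuming the DatastoreStorage wrapper type and
+// its member names defined earlier in this file), service-managed storage is
+// selected by setting only the service-managed member:
+//
+//    storage := &iotanalytics.DatastoreStorage{
+//        ServiceManagedS3: &iotanalytics.ServiceManagedDatastoreS3Storage{},
+//    }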
+type ServiceManagedDatastoreS3StorageSummary struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ServiceManagedDatastoreS3StorageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceManagedDatastoreS3StorageSummary) GoString() string { + return s.String() +} + +// The SQL query to modify the message. +type SqlQueryDatasetAction struct { + _ struct{} `type:"structure"` + + // Pre-filters applied to message data. + Filters []*QueryFilter `locationName:"filters" type:"list"` + + // A SQL query string. + // + // SqlQuery is a required field + SqlQuery *string `locationName:"sqlQuery" type:"string" required:"true"` +} + +// String returns the string representation +func (s SqlQueryDatasetAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlQueryDatasetAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SqlQueryDatasetAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlQueryDatasetAction"} + if s.SqlQuery == nil { + invalidParams.Add(request.NewErrParamRequired("SqlQuery")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *SqlQueryDatasetAction) SetFilters(v []*QueryFilter) *SqlQueryDatasetAction { + s.Filters = v + return s +} + +// SetSqlQuery sets the SqlQuery field's value. +func (s *SqlQueryDatasetAction) SetSqlQuery(v string) *SqlQueryDatasetAction { + s.SqlQuery = &v + return s +} + +type StartPipelineReprocessingInput struct { + _ struct{} `type:"structure"` + + // The end time (exclusive) of raw message data that is reprocessed. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The name of the pipeline on which to start reprocessing. + // + // PipelineName is a required field + PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The start time (inclusive) of raw message data that is reprocessed. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s StartPipelineReprocessingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartPipelineReprocessingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartPipelineReprocessingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartPipelineReprocessingInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *StartPipelineReprocessingInput) SetEndTime(v time.Time) *StartPipelineReprocessingInput { + s.EndTime = &v + return s +} + +// SetPipelineName sets the PipelineName field's value. 
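+// For example (an illustrative sketch, with svc an assumed *IoTAnalytics
+// client and dayStart an assumed time.Time value), a bounded one-day
+// reprocessing run could be requested as:
+//
+//    _, err := svc.StartPipelineReprocessing(&iotanalytics.StartPipelineReprocessingInput{
+//        PipelineName: aws.String("mypipeline"),
+//        StartTime:    aws.Time(dayStart),
+//        EndTime:      aws.Time(dayStart.Add(24 * time.Hour)),
+//    })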
+func (s *StartPipelineReprocessingInput) SetPipelineName(v string) *StartPipelineReprocessingInput { + s.PipelineName = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *StartPipelineReprocessingInput) SetStartTime(v time.Time) *StartPipelineReprocessingInput { + s.StartTime = &v + return s +} + +type StartPipelineReprocessingOutput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline reprocessing activity that was started. + ReprocessingId *string `locationName:"reprocessingId" type:"string"` +} + +// String returns the string representation +func (s StartPipelineReprocessingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartPipelineReprocessingOutput) GoString() string { + return s.String() +} + +// SetReprocessingId sets the ReprocessingId field's value. +func (s *StartPipelineReprocessingOutput) SetReprocessingId(v string) *StartPipelineReprocessingOutput { + s.ReprocessingId = &v + return s +} + +// A set of key/value pairs which are used to manage the resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The tag's value. + // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource whose tags you want to modify. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"20" type:"string" required:"true"` + + // The new or modified tags for the resource. + // + // Tags is a required field + Tags []*Tag `locationName:"tags" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
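+// For example (an illustrative sketch with a made-up ARN), an input that
+// passes this validation needs an ARN of at least 20 characters and at least
+// one tag:
+//
+//    input := &iotanalytics.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:iotanalytics:us-west-2:123456789012:channel/mychannel"),
+//        Tags: []*iotanalytics.Tag{
+//            {Key: aws.String("env"), Value: aws.String("dev")},
+//        },
+//    }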
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Information about the data set whose content generation triggers the new +// data set content generation. +type TriggeringDataset struct { + _ struct{} `type:"structure"` + + // The name of the data set whose content generation triggers the new data set + // content generation. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TriggeringDataset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TriggeringDataset) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TriggeringDataset) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TriggeringDataset"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *TriggeringDataset) SetName(v string) *TriggeringDataset { + s.Name = &v + return s +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource whose tags you want to remove. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"20" type:"string" required:"true"` + + // The keys of those tags which you want to remove. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the channel to be updated. + // + // ChannelName is a required field + ChannelName *string `location:"uri" locationName:"channelName" min:"1" type:"string" required:"true"` + + // Where channel data is stored. You may choose one of "serviceManagedS3" or + // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after creation of the channel. + ChannelStorage *ChannelStorage `locationName:"channelStorage" type:"structure"` + + // How long, in days, message data is kept for the channel. The retention period + // cannot be updated if the channel's S3 storage is customer-managed. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` +} + +// String returns the string representation +func (s UpdateChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateChannelInput"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.ChannelStorage != nil { + if err := s.ChannelStorage.Validate(); err != nil { + invalidParams.AddNested("ChannelStorage", err.(request.ErrInvalidParams)) + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *UpdateChannelInput) SetChannelName(v string) *UpdateChannelInput { + s.ChannelName = &v + return s +} + +// SetChannelStorage sets the ChannelStorage field's value. 
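+// Because each generated setter returns its receiver, calls chain; for
+// example (an illustrative sketch), a 30-day retention update can be
+// expressed as:
+//
+//    input := (&iotanalytics.UpdateChannelInput{}).
+//        SetChannelName("mychannel").
+//        SetRetentionPeriod((&iotanalytics.RetentionPeriod{}).SetNumberOfDays(30))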
+func (s *UpdateChannelInput) SetChannelStorage(v *ChannelStorage) *UpdateChannelInput {
+ s.ChannelStorage = v
+ return s
+}
+
+// SetRetentionPeriod sets the RetentionPeriod field's value.
+func (s *UpdateChannelInput) SetRetentionPeriod(v *RetentionPeriod) *UpdateChannelInput {
+ s.RetentionPeriod = v
+ return s
+}
+
+type UpdateChannelOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateChannelOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateChannelOutput) GoString() string {
+ return s.String()
+}
+
+type UpdateDatasetInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of "DatasetAction" objects.
+ //
+ // Actions is a required field
+ Actions []*DatasetAction `locationName:"actions" min:"1" type:"list" required:"true"`
+
+ // When data set contents are created, they are delivered to the destinations
+ // specified here.
+ ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"`
+
+ // The name of the data set to update.
+ //
+ // DatasetName is a required field
+ DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"`
+
+ // How long, in days, data set contents are kept for the data set.
+ RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"`
+
+ // A list of "DatasetTrigger" objects. The list can be empty or can contain
+ // up to five "DatasetTrigger" objects.
+ Triggers []*DatasetTrigger `locationName:"triggers" type:"list"`
+
+ // [Optional] How many versions of data set contents are kept. If not specified
+ // or set to null, only the latest version plus the latest succeeded version
+ // (if they are different) are kept for the time period specified by the "retentionPeriod"
+ // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)
+ VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDatasetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDatasetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
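+// For example (an illustrative sketch on an assumed *UpdateDatasetInput named
+// input), keeping the last ten content versions can be expressed as:
+//
+//    input.SetVersioningConfiguration(
+//        (&iotanalytics.VersioningConfiguration{}).SetMaxVersions(10))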
+func (s *UpdateDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDatasetInput"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) + } + if s.Actions != nil && len(s.Actions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Actions", 1)) + } + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ContentDeliveryRules != nil { + for i, v := range s.ContentDeliveryRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContentDeliveryRules", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + if s.Triggers != nil { + for i, v := range s.Triggers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Triggers", i), err.(request.ErrInvalidParams)) + } + } + } + if s.VersioningConfiguration != nil { + if err := s.VersioningConfiguration.Validate(); err != nil { + invalidParams.AddNested("VersioningConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *UpdateDatasetInput) SetActions(v []*DatasetAction) *UpdateDatasetInput { + s.Actions = v + return s +} + +// SetContentDeliveryRules sets the ContentDeliveryRules field's value. +func (s *UpdateDatasetInput) SetContentDeliveryRules(v []*DatasetContentDeliveryRule) *UpdateDatasetInput { + s.ContentDeliveryRules = v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *UpdateDatasetInput) SetDatasetName(v string) *UpdateDatasetInput { + s.DatasetName = &v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *UpdateDatasetInput) SetRetentionPeriod(v *RetentionPeriod) *UpdateDatasetInput { + s.RetentionPeriod = v + return s +} + +// SetTriggers sets the Triggers field's value. +func (s *UpdateDatasetInput) SetTriggers(v []*DatasetTrigger) *UpdateDatasetInput { + s.Triggers = v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *UpdateDatasetInput) SetVersioningConfiguration(v *VersioningConfiguration) *UpdateDatasetInput { + s.VersioningConfiguration = v + return s +} + +type UpdateDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDatasetOutput) GoString() string { + return s.String() +} + +type UpdateDatastoreInput struct { + _ struct{} `type:"structure"` + + // The name of the data store to be updated. + // + // DatastoreName is a required field + DatastoreName *string `location:"uri" locationName:"datastoreName" min:"1" type:"string" required:"true"` + + // Where data store data is stored. 
You may choose one of "serviceManagedS3" + // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". + // This cannot be changed after the data store is created. + DatastoreStorage *DatastoreStorage `locationName:"datastoreStorage" type:"structure"` + + // How long, in days, message data is kept for the data store. The retention + // period cannot be updated if the data store's S3 storage is customer-managed. + RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` +} + +// String returns the string representation +func (s UpdateDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDatastoreInput"} + if s.DatastoreName == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreName")) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + if s.DatastoreStorage != nil { + if err := s.DatastoreStorage.Validate(); err != nil { + invalidParams.AddNested("DatastoreStorage", err.(request.ErrInvalidParams)) + } + } + if s.RetentionPeriod != nil { + if err := s.RetentionPeriod.Validate(); err != nil { + invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *UpdateDatastoreInput) SetDatastoreName(v string) *UpdateDatastoreInput { + s.DatastoreName = &v + return s +} + +// SetDatastoreStorage sets the DatastoreStorage field's value. +func (s *UpdateDatastoreInput) SetDatastoreStorage(v *DatastoreStorage) *UpdateDatastoreInput { + s.DatastoreStorage = v + return s +} + +// SetRetentionPeriod sets the RetentionPeriod field's value. +func (s *UpdateDatastoreInput) SetRetentionPeriod(v *RetentionPeriod) *UpdateDatastoreInput { + s.RetentionPeriod = v + return s +} + +type UpdateDatastoreOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDatastoreOutput) GoString() string { + return s.String() +} + +type UpdatePipelineInput struct { + _ struct{} `type:"structure"` + + // A list of "PipelineActivity" objects. Activities perform transformations + // on your messages, such as removing, renaming or adding message attributes; + // filtering messages based on attribute values; invoking your Lambda functions + // on messages for advanced processing; or performing mathematical transformations + // to normalize device data. + // + // The list can be 2-25 PipelineActivity objects and must contain both a channel + // and a datastore activity. Each entry in the list must contain only one activity, + // for example: + // + // pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } }, ... + // ] + // + // PipelineActivities is a required field + PipelineActivities []*PipelineActivity `locationName:"pipelineActivities" min:"1" type:"list" required:"true"` + + // The name of the pipeline to update. 
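+ //
+ // As an illustrative sketch mirroring the JSON shown above (with channelAct,
+ // lambdaAct, and datastoreAct assumed to be previously built activities), the
+ // Go equivalent bookends the list with the channel and datastore entries:
+ //
+ //    activities := []*iotanalytics.PipelineActivity{
+ //        {Channel: channelAct},
+ //        {Lambda: lambdaAct},
+ //        {Datastore: datastoreAct},
+ //    }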
+ // + // PipelineName is a required field + PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePipelineInput"} + if s.PipelineActivities == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineActivities")) + } + if s.PipelineActivities != nil && len(s.PipelineActivities) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineActivities", 1)) + } + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.PipelineActivities != nil { + for i, v := range s.PipelineActivities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PipelineActivities", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPipelineActivities sets the PipelineActivities field's value. +func (s *UpdatePipelineInput) SetPipelineActivities(v []*PipelineActivity) *UpdatePipelineInput { + s.PipelineActivities = v + return s +} + +// SetPipelineName sets the PipelineName field's value. +func (s *UpdatePipelineInput) SetPipelineName(v string) *UpdatePipelineInput { + s.PipelineName = &v + return s +} + +type UpdatePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineOutput) GoString() string { + return s.String() +} + +// An instance of a variable to be passed to the "containerAction" execution. +// Each variable must have a name and a value given by one of "stringValue", +// "datasetContentVersionValue", or "outputFileUriValue". +type Variable struct { + _ struct{} `type:"structure"` + + // The value of the variable as a structure that specifies a data set content + // version. + DatasetContentVersionValue *DatasetContentVersionValue `locationName:"datasetContentVersionValue" type:"structure"` + + // The value of the variable as a double (numeric). + DoubleValue *float64 `locationName:"doubleValue" type:"double"` + + // The name of the variable. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The value of the variable as a structure that specifies an output file URI. + OutputFileUriValue *OutputFileUriValue `locationName:"outputFileUriValue" type:"structure"` + + // The value of the variable as a string. + StringValue *string `locationName:"stringValue" type:"string"` +} + +// String returns the string representation +func (s Variable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Variable) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Variable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Variable"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.DatasetContentVersionValue != nil { + if err := s.DatasetContentVersionValue.Validate(); err != nil { + invalidParams.AddNested("DatasetContentVersionValue", err.(request.ErrInvalidParams)) + } + } + if s.OutputFileUriValue != nil { + if err := s.OutputFileUriValue.Validate(); err != nil { + invalidParams.AddNested("OutputFileUriValue", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetContentVersionValue sets the DatasetContentVersionValue field's value. +func (s *Variable) SetDatasetContentVersionValue(v *DatasetContentVersionValue) *Variable { + s.DatasetContentVersionValue = v + return s +} + +// SetDoubleValue sets the DoubleValue field's value. +func (s *Variable) SetDoubleValue(v float64) *Variable { + s.DoubleValue = &v + return s +} + +// SetName sets the Name field's value. +func (s *Variable) SetName(v string) *Variable { + s.Name = &v + return s +} + +// SetOutputFileUriValue sets the OutputFileUriValue field's value. +func (s *Variable) SetOutputFileUriValue(v *OutputFileUriValue) *Variable { + s.OutputFileUriValue = v + return s +} + +// SetStringValue sets the StringValue field's value. +func (s *Variable) SetStringValue(v string) *Variable { + s.StringValue = &v + return s +} + +// Information about the versioning of data set contents. +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // How many versions of data set contents will be kept. The "unlimited" parameter + // must be false. + MaxVersions *int64 `locationName:"maxVersions" min:"1" type:"integer"` + + // If true, unlimited versions of data set contents will be kept. + Unlimited *bool `locationName:"unlimited" type:"boolean"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VersioningConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VersioningConfiguration"} + if s.MaxVersions != nil && *s.MaxVersions < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxVersions", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxVersions sets the MaxVersions field's value. +func (s *VersioningConfiguration) SetMaxVersions(v int64) *VersioningConfiguration { + s.MaxVersions = &v + return s +} + +// SetUnlimited sets the Unlimited field's value. 
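+// For example (an illustrative sketch), keeping every version means setting
+// only Unlimited, since MaxVersions requires "unlimited" to be false:
+//
+//    vc := (&iotanalytics.VersioningConfiguration{}).SetUnlimited(true)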
+func (s *VersioningConfiguration) SetUnlimited(v bool) *VersioningConfiguration { + s.Unlimited = &v + return s +} + +const ( + // ChannelStatusCreating is a ChannelStatus enum value + ChannelStatusCreating = "CREATING" + + // ChannelStatusActive is a ChannelStatus enum value + ChannelStatusActive = "ACTIVE" + + // ChannelStatusDeleting is a ChannelStatus enum value + ChannelStatusDeleting = "DELETING" +) + +const ( + // ComputeTypeAcu1 is a ComputeType enum value + ComputeTypeAcu1 = "ACU_1" + + // ComputeTypeAcu2 is a ComputeType enum value + ComputeTypeAcu2 = "ACU_2" +) + +const ( + // DatasetActionTypeQuery is a DatasetActionType enum value + DatasetActionTypeQuery = "QUERY" + + // DatasetActionTypeContainer is a DatasetActionType enum value + DatasetActionTypeContainer = "CONTAINER" +) + +const ( + // DatasetContentStateCreating is a DatasetContentState enum value + DatasetContentStateCreating = "CREATING" + + // DatasetContentStateSucceeded is a DatasetContentState enum value + DatasetContentStateSucceeded = "SUCCEEDED" + + // DatasetContentStateFailed is a DatasetContentState enum value + DatasetContentStateFailed = "FAILED" +) + +const ( + // DatasetStatusCreating is a DatasetStatus enum value + DatasetStatusCreating = "CREATING" + + // DatasetStatusActive is a DatasetStatus enum value + DatasetStatusActive = "ACTIVE" + + // DatasetStatusDeleting is a DatasetStatus enum value + DatasetStatusDeleting = "DELETING" +) + +const ( + // DatastoreStatusCreating is a DatastoreStatus enum value + DatastoreStatusCreating = "CREATING" + + // DatastoreStatusActive is a DatastoreStatus enum value + DatastoreStatusActive = "ACTIVE" + + // DatastoreStatusDeleting is a DatastoreStatus enum value + DatastoreStatusDeleting = "DELETING" +) + +const ( + // LoggingLevelError is a LoggingLevel enum value + LoggingLevelError = "ERROR" +) + +const ( + // ReprocessingStatusRunning is a ReprocessingStatus enum value + ReprocessingStatusRunning = "RUNNING" + + // ReprocessingStatusSucceeded is a ReprocessingStatus enum value + ReprocessingStatusSucceeded = "SUCCEEDED" + + // ReprocessingStatusCancelled is a ReprocessingStatus enum value + ReprocessingStatusCancelled = "CANCELLED" + + // ReprocessingStatusFailed is a ReprocessingStatus enum value + ReprocessingStatusFailed = "FAILED" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/doc.go new file mode 100644 index 00000000000..9ea1c7e8c98 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/doc.go @@ -0,0 +1,51 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package iotanalytics provides the client and types for making API +// requests to AWS IoT Analytics. +// +// AWS IoT Analytics allows you to collect large amounts of device data, process +// messages, and store them. You can then query the data and run sophisticated +// analytics on it. AWS IoT Analytics enables advanced data exploration through +// integration with Jupyter Notebooks and data visualization through integration +// with Amazon QuickSight. +// +// Traditional analytics and business intelligence tools are designed to process +// structured data. IoT data often comes from devices that record noisy processes +// (such as temperature, motion, or sound). 
As a result the data from these +// devices can have significant gaps, corrupted messages, and false readings +// that must be cleaned up before analysis can occur. Also, IoT data is often +// only meaningful in the context of other data from external sources. +// +// AWS IoT Analytics automates the steps required to analyze data from IoT devices. +// AWS IoT Analytics filters, transforms, and enriches IoT data before storing +// it in a time-series data store for analysis. You can set up the service to +// collect only the data you need from your devices, apply mathematical transforms +// to process the data, and enrich the data with device-specific metadata such +// as device type and location before storing it. Then, you can analyze your +// data by running queries using the built-in SQL query engine, or perform more +// complex analytics and machine learning inference. AWS IoT Analytics includes +// pre-built models for common IoT use cases so you can answer questions like +// which devices are about to fail or which customers are at risk of abandoning +// their wearable devices. +// +// See https://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27 for more information on this service. +// +// See iotanalytics package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/iotanalytics/ +// +// Using the Client +// +// To contact AWS IoT Analytics with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS IoT Analytics client IoTAnalytics for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/iotanalytics/#New +package iotanalytics diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/errors.go new file mode 100644 index 00000000000..790e802f3bf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/errors.go @@ -0,0 +1,48 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iotanalytics + +const ( + + // ErrCodeInternalFailureException for service response error code + // "InternalFailureException". + // + // There was an internal failure. + ErrCodeInternalFailureException = "InternalFailureException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // The request was not valid. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The command caused an internal limit to be exceeded. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // A resource with the same name already exists. + ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". 
+ //
+ // A resource with the specified name could not be found.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeServiceUnavailableException for service response error code
+ // "ServiceUnavailableException".
+ //
+ // The service is temporarily unavailable.
+ ErrCodeServiceUnavailableException = "ServiceUnavailableException"
+
+ // ErrCodeThrottlingException for service response error code
+ // "ThrottlingException".
+ //
+ // The request was denied due to request throttling.
+ ErrCodeThrottlingException = "ThrottlingException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go
new file mode 100644
index 00000000000..97b51926b97
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iotanalytics
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// IoTAnalytics provides the API operation methods for making requests to
+// AWS IoT Analytics. See this package's package overview docs
+// for details on the service.
+//
+// IoTAnalytics methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type IoTAnalytics struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "iotanalytics" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "IoTAnalytics" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the IoTAnalytics client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an IoTAnalytics client from just a session.
+// svc := iotanalytics.New(mySession)
+//
+// // Create an IoTAnalytics client with additional configuration
+// svc := iotanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTAnalytics {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "iotanalytics"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes, and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTAnalytics { + svc := &IoTAnalytics{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2017-11-27", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IoTAnalytics operation and runs any +// custom request initialization. +func (c *IoTAnalytics) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go new file mode 100644 index 00000000000..954cf1a17dc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go @@ -0,0 +1,4734 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iotevents + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateDetectorModel = "CreateDetectorModel" + +// CreateDetectorModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateDetectorModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDetectorModel for more information on using the CreateDetectorModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDetectorModelRequest method. 
+// req, resp := client.CreateDetectorModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/CreateDetectorModel +func (c *IoTEvents) CreateDetectorModelRequest(input *CreateDetectorModelInput) (req *request.Request, output *CreateDetectorModelOutput) { + op := &request.Operation{ + Name: opCreateDetectorModel, + HTTPMethod: "POST", + HTTPPath: "/detector-models", + } + + if input == nil { + input = &CreateDetectorModelInput{} + } + + output = &CreateDetectorModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDetectorModel API operation for AWS IoT Events. +// +// Creates a detector model. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation CreateDetectorModel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// A limit was exceeded. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/CreateDetectorModel +func (c *IoTEvents) CreateDetectorModel(input *CreateDetectorModelInput) (*CreateDetectorModelOutput, error) { + req, out := c.CreateDetectorModelRequest(input) + return out, req.Send() +} + +// CreateDetectorModelWithContext is the same as CreateDetectorModel with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDetectorModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) CreateDetectorModelWithContext(ctx aws.Context, input *CreateDetectorModelInput, opts ...request.Option) (*CreateDetectorModelOutput, error) { + req, out := c.CreateDetectorModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateInput = "CreateInput" + +// CreateInputRequest generates a "aws/request.Request" representing the +// client's request for the CreateInput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInput for more information on using the CreateInput +// API call, and error handling. 
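+//
+// As an illustrative sketch (not part of the generated documentation, with
+// svc an assumed *IoTEvents client), a returned error can be inspected by
+// type-asserting to awserr.Error from the aws/awserr package:
+//
+//    _, err := svc.CreateInput(params)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case iotevents.ErrCodeResourceAlreadyExistsException:
+//            // an input with this name already exists
+//        default:
+//            fmt.Println(aerr.Error())
+//        }
+//    }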
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateInputRequest method. +// req, resp := client.CreateInputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/CreateInput +func (c *IoTEvents) CreateInputRequest(input *CreateInputInput) (req *request.Request, output *CreateInputOutput) { + op := &request.Operation{ + Name: opCreateInput, + HTTPMethod: "POST", + HTTPPath: "/inputs", + } + + if input == nil { + input = &CreateInputInput{} + } + + output = &CreateInputOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInput API operation for AWS IoT Events. +// +// Creates an input. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation CreateInput for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/CreateInput +func (c *IoTEvents) CreateInput(input *CreateInputInput) (*CreateInputOutput, error) { + req, out := c.CreateInputRequest(input) + return out, req.Send() +} + +// CreateInputWithContext is the same as CreateInput with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) CreateInputWithContext(ctx aws.Context, input *CreateInputInput, opts ...request.Option) (*CreateInputOutput, error) { + req, out := c.CreateInputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDetectorModel = "DeleteDetectorModel" + +// DeleteDetectorModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDetectorModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDetectorModel for more information on using the DeleteDetectorModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDetectorModelRequest method. +// req, resp := client.DeleteDetectorModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DeleteDetectorModel +func (c *IoTEvents) DeleteDetectorModelRequest(input *DeleteDetectorModelInput) (req *request.Request, output *DeleteDetectorModelOutput) { + op := &request.Operation{ + Name: opDeleteDetectorModel, + HTTPMethod: "DELETE", + HTTPPath: "/detector-models/{detectorModelName}", + } + + if input == nil { + input = &DeleteDetectorModelInput{} + } + + output = &DeleteDetectorModelOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDetectorModel API operation for AWS IoT Events. +// +// Deletes a detector model. Any active instances of the detector model are +// also deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation DeleteDetectorModel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DeleteDetectorModel +func (c *IoTEvents) DeleteDetectorModel(input *DeleteDetectorModelInput) (*DeleteDetectorModelOutput, error) { + req, out := c.DeleteDetectorModelRequest(input) + return out, req.Send() +} + +// DeleteDetectorModelWithContext is the same as DeleteDetectorModel with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDetectorModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) DeleteDetectorModelWithContext(ctx aws.Context, input *DeleteDetectorModelInput, opts ...request.Option) (*DeleteDetectorModelOutput, error) { + req, out := c.DeleteDetectorModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteInput = "DeleteInput" + +// DeleteInputRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
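The `*Request` variants exist precisely for the "custom logic before Send" case the generated comments mention. A sketch, reusing the `svc` client from the earlier example; the trace header is hypothetical:

```go
// Build the request without sending it, mutate it, then Send.
req, resp := svc.DeleteDetectorModelRequest(&iotevents.DeleteDetectorModelInput{
	DetectorModelName: aws.String("example-model"), // placeholder
})
req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // illustrative custom header
if err := req.Send(); err != nil {
	log.Println("delete failed:", err)
	return
}
fmt.Println(resp) // resp is only valid once Send returns without error
```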
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInput for more information on using the DeleteInput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInputRequest method. +// req, resp := client.DeleteInputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DeleteInput +func (c *IoTEvents) DeleteInputRequest(input *DeleteInputInput) (req *request.Request, output *DeleteInputOutput) { + op := &request.Operation{ + Name: opDeleteInput, + HTTPMethod: "DELETE", + HTTPPath: "/inputs/{inputName}", + } + + if input == nil { + input = &DeleteInputInput{} + } + + output = &DeleteInputOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteInput API operation for AWS IoT Events. +// +// Deletes an input. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation DeleteInput for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DeleteInput +func (c *IoTEvents) DeleteInput(input *DeleteInputInput) (*DeleteInputOutput, error) { + req, out := c.DeleteInputRequest(input) + return out, req.Send() +} + +// DeleteInputWithContext is the same as DeleteInput with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) DeleteInputWithContext(ctx aws.Context, input *DeleteInputInput, opts ...request.Option) (*DeleteInputOutput, error) { + req, out := c.DeleteInputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDetectorModel = "DescribeDetectorModel" + +// DescribeDetectorModelRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDetectorModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDetectorModel for more information on using the DescribeDetectorModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDetectorModelRequest method. +// req, resp := client.DescribeDetectorModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeDetectorModel +func (c *IoTEvents) DescribeDetectorModelRequest(input *DescribeDetectorModelInput) (req *request.Request, output *DescribeDetectorModelOutput) { + op := &request.Operation{ + Name: opDescribeDetectorModel, + HTTPMethod: "GET", + HTTPPath: "/detector-models/{detectorModelName}", + } + + if input == nil { + input = &DescribeDetectorModelInput{} + } + + output = &DescribeDetectorModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDetectorModel API operation for AWS IoT Events. +// +// Describes a detector model. If the "version" parameter is not specified, +// information about the latest version is returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation DescribeDetectorModel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeDetectorModel +func (c *IoTEvents) DescribeDetectorModel(input *DescribeDetectorModelInput) (*DescribeDetectorModelOutput, error) { + req, out := c.DescribeDetectorModelRequest(input) + return out, req.Send() +} + +// DescribeDetectorModelWithContext is the same as DescribeDetectorModel with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDetectorModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) DescribeDetectorModelWithContext(ctx aws.Context, input *DescribeDetectorModelInput, opts ...request.Option) (*DescribeDetectorModelOutput, error) { + req, out := c.DescribeDetectorModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeInput = "DescribeInput" + +// DescribeInputRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInput for more information on using the DescribeInput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInputRequest method. +// req, resp := client.DescribeInputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeInput +func (c *IoTEvents) DescribeInputRequest(input *DescribeInputInput) (req *request.Request, output *DescribeInputOutput) { + op := &request.Operation{ + Name: opDescribeInput, + HTTPMethod: "GET", + HTTPPath: "/inputs/{inputName}", + } + + if input == nil { + input = &DescribeInputInput{} + } + + output = &DescribeInputOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInput API operation for AWS IoT Events. +// +// Describes an input. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation DescribeInput for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeInput +func (c *IoTEvents) DescribeInput(input *DescribeInputInput) (*DescribeInputOutput, error) { + req, out := c.DescribeInputRequest(input) + return out, req.Send() +} + +// DescribeInputWithContext is the same as DescribeInput with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) DescribeInputWithContext(ctx aws.Context, input *DescribeInputInput, opts ...request.Option) (*DescribeInputOutput, error) { + req, out := c.DescribeInputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeLoggingOptions = "DescribeLoggingOptions" + +// DescribeLoggingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoggingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLoggingOptions for more information on using the DescribeLoggingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLoggingOptionsRequest method. +// req, resp := client.DescribeLoggingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeLoggingOptions +func (c *IoTEvents) DescribeLoggingOptionsRequest(input *DescribeLoggingOptionsInput) (req *request.Request, output *DescribeLoggingOptionsOutput) { + op := &request.Operation{ + Name: opDescribeLoggingOptions, + HTTPMethod: "GET", + HTTPPath: "/logging", + } + + if input == nil { + input = &DescribeLoggingOptionsInput{} + } + + output = &DescribeLoggingOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLoggingOptions API operation for AWS IoT Events. +// +// Retrieves the current settings of the AWS IoT Events logging options. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation DescribeLoggingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The requested operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/DescribeLoggingOptions +func (c *IoTEvents) DescribeLoggingOptions(input *DescribeLoggingOptionsInput) (*DescribeLoggingOptionsOutput, error) { + req, out := c.DescribeLoggingOptionsRequest(input) + return out, req.Send() +} + +// DescribeLoggingOptionsWithContext is the same as DescribeLoggingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoggingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
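`aws.Context` is satisfied by a standard `context.Context`, so the usual cancellation and deadline idioms apply to every `*WithContext` method in this file. A sketch with an illustrative five-second deadline, reusing the `svc` client from above (add `context` and `time` to the imports):

```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

out, err := svc.DescribeLoggingOptionsWithContext(ctx, &iotevents.DescribeLoggingOptionsInput{})
if err != nil {
	log.Println("describe logging options failed:", err) // surfaces context deadline errors too
	return
}
fmt.Println(out.LoggingOptions) // current level, role ARN, and enabled flag
```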
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) DescribeLoggingOptionsWithContext(ctx aws.Context, input *DescribeLoggingOptionsInput, opts ...request.Option) (*DescribeLoggingOptionsOutput, error) { + req, out := c.DescribeLoggingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDetectorModelVersions = "ListDetectorModelVersions" + +// ListDetectorModelVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDetectorModelVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDetectorModelVersions for more information on using the ListDetectorModelVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDetectorModelVersionsRequest method. +// req, resp := client.ListDetectorModelVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListDetectorModelVersions +func (c *IoTEvents) ListDetectorModelVersionsRequest(input *ListDetectorModelVersionsInput) (req *request.Request, output *ListDetectorModelVersionsOutput) { + op := &request.Operation{ + Name: opListDetectorModelVersions, + HTTPMethod: "GET", + HTTPPath: "/detector-models/{detectorModelName}/versions", + } + + if input == nil { + input = &ListDetectorModelVersionsInput{} + } + + output = &ListDetectorModelVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDetectorModelVersions API operation for AWS IoT Events. +// +// Lists all the versions of a detector model. Only the metadata associated +// with each detector model version is returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation ListDetectorModelVersions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. 
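The list operations in this service page through results with a `nextToken`. A manual pagination sketch for ListDetectorModelVersions, reusing `svc`; the model name and page size are placeholders:

```go
input := &iotevents.ListDetectorModelVersionsInput{
	DetectorModelName: aws.String("example-model"),
	MaxResults:        aws.Int64(25),
}
for {
	page, err := svc.ListDetectorModelVersions(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range page.DetectorModelVersionSummaries {
		fmt.Println(aws.StringValue(v.DetectorModelVersion), aws.StringValue(v.Status))
	}
	if page.NextToken == nil {
		break // no more pages
	}
	input.NextToken = page.NextToken
}
```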
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListDetectorModelVersions +func (c *IoTEvents) ListDetectorModelVersions(input *ListDetectorModelVersionsInput) (*ListDetectorModelVersionsOutput, error) { + req, out := c.ListDetectorModelVersionsRequest(input) + return out, req.Send() +} + +// ListDetectorModelVersionsWithContext is the same as ListDetectorModelVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListDetectorModelVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) ListDetectorModelVersionsWithContext(ctx aws.Context, input *ListDetectorModelVersionsInput, opts ...request.Option) (*ListDetectorModelVersionsOutput, error) { + req, out := c.ListDetectorModelVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDetectorModels = "ListDetectorModels" + +// ListDetectorModelsRequest generates a "aws/request.Request" representing the +// client's request for the ListDetectorModels operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDetectorModels for more information on using the ListDetectorModels +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDetectorModelsRequest method. +// req, resp := client.ListDetectorModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListDetectorModels +func (c *IoTEvents) ListDetectorModelsRequest(input *ListDetectorModelsInput) (req *request.Request, output *ListDetectorModelsOutput) { + op := &request.Operation{ + Name: opListDetectorModels, + HTTPMethod: "GET", + HTTPPath: "/detector-models", + } + + if input == nil { + input = &ListDetectorModelsInput{} + } + + output = &ListDetectorModelsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDetectorModels API operation for AWS IoT Events. +// +// Lists the detector models you have created. Only the metadata associated +// with each detector model is returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation ListDetectorModels for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. 
+// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListDetectorModels +func (c *IoTEvents) ListDetectorModels(input *ListDetectorModelsInput) (*ListDetectorModelsOutput, error) { + req, out := c.ListDetectorModelsRequest(input) + return out, req.Send() +} + +// ListDetectorModelsWithContext is the same as ListDetectorModels with the addition of +// the ability to pass a context and additional request options. +// +// See ListDetectorModels for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) ListDetectorModelsWithContext(ctx aws.Context, input *ListDetectorModelsInput, opts ...request.Option) (*ListDetectorModelsOutput, error) { + req, out := c.ListDetectorModelsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListInputs = "ListInputs" + +// ListInputsRequest generates a "aws/request.Request" representing the +// client's request for the ListInputs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInputs for more information on using the ListInputs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInputsRequest method. +// req, resp := client.ListInputsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListInputs +func (c *IoTEvents) ListInputsRequest(input *ListInputsInput) (req *request.Request, output *ListInputsOutput) { + op := &request.Operation{ + Name: opListInputs, + HTTPMethod: "GET", + HTTPPath: "/inputs", + } + + if input == nil { + input = &ListInputsInput{} + } + + output = &ListInputsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInputs API operation for AWS IoT Events. +// +// Lists the inputs you have created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation ListInputs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. 
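ListInputs enumerates inputs created with the CreateInput operation shown earlier in this diff. A combined sketch, reusing `svc`; the input name and JSON path are placeholders:

```go
// Create an input exposing one payload attribute to detector conditions.
_, err := svc.CreateInput(&iotevents.CreateInputInput{
	InputName: aws.String("exampleInput"),
	InputDefinition: &iotevents.InputDefinition{
		Attributes: []*iotevents.Attribute{
			{JsonPath: aws.String("temperature")},
		},
	},
})
if err != nil {
	log.Fatal(err)
}

// Then list what exists.
list, err := svc.ListInputs(&iotevents.ListInputsInput{})
if err != nil {
	log.Fatal(err)
}
for _, s := range list.InputSummaries {
	fmt.Println(aws.StringValue(s.InputName), aws.StringValue(s.Status))
}
```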
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListInputs +func (c *IoTEvents) ListInputs(input *ListInputsInput) (*ListInputsOutput, error) { + req, out := c.ListInputsRequest(input) + return out, req.Send() +} + +// ListInputsWithContext is the same as ListInputs with the addition of +// the ability to pass a context and additional request options. +// +// See ListInputs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) ListInputsWithContext(ctx aws.Context, input *ListInputsInput, opts ...request.Option) (*ListInputsOutput, error) { + req, out := c.ListInputsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListTagsForResource +func (c *IoTEvents) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS IoT Events. +// +// Lists the tags (metadata) you have assigned to the resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. 
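A sketch of reading tags back with ListTagsForResource, reusing `svc`; the resource ARN is a placeholder:

```go
tags, err := svc.ListTagsForResource(&iotevents.ListTagsForResourceInput{
	ResourceArn: aws.String("arn:aws:iotevents:us-east-1:123456789012:detectorModel/example-model"),
})
if err != nil {
	log.Fatal(err)
}
for _, t := range tags.Tags {
	fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
}
```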
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/ListTagsForResource +func (c *IoTEvents) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutLoggingOptions = "PutLoggingOptions" + +// PutLoggingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the PutLoggingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLoggingOptions for more information on using the PutLoggingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLoggingOptionsRequest method. +// req, resp := client.PutLoggingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/PutLoggingOptions +func (c *IoTEvents) PutLoggingOptionsRequest(input *PutLoggingOptionsInput) (req *request.Request, output *PutLoggingOptionsOutput) { + op := &request.Operation{ + Name: opPutLoggingOptions, + HTTPMethod: "PUT", + HTTPPath: "/logging", + } + + if input == nil { + input = &PutLoggingOptionsInput{} + } + + output = &PutLoggingOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutLoggingOptions API operation for AWS IoT Events. +// +// Sets or updates the AWS IoT Events logging options. +// +// If you update the value of any "loggingOptions" field, it takes up to one +// minute for the change to take effect. Also, if you change the policy attached +// to the role you specified in the "roleArn" field (for example, to correct +// an invalid policy) it takes up to five minutes for that change to take effect. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation PutLoggingOptions for usage and error information. 
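A sketch of the corresponding PutLoggingOptions call, reusing `svc`; the role ARN is a placeholder and must grant AWS IoT Events permission to deliver the logs:

```go
_, err := svc.PutLoggingOptions(&iotevents.PutLoggingOptionsInput{
	LoggingOptions: &iotevents.LoggingOptions{
		Enabled: aws.Bool(true),
		Level:   aws.String(iotevents.LoggingLevelInfo), // ERROR, INFO, or DEBUG
		RoleArn: aws.String("arn:aws:iam::123456789012:role/example-logging-role"),
	},
})
if err != nil {
	log.Fatal(err)
}
// As the comments above note, the change can take up to a minute to take effect.
```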
+// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The requested operation is not supported. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/PutLoggingOptions +func (c *IoTEvents) PutLoggingOptions(input *PutLoggingOptionsInput) (*PutLoggingOptionsOutput, error) { + req, out := c.PutLoggingOptionsRequest(input) + return out, req.Send() +} + +// PutLoggingOptionsWithContext is the same as PutLoggingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See PutLoggingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) PutLoggingOptionsWithContext(ctx aws.Context, input *PutLoggingOptionsInput, opts ...request.Option) (*PutLoggingOptionsOutput, error) { + req, out := c.PutLoggingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/TagResource +func (c *IoTEvents) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS IoT Events. +// +// Adds to or modifies the tags of the given resource. Tags are metadata that +// can be used to manage a resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// A limit was exceeded. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/TagResource +func (c *IoTEvents) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UntagResource +func (c *IoTEvents) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS IoT Events. +// +// Removes the given tags (metadata) from the resource. 
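TagResource and UntagResource round-trip naturally; removal is by key only. A sketch reusing `svc`, with placeholder ARN and tag values:

```go
arn := aws.String("arn:aws:iotevents:us-east-1:123456789012:input/exampleInput")

_, err := svc.TagResource(&iotevents.TagResourceInput{
	ResourceArn: arn,
	Tags: []*iotevents.Tag{
		{Key: aws.String("env"), Value: aws.String("staging")},
	},
})
if err != nil {
	log.Fatal(err)
}

_, err = svc.UntagResource(&iotevents.UntagResourceInput{
	ResourceArn: arn,
	TagKeys:     []*string{aws.String("env")}, // keys only, not values
})
if err != nil {
	log.Fatal(err)
}
```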
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UntagResource +func (c *IoTEvents) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDetectorModel = "UpdateDetectorModel" + +// UpdateDetectorModelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDetectorModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDetectorModel for more information on using the UpdateDetectorModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDetectorModelRequest method. +// req, resp := client.UpdateDetectorModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UpdateDetectorModel +func (c *IoTEvents) UpdateDetectorModelRequest(input *UpdateDetectorModelInput) (req *request.Request, output *UpdateDetectorModelOutput) { + op := &request.Operation{ + Name: opUpdateDetectorModel, + HTTPMethod: "POST", + HTTPPath: "/detector-models/{detectorModelName}", + } + + if input == nil { + input = &UpdateDetectorModelInput{} + } + + output = &UpdateDetectorModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDetectorModel API operation for AWS IoT Events. +// +// Updates a detector model. 
Detectors (instances) spawned by the previous version +// are deleted and then re-created as new inputs arrive. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation UpdateDetectorModel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UpdateDetectorModel +func (c *IoTEvents) UpdateDetectorModel(input *UpdateDetectorModelInput) (*UpdateDetectorModelOutput, error) { + req, out := c.UpdateDetectorModelRequest(input) + return out, req.Send() +} + +// UpdateDetectorModelWithContext is the same as UpdateDetectorModel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDetectorModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) UpdateDetectorModelWithContext(ctx aws.Context, input *UpdateDetectorModelInput, opts ...request.Option) (*UpdateDetectorModelOutput, error) { + req, out := c.UpdateDetectorModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateInput = "UpdateInput" + +// UpdateInputRequest generates a "aws/request.Request" representing the +// client's request for the UpdateInput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateInput for more information on using the UpdateInput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateInputRequest method. 
+// req, resp := client.UpdateInputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UpdateInput +func (c *IoTEvents) UpdateInputRequest(input *UpdateInputInput) (req *request.Request, output *UpdateInputOutput) { + op := &request.Operation{ + Name: opUpdateInput, + HTTPMethod: "PUT", + HTTPPath: "/inputs/{inputName}", + } + + if input == nil { + input = &UpdateInputInput{} + } + + output = &UpdateInputOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateInput API operation for AWS IoT Events. +// +// Updates an input. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT Events's +// API operation UpdateInput for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request was invalid. +// +// * ErrCodeThrottlingException "ThrottlingException" +// The request could not be completed due to throttling. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource was not found. +// +// * ErrCodeInternalFailureException "InternalFailureException" +// An internal failure occurred. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// The service is currently unavailable. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/UpdateInput +func (c *IoTEvents) UpdateInput(input *UpdateInputInput) (*UpdateInputOutput, error) { + req, out := c.UpdateInputRequest(input) + return out, req.Send() +} + +// UpdateInputWithContext is the same as UpdateInput with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateInput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTEvents) UpdateInputWithContext(ctx aws.Context, input *UpdateInputInput, opts ...request.Option) (*UpdateInputOutput, error) { + req, out := c.UpdateInputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Sends an IoT Events input, passing in information about the detector model +// instance and the event which triggered the action. +type Action struct { + _ struct{} `type:"structure"` + + // The name of the AWS IoT Events input where the data is sent. + // + // InputName is a required field + InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Action"} + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputName sets the InputName field's value. +func (s *Action) SetInputName(v string) *Action { + s.InputName = &v + return s +} + +// An action to be performed when the "condition" is TRUE. +type ActionData struct { + _ struct{} `type:"structure"` + + // Information needed to clear the timer. + ClearTimer *ClearTimerAction `locationName:"clearTimer" type:"structure"` + + // Sends information about the detector model instance and the event which triggered + // the action to a Kinesis Data Firehose delivery stream. + Firehose *FirehoseAction `locationName:"firehose" type:"structure"` + + // Sends an IoT Events input, passing in information about the detector model + // instance and the event which triggered the action. + IotEvents *Action `locationName:"iotEvents" type:"structure"` + + // Publishes an MQTT message with the given topic to the AWS IoT message broker. + IotTopicPublish *IotTopicPublishAction `locationName:"iotTopicPublish" type:"structure"` + + // Calls an AWS Lambda function, passing in information about the detector model + // instance and the event which triggered the action. + Lambda *LambdaAction `locationName:"lambda" type:"structure"` + + // Information needed to reset the timer. + ResetTimer *ResetTimerAction `locationName:"resetTimer" type:"structure"` + + // Information needed to set the timer. + SetTimer *SetTimerAction `locationName:"setTimer" type:"structure"` + + // Sets a variable to a specified value. + SetVariable *SetVariableAction `locationName:"setVariable" type:"structure"` + + // Sends an Amazon SNS message. + Sns *SNSTopicPublishAction `locationName:"sns" type:"structure"` + + // Sends information about the detector model instance and the event which triggered + // the action to an Amazon SQS queue. + Sqs *SqsAction `locationName:"sqs" type:"structure"` +} + +// String returns the string representation +func (s ActionData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ActionData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionData"} + if s.ClearTimer != nil { + if err := s.ClearTimer.Validate(); err != nil { + invalidParams.AddNested("ClearTimer", err.(request.ErrInvalidParams)) + } + } + if s.Firehose != nil { + if err := s.Firehose.Validate(); err != nil { + invalidParams.AddNested("Firehose", err.(request.ErrInvalidParams)) + } + } + if s.IotEvents != nil { + if err := s.IotEvents.Validate(); err != nil { + invalidParams.AddNested("IotEvents", err.(request.ErrInvalidParams)) + } + } + if s.IotTopicPublish != nil { + if err := s.IotTopicPublish.Validate(); err != nil { + invalidParams.AddNested("IotTopicPublish", err.(request.ErrInvalidParams)) + } + } + if s.Lambda != nil { + if err := s.Lambda.Validate(); err != nil { + invalidParams.AddNested("Lambda", err.(request.ErrInvalidParams)) + } + } + if s.ResetTimer != nil { + if err := s.ResetTimer.Validate(); err != nil { + invalidParams.AddNested("ResetTimer", err.(request.ErrInvalidParams)) + } + } + if s.SetTimer != nil { + if err := s.SetTimer.Validate(); err != nil { + invalidParams.AddNested("SetTimer", err.(request.ErrInvalidParams)) + } + } + if s.SetVariable != nil { + if err := s.SetVariable.Validate(); err != nil { + invalidParams.AddNested("SetVariable", err.(request.ErrInvalidParams)) + } + } + if s.Sns != nil { + if err := s.Sns.Validate(); err != nil { + invalidParams.AddNested("Sns", err.(request.ErrInvalidParams)) + } + } + if s.Sqs != nil { + if err := s.Sqs.Validate(); err != nil { + invalidParams.AddNested("Sqs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClearTimer sets the ClearTimer field's value. +func (s *ActionData) SetClearTimer(v *ClearTimerAction) *ActionData { + s.ClearTimer = v + return s +} + +// SetFirehose sets the Firehose field's value. +func (s *ActionData) SetFirehose(v *FirehoseAction) *ActionData { + s.Firehose = v + return s +} + +// SetIotEvents sets the IotEvents field's value. +func (s *ActionData) SetIotEvents(v *Action) *ActionData { + s.IotEvents = v + return s +} + +// SetIotTopicPublish sets the IotTopicPublish field's value. +func (s *ActionData) SetIotTopicPublish(v *IotTopicPublishAction) *ActionData { + s.IotTopicPublish = v + return s +} + +// SetLambda sets the Lambda field's value. +func (s *ActionData) SetLambda(v *LambdaAction) *ActionData { + s.Lambda = v + return s +} + +// SetResetTimer sets the ResetTimer field's value. +func (s *ActionData) SetResetTimer(v *ResetTimerAction) *ActionData { + s.ResetTimer = v + return s +} + +// SetSetTimer sets the SetTimer field's value. +func (s *ActionData) SetSetTimer(v *SetTimerAction) *ActionData { + s.SetTimer = v + return s +} + +// SetSetVariable sets the SetVariable field's value. +func (s *ActionData) SetSetVariable(v *SetVariableAction) *ActionData { + s.SetVariable = v + return s +} + +// SetSns sets the Sns field's value. +func (s *ActionData) SetSns(v *SNSTopicPublishAction) *ActionData { + s.Sns = v + return s +} + +// SetSqs sets the Sqs field's value. +func (s *ActionData) SetSqs(v *SqsAction) *ActionData { + s.Sqs = v + return s +} + +// The attributes from the JSON payload that are made available by the input. +// Inputs are derived from messages sent to the AWS IoT Events system using +// BatchPutMessage. 
Each such message contains a JSON payload, and those attributes +// (and their paired values) specified here are available for use in the condition +// expressions used by detectors. +type Attribute struct { + _ struct{} `type:"structure"` + + // An expression that specifies an attribute-value pair in a JSON structure. + // Use this to specify an attribute from the JSON payload that is made available + // by the input. Inputs are derived from messages sent to the AWS IoT Events + // system (BatchPutMessage). Each such message contains a JSON payload, and + // the attribute (and its paired value) specified here are available for use + // in the "condition" expressions used by detectors. + // + // Syntax: .... + // + // JsonPath is a required field + JsonPath *string `locationName:"jsonPath" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Attribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Attribute"} + if s.JsonPath == nil { + invalidParams.Add(request.NewErrParamRequired("JsonPath")) + } + if s.JsonPath != nil && len(*s.JsonPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JsonPath", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJsonPath sets the JsonPath field's value. +func (s *Attribute) SetJsonPath(v string) *Attribute { + s.JsonPath = &v + return s +} + +// Information needed to clear the timer. +type ClearTimerAction struct { + _ struct{} `type:"structure"` + + // The name of the timer to clear. + // + // TimerName is a required field + TimerName *string `locationName:"timerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ClearTimerAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClearTimerAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ClearTimerAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ClearTimerAction"} + if s.TimerName == nil { + invalidParams.Add(request.NewErrParamRequired("TimerName")) + } + if s.TimerName != nil && len(*s.TimerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TimerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTimerName sets the TimerName field's value. +func (s *ClearTimerAction) SetTimerName(v string) *ClearTimerAction { + s.TimerName = &v + return s +} + +type CreateDetectorModelInput struct { + _ struct{} `type:"structure"` + + // Information that defines how the detectors operate. + // + // DetectorModelDefinition is a required field + DetectorModelDefinition *DetectorModelDefinition `locationName:"detectorModelDefinition" type:"structure" required:"true"` + + // A brief description of the detector model. + DetectorModelDescription *string `locationName:"detectorModelDescription" type:"string"` + + // The name of the detector model. 
+ // + // DetectorModelName is a required field + DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string" required:"true"` + + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. + Key *string `locationName:"key" min:"1" type:"string"` + + // The ARN of the role that grants permission to AWS IoT Events to perform its + // operations. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` + + // Metadata that can be used to manage the detector model. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateDetectorModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDetectorModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDetectorModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDetectorModelInput"} + if s.DetectorModelDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelDefinition")) + } + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.DetectorModelDefinition != nil { + if err := s.DetectorModelDefinition.Validate(); err != nil { + invalidParams.AddNested("DetectorModelDefinition", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelDefinition sets the DetectorModelDefinition field's value. +func (s *CreateDetectorModelInput) SetDetectorModelDefinition(v *DetectorModelDefinition) *CreateDetectorModelInput { + s.DetectorModelDefinition = v + return s +} + +// SetDetectorModelDescription sets the DetectorModelDescription field's value. +func (s *CreateDetectorModelInput) SetDetectorModelDescription(v string) *CreateDetectorModelInput { + s.DetectorModelDescription = &v + return s +} + +// SetDetectorModelName sets the DetectorModelName field's value. 
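// --- editor's note: illustrative sketch, not part of the vendored patch ---
// End-to-end shape of a CreateDetectorModel call, assuming the standard
// client constructor from this generated package; the model name, role ARN,
// and single idle state are hypothetical (the State type is defined further
// down in this file). Client-side validation runs before the request is sent.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1"))))

	input := (&iotevents.CreateDetectorModelInput{}).
		SetDetectorModelName("exampleModel").
		SetRoleArn("arn:aws:iam::123456789012:role/exampleIotEventsRole").
		SetKey("deviceId"). // JSON-path key routing messages to detector instances
		SetDetectorModelDefinition((&iotevents.DetectorModelDefinition{}).
			SetInitialStateName("idle").
			SetStates([]*iotevents.State{
				(&iotevents.State{}).SetStateName("idle"),
			}))

	out, err := svc.CreateDetectorModel(input)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.DetectorModelConfiguration.DetectorModelArn))
}
// --- end editor's note ---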
+func (s *CreateDetectorModelInput) SetDetectorModelName(v string) *CreateDetectorModelInput { + s.DetectorModelName = &v + return s +} + +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *CreateDetectorModelInput) SetEvaluationMethod(v string) *CreateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateDetectorModelInput) SetKey(v string) *CreateDetectorModelInput { + s.Key = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDetectorModelInput) SetRoleArn(v string) *CreateDetectorModelInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDetectorModelInput) SetTags(v []*Tag) *CreateDetectorModelInput { + s.Tags = v + return s +} + +type CreateDetectorModelOutput struct { + _ struct{} `type:"structure"` + + // Information about how the detector model is configured. + DetectorModelConfiguration *DetectorModelConfiguration `locationName:"detectorModelConfiguration" type:"structure"` +} + +// String returns the string representation +func (s CreateDetectorModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDetectorModelOutput) GoString() string { + return s.String() +} + +// SetDetectorModelConfiguration sets the DetectorModelConfiguration field's value. +func (s *CreateDetectorModelOutput) SetDetectorModelConfiguration(v *DetectorModelConfiguration) *CreateDetectorModelOutput { + s.DetectorModelConfiguration = v + return s +} + +type CreateInputInput struct { + _ struct{} `type:"structure"` + + // The definition of the input. + // + // InputDefinition is a required field + InputDefinition *InputDefinition `locationName:"inputDefinition" type:"structure" required:"true"` + + // A brief description of the input. + InputDescription *string `locationName:"inputDescription" type:"string"` + + // The name you want to give to the input. + // + // InputName is a required field + InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` + + // Metadata that can be used to manage the input. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateInputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInputInput"} + if s.InputDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("InputDefinition")) + } + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + if s.InputDefinition != nil { + if err := s.InputDefinition.Validate(); err != nil { + invalidParams.AddNested("InputDefinition", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputDefinition sets the InputDefinition field's value. 
+func (s *CreateInputInput) SetInputDefinition(v *InputDefinition) *CreateInputInput { + s.InputDefinition = v + return s +} + +// SetInputDescription sets the InputDescription field's value. +func (s *CreateInputInput) SetInputDescription(v string) *CreateInputInput { + s.InputDescription = &v + return s +} + +// SetInputName sets the InputName field's value. +func (s *CreateInputInput) SetInputName(v string) *CreateInputInput { + s.InputName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateInputInput) SetTags(v []*Tag) *CreateInputInput { + s.Tags = v + return s +} + +type CreateInputOutput struct { + _ struct{} `type:"structure"` + + // Information about the configuration of the input. + InputConfiguration *InputConfiguration `locationName:"inputConfiguration" type:"structure"` +} + +// String returns the string representation +func (s CreateInputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInputOutput) GoString() string { + return s.String() +} + +// SetInputConfiguration sets the InputConfiguration field's value. +func (s *CreateInputOutput) SetInputConfiguration(v *InputConfiguration) *CreateInputOutput { + s.InputConfiguration = v + return s +} + +type DeleteDetectorModelInput struct { + _ struct{} `type:"structure"` + + // The name of the detector model to be deleted. + // + // DetectorModelName is a required field + DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDetectorModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDetectorModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDetectorModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDetectorModelInput"} + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DeleteDetectorModelInput) SetDetectorModelName(v string) *DeleteDetectorModelInput { + s.DetectorModelName = &v + return s +} + +type DeleteDetectorModelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDetectorModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDetectorModelOutput) GoString() string { + return s.String() +} + +type DeleteInputInput struct { + _ struct{} `type:"structure"` + + // The name of the input to delete. + // + // InputName is a required field + InputName *string `location:"uri" locationName:"inputName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
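// --- editor's note: illustrative sketch, not part of the vendored patch ---
// Defining an input whose JSON payload exposes two attributes to detector
// condition expressions, using only types from this file (CreateInputInput,
// InputDefinition, Attribute); the input name and JSON paths are hypothetical.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1"))))

	_, err := svc.CreateInput((&iotevents.CreateInputInput{}).
		SetInputName("exampleTelemetry").
		SetInputDefinition((&iotevents.InputDefinition{}).
			SetAttributes([]*iotevents.Attribute{
				(&iotevents.Attribute{}).SetJsonPath("deviceId"),
				(&iotevents.Attribute{}).SetJsonPath("temperature"),
			})))
	if err != nil {
		fmt.Println("create input failed:", err)
	}
}
// --- end editor's note ---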
+func (s *DeleteInputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInputInput"} + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputName sets the InputName field's value. +func (s *DeleteInputInput) SetInputName(v string) *DeleteInputInput { + s.InputName = &v + return s +} + +type DeleteInputOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInputOutput) GoString() string { + return s.String() +} + +type DescribeDetectorModelInput struct { + _ struct{} `type:"structure"` + + // The name of the detector model. + // + // DetectorModelName is a required field + DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` + + // The version of the detector model. + DetectorModelVersion *string `location:"querystring" locationName:"version" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDetectorModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDetectorModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDetectorModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDetectorModelInput"} + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + if s.DetectorModelVersion != nil && len(*s.DetectorModelVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DescribeDetectorModelInput) SetDetectorModelName(v string) *DescribeDetectorModelInput { + s.DetectorModelName = &v + return s +} + +// SetDetectorModelVersion sets the DetectorModelVersion field's value. +func (s *DescribeDetectorModelInput) SetDetectorModelVersion(v string) *DescribeDetectorModelInput { + s.DetectorModelVersion = &v + return s +} + +type DescribeDetectorModelOutput struct { + _ struct{} `type:"structure"` + + // Information about the detector model. + DetectorModel *DetectorModel `locationName:"detectorModel" type:"structure"` +} + +// String returns the string representation +func (s DescribeDetectorModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDetectorModelOutput) GoString() string { + return s.String() +} + +// SetDetectorModel sets the DetectorModel field's value. +func (s *DescribeDetectorModelOutput) SetDetectorModel(v *DetectorModel) *DescribeDetectorModelOutput { + s.DetectorModel = v + return s +} + +type DescribeInputInput struct { + _ struct{} `type:"structure"` + + // The name of the input. 
+ // + // InputName is a required field + InputName *string `location:"uri" locationName:"inputName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInputInput"} + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputName sets the InputName field's value. +func (s *DescribeInputInput) SetInputName(v string) *DescribeInputInput { + s.InputName = &v + return s +} + +type DescribeInputOutput struct { + _ struct{} `type:"structure"` + + // Information about the input. + Input *Input `locationName:"input" type:"structure"` +} + +// String returns the string representation +func (s DescribeInputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInputOutput) GoString() string { + return s.String() +} + +// SetInput sets the Input field's value. +func (s *DescribeInputOutput) SetInput(v *Input) *DescribeInputOutput { + s.Input = v + return s +} + +type DescribeLoggingOptionsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingOptionsInput) GoString() string { + return s.String() +} + +type DescribeLoggingOptionsOutput struct { + _ struct{} `type:"structure"` + + // The current settings of the AWS IoT Events logging options. + LoggingOptions *LoggingOptions `locationName:"loggingOptions" type:"structure"` +} + +// String returns the string representation +func (s DescribeLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingOptionsOutput) GoString() string { + return s.String() +} + +// SetLoggingOptions sets the LoggingOptions field's value. +func (s *DescribeLoggingOptionsOutput) SetLoggingOptions(v *LoggingOptions) *DescribeLoggingOptionsOutput { + s.LoggingOptions = v + return s +} + +// The detector model and the specific detectors (instances) for which the logging +// level is given. +type DetectorDebugOption struct { + _ struct{} `type:"structure"` + + // The name of the detector model. + // + // DetectorModelName is a required field + DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string" required:"true"` + + // The value of the input attribute key used to create the detector (the instance + // of the detector model). + KeyValue *string `locationName:"keyValue" min:"1" type:"string"` +} + +// String returns the string representation +func (s DetectorDebugOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorDebugOption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
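// --- editor's note: illustrative sketch, not part of the vendored patch ---
// Reading a detector model back with DescribeDetectorModel; the version
// parameter is optional and the latest version is returned when it is
// omitted. The model name is hypothetical and nil checks are elided.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1"))))

	out, err := svc.DescribeDetectorModel((&iotevents.DescribeDetectorModelInput{}).
		SetDetectorModelName("exampleModel"))
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	cfg := out.DetectorModel.DetectorModelConfiguration
	fmt.Println(aws.StringValue(cfg.DetectorModelVersion), aws.StringValue(cfg.Status))
}
// --- end editor's note ---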
+func (s *DetectorDebugOption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetectorDebugOption"} + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + if s.KeyValue != nil && len(*s.KeyValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyValue", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DetectorDebugOption) SetDetectorModelName(v string) *DetectorDebugOption { + s.DetectorModelName = &v + return s +} + +// SetKeyValue sets the KeyValue field's value. +func (s *DetectorDebugOption) SetKeyValue(v string) *DetectorDebugOption { + s.KeyValue = &v + return s +} + +// Information about the detector model. +type DetectorModel struct { + _ struct{} `type:"structure"` + + // Information about how the detector is configured. + DetectorModelConfiguration *DetectorModelConfiguration `locationName:"detectorModelConfiguration" type:"structure"` + + // Information that defines how a detector operates. + DetectorModelDefinition *DetectorModelDefinition `locationName:"detectorModelDefinition" type:"structure"` +} + +// String returns the string representation +func (s DetectorModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorModel) GoString() string { + return s.String() +} + +// SetDetectorModelConfiguration sets the DetectorModelConfiguration field's value. +func (s *DetectorModel) SetDetectorModelConfiguration(v *DetectorModelConfiguration) *DetectorModel { + s.DetectorModelConfiguration = v + return s +} + +// SetDetectorModelDefinition sets the DetectorModelDefinition field's value. +func (s *DetectorModel) SetDetectorModelDefinition(v *DetectorModelDefinition) *DetectorModel { + s.DetectorModelDefinition = v + return s +} + +// Information about how the detector model is configured. +type DetectorModelConfiguration struct { + _ struct{} `type:"structure"` + + // The time the detector model was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The ARN of the detector model. + DetectorModelArn *string `locationName:"detectorModelArn" type:"string"` + + // A brief description of the detector model. + DetectorModelDescription *string `locationName:"detectorModelDescription" type:"string"` + + // The name of the detector model. + DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string"` + + // The version of the detector model. + DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` + + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). 
This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. + Key *string `locationName:"key" min:"1" type:"string"` + + // The time the detector model was last updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The ARN of the role that grants permission to AWS IoT Events to perform its + // operations. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` + + // The status of the detector model. + Status *string `locationName:"status" type:"string" enum:"DetectorModelVersionStatus"` +} + +// String returns the string representation +func (s DetectorModelConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorModelConfiguration) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DetectorModelConfiguration) SetCreationTime(v time.Time) *DetectorModelConfiguration { + s.CreationTime = &v + return s +} + +// SetDetectorModelArn sets the DetectorModelArn field's value. +func (s *DetectorModelConfiguration) SetDetectorModelArn(v string) *DetectorModelConfiguration { + s.DetectorModelArn = &v + return s +} + +// SetDetectorModelDescription sets the DetectorModelDescription field's value. +func (s *DetectorModelConfiguration) SetDetectorModelDescription(v string) *DetectorModelConfiguration { + s.DetectorModelDescription = &v + return s +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DetectorModelConfiguration) SetDetectorModelName(v string) *DetectorModelConfiguration { + s.DetectorModelName = &v + return s +} + +// SetDetectorModelVersion sets the DetectorModelVersion field's value. +func (s *DetectorModelConfiguration) SetDetectorModelVersion(v string) *DetectorModelConfiguration { + s.DetectorModelVersion = &v + return s +} + +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelConfiguration) SetEvaluationMethod(v string) *DetectorModelConfiguration { + s.EvaluationMethod = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DetectorModelConfiguration) SetKey(v string) *DetectorModelConfiguration { + s.Key = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *DetectorModelConfiguration) SetLastUpdateTime(v time.Time) *DetectorModelConfiguration { + s.LastUpdateTime = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DetectorModelConfiguration) SetRoleArn(v string) *DetectorModelConfiguration { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DetectorModelConfiguration) SetStatus(v string) *DetectorModelConfiguration { + s.Status = &v + return s +} + +// Information that defines how a detector operates. +type DetectorModelDefinition struct { + _ struct{} `type:"structure"` + + // The state that is entered at the creation of each detector (instance). + // + // InitialStateName is a required field + InitialStateName *string `locationName:"initialStateName" min:"1" type:"string" required:"true"` + + // Information about the states of the detector. 
+ // + // States is a required field + States []*State `locationName:"states" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DetectorModelDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorModelDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetectorModelDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetectorModelDefinition"} + if s.InitialStateName == nil { + invalidParams.Add(request.NewErrParamRequired("InitialStateName")) + } + if s.InitialStateName != nil && len(*s.InitialStateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InitialStateName", 1)) + } + if s.States == nil { + invalidParams.Add(request.NewErrParamRequired("States")) + } + if s.States != nil && len(s.States) < 1 { + invalidParams.Add(request.NewErrParamMinLen("States", 1)) + } + if s.States != nil { + for i, v := range s.States { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "States", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInitialStateName sets the InitialStateName field's value. +func (s *DetectorModelDefinition) SetInitialStateName(v string) *DetectorModelDefinition { + s.InitialStateName = &v + return s +} + +// SetStates sets the States field's value. +func (s *DetectorModelDefinition) SetStates(v []*State) *DetectorModelDefinition { + s.States = v + return s +} + +// Information about the detector model. +type DetectorModelSummary struct { + _ struct{} `type:"structure"` + + // The time the detector model was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // A brief description of the detector model. + DetectorModelDescription *string `locationName:"detectorModelDescription" type:"string"` + + // The name of the detector model. + DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string"` +} + +// String returns the string representation +func (s DetectorModelSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorModelSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DetectorModelSummary) SetCreationTime(v time.Time) *DetectorModelSummary { + s.CreationTime = &v + return s +} + +// SetDetectorModelDescription sets the DetectorModelDescription field's value. +func (s *DetectorModelSummary) SetDetectorModelDescription(v string) *DetectorModelSummary { + s.DetectorModelDescription = &v + return s +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DetectorModelSummary) SetDetectorModelName(v string) *DetectorModelSummary { + s.DetectorModelName = &v + return s +} + +// Information about the detector model version. +type DetectorModelVersionSummary struct { + _ struct{} `type:"structure"` + + // The time the detector model version was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The ARN of the detector model version. + DetectorModelArn *string `locationName:"detectorModelArn" type:"string"` + + // The name of the detector model. 
+ DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string"` + + // The ID of the detector model version. + DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` + + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The last time the detector model version was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The ARN of the role that grants the detector model permission to perform + // its tasks. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` + + // The status of the detector model version. + Status *string `locationName:"status" type:"string" enum:"DetectorModelVersionStatus"` +} + +// String returns the string representation +func (s DetectorModelVersionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectorModelVersionSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DetectorModelVersionSummary) SetCreationTime(v time.Time) *DetectorModelVersionSummary { + s.CreationTime = &v + return s +} + +// SetDetectorModelArn sets the DetectorModelArn field's value. +func (s *DetectorModelVersionSummary) SetDetectorModelArn(v string) *DetectorModelVersionSummary { + s.DetectorModelArn = &v + return s +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *DetectorModelVersionSummary) SetDetectorModelName(v string) *DetectorModelVersionSummary { + s.DetectorModelName = &v + return s +} + +// SetDetectorModelVersion sets the DetectorModelVersion field's value. +func (s *DetectorModelVersionSummary) SetDetectorModelVersion(v string) *DetectorModelVersionSummary { + s.DetectorModelVersion = &v + return s +} + +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelVersionSummary) SetEvaluationMethod(v string) *DetectorModelVersionSummary { + s.EvaluationMethod = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *DetectorModelVersionSummary) SetLastUpdateTime(v time.Time) *DetectorModelVersionSummary { + s.LastUpdateTime = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DetectorModelVersionSummary) SetRoleArn(v string) *DetectorModelVersionSummary { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DetectorModelVersionSummary) SetStatus(v string) *DetectorModelVersionSummary { + s.Status = &v + return s +} + +// Specifies the "actions" to be performed when the "condition" evaluates to +// TRUE. +type Event struct { + _ struct{} `type:"structure"` + + // The actions to be performed. + Actions []*ActionData `locationName:"actions" type:"list"` + + // [Optional] The Boolean expression that when TRUE causes the "actions" to + // be performed. If not present, the actions are performed (=TRUE); if the expression + // result is not a Boolean value, the actions are NOT performed (=FALSE). + Condition *string `locationName:"condition" type:"string"` + + // The name of the event. 
+ // + // EventName is a required field + EventName *string `locationName:"eventName" type:"string" required:"true"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Event) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Event"} + if s.EventName == nil { + invalidParams.Add(request.NewErrParamRequired("EventName")) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *Event) SetActions(v []*ActionData) *Event { + s.Actions = v + return s +} + +// SetCondition sets the Condition field's value. +func (s *Event) SetCondition(v string) *Event { + s.Condition = &v + return s +} + +// SetEventName sets the EventName field's value. +func (s *Event) SetEventName(v string) *Event { + s.EventName = &v + return s +} + +// Sends information about the detector model instance and the event which triggered +// the action to a Kinesis Data Firehose delivery stream. +type FirehoseAction struct { + _ struct{} `type:"structure"` + + // The name of the Kinesis Data Firehose delivery stream where the data is written. + // + // DeliveryStreamName is a required field + DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"` + + // A character separator that is used to separate records written to the Kinesis + // Data Firehose delivery stream. Valid values are: '\n' (newline), '\t' (tab), + // '\r\n' (Windows newline), ',' (comma). + Separator *string `locationName:"separator" type:"string"` +} + +// String returns the string representation +func (s FirehoseAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirehoseAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FirehoseAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FirehoseAction"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryStreamName sets the DeliveryStreamName field's value. +func (s *FirehoseAction) SetDeliveryStreamName(v string) *FirehoseAction { + s.DeliveryStreamName = &v + return s +} + +// SetSeparator sets the Separator field's value. +func (s *FirehoseAction) SetSeparator(v string) *FirehoseAction { + s.Separator = &v + return s +} + +// Information about the input. +type Input struct { + _ struct{} `type:"structure"` + + // Information about the configuration of an input. + InputConfiguration *InputConfiguration `locationName:"inputConfiguration" type:"structure"` + + // The definition of the input. 
+ InputDefinition *InputDefinition `locationName:"inputDefinition" type:"structure"` +} + +// String returns the string representation +func (s Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Input) GoString() string { + return s.String() +} + +// SetInputConfiguration sets the InputConfiguration field's value. +func (s *Input) SetInputConfiguration(v *InputConfiguration) *Input { + s.InputConfiguration = v + return s +} + +// SetInputDefinition sets the InputDefinition field's value. +func (s *Input) SetInputDefinition(v *InputDefinition) *Input { + s.InputDefinition = v + return s +} + +// Information about the configuration of an input. +type InputConfiguration struct { + _ struct{} `type:"structure"` + + // The time the input was created. + // + // CreationTime is a required field + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" required:"true"` + + // The ARN of the input. + // + // InputArn is a required field + InputArn *string `locationName:"inputArn" type:"string" required:"true"` + + // A brief description of the input. + InputDescription *string `locationName:"inputDescription" type:"string"` + + // The name of the input. + // + // InputName is a required field + InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` + + // The last time the input was updated. + // + // LastUpdateTime is a required field + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp" required:"true"` + + // The status of the input. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"InputStatus"` +} + +// String returns the string representation +func (s InputConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputConfiguration) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *InputConfiguration) SetCreationTime(v time.Time) *InputConfiguration { + s.CreationTime = &v + return s +} + +// SetInputArn sets the InputArn field's value. +func (s *InputConfiguration) SetInputArn(v string) *InputConfiguration { + s.InputArn = &v + return s +} + +// SetInputDescription sets the InputDescription field's value. +func (s *InputConfiguration) SetInputDescription(v string) *InputConfiguration { + s.InputDescription = &v + return s +} + +// SetInputName sets the InputName field's value. +func (s *InputConfiguration) SetInputName(v string) *InputConfiguration { + s.InputName = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *InputConfiguration) SetLastUpdateTime(v time.Time) *InputConfiguration { + s.LastUpdateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InputConfiguration) SetStatus(v string) *InputConfiguration { + s.Status = &v + return s +} + +// The definition of the input. +type InputDefinition struct { + _ struct{} `type:"structure"` + + // The attributes from the JSON payload that are made available by the input. + // Inputs are derived from messages sent to the AWS IoT Events system using + // BatchPutMessage. Each such message contains a JSON payload, and those attributes + // (and their paired values) specified here are available for use in the "condition" + // expressions used by detectors that monitor this input. 
+ // + // Attributes is a required field + Attributes []*Attribute `locationName:"attributes" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s InputDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputDefinition"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Attributes != nil && len(s.Attributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attributes", 1)) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *InputDefinition) SetAttributes(v []*Attribute) *InputDefinition { + s.Attributes = v + return s +} + +// Information about the input. +type InputSummary struct { + _ struct{} `type:"structure"` + + // The time the input was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The ARN of the input. + InputArn *string `locationName:"inputArn" type:"string"` + + // A brief description of the input. + InputDescription *string `locationName:"inputDescription" type:"string"` + + // The name of the input. + InputName *string `locationName:"inputName" min:"1" type:"string"` + + // The last time the input was updated. + LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + + // The status of the input. + Status *string `locationName:"status" type:"string" enum:"InputStatus"` +} + +// String returns the string representation +func (s InputSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *InputSummary) SetCreationTime(v time.Time) *InputSummary { + s.CreationTime = &v + return s +} + +// SetInputArn sets the InputArn field's value. +func (s *InputSummary) SetInputArn(v string) *InputSummary { + s.InputArn = &v + return s +} + +// SetInputDescription sets the InputDescription field's value. +func (s *InputSummary) SetInputDescription(v string) *InputSummary { + s.InputDescription = &v + return s +} + +// SetInputName sets the InputName field's value. +func (s *InputSummary) SetInputName(v string) *InputSummary { + s.InputName = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *InputSummary) SetLastUpdateTime(v time.Time) *InputSummary { + s.LastUpdateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InputSummary) SetStatus(v string) *InputSummary { + s.Status = &v + return s +} + +// Information required to publish the MQTT message via the AWS IoT message +// broker. +type IotTopicPublishAction struct { + _ struct{} `type:"structure"` + + // The MQTT topic of the message. 
+ // + // MqttTopic is a required field + MqttTopic *string `locationName:"mqttTopic" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IotTopicPublishAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IotTopicPublishAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IotTopicPublishAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IotTopicPublishAction"} + if s.MqttTopic == nil { + invalidParams.Add(request.NewErrParamRequired("MqttTopic")) + } + if s.MqttTopic != nil && len(*s.MqttTopic) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MqttTopic", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMqttTopic sets the MqttTopic field's value. +func (s *IotTopicPublishAction) SetMqttTopic(v string) *IotTopicPublishAction { + s.MqttTopic = &v + return s +} + +// Calls an AWS Lambda function, passing in information about the detector model +// instance and the event which triggered the action. +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The ARN of the AWS Lambda function which is executed. + // + // FunctionArn is a required field + FunctionArn *string `locationName:"functionArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaAction"} + if s.FunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionArn")) + } + if s.FunctionArn != nil && len(*s.FunctionArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFunctionArn sets the FunctionArn field's value. +func (s *LambdaAction) SetFunctionArn(v string) *LambdaAction { + s.FunctionArn = &v + return s +} + +type ListDetectorModelVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of the detector model whose versions are returned. + // + // DetectorModelName is a required field + DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` + + // The maximum number of results to return at one time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDetectorModelVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDetectorModelVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
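// --- editor's note: illustrative sketch, not part of the vendored patch ---
// An Event pairs an optional boolean condition string with a list of
// ActionData; here a hypothetical over-temperature condition invokes a
// Lambda function via the LambdaAction defined above. The condition
// expression and ARN are illustrative values only.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	event := (&iotevents.Event{}).
		SetEventName("overTemp").
		SetCondition(`$input.exampleTelemetry.temperature > 90`).
		SetActions([]*iotevents.ActionData{
			(&iotevents.ActionData{}).SetLambda((&iotevents.LambdaAction{}).
				SetFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:exampleAlert")),
		})
	if err := event.Validate(); err != nil {
		fmt.Println("invalid event:", err)
	}
}
// --- end editor's note ---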
+func (s *ListDetectorModelVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDetectorModelVersionsInput"} + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *ListDetectorModelVersionsInput) SetDetectorModelName(v string) *ListDetectorModelVersionsInput { + s.DetectorModelName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDetectorModelVersionsInput) SetMaxResults(v int64) *ListDetectorModelVersionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDetectorModelVersionsInput) SetNextToken(v string) *ListDetectorModelVersionsInput { + s.NextToken = &v + return s +} + +type ListDetectorModelVersionsOutput struct { + _ struct{} `type:"structure"` + + // Summary information about the detector model versions. + DetectorModelVersionSummaries []*DetectorModelVersionSummary `locationName:"detectorModelVersionSummaries" type:"list"` + + // A token to retrieve the next set of results, or null if there are no additional + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDetectorModelVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDetectorModelVersionsOutput) GoString() string { + return s.String() +} + +// SetDetectorModelVersionSummaries sets the DetectorModelVersionSummaries field's value. +func (s *ListDetectorModelVersionsOutput) SetDetectorModelVersionSummaries(v []*DetectorModelVersionSummary) *ListDetectorModelVersionsOutput { + s.DetectorModelVersionSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDetectorModelVersionsOutput) SetNextToken(v string) *ListDetectorModelVersionsOutput { + s.NextToken = &v + return s +} + +type ListDetectorModelsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return at one time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDetectorModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDetectorModelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDetectorModelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDetectorModelsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListDetectorModelsInput) SetMaxResults(v int64) *ListDetectorModelsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDetectorModelsInput) SetNextToken(v string) *ListDetectorModelsInput { + s.NextToken = &v + return s +} + +type ListDetectorModelsOutput struct { + _ struct{} `type:"structure"` + + // Summary information about the detector models. + DetectorModelSummaries []*DetectorModelSummary `locationName:"detectorModelSummaries" type:"list"` + + // A token to retrieve the next set of results, or null if there are no additional + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDetectorModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDetectorModelsOutput) GoString() string { + return s.String() +} + +// SetDetectorModelSummaries sets the DetectorModelSummaries field's value. +func (s *ListDetectorModelsOutput) SetDetectorModelSummaries(v []*DetectorModelSummary) *ListDetectorModelsOutput { + s.DetectorModelSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDetectorModelsOutput) SetNextToken(v string) *ListDetectorModelsOutput { + s.NextToken = &v + return s +} + +type ListInputsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return at one time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListInputsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInputsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInputsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInputsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListInputsInput) SetMaxResults(v int64) *ListInputsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInputsInput) SetNextToken(v string) *ListInputsInput { + s.NextToken = &v + return s +} + +type ListInputsOutput struct { + _ struct{} `type:"structure"` + + // Summary information about the inputs. + InputSummaries []*InputSummary `locationName:"inputSummaries" type:"list"` + + // A token to retrieve the next set of results, or null if there are no additional + // results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListInputsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInputsOutput) GoString() string { + return s.String() +} + +// SetInputSummaries sets the InputSummaries field's value. +func (s *ListInputsOutput) SetInputSummaries(v []*InputSummary) *ListInputsOutput { + s.InputSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
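// --- editor's note: illustrative sketch, not part of the vendored patch ---
// The List* operations here share a MaxResults/NextToken shape, so a plain
// token loop drains them; as far as the editor can tell this generated
// package does not ship *Pages helpers for these operations, hence the
// manual loop below.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1"))))

	input := (&iotevents.ListInputsInput{}).SetMaxResults(25)
	for {
		out, err := svc.ListInputs(input)
		if err != nil {
			fmt.Println("list failed:", err)
			return
		}
		for _, s := range out.InputSummaries {
			fmt.Println(aws.StringValue(s.InputName), aws.StringValue(s.Status))
		}
		if out.NextToken == nil {
			break // no further pages
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}
// --- end editor's note ---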
+func (s *ListInputsOutput) SetNextToken(v string) *ListInputsOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The list of tags assigned to the resource. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// The values of the AWS IoT Events logging options. +type LoggingOptions struct { + _ struct{} `type:"structure"` + + // Information that identifies those detector models and their detectors (instances) + // for which the logging level is given. + DetectorDebugOptions []*DetectorDebugOption `locationName:"detectorDebugOptions" min:"1" type:"list"` + + // If TRUE, logging is enabled for AWS IoT Events. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` + + // The logging level. + // + // Level is a required field + Level *string `locationName:"level" type:"string" required:"true" enum:"LoggingLevel"` + + // The ARN of the role that grants permission to AWS IoT Events to perform logging. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LoggingOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingOptions"} + if s.DetectorDebugOptions != nil && len(s.DetectorDebugOptions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorDebugOptions", 1)) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Level == nil { + invalidParams.Add(request.NewErrParamRequired("Level")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.DetectorDebugOptions != nil { + for i, v := range s.DetectorDebugOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DetectorDebugOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorDebugOptions sets the DetectorDebugOptions field's value. +func (s *LoggingOptions) SetDetectorDebugOptions(v []*DetectorDebugOption) *LoggingOptions { + s.DetectorDebugOptions = v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *LoggingOptions) SetEnabled(v bool) *LoggingOptions { + s.Enabled = &v + return s +} + +// SetLevel sets the Level field's value. +func (s *LoggingOptions) SetLevel(v string) *LoggingOptions { + s.Level = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *LoggingOptions) SetRoleArn(v string) *LoggingOptions { + s.RoleArn = &v + return s +} + +// When entering this state, perform these actions if the condition is TRUE. +type OnEnterLifecycle struct { + _ struct{} `type:"structure"` + + // Specifies the actions that are performed when the state is entered and the + // "condition" is TRUE. + Events []*Event `locationName:"events" type:"list"` +} + +// String returns the string representation +func (s OnEnterLifecycle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnEnterLifecycle) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OnEnterLifecycle) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OnEnterLifecycle"} + if s.Events != nil { + for i, v := range s.Events { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Events", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *OnEnterLifecycle) SetEvents(v []*Event) *OnEnterLifecycle { + s.Events = v + return s +} + +// When exiting this state, perform these "actions" if the specified "condition" +// is TRUE. +type OnExitLifecycle struct { + _ struct{} `type:"structure"` + + // Specifies the "actions" that are performed when the state is exited and the + // "condition" is TRUE. + Events []*Event `locationName:"events" type:"list"` +} + +// String returns the string representation +func (s OnExitLifecycle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnExitLifecycle) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
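Every parameter type in this file implements the same client-side Validate contract seen here: each violated constraint is accumulated into one request.ErrInvalidParams, and nested members are folded in with AddNested, so the caller gets the full list in a single error before any network call is made. A small sketch using the LoggingOptions type above (the field values are invented):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	// Deliberately omit the required Level and RoleArn fields.
	opts := (&iotevents.LoggingOptions{}).SetEnabled(true)

	// Validate reports every problem at once instead of stopping at the first.
	if err := opts.Validate(); err != nil {
		fmt.Println(err) // names both missing parameters
	}
}
```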
+func (s *OnExitLifecycle) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OnExitLifecycle"} + if s.Events != nil { + for i, v := range s.Events { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Events", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *OnExitLifecycle) SetEvents(v []*Event) *OnExitLifecycle { + s.Events = v + return s +} + +// Specifies the actions performed when the "condition" evaluates to TRUE. +type OnInputLifecycle struct { + _ struct{} `type:"structure"` + + // Specifies the actions performed when the "condition" evaluates to TRUE. + Events []*Event `locationName:"events" type:"list"` + + // Specifies the actions performed, and the next state entered, when a "condition" + // evaluates to TRUE. + TransitionEvents []*TransitionEvent `locationName:"transitionEvents" type:"list"` +} + +// String returns the string representation +func (s OnInputLifecycle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnInputLifecycle) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OnInputLifecycle) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OnInputLifecycle"} + if s.Events != nil { + for i, v := range s.Events { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Events", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TransitionEvents != nil { + for i, v := range s.TransitionEvents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransitionEvents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *OnInputLifecycle) SetEvents(v []*Event) *OnInputLifecycle { + s.Events = v + return s +} + +// SetTransitionEvents sets the TransitionEvents field's value. +func (s *OnInputLifecycle) SetTransitionEvents(v []*TransitionEvent) *OnInputLifecycle { + s.TransitionEvents = v + return s +} + +type PutLoggingOptionsInput struct { + _ struct{} `type:"structure"` + + // The new values of the AWS IoT Events logging options. + // + // LoggingOptions is a required field + LoggingOptions *LoggingOptions `locationName:"loggingOptions" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLoggingOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLoggingOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLoggingOptionsInput"} + if s.LoggingOptions == nil { + invalidParams.Add(request.NewErrParamRequired("LoggingOptions")) + } + if s.LoggingOptions != nil { + if err := s.LoggingOptions.Validate(); err != nil { + invalidParams.AddNested("LoggingOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingOptions sets the LoggingOptions field's value. 
+func (s *PutLoggingOptionsInput) SetLoggingOptions(v *LoggingOptions) *PutLoggingOptionsInput { + s.LoggingOptions = v + return s +} + +type PutLoggingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLoggingOptionsOutput) GoString() string { + return s.String() +} + +// Information needed to reset the timer. +type ResetTimerAction struct { + _ struct{} `type:"structure"` + + // The name of the timer to reset. + // + // TimerName is a required field + TimerName *string `locationName:"timerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetTimerAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetTimerAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetTimerAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetTimerAction"} + if s.TimerName == nil { + invalidParams.Add(request.NewErrParamRequired("TimerName")) + } + if s.TimerName != nil && len(*s.TimerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TimerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTimerName sets the TimerName field's value. +func (s *ResetTimerAction) SetTimerName(v string) *ResetTimerAction { + s.TimerName = &v + return s +} + +// Information required to publish the Amazon SNS message. +type SNSTopicPublishAction struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon SNS target where the message is sent. + // + // TargetArn is a required field + TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSTopicPublishAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSTopicPublishAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SNSTopicPublishAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SNSTopicPublishAction"} + if s.TargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetArn")) + } + if s.TargetArn != nil && len(*s.TargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetArn sets the TargetArn field's value. +func (s *SNSTopicPublishAction) SetTargetArn(v string) *SNSTopicPublishAction { + s.TargetArn = &v + return s +} + +// Information needed to set the timer. +type SetTimerAction struct { + _ struct{} `type:"structure"` + + // The number of seconds until the timer expires. The minimum value is 60 seconds + // to ensure accuracy. + // + // Seconds is a required field + Seconds *int64 `locationName:"seconds" type:"integer" required:"true"` + + // The name of the timer. 
+ // + // TimerName is a required field + TimerName *string `locationName:"timerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTimerAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimerAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTimerAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTimerAction"} + if s.Seconds == nil { + invalidParams.Add(request.NewErrParamRequired("Seconds")) + } + if s.TimerName == nil { + invalidParams.Add(request.NewErrParamRequired("TimerName")) + } + if s.TimerName != nil && len(*s.TimerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TimerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSeconds sets the Seconds field's value. +func (s *SetTimerAction) SetSeconds(v int64) *SetTimerAction { + s.Seconds = &v + return s +} + +// SetTimerName sets the TimerName field's value. +func (s *SetTimerAction) SetTimerName(v string) *SetTimerAction { + s.TimerName = &v + return s +} + +// Information about the variable and its new value. +type SetVariableAction struct { + _ struct{} `type:"structure"` + + // The new value of the variable. + // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` + + // The name of the variable. + // + // VariableName is a required field + VariableName *string `locationName:"variableName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetVariableAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVariableAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetVariableAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetVariableAction"} + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + if s.VariableName == nil { + invalidParams.Add(request.NewErrParamRequired("VariableName")) + } + if s.VariableName != nil && len(*s.VariableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VariableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetValue sets the Value field's value. +func (s *SetVariableAction) SetValue(v string) *SetVariableAction { + s.Value = &v + return s +} + +// SetVariableName sets the VariableName field's value. +func (s *SetVariableAction) SetVariableName(v string) *SetVariableAction { + s.VariableName = &v + return s +} + +// Sends information about the detector model instance and the event which triggered +// the action to an Amazon SQS queue. +type SqsAction struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue where the data is written. + // + // QueueUrl is a required field + QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"` + + // Set this to TRUE if you want the data to be Base-64 encoded before it is + // written to the queue. Otherwise, set this to FALSE. 
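SetTimerAction and SetVariableAction normally ride inside an ActionData value (declared elsewhere in this file) rather than standing alone. A hedged sketch, assuming ActionData carries SetTimer and SetVariable pointer fields matching its JSON shape; the timer, variable, and input names are invented:

```go
package main

import "github.com/aws/aws-sdk-go/service/iotevents"

// alarmActions starts a 60-second timer (the documented minimum) and records
// the triggering value in a detector variable.
func alarmActions() []*iotevents.ActionData {
	return []*iotevents.ActionData{
		{SetTimer: (&iotevents.SetTimerAction{}).
			SetTimerName("cooldown").
			SetSeconds(60)},
		{SetVariable: (&iotevents.SetVariableAction{}).
			SetVariableName("lastTemp").
			SetValue("$input.telemetry.temp")}, // IoT Events expression syntax
	}
}
```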
+ UseBase64 *bool `locationName:"useBase64" type:"boolean"` +} + +// String returns the string representation +func (s SqsAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqsAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SqsAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqsAction"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueueUrl sets the QueueUrl field's value. +func (s *SqsAction) SetQueueUrl(v string) *SqsAction { + s.QueueUrl = &v + return s +} + +// SetUseBase64 sets the UseBase64 field's value. +func (s *SqsAction) SetUseBase64(v bool) *SqsAction { + s.UseBase64 = &v + return s +} + +// Information that defines a state of a detector. +type State struct { + _ struct{} `type:"structure"` + + // When entering this state, perform these "actions" if the "condition" is TRUE. + OnEnter *OnEnterLifecycle `locationName:"onEnter" type:"structure"` + + // When exiting this state, perform these "actions" if the specified "condition" + // is TRUE. + OnExit *OnExitLifecycle `locationName:"onExit" type:"structure"` + + // When an input is received and the "condition" is TRUE, perform the specified + // "actions". + OnInput *OnInputLifecycle `locationName:"onInput" type:"structure"` + + // The name of the state. + // + // StateName is a required field + StateName *string `locationName:"stateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s State) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s State) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *State) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "State"} + if s.StateName == nil { + invalidParams.Add(request.NewErrParamRequired("StateName")) + } + if s.StateName != nil && len(*s.StateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StateName", 1)) + } + if s.OnEnter != nil { + if err := s.OnEnter.Validate(); err != nil { + invalidParams.AddNested("OnEnter", err.(request.ErrInvalidParams)) + } + } + if s.OnExit != nil { + if err := s.OnExit.Validate(); err != nil { + invalidParams.AddNested("OnExit", err.(request.ErrInvalidParams)) + } + } + if s.OnInput != nil { + if err := s.OnInput.Validate(); err != nil { + invalidParams.AddNested("OnInput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOnEnter sets the OnEnter field's value. +func (s *State) SetOnEnter(v *OnEnterLifecycle) *State { + s.OnEnter = v + return s +} + +// SetOnExit sets the OnExit field's value. +func (s *State) SetOnExit(v *OnExitLifecycle) *State { + s.OnExit = v + return s +} + +// SetOnInput sets the OnInput field's value. +func (s *State) SetOnInput(v *OnInputLifecycle) *State { + s.OnInput = v + return s +} + +// SetStateName sets the StateName field's value. +func (s *State) SetStateName(v string) *State { + s.StateName = &v + return s +} + +// Metadata that can be used to manage the resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. 
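State is the unit a detector model is assembled from: the three lifecycle hooks above plus a name. A minimal sketch of one state that transitions away on a threshold, built entirely from the setters defined in this file (the state names, input name, and condition expression are invented for illustration):

```go
package main

import "github.com/aws/aws-sdk-go/service/iotevents"

// normalState hands control to "alarm" when the telemetry input's temp
// attribute crosses a threshold.
func normalState() *iotevents.State {
	return (&iotevents.State{}).
		SetStateName("normal").
		SetOnInput((&iotevents.OnInputLifecycle{}).
			SetTransitionEvents([]*iotevents.TransitionEvent{
				(&iotevents.TransitionEvent{}).
					SetEventName("toAlarm").
					SetCondition(`$input.telemetry.temp > 70`).
					SetNextState("alarm"),
			}))
}
```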
+ // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The tag's value. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The new or modified tags for the resource. + // + // Tags is a required field + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Specifies the actions performed and the next state entered when a "condition" +// evaluates to TRUE. +type TransitionEvent struct { + _ struct{} `type:"structure"` + + // The actions to be performed. 
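TagResourceInput shows the usual AWS tagging shape: the ARN rides in the query string while the tags travel in the body. A sketch against a default session (the ARN and tag values are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession()))

	_, err := svc.TagResource((&iotevents.TagResourceInput{}).
		SetResourceArn("arn:aws:iotevents:us-west-2:123456789012:detectorModel/example").
		SetTags([]*iotevents.Tag{
			(&iotevents.Tag{}).SetKey("team").SetValue("iot"),
		}))
	if err != nil {
		log.Fatal(err)
	}
}
```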
+ Actions []*ActionData `locationName:"actions" type:"list"` + + // [Required] A Boolean expression that when TRUE causes the actions to be performed + // and the "nextState" to be entered. + // + // Condition is a required field + Condition *string `locationName:"condition" type:"string" required:"true"` + + // The name of the transition event. + // + // EventName is a required field + EventName *string `locationName:"eventName" type:"string" required:"true"` + + // The next state to enter. + // + // NextState is a required field + NextState *string `locationName:"nextState" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TransitionEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitionEvent) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransitionEvent) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransitionEvent"} + if s.Condition == nil { + invalidParams.Add(request.NewErrParamRequired("Condition")) + } + if s.EventName == nil { + invalidParams.Add(request.NewErrParamRequired("EventName")) + } + if s.NextState == nil { + invalidParams.Add(request.NewErrParamRequired("NextState")) + } + if s.NextState != nil && len(*s.NextState) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextState", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *TransitionEvent) SetActions(v []*ActionData) *TransitionEvent { + s.Actions = v + return s +} + +// SetCondition sets the Condition field's value. +func (s *TransitionEvent) SetCondition(v string) *TransitionEvent { + s.Condition = &v + return s +} + +// SetEventName sets the EventName field's value. +func (s *TransitionEvent) SetEventName(v string) *TransitionEvent { + s.EventName = &v + return s +} + +// SetNextState sets the NextState field's value. +func (s *TransitionEvent) SetNextState(v string) *TransitionEvent { + s.NextState = &v + return s +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // A list of the keys of the tags to be removed from the resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateDetectorModelInput struct { + _ struct{} `type:"structure"` + + // Information that defines how a detector operates. + // + // DetectorModelDefinition is a required field + DetectorModelDefinition *DetectorModelDefinition `locationName:"detectorModelDefinition" type:"structure" required:"true"` + + // A brief description of the detector model. + DetectorModelDescription *string `locationName:"detectorModelDescription" type:"string"` + + // The name of the detector model that is updated. + // + // DetectorModelName is a required field + DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` + + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The ARN of the role that grants permission to AWS IoT Events to perform its + // operations. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDetectorModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDetectorModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDetectorModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDetectorModelInput"} + if s.DetectorModelDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelDefinition")) + } + if s.DetectorModelName == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorModelName")) + } + if s.DetectorModelName != nil && len(*s.DetectorModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorModelName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.DetectorModelDefinition != nil { + if err := s.DetectorModelDefinition.Validate(); err != nil { + invalidParams.AddNested("DetectorModelDefinition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorModelDefinition sets the DetectorModelDefinition field's value. +func (s *UpdateDetectorModelInput) SetDetectorModelDefinition(v *DetectorModelDefinition) *UpdateDetectorModelInput { + s.DetectorModelDefinition = v + return s +} + +// SetDetectorModelDescription sets the DetectorModelDescription field's value. +func (s *UpdateDetectorModelInput) SetDetectorModelDescription(v string) *UpdateDetectorModelInput { + s.DetectorModelDescription = &v + return s +} + +// SetDetectorModelName sets the DetectorModelName field's value. +func (s *UpdateDetectorModelInput) SetDetectorModelName(v string) *UpdateDetectorModelInput { + s.DetectorModelName = &v + return s +} + +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *UpdateDetectorModelInput) SetEvaluationMethod(v string) *UpdateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateDetectorModelInput) SetRoleArn(v string) *UpdateDetectorModelInput { + s.RoleArn = &v + return s +} + +type UpdateDetectorModelOutput struct { + _ struct{} `type:"structure"` + + // Information about how the detector model is configured. + DetectorModelConfiguration *DetectorModelConfiguration `locationName:"detectorModelConfiguration" type:"structure"` +} + +// String returns the string representation +func (s UpdateDetectorModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDetectorModelOutput) GoString() string { + return s.String() +} + +// SetDetectorModelConfiguration sets the DetectorModelConfiguration field's value. +func (s *UpdateDetectorModelOutput) SetDetectorModelConfiguration(v *DetectorModelConfiguration) *UpdateDetectorModelOutput { + s.DetectorModelConfiguration = v + return s +} + +type UpdateInputInput struct { + _ struct{} `type:"structure"` + + // The definition of the input. + // + // InputDefinition is a required field + InputDefinition *InputDefinition `locationName:"inputDefinition" type:"structure" required:"true"` + + // A brief description of the input. + InputDescription *string `locationName:"inputDescription" type:"string"` + + // The name of the input you want to update. 
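UpdateDetectorModelInput bundles the definition, the role, and the evaluation mode; the EvaluationMethod string constants declared near the end of this file plug straight into SetEvaluationMethod. A hedged sketch — DetectorModelDefinition's SetInitialStateName/SetStates builders and the DetectorModelVersion field on the returned configuration are assumed from their use in this file, and states would be built as in the State sketch above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

// updateModel pushes a new definition for an existing detector model.
func updateModel(svc *iotevents.IoTEvents, states []*iotevents.State) {
	out, err := svc.UpdateDetectorModel((&iotevents.UpdateDetectorModelInput{}).
		SetDetectorModelName("example-model").
		SetRoleArn("arn:aws:iam::123456789012:role/iotevents-example"). // placeholder
		SetEvaluationMethod(iotevents.EvaluationMethodSerial).
		SetDetectorModelDefinition((&iotevents.DetectorModelDefinition{}).
			SetInitialStateName("normal").
			SetStates(states)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DetectorModelConfiguration.DetectorModelVersion))
}

func main() {
	updateModel(iotevents.New(session.Must(session.NewSession())), nil)
}
```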
+ // + // InputName is a required field + InputName *string `location:"uri" locationName:"inputName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateInputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateInputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateInputInput"} + if s.InputDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("InputDefinition")) + } + if s.InputName == nil { + invalidParams.Add(request.NewErrParamRequired("InputName")) + } + if s.InputName != nil && len(*s.InputName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) + } + if s.InputDefinition != nil { + if err := s.InputDefinition.Validate(); err != nil { + invalidParams.AddNested("InputDefinition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputDefinition sets the InputDefinition field's value. +func (s *UpdateInputInput) SetInputDefinition(v *InputDefinition) *UpdateInputInput { + s.InputDefinition = v + return s +} + +// SetInputDescription sets the InputDescription field's value. +func (s *UpdateInputInput) SetInputDescription(v string) *UpdateInputInput { + s.InputDescription = &v + return s +} + +// SetInputName sets the InputName field's value. +func (s *UpdateInputInput) SetInputName(v string) *UpdateInputInput { + s.InputName = &v + return s +} + +type UpdateInputOutput struct { + _ struct{} `type:"structure"` + + // Information about the configuration of the input. + InputConfiguration *InputConfiguration `locationName:"inputConfiguration" type:"structure"` +} + +// String returns the string representation +func (s UpdateInputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInputOutput) GoString() string { + return s.String() +} + +// SetInputConfiguration sets the InputConfiguration field's value. 
+func (s *UpdateInputOutput) SetInputConfiguration(v *InputConfiguration) *UpdateInputOutput { + s.InputConfiguration = v + return s +} + +const ( + // DetectorModelVersionStatusActive is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusActive = "ACTIVE" + + // DetectorModelVersionStatusActivating is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusActivating = "ACTIVATING" + + // DetectorModelVersionStatusInactive is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusInactive = "INACTIVE" + + // DetectorModelVersionStatusDeprecated is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusDeprecated = "DEPRECATED" + + // DetectorModelVersionStatusDraft is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusDraft = "DRAFT" + + // DetectorModelVersionStatusPaused is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusPaused = "PAUSED" + + // DetectorModelVersionStatusFailed is a DetectorModelVersionStatus enum value + DetectorModelVersionStatusFailed = "FAILED" +) + +const ( + // EvaluationMethodBatch is a EvaluationMethod enum value + EvaluationMethodBatch = "BATCH" + + // EvaluationMethodSerial is a EvaluationMethod enum value + EvaluationMethodSerial = "SERIAL" +) + +const ( + // InputStatusCreating is a InputStatus enum value + InputStatusCreating = "CREATING" + + // InputStatusUpdating is a InputStatus enum value + InputStatusUpdating = "UPDATING" + + // InputStatusActive is a InputStatus enum value + InputStatusActive = "ACTIVE" + + // InputStatusDeleting is a InputStatus enum value + InputStatusDeleting = "DELETING" +) + +const ( + // LoggingLevelError is a LoggingLevel enum value + LoggingLevelError = "ERROR" + + // LoggingLevelInfo is a LoggingLevel enum value + LoggingLevelInfo = "INFO" + + // LoggingLevelDebug is a LoggingLevel enum value + LoggingLevelDebug = "DEBUG" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go new file mode 100644 index 00000000000..e1b69a693bb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go @@ -0,0 +1,31 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package iotevents provides the client and types for making API +// requests to AWS IoT Events. +// +// AWS IoT Events monitors your equipment or device fleets for failures or changes +// in operation, and triggers actions when such events occur. AWS IoT Events +// API commands enable you to create, read, update and delete inputs and detector +// models, and to list their versions. +// +// See https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27 for more information on this service. +// +// See iotevents package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/iotevents/ +// +// Using the Client +// +// To contact AWS IoT Events with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
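The enum blocks above are plain exported string constants, so they drop directly into the fluent setters. A sketch wiring LoggingLevelDebug into PutLoggingOptions over a default session (the role ARN is a placeholder):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession()))

	_, err := svc.PutLoggingOptions((&iotevents.PutLoggingOptionsInput{}).
		SetLoggingOptions((&iotevents.LoggingOptions{}).
			SetEnabled(true).
			SetLevel(iotevents.LoggingLevelDebug). // one of the LoggingLevel constants above
			SetRoleArn("arn:aws:iam::123456789012:role/iotevents-logging")))
	if err != nil {
		log.Fatal(err)
	}
}
```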
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS IoT Events client IoTEvents for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/iotevents/#New +package iotevents diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/errors.go new file mode 100644 index 00000000000..e36954c548f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iotevents + +const ( + + // ErrCodeInternalFailureException for service response error code + // "InternalFailureException". + // + // An internal failure occurred. + ErrCodeInternalFailureException = "InternalFailureException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // The request was invalid. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // A limit was exceeded. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // The resource already exists. + ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The resource is in use. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource was not found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceUnavailableException for service response error code + // "ServiceUnavailableException". + // + // The service is currently unavailable. + ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request could not be completed due to throttling. + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeUnsupportedOperationException for service response error code + // "UnsupportedOperationException". + // + // The requested operation is not supported. + ErrCodeUnsupportedOperationException = "UnsupportedOperationException" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go new file mode 100644 index 00000000000..13eeb1dfb3c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go @@ -0,0 +1,99 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iotevents + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// IoTEvents provides the API operation methods for making requests to +// AWS IoT Events. See this package's package overview docs +// for details on the service. 
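These ErrCode constants are meant for the v1 SDK's error idiom: type-assert the returned error to awserr.Error and switch on Code(). A sketch using ListTagsForResource (the ARN is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession()))

	_, err := svc.ListTagsForResource((&iotevents.ListTagsForResourceInput{}).
		SetResourceArn("arn:aws:iotevents:us-west-2:123456789012:input/missing"))
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case iotevents.ErrCodeResourceNotFoundException:
			fmt.Println("no such resource:", aerr.Message())
		case iotevents.ErrCodeThrottlingException:
			fmt.Println("throttled; retry later")
		default:
			log.Fatal(aerr)
		}
	}
}
```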
+// +// IoTEvents methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type IoTEvents struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "IoT Events" // Name of service. + EndpointsID = "iotevents" // ID to lookup a service endpoint with. + ServiceID = "IoT Events" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the IoTEvents client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create an IoTEvents client from just a session. +// svc := iotevents.New(mySession) +// +// // Create an IoTEvents client with additional configuration +// svc := iotevents.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTEvents { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "iotevents" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTEvents { + svc := &IoTEvents{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2018-07-27", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for an IoTEvents operation and runs any +// custom request initialization. +func (c *IoTEvents) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go index f0674cb9491..418fe50a087 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go @@ -3,6 +3,7 @@ package kafka import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -109,6 +110,103 @@ func (c *Kafka) CreateClusterWithContext(ctx aws.Context, input *CreateClusterIn return out, req.Send() } +const opCreateConfiguration = "CreateConfiguration" + +// CreateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfiguration operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateConfiguration for more information on using the CreateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateConfigurationRequest method. +// req, resp := client.CreateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/CreateConfiguration +func (c *Kafka) CreateConfigurationRequest(input *CreateConfigurationInput) (req *request.Request, output *CreateConfigurationOutput) { + op := &request.Operation{ + Name: opCreateConfiguration, + HTTPMethod: "POST", + HTTPPath: "/v1/configurations", + } + + if input == nil { + input = &CreateConfigurationInput{} + } + + output = &CreateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateConfiguration API operation for Managed Streaming for Kafka. +// +// Creates a new MSK configuration. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation CreateConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Returns information about an error. +// +// * ErrCodeConflictException "ConflictException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/CreateConfiguration +func (c *Kafka) CreateConfiguration(input *CreateConfigurationInput) (*CreateConfigurationOutput, error) { + req, out := c.CreateConfigurationRequest(input) + return out, req.Send() +} + +// CreateConfigurationWithContext is the same as CreateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See CreateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Kafka) CreateConfigurationWithContext(ctx aws.Context, input *CreateConfigurationInput, opts ...request.Option) (*CreateConfigurationOutput, error) { + req, out := c.CreateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteCluster = "DeleteCluster" // DeleteClusterRequest generates a "aws/request.Request" representing the @@ -290,60 +388,63 @@ func (c *Kafka) DescribeClusterWithContext(ctx aws.Context, input *DescribeClust return out, req.Send() } -const opGetBootstrapBrokers = "GetBootstrapBrokers" +const opDescribeClusterOperation = "DescribeClusterOperation" -// GetBootstrapBrokersRequest generates a "aws/request.Request" representing the -// client's request for the GetBootstrapBrokers operation. The "output" return +// DescribeClusterOperationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterOperation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetBootstrapBrokers for more information on using the GetBootstrapBrokers +// See DescribeClusterOperation for more information on using the DescribeClusterOperation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetBootstrapBrokersRequest method. -// req, resp := client.GetBootstrapBrokersRequest(params) +// // Example sending a request using the DescribeClusterOperationRequest method. +// req, resp := client.DescribeClusterOperationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetBootstrapBrokers -func (c *Kafka) GetBootstrapBrokersRequest(input *GetBootstrapBrokersInput) (req *request.Request, output *GetBootstrapBrokersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation +func (c *Kafka) DescribeClusterOperationRequest(input *DescribeClusterOperationInput) (req *request.Request, output *DescribeClusterOperationOutput) { op := &request.Operation{ - Name: opGetBootstrapBrokers, + Name: opDescribeClusterOperation, HTTPMethod: "GET", - HTTPPath: "/v1/clusters/{clusterArn}/bootstrap-brokers", + HTTPPath: "/v1/operations/{clusterOperationArn}", } if input == nil { - input = &GetBootstrapBrokersInput{} + input = &DescribeClusterOperationInput{} } - output = &GetBootstrapBrokersOutput{} + output = &DescribeClusterOperationOutput{} req = c.newRequest(op, input, output) return } -// GetBootstrapBrokers API operation for Managed Streaming for Kafka. +// DescribeClusterOperation API operation for Managed Streaming for Kafka. // -// A list of brokers that a client application can use to bootstrap. +// Returns a description of the cluster operation specified by the ARN. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation GetBootstrapBrokers for usage and error information. 
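All of the WithContext variants share this contract: a non-nil context (cancellation aborts the in-flight HTTP request) plus optional request.Options. A sketch bounding DescribeClusterOperation with a timeout (the operation ARN is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kafka"
)

func main() {
	svc := kafka.New(session.Must(session.NewSession()))

	// The context must be non-nil; a nil context panics.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	op, err := svc.DescribeClusterOperationWithContext(ctx,
		(&kafka.DescribeClusterOperationInput{}).
			SetClusterOperationArn("arn:aws:kafka:us-west-2:123456789012:cluster-operation/example"))
	if err != nil {
		log.Fatal(err)
	}
	_ = op
}
```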
+// API operation DescribeClusterOperation for usage and error information. // // Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // @@ -353,180 +454,183 @@ func (c *Kafka) GetBootstrapBrokersRequest(input *GetBootstrapBrokersInput) (req // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// * ErrCodeConflictException "ConflictException" -// Returns information about an error. -// // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetBootstrapBrokers -func (c *Kafka) GetBootstrapBrokers(input *GetBootstrapBrokersInput) (*GetBootstrapBrokersOutput, error) { - req, out := c.GetBootstrapBrokersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation +func (c *Kafka) DescribeClusterOperation(input *DescribeClusterOperationInput) (*DescribeClusterOperationOutput, error) { + req, out := c.DescribeClusterOperationRequest(input) return out, req.Send() } -// GetBootstrapBrokersWithContext is the same as GetBootstrapBrokers with the addition of +// DescribeClusterOperationWithContext is the same as DescribeClusterOperation with the addition of // the ability to pass a context and additional request options. // -// See GetBootstrapBrokers for details on how to use this API operation. +// See DescribeClusterOperation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) GetBootstrapBrokersWithContext(ctx aws.Context, input *GetBootstrapBrokersInput, opts ...request.Option) (*GetBootstrapBrokersOutput, error) { - req, out := c.GetBootstrapBrokersRequest(input) +func (c *Kafka) DescribeClusterOperationWithContext(ctx aws.Context, input *DescribeClusterOperationInput, opts ...request.Option) (*DescribeClusterOperationOutput, error) { + req, out := c.DescribeClusterOperationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListClusters = "ListClusters" +const opDescribeConfiguration = "DescribeConfiguration" -// ListClustersRequest generates a "aws/request.Request" representing the -// client's request for the ListClusters operation. The "output" return +// DescribeConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListClusters for more information on using the ListClusters +// See DescribeConfiguration for more information on using the DescribeConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListClustersRequest method. 
-// req, resp := client.ListClustersRequest(params) +// // Example sending a request using the DescribeConfigurationRequest method. +// req, resp := client.DescribeConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters -func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeConfiguration +func (c *Kafka) DescribeConfigurationRequest(input *DescribeConfigurationInput) (req *request.Request, output *DescribeConfigurationOutput) { op := &request.Operation{ - Name: opListClusters, + Name: opDescribeConfiguration, HTTPMethod: "GET", - HTTPPath: "/v1/clusters", + HTTPPath: "/v1/configurations/{arn}", } if input == nil { - input = &ListClustersInput{} + input = &DescribeConfigurationInput{} } - output = &ListClustersOutput{} + output = &DescribeConfigurationOutput{} req = c.newRequest(op, input, output) return } -// ListClusters API operation for Managed Streaming for Kafka. +// DescribeConfiguration API operation for Managed Streaming for Kafka. // -// Returns a list of clusters in an account. +// Returns a description of this MSK configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListClusters for usage and error information. +// API operation DescribeConfiguration for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // -// * ErrCodeUnauthorizedException "UnauthorizedException" +// * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters -func (c *Kafka) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { - req, out := c.ListClustersRequest(input) +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeConfiguration +func (c *Kafka) DescribeConfiguration(input *DescribeConfigurationInput) (*DescribeConfigurationOutput, error) { + req, out := c.DescribeConfigurationRequest(input) return out, req.Send() } -// ListClustersWithContext is the same as ListClusters with the addition of +// DescribeConfigurationWithContext is the same as DescribeConfiguration with the addition of // the ability to pass a context and additional request options. // -// See ListClusters for details on how to use this API operation. +// See DescribeConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) { - req, out := c.ListClustersRequest(input) +func (c *Kafka) DescribeConfigurationWithContext(ctx aws.Context, input *DescribeConfigurationInput, opts ...request.Option) (*DescribeConfigurationOutput, error) { + req, out := c.DescribeConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListNodes = "ListNodes" +const opDescribeConfigurationRevision = "DescribeConfigurationRevision" -// ListNodesRequest generates a "aws/request.Request" representing the -// client's request for the ListNodes operation. The "output" return +// DescribeConfigurationRevisionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurationRevision operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListNodes for more information on using the ListNodes +// See DescribeConfigurationRevision for more information on using the DescribeConfigurationRevision // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListNodesRequest method. -// req, resp := client.ListNodesRequest(params) +// // Example sending a request using the DescribeConfigurationRevisionRequest method. +// req, resp := client.DescribeConfigurationRevisionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes -func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, output *ListNodesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeConfigurationRevision +func (c *Kafka) DescribeConfigurationRevisionRequest(input *DescribeConfigurationRevisionInput) (req *request.Request, output *DescribeConfigurationRevisionOutput) { op := &request.Operation{ - Name: opListNodes, + Name: opDescribeConfigurationRevision, HTTPMethod: "GET", - HTTPPath: "/v1/clusters/{clusterArn}/nodes", + HTTPPath: "/v1/configurations/{arn}/revisions/{revision}", } if input == nil { - input = &ListNodesInput{} + input = &DescribeConfigurationRevisionInput{} } - output = &ListNodesOutput{} + output = &DescribeConfigurationRevisionOutput{} req = c.newRequest(op, input, output) return } -// ListNodes API operation for Managed Streaming for Kafka. +// DescribeConfigurationRevision API operation for Managed Streaming for Kafka. // -// Returns a list of the broker nodes in the cluster. +// Returns a description of this revision of the configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListNodes for usage and error information. 
+// API operation DescribeConfigurationRevision for usage and error information. // // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeBadRequestException "BadRequestException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" @@ -535,342 +639,2591 @@ func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, o // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes -func (c *Kafka) ListNodes(input *ListNodesInput) (*ListNodesOutput, error) { - req, out := c.ListNodesRequest(input) +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeConfigurationRevision +func (c *Kafka) DescribeConfigurationRevision(input *DescribeConfigurationRevisionInput) (*DescribeConfigurationRevisionOutput, error) { + req, out := c.DescribeConfigurationRevisionRequest(input) return out, req.Send() } -// ListNodesWithContext is the same as ListNodes with the addition of +// DescribeConfigurationRevisionWithContext is the same as DescribeConfigurationRevision with the addition of // the ability to pass a context and additional request options. // -// See ListNodes for details on how to use this API operation. +// See DescribeConfigurationRevision for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListNodesWithContext(ctx aws.Context, input *ListNodesInput, opts ...request.Option) (*ListNodesOutput, error) { - req, out := c.ListNodesRequest(input) +func (c *Kafka) DescribeConfigurationRevisionWithContext(ctx aws.Context, input *DescribeConfigurationRevisionInput, opts ...request.Option) (*DescribeConfigurationRevisionOutput, error) { + req, out := c.DescribeConfigurationRevisionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTagsForResource = "ListTagsForResource" +const opGetBootstrapBrokers = "GetBootstrapBrokers" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// GetBootstrapBrokersRequest generates a "aws/request.Request" representing the +// client's request for the GetBootstrapBrokers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See GetBootstrapBrokers for more information on using the GetBootstrapBrokers // API call, and error handling. 
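+//
+// A minimal usage sketch (illustrative, not part of the generated SDK):
+// fetching the bootstrap broker string for a cluster, assuming a configured
+// session and a placeholder cluster ARN.
+//
+//    sess := session.Must(session.NewSession())
+//    svc := kafka.New(sess)
+//    out, err := svc.GetBootstrapBrokers(&kafka.GetBootstrapBrokersInput{
+//        ClusterArn: aws.String("arn:aws:kafka:us-east-1:111122223333:cluster/example"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.BootstrapBrokerString))
+//    }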
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the GetBootstrapBrokersRequest method. +// req, resp := client.GetBootstrapBrokersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetBootstrapBrokers +func (c *Kafka) GetBootstrapBrokersRequest(input *GetBootstrapBrokersInput) (req *request.Request, output *GetBootstrapBrokersOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opGetBootstrapBrokers, HTTPMethod: "GET", - HTTPPath: "/v1/tags/{resourceArn}", + HTTPPath: "/v1/clusters/{clusterArn}/bootstrap-brokers", } if input == nil { - input = &ListTagsForResourceInput{} + input = &GetBootstrapBrokersInput{} } - output = &ListTagsForResourceOutput{} + output = &GetBootstrapBrokersOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Managed Streaming for Kafka. +// GetBootstrapBrokers API operation for Managed Streaming for Kafka. // -// List tags for a resource. +// Returns a list of brokers that a client application can use to bootstrap. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListTagsForResource for usage and error information. +// API operation GetBootstrapBrokers for usage and error information. // // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeBadRequestException "BadRequestException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// * ErrCodeConflictException "ConflictException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetBootstrapBrokers +func (c *Kafka) GetBootstrapBrokers(input *GetBootstrapBrokersInput) (*GetBootstrapBrokersOutput, error) { + req, out := c.GetBootstrapBrokersRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// GetBootstrapBrokersWithContext is the same as GetBootstrapBrokers with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation.
+// See GetBootstrapBrokers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Kafka) GetBootstrapBrokersWithContext(ctx aws.Context, input *GetBootstrapBrokersInput, opts ...request.Option) (*GetBootstrapBrokersOutput, error) { + req, out := c.GetBootstrapBrokersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opListClusterOperations = "ListClusterOperations" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListClusterOperationsRequest generates a "aws/request.Request" representing the +// client's request for the ListClusterOperations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See ListClusterOperations for more information on using the ListClusterOperations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the ListClusterOperationsRequest method. +// req, resp := client.ListClusterOperationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperationsRequest(input *ListClusterOperationsInput) (req *request.Request, output *ListClusterOperationsOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/v1/tags/{resourceArn}", + Name: opListClusterOperations, + HTTPMethod: "GET", + HTTPPath: "/v1/clusters/{clusterArn}/operations", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &TagResourceInput{} + input = &ListClusterOperationsInput{} } - output = &TagResourceOutput{} + output = &ListClusterOperationsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Managed Streaming for Kafka. +// ListClusterOperations API operation for Managed Streaming for Kafka. 
// -// Add tags to a resource +// Returns a list of all the operations that have been performed on the specified +// MSK cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation TagResource for usage and error information. +// API operation ListClusterOperations for usage and error information. // // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" -// Returns information about an error. -// // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperations(input *ListClusterOperationsInput) (*ListClusterOperationsOutput, error) { + req, out := c.ListClusterOperationsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// ListClusterOperationsWithContext is the same as ListClusterOperations with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See ListClusterOperations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Kafka) ListClusterOperationsWithContext(ctx aws.Context, input *ListClusterOperationsInput, opts ...request.Option) (*ListClusterOperationsOutput, error) { + req, out := c.ListClusterOperationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// ListClusterOperationsPages iterates over the pages of a ListClusterOperations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusterOperations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusterOperations operation. 
+// pageNum := 0 +// err := client.ListClusterOperationsPages(params, +// func(page *kafka.ListClusterOperationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListClusterOperationsPages(input *ListClusterOperationsInput, fn func(*ListClusterOperationsOutput, bool) bool) error { + return c.ListClusterOperationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListClusterOperationsPagesWithContext same as ListClusterOperationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListClusterOperationsPagesWithContext(ctx aws.Context, input *ListClusterOperationsInput, fn func(*ListClusterOperationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListClusterOperationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListClusterOperationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListClusterOperationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See ListClusters for more information on using the ListClusters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the ListClustersRequest method. 
+// req, resp := client.ListClustersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters +func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/v1/tags/{resourceArn}", + Name: opListClusters, + HTTPMethod: "GET", + HTTPPath: "/v1/clusters", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UntagResourceInput{} + input = &ListClustersInput{} } - output = &UntagResourceOutput{} + output = &ListClustersOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Managed Streaming for Kafka. +// ListClusters API operation for Managed Streaming for Kafka. // -// Remove tags from a resource. +// Returns a list of all the MSK clusters in the current Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation UntagResource for usage and error information. +// API operation ListClusters for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters +func (c *Kafka) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + return out, req.Send() +} + +// ListClustersWithContext is the same as ListClusters with the addition of +// the ability to pass a context and additional request options. +// +// See ListClusters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. 
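+//
+// A short sketch (illustrative, not part of the generated SDK) that collects
+// the ARN of every cluster in the Region via the paginated variant, assuming
+// "svc" is an initialized *kafka.Kafka client.
+//
+//    var arns []string
+//    err := svc.ListClustersPages(&kafka.ListClustersInput{},
+//        func(page *kafka.ListClustersOutput, lastPage bool) bool {
+//            for _, info := range page.ClusterInfoList {
+//                arns = append(arns, aws.StringValue(info.ClusterArn))
+//            }
+//            return true // keep paging until the service reports the last page
+//        })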
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *kafka.ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListClustersPages(input *ListClustersInput, fn func(*ListClustersOutput, bool) bool) error { + return c.ListClustersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListClustersPagesWithContext same as ListClustersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersInput, fn func(*ListClustersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListClustersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListClustersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListConfigurationRevisions = "ListConfigurationRevisions" + +// ListConfigurationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurationRevisions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListConfigurationRevisions for more information on using the ListConfigurationRevisions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListConfigurationRevisionsRequest method. +// req, resp := client.ListConfigurationRevisionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurationRevisions +func (c *Kafka) ListConfigurationRevisionsRequest(input *ListConfigurationRevisionsInput) (req *request.Request, output *ListConfigurationRevisionsOutput) { + op := &request.Operation{ + Name: opListConfigurationRevisions, + HTTPMethod: "GET", + HTTPPath: "/v1/configurations/{arn}/revisions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListConfigurationRevisionsInput{} + } + + output = &ListConfigurationRevisionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListConfigurationRevisions API operation for Managed Streaming for Kafka. +// +// Returns a list of all the revisions of an MSK configuration. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListConfigurationRevisions for usage and error information. // // Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// // * ErrCodeNotFoundException "NotFoundException" // Returns information about an error. // +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurationRevisions +func (c *Kafka) ListConfigurationRevisions(input *ListConfigurationRevisionsInput) (*ListConfigurationRevisionsOutput, error) { + req, out := c.ListConfigurationRevisionsRequest(input) + return out, req.Send() +} + +// ListConfigurationRevisionsWithContext is the same as ListConfigurationRevisions with the addition of +// the ability to pass a context and additional request options. +// +// See ListConfigurationRevisions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListConfigurationRevisionsWithContext(ctx aws.Context, input *ListConfigurationRevisionsInput, opts ...request.Option) (*ListConfigurationRevisionsOutput, error) { + req, out := c.ListConfigurationRevisionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListConfigurationRevisionsPages iterates over the pages of a ListConfigurationRevisions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListConfigurationRevisions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListConfigurationRevisions operation. +// pageNum := 0 +// err := client.ListConfigurationRevisionsPages(params, +// func(page *kafka.ListConfigurationRevisionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListConfigurationRevisionsPages(input *ListConfigurationRevisionsInput, fn func(*ListConfigurationRevisionsOutput, bool) bool) error { + return c.ListConfigurationRevisionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListConfigurationRevisionsPagesWithContext same as ListConfigurationRevisionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
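+//
+// An illustrative sketch (not part of the generated SDK) that bounds the whole
+// pagination with a deadline through the context-aware variant, assuming "svc"
+// is an initialized *kafka.Kafka client and "configArn" holds a configuration
+// ARN.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := svc.ListConfigurationRevisionsPagesWithContext(ctx,
+//        &kafka.ListConfigurationRevisionsInput{Arn: aws.String(configArn)},
+//        func(page *kafka.ListConfigurationRevisionsOutput, lastPage bool) bool {
+//            fmt.Println(len(page.Revisions), "revisions on this page")
+//            return true
+//        })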
+func (c *Kafka) ListConfigurationRevisionsPagesWithContext(ctx aws.Context, input *ListConfigurationRevisionsInput, fn func(*ListConfigurationRevisionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListConfigurationRevisionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListConfigurationRevisionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListConfigurationRevisionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListConfigurations = "ListConfigurations" + +// ListConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListConfigurations for more information on using the ListConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListConfigurationsRequest method. +// req, resp := client.ListConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations +func (c *Kafka) ListConfigurationsRequest(input *ListConfigurationsInput) (req *request.Request, output *ListConfigurationsOutput) { + op := &request.Operation{ + Name: opListConfigurations, + HTTPMethod: "GET", + HTTPPath: "/v1/configurations", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListConfigurationsInput{} + } + + output = &ListConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListConfigurations API operation for Managed Streaming for Kafka. +// +// Returns a list of all the MSK configurations in this Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListConfigurations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations +func (c *Kafka) ListConfigurations(input *ListConfigurationsInput) (*ListConfigurationsOutput, error) { + req, out := c.ListConfigurationsRequest(input) + return out, req.Send() +} + +// ListConfigurationsWithContext is the same as ListConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListConfigurationsWithContext(ctx aws.Context, input *ListConfigurationsInput, opts ...request.Option) (*ListConfigurationsOutput, error) { + req, out := c.ListConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() +// ListConfigurationsPages iterates over the pages of a ListConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListConfigurations operation. +// pageNum := 0 +// err := client.ListConfigurationsPages(params, +// func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListConfigurationsPages(input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool) error { + return c.ListConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListConfigurationsPagesWithContext same as ListConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Kafka) ListConfigurationsPagesWithContext(ctx aws.Context, input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListConfigurationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListNodes = "ListNodes" + +// ListNodesRequest generates a "aws/request.Request" representing the +// client's request for the ListNodes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListNodes for more information on using the ListNodes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListNodesRequest method. +// req, resp := client.ListNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes +func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, output *ListNodesOutput) { + op := &request.Operation{ + Name: opListNodes, + HTTPMethod: "GET", + HTTPPath: "/v1/clusters/{clusterArn}/nodes", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListNodesInput{} + } + + output = &ListNodesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListNodes API operation for Managed Streaming for Kafka. +// +// Returns a list of the broker nodes in the cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListNodes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes +func (c *Kafka) ListNodes(input *ListNodesInput) (*ListNodesOutput, error) { + req, out := c.ListNodesRequest(input) + return out, req.Send() +} + +// ListNodesWithContext is the same as ListNodes with the addition of +// the ability to pass a context and additional request options. +// +// See ListNodes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListNodesWithContext(ctx aws.Context, input *ListNodesInput, opts ...request.Option) (*ListNodesOutput, error) { + req, out := c.ListNodesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListNodesPages iterates over the pages of a ListNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNodes operation. +// pageNum := 0 +// err := client.ListNodesPages(params, +// func(page *kafka.ListNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListNodesPages(input *ListNodesInput, fn func(*ListNodesOutput, bool) bool) error { + return c.ListNodesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListNodesPagesWithContext same as ListNodesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListNodesPagesWithContext(ctx aws.Context, input *ListNodesInput, fn func(*ListNodesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNodesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNodesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListNodesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Managed Streaming for Kafka. +// +// Returns a list of the tags associated with the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Managed Streaming for Kafka. +// +// Adds tags to the specified MSK resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Managed Streaming for Kafka. +// +// Removes the tags associated with the keys that are provided in the query. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBrokerCount = "UpdateBrokerCount" + +// UpdateBrokerCountRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBrokerCount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBrokerCount for more information on using the UpdateBrokerCount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBrokerCountRequest method. 
+// req, resp := client.UpdateBrokerCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCountRequest(input *UpdateBrokerCountInput) (req *request.Request, output *UpdateBrokerCountOutput) { + op := &request.Operation{ + Name: opUpdateBrokerCount, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/nodes/count", + } + + if input == nil { + input = &UpdateBrokerCountInput{} + } + + output = &UpdateBrokerCountOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBrokerCount API operation for Managed Streaming for Kafka. +// +// Updates the number of broker nodes in the cluster. You can use this operation +// to increase the number of brokers in an existing cluster. You can't decrease +// the number of brokers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateBrokerCount for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCount(input *UpdateBrokerCountInput) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + return out, req.Send() +} + +// UpdateBrokerCountWithContext is the same as UpdateBrokerCount with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBrokerCount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateBrokerCountWithContext(ctx aws.Context, input *UpdateBrokerCountInput, opts ...request.Option) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBrokerStorage = "UpdateBrokerStorage" + +// UpdateBrokerStorageRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBrokerStorage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBrokerStorage for more information on using the UpdateBrokerStorage +// API call, and error handling. 
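+//
+// A minimal sketch (illustrative, not part of the generated SDK): growing the
+// EBS volume on every broker by targeting the keyword ALL, assuming "svc" is
+// an initialized *kafka.Kafka client and "clusterArn" and "currentVersion"
+// were previously read from DescribeCluster.
+//
+//    out, err := svc.UpdateBrokerStorage(&kafka.UpdateBrokerStorageInput{
+//        ClusterArn:     aws.String(clusterArn),
+//        CurrentVersion: aws.String(currentVersion),
+//        TargetBrokerEBSVolumeInfo: []*kafka.BrokerEBSVolumeInfo{{
+//            KafkaBrokerNodeId: aws.String("ALL"),
+//            VolumeSizeGB:      aws.Int64(1100),
+//        }},
+//    })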
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBrokerStorageRequest method. +// req, resp := client.UpdateBrokerStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerStorage +func (c *Kafka) UpdateBrokerStorageRequest(input *UpdateBrokerStorageInput) (req *request.Request, output *UpdateBrokerStorageOutput) { + op := &request.Operation{ + Name: opUpdateBrokerStorage, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/nodes/storage", + } + + if input == nil { + input = &UpdateBrokerStorageInput{} + } + + output = &UpdateBrokerStorageOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBrokerStorage API operation for Managed Streaming for Kafka. +// +// Updates the EBS storage associated with MSK brokers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateBrokerStorage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerStorage +func (c *Kafka) UpdateBrokerStorage(input *UpdateBrokerStorageInput) (*UpdateBrokerStorageOutput, error) { + req, out := c.UpdateBrokerStorageRequest(input) + return out, req.Send() +} + +// UpdateBrokerStorageWithContext is the same as UpdateBrokerStorage with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBrokerStorage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateBrokerStorageWithContext(ctx aws.Context, input *UpdateBrokerStorageInput, opts ...request.Option) (*UpdateBrokerStorageOutput, error) { + req, out := c.UpdateBrokerStorageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateClusterConfiguration = "UpdateClusterConfiguration" + +// UpdateClusterConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClusterConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
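+//
+// A brief sketch (illustrative, not part of the generated SDK): pointing a
+// cluster at a specific revision of an MSK configuration, assuming "svc" is
+// an initialized *kafka.Kafka client and that the ARNs and version string are
+// placeholders.
+//
+//    out, err := svc.UpdateClusterConfiguration(&kafka.UpdateClusterConfigurationInput{
+//        ClusterArn:     aws.String(clusterArn),
+//        CurrentVersion: aws.String(currentVersion),
+//        ConfigurationInfo: &kafka.ConfigurationInfo{
+//            Arn:      aws.String(configArn),
+//            Revision: aws.Int64(1),
+//        },
+//    })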
+// +// See UpdateClusterConfiguration for more information on using the UpdateClusterConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateClusterConfigurationRequest method. +// req, resp := client.UpdateClusterConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterConfiguration +func (c *Kafka) UpdateClusterConfigurationRequest(input *UpdateClusterConfigurationInput) (req *request.Request, output *UpdateClusterConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateClusterConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/configuration", + } + + if input == nil { + input = &UpdateClusterConfigurationInput{} + } + + output = &UpdateClusterConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateClusterConfiguration API operation for Managed Streaming for Kafka. +// +// Updates the cluster with the configuration that is specified in the request +// body. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateClusterConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterConfiguration +func (c *Kafka) UpdateClusterConfiguration(input *UpdateClusterConfigurationInput) (*UpdateClusterConfigurationOutput, error) { + req, out := c.UpdateClusterConfigurationRequest(input) + return out, req.Send() +} + +// UpdateClusterConfigurationWithContext is the same as UpdateClusterConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateClusterConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateClusterConfigurationWithContext(ctx aws.Context, input *UpdateClusterConfigurationInput, opts ...request.Option) (*UpdateClusterConfigurationOutput, error) { + req, out := c.UpdateClusterConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the EBS volume upgrade information. 
The broker identifier must +// be set to the keyword ALL. This means the changes apply to all the brokers +// in the cluster. +type BrokerEBSVolumeInfo struct { + _ struct{} `type:"structure"` + + // The ID of the broker to update. + // + // KafkaBrokerNodeId is a required field + KafkaBrokerNodeId *string `locationName:"kafkaBrokerNodeId" type:"string" required:"true"` + + // Size of the EBS volume to update. + // + // VolumeSizeGB is a required field + VolumeSizeGB *int64 `locationName:"volumeSizeGB" type:"integer" required:"true"` +} + +// String returns the string representation +func (s BrokerEBSVolumeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BrokerEBSVolumeInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BrokerEBSVolumeInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BrokerEBSVolumeInfo"} + if s.KafkaBrokerNodeId == nil { + invalidParams.Add(request.NewErrParamRequired("KafkaBrokerNodeId")) + } + if s.VolumeSizeGB == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSizeGB")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKafkaBrokerNodeId sets the KafkaBrokerNodeId field's value. +func (s *BrokerEBSVolumeInfo) SetKafkaBrokerNodeId(v string) *BrokerEBSVolumeInfo { + s.KafkaBrokerNodeId = &v + return s +} + +// SetVolumeSizeGB sets the VolumeSizeGB field's value. +func (s *BrokerEBSVolumeInfo) SetVolumeSizeGB(v int64) *BrokerEBSVolumeInfo { + s.VolumeSizeGB = &v + return s +} + +// Describes the setup to be used for Kafka broker nodes in the cluster. +type BrokerNodeGroupInfo struct { + _ struct{} `type:"structure"` + + // The distribution of broker nodes across Availability Zones. + BrokerAZDistribution *string `locationName:"brokerAZDistribution" type:"string" enum:"BrokerAZDistribution"` + + // The list of subnets to connect to in the client virtual private cloud (VPC). + // AWS creates elastic network interfaces inside these subnets. Client applications + // use elastic network interfaces to produce and consume data. Client subnets + // can't be in Availability Zone us-east-1e. + // + // ClientSubnets is a required field + ClientSubnets []*string `locationName:"clientSubnets" type:"list" required:"true"` + + // The type of Amazon EC2 instances to use for Kafka brokers. The following + // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, + // kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge. + // + // InstanceType is a required field + InstanceType *string `locationName:"instanceType" min:"5" type:"string" required:"true"` + + // The AWS security groups to associate with the elastic network interfaces + // in order to specify who can connect to and communicate with the Amazon MSK + // cluster. If you don't specify a security group, Amazon MSK uses the default + // security group associated with the VPC. If you specify security groups that + // were shared with you, you must ensure that you have permissions to them. + // Specifically, you need the ec2:DescribeSecurityGroups permission. + SecurityGroups []*string `locationName:"securityGroups" type:"list"` + + // Contains information about storage volumes attached to MSK broker nodes. 
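+ //
+ // Illustrative sketch (not generated code): BrokerEBSVolumeInfo.Validate,
+ // defined above, reports missing required fields before any request is
+ // sent, so client-side checks can run without a network call.
+ //
+ //    v := &BrokerEBSVolumeInfo{KafkaBrokerNodeId: aws.String("ALL")}
+ //    if err := v.Validate(); err != nil {
+ //        // err is a request.ErrInvalidParams naming VolumeSizeGB.
+ //    }
+ //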
+ StorageInfo *StorageInfo `locationName:"storageInfo" type:"structure"` +} + +// String returns the string representation +func (s BrokerNodeGroupInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BrokerNodeGroupInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BrokerNodeGroupInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BrokerNodeGroupInfo"} + if s.ClientSubnets == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSubnets")) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.InstanceType != nil && len(*s.InstanceType) < 5 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 5)) + } + if s.StorageInfo != nil { + if err := s.StorageInfo.Validate(); err != nil { + invalidParams.AddNested("StorageInfo", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBrokerAZDistribution sets the BrokerAZDistribution field's value. +func (s *BrokerNodeGroupInfo) SetBrokerAZDistribution(v string) *BrokerNodeGroupInfo { + s.BrokerAZDistribution = &v + return s +} + +// SetClientSubnets sets the ClientSubnets field's value. +func (s *BrokerNodeGroupInfo) SetClientSubnets(v []*string) *BrokerNodeGroupInfo { + s.ClientSubnets = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *BrokerNodeGroupInfo) SetInstanceType(v string) *BrokerNodeGroupInfo { + s.InstanceType = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *BrokerNodeGroupInfo) SetSecurityGroups(v []*string) *BrokerNodeGroupInfo { + s.SecurityGroups = v + return s +} + +// SetStorageInfo sets the StorageInfo field's value. +func (s *BrokerNodeGroupInfo) SetStorageInfo(v *StorageInfo) *BrokerNodeGroupInfo { + s.StorageInfo = v + return s +} + +// BrokerNodeInfo +type BrokerNodeInfo struct { + _ struct{} `type:"structure"` + + // The attached elastic network interface of the broker. + AttachedENIId *string `locationName:"attachedENIId" type:"string"` + + // The ID of the broker. + BrokerId *float64 `locationName:"brokerId" type:"double"` + + // The client subnet to which this broker node belongs. + ClientSubnet *string `locationName:"clientSubnet" type:"string"` + + // The virtual private cloud (VPC) of the client. + ClientVpcIpAddress *string `locationName:"clientVpcIpAddress" type:"string"` + + // Information about the version of software currently deployed on the Kafka + // brokers in the cluster. + CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` + + // Endpoints for accessing the broker. + Endpoints []*string `locationName:"endpoints" type:"list"` +} + +// String returns the string representation +func (s BrokerNodeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BrokerNodeInfo) GoString() string { + return s.String() +} + +// SetAttachedENIId sets the AttachedENIId field's value. +func (s *BrokerNodeInfo) SetAttachedENIId(v string) *BrokerNodeInfo { + s.AttachedENIId = &v + return s +} + +// SetBrokerId sets the BrokerId field's value. +func (s *BrokerNodeInfo) SetBrokerId(v float64) *BrokerNodeInfo { + s.BrokerId = &v + return s +} + +// SetClientSubnet sets the ClientSubnet field's value. 
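+//
+// Illustrative sketch for BrokerNodeGroupInfo above (not generated code): the
+// generated setters return the receiver, so the struct can be built fluently.
+// The subnet IDs are placeholders.
+//
+//    group := &BrokerNodeGroupInfo{}
+//    group.SetInstanceType("kafka.m5.large").
+//        SetClientSubnets([]*string{aws.String("subnet-0001"), aws.String("subnet-0002")}).
+//        SetStorageInfo(&StorageInfo{})
+//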
+func (s *BrokerNodeInfo) SetClientSubnet(v string) *BrokerNodeInfo { + s.ClientSubnet = &v + return s +} + +// SetClientVpcIpAddress sets the ClientVpcIpAddress field's value. +func (s *BrokerNodeInfo) SetClientVpcIpAddress(v string) *BrokerNodeInfo { + s.ClientVpcIpAddress = &v + return s +} + +// SetCurrentBrokerSoftwareInfo sets the CurrentBrokerSoftwareInfo field's value. +func (s *BrokerNodeInfo) SetCurrentBrokerSoftwareInfo(v *BrokerSoftwareInfo) *BrokerNodeInfo { + s.CurrentBrokerSoftwareInfo = v + return s +} + +// SetEndpoints sets the Endpoints field's value. +func (s *BrokerNodeInfo) SetEndpoints(v []*string) *BrokerNodeInfo { + s.Endpoints = v + return s +} + +// Information about the current software installed on the cluster. +type BrokerSoftwareInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration used for the cluster. + // This field isn't visible in this preview release. + ConfigurationArn *string `locationName:"configurationArn" type:"string"` + + // The revision of the configuration to use. This field isn't visible in this + // preview release. + ConfigurationRevision *int64 `locationName:"configurationRevision" type:"long"` + + // The version of Apache Kafka. + KafkaVersion *string `locationName:"kafkaVersion" type:"string"` +} + +// String returns the string representation +func (s BrokerSoftwareInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BrokerSoftwareInfo) GoString() string { + return s.String() +} + +// SetConfigurationArn sets the ConfigurationArn field's value. +func (s *BrokerSoftwareInfo) SetConfigurationArn(v string) *BrokerSoftwareInfo { + s.ConfigurationArn = &v + return s +} + +// SetConfigurationRevision sets the ConfigurationRevision field's value. +func (s *BrokerSoftwareInfo) SetConfigurationRevision(v int64) *BrokerSoftwareInfo { + s.ConfigurationRevision = &v + return s +} + +// SetKafkaVersion sets the KafkaVersion field's value. +func (s *BrokerSoftwareInfo) SetKafkaVersion(v string) *BrokerSoftwareInfo { + s.KafkaVersion = &v + return s +} + +// Includes all client authentication information. +type ClientAuthentication struct { + _ struct{} `type:"structure"` + + // Details for ClientAuthentication using TLS. + Tls *Tls `locationName:"tls" type:"structure"` +} + +// String returns the string representation +func (s ClientAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientAuthentication) GoString() string { + return s.String() +} + +// SetTls sets the Tls field's value. +func (s *ClientAuthentication) SetTls(v *Tls) *ClientAuthentication { + s.Tls = v + return s +} + +// Returns information about a cluster. +type ClusterInfo struct { + _ struct{} `type:"structure"` + + // Arn of active cluster operation. + ActiveOperationArn *string `locationName:"activeOperationArn" type:"string"` + + // Information about the broker nodes. + BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure"` + + // Includes all client authentication information. + ClientAuthentication *ClientAuthentication `locationName:"clientAuthentication" type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The name of the cluster. + ClusterName *string `locationName:"clusterName" type:"string"` + + // The time when the cluster was created. 
+ CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Information about the version of software currently deployed on the Kafka + // brokers in the cluster. + CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` + + // The current version of the MSK cluster. Cluster versions aren't simple integers. + // You can obtain the current version by describing the cluster. An example + // version is KTVPDKIKX0DER. + CurrentVersion *string `locationName:"currentVersion" type:"string"` + + // Includes all encryption-related information. + EncryptionInfo *EncryptionInfo `locationName:"encryptionInfo" type:"structure"` + + // Specifies which metrics are gathered for the MSK cluster. This property has + // three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For + // a list of the metrics associated with each of these three levels of monitoring, + // see Monitoring (https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html). + EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` + + // The number of broker nodes in the cluster. + NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" type:"integer"` + + // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. + State *string `locationName:"state" type:"string" enum:"ClusterState"` + + // Tags attached to the cluster. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The connection string to use to connect to the Apache ZooKeeper cluster. + ZookeeperConnectString *string `locationName:"zookeeperConnectString" type:"string"` +} + +// String returns the string representation +func (s ClusterInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterInfo) GoString() string { + return s.String() +} + +// SetActiveOperationArn sets the ActiveOperationArn field's value. +func (s *ClusterInfo) SetActiveOperationArn(v string) *ClusterInfo { + s.ActiveOperationArn = &v + return s +} + +// SetBrokerNodeGroupInfo sets the BrokerNodeGroupInfo field's value. +func (s *ClusterInfo) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *ClusterInfo { + s.BrokerNodeGroupInfo = v + return s +} + +// SetClientAuthentication sets the ClientAuthentication field's value. +func (s *ClusterInfo) SetClientAuthentication(v *ClientAuthentication) *ClusterInfo { + s.ClientAuthentication = v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *ClusterInfo) SetClusterArn(v string) *ClusterInfo { + s.ClusterArn = &v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *ClusterInfo) SetClusterName(v string) *ClusterInfo { + s.ClusterName = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ClusterInfo) SetCreationTime(v time.Time) *ClusterInfo { + s.CreationTime = &v + return s +} + +// SetCurrentBrokerSoftwareInfo sets the CurrentBrokerSoftwareInfo field's value. +func (s *ClusterInfo) SetCurrentBrokerSoftwareInfo(v *BrokerSoftwareInfo) *ClusterInfo { + s.CurrentBrokerSoftwareInfo = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *ClusterInfo) SetCurrentVersion(v string) *ClusterInfo { + s.CurrentVersion = &v + return s +} + +// SetEncryptionInfo sets the EncryptionInfo field's value. 
+func (s *ClusterInfo) SetEncryptionInfo(v *EncryptionInfo) *ClusterInfo { + s.EncryptionInfo = v + return s +} + +// SetEnhancedMonitoring sets the EnhancedMonitoring field's value. +func (s *ClusterInfo) SetEnhancedMonitoring(v string) *ClusterInfo { + s.EnhancedMonitoring = &v + return s +} + +// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. +func (s *ClusterInfo) SetNumberOfBrokerNodes(v int64) *ClusterInfo { + s.NumberOfBrokerNodes = &v + return s +} + +// SetState sets the State field's value. +func (s *ClusterInfo) SetState(v string) *ClusterInfo { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ClusterInfo) SetTags(v map[string]*string) *ClusterInfo { + s.Tags = v + return s +} + +// SetZookeeperConnectString sets the ZookeeperConnectString field's value. +func (s *ClusterInfo) SetZookeeperConnectString(v string) *ClusterInfo { + s.ZookeeperConnectString = &v + return s +} + +// Returns information about a cluster operation. +type ClusterOperationInfo struct { + _ struct{} `type:"structure"` + + // The ID of the API request that triggered this operation. + ClientRequestId *string `locationName:"clientRequestId" type:"string"` + + // ARN of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The time at which operation was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The time at which the operation finished. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // Describes the error if the operation fails. + ErrorInfo *ErrorInfo `locationName:"errorInfo" type:"structure"` + + // ARN of the cluster operation. + OperationArn *string `locationName:"operationArn" type:"string"` + + // State of the cluster operation. + OperationState *string `locationName:"operationState" type:"string"` + + // Type of the cluster operation. + OperationType *string `locationName:"operationType" type:"string"` + + // Information about cluster attributes before a cluster is updated. + SourceClusterInfo *MutableClusterInfo `locationName:"sourceClusterInfo" type:"structure"` + + // Information about cluster attributes after a cluster is updated. + TargetClusterInfo *MutableClusterInfo `locationName:"targetClusterInfo" type:"structure"` +} + +// String returns the string representation +func (s ClusterOperationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterOperationInfo) GoString() string { + return s.String() +} + +// SetClientRequestId sets the ClientRequestId field's value. +func (s *ClusterOperationInfo) SetClientRequestId(v string) *ClusterOperationInfo { + s.ClientRequestId = &v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *ClusterOperationInfo) SetClusterArn(v string) *ClusterOperationInfo { + s.ClusterArn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ClusterOperationInfo) SetCreationTime(v time.Time) *ClusterOperationInfo { + s.CreationTime = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ClusterOperationInfo) SetEndTime(v time.Time) *ClusterOperationInfo { + s.EndTime = &v + return s +} + +// SetErrorInfo sets the ErrorInfo field's value. +func (s *ClusterOperationInfo) SetErrorInfo(v *ErrorInfo) *ClusterOperationInfo { + s.ErrorInfo = v + return s +} + +// SetOperationArn sets the OperationArn field's value. 
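+//
+// Illustrative sketch (not generated code): ClusterInfo above carries the
+// cluster state, so creation can be polled until the state leaves CREATING.
+// "svc" and "clusterArn" are placeholders, the poll interval is arbitrary,
+// and the DescribeCluster wrapper is assumed to be defined earlier in this
+// file.
+//
+//    for {
+//        out, err := svc.DescribeCluster(&DescribeClusterInput{
+//            ClusterArn: aws.String(clusterArn),
+//        })
+//        if err != nil || aws.StringValue(out.ClusterInfo.State) != "CREATING" {
+//            break
+//        }
+//        time.Sleep(30 * time.Second)
+//    }
+//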
+func (s *ClusterOperationInfo) SetOperationArn(v string) *ClusterOperationInfo { + s.OperationArn = &v + return s +} + +// SetOperationState sets the OperationState field's value. +func (s *ClusterOperationInfo) SetOperationState(v string) *ClusterOperationInfo { + s.OperationState = &v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ClusterOperationInfo) SetOperationType(v string) *ClusterOperationInfo { + s.OperationType = &v + return s +} + +// SetSourceClusterInfo sets the SourceClusterInfo field's value. +func (s *ClusterOperationInfo) SetSourceClusterInfo(v *MutableClusterInfo) *ClusterOperationInfo { + s.SourceClusterInfo = v + return s +} + +// SetTargetClusterInfo sets the TargetClusterInfo field's value. +func (s *ClusterOperationInfo) SetTargetClusterInfo(v *MutableClusterInfo) *ClusterOperationInfo { + s.TargetClusterInfo = v + return s +} + +// Represents an MSK Configuration. +type Configuration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreationTime is a required field + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The description of the configuration. + // + // Description is a required field + Description *string `locationName:"description" type:"string" required:"true"` + + // An array of the versions of Apache Kafka with which you can use this MSK + // configuration. You can use this configuration for an MSK cluster only if + // the Apache Kafka version specified for the cluster appears in this array. + // + // KafkaVersions is a required field + KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` + + // Latest revision of the configuration. + // + // LatestRevision is a required field + LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure" required:"true"` + + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Configuration) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Configuration) SetArn(v string) *Configuration { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Configuration) SetCreationTime(v time.Time) *Configuration { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Configuration) SetDescription(v string) *Configuration { + s.Description = &v + return s +} + +// SetKafkaVersions sets the KafkaVersions field's value. +func (s *Configuration) SetKafkaVersions(v []*string) *Configuration { + s.KafkaVersions = v + return s +} + +// SetLatestRevision sets the LatestRevision field's value. +func (s *Configuration) SetLatestRevision(v *ConfigurationRevision) *Configuration { + s.LatestRevision = v + return s +} + +// SetName sets the Name field's value. +func (s *Configuration) SetName(v string) *Configuration { + s.Name = &v + return s +} + +// Specifies the configuration to use for the brokers. 
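+//
+// Illustrative sketch (not generated code): a Configuration's latest revision,
+// described above, supplies the ARN and revision number that ConfigurationInfo
+// below needs when updating a cluster. "svc", "cfg", "clusterArn", and the
+// UpdateClusterConfigurationInput fields are assumptions.
+//
+//    info := &ConfigurationInfo{}
+//    info.SetArn(aws.StringValue(cfg.Arn)).
+//        SetRevision(aws.Int64Value(cfg.LatestRevision.Revision))
+//    out, err := svc.UpdateClusterConfiguration(&UpdateClusterConfigurationInput{
+//        ClusterArn:        aws.String(clusterArn),
+//        ConfigurationInfo: info,
+//        CurrentVersion:    aws.String("KTVPDKIKX0DER"),
+//    })
+//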
+type ConfigurationInfo struct { + _ struct{} `type:"structure"` + + // ARN of the configuration to use. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The revision of the configuration to use. + // + // Revision is a required field + Revision *int64 `locationName:"revision" type:"long" required:"true"` +} + +// String returns the string representation +func (s ConfigurationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigurationInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigurationInfo"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Revision == nil { + invalidParams.Add(request.NewErrParamRequired("Revision")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *ConfigurationInfo) SetArn(v string) *ConfigurationInfo { + s.Arn = &v + return s +} + +// SetRevision sets the Revision field's value. +func (s *ConfigurationInfo) SetRevision(v int64) *ConfigurationInfo { + s.Revision = &v + return s +} + +// Describes a configuration revision. +type ConfigurationRevision struct { + _ struct{} `type:"structure"` + + // The time when the configuration revision was created. + // + // CreationTime is a required field + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The description of the configuration revision. + Description *string `locationName:"description" type:"string"` + + // The revision number. + // + // Revision is a required field + Revision *int64 `locationName:"revision" type:"long" required:"true"` +} + +// String returns the string representation +func (s ConfigurationRevision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationRevision) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ConfigurationRevision) SetCreationTime(v time.Time) *ConfigurationRevision { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ConfigurationRevision) SetDescription(v string) *ConfigurationRevision { + s.Description = &v + return s +} + +// SetRevision sets the Revision field's value. +func (s *ConfigurationRevision) SetRevision(v int64) *ConfigurationRevision { + s.Revision = &v + return s +} + +// Creates a cluster. +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // Information about the broker nodes in the cluster. + // + // BrokerNodeGroupInfo is a required field + BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure" required:"true"` + + // Includes all client authentication related information. + ClientAuthentication *ClientAuthentication `locationName:"clientAuthentication" type:"structure"` + + // The name of the cluster. + // + // ClusterName is a required field + ClusterName *string `locationName:"clusterName" min:"1" type:"string" required:"true"` + + // Represents the configuration that you want MSK to use for the cluster. 
+ ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` + + // Includes all encryption-related information. + EncryptionInfo *EncryptionInfo `locationName:"encryptionInfo" type:"structure"` + + // Specifies the level of monitoring for the MSK cluster. The possible values + // are DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. + EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` + + // The version of Apache Kafka. + // + // KafkaVersion is a required field + KafkaVersion *string `locationName:"kafkaVersion" min:"1" type:"string" required:"true"` + + // The number of Kafka broker nodes in the Amazon MSK cluster. + // + // NumberOfBrokerNodes is a required field + NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" min:"1" type:"integer" required:"true"` + + // Create tags when creating the cluster. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"} + if s.BrokerNodeGroupInfo == nil { + invalidParams.Add(request.NewErrParamRequired("BrokerNodeGroupInfo")) + } + if s.ClusterName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterName")) + } + if s.ClusterName != nil && len(*s.ClusterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterName", 1)) + } + if s.KafkaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("KafkaVersion")) + } + if s.KafkaVersion != nil && len(*s.KafkaVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KafkaVersion", 1)) + } + if s.NumberOfBrokerNodes == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfBrokerNodes")) + } + if s.NumberOfBrokerNodes != nil && *s.NumberOfBrokerNodes < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfBrokerNodes", 1)) + } + if s.BrokerNodeGroupInfo != nil { + if err := s.BrokerNodeGroupInfo.Validate(); err != nil { + invalidParams.AddNested("BrokerNodeGroupInfo", err.(request.ErrInvalidParams)) + } + } + if s.ConfigurationInfo != nil { + if err := s.ConfigurationInfo.Validate(); err != nil { + invalidParams.AddNested("ConfigurationInfo", err.(request.ErrInvalidParams)) + } + } + if s.EncryptionInfo != nil { + if err := s.EncryptionInfo.Validate(); err != nil { + invalidParams.AddNested("EncryptionInfo", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBrokerNodeGroupInfo sets the BrokerNodeGroupInfo field's value. +func (s *CreateClusterInput) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *CreateClusterInput { + s.BrokerNodeGroupInfo = v + return s +} + +// SetClientAuthentication sets the ClientAuthentication field's value. +func (s *CreateClusterInput) SetClientAuthentication(v *ClientAuthentication) *CreateClusterInput { + s.ClientAuthentication = v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { + s.ClusterName = &v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. 
+func (s *CreateClusterInput) SetConfigurationInfo(v *ConfigurationInfo) *CreateClusterInput { + s.ConfigurationInfo = v + return s +} + +// SetEncryptionInfo sets the EncryptionInfo field's value. +func (s *CreateClusterInput) SetEncryptionInfo(v *EncryptionInfo) *CreateClusterInput { + s.EncryptionInfo = v + return s +} + +// SetEnhancedMonitoring sets the EnhancedMonitoring field's value. +func (s *CreateClusterInput) SetEnhancedMonitoring(v string) *CreateClusterInput { + s.EnhancedMonitoring = &v + return s +} + +// SetKafkaVersion sets the KafkaVersion field's value. +func (s *CreateClusterInput) SetKafkaVersion(v string) *CreateClusterInput { + s.KafkaVersion = &v + return s +} + +// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. +func (s *CreateClusterInput) SetNumberOfBrokerNodes(v int64) *CreateClusterInput { + s.NumberOfBrokerNodes = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateClusterInput) SetTags(v map[string]*string) *CreateClusterInput { + s.Tags = v + return s +} + +// Returns information about the created cluster. +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The name of the MSK cluster. + ClusterName *string `locationName:"clusterName" type:"string"` + + // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. + State *string `locationName:"state" type:"string" enum:"ClusterState"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *CreateClusterOutput) SetClusterArn(v string) *CreateClusterOutput { + s.ClusterArn = &v + return s +} + +// SetClusterName sets the ClusterName field's value. +func (s *CreateClusterOutput) SetClusterName(v string) *CreateClusterOutput { + s.ClusterName = &v + return s +} + +// SetState sets the State field's value. +func (s *CreateClusterOutput) SetState(v string) *CreateClusterOutput { + s.State = &v + return s +} + +// Request body for CreateConfiguration. +type CreateConfigurationInput struct { + _ struct{} `type:"structure"` + + // The description of the configuration. + Description *string `locationName:"description" type:"string"` + + // The versions of Apache Kafka with which you can use this MSK configuration. + // + // KafkaVersions is a required field + KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` + + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // ServerProperties is automatically base64 encoded/decoded by the SDK. + // + // ServerProperties is a required field + ServerProperties []byte `locationName:"serverProperties" type:"blob" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
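+//
+// Illustrative sketch for CreateClusterInput above (not generated code): the
+// required fields mirror the Validate checks. "svc" and the CreateCluster
+// wrapper defined earlier in this file are assumed; the name and version are
+// placeholders.
+//
+//    out, err := svc.CreateCluster(&CreateClusterInput{
+//        BrokerNodeGroupInfo: group, // a *BrokerNodeGroupInfo built as above
+//        ClusterName:         aws.String("example-cluster"),
+//        KafkaVersion:        aws.String("2.2.1"),
+//        NumberOfBrokerNodes: aws.Int64(3),
+//    })
+//    // On success, out.ClusterArn identifies the new cluster and out.State
+//    // reports one of CREATING, ACTIVE, or FAILED.
+//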
+func (s *CreateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationInput"} + if s.KafkaVersions == nil { + invalidParams.Add(request.NewErrParamRequired("KafkaVersions")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ServerProperties == nil { + invalidParams.Add(request.NewErrParamRequired("ServerProperties")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateConfigurationInput) SetDescription(v string) *CreateConfigurationInput { + s.Description = &v + return s +} + +// SetKafkaVersions sets the KafkaVersions field's value. +func (s *CreateConfigurationInput) SetKafkaVersions(v []*string) *CreateConfigurationInput { + s.KafkaVersions = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateConfigurationInput) SetName(v string) *CreateConfigurationInput { + s.Name = &v + return s +} + +// SetServerProperties sets the ServerProperties field's value. +func (s *CreateConfigurationInput) SetServerProperties(v []byte) *CreateConfigurationInput { + s.ServerProperties = v + return s +} + +// Response body for CreateConfiguration +type CreateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + Arn *string `locationName:"arn" type:"string"` + + // The time when the configuration was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Latest revision of the configuration. + LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` + + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s CreateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateConfigurationOutput) SetArn(v string) *CreateConfigurationOutput { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *CreateConfigurationOutput) SetCreationTime(v time.Time) *CreateConfigurationOutput { + s.CreationTime = &v + return s +} + +// SetLatestRevision sets the LatestRevision field's value. +func (s *CreateConfigurationOutput) SetLatestRevision(v *ConfigurationRevision) *CreateConfigurationOutput { + s.LatestRevision = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateConfigurationOutput) SetName(v string) *CreateConfigurationOutput { + s.Name = &v + return s +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + CurrentVersion *string `location:"querystring" locationName:"currentVersion" type:"string"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
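+//
+// Illustrative sketch for CreateConfigurationInput above (not generated code):
+// ServerProperties takes the raw server.properties bytes, and the SDK handles
+// the base64 encoding, as noted above. "svc" and the CreateConfiguration
+// wrapper defined earlier in this file are assumed.
+//
+//    props := []byte("auto.create.topics.enable=true\nlog.retention.hours=168\n")
+//    out, err := svc.CreateConfiguration(&CreateConfigurationInput{
+//        Name:             aws.String("example-config"),
+//        KafkaVersions:    []*string{aws.String("2.2.1")},
+//        ServerProperties: props,
+//    })
+//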
+func (s *DeleteClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *DeleteClusterInput) SetClusterArn(v string) *DeleteClusterInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *DeleteClusterInput) SetCurrentVersion(v string) *DeleteClusterInput { + s.CurrentVersion = &v + return s +} + +// Returns information about the deleted cluster. +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. + State *string `locationName:"state" type:"string" enum:"ClusterState"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *DeleteClusterOutput) SetClusterArn(v string) *DeleteClusterOutput { + s.ClusterArn = &v + return s +} + +// SetState sets the State field's value. +func (s *DeleteClusterOutput) SetState(v string) *DeleteClusterOutput { + s.State = &v + return s +} + +type DescribeClusterInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeClusterInput) String() string { + return awsutil.Prettify(s) } -// Describes the setup to be used for Kafka broker nodes in the cluster. -type BrokerNodeGroupInfo struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s DescribeClusterInput) GoString() string { + return s.String() +} - // The distribution of broker nodes across Availability Zones. - BrokerAZDistribution *string `locationName:"brokerAZDistribution" type:"string" enum:"BrokerAZDistribution"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClusterInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } - // The list of subnets to connect to in the client virtual private cloud (VPC). - // AWS creates elastic network interfaces inside these subnets. Client applications - // use elastic network interfaces to produce and consume data. Client subnets - // can't be in Availability Zone us-east-1e. - // - // ClientSubnets is a required field - ClientSubnets []*string `locationName:"clientSubnets" type:"list" required:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The type of Amazon EC2 instances to use for Kafka brokers. 
The following - // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge,kafka.m5.4xlarge, - // kafka.m5.12xlarge, and kafka.m5.24xlarge. - // - // InstanceType is a required field - InstanceType *string `locationName:"instanceType" min:"5" type:"string" required:"true"` +// SetClusterArn sets the ClusterArn field's value. +func (s *DescribeClusterInput) SetClusterArn(v string) *DescribeClusterInput { + s.ClusterArn = &v + return s +} - // The AWS security groups to associate with the elastic network interfaces - // in order to specify who can connect to and communicate with the Amazon MSK - // cluster. - SecurityGroups []*string `locationName:"securityGroups" type:"list"` +type DescribeClusterOperationInput struct { + _ struct{} `type:"structure"` - // Contains information about storage volumes attached to MSK broker nodes. - StorageInfo *StorageInfo `locationName:"storageInfo" type:"structure"` + // ClusterOperationArn is a required field + ClusterOperationArn *string `location:"uri" locationName:"clusterOperationArn" type:"string" required:"true"` } // String returns the string representation -func (s BrokerNodeGroupInfo) String() string { +func (s DescribeClusterOperationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BrokerNodeGroupInfo) GoString() string { +func (s DescribeClusterOperationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BrokerNodeGroupInfo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BrokerNodeGroupInfo"} - if s.ClientSubnets == nil { - invalidParams.Add(request.NewErrParamRequired("ClientSubnets")) - } - if s.InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceType")) - } - if s.InstanceType != nil && len(*s.InstanceType) < 5 { - invalidParams.Add(request.NewErrParamMinLen("InstanceType", 5)) +func (s *DescribeClusterOperationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClusterOperationInput"} + if s.ClusterOperationArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterOperationArn")) } - if s.StorageInfo != nil { - if err := s.StorageInfo.Validate(); err != nil { - invalidParams.AddNested("StorageInfo", err.(request.ErrInvalidParams)) - } + if s.ClusterOperationArn != nil && len(*s.ClusterOperationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterOperationArn", 1)) } if invalidParams.Len() > 0 { @@ -879,330 +3232,344 @@ func (s *BrokerNodeGroupInfo) Validate() error { return nil } -// SetBrokerAZDistribution sets the BrokerAZDistribution field's value. -func (s *BrokerNodeGroupInfo) SetBrokerAZDistribution(v string) *BrokerNodeGroupInfo { - s.BrokerAZDistribution = &v +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *DescribeClusterOperationInput) SetClusterOperationArn(v string) *DescribeClusterOperationInput { + s.ClusterOperationArn = &v return s } -// SetClientSubnets sets the ClientSubnets field's value. -func (s *BrokerNodeGroupInfo) SetClientSubnets(v []*string) *BrokerNodeGroupInfo { - s.ClientSubnets = v - return s +// Information about a cluster operation. +type DescribeClusterOperationOutput struct { + _ struct{} `type:"structure"` + + // Cluster operation information + ClusterOperationInfo *ClusterOperationInfo `locationName:"clusterOperationInfo" type:"structure"` } -// SetInstanceType sets the InstanceType field's value. 
-func (s *BrokerNodeGroupInfo) SetInstanceType(v string) *BrokerNodeGroupInfo { - s.InstanceType = &v +// String returns the string representation +func (s DescribeClusterOperationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOperationOutput) GoString() string { + return s.String() +} + +// SetClusterOperationInfo sets the ClusterOperationInfo field's value. +func (s *DescribeClusterOperationOutput) SetClusterOperationInfo(v *ClusterOperationInfo) *DescribeClusterOperationOutput { + s.ClusterOperationInfo = v return s } -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *BrokerNodeGroupInfo) SetSecurityGroups(v []*string) *BrokerNodeGroupInfo { - s.SecurityGroups = v +// Returns information about a cluster. +type DescribeClusterOutput struct { + _ struct{} `type:"structure"` + + // The cluster information. + ClusterInfo *ClusterInfo `locationName:"clusterInfo" type:"structure"` +} + +// String returns the string representation +func (s DescribeClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOutput) GoString() string { + return s.String() +} + +// SetClusterInfo sets the ClusterInfo field's value. +func (s *DescribeClusterOutput) SetClusterInfo(v *ClusterInfo) *DescribeClusterOutput { + s.ClusterInfo = v return s } -// SetStorageInfo sets the StorageInfo field's value. -func (s *BrokerNodeGroupInfo) SetStorageInfo(v *StorageInfo) *BrokerNodeGroupInfo { - s.StorageInfo = v +type DescribeConfigurationInput struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DescribeConfigurationInput) SetArn(v string) *DescribeConfigurationInput { + s.Arn = &v return s } -// BrokerNodeInfo -type BrokerNodeInfo struct { +// Response body for DescribeConfiguration. +type DescribeConfigurationOutput struct { _ struct{} `type:"structure"` - // The attached elastic network interface of the broker. - AttachedENIId *string `locationName:"attachedENIId" type:"string"` + // The Amazon Resource Name (ARN) of the configuration. + Arn *string `locationName:"arn" type:"string"` - // The ID of the broker. - BrokerId *float64 `locationName:"brokerId" type:"double"` + // The time when the configuration was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` - // The client subnet to which this broker node belongs. - ClientSubnet *string `locationName:"clientSubnet" type:"string"` + // The description of the configuration. 
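+ //
+ // Illustrative sketch (not generated code): DeleteClusterInput above takes
+ // the cluster's current version as an optional query-string parameter, so a
+ // delete can reference the version last read. "svc" and "clusterArn" are
+ // placeholders; the DeleteCluster wrapper is defined earlier in this file.
+ //
+ //    out, err := svc.DeleteCluster(&DeleteClusterInput{
+ //        ClusterArn:     aws.String(clusterArn),
+ //        CurrentVersion: aws.String("KTVPDKIKX0DER"),
+ //    })
+ //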
+ Description *string `locationName:"description" type:"string"` - // The virtual private cloud (VPC) of the client. - ClientVpcIpAddress *string `locationName:"clientVpcIpAddress" type:"string"` + // The versions of Apache Kafka with which you can use this MSK configuration. + KafkaVersions []*string `locationName:"kafkaVersions" type:"list"` - // Information about the version of software currently deployed on the Kafka - // brokers in the cluster. - CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` + // Latest revision of the configuration. + LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` + + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". + Name *string `locationName:"name" type:"string"` } // String returns the string representation -func (s BrokerNodeInfo) String() string { +func (s DescribeConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BrokerNodeInfo) GoString() string { +func (s DescribeConfigurationOutput) GoString() string { return s.String() } -// SetAttachedENIId sets the AttachedENIId field's value. -func (s *BrokerNodeInfo) SetAttachedENIId(v string) *BrokerNodeInfo { - s.AttachedENIId = &v +// SetArn sets the Arn field's value. +func (s *DescribeConfigurationOutput) SetArn(v string) *DescribeConfigurationOutput { + s.Arn = &v return s } -// SetBrokerId sets the BrokerId field's value. -func (s *BrokerNodeInfo) SetBrokerId(v float64) *BrokerNodeInfo { - s.BrokerId = &v +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeConfigurationOutput) SetCreationTime(v time.Time) *DescribeConfigurationOutput { + s.CreationTime = &v return s } -// SetClientSubnet sets the ClientSubnet field's value. -func (s *BrokerNodeInfo) SetClientSubnet(v string) *BrokerNodeInfo { - s.ClientSubnet = &v +// SetDescription sets the Description field's value. +func (s *DescribeConfigurationOutput) SetDescription(v string) *DescribeConfigurationOutput { + s.Description = &v return s } -// SetClientVpcIpAddress sets the ClientVpcIpAddress field's value. -func (s *BrokerNodeInfo) SetClientVpcIpAddress(v string) *BrokerNodeInfo { - s.ClientVpcIpAddress = &v +// SetKafkaVersions sets the KafkaVersions field's value. +func (s *DescribeConfigurationOutput) SetKafkaVersions(v []*string) *DescribeConfigurationOutput { + s.KafkaVersions = v return s } -// SetCurrentBrokerSoftwareInfo sets the CurrentBrokerSoftwareInfo field's value. -func (s *BrokerNodeInfo) SetCurrentBrokerSoftwareInfo(v *BrokerSoftwareInfo) *BrokerNodeInfo { - s.CurrentBrokerSoftwareInfo = v +// SetLatestRevision sets the LatestRevision field's value. +func (s *DescribeConfigurationOutput) SetLatestRevision(v *ConfigurationRevision) *DescribeConfigurationOutput { + s.LatestRevision = v return s } -// Information about the current software installed on the cluster. -type BrokerSoftwareInfo struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *DescribeConfigurationOutput) SetName(v string) *DescribeConfigurationOutput { + s.Name = &v + return s +} - // The Amazon Resource Name (ARN) of the configuration used for the cluster. - ConfigurationArn *string `locationName:"configurationArn" type:"string"` +type DescribeConfigurationRevisionInput struct { + _ struct{} `type:"structure"` - // The revision of the configuration to use. 
- ConfigurationRevision *string `locationName:"configurationRevision" type:"string"` + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` - // The version of Apache Kafka. - KafkaVersion *string `locationName:"kafkaVersion" type:"string"` + // Revision is a required field + Revision *int64 `location:"uri" locationName:"revision" type:"long" required:"true"` } // String returns the string representation -func (s BrokerSoftwareInfo) String() string { +func (s DescribeConfigurationRevisionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BrokerSoftwareInfo) GoString() string { +func (s DescribeConfigurationRevisionInput) GoString() string { return s.String() } -// SetConfigurationArn sets the ConfigurationArn field's value. -func (s *BrokerSoftwareInfo) SetConfigurationArn(v string) *BrokerSoftwareInfo { - s.ConfigurationArn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConfigurationRevisionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationRevisionInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.Revision == nil { + invalidParams.Add(request.NewErrParamRequired("Revision")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetConfigurationRevision sets the ConfigurationRevision field's value. -func (s *BrokerSoftwareInfo) SetConfigurationRevision(v string) *BrokerSoftwareInfo { - s.ConfigurationRevision = &v +// SetArn sets the Arn field's value. +func (s *DescribeConfigurationRevisionInput) SetArn(v string) *DescribeConfigurationRevisionInput { + s.Arn = &v return s } -// SetKafkaVersion sets the KafkaVersion field's value. -func (s *BrokerSoftwareInfo) SetKafkaVersion(v string) *BrokerSoftwareInfo { - s.KafkaVersion = &v +// SetRevision sets the Revision field's value. +func (s *DescribeConfigurationRevisionInput) SetRevision(v int64) *DescribeConfigurationRevisionInput { + s.Revision = &v return s } -// Returns information about a cluster. -type ClusterInfo struct { +// Response body for DescribeConfigurationRevision. +type DescribeConfigurationRevisionOutput struct { _ struct{} `type:"structure"` - // Information about the broker nodes. - BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure"` - - // The Amazon Resource Name (ARN) that uniquely identifies the cluster. - ClusterArn *string `locationName:"clusterArn" type:"string"` - - // The name of the cluster. - ClusterName *string `locationName:"clusterName" type:"string"` + // The Amazon Resource Name (ARN) of the configuration. + Arn *string `locationName:"arn" type:"string"` - // The time when the cluster was created. + // The time when the configuration was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` - // Information about the version of software currently deployed on the Kafka - // brokers in the cluster. - CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` - - // The current version of the MSK cluster. - CurrentVersion *string `locationName:"currentVersion" type:"string"` - - // Includes all encryption-related information. 
- EncryptionInfo *EncryptionInfo `locationName:"encryptionInfo" type:"structure"` - - // Specifies which metrics are gathered for the MSK cluster. This property has - // three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. - EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` - - // The number of Kafka broker nodes in the cluster. - NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" type:"integer"` + // The description of the configuration. + Description *string `locationName:"description" type:"string"` - // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. - State *string `locationName:"state" type:"string" enum:"ClusterState"` + // The revision number. + Revision *int64 `locationName:"revision" type:"long"` - // The connection string to use to connect to the Apache ZooKeeper cluster. - ZookeeperConnectString *string `locationName:"zookeeperConnectString" type:"string"` + // ServerProperties is automatically base64 encoded/decoded by the SDK. + ServerProperties []byte `locationName:"serverProperties" type:"blob"` } // String returns the string representation -func (s ClusterInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ClusterInfo) GoString() string { - return s.String() -} - -// SetBrokerNodeGroupInfo sets the BrokerNodeGroupInfo field's value. -func (s *ClusterInfo) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *ClusterInfo { - s.BrokerNodeGroupInfo = v - return s +func (s DescribeConfigurationRevisionOutput) String() string { + return awsutil.Prettify(s) } -// SetClusterArn sets the ClusterArn field's value. -func (s *ClusterInfo) SetClusterArn(v string) *ClusterInfo { - s.ClusterArn = &v - return s +// GoString returns the string representation +func (s DescribeConfigurationRevisionOutput) GoString() string { + return s.String() } -// SetClusterName sets the ClusterName field's value. -func (s *ClusterInfo) SetClusterName(v string) *ClusterInfo { - s.ClusterName = &v +// SetArn sets the Arn field's value. +func (s *DescribeConfigurationRevisionOutput) SetArn(v string) *DescribeConfigurationRevisionOutput { + s.Arn = &v return s } // SetCreationTime sets the CreationTime field's value. -func (s *ClusterInfo) SetCreationTime(v time.Time) *ClusterInfo { +func (s *DescribeConfigurationRevisionOutput) SetCreationTime(v time.Time) *DescribeConfigurationRevisionOutput { s.CreationTime = &v return s } -// SetCurrentBrokerSoftwareInfo sets the CurrentBrokerSoftwareInfo field's value. -func (s *ClusterInfo) SetCurrentBrokerSoftwareInfo(v *BrokerSoftwareInfo) *ClusterInfo { - s.CurrentBrokerSoftwareInfo = v +// SetDescription sets the Description field's value. +func (s *DescribeConfigurationRevisionOutput) SetDescription(v string) *DescribeConfigurationRevisionOutput { + s.Description = &v return s } -// SetCurrentVersion sets the CurrentVersion field's value. -func (s *ClusterInfo) SetCurrentVersion(v string) *ClusterInfo { - s.CurrentVersion = &v +// SetRevision sets the Revision field's value. +func (s *DescribeConfigurationRevisionOutput) SetRevision(v int64) *DescribeConfigurationRevisionOutput { + s.Revision = &v return s } -// SetEncryptionInfo sets the EncryptionInfo field's value. -func (s *ClusterInfo) SetEncryptionInfo(v *EncryptionInfo) *ClusterInfo { - s.EncryptionInfo = v +// SetServerProperties sets the ServerProperties field's value. 
+func (s *DescribeConfigurationRevisionOutput) SetServerProperties(v []byte) *DescribeConfigurationRevisionOutput { + s.ServerProperties = v return s } -// SetEnhancedMonitoring sets the EnhancedMonitoring field's value. -func (s *ClusterInfo) SetEnhancedMonitoring(v string) *ClusterInfo { - s.EnhancedMonitoring = &v - return s -} +// Contains information about the EBS storage volumes attached to Kafka broker +// nodes. +type EBSStorageInfo struct { + _ struct{} `type:"structure"` -// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. -func (s *ClusterInfo) SetNumberOfBrokerNodes(v int64) *ClusterInfo { - s.NumberOfBrokerNodes = &v - return s + // The size in GiB of the EBS volume for the data drive on each broker node. + VolumeSize *int64 `locationName:"volumeSize" min:"1" type:"integer"` } -// SetState sets the State field's value. -func (s *ClusterInfo) SetState(v string) *ClusterInfo { - s.State = &v - return s +// String returns the string representation +func (s EBSStorageInfo) String() string { + return awsutil.Prettify(s) } -// SetZookeeperConnectString sets the ZookeeperConnectString field's value. -func (s *ClusterInfo) SetZookeeperConnectString(v string) *ClusterInfo { - s.ZookeeperConnectString = &v - return s +// GoString returns the string representation +func (s EBSStorageInfo) GoString() string { + return s.String() } -// Creates a cluster. -type CreateClusterInput struct { - _ struct{} `type:"structure"` - - // Information about the broker nodes in the cluster. - // - // BrokerNodeGroupInfo is a required field - BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure" required:"true"` - - // The name of the cluster. - // - // ClusterName is a required field - ClusterName *string `locationName:"clusterName" min:"1" type:"string" required:"true"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *EBSStorageInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EBSStorageInfo"} + if s.VolumeSize != nil && *s.VolumeSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("VolumeSize", 1)) + } - // Includes all encryption-related information. - EncryptionInfo *EncryptionInfo `locationName:"encryptionInfo" type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies the level of monitoring for the MSK cluster. The possible values - // are DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. - EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` +// SetVolumeSize sets the VolumeSize field's value. +func (s *EBSStorageInfo) SetVolumeSize(v int64) *EBSStorageInfo { + s.VolumeSize = &v + return s +} - // The version of Apache Kafka. - // - // KafkaVersion is a required field - KafkaVersion *string `locationName:"kafkaVersion" min:"1" type:"string" required:"true"` +// The data-volume encryption details. +type EncryptionAtRest struct { + _ struct{} `type:"structure"` - // The number of Kafka broker nodes in the Amazon MSK cluster. + // The ARN of the AWS KMS key for encrypting data at rest. If you don't specify + // a KMS key, MSK creates one for you and uses it. 
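Taken together, the DescribeConfigurationRevision shapes above support a round trip like the following minimal sketch. This is illustrative only, not part of the vendored change; the configuration ARN and revision number are placeholders, and the client wiring is the usual aws-sdk-go session setup.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/kafka"
    )

    func main() {
    	svc := kafka.New(session.Must(session.NewSession()))
    	out, err := svc.DescribeConfigurationRevision(&kafka.DescribeConfigurationRevisionInput{
    		Arn:      aws.String("arn:aws:kafka:us-east-1:111122223333:configuration/example"), // placeholder
    		Revision: aws.Int64(1),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// ServerProperties is base64-decoded by the SDK, so this prints plain
    	// server.properties text.
    	fmt.Printf("%s\n", out.ServerProperties)
    }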
// - // NumberOfBrokerNodes is a required field - NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" min:"1" type:"integer" required:"true"` + // DataVolumeKMSKeyId is a required field + DataVolumeKMSKeyId *string `locationName:"dataVolumeKMSKeyId" type:"string" required:"true"` } // String returns the string representation -func (s CreateClusterInput) String() string { +func (s EncryptionAtRest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateClusterInput) GoString() string { +func (s EncryptionAtRest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"} - if s.BrokerNodeGroupInfo == nil { - invalidParams.Add(request.NewErrParamRequired("BrokerNodeGroupInfo")) - } - if s.ClusterName == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterName")) - } - if s.ClusterName != nil && len(*s.ClusterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClusterName", 1)) - } - if s.KafkaVersion == nil { - invalidParams.Add(request.NewErrParamRequired("KafkaVersion")) - } - if s.KafkaVersion != nil && len(*s.KafkaVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KafkaVersion", 1)) - } - if s.NumberOfBrokerNodes == nil { - invalidParams.Add(request.NewErrParamRequired("NumberOfBrokerNodes")) - } - if s.NumberOfBrokerNodes != nil && *s.NumberOfBrokerNodes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBrokerNodes", 1)) - } - if s.BrokerNodeGroupInfo != nil { - if err := s.BrokerNodeGroupInfo.Validate(); err != nil { - invalidParams.AddNested("BrokerNodeGroupInfo", err.(request.ErrInvalidParams)) - } - } - if s.EncryptionInfo != nil { - if err := s.EncryptionInfo.Validate(); err != nil { - invalidParams.AddNested("EncryptionInfo", err.(request.ErrInvalidParams)) - } +func (s *EncryptionAtRest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionAtRest"} + if s.DataVolumeKMSKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("DataVolumeKMSKeyId")) } if invalidParams.Len() > 0 { @@ -1211,106 +3578,163 @@ func (s *CreateClusterInput) Validate() error { return nil } -// SetBrokerNodeGroupInfo sets the BrokerNodeGroupInfo field's value. -func (s *CreateClusterInput) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *CreateClusterInput { - s.BrokerNodeGroupInfo = v +// SetDataVolumeKMSKeyId sets the DataVolumeKMSKeyId field's value. +func (s *EncryptionAtRest) SetDataVolumeKMSKeyId(v string) *EncryptionAtRest { + s.DataVolumeKMSKeyId = &v return s } -// SetClusterName sets the ClusterName field's value. -func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { - s.ClusterName = &v - return s +// The settings for encrypting data in transit. +type EncryptionInTransit struct { + _ struct{} `type:"structure"` + + // Indicates the encryption setting for data in transit between clients and + // brokers. You must set it to one of the following values. + // + // TLS means that client-broker communication is enabled with TLS only. + // + // TLS_PLAINTEXT means that client-broker communication is enabled for both + // TLS-encrypted, as well as plaintext data. + // + // PLAINTEXT means that client-broker communication is enabled in plaintext + // only. + // + // The default value is TLS. 
+ ClientBroker *string `locationName:"clientBroker" type:"string" enum:"ClientBroker"` + + // When set to true, it indicates that data communication among the broker nodes + // of the cluster is encrypted. When set to false, the communication happens + // in plaintext. + // + // The default value is true. + InCluster *bool `locationName:"inCluster" type:"boolean"` } -// SetEncryptionInfo sets the EncryptionInfo field's value. -func (s *CreateClusterInput) SetEncryptionInfo(v *EncryptionInfo) *CreateClusterInput { - s.EncryptionInfo = v - return s +// String returns the string representation +func (s EncryptionInTransit) String() string { + return awsutil.Prettify(s) } -// SetEnhancedMonitoring sets the EnhancedMonitoring field's value. -func (s *CreateClusterInput) SetEnhancedMonitoring(v string) *CreateClusterInput { - s.EnhancedMonitoring = &v - return s +// GoString returns the string representation +func (s EncryptionInTransit) GoString() string { + return s.String() } -// SetKafkaVersion sets the KafkaVersion field's value. -func (s *CreateClusterInput) SetKafkaVersion(v string) *CreateClusterInput { - s.KafkaVersion = &v +// SetClientBroker sets the ClientBroker field's value. +func (s *EncryptionInTransit) SetClientBroker(v string) *EncryptionInTransit { + s.ClientBroker = &v return s } -// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. -func (s *CreateClusterInput) SetNumberOfBrokerNodes(v int64) *CreateClusterInput { - s.NumberOfBrokerNodes = &v +// SetInCluster sets the InCluster field's value. +func (s *EncryptionInTransit) SetInCluster(v bool) *EncryptionInTransit { + s.InCluster = &v return s } -// Returns information about the created cluster. -type CreateClusterOutput struct { +// Includes encryption-related information, such as the AWS KMS key used for +// encrypting data at rest and whether you want MSK to encrypt your data in +// transit. +type EncryptionInfo struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the cluster. - ClusterArn *string `locationName:"clusterArn" type:"string"` - - // The name of the MSK cluster. - ClusterName *string `locationName:"clusterName" type:"string"` + // The data-volume encryption details. + EncryptionAtRest *EncryptionAtRest `locationName:"encryptionAtRest" type:"structure"` - // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. - State *string `locationName:"state" type:"string" enum:"ClusterState"` + // The details for encryption in transit. + EncryptionInTransit *EncryptionInTransit `locationName:"encryptionInTransit" type:"structure"` } // String returns the string representation -func (s CreateClusterOutput) String() string { +func (s EncryptionInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateClusterOutput) GoString() string { +func (s EncryptionInfo) GoString() string { return s.String() } -// SetClusterArn sets the ClusterArn field's value. -func (s *CreateClusterOutput) SetClusterArn(v string) *CreateClusterOutput { - s.ClusterArn = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EncryptionInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionInfo"} + if s.EncryptionAtRest != nil { + if err := s.EncryptionAtRest.Validate(); err != nil { + invalidParams.AddNested("EncryptionAtRest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionAtRest sets the EncryptionAtRest field's value. +func (s *EncryptionInfo) SetEncryptionAtRest(v *EncryptionAtRest) *EncryptionInfo { + s.EncryptionAtRest = v return s } -// SetClusterName sets the ClusterName field's value. -func (s *CreateClusterOutput) SetClusterName(v string) *CreateClusterOutput { - s.ClusterName = &v +// SetEncryptionInTransit sets the EncryptionInTransit field's value. +func (s *EncryptionInfo) SetEncryptionInTransit(v *EncryptionInTransit) *EncryptionInfo { + s.EncryptionInTransit = v return s } -// SetState sets the State field's value. -func (s *CreateClusterOutput) SetState(v string) *CreateClusterOutput { - s.State = &v +// Returns information about an error state of the cluster. +type ErrorInfo struct { + _ struct{} `type:"structure"` + + // A number describing the error programmatically. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An optional field to provide more details about the error. + ErrorString *string `locationName:"errorString" type:"string"` +} + +// String returns the string representation +func (s ErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInfo) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ErrorInfo) SetErrorCode(v string) *ErrorInfo { + s.ErrorCode = &v return s } -type DeleteClusterInput struct { +// SetErrorString sets the ErrorString field's value. +func (s *ErrorInfo) SetErrorString(v string) *ErrorInfo { + s.ErrorString = &v + return s +} + +type GetBootstrapBrokersInput struct { _ struct{} `type:"structure"` // ClusterArn is a required field ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` - - CurrentVersion *string `location:"querystring" locationName:"currentVersion" type:"string"` } // String returns the string representation -func (s DeleteClusterInput) String() string { +func (s GetBootstrapBrokersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteClusterInput) GoString() string { +func (s GetBootstrapBrokersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} +func (s *GetBootstrapBrokersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBootstrapBrokersInput"} if s.ClusterArn == nil { invalidParams.Add(request.NewErrParamRequired("ClusterArn")) } @@ -1325,76 +3749,79 @@ func (s *DeleteClusterInput) Validate() error { } // SetClusterArn sets the ClusterArn field's value. -func (s *DeleteClusterInput) SetClusterArn(v string) *DeleteClusterInput { +func (s *GetBootstrapBrokersInput) SetClusterArn(v string) *GetBootstrapBrokersInput { s.ClusterArn = &v return s } -// SetCurrentVersion sets the CurrentVersion field's value. 
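Assuming the same imports and placeholder identifiers as the sketch above, the new encryption types compose with their generated setters, and Validate catches a missing DataVolumeKMSKeyId before any request is made. A minimal sketch, not vendored code:

    enc := new(kafka.EncryptionInfo).
    	SetEncryptionAtRest(new(kafka.EncryptionAtRest).
    		SetDataVolumeKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE")). // placeholder
    	SetEncryptionInTransit(new(kafka.EncryptionInTransit).
    		SetClientBroker(kafka.ClientBrokerTls). // TLS-only client-broker traffic
    		SetInCluster(true))                     // encrypt broker-to-broker traffic
    if err := enc.Validate(); err != nil {
    	log.Fatal(err) // a nil DataVolumeKMSKeyId is reported here
    }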
-func (s *DeleteClusterInput) SetCurrentVersion(v string) *DeleteClusterInput { - s.CurrentVersion = &v - return s -} - -// Returns information about the deleted cluster. -type DeleteClusterOutput struct { +// Returns a string containing one or more hostname:port pairs. +type GetBootstrapBrokersOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the cluster. - ClusterArn *string `locationName:"clusterArn" type:"string"` + // A string containing one or more hostname:port pairs. + BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` - // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. - State *string `locationName:"state" type:"string" enum:"ClusterState"` + // A string containing one or more DNS names (or IP) and TLS port pairs. The + // following is an example. + // { "BootstrapBrokerStringTls": "b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094"} + BootstrapBrokerStringTls *string `locationName:"bootstrapBrokerStringTls" type:"string"` } // String returns the string representation -func (s DeleteClusterOutput) String() string { +func (s GetBootstrapBrokersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteClusterOutput) GoString() string { +func (s GetBootstrapBrokersOutput) GoString() string { return s.String() } -// SetClusterArn sets the ClusterArn field's value. -func (s *DeleteClusterOutput) SetClusterArn(v string) *DeleteClusterOutput { - s.ClusterArn = &v +// SetBootstrapBrokerString sets the BootstrapBrokerString field's value. +func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerString(v string) *GetBootstrapBrokersOutput { + s.BootstrapBrokerString = &v return s } -// SetState sets the State field's value. -func (s *DeleteClusterOutput) SetState(v string) *DeleteClusterOutput { - s.State = &v +// SetBootstrapBrokerStringTls sets the BootstrapBrokerStringTls field's value. +func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerStringTls(v string) *GetBootstrapBrokersOutput { + s.BootstrapBrokerStringTls = &v return s } -type DescribeClusterInput struct { +type ListClusterOperationsInput struct { _ struct{} `type:"structure"` // ClusterArn is a required field ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s DescribeClusterInput) String() string { +func (s ListClusterOperationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeClusterInput) GoString() string { +func (s ListClusterOperationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
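The bootstrap-broker strings documented above are comma-separated host:port lists, so callers typically split them. A sketch assuming the svc client from the first example, the standard strings package, and a placeholder cluster ARN:

    clusterArn := "arn:aws:kafka:us-east-1:111122223333:cluster/example/12345678-1234-1234-1234-123456789012-1" // placeholder
    brokers, err := svc.GetBootstrapBrokers(&kafka.GetBootstrapBrokersInput{
    	ClusterArn: aws.String(clusterArn),
    })
    if err != nil {
    	log.Fatal(err)
    }
    for _, hostPort := range strings.Split(aws.StringValue(brokers.BootstrapBrokerStringTls), ",") {
    	fmt.Println(hostPort) // e.g. b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094
    }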
-func (s *DescribeClusterInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DescribeClusterInput"}
+func (s *ListClusterOperationsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListClusterOperationsInput"}
 	if s.ClusterArn == nil {
 		invalidParams.Add(request.NewErrParamRequired("ClusterArn"))
 	}
 	if s.ClusterArn != nil && len(*s.ClusterArn) < 1 {
 		invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1))
 	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -1403,98 +3830,84 @@ func (s *DescribeClusterInput) Validate() error {
 }
 
 // SetClusterArn sets the ClusterArn field's value.
-func (s *DescribeClusterInput) SetClusterArn(v string) *DescribeClusterInput {
+func (s *ListClusterOperationsInput) SetClusterArn(v string) *ListClusterOperationsInput {
 	s.ClusterArn = &v
 	return s
 }
 
-// Returns information about a cluster.
-type DescribeClusterOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The cluster information.
-	ClusterInfo *ClusterInfo `locationName:"clusterInfo" type:"structure"`
-}
-
-// String returns the string representation
-func (s DescribeClusterOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeClusterOutput) GoString() string {
-	return s.String()
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListClusterOperationsInput) SetMaxResults(v int64) *ListClusterOperationsInput {
+	s.MaxResults = &v
+	return s
 }
 
-// SetClusterInfo sets the ClusterInfo field's value.
-func (s *DescribeClusterOutput) SetClusterInfo(v *ClusterInfo) *DescribeClusterOutput {
-	s.ClusterInfo = v
+// SetNextToken sets the NextToken field's value.
+func (s *ListClusterOperationsInput) SetNextToken(v string) *ListClusterOperationsInput {
+	s.NextToken = &v
 	return s
 }
 
-// Contains information about the EBS storage volumes attached to Kafka broker
-// nodes.
-type EBSStorageInfo struct {
+// The response contains an array containing cluster operation information and
+// a next token if the response is truncated.
+type ListClusterOperationsOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The size in GiB of the EBS volume for the data drive on each broker node.
-	VolumeSize *int64 `locationName:"volumeSize" min:"1" type:"integer"`
+	// An array of cluster operation information objects.
+	ClusterOperationInfoList []*ClusterOperationInfo `locationName:"clusterOperationInfoList" type:"list"`
+
+	// If the response of ListClusterOperations is truncated, it returns a NextToken
+	// in the response. This NextToken should be sent in the subsequent request
+	// to ListClusterOperations.
+	NextToken *string `locationName:"nextToken" type:"string"`
 }
 
 // String returns the string representation
-func (s EBSStorageInfo) String() string {
+func (s ListClusterOperationsOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s EBSStorageInfo) GoString() string {
+func (s ListClusterOperationsOutput) GoString() string {
 	return s.String()
 }
 
-// Validate inspects the fields of the type to determine if they are valid.
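The NextToken contract described above supports the usual manual paging loop as an alternative to the generated Pages helpers. A sketch with the same assumed client and placeholder ARN as before:

    in := &kafka.ListClusterOperationsInput{
    	ClusterArn: aws.String(clusterArn),
    	MaxResults: aws.Int64(10),
    }
    for {
    	page, err := svc.ListClusterOperations(in)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, op := range page.ClusterOperationInfoList {
    		fmt.Println(op) // ClusterOperationInfo stringifies via awsutil.Prettify
    	}
    	if page.NextToken == nil {
    		break // last page: the service omits NextToken
    	}
    	in.NextToken = page.NextToken
    }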
-func (s *EBSStorageInfo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EBSStorageInfo"} - if s.VolumeSize != nil && *s.VolumeSize < 1 { - invalidParams.Add(request.NewErrParamMinValue("VolumeSize", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetClusterOperationInfoList sets the ClusterOperationInfoList field's value. +func (s *ListClusterOperationsOutput) SetClusterOperationInfoList(v []*ClusterOperationInfo) *ListClusterOperationsOutput { + s.ClusterOperationInfoList = v + return s } -// SetVolumeSize sets the VolumeSize field's value. -func (s *EBSStorageInfo) SetVolumeSize(v int64) *EBSStorageInfo { - s.VolumeSize = &v +// SetNextToken sets the NextToken field's value. +func (s *ListClusterOperationsOutput) SetNextToken(v string) *ListClusterOperationsOutput { + s.NextToken = &v return s } -// The data volume encryption details. -type EncryptionAtRest struct { +type ListClustersInput struct { _ struct{} `type:"structure"` - // The AWS KMS key used for data encryption. - // - // DataVolumeKMSKeyId is a required field - DataVolumeKMSKeyId *string `locationName:"dataVolumeKMSKeyId" type:"string" required:"true"` + ClusterNameFilter *string `location:"querystring" locationName:"clusterNameFilter" type:"string"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s EncryptionAtRest) String() string { +func (s ListClustersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EncryptionAtRest) GoString() string { +func (s ListClustersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionAtRest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionAtRest"} - if s.DataVolumeKMSKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("DataVolumeKMSKeyId")) +func (s *ListClustersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListClustersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -1503,77 +3916,92 @@ func (s *EncryptionAtRest) Validate() error { return nil } -// SetDataVolumeKMSKeyId sets the DataVolumeKMSKeyId field's value. -func (s *EncryptionAtRest) SetDataVolumeKMSKeyId(v string) *EncryptionAtRest { - s.DataVolumeKMSKeyId = &v +// SetClusterNameFilter sets the ClusterNameFilter field's value. +func (s *ListClustersInput) SetClusterNameFilter(v string) *ListClustersInput { + s.ClusterNameFilter = &v return s } -// Includes encryption-related information, such as the AWS KMS key used for -// encrypting data at rest. -type EncryptionInfo struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput { + s.NextToken = &v + return s +} + +// The response contains an array containing cluster information and a next +// token if the response is truncated. +type ListClustersOutput struct { _ struct{} `type:"structure"` - // The data volume encryption details. 
- EncryptionAtRest *EncryptionAtRest `locationName:"encryptionAtRest" type:"structure"` + // Information on each of the MSK clusters in the response. + ClusterInfoList []*ClusterInfo `locationName:"clusterInfoList" type:"list"` + + // The paginated results marker. When the result of a ListClusters operation + // is truncated, the call returns NextToken in the response. To get another + // batch of clusters, provide this token in your next request. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s EncryptionInfo) String() string { +func (s ListClustersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EncryptionInfo) GoString() string { +func (s ListClustersOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionInfo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionInfo"} - if s.EncryptionAtRest != nil { - if err := s.EncryptionAtRest.Validate(); err != nil { - invalidParams.AddNested("EncryptionAtRest", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetClusterInfoList sets the ClusterInfoList field's value. +func (s *ListClustersOutput) SetClusterInfoList(v []*ClusterInfo) *ListClustersOutput { + s.ClusterInfoList = v + return s } -// SetEncryptionAtRest sets the EncryptionAtRest field's value. -func (s *EncryptionInfo) SetEncryptionAtRest(v *EncryptionAtRest) *EncryptionInfo { - s.EncryptionAtRest = v +// SetNextToken sets the NextToken field's value. +func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { + s.NextToken = &v return s } -type GetBootstrapBrokersInput struct { +type ListConfigurationRevisionsInput struct { _ struct{} `type:"structure"` - // ClusterArn is a required field - ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s GetBootstrapBrokersInput) String() string { +func (s ListConfigurationRevisionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBootstrapBrokersInput) GoString() string { +func (s ListConfigurationRevisionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBootstrapBrokersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBootstrapBrokersInput"} - if s.ClusterArn == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterArn")) +func (s *ListConfigurationRevisionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListConfigurationRevisionsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) } - if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -1582,40 +4010,59 @@ func (s *GetBootstrapBrokersInput) Validate() error { return nil } -// SetClusterArn sets the ClusterArn field's value. -func (s *GetBootstrapBrokersInput) SetClusterArn(v string) *GetBootstrapBrokersInput { - s.ClusterArn = &v +// SetArn sets the Arn field's value. +func (s *ListConfigurationRevisionsInput) SetArn(v string) *ListConfigurationRevisionsInput { + s.Arn = &v return s } -// Returns a string containing one or more hostname:port pairs. -type GetBootstrapBrokersOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListConfigurationRevisionsInput) SetMaxResults(v int64) *ListConfigurationRevisionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListConfigurationRevisionsInput) SetNextToken(v string) *ListConfigurationRevisionsInput { + s.NextToken = &v + return s +} + +// Information about revisions of an MSK configuration. +type ListConfigurationRevisionsOutput struct { _ struct{} `type:"structure"` - // A string containing one or more hostname:port pairs. - BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` + // Paginated results marker. + NextToken *string `locationName:"nextToken" type:"string"` + + // List of ConfigurationRevision objects. + Revisions []*ConfigurationRevision `locationName:"revisions" type:"list"` } // String returns the string representation -func (s GetBootstrapBrokersOutput) String() string { +func (s ListConfigurationRevisionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBootstrapBrokersOutput) GoString() string { +func (s ListConfigurationRevisionsOutput) GoString() string { return s.String() } -// SetBootstrapBrokerString sets the BootstrapBrokerString field's value. -func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerString(v string) *GetBootstrapBrokersOutput { - s.BootstrapBrokerString = &v +// SetNextToken sets the NextToken field's value. +func (s *ListConfigurationRevisionsOutput) SetNextToken(v string) *ListConfigurationRevisionsOutput { + s.NextToken = &v return s } -type ListClustersInput struct { - _ struct{} `type:"structure"` +// SetRevisions sets the Revisions field's value. 
+func (s *ListConfigurationRevisionsOutput) SetRevisions(v []*ConfigurationRevision) *ListConfigurationRevisionsOutput { + s.Revisions = v + return s +} - ClusterNameFilter *string `location:"querystring" locationName:"clusterNameFilter" type:"string"` +type ListConfigurationsInput struct { + _ struct{} `type:"structure"` MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` @@ -1623,18 +4070,18 @@ type ListClustersInput struct { } // String returns the string representation -func (s ListClustersInput) String() string { +func (s ListConfigurationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListClustersInput) GoString() string { +func (s ListConfigurationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListClustersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListClustersInput"} +func (s *ListConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListConfigurationsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -1645,56 +4092,50 @@ func (s *ListClustersInput) Validate() error { return nil } -// SetClusterNameFilter sets the ClusterNameFilter field's value. -func (s *ListClustersInput) SetClusterNameFilter(v string) *ListClustersInput { - s.ClusterNameFilter = &v - return s -} - // SetMaxResults sets the MaxResults field's value. -func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput { +func (s *ListConfigurationsInput) SetMaxResults(v int64) *ListConfigurationsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput { +func (s *ListConfigurationsInput) SetNextToken(v string) *ListConfigurationsInput { s.NextToken = &v return s } -// The response contains an array containing cluster information and a next -// token if the response is truncated. -type ListClustersOutput struct { +// The response contains an array of Configuration and a next token if the response +// is truncated. +type ListConfigurationsOutput struct { _ struct{} `type:"structure"` - // Information on each of the MSK clusters in the response. - ClusterInfoList []*ClusterInfo `locationName:"clusterInfoList" type:"list"` + // An array of MSK configurations. + Configurations []*Configuration `locationName:"configurations" type:"list"` - // The paginated results marker. When the result of a ListClusters operation + // The paginated results marker. When the result of a ListConfigurations operation // is truncated, the call returns NextToken in the response. To get another - // batch of clusters, provide this token in your next request. + // batch of configurations, provide this token in your next request. NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListClustersOutput) String() string { +func (s ListConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListClustersOutput) GoString() string { +func (s ListConfigurationsOutput) GoString() string { return s.String() } -// SetClusterInfoList sets the ClusterInfoList field's value. 
-func (s *ListClustersOutput) SetClusterInfoList(v []*ClusterInfo) *ListClustersOutput { - s.ClusterInfoList = v +// SetConfigurations sets the Configurations field's value. +func (s *ListConfigurationsOutput) SetConfigurations(v []*Configuration) *ListConfigurationsOutput { + s.Configurations = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { +func (s *ListConfigurationsOutput) SetNextToken(v string) *ListConfigurationsOutput { s.NextToken = &v return s } @@ -1831,27 +4272,69 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource return s } -// List tags for a resource +// Response of listing tags for a resource. type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The Key value pairs indicating resource tags. + // The key-value pair for the resource tag. Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation -func (s ListTagsForResourceOutput) String() string { +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Information about cluster attributes that can be updated via update APIs. +type MutableClusterInfo struct { + _ struct{} `type:"structure"` + + // Specifies the size of the EBS volume and the ID of the associated broker. + BrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"brokerEBSVolumeInfo" type:"list"` + + // Information about the changes in the configuration of the brokers. + ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` + + // The number of broker nodes in the cluster. + NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" type:"integer"` +} + +// String returns the string representation +func (s MutableClusterInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { +func (s MutableClusterInfo) GoString() string { return s.String() } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { - s.Tags = v +// SetBrokerEBSVolumeInfo sets the BrokerEBSVolumeInfo field's value. +func (s *MutableClusterInfo) SetBrokerEBSVolumeInfo(v []*BrokerEBSVolumeInfo) *MutableClusterInfo { + s.BrokerEBSVolumeInfo = v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. +func (s *MutableClusterInfo) SetConfigurationInfo(v *ConfigurationInfo) *MutableClusterInfo { + s.ConfigurationInfo = v + return s +} + +// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. +func (s *MutableClusterInfo) SetNumberOfBrokerNodes(v int64) *MutableClusterInfo { + s.NumberOfBrokerNodes = &v return s } @@ -1963,14 +4446,14 @@ func (s *StorageInfo) SetEbsStorageInfo(v *EBSStorageInfo) *StorageInfo { return s } -// Add tags for a resource +// Tag a resource. type TagResourceInput struct { _ struct{} `type:"structure"` // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` - // The Key value pairs indicating resource tags. 
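TagResourceInput's Tags field is the SDK's usual map[string]*string, and aws.StringMap converts a plain Go map into that shape. A sketch with the same assumed client; the tag keys and values are arbitrary examples:

    _, err := svc.TagResource(&kafka.TagResourceInput{
    	ResourceArn: aws.String(clusterArn),
    	Tags: aws.StringMap(map[string]string{
    		"env":  "prod",
    		"team": "data-platform",
    	}),
    })
    if err != nil {
    	log.Fatal(err)
    }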
+ // The key-value pair for the resource tag. // // Tags is a required field Tags map[string]*string `locationName:"tags" type:"map" required:"true"` @@ -2031,6 +4514,30 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// Details for client authentication using TLS. +type Tls struct { + _ struct{} `type:"structure"` + + // List of ACM Certificate Authority ARNs. + CertificateAuthorityArnList []*string `locationName:"certificateAuthorityArnList" type:"list"` +} + +// String returns the string representation +func (s Tls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tls) GoString() string { + return s.String() +} + +// SetCertificateAuthorityArnList sets the CertificateAuthorityArnList field's value. +func (s *Tls) SetCertificateAuthorityArnList(v []*string) *Tls { + s.CertificateAuthorityArnList = v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -2096,6 +4603,335 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Request body for UpdateBrokerCount. +type UpdateBrokerCountInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // The current version of the cluster. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` + + // The number of broker nodes that you want the cluster to have after this operation + // completes successfully. + // + // TargetNumberOfBrokerNodes is a required field + TargetNumberOfBrokerNodes *int64 `locationName:"targetNumberOfBrokerNodes" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateBrokerCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerCountInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBrokerCountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBrokerCountInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.TargetNumberOfBrokerNodes == nil { + invalidParams.Add(request.NewErrParamRequired("TargetNumberOfBrokerNodes")) + } + if s.TargetNumberOfBrokerNodes != nil && *s.TargetNumberOfBrokerNodes < 1 { + invalidParams.Add(request.NewErrParamMinValue("TargetNumberOfBrokerNodes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerCountInput) SetClusterArn(v string) *UpdateBrokerCountInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateBrokerCountInput) SetCurrentVersion(v string) *UpdateBrokerCountInput { + s.CurrentVersion = &v + return s +} + +// SetTargetNumberOfBrokerNodes sets the TargetNumberOfBrokerNodes field's value. 
+func (s *UpdateBrokerCountInput) SetTargetNumberOfBrokerNodes(v int64) *UpdateBrokerCountInput {
+	s.TargetNumberOfBrokerNodes = &v
+	return s
+}
+
+// Response body for UpdateBrokerCount.
+type UpdateBrokerCountOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the cluster.
+	ClusterArn *string `locationName:"clusterArn" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the cluster operation.
+	ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateBrokerCountOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBrokerCountOutput) GoString() string {
+	return s.String()
+}
+
+// SetClusterArn sets the ClusterArn field's value.
+func (s *UpdateBrokerCountOutput) SetClusterArn(v string) *UpdateBrokerCountOutput {
+	s.ClusterArn = &v
+	return s
+}
+
+// SetClusterOperationArn sets the ClusterOperationArn field's value.
+func (s *UpdateBrokerCountOutput) SetClusterOperationArn(v string) *UpdateBrokerCountOutput {
+	s.ClusterOperationArn = &v
	return s
+}
+
+// Request object for UpdateBrokerStorage.
+type UpdateBrokerStorageInput struct {
+	_ struct{} `type:"structure"`
+
+	// ClusterArn is a required field
+	ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"`
+
+	// The version of the cluster to update from. A successful operation will then
+	// generate a new version.
+	//
+	// CurrentVersion is a required field
+	CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"`
+
+	// Describes the target volume size and the ID of the broker to apply the update
+	// to.
+	//
+	// The value you specify for Target-Volume-in-GiB must be a whole number that
+	// is greater than 100 GiB.
+	//
+	// The storage per broker after the update operation can't exceed 16384 GiB.
+	//
+	// TargetBrokerEBSVolumeInfo is a required field
+	TargetBrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"targetBrokerEBSVolumeInfo" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateBrokerStorageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBrokerStorageInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBrokerStorageInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateBrokerStorageInput"}
+	if s.ClusterArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ClusterArn"))
+	}
+	if s.ClusterArn != nil && len(*s.ClusterArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1))
+	}
+	if s.CurrentVersion == nil {
+		invalidParams.Add(request.NewErrParamRequired("CurrentVersion"))
+	}
+	if s.TargetBrokerEBSVolumeInfo == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetBrokerEBSVolumeInfo"))
+	}
+	if s.TargetBrokerEBSVolumeInfo != nil {
+		for i, v := range s.TargetBrokerEBSVolumeInfo {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetBrokerEBSVolumeInfo", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetClusterArn sets the ClusterArn field's value.
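Because UpdateBrokerCount is guarded by CurrentVersion, callers normally read that value from DescribeCluster first and then poll the returned ClusterOperationArn. A sketch under the same assumptions as the earlier examples:

    desc, err := svc.DescribeCluster(&kafka.DescribeClusterInput{
    	ClusterArn: aws.String(clusterArn),
    })
    if err != nil {
    	log.Fatal(err)
    }
    op, err := svc.UpdateBrokerCount(&kafka.UpdateBrokerCountInput{
    	ClusterArn:                aws.String(clusterArn),
    	CurrentVersion:            desc.ClusterInfo.CurrentVersion, // must match, or the call is rejected
    	TargetNumberOfBrokerNodes: aws.Int64(6),
    })
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(aws.StringValue(op.ClusterOperationArn))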
+func (s *UpdateBrokerStorageInput) SetClusterArn(v string) *UpdateBrokerStorageInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateBrokerStorageInput) SetCurrentVersion(v string) *UpdateBrokerStorageInput { + s.CurrentVersion = &v + return s +} + +// SetTargetBrokerEBSVolumeInfo sets the TargetBrokerEBSVolumeInfo field's value. +func (s *UpdateBrokerStorageInput) SetTargetBrokerEBSVolumeInfo(v []*BrokerEBSVolumeInfo) *UpdateBrokerStorageInput { + s.TargetBrokerEBSVolumeInfo = v + return s +} + +// Response body for UpdateBrokerStorage. +type UpdateBrokerStorageOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. + ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateBrokerStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerStorageOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerStorageOutput) SetClusterArn(v string) *UpdateBrokerStorageOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateBrokerStorageOutput) SetClusterOperationArn(v string) *UpdateBrokerStorageOutput { + s.ClusterOperationArn = &v + return s +} + +// Request body for UpdateClusterConfiguration. +type UpdateClusterConfigurationInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // Represents the configuration that you want MSK to use for the cluster. + // + // ConfigurationInfo is a required field + ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure" required:"true"` + + // The version of the cluster that you want to update. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateClusterConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateClusterConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateClusterConfigurationInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.ConfigurationInfo == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationInfo")) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.ConfigurationInfo != nil { + if err := s.ConfigurationInfo.Validate(); err != nil { + invalidParams.AddNested("ConfigurationInfo", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. 
+func (s *UpdateClusterConfigurationInput) SetClusterArn(v string) *UpdateClusterConfigurationInput { + s.ClusterArn = &v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. +func (s *UpdateClusterConfigurationInput) SetConfigurationInfo(v *ConfigurationInfo) *UpdateClusterConfigurationInput { + s.ConfigurationInfo = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateClusterConfigurationInput) SetCurrentVersion(v string) *UpdateClusterConfigurationInput { + s.CurrentVersion = &v + return s +} + +// Response body for UpdateClusterConfiguration. +type UpdateClusterConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. + ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateClusterConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterConfigurationOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateClusterConfigurationOutput) SetClusterArn(v string) *UpdateClusterConfigurationOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateClusterConfigurationOutput) SetClusterOperationArn(v string) *UpdateClusterConfigurationOutput { + s.ClusterOperationArn = &v + return s +} + // Zookeeper node information. type ZookeeperNodeInfo struct { _ struct{} `type:"structure"` @@ -2106,6 +4942,9 @@ type ZookeeperNodeInfo struct { // The virtual private cloud (VPC) IP address of the client. ClientVpcIpAddress *string `locationName:"clientVpcIpAddress" type:"string"` + // Endpoints for accessing the ZooKeeper. + Endpoints []*string `locationName:"endpoints" type:"list"` + // The role-specific ID for Zookeeper. ZookeeperId *float64 `locationName:"zookeeperId" type:"double"` @@ -2135,6 +4974,12 @@ func (s *ZookeeperNodeInfo) SetClientVpcIpAddress(v string) *ZookeeperNodeInfo { return s } +// SetEndpoints sets the Endpoints field's value. +func (s *ZookeeperNodeInfo) SetEndpoints(v []*string) *ZookeeperNodeInfo { + s.Endpoints = v + return s +} + // SetZookeeperId sets the ZookeeperId field's value. func (s *ZookeeperNodeInfo) SetZookeeperId(v float64) *ZookeeperNodeInfo { s.ZookeeperId = &v @@ -2148,14 +4993,26 @@ func (s *ZookeeperNodeInfo) SetZookeeperVersion(v string) *ZookeeperNodeInfo { } // The distribution of broker nodes across Availability Zones. By default, broker -// nodes are distributed among three Availability Zones. Currently, the only -// supported value is DEFAULT. You can either specify this value explicitly +// nodes are distributed among the Availability Zones of your Region. Currently, +// the only supported value is DEFAULT. You can either specify this value explicitly // or leave it out. const ( // BrokerAZDistributionDefault is a BrokerAZDistribution enum value BrokerAZDistributionDefault = "DEFAULT" ) +// Client-broker encryption in transit setting. 
+const ( + // ClientBrokerTls is a ClientBroker enum value + ClientBrokerTls = "TLS" + + // ClientBrokerTlsPlaintext is a ClientBroker enum value + ClientBrokerTlsPlaintext = "TLS_PLAINTEXT" + + // ClientBrokerPlaintext is a ClientBroker enum value + ClientBrokerPlaintext = "PLAINTEXT" +) + // The state of a Kafka cluster. const ( // ClusterStateActive is a ClusterState enum value @@ -2164,6 +5021,9 @@ const ( // ClusterStateCreating is a ClusterState enum value ClusterStateCreating = "CREATING" + // ClusterStateUpdating is a ClusterState enum value + ClusterStateUpdating = "UPDATING" + // ClusterStateDeleting is a ClusterState enum value ClusterStateDeleting = "DELETING" @@ -2172,7 +5032,9 @@ const ( ) // Specifies which metrics are gathered for the MSK cluster. This property has -// three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. +// three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For +// a list of the metrics associated with each of these three levels of monitoring, +// see Monitoring (https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html). const ( // EnhancedMonitoringDefault is a EnhancedMonitoring enum value EnhancedMonitoringDefault = "DEFAULT" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go index 577c3777e17..9c07694b13a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kafka { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kafka" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kafka { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kafka { svc := &Kafka{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index ad14aecba4f..a4a81b872f1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -759,7 +759,7 @@ func (c *Kinesis) DescribeStreamWithContext(ctx aws.Context, input *DescribeStre // // Example iterating over at most 3 pages of a DescribeStream operation. 
// pageNum := 0 // err := client.DescribeStreamPages(params, -// func(page *DescribeStreamOutput, lastPage bool) bool { +// func(page *kinesis.DescribeStreamOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -791,10 +791,12 @@ func (c *Kinesis) DescribeStreamPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeStreamOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeStreamOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1793,7 +1795,7 @@ func (c *Kinesis) ListStreamConsumersWithContext(ctx aws.Context, input *ListStr // // Example iterating over at most 3 pages of a ListStreamConsumers operation. // pageNum := 0 // err := client.ListStreamConsumersPages(params, -// func(page *ListStreamConsumersOutput, lastPage bool) bool { +// func(page *kinesis.ListStreamConsumersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1825,10 +1827,12 @@ func (c *Kinesis) ListStreamConsumersPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStreamConsumersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStreamConsumersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1944,7 +1948,7 @@ func (c *Kinesis) ListStreamsWithContext(ctx aws.Context, input *ListStreamsInpu // // Example iterating over at most 3 pages of a ListStreams operation. // pageNum := 0 // err := client.ListStreamsPages(params, -// func(page *ListStreamsOutput, lastPage bool) bool { +// func(page *kinesis.ListStreamsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1976,10 +1980,12 @@ func (c *Kinesis) ListStreamsPagesWithContext(ctx aws.Context, input *ListStream }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStreamsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStreamsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7152,7 +7158,7 @@ type StreamDescriptionSummary struct { // // * Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 // - // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName // // * Globally unique key ID example: 12345678-1234-1234-1234-123456789012 // @@ -7379,6 +7385,8 @@ type SubscribeToShardEventStream struct { // may result in resource leaks. 
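The doc-comment fix above (qualifying the page type as kinesis.ListStreamsOutput and so on) reflects how the callback is written from outside the package. A sketch assuming a *kinesis.Kinesis client built the same way as the kafka one:

    kin := kinesis.New(session.Must(session.NewSession()))
    err := kin.ListStreamsPages(&kinesis.ListStreamsInput{},
    	func(page *kinesis.ListStreamsOutput, lastPage bool) bool {
    		for _, name := range page.StreamNames {
    			fmt.Println(aws.StringValue(name))
    		}
    		return true // returning false stops paging, per the rewritten loop above
    	})
    if err != nil {
    	log.Fatal(err)
    }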
func (es *SubscribeToShardEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -7388,8 +7396,6 @@ func (es *SubscribeToShardEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go index f618f0da698..0ab636735ef 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go @@ -9,14 +9,14 @@ import ( var readDuration = 5 * time.Second func init() { - ops := []string{ - opGetRecords, - } - initRequest = func(r *request.Request) { - for _, operation := range ops { - if r.Operation.Name == operation { - r.ApplyOptions(request.WithResponseReadTimeout(readDuration)) - } - } + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + if r.Operation.Name == opGetRecords { + r.ApplyOptions(request.WithResponseReadTimeout(readDuration)) } + + // Service specific error codes. Github(aws/aws-sdk-go#1376) + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeLimitExceededException) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go index 7c3e8c48a28..6c561f1e8eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kinesis { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kinesis { svc := &Kinesis{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-12-02", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go index dfcfe35293b..d536e7b1470 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go @@ -58,9 +58,15 @@ func (c *KinesisAnalytics) AddApplicationCloudWatchLoggingOptionRequest(input *A // AddApplicationCloudWatchLoggingOption API operation for Amazon Kinesis Analytics. 
// +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Adds a CloudWatch log stream to monitor application configuration errors. // For more information about using CloudWatch log streams with Amazon Kinesis -// Analytics applications, see Working with Amazon CloudWatch Logs (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). +// Analytics applications, see Working with Amazon CloudWatch Logs (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -85,6 +91,8 @@ func (c *KinesisAnalytics) AddApplicationCloudWatchLoggingOptionRequest(input *A // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/AddApplicationCloudWatchLoggingOption func (c *KinesisAnalytics) AddApplicationCloudWatchLoggingOption(input *AddApplicationCloudWatchLoggingOptionInput) (*AddApplicationCloudWatchLoggingOptionOutput, error) { @@ -153,15 +161,22 @@ func (c *KinesisAnalytics) AddApplicationInputRequest(input *AddApplicationInput // AddApplicationInput API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Adds a streaming source to your Amazon Kinesis application. For conceptual -// information, see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// information, see Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). // // You can add a streaming source either when you create an application or you // can use this operation to add a streaming source after you create an application. -// For more information, see CreateApplication. +// For more information, see CreateApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_CreateApplication.html). // // Any configuration update, including adding a streaming source using this // operation, results in a new version of the application. You can use the DescribeApplication +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation to find the current application version. // // This operation requires permissions to perform the kinesisanalytics:AddApplicationInput @@ -194,6 +209,8 @@ func (c *KinesisAnalytics) AddApplicationInputRequest(input *AddApplicationInput // error. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. 
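The error-code lists above are meant to be consumed through awserr type assertions, as the generated boilerplate notes. A minimal sketch of that pattern using codes documented in this file; the helper name is hypothetical:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

// handleAddInputErr switches on the service error codes listed in the docs.
func handleAddInputErr(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kinesisanalytics.ErrCodeResourceNotFoundException:
			fmt.Println("application not found:", aerr.Message())
		case kinesisanalytics.ErrCodeConcurrentModificationException:
			fmt.Println("concurrent update in progress:", aerr.Message())
		case kinesisanalytics.ErrCodeUnsupportedOperationException:
			fmt.Println("unsupported for this application:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}
```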
// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/AddApplicationInput func (c *KinesisAnalytics) AddApplicationInput(input *AddApplicationInputInput) (*AddApplicationInputOutput, error) { @@ -262,9 +279,16 @@ func (c *KinesisAnalytics) AddApplicationInputProcessingConfigurationRequest(inp // AddApplicationInputProcessingConfiguration API operation for Amazon Kinesis Analytics. // -// Adds an InputProcessingConfiguration to an application. An input processor -// preprocesses records on the input stream before the application's SQL code -// executes. Currently, the only input processor available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// +// Adds an InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) +// to an application. An input processor preprocesses records on the input stream +// before the application's SQL code executes. Currently, the only input processor +// available is AWS Lambda (https://docs.aws.amazon.com/lambda/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -289,6 +313,8 @@ func (c *KinesisAnalytics) AddApplicationInputProcessingConfigurationRequest(inp // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/AddApplicationInputProcessingConfiguration func (c *KinesisAnalytics) AddApplicationInputProcessingConfiguration(input *AddApplicationInputProcessingConfigurationInput) (*AddApplicationInputProcessingConfigurationOutput, error) { @@ -357,6 +383,12 @@ func (c *KinesisAnalytics) AddApplicationOutputRequest(input *AddApplicationOutp // AddApplicationOutput API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Adds an external destination to your Amazon Kinesis Analytics application. // // If you want Amazon Kinesis Analytics to deliver data from an in-application @@ -369,14 +401,15 @@ func (c *KinesisAnalytics) AddApplicationOutputRequest(input *AddApplicationOutp // You can use one of the output configurations to deliver data from your in-application // error stream to an external destination so that you can analyze the errors. // For more information, see Understanding Application Output (Destination) -// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). // // Any configuration update, including adding a streaming source using this // operation, results in a new version of the application. 
You can use the DescribeApplication +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation to find the current application version. // // For the limits on the number of application inputs and outputs you can configure, -// see Limits (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). +// see Limits (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). // // This operation requires permissions to perform the kinesisanalytics:AddApplicationOutput // action. @@ -404,6 +437,8 @@ func (c *KinesisAnalytics) AddApplicationOutputRequest(input *AddApplicationOutp // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/AddApplicationOutput func (c *KinesisAnalytics) AddApplicationOutput(input *AddApplicationOutputInput) (*AddApplicationOutputOutput, error) { @@ -472,6 +507,12 @@ func (c *KinesisAnalytics) AddApplicationReferenceDataSourceRequest(input *AddAp // AddApplicationReferenceDataSource API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Adds a reference data source to an existing application. // // Amazon Kinesis Analytics reads reference data (that is, an Amazon S3 object) @@ -481,9 +522,9 @@ func (c *KinesisAnalytics) AddApplicationReferenceDataSourceRequest(input *AddAp // describes how data in Amazon S3 object maps to columns in the resulting in-application // table. // -// For conceptual information, see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// For conceptual information, see Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). // For the limits on data sources you can add to your application, see Limits -// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). // // This operation requires permissions to perform the kinesisanalytics:AddApplicationOutput // action. @@ -511,6 +552,8 @@ func (c *KinesisAnalytics) AddApplicationReferenceDataSourceRequest(input *AddAp // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/AddApplicationReferenceDataSource func (c *KinesisAnalytics) AddApplicationReferenceDataSource(input *AddApplicationReferenceDataSourceInput) (*AddApplicationReferenceDataSourceOutput, error) { @@ -578,11 +621,17 @@ func (c *KinesisAnalytics) CreateApplicationRequest(input *CreateApplicationInpu // CreateApplication API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. 
Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Creates an Amazon Kinesis Analytics application. You can configure each application // with one streaming source as input, application code to process the input, // and up to three destinations where you want Amazon Kinesis Analytics to write // the output data from your application. For an overview, see How it Works -// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html). +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html). // // In the input configuration, you map the streaming source to an in-application // stream, which you can think of as a constantly updating table. In the mapping, @@ -602,7 +651,7 @@ func (c *KinesisAnalytics) CreateApplicationRequest(input *CreateApplicationInpu // kinesisanalytics:CreateApplication action. // // For introductory exercises to create an Amazon Kinesis Analytics application, -// see Getting Started (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html). +// see Getting Started (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -625,6 +674,16 @@ func (c *KinesisAnalytics) CreateApplicationRequest(input *CreateApplicationInpu // * ErrCodeInvalidArgumentException "InvalidArgumentException" // Specified input parameter value is invalid. // +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modification to an application. +// For example, two individuals attempting to edit the same application at the +// same time. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/CreateApplication func (c *KinesisAnalytics) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { req, out := c.CreateApplicationRequest(input) @@ -692,6 +751,12 @@ func (c *KinesisAnalytics) DeleteApplicationRequest(input *DeleteApplicationInpu // DeleteApplication API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Deletes the specified application. Amazon Kinesis Analytics halts application // execution and deletes the application, including any application artifacts // (such as in-application streams, reference table, and application code). @@ -719,6 +784,8 @@ func (c *KinesisAnalytics) DeleteApplicationRequest(input *DeleteApplicationInpu // Application is not available for this operation. 
// // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DeleteApplication func (c *KinesisAnalytics) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { @@ -787,9 +854,15 @@ func (c *KinesisAnalytics) DeleteApplicationCloudWatchLoggingOptionRequest(input // DeleteApplicationCloudWatchLoggingOption API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Deletes a CloudWatch log stream from an application. For more information // about using CloudWatch log streams with Amazon Kinesis Analytics applications, -// see Working with Amazon CloudWatch Logs (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). +// see Working with Amazon CloudWatch Logs (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -814,6 +887,8 @@ func (c *KinesisAnalytics) DeleteApplicationCloudWatchLoggingOptionRequest(input // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DeleteApplicationCloudWatchLoggingOption func (c *KinesisAnalytics) DeleteApplicationCloudWatchLoggingOption(input *DeleteApplicationCloudWatchLoggingOptionInput) (*DeleteApplicationCloudWatchLoggingOptionOutput, error) { @@ -882,7 +957,14 @@ func (c *KinesisAnalytics) DeleteApplicationInputProcessingConfigurationRequest( // DeleteApplicationInputProcessingConfiguration API operation for Amazon Kinesis Analytics. // -// Deletes an InputProcessingConfiguration from an input. +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// +// Deletes an InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) +// from an input. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -907,6 +989,8 @@ func (c *KinesisAnalytics) DeleteApplicationInputProcessingConfigurationRequest( // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DeleteApplicationInputProcessingConfiguration func (c *KinesisAnalytics) DeleteApplicationInputProcessingConfiguration(input *DeleteApplicationInputProcessingConfigurationInput) (*DeleteApplicationInputProcessingConfigurationOutput, error) { @@ -975,6 +1059,12 @@ func (c *KinesisAnalytics) DeleteApplicationOutputRequest(input *DeleteApplicati // DeleteApplicationOutput API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Deletes output destination configuration from your application configuration. // Amazon Kinesis Analytics will no longer write data from the corresponding // in-application stream to the external output destination. @@ -1005,6 +1095,8 @@ func (c *KinesisAnalytics) DeleteApplicationOutputRequest(input *DeleteApplicati // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DeleteApplicationOutput func (c *KinesisAnalytics) DeleteApplicationOutput(input *DeleteApplicationOutputInput) (*DeleteApplicationOutputOutput, error) { @@ -1073,11 +1165,18 @@ func (c *KinesisAnalytics) DeleteApplicationReferenceDataSourceRequest(input *De // DeleteApplicationReferenceDataSource API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Deletes a reference data source configuration from the specified application // configuration. // // If the application is running, Amazon Kinesis Analytics immediately removes // the in-application table that you created using the AddApplicationReferenceDataSource +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_AddApplicationReferenceDataSource.html) // operation. // // This operation requires permissions to perform the kinesisanalytics.DeleteApplicationReferenceDataSource @@ -1106,6 +1205,8 @@ func (c *KinesisAnalytics) DeleteApplicationReferenceDataSourceRequest(input *De // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DeleteApplicationReferenceDataSource func (c *KinesisAnalytics) DeleteApplicationReferenceDataSource(input *DeleteApplicationReferenceDataSourceInput) (*DeleteApplicationReferenceDataSourceOutput, error) { @@ -1173,10 +1274,17 @@ func (c *KinesisAnalytics) DescribeApplicationRequest(input *DescribeApplication // DescribeApplication API operation for Amazon Kinesis Analytics. 
// +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Returns information about a specific Amazon Kinesis Analytics application. // // If you want to retrieve a list of all applications in your account, use the -// ListApplications operation. +// ListApplications (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_ListApplications.html) +// operation. // // This operation requires permissions to perform the kinesisanalytics:DescribeApplication // action. You can use DescribeApplication to get the current application versionId, @@ -1194,6 +1302,8 @@ func (c *KinesisAnalytics) DescribeApplicationRequest(input *DescribeApplication // Specified application can't be found. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/DescribeApplication func (c *KinesisAnalytics) DescribeApplication(input *DescribeApplicationInput) (*DescribeApplicationOutput, error) { @@ -1261,6 +1371,12 @@ func (c *KinesisAnalytics) DiscoverInputSchemaRequest(input *DiscoverInputSchema // DiscoverInputSchema API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Infers a schema by evaluating sample records on the specified streaming source // (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream) or S3 // object. In the response, the operation returns the inferred schema and also @@ -1268,7 +1384,7 @@ func (c *KinesisAnalytics) DiscoverInputSchemaRequest(input *DiscoverInputSchema // // You can use the inferred schema when configuring a streaming source for your // application. For conceptual information, see Configuring Application Input -// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). // Note that when you create an application using the Amazon Kinesis Analytics // console, the console uses this operation to infer a schema and show it in // the console user interface. @@ -1294,7 +1410,7 @@ func (c *KinesisAnalytics) DiscoverInputSchemaRequest(input *DiscoverInputSchema // * ErrCodeResourceProvisionedThroughputExceededException "ResourceProvisionedThroughputExceededException" // Discovery failed to get a record from the streaming source because of the // Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, -// see GetRecords (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) +// see GetRecords (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. 
// // * ErrCodeServiceUnavailableException "ServiceUnavailableException" @@ -1366,6 +1482,12 @@ func (c *KinesisAnalytics) ListApplicationsRequest(input *ListApplicationsInput) // ListApplications API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Returns a list of Amazon Kinesis Analytics applications in your account. // For each application, the response includes the application name, Amazon // Resource Name (ARN), and status. If the response returns the HasMoreApplications @@ -1373,7 +1495,8 @@ func (c *KinesisAnalytics) ListApplicationsRequest(input *ListApplicationsInput) // in the request body, and set the value of this to the last application name // from the previous response. // -// If you want detailed information about a specific application, use DescribeApplication. +// If you want detailed information about a specific application, use DescribeApplication +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html). // // This operation requires permissions to perform the kinesisanalytics:ListApplications // action. @@ -1406,6 +1529,94 @@ func (c *KinesisAnalytics) ListApplicationsWithContext(ctx aws.Context, input *L return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/ListTagsForResource +func (c *KinesisAnalytics) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Kinesis Analytics. +// +// Retrieves the list of key-value tags assigned to the application. For more +// information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
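The two-step Request/Send pattern documented above can be used directly with the new ListTagsForResource operation. A sketch, assuming a ResourceARN field on the input and a Tags slice (with Key/Value) on the output, neither of which is shown in this hunk:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

// printTags lists an application's tags; field names are assumptions.
func printTags(client *kinesisanalytics.KinesisAnalytics, appARN string) error {
	req, resp := client.ListTagsForResourceRequest(&kinesisanalytics.ListTagsForResourceInput{
		ResourceARN: aws.String(appARN),
	})
	// resp is not valid until Send returns without error.
	if err := req.Send(); err != nil {
		return err
	}
	for _, tag := range resp.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
	return nil
}
```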
+// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// Specified input parameter value is invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modification to an application. +// For example, two individuals attempting to edit the same application at the +// same time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/ListTagsForResource +func (c *KinesisAnalytics) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisAnalytics) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartApplication = "StartApplication" // StartApplicationRequest generates a "aws/request.Request" representing the @@ -1451,6 +1662,12 @@ func (c *KinesisAnalytics) StartApplicationRequest(input *StartApplicationInput) // StartApplication API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Starts the specified Amazon Kinesis Analytics application. After creating // an application, you must exclusively call this operation to start your application. // @@ -1459,10 +1676,12 @@ func (c *KinesisAnalytics) StartApplicationRequest(input *StartApplicationInput) // // The application status must be READY for you to start an application. You // can get the application status in the console or using the DescribeApplication +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation. // // After you start the application, you can stop the application from processing -// the input by calling the StopApplication operation. +// the input by calling the StopApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_StopApplication.html) +// operation. // // This operation requires permissions to perform the kinesisanalytics:StartApplication // action. @@ -1488,6 +1707,8 @@ func (c *KinesisAnalytics) StartApplicationRequest(input *StartApplicationInput) // User-provided application configuration is not valid. 
// // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/StartApplication func (c *KinesisAnalytics) StartApplication(input *StartApplicationInput) (*StartApplicationOutput, error) { @@ -1556,11 +1777,17 @@ func (c *KinesisAnalytics) StopApplicationRequest(input *StopApplicationInput) ( // StopApplication API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Stops the application from processing input data. You can stop an application -// only if it is in the running state. You can use the DescribeApplication operation -// to find the application state. After the application is stopped, Amazon Kinesis -// Analytics stops reading data from the input, the application stops processing -// data, and there is no output written to the destination. +// only if it is in the running state. You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) +// operation to find the application state. After the application is stopped, +// Amazon Kinesis Analytics stops reading data from the input, the application +// stops processing data, and there is no output written to the destination. // // This operation requires permissions to perform the kinesisanalytics:StopApplication // action. @@ -1580,6 +1807,8 @@ func (c *KinesisAnalytics) StopApplicationRequest(input *StopApplicationInput) ( // Application is not available for this operation. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/StopApplication func (c *KinesisAnalytics) StopApplication(input *StopApplicationInput) (*StopApplicationOutput, error) { @@ -1603,6 +1832,202 @@ func (c *KinesisAnalytics) StopApplicationWithContext(ctx aws.Context, input *St return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/TagResource +func (c *KinesisAnalytics) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Kinesis Analytics. +// +// Adds one or more key-value tags to a Kinesis Analytics application. Note +// that the maximum number of application tags includes system tags. The maximum +// number of user-defined application tags is 50. For more information, see +// Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// Application is not available for this operation. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// Specified input parameter value is invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modification to an application. +// For example, two individuals attempting to edit the same application at the +// same time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/TagResource +func (c *KinesisAnalytics) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisAnalytics) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/UntagResource +func (c *KinesisAnalytics) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Kinesis Analytics. +// +// Removes one or more tags from a Kinesis Analytics application. For more information, +// see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// Application is not available for this operation. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// Specified input parameter value is invalid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modification to an application. +// For example, two individuals attempting to edit the same application at the +// same time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/UntagResource +func (c *KinesisAnalytics) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisAnalytics) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateApplication = "UpdateApplication" // UpdateApplicationRequest generates a "aws/request.Request" representing the @@ -1648,6 +2073,12 @@ func (c *KinesisAnalytics) UpdateApplicationRequest(input *UpdateApplicationInpu // UpdateApplication API operation for Amazon Kinesis Analytics. // +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Updates an existing Amazon Kinesis Analytics application. Using this API, // you can update application code, input configuration, and output configuration. // @@ -1684,6 +2115,8 @@ func (c *KinesisAnalytics) UpdateApplicationRequest(input *UpdateApplicationInpu // same time. // // * ErrCodeUnsupportedOperationException "UnsupportedOperationException" +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/UpdateApplication func (c *KinesisAnalytics) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { @@ -1810,12 +2243,14 @@ type AddApplicationInputInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // Current version of your Amazon Kinesis Analytics application. You can use - // the DescribeApplication operation to find the current application version. + // the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to find the current application version. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` - // The Input to add. + // The Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_Input.html) + // to add. // // Input is a required field Input *Input `type:"structure" required:"true"` @@ -1902,21 +2337,23 @@ type AddApplicationInputProcessingConfigurationInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // Version of the application to which you want to add the input processing - // configuration. You can use the DescribeApplication operation to get the current - // application version. If the version specified is not the current version, - // the ConcurrentModificationException is returned. + // configuration. You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the current application version. If the version specified + // is not the current version, the ConcurrentModificationException is returned. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` // The ID of the input configuration to add the input processing configuration // to. 
You can get a list of the input IDs for an application using the DescribeApplication + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation. // // InputId is a required field InputId *string `min:"1" type:"string" required:"true"` - // The InputProcessingConfiguration to add to the application. + // The InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) + // to add to the application. // // InputProcessingConfiguration is a required field InputProcessingConfiguration *InputProcessingConfiguration `type:"structure" required:"true"` @@ -2015,9 +2452,9 @@ type AddApplicationOutputInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // Version of the application to which you want to add the output configuration. - // You can use the DescribeApplication operation to get the current application - // version. If the version specified is not the current version, the ConcurrentModificationException - // is returned. + // You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the current application version. If the version specified + // is not the current version, the ConcurrentModificationException is returned. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` @@ -2113,9 +2550,9 @@ type AddApplicationReferenceDataSourceInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // Version of the application for which you are adding the reference data source. - // You can use the DescribeApplication operation to get the current application - // version. If the version specified is not the current version, the ConcurrentModificationException - // is returned. + // You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the current application version. If the version specified + // is not the current version, the ConcurrentModificationException is returned. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` @@ -2203,6 +2640,12 @@ func (s AddApplicationReferenceDataSourceOutput) GoString() string { return s.String() } +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// // Provides a description of the application, including the application Amazon // Resource Name (ARN), status, latest version, and input and output configuration. type ApplicationDetail struct { @@ -2237,25 +2680,25 @@ type ApplicationDetail struct { // Describes the CloudWatch log streams that are configured to receive application // messages. For more information about using CloudWatch log streams with Amazon - // Kinesis Analytics applications, see Working with Amazon CloudWatch Logs (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). + // Kinesis Analytics applications, see Working with Amazon CloudWatch Logs (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). 
 	CloudWatchLoggingOptionDescriptions []*CloudWatchLoggingOptionDescription `type:"list"`
 
 	// Time stamp when the application version was created.
 	CreateTimestamp *time.Time `type:"timestamp"`
 
 	// Describes the application input configuration. For more information, see
-	// Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
+	// Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
 	InputDescriptions []*InputDescription `type:"list"`
 
 	// Time stamp when the application was last updated.
 	LastUpdateTimestamp *time.Time `type:"timestamp"`
 
 	// Describes the application output configuration. For more information, see
-	// Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html).
+	// Configuring Application Output (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html).
 	OutputDescriptions []*OutputDescription `type:"list"`
 
 	// Describes reference data sources configured for the application. For more
-	// information, see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
+	// information, see Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
 	ReferenceDataSourceDescriptions []*ReferenceDataSourceDescription `type:"list"`
 }
@@ -2341,6 +2784,12 @@ func (s *ApplicationDetail) SetReferenceDataSourceDescriptions(v []*ReferenceDat
 	return s
 }
 
+//
+// This documentation is for version 1 of the Amazon Kinesis Data Analytics
+// API, which only supports SQL applications. Version 2 of the API supports
+// SQL and Java applications. For more information about version 2, see Amazon
+// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html).
+//
 // Provides application summary information, including the application Amazon
 // Resource Name (ARN), name, and status.
 type ApplicationSummary struct {
@@ -2754,7 +3203,7 @@ type CreateApplicationInput struct {
 	// in-application stream, generates a running average of the number of advertisement
 	// clicks by vendor, and inserts resulting rows in another in-application stream
 	// using pumps. For more information about the typical pattern, see Application
-	// Code (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-app-code.html).
+	// Code (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-app-code.html).
 	//
 	// You can provide such a series of SQL statements, where output of one statement
 	// can be used as the input for the next statement. You store intermediate results
@@ -2776,7 +3225,7 @@ type CreateApplicationInput struct {
 	// Use this parameter to configure a CloudWatch log stream to monitor application
 	// configuration errors. For more information, see Working with Amazon CloudWatch
-	// Logs (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html).
+	// Logs (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html).
 	CloudWatchLoggingOptions []*CloudWatchLoggingOption `type:"list"`
 
 	// Use this parameter to configure the application input.
@@ -2815,6 +3264,12 @@ type CreateApplicationInput struct {
 	// Amazon Kinesis Analytics can assume to write to the stream or Lambda function
 	// on your behalf.
 	Outputs []*Output `type:"list"`
+
+	// A list of one or more tags to assign to the application.
A tag is a key-value + // pair that identifies an application. Note that the maximum number of application + // tags includes system tags. The maximum number of user-defined application + // tags is 50. For more information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -2836,6 +3291,9 @@ func (s *CreateApplicationInput) Validate() error { if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.CloudWatchLoggingOptions != nil { for i, v := range s.CloudWatchLoggingOptions { if v == nil { @@ -2866,6 +3324,16 @@ func (s *CreateApplicationInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2909,6 +3377,12 @@ func (s *CreateApplicationInput) SetOutputs(v []*Output) *CreateApplicationInput return s } +// SetTags sets the Tags field's value. +func (s *CreateApplicationInput) SetTags(v []*Tag) *CreateApplicationInput { + s.Tags = v + return s +} + // TBD type CreateApplicationOutput struct { _ struct{} `type:"structure"` @@ -2947,6 +3421,7 @@ type DeleteApplicationCloudWatchLoggingOptionInput struct { // The CloudWatchLoggingOptionId of the CloudWatch logging option to delete. // You can get the CloudWatchLoggingOptionId by using the DescribeApplication + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation. // // CloudWatchLoggingOptionId is a required field @@ -3098,7 +3573,8 @@ type DeleteApplicationInputProcessingConfigurationInput struct { // The ID of the input configuration from which to delete the input processing // configuration. You can get a list of the input IDs for an application by - // using the DescribeApplication operation. + // using the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation. // // InputId is a required field InputId *string `min:"1" type:"string" required:"true"` @@ -3197,6 +3673,7 @@ type DeleteApplicationOutputInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // Amazon Kinesis Analytics application version. You can use the DescribeApplication + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation to get the current application version. If the version specified // is not the current version, the ConcurrentModificationException is returned. // @@ -3205,10 +3682,11 @@ type DeleteApplicationOutputInput struct { // The ID of the configuration to delete. Each output configuration that is // added to the application, either when the application is created or later - // using the AddApplicationOutput operation, has a unique ID. You need to provide - // the ID to uniquely identify the output configuration that you want to delete - // from the application configuration. You can use the DescribeApplication operation - // to get the specific OutputId. + // using the AddApplicationOutput (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_AddApplicationOutput.html) + // operation, has a unique ID. 
You need to provide the ID to uniquely identify + // the output configuration that you want to delete from the application configuration. + // You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the specific OutputId. // // OutputId is a required field OutputId *string `min:"1" type:"string" required:"true"` @@ -3292,17 +3770,18 @@ type DeleteApplicationReferenceDataSourceInput struct { // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` - // Version of the application. You can use the DescribeApplication operation - // to get the current application version. If the version specified is not the - // current version, the ConcurrentModificationException is returned. + // Version of the application. You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the current application version. If the version specified + // is not the current version, the ConcurrentModificationException is returned. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` // ID of the reference data source. When you add a reference data source to - // your application using the AddApplicationReferenceDataSource, Amazon Kinesis - // Analytics assigns an ID. You can use the DescribeApplication operation to - // get the reference ID. + // your application using the AddApplicationReferenceDataSource (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_AddApplicationReferenceDataSource.html), + // Amazon Kinesis Analytics assigns an ID. You can use the DescribeApplication + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get the reference ID. // // ReferenceId is a required field ReferenceId *string `min:"1" type:"string" required:"true"` @@ -3447,7 +3926,7 @@ func (s *DescribeApplicationOutput) SetApplicationDetail(v *ApplicationDetail) * } // Describes the data format when records are written to the destination. For -// more information, see Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). +// more information, see Configuring Application Output (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). type DestinationSchema struct { _ struct{} `type:"structure"` @@ -3489,8 +3968,8 @@ func (s *DestinationSchema) SetRecordFormatType(v string) *DestinationSchema { type DiscoverInputSchemaInput struct { _ struct{} `type:"structure"` - // The InputProcessingConfiguration to use to preprocess the records before - // discovering the schema of the records. + // The InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) + // to use to preprocess the records before discovering the schema of the records. InputProcessingConfiguration *InputProcessingConfiguration `type:"structure"` // Point at which you want Amazon Kinesis Analytics to start reading records @@ -3630,7 +4109,7 @@ func (s *DiscoverInputSchemaOutput) SetRawInputRecords(v []*string) *DiscoverInp // When you configure the application input, you specify the streaming source, // the in-application stream name that is created, and the mapping between the -// two. 
For more information, see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// two. For more information, see Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). type Input struct { _ struct{} `type:"structure"` @@ -3638,13 +4117,13 @@ type Input struct { // // Data from your source is routed to these in-application input streams. // - // (see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). + // (see Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). InputParallelism *InputParallelism `type:"structure"` - // The InputProcessingConfiguration for the input. An input processor transforms - // records as they are received from the stream, before the application's SQL - // code executes. Currently, the only input processing configuration available - // is InputLambdaProcessor. + // The InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) + // for the input. An input processor transforms records as they are received + // from the stream, before the application's SQL code executes. Currently, the + // only input processing configuration available is InputLambdaProcessor (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputLambdaProcessor.html). InputProcessingConfiguration *InputProcessingConfiguration `type:"structure"` // Describes the format of the data in the streaming source, and how each data @@ -3776,7 +4255,8 @@ func (s *Input) SetNamePrefix(v string) *Input { type InputConfiguration struct { _ struct{} `type:"structure"` - // Input source ID. You can get this ID by calling the DescribeApplication operation. + // Input source ID. You can get this ID by calling the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation. // // Id is a required field Id *string `min:"1" type:"string" required:"true"` @@ -3830,7 +4310,7 @@ func (s *InputConfiguration) SetInputStartingPositionConfiguration(v *InputStart } // Describes the application input configuration. For more information, see -// Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// Configuring Application Input (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). type InputDescription struct { _ struct{} `type:"structure"` @@ -3936,14 +4416,18 @@ func (s *InputDescription) SetNamePrefix(v string) *InputDescription { } // An object that contains the Amazon Resource Name (ARN) of the AWS Lambda -// (https://aws.amazon.com/documentation/lambda/) function that is used to preprocess +// (https://docs.aws.amazon.com/lambda/) function that is used to preprocess // records in the stream, and the ARN of the IAM role that is used to access // the AWS Lambda function. type InputLambdaProcessor struct { _ struct{} `type:"structure"` - // The ARN of the AWS Lambda (https://aws.amazon.com/documentation/lambda/) - // function that operates on records in the stream. + // The ARN of the AWS Lambda (https://docs.aws.amazon.com/lambda/) function + // that operates on records in the stream. + // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. 
For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -3999,14 +4483,14 @@ func (s *InputLambdaProcessor) SetRoleARN(v string) *InputLambdaProcessor { } // An object that contains the Amazon Resource Name (ARN) of the AWS Lambda -// (https://aws.amazon.com/documentation/lambda/) function that is used to preprocess +// (https://docs.aws.amazon.com/lambda/) function that is used to preprocess // records in the stream, and the ARN of the IAM role that is used to access // the AWS Lambda expression. type InputLambdaProcessorDescription struct { _ struct{} `type:"structure"` - // The ARN of the AWS Lambda (https://aws.amazon.com/documentation/lambda/) - // function that is used to preprocess the records in the stream. + // The ARN of the AWS Lambda (https://docs.aws.amazon.com/lambda/) function + // that is used to preprocess the records in the stream. ResourceARN *string `min:"1" type:"string"` // The ARN of the IAM role that is used to access the AWS Lambda function. @@ -4035,13 +4519,17 @@ func (s *InputLambdaProcessorDescription) SetRoleARN(v string) *InputLambdaProce return s } -// Represents an update to the InputLambdaProcessor that is used to preprocess -// the records in the stream. +// Represents an update to the InputLambdaProcessor (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputLambdaProcessor.html) +// that is used to preprocess the records in the stream. type InputLambdaProcessorUpdate struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the new AWS Lambda (https://aws.amazon.com/documentation/lambda/) + // The Amazon Resource Name (ARN) of the new AWS Lambda (https://docs.aws.amazon.com/lambda/) // function that is used to preprocess the records in the stream. + // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) ResourceARNUpdate *string `min:"1" type:"string"` // The ARN of the new IAM role that is used to access the AWS Lambda function. @@ -4088,12 +4576,12 @@ func (s *InputLambdaProcessorUpdate) SetRoleARNUpdate(v string) *InputLambdaProc // Describes the number of in-application streams to create for a given streaming // source. For information about parallelism, see Configuring Application Input -// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). +// (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). type InputParallelism struct { _ struct{} `type:"structure"` // Number of in-application streams to create. For more information, see Limits - // (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). Count *int64 `min:"1" type:"integer"` } @@ -4165,12 +4653,13 @@ func (s *InputParallelismUpdate) SetCountUpdate(v int64) *InputParallelismUpdate // Provides a description of a processor that is used to preprocess the records // in the stream before being processed by your application code. Currently, -// the only input processor available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). 
+// the only input processor available is AWS Lambda (https://docs.aws.amazon.com/lambda/). type InputProcessingConfiguration struct { _ struct{} `type:"structure"` - // The InputLambdaProcessor that is used to preprocess the records in the stream - // before being processed by your application code. + // The InputLambdaProcessor (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputLambdaProcessor.html) + // that is used to preprocess the records in the stream before being processed + // by your application code. // // InputLambdaProcessor is a required field InputLambdaProcessor *InputLambdaProcessor `type:"structure" required:"true"` @@ -4211,11 +4700,12 @@ func (s *InputProcessingConfiguration) SetInputLambdaProcessor(v *InputLambdaPro } // Provides configuration information about an input processor. Currently, the -// only input processor available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). +// only input processor available is AWS Lambda (https://docs.aws.amazon.com/lambda/). type InputProcessingConfigurationDescription struct { _ struct{} `type:"structure"` - // Provides configuration information about the associated InputLambdaProcessorDescription. + // Provides configuration information about the associated InputLambdaProcessorDescription + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputLambdaProcessorDescription.html). InputLambdaProcessorDescription *InputLambdaProcessorDescription `type:"structure"` } @@ -4235,11 +4725,11 @@ func (s *InputProcessingConfigurationDescription) SetInputLambdaProcessorDescrip return s } -// Describes updates to an InputProcessingConfiguration. +// Describes updates to an InputProcessingConfiguration (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html). type InputProcessingConfigurationUpdate struct { _ struct{} `type:"structure"` - // Provides update information for an InputLambdaProcessor. + // Provides update information for an InputLambdaProcessor (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputLambdaProcessor.html). // // InputLambdaProcessorUpdate is a required field InputLambdaProcessorUpdate *InputLambdaProcessorUpdate `type:"structure" required:"true"` @@ -4809,9 +5299,9 @@ func (s *KinesisFirehoseOutputDescription) SetRoleARN(v string) *KinesisFirehose return s } -// When updating an output configuration using the UpdateApplication operation, -// provides information about an Amazon Kinesis Firehose delivery stream configured -// as the destination. +// When updating an output configuration using the UpdateApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_UpdateApplication.html) +// operation, provides information about an Amazon Kinesis Firehose delivery +// stream configured as the destination. type KinesisFirehoseOutputUpdate struct { _ struct{} `type:"structure"` @@ -5112,8 +5602,9 @@ func (s *KinesisStreamsOutputDescription) SetRoleARN(v string) *KinesisStreamsOu return s } -// When updating an output configuration using the UpdateApplication operation, -// provides information about an Amazon Kinesis stream configured as the destination. +// When updating an output configuration using the UpdateApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_UpdateApplication.html) +// operation, provides information about an Amazon Kinesis stream configured +// as the destination. 
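A minimal sketch of wiring up the InputProcessingConfiguration described in these hunks; both ARNs are placeholders, and the helper name newPreprocessing is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

// newPreprocessing builds the InputProcessingConfiguration documented above:
// the required InputLambdaProcessor names the preprocessing Lambda function
// and the IAM role Kinesis Analytics assumes to invoke it.
func newPreprocessing() *kinesisanalytics.InputProcessingConfiguration {
	return &kinesisanalytics.InputProcessingConfiguration{
		InputLambdaProcessor: &kinesisanalytics.InputLambdaProcessor{
			// Append ":<version>" to pin an earlier Lambda version, as the docs note.
			ResourceARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:MyPreprocessor"),
			RoleARN:     aws.String("arn:aws:iam::123456789012:role/KinesisAnalyticsLambdaRole"),
		},
	}
}

func main() {
	cfg := newPreprocessing()
	// Validate enforces the required InputLambdaProcessor field client-side.
	fmt.Println(cfg.Validate())
}
```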
type KinesisStreamsOutputUpdate struct { _ struct{} `type:"structure"` @@ -5174,6 +5665,10 @@ type LambdaOutput struct { // Amazon Resource Name (ARN) of the destination Lambda function to write to. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -5264,12 +5759,17 @@ func (s *LambdaOutputDescription) SetRoleARN(v string) *LambdaOutputDescription return s } -// When updating an output configuration using the UpdateApplication operation, -// provides information about an AWS Lambda function configured as the destination. +// When updating an output configuration using the UpdateApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_UpdateApplication.html) +// operation, provides information about an AWS Lambda function configured as +// the destination. type LambdaOutputUpdate struct { _ struct{} `type:"structure"` // Amazon Resource Name (ARN) of the destination Lambda function. + // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) ResourceARNUpdate *string `min:"1" type:"string"` // ARN of the IAM role that Amazon Kinesis Analytics can assume to write to @@ -5403,6 +5903,70 @@ func (s *ListApplicationsOutput) SetHasMoreApplications(v bool) *ListApplication return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the application for which to retrieve tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The key-value tags assigned to the application. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
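The new v1 ListTagsForResource types above suggest a call like the following; the application ARN is a placeholder, and this is a sketch rather than canonical usage:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

func main() {
	svc := kinesisanalytics.New(session.Must(session.NewSession()))

	out, err := svc.ListTagsForResource(&kinesisanalytics.ListTagsForResourceInput{
		ResourceARN: aws.String("arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-app"), // placeholder
	})
	if err != nil {
		fmt.Println("ListTagsForResource failed:", err)
		return
	}
	// Tag values are optional, so aws.StringValue guards against nil.
	for _, t := range out.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}
```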
+func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // When configuring application input at the time of creating or updating an // application, provides additional mapping information specific to the record // format (such as JSON, CSV, or record fields delimited by some delimiter) @@ -5467,12 +6031,12 @@ func (s *MappingParameters) SetJSONMappingParameters(v *JSONMappingParameters) * // Kinesis Firehose delivery stream. // // For limits on how many destinations an application can write and other limitations, -// see Limits (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html) +// see Limits (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). type Output struct { _ struct{} `type:"structure"` // Describes the data format when records are written to the destination. For - // more information, see Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). + // more information, see Configuring Application Output (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). // // DestinationSchema is a required field DestinationSchema *DestinationSchema `type:"structure" required:"true"` @@ -5650,7 +6214,7 @@ type OutputUpdate struct { _ struct{} `type:"structure"` // Describes the data format when records are written to the destination. For - // more information, see Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). + // more information, see Configuring Application Output (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). DestinationSchemaUpdate *DestinationSchema `type:"structure"` // Describes an Amazon Kinesis Firehose delivery stream as the destination for @@ -5765,8 +6329,9 @@ func (s *OutputUpdate) SetOutputId(v string) *OutputUpdate { type RecordColumn struct { _ struct{} `type:"structure"` - // Reference to the data element in the streaming input of the reference data - // source. + // Reference to the data element in the streaming input or the reference data + // source. This element is required if the RecordFormatType (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_RecordFormat.html#analytics-Type-RecordFormat-RecordFormatTypel) + // is JSON. Mapping *string `type:"string"` // Name of the column created in the in-application input stream or reference @@ -5974,7 +6539,8 @@ type ReferenceDataSourceDescription struct { // ID of the reference data source. This is the ID that Amazon Kinesis Analytics // assigns when you add the reference data source to your application using - // the AddApplicationReferenceDataSource operation. + // the AddApplicationReferenceDataSource (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_AddApplicationReferenceDataSource.html) + // operation. // // ReferenceId is a required field ReferenceId *string `min:"1" type:"string" required:"true"` @@ -6041,6 +6607,7 @@ type ReferenceDataSourceUpdate struct { _ struct{} `type:"structure"` // ID of the reference data source being updated. You can use the DescribeApplication + // (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) // operation to get this value. // // ReferenceId is a required field @@ -6206,8 +6773,8 @@ func (s *S3Configuration) SetRoleARN(v string) *S3Configuration { // object on your behalf. // // An Amazon Kinesis Analytics application loads reference data only once. 
If -// the data changes, you call the UpdateApplication operation to trigger reloading -// of data into your application. +// the data changes, you call the UpdateApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_UpdateApplication.html) +// operation to trigger reloading of data into your application. type S3ReferenceDataSource struct { _ struct{} `type:"structure"` @@ -6619,6 +7186,215 @@ func (s StopApplicationOutput) GoString() string { return s.String() } +// A key-value pair (the value is optional) that you can define and assign to +// AWS resources. If you specify a tag that already exists, the tag value is +// replaced with the value that you specify in the request. Note that the maximum +// number of application tags includes system tags. The maximum number of user-defined +// application tags is 50. For more information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the key-value tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the key-value tag. The value is optional. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the application to assign the tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The key-value tags to assign to the application. + // + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the Kinesis Analytics application from which to remove the tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // A list of keys of tags to remove from the specified application. + // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
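A hedged sketch of the corresponding v1 tagging round trip, assuming the TagResource and UntagResource operations added elsewhere in this file; the ARN and tag values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

func main() {
	svc := kinesisanalytics.New(session.Must(session.NewSession()))
	arn := aws.String("arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-app") // placeholder

	// Add (or overwrite) a tag on an existing application.
	_, err := svc.TagResource(&kinesisanalytics.TagResourceInput{
		ResourceARN: arn,
		Tags:        []*kinesisanalytics.Tag{{Key: aws.String("env"), Value: aws.String("staging")}},
	})
	if err != nil {
		fmt.Println("TagResource failed:", err)
		return
	}

	// Remove it again by key.
	if _, err := svc.UntagResource(&kinesisanalytics.UntagResourceInput{
		ResourceARN: arn,
		TagKeys:     []*string{aws.String("env")},
	}); err != nil {
		fmt.Println("UntagResource failed:", err)
	}
}
```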
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateApplicationInput struct { _ struct{} `type:"structure"` @@ -6632,8 +7408,8 @@ type UpdateApplicationInput struct { // ApplicationUpdate is a required field ApplicationUpdate *ApplicationUpdate `type:"structure" required:"true"` - // The current application version ID. You can use the DescribeApplication operation - // to get this value. + // The current application version ID. You can use the DescribeApplication (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) + // operation to get this value. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/doc.go index 8e2c7811337..07b64b530ff 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/doc.go @@ -3,6 +3,16 @@ // Package kinesisanalytics provides the client and types for making API // requests to Amazon Kinesis Analytics. // +// Overview +// +// This documentation is for version 1 of the Amazon Kinesis Data Analytics +// API, which only supports SQL applications. Version 2 of the API supports +// SQL and Java applications. For more information about version 2, see Amazon +// Kinesis Data Analytics API V2 Documentation (/kinesisanalytics/latest/apiv2/Welcome.html). +// +// This is the Amazon Kinesis Analytics v1 API Reference. The Amazon Kinesis +// Analytics Developer Guide provides additional information. +// // See https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14 for more information on this service. // // See kinesisanalytics package documentation for more information. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/errors.go index 7091938f16e..812bac82658 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/errors.go @@ -54,7 +54,7 @@ const ( // // Discovery failed to get a record from the streaming source because of the // Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, - // see GetRecords (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) + // see GetRecords (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. ErrCodeResourceProvisionedThroughputExceededException = "ResourceProvisionedThroughputExceededException" @@ -64,6 +64,14 @@ const ( // The service is unavailable. Back off and retry the operation. 
ErrCodeServiceUnavailableException = "ServiceUnavailableException" + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // Application created with too many tags, or too many tags added to an application. + // Note that the maximum number of application tags includes system tags. The + // maximum number of user-defined application tags is 50. + ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeUnableToDetectSchemaException for service response error code // "UnableToDetectSchemaException". // @@ -73,5 +81,8 @@ const ( // ErrCodeUnsupportedOperationException for service response error code // "UnsupportedOperationException". + // + // The request was rejected because a specified parameter is not supported or + // a specified resource is not valid for this operation. ErrCodeUnsupportedOperationException = "UnsupportedOperationException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go index 153daad6eba..d6375e1a205 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalytics { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalytics { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalytics { svc := &KinesisAnalytics{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-14", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go index 091d0c94f4b..f547bd1d544 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go @@ -85,6 +85,9 @@ func (c *KinesisAnalyticsV2) AddApplicationCloudWatchLoggingOptionRequest(input // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeInvalidApplicationConfigurationException "InvalidApplicationConfigurationException" +// The user-provided application configuration is not valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/AddApplicationCloudWatchLoggingOption func (c *KinesisAnalyticsV2) AddApplicationCloudWatchLoggingOption(input *AddApplicationCloudWatchLoggingOptionInput) (*AddApplicationCloudWatchLoggingOptionOutput, error) { req, out := c.AddApplicationCloudWatchLoggingOptionRequest(input) @@ -563,10 +566,7 @@ func (c *KinesisAnalyticsV2) CreateApplicationRequest(input *CreateApplicationIn // // Creates an Amazon Kinesis Data Analytics application. For information about // creating a Kinesis Data Analytics application, see Creating an Application -// (https://docs.aws.amazon.com/kinesisanalytics/latest/Java/creating-app.html). -// -// SQL is not enabled for this private beta release. Using SQL parameters (such -// as SqlApplicationConfiguration) will result in an error. +// (https://docs.aws.amazon.com/kinesisanalytics/latest/java/getting-started.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -592,6 +592,16 @@ func (c *KinesisAnalyticsV2) CreateApplicationRequest(input *CreateApplicationIn // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modifications to an application. +// This error can be the result of attempting to modify an application without +// using the current application ID. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/CreateApplication func (c *KinesisAnalyticsV2) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { req, out := c.CreateApplicationRequest(input) @@ -783,6 +793,9 @@ func (c *KinesisAnalyticsV2) DeleteApplicationRequest(input *DeleteApplicationIn // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeInvalidApplicationConfigurationException "InvalidApplicationConfigurationException" +// The user-provided application configuration is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/DeleteApplication func (c *KinesisAnalyticsV2) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { req, out := c.DeleteApplicationRequest(input) @@ -877,6 +890,9 @@ func (c *KinesisAnalyticsV2) DeleteApplicationCloudWatchLoggingOptionRequest(inp // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeInvalidApplicationConfigurationException "InvalidApplicationConfigurationException" +// The user-provided application configuration is not valid. 
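The generated docs above recommend runtime type assertions on awserr.Error; a sketch of handling the new TooManyTagsException (and the ConcurrentModificationException also documented here) when tagging a v2 application, with a placeholder ARN:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	_, err := svc.TagResource(&kinesisanalyticsv2.TagResourceInput{
		ResourceARN: aws.String("arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-app"), // placeholder
		Tags:        []*kinesisanalyticsv2.Tag{{Key: aws.String("env"), Value: aws.String("staging")}},
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kinesisanalyticsv2.ErrCodeTooManyTagsException:
			// The 50 user-defined tag limit (which shares space with system tags) was exceeded.
			fmt.Println("too many tags:", aerr.Message())
		case kinesisanalyticsv2.ErrCodeConcurrentModificationException:
			fmt.Println("concurrent modification:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}
```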
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/DeleteApplicationCloudWatchLoggingOption func (c *KinesisAnalyticsV2) DeleteApplicationCloudWatchLoggingOption(input *DeleteApplicationCloudWatchLoggingOptionInput) (*DeleteApplicationCloudWatchLoggingOptionOutput, error) { req, out := c.DeleteApplicationCloudWatchLoggingOptionRequest(input) @@ -1723,6 +1739,93 @@ func (c *KinesisAnalyticsV2) ListApplicationsWithContext(ctx aws.Context, input return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/ListTagsForResource +func (c *KinesisAnalyticsV2) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Kinesis Analytics. +// +// Retrieves the list of key-value tags assigned to the application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// The specified input parameter value is not valid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modifications to an application. +// This error can be the result of attempting to modify an application without +// using the current application ID. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/ListTagsForResource +func (c *KinesisAnalyticsV2) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. 
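A sketch of the ListTagsForResourceWithContext variant defined in this hunk, using a timeout context for cancellation as the generated comments describe; the ARN is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	// Per the generated docs, the context must be non-nil; it is used for
	// request cancellation.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.ListTagsForResourceWithContext(ctx, &kinesisanalyticsv2.ListTagsForResourceInput{
		ResourceARN: aws.String("arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-app"), // placeholder
	})
	if err != nil {
		fmt.Println("ListTagsForResource failed:", err)
		return
	}
	for _, t := range out.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}
```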
+// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisAnalyticsV2) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartApplication = "StartApplication" // StartApplicationRequest generates a "aws/request.Request" representing the @@ -1771,9 +1874,6 @@ func (c *KinesisAnalyticsV2) StartApplicationRequest(input *StartApplicationInpu // Starts the specified Amazon Kinesis Data Analytics application. After creating // an application, you must exclusively call this operation to start your application. // -// SQL is not enabled for this private beta. Using SQL parameters (such as RunConfiguration$SqlRunConfigurations) -// will result in an error. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1888,6 +1988,9 @@ func (c *KinesisAnalyticsV2) StopApplicationRequest(input *StopApplicationInput) // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeInvalidApplicationConfigurationException "InvalidApplicationConfigurationException" +// The user-provided application configuration is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/StopApplication func (c *KinesisAnalyticsV2) StopApplication(input *StopApplicationInput) (*StopApplicationOutput, error) { req, out := c.StopApplicationRequest(input) @@ -1910,6 +2013,200 @@ func (c *KinesisAnalyticsV2) StopApplicationWithContext(ctx aws.Context, input * return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/TagResource +func (c *KinesisAnalyticsV2) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Kinesis Analytics. +// +// Adds one or more key-value tags to a Kinesis Analytics application. Note +// that the maximum number of application tags includes system tags. The maximum +// number of user-defined application tags is 50. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The application is not available for this operation. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// The specified input parameter value is not valid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modifications to an application. +// This error can be the result of attempting to modify an application without +// using the current application ID. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/TagResource +func (c *KinesisAnalyticsV2) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisAnalyticsV2) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/UntagResource +func (c *KinesisAnalyticsV2) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Kinesis Analytics. +// +// Removes one or more tags from a Kinesis Analytics application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Analytics's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Specified application can't be found. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The application is not available for this operation. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// Application created with too many tags, or too many tags added to an application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. +// +// * ErrCodeInvalidArgumentException "InvalidArgumentException" +// The specified input parameter value is not valid. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Exception thrown as a result of concurrent modifications to an application. +// This error can be the result of attempting to modify an application without +// using the current application ID. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/UntagResource +func (c *KinesisAnalyticsV2) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *KinesisAnalyticsV2) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateApplication = "UpdateApplication" // UpdateApplicationRequest generates a "aws/request.Request" representing the @@ -1961,9 +2258,6 @@ func (c *KinesisAnalyticsV2) UpdateApplicationRequest(input *UpdateApplicationIn // Kinesis Data Analytics updates the ApplicationVersionId each time you update // your application. // -// SQL is not enabled for this private beta. Using SQL parameters (such as SqlApplicationConfigurationUpdate) -// will result in an error. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1993,6 +2287,9 @@ func (c *KinesisAnalyticsV2) UpdateApplicationRequest(input *UpdateApplicationIn // * ErrCodeInvalidRequestException "InvalidRequestException" // The request JSON is not valid for the operation. // +// * ErrCodeInvalidApplicationConfigurationException "InvalidApplicationConfigurationException" +// The user-provided application configuration is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/UpdateApplication func (c *KinesisAnalyticsV2) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { req, out := c.UpdateApplicationRequest(input) @@ -2505,7 +2802,7 @@ type AddApplicationOutputOutput struct { ApplicationVersionId *int64 `min:"1" type:"long"` // Describes the application output configuration. For more information, see - // Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). + // Configuring Application Output (https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). OutputDescriptions []*OutputDescription `type:"list"` } @@ -3114,7 +3411,7 @@ type ApplicationDetail struct { // The current timestamp when the application was last updated. LastUpdateTimestamp *time.Time `type:"timestamp"` - // The runtime environment for the application (SQL-1.0 or JAVA-8-FLINK-1.5). + // The runtime environment for the application (SQL-1.0 or FLINK-1_6). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -3388,7 +3685,7 @@ type ApplicationSummary struct { // ApplicationVersionId is a required field ApplicationVersionId *int64 `min:"1" type:"long" required:"true"` - // The runtime environment for the application (SQL-1.0 or JAVA-8-FLINK-1.5). + // The runtime environment for the application (SQL-1.0 or FLINK-1_6). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -3504,7 +3801,7 @@ func (s *CSVMappingParameters) SetRecordRowDelimiter(v string) *CSVMappingParame // Describes an application's checkpointing configuration. Checkpointing is // the process of persisting application state for fault tolerance. 
For more -// information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) +// information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). type CheckpointConfiguration struct { _ struct{} `type:"structure"` @@ -3525,7 +3822,7 @@ type CheckpointConfiguration struct { // Describes the minimum time in milliseconds after a checkpoint operation completes // that a new checkpoint operation can start. If a checkpoint operation takes // longer than the CheckpointInterval, the application otherwise performs continual - // checkpoint operations. For more information, see Tuning Checkpointing (https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/large_state_tuning.html#tuning-checkpointing) + // checkpoint operations. For more information, see Tuning Checkpointing (https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/large_state_tuning.html#tuning-checkpointing) // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). MinPauseBetweenCheckpoints *int64 `type:"long"` } @@ -4024,7 +4321,7 @@ type CreateApplicationInput struct { // application configuration errors. CloudWatchLoggingOptions []*CloudWatchLoggingOption `type:"list"` - // The runtime environment for the application (SQL-1.0 or JAVA-8-FLINK-1.5). + // The runtime environment for the application (SQL-1.0 or FLINK-1_6). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -4034,6 +4331,13 @@ type CreateApplicationInput struct { // // ServiceExecutionRole is a required field ServiceExecutionRole *string `min:"1" type:"string" required:"true"` + + // A list of one or more tags to assign to the application. A tag is a key-value + // pair that identifies an application. Note that the maximum number of application + // tags includes system tags. The maximum number of user-defined application + // tags is 50. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + // in the AWS Billing and Cost Management Guide. + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -4064,6 +4368,9 @@ func (s *CreateApplicationInput) Validate() error { if s.ServiceExecutionRole != nil && len(*s.ServiceExecutionRole) < 1 { invalidParams.Add(request.NewErrParamMinLen("ServiceExecutionRole", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.ApplicationConfiguration != nil { if err := s.ApplicationConfiguration.Validate(); err != nil { invalidParams.AddNested("ApplicationConfiguration", err.(request.ErrInvalidParams)) @@ -4079,6 +4386,16 @@ func (s *CreateApplicationInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4122,6 +4439,12 @@ func (s *CreateApplicationInput) SetServiceExecutionRole(v string) *CreateApplic return s } +// SetTags sets the Tags field's value. 
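A sketch of creating a tagged v2 application with the Tags field added above; the application name, role ARN, and tag values are placeholders, and the RuntimeEnvironmentSql10 enum constant is an assumption drawn from the RuntimeEnvironment enum referenced in these hunks:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	input := &kinesisanalyticsv2.CreateApplicationInput{
		ApplicationName:      aws.String("example-app"),                                                    // placeholder
		RuntimeEnvironment:   aws.String(kinesisanalyticsv2.RuntimeEnvironmentSql10),                       // assumed enum constant
		ServiceExecutionRole: aws.String("arn:aws:iam::123456789012:role/KinesisAnalyticsServiceRole"),     // placeholder
		Tags: []*kinesisanalyticsv2.Tag{
			{Key: aws.String("team"), Value: aws.String("analytics")},
		},
	}

	// Validate applies the new min:"1" check on Tags and recurses into each Tag.
	if err := input.Validate(); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	if _, err := svc.CreateApplication(input); err != nil {
		fmt.Println("CreateApplication failed:", err)
	}
}
```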
+func (s *CreateApplicationInput) SetTags(v []*Tag) *CreateApplicationInput { + s.Tags = v + return s +} + type CreateApplicationOutput struct { _ struct{} `type:"structure"` @@ -4918,8 +5241,7 @@ type DescribeApplicationSnapshotInput struct { // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` - // The identifier of an application snapshot. You can retrieve this value using - // . + // The identifier of an application snapshot. You can retrieve this value using . // // SnapshotName is a required field SnapshotName *string `min:"1" type:"string" required:"true"` @@ -5311,7 +5633,7 @@ type FlinkApplicationConfiguration struct { // Describes an application's checkpointing configuration. Checkpointing is // the process of persisting application state for fault tolerance. For more - // information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) + // information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). CheckpointConfiguration *CheckpointConfiguration `type:"structure"` @@ -5738,6 +6060,10 @@ type InputLambdaProcessor struct { // The ARN of the AWS Lambda function that operates on records in the stream. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` } @@ -5783,6 +6109,10 @@ type InputLambdaProcessorDescription struct { // The ARN of the AWS Lambda function that is used to preprocess the records // in the stream. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -5825,6 +6155,10 @@ type InputLambdaProcessorUpdate struct { // The Amazon Resource Name (ARN) of the new AWS Lambda function that is used // to preprocess the records in the stream. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARNUpdate is a required field ResourceARNUpdate *string `min:"1" type:"string" required:"true"` } @@ -6871,6 +7205,10 @@ type LambdaOutput struct { // The Amazon Resource Name (ARN) of the destination Lambda function to write // to. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. 
For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` } @@ -6956,6 +7294,10 @@ type LambdaOutputUpdate struct { // The Amazon Resource Name (ARN) of the destination AWS Lambda function. // + // To specify an earlier version of the Lambda function than the latest, include + // the Lambda function version in the Lambda function ARN. For more information + // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // // ResourceARNUpdate is a required field ResourceARNUpdate *string `min:"1" type:"string" required:"true"` } @@ -7180,6 +7522,70 @@ func (s *ListApplicationsOutput) SetNextToken(v string) *ListApplicationsOutput return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the application for which to retrieve tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The key-value tags assigned to the application. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // When you configure an SQL-based Amazon Kinesis Data Analytics application's // input at the time of creating or updating an application, provides additional // mapping information specific to the record format (such as JSON, CSV, or @@ -7240,7 +7646,7 @@ func (s *MappingParameters) SetJSONMappingParameters(v *JSONMappingParameters) * // Describes configuration parameters for Amazon CloudWatch logging for a Java-based // Kinesis Data Analytics application. For more information about CloudWatch -// logging, see Monitoring (https://docs.aws.amazon.com/kinesisanalytics/latest/Java/monitoring-overview.html). +// logging, see Monitoring (https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html). 
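The note about pinning an earlier Lambda version recurs throughout these hunks; a tiny sketch of what a version-qualified Lambda ARN looks like, with a placeholder account and function name:

```go
package main

import "fmt"

func main() {
	// Unqualified ARN: the latest Lambda function version is invoked.
	latest := "arn:aws:lambda:us-west-2:123456789012:function:MyPreprocessor" // placeholder

	// To pin an earlier version, append ":<version>" to the ARN, as the
	// repeated note in these hunks describes.
	pinned := latest + ":2"
	fmt.Println(pinned)
}
```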
type MonitoringConfiguration struct { _ struct{} `type:"structure"` @@ -7969,7 +8375,7 @@ func (s *PropertyGroup) SetPropertyMap(v map[string]*string) *PropertyGroup { type RecordColumn struct { _ struct{} `type:"structure"` - // A reference to the data element in the streaming input of the reference data + // A reference to the data element in the streaming input or the reference data // source. Mapping *string `type:"string"` @@ -9396,6 +9802,217 @@ func (s StopApplicationOutput) GoString() string { return s.String() } +// A key-value pair (the value is optional) that you can define and assign to +// AWS resources. If you specify a tag that already exists, the tag value is +// replaced with the value that you specify in the request. Note that the maximum +// number of application tags includes system tags. The maximum number of user-defined +// application tags is 50. For more information, see Using Cost Allocation Tags +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// in the AWS Billing and Cost Management Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the key-value tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the key-value tag. The value is optional. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the application to assign the tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The key-value tags to assign to the application. + // + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the Kinesis Analytics application from which to remove the tags. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // A list of keys of tags to remove from the specified application. + // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateApplicationInput struct { _ struct{} `type:"structure"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go index 7237721b25f..e4ae663b59f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go @@ -3,7 +3,11 @@ // Package kinesisanalyticsv2 provides the client and types for making API // requests to Amazon Kinesis Analytics. // -// Documentation for Kinesis Data Analytics API v2 +// Amazon Kinesis Data Analytics is a fully managed service that you can use +// to process and analyze streaming data using SQL or Java. The service enables +// you to quickly author and run SQL or Java code against streaming sources +// to perform time series analytics, feed real-time dashboards, and create real-time +// metrics. // // See https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go index defe252c7d9..f4cac8577d0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go @@ -70,6 +70,14 @@ const ( // The service cannot complete the request. ErrCodeServiceUnavailableException = "ServiceUnavailableException" + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // Application created with too many tags, or too many tags added to an application. + // Note that the maximum number of application tags includes system tags. The + // maximum number of user-defined application tags is 50. + ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeUnableToDetectSchemaException for service response error code // "UnableToDetectSchemaException". 
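Taken together, the Tag type and the TagResource/UntagResource/ListTagsForResource shapes added above (plus the new TooManyTagsException) cover application tagging end to end. As a rough usage sketch only, not part of the vendored change: the session wiring, ARN, and tag values are placeholders, and it assumes the TagResource/ListTagsForResource operations generated alongside these types, imported from the upstream SDK path rather than this repo's vendor tree.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	// Placeholder application ARN, for illustration only.
	arn := aws.String("arn:aws:kinesisanalytics:us-west-2:123456789012:application/example")

	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	// Assign one tag; the Validate() methods above enforce the min:"1" constraints.
	tag := &kinesisanalyticsv2.Tag{}
	tag.SetKey("team").SetValue("analytics")
	if _, err := svc.TagResource(&kinesisanalyticsv2.TagResourceInput{
		ResourceARN: arn,
		Tags:        []*kinesisanalyticsv2.Tag{tag},
	}); err != nil {
		log.Fatal(err)
	}

	// Read the tags back.
	out, err := svc.ListTagsForResource(&kinesisanalyticsv2.ListTagsForResourceInput{ResourceARN: arn})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range out.Tags {
		fmt.Println(aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}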
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go index d066c2e6c70..6399b45ae45 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalyticsV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kinesisanalytics" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { svc := &KinesisAnalyticsV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-23", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go index c01f89dec04..595a2fcb5ae 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go @@ -445,6 +445,12 @@ func (c *KinesisVideo) ListStreamsRequest(input *ListStreamsInput) (req *request Name: opListStreams, HTTPMethod: "POST", HTTPPath: "/listStreams", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -499,6 +505,58 @@ func (c *KinesisVideo) ListStreamsWithContext(ctx aws.Context, input *ListStream return out, req.Send() } +// ListStreamsPages iterates over the pages of a ListStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreams operation. +// pageNum := 0 +// err := client.ListStreamsPages(params, +// func(page *kinesisvideo.ListStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *KinesisVideo) ListStreamsPages(input *ListStreamsInput, fn func(*ListStreamsOutput, bool) bool) error { + return c.ListStreamsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStreamsPagesWithContext same as ListStreamsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KinesisVideo) ListStreamsPagesWithContext(ctx aws.Context, input *ListStreamsInput, fn func(*ListStreamsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStreamsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStreamsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStreamsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForStream = "ListTagsForStream" // ListTagsForStreamRequest generates a "aws/request.Request" representing the @@ -2142,6 +2200,9 @@ const ( // APINameGetHlsStreamingSessionUrl is a APIName enum value APINameGetHlsStreamingSessionUrl = "GET_HLS_STREAMING_SESSION_URL" + + // APINameGetDashStreamingSessionUrl is a APIName enum value + APINameGetDashStreamingSessionUrl = "GET_DASH_STREAMING_SESSION_URL" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go index a723794cd00..dc21b52f359 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisvideo.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisVideo { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisVideo { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisVideo { svc := &KinesisVideo{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-30", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index de91f57ae75..d1375b22606 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -193,8 +193,8 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // During the connection process, AWS KMS finds the AWS CloudHSM cluster that // is associated with the custom key store, creates the connection infrastructure, // connects to the cluster, logs into the AWS CloudHSM client as the kmsuser -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// crypto user (CU), and rotates its password. 
+// crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// (CU), and rotates its password. // // The ConnectCustomKeyStore operation might fail for various reasons. To find // the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode @@ -268,11 +268,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -282,7 +280,7 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore func (c *KMS) ConnectCustomKeyStore(input *ConnectCustomKeyStoreInput) (*ConnectCustomKeyStoreOutput, error) { @@ -575,11 +573,9 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -589,7 +585,7 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . 
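As the reworded comments above describe, a failed connection surfaces through DescribeCustomKeyStores (via ConnectionErrorCode) rather than through the ConnectCustomKeyStore error itself. A minimal sketch of that check, assuming a configured session; the key store ID is a placeholder and nothing here is part of the vendored change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	id := aws.String("cks-1234567890abcdef0") // placeholder custom key store ID

	_, err := svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{CustomKeyStoreId: id})
	if err != nil {
		// Per the docs above, look up the reason via DescribeCustomKeyStores
		// and inspect ConnectionErrorCode.
		out, derr := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{CustomKeyStoreId: id})
		if derr != nil {
			log.Fatal(derr)
		}
		for _, ks := range out.CustomKeyStores {
			fmt.Println(aws.StringValue(ks.ConnectionState), aws.StringValue(ks.ConnectionErrorCode))
		}
	}
}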
// // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) { @@ -668,7 +664,7 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // To perform this operation on a CMK in a different AWS account, specify the // key ARN in the value of the KeyId parameter. For more information about grants, // see Grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -878,11 +874,9 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. -// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -892,7 +886,7 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { @@ -1671,7 +1665,7 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // // For more information about how key state affects the use of a CMK, see How // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -1901,11 +1895,15 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp // operations will fail. This action can prevent users from storing and accessing // sensitive data. // -// To find the connection state of a custom key store, use the DescribeCustomKeyStoresoperation. To reconnect a custom key store, use the ConnectCustomKeyStoreoperation. +// To find the connection state of a custom key store, use the DescribeCustomKeyStores +// operation. 
To reconnect a custom key store, use the ConnectCustomKeyStore +// operation. // // If the operation succeeds, it returns a JSON object with no properties. // // This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// feature in AWS KMS, which combines the convenience and extensive integration +// of AWS KMS with the isolation and control of a single-tenant key store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2420,7 +2418,7 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // exact match) in your request to Decrypt the data key. Otherwise, the request // to decrypt fails with an InvalidCiphertextException. For more information, // see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . // // The result of this operation varies with the key state of the CMK. For details, // see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -3143,10 +3141,9 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ // When calling this operation, you must specify the following values: // // * The key ID or key ARN of a CMK with no key material. Its Origin must -// be EXTERNAL. -// -// To create a CMK with no key material, call CreateKey and set the value of -// its Origin parameter to EXTERNAL. To get the Origin of a CMK, call DescribeKey.) +// be EXTERNAL. To create a CMK with no key material, call CreateKey and +// set the value of its Origin parameter to EXTERNAL. To get the Origin of +// a CMK, call DescribeKey.) // // * The encrypted key material. To get the public key to encrypt the key // material, call GetParametersForImport. @@ -3375,7 +3372,7 @@ func (c *KMS) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, o // // Example iterating over at most 3 pages of a ListAliases operation. // pageNum := 0 // err := client.ListAliasesPages(params, -// func(page *ListAliasesOutput, lastPage bool) bool { +// func(page *kms.ListAliasesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3407,10 +3404,12 @@ func (c *KMS) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3538,7 +3537,7 @@ func (c *KMS) ListGrantsWithContext(ctx aws.Context, input *ListGrantsInput, opt // // Example iterating over at most 3 pages of a ListGrants operation. 
// pageNum := 0 // err := client.ListGrantsPages(params, -// func(page *ListGrantsResponse, lastPage bool) bool { +// func(page *kms.ListGrantsResponse, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3570,10 +3569,12 @@ func (c *KMS) ListGrantsPagesWithContext(ctx aws.Context, input *ListGrantsInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3697,7 +3698,7 @@ func (c *KMS) ListKeyPoliciesWithContext(ctx aws.Context, input *ListKeyPolicies // // Example iterating over at most 3 pages of a ListKeyPolicies operation. // pageNum := 0 // err := client.ListKeyPoliciesPages(params, -// func(page *ListKeyPoliciesOutput, lastPage bool) bool { +// func(page *kms.ListKeyPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3729,10 +3730,12 @@ func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPol }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3842,7 +3845,7 @@ func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts .. // // Example iterating over at most 3 pages of a ListKeys operation. // pageNum := 0 // err := client.ListKeysPages(params, -// func(page *ListKeysOutput, lastPage bool) bool { +// func(page *kms.ListKeysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3874,10 +3877,12 @@ func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5089,9 +5094,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // of the custom key store to the value that you specify. // // * Use the KeyStorePassword parameter tell AWS KMS the current password -// of the kmsuser (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// crypto user (CU) in the associated AWS CloudHSM cluster. You can use this -// parameter to fix connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password) +// of the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// in the associated AWS CloudHSM cluster. You can use this parameter to +// fix connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password) // that occur when AWS KMS cannot log into the associated cluster because // the kmsuser password has changed. This value does not change the password // in the AWS CloudHSM cluster. @@ -5187,11 +5192,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. 
-// -// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, -// the AWS CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the AWS CloudHSM cluster must have at least two active HSMs, +// each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -5201,7 +5204,7 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the AWS CloudHSM User Guide. +// in the AWS CloudHSM User Guide . // // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) { @@ -5600,9 +5603,9 @@ type CreateCustomKeyStoreInput struct { // CustomKeyStoreName is a required field CustomKeyStoreName *string `min:"1" type:"string" required:"true"` - // Enter the password of the kmsuser (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) - // crypto user (CU) account in the specified AWS CloudHSM cluster. AWS KMS logs - // into the cluster as this user to manage key material on your behalf. + // Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) + // in the specified AWS CloudHSM cluster. AWS KMS logs into the cluster as this + // user to manage key material on your behalf. // // This parameter tells AWS KMS the kmsuser account password; it does not change // the password in the AWS CloudHSM cluster. @@ -5715,7 +5718,7 @@ type CreateGrantInput struct { // Allows a cryptographic operation only when the encryption context matches // or includes the encryption context specified in this structure. For more // information about encryption context, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the AWS Key Management Service Developer Guide. + // in the AWS Key Management Service Developer Guide . Constraints *GrantConstraints `type:"structure"` // A list of grant tokens. @@ -5914,7 +5917,7 @@ type CreateKeyInput struct { // Do not set this value to true indiscriminately. // // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. + // section in the AWS Key Management Service Developer Guide . // // Use this parameter only when you include a policy in the request and you // intend to prevent the principal that is making the request from making a @@ -5978,7 +5981,7 @@ type CreateKeyInput struct { // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that // the CMK becomes unmanageable. 
For more information, refer to the scenario // in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the AWS Key Management Service Developer Guide. + // section of the AWS Key Management Service Developer Guide . // // * Each statement in the key policy must contain one or more principals. // The principals in the key policy must exist and be visible to AWS KMS. @@ -7902,7 +7905,7 @@ func (s *GetParametersForImportOutput) SetPublicKey(v []byte) *GetParametersForI // only by case. To require a fully case-sensitive encryption context, use the // kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM // or key policy. For details, see kms:EncryptionContext: (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) -// in the AWS Key Management Service Developer Guide. +// in the AWS Key Management Service Developer Guide . type GrantConstraints struct { _ struct{} `type:"structure"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go index b2513484f6c..e8ce42f3b9c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go @@ -44,11 +44,9 @@ const ( // // * The cluster must contain at least as many HSMs as the operation requires. // To add HSMs, use the AWS CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) - // operation. - // - // For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey operations, - // the AWS CloudHSM cluster must have at least two active HSMs, each in a - // different Availability Zone. For the ConnectCustomKeyStore operation, + // operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey + // operations, the AWS CloudHSM cluster must have at least two active HSMs, + // each in a different Availability Zone. For the ConnectCustomKeyStore operation, // the AWS CloudHSM must contain at least one active HSM. // // For information about the requirements for an AWS CloudHSM cluster that is @@ -58,7 +56,7 @@ const ( // Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) // in the AWS CloudHSM User Guide. For information about cluster security groups, // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // in the AWS CloudHSM User Guide. + // in the AWS CloudHSM User Guide . ErrCodeCloudHsmClusterInvalidConfigurationException = "CloudHsmClusterInvalidConfigurationException" // ErrCodeCloudHsmClusterNotActiveException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index 6d062f32fc8..30a7b6875d3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -46,11 +46,11 @@ const ( // svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KMS { svc := &KMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go new file mode 100644 index 00000000000..b457320c659 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go @@ -0,0 +1,3372 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lakeformation + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchGrantPermissions = "BatchGrantPermissions" + +// BatchGrantPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGrantPermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGrantPermissions for more information on using the BatchGrantPermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGrantPermissionsRequest method. +// req, resp := client.BatchGrantPermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/BatchGrantPermissions +func (c *LakeFormation) BatchGrantPermissionsRequest(input *BatchGrantPermissionsInput) (req *request.Request, output *BatchGrantPermissionsOutput) { + op := &request.Operation{ + Name: opBatchGrantPermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGrantPermissionsInput{} + } + + output = &BatchGrantPermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGrantPermissions API operation for AWS Lake Formation. +// +// Batch operation to grant permissions to the principal. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lake Formation's +// API operation BatchGrantPermissions for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/BatchGrantPermissions +func (c *LakeFormation) BatchGrantPermissions(input *BatchGrantPermissionsInput) (*BatchGrantPermissionsOutput, error) { + req, out := c.BatchGrantPermissionsRequest(input) + return out, req.Send() +} + +// BatchGrantPermissionsWithContext is the same as BatchGrantPermissions with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGrantPermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) BatchGrantPermissionsWithContext(ctx aws.Context, input *BatchGrantPermissionsInput, opts ...request.Option) (*BatchGrantPermissionsOutput, error) { + req, out := c.BatchGrantPermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchRevokePermissions = "BatchRevokePermissions" + +// BatchRevokePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the BatchRevokePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchRevokePermissions for more information on using the BatchRevokePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchRevokePermissionsRequest method. +// req, resp := client.BatchRevokePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/BatchRevokePermissions +func (c *LakeFormation) BatchRevokePermissionsRequest(input *BatchRevokePermissionsInput) (req *request.Request, output *BatchRevokePermissionsOutput) { + op := &request.Operation{ + Name: opBatchRevokePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchRevokePermissionsInput{} + } + + output = &BatchRevokePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchRevokePermissions API operation for AWS Lake Formation. +// +// Batch operation to revoke permissions from the principal. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lake Formation's +// API operation BatchRevokePermissions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. 
+// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/BatchRevokePermissions +func (c *LakeFormation) BatchRevokePermissions(input *BatchRevokePermissionsInput) (*BatchRevokePermissionsOutput, error) { + req, out := c.BatchRevokePermissionsRequest(input) + return out, req.Send() +} + +// BatchRevokePermissionsWithContext is the same as BatchRevokePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See BatchRevokePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) BatchRevokePermissionsWithContext(ctx aws.Context, input *BatchRevokePermissionsInput, opts ...request.Option) (*BatchRevokePermissionsOutput, error) { + req, out := c.BatchRevokePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterResource = "DeregisterResource" + +// DeregisterResourceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterResource for more information on using the DeregisterResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterResourceRequest method. +// req, resp := client.DeregisterResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/DeregisterResource +func (c *LakeFormation) DeregisterResourceRequest(input *DeregisterResourceInput) (req *request.Request, output *DeregisterResourceOutput) { + op := &request.Operation{ + Name: opDeregisterResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterResourceInput{} + } + + output = &DeregisterResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeregisterResource API operation for AWS Lake Formation. +// +// Deregisters the resource as managed by the Data Catalog. +// +// When you deregister a path, Lake Formation removes the path from the inline +// policy attached to your service-linked role. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lake Formation's +// API operation DeregisterResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/DeregisterResource +func (c *LakeFormation) DeregisterResource(input *DeregisterResourceInput) (*DeregisterResourceOutput, error) { + req, out := c.DeregisterResourceRequest(input) + return out, req.Send() +} + +// DeregisterResourceWithContext is the same as DeregisterResource with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) DeregisterResourceWithContext(ctx aws.Context, input *DeregisterResourceInput, opts ...request.Option) (*DeregisterResourceOutput, error) { + req, out := c.DeregisterResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeResource = "DescribeResource" + +// DescribeResourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeResource for more information on using the DescribeResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeResourceRequest method. +// req, resp := client.DescribeResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/DescribeResource +func (c *LakeFormation) DescribeResourceRequest(input *DescribeResourceInput) (req *request.Request, output *DescribeResourceOutput) { + op := &request.Operation{ + Name: opDescribeResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResourceInput{} + } + + output = &DescribeResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeResource API operation for AWS Lake Formation. +// +// Retrieves the current data access role for the given resource registered +// in AWS Lake Formation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lake Formation's +// API operation DescribeResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. 
+// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/DescribeResource +func (c *LakeFormation) DescribeResource(input *DescribeResourceInput) (*DescribeResourceOutput, error) { + req, out := c.DescribeResourceRequest(input) + return out, req.Send() +} + +// DescribeResourceWithContext is the same as DescribeResource with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) DescribeResourceWithContext(ctx aws.Context, input *DescribeResourceInput, opts ...request.Option) (*DescribeResourceOutput, error) { + req, out := c.DescribeResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDataLakeSettings = "GetDataLakeSettings" + +// GetDataLakeSettingsRequest generates a "aws/request.Request" representing the +// client's request for the GetDataLakeSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDataLakeSettings for more information on using the GetDataLakeSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDataLakeSettingsRequest method. +// req, resp := client.GetDataLakeSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GetDataLakeSettings +func (c *LakeFormation) GetDataLakeSettingsRequest(input *GetDataLakeSettingsInput) (req *request.Request, output *GetDataLakeSettingsOutput) { + op := &request.Operation{ + Name: opGetDataLakeSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataLakeSettingsInput{} + } + + output = &GetDataLakeSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDataLakeSettings API operation for AWS Lake Formation. +// +// The AWS Lake Formation principal. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lake Formation's +// API operation GetDataLakeSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. 
+// +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GetDataLakeSettings +func (c *LakeFormation) GetDataLakeSettings(input *GetDataLakeSettingsInput) (*GetDataLakeSettingsOutput, error) { + req, out := c.GetDataLakeSettingsRequest(input) + return out, req.Send() +} + +// GetDataLakeSettingsWithContext is the same as GetDataLakeSettings with the addition of +// the ability to pass a context and additional request options. +// +// See GetDataLakeSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) GetDataLakeSettingsWithContext(ctx aws.Context, input *GetDataLakeSettingsInput, opts ...request.Option) (*GetDataLakeSettingsOutput, error) { + req, out := c.GetDataLakeSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetEffectivePermissionsForPath = "GetEffectivePermissionsForPath" + +// GetEffectivePermissionsForPathRequest generates a "aws/request.Request" representing the +// client's request for the GetEffectivePermissionsForPath operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEffectivePermissionsForPath for more information on using the GetEffectivePermissionsForPath +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEffectivePermissionsForPathRequest method. +// req, resp := client.GetEffectivePermissionsForPathRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GetEffectivePermissionsForPath +func (c *LakeFormation) GetEffectivePermissionsForPathRequest(input *GetEffectivePermissionsForPathInput) (req *request.Request, output *GetEffectivePermissionsForPathOutput) { + op := &request.Operation{ + Name: opGetEffectivePermissionsForPath, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetEffectivePermissionsForPathInput{} + } + + output = &GetEffectivePermissionsForPathOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetEffectivePermissionsForPath API operation for AWS Lake Formation. +// +// Returns the permissions for a specified table or database resource located +// at a path in Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Lake Formation's +// API operation GetEffectivePermissionsForPath for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeEntityNotFoundException "EntityNotFoundException" +// A specified entity does not exist +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GetEffectivePermissionsForPath +func (c *LakeFormation) GetEffectivePermissionsForPath(input *GetEffectivePermissionsForPathInput) (*GetEffectivePermissionsForPathOutput, error) { + req, out := c.GetEffectivePermissionsForPathRequest(input) + return out, req.Send() +} + +// GetEffectivePermissionsForPathWithContext is the same as GetEffectivePermissionsForPath with the addition of +// the ability to pass a context and additional request options. +// +// See GetEffectivePermissionsForPath for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) GetEffectivePermissionsForPathWithContext(ctx aws.Context, input *GetEffectivePermissionsForPathInput, opts ...request.Option) (*GetEffectivePermissionsForPathOutput, error) { + req, out := c.GetEffectivePermissionsForPathRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetEffectivePermissionsForPathPages iterates over the pages of a GetEffectivePermissionsForPath operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetEffectivePermissionsForPath method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetEffectivePermissionsForPath operation. +// pageNum := 0 +// err := client.GetEffectivePermissionsForPathPages(params, +// func(page *lakeformation.GetEffectivePermissionsForPathOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LakeFormation) GetEffectivePermissionsForPathPages(input *GetEffectivePermissionsForPathInput, fn func(*GetEffectivePermissionsForPathOutput, bool) bool) error { + return c.GetEffectivePermissionsForPathPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetEffectivePermissionsForPathPagesWithContext same as GetEffectivePermissionsForPathPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *LakeFormation) GetEffectivePermissionsForPathPagesWithContext(ctx aws.Context, input *GetEffectivePermissionsForPathInput, fn func(*GetEffectivePermissionsForPathOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *GetEffectivePermissionsForPathInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.GetEffectivePermissionsForPathRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*GetEffectivePermissionsForPathOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opGrantPermissions = "GrantPermissions"
+
+// GrantPermissionsRequest generates a "aws/request.Request" representing the
+// client's request for the GrantPermissions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GrantPermissions for more information on using the GrantPermissions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GrantPermissionsRequest method.
+// req, resp := client.GrantPermissionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GrantPermissions
+func (c *LakeFormation) GrantPermissionsRequest(input *GrantPermissionsInput) (req *request.Request, output *GrantPermissionsOutput) {
+	op := &request.Operation{
+		Name:       opGrantPermissions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GrantPermissionsInput{}
+	}
+
+	output = &GrantPermissionsOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// GrantPermissions API operation for AWS Lake Formation.
+//
+// Grants permissions to the principal to access metadata in the Data Catalog
+// and data organized in underlying data storage such as Amazon S3.
+//
+// For information about permissions, see Security and Access Control to Metadata
+// and Data (https://docs.aws.amazon.com/lake-formation/latest/dg/security-data-access.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation GrantPermissions for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+// Two processes are trying to modify a resource simultaneously.
+//
+// * ErrCodeEntityNotFoundException "EntityNotFoundException"
+// A specified entity does not exist.
+//
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
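+//
+// A hedged sketch of a grant call; the principal ARN, database name, and
+// permission string are illustrative, and the Resource.Database field is
+// assumed from the service model rather than shown in this file:
+//
+// _, err := client.GrantPermissions(&lakeformation.GrantPermissionsInput{
+// Principal: &lakeformation.DataLakePrincipal{
+// DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
+// },
+// Resource: &lakeformation.Resource{
+// Database: &lakeformation.DatabaseResource{Name: aws.String("exampledb")},
+// },
+// Permissions: []*string{aws.String("SELECT")},
+// })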
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/GrantPermissions
+func (c *LakeFormation) GrantPermissions(input *GrantPermissionsInput) (*GrantPermissionsOutput, error) {
+	req, out := c.GrantPermissionsRequest(input)
+	return out, req.Send()
+}
+
+// GrantPermissionsWithContext is the same as GrantPermissions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GrantPermissions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) GrantPermissionsWithContext(ctx aws.Context, input *GrantPermissionsInput, opts ...request.Option) (*GrantPermissionsOutput, error) {
+	req, out := c.GrantPermissionsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListPermissions = "ListPermissions"
+
+// ListPermissionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListPermissions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListPermissions for more information on using the ListPermissions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListPermissionsRequest method.
+// req, resp := client.ListPermissionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/ListPermissions
+func (c *LakeFormation) ListPermissionsRequest(input *ListPermissionsInput) (req *request.Request, output *ListPermissionsOutput) {
+	op := &request.Operation{
+		Name:       opListPermissions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListPermissionsInput{}
+	}
+
+	output = &ListPermissionsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListPermissions API operation for AWS Lake Formation.
+//
+// Returns a list of the principal permissions on the resource, filtered by
+// the permissions of the caller. For example, if you are granted an ALTER permission,
+// you are able to see only the principal permissions for ALTER.
+//
+// This operation returns only those permissions that have been explicitly granted.
+//
+// For information about permissions, see Security and Access Control to Metadata
+// and Data (https://docs.aws.amazon.com/lake-formation/latest/dg/security-data-access.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
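+//
+// A hedged sketch of listing one principal's permissions (the ARN is
+// illustrative):
+//
+// out, err := client.ListPermissions(&lakeformation.ListPermissionsInput{
+// Principal: &lakeformation.DataLakePrincipal{
+// DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
+// },
+// })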
+// +// See the AWS API reference guide for AWS Lake Formation's +// API operation ListPermissions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// The input provided was not valid. +// +// * ErrCodeOperationTimeoutException "OperationTimeoutException" +// The operation timed out. +// +// * ErrCodeInternalServiceException "InternalServiceException" +// An internal service error occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/ListPermissions +func (c *LakeFormation) ListPermissions(input *ListPermissionsInput) (*ListPermissionsOutput, error) { + req, out := c.ListPermissionsRequest(input) + return out, req.Send() +} + +// ListPermissionsWithContext is the same as ListPermissions with the addition of +// the ability to pass a context and additional request options. +// +// See ListPermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) ListPermissionsWithContext(ctx aws.Context, input *ListPermissionsInput, opts ...request.Option) (*ListPermissionsOutput, error) { + req, out := c.ListPermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPermissionsPages iterates over the pages of a ListPermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPermissions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPermissions operation. +// pageNum := 0 +// err := client.ListPermissionsPages(params, +// func(page *lakeformation.ListPermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LakeFormation) ListPermissionsPages(input *ListPermissionsInput, fn func(*ListPermissionsOutput, bool) bool) error { + return c.ListPermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPermissionsPagesWithContext same as ListPermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LakeFormation) ListPermissionsPagesWithContext(ctx aws.Context, input *ListPermissionsInput, fn func(*ListPermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
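+			// Hand the prepared request to the paginator, which advances the
+			// NextToken input between successive calls.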
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListPermissionsOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opListResources = "ListResources"
+
+// ListResourcesRequest generates a "aws/request.Request" representing the
+// client's request for the ListResources operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListResources for more information on using the ListResources
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListResourcesRequest method.
+// req, resp := client.ListResourcesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/ListResources
+func (c *LakeFormation) ListResourcesRequest(input *ListResourcesInput) (req *request.Request, output *ListResourcesOutput) {
+	op := &request.Operation{
+		Name:       opListResources,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListResourcesInput{}
+	}
+
+	output = &ListResourcesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListResources API operation for AWS Lake Formation.
+//
+// Lists the resources registered to be managed by the Data Catalog.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation ListResources for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeOperationTimeoutException "OperationTimeoutException"
+// The operation timed out.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/ListResources
+func (c *LakeFormation) ListResources(input *ListResourcesInput) (*ListResourcesOutput, error) {
+	req, out := c.ListResourcesRequest(input)
+	return out, req.Send()
+}
+
+// ListResourcesWithContext is the same as ListResources with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListResources for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) ListResourcesWithContext(ctx aws.Context, input *ListResourcesInput, opts ...request.Option) (*ListResourcesOutput, error) {
+	req, out := c.ListResourcesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
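+	// Send is synchronous; cancellation and deadlines propagate from the
+	// context attached above.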
+	return out, req.Send()
+}
+
+// ListResourcesPages iterates over the pages of a ListResources operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListResources method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListResources operation.
+// pageNum := 0
+// err := client.ListResourcesPages(params,
+// func(page *lakeformation.ListResourcesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *LakeFormation) ListResourcesPages(input *ListResourcesInput, fn func(*ListResourcesOutput, bool) bool) error {
+	return c.ListResourcesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListResourcesPagesWithContext same as ListResourcesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) ListResourcesPagesWithContext(ctx aws.Context, input *ListResourcesInput, fn func(*ListResourcesOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListResourcesInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListResourcesRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListResourcesOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opPutDataLakeSettings = "PutDataLakeSettings"
+
+// PutDataLakeSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the PutDataLakeSettings operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutDataLakeSettings for more information on using the PutDataLakeSettings
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutDataLakeSettingsRequest method.
+// req, resp := client.PutDataLakeSettingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/PutDataLakeSettings
+func (c *LakeFormation) PutDataLakeSettingsRequest(input *PutDataLakeSettingsInput) (req *request.Request, output *PutDataLakeSettingsOutput) {
+	op := &request.Operation{
+		Name:       opPutDataLakeSettings,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDataLakeSettingsInput{}
+	}
+
+	output = &PutDataLakeSettingsOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutDataLakeSettings API operation for AWS Lake Formation.
+//
+// Sets the AWS Lake Formation data lake settings, such as the list of data
+// lake administrators and the default database and table permissions.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation PutDataLakeSettings for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/PutDataLakeSettings
+func (c *LakeFormation) PutDataLakeSettings(input *PutDataLakeSettingsInput) (*PutDataLakeSettingsOutput, error) {
+	req, out := c.PutDataLakeSettingsRequest(input)
+	return out, req.Send()
+}
+
+// PutDataLakeSettingsWithContext is the same as PutDataLakeSettings with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutDataLakeSettings for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) PutDataLakeSettingsWithContext(ctx aws.Context, input *PutDataLakeSettingsInput, opts ...request.Option) (*PutDataLakeSettingsOutput, error) {
+	req, out := c.PutDataLakeSettingsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opRegisterResource = "RegisterResource"
+
+// RegisterResourceRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RegisterResource for more information on using the RegisterResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the RegisterResourceRequest method.
+// req, resp := client.RegisterResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/RegisterResource
+func (c *LakeFormation) RegisterResourceRequest(input *RegisterResourceInput) (req *request.Request, output *RegisterResourceOutput) {
+	op := &request.Operation{
+		Name:       opRegisterResource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterResourceInput{}
+	}
+
+	output = &RegisterResourceOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// RegisterResource API operation for AWS Lake Formation.
+//
+// Registers the resource as managed by the Data Catalog.
+//
+// To add or update data, Lake Formation needs read/write access to the chosen
+// Amazon S3 path.
Choose a role that you know has permission to do this, or
+// choose the AWSServiceRoleForLakeFormationDataAccess service-linked role.
+// When you register the first Amazon S3 path, the service-linked role and a
+// new inline policy are created on your behalf. Lake Formation adds the first
+// path to the inline policy and attaches it to the service-linked role. When
+// you register subsequent paths, Lake Formation adds the path to the existing
+// policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation RegisterResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeOperationTimeoutException "OperationTimeoutException"
+// The operation timed out.
+//
+// * ErrCodeAlreadyExistsException "AlreadyExistsException"
+// A resource to be created or added already exists.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/RegisterResource
+func (c *LakeFormation) RegisterResource(input *RegisterResourceInput) (*RegisterResourceOutput, error) {
+	req, out := c.RegisterResourceRequest(input)
+	return out, req.Send()
+}
+
+// RegisterResourceWithContext is the same as RegisterResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RegisterResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) RegisterResourceWithContext(ctx aws.Context, input *RegisterResourceInput, opts ...request.Option) (*RegisterResourceOutput, error) {
+	req, out := c.RegisterResourceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opRevokePermissions = "RevokePermissions"
+
+// RevokePermissionsRequest generates a "aws/request.Request" representing the
+// client's request for the RevokePermissions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RevokePermissions for more information on using the RevokePermissions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the RevokePermissionsRequest method.
+// req, resp := client.RevokePermissionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/RevokePermissions
+func (c *LakeFormation) RevokePermissionsRequest(input *RevokePermissionsInput) (req *request.Request, output *RevokePermissionsOutput) {
+	op := &request.Operation{
+		Name:       opRevokePermissions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RevokePermissionsInput{}
+	}
+
+	output = &RevokePermissionsOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// RevokePermissions API operation for AWS Lake Formation.
+//
+// Revokes the principal's permissions to access metadata in the Data Catalog
+// and data organized in underlying data storage such as Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation RevokePermissions for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+// Two processes are trying to modify a resource simultaneously.
+//
+// * ErrCodeEntityNotFoundException "EntityNotFoundException"
+// A specified entity does not exist.
+//
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/RevokePermissions
+func (c *LakeFormation) RevokePermissions(input *RevokePermissionsInput) (*RevokePermissionsOutput, error) {
+	req, out := c.RevokePermissionsRequest(input)
+	return out, req.Send()
+}
+
+// RevokePermissionsWithContext is the same as RevokePermissions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RevokePermissions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) RevokePermissionsWithContext(ctx aws.Context, input *RevokePermissionsInput, opts ...request.Option) (*RevokePermissionsOutput, error) {
+	req, out := c.RevokePermissionsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateResource = "UpdateResource"
+
+// UpdateResourceRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateResource for more information on using the UpdateResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateResourceRequest method.
+// req, resp := client.UpdateResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/UpdateResource
+func (c *LakeFormation) UpdateResourceRequest(input *UpdateResourceInput) (req *request.Request, output *UpdateResourceOutput) {
+	op := &request.Operation{
+		Name:       opUpdateResource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateResourceInput{}
+	}
+
+	output = &UpdateResourceOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// UpdateResource API operation for AWS Lake Formation.
+//
+// Updates the data access role used for vending access to the given (registered)
+// resource in AWS Lake Formation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Lake Formation's
+// API operation UpdateResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The input provided was not valid.
+//
+// * ErrCodeInternalServiceException "InternalServiceException"
+// An internal service error occurred.
+//
+// * ErrCodeOperationTimeoutException "OperationTimeoutException"
+// The operation timed out.
+//
+// * ErrCodeEntityNotFoundException "EntityNotFoundException"
+// A specified entity does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/UpdateResource
+func (c *LakeFormation) UpdateResource(input *UpdateResourceInput) (*UpdateResourceOutput, error) {
+	req, out := c.UpdateResourceRequest(input)
+	return out, req.Send()
+}
+
+// UpdateResourceWithContext is the same as UpdateResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *LakeFormation) UpdateResourceWithContext(ctx aws.Context, input *UpdateResourceInput, opts ...request.Option) (*UpdateResourceOutput, error) {
+	req, out := c.UpdateResourceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+type BatchGrantPermissionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier for the Data Catalog. By default, the account ID. The Data
+	// Catalog is the persistent metadata store. It contains database definitions,
+	// table definitions, and other control information to manage your AWS Lake
+	// Formation environment.
+	CatalogId *string `min:"1" type:"string"`
+
+	// A list of up to 20 entries for resource permissions to be granted by batch
+	// operation to the principal.
+ // + // Entries is a required field + Entries []*BatchPermissionsRequestEntry `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGrantPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGrantPermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGrantPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGrantPermissionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchGrantPermissionsInput) SetCatalogId(v string) *BatchGrantPermissionsInput { + s.CatalogId = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *BatchGrantPermissionsInput) SetEntries(v []*BatchPermissionsRequestEntry) *BatchGrantPermissionsInput { + s.Entries = v + return s +} + +type BatchGrantPermissionsOutput struct { + _ struct{} `type:"structure"` + + // A list of failures to grant permissions to the resources. + Failures []*BatchPermissionsFailureEntry `type:"list"` +} + +// String returns the string representation +func (s BatchGrantPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGrantPermissionsOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *BatchGrantPermissionsOutput) SetFailures(v []*BatchPermissionsFailureEntry) *BatchGrantPermissionsOutput { + s.Failures = v + return s +} + +// A list of failures when performing a batch grant or batch revoke operation. +type BatchPermissionsFailureEntry struct { + _ struct{} `type:"structure"` + + // An error message that applies to the failure of the entry. + Error *ErrorDetail `type:"structure"` + + // An identifier for an entry of the batch request. + RequestEntry *BatchPermissionsRequestEntry `type:"structure"` +} + +// String returns the string representation +func (s BatchPermissionsFailureEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPermissionsFailureEntry) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *BatchPermissionsFailureEntry) SetError(v *ErrorDetail) *BatchPermissionsFailureEntry { + s.Error = v + return s +} + +// SetRequestEntry sets the RequestEntry field's value. +func (s *BatchPermissionsFailureEntry) SetRequestEntry(v *BatchPermissionsRequestEntry) *BatchPermissionsFailureEntry { + s.RequestEntry = v + return s +} + +// A permission to a resource granted by batch operation to the principal. +type BatchPermissionsRequestEntry struct { + _ struct{} `type:"structure"` + + // A unique identifier for the batch permissions request entry. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // The permissions to be granted. 
+ Permissions []*string `type:"list"` + + // Indicates if the option to pass permissions is granted. + PermissionsWithGrantOption []*string `type:"list"` + + // The principal to be granted a permission. + Principal *DataLakePrincipal `type:"structure"` + + // The resource to which the principal is to be granted a permission. + Resource *Resource `type:"structure"` +} + +// String returns the string representation +func (s BatchPermissionsRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPermissionsRequestEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchPermissionsRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchPermissionsRequestEntry"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + if s.Resource != nil { + if err := s.Resource.Validate(); err != nil { + invalidParams.AddNested("Resource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *BatchPermissionsRequestEntry) SetId(v string) *BatchPermissionsRequestEntry { + s.Id = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *BatchPermissionsRequestEntry) SetPermissions(v []*string) *BatchPermissionsRequestEntry { + s.Permissions = v + return s +} + +// SetPermissionsWithGrantOption sets the PermissionsWithGrantOption field's value. +func (s *BatchPermissionsRequestEntry) SetPermissionsWithGrantOption(v []*string) *BatchPermissionsRequestEntry { + s.PermissionsWithGrantOption = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *BatchPermissionsRequestEntry) SetPrincipal(v *DataLakePrincipal) *BatchPermissionsRequestEntry { + s.Principal = v + return s +} + +// SetResource sets the Resource field's value. +func (s *BatchPermissionsRequestEntry) SetResource(v *Resource) *BatchPermissionsRequestEntry { + s.Resource = v + return s +} + +type BatchRevokePermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + CatalogId *string `min:"1" type:"string"` + + // A list of up to 20 entries for resource permissions to be revoked by batch + // operation to the principal. + // + // Entries is a required field + Entries []*BatchPermissionsRequestEntry `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchRevokePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchRevokePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
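+//
+// A hedged sketch of pre-flight validation before sending a batch revoke;
+// "input" stands for a previously built BatchRevokePermissionsInput:
+//
+// if err := input.Validate(); err != nil {
+// return err // surface bad parameters before any network call
+// }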
+func (s *BatchRevokePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchRevokePermissionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchRevokePermissionsInput) SetCatalogId(v string) *BatchRevokePermissionsInput { + s.CatalogId = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *BatchRevokePermissionsInput) SetEntries(v []*BatchPermissionsRequestEntry) *BatchRevokePermissionsInput { + s.Entries = v + return s +} + +type BatchRevokePermissionsOutput struct { + _ struct{} `type:"structure"` + + // A list of failures to revoke permissions to the resources. + Failures []*BatchPermissionsFailureEntry `type:"list"` +} + +// String returns the string representation +func (s BatchRevokePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchRevokePermissionsOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *BatchRevokePermissionsOutput) SetFailures(v []*BatchPermissionsFailureEntry) *BatchRevokePermissionsOutput { + s.Failures = v + return s +} + +// A structure for the catalog object. +type CatalogResource struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CatalogResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CatalogResource) GoString() string { + return s.String() +} + +// A wildcard object, consisting of an optional list of excluded column names +// or indexes. +type ColumnWildcard struct { + _ struct{} `type:"structure"` + + // Excludes column names. Any column with this name will be excluded. + ExcludedColumnNames []*string `type:"list"` +} + +// String returns the string representation +func (s ColumnWildcard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnWildcard) GoString() string { + return s.String() +} + +// SetExcludedColumnNames sets the ExcludedColumnNames field's value. +func (s *ColumnWildcard) SetExcludedColumnNames(v []*string) *ColumnWildcard { + s.ExcludedColumnNames = v + return s +} + +// The AWS Lake Formation principal. +type DataLakePrincipal struct { + _ struct{} `type:"structure"` + + // An identifier for the AWS Lake Formation principal. + DataLakePrincipalIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DataLakePrincipal) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataLakePrincipal) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
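+//
+// A hedged sketch of building and checking a principal (the ARN is
+// illustrative):
+//
+// p := (&lakeformation.DataLakePrincipal{}).
+// SetDataLakePrincipalIdentifier("arn:aws:iam::123456789012:role/ExampleRole")
+// if err := p.Validate(); err != nil {
+// // handle the invalid identifier
+// }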
+func (s *DataLakePrincipal) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DataLakePrincipal"}
+	if s.DataLakePrincipalIdentifier != nil && len(*s.DataLakePrincipalIdentifier) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("DataLakePrincipalIdentifier", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDataLakePrincipalIdentifier sets the DataLakePrincipalIdentifier field's value.
+func (s *DataLakePrincipal) SetDataLakePrincipalIdentifier(v string) *DataLakePrincipal {
+	s.DataLakePrincipalIdentifier = &v
+	return s
+}
+
+// The AWS Lake Formation data lake settings, including the data lake administrators
+// and the default permissions applied to newly created databases and tables.
+type DataLakeSettings struct {
+	_ struct{} `type:"structure"`
+
+	// A list of up to three principal permissions entries for default create database
+	// permissions.
+	CreateDatabaseDefaultPermissions []*PrincipalPermissions `type:"list"`
+
+	// A list of up to three principal permissions entries for default create table
+	// permissions.
+	CreateTableDefaultPermissions []*PrincipalPermissions `type:"list"`
+
+	// A list of AWS Lake Formation principals.
+	DataLakeAdmins []*DataLakePrincipal `type:"list"`
+}
+
+// String returns the string representation
+func (s DataLakeSettings) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataLakeSettings) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DataLakeSettings) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DataLakeSettings"}
+	if s.CreateDatabaseDefaultPermissions != nil {
+		for i, v := range s.CreateDatabaseDefaultPermissions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CreateDatabaseDefaultPermissions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.CreateTableDefaultPermissions != nil {
+		for i, v := range s.CreateTableDefaultPermissions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CreateTableDefaultPermissions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.DataLakeAdmins != nil {
+		for i, v := range s.DataLakeAdmins {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataLakeAdmins", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCreateDatabaseDefaultPermissions sets the CreateDatabaseDefaultPermissions field's value.
+func (s *DataLakeSettings) SetCreateDatabaseDefaultPermissions(v []*PrincipalPermissions) *DataLakeSettings {
+	s.CreateDatabaseDefaultPermissions = v
+	return s
+}
+
+// SetCreateTableDefaultPermissions sets the CreateTableDefaultPermissions field's value.
+func (s *DataLakeSettings) SetCreateTableDefaultPermissions(v []*PrincipalPermissions) *DataLakeSettings {
+	s.CreateTableDefaultPermissions = v
+	return s
+}
+
+// SetDataLakeAdmins sets the DataLakeAdmins field's value.
+func (s *DataLakeSettings) SetDataLakeAdmins(v []*DataLakePrincipal) *DataLakeSettings {
+	s.DataLakeAdmins = v
+	return s
+}
+
+// A structure for a data location object where permissions are granted or revoked.
+type DataLocationResource struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) that uniquely identifies the data location
+	// resource.
+ // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DataLocationResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataLocationResource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataLocationResource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataLocationResource"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DataLocationResource) SetResourceArn(v string) *DataLocationResource { + s.ResourceArn = &v + return s +} + +// A structure for the database object. +type DatabaseResource struct { + _ struct{} `type:"structure"` + + // The name of the database resource. Unique to the Data Catalog. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DatabaseResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatabaseResource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatabaseResource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatabaseResource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DatabaseResource) SetName(v string) *DatabaseResource { + s.Name = &v + return s +} + +type DeregisterResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to deregister. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DeregisterResourceInput) SetResourceArn(v string) *DeregisterResourceInput { + s.ResourceArn = &v + return s +} + +type DeregisterResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterResourceOutput) GoString() string { + return s.String() +} + +type DescribeResourceInput struct { + _ struct{} `type:"structure"` + + // The resource ARN. 
+ // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DescribeResourceInput) SetResourceArn(v string) *DescribeResourceInput { + s.ResourceArn = &v + return s +} + +type DescribeResourceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing information about an AWS Lake Formation resource. + ResourceInfo *ResourceInfo `type:"structure"` +} + +// String returns the string representation +func (s DescribeResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceOutput) GoString() string { + return s.String() +} + +// SetResourceInfo sets the ResourceInfo field's value. +func (s *DescribeResourceOutput) SetResourceInfo(v *ResourceInfo) *DescribeResourceOutput { + s.ResourceInfo = v + return s +} + +// Contains details about an error. +type ErrorDetail struct { + _ struct{} `type:"structure"` + + // The code associated with this error. + ErrorCode *string `min:"1" type:"string"` + + // A message describing the error. + ErrorMessage *string `type:"string"` +} + +// String returns the string representation +func (s ErrorDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDetail) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ErrorDetail) SetErrorCode(v string) *ErrorDetail { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { + s.ErrorMessage = &v + return s +} + +// This structure describes the filtering of columns in a table based on a filter +// condition. +type FilterCondition struct { + _ struct{} `type:"structure"` + + // The comparison operator used in the filter condition. + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // The field to filter in the filter condition. + Field *string `type:"string" enum:"FieldNameString"` + + // A string with values used in evaluating the filter condition. + StringValueList []*string `type:"list"` +} + +// String returns the string representation +func (s FilterCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterCondition) GoString() string { + return s.String() +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *FilterCondition) SetComparisonOperator(v string) *FilterCondition { + s.ComparisonOperator = &v + return s +} + +// SetField sets the Field field's value. +func (s *FilterCondition) SetField(v string) *FilterCondition { + s.Field = &v + return s +} + +// SetStringValueList sets the StringValueList field's value. 
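+//
+// Because every setter returns its receiver, a FilterCondition can be built
+// fluently; the field and operator strings below are illustrative enum values:
+//
+// cond := (&lakeformation.FilterCondition{}).
+// SetField("RESOURCE_ARN").
+// SetComparisonOperator("CONTAINS").
+// SetStringValueList([]*string{aws.String("example")})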
+func (s *FilterCondition) SetStringValueList(v []*string) *FilterCondition {
+	s.StringValueList = v
+	return s
+}
+
+type GetDataLakeSettingsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier for the Data Catalog. By default, the account ID. The Data
+	// Catalog is the persistent metadata store. It contains database definitions,
+	// table definitions, and other control information to manage your AWS Lake
+	// Formation environment.
+	CatalogId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDataLakeSettingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDataLakeSettingsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDataLakeSettingsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDataLakeSettingsInput"}
+	if s.CatalogId != nil && len(*s.CatalogId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCatalogId sets the CatalogId field's value.
+func (s *GetDataLakeSettingsInput) SetCatalogId(v string) *GetDataLakeSettingsInput {
+	s.CatalogId = &v
+	return s
+}
+
+type GetDataLakeSettingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A structure representing the AWS Lake Formation data lake settings.
+	DataLakeSettings *DataLakeSettings `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDataLakeSettingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDataLakeSettingsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDataLakeSettings sets the DataLakeSettings field's value.
+func (s *GetDataLakeSettingsOutput) SetDataLakeSettings(v *DataLakeSettings) *GetDataLakeSettingsOutput {
+	s.DataLakeSettings = v
+	return s
+}
+
+type GetEffectivePermissionsForPathInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier for the Data Catalog. By default, the account ID. The Data
+	// Catalog is the persistent metadata store. It contains database definitions,
+	// table definitions, and other control information to manage your AWS Lake
+	// Formation environment.
+	CatalogId *string `min:"1" type:"string"`
+
+	// The maximum number of results to return.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// A continuation token, if this is not the first call to retrieve this list.
+	NextToken *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the resource for which you want to get
+	// permissions.
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetEffectivePermissionsForPathInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetEffectivePermissionsForPathInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
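+//
+// A hedged sketch of building the input with its setters and validating it
+// before the call (the ARN is illustrative):
+//
+// in := (&lakeformation.GetEffectivePermissionsForPathInput{}).
+// SetResourceArn("arn:aws:s3:::example-bucket").
+// SetMaxResults(50)
+// if err := in.Validate(); err != nil {
+// // reject locally instead of calling the service
+// }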
+func (s *GetEffectivePermissionsForPathInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEffectivePermissionsForPathInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GetEffectivePermissionsForPathInput) SetCatalogId(v string) *GetEffectivePermissionsForPathInput { + s.CatalogId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetEffectivePermissionsForPathInput) SetMaxResults(v int64) *GetEffectivePermissionsForPathInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetEffectivePermissionsForPathInput) SetNextToken(v string) *GetEffectivePermissionsForPathInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetEffectivePermissionsForPathInput) SetResourceArn(v string) *GetEffectivePermissionsForPathInput { + s.ResourceArn = &v + return s +} + +type GetEffectivePermissionsForPathOutput struct { + _ struct{} `type:"structure"` + + // A continuation token, if this is not the first call to retrieve this list. + NextToken *string `type:"string"` + + // A list of the permissions for the specified table or database resource located + // at the path in Amazon S3. + Permissions []*PrincipalResourcePermissions `type:"list"` +} + +// String returns the string representation +func (s GetEffectivePermissionsForPathOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEffectivePermissionsForPathOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetEffectivePermissionsForPathOutput) SetNextToken(v string) *GetEffectivePermissionsForPathOutput { + s.NextToken = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *GetEffectivePermissionsForPathOutput) SetPermissions(v []*PrincipalResourcePermissions) *GetEffectivePermissionsForPathOutput { + s.Permissions = v + return s +} + +type GrantPermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + CatalogId *string `min:"1" type:"string"` + + // The permissions granted to the principal on the resource. AWS Lake Formation + // defines privileges to grant and revoke access to metadata in the Data Catalog + // and data organized in underlying data storage such as Amazon S3. AWS Lake + // Formation requires that each principal be authorized to perform a specific + // task on AWS Lake Formation resources. + // + // Permissions is a required field + Permissions []*string `type:"list" required:"true"` + + // Indicates a list of the granted permissions that the principal may pass to + // other users. These permissions may only be a subset of the permissions granted + // in the Privileges. 
+ PermissionsWithGrantOption []*string `type:"list"` + + // The principal to be granted the permissions on the resource. Supported principals + // are IAM users or IAM roles, and they are defined by their principal type + // and their ARN. + // + // Note that if you define a resource with a particular ARN, then later delete, + // and recreate a resource with that same ARN, the resource maintains the permissions + // already granted. + // + // Principal is a required field + Principal *DataLakePrincipal `type:"structure" required:"true"` + + // The resource to which permissions are to be granted. Resources in AWS Lake + // Formation are the Data Catalog, databases, and tables. + // + // Resource is a required field + Resource *Resource `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GrantPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantPermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrantPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrantPermissionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.Permissions == nil { + invalidParams.Add(request.NewErrParamRequired("Permissions")) + } + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) + } + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + if s.Resource != nil { + if err := s.Resource.Validate(); err != nil { + invalidParams.AddNested("Resource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GrantPermissionsInput) SetCatalogId(v string) *GrantPermissionsInput { + s.CatalogId = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *GrantPermissionsInput) SetPermissions(v []*string) *GrantPermissionsInput { + s.Permissions = v + return s +} + +// SetPermissionsWithGrantOption sets the PermissionsWithGrantOption field's value. +func (s *GrantPermissionsInput) SetPermissionsWithGrantOption(v []*string) *GrantPermissionsInput { + s.PermissionsWithGrantOption = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *GrantPermissionsInput) SetPrincipal(v *DataLakePrincipal) *GrantPermissionsInput { + s.Principal = v + return s +} + +// SetResource sets the Resource field's value. +func (s *GrantPermissionsInput) SetResource(v *Resource) *GrantPermissionsInput { + s.Resource = v + return s +} + +type GrantPermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GrantPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantPermissionsOutput) GoString() string { + return s.String() +} + +type ListPermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. 
It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + CatalogId *string `min:"1" type:"string"` + + // The maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is not the first call to retrieve this list. + NextToken *string `type:"string"` + + // Specifies a principal to filter the permissions returned. + Principal *DataLakePrincipal `type:"structure"` + + // A resource where you will get a list of the principal permissions. + // + // This operation does not support getting privileges on a table with columns. + // Instead, call this operation on the table, and the operation returns the + // table and the table with columns. + Resource *Resource `type:"structure"` + + // Specifies a resource type to filter the permissions returned. + ResourceType *string `type:"string" enum:"DataLakeResourceType"` +} + +// String returns the string representation +func (s ListPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPermissionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + if s.Resource != nil { + if err := s.Resource.Validate(); err != nil { + invalidParams.AddNested("Resource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *ListPermissionsInput) SetCatalogId(v string) *ListPermissionsInput { + s.CatalogId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListPermissionsInput) SetMaxResults(v int64) *ListPermissionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPermissionsInput) SetNextToken(v string) *ListPermissionsInput { + s.NextToken = &v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *ListPermissionsInput) SetPrincipal(v *DataLakePrincipal) *ListPermissionsInput { + s.Principal = v + return s +} + +// SetResource sets the Resource field's value. +func (s *ListPermissionsInput) SetResource(v *Resource) *ListPermissionsInput { + s.Resource = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ListPermissionsInput) SetResourceType(v string) *ListPermissionsInput { + s.ResourceType = &v + return s +} + +type ListPermissionsOutput struct { + _ struct{} `type:"structure"` + + // A continuation token, if this is not the first call to retrieve this list. + NextToken *string `type:"string"` + + // A list of principals and their permissions on the resource for the specified + // principal and resource types.
+ PrincipalResourcePermissions []*PrincipalResourcePermissions `type:"list"` +} + +// String returns the string representation +func (s ListPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPermissionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPermissionsOutput) SetNextToken(v string) *ListPermissionsOutput { + s.NextToken = &v + return s +} + +// SetPrincipalResourcePermissions sets the PrincipalResourcePermissions field's value. +func (s *ListPermissionsOutput) SetPrincipalResourcePermissions(v []*PrincipalResourcePermissions) *ListPermissionsOutput { + s.PrincipalResourcePermissions = v + return s +} + +type ListResourcesInput struct { + _ struct{} `type:"structure"` + + // Any applicable row-level and/or column-level filtering conditions for the + // resources. + FilterConditionList []*FilterCondition `min:"1" type:"list"` + + // The maximum number of resource results. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is not the first call to retrieve these resources. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourcesInput"} + if s.FilterConditionList != nil && len(s.FilterConditionList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterConditionList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterConditionList sets the FilterConditionList field's value. +func (s *ListResourcesInput) SetFilterConditionList(v []*FilterCondition) *ListResourcesInput { + s.FilterConditionList = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResourcesInput) SetMaxResults(v int64) *ListResourcesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourcesInput) SetNextToken(v string) *ListResourcesInput { + s.NextToken = &v + return s +} + +type ListResourcesOutput struct { + _ struct{} `type:"structure"` + + // A continuation token, if this is not the first call to retrieve these resources. + NextToken *string `type:"string"` + + // A summary of the data lake resources. + ResourceInfoList []*ResourceInfo `type:"list"` +} + +// String returns the string representation +func (s ListResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourcesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourcesOutput) SetNextToken(v string) *ListResourcesOutput { + s.NextToken = &v + return s +} + +// SetResourceInfoList sets the ResourceInfoList field's value. +func (s *ListResourcesOutput) SetResourceInfoList(v []*ResourceInfo) *ListResourcesOutput { + s.ResourceInfoList = v + return s +} + +// Permissions granted to a principal. 
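// --- Editorial usage sketch (not part of the vendored patch) ---
// The Lake Formation List* inputs above paginate with MaxResults/NextToken and,
// unlike the Lambda operations later in this diff, have no generated *Pages
// helpers. A hand-rolled pagination loop, assuming a client `svc` and the
// ListResources operation method generated earlier in api.go:
//
//    input := &lakeformation.ListResourcesInput{}
//    input.SetMaxResults(50)
//    for {
//        out, err := svc.ListResources(input)
//        if err != nil {
//            log.Fatal(err)
//        }
//        for _, info := range out.ResourceInfoList {
//            fmt.Println(aws.StringValue(info.ResourceArn))
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        input.SetNextToken(aws.StringValue(out.NextToken))
//    }
// ----------------------------------------------------------------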
+type PrincipalPermissions struct { + _ struct{} `type:"structure"` + + // The permissions that are granted to the principal. + Permissions []*string `type:"list"` + + // The principal who is granted permissions. + Principal *DataLakePrincipal `type:"structure"` +} + +// String returns the string representation +func (s PrincipalPermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrincipalPermissions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PrincipalPermissions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PrincipalPermissions"} + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPermissions sets the Permissions field's value. +func (s *PrincipalPermissions) SetPermissions(v []*string) *PrincipalPermissions { + s.Permissions = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *PrincipalPermissions) SetPrincipal(v *DataLakePrincipal) *PrincipalPermissions { + s.Principal = v + return s +} + +// The permissions granted or revoked on a resource. +type PrincipalResourcePermissions struct { + _ struct{} `type:"structure"` + + // The permissions to be granted or revoked on the resource. + Permissions []*string `type:"list"` + + // Indicates whether to grant the ability to grant permissions (as a subset + // of permissions granted). + PermissionsWithGrantOption []*string `type:"list"` + + // The Data Lake principal to be granted or revoked permissions. + Principal *DataLakePrincipal `type:"structure"` + + // The resource where permissions are to be granted or revoked. + Resource *Resource `type:"structure"` +} + +// String returns the string representation +func (s PrincipalResourcePermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrincipalResourcePermissions) GoString() string { + return s.String() +} + +// SetPermissions sets the Permissions field's value. +func (s *PrincipalResourcePermissions) SetPermissions(v []*string) *PrincipalResourcePermissions { + s.Permissions = v + return s +} + +// SetPermissionsWithGrantOption sets the PermissionsWithGrantOption field's value. +func (s *PrincipalResourcePermissions) SetPermissionsWithGrantOption(v []*string) *PrincipalResourcePermissions { + s.PermissionsWithGrantOption = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *PrincipalResourcePermissions) SetPrincipal(v *DataLakePrincipal) *PrincipalResourcePermissions { + s.Principal = v + return s +} + +// SetResource sets the Resource field's value. +func (s *PrincipalResourcePermissions) SetResource(v *Resource) *PrincipalResourcePermissions { + s.Resource = v + return s +} + +type PutDataLakeSettingsInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + CatalogId *string `min:"1" type:"string"` + + // A list of AWS Lake Formation principals. 
+ // + // DataLakeSettings is a required field + DataLakeSettings *DataLakeSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutDataLakeSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDataLakeSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDataLakeSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDataLakeSettingsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DataLakeSettings == nil { + invalidParams.Add(request.NewErrParamRequired("DataLakeSettings")) + } + if s.DataLakeSettings != nil { + if err := s.DataLakeSettings.Validate(); err != nil { + invalidParams.AddNested("DataLakeSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *PutDataLakeSettingsInput) SetCatalogId(v string) *PutDataLakeSettingsInput { + s.CatalogId = &v + return s +} + +// SetDataLakeSettings sets the DataLakeSettings field's value. +func (s *PutDataLakeSettingsInput) SetDataLakeSettings(v *DataLakeSettings) *PutDataLakeSettingsInput { + s.DataLakeSettings = v + return s +} + +type PutDataLakeSettingsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDataLakeSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDataLakeSettingsOutput) GoString() string { + return s.String() +} + +type RegisterResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to register. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The identifier for the role. + RoleArn *string `type:"string"` + + // Designates a trusted caller, an IAM principal, by registering this caller + // with the Data Catalog. + UseServiceLinkedRole *bool `type:"boolean"` +} + +// String returns the string representation +func (s RegisterResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *RegisterResourceInput) SetResourceArn(v string) *RegisterResourceInput { + s.ResourceArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *RegisterResourceInput) SetRoleArn(v string) *RegisterResourceInput { + s.RoleArn = &v + return s +} + +// SetUseServiceLinkedRole sets the UseServiceLinkedRole field's value. 
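// --- Editorial usage sketch (not part of the vendored patch) ---
// Registering an S3 location, assuming a client `svc` and the RegisterResource
// operation method generated earlier in api.go. UseServiceLinkedRole and
// RoleArn are alternatives: either let Lake Formation use its service-linked
// role or name an IAM role explicitly (both ARNs below are placeholders):
//
//    input := &lakeformation.RegisterResourceInput{}
//    input.SetResourceArn("arn:aws:s3:::my-data-lake-bucket")
//    input.SetUseServiceLinkedRole(true)
//    // or: input.SetRoleArn("arn:aws:iam::123456789012:role/MyLakeFormationRole")
//    if err := input.Validate(); err != nil {
//        log.Fatal(err) // ResourceArn is the only required field
//    }
//    if _, err := svc.RegisterResource(input); err != nil {
//        log.Fatal(err)
//    }
// ----------------------------------------------------------------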
+func (s *RegisterResourceInput) SetUseServiceLinkedRole(v bool) *RegisterResourceInput { + s.UseServiceLinkedRole = &v + return s +} + +type RegisterResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterResourceOutput) GoString() string { + return s.String() +} + +// A structure for the resource. +type Resource struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + Catalog *CatalogResource `type:"structure"` + + // The location of an Amazon S3 path where permissions are granted or revoked. + DataLocation *DataLocationResource `type:"structure"` + + // The database for the resource. Unique to the Data Catalog. A database is + // a set of associated table definitions organized into a logical group. You + // can Grant and Revoke database permissions to a principal. + Database *DatabaseResource `type:"structure"` + + // The table for the resource. A table is a metadata definition that represents + // your data. You can Grant and Revoke table privileges to a principal. + Table *TableResource `type:"structure"` + + // The table with columns for the resource. A principal with permissions to + // this resource can select metadata from the columns of a table in the Data + // Catalog and the underlying data in Amazon S3. + TableWithColumns *TableWithColumnsResource `type:"structure"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Resource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Resource"} + if s.DataLocation != nil { + if err := s.DataLocation.Validate(); err != nil { + invalidParams.AddNested("DataLocation", err.(request.ErrInvalidParams)) + } + } + if s.Database != nil { + if err := s.Database.Validate(); err != nil { + invalidParams.AddNested("Database", err.(request.ErrInvalidParams)) + } + } + if s.Table != nil { + if err := s.Table.Validate(); err != nil { + invalidParams.AddNested("Table", err.(request.ErrInvalidParams)) + } + } + if s.TableWithColumns != nil { + if err := s.TableWithColumns.Validate(); err != nil { + invalidParams.AddNested("TableWithColumns", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalog sets the Catalog field's value. +func (s *Resource) SetCatalog(v *CatalogResource) *Resource { + s.Catalog = v + return s +} + +// SetDataLocation sets the DataLocation field's value. +func (s *Resource) SetDataLocation(v *DataLocationResource) *Resource { + s.DataLocation = v + return s +} + +// SetDatabase sets the Database field's value. +func (s *Resource) SetDatabase(v *DatabaseResource) *Resource { + s.Database = v + return s +} + +// SetTable sets the Table field's value. +func (s *Resource) SetTable(v *TableResource) *Resource { + s.Table = v + return s +} + +// SetTableWithColumns sets the TableWithColumns field's value. 
+func (s *Resource) SetTableWithColumns(v *TableWithColumnsResource) *Resource { + s.TableWithColumns = v + return s +} + +// A structure containing information about an AWS Lake Formation resource. +type ResourceInfo struct { + _ struct{} `type:"structure"` + + // The date and time the resource was last modified. + LastModified *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the resource. + ResourceArn *string `type:"string"` + + // The IAM role that registered a resource. + RoleArn *string `type:"string"` +} + +// String returns the string representation +func (s ResourceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceInfo) GoString() string { + return s.String() +} + +// SetLastModified sets the LastModified field's value. +func (s *ResourceInfo) SetLastModified(v time.Time) *ResourceInfo { + s.LastModified = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ResourceInfo) SetResourceArn(v string) *ResourceInfo { + s.ResourceArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *ResourceInfo) SetRoleArn(v string) *ResourceInfo { + s.RoleArn = &v + return s +} + +type RevokePermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Data Catalog. By default, the account ID. The Data + // Catalog is the persistent metadata store. It contains database definitions, + // table definitions, and other control information to manage your AWS Lake + // Formation environment. + CatalogId *string `min:"1" type:"string"` + + // The permissions to be revoked from the principal on the resource. For information + // about permissions, see Security and Access Control to Metadata and Data (https://docs-aws.amazon.com/michigan/latest/dg/security-data-access.html). + // + // Permissions is a required field + Permissions []*string `type:"list" required:"true"` + + // Indicates a list of permissions for which to revoke the grant option allowing + // the principal to pass permissions to other principals. + PermissionsWithGrantOption []*string `type:"list"` + + // The principal whose permissions on the resource are to be revoked. + // + // Principal is a required field + Principal *DataLakePrincipal `type:"structure" required:"true"` + + // The resource from which permissions are to be revoked. + // + // Resource is a required field + Resource *Resource `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RevokePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *RevokePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RevokePermissionsInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.Permissions == nil { + invalidParams.Add(request.NewErrParamRequired("Permissions")) + } + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) + } + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Principal != nil { + if err := s.Principal.Validate(); err != nil { + invalidParams.AddNested("Principal", err.(request.ErrInvalidParams)) + } + } + if s.Resource != nil { + if err := s.Resource.Validate(); err != nil { + invalidParams.AddNested("Resource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *RevokePermissionsInput) SetCatalogId(v string) *RevokePermissionsInput { + s.CatalogId = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *RevokePermissionsInput) SetPermissions(v []*string) *RevokePermissionsInput { + s.Permissions = v + return s +} + +// SetPermissionsWithGrantOption sets the PermissionsWithGrantOption field's value. +func (s *RevokePermissionsInput) SetPermissionsWithGrantOption(v []*string) *RevokePermissionsInput { + s.PermissionsWithGrantOption = v + return s +} + +// SetPrincipal sets the Principal field's value. +func (s *RevokePermissionsInput) SetPrincipal(v *DataLakePrincipal) *RevokePermissionsInput { + s.Principal = v + return s +} + +// SetResource sets the Resource field's value. +func (s *RevokePermissionsInput) SetResource(v *Resource) *RevokePermissionsInput { + s.Resource = v + return s +} + +type RevokePermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokePermissionsOutput) GoString() string { + return s.String() +} + +// A structure for the table object. A table is a metadata definition that represents +// your data. You can Grant and Revoke table privileges to a principal. +type TableResource struct { + _ struct{} `type:"structure"` + + // The name of the database for the table. Unique to a Data Catalog. A database + // is a set of associated table definitions organized into a logical group. + // You can Grant and Revoke database privileges to a principal. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TableResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableResource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TableResource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableResource"} + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *TableResource) SetDatabaseName(v string) *TableResource { + s.DatabaseName = &v + return s +} + +// SetName sets the Name field's value. +func (s *TableResource) SetName(v string) *TableResource { + s.Name = &v + return s +} + +// A structure for a table with columns object. This object is only used when +// granting a SELECT permission. +// +// This object must take a value for at least one of ColumnNames or ColumnWildcard. +type TableWithColumnsResource struct { + _ struct{} `type:"structure"` + + // The list of column names for the table. At least one of ColumnNames or ColumnWildcard + // is required. + ColumnNames []*string `type:"list"` + + // A wildcard specified by a ColumnWildcard object. At least one of ColumnNames + // or ColumnWildcard is required. + ColumnWildcard *ColumnWildcard `type:"structure"` + + // The name of the database for the table with columns resource. Unique to the + // Data Catalog. A database is a set of associated table definitions organized + // into a logical group. You can Grant and Revoke database privileges to a principal. + DatabaseName *string `min:"1" type:"string"` + + // The name of the table resource. A table is a metadata definition that represents + // your data. You can Grant and Revoke table privileges to a principal. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TableWithColumnsResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableWithColumnsResource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableWithColumnsResource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableWithColumnsResource"} + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumnNames sets the ColumnNames field's value. +func (s *TableWithColumnsResource) SetColumnNames(v []*string) *TableWithColumnsResource { + s.ColumnNames = v + return s +} + +// SetColumnWildcard sets the ColumnWildcard field's value. +func (s *TableWithColumnsResource) SetColumnWildcard(v *ColumnWildcard) *TableWithColumnsResource { + s.ColumnWildcard = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *TableWithColumnsResource) SetDatabaseName(v string) *TableWithColumnsResource { + s.DatabaseName = &v + return s +} + +// SetName sets the Name field's value.
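// --- Editorial usage sketch (not part of the vendored patch) ---
// Building a column-scoped resource for a SELECT grant with the types above.
// At least one of ColumnNames or ColumnWildcard must be set; the database and
// table names below are placeholders:
//
//    twc := &lakeformation.TableWithColumnsResource{}
//    twc.SetDatabaseName("sales_db")
//    twc.SetName("orders")
//    twc.SetColumnNames([]*string{aws.String("order_id"), aws.String("total")})
//
//    resource := &lakeformation.Resource{}
//    resource.SetTableWithColumns(twc)
//    if err := resource.Validate(); err != nil {
//        log.Fatal(err)
//    }
// ----------------------------------------------------------------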
+func (s *TableWithColumnsResource) SetName(v string) *TableWithColumnsResource { + s.Name = &v + return s +} + +type UpdateResourceInput struct { + _ struct{} `type:"structure"` + + // The resource ARN. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The new role to use for the given resource registered in AWS Lake Formation. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UpdateResourceInput) SetResourceArn(v string) *UpdateResourceInput { + s.ResourceArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateResourceInput) SetRoleArn(v string) *UpdateResourceInput { + s.RoleArn = &v + return s +} + +type UpdateResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateResourceOutput) GoString() string { + return s.String() +} + +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" + + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" + + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" + + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" + + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" + + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" + + // ComparisonOperatorContains is a ComparisonOperator enum value + ComparisonOperatorContains = "CONTAINS" + + // ComparisonOperatorNotContains is a ComparisonOperator enum value + ComparisonOperatorNotContains = "NOT_CONTAINS" + + // ComparisonOperatorBeginsWith is a ComparisonOperator enum value + ComparisonOperatorBeginsWith = "BEGINS_WITH" + + // ComparisonOperatorIn is a ComparisonOperator enum value + ComparisonOperatorIn = "IN" + + // ComparisonOperatorBetween is a ComparisonOperator enum value + ComparisonOperatorBetween = "BETWEEN" +) + +const ( + // DataLakeResourceTypeCatalog is a DataLakeResourceType enum value + DataLakeResourceTypeCatalog = "CATALOG" + + // DataLakeResourceTypeDatabase is a DataLakeResourceType enum value + DataLakeResourceTypeDatabase = "DATABASE" + + // DataLakeResourceTypeTable is a DataLakeResourceType enum value + DataLakeResourceTypeTable = "TABLE" + + // DataLakeResourceTypeDataLocation is a DataLakeResourceType enum value + DataLakeResourceTypeDataLocation = "DATA_LOCATION" +) + +const ( + // FieldNameStringResourceArn is a FieldNameString enum value + FieldNameStringResourceArn = 
"RESOURCE_ARN" + + // FieldNameStringRoleArn is a FieldNameString enum value + FieldNameStringRoleArn = "ROLE_ARN" + + // FieldNameStringLastModified is a FieldNameString enum value + FieldNameStringLastModified = "LAST_MODIFIED" +) + +const ( + // PermissionAll is a Permission enum value + PermissionAll = "ALL" + + // PermissionSelect is a Permission enum value + PermissionSelect = "SELECT" + + // PermissionAlter is a Permission enum value + PermissionAlter = "ALTER" + + // PermissionDrop is a Permission enum value + PermissionDrop = "DROP" + + // PermissionDelete is a Permission enum value + PermissionDelete = "DELETE" + + // PermissionInsert is a Permission enum value + PermissionInsert = "INSERT" + + // PermissionCreateDatabase is a Permission enum value + PermissionCreateDatabase = "CREATE_DATABASE" + + // PermissionCreateTable is a Permission enum value + PermissionCreateTable = "CREATE_TABLE" + + // PermissionDataLocationAccess is a Permission enum value + PermissionDataLocationAccess = "DATA_LOCATION_ACCESS" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/doc.go new file mode 100644 index 00000000000..a0c8dcf9993 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/doc.go @@ -0,0 +1,28 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package lakeformation provides the client and types for making API +// requests to AWS Lake Formation. +// +// Defines the public endpoint for the AWS Lake Formation service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31 for more information on this service. +// +// See lakeformation package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/lakeformation/ +// +// Using the Client +// +// To contact AWS Lake Formation with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Lake Formation client LakeFormation for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/lakeformation/#New +package lakeformation diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/errors.go new file mode 100644 index 00000000000..1bbdaf1289d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/errors.go @@ -0,0 +1,42 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lakeformation + +const ( + + // ErrCodeAlreadyExistsException for service response error code + // "AlreadyExistsException". + // + // A resource to be created or added already exists. + ErrCodeAlreadyExistsException = "AlreadyExistsException" + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // Two processes are trying to modify a resource simultaneously. 
+ ErrCodeConcurrentModificationException = "ConcurrentModificationException" + + // ErrCodeEntityNotFoundException for service response error code + // "EntityNotFoundException". + // + // A specified entity does not exist. + ErrCodeEntityNotFoundException = "EntityNotFoundException" + + // ErrCodeInternalServiceException for service response error code + // "InternalServiceException". + // + // An internal service error occurred. + ErrCodeInternalServiceException = "InternalServiceException" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInputException". + // + // The input provided was not valid. + ErrCodeInvalidInputException = "InvalidInputException" + + // ErrCodeOperationTimeoutException for service response error code + // "OperationTimeoutException". + // + // The operation timed out. + ErrCodeOperationTimeoutException = "OperationTimeoutException" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go new file mode 100644 index 00000000000..a13b061c64d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go @@ -0,0 +1,101 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lakeformation + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// LakeFormation provides the API operation methods for making requests to +// AWS Lake Formation. See this package's package overview docs +// for details on the service. +// +// LakeFormation methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type LakeFormation struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "LakeFormation" // Name of service. + EndpointsID = "lakeformation" // ID to lookup a service endpoint with. + ServiceID = "LakeFormation" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the LakeFormation client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a LakeFormation client from just a session. +// svc := lakeformation.New(mySession) +// +// // Create a LakeFormation client with additional configuration +// svc := lakeformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *LakeFormation { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "lakeformation" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance.
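// --- Editorial usage sketch (not part of the vendored patch) ---
// Client construction exactly as described in doc.go and the New documentation
// above: create a session, then call New, optionally with extra configuration:
//
//    sess := session.Must(session.NewSession())
//    svc := lakeformation.New(sess, aws.NewConfig().WithRegion("us-west-2"))
//
// The returned client is safe for concurrent use across goroutines.
// ----------------------------------------------------------------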
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LakeFormation { + svc := &LakeFormation{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2017-03-31", + JSONVersion: "1.1", + TargetPrefix: "AWSLakeFormation", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a LakeFormation operation and runs any +// custom request initialization. +func (c *LakeFormation) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go index fe5e6785b1e..c5c91217388 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go @@ -2083,6 +2083,7 @@ func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Reque // InvokeAsync API operation for AWS Lambda. // +// // For asynchronous function invocation, use Invoke. // // Invokes a function asynchronously. @@ -2165,6 +2166,12 @@ func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) (req *request.Reque Name: opListAliases, HTTPMethod: "GET", HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, } if input == nil { @@ -2226,6 +2233,58 @@ func (c *Lambda) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput return out, req.Send() } +// ListAliasesPages iterates over the pages of a ListAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAliases operation. +// pageNum := 0 +// err := client.ListAliasesPages(params, +// func(page *lambda.ListAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Lambda) ListAliasesPages(input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool) error { + return c.ListAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAliasesPagesWithContext same as ListAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListEventSourceMappings = "ListEventSourceMappings" // ListEventSourceMappingsRequest generates a "aws/request.Request" representing the @@ -2335,7 +2394,7 @@ func (c *Lambda) ListEventSourceMappingsWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListEventSourceMappings operation. // pageNum := 0 // err := client.ListEventSourceMappingsPages(params, -// func(page *ListEventSourceMappingsOutput, lastPage bool) bool { +// func(page *lambda.ListEventSourceMappingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2367,10 +2426,12 @@ func (c *Lambda) ListEventSourceMappingsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEventSourceMappingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEventSourceMappingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2483,7 +2544,7 @@ func (c *Lambda) ListFunctionsWithContext(ctx aws.Context, input *ListFunctionsI // // Example iterating over at most 3 pages of a ListFunctions operation. // pageNum := 0 // err := client.ListFunctionsPages(params, -// func(page *ListFunctionsOutput, lastPage bool) bool { +// func(page *lambda.ListFunctionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2515,10 +2576,12 @@ func (c *Lambda) ListFunctionsPagesWithContext(ctx aws.Context, input *ListFunct }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFunctionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFunctionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2553,6 +2616,12 @@ func (c *Lambda) ListLayerVersionsRequest(input *ListLayerVersionsInput) (req *r Name: opListLayerVersions, HTTPMethod: "GET", HTTPPath: "/2018-10-31/layers/{LayerName}/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, } if input == nil { @@ -2616,6 +2685,58 @@ func (c *Lambda) ListLayerVersionsWithContext(ctx aws.Context, input *ListLayerV return out, req.Send() } +// ListLayerVersionsPages iterates over the pages of a ListLayerVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListLayerVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListLayerVersions operation. 
+// pageNum := 0 +// err := client.ListLayerVersionsPages(params, +// func(page *lambda.ListLayerVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Lambda) ListLayerVersionsPages(input *ListLayerVersionsInput, fn func(*ListLayerVersionsOutput, bool) bool) error { + return c.ListLayerVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListLayerVersionsPagesWithContext same as ListLayerVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) ListLayerVersionsPagesWithContext(ctx aws.Context, input *ListLayerVersionsInput, fn func(*ListLayerVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListLayerVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListLayerVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListLayerVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListLayers = "ListLayers" // ListLayersRequest generates a "aws/request.Request" representing the @@ -2647,6 +2768,12 @@ func (c *Lambda) ListLayersRequest(input *ListLayersInput) (req *request.Request Name: opListLayers, HTTPMethod: "GET", HTTPPath: "/2018-10-31/layers", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, } if input == nil { @@ -2706,6 +2833,58 @@ func (c *Lambda) ListLayersWithContext(ctx aws.Context, input *ListLayersInput, return out, req.Send() } +// ListLayersPages iterates over the pages of a ListLayers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListLayers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListLayers operation. +// pageNum := 0 +// err := client.ListLayersPages(params, +// func(page *lambda.ListLayersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Lambda) ListLayersPages(input *ListLayersInput, fn func(*ListLayersOutput, bool) bool) error { + return c.ListLayersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListLayersPagesWithContext same as ListLayersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
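// --- Editorial note (not part of the vendored patch) ---
// This revision rewrites every *Pages loop in the same way. The old form,
//
//    cont := true
//    for p.Next() && cont {
//        cont = fn(p.Page().(*ListLayersOutput), !p.HasNextPage())
//    }
//
// re-evaluated p.Next() once more after fn had already returned false, which
// could advance the paginator and issue one extra request. The new form,
//
//    for p.Next() {
//        if !fn(p.Page().(*ListLayersOutput), !p.HasNextPage()) {
//            break
//        }
//    }
//
// breaks immediately, so no further page is fetched; p.Err() still reports any
// request error afterwards.
// ----------------------------------------------------------------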
+func (c *Lambda) ListLayersPagesWithContext(ctx aws.Context, input *ListLayersInput, fn func(*ListLayersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListLayersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListLayersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListLayersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTags = "ListTags" // ListTagsRequest generates a "aws/request.Request" representing the @@ -2829,6 +3008,12 @@ func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInpu Name: opListVersionsByFunction, HTTPMethod: "GET", HTTPPath: "/2015-03-31/functions/{FunctionName}/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, } if input == nil { @@ -2890,6 +3075,58 @@ func (c *Lambda) ListVersionsByFunctionWithContext(ctx aws.Context, input *ListV return out, req.Send() } +// ListVersionsByFunctionPages iterates over the pages of a ListVersionsByFunction operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListVersionsByFunction method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVersionsByFunction operation. +// pageNum := 0 +// err := client.ListVersionsByFunctionPages(params, +// func(page *lambda.ListVersionsByFunctionOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Lambda) ListVersionsByFunctionPages(input *ListVersionsByFunctionInput, fn func(*ListVersionsByFunctionOutput, bool) bool) error { + return c.ListVersionsByFunctionPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListVersionsByFunctionPagesWithContext same as ListVersionsByFunctionPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) ListVersionsByFunctionPagesWithContext(ctx aws.Context, input *ListVersionsByFunctionInput, fn func(*ListVersionsByFunctionOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListVersionsByFunctionInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListVersionsByFunctionRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListVersionsByFunctionOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPublishLayerVersion = "PublishLayerVersion" // PublishLayerVersionRequest generates a "aws/request.Request" representing the @@ -4665,6 +4902,8 @@ type CreateEventSourceMappingInput struct { // FunctionName is a required field FunctionName *string `min:"1" type:"string" required:"true"` + MaximumBatchingWindowInSeconds *int64 `type:"integer"` + // The position in a stream from which to start reading. Required for Amazon // Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported // for Amazon Kinesis streams. @@ -4730,6 +4969,12 @@ func (s *CreateEventSourceMappingInput) SetFunctionName(v string) *CreateEventSo return s } +// SetMaximumBatchingWindowInSeconds sets the MaximumBatchingWindowInSeconds field's value. +func (s *CreateEventSourceMappingInput) SetMaximumBatchingWindowInSeconds(v int64) *CreateEventSourceMappingInput { + s.MaximumBatchingWindowInSeconds = &v + return s +} + // SetStartingPosition sets the StartingPosition field's value. func (s *CreateEventSourceMappingInput) SetStartingPosition(v string) *CreateEventSourceMappingInput { s.StartingPosition = &v @@ -5451,6 +5696,8 @@ type EventSourceMappingConfiguration struct { // The result of the last AWS Lambda invocation of your Lambda function. LastProcessingResult *string `type:"string"` + MaximumBatchingWindowInSeconds *int64 `type:"integer"` + // The state of the event source mapping. It can be one of the following: Creating, // Enabling, Enabled, Disabling, Disabled, Updating, or Deleting. State *string `type:"string"` @@ -5502,6 +5749,12 @@ func (s *EventSourceMappingConfiguration) SetLastProcessingResult(v string) *Eve return s } +// SetMaximumBatchingWindowInSeconds sets the MaximumBatchingWindowInSeconds field's value. +func (s *EventSourceMappingConfiguration) SetMaximumBatchingWindowInSeconds(v int64) *EventSourceMappingConfiguration { + s.MaximumBatchingWindowInSeconds = &v + return s +} + // SetState sets the State field's value. func (s *EventSourceMappingConfiguration) SetState(v string) *EventSourceMappingConfiguration { s.State = &v @@ -5664,7 +5917,7 @@ type FunctionConfiguration struct { // (https://www.w3.org/TR/NOTE-datetime) (YYYY-MM-DDThh:mm:ss.sTZD). LastModified *string `type:"string"` - // The function's layers (https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). + // The function's layers (https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). Layers []*Layer `type:"list"` // For Lambda@Edge functions, the ARN of the master function. @@ -8737,6 +8990,8 @@ type UpdateEventSourceMappingInput struct { // function name, it's limited to 64 characters in length. FunctionName *string `min:"1" type:"string"` + MaximumBatchingWindowInSeconds *int64 `type:"integer"` + // The identifier of the event source mapping. // // UUID is a required field @@ -8793,6 +9048,12 @@ func (s *UpdateEventSourceMappingInput) SetFunctionName(v string) *UpdateEventSo return s } +// SetMaximumBatchingWindowInSeconds sets the MaximumBatchingWindowInSeconds field's value. +func (s *UpdateEventSourceMappingInput) SetMaximumBatchingWindowInSeconds(v int64) *UpdateEventSourceMappingInput { + s.MaximumBatchingWindowInSeconds = &v + return s +} + // SetUUID sets the UUID field's value. 
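// --- Editorial note (not part of the vendored patch) ---
// MaximumBatchingWindowInSeconds is added to the event source mapping types in
// this revision without a generated doc comment. Per the AWS Lambda docs (an
// editorial gloss, not SDK text), it is the maximum time, in seconds, that
// Lambda gathers records before invoking the function. A hypothetical update,
// assuming a *lambda.Lambda client `svc` (the UUID is a placeholder):
//
//    input := &lambda.UpdateEventSourceMappingInput{}
//    input.SetUUID("14e0db71-example-uuid")
//    input.SetMaximumBatchingWindowInSeconds(30)
//    if _, err := svc.UpdateEventSourceMapping(input); err != nil {
//        log.Fatal(err)
//    }
// ----------------------------------------------------------------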
func (s *UpdateEventSourceMappingInput) SetUUID(v string) *UpdateEventSourceMappingInput { s.UUID = &v @@ -9247,6 +9508,9 @@ const ( // RuntimeNodejs810 is a Runtime enum value RuntimeNodejs810 = "nodejs8.10" + // RuntimeNodejs10X is a Runtime enum value + RuntimeNodejs10X = "nodejs10.x" + // RuntimeJava8 is a Runtime enum value RuntimeJava8 = "java8" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go index 1cccdda0842..f137393fc0c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go @@ -46,11 +46,11 @@ const ( // svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lambda { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lambda { svc := &Lambda{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-03-31", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/waiters.go new file mode 100644 index 00000000000..8c55f9f32d1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lambda/waiters.go @@ -0,0 +1,61 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lambda + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilFunctionExists uses the AWS Lambda API operation +// GetFunction to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *Lambda) WaitUntilFunctionExists(input *GetFunctionInput) error { + return c.WaitUntilFunctionExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilFunctionExistsWithContext is an extended version of WaitUntilFunctionExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lambda) WaitUntilFunctionExistsWithContext(ctx aws.Context, input *GetFunctionInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilFunctionExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetFunctionInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetFunctionRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go index 39ce69d1e7b..f99d0efabb8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go @@ -1629,7 +1629,7 @@ func (c *LexModelBuildingService) GetBotAliasesWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a GetBotAliases operation. // pageNum := 0 // err := client.GetBotAliasesPages(params, -// func(page *GetBotAliasesOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBotAliasesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1661,10 +1661,12 @@ func (c *LexModelBuildingService) GetBotAliasesPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotAliasesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBotAliasesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1868,7 +1870,7 @@ func (c *LexModelBuildingService) GetBotChannelAssociationsWithContext(ctx aws.C // // Example iterating over at most 3 pages of a GetBotChannelAssociations operation. // pageNum := 0 // err := client.GetBotChannelAssociationsPages(params, -// func(page *GetBotChannelAssociationsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBotChannelAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1900,10 +1902,12 @@ func (c *LexModelBuildingService) GetBotChannelAssociationsPagesWithContext(ctx }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotChannelAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBotChannelAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2024,7 +2028,7 @@ func (c *LexModelBuildingService) GetBotVersionsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a GetBotVersions operation. 
// pageNum := 0 // err := client.GetBotVersionsPages(params, -// func(page *GetBotVersionsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBotVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2056,10 +2060,12 @@ func (c *LexModelBuildingService) GetBotVersionsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBotVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2179,7 +2185,7 @@ func (c *LexModelBuildingService) GetBotsWithContext(ctx aws.Context, input *Get // // Example iterating over at most 3 pages of a GetBots operation. // pageNum := 0 // err := client.GetBotsPages(params, -// func(page *GetBotsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBotsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2211,10 +2217,12 @@ func (c *LexModelBuildingService) GetBotsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBotsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBotsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2415,7 +2423,7 @@ func (c *LexModelBuildingService) GetBuiltinIntentsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a GetBuiltinIntents operation. // pageNum := 0 // err := client.GetBuiltinIntentsPages(params, -// func(page *GetBuiltinIntentsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBuiltinIntentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2447,10 +2455,12 @@ func (c *LexModelBuildingService) GetBuiltinIntentsPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBuiltinIntentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBuiltinIntentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2562,7 +2572,7 @@ func (c *LexModelBuildingService) GetBuiltinSlotTypesWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a GetBuiltinSlotTypes operation. // pageNum := 0 // err := client.GetBuiltinSlotTypesPages(params, -// func(page *GetBuiltinSlotTypesOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetBuiltinSlotTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2594,10 +2604,12 @@ func (c *LexModelBuildingService) GetBuiltinSlotTypesPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetBuiltinSlotTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetBuiltinSlotTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2991,7 +3003,7 @@ func (c *LexModelBuildingService) GetIntentVersionsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a GetIntentVersions operation. 
// pageNum := 0 // err := client.GetIntentVersionsPages(params, -// func(page *GetIntentVersionsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetIntentVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3023,10 +3035,12 @@ func (c *LexModelBuildingService) GetIntentVersionsPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetIntentVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetIntentVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3085,7 +3099,7 @@ func (c *LexModelBuildingService) GetIntentsRequest(input *GetIntentsInput) (req // * If you specify the nameContains field, returns the $LATEST version of // all intents that contain the specified string. // -// * If you don't specify the nameContains field, returns information about +// * If you don't specify the nameContains field, returns information about // the $LATEST version of all intents. // // The operation requires permission for the lex:GetIntents action. @@ -3145,7 +3159,7 @@ func (c *LexModelBuildingService) GetIntentsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a GetIntents operation. // pageNum := 0 // err := client.GetIntentsPages(params, -// func(page *GetIntentsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetIntentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3177,10 +3191,12 @@ func (c *LexModelBuildingService) GetIntentsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetIntentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetIntentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3394,7 +3410,7 @@ func (c *LexModelBuildingService) GetSlotTypeVersionsWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a GetSlotTypeVersions operation. // pageNum := 0 // err := client.GetSlotTypeVersionsPages(params, -// func(page *GetSlotTypeVersionsOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetSlotTypeVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3426,10 +3442,12 @@ func (c *LexModelBuildingService) GetSlotTypeVersionsPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSlotTypeVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetSlotTypeVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3488,7 +3506,7 @@ func (c *LexModelBuildingService) GetSlotTypesRequest(input *GetSlotTypesInput) // * If you specify the nameContains field, returns the $LATEST version of // all slot types that contain the specified string. // -// * If you don't specify the nameContains field, returns information about +// * If you don't specify the nameContains field, returns information about // the $LATEST version of all slot types. // // The operation requires permission for the lex:GetSlotTypes action. @@ -3548,7 +3566,7 @@ func (c *LexModelBuildingService) GetSlotTypesWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a GetSlotTypes operation.
// pageNum := 0 // err := client.GetSlotTypesPages(params, -// func(page *GetSlotTypesOutput, lastPage bool) bool { +// func(page *lexmodelbuildingservice.GetSlotTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3580,10 +3598,12 @@ func (c *LexModelBuildingService) GetSlotTypesPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSlotTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetSlotTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3742,15 +3762,19 @@ func (c *LexModelBuildingService) PutBotRequest(input *PutBotInput) (req *reques // and whether the bot is directed toward children under age 13. You can use // this to add intents later, or to remove intents from an existing bot. When // you create a bot with the minimum information, the bot is created or updated -// but Amazon Lex returns the response FAILED. You can build the bot after you add one or more intents. For more information -// about Amazon Lex bots, see how-it-works. +// but Amazon Lex returns the response FAILED. You can build the bot after you +// add one or more intents. For more information about Amazon Lex bots, see +// how-it-works. // // If you specify the name of an existing bot, the fields in the request replace -// the existing values in the $LATESTversion of the bot. Amazon Lex removes any fields that you don't provide -// values for in the request, except for the idleTTLInSecondsand privacySettingsfields, which are set to their default values. If you don't specify values -// for required fields, Amazon Lex throws an exception. +// the existing values in the $LATEST version of the bot. Amazon Lex removes +// any fields that you don't provide values for in the request, except for the +// idleTTLInSeconds and privacySettings fields, which are set to their default +// values. If you don't specify values for required fields, Amazon Lex throws +// an exception. // -// This operation requires permissions for the lex:PutBotaction. For more information, see auth-and-access-control +// This operation requires permissions for the lex:PutBot action. For more information, +// see auth-and-access-control. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3964,7 +3988,6 @@ func (c *LexModelBuildingService) PutIntentRequest(input *PutIntentInput) (req * // is available, Amazon Lex invokes your Lambda function. If you configure // your intent to return the intent information to the client application. // -// // You can specify other optional information in the request, such as: // // * A confirmation prompt to ask the user to confirm an intent. For example, @@ -6192,8 +6215,7 @@ type GetBotAliasesInput struct { // BotName is a required field BotName *string `location:"uri" locationName:"botName" min:"2" type:"string" required:"true"` - // The maximum number of aliases to return in the response. The default is 50. - // . + // The maximum number of aliases to return in the response. The default is 50. . MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` // Substring to match in bot alias names. An alias will be returned if any part @@ -8799,7 +8821,7 @@ type PutBotAliasInput struct { // // When you want to update a bot alias, set the checksum field to the checksum // of the most recent revision of the $LATEST version. 
If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, + // the checksum field, or if the checksum does not match the $LATEST version, // you get a PreconditionFailedException exception. Checksum *string `locationName:"checksum" type:"string"` @@ -8984,7 +9006,7 @@ type PutBotInput struct { // // When you want to update a bot, set the checksum field to the checksum of // the most recent revision of the $LATEST version. If you don't specify the - // checksum field, or if the checksum does not match the $LATEST version, you + // checksum field, or if the checksum does not match the $LATEST version, you // get a PreconditionFailedException exception. Checksum *string `locationName:"checksum" type:"string"` @@ -9411,7 +9433,7 @@ type PutIntentInput struct { // // When you want to update a intent, set the checksum field to the checksum // of the most recent revision of the $LATEST version. If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, + // the checksum field, or if the checksum does not match the $LATEST version, // you get a PreconditionFailedException exception. Checksum *string `locationName:"checksum" type:"string"` @@ -9841,7 +9863,7 @@ type PutSlotTypeInput struct { // // When you want to update a slot type, set the checksum field to the checksum // of the most recent revision of the $LATEST version. If you don't specify - // the checksum field, or if the checksum does not match the $LATEST version, + // the checksum field, or if the checksum does not match the $LATEST version, // you get a PreconditionFailedException exception. Checksum *string `locationName:"checksum" type:"string"` @@ -10313,11 +10335,10 @@ type StartImportInput struct { // * FAIL_ON_CONFLICT - The import operation is stopped on the first conflict // between a resource in the import file and an existing resource. The name // of the resource causing the conflict is in the failureReason field of - // the response to the GetImport operation. - // - // OVERWRITE_LATEST - The import operation proceeds even if there is a conflict - // with an existing resource. The $LASTEST version of the existing resource - // is overwritten with the data from the import file. + // the response to the GetImport operation. OVERWRITE_LATEST - The import + // operation proceeds even if there is a conflict with an existing resource. + // The $LASTEST version of the existing resource is overwritten with the + // data from the import file. 
// // MergeStrategy is a required field MergeStrategy *string `locationName:"mergeStrategy" type:"string" required:"true" enum:"MergeStrategy"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go index 0f9509ff725..98937996781 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *LexModelBuildingService if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "lex" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LexModelBuildingService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LexModelBuildingService { svc := &LexModelBuildingService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go index 5a8fc996539..fca9b9c9f47 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go @@ -46,11 +46,11 @@ const ( // svc := licensemanager.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *LicenseManager { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LicenseManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LicenseManager { svc := &LicenseManager{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go index 133938f6800..84cac53bb6d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go @@ -163,7 +163,7 @@ func (c *Lightsail) AttachDiskRequest(input *AttachDiskInput) (req *request.Requ // and exposes it to the instance with the specified disk name. // // The attach disk operation supports tag-based access control via resource -// tags applied to the resource identified by diskName. For more information, +// tags applied to the resource identified by disk name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -274,8 +274,8 @@ func (c *Lightsail) AttachInstancesToLoadBalancerRequest(input *AttachInstancesT // health check status is available. // // The attach instances to load balancer operation supports tag-based access -// control via resource tags applied to the resource identified by loadBalancerName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via resource tags applied to the resource identified by load balancer +// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -384,13 +384,13 @@ func (c *Lightsail) AttachLoadBalancerTlsCertificateRequest(input *AttachLoadBal // // Once you create and validate your certificate, you can attach it to your // load balancer. You can also use this API to rotate the certificates on your -// account. Use the AttachLoadBalancerTlsCertificate operation with the non-attached -// certificate, and it will replace the existing one and become the attached -// certificate. +// account. Use the attach load balancer tls certificate operation with the +// non-attached certificate, and it will replace the existing one and become +// the attached certificate. // // The attach load balancer tls certificate operation supports tag-based access -// control via resource tags applied to the resource identified by loadBalancerName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via resource tags applied to the resource identified by load balancer +// name. 
For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -601,7 +601,7 @@ func (c *Lightsail) CloseInstancePublicPortsRequest(input *CloseInstancePublicPo // Closes the public ports on a specific Amazon Lightsail instance. // // The close instance public ports operation supports tag-based access control -// via resource tags applied to the resource identified by instanceName. For +// via resource tags applied to the resource identified by instance name. For // more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -706,8 +706,19 @@ func (c *Lightsail) CopySnapshotRequest(input *CopySnapshotInput) (req *request. // CopySnapshot API operation for Amazon Lightsail. // -// Copies an instance or disk snapshot from one AWS Region to another in Amazon -// Lightsail. +// Copies a manual instance or disk snapshot as another manual snapshot, or +// copies an automatic instance or disk snapshot as a manual snapshot. This +// operation can also be used to copy a manual or automatic snapshot of an instance +// or a disk from one AWS Region to another in Amazon Lightsail. +// +// When copying a manual snapshot, be sure to define the source region, source +// snapshot name, and target snapshot name parameters. +// +// When copying an automatic snapshot, be sure to define the source region, +// source resource name, target snapshot name, and either the restore date or +// the use latest restorable auto snapshot parameters. +// +// Database snapshots cannot be copied at this time. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -923,10 +934,8 @@ func (c *Lightsail) CreateDiskRequest(input *CreateDiskInput) (req *request.Requ // CreateDisk API operation for Amazon Lightsail. // -// Creates a block storage disk that can be attached to a Lightsail instance -// in the same Availability Zone (e.g., us-east-2a). The disk is created in -// the regional endpoint that you send the HTTP request to. For more information, -// see Regions and Availability Zones in Lightsail (https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail). +// Creates a block storage disk that can be attached to an Amazon Lightsail +// instance in the same Availability Zone (e.g., us-east-2a). // // The create disk operation supports tag-based access control via request tags. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). @@ -1033,14 +1042,13 @@ func (c *Lightsail) CreateDiskFromSnapshotRequest(input *CreateDiskFromSnapshotI // CreateDiskFromSnapshot API operation for Amazon Lightsail. // -// Creates a block storage disk from a disk snapshot that can be attached to -// a Lightsail instance in the same Availability Zone (e.g., us-east-2a). The -// disk is created in the regional endpoint that you send the HTTP request to. 
-// For more information, see Regions and Availability Zones in Lightsail (https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail). +// Creates a block storage disk from a manual or automatic snapshot of a disk. +// The resulting disk can be attached to an Amazon Lightsail instance in the +// same Availability Zone (e.g., us-east-2a). // // The create disk from snapshot operation supports tag-based access control // via request tags and resource tags applied to the resource identified by -// diskSnapshotName. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// disk snapshot name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1386,7 +1394,7 @@ func (c *Lightsail) CreateDomainEntryRequest(input *CreateDomainEntryInput) (req // of authority (SOA), service locator (SRV), or text (TXT). // // The create domain entry operation supports tag-based access control via resource -// tags applied to the resource identified by domainName. For more information, +// tags applied to the resource identified by domain name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1599,12 +1607,7 @@ func (c *Lightsail) CreateInstancesRequest(input *CreateInstancesInput) (req *re // CreateInstances API operation for Amazon Lightsail. // -// Creates one or more Amazon Lightsail virtual private servers, or instances. -// Create instances using active blueprints. Inactive blueprints are listed -// to support customers with existing instances but are not necessarily available -// for launch of new instances. Blueprints are marked inactive when they become -// outdated due to operating system updates or new application releases. Use -// the get blueprints operation to return a list of available blueprints. +// Creates one or more Amazon Lightsail instances. // // The create instances operation supports tag-based access control via request // tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). @@ -1711,12 +1714,13 @@ func (c *Lightsail) CreateInstancesFromSnapshotRequest(input *CreateInstancesFro // CreateInstancesFromSnapshot API operation for Amazon Lightsail. // -// Uses a specific snapshot as a blueprint for creating one or more new instances -// that are based on that identical configuration. +// Creates one or more new instances from a manual or automatic snapshot of +// an instance. // // The create instances from snapshot operation supports tag-based access control // via request tags and resource tags applied to the resource identified by -// instanceSnapshotName. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// instance snapshot name. 
For more information, see the Lightsail Dev Guide +// (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2046,8 +2050,8 @@ func (c *Lightsail) CreateLoadBalancerTlsCertificateRequest(input *CreateLoadBal // TLS is just an updated, more secure version of Secure Socket Layer (SSL). // // The create load balancer tls certificate operation supports tag-based access -// control via resource tags applied to the resource identified by loadBalancerName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via resource tags applied to the resource identified by load balancer +// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2437,6 +2441,106 @@ func (c *Lightsail) CreateRelationalDatabaseSnapshotWithContext(ctx aws.Context, return out, req.Send() } +const opDeleteAutoSnapshot = "DeleteAutoSnapshot" + +// DeleteAutoSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAutoSnapshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAutoSnapshot for more information on using the DeleteAutoSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAutoSnapshotRequest method. +// req, resp := client.DeleteAutoSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAutoSnapshot +func (c *Lightsail) DeleteAutoSnapshotRequest(input *DeleteAutoSnapshotInput) (req *request.Request, output *DeleteAutoSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteAutoSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutoSnapshotInput{} + } + + output = &DeleteAutoSnapshotOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteAutoSnapshot API operation for Amazon Lightsail. +// +// Deletes an automatic snapshot for an instance or disk. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DeleteAutoSnapshot for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// A general service exception. 
+// +// * ErrCodeInvalidInputException "InvalidInputException" +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * ErrCodeNotFoundException "NotFoundException" +// Lightsail throws this exception when it cannot find a resource. +// +// * ErrCodeOperationFailureException "OperationFailureException" +// Lightsail throws this exception when an operation fails to execute. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * ErrCodeUnauthenticatedException "UnauthenticatedException" +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteAutoSnapshot +func (c *Lightsail) DeleteAutoSnapshot(input *DeleteAutoSnapshotInput) (*DeleteAutoSnapshotOutput, error) { + req, out := c.DeleteAutoSnapshotRequest(input) + return out, req.Send() +} + +// DeleteAutoSnapshotWithContext is the same as DeleteAutoSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAutoSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) DeleteAutoSnapshotWithContext(ctx aws.Context, input *DeleteAutoSnapshotInput, opts ...request.Option) (*DeleteAutoSnapshotOutput, error) { + req, out := c.DeleteAutoSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDisk = "DeleteDisk" // DeleteDiskRequest generates a "aws/request.Request" representing the @@ -2487,7 +2591,7 @@ func (c *Lightsail) DeleteDiskRequest(input *DeleteDiskInput) (req *request.Requ // The disk may remain in the deleting state for several minutes. // // The delete disk operation supports tag-based access control via resource -// tags applied to the resource identified by diskName. For more information, +// tags applied to the resource identified by disk name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2602,7 +2706,7 @@ func (c *Lightsail) DeleteDiskSnapshotRequest(input *DeleteDiskSnapshotInput) (r // the information needed to restore the disk. // // The delete disk snapshot operation supports tag-based access control via -// resource tags applied to the resource identified by diskSnapshotName. For +// resource tags applied to the resource identified by disk snapshot name. For // more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2710,7 +2814,7 @@ func (c *Lightsail) DeleteDomainRequest(input *DeleteDomainInput) (req *request. 
// Deletes the specified domain recordset and all of its domain records. // // The delete domain operation supports tag-based access control via resource -// tags applied to the resource identified by domainName. For more information, +// tags applied to the resource identified by domain name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2818,7 +2922,7 @@ func (c *Lightsail) DeleteDomainEntryRequest(input *DeleteDomainEntryInput) (req // Deletes a specific domain entry. // // The delete domain entry operation supports tag-based access control via resource -// tags applied to the resource identified by domainName. For more information, +// tags applied to the resource identified by domain name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2923,10 +3027,10 @@ func (c *Lightsail) DeleteInstanceRequest(input *DeleteInstanceInput) (req *requ // DeleteInstance API operation for Amazon Lightsail. // -// Deletes a specific Amazon Lightsail virtual private server, or instance. +// Deletes an Amazon Lightsail instance. // // The delete instance operation supports tag-based access control via resource -// tags applied to the resource identified by instanceName. For more information, +// tags applied to the resource identified by instance name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3034,8 +3138,8 @@ func (c *Lightsail) DeleteInstanceSnapshotRequest(input *DeleteInstanceSnapshotI // Deletes a specific snapshot of a virtual private server (or instance). // // The delete instance snapshot operation supports tag-based access control -// via resource tags applied to the resource identified by instanceSnapshotName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// via resource tags applied to the resource identified by instance snapshot +// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3142,7 +3246,7 @@ func (c *Lightsail) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *reques // Deletes a specific SSH key pair. // // The delete key pair operation supports tag-based access control via resource -// tags applied to the resource identified by keyPairName. For more information, +// tags applied to the resource identified by key pair name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -3364,7 +3468,7 @@ func (c *Lightsail) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (r // create a new certificate, and verify domain ownership again. // // The delete load balancer operation supports tag-based access control via -// resource tags applied to the resource identified by loadBalancerName. For +// resource tags applied to the resource identified by load balancer name. For // more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3472,8 +3576,8 @@ func (c *Lightsail) DeleteLoadBalancerTlsCertificateRequest(input *DeleteLoadBal // Deletes an SSL/TLS certificate associated with a Lightsail load balancer. // // The delete load balancer tls certificate operation supports tag-based access -// control via resource tags applied to the resource identified by loadBalancerName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via resource tags applied to the resource identified by load balancer +// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3798,7 +3902,7 @@ func (c *Lightsail) DetachDiskRequest(input *DetachDiskInput) (req *request.Requ // stopping the instance and detaching the disk. // // The detach disk operation supports tag-based access control via resource -// tags applied to the resource identified by diskName. For more information, +// tags applied to the resource identified by disk name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3909,8 +4013,8 @@ func (c *Lightsail) DetachInstancesFromLoadBalancerRequest(input *DetachInstance // are detached from the load balancer. // // The detach instances from load balancer operation supports tag-based access -// control via resource tags applied to the resource identified by loadBalancerName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via resource tags applied to the resource identified by load balancer +// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4074,6 +4178,107 @@ func (c *Lightsail) DetachStaticIpWithContext(ctx aws.Context, input *DetachStat return out, req.Send() } +const opDisableAddOn = "DisableAddOn" + +// DisableAddOnRequest generates a "aws/request.Request" representing the +// client's request for the DisableAddOn operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableAddOn for more information on using the DisableAddOn +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableAddOnRequest method. +// req, resp := client.DisableAddOnRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DisableAddOn +func (c *Lightsail) DisableAddOnRequest(input *DisableAddOnInput) (req *request.Request, output *DisableAddOnOutput) { + op := &request.Operation{ + Name: opDisableAddOn, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAddOnInput{} + } + + output = &DisableAddOnOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableAddOn API operation for Amazon Lightsail. +// +// Disables an add-on for an Amazon Lightsail resource. For more information, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DisableAddOn for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// A general service exception. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * ErrCodeNotFoundException "NotFoundException" +// Lightsail throws this exception when it cannot find a resource. +// +// * ErrCodeOperationFailureException "OperationFailureException" +// Lightsail throws this exception when an operation fails to execute. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * ErrCodeUnauthenticatedException "UnauthenticatedException" +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DisableAddOn +func (c *Lightsail) DisableAddOn(input *DisableAddOnInput) (*DisableAddOnOutput, error) { + req, out := c.DisableAddOnRequest(input) + return out, req.Send() +} + +// DisableAddOnWithContext is the same as DisableAddOn with the addition of +// the ability to pass a context and additional request options. +// +// See DisableAddOn for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lightsail) DisableAddOnWithContext(ctx aws.Context, input *DisableAddOnInput, opts ...request.Option) (*DisableAddOnOutput, error) { + req, out := c.DisableAddOnRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDownloadDefaultKeyPair = "DownloadDefaultKeyPair" // DownloadDefaultKeyPairRequest generates a "aws/request.Request" representing the @@ -4178,6 +4383,107 @@ func (c *Lightsail) DownloadDefaultKeyPairWithContext(ctx aws.Context, input *Do return out, req.Send() } +const opEnableAddOn = "EnableAddOn" + +// EnableAddOnRequest generates a "aws/request.Request" representing the +// client's request for the EnableAddOn operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableAddOn for more information on using the EnableAddOn +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableAddOnRequest method. +// req, resp := client.EnableAddOnRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/EnableAddOn +func (c *Lightsail) EnableAddOnRequest(input *EnableAddOnInput) (req *request.Request, output *EnableAddOnOutput) { + op := &request.Operation{ + Name: opEnableAddOn, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAddOnInput{} + } + + output = &EnableAddOnOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableAddOn API operation for Amazon Lightsail. +// +// Enables or modifies an add-on for an Amazon Lightsail resource. For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation EnableAddOn for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// A general service exception. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * ErrCodeNotFoundException "NotFoundException" +// Lightsail throws this exception when it cannot find a resource. +// +// * ErrCodeOperationFailureException "OperationFailureException" +// Lightsail throws this exception when an operation fails to execute. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. 
+// +// * ErrCodeUnauthenticatedException "UnauthenticatedException" +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/EnableAddOn +func (c *Lightsail) EnableAddOn(input *EnableAddOnInput) (*EnableAddOnOutput, error) { + req, out := c.EnableAddOnRequest(input) + return out, req.Send() +} + +// EnableAddOnWithContext is the same as EnableAddOn with the addition of +// the ability to pass a context and additional request options. +// +// See EnableAddOn for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) EnableAddOnWithContext(ctx aws.Context, input *EnableAddOnInput, opts ...request.Option) (*EnableAddOnOutput, error) { + req, out := c.EnableAddOnRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opExportSnapshot = "ExportSnapshot" // ExportSnapshotRequest generates a "aws/request.Request" representing the @@ -4233,9 +4539,9 @@ func (c *Lightsail) ExportSnapshotRequest(input *ExportSnapshotInput) (req *requ // EBS volumes. Snapshots are exported to the same Amazon Web Services Region // in Amazon EC2 as the source Lightsail snapshot. // -// The export snapshotoperation supports tag-based access control via resource tags applied to -// the resource identified by sourceSnapshotName. For more information, see -// the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// The export snapshot operation supports tag-based access control via resource +// tags applied to the resource identified by source snapshot name. For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Use the get instance snapshots or get disk snapshots operations to get a // list of snapshots that you can export to Amazon EC2. @@ -4402,33 +4708,134 @@ func (c *Lightsail) GetActiveNamesWithContext(ctx aws.Context, input *GetActiveN return out, req.Send() } -const opGetBlueprints = "GetBlueprints" +const opGetAutoSnapshots = "GetAutoSnapshots" -// GetBlueprintsRequest generates a "aws/request.Request" representing the -// client's request for the GetBlueprints operation. The "output" return +// GetAutoSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the GetAutoSnapshots operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetBlueprints for more information on using the GetBlueprints +// See GetAutoSnapshots for more information on using the GetAutoSnapshots // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetBlueprintsRequest method. 
-// req, resp := client.GetBlueprintsRequest(params) +// // Example sending a request using the GetAutoSnapshotsRequest method. +// req, resp := client.GetAutoSnapshotsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBlueprints -func (c *Lightsail) GetBlueprintsRequest(input *GetBlueprintsInput) (req *request.Request, output *GetBlueprintsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAutoSnapshots +func (c *Lightsail) GetAutoSnapshotsRequest(input *GetAutoSnapshotsInput) (req *request.Request, output *GetAutoSnapshotsOutput) { + op := &request.Operation{ + Name: opGetAutoSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAutoSnapshotsInput{} + } + + output = &GetAutoSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAutoSnapshots API operation for Amazon Lightsail. +// +// Returns the available automatic snapshots for the specified resource name. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation GetAutoSnapshots for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// A general service exception. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * ErrCodeNotFoundException "NotFoundException" +// Lightsail throws this exception when it cannot find a resource. +// +// * ErrCodeOperationFailureException "OperationFailureException" +// Lightsail throws this exception when an operation fails to execute. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * ErrCodeUnauthenticatedException "UnauthenticatedException" +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetAutoSnapshots +func (c *Lightsail) GetAutoSnapshots(input *GetAutoSnapshotsInput) (*GetAutoSnapshotsOutput, error) { + req, out := c.GetAutoSnapshotsRequest(input) + return out, req.Send() +} + +// GetAutoSnapshotsWithContext is the same as GetAutoSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See GetAutoSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lightsail) GetAutoSnapshotsWithContext(ctx aws.Context, input *GetAutoSnapshotsInput, opts ...request.Option) (*GetAutoSnapshotsOutput, error) { + req, out := c.GetAutoSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBlueprints = "GetBlueprints" + +// GetBlueprintsRequest generates a "aws/request.Request" representing the +// client's request for the GetBlueprints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBlueprints for more information on using the GetBlueprints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBlueprintsRequest method. +// req, resp := client.GetBlueprintsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBlueprints +func (c *Lightsail) GetBlueprintsRequest(input *GetBlueprintsInput) (req *request.Request, output *GetBlueprintsOutput) { op := &request.Operation{ Name: opGetBlueprints, HTTPMethod: "POST", @@ -4447,9 +4854,14 @@ func (c *Lightsail) GetBlueprintsRequest(input *GetBlueprintsInput) (req *reques // GetBlueprints API operation for Amazon Lightsail. // // Returns the list of available instance images, or blueprints. You can use -// a blueprint to create a new virtual private server already running a specific -// operating system, as well as a preinstalled app or development stack. The -// software each instance is running depends on the blueprint image you choose. +// a blueprint to create a new instance already running a specific operating +// system, as well as a preinstalled app or development stack. The software +// each instance is running depends on the blueprint image you choose. +// +// Use active blueprints when creating new instances. Inactive blueprints are +// listed to support customers with existing instances and are not necessarily +// available to create new instances. Blueprints are marked inactive when they +// become outdated due to operating system updates or new application releases. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5617,7 +6029,7 @@ func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDeta // server, or instance. // // The get instance access details operation supports tag-based access control -// via resource tags applied to the resource identified by instanceName. For +// via resource tags applied to the resource identified by instance name. For // more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8041,7 +8453,7 @@ func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordRequest(input *GetRel // Returns the current, previous, or pending versions of the master user password // for a Lightsail database. 
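
As an editorial aside (not part of the vendored diff), the new "use active blueprints" guidance above implies callers should filter GetBlueprints results on the Blueprint.IsActive flag; a minimal sketch, assuming default credentials and a single unpaginated result page:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	out, err := svc.GetBlueprints(&lightsail.GetBlueprintsInput{})
	if err != nil {
		fmt.Println("GetBlueprints failed:", err)
		return
	}
	for _, bp := range out.Blueprints {
		// Per the note above, only active blueprints are reliable for new instances.
		if aws.BoolValue(bp.IsActive) {
			fmt.Println(aws.StringValue(bp.BlueprintId))
		}
	}
}
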
//
-// The asdf operation GetRelationalDatabaseMasterUserPassword supports tag-based
+// The GetRelationalDatabaseMasterUserPassword operation supports tag-based
// access control via resource tags applied to the resource identified by relationalDatabaseName.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9092,7 +9504,7 @@ func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPort
// Adds public ports to an Amazon Lightsail instance.
//
// The open instance public ports operation supports tag-based access control
-// via resource tags applied to the resource identified by instanceName. For
+// via resource tags applied to the resource identified by instance name. For
// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9305,7 +9717,7 @@ func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsI
// all ports for every protocol not included in the current request.
//
// The put instance public ports operation supports tag-based access control
-// via resource tags applied to the resource identified by instanceName. For
+// via resource tags applied to the resource identified by instance name. For
// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9413,7 +9825,7 @@ func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *requ
// Restarts a specific instance.
//
// The reboot instance operation supports tag-based access control via resource
-// tags applied to the resource identified by instanceName. For more information,
+// tags applied to the resource identified by instance name. For more information,
// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9739,7 +10151,7 @@ func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *reques
// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip).
//
// The start instance operation supports tag-based access control via resource
-// tags applied to the resource identified by instanceName. For more information,
+// tags applied to the resource identified by instance name. For more information,
// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9961,7 +10373,7 @@ func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request.
// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip).
//
// The stop instance operation supports tag-based access control via resource
-// tags applied to the resource identified by instanceName. For more information,
+// tags applied to the resource identified by instance name. For more information,
// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags).
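
For orientation only (not part of the vendored diff): the "Returns awserr.Error ... use runtime type assertions" contract repeated throughout these doc comments looks like the sketch below in caller code, switching on the generated ErrCode* constants. The instance name is a hypothetical example.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	_, err := svc.StopInstance(&lightsail.StopInstanceInput{
		InstanceName: aws.String("my-instance"), // hypothetical instance name
	})
	// A nil error fails the type assertion, so the switch is skipped on success.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case lightsail.ErrCodeNotFoundException:
			fmt.Println("no such instance:", aerr.Message())
		case lightsail.ErrCodeOperationFailureException:
			fmt.Println("operation failed:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}
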
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10180,7 +10592,7 @@ func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Re // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). // // The tag resource operation supports tag-based access control via request -// tags and resource tags applied to the resource identified by resourceName. +// tags and resource tags applied to the resource identified by resource name. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10393,7 +10805,7 @@ func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *reques // Amazon Lightsail resource. // // The untag resource operation supports tag-based access control via request -// tags and resource tags applied to the resource identified by resourceName. +// tags and resource tags applied to the resource identified by resource name. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10501,7 +10913,7 @@ func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req // Updates a domain recordset after it is created. // // The update domain entry operation supports tag-based access control via resource -// tags applied to the resource identified by domainName. For more information, +// tags applied to the resource identified by domain name. For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10610,7 +11022,7 @@ func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancer // one attribute at a time. // // The update load balancer attribute operation supports tag-based access control -// via resource tags applied to the resource identified by loadBalancerName. +// via resource tags applied to the resource identified by load balancer name. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10897,6 +11309,120 @@ func (c *Lightsail) UpdateRelationalDatabaseParametersWithContext(ctx aws.Contex return out, req.Send() } +// Describes an add-on that is enabled for an Amazon Lightsail resource. +type AddOn struct { + _ struct{} `type:"structure"` + + // The name of the add-on. + Name *string `locationName:"name" type:"string"` + + // The next daily time an automatic snapshot will be created. + // + // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). + // + // The snapshot is automatically created between the time shown and up to 45 + // minutes after. + NextSnapshotTimeOfDay *string `locationName:"nextSnapshotTimeOfDay" type:"string"` + + // The daily time when an automatic snapshot is created. + // + // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). 
+ // + // The snapshot is automatically created between the time shown and up to 45 + // minutes after. + SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` + + // The status of the add-on. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s AddOn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddOn) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AddOn) SetName(v string) *AddOn { + s.Name = &v + return s +} + +// SetNextSnapshotTimeOfDay sets the NextSnapshotTimeOfDay field's value. +func (s *AddOn) SetNextSnapshotTimeOfDay(v string) *AddOn { + s.NextSnapshotTimeOfDay = &v + return s +} + +// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. +func (s *AddOn) SetSnapshotTimeOfDay(v string) *AddOn { + s.SnapshotTimeOfDay = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AddOn) SetStatus(v string) *AddOn { + s.Status = &v + return s +} + +// Describes a request to enable, modify, or disable an add-on for an Amazon +// Lightsail resource. +// +// An additional cost may be associated with enabling add-ons. For more information, +// see the Lightsail pricing page (https://aws.amazon.com/lightsail/pricing/). +type AddOnRequest struct { + _ struct{} `type:"structure"` + + // The add-on type. + // + // AddOnType is a required field + AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"` + + // An object that represents additional parameters when enabling or modifying + // the automatic snapshot add-on. + AutoSnapshotAddOnRequest *AutoSnapshotAddOnRequest `locationName:"autoSnapshotAddOnRequest" type:"structure"` +} + +// String returns the string representation +func (s AddOnRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddOnRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddOnRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddOnRequest"} + if s.AddOnType == nil { + invalidParams.Add(request.NewErrParamRequired("AddOnType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddOnType sets the AddOnType field's value. +func (s *AddOnRequest) SetAddOnType(v string) *AddOnRequest { + s.AddOnType = &v + return s +} + +// SetAutoSnapshotAddOnRequest sets the AutoSnapshotAddOnRequest field's value. +func (s *AddOnRequest) SetAutoSnapshotAddOnRequest(v *AutoSnapshotAddOnRequest) *AddOnRequest { + s.AutoSnapshotAddOnRequest = v + return s +} + type AllocateStaticIpInput struct { _ struct{} `type:"structure"` @@ -11285,6 +11811,155 @@ func (s *AttachStaticIpOutput) SetOperations(v []*Operation) *AttachStaticIpOutp return s } +// Describes a block storage disk that is attached to an instance, and is included +// in an automatic snapshot. +type AttachedDisk struct { + _ struct{} `type:"structure"` + + // The path of the disk (e.g., /dev/xvdf). + Path *string `locationName:"path" type:"string"` + + // The size of the disk in GB. 
+ SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` +} + +// String returns the string representation +func (s AttachedDisk) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedDisk) GoString() string { + return s.String() +} + +// SetPath sets the Path field's value. +func (s *AttachedDisk) SetPath(v string) *AttachedDisk { + s.Path = &v + return s +} + +// SetSizeInGb sets the SizeInGb field's value. +func (s *AttachedDisk) SetSizeInGb(v int64) *AttachedDisk { + s.SizeInGb = &v + return s +} + +// Describes a request to enable or modify the automatic snapshot add-on for +// an Amazon Lightsail instance or disk. +// +// When you modify the automatic snapshot time for a resource, it is typically +// effective immediately except under the following conditions: +// +// * If an automatic snapshot has been created for the current day, and you +// change the snapshot time to a later time of day, then the new snapshot +// time will be effective the following day. This ensures that two snapshots +// are not created for the current day. +// +// * If an automatic snapshot has not yet been created for the current day, +// and you change the snapshot time to an earlier time of day, then the new +// snapshot time will be effective the following day and a snapshot is automatically +// created at the previously set time for the current day. This ensures that +// a snapshot is created for the current day. +// +// * If an automatic snapshot has not yet been created for the current day, +// and you change the snapshot time to a time that is within 30 minutes from +// your current time, then the new snapshot time will be effective the following +// day and a snapshot is automatically created at the previously set time +// for the current day. This ensures that a snapshot is created for the current +// day, because 30 minutes is required between your current time and the +// new snapshot time that you specify. +// +// * If an automatic snapshot is scheduled to be created within 30 minutes +// from your current time and you change the snapshot time, then the new +// snapshot time will be effective the following day and a snapshot is automatically +// created at the previously set time for the current day. This ensures that +// a snapshot is created for the current day, because 30 minutes is required +// between your current time and the new snapshot time that you specify. +type AutoSnapshotAddOnRequest struct { + _ struct{} `type:"structure"` + + // The daily time when an automatic snapshot will be created. + // + // Constraints: + // + // * Must be in HH:00 format, and in an hourly increment. + // + // * Specified in Coordinated Universal Time (UTC). + // + // * The snapshot will be automatically created between the time specified + // and up to 45 minutes after. + SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` +} + +// String returns the string representation +func (s AutoSnapshotAddOnRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoSnapshotAddOnRequest) GoString() string { + return s.String() +} + +// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. +func (s *AutoSnapshotAddOnRequest) SetSnapshotTimeOfDay(v string) *AutoSnapshotAddOnRequest { + s.SnapshotTimeOfDay = &v + return s +} + +// Describes an automatic snapshot. 
+type AutoSnapshotDetails struct { + _ struct{} `type:"structure"` + + // The timestamp when the automatic snapshot was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The date of the automatic snapshot in YYYY-MM-DD format. + Date *string `locationName:"date" type:"string"` + + // An array of objects that describe the block storage disks attached to the + // instance when the automatic snapshot was created. + FromAttachedDisks []*AttachedDisk `locationName:"fromAttachedDisks" type:"list"` + + // The status of the automatic snapshot. + Status *string `locationName:"status" type:"string" enum:"AutoSnapshotStatus"` +} + +// String returns the string representation +func (s AutoSnapshotDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoSnapshotDetails) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *AutoSnapshotDetails) SetCreatedAt(v time.Time) *AutoSnapshotDetails { + s.CreatedAt = &v + return s +} + +// SetDate sets the Date field's value. +func (s *AutoSnapshotDetails) SetDate(v string) *AutoSnapshotDetails { + s.Date = &v + return s +} + +// SetFromAttachedDisks sets the FromAttachedDisks field's value. +func (s *AutoSnapshotDetails) SetFromAttachedDisks(v []*AttachedDisk) *AutoSnapshotDetails { + s.FromAttachedDisks = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AutoSnapshotDetails) SetStatus(v string) *AutoSnapshotDetails { + s.Status = &v + return s +} + // Describes an Availability Zone. type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -11787,20 +12462,55 @@ func (s *CloudFormationStackRecordSourceInfo) SetResourceType(v string) *CloudFo type CopySnapshotInput struct { _ struct{} `type:"structure"` - // The AWS Region where the source snapshot is located. + // The date of the automatic snapshot to copy for the new manual snapshot. + // + // Use the get auto snapshots operation to identify the dates of the available + // automatic snapshots. + // + // Constraints: + // + // * Must be specified in YYYY-MM-DD format. + // + // * This parameter cannot be defined together with the use latest restorable + // auto snapshot parameter. The restore date and use latest restorable auto + // snapshot parameters are mutually exclusive. + // + // Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + RestoreDate *string `locationName:"restoreDate" type:"string"` + + // The AWS Region where the source manual or automatic snapshot is located. // // SourceRegion is a required field SourceRegion *string `locationName:"sourceRegion" type:"string" required:"true" enum:"RegionName"` + // The name of the source resource from which the automatic snapshot was created. + // + // Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + SourceResourceName *string `locationName:"sourceResourceName" type:"string"` + // The name of the source instance or disk snapshot to be copied. 
// - // SourceSnapshotName is a required field - SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string" required:"true"` + // Define this parameter only when copying a manual snapshot as another manual + // snapshot. + SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string"` // The name of the new instance or disk snapshot to be created as a copy. // // TargetSnapshotName is a required field TargetSnapshotName *string `locationName:"targetSnapshotName" type:"string" required:"true"` + + // A Boolean value to indicate whether to use the latest available automatic + // snapshot. + // + // This parameter cannot be defined together with the restore date parameter. + // The use latest restorable auto snapshot and restore date parameters are mutually + // exclusive. + // + // Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } // String returns the string representation @@ -11819,9 +12529,6 @@ func (s *CopySnapshotInput) Validate() error { if s.SourceRegion == nil { invalidParams.Add(request.NewErrParamRequired("SourceRegion")) } - if s.SourceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName")) - } if s.TargetSnapshotName == nil { invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName")) } @@ -11832,12 +12539,24 @@ func (s *CopySnapshotInput) Validate() error { return nil } +// SetRestoreDate sets the RestoreDate field's value. +func (s *CopySnapshotInput) SetRestoreDate(v string) *CopySnapshotInput { + s.RestoreDate = &v + return s +} + // SetSourceRegion sets the SourceRegion field's value. func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput { s.SourceRegion = &v return s } +// SetSourceResourceName sets the SourceResourceName field's value. +func (s *CopySnapshotInput) SetSourceResourceName(v string) *CopySnapshotInput { + s.SourceResourceName = &v + return s +} + // SetSourceSnapshotName sets the SourceSnapshotName field's value. func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { s.SourceSnapshotName = &v @@ -11850,6 +12569,12 @@ func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput { return s } +// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. +func (s *CopySnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CopySnapshotInput { + s.UseLatestRestorableAutoSnapshot = &v + return s +} + type CopySnapshotOutput struct { _ struct{} `type:"structure"` @@ -11950,6 +12675,9 @@ func (s *CreateCloudFormationStackOutput) SetOperations(v []*Operation) *CreateC type CreateDiskFromSnapshotInput struct { _ struct{} `type:"structure"` + // An array of objects that represent the add-ons to enable for the new disk. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` + // The Availability Zone where you want to create the disk (e.g., us-east-2a). // Choose the same Availability Zone as the Lightsail instance where you want // to create the disk. @@ -11968,18 +12696,57 @@ type CreateDiskFromSnapshotInput struct { // The name of the disk snapshot (e.g., my-snapshot) from which to create the // new storage disk. 
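
A minimal sketch of the new CopySnapshotInput shape above (editorial, not part of the diff): copying the latest automatic snapshot of a source instance as a manual snapshot via SourceResourceName and UseLatestRestorableAutoSnapshot, instead of the now-optional SourceSnapshotName. The names and Region are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	// RestoreDate and UseLatestRestorableAutoSnapshot are mutually exclusive;
	// set exactly one when copying an automatic snapshot.
	out, err := svc.CopySnapshot(&lightsail.CopySnapshotInput{
		SourceRegion:                    aws.String("us-east-2"),
		SourceResourceName:              aws.String("my-instance"),
		UseLatestRestorableAutoSnapshot: aws.Bool(true),
		TargetSnapshotName:              aws.String("my-instance-manual-copy"),
	})
	if err != nil {
		fmt.Println("CopySnapshot failed:", err)
		return
	}
	fmt.Println(out.Operations)
}
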
// - // DiskSnapshotName is a required field - DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` + // This parameter cannot be defined together with the source disk name parameter. + // The disk snapshot name and source disk name parameters are mutually exclusive. + DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string"` + + // The date of the automatic snapshot to use for the new disk. + // + // Use the get auto snapshots operation to identify the dates of the available + // automatic snapshots. + // + // Constraints: + // + // * Must be specified in YYYY-MM-DD format. + // + // * This parameter cannot be defined together with the use latest restorable + // auto snapshot parameter. The restore date and use latest restorable auto + // snapshot parameters are mutually exclusive. + // + // Define this parameter only when creating a new disk from an automatic snapshot. + // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + RestoreDate *string `locationName:"restoreDate" type:"string"` // The size of the disk in GB (e.g., 32). // // SizeInGb is a required field SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"` + // The name of the source disk from which the source automatic snapshot was + // created. + // + // This parameter cannot be defined together with the disk snapshot name parameter. + // The source disk name and disk snapshot name parameters are mutually exclusive. + // + // Define this parameter only when creating a new disk from an automatic snapshot. + // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + SourceDiskName *string `locationName:"sourceDiskName" type:"string"` + // The tag keys and optional values to add to the resource during create. // // To tag a resource after it has been created, see the tag resource operation. Tags []*Tag `locationName:"tags" type:"list"` + + // A Boolean value to indicate whether to use the latest available automatic + // snapshot. + // + // This parameter cannot be defined together with the restore date parameter. + // The use latest restorable auto snapshot and restore date parameters are mutually + // exclusive. + // + // Define this parameter only when creating a new disk from an automatic snapshot. + // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). 
+ UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } // String returns the string representation @@ -12001,12 +12768,19 @@ func (s *CreateDiskFromSnapshotInput) Validate() error { if s.DiskName == nil { invalidParams.Add(request.NewErrParamRequired("DiskName")) } - if s.DiskSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) - } if s.SizeInGb == nil { invalidParams.Add(request.NewErrParamRequired("SizeInGb")) } + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12014,6 +12788,12 @@ func (s *CreateDiskFromSnapshotInput) Validate() error { return nil } +// SetAddOns sets the AddOns field's value. +func (s *CreateDiskFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateDiskFromSnapshotInput { + s.AddOns = v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *CreateDiskFromSnapshotInput) SetAvailabilityZone(v string) *CreateDiskFromSnapshotInput { s.AvailabilityZone = &v @@ -12032,18 +12812,36 @@ func (s *CreateDiskFromSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskF return s } +// SetRestoreDate sets the RestoreDate field's value. +func (s *CreateDiskFromSnapshotInput) SetRestoreDate(v string) *CreateDiskFromSnapshotInput { + s.RestoreDate = &v + return s +} + // SetSizeInGb sets the SizeInGb field's value. func (s *CreateDiskFromSnapshotInput) SetSizeInGb(v int64) *CreateDiskFromSnapshotInput { s.SizeInGb = &v return s } +// SetSourceDiskName sets the SourceDiskName field's value. +func (s *CreateDiskFromSnapshotInput) SetSourceDiskName(v string) *CreateDiskFromSnapshotInput { + s.SourceDiskName = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateDiskFromSnapshotInput) SetTags(v []*Tag) *CreateDiskFromSnapshotInput { s.Tags = v return s } +// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. +func (s *CreateDiskFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateDiskFromSnapshotInput { + s.UseLatestRestorableAutoSnapshot = &v + return s +} + type CreateDiskFromSnapshotOutput struct { _ struct{} `type:"structure"` @@ -12070,11 +12868,14 @@ func (s *CreateDiskFromSnapshotOutput) SetOperations(v []*Operation) *CreateDisk type CreateDiskInput struct { _ struct{} `type:"structure"` + // An array of objects that represent the add-ons to enable for the new disk. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` + // The Availability Zone where you want to create the disk (e.g., us-east-2a). - // Choose the same Availability Zone as the Lightsail instance where you want - // to create the disk. + // Use the same Availability Zone as the Lightsail instance to which you want + // to attach the disk. // - // Use the GetRegions operation to list the Availability Zones where Lightsail + // Use the get regions operation to list the Availability Zones where Lightsail // is currently available. 
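
Analogously for disks (editorial sketch, not part of the diff): creating a disk from a source disk's automatic snapshot taken on a specific date, using the SourceDiskName and RestoreDate fields added above. All names, the date, and the Availability Zone are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	out, err := svc.CreateDiskFromSnapshot(&lightsail.CreateDiskFromSnapshotInput{
		DiskName:         aws.String("my-restored-disk"),
		AvailabilityZone: aws.String("us-east-2a"),
		SizeInGb:         aws.Int64(32),
		SourceDiskName:   aws.String("my-disk"),    // instead of DiskSnapshotName
		RestoreDate:      aws.String("2019-10-01"), // YYYY-MM-DD; list valid dates with GetAutoSnapshots
	})
	if err != nil {
		fmt.Println("CreateDiskFromSnapshot failed:", err)
		return
	}
	fmt.Println(out.Operations)
}
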
// // AvailabilityZone is a required field @@ -12118,6 +12919,16 @@ func (s *CreateDiskInput) Validate() error { if s.SizeInGb == nil { invalidParams.Add(request.NewErrParamRequired("SizeInGb")) } + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12125,6 +12936,12 @@ func (s *CreateDiskInput) Validate() error { return nil } +// SetAddOns sets the AddOns field's value. +func (s *CreateDiskInput) SetAddOns(v []*AddOnRequest) *CreateDiskInput { + s.AddOns = v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *CreateDiskInput) SetAvailabilityZone(v string) *CreateDiskInput { s.AvailabilityZone = &v @@ -12515,6 +13332,9 @@ func (s *CreateInstanceSnapshotOutput) SetOperations(v []*Operation) *CreateInst type CreateInstancesFromSnapshotInput struct { _ struct{} `type:"structure"` + // An array of objects representing the add-ons to enable for the new instance. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` + // An object containing information about one or more disk mappings. AttachedDiskMapping map[string][]*DiskMap `locationName:"attachedDiskMapping" type:"map"` @@ -12542,17 +13362,58 @@ type CreateInstancesFromSnapshotInput struct { // Use the get instance snapshots operation to return information about your // existing snapshots. // - // InstanceSnapshotName is a required field - InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"` + // This parameter cannot be defined together with the source instance name parameter. + // The instance snapshot name and source instance name parameters are mutually + // exclusive. + InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string"` // The name for your key pair. KeyPairName *string `locationName:"keyPairName" type:"string"` + // The date of the automatic snapshot to use for the new instance. + // + // Use the get auto snapshots operation to identify the dates of the available + // automatic snapshots. + // + // Constraints: + // + // * Must be specified in YYYY-MM-DD format. + // + // * This parameter cannot be defined together with the use latest restorable + // auto snapshot parameter. The restore date and use latest restorable auto + // snapshot parameters are mutually exclusive. + // + // Define this parameter only when creating a new instance from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + RestoreDate *string `locationName:"restoreDate" type:"string"` + + // The name of the source instance from which the source automatic snapshot + // was created. + // + // This parameter cannot be defined together with the instance snapshot name + // parameter. The source instance name and instance snapshot name parameters + // are mutually exclusive. + // + // Define this parameter only when creating a new instance from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + SourceInstanceName *string `locationName:"sourceInstanceName" type:"string"` + // The tag keys and optional values to add to the resource during create. 
// // To tag a resource after it has been created, see the tag resource operation. Tags []*Tag `locationName:"tags" type:"list"` + // A Boolean value to indicate whether to use the latest available automatic + // snapshot. + // + // This parameter cannot be defined together with the restore date parameter. + // The use latest restorable auto snapshot and restore date parameters are mutually + // exclusive. + // + // Define this parameter only when creating a new instance from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` + // You can create a launch script that configures a server with additional user // data. For example, apt-get -y update. // @@ -12585,8 +13446,15 @@ func (s *CreateInstancesFromSnapshotInput) Validate() error { if s.InstanceNames == nil { invalidParams.Add(request.NewErrParamRequired("InstanceNames")) } - if s.InstanceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName")) + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -12595,6 +13463,12 @@ func (s *CreateInstancesFromSnapshotInput) Validate() error { return nil } +// SetAddOns sets the AddOns field's value. +func (s *CreateInstancesFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateInstancesFromSnapshotInput { + s.AddOns = v + return s +} + // SetAttachedDiskMapping sets the AttachedDiskMapping field's value. func (s *CreateInstancesFromSnapshotInput) SetAttachedDiskMapping(v map[string][]*DiskMap) *CreateInstancesFromSnapshotInput { s.AttachedDiskMapping = v @@ -12631,12 +13505,30 @@ func (s *CreateInstancesFromSnapshotInput) SetKeyPairName(v string) *CreateInsta return s } +// SetRestoreDate sets the RestoreDate field's value. +func (s *CreateInstancesFromSnapshotInput) SetRestoreDate(v string) *CreateInstancesFromSnapshotInput { + s.RestoreDate = &v + return s +} + +// SetSourceInstanceName sets the SourceInstanceName field's value. +func (s *CreateInstancesFromSnapshotInput) SetSourceInstanceName(v string) *CreateInstancesFromSnapshotInput { + s.SourceInstanceName = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateInstancesFromSnapshotInput) SetTags(v []*Tag) *CreateInstancesFromSnapshotInput { s.Tags = v return s } +// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. +func (s *CreateInstancesFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateInstancesFromSnapshotInput { + s.UseLatestRestorableAutoSnapshot = &v + return s +} + // SetUserData sets the UserData field's value. func (s *CreateInstancesFromSnapshotInput) SetUserData(v string) *CreateInstancesFromSnapshotInput { s.UserData = &v @@ -12670,6 +13562,9 @@ func (s *CreateInstancesFromSnapshotOutput) SetOperations(v []*Operation) *Creat type CreateInstancesInput struct { _ struct{} `type:"structure"` + // An array of objects representing the add-ons to enable for the new instance. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` + // The Availability Zone in which to create your instance. Use the following // format: us-east-2a (case sensitive). 
You can get a list of Availability Zones // by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html) @@ -12683,6 +13578,11 @@ type CreateInstancesInput struct { // Use the get blueprints operation to return a list of available images (or // blueprints). // + // Use active blueprints when creating new instances. Inactive blueprints are + // listed to support customers with existing instances and are not necessarily + // available to create new instances. Blueprints are marked inactive when they + // become outdated due to operating system updates or new application releases. + // // BlueprintId is a required field BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"` @@ -12749,6 +13649,16 @@ func (s *CreateInstancesInput) Validate() error { if s.InstanceNames == nil { invalidParams.Add(request.NewErrParamRequired("InstanceNames")) } + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12756,6 +13666,12 @@ func (s *CreateInstancesInput) Validate() error { return nil } +// SetAddOns sets the AddOns field's value. +func (s *CreateInstancesInput) SetAddOns(v []*AddOnRequest) *CreateInstancesInput { + s.AddOns = v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *CreateInstancesInput) SetAvailabilityZone(v string) *CreateInstancesInput { s.AvailabilityZone = &v @@ -13230,12 +14146,11 @@ type CreateRelationalDatabaseFromSnapshotInput struct { // // * Cannot be specified if the use latest restorable time parameter is true. // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. - // - // For example, if you wish to use a restore time of October 1, 2018, at 8 PM - // UTC, then you input 1538424000 as the restore time. + // * Specified in the Unix time format. For example, if you wish to use a + // restore time of October 1, 2018, at 8 PM UTC, then you input 1538424000 + // as the restore time. RestoreTime *time.Time `locationName:"restoreTime" type:"timestamp"` // The name of the source database. @@ -13395,9 +14310,8 @@ type CreateRelationalDatabaseInput struct { // // * The first character must be a letter. // - // * Cannot be a reserved word for the database engine you choose. - // - // For more information about reserved words in MySQL 5.6 or 5.7, see the Keywords + // * Cannot be a reserved word for the database engine you choose. For more + // information about reserved words in MySQL 5.6 or 5.7, see the Keywords // and Reserved Words articles for MySQL 5.6 (https://dev.mysql.com/doc/refman/5.6/en/keywords.html) // or MySQL 5.7 (https://dev.mysql.com/doc/refman/5.7/en/keywords.html) respectively. // @@ -13414,11 +14328,9 @@ type CreateRelationalDatabaseInput struct { // // Constraints: // - // * Must be in the hh24:mi-hh24:mi format. - // - // Example: 16:00-16:30 + // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30 // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // // * Must not conflict with the preferred maintenance window. // @@ -13439,7 +14351,7 @@ type CreateRelationalDatabaseInput struct { // // * Must be at least 30 minutes. 
// - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // // * Example: Tue:17:00-Tue:17:30 PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"` @@ -13687,17 +14599,95 @@ type CreateRelationalDatabaseSnapshotOutput struct { } // String returns the string representation -func (s CreateRelationalDatabaseSnapshotOutput) String() string { +func (s CreateRelationalDatabaseSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRelationalDatabaseSnapshotOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *CreateRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseSnapshotOutput { + s.Operations = v + return s +} + +type DeleteAutoSnapshotInput struct { + _ struct{} `type:"structure"` + + // The date of the automatic snapshot to delete in YYYY-MM-DD format. + // + // Use the get auto snapshots operation to get the available automatic snapshots + // for a resource. + // + // Date is a required field + Date *string `locationName:"date" type:"string" required:"true"` + + // The name of the source resource from which to delete the automatic snapshot. + // + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAutoSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAutoSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAutoSnapshotInput"} + if s.Date == nil { + invalidParams.Add(request.NewErrParamRequired("Date")) + } + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDate sets the Date field's value. +func (s *DeleteAutoSnapshotInput) SetDate(v string) *DeleteAutoSnapshotInput { + s.Date = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *DeleteAutoSnapshotInput) SetResourceName(v string) *DeleteAutoSnapshotInput { + s.ResourceName = &v + return s +} + +type DeleteAutoSnapshotOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of your request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s DeleteAutoSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseSnapshotOutput) GoString() string { +func (s DeleteAutoSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. 
-func (s *CreateRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseSnapshotOutput { +func (s *DeleteAutoSnapshotOutput) SetOperations(v []*Operation) *DeleteAutoSnapshotOutput { s.Operations = v return s } @@ -13709,6 +14699,10 @@ type DeleteDiskInput struct { // // DiskName is a required field DiskName *string `locationName:"diskName" type:"string" required:"true"` + + // A Boolean value to indicate whether to delete the enabled add-ons for the + // disk. + ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` } // String returns the string representation @@ -13740,10 +14734,16 @@ func (s *DeleteDiskInput) SetDiskName(v string) *DeleteDiskInput { return s } +// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. +func (s *DeleteDiskInput) SetForceDeleteAddOns(v bool) *DeleteDiskInput { + s.ForceDeleteAddOns = &v + return s +} + type DeleteDiskOutput struct { _ struct{} `type:"structure"` - // An object describing the API operations. + // An array of objects that describe the result of your request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -13965,6 +14965,10 @@ func (s *DeleteDomainOutput) SetOperation(v *Operation) *DeleteDomainOutput { type DeleteInstanceInput struct { _ struct{} `type:"structure"` + // A Boolean value to indicate whether to delete the enabled add-ons for the + // disk. + ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` + // The name of the instance to delete. // // InstanceName is a required field @@ -13994,6 +14998,12 @@ func (s *DeleteInstanceInput) Validate() error { return nil } +// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. +func (s *DeleteInstanceInput) SetForceDeleteAddOns(v bool) *DeleteInstanceInput { + s.ForceDeleteAddOns = &v + return s +} + // SetInstanceName sets the InstanceName field's value. func (s *DeleteInstanceInput) SetInstanceName(v string) *DeleteInstanceInput { s.InstanceName = &v @@ -14749,10 +15759,88 @@ func (s *DetachStaticIpOutput) SetOperations(v []*Operation) *DetachStaticIpOutp return s } -// Describes a system disk or an block storage disk. +type DisableAddOnInput struct { + _ struct{} `type:"structure"` + + // The add-on type to disable. + // + // AddOnType is a required field + AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"` + + // The name of the source resource from which to disable the add-on. + // + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableAddOnInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAddOnInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableAddOnInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAddOnInput"} + if s.AddOnType == nil { + invalidParams.Add(request.NewErrParamRequired("AddOnType")) + } + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddOnType sets the AddOnType field's value. +func (s *DisableAddOnInput) SetAddOnType(v string) *DisableAddOnInput { + s.AddOnType = &v + return s +} + +// SetResourceName sets the ResourceName field's value. 
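
Tying the two new operations together (editorial sketch, not part of the diff): listing a resource's automatic snapshots with GetAutoSnapshots and deleting one by its date with DeleteAutoSnapshot. The resource name is hypothetical, and the ordering assumption is flagged in the comments.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	snaps, err := svc.GetAutoSnapshots(&lightsail.GetAutoSnapshotsInput{
		ResourceName: aws.String("my-instance"),
	})
	if err != nil || len(snaps.AutoSnapshots) == 0 {
		fmt.Println("no automatic snapshots:", err)
		return
	}

	// Dates are YYYY-MM-DD strings. This sketch assumes the newest snapshot
	// is first, so the last entry is the oldest; verify before relying on it.
	oldest := snaps.AutoSnapshots[len(snaps.AutoSnapshots)-1]
	if _, err := svc.DeleteAutoSnapshot(&lightsail.DeleteAutoSnapshotInput{
		ResourceName: aws.String("my-instance"),
		Date:         oldest.Date,
	}); err != nil {
		fmt.Println("DeleteAutoSnapshot failed:", err)
	}
}
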
+func (s *DisableAddOnInput) SetResourceName(v string) *DisableAddOnInput { + s.ResourceName = &v + return s +} + +type DisableAddOnOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of your request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s DisableAddOnOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAddOnOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *DisableAddOnOutput) SetOperations(v []*Operation) *DisableAddOnOutput { + s.Operations = v + return s +} + +// Describes a system disk or a block storage disk. type Disk struct { _ struct{} `type:"structure"` + // An array of objects representing the add-ons enabled on the disk. + AddOns []*AddOn `locationName:"addOns" type:"list"` + // The Amazon Resource Name (ARN) of the disk. Arn *string `locationName:"arn" type:"string"` @@ -14827,6 +15915,12 @@ func (s Disk) GoString() string { return s.String() } +// SetAddOns sets the AddOns field's value. +func (s *Disk) SetAddOns(v []*AddOn) *Disk { + s.AddOns = v + return s +} + // SetArn sets the Arn field's value. func (s *Disk) SetArn(v string) *Disk { s.Arn = &v @@ -15033,6 +16127,10 @@ type DiskSnapshot struct { // snapshot was created. FromInstanceName *string `locationName:"fromInstanceName" type:"string"` + // A Boolean value indicating whether the snapshot was created from an automatic + // snapshot. + IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"` + // The AWS Region and Availability Zone where the disk snapshot was created. Location *ResourceLocation `locationName:"location" type:"structure"` @@ -15107,6 +16205,12 @@ func (s *DiskSnapshot) SetFromInstanceName(v string) *DiskSnapshot { return s } +// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value. +func (s *DiskSnapshot) SetIsFromAutoSnapshot(v bool) *DiskSnapshot { + s.IsFromAutoSnapshot = &v + return s +} + // SetLocation sets the Location field's value. func (s *DiskSnapshot) SetLocation(v *ResourceLocation) *DiskSnapshot { s.Location = v @@ -15414,6 +16518,86 @@ func (s *DownloadDefaultKeyPairOutput) SetPublicKeyBase64(v string) *DownloadDef return s } +type EnableAddOnInput struct { + _ struct{} `type:"structure"` + + // An array of strings representing the add-on to enable or modify. + // + // AddOnRequest is a required field + AddOnRequest *AddOnRequest `locationName:"addOnRequest" type:"structure" required:"true"` + + // The name of the source resource for which to enable or modify the add-on. + // + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableAddOnInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAddOnInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EnableAddOnInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableAddOnInput"}
+	if s.AddOnRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("AddOnRequest"))
+	}
+	if s.ResourceName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+	}
+	if s.AddOnRequest != nil {
+		if err := s.AddOnRequest.Validate(); err != nil {
+			invalidParams.AddNested("AddOnRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAddOnRequest sets the AddOnRequest field's value.
+func (s *EnableAddOnInput) SetAddOnRequest(v *AddOnRequest) *EnableAddOnInput {
+	s.AddOnRequest = v
+	return s
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *EnableAddOnInput) SetResourceName(v string) *EnableAddOnInput {
+	s.ResourceName = &v
+	return s
+}
+
+type EnableAddOnOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the result of your request.
+	Operations []*Operation `locationName:"operations" type:"list"`
+}
+
+// String returns the string representation
+func (s EnableAddOnOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAddOnOutput) GoString() string {
+	return s.String()
+}
+
+// SetOperations sets the Operations field's value.
+func (s *EnableAddOnOutput) SetOperations(v []*Operation) *EnableAddOnOutput {
+	s.Operations = v
+	return s
+}
+
type ExportSnapshotInput struct {
	_ struct{} `type:"structure"`

@@ -15706,6 +16890,86 @@ func (s *GetActiveNamesOutput) SetNextPageToken(v string) *GetActiveNamesOutput
	return s
}

+type GetAutoSnapshotsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the source resource from which to get automatic snapshot information.
+	//
+	// ResourceName is a required field
+	ResourceName *string `locationName:"resourceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAutoSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAutoSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAutoSnapshotsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetAutoSnapshotsInput"}
+	if s.ResourceName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceName sets the ResourceName field's value.
+func (s *GetAutoSnapshotsInput) SetResourceName(v string) *GetAutoSnapshotsInput {
+	s.ResourceName = &v
+	return s
+}
+
+type GetAutoSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of objects that describe the automatic snapshots that are available
+	// for the specified source resource.
+	AutoSnapshots []*AutoSnapshotDetails `locationName:"autoSnapshots" type:"list"`
+
+	// The name of the source resource for the automatic snapshots.
+	ResourceName *string `locationName:"resourceName" type:"string"`
+
+	// The resource type (e.g., Instance or Disk).
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` +} + +// String returns the string representation +func (s GetAutoSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAutoSnapshotsOutput) GoString() string { + return s.String() +} + +// SetAutoSnapshots sets the AutoSnapshots field's value. +func (s *GetAutoSnapshotsOutput) SetAutoSnapshots(v []*AutoSnapshotDetails) *GetAutoSnapshotsOutput { + s.AutoSnapshots = v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *GetAutoSnapshotsOutput) SetResourceName(v string) *GetAutoSnapshotsOutput { + s.ResourceName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *GetAutoSnapshotsOutput) SetResourceType(v string) *GetAutoSnapshotsOutput { + s.ResourceType = &v + return s +} + type GetBlueprintsInput struct { _ struct{} `type:"structure"` @@ -17074,82 +18338,62 @@ type GetLoadBalancerMetricDataInput struct { // // * ClientTLSNegotiationErrorCount - The number of TLS connections initiated // by the client that did not establish a session with the load balancer. - // Possible causes include a mismatch of ciphers or protocols. - // - // Statistics: The most useful statistic is Sum. + // Possible causes include a mismatch of ciphers or protocols. Statistics: + // The most useful statistic is Sum. // // * HealthyHostCount - The number of target instances that are considered - // healthy. - // - // Statistics: The most useful statistic are Average, Minimum, and Maximum. + // healthy. Statistics: The most useful statistic are Average, Minimum, and + // Maximum. // // * UnhealthyHostCount - The number of target instances that are considered - // unhealthy. - // - // Statistics: The most useful statistic are Average, Minimum, and Maximum. + // unhealthy. Statistics: The most useful statistic are Average, Minimum, + // and Maximum. // // * HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that // originate from the load balancer. Client errors are generated when requests // are malformed or incomplete. These requests have not been received by // the target instance. This count does not include any response codes generated - // by the target instances. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the target instances. Statistics: The most useful statistic is Sum. + // Note that Minimum, Maximum, and Average all return 1. // // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that // originate from the load balancer. This count does not include any response - // codes generated by the target instances. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. Note that Minimum, Maximum, and Average all - // return 1. + // codes generated by the target instances. Statistics: The most useful statistic + // is Sum. Note that Minimum, Maximum, and Average all return 1. Note that + // Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_2XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. 
Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_3XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_4XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_5XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * InstanceResponseTime - The time elapsed, in seconds, after the request // leaves the load balancer until a response from the target instance is - // received. - // - // Statistics: The most useful statistic is Average. + // received. Statistics: The most useful statistic is Average. // // * RejectedConnectionCount - The number of connections that were rejected // because the load balancer had reached its maximum number of connections. - // - // Statistics: The most useful statistic is Sum. + // Statistics: The most useful statistic is Sum. // // * RequestCount - The number of requests processed over IPv4. This count // includes only the requests with a response generated by a target instance - // of the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // of the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // MetricName is a required field MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"LoadBalancerMetricName"` @@ -17291,82 +18535,62 @@ type GetLoadBalancerMetricDataOutput struct { // // * ClientTLSNegotiationErrorCount - The number of TLS connections initiated // by the client that did not establish a session with the load balancer. - // Possible causes include a mismatch of ciphers or protocols. - // - // Statistics: The most useful statistic is Sum. + // Possible causes include a mismatch of ciphers or protocols. Statistics: + // The most useful statistic is Sum. // // * HealthyHostCount - The number of target instances that are considered - // healthy. - // - // Statistics: The most useful statistic are Average, Minimum, and Maximum. + // healthy. Statistics: The most useful statistic are Average, Minimum, and + // Maximum. // // * UnhealthyHostCount - The number of target instances that are considered - // unhealthy. - // - // Statistics: The most useful statistic are Average, Minimum, and Maximum. + // unhealthy. Statistics: The most useful statistic are Average, Minimum, + // and Maximum. 
// // * HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that // originate from the load balancer. Client errors are generated when requests // are malformed or incomplete. These requests have not been received by // the target instance. This count does not include any response codes generated - // by the target instances. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the target instances. Statistics: The most useful statistic is Sum. + // Note that Minimum, Maximum, and Average all return 1. // // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that // originate from the load balancer. This count does not include any response - // codes generated by the target instances. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. Note that Minimum, Maximum, and Average all - // return 1. + // codes generated by the target instances. Statistics: The most useful statistic + // is Sum. Note that Minimum, Maximum, and Average all return 1. Note that + // Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_2XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_3XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_4XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * HTTPCode_Instance_5XX_Count - The number of HTTP response codes generated // by the target instances. This does not include any response codes generated - // by the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // by the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. // // * InstanceResponseTime - The time elapsed, in seconds, after the request // leaves the load balancer until a response from the target instance is - // received. - // - // Statistics: The most useful statistic is Average. + // received. Statistics: The most useful statistic is Average. // // * RejectedConnectionCount - The number of connections that were rejected // because the load balancer had reached its maximum number of connections. - // - // Statistics: The most useful statistic is Sum. + // Statistics: The most useful statistic is Sum. // // * RequestCount - The number of requests processed over IPv4. 
This count // includes only the requests with a response generated by a target instance - // of the load balancer. - // - // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, - // and Average all return 1. + // of the load balancer. Statistics: The most useful statistic is Sum. Note + // that Minimum, Maximum, and Average all return 1. MetricName *string `locationName:"metricName" type:"string" enum:"LoadBalancerMetricName"` } @@ -18063,12 +19287,11 @@ type GetRelationalDatabaseLogEventsInput struct { // // Constraints: // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. - // - // For example, if you wish to use an end time of October 1, 2018, at 8 PM UTC, - // then you input 1538424000 as the end time. + // * Specified in the Unix time format. For example, if you wish to use an + // end time of October 1, 2018, at 8 PM UTC, then you input 1538424000 as + // the end time. EndTime *time.Time `locationName:"endTime" type:"timestamp"` // The name of the log stream. @@ -18092,19 +19315,18 @@ type GetRelationalDatabaseLogEventsInput struct { // specified, the log event starts from the head of the log. If false is specified, // the log event starts from the tail of the log. // - // Default: false + // For PostgreSQL, the default value of false is the only option available. StartFromHead *bool `locationName:"startFromHead" type:"boolean"` // The start of the time interval from which to get log events. // // Constraints: // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. - // - // For example, if you wish to use a start time of October 1, 2018, at 8 PM - // UTC, then you input 1538424000 as the start time. + // * Specified in the Unix time format. For example, if you wish to use a + // start time of October 1, 2018, at 8 PM UTC, then you input 1538424000 + // as the start time. StartTime *time.Time `locationName:"startTime" type:"timestamp"` } @@ -18370,12 +19592,11 @@ type GetRelationalDatabaseMetricDataInput struct { // // Constraints: // - // * Specified in Universal Coordinated Time (UTC). - // - // * Specified in the Unix time format. + // * Specified in Coordinated Universal Time (UTC). // - // For example, if you wish to use an end time of October 1, 2018, at 8 PM UTC, - // then you input 1538424000 as the end time. + // * Specified in the Unix time format. For example, if you wish to use an + // end time of October 1, 2018, at 8 PM UTC, then you input 1538424000 as + // the end time. // // EndTime is a required field EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"` @@ -18399,12 +19620,11 @@ type GetRelationalDatabaseMetricDataInput struct { // // Constraints: // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. - // - // For example, if you wish to use a start time of October 1, 2018, at 8 PM - // UTC, then you input 1538424000 as the start time. + // * Specified in the Unix time format. For example, if you wish to use a + // start time of October 1, 2018, at 8 PM UTC, then you input 1538424000 + // as the start time. 
// // StartTime is a required field StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` @@ -18952,24 +20172,16 @@ type HostKeyAttributes struct { // The SHA-1 fingerprint of the returned SSH host key or RDP certificate. // - // * Example of an SHA-1 SSH fingerprint: - // - // SHA1:1CHH6FaAaXjtFOsR/t83vf91SR0 + // * Example of an SHA-1 SSH fingerprint: SHA1:1CHH6FaAaXjtFOsR/t83vf91SR0 // - // * Example of an SHA-1 RDP fingerprint: - // - // af:34:51:fe:09:f0:e0:da:b8:4e:56:ca:60:c2:10:ff:38:06:db:45 + // * Example of an SHA-1 RDP fingerprint: af:34:51:fe:09:f0:e0:da:b8:4e:56:ca:60:c2:10:ff:38:06:db:45 FingerprintSHA1 *string `locationName:"fingerprintSHA1" type:"string"` // The SHA-256 fingerprint of the returned SSH host key or RDP certificate. // - // * Example of an SHA-256 SSH fingerprint: - // - // SHA256:KTsMnRBh1IhD17HpdfsbzeGA4jOijm5tyXsMjKVbB8o + // * Example of an SHA-256 SSH fingerprint: SHA256:KTsMnRBh1IhD17HpdfsbzeGA4jOijm5tyXsMjKVbB8o // - // * Example of an SHA-256 RDP fingerprint: - // - // 03:9b:36:9f:4b:de:4e:61:70:fc:7c:c9:78:e7:d2:1a:1c:25:a8:0c:91:f6:7c:e4:d6:a0:85:c8:b4:53:99:68 + // * Example of an SHA-256 RDP fingerprint: 03:9b:36:9f:4b:de:4e:61:70:fc:7c:c9:78:e7:d2:1a:1c:25:a8:0c:91:f6:7c:e4:d6:a0:85:c8:b4:53:99:68 FingerprintSHA256 *string `locationName:"fingerprintSHA256" type:"string"` // The returned RDP certificate is not valid after this point in time. @@ -19120,6 +20332,9 @@ func (s *ImportKeyPairOutput) SetOperation(v *Operation) *ImportKeyPairOutput { type Instance struct { _ struct{} `type:"structure"` + // An array of objects representing the add-ons enabled on the instance. + AddOns []*AddOn `locationName:"addOns" type:"list"` + // The Amazon Resource Name (ARN) of the instance (e.g., arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE). Arn *string `locationName:"arn" type:"string"` @@ -19193,6 +20408,12 @@ func (s Instance) GoString() string { return s.String() } +// SetAddOns sets the AddOns field's value. +func (s *Instance) SetAddOns(v []*AddOn) *Instance { + s.AddOns = v + return s +} + // SetArn sets the Arn field's value. func (s *Instance) SetArn(v string) *Instance { s.Arn = &v @@ -19867,7 +21088,7 @@ func (s *InstancePortState) SetToPort(v int64) *InstancePortState { return s } -// Describes the snapshot of the virtual private server, or instance. +// Describes an instance snapshot. type InstanceSnapshot struct { _ struct{} `type:"structure"` @@ -19895,6 +21116,10 @@ type InstanceSnapshot struct { // The instance from which the snapshot was created. FromInstanceName *string `locationName:"fromInstanceName" type:"string"` + // A Boolean value indicating whether the snapshot was created from an automatic + // snapshot. + IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"` + // The region name and Availability Zone where you created the snapshot. Location *ResourceLocation `locationName:"location" type:"structure"` @@ -19975,6 +21200,12 @@ func (s *InstanceSnapshot) SetFromInstanceName(v string) *InstanceSnapshot { return s } +// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value. +func (s *InstanceSnapshot) SetIsFromAutoSnapshot(v bool) *InstanceSnapshot { + s.IsFromAutoSnapshot = &v + return s +} + // SetLocation sets the Location field's value. 
func (s *InstanceSnapshot) SetLocation(v *ResourceLocation) *InstanceSnapshot { s.Location = v @@ -21066,7 +22297,7 @@ type Operation struct { // A Boolean value indicating whether the operation is terminal. IsTerminal *bool `locationName:"isTerminal" type:"boolean"` - // The region and Availability Zone. + // The AWS Region and Availability Zone. Location *ResourceLocation `locationName:"location" type:"structure"` // Details about the operation (e.g., Debian-1GB-Ohio-1). @@ -22951,6 +24182,10 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the resource to which you want to add a + // tag. + ResourceArn *string `locationName:"resourceArn" type:"string"` + // The name of the resource to which you are adding tags. // // ResourceName is a required field @@ -22988,6 +24223,12 @@ func (s *TagResourceInput) Validate() error { return nil } +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + // SetResourceName sets the ResourceName field's value. func (s *TagResourceInput) SetResourceName(v string) *TagResourceInput { s.ResourceName = &v @@ -23063,6 +24304,10 @@ func (s *UnpeerVpcOutput) SetOperation(v *Operation) *UnpeerVpcOutput { type UntagResourceInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the resource from which you want to remove + // a tag. + ResourceArn *string `locationName:"resourceArn" type:"string"` + // The name of the resource from which you are removing a tag. // // ResourceName is a required field @@ -23100,6 +24345,12 @@ func (s *UntagResourceInput) Validate() error { return nil } +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + // SetResourceName sets the ResourceName field's value. func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput { s.ResourceName = &v @@ -23338,11 +24589,9 @@ type UpdateRelationalDatabaseInput struct { // // Constraints: // - // * Must be in the hh24:mi-hh24:mi format. + // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30 // - // Example: 16:00-16:30 - // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). // // * Must not conflict with the preferred maintenance window. // @@ -23362,7 +24611,7 @@ type UpdateRelationalDatabaseInput struct { // // * Must be at least 30 minutes. // - // * Specified in Universal Coordinated Time (UTC). + // * Specified in Coordinated Universal Time (UTC). 
// // * Example: Tue:17:00-Tue:17:30 PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"` @@ -23570,6 +24819,25 @@ const ( AccessDirectionOutbound = "outbound" ) +const ( + // AddOnTypeAutoSnapshot is a AddOnType enum value + AddOnTypeAutoSnapshot = "AutoSnapshot" +) + +const ( + // AutoSnapshotStatusSuccess is a AutoSnapshotStatus enum value + AutoSnapshotStatusSuccess = "Success" + + // AutoSnapshotStatusFailed is a AutoSnapshotStatus enum value + AutoSnapshotStatusFailed = "Failed" + + // AutoSnapshotStatusInProgress is a AutoSnapshotStatus enum value + AutoSnapshotStatusInProgress = "InProgress" + + // AutoSnapshotStatusNotFound is a AutoSnapshotStatus enum value + AutoSnapshotStatusNotFound = "NotFound" +) + const ( // BlueprintTypeOs is a BlueprintType enum value BlueprintTypeOs = "os" @@ -24161,6 +25429,12 @@ const ( // OperationTypeStopRelationalDatabase is a OperationType enum value OperationTypeStopRelationalDatabase = "StopRelationalDatabase" + + // OperationTypeEnableAddOn is a OperationType enum value + OperationTypeEnableAddOn = "EnableAddOn" + + // OperationTypeDisableAddOn is a OperationType enum value + OperationTypeDisableAddOn = "DisableAddOn" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go index b9f97faa8a6..092633b75a2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go @@ -46,11 +46,11 @@ const ( // svc := lightsail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lightsail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lightsail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lightsail { svc := &Lightsail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/api.go index d09b97380f6..a3a890dcb22 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/api.go @@ -468,7 +468,7 @@ func (c *Macie) ListMemberAccountsWithContext(ctx aws.Context, input *ListMember // // Example iterating over at most 3 pages of a ListMemberAccounts operation. 
// pageNum := 0 // err := client.ListMemberAccountsPages(params, -// func(page *ListMemberAccountsOutput, lastPage bool) bool { +// func(page *macie.ListMemberAccountsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -500,10 +500,12 @@ func (c *Macie) ListMemberAccountsPagesWithContext(ctx aws.Context, input *ListM }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMemberAccountsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMemberAccountsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -614,7 +616,7 @@ func (c *Macie) ListS3ResourcesWithContext(ctx aws.Context, input *ListS3Resourc // // Example iterating over at most 3 pages of a ListS3Resources operation. // pageNum := 0 // err := client.ListS3ResourcesPages(params, -// func(page *ListS3ResourcesOutput, lastPage bool) bool { +// func(page *macie.ListS3ResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -646,10 +648,12 @@ func (c *Macie) ListS3ResourcesPagesWithContext(ctx aws.Context, input *ListS3Re }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListS3ResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListS3ResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/service.go index 0b38598f0fe..9e5f20a2699 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/macie/service.go @@ -46,11 +46,11 @@ const ( // svc := macie.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Macie { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Macie { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Macie { svc := &Macie{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-12-19", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go index 396379b8739..bea6e734bed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go @@ -1153,7 +1153,7 @@ func (c *ManagedBlockchain) ListInvitationsWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListInvitations operation. 
// pageNum := 0 // err := client.ListInvitationsPages(params, -// func(page *ListInvitationsOutput, lastPage bool) bool { +// func(page *managedblockchain.ListInvitationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1185,10 +1185,12 @@ func (c *ManagedBlockchain) ListInvitationsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInvitationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInvitationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1302,7 +1304,7 @@ func (c *ManagedBlockchain) ListMembersWithContext(ctx aws.Context, input *ListM // // Example iterating over at most 3 pages of a ListMembers operation. // pageNum := 0 // err := client.ListMembersPages(params, -// func(page *ListMembersOutput, lastPage bool) bool { +// func(page *managedblockchain.ListMembersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1334,10 +1336,12 @@ func (c *ManagedBlockchain) ListMembersPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMembersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMembersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1452,7 +1456,7 @@ func (c *ManagedBlockchain) ListNetworksWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListNetworks operation. // pageNum := 0 // err := client.ListNetworksPages(params, -// func(page *ListNetworksOutput, lastPage bool) bool { +// func(page *managedblockchain.ListNetworksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1484,10 +1488,12 @@ func (c *ManagedBlockchain) ListNetworksPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNetworksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNetworksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1601,7 +1607,7 @@ func (c *ManagedBlockchain) ListNodesWithContext(ctx aws.Context, input *ListNod // // Example iterating over at most 3 pages of a ListNodes operation. // pageNum := 0 // err := client.ListNodesPages(params, -// func(page *ListNodesOutput, lastPage bool) bool { +// func(page *managedblockchain.ListNodesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1633,10 +1639,12 @@ func (c *ManagedBlockchain) ListNodesPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNodesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNodesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1751,7 +1759,7 @@ func (c *ManagedBlockchain) ListProposalVotesWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListProposalVotes operation. 
// pageNum := 0 // err := client.ListProposalVotesPages(params, -// func(page *ListProposalVotesOutput, lastPage bool) bool { +// func(page *managedblockchain.ListProposalVotesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1783,10 +1791,12 @@ func (c *ManagedBlockchain) ListProposalVotesPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListProposalVotesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListProposalVotesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1904,7 +1914,7 @@ func (c *ManagedBlockchain) ListProposalsWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListProposals operation. // pageNum := 0 // err := client.ListProposalsPages(params, -// func(page *ListProposalsOutput, lastPage bool) bool { +// func(page *managedblockchain.ListProposalsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1936,10 +1946,12 @@ func (c *ManagedBlockchain) ListProposalsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListProposalsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListProposalsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4031,13 +4043,13 @@ type Member struct { // // * DELETING - The member and all associated resources are in the process // of being deleted. Either the AWS account that owns the member deleted - // it, or the member is being deleted as the result of an APPROVEDPROPOSAL + // it, or the member is being deleted as the result of an APPROVED PROPOSAL // to remove the member. // // * DELETED - The member can no longer participate on the network and all // associated resources are deleted. Either the AWS account that owns the // member deleted it, or the member is being deleted as the result of an - // APPROVEDPROPOSAL to remove the member. + // APPROVED PROPOSAL to remove the member. Status *string `type:"string" enum:"MemberStatus"` } @@ -4359,13 +4371,13 @@ type MemberSummary struct { // // * DELETING - The member and all associated resources are in the process // of being deleted. Either the AWS account that owns the member deleted - // it, or the member is being deleted as the result of an APPROVEDPROPOSAL + // it, or the member is being deleted as the result of an APPROVED PROPOSAL // to remove the member. // // * DELETED - The member can no longer participate on the network and all // associated resources are deleted. Either the AWS account that owns the // member deleted it, or the member is being deleted as the result of an - // APPROVEDPROPOSAL to remove the member. + // APPROVED PROPOSAL to remove the member. 
Status *string `type:"string" enum:"MemberStatus"` } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go index d3049e6c546..acd42867a67 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ManagedBlockchain { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "managedblockchain" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ManagedBlockchain { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ManagedBlockchain { svc := &ManagedBlockchain{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-24", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go index 5408085acc9..552798277ef 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go @@ -669,7 +669,7 @@ func (c *MediaConnect) ListEntitlementsWithContext(ctx aws.Context, input *ListE // // Example iterating over at most 3 pages of a ListEntitlements operation. // pageNum := 0 // err := client.ListEntitlementsPages(params, -// func(page *ListEntitlementsOutput, lastPage bool) bool { +// func(page *mediaconnect.ListEntitlementsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -701,10 +701,12 @@ func (c *MediaConnect) ListEntitlementsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEntitlementsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEntitlementsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -822,7 +824,7 @@ func (c *MediaConnect) ListFlowsWithContext(ctx aws.Context, input *ListFlowsInp // // Example iterating over at most 3 pages of a ListFlows operation. 
// pageNum := 0 // err := client.ListFlowsPages(params, -// func(page *ListFlowsOutput, lastPage bool) bool { +// func(page *mediaconnect.ListFlowsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -854,10 +856,12 @@ func (c *MediaConnect) ListFlowsPagesWithContext(ctx aws.Context, input *ListFlo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFlowsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFlowsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1990,14 +1994,17 @@ func (s *AddFlowOutputsOutput) SetOutputs(v []*Output) *AddFlowOutputsOutput { type AddOutputRequest struct { _ struct{} `type:"structure"` + // The range of IP addresses that should be allowed to initiate output requests + // to this flow. These IP addresses should be in the form of a Classless Inter-Domain + // Routing (CIDR) block; for example, 10.0.0.0/16. + CidrAllowList []*string `locationName:"cidrAllowList" type:"list"` + // A description of the output. This description appears only on the AWS Elemental // MediaConnect console and will not be seen by the end user. Description *string `locationName:"description" type:"string"` // The IP address from which video will be sent to output destinations. - // - // Destination is a required field - Destination *string `locationName:"destination" type:"string" required:"true"` + Destination *string `locationName:"destination" type:"string"` // The type of key used for the encryption. If no keyType is provided, the service // will use the default setting (static-key). @@ -2010,16 +2017,17 @@ type AddOutputRequest struct { Name *string `locationName:"name" type:"string"` // The port to use when content is distributed to this output. - // - // Port is a required field - Port *int64 `locationName:"port" type:"integer" required:"true"` + Port *int64 `locationName:"port" type:"integer"` // The protocol to use for the output. // // Protocol is a required field Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"Protocol"` - // The smoothing latency in milliseconds for RTP and RTP-FEC streams. + // The remote ID for the Zixi-pull output stream. + RemoteId *string `locationName:"remoteId" type:"string"` + + // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. SmoothingLatency *int64 `locationName:"smoothingLatency" type:"integer"` // The stream ID that you want to use for this transport. This parameter applies @@ -2040,12 +2048,6 @@ func (s AddOutputRequest) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *AddOutputRequest) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AddOutputRequest"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) - } if s.Protocol == nil { invalidParams.Add(request.NewErrParamRequired("Protocol")) } @@ -2061,6 +2063,12 @@ func (s *AddOutputRequest) Validate() error { return nil } +// SetCidrAllowList sets the CidrAllowList field's value. +func (s *AddOutputRequest) SetCidrAllowList(v []*string) *AddOutputRequest { + s.CidrAllowList = v + return s +} + // SetDescription sets the Description field's value. 
func (s *AddOutputRequest) SetDescription(v string) *AddOutputRequest { s.Description = &v @@ -2103,6 +2111,12 @@ func (s *AddOutputRequest) SetProtocol(v string) *AddOutputRequest { return s } +// SetRemoteId sets the RemoteId field's value. +func (s *AddOutputRequest) SetRemoteId(v string) *AddOutputRequest { + s.RemoteId = &v + return s +} + // SetSmoothingLatency sets the SmoothingLatency field's value. func (s *AddOutputRequest) SetSmoothingLatency(v int64) *AddOutputRequest { s.SmoothingLatency = &v @@ -2400,21 +2414,45 @@ type Encryption struct { // Algorithm is a required field Algorithm *string `locationName:"algorithm" type:"string" required:"true" enum:"Algorithm"` + // A 128-bit, 16-byte hex value represented by a 32-character string, to be + // used with the key for encrypting content. This parameter is not valid for + // static key encryption. + ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` + + // The value of one of the devices that you configured with your digital rights + // management (DRM) platform key provider. This parameter is required for SPEKE + // encryption and is not valid for static key encryption. + DeviceId *string `locationName:"deviceId" type:"string"` + // The type of key that is used for the encryption. If no keyType is provided, // the service will use the default setting (static-key). KeyType *string `locationName:"keyType" type:"string" enum:"KeyType"` + // The AWS Region that the API Gateway proxy endpoint was created in. This parameter + // is required for SPEKE encryption and is not valid for static key encryption. + Region *string `locationName:"region" type:"string"` + + // An identifier for the content. The service sends this value to the key server + // to identify the current endpoint. The resource ID is also known as the content + // ID. This parameter is required for SPEKE encryption and is not valid for + // static key encryption. + ResourceId *string `locationName:"resourceId" type:"string"` + // The ARN of the role that you created during setup (when you set up AWS Elemental // MediaConnect as a trusted entity). // // RoleArn is a required field RoleArn *string `locationName:"roleArn" type:"string" required:"true"` - // The ARN that was assigned to the secret that you created in AWS Secrets Manager - // to store the encryption key. - // - // SecretArn is a required field - SecretArn *string `locationName:"secretArn" type:"string" required:"true"` + // The ARN of the secret that you created in AWS Secrets Manager to store the + // encryption key. This parameter is required for static key encryption and + // is not valid for SPEKE encryption. + SecretArn *string `locationName:"secretArn" type:"string"` + + // The URL from the API Gateway proxy that you set up to talk to your key server. + // This parameter is required for SPEKE encryption and is not valid for static + // key encryption. + Url *string `locationName:"url" type:"string"` } // String returns the string representation @@ -2436,9 +2474,6 @@ func (s *Encryption) Validate() error { if s.RoleArn == nil { invalidParams.Add(request.NewErrParamRequired("RoleArn")) } - if s.SecretArn == nil { - invalidParams.Add(request.NewErrParamRequired("SecretArn")) - } if invalidParams.Len() > 0 { return invalidParams @@ -2452,12 +2487,36 @@ func (s *Encryption) SetAlgorithm(v string) *Encryption { return s } +// SetConstantInitializationVector sets the ConstantInitializationVector field's value. 
+func (s *Encryption) SetConstantInitializationVector(v string) *Encryption { + s.ConstantInitializationVector = &v + return s +} + +// SetDeviceId sets the DeviceId field's value. +func (s *Encryption) SetDeviceId(v string) *Encryption { + s.DeviceId = &v + return s +} + // SetKeyType sets the KeyType field's value. func (s *Encryption) SetKeyType(v string) *Encryption { s.KeyType = &v return s } +// SetRegion sets the Region field's value. +func (s *Encryption) SetRegion(v string) *Encryption { + s.Region = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *Encryption) SetResourceId(v string) *Encryption { + s.ResourceId = &v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *Encryption) SetRoleArn(v string) *Encryption { s.RoleArn = &v @@ -2470,10 +2529,19 @@ func (s *Encryption) SetSecretArn(v string) *Encryption { return s } +// SetUrl sets the Url field's value. +func (s *Encryption) SetUrl(v string) *Encryption { + s.Url = &v + return s +} + // The settings for a flow entitlement. type Entitlement struct { _ struct{} `type:"structure"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // A description of the entitlement. Description *string `locationName:"description" type:"string"` @@ -2509,6 +2577,12 @@ func (s Entitlement) GoString() string { return s.String() } +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *Entitlement) SetDataTransferSubscriberFeePercent(v int64) *Entitlement { + s.DataTransferSubscriberFeePercent = &v + return s +} + // SetDescription sets the Description field's value. func (s *Entitlement) SetDescription(v string) *Entitlement { s.Description = &v @@ -2656,6 +2730,9 @@ func (s *Flow) SetStatus(v string) *Flow { type GrantEntitlementRequest struct { _ struct{} `type:"structure"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // A description of the entitlement. This description appears only on the AWS // Elemental MediaConnect console and will not be seen by the subscriber or // end user. @@ -2705,6 +2782,12 @@ func (s *GrantEntitlementRequest) Validate() error { return nil } +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *GrantEntitlementRequest) SetDataTransferSubscriberFeePercent(v int64) *GrantEntitlementRequest { + s.DataTransferSubscriberFeePercent = &v + return s +} + // SetDescription sets the Description field's value. func (s *GrantEntitlementRequest) SetDescription(v string) *GrantEntitlementRequest { s.Description = &v @@ -3057,6 +3140,9 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe type ListedEntitlement struct { _ struct{} `type:"structure"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // The ARN of the entitlement. // // EntitlementArn is a required field @@ -3078,6 +3164,12 @@ func (s ListedEntitlement) GoString() string { return s.String() } +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. 
+func (s *ListedEntitlement) SetDataTransferSubscriberFeePercent(v int64) *ListedEntitlement { + s.DataTransferSubscriberFeePercent = &v + return s +} + // SetEntitlementArn sets the EntitlementArn field's value. func (s *ListedEntitlement) SetEntitlementArn(v string) *ListedEntitlement { s.EntitlementArn = &v @@ -3205,6 +3297,9 @@ func (s *Messages) SetErrors(v []*string) *Messages { type Output struct { _ struct{} `type:"structure"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // A description of the output. Description *string `locationName:"description" type:"string"` @@ -3250,6 +3345,12 @@ func (s Output) GoString() string { return s.String() } +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *Output) SetDataTransferSubscriberFeePercent(v int64) *Output { + s.DataTransferSubscriberFeePercent = &v + return s +} + // SetDescription sets the Description field's value. func (s *Output) SetDescription(v string) *Output { s.Description = &v @@ -3500,10 +3601,11 @@ type SetSourceRequest struct { // The port that the flow will be listening on for incoming content. IngestPort *int64 `locationName:"ingestPort" type:"integer"` - // The smoothing max bitrate for RTP and RTP-FEC streams. + // The smoothing max bitrate for RIST, RTP, and RTP-FEC streams. MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - // The maximum latency in milliseconds for Zixi-based streams. + // The maximum latency in milliseconds. This parameter applies only to RIST-based + // and Zixi-based streams. MaxLatency *int64 `locationName:"maxLatency" type:"integer"` // The name of the source. @@ -3517,7 +3619,7 @@ type SetSourceRequest struct { StreamId *string `locationName:"streamId" type:"string"` // The range of IP addresses that should be allowed to contribute content to - // your source. These IP addresses should in the form of a Classless Inter-Domain + // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. WhitelistCidr *string `locationName:"whitelistCidr" type:"string"` } @@ -3611,6 +3713,9 @@ func (s *SetSourceRequest) SetWhitelistCidr(v string) *SetSourceRequest { type Source struct { _ struct{} `type:"structure"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // The type of encryption that is used on the content ingested from this source. Decryption *Encryption `locationName:"decryption" type:"structure"` @@ -3643,7 +3748,7 @@ type Source struct { Transport *Transport `locationName:"transport" type:"structure"` // The range of IP addresses that should be allowed to contribute content to - // your source. These IP addresses should in the form of a Classless Inter-Domain + // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. WhitelistCidr *string `locationName:"whitelistCidr" type:"string"` } @@ -3658,6 +3763,12 @@ func (s Source) GoString() string { return s.String() } +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. 
+func (s *Source) SetDataTransferSubscriberFeePercent(v int64) *Source { + s.DataTransferSubscriberFeePercent = &v + return s +} + // SetDecryption sets the Decryption field's value. func (s *Source) SetDecryption(v *Encryption) *Source { s.Decryption = v @@ -3930,10 +4041,16 @@ func (s TagResourceOutput) GoString() string { type Transport struct { _ struct{} `type:"structure"` - // The smoothing max bitrate for RTP and RTP-FEC streams. + // The range of IP addresses that should be allowed to initiate output requests + // to this flow. These IP addresses should be in the form of a Classless Inter-Domain + // Routing (CIDR) block; for example, 10.0.0.0/16. + CidrAllowList []*string `locationName:"cidrAllowList" type:"list"` + + // The smoothing max bitrate for RIST, RTP, and RTP-FEC streams. MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - // The maximum latency in milliseconds for Zixi-based streams. + // The maximum latency in milliseconds. This parameter applies only to RIST-based + // and Zixi-based streams. MaxLatency *int64 `locationName:"maxLatency" type:"integer"` // The protocol that is used by the source or output. @@ -3941,7 +4058,10 @@ type Transport struct { // Protocol is a required field Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"Protocol"` - // The smoothing latency in milliseconds for RTP and RTP-FEC streams. + // The remote ID for the Zixi-pull stream. + RemoteId *string `locationName:"remoteId" type:"string"` + + // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. SmoothingLatency *int64 `locationName:"smoothingLatency" type:"integer"` // The stream ID that you want to use for this transport. This parameter applies @@ -3959,6 +4079,12 @@ func (s Transport) GoString() string { return s.String() } +// SetCidrAllowList sets the CidrAllowList field's value. +func (s *Transport) SetCidrAllowList(v []*string) *Transport { + s.CidrAllowList = v + return s +} + // SetMaxBitrate sets the MaxBitrate field's value. func (s *Transport) SetMaxBitrate(v int64) *Transport { s.MaxBitrate = &v @@ -3977,6 +4103,12 @@ func (s *Transport) SetProtocol(v string) *Transport { return s } +// SetRemoteId sets the RemoteId field's value. +func (s *Transport) SetRemoteId(v string) *Transport { + s.RemoteId = &v + return s +} + // SetSmoothingLatency sets the SmoothingLatency field's value. func (s *Transport) SetSmoothingLatency(v int64) *Transport { s.SmoothingLatency = &v @@ -4062,17 +4194,43 @@ type UpdateEncryption struct { // or aes256). Algorithm *string `locationName:"algorithm" type:"string" enum:"Algorithm"` + // A 128-bit, 16-byte hex value represented by a 32-character string, to be + // used with the key for encrypting content. This parameter is not valid for + // static key encryption. + ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` + + // The value of one of the devices that you configured with your digital rights + // management (DRM) platform key provider. This parameter is required for SPEKE + // encryption and is not valid for static key encryption. + DeviceId *string `locationName:"deviceId" type:"string"` + // The type of key that is used for the encryption. If no keyType is provided, // the service will use the default setting (static-key). KeyType *string `locationName:"keyType" type:"string" enum:"KeyType"` + // The AWS Region that the API Gateway proxy endpoint was created in. 
This parameter + // is required for SPEKE encryption and is not valid for static key encryption. + Region *string `locationName:"region" type:"string"` + + // An identifier for the content. The service sends this value to the key server + // to identify the current endpoint. The resource ID is also known as the content + // ID. This parameter is required for SPEKE encryption and is not valid for + // static key encryption. + ResourceId *string `locationName:"resourceId" type:"string"` + // The ARN of the role that you created during setup (when you set up AWS Elemental // MediaConnect as a trusted entity). RoleArn *string `locationName:"roleArn" type:"string"` - // The ARN that was assigned to the secret that you created in AWS Secrets Manager - // to store the encryption key. + // The ARN of the secret that you created in AWS Secrets Manager to store the + // encryption key. This parameter is required for static key encryption and + // is not valid for SPEKE encryption. SecretArn *string `locationName:"secretArn" type:"string"` + + // The URL from the API Gateway proxy that you set up to talk to your key server. + // This parameter is required for SPEKE encryption and is not valid for static + // key encryption. + Url *string `locationName:"url" type:"string"` } // String returns the string representation @@ -4091,12 +4249,36 @@ func (s *UpdateEncryption) SetAlgorithm(v string) *UpdateEncryption { return s } +// SetConstantInitializationVector sets the ConstantInitializationVector field's value. +func (s *UpdateEncryption) SetConstantInitializationVector(v string) *UpdateEncryption { + s.ConstantInitializationVector = &v + return s +} + +// SetDeviceId sets the DeviceId field's value. +func (s *UpdateEncryption) SetDeviceId(v string) *UpdateEncryption { + s.DeviceId = &v + return s +} + // SetKeyType sets the KeyType field's value. func (s *UpdateEncryption) SetKeyType(v string) *UpdateEncryption { s.KeyType = &v return s } +// SetRegion sets the Region field's value. +func (s *UpdateEncryption) SetRegion(v string) *UpdateEncryption { + s.Region = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *UpdateEncryption) SetResourceId(v string) *UpdateEncryption { + s.ResourceId = &v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *UpdateEncryption) SetRoleArn(v string) *UpdateEncryption { s.RoleArn = &v @@ -4109,6 +4291,12 @@ func (s *UpdateEncryption) SetSecretArn(v string) *UpdateEncryption { return s } +// SetUrl sets the Url field's value. +func (s *UpdateEncryption) SetUrl(v string) *UpdateEncryption { + s.Url = &v + return s +} + // The updates that you want to make to a specific entitlement. type UpdateFlowEntitlementInput struct { _ struct{} `type:"structure"` @@ -4234,6 +4422,11 @@ func (s *UpdateFlowEntitlementOutput) SetFlowArn(v string) *UpdateFlowEntitlemen type UpdateFlowOutputInput struct { _ struct{} `type:"structure"` + // The range of IP addresses that should be allowed to initiate output requests + // to this flow. These IP addresses should be in the form of a Classless Inter-Domain + // Routing (CIDR) block; for example, 10.0.0.0/16. + CidrAllowList []*string `locationName:"cidrAllowList" type:"list"` + // A description of the output. This description appears only on the AWS Elemental // MediaConnect console and will not be seen by the end user. Description *string `locationName:"description" type:"string"` @@ -4260,7 +4453,10 @@ type UpdateFlowOutputInput struct { // The protocol to use for the output. 
Protocol *string `locationName:"protocol" type:"string" enum:"Protocol"` - // The smoothing latency in milliseconds for RTP and RTP-FEC streams. + // The remote ID for the Zixi-pull stream. + RemoteId *string `locationName:"remoteId" type:"string"` + + // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. SmoothingLatency *int64 `locationName:"smoothingLatency" type:"integer"` // The stream ID that you want to use for this transport. This parameter applies @@ -4300,6 +4496,12 @@ func (s *UpdateFlowOutputInput) Validate() error { return nil } +// SetCidrAllowList sets the CidrAllowList field's value. +func (s *UpdateFlowOutputInput) SetCidrAllowList(v []*string) *UpdateFlowOutputInput { + s.CidrAllowList = v + return s +} + // SetDescription sets the Description field's value. func (s *UpdateFlowOutputInput) SetDescription(v string) *UpdateFlowOutputInput { s.Description = &v @@ -4348,6 +4550,12 @@ func (s *UpdateFlowOutputInput) SetProtocol(v string) *UpdateFlowOutputInput { return s } +// SetRemoteId sets the RemoteId field's value. +func (s *UpdateFlowOutputInput) SetRemoteId(v string) *UpdateFlowOutputInput { + s.RemoteId = &v + return s +} + // SetSmoothingLatency sets the SmoothingLatency field's value. func (s *UpdateFlowOutputInput) SetSmoothingLatency(v int64) *UpdateFlowOutputInput { s.SmoothingLatency = &v @@ -4416,10 +4624,11 @@ type UpdateFlowSourceInput struct { // The port that the flow will be listening on for incoming content. IngestPort *int64 `locationName:"ingestPort" type:"integer"` - // The smoothing max bitrate for RTP and RTP-FEC streams. + // The smoothing max bitrate for RIST, RTP, and RTP-FEC streams. MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` - // The maximum latency in milliseconds for Zixi-based streams. + // The maximum latency in milliseconds. This parameter applies only to RIST-based + // and Zixi-based streams. MaxLatency *int64 `locationName:"maxLatency" type:"integer"` // The protocol that is used by the source. @@ -4433,7 +4642,7 @@ type UpdateFlowSourceInput struct { StreamId *string `locationName:"streamId" type:"string"` // The range of IP addresses that should be allowed to contribute content to - // your source. These IP addresses should in the form of a Classless Inter-Domain + // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. 
WhitelistCidr *string `locationName:"whitelistCidr" type:"string"` } @@ -4582,6 +4791,9 @@ const ( ) const ( + // KeyTypeSpeke is a KeyType enum value + KeyTypeSpeke = "speke" + // KeyTypeStaticKey is a KeyType enum value KeyTypeStaticKey = "static-key" ) @@ -4595,6 +4807,12 @@ const ( // ProtocolRtp is a Protocol enum value ProtocolRtp = "rtp" + + // ProtocolZixiPull is a Protocol enum value + ProtocolZixiPull = "zixi-pull" + + // ProtocolRist is a Protocol enum value + ProtocolRist = "rist" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go index 218d8e1905a..72aacea0b1c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConnect { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconnect" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConnect { svc := &MediaConnect{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go index 531482789ad..ab8eadf8ac2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go @@ -922,7 +922,7 @@ func (c *MediaConvert) DescribeEndpointsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeEndpoints operation. // pageNum := 0 // err := client.DescribeEndpointsPages(params, -// func(page *DescribeEndpointsOutput, lastPage bool) bool { +// func(page *mediaconvert.DescribeEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -954,10 +954,12 @@ func (c *MediaConvert) DescribeEndpointsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1510,7 +1512,7 @@ func (c *MediaConvert) ListJobTemplatesWithContext(ctx aws.Context, input *ListJ // // Example iterating over at most 3 pages of a ListJobTemplates operation. 
// pageNum := 0 // err := client.ListJobTemplatesPages(params, -// func(page *ListJobTemplatesOutput, lastPage bool) bool { +// func(page *mediaconvert.ListJobTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1542,10 +1544,12 @@ func (c *MediaConvert) ListJobTemplatesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobTemplatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobTemplatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1657,7 +1661,7 @@ func (c *MediaConvert) ListJobsWithContext(ctx aws.Context, input *ListJobsInput // // Example iterating over at most 3 pages of a ListJobs operation. // pageNum := 0 // err := client.ListJobsPages(params, -// func(page *ListJobsOutput, lastPage bool) bool { +// func(page *mediaconvert.ListJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1689,10 +1693,12 @@ func (c *MediaConvert) ListJobsPagesWithContext(ctx aws.Context, input *ListJobs }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1803,7 +1809,7 @@ func (c *MediaConvert) ListPresetsWithContext(ctx aws.Context, input *ListPreset // // Example iterating over at most 3 pages of a ListPresets operation. // pageNum := 0 // err := client.ListPresetsPages(params, -// func(page *ListPresetsOutput, lastPage bool) bool { +// func(page *mediaconvert.ListPresetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1835,10 +1841,12 @@ func (c *MediaConvert) ListPresetsPagesWithContext(ctx aws.Context, input *ListP }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1949,7 +1957,7 @@ func (c *MediaConvert) ListQueuesWithContext(ctx aws.Context, input *ListQueuesI // // Example iterating over at most 3 pages of a ListQueues operation. // pageNum := 0 // err := client.ListQueuesPages(params, -// func(page *ListQueuesOutput, lastPage bool) bool { +// func(page *mediaconvert.ListQueuesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1981,10 +1989,12 @@ func (c *MediaConvert) ListQueuesPagesWithContext(ctx aws.Context, input *ListQu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2541,13 +2551,14 @@ type AacSettings struct { // and FollowInputAudioType. AudioDescriptionBroadcasterMix *string `locationName:"audioDescriptionBroadcasterMix" type:"string" enum:"AacAudioDescriptionBroadcasterMix"` - // Average bitrate in bits/second. The set of valid values for this setting - // is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, - // 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, - // 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, - // 1024000. The value you set is also constrained by the values you choose for - // Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate - // (sampleRate). 
Default values depend on Bitrate control mode and Profile. + // Specify the average bitrate in bits per second. The set of valid values for + // this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, + // 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, + // 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, + // 768000, 896000, 1024000. The value you set is also constrained by the values + // that you choose for Profile (codecProfile), Bitrate control mode (codingMode), + // and Sample rate (sampleRate). Default values depend on Bitrate control mode + // and Profile. Bitrate *int64 `locationName:"bitrate" min:"6000" type:"integer"` // AAC Profile. @@ -2663,11 +2674,13 @@ func (s *AacSettings) SetVbrQuality(v string) *AacSettings { type Ac3Settings struct { _ struct{} `type:"structure"` - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. + // Specify the average bitrate in bits per second. Valid bitrates depend on + // the coding mode. Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"` - // Specifies the "Bitstream Mode" (bsmod) for the emitted AC-3 stream. See ATSC - // A/52-2012 for background on these values. + // Specify the bitstream mode for the AC-3 stream that the encoder emits. For + // more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex + // E). BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Ac3BitstreamMode"` // Dolby Digital coding mode. Determines number of channels. @@ -2690,7 +2703,7 @@ type Ac3Settings struct { // from one of these streams, then the static metadata settings will be used. MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Ac3MetadataControl"` - // Sample rate in hz. Sample rate is always 48000. + // This value is always 48000. It represents the sample rate in Hz. SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"` } @@ -2778,7 +2791,8 @@ func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings { type AccelerationSettings struct { _ struct{} `type:"structure"` - // Acceleration configuration for the job. + // Specify the conditions when the service will run your job with accelerated + // transcoding. // // Mode is a required field Mode *string `locationName:"mode" type:"string" required:"true" enum:"AccelerationMode"` @@ -2822,9 +2836,8 @@ type AiffSettings struct { // quality for this audio track. BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` - // Set Channels to specify the number of channels in this output audio track. - // Choosing Mono in the console will give you 1 output channel; choosing Stereo - // will give you 2. In the API, valid values are 1 and 2. + // Specify the number of channels in this output audio track. Valid values are + // 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64. Channels *int64 `locationName:"channels" min:"1" type:"integer"` // Sample rate in hz. @@ -2882,9 +2895,21 @@ func (s *AiffSettings) SetSampleRate(v int64) *AiffSettings { type AncillarySourceSettings struct { _ struct{} `type:"structure"` + // Specify whether this set of input captions appears in your outputs in both + // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes + // the captions data in two ways: it passes the 608 data through using the 608 + // compatibility bytes fields of the 708 wrapper, and it also translates the + // 608 data into 708. 
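// Editor's sketch (not from the upstream change): wiring the two new ancillary
// captions fields together. The enum strings follow the AncillaryConvert608To708
// and AncillaryTerminateCaptions values this diff introduces; treat them as
// illustrative.
//
//    anc := &mediaconvert.AncillarySourceSettings{}
//    anc.SetConvert608To708("UPCONVERT")      // pass 608 through and also translate to 708
//    anc.SetTerminateCaptions("END_OF_INPUT") // disable to carry captions onto the next input
//    if err := anc.Validate(); err != nil {
//        // handle invalid parameters
//    }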
+ Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"AncillaryConvert608To708"` + // Specifies the 608 channel number in the ancillary data track from which to // extract captions. Unused for passthrough. SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" min:"1" type:"integer"` + + // By default, the service terminates any unterminated captions at the end of + // each input. If you want the caption to continue onto your next input, disable + // this setting. + TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"AncillaryTerminateCaptions"` } // String returns the string representation @@ -2910,12 +2935,24 @@ func (s *AncillarySourceSettings) Validate() error { return nil } +// SetConvert608To708 sets the Convert608To708 field's value. +func (s *AncillarySourceSettings) SetConvert608To708(v string) *AncillarySourceSettings { + s.Convert608To708 = &v + return s +} + // SetSourceAncillaryChannelNumber sets the SourceAncillaryChannelNumber field's value. func (s *AncillarySourceSettings) SetSourceAncillaryChannelNumber(v int64) *AncillarySourceSettings { s.SourceAncillaryChannelNumber = &v return s } +// SetTerminateCaptions sets the TerminateCaptions field's value. +func (s *AncillarySourceSettings) SetTerminateCaptions(v string) *AncillarySourceSettings { + s.TerminateCaptions = &v + return s +} + // Associates the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM) // certificate with an AWS Elemental MediaConvert resource. type AssociateCertificateInput struct { @@ -2975,10 +3012,11 @@ func (s AssociateCertificateOutput) GoString() string { // Audio codec settings (CodecSettings) under (AudioDescriptions) contains the // group of settings related to audio encoding. The settings in this group vary -// depending on the value you choose for Audio codec (Codec). For each codec -// enum you choose, define the corresponding settings object. The following -// lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings -// * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings +// depending on the value that you choose for Audio codec (Codec). For each +// codec enum that you choose, define the corresponding settings object. The +// following lists the codec enum, settings object pairs. * AAC, AacSettings +// * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings +// * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings type AudioCodecSettings struct { _ struct{} `type:"structure"` @@ -3002,6 +3040,10 @@ type AudioCodecSettings struct { // Type of Audio codec. Codec *string `locationName:"codec" type:"string" enum:"AudioCodec"` + // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to + // the value EAC3_ATMOS. + Eac3AtmosSettings *Eac3AtmosSettings `locationName:"eac3AtmosSettings" type:"structure"` + // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to // the value EAC3. 
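// Editor's sketch (not from the upstream change): selecting the new Dolby
// Atmos codec added below. "EAC3_ATMOS" is the AudioCodec enum value this
// change adds; 384000 is the minimum bitrate enforced by
// Eac3AtmosSettings.Validate().
//
//    ac := &mediaconvert.AudioCodecSettings{}
//    ac.SetCodec("EAC3_ATMOS")
//    ac.SetEac3AtmosSettings(&mediaconvert.Eac3AtmosSettings{
//        Bitrate: aws.Int64(384000),
//    })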
Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"` @@ -3043,6 +3085,11 @@ func (s *AudioCodecSettings) Validate() error { invalidParams.AddNested("AiffSettings", err.(request.ErrInvalidParams)) } } + if s.Eac3AtmosSettings != nil { + if err := s.Eac3AtmosSettings.Validate(); err != nil { + invalidParams.AddNested("Eac3AtmosSettings", err.(request.ErrInvalidParams)) + } + } if s.Eac3Settings != nil { if err := s.Eac3Settings.Validate(); err != nil { invalidParams.AddNested("Eac3Settings", err.(request.ErrInvalidParams)) @@ -3089,6 +3136,12 @@ func (s *AudioCodecSettings) SetCodec(v string) *AudioCodecSettings { return s } +// SetEac3AtmosSettings sets the Eac3AtmosSettings field's value. +func (s *AudioCodecSettings) SetEac3AtmosSettings(v *Eac3AtmosSettings) *AudioCodecSettings { + s.Eac3AtmosSettings = v + return s +} + // SetEac3Settings sets the Eac3Settings field's value. func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSettings { s.Eac3Settings = v @@ -3111,7 +3164,8 @@ func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings type AudioDescription struct { _ struct{} `type:"structure"` - // Advanced audio normalization settings. + // Advanced audio normalization settings. Ignore these settings unless you need + // to comply with a loudness standard. AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"` // Specifies which audio data to use from each input. In the simplest case, @@ -3141,10 +3195,11 @@ type AudioDescription struct { // Audio codec settings (CodecSettings) under (AudioDescriptions) contains the // group of settings related to audio encoding. The settings in this group vary - // depending on the value you choose for Audio codec (Codec). For each codec - // enum you choose, define the corresponding settings object. The following - // lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings - // * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings + // depending on the value that you choose for Audio codec (Codec). For each + // codec enum that you choose, define the corresponding settings object. The + // following lists the codec enum, settings object pairs. * AAC, AacSettings + // * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings + // * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` // Specify the language for this audio output track, using the ISO 639-2 or @@ -3169,9 +3224,10 @@ type AudioDescription struct { // Advanced audio remixing settings. RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"` - // Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by - // the player (eg. English, or Director Commentary). Alphanumeric characters, - // spaces, and underscore are legal. + // Specify a label for this output audio stream. For example, "English", "Director + // commentary", or "track_2". For streaming outputs, MediaConvert passes this + // information into destination manifests for display on the end-viewer's player + // device. For outputs in other output groups, the service ignores this setting. StreamName *string `locationName:"streamName" type:"string"` } @@ -3273,12 +3329,21 @@ func (s *AudioDescription) SetStreamName(v string) *AudioDescription { return s } -// Advanced audio normalization settings. 
+// Advanced audio normalization settings. Ignore these settings unless you need +// to comply with a loudness standard. type AudioNormalizationSettings struct { _ struct{} `type:"structure"` - // Audio normalization algorithm to use. 1770-1 conforms to the CALM Act specification, - // 1770-2 conforms to the EBU R-128 specification. + // Choose one of the following audio normalization algorithms: ITU-R BS.1770-1: + // Ungated loudness. A measurement of ungated average loudness for an entire + // piece of content, suitable for measurement of short-form content under ATSC + // recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2: + // Gated loudness. A measurement of gated average loudness compliant with the + // requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3: + // Modified peak. The same loudness measurement algorithm as 1770-2, with an + // updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows + // for more audio channels than the other algorithms, including configurations + // such as 7.1. Algorithm *string `locationName:"algorithm" type:"string" enum:"AudioNormalizationAlgorithm"` // When enabled the output audio is corrected using the chosen algorithm. If @@ -3297,10 +3362,11 @@ type AudioNormalizationSettings struct { // track loudness. PeakCalculation *string `locationName:"peakCalculation" type:"string" enum:"AudioNormalizationPeakCalculation"` - // Target LKFS(loudness) to adjust volume to. If no value is entered, a default - // value will be used according to the chosen algorithm. The CALM Act (1770-1) - // recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends - // a target of -23 LKFS. + // When you use Audio normalization (AudioNormalizationSettings), optionally + // use this setting to specify a target loudness. If you don't specify a value + // here, the encoder chooses a value for you, based on the algorithm that you + // choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder + // will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS. TargetLkfs *float64 `locationName:"targetLkfs" type:"double"` } @@ -3896,9 +3962,11 @@ type CaptionDescription struct { // the captions text. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - // Human readable information to indicate captions available for players (eg. - // English, or Spanish). Alphanumeric characters, spaces, and underscore are - // legal. + // Specify a label for this set of output captions. For example, "English", + // "Director commentary", or "track_2". For streaming outputs, MediaConvert + // passes this information into destination manifests for display on the end-viewer's + // player device. For outputs in other output groups, the service ignores this + // setting. LanguageDescription *string `locationName:"languageDescription" type:"string"` } @@ -3986,9 +4054,11 @@ type CaptionDescriptionPreset struct { // the captions text. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - // Human readable information to indicate captions available for players (eg. - // English, or Spanish). Alphanumeric characters, spaces, and underscore are - // legal. + // Specify a label for this set of output captions. For example, "English", + // "Director commentary", or "track_2". For streaming outputs, MediaConvert + // passes this information into destination manifests for display on the end-viewer's + // player device. 
For outputs in other output groups, the service ignores this + // setting. LanguageDescription *string `locationName:"languageDescription" type:"string"` } @@ -4054,7 +4124,7 @@ type CaptionDestinationSettings struct { // Specify the format for this set of captions on this output. The default format // is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, - // DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, + // DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, // choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that // complies with the SCTE-43 spec. To create a non-compliant output where the // embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). @@ -4067,6 +4137,9 @@ type CaptionDestinationSettings struct { // Channel destination number. EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` + // Settings specific to IMSC caption outputs. + ImscDestinationSettings *ImscDestinationSettings `locationName:"imscDestinationSettings" type:"structure"` + // Settings for SCC caption output. SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"` @@ -4142,6 +4215,12 @@ func (s *CaptionDestinationSettings) SetEmbeddedDestinationSettings(v *EmbeddedD return s } +// SetImscDestinationSettings sets the ImscDestinationSettings field's value. +func (s *CaptionDestinationSettings) SetImscDestinationSettings(v *ImscDestinationSettings) *CaptionDestinationSettings { + s.ImscDestinationSettings = v + return s +} + // SetSccDestinationSettings sets the SccDestinationSettings field's value. func (s *CaptionDestinationSettings) SetSccDestinationSettings(v *SccDestinationSettings) *CaptionDestinationSettings { s.SccDestinationSettings = v @@ -4181,8 +4260,9 @@ type CaptionSelector struct { // extract a specific language with pass-through captions. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` - // Source settings (SourceSettings) contains the group of settings for captions - // in the input. + // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, + // specify the URI of the input captions source file. If your input captions + // are IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings. SourceSettings *CaptionSourceSettings `locationName:"sourceSettings" type:"structure"` } @@ -4232,8 +4312,9 @@ func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSe return s } -// Source settings (SourceSettings) contains the group of settings for captions -// in the input. +// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, +// specify the URI of the input captions source file. If your input captions +// are IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings. type CaptionSourceSettings struct { _ struct{} `type:"structure"` @@ -4246,7 +4327,9 @@ type CaptionSourceSettings struct { // Settings for embedded captions Source EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"` - // Settings for File-based Captions in Source + // If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml + // file, specify the URI of the input caption source file. If your caption source + // is IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.
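// Editor's sketch (not from the upstream change) of the routing rule described
// above: IMSC in a sidecar xml file goes through FileSourceSettings, while IMSC
// in an IMF package goes through TrackSourceSettings. The S3 URI and the "IMSC"
// source type string are illustrative.
//
//    sel := &mediaconvert.CaptionSelector{}
//    sel.SetSourceSettings(&mediaconvert.CaptionSourceSettings{
//        SourceType: aws.String("IMSC"),
//        FileSourceSettings: &mediaconvert.FileSourceSettings{
//            SourceFile: aws.String("s3://bucket/captions.xml"), // placeholder
//        },
//    })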
FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"` // Use Source (SourceType) to identify the format of your input captions. The @@ -4256,8 +4339,10 @@ type CaptionSourceSettings struct { // Settings specific to Teletext caption sources, including Page number. TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"` - // Settings specific to caption sources that are specfied by track number. Sources - // include IMSC in IMF. + // Settings specific to caption sources that are specified by track number. + // Currently, this is only IMSC captions in an IMF package. If your caption + // source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead + // of TrackSourceSettings. TrackSourceSettings *TrackSourceSettings `locationName:"trackSourceSettings" type:"structure"` } @@ -4389,19 +4474,24 @@ type CmafEncryptionSettings struct { // segment number by default. ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"` - // Encrypts the segments with the given encryption scheme. Leave blank to disable. - // Selecting 'Disabled' in the web interface also disables encryption. + // Specify the encryption scheme that you want the service to use when encrypting + // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"CmafEncryptionType"` - // The Initialization Vector is a 128-bit number used in conjunction with the - // key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed - // in the manifest. Otherwise Initialization Vector is not in the manifest. + // When you use DRM with CMAF outputs, choose whether the service writes the + // 128-bit encryption initialization vector in the HLS and DASH manifests. InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"CmafInitializationVectorInManifest"` + // If your output group type is CMAF, use these settings when doing DRM encryption + // with a SPEKE-compliant key provider. If your output group type is HLS, DASH, + // or Microsoft Smooth, use the SpekeKeyProvider settings instead. + SpekeKeyProvider *SpekeKeyProviderCmaf `locationName:"spekeKeyProvider" type:"structure"` + // Use these settings to set up encryption with a static key provider. StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"` - // Indicates which type of key provider is used for encryption. + // Specify whether your DRM encryption key is static or from a key provider + // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. Type *string `locationName:"type" type:"string" enum:"CmafKeyProviderType"` } @@ -4446,6 +4536,12 @@ func (s *CmafEncryptionSettings) SetInitializationVectorInManifest(v string) *Cm return s } +// SetSpekeKeyProvider sets the SpekeKeyProvider field's value. +func (s *CmafEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProviderCmaf) *CmafEncryptionSettings { + s.SpekeKeyProvider = v + return s +} + // SetStaticKeyProvider sets the StaticKeyProvider field's value. func (s *CmafEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *CmafEncryptionSettings { s.StaticKeyProvider = v @@ -4523,6 +4619,14 @@ type CmafGroupSettings struct { // to 1, your final segment is 3.5 seconds. 
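// Editor's sketch (not from the upstream change): the new CMAF SPEKE path. For
// CMAF output groups the key provider type is SpekeKeyProviderCmaf, as the
// comments above note; the resource ID and URL are placeholders.
//
//    enc := &mediaconvert.CmafEncryptionSettings{}
//    enc.SetType("SPEKE")
//    enc.SetSpekeKeyProvider(&mediaconvert.SpekeKeyProviderCmaf{
//        ResourceId: aws.String("example-content-id"),          // placeholder
//        Url:        aws.String("https://spekeserver.example"), // placeholder
//    })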
MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"` + // Specify whether your DASH profile is on-demand or main. When you choose Main + // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 + // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), + // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. + // When you choose On-demand, you must also set the output group setting Segment + // control (SegmentControl) to Single file (SINGLE_FILE). + MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"CmafMpdProfile"` + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -4646,6 +4750,12 @@ func (s *CmafGroupSettings) SetMinFinalSegmentLength(v float64) *CmafGroupSettin return s } +// SetMpdProfile sets the MpdProfile field's value. +func (s *CmafGroupSettings) SetMpdProfile(v string) *CmafGroupSettings { + s.MpdProfile = &v + return s +} + // SetSegmentControl sets the SegmentControl field's value. func (s *CmafGroupSettings) SetSegmentControl(v string) *CmafGroupSettings { s.SegmentControl = &v @@ -4683,18 +4793,28 @@ type ColorCorrector struct { // Brightness level. Brightness *int64 `locationName:"brightness" min:"1" type:"integer"` - // Determines if colorspace conversion will be performed. If set to _None_, - // no conversion will be performed. If _Force 601_ or _Force 709_ are selected, - // conversion will be performed for inputs with differing colorspaces. An input's - // colorspace can be specified explicitly in the "Video Selector":#inputs-video_selector - // if necessary. + // Specify the color space you want for this output. The service supports conversion + // between HDR formats, between SDR formats, and from SDR to HDR. The service + // doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't + // upgrade the dynamic range. The converted video has an HDR format, but visually + // appears the same as an unconverted output. ColorSpaceConversion *string `locationName:"colorSpaceConversion" type:"string" enum:"ColorSpaceConversion"` // Contrast level. Contrast *int64 `locationName:"contrast" min:"1" type:"integer"` - // Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata - // or to provide missing metadata. Note that these settings are not color correction. + // Use these settings when you convert to the HDR 10 color space. Specify the + // SMPTE ST 2086 Mastering Display Color Volume static metadata that you want + // signaled in the output. These values don't affect the pixel values that are + // encoded in the video stream. They are intended to help the downstream video + // player display content in a way that reflects the intentions of the content + // creator. When you set Color space conversion (ColorSpaceConversion) to HDR + // 10 (FORCE_HDR10), these settings are required. You must set values for Max + // frame average light level (maxFrameAverageLightLevel) and Max content light + // level (maxContentLightLevel); these settings don't have a default value. + // The default values for the other HDR 10 metadata settings are defined by + // the P3D65 color space. For more information about MediaConvert HDR jobs, + // see https://docs.aws.amazon.com/console/mediaconvert/hdr.
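// Editor's sketch (not from the upstream change): per the rewritten comment
// above, FORCE_HDR10 conversion requires explicit max light levels because
// they have no default. The numbers here are arbitrary example values in nits.
//
//    cc := &mediaconvert.ColorCorrector{}
//    cc.SetColorSpaceConversion("FORCE_HDR10")
//    cc.SetHdr10Metadata(&mediaconvert.Hdr10Metadata{
//        MaxContentLightLevel:      aws.Int64(1000),
//        MaxFrameAverageLightLevel: aws.Int64(400),
//    })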
Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` // Hue in degrees. @@ -4801,7 +4921,8 @@ type ContainerSettings struct { // Settings for MOV Container. MovSettings *MovSettings `locationName:"movSettings" type:"structure"` - // Settings for MP4 Container + // Settings for MP4 container. You can create audio-only AAC outputs with this + // container. Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"` } @@ -4896,6 +5017,13 @@ type CreateJobInput struct { // transcoding settings individually JobTemplate *string `locationName:"jobTemplate" type:"string"` + // Specify the relative priority for this job. In any given queue, the service + // begins processing the job with the highest value first. When more than one + // job has the same priority, the service begins processing the job that you + // submitted first. If you don't specify a priority, the service uses the default + // value 0. + Priority *int64 `locationName:"priority" type:"integer"` + // Optional. When you create a job, you can specify a queue to send it to. If // you don't specify, the job will go to the default queue. For more about queues, // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html. @@ -4912,12 +5040,22 @@ type CreateJobInput struct { // Settings is a required field Settings *JobSettings `locationName:"settings" type:"structure" required:"true"` + // Enable this setting when you run a test job to estimate how many reserved + // transcoding slots (RTS) you need. When this is enabled, MediaConvert runs + // your job from an on-demand queue with similar performance to what you will + // see with one RTS in a reserved queue. This setting is disabled by default. + SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"` + // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch // Events. Set the interval, in seconds, between status updates. MediaConvert // sends an update at this interval from the time the service begins processing // your job to the time it completes the transcode or encounters an error. StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"` + // The tags that you want to add to the resource. You can tag resources with + // a key-value pair or with only a key. + Tags map[string]*string `locationName:"tags" type:"map"` + // User-defined metadata that you want to associate with a MediaConvert job. // You specify metadata in key/value pairs. UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` @@ -4936,6 +5074,9 @@ func (s CreateJobInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateJobInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} + if s.Priority != nil && *s.Priority < -50 { + invalidParams.Add(request.NewErrParamMinValue("Priority", -50)) + } if s.Role == nil { invalidParams.Add(request.NewErrParamRequired("Role")) } @@ -4983,6 +5124,12 @@ func (s *CreateJobInput) SetJobTemplate(v string) *CreateJobInput { return s } +// SetPriority sets the Priority field's value. +func (s *CreateJobInput) SetPriority(v int64) *CreateJobInput { + s.Priority = &v + return s +} + // SetQueue sets the Queue field's value.
func (s *CreateJobInput) SetQueue(v string) *CreateJobInput { s.Queue = &v @@ -5001,12 +5148,24 @@ func (s *CreateJobInput) SetSettings(v *JobSettings) *CreateJobInput { return s } +// SetSimulateReservedQueue sets the SimulateReservedQueue field's value. +func (s *CreateJobInput) SetSimulateReservedQueue(v string) *CreateJobInput { + s.SimulateReservedQueue = &v + return s +} + // SetStatusUpdateInterval sets the StatusUpdateInterval field's value. func (s *CreateJobInput) SetStatusUpdateInterval(v string) *CreateJobInput { s.StatusUpdateInterval = &v return s } +// SetTags sets the Tags field's value. +func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput { + s.Tags = v + return s +} + // SetUserMetadata sets the UserMetadata field's value. func (s *CreateJobInput) SetUserMetadata(v map[string]*string) *CreateJobInput { s.UserMetadata = v @@ -5061,6 +5220,13 @@ type CreateJobTemplateInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` + // Specify the relative priority for this job. In any given queue, the service + // begins processing the job with the highest value first. When more than one + // job has the same priority, the service begins processing the job that you + // submitted first. If you don't specify a priority, the service uses the default + // value 0. + Priority *int64 `locationName:"priority" type:"integer"` + // Optional. The queue that jobs created from this template are assigned to. // If you don't specify this, jobs will go to the default queue. Queue *string `locationName:"queue" type:"string"` @@ -5098,6 +5264,9 @@ func (s *CreateJobTemplateInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Priority != nil && *s.Priority < -50 { + invalidParams.Add(request.NewErrParamMinValue("Priority", -50)) + } if s.Settings == nil { invalidParams.Add(request.NewErrParamRequired("Settings")) } @@ -5142,6 +5311,12 @@ func (s *CreateJobTemplateInput) SetName(v string) *CreateJobTemplateInput { return s } +// SetPriority sets the Priority field's value. +func (s *CreateJobTemplateInput) SetPriority(v int64) *CreateJobTemplateInput { + s.Priority = &v + return s +} + // SetQueue sets the Queue field's value. func (s *CreateJobTemplateInput) SetQueue(v string) *CreateJobTemplateInput { s.Queue = &v @@ -5331,6 +5506,10 @@ type CreateQueueInput struct { // queues and not applicable to on-demand queues. ReservationPlanSettings *ReservationPlanSettings `locationName:"reservationPlanSettings" type:"structure"` + // Initial state of the queue. If you create a paused queue, then jobs in that + // queue won't begin. + Status *string `locationName:"status" type:"string" enum:"QueueStatus"` + // The tags that you want to add to the resource. You can tag resources with // a key-value pair or with only a key. Tags map[string]*string `locationName:"tags" type:"map"` @@ -5388,6 +5567,12 @@ func (s *CreateQueueInput) SetReservationPlanSettings(v *ReservationPlanSettings return s } +// SetStatus sets the Status field's value. +func (s *CreateQueueInput) SetStatus(v string) *CreateQueueInput { + s.Status = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput { s.Tags = v @@ -5434,7 +5619,9 @@ type DashIsoEncryptionSettings struct { // the access unit delimiter and will leave the SEI NAL units unencrypted. 
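// Editor's sketch (not from the upstream change) pulling together the new
// job-level fields introduced above: Priority, Tags, and SimulateReservedQueue.
// Per the Validate() check added in this diff, priorities below -50 are
// rejected (the service also caps priority at 50). The role ARN and settings
// are placeholders.
//
//    in := &mediaconvert.CreateJobInput{
//        Role:     aws.String("arn:aws:iam::111122223333:role/MediaConvertRole"), // placeholder
//        Settings: &mediaconvert.JobSettings{},
//    }
//    in.SetPriority(25)
//    in.SetSimulateReservedQueue("ENABLED")
//    in.SetTags(map[string]*string{"team": aws.String("video")})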
PlaybackDeviceCompatibility *string `locationName:"playbackDeviceCompatibility" type:"string" enum:"DashIsoPlaybackDeviceCompatibility"` - // Settings for use with a SPEKE key provider + // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings + // when doing DRM encryption with a SPEKE-compliant key provider. If your output + // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` } @@ -5499,6 +5686,14 @@ type DashIsoGroupSettings struct { // playout. MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` + // Specify whether your DASH profile is on-demand or main. When you choose Main + // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 + // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), + // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. + // When you choose On-demand, you must also set the output group setting Segment + // control (SegmentControl) to Single file (SINGLE_FILE). + MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"DashIsoMpdProfile"` + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -5511,12 +5706,13 @@ type DashIsoGroupSettings struct { // files as in other output types. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` - // When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), - // your DASH manifest shows precise segment durations. The segment duration - // information appears inside the SegmentTimeline element, inside SegmentTemplate - // at the Representation level. When this feature isn't enabled, the segment - // durations in your DASH manifest are approximate. The segment duration information - // appears in the duration attribute of the SegmentTemplate element. + // If you get an HTTP error in the 400 range when you play back your DASH output, + // enable this setting and run your transcoding job again. When you enable this + // setting, the service writes precise segment durations in the DASH manifest. + // The segment duration information appears inside the SegmentTimeline element, + // inside SegmentTemplate at the Representation level. When you don't enable + // this setting, the service writes approximate segment durations in your DASH + // manifest. WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"DashIsoWriteSegmentTimelineInRepresentation"` } @@ -5588,6 +5784,12 @@ func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings { return s } +// SetMpdProfile sets the MpdProfile field's value. +func (s *DashIsoGroupSettings) SetMpdProfile(v string) *DashIsoGroupSettings { + s.MpdProfile = &v + return s +} + // SetSegmentControl sets the SegmentControl field's value. func (s *DashIsoGroupSettings) SetSegmentControl(v string) *DashIsoGroupSettings { s.SegmentControl = &v @@ -6456,6 +6658,197 @@ func (s *DvbTdtSettings) SetTdtInterval(v int64) *DvbTdtSettings { return s } +// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to +// the value EAC3_ATMOS. 
+type Eac3AtmosSettings struct { + _ struct{} `type:"structure"` + + // Specify the average bitrate in bits per second. Valid values: 384k, 448k, + // 640k, 768k + Bitrate *int64 `locationName:"bitrate" min:"384000" type:"integer"` + + // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. + // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex + // E). + BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3AtmosBitstreamMode"` + + // The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6 (CODING_MODE_9_1_6). + CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3AtmosCodingMode"` + + // Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis. + DialogueIntelligence *string `locationName:"dialogueIntelligence" type:"string" enum:"Eac3AtmosDialogueIntelligence"` + + // Specify the absolute peak level for a signal with dynamic range compression. + DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3AtmosDynamicRangeCompressionLine"` + + // Specify how the service limits the audio dynamic range when compressing the + // audio. + DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3AtmosDynamicRangeCompressionRf"` + + // Specify a value for the following Dolby Atmos setting: Left only/Right only + // center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How + // the service uses this value depends on the value that you choose for Stereo + // downmix (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, + // -4.5, and -6.0. + LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` + + // Specify a value for the following Dolby Atmos setting: Left only/Right only + // (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). + // Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. + LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` + + // Specify a value for the following Dolby Atmos setting: Left total/Right total + // center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How + // the service uses this value depends on the value that you choose for Stereo + // downmix (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, + // -4.5, and -6.0. + LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` + + // Specify a value for the following Dolby Atmos setting: Left total/Right total + // surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. + // How the service uses this value depends on the value that you choose for + // Stereo downmix (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, + // -6.0, and -60. The value -60 mutes the channel. + LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` + + // Choose how the service meters the loudness of your audio. + MeteringMode *string `locationName:"meteringMode" type:"string" enum:"Eac3AtmosMeteringMode"` + + // This value is always 48000. It represents the sample rate in Hz.
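// Editor's sketch (not from the upstream change): an Atmos configuration that
// exercises the downmix fields above, using values from the documented valid
// sets. "STEREO" is assumed to be one of the Eac3AtmosStereoDownmix enum values.
//
//    atmos := &mediaconvert.Eac3AtmosSettings{}
//    atmos.SetBitrate(384000)
//    atmos.SetStereoDownmix("STEREO")
//    atmos.SetLoRoCenterMixLevel(-3.0)
//    atmos.SetLoRoSurroundMixLevel(-6.0)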
+ SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"` + + // Specify the percentage of audio content that must be speech before the encoder + // uses the measured speech loudness as the overall program loudness. + SpeechThreshold *int64 `locationName:"speechThreshold" min:"1" type:"integer"` + + // Choose how the service does stereo downmixing. + StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3AtmosStereoDownmix"` + + // Specify whether your input audio has an additional center rear surround channel + // matrix encoded into your left and right surround channels. + SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3AtmosSurroundExMode"` +} + +// String returns the string representation +func (s Eac3AtmosSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Eac3AtmosSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Eac3AtmosSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Eac3AtmosSettings"} + if s.Bitrate != nil && *s.Bitrate < 384000 { + invalidParams.Add(request.NewErrParamMinValue("Bitrate", 384000)) + } + if s.SampleRate != nil && *s.SampleRate < 48000 { + invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000)) + } + if s.SpeechThreshold != nil && *s.SpeechThreshold < 1 { + invalidParams.Add(request.NewErrParamMinValue("SpeechThreshold", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitrate sets the Bitrate field's value. +func (s *Eac3AtmosSettings) SetBitrate(v int64) *Eac3AtmosSettings { + s.Bitrate = &v + return s +} + +// SetBitstreamMode sets the BitstreamMode field's value. +func (s *Eac3AtmosSettings) SetBitstreamMode(v string) *Eac3AtmosSettings { + s.BitstreamMode = &v + return s +} + +// SetCodingMode sets the CodingMode field's value. +func (s *Eac3AtmosSettings) SetCodingMode(v string) *Eac3AtmosSettings { + s.CodingMode = &v + return s +} + +// SetDialogueIntelligence sets the DialogueIntelligence field's value. +func (s *Eac3AtmosSettings) SetDialogueIntelligence(v string) *Eac3AtmosSettings { + s.DialogueIntelligence = &v + return s +} + +// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value. +func (s *Eac3AtmosSettings) SetDynamicRangeCompressionLine(v string) *Eac3AtmosSettings { + s.DynamicRangeCompressionLine = &v + return s +} + +// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value. +func (s *Eac3AtmosSettings) SetDynamicRangeCompressionRf(v string) *Eac3AtmosSettings { + s.DynamicRangeCompressionRf = &v + return s +} + +// SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value. +func (s *Eac3AtmosSettings) SetLoRoCenterMixLevel(v float64) *Eac3AtmosSettings { + s.LoRoCenterMixLevel = &v + return s +} + +// SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value. +func (s *Eac3AtmosSettings) SetLoRoSurroundMixLevel(v float64) *Eac3AtmosSettings { + s.LoRoSurroundMixLevel = &v + return s +} + +// SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value. +func (s *Eac3AtmosSettings) SetLtRtCenterMixLevel(v float64) *Eac3AtmosSettings { + s.LtRtCenterMixLevel = &v + return s +} + +// SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value. 
+func (s *Eac3AtmosSettings) SetLtRtSurroundMixLevel(v float64) *Eac3AtmosSettings { + s.LtRtSurroundMixLevel = &v + return s +} + +// SetMeteringMode sets the MeteringMode field's value. +func (s *Eac3AtmosSettings) SetMeteringMode(v string) *Eac3AtmosSettings { + s.MeteringMode = &v + return s +} + +// SetSampleRate sets the SampleRate field's value. +func (s *Eac3AtmosSettings) SetSampleRate(v int64) *Eac3AtmosSettings { + s.SampleRate = &v + return s +} + +// SetSpeechThreshold sets the SpeechThreshold field's value. +func (s *Eac3AtmosSettings) SetSpeechThreshold(v int64) *Eac3AtmosSettings { + s.SpeechThreshold = &v + return s +} + +// SetStereoDownmix sets the StereoDownmix field's value. +func (s *Eac3AtmosSettings) SetStereoDownmix(v string) *Eac3AtmosSettings { + s.StereoDownmix = &v + return s +} + +// SetSurroundExMode sets the SurroundExMode field's value. +func (s *Eac3AtmosSettings) SetSurroundExMode(v string) *Eac3AtmosSettings { + s.SurroundExMode = &v + return s +} + // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to // the value EAC3. type Eac3Settings struct { @@ -6465,11 +6858,13 @@ type Eac3Settings struct { // Only used for 3/2 coding mode. AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Eac3AttenuationControl"` - // Average bitrate in bits/second. Valid bitrates depend on the coding mode. + // Specify the average bitrate in bits per second. Valid bitrates depend on + // the coding mode. Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"` - // Specifies the "Bitstream Mode" (bsmod) for the emitted E-AC-3 stream. See - // ATSC A/52-2012 (Annex E) for background on these values. + // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. + // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex + // E). BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3BitstreamMode"` // Dolby Digital Plus coding mode. Determines number of channels. @@ -6482,12 +6877,11 @@ type Eac3Settings struct { // Plus, dialnorm will be passed through. Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"` - // Enables Dynamic Range Compression that restricts the absolute peak level - // for a signal. + // Specify the absolute peak level for a signal with dynamic range compression. DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3DynamicRangeCompressionLine"` - // Enables Heavy Dynamic Range Compression, ensures that the instantaneous signal - // peaks do not exceed specified levels. + // Specify how the service limits the audio dynamic range when compressing the + // audio. DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3DynamicRangeCompressionRf"` // When encoding 3/2 audio, controls whether the LFE channel is enabled @@ -6497,20 +6891,44 @@ type Eac3Settings struct { // valid with 3_2_LFE coding mode. LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"` - // Left only/Right only center mix level. Only used for 3/2 coding mode.Valid - // values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60 + // Specify a value for the following Dolby Digital Plus setting: Left only/Right + // only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. + // How the service uses this value depends on the value that you choose for + // Stereo downmix (Eac3StereoDownmix). 
Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, + // -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies + // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) + // for the setting Coding mode (Eac3CodingMode). If you choose a different value + // for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel). LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` - // Left only/Right only surround mix level. Only used for 3/2 coding mode.Valid - // values: -1.5 -3.0 -4.5 -6.0 -60 + // Specify a value for the following Dolby Digital Plus setting: Left only/Right + // only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the + // service uses this value depends on the value that you choose for Stereo downmix + // (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value + // -60 mutes the channel. This setting applies only if you keep the default + // value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode + // (Eac3CodingMode). If you choose a different value for Coding mode, the service + // ignores Left only/Right only surround (loRoSurroundMixLevel). LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` - // Left total/Right total center mix level. Only used for 3/2 coding mode.Valid - // values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60 + // Specify a value for the following Dolby Digital Plus setting: Left total/Right + // total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. + // How the service uses this value depends on the value that you choose for + // Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, + // -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies + // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) + // for the setting Coding mode (Eac3CodingMode). If you choose a different value + // for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel). LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` - // Left total/Right total surround mix level. Only used for 3/2 coding mode.Valid - // values: -1.5 -3.0 -4.5 -6.0 -60 + // Specify a value for the following Dolby Digital Plus setting: Left total/Right + // total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. + // How the service uses this value depends on the value that you choose for + // Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, + // and -60. The value -60 mutes the channel. This setting applies only if you + // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the + // setting Coding mode (Eac3CodingMode). If you choose a different value for + // Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel). LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, @@ -6528,10 +6946,13 @@ type Eac3Settings struct { // used for 3/2 coding mode. PhaseControl *string `locationName:"phaseControl" type:"string" enum:"Eac3PhaseControl"` - // Sample rate in hz. Sample rate is always 48000. + // This value is always 48000. It represents the sample rate in Hz. SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"` - // Stereo downmix preference. Only used for 3/2 coding mode. 
+ // Choose how the service does stereo downmixing. This setting only applies + // if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) + // for the setting Coding mode (Eac3CodingMode). If you choose a different value + // for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix). StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"` // When encoding 3/2 audio, sets whether an extra center back surround channel @@ -6704,15 +7125,21 @@ type EmbeddedDestinationSettings struct { _ struct{} `type:"structure"` // Ignore this setting unless your input captions are SCC format and your output - // container is MXF. With this combination of input captions format and output - // container, you can optionally use this setting to replace the input channel - // number with the track number that you specify. Specify a different number - // for each output captions track. If you don't specify an output track number, - // the system uses the input channel number for the output channel number. This - // setting applies to each output individually. You can optionally combine two - // captions channels in your output. The two output channel numbers can be one - // of the following pairs: 1,3; 2,4; 1,4; or 2,3. + // captions are embedded in the video stream. Specify a CC number for each captions + // channel in this output. If you have two channels, choose CC numbers that + // aren't in the same field. For example, choose 1 and 3. For more information, + // see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. Destination608ChannelNumber *int64 `locationName:"destination608ChannelNumber" min:"1" type:"integer"` + + // Ignore this setting unless your input captions are SCC format and you want + // both 608 and 708 captions embedded in your output stream. Optionally, specify + // the 708 service number for each output captions channel. Choose a different + // number for each channel. To use this setting, also set Force 608 to 708 upconvert + // (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector + // settings. If you choose to upconvert but don't specify a 708 service number, + // MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) + // for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. + Destination708ServiceNumber *int64 `locationName:"destination708ServiceNumber" min:"1" type:"integer"` } // String returns the string representation @@ -6731,6 +7158,9 @@ func (s *EmbeddedDestinationSettings) Validate() error { if s.Destination608ChannelNumber != nil && *s.Destination608ChannelNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("Destination608ChannelNumber", 1)) } + if s.Destination708ServiceNumber != nil && *s.Destination708ServiceNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("Destination708ServiceNumber", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6744,13 +7174,21 @@ func (s *EmbeddedDestinationSettings) SetDestination608ChannelNumber(v int64) *E return s } +// SetDestination708ServiceNumber sets the Destination708ServiceNumber field's value. 
+func (s *EmbeddedDestinationSettings) SetDestination708ServiceNumber(v int64) *EmbeddedDestinationSettings { + s.Destination708ServiceNumber = &v + return s +} + // Settings for embedded captions Source type EmbeddedSourceSettings struct { _ struct{} `type:"structure"` - // When set to UPCONVERT, 608 data is both passed through via the "608 compatibility - // bytes" fields of the 708 wrapper as well as translated into 708. 708 data - // present in the source content will be discarded. + // Specify whether this set of input captions appears in your outputs in both + // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes + // the captions data in two ways: it passes the 608 data through using the 608 + // compatibility bytes fields of the 708 wrapper, and it also translates the + // 608 data into 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"` // Specifies the 608/708 channel number within the video track from which to @@ -6760,6 +7198,11 @@ type EmbeddedSourceSettings struct { // Specifies the video track index used for extracting captions. The system // only supports one input video track, so this should always be set to '1'. Source608TrackNumber *int64 `locationName:"source608TrackNumber" min:"1" type:"integer"` + + // By default, the service terminates any unterminated captions at the end of + // each input. If you want the caption to continue onto your next input, disable + // this setting. + TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"EmbeddedTerminateCaptions"` } // String returns the string representation @@ -6806,6 +7249,12 @@ func (s *EmbeddedSourceSettings) SetSource608TrackNumber(v int64) *EmbeddedSourc return s } +// SetTerminateCaptions sets the TerminateCaptions field's value. +func (s *EmbeddedSourceSettings) SetTerminateCaptions(v string) *EmbeddedSourceSettings { + s.TerminateCaptions = &v + return s +} + // Describes an account-specific API endpoint. type Endpoint struct { _ struct{} `type:"structure"` @@ -6916,10 +7365,11 @@ type EsamSignalProcessingNotification struct { // job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The // transcoder will use the signal processing instructions in the message that // you supply. Provide your ESAM SignalProcessingNotification XML document inside - // your JSON job settings. If you want the service to place SCTE-35 markers - // at the insertion points you specify in the XML document, you must also enable - // SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document - // or enable SCTE-35 passthrough. You can't do both. + // your JSON job settings. For your MPEG2-TS file outputs, if you want the service + // to place SCTE-35 markers at the insertion points you specify in the XML document, + // you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either + // specify an ESAM XML document or enable SCTE-35 passthrough. You can't do + // both. SccXml *string `locationName:"sccXml" type:"string"` } @@ -7004,17 +7454,21 @@ func (s *FileGroupSettings) SetDestinationSettings(v *DestinationSettings) *File return s } -// Settings for File-based Captions in Source +// If your input captions are SCC, SMI, SRT, STL, TTML, or IMSC 1.1 in an xml +// file, specify the URI of the input caption source file. If your caption source +// is IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.
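// Editor's sketch (not from the upstream change) for the sidecar-captions case
// described above: an SCC file with 608-to-708 upconversion. The S3 URI is a
// placeholder (note that SourceFile has a 14-character minimum).
//
//    fs := &mediaconvert.FileSourceSettings{}
//    fs.SetSourceFile("s3://bucket/subs.scc")
//    fs.SetConvert608To708("UPCONVERT")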
type FileSourceSettings struct { _ struct{} `type:"structure"` - // If set to UPCONVERT, 608 caption data is both passed through via the "608 - // compatibility bytes" fields of the 708 wrapper as well as translated into - // 708. 708 data present in the source content will be discarded. + // Specify whether this set of input captions appears in your outputs in both + // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes + // the captions data in two ways: it passes the 608 data through using the 608 + // compatibility bytes fields of the 708 wrapper, and it also translates the + // 608 data into 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"` // External caption file used for loading captions. Accepted file extensions - // are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'. + // are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'. SourceFile *string `locationName:"sourceFile" min:"14" type:"string"` // Specifies a time delta in seconds to offset the captions from the source @@ -7432,9 +7886,9 @@ type H264QvbrSettings struct { // Use this setting only when Rate control mode is QVBR and Quality tuning level // is Multi-pass HQ. For Max average bitrate values suited to the complexity // of your input video, the service limits the average bitrate of the video - // part of this output to the value you choose. That is, the total size of the - // video element is less than or equal to the value you set multiplied by the - // number of seconds of encoded output. + // part of this output to the value that you choose. That is, the total size + // of the video element is less than or equal to the value you set multiplied + // by the number of seconds of encoded output. MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"` // Required when you use QVBR rate control mode. That is, when you specify qvbrSettings @@ -7492,8 +7946,9 @@ type H264Settings struct { // quality. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` - // Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, - // bitrates must be unique when rounded down to the nearest multiple of 1000. + // Specify the average bitrate in bits per second. Required for VBR and CBR. + // For MS Smooth outputs, bitrates must be unique when rounded down to the nearest + // multiple of 1000. Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // Specify an H.264 level that is consistent with your output video settings. @@ -7634,7 +8089,11 @@ type H264Settings struct { // Places a PPS header on each encoded picture, even if repeated. RepeatPps *string `locationName:"repeatPps" type:"string" enum:"H264RepeatPps"` - // Scene change detection (inserts I-frames on scene changes). + // Enable this setting to insert I-frames at scene changes that the service + // automatically detects. This improves video quality and is enabled by default. + // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) + // for further video quality improvement. For more information about QVBR, see + // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"` // Number of slices per picture. 
Must be less than or equal to the number of @@ -7967,9 +8426,9 @@ type H265QvbrSettings struct { // Use this setting only when Rate control mode is QVBR and Quality tuning level // is Multi-pass HQ. For Max average bitrate values suited to the complexity // of your input video, the service limits the average bitrate of the video - // part of this output to the value you choose. That is, the total size of the - // video element is less than or equal to the value you set multiplied by the - // number of seconds of encoded output. + // part of this output to the value that you choose. That is, the total size + // of the video element is less than or equal to the value you set multiplied + // by the number of seconds of encoded output. MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"` // Required when you use QVBR rate control mode. That is, when you specify qvbrSettings @@ -8030,8 +8489,9 @@ type H265Settings struct { // Log Gamma (HLG) Electro-Optical Transfer Function (EOTF). AlternateTransferFunctionSei *string `locationName:"alternateTransferFunctionSei" type:"string" enum:"H265AlternateTransferFunctionSei"` - // Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, - // bitrates must be unique when rounded down to the nearest multiple of 1000. + // Specify the average bitrate in bits per second. Required for VBR and CBR. + // For MS Smooth outputs, bitrates must be unique when rounded down to the nearest + // multiple of 1000. Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // H.265 Level. @@ -8099,17 +8559,18 @@ type H265Settings struct { // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. + // Choose the scan line type for the output. Choose Progressive (PROGRESSIVE) + // to create a progressive output, regardless of the scan type of your input. + // Choose Top Field First (TOP_FIELD) or Bottom Field First (BOTTOM_FIELD) to + // create an output that's interlaced with the same field polarity throughout. + // Choose Follow, Default Top (FOLLOW_TOP_FIELD) or Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) + // to create an interlaced output with the same field polarity as the source. + // If the source is interlaced, the output will be interlaced with the same + // polarity as the source (it will follow the source). The output could therefore + // be a mix of "top field first" and "bottom field first". If the source is + // progressive, your output will be interlaced with "top field first" or "bottom + // field first" polarity, depending on which of the Follow options you chose. 
+ // If you don't choose a value, the service will default to Progressive (PROGRESSIVE). InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second @@ -8162,7 +8623,11 @@ type H265Settings struct { // selects best strength based on content SampleAdaptiveOffsetFilterMode *string `locationName:"sampleAdaptiveOffsetFilterMode" type:"string" enum:"H265SampleAdaptiveOffsetFilterMode"` - // Scene change detection (inserts I-frames on scene changes). + // Enable this setting to insert I-frames at scene changes that the service + // automatically detects. This improves video quality and is enabled by default. + // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) + // for further video quality improvement. For more information about QVBR, see + // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"` // Number of slices per picture. Must be less than or equal to the number of @@ -8208,17 +8673,16 @@ type H265Settings struct { // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H265UnregisteredSeiTimecode"` - // Use this setting only for outputs encoded with H.265 that are in CMAF or - // DASH output groups. If you include writeMp4PackagingType in your JSON job - // specification for other outputs, your video might not work properly with - // downstream systems and video players. If the location of parameter set NAL - // units don't matter in your workflow, ignore this setting. The service defaults - // to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. - // This makes your output compliant with this specification: ISO IECJTC1 SC29 - // N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service - // stores parameter set NAL units in the sample headers but not in the samples - // directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, - // the service writes parameter set NAL units directly into the samples. + // If the location of parameter set NAL units doesn't matter in your workflow, + // ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. + // For file MP4 outputs, choosing HVC1 can create video that doesn't work properly + // with some downstream systems and video players. Choose HVC1 to mark your + // output as HVC1. This makes your output compliant with the following specification: + // ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these + // outputs, the service stores parameter set NAL units in the sample headers + // but not in the samples directly. The service defaults to marking your output + // as HEV1. For these outputs, the service writes parameter set NAL units directly + // into the samples. WriteMp4PackagingType *string `locationName:"writeMp4PackagingType" type:"string" enum:"H265WriteMp4PackagingType"` } @@ -8505,15 +8969,11 @@ func (s *H265Settings) SetWriteMp4PackagingType(v string) *H265Settings { return s } -// Use the "HDR master display information" (Hdr10Metadata) settings to correct -// HDR metadata or to provide missing metadata. These values vary depending -// on the input video and must be provided by a color grader. 
Range is 0 to -// 50,000; each increment represents 0.00002 in CIE1931 color coordinate. Note -// that these settings are not color correction. Note that if you are creating -// HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, -// you must use the following settings. Set "MP4 packaging type" (writeMp4PackagingType) -// to HVC1 (HVC1). Set "Profile" (H265Settings > codecProfile) to Main10/High -// (MAIN10_HIGH). Set "Level" (H265Settings > codecLevel) to 5 (LEVEL_5). +// Use these settings to specify static color calibration metadata, as defined +// by SMPTE ST 2086. These values don't affect the pixel values that are encoded +// in the video stream. They are intended to help the downstream video player +// display content in a way that reflects the intentions of the content +// creator. type Hdr10Metadata struct { _ struct{} `type:"structure"` @@ -8538,11 +8998,13 @@ type Hdr10Metadata struct { GreenPrimaryY *int64 `locationName:"greenPrimaryY" type:"integer"` // Maximum light level among all samples in the coded video sequence, in units - // of candelas per square meter. + // of candelas per square meter. This setting doesn't have a default value; + // you must specify a value that is suitable for the content. MaxContentLightLevel *int64 `locationName:"maxContentLightLevel" type:"integer"` // Maximum average light level of any frame in the coded video sequence, in - // units of candelas per square meter. + // units of candelas per square meter. This setting doesn't have a default value; + // you must specify a value that is suitable for the content. MaxFrameAverageLightLevel *int64 `locationName:"maxFrameAverageLightLevel" type:"integer"` // Nominal maximum mastering display luminance in units of 0.0001 candelas @@ -8746,13 +9208,16 @@ type HlsEncryptionSettings struct { // playlist. This allows for offline Apple HLS FairPlay content protection. OfflineEncrypted *string `locationName:"offlineEncrypted" type:"string" enum:"HlsOfflineEncrypted"` - // Settings for use with a SPEKE key provider + // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings + // when doing DRM encryption with a SPEKE-compliant key provider. If your output + // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` // Use these settings to set up encryption with a static key provider. StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"` - // Indicates which type of key provider is used for encryption. + // Specify whether your DRM encryption key is static or from a key provider + // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. Type *string `locationName:"type" type:"string" enum:"HlsKeyProviderType"` } @@ -8826,8 +9291,9 @@ func (s *HlsEncryptionSettings) SetType(v string) *HlsEncryptionSettings { type HlsGroupSettings struct { _ struct{} `type:"structure"` - // Choose one or more ad marker types to pass SCTE35 signals through to this - // group of Apple HLS outputs. + // Choose one or more ad marker types to decorate your Apple HLS manifest. This + // setting does not determine whether SCTE-35 markers appear in the outputs + // themselves.
AdMarkers []*string `locationName:"adMarkers" type:"list"` // A partial URI prefix that will be prepended to each output in the media .m3u8 @@ -9137,6 +9603,13 @@ type HlsSettings struct { // Specifies the group to which the audio Rendition belongs. AudioGroupId *string `locationName:"audioGroupId" type:"string"` + // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream + // (M2TS) to create a file in an MPEG2-TS container. Keep the default value + // Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless + // of the value that you specify here, if this output has video, the service + // will place the output into an MPEG2-TS container. + AudioOnlyContainer *string `locationName:"audioOnlyContainer" type:"string" enum:"HlsAudioOnlyContainer"` + // List all the audio groups that are used with the video output stream. Input // all the audio GROUP-IDs that are associated to the video, separate by ','. AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` @@ -9178,6 +9651,12 @@ func (s *HlsSettings) SetAudioGroupId(v string) *HlsSettings { return s } +// SetAudioOnlyContainer sets the AudioOnlyContainer field's value. +func (s *HlsSettings) SetAudioOnlyContainer(v string) *HlsSettings { + s.AudioOnlyContainer = &v + return s +} + // SetAudioRenditionSets sets the AudioRenditionSets field's value. func (s *HlsSettings) SetAudioRenditionSets(v string) *HlsSettings { s.AudioRenditionSets = &v @@ -9285,6 +9764,33 @@ func (s *ImageInserter) SetInsertableImages(v []*InsertableImage) *ImageInserter return s } +// Settings specific to IMSC caption outputs. +type ImscDestinationSettings struct { + _ struct{} `type:"structure"` + + // Keep this setting enabled to have MediaConvert use the font style and position + // information from the captions source in the output. This option is available + // only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable + // this setting for simplified output captions. + StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"ImscStylePassthrough"` +} + +// String returns the string representation +func (s ImscDestinationSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImscDestinationSettings) GoString() string { + return s.String() +} + +// SetStylePassthrough sets the StylePassthrough field's value. +func (s *ImscDestinationSettings) SetStylePassthrough(v string) *ImscDestinationSettings { + s.StylePassthrough = &v + return s +} + // Specifies media input type Input struct { _ struct{} `type:"structure"` @@ -9304,6 +9810,12 @@ type Input struct { // selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` + // Use Cropping selection (crop) to specify the video area that the service + // will include in the output video frame. If you specify a value here, it will + // override any value that you specify in the output setting Cropping selection + // (crop). + Crop *Rectangle `locationName:"crop" type:"structure"` + // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. // Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. @@ -9355,6 +9867,16 @@ type Input struct { // job outputs by stringing the clips together in the order you specify them.
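Worth illustrating while we're in the Input changes: the new input-level Crop mirrors the output-level setting and, per the comment above, overrides it. A hedged sketch using only types visible in this diff; the pixel values are placeholders.

// import "github.com/aws/aws-sdk-go/service/mediaconvert"

crop := &mediaconvert.Rectangle{}
crop.SetX(0)
crop.SetY(140)
crop.SetWidth(1920)
crop.SetHeight(800) // keep a 1920x800 window of the source, trimming letterbox bars

input := &mediaconvert.Input{}
input.SetCrop(crop) // overrides any output-level Cropping selection (crop)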
InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` + // Use Selection placement (position) to define the video area in your output + // frame. The area outside of the rectangle that you specify here is black. + // If you specify a value here, it will override any value that you specify + // in the output setting Selection placement (position). If you specify a value + // here, this will override any AFD values in your input, even if you set Respond + // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, + // this will ignore anything that you specify for the setting Scaling Behavior + // (scalingBehavior). + Position *Rectangle `locationName:"position" type:"structure"` + // Use Program (programNumber) to select a specific program from within a multi-program // transport stream. Note that Quad 4K is not currently supported. Default is // the first program within the transport stream. If the program you specify @@ -9374,15 +9896,24 @@ type Input struct { // service automatically detects it. SupplementalImps []*string `locationName:"supplementalImps" type:"list"` - // Timecode source under input settings (InputTimecodeSource) only affects the - // behavior of features that apply to a single input at a time, such as input - // clipping and synchronizing some captions formats. Use this setting to specify - // whether the service counts frames by timecodes embedded in the video (EMBEDDED) - // or by starting the first frame at zero (ZEROBASED). In both cases, the timecode - // format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only - // set this to EMBEDDED if your source video has embedded timecodes. + // Use this Timecode source setting, located under the input settings (InputTimecodeSource), + // to specify how the service counts input video frames. This input frame count + // affects only the behavior of features that apply to a single input at a time, + // such as input clipping and synchronizing some captions formats. Choose Embedded + // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero + // (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) + // to start the first frame at the timecode that you specify in the setting + // Start timecode (timecodeStart). If you don't specify a value for Timecode + // source, the service will use Embedded by default. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` + // Specify the timecode that you want the service to use for this input's initial + // frame. To use this setting, you must set the Timecode source setting, located + // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). + // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` + // Selector for video. 
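The new TimecodeStart field only takes effect together with TimecodeSource, as the comments above spell out. A minimal sketch of that pairing; the start timecode is an example HH:MM:SS:FF value, which also satisfies the field's 11-character minimum.

// import "github.com/aws/aws-sdk-go/service/mediaconvert"

input := &mediaconvert.Input{}
input.SetTimecodeSource("SPECIFIEDSTART") // value from the InputTimecodeSource enum
input.SetTimecodeStart("01:00:00:00")     // the first frame counts from this timecode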
VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` } @@ -9406,6 +9937,9 @@ func (s *Input) Validate() error { if s.ProgramNumber != nil && *s.ProgramNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1)) } + if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 { + invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11)) + } if s.AudioSelectors != nil { for i, v := range s.AudioSelectors { if v == nil { @@ -9426,6 +9960,11 @@ } } } + if s.Crop != nil { + if err := s.Crop.Validate(); err != nil { + invalidParams.AddNested("Crop", err.(request.ErrInvalidParams)) + } + } if s.DecryptionSettings != nil { if err := s.DecryptionSettings.Validate(); err != nil { invalidParams.AddNested("DecryptionSettings", err.(request.ErrInvalidParams)) } } @@ -9436,6 +9975,11 @@ invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams)) } } + if s.Position != nil { + if err := s.Position.Validate(); err != nil { + invalidParams.AddNested("Position", err.(request.ErrInvalidParams)) + } + } if s.VideoSelector != nil { if err := s.VideoSelector.Validate(); err != nil { invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams)) @@ -9466,6 +10010,12 @@ func (s *Input) SetCaptionSelectors(v map[string]*CaptionSelector) *Input { return s } +// SetCrop sets the Crop field's value. +func (s *Input) SetCrop(v *Rectangle) *Input { + s.Crop = v + return s +} + // SetDeblockFilter sets the DeblockFilter field's value. func (s *Input) SetDeblockFilter(v string) *Input { s.DeblockFilter = &v @@ -9514,6 +10064,12 @@ func (s *Input) SetInputClippings(v []*InputClipping) *Input { return s } +// SetPosition sets the Position field's value. +func (s *Input) SetPosition(v *Rectangle) *Input { + s.Position = v + return s +} + // SetProgramNumber sets the ProgramNumber field's value. func (s *Input) SetProgramNumber(v int64) *Input { s.ProgramNumber = &v @@ -9538,6 +10094,12 @@ func (s *Input) SetTimecodeSource(v string) *Input { return s } +// SetTimecodeStart sets the TimecodeStart field's value. +func (s *Input) SetTimecodeStart(v string) *Input { + s.TimecodeStart = &v + return s +} + // SetVideoSelector sets the VideoSelector field's value. func (s *Input) SetVideoSelector(v *VideoSelector) *Input { s.VideoSelector = v @@ -9696,6 +10258,12 @@ type InputTemplate struct { // selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` + // Use Cropping selection (crop) to specify the video area that the service + // will include in the output video frame. If you specify a value here, it will + // override any value that you specify in the output setting Cropping selection + // (crop). + Crop *Rectangle `locationName:"crop" type:"structure"` + // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. // Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. @@ -9732,6 +10300,16 @@ type InputTemplate struct { // job outputs by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` + // Use Selection placement (position) to define the video area in your output + // frame. The area outside of the rectangle that you specify here is black. + // If you specify a value here, it will override any value that you specify + // in the output setting Selection placement (position).
If you specify a value + // here, this will override any AFD values in your input, even if you set Respond + // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, + // this will ignore anything that you specify for the setting Scaling Behavior + // (scalingBehavior). + Position *Rectangle `locationName:"position" type:"structure"` + // Use Program (programNumber) to select a specific program from within a multi-program // transport stream. Note that Quad 4K is not currently supported. Default is // the first program within the transport stream. If the program you specify @@ -9743,15 +10321,24 @@ type InputTemplate struct { // and video. * Use PSI - Scan only PSI data. PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` - // Timecode source under input settings (InputTimecodeSource) only affects the - // behavior of features that apply to a single input at a time, such as input - // clipping and synchronizing some captions formats. Use this setting to specify - // whether the service counts frames by timecodes embedded in the video (EMBEDDED) - // or by starting the first frame at zero (ZEROBASED). In both cases, the timecode - // format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only - // set this to EMBEDDED if your source video has embedded timecodes. + // Use this Timecode source setting, located under the input settings (InputTimecodeSource), + // to specify how the service counts input video frames. This input frame count + // affects only the behavior of features that apply to a single input at a time, + // such as input clipping and synchronizing some captions formats. Choose Embedded + // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero + // (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) + // to start the first frame at the timecode that you specify in the setting + // Start timecode (timecodeStart). If you don't specify a value for Timecode + // source, the service will use Embedded by default. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` + // Specify the timecode that you want the service to use for this input's initial + // frame. To use this setting, you must set the Timecode source setting, located + // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). + // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` + // Selector for video. 
VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` } @@ -9775,6 +10362,9 @@ func (s *InputTemplate) Validate() error { if s.ProgramNumber != nil && *s.ProgramNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1)) } + if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 { + invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11)) + } if s.AudioSelectors != nil { for i, v := range s.AudioSelectors { if v == nil { @@ -9795,11 +10385,21 @@ func (s *InputTemplate) Validate() error { } } } + if s.Crop != nil { + if err := s.Crop.Validate(); err != nil { + invalidParams.AddNested("Crop", err.(request.ErrInvalidParams)) + } + } if s.ImageInserter != nil { if err := s.ImageInserter.Validate(); err != nil { invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams)) } } + if s.Position != nil { + if err := s.Position.Validate(); err != nil { + invalidParams.AddNested("Position", err.(request.ErrInvalidParams)) + } + } if s.VideoSelector != nil { if err := s.VideoSelector.Validate(); err != nil { invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams)) @@ -9830,6 +10430,12 @@ func (s *InputTemplate) SetCaptionSelectors(v map[string]*CaptionSelector) *Inpu return s } +// SetCrop sets the Crop field's value. +func (s *InputTemplate) SetCrop(v *Rectangle) *InputTemplate { + s.Crop = v + return s +} + // SetDeblockFilter sets the DeblockFilter field's value. func (s *InputTemplate) SetDeblockFilter(v string) *InputTemplate { s.DeblockFilter = &v @@ -9866,6 +10472,12 @@ func (s *InputTemplate) SetInputClippings(v []*InputClipping) *InputTemplate { return s } +// SetPosition sets the Position field's value. +func (s *InputTemplate) SetPosition(v *Rectangle) *InputTemplate { + s.Position = v + return s +} + // SetProgramNumber sets the ProgramNumber field's value. func (s *InputTemplate) SetProgramNumber(v int64) *InputTemplate { s.ProgramNumber = &v @@ -9884,6 +10496,12 @@ func (s *InputTemplate) SetTimecodeSource(v string) *InputTemplate { return s } +// SetTimecodeStart sets the TimecodeStart field's value. +func (s *InputTemplate) SetTimecodeStart(v string) *InputTemplate { + s.TimecodeStart = &v + return s +} + // SetVideoSelector sets the VideoSelector field's value. func (s *InputTemplate) SetVideoSelector(v *VideoSelector) *InputTemplate { s.VideoSelector = v @@ -9917,8 +10535,8 @@ type InsertableImage struct { // blank. Height *int64 `locationName:"height" type:"integer"` - // Specify the Amazon S3 location of the image that you want to overlay on the - // video. Use a PNG or TGA file. + // Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want + // to overlay on the video. Use a PNG or TGA file. ImageInserterInput *string `locationName:"imageInserterInput" min:"14" type:"string"` // Specify the distance, in pixels, between the inserted image and the left @@ -10048,6 +10666,19 @@ type Job struct { // complex content. AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"` + // Describes whether the current job is running with accelerated transcoding. + // For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus + // is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) + // set to ENABLED or PREFERRED, AccelerationStatus is one of the other states. 
+ // AccelerationStatus is IN_PROGRESS initially, while the service determines + // whether the input files and job settings are compatible with accelerated + // transcoding. If they are, AccelerationStatus is ACCELERATED. If your input + // files and job settings aren't compatible with accelerated transcoding, the + // service either fails your job or runs it without accelerated transcoding, + // depending on how you set Acceleration (AccelerationMode). When the service + // runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED. + AccelerationStatus *string `locationName:"accelerationStatus" type:"string" enum:"AccelerationStatus"` + // An identifier for this resource that is unique within all of AWS. Arn *string `locationName:"arn" type:"string"` @@ -10080,18 +10711,24 @@ type Job struct { // provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the // response to GetJob and ListJobs requests. The jobPercentComplete estimate // is reliable for the following input containers: Quicktime, Transport Stream, - // MP4, and MXF. For some jobs, including audio-only jobs and jobs that use - // input clipping, the service can't provide information about job progress. - // In those cases, jobPercentComplete returns a null value. + // MP4, and MXF. For some jobs, the service can't provide information about + // job progress. In those cases, jobPercentComplete returns a null value. JobPercentComplete *int64 `locationName:"jobPercentComplete" type:"integer"` // The job template that the job is created from, if it is created from a job // template. JobTemplate *string `locationName:"jobTemplate" type:"string"` + // Provides messages from the service about jobs that you have already successfully + // submitted. + Messages *JobMessages `locationName:"messages" type:"structure"` + // List of output group details OutputGroupDetails []*OutputGroupDetail `locationName:"outputGroupDetails" type:"list"` + // Relative priority on the job. + Priority *int64 `locationName:"priority" type:"integer"` + // Optional. When you create a job, you can specify a queue to send it to. If // you don't specify, the job will go to the default queue. For more about queues, // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html @@ -10112,6 +10749,12 @@ type Job struct { // Settings is a required field Settings *JobSettings `locationName:"settings" type:"structure" required:"true"` + // Enable this setting when you run a test job to estimate how many reserved + // transcoding slots (RTS) you need. When this is enabled, MediaConvert runs + // your job from an on-demand queue with similar performance to what you will + // see with one RTS in a reserved queue. This setting is disabled by default. + SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"` + // A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. Status *string `locationName:"status" type:"string" enum:"JobStatus"` @@ -10146,6 +10789,12 @@ func (s *Job) SetAccelerationSettings(v *AccelerationSettings) *Job { return s } +// SetAccelerationStatus sets the AccelerationStatus field's value. +func (s *Job) SetAccelerationStatus(v string) *Job { + s.AccelerationStatus = &v + return s +} + // SetArn sets the Arn field's value. func (s *Job) SetArn(v string) *Job { s.Arn = &v @@ -10200,12 +10849,24 @@ func (s *Job) SetJobTemplate(v string) *Job { return s } +// SetMessages sets the Messages field's value.
+func (s *Job) SetMessages(v *JobMessages) *Job { + s.Messages = v + return s +} + // SetOutputGroupDetails sets the OutputGroupDetails field's value. func (s *Job) SetOutputGroupDetails(v []*OutputGroupDetail) *Job { s.OutputGroupDetails = v return s } +// SetPriority sets the Priority field's value. +func (s *Job) SetPriority(v int64) *Job { + s.Priority = &v + return s +} + // SetQueue sets the Queue field's value. func (s *Job) SetQueue(v string) *Job { s.Queue = &v @@ -10230,6 +10891,12 @@ func (s *Job) SetSettings(v *JobSettings) *Job { return s } +// SetSimulateReservedQueue sets the SimulateReservedQueue field's value. +func (s *Job) SetSimulateReservedQueue(v string) *Job { + s.SimulateReservedQueue = &v + return s +} + // SetStatus sets the Status field's value. func (s *Job) SetStatus(v string) *Job { s.Status = &v @@ -10254,6 +10921,42 @@ func (s *Job) SetUserMetadata(v map[string]*string) *Job { return s } +// Provides messages from the service about jobs that you have already successfully +// submitted. +type JobMessages struct { + _ struct{} `type:"structure"` + + // List of messages that are informational only and don't indicate a problem + // with your job. + Info []*string `locationName:"info" type:"list"` + + // List of messages that warn about conditions that might cause your job not + // to run or to fail. + Warning []*string `locationName:"warning" type:"list"` +} + +// String returns the string representation +func (s JobMessages) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobMessages) GoString() string { + return s.String() +} + +// SetInfo sets the Info field's value. +func (s *JobMessages) SetInfo(v []*string) *JobMessages { + s.Info = v + return s +} + +// SetWarning sets the Warning field's value. +func (s *JobMessages) SetWarning(v []*string) *JobMessages { + s.Warning = v + return s +} + // JobSettings contains all the transcode settings for a job. type JobSettings struct { _ struct{} `type:"structure"` @@ -10278,7 +10981,13 @@ type JobSettings struct { // specify here appear on all outputs in all output groups. MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"` - // Settings for Nielsen Configuration + // Settings for your Nielsen configuration. If you don't do Nielsen measurement + // and analytics, ignore these settings. When you enable Nielsen configuration + // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs + // in the job. To enable Nielsen configuration programmatically, include an + // instance of nielsenConfiguration in your JSON job specification. Even if + // you don't include any children of nielsenConfiguration, you still enable + // the setting. NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` // (OutputGroups) contains one group of settings for each set of outputs that @@ -10414,8 +11123,8 @@ func (s *JobSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobS type JobTemplate struct { _ struct{} `type:"structure"` - // Accelerated transcoding is currently in private preview. Contact AWS for - // more information. + // Accelerated transcoding can significantly speed up jobs with long, visually + // complex content. AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"` // An identifier for this resource that is unique within all of AWS. 
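Because Job now carries Messages and AccelerationStatus, callers can surface service feedback after submission. A sketch, assuming an existing *mediaconvert.MediaConvert client named svc and a jobID string; both names are assumptions for illustration, not part of this diff.

// import (
//     "fmt"
//     "github.com/aws/aws-sdk-go/aws"
//     "github.com/aws/aws-sdk-go/service/mediaconvert"
// )

out, err := svc.GetJob(&mediaconvert.GetJobInput{Id: aws.String(jobID)})
if err != nil {
    return err
}
fmt.Println("acceleration status:", aws.StringValue(out.Job.AccelerationStatus))
if out.Job.Messages != nil {
    for _, w := range out.Job.Messages.Warning {
        fmt.Println("warning:", aws.StringValue(w)) // conditions that might make the job fail
    }
    for _, i := range out.Job.Messages.Info {
        fmt.Println("info:", aws.StringValue(i)) // informational only
    }
}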
@@ -10439,6 +11148,9 @@ type JobTemplate struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` + // Relative priority on the job. + Priority *int64 `locationName:"priority" type:"integer"` + // Optional. The queue that jobs created from this template are assigned to. // If you don't specify this, jobs will go to the default queue. Queue *string `locationName:"queue" type:"string"` @@ -10512,6 +11224,12 @@ func (s *JobTemplate) SetName(v string) *JobTemplate { return s } +// SetPriority sets the Priority field's value. +func (s *JobTemplate) SetPriority(v int64) *JobTemplate { + s.Priority = &v + return s +} + // SetQueue sets the Queue field's value. func (s *JobTemplate) SetQueue(v string) *JobTemplate { s.Queue = &v @@ -10561,7 +11279,13 @@ type JobTemplateSettings struct { // specify here appear on all outputs in all output groups. MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"` - // Settings for Nielsen Configuration + // Settings for your Nielsen configuration. If you don't do Nielsen measurement + // and analytics, ignore these settings. When you enable Nielsen configuration + // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs + // in the job. To enable Nielsen configuration programmatically, include an + // instance of nielsenConfiguration in your JSON job specification. Even if + // you don't include any children of nielsenConfiguration, you still enable + // the setting. NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` // (OutputGroups) contains one group of settings for each set of outputs that @@ -11396,8 +12120,12 @@ type M2tsSettings struct { // stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` - // Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from - // input to output. + // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if + // you want SCTE-35 markers that appear in your input to also appear in this + // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. + // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also + // provide the ESAM XML as a string in the setting Signal processing notification + // XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M2tsScte35Source"` // Inserts segmentation markers at each segmentation_time period. rai_segstart @@ -11764,8 +12492,14 @@ type M3u8Settings struct { // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` - // Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from - // input to output. + // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if + // you want SCTE-35 markers that appear in your input to also appear in this + // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. + // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you + // don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose + // Ad markers (adMarkers) if you do want manifest conditioning. In both cases, + // also provide the ESAM XML as a string in the setting Signal processing notification + // XML (sccXml). 
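The reworked Scte35Source documentation distinguishes passthrough of input markers from ESAM-driven insertion. A sketch of the passthrough case for an M2TS output, using the generated setter; the value comes from the M2tsScte35Source enum.

// import "github.com/aws/aws-sdk-go/service/mediaconvert"

m2ts := &mediaconvert.M2tsSettings{}
m2ts.SetScte35Source("PASSTHROUGH") // carry SCTE-35 markers from the input into this output
// For ESAM-driven markers instead, set this to "NONE", enable the scte35Esam
// property, and supply the ESAM document in sccXml, as described above.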
Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M3u8Scte35Source"` // Applies only to HLS outputs. Use this setting to specify whether the service @@ -12196,7 +12930,7 @@ func (s *MovSettings) SetReference(v string) *MovSettings { type Mp2Settings struct { _ struct{} `type:"structure"` - // Average bitrate in bits/second. + // Specify the average bitrate in bits per second. Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"` // Set Channels to specify the number of channels in this output audio track. @@ -12255,7 +12989,8 @@ func (s *Mp2Settings) SetSampleRate(v int64) *Mp2Settings { return s } -// Settings for MP4 Container +// Settings for MP4 container. You can create audio-only AAC outputs with this +// container. type Mp4Settings struct { _ struct{} `type:"structure"` @@ -12321,8 +13056,9 @@ type Mpeg2Settings struct { // quality. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"` - // Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, - // bitrates must be unique when rounded down to the nearest multiple of 1000. + // Specify the average bitrate in bits per second. Required for VBR and CBR. + // For MS Smooth outputs, bitrates must be unique when rounded down to the nearest + // multiple of 1000. Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. @@ -12435,7 +13171,8 @@ type Mpeg2Settings struct { // is variable (vbr) or constant (cbr). RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mpeg2RateControlMode"` - // Scene change detection (inserts I-frames on scene changes). + // Enable this setting to insert I-frames at scene changes that the service + // automatically detects. This improves video quality and is enabled by default. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"Mpeg2SceneChangeDetect"` // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled @@ -12693,7 +13430,9 @@ func (s *Mpeg2Settings) SetTemporalAdaptiveQuantization(v string) *Mpeg2Settings type MsSmoothEncryptionSettings struct { _ struct{} `type:"structure"` - // Settings for use with a SPEKE key provider + // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings + // when doing DRM encryption with a SPEKE-compliant key provider. If your output + // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` } @@ -12805,12 +13544,18 @@ func (s *MsSmoothGroupSettings) SetManifestEncoding(v string) *MsSmoothGroupSett return s } -// Settings for Nielsen Configuration +// Settings for your Nielsen configuration. If you don't do Nielsen measurement +// and analytics, ignore these settings. When you enable Nielsen configuration +// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs +// in the job. To enable Nielsen configuration programmatically, include an +// instance of nielsenConfiguration in your JSON job specification. Even if +// you don't include any children of nielsenConfiguration, you still enable +// the setting. type NielsenConfiguration struct { _ struct{} `type:"structure"` - // Use Nielsen Configuration (NielsenConfiguration) to set the Nielsen measurement - // system breakout code. Supported values are 0, 3, 7, and 9. 
+ // Nielsen has discontinued the use of breakout code functionality. If you must + // include this property, set the value to zero. BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` // Use Distributor ID (DistributorID) to specify the distributor ID that is @@ -12850,10 +13595,11 @@ type NoiseReducer struct { // Use Noise reducer filter (NoiseReducerFilter) to select one of the following // spatial image filtering functions. To use this setting, you must also enable - // Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction - // filter. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution - // filters. * Conserve is a min/max noise reduction filter. * Spatial is a frequency-domain - // filter based on JND principles. + // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing + // noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution + // filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain + // filtering based on JND principles. * Temporal optimizes video quality for + // complex motion. Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"` // Settings for a noise reducer filter @@ -12861,6 +13607,9 @@ type NoiseReducer struct { // Noise reducer filter settings for spatial filter. SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"` + + // Noise reducer filter settings for temporal filter. + TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` } // String returns the string representation @@ -12881,6 +13630,11 @@ func (s *NoiseReducer) Validate() error { invalidParams.AddNested("SpatialFilterSettings", err.(request.ErrInvalidParams)) } } + if s.TemporalFilterSettings != nil { + if err := s.TemporalFilterSettings.Validate(); err != nil { + invalidParams.AddNested("TemporalFilterSettings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12906,6 +13660,12 @@ func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSett return s } +// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. +func (s *NoiseReducer) SetTemporalFilterSettings(v *NoiseReducerTemporalFilterSettings) *NoiseReducer { + s.TemporalFilterSettings = v + return s +} + // Settings for a noise reducer filter type NoiseReducerFilterSettings struct { _ struct{} `type:"structure"` @@ -12989,6 +13749,69 @@ func (s *NoiseReducerSpatialFilterSettings) SetStrength(v int64) *NoiseReducerSp return s } +// Noise reducer filter settings for temporal filter. +type NoiseReducerTemporalFilterSettings struct { + _ struct{} `type:"structure"` + + // Use Aggressive mode for content that has complex motion. Higher values produce + // stronger temporal filtering. This filters highly complex scenes more aggressively + // and creates better VQ for low bitrate outputs. + AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"` + + // The speed of the filter (higher number is faster). Low setting reduces bit + // rate at the cost of transcode time, high setting improves transcode time + // at the cost of bit rate. + Speed *int64 `locationName:"speed" type:"integer"` + + // Specify the strength of the noise reducing filter on this output. Higher + // values produce stronger filtering. 
We recommend the following value ranges, + // depending on the result that you want: * 0-2 for complexity reduction with + // minimal sharpness loss * 2-8 for complexity reduction with image preservation + // * 8-16 for a high level of complexity reduction + Strength *int64 `locationName:"strength" type:"integer"` +} + +// String returns the string representation +func (s NoiseReducerTemporalFilterSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoiseReducerTemporalFilterSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NoiseReducerTemporalFilterSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NoiseReducerTemporalFilterSettings"} + if s.Speed != nil && *s.Speed < -1 { + invalidParams.Add(request.NewErrParamMinValue("Speed", -1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggressiveMode sets the AggressiveMode field's value. +func (s *NoiseReducerTemporalFilterSettings) SetAggressiveMode(v int64) *NoiseReducerTemporalFilterSettings { + s.AggressiveMode = &v + return s +} + +// SetSpeed sets the Speed field's value. +func (s *NoiseReducerTemporalFilterSettings) SetSpeed(v int64) *NoiseReducerTemporalFilterSettings { + s.Speed = &v + return s +} + +// SetStrength sets the Strength field's value. +func (s *NoiseReducerTemporalFilterSettings) SetStrength(v int64) *NoiseReducerTemporalFilterSettings { + s.Strength = &v + return s +} + // An output object describes the settings for a single output file or stream // in an output group. type Output struct { @@ -13032,8 +13855,9 @@ type Output struct { Preset *string `locationName:"preset" type:"string"` // (VideoDescription) contains a group of video encoding settings. The specific - // video settings depend on the video codec you choose when you specify a value - // for Video codec (codec). Include one instance of (VideoDescription) per output. + // video settings depend on the video codec that you choose when you specify + // a value for Video codec (codec). Include one instance of (VideoDescription) + // per output. VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"` } @@ -13539,8 +14363,9 @@ type PresetSettings struct { ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"` // (VideoDescription) contains a group of video encoding settings. The specific - // video settings depend on the video codec you choose when you specify a value - // for Video codec (codec). Include one instance of (VideoDescription) per output. + // video settings depend on the video codec that you choose when you specify + // a value for Video codec (codec). Include one instance of (VideoDescription) + // per output. VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"` } @@ -14003,7 +14828,7 @@ type RemixSettings struct { ChannelsIn *int64 `locationName:"channelsIn" min:"1" type:"integer"` // Specify the number of channels in this output after remixing. Valid values: - // 1, 2, 4, 6, 8 + // 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.) 
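A hedged sketch of turning on the new temporal noise reducer, with values inside the ranges the comments above recommend; the exact numbers are illustrative.

// import "github.com/aws/aws-sdk-go/service/mediaconvert"

nr := &mediaconvert.NoiseReducer{}
nr.SetFilter("TEMPORAL") // value from the NoiseReducerFilter enum

tf := &mediaconvert.NoiseReducerTemporalFilterSettings{}
tf.SetStrength(4) // 2-8: complexity reduction with image preservation, per the doc above
tf.SetSpeed(1)    // higher is faster; Validate rejects anything below -1
nr.SetTemporalFilterSettings(tf)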
ChannelsOut *int64 `locationName:"channelsOut" min:"1" type:"integer"` } @@ -14344,24 +15169,28 @@ func (s *SccDestinationSettings) SetFramerate(v string) *SccDestinationSettings return s } -// Settings for use with a SPEKE key provider +// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings +// when doing DRM encryption with a SPEKE-compliant key provider. If your output +// group type is CMAF, use the SpekeKeyProviderCmaf settings instead. type SpekeKeyProvider struct { _ struct{} `type:"structure"` - // Optional AWS Certificate Manager ARN for a certificate to send to the keyprovider. - // The certificate holds a key used by the keyprovider to encrypt the keys in - // its response. + // If you want your key provider to encrypt the content keys that it provides + // to MediaConvert, set up a certificate with a master key using AWS Certificate + // Manager. Specify the certificate's Amazon Resource Name (ARN) here. CertificateArn *string `locationName:"certificateArn" type:"string"` - // The SPEKE-compliant server uses Resource ID (ResourceId) to identify content. + // Specify the resource ID that your SPEKE-compliant key provider uses to identify + // this content. ResourceId *string `locationName:"resourceId" type:"string"` // Relates to SPEKE implementation. DRM system identifiers. DASH output groups // support a max of two system ids. Other group types support one system id. + // See https://dashif.org/identifiers/content_protection/ for more details. SystemIds []*string `locationName:"systemIds" type:"list"` - // Use URL (Url) to specify the SPEKE-compliant server that will provide keys - // for content. + // Specify the URL to the key server that your SPEKE-compliant DRM key provider + // uses to provide keys for encrypting your content. Url *string `locationName:"url" type:"string"` } @@ -14399,6 +15228,76 @@ func (s *SpekeKeyProvider) SetUrl(v string) *SpekeKeyProvider { return s } +// If your output group type is CMAF, use these settings when doing DRM encryption +// with a SPEKE-compliant key provider. If your output group type is HLS, DASH, +// or Microsoft Smooth, use the SpekeKeyProvider settings instead. +type SpekeKeyProviderCmaf struct { + _ struct{} `type:"structure"` + + // If you want your key provider to encrypt the content keys that it provides + // to MediaConvert, set up a certificate with a master key using AWS Certificate + // Manager. Specify the certificate's Amazon Resource Name (ARN) here. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // Specify the DRM system IDs that you want signaled in the DASH manifest that + // MediaConvert creates as part of this CMAF package. The DASH manifest can + // currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/. + DashSignaledSystemIds []*string `locationName:"dashSignaledSystemIds" type:"list"` + + // Specify the DRM system ID that you want signaled in the HLS manifest that + // MediaConvert creates as part of this CMAF package. The HLS manifest can currently + // signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/. + HlsSignaledSystemIds []*string `locationName:"hlsSignaledSystemIds" type:"list"` + + // Specify the resource ID that your SPEKE-compliant key provider uses to identify + // this content. 
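Since SpekeKeyProviderCmaf is new in this change, a configuration sketch may help. The URL and resource ID are placeholders, and the system IDs shown are the commonly published Widevine and FairPlay identifiers from https://dashif.org/identifiers/content_protection/; substitute whatever your DRM setup requires.

// import (
//     "github.com/aws/aws-sdk-go/aws"
//     "github.com/aws/aws-sdk-go/service/mediaconvert"
// )

speke := &mediaconvert.SpekeKeyProviderCmaf{}
speke.SetResourceId("example-content-id") // placeholder resource ID
speke.SetUrl("https://speke.example.com/speke/v1.0/copyProtection") // placeholder key server
speke.SetDashSignaledSystemIds([]*string{
    aws.String("edef8ba9-79d6-4ace-a3c8-27dcd51d21ed"), // Widevine
})
speke.SetHlsSignaledSystemIds([]*string{
    aws.String("94ce86fb-07ff-4f43-adb8-93d2fa968ca2"), // FairPlay
})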
+ ResourceId *string `locationName:"resourceId" type:"string"` + + // Specify the URL to the key server that your SPEKE-compliant DRM key provider + // uses to provide keys for encrypting your content. + Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s SpekeKeyProviderCmaf) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpekeKeyProviderCmaf) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *SpekeKeyProviderCmaf) SetCertificateArn(v string) *SpekeKeyProviderCmaf { + s.CertificateArn = &v + return s +} + +// SetDashSignaledSystemIds sets the DashSignaledSystemIds field's value. +func (s *SpekeKeyProviderCmaf) SetDashSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf { + s.DashSignaledSystemIds = v + return s +} + +// SetHlsSignaledSystemIds sets the HlsSignaledSystemIds field's value. +func (s *SpekeKeyProviderCmaf) SetHlsSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf { + s.HlsSignaledSystemIds = v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *SpekeKeyProviderCmaf) SetResourceId(v string) *SpekeKeyProviderCmaf { + s.ResourceId = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *SpekeKeyProviderCmaf) SetUrl(v string) *SpekeKeyProviderCmaf { + s.Url = &v + return s +} + // Use these settings to set up encryption with a static key provider. type StaticKeyProvider struct { _ struct{} `type:"structure"` @@ -14536,6 +15435,13 @@ type TeletextDestinationSettings struct { // ending in -FF are invalid. If you are passing through the entire set of Teletext // data, do not use this field. PageNumber *string `locationName:"pageNumber" min:"3" type:"string"` + + // Specify the page types for this Teletext page. If you don't specify a value + // here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE). + // If you pass through the entire set of Teletext data, don't use this field. + // When you pass through a set of Teletext pages, your output has the same page + // types as your input. + PageTypes []*string `locationName:"pageTypes" type:"list"` } // String returns the string representation @@ -14567,6 +15473,12 @@ func (s *TeletextDestinationSettings) SetPageNumber(v string) *TeletextDestinati return s } +// SetPageTypes sets the PageTypes field's value. +func (s *TeletextDestinationSettings) SetPageTypes(v []*string) *TeletextDestinationSettings { + s.PageTypes = v + return s +} + // Settings specific to Teletext caption sources, including Page number. type TeletextSourceSettings struct { _ struct{} `type:"structure"` @@ -14818,8 +15730,10 @@ func (s *Timing) SetSubmitTime(v time.Time) *Timing { return s } -// Settings specific to caption sources that are specfied by track number. Sources -// include IMSC in IMF. +// Settings specific to caption sources that are specified by track number. +// Currently, this is only IMSC captions in an IMF package. If your caption +// source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead +// of TrackSourceSettings. type TrackSourceSettings struct { _ struct{} `type:"structure"` @@ -14978,6 +15892,13 @@ type UpdateJobTemplateInput struct { // Name is a required field Name *string `location:"uri" locationName:"name" type:"string" required:"true"` + // Specify the relative priority for this job. 
In any given queue, the service + // begins processing the job with the highest value first. When more than one + // job has the same priority, the service begins processing the job that you + // submitted first. If you don't specify a priority, the service uses the default + // value 0. + Priority *int64 `locationName:"priority" type:"integer"` + // The new queue for the job template, if you are changing it. Queue *string `locationName:"queue" type:"string"` @@ -15011,6 +15932,9 @@ func (s *UpdateJobTemplateInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Priority != nil && *s.Priority < -50 { + invalidParams.Add(request.NewErrParamMinValue("Priority", -50)) + } if s.AccelerationSettings != nil { if err := s.AccelerationSettings.Validate(); err != nil { invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams)) @@ -15052,6 +15976,12 @@ func (s *UpdateJobTemplateInput) SetName(v string) *UpdateJobTemplateInput { return s } +// SetPriority sets the Priority field's value. +func (s *UpdateJobTemplateInput) SetPriority(v int64) *UpdateJobTemplateInput { + s.Priority = &v + return s +} + // SetQueue sets the Queue field's value. func (s *UpdateJobTemplateInput) SetQueue(v string) *UpdateJobTemplateInput { s.Queue = &v @@ -15309,10 +16239,10 @@ func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput { // Video codec settings, (CodecSettings) under (VideoDescription), contains // the group of settings related to video encoding. The settings in this group -// vary depending on the value you choose for Video codec (Codec). For each -// codec enum you choose, define the corresponding settings object. The following -// lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, -// H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, +// vary depending on the value that you choose for Video codec (Codec). For +// each codec enum that you choose, define the corresponding settings object. +// The following lists the codec enum, settings object pairs. * H_264, H264Settings +// * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, // FrameCaptureSettings type VideoCodecSettings struct { _ struct{} `type:"structure"` @@ -15434,30 +16364,27 @@ type VideoDescription struct { // to calculate output AFD values based on the input AFD scaler data. AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` - // The service automatically applies the anti-alias filter to all outputs. The - // service no longer accepts the value DISABLED for AntiAlias. If you specify - // that in your job, the service will ignore the setting. + // The anti-alias filter is automatically applied to all outputs. The service + // no longer accepts the value DISABLED for AntiAlias. If you specify that in + // your job, the service will ignore the setting. AntiAlias *string `locationName:"antiAlias" type:"string" enum:"AntiAlias"` // Video codec settings, (CodecSettings) under (VideoDescription), contains // the group of settings related to video encoding. The settings in this group - // vary depending on the value you choose for Video codec (Codec). For each - // codec enum you choose, define the corresponding settings object. The following - // lists the codec enum, settings object pairs. 
* H_264, H264Settings * H_265, - // H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, + // vary depending on the value that you choose for Video codec (Codec). For + // each codec enum that you choose, define the corresponding settings object. + // The following lists the codec enum, settings object pairs. * H_264, H264Settings + // * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, // FrameCaptureSettings CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` - // Enable Insert color metadata (ColorMetadata) to include color metadata in - // this output. This setting is enabled by default. + // Choose Insert (INSERT) for this setting to include color metadata in this + // output. Choose Ignore (IGNORE) to exclude color metadata from this output. + // If you don't specify a value, the service sets this to Insert by default. ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"ColorMetadata"` - // Applies only if your input aspect ratio is different from your output aspect - // ratio. Use Input cropping rectangle (Crop) to specify the video area the - // service will include in the output. This will crop the input source, causing - // video pixels to be removed on encode. If you crop your input frame size to - // smaller than your output frame size, make sure to specify the behavior you - // want in your output setting "Scaling behavior". + // Use Cropping selection (crop) to specify the video area that the service + // will include in the output video frame. Crop *Rectangle `locationName:"crop" type:"structure"` // Applies only to 29.97 fps outputs. When this feature is enabled, the service @@ -15476,8 +16403,8 @@ type VideoDescription struct { // will use the input height. Height *int64 `locationName:"height" min:"32" type:"integer"` - // Use Position (Position) to point to a rectangle object to define your position. - // This setting overrides any other aspect ratio. + // Use Selection placement (position) to define the video area in your output + // frame. The area outside of the rectangle that you specify here is black. Position *Rectangle `locationName:"position" type:"structure"` // Use Respond to AFD (RespondToAfd) to specify how the service changes the @@ -15490,11 +16417,12 @@ type VideoDescription struct { // from this output. RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"RespondToAfd"` - // Applies only if your input aspect ratio is different from your output aspect - // ratio. Choose "Stretch to output" to have the service stretch your video - // image to fit. Keep the setting "Default" to allow the service to letterbox - // your video instead. This setting overrides any positioning value you specify - // elsewhere in the job. + // Specify how the service handles outputs that have a different aspect ratio + // from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) + // to have the service stretch your video image to fit. Keep the setting Default + // (DEFAULT) to have the service letterbox your video instead. This setting + // overrides any value that you specify for the setting Selection placement + // (position) in this output. ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"ScalingBehavior"` // Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. 
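As a quick usage sketch for the SpekeKeyProviderCmaf type added above (not part of this diff; the ARN, system IDs, resource ID, and URL are placeholder values), the generated fluent setters chain like this:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Chain the generated setters; each returns *SpekeKeyProviderCmaf.
	provider := (&mediaconvert.SpekeKeyProviderCmaf{}).
		SetCertificateArn("arn:aws:acm:us-east-1:111122223333:certificate/example").
		SetDashSignaledSystemIds([]*string{aws.String("edef8ba9-79d6-4ace-a3c8-27dcd51d21ed")}).
		SetHlsSignaledSystemIds([]*string{aws.String("94ce86fb-07ff-4f43-adb8-93d2fa968ca2")}).
		SetResourceId("example-content-id").
		SetUrl("https://speke.example.com/v1.0")

	// String pretty-prints the struct via awsutil.Prettify, as generated above.
	fmt.Println(provider.String())
}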
@@ -15803,33 +16731,37 @@ type VideoSelector struct {
	_ struct{} `type:"structure"`

	// If your input video has accurate color space metadata, or if you don't know
-	// about color space, leave this set to the default value FOLLOW. The service
-	// will automatically detect your input color space. If your input video has
-	// metadata indicating the wrong color space, or if your input video is missing
-	// color space metadata that should be there, specify the accurate color space
-	// here. If you choose HDR10, you can also correct inaccurate color space coefficients,
-	// using the HDR master display information controls. You must also set Color
-	// space usage (ColorSpaceUsage) to FORCE for the service to use these values.
+	// about color space, leave this set to the default value Follow (FOLLOW). The
+	// service will automatically detect your input color space. If your input video
+	// has metadata indicating the wrong color space, specify the accurate color
+	// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
+	// Display Color Volume static metadata isn't present in your video stream,
+	// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
+	// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
+	// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	ColorSpace *string `locationName:"colorSpace" type:"string" enum:"ColorSpace"`

-	// There are two sources for color metadata, the input file and the job configuration
-	// (in the Color space and HDR master display informaiton settings). The Color
-	// space usage setting controls which takes precedence. FORCE: The system will
-	// use color metadata supplied by user, if any. If the user does not supply
-	// color metadata, the system will use data from the source. FALLBACK: The system
-	// will use color metadata from the source. If source has no color metadata,
-	// the system will use user-supplied color metadata values if available.
+	// There are two sources for color metadata, the input file and the job input
+	// settings Color space (ColorSpace) and HDR master display information settings
+	// (Hdr10Metadata). The Color space usage setting determines which takes precedence.
+	// Choose Force (FORCE) to use color metadata from the input job settings. If
+	// you don't specify values for those settings, the service defaults to using
+	// metadata from your input. Choose Fallback (FALLBACK) to use color metadata
+	// from the source when it is present. If there's no color metadata in your
+	// input file, the service defaults to using values you specify in the input
+	// settings.
	ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"ColorSpaceUsage"`

-	// Use the "HDR master display information" (Hdr10Metadata) settings to correct
-	// HDR metadata or to provide missing metadata. These values vary depending
-	// on the input video and must be provided by a color grader. Range is 0 to
-	// 50,000; each increment represents 0.00002 in CIE1931 color coordinate. Note
-	// that these settings are not color correction. Note that if you are creating
-	// HDR outputs inside of an HLS CMAF package, to comply with the Apple specification,
-	// you must use the following settings. Set "MP4 packaging type" (writeMp4PackagingType)
-	// to HVC1 (HVC1). Set "Profile" (H265Settings > codecProfile) to Main10/High
-	// (MAIN10_HIGH). Set "Level" (H265Settings > codecLevel) to 5 (LEVEL_5).
Set "Level" (H265Settings > codecLevel) to 5 (LEVEL_5). + // Use these settings to provide HDR 10 metadata that is missing or inaccurate + // in your input video. Appropriate values vary depending on the input video + // and must be provided by a color grader. The color grader generates these + // values during the HDR 10 mastering process. The valid range for each of these + // settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color + // coordinate. Related settings - When you specify these values, you must also + // set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the + // values you specify here take precedence over the values in the metadata of + // your input file, set Color space usage (ColorSpaceUsage). To specify whether + // color metadata is included in an output, set Color metadata (ColorMetadata). + // For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr. Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` // Use PID (Pid) to select specific video data from an input file. Specify this @@ -15926,9 +16858,8 @@ type WavSettings struct { // quality for this audio track. BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` - // Set Channels to specify the number of channels in this output audio track. - // With WAV, valid values 1, 2, 4, and 8. In the console, these values are Mono, - // Stereo, 4-Channel, and 8-Channel, respectively. + // Specify the number of channels in this output audio track. Valid values are + // 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64. Channels *int64 `locationName:"channels" min:"1" type:"integer"` // The service defaults to using RIFF for WAV outputs. If your output audio @@ -16088,8 +17019,9 @@ const ( AacVbrQualityHigh = "HIGH" ) -// Specifies the "Bitstream Mode" (bsmod) for the emitted AC-3 stream. See ATSC -// A/52-2012 for background on these values. +// Specify the bitstream mode for the AC-3 stream that the encoder emits. For +// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex +// E). const ( // Ac3BitstreamModeCompleteMain is a Ac3BitstreamMode enum value Ac3BitstreamModeCompleteMain = "COMPLETE_MAIN" @@ -16162,14 +17094,47 @@ const ( Ac3MetadataControlUseConfigured = "USE_CONFIGURED" ) -// Enable Acceleration (AccelerationMode) on any job that you want processed -// with accelerated transcoding. +// Specify whether the service runs your job with accelerated transcoding. Choose +// DISABLED if you don't want accelerated transcoding. Choose ENABLED if you +// want your job to run with accelerated transcoding and to fail if your input +// files or your job settings aren't compatible with accelerated transcoding. +// Choose PREFERRED if you want your job to run with accelerated transcoding +// if the job is compatible with the feature and to run at standard speed if +// it's not. const ( // AccelerationModeDisabled is a AccelerationMode enum value AccelerationModeDisabled = "DISABLED" // AccelerationModeEnabled is a AccelerationMode enum value AccelerationModeEnabled = "ENABLED" + + // AccelerationModePreferred is a AccelerationMode enum value + AccelerationModePreferred = "PREFERRED" +) + +// Describes whether the current job is running with accelerated transcoding. +// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus +// is always NOT_APPLICABLE. 
For jobs that have Acceleration (AccelerationMode)
+// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
+// AccelerationStatus is IN_PROGRESS initially, while the service determines
+// whether the input files and job settings are compatible with accelerated
+// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
+// files and job settings aren't compatible with accelerated transcoding, the
+// service either fails your job or runs it without accelerated transcoding,
+// depending on how you set Acceleration (AccelerationMode). When the service
+// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
+const (
+	// AccelerationStatusNotApplicable is a AccelerationStatus enum value
+	AccelerationStatusNotApplicable = "NOT_APPLICABLE"
+
+	// AccelerationStatusInProgress is a AccelerationStatus enum value
+	AccelerationStatusInProgress = "IN_PROGRESS"
+
+	// AccelerationStatusAccelerated is a AccelerationStatus enum value
+	AccelerationStatusAccelerated = "ACCELERATED"
+
+	// AccelerationStatusNotAccelerated is a AccelerationStatus enum value
+	AccelerationStatusNotAccelerated = "NOT_ACCELERATED"
)

// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert

@@ -16189,9 +17154,33 @@ const (
	AfdSignalingFixed = "FIXED"
)

-// The service automatically applies the anti-alias filter to all outputs. The
-// service no longer accepts the value DISABLED for AntiAlias. If you specify
-// that in your job, the service will ignore the setting.
+// Specify whether this set of input captions appears in your outputs in both
+// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
+// the captions data in two ways: it passes the 608 data through using the 608
+// compatibility bytes fields of the 708 wrapper, and it also translates the
+// 608 data into 708.
+const (
+	// AncillaryConvert608To708Upconvert is a AncillaryConvert608To708 enum value
+	AncillaryConvert608To708Upconvert = "UPCONVERT"
+
+	// AncillaryConvert608To708Disabled is a AncillaryConvert608To708 enum value
+	AncillaryConvert608To708Disabled = "DISABLED"
+)
+
+// By default, the service terminates any unterminated captions at the end of
+// each input. If you want the caption to continue onto your next input, disable
+// this setting.
+const (
+	// AncillaryTerminateCaptionsEndOfInput is a AncillaryTerminateCaptions enum value
+	AncillaryTerminateCaptionsEndOfInput = "END_OF_INPUT"
+
+	// AncillaryTerminateCaptionsDisabled is a AncillaryTerminateCaptions enum value
+	AncillaryTerminateCaptionsDisabled = "DISABLED"
+)
+
+// The anti-alias filter is automatically applied to all outputs. The service
+// no longer accepts the value DISABLED for AntiAlias. If you specify that in
+// your job, the service will ignore the setting.
const (
	// AntiAliasDisabled is a AntiAlias enum value
	AntiAliasDisabled = "DISABLED"

@@ -16220,6 +17209,9 @@ const (
	// AudioCodecEac3 is a AudioCodec enum value
	AudioCodecEac3 = "EAC3"

+	// AudioCodecEac3Atmos is a AudioCodec enum value
+	AudioCodecEac3Atmos = "EAC3_ATMOS"
+
	// AudioCodecPassthrough is a AudioCodec enum value
	AudioCodecPassthrough = "PASSTHROUGH"
)

@@ -16247,14 +17239,28 @@ const (
	AudioLanguageCodeControlUseConfigured = "USE_CONFIGURED"
)

-// Audio normalization algorithm to use. 1770-1 conforms to the CALM Act specification,
-// 1770-2 conforms to the EBU R-128 specification.
+// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
+// Ungated loudness.
A measurement of ungated average loudness for an entire +// piece of content, suitable for measurement of short-form content under ATSC +// recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2: +// Gated loudness. A measurement of gated average loudness compliant with the +// requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3: +// Modified peak. The same loudness measurement algorithm as 1770-2, with an +// updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows +// for more audio channels than the other algorithms, including configurations +// such as 7.1. const ( // AudioNormalizationAlgorithmItuBs17701 is a AudioNormalizationAlgorithm enum value AudioNormalizationAlgorithmItuBs17701 = "ITU_BS_1770_1" // AudioNormalizationAlgorithmItuBs17702 is a AudioNormalizationAlgorithm enum value AudioNormalizationAlgorithmItuBs17702 = "ITU_BS_1770_2" + + // AudioNormalizationAlgorithmItuBs17703 is a AudioNormalizationAlgorithm enum value + AudioNormalizationAlgorithmItuBs17703 = "ITU_BS_1770_3" + + // AudioNormalizationAlgorithmItuBs17704 is a AudioNormalizationAlgorithm enum value + AudioNormalizationAlgorithmItuBs17704 = "ITU_BS_1770_4" ) // When enabled the output audio is corrected using the chosen algorithm. If @@ -16325,6 +17331,9 @@ const ( // BillingTagsSourceJobTemplate is a BillingTagsSource enum value BillingTagsSourceJobTemplate = "JOB_TEMPLATE" + + // BillingTagsSourceJob is a BillingTagsSource enum value + BillingTagsSourceJob = "JOB" ) // If no explicit x_position or y_position is provided, setting alignment to @@ -16432,7 +17441,7 @@ const ( // Specify the format for this set of captions on this output. The default format // is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, -// DVB-sub, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, +// DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, // choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED) to create an output that // complies with the SCTE-43 spec. To create a non-compliant output where the // embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). @@ -16449,6 +17458,9 @@ const ( // CaptionDestinationTypeEmbeddedPlusScte20 is a CaptionDestinationType enum value CaptionDestinationTypeEmbeddedPlusScte20 = "EMBEDDED_PLUS_SCTE20" + // CaptionDestinationTypeImsc is a CaptionDestinationType enum value + CaptionDestinationTypeImsc = "IMSC" + // CaptionDestinationTypeScte20PlusEmbedded is a CaptionDestinationType enum value CaptionDestinationTypeScte20PlusEmbedded = "SCTE20_PLUS_EMBEDDED" @@ -16531,16 +17543,18 @@ const ( CmafCodecSpecificationRfc4281 = "RFC_4281" ) -// Encrypts the segments with the given encryption scheme. Leave blank to disable. -// Selecting 'Disabled' in the web interface also disables encryption. +// Specify the encryption scheme that you want the service to use when encrypting +// your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). const ( // CmafEncryptionTypeSampleAes is a CmafEncryptionType enum value CmafEncryptionTypeSampleAes = "SAMPLE_AES" + + // CmafEncryptionTypeAesCtr is a CmafEncryptionType enum value + CmafEncryptionTypeAesCtr = "AES_CTR" ) -// The Initialization Vector is a 128-bit number used in conjunction with the -// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed -// in the manifest. Otherwise Initialization Vector is not in the manifest. 
+// When you use DRM with CMAF outputs, choose whether the service writes the +// 128-bit encryption initialization vector in the HLS and DASH manifests. const ( // CmafInitializationVectorInManifestInclude is a CmafInitializationVectorInManifest enum value CmafInitializationVectorInManifestInclude = "INCLUDE" @@ -16549,8 +17563,12 @@ const ( CmafInitializationVectorInManifestExclude = "EXCLUDE" ) -// Indicates which type of key provider is used for encryption. +// Specify whether your DRM encryption key is static or from a key provider +// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. const ( + // CmafKeyProviderTypeSpeke is a CmafKeyProviderType enum value + CmafKeyProviderTypeSpeke = "SPEKE" + // CmafKeyProviderTypeStaticKey is a CmafKeyProviderType enum value CmafKeyProviderTypeStaticKey = "STATIC_KEY" ) @@ -16574,6 +17592,20 @@ const ( CmafManifestDurationFormatInteger = "INTEGER" ) +// Specify whether your DASH profile is on-demand or main. When you choose Main +// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 +// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), +// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. +// When you choose On-demand, you must also set the output group setting Segment +// control (SegmentControl) to Single file (SINGLE_FILE). +const ( + // CmafMpdProfileMainProfile is a CmafMpdProfile enum value + CmafMpdProfileMainProfile = "MAIN_PROFILE" + + // CmafMpdProfileOnDemandProfile is a CmafMpdProfile enum value + CmafMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" +) + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -16613,8 +17645,9 @@ const ( CmafWriteHLSManifestEnabled = "ENABLED" ) -// Enable Insert color metadata (ColorMetadata) to include color metadata in -// this output. This setting is enabled by default. +// Choose Insert (INSERT) for this setting to include color metadata in this +// output. Choose Ignore (IGNORE) to exclude color metadata from this output. +// If you don't specify a value, the service sets this to Insert by default. const ( // ColorMetadataIgnore is a ColorMetadata enum value ColorMetadataIgnore = "IGNORE" @@ -16624,13 +17657,14 @@ const ( ) // If your input video has accurate color space metadata, or if you don't know -// about color space, leave this set to the default value FOLLOW. The service -// will automatically detect your input color space. If your input video has -// metadata indicating the wrong color space, or if your input video is missing -// color space metadata that should be there, specify the accurate color space -// here. If you choose HDR10, you can also correct inaccurate color space coefficients, -// using the HDR master display information controls. You must also set Color -// space usage (ColorSpaceUsage) to FORCE for the service to use these values. +// about color space, leave this set to the default value Follow (FOLLOW). The +// service will automatically detect your input color space. If your input video +// has metadata indicating the wrong color space, specify the accurate color +// space here. 
If your input video is HDR 10 and the SMPTE ST 2086 Mastering
+// Display Color Volume static metadata isn't present in your video stream,
+// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
+// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
+// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
const (
	// ColorSpaceFollow is a ColorSpace enum value
	ColorSpaceFollow = "FOLLOW"

@@ -16648,11 +17682,11 @@ const (
	ColorSpaceHlg2020 = "HLG_2020"
)

-// Determines if colorspace conversion will be performed. If set to _None_,
-// no conversion will be performed. If _Force 601_ or _Force 709_ are selected,
-// conversion will be performed for inputs with differing colorspaces. An input's
-// colorspace can be specified explicitly in the "Video Selector":#inputs-video_selector
-// if necessary.
+// Specify the color space you want for this output. The service supports conversion
+// between HDR formats, between SDR formats, and from SDR to HDR. The service
+// doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't
+// upgrade the dynamic range. The converted video has an HDR format, but visually
+// appears the same as an unconverted output.
const (
	// ColorSpaceConversionNone is a ColorSpaceConversion enum value
	ColorSpaceConversionNone = "NONE"

@@ -16670,13 +17704,14 @@ const (
	ColorSpaceConversionForceHlg2020 = "FORCE_HLG_2020"
)

-// There are two sources for color metadata, the input file and the job configuration
-// (in the Color space and HDR master display informaiton settings). The Color
-// space usage setting controls which takes precedence. FORCE: The system will
-// use color metadata supplied by user, if any. If the user does not supply
-// color metadata, the system will use data from the source. FALLBACK: The system
-// will use color metadata from the source. If source has no color metadata,
-// the system will use user-supplied color metadata values if available.
+// There are two sources for color metadata, the input file and the job input
+// settings Color space (ColorSpace) and HDR master display information settings
+// (Hdr10Metadata). The Color space usage setting determines which takes precedence.
+// Choose Force (FORCE) to use color metadata from the input job settings. If
+// you don't specify values for those settings, the service defaults to using
+// metadata from your input. Choose Fallback (FALLBACK) to use color metadata
+// from the source when it is present. If there's no color metadata in your
+// input file, the service defaults to using values you specify in the input
+// settings.
const (
	// ColorSpaceUsageForce is a ColorSpaceUsage enum value
	ColorSpaceUsageForce = "FORCE"

@@ -16734,6 +17769,20 @@ const (
	DashIsoHbbtvComplianceNone = "NONE"
)

+// Specify whether your DASH profile is on-demand or main. When you choose Main
+// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
+// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
+// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
+// When you choose On-demand, you must also set the output group setting Segment
+// control (SegmentControl) to Single file (SINGLE_FILE).
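+// For example (a hedged sketch, not generated SDK text; it assumes the MpdProfile
+// and SegmentControl fields on DashIsoGroupSettings in this SDK version):
+//    dash := &DashIsoGroupSettings{}
+//    dash.SetMpdProfile(DashIsoMpdProfileOnDemandProfile)
+//    dash.SetSegmentControl(DashIsoSegmentControlSingleFile) // required with ON_DEMAND_PROFILE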
+const ( + // DashIsoMpdProfileMainProfile is a DashIsoMpdProfile enum value + DashIsoMpdProfileMainProfile = "MAIN_PROFILE" + + // DashIsoMpdProfileOnDemandProfile is a DashIsoMpdProfile enum value + DashIsoMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" +) + // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback @@ -16962,6 +18011,118 @@ const ( DvbSubtitleTeletextSpacingProportional = "PROPORTIONAL" ) +// Specify the bitstream mode for the E-AC-3 stream that the encoder emits. +// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex +// E). +const ( + // Eac3AtmosBitstreamModeCompleteMain is a Eac3AtmosBitstreamMode enum value + Eac3AtmosBitstreamModeCompleteMain = "COMPLETE_MAIN" +) + +// The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6 (CODING_MODE_9_1_6). +const ( + // Eac3AtmosCodingModeCodingMode916 is a Eac3AtmosCodingMode enum value + Eac3AtmosCodingModeCodingMode916 = "CODING_MODE_9_1_6" +) + +// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis. +const ( + // Eac3AtmosDialogueIntelligenceEnabled is a Eac3AtmosDialogueIntelligence enum value + Eac3AtmosDialogueIntelligenceEnabled = "ENABLED" + + // Eac3AtmosDialogueIntelligenceDisabled is a Eac3AtmosDialogueIntelligence enum value + Eac3AtmosDialogueIntelligenceDisabled = "DISABLED" +) + +// Specify the absolute peak level for a signal with dynamic range compression. +const ( + // Eac3AtmosDynamicRangeCompressionLineNone is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineNone = "NONE" + + // Eac3AtmosDynamicRangeCompressionLineFilmStandard is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineFilmStandard = "FILM_STANDARD" + + // Eac3AtmosDynamicRangeCompressionLineFilmLight is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineFilmLight = "FILM_LIGHT" + + // Eac3AtmosDynamicRangeCompressionLineMusicStandard is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD" + + // Eac3AtmosDynamicRangeCompressionLineMusicLight is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT" + + // Eac3AtmosDynamicRangeCompressionLineSpeech is a Eac3AtmosDynamicRangeCompressionLine enum value + Eac3AtmosDynamicRangeCompressionLineSpeech = "SPEECH" +) + +// Specify how the service limits the audio dynamic range when compressing the +// audio. 
+const ( + // Eac3AtmosDynamicRangeCompressionRfNone is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfNone = "NONE" + + // Eac3AtmosDynamicRangeCompressionRfFilmStandard is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfFilmStandard = "FILM_STANDARD" + + // Eac3AtmosDynamicRangeCompressionRfFilmLight is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfFilmLight = "FILM_LIGHT" + + // Eac3AtmosDynamicRangeCompressionRfMusicStandard is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD" + + // Eac3AtmosDynamicRangeCompressionRfMusicLight is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT" + + // Eac3AtmosDynamicRangeCompressionRfSpeech is a Eac3AtmosDynamicRangeCompressionRf enum value + Eac3AtmosDynamicRangeCompressionRfSpeech = "SPEECH" +) + +// Choose how the service meters the loudness of your audio. +const ( + // Eac3AtmosMeteringModeLeqA is a Eac3AtmosMeteringMode enum value + Eac3AtmosMeteringModeLeqA = "LEQ_A" + + // Eac3AtmosMeteringModeItuBs17701 is a Eac3AtmosMeteringMode enum value + Eac3AtmosMeteringModeItuBs17701 = "ITU_BS_1770_1" + + // Eac3AtmosMeteringModeItuBs17702 is a Eac3AtmosMeteringMode enum value + Eac3AtmosMeteringModeItuBs17702 = "ITU_BS_1770_2" + + // Eac3AtmosMeteringModeItuBs17703 is a Eac3AtmosMeteringMode enum value + Eac3AtmosMeteringModeItuBs17703 = "ITU_BS_1770_3" + + // Eac3AtmosMeteringModeItuBs17704 is a Eac3AtmosMeteringMode enum value + Eac3AtmosMeteringModeItuBs17704 = "ITU_BS_1770_4" +) + +// Choose how the service does stereo downmixing. +const ( + // Eac3AtmosStereoDownmixNotIndicated is a Eac3AtmosStereoDownmix enum value + Eac3AtmosStereoDownmixNotIndicated = "NOT_INDICATED" + + // Eac3AtmosStereoDownmixStereo is a Eac3AtmosStereoDownmix enum value + Eac3AtmosStereoDownmixStereo = "STEREO" + + // Eac3AtmosStereoDownmixSurround is a Eac3AtmosStereoDownmix enum value + Eac3AtmosStereoDownmixSurround = "SURROUND" + + // Eac3AtmosStereoDownmixDpl2 is a Eac3AtmosStereoDownmix enum value + Eac3AtmosStereoDownmixDpl2 = "DPL2" +) + +// Specify whether your input audio has an additional center rear surround channel +// matrix encoded into your left and right surround channels. +const ( + // Eac3AtmosSurroundExModeNotIndicated is a Eac3AtmosSurroundExMode enum value + Eac3AtmosSurroundExModeNotIndicated = "NOT_INDICATED" + + // Eac3AtmosSurroundExModeEnabled is a Eac3AtmosSurroundExMode enum value + Eac3AtmosSurroundExModeEnabled = "ENABLED" + + // Eac3AtmosSurroundExModeDisabled is a Eac3AtmosSurroundExMode enum value + Eac3AtmosSurroundExModeDisabled = "DISABLED" +) + // If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. // Only used for 3/2 coding mode. const ( @@ -16972,8 +18133,9 @@ const ( Eac3AttenuationControlNone = "NONE" ) -// Specifies the "Bitstream Mode" (bsmod) for the emitted E-AC-3 stream. See -// ATSC A/52-2012 (Annex E) for background on these values. +// Specify the bitstream mode for the E-AC-3 stream that the encoder emits. +// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex +// E). 
const ( // Eac3BitstreamModeCompleteMain is a Eac3BitstreamMode enum value Eac3BitstreamModeCompleteMain = "COMPLETE_MAIN" @@ -17012,8 +18174,7 @@ const ( Eac3DcFilterDisabled = "DISABLED" ) -// Enables Dynamic Range Compression that restricts the absolute peak level -// for a signal. +// Specify the absolute peak level for a signal with dynamic range compression. const ( // Eac3DynamicRangeCompressionLineNone is a Eac3DynamicRangeCompressionLine enum value Eac3DynamicRangeCompressionLineNone = "NONE" @@ -17034,8 +18195,8 @@ const ( Eac3DynamicRangeCompressionLineSpeech = "SPEECH" ) -// Enables Heavy Dynamic Range Compression, ensures that the instantaneous signal -// peaks do not exceed specified levels. +// Specify how the service limits the audio dynamic range when compressing the +// audio. const ( // Eac3DynamicRangeCompressionRfNone is a Eac3DynamicRangeCompressionRf enum value Eac3DynamicRangeCompressionRfNone = "NONE" @@ -17108,7 +18269,10 @@ const ( Eac3PhaseControlNoShift = "NO_SHIFT" ) -// Stereo downmix preference. Only used for 3/2 coding mode. +// Choose how the service does stereo downmixing. This setting only applies +// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) +// for the setting Coding mode (Eac3CodingMode). If you choose a different value +// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix). const ( // Eac3StereoDownmixNotIndicated is a Eac3StereoDownmix enum value Eac3StereoDownmixNotIndicated = "NOT_INDICATED" @@ -17149,9 +18313,11 @@ const ( Eac3SurroundModeDisabled = "DISABLED" ) -// When set to UPCONVERT, 608 data is both passed through via the "608 compatibility -// bytes" fields of the 708 wrapper as well as translated into 708. 708 data -// present in the source content will be discarded. +// Specify whether this set of input captions appears in your outputs in both +// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes +// the captions data in two ways: it passes the 608 data through using the 608 +// compatibility bytes fields of the 708 wrapper, and it also translates the +// 608 data into 708. const ( // EmbeddedConvert608To708Upconvert is a EmbeddedConvert608To708 enum value EmbeddedConvert608To708Upconvert = "UPCONVERT" @@ -17160,6 +18326,17 @@ const ( EmbeddedConvert608To708Disabled = "DISABLED" ) +// By default, the service terminates any unterminated captions at the end of +// each input. If you want the caption to continue onto your next input, disable +// this setting. +const ( + // EmbeddedTerminateCaptionsEndOfInput is a EmbeddedTerminateCaptions enum value + EmbeddedTerminateCaptionsEndOfInput = "END_OF_INPUT" + + // EmbeddedTerminateCaptionsDisabled is a EmbeddedTerminateCaptions enum value + EmbeddedTerminateCaptionsDisabled = "DISABLED" +) + // If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning // of the archive as required for progressive downloading. Otherwise it is placed // normally at the end. @@ -17171,9 +18348,11 @@ const ( F4vMoovPlacementNormal = "NORMAL" ) -// If set to UPCONVERT, 608 caption data is both passed through via the "608 -// compatibility bytes" fields of the 708 wrapper as well as translated into -// 708. 708 data present in the source content will be discarded. +// Specify whether this set of input captions appears in your outputs in both +// 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes +// the captions data in two ways: it passes the 608 data through using the 608 +// compatibility bytes fields of the 708 wrapper, and it also translates the +// 608 data into 708. const ( // FileSourceConvert608To708Upconvert is a FileSourceConvert608To708 enum value FileSourceConvert608To708Upconvert = "UPCONVERT" @@ -17458,13 +18637,20 @@ const ( H264RepeatPpsEnabled = "ENABLED" ) -// Scene change detection (inserts I-frames on scene changes). +// Enable this setting to insert I-frames at scene changes that the service +// automatically detects. This improves video quality and is enabled by default. +// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) +// for further video quality improvement. For more information about QVBR, see +// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. const ( // H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value H264SceneChangeDetectDisabled = "DISABLED" // H264SceneChangeDetectEnabled is a H264SceneChangeDetect enum value H264SceneChangeDetectEnabled = "ENABLED" + + // H264SceneChangeDetectTransitionDetection is a H264SceneChangeDetect enum value + H264SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION" ) // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled @@ -17709,17 +18895,18 @@ const ( H265GopSizeUnitsSeconds = "SECONDS" ) -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. - If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. +// Choose the scan line type for the output. Choose Progressive (PROGRESSIVE) +// to create a progressive output, regardless of the scan type of your input. +// Choose Top Field First (TOP_FIELD) or Bottom Field First (BOTTOM_FIELD) to +// create an output that's interlaced with the same field polarity throughout. +// Choose Follow, Default Top (FOLLOW_TOP_FIELD) or Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) +// to create an interlaced output with the same field polarity as the source. +// If the source is interlaced, the output will be interlaced with the same +// polarity as the source (it will follow the source). The output could therefore +// be a mix of "top field first" and "bottom field first". If the source is +// progressive, your output will be interlaced with "top field first" or "bottom +// field first" polarity, depending on which of the Follow options you chose. +// If you don't choose a value, the service will default to Progressive (PROGRESSIVE). const ( // H265InterlaceModeProgressive is a H265InterlaceMode enum value H265InterlaceModeProgressive = "PROGRESSIVE" @@ -17788,13 +18975,20 @@ const ( H265SampleAdaptiveOffsetFilterModeOff = "OFF" ) -// Scene change detection (inserts I-frames on scene changes). 
+// Enable this setting to insert I-frames at scene changes that the service +// automatically detects. This improves video quality and is enabled by default. +// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) +// for further video quality improvement. For more information about QVBR, see +// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. const ( // H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value H265SceneChangeDetectDisabled = "DISABLED" // H265SceneChangeDetectEnabled is a H265SceneChangeDetect enum value H265SceneChangeDetectEnabled = "ENABLED" + + // H265SceneChangeDetectTransitionDetection is a H265SceneChangeDetect enum value + H265SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION" ) // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled @@ -17880,17 +19074,16 @@ const ( H265UnregisteredSeiTimecodeEnabled = "ENABLED" ) -// Use this setting only for outputs encoded with H.265 that are in CMAF or -// DASH output groups. If you include writeMp4PackagingType in your JSON job -// specification for other outputs, your video might not work properly with -// downstream systems and video players. If the location of parameter set NAL -// units don't matter in your workflow, ignore this setting. The service defaults -// to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. -// This makes your output compliant with this specification: ISO IECJTC1 SC29 -// N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service -// stores parameter set NAL units in the sample headers but not in the samples -// directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, -// the service writes parameter set NAL units directly into the samples. +// If the location of parameter set NAL units doesn't matter in your workflow, +// ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. +// For file MP4 outputs, choosing HVC1 can create video that doesn't work properly +// with some downstream systems and video players. Choose HVC1 to mark your +// output as HVC1. This makes your output compliant with the following specification: +// ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these +// outputs, the service stores parameter set NAL units in the sample headers +// but not in the samples directly. The service defaults to marking your output +// as HEV1. For these outputs, the service writes parameter set NAL units directly +// into the samples. const ( // H265WriteMp4PackagingTypeHvc1 is a H265WriteMp4PackagingType enum value H265WriteMp4PackagingTypeHvc1 = "HVC1" @@ -17907,6 +19100,19 @@ const ( HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35" ) +// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream +// (M2TS) to create a file in an MPEG2-TS container. Keep the default value +// Automatic (AUTOMATIC) to create a raw audio-only file with no container. +// Regardless of the value that you specify here, if this output has video, +// the service will place outputs into an MPEG2-TS container. 
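+// For example (a hedged sketch, not generated SDK text; it assumes the
+// AudioOnlyContainer field on HlsSettings in this SDK version):
+//    hls := &HlsSettings{}
+//    hls.SetAudioOnlyContainer(HlsAudioOnlyContainerM2ts) // wrap the audio-only output in MPEG2-TS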
+const ( + // HlsAudioOnlyContainerAutomatic is a HlsAudioOnlyContainer enum value + HlsAudioOnlyContainerAutomatic = "AUTOMATIC" + + // HlsAudioOnlyContainerM2ts is a HlsAudioOnlyContainer enum value + HlsAudioOnlyContainerM2ts = "M2TS" +) + // Four types of audio-only tracks are supported: Audio-Only Variant Stream // The client can play back this audio-only stream instead of video in low-bandwidth // scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate @@ -18012,7 +19218,8 @@ const ( HlsInitializationVectorInManifestExclude = "EXCLUDE" ) -// Indicates which type of key provider is used for encryption. +// Specify whether your DRM encryption key is static or from a key provider +// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. const ( // HlsKeyProviderTypeSpeke is a HlsKeyProviderType enum value HlsKeyProviderTypeSpeke = "SPEKE" @@ -18104,6 +19311,18 @@ const ( HlsTimedMetadataId3FrameTdrl = "TDRL" ) +// Keep this setting enabled to have MediaConvert use the font style and position +// information from the captions source in the output. This option is available +// only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable +// this setting for simplified output captions. +const ( + // ImscStylePassthroughEnabled is a ImscStylePassthrough enum value + ImscStylePassthroughEnabled = "ENABLED" + + // ImscStylePassthroughDisabled is a ImscStylePassthrough enum value + ImscStylePassthroughDisabled = "DISABLED" +) + // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. // Default is disabled. Only manaully controllable for MPEG2 and uncompressed // video inputs. @@ -18182,13 +19401,16 @@ const ( InputRotateAuto = "AUTO" ) -// Timecode source under input settings (InputTimecodeSource) only affects the -// behavior of features that apply to a single input at a time, such as input -// clipping and synchronizing some captions formats. Use this setting to specify -// whether the service counts frames by timecodes embedded in the video (EMBEDDED) -// or by starting the first frame at zero (ZEROBASED). In both cases, the timecode -// format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only -// set this to EMBEDDED if your source video has embedded timecodes. +// Use this Timecode source setting, located under the input settings (InputTimecodeSource), +// to specify how the service counts input video frames. This input frame count +// affects only the behavior of features that apply to a single input at a time, +// such as input clipping and synchronizing some captions formats. Choose Embedded +// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero +// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) +// to start the first frame at the timecode that you specify in the setting +// Start timecode (timecodeStart). If you don't specify a value for Timecode +// source, the service will use Embedded by default. For more information about +// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. const ( // InputTimecodeSourceEmbedded is a InputTimecodeSource enum value InputTimecodeSourceEmbedded = "EMBEDDED" @@ -18918,8 +20140,12 @@ const ( M2tsRateModeCbr = "CBR" ) -// Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from -// input to output. 
+// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if +// you want SCTE-35 markers that appear in your input to also appear in this +// output. Choose None (NONE) if you don't want SCTE-35 markers in this output. +// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also +// provide the ESAM XML as a string in the setting Signal processing notification +// XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). const ( // M2tsScte35SourcePassthrough is a M2tsScte35Source enum value M2tsScte35SourcePassthrough = "PASSTHROUGH" @@ -18995,8 +20221,14 @@ const ( M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" ) -// Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from -// input to output. +// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if +// you want SCTE-35 markers that appear in your input to also appear in this +// output. Choose None (NONE) if you don't want SCTE-35 markers in this output. +// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you +// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose +// Ad markers (adMarkers) if you do want manifest conditioning. In both cases, +// also provide the ESAM XML as a string in the setting Signal processing notification +// XML (sccXml). const ( // M3u8Scte35SourcePassthrough is a M3u8Scte35Source enum value M3u8Scte35SourcePassthrough = "PASSTHROUGH" @@ -19282,7 +20514,8 @@ const ( Mpeg2RateControlModeCbr = "CBR" ) -// Scene change detection (inserts I-frames on scene changes). +// Enable this setting to insert I-frames at scene changes that the service +// automatically detects. This improves video quality and is enabled by default. const ( // Mpeg2SceneChangeDetectDisabled is a Mpeg2SceneChangeDetect enum value Mpeg2SceneChangeDetectDisabled = "DISABLED" @@ -19367,10 +20600,11 @@ const ( // Use Noise reducer filter (NoiseReducerFilter) to select one of the following // spatial image filtering functions. To use this setting, you must also enable -// Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction -// filter. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution -// filters. * Conserve is a min/max noise reduction filter. * Spatial is a frequency-domain -// filter based on JND principles. +// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing +// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution +// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain +// filtering based on JND principles. * Temporal optimizes video quality for +// complex motion. const ( // NoiseReducerFilterBilateral is a NoiseReducerFilter enum value NoiseReducerFilterBilateral = "BILATERAL" @@ -19392,6 +20626,9 @@ const ( // NoiseReducerFilterSpatial is a NoiseReducerFilter enum value NoiseReducerFilterSpatial = "SPATIAL" + + // NoiseReducerFilterTemporal is a NoiseReducerFilter enum value + NoiseReducerFilterTemporal = "TEMPORAL" ) // When you request lists of resources, you can optionally specify whether they @@ -19656,11 +20893,12 @@ const ( S3ServerSideEncryptionTypeServerSideEncryptionKms = "SERVER_SIDE_ENCRYPTION_KMS" ) -// Applies only if your input aspect ratio is different from your output aspect -// ratio. Choose "Stretch to output" to have the service stretch your video -// image to fit. 
Keep the setting "Default" to allow the service to letterbox -// your video instead. This setting overrides any positioning value you specify -// elsewhere in the job. +// Specify how the service handles outputs that have a different aspect ratio +// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) +// to have the service stretch your video image to fit. Keep the setting Default +// (DEFAULT) to have the service letterbox your video instead. This setting +// overrides any value that you specify for the setting Selection placement +// (position) in this output. const ( // ScalingBehaviorDefault is a ScalingBehavior enum value ScalingBehaviorDefault = "DEFAULT" @@ -19689,6 +20927,18 @@ const ( SccDestinationFramerateFramerate2997NonDropframe = "FRAMERATE_29_97_NON_DROPFRAME" ) +// Enable this setting when you run a test job to estimate how many reserved +// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs +// your job from an on-demand queue with similar performance to what you will +// see with one RTS in a reserved queue. This setting is disabled by default. +const ( + // SimulateReservedQueueDisabled is a SimulateReservedQueue enum value + SimulateReservedQueueDisabled = "DISABLED" + + // SimulateReservedQueueEnabled is a SimulateReservedQueue enum value + SimulateReservedQueueEnabled = "ENABLED" +) + // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch // Events. Set the interval, in seconds, between status updates. MediaConvert // sends an update at this interval from the time the service begins processing @@ -19740,6 +20990,24 @@ const ( StatusUpdateIntervalSeconds600 = "SECONDS_600" ) +// A page type as defined in the standard ETSI EN 300 468, Table 94 +const ( + // TeletextPageTypePageTypeInitial is a TeletextPageType enum value + TeletextPageTypePageTypeInitial = "PAGE_TYPE_INITIAL" + + // TeletextPageTypePageTypeSubtitle is a TeletextPageType enum value + TeletextPageTypePageTypeSubtitle = "PAGE_TYPE_SUBTITLE" + + // TeletextPageTypePageTypeAddlInfo is a TeletextPageType enum value + TeletextPageTypePageTypeAddlInfo = "PAGE_TYPE_ADDL_INFO" + + // TeletextPageTypePageTypeProgramSchedule is a TeletextPageType enum value + TeletextPageTypePageTypeProgramSchedule = "PAGE_TYPE_PROGRAM_SCHEDULE" + + // TeletextPageTypePageTypeHearingImpairedSubtitle is a TeletextPageType enum value + TeletextPageTypePageTypeHearingImpairedSubtitle = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE" +) + // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to // specify the location the burned-in timecode on output video. const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go index 43d8865f45f..beada6a05f0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConvert { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconvert" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConvert { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConvert { svc := &MediaConvert{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-29", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go index 74b8e835c67..84f7d8662f6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go @@ -1558,7 +1558,7 @@ func (c *MediaLive) DescribeScheduleWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeSchedule operation. // pageNum := 0 // err := client.DescribeSchedulePages(params, -// func(page *DescribeScheduleOutput, lastPage bool) bool { +// func(page *medialive.DescribeScheduleOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1590,10 +1590,12 @@ func (c *MediaLive) DescribeSchedulePagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeScheduleOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeScheduleOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1702,7 +1704,7 @@ func (c *MediaLive) ListChannelsWithContext(ctx aws.Context, input *ListChannels // // Example iterating over at most 3 pages of a ListChannels operation. // pageNum := 0 // err := client.ListChannelsPages(params, -// func(page *ListChannelsOutput, lastPage bool) bool { +// func(page *medialive.ListChannelsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1734,10 +1736,12 @@ func (c *MediaLive) ListChannelsPagesWithContext(ctx aws.Context, input *ListCha }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1846,7 +1850,7 @@ func (c *MediaLive) ListInputSecurityGroupsWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListInputSecurityGroups operation. // pageNum := 0 // err := client.ListInputSecurityGroupsPages(params, -// func(page *ListInputSecurityGroupsOutput, lastPage bool) bool { +// func(page *medialive.ListInputSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1878,10 +1882,12 @@ func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInputSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInputSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1990,7 +1996,7 @@ func (c *MediaLive) ListInputsWithContext(ctx aws.Context, input *ListInputsInpu // // Example iterating over at most 3 pages of a ListInputs operation. 
// pageNum := 0 // err := client.ListInputsPages(params, -// func(page *ListInputsOutput, lastPage bool) bool { +// func(page *medialive.ListInputsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2022,10 +2028,12 @@ func (c *MediaLive) ListInputsPagesWithContext(ctx aws.Context, input *ListInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInputsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInputsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2134,7 +2142,7 @@ func (c *MediaLive) ListOfferingsWithContext(ctx aws.Context, input *ListOfferin // // Example iterating over at most 3 pages of a ListOfferings operation. // pageNum := 0 // err := client.ListOfferingsPages(params, -// func(page *ListOfferingsOutput, lastPage bool) bool { +// func(page *medialive.ListOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2166,10 +2174,12 @@ func (c *MediaLive) ListOfferingsPagesWithContext(ctx aws.Context, input *ListOf }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2278,7 +2288,7 @@ func (c *MediaLive) ListReservationsWithContext(ctx aws.Context, input *ListRese // // Example iterating over at most 3 pages of a ListReservations operation. // pageNum := 0 // err := client.ListReservationsPages(params, -// func(page *ListReservationsOutput, lastPage bool) bool { +// func(page *medialive.ListReservationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2310,10 +2320,12 @@ func (c *MediaLive) ListReservationsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListReservationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListReservationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3926,13 +3938,11 @@ type AudioOnlyHlsSettings struct { // Specifies the group to which the audio Rendition belongs. AudioGroupId *string `locationName:"audioGroupId" type:"string"` - // For use with an audio only Stream. Must be a .jpg or .png file. If given, - // this image will be used as the cover-art for the audio only output. Ideally, - // it should be formatted for an iPhone screen for two reasons. The iPhone does - // not resize the image, it crops a centered image on the top/bottom and left/right. - // Additionally, this image file gets saved bit-for-bit into every 10-second - // segment file, so will increase bandwidth by {image file size} * {segment - // count} * {user count.}. + // Optional. Specifies the .jpg or .png image to use as the cover art for an + // audio-only output. We recommend a low bit-size file because the image increases + // the output audio bandwidth.The image is attached to the audio as an ID3 tag, + // frame type APIC, picture type 0x10, as per the "ID3 tag version 2.4.0 - Native + // Frames" standard. AudioOnlyImage *InputLocation `locationName:"audioOnlyImage" type:"structure"` // Four types of audio-only tracks are supported:Audio-Only Variant StreamThe @@ -5358,6 +5368,9 @@ type Channel struct { // The name of the channel. (user-mutable) Name *string `locationName:"name" type:"string"` + // Runtime details for the pipelines of a running channel. 
+ PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + // The number of currently healthy pipelines. PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` @@ -5440,6 +5453,12 @@ func (s *Channel) SetName(v string) *Channel { return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *Channel) SetPipelineDetails(v []*PipelineDetail) *Channel { + s.PipelineDetails = v + return s +} + // SetPipelinesRunningCount sets the PipelinesRunningCount field's value. func (s *Channel) SetPipelinesRunningCount(v int64) *Channel { s.PipelinesRunningCount = &v @@ -5619,6 +5638,21 @@ func (s *ChannelSummary) SetTags(v map[string]*string) *ChannelSummary { return s } +// Passthrough applies no color space conversion to the output +type ColorSpacePassthroughSettings struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ColorSpacePassthroughSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColorSpacePassthroughSettings) GoString() string { + return s.String() +} + type CreateChannelInput struct { _ struct{} `type:"structure"` @@ -6097,6 +6131,8 @@ type DeleteChannelOutput struct { Name *string `locationName:"name" type:"string"` + PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` RoleArn *string `locationName:"roleArn" type:"string"` @@ -6176,6 +6212,12 @@ func (s *DeleteChannelOutput) SetName(v string) *DeleteChannelOutput { return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *DeleteChannelOutput) SetPipelineDetails(v []*PipelineDetail) *DeleteChannelOutput { + s.PipelineDetails = v + return s +} + // SetPipelinesRunningCount sets the PipelinesRunningCount field's value. func (s *DeleteChannelOutput) SetPipelinesRunningCount(v int64) *DeleteChannelOutput { s.PipelinesRunningCount = &v @@ -6691,6 +6733,8 @@ type DescribeChannelOutput struct { Name *string `locationName:"name" type:"string"` + PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` RoleArn *string `locationName:"roleArn" type:"string"` @@ -6770,6 +6814,12 @@ func (s *DescribeChannelOutput) SetName(v string) *DescribeChannelOutput { return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *DescribeChannelOutput) SetPipelineDetails(v []*PipelineDetail) *DescribeChannelOutput { + s.PipelineDetails = v + return s +} + // SetPipelinesRunningCount sets the PipelinesRunningCount field's value. func (s *DescribeChannelOutput) SetPipelinesRunningCount(v int64) *DescribeChannelOutput { s.PipelinesRunningCount = &v @@ -6847,6 +6897,12 @@ type DescribeInputOutput struct { // A standard input has two sources and a single pipeline input only has one. InputClass *string `locationName:"inputClass" type:"string" enum:"InputClass"` + // There are two types of input sources, static and dynamic. If an input source + // is dynamic you canchange the source url of the input dynamically using an + // input switch action. However, the only input typeto support a dynamic url + // at this time is MP4_FILE. By default all input sources are static. 
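The new PipelineDetails field surfaces, per pipeline, which input attachment is currently active and which input switch action selected it. A sketch of reading it from DescribeChannel; the channel ID is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	sess := session.Must(session.NewSession())
	client := medialive.New(sess)

	out, err := client.DescribeChannel(&medialive.DescribeChannelInput{
		ChannelId: aws.String("1234567"), // hypothetical channel ID
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	// PipelineDetails is only populated while the channel is running.
	for _, pd := range out.PipelineDetails {
		fmt.Printf("pipeline %s is ingesting %s (switched by %s)\n",
			aws.StringValue(pd.PipelineId),
			aws.StringValue(pd.ActiveInputAttachmentName),
			aws.StringValue(pd.ActiveInputSwitchActionName))
	}
}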
+ InputSourceType *string `locationName:"inputSourceType" type:"string" enum:"InputSourceType"` + MediaConnectFlows []*MediaConnectFlow `locationName:"mediaConnectFlows" type:"list"` Name *string `locationName:"name" type:"string"` @@ -6904,6 +6960,12 @@ func (s *DescribeInputOutput) SetInputClass(v string) *DescribeInputOutput { return s } +// SetInputSourceType sets the InputSourceType field's value. +func (s *DescribeInputOutput) SetInputSourceType(v string) *DescribeInputOutput { + s.InputSourceType = &v + return s +} + // SetMediaConnectFlows sets the MediaConnectFlows field's value. func (s *DescribeInputOutput) SetMediaConnectFlows(v []*MediaConnectFlow) *DescribeInputOutput { s.MediaConnectFlows = v @@ -8827,6 +8889,48 @@ func (s *GlobalConfiguration) SetSupportLowFramerateInputs(v string) *GlobalConf return s } +// H264 Color Space Settings +type H264ColorSpaceSettings struct { + _ struct{} `type:"structure"` + + // Passthrough applies no color space conversion to the output + ColorSpacePassthroughSettings *ColorSpacePassthroughSettings `locationName:"colorSpacePassthroughSettings" type:"structure"` + + // Rec601 Settings + Rec601Settings *Rec601Settings `locationName:"rec601Settings" type:"structure"` + + // Rec709 Settings + Rec709Settings *Rec709Settings `locationName:"rec709Settings" type:"structure"` +} + +// String returns the string representation +func (s H264ColorSpaceSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s H264ColorSpaceSettings) GoString() string { + return s.String() +} + +// SetColorSpacePassthroughSettings sets the ColorSpacePassthroughSettings field's value. +func (s *H264ColorSpaceSettings) SetColorSpacePassthroughSettings(v *ColorSpacePassthroughSettings) *H264ColorSpaceSettings { + s.ColorSpacePassthroughSettings = v + return s +} + +// SetRec601Settings sets the Rec601Settings field's value. +func (s *H264ColorSpaceSettings) SetRec601Settings(v *Rec601Settings) *H264ColorSpaceSettings { + s.Rec601Settings = v + return s +} + +// SetRec709Settings sets the Rec709Settings field's value. +func (s *H264ColorSpaceSettings) SetRec709Settings(v *Rec709Settings) *H264ColorSpaceSettings { + s.Rec709Settings = v + return s +} + // H264 Settings type H264Settings struct { _ struct{} `type:"structure"` @@ -8850,12 +8954,15 @@ type H264Settings struct { // Percentage of the buffer that should initially be filled (HRD buffer model). BufFillPct *int64 `locationName:"bufFillPct" type:"integer"` - // Size of buffer (HRD buffer model) in bits/second. + // Size of buffer (HRD buffer model) in bits. BufSize *int64 `locationName:"bufSize" type:"integer"` // Includes colorspace metadata in the output. ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"H264ColorMetadata"` + // Color Space settings + ColorSpaceSettings *H264ColorSpaceSettings `locationName:"colorSpaceSettings" type:"structure"` + // Entropy encoding mode. Use cabac (must be in Main or High profile) or cavlc. EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` @@ -8908,7 +9015,7 @@ type H264Settings struct { // while high can produce better quality for certain content. 
LookAheadRateControl *string `locationName:"lookAheadRateControl" type:"string" enum:"H264LookAheadRateControl"` - // For QVBR: See the tooltip for Quality level For VBR: Set the maximum bitrate + // For QVBR: See the tooltip for Quality levelFor VBR: Set the maximum bitrate // in order to accommodate expected spikes in the complexity of the video. MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` @@ -8949,7 +9056,7 @@ type H264Settings struct { // level: 6. Max bitrate: 1M to 1.5M QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` - // Rate control mode. QVBR: Quality will match the specified quality level except + // Rate control mode.QVBR: Quality will match the specified quality level except // when it is constrained by themaximum bitrate. Recommended if you or your // viewers pay for bandwidth.VBR: Quality and bitrate vary, depending on the // video complexity. Recommended instead of QVBRif you want to maintain a specific @@ -9077,6 +9184,12 @@ func (s *H264Settings) SetColorMetadata(v string) *H264Settings { return s } +// SetColorSpaceSettings sets the ColorSpaceSettings field's value. +func (s *H264Settings) SetColorSpaceSettings(v *H264ColorSpaceSettings) *H264Settings { + s.ColorSpaceSettings = v + return s +} + // SetEntropyEncoding sets the EntropyEncoding field's value. func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings { s.EntropyEncoding = &v @@ -9251,15 +9364,454 @@ func (s *H264Settings) SetSyntax(v string) *H264Settings { return s } -// SetTemporalAq sets the TemporalAq field's value. -func (s *H264Settings) SetTemporalAq(v string) *H264Settings { - s.TemporalAq = &v +// SetTemporalAq sets the TemporalAq field's value. +func (s *H264Settings) SetTemporalAq(v string) *H264Settings { + s.TemporalAq = &v + return s +} + +// SetTimecodeInsertion sets the TimecodeInsertion field's value. +func (s *H264Settings) SetTimecodeInsertion(v string) *H264Settings { + s.TimecodeInsertion = &v + return s +} + +// H265 Color Space Settings +type H265ColorSpaceSettings struct { + _ struct{} `type:"structure"` + + // Passthrough applies no color space conversion to the output + ColorSpacePassthroughSettings *ColorSpacePassthroughSettings `locationName:"colorSpacePassthroughSettings" type:"structure"` + + // Hdr10 Settings + Hdr10Settings *Hdr10Settings `locationName:"hdr10Settings" type:"structure"` + + // Rec601 Settings + Rec601Settings *Rec601Settings `locationName:"rec601Settings" type:"structure"` + + // Rec709 Settings + Rec709Settings *Rec709Settings `locationName:"rec709Settings" type:"structure"` +} + +// String returns the string representation +func (s H265ColorSpaceSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s H265ColorSpaceSettings) GoString() string { + return s.String() +} + +// SetColorSpacePassthroughSettings sets the ColorSpacePassthroughSettings field's value. +func (s *H265ColorSpaceSettings) SetColorSpacePassthroughSettings(v *ColorSpacePassthroughSettings) *H265ColorSpaceSettings { + s.ColorSpacePassthroughSettings = v + return s +} + +// SetHdr10Settings sets the Hdr10Settings field's value. +func (s *H265ColorSpaceSettings) SetHdr10Settings(v *Hdr10Settings) *H265ColorSpaceSettings { + s.Hdr10Settings = v + return s +} + +// SetRec601Settings sets the Rec601Settings field's value. 
+func (s *H265ColorSpaceSettings) SetRec601Settings(v *Rec601Settings) *H265ColorSpaceSettings { + s.Rec601Settings = v + return s +} + +// SetRec709Settings sets the Rec709Settings field's value. +func (s *H265ColorSpaceSettings) SetRec709Settings(v *Rec709Settings) *H265ColorSpaceSettings { + s.Rec709Settings = v + return s +} + +// H265 Settings +type H265Settings struct { + _ struct{} `type:"structure"` + + // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual + // quality. + AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"` + + // Indicates that AFD values will be written into the output stream. If afdSignaling + // is "auto", the system will try to preserve the input AFD value (in cases + // where multiple AFD values are valid). If set to "fixed", the AFD value will + // be the value configured in the fixedAfd parameter. + AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` + + // Whether or not EML should insert an Alternative Transfer Function SEI message + // to support backwards compatibility with non-HDR decoders and displays. + AlternativeTransferFunction *string `locationName:"alternativeTransferFunction" type:"string" enum:"H265AlternativeTransferFunction"` + + // Average bitrate in bits/second. Required when the rate control mode is VBR + // or CBR. Not used for QVBR. In an MS Smooth output group, each output must + // have a unique value when its bitrate is rounded down to the nearest multiple + // of 1000. + Bitrate *int64 `locationName:"bitrate" min:"100000" type:"integer"` + + // Size of buffer (HRD buffer model) in bits. + BufSize *int64 `locationName:"bufSize" min:"100000" type:"integer"` + + // Includes colorspace metadata in the output. + ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"H265ColorMetadata"` + + // Color Space settings + ColorSpaceSettings *H265ColorSpaceSettings `locationName:"colorSpaceSettings" type:"structure"` + + // Four bit AFD value to write on all frames of video in the output stream. + // Only valid when afdSignaling is set to 'Fixed'. + FixedAfd *string `locationName:"fixedAfd" type:"string" enum:"FixedAfd"` + + // If set to enabled, adjust quantization within each frame to reduce flicker + // or 'pop' on I-frames. + FlickerAq *string `locationName:"flickerAq" type:"string" enum:"H265FlickerAq"` + + // Framerate denominator. + // + // FramerateDenominator is a required field + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer" required:"true"` + + // Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 + // fps. + // + // FramerateNumerator is a required field + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer" required:"true"` + + // Frequency of closed GOPs. In streaming applications, it is recommended that + // this be set to 1 so a decoder joining mid-stream will receive an IDR frame + // as quickly as possible. Setting this value to 0 will break output segmenting. + GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` + + // GOP size (keyframe interval) in units of either frames or seconds per gopSizeUnits. + // Must be greater than zero. + GopSize *float64 `locationName:"gopSize" type:"double"` + + // Indicates if the gopSize is specified in frames or seconds. If seconds the + // system will convert the gopSize into a frame count at run time. 
+ GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H265GopSizeUnits"` + + // H.265 Level. + Level *string `locationName:"level" type:"string" enum:"H265Level"` + + // Amount of lookahead. A value of low can decrease latency and memory usage, + // while high can produce better quality for certain content. + LookAheadRateControl *string `locationName:"lookAheadRateControl" type:"string" enum:"H265LookAheadRateControl"` + + // For QVBR: See the tooltip for Quality level + MaxBitrate *int64 `locationName:"maxBitrate" min:"100000" type:"integer"` + + // Only meaningful if sceneChangeDetect is set to enabled. Enforces separation + // between repeated (cadence) I-frames and I-frames inserted by Scene Change + // Detection. If a scene change I-frame is within I-interval frames of a cadence + // I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. + // GOP stretch requires enabling lookahead as well as setting I-interval. The + // normal cadence resumes for the next GOP. Note: Maximum GOP stretch = GOP + // size + Min-I-interval - 1 + MinIInterval *int64 `locationName:"minIInterval" type:"integer"` + + // Pixel Aspect Ratio denominator. + ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` + + // Pixel Aspect Ratio numerator. + ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` + + // H.265 Profile. + Profile *string `locationName:"profile" type:"string" enum:"H265Profile"` + + // Controls the target quality for the video encode. Applies only when the rate + // control mode is QVBR. Set values for the QVBR quality level field and Max + // bitrate field that suit your most important viewing devices. Recommended + // values are:- Primary screen: Quality level: 8 to 10. Max bitrate: 4M- PC + // or tablet: Quality level: 7. Max bitrate: 1.5M to 3M- Smartphone: Quality + // level: 6. Max bitrate: 1M to 1.5M + QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` + + // Rate control mode.QVBR: Quality will match the specified quality level except + // when it is constrained by themaximum bitrate. Recommended if you or your + // viewers pay for bandwidth.CBR: Quality varies, depending on the video complexity. + // Recommended only if you distributeyour assets to devices that cannot handle + // variable bitrates. + RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H265RateControlMode"` + + // Sets the scan type of the output to progressive or top-field-first interlaced. + ScanType *string `locationName:"scanType" type:"string" enum:"H265ScanType"` + + // Scene change detection. + SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"` + + // Number of slices per picture. Must be less than or equal to the number of + // macroblock rows for progressive pictures, and less than or equal to half + // the number of macroblock rows for interlaced pictures.This field is optional; + // when no value is specified the encoder will choose the number of slices based + // on encode resolution. + Slices *int64 `locationName:"slices" min:"1" type:"integer"` + + // H.265 Tier. 
+ Tier *string `locationName:"tier" type:"string" enum:"H265Tier"` + + // Determines how timecodes should be inserted into the video elementary stream.- + // 'disabled': Do not include timecodes- 'picTimingSei': Pass through picture + // timing SEI messages from the source specified in Timecode Config + TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"H265TimecodeInsertionBehavior"` +} + +// String returns the string representation +func (s H265Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s H265Settings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *H265Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "H265Settings"} + if s.Bitrate != nil && *s.Bitrate < 100000 { + invalidParams.Add(request.NewErrParamMinValue("Bitrate", 100000)) + } + if s.BufSize != nil && *s.BufSize < 100000 { + invalidParams.Add(request.NewErrParamMinValue("BufSize", 100000)) + } + if s.FramerateDenominator == nil { + invalidParams.Add(request.NewErrParamRequired("FramerateDenominator")) + } + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator == nil { + invalidParams.Add(request.NewErrParamRequired("FramerateNumerator")) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + if s.MaxBitrate != nil && *s.MaxBitrate < 100000 { + invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 100000)) + } + if s.ParDenominator != nil && *s.ParDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) + } + if s.ParNumerator != nil && *s.ParNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) + } + if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 { + invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1)) + } + if s.Slices != nil && *s.Slices < 1 { + invalidParams.Add(request.NewErrParamMinValue("Slices", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. +func (s *H265Settings) SetAdaptiveQuantization(v string) *H265Settings { + s.AdaptiveQuantization = &v + return s +} + +// SetAfdSignaling sets the AfdSignaling field's value. +func (s *H265Settings) SetAfdSignaling(v string) *H265Settings { + s.AfdSignaling = &v + return s +} + +// SetAlternativeTransferFunction sets the AlternativeTransferFunction field's value. +func (s *H265Settings) SetAlternativeTransferFunction(v string) *H265Settings { + s.AlternativeTransferFunction = &v + return s +} + +// SetBitrate sets the Bitrate field's value. +func (s *H265Settings) SetBitrate(v int64) *H265Settings { + s.Bitrate = &v + return s +} + +// SetBufSize sets the BufSize field's value. +func (s *H265Settings) SetBufSize(v int64) *H265Settings { + s.BufSize = &v + return s +} + +// SetColorMetadata sets the ColorMetadata field's value. +func (s *H265Settings) SetColorMetadata(v string) *H265Settings { + s.ColorMetadata = &v + return s +} + +// SetColorSpaceSettings sets the ColorSpaceSettings field's value. 
+func (s *H265Settings) SetColorSpaceSettings(v *H265ColorSpaceSettings) *H265Settings { + s.ColorSpaceSettings = v + return s +} + +// SetFixedAfd sets the FixedAfd field's value. +func (s *H265Settings) SetFixedAfd(v string) *H265Settings { + s.FixedAfd = &v + return s +} + +// SetFlickerAq sets the FlickerAq field's value. +func (s *H265Settings) SetFlickerAq(v string) *H265Settings { + s.FlickerAq = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *H265Settings) SetFramerateDenominator(v int64) *H265Settings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *H265Settings) SetFramerateNumerator(v int64) *H265Settings { + s.FramerateNumerator = &v + return s +} + +// SetGopClosedCadence sets the GopClosedCadence field's value. +func (s *H265Settings) SetGopClosedCadence(v int64) *H265Settings { + s.GopClosedCadence = &v + return s +} + +// SetGopSize sets the GopSize field's value. +func (s *H265Settings) SetGopSize(v float64) *H265Settings { + s.GopSize = &v + return s +} + +// SetGopSizeUnits sets the GopSizeUnits field's value. +func (s *H265Settings) SetGopSizeUnits(v string) *H265Settings { + s.GopSizeUnits = &v + return s +} + +// SetLevel sets the Level field's value. +func (s *H265Settings) SetLevel(v string) *H265Settings { + s.Level = &v + return s +} + +// SetLookAheadRateControl sets the LookAheadRateControl field's value. +func (s *H265Settings) SetLookAheadRateControl(v string) *H265Settings { + s.LookAheadRateControl = &v + return s +} + +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *H265Settings) SetMaxBitrate(v int64) *H265Settings { + s.MaxBitrate = &v + return s +} + +// SetMinIInterval sets the MinIInterval field's value. +func (s *H265Settings) SetMinIInterval(v int64) *H265Settings { + s.MinIInterval = &v + return s +} + +// SetParDenominator sets the ParDenominator field's value. +func (s *H265Settings) SetParDenominator(v int64) *H265Settings { + s.ParDenominator = &v + return s +} + +// SetParNumerator sets the ParNumerator field's value. +func (s *H265Settings) SetParNumerator(v int64) *H265Settings { + s.ParNumerator = &v + return s +} + +// SetProfile sets the Profile field's value. +func (s *H265Settings) SetProfile(v string) *H265Settings { + s.Profile = &v + return s +} + +// SetQvbrQualityLevel sets the QvbrQualityLevel field's value. +func (s *H265Settings) SetQvbrQualityLevel(v int64) *H265Settings { + s.QvbrQualityLevel = &v + return s +} + +// SetRateControlMode sets the RateControlMode field's value. +func (s *H265Settings) SetRateControlMode(v string) *H265Settings { + s.RateControlMode = &v + return s +} + +// SetScanType sets the ScanType field's value. +func (s *H265Settings) SetScanType(v string) *H265Settings { + s.ScanType = &v + return s +} + +// SetSceneChangeDetect sets the SceneChangeDetect field's value. +func (s *H265Settings) SetSceneChangeDetect(v string) *H265Settings { + s.SceneChangeDetect = &v + return s +} + +// SetSlices sets the Slices field's value. +func (s *H265Settings) SetSlices(v int64) *H265Settings { + s.Slices = &v + return s +} + +// SetTier sets the Tier field's value. +func (s *H265Settings) SetTier(v string) *H265Settings { + s.Tier = &v + return s +} + +// SetTimecodeInsertion sets the TimecodeInsertion field's value. 
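Because every setter above returns its receiver, an H265Settings value composes fluently. A sketch of a QVBR HEVC configuration with HDR10 color metadata; all numeric values are illustrative, and Validate only enforces the required framerate fields plus the declared minimums (e.g. maxBitrate >= 100000):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	// HDR10 color space block; MaxCLL/MaxFALL are in nits (illustrative values).
	colorSpace := (&medialive.H265ColorSpaceSettings{}).
		SetHdr10Settings((&medialive.Hdr10Settings{}).SetMaxCll(1000).SetMaxFall(400))

	// 29.97 fps QVBR encode capped at 3 Mbps.
	h265 := (&medialive.H265Settings{}).
		SetFramerateNumerator(30000).
		SetFramerateDenominator(1001).
		SetRateControlMode(medialive.H265RateControlModeQvbr).
		SetQvbrQualityLevel(7).
		SetMaxBitrate(3000000).
		SetColorSpaceSettings(colorSpace)

	codec := (&medialive.VideoCodecSettings{}).SetH265Settings(h265)
	if err := codec.Validate(); err != nil {
		fmt.Println("invalid codec settings:", err)
	}
}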
+func (s *H265Settings) SetTimecodeInsertion(v string) *H265Settings { + s.TimecodeInsertion = &v + return s +} + +// Hdr10 Settings +type Hdr10Settings struct { + _ struct{} `type:"structure"` + + // Maximum Content Light LevelAn integer metadata value defining the maximum + // light level, in nits,of any single pixel within an encoded HDR video stream + // or file. + MaxCll *int64 `locationName:"maxCll" type:"integer"` + + // Maximum Frame Average Light LevelAn integer metadata value defining the maximum + // average light level, in nits,for any single frame within an encoded HDR video + // stream or file. + MaxFall *int64 `locationName:"maxFall" type:"integer"` +} + +// String returns the string representation +func (s Hdr10Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Hdr10Settings) GoString() string { + return s.String() +} + +// SetMaxCll sets the MaxCll field's value. +func (s *Hdr10Settings) SetMaxCll(v int64) *Hdr10Settings { + s.MaxCll = &v return s } -// SetTimecodeInsertion sets the TimecodeInsertion field's value. -func (s *H264Settings) SetTimecodeInsertion(v string) *H264Settings { - s.TimecodeInsertion = &v +// SetMaxFall sets the MaxFall field's value. +func (s *Hdr10Settings) SetMaxFall(v int64) *Hdr10Settings { + s.MaxFall = &v return s } @@ -10260,6 +10812,22 @@ func (s *HlsWebdavSettings) SetRestartDelay(v int64) *HlsWebdavSettings { return s } +// Settings to configure an action so that it occurs immediately. This is only +// supported for input switch actions currently. +type ImmediateModeScheduleActionStartSettings struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ImmediateModeScheduleActionStartSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImmediateModeScheduleActionStartSettings) GoString() string { + return s.String() +} + type Input struct { _ struct{} `type:"structure"` @@ -10285,6 +10853,11 @@ type Input struct { // value is not valid because the channel requires two sources in the input. InputClass *string `locationName:"inputClass" type:"string" enum:"InputClass"` + // Certain pull input sources can be dynamic, meaning that they can have their + // URL's dynamically changesduring input switch actions. Presently, this functionality + // only works with MP4_FILE inputs. + InputSourceType *string `locationName:"inputSourceType" type:"string" enum:"InputSourceType"` + // A list of MediaConnect Flows for this input. MediaConnectFlows []*MediaConnectFlow `locationName:"mediaConnectFlows" type:"list"` @@ -10349,6 +10922,12 @@ func (s *Input) SetInputClass(v string) *Input { return s } +// SetInputSourceType sets the InputSourceType field's value. +func (s *Input) SetInputSourceType(v string) *Input { + s.InputSourceType = &v + return s +} + // SetMediaConnectFlows sets the MediaConnectFlows field's value. func (s *Input) SetMediaConnectFlows(v []*MediaConnectFlow) *Input { s.MediaConnectFlows = v @@ -10511,6 +11090,64 @@ func (s *InputChannelLevel) SetInputChannel(v int64) *InputChannelLevel { return s } +// Settings to let you create a clip of the file input, in order to set up the +// input to ingest only a portion of the file. +type InputClippingSettings struct { + _ struct{} `type:"structure"` + + // The source of the timecodes in the source being clipped. 
+ // + // InputTimecodeSource is a required field + InputTimecodeSource *string `locationName:"inputTimecodeSource" type:"string" required:"true" enum:"InputTimecodeSource"` + + // Settings to identify the start of the clip. + StartTimecode *StartTimecode `locationName:"startTimecode" type:"structure"` + + // Settings to identify the end of the clip. + StopTimecode *StopTimecode `locationName:"stopTimecode" type:"structure"` +} + +// String returns the string representation +func (s InputClippingSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputClippingSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputClippingSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputClippingSettings"} + if s.InputTimecodeSource == nil { + invalidParams.Add(request.NewErrParamRequired("InputTimecodeSource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputTimecodeSource sets the InputTimecodeSource field's value. +func (s *InputClippingSettings) SetInputTimecodeSource(v string) *InputClippingSettings { + s.InputTimecodeSource = &v + return s +} + +// SetStartTimecode sets the StartTimecode field's value. +func (s *InputClippingSettings) SetStartTimecode(v *StartTimecode) *InputClippingSettings { + s.StartTimecode = v + return s +} + +// SetStopTimecode sets the StopTimecode field's value. +func (s *InputClippingSettings) SetStopTimecode(v *StopTimecode) *InputClippingSettings { + s.StopTimecode = v + return s +} + // The settings for a PUSH type input. type InputDestination struct { _ struct{} `type:"structure"` @@ -11096,14 +11733,26 @@ func (s *InputSpecification) SetResolution(v string) *InputSpecification { return s } -// Settings for the action to switch an input. +// Settings for the "switch input" action: to switch from ingesting one input +// to ingesting another input. type InputSwitchScheduleActionSettings struct { _ struct{} `type:"structure"` - // The name of the input attachment that should be switched to by this action. + // The name of the input attachment (not the name of the input!) to switch to. + // The name is specified in the channel configuration. // // InputAttachmentNameReference is a required field InputAttachmentNameReference *string `locationName:"inputAttachmentNameReference" type:"string" required:"true"` + + // Settings to let you create a clip of the file input, in order to set up the + // input to ingest only a portion of the file. + InputClippingSettings *InputClippingSettings `locationName:"inputClippingSettings" type:"structure"` + + // The value for the variable portion of the URL for the dynamic input, for + // this instance of the input. Each time you use the same dynamic input in an + // input switch action, you can provide a different value, in order to connect + // the input to a different content source. 
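An InputClippingSettings value can likewise be built inline from its setters. A sketch of a zero-based clip covering 00:00:10 to 00:00:30 of the file, excluding the last frame (which is also the default behavior); the timecodes are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	clip := (&medialive.InputClippingSettings{}).
		SetInputTimecodeSource(medialive.InputTimecodeSourceZerobased).
		SetStartTimecode((&medialive.StartTimecode{}).SetTimecode("00:00:10:00")).
		SetStopTimecode((&medialive.StopTimecode{}).
			SetTimecode("00:00:30:00").
			SetLastFrameClippingBehavior(medialive.LastFrameClippingBehaviorExcludeLastFrame))

	// Validate only requires InputTimecodeSource; both timecodes are optional.
	if err := clip.Validate(); err != nil {
		fmt.Println("invalid clipping settings:", err)
	}
}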
+ UrlPath []*string `locationName:"urlPath" type:"list"` } // String returns the string representation @@ -11122,6 +11771,11 @@ func (s *InputSwitchScheduleActionSettings) Validate() error { if s.InputAttachmentNameReference == nil { invalidParams.Add(request.NewErrParamRequired("InputAttachmentNameReference")) } + if s.InputClippingSettings != nil { + if err := s.InputClippingSettings.Validate(); err != nil { + invalidParams.AddNested("InputClippingSettings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11135,6 +11789,18 @@ func (s *InputSwitchScheduleActionSettings) SetInputAttachmentNameReference(v st return s } +// SetInputClippingSettings sets the InputClippingSettings field's value. +func (s *InputSwitchScheduleActionSettings) SetInputClippingSettings(v *InputClippingSettings) *InputSwitchScheduleActionSettings { + s.InputClippingSettings = v + return s +} + +// SetUrlPath sets the UrlPath field's value. +func (s *InputSwitchScheduleActionSettings) SetUrlPath(v []*string) *InputSwitchScheduleActionSettings { + s.UrlPath = v + return s +} + // Settings for a private VPC Input.When this property is specified, the input // destination addresses will be created in a VPC rather than with public Internet // addresses.This property requires setting the roleArn property on Input creation.Not @@ -12636,7 +13302,7 @@ func (s *MediaPackageGroupSettings) SetDestination(v *OutputLocationRef) *MediaP return s } -// Media Package Output Destination Settings +// MediaPackage Output Destination Settings type MediaPackageOutputDestinationSettings struct { _ struct{} `type:"structure"` @@ -12965,6 +13631,10 @@ func (s *MsSmoothGroupSettings) SetTimestampOffsetMode(v string) *MsSmoothGroupS type MsSmoothOutputSettings struct { _ struct{} `type:"structure"` + // Only applicable when this output is referencing an H.265 video description.Specifies + // whether MP4 segments should be packaged as HEV1 or HVC1. + H265PackagingType *string `locationName:"h265PackagingType" type:"string" enum:"MsSmoothH265PackagingType"` + // String concatenated to the end of the destination filename. Required for // multiple outputs of the same type. NameModifier *string `locationName:"nameModifier" type:"string"` @@ -12980,6 +13650,12 @@ func (s MsSmoothOutputSettings) GoString() string { return s.String() } +// SetH265PackagingType sets the H265PackagingType field's value. +func (s *MsSmoothOutputSettings) SetH265PackagingType(v string) *MsSmoothOutputSettings { + s.H265PackagingType = &v + return s +} + // SetNameModifier sets the NameModifier field's value. func (s *MsSmoothOutputSettings) SetNameModifier(v string) *MsSmoothOutputSettings { s.NameModifier = &v @@ -13720,6 +14396,51 @@ func (s *PauseStateScheduleActionSettings) SetPipelines(v []*PipelinePauseStateS return s } +// Runtime details of a pipeline when a channel is running. +type PipelineDetail struct { + _ struct{} `type:"structure"` + + // The name of the active input attachment currently being ingested by this + // pipeline. + ActiveInputAttachmentName *string `locationName:"activeInputAttachmentName" type:"string"` + + // The name of the input switch schedule action that occurred most recently + // and that resulted in the switch to the current input attachment for this + // pipeline. 
+ ActiveInputSwitchActionName *string `locationName:"activeInputSwitchActionName" type:"string"` + + // Pipeline ID + PipelineId *string `locationName:"pipelineId" type:"string"` +} + +// String returns the string representation +func (s PipelineDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineDetail) GoString() string { + return s.String() +} + +// SetActiveInputAttachmentName sets the ActiveInputAttachmentName field's value. +func (s *PipelineDetail) SetActiveInputAttachmentName(v string) *PipelineDetail { + s.ActiveInputAttachmentName = &v + return s +} + +// SetActiveInputSwitchActionName sets the ActiveInputSwitchActionName field's value. +func (s *PipelineDetail) SetActiveInputSwitchActionName(v string) *PipelineDetail { + s.ActiveInputSwitchActionName = &v + return s +} + +// SetPipelineId sets the PipelineId field's value. +func (s *PipelineDetail) SetPipelineId(v string) *PipelineDetail { + s.PipelineId = &v + return s +} + // Settings for pausing a pipeline. type PipelinePauseStateSettings struct { _ struct{} `type:"structure"` @@ -13868,6 +14589,36 @@ func (s *PurchaseOfferingOutput) SetReservation(v *Reservation) *PurchaseOfferin return s } +// Rec601 Settings +type Rec601Settings struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s Rec601Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rec601Settings) GoString() string { + return s.String() +} + +// Rec709 Settings +type Rec709Settings struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s Rec709Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rec709Settings) GoString() string { + return s.String() +} + // Remix Settings type RemixSettings struct { _ struct{} `type:"structure"` @@ -14603,15 +15354,19 @@ func (s *ScheduleActionSettings) SetStaticImageDeactivateSettings(v *StaticImage return s } -// Settings to specify the start time for an action. +// Settings to specify when an action should occur. Only one of the options +// must be selected. type ScheduleActionStartSettings struct { _ struct{} `type:"structure"` - // Holds the start time for the action. + // Option for specifying the start time for an action. FixedModeScheduleActionStartSettings *FixedModeScheduleActionStartSettings `locationName:"fixedModeScheduleActionStartSettings" type:"structure"` - // Specifies an action to follow for scheduling this action. + // Option for specifying an action as relative to another action. FollowModeScheduleActionStartSettings *FollowModeScheduleActionStartSettings `locationName:"followModeScheduleActionStartSettings" type:"structure"` + + // Option for specifying an action that should be applied immediately. + ImmediateModeScheduleActionStartSettings *ImmediateModeScheduleActionStartSettings `locationName:"immediateModeScheduleActionStartSettings" type:"structure"` } // String returns the string representation @@ -14656,6 +15411,12 @@ func (s *ScheduleActionStartSettings) SetFollowModeScheduleActionStartSettings(v return s } +// SetImmediateModeScheduleActionStartSettings sets the ImmediateModeScheduleActionStartSettings field's value. 
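The new immediate mode rounds out the fixed and follow start modes: attaching an empty ImmediateModeScheduleActionStartSettings makes the action fire as soon as it is created. A sketch of an immediate input switch submitted through BatchUpdateSchedule, which is defined elsewhere in this package; the channel ID, action name, and attachment name are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	sess := session.Must(session.NewSession())
	client := medialive.New(sess)

	action := (&medialive.ScheduleAction{}).
		SetActionName("switch-to-backup"). // hypothetical action name
		SetScheduleActionSettings((&medialive.ScheduleActionSettings{}).
			SetInputSwitchSettings((&medialive.InputSwitchScheduleActionSettings{}).
				// Name of the input attachment from the channel configuration,
				// not the name of the input itself.
				SetInputAttachmentNameReference("backup-input"))).
		SetScheduleActionStartSettings((&medialive.ScheduleActionStartSettings{}).
			SetImmediateModeScheduleActionStartSettings(
				&medialive.ImmediateModeScheduleActionStartSettings{}))

	_, err := client.BatchUpdateSchedule(&medialive.BatchUpdateScheduleInput{
		ChannelId: aws.String("1234567"), // hypothetical channel ID
		Creates: (&medialive.BatchScheduleActionCreateRequest{}).
			SetScheduleActions([]*medialive.ScheduleAction{action}),
	})
	if err != nil {
		fmt.Println("schedule update failed:", err)
	}
}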
+func (s *ScheduleActionStartSettings) SetImmediateModeScheduleActionStartSettings(v *ImmediateModeScheduleActionStartSettings) *ScheduleActionStartSettings { + s.ImmediateModeScheduleActionStartSettings = v + return s +} + // Scte20 Plus Embedded Destination Settings type Scte20PlusEmbeddedDestinationSettings struct { _ struct{} `type:"structure"` @@ -15496,6 +16257,8 @@ type StartChannelOutput struct { Name *string `locationName:"name" type:"string"` + PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` RoleArn *string `locationName:"roleArn" type:"string"` @@ -15575,6 +16338,12 @@ func (s *StartChannelOutput) SetName(v string) *StartChannelOutput { return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *StartChannelOutput) SetPipelineDetails(v []*PipelineDetail) *StartChannelOutput { + s.PipelineDetails = v + return s +} + // SetPipelinesRunningCount sets the PipelinesRunningCount field's value. func (s *StartChannelOutput) SetPipelinesRunningCount(v int64) *StartChannelOutput { s.PipelinesRunningCount = &v @@ -15599,6 +16368,32 @@ func (s *StartChannelOutput) SetTags(v map[string]*string) *StartChannelOutput { return s } +// Settings to identify the start of the clip. +type StartTimecode struct { + _ struct{} `type:"structure"` + + // The timecode for the frame where you want to start the clip. Optional; if + // not specified, the clip starts at first frame in the file. Enter the timecode + // as HH:MM:SS:FF or HH:MM:SS;FF. + Timecode *string `locationName:"timecode" type:"string"` +} + +// String returns the string representation +func (s StartTimecode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTimecode) GoString() string { + return s.String() +} + +// SetTimecode sets the Timecode field's value. +func (s *StartTimecode) SetTimecode(v string) *StartTimecode { + s.Timecode = &v + return s +} + // Settings for the action to activate a static image. type StaticImageActivateScheduleActionSettings struct { _ struct{} `type:"structure"` @@ -15906,6 +16701,8 @@ type StopChannelOutput struct { Name *string `locationName:"name" type:"string"` + PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` RoleArn *string `locationName:"roleArn" type:"string"` @@ -15985,6 +16782,12 @@ func (s *StopChannelOutput) SetName(v string) *StopChannelOutput { return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *StopChannelOutput) SetPipelineDetails(v []*PipelineDetail) *StopChannelOutput { + s.PipelineDetails = v + return s +} + // SetPipelinesRunningCount sets the PipelinesRunningCount field's value. func (s *StopChannelOutput) SetPipelinesRunningCount(v int64) *StopChannelOutput { s.PipelinesRunningCount = &v @@ -16009,6 +16812,43 @@ func (s *StopChannelOutput) SetTags(v map[string]*string) *StopChannelOutput { return s } +// Settings to identify the end of the clip. +type StopTimecode struct { + _ struct{} `type:"structure"` + + // If you specify a StopTimecode in an input (in order to clip the file), you + // can specify if you want the clip to exclude (the default) or include the + // frame specified by the timecode. 
+ LastFrameClippingBehavior *string `locationName:"lastFrameClippingBehavior" type:"string" enum:"LastFrameClippingBehavior"` + + // The timecode for the frame where you want to stop the clip. Optional; if + // not specified, the clip continues to the end of the file. Enter the timecode + // as HH:MM:SS:FF or HH:MM:SS;FF. + Timecode *string `locationName:"timecode" type:"string"` +} + +// String returns the string representation +func (s StopTimecode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTimecode) GoString() string { + return s.String() +} + +// SetLastFrameClippingBehavior sets the LastFrameClippingBehavior field's value. +func (s *StopTimecode) SetLastFrameClippingBehavior(v string) *StopTimecode { + s.LastFrameClippingBehavior = &v + return s +} + +// SetTimecode sets the Timecode field's value. +func (s *StopTimecode) SetTimecode(v string) *StopTimecode { + s.Timecode = &v + return s +} + // Teletext Destination Settings type TeletextDestinationSettings struct { _ struct{} `type:"structure"` @@ -16841,6 +17681,9 @@ type VideoCodecSettings struct { // H264 Settings H264Settings *H264Settings `locationName:"h264Settings" type:"structure"` + + // H265 Settings + H265Settings *H265Settings `locationName:"h265Settings" type:"structure"` } // String returns the string representation @@ -16866,6 +17709,11 @@ func (s *VideoCodecSettings) Validate() error { invalidParams.AddNested("H264Settings", err.(request.ErrInvalidParams)) } } + if s.H265Settings != nil { + if err := s.H265Settings.Validate(); err != nil { + invalidParams.AddNested("H265Settings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -16885,6 +17733,12 @@ func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSetting return s } +// SetH265Settings sets the H265Settings field's value. +func (s *VideoCodecSettings) SetH265Settings(v *H265Settings) *VideoCodecSettings { + s.H265Settings = v + return s +} + // Video settings for this stream. type VideoDescription struct { _ struct{} `type:"structure"` @@ -17007,8 +17861,9 @@ func (s *VideoDescription) SetWidth(v int64) *VideoDescription { type VideoSelector struct { _ struct{} `type:"structure"` - // Specifies the colorspace of an input. This setting works in tandem with colorSpaceConversion - // to determine if any conversion will be performed. + // Specifies the color space of an input. This setting works in tandem with + // colorSpaceUsage and a video description's colorSpaceSettingsChoice to determine + // if any conversion will be performed. ColorSpace *string `locationName:"colorSpace" type:"string" enum:"VideoSelectorColorSpace"` // Applies only if colorSpace is a value other than follow. 
This field controls @@ -18113,6 +18968,9 @@ const ( // H264RateControlModeCbr is a H264RateControlMode enum value H264RateControlModeCbr = "CBR" + // H264RateControlModeMultiplex is a H264RateControlMode enum value + H264RateControlModeMultiplex = "MULTIPLEX" + // H264RateControlModeQvbr is a H264RateControlMode enum value H264RateControlModeQvbr = "QVBR" @@ -18183,6 +19041,171 @@ const ( H264TimecodeInsertionBehaviorPicTimingSei = "PIC_TIMING_SEI" ) +// H265 Adaptive Quantization +const ( + // H265AdaptiveQuantizationHigh is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationHigh = "HIGH" + + // H265AdaptiveQuantizationHigher is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationHigher = "HIGHER" + + // H265AdaptiveQuantizationLow is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationLow = "LOW" + + // H265AdaptiveQuantizationMax is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationMax = "MAX" + + // H265AdaptiveQuantizationMedium is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationMedium = "MEDIUM" + + // H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value + H265AdaptiveQuantizationOff = "OFF" +) + +// H265 Alternative Transfer Function +const ( + // H265AlternativeTransferFunctionInsert is a H265AlternativeTransferFunction enum value + H265AlternativeTransferFunctionInsert = "INSERT" + + // H265AlternativeTransferFunctionOmit is a H265AlternativeTransferFunction enum value + H265AlternativeTransferFunctionOmit = "OMIT" +) + +// H265 Color Metadata +const ( + // H265ColorMetadataIgnore is a H265ColorMetadata enum value + H265ColorMetadataIgnore = "IGNORE" + + // H265ColorMetadataInsert is a H265ColorMetadata enum value + H265ColorMetadataInsert = "INSERT" +) + +// H265 Flicker Aq +const ( + // H265FlickerAqDisabled is a H265FlickerAq enum value + H265FlickerAqDisabled = "DISABLED" + + // H265FlickerAqEnabled is a H265FlickerAq enum value + H265FlickerAqEnabled = "ENABLED" +) + +// H265 Gop Size Units +const ( + // H265GopSizeUnitsFrames is a H265GopSizeUnits enum value + H265GopSizeUnitsFrames = "FRAMES" + + // H265GopSizeUnitsSeconds is a H265GopSizeUnits enum value + H265GopSizeUnitsSeconds = "SECONDS" +) + +// H265 Level +const ( + // H265LevelH265Level1 is a H265Level enum value + H265LevelH265Level1 = "H265_LEVEL_1" + + // H265LevelH265Level2 is a H265Level enum value + H265LevelH265Level2 = "H265_LEVEL_2" + + // H265LevelH265Level21 is a H265Level enum value + H265LevelH265Level21 = "H265_LEVEL_2_1" + + // H265LevelH265Level3 is a H265Level enum value + H265LevelH265Level3 = "H265_LEVEL_3" + + // H265LevelH265Level31 is a H265Level enum value + H265LevelH265Level31 = "H265_LEVEL_3_1" + + // H265LevelH265Level4 is a H265Level enum value + H265LevelH265Level4 = "H265_LEVEL_4" + + // H265LevelH265Level41 is a H265Level enum value + H265LevelH265Level41 = "H265_LEVEL_4_1" + + // H265LevelH265Level5 is a H265Level enum value + H265LevelH265Level5 = "H265_LEVEL_5" + + // H265LevelH265Level51 is a H265Level enum value + H265LevelH265Level51 = "H265_LEVEL_5_1" + + // H265LevelH265Level52 is a H265Level enum value + H265LevelH265Level52 = "H265_LEVEL_5_2" + + // H265LevelH265Level6 is a H265Level enum value + H265LevelH265Level6 = "H265_LEVEL_6" + + // H265LevelH265Level61 is a H265Level enum value + H265LevelH265Level61 = "H265_LEVEL_6_1" + + // H265LevelH265Level62 is a H265Level enum value + H265LevelH265Level62 = "H265_LEVEL_6_2" + + // H265LevelH265LevelAuto is a H265Level enum value + 
H265LevelH265LevelAuto = "H265_LEVEL_AUTO" +) + +// H265 Look Ahead Rate Control +const ( + // H265LookAheadRateControlHigh is a H265LookAheadRateControl enum value + H265LookAheadRateControlHigh = "HIGH" + + // H265LookAheadRateControlLow is a H265LookAheadRateControl enum value + H265LookAheadRateControlLow = "LOW" + + // H265LookAheadRateControlMedium is a H265LookAheadRateControl enum value + H265LookAheadRateControlMedium = "MEDIUM" +) + +// H265 Profile +const ( + // H265ProfileMain is a H265Profile enum value + H265ProfileMain = "MAIN" + + // H265ProfileMain10bit is a H265Profile enum value + H265ProfileMain10bit = "MAIN_10BIT" +) + +// H265 Rate Control Mode +const ( + // H265RateControlModeCbr is a H265RateControlMode enum value + H265RateControlModeCbr = "CBR" + + // H265RateControlModeQvbr is a H265RateControlMode enum value + H265RateControlModeQvbr = "QVBR" +) + +// H265 Scan Type +const ( + // H265ScanTypeProgressive is a H265ScanType enum value + H265ScanTypeProgressive = "PROGRESSIVE" +) + +// H265 Scene Change Detect +const ( + // H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value + H265SceneChangeDetectDisabled = "DISABLED" + + // H265SceneChangeDetectEnabled is a H265SceneChangeDetect enum value + H265SceneChangeDetectEnabled = "ENABLED" +) + +// H265 Tier +const ( + // H265TierHigh is a H265Tier enum value + H265TierHigh = "HIGH" + + // H265TierMain is a H265Tier enum value + H265TierMain = "MAIN" +) + +// H265 Timecode Insertion Behavior +const ( + // H265TimecodeInsertionBehaviorDisabled is a H265TimecodeInsertionBehavior enum value + H265TimecodeInsertionBehaviorDisabled = "DISABLED" + + // H265TimecodeInsertionBehaviorPicTimingSei is a H265TimecodeInsertionBehavior enum value + H265TimecodeInsertionBehaviorPicTimingSei = "PIC_TIMING_SEI" +) + // Hls Ad Markers const ( // HlsAdMarkersAdobe is a HlsAdMarkers enum value @@ -18537,6 +19560,18 @@ const ( InputSourceEndBehaviorLoop = "LOOP" ) +// There are two types of input sources, static and dynamic. If an input source +// is dynamic you canchange the source url of the input dynamically using an +// input switch action. However, the only input typeto support a dynamic url +// at this time is MP4_FILE. By default all input sources are static. +const ( + // InputSourceTypeStatic is a InputSourceType enum value + InputSourceTypeStatic = "STATIC" + + // InputSourceTypeDynamic is a InputSourceType enum value + InputSourceTypeDynamic = "DYNAMIC" +) + const ( // InputStateCreating is a InputState enum value InputStateCreating = "CREATING" @@ -18554,6 +19589,21 @@ const ( InputStateDeleted = "DELETED" ) +// To clip the file, you must specify the timecode for the start and end of +// the clip. Specify EMBEDDED to use the timecode embedded in the source content. +// The embedded timecode must exist in the source content, otherwise MediaLive +// will output black frames until it reaches the end of the source. Specify +// ZEROBASED to use a timecode that assumes that the first frame in the file +// has the timestamp 00:00:00.00. There is no default for this field, you must +// specify a value. 
+const ( + // InputTimecodeSourceZerobased is a InputTimecodeSource enum value + InputTimecodeSourceZerobased = "ZEROBASED" + + // InputTimecodeSourceEmbedded is a InputTimecodeSource enum value + InputTimecodeSourceEmbedded = "EMBEDDED" +) + const ( // InputTypeUdpPush is a InputType enum value InputTypeUdpPush = "UDP_PUSH" @@ -18577,6 +19627,17 @@ const ( InputTypeMediaconnect = "MEDIACONNECT" ) +// If you specify a StopTimecode in an input (in order to clip the file), you +// can specify if you want the clip to exclude (the default) or include the +// frame specified by the timecode. +const ( + // LastFrameClippingBehaviorExcludeLastFrame is a LastFrameClippingBehavior enum value + LastFrameClippingBehaviorExcludeLastFrame = "EXCLUDE_LAST_FRAME" + + // LastFrameClippingBehaviorIncludeLastFrame is a LastFrameClippingBehavior enum value + LastFrameClippingBehaviorIncludeLastFrame = "INCLUDE_LAST_FRAME" +) + // The log level the user wants for their channel. const ( // LogLevelError is a LogLevel enum value @@ -18805,6 +19866,15 @@ const ( Mp2CodingModeCodingMode20 = "CODING_MODE_2_0" ) +// Ms Smooth H265 Packaging Type +const ( + // MsSmoothH265PackagingTypeHev1 is a MsSmoothH265PackagingType enum value + MsSmoothH265PackagingTypeHev1 = "HEV1" + + // MsSmoothH265PackagingTypeHvc1 is a MsSmoothH265PackagingType enum value + MsSmoothH265PackagingTypeHvc1 = "HVC1" +) + // Network Input Server Validation const ( // NetworkInputServerValidationCheckCryptographyAndValidateName is a NetworkInputServerValidation enum value diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go index 621855b02ad..885e54d4a7a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaLive { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "medialive" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaLive { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaLive { svc := &MediaLive{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-14", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go new file mode 100644 index 00000000000..63f7e0e6621 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go @@ -0,0 +1,239 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package medialive + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilChannelCreated uses the MediaLive API operation +// DescribeChannel to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilChannelCreated(input *DescribeChannelInput) error { + return c.WaitUntilChannelCreatedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilChannelCreatedWithContext is an extended version of WaitUntilChannelCreated. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilChannelCreatedWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilChannelCreated", + MaxAttempts: 5, + Delay: request.ConstantWaiterDelay(3 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "IDLE", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "CREATING", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "CREATE_FAILED", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeChannelInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeChannelRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilChannelDeleted uses the MediaLive API operation +// DescribeChannel to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilChannelDeleted(input *DescribeChannelInput) error { + return c.WaitUntilChannelDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilChannelDeletedWithContext is an extended version of WaitUntilChannelDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *MediaLive) WaitUntilChannelDeletedWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilChannelDeleted", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DELETED", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DELETING", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeChannelInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeChannelRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilChannelRunning uses the MediaLive API operation +// DescribeChannel to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilChannelRunning(input *DescribeChannelInput) error { + return c.WaitUntilChannelRunningWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilChannelRunningWithContext is an extended version of WaitUntilChannelRunning. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilChannelRunningWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilChannelRunning", + MaxAttempts: 120, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "RUNNING", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "STARTING", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeChannelInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeChannelRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilChannelStopped uses the MediaLive API operation +// DescribeChannel to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilChannelStopped(input *DescribeChannelInput) error { + return c.WaitUntilChannelStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilChannelStoppedWithContext is an extended version of WaitUntilChannelStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilChannelStoppedWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilChannelStopped", + MaxAttempts: 28, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "IDLE", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "STOPPING", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeChannelInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeChannelRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go index a7ca10f3e12..5007aceb1fe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go @@ -100,6 +100,94 @@ func (c *MediaPackage) CreateChannelWithContext(ctx aws.Context, input *CreateCh return out, req.Send() } +const opCreateHarvestJob = "CreateHarvestJob" + +// CreateHarvestJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateHarvestJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateHarvestJob for more information on using the CreateHarvestJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateHarvestJobRequest method. +// req, resp := client.CreateHarvestJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateHarvestJob +func (c *MediaPackage) CreateHarvestJobRequest(input *CreateHarvestJobInput) (req *request.Request, output *CreateHarvestJobOutput) { + op := &request.Operation{ + Name: opCreateHarvestJob, + HTTPMethod: "POST", + HTTPPath: "/harvest_jobs", + } + + if input == nil { + input = &CreateHarvestJobInput{} + } + + output = &CreateHarvestJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateHarvestJob API operation for AWS Elemental MediaPackage. +// +// Creates a new HarvestJob record. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaPackage's +// API operation CreateHarvestJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// +// * ErrCodeForbiddenException "ForbiddenException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/CreateHarvestJob +func (c *MediaPackage) CreateHarvestJob(input *CreateHarvestJobInput) (*CreateHarvestJobOutput, error) { + req, out := c.CreateHarvestJobRequest(input) + return out, req.Send() +} + +// CreateHarvestJobWithContext is the same as CreateHarvestJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateHarvestJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaPackage) CreateHarvestJobWithContext(ctx aws.Context, input *CreateHarvestJobInput, opts ...request.Option) (*CreateHarvestJobOutput, error) { + req, out := c.CreateHarvestJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateOriginEndpoint = "CreateOriginEndpoint" // CreateOriginEndpointRequest generates a "aws/request.Request" representing the @@ -454,6 +542,94 @@ func (c *MediaPackage) DescribeChannelWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeHarvestJob = "DescribeHarvestJob" + +// DescribeHarvestJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHarvestJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeHarvestJob for more information on using the DescribeHarvestJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeHarvestJobRequest method. 
+// req, resp := client.DescribeHarvestJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeHarvestJob +func (c *MediaPackage) DescribeHarvestJobRequest(input *DescribeHarvestJobInput) (req *request.Request, output *DescribeHarvestJobOutput) { + op := &request.Operation{ + Name: opDescribeHarvestJob, + HTTPMethod: "GET", + HTTPPath: "/harvest_jobs/{id}", + } + + if input == nil { + input = &DescribeHarvestJobInput{} + } + + output = &DescribeHarvestJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeHarvestJob API operation for AWS Elemental MediaPackage. +// +// Gets details about an existing HarvestJob. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaPackage's +// API operation DescribeHarvestJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// +// * ErrCodeForbiddenException "ForbiddenException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/DescribeHarvestJob +func (c *MediaPackage) DescribeHarvestJob(input *DescribeHarvestJobInput) (*DescribeHarvestJobOutput, error) { + req, out := c.DescribeHarvestJobRequest(input) + return out, req.Send() +} + +// DescribeHarvestJobWithContext is the same as DescribeHarvestJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHarvestJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaPackage) DescribeHarvestJobWithContext(ctx aws.Context, input *DescribeHarvestJobInput, opts ...request.Option) (*DescribeHarvestJobOutput, error) { + req, out := c.DescribeHarvestJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeOriginEndpoint = "DescribeOriginEndpoint" // DescribeOriginEndpointRequest generates a "aws/request.Request" representing the @@ -647,7 +823,7 @@ func (c *MediaPackage) ListChannelsWithContext(ctx aws.Context, input *ListChann // // Example iterating over at most 3 pages of a ListChannels operation. 
// pageNum := 0 // err := client.ListChannelsPages(params, -// func(page *ListChannelsOutput, lastPage bool) bool { +// func(page *mediapackage.ListChannelsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -679,10 +855,158 @@ func (c *MediaPackage) ListChannelsPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListChannelsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListHarvestJobs = "ListHarvestJobs" + +// ListHarvestJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListHarvestJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListHarvestJobs for more information on using the ListHarvestJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListHarvestJobsRequest method. +// req, resp := client.ListHarvestJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListHarvestJobs +func (c *MediaPackage) ListHarvestJobsRequest(input *ListHarvestJobsInput) (req *request.Request, output *ListHarvestJobsOutput) { + op := &request.Operation{ + Name: opListHarvestJobs, + HTTPMethod: "GET", + HTTPPath: "/harvest_jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListHarvestJobsInput{} + } + + output = &ListHarvestJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListHarvestJobs API operation for AWS Elemental MediaPackage. +// +// Returns a collection of HarvestJob records. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaPackage's +// API operation ListHarvestJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnprocessableEntityException "UnprocessableEntityException" +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// +// * ErrCodeForbiddenException "ForbiddenException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ListHarvestJobs +func (c *MediaPackage) ListHarvestJobs(input *ListHarvestJobsInput) (*ListHarvestJobsOutput, error) { + req, out := c.ListHarvestJobsRequest(input) + return out, req.Send() +} + +// ListHarvestJobsWithContext is the same as ListHarvestJobs with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListHarvestJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaPackage) ListHarvestJobsWithContext(ctx aws.Context, input *ListHarvestJobsInput, opts ...request.Option) (*ListHarvestJobsOutput, error) { + req, out := c.ListHarvestJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListHarvestJobsPages iterates over the pages of a ListHarvestJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHarvestJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHarvestJobs operation. +// pageNum := 0 +// err := client.ListHarvestJobsPages(params, +// func(page *mediapackage.ListHarvestJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaPackage) ListHarvestJobsPages(input *ListHarvestJobsInput, fn func(*ListHarvestJobsOutput, bool) bool) error { + return c.ListHarvestJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListHarvestJobsPagesWithContext same as ListHarvestJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaPackage) ListHarvestJobsPagesWithContext(ctx aws.Context, input *ListHarvestJobsInput, fn func(*ListHarvestJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListHarvestJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListHarvestJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListHarvestJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -791,7 +1115,7 @@ func (c *MediaPackage) ListOriginEndpointsWithContext(ctx aws.Context, input *Li // // Example iterating over at most 3 pages of a ListOriginEndpoints operation. 
// pageNum := 0 // err := client.ListOriginEndpointsPages(params, -// func(page *ListOriginEndpointsOutput, lastPage bool) bool { +// func(page *mediapackage.ListOriginEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -823,10 +1147,12 @@ func (c *MediaPackage) ListOriginEndpointsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOriginEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOriginEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1783,6 +2109,186 @@ func (s *CreateChannelOutput) SetTags(v map[string]*string) *CreateChannelOutput return s } +type CreateHarvestJobInput struct { + _ struct{} `type:"structure"` + + // EndTime is a required field + EndTime *string `locationName:"endTime" type:"string" required:"true"` + + // Id is a required field + Id *string `locationName:"id" type:"string" required:"true"` + + // OriginEndpointId is a required field + OriginEndpointId *string `locationName:"originEndpointId" type:"string" required:"true"` + + // Configuration parameters for where in an S3 bucket to place the harvested + // content + // + // S3Destination is a required field + S3Destination *S3Destination `locationName:"s3Destination" type:"structure" required:"true"` + + // StartTime is a required field + StartTime *string `locationName:"startTime" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHarvestJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHarvestJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHarvestJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHarvestJobInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.OriginEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginEndpointId")) + } + if s.S3Destination == nil { + invalidParams.Add(request.NewErrParamRequired("S3Destination")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.S3Destination != nil { + if err := s.S3Destination.Validate(); err != nil { + invalidParams.AddNested("S3Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *CreateHarvestJobInput) SetEndTime(v string) *CreateHarvestJobInput { + s.EndTime = &v + return s +} + +// SetId sets the Id field's value. +func (s *CreateHarvestJobInput) SetId(v string) *CreateHarvestJobInput { + s.Id = &v + return s +} + +// SetOriginEndpointId sets the OriginEndpointId field's value. +func (s *CreateHarvestJobInput) SetOriginEndpointId(v string) *CreateHarvestJobInput { + s.OriginEndpointId = &v + return s +} + +// SetS3Destination sets the S3Destination field's value. +func (s *CreateHarvestJobInput) SetS3Destination(v *S3Destination) *CreateHarvestJobInput { + s.S3Destination = v + return s +} + +// SetStartTime sets the StartTime field's value. 
+func (s *CreateHarvestJobInput) SetStartTime(v string) *CreateHarvestJobInput { + s.StartTime = &v + return s +} + +type CreateHarvestJobOutput struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + ChannelId *string `locationName:"channelId" type:"string"` + + CreatedAt *string `locationName:"createdAt" type:"string"` + + EndTime *string `locationName:"endTime" type:"string"` + + Id *string `locationName:"id" type:"string"` + + OriginEndpointId *string `locationName:"originEndpointId" type:"string"` + + // Configuration parameters for where in an S3 bucket to place the harvested + // content + S3Destination *S3Destination `locationName:"s3Destination" type:"structure"` + + StartTime *string `locationName:"startTime" type:"string"` + + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s CreateHarvestJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHarvestJobOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateHarvestJobOutput) SetArn(v string) *CreateHarvestJobOutput { + s.Arn = &v + return s +} + +// SetChannelId sets the ChannelId field's value. +func (s *CreateHarvestJobOutput) SetChannelId(v string) *CreateHarvestJobOutput { + s.ChannelId = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *CreateHarvestJobOutput) SetCreatedAt(v string) *CreateHarvestJobOutput { + s.CreatedAt = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *CreateHarvestJobOutput) SetEndTime(v string) *CreateHarvestJobOutput { + s.EndTime = &v + return s +} + +// SetId sets the Id field's value. +func (s *CreateHarvestJobOutput) SetId(v string) *CreateHarvestJobOutput { + s.Id = &v + return s +} + +// SetOriginEndpointId sets the OriginEndpointId field's value. +func (s *CreateHarvestJobOutput) SetOriginEndpointId(v string) *CreateHarvestJobOutput { + s.OriginEndpointId = &v + return s +} + +// SetS3Destination sets the S3Destination field's value. +func (s *CreateHarvestJobOutput) SetS3Destination(v *S3Destination) *CreateHarvestJobOutput { + s.S3Destination = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *CreateHarvestJobOutput) SetStartTime(v string) *CreateHarvestJobOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateHarvestJobOutput) SetStatus(v string) *CreateHarvestJobOutput { + s.Status = &v + return s +} + type CreateOriginEndpointInput struct { _ struct{} `type:"structure"` @@ -1808,6 +2314,8 @@ type CreateOriginEndpointInput struct { // A Microsoft Smooth Streaming (MSS) packaging configuration. MssPackage *MssPackage `locationName:"mssPackage" type:"structure"` + Origination *string `locationName:"origination" type:"string" enum:"Origination"` + StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"` // A collection of tags associated with a resource @@ -1912,6 +2420,12 @@ func (s *CreateOriginEndpointInput) SetMssPackage(v *MssPackage) *CreateOriginEn return s } +// SetOrigination sets the Origination field's value. +func (s *CreateOriginEndpointInput) SetOrigination(v string) *CreateOriginEndpointInput { + s.Origination = &v + return s +} + // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value. 
func (s *CreateOriginEndpointInput) SetStartoverWindowSeconds(v int64) *CreateOriginEndpointInput {
 	s.StartoverWindowSeconds = &v
@@ -1961,6 +2475,8 @@ type CreateOriginEndpointOutput struct {
 	// A Microsoft Smooth Streaming (MSS) packaging configuration.
 	MssPackage *MssPackage `locationName:"mssPackage" type:"structure"`
 
+	Origination *string `locationName:"origination" type:"string" enum:"Origination"`
+
 	StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"`
 
 	// A collection of tags associated with a resource
@@ -2037,6 +2553,12 @@ func (s *CreateOriginEndpointOutput) SetMssPackage(v *MssPackage) *CreateOriginE
 	return s
 }
 
+// SetOrigination sets the Origination field's value.
+func (s *CreateOriginEndpointOutput) SetOrigination(v string) *CreateOriginEndpointOutput {
+	s.Origination = &v
+	return s
+}
+
 // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value.
 func (s *CreateOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *CreateOriginEndpointOutput {
 	s.StartoverWindowSeconds = &v
@@ -2125,6 +2647,23 @@ func (s *DashEncryption) SetSpekeKeyProvider(v *SpekeKeyProvider) *DashEncryptio
 type DashPackage struct {
 	_ struct{} `type:"structure"`
 
+	// A list of SCTE-35 message types that are treated as ad markers in the output.
+	// If empty, no ad markers are output. Specify multiple items to create ad markers
+	// for all of the included message types.
+	AdTriggers []*string `locationName:"adTriggers" type:"list"`
+
+	// This setting allows the delivery restriction flags on SCTE-35 segmentation
+	// descriptors to determine whether a message signals an ad. Choosing "NONE"
+	// means no SCTE-35 messages become ads. Choosing "RESTRICTED" means SCTE-35
+	// messages of the types specified in AdTriggers that contain delivery restrictions
+	// will be treated as ads. Choosing "UNRESTRICTED" means SCTE-35 messages of
+	// the types specified in AdTriggers that do not contain delivery restrictions
+	// will be treated as ads. Choosing "BOTH" means all SCTE-35 messages of the
+	// types specified in AdTriggers will be treated as ads. Note that Splice Insert
+	// messages do not have these flags and are always treated as ads if specified
+	// in AdTriggers.
+	AdsOnDeliveryRestrictions *string `locationName:"adsOnDeliveryRestrictions" type:"string" enum:"AdsOnDeliveryRestrictions"`
+
 	// A Dynamic Adaptive Streaming over HTTP (DASH) encryption configuration.
 	Encryption *DashEncryption `locationName:"encryption" type:"structure"`
@@ -2160,10 +2699,12 @@ type DashPackage struct {
 	// the nearest multiple of the source segment duration.
 	SegmentDurationSeconds *int64 `locationName:"segmentDurationSeconds" type:"integer"`
 
-	// Determines the type of SegmentTimeline included in the Media Presentation
+	// Determines the type of SegmentTemplate included in the Media Presentation
 	// Description (MPD). When set to NUMBER_WITH_TIMELINE, a full timeline is presented
 	// in each SegmentTemplate, with $Number$ media URLs. When set to TIME_WITH_TIMELINE,
 	// a full timeline is presented in each SegmentTemplate, with $Time$ media URLs.
+	// When set to NUMBER_WITH_DURATION, only a duration is included in each SegmentTemplate,
+	// with $Number$ media URLs.
 	SegmentTemplateFormat *string `locationName:"segmentTemplateFormat" type:"string" enum:"SegmentTemplateFormat"`
 
 	// A StreamSelection configuration.
@@ -2198,6 +2739,18 @@ func (s *DashPackage) Validate() error {
 	return nil
 }
 
+// SetAdTriggers sets the AdTriggers field's value.
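+//
+// A hedged sketch of the ad-trigger configuration described above (the chosen
+// message types and restriction mode are illustrative assumptions):
+//
+//    pkg := &mediapackage.DashPackage{}
+//    pkg.SetAdTriggers([]*string{
+//        aws.String("SPLICE_INSERT"),
+//        aws.String("PROVIDER_ADVERTISEMENT"),
+//    })
+//    // Only restricted SCTE-35 messages of the listed types become ads.
+//    pkg.SetAdsOnDeliveryRestrictions(mediapackage.AdsOnDeliveryRestrictionsRestricted)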
+func (s *DashPackage) SetAdTriggers(v []*string) *DashPackage { + s.AdTriggers = v + return s +} + +// SetAdsOnDeliveryRestrictions sets the AdsOnDeliveryRestrictions field's value. +func (s *DashPackage) SetAdsOnDeliveryRestrictions(v string) *DashPackage { + s.AdsOnDeliveryRestrictions = &v + return s +} + // SetEncryption sets the Encryption field's value. func (s *DashPackage) SetEncryption(v *DashEncryption) *DashPackage { s.Encryption = v @@ -2414,54 +2967,181 @@ type DescribeChannelOutput struct { Arn *string `locationName:"arn" type:"string"` - Description *string `locationName:"description" type:"string"` + Description *string `locationName:"description" type:"string"` + + // An HTTP Live Streaming (HLS) ingest resource configuration. + HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` + + Id *string `locationName:"id" type:"string"` + + // A collection of tags associated with a resource + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s DescribeChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChannelOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { + s.Arn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DescribeChannelOutput) SetDescription(v string) *DescribeChannelOutput { + s.Description = &v + return s +} + +// SetHlsIngest sets the HlsIngest field's value. +func (s *DescribeChannelOutput) SetHlsIngest(v *HlsIngest) *DescribeChannelOutput { + s.HlsIngest = v + return s +} + +// SetId sets the Id field's value. +func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { + s.Id = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DescribeChannelOutput) SetTags(v map[string]*string) *DescribeChannelOutput { + s.Tags = v + return s +} + +type DescribeHarvestJobInput struct { + _ struct{} `type:"structure"` + + // Id is a required field + Id *string `location:"uri" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeHarvestJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHarvestJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeHarvestJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeHarvestJobInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *DescribeHarvestJobInput) SetId(v string) *DescribeHarvestJobInput { + s.Id = &v + return s +} + +type DescribeHarvestJobOutput struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + ChannelId *string `locationName:"channelId" type:"string"` + + CreatedAt *string `locationName:"createdAt" type:"string"` - // An HTTP Live Streaming (HLS) ingest resource configuration. 
- HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` + EndTime *string `locationName:"endTime" type:"string"` Id *string `locationName:"id" type:"string"` - // A collection of tags associated with a resource - Tags map[string]*string `locationName:"tags" type:"map"` + OriginEndpointId *string `locationName:"originEndpointId" type:"string"` + + // Configuration parameters for where in an S3 bucket to place the harvested + // content + S3Destination *S3Destination `locationName:"s3Destination" type:"structure"` + + StartTime *string `locationName:"startTime" type:"string"` + + Status *string `locationName:"status" type:"string" enum:"Status"` } // String returns the string representation -func (s DescribeChannelOutput) String() string { +func (s DescribeHarvestJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeChannelOutput) GoString() string { +func (s DescribeHarvestJobOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { +func (s *DescribeHarvestJobOutput) SetArn(v string) *DescribeHarvestJobOutput { s.Arn = &v return s } -// SetDescription sets the Description field's value. -func (s *DescribeChannelOutput) SetDescription(v string) *DescribeChannelOutput { - s.Description = &v +// SetChannelId sets the ChannelId field's value. +func (s *DescribeHarvestJobOutput) SetChannelId(v string) *DescribeHarvestJobOutput { + s.ChannelId = &v return s } -// SetHlsIngest sets the HlsIngest field's value. -func (s *DescribeChannelOutput) SetHlsIngest(v *HlsIngest) *DescribeChannelOutput { - s.HlsIngest = v +// SetCreatedAt sets the CreatedAt field's value. +func (s *DescribeHarvestJobOutput) SetCreatedAt(v string) *DescribeHarvestJobOutput { + s.CreatedAt = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeHarvestJobOutput) SetEndTime(v string) *DescribeHarvestJobOutput { + s.EndTime = &v return s } // SetId sets the Id field's value. -func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { +func (s *DescribeHarvestJobOutput) SetId(v string) *DescribeHarvestJobOutput { s.Id = &v return s } -// SetTags sets the Tags field's value. -func (s *DescribeChannelOutput) SetTags(v map[string]*string) *DescribeChannelOutput { - s.Tags = v +// SetOriginEndpointId sets the OriginEndpointId field's value. +func (s *DescribeHarvestJobOutput) SetOriginEndpointId(v string) *DescribeHarvestJobOutput { + s.OriginEndpointId = &v + return s +} + +// SetS3Destination sets the S3Destination field's value. +func (s *DescribeHarvestJobOutput) SetS3Destination(v *S3Destination) *DescribeHarvestJobOutput { + s.S3Destination = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeHarvestJobOutput) SetStartTime(v string) *DescribeHarvestJobOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeHarvestJobOutput) SetStatus(v string) *DescribeHarvestJobOutput { + s.Status = &v return s } @@ -2529,6 +3209,8 @@ type DescribeOriginEndpointOutput struct { // A Microsoft Smooth Streaming (MSS) packaging configuration. 
 	MssPackage *MssPackage `locationName:"mssPackage" type:"structure"`
 
+	Origination *string `locationName:"origination" type:"string" enum:"Origination"`
+
 	StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"`
 
 	// A collection of tags associated with a resource
@@ -2605,6 +3287,12 @@ func (s *DescribeOriginEndpointOutput) SetMssPackage(v *MssPackage) *DescribeOri
 	return s
 }
 
+// SetOrigination sets the Origination field's value.
+func (s *DescribeOriginEndpointOutput) SetOrigination(v string) *DescribeOriginEndpointOutput {
+	s.Origination = &v
+	return s
+}
+
 // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value.
 func (s *DescribeOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *DescribeOriginEndpointOutput {
 	s.StartoverWindowSeconds = &v
@@ -2635,6 +3323,107 @@ func (s *DescribeOriginEndpointOutput) SetWhitelist(v []*string) *DescribeOrigin
 	return s
 }
 
+// A HarvestJob resource configuration
+type HarvestJob struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) assigned to the HarvestJob.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The ID of the Channel that the HarvestJob will harvest from.
+	ChannelId *string `locationName:"channelId" type:"string"`
+
+	// The time the HarvestJob was submitted
+	CreatedAt *string `locationName:"createdAt" type:"string"`
+
+	// The end of the time-window which will be harvested.
+	EndTime *string `locationName:"endTime" type:"string"`
+
+	// The ID of the HarvestJob. The ID must be unique within the region and it cannot
+	// be changed after the HarvestJob is submitted.
+	Id *string `locationName:"id" type:"string"`
+
+	// The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot
+	// be changed after the HarvestJob is submitted.
+	OriginEndpointId *string `locationName:"originEndpointId" type:"string"`
+
+	// Configuration parameters for where in an S3 bucket to place the harvested
+	// content
+	S3Destination *S3Destination `locationName:"s3Destination" type:"structure"`
+
+	// The start of the time-window which will be harvested.
+	StartTime *string `locationName:"startTime" type:"string"`
+
+	// The current status of the HarvestJob. Consider setting up a CloudWatch Event
+	// to listen for HarvestJobs as they succeed or fail. In the event of failure,
+	// the CloudWatch Event will include an explanation of why the HarvestJob failed.
+	Status *string `locationName:"status" type:"string" enum:"Status"`
+}
+
+// String returns the string representation
+func (s HarvestJob) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HarvestJob) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *HarvestJob) SetArn(v string) *HarvestJob {
+	s.Arn = &v
+	return s
+}
+
+// SetChannelId sets the ChannelId field's value.
+func (s *HarvestJob) SetChannelId(v string) *HarvestJob {
+	s.ChannelId = &v
+	return s
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *HarvestJob) SetCreatedAt(v string) *HarvestJob {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *HarvestJob) SetEndTime(v string) *HarvestJob {
+	s.EndTime = &v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *HarvestJob) SetId(v string) *HarvestJob {
+	s.Id = &v
+	return s
+}
+
+// SetOriginEndpointId sets the OriginEndpointId field's value.
+func (s *HarvestJob) SetOriginEndpointId(v string) *HarvestJob {
+	s.OriginEndpointId = &v
+	return s
+}
+
+// SetS3Destination sets the S3Destination field's value.
+func (s *HarvestJob) SetS3Destination(v *S3Destination) *HarvestJob {
+	s.S3Destination = v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *HarvestJob) SetStartTime(v string) *HarvestJob {
+	s.StartTime = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *HarvestJob) SetStatus(v string) *HarvestJob {
+	s.Status = &v
+	return s
+}
+
 // An HTTP Live Streaming (HLS) encryption configuration.
 type HlsEncryption struct {
 	_ struct{} `type:"structure"`
@@ -2856,6 +3645,23 @@ type HlsManifestCreateOrUpdateParameters struct {
 	// ad markers and blackout tags based on SCTE-35 messages in the input source.
 	AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"`
 
+	// A list of SCTE-35 message types that are treated as ad markers in the output.
+	// If empty, no ad markers are output. Specify multiple items to create ad markers
+	// for all of the included message types.
+	AdTriggers []*string `locationName:"adTriggers" type:"list"`
+
+	// This setting allows the delivery restriction flags on SCTE-35 segmentation
+	// descriptors to determine whether a message signals an ad. Choosing "NONE"
+	// means no SCTE-35 messages become ads. Choosing "RESTRICTED" means SCTE-35
+	// messages of the types specified in AdTriggers that contain delivery restrictions
+	// will be treated as ads. Choosing "UNRESTRICTED" means SCTE-35 messages of
+	// the types specified in AdTriggers that do not contain delivery restrictions
+	// will be treated as ads. Choosing "BOTH" means all SCTE-35 messages of the
+	// types specified in AdTriggers will be treated as ads. Note that Splice Insert
+	// messages do not have these flags and are always treated as ads if specified
+	// in AdTriggers.
+	AdsOnDeliveryRestrictions *string `locationName:"adsOnDeliveryRestrictions" type:"string" enum:"AdsOnDeliveryRestrictions"`
+
 	// The ID of the manifest. The ID must be unique within the OriginEndpoint and
 	// it cannot be changed after it is created.
 	//
@@ -2917,6 +3723,18 @@ func (s *HlsManifestCreateOrUpdateParameters) SetAdMarkers(v string) *HlsManifes
 	return s
 }
 
+// SetAdTriggers sets the AdTriggers field's value.
+func (s *HlsManifestCreateOrUpdateParameters) SetAdTriggers(v []*string) *HlsManifestCreateOrUpdateParameters {
+	s.AdTriggers = v
+	return s
+}
+
+// SetAdsOnDeliveryRestrictions sets the AdsOnDeliveryRestrictions field's value.
+func (s *HlsManifestCreateOrUpdateParameters) SetAdsOnDeliveryRestrictions(v string) *HlsManifestCreateOrUpdateParameters {
+	s.AdsOnDeliveryRestrictions = &v
+	return s
+}
+
 // SetId sets the Id field's value.
 func (s *HlsManifestCreateOrUpdateParameters) SetId(v string) *HlsManifestCreateOrUpdateParameters {
 	s.Id = &v
@@ -2964,6 +3782,23 @@ type HlsPackage struct {
 	// ad markers and blackout tags based on SCTE-35 messages in the input source.
 	AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"`
 
+	// A list of SCTE-35 message types that are treated as ad markers in the output.
+	// If empty, no ad markers are output. Specify multiple items to create ad markers
+	// for all of the included message types.
+	AdTriggers []*string `locationName:"adTriggers" type:"list"`
+
+	// This setting allows the delivery restriction flags on SCTE-35 segmentation
+	// descriptors to determine whether a message signals an ad. Choosing "NONE"
+	// means no SCTE-35 messages become ads. Choosing "RESTRICTED" means SCTE-35
+	// messages of the types specified in AdTriggers that contain delivery restrictions
+	// will be treated as ads. Choosing "UNRESTRICTED" means SCTE-35 messages of
+	// the types specified in AdTriggers that do not contain delivery restrictions
+	// will be treated as ads. Choosing "BOTH" means all SCTE-35 messages of the
+	// types specified in AdTriggers will be treated as ads. Note that Splice Insert
+	// messages do not have these flags and are always treated as ads if specified
+	// in AdTriggers.
+	AdsOnDeliveryRestrictions *string `locationName:"adsOnDeliveryRestrictions" type:"string" enum:"AdsOnDeliveryRestrictions"`
+
 	// An HTTP Live Streaming (HLS) encryption configuration.
 	Encryption *HlsEncryption `locationName:"encryption" type:"structure"`
@@ -3030,6 +3865,18 @@ func (s *HlsPackage) SetAdMarkers(v string) *HlsPackage {
 	return s
 }
 
+// SetAdTriggers sets the AdTriggers field's value.
+func (s *HlsPackage) SetAdTriggers(v []*string) *HlsPackage {
+	s.AdTriggers = v
+	return s
+}
+
+// SetAdsOnDeliveryRestrictions sets the AdsOnDeliveryRestrictions field's value.
+func (s *HlsPackage) SetAdsOnDeliveryRestrictions(v string) *HlsPackage {
+	s.AdsOnDeliveryRestrictions = &v
+	return s
+}
+
 // SetEncryption sets the Encryption field's value.
 func (s *HlsPackage) SetEncryption(v *HlsEncryption) *HlsPackage {
 	s.Encryption = v
@@ -3202,6 +4049,95 @@ func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput {
 	return s
 }
 
+type ListHarvestJobsInput struct {
+	_ struct{} `type:"structure"`
+
+	IncludeChannelId *string `location:"querystring" locationName:"includeChannelId" type:"string"`
+
+	IncludeStatus *string `location:"querystring" locationName:"includeStatus" type:"string"`
+
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListHarvestJobsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHarvestJobsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListHarvestJobsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListHarvestJobsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIncludeChannelId sets the IncludeChannelId field's value.
+func (s *ListHarvestJobsInput) SetIncludeChannelId(v string) *ListHarvestJobsInput {
+	s.IncludeChannelId = &v
+	return s
+}
+
+// SetIncludeStatus sets the IncludeStatus field's value.
+func (s *ListHarvestJobsInput) SetIncludeStatus(v string) *ListHarvestJobsInput {
+	s.IncludeStatus = &v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListHarvestJobsInput) SetMaxResults(v int64) *ListHarvestJobsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListHarvestJobsInput) SetNextToken(v string) *ListHarvestJobsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListHarvestJobsOutput struct {
+	_ struct{} `type:"structure"`
+
+	HarvestJobs []*HarvestJob `locationName:"harvestJobs" type:"list"`
+
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListHarvestJobsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListHarvestJobsOutput) GoString() string {
+	return s.String()
+}
+
+// SetHarvestJobs sets the HarvestJobs field's value.
+func (s *ListHarvestJobsOutput) SetHarvestJobs(v []*HarvestJob) *ListHarvestJobsOutput {
+	s.HarvestJobs = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListHarvestJobsOutput) SetNextToken(v string) *ListHarvestJobsOutput {
+	s.NextToken = &v
+	return s
+}
+
 type ListOriginEndpointsInput struct {
 	_ struct{} `type:"structure"`
@@ -3486,6 +4422,13 @@ type OriginEndpoint struct {
 	// A Microsoft Smooth Streaming (MSS) packaging configuration.
 	MssPackage *MssPackage `locationName:"mssPackage" type:"structure"`
 
+	// Control whether origination of video is allowed for this OriginEndpoint.
+	// If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other
+	// form of access control. If set to DENY, the OriginEndpoint may not be requested.
+	// This can be helpful for Live to VOD harvesting, or for temporarily disabling
+	// origination.
+	Origination *string `locationName:"origination" type:"string" enum:"Origination"`
+
 	// Maximum duration (seconds) of content to retain for startover playback. If
 	// not specified, startover playback will be disabled for the OriginEndpoint.
 	StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"`
@@ -3568,6 +4511,12 @@ func (s *OriginEndpoint) SetMssPackage(v *MssPackage) *OriginEndpoint {
 	return s
 }
 
+// SetOrigination sets the Origination field's value.
+func (s *OriginEndpoint) SetOrigination(v string) *OriginEndpoint {
+	s.Origination = &v
+	return s
+}
+
 // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value.
 func (s *OriginEndpoint) SetStartoverWindowSeconds(v int64) *OriginEndpoint {
 	s.StartoverWindowSeconds = &v
@@ -3805,6 +4754,75 @@ func (s *RotateIngestEndpointCredentialsOutput) SetTags(v map[string]*string) *R
 	return s
 }
 
+// Configuration parameters for where in an S3 bucket to place the harvested
+// content
+type S3Destination struct {
+	_ struct{} `type:"structure"`
+
+	// The name of an S3 bucket within which harvested content will be exported
+	//
+	// BucketName is a required field
+	BucketName *string `locationName:"bucketName" type:"string" required:"true"`
+
+	// The key in the specified S3 bucket where the harvested top-level manifest
+	// will be placed.
+	//
+	// ManifestKey is a required field
+	ManifestKey *string `locationName:"manifestKey" type:"string" required:"true"`
+
+	// The IAM role used to write to the specified S3 bucket
+	//
+	// RoleArn is a required field
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3Destination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Destination) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
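+//
+// A hedged sketch of a fully populated S3Destination, as it might appear in a
+// CreateHarvestJobInput (the bucket, key, and role ARN values are assumptions):
+//
+//    dest := &mediapackage.S3Destination{
+//        BucketName:  aws.String("my-harvest-bucket"),
+//        ManifestKey: aws.String("vod/clip/index.m3u8"),
+//        RoleArn:     aws.String("arn:aws:iam::111122223333:role/HarvestRole"),
+//    }
+//    if err := dest.Validate(); err != nil {
+//        // all three fields are required; Validate reports any that are missing
+//    }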
+func (s *S3Destination) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "S3Destination"}
+	if s.BucketName == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketName"))
+	}
+	if s.ManifestKey == nil {
+		invalidParams.Add(request.NewErrParamRequired("ManifestKey"))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *S3Destination) SetBucketName(v string) *S3Destination {
+	s.BucketName = &v
+	return s
+}
+
+// SetManifestKey sets the ManifestKey field's value.
+func (s *S3Destination) SetManifestKey(v string) *S3Destination {
+	s.ManifestKey = &v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *S3Destination) SetRoleArn(v string) *S3Destination {
+	s.RoleArn = &v
+	return s
+}
+
 // A configuration for accessing an external Secure Packager and Encoder Key
 // Exchange (SPEKE) service that will provide encryption keys.
 type SpekeKeyProvider struct {
@@ -4196,6 +5214,8 @@ type UpdateOriginEndpointInput struct {
 	// A Microsoft Smooth Streaming (MSS) packaging configuration.
 	MssPackage *MssPackage `locationName:"mssPackage" type:"structure"`
 
+	Origination *string `locationName:"origination" type:"string" enum:"Origination"`
+
 	StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"`
 
 	TimeDelaySeconds *int64 `locationName:"timeDelaySeconds" type:"integer"`
@@ -4291,6 +5311,12 @@ func (s *UpdateOriginEndpointInput) SetMssPackage(v *MssPackage) *UpdateOriginEn
 	return s
 }
 
+// SetOrigination sets the Origination field's value.
+func (s *UpdateOriginEndpointInput) SetOrigination(v string) *UpdateOriginEndpointInput {
+	s.Origination = &v
+	return s
+}
+
 // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value.
 func (s *UpdateOriginEndpointInput) SetStartoverWindowSeconds(v int64) *UpdateOriginEndpointInput {
 	s.StartoverWindowSeconds = &v
@@ -4334,6 +5360,8 @@ type UpdateOriginEndpointOutput struct {
 	// A Microsoft Smooth Streaming (MSS) packaging configuration.
 	MssPackage *MssPackage `locationName:"mssPackage" type:"structure"`
 
+	Origination *string `locationName:"origination" type:"string" enum:"Origination"`
+
 	StartoverWindowSeconds *int64 `locationName:"startoverWindowSeconds" type:"integer"`
 
 	// A collection of tags associated with a resource
@@ -4410,6 +5438,12 @@ func (s *UpdateOriginEndpointOutput) SetMssPackage(v *MssPackage) *UpdateOriginE
 	return s
 }
 
+// SetOrigination sets the Origination field's value.
+func (s *UpdateOriginEndpointOutput) SetOrigination(v string) *UpdateOriginEndpointOutput {
+	s.Origination = &v
+	return s
+}
+
 // SetStartoverWindowSeconds sets the StartoverWindowSeconds field's value.
 func (s *UpdateOriginEndpointOutput) SetStartoverWindowSeconds(v int64) *UpdateOriginEndpointOutput {
 	s.StartoverWindowSeconds = &v
@@ -4451,6 +5485,30 @@ const (
 	AdMarkersPassthrough = "PASSTHROUGH"
 )
 
+// This setting allows the delivery restriction flags on SCTE-35 segmentation
+// descriptors to determine whether a message signals an ad. Choosing "NONE"
+// means no SCTE-35 messages become ads. Choosing "RESTRICTED" means SCTE-35
+// messages of the types specified in AdTriggers that contain delivery restrictions
+// will be treated as ads. Choosing "UNRESTRICTED" means SCTE-35 messages of
+// the types specified in AdTriggers that do not contain delivery restrictions
+// will be treated as ads. Choosing "BOTH" means all SCTE-35 messages of the
+// types specified in AdTriggers will be treated as ads. Note that Splice Insert
+// messages do not have these flags and are always treated as ads if specified
+// in AdTriggers.
+const (
+	// AdsOnDeliveryRestrictionsNone is a AdsOnDeliveryRestrictions enum value
+	AdsOnDeliveryRestrictionsNone = "NONE"
+
+	// AdsOnDeliveryRestrictionsRestricted is a AdsOnDeliveryRestrictions enum value
+	AdsOnDeliveryRestrictionsRestricted = "RESTRICTED"
+
+	// AdsOnDeliveryRestrictionsUnrestricted is a AdsOnDeliveryRestrictions enum value
+	AdsOnDeliveryRestrictionsUnrestricted = "UNRESTRICTED"
+
+	// AdsOnDeliveryRestrictionsBoth is a AdsOnDeliveryRestrictions enum value
+	AdsOnDeliveryRestrictionsBoth = "BOTH"
+)
+
 const (
 	// EncryptionMethodAes128 is a EncryptionMethod enum value
 	EncryptionMethodAes128 = "AES_128"
@@ -4467,6 +5525,14 @@ const (
 	ManifestLayoutCompact = "COMPACT"
 )
 
+const (
+	// OriginationAllow is a Origination enum value
+	OriginationAllow = "ALLOW"
+
+	// OriginationDeny is a Origination enum value
+	OriginationDeny = "DENY"
+)
+
 const (
 	// PlaylistTypeNone is a PlaylistType enum value
 	PlaylistTypeNone = "NONE"
@@ -4492,6 +5558,20 @@ const (
 	// SegmentTemplateFormatTimeWithTimeline is a SegmentTemplateFormat enum value
 	SegmentTemplateFormatTimeWithTimeline = "TIME_WITH_TIMELINE"
+
+	// SegmentTemplateFormatNumberWithDuration is a SegmentTemplateFormat enum value
+	SegmentTemplateFormatNumberWithDuration = "NUMBER_WITH_DURATION"
+)
+
+const (
+	// StatusInProgress is a Status enum value
+	StatusInProgress = "IN_PROGRESS"
+
+	// StatusSucceeded is a Status enum value
+	StatusSucceeded = "SUCCEEDED"
+
+	// StatusFailed is a Status enum value
+	StatusFailed = "FAILED"
 )
 
 const (
@@ -4505,6 +5585,32 @@ const (
 	StreamOrderVideoBitrateDescending = "VIDEO_BITRATE_DESCENDING"
 )
 
+const (
+	// __AdTriggersElementSpliceInsert is a __AdTriggersElement enum value
+	__AdTriggersElementSpliceInsert = "SPLICE_INSERT"
+
+	// __AdTriggersElementBreak is a __AdTriggersElement enum value
+	__AdTriggersElementBreak = "BREAK"
+
+	// __AdTriggersElementProviderAdvertisement is a __AdTriggersElement enum value
+	__AdTriggersElementProviderAdvertisement = "PROVIDER_ADVERTISEMENT"
+
+	// __AdTriggersElementDistributorAdvertisement is a __AdTriggersElement enum value
+	__AdTriggersElementDistributorAdvertisement = "DISTRIBUTOR_ADVERTISEMENT"
+
+	// __AdTriggersElementProviderPlacementOpportunity is a __AdTriggersElement enum value
+	__AdTriggersElementProviderPlacementOpportunity = "PROVIDER_PLACEMENT_OPPORTUNITY"
+
+	// __AdTriggersElementDistributorPlacementOpportunity is a __AdTriggersElement enum value
+	__AdTriggersElementDistributorPlacementOpportunity = "DISTRIBUTOR_PLACEMENT_OPPORTUNITY"
+
+	// __AdTriggersElementProviderOverlayPlacementOpportunity is a __AdTriggersElement enum value
+	__AdTriggersElementProviderOverlayPlacementOpportunity = "PROVIDER_OVERLAY_PLACEMENT_OPPORTUNITY"
+
+	// __AdTriggersElementDistributorOverlayPlacementOpportunity is a __AdTriggersElement enum value
+	__AdTriggersElementDistributorOverlayPlacementOpportunity = "DISTRIBUTOR_OVERLAY_PLACEMENT_OPPORTUNITY"
+)
+
 const (
 	// __PeriodTriggersElementAds is a __PeriodTriggersElement enum value
 	__PeriodTriggersElementAds = "ADS"
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go
index e08a3e8787c..70659b45953 100644
---
a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaPackage { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediapackage" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaPackage { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaPackage { svc := &MediaPackage{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-12", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go index 7e3997ff0f0..d378c5f7561 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go @@ -857,6 +857,12 @@ func (c *MediaStore) ListContainersRequest(input *ListContainersInput) (req *req Name: opListContainers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -914,6 +920,144 @@ func (c *MediaStore) ListContainersWithContext(ctx aws.Context, input *ListConta return out, req.Send() } +// ListContainersPages iterates over the pages of a ListContainers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContainers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContainers operation. +// pageNum := 0 +// err := client.ListContainersPages(params, +// func(page *mediastore.ListContainersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaStore) ListContainersPages(input *ListContainersInput, fn func(*ListContainersOutput, bool) bool) error { + return c.ListContainersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListContainersPagesWithContext same as ListContainersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
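+//
+// A brief illustrative sketch (the client construction and the printed field
+// below are assumptions):
+//
+//    svc := mediastore.New(session.Must(session.NewSession()))
+//    err := svc.ListContainersPages(&mediastore.ListContainersInput{},
+//        func(page *mediastore.ListContainersOutput, lastPage bool) bool {
+//            for _, container := range page.Containers {
+//                fmt.Println(aws.StringValue(container.Name))
+//            }
+//            return true // continue until the final page
+//        })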
+func (c *MediaStore) ListContainersPagesWithContext(ctx aws.Context, input *ListContainersInput, fn func(*ListContainersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListContainersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListContainersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListContainersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/ListTagsForResource +func (c *MediaStore) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Elemental MediaStore. +// +// Returns a list of the tags assigned to the specified container. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeContainerInUseException "ContainerInUseException" +// The container that you specified in the request already exists or is being +// updated. +// +// * ErrCodeContainerNotFoundException "ContainerNotFoundException" +// The container that you specified in the request does not exist. +// +// * ErrCodeInternalServerError "InternalServerError" +// The service is temporarily unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/ListTagsForResource +func (c *MediaStore) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. 
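The new ListTagsForResource operation needs only the container ARN. A sketch reusing the svc client from the sketch above (the ARN is a placeholder):

    out, err := svc.ListTagsForResource(&mediastore.ListTagsForResourceInput{
    	Resource: aws.String("arn:aws:mediastore:us-west-2:111122223333:container/movies"), // placeholder ARN
    })
    if err != nil {
    	fmt.Println("error:", err)
    	return
    }
    for _, t := range out.Tags {
    	fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
    }
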
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutContainerPolicy = "PutContainerPolicy" // PutContainerPolicyRequest generates a "aws/request.Request" representing the @@ -1378,6 +1522,186 @@ func (c *MediaStore) StopAccessLoggingWithContext(ctx aws.Context, input *StopAc return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/TagResource +func (c *MediaStore) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Elemental MediaStore. +// +// Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value +// pairs that you can associate with AWS resources. For example, the tag key +// might be "customer" and the tag value might be "companyA." You can specify +// one or more tags to add to each container. You can add up to 50 tags to each +// container. For more information about tagging, including naming and usage +// conventions, see Tagging Resources in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeContainerInUseException "ContainerInUseException" +// The container that you specified in the request already exists or is being +// updated. +// +// * ErrCodeContainerNotFoundException "ContainerNotFoundException" +// The container that you specified in the request does not exist. 
+// +// * ErrCodeInternalServerError "InternalServerError" +// The service is temporarily unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/TagResource +func (c *MediaStore) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/UntagResource +func (c *MediaStore) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Elemental MediaStore. +// +// Removes tags from the specified container. You can specify one or more tags +// to remove. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeContainerInUseException "ContainerInUseException" +// The container that you specified in the request already exists or is being +// updated. +// +// * ErrCodeContainerNotFoundException "ContainerNotFoundException" +// The container that you specified in the request does not exist. +// +// * ErrCodeInternalServerError "InternalServerError" +// The service is temporarily unavailable. 
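Taken together, the tag lifecycle is: TagResource adds or overwrites keys, UntagResource removes by key. A sketch against the same placeholder ARN and svc client as above:

    arn := aws.String("arn:aws:mediastore:us-west-2:111122223333:container/movies") // placeholder
    // Add or update two tags on the container.
    if _, err := svc.TagResource(&mediastore.TagResourceInput{
    	Resource: arn,
    	Tags: []*mediastore.Tag{
    		{Key: aws.String("priority"), Value: aws.String("Medium")},
    		{Key: aws.String("type"), Value: aws.String("Contract")},
    	},
    }); err != nil {
    	fmt.Println("error:", err)
    }
    // Later, remove one of them by key alone.
    if _, err := svc.UntagResource(&mediastore.UntagResourceInput{
    	Resource: arn,
    	TagKeys:  []*string{aws.String("priority")},
    }); err != nil {
    	fmt.Println("error:", err)
    }
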
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/UntagResource +func (c *MediaStore) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // This section describes operations that you can perform on an AWS Elemental // MediaStore container. type Container struct { @@ -1581,6 +1905,14 @@ type CreateContainerInput struct { // // ContainerName is a required field ContainerName *string `min:"1" type:"string" required:"true"` + + // An array of key:value pairs that you define. These values can be anything + // that you want. Typically, the tag key represents a category (such as "environment") + // and the tag value represents a specific value within that category (such + // as "test," "development," or "production"). You can add up to 50 tags to + // each container. For more information about tagging, including naming and + // usage conventions, see Tagging Resources in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). + Tags []*Tag `type:"list"` } // String returns the string representation @@ -1602,6 +1934,16 @@ func (s *CreateContainerInput) Validate() error { if s.ContainerName != nil && len(*s.ContainerName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ContainerName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1615,6 +1957,12 @@ func (s *CreateContainerInput) SetContainerName(v string) *CreateContainerInput return s } +// SetTags sets the Tags field's value. +func (s *CreateContainerInput) SetTags(v []*Tag) *CreateContainerInput { + s.Tags = v + return s +} + type CreateContainerOutput struct { _ struct{} `type:"structure"` @@ -2219,6 +2567,70 @@ func (s *ListContainersOutput) SetNextToken(v string) *ListContainersOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the container. + // + // Resource is a required field + Resource *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
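Tags can also be attached at creation time through the new Tags field on CreateContainerInput shown above. A brief sketch (the container name is arbitrary):

    if _, err := svc.CreateContainer(&mediastore.CreateContainerInput{
    	ContainerName: aws.String("movies"),
    	Tags: []*mediastore.Tag{
    		{Key: aws.String("environment"), Value: aws.String("test")},
    	},
    }); err != nil {
    	fmt.Println("error:", err)
    }
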
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Resource != nil && len(*s.Resource) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Resource", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *ListTagsForResourceInput) SetResource(v string) *ListTagsForResourceInput { + s.Resource = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // An array of key:value pairs that are assigned to the container. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type PutContainerPolicyInput struct { _ struct{} `type:"structure"` @@ -2557,6 +2969,218 @@ func (s StopAccessLoggingOutput) GoString() string { return s.String() } +// A collection of tags associated with a container. Each tag consists of a +// key:value pair, which can be anything you define. Typically, the tag key +// represents a category (such as "environment") and the tag value represents +// a specific value within that category (such as "test," "development," or +// "production"). You can add up to 50 tags to each container. For more information +// about tagging, including naming and usage conventions, see Tagging Resources +// in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). +type Tag struct { + _ struct{} `type:"structure"` + + // Part of the key:value pair that defines a tag. You can use a tag key to describe + // a category of information, such as "customer." Tag keys are case-sensitive. + Key *string `min:"1" type:"string"` + + // Part of the key:value pair that defines a tag. You can use a tag value to + // describe a specific value within a category, such as "companyA" or "companyB." + // Tag values are case-sensitive. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the container. + // + // Resource is a required field + Resource *string `min:"1" type:"string" required:"true"` + + // An array of key:value pairs that you want to add to the container. You need + // to specify only the tags that you want to add or update. 
For example, suppose + // a container already has two tags (customer:CompanyA and priority:High). You + // want to change the priority tag and also add a third tag (type:Contract). + // For TagResource, you specify the following tags: priority:Medium, type:Contract. + // The result is that your container has three tags: customer:CompanyA, priority:Medium, + // and type:Contract. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Resource != nil && len(*s.Resource) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Resource", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *TagResourceInput) SetResource(v string) *TagResourceInput { + s.Resource = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the container. + // + // Resource is a required field + Resource *string `min:"1" type:"string" required:"true"` + + // A comma-separated list of keys for tags that you want to remove from the + // container. For example, if your container has two tags (customer:CompanyA + // and priority:High) and you want to remove one of the tags (priority:High), + // you specify the key for the tag that you want to remove (priority). + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Resource != nil && len(*s.Resource) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Resource", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *UntagResourceInput) SetResource(v string) *UntagResourceInput { + s.Resource = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + const ( // ContainerStatusActive is a ContainerStatus enum value ContainerStatusActive = "ACTIVE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go index 492b3bf04c3..0dbef72bfa9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStore { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStore { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStore { svc := &MediaStore{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go index a8e1df9fc54..773f341b62e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go @@ -229,7 +229,9 @@ func (c *MediaStoreData) GetObjectRequest(input *GetObjectInput) (req *request.R // GetObject API operation for AWS Elemental MediaStore Data Plane. // -// Downloads the object at the specified path. +// Downloads the object at the specified path. If the object’s upload availability +// is set to streaming, AWS Elemental MediaStore downloads the object even if +// it’s still uploading the object. 
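A sketch of reading such an object with the data-plane client (the path is a placeholder; io and os are assumed imported alongside the SDK packages used above, plus github.com/aws/aws-sdk-go/service/mediastoredata):

    data := mediastoredata.New(session.Must(session.NewSession()))
    obj, err := data.GetObject(&mediastoredata.GetObjectInput{
    	Path: aws.String("/movies/intro.ts"), // placeholder path
    })
    if err != nil {
    	fmt.Println("error:", err)
    	return
    }
    defer obj.Body.Close()
    // With streaming upload availability, bytes can arrive while the
    // producer is still writing; io.Copy simply reads until EOF.
    if _, err := io.Copy(os.Stdout, obj.Body); err != nil {
    	fmt.Println("error:", err)
    }
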
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -304,6 +306,12 @@ func (c *MediaStoreData) ListItemsRequest(input *ListItemsInput) (req *request.R Name: opListItems, HTTPMethod: "GET", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -356,6 +364,58 @@ func (c *MediaStoreData) ListItemsWithContext(ctx aws.Context, input *ListItemsI return out, req.Send() } +// ListItemsPages iterates over the pages of a ListItems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListItems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListItems operation. +// pageNum := 0 +// err := client.ListItemsPages(params, +// func(page *mediastoredata.ListItemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaStoreData) ListItemsPages(input *ListItemsInput, fn func(*ListItemsOutput, bool) bool) error { + return c.ListItemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListItemsPagesWithContext same as ListItemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStoreData) ListItemsPagesWithContext(ctx aws.Context, input *ListItemsInput, fn func(*ListItemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListItemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListItemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListItemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the @@ -403,7 +463,8 @@ func (c *MediaStoreData) PutObjectRequest(input *PutObjectInput) (req *request.R // PutObject API operation for AWS Elemental MediaStore Data Plane. // -// Uploads an object to the specified path. Object sizes are limited to 25 MB. +// Uploads an object to the specified path. Object sizes are limited to 25 MB +// for standard upload availability and 10 MB for streaming upload availability. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -633,8 +694,10 @@ type GetObjectInput struct { Path *string `location:"uri" locationName:"Path" min:"1" type:"string" required:"true"` // The range bytes of an object to retrieve. For more information about the - // Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). 
+ // Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). AWS Elemental + // MediaStore ignores this header for partially uploaded objects that have streaming + // upload availability. Range *string `location:"header" locationName:"Range" type:"string"` } @@ -949,6 +1012,11 @@ type PutObjectInput struct { // The bytes to be stored. // + // To use a non-seekable io.Reader for this request, wrap the io.Reader with + // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable + // readers. This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + // // Body is a required field Body io.ReadSeeker `type:"blob" required:"true"` @@ -993,6 +1061,16 @@ type PutObjectInput struct { // temporal storage class, and objects are persisted into durable storage shortly // after being received. StorageClass *string `location:"header" locationName:"x-amz-storage-class" min:"1" type:"string" enum:"StorageClass"` + + // Indicates the availability of an object while it is still uploading. If the + // value is set to streaming, the object is available for downloading after + // some initial buffering but before the object is uploaded completely. If the + // value is set to standard, the object is available for downloading only when + // it is uploaded completely. The default value for this header is standard. + // + // To use this header, you must also set the HTTP Transfer-Encoding header to + // chunked. + UploadAvailability *string `location:"header" locationName:"x-amz-upload-availability" min:"1" type:"string" enum:"UploadAvailability"` } // String returns the string representation @@ -1020,6 +1098,9 @@ func (s *PutObjectInput) Validate() error { if s.StorageClass != nil && len(*s.StorageClass) < 1 { invalidParams.Add(request.NewErrParamMinLen("StorageClass", 1)) } + if s.UploadAvailability != nil && len(*s.UploadAvailability) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UploadAvailability", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -1057,6 +1138,12 @@ func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { return s } +// SetUploadAvailability sets the UploadAvailability field's value. 
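Putting the Body and UploadAvailability notes together: a pipe is a non-seekable reader, so it must be wrapped in aws.ReadSeekCloser, which also lets the SDK send the payload with the chunked Transfer-Encoding that the field doc above requires. A sketch reusing the data client (path and payload are placeholders):

    pr, pw := io.Pipe() // non-seekable: fed by a producer goroutine
    go func() {
    	pw.Write([]byte("segment bytes")) // hypothetical producer
    	pw.Close()
    }()
    if _, err := data.PutObject(&mediastoredata.PutObjectInput{
    	Path:               aws.String("/movies/intro.ts"), // placeholder
    	Body:               aws.ReadSeekCloser(pr),         // no retries for non-seekable readers
    	ContentType:        aws.String("video/MP2T"),
    	UploadAvailability: aws.String(mediastoredata.UploadAvailabilityStreaming),
    }); err != nil {
    	fmt.Println("error:", err)
    }
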
+func (s *PutObjectInput) SetUploadAvailability(v string) *PutObjectInput { + s.UploadAvailability = &v + return s +} + type PutObjectOutput struct { _ struct{} `type:"structure"` @@ -1110,3 +1197,11 @@ const ( // StorageClassTemporal is a StorageClass enum value StorageClassTemporal = "TEMPORAL" ) + +const ( + // UploadAvailabilityStandard is a UploadAvailability enum value + UploadAvailabilityStandard = "STANDARD" + + // UploadAvailabilityStreaming is a UploadAvailability enum value + UploadAvailabilityStreaming = "STREAMING" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go index 4203140146d..0fcbc2141b6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStoreData { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStoreData { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStoreData { svc := &MediaStoreData{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/api.go index aedf340b292..0b3086c2acb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/api.go @@ -2421,6 +2421,9 @@ type CreateBrokerRequest struct { // The deployment mode of the broker. DeploymentMode *string `locationName:"deploymentMode" type:"string" enum:"DeploymentMode"` + // Encryption options for the broker. + EncryptionOptions *EncryptionOptions `locationName:"encryptionOptions" type:"structure"` + // The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ. EngineType *string `locationName:"engineType" type:"string" enum:"EngineType"` @@ -2456,6 +2459,21 @@ func (s CreateBrokerRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBrokerRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBrokerRequest"} + if s.EncryptionOptions != nil { + if err := s.EncryptionOptions.Validate(); err != nil { + invalidParams.AddNested("EncryptionOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. 
func (s *CreateBrokerRequest) SetAutoMinorVersionUpgrade(v bool) *CreateBrokerRequest { s.AutoMinorVersionUpgrade = &v @@ -2486,6 +2504,12 @@ func (s *CreateBrokerRequest) SetDeploymentMode(v string) *CreateBrokerRequest { return s } +// SetEncryptionOptions sets the EncryptionOptions field's value. +func (s *CreateBrokerRequest) SetEncryptionOptions(v *EncryptionOptions) *CreateBrokerRequest { + s.EncryptionOptions = v + return s +} + // SetEngineType sets the EngineType field's value. func (s *CreateBrokerRequest) SetEngineType(v string) *CreateBrokerRequest { s.EngineType = &v @@ -3274,6 +3298,9 @@ type DescribeBrokerResponse struct { // The deployment mode of the broker. DeploymentMode *string `locationName:"deploymentMode" type:"string" enum:"DeploymentMode"` + // Encryption options for the broker. + EncryptionOptions *EncryptionOptions `locationName:"encryptionOptions" type:"structure"` + // The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ. EngineType *string `locationName:"engineType" type:"string" enum:"EngineType"` @@ -3291,6 +3318,10 @@ type DescribeBrokerResponse struct { PendingEngineVersion *string `locationName:"pendingEngineVersion" type:"string"` + PendingHostInstanceType *string `locationName:"pendingHostInstanceType" type:"string"` + + PendingSecurityGroups []*string `locationName:"pendingSecurityGroups" type:"list"` + PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` SecurityGroups []*string `locationName:"securityGroups" type:"list"` @@ -3366,6 +3397,12 @@ func (s *DescribeBrokerResponse) SetDeploymentMode(v string) *DescribeBrokerResp return s } +// SetEncryptionOptions sets the EncryptionOptions field's value. +func (s *DescribeBrokerResponse) SetEncryptionOptions(v *EncryptionOptions) *DescribeBrokerResponse { + s.EncryptionOptions = v + return s +} + // SetEngineType sets the EngineType field's value. func (s *DescribeBrokerResponse) SetEngineType(v string) *DescribeBrokerResponse { s.EngineType = &v @@ -3402,6 +3439,18 @@ func (s *DescribeBrokerResponse) SetPendingEngineVersion(v string) *DescribeBrok return s } +// SetPendingHostInstanceType sets the PendingHostInstanceType field's value. +func (s *DescribeBrokerResponse) SetPendingHostInstanceType(v string) *DescribeBrokerResponse { + s.PendingHostInstanceType = &v + return s +} + +// SetPendingSecurityGroups sets the PendingSecurityGroups field's value. +func (s *DescribeBrokerResponse) SetPendingSecurityGroups(v []*string) *DescribeBrokerResponse { + s.PendingSecurityGroups = v + return s +} + // SetPubliclyAccessible sets the PubliclyAccessible field's value. func (s *DescribeBrokerResponse) SetPubliclyAccessible(v bool) *DescribeBrokerResponse { s.PubliclyAccessible = &v @@ -3769,6 +3818,56 @@ func (s *DescribeUserResponse) SetUsername(v string) *DescribeUserResponse { return s } +// Encryption options for the broker. +type EncryptionOptions struct { + _ struct{} `type:"structure"` + + // The customer master key (CMK) to use for the AWS Key Management Service (KMS). + // This key is used to encrypt your data at rest. If not provided, Amazon MQ + // will use a default CMK to encrypt your data. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // Enables the use of an AWS owned CMK using AWS Key Management Service (KMS). 
+ // + // UseAwsOwnedKey is a required field + UseAwsOwnedKey *bool `locationName:"useAwsOwnedKey" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s EncryptionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptionOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionOptions"} + if s.UseAwsOwnedKey == nil { + invalidParams.Add(request.NewErrParamRequired("UseAwsOwnedKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *EncryptionOptions) SetKmsKeyId(v string) *EncryptionOptions { + s.KmsKeyId = &v + return s +} + +// SetUseAwsOwnedKey sets the UseAwsOwnedKey field's value. +func (s *EncryptionOptions) SetUseAwsOwnedKey(v bool) *EncryptionOptions { + s.UseAwsOwnedKey = &v + return s +} + // Id of the engine version. type EngineVersion struct { _ struct{} `type:"structure"` @@ -4456,8 +4555,12 @@ type UpdateBrokerRequest struct { EngineVersion *string `locationName:"engineVersion" type:"string"` + HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` + + SecurityGroups []*string `locationName:"securityGroups" type:"list"` } // String returns the string representation @@ -4510,12 +4613,24 @@ func (s *UpdateBrokerRequest) SetEngineVersion(v string) *UpdateBrokerRequest { return s } +// SetHostInstanceType sets the HostInstanceType field's value. +func (s *UpdateBrokerRequest) SetHostInstanceType(v string) *UpdateBrokerRequest { + s.HostInstanceType = &v + return s +} + // SetLogs sets the Logs field's value. func (s *UpdateBrokerRequest) SetLogs(v *Logs) *UpdateBrokerRequest { s.Logs = v return s } +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *UpdateBrokerRequest) SetSecurityGroups(v []*string) *UpdateBrokerRequest { + s.SecurityGroups = v + return s +} + type UpdateBrokerResponse struct { _ struct{} `type:"structure"` @@ -4528,8 +4643,12 @@ type UpdateBrokerResponse struct { EngineVersion *string `locationName:"engineVersion" type:"string"` + HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` + + SecurityGroups []*string `locationName:"securityGroups" type:"list"` } // String returns the string representation @@ -4566,12 +4685,24 @@ func (s *UpdateBrokerResponse) SetEngineVersion(v string) *UpdateBrokerResponse return s } +// SetHostInstanceType sets the HostInstanceType field's value. +func (s *UpdateBrokerResponse) SetHostInstanceType(v string) *UpdateBrokerResponse { + s.HostInstanceType = &v + return s +} + // SetLogs sets the Logs field's value. func (s *UpdateBrokerResponse) SetLogs(v *Logs) *UpdateBrokerResponse { s.Logs = v return s } +// SetSecurityGroups sets the SecurityGroups field's value. 
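The new EncryptionOptions type lets a broker pin its at-rest encryption to a specific KMS CMK. A sketch of the relevant part of a CreateBroker call (the broker name and key ARN are placeholders, and a real request also needs the usual engine, instance-type, and user settings):

    mqsvc := mq.New(session.Must(session.NewSession()))
    if _, err := mqsvc.CreateBroker(&mq.CreateBrokerRequest{
    	BrokerName: aws.String("example-broker"), // placeholder
    	EncryptionOptions: &mq.EncryptionOptions{
    		UseAwsOwnedKey: aws.Bool(false), // required; false means supply your own CMK
    		KmsKeyId:       aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE"),
    	},
    }); err != nil {
    	fmt.Println("error:", err)
    }
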
+func (s *UpdateBrokerResponse) SetSecurityGroups(v []*string) *UpdateBrokerResponse { + s.SecurityGroups = v + return s +} + type UpdateConfigurationRequest struct { _ struct{} `type:"structure"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/service.go index 4baaa385e39..d218f98529e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/mq/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MQ { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mq" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MQ { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MQ { svc := &MQ{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go index ac4e6da854c..4afd98929b6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go @@ -1733,10 +1733,7 @@ func (c *Neptune) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *re // incompatible-restore, or incompatible-network, you can only delete it when // the SkipFinalSnapshot parameter is set to true. // -// If the specified DB instance is part of a DB cluster, you can't delete the -// DB instance if both of the following conditions are true: -// -// * The DB instance is the only instance in the DB cluster. +// You can't delete a DB instance if it is the only instance in the DB cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2540,7 +2537,7 @@ func (c *Neptune) DescribeDBEngineVersionsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeDBEngineVersions operation. 
// pageNum := 0 // err := client.DescribeDBEngineVersionsPages(params, -// func(page *DescribeDBEngineVersionsOutput, lastPage bool) bool { +// func(page *neptune.DescribeDBEngineVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2572,10 +2569,12 @@ func (c *Neptune) DescribeDBEngineVersionsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2675,7 +2674,7 @@ func (c *Neptune) DescribeDBInstancesWithContext(ctx aws.Context, input *Describ // // Example iterating over at most 3 pages of a DescribeDBInstances operation. // pageNum := 0 // err := client.DescribeDBInstancesPages(params, -// func(page *DescribeDBInstancesOutput, lastPage bool) bool { +// func(page *neptune.DescribeDBInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2707,10 +2706,12 @@ func (c *Neptune) DescribeDBInstancesPagesWithContext(ctx aws.Context, input *De }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2812,7 +2813,7 @@ func (c *Neptune) DescribeDBParameterGroupsWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeDBParameterGroups operation. // pageNum := 0 // err := client.DescribeDBParameterGroupsPages(params, -// func(page *DescribeDBParameterGroupsOutput, lastPage bool) bool { +// func(page *neptune.DescribeDBParameterGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2844,10 +2845,12 @@ func (c *Neptune) DescribeDBParameterGroupsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBParameterGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBParameterGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2947,7 +2950,7 @@ func (c *Neptune) DescribeDBParametersWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeDBParameters operation. // pageNum := 0 // err := client.DescribeDBParametersPages(params, -// func(page *DescribeDBParametersOutput, lastPage bool) bool { +// func(page *neptune.DescribeDBParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2979,10 +2982,12 @@ func (c *Neptune) DescribeDBParametersPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3085,7 +3090,7 @@ func (c *Neptune) DescribeDBSubnetGroupsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeDBSubnetGroups operation. 
// pageNum := 0 // err := client.DescribeDBSubnetGroupsPages(params, -// func(page *DescribeDBSubnetGroupsOutput, lastPage bool) bool { +// func(page *neptune.DescribeDBSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3117,10 +3122,12 @@ func (c *Neptune) DescribeDBSubnetGroupsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3291,7 +3298,7 @@ func (c *Neptune) DescribeEngineDefaultParametersWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. // pageNum := 0 // err := client.DescribeEngineDefaultParametersPages(params, -// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// func(page *neptune.DescribeEngineDefaultParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3323,10 +3330,12 @@ func (c *Neptune) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3505,7 +3514,7 @@ func (c *Neptune) DescribeEventSubscriptionsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. // pageNum := 0 // err := client.DescribeEventSubscriptionsPages(params, -// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// func(page *neptune.DescribeEventSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3537,10 +3546,12 @@ func (c *Neptune) DescribeEventSubscriptionsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3639,7 +3650,7 @@ func (c *Neptune) DescribeEventsWithContext(ctx aws.Context, input *DescribeEven // // Example iterating over at most 3 pages of a DescribeEvents operation. // pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *neptune.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3671,10 +3682,12 @@ func (c *Neptune) DescribeEventsPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3769,7 +3782,7 @@ func (c *Neptune) DescribeOrderableDBInstanceOptionsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeOrderableDBInstanceOptions operation. 
// pageNum := 0 // err := client.DescribeOrderableDBInstanceOptionsPages(params, -// func(page *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { +// func(page *neptune.DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3801,10 +3814,12 @@ func (c *Neptune) DescribeOrderableDBInstanceOptionsPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5961,7 +5976,7 @@ type AddTagsToResourceInput struct { _ struct{} `type:"structure"` // The Amazon Neptune resource that the tags are added to. This value is an - // Amazon Resource Name (ARN). For information about creating an ARN, see Constructing + // Amazon Resource Name (ARN). For information about creating an ARN, see Constructing // an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // ResourceName is a required field @@ -6051,7 +6066,7 @@ type ApplyPendingMaintenanceActionInput struct { OptInType *string `type:"string" required:"true"` // The Amazon Resource Name (ARN) of the resource that the pending maintenance - // action applies to. For information about creating an ARN, see Constructing + // action applies to. For information about creating an ARN, see Constructing // an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // ResourceIdentifier is a required field @@ -6187,6 +6202,9 @@ func (s *CharacterSet) SetCharacterSetName(v string) *CharacterSet { // The configuration setting for the log types to be enabled for export to CloudWatch // Logs for a specific DB instance or DB cluster. +// +// The EnableLogTypes and DisableLogTypes arrays determine which logs will be +// exported (or not exported) to CloudWatch Logs. type CloudwatchLogsExportConfiguration struct { _ struct{} `type:"structure"` @@ -6223,7 +6241,7 @@ type CopyDBClusterParameterGroupInput struct { _ struct{} `type:"structure"` // The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter - // group. For information about creating an ARN, see Constructing an Amazon + // group. For information about creating an ARN, see Constructing an Amazon // Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // Constraints: @@ -6504,7 +6522,7 @@ type CopyDBParameterGroupInput struct { _ struct{} `type:"structure"` // The identifier or ARN for the source DB parameter group. For information - // about creating an ARN, see Constructing an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). + // about creating an ARN, see Constructing an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // Constraints: // @@ -6678,6 +6696,10 @@ type CreateDBClusterInput struct { // you are creating. DatabaseName *string `type:"string"` + // The list of log types that need to be enabled for exporting to CloudWatch + // Logs. 
+ EnableCloudwatchLogsExports []*string `type:"list"` + // True to enable mapping of AWS Identity and Access Management (IAM) accounts // to database accounts, and otherwise false. // @@ -6757,7 +6779,7 @@ type CreateDBClusterInput struct { // backups are enabled using the BackupRetentionPeriod parameter. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. To see the time blocks available, see Adjusting // the Preferred Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) // in the Amazon Neptune User Guide. // @@ -6779,7 +6801,7 @@ type CreateDBClusterInput struct { // // The default is a 30-minute window selected at random from an 8-hour block // of time for each AWS Region, occurring on a random day of the week. To see - // the time blocks available, see Adjusting the Preferred Maintenance Window + // the time blocks available, see Adjusting the Preferred Maintenance Window // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) // in the Amazon Neptune User Guide. // @@ -6870,6 +6892,12 @@ func (s *CreateDBClusterInput) SetDatabaseName(v string) *CreateDBClusterInput { return s } +// SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. +func (s *CreateDBClusterInput) SetEnableCloudwatchLogsExports(v []*string) *CreateDBClusterInput { + s.EnableCloudwatchLogsExports = v + return s +} + // SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value. func (s *CreateDBClusterInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBClusterInput { s.EnableIAMDatabaseAuthentication = &v @@ -8229,6 +8257,10 @@ type DBCluster struct { // restore. EarliestRestorableTime *time.Time `type:"timestamp"` + // A list of log types that this DB cluster is configured to export to CloudWatch + // Logs. + EnabledCloudwatchLogsExports []*string `type:"list"` + // Specifies the connection endpoint for the primary instance of the DB cluster. Endpoint *string `type:"string"` @@ -8409,6 +8441,12 @@ func (s *DBCluster) SetEarliestRestorableTime(v time.Time) *DBCluster { return s } +// SetEnabledCloudwatchLogsExports sets the EnabledCloudwatchLogsExports field's value. +func (s *DBCluster) SetEnabledCloudwatchLogsExports(v []*string) *DBCluster { + s.EnabledCloudwatchLogsExports = v + return s +} + // SetEndpoint sets the Endpoint field's value. func (s *DBCluster) SetEndpoint(v string) *DBCluster { s.Endpoint = &v @@ -13176,7 +13214,7 @@ type ListTagsForResourceInput struct { Filters []*Filter `locationNameList:"Filter" type:"list"` // The Amazon Neptune resource with tags to be listed. This value is an Amazon - // Resource Name (ARN). For information about creating an ARN, see Constructing + // Resource Name (ARN). For information about creating an ARN, see Constructing // an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // ResourceName is a required field @@ -13280,6 +13318,10 @@ type ModifyDBClusterInput struct { // * Must be a value from 1 to 35 BackupRetentionPeriod *int64 `type:"integer"` + // The configuration setting for the log types to be enabled for export to CloudWatch + // Logs for a specific DB cluster. 
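Switching log export on for an existing Neptune cluster then reduces to a ModifyDBCluster call carrying this configuration. A sketch (EnableLogTypes and DisableLogTypes are the arrays named in the comment above; the cluster identifier and the "audit" log type are assumptions):

    nep := neptune.New(session.Must(session.NewSession()))
    if _, err := nep.ModifyDBCluster(&neptune.ModifyDBClusterInput{
    	DBClusterIdentifier: aws.String("my-neptune-cluster"), // placeholder
    	CloudwatchLogsExportConfiguration: &neptune.CloudwatchLogsExportConfiguration{
    		EnableLogTypes: []*string{aws.String("audit")}, // assumed log type
    	},
    }); err != nil {
    	fmt.Println("error:", err)
    }
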
+ CloudwatchLogsExportConfiguration *CloudwatchLogsExportConfiguration `type:"structure"` + // The DB cluster identifier for the cluster being modified. This parameter // is not case-sensitive. // @@ -13414,6 +13456,12 @@ func (s *ModifyDBClusterInput) SetBackupRetentionPeriod(v int64) *ModifyDBCluste return s } +// SetCloudwatchLogsExportConfiguration sets the CloudwatchLogsExportConfiguration field's value. +func (s *ModifyDBClusterInput) SetCloudwatchLogsExportConfiguration(v *CloudwatchLogsExportConfiguration) *ModifyDBClusterInput { + s.CloudwatchLogsExportConfiguration = v + return s +} + // SetDBClusterIdentifier sets the DBClusterIdentifier field's value. func (s *ModifyDBClusterInput) SetDBClusterIdentifier(v string) *ModifyDBClusterInput { s.DBClusterIdentifier = &v @@ -15430,7 +15478,7 @@ type RemoveTagsFromResourceInput struct { // The Amazon Neptune resource that the tags are removed from. This value is // an Amazon Resource Name (ARN). For information about creating an ARN, see - // Constructing an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). + // Constructing an Amazon Resource Name (ARN) (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // // ResourceName is a required field ResourceName *string `type:"string" required:"true"` @@ -15751,6 +15799,10 @@ type RestoreDBClusterFromSnapshotInput struct { // Not supported. DatabaseName *string `type:"string"` + // The list of logs that the restored DB cluster is to export to Amazon CloudWatch + // Logs. + EnableCloudwatchLogsExports []*string `type:"list"` + // True to enable mapping of AWS Identity and Access Management (IAM) accounts // to database accounts, and otherwise false. // @@ -15877,6 +15929,12 @@ func (s *RestoreDBClusterFromSnapshotInput) SetDatabaseName(v string) *RestoreDB return s } +// SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. +func (s *RestoreDBClusterFromSnapshotInput) SetEnableCloudwatchLogsExports(v []*string) *RestoreDBClusterFromSnapshotInput { + s.EnableCloudwatchLogsExports = v + return s +} + // SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value. func (s *RestoreDBClusterFromSnapshotInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBClusterFromSnapshotInput { s.EnableIAMDatabaseAuthentication = &v @@ -15986,6 +16044,10 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` + // The list of logs that the restored DB cluster is to export to CloudWatch + // Logs. + EnableCloudwatchLogsExports []*string `type:"list"` + // True to enable mapping of AWS Identity and Access Management (IAM) accounts // to database accounts, and otherwise false. // @@ -16045,8 +16107,17 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time `type:"timestamp"` - // The type of restore to be performed. The only type of restore currently supported - // is full-copy (the default). + // The type of restore to be performed. You can specify one of the following + // values: + // + // * full-copy - The new DB cluster is restored as a full copy of the source + // DB cluster. + // + // * copy-on-write - The new DB cluster is restored as a clone of the source + // DB cluster. + // + // If you don't specify a RestoreType value, then the new DB cluster is restored + // as a full copy of the source DB cluster. 
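A clone restore under the new copy-on-write RestoreType value might look like this sketch, reusing the nep client from above (identifiers are placeholders; UseLatestRestorableTime is assumed to be the usual alternative to RestoreToTime in this RDS-shaped API):

    if _, err := nep.RestoreDBClusterToPointInTime(&neptune.RestoreDBClusterToPointInTimeInput{
    	DBClusterIdentifier:       aws.String("my-neptune-clone"),   // placeholder
    	SourceDBClusterIdentifier: aws.String("my-neptune-cluster"), // placeholder
    	RestoreType:               aws.String("copy-on-write"),
    	UseLatestRestorableTime:   aws.Bool(true), // assumed field
    }); err != nil {
    	fmt.Println("error:", err)
    }
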
RestoreType *string `type:"string"` // The identifier of the source DB cluster from which to restore. @@ -16117,6 +16188,12 @@ func (s *RestoreDBClusterToPointInTimeInput) SetDBSubnetGroupName(v string) *Res return s } +// SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. +func (s *RestoreDBClusterToPointInTimeInput) SetEnableCloudwatchLogsExports(v []*string) *RestoreDBClusterToPointInTimeInput { + s.EnableCloudwatchLogsExports = v + return s +} + // SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value. func (s *RestoreDBClusterToPointInTimeInput) SetEnableIAMDatabaseAuthentication(v bool) *RestoreDBClusterToPointInTimeInput { s.EnableIAMDatabaseAuthentication = &v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go index 3ddc5e5fba7..d5bb045c0c5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Neptune { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Neptune { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Neptune { svc := &Neptune{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go index e2ea62709ee..b16906833d2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -69,7 +69,7 @@ func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *reque // Required Permissions: To use this action, an AWS Identity and Access Management // (IAM) user must have a Manage permissions level for the stack or an attached // policy that explicitly grants permissions. For more information on user permissions, -// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -156,12 +156,12 @@ func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.R // The volume must first be registered with the stack by calling RegisterVolume. 
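For the expanded RestoreType documentation above, a sketch of a clone-style point-in-time restore. "copy-on-write" is one of the two documented values; the identifiers are hypothetical, and UseLatestRestorableTime is assumed to exist on this input by analogy with the RDS API:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/neptune"
)

func main() {
	svc := neptune.New(session.Must(session.NewSession()))

	input := &neptune.RestoreDBClusterToPointInTimeInput{
		DBClusterIdentifier:       aws.String("example-clone"),   // hypothetical
		SourceDBClusterIdentifier: aws.String("example-cluster"), // hypothetical
		RestoreType:               aws.String("copy-on-write"),
		UseLatestRestorableTime:   aws.Bool(true), // assumed field
	}
	// New setter introduced in this hunk.
	input.SetEnableCloudwatchLogsExports([]*string{aws.String("audit")})

	if _, err := svc.RestoreDBClusterToPointInTime(input); err != nil {
		log.Fatal(err)
	}
}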
// After you register the volume, you must call UpdateVolume to specify a mount // point before calling AssignVolume. For more information, see Resource Management -// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -246,12 +246,12 @@ func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (re // // Associates one of the stack's registered Elastic IP addresses with a specified // instance. The address must first be registered with the stack by calling -// RegisterElasticIp. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// RegisterElasticIp. For more information, see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -337,16 +337,16 @@ func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBala // Attaches an Elastic Load Balancing load balancer to a specified layer. AWS // OpsWorks Stacks does not support Application Load Balancer. You can only // use Classic Load Balancer with AWS OpsWorks Stacks. For more information, -// see Elastic Load Balancing (http://docs.aws.amazon.com/opsworks/latest/userguide/layers-elb.html). +// see Elastic Load Balancing (https://docs.aws.amazon.com/opsworks/latest/userguide/layers-elb.html). // // You must create the Elastic Load Balancing instance separately, by using // the Elastic Load Balancing console, API, or CLI. For more information, see -// Elastic Load Balancing Developer Guide (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). +// Elastic Load Balancing Developer Guide (https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
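The AssignVolume documentation above prescribes an explicit ordering: RegisterVolume, then UpdateVolume to set a mount point, then AssignVolume. A condensed sketch of that sequence, with all identifiers hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// 1. Register the EBS volume with the stack.
	reg, err := svc.RegisterVolume(&opsworks.RegisterVolumeInput{
		Ec2VolumeId: aws.String("vol-0123456789abcdef0"), // hypothetical
		StackId:     aws.String("stack-id"),              // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Set a mount point before assignment, as the doc comment requires.
	if _, err := svc.UpdateVolume(&opsworks.UpdateVolumeInput{
		VolumeId:   reg.VolumeId,
		MountPoint: aws.String("/mnt/data"),
	}); err != nil {
		log.Fatal(err)
	}

	// 3. Only now assign the registered volume to an instance.
	if _, err := svc.AssignVolume(&opsworks.AssignVolumeInput{
		VolumeId:   reg.VolumeId,
		InstanceId: aws.String("instance-id"), // hypothetical
	}); err != nil {
		log.Fatal(err)
	}
}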
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -429,12 +429,12 @@ func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Reque // CloneStack API operation for AWS OpsWorks. // // Creates a clone of a specified stack. For more information, see Clone a Stack -// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). // By default, all parameters are set to the values used by the parent stack. // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -517,12 +517,12 @@ func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request // CreateApp API operation for AWS OpsWorks. // // Creates an app for a specified stack. For more information, see Creating -// Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). +// Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -605,13 +605,13 @@ func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *r // CreateDeployment API operation for AWS OpsWorks. // // Runs deployment or stack commands. For more information, see Deploying Apps -// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) -// and Run Stack Commands (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) +// and Run Stack Commands (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). // // Required Permissions: To use this action, an IAM user must have a Deploy // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information on user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -694,12 +694,12 @@ func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *reque // CreateInstance API operation for AWS OpsWorks. // // Creates an instance in a specified stack. For more information, see Adding -// an Instance to a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). +// an Instance to a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -781,7 +781,7 @@ func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Req // CreateLayer API operation for AWS OpsWorks. // -// Creates a layer. For more information, see How to Create a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html). +// Creates a layer. For more information, see How to Create a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html). // // You should use CreateLayer for noncustom layer types such as PHP App Server // only if the stack does not have an existing layer of that type. A stack can @@ -793,7 +793,7 @@ func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Req // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -875,11 +875,11 @@ func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Req // CreateStack API operation for AWS OpsWorks. // -// Creates a new stack. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). +// Creates a new stack. For more information, see Create a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
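For the CreateDeployment operation documented above ("runs deployment or stack commands"), a minimal sketch deploying an app; the stack and app IDs are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	out, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
		StackId: aws.String("stack-id"), // hypothetical
		AppId:   aws.String("app-id"),   // hypothetical
		Command: &opsworks.DeploymentCommand{
			Name: aws.String("deploy"), // a standard deployment command name
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deployment:", aws.StringValue(out.DeploymentId))
}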
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -962,7 +962,7 @@ func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1047,7 +1047,7 @@ func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1133,12 +1133,12 @@ func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *reque // Deletes a specified instance, which terminates the associated Amazon EC2 // instance. You must stop an instance before you can delete it. // -// For more information, see Deleting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). +// For more information, see Deleting Instances (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1223,12 +1223,12 @@ func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Req // // Deletes a specified layer. You must first stop and then delete all associated // instances or unassign registered instances. For more information, see How -// to Delete a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). +// to Delete a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1313,12 +1313,12 @@ func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Req // // Deletes a specified stack. You must first delete all instances, layers, and // apps or deregister registered instances. For more information, see Shut Down -// a Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). +// a Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1405,7 +1405,7 @@ func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1489,12 +1489,12 @@ func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) // DeregisterEcsCluster API operation for AWS OpsWorks. // // Deregisters a specified Amazon ECS cluster from a stack. For more information, -// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete). +// see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants -// permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html -// (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions. For more information on user permissions, see https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html +// (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1578,12 +1578,12 @@ func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) ( // DeregisterElasticIp API operation for AWS OpsWorks. 
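The CreateLayer documentation above notes that noncustom layer types may exist at most once per stack, whereas custom layers do not carry that restriction. A sketch creating a custom layer; all names and IDs are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// A "custom" layer avoids the one-per-stack limit on built-in layer types.
	out, err := svc.CreateLayer(&opsworks.CreateLayerInput{
		StackId:   aws.String("stack-id"), // hypothetical
		Type:      aws.String("custom"),
		Name:      aws.String("Example Layer"),
		Shortname: aws.String("example"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer:", aws.StringValue(out.LayerId))
}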
// // Deregisters a specified Elastic IP address. The address can then be registered -// by another stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// by another stack. For more information, see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1673,7 +1673,7 @@ func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (re // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1761,7 +1761,7 @@ func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstance // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1845,12 +1845,12 @@ func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *r // DeregisterVolume API operation for AWS OpsWorks. // // Deregisters an Amazon EBS volume. The volume can then be registered by another -// stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// stack. For more information, see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2023,7 +2023,7 @@ func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.R // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2112,7 +2112,7 @@ func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *r // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2201,7 +2201,7 @@ func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) ( // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2297,7 +2297,7 @@ func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) ( // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack or an attached policy that explicitly // grants permission. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // This call accepts only one resource-identifying parameter. // @@ -2348,7 +2348,7 @@ func (c *OpsWorks) DescribeEcsClustersWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeEcsClusters operation. 
//    pageNum := 0
//    err := client.DescribeEcsClustersPages(params,
-//        func(page *DescribeEcsClustersOutput, lastPage bool) bool {
+//        func(page *opsworks.DescribeEcsClustersOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
@@ -2380,10 +2380,12 @@ func (c *OpsWorks) DescribeEcsClustersPagesWithContext(ctx aws.Context, input *D
		},
	}

-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*DescribeEcsClustersOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*DescribeEcsClustersOutput), !p.HasNextPage()) {
+			break
+		}
	}
+
	return p.Err()
}
@@ -2431,14 +2433,14 @@ func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (re
// DescribeElasticIps API operation for AWS OpsWorks.
//
-// Describes Elastic IP addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
+// Describes Elastic IP addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
//
// This call accepts only one resource-identifying parameter.
//
// Required Permissions: To use this action, an IAM user must have a Show, Deploy,
// or Manage permissions level for the stack, or an attached policy that explicitly
// grants permissions. For more information about user permissions, see Managing
-// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2527,7 +2529,7 @@ func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoa
// Required Permissions: To use this action, an IAM user must have a Show, Deploy,
// or Manage permissions level for the stack, or an attached policy that explicitly
// grants permissions. For more information about user permissions, see Managing
-// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2616,7 +2618,7 @@ func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req
// Required Permissions: To use this action, an IAM user must have a Show, Deploy,
// or Manage permissions level for the stack, or an attached policy that explicitly
// grants permissions. For more information about user permissions, see Managing
-// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2705,7 +2707,7 @@ func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *reque
// Required Permissions: To use this action, an IAM user must have a Show, Deploy,
// or Manage permissions level for the stack, or an attached policy that explicitly
// grants permissions.
For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2794,7 +2796,7 @@ func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedA // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2880,7 +2882,7 @@ func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInpu // // Required Permissions: To use this action, an IAM user must have self-management // enabled or an attached policy that explicitly grants permissions. For more -// information about user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// information about user permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3033,7 +3035,7 @@ func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) ( // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3122,7 +3124,7 @@ func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (re // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3209,7 +3211,7 @@ func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesIn // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // This call accepts only one resource-identifying parameter. // @@ -3298,7 +3300,7 @@ func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInpu // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // This call accepts only one resource-identifying parameter. // @@ -3387,7 +3389,7 @@ func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeSta // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3475,7 +3477,7 @@ func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3562,7 +3564,7 @@ func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *reque // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3651,7 +3653,7 @@ func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedA // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3737,7 +3739,7 @@ func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3826,7 +3828,7 @@ func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *req // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information about user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3914,7 +3916,7 @@ func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBala // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3996,12 +3998,12 @@ func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInpu // // Disassociates an Elastic IP address from its instance. The address remains // registered with the stack. For more information, see Resource Management -// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). 
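The pagination hunks a few sections above rewrite the DescribeEcsClustersPages loop (stopping via break when the callback returns false) and qualify the callback's page type in the godoc example. A short usage sketch against that corrected signature; the stack ID is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	params := &opsworks.DescribeEcsClustersInput{
		StackId: aws.String("stack-id"), // hypothetical
	}

	// Returning false from the callback stops iteration, which is exactly
	// what the rewritten loop implements via break.
	pageNum := 0
	err := svc.DescribeEcsClustersPages(params,
		func(page *opsworks.DescribeEcsClustersOutput, lastPage bool) bool {
			pageNum++
			fmt.Println(len(page.EcsClusters), "clusters on page", pageNum)
			return pageNum <= 3
		})
	if err != nil {
		log.Fatal(err)
	}
}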
// // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4089,7 +4091,7 @@ func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInpu // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4171,6 +4173,7 @@ func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Req // GrantAccess API operation for AWS OpsWorks. // +// // This action can be used only with Windows stacks. // // Grants RDP access to a Windows instance for a specified time period. @@ -4339,12 +4342,12 @@ func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *reque // RebootInstance API operation for AWS OpsWorks. // // Reboots a specified instance. For more information, see Starting, Stopping, -// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// and Rebooting Instances (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4428,12 +4431,12 @@ func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (re // // Registers a specified Amazon ECS cluster with a stack. You can register only // one cluster with a stack. A cluster can be registered with only one stack. -// For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). +// For more information, see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants -// permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+// permissions. For more information on user permissions, see Managing User +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4518,12 +4521,12 @@ func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req // Registers an Elastic IP address with a specified stack. An address can be // registered with only one stack at a time. If the address is already registered, // you must first deregister it by calling DeregisterElasticIp. For more information, -// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4613,18 +4616,18 @@ func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *r // agent on the instance, and registering the instance with the stack. RegisterInstance // handles only the second step. You should instead use the AWS CLI register // command, which performs the entire registration operation. For more information, -// see Registering an Instance with an AWS OpsWorks Stacks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// see Registering an Instance with an AWS OpsWorks Stacks Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). // // Registered instances have the same requirements as instances that are created // by using the CreateInstance API. For example, registered instances must be // running a supported Linux-based operating system, and they must have a supported // instance type. For more information about requirements for instances that -// you want to register, see Preparing the Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register-registering-preparer.html). +// you want to register, see Preparing the Instance (https://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register-registering-preparer.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4712,7 +4715,7 @@ func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInpu // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4797,12 +4800,12 @@ func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *reque // Registers an Amazon EBS volume with a specified stack. A volume can be registered // with only one stack at a time. If the volume is already registered, you must // first deregister it by calling DeregisterVolume. For more information, see -// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4887,7 +4890,7 @@ func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScaling // // Specify the load-based auto scaling configuration for a specified layer. // For more information, see Managing Load with Time-based and Load-based Instances -// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). // // To use load-based auto scaling, you must create a set of load-based auto // scaling instances. Load-based auto scaling operates only on the instances @@ -4897,7 +4900,7 @@ func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScaling // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4981,12 +4984,12 @@ func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request // SetPermission API operation for AWS OpsWorks. // // Specifies a user's permissions. 
For more information, see Security and Permissions -// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5071,12 +5074,12 @@ func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScaling // // Specify the time-based auto scaling configuration for a specified instance. // For more information, see Managing Load with Time-based and Load-based Instances -// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5160,12 +5163,12 @@ func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request // StartInstance API operation for AWS OpsWorks. // // Starts a specified instance. For more information, see Starting, Stopping, -// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// and Rebooting Instances (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5253,7 +5256,7 @@ func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Reque // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
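For the SetTimeBasedAutoScaling operation documented above, a sketch of a weekday business-hours schedule. The WeeklyAutoScalingSchedule shape assumed here (per-day maps keyed by hour-of-day strings, with "on" values) is an assumption about this SDK version, and the instance ID is hypothetical:

package main

import (
	"log"
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// Hours 9-16 mapped to "on"; hours omitted are left off (assumed shape).
	business := map[string]*string{}
	for h := 9; h < 17; h++ {
		business[strconv.Itoa(h)] = aws.String("on")
	}

	_, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
		InstanceId: aws.String("instance-id"), // hypothetical time-based instance
		AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
			Monday:    business,
			Tuesday:   business,
			Wednesday: business,
			Thursday:  business,
			Friday:    business,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}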
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5339,12 +5342,12 @@ func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.R // Stops a specified instance. When you stop a standard instance, the data disappears // and must be reinstalled when you restart the instance. You can stop an Amazon // EBS-backed instance without losing data. For more information, see Starting, -// Stopping, and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// Stopping, and Rebooting Instances (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5432,7 +5435,7 @@ func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5516,7 +5519,7 @@ func (c *OpsWorks) TagResourceRequest(input *TagResourceInput) (req *request.Req // TagResource API operation for AWS OpsWorks. // // Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks -// Stacks. For more information about how tagging works, see Tags (http://docs.aws.amazon.com/opsworks/latest/userguide/tagging.html) +// Stacks. For more information about how tagging works, see Tags (https://docs.aws.amazon.com/opsworks/latest/userguide/tagging.html) // in the AWS OpsWorks User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5608,7 +5611,7 @@ func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *r // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information about user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5692,12 +5695,12 @@ func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *reque // UnassignVolume API operation for AWS OpsWorks. // // Unassigns an assigned Amazon EBS volume. The volume remains registered with -// the stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// the stack. For more information, see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5868,7 +5871,7 @@ func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request // Required Permissions: To use this action, an IAM user must have a Deploy // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information on user permissions, see Managing -// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5952,12 +5955,12 @@ func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *req // UpdateElasticIp API operation for AWS OpsWorks. // // Updates a registered Elastic IP address's name. For more information, see -// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6045,7 +6048,7 @@ func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *reque // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6133,7 +6136,7 @@ func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Req // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6220,7 +6223,7 @@ func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) ( // // Required Permissions: To use this action, an IAM user must have self-management // enabled or an attached policy that explicitly grants permissions. For more -// information about user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// information about user permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6305,7 +6308,7 @@ func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) ( // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6393,7 +6396,7 @@ func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Req // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6480,7 +6483,7 @@ func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req // // Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information about user -// permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+// permissions, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6564,12 +6567,12 @@ func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.R // UpdateVolume API operation for AWS OpsWorks. // // Updates an Amazon EBS volume's name or mount point. For more information, -// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// see Resource Management (https://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User -// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6672,14 +6675,14 @@ type App struct { // An array of EnvironmentVariable objects that specify environment variables // to be associated with the app. After you deploy the app, these variables // are defined on the associated app server instances. For more information, - // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // see Environment Variables (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // // There is no specific limit on the number of environment variables. However, // the size of the associated data structure - which includes the variable names, - // values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This - // limit should accommodate most if not all use cases, but if you do exceed - // it, you will cause an exception (API) with an "Environment: is too large - // (maximum is 10KB)" message. + // values, and protected flag values - cannot exceed 20 KB. This limit should + // accommodate most if not all use cases, but if you do exceed it, you will + // cause an exception (API) with an "Environment: is too large (maximum is 20 + // KB)" message. Environment []*EnvironmentVariable `type:"list"` // The app name. @@ -7060,7 +7063,7 @@ type AutoScalingThresholds struct { // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. // You can either have AWS OpsWorks Stacks update the role for you when you // first use this feature or you can edit the role manually. For more information, - // see Allowing AWS OpsWorks Stacks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). + // see Allowing AWS OpsWorks Stacks to Act on Your Behalf (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). Alarms []*string `type:"list"` // The CPU utilization threshold, as a percent of the available CPU. A value @@ -7162,7 +7165,7 @@ func (s *AutoScalingThresholds) SetThresholdsWaitTime(v int64) *AutoScalingThres } // Describes a block device mapping. 
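// Aside: the Environment limit above (20 KB for names, values, and protected
// flags combined) applies per app. A hypothetical sketch of attaching variables
// at app creation (placeholder IDs and values; setup as in the first sketch):
func createAppWithEnv(client *opsworks.OpsWorks) error {
	_, err := client.CreateApp(&opsworks.CreateAppInput{
		StackId: aws.String("STACK-ID"),
		Name:    aws.String("myapp"),
		Type:    aws.String(opsworks.AppTypeOther),
		Environment: []*opsworks.EnvironmentVariable{
			{Key: aws.String("RAILS_ENV"), Value: aws.String("production")},
			// Secure corresponds to the "protected flag" mentioned above;
			// describe calls return *****FILTERED***** instead of the value.
			{Key: aws.String("API_TOKEN"), Value: aws.String("placeholder"), Secure: aws.Bool(true)},
		},
	})
	return err
}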
This data type maps directly to the Amazon -// EC2 BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) +// EC2 BlockDeviceMapping (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) // data type. type BlockDeviceMapping struct { _ struct{} `type:"structure"` @@ -7179,7 +7182,7 @@ type BlockDeviceMapping struct { // Suppresses the specified device included in the AMI's block device mapping. NoDevice *string `type:"string"` - // The virtual device name. For more information, see BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + // The virtual device name. For more information, see BlockDeviceMapping (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). VirtualName *string `type:"string"` } @@ -7279,7 +7282,7 @@ type CloneStackInput struct { // A ChefConfiguration object that specifies whether to enable Berkshelf and // the Berkshelf version on Chef 11.10 stacks. For more information, see Create - // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` // A list of source stack app IDs to be included in the cloned stack. @@ -7295,8 +7298,8 @@ type CloneStackInput struct { ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. - // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) - // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + // For more information, see Adding Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Cookbooks and Recipes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). CustomCookbooksSource *Source `type:"structure"` // A string that contains user-defined, custom JSON. It is used to override @@ -7306,25 +7309,26 @@ type CloneStackInput struct { // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information about custom JSON, see Use Custom JSON to Modify the - // Stack Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) + // Stack Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) CustomJson *string `type:"string"` // The cloned stack's default Availability Zone, which must be in the specified - // region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // region. For more information, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). // If you also specify a value for DefaultSubnetId, the subnet must be in the // same zone. For more information, see the VpcId parameter description. DefaultAvailabilityZone *string `type:"string"` // The Amazon Resource Name (ARN) of an IAM profile that is the default profile // for all of the stack's EC2 instances. For more information about IAM ARNs, - // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). 
DefaultInstanceProfileArn *string `type:"string"` // The stack's operating system, which must be set to one of the following. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon - // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon + // Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + // 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. @@ -7339,11 +7343,11 @@ type CloneStackInput struct { // // * A custom AMI: Custom. You specify the custom AMI you want to use when // you create instances. For more information about how to use custom AMIs - // with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // with OpsWorks, see Using Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the parent stack's operating system. For more information // about supported operating systems, see AWS OpsWorks Stacks Operating Systems - // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // You can specify a different Linux operating system for the cloned stack, // but you cannot change from Linux to Windows or Windows to Linux. @@ -7351,16 +7355,16 @@ type CloneStackInput struct { // The default root device type. This value is used by default for all instances // in the cloned stack, but you can override it when you create an instance. - // For more information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // For more information, see Storage for the Root Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` // A default Amazon EC2 key pair name. The default value is none. If you specify // a key pair name, AWS OpsWorks installs the public key on the instance and // you can use the private key with an SSH client to log in to the instance. - // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) - // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // For more information, see Using SSH to Communicate with an Instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (https://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). // You can override this setting by specifying a different key pair, or no key - // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + // pair, when you create an instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). DefaultSshKeyName *string `type:"string"` // The stack's default VPC subnet ID. 
This parameter is required if you specify @@ -7384,7 +7388,7 @@ type CloneStackInput struct { // // * Fruits // - // * Greek_Deities + // * Greek_Deities_and_Titans // // * Legendary_creatures_from_Japan // @@ -7406,7 +7410,7 @@ type CloneStackInput struct { Name *string `type:"string"` // The cloned stack AWS region, such as "ap-northeast-2". For more information - // about AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // about AWS regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). Region *string `type:"string"` // The stack AWS Identity and Access Management (IAM) role, which allows AWS @@ -7415,7 +7419,7 @@ type CloneStackInput struct { // you create a stack by using the AWS OpsWorks Stacks console, it creates the // role for you. You can obtain an existing stack's IAM ARN programmatically // by calling DescribePermissions. For more information about IAM ARNs, see - // Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). // // You must set this parameter to a valid service role ARN or the action will // fail; there is no default value. You can specify the source stack's service @@ -7452,7 +7456,7 @@ type CloneStackInput struct { // security group with a layer on creation; custom security groups are required // only for those layers that need custom settings. // - // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // For more information, see Create a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` // The ID of the VPC that the cloned stack is to be launched into. It must be @@ -7478,9 +7482,9 @@ type CloneStackInput struct { // * You must specify a value for DefaultSubnetId. // // For more information about how to use AWS OpsWorks Stacks with a VPC, see - // Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // Running a Stack in a VPC (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information about default VPC and EC2 Classic, see Supported Platforms - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` } @@ -7701,7 +7705,7 @@ func (s *CloudWatchLogsConfiguration) SetLogStreams(v []*CloudWatchLogsLogStream // Describes the Amazon CloudWatch logs configuration for a layer. For detailed // information about members of this data type, see the CloudWatch Logs Agent -// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). +// Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). type CloudWatchLogsLogStream struct { _ struct{} `type:"structure"` @@ -7719,7 +7723,7 @@ type CloudWatchLogsLogStream struct { BufferDuration *int64 `type:"integer"` // Specifies how the time stamp is extracted from logs. For more information, - // see the CloudWatch Logs Agent Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + // see the CloudWatch Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). 
DatetimeFormat *string `type:"string"` // Specifies the encoding of the log file so that the file can be read correctly. @@ -8005,17 +8009,16 @@ type CreateAppInput struct { // An array of EnvironmentVariable objects that specify environment variables // to be associated with the app. After you deploy the app, these variables // are defined on the associated app server instance. For more information, - // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // see Environment Variables (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // // There is no specific limit on the number of environment variables. However, // the size of the associated data structure - which includes the variables' - // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). - // This limit should accommodate most if not all use cases. Exceeding it will - // cause an exception with the message, "Environment: is too large (maximum - // is 10KB)." + // names, values, and protected flag values - cannot exceed 20 KB. This limit + // should accommodate most if not all use cases. Exceeding it will cause an + // exception with the message, "Environment: is too large (maximum is 20KB)." // - // This parameter is supported only by Chef 11.10 stacks. If you have specified - // one or more environment variables, you cannot modify the stack's Chef version. + // If you have specified one or more environment variables, you cannot modify + // the stack's Chef version. Environment []*EnvironmentVariable `type:"list"` // The app name. @@ -8200,14 +8203,15 @@ type CreateDeploymentInput struct { // A user-defined comment. Comment *string `type:"string"` - // A string that contains user-defined, custom JSON. It is used to override - // the corresponding default stack configuration JSON values. The string should - // be in the following format: + // A string that contains user-defined, custom JSON. You can use this parameter + // to override some corresponding default stack configuration JSON values. The + // string should be in the following format: // // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information about custom JSON, see Use Custom JSON to Modify the - // Stack Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + // Stack Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) + // and Overriding Attributes With Custom JSON (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). CustomJson *string `type:"string"` // The instance IDs for the deployment targets. @@ -8340,7 +8344,7 @@ type CreateInstanceInput struct { // A custom AMI ID to be used to create the instance. The AMI should be based // on one of the supported operating systems. For more information, see Using - // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // If you specify a custom AMI, you must set Os to Custom. AmiId *string `type:"string"` @@ -8348,7 +8352,7 @@ type CreateInstanceInput struct { // The instance architecture. The default option is x86_64. Instance types do // not necessarily support both architectures. 
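// Aside: the reworded CustomJson text above (override some default stack
// configuration values per deployment) maps to CreateDeployment. A sketch under
// the same assumptions as the first example (placeholder IDs, illustrative JSON):
func deployWithOverrides(client *opsworks.OpsWorks) error {
	out, err := client.CreateDeployment(&opsworks.CreateDeploymentInput{
		StackId:    aws.String("STACK-ID"),
		AppId:      aws.String("APP-ID"),
		Command:    &opsworks.DeploymentCommand{Name: aws.String(opsworks.DeploymentCommandNameDeploy)},
		CustomJson: aws.String(`{"key1": "value1", "key2": "value2"}`),
		Comment:    aws.String("deploy with custom JSON attribute overrides"),
	})
	if err != nil {
		return err
	}
	log.Printf("deployment %s created", aws.StringValue(out.DeploymentId))
	return nil
}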
For a list of the architectures // that are supported by the different instance types, see Instance Families - // and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // and Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). Architecture *string `type:"string" enum:"Architecture"` // For load-based or time-based instances, the type. Windows stacks can use @@ -8356,11 +8360,11 @@ type CreateInstanceInput struct { AutoScalingType *string `type:"string" enum:"AutoScalingType"` // The instance Availability Zone. For more information, see Regions and Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // (https://docs.aws.amazon.com/general/latest/gr/rande.html). AvailabilityZone *string `type:"string"` // An array of BlockDeviceMapping objects that specify the instance's block - // devices. For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html). + // devices. For more information, see Block Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html). // Note that block device mappings are not supported for custom AMIs. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` @@ -8383,7 +8387,7 @@ type CreateInstanceInput struct { // The instance type, such as t2.micro. For a list of supported instance types, // open the stack in the console, choose Instances, and choose + Instance. The // Size list contains the currently supported types. For more information, see - // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // Instance Families and Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // The parameter values that you use to specify the various types are in the // API Name column of the Available Instance Types table. // @@ -8398,8 +8402,9 @@ type CreateInstanceInput struct { // The instance's operating system, which must be set to one of the following. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon - // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon + // Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + // 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. @@ -8416,19 +8421,19 @@ type CreateInstanceInput struct { // * A custom AMI: Custom. // // For more information about the supported operating systems, see AWS OpsWorks - // Stacks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // Stacks Operating Systems (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. If you set this parameter // to Custom, you must use the CreateInstance action's AmiId parameter to specify // the custom AMI that you want to use. Block device mappings are not supported // if the value is Custom. 
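// Aside: in this SDK version, StackId, LayerIds, and InstanceType are the
// required CreateInstanceInput fields; per the comments above, Os falls back to
// the current Amazon Linux version when omitted. A hedged sketch (placeholder
// IDs; setup as in the first example):
func addInstance(client *opsworks.OpsWorks) error {
	out, err := client.CreateInstance(&opsworks.CreateInstanceInput{
		StackId:      aws.String("STACK-ID"),
		LayerIds:     []*string{aws.String("LAYER-ID")},
		InstanceType: aws.String("t2.micro"),
		Os:           aws.String("Amazon Linux 2018.03"),
	})
	if err != nil {
		return err
	}
	log.Printf("created instance %s", aws.StringValue(out.InstanceId))
	return nil
}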
For more information about supported operating systems, - // see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html)For + // see Operating Systems (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html)For // more information about how to use custom AMIs with AWS OpsWorks Stacks, see - // Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // Using Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). Os *string `type:"string"` // The instance root device type. For more information, see Storage for the - // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // Root Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). RootDeviceType *string `type:"string" enum:"RootDeviceType"` // The instance's Amazon EC2 key-pair name. @@ -8452,7 +8457,7 @@ type CreateInstanceInput struct { // For more information about dedicated hosts, see Dedicated Hosts Overview // (http://aws.amazon.com/ec2/dedicated-hosts/) and Amazon EC2 Dedicated Hosts // (http://aws.amazon.com/ec2/dedicated-hosts/). For more information about - // dedicated instances, see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html) + // dedicated instances, see Dedicated Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html) // and Amazon EC2 Dedicated Instances (http://aws.amazon.com/ec2/purchasing-options/dedicated-instances/). Tenancy *string `type:"string"` @@ -8630,13 +8635,13 @@ type CreateLayerInput struct { // ARN. Attributes map[string]*string `type:"map"` - // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) - // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // Whether to automatically assign an Elastic IP address (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignElasticIps *bool `type:"boolean"` // For stacks that are running in a VPC, whether to automatically assign a public // IP address to the layer's instances. For more information, see How to Edit - // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignPublicIps *bool `type:"boolean"` // Specifies CloudWatch Logs configuration options for the layer. For more information, @@ -8644,12 +8649,12 @@ type CreateLayerInput struct { CloudWatchLogsConfiguration *CloudWatchLogsConfiguration `type:"structure"` // The ARN of an IAM profile to be used for the layer's EC2 instances. For more - // information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // information about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). 
CustomInstanceProfileArn *string `type:"string"` // A JSON-formatted string containing custom stack configuration and deployment // attributes to be installed on the layer's instances. For more information, - // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // see Using Custom JSON (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). // This feature is supported as of version 1.7.42 of the AWS CLI. CustomJson *string `type:"string"` @@ -8692,7 +8697,7 @@ type CreateLayerInput struct { // to the alphanumeric characters, '-', '_', and '.'. // // The built-in layers' short names are defined by AWS OpsWorks Stacks. For - // more information, see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + // more information, see the Layer Reference (https://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). // // Shortname is a required field Shortname *string `type:"string" required:"true"` @@ -8918,7 +8923,7 @@ type CreateStackInput struct { // A ChefConfiguration object that specifies whether to enable Berkshelf and // the Berkshelf version on Chef 11.10 stacks. For more information, see Create - // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` // The configuration manager. When you create a stack we recommend that you @@ -8928,8 +8933,8 @@ type CreateStackInput struct { ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. - // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) - // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + // For more information, see Adding Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Cookbooks and Recipes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). CustomCookbooksSource *Source `type:"structure"` // A string that contains user-defined, custom JSON. It can be used to override @@ -8939,18 +8944,18 @@ type CreateStackInput struct { // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information about custom JSON, see Use Custom JSON to Modify the - // Stack Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + // Stack Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). CustomJson *string `type:"string"` // The stack's default Availability Zone, which must be in the specified region. - // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // For more information, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). // If you also specify a value for DefaultSubnetId, the subnet must be in the // same zone. For more information, see the VpcId parameter description. DefaultAvailabilityZone *string `type:"string"` // The Amazon Resource Name (ARN) of an IAM profile that is the default profile // for all of the stack's EC2 instances. 
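// Aside: the Shortname rules above apply to custom layers, and custom recipes
// use the "cookbookname::recipename" convention described at the Layer type
// later in this file. A sketch (placeholder stack ID and cookbook name; setup
// as in the first example):
func addCustomLayer(client *opsworks.OpsWorks) error {
	_, err := client.CreateLayer(&opsworks.CreateLayerInput{
		StackId:   aws.String("STACK-ID"),
		Type:      aws.String(opsworks.LayerTypeCustom),
		Name:      aws.String("App Servers"),
		Shortname: aws.String("app-servers"),
		CustomRecipes: &opsworks.Recipes{
			Setup:  []*string{aws.String("mycookbook::setup")},
			Deploy: []*string{aws.String("mycookbook::deploy")},
		},
	})
	return err
}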
For more information about IAM ARNs, - // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). // // DefaultInstanceProfileArn is a required field DefaultInstanceProfileArn *string `type:"string" required:"true"` @@ -8960,8 +8965,9 @@ type CreateStackInput struct { // You can specify one of the following. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon - // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon + // Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + // 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. @@ -8976,26 +8982,26 @@ type CreateStackInput struct { // Windows Server 2012 R2 with SQL Server Web. // // * A custom AMI: Custom. You specify the custom AMI you want to use when - // you create instances. For more information, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // you create instances. For more information, see Using Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the current Amazon Linux version. For more information // about supported operating systems, see AWS OpsWorks Stacks Operating Systems - // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` // The default root device type. This value is the default for all instances // in the stack, but you can override it when you create an instance. The default // option is instance-store. For more information, see Storage for the Root - // Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` // A default Amazon EC2 key pair name. The default value is none. If you specify // a key pair name, AWS OpsWorks installs the public key on the instance and // you can use the private key with an SSH client to log in to the instance. - // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) - // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // For more information, see Using SSH to Communicate with an Instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (https://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). // You can override this setting by specifying a different key pair, or no key - // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + // pair, when you create an instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). DefaultSshKeyName *string `type:"string"` // The stack's default VPC subnet ID. 
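// Aside: per the CreateStackInput comments in this hunk, Name, Region,
// ServiceRoleArn, and DefaultInstanceProfileArn must all be supplied; both ARNs
// below are placeholders in the usual documentation account (111122223333).
// Setup as in the first sketch:
func createStack(client *opsworks.OpsWorks) error {
	out, err := client.CreateStack(&opsworks.CreateStackInput{
		Name:                      aws.String("my-stack"),
		Region:                    aws.String("us-west-2"),
		ServiceRoleArn:            aws.String("arn:aws:iam::111122223333:role/aws-opsworks-service-role"),
		DefaultInstanceProfileArn: aws.String("arn:aws:iam::111122223333:instance-profile/aws-opsworks-ec2-role"),
	})
	if err != nil {
		return err
	}
	log.Printf("created stack %s", aws.StringValue(out.StackId))
	return nil
}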
This parameter is required if you specify @@ -9019,7 +9025,7 @@ type CreateStackInput struct { // // * Fruits // - // * Greek_Deities + // * Greek_Deities_and_Titans // // * Legendary_creatures_from_Japan // @@ -9043,7 +9049,7 @@ type CreateStackInput struct { Name *string `type:"string" required:"true"` // The stack's AWS region, such as ap-south-1. For more information about Amazon - // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). // // In the AWS CLI, this API maps to the --stack-region parameter. If the --stack-region // parameter and the AWS CLI common parameter --region are set to the same value, @@ -9067,7 +9073,7 @@ type CreateStackInput struct { // The stack's AWS Identity and Access Management (IAM) role, which allows AWS // OpsWorks Stacks to work with AWS resources on your behalf. You must set this // parameter to the Amazon Resource Name (ARN) for an existing IAM role. For - // more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // more information about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). // // ServiceRoleArn is a required field ServiceRoleArn *string `type:"string" required:"true"` @@ -9095,7 +9101,7 @@ type CreateStackInput struct { // custom security groups are required only for those layers that need custom // settings. // - // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // For more information, see Create a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` // The ID of the VPC that the stack is to be launched into. The VPC must be @@ -9121,9 +9127,9 @@ type CreateStackInput struct { // * You must specify a value for DefaultSubnetId. // // For more information about how to use AWS OpsWorks Stacks with a VPC, see - // Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // Running a Stack in a VPC (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information about default VPC and EC2-Classic, see Supported Platforms - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` } @@ -9302,7 +9308,7 @@ type CreateUserProfileInput struct { _ struct{} `type:"structure"` // Whether users can specify their own SSH public key through the My Settings - // page. For more information, see Setting an IAM User's Public SSH Key (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + // page. For more information, see Setting an IAM User's Public SSH Key (https://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). AllowSelfManagement *bool `type:"boolean"` // The user's IAM ARN; this can also be a federated user's ARN. @@ -9739,7 +9745,7 @@ type Deployment struct { // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information on custom JSON, see Use Custom JSON to Modify the Stack - // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). 
+ // Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). CustomJson *string `type:"string"` // The deployment ID. @@ -10960,7 +10966,7 @@ type DescribePermissionsInput struct { _ struct{} `type:"structure"` // The user's IAM ARN. This can also be a federated user's ARN. For more information - // about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). IamUserArn *string `type:"string"` // The stack ID. @@ -11721,7 +11727,7 @@ func (s DisassociateElasticIpOutput) GoString() string { } // Describes an Amazon EBS volume. This data type maps directly to the Amazon -// EC2 EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) +// EC2 EbsBlockDevice (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) // data type. type EbsBlockDevice struct { _ struct{} `type:"structure"` @@ -11730,13 +11736,13 @@ type EbsBlockDevice struct { DeleteOnTermination *bool `type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. - // For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + // For more information, see EbsBlockDevice (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). Iops *int64 `type:"integer"` // The snapshot ID. SnapshotId *string `type:"string"` - // The volume size, in GiB. For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + // The volume size, in GiB. For more information, see EbsBlockDevice (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). VolumeSize *int64 `type:"integer"` // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned @@ -11857,7 +11863,7 @@ type ElasticIp struct { // The name. Name *string `type:"string"` - // The AWS region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // The AWS region. For more information, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). Region *string `type:"string"` } @@ -12226,7 +12232,7 @@ type Instance struct { AgentVersion *string `type:"string"` // A custom AMI ID to be used to create the instance. For more information, - // see Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) + // see Instances (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) AmiId *string `type:"string"` // The instance architecture: "i386" or "x86_64". @@ -12239,7 +12245,7 @@ type Instance struct { AutoScalingType *string `type:"string" enum:"AutoScalingType"` // The instance Availability Zone. For more information, see Regions and Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // (https://docs.aws.amazon.com/general/latest/gr/rande.html). AvailabilityZone *string `type:"string"` // An array of BlockDeviceMapping objects that specify the instance's block @@ -12261,7 +12267,7 @@ type Instance struct { // For container instances, the instance's ARN. EcsContainerInstanceArn *string `type:"string"` - // The instance Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). 
+ // The instance Elastic IP address (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). ElasticIp *string `type:"string"` // The instance host name. @@ -12284,7 +12290,7 @@ type Instance struct { InstanceId *string `type:"string"` // The ARN of the instance's IAM profile. For more information about IAM ARNs, - // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). InstanceProfileArn *string `type:"string"` // The instance type, such as t2.micro. @@ -12324,7 +12330,7 @@ type Instance struct { ReportedOs *ReportedOs `type:"structure"` // The instance's root device type. For more information, see Storage for the - // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // Root Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). RootDeviceType *string `type:"string" enum:"RootDeviceType"` // The root device volume ID. @@ -12645,7 +12651,7 @@ func (s *Instance) SetVirtualizationType(v string) *Instance { } // Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata -// service. For more information, see Instance Metadata and User Data (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html). +// service. For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html). type InstanceIdentity struct { _ struct{} `type:"structure"` @@ -12889,13 +12895,13 @@ type Layer struct { // is set to the cluster's ARN. Attributes map[string]*string `type:"map"` - // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) - // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // Whether to automatically assign an Elastic IP address (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignElasticIps *bool `type:"boolean"` // For stacks that are running in a VPC, whether to automatically assign a public // IP address to the layer's instances. For more information, see How to Edit - // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignPublicIps *bool `type:"boolean"` // The Amazon CloudWatch Logs configuration settings for the layer. @@ -12905,7 +12911,7 @@ type Layer struct { CreatedAt *string `type:"string"` // The ARN of the default IAM profile to be used for the layer's EC2 instances. - // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // For more information about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). 
CustomInstanceProfileArn *string `type:"string"` // A JSON formatted string containing the layer's custom stack configuration @@ -12920,11 +12926,10 @@ type Layer struct { // AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, // deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs - // a set of standard recipes for each event. In addition, you can provide custom - // recipes for any or all layers and events. AWS OpsWorks Stacks runs custom - // event recipes after the standard recipes. LayerCustomRecipes specifies the - // custom recipes for a particular layer to be run in response to each of the - // five events. + // a set of standard recipes for each event. You can also provide custom recipes + // for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes + // after the standard recipes. LayerCustomRecipes specifies the custom recipes + // for a particular layer to be run in response to each of the five events. // // To specify a recipe, use the cookbook's directory name in the repository // followed by two colons and the recipe name, which is the recipe's file name @@ -13304,10 +13309,10 @@ type OperatingSystem struct { // operating system. ConfigurationManagers []*OperatingSystemConfigurationManager `type:"list"` - // The ID of a supported operating system, such as Amazon Linux 2017.09. + // The ID of a supported operating system, such as Amazon Linux 2018.03. Id *string `type:"string"` - // The name of the operating system, such as Amazon Linux 2017.09. + // The name of the operating system, such as Amazon Linux 2018.03. Name *string `type:"string"` // A short name for the operating system manufacturer. @@ -13423,7 +13428,7 @@ type Permission struct { AllowSudo *bool `type:"boolean"` // The Amazon Resource Name (ARN) for an AWS Identity and Access Management - // (IAM) role. For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // (IAM) role. For more information about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). IamUserArn *string `type:"string"` // The user's permission level, which must be the following: @@ -13439,7 +13444,7 @@ type Permission struct { // * iam_only // // For more information on the permissions associated with these levels, see - // Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) + // Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) Level *string `type:"string"` // A stack ID. @@ -13491,7 +13496,7 @@ type RaidArray struct { _ struct{} `type:"structure"` // The array's Availability Zone. For more information, see Regions and Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // (https://docs.aws.amazon.com/general/latest/gr/rande.html). AvailabilityZone *string `type:"string"` // When the RAID array was created. @@ -14558,7 +14563,7 @@ type SetPermissionInput struct { // * iam_only // // For more information about the permissions associated with these levels, - // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). + // see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). Level *string `type:"string"` // The stack ID. 
@@ -14703,7 +14708,7 @@ type ShutdownEventConfiguration struct { _ struct{} `type:"structure"` // Whether to enable Elastic Load Balancing connection draining. For more information, - // see Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) + // see Connection Draining (https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) DelayUntilElbConnectionsDrained *bool `type:"boolean"` // The time, in seconds, that AWS OpsWorks Stacks will wait after triggering @@ -14734,8 +14739,8 @@ func (s *ShutdownEventConfiguration) SetExecutionTimeout(v int64) *ShutdownEvent } // Contains the information required to retrieve an app or cookbook from a repository. -// For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) -// or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). +// For more information, see Creating Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) +// or Custom Recipes and Cookbooks (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). type Source struct { _ struct{} `type:"structure"` @@ -14746,8 +14751,8 @@ type Source struct { // // * For HTTP bundles and Subversion repositories, set Password to the password. // - // For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html - // (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + // For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html + // (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). // // In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the // actual value. @@ -14905,7 +14910,7 @@ type Stack struct { Attributes map[string]*string `type:"map"` // A ChefConfiguration object that specifies whether to enable Berkshelf and - // the Berkshelf version. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // the Berkshelf version. For more information, see Create a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` // The configuration manager. @@ -14915,8 +14920,8 @@ type Stack struct { CreatedAt *string `type:"string"` // Contains the information required to retrieve an app or cookbook from a repository. - // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) - // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + // For more information, see Adding Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Cookbooks and Recipes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). 
CustomCookbooksSource *Source `type:"structure"` // A JSON object that contains user-defined attributes to be added to the stack @@ -14927,16 +14932,16 @@ type Stack struct { // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information on custom JSON, see Use Custom JSON to Modify the Stack - // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + // Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). CustomJson *string `type:"string"` // The stack's default Availability Zone. For more information, see Regions - // and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). DefaultAvailabilityZone *string `type:"string"` // The ARN of an IAM profile that is the default profile for all of the stack's // EC2 instances. For more information about IAM ARNs, see Using Identifiers - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). DefaultInstanceProfileArn *string `type:"string"` // The stack's default operating system. @@ -14944,7 +14949,7 @@ type Stack struct { // The default root device type. This value is used by default for all instances // in the stack, but you can override it when you create an instance. For more - // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // information, see Storage for the Root Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` // A default Amazon EC2 key pair for the stack's instances. You can override @@ -14961,7 +14966,7 @@ type Stack struct { Name *string `type:"string"` // The stack AWS region, such as "ap-northeast-2". For more information about - // AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // AWS regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). Region *string `type:"string"` // The stack AWS Identity and Access Management (IAM) role. @@ -15334,7 +15339,12 @@ func (s StartStackOutput) GoString() string { type StopInstanceInput struct { _ struct{} `type:"structure"` - // Specifies whether to force an instance to stop. + // Specifies whether to force an instance to stop. If the instance's root device + // type is ebs, or EBS-backed, adding the Force parameter to the StopInstances + // API call disassociates the AWS OpsWorks Stacks instance from EC2, and forces + // deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated + // instance in EC2 after troubleshooting and replacing the AWS OpsWorks Stacks + // instance with a new one. Force *bool `type:"boolean"` // The instance ID. @@ -15814,17 +15824,16 @@ type UpdateAppInput struct { // An array of EnvironmentVariable objects that specify environment variables // to be associated with the app. After you deploy the app, these variables // are defined on the associated app server instances.For more information, - // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). 
+ // see Environment Variables (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // // There is no specific limit on the number of environment variables. However, // the size of the associated data structure - which includes the variables' - // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). - // This limit should accommodate most if not all use cases. Exceeding it will - // cause an exception with the message, "Environment: is too large (maximum - // is 10KB)." + // names, values, and protected flag values - cannot exceed 20 KB. This limit + // should accommodate most if not all use cases. Exceeding it will cause an + // exception with the message, "Environment: is too large (maximum is 20 KB)." // - // This parameter is supported only by Chef 11.10 stacks. If you have specified - // one or more environment variables, you cannot modify the stack's Chef version. + // If you have specified one or more environment variables, you cannot modify + // the stack's Chef version. Environment []*EnvironmentVariable `type:"list"` // The app name. @@ -16043,7 +16052,7 @@ type UpdateInstanceInput struct { // The instance architecture. Instance types do not necessarily support both // architectures. For a list of the architectures that are supported by the - // different instance types, see Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // different instance types, see Instance Families and Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). Architecture *string `type:"string" enum:"Architecture"` // For load-based or time-based instances, the type. Windows stacks can use @@ -16074,7 +16083,7 @@ type UpdateInstanceInput struct { // The instance type, such as t2.micro. For a list of supported instance types, // open the stack in the console, choose Instances, and choose + Instance. The // Size list contains the currently supported types. For more information, see - // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // Instance Families and Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // The parameter values that you use to specify the various types are in the // API Name column of the Available Instance Types table. InstanceType *string `type:"string"` @@ -16086,8 +16095,9 @@ type UpdateInstanceInput struct { // You cannot update an instance that is using a custom AMI. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon - // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon + // Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + // 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. @@ -16102,14 +16112,14 @@ type UpdateInstanceInput struct { // Windows Server 2012 R2 with SQL Server Web. // // For more information about supported operating systems, see AWS OpsWorks - // Stacks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // Stacks Operating Systems (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. 
If you set this parameter // to Custom, you must use the AmiId parameter to specify the custom AMI that // you want to use. For more information about supported operating systems, - // see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // see Operating Systems (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // For more information about how to use custom AMIs with OpsWorks, see Using - // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // You can specify a different Linux operating system for the updated stack, // but you cannot change from Linux to Windows or Windows to Linux. @@ -16234,13 +16244,13 @@ type UpdateLayerInput struct { // One or more user-defined key/value pairs to be added to the stack attributes. Attributes map[string]*string `type:"map"` - // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) - // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // Whether to automatically assign an Elastic IP address (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignElasticIps *bool `type:"boolean"` // For stacks that are running in a VPC, whether to automatically assign a public // IP address to the layer's instances. For more information, see How to Edit - // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + // a Layer (https://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignPublicIps *bool `type:"boolean"` // Specifies CloudWatch Logs configuration options for the layer. For more information, @@ -16248,12 +16258,12 @@ type UpdateLayerInput struct { CloudWatchLogsConfiguration *CloudWatchLogsConfiguration `type:"structure"` // The ARN of an IAM profile to be used for all of the layer's EC2 instances. - // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // For more information about IAM ARNs, see Using Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). CustomInstanceProfileArn *string `type:"string"` // A JSON-formatted string containing custom stack configuration and deployment // attributes to be installed on the layer's instances. For more information, - // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // see Using Custom JSON (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). CustomJson *string `type:"string"` // A LayerCustomRecipes object that specifies the layer's custom recipes. @@ -16296,7 +16306,7 @@ type UpdateLayerInput struct { // /\A[a-z0-9\-\_\.]+\Z/. // // The built-in layers' short names are defined by AWS OpsWorks Stacks. 
For - // more information, see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html) + // more information, see the Layer Reference (https://docs.aws.amazon.com/opsworks/latest/userguide/layers.html) Shortname *string `type:"string"` // Whether to use Amazon EBS-optimized instances. @@ -16590,7 +16600,7 @@ type UpdateStackInput struct { // A ChefConfiguration object that specifies whether to enable Berkshelf and // the Berkshelf version on Chef 11.10 stacks. For more information, see Create - // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` // The configuration manager. When you update a stack, we recommend that you @@ -16600,8 +16610,8 @@ type UpdateStackInput struct { ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. - // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) - // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + // For more information, see Adding Apps (https://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Cookbooks and Recipes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). CustomCookbooksSource *Source `type:"structure"` // A string that contains user-defined, custom JSON. It can be used to override @@ -16611,25 +16621,26 @@ type UpdateStackInput struct { // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" // // For more information about custom JSON, see Use Custom JSON to Modify the - // Stack Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + // Stack Configuration Attributes (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). CustomJson *string `type:"string"` // The stack's default Availability Zone, which must be in the stack's region. - // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // For more information, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html). // If you also specify a value for DefaultSubnetId, the subnet must be in the // same zone. For more information, see CreateStack. DefaultAvailabilityZone *string `type:"string"` // The ARN of an IAM profile that is the default profile for all of the stack's // EC2 instances. For more information about IAM ARNs, see Using Identifiers - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). DefaultInstanceProfileArn *string `type:"string"` // The stack's operating system, which must be set to one of the following: // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon - // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon + // Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux + // 2015.03. 
// // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. @@ -16645,25 +16656,25 @@ type UpdateStackInput struct { // // * A custom AMI: Custom. You specify the custom AMI you want to use when // you create instances. For more information about how to use custom AMIs - // with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // with OpsWorks, see Using Custom AMIs (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the stack's current operating system. For more information // about supported operating systems, see AWS OpsWorks Stacks Operating Systems - // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` // The default root device type. This value is used by default for all instances // in the stack, but you can override it when you create an instance. For more - // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + // information, see Storage for the Root Device (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` // A default Amazon EC2 key-pair name. The default value is none. If you specify // a key-pair name, AWS OpsWorks Stacks installs the public key on the instance // and you can use the private key with an SSH client to log in to the instance. - // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) - // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // For more information, see Using SSH to Communicate with an Instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (https://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). // You can override this setting by specifying a different key pair, or no key - // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + // pair, when you create an instance (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). DefaultSshKeyName *string `type:"string"` // The stack's default VPC subnet ID. This parameter is required if you specify @@ -16687,7 +16698,7 @@ type UpdateStackInput struct { // // * Fruits // - // * Greek_Deities + // * Greek_Deities_and_Titans // // * Legendary_creatures_from_Japan // @@ -16738,7 +16749,7 @@ type UpdateStackInput struct { // manually associate a built-in security group with a layer on. Custom security // groups are required only for those layers that need custom settings. // - // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // For more information, see Create a New Stack (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). 
UseOpsworksSecurityGroups *bool `type:"boolean"` } @@ -16891,7 +16902,7 @@ type UpdateUserProfileInput struct { _ struct{} `type:"structure"` // Whether users can specify their own SSH public key through the My Settings - // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + // page. For more information, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). AllowSelfManagement *bool `type:"boolean"` // The user IAM ARN. This can also be a federated user's ARN. @@ -17046,7 +17057,7 @@ type UserProfile struct { _ struct{} `type:"structure"` // Whether users can specify their own SSH public key through the My Settings - // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + // page. For more information, see Managing User Permissions (https://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). AllowSelfManagement *bool `type:"boolean"` // The user's IAM ARN. @@ -17107,7 +17118,7 @@ type Volume struct { _ struct{} `type:"structure"` // The volume Availability Zone. For more information, see Regions and Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // (https://docs.aws.amazon.com/general/latest/gr/rande.html). AvailabilityZone *string `type:"string"` // The device name. @@ -17117,7 +17128,7 @@ type Volume struct { Ec2VolumeId *string `type:"string"` // Specifies whether an Amazon EBS volume is encrypted. For more information, - // see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). + // see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). Encrypted *bool `type:"boolean"` // The instance ID. @@ -17136,19 +17147,19 @@ type Volume struct { RaidArrayId *string `type:"string"` // The AWS region. For more information about AWS regions, see Regions and Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // (https://docs.aws.amazon.com/general/latest/gr/rande.html). Region *string `type:"string"` // The volume size. Size *int64 `type:"integer"` - // The value returned by DescribeVolumes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html). + // The value returned by DescribeVolumes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html). Status *string `type:"string"` // The volume ID. VolumeId *string `type:"string"` - // The volume type. For more information, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). + // The volume type. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). // // * standard - Magnetic. Magnetic volumes must have a minimum size of 1 // GiB and a maximum size of 1024 GiB. @@ -17267,7 +17278,7 @@ type VolumeConfiguration struct { _ struct{} `type:"structure"` // Specifies whether an Amazon EBS volume is encrypted. For more information, - // see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). + // see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). Encrypted *bool `type:"boolean"` // For PIOPS volumes, the IOPS per disk. 
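The Encrypted flag documented in the hunk above sits next to the required MountPoint, NumberOfDisks, and Size fields of VolumeConfiguration. A minimal sketch of how a caller might build such a value against this SDK version; the helper name and the concrete mount point, size, and volume type are illustrative placeholders, not part of this change:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

// encryptedDataVolume fills the required VolumeConfiguration fields and opts
// in to EBS encryption via the Encrypted flag documented above. All concrete
// values here are placeholders.
func encryptedDataVolume() *opsworks.VolumeConfiguration {
	return &opsworks.VolumeConfiguration{
		MountPoint:    aws.String("/data"), // required
		NumberOfDisks: aws.Int64(1),        // required
		Size:          aws.Int64(100),      // required; size in GiB
		VolumeType:    aws.String("gp2"),   // General Purpose SSD
		Encrypted:     aws.Bool(true),      // see the Amazon EBS Encryption link above
	}
}
```

A value like this would typically be passed in the VolumeConfigurations list when creating or updating a layer.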
@@ -17291,7 +17302,7 @@ type VolumeConfiguration struct { // Size is a required field Size *int64 `type:"integer" required:"true"` - // The volume type. For more information, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). + // The volume type. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). // // * standard - Magnetic. Magnetic volumes must have a minimum size of 1 // GiB and a maximum size of 1024 GiB. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/doc.go index 4e25ae8d236..28c9722144a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/doc.go @@ -18,13 +18,13 @@ // Command Line Interface (CLI) or by using one of the AWS SDKs to implement // applications in your preferred language. For more information, see: // -// * AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) +// * AWS CLI (https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) // -// * AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) +// * AWS SDK for Java (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) // -// * AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) +// * AWS SDK for .NET (https://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) // -// * AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) +// * AWS SDK for PHP 2 (https://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) // // * AWS SDK for Ruby (http://docs.aws.amazon.com/sdkforruby/api/) // @@ -74,7 +74,7 @@ // When you call CreateStack, CloneStack, or UpdateStack we recommend you use // the ConfigurationManager parameter to specify the Chef version. The recommended // and default value for Linux stacks is currently 12. Windows stacks use Chef -// 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). +// 12.2. For more information, see Chef Versions (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). // // You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend // migrating your existing Linux stacks to Chef 12 as soon as possible. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index a1a8307cae4..6abe01ab353 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -46,11 +46,11 @@ const ( // svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *OpsWorks { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *OpsWorks { svc := &OpsWorks{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-02-18", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go index 5672f5cfef3..10f02dbeabf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go @@ -3,6 +3,7 @@ package organizations import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -63,27 +64,24 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // have the relevant IAM permissions: // // * Invitation to join or Approve all features request handshakes: only -// a principal from the member account. -// -// The user who calls the API for an invitation to join must have the organizations:AcceptHandshake -// permission. If you enabled all features in the organization, then the -// user must also have the iam:CreateServiceLinkedRole permission so that -// Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. +// a principal from the member account. The user who calls the API for an +// invitation to join must have the organizations:AcceptHandshake permission. +// If you enabled all features in the organization, the user must also have +// the iam:CreateServiceLinkedRole permission so that AWS Organizations can +// create the required service-linked role named AWSServiceRoleForOrganizations. // For more information, see AWS Organizations and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles) // in the AWS Organizations User Guide. // // * Enable all features final confirmation handshake: only a principal from -// the master account. -// -// For more information about invitations, see Inviting an AWS Account to Join -// Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html) +// the master account. For more information about invitations, see Inviting +// an AWS Account to Join Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html) // in the AWS Organizations User Guide. For more information about requests // to enable all features in the organization, see Enabling All Features // in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) // in the AWS Organizations User Guide. // // After you accept a handshake, it continues to appear in the results of relevant -// APIs for only 30 days. 
After that it is deleted. +// APIs for only 30 days. After that, it's deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -113,29 +111,23 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on // the number of accounts in an organization. Note that deleted and closed -// accounts still count toward your limit. -// -// If you get this exception immediately after creating the organization, wait -// one hour and try again. If after an hour it continues to fail with this -// error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. +// accounts still count toward your limit. If you get this exception immediately +// after creating the organization, wait one hour and try again. If after +// an hour it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). // // * ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because // the invited account is already a member of an organization. // -// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid -// because the organization has already enabled all features. +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. // // * INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations // to join an organization while it's in the process of enabling all features. // You can resume inviting accounts after you finalize the process when all // accounts have agreed to the change. // -// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an -// account that doesn't have a payment instrument, such as a credit card, -// associated with it. +// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid +// because the organization has already enabled all features. // // * ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because // the account is from a different marketplace than the accounts in the organization. @@ -146,6 +138,10 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to // change the membership of an account too quickly after its previous change. // +// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an +// account that doesn't have a payment instrument, such as a credit card, +// associated with it. +// // * ErrCodeHandshakeNotFoundException "HandshakeNotFoundException" // We can't find a handshake with the HandshakeId that you specified. // @@ -171,7 +167,7 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -179,12 +175,12 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. 
// -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -199,6 +195,11 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -226,11 +227,11 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeAccessDeniedForDependencyException "AccessDeniedForDependencyException" @@ -310,37 +311,29 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // // * Service control policy (SCP) - An SCP specifies what permissions can // be delegated to users in affected member accounts. The scope of influence -// for a policy depends on what you attach the policy to: -// -// If you attach an SCP to a root, it affects all accounts in the organization. -// -// If you attach an SCP to an OU, it affects all accounts in that OU and in -// any child OUs. -// -// If you attach the policy directly to an account, then it affects only that -// account. -// -// SCPs are JSON policies that specify the maximum permissions for an organization -// or organizational unit (OU). When you attach one SCP to a higher level -// root or OU, and you also attach a different SCP to a child OU or to an -// account, the child policy can further restrict only the permissions that +// for a policy depends on what you attach the policy to: If you attach an +// SCP to a root, it affects all accounts in the organization. If you attach +// an SCP to an OU, it affects all accounts in that OU and in any child OUs. +// If you attach the policy directly to an account, it affects only that +// account. SCPs are JSON policies that specify the maximum permissions for +// an organization or organizational unit (OU). You can attach one SCP to +// a higher level root or OU, and a different SCP to a child OU or to an +// account. 
The child policy can further restrict only the permissions that // pass through the parent filter and are available to the child. An SCP -// that is attached to a child cannot grant a permission that is not already -// granted by the parent. For example, imagine that the parent SCP allows -// permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. -// The result is that the accounts affected by the child SCP are allowed -// to use only C, D, and E. They cannot use A or B because they were filtered -// out by the child OU. They also cannot use F and G because they were filtered -// out by the parent OU. They cannot be granted back by the child SCP; child -// SCPs can only filter the permissions they receive from the parent SCP. -// -// AWS Organizations attaches a default SCP named "FullAWSAccess to every root, -// OU, and account. This default SCP allows all services and actions, enabling -// any new child OU or account to inherit the permissions of the parent root -// or OU. If you detach the default policy, you must replace it with a policy -// that specifies the permissions that you want to allow in that OU or account. -// -// For more information about how Organizations policies permissions work, see +// that is attached to a child can't grant a permission that the parent hasn't +// already granted. For example, imagine that the parent SCP allows permissions +// A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result +// is that the accounts affected by the child SCP are allowed to use only +// C, D, and E. They can't use A or B because the child OU filtered them +// out. They also can't use F and G because the parent OU filtered them out. +// They can't be granted back by the child SCP; child SCPs can only filter +// the permissions they receive from the parent SCP. AWS Organizations attaches +// a default SCP named "FullAWSAccess" to every root, OU, and account. This +// default SCP allows all services and actions, enabling any new child OU +// or account to inherit the permissions of the parent root or OU. If you +// detach the default policy, you must replace it with a policy that specifies +// the permissions that you want to allow in that OU or account. For more +// information about how AWS Organizations policy permissions work, see // Using Service Control Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // in the AWS Organizations User Guide. // @@ -380,46 +373,6 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/).
-// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -434,23 +387,24 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again.
If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -467,6 +421,45 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeDuplicatePolicyAttachmentException "DuplicatePolicyAttachmentException" // The selected policy is already attached to the specified target. // @@ -483,7 +476,7 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
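Because the ConstraintViolationException reasons and ErrCodeDuplicatePolicyAttachmentException above are surfaced through awserr.Error, callers typically branch on the error code string. A hedged sketch of that pattern (the function name and the choice to treat a duplicate attachment as success are illustrative, not from this diff; the error-code constants and the AttachPolicy call are from the organizations package):

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// attachPolicyIdempotent attaches an SCP to a root, OU, or account and treats
// "already attached" as success, per the error codes documented above.
func attachPolicyIdempotent(policyID, targetID string) error {
	svc := organizations.New(session.Must(session.NewSession()))
	_, err := svc.AttachPolicy(&organizations.AttachPolicyInput{
		PolicyId: aws.String(policyID),
		TargetId: aws.String(targetID),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case organizations.ErrCodeDuplicatePolicyAttachmentException:
			return nil // the policy is already attached to this target
		case organizations.ErrCodeTooManyRequestsException:
			return fmt.Errorf("throttled, retry later: %w", aerr)
		}
	}
	return err
}
```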
@@ -491,12 +484,12 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -511,6 +504,11 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -547,11 +545,11 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // We can't find a root, OU, or account with the TargetId that you specified. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/AttachPolicy @@ -628,7 +626,7 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // to that handshake. // // After you cancel a handshake, it continues to appear in the results of relevant -// APIs for only 30 days. After that it is deleted. +// APIs for only 30 days. After that, it's deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -674,7 +672,7 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -682,12 +680,12 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. 
-// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -702,6 +700,11 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -725,11 +728,11 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CancelHandshake @@ -810,45 +813,50 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // as a parameter to the DescribeCreateAccountStatus operation. // // * Check the AWS CloudTrail log for the CreateAccountResult event. For -// information on using AWS CloudTrail with Organizations, see Monitoring +// information on using AWS CloudTrail with AWS Organizations, see Monitoring // the Activity in Your Organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html) // in the AWS Organizations User Guide. // -// The user who calls the API to create an account must have the organizations:CreateAccountpermission. If you enabled all features in the organization, AWS Organizations -// will create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs)in the AWS Organizations User Guide. +// The user who calls the API to create an account must have the organizations:CreateAccount +// permission. If you enabled all features in the organization, AWS Organizations +// creates the required service-linked role named AWSServiceRoleForOrganizations. +// For more information, see AWS Organizations and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs) +// in the AWS Organizations User Guide. 
// // AWS Organizations preconfigures the new member account with a role (named -// OrganizationAccountAccessRoleby default) that grants users in the master account administrator permissions -// in the new member account. Principals in the master account can assume the -// role. AWS Organizations clones the company name and address information for -// the new account from the organization's master account. +// OrganizationAccountAccessRole by default) that grants users in the master +// account administrator permissions in the new member account. Principals in +// the master account can assume the role. AWS Organizations clones the company +// name and address information for the new account from the organization's +// master account. // // This operation can be called only from the organization's master account. // // For more information about creating accounts, see Creating an AWS Account -// in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)in the AWS Organizations User Guide. -// -// When you create an account in an organization using the AWS Organizations -// console, API, or CLI commands, the information required for the account to -// operate as a standalone account, such as a payment method and signing the -// end user license agreement (EULA) is not automatically collected. If you -// must remove an account from your organization later, you can do so only after -// you provide the missing information. Follow the steps at To leave an organization -// as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// If you get an exception that indicates that you exceeded your account limits -// for the organization, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required for the account +// to operate as a standalone account, such as a payment method and signing +// the end user license agreement (EULA) is not automatically collected. +// If you must remove an account from your organization later, you can do +// so only after you provide the missing information. Follow the steps at +// To leave an organization as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // -// If you get an exception that indicates that the operation failed because -// your organization is still initializing, wait one hour and then try again. -// If the error persists, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * If you get an exception that indicates that you exceeded your account +// limits for the organization, contact AWS Support (https://console.aws.amazon.com/support/home#/). // -// Using CreateAccount to create multiple temporary accounts isn't recommended. -// You can only close an account from the Billing and Cost Management Console, -// and you must be signed in as the root user. 
For information on the requirements -// and process for closing an account, see Closing an AWS Account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) -// in the AWS Organizations User Guide. +// * If you get an exception that indicates that the operation failed because +// your organization is still initializing, wait one hour and then try again. +// If the error persists, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * Using CreateAccount to create multiple temporary accounts isn't recommended. +// You can only close an account from the Billing and Cost Management Console, +// and you must be signed in as the root user. For information on the requirements +// and process for closing an account, see Closing an AWS Account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) +// in the AWS Organizations User Guide. // // When you create a member account with this operation, you can choose whether // to create the account with the IAM User and Role Access to Billing Information @@ -892,46 +900,6 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. 
This account requires you to first agree to the @@ -946,23 +914,24 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. @@ -979,6 +948,45 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide.
+// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -992,7 +1000,7 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1000,12 +1008,12 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -1020,6 +1028,11 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -1049,11 +1062,11 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // error. 
Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeUnsupportedAPIEndpointException "UnsupportedAPIEndpointException" @@ -1127,9 +1140,9 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // // This action is available if all of the following are true: // -// * You are authorized to create accounts in the AWS GovCloud (US) Region. +// * You're authorized to create accounts in the AWS GovCloud (US) Region. // For more information on the AWS GovCloud (US) Region, see the AWS GovCloud -// User Guide (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/welcome.html). +// User Guide. (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/welcome.html) // // * You already have an account in the AWS GovCloud (US) Region that is // associated with your master account in the commercial Region. @@ -1147,9 +1160,8 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // // * Verify that AWS CloudTrail is enabled to store logs. // -// * Create an S3 bucket for AWS CloudTrail log storage. -// -// For more information, see Verifying AWS CloudTrail Is Enabled (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html) +// * Create an S3 bucket for AWS CloudTrail log storage. For more information, +// see Verifying AWS CloudTrail Is Enabled (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html) // in the AWS GovCloud User Guide. // // You call this action from the master account of your organization in the @@ -1175,11 +1187,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // the Activity in Your Organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html) // in the AWS Organizations User Guide. // -// When you call the CreateGovCloudAccountaction, you create two accounts: a standalone account in the AWS GovCloud -// (US) Region and an associated account in the commercial Region for billing -// and support purposes. The account in the commercial Region is automatically -// a member of the organization whose credentials made the request. Both accounts -// are associated with the same email address. +// When you call the CreateGovCloudAccount action, you create two accounts: +// a standalone account in the AWS GovCloud (US) Region and an associated account +// in the commercial Region for billing and support purposes. The account in +// the commercial Region is automatically a member of the organization whose +// credentials made the request. Both accounts are associated with the same +// email address. 
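The dual-account behavior described in the paragraph above can be exercised with a short caller sketch. This is illustrative only: the account name and email are hypothetical placeholders, and the operation is asynchronous, so the response carries a status to poll rather than a finished account.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// Per the docs above, the client must carry credentials for the master
	// account of the organization in the commercial Region.
	svc := organizations.New(session.Must(session.NewSession()))

	out, err := svc.CreateGovCloudAccount(&organizations.CreateGovCloudAccountInput{
		AccountName: aws.String("example-govcloud-member"), // placeholder
		Email:       aws.String("owner@example.com"),       // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// One request creates both the GovCloud (US) account and its commercial
	// twin; track progress through the returned creation status.
	fmt.Println(aws.StringValue(out.CreateAccountStatus.State))
}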
// // A role is created in the new account in the commercial Region that allows // the master account in the organization in the commercial Region to assume @@ -1188,32 +1201,35 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // GovCloud (US) account that can be assumed by the AWS GovCloud (US) account // that is associated with the master account of the commercial organization. // For more information and to view a diagram that explains how account access -// works, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html)in the AWS GovCloud User Guide. +// works, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. // // For more information about creating accounts, see Creating an AWS Account -// in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)in the AWS Organizations User Guide. -// -// When you create an account in an organization using the AWS Organizations -// console, API, or CLI commands, the information required for the account to -// operate as a standalone account, such as a payment method and signing the -// end user license agreement (EULA) is not automatically collected. If you -// must remove an account from your organization later, you can do so only after -// you provide the missing information. Follow the steps at To leave an organization -// as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// If you get an exception that indicates that you exceeded your account limits -// for the organization, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required for the account +// to operate as a standalone account, such as a payment method and signing +// the end user license agreement (EULA) is not automatically collected. +// If you must remove an account from your organization later, you can do +// so only after you provide the missing information. Follow the steps at +// To leave an organization as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * If you get an exception that indicates that you exceeded your account +// limits for the organization, contact AWS Support (https://console.aws.amazon.com/support/home#/). // -// If you get an exception that indicates that the operation failed because -// your organization is still initializing, wait one hour and then try again. -// If the error persists, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * If you get an exception that indicates that the operation failed because +// your organization is still initializing, wait one hour and then try again. +// If the error persists, contact AWS Support (https://console.aws.amazon.com/support/home#/). // -// Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. 
-// You can only close an account from the AWS Billing and Cost Management console, -// and you must be signed in as the root user. For information on the requirements -// and process for closing an account, see Closing an AWS Account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) -// in the AWS Organizations User Guide. +// * Using CreateGovCloudAccount to create multiple temporary accounts isn't +// recommended. You can only close an account from the AWS Billing and Cost +// Management console, and you must be signed in as the root user. For information +// on the requirements and process for closing an account, see Closing an +// AWS Account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) +// in the AWS Organizations User Guide. // // When you create a member account with this operation, you can choose whether // to create the account with the IAM User and Role Access to Billing Information @@ -1257,52 +1273,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account -// from the organization that doesn't yet have enough information to exist -// as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. 
Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information @@ -1311,23 +1287,24 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address.
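Because every reason in the list above surfaces to Go callers through the single ConstraintViolationException error code, one plausible handling pattern (a sketch, not the SDK's prescribed approach) is to branch on the code via awserr and log the reason text carried in the message. The input values below are placeholders; the ErrCode constants are real exports of this package.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	_, err := svc.CreateAccount(&organizations.CreateAccountInput{
		AccountName: aws.String("example-member"),     // placeholder
		Email:       aws.String("member@example.com"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case organizations.ErrCodeConstraintViolationException:
				// The specific reason (ACCOUNT_NUMBER_LIMIT_EXCEEDED and so
				// on) travels only in the message text, so surface it.
				log.Printf("constraint violated: %s", aerr.Message())
			case organizations.ErrCodeTooManyRequestsException:
				log.Print("throttled; retry later with backoff")
			default:
				log.Fatal(aerr)
			}
		} else {
			log.Fatal(err)
		}
	}
}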
@@ -1344,6 +1321,45 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -1357,7 +1373,7 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1365,12 +1381,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. 
// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -1385,6 +1401,11 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -1414,11 +1435,11 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeUnsupportedAPIEndpointException "UnsupportedAPIEndpointException" @@ -1502,8 +1523,8 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // is created with all features enabled and service control policies automatically // enabled in the root. If you instead choose to create the organization supporting // only the consolidated billing features by setting the FeatureSet parameter -// to CONSOLIDATED_BILLING", then no policy types are enabled by default and -// you cannot use organization policies. +// to CONSOLIDATED_BILLING, no policy types are enabled by default, and you +// can't use organization policies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1539,46 +1560,6 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/).
-// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -1593,23 +1574,24 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running +// a command immediately after creating the organization, wait one hour and +// try again.
If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -1626,6 +1608,45 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -1639,7 +1660,7 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
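To make the FeatureSet behavior described for CreateOrganization in the hunk above concrete, here is a hedged sketch of creating an organization in consolidated-billing mode, where no policy types are enabled; the constants and fields are real exports of this package.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// CONSOLIDATED_BILLING creates the organization with no policy types
	// enabled; pass OrganizationFeatureSetAll instead for full policy support.
	out, err := svc.CreateOrganization(&organizations.CreateOrganizationInput{
		FeatureSet: aws.String(organizations.OrganizationFeatureSetConsolidatedBilling),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Organization.Id))
}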
@@ -1647,12 +1668,12 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -1667,6 +1688,11 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -1690,11 +1716,11 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeAccessDeniedForDependencyException "AccessDeniedForDependencyException" @@ -1813,46 +1839,6 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. 
-// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -1867,23 +1853,24 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day.
+// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -1900,11 +1887,50 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // -// * ErrCodeDuplicateOrganizationalUnitException "DuplicateOrganizationalUnitException" -// An OU with the same name already exists. +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // -// * ErrCodeInvalidInputException "InvalidInputException" -// The requested operation failed because you provided invalid values for one +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// +// * ErrCodeDuplicateOrganizationalUnitException "DuplicateOrganizationalUnitException" +// An OU with the same name already exists. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: // @@ -1916,7 +1942,7 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. 
+// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1924,12 +1950,12 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -1944,6 +1970,11 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -1970,11 +2001,11 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreateOrganizationalUnit @@ -2085,46 +2116,6 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. 
-// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -2139,23 +2130,24 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/).
+// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -2172,6 +2164,45 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeDuplicatePolicyException "DuplicatePolicyException" // A policy with the same name already exists. // @@ -2188,7 +2219,7 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2196,12 +2227,12 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. 
// -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -2216,6 +2247,11 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -2252,11 +2288,11 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreatePolicy @@ -2330,11 +2366,11 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // // This operation can be called only from the account that received the handshake. // The originator of the handshake can use CancelHandshake instead. The originator -// can't reactivate a declined request, but can re-initiate the process with +// can't reactivate a declined request, but can reinitiate the process with // a new handshake request. // // After you decline a handshake, it continues to appear in the results of relevant -// APIs for only 30 days. After that it is deleted. +// APIs for only 30 days. After that, it's deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2380,7 +2416,7 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2388,12 +2424,12 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. 
// -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -2408,6 +2444,11 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -2431,11 +2472,11 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeclineHandshake @@ -2544,7 +2585,7 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2552,12 +2593,12 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -2572,6 +2613,11 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. 
+// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -2599,11 +2645,11 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeleteOrganization @@ -2714,7 +2760,7 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2722,12 +2768,12 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -2742,6 +2788,11 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -2772,11 +2823,11 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. 
// -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeleteOrganizationalUnit @@ -2888,7 +2939,7 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2896,12 +2947,12 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -2916,6 +2967,11 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -2946,11 +3002,11 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeletePolicy @@ -3019,7 +3075,7 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // DescribeAccount API operation for AWS Organizations. // -// Retrieves Organizations-related information about the specified account. +// Retrieves AWS Organizations-related information about the specified account. // // This operation can be called only from the organization's master account. 
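The operations documented in this hunk all surface the same error-code vocabulary through awserr.Error, so a minimal sketch may help orient readers of the diff. The following example is not part of the diff: it assumes default-chain credentials, a placeholder member-account ID, and the vendored aws-sdk-go packages listed above, and it calls DescribeAccount from the master account while backing off on ErrCodeTooManyRequestsException, for which the documentation's remedy is "Try again later".

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// Credentials and region come from the default chain; the account ID is
	// a placeholder for a member account in the organization.
	svc := organizations.New(session.Must(session.NewSession()))
	input := &organizations.DescribeAccountInput{AccountId: aws.String("111111111111")}
	for attempt := 1; attempt <= 3; attempt++ {
		out, err := svc.DescribeAccount(input)
		if err == nil {
			fmt.Println("account name:", aws.StringValue(out.Account.Name))
			return
		}
		// "Try again later" is the documented remedy for this error code,
		// so back off briefly and retry instead of failing immediately.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == organizations.ErrCodeTooManyRequestsException {
			time.Sleep(time.Duration(attempt) * time.Second)
			continue
		}
		fmt.Println("error:", err)
		return
	}
}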
// @@ -3060,7 +3116,7 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3068,12 +3124,12 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -3088,6 +3144,11 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -3111,11 +3172,11 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeAccount @@ -3224,7 +3285,7 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3232,12 +3293,12 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. 
// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -3252,6 +3313,11 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -3275,11 +3341,11 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeUnsupportedAPIEndpointException "UnsupportedAPIEndpointException" @@ -3356,7 +3422,7 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // that generated the handshake. // // You can access handshakes that are ACCEPTED, DECLINED, or CANCELED for only -// 30 days after they change to that state. They are then deleted and no longer +// 30 days after they change to that state. They're then deleted and no longer // accessible. // // This operation can be called from any account in the organization. @@ -3396,7 +3462,7 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3404,12 +3470,12 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -3424,6 +3490,11 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. 
You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -3447,11 +3518,11 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeHandshake @@ -3525,8 +3596,8 @@ func (c *Organizations) DescribeOrganizationRequest(input *DescribeOrganizationI // // This operation can be called from any account in the organization. // -// Even if a policy type is shown as available in the organization, it can be -// disabled separately at the root level with DisablePolicyType. Use ListRoots +// Even if a policy type is shown as available in the organization, you can +// disable it separately at the root level with DisablePolicyType. Use ListRoots // to see the status of policy types for a specified root. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3557,11 +3628,11 @@ func (c *Organizations) DescribeOrganizationRequest(input *DescribeOrganizationI // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeOrganization @@ -3666,7 +3737,7 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3674,12 +3745,12 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. 
// -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -3694,6 +3765,11 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -3720,11 +3796,11 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeOrganizationalUnit @@ -3829,7 +3905,7 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3837,12 +3913,12 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -3857,6 +3933,11 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. 
+// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -3883,11 +3964,11 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribePolicy @@ -3963,13 +4044,13 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // // Note: Every root, OU, and account must have at least one SCP attached. If // you want to replace the default FullAWSAccess policy with one that limits -// the permissions that can be delegated, then you must attach the replacement -// policy before you can remove the default one. This is the authorization strategy +// the permissions that can be delegated, you must attach the replacement policy +// before you can remove the default one. This is the authorization strategy // of whitelisting (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_whitelist). // If you instead attach a second SCP and leave the FullAWSAccess SCP still // attached, and specify "Effect": "Deny" in the second SCP to override the // "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), -// then you are using the authorization strategy of blacklisting (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_blacklist). +// you're using the authorization strategy of blacklisting (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_blacklist). // // This operation can be called only from the organization's master account. // @@ -4007,46 +4088,6 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. 
If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -4061,23 +4102,24 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. 
If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -4094,7 +4136,46 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // -// * ErrCodeInvalidInputException "InvalidInputException" +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// +// * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: @@ -4107,7 +4188,7 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. 
// -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4115,12 +4196,12 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -4135,6 +4216,11 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -4167,11 +4253,11 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // We can't find a root, OU, or account with the TargetId that you specified. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DetachPolicy @@ -4257,13 +4343,17 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // on that service. For more information, see the documentation for the other // AWS service. // -// After you perform the DisableAWSServiceAccessoperation, the specified service can no longer perform operations in your -// organization's accounts unless the operations are explicitly permitted by -// the IAM policies that are attached to your roles. +// After you perform the DisableAWSServiceAccess operation, the specified service +// can no longer perform operations in your organization's accounts unless the +// operations are explicitly permitted by the IAM policies that are attached +// to your roles. 
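As a minimal sketch of the call described above, assuming the same aws-sdk-go imports as the earlier DescribeAccount example: the service principal shown is illustrative only, and per the note added in this hunk the call must come from the organization's master account.

// DisableTrustedAccess removes an AWS service's trusted access to the
// organization. After this call the service can act in member accounts only
// where IAM policies on your roles still permit it.
func DisableTrustedAccess(svc *organizations.Organizations, principal string) error {
	// principal is the service principal of the integrated service, for
	// example "config.amazonaws.com" (an illustrative value, not from the diff).
	_, err := svc.DisableAWSServiceAccess(&organizations.DisableAWSServiceAccessInput{
		ServicePrincipal: aws.String(principal),
	})
	return err
}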
// // For more information about integrating other services with AWS Organizations, // including the list of services that work with Organizations, see Integrating -// AWS Organizations with Other AWS Services (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)in the AWS Organizations User Guide +// AWS Organizations with Other AWS Services (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html) +// in the AWS Organizations User Guide. +// +// This operation can be called only from the organization's master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4299,46 +4389,6 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -4353,23 +4403,24 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. 
// -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number -// of accounts that you can create in one day. -// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. @@ -4386,6 +4437,45 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. 
Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -4399,7 +4489,7 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4407,12 +4497,12 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -4427,6 +4517,11 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -4450,11 +4545,11 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. 
// -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DisableAWSServiceAccess @@ -4529,12 +4624,16 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // of the specified type to that root or to any organizational unit (OU) or // account in that root. You can undo this by using the EnablePolicyType operation. // +// This is an asynchronous request that AWS performs in the background. If you +// disable a policy for a root, it still appears enabled for the organization +// if all features (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) +// are enabled for the organization. AWS recommends that you first use ListRoots +// to see the status of policy types for a specified root, and then use this +// operation. +// // This operation can be called only from the organization's master account. // -// If you disable a policy type for a root, it still shows as enabled for the -// organization if all features are enabled in that organization. Use ListRoots -// to see the status of policy types for a specified root. Use DescribeOrganization -// to see the status of policy types in the organization. +// To view the status of available policy types in the organization, use DescribeOrganization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4570,46 +4669,6 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. 
-// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -4624,23 +4683,24 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number -// of accounts that you can create in one day. -// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. @@ -4657,6 +4717,45 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // Region. 
For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -4670,7 +4769,7 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4678,12 +4777,12 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. 
// @@ -4698,6 +4797,11 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -4731,11 +4835,11 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DisablePolicyType @@ -4860,52 +4964,12 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. 
-// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account -// from the organization that doesn't yet have enough information to exist -// as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information @@ -4914,23 +4978,24 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again.
If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -4947,6 +5012,45 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -4960,7 +5064,7 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
@@ -4968,12 +5072,12 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -4988,6 +5092,11 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -5011,11 +5120,11 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/EnableAWSServiceAccess @@ -5147,29 +5256,23 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on // the number of accounts in an organization. Note that deleted and closed -// accounts still count toward your limit. -// -// If you get this exception immediately after creating the organization, wait -// one hour and try again. If after an hour it continues to fail with this -// error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. +// accounts still count toward your limit. If you get this exception immediately +// after creating the organization, wait one hour and try again. If after +// an hour it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). // // * ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because // the invited account is already a member of an organization. // -// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid -// because the organization has already enabled all features. 
+// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. // // * INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations // to join an organization while it's in the process of enabling all features. // You can resume inviting accounts after you finalize the process when all // accounts have agreed to the change. // -// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an -// account that doesn't have a payment instrument, such as a credit card, -// associated with it. +// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid +// because the organization has already enabled all features. // // * ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because // the account is from a different marketplace than the accounts in the organization. @@ -5180,6 +5283,10 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to // change the membership of an account too quickly after its previous change. // +// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an +// account that doesn't have a payment instrument, such as a credit card, +// associated with it. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -5193,7 +5300,7 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -5201,12 +5308,12 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -5221,6 +5328,11 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -5244,11 +5356,11 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. 
+// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/EnableAllFeatures @@ -5322,13 +5434,15 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // (OU), or account in that root. You can undo this by using the DisablePolicyType // operation. // +// This is an asynchronous request that AWS performs in the background. AWS +// recommends that you first use ListRoots to see the status of policy types +// for a specified root, and then use this operation. +// // This operation can be called only from the organization's master account. // // You can enable a policy type in a root only if that policy type is available -// in the organization. Use DescribeOrganization to view the status of available -// policy types in the organization. -// -// To view the status of policy type in a root, use ListRoots. +// in the organization. To view the status of available policy types in the +// organization, use DescribeOrganization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5364,46 +5478,6 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. 
-// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -5418,23 +5492,24 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -5451,6 +5526,45 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide.
// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -5464,7 +5578,7 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -5472,12 +5586,12 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. 
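The hunks above change EnablePolicyType's doc comment to note that the call is asynchronous and that callers should first check policy-type status on the root with ListRoots; the constraint reasons listed above arrive as the message of a ConstraintViolationException. Below is a minimal caller-side sketch of that flow. It is not part of the generated file: the root selection, reason handling, and log output are illustrative assumptions.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// Per the updated doc comment, inspect the root's policy-type status
	// with ListRoots before enabling a type.
	roots, err := svc.ListRoots(&organizations.ListRootsInput{})
	if err != nil || len(roots.Roots) == 0 {
		log.Fatalf("cannot list roots: %v", err)
	}
	root := roots.Roots[0]
	for _, pt := range root.PolicyTypes {
		log.Printf("root %s: %s is %s", aws.StringValue(root.Id),
			aws.StringValue(pt.Type), aws.StringValue(pt.Status))
	}

	_, err = svc.EnablePolicyType(&organizations.EnablePolicyTypeInput{
		PolicyType: aws.String(organizations.PolicyTypeServiceControlPolicy),
		RootId:     root.Id,
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case organizations.ErrCodeConstraintViolationException:
			// Reason strings such as ORGANIZATION_NOT_IN_ALL_FEATURES_MODE
			// are carried in the exception message, not as separate fields.
			log.Printf("constraint violated: %s", aerr.Message())
		case organizations.ErrCodePolicyTypeAlreadyEnabledException:
			log.Print("policy type already enabled; nothing to do")
		default:
			log.Printf("%s: %s", aerr.Code(), aerr.Message())
		}
	}
}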
// @@ -5492,6 +5606,11 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -5521,11 +5640,11 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodePolicyTypeNotAvailableForOrganizationException "PolicyTypeNotAvailableForOrganizationException" @@ -5602,21 +5721,21 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // InviteAccountToOrganization API operation for AWS Organizations. // // Sends an invitation to another account to join your organization as a member -// account. Organizations sends email on your behalf to the email address that -// is associated with the other account's owner. The invitation is implemented +// account. AWS Organizations sends email on your behalf to the email address +// that is associated with the other account's owner. The invitation is implemented // as a Handshake whose details are in the response. // -// You can invite AWS accounts only from the same seller as the master account. -// For example, if your organization's master account was created by Amazon -// Internet Services Pvt. Ltd (AISPL), an AWS seller in India, then you can -// only invite other AISPL accounts to your organization. You can't combine -// accounts from AISPL and AWS, or any other AWS seller. For more information, -// see Consolidated Billing in India (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html). +// * You can invite AWS accounts only from the same seller as the master +// account. For example, if your organization's master account was created +// by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, +// you can invite only other AISPL accounts to your organization. You can't +// combine accounts from AISPL and AWS or from any other AWS seller. For +// more information, see Consolidated Billing in India (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html). // -// If you receive an exception that indicates that you exceeded your account -// limits for the organization or that the operation failed because your organization -// is still initializing, wait one hour and then try again. 
If the error persists -// after an hour, then contact AWS Customer Support (https://console.aws.amazon.com/support/home#/). +// * If you receive an exception that indicates that you exceeded your account +// limits for the organization or that the operation failed because your +// organization is still initializing, wait one hour and then try again. +// If the error persists after an hour, contact AWS Support (https://console.aws.amazon.com/support/home#/). // // This operation can be called only from the organization's master account. // @@ -5658,29 +5777,23 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on // the number of accounts in an organization. Note that deleted and closed -// accounts still count toward your limit. -// -// If you get this exception immediately after creating the organization, wait -// one hour and try again. If after an hour it continues to fail with this -// error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. +// accounts still count toward your limit. If you get this exception immediately +// after creating the organization, wait one hour and try again. If after +// an hour it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). // // * ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because // the invited account is already a member of an organization. // -// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid -// because the organization has already enabled all features. +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. // // * INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations // to join an organization while it's in the process of enabling all features. // You can resume inviting accounts after you finalize the process when all // accounts have agreed to the change. // -// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an -// account that doesn't have a payment instrument, such as a credit card, -// associated with it. +// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid +// because the organization has already enabled all features. // // * ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because // the account is from a different marketplace than the accounts in the organization. @@ -5691,6 +5804,10 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to // change the membership of an account too quickly after its previous change. // +// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an +// account that doesn't have a payment instrument, such as a credit card, +// associated with it. +// // * ErrCodeDuplicateHandshakeException "DuplicateHandshakeException" // A handshake with the same action and target already exists. For example, // if you invited an account to join your organization, the invited account @@ -5711,7 +5828,7 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // // * INPUT_REQUIRED: You must include a value for all required parameters. 
// -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -5719,12 +5836,12 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -5739,6 +5856,11 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -5768,11 +5890,11 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/InviteAccountToOrganization @@ -5849,28 +5971,28 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // // This operation can be called only from a member account in the organization. // -// The master account in an organization with all features enabled can set service -// control policies (SCPs) that can restrict what administrators of member accounts -// can do, including preventing them from successfully calling LeaveOrganization -// and leaving the organization. -// -// You can leave an organization as a member account only if the account is -// configured with the information required to operate as a standalone account. -// When you create an account in an organization using the AWS Organizations -// console, API, or CLI commands, the information required of standalone accounts -// is not automatically collected. For each account that you want to make standalone, -// you must accept the End User License Agreement (EULA), choose a support plan, -// provide and verify the required contact information, and provide a current -// payment method. 
AWS uses the payment method to charge for any billable (not -// free tier) AWS activity that occurs while the account is not attached to -// an organization. Follow the steps at To leave an organization when all required -// account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * The master account in an organization with all features enabled can +// set service control policies (SCPs) that can restrict what administrators +// of member accounts can do, including preventing them from successfully +// calling LeaveOrganization and leaving the organization. +// +// * You can leave an organization as a member account only if the account +// is configured with the information required to operate as a standalone +// account. When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required of standalone +// accounts is not automatically collected. For each account that you want +// to make standalone, you must accept the end user license agreement (EULA), +// choose a support plan, provide and verify the required contact information, +// and provide a current payment method. AWS uses the payment method to charge +// for any billable (not free tier) AWS activity that occurs while the account +// isn't attached to an organization. Follow the steps at To leave an organization +// when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // -// You can leave an organization only after you enable IAM user access to billing -// in your account. For more information, see Activating Access to the Billing -// and Cost Management Console (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate) -// in the AWS Billing and Cost Management User Guide. +// * You can leave an organization only after you enable IAM user access +// to billing in your account. For more information, see Activating Access +// to the Billing and Cost Management Console (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate) +// in the AWS Billing and Cost Management User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5911,46 +6033,6 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. 
If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -5965,23 +6047,24 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. 
If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -5998,6 +6081,45 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -6011,7 +6133,7 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value.
// // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6019,12 +6141,12 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -6039,6 +6161,11 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -6067,11 +6194,11 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/LeaveOrganization @@ -6188,46 +6315,6 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. 
-// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -6242,23 +6329,24 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/).
+// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -6275,6 +6363,45 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -6288,7 +6415,7 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6296,12 +6423,12 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. 
// -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -6316,6 +6443,11 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -6339,11 +6471,11 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAWSServiceAccessForOrganization @@ -6379,7 +6511,7 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationWithContext(ctx aws.C // // Example iterating over at most 3 pages of a ListAWSServiceAccessForOrganization operation. // pageNum := 0 // err := client.ListAWSServiceAccessForOrganizationPages(params, -// func(page *ListAWSServiceAccessForOrganizationOutput, lastPage bool) bool { +// func(page *organizations.ListAWSServiceAccessForOrganizationOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6411,10 +6543,12 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationPagesWithContext(ctx }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAWSServiceAccessForOrganizationOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAWSServiceAccessForOrganizationOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6475,7 +6609,7 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. 
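The "null only" correction above matters for hand-rolled pagination: a List* call can return an empty page while more results remain, so only a nil NextToken ends the loop. A minimal sketch under that assumption; the function name and client wiring are illustrative, not part of the SDK.

import "github.com/aws/aws-sdk-go/service/organizations"

// listAllAccounts pages through ListAccounts manually, keying off NextToken
// rather than off an empty Accounts slice.
func listAllAccounts(svc *organizations.Organizations) ([]*organizations.Account, error) {
	var accounts []*organizations.Account
	input := &organizations.ListAccountsInput{}
	for {
		out, err := svc.ListAccounts(input)
		if err != nil {
			return nil, err
		}
		accounts = append(accounts, out.Accounts...)
		if out.NextToken == nil {
			return accounts, nil // a nil (null) token is the only end-of-results signal
		}
		input.NextToken = out.NextToken
	}
}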
// @@ -6511,7 +6645,7 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6519,12 +6653,12 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -6539,6 +6673,11 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -6562,11 +6701,11 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccounts @@ -6602,7 +6741,7 @@ func (c *Organizations) ListAccountsWithContext(ctx aws.Context, input *ListAcco // // Example iterating over at most 3 pages of a ListAccounts operation. 
// pageNum := 0 // err := client.ListAccountsPages(params, -// func(page *ListAccountsOutput, lastPage bool) bool { +// func(page *organizations.ListAccountsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6634,10 +6773,12 @@ func (c *Organizations) ListAccountsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6693,15 +6834,14 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // // Lists the accounts in an organization that are contained by the specified // target root or organizational unit (OU). If you specify the root, you get -// a list of all the accounts that are not in any OU. If you specify an OU, -// you get a list of all the accounts in only that OU, and not in any child -// OUs. To get a list of all accounts in the organization, use the ListAccounts -// operation. +// a list of all the accounts that aren't in any OU. If you specify an OU, you +// get a list of all the accounts in only that OU and not in any child OUs. +// To get a list of all accounts in the organization, use the ListAccounts operation. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -6737,7 +6877,7 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6745,12 +6885,12 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -6765,6 +6905,11 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -6791,11 +6936,11 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // error. Try again later. 
// // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccountsForParent @@ -6831,7 +6976,7 @@ func (c *Organizations) ListAccountsForParentWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListAccountsForParent operation. // pageNum := 0 // err := client.ListAccountsForParentPages(params, -// func(page *ListAccountsForParentOutput, lastPage bool) bool { +// func(page *organizations.ListAccountsForParentOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6863,10 +7008,12 @@ func (c *Organizations) ListAccountsForParentPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAccountsForParentOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAccountsForParentOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6927,7 +7074,7 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -6963,7 +7110,7 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6971,12 +7118,12 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -6991,6 +7138,11 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. 
// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -7017,11 +7169,11 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListChildren @@ -7057,7 +7209,7 @@ func (c *Organizations) ListChildrenWithContext(ctx aws.Context, input *ListChil // // Example iterating over at most 3 pages of a ListChildren operation. // pageNum := 0 // err := client.ListChildrenPages(params, -// func(page *ListChildrenOutput, lastPage bool) bool { +// func(page *organizations.ListChildrenOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7089,10 +7241,12 @@ func (c *Organizations) ListChildrenPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListChildrenOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListChildrenOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7152,7 +7306,7 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -7188,7 +7342,7 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7196,12 +7350,12 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. 
// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -7216,6 +7370,11 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -7239,11 +7398,11 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeUnsupportedAPIEndpointException "UnsupportedAPIEndpointException" @@ -7282,7 +7441,7 @@ func (c *Organizations) ListCreateAccountStatusWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListCreateAccountStatus operation. // pageNum := 0 // err := client.ListCreateAccountStatusPages(params, -// func(page *ListCreateAccountStatusOutput, lastPage bool) bool { +// func(page *organizations.ListCreateAccountStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7314,10 +7473,12 @@ func (c *Organizations) ListCreateAccountStatusPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCreateAccountStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCreateAccountStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7375,13 +7536,13 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // requesting user. // // Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results -// of this API for only 30 days after changing to that state. After that they -// are deleted and no longer accessible. +// of this API for only 30 days after changing to that state. After that, they're +// deleted and no longer accessible. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called from any account in the organization. 
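The reworked loop in these hunks, for p.Next() { if !fn(...) { break } }, keeps the documented contract: returning false from the callback stops paging, and p.Err() is still returned afterward. A hedged sketch of that early-stop behavior against the handshakes paginator; maxPages and the empty input are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	client := organizations.New(session.Must(session.NewSession()))

	const maxPages = 3 // illustrative cutoff
	pageNum := 0
	err := client.ListHandshakesForAccountPages(&organizations.ListHandshakesForAccountInput{},
		func(page *organizations.ListHandshakesForAccountOutput, lastPage bool) bool {
			pageNum++
			fmt.Printf("page %d: %d handshakes (last=%v)\n", pageNum, len(page.Handshakes), lastPage)
			// Returning false breaks the paginator's loop; any request
			// error is still surfaced through the Pages call's return.
			return pageNum < maxPages
		})
	if err != nil {
		log.Fatal(err)
	}
}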
// @@ -7417,7 +7578,7 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7425,12 +7586,12 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -7445,6 +7606,11 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -7468,11 +7634,11 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount @@ -7508,7 +7674,7 @@ func (c *Organizations) ListHandshakesForAccountWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a ListHandshakesForAccount operation. 
// pageNum := 0 // err := client.ListHandshakesForAccountPages(params, -// func(page *ListHandshakesForAccountOutput, lastPage bool) bool { +// func(page *organizations.ListHandshakesForAccountOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7540,10 +7706,12 @@ func (c *Organizations) ListHandshakesForAccountPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListHandshakesForAccountOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListHandshakesForAccountOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7603,13 +7771,13 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // a handshake. // // Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results -// of this API for only 30 days after changing to that state. After that they -// are deleted and no longer accessible. +// of this API for only 30 days after changing to that state. After that, they're +// deleted and no longer accessible. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -7649,7 +7817,7 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7657,12 +7825,12 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -7677,6 +7845,11 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -7700,11 +7873,11 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. 
+// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization @@ -7740,7 +7913,7 @@ func (c *Organizations) ListHandshakesForOrganizationWithContext(ctx aws.Context // // Example iterating over at most 3 pages of a ListHandshakesForOrganization operation. // pageNum := 0 // err := client.ListHandshakesForOrganizationPages(params, -// func(page *ListHandshakesForOrganizationOutput, lastPage bool) bool { +// func(page *organizations.ListHandshakesForOrganizationOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7772,10 +7945,12 @@ func (c *Organizations) ListHandshakesForOrganizationPagesWithContext(ctx aws.Co }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListHandshakesForOrganizationOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListHandshakesForOrganizationOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7834,7 +8009,7 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -7870,7 +8045,7 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7878,12 +8053,12 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -7898,6 +8073,11 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. 
System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -7924,11 +8104,11 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent @@ -7964,7 +8144,7 @@ func (c *Organizations) ListOrganizationalUnitsForParentWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a ListOrganizationalUnitsForParent operation. // pageNum := 0 // err := client.ListOrganizationalUnitsForParentPages(params, -// func(page *ListOrganizationalUnitsForParentOutput, lastPage bool) bool { +// func(page *organizations.ListOrganizationalUnitsForParentOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7996,10 +8176,12 @@ func (c *Organizations) ListOrganizationalUnitsForParentPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOrganizationalUnitsForParentOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOrganizationalUnitsForParentOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8060,7 +8242,7 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -8102,7 +8284,7 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8110,13 +8292,13 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. 
// -// * INVALID_PATTERN: You provided a value that doesn't match the required +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // // * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't @@ -8130,6 +8312,11 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -8153,11 +8340,11 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents @@ -8193,7 +8380,7 @@ func (c *Organizations) ListParentsWithContext(ctx aws.Context, input *ListParen // // Example iterating over at most 3 pages of a ListParents operation. // pageNum := 0 // err := client.ListParentsPages(params, -// func(page *ListParentsOutput, lastPage bool) bool { +// func(page *organizations.ListParentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8225,10 +8412,12 @@ func (c *Organizations) ListParentsPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListParentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListParentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8287,7 +8476,7 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -8323,7 +8512,7 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. 
// // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8331,12 +8520,12 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -8351,6 +8540,11 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -8374,11 +8568,11 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies @@ -8414,7 +8608,7 @@ func (c *Organizations) ListPoliciesWithContext(ctx aws.Context, input *ListPoli // // Example iterating over at most 3 pages of a ListPolicies operation. // pageNum := 0 // err := client.ListPoliciesPages(params, -// func(page *ListPoliciesOutput, lastPage bool) bool { +// func(page *organizations.ListPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8446,10 +8640,12 @@ func (c *Organizations) ListPoliciesPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8510,7 +8706,7 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. 
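Several of the regenerated doc comments above stress that a List* operation can return an empty page even though NextToken is still non-null. A manual-pagination sketch of that guidance, outside the vendored code, looping until NextToken comes back nil; the SERVICE_CONTROL_POLICY filter value is an illustrative choice:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	client := organizations.New(session.Must(session.NewSession()))

	input := &organizations.ListPoliciesInput{
		Filter: aws.String(organizations.PolicyTypeServiceControlPolicy),
	}
	for {
		out, err := client.ListPolicies(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range out.Policies {
			fmt.Println(aws.StringValue(p.Name))
		}
		// Only a nil NextToken means the listing is complete; an empty
		// Policies slice alone does not.
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}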
// // This operation can be called only from the organization's master account. // @@ -8546,7 +8742,7 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8554,12 +8750,12 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -8574,6 +8770,11 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -8600,11 +8801,11 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // We can't find a root, OU, or account with the TargetId that you specified. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget @@ -8640,7 +8841,7 @@ func (c *Organizations) ListPoliciesForTargetWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListPoliciesForTarget operation. 
// pageNum := 0 // err := client.ListPoliciesForTargetPages(params, -// func(page *ListPoliciesForTargetOutput, lastPage bool) bool { +// func(page *organizations.ListPoliciesForTargetOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8672,10 +8873,12 @@ func (c *Organizations) ListPoliciesForTargetPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPoliciesForTargetOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPoliciesForTargetOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8734,12 +8937,12 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // // Policy types can be enabled and disabled in roots. This is distinct from -// whether they are available in the organization. When you enable all features, +// whether they're available in the organization. When you enable all features, // you make policy types available for use in that organization. Individual // policy types can then be enabled and disabled in a root. To see the availability // of a policy type in an organization, use DescribeOrganization. @@ -8776,7 +8979,7 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8784,12 +8987,12 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -8804,6 +9007,11 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -8827,11 +9035,11 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. 
The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots @@ -8867,7 +9075,7 @@ func (c *Organizations) ListRootsWithContext(ctx aws.Context, input *ListRootsIn // // Example iterating over at most 3 pages of a ListRoots operation. // pageNum := 0 // err := client.ListRootsPages(params, -// func(page *ListRootsOutput, lastPage bool) bool { +// func(page *organizations.ListRootsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8899,10 +9107,240 @@ func (c *Organizations) ListRootsPagesWithContext(ctx aws.Context, input *ListRo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListRootsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListRootsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource +func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Organizations. +// +// Lists tags for the specified resource. +// +// Currently, you can list tags on an account in AWS Organizations. +// +// This operation can be called only from the organization's master account. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ErrCodeTargetNotFoundException "TargetNotFoundException" +// We can't find a root, OU, or account with the TargetId that you specified. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation: +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. +// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. 
+// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * ErrCodeServiceException "ServiceException" +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. +// +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// in the AWS Organizations User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource +func (c *Organizations) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. +// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *organizations.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Organizations) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Organizations) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8956,13 +9394,13 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // ListTargetsForPolicy API operation for AWS Organizations. // -// Lists all the roots, organizational units (OUs), and accounts to which the -// specified policy is attached. +// Lists all the roots, organizational units (OUs), and accounts that the specified +// policy is attached to. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response -// parameter value is nullonly when there are no more results to display. +// parameter value is null only when there are no more results to display. // // This operation can be called only from the organization's master account. // @@ -8998,7 +9436,7 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9006,12 +9444,12 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -9026,6 +9464,11 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -9052,11 +9495,11 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. 
The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy @@ -9092,7 +9535,7 @@ func (c *Organizations) ListTargetsForPolicyWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a ListTargetsForPolicy operation. // pageNum := 0 // err := client.ListTargetsForPolicyPages(params, -// func(page *ListTargetsForPolicyOutput, lastPage bool) bool { +// func(page *organizations.ListTargetsForPolicyOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -9124,10 +9567,12 @@ func (c *Organizations) ListTargetsForPolicyPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTargetsForPolicyOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTargetsForPolicyOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -9209,7 +9654,7 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9217,12 +9662,12 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -9237,6 +9682,11 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -9271,124 +9721,677 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // an organization. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. 
// -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // * ErrCodeConcurrentModificationException "ConcurrentModificationException" // The target of the operation is currently being modified by a different request. // Try again later. // -// * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" -// Your account isn't a member of an organization. To make this request, you -// must use the credentials of an account that belongs to an organization. +// * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ErrCodeServiceException "ServiceException" +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount +func (c *Organizations) MoveAccount(input *MoveAccountInput) (*MoveAccountOutput, error) { + req, out := c.MoveAccountRequest(input) + return out, req.Send() +} + +// MoveAccountWithContext is the same as MoveAccount with the addition of +// the ability to pass a context and additional request options. +// +// See MoveAccount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) MoveAccountWithContext(ctx aws.Context, input *MoveAccountInput, opts ...request.Option) (*MoveAccountOutput, error) { + req, out := c.MoveAccountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveAccountFromOrganization = "RemoveAccountFromOrganization" + +// RemoveAccountFromOrganizationRequest generates a "aws/request.Request" representing the +// client's request for the RemoveAccountFromOrganization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveAccountFromOrganization for more information on using the RemoveAccountFromOrganization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveAccountFromOrganizationRequest method. 
+// req, resp := client.RemoveAccountFromOrganizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RemoveAccountFromOrganization +func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccountFromOrganizationInput) (req *request.Request, output *RemoveAccountFromOrganizationOutput) { + op := &request.Operation{ + Name: opRemoveAccountFromOrganization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveAccountFromOrganizationInput{} + } + + output = &RemoveAccountFromOrganizationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveAccountFromOrganization API operation for AWS Organizations. +// +// Removes the specified account from the organization. +// +// The removed account becomes a standalone account that isn't a member of any +// organization. It's no longer subject to any policies and is responsible for +// its own bill payments. The organization's master account is no longer charged +// for any expenses accrued by the member account after it's removed from the +// organization. +// +// This operation can be called only from the organization's master account. +// Member accounts can remove themselves with LeaveOrganization instead. +// +// You can remove an account from your organization only if the account is configured +// with the information required to operate as a standalone account. When you +// create an account in an organization using the AWS Organizations console, +// API, or CLI commands, the information required of standalone accounts is +// not automatically collected. For an account that you want to make standalone, +// you must accept the end user license agreement (EULA), choose a support plan, +// provide and verify the required contact information, and provide a current +// payment method. AWS uses the payment method to charge for any billable (not +// free tier) AWS activity that occurs while the account isn't attached to an +// organization. To remove an account that doesn't yet have this information, +// you must sign in as the member account and follow the steps at To leave an +// organization when all required account information has not yet been provided +// (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation RemoveAccountFromOrganization for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. 
+// +// * ErrCodeAccountNotFoundException "AccountNotFoundException" +// We can't find an AWS account with the AccountId that you specified, or the +// account whose credentials you used to make this request isn't a member of +// an organization. +// +// * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// The target of the operation is currently being modified by a different request. +// Try again later. +// +// * ErrCodeConstraintViolationException "ConstraintViolationException" +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit. +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation: +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. 
+// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation: +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. 
+// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. +// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * ErrCodeMasterCannotLeaveOrganizationException "MasterCannotLeaveOrganizationException" +// You can't remove a master account from an organization. If you want the master +// account to become a member account in another organization, you must first +// delete the current organization of the master account. +// +// * ErrCodeServiceException "ServiceException" +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. +// +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// in the AWS Organizations User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RemoveAccountFromOrganization +func (c *Organizations) RemoveAccountFromOrganization(input *RemoveAccountFromOrganizationInput) (*RemoveAccountFromOrganizationOutput, error) { + req, out := c.RemoveAccountFromOrganizationRequest(input) + return out, req.Send() +} + +// RemoveAccountFromOrganizationWithContext is the same as RemoveAccountFromOrganization with the addition of +// the ability to pass a context and additional request options. 
+// +// See RemoveAccountFromOrganization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) RemoveAccountFromOrganizationWithContext(ctx aws.Context, input *RemoveAccountFromOrganizationInput, opts ...request.Option) (*RemoveAccountFromOrganizationOutput, error) { + req, out := c.RemoveAccountFromOrganizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/TagResource +func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Organizations. +// +// Adds one or more tags to the specified resource. +// +// Currently, you can tag and untag accounts in AWS Organizations. +// +// This operation can be called only from the organization's master account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// The target of the operation is currently being modified by a different request. +// Try again later. +// +// * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" +// Your account isn't a member of an organization. 
To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ErrCodeTargetNotFoundException "TargetNotFoundException" +// We can't find a root, OU, or account with the TargetId that you specified. +// +// * ErrCodeConstraintViolationException "ConstraintViolationException" +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit. +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation: +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at To leave an organization when +// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. 
+// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation: +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. 
+// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. +// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. // // * ErrCodeServiceException "ServiceException" // AWS Organizations can't complete your request because of an internal service // error. Try again later. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount -func (c *Organizations) MoveAccount(input *MoveAccountInput) (*MoveAccountOutput, error) { - req, out := c.MoveAccountRequest(input) +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. +// +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// in the AWS Organizations User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/TagResource +func (c *Organizations) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) return out, req.Send() } -// MoveAccountWithContext is the same as MoveAccount with the addition of +// TagResourceWithContext is the same as TagResource with the addition of // the ability to pass a context and additional request options. // -// See MoveAccount for details on how to use this API operation. +// See TagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
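Because TagResource and UntagResource are symmetric, a compact round trip shows both before the implementations that follow. As with the earlier sketch, this is illustrative only: the client setup, account ID, and tag key/value are assumptions, while TagResourceWithContext, UntagResourceWithContext, and their input types are the APIs this hunk vendors.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	account := aws.String("123456789012") // hypothetical member account ID

	// Attach a tag to the account...
	_, err := svc.TagResourceWithContext(ctx, &organizations.TagResourceInput{
		ResourceId: account,
		Tags: []*organizations.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// ...and remove it again by key with the matching untag call.
	_, err = svc.UntagResourceWithContext(ctx, &organizations.UntagResourceInput{
		ResourceId: account,
		TagKeys:    []*string{aws.String("team")},
	})
	if err != nil {
		log.Fatal(err)
	}
}

Per the doc comments above, accounts are the only taggable resource at this SDK version, and both calls must be made from the organization's master account.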
-func (c *Organizations) MoveAccountWithContext(ctx aws.Context, input *MoveAccountInput, opts ...request.Option) (*MoveAccountOutput, error) { - req, out := c.MoveAccountRequest(input) +func (c *Organizations) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveAccountFromOrganization = "RemoveAccountFromOrganization" +const opUntagResource = "UntagResource" -// RemoveAccountFromOrganizationRequest generates a "aws/request.Request" representing the -// client's request for the RemoveAccountFromOrganization operation. The "output" return +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveAccountFromOrganization for more information on using the RemoveAccountFromOrganization +// See UntagResource for more information on using the UntagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveAccountFromOrganizationRequest method. -// req, resp := client.RemoveAccountFromOrganizationRequest(params) +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RemoveAccountFromOrganization -func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccountFromOrganizationInput) (req *request.Request, output *RemoveAccountFromOrganizationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UntagResource +func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { op := &request.Operation{ - Name: opRemoveAccountFromOrganization, + Name: opUntagResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RemoveAccountFromOrganizationInput{} + input = &UntagResourceInput{} } - output = &RemoveAccountFromOrganizationOutput{} + output = &UntagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// RemoveAccountFromOrganization API operation for AWS Organizations. +// UntagResource API operation for AWS Organizations. // -// Removes the specified account from the organization. +// Removes a tag from the specified resource. // -// The removed account becomes a stand-alone account that is not a member of -// any organization. It is no longer subject to any policies and is responsible -// for its own bill payments. The organization's master account is no longer -// charged for any expenses accrued by the member account after it is removed -// from the organization. +// Currently, you can tag and untag accounts in AWS Organizations. 
// // This operation can be called only from the organization's master account. -// Member accounts can remove themselves with LeaveOrganization instead. -// -// You can remove an account from your organization only if the account is configured -// with the information required to operate as a standalone account. When you -// create an account in an organization using the AWS Organizations console, -// API, or CLI commands, the information required of standalone accounts is -// not automatically collected. For an account that you want to make standalone, -// you must accept the End User License Agreement (EULA), choose a support plan, -// provide and verify the required contact information, and provide a current -// payment method. AWS uses the payment method to charge for any billable (not -// free tier) AWS activity that occurs while the account is not attached to -// an organization. To remove an account that does not yet have this information, -// you must sign in as the member account and follow the steps at To leave -// an organization when all required account information has not yet been provided -// (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation RemoveAccountFromOrganization for usage and error information. +// API operation UntagResource for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDeniedException "AccessDeniedException" @@ -9398,18 +10401,16 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. // -// * ErrCodeAccountNotFoundException "AccountNotFoundException" -// We can't find an AWS account with the AccountId that you specified, or the -// account whose credentials you used to make this request isn't a member of -// an organization. +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// The target of the operation is currently being modified by a different request. +// Try again later. // // * ErrCodeAWSOrganizationsNotInUseException "AWSOrganizationsNotInUseException" // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * ErrCodeConcurrentModificationException "ConcurrentModificationException" -// The target of the operation is currently being modified by a different request. -// Try again later. +// * ErrCodeTargetNotFoundException "TargetNotFoundException" +// We can't find a root, OU, or account with the TargetId that you specified. // // * ErrCodeConstraintViolationException "ConstraintViolationException" // Performing this operation violates a minimum or maximum value limit. For @@ -9422,46 +10423,6 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. 
If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. -// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -9476,23 +10437,24 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation -// with this member account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. -// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. 
// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. @@ -9509,6 +10471,45 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. 
+// // * ErrCodeInvalidInputException "InvalidInputException" // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that @@ -9522,7 +10523,7 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9530,12 +10531,12 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -9550,6 +10551,11 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -9568,40 +10574,35 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * ErrCodeMasterCannotLeaveOrganizationException "MasterCannotLeaveOrganizationException" -// You can't remove a master account from an organization. If you want the master -// account to become a member account in another organization, you must first -// delete the current organization of the master account. -// // * ErrCodeServiceException "ServiceException" // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RemoveAccountFromOrganization -func (c *Organizations) RemoveAccountFromOrganization(input *RemoveAccountFromOrganizationInput) (*RemoveAccountFromOrganizationOutput, error) { - req, out := c.RemoveAccountFromOrganizationRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UntagResource +func (c *Organizations) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) return out, req.Send() } -// RemoveAccountFromOrganizationWithContext is the same as RemoveAccountFromOrganization with the addition of +// UntagResourceWithContext is the same as UntagResource with the addition of // the ability to pass a context and additional request options. // -// See RemoveAccountFromOrganization for details on how to use this API operation. +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) RemoveAccountFromOrganizationWithContext(ctx aws.Context, input *RemoveAccountFromOrganizationInput, opts ...request.Option) (*RemoveAccountFromOrganizationOutput, error) { - req, out := c.RemoveAccountFromOrganizationRequest(input) +func (c *Organizations) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -9651,7 +10652,7 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // UpdateOrganizationalUnit API operation for AWS Organizations. // -// Renames the specified organizational unit (OU). The ID and ARN do not change. +// Renames the specified organizational unit (OU). The ID and ARN don't change. // The child OUs and accounts remain in place, and any attached policies of // the OU remain attached. // @@ -9696,7 +10697,7 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9704,12 +10705,12 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -9724,6 +10725,11 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. 
// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -9750,11 +10756,11 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UpdateOrganizationalUnit @@ -9823,9 +10829,9 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // UpdatePolicy API operation for AWS Organizations. // -// Updates an existing policy with a new name, description, or content. If any -// parameter is not supplied, that value remains unchanged. Note that you cannot -// change a policy's type. +// Updates an existing policy with a new name, description, or content. If you +// don't supply any parameter, that value remains unchanged. You can't change +// a policy's type. // // This operation can be called only from the organization's master account. // @@ -9863,46 +10869,6 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. -// -// Or the number of invitations that you tried to send would cause you to exceed -// the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. -// -// Deleted and closed accounts still count toward your limit. -// -// If you get receive this exception when running a command immediately after -// creating the organization, wait one hour and try again. If after an hour -// it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. -// -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs -// that you can have in an organization. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. -// -// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. 
-// An organization that supports only consolidated billing features can't -// perform this operation. -// -// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of -// policies that you can have in an organization. -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the -// number of policies of a certain type that can be attached to an entity -// at one time. -// -// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity that would cause the entity to have fewer than the -// minimum number of policies of a certain type required. -// // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -9917,6 +10883,40 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you receive this exception when running +// a command immediately after creating the organization, wait one hour and +// try again. If after an hour it continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account.
Follow the steps at To leave @@ -9924,6 +10924,13 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// // * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation // with this member account, you first must associate a valid payment instrument, // such as a credit card, with the account. Follow the steps at To leave @@ -9931,24 +10938,23 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number -// of accounts that you can create in one day. +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // -// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account -// in this organization, you first must migrate the organization's master -// account to the marketplace that corresponds to the master account's address. -// For example, accounts with India addresses must be associated with the -// AISPL marketplace. All accounts in an organization must be associated -// with the same marketplace. +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. // -// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master -// account. Then try the operation again. +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. // -// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the -// master account must have an associated account in the AWS GovCloud (US-West) -// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) -// in the AWS GovCloud User Guide. +// * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of +// policies that you can have in an organization. // // * ErrCodeDuplicatePolicyException "DuplicatePolicyException" // A policy with the same name already exists. @@ -9966,7 +10972,7 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // // * INPUT_REQUIRED: You must include a value for all required parameters. // -// * INVALID_ENUM: You specified a value that isn't valid for that parameter. +// * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
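To make the UpdatePolicy semantics described above concrete, here is a minimal sketch, not taken from the vendored code, assuming valid credentials and a placeholder policy ID. Only Description is supplied, so the existing Name and Content remain unchanged, and the policy's type can never be changed:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// Organizations is a global service; requests are sent to us-east-1.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := organizations.New(sess)

	// Only Description is set; omitted parameters keep their current values.
	out, err := svc.UpdatePolicy(&organizations.UpdatePolicyInput{
		PolicyId:    aws.String("p-examplepolicyid"), // hypothetical policy ID
		Description: aws.String("Updated description"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}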
@@ -9974,12 +10980,12 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. -// // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. // +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -9994,6 +11000,11 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -10026,11 +11037,11 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // error. Try again later. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// You've sent too many requests in too short a period of time. The limit helps -// protect against denial-of-service attacks. Try again later. +// You have sent too many requests in too short a period of time. The limit +// helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect Organizations, see Limits of AWS Organizations -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) +// For information on limits that affect AWS Organizations, see Limits of AWS +// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UpdatePolicy @@ -10230,15 +11241,15 @@ type AttachPolicyInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a target ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Account: a string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // TargetId is a required field TargetId *string `type:"string" required:"true"` @@ -10569,6 +11580,10 @@ type CreateAccountStatus struct { // * EMAIL_ALREADY_EXISTS: The account could not be created because another // AWS account with that email address already exists. 
// + // * GOVCLOUD_ACCOUNT_ALREADY_EXISTS: The account in the AWS GovCloud (US) + // Region could not be created because this Region already includes an account + // with that email address. + // // * INVALID_ADDRESS: The account could not be created because the address // you provided is not valid. // @@ -10579,6 +11594,8 @@ type CreateAccountStatus struct { // failure. Try again later. If the problem persists, contact Customer Support. FailureReason *string `type:"string" enum:"CreateAccountFailureReason"` + // If the account was created successfully, the unique identifier (ID) of the + // new account in the AWS GovCloud (US) Region. GovCloudAccountId *string `type:"string"` // The unique identifier (ID) that references this request. You get this value @@ -10799,13 +11816,11 @@ type CreateOrganizationInput struct { // * CONSOLIDATED_BILLING: All member accounts have their bills consolidated // to and paid by the master account. For more information, see Consolidated // billing (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-cb-only) - // in the AWS Organizations User Guide. - // - // The consolidated billing feature subset isn't available for organizations - // in the AWS GovCloud (US) Region. + // in the AWS Organizations User Guide. The consolidated billing feature + // subset isn't available for organizations in the AWS GovCloud (US) Region. // // * ALL: In addition to all the features supported by the consolidated billing - // feature set, the master account can also apply any type of policy to any + // feature set, the master account can also apply any policy type to any // member account in the organization. For more information, see All features // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-all) // in the AWS Organizations User Guide. @@ -10859,19 +11874,19 @@ type CreateOrganizationalUnitInput struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The unique identifier (ID) of the parent root or OU in which you want to - // create the new OU. + // The unique identifier (ID) of the parent root or OU that you want to create + // the new OU in. // // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // ParentId is a required field ParentId *string `type:"string" required:"true"` @@ -11343,7 +12358,7 @@ type DescribeCreateAccountStatusInput struct { // the ID from the response to an earlier CreateAccount request, or from the // ListCreateAccountStatus operation. 
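The root, account, and OU ID formats quoted in these field comments can be sanity-checked on the client before a call is made. A small sketch; the patterns below are transcribed from the prose descriptions, not taken from the SDK, and the sample IDs are hypothetical:

package main

import (
	"fmt"
	"regexp"
)

// Client-side checks mirroring the described ID shapes: "r-" plus 4-32
// lower-case letters or digits, exactly 12 digits, and "ou-" plus the
// root fragment, a dash, and 8-32 further lower-case letters or digits.
var (
	rootID    = regexp.MustCompile(`^r-[0-9a-z]{4,32}$`)
	accountID = regexp.MustCompile(`^[0-9]{12}$`)
	ouID      = regexp.MustCompile(`^ou-[0-9a-z]{4,32}-[0-9a-z]{8,32}$`)
)

func main() {
	for _, id := range []string{"r-examp", "123456789012", "ou-examp-exampleou12", "bad-id"} {
		fmt.Printf("%-22s root=%-5v account=%-5v ou=%v\n",
			id, rootID.MatchString(id), accountID.MatchString(id), ouID.MatchString(id))
	}
}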
// - // The regex pattern (http://wikipedia.org/wiki/regex) for an create account + // The regex pattern (http://wikipedia.org/wiki/regex) for a create account // request ID string requires "car-" followed by from 8 to 32 lower-case letters // or digits. // @@ -11650,22 +12665,22 @@ type DetachPolicyInput struct { // PolicyId is a required field PolicyId *string `type:"string" required:"true"` - // The unique identifier (ID) of the root, OU, or account from which you want - // to detach the policy. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, + // The unique identifier (ID) of the root, OU, or account that you want to detach + // the policy from. You can get the ID from the ListRoots, ListOrganizationalUnitsForParent, // or ListAccounts operations. // // The regex pattern (http://wikipedia.org/wiki/regex) for a target ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Account: a string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // TargetId is a required field TargetId *string `type:"string" required:"true"` @@ -12381,16 +13396,16 @@ type InviteAccountToOrganizationInput struct { // The identifier (ID) of the AWS account that you want to invite to join your // organization. This is a JSON object that contains the following elements: // - // { "Type": "ACCOUNT", "Id": "" } + // { "Type": "ACCOUNT", "Id": "< account id number >" } // // If you use the AWS CLI, you can submit this as a single string, similar to // the following example: // // --target Id=123456789012,Type=ACCOUNT // - // If you specify "Type": "ACCOUNT", then you must provide the AWS account ID - // number as the Id. If you specify "Type": "EMAIL", then you must specify the - // email address that is associated with the account. + // If you specify "Type": "ACCOUNT", you must provide the AWS account ID number + // as the Id. If you specify "Type": "EMAIL", you must specify the email address + // that is associated with the account. // // --target Id=diego@example.com,Type=EMAIL // @@ -12814,13 +13829,13 @@ type ListChildrenInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. 
+ // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // ParentId is a required field ParentId *string `type:"string" required:"true"` @@ -12936,7 +13951,7 @@ type ListCreateAccountStatusInput struct { NextToken *string `type:"string"` // A list of one or more states that you want included in the response. If this - // parameter is not present, then all requests are included in the response. + // parameter isn't present, all requests are included in the response. States []*string `type:"list"` } @@ -13259,13 +14274,13 @@ type ListOrganizationalUnitsForParentInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // ParentId is a required field ParentId *string `type:"string" required:"true"` @@ -13355,17 +14370,17 @@ type ListParentsInput struct { _ struct{} `type:"structure"` // The unique identifier (ID) of the OU or account whose parent containers you - // want to list. Do not specify a root. + // want to list. Don't specify a root. // // The regex pattern (http://wikipedia.org/wiki/regex) for a child ID string // requires one of the following: // - // * Account: a string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that contains - // the OU) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // contains the OU) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // ChildId is a required field ChildId *string `type:"string" required:"true"` @@ -13499,15 +14514,15 @@ type ListPoliciesForTargetInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a target ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Account: a string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. 
// - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // TargetId is a required field TargetId *string `type:"string" required:"true"` @@ -13683,7 +14698,7 @@ type ListPoliciesOutput struct { NextToken *string `type:"string"` // A list of policies that match the filter criteria in the request. The output - // list does not include the policy contents. To see the content for a policy, + // list doesn't include the policy contents. To see the content for a policy, // see DescribePolicy. Policies []*PolicySummary `type:"list"` } @@ -13802,6 +14817,92 @@ func (s *ListRootsOutput) SetRoots(v []*Root) *ListRootsOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Use this parameter if you receive a NextToken response in a previous request + // that indicates that there is more output available. Set it to the value of + // the previous call's NextToken response to indicate where the output should + // continue from. + NextToken *string `type:"string"` + + // The ID of the resource that you want to retrieve tags for. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ListTagsForResourceInput) SetResourceId(v string) *ListTagsForResourceInput { + s.ResourceId = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // If present, this value indicates that there is more output available than + // is included in the current response. Use this value in the NextToken request + // parameter in a subsequent call to the operation to get the next part of the + // output. You should repeat this until the NextToken response element comes + // back as null. + NextToken *string `type:"string"` + + // The tags that are assigned to the resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type ListTargetsForPolicyInput struct { _ struct{} `type:"structure"` @@ -13822,7 +14923,7 @@ type ListTargetsForPolicyInput struct { // continue from. NextToken *string `type:"string"` - // The unique identifier (ID) of the policy for which you want to know its attachments. + // The unique identifier (ID) of the policy whose attachments you want to know. // // The regex pattern (http://wikipedia.org/wiki/regex) for a policy ID string // requires "p-" followed by from 8 to 128 lower-case letters or digits. @@ -13929,13 +15030,13 @@ type MoveAccountInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // DestinationParentId is a required field DestinationParentId *string `type:"string" required:"true"` @@ -13946,13 +15047,13 @@ type MoveAccountInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root: a string that begins with "r-" followed by from 4 to 32 lower-case + // * Root - A string that begins with "r-" followed by from 4 to 32 lower-case // letters or digits. // - // * Organizational unit (OU): a string that begins with "ou-" followed by - // from 4 to 32 lower-case letters or digits (the ID of the root that the - // OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case - // letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lower-case letters or digits (the ID of the root that + // the OU is in) followed by a second "-" dash and from 8 to 32 additional + // lower-case letters or digits. // // SourceParentId is a required field SourceParentId *string `type:"string" required:"true"` @@ -14585,6 +15686,209 @@ func (s *Root) SetPolicyTypes(v []*PolicyTypeSummary) *Root { return s } +// A custom key-value pair associated with a resource such as an account within +// your organization. +type Tag struct { + _ struct{} `type:"structure"` + + // The key identifier, or name, of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The string value that's associated with the key of the tag. You can set the + // value of a tag to an empty string, but you can't set the value of a tag to + // null. 
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the resource to add a tag to. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // The tag to add to the specified resource. Specifying the tag key is required. + // You can set the value of a tag to an empty string, but you can't set the + // value of a tag to null. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *TagResourceInput) SetResourceId(v string) *TagResourceInput { + s.ResourceId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the resource to remove the tag from. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // The tag to remove from the specified resource. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *UntagResourceInput) SetResourceId(v string) *UntagResourceInput { + s.ResourceId = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateOrganizationalUnitInput struct { _ struct{} `type:"structure"` @@ -14877,6 +16181,12 @@ const ( // ConstraintViolationExceptionReasonWaitPeriodActive is a ConstraintViolationExceptionReason enum value ConstraintViolationExceptionReasonWaitPeriodActive = "WAIT_PERIOD_ACTIVE" + + // ConstraintViolationExceptionReasonMaxTagLimitExceeded is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonMaxTagLimitExceeded = "MAX_TAG_LIMIT_EXCEEDED" + + // ConstraintViolationExceptionReasonTagPolicyViolation is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonTagPolicyViolation = "TAG_POLICY_VIOLATION" ) const ( @@ -15058,6 +16368,9 @@ const ( // InvalidInputExceptionReasonInvalidRoleName is a InvalidInputExceptionReason enum value InvalidInputExceptionReasonInvalidRoleName = "INVALID_ROLE_NAME" + + // InvalidInputExceptionReasonInvalidSystemTagsParameter is a InvalidInputExceptionReason enum value + InvalidInputExceptionReasonInvalidSystemTagsParameter = "INVALID_SYSTEM_TAGS_PARAMETER" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/doc.go index d23748a00db..0cd7cc997ce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/doc.go @@ -3,116 +3,7 @@ // Package organizations provides the client and types for making API // requests to AWS Organizations. // -// AWS Organizations is a web service that enables you to consolidate your multiple -// AWS accounts into an organization and centrally manage your accounts and -// their resources. -// -// This guide provides descriptions of the Organizations API. For more information -// about using this service, see the AWS Organizations User Guide (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html). 
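The TagResource, ListTagsForResource, and UntagResource operations added in this diff compose into a simple round trip. A sketch under stated assumptions: credentials are available, the resource ID is a placeholder member-account ID, and, as the field comments above note, a tag value may be the empty string but never nil. The pagination loop repeats until NextToken comes back nil:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := organizations.New(sess)
	resourceID := aws.String("123456789012") // hypothetical member account ID

	// Attach a tag; an empty string is a legal value, nil is not.
	if _, err := svc.TagResource(&organizations.TagResourceInput{
		ResourceId: resourceID,
		Tags:       []*organizations.Tag{{Key: aws.String("team"), Value: aws.String("")}},
	}); err != nil {
		log.Fatal(err)
	}

	// Page through the tags until NextToken comes back nil.
	var token *string
	for {
		out, err := svc.ListTagsForResource(&organizations.ListTagsForResourceInput{
			ResourceId: resourceID,
			NextToken:  token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range out.Tags {
			fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
		}
		if out.NextToken == nil {
			break
		}
		token = out.NextToken
	}

	// Remove the tag by key.
	if _, err := svc.UntagResource(&organizations.UntagResourceInput{
		ResourceId: resourceID,
		TagKeys:    []*string{aws.String("team")},
	}); err != nil {
		log.Fatal(err)
	}
}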
-// -// API Version -// -// This version of the Organizations API Reference documents the Organizations -// API version 2016-11-28. -// -// As an alternative to using the API directly, you can use one of the AWS SDKs, -// which consist of libraries and sample code for various programming languages -// and platforms (Java, Ruby, .NET, iOS, Android, and more). The SDKs provide -// a convenient way to create programmatic access to AWS Organizations. For -// example, the SDKs take care of cryptographically signing requests, managing -// errors, and retrying requests automatically. For more information about the -// AWS SDKs, including how to download and install them, see Tools for Amazon -// Web Services (http://aws.amazon.com/tools/). -// -// We recommend that you use the AWS SDKs to make programmatic API calls to -// Organizations. However, you also can use the Organizations Query API to make -// direct calls to the Organizations web service. To learn more about the Organizations -// Query API, see Making Query Requests (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_query-requests.html) -// in the AWS Organizations User Guide. Organizations supports GET and POST -// requests for all actions. That is, the API does not require you to use GET -// for some actions and POST for others. However, GET requests are subject to -// the limitation size of a URL. Therefore, for operations that require larger -// sizes, use a POST request. -// -// Signing Requests -// -// When you send HTTP requests to AWS, you must sign the requests so that AWS -// can identify who sent them. You sign requests with your AWS access key, which -// consists of an access key ID and a secret access key. We strongly recommend -// that you do not create an access key for your root account. Anyone who has -// the access key for your root account has unrestricted access to all the resources -// in your account. Instead, create an access key for an IAM user account that -// has administrative privileges. As another option, use AWS Security Token -// Service to generate temporary security credentials, and use those credentials -// to sign requests. -// -// To sign requests, we recommend that you use Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// If you have an existing application that uses Signature Version 2, you do -// not have to update it to use Signature Version 4. However, some operations -// now require Signature Version 4. The documentation for operations that require -// version 4 indicate this requirement. -// -// When you use the AWS Command Line Interface (AWS CLI) or one of the AWS SDKs -// to make requests to AWS, these tools automatically sign the requests for -// you with the access key that you specify when you configure the tools. -// -// In this release, each organization can have only one root. In a future release, -// a single organization will support multiple roots. -// -// Support and Feedback for AWS Organizations -// -// We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com -// (mailto:feedback-awsorganizations@amazon.com) or post your feedback and questions -// in the AWS Organizations support forum (http://forums.aws.amazon.com/forum.jspa?forumID=219). -// For more information about the AWS support forums, see Forums Help (http://forums.aws.amazon.com/help.jspa). 
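The overview comment being removed above still describes real behavior: the SDK signs requests (Signature Version 4) automatically, and, per the endpoint notes just below, Organizations calls must target the us-east-1 region. A minimal sketch, assuming credentials are available from the environment or shared config:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// The session resolves credentials; the SDK handles SigV4 signing,
	// retries, and error unmarshaling on every request.
	sess := session.Must(session.NewSession())

	// Pin the region, since Organizations accepts API calls only in us-east-1.
	svc := organizations.New(sess, aws.NewConfig().WithRegion("us-east-1"))

	out, err := svc.DescribeOrganization(&organizations.DescribeOrganizationInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}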
-// -// Endpoint to Call When Using the CLI or the AWS API -// -// For the current release of Organizations, you must specify the us-east-1 -// region for all AWS API and CLI calls. You can do this in the CLI by using -// these parameters and commands: -// -// * Use the following parameter with each command to specify both the endpoint -// and its region: -// -// --endpoint-url https://organizations.us-east-1.amazonaws.com -// -// * Use the default endpoint, but configure your default region with this -// command: -// -// aws configure set default.region us-east-1 -// -// * Use the following parameter with each command to specify the endpoint: -// -// --region us-east-1 -// -// For the various SDKs used to call the APIs, see the documentation for the -// SDK of interest to learn how to direct the requests to a specific endpoint. -// For more information, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. -// -// How examples are presented -// -// The JSON returned by the AWS Organizations service as response to your requests -// is returned as a single long string without line breaks or formatting whitespace. -// Both line breaks and whitespace are included in the examples in this guide -// to improve readability. When example input parameters also would result in -// long strings that would extend beyond the screen, we insert line breaks to -// enhance readability. You should always submit the input as a single JSON -// text string. -// -// Recording API Requests -// -// AWS Organizations supports AWS CloudTrail, a service that records AWS API -// calls for your AWS account and delivers log files to an Amazon S3 bucket. -// By using information collected by AWS CloudTrail, you can determine which -// requests were successfully made to Organizations, who made the request, when -// it was made, and so on. For more about AWS Organizations and its support -// for AWS CloudTrail, see Logging AWS Organizations Events with AWS CloudTrail -// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html#orgs_cloudtrail-integration) -// in the AWS Organizations User Guide. To learn more about CloudTrail, including -// how to turn it on and find your log files, see the AWS CloudTrail User Guide -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// AWS Organizations // // See https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go index 5a0631d0f8c..8c3d7b05803 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go @@ -80,46 +80,6 @@ const ( // Some of the reasons in the following list might not be applicable to this // specific API or operation: // - // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on - // the number of accounts in an organization. If you need more accounts, - // contact AWS Support (https://console.aws.amazon.com/support/home#/) to - // request an increase in your limit. - // - // Or the number of invitations that you tried to send would cause you to exceed - // the limit of accounts in your organization. 
Send fewer invitations or - // contact AWS Support to request an increase in the number of accounts. - // - // Deleted and closed accounts still count toward your limit. - // - // If you get receive this exception when running a command immediately after - // creating the organization, wait one hour and try again. If after an hour - // it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). - // - // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of - // handshakes that you can send in one day. - // - // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs - // that you can have in an organization. - // - // * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is - // too many levels deep. - // - // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation - // that requires the organization to be configured to support all features. - // An organization that supports only consolidated billing features can't - // perform this operation. - // - // * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of - // policies that you can have in an organization. - // - // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the - // number of policies of a certain type that can be attached to an entity - // at one time. - // - // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a - // policy from an entity that would cause the entity to have fewer than the - // minimum number of policies of a certain type required. - // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the @@ -134,23 +94,24 @@ const ( // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // - // * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization - // with this master account, you first must associate a valid payment instrument, - // such as a credit card, with the account. Follow the steps at To leave - // an organization when all required account information has not yet been - // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) - // in the AWS Organizations User Guide. - // - // * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation - // with this member account, you first must associate a valid payment instrument, - // such as a credit card, with the account. Follow the steps at To leave - // an organization when all required account information has not yet been - // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) - // in the AWS Organizations User Guide. - // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number // of accounts that you can create in one day. // + // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on + // the number of accounts in an organization. If you need more accounts, + // contact AWS Support (https://console.aws.amazon.com/support/home#/) to + // request an increase in your limit. 

Or the number of invitations that you + // tried to send would cause you to exceed the limit of accounts in your + // organization. Send fewer invitations or contact AWS Support to request + // an increase in the number of accounts. Deleted and closed accounts still + // count toward your limit. If you get receive this exception when running + // a command immediately after creating the organization, wait one hour and + // try again. If after an hour it continues to fail with this error, contact + // AWS Support (https://console.aws.amazon.com/support/home#/). + // + // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of + // handshakes that you can send in one day. + // // * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account // in this organization, you first must migrate the organization's master // account to the marketplace that corresponds to the master account's address. @@ -166,6 +127,45 @@ const ( // master account must have an associated account in the AWS GovCloud (US-West) // Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. + // + // * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization + // with this master account, you first must associate a valid payment instrument, + // such as a credit card, with the account. Follow the steps at To leave + // an organization when all required account information has not yet been + // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) + // in the AWS Organizations User Guide. + // + // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the + // number of policies of a certain type that can be attached to an entity + // at one time. + // + // * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed + // on this resource. + // + // * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation + // with this member account, you first must associate a valid payment instrument, + // such as a credit card, with the account. Follow the steps at To leave + // an organization when all required account information has not yet been + // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) + // in the AWS Organizations User Guide. + // + // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a + // policy from an entity that would cause the entity to have fewer than the + // minimum number of policies of a certain type required. + // + // * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is + // too many levels deep. + // + // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation + // that requires the organization to be configured to support all features. + // An organization that supports only consolidated billing features can't + // perform this operation. + // + // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs + // that you can have in an organization. + // + // * POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of + // policies that you can have in an organization. 
ErrCodeConstraintViolationException = "ConstraintViolationException" // ErrCodeCreateAccountStatusNotFoundException for service response error code @@ -243,29 +243,23 @@ const ( // // * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on // the number of accounts in an organization. Note that deleted and closed - // accounts still count toward your limit. - // - // If you get this exception immediately after creating the organization, wait - // one hour and try again. If after an hour it continues to fail with this - // error, contact AWS Support (https://console.aws.amazon.com/support/home#/). - // - // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of - // handshakes that you can send in one day. + // accounts still count toward your limit. If you get this exception immediately + // after creating the organization, wait one hour and try again. If after + // an hour it continues to fail with this error, contact AWS Support (https://console.aws.amazon.com/support/home#/). // // * ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because // the invited account is already a member of an organization. // - // * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid - // because the organization has already enabled all features. + // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of + // handshakes that you can send in one day. // // * INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations // to join an organization while it's in the process of enabling all features. // You can resume inviting accounts after you finalize the process when all // accounts have agreed to the change. // - // * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an - // account that doesn't have a payment instrument, such as a credit card, - // associated with it. + // * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid + // because the organization has already enabled all features. // // * ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because // the account is from a different marketplace than the accounts in the organization. @@ -275,6 +269,10 @@ const ( // // * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to // change the membership of an account too quickly after its previous change. + // + // * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an + // account that doesn't have a payment instrument, such as a credit card, + // associated with it. ErrCodeHandshakeConstraintViolationException = "HandshakeConstraintViolationException" // ErrCodeHandshakeNotFoundException for service response error code @@ -306,7 +304,7 @@ const ( // // * INPUT_REQUIRED: You must include a value for all required parameters. // - // * INVALID_ENUM: You specified a value that isn't valid for that parameter. + // * INVALID_ENUM: You specified an invalid value. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -314,12 +312,12 @@ const ( // * INVALID_LIST_MEMBER: You provided a list to a parameter that contains // at least one invalid value. // - // * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, - // organization, or email) as a party. - // // * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter // from the response to a previous call of the operation. 
// + // * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, + // organization, or email) as a party. + // // * INVALID_PATTERN: You provided a value that doesn't match the required // pattern. // @@ -334,6 +332,11 @@ const ( // // * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. // + // * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system + // tag. You can’t add, edit, or delete system tag keys because they're + // reserved for AWS use. System tags don’t count against your tags per + // resource limit. + // // * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter // for the operation. // @@ -469,11 +472,11 @@ const ( // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". // - // You've sent too many requests in too short a period of time. The limit helps - // protect against denial-of-service attacks. Try again later. + // You have sent too many requests in too short a period of time. The limit + // helps protect against denial-of-service attacks. Try again later. // - // For information on limits that affect Organizations, see Limits of AWS Organizations - // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) + // For information on limits that affect AWS Organizations, see Limits of AWS + // Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) // in the AWS Organizations User Guide. ErrCodeTooManyRequestsException = "TooManyRequestsException" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go index 565c1715f3c..37d5a2dcd4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go @@ -46,11 +46,11 @@ const ( // svc := organizations.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Organizations { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Organizations { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Organizations { svc := &Organizations{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go new file mode 100644 index 00000000000..06ea030a9df --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go @@ -0,0 +1,10197 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package personalize + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateBatchInferenceJob = "CreateBatchInferenceJob" + +// CreateBatchInferenceJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateBatchInferenceJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBatchInferenceJob for more information on using the CreateBatchInferenceJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBatchInferenceJobRequest method. +// req, resp := client.CreateBatchInferenceJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateBatchInferenceJob +func (c *Personalize) CreateBatchInferenceJobRequest(input *CreateBatchInferenceJobInput) (req *request.Request, output *CreateBatchInferenceJobOutput) { + op := &request.Operation{ + Name: opCreateBatchInferenceJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBatchInferenceJobInput{} + } + + output = &CreateBatchInferenceJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBatchInferenceJob API operation for Amazon Personalize. +// +// Creates a batch inference job. The operation can handle up to 50 million +// records and the input file must be in JSON format. For more information, +// see recommendations-batch. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateBatchInferenceJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateBatchInferenceJob +func (c *Personalize) CreateBatchInferenceJob(input *CreateBatchInferenceJobInput) (*CreateBatchInferenceJobOutput, error) { + req, out := c.CreateBatchInferenceJobRequest(input) + return out, req.Send() +} + +// CreateBatchInferenceJobWithContext is the same as CreateBatchInferenceJob with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateBatchInferenceJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateBatchInferenceJobWithContext(ctx aws.Context, input *CreateBatchInferenceJobInput, opts ...request.Option) (*CreateBatchInferenceJobOutput, error) { + req, out := c.CreateBatchInferenceJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCampaign = "CreateCampaign" + +// CreateCampaignRequest generates a "aws/request.Request" representing the +// client's request for the CreateCampaign operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCampaign for more information on using the CreateCampaign +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCampaignRequest method. +// req, resp := client.CreateCampaignRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateCampaign +func (c *Personalize) CreateCampaignRequest(input *CreateCampaignInput) (req *request.Request, output *CreateCampaignOutput) { + op := &request.Operation{ + Name: opCreateCampaign, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCampaignInput{} + } + + output = &CreateCampaignOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCampaign API operation for Amazon Personalize. +// +// Creates a campaign by deploying a solution version. When a client calls the +// GetRecommendations (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html) +// and GetPersonalizedRanking (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetPersonalizedRanking.html) +// APIs, a campaign is specified in the request. +// +// Minimum Provisioned TPS and Auto-Scaling +// +// A transaction is a single GetRecommendations or GetPersonalizedRanking call. +// Transactions per second (TPS) is the throughput and unit of billing for Amazon +// Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the +// baseline throughput provisioned by Amazon Personalize, and thus, the minimum +// billing charge. If your TPS increases beyond minProvisionedTPS, Amazon Personalize +// auto-scales the provisioned capacity up and down, but never below minProvisionedTPS, +// to maintain a 70% utilization. There's a short time delay while the capacity +// is increased that might cause loss of transactions. It's recommended to start +// with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, +// and then increase the minProvisionedTPS as necessary. 
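// Editor's note: a hedged sketch of the minProvisionedTPS guidance above
// (start low, let auto-scaling handle bursts); svc is the client from the
// earlier sketch and the ARN and name are hypothetical:
//
//	out, err := svc.CreateCampaign(&personalize.CreateCampaignInput{
//		Name:               aws.String("example-campaign"),
//		SolutionVersionArn: aws.String("arn:aws:personalize:us-east-1:123456789012:solution/example/1"),
//		MinProvisionedTPS:  aws.Int64(1), // baseline capacity and minimum billing charge
//	})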
+// +// Status +// +// A campaign can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING > DELETE IN_PROGRESS +// +// To get the campaign status, call DescribeCampaign. +// +// Wait until the status of the campaign is ACTIVE before asking the campaign +// for recommendations. +// +// Related APIs +// +// * ListCampaigns +// +// * DescribeCampaign +// +// * UpdateCampaign +// +// * DeleteCampaign +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateCampaign for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateCampaign +func (c *Personalize) CreateCampaign(input *CreateCampaignInput) (*CreateCampaignOutput, error) { + req, out := c.CreateCampaignRequest(input) + return out, req.Send() +} + +// CreateCampaignWithContext is the same as CreateCampaign with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCampaign for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateCampaignWithContext(ctx aws.Context, input *CreateCampaignInput, opts ...request.Option) (*CreateCampaignOutput, error) { + req, out := c.CreateCampaignRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDataset = "CreateDataset" + +// CreateDatasetRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataset for more information on using the CreateDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetRequest method. 
+// req, resp := client.CreateDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDataset +func (c *Personalize) CreateDatasetRequest(input *CreateDatasetInput) (req *request.Request, output *CreateDatasetOutput) { + op := &request.Operation{ + Name: opCreateDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDatasetInput{} + } + + output = &CreateDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataset API operation for Amazon Personalize. +// +// Creates an empty dataset and adds it to the specified dataset group. Use +// CreateDatasetImportJob to import your training data to a dataset. +// +// There are three types of datasets: +// +// * Interactions +// +// * Items +// +// * Users +// +// Each dataset type has an associated schema with required field types. Only +// the Interactions dataset is required in order to train a model (also referred +// to as creating a solution). +// +// A dataset can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING > DELETE IN_PROGRESS +// +// To get the status of the dataset, call DescribeDataset. +// +// Related APIs +// +// * CreateDatasetGroup +// +// * ListDatasets +// +// * DescribeDataset +// +// * DeleteDataset +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDataset +func (c *Personalize) CreateDataset(input *CreateDatasetInput) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + return out, req.Send() +} + +// CreateDatasetWithContext is the same as CreateDataset with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateDatasetWithContext(ctx aws.Context, input *CreateDatasetInput, opts ...request.Option) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateDatasetGroup = "CreateDatasetGroup" + +// CreateDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDatasetGroup for more information on using the CreateDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetGroupRequest method. +// req, resp := client.CreateDatasetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDatasetGroup +func (c *Personalize) CreateDatasetGroupRequest(input *CreateDatasetGroupInput) (req *request.Request, output *CreateDatasetGroupOutput) { + op := &request.Operation{ + Name: opCreateDatasetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDatasetGroupInput{} + } + + output = &CreateDatasetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDatasetGroup API operation for Amazon Personalize. +// +// Creates an empty dataset group. A dataset group contains related datasets +// that supply data for training a model. A dataset group can contain at most +// three datasets, one for each type of dataset: +// +// * Interactions +// +// * Items +// +// * Users +// +// To train a model (create a solution), a dataset group that contains an Interactions +// dataset is required. Call CreateDataset to add a dataset to the group. +// +// A dataset group can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING +// +// To get the status of the dataset group, call DescribeDatasetGroup. If the +// status shows as CREATE FAILED, the response includes a failureReason key, +// which describes why the creation failed. +// +// You must wait until the status of the dataset group is ACTIVE before adding +// a dataset to the group. +// +// You can specify an AWS Key Management Service (KMS) key to encrypt the datasets +// in the group. If you specify a KMS key, you must also include an AWS Identity +// and Access Management (IAM) role that has permission to access the key. +// +// APIs that require a dataset group ARN in the request +// +// * CreateDataset +// +// * CreateEventTracker +// +// * CreateSolution +// +// Related APIs +// +// * ListDatasetGroups +// +// * DescribeDatasetGroup +// +// * DeleteDatasetGroup +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateDatasetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. 
+// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDatasetGroup +func (c *Personalize) CreateDatasetGroup(input *CreateDatasetGroupInput) (*CreateDatasetGroupOutput, error) { + req, out := c.CreateDatasetGroupRequest(input) + return out, req.Send() +} + +// CreateDatasetGroupWithContext is the same as CreateDatasetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDatasetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateDatasetGroupWithContext(ctx aws.Context, input *CreateDatasetGroupInput, opts ...request.Option) (*CreateDatasetGroupOutput, error) { + req, out := c.CreateDatasetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDatasetImportJob = "CreateDatasetImportJob" + +// CreateDatasetImportJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateDatasetImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDatasetImportJob for more information on using the CreateDatasetImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetImportJobRequest method. +// req, resp := client.CreateDatasetImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDatasetImportJob +func (c *Personalize) CreateDatasetImportJobRequest(input *CreateDatasetImportJobInput) (req *request.Request, output *CreateDatasetImportJobOutput) { + op := &request.Operation{ + Name: opCreateDatasetImportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDatasetImportJobInput{} + } + + output = &CreateDatasetImportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDatasetImportJob API operation for Amazon Personalize. +// +// Creates a job that imports training data from your data source (an Amazon +// S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize +// to import the training data, you must specify an AWS Identity and Access +// Management (IAM) role that has permission to read from the data source. +// +// The dataset import job replaces any previous data in the dataset. 
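// Editor's note: a hedged, end-to-end sketch of the data-preparation flow
// described in this file (CreateDatasetGroup -> CreateDataset ->
// CreateDatasetImportJob); svc is the client from the earlier sketch, and
// every name and ARN is a hypothetical placeholder:
//
//	grp, err := svc.CreateDatasetGroup(&personalize.CreateDatasetGroupInput{
//		Name: aws.String("example-group"),
//	})
//	// ...wait until DescribeDatasetGroup reports ACTIVE...
//	ds, err := svc.CreateDataset(&personalize.CreateDatasetInput{
//		Name:            aws.String("example-interactions"),
//		DatasetGroupArn: grp.DatasetGroupArn,
//		DatasetType:     aws.String("Interactions"),
//		SchemaArn:       aws.String("arn:aws:personalize:us-east-1:123456789012:schema/example"),
//	})
//	job, err := svc.CreateDatasetImportJob(&personalize.CreateDatasetImportJobInput{
//		JobName:    aws.String("example-import"),
//		DatasetArn: ds.DatasetArn,
//		DataSource: &personalize.DataSource{DataLocation: aws.String("s3://example-bucket/interactions.csv")},
//		RoleArn:    aws.String("arn:aws:iam::123456789012:role/PersonalizeS3Access"),
//	})
//	// Poll job status with DescribeDatasetImportJob until ACTIVE.
//	_ = job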
+// +// Status +// +// A dataset import job can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// To get the status of the import job, call DescribeDatasetImportJob, providing +// the Amazon Resource Name (ARN) of the dataset import job. The dataset import +// is complete when the status shows as ACTIVE. If the status shows as CREATE +// FAILED, the response includes a failureReason key, which describes why the +// job failed. +// +// Importing takes time. You must wait until the status shows as ACTIVE before +// training a model using the dataset. +// +// Related APIs +// +// * ListDatasetImportJobs +// +// * DescribeDatasetImportJob +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateDatasetImportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateDatasetImportJob +func (c *Personalize) CreateDatasetImportJob(input *CreateDatasetImportJobInput) (*CreateDatasetImportJobOutput, error) { + req, out := c.CreateDatasetImportJobRequest(input) + return out, req.Send() +} + +// CreateDatasetImportJobWithContext is the same as CreateDatasetImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDatasetImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateDatasetImportJobWithContext(ctx aws.Context, input *CreateDatasetImportJobInput, opts ...request.Option) (*CreateDatasetImportJobOutput, error) { + req, out := c.CreateDatasetImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateEventTracker = "CreateEventTracker" + +// CreateEventTrackerRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventTracker operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateEventTracker for more information on using the CreateEventTracker +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateEventTrackerRequest method. +// req, resp := client.CreateEventTrackerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateEventTracker +func (c *Personalize) CreateEventTrackerRequest(input *CreateEventTrackerInput) (req *request.Request, output *CreateEventTrackerOutput) { + op := &request.Operation{ + Name: opCreateEventTracker, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEventTrackerInput{} + } + + output = &CreateEventTrackerOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateEventTracker API operation for Amazon Personalize. +// +// Creates an event tracker that you use when sending event data to the specified +// dataset group using the PutEvents (https://docs.aws.amazon.com/personalize/latest/dg/API_UBS_PutEvents.html) +// API. +// +// When Amazon Personalize creates an event tracker, it also creates an event-interactions +// dataset in the dataset group associated with the event tracker. The event-interactions +// dataset stores the event data from the PutEvents call. The contents of this +// dataset are not available to the user. +// +// Only one event tracker can be associated with a dataset group. You will get +// an error if you call CreateEventTracker using the same dataset group as an +// existing event tracker. +// +// When you send event data you include your tracking ID. The tracking ID identifies +// the customer and authorizes the customer to send the data. +// +// The event tracker can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING > DELETE IN_PROGRESS +// +// To get the status of the event tracker, call DescribeEventTracker. +// +// The event tracker must be in the ACTIVE state before using the tracking ID. +// +// Related APIs +// +// * ListEventTrackers +// +// * DescribeEventTracker +// +// * DeleteEventTracker +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateEventTracker for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateEventTracker +func (c *Personalize) CreateEventTracker(input *CreateEventTrackerInput) (*CreateEventTrackerOutput, error) { + req, out := c.CreateEventTrackerRequest(input) + return out, req.Send() +} + +// CreateEventTrackerWithContext is the same as CreateEventTracker with the addition of +// the ability to pass a context and additional request options. 
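// Editor's note: a hedged sketch of creating an event tracker and reading the
// tracking ID that PutEvents calls must carry; svc is the client from the
// earlier sketch and the dataset group ARN is hypothetical:
//
//	et, err := svc.CreateEventTracker(&personalize.CreateEventTrackerInput{
//		Name:            aws.String("example-tracker"),
//		DatasetGroupArn: aws.String("arn:aws:personalize:us-east-1:123456789012:dataset-group/example"),
//	})
//	if err == nil {
//		fmt.Println(aws.StringValue(et.TrackingId)) // use once the tracker is ACTIVE
//	}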
+// +// See CreateEventTracker for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateEventTrackerWithContext(ctx aws.Context, input *CreateEventTrackerInput, opts ...request.Option) (*CreateEventTrackerOutput, error) { + req, out := c.CreateEventTrackerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSchema = "CreateSchema" + +// CreateSchemaRequest generates a "aws/request.Request" representing the +// client's request for the CreateSchema operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSchema for more information on using the CreateSchema +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSchemaRequest method. +// req, resp := client.CreateSchemaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSchema +func (c *Personalize) CreateSchemaRequest(input *CreateSchemaInput) (req *request.Request, output *CreateSchemaOutput) { + op := &request.Operation{ + Name: opCreateSchema, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSchemaInput{} + } + + output = &CreateSchemaOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSchema API operation for Amazon Personalize. +// +// Creates an Amazon Personalize schema from the specified schema string. The +// schema you create must be in Avro JSON format. +// +// Amazon Personalize recognizes three schema variants. Each schema is associated +// with a dataset type and has a set of required fields and keywords. You specify +// a schema when you call CreateDataset. +// +// Related APIs +// +// * ListSchemas +// +// * DescribeSchema +// +// * DeleteSchema +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateSchema for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded.
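// Editor's note: a hedged sketch of CreateSchema with a minimal Avro JSON
// definition for an Interactions dataset (USER_ID, ITEM_ID, and TIMESTAMP are
// the required fields for that type); the schema name is hypothetical:
//
//	avro := `{"type": "record", "name": "Interactions",
//	          "namespace": "com.amazonaws.personalize.schema",
//	          "fields": [{"name": "USER_ID", "type": "string"},
//	                     {"name": "ITEM_ID", "type": "string"},
//	                     {"name": "TIMESTAMP", "type": "long"}],
//	          "version": "1.0"}`
//	out, err := svc.CreateSchema(&personalize.CreateSchemaInput{
//		Name:   aws.String("example-interactions-schema"),
//		Schema: aws.String(avro),
//	})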
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSchema +func (c *Personalize) CreateSchema(input *CreateSchemaInput) (*CreateSchemaOutput, error) { + req, out := c.CreateSchemaRequest(input) + return out, req.Send() +} + +// CreateSchemaWithContext is the same as CreateSchema with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSchema for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateSchemaWithContext(ctx aws.Context, input *CreateSchemaInput, opts ...request.Option) (*CreateSchemaOutput, error) { + req, out := c.CreateSchemaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSolution = "CreateSolution" + +// CreateSolutionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSolution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSolution for more information on using the CreateSolution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSolutionRequest method. +// req, resp := client.CreateSolutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSolution +func (c *Personalize) CreateSolutionRequest(input *CreateSolutionInput) (req *request.Request, output *CreateSolutionOutput) { + op := &request.Operation{ + Name: opCreateSolution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSolutionInput{} + } + + output = &CreateSolutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSolution API operation for Amazon Personalize. +// +// Creates the configuration for training a model. A trained model is known +// as a solution. After the configuration is created, you train the model (create +// a solution) by calling the CreateSolutionVersion operation. Every time you +// call CreateSolutionVersion, a new version of the solution is created. +// +// After creating a solution version, you check its accuracy by calling GetSolutionMetrics. +// When you are satisfied with the version, you deploy it using CreateCampaign. +// The campaign provides recommendations to a client through the GetRecommendations +// (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html) +// API. +// +// To train a model, Amazon Personalize requires training data and a recipe. +// The training data comes from the dataset group that you provide in the request. +// A recipe specifies the training algorithm and a feature transformation. You +// can specify one of the predefined recipes provided by Amazon Personalize. 
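// Editor's note: a hedged sketch of the solution workflow described here:
// create the solution configuration, train a version, and poll until it is
// ACTIVE; svc is the client from the earlier sketch, and the recipe ARN and
// other names are hypothetical:
//
//	sol, err := svc.CreateSolution(&personalize.CreateSolutionInput{
//		Name:            aws.String("example-solution"),
//		DatasetGroupArn: aws.String("arn:aws:personalize:us-east-1:123456789012:dataset-group/example"),
//		RecipeArn:       aws.String("arn:aws:personalize:::recipe/aws-hrnn"), // or PerformAutoML: aws.Bool(true)
//	})
//	sv, err := svc.CreateSolutionVersion(&personalize.CreateSolutionVersionInput{
//		SolutionArn: sol.SolutionArn,
//	})
//	for {
//		d, derr := svc.DescribeSolutionVersion(&personalize.DescribeSolutionVersionInput{
//			SolutionVersionArn: sv.SolutionVersionArn,
//		})
//		if derr != nil || aws.StringValue(d.SolutionVersion.Status) == "ACTIVE" {
//			break
//		}
//		time.Sleep(time.Minute) // training can take a while
//	}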
+// Alternatively, you can specify performAutoML and Amazon Personalize will +// analyze your data and select the optimum USER_PERSONALIZATION recipe for +// you. +// +// Status +// +// A solution can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING > DELETE IN_PROGRESS +// +// To get the status of the solution, call DescribeSolution. Wait until the +// status shows as ACTIVE before calling CreateSolutionVersion. +// +// Related APIs +// +// * ListSolutions +// +// * CreateSolutionVersion +// +// * DescribeSolution +// +// * DeleteSolution +// +// * ListSolutionVersions +// +// * DescribeSolutionVersion +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateSolution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The limit on the number of requests per second has been exceeded. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSolution +func (c *Personalize) CreateSolution(input *CreateSolutionInput) (*CreateSolutionOutput, error) { + req, out := c.CreateSolutionRequest(input) + return out, req.Send() +} + +// CreateSolutionWithContext is the same as CreateSolution with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSolution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateSolutionWithContext(ctx aws.Context, input *CreateSolutionInput, opts ...request.Option) (*CreateSolutionOutput, error) { + req, out := c.CreateSolutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSolutionVersion = "CreateSolutionVersion" + +// CreateSolutionVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSolutionVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSolutionVersion for more information on using the CreateSolutionVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSolutionVersionRequest method. 
+// req, resp := client.CreateSolutionVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSolutionVersion +func (c *Personalize) CreateSolutionVersionRequest(input *CreateSolutionVersionInput) (req *request.Request, output *CreateSolutionVersionOutput) { + op := &request.Operation{ + Name: opCreateSolutionVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSolutionVersionInput{} + } + + output = &CreateSolutionVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSolutionVersion API operation for Amazon Personalize. +// +// Trains or retrains an active solution. A solution is created using the CreateSolution +// operation and must be in the ACTIVE state before calling CreateSolutionVersion. +// A new version of the solution is created every time you call this operation. +// +// Status +// +// A solution version can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// To get the status of the version, call DescribeSolutionVersion. Wait until +// the status shows as ACTIVE before calling CreateCampaign. +// +// If the status shows as CREATE FAILED, the response includes a failureReason +// key, which describes why the job failed. +// +// Related APIs +// +// * ListSolutionVersions +// +// * DescribeSolutionVersion +// +// * ListSolutions +// +// * CreateSolution +// +// * DescribeSolution +// +// * DeleteSolution +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateSolutionVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateSolutionVersion +func (c *Personalize) CreateSolutionVersion(input *CreateSolutionVersionInput) (*CreateSolutionVersionOutput, error) { + req, out := c.CreateSolutionVersionRequest(input) + return out, req.Send() +} + +// CreateSolutionVersionWithContext is the same as CreateSolutionVersion with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSolutionVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateSolutionVersionWithContext(ctx aws.Context, input *CreateSolutionVersionInput, opts ...request.Option) (*CreateSolutionVersionOutput, error) { + req, out := c.CreateSolutionVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteCampaign = "DeleteCampaign" + +// DeleteCampaignRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCampaign operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCampaign for more information on using the DeleteCampaign +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCampaignRequest method. +// req, resp := client.DeleteCampaignRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteCampaign +func (c *Personalize) DeleteCampaignRequest(input *DeleteCampaignInput) (req *request.Request, output *DeleteCampaignOutput) { + op := &request.Operation{ + Name: opDeleteCampaign, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCampaignInput{} + } + + output = &DeleteCampaignOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteCampaign API operation for Amazon Personalize. +// +// Removes a campaign by deleting the solution deployment. The solution that +// the campaign is based on is not deleted and can be redeployed when needed. +// A deleted campaign can no longer be specified in a GetRecommendations (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html) +// request. For more information on campaigns, see CreateCampaign. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteCampaign for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteCampaign +func (c *Personalize) DeleteCampaign(input *DeleteCampaignInput) (*DeleteCampaignOutput, error) { + req, out := c.DeleteCampaignRequest(input) + return out, req.Send() +} + +// DeleteCampaignWithContext is the same as DeleteCampaign with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCampaign for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Personalize) DeleteCampaignWithContext(ctx aws.Context, input *DeleteCampaignInput, opts ...request.Option) (*DeleteCampaignOutput, error) { + req, out := c.DeleteCampaignRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDataset = "DeleteDataset" + +// DeleteDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataset for more information on using the DeleteDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetRequest method. +// req, resp := client.DeleteDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteDataset +func (c *Personalize) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) { + op := &request.Operation{ + Name: opDeleteDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetInput{} + } + + output = &DeleteDatasetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataset API operation for Amazon Personalize. +// +// Deletes a dataset. You can't delete a dataset if an associated DatasetImportJob +// or SolutionVersion is in the CREATE PENDING or IN PROGRESS state. For more +// information on datasets, see CreateDataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteDataset +func (c *Personalize) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + return out, req.Send() +} + +// DeleteDatasetWithContext is the same as DeleteDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Personalize) DeleteDatasetWithContext(ctx aws.Context, input *DeleteDatasetInput, opts ...request.Option) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDatasetGroup = "DeleteDatasetGroup" + +// DeleteDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDatasetGroup for more information on using the DeleteDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetGroupRequest method. +// req, resp := client.DeleteDatasetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteDatasetGroup +func (c *Personalize) DeleteDatasetGroupRequest(input *DeleteDatasetGroupInput) (req *request.Request, output *DeleteDatasetGroupOutput) { + op := &request.Operation{ + Name: opDeleteDatasetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetGroupInput{} + } + + output = &DeleteDatasetGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDatasetGroup API operation for Amazon Personalize. +// +// Deletes a dataset group. Before you delete a dataset group, you must delete +// the following: +// +// * All associated event trackers. +// +// * All associated solutions. +// +// * All datasets in the dataset group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteDatasetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteDatasetGroup +func (c *Personalize) DeleteDatasetGroup(input *DeleteDatasetGroupInput) (*DeleteDatasetGroupOutput, error) { + req, out := c.DeleteDatasetGroupRequest(input) + return out, req.Send() +} + +// DeleteDatasetGroupWithContext is the same as DeleteDatasetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDatasetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DeleteDatasetGroupWithContext(ctx aws.Context, input *DeleteDatasetGroupInput, opts ...request.Option) (*DeleteDatasetGroupOutput, error) { + req, out := c.DeleteDatasetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteEventTracker = "DeleteEventTracker" + +// DeleteEventTrackerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventTracker operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteEventTracker for more information on using the DeleteEventTracker +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteEventTrackerRequest method. +// req, resp := client.DeleteEventTrackerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteEventTracker +func (c *Personalize) DeleteEventTrackerRequest(input *DeleteEventTrackerInput) (req *request.Request, output *DeleteEventTrackerOutput) { + op := &request.Operation{ + Name: opDeleteEventTracker, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventTrackerInput{} + } + + output = &DeleteEventTrackerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteEventTracker API operation for Amazon Personalize. +// +// Deletes the event tracker. Does not delete the event-interactions dataset +// from the associated dataset group. For more information on event trackers, +// see CreateEventTracker. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteEventTracker for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteEventTracker +func (c *Personalize) DeleteEventTracker(input *DeleteEventTrackerInput) (*DeleteEventTrackerOutput, error) { + req, out := c.DeleteEventTrackerRequest(input) + return out, req.Send() +} + +// DeleteEventTrackerWithContext is the same as DeleteEventTracker with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteEventTracker for details on how to use this API operation. 
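// Editor's note: a hedged sketch of the teardown order implied by the
// DeleteDatasetGroup prerequisites above: event trackers and solutions first,
// then datasets, then the group itself; all ARNs are hypothetical:
//
//	svc.DeleteEventTracker(&personalize.DeleteEventTrackerInput{
//		EventTrackerArn: aws.String("arn:aws:personalize:us-east-1:123456789012:event-tracker/example"),
//	})
//	svc.DeleteSolution(&personalize.DeleteSolutionInput{
//		SolutionArn: aws.String("arn:aws:personalize:us-east-1:123456789012:solution/example"),
//	})
//	svc.DeleteDataset(&personalize.DeleteDatasetInput{
//		DatasetArn: aws.String("arn:aws:personalize:us-east-1:123456789012:dataset/example/INTERACTIONS"),
//	})
//	svc.DeleteDatasetGroup(&personalize.DeleteDatasetGroupInput{
//		DatasetGroupArn: aws.String("arn:aws:personalize:us-east-1:123456789012:dataset-group/example"),
//	})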
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DeleteEventTrackerWithContext(ctx aws.Context, input *DeleteEventTrackerInput, opts ...request.Option) (*DeleteEventTrackerOutput, error) { + req, out := c.DeleteEventTrackerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSchema = "DeleteSchema" + +// DeleteSchemaRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSchema operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSchema for more information on using the DeleteSchema +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSchemaRequest method. +// req, resp := client.DeleteSchemaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteSchema +func (c *Personalize) DeleteSchemaRequest(input *DeleteSchemaInput) (req *request.Request, output *DeleteSchemaOutput) { + op := &request.Operation{ + Name: opDeleteSchema, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSchemaInput{} + } + + output = &DeleteSchemaOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSchema API operation for Amazon Personalize. +// +// Deletes a schema. Before deleting a schema, you must delete all datasets +// referencing the schema. For more information on schemas, see CreateSchema. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteSchema for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteSchema +func (c *Personalize) DeleteSchema(input *DeleteSchemaInput) (*DeleteSchemaOutput, error) { + req, out := c.DeleteSchemaRequest(input) + return out, req.Send() +} + +// DeleteSchemaWithContext is the same as DeleteSchema with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSchema for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DeleteSchemaWithContext(ctx aws.Context, input *DeleteSchemaInput, opts ...request.Option) (*DeleteSchemaOutput, error) { + req, out := c.DeleteSchemaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSolution = "DeleteSolution" + +// DeleteSolutionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSolution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSolution for more information on using the DeleteSolution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSolutionRequest method. +// req, resp := client.DeleteSolutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteSolution +func (c *Personalize) DeleteSolutionRequest(input *DeleteSolutionInput) (req *request.Request, output *DeleteSolutionOutput) { + op := &request.Operation{ + Name: opDeleteSolution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSolutionInput{} + } + + output = &DeleteSolutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSolution API operation for Amazon Personalize. +// +// Deletes all versions of a solution and the Solution object itself. Before +// deleting a solution, you must delete all campaigns based on the solution. +// To determine what campaigns are using the solution, call ListCampaigns and +// supply the Amazon Resource Name (ARN) of the solution. You can't delete a +// solution if an associated SolutionVersion is in the CREATE PENDING or IN +// PROGRESS state. For more information on solutions, see CreateSolution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteSolution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. 
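// Editor's note: a hedged sketch of the check described above: list the
// campaigns still using a solution before deleting it; svc is the client from
// the earlier sketch and the solution ARN is hypothetical:
//
//	solutionArn := aws.String("arn:aws:personalize:us-east-1:123456789012:solution/example")
//	camps, err := svc.ListCampaigns(&personalize.ListCampaignsInput{SolutionArn: solutionArn})
//	if err == nil && len(camps.Campaigns) == 0 {
//		_, err = svc.DeleteSolution(&personalize.DeleteSolutionInput{SolutionArn: solutionArn})
//	}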
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteSolution +func (c *Personalize) DeleteSolution(input *DeleteSolutionInput) (*DeleteSolutionOutput, error) { + req, out := c.DeleteSolutionRequest(input) + return out, req.Send() +} + +// DeleteSolutionWithContext is the same as DeleteSolution with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSolution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DeleteSolutionWithContext(ctx aws.Context, input *DeleteSolutionInput, opts ...request.Option) (*DeleteSolutionOutput, error) { + req, out := c.DeleteSolutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAlgorithm = "DescribeAlgorithm" + +// DescribeAlgorithmRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlgorithm operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAlgorithm for more information on using the DescribeAlgorithm +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAlgorithmRequest method. +// req, resp := client.DescribeAlgorithmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeAlgorithm +func (c *Personalize) DescribeAlgorithmRequest(input *DescribeAlgorithmInput) (req *request.Request, output *DescribeAlgorithmOutput) { + op := &request.Operation{ + Name: opDescribeAlgorithm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlgorithmInput{} + } + + output = &DescribeAlgorithmOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAlgorithm API operation for Amazon Personalize. +// +// Describes the given algorithm. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeAlgorithm for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
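+//
+// A minimal sketch of the WithContext variant described above, using a
+// deadline for cancellation; it assumes a configured *Personalize client
+// and the standard context and time imports, and the algorithm ARN is a
+// placeholder:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+//    defer cancel()
+//    out, err := client.DescribeAlgorithmWithContext(ctx, &personalize.DescribeAlgorithmInput{
+//        AlgorithmArn: aws.String("arn:aws:personalize:::algorithm/example"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }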
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeAlgorithm +func (c *Personalize) DescribeAlgorithm(input *DescribeAlgorithmInput) (*DescribeAlgorithmOutput, error) { + req, out := c.DescribeAlgorithmRequest(input) + return out, req.Send() +} + +// DescribeAlgorithmWithContext is the same as DescribeAlgorithm with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAlgorithm for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeAlgorithmWithContext(ctx aws.Context, input *DescribeAlgorithmInput, opts ...request.Option) (*DescribeAlgorithmOutput, error) { + req, out := c.DescribeAlgorithmRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBatchInferenceJob = "DescribeBatchInferenceJob" + +// DescribeBatchInferenceJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBatchInferenceJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBatchInferenceJob for more information on using the DescribeBatchInferenceJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBatchInferenceJobRequest method. +// req, resp := client.DescribeBatchInferenceJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeBatchInferenceJob +func (c *Personalize) DescribeBatchInferenceJobRequest(input *DescribeBatchInferenceJobInput) (req *request.Request, output *DescribeBatchInferenceJobOutput) { + op := &request.Operation{ + Name: opDescribeBatchInferenceJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBatchInferenceJobInput{} + } + + output = &DescribeBatchInferenceJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBatchInferenceJob API operation for Amazon Personalize. +// +// Gets the properties of a batch inference job including name, Amazon Resource +// Name (ARN), status, input and output configurations, and the ARN of the solution +// version used to generate the recommendations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeBatchInferenceJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeBatchInferenceJob +func (c *Personalize) DescribeBatchInferenceJob(input *DescribeBatchInferenceJobInput) (*DescribeBatchInferenceJobOutput, error) { + req, out := c.DescribeBatchInferenceJobRequest(input) + return out, req.Send() +} + +// DescribeBatchInferenceJobWithContext is the same as DescribeBatchInferenceJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBatchInferenceJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeBatchInferenceJobWithContext(ctx aws.Context, input *DescribeBatchInferenceJobInput, opts ...request.Option) (*DescribeBatchInferenceJobOutput, error) { + req, out := c.DescribeBatchInferenceJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeCampaign = "DescribeCampaign" + +// DescribeCampaignRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCampaign operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCampaign for more information on using the DescribeCampaign +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCampaignRequest method. +// req, resp := client.DescribeCampaignRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeCampaign +func (c *Personalize) DescribeCampaignRequest(input *DescribeCampaignInput) (req *request.Request, output *DescribeCampaignOutput) { + op := &request.Operation{ + Name: opDescribeCampaign, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCampaignInput{} + } + + output = &DescribeCampaignOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCampaign API operation for Amazon Personalize. +// +// Describes the given campaign, including its status. +// +// A campaign can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// +// * DELETE PENDING > DELETE IN_PROGRESS +// +// When the status is CREATE FAILED, the response includes the failureReason +// key, which describes why. +// +// For more information on campaigns, see CreateCampaign. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeCampaign for usage and error information. 
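+//
+// A minimal sketch of reading the status and failureReason described above,
+// assuming a configured *Personalize client and a campaignARN value:
+//
+//    out, err := client.DescribeCampaign(&personalize.DescribeCampaignInput{
+//        CampaignArn: aws.String(campaignARN),
+//    })
+//    if err == nil && aws.StringValue(out.Campaign.Status) == "CREATE FAILED" {
+//        fmt.Println(aws.StringValue(out.Campaign.FailureReason))
+//    }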
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeCampaign +func (c *Personalize) DescribeCampaign(input *DescribeCampaignInput) (*DescribeCampaignOutput, error) { + req, out := c.DescribeCampaignRequest(input) + return out, req.Send() +} + +// DescribeCampaignWithContext is the same as DescribeCampaign with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCampaign for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeCampaignWithContext(ctx aws.Context, input *DescribeCampaignInput, opts ...request.Option) (*DescribeCampaignOutput, error) { + req, out := c.DescribeCampaignRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataset = "DescribeDataset" + +// DescribeDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataset for more information on using the DescribeDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetRequest method. +// req, resp := client.DescribeDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDataset +func (c *Personalize) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + output = &DescribeDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataset API operation for Amazon Personalize. +// +// Describes the given dataset. For more information on datasets, see CreateDataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeDataset for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
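+//
+// A minimal sketch of the Request/Send pattern the comments above describe,
+// assuming a configured *Personalize client and a datasetARN value:
+//
+//    req, resp := client.DescribeDatasetRequest(&personalize.DescribeDatasetInput{
+//        DatasetArn: aws.String(datasetARN),
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }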
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDataset +func (c *Personalize) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + return out, req.Send() +} + +// DescribeDatasetWithContext is the same as DescribeDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeDatasetWithContext(ctx aws.Context, input *DescribeDatasetInput, opts ...request.Option) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDatasetGroup = "DescribeDatasetGroup" + +// DescribeDatasetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDatasetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDatasetGroup for more information on using the DescribeDatasetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetGroupRequest method. +// req, resp := client.DescribeDatasetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDatasetGroup +func (c *Personalize) DescribeDatasetGroupRequest(input *DescribeDatasetGroupInput) (req *request.Request, output *DescribeDatasetGroupOutput) { + op := &request.Operation{ + Name: opDescribeDatasetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetGroupInput{} + } + + output = &DescribeDatasetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDatasetGroup API operation for Amazon Personalize. +// +// Describes the given dataset group. For more information on dataset groups, +// see CreateDatasetGroup. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeDatasetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
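+//
+// A minimal sketch, assuming a configured *Personalize client and a
+// datasetGroupARN value:
+//
+//    out, err := client.DescribeDatasetGroup(&personalize.DescribeDatasetGroupInput{
+//        DatasetGroupArn: aws.String(datasetGroupARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DatasetGroup.Status))
+//    }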
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDatasetGroup +func (c *Personalize) DescribeDatasetGroup(input *DescribeDatasetGroupInput) (*DescribeDatasetGroupOutput, error) { + req, out := c.DescribeDatasetGroupRequest(input) + return out, req.Send() +} + +// DescribeDatasetGroupWithContext is the same as DescribeDatasetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDatasetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeDatasetGroupWithContext(ctx aws.Context, input *DescribeDatasetGroupInput, opts ...request.Option) (*DescribeDatasetGroupOutput, error) { + req, out := c.DescribeDatasetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDatasetImportJob = "DescribeDatasetImportJob" + +// DescribeDatasetImportJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDatasetImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDatasetImportJob for more information on using the DescribeDatasetImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetImportJobRequest method. +// req, resp := client.DescribeDatasetImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDatasetImportJob +func (c *Personalize) DescribeDatasetImportJobRequest(input *DescribeDatasetImportJobInput) (req *request.Request, output *DescribeDatasetImportJobOutput) { + op := &request.Operation{ + Name: opDescribeDatasetImportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetImportJobInput{} + } + + output = &DescribeDatasetImportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDatasetImportJob API operation for Amazon Personalize. +// +// Describes the dataset import job created by CreateDatasetImportJob, including +// the import job status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeDatasetImportJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
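+//
+// A minimal sketch of checking the import job status mentioned above,
+// assuming a configured *Personalize client and an importJobARN value:
+//
+//    out, err := client.DescribeDatasetImportJob(&personalize.DescribeDatasetImportJobInput{
+//        DatasetImportJobArn: aws.String(importJobARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DatasetImportJob.Status))
+//    }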
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeDatasetImportJob +func (c *Personalize) DescribeDatasetImportJob(input *DescribeDatasetImportJobInput) (*DescribeDatasetImportJobOutput, error) { + req, out := c.DescribeDatasetImportJobRequest(input) + return out, req.Send() +} + +// DescribeDatasetImportJobWithContext is the same as DescribeDatasetImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDatasetImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeDatasetImportJobWithContext(ctx aws.Context, input *DescribeDatasetImportJobInput, opts ...request.Option) (*DescribeDatasetImportJobOutput, error) { + req, out := c.DescribeDatasetImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEventTracker = "DescribeEventTracker" + +// DescribeEventTrackerRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventTracker operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEventTracker for more information on using the DescribeEventTracker +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEventTrackerRequest method. +// req, resp := client.DescribeEventTrackerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeEventTracker +func (c *Personalize) DescribeEventTrackerRequest(input *DescribeEventTrackerInput) (req *request.Request, output *DescribeEventTrackerOutput) { + op := &request.Operation{ + Name: opDescribeEventTracker, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventTrackerInput{} + } + + output = &DescribeEventTrackerOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEventTracker API operation for Amazon Personalize. +// +// Describes an event tracker. The response includes the trackingId and status +// of the event tracker. For more information on event trackers, see CreateEventTracker. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeEventTracker for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
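+//
+// A minimal sketch of reading the trackingId mentioned above, assuming a
+// configured *Personalize client and an eventTrackerARN value:
+//
+//    out, err := client.DescribeEventTracker(&personalize.DescribeEventTrackerInput{
+//        EventTrackerArn: aws.String(eventTrackerARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.EventTracker.TrackingId))
+//    }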
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeEventTracker +func (c *Personalize) DescribeEventTracker(input *DescribeEventTrackerInput) (*DescribeEventTrackerOutput, error) { + req, out := c.DescribeEventTrackerRequest(input) + return out, req.Send() +} + +// DescribeEventTrackerWithContext is the same as DescribeEventTracker with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEventTracker for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeEventTrackerWithContext(ctx aws.Context, input *DescribeEventTrackerInput, opts ...request.Option) (*DescribeEventTrackerOutput, error) { + req, out := c.DescribeEventTrackerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFeatureTransformation = "DescribeFeatureTransformation" + +// DescribeFeatureTransformationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFeatureTransformation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFeatureTransformation for more information on using the DescribeFeatureTransformation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFeatureTransformationRequest method. +// req, resp := client.DescribeFeatureTransformationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeFeatureTransformation +func (c *Personalize) DescribeFeatureTransformationRequest(input *DescribeFeatureTransformationInput) (req *request.Request, output *DescribeFeatureTransformationOutput) { + op := &request.Operation{ + Name: opDescribeFeatureTransformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFeatureTransformationInput{} + } + + output = &DescribeFeatureTransformationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFeatureTransformation API operation for Amazon Personalize. +// +// Describes the given feature transformation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeFeatureTransformation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
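+//
+// A minimal sketch, assuming a configured *Personalize client and a
+// featureTransformationARN value:
+//
+//    out, err := client.DescribeFeatureTransformation(&personalize.DescribeFeatureTransformationInput{
+//        FeatureTransformationArn: aws.String(featureTransformationARN),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == personalize.ErrCodeResourceNotFoundException {
+//        fmt.Println("no such feature transformation")
+//    } else if err == nil {
+//        fmt.Println(out)
+//    }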
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeFeatureTransformation +func (c *Personalize) DescribeFeatureTransformation(input *DescribeFeatureTransformationInput) (*DescribeFeatureTransformationOutput, error) { + req, out := c.DescribeFeatureTransformationRequest(input) + return out, req.Send() +} + +// DescribeFeatureTransformationWithContext is the same as DescribeFeatureTransformation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFeatureTransformation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeFeatureTransformationWithContext(ctx aws.Context, input *DescribeFeatureTransformationInput, opts ...request.Option) (*DescribeFeatureTransformationOutput, error) { + req, out := c.DescribeFeatureTransformationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeRecipe = "DescribeRecipe" + +// DescribeRecipeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRecipe operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRecipe for more information on using the DescribeRecipe +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRecipeRequest method. +// req, resp := client.DescribeRecipeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeRecipe +func (c *Personalize) DescribeRecipeRequest(input *DescribeRecipeInput) (req *request.Request, output *DescribeRecipeOutput) { + op := &request.Operation{ + Name: opDescribeRecipe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRecipeInput{} + } + + output = &DescribeRecipeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRecipe API operation for Amazon Personalize. +// +// Describes a recipe. +// +// A recipe contains three items: +// +// * An algorithm that trains a model. +// +// * Hyperparameters that govern the training. +// +// * Feature transformation information for modifying the input data before +// training. +// +// Amazon Personalize provides a set of predefined recipes. You specify a recipe +// when you create a solution with the CreateSolution API. CreateSolution trains +// a model by using the algorithm in the specified recipe and a training dataset. +// The solution, when deployed as a campaign, can provide recommendations using +// the GetRecommendations (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html) +// API. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeRecipe for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeRecipe +func (c *Personalize) DescribeRecipe(input *DescribeRecipeInput) (*DescribeRecipeOutput, error) { + req, out := c.DescribeRecipeRequest(input) + return out, req.Send() +} + +// DescribeRecipeWithContext is the same as DescribeRecipe with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRecipe for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeRecipeWithContext(ctx aws.Context, input *DescribeRecipeInput, opts ...request.Option) (*DescribeRecipeOutput, error) { + req, out := c.DescribeRecipeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSchema = "DescribeSchema" + +// DescribeSchemaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSchema operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSchema for more information on using the DescribeSchema +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSchemaRequest method. +// req, resp := client.DescribeSchemaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSchema +func (c *Personalize) DescribeSchemaRequest(input *DescribeSchemaInput) (req *request.Request, output *DescribeSchemaOutput) { + op := &request.Operation{ + Name: opDescribeSchema, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSchemaInput{} + } + + output = &DescribeSchemaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSchema API operation for Amazon Personalize. +// +// Describes a schema. For more information on schemas, see CreateSchema. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeSchema for usage and error information. 
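+//
+// A minimal sketch, assuming a configured *Personalize client, a schemaARN
+// value, and that the nested Schema field holds the schema definition as a
+// string:
+//
+//    out, err := client.DescribeSchema(&personalize.DescribeSchemaInput{
+//        SchemaArn: aws.String(schemaARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Schema.Schema))
+//    }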
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSchema +func (c *Personalize) DescribeSchema(input *DescribeSchemaInput) (*DescribeSchemaOutput, error) { + req, out := c.DescribeSchemaRequest(input) + return out, req.Send() +} + +// DescribeSchemaWithContext is the same as DescribeSchema with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSchema for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeSchemaWithContext(ctx aws.Context, input *DescribeSchemaInput, opts ...request.Option) (*DescribeSchemaOutput, error) { + req, out := c.DescribeSchemaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSolution = "DescribeSolution" + +// DescribeSolutionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSolution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSolution for more information on using the DescribeSolution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSolutionRequest method. +// req, resp := client.DescribeSolutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSolution +func (c *Personalize) DescribeSolutionRequest(input *DescribeSolutionInput) (req *request.Request, output *DescribeSolutionOutput) { + op := &request.Operation{ + Name: opDescribeSolution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSolutionInput{} + } + + output = &DescribeSolutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSolution API operation for Amazon Personalize. +// +// Describes a solution. For more information on solutions, see CreateSolution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeSolution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
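+//
+// A minimal sketch, assuming a configured *Personalize client and a
+// solutionARN value:
+//
+//    out, err := client.DescribeSolution(&personalize.DescribeSolutionInput{
+//        SolutionArn: aws.String(solutionARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Solution.Status))
+//    }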
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSolution +func (c *Personalize) DescribeSolution(input *DescribeSolutionInput) (*DescribeSolutionOutput, error) { + req, out := c.DescribeSolutionRequest(input) + return out, req.Send() +} + +// DescribeSolutionWithContext is the same as DescribeSolution with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSolution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeSolutionWithContext(ctx aws.Context, input *DescribeSolutionInput, opts ...request.Option) (*DescribeSolutionOutput, error) { + req, out := c.DescribeSolutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSolutionVersion = "DescribeSolutionVersion" + +// DescribeSolutionVersionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSolutionVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSolutionVersion for more information on using the DescribeSolutionVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSolutionVersionRequest method. +// req, resp := client.DescribeSolutionVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSolutionVersion +func (c *Personalize) DescribeSolutionVersionRequest(input *DescribeSolutionVersionInput) (req *request.Request, output *DescribeSolutionVersionOutput) { + op := &request.Operation{ + Name: opDescribeSolutionVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSolutionVersionInput{} + } + + output = &DescribeSolutionVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSolutionVersion API operation for Amazon Personalize. +// +// Describes a specific version of a solution. For more information on solutions, +// see CreateSolution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeSolutionVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. 
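+//
+// A minimal sketch, assuming a configured *Personalize client and a
+// solutionVersionARN value:
+//
+//    out, err := client.DescribeSolutionVersion(&personalize.DescribeSolutionVersionInput{
+//        SolutionVersionArn: aws.String(solutionVersionARN),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.SolutionVersion.Status))
+//    }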
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeSolutionVersion +func (c *Personalize) DescribeSolutionVersion(input *DescribeSolutionVersionInput) (*DescribeSolutionVersionOutput, error) { + req, out := c.DescribeSolutionVersionRequest(input) + return out, req.Send() +} + +// DescribeSolutionVersionWithContext is the same as DescribeSolutionVersion with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSolutionVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeSolutionVersionWithContext(ctx aws.Context, input *DescribeSolutionVersionInput, opts ...request.Option) (*DescribeSolutionVersionOutput, error) { + req, out := c.DescribeSolutionVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSolutionMetrics = "GetSolutionMetrics" + +// GetSolutionMetricsRequest generates a "aws/request.Request" representing the +// client's request for the GetSolutionMetrics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSolutionMetrics for more information on using the GetSolutionMetrics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSolutionMetricsRequest method. +// req, resp := client.GetSolutionMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/GetSolutionMetrics +func (c *Personalize) GetSolutionMetricsRequest(input *GetSolutionMetricsInput) (req *request.Request, output *GetSolutionMetricsOutput) { + op := &request.Operation{ + Name: opGetSolutionMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSolutionMetricsInput{} + } + + output = &GetSolutionMetricsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSolutionMetrics API operation for Amazon Personalize. +// +// Gets the metrics for the specified solution version. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation GetSolutionMetrics for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. 
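+//
+// A minimal sketch of iterating the returned metrics, assuming a configured
+// *Personalize client and a solutionVersionARN value:
+//
+//    out, err := client.GetSolutionMetrics(&personalize.GetSolutionMetricsInput{
+//        SolutionVersionArn: aws.String(solutionVersionARN),
+//    })
+//    if err == nil {
+//        for name, value := range out.Metrics {
+//            fmt.Println(name, aws.Float64Value(value))
+//        }
+//    }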
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/GetSolutionMetrics +func (c *Personalize) GetSolutionMetrics(input *GetSolutionMetricsInput) (*GetSolutionMetricsOutput, error) { + req, out := c.GetSolutionMetricsRequest(input) + return out, req.Send() +} + +// GetSolutionMetricsWithContext is the same as GetSolutionMetrics with the addition of +// the ability to pass a context and additional request options. +// +// See GetSolutionMetrics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) GetSolutionMetricsWithContext(ctx aws.Context, input *GetSolutionMetricsInput, opts ...request.Option) (*GetSolutionMetricsOutput, error) { + req, out := c.GetSolutionMetricsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBatchInferenceJobs = "ListBatchInferenceJobs" + +// ListBatchInferenceJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListBatchInferenceJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBatchInferenceJobs for more information on using the ListBatchInferenceJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBatchInferenceJobsRequest method. +// req, resp := client.ListBatchInferenceJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListBatchInferenceJobs +func (c *Personalize) ListBatchInferenceJobsRequest(input *ListBatchInferenceJobsInput) (req *request.Request, output *ListBatchInferenceJobsOutput) { + op := &request.Operation{ + Name: opListBatchInferenceJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBatchInferenceJobsInput{} + } + + output = &ListBatchInferenceJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBatchInferenceJobs API operation for Amazon Personalize. +// +// Gets a list of the batch inference jobs that have been performed off of a +// solution version. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListBatchInferenceJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. 
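+//
+// A minimal sketch of draining the paginator via the Pages helper defined
+// below, assuming a configured *Personalize client and a solutionVersionARN
+// value:
+//
+//    err := client.ListBatchInferenceJobsPages(&personalize.ListBatchInferenceJobsInput{
+//        SolutionVersionArn: aws.String(solutionVersionARN),
+//    }, func(page *personalize.ListBatchInferenceJobsOutput, lastPage bool) bool {
+//        fmt.Println(len(page.BatchInferenceJobs))
+//        return true // continue until the final page
+//    })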
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListBatchInferenceJobs +func (c *Personalize) ListBatchInferenceJobs(input *ListBatchInferenceJobsInput) (*ListBatchInferenceJobsOutput, error) { + req, out := c.ListBatchInferenceJobsRequest(input) + return out, req.Send() +} + +// ListBatchInferenceJobsWithContext is the same as ListBatchInferenceJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListBatchInferenceJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListBatchInferenceJobsWithContext(ctx aws.Context, input *ListBatchInferenceJobsInput, opts ...request.Option) (*ListBatchInferenceJobsOutput, error) { + req, out := c.ListBatchInferenceJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListBatchInferenceJobsPages iterates over the pages of a ListBatchInferenceJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBatchInferenceJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBatchInferenceJobs operation. +// pageNum := 0 +// err := client.ListBatchInferenceJobsPages(params, +// func(page *personalize.ListBatchInferenceJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListBatchInferenceJobsPages(input *ListBatchInferenceJobsInput, fn func(*ListBatchInferenceJobsOutput, bool) bool) error { + return c.ListBatchInferenceJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBatchInferenceJobsPagesWithContext same as ListBatchInferenceJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListBatchInferenceJobsPagesWithContext(ctx aws.Context, input *ListBatchInferenceJobsInput, fn func(*ListBatchInferenceJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBatchInferenceJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBatchInferenceJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBatchInferenceJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListCampaigns = "ListCampaigns" + +// ListCampaignsRequest generates a "aws/request.Request" representing the +// client's request for the ListCampaigns operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListCampaigns for more information on using the ListCampaigns +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListCampaignsRequest method. +// req, resp := client.ListCampaignsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListCampaigns +func (c *Personalize) ListCampaignsRequest(input *ListCampaignsInput) (req *request.Request, output *ListCampaignsOutput) { + op := &request.Operation{ + Name: opListCampaigns, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCampaignsInput{} + } + + output = &ListCampaignsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCampaigns API operation for Amazon Personalize. +// +// Returns a list of campaigns that use the given solution. When a solution +// is not specified, all the campaigns associated with the account are listed. +// The response provides the properties for each campaign, including the Amazon +// Resource Name (ARN). For more information on campaigns, see CreateCampaign. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListCampaigns for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListCampaigns +func (c *Personalize) ListCampaigns(input *ListCampaignsInput) (*ListCampaignsOutput, error) { + req, out := c.ListCampaignsRequest(input) + return out, req.Send() +} + +// ListCampaignsWithContext is the same as ListCampaigns with the addition of +// the ability to pass a context and additional request options. +// +// See ListCampaigns for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListCampaignsWithContext(ctx aws.Context, input *ListCampaignsInput, opts ...request.Option) (*ListCampaignsOutput, error) { + req, out := c.ListCampaignsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListCampaignsPages iterates over the pages of a ListCampaigns operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCampaigns method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListCampaigns operation. +// pageNum := 0 +// err := client.ListCampaignsPages(params, +// func(page *personalize.ListCampaignsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListCampaignsPages(input *ListCampaignsInput, fn func(*ListCampaignsOutput, bool) bool) error { + return c.ListCampaignsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCampaignsPagesWithContext same as ListCampaignsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListCampaignsPagesWithContext(ctx aws.Context, input *ListCampaignsInput, fn func(*ListCampaignsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCampaignsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCampaignsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCampaignsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasetGroups = "ListDatasetGroups" + +// ListDatasetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasetGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasetGroups for more information on using the ListDatasetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetGroupsRequest method. +// req, resp := client.ListDatasetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasetGroups +func (c *Personalize) ListDatasetGroupsRequest(input *ListDatasetGroupsInput) (req *request.Request, output *ListDatasetGroupsOutput) { + op := &request.Operation{ + Name: opListDatasetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetGroupsInput{} + } + + output = &ListDatasetGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasetGroups API operation for Amazon Personalize. +// +// Returns a list of dataset groups. The response provides the properties for +// each dataset group, including the Amazon Resource Name (ARN). For more information +// on dataset groups, see CreateDatasetGroup. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListDatasetGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasetGroups +func (c *Personalize) ListDatasetGroups(input *ListDatasetGroupsInput) (*ListDatasetGroupsOutput, error) { + req, out := c.ListDatasetGroupsRequest(input) + return out, req.Send() +} + +// ListDatasetGroupsWithContext is the same as ListDatasetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetGroupsWithContext(ctx aws.Context, input *ListDatasetGroupsInput, opts ...request.Option) (*ListDatasetGroupsOutput, error) { + req, out := c.ListDatasetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetGroupsPages iterates over the pages of a ListDatasetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasetGroups operation. +// pageNum := 0 +// err := client.ListDatasetGroupsPages(params, +// func(page *personalize.ListDatasetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListDatasetGroupsPages(input *ListDatasetGroupsInput, fn func(*ListDatasetGroupsOutput, bool) bool) error { + return c.ListDatasetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetGroupsPagesWithContext same as ListDatasetGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetGroupsPagesWithContext(ctx aws.Context, input *ListDatasetGroupsInput, fn func(*ListDatasetGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
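+			// Hand the prepared request back to the paginator; request.Pagination
+			// sends it and advances the nextToken on each call to p.Next.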
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasetImportJobs = "ListDatasetImportJobs" + +// ListDatasetImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasetImportJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasetImportJobs for more information on using the ListDatasetImportJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetImportJobsRequest method. +// req, resp := client.ListDatasetImportJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasetImportJobs +func (c *Personalize) ListDatasetImportJobsRequest(input *ListDatasetImportJobsInput) (req *request.Request, output *ListDatasetImportJobsOutput) { + op := &request.Operation{ + Name: opListDatasetImportJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetImportJobsInput{} + } + + output = &ListDatasetImportJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasetImportJobs API operation for Amazon Personalize. +// +// Returns a list of dataset import jobs that use the given dataset. When a +// dataset is not specified, all the dataset import jobs associated with the +// account are listed. The response provides the properties for each dataset +// import job, including the Amazon Resource Name (ARN). For more information +// on dataset import jobs, see CreateDatasetImportJob. For more information +// on datasets, see CreateDataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListDatasetImportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasetImportJobs +func (c *Personalize) ListDatasetImportJobs(input *ListDatasetImportJobsInput) (*ListDatasetImportJobsOutput, error) { + req, out := c.ListDatasetImportJobsRequest(input) + return out, req.Send() +} + +// ListDatasetImportJobsWithContext is the same as ListDatasetImportJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasetImportJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetImportJobsWithContext(ctx aws.Context, input *ListDatasetImportJobsInput, opts ...request.Option) (*ListDatasetImportJobsOutput, error) { + req, out := c.ListDatasetImportJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetImportJobsPages iterates over the pages of a ListDatasetImportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasetImportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasetImportJobs operation. +// pageNum := 0 +// err := client.ListDatasetImportJobsPages(params, +// func(page *personalize.ListDatasetImportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListDatasetImportJobsPages(input *ListDatasetImportJobsInput, fn func(*ListDatasetImportJobsOutput, bool) bool) error { + return c.ListDatasetImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetImportJobsPagesWithContext same as ListDatasetImportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetImportJobsPagesWithContext(ctx aws.Context, input *ListDatasetImportJobsInput, fn func(*ListDatasetImportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetImportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetImportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetImportJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasets for more information on using the ListDatasets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetsRequest method. 
+// req, resp := client.ListDatasetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasets +func (c *Personalize) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetsInput{} + } + + output = &ListDatasetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasets API operation for Amazon Personalize. +// +// Returns the list of datasets contained in the given dataset group. The response +// provides the properties for each dataset, including the Amazon Resource Name +// (ARN). For more information on datasets, see CreateDataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListDatasets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListDatasets +func (c *Personalize) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + return out, req.Send() +} + +// ListDatasetsWithContext is the same as ListDatasets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetsPages iterates over the pages of a ListDatasets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasets operation. 
+// pageNum := 0 +// err := client.ListDatasetsPages(params, +// func(page *personalize.ListDatasetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListDatasetsPages(input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool) error { + return c.ListDatasetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetsPagesWithContext same as ListDatasetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListDatasetsPagesWithContext(ctx aws.Context, input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListEventTrackers = "ListEventTrackers" + +// ListEventTrackersRequest generates a "aws/request.Request" representing the +// client's request for the ListEventTrackers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEventTrackers for more information on using the ListEventTrackers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEventTrackersRequest method. +// req, resp := client.ListEventTrackersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListEventTrackers +func (c *Personalize) ListEventTrackersRequest(input *ListEventTrackersInput) (req *request.Request, output *ListEventTrackersOutput) { + op := &request.Operation{ + Name: opListEventTrackers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEventTrackersInput{} + } + + output = &ListEventTrackersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEventTrackers API operation for Amazon Personalize. +// +// Returns the list of event trackers associated with the account. The response +// provides the properties for each event tracker, including the Amazon Resource +// Name (ARN) and tracking ID. For more information on event trackers, see CreateEventTracker. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
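+//
+// For example, a sketch of inspecting the error code with a type assertion
+// (the error-code constants appear under Returned Error Codes below; awserr
+// is github.com/aws/aws-sdk-go/aws/awserr):
+//
+//    out, err := client.ListEventTrackers(params)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case personalize.ErrCodeInvalidNextTokenException:
+//                // stale or malformed token: drop it and restart pagination
+//            default:
+//                // other service or SDK failure; aerr.Message() has details
+//            }
+//        }
+//    }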
+// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListEventTrackers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListEventTrackers +func (c *Personalize) ListEventTrackers(input *ListEventTrackersInput) (*ListEventTrackersOutput, error) { + req, out := c.ListEventTrackersRequest(input) + return out, req.Send() +} + +// ListEventTrackersWithContext is the same as ListEventTrackers with the addition of +// the ability to pass a context and additional request options. +// +// See ListEventTrackers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListEventTrackersWithContext(ctx aws.Context, input *ListEventTrackersInput, opts ...request.Option) (*ListEventTrackersOutput, error) { + req, out := c.ListEventTrackersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEventTrackersPages iterates over the pages of a ListEventTrackers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEventTrackers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEventTrackers operation. +// pageNum := 0 +// err := client.ListEventTrackersPages(params, +// func(page *personalize.ListEventTrackersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListEventTrackersPages(input *ListEventTrackersInput, fn func(*ListEventTrackersOutput, bool) bool) error { + return c.ListEventTrackersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEventTrackersPagesWithContext same as ListEventTrackersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListEventTrackersPagesWithContext(ctx aws.Context, input *ListEventTrackersInput, fn func(*ListEventTrackersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEventTrackersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEventTrackersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEventTrackersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListRecipes = "ListRecipes" + +// ListRecipesRequest generates a "aws/request.Request" representing the +// client's request for the ListRecipes operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRecipes for more information on using the ListRecipes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRecipesRequest method. +// req, resp := client.ListRecipesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes +func (c *Personalize) ListRecipesRequest(input *ListRecipesInput) (req *request.Request, output *ListRecipesOutput) { + op := &request.Operation{ + Name: opListRecipes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRecipesInput{} + } + + output = &ListRecipesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRecipes API operation for Amazon Personalize. +// +// Returns a list of available recipes. The response provides the properties +// for each recipe, including the recipe's Amazon Resource Name (ARN). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListRecipes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes +func (c *Personalize) ListRecipes(input *ListRecipesInput) (*ListRecipesOutput, error) { + req, out := c.ListRecipesRequest(input) + return out, req.Send() +} + +// ListRecipesWithContext is the same as ListRecipes with the addition of +// the ability to pass a context and additional request options. +// +// See ListRecipes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListRecipesWithContext(ctx aws.Context, input *ListRecipesInput, opts ...request.Option) (*ListRecipesOutput, error) { + req, out := c.ListRecipesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRecipesPages iterates over the pages of a ListRecipes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRecipes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRecipes operation. 
+// pageNum := 0 +// err := client.ListRecipesPages(params, +// func(page *personalize.ListRecipesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListRecipesPages(input *ListRecipesInput, fn func(*ListRecipesOutput, bool) bool) error { + return c.ListRecipesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRecipesPagesWithContext same as ListRecipesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListRecipesPagesWithContext(ctx aws.Context, input *ListRecipesInput, fn func(*ListRecipesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRecipesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRecipesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListRecipesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListSchemas = "ListSchemas" + +// ListSchemasRequest generates a "aws/request.Request" representing the +// client's request for the ListSchemas operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSchemas for more information on using the ListSchemas +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSchemasRequest method. +// req, resp := client.ListSchemasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSchemas +func (c *Personalize) ListSchemasRequest(input *ListSchemasInput) (req *request.Request, output *ListSchemasOutput) { + op := &request.Operation{ + Name: opListSchemas, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSchemasInput{} + } + + output = &ListSchemasOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSchemas API operation for Amazon Personalize. +// +// Returns the list of schemas associated with the account. The response provides +// the properties for each schema, including the Amazon Resource Name (ARN). +// For more information on schemas, see CreateSchema. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListSchemas for usage and error information. 
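+//
+// A minimal call sketch (MaxResults and NextToken are the paging fields of
+// ListSchemasInput in this package):
+//
+//    out, err := client.ListSchemas(&personalize.ListSchemasInput{
+//        MaxResults: aws.Int64(25),
+//    })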
+// +// Returned Error Codes: +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSchemas +func (c *Personalize) ListSchemas(input *ListSchemasInput) (*ListSchemasOutput, error) { + req, out := c.ListSchemasRequest(input) + return out, req.Send() +} + +// ListSchemasWithContext is the same as ListSchemas with the addition of +// the ability to pass a context and additional request options. +// +// See ListSchemas for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSchemasWithContext(ctx aws.Context, input *ListSchemasInput, opts ...request.Option) (*ListSchemasOutput, error) { + req, out := c.ListSchemasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSchemasPages iterates over the pages of a ListSchemas operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSchemas method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSchemas operation. +// pageNum := 0 +// err := client.ListSchemasPages(params, +// func(page *personalize.ListSchemasOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListSchemasPages(input *ListSchemasInput, fn func(*ListSchemasOutput, bool) bool) error { + return c.ListSchemasPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSchemasPagesWithContext same as ListSchemasPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSchemasPagesWithContext(ctx aws.Context, input *ListSchemasInput, fn func(*ListSchemasOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSchemasInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSchemasRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSchemasOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListSolutionVersions = "ListSolutionVersions" + +// ListSolutionVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListSolutionVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSolutionVersions for more information on using the ListSolutionVersions +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSolutionVersionsRequest method. +// req, resp := client.ListSolutionVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSolutionVersions +func (c *Personalize) ListSolutionVersionsRequest(input *ListSolutionVersionsInput) (req *request.Request, output *ListSolutionVersionsOutput) { + op := &request.Operation{ + Name: opListSolutionVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSolutionVersionsInput{} + } + + output = &ListSolutionVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSolutionVersions API operation for Amazon Personalize. +// +// Returns a list of solution versions for the given solution. When a solution +// is not specified, all the solution versions associated with the account are +// listed. The response provides the properties for each solution version, including +// the Amazon Resource Name (ARN). For more information on solutions, see CreateSolution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListSolutionVersions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSolutionVersions +func (c *Personalize) ListSolutionVersions(input *ListSolutionVersionsInput) (*ListSolutionVersionsOutput, error) { + req, out := c.ListSolutionVersionsRequest(input) + return out, req.Send() +} + +// ListSolutionVersionsWithContext is the same as ListSolutionVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListSolutionVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSolutionVersionsWithContext(ctx aws.Context, input *ListSolutionVersionsInput, opts ...request.Option) (*ListSolutionVersionsOutput, error) { + req, out := c.ListSolutionVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSolutionVersionsPages iterates over the pages of a ListSolutionVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
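+//
+// Returning false early is how a caller stops paging once it has what it
+// needs. A sketch (the SolutionVersions field and its summary fields are
+// assumed from this package):
+//
+//    var activeArn string
+//    err := client.ListSolutionVersionsPages(params,
+//        func(page *personalize.ListSolutionVersionsOutput, lastPage bool) bool {
+//            for _, v := range page.SolutionVersions {
+//                if aws.StringValue(v.Status) == "ACTIVE" {
+//                    activeArn = aws.StringValue(v.SolutionVersionArn)
+//                    return false // stop paging; an active version was found
+//                }
+//            }
+//            return true
+//        })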
+// +// See ListSolutionVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSolutionVersions operation. +// pageNum := 0 +// err := client.ListSolutionVersionsPages(params, +// func(page *personalize.ListSolutionVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListSolutionVersionsPages(input *ListSolutionVersionsInput, fn func(*ListSolutionVersionsOutput, bool) bool) error { + return c.ListSolutionVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSolutionVersionsPagesWithContext same as ListSolutionVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSolutionVersionsPagesWithContext(ctx aws.Context, input *ListSolutionVersionsInput, fn func(*ListSolutionVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSolutionVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSolutionVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSolutionVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListSolutions = "ListSolutions" + +// ListSolutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListSolutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSolutions for more information on using the ListSolutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSolutionsRequest method. +// req, resp := client.ListSolutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSolutions +func (c *Personalize) ListSolutionsRequest(input *ListSolutionsInput) (req *request.Request, output *ListSolutionsOutput) { + op := &request.Operation{ + Name: opListSolutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSolutionsInput{} + } + + output = &ListSolutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSolutions API operation for Amazon Personalize. +// +// Returns a list of solutions that use the given dataset group. 
When a dataset +// group is not specified, all the solutions associated with the account are +// listed. The response provides the properties for each solution, including +// the Amazon Resource Name (ARN). For more information on solutions, see CreateSolution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListSolutions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// The token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListSolutions +func (c *Personalize) ListSolutions(input *ListSolutionsInput) (*ListSolutionsOutput, error) { + req, out := c.ListSolutionsRequest(input) + return out, req.Send() +} + +// ListSolutionsWithContext is the same as ListSolutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListSolutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSolutionsWithContext(ctx aws.Context, input *ListSolutionsInput, opts ...request.Option) (*ListSolutionsOutput, error) { + req, out := c.ListSolutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSolutionsPages iterates over the pages of a ListSolutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSolutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSolutions operation. +// pageNum := 0 +// err := client.ListSolutionsPages(params, +// func(page *personalize.ListSolutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListSolutionsPages(input *ListSolutionsInput, fn func(*ListSolutionsOutput, bool) bool) error { + return c.ListSolutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSolutionsPagesWithContext same as ListSolutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListSolutionsPagesWithContext(ctx aws.Context, input *ListSolutionsInput, fn func(*ListSolutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSolutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSolutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
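+			// Each page is served by a fresh request built from the copied input,
+			// so the caller's input struct is never mutated by pagination.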
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSolutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opUpdateCampaign = "UpdateCampaign" + +// UpdateCampaignRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCampaign operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCampaign for more information on using the UpdateCampaign +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCampaignRequest method. +// req, resp := client.UpdateCampaignRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/UpdateCampaign +func (c *Personalize) UpdateCampaignRequest(input *UpdateCampaignInput) (req *request.Request, output *UpdateCampaignOutput) { + op := &request.Operation{ + Name: opUpdateCampaign, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCampaignInput{} + } + + output = &UpdateCampaignOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateCampaign API operation for Amazon Personalize. +// +// Updates a campaign by either deploying a new solution or changing the value +// of the campaign's minProvisionedTPS parameter. +// +// To update a campaign, the campaign status must be ACTIVE or CREATE FAILED. +// Check the campaign status using the DescribeCampaign API. +// +// You must wait until the status of the updated campaign is ACTIVE before asking +// the campaign for recommendations. +// +// For more information on campaigns, see CreateCampaign. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation UpdateCampaign for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInputException" +// Provide a valid value for the field or parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Could not find the specified resource. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/UpdateCampaign +func (c *Personalize) UpdateCampaign(input *UpdateCampaignInput) (*UpdateCampaignOutput, error) { + req, out := c.UpdateCampaignRequest(input) + return out, req.Send() +} + +// UpdateCampaignWithContext is the same as UpdateCampaign with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCampaign for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) UpdateCampaignWithContext(ctx aws.Context, input *UpdateCampaignInput, opts ...request.Option) (*UpdateCampaignOutput, error) { + req, out := c.UpdateCampaignRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Describes a custom algorithm. +type Algorithm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the algorithm. + AlgorithmArn *string `locationName:"algorithmArn" type:"string"` + + // The URI of the Docker container for the algorithm image. + AlgorithmImage *AlgorithmImage `locationName:"algorithmImage" type:"structure"` + + // The date and time (in Unix time) that the algorithm was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // Specifies the default hyperparameters, their ranges, and whether they are + // tunable. A tunable hyperparameter can have its value determined during hyperparameter + // optimization (HPO). + DefaultHyperParameterRanges *DefaultHyperParameterRanges `locationName:"defaultHyperParameterRanges" type:"structure"` + + // Specifies the default hyperparameters. + DefaultHyperParameters map[string]*string `locationName:"defaultHyperParameters" type:"map"` + + // Specifies the default maximum number of training jobs and parallel training + // jobs. + DefaultResourceConfig map[string]*string `locationName:"defaultResourceConfig" type:"map"` + + // The date and time (in Unix time) that the algorithm was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the algorithm. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The training input mode. + TrainingInputMode *string `locationName:"trainingInputMode" type:"string"` +} + +// String returns the string representation +func (s Algorithm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Algorithm) GoString() string { + return s.String() +} + +// SetAlgorithmArn sets the AlgorithmArn field's value. +func (s *Algorithm) SetAlgorithmArn(v string) *Algorithm { + s.AlgorithmArn = &v + return s +} + +// SetAlgorithmImage sets the AlgorithmImage field's value. +func (s *Algorithm) SetAlgorithmImage(v *AlgorithmImage) *Algorithm { + s.AlgorithmImage = v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Algorithm) SetCreationDateTime(v time.Time) *Algorithm { + s.CreationDateTime = &v + return s +} + +// SetDefaultHyperParameterRanges sets the DefaultHyperParameterRanges field's value. +func (s *Algorithm) SetDefaultHyperParameterRanges(v *DefaultHyperParameterRanges) *Algorithm { + s.DefaultHyperParameterRanges = v + return s +} + +// SetDefaultHyperParameters sets the DefaultHyperParameters field's value. +func (s *Algorithm) SetDefaultHyperParameters(v map[string]*string) *Algorithm { + s.DefaultHyperParameters = v + return s +} + +// SetDefaultResourceConfig sets the DefaultResourceConfig field's value. +func (s *Algorithm) SetDefaultResourceConfig(v map[string]*string) *Algorithm { + s.DefaultResourceConfig = v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. 
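+// Like the other setters on this type it returns the receiver, so calls can
+// be chained when hand-assembling a value (a sketch only; Algorithm values
+// are normally populated from service responses):
+//
+//    a := (&personalize.Algorithm{}).
+//        SetName("my-algorithm").
+//        SetLastUpdatedDateTime(time.Now())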
+func (s *Algorithm) SetLastUpdatedDateTime(v time.Time) *Algorithm { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Algorithm) SetName(v string) *Algorithm { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *Algorithm) SetRoleArn(v string) *Algorithm { + s.RoleArn = &v + return s +} + +// SetTrainingInputMode sets the TrainingInputMode field's value. +func (s *Algorithm) SetTrainingInputMode(v string) *Algorithm { + s.TrainingInputMode = &v + return s +} + +// Describes an algorithm image. +type AlgorithmImage struct { + _ struct{} `type:"structure"` + + // The URI of the Docker container for the algorithm image. + // + // DockerURI is a required field + DockerURI *string `locationName:"dockerURI" type:"string" required:"true"` + + // The name of the algorithm image. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s AlgorithmImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlgorithmImage) GoString() string { + return s.String() +} + +// SetDockerURI sets the DockerURI field's value. +func (s *AlgorithmImage) SetDockerURI(v string) *AlgorithmImage { + s.DockerURI = &v + return s +} + +// SetName sets the Name field's value. +func (s *AlgorithmImage) SetName(v string) *AlgorithmImage { + s.Name = &v + return s +} + +// When the solution performs AutoML (performAutoML is true in CreateSolution), +// Amazon Personalize determines which recipe, from the specified list, optimizes +// the given metric. Amazon Personalize then uses that recipe for the solution. +type AutoMLConfig struct { + _ struct{} `type:"structure"` + + // The metric to optimize. + MetricName *string `locationName:"metricName" type:"string"` + + // The list of candidate recipes. + RecipeList []*string `locationName:"recipeList" type:"list"` +} + +// String returns the string representation +func (s AutoMLConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoMLConfig) GoString() string { + return s.String() +} + +// SetMetricName sets the MetricName field's value. +func (s *AutoMLConfig) SetMetricName(v string) *AutoMLConfig { + s.MetricName = &v + return s +} + +// SetRecipeList sets the RecipeList field's value. +func (s *AutoMLConfig) SetRecipeList(v []*string) *AutoMLConfig { + s.RecipeList = v + return s +} + +// When the solution performs AutoML (performAutoML is true in CreateSolution), +// specifies the recipe that best optimized the specified metric. +type AutoMLResult struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the best recipe. + BestRecipeArn *string `locationName:"bestRecipeArn" type:"string"` +} + +// String returns the string representation +func (s AutoMLResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoMLResult) GoString() string { + return s.String() +} + +// SetBestRecipeArn sets the BestRecipeArn field's value. +func (s *AutoMLResult) SetBestRecipeArn(v string) *AutoMLResult { + s.BestRecipeArn = &v + return s +} + +// Contains information on a batch inference job. +type BatchInferenceJob struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the batch inference job. 
+ BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` + + // The time at which the batch inference job was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If the batch inference job failed, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The Amazon S3 path that leads to the input data used to generate the batch + // inference job. + JobInput *BatchInferenceJobInput `locationName:"jobInput" type:"structure"` + + // The name of the batch inference job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The Amazon S3 bucket that contains the output data generated by the batch + // inference job. + JobOutput *BatchInferenceJobOutput `locationName:"jobOutput" type:"structure"` + + // The time at which the batch inference job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The number of recommendations generated by the batch inference job. This + // number includes the error messages generated for failed input records. + NumResults *int64 `locationName:"numResults" type:"integer"` + + // The ARN of the Amazon Identity and Access Management (IAM) role that requested + // the batch inference job. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The Amazon Resource Name (ARN) of the solution version from which the batch + // inference job was created. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the batch inference job. The status is one of the following + // values: + // + // * PENDING + // + // * IN PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s BatchInferenceJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchInferenceJob) GoString() string { + return s.String() +} + +// SetBatchInferenceJobArn sets the BatchInferenceJobArn field's value. +func (s *BatchInferenceJob) SetBatchInferenceJobArn(v string) *BatchInferenceJob { + s.BatchInferenceJobArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *BatchInferenceJob) SetCreationDateTime(v time.Time) *BatchInferenceJob { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *BatchInferenceJob) SetFailureReason(v string) *BatchInferenceJob { + s.FailureReason = &v + return s +} + +// SetJobInput sets the JobInput field's value. +func (s *BatchInferenceJob) SetJobInput(v *BatchInferenceJobInput) *BatchInferenceJob { + s.JobInput = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *BatchInferenceJob) SetJobName(v string) *BatchInferenceJob { + s.JobName = &v + return s +} + +// SetJobOutput sets the JobOutput field's value. +func (s *BatchInferenceJob) SetJobOutput(v *BatchInferenceJobOutput) *BatchInferenceJob { + s.JobOutput = v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *BatchInferenceJob) SetLastUpdatedDateTime(v time.Time) *BatchInferenceJob { + s.LastUpdatedDateTime = &v + return s +} + +// SetNumResults sets the NumResults field's value. +func (s *BatchInferenceJob) SetNumResults(v int64) *BatchInferenceJob { + s.NumResults = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *BatchInferenceJob) SetRoleArn(v string) *BatchInferenceJob { + s.RoleArn = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *BatchInferenceJob) SetSolutionVersionArn(v string) *BatchInferenceJob { + s.SolutionVersionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *BatchInferenceJob) SetStatus(v string) *BatchInferenceJob { + s.Status = &v + return s +} + +// The input configuration of a batch inference job. +type BatchInferenceJobInput struct { + _ struct{} `type:"structure"` + + // The URI of the Amazon S3 location that contains your input data. The Amazon + // S3 bucket must be in the same region as the API endpoint you are calling. + // + // S3DataSource is a required field + S3DataSource *S3DataConfig `locationName:"s3DataSource" type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchInferenceJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchInferenceJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchInferenceJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchInferenceJobInput"} + if s.S3DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("S3DataSource")) + } + if s.S3DataSource != nil { + if err := s.S3DataSource.Validate(); err != nil { + invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3DataSource sets the S3DataSource field's value. +func (s *BatchInferenceJobInput) SetS3DataSource(v *S3DataConfig) *BatchInferenceJobInput { + s.S3DataSource = v + return s +} + +// The output configuration parameters of a batch inference job. +type BatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // Information on the Amazon S3 bucket in which the batch inference job's output + // is stored. + // + // S3DataDestination is a required field + S3DataDestination *S3DataConfig `locationName:"s3DataDestination" type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchInferenceJobOutput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchInferenceJobOutput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchInferenceJobOutput"} + if s.S3DataDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3DataDestination")) + } + if s.S3DataDestination != nil { + if err := s.S3DataDestination.Validate(); err != nil { + invalidParams.AddNested("S3DataDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3DataDestination sets the S3DataDestination field's value. +func (s *BatchInferenceJobOutput) SetS3DataDestination(v *S3DataConfig) *BatchInferenceJobOutput { + s.S3DataDestination = v + return s +} + +// A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs +// operation returns a list of batch inference job summaries. 
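+//
+// For example, a sketch of flagging failed jobs from a list response
+// (BatchInferenceJobs is assumed to be the summary-list field of
+// ListBatchInferenceJobsOutput):
+//
+//    for _, job := range out.BatchInferenceJobs {
+//        if aws.StringValue(job.Status) == "CREATE FAILED" {
+//            fmt.Println(aws.StringValue(job.JobName), aws.StringValue(job.FailureReason))
+//        }
+//    }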
+type BatchInferenceJobSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the batch inference job. + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` + + // The time at which the batch inference job was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If the batch inference job failed, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the batch inference job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The time at which the batch inference job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The status of the batch inference job. The status is one of the following + // values: + // + // * PENDING + // + // * IN PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s BatchInferenceJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchInferenceJobSummary) GoString() string { + return s.String() +} + +// SetBatchInferenceJobArn sets the BatchInferenceJobArn field's value. +func (s *BatchInferenceJobSummary) SetBatchInferenceJobArn(v string) *BatchInferenceJobSummary { + s.BatchInferenceJobArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *BatchInferenceJobSummary) SetCreationDateTime(v time.Time) *BatchInferenceJobSummary { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *BatchInferenceJobSummary) SetFailureReason(v string) *BatchInferenceJobSummary { + s.FailureReason = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *BatchInferenceJobSummary) SetJobName(v string) *BatchInferenceJobSummary { + s.JobName = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *BatchInferenceJobSummary) SetLastUpdatedDateTime(v time.Time) *BatchInferenceJobSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *BatchInferenceJobSummary) SetStatus(v string) *BatchInferenceJobSummary { + s.Status = &v + return s +} + +// Describes a deployed solution version, otherwise known as a campaign. For +// more information on campaigns, see CreateCampaign. +type Campaign struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the campaign. + CampaignArn *string `locationName:"campaignArn" type:"string"` + + // The date and time (in Unix time) that the campaign was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If a campaign fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the campaign was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // Provides a summary of the properties of a campaign update. For a complete + // listing, call the DescribeCampaign API. + LatestCampaignUpdate *CampaignUpdateSummary `locationName:"latestCampaignUpdate" type:"structure"` + + // Specifies the requested minimum provisioned transactions (recommendations) + // per second. 
+ MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer"` + + // The name of the campaign. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of a specific version of the solution. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the campaign. + // + // A campaign can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Campaign) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Campaign) GoString() string { + return s.String() +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *Campaign) SetCampaignArn(v string) *Campaign { + s.CampaignArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Campaign) SetCreationDateTime(v time.Time) *Campaign { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *Campaign) SetFailureReason(v string) *Campaign { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *Campaign) SetLastUpdatedDateTime(v time.Time) *Campaign { + s.LastUpdatedDateTime = &v + return s +} + +// SetLatestCampaignUpdate sets the LatestCampaignUpdate field's value. +func (s *Campaign) SetLatestCampaignUpdate(v *CampaignUpdateSummary) *Campaign { + s.LatestCampaignUpdate = v + return s +} + +// SetMinProvisionedTPS sets the MinProvisionedTPS field's value. +func (s *Campaign) SetMinProvisionedTPS(v int64) *Campaign { + s.MinProvisionedTPS = &v + return s +} + +// SetName sets the Name field's value. +func (s *Campaign) SetName(v string) *Campaign { + s.Name = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *Campaign) SetSolutionVersionArn(v string) *Campaign { + s.SolutionVersionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Campaign) SetStatus(v string) *Campaign { + s.Status = &v + return s +} + +// Provides a summary of the properties of a campaign. For a complete listing, +// call the DescribeCampaign API. +type CampaignSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the campaign. + CampaignArn *string `locationName:"campaignArn" type:"string"` + + // The date and time (in Unix time) that the campaign was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If a campaign fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the campaign was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the campaign. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the campaign. 
+ // + // A campaign can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s CampaignSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignSummary) GoString() string { + return s.String() +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *CampaignSummary) SetCampaignArn(v string) *CampaignSummary { + s.CampaignArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *CampaignSummary) SetCreationDateTime(v time.Time) *CampaignSummary { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *CampaignSummary) SetFailureReason(v string) *CampaignSummary { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *CampaignSummary) SetLastUpdatedDateTime(v time.Time) *CampaignSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *CampaignSummary) SetName(v string) *CampaignSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CampaignSummary) SetStatus(v string) *CampaignSummary { + s.Status = &v + return s +} + +// Provides a summary of the properties of a campaign update. For a complete +// listing, call the DescribeCampaign API. +type CampaignUpdateSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the campaign update was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If a campaign update fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the campaign update was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // Specifies the requested minimum provisioned transactions (recommendations) + // per second that Amazon Personalize will support. + MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the deployed solution version. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the campaign update. + // + // A campaign update can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s CampaignUpdateSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignUpdateSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *CampaignUpdateSummary) SetCreationDateTime(v time.Time) *CampaignUpdateSummary { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *CampaignUpdateSummary) SetFailureReason(v string) *CampaignUpdateSummary { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. 
+func (s *CampaignUpdateSummary) SetLastUpdatedDateTime(v time.Time) *CampaignUpdateSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetMinProvisionedTPS sets the MinProvisionedTPS field's value. +func (s *CampaignUpdateSummary) SetMinProvisionedTPS(v int64) *CampaignUpdateSummary { + s.MinProvisionedTPS = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *CampaignUpdateSummary) SetSolutionVersionArn(v string) *CampaignUpdateSummary { + s.SolutionVersionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CampaignUpdateSummary) SetStatus(v string) *CampaignUpdateSummary { + s.Status = &v + return s +} + +// Provides the name and range of a categorical hyperparameter. +type CategoricalHyperParameterRange struct { + _ struct{} `type:"structure"` + + // The name of the hyperparameter. + Name *string `locationName:"name" type:"string"` + + // A list of the categories for the hyperparameter. + Values []*string `locationName:"values" type:"list"` +} + +// String returns the string representation +func (s CategoricalHyperParameterRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CategoricalHyperParameterRange) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *CategoricalHyperParameterRange) SetName(v string) *CategoricalHyperParameterRange { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *CategoricalHyperParameterRange) SetValues(v []*string) *CategoricalHyperParameterRange { + s.Values = v + return s +} + +// Provides the name and range of a continuous hyperparameter. +type ContinuousHyperParameterRange struct { + _ struct{} `type:"structure"` + + // The maximum allowable value for the hyperparameter. + MaxValue *float64 `locationName:"maxValue" type:"double"` + + // The minimum allowable value for the hyperparameter. + MinValue *float64 `locationName:"minValue" type:"double"` + + // The name of the hyperparameter. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ContinuousHyperParameterRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinuousHyperParameterRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContinuousHyperParameterRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContinuousHyperParameterRange"} + if s.MaxValue != nil && *s.MaxValue < -1e+06 { + invalidParams.Add(request.NewErrParamMinValue("MaxValue", -1e+06)) + } + if s.MinValue != nil && *s.MinValue < -1e+06 { + invalidParams.Add(request.NewErrParamMinValue("MinValue", -1e+06)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxValue sets the MaxValue field's value. +func (s *ContinuousHyperParameterRange) SetMaxValue(v float64) *ContinuousHyperParameterRange { + s.MaxValue = &v + return s +} + +// SetMinValue sets the MinValue field's value. +func (s *ContinuousHyperParameterRange) SetMinValue(v float64) *ContinuousHyperParameterRange { + s.MinValue = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *ContinuousHyperParameterRange) SetName(v string) *ContinuousHyperParameterRange { + s.Name = &v + return s +} + +type CreateBatchInferenceJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 path that leads to the input file to base your recommendations + // on. The input material must be in JSON format. + // + // JobInput is a required field + JobInput *BatchInferenceJobInput `locationName:"jobInput" type:"structure" required:"true"` + + // The name of the batch inference job to create. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The path to the Amazon S3 bucket where the job's output will be stored. + // + // JobOutput is a required field + JobOutput *BatchInferenceJobOutput `locationName:"jobOutput" type:"structure" required:"true"` + + // The number of recommendations to retrieve. + NumResults *int64 `locationName:"numResults" type:"integer"` + + // The ARN of the AWS Identity and Access Management (IAM) role that has permissions + // to read and write to your input and output Amazon S3 buckets, respectively. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the solution version that will be used + // to generate the batch inference recommendations. + // + // SolutionVersionArn is a required field + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBatchInferenceJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBatchInferenceJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBatchInferenceJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBatchInferenceJobInput"} + if s.JobInput == nil { + invalidParams.Add(request.NewErrParamRequired("JobInput")) + } + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.JobOutput == nil { + invalidParams.Add(request.NewErrParamRequired("JobOutput")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.SolutionVersionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SolutionVersionArn")) + } + if s.JobInput != nil { + if err := s.JobInput.Validate(); err != nil { + invalidParams.AddNested("JobInput", err.(request.ErrInvalidParams)) + } + } + if s.JobOutput != nil { + if err := s.JobOutput.Validate(); err != nil { + invalidParams.AddNested("JobOutput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobInput sets the JobInput field's value. +func (s *CreateBatchInferenceJobInput) SetJobInput(v *BatchInferenceJobInput) *CreateBatchInferenceJobInput { + s.JobInput = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *CreateBatchInferenceJobInput) SetJobName(v string) *CreateBatchInferenceJobInput { + s.JobName = &v + return s +} + +// SetJobOutput sets the JobOutput field's value. 
+func (s *CreateBatchInferenceJobInput) SetJobOutput(v *BatchInferenceJobOutput) *CreateBatchInferenceJobInput { + s.JobOutput = v + return s +} + +// SetNumResults sets the NumResults field's value. +func (s *CreateBatchInferenceJobInput) SetNumResults(v int64) *CreateBatchInferenceJobInput { + s.NumResults = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateBatchInferenceJobInput) SetRoleArn(v string) *CreateBatchInferenceJobInput { + s.RoleArn = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *CreateBatchInferenceJobInput) SetSolutionVersionArn(v string) *CreateBatchInferenceJobInput { + s.SolutionVersionArn = &v + return s +} + +type CreateBatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the batch inference job. + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` +} + +// String returns the string representation +func (s CreateBatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBatchInferenceJobOutput) GoString() string { + return s.String() +} + +// SetBatchInferenceJobArn sets the BatchInferenceJobArn field's value. +func (s *CreateBatchInferenceJobOutput) SetBatchInferenceJobArn(v string) *CreateBatchInferenceJobOutput { + s.BatchInferenceJobArn = &v + return s +} + +type CreateCampaignInput struct { + _ struct{} `type:"structure"` + + // Specifies the requested minimum provisioned transactions (recommendations) + // per second that Amazon Personalize will support. + // + // MinProvisionedTPS is a required field + MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer" required:"true"` + + // A name for the new campaign. The campaign name must be unique within your + // account. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the solution version to deploy. + // + // SolutionVersionArn is a required field + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCampaignInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCampaignInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCampaignInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCampaignInput"} + if s.MinProvisionedTPS == nil { + invalidParams.Add(request.NewErrParamRequired("MinProvisionedTPS")) + } + if s.MinProvisionedTPS != nil && *s.MinProvisionedTPS < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinProvisionedTPS", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SolutionVersionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SolutionVersionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMinProvisionedTPS sets the MinProvisionedTPS field's value. +func (s *CreateCampaignInput) SetMinProvisionedTPS(v int64) *CreateCampaignInput { + s.MinProvisionedTPS = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *CreateCampaignInput) SetName(v string) *CreateCampaignInput { + s.Name = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *CreateCampaignInput) SetSolutionVersionArn(v string) *CreateCampaignInput { + s.SolutionVersionArn = &v + return s +} + +type CreateCampaignOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the campaign. + CampaignArn *string `locationName:"campaignArn" type:"string"` +} + +// String returns the string representation +func (s CreateCampaignOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCampaignOutput) GoString() string { + return s.String() +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *CreateCampaignOutput) SetCampaignArn(v string) *CreateCampaignOutput { + s.CampaignArn = &v + return s +} + +type CreateDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets. + KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` + + // The name for the new dataset group. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that has permissions to access the KMS key. Supplying + // an IAM role is only valid when also specifying a KMS key. + RoleArn *string `locationName:"roleArn" type:"string"` +} + +// String returns the string representation +func (s CreateDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetGroupInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *CreateDatasetGroupInput) SetKmsKeyArn(v string) *CreateDatasetGroupInput { + s.KmsKeyArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDatasetGroupInput) SetName(v string) *CreateDatasetGroupInput { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDatasetGroupInput) SetRoleArn(v string) *CreateDatasetGroupInput { + s.RoleArn = &v + return s +} + +type CreateDatasetGroupOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new dataset group. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` +} + +// String returns the string representation +func (s CreateDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetGroupOutput) GoString() string { + return s.String() +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateDatasetGroupOutput) SetDatasetGroupArn(v string) *CreateDatasetGroupOutput { + s.DatasetGroupArn = &v + return s +} + +type CreateDatasetImportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that contains the training data to import. 
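+ // For example, a data source that points at a CSV object could be built as +​ // follows (the bucket name and key are placeholders): + // + //    dataSource := &personalize.DataSource{ + //        DataLocation: aws.String("s3://bucket-name/training-data.csv"), + //    }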
+ // + // DataSource is a required field + DataSource *DataSource `locationName:"dataSource" type:"structure" required:"true"` + + // The ARN of the dataset that receives the imported data. + // + // DatasetArn is a required field + DatasetArn *string `locationName:"datasetArn" type:"string" required:"true"` + + // The name for the dataset import job. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that has permissions to read from the Amazon S3 data + // source. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDatasetImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetImportJobInput"} + if s.DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("DataSource")) + } + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataSource sets the DataSource field's value. +func (s *CreateDatasetImportJobInput) SetDataSource(v *DataSource) *CreateDatasetImportJobInput { + s.DataSource = v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetImportJobInput) SetDatasetArn(v string) *CreateDatasetImportJobInput { + s.DatasetArn = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *CreateDatasetImportJobInput) SetJobName(v string) *CreateDatasetImportJobInput { + s.JobName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateDatasetImportJobInput) SetRoleArn(v string) *CreateDatasetImportJobInput { + s.RoleArn = &v + return s +} + +type CreateDatasetImportJobOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset import job. + DatasetImportJobArn *string `locationName:"datasetImportJobArn" type:"string"` +} + +// String returns the string representation +func (s CreateDatasetImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetImportJobOutput) GoString() string { + return s.String() +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *CreateDatasetImportJobOutput) SetDatasetImportJobArn(v string) *CreateDatasetImportJobOutput { + s.DatasetImportJobArn = &v + return s +} + +type CreateDatasetInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group to add the dataset to. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` + + // The type of dataset. 
+ // + // One of the following (case insensitive) values: + // + // * Interactions + // + // * Items + // + // * Users + // + // DatasetType is a required field + DatasetType *string `locationName:"datasetType" type:"string" required:"true"` + + // The name for the dataset. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The ARN of the schema to associate with the dataset. The schema defines the + // dataset fields. + // + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + if s.DatasetType == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetType")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SchemaArn == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateDatasetInput) SetDatasetGroupArn(v string) *CreateDatasetInput { + s.DatasetGroupArn = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *CreateDatasetInput) SetDatasetType(v string) *CreateDatasetInput { + s.DatasetType = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDatasetInput) SetName(v string) *CreateDatasetInput { + s.Name = &v + return s +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *CreateDatasetInput) SetSchemaArn(v string) *CreateDatasetInput { + s.SchemaArn = &v + return s +} + +type CreateDatasetOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + DatasetArn *string `locationName:"datasetArn" type:"string"` +} + +// String returns the string representation +func (s CreateDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetOutput) GoString() string { + return s.String() +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetOutput) SetDatasetArn(v string) *CreateDatasetOutput { + s.DatasetArn = &v + return s +} + +type CreateEventTrackerInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group that receives the event + // data. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` + + // The name for the event tracker. 
+ // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEventTrackerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventTrackerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEventTrackerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEventTrackerInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateEventTrackerInput) SetDatasetGroupArn(v string) *CreateEventTrackerInput { + s.DatasetGroupArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateEventTrackerInput) SetName(v string) *CreateEventTrackerInput { + s.Name = &v + return s +} + +type CreateEventTrackerOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the event tracker. + EventTrackerArn *string `locationName:"eventTrackerArn" type:"string"` + + // The ID of the event tracker. Include this ID in requests to the PutEvents + // (https://docs.aws.amazon.com/personalize/latest/dg/API_UBS_PutEvents.html) + // API. + TrackingId *string `locationName:"trackingId" type:"string"` +} + +// String returns the string representation +func (s CreateEventTrackerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventTrackerOutput) GoString() string { + return s.String() +} + +// SetEventTrackerArn sets the EventTrackerArn field's value. +func (s *CreateEventTrackerOutput) SetEventTrackerArn(v string) *CreateEventTrackerOutput { + s.EventTrackerArn = &v + return s +} + +// SetTrackingId sets the TrackingId field's value. +func (s *CreateEventTrackerOutput) SetTrackingId(v string) *CreateEventTrackerOutput { + s.TrackingId = &v + return s +} + +type CreateSchemaInput struct { + _ struct{} `type:"structure"` + + // The name for the schema. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // A schema in Avro JSON format. + // + // Schema is a required field + Schema *string `locationName:"schema" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSchemaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSchemaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSchemaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSchemaInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Schema == nil { + invalidParams.Add(request.NewErrParamRequired("Schema")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. 
+func (s *CreateSchemaInput) SetName(v string) *CreateSchemaInput { + s.Name = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *CreateSchemaInput) SetSchema(v string) *CreateSchemaInput { + s.Schema = &v + return s +} + +type CreateSchemaOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the created schema. + SchemaArn *string `locationName:"schemaArn" type:"string"` +} + +// String returns the string representation +func (s CreateSchemaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSchemaOutput) GoString() string { + return s.String() +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *CreateSchemaOutput) SetSchemaArn(v string) *CreateSchemaOutput { + s.SchemaArn = &v + return s +} + +type CreateSolutionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group that provides the training + // data. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` + + // When you have multiple event types (using an EVENT_TYPE schema field), this + // parameter specifies which event type (for example, 'click' or 'like') is + // used for training the model. + EventType *string `locationName:"eventType" type:"string"` + + // The name for the solution. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Whether to perform automated machine learning (AutoML). The default is false. + // For this case, you must specify recipeArn. + // + // When set to true, Amazon Personalize analyzes your training data and selects + // the optimal USER_PERSONALIZATION recipe and hyperparameters. In this case, + // you must omit recipeArn. Amazon Personalize determines the optimal recipe + // by running tests with different values for the hyperparameters. AutoML lengthens + // the training process as compared to selecting a specific recipe. + PerformAutoML *bool `locationName:"performAutoML" type:"boolean"` + + // Whether to perform hyperparameter optimization (HPO) on the specified or + // selected recipe. The default is false. + // + // When performing AutoML, this parameter is always true and you should not + // set it to false. + PerformHPO *bool `locationName:"performHPO" type:"boolean"` + + // The ARN of the recipe to use for model training. Only specified when performAutoML + // is false. + RecipeArn *string `locationName:"recipeArn" type:"string"` + + // The configuration to use with the solution. When performAutoML is set to + // true, Amazon Personalize only evaluates the autoMLConfig section of the solution + // configuration. + SolutionConfig *SolutionConfig `locationName:"solutionConfig" type:"structure"` +} + +// String returns the string representation +func (s CreateSolutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSolutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateSolutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSolutionInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SolutionConfig != nil { + if err := s.SolutionConfig.Validate(); err != nil { + invalidParams.AddNested("SolutionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateSolutionInput) SetDatasetGroupArn(v string) *CreateSolutionInput { + s.DatasetGroupArn = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *CreateSolutionInput) SetEventType(v string) *CreateSolutionInput { + s.EventType = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSolutionInput) SetName(v string) *CreateSolutionInput { + s.Name = &v + return s +} + +// SetPerformAutoML sets the PerformAutoML field's value. +func (s *CreateSolutionInput) SetPerformAutoML(v bool) *CreateSolutionInput { + s.PerformAutoML = &v + return s +} + +// SetPerformHPO sets the PerformHPO field's value. +func (s *CreateSolutionInput) SetPerformHPO(v bool) *CreateSolutionInput { + s.PerformHPO = &v + return s +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *CreateSolutionInput) SetRecipeArn(v string) *CreateSolutionInput { + s.RecipeArn = &v + return s +} + +// SetSolutionConfig sets the SolutionConfig field's value. +func (s *CreateSolutionInput) SetSolutionConfig(v *SolutionConfig) *CreateSolutionInput { + s.SolutionConfig = v + return s +} + +type CreateSolutionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the solution. + SolutionArn *string `locationName:"solutionArn" type:"string"` +} + +// String returns the string representation +func (s CreateSolutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSolutionOutput) GoString() string { + return s.String() +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *CreateSolutionOutput) SetSolutionArn(v string) *CreateSolutionOutput { + s.SolutionArn = &v + return s +} + +type CreateSolutionVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the solution containing the training configuration + // information. + // + // SolutionArn is a required field + SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` + + // The scope of training to be performed when creating the solution version. + // The FULL option trains the solution version based on the entirety of the + // input solution's training data, while the UPDATE option processes only the + // data that has changed in comparison to the input solution. Choose UPDATE + // when you want to incrementally update your solution version instead of creating + // an entirely new one. + // + // The UPDATE option can only be used when you already have an active solution + // version created from the input solution using the FULL option and the input + // solution was trained with the native-recipe-hrnn-coldstart recipe. 
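+ // + // As a sketch, an incremental retrain could be requested like this (the + // solution ARN is a placeholder, and TrainingModeUpdate is assumed to be the + // generated constant for "UPDATE"): + // + //    input := &personalize.CreateSolutionVersionInput{ + //        SolutionArn:  aws.String("arn:aws:personalize:..."), + //        TrainingMode: aws.String(personalize.TrainingModeUpdate), + //    }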
+ TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` +} + +// String returns the string representation +func (s CreateSolutionVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSolutionVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSolutionVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSolutionVersionInput"} + if s.SolutionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SolutionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *CreateSolutionVersionInput) SetSolutionArn(v string) *CreateSolutionVersionInput { + s.SolutionArn = &v + return s +} + +// SetTrainingMode sets the TrainingMode field's value. +func (s *CreateSolutionVersionInput) SetTrainingMode(v string) *CreateSolutionVersionInput { + s.TrainingMode = &v + return s +} + +type CreateSolutionVersionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the new solution version. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` +} + +// String returns the string representation +func (s CreateSolutionVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSolutionVersionOutput) GoString() string { + return s.String() +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *CreateSolutionVersionOutput) SetSolutionVersionArn(v string) *CreateSolutionVersionOutput { + s.SolutionVersionArn = &v + return s +} + +// Describes the data source that contains the data to upload to a dataset. +type DataSource struct { + _ struct{} `type:"structure"` + + // The path to the Amazon S3 bucket where the data that you want to upload to + // your dataset is stored. For example: + // + // s3://bucket-name/training-data.csv + DataLocation *string `locationName:"dataLocation" type:"string"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +// SetDataLocation sets the DataLocation field's value. +func (s *DataSource) SetDataLocation(v string) *DataSource { + s.DataLocation = &v + return s +} + +// Provides metadata for a dataset. +type Dataset struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the dataset. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset that you want metadata for. + DatasetArn *string `locationName:"datasetArn" type:"string"` + + // The Amazon Resource Name (ARN) of the dataset group. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // One of the following values: + // + // * Interactions + // + // * Items + // + // * Users + DatasetType *string `locationName:"datasetType" type:"string"` + + // A time stamp that shows when the dataset was updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the dataset. + Name *string `locationName:"name" min:"1" type:"string"` + + // The ARN of the associated schema. 
+ SchemaArn *string `locationName:"schemaArn" type:"string"` + + // The status of the dataset. + // + // A dataset can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Dataset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dataset) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Dataset) SetCreationDateTime(v time.Time) *Dataset { + s.CreationDateTime = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *Dataset) SetDatasetArn(v string) *Dataset { + s.DatasetArn = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *Dataset) SetDatasetGroupArn(v string) *Dataset { + s.DatasetGroupArn = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *Dataset) SetDatasetType(v string) *Dataset { + s.DatasetType = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *Dataset) SetLastUpdatedDateTime(v time.Time) *Dataset { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Dataset) SetName(v string) *Dataset { + s.Name = &v + return s +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *Dataset) SetSchemaArn(v string) *Dataset { + s.SchemaArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Dataset) SetStatus(v string) *Dataset { + s.Status = &v + return s +} + +// A dataset group is a collection of related datasets (Interactions, User, +// and Item). You create a dataset group by calling CreateDatasetGroup. You +// then create a dataset and add it to a dataset group by calling CreateDataset. +// The dataset group is used to create and train a solution by calling CreateSolution. +// A dataset group can contain only one of each type of dataset. +// +// You can specify an AWS Key Management Service (KMS) key to encrypt the datasets +// in the group. +type DatasetGroup struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the dataset group. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If creating a dataset group fails, provides the reason why. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets. + KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` + + // The last update date and time (in Unix time) of the dataset group. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the dataset group. + Name *string `locationName:"name" min:"1" type:"string"` + + // The ARN of the IAM role that has permissions to create the dataset group. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The current status of the dataset group. 
+ // + // A dataset group can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s DatasetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetGroup) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DatasetGroup) SetCreationDateTime(v time.Time) *DatasetGroup { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DatasetGroup) SetDatasetGroupArn(v string) *DatasetGroup { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DatasetGroup) SetFailureReason(v string) *DatasetGroup { + s.FailureReason = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *DatasetGroup) SetKmsKeyArn(v string) *DatasetGroup { + s.KmsKeyArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DatasetGroup) SetLastUpdatedDateTime(v time.Time) *DatasetGroup { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DatasetGroup) SetName(v string) *DatasetGroup { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DatasetGroup) SetRoleArn(v string) *DatasetGroup { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetGroup) SetStatus(v string) *DatasetGroup { + s.Status = &v + return s +} + +// Provides a summary of the properties of a dataset group. For a complete listing, +// call the DescribeDatasetGroup API. +type DatasetGroupSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the dataset group was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If creating a dataset group fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the dataset group was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the dataset group. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the dataset group. + // + // A dataset group can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s DatasetGroupSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetGroupSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DatasetGroupSummary) SetCreationDateTime(v time.Time) *DatasetGroupSummary { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. 
+func (s *DatasetGroupSummary) SetDatasetGroupArn(v string) *DatasetGroupSummary { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DatasetGroupSummary) SetFailureReason(v string) *DatasetGroupSummary { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DatasetGroupSummary) SetLastUpdatedDateTime(v time.Time) *DatasetGroupSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DatasetGroupSummary) SetName(v string) *DatasetGroupSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetGroupSummary) SetStatus(v string) *DatasetGroupSummary { + s.Status = &v + return s +} + +// Describes a job that imports training data from a data source (Amazon S3 +// bucket) to an Amazon Personalize dataset. For more information, see CreateDatasetImportJob. +// +// A dataset import job can be in one of the following states: +// +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +type DatasetImportJob struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the dataset import job. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon S3 bucket that contains the training data to import. + DataSource *DataSource `locationName:"dataSource" type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset that receives the imported + // data. + DatasetArn *string `locationName:"datasetArn" type:"string"` + + // The ARN of the dataset import job. + DatasetImportJobArn *string `locationName:"datasetImportJobArn" type:"string"` + + // If a dataset import job fails, provides the reason why. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the import job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The date and time (in Unix time) the dataset was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The ARN of the AWS Identity and Access Management (IAM) role that has permissions + // to read from the Amazon S3 data source. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The status of the dataset import job. + // + // A dataset import job can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s DatasetImportJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetImportJob) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DatasetImportJob) SetCreationDateTime(v time.Time) *DatasetImportJob { + s.CreationDateTime = &v + return s +} + +// SetDataSource sets the DataSource field's value. +func (s *DatasetImportJob) SetDataSource(v *DataSource) *DatasetImportJob { + s.DataSource = v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DatasetImportJob) SetDatasetArn(v string) *DatasetImportJob { + s.DatasetArn = &v + return s +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. 
+func (s *DatasetImportJob) SetDatasetImportJobArn(v string) *DatasetImportJob { + s.DatasetImportJobArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DatasetImportJob) SetFailureReason(v string) *DatasetImportJob { + s.FailureReason = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *DatasetImportJob) SetJobName(v string) *DatasetImportJob { + s.JobName = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DatasetImportJob) SetLastUpdatedDateTime(v time.Time) *DatasetImportJob { + s.LastUpdatedDateTime = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DatasetImportJob) SetRoleArn(v string) *DatasetImportJob { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetImportJob) SetStatus(v string) *DatasetImportJob { + s.Status = &v + return s +} + +// Provides a summary of the properties of a dataset import job. For a complete +// listing, call the DescribeDatasetImportJob API. +type DatasetImportJobSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the dataset import job was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset import job. + DatasetImportJobArn *string `locationName:"datasetImportJobArn" type:"string"` + + // If a dataset import job fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the dataset import job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The date and time (in Unix time) that the dataset was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The status of the dataset import job. + // + // A dataset import job can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s DatasetImportJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetImportJobSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DatasetImportJobSummary) SetCreationDateTime(v time.Time) *DatasetImportJobSummary { + s.CreationDateTime = &v + return s +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *DatasetImportJobSummary) SetDatasetImportJobArn(v string) *DatasetImportJobSummary { + s.DatasetImportJobArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DatasetImportJobSummary) SetFailureReason(v string) *DatasetImportJobSummary { + s.FailureReason = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *DatasetImportJobSummary) SetJobName(v string) *DatasetImportJobSummary { + s.JobName = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DatasetImportJobSummary) SetLastUpdatedDateTime(v time.Time) *DatasetImportJobSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetImportJobSummary) SetStatus(v string) *DatasetImportJobSummary { + s.Status = &v + return s +} + +// Describes the schema for a dataset. 
+// CreateSchema.
+type DatasetSchema struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time (in Unix time) that the schema was created.
+ CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"`
+
+ // The date and time (in Unix time) that the schema was last updated.
+ LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"`
+
+ // The name of the schema.
+ Name *string `locationName:"name" min:"1" type:"string"`
+
+ // The schema.
+ Schema *string `locationName:"schema" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the schema.
+ SchemaArn *string `locationName:"schemaArn" type:"string"`
+}
+
+// String returns the string representation
+func (s DatasetSchema) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DatasetSchema) GoString() string {
+ return s.String()
+}
+
+// SetCreationDateTime sets the CreationDateTime field's value.
+func (s *DatasetSchema) SetCreationDateTime(v time.Time) *DatasetSchema {
+ s.CreationDateTime = &v
+ return s
+}
+
+// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value.
+func (s *DatasetSchema) SetLastUpdatedDateTime(v time.Time) *DatasetSchema {
+ s.LastUpdatedDateTime = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DatasetSchema) SetName(v string) *DatasetSchema {
+ s.Name = &v
+ return s
+}
+
+// SetSchema sets the Schema field's value.
+func (s *DatasetSchema) SetSchema(v string) *DatasetSchema {
+ s.Schema = &v
+ return s
+}
+
+// SetSchemaArn sets the SchemaArn field's value.
+func (s *DatasetSchema) SetSchemaArn(v string) *DatasetSchema {
+ s.SchemaArn = &v
+ return s
+}
+
+// Provides a summary of the properties of a dataset schema. For a complete
+// listing, call the DescribeSchema API.
+type DatasetSchemaSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time (in Unix time) that the schema was created.
+ CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"`
+
+ // The date and time (in Unix time) that the schema was last updated.
+ LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"`
+
+ // The name of the schema.
+ Name *string `locationName:"name" min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the schema.
+ SchemaArn *string `locationName:"schemaArn" type:"string"`
+}
+
+// String returns the string representation
+func (s DatasetSchemaSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DatasetSchemaSummary) GoString() string {
+ return s.String()
+}
+
+// SetCreationDateTime sets the CreationDateTime field's value.
+func (s *DatasetSchemaSummary) SetCreationDateTime(v time.Time) *DatasetSchemaSummary {
+ s.CreationDateTime = &v
+ return s
+}
+
+// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value.
+func (s *DatasetSchemaSummary) SetLastUpdatedDateTime(v time.Time) *DatasetSchemaSummary {
+ s.LastUpdatedDateTime = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DatasetSchemaSummary) SetName(v string) *DatasetSchemaSummary {
+ s.Name = &v
+ return s
+}
+
+// SetSchemaArn sets the SchemaArn field's value.
+func (s *DatasetSchemaSummary) SetSchemaArn(v string) *DatasetSchemaSummary {
+ s.SchemaArn = &v
+ return s
+}
+
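+// Example (illustrative sketch, not generated code): the Schema field above
+// holds the schema definition as a JSON string in Avro format. The field
+// names below follow the service documentation's interactions example and
+// are assumptions, not values taken from this file:
+//
+//	schemaJSON := `{
+//	  "type": "record",
+//	  "name": "Interactions",
+//	  "namespace": "com.amazonaws.personalize.schema",
+//	  "fields": [
+//	    {"name": "USER_ID", "type": "string"},
+//	    {"name": "ITEM_ID", "type": "string"},
+//	    {"name": "TIMESTAMP", "type": "long"}
+//	  ]
+//	}`
+//	s := (&DatasetSchema{}).SetName("my-schema").SetSchema(schemaJSON)
+//	fmt.Println(s) // String() pretty-prints the struct via awsutil.Prettify
+
+// Provides a summary of the properties of a dataset. For a complete listing,
+// call the DescribeDataset API.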
+type DatasetSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the dataset was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset. + DatasetArn *string `locationName:"datasetArn" type:"string"` + + // The dataset type. One of the following values: + // + // * Interactions + // + // * Items + // + // * Users + // + // * Event-Interactions + DatasetType *string `locationName:"datasetType" type:"string"` + + // The date and time (in Unix time) that the dataset was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the dataset. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the dataset. + // + // A dataset can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s DatasetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatasetSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DatasetSummary) SetCreationDateTime(v time.Time) *DatasetSummary { + s.CreationDateTime = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DatasetSummary) SetDatasetArn(v string) *DatasetSummary { + s.DatasetArn = &v + return s +} + +// SetDatasetType sets the DatasetType field's value. +func (s *DatasetSummary) SetDatasetType(v string) *DatasetSummary { + s.DatasetType = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *DatasetSummary) SetLastUpdatedDateTime(v time.Time) *DatasetSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DatasetSummary) SetName(v string) *DatasetSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DatasetSummary) SetStatus(v string) *DatasetSummary { + s.Status = &v + return s +} + +// Provides the name and default range of a categorical hyperparameter and whether +// the hyperparameter is tunable. A tunable hyperparameter can have its value +// determined during hyperparameter optimization (HPO). +type DefaultCategoricalHyperParameterRange struct { + _ struct{} `type:"structure"` + + // Whether the hyperparameter is tunable. + IsTunable *bool `locationName:"isTunable" type:"boolean"` + + // The name of the hyperparameter. + Name *string `locationName:"name" type:"string"` + + // A list of the categories for the hyperparameter. + Values []*string `locationName:"values" type:"list"` +} + +// String returns the string representation +func (s DefaultCategoricalHyperParameterRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultCategoricalHyperParameterRange) GoString() string { + return s.String() +} + +// SetIsTunable sets the IsTunable field's value. +func (s *DefaultCategoricalHyperParameterRange) SetIsTunable(v bool) *DefaultCategoricalHyperParameterRange { + s.IsTunable = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *DefaultCategoricalHyperParameterRange) SetName(v string) *DefaultCategoricalHyperParameterRange { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *DefaultCategoricalHyperParameterRange) SetValues(v []*string) *DefaultCategoricalHyperParameterRange { + s.Values = v + return s +} + +// Provides the name and default range of a continuous hyperparameter and whether +// the hyperparameter is tunable. A tunable hyperparameter can have its value +// determined during hyperparameter optimization (HPO). +type DefaultContinuousHyperParameterRange struct { + _ struct{} `type:"structure"` + + // Whether the hyperparameter is tunable. + IsTunable *bool `locationName:"isTunable" type:"boolean"` + + // The maximum allowable value for the hyperparameter. + MaxValue *float64 `locationName:"maxValue" type:"double"` + + // The minimum allowable value for the hyperparameter. + MinValue *float64 `locationName:"minValue" type:"double"` + + // The name of the hyperparameter. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s DefaultContinuousHyperParameterRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultContinuousHyperParameterRange) GoString() string { + return s.String() +} + +// SetIsTunable sets the IsTunable field's value. +func (s *DefaultContinuousHyperParameterRange) SetIsTunable(v bool) *DefaultContinuousHyperParameterRange { + s.IsTunable = &v + return s +} + +// SetMaxValue sets the MaxValue field's value. +func (s *DefaultContinuousHyperParameterRange) SetMaxValue(v float64) *DefaultContinuousHyperParameterRange { + s.MaxValue = &v + return s +} + +// SetMinValue sets the MinValue field's value. +func (s *DefaultContinuousHyperParameterRange) SetMinValue(v float64) *DefaultContinuousHyperParameterRange { + s.MinValue = &v + return s +} + +// SetName sets the Name field's value. +func (s *DefaultContinuousHyperParameterRange) SetName(v string) *DefaultContinuousHyperParameterRange { + s.Name = &v + return s +} + +// Specifies the hyperparameters and their default ranges. Hyperparameters can +// be categorical, continuous, or integer-valued. +type DefaultHyperParameterRanges struct { + _ struct{} `type:"structure"` + + // The categorical hyperparameters and their default ranges. + CategoricalHyperParameterRanges []*DefaultCategoricalHyperParameterRange `locationName:"categoricalHyperParameterRanges" type:"list"` + + // The continuous hyperparameters and their default ranges. + ContinuousHyperParameterRanges []*DefaultContinuousHyperParameterRange `locationName:"continuousHyperParameterRanges" type:"list"` + + // The integer-valued hyperparameters and their default ranges. + IntegerHyperParameterRanges []*DefaultIntegerHyperParameterRange `locationName:"integerHyperParameterRanges" type:"list"` +} + +// String returns the string representation +func (s DefaultHyperParameterRanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultHyperParameterRanges) GoString() string { + return s.String() +} + +// SetCategoricalHyperParameterRanges sets the CategoricalHyperParameterRanges field's value. 
+func (s *DefaultHyperParameterRanges) SetCategoricalHyperParameterRanges(v []*DefaultCategoricalHyperParameterRange) *DefaultHyperParameterRanges {
+ s.CategoricalHyperParameterRanges = v
+ return s
+}
+
+// SetContinuousHyperParameterRanges sets the ContinuousHyperParameterRanges field's value.
+func (s *DefaultHyperParameterRanges) SetContinuousHyperParameterRanges(v []*DefaultContinuousHyperParameterRange) *DefaultHyperParameterRanges {
+ s.ContinuousHyperParameterRanges = v
+ return s
+}
+
+// SetIntegerHyperParameterRanges sets the IntegerHyperParameterRanges field's value.
+func (s *DefaultHyperParameterRanges) SetIntegerHyperParameterRanges(v []*DefaultIntegerHyperParameterRange) *DefaultHyperParameterRanges {
+ s.IntegerHyperParameterRanges = v
+ return s
+}
+
+// Provides the name and default range of an integer-valued hyperparameter and
+// whether the hyperparameter is tunable. A tunable hyperparameter can have
+// its value determined during hyperparameter optimization (HPO).
+type DefaultIntegerHyperParameterRange struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the hyperparameter is tunable.
+ IsTunable *bool `locationName:"isTunable" type:"boolean"`
+
+ // The maximum allowable value for the hyperparameter.
+ MaxValue *int64 `locationName:"maxValue" type:"integer"`
+
+ // The minimum allowable value for the hyperparameter.
+ MinValue *int64 `locationName:"minValue" type:"integer"`
+
+ // The name of the hyperparameter.
+ Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s DefaultIntegerHyperParameterRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DefaultIntegerHyperParameterRange) GoString() string {
+ return s.String()
+}
+
+// SetIsTunable sets the IsTunable field's value.
+func (s *DefaultIntegerHyperParameterRange) SetIsTunable(v bool) *DefaultIntegerHyperParameterRange {
+ s.IsTunable = &v
+ return s
+}
+
+// SetMaxValue sets the MaxValue field's value.
+func (s *DefaultIntegerHyperParameterRange) SetMaxValue(v int64) *DefaultIntegerHyperParameterRange {
+ s.MaxValue = &v
+ return s
+}
+
+// SetMinValue sets the MinValue field's value.
+func (s *DefaultIntegerHyperParameterRange) SetMinValue(v int64) *DefaultIntegerHyperParameterRange {
+ s.MinValue = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *DefaultIntegerHyperParameterRange) SetName(v string) *DefaultIntegerHyperParameterRange {
+ s.Name = &v
+ return s
+}
+
+type DeleteCampaignInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the campaign to delete.
+ //
+ // CampaignArn is a required field
+ CampaignArn *string `locationName:"campaignArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteCampaignInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCampaignInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCampaignInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteCampaignInput"}
+ if s.CampaignArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("CampaignArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
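+// Example (illustrative sketch, not generated code): Validate is invoked by
+// the SDK's request-building machinery before a request is sent, but it can
+// also be called directly to fail fast on a missing required field:
+//
+//	input := &DeleteCampaignInput{} // CampaignArn intentionally left unset
+//	if err := input.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing CampaignArn parameter
+//	}
+
+// SetCampaignArn sets the CampaignArn field's value.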
+func (s *DeleteCampaignInput) SetCampaignArn(v string) *DeleteCampaignInput { + s.CampaignArn = &v + return s +} + +type DeleteCampaignOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCampaignOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCampaignOutput) GoString() string { + return s.String() +} + +type DeleteDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset group to delete. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetGroupInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DeleteDatasetGroupInput) SetDatasetGroupArn(v string) *DeleteDatasetGroupInput { + s.DatasetGroupArn = &v + return s +} + +type DeleteDatasetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetGroupOutput) GoString() string { + return s.String() +} + +type DeleteDatasetInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset to delete. + // + // DatasetArn is a required field + DatasetArn *string `locationName:"datasetArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetInput"} + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DeleteDatasetInput) SetDatasetArn(v string) *DeleteDatasetInput { + s.DatasetArn = &v + return s +} + +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +type DeleteEventTrackerInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the event tracker to delete. 
+ // + // EventTrackerArn is a required field + EventTrackerArn *string `locationName:"eventTrackerArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventTrackerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventTrackerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEventTrackerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEventTrackerInput"} + if s.EventTrackerArn == nil { + invalidParams.Add(request.NewErrParamRequired("EventTrackerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventTrackerArn sets the EventTrackerArn field's value. +func (s *DeleteEventTrackerInput) SetEventTrackerArn(v string) *DeleteEventTrackerInput { + s.EventTrackerArn = &v + return s +} + +type DeleteEventTrackerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventTrackerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventTrackerOutput) GoString() string { + return s.String() +} + +type DeleteSchemaInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the schema to delete. + // + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSchemaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSchemaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSchemaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSchemaInput"} + if s.SchemaArn == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *DeleteSchemaInput) SetSchemaArn(v string) *DeleteSchemaInput { + s.SchemaArn = &v + return s +} + +type DeleteSchemaOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSchemaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSchemaOutput) GoString() string { + return s.String() +} + +type DeleteSolutionInput struct { + _ struct{} `type:"structure"` + + // The ARN of the solution to delete. + // + // SolutionArn is a required field + SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSolutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSolutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSolutionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteSolutionInput"}
+ if s.SolutionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("SolutionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSolutionArn sets the SolutionArn field's value.
+func (s *DeleteSolutionInput) SetSolutionArn(v string) *DeleteSolutionInput {
+ s.SolutionArn = &v
+ return s
+}
+
+type DeleteSolutionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSolutionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSolutionOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeAlgorithmInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the algorithm to describe.
+ //
+ // AlgorithmArn is a required field
+ AlgorithmArn *string `locationName:"algorithmArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAlgorithmInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAlgorithmInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAlgorithmInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAlgorithmInput"}
+ if s.AlgorithmArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("AlgorithmArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlgorithmArn sets the AlgorithmArn field's value.
+func (s *DescribeAlgorithmInput) SetAlgorithmArn(v string) *DescribeAlgorithmInput {
+ s.AlgorithmArn = &v
+ return s
+}
+
+type DescribeAlgorithmOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A listing of the properties of the algorithm.
+ Algorithm *Algorithm `locationName:"algorithm" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeAlgorithmOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAlgorithmOutput) GoString() string {
+ return s.String()
+}
+
+// SetAlgorithm sets the Algorithm field's value.
+func (s *DescribeAlgorithmOutput) SetAlgorithm(v *Algorithm) *DescribeAlgorithmOutput {
+ s.Algorithm = v
+ return s
+}
+
+type DescribeBatchInferenceJobInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the batch inference job to describe.
+ //
+ // BatchInferenceJobArn is a required field
+ BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeBatchInferenceJobInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBatchInferenceJobInput) GoString() string {
+ return s.String()
+}
+
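+// Example (illustrative sketch, not generated code): a typical call through
+// the service client that is generated elsewhere in this package. The session
+// setup, imports (aws, session, fmt), and the placeholder ARN are assumptions
+// about the caller's side:
+//
+//	sess := session.Must(session.NewSession())
+//	svc := New(sess)
+//	out, err := svc.DescribeBatchInferenceJob(&DescribeBatchInferenceJobInput{
+//		BatchInferenceJobArn: aws.String("arn:aws:personalize:..."),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.BatchInferenceJob.Status))
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.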
+func (s *DescribeBatchInferenceJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBatchInferenceJobInput"} + if s.BatchInferenceJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("BatchInferenceJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBatchInferenceJobArn sets the BatchInferenceJobArn field's value. +func (s *DescribeBatchInferenceJobInput) SetBatchInferenceJobArn(v string) *DescribeBatchInferenceJobInput { + s.BatchInferenceJobArn = &v + return s +} + +type DescribeBatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // Information on the specified batch inference job. + BatchInferenceJob *BatchInferenceJob `locationName:"batchInferenceJob" type:"structure"` +} + +// String returns the string representation +func (s DescribeBatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBatchInferenceJobOutput) GoString() string { + return s.String() +} + +// SetBatchInferenceJob sets the BatchInferenceJob field's value. +func (s *DescribeBatchInferenceJobOutput) SetBatchInferenceJob(v *BatchInferenceJob) *DescribeBatchInferenceJobOutput { + s.BatchInferenceJob = v + return s +} + +type DescribeCampaignInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the campaign. + // + // CampaignArn is a required field + CampaignArn *string `locationName:"campaignArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCampaignInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCampaignInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCampaignInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCampaignInput"} + if s.CampaignArn == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *DescribeCampaignInput) SetCampaignArn(v string) *DescribeCampaignInput { + s.CampaignArn = &v + return s +} + +type DescribeCampaignOutput struct { + _ struct{} `type:"structure"` + + // The properties of the campaign. + Campaign *Campaign `locationName:"campaign" type:"structure"` +} + +// String returns the string representation +func (s DescribeCampaignOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCampaignOutput) GoString() string { + return s.String() +} + +// SetCampaign sets the Campaign field's value. +func (s *DescribeCampaignOutput) SetCampaign(v *Campaign) *DescribeCampaignOutput { + s.Campaign = v + return s +} + +type DescribeDatasetGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group to describe. 
+ // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetGroupInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *DescribeDatasetGroupInput) SetDatasetGroupArn(v string) *DescribeDatasetGroupInput { + s.DatasetGroupArn = &v + return s +} + +type DescribeDatasetGroupOutput struct { + _ struct{} `type:"structure"` + + // A listing of the dataset group's properties. + DatasetGroup *DatasetGroup `locationName:"datasetGroup" type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetGroupOutput) GoString() string { + return s.String() +} + +// SetDatasetGroup sets the DatasetGroup field's value. +func (s *DescribeDatasetGroupOutput) SetDatasetGroup(v *DatasetGroup) *DescribeDatasetGroupOutput { + s.DatasetGroup = v + return s +} + +type DescribeDatasetImportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset import job to describe. + // + // DatasetImportJobArn is a required field + DatasetImportJobArn *string `locationName:"datasetImportJobArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetImportJobInput"} + if s.DatasetImportJobArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetImportJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetImportJobArn sets the DatasetImportJobArn field's value. +func (s *DescribeDatasetImportJobInput) SetDatasetImportJobArn(v string) *DescribeDatasetImportJobInput { + s.DatasetImportJobArn = &v + return s +} + +type DescribeDatasetImportJobOutput struct { + _ struct{} `type:"structure"` + + // Information about the dataset import job, including the status. 
+ // + // The status is one of the following values: + // + // * CREATE PENDING + // + // * CREATE IN_PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + DatasetImportJob *DatasetImportJob `locationName:"datasetImportJob" type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetImportJobOutput) GoString() string { + return s.String() +} + +// SetDatasetImportJob sets the DatasetImportJob field's value. +func (s *DescribeDatasetImportJobOutput) SetDatasetImportJob(v *DatasetImportJob) *DescribeDatasetImportJobOutput { + s.DatasetImportJob = v + return s +} + +type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset to describe. + // + // DatasetArn is a required field + DatasetArn *string `locationName:"datasetArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetInput"} + if s.DatasetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DescribeDatasetInput) SetDatasetArn(v string) *DescribeDatasetInput { + s.DatasetArn = &v + return s +} + +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // A listing of the dataset's properties. + Dataset *Dataset `locationName:"dataset" type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// SetDataset sets the Dataset field's value. +func (s *DescribeDatasetOutput) SetDataset(v *Dataset) *DescribeDatasetOutput { + s.Dataset = v + return s +} + +type DescribeEventTrackerInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the event tracker to describe. + // + // EventTrackerArn is a required field + EventTrackerArn *string `locationName:"eventTrackerArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeEventTrackerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventTrackerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventTrackerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventTrackerInput"} + if s.EventTrackerArn == nil { + invalidParams.Add(request.NewErrParamRequired("EventTrackerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventTrackerArn sets the EventTrackerArn field's value. 
+func (s *DescribeEventTrackerInput) SetEventTrackerArn(v string) *DescribeEventTrackerInput { + s.EventTrackerArn = &v + return s +} + +type DescribeEventTrackerOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the event tracker. + EventTracker *EventTracker `locationName:"eventTracker" type:"structure"` +} + +// String returns the string representation +func (s DescribeEventTrackerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventTrackerOutput) GoString() string { + return s.String() +} + +// SetEventTracker sets the EventTracker field's value. +func (s *DescribeEventTrackerOutput) SetEventTracker(v *EventTracker) *DescribeEventTrackerOutput { + s.EventTracker = v + return s +} + +type DescribeFeatureTransformationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the feature transformation to describe. + // + // FeatureTransformationArn is a required field + FeatureTransformationArn *string `locationName:"featureTransformationArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFeatureTransformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFeatureTransformationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFeatureTransformationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFeatureTransformationInput"} + if s.FeatureTransformationArn == nil { + invalidParams.Add(request.NewErrParamRequired("FeatureTransformationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFeatureTransformationArn sets the FeatureTransformationArn field's value. +func (s *DescribeFeatureTransformationInput) SetFeatureTransformationArn(v string) *DescribeFeatureTransformationInput { + s.FeatureTransformationArn = &v + return s +} + +type DescribeFeatureTransformationOutput struct { + _ struct{} `type:"structure"` + + // A listing of the FeatureTransformation properties. + FeatureTransformation *FeatureTransformation `locationName:"featureTransformation" type:"structure"` +} + +// String returns the string representation +func (s DescribeFeatureTransformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFeatureTransformationOutput) GoString() string { + return s.String() +} + +// SetFeatureTransformation sets the FeatureTransformation field's value. +func (s *DescribeFeatureTransformationOutput) SetFeatureTransformation(v *FeatureTransformation) *DescribeFeatureTransformationOutput { + s.FeatureTransformation = v + return s +} + +type DescribeRecipeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the recipe to describe. + // + // RecipeArn is a required field + RecipeArn *string `locationName:"recipeArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRecipeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRecipeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeRecipeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRecipeInput"} + if s.RecipeArn == nil { + invalidParams.Add(request.NewErrParamRequired("RecipeArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *DescribeRecipeInput) SetRecipeArn(v string) *DescribeRecipeInput { + s.RecipeArn = &v + return s +} + +type DescribeRecipeOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the recipe. + Recipe *Recipe `locationName:"recipe" type:"structure"` +} + +// String returns the string representation +func (s DescribeRecipeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRecipeOutput) GoString() string { + return s.String() +} + +// SetRecipe sets the Recipe field's value. +func (s *DescribeRecipeOutput) SetRecipe(v *Recipe) *DescribeRecipeOutput { + s.Recipe = v + return s +} + +type DescribeSchemaInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the schema to retrieve. + // + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSchemaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSchemaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSchemaInput"} + if s.SchemaArn == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *DescribeSchemaInput) SetSchemaArn(v string) *DescribeSchemaInput { + s.SchemaArn = &v + return s +} + +type DescribeSchemaOutput struct { + _ struct{} `type:"structure"` + + // The requested schema. + Schema *DatasetSchema `locationName:"schema" type:"structure"` +} + +// String returns the string representation +func (s DescribeSchemaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemaOutput) GoString() string { + return s.String() +} + +// SetSchema sets the Schema field's value. +func (s *DescribeSchemaOutput) SetSchema(v *DatasetSchema) *DescribeSchemaOutput { + s.Schema = v + return s +} + +type DescribeSolutionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the solution to describe. + // + // SolutionArn is a required field + SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSolutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSolutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSolutionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeSolutionInput"}
+ if s.SolutionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("SolutionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSolutionArn sets the SolutionArn field's value.
+func (s *DescribeSolutionInput) SetSolutionArn(v string) *DescribeSolutionInput {
+ s.SolutionArn = &v
+ return s
+}
+
+type DescribeSolutionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that describes the solution.
+ Solution *Solution `locationName:"solution" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeSolutionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSolutionOutput) GoString() string {
+ return s.String()
+}
+
+// SetSolution sets the Solution field's value.
+func (s *DescribeSolutionOutput) SetSolution(v *Solution) *DescribeSolutionOutput {
+ s.Solution = v
+ return s
+}
+
+type DescribeSolutionVersionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the solution version.
+ //
+ // SolutionVersionArn is a required field
+ SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeSolutionVersionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSolutionVersionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeSolutionVersionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeSolutionVersionInput"}
+ if s.SolutionVersionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("SolutionVersionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSolutionVersionArn sets the SolutionVersionArn field's value.
+func (s *DescribeSolutionVersionInput) SetSolutionVersionArn(v string) *DescribeSolutionVersionInput {
+ s.SolutionVersionArn = &v
+ return s
+}
+
+type DescribeSolutionVersionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The solution version.
+ SolutionVersion *SolutionVersion `locationName:"solutionVersion" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeSolutionVersionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSolutionVersionOutput) GoString() string {
+ return s.String()
+}
+
+// SetSolutionVersion sets the SolutionVersion field's value.
+func (s *DescribeSolutionVersionOutput) SetSolutionVersion(v *SolutionVersion) *DescribeSolutionVersionOutput {
+ s.SolutionVersion = v
+ return s
+}
+
+// Provides information about an event tracker.
+type EventTracker struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account that owns the event tracker.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The date and time (in Unix time) that the event tracker was created.
+ CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"`
+
+ // The Amazon Resource Name (ARN) of the dataset group that receives the event
+ // data.
+ DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"`
+
+ // The ARN of the event tracker.
+ EventTrackerArn *string `locationName:"eventTrackerArn" type:"string"` + + // The date and time (in Unix time) that the event tracker was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the event tracker. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the event tracker. + // + // An event tracker can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` + + // The ID of the event tracker. Include this ID in requests to the PutEvents + // (https://docs.aws.amazon.com/personalize/latest/dg/API_UBS_PutEvents.html) + // API. + TrackingId *string `locationName:"trackingId" type:"string"` +} + +// String returns the string representation +func (s EventTracker) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventTracker) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *EventTracker) SetAccountId(v string) *EventTracker { + s.AccountId = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *EventTracker) SetCreationDateTime(v time.Time) *EventTracker { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *EventTracker) SetDatasetGroupArn(v string) *EventTracker { + s.DatasetGroupArn = &v + return s +} + +// SetEventTrackerArn sets the EventTrackerArn field's value. +func (s *EventTracker) SetEventTrackerArn(v string) *EventTracker { + s.EventTrackerArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *EventTracker) SetLastUpdatedDateTime(v time.Time) *EventTracker { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *EventTracker) SetName(v string) *EventTracker { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *EventTracker) SetStatus(v string) *EventTracker { + s.Status = &v + return s +} + +// SetTrackingId sets the TrackingId field's value. +func (s *EventTracker) SetTrackingId(v string) *EventTracker { + s.TrackingId = &v + return s +} + +// Provides a summary of the properties of an event tracker. For a complete +// listing, call the DescribeEventTracker API. +type EventTrackerSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the event tracker was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the event tracker. + EventTrackerArn *string `locationName:"eventTrackerArn" type:"string"` + + // The date and time (in Unix time) that the event tracker was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the event tracker. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the event tracker. 
+ // + // An event tracker can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s EventTrackerSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventTrackerSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *EventTrackerSummary) SetCreationDateTime(v time.Time) *EventTrackerSummary { + s.CreationDateTime = &v + return s +} + +// SetEventTrackerArn sets the EventTrackerArn field's value. +func (s *EventTrackerSummary) SetEventTrackerArn(v string) *EventTrackerSummary { + s.EventTrackerArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *EventTrackerSummary) SetLastUpdatedDateTime(v time.Time) *EventTrackerSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *EventTrackerSummary) SetName(v string) *EventTrackerSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *EventTrackerSummary) SetStatus(v string) *EventTrackerSummary { + s.Status = &v + return s +} + +// Provides feature transformation information. Feature transformation is the +// process of modifying raw input data into a form more suitable for model training. +type FeatureTransformation struct { + _ struct{} `type:"structure"` + + // The creation date and time (in Unix time) of the feature transformation. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // Provides the default parameters for feature transformation. + DefaultParameters map[string]*string `locationName:"defaultParameters" type:"map"` + + // The Amazon Resource Name (ARN) of the FeatureTransformation object. + FeatureTransformationArn *string `locationName:"featureTransformationArn" type:"string"` + + // The last update date and time (in Unix time) of the feature transformation. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the feature transformation. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the feature transformation. + // + // A feature transformation can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s FeatureTransformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FeatureTransformation) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *FeatureTransformation) SetCreationDateTime(v time.Time) *FeatureTransformation { + s.CreationDateTime = &v + return s +} + +// SetDefaultParameters sets the DefaultParameters field's value. +func (s *FeatureTransformation) SetDefaultParameters(v map[string]*string) *FeatureTransformation { + s.DefaultParameters = v + return s +} + +// SetFeatureTransformationArn sets the FeatureTransformationArn field's value. 
+func (s *FeatureTransformation) SetFeatureTransformationArn(v string) *FeatureTransformation { + s.FeatureTransformationArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *FeatureTransformation) SetLastUpdatedDateTime(v time.Time) *FeatureTransformation { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *FeatureTransformation) SetName(v string) *FeatureTransformation { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *FeatureTransformation) SetStatus(v string) *FeatureTransformation { + s.Status = &v + return s +} + +type GetSolutionMetricsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the solution version for which to get metrics. + // + // SolutionVersionArn is a required field + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSolutionMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSolutionMetricsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSolutionMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSolutionMetricsInput"} + if s.SolutionVersionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SolutionVersionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *GetSolutionMetricsInput) SetSolutionVersionArn(v string) *GetSolutionMetricsInput { + s.SolutionVersionArn = &v + return s +} + +type GetSolutionMetricsOutput struct { + _ struct{} `type:"structure"` + + // The metrics for the solution version. + Metrics map[string]*float64 `locationName:"metrics" type:"map"` + + // The same solution version ARN as specified in the request. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` +} + +// String returns the string representation +func (s GetSolutionMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSolutionMetricsOutput) GoString() string { + return s.String() +} + +// SetMetrics sets the Metrics field's value. +func (s *GetSolutionMetricsOutput) SetMetrics(v map[string]*float64) *GetSolutionMetricsOutput { + s.Metrics = v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *GetSolutionMetricsOutput) SetSolutionVersionArn(v string) *GetSolutionMetricsOutput { + s.SolutionVersionArn = &v + return s +} + +// Describes the properties for hyperparameter optimization (HPO). For use with +// the bring-your-own-recipe feature. Do not use for Amazon Personalize native +// recipes. +type HPOConfig struct { + _ struct{} `type:"structure"` + + // The hyperparameters and their allowable ranges. + AlgorithmHyperParameterRanges *HyperParameterRanges `locationName:"algorithmHyperParameterRanges" type:"structure"` + + // The metric to optimize during HPO. + HpoObjective *HPOObjective `locationName:"hpoObjective" type:"structure"` + + // Describes the resource configuration for HPO. 
+ HpoResourceConfig *HPOResourceConfig `locationName:"hpoResourceConfig" type:"structure"`
+}
+
+// String returns the string representation
+func (s HPOConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HPOConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HPOConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HPOConfig"}
+ if s.AlgorithmHyperParameterRanges != nil {
+ if err := s.AlgorithmHyperParameterRanges.Validate(); err != nil {
+ invalidParams.AddNested("AlgorithmHyperParameterRanges", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlgorithmHyperParameterRanges sets the AlgorithmHyperParameterRanges field's value.
+func (s *HPOConfig) SetAlgorithmHyperParameterRanges(v *HyperParameterRanges) *HPOConfig {
+ s.AlgorithmHyperParameterRanges = v
+ return s
+}
+
+// SetHpoObjective sets the HpoObjective field's value.
+func (s *HPOConfig) SetHpoObjective(v *HPOObjective) *HPOConfig {
+ s.HpoObjective = v
+ return s
+}
+
+// SetHpoResourceConfig sets the HpoResourceConfig field's value.
+func (s *HPOConfig) SetHpoResourceConfig(v *HPOResourceConfig) *HPOConfig {
+ s.HpoResourceConfig = v
+ return s
+}
+
+// The metric to optimize during hyperparameter optimization (HPO).
+type HPOObjective struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the metric.
+ MetricName *string `locationName:"metricName" type:"string"`
+
+ // A regular expression for finding the metric in the training job logs.
+ MetricRegex *string `locationName:"metricRegex" type:"string"`
+
+ // The data type of the metric.
+ Type *string `locationName:"type" type:"string"`
+}
+
+// String returns the string representation
+func (s HPOObjective) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HPOObjective) GoString() string {
+ return s.String()
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *HPOObjective) SetMetricName(v string) *HPOObjective {
+ s.MetricName = &v
+ return s
+}
+
+// SetMetricRegex sets the MetricRegex field's value.
+func (s *HPOObjective) SetMetricRegex(v string) *HPOObjective {
+ s.MetricRegex = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *HPOObjective) SetType(v string) *HPOObjective {
+ s.Type = &v
+ return s
+}
+
+// Describes the resource configuration for hyperparameter optimization (HPO).
+type HPOResourceConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of training jobs when you create a solution version. The
+ // maximum value for maxNumberOfTrainingJobs is 40.
+ MaxNumberOfTrainingJobs *string `locationName:"maxNumberOfTrainingJobs" type:"string"`
+
+ // The maximum number of parallel training jobs when you create a solution version.
+ // The maximum value for maxParallelTrainingJobs is 10.
+ MaxParallelTrainingJobs *string `locationName:"maxParallelTrainingJobs" type:"string"`
+}
+
+// String returns the string representation
+func (s HPOResourceConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HPOResourceConfig) GoString() string {
+ return s.String()
+}
+
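+// Example (illustrative sketch, not generated code): assembling an HPOConfig
+// from the pieces above. The metric name and objective type are assumptions
+// drawn from the service documentation, and note that the resource limits are
+// string-typed in this API (documented maximums: 40 and 10):
+//
+//	hpo := (&HPOConfig{}).
+//		SetHpoObjective((&HPOObjective{}).
+//			SetMetricName("precision_at_25").
+//			SetType("Maximize")).
+//		SetHpoResourceConfig((&HPOResourceConfig{}).
+//			SetMaxNumberOfTrainingJobs("40").
+//			SetMaxParallelTrainingJobs("10"))
+
+// SetMaxNumberOfTrainingJobs sets the MaxNumberOfTrainingJobs field's value.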
+func (s *HPOResourceConfig) SetMaxNumberOfTrainingJobs(v string) *HPOResourceConfig { + s.MaxNumberOfTrainingJobs = &v + return s +} + +// SetMaxParallelTrainingJobs sets the MaxParallelTrainingJobs field's value. +func (s *HPOResourceConfig) SetMaxParallelTrainingJobs(v string) *HPOResourceConfig { + s.MaxParallelTrainingJobs = &v + return s +} + +// Specifies the hyperparameters and their ranges. Hyperparameters can be categorical, +// continuous, or integer-valued. +type HyperParameterRanges struct { + _ struct{} `type:"structure"` + + // The categorical hyperparameters and their ranges. + CategoricalHyperParameterRanges []*CategoricalHyperParameterRange `locationName:"categoricalHyperParameterRanges" type:"list"` + + // The continuous hyperparameters and their ranges. + ContinuousHyperParameterRanges []*ContinuousHyperParameterRange `locationName:"continuousHyperParameterRanges" type:"list"` + + // The integer-valued hyperparameters and their ranges. + IntegerHyperParameterRanges []*IntegerHyperParameterRange `locationName:"integerHyperParameterRanges" type:"list"` +} + +// String returns the string representation +func (s HyperParameterRanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HyperParameterRanges) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HyperParameterRanges) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HyperParameterRanges"} + if s.ContinuousHyperParameterRanges != nil { + for i, v := range s.ContinuousHyperParameterRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContinuousHyperParameterRanges", i), err.(request.ErrInvalidParams)) + } + } + } + if s.IntegerHyperParameterRanges != nil { + for i, v := range s.IntegerHyperParameterRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IntegerHyperParameterRanges", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCategoricalHyperParameterRanges sets the CategoricalHyperParameterRanges field's value. +func (s *HyperParameterRanges) SetCategoricalHyperParameterRanges(v []*CategoricalHyperParameterRange) *HyperParameterRanges { + s.CategoricalHyperParameterRanges = v + return s +} + +// SetContinuousHyperParameterRanges sets the ContinuousHyperParameterRanges field's value. +func (s *HyperParameterRanges) SetContinuousHyperParameterRanges(v []*ContinuousHyperParameterRange) *HyperParameterRanges { + s.ContinuousHyperParameterRanges = v + return s +} + +// SetIntegerHyperParameterRanges sets the IntegerHyperParameterRanges field's value. +func (s *HyperParameterRanges) SetIntegerHyperParameterRanges(v []*IntegerHyperParameterRange) *HyperParameterRanges { + s.IntegerHyperParameterRanges = v + return s +} + +// Provides the name and range of an integer-valued hyperparameter. +type IntegerHyperParameterRange struct { + _ struct{} `type:"structure"` + + // The maximum allowable value for the hyperparameter. + MaxValue *int64 `locationName:"maxValue" type:"integer"` + + // The minimum allowable value for the hyperparameter. + MinValue *int64 `locationName:"minValue" type:"integer"` + + // The name of the hyperparameter. 
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s IntegerHyperParameterRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntegerHyperParameterRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntegerHyperParameterRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntegerHyperParameterRange"} + if s.MinValue != nil && *s.MinValue < -1e+06 { + invalidParams.Add(request.NewErrParamMinValue("MinValue", -1e+06)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxValue sets the MaxValue field's value. +func (s *IntegerHyperParameterRange) SetMaxValue(v int64) *IntegerHyperParameterRange { + s.MaxValue = &v + return s +} + +// SetMinValue sets the MinValue field's value. +func (s *IntegerHyperParameterRange) SetMinValue(v int64) *IntegerHyperParameterRange { + s.MinValue = &v + return s +} + +// SetName sets the Name field's value. +func (s *IntegerHyperParameterRange) SetName(v string) *IntegerHyperParameterRange { + s.Name = &v + return s +} + +type ListBatchInferenceJobsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of batch inference job results to return in each page. + // The default value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Amazon Resource Name (ARN) of the solution version from which the batch + // inference jobs were created. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` +} + +// String returns the string representation +func (s ListBatchInferenceJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBatchInferenceJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBatchInferenceJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBatchInferenceJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListBatchInferenceJobsInput) SetMaxResults(v int64) *ListBatchInferenceJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBatchInferenceJobsInput) SetNextToken(v string) *ListBatchInferenceJobsInput { + s.NextToken = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *ListBatchInferenceJobsInput) SetSolutionVersionArn(v string) *ListBatchInferenceJobsInput { + s.SolutionVersionArn = &v + return s +} + +type ListBatchInferenceJobsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information on each job that is returned. + BatchInferenceJobs []*BatchInferenceJobSummary `locationName:"batchInferenceJobs" type:"list"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return.
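// One way to drain this paginated API, assuming an initialized client svc and
// the ListBatchInferenceJobs operation generated earlier in this file:
//
//    var jobs []*personalize.BatchInferenceJobSummary
//    in := &personalize.ListBatchInferenceJobsInput{}
//    for {
//        out, err := svc.ListBatchInferenceJobs(in)
//        if err != nil {
//            break // handle the error in real code
//        }
//        jobs = append(jobs, out.BatchInferenceJobs...)
//        if out.NextToken == nil {
//            break
//        }
//        in.NextToken = out.NextToken
//    }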
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListBatchInferenceJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBatchInferenceJobsOutput) GoString() string { + return s.String() +} + +// SetBatchInferenceJobs sets the BatchInferenceJobs field's value. +func (s *ListBatchInferenceJobsOutput) SetBatchInferenceJobs(v []*BatchInferenceJobSummary) *ListBatchInferenceJobsOutput { + s.BatchInferenceJobs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBatchInferenceJobsOutput) SetNextToken(v string) *ListBatchInferenceJobsOutput { + s.NextToken = &v + return s +} + +type ListCampaignsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of campaigns to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListCampaigns for getting the + // next set of campaigns (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // The Amazon Resource Name (ARN) of the solution to list the campaigns for. + // When a solution is not specified, all the campaigns associated with the account + // are listed. + SolutionArn *string `locationName:"solutionArn" type:"string"` +} + +// String returns the string representation +func (s ListCampaignsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCampaignsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCampaignsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCampaignsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCampaignsInput) SetMaxResults(v int64) *ListCampaignsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCampaignsInput) SetNextToken(v string) *ListCampaignsInput { + s.NextToken = &v + return s +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *ListCampaignsInput) SetSolutionArn(v string) *ListCampaignsInput { + s.SolutionArn = &v + return s +} + +type ListCampaignsOutput struct { + _ struct{} `type:"structure"` + + // A list of the campaigns. + Campaigns []*CampaignSummary `locationName:"campaigns" type:"list"` + + // A token for getting the next set of campaigns (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListCampaignsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCampaignsOutput) GoString() string { + return s.String() +} + +// SetCampaigns sets the Campaigns field's value. +func (s *ListCampaignsOutput) SetCampaigns(v []*CampaignSummary) *ListCampaignsOutput { + s.Campaigns = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCampaignsOutput) SetNextToken(v string) *ListCampaignsOutput { + s.NextToken = &v + return s +} + +type ListDatasetGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of dataset groups to return. 
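// Per the solutionArn documentation on ListCampaignsInput above, omitting the
// filter lists every campaign in the account; a sketch with a placeholder ARN
// and an initialized client svc:
//
//    out, err := svc.ListCampaigns((&personalize.ListCampaignsInput{}).
//        SetSolutionArn("arn:aws:personalize:us-west-2:123456789012:solution/my-solution").
//        SetMaxResults(25))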
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListDatasetGroups for getting + // the next set of dataset groups (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetGroupsInput) SetMaxResults(v int64) *ListDatasetGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetGroupsInput) SetNextToken(v string) *ListDatasetGroupsInput { + s.NextToken = &v + return s +} + +type ListDatasetGroupsOutput struct { + _ struct{} `type:"structure"` + + // The list of your dataset groups. + DatasetGroups []*DatasetGroupSummary `locationName:"datasetGroups" type:"list"` + + // A token for getting the next set of dataset groups (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetGroupsOutput) GoString() string { + return s.String() +} + +// SetDatasetGroups sets the DatasetGroups field's value. +func (s *ListDatasetGroupsOutput) SetDatasetGroups(v []*DatasetGroupSummary) *ListDatasetGroupsOutput { + s.DatasetGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetGroupsOutput) SetNextToken(v string) *ListDatasetGroupsOutput { + s.NextToken = &v + return s +} + +type ListDatasetImportJobsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset to list the dataset import + // jobs for. + DatasetArn *string `locationName:"datasetArn" type:"string"` + + // The maximum number of dataset import jobs to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListDatasetImportJobs for getting + // the next set of dataset import jobs (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetImportJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetImportJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetImportJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetArn sets the DatasetArn field's value. 
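// The min:"1" tag on these MaxResults fields is enforced client-side before
// any request is sent; a quick sketch of the failure mode using
// ListDatasetGroupsInput:
//
//    in := (&personalize.ListDatasetGroupsInput{}).SetMaxResults(0)
//    err := in.Validate() // returns request.ErrInvalidParams naming MaxResults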
+func (s *ListDatasetImportJobsInput) SetDatasetArn(v string) *ListDatasetImportJobsInput { + s.DatasetArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetImportJobsInput) SetMaxResults(v int64) *ListDatasetImportJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetImportJobsInput) SetNextToken(v string) *ListDatasetImportJobsInput { + s.NextToken = &v + return s +} + +type ListDatasetImportJobsOutput struct { + _ struct{} `type:"structure"` + + // The list of dataset import jobs. + DatasetImportJobs []*DatasetImportJobSummary `locationName:"datasetImportJobs" type:"list"` + + // A token for getting the next set of dataset import jobs (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetImportJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetImportJobsOutput) GoString() string { + return s.String() +} + +// SetDatasetImportJobs sets the DatasetImportJobs field's value. +func (s *ListDatasetImportJobsOutput) SetDatasetImportJobs(v []*DatasetImportJobSummary) *ListDatasetImportJobsOutput { + s.DatasetImportJobs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetImportJobsOutput) SetNextToken(v string) *ListDatasetImportJobsOutput { + s.NextToken = &v + return s +} + +type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group that contains the datasets + // to list. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The maximum number of datasets to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListDatasets for getting + // the next set of datasets (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ListDatasetsInput) SetDatasetGroupArn(v string) *ListDatasetsInput { + s.DatasetGroupArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetsInput) SetMaxResults(v int64) *ListDatasetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsInput) SetNextToken(v string) *ListDatasetsInput { + s.NextToken = &v + return s +} + +type ListDatasetsOutput struct { + _ struct{} `type:"structure"` + + // An array of Dataset objects. Each object provides metadata information. + Datasets []*DatasetSummary `locationName:"datasets" type:"list"` + + // A token for getting the next set of datasets (if they exist).
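// A sketch of walking a dataset group's datasets, assuming an initialized
// client svc, the ListDatasets operation generated earlier in this file, and a
// placeholder group ARN:
//
//    out, err := svc.ListDatasets((&personalize.ListDatasetsInput{}).
//        SetDatasetGroupArn("arn:aws:personalize:us-west-2:123456789012:dataset-group/my-group"))
//    if err == nil {
//        for _, d := range out.Datasets {
//            fmt.Println(aws.StringValue(d.Name)) // DatasetSummary is defined earlier in this file
//        }
//    }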
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsOutput) GoString() string { + return s.String() +} + +// SetDatasets sets the Datasets field's value. +func (s *ListDatasetsOutput) SetDatasets(v []*DatasetSummary) *ListDatasetsOutput { + s.Datasets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsOutput) SetNextToken(v string) *ListDatasetsOutput { + s.NextToken = &v + return s +} + +type ListEventTrackersInput struct { + _ struct{} `type:"structure"` + + // The ARN of a dataset group used to filter the response. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The maximum number of event trackers to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListEventTrackers for getting + // the next set of event trackers (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListEventTrackersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventTrackersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEventTrackersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventTrackersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ListEventTrackersInput) SetDatasetGroupArn(v string) *ListEventTrackersInput { + s.DatasetGroupArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEventTrackersInput) SetMaxResults(v int64) *ListEventTrackersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventTrackersInput) SetNextToken(v string) *ListEventTrackersInput { + s.NextToken = &v + return s +} + +type ListEventTrackersOutput struct { + _ struct{} `type:"structure"` + + // A list of event trackers. + EventTrackers []*EventTrackerSummary `locationName:"eventTrackers" type:"list"` + + // A token for getting the next set of event trackers (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListEventTrackersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventTrackersOutput) GoString() string { + return s.String() +} + +// SetEventTrackers sets the EventTrackers field's value. +func (s *ListEventTrackersOutput) SetEventTrackers(v []*EventTrackerSummary) *ListEventTrackersOutput { + s.EventTrackers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventTrackersOutput) SetNextToken(v string) *ListEventTrackersOutput { + s.NextToken = &v + return s +} + +type ListRecipesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of recipes to return. 
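// Each List operation here also has a generated *WithContext variant earlier
// in this file; a sketch with a deadline, assuming an initialized client svc:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    out, err := svc.ListEventTrackersWithContext(ctx, &personalize.ListEventTrackersInput{})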
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListRecipes for getting the next + // set of recipes (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // The default is SERVICE. + RecipeProvider *string `locationName:"recipeProvider" type:"string" enum:"RecipeProvider"` +} + +// String returns the string representation +func (s ListRecipesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecipesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRecipesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRecipesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListRecipesInput) SetMaxResults(v int64) *ListRecipesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRecipesInput) SetNextToken(v string) *ListRecipesInput { + s.NextToken = &v + return s +} + +// SetRecipeProvider sets the RecipeProvider field's value. +func (s *ListRecipesInput) SetRecipeProvider(v string) *ListRecipesInput { + s.RecipeProvider = &v + return s +} + +type ListRecipesOutput struct { + _ struct{} `type:"structure"` + + // A token for getting the next set of recipes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of available recipes. + Recipes []*RecipeSummary `locationName:"recipes" type:"list"` +} + +// String returns the string representation +func (s ListRecipesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecipesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRecipesOutput) SetNextToken(v string) *ListRecipesOutput { + s.NextToken = &v + return s +} + +// SetRecipes sets the Recipes field's value. +func (s *ListRecipesOutput) SetRecipes(v []*RecipeSummary) *ListRecipesOutput { + s.Recipes = v + return s +} + +type ListSchemasInput struct { + _ struct{} `type:"structure"` + + // The maximum number of schemas to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListSchemas for getting the next + // set of schemas (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListSchemasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSchemasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSchemasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSchemasInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
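// The recipeProvider filter takes the single enum value declared at the end of
// this file; a sketch, assuming an initialized client svc:
//
//    out, err := svc.ListRecipes((&personalize.ListRecipesInput{}).
//        SetRecipeProvider(personalize.RecipeProviderService))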
+func (s *ListSchemasInput) SetMaxResults(v int64) *ListSchemasInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSchemasInput) SetNextToken(v string) *ListSchemasInput { + s.NextToken = &v + return s +} + +type ListSchemasOutput struct { + _ struct{} `type:"structure"` + + // A token used to get the next set of schemas (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of schemas. + Schemas []*DatasetSchemaSummary `locationName:"schemas" type:"list"` +} + +// String returns the string representation +func (s ListSchemasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSchemasOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSchemasOutput) SetNextToken(v string) *ListSchemasOutput { + s.NextToken = &v + return s +} + +// SetSchemas sets the Schemas field's value. +func (s *ListSchemasOutput) SetSchemas(v []*DatasetSchemaSummary) *ListSchemasOutput { + s.Schemas = v + return s +} + +type ListSolutionVersionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of solution versions to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListSolutionVersions for getting + // the next set of solution versions (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // The Amazon Resource Name (ARN) of the solution. + SolutionArn *string `locationName:"solutionArn" type:"string"` +} + +// String returns the string representation +func (s ListSolutionVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSolutionVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSolutionVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSolutionVersionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSolutionVersionsInput) SetMaxResults(v int64) *ListSolutionVersionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSolutionVersionsInput) SetNextToken(v string) *ListSolutionVersionsInput { + s.NextToken = &v + return s +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *ListSolutionVersionsInput) SetSolutionArn(v string) *ListSolutionVersionsInput { + s.SolutionArn = &v + return s +} + +type ListSolutionVersionsOutput struct { + _ struct{} `type:"structure"` + + // A token for getting the next set of solution versions (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of solution versions describing the version properties. 
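// Schemas, solution versions, and solutions all follow the same
// MaxResults/NextToken contract, so the pagination loop sketched earlier for
// batch inference jobs carries over unchanged; only the input filter differs,
// for example (placeholder ARN, initialized client svc assumed):
//
//    out, err := svc.ListSolutionVersions((&personalize.ListSolutionVersionsInput{}).
//        SetSolutionArn("arn:aws:personalize:us-west-2:123456789012:solution/my-solution"))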
+ SolutionVersions []*SolutionVersionSummary `locationName:"solutionVersions" type:"list"` +} + +// String returns the string representation +func (s ListSolutionVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSolutionVersionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSolutionVersionsOutput) SetNextToken(v string) *ListSolutionVersionsOutput { + s.NextToken = &v + return s +} + +// SetSolutionVersions sets the SolutionVersions field's value. +func (s *ListSolutionVersionsOutput) SetSolutionVersions(v []*SolutionVersionSummary) *ListSolutionVersionsOutput { + s.SolutionVersions = v + return s +} + +type ListSolutionsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset group. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The maximum number of solutions to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListSolutions for getting the + // next set of solutions (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListSolutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSolutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSolutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSolutionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ListSolutionsInput) SetDatasetGroupArn(v string) *ListSolutionsInput { + s.DatasetGroupArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSolutionsInput) SetMaxResults(v int64) *ListSolutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSolutionsInput) SetNextToken(v string) *ListSolutionsInput { + s.NextToken = &v + return s +} + +type ListSolutionsOutput struct { + _ struct{} `type:"structure"` + + // A token for getting the next set of solutions (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of the current solutions. + Solutions []*SolutionSummary `locationName:"solutions" type:"list"` +} + +// String returns the string representation +func (s ListSolutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSolutionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSolutionsOutput) SetNextToken(v string) *ListSolutionsOutput { + s.NextToken = &v + return s +} + +// SetSolutions sets the Solutions field's value. +func (s *ListSolutionsOutput) SetSolutions(v []*SolutionSummary) *ListSolutionsOutput { + s.Solutions = v + return s +} + +// Provides information about a recipe. Each recipe provides an algorithm that +// Amazon Personalize uses in model training when you use the CreateSolution +// operation. 
+type Recipe struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the algorithm that Amazon Personalize uses + // to train the model. + AlgorithmArn *string `locationName:"algorithmArn" type:"string"` + + // The date and time (in Unix format) that the recipe was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The description of the recipe. + Description *string `locationName:"description" type:"string"` + + // The ARN of the FeatureTransformation object. + FeatureTransformationArn *string `locationName:"featureTransformationArn" type:"string"` + + // The date and time (in Unix format) that the recipe was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the recipe. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the recipe. + RecipeArn *string `locationName:"recipeArn" type:"string"` + + // One of the following values: + // + // * PERSONALIZED_RANKING + // + // * RELATED_ITEMS + // + // * USER_PERSONALIZATION + RecipeType *string `locationName:"recipeType" type:"string"` + + // The status of the recipe. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Recipe) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Recipe) GoString() string { + return s.String() +} + +// SetAlgorithmArn sets the AlgorithmArn field's value. +func (s *Recipe) SetAlgorithmArn(v string) *Recipe { + s.AlgorithmArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Recipe) SetCreationDateTime(v time.Time) *Recipe { + s.CreationDateTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Recipe) SetDescription(v string) *Recipe { + s.Description = &v + return s +} + +// SetFeatureTransformationArn sets the FeatureTransformationArn field's value. +func (s *Recipe) SetFeatureTransformationArn(v string) *Recipe { + s.FeatureTransformationArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *Recipe) SetLastUpdatedDateTime(v time.Time) *Recipe { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Recipe) SetName(v string) *Recipe { + s.Name = &v + return s +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *Recipe) SetRecipeArn(v string) *Recipe { + s.RecipeArn = &v + return s +} + +// SetRecipeType sets the RecipeType field's value. +func (s *Recipe) SetRecipeType(v string) *Recipe { + s.RecipeType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Recipe) SetStatus(v string) *Recipe { + s.Status = &v + return s +} + +// Provides a summary of the properties of a recipe. For a complete listing, +// call the DescribeRecipe API. +type RecipeSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the recipe was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The date and time (in Unix time) that the recipe was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the recipe. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the recipe. 
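// A sketch of branching on the recipeType values enumerated above, given a
// *personalize.Recipe value r obtained elsewhere:
//
//    switch aws.StringValue(r.RecipeType) {
//    case "USER_PERSONALIZATION":
//        // recommend items for a user
//    case "RELATED_ITEMS":
//        // recommend items similar to a given item
//    case "PERSONALIZED_RANKING":
//        // re-rank a caller-supplied list of items
//    }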
+ RecipeArn *string `locationName:"recipeArn" type:"string"` + + // The status of the recipe. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s RecipeSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecipeSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *RecipeSummary) SetCreationDateTime(v time.Time) *RecipeSummary { + s.CreationDateTime = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *RecipeSummary) SetLastUpdatedDateTime(v time.Time) *RecipeSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *RecipeSummary) SetName(v string) *RecipeSummary { + s.Name = &v + return s +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *RecipeSummary) SetRecipeArn(v string) *RecipeSummary { + s.RecipeArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *RecipeSummary) SetStatus(v string) *RecipeSummary { + s.Status = &v + return s +} + +// The configuration details of an Amazon S3 input or output bucket. +type S3DataConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) + // key that Amazon Personalize uses to encrypt or decrypt the input and output + // files of a batch inference job. + KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` + + // The file path of the Amazon S3 bucket. + // + // Path is a required field + Path *string `locationName:"path" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DataConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DataConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3DataConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3DataConfig"} + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *S3DataConfig) SetKmsKeyArn(v string) *S3DataConfig { + s.KmsKeyArn = &v + return s +} + +// SetPath sets the Path field's value. +func (s *S3DataConfig) SetPath(v string) *S3DataConfig { + s.Path = &v + return s +} + +// An object that provides information about a solution. A solution is a trained +// model that can be deployed as a campaign. +type Solution struct { + _ struct{} `type:"structure"` + + // When performAutoML is true, specifies the best recipe found. + AutoMLResult *AutoMLResult `locationName:"autoMLResult" type:"structure"` + + // The creation date and time (in Unix time) of the solution. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group that provides the training + // data. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The event type (for example, 'click' or 'like') that is used for training + // the model. + EventType *string `locationName:"eventType" type:"string"` + + // The date and time (in Unix time) that the solution was last updated. 
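// S3DataConfig is the only structure in this stretch with a required field; a
// sketch with a placeholder path and key ARN. Validate would return
// ErrParamRequired("Path") only if Path were left unset.
//
//    s3cfg := (&personalize.S3DataConfig{}).
//        SetPath("s3://my-bucket/batch/input.json"). // placeholder
//        SetKmsKeyArn("arn:aws:kms:us-west-2:123456789012:key/placeholder")
//    err := s3cfg.Validate() // nil here, since Path is set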
+ LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // Describes the latest version of the solution, including the status and the + // ARN. + LatestSolutionVersion *SolutionVersionSummary `locationName:"latestSolutionVersion" type:"structure"` + + // The name of the solution. + Name *string `locationName:"name" min:"1" type:"string"` + + // When true, Amazon Personalize performs a search for the best USER_PERSONALIZATION + // recipe from the list specified in the solution configuration (recipeArn must + // not be specified). When false (the default), Amazon Personalize uses recipeArn + // for training. + PerformAutoML *bool `locationName:"performAutoML" type:"boolean"` + + // Whether to perform hyperparameter optimization (HPO) on the chosen recipe. + // The default is false. + PerformHPO *bool `locationName:"performHPO" type:"boolean"` + + // The ARN of the recipe used to create the solution. + RecipeArn *string `locationName:"recipeArn" type:"string"` + + // The ARN of the solution. + SolutionArn *string `locationName:"solutionArn" type:"string"` + + // Describes the configuration properties for the solution. + SolutionConfig *SolutionConfig `locationName:"solutionConfig" type:"structure"` + + // The status of the solution. + // + // A solution can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Solution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Solution) GoString() string { + return s.String() +} + +// SetAutoMLResult sets the AutoMLResult field's value. +func (s *Solution) SetAutoMLResult(v *AutoMLResult) *Solution { + s.AutoMLResult = v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Solution) SetCreationDateTime(v time.Time) *Solution { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *Solution) SetDatasetGroupArn(v string) *Solution { + s.DatasetGroupArn = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *Solution) SetEventType(v string) *Solution { + s.EventType = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *Solution) SetLastUpdatedDateTime(v time.Time) *Solution { + s.LastUpdatedDateTime = &v + return s +} + +// SetLatestSolutionVersion sets the LatestSolutionVersion field's value. +func (s *Solution) SetLatestSolutionVersion(v *SolutionVersionSummary) *Solution { + s.LatestSolutionVersion = v + return s +} + +// SetName sets the Name field's value. +func (s *Solution) SetName(v string) *Solution { + s.Name = &v + return s +} + +// SetPerformAutoML sets the PerformAutoML field's value. +func (s *Solution) SetPerformAutoML(v bool) *Solution { + s.PerformAutoML = &v + return s +} + +// SetPerformHPO sets the PerformHPO field's value. +func (s *Solution) SetPerformHPO(v bool) *Solution { + s.PerformHPO = &v + return s +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *Solution) SetRecipeArn(v string) *Solution { + s.RecipeArn = &v + return s +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *Solution) SetSolutionArn(v string) *Solution { + s.SolutionArn = &v + return s +} + +// SetSolutionConfig sets the SolutionConfig field's value. 
+func (s *Solution) SetSolutionConfig(v *SolutionConfig) *Solution { + s.SolutionConfig = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Solution) SetStatus(v string) *Solution { + s.Status = &v + return s +} + +// Describes the configuration properties for the solution. +type SolutionConfig struct { + _ struct{} `type:"structure"` + + // Lists the hyperparameter names and ranges. + AlgorithmHyperParameters map[string]*string `locationName:"algorithmHyperParameters" type:"map"` + + // The AutoMLConfig object containing a list of recipes to search when AutoML + // is performed. + AutoMLConfig *AutoMLConfig `locationName:"autoMLConfig" type:"structure"` + + // Only events with a value greater than or equal to this threshold are used + // for training a model. + EventValueThreshold *string `locationName:"eventValueThreshold" type:"string"` + + // Lists the feature transformation parameters. + FeatureTransformationParameters map[string]*string `locationName:"featureTransformationParameters" type:"map"` + + // Describes the properties for hyperparameter optimization (HPO). + HpoConfig *HPOConfig `locationName:"hpoConfig" type:"structure"` +} + +// String returns the string representation +func (s SolutionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SolutionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SolutionConfig"} + if s.HpoConfig != nil { + if err := s.HpoConfig.Validate(); err != nil { + invalidParams.AddNested("HpoConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlgorithmHyperParameters sets the AlgorithmHyperParameters field's value. +func (s *SolutionConfig) SetAlgorithmHyperParameters(v map[string]*string) *SolutionConfig { + s.AlgorithmHyperParameters = v + return s +} + +// SetAutoMLConfig sets the AutoMLConfig field's value. +func (s *SolutionConfig) SetAutoMLConfig(v *AutoMLConfig) *SolutionConfig { + s.AutoMLConfig = v + return s +} + +// SetEventValueThreshold sets the EventValueThreshold field's value. +func (s *SolutionConfig) SetEventValueThreshold(v string) *SolutionConfig { + s.EventValueThreshold = &v + return s +} + +// SetFeatureTransformationParameters sets the FeatureTransformationParameters field's value. +func (s *SolutionConfig) SetFeatureTransformationParameters(v map[string]*string) *SolutionConfig { + s.FeatureTransformationParameters = v + return s +} + +// SetHpoConfig sets the HpoConfig field's value. +func (s *SolutionConfig) SetHpoConfig(v *HPOConfig) *SolutionConfig { + s.HpoConfig = v + return s +} + +// Provides a summary of the properties of a solution. For a complete listing, +// call the DescribeSolution API. +type SolutionSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that the solution was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The date and time (in Unix time) that the solution was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the solution. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the solution. 
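// Pulling the earlier sketches together: SolutionConfig nests the HPO pieces
// and validates them recursively, reporting nested failures under an
// "HpoConfig" prefix; the threshold value is hypothetical.
//
//    solCfg := (&personalize.SolutionConfig{}).
//        SetEventValueThreshold("0.5").
//        SetHpoConfig(hpo) // the HPOConfig sketched earlier
//    err := solCfg.Validate()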
+ SolutionArn *string `locationName:"solutionArn" type:"string"` + + // The status of the solution. + // + // A solution can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // + // * DELETE PENDING > DELETE IN_PROGRESS + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s SolutionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *SolutionSummary) SetCreationDateTime(v time.Time) *SolutionSummary { + s.CreationDateTime = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *SolutionSummary) SetLastUpdatedDateTime(v time.Time) *SolutionSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *SolutionSummary) SetName(v string) *SolutionSummary { + s.Name = &v + return s +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *SolutionSummary) SetSolutionArn(v string) *SolutionSummary { + s.SolutionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SolutionSummary) SetStatus(v string) *SolutionSummary { + s.Status = &v + return s +} + +// An object that provides information about a specific version of a Solution. +type SolutionVersion struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that this version of the solution was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset group providing the training + // data. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The event type (for example, 'click' or 'like') that is used for training + // the model. + EventType *string `locationName:"eventType" type:"string"` + + // If training a solution version fails, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the solution was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // When true, Amazon Personalize searches for the most optimal recipe according + // to the solution configuration. When false (the default), Amazon Personalize + // uses recipeArn. + PerformAutoML *bool `locationName:"performAutoML" type:"boolean"` + + // Whether to perform hyperparameter optimization (HPO) on the chosen recipe. + // The default is false. + PerformHPO *bool `locationName:"performHPO" type:"boolean"` + + // The ARN of the recipe used in the solution. + RecipeArn *string `locationName:"recipeArn" type:"string"` + + // The ARN of the solution. + SolutionArn *string `locationName:"solutionArn" type:"string"` + + // Describes the configuration properties for the solution. + SolutionConfig *SolutionConfig `locationName:"solutionConfig" type:"structure"` + + // The ARN of the solution version. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the solution version. 
+ // + // A solution version can be in one of the following states: + // + // * CREATE PENDING + // + // * CREATE IN_PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + Status *string `locationName:"status" type:"string"` + + // The time used to train the model. You are billed for the time it takes to + // train a model. This field is visible only after Amazon Personalize successfully + // trains a model. + TrainingHours *float64 `locationName:"trainingHours" type:"double"` + + // The scope of training used to create the solution version. The FULL option + // trains the solution version based on the entirety of the input solution's + // training data, while the UPDATE option processes only the training data that + // has changed since the creation of the last solution version. Choose UPDATE + // when you want to start recommending items added to the dataset without retraining + // the model. + // + // The UPDATE option can only be used after you've created a solution version + // with the FULL option and the training solution uses the native-recipe-hrnn-coldstart. + TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` +} + +// String returns the string representation +func (s SolutionVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionVersion) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *SolutionVersion) SetCreationDateTime(v time.Time) *SolutionVersion { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *SolutionVersion) SetDatasetGroupArn(v string) *SolutionVersion { + s.DatasetGroupArn = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *SolutionVersion) SetEventType(v string) *SolutionVersion { + s.EventType = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *SolutionVersion) SetFailureReason(v string) *SolutionVersion { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *SolutionVersion) SetLastUpdatedDateTime(v time.Time) *SolutionVersion { + s.LastUpdatedDateTime = &v + return s +} + +// SetPerformAutoML sets the PerformAutoML field's value. +func (s *SolutionVersion) SetPerformAutoML(v bool) *SolutionVersion { + s.PerformAutoML = &v + return s +} + +// SetPerformHPO sets the PerformHPO field's value. +func (s *SolutionVersion) SetPerformHPO(v bool) *SolutionVersion { + s.PerformHPO = &v + return s +} + +// SetRecipeArn sets the RecipeArn field's value. +func (s *SolutionVersion) SetRecipeArn(v string) *SolutionVersion { + s.RecipeArn = &v + return s +} + +// SetSolutionArn sets the SolutionArn field's value. +func (s *SolutionVersion) SetSolutionArn(v string) *SolutionVersion { + s.SolutionArn = &v + return s +} + +// SetSolutionConfig sets the SolutionConfig field's value. +func (s *SolutionVersion) SetSolutionConfig(v *SolutionConfig) *SolutionVersion { + s.SolutionConfig = v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *SolutionVersion) SetSolutionVersionArn(v string) *SolutionVersion { + s.SolutionVersionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SolutionVersion) SetStatus(v string) *SolutionVersion { + s.Status = &v + return s +} + +// SetTrainingHours sets the TrainingHours field's value. 
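// A sketch of requesting the incremental mode described above, assuming the
// CreateSolutionVersion operation and input type generated earlier in this
// file accept the trainingMode field, and a placeholder solution ARN; per the
// documentation, UPDATE presupposes an earlier FULL version of the same
// solution.
//
//    out, err := svc.CreateSolutionVersion((&personalize.CreateSolutionVersionInput{}).
//        SetSolutionArn("arn:aws:personalize:us-west-2:123456789012:solution/my-solution").
//        SetTrainingMode(personalize.TrainingModeUpdate))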
+func (s *SolutionVersion) SetTrainingHours(v float64) *SolutionVersion { + s.TrainingHours = &v + return s +} + +// SetTrainingMode sets the TrainingMode field's value. +func (s *SolutionVersion) SetTrainingMode(v string) *SolutionVersion { + s.TrainingMode = &v + return s +} + +// Provides a summary of the properties of a solution version. For a complete +// listing, call the DescribeSolutionVersion API. +type SolutionVersionSummary struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) that this version of a solution was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If a solution version fails, the reason behind the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The date and time (in Unix time) that the solution version was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the solution version. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the solution version. + // + // A solution version can be in one of the following states: + // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s SolutionVersionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionVersionSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *SolutionVersionSummary) SetCreationDateTime(v time.Time) *SolutionVersionSummary { + s.CreationDateTime = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *SolutionVersionSummary) SetFailureReason(v string) *SolutionVersionSummary { + s.FailureReason = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *SolutionVersionSummary) SetLastUpdatedDateTime(v time.Time) *SolutionVersionSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *SolutionVersionSummary) SetSolutionVersionArn(v string) *SolutionVersionSummary { + s.SolutionVersionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SolutionVersionSummary) SetStatus(v string) *SolutionVersionSummary { + s.Status = &v + return s +} + +type UpdateCampaignInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the campaign. + // + // CampaignArn is a required field + CampaignArn *string `locationName:"campaignArn" type:"string" required:"true"` + + // Specifies the requested minimum provisioned transactions (recommendations) + // per second that Amazon Personalize will support. + MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer"` + + // The ARN of a new solution version to deploy. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` +} + +// String returns the string representation +func (s UpdateCampaignInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCampaignInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
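// CampaignArn is required and minProvisionedTPS must be at least 1; both rules
// are enforced by the Validate method that follows. A sketch with a
// placeholder ARN, assuming an initialized client svc:
//
//    in := (&personalize.UpdateCampaignInput{}).
//        SetCampaignArn("arn:aws:personalize:us-west-2:123456789012:campaign/my-campaign").
//        SetMinProvisionedTPS(5)
//    if err := in.Validate(); err == nil {
//        _, err = svc.UpdateCampaign(in)
//    }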
+func (s *UpdateCampaignInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCampaignInput"} + if s.CampaignArn == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignArn")) + } + if s.MinProvisionedTPS != nil && *s.MinProvisionedTPS < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinProvisionedTPS", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *UpdateCampaignInput) SetCampaignArn(v string) *UpdateCampaignInput { + s.CampaignArn = &v + return s +} + +// SetMinProvisionedTPS sets the MinProvisionedTPS field's value. +func (s *UpdateCampaignInput) SetMinProvisionedTPS(v int64) *UpdateCampaignInput { + s.MinProvisionedTPS = &v + return s +} + +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *UpdateCampaignInput) SetSolutionVersionArn(v string) *UpdateCampaignInput { + s.SolutionVersionArn = &v + return s +} + +type UpdateCampaignOutput struct { + _ struct{} `type:"structure"` + + // The same campaign ARN as given in the request. + CampaignArn *string `locationName:"campaignArn" type:"string"` +} + +// String returns the string representation +func (s UpdateCampaignOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCampaignOutput) GoString() string { + return s.String() +} + +// SetCampaignArn sets the CampaignArn field's value. +func (s *UpdateCampaignOutput) SetCampaignArn(v string) *UpdateCampaignOutput { + s.CampaignArn = &v + return s +} + +const ( + // RecipeProviderService is a RecipeProvider enum value + RecipeProviderService = "SERVICE" +) + +const ( + // TrainingModeFull is a TrainingMode enum value + TrainingModeFull = "FULL" + + // TrainingModeUpdate is a TrainingMode enum value + TrainingModeUpdate = "UPDATE" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/doc.go new file mode 100644 index 00000000000..5ed3cd2b582 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/doc.go @@ -0,0 +1,29 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package personalize provides the client and types for making API +// requests to Amazon Personalize. +// +// Amazon Personalize is a machine learning service that makes it easy to add +// individualized recommendations to customers. +// +// See https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22 for more information on this service. +// +// See personalize package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/personalize/ +// +// Using the Client +// +// To contact Amazon Personalize with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Personalize client Personalize for more +// information on creating client for this service. 
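// A sketch of constructing the client described in this overview and mapping a
// failure back to the error constants this package exports (see errors.go,
// next in this diff); mySession stands in for a *session.Session created by
// the caller.
//
//    svc := personalize.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
//    _, err := svc.ListCampaigns(&personalize.ListCampaignsInput{})
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case personalize.ErrCodeInvalidNextTokenException:
//            // restart pagination from the first page
//        case personalize.ErrCodeInvalidInputException:
//            // correct the request parameters and retry
//        }
//    }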
+// https://docs.aws.amazon.com/sdk-for-go/api/service/personalize/#New +package personalize diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/errors.go new file mode 100644 index 00000000000..9511bd051a9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/errors.go @@ -0,0 +1,42 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package personalize + +const ( + + // ErrCodeInvalidInputException for service response error code + // "InvalidInputException". + // + // Provide a valid value for the field or parameter. + ErrCodeInvalidInputException = "InvalidInputException" + + // ErrCodeInvalidNextTokenException for service response error code + // "InvalidNextTokenException". + // + // The token is not valid. + ErrCodeInvalidNextTokenException = "InvalidNextTokenException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The limit on the number of requests per second has been exceeded. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // The specified resource already exists. + ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The specified resource is in use. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // Could not find the specified resource. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go new file mode 100644 index 00000000000..3ec8ede0c9c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go @@ -0,0 +1,101 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package personalize + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Personalize provides the API operation methods for making requests to +// Amazon Personalize. See this package's package overview docs +// for details on the service. +// +// Personalize methods are safe to use concurrently. It is not safe to +// modify or mutate any of the struct's properties though. +type Personalize struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "Personalize" // Name of service. + EndpointsID = "personalize" // ID to lookup a service endpoint with. + ServiceID = "Personalize" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the Personalize client with a session.
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Personalize client from just a session. +// svc := personalize.New(mySession) +// +// // Create a Personalize client with additional configuration +// svc := personalize.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Personalize { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "personalize" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Personalize { + svc := &Personalize{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2018-05-22", + JSONVersion: "1.1", + TargetPrefix: "AmazonPersonalize", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Personalize operation and runs any +// custom request initialization. +func (c *Personalize) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go index 416eb6012dd..291da014496 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go @@ -3,6 +3,9 @@ package pinpoint import ( + "fmt" + "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" @@ -54,7 +57,7 @@ func (c *Pinpoint) CreateAppRequest(input *CreateAppInput) (req *request.Request // CreateApp API operation for Amazon Pinpoint. // -// Creates or updates an app. +// Creates an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -65,22 +68,22 @@ func (c *Pinpoint) CreateAppRequest(input *CreateAppInput) (req *request.Request // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. 
// // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // // See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateApp func (c *Pinpoint) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { @@ -148,7 +151,8 @@ func (c *Pinpoint) CreateCampaignRequest(input *CreateCampaignInput) (req *reque // CreateCampaign API operation for Amazon Pinpoint. // -// Creates or updates a campaign. +// Creates a new campaign for an application or updates the settings of an existing +// campaign for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -159,22 +163,22 @@ func (c *Pinpoint) CreateCampaignRequest(input *CreateCampaignInput) (req *reque // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // // See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateCampaign func (c *Pinpoint) CreateCampaign(input *CreateCampaignInput) (*CreateCampaignOutput, error) { @@ -198,6 +202,98 @@ func (c *Pinpoint) CreateCampaignWithContext(ctx aws.Context, input *CreateCampa return out, req.Send() } +const opCreateEmailTemplate = "CreateEmailTemplate" + +// CreateEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateEmailTemplate for more information on using the CreateEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateEmailTemplateRequest method. 
+// req, resp := client.CreateEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateEmailTemplate +func (c *Pinpoint) CreateEmailTemplateRequest(input *CreateEmailTemplateInput) (req *request.Request, output *CreateEmailTemplateOutput) { + op := &request.Operation{ + Name: opCreateEmailTemplate, + HTTPMethod: "POST", + HTTPPath: "/v1/templates/{template-name}/email", + } + + if input == nil { + input = &CreateEmailTemplateInput{} + } + + output = &CreateEmailTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateEmailTemplate API operation for Amazon Pinpoint. +// +// Creates a message template that you can use in messages that are sent through +// the email channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation CreateEmailTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateEmailTemplate +func (c *Pinpoint) CreateEmailTemplate(input *CreateEmailTemplateInput) (*CreateEmailTemplateOutput, error) { + req, out := c.CreateEmailTemplateRequest(input) + return out, req.Send() +} + +// CreateEmailTemplateWithContext is the same as CreateEmailTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) CreateEmailTemplateWithContext(ctx aws.Context, input *CreateEmailTemplateInput, opts ...request.Option) (*CreateEmailTemplateOutput, error) { + req, out := c.CreateEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateExportJob = "CreateExportJob" // CreateExportJobRequest generates a "aws/request.Request" representing the @@ -242,7 +338,7 @@ func (c *Pinpoint) CreateExportJobRequest(input *CreateExportJobInput) (req *req // CreateExportJob API operation for Amazon Pinpoint. // -// Creates an export job. +// Creates an export job for an application. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -253,22 +349,22 @@ func (c *Pinpoint) CreateExportJobRequest(input *CreateExportJobInput) (req *req // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // // See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateExportJob func (c *Pinpoint) CreateExportJob(input *CreateExportJobInput) (*CreateExportJobOutput, error) { @@ -336,7 +432,7 @@ func (c *Pinpoint) CreateImportJobRequest(input *CreateImportJobInput) (req *req // CreateImportJob API operation for Amazon Pinpoint. // -// Creates or updates an import job. +// Creates an import job for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -347,22 +443,22 @@ func (c *Pinpoint) CreateImportJobRequest(input *CreateImportJobInput) (req *req // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // // See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateImportJob func (c *Pinpoint) CreateImportJob(input *CreateImportJobInput) (*CreateImportJobOutput, error) { @@ -386,10830 +482,16943 @@ func (c *Pinpoint) CreateImportJobWithContext(ctx aws.Context, input *CreateImpo return out, req.Send() } -const opCreateSegment = "CreateSegment" +const opCreateJourney = "CreateJourney" -// CreateSegmentRequest generates a "aws/request.Request" representing the -// client's request for the CreateSegment operation. The "output" return +// CreateJourneyRequest generates a "aws/request.Request" representing the +// client's request for the CreateJourney operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateSegment for more information on using the CreateSegment +// See CreateJourney for more information on using the CreateJourney // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateSegmentRequest method. -// req, resp := client.CreateSegmentRequest(params) +// // Example sending a request using the CreateJourneyRequest method. +// req, resp := client.CreateJourneyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSegment -func (c *Pinpoint) CreateSegmentRequest(input *CreateSegmentInput) (req *request.Request, output *CreateSegmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateJourney +func (c *Pinpoint) CreateJourneyRequest(input *CreateJourneyInput) (req *request.Request, output *CreateJourneyOutput) { op := &request.Operation{ - Name: opCreateSegment, + Name: opCreateJourney, HTTPMethod: "POST", - HTTPPath: "/v1/apps/{application-id}/segments", + HTTPPath: "/v1/apps/{application-id}/journeys", } if input == nil { - input = &CreateSegmentInput{} + input = &CreateJourneyInput{} } - output = &CreateSegmentOutput{} + output = &CreateJourneyOutput{} req = c.newRequest(op, input, output) return } -// CreateSegment API operation for Amazon Pinpoint. +// CreateJourney API operation for Amazon Pinpoint. // -// Used to create or update a segment. +// Creates a journey for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation CreateSegment for usage and error information. +// API operation CreateJourney for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSegment -func (c *Pinpoint) CreateSegment(input *CreateSegmentInput) (*CreateSegmentOutput, error) { - req, out := c.CreateSegmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateJourney +func (c *Pinpoint) CreateJourney(input *CreateJourneyInput) (*CreateJourneyOutput, error) { + req, out := c.CreateJourneyRequest(input) return out, req.Send() } -// CreateSegmentWithContext is the same as CreateSegment with the addition of +// CreateJourneyWithContext is the same as CreateJourney with the addition of // the ability to pass a context and additional request options. // -// See CreateSegment for details on how to use this API operation. +// See CreateJourney for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) CreateSegmentWithContext(ctx aws.Context, input *CreateSegmentInput, opts ...request.Option) (*CreateSegmentOutput, error) { - req, out := c.CreateSegmentRequest(input) +func (c *Pinpoint) CreateJourneyWithContext(ctx aws.Context, input *CreateJourneyInput, opts ...request.Option) (*CreateJourneyOutput, error) { + req, out := c.CreateJourneyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteAdmChannel = "DeleteAdmChannel" +const opCreatePushTemplate = "CreatePushTemplate" -// DeleteAdmChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteAdmChannel operation. The "output" return +// CreatePushTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreatePushTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteAdmChannel for more information on using the DeleteAdmChannel +// See CreatePushTemplate for more information on using the CreatePushTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteAdmChannelRequest method. -// req, resp := client.DeleteAdmChannelRequest(params) +// // Example sending a request using the CreatePushTemplateRequest method. 
+// req, resp := client.CreatePushTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteAdmChannel -func (c *Pinpoint) DeleteAdmChannelRequest(input *DeleteAdmChannelInput) (req *request.Request, output *DeleteAdmChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreatePushTemplate +func (c *Pinpoint) CreatePushTemplateRequest(input *CreatePushTemplateInput) (req *request.Request, output *CreatePushTemplateOutput) { op := &request.Operation{ - Name: opDeleteAdmChannel, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/adm", + Name: opCreatePushTemplate, + HTTPMethod: "POST", + HTTPPath: "/v1/templates/{template-name}/push", } if input == nil { - input = &DeleteAdmChannelInput{} + input = &CreatePushTemplateInput{} } - output = &DeleteAdmChannelOutput{} + output = &CreatePushTemplateOutput{} req = c.newRequest(op, input, output) return } -// DeleteAdmChannel API operation for Amazon Pinpoint. +// CreatePushTemplate API operation for Amazon Pinpoint. // -// Delete an ADM channel. +// Creates a message template that you can use in messages that are sent through +// a push notification channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteAdmChannel for usage and error information. +// API operation CreatePushTemplate for usage and error information. // // Returned Error Codes: +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. -// -// * ErrCodeNotFoundException "NotFoundException" -// Simple message object. -// -// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteAdmChannel -func (c *Pinpoint) DeleteAdmChannel(input *DeleteAdmChannelInput) (*DeleteAdmChannelOutput, error) { - req, out := c.DeleteAdmChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreatePushTemplate +func (c *Pinpoint) CreatePushTemplate(input *CreatePushTemplateInput) (*CreatePushTemplateOutput, error) { + req, out := c.CreatePushTemplateRequest(input) return out, req.Send() } -// DeleteAdmChannelWithContext is the same as DeleteAdmChannel with the addition of +// CreatePushTemplateWithContext is the same as CreatePushTemplate with the addition of // the ability to pass a context and additional request options. // -// See DeleteAdmChannel for details on how to use this API operation. 
+// See CreatePushTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteAdmChannelWithContext(ctx aws.Context, input *DeleteAdmChannelInput, opts ...request.Option) (*DeleteAdmChannelOutput, error) { - req, out := c.DeleteAdmChannelRequest(input) +func (c *Pinpoint) CreatePushTemplateWithContext(ctx aws.Context, input *CreatePushTemplateInput, opts ...request.Option) (*CreatePushTemplateOutput, error) { + req, out := c.CreatePushTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteApnsChannel = "DeleteApnsChannel" +const opCreateSegment = "CreateSegment" -// DeleteApnsChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApnsChannel operation. The "output" return +// CreateSegmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateSegment operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteApnsChannel for more information on using the DeleteApnsChannel +// See CreateSegment for more information on using the CreateSegment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteApnsChannelRequest method. -// req, resp := client.DeleteApnsChannelRequest(params) +// // Example sending a request using the CreateSegmentRequest method. +// req, resp := client.CreateSegmentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsChannel -func (c *Pinpoint) DeleteApnsChannelRequest(input *DeleteApnsChannelInput) (req *request.Request, output *DeleteApnsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSegment +func (c *Pinpoint) CreateSegmentRequest(input *CreateSegmentInput) (req *request.Request, output *CreateSegmentOutput) { op := &request.Operation{ - Name: opDeleteApnsChannel, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/apns", + Name: opCreateSegment, + HTTPMethod: "POST", + HTTPPath: "/v1/apps/{application-id}/segments", } if input == nil { - input = &DeleteApnsChannelInput{} + input = &CreateSegmentInput{} } - output = &DeleteApnsChannelOutput{} + output = &CreateSegmentOutput{} req = c.newRequest(op, input, output) return } -// DeleteApnsChannel API operation for Amazon Pinpoint. +// CreateSegment API operation for Amazon Pinpoint. // -// Deletes the APNs channel for an app. +// Creates a new segment for an application or updates the configuration, dimension, +// and other settings for an existing segment that's associated with an application. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteApnsChannel for usage and error information. +// API operation CreateSegment for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsChannel -func (c *Pinpoint) DeleteApnsChannel(input *DeleteApnsChannelInput) (*DeleteApnsChannelOutput, error) { - req, out := c.DeleteApnsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSegment +func (c *Pinpoint) CreateSegment(input *CreateSegmentInput) (*CreateSegmentOutput, error) { + req, out := c.CreateSegmentRequest(input) return out, req.Send() } -// DeleteApnsChannelWithContext is the same as DeleteApnsChannel with the addition of +// CreateSegmentWithContext is the same as CreateSegment with the addition of // the ability to pass a context and additional request options. // -// See DeleteApnsChannel for details on how to use this API operation. +// See CreateSegment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteApnsChannelWithContext(ctx aws.Context, input *DeleteApnsChannelInput, opts ...request.Option) (*DeleteApnsChannelOutput, error) { - req, out := c.DeleteApnsChannelRequest(input) +func (c *Pinpoint) CreateSegmentWithContext(ctx aws.Context, input *CreateSegmentInput, opts ...request.Option) (*CreateSegmentOutput, error) { + req, out := c.CreateSegmentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteApnsSandboxChannel = "DeleteApnsSandboxChannel" +const opCreateSmsTemplate = "CreateSmsTemplate" -// DeleteApnsSandboxChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApnsSandboxChannel operation. The "output" return +// CreateSmsTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateSmsTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
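To make the two-step pattern just described concrete, here is an illustrative sketch (not vendored code): CreateSmsTemplateRequest returns the request plus an output that only becomes valid after Send succeeds, and the request can be customized, for example with an extra header, before it is sent. The template name, body, and header value below are placeholders; the input field names follow the generated v1 types.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := pinpoint.New(sess)

	// Build the request without sending it, so custom logic can be
	// attached to the request lifecycle before the call is made.
	req, resp := svc.CreateSmsTemplateRequest(&pinpoint.CreateSmsTemplateInput{
		TemplateName: aws.String("example-template"), // placeholder name
		SMSTemplateRequest: &pinpoint.SMSTemplateRequest{
			Body: aws.String("Hello from Pinpoint"), // placeholder body
		},
	})
	req.HTTPRequest.Header.Set("X-Example-Header", "demo") // hypothetical custom header

	// resp is not valid until Send returns without error.
	if err := req.Send(); err != nil {
		fmt.Println("CreateSmsTemplate failed:", err)
		return
	}
	fmt.Println(resp)
}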
// -// See DeleteApnsSandboxChannel for more information on using the DeleteApnsSandboxChannel +// See CreateSmsTemplate for more information on using the CreateSmsTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteApnsSandboxChannelRequest method. -// req, resp := client.DeleteApnsSandboxChannelRequest(params) +// // Example sending a request using the CreateSmsTemplateRequest method. +// req, resp := client.CreateSmsTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsSandboxChannel -func (c *Pinpoint) DeleteApnsSandboxChannelRequest(input *DeleteApnsSandboxChannelInput) (req *request.Request, output *DeleteApnsSandboxChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSmsTemplate +func (c *Pinpoint) CreateSmsTemplateRequest(input *CreateSmsTemplateInput) (req *request.Request, output *CreateSmsTemplateOutput) { op := &request.Operation{ - Name: opDeleteApnsSandboxChannel, - HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", + Name: opCreateSmsTemplate, + HTTPMethod: "POST", + HTTPPath: "/v1/templates/{template-name}/sms", } if input == nil { - input = &DeleteApnsSandboxChannelInput{} + input = &CreateSmsTemplateInput{} } - output = &DeleteApnsSandboxChannelOutput{} + output = &CreateSmsTemplateOutput{} req = c.newRequest(op, input, output) return } -// DeleteApnsSandboxChannel API operation for Amazon Pinpoint. +// CreateSmsTemplate API operation for Amazon Pinpoint. // -// Delete an APNS sandbox channel. +// Creates a message template that you can use in messages that are sent through +// the SMS channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteApnsSandboxChannel for usage and error information. +// API operation CreateSmsTemplate for usage and error information. // // Returned Error Codes: +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. -// -// * ErrCodeNotFoundException "NotFoundException" -// Simple message object. -// -// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsSandboxChannel -func (c *Pinpoint) DeleteApnsSandboxChannel(input *DeleteApnsSandboxChannelInput) (*DeleteApnsSandboxChannelOutput, error) { - req, out := c.DeleteApnsSandboxChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateSmsTemplate +func (c *Pinpoint) CreateSmsTemplate(input *CreateSmsTemplateInput) (*CreateSmsTemplateOutput, error) { + req, out := c.CreateSmsTemplateRequest(input) return out, req.Send() } -// DeleteApnsSandboxChannelWithContext is the same as DeleteApnsSandboxChannel with the addition of +// CreateSmsTemplateWithContext is the same as CreateSmsTemplate with the addition of // the ability to pass a context and additional request options. // -// See DeleteApnsSandboxChannel for details on how to use this API operation. +// See CreateSmsTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteApnsSandboxChannelWithContext(ctx aws.Context, input *DeleteApnsSandboxChannelInput, opts ...request.Option) (*DeleteApnsSandboxChannelOutput, error) { - req, out := c.DeleteApnsSandboxChannelRequest(input) +func (c *Pinpoint) CreateSmsTemplateWithContext(ctx aws.Context, input *CreateSmsTemplateInput, opts ...request.Option) (*CreateSmsTemplateOutput, error) { + req, out := c.CreateSmsTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteApnsVoipChannel = "DeleteApnsVoipChannel" +const opDeleteAdmChannel = "DeleteAdmChannel" -// DeleteApnsVoipChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApnsVoipChannel operation. The "output" return +// DeleteAdmChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAdmChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteApnsVoipChannel for more information on using the DeleteApnsVoipChannel +// See DeleteAdmChannel for more information on using the DeleteAdmChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteApnsVoipChannelRequest method. -// req, resp := client.DeleteApnsVoipChannelRequest(params) +// // Example sending a request using the DeleteAdmChannelRequest method. 
+// req, resp := client.DeleteAdmChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipChannel -func (c *Pinpoint) DeleteApnsVoipChannelRequest(input *DeleteApnsVoipChannelInput) (req *request.Request, output *DeleteApnsVoipChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteAdmChannel +func (c *Pinpoint) DeleteAdmChannelRequest(input *DeleteAdmChannelInput) (req *request.Request, output *DeleteAdmChannelOutput) { op := &request.Operation{ - Name: opDeleteApnsVoipChannel, + Name: opDeleteAdmChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", + HTTPPath: "/v1/apps/{application-id}/channels/adm", } if input == nil { - input = &DeleteApnsVoipChannelInput{} + input = &DeleteAdmChannelInput{} } - output = &DeleteApnsVoipChannelOutput{} + output = &DeleteAdmChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteApnsVoipChannel API operation for Amazon Pinpoint. +// DeleteAdmChannel API operation for Amazon Pinpoint. // -// Delete an APNS VoIP channel +// Disables the ADM channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteApnsVoipChannel for usage and error information. +// API operation DeleteAdmChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipChannel -func (c *Pinpoint) DeleteApnsVoipChannel(input *DeleteApnsVoipChannelInput) (*DeleteApnsVoipChannelOutput, error) { - req, out := c.DeleteApnsVoipChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteAdmChannel +func (c *Pinpoint) DeleteAdmChannel(input *DeleteAdmChannelInput) (*DeleteAdmChannelOutput, error) { + req, out := c.DeleteAdmChannelRequest(input) return out, req.Send() } -// DeleteApnsVoipChannelWithContext is the same as DeleteApnsVoipChannel with the addition of +// DeleteAdmChannelWithContext is the same as DeleteAdmChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteApnsVoipChannel for details on how to use this API operation. +// See DeleteAdmChannel for details on how to use this API operation. 
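The one-shot wrappers such as DeleteAdmChannel fold the Request/Send steps together; the documented error codes can then be distinguished with an awserr.Error type assertion, as in this sketch (the application ID is a placeholder).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := pinpoint.New(sess)

	// One-shot wrapper around DeleteAdmChannelRequest + Send.
	_, err := svc.DeleteAdmChannel(&pinpoint.DeleteAdmChannelInput{
		ApplicationId: aws.String("1234567890abcdef"), // placeholder application ID
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == pinpoint.ErrCodeNotFoundException {
		// The channel (or application) does not exist; treat as already deleted.
		fmt.Println("nothing to delete:", aerr.Message())
		return
	}
	if err != nil {
		fmt.Println("DeleteAdmChannel failed:", err)
	}
}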
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteApnsVoipChannelWithContext(ctx aws.Context, input *DeleteApnsVoipChannelInput, opts ...request.Option) (*DeleteApnsVoipChannelOutput, error) { - req, out := c.DeleteApnsVoipChannelRequest(input) +func (c *Pinpoint) DeleteAdmChannelWithContext(ctx aws.Context, input *DeleteAdmChannelInput, opts ...request.Option) (*DeleteAdmChannelOutput, error) { + req, out := c.DeleteAdmChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteApnsVoipSandboxChannel = "DeleteApnsVoipSandboxChannel" +const opDeleteApnsChannel = "DeleteApnsChannel" -// DeleteApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApnsVoipSandboxChannel operation. The "output" return +// DeleteApnsChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApnsChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteApnsVoipSandboxChannel for more information on using the DeleteApnsVoipSandboxChannel +// See DeleteApnsChannel for more information on using the DeleteApnsChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteApnsVoipSandboxChannelRequest method. -// req, resp := client.DeleteApnsVoipSandboxChannelRequest(params) +// // Example sending a request using the DeleteApnsChannelRequest method. +// req, resp := client.DeleteApnsChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipSandboxChannel -func (c *Pinpoint) DeleteApnsVoipSandboxChannelRequest(input *DeleteApnsVoipSandboxChannelInput) (req *request.Request, output *DeleteApnsVoipSandboxChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsChannel +func (c *Pinpoint) DeleteApnsChannelRequest(input *DeleteApnsChannelInput) (req *request.Request, output *DeleteApnsChannelOutput) { op := &request.Operation{ - Name: opDeleteApnsVoipSandboxChannel, + Name: opDeleteApnsChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/apns_voip_sandbox", + HTTPPath: "/v1/apps/{application-id}/channels/apns", } if input == nil { - input = &DeleteApnsVoipSandboxChannelInput{} + input = &DeleteApnsChannelInput{} } - output = &DeleteApnsVoipSandboxChannelOutput{} + output = &DeleteApnsChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteApnsVoipSandboxChannel API operation for Amazon Pinpoint. +// DeleteApnsChannel API operation for Amazon Pinpoint. // -// Delete an APNS VoIP sandbox channel +// Disables the APNs channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteApnsVoipSandboxChannel for usage and error information. +// API operation DeleteApnsChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipSandboxChannel -func (c *Pinpoint) DeleteApnsVoipSandboxChannel(input *DeleteApnsVoipSandboxChannelInput) (*DeleteApnsVoipSandboxChannelOutput, error) { - req, out := c.DeleteApnsVoipSandboxChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsChannel +func (c *Pinpoint) DeleteApnsChannel(input *DeleteApnsChannelInput) (*DeleteApnsChannelOutput, error) { + req, out := c.DeleteApnsChannelRequest(input) return out, req.Send() } -// DeleteApnsVoipSandboxChannelWithContext is the same as DeleteApnsVoipSandboxChannel with the addition of +// DeleteApnsChannelWithContext is the same as DeleteApnsChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteApnsVoipSandboxChannel for details on how to use this API operation. +// See DeleteApnsChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteApnsVoipSandboxChannelWithContext(ctx aws.Context, input *DeleteApnsVoipSandboxChannelInput, opts ...request.Option) (*DeleteApnsVoipSandboxChannelOutput, error) { - req, out := c.DeleteApnsVoipSandboxChannelRequest(input) +func (c *Pinpoint) DeleteApnsChannelWithContext(ctx aws.Context, input *DeleteApnsChannelInput, opts ...request.Option) (*DeleteApnsChannelOutput, error) { + req, out := c.DeleteApnsChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteApp = "DeleteApp" +const opDeleteApnsSandboxChannel = "DeleteApnsSandboxChannel" -// DeleteAppRequest generates a "aws/request.Request" representing the -// client's request for the DeleteApp operation. The "output" return +// DeleteApnsSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApnsSandboxChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
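The *WithContext variants accept any non-nil context.Context (which satisfies aws.Context), so a deadline or cancellation can bound the call, as in this sketch (the application ID is a placeholder).

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := pinpoint.New(sess)

	// A standard context satisfies aws.Context; it must be non-nil.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	out, err := svc.DeleteApnsChannelWithContext(ctx, &pinpoint.DeleteApnsChannelInput{
		ApplicationId: aws.String("1234567890abcdef"), // placeholder application ID
	})
	if err != nil {
		fmt.Println("DeleteApnsChannel failed or timed out:", err)
		return
	}
	fmt.Println(out)
}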
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteApp for more information on using the DeleteApp +// See DeleteApnsSandboxChannel for more information on using the DeleteApnsSandboxChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteAppRequest method. -// req, resp := client.DeleteAppRequest(params) +// // Example sending a request using the DeleteApnsSandboxChannelRequest method. +// req, resp := client.DeleteApnsSandboxChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApp -func (c *Pinpoint) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsSandboxChannel +func (c *Pinpoint) DeleteApnsSandboxChannelRequest(input *DeleteApnsSandboxChannelInput) (req *request.Request, output *DeleteApnsSandboxChannelOutput) { op := &request.Operation{ - Name: opDeleteApp, + Name: opDeleteApnsSandboxChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}", + HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", } if input == nil { - input = &DeleteAppInput{} + input = &DeleteApnsSandboxChannelInput{} } - output = &DeleteAppOutput{} + output = &DeleteApnsSandboxChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteApp API operation for Amazon Pinpoint. +// DeleteApnsSandboxChannel API operation for Amazon Pinpoint. // -// Deletes an app. +// Disables the APNs sandbox channel for an application and deletes any existing +// settings for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteApp for usage and error information. +// API operation DeleteApnsSandboxChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApp -func (c *Pinpoint) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { - req, out := c.DeleteAppRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsSandboxChannel +func (c *Pinpoint) DeleteApnsSandboxChannel(input *DeleteApnsSandboxChannelInput) (*DeleteApnsSandboxChannelOutput, error) { + req, out := c.DeleteApnsSandboxChannelRequest(input) return out, req.Send() } -// DeleteAppWithContext is the same as DeleteApp with the addition of +// DeleteApnsSandboxChannelWithContext is the same as DeleteApnsSandboxChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteApp for details on how to use this API operation. +// See DeleteApnsSandboxChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteAppWithContext(ctx aws.Context, input *DeleteAppInput, opts ...request.Option) (*DeleteAppOutput, error) { - req, out := c.DeleteAppRequest(input) +func (c *Pinpoint) DeleteApnsSandboxChannelWithContext(ctx aws.Context, input *DeleteApnsSandboxChannelInput, opts ...request.Option) (*DeleteApnsSandboxChannelOutput, error) { + req, out := c.DeleteApnsSandboxChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteBaiduChannel = "DeleteBaiduChannel" +const opDeleteApnsVoipChannel = "DeleteApnsVoipChannel" -// DeleteBaiduChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBaiduChannel operation. The "output" return +// DeleteApnsVoipChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApnsVoipChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteBaiduChannel for more information on using the DeleteBaiduChannel +// See DeleteApnsVoipChannel for more information on using the DeleteApnsVoipChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteBaiduChannelRequest method. -// req, resp := client.DeleteBaiduChannelRequest(params) +// // Example sending a request using the DeleteApnsVoipChannelRequest method. 
+// req, resp := client.DeleteApnsVoipChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteBaiduChannel -func (c *Pinpoint) DeleteBaiduChannelRequest(input *DeleteBaiduChannelInput) (req *request.Request, output *DeleteBaiduChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipChannel +func (c *Pinpoint) DeleteApnsVoipChannelRequest(input *DeleteApnsVoipChannelInput) (req *request.Request, output *DeleteApnsVoipChannelOutput) { op := &request.Operation{ - Name: opDeleteBaiduChannel, + Name: opDeleteApnsVoipChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/baidu", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", } if input == nil { - input = &DeleteBaiduChannelInput{} + input = &DeleteApnsVoipChannelInput{} } - output = &DeleteBaiduChannelOutput{} + output = &DeleteApnsVoipChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteBaiduChannel API operation for Amazon Pinpoint. +// DeleteApnsVoipChannel API operation for Amazon Pinpoint. // -// Delete a BAIDU GCM channel +// Disables the APNs VoIP channel for an application and deletes any existing +// settings for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteBaiduChannel for usage and error information. +// API operation DeleteApnsVoipChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteBaiduChannel -func (c *Pinpoint) DeleteBaiduChannel(input *DeleteBaiduChannelInput) (*DeleteBaiduChannelOutput, error) { - req, out := c.DeleteBaiduChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipChannel +func (c *Pinpoint) DeleteApnsVoipChannel(input *DeleteApnsVoipChannelInput) (*DeleteApnsVoipChannelOutput, error) { + req, out := c.DeleteApnsVoipChannelRequest(input) return out, req.Send() } -// DeleteBaiduChannelWithContext is the same as DeleteBaiduChannel with the addition of +// DeleteApnsVoipChannelWithContext is the same as DeleteApnsVoipChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteBaiduChannel for details on how to use this API operation. 
+// See DeleteApnsVoipChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteBaiduChannelWithContext(ctx aws.Context, input *DeleteBaiduChannelInput, opts ...request.Option) (*DeleteBaiduChannelOutput, error) { - req, out := c.DeleteBaiduChannelRequest(input) +func (c *Pinpoint) DeleteApnsVoipChannelWithContext(ctx aws.Context, input *DeleteApnsVoipChannelInput, opts ...request.Option) (*DeleteApnsVoipChannelOutput, error) { + req, out := c.DeleteApnsVoipChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCampaign = "DeleteCampaign" +const opDeleteApnsVoipSandboxChannel = "DeleteApnsVoipSandboxChannel" -// DeleteCampaignRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCampaign operation. The "output" return +// DeleteApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApnsVoipSandboxChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteCampaign for more information on using the DeleteCampaign +// See DeleteApnsVoipSandboxChannel for more information on using the DeleteApnsVoipSandboxChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCampaignRequest method. -// req, resp := client.DeleteCampaignRequest(params) +// // Example sending a request using the DeleteApnsVoipSandboxChannelRequest method. +// req, resp := client.DeleteApnsVoipSandboxChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteCampaign -func (c *Pinpoint) DeleteCampaignRequest(input *DeleteCampaignInput) (req *request.Request, output *DeleteCampaignOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipSandboxChannel +func (c *Pinpoint) DeleteApnsVoipSandboxChannelRequest(input *DeleteApnsVoipSandboxChannelInput) (req *request.Request, output *DeleteApnsVoipSandboxChannelOutput) { op := &request.Operation{ - Name: opDeleteCampaign, + Name: opDeleteApnsVoipSandboxChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip_sandbox", } if input == nil { - input = &DeleteCampaignInput{} + input = &DeleteApnsVoipSandboxChannelInput{} } - output = &DeleteCampaignOutput{} + output = &DeleteApnsVoipSandboxChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteCampaign API operation for Amazon Pinpoint. +// DeleteApnsVoipSandboxChannel API operation for Amazon Pinpoint. // -// Deletes a campaign. +// Disables the APNs VoIP sandbox channel for an application and deletes any +// existing settings for the channel. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteCampaign for usage and error information. +// API operation DeleteApnsVoipSandboxChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteCampaign -func (c *Pinpoint) DeleteCampaign(input *DeleteCampaignInput) (*DeleteCampaignOutput, error) { - req, out := c.DeleteCampaignRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApnsVoipSandboxChannel +func (c *Pinpoint) DeleteApnsVoipSandboxChannel(input *DeleteApnsVoipSandboxChannelInput) (*DeleteApnsVoipSandboxChannelOutput, error) { + req, out := c.DeleteApnsVoipSandboxChannelRequest(input) return out, req.Send() } -// DeleteCampaignWithContext is the same as DeleteCampaign with the addition of +// DeleteApnsVoipSandboxChannelWithContext is the same as DeleteApnsVoipSandboxChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteCampaign for details on how to use this API operation. +// See DeleteApnsVoipSandboxChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteCampaignWithContext(ctx aws.Context, input *DeleteCampaignInput, opts ...request.Option) (*DeleteCampaignOutput, error) { - req, out := c.DeleteCampaignRequest(input) +func (c *Pinpoint) DeleteApnsVoipSandboxChannelWithContext(ctx aws.Context, input *DeleteApnsVoipSandboxChannelInput, opts ...request.Option) (*DeleteApnsVoipSandboxChannelOutput, error) { + req, out := c.DeleteApnsVoipSandboxChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteEmailChannel = "DeleteEmailChannel" +const opDeleteApp = "DeleteApp" -// DeleteEmailChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEmailChannel operation. The "output" return +// DeleteAppRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApp operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteEmailChannel for more information on using the DeleteEmailChannel +// See DeleteApp for more information on using the DeleteApp // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteEmailChannelRequest method. -// req, resp := client.DeleteEmailChannelRequest(params) +// // Example sending a request using the DeleteAppRequest method. +// req, resp := client.DeleteAppRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailChannel -func (c *Pinpoint) DeleteEmailChannelRequest(input *DeleteEmailChannelInput) (req *request.Request, output *DeleteEmailChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApp +func (c *Pinpoint) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { op := &request.Operation{ - Name: opDeleteEmailChannel, + Name: opDeleteApp, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/email", + HTTPPath: "/v1/apps/{application-id}", } if input == nil { - input = &DeleteEmailChannelInput{} + input = &DeleteAppInput{} } - output = &DeleteEmailChannelOutput{} + output = &DeleteAppOutput{} req = c.newRequest(op, input, output) return } -// DeleteEmailChannel API operation for Amazon Pinpoint. +// DeleteApp API operation for Amazon Pinpoint. // -// Delete an email channel. +// Deletes an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteEmailChannel for usage and error information. +// API operation DeleteApp for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailChannel -func (c *Pinpoint) DeleteEmailChannel(input *DeleteEmailChannelInput) (*DeleteEmailChannelOutput, error) { - req, out := c.DeleteEmailChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteApp +func (c *Pinpoint) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) return out, req.Send() } -// DeleteEmailChannelWithContext is the same as DeleteEmailChannel with the addition of +// DeleteAppWithContext is the same as DeleteApp with the addition of // the ability to pass a context and additional request options. // -// See DeleteEmailChannel for details on how to use this API operation. +// See DeleteApp for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteEmailChannelWithContext(ctx aws.Context, input *DeleteEmailChannelInput, opts ...request.Option) (*DeleteEmailChannelOutput, error) { - req, out := c.DeleteEmailChannelRequest(input) +func (c *Pinpoint) DeleteAppWithContext(ctx aws.Context, input *DeleteAppInput, opts ...request.Option) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteEndpoint = "DeleteEndpoint" +const opDeleteBaiduChannel = "DeleteBaiduChannel" -// DeleteEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEndpoint operation. The "output" return +// DeleteBaiduChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBaiduChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteEndpoint for more information on using the DeleteEndpoint +// See DeleteBaiduChannel for more information on using the DeleteBaiduChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteEndpointRequest method. -// req, resp := client.DeleteEndpointRequest(params) +// // Example sending a request using the DeleteBaiduChannelRequest method. 
+// req, resp := client.DeleteBaiduChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEndpoint -func (c *Pinpoint) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteBaiduChannel +func (c *Pinpoint) DeleteBaiduChannelRequest(input *DeleteBaiduChannelInput) (req *request.Request, output *DeleteBaiduChannelOutput) { op := &request.Operation{ - Name: opDeleteEndpoint, + Name: opDeleteBaiduChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/endpoints/{endpoint-id}", + HTTPPath: "/v1/apps/{application-id}/channels/baidu", } if input == nil { - input = &DeleteEndpointInput{} + input = &DeleteBaiduChannelInput{} } - output = &DeleteEndpointOutput{} + output = &DeleteBaiduChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteEndpoint API operation for Amazon Pinpoint. +// DeleteBaiduChannel API operation for Amazon Pinpoint. // -// Deletes an endpoint. +// Disables the Baidu channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteEndpoint for usage and error information. +// API operation DeleteBaiduChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEndpoint -func (c *Pinpoint) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteBaiduChannel +func (c *Pinpoint) DeleteBaiduChannel(input *DeleteBaiduChannelInput) (*DeleteBaiduChannelOutput, error) { + req, out := c.DeleteBaiduChannelRequest(input) return out, req.Send() } -// DeleteEndpointWithContext is the same as DeleteEndpoint with the addition of +// DeleteBaiduChannelWithContext is the same as DeleteBaiduChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteEndpoint for details on how to use this API operation. +// See DeleteBaiduChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) +func (c *Pinpoint) DeleteBaiduChannelWithContext(ctx aws.Context, input *DeleteBaiduChannelInput, opts ...request.Option) (*DeleteBaiduChannelOutput, error) { + req, out := c.DeleteBaiduChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteEventStream = "DeleteEventStream" +const opDeleteCampaign = "DeleteCampaign" -// DeleteEventStreamRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEventStream operation. The "output" return +// DeleteCampaignRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCampaign operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteEventStream for more information on using the DeleteEventStream +// See DeleteCampaign for more information on using the DeleteCampaign // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteEventStreamRequest method. -// req, resp := client.DeleteEventStreamRequest(params) +// // Example sending a request using the DeleteCampaignRequest method. +// req, resp := client.DeleteCampaignRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEventStream -func (c *Pinpoint) DeleteEventStreamRequest(input *DeleteEventStreamInput) (req *request.Request, output *DeleteEventStreamOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteCampaign +func (c *Pinpoint) DeleteCampaignRequest(input *DeleteCampaignInput) (req *request.Request, output *DeleteCampaignOutput) { op := &request.Operation{ - Name: opDeleteEventStream, + Name: opDeleteCampaign, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/eventstream", + HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}", } if input == nil { - input = &DeleteEventStreamInput{} + input = &DeleteCampaignInput{} } - output = &DeleteEventStreamOutput{} + output = &DeleteCampaignOutput{} req = c.newRequest(op, input, output) return } -// DeleteEventStream API operation for Amazon Pinpoint. +// DeleteCampaign API operation for Amazon Pinpoint. // -// Deletes the event stream for an app. +// Deletes a campaign from an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteEventStream for usage and error information. +// API operation DeleteCampaign for usage and error information. 
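Each *WithContext variant rewritten in these hunks carries the same contract: the context must be non-nil or the call panics, and it is used for request cancellation. A sketch of honoring that with a deadline via DeleteBaiduChannelWithContext, assuming a pinpoint client svc and a placeholder application ID:

    // Assumes imports: context, time, github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/service/pinpoint.
    func deleteBaiduChannelWithDeadline(svc *pinpoint.Pinpoint) error {
        // Cancel the API call if it has not finished within ten seconds.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        _, err := svc.DeleteBaiduChannelWithContext(ctx, &pinpoint.DeleteBaiduChannelInput{
            ApplicationId: aws.String("exampleAppId"), // placeholder
        })
        return err
    }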
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEventStream -func (c *Pinpoint) DeleteEventStream(input *DeleteEventStreamInput) (*DeleteEventStreamOutput, error) { - req, out := c.DeleteEventStreamRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteCampaign +func (c *Pinpoint) DeleteCampaign(input *DeleteCampaignInput) (*DeleteCampaignOutput, error) { + req, out := c.DeleteCampaignRequest(input) return out, req.Send() } -// DeleteEventStreamWithContext is the same as DeleteEventStream with the addition of +// DeleteCampaignWithContext is the same as DeleteCampaign with the addition of // the ability to pass a context and additional request options. // -// See DeleteEventStream for details on how to use this API operation. +// See DeleteCampaign for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteEventStreamWithContext(ctx aws.Context, input *DeleteEventStreamInput, opts ...request.Option) (*DeleteEventStreamOutput, error) { - req, out := c.DeleteEventStreamRequest(input) +func (c *Pinpoint) DeleteCampaignWithContext(ctx aws.Context, input *DeleteCampaignInput, opts ...request.Option) (*DeleteCampaignOutput, error) { + req, out := c.DeleteCampaignRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteGcmChannel = "DeleteGcmChannel" +const opDeleteEmailChannel = "DeleteEmailChannel" -// DeleteGcmChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGcmChannel operation. The "output" return +// DeleteEmailChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEmailChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteGcmChannel for more information on using the DeleteGcmChannel +// See DeleteEmailChannel for more information on using the DeleteEmailChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteGcmChannelRequest method. -// req, resp := client.DeleteGcmChannelRequest(params) +// // Example sending a request using the DeleteEmailChannelRequest method. +// req, resp := client.DeleteEmailChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteGcmChannel -func (c *Pinpoint) DeleteGcmChannelRequest(input *DeleteGcmChannelInput) (req *request.Request, output *DeleteGcmChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailChannel +func (c *Pinpoint) DeleteEmailChannelRequest(input *DeleteEmailChannelInput) (req *request.Request, output *DeleteEmailChannelOutput) { op := &request.Operation{ - Name: opDeleteGcmChannel, + Name: opDeleteEmailChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/gcm", + HTTPPath: "/v1/apps/{application-id}/channels/email", } if input == nil { - input = &DeleteGcmChannelInput{} + input = &DeleteEmailChannelInput{} } - output = &DeleteGcmChannelOutput{} + output = &DeleteEmailChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteGcmChannel API operation for Amazon Pinpoint. +// DeleteEmailChannel API operation for Amazon Pinpoint. // -// Deletes the GCM channel for an app. +// Disables the email channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteGcmChannel for usage and error information. +// API operation DeleteEmailChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
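Every operation in these hunks returns the same set of error codes, so callers usually type-assert to awserr.Error and branch on Code, as the comments suggest. A sketch for DeleteEmailChannel, with a placeholder ID:

    // Assumes imports: log, github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/aws/awserr, github.com/aws/aws-sdk-go/service/pinpoint.
    func deleteEmailChannel(svc *pinpoint.Pinpoint) {
        _, err := svc.DeleteEmailChannel(&pinpoint.DeleteEmailChannelInput{
            ApplicationId: aws.String("exampleAppId"), // placeholder
        })
        if err == nil {
            return
        }
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case pinpoint.ErrCodeNotFoundException:
                log.Println("application or channel does not exist")
            case pinpoint.ErrCodeTooManyRequestsException:
                log.Println("throttled; try again later")
            default:
                log.Println(aerr.Code(), aerr.Message())
            }
            return
        }
        log.Println(err)
    }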
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteGcmChannel -func (c *Pinpoint) DeleteGcmChannel(input *DeleteGcmChannelInput) (*DeleteGcmChannelOutput, error) { - req, out := c.DeleteGcmChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailChannel +func (c *Pinpoint) DeleteEmailChannel(input *DeleteEmailChannelInput) (*DeleteEmailChannelOutput, error) { + req, out := c.DeleteEmailChannelRequest(input) return out, req.Send() } -// DeleteGcmChannelWithContext is the same as DeleteGcmChannel with the addition of +// DeleteEmailChannelWithContext is the same as DeleteEmailChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteGcmChannel for details on how to use this API operation. +// See DeleteEmailChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteGcmChannelWithContext(ctx aws.Context, input *DeleteGcmChannelInput, opts ...request.Option) (*DeleteGcmChannelOutput, error) { - req, out := c.DeleteGcmChannelRequest(input) +func (c *Pinpoint) DeleteEmailChannelWithContext(ctx aws.Context, input *DeleteEmailChannelInput, opts ...request.Option) (*DeleteEmailChannelOutput, error) { + req, out := c.DeleteEmailChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSegment = "DeleteSegment" +const opDeleteEmailTemplate = "DeleteEmailTemplate" -// DeleteSegmentRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSegment operation. The "output" return +// DeleteEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEmailTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSegment for more information on using the DeleteSegment +// See DeleteEmailTemplate for more information on using the DeleteEmailTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSegmentRequest method. -// req, resp := client.DeleteSegmentRequest(params) +// // Example sending a request using the DeleteEmailTemplateRequest method. 
+// req, resp := client.DeleteEmailTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSegment -func (c *Pinpoint) DeleteSegmentRequest(input *DeleteSegmentInput) (req *request.Request, output *DeleteSegmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailTemplate +func (c *Pinpoint) DeleteEmailTemplateRequest(input *DeleteEmailTemplateInput) (req *request.Request, output *DeleteEmailTemplateOutput) { op := &request.Operation{ - Name: opDeleteSegment, + Name: opDeleteEmailTemplate, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", + HTTPPath: "/v1/templates/{template-name}/email", } if input == nil { - input = &DeleteSegmentInput{} + input = &DeleteEmailTemplateInput{} } - output = &DeleteSegmentOutput{} + output = &DeleteEmailTemplateOutput{} req = c.newRequest(op, input, output) return } -// DeleteSegment API operation for Amazon Pinpoint. +// DeleteEmailTemplate API operation for Amazon Pinpoint. // -// Deletes a segment. +// Deletes a message template that was designed for use in messages that were +// sent through the email channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteSegment for usage and error information. +// API operation DeleteEmailTemplate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSegment -func (c *Pinpoint) DeleteSegment(input *DeleteSegmentInput) (*DeleteSegmentOutput, error) { - req, out := c.DeleteSegmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEmailTemplate +func (c *Pinpoint) DeleteEmailTemplate(input *DeleteEmailTemplateInput) (*DeleteEmailTemplateOutput, error) { + req, out := c.DeleteEmailTemplateRequest(input) return out, req.Send() } -// DeleteSegmentWithContext is the same as DeleteSegment with the addition of +// DeleteEmailTemplateWithContext is the same as DeleteEmailTemplate with the addition of // the ability to pass a context and additional request options. // -// See DeleteSegment for details on how to use this API operation. +// See DeleteEmailTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteSegmentWithContext(ctx aws.Context, input *DeleteSegmentInput, opts ...request.Option) (*DeleteSegmentOutput, error) { - req, out := c.DeleteSegmentRequest(input) +func (c *Pinpoint) DeleteEmailTemplateWithContext(ctx aws.Context, input *DeleteEmailTemplateInput, opts ...request.Option) (*DeleteEmailTemplateOutput, error) { + req, out := c.DeleteEmailTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSmsChannel = "DeleteSmsChannel" +const opDeleteEndpoint = "DeleteEndpoint" -// DeleteSmsChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSmsChannel operation. The "output" return +// DeleteEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEndpoint operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSmsChannel for more information on using the DeleteSmsChannel +// See DeleteEndpoint for more information on using the DeleteEndpoint // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSmsChannelRequest method. -// req, resp := client.DeleteSmsChannelRequest(params) +// // Example sending a request using the DeleteEndpointRequest method. +// req, resp := client.DeleteEndpointRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsChannel -func (c *Pinpoint) DeleteSmsChannelRequest(input *DeleteSmsChannelInput) (req *request.Request, output *DeleteSmsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEndpoint +func (c *Pinpoint) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { op := &request.Operation{ - Name: opDeleteSmsChannel, + Name: opDeleteEndpoint, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/sms", + HTTPPath: "/v1/apps/{application-id}/endpoints/{endpoint-id}", } if input == nil { - input = &DeleteSmsChannelInput{} + input = &DeleteEndpointInput{} } - output = &DeleteSmsChannelOutput{} + output = &DeleteEndpointOutput{} req = c.newRequest(op, input, output) return } -// DeleteSmsChannel API operation for Amazon Pinpoint. +// DeleteEndpoint API operation for Amazon Pinpoint. // -// Delete an SMS channel. +// Deletes an endpoint from an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteSmsChannel for usage and error information. +// API operation DeleteEndpoint for usage and error information. 
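The WithContext variants also take variadic request.Option values, which is one way to apply the per-call customization the comments mention. A sketch that enables wire-level debug logging for a single DeleteEmailTemplate call; the template name is a placeholder:

    // Assumes imports: context, github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/aws/request, github.com/aws/aws-sdk-go/service/pinpoint.
    func deleteEmailTemplateVerbose(ctx context.Context, svc *pinpoint.Pinpoint) error {
        _, err := svc.DeleteEmailTemplateWithContext(ctx,
            &pinpoint.DeleteEmailTemplateInput{
                TemplateName: aws.String("exampleTemplate"), // placeholder
            },
            // Per-call option: log the HTTP request/response for this call only.
            request.WithLogLevel(aws.LogDebugWithHTTPBody),
        )
        return err
    }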
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsChannel -func (c *Pinpoint) DeleteSmsChannel(input *DeleteSmsChannelInput) (*DeleteSmsChannelOutput, error) { - req, out := c.DeleteSmsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEndpoint +func (c *Pinpoint) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) return out, req.Send() } -// DeleteSmsChannelWithContext is the same as DeleteSmsChannel with the addition of +// DeleteEndpointWithContext is the same as DeleteEndpoint with the addition of // the ability to pass a context and additional request options. // -// See DeleteSmsChannel for details on how to use this API operation. +// See DeleteEndpoint for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteSmsChannelWithContext(ctx aws.Context, input *DeleteSmsChannelInput, opts ...request.Option) (*DeleteSmsChannelOutput, error) { - req, out := c.DeleteSmsChannelRequest(input) +func (c *Pinpoint) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteUserEndpoints = "DeleteUserEndpoints" +const opDeleteEventStream = "DeleteEventStream" -// DeleteUserEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteUserEndpoints operation. The "output" return +// DeleteEventStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventStream operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteUserEndpoints for more information on using the DeleteUserEndpoints +// See DeleteEventStream for more information on using the DeleteEventStream // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteUserEndpointsRequest method. -// req, resp := client.DeleteUserEndpointsRequest(params) +// // Example sending a request using the DeleteEventStreamRequest method. +// req, resp := client.DeleteEventStreamRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteUserEndpoints -func (c *Pinpoint) DeleteUserEndpointsRequest(input *DeleteUserEndpointsInput) (req *request.Request, output *DeleteUserEndpointsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEventStream +func (c *Pinpoint) DeleteEventStreamRequest(input *DeleteEventStreamInput) (req *request.Request, output *DeleteEventStreamOutput) { op := &request.Operation{ - Name: opDeleteUserEndpoints, + Name: opDeleteEventStream, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/users/{user-id}", + HTTPPath: "/v1/apps/{application-id}/eventstream", } if input == nil { - input = &DeleteUserEndpointsInput{} + input = &DeleteEventStreamInput{} } - output = &DeleteUserEndpointsOutput{} + output = &DeleteEventStreamOutput{} req = c.newRequest(op, input, output) return } -// DeleteUserEndpoints API operation for Amazon Pinpoint. +// DeleteEventStream API operation for Amazon Pinpoint. // -// Deletes endpoints that are associated with a User ID. +// Deletes the event stream for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteUserEndpoints for usage and error information. +// API operation DeleteEventStream for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
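The Request handle returned by the *Request constructors is also where a caller can inject "custom headers, or retry logic," as the generated comments put it. A sketch that sets an invented header on a DeleteEventStream request before sending it:

    // Assumes imports: github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/service/pinpoint.
    func deleteEventStreamWithHeader(svc *pinpoint.Pinpoint) error {
        req, _ := svc.DeleteEventStreamRequest(&pinpoint.DeleteEventStreamInput{
            ApplicationId: aws.String("exampleAppId"), // placeholder
        })
        // Headers set before Send go out with the request.
        req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123") // invented header
        return req.Send()
    }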
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteUserEndpoints -func (c *Pinpoint) DeleteUserEndpoints(input *DeleteUserEndpointsInput) (*DeleteUserEndpointsOutput, error) { - req, out := c.DeleteUserEndpointsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteEventStream +func (c *Pinpoint) DeleteEventStream(input *DeleteEventStreamInput) (*DeleteEventStreamOutput, error) { + req, out := c.DeleteEventStreamRequest(input) return out, req.Send() } -// DeleteUserEndpointsWithContext is the same as DeleteUserEndpoints with the addition of +// DeleteEventStreamWithContext is the same as DeleteEventStream with the addition of // the ability to pass a context and additional request options. // -// See DeleteUserEndpoints for details on how to use this API operation. +// See DeleteEventStream for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteUserEndpointsWithContext(ctx aws.Context, input *DeleteUserEndpointsInput, opts ...request.Option) (*DeleteUserEndpointsOutput, error) { - req, out := c.DeleteUserEndpointsRequest(input) +func (c *Pinpoint) DeleteEventStreamWithContext(ctx aws.Context, input *DeleteEventStreamInput, opts ...request.Option) (*DeleteEventStreamOutput, error) { + req, out := c.DeleteEventStreamRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteVoiceChannel = "DeleteVoiceChannel" +const opDeleteGcmChannel = "DeleteGcmChannel" -// DeleteVoiceChannelRequest generates a "aws/request.Request" representing the -// client's request for the DeleteVoiceChannel operation. The "output" return +// DeleteGcmChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGcmChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteVoiceChannel for more information on using the DeleteVoiceChannel +// See DeleteGcmChannel for more information on using the DeleteGcmChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteVoiceChannelRequest method. -// req, resp := client.DeleteVoiceChannelRequest(params) +// // Example sending a request using the DeleteGcmChannelRequest method. 
+// req, resp := client.DeleteGcmChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteVoiceChannel -func (c *Pinpoint) DeleteVoiceChannelRequest(input *DeleteVoiceChannelInput) (req *request.Request, output *DeleteVoiceChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteGcmChannel +func (c *Pinpoint) DeleteGcmChannelRequest(input *DeleteGcmChannelInput) (req *request.Request, output *DeleteGcmChannelOutput) { op := &request.Operation{ - Name: opDeleteVoiceChannel, + Name: opDeleteGcmChannel, HTTPMethod: "DELETE", - HTTPPath: "/v1/apps/{application-id}/channels/voice", + HTTPPath: "/v1/apps/{application-id}/channels/gcm", } if input == nil { - input = &DeleteVoiceChannelInput{} + input = &DeleteGcmChannelInput{} } - output = &DeleteVoiceChannelOutput{} + output = &DeleteGcmChannelOutput{} req = c.newRequest(op, input, output) return } -// DeleteVoiceChannel API operation for Amazon Pinpoint. +// DeleteGcmChannel API operation for Amazon Pinpoint. // -// Delete an Voice channel +// Disables the GCM channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation DeleteVoiceChannel for usage and error information. +// API operation DeleteGcmChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteVoiceChannel -func (c *Pinpoint) DeleteVoiceChannel(input *DeleteVoiceChannelInput) (*DeleteVoiceChannelOutput, error) { - req, out := c.DeleteVoiceChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteGcmChannel +func (c *Pinpoint) DeleteGcmChannel(input *DeleteGcmChannelInput) (*DeleteGcmChannelOutput, error) { + req, out := c.DeleteGcmChannelRequest(input) return out, req.Send() } -// DeleteVoiceChannelWithContext is the same as DeleteVoiceChannel with the addition of +// DeleteGcmChannelWithContext is the same as DeleteGcmChannel with the addition of // the ability to pass a context and additional request options. // -// See DeleteVoiceChannel for details on how to use this API operation. +// See DeleteGcmChannel for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) DeleteVoiceChannelWithContext(ctx aws.Context, input *DeleteVoiceChannelInput, opts ...request.Option) (*DeleteVoiceChannelOutput, error) { - req, out := c.DeleteVoiceChannelRequest(input) +func (c *Pinpoint) DeleteGcmChannelWithContext(ctx aws.Context, input *DeleteGcmChannelInput, opts ...request.Option) (*DeleteGcmChannelOutput, error) { + req, out := c.DeleteGcmChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAdmChannel = "GetAdmChannel" +const opDeleteJourney = "DeleteJourney" -// GetAdmChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetAdmChannel operation. The "output" return +// DeleteJourneyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteJourney operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAdmChannel for more information on using the GetAdmChannel +// See DeleteJourney for more information on using the DeleteJourney // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAdmChannelRequest method. -// req, resp := client.GetAdmChannelRequest(params) +// // Example sending a request using the DeleteJourneyRequest method. +// req, resp := client.DeleteJourneyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetAdmChannel -func (c *Pinpoint) GetAdmChannelRequest(input *GetAdmChannelInput) (req *request.Request, output *GetAdmChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteJourney +func (c *Pinpoint) DeleteJourneyRequest(input *DeleteJourneyInput) (req *request.Request, output *DeleteJourneyOutput) { op := &request.Operation{ - Name: opGetAdmChannel, - HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/adm", + Name: opDeleteJourney, + HTTPMethod: "DELETE", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}", } if input == nil { - input = &GetAdmChannelInput{} + input = &DeleteJourneyInput{} } - output = &GetAdmChannelOutput{} + output = &DeleteJourneyOutput{} req = c.newRequest(op, input, output) return } -// GetAdmChannel API operation for Amazon Pinpoint. +// DeleteJourney API operation for Amazon Pinpoint. // -// Get an ADM channel. +// Deletes a journey from an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetAdmChannel for usage and error information. +// API operation DeleteJourney for usage and error information. 
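DeleteJourney is one of the operations new to this SDK version; journeys hang off /v1/apps/{application-id}/journeys/{journey-id}, as the hunk above shows. A sketch with placeholder IDs:

    // Assumes imports: github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/service/pinpoint.
    func deleteJourney(svc *pinpoint.Pinpoint) error {
        _, err := svc.DeleteJourney(&pinpoint.DeleteJourneyInput{
            ApplicationId: aws.String("exampleAppId"),     // placeholder
            JourneyId:     aws.String("exampleJourneyId"), // placeholder
        })
        return err
    }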
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetAdmChannel -func (c *Pinpoint) GetAdmChannel(input *GetAdmChannelInput) (*GetAdmChannelOutput, error) { - req, out := c.GetAdmChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteJourney +func (c *Pinpoint) DeleteJourney(input *DeleteJourneyInput) (*DeleteJourneyOutput, error) { + req, out := c.DeleteJourneyRequest(input) return out, req.Send() } -// GetAdmChannelWithContext is the same as GetAdmChannel with the addition of +// DeleteJourneyWithContext is the same as DeleteJourney with the addition of // the ability to pass a context and additional request options. // -// See GetAdmChannel for details on how to use this API operation. +// See DeleteJourney for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetAdmChannelWithContext(ctx aws.Context, input *GetAdmChannelInput, opts ...request.Option) (*GetAdmChannelOutput, error) { - req, out := c.GetAdmChannelRequest(input) +func (c *Pinpoint) DeleteJourneyWithContext(ctx aws.Context, input *DeleteJourneyInput, opts ...request.Option) (*DeleteJourneyOutput, error) { + req, out := c.DeleteJourneyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetApnsChannel = "GetApnsChannel" +const opDeletePushTemplate = "DeletePushTemplate" -// GetApnsChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetApnsChannel operation. The "output" return +// DeletePushTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeletePushTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetApnsChannel for more information on using the GetApnsChannel +// See DeletePushTemplate for more information on using the DeletePushTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the GetApnsChannelRequest method. -// req, resp := client.GetApnsChannelRequest(params) +// // Example sending a request using the DeletePushTemplateRequest method. +// req, resp := client.DeletePushTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsChannel -func (c *Pinpoint) GetApnsChannelRequest(input *GetApnsChannelInput) (req *request.Request, output *GetApnsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeletePushTemplate +func (c *Pinpoint) DeletePushTemplateRequest(input *DeletePushTemplateInput) (req *request.Request, output *DeletePushTemplateOutput) { op := &request.Operation{ - Name: opGetApnsChannel, - HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/apns", + Name: opDeletePushTemplate, + HTTPMethod: "DELETE", + HTTPPath: "/v1/templates/{template-name}/push", } if input == nil { - input = &GetApnsChannelInput{} + input = &DeletePushTemplateInput{} } - output = &GetApnsChannelOutput{} + output = &DeletePushTemplateOutput{} req = c.newRequest(op, input, output) return } -// GetApnsChannel API operation for Amazon Pinpoint. +// DeletePushTemplate API operation for Amazon Pinpoint. // -// Returns information about the APNs channel for an app. +// Deletes a message template that was designed for use in messages that were +// sent through a push notification channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetApnsChannel for usage and error information. +// API operation DeletePushTemplate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
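The generated WithContext wrappers are thin: they build the request, call SetContext, apply any options, and Send, exactly as these hunks show. The same steps can be done by hand when more control is needed; a sketch using DeletePushTemplate's request constructor and a placeholder template name (note the new template operations key on a template name rather than an application ID):

    // Assumes imports: context, github.com/aws/aws-sdk-go/aws,
    // github.com/aws/aws-sdk-go/service/pinpoint.
    func deletePushTemplateManual(ctx context.Context, svc *pinpoint.Pinpoint) error {
        req, _ := svc.DeletePushTemplateRequest(&pinpoint.DeletePushTemplateInput{
            TemplateName: aws.String("exampleTemplate"), // placeholder
        })
        req.SetContext(ctx) // panics if ctx is nil, per the comments above
        // req.ApplyOptions(...) would apply per-call request.Options here.
        return req.Send()
    }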
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsChannel -func (c *Pinpoint) GetApnsChannel(input *GetApnsChannelInput) (*GetApnsChannelOutput, error) { - req, out := c.GetApnsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeletePushTemplate +func (c *Pinpoint) DeletePushTemplate(input *DeletePushTemplateInput) (*DeletePushTemplateOutput, error) { + req, out := c.DeletePushTemplateRequest(input) return out, req.Send() } -// GetApnsChannelWithContext is the same as GetApnsChannel with the addition of +// DeletePushTemplateWithContext is the same as DeletePushTemplate with the addition of // the ability to pass a context and additional request options. // -// See GetApnsChannel for details on how to use this API operation. +// See DeletePushTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetApnsChannelWithContext(ctx aws.Context, input *GetApnsChannelInput, opts ...request.Option) (*GetApnsChannelOutput, error) { - req, out := c.GetApnsChannelRequest(input) +func (c *Pinpoint) DeletePushTemplateWithContext(ctx aws.Context, input *DeletePushTemplateInput, opts ...request.Option) (*DeletePushTemplateOutput, error) { + req, out := c.DeletePushTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetApnsSandboxChannel = "GetApnsSandboxChannel" +const opDeleteSegment = "DeleteSegment" -// GetApnsSandboxChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetApnsSandboxChannel operation. The "output" return +// DeleteSegmentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSegment operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetApnsSandboxChannel for more information on using the GetApnsSandboxChannel +// See DeleteSegment for more information on using the DeleteSegment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetApnsSandboxChannelRequest method. -// req, resp := client.GetApnsSandboxChannelRequest(params) +// // Example sending a request using the DeleteSegmentRequest method. 
+// req, resp := client.DeleteSegmentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsSandboxChannel -func (c *Pinpoint) GetApnsSandboxChannelRequest(input *GetApnsSandboxChannelInput) (req *request.Request, output *GetApnsSandboxChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSegment +func (c *Pinpoint) DeleteSegmentRequest(input *DeleteSegmentInput) (req *request.Request, output *DeleteSegmentOutput) { op := &request.Operation{ - Name: opGetApnsSandboxChannel, - HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", + Name: opDeleteSegment, + HTTPMethod: "DELETE", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", } if input == nil { - input = &GetApnsSandboxChannelInput{} + input = &DeleteSegmentInput{} } - output = &GetApnsSandboxChannelOutput{} + output = &DeleteSegmentOutput{} req = c.newRequest(op, input, output) return } -// GetApnsSandboxChannel API operation for Amazon Pinpoint. +// DeleteSegment API operation for Amazon Pinpoint. // -// Get an APNS sandbox channel. +// Deletes a segment from an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetApnsSandboxChannel for usage and error information. +// API operation DeleteSegment for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsSandboxChannel -func (c *Pinpoint) GetApnsSandboxChannel(input *GetApnsSandboxChannelInput) (*GetApnsSandboxChannelOutput, error) { - req, out := c.GetApnsSandboxChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSegment +func (c *Pinpoint) DeleteSegment(input *DeleteSegmentInput) (*DeleteSegmentOutput, error) { + req, out := c.DeleteSegmentRequest(input) return out, req.Send() } -// GetApnsSandboxChannelWithContext is the same as GetApnsSandboxChannel with the addition of +// DeleteSegmentWithContext is the same as DeleteSegment with the addition of // the ability to pass a context and additional request options. // -// See GetApnsSandboxChannel for details on how to use this API operation. +// See DeleteSegment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetApnsSandboxChannelWithContext(ctx aws.Context, input *GetApnsSandboxChannelInput, opts ...request.Option) (*GetApnsSandboxChannelOutput, error) { - req, out := c.GetApnsSandboxChannelRequest(input) +func (c *Pinpoint) DeleteSegmentWithContext(ctx aws.Context, input *DeleteSegmentInput, opts ...request.Option) (*DeleteSegmentOutput, error) { + req, out := c.DeleteSegmentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetApnsVoipChannel = "GetApnsVoipChannel" +const opDeleteSmsChannel = "DeleteSmsChannel" -// GetApnsVoipChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetApnsVoipChannel operation. The "output" return +// DeleteSmsChannelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSmsChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetApnsVoipChannel for more information on using the GetApnsVoipChannel +// See DeleteSmsChannel for more information on using the DeleteSmsChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetApnsVoipChannelRequest method. -// req, resp := client.GetApnsVoipChannelRequest(params) +// // Example sending a request using the DeleteSmsChannelRequest method. +// req, resp := client.DeleteSmsChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipChannel -func (c *Pinpoint) GetApnsVoipChannelRequest(input *GetApnsVoipChannelInput) (req *request.Request, output *GetApnsVoipChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsChannel +func (c *Pinpoint) DeleteSmsChannelRequest(input *DeleteSmsChannelInput) (req *request.Request, output *DeleteSmsChannelOutput) { op := &request.Operation{ - Name: opGetApnsVoipChannel, - HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", + Name: opDeleteSmsChannel, + HTTPMethod: "DELETE", + HTTPPath: "/v1/apps/{application-id}/channels/sms", } if input == nil { - input = &GetApnsVoipChannelInput{} + input = &DeleteSmsChannelInput{} } - output = &GetApnsVoipChannelOutput{} + output = &DeleteSmsChannelOutput{} req = c.newRequest(op, input, output) return } -// GetApnsVoipChannel API operation for Amazon Pinpoint. +// DeleteSmsChannel API operation for Amazon Pinpoint. // -// Get an APNS VoIP channel +// Disables the SMS channel for an application and deletes any existing settings +// for the channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetApnsVoipChannel for usage and error information. 
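Every operation in this file advertises the same six error codes. Here is a sketch of the awserr type assertion the doc comments recommend, applied to the DeleteSegment call above; it assumes the svc client and imports from the earlier sketch plus github.com/aws/aws-sdk-go/aws/awserr, and the IDs are placeholders.

    out, err := svc.DeleteSegment(&pinpoint.DeleteSegmentInput{
        ApplicationId: aws.String("example-app-id"),     // hypothetical ID
        SegmentId:     aws.String("example-segment-id"), // hypothetical ID
    })
    if err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            // Service-side failures carry one of the codes listed above.
            switch aerr.Code() {
            case pinpoint.ErrCodeNotFoundException:
                fmt.Println("segment already gone:", aerr.Message())
            case pinpoint.ErrCodeTooManyRequestsException:
                fmt.Println("throttled, retry later:", aerr.Message())
            default:
                fmt.Println(aerr.Code(), aerr.Message())
            }
            return
        }
        fmt.Println(err) // non-service error (network, serialization, ...)
        return
    }
    fmt.Println(out)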
 
-const opGetApnsVoipChannel = "GetApnsVoipChannel"
+const opDeleteSmsChannel = "DeleteSmsChannel"
 
-// GetApnsVoipChannelRequest generates a "aws/request.Request" representing the
-// client's request for the GetApnsVoipChannel operation. The "output" return
+// DeleteSmsChannelRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteSmsChannel operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetApnsVoipChannel for more information on using the GetApnsVoipChannel
+// See DeleteSmsChannel for more information on using the DeleteSmsChannel
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetApnsVoipChannelRequest method.
-//    req, resp := client.GetApnsVoipChannelRequest(params)
+//    // Example sending a request using the DeleteSmsChannelRequest method.
+//    req, resp := client.DeleteSmsChannelRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipChannel
-func (c *Pinpoint) GetApnsVoipChannelRequest(input *GetApnsVoipChannelInput) (req *request.Request, output *GetApnsVoipChannelOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsChannel
+func (c *Pinpoint) DeleteSmsChannelRequest(input *DeleteSmsChannelInput) (req *request.Request, output *DeleteSmsChannelOutput) {
 	op := &request.Operation{
-		Name:       opGetApnsVoipChannel,
-		HTTPMethod: "GET",
-		HTTPPath:   "/v1/apps/{application-id}/channels/apns_voip",
+		Name:       opDeleteSmsChannel,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/v1/apps/{application-id}/channels/sms",
 	}
 
 	if input == nil {
-		input = &GetApnsVoipChannelInput{}
+		input = &DeleteSmsChannelInput{}
 	}
 
-	output = &GetApnsVoipChannelOutput{}
+	output = &DeleteSmsChannelOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetApnsVoipChannel API operation for Amazon Pinpoint.
+// DeleteSmsChannel API operation for Amazon Pinpoint.
 //
-// Get an APNS VoIP channel
+// Disables the SMS channel for an application and deletes any existing settings
+// for the channel.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetApnsVoipChannel for usage and error information.
+// API operation DeleteSmsChannel for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipChannel
-func (c *Pinpoint) GetApnsVoipChannel(input *GetApnsVoipChannelInput) (*GetApnsVoipChannelOutput, error) {
-	req, out := c.GetApnsVoipChannelRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsChannel
+func (c *Pinpoint) DeleteSmsChannel(input *DeleteSmsChannelInput) (*DeleteSmsChannelOutput, error) {
+	req, out := c.DeleteSmsChannelRequest(input)
 	return out, req.Send()
 }
 
-// GetApnsVoipChannelWithContext is the same as GetApnsVoipChannel with the addition of
+// DeleteSmsChannelWithContext is the same as DeleteSmsChannel with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetApnsVoipChannel for details on how to use this API operation.
+// See DeleteSmsChannel for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetApnsVoipChannelWithContext(ctx aws.Context, input *GetApnsVoipChannelInput, opts ...request.Option) (*GetApnsVoipChannelOutput, error) {
-	req, out := c.GetApnsVoipChannelRequest(input)
+func (c *Pinpoint) DeleteSmsChannelWithContext(ctx aws.Context, input *DeleteSmsChannelInput, opts ...request.Option) (*DeleteSmsChannelOutput, error) {
+	req, out := c.DeleteSmsChannelRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opGetApnsVoipSandboxChannel = "GetApnsVoipSandboxChannel"
+const opDeleteSmsTemplate = "DeleteSmsTemplate"
 
-// GetApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the
-// client's request for the GetApnsVoipSandboxChannel operation. The "output" return
+// DeleteSmsTemplateRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteSmsTemplate operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetApnsVoipSandboxChannel for more information on using the GetApnsVoipSandboxChannel
+// See DeleteSmsTemplate for more information on using the DeleteSmsTemplate
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetApnsVoipSandboxChannelRequest method.
-//    req, resp := client.GetApnsVoipSandboxChannelRequest(params)
+//    // Example sending a request using the DeleteSmsTemplateRequest method.
+//    req, resp := client.DeleteSmsTemplateRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipSandboxChannel
-func (c *Pinpoint) GetApnsVoipSandboxChannelRequest(input *GetApnsVoipSandboxChannelInput) (req *request.Request, output *GetApnsVoipSandboxChannelOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsTemplate
+func (c *Pinpoint) DeleteSmsTemplateRequest(input *DeleteSmsTemplateInput) (req *request.Request, output *DeleteSmsTemplateOutput) {
 	op := &request.Operation{
-		Name:       opGetApnsVoipSandboxChannel,
-		HTTPMethod: "GET",
-		HTTPPath:   "/v1/apps/{application-id}/channels/apns_voip_sandbox",
+		Name:       opDeleteSmsTemplate,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/v1/templates/{template-name}/sms",
 	}
 
 	if input == nil {
-		input = &GetApnsVoipSandboxChannelInput{}
+		input = &DeleteSmsTemplateInput{}
 	}
 
-	output = &GetApnsVoipSandboxChannelOutput{}
+	output = &DeleteSmsTemplateOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetApnsVoipSandboxChannel API operation for Amazon Pinpoint.
+// DeleteSmsTemplate API operation for Amazon Pinpoint.
 //
-// Get an APNS VoIPSandbox channel
+// Deletes a message template that was designed for use in messages that were
+// sent through the SMS channel.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetApnsVoipSandboxChannel for usage and error information.
+// API operation DeleteSmsTemplate for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipSandboxChannel
-func (c *Pinpoint) GetApnsVoipSandboxChannel(input *GetApnsVoipSandboxChannelInput) (*GetApnsVoipSandboxChannelOutput, error) {
-	req, out := c.GetApnsVoipSandboxChannelRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteSmsTemplate
+func (c *Pinpoint) DeleteSmsTemplate(input *DeleteSmsTemplateInput) (*DeleteSmsTemplateOutput, error) {
+	req, out := c.DeleteSmsTemplateRequest(input)
 	return out, req.Send()
 }
 
-// GetApnsVoipSandboxChannelWithContext is the same as GetApnsVoipSandboxChannel with the addition of
+// DeleteSmsTemplateWithContext is the same as DeleteSmsTemplate with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetApnsVoipSandboxChannel for details on how to use this API operation.
+// See DeleteSmsTemplate for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetApnsVoipSandboxChannelWithContext(ctx aws.Context, input *GetApnsVoipSandboxChannelInput, opts ...request.Option) (*GetApnsVoipSandboxChannelOutput, error) {
-	req, out := c.GetApnsVoipSandboxChannelRequest(input)
+func (c *Pinpoint) DeleteSmsTemplateWithContext(ctx aws.Context, input *DeleteSmsTemplateInput, opts ...request.Option) (*DeleteSmsTemplateOutput, error) {
+	req, out := c.DeleteSmsTemplateRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opGetApp = "GetApp"
+const opDeleteUserEndpoints = "DeleteUserEndpoints"
 
-// GetAppRequest generates a "aws/request.Request" representing the
-// client's request for the GetApp operation. The "output" return
+// DeleteUserEndpointsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteUserEndpoints operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetApp for more information on using the GetApp
+// See DeleteUserEndpoints for more information on using the DeleteUserEndpoints
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetAppRequest method.
-//    req, resp := client.GetAppRequest(params)
+//    // Example sending a request using the DeleteUserEndpointsRequest method.
+//    req, resp := client.DeleteUserEndpointsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApp
-func (c *Pinpoint) GetAppRequest(input *GetAppInput) (req *request.Request, output *GetAppOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteUserEndpoints
+func (c *Pinpoint) DeleteUserEndpointsRequest(input *DeleteUserEndpointsInput) (req *request.Request, output *DeleteUserEndpointsOutput) {
 	op := &request.Operation{
-		Name:       opGetApp,
-		HTTPMethod: "GET",
-		HTTPPath:   "/v1/apps/{application-id}",
+		Name:       opDeleteUserEndpoints,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/v1/apps/{application-id}/users/{user-id}",
 	}
 
 	if input == nil {
-		input = &GetAppInput{}
+		input = &DeleteUserEndpointsInput{}
 	}
 
-	output = &GetAppOutput{}
+	output = &DeleteUserEndpointsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetApp API operation for Amazon Pinpoint.
+// DeleteUserEndpoints API operation for Amazon Pinpoint.
 //
-// Returns information about an app.
+// Deletes all the endpoints that are associated with a specific user ID.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetApp for usage and error information.
+// API operation DeleteUserEndpoints for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApp
-func (c *Pinpoint) GetApp(input *GetAppInput) (*GetAppOutput, error) {
-	req, out := c.GetAppRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteUserEndpoints
+func (c *Pinpoint) DeleteUserEndpoints(input *DeleteUserEndpointsInput) (*DeleteUserEndpointsOutput, error) {
+	req, out := c.DeleteUserEndpointsRequest(input)
 	return out, req.Send()
 }
 
-// GetAppWithContext is the same as GetApp with the addition of
+// DeleteUserEndpointsWithContext is the same as DeleteUserEndpoints with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetApp for details on how to use this API operation.
+// See DeleteUserEndpoints for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetAppWithContext(ctx aws.Context, input *GetAppInput, opts ...request.Option) (*GetAppOutput, error) {
-	req, out := c.GetAppRequest(input)
+func (c *Pinpoint) DeleteUserEndpointsWithContext(ctx aws.Context, input *DeleteUserEndpointsInput, opts ...request.Option) (*DeleteUserEndpointsOutput, error) {
+	req, out := c.DeleteUserEndpointsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opGetApplicationSettings = "GetApplicationSettings"
+const opDeleteVoiceChannel = "DeleteVoiceChannel"
 
-// GetApplicationSettingsRequest generates a "aws/request.Request" representing the
-// client's request for the GetApplicationSettings operation. The "output" return
+// DeleteVoiceChannelRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVoiceChannel operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetApplicationSettings for more information on using the GetApplicationSettings
+// See DeleteVoiceChannel for more information on using the DeleteVoiceChannel
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetApplicationSettingsRequest method.
-//    req, resp := client.GetApplicationSettingsRequest(params)
+//    // Example sending a request using the DeleteVoiceChannelRequest method.
+//    req, resp := client.DeleteVoiceChannelRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationSettings
-func (c *Pinpoint) GetApplicationSettingsRequest(input *GetApplicationSettingsInput) (req *request.Request, output *GetApplicationSettingsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteVoiceChannel
+func (c *Pinpoint) DeleteVoiceChannelRequest(input *DeleteVoiceChannelInput) (req *request.Request, output *DeleteVoiceChannelOutput) {
 	op := &request.Operation{
-		Name:       opGetApplicationSettings,
-		HTTPMethod: "GET",
-		HTTPPath:   "/v1/apps/{application-id}/settings",
+		Name:       opDeleteVoiceChannel,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/v1/apps/{application-id}/channels/voice",
 	}
 
 	if input == nil {
-		input = &GetApplicationSettingsInput{}
+		input = &DeleteVoiceChannelInput{}
 	}
 
-	output = &GetApplicationSettingsOutput{}
+	output = &DeleteVoiceChannelOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetApplicationSettings API operation for Amazon Pinpoint.
+// DeleteVoiceChannel API operation for Amazon Pinpoint.
 //
-// Used to request the settings for an app.
+// Disables the voice channel for an application and deletes any existing settings
+// for the channel.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetApplicationSettings for usage and error information.
+// API operation DeleteVoiceChannel for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationSettings
-func (c *Pinpoint) GetApplicationSettings(input *GetApplicationSettingsInput) (*GetApplicationSettingsOutput, error) {
-	req, out := c.GetApplicationSettingsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteVoiceChannel
+func (c *Pinpoint) DeleteVoiceChannel(input *DeleteVoiceChannelInput) (*DeleteVoiceChannelOutput, error) {
+	req, out := c.DeleteVoiceChannelRequest(input)
 	return out, req.Send()
 }
 
-// GetApplicationSettingsWithContext is the same as GetApplicationSettings with the addition of
+// DeleteVoiceChannelWithContext is the same as DeleteVoiceChannel with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetApplicationSettings for details on how to use this API operation.
+// See DeleteVoiceChannel for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetApplicationSettingsWithContext(ctx aws.Context, input *GetApplicationSettingsInput, opts ...request.Option) (*GetApplicationSettingsOutput, error) {
-	req, out := c.GetApplicationSettingsRequest(input)
+func (c *Pinpoint) DeleteVoiceChannelWithContext(ctx aws.Context, input *DeleteVoiceChannelInput, opts ...request.Option) (*DeleteVoiceChannelOutput, error) {
+	req, out := c.DeleteVoiceChannelRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
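The *Request methods exist so callers can adjust a call before it goes out. Here is a sketch of that two-step pattern using GetAdmChannelRequest, whose diff follows just below; the header name and value are invented for illustration, and svc is the hypothetical client from the first sketch.

    // Two-step Request/Send pattern described in the doc comments above:
    // build the request, mutate it, then send it.
    req, resp := svc.GetAdmChannelRequest(&pinpoint.GetAdmChannelInput{
        ApplicationId: aws.String("example-app-id"), // hypothetical ID
    })
    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123") // hypothetical header

    if err := req.Send(); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(resp) // resp is now filled, as in the generated examples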
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAppsRequest method. -// req, resp := client.GetAppsRequest(params) +// // Example sending a request using the GetAdmChannelRequest method. +// req, resp := client.GetAdmChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApps -func (c *Pinpoint) GetAppsRequest(input *GetAppsInput) (req *request.Request, output *GetAppsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetAdmChannel +func (c *Pinpoint) GetAdmChannelRequest(input *GetAdmChannelInput) (req *request.Request, output *GetAdmChannelOutput) { op := &request.Operation{ - Name: opGetApps, + Name: opGetAdmChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps", + HTTPPath: "/v1/apps/{application-id}/channels/adm", } if input == nil { - input = &GetAppsInput{} + input = &GetAdmChannelInput{} } - output = &GetAppsOutput{} + output = &GetAdmChannelOutput{} req = c.newRequest(op, input, output) return } -// GetApps API operation for Amazon Pinpoint. +// GetAdmChannel API operation for Amazon Pinpoint. // -// Returns information about your apps. +// Retrieves information about the status and settings of the ADM channel for +// an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetApps for usage and error information. +// API operation GetAdmChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApps -func (c *Pinpoint) GetApps(input *GetAppsInput) (*GetAppsOutput, error) { - req, out := c.GetAppsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetAdmChannel +func (c *Pinpoint) GetAdmChannel(input *GetAdmChannelInput) (*GetAdmChannelOutput, error) { + req, out := c.GetAdmChannelRequest(input) return out, req.Send() } -// GetAppsWithContext is the same as GetApps with the addition of +// GetAdmChannelWithContext is the same as GetAdmChannel with the addition of // the ability to pass a context and additional request options. // -// See GetApps for details on how to use this API operation. 
+// See GetAdmChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetAppsWithContext(ctx aws.Context, input *GetAppsInput, opts ...request.Option) (*GetAppsOutput, error) { - req, out := c.GetAppsRequest(input) +func (c *Pinpoint) GetAdmChannelWithContext(ctx aws.Context, input *GetAdmChannelInput, opts ...request.Option) (*GetAdmChannelOutput, error) { + req, out := c.GetAdmChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetBaiduChannel = "GetBaiduChannel" +const opGetApnsChannel = "GetApnsChannel" -// GetBaiduChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetBaiduChannel operation. The "output" return +// GetApnsChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetApnsChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetBaiduChannel for more information on using the GetBaiduChannel +// See GetApnsChannel for more information on using the GetApnsChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetBaiduChannelRequest method. -// req, resp := client.GetBaiduChannelRequest(params) +// // Example sending a request using the GetApnsChannelRequest method. +// req, resp := client.GetApnsChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetBaiduChannel -func (c *Pinpoint) GetBaiduChannelRequest(input *GetBaiduChannelInput) (req *request.Request, output *GetBaiduChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsChannel +func (c *Pinpoint) GetApnsChannelRequest(input *GetApnsChannelInput) (req *request.Request, output *GetApnsChannelOutput) { op := &request.Operation{ - Name: opGetBaiduChannel, + Name: opGetApnsChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/baidu", + HTTPPath: "/v1/apps/{application-id}/channels/apns", } if input == nil { - input = &GetBaiduChannelInput{} + input = &GetApnsChannelInput{} } - output = &GetBaiduChannelOutput{} + output = &GetApnsChannelOutput{} req = c.newRequest(op, input, output) return } -// GetBaiduChannel API operation for Amazon Pinpoint. +// GetApnsChannel API operation for Amazon Pinpoint. // -// Get a BAIDU GCM channel +// Retrieves information about the status and settings of the APNs channel for +// an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetBaiduChannel for usage and error information. 
+// API operation GetApnsChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetBaiduChannel -func (c *Pinpoint) GetBaiduChannel(input *GetBaiduChannelInput) (*GetBaiduChannelOutput, error) { - req, out := c.GetBaiduChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsChannel +func (c *Pinpoint) GetApnsChannel(input *GetApnsChannelInput) (*GetApnsChannelOutput, error) { + req, out := c.GetApnsChannelRequest(input) return out, req.Send() } -// GetBaiduChannelWithContext is the same as GetBaiduChannel with the addition of +// GetApnsChannelWithContext is the same as GetApnsChannel with the addition of // the ability to pass a context and additional request options. // -// See GetBaiduChannel for details on how to use this API operation. +// See GetApnsChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetBaiduChannelWithContext(ctx aws.Context, input *GetBaiduChannelInput, opts ...request.Option) (*GetBaiduChannelOutput, error) { - req, out := c.GetBaiduChannelRequest(input) +func (c *Pinpoint) GetApnsChannelWithContext(ctx aws.Context, input *GetApnsChannelInput, opts ...request.Option) (*GetApnsChannelOutput, error) { + req, out := c.GetApnsChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetCampaign = "GetCampaign" +const opGetApnsSandboxChannel = "GetApnsSandboxChannel" -// GetCampaignRequest generates a "aws/request.Request" representing the -// client's request for the GetCampaign operation. The "output" return +// GetApnsSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetApnsSandboxChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCampaign for more information on using the GetCampaign +// See GetApnsSandboxChannel for more information on using the GetApnsSandboxChannel // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCampaignRequest method. -// req, resp := client.GetCampaignRequest(params) +// // Example sending a request using the GetApnsSandboxChannelRequest method. +// req, resp := client.GetApnsSandboxChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaign -func (c *Pinpoint) GetCampaignRequest(input *GetCampaignInput) (req *request.Request, output *GetCampaignOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsSandboxChannel +func (c *Pinpoint) GetApnsSandboxChannelRequest(input *GetApnsSandboxChannelInput) (req *request.Request, output *GetApnsSandboxChannelOutput) { op := &request.Operation{ - Name: opGetCampaign, + Name: opGetApnsSandboxChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}", + HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", } if input == nil { - input = &GetCampaignInput{} + input = &GetApnsSandboxChannelInput{} } - output = &GetCampaignOutput{} + output = &GetApnsSandboxChannelOutput{} req = c.newRequest(op, input, output) return } -// GetCampaign API operation for Amazon Pinpoint. +// GetApnsSandboxChannel API operation for Amazon Pinpoint. // -// Returns information about a campaign. +// Retrieves information about the status and settings of the APNs sandbox channel +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetCampaign for usage and error information. +// API operation GetApnsSandboxChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaign -func (c *Pinpoint) GetCampaign(input *GetCampaignInput) (*GetCampaignOutput, error) { - req, out := c.GetCampaignRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsSandboxChannel +func (c *Pinpoint) GetApnsSandboxChannel(input *GetApnsSandboxChannelInput) (*GetApnsSandboxChannelOutput, error) { + req, out := c.GetApnsSandboxChannelRequest(input) return out, req.Send() } -// GetCampaignWithContext is the same as GetCampaign with the addition of +// GetApnsSandboxChannelWithContext is the same as GetApnsSandboxChannel with the addition of // the ability to pass a context and additional request options. // -// See GetCampaign for details on how to use this API operation. +// See GetApnsSandboxChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetCampaignWithContext(ctx aws.Context, input *GetCampaignInput, opts ...request.Option) (*GetCampaignOutput, error) { - req, out := c.GetCampaignRequest(input) +func (c *Pinpoint) GetApnsSandboxChannelWithContext(ctx aws.Context, input *GetApnsSandboxChannelInput, opts ...request.Option) (*GetApnsSandboxChannelOutput, error) { + req, out := c.GetApnsSandboxChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetCampaignActivities = "GetCampaignActivities" +const opGetApnsVoipChannel = "GetApnsVoipChannel" -// GetCampaignActivitiesRequest generates a "aws/request.Request" representing the -// client's request for the GetCampaignActivities operation. The "output" return +// GetApnsVoipChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetApnsVoipChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCampaignActivities for more information on using the GetCampaignActivities +// See GetApnsVoipChannel for more information on using the GetApnsVoipChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCampaignActivitiesRequest method. -// req, resp := client.GetCampaignActivitiesRequest(params) +// // Example sending a request using the GetApnsVoipChannelRequest method. 
+// req, resp := client.GetApnsVoipChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignActivities -func (c *Pinpoint) GetCampaignActivitiesRequest(input *GetCampaignActivitiesInput) (req *request.Request, output *GetCampaignActivitiesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipChannel +func (c *Pinpoint) GetApnsVoipChannelRequest(input *GetApnsVoipChannelInput) (req *request.Request, output *GetApnsVoipChannelOutput) { op := &request.Operation{ - Name: opGetCampaignActivities, + Name: opGetApnsVoipChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}/activities", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", } if input == nil { - input = &GetCampaignActivitiesInput{} + input = &GetApnsVoipChannelInput{} } - output = &GetCampaignActivitiesOutput{} + output = &GetApnsVoipChannelOutput{} req = c.newRequest(op, input, output) return } -// GetCampaignActivities API operation for Amazon Pinpoint. +// GetApnsVoipChannel API operation for Amazon Pinpoint. // -// Returns information about the activity performed by a campaign. +// Retrieves information about the status and settings of the APNs VoIP channel +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetCampaignActivities for usage and error information. +// API operation GetApnsVoipChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignActivities -func (c *Pinpoint) GetCampaignActivities(input *GetCampaignActivitiesInput) (*GetCampaignActivitiesOutput, error) { - req, out := c.GetCampaignActivitiesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipChannel +func (c *Pinpoint) GetApnsVoipChannel(input *GetApnsVoipChannelInput) (*GetApnsVoipChannelOutput, error) { + req, out := c.GetApnsVoipChannelRequest(input) return out, req.Send() } -// GetCampaignActivitiesWithContext is the same as GetCampaignActivities with the addition of +// GetApnsVoipChannelWithContext is the same as GetApnsVoipChannel with the addition of // the ability to pass a context and additional request options. 
// -// See GetCampaignActivities for details on how to use this API operation. +// See GetApnsVoipChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetCampaignActivitiesWithContext(ctx aws.Context, input *GetCampaignActivitiesInput, opts ...request.Option) (*GetCampaignActivitiesOutput, error) { - req, out := c.GetCampaignActivitiesRequest(input) +func (c *Pinpoint) GetApnsVoipChannelWithContext(ctx aws.Context, input *GetApnsVoipChannelInput, opts ...request.Option) (*GetApnsVoipChannelOutput, error) { + req, out := c.GetApnsVoipChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetCampaignVersion = "GetCampaignVersion" +const opGetApnsVoipSandboxChannel = "GetApnsVoipSandboxChannel" -// GetCampaignVersionRequest generates a "aws/request.Request" representing the -// client's request for the GetCampaignVersion operation. The "output" return +// GetApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetApnsVoipSandboxChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCampaignVersion for more information on using the GetCampaignVersion +// See GetApnsVoipSandboxChannel for more information on using the GetApnsVoipSandboxChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCampaignVersionRequest method. -// req, resp := client.GetCampaignVersionRequest(params) +// // Example sending a request using the GetApnsVoipSandboxChannelRequest method. +// req, resp := client.GetApnsVoipSandboxChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersion -func (c *Pinpoint) GetCampaignVersionRequest(input *GetCampaignVersionInput) (req *request.Request, output *GetCampaignVersionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipSandboxChannel +func (c *Pinpoint) GetApnsVoipSandboxChannelRequest(input *GetApnsVoipSandboxChannelInput) (req *request.Request, output *GetApnsVoipSandboxChannelOutput) { op := &request.Operation{ - Name: opGetCampaignVersion, + Name: opGetApnsVoipSandboxChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}/versions/{version}", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip_sandbox", } if input == nil { - input = &GetCampaignVersionInput{} + input = &GetApnsVoipSandboxChannelInput{} } - output = &GetCampaignVersionOutput{} + output = &GetApnsVoipSandboxChannelOutput{} req = c.newRequest(op, input, output) return } -// GetCampaignVersion API operation for Amazon Pinpoint. +// GetApnsVoipSandboxChannel API operation for Amazon Pinpoint. 
 //
-// Returns information about a specific version of a campaign.
+// Retrieves information about the status and settings of the APNs VoIP sandbox
+// channel for an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetCampaignVersion for usage and error information.
+// API operation GetApnsVoipSandboxChannel for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersion
-func (c *Pinpoint) GetCampaignVersion(input *GetCampaignVersionInput) (*GetCampaignVersionOutput, error) {
-    req, out := c.GetCampaignVersionRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApnsVoipSandboxChannel
+func (c *Pinpoint) GetApnsVoipSandboxChannel(input *GetApnsVoipSandboxChannelInput) (*GetApnsVoipSandboxChannelOutput, error) {
+    req, out := c.GetApnsVoipSandboxChannelRequest(input)
     return out, req.Send()
 }

-// GetCampaignVersionWithContext is the same as GetCampaignVersion with the addition of
+// GetApnsVoipSandboxChannelWithContext is the same as GetApnsVoipSandboxChannel with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetCampaignVersion for details on how to use this API operation.
+// See GetApnsVoipSandboxChannel for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetCampaignVersionWithContext(ctx aws.Context, input *GetCampaignVersionInput, opts ...request.Option) (*GetCampaignVersionOutput, error) {
-    req, out := c.GetCampaignVersionRequest(input)
+func (c *Pinpoint) GetApnsVoipSandboxChannelWithContext(ctx aws.Context, input *GetApnsVoipSandboxChannelInput, opts ...request.Option) (*GetApnsVoipSandboxChannelOutput, error) {
+    req, out := c.GetApnsVoipSandboxChannelRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetCampaignVersions = "GetCampaignVersions"
+const opGetApp = "GetApp"

-// GetCampaignVersionsRequest generates a "aws/request.Request" representing the
-// client's request for the GetCampaignVersions operation. The "output" return
+// GetAppRequest generates a "aws/request.Request" representing the
+// client's request for the GetApp operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetCampaignVersions for more information on using the GetCampaignVersions
+// See GetApp for more information on using the GetApp
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetCampaignVersionsRequest method.
-//    req, resp := client.GetCampaignVersionsRequest(params)
+//    // Example sending a request using the GetAppRequest method.
+//    req, resp := client.GetAppRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersions
-func (c *Pinpoint) GetCampaignVersionsRequest(input *GetCampaignVersionsInput) (req *request.Request, output *GetCampaignVersionsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApp
+func (c *Pinpoint) GetAppRequest(input *GetAppInput) (req *request.Request, output *GetAppOutput) {
     op := &request.Operation{
-        Name:       opGetCampaignVersions,
+        Name:       opGetApp,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}/versions",
+        HTTPPath:   "/v1/apps/{application-id}",
     }

     if input == nil {
-        input = &GetCampaignVersionsInput{}
+        input = &GetAppInput{}
     }

-    output = &GetCampaignVersionsOutput{}
+    output = &GetAppOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetCampaignVersions API operation for Amazon Pinpoint.
+// GetApp API operation for Amazon Pinpoint.
 //
-// Returns information about your campaign versions.
+// Retrieves information about an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetCampaignVersions for usage and error information.
+// API operation GetApp for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersions
-func (c *Pinpoint) GetCampaignVersions(input *GetCampaignVersionsInput) (*GetCampaignVersionsOutput, error) {
-    req, out := c.GetCampaignVersionsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApp
+func (c *Pinpoint) GetApp(input *GetAppInput) (*GetAppOutput, error) {
+    req, out := c.GetAppRequest(input)
     return out, req.Send()
 }

-// GetCampaignVersionsWithContext is the same as GetCampaignVersions with the addition of
+// GetAppWithContext is the same as GetApp with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetCampaignVersions for details on how to use this API operation.
+// See GetApp for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetCampaignVersionsWithContext(ctx aws.Context, input *GetCampaignVersionsInput, opts ...request.Option) (*GetCampaignVersionsOutput, error) {
-    req, out := c.GetCampaignVersionsRequest(input)
+func (c *Pinpoint) GetAppWithContext(ctx aws.Context, input *GetAppInput, opts ...request.Option) (*GetAppOutput, error) {
+    req, out := c.GetAppRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }
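The doc comments above describe the generated SDK's contract: every operation has a Request constructor plus a blocking wrapper, and service failures surface as awserr.Error values whose Code and Message are inspected via runtime type assertions. A minimal sketch of that calling pattern against the updated client follows; it is illustrative only and not part of the vendored diff, and the application ID is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession())
	client := pinpoint.New(sess)

	out, err := client.GetApp(&pinpoint.GetAppInput{
		ApplicationId: aws.String("myApplicationID"), // placeholder ID
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as the doc comments suggest.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case pinpoint.ErrCodeNotFoundException:
				log.Printf("application not found: %s", aerr.Message())
			case pinpoint.ErrCodeTooManyRequestsException:
				log.Printf("throttled: %s", aerr.Message())
			default:
				log.Printf("%s: %s", aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println(out)
}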

-const opGetCampaigns = "GetCampaigns"
+const opGetApplicationDateRangeKpi = "GetApplicationDateRangeKpi"

-// GetCampaignsRequest generates a "aws/request.Request" representing the
-// client's request for the GetCampaigns operation. The "output" return
+// GetApplicationDateRangeKpiRequest generates a "aws/request.Request" representing the
+// client's request for the GetApplicationDateRangeKpi operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetCampaigns for more information on using the GetCampaigns
+// See GetApplicationDateRangeKpi for more information on using the GetApplicationDateRangeKpi
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetCampaignsRequest method.
-//    req, resp := client.GetCampaignsRequest(params)
+//    // Example sending a request using the GetApplicationDateRangeKpiRequest method.
+//    req, resp := client.GetApplicationDateRangeKpiRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaigns
-func (c *Pinpoint) GetCampaignsRequest(input *GetCampaignsInput) (req *request.Request, output *GetCampaignsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationDateRangeKpi
+func (c *Pinpoint) GetApplicationDateRangeKpiRequest(input *GetApplicationDateRangeKpiInput) (req *request.Request, output *GetApplicationDateRangeKpiOutput) {
     op := &request.Operation{
-        Name:       opGetCampaigns,
+        Name:       opGetApplicationDateRangeKpi,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/campaigns",
+        HTTPPath:   "/v1/apps/{application-id}/kpis/daterange/{kpi-name}",
     }

     if input == nil {
-        input = &GetCampaignsInput{}
+        input = &GetApplicationDateRangeKpiInput{}
     }

-    output = &GetCampaignsOutput{}
+    output = &GetApplicationDateRangeKpiOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetCampaigns API operation for Amazon Pinpoint.
+// GetApplicationDateRangeKpi API operation for Amazon Pinpoint.
 //
-// Returns information about your campaigns.
+// Retrieves (queries) pre-aggregated data for a standard metric that applies
+// to an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetCampaigns for usage and error information.
+// API operation GetApplicationDateRangeKpi for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaigns
-func (c *Pinpoint) GetCampaigns(input *GetCampaignsInput) (*GetCampaignsOutput, error) {
-    req, out := c.GetCampaignsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationDateRangeKpi
+func (c *Pinpoint) GetApplicationDateRangeKpi(input *GetApplicationDateRangeKpiInput) (*GetApplicationDateRangeKpiOutput, error) {
+    req, out := c.GetApplicationDateRangeKpiRequest(input)
     return out, req.Send()
 }

-// GetCampaignsWithContext is the same as GetCampaigns with the addition of
+// GetApplicationDateRangeKpiWithContext is the same as GetApplicationDateRangeKpi with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetCampaigns for details on how to use this API operation.
+// See GetApplicationDateRangeKpi for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetCampaignsWithContext(ctx aws.Context, input *GetCampaignsInput, opts ...request.Option) (*GetCampaignsOutput, error) {
-    req, out := c.GetCampaignsRequest(input)
+func (c *Pinpoint) GetApplicationDateRangeKpiWithContext(ctx aws.Context, input *GetApplicationDateRangeKpiInput, opts ...request.Option) (*GetApplicationDateRangeKpiOutput, error) {
+    req, out := c.GetApplicationDateRangeKpiRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }
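GetApplicationDateRangeKpi maps the {kpi-name} path parameter plus a date range onto its input struct. A hedged sketch follows; the field names (ApplicationId, KpiName, StartTime, EndTime), the helper, and the KPI name value are assumptions drawn from the route shown above and the usual aws-sdk-go input shapes, not from this diff:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// weeklyDeliveries is a hypothetical helper that queries one standard metric
// over the last seven days.
func weeklyDeliveries(client *pinpoint.Pinpoint, appID string) {
	out, err := client.GetApplicationDateRangeKpi(&pinpoint.GetApplicationDateRangeKpiInput{
		ApplicationId: aws.String(appID),
		KpiName:       aws.String("unique-deliveries"), // illustrative kpi-name value
		StartTime:     aws.Time(time.Now().AddDate(0, 0, -7)),
		EndTime:       aws.Time(time.Now()),
	})
	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println(out)
}

func main() {
	weeklyDeliveries(pinpoint.New(session.Must(session.NewSession())), "myApplicationID")
}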

-const opGetChannels = "GetChannels"
+const opGetApplicationSettings = "GetApplicationSettings"

-// GetChannelsRequest generates a "aws/request.Request" representing the
-// client's request for the GetChannels operation. The "output" return
+// GetApplicationSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the GetApplicationSettings operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetChannels for more information on using the GetChannels
+// See GetApplicationSettings for more information on using the GetApplicationSettings
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetChannelsRequest method.
-//    req, resp := client.GetChannelsRequest(params)
+//    // Example sending a request using the GetApplicationSettingsRequest method.
+//    req, resp := client.GetApplicationSettingsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetChannels
-func (c *Pinpoint) GetChannelsRequest(input *GetChannelsInput) (req *request.Request, output *GetChannelsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationSettings
+func (c *Pinpoint) GetApplicationSettingsRequest(input *GetApplicationSettingsInput) (req *request.Request, output *GetApplicationSettingsOutput) {
     op := &request.Operation{
-        Name:       opGetChannels,
+        Name:       opGetApplicationSettings,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/channels",
+        HTTPPath:   "/v1/apps/{application-id}/settings",
     }

     if input == nil {
-        input = &GetChannelsInput{}
+        input = &GetApplicationSettingsInput{}
     }

-    output = &GetChannelsOutput{}
+    output = &GetApplicationSettingsOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetChannels API operation for Amazon Pinpoint.
+// GetApplicationSettings API operation for Amazon Pinpoint.
 //
-// Get all channels.
+// Retrieves information about the settings for an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetChannels for usage and error information.
+// API operation GetApplicationSettings for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetChannels
-func (c *Pinpoint) GetChannels(input *GetChannelsInput) (*GetChannelsOutput, error) {
-    req, out := c.GetChannelsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApplicationSettings
+func (c *Pinpoint) GetApplicationSettings(input *GetApplicationSettingsInput) (*GetApplicationSettingsOutput, error) {
+    req, out := c.GetApplicationSettingsRequest(input)
     return out, req.Send()
 }

-// GetChannelsWithContext is the same as GetChannels with the addition of
+// GetApplicationSettingsWithContext is the same as GetApplicationSettings with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetChannels for details on how to use this API operation.
+// See GetApplicationSettings for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetChannelsWithContext(ctx aws.Context, input *GetChannelsInput, opts ...request.Option) (*GetChannelsOutput, error) {
-    req, out := c.GetChannelsRequest(input)
-    req.SetContext(ctx)
+func (c *Pinpoint) GetApplicationSettingsWithContext(ctx aws.Context, input *GetApplicationSettingsInput, opts ...request.Option) (*GetApplicationSettingsOutput, error) {
+    req, out := c.GetApplicationSettingsRequest(input)
+    req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetEmailChannel = "GetEmailChannel"
+const opGetApps = "GetApps"

-// GetEmailChannelRequest generates a "aws/request.Request" representing the
-// client's request for the GetEmailChannel operation. The "output" return
+// GetAppsRequest generates a "aws/request.Request" representing the
+// client's request for the GetApps operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetEmailChannel for more information on using the GetEmailChannel
+// See GetApps for more information on using the GetApps
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetEmailChannelRequest method.
-//    req, resp := client.GetEmailChannelRequest(params)
+//    // Example sending a request using the GetAppsRequest method.
+//    req, resp := client.GetAppsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailChannel
-func (c *Pinpoint) GetEmailChannelRequest(input *GetEmailChannelInput) (req *request.Request, output *GetEmailChannelOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApps
+func (c *Pinpoint) GetAppsRequest(input *GetAppsInput) (req *request.Request, output *GetAppsOutput) {
     op := &request.Operation{
-        Name:       opGetEmailChannel,
+        Name:       opGetApps,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/channels/email",
+        HTTPPath:   "/v1/apps",
     }

     if input == nil {
-        input = &GetEmailChannelInput{}
+        input = &GetAppsInput{}
     }

-    output = &GetEmailChannelOutput{}
+    output = &GetAppsOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetEmailChannel API operation for Amazon Pinpoint.
+// GetApps API operation for Amazon Pinpoint.
 //
-// Get an email channel.
+// Retrieves information about all of your applications.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetEmailChannel for usage and error information.
+// API operation GetApps for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailChannel
-func (c *Pinpoint) GetEmailChannel(input *GetEmailChannelInput) (*GetEmailChannelOutput, error) {
-    req, out := c.GetEmailChannelRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetApps
+func (c *Pinpoint) GetApps(input *GetAppsInput) (*GetAppsOutput, error) {
+    req, out := c.GetAppsRequest(input)
     return out, req.Send()
 }

-// GetEmailChannelWithContext is the same as GetEmailChannel with the addition of
+// GetAppsWithContext is the same as GetApps with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetEmailChannel for details on how to use this API operation.
+// See GetApps for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetEmailChannelWithContext(ctx aws.Context, input *GetEmailChannelInput, opts ...request.Option) (*GetEmailChannelOutput, error) {
-    req, out := c.GetEmailChannelRequest(input)
+func (c *Pinpoint) GetAppsWithContext(ctx aws.Context, input *GetAppsInput, opts ...request.Option) (*GetAppsOutput, error) {
+    req, out := c.GetAppsRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetEndpoint = "GetEndpoint"
+const opGetBaiduChannel = "GetBaiduChannel"

-// GetEndpointRequest generates a "aws/request.Request" representing the
-// client's request for the GetEndpoint operation. The "output" return
+// GetBaiduChannelRequest generates a "aws/request.Request" representing the
+// client's request for the GetBaiduChannel operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetEndpoint for more information on using the GetEndpoint
+// See GetBaiduChannel for more information on using the GetBaiduChannel
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetEndpointRequest method.
-//    req, resp := client.GetEndpointRequest(params)
+//    // Example sending a request using the GetBaiduChannelRequest method.
+//    req, resp := client.GetBaiduChannelRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEndpoint
-func (c *Pinpoint) GetEndpointRequest(input *GetEndpointInput) (req *request.Request, output *GetEndpointOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetBaiduChannel
+func (c *Pinpoint) GetBaiduChannelRequest(input *GetBaiduChannelInput) (req *request.Request, output *GetBaiduChannelOutput) {
     op := &request.Operation{
-        Name:       opGetEndpoint,
+        Name:       opGetBaiduChannel,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/endpoints/{endpoint-id}",
+        HTTPPath:   "/v1/apps/{application-id}/channels/baidu",
     }

     if input == nil {
-        input = &GetEndpointInput{}
+        input = &GetBaiduChannelInput{}
     }

-    output = &GetEndpointOutput{}
+    output = &GetBaiduChannelOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetEndpoint API operation for Amazon Pinpoint.
+// GetBaiduChannel API operation for Amazon Pinpoint.
 //
-// Returns information about an endpoint.
+// Retrieves information about the status and settings of the Baidu channel
+// for an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetEndpoint for usage and error information.
+// API operation GetBaiduChannel for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEndpoint
-func (c *Pinpoint) GetEndpoint(input *GetEndpointInput) (*GetEndpointOutput, error) {
-    req, out := c.GetEndpointRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetBaiduChannel
+func (c *Pinpoint) GetBaiduChannel(input *GetBaiduChannelInput) (*GetBaiduChannelOutput, error) {
+    req, out := c.GetBaiduChannelRequest(input)
     return out, req.Send()
 }

-// GetEndpointWithContext is the same as GetEndpoint with the addition of
+// GetBaiduChannelWithContext is the same as GetBaiduChannel with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetEndpoint for details on how to use this API operation.
+// See GetBaiduChannel for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetEndpointWithContext(ctx aws.Context, input *GetEndpointInput, opts ...request.Option) (*GetEndpointOutput, error) {
-    req, out := c.GetEndpointRequest(input)
+func (c *Pinpoint) GetBaiduChannelWithContext(ctx aws.Context, input *GetBaiduChannelInput, opts ...request.Option) (*GetBaiduChannelOutput, error) {
+    req, out := c.GetBaiduChannelRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetEventStream = "GetEventStream"
+const opGetCampaign = "GetCampaign"

-// GetEventStreamRequest generates a "aws/request.Request" representing the
-// client's request for the GetEventStream operation. The "output" return
+// GetCampaignRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaign operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetEventStream for more information on using the GetEventStream
+// See GetCampaign for more information on using the GetCampaign
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetEventStreamRequest method.
-//    req, resp := client.GetEventStreamRequest(params)
+//    // Example sending a request using the GetCampaignRequest method.
+//    req, resp := client.GetCampaignRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEventStream
-func (c *Pinpoint) GetEventStreamRequest(input *GetEventStreamInput) (req *request.Request, output *GetEventStreamOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaign
+func (c *Pinpoint) GetCampaignRequest(input *GetCampaignInput) (req *request.Request, output *GetCampaignOutput) {
     op := &request.Operation{
-        Name:       opGetEventStream,
+        Name:       opGetCampaign,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/eventstream",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}",
     }

     if input == nil {
-        input = &GetEventStreamInput{}
+        input = &GetCampaignInput{}
     }

-    output = &GetEventStreamOutput{}
+    output = &GetCampaignOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetEventStream API operation for Amazon Pinpoint.
+// GetCampaign API operation for Amazon Pinpoint.
 //
-// Returns the event stream for an app.
+// Retrieves information about the status, configuration, and other settings
+// for a campaign.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetEventStream for usage and error information.
+// API operation GetCampaign for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEventStream
-func (c *Pinpoint) GetEventStream(input *GetEventStreamInput) (*GetEventStreamOutput, error) {
-    req, out := c.GetEventStreamRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaign
+func (c *Pinpoint) GetCampaign(input *GetCampaignInput) (*GetCampaignOutput, error) {
+    req, out := c.GetCampaignRequest(input)
     return out, req.Send()
 }

-// GetEventStreamWithContext is the same as GetEventStream with the addition of
+// GetCampaignWithContext is the same as GetCampaign with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetEventStream for details on how to use this API operation.
+// See GetCampaign for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetEventStreamWithContext(ctx aws.Context, input *GetEventStreamInput, opts ...request.Option) (*GetEventStreamOutput, error) {
-    req, out := c.GetEventStreamRequest(input)
+func (c *Pinpoint) GetCampaignWithContext(ctx aws.Context, input *GetCampaignInput, opts ...request.Option) (*GetCampaignOutput, error) {
+    req, out := c.GetCampaignRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }
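The WithContext variants above accept any context.Context (a standard context satisfies aws.Context), so request cancellation composes with context.WithTimeout as the doc comments describe. A minimal sketch with placeholder identifiers, not part of the vendored diff:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// The context carries the deadline; the SDK cancels the in-flight HTTP
	// request if it expires before Send completes.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.GetCampaignWithContext(ctx, &pinpoint.GetCampaignInput{
		ApplicationId: aws.String("myApplicationID"), // placeholder IDs
		CampaignId:    aws.String("myCampaignID"),
	})
	if err != nil {
		log.Println(err) // includes context deadline exceeded on timeout
		return
	}
	fmt.Println(out)
}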

-const opGetExportJob = "GetExportJob"
+const opGetCampaignActivities = "GetCampaignActivities"

-// GetExportJobRequest generates a "aws/request.Request" representing the
-// client's request for the GetExportJob operation. The "output" return
+// GetCampaignActivitiesRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaignActivities operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetExportJob for more information on using the GetExportJob
+// See GetCampaignActivities for more information on using the GetCampaignActivities
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetExportJobRequest method.
-//    req, resp := client.GetExportJobRequest(params)
+//    // Example sending a request using the GetCampaignActivitiesRequest method.
+//    req, resp := client.GetCampaignActivitiesRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJob
-func (c *Pinpoint) GetExportJobRequest(input *GetExportJobInput) (req *request.Request, output *GetExportJobOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignActivities
+func (c *Pinpoint) GetCampaignActivitiesRequest(input *GetCampaignActivitiesInput) (req *request.Request, output *GetCampaignActivitiesOutput) {
     op := &request.Operation{
-        Name:       opGetExportJob,
+        Name:       opGetCampaignActivities,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/jobs/export/{job-id}",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}/activities",
     }

     if input == nil {
-        input = &GetExportJobInput{}
+        input = &GetCampaignActivitiesInput{}
     }

-    output = &GetExportJobOutput{}
+    output = &GetCampaignActivitiesOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetExportJob API operation for Amazon Pinpoint.
+// GetCampaignActivities API operation for Amazon Pinpoint.
 //
-// Returns information about an export job.
+// Retrieves information about all the activities for a campaign.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetExportJob for usage and error information.
+// API operation GetCampaignActivities for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJob
-func (c *Pinpoint) GetExportJob(input *GetExportJobInput) (*GetExportJobOutput, error) {
-    req, out := c.GetExportJobRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignActivities
+func (c *Pinpoint) GetCampaignActivities(input *GetCampaignActivitiesInput) (*GetCampaignActivitiesOutput, error) {
+    req, out := c.GetCampaignActivitiesRequest(input)
     return out, req.Send()
 }

-// GetExportJobWithContext is the same as GetExportJob with the addition of
+// GetCampaignActivitiesWithContext is the same as GetCampaignActivities with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetExportJob for details on how to use this API operation.
+// See GetCampaignActivities for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetExportJobWithContext(ctx aws.Context, input *GetExportJobInput, opts ...request.Option) (*GetExportJobOutput, error) {
-    req, out := c.GetExportJobRequest(input)
+func (c *Pinpoint) GetCampaignActivitiesWithContext(ctx aws.Context, input *GetCampaignActivitiesInput, opts ...request.Option) (*GetCampaignActivitiesOutput, error) {
+    req, out := c.GetCampaignActivitiesRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetExportJobs = "GetExportJobs"
+const opGetCampaignDateRangeKpi = "GetCampaignDateRangeKpi"

-// GetExportJobsRequest generates a "aws/request.Request" representing the
-// client's request for the GetExportJobs operation. The "output" return
+// GetCampaignDateRangeKpiRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaignDateRangeKpi operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetExportJobs for more information on using the GetExportJobs
+// See GetCampaignDateRangeKpi for more information on using the GetCampaignDateRangeKpi
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetExportJobsRequest method.
-//    req, resp := client.GetExportJobsRequest(params)
+//    // Example sending a request using the GetCampaignDateRangeKpiRequest method.
+//    req, resp := client.GetCampaignDateRangeKpiRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJobs
-func (c *Pinpoint) GetExportJobsRequest(input *GetExportJobsInput) (req *request.Request, output *GetExportJobsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignDateRangeKpi
+func (c *Pinpoint) GetCampaignDateRangeKpiRequest(input *GetCampaignDateRangeKpiInput) (req *request.Request, output *GetCampaignDateRangeKpiOutput) {
     op := &request.Operation{
-        Name:       opGetExportJobs,
+        Name:       opGetCampaignDateRangeKpi,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/jobs/export",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}/kpis/daterange/{kpi-name}",
     }

     if input == nil {
-        input = &GetExportJobsInput{}
+        input = &GetCampaignDateRangeKpiInput{}
     }

-    output = &GetExportJobsOutput{}
+    output = &GetCampaignDateRangeKpiOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetExportJobs API operation for Amazon Pinpoint.
+// GetCampaignDateRangeKpi API operation for Amazon Pinpoint.
 //
-// Returns information about your export jobs.
+// Retrieves (queries) pre-aggregated data for a standard metric that applies
+// to a campaign.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetExportJobs for usage and error information.
+// API operation GetCampaignDateRangeKpi for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJobs
-func (c *Pinpoint) GetExportJobs(input *GetExportJobsInput) (*GetExportJobsOutput, error) {
-    req, out := c.GetExportJobsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignDateRangeKpi
+func (c *Pinpoint) GetCampaignDateRangeKpi(input *GetCampaignDateRangeKpiInput) (*GetCampaignDateRangeKpiOutput, error) {
+    req, out := c.GetCampaignDateRangeKpiRequest(input)
     return out, req.Send()
 }

-// GetExportJobsWithContext is the same as GetExportJobs with the addition of
+// GetCampaignDateRangeKpiWithContext is the same as GetCampaignDateRangeKpi with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetExportJobs for details on how to use this API operation.
+// See GetCampaignDateRangeKpi for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetExportJobsWithContext(ctx aws.Context, input *GetExportJobsInput, opts ...request.Option) (*GetExportJobsOutput, error) {
-    req, out := c.GetExportJobsRequest(input)
+func (c *Pinpoint) GetCampaignDateRangeKpiWithContext(ctx aws.Context, input *GetCampaignDateRangeKpiInput, opts ...request.Option) (*GetCampaignDateRangeKpiOutput, error) {
+    req, out := c.GetCampaignDateRangeKpiRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetGcmChannel = "GetGcmChannel"
+const opGetCampaignVersion = "GetCampaignVersion"

-// GetGcmChannelRequest generates a "aws/request.Request" representing the
-// client's request for the GetGcmChannel operation. The "output" return
+// GetCampaignVersionRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaignVersion operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetGcmChannel for more information on using the GetGcmChannel
+// See GetCampaignVersion for more information on using the GetCampaignVersion
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetGcmChannelRequest method.
-//    req, resp := client.GetGcmChannelRequest(params)
+//    // Example sending a request using the GetCampaignVersionRequest method.
+//    req, resp := client.GetCampaignVersionRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetGcmChannel
-func (c *Pinpoint) GetGcmChannelRequest(input *GetGcmChannelInput) (req *request.Request, output *GetGcmChannelOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersion
+func (c *Pinpoint) GetCampaignVersionRequest(input *GetCampaignVersionInput) (req *request.Request, output *GetCampaignVersionOutput) {
     op := &request.Operation{
-        Name:       opGetGcmChannel,
+        Name:       opGetCampaignVersion,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/channels/gcm",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}/versions/{version}",
     }

     if input == nil {
-        input = &GetGcmChannelInput{}
+        input = &GetCampaignVersionInput{}
     }

-    output = &GetGcmChannelOutput{}
+    output = &GetCampaignVersionOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetGcmChannel API operation for Amazon Pinpoint.
+// GetCampaignVersion API operation for Amazon Pinpoint.
 //
-// Returns information about the GCM channel for an app.
+// Retrieves information about the status, configuration, and other settings
+// for a specific version of a campaign.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetGcmChannel for usage and error information.
+// API operation GetCampaignVersion for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetGcmChannel
-func (c *Pinpoint) GetGcmChannel(input *GetGcmChannelInput) (*GetGcmChannelOutput, error) {
-    req, out := c.GetGcmChannelRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersion
+func (c *Pinpoint) GetCampaignVersion(input *GetCampaignVersionInput) (*GetCampaignVersionOutput, error) {
+    req, out := c.GetCampaignVersionRequest(input)
     return out, req.Send()
 }

-// GetGcmChannelWithContext is the same as GetGcmChannel with the addition of
+// GetCampaignVersionWithContext is the same as GetCampaignVersion with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetGcmChannel for details on how to use this API operation.
+// See GetCampaignVersion for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetGcmChannelWithContext(ctx aws.Context, input *GetGcmChannelInput, opts ...request.Option) (*GetGcmChannelOutput, error) {
-    req, out := c.GetGcmChannelRequest(input)
+func (c *Pinpoint) GetCampaignVersionWithContext(ctx aws.Context, input *GetCampaignVersionInput, opts ...request.Option) (*GetCampaignVersionOutput, error) {
+    req, out := c.GetCampaignVersionRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetImportJob = "GetImportJob"
+const opGetCampaignVersions = "GetCampaignVersions"

-// GetImportJobRequest generates a "aws/request.Request" representing the
-// client's request for the GetImportJob operation. The "output" return
+// GetCampaignVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaignVersions operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetImportJob for more information on using the GetImportJob
+// See GetCampaignVersions for more information on using the GetCampaignVersions
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetImportJobRequest method.
-//    req, resp := client.GetImportJobRequest(params)
+//    // Example sending a request using the GetCampaignVersionsRequest method.
+//    req, resp := client.GetCampaignVersionsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJob
-func (c *Pinpoint) GetImportJobRequest(input *GetImportJobInput) (req *request.Request, output *GetImportJobOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersions
+func (c *Pinpoint) GetCampaignVersionsRequest(input *GetCampaignVersionsInput) (req *request.Request, output *GetCampaignVersionsOutput) {
     op := &request.Operation{
-        Name:       opGetImportJob,
+        Name:       opGetCampaignVersions,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/jobs/import/{job-id}",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns/{campaign-id}/versions",
     }

     if input == nil {
-        input = &GetImportJobInput{}
+        input = &GetCampaignVersionsInput{}
     }

-    output = &GetImportJobOutput{}
+    output = &GetCampaignVersionsOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetImportJob API operation for Amazon Pinpoint.
+// GetCampaignVersions API operation for Amazon Pinpoint.
 //
-// Returns information about an import job.
+// Retrieves information about the status, configuration, and other settings
+// for all versions of a campaign.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetImportJob for usage and error information.
+// API operation GetCampaignVersions for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJob
-func (c *Pinpoint) GetImportJob(input *GetImportJobInput) (*GetImportJobOutput, error) {
-    req, out := c.GetImportJobRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaignVersions
+func (c *Pinpoint) GetCampaignVersions(input *GetCampaignVersionsInput) (*GetCampaignVersionsOutput, error) {
+    req, out := c.GetCampaignVersionsRequest(input)
     return out, req.Send()
 }

-// GetImportJobWithContext is the same as GetImportJob with the addition of
+// GetCampaignVersionsWithContext is the same as GetCampaignVersions with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetImportJob for details on how to use this API operation.
+// See GetCampaignVersions for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetImportJobWithContext(ctx aws.Context, input *GetImportJobInput, opts ...request.Option) (*GetImportJobOutput, error) {
-    req, out := c.GetImportJobRequest(input)
+func (c *Pinpoint) GetCampaignVersionsWithContext(ctx aws.Context, input *GetCampaignVersionsInput, opts ...request.Option) (*GetCampaignVersionsOutput, error) {
+    req, out := c.GetCampaignVersionsRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }

-const opGetImportJobs = "GetImportJobs"
+const opGetCampaigns = "GetCampaigns"

-// GetImportJobsRequest generates a "aws/request.Request" representing the
-// client's request for the GetImportJobs operation. The "output" return
+// GetCampaignsRequest generates a "aws/request.Request" representing the
+// client's request for the GetCampaigns operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetImportJobs for more information on using the GetImportJobs
+// See GetCampaigns for more information on using the GetCampaigns
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetImportJobsRequest method.
-//    req, resp := client.GetImportJobsRequest(params)
+//    // Example sending a request using the GetCampaignsRequest method.
+//    req, resp := client.GetCampaignsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJobs
-func (c *Pinpoint) GetImportJobsRequest(input *GetImportJobsInput) (req *request.Request, output *GetImportJobsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaigns
+func (c *Pinpoint) GetCampaignsRequest(input *GetCampaignsInput) (req *request.Request, output *GetCampaignsOutput) {
     op := &request.Operation{
-        Name:       opGetImportJobs,
+        Name:       opGetCampaigns,
         HTTPMethod: "GET",
-        HTTPPath:   "/v1/apps/{application-id}/jobs/import",
+        HTTPPath:   "/v1/apps/{application-id}/campaigns",
     }

     if input == nil {
-        input = &GetImportJobsInput{}
+        input = &GetCampaignsInput{}
     }

-    output = &GetImportJobsOutput{}
+    output = &GetCampaignsOutput{}
     req = c.newRequest(op, input, output)
     return
 }

-// GetImportJobs API operation for Amazon Pinpoint.
+// GetCampaigns API operation for Amazon Pinpoint.
 //
-// Returns information about your import jobs.
+// Retrieves information about the status, configuration, and other settings
+// for all the campaigns that are associated with an application.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Pinpoint's
-// API operation GetImportJobs for usage and error information.
+// API operation GetCampaigns for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeBadRequestException "BadRequestException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeInternalServerErrorException "InternalServerErrorException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeForbiddenException "ForbiddenException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeNotFoundException "NotFoundException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
 //   * ErrCodeTooManyRequestsException "TooManyRequestsException"
-//   Simple message object.
+//   Provides information about an API request or response.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJobs
-func (c *Pinpoint) GetImportJobs(input *GetImportJobsInput) (*GetImportJobsOutput, error) {
-    req, out := c.GetImportJobsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetCampaigns
+func (c *Pinpoint) GetCampaigns(input *GetCampaignsInput) (*GetCampaignsOutput, error) {
+    req, out := c.GetCampaignsRequest(input)
     return out, req.Send()
 }

-// GetImportJobsWithContext is the same as GetImportJobs with the addition of
+// GetCampaignsWithContext is the same as GetCampaigns with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetImportJobs for details on how to use this API operation.
+// See GetCampaigns for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Pinpoint) GetImportJobsWithContext(ctx aws.Context, input *GetImportJobsInput, opts ...request.Option) (*GetImportJobsOutput, error) {
-    req, out := c.GetImportJobsRequest(input)
+func (c *Pinpoint) GetCampaignsWithContext(ctx aws.Context, input *GetCampaignsInput, opts ...request.Option) (*GetCampaignsOutput, error) {
+    req, out := c.GetCampaignsRequest(input)
     req.SetContext(ctx)
     req.ApplyOptions(opts...)
     return out, req.Send()
 }
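The "custom headers" use case that the Request-method doc comments mention works by building the request without sending it, mutating the underlying *http.Request, and then calling Send. A sketch with an illustrative header name and a placeholder application ID, not part of the vendored diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// Build the request handle; nothing is sent yet, so it can be customized.
	req, resp := client.GetCampaignsRequest(&pinpoint.GetCampaignsInput{
		ApplicationId: aws.String("myApplicationID"), // placeholder
	})
	req.HTTPRequest.Header.Set("X-Trace-Id", "example-trace") // illustrative header

	if err := req.Send(); err != nil {
		log.Println(err)
		return
	}
	fmt.Println(resp) // resp is now filled
}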
-// req, resp := client.GetSegmentRequest(params) +// // Example sending a request using the GetChannelsRequest method. +// req, resp := client.GetChannelsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegment -func (c *Pinpoint) GetSegmentRequest(input *GetSegmentInput) (req *request.Request, output *GetSegmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetChannels +func (c *Pinpoint) GetChannelsRequest(input *GetChannelsInput) (req *request.Request, output *GetChannelsOutput) { op := &request.Operation{ - Name: opGetSegment, + Name: opGetChannels, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", + HTTPPath: "/v1/apps/{application-id}/channels", } if input == nil { - input = &GetSegmentInput{} + input = &GetChannelsInput{} } - output = &GetSegmentOutput{} + output = &GetChannelsOutput{} req = c.newRequest(op, input, output) return } -// GetSegment API operation for Amazon Pinpoint. +// GetChannels API operation for Amazon Pinpoint. // -// Returns information about a segment. +// Retrieves information about the history and status of each channel for an +// application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegment for usage and error information. +// API operation GetChannels for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegment -func (c *Pinpoint) GetSegment(input *GetSegmentInput) (*GetSegmentOutput, error) { - req, out := c.GetSegmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetChannels +func (c *Pinpoint) GetChannels(input *GetChannelsInput) (*GetChannelsOutput, error) { + req, out := c.GetChannelsRequest(input) return out, req.Send() } -// GetSegmentWithContext is the same as GetSegment with the addition of +// GetChannelsWithContext is the same as GetChannels with the addition of // the ability to pass a context and additional request options. // -// See GetSegment for details on how to use this API operation. +// See GetChannels for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentWithContext(ctx aws.Context, input *GetSegmentInput, opts ...request.Option) (*GetSegmentOutput, error) { - req, out := c.GetSegmentRequest(input) +func (c *Pinpoint) GetChannelsWithContext(ctx aws.Context, input *GetChannelsInput, opts ...request.Option) (*GetChannelsOutput, error) { + req, out := c.GetChannelsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSegmentExportJobs = "GetSegmentExportJobs" +const opGetEmailChannel = "GetEmailChannel" -// GetSegmentExportJobsRequest generates a "aws/request.Request" representing the -// client's request for the GetSegmentExportJobs operation. The "output" return +// GetEmailChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetEmailChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSegmentExportJobs for more information on using the GetSegmentExportJobs +// See GetEmailChannel for more information on using the GetEmailChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSegmentExportJobsRequest method. -// req, resp := client.GetSegmentExportJobsRequest(params) +// // Example sending a request using the GetEmailChannelRequest method. +// req, resp := client.GetEmailChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentExportJobs -func (c *Pinpoint) GetSegmentExportJobsRequest(input *GetSegmentExportJobsInput) (req *request.Request, output *GetSegmentExportJobsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailChannel +func (c *Pinpoint) GetEmailChannelRequest(input *GetEmailChannelInput) (req *request.Request, output *GetEmailChannelOutput) { op := &request.Operation{ - Name: opGetSegmentExportJobs, + Name: opGetEmailChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/jobs/export", + HTTPPath: "/v1/apps/{application-id}/channels/email", } if input == nil { - input = &GetSegmentExportJobsInput{} + input = &GetEmailChannelInput{} } - output = &GetSegmentExportJobsOutput{} + output = &GetEmailChannelOutput{} req = c.newRequest(op, input, output) return } -// GetSegmentExportJobs API operation for Amazon Pinpoint. +// GetEmailChannel API operation for Amazon Pinpoint. // -// Returns a list of export jobs for a specific segment. +// Retrieves information about the status and settings of the email channel +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegmentExportJobs for usage and error information. +// API operation GetEmailChannel for usage and error information. 
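//
// Illustrative sketch (not part of the generated file): one plausible way to
// call GetEmailChannel, assuming a configured session and a hypothetical
// application ID:
//
//    // import (
//    //     "fmt"
//    //     "github.com/aws/aws-sdk-go/aws"
//    //     "github.com/aws/aws-sdk-go/aws/session"
//    //     "github.com/aws/aws-sdk-go/service/pinpoint"
//    // )
//    svc := pinpoint.New(session.Must(session.NewSession()))
//    out, err := svc.GetEmailChannel(&pinpoint.GetEmailChannelInput{
//        ApplicationId: aws.String("example-app-id"), // hypothetical ID
//    })
//    if err != nil {
//        fmt.Println(err)
//    } else {
//        fmt.Println(out.EmailChannelResponse)
//    }
//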
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentExportJobs -func (c *Pinpoint) GetSegmentExportJobs(input *GetSegmentExportJobsInput) (*GetSegmentExportJobsOutput, error) { - req, out := c.GetSegmentExportJobsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailChannel +func (c *Pinpoint) GetEmailChannel(input *GetEmailChannelInput) (*GetEmailChannelOutput, error) { + req, out := c.GetEmailChannelRequest(input) return out, req.Send() } -// GetSegmentExportJobsWithContext is the same as GetSegmentExportJobs with the addition of +// GetEmailChannelWithContext is the same as GetEmailChannel with the addition of // the ability to pass a context and additional request options. // -// See GetSegmentExportJobs for details on how to use this API operation. +// See GetEmailChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentExportJobsWithContext(ctx aws.Context, input *GetSegmentExportJobsInput, opts ...request.Option) (*GetSegmentExportJobsOutput, error) { - req, out := c.GetSegmentExportJobsRequest(input) +func (c *Pinpoint) GetEmailChannelWithContext(ctx aws.Context, input *GetEmailChannelInput, opts ...request.Option) (*GetEmailChannelOutput, error) { + req, out := c.GetEmailChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSegmentImportJobs = "GetSegmentImportJobs" +const opGetEmailTemplate = "GetEmailTemplate" -// GetSegmentImportJobsRequest generates a "aws/request.Request" representing the -// client's request for the GetSegmentImportJobs operation. The "output" return +// GetEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetEmailTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSegmentImportJobs for more information on using the GetSegmentImportJobs +// See GetEmailTemplate for more information on using the GetEmailTemplate // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSegmentImportJobsRequest method. -// req, resp := client.GetSegmentImportJobsRequest(params) +// // Example sending a request using the GetEmailTemplateRequest method. +// req, resp := client.GetEmailTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentImportJobs -func (c *Pinpoint) GetSegmentImportJobsRequest(input *GetSegmentImportJobsInput) (req *request.Request, output *GetSegmentImportJobsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailTemplate +func (c *Pinpoint) GetEmailTemplateRequest(input *GetEmailTemplateInput) (req *request.Request, output *GetEmailTemplateOutput) { op := &request.Operation{ - Name: opGetSegmentImportJobs, + Name: opGetEmailTemplate, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/jobs/import", + HTTPPath: "/v1/templates/{template-name}/email", } if input == nil { - input = &GetSegmentImportJobsInput{} + input = &GetEmailTemplateInput{} } - output = &GetSegmentImportJobsOutput{} + output = &GetEmailTemplateOutput{} req = c.newRequest(op, input, output) return } -// GetSegmentImportJobs API operation for Amazon Pinpoint. +// GetEmailTemplate API operation for Amazon Pinpoint. // -// Returns a list of import jobs for a specific segment. +// Retrieves the content and settings for a message template that you can use +// in messages that are sent through the email channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegmentImportJobs for usage and error information. +// API operation GetEmailTemplate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentImportJobs -func (c *Pinpoint) GetSegmentImportJobs(input *GetSegmentImportJobsInput) (*GetSegmentImportJobsOutput, error) { - req, out := c.GetSegmentImportJobsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEmailTemplate +func (c *Pinpoint) GetEmailTemplate(input *GetEmailTemplateInput) (*GetEmailTemplateOutput, error) { + req, out := c.GetEmailTemplateRequest(input) return out, req.Send() } -// GetSegmentImportJobsWithContext is the same as GetSegmentImportJobs with the addition of +// GetEmailTemplateWithContext is the same as GetEmailTemplate with the addition of // the ability to pass a context and additional request options. // -// See GetSegmentImportJobs for details on how to use this API operation. +// See GetEmailTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentImportJobsWithContext(ctx aws.Context, input *GetSegmentImportJobsInput, opts ...request.Option) (*GetSegmentImportJobsOutput, error) { - req, out := c.GetSegmentImportJobsRequest(input) +func (c *Pinpoint) GetEmailTemplateWithContext(ctx aws.Context, input *GetEmailTemplateInput, opts ...request.Option) (*GetEmailTemplateOutput, error) { + req, out := c.GetEmailTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSegmentVersion = "GetSegmentVersion" +const opGetEndpoint = "GetEndpoint" -// GetSegmentVersionRequest generates a "aws/request.Request" representing the -// client's request for the GetSegmentVersion operation. The "output" return +// GetEndpointRequest generates a "aws/request.Request" representing the +// client's request for the GetEndpoint operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSegmentVersion for more information on using the GetSegmentVersion +// See GetEndpoint for more information on using the GetEndpoint // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSegmentVersionRequest method. -// req, resp := client.GetSegmentVersionRequest(params) +// // Example sending a request using the GetEndpointRequest method. 
+// req, resp := client.GetEndpointRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersion -func (c *Pinpoint) GetSegmentVersionRequest(input *GetSegmentVersionInput) (req *request.Request, output *GetSegmentVersionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEndpoint +func (c *Pinpoint) GetEndpointRequest(input *GetEndpointInput) (req *request.Request, output *GetEndpointOutput) { op := &request.Operation{ - Name: opGetSegmentVersion, + Name: opGetEndpoint, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/versions/{version}", + HTTPPath: "/v1/apps/{application-id}/endpoints/{endpoint-id}", } if input == nil { - input = &GetSegmentVersionInput{} + input = &GetEndpointInput{} } - output = &GetSegmentVersionOutput{} + output = &GetEndpointOutput{} req = c.newRequest(op, input, output) return } -// GetSegmentVersion API operation for Amazon Pinpoint. +// GetEndpoint API operation for Amazon Pinpoint. // -// Returns information about a segment version. +// Retrieves information about the settings and attributes of a specific endpoint +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegmentVersion for usage and error information. +// API operation GetEndpoint for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersion -func (c *Pinpoint) GetSegmentVersion(input *GetSegmentVersionInput) (*GetSegmentVersionOutput, error) { - req, out := c.GetSegmentVersionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEndpoint +func (c *Pinpoint) GetEndpoint(input *GetEndpointInput) (*GetEndpointOutput, error) { + req, out := c.GetEndpointRequest(input) return out, req.Send() } -// GetSegmentVersionWithContext is the same as GetSegmentVersion with the addition of +// GetEndpointWithContext is the same as GetEndpoint with the addition of // the ability to pass a context and additional request options. // -// See GetSegmentVersion for details on how to use this API operation. +// See GetEndpoint for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentVersionWithContext(ctx aws.Context, input *GetSegmentVersionInput, opts ...request.Option) (*GetSegmentVersionOutput, error) { - req, out := c.GetSegmentVersionRequest(input) +func (c *Pinpoint) GetEndpointWithContext(ctx aws.Context, input *GetEndpointInput, opts ...request.Option) (*GetEndpointOutput, error) { + req, out := c.GetEndpointRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSegmentVersions = "GetSegmentVersions" +const opGetEventStream = "GetEventStream" -// GetSegmentVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetSegmentVersions operation. The "output" return +// GetEventStreamRequest generates a "aws/request.Request" representing the +// client's request for the GetEventStream operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSegmentVersions for more information on using the GetSegmentVersions +// See GetEventStream for more information on using the GetEventStream // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSegmentVersionsRequest method. -// req, resp := client.GetSegmentVersionsRequest(params) +// // Example sending a request using the GetEventStreamRequest method. +// req, resp := client.GetEventStreamRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersions -func (c *Pinpoint) GetSegmentVersionsRequest(input *GetSegmentVersionsInput) (req *request.Request, output *GetSegmentVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEventStream +func (c *Pinpoint) GetEventStreamRequest(input *GetEventStreamInput) (req *request.Request, output *GetEventStreamOutput) { op := &request.Operation{ - Name: opGetSegmentVersions, + Name: opGetEventStream, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/versions", + HTTPPath: "/v1/apps/{application-id}/eventstream", } if input == nil { - input = &GetSegmentVersionsInput{} + input = &GetEventStreamInput{} } - output = &GetSegmentVersionsOutput{} + output = &GetEventStreamOutput{} req = c.newRequest(op, input, output) return } -// GetSegmentVersions API operation for Amazon Pinpoint. +// GetEventStream API operation for Amazon Pinpoint. // -// Returns information about your segment versions. +// Retrieves information about the event stream settings for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegmentVersions for usage and error information. +// API operation GetEventStream for usage and error information. 
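//
// Illustrative sketch (not part of the generated file): calling the operation
// through its WithContext variant with a deadline, assuming svc is a
// *pinpoint.Pinpoint client and the application ID is a placeholder:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel() // releases the context's resources once the call returns
//    out, err := svc.GetEventStreamWithContext(ctx, &pinpoint.GetEventStreamInput{
//        ApplicationId: aws.String("example-app-id"), // hypothetical ID
//    })
//    if err == nil {
//        fmt.Println(out.EventStream)
//    }
//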
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersions -func (c *Pinpoint) GetSegmentVersions(input *GetSegmentVersionsInput) (*GetSegmentVersionsOutput, error) { - req, out := c.GetSegmentVersionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetEventStream +func (c *Pinpoint) GetEventStream(input *GetEventStreamInput) (*GetEventStreamOutput, error) { + req, out := c.GetEventStreamRequest(input) return out, req.Send() } -// GetSegmentVersionsWithContext is the same as GetSegmentVersions with the addition of +// GetEventStreamWithContext is the same as GetEventStream with the addition of // the ability to pass a context and additional request options. // -// See GetSegmentVersions for details on how to use this API operation. +// See GetEventStream for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentVersionsWithContext(ctx aws.Context, input *GetSegmentVersionsInput, opts ...request.Option) (*GetSegmentVersionsOutput, error) { - req, out := c.GetSegmentVersionsRequest(input) +func (c *Pinpoint) GetEventStreamWithContext(ctx aws.Context, input *GetEventStreamInput, opts ...request.Option) (*GetEventStreamOutput, error) { + req, out := c.GetEventStreamRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSegments = "GetSegments" +const opGetExportJob = "GetExportJob" -// GetSegmentsRequest generates a "aws/request.Request" representing the -// client's request for the GetSegments operation. The "output" return -// value will be populated with the request's response once the request completes +// GetExportJobRequest generates a "aws/request.Request" representing the +// client's request for the GetExportJob operation. The "output" return +// value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSegments for more information on using the GetSegments +// See GetExportJob for more information on using the GetExportJob // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSegmentsRequest method. -// req, resp := client.GetSegmentsRequest(params) +// // Example sending a request using the GetExportJobRequest method. +// req, resp := client.GetExportJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegments -func (c *Pinpoint) GetSegmentsRequest(input *GetSegmentsInput) (req *request.Request, output *GetSegmentsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJob +func (c *Pinpoint) GetExportJobRequest(input *GetExportJobInput) (req *request.Request, output *GetExportJobOutput) { op := &request.Operation{ - Name: opGetSegments, + Name: opGetExportJob, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/segments", + HTTPPath: "/v1/apps/{application-id}/jobs/export/{job-id}", } if input == nil { - input = &GetSegmentsInput{} + input = &GetExportJobInput{} } - output = &GetSegmentsOutput{} + output = &GetExportJobOutput{} req = c.newRequest(op, input, output) return } -// GetSegments API operation for Amazon Pinpoint. +// GetExportJob API operation for Amazon Pinpoint. // -// Used to get information about your segments. +// Retrieves information about the status and settings of a specific export +// job for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSegments for usage and error information. +// API operation GetExportJob for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegments -func (c *Pinpoint) GetSegments(input *GetSegmentsInput) (*GetSegmentsOutput, error) { - req, out := c.GetSegmentsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJob +func (c *Pinpoint) GetExportJob(input *GetExportJobInput) (*GetExportJobOutput, error) { + req, out := c.GetExportJobRequest(input) return out, req.Send() } -// GetSegmentsWithContext is the same as GetSegments with the addition of +// GetExportJobWithContext is the same as GetExportJob with the addition of // the ability to pass a context and additional request options. 
// -// See GetSegments for details on how to use this API operation. +// See GetExportJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSegmentsWithContext(ctx aws.Context, input *GetSegmentsInput, opts ...request.Option) (*GetSegmentsOutput, error) { - req, out := c.GetSegmentsRequest(input) +func (c *Pinpoint) GetExportJobWithContext(ctx aws.Context, input *GetExportJobInput, opts ...request.Option) (*GetExportJobOutput, error) { + req, out := c.GetExportJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSmsChannel = "GetSmsChannel" +const opGetExportJobs = "GetExportJobs" -// GetSmsChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetSmsChannel operation. The "output" return +// GetExportJobsRequest generates a "aws/request.Request" representing the +// client's request for the GetExportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSmsChannel for more information on using the GetSmsChannel +// See GetExportJobs for more information on using the GetExportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSmsChannelRequest method. -// req, resp := client.GetSmsChannelRequest(params) +// // Example sending a request using the GetExportJobsRequest method. +// req, resp := client.GetExportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsChannel -func (c *Pinpoint) GetSmsChannelRequest(input *GetSmsChannelInput) (req *request.Request, output *GetSmsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJobs +func (c *Pinpoint) GetExportJobsRequest(input *GetExportJobsInput) (req *request.Request, output *GetExportJobsOutput) { op := &request.Operation{ - Name: opGetSmsChannel, + Name: opGetExportJobs, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/sms", + HTTPPath: "/v1/apps/{application-id}/jobs/export", } if input == nil { - input = &GetSmsChannelInput{} + input = &GetExportJobsInput{} } - output = &GetSmsChannelOutput{} + output = &GetExportJobsOutput{} req = c.newRequest(op, input, output) return } -// GetSmsChannel API operation for Amazon Pinpoint. +// GetExportJobs API operation for Amazon Pinpoint. // -// Get an SMS channel. +// Retrieves information about the status and settings of all the export jobs +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetSmsChannel for usage and error information. 
+// API operation GetExportJobs for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsChannel -func (c *Pinpoint) GetSmsChannel(input *GetSmsChannelInput) (*GetSmsChannelOutput, error) { - req, out := c.GetSmsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetExportJobs +func (c *Pinpoint) GetExportJobs(input *GetExportJobsInput) (*GetExportJobsOutput, error) { + req, out := c.GetExportJobsRequest(input) return out, req.Send() } -// GetSmsChannelWithContext is the same as GetSmsChannel with the addition of +// GetExportJobsWithContext is the same as GetExportJobs with the addition of // the ability to pass a context and additional request options. // -// See GetSmsChannel for details on how to use this API operation. +// See GetExportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetSmsChannelWithContext(ctx aws.Context, input *GetSmsChannelInput, opts ...request.Option) (*GetSmsChannelOutput, error) { - req, out := c.GetSmsChannelRequest(input) +func (c *Pinpoint) GetExportJobsWithContext(ctx aws.Context, input *GetExportJobsInput, opts ...request.Option) (*GetExportJobsOutput, error) { + req, out := c.GetExportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetUserEndpoints = "GetUserEndpoints" +const opGetGcmChannel = "GetGcmChannel" -// GetUserEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the GetUserEndpoints operation. The "output" return +// GetGcmChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetGcmChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetUserEndpoints for more information on using the GetUserEndpoints +// See GetGcmChannel for more information on using the GetGcmChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
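//
// Illustrative sketch (not part of the generated file): injecting a custom
// header into the request lifecycle before sending, assuming svc is a
// *pinpoint.Pinpoint client; the header name and values are hypothetical:
//
//    req, out := svc.GetGcmChannelRequest(&pinpoint.GetGcmChannelInput{
//        ApplicationId: aws.String("example-app-id"), // hypothetical ID
//    })
//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123") // hypothetical header
//    if err := req.Send(); err == nil {
//        fmt.Println(out)
//    }
//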
// // -// // Example sending a request using the GetUserEndpointsRequest method. -// req, resp := client.GetUserEndpointsRequest(params) +// // Example sending a request using the GetGcmChannelRequest method. +// req, resp := client.GetGcmChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetUserEndpoints -func (c *Pinpoint) GetUserEndpointsRequest(input *GetUserEndpointsInput) (req *request.Request, output *GetUserEndpointsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetGcmChannel +func (c *Pinpoint) GetGcmChannelRequest(input *GetGcmChannelInput) (req *request.Request, output *GetGcmChannelOutput) { op := &request.Operation{ - Name: opGetUserEndpoints, + Name: opGetGcmChannel, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/users/{user-id}", + HTTPPath: "/v1/apps/{application-id}/channels/gcm", } if input == nil { - input = &GetUserEndpointsInput{} + input = &GetGcmChannelInput{} } - output = &GetUserEndpointsOutput{} + output = &GetGcmChannelOutput{} req = c.newRequest(op, input, output) return } -// GetUserEndpoints API operation for Amazon Pinpoint. +// GetGcmChannel API operation for Amazon Pinpoint. // -// Returns information about the endpoints that are associated with a User ID. +// Retrieves information about the status and settings of the GCM channel for +// an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetUserEndpoints for usage and error information. +// API operation GetGcmChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetUserEndpoints -func (c *Pinpoint) GetUserEndpoints(input *GetUserEndpointsInput) (*GetUserEndpointsOutput, error) { - req, out := c.GetUserEndpointsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetGcmChannel +func (c *Pinpoint) GetGcmChannel(input *GetGcmChannelInput) (*GetGcmChannelOutput, error) { + req, out := c.GetGcmChannelRequest(input) return out, req.Send() } -// GetUserEndpointsWithContext is the same as GetUserEndpoints with the addition of +// GetGcmChannelWithContext is the same as GetGcmChannel with the addition of // the ability to pass a context and additional request options. 
// -// See GetUserEndpoints for details on how to use this API operation. +// See GetGcmChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetUserEndpointsWithContext(ctx aws.Context, input *GetUserEndpointsInput, opts ...request.Option) (*GetUserEndpointsOutput, error) { - req, out := c.GetUserEndpointsRequest(input) +func (c *Pinpoint) GetGcmChannelWithContext(ctx aws.Context, input *GetGcmChannelInput, opts ...request.Option) (*GetGcmChannelOutput, error) { + req, out := c.GetGcmChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetVoiceChannel = "GetVoiceChannel" +const opGetImportJob = "GetImportJob" -// GetVoiceChannelRequest generates a "aws/request.Request" representing the -// client's request for the GetVoiceChannel operation. The "output" return +// GetImportJobRequest generates a "aws/request.Request" representing the +// client's request for the GetImportJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetVoiceChannel for more information on using the GetVoiceChannel +// See GetImportJob for more information on using the GetImportJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetVoiceChannelRequest method. -// req, resp := client.GetVoiceChannelRequest(params) +// // Example sending a request using the GetImportJobRequest method. +// req, resp := client.GetImportJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetVoiceChannel -func (c *Pinpoint) GetVoiceChannelRequest(input *GetVoiceChannelInput) (req *request.Request, output *GetVoiceChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJob +func (c *Pinpoint) GetImportJobRequest(input *GetImportJobInput) (req *request.Request, output *GetImportJobOutput) { op := &request.Operation{ - Name: opGetVoiceChannel, + Name: opGetImportJob, HTTPMethod: "GET", - HTTPPath: "/v1/apps/{application-id}/channels/voice", + HTTPPath: "/v1/apps/{application-id}/jobs/import/{job-id}", } if input == nil { - input = &GetVoiceChannelInput{} + input = &GetImportJobInput{} } - output = &GetVoiceChannelOutput{} + output = &GetImportJobOutput{} req = c.newRequest(op, input, output) return } -// GetVoiceChannel API operation for Amazon Pinpoint. +// GetImportJob API operation for Amazon Pinpoint. // -// Get a Voice Channel +// Retrieves information about the status and settings of a specific import +// job for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
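//
// Illustrative sketch (not part of the generated file): distinguishing service
// error codes with a runtime type assertion on awserr.Error, assuming svc and
// input are already in scope:
//
//    out, err := svc.GetImportJob(input)
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case pinpoint.ErrCodeNotFoundException:
//                fmt.Println("import job not found:", aerr.Message())
//            default:
//                fmt.Println(aerr.Code(), aerr.Message())
//            }
//        }
//        return
//    }
//    fmt.Println(out)
//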
// // See the AWS API reference guide for Amazon Pinpoint's -// API operation GetVoiceChannel for usage and error information. +// API operation GetImportJob for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetVoiceChannel -func (c *Pinpoint) GetVoiceChannel(input *GetVoiceChannelInput) (*GetVoiceChannelOutput, error) { - req, out := c.GetVoiceChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJob +func (c *Pinpoint) GetImportJob(input *GetImportJobInput) (*GetImportJobOutput, error) { + req, out := c.GetImportJobRequest(input) return out, req.Send() } -// GetVoiceChannelWithContext is the same as GetVoiceChannel with the addition of +// GetImportJobWithContext is the same as GetImportJob with the addition of // the ability to pass a context and additional request options. // -// See GetVoiceChannel for details on how to use this API operation. +// See GetImportJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) GetVoiceChannelWithContext(ctx aws.Context, input *GetVoiceChannelInput, opts ...request.Option) (*GetVoiceChannelOutput, error) { - req, out := c.GetVoiceChannelRequest(input) +func (c *Pinpoint) GetImportJobWithContext(ctx aws.Context, input *GetImportJobInput, opts ...request.Option) (*GetImportJobOutput, error) { + req, out := c.GetImportJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTagsForResource = "ListTagsForResource" +const opGetImportJobs = "GetImportJobs" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// GetImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the GetImportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See GetImportJobs for more information on using the GetImportJobs // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the GetImportJobsRequest method. +// req, resp := client.GetImportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTagsForResource -func (c *Pinpoint) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJobs +func (c *Pinpoint) GetImportJobsRequest(input *GetImportJobsInput) (req *request.Request, output *GetImportJobsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opGetImportJobs, HTTPMethod: "GET", - HTTPPath: "/v1/tags/{resource-arn}", + HTTPPath: "/v1/apps/{application-id}/jobs/import", } if input == nil { - input = &ListTagsForResourceInput{} + input = &GetImportJobsInput{} } - output = &ListTagsForResourceOutput{} + output = &GetImportJobsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon Pinpoint. +// GetImportJobs API operation for Amazon Pinpoint. // -// Get list of all tags for a given resource arn +// Retrieves information about the status and settings of all the import jobs +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation ListTagsForResource for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTagsForResource -func (c *Pinpoint) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// API operation GetImportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetImportJobs +func (c *Pinpoint) GetImportJobs(input *GetImportJobsInput) (*GetImportJobsOutput, error) { + req, out := c.GetImportJobsRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// GetImportJobsWithContext is the same as GetImportJobs with the addition of // the ability to pass a context and additional request options. 
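//
// Illustrative sketch (not part of the generated file): passing both a context
// and a per-request option, assuming svc, ctx, and input are in scope:
//
//    out, err := svc.GetImportJobsWithContext(ctx, input,
//        request.WithLogLevel(aws.LogDebugWithHTTPBody)) // option from aws/request
//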
// -// See ListTagsForResource for details on how to use this API operation. +// See GetImportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Pinpoint) GetImportJobsWithContext(ctx aws.Context, input *GetImportJobsInput, opts ...request.Option) (*GetImportJobsOutput, error) { + req, out := c.GetImportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPhoneNumberValidate = "PhoneNumberValidate" +const opGetJourney = "GetJourney" -// PhoneNumberValidateRequest generates a "aws/request.Request" representing the -// client's request for the PhoneNumberValidate operation. The "output" return +// GetJourneyRequest generates a "aws/request.Request" representing the +// client's request for the GetJourney operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PhoneNumberValidate for more information on using the PhoneNumberValidate +// See GetJourney for more information on using the GetJourney // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PhoneNumberValidateRequest method. -// req, resp := client.PhoneNumberValidateRequest(params) +// // Example sending a request using the GetJourneyRequest method. +// req, resp := client.GetJourneyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PhoneNumberValidate -func (c *Pinpoint) PhoneNumberValidateRequest(input *PhoneNumberValidateInput) (req *request.Request, output *PhoneNumberValidateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourney +func (c *Pinpoint) GetJourneyRequest(input *GetJourneyInput) (req *request.Request, output *GetJourneyOutput) { op := &request.Operation{ - Name: opPhoneNumberValidate, - HTTPMethod: "POST", - HTTPPath: "/v1/phone/number/validate", + Name: opGetJourney, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}", } if input == nil { - input = &PhoneNumberValidateInput{} + input = &GetJourneyInput{} } - output = &PhoneNumberValidateOutput{} + output = &GetJourneyOutput{} req = c.newRequest(op, input, output) return } -// PhoneNumberValidate API operation for Amazon Pinpoint. +// GetJourney API operation for Amazon Pinpoint. // -// Returns information about the specified phone number. +// Retrieves information about the status, configuration, and other settings +// for a journey. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation PhoneNumberValidate for usage and error information. +// API operation GetJourney for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PhoneNumberValidate -func (c *Pinpoint) PhoneNumberValidate(input *PhoneNumberValidateInput) (*PhoneNumberValidateOutput, error) { - req, out := c.PhoneNumberValidateRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourney +func (c *Pinpoint) GetJourney(input *GetJourneyInput) (*GetJourneyOutput, error) { + req, out := c.GetJourneyRequest(input) return out, req.Send() } -// PhoneNumberValidateWithContext is the same as PhoneNumberValidate with the addition of +// GetJourneyWithContext is the same as GetJourney with the addition of // the ability to pass a context and additional request options. // -// See PhoneNumberValidate for details on how to use this API operation. +// See GetJourney for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) PhoneNumberValidateWithContext(ctx aws.Context, input *PhoneNumberValidateInput, opts ...request.Option) (*PhoneNumberValidateOutput, error) { - req, out := c.PhoneNumberValidateRequest(input) +func (c *Pinpoint) GetJourneyWithContext(ctx aws.Context, input *GetJourneyInput, opts ...request.Option) (*GetJourneyOutput, error) { + req, out := c.GetJourneyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutEventStream = "PutEventStream" +const opGetJourneyDateRangeKpi = "GetJourneyDateRangeKpi" -// PutEventStreamRequest generates a "aws/request.Request" representing the -// client's request for the PutEventStream operation. The "output" return +// GetJourneyDateRangeKpiRequest generates a "aws/request.Request" representing the +// client's request for the GetJourneyDateRangeKpi operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
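//
// Illustrative sketch (not part of the generated file): the two-step
// request/send pattern, where the output is only safe to read after Send
// returns nil; svc and input are assumed to be in scope:
//
//    req, out := svc.GetJourneyDateRangeKpiRequest(input)
//    if err := req.Send(); err != nil {
//        fmt.Println(err)
//        return
//    }
//    fmt.Println(out) // populated only after a successful Send
//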
// -// See PutEventStream for more information on using the PutEventStream +// See GetJourneyDateRangeKpi for more information on using the GetJourneyDateRangeKpi // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutEventStreamRequest method. -// req, resp := client.PutEventStreamRequest(params) +// // Example sending a request using the GetJourneyDateRangeKpiRequest method. +// req, resp := client.GetJourneyDateRangeKpiRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEventStream -func (c *Pinpoint) PutEventStreamRequest(input *PutEventStreamInput) (req *request.Request, output *PutEventStreamOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyDateRangeKpi +func (c *Pinpoint) GetJourneyDateRangeKpiRequest(input *GetJourneyDateRangeKpiInput) (req *request.Request, output *GetJourneyDateRangeKpiOutput) { op := &request.Operation{ - Name: opPutEventStream, - HTTPMethod: "POST", - HTTPPath: "/v1/apps/{application-id}/eventstream", + Name: opGetJourneyDateRangeKpi, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}/kpis/daterange/{kpi-name}", } if input == nil { - input = &PutEventStreamInput{} + input = &GetJourneyDateRangeKpiInput{} } - output = &PutEventStreamOutput{} + output = &GetJourneyDateRangeKpiOutput{} req = c.newRequest(op, input, output) return } -// PutEventStream API operation for Amazon Pinpoint. +// GetJourneyDateRangeKpi API operation for Amazon Pinpoint. // -// Use to create or update the event stream for an app. +// Retrieves (queries) pre-aggregated data for a standard engagement metric +// that applies to a journey. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation PutEventStream for usage and error information. +// API operation GetJourneyDateRangeKpi for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEventStream -func (c *Pinpoint) PutEventStream(input *PutEventStreamInput) (*PutEventStreamOutput, error) { - req, out := c.PutEventStreamRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyDateRangeKpi +func (c *Pinpoint) GetJourneyDateRangeKpi(input *GetJourneyDateRangeKpiInput) (*GetJourneyDateRangeKpiOutput, error) { + req, out := c.GetJourneyDateRangeKpiRequest(input) return out, req.Send() } -// PutEventStreamWithContext is the same as PutEventStream with the addition of +// GetJourneyDateRangeKpiWithContext is the same as GetJourneyDateRangeKpi with the addition of // the ability to pass a context and additional request options. // -// See PutEventStream for details on how to use this API operation. +// See GetJourneyDateRangeKpi for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) PutEventStreamWithContext(ctx aws.Context, input *PutEventStreamInput, opts ...request.Option) (*PutEventStreamOutput, error) { - req, out := c.PutEventStreamRequest(input) +func (c *Pinpoint) GetJourneyDateRangeKpiWithContext(ctx aws.Context, input *GetJourneyDateRangeKpiInput, opts ...request.Option) (*GetJourneyDateRangeKpiOutput, error) { + req, out := c.GetJourneyDateRangeKpiRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutEvents = "PutEvents" +const opGetJourneyExecutionActivityMetrics = "GetJourneyExecutionActivityMetrics" -// PutEventsRequest generates a "aws/request.Request" representing the -// client's request for the PutEvents operation. The "output" return +// GetJourneyExecutionActivityMetricsRequest generates a "aws/request.Request" representing the +// client's request for the GetJourneyExecutionActivityMetrics operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutEvents for more information on using the PutEvents +// See GetJourneyExecutionActivityMetrics for more information on using the GetJourneyExecutionActivityMetrics // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutEventsRequest method. -// req, resp := client.PutEventsRequest(params) +// // Example sending a request using the GetJourneyExecutionActivityMetricsRequest method. 
+// req, resp := client.GetJourneyExecutionActivityMetricsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEvents -func (c *Pinpoint) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyExecutionActivityMetrics +func (c *Pinpoint) GetJourneyExecutionActivityMetricsRequest(input *GetJourneyExecutionActivityMetricsInput) (req *request.Request, output *GetJourneyExecutionActivityMetricsOutput) { op := &request.Operation{ - Name: opPutEvents, - HTTPMethod: "POST", - HTTPPath: "/v1/apps/{application-id}/events", + Name: opGetJourneyExecutionActivityMetrics, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}/activities/{journey-activity-id}/execution-metrics", } if input == nil { - input = &PutEventsInput{} + input = &GetJourneyExecutionActivityMetricsInput{} } - output = &PutEventsOutput{} + output = &GetJourneyExecutionActivityMetricsOutput{} req = c.newRequest(op, input, output) return } -// PutEvents API operation for Amazon Pinpoint. +// GetJourneyExecutionActivityMetrics API operation for Amazon Pinpoint. // -// Use to record events for endpoints. This method creates events and creates -// or updates the endpoints that those events are associated with. +// Retrieves (queries) pre-aggregated data for a standard execution metric that +// applies to a journey activity. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation PutEvents for usage and error information. +// API operation GetJourneyExecutionActivityMetrics for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
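The activity-level metrics call adds the {journey-activity-id} path parameter seen in the route above. A sketch, with all IDs as placeholders:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func activityMetrics(client *pinpoint.Pinpoint) error {
	out, err := client.GetJourneyExecutionActivityMetrics(&pinpoint.GetJourneyExecutionActivityMetricsInput{
		ApplicationId:     aws.String("application-id"), // placeholder
		JourneyId:         aws.String("journey-id"),     // placeholder
		JourneyActivityId: aws.String("activity-id"),    // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}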
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEvents -func (c *Pinpoint) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { - req, out := c.PutEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyExecutionActivityMetrics +func (c *Pinpoint) GetJourneyExecutionActivityMetrics(input *GetJourneyExecutionActivityMetricsInput) (*GetJourneyExecutionActivityMetricsOutput, error) { + req, out := c.GetJourneyExecutionActivityMetricsRequest(input) return out, req.Send() } -// PutEventsWithContext is the same as PutEvents with the addition of +// GetJourneyExecutionActivityMetricsWithContext is the same as GetJourneyExecutionActivityMetrics with the addition of // the ability to pass a context and additional request options. // -// See PutEvents for details on how to use this API operation. +// See GetJourneyExecutionActivityMetrics for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) PutEventsWithContext(ctx aws.Context, input *PutEventsInput, opts ...request.Option) (*PutEventsOutput, error) { - req, out := c.PutEventsRequest(input) +func (c *Pinpoint) GetJourneyExecutionActivityMetricsWithContext(ctx aws.Context, input *GetJourneyExecutionActivityMetricsInput, opts ...request.Option) (*GetJourneyExecutionActivityMetricsOutput, error) { + req, out := c.GetJourneyExecutionActivityMetricsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveAttributes = "RemoveAttributes" +const opGetJourneyExecutionMetrics = "GetJourneyExecutionMetrics" -// RemoveAttributesRequest generates a "aws/request.Request" representing the -// client's request for the RemoveAttributes operation. The "output" return +// GetJourneyExecutionMetricsRequest generates a "aws/request.Request" representing the +// client's request for the GetJourneyExecutionMetrics operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveAttributes for more information on using the RemoveAttributes +// See GetJourneyExecutionMetrics for more information on using the GetJourneyExecutionMetrics // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveAttributesRequest method. -// req, resp := client.RemoveAttributesRequest(params) +// // Example sending a request using the GetJourneyExecutionMetricsRequest method. 
+// req, resp := client.GetJourneyExecutionMetricsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/RemoveAttributes -func (c *Pinpoint) RemoveAttributesRequest(input *RemoveAttributesInput) (req *request.Request, output *RemoveAttributesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyExecutionMetrics +func (c *Pinpoint) GetJourneyExecutionMetricsRequest(input *GetJourneyExecutionMetricsInput) (req *request.Request, output *GetJourneyExecutionMetricsOutput) { op := &request.Operation{ - Name: opRemoveAttributes, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/attributes/{attribute-type}", + Name: opGetJourneyExecutionMetrics, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}/execution-metrics", } if input == nil { - input = &RemoveAttributesInput{} + input = &GetJourneyExecutionMetricsInput{} } - output = &RemoveAttributesOutput{} + output = &GetJourneyExecutionMetricsOutput{} req = c.newRequest(op, input, output) return } -// RemoveAttributes API operation for Amazon Pinpoint. +// GetJourneyExecutionMetrics API operation for Amazon Pinpoint. // -// Used to remove the attributes for an app +// Retrieves (queries) pre-aggregated data for a standard execution metric that +// applies to a journey. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation RemoveAttributes for usage and error information. +// API operation GetJourneyExecutionMetrics for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
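The journey-level execution metrics drop the activity ID, matching the shorter route above. A sketch:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func executionMetrics(client *pinpoint.Pinpoint) error {
	out, err := client.GetJourneyExecutionMetrics(&pinpoint.GetJourneyExecutionMetricsInput{
		ApplicationId: aws.String("application-id"), // placeholder
		JourneyId:     aws.String("journey-id"),     // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}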
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/RemoveAttributes -func (c *Pinpoint) RemoveAttributes(input *RemoveAttributesInput) (*RemoveAttributesOutput, error) { - req, out := c.RemoveAttributesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetJourneyExecutionMetrics +func (c *Pinpoint) GetJourneyExecutionMetrics(input *GetJourneyExecutionMetricsInput) (*GetJourneyExecutionMetricsOutput, error) { + req, out := c.GetJourneyExecutionMetricsRequest(input) return out, req.Send() } -// RemoveAttributesWithContext is the same as RemoveAttributes with the addition of +// GetJourneyExecutionMetricsWithContext is the same as GetJourneyExecutionMetrics with the addition of // the ability to pass a context and additional request options. // -// See RemoveAttributes for details on how to use this API operation. +// See GetJourneyExecutionMetrics for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) RemoveAttributesWithContext(ctx aws.Context, input *RemoveAttributesInput, opts ...request.Option) (*RemoveAttributesOutput, error) { - req, out := c.RemoveAttributesRequest(input) +func (c *Pinpoint) GetJourneyExecutionMetricsWithContext(ctx aws.Context, input *GetJourneyExecutionMetricsInput, opts ...request.Option) (*GetJourneyExecutionMetricsOutput, error) { + req, out := c.GetJourneyExecutionMetricsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSendMessages = "SendMessages" +const opGetPushTemplate = "GetPushTemplate" -// SendMessagesRequest generates a "aws/request.Request" representing the -// client's request for the SendMessages operation. The "output" return +// GetPushTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetPushTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SendMessages for more information on using the SendMessages +// See GetPushTemplate for more information on using the GetPushTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SendMessagesRequest method. -// req, resp := client.SendMessagesRequest(params) +// // Example sending a request using the GetPushTemplateRequest method. 
+// req, resp := client.GetPushTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendMessages -func (c *Pinpoint) SendMessagesRequest(input *SendMessagesInput) (req *request.Request, output *SendMessagesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetPushTemplate +func (c *Pinpoint) GetPushTemplateRequest(input *GetPushTemplateInput) (req *request.Request, output *GetPushTemplateOutput) { op := &request.Operation{ - Name: opSendMessages, - HTTPMethod: "POST", - HTTPPath: "/v1/apps/{application-id}/messages", + Name: opGetPushTemplate, + HTTPMethod: "GET", + HTTPPath: "/v1/templates/{template-name}/push", } if input == nil { - input = &SendMessagesInput{} + input = &GetPushTemplateInput{} } - output = &SendMessagesOutput{} + output = &GetPushTemplateOutput{} req = c.newRequest(op, input, output) return } -// SendMessages API operation for Amazon Pinpoint. +// GetPushTemplate API operation for Amazon Pinpoint. // -// Used to send a direct message. +// Retrieves the content and settings for a message template that you can use +// in messages that are sent through a push notification channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation SendMessages for usage and error information. +// API operation GetPushTemplate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendMessages -func (c *Pinpoint) SendMessages(input *SendMessagesInput) (*SendMessagesOutput, error) { - req, out := c.SendMessagesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetPushTemplate +func (c *Pinpoint) GetPushTemplate(input *GetPushTemplateInput) (*GetPushTemplateOutput, error) { + req, out := c.GetPushTemplateRequest(input) return out, req.Send() } -// SendMessagesWithContext is the same as SendMessages with the addition of +// GetPushTemplateWithContext is the same as GetPushTemplate with the addition of // the ability to pass a context and additional request options. // -// See SendMessages for details on how to use this API operation. +// See GetPushTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
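The WithContext variants described here require a non-nil context; a deadline is the usual way to bound the request. A sketch for the new GetPushTemplate, with the template name as a placeholder:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func pushTemplate(client *pinpoint.Pinpoint) error {
	// A nil context panics, per the comments above; a timeout bounds the call.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.GetPushTemplateWithContext(ctx, &pinpoint.GetPushTemplateInput{
		TemplateName: aws.String("my-template"), // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}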
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) SendMessagesWithContext(ctx aws.Context, input *SendMessagesInput, opts ...request.Option) (*SendMessagesOutput, error) { - req, out := c.SendMessagesRequest(input) +func (c *Pinpoint) GetPushTemplateWithContext(ctx aws.Context, input *GetPushTemplateInput, opts ...request.Option) (*GetPushTemplateOutput, error) { + req, out := c.GetPushTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSendUsersMessages = "SendUsersMessages" +const opGetSegment = "GetSegment" -// SendUsersMessagesRequest generates a "aws/request.Request" representing the -// client's request for the SendUsersMessages operation. The "output" return +// GetSegmentRequest generates a "aws/request.Request" representing the +// client's request for the GetSegment operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SendUsersMessages for more information on using the SendUsersMessages +// See GetSegment for more information on using the GetSegment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SendUsersMessagesRequest method. -// req, resp := client.SendUsersMessagesRequest(params) +// // Example sending a request using the GetSegmentRequest method. +// req, resp := client.GetSegmentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendUsersMessages -func (c *Pinpoint) SendUsersMessagesRequest(input *SendUsersMessagesInput) (req *request.Request, output *SendUsersMessagesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegment +func (c *Pinpoint) GetSegmentRequest(input *GetSegmentInput) (req *request.Request, output *GetSegmentOutput) { op := &request.Operation{ - Name: opSendUsersMessages, - HTTPMethod: "POST", - HTTPPath: "/v1/apps/{application-id}/users-messages", + Name: opGetSegment, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", } if input == nil { - input = &SendUsersMessagesInput{} + input = &GetSegmentInput{} } - output = &SendUsersMessagesOutput{} + output = &GetSegmentOutput{} req = c.newRequest(op, input, output) return } -// SendUsersMessages API operation for Amazon Pinpoint. +// GetSegment API operation for Amazon Pinpoint. // -// Used to send a message to a list of users. +// Retrieves information about the configuration, dimension, and other settings +// for a specific segment that's associated with an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation SendUsersMessages for usage and error information. +// API operation GetSegment for usage and error information. 
// // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendUsersMessages -func (c *Pinpoint) SendUsersMessages(input *SendUsersMessagesInput) (*SendUsersMessagesOutput, error) { - req, out := c.SendUsersMessagesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegment +func (c *Pinpoint) GetSegment(input *GetSegmentInput) (*GetSegmentOutput, error) { + req, out := c.GetSegmentRequest(input) return out, req.Send() } -// SendUsersMessagesWithContext is the same as SendUsersMessages with the addition of +// GetSegmentWithContext is the same as GetSegment with the addition of // the ability to pass a context and additional request options. // -// See SendUsersMessages for details on how to use this API operation. +// See GetSegment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) SendUsersMessagesWithContext(ctx aws.Context, input *SendUsersMessagesInput, opts ...request.Option) (*SendUsersMessagesOutput, error) { - req, out := c.SendUsersMessagesRequest(input) +func (c *Pinpoint) GetSegmentWithContext(ctx aws.Context, input *GetSegmentInput, opts ...request.Option) (*GetSegmentOutput, error) { + req, out := c.GetSegmentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opGetSegmentExportJobs = "GetSegmentExportJobs" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// GetSegmentExportJobsRequest generates a "aws/request.Request" representing the +// client's request for the GetSegmentExportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See GetSegmentExportJobs for more information on using the GetSegmentExportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
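Fetching a single segment mirrors the {application-id} and {segment-id} path parameters shown above. A sketch:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func showSegment(client *pinpoint.Pinpoint) error {
	out, err := client.GetSegment(&pinpoint.GetSegmentInput{
		ApplicationId: aws.String("application-id"), // placeholder
		SegmentId:     aws.String("segment-id"),     // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}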
// // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the GetSegmentExportJobsRequest method. +// req, resp := client.GetSegmentExportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/TagResource -func (c *Pinpoint) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentExportJobs +func (c *Pinpoint) GetSegmentExportJobsRequest(input *GetSegmentExportJobsInput) (req *request.Request, output *GetSegmentExportJobsOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/v1/tags/{resource-arn}", + Name: opGetSegmentExportJobs, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/jobs/export", } if input == nil { - input = &TagResourceInput{} + input = &GetSegmentExportJobsInput{} } - output = &TagResourceOutput{} + output = &GetSegmentExportJobsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Amazon Pinpoint. +// GetSegmentExportJobs API operation for Amazon Pinpoint. // -// Adds tags to a resource. +// Retrieves information about the status and settings of the export jobs for +// a segment. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation TagResource for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/TagResource -func (c *Pinpoint) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// API operation GetSegmentExportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentExportJobs +func (c *Pinpoint) GetSegmentExportJobs(input *GetSegmentExportJobsInput) (*GetSegmentExportJobsOutput, error) { + req, out := c.GetSegmentExportJobsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// GetSegmentExportJobsWithContext is the same as GetSegmentExportJobs with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. 
+// See GetSegmentExportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Pinpoint) GetSegmentExportJobsWithContext(ctx aws.Context, input *GetSegmentExportJobsInput, opts ...request.Option) (*GetSegmentExportJobsOutput, error) { + req, out := c.GetSegmentExportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opGetSegmentImportJobs = "GetSegmentImportJobs" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// GetSegmentImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the GetSegmentImportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See GetSegmentImportJobs for more information on using the GetSegmentImportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the GetSegmentImportJobsRequest method. +// req, resp := client.GetSegmentImportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UntagResource -func (c *Pinpoint) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentImportJobs +func (c *Pinpoint) GetSegmentImportJobsRequest(input *GetSegmentImportJobsInput) (req *request.Request, output *GetSegmentImportJobsOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/v1/tags/{resource-arn}", + Name: opGetSegmentImportJobs, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/jobs/import", } if input == nil { - input = &UntagResourceInput{} + input = &GetSegmentImportJobsInput{} } - output = &UntagResourceOutput{} + output = &GetSegmentImportJobsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon Pinpoint. +// GetSegmentImportJobs API operation for Amazon Pinpoint. // -// Remove tags from a resource. +// Retrieves information about the status and settings of the import jobs for +// a segment. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UntagResource for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UntagResource -func (c *Pinpoint) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// API operation GetSegmentImportJobs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentImportJobs +func (c *Pinpoint) GetSegmentImportJobs(input *GetSegmentImportJobsInput) (*GetSegmentImportJobsOutput, error) { + req, out := c.GetSegmentImportJobsRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// GetSegmentImportJobsWithContext is the same as GetSegmentImportJobs with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See GetSegmentImportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Pinpoint) GetSegmentImportJobsWithContext(ctx aws.Context, input *GetSegmentImportJobsInput, opts ...request.Option) (*GetSegmentImportJobsOutput, error) { + req, out := c.GetSegmentImportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateAdmChannel = "UpdateAdmChannel" +const opGetSegmentVersion = "GetSegmentVersion" -// UpdateAdmChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateAdmChannel operation. The "output" return +// GetSegmentVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetSegmentVersion operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
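The export- and import-job listings are symmetric. A sketch that also demonstrates the two-step Request/Send pattern the generated examples describe, which leaves room to adjust handlers or headers before sending:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func segmentJobs(client *pinpoint.Pinpoint) error {
	// Build the request first, then Send, per the generated examples above.
	req, out := client.GetSegmentExportJobsRequest(&pinpoint.GetSegmentExportJobsInput{
		ApplicationId: aws.String("application-id"), // placeholder
		SegmentId:     aws.String("segment-id"),     // placeholder
	})
	if err := req.Send(); err != nil {
		return err
	}
	fmt.Println(out)

	// The one-shot form does the same build-and-send in a single call.
	jobs, err := client.GetSegmentImportJobs(&pinpoint.GetSegmentImportJobsInput{
		ApplicationId: aws.String("application-id"),
		SegmentId:     aws.String("segment-id"),
	})
	if err != nil {
		return err
	}
	fmt.Println(jobs)
	return nil
}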
// -// See UpdateAdmChannel for more information on using the UpdateAdmChannel +// See GetSegmentVersion for more information on using the GetSegmentVersion // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateAdmChannelRequest method. -// req, resp := client.UpdateAdmChannelRequest(params) +// // Example sending a request using the GetSegmentVersionRequest method. +// req, resp := client.GetSegmentVersionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateAdmChannel -func (c *Pinpoint) UpdateAdmChannelRequest(input *UpdateAdmChannelInput) (req *request.Request, output *UpdateAdmChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersion +func (c *Pinpoint) GetSegmentVersionRequest(input *GetSegmentVersionInput) (req *request.Request, output *GetSegmentVersionOutput) { op := &request.Operation{ - Name: opUpdateAdmChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/adm", + Name: opGetSegmentVersion, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/versions/{version}", } if input == nil { - input = &UpdateAdmChannelInput{} + input = &GetSegmentVersionInput{} } - output = &UpdateAdmChannelOutput{} + output = &GetSegmentVersionOutput{} req = c.newRequest(op, input, output) return } -// UpdateAdmChannel API operation for Amazon Pinpoint. +// GetSegmentVersion API operation for Amazon Pinpoint. // -// Update an ADM channel. +// Retrieves information about the configuration, dimension, and other settings +// for a specific version of a segment that's associated with an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateAdmChannel for usage and error information. +// API operation GetSegmentVersion for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
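A sketch of fetching one specific segment version; the {version} path parameter is assumed to be carried as a string field on the input:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func segmentVersion(client *pinpoint.Pinpoint) error {
	out, err := client.GetSegmentVersion(&pinpoint.GetSegmentVersionInput{
		ApplicationId: aws.String("application-id"), // placeholder
		SegmentId:     aws.String("segment-id"),     // placeholder
		Version:       aws.String("1"),              // assumed string-typed path parameter
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}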
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateAdmChannel -func (c *Pinpoint) UpdateAdmChannel(input *UpdateAdmChannelInput) (*UpdateAdmChannelOutput, error) { - req, out := c.UpdateAdmChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersion +func (c *Pinpoint) GetSegmentVersion(input *GetSegmentVersionInput) (*GetSegmentVersionOutput, error) { + req, out := c.GetSegmentVersionRequest(input) return out, req.Send() } -// UpdateAdmChannelWithContext is the same as UpdateAdmChannel with the addition of +// GetSegmentVersionWithContext is the same as GetSegmentVersion with the addition of // the ability to pass a context and additional request options. // -// See UpdateAdmChannel for details on how to use this API operation. +// See GetSegmentVersion for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateAdmChannelWithContext(ctx aws.Context, input *UpdateAdmChannelInput, opts ...request.Option) (*UpdateAdmChannelOutput, error) { - req, out := c.UpdateAdmChannelRequest(input) +func (c *Pinpoint) GetSegmentVersionWithContext(ctx aws.Context, input *GetSegmentVersionInput, opts ...request.Option) (*GetSegmentVersionOutput, error) { + req, out := c.GetSegmentVersionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateApnsChannel = "UpdateApnsChannel" +const opGetSegmentVersions = "GetSegmentVersions" -// UpdateApnsChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateApnsChannel operation. The "output" return +// GetSegmentVersionsRequest generates a "aws/request.Request" representing the +// client's request for the GetSegmentVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateApnsChannel for more information on using the UpdateApnsChannel +// See GetSegmentVersions for more information on using the GetSegmentVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateApnsChannelRequest method. -// req, resp := client.UpdateApnsChannelRequest(params) +// // Example sending a request using the GetSegmentVersionsRequest method. 
+// req, resp := client.GetSegmentVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsChannel -func (c *Pinpoint) UpdateApnsChannelRequest(input *UpdateApnsChannelInput) (req *request.Request, output *UpdateApnsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersions +func (c *Pinpoint) GetSegmentVersionsRequest(input *GetSegmentVersionsInput) (req *request.Request, output *GetSegmentVersionsOutput) { op := &request.Operation{ - Name: opUpdateApnsChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/apns", + Name: opGetSegmentVersions, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}/versions", } if input == nil { - input = &UpdateApnsChannelInput{} + input = &GetSegmentVersionsInput{} } - output = &UpdateApnsChannelOutput{} + output = &GetSegmentVersionsOutput{} req = c.newRequest(op, input, output) return } -// UpdateApnsChannel API operation for Amazon Pinpoint. +// GetSegmentVersions API operation for Amazon Pinpoint. // -// Use to update the APNs channel for an app. +// Retrieves information about the configuration, dimension, and other settings +// for all versions of a specific segment that's associated with an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateApnsChannel for usage and error information. +// API operation GetSegmentVersions for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsChannel -func (c *Pinpoint) UpdateApnsChannel(input *UpdateApnsChannelInput) (*UpdateApnsChannelOutput, error) { - req, out := c.UpdateApnsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegmentVersions +func (c *Pinpoint) GetSegmentVersions(input *GetSegmentVersionsInput) (*GetSegmentVersionsOutput, error) { + req, out := c.GetSegmentVersionsRequest(input) return out, req.Send() } -// UpdateApnsChannelWithContext is the same as UpdateApnsChannel with the addition of +// GetSegmentVersionsWithContext is the same as GetSegmentVersions with the addition of // the ability to pass a context and additional request options. // -// See UpdateApnsChannel for details on how to use this API operation. 
+// See GetSegmentVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateApnsChannelWithContext(ctx aws.Context, input *UpdateApnsChannelInput, opts ...request.Option) (*UpdateApnsChannelOutput, error) { - req, out := c.UpdateApnsChannelRequest(input) +func (c *Pinpoint) GetSegmentVersionsWithContext(ctx aws.Context, input *GetSegmentVersionsInput, opts ...request.Option) (*GetSegmentVersionsOutput, error) { + req, out := c.GetSegmentVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateApnsSandboxChannel = "UpdateApnsSandboxChannel" +const opGetSegments = "GetSegments" -// UpdateApnsSandboxChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateApnsSandboxChannel operation. The "output" return +// GetSegmentsRequest generates a "aws/request.Request" representing the +// client's request for the GetSegments operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateApnsSandboxChannel for more information on using the UpdateApnsSandboxChannel +// See GetSegments for more information on using the GetSegments // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateApnsSandboxChannelRequest method. -// req, resp := client.UpdateApnsSandboxChannelRequest(params) +// // Example sending a request using the GetSegmentsRequest method. +// req, resp := client.GetSegmentsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsSandboxChannel -func (c *Pinpoint) UpdateApnsSandboxChannelRequest(input *UpdateApnsSandboxChannelInput) (req *request.Request, output *UpdateApnsSandboxChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegments +func (c *Pinpoint) GetSegmentsRequest(input *GetSegmentsInput) (req *request.Request, output *GetSegmentsOutput) { op := &request.Operation{ - Name: opUpdateApnsSandboxChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", + Name: opGetSegments, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/segments", } if input == nil { - input = &UpdateApnsSandboxChannelInput{} + input = &GetSegmentsInput{} } - output = &UpdateApnsSandboxChannelOutput{} + output = &GetSegmentsOutput{} req = c.newRequest(op, input, output) return } -// UpdateApnsSandboxChannel API operation for Amazon Pinpoint. +// GetSegments API operation for Amazon Pinpoint. // -// Update an APNS sandbox channel. +// Retrieves information about the configuration, dimension, and other settings +// for all the segments that are associated with an application. // // Returns awserr.Error for service API and SDK errors. 
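Listing all versions of a segment is the plural form of the call above. A sketch; the PageSize field is an assumption here (Pinpoint's generated list inputs appear to take paging values as strings):

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func segmentVersions(client *pinpoint.Pinpoint) error {
	out, err := client.GetSegmentVersions(&pinpoint.GetSegmentVersionsInput{
		ApplicationId: aws.String("application-id"), // placeholder
		SegmentId:     aws.String("segment-id"),     // placeholder
		PageSize:      aws.String("25"),             // assumed string-typed paging parameter
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}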
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateApnsSandboxChannel for usage and error information. +// API operation GetSegments for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsSandboxChannel -func (c *Pinpoint) UpdateApnsSandboxChannel(input *UpdateApnsSandboxChannelInput) (*UpdateApnsSandboxChannelOutput, error) { - req, out := c.UpdateApnsSandboxChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSegments +func (c *Pinpoint) GetSegments(input *GetSegmentsInput) (*GetSegmentsOutput, error) { + req, out := c.GetSegmentsRequest(input) return out, req.Send() } -// UpdateApnsSandboxChannelWithContext is the same as UpdateApnsSandboxChannel with the addition of +// GetSegmentsWithContext is the same as GetSegments with the addition of // the ability to pass a context and additional request options. // -// See UpdateApnsSandboxChannel for details on how to use this API operation. +// See GetSegments for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateApnsSandboxChannelWithContext(ctx aws.Context, input *UpdateApnsSandboxChannelInput, opts ...request.Option) (*UpdateApnsSandboxChannelOutput, error) { - req, out := c.UpdateApnsSandboxChannelRequest(input) +func (c *Pinpoint) GetSegmentsWithContext(ctx aws.Context, input *GetSegmentsInput, opts ...request.Option) (*GetSegmentsOutput, error) { + req, out := c.GetSegmentsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateApnsVoipChannel = "UpdateApnsVoipChannel" +const opGetSmsChannel = "GetSmsChannel" -// UpdateApnsVoipChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateApnsVoipChannel operation. The "output" return +// GetSmsChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetSmsChannel operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See UpdateApnsVoipChannel for more information on using the UpdateApnsVoipChannel +// See GetSmsChannel for more information on using the GetSmsChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateApnsVoipChannelRequest method. -// req, resp := client.UpdateApnsVoipChannelRequest(params) +// // Example sending a request using the GetSmsChannelRequest method. +// req, resp := client.GetSmsChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipChannel -func (c *Pinpoint) UpdateApnsVoipChannelRequest(input *UpdateApnsVoipChannelInput) (req *request.Request, output *UpdateApnsVoipChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsChannel +func (c *Pinpoint) GetSmsChannelRequest(input *GetSmsChannelInput) (req *request.Request, output *GetSmsChannelOutput) { op := &request.Operation{ - Name: opUpdateApnsVoipChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", + Name: opGetSmsChannel, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/channels/sms", } if input == nil { - input = &UpdateApnsVoipChannelInput{} + input = &GetSmsChannelInput{} } - output = &UpdateApnsVoipChannelOutput{} + output = &GetSmsChannelOutput{} req = c.newRequest(op, input, output) return } -// UpdateApnsVoipChannel API operation for Amazon Pinpoint. +// GetSmsChannel API operation for Amazon Pinpoint. // -// Update an APNS VoIP channel +// Retrieves information about the status and settings of the SMS channel for +// an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateApnsVoipChannel for usage and error information. +// API operation GetSmsChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipChannel -func (c *Pinpoint) UpdateApnsVoipChannel(input *UpdateApnsVoipChannelInput) (*UpdateApnsVoipChannelOutput, error) { - req, out := c.UpdateApnsVoipChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsChannel +func (c *Pinpoint) GetSmsChannel(input *GetSmsChannelInput) (*GetSmsChannelOutput, error) { + req, out := c.GetSmsChannelRequest(input) return out, req.Send() } -// UpdateApnsVoipChannelWithContext is the same as UpdateApnsVoipChannel with the addition of +// GetSmsChannelWithContext is the same as GetSmsChannel with the addition of // the ability to pass a context and additional request options. // -// See UpdateApnsVoipChannel for details on how to use this API operation. +// See GetSmsChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateApnsVoipChannelWithContext(ctx aws.Context, input *UpdateApnsVoipChannelInput, opts ...request.Option) (*UpdateApnsVoipChannelOutput, error) { - req, out := c.UpdateApnsVoipChannelRequest(input) +func (c *Pinpoint) GetSmsChannelWithContext(ctx aws.Context, input *GetSmsChannelInput, opts ...request.Option) (*GetSmsChannelOutput, error) { + req, out := c.GetSmsChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateApnsVoipSandboxChannel = "UpdateApnsVoipSandboxChannel" +const opGetSmsTemplate = "GetSmsTemplate" -// UpdateApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateApnsVoipSandboxChannel operation. The "output" return +// GetSmsTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetSmsTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateApnsVoipSandboxChannel for more information on using the UpdateApnsVoipSandboxChannel +// See GetSmsTemplate for more information on using the GetSmsTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateApnsVoipSandboxChannelRequest method. -// req, resp := client.UpdateApnsVoipSandboxChannelRequest(params) +// // Example sending a request using the GetSmsTemplateRequest method. 
+// req, resp := client.GetSmsTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipSandboxChannel -func (c *Pinpoint) UpdateApnsVoipSandboxChannelRequest(input *UpdateApnsVoipSandboxChannelInput) (req *request.Request, output *UpdateApnsVoipSandboxChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsTemplate +func (c *Pinpoint) GetSmsTemplateRequest(input *GetSmsTemplateInput) (req *request.Request, output *GetSmsTemplateOutput) { op := &request.Operation{ - Name: opUpdateApnsVoipSandboxChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/apns_voip_sandbox", + Name: opGetSmsTemplate, + HTTPMethod: "GET", + HTTPPath: "/v1/templates/{template-name}/sms", } if input == nil { - input = &UpdateApnsVoipSandboxChannelInput{} + input = &GetSmsTemplateInput{} } - output = &UpdateApnsVoipSandboxChannelOutput{} + output = &GetSmsTemplateOutput{} req = c.newRequest(op, input, output) return } -// UpdateApnsVoipSandboxChannel API operation for Amazon Pinpoint. +// GetSmsTemplate API operation for Amazon Pinpoint. // -// Update an APNS VoIP sandbox channel +// Retrieves the content and settings for a message template that you can use +// in messages that are sent through the SMS channel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateApnsVoipSandboxChannel for usage and error information. +// API operation GetSmsTemplate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipSandboxChannel -func (c *Pinpoint) UpdateApnsVoipSandboxChannel(input *UpdateApnsVoipSandboxChannelInput) (*UpdateApnsVoipSandboxChannelOutput, error) { - req, out := c.UpdateApnsVoipSandboxChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetSmsTemplate +func (c *Pinpoint) GetSmsTemplate(input *GetSmsTemplateInput) (*GetSmsTemplateOutput, error) { + req, out := c.GetSmsTemplateRequest(input) return out, req.Send() } -// UpdateApnsVoipSandboxChannelWithContext is the same as UpdateApnsVoipSandboxChannel with the addition of +// GetSmsTemplateWithContext is the same as GetSmsTemplate with the addition of // the ability to pass a context and additional request options. 
// -// See UpdateApnsVoipSandboxChannel for details on how to use this API operation. +// See GetSmsTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateApnsVoipSandboxChannelWithContext(ctx aws.Context, input *UpdateApnsVoipSandboxChannelInput, opts ...request.Option) (*UpdateApnsVoipSandboxChannelOutput, error) { - req, out := c.UpdateApnsVoipSandboxChannelRequest(input) +func (c *Pinpoint) GetSmsTemplateWithContext(ctx aws.Context, input *GetSmsTemplateInput, opts ...request.Option) (*GetSmsTemplateOutput, error) { + req, out := c.GetSmsTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateApplicationSettings = "UpdateApplicationSettings" +const opGetUserEndpoints = "GetUserEndpoints" -// UpdateApplicationSettingsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateApplicationSettings operation. The "output" return +// GetUserEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the GetUserEndpoints operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateApplicationSettings for more information on using the UpdateApplicationSettings +// See GetUserEndpoints for more information on using the GetUserEndpoints // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateApplicationSettingsRequest method. -// req, resp := client.UpdateApplicationSettingsRequest(params) +// // Example sending a request using the GetUserEndpointsRequest method. +// req, resp := client.GetUserEndpointsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApplicationSettings -func (c *Pinpoint) UpdateApplicationSettingsRequest(input *UpdateApplicationSettingsInput) (req *request.Request, output *UpdateApplicationSettingsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetUserEndpoints +func (c *Pinpoint) GetUserEndpointsRequest(input *GetUserEndpointsInput) (req *request.Request, output *GetUserEndpointsOutput) { op := &request.Operation{ - Name: opUpdateApplicationSettings, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/settings", + Name: opGetUserEndpoints, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/users/{user-id}", } if input == nil { - input = &UpdateApplicationSettingsInput{} + input = &GetUserEndpointsInput{} } - output = &UpdateApplicationSettingsOutput{} + output = &GetUserEndpointsOutput{} req = c.newRequest(op, input, output) return } -// UpdateApplicationSettings API operation for Amazon Pinpoint. +// GetUserEndpoints API operation for Amazon Pinpoint. // -// Used to update the settings for an app. 
+// Retrieves information about all the endpoints that are associated with a +// specific user ID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateApplicationSettings for usage and error information. +// API operation GetUserEndpoints for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApplicationSettings -func (c *Pinpoint) UpdateApplicationSettings(input *UpdateApplicationSettingsInput) (*UpdateApplicationSettingsOutput, error) { - req, out := c.UpdateApplicationSettingsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetUserEndpoints +func (c *Pinpoint) GetUserEndpoints(input *GetUserEndpointsInput) (*GetUserEndpointsOutput, error) { + req, out := c.GetUserEndpointsRequest(input) return out, req.Send() } -// UpdateApplicationSettingsWithContext is the same as UpdateApplicationSettings with the addition of +// GetUserEndpointsWithContext is the same as GetUserEndpoints with the addition of // the ability to pass a context and additional request options. // -// See UpdateApplicationSettings for details on how to use this API operation. +// See GetUserEndpoints for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateApplicationSettingsWithContext(ctx aws.Context, input *UpdateApplicationSettingsInput, opts ...request.Option) (*UpdateApplicationSettingsOutput, error) { - req, out := c.UpdateApplicationSettingsRequest(input) +func (c *Pinpoint) GetUserEndpointsWithContext(ctx aws.Context, input *GetUserEndpointsInput, opts ...request.Option) (*GetUserEndpointsOutput, error) { + req, out := c.GetUserEndpointsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateBaiduChannel = "UpdateBaiduChannel" +const opGetVoiceChannel = "GetVoiceChannel" -// UpdateBaiduChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateBaiduChannel operation. The "output" return +// GetVoiceChannelRequest generates a "aws/request.Request" representing the +// client's request for the GetVoiceChannel operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateBaiduChannel for more information on using the UpdateBaiduChannel +// See GetVoiceChannel for more information on using the GetVoiceChannel // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateBaiduChannelRequest method. -// req, resp := client.UpdateBaiduChannelRequest(params) +// // Example sending a request using the GetVoiceChannelRequest method. +// req, resp := client.GetVoiceChannelRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateBaiduChannel -func (c *Pinpoint) UpdateBaiduChannelRequest(input *UpdateBaiduChannelInput) (req *request.Request, output *UpdateBaiduChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetVoiceChannel +func (c *Pinpoint) GetVoiceChannelRequest(input *GetVoiceChannelInput) (req *request.Request, output *GetVoiceChannelOutput) { op := &request.Operation{ - Name: opUpdateBaiduChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/baidu", + Name: opGetVoiceChannel, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/channels/voice", } if input == nil { - input = &UpdateBaiduChannelInput{} + input = &GetVoiceChannelInput{} } - output = &UpdateBaiduChannelOutput{} + output = &GetVoiceChannelOutput{} req = c.newRequest(op, input, output) return } -// UpdateBaiduChannel API operation for Amazon Pinpoint. +// GetVoiceChannel API operation for Amazon Pinpoint. // -// Update a BAIDU GCM channel +// Retrieves information about the status and settings of the voice channel +// for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateBaiduChannel for usage and error information. +// API operation GetVoiceChannel for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
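// A hedged sketch of the runtime type assertion recommended above for
// mapping the listed error codes to awserr.Error values; the application
// ID is a placeholder assumption.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.GetVoiceChannel(&pinpoint.GetVoiceChannelInput{
		ApplicationId: aws.String("application-id"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case pinpoint.ErrCodeNotFoundException:
				log.Printf("voice channel not configured: %s", aerr.Message())
			case pinpoint.ErrCodeTooManyRequestsException:
				log.Printf("throttled: %s", aerr.Message())
			default:
				log.Printf("%s: %s", aerr.Code(), aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
	log.Println(out)
}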
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateBaiduChannel -func (c *Pinpoint) UpdateBaiduChannel(input *UpdateBaiduChannelInput) (*UpdateBaiduChannelOutput, error) { - req, out := c.UpdateBaiduChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetVoiceChannel +func (c *Pinpoint) GetVoiceChannel(input *GetVoiceChannelInput) (*GetVoiceChannelOutput, error) { + req, out := c.GetVoiceChannelRequest(input) return out, req.Send() } -// UpdateBaiduChannelWithContext is the same as UpdateBaiduChannel with the addition of +// GetVoiceChannelWithContext is the same as GetVoiceChannel with the addition of // the ability to pass a context and additional request options. // -// See UpdateBaiduChannel for details on how to use this API operation. +// See GetVoiceChannel for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateBaiduChannelWithContext(ctx aws.Context, input *UpdateBaiduChannelInput, opts ...request.Option) (*UpdateBaiduChannelOutput, error) { - req, out := c.UpdateBaiduChannelRequest(input) +func (c *Pinpoint) GetVoiceChannelWithContext(ctx aws.Context, input *GetVoiceChannelInput, opts ...request.Option) (*GetVoiceChannelOutput, error) { + req, out := c.GetVoiceChannelRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateCampaign = "UpdateCampaign" +const opListJourneys = "ListJourneys" -// UpdateCampaignRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCampaign operation. The "output" return +// ListJourneysRequest generates a "aws/request.Request" representing the +// client's request for the ListJourneys operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateCampaign for more information on using the UpdateCampaign +// See ListJourneys for more information on using the ListJourneys // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateCampaignRequest method. -// req, resp := client.UpdateCampaignRequest(params) +// // Example sending a request using the ListJourneysRequest method. 
+// req, resp := client.ListJourneysRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateCampaign -func (c *Pinpoint) UpdateCampaignRequest(input *UpdateCampaignInput) (req *request.Request, output *UpdateCampaignOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListJourneys +func (c *Pinpoint) ListJourneysRequest(input *ListJourneysInput) (req *request.Request, output *ListJourneysOutput) { op := &request.Operation{ - Name: opUpdateCampaign, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}", + Name: opListJourneys, + HTTPMethod: "GET", + HTTPPath: "/v1/apps/{application-id}/journeys", } if input == nil { - input = &UpdateCampaignInput{} + input = &ListJourneysInput{} } - output = &UpdateCampaignOutput{} + output = &ListJourneysOutput{} req = c.newRequest(op, input, output) return } -// UpdateCampaign API operation for Amazon Pinpoint. +// ListJourneys API operation for Amazon Pinpoint. // -// Use to update a campaign. +// Retrieves information about the status, configuration, and other settings +// for all the journeys that are associated with an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateCampaign for usage and error information. +// API operation ListJourneys for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateCampaign -func (c *Pinpoint) UpdateCampaign(input *UpdateCampaignInput) (*UpdateCampaignOutput, error) { - req, out := c.UpdateCampaignRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListJourneys +func (c *Pinpoint) ListJourneys(input *ListJourneysInput) (*ListJourneysOutput, error) { + req, out := c.ListJourneysRequest(input) return out, req.Send() } -// UpdateCampaignWithContext is the same as UpdateCampaign with the addition of +// ListJourneysWithContext is the same as ListJourneys with the addition of // the ability to pass a context and additional request options. // -// See UpdateCampaign for details on how to use this API operation. +// See ListJourneys for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
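// A minimal sketch of the WithContext variant documented above, assuming
// default credentials; a non-nil context is mandatory, since a nil
// context panics. The application ID is a placeholder.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// Bound the ListJourneys call so it is cancelled after ten seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := client.ListJourneysWithContext(ctx, &pinpoint.ListJourneysInput{
		ApplicationId: aws.String("application-id"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}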
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateCampaignWithContext(ctx aws.Context, input *UpdateCampaignInput, opts ...request.Option) (*UpdateCampaignOutput, error) { - req, out := c.UpdateCampaignRequest(input) +func (c *Pinpoint) ListJourneysWithContext(ctx aws.Context, input *ListJourneysInput, opts ...request.Option) (*ListJourneysOutput, error) { + req, out := c.ListJourneysRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateEmailChannel = "UpdateEmailChannel" +const opListTagsForResource = "ListTagsForResource" -// UpdateEmailChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateEmailChannel operation. The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateEmailChannel for more information on using the UpdateEmailChannel +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateEmailChannelRequest method. -// req, resp := client.UpdateEmailChannelRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailChannel -func (c *Pinpoint) UpdateEmailChannelRequest(input *UpdateEmailChannelInput) (req *request.Request, output *UpdateEmailChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTagsForResource +func (c *Pinpoint) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opUpdateEmailChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/email", + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/v1/tags/{resource-arn}", } if input == nil { - input = &UpdateEmailChannelInput{} + input = &ListTagsForResourceInput{} } - output = &UpdateEmailChannelOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// UpdateEmailChannel API operation for Amazon Pinpoint. +// ListTagsForResource API operation for Amazon Pinpoint. // -// Update an email channel. +// Retrieves all the tags (keys and values) that are associated with an application, +// campaign, journey, message template, or segment. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateEmailChannel for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" -// Simple message object. -// -// * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. -// -// * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. -// -// * ErrCodeNotFoundException "NotFoundException" -// Simple message object. -// -// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailChannel -func (c *Pinpoint) UpdateEmailChannel(input *UpdateEmailChannelInput) (*UpdateEmailChannelOutput, error) { - req, out := c.UpdateEmailChannelRequest(input) +// API operation ListTagsForResource for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTagsForResource +func (c *Pinpoint) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// UpdateEmailChannelWithContext is the same as UpdateEmailChannel with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateEmailChannel for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateEmailChannelWithContext(ctx aws.Context, input *UpdateEmailChannelInput, opts ...request.Option) (*UpdateEmailChannelOutput, error) { - req, out := c.UpdateEmailChannelRequest(input) +func (c *Pinpoint) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateEndpoint = "UpdateEndpoint" +const opListTemplates = "ListTemplates" -// UpdateEndpointRequest generates a "aws/request.Request" representing the -// client's request for the UpdateEndpoint operation. The "output" return +// ListTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateEndpoint for more information on using the UpdateEndpoint +// See ListTemplates for more information on using the ListTemplates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateEndpointRequest method. -// req, resp := client.UpdateEndpointRequest(params) +// // Example sending a request using the ListTemplatesRequest method. 
+// req, resp := client.ListTemplatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpoint -func (c *Pinpoint) UpdateEndpointRequest(input *UpdateEndpointInput) (req *request.Request, output *UpdateEndpointOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTemplates +func (c *Pinpoint) ListTemplatesRequest(input *ListTemplatesInput) (req *request.Request, output *ListTemplatesOutput) { op := &request.Operation{ - Name: opUpdateEndpoint, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/endpoints/{endpoint-id}", + Name: opListTemplates, + HTTPMethod: "GET", + HTTPPath: "/v1/templates", } if input == nil { - input = &UpdateEndpointInput{} + input = &ListTemplatesInput{} } - output = &UpdateEndpointOutput{} + output = &ListTemplatesOutput{} req = c.newRequest(op, input, output) return } -// UpdateEndpoint API operation for Amazon Pinpoint. +// ListTemplates API operation for Amazon Pinpoint. // -// Creates or updates an endpoint. +// Retrieves information about all the message templates that are associated +// with your Amazon Pinpoint account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateEndpoint for usage and error information. +// API operation ListTemplates for usage and error information. // // Returned Error Codes: +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. -// -// * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // -// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. -// -// * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpoint -func (c *Pinpoint) UpdateEndpoint(input *UpdateEndpointInput) (*UpdateEndpointOutput, error) { - req, out := c.UpdateEndpointRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/ListTemplates +func (c *Pinpoint) ListTemplates(input *ListTemplatesInput) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) return out, req.Send() } -// UpdateEndpointWithContext is the same as UpdateEndpoint with the addition of +// ListTemplatesWithContext is the same as ListTemplates with the addition of // the ability to pass a context and additional request options. // -// See UpdateEndpoint for details on how to use this API operation. +// See ListTemplates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
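// A sketch of passing per-request options through the opts variadic that
// ApplyOptions consumes, assuming default credentials; the log-level
// option is just one example of a request.Option.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// Dump the HTTP exchange for this single ListTemplates call only.
	out, err := client.ListTemplatesWithContext(aws.BackgroundContext(),
		&pinpoint.ListTemplatesInput{},
		request.WithLogLevel(aws.LogDebugWithHTTPBody),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}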
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateEndpointWithContext(ctx aws.Context, input *UpdateEndpointInput, opts ...request.Option) (*UpdateEndpointOutput, error) { - req, out := c.UpdateEndpointRequest(input) +func (c *Pinpoint) ListTemplatesWithContext(ctx aws.Context, input *ListTemplatesInput, opts ...request.Option) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateEndpointsBatch = "UpdateEndpointsBatch" +const opPhoneNumberValidate = "PhoneNumberValidate" -// UpdateEndpointsBatchRequest generates a "aws/request.Request" representing the -// client's request for the UpdateEndpointsBatch operation. The "output" return +// PhoneNumberValidateRequest generates a "aws/request.Request" representing the +// client's request for the PhoneNumberValidate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateEndpointsBatch for more information on using the UpdateEndpointsBatch +// See PhoneNumberValidate for more information on using the PhoneNumberValidate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateEndpointsBatchRequest method. -// req, resp := client.UpdateEndpointsBatchRequest(params) +// // Example sending a request using the PhoneNumberValidateRequest method. +// req, resp := client.PhoneNumberValidateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpointsBatch -func (c *Pinpoint) UpdateEndpointsBatchRequest(input *UpdateEndpointsBatchInput) (req *request.Request, output *UpdateEndpointsBatchOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PhoneNumberValidate +func (c *Pinpoint) PhoneNumberValidateRequest(input *PhoneNumberValidateInput) (req *request.Request, output *PhoneNumberValidateOutput) { op := &request.Operation{ - Name: opUpdateEndpointsBatch, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/endpoints", + Name: opPhoneNumberValidate, + HTTPMethod: "POST", + HTTPPath: "/v1/phone/number/validate", } if input == nil { - input = &UpdateEndpointsBatchInput{} + input = &PhoneNumberValidateInput{} } - output = &UpdateEndpointsBatchOutput{} + output = &PhoneNumberValidateOutput{} req = c.newRequest(op, input, output) return } -// UpdateEndpointsBatch API operation for Amazon Pinpoint. +// PhoneNumberValidate API operation for Amazon Pinpoint. // -// Use to update a batch of endpoints. +// Retrieves information about a phone number. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateEndpointsBatch for usage and error information. 
+// API operation PhoneNumberValidate for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpointsBatch -func (c *Pinpoint) UpdateEndpointsBatch(input *UpdateEndpointsBatchInput) (*UpdateEndpointsBatchOutput, error) { - req, out := c.UpdateEndpointsBatchRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PhoneNumberValidate +func (c *Pinpoint) PhoneNumberValidate(input *PhoneNumberValidateInput) (*PhoneNumberValidateOutput, error) { + req, out := c.PhoneNumberValidateRequest(input) return out, req.Send() } -// UpdateEndpointsBatchWithContext is the same as UpdateEndpointsBatch with the addition of +// PhoneNumberValidateWithContext is the same as PhoneNumberValidate with the addition of // the ability to pass a context and additional request options. // -// See UpdateEndpointsBatch for details on how to use this API operation. +// See PhoneNumberValidate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateEndpointsBatchWithContext(ctx aws.Context, input *UpdateEndpointsBatchInput, opts ...request.Option) (*UpdateEndpointsBatchOutput, error) { - req, out := c.UpdateEndpointsBatchRequest(input) +func (c *Pinpoint) PhoneNumberValidateWithContext(ctx aws.Context, input *PhoneNumberValidateInput, opts ...request.Option) (*PhoneNumberValidateOutput, error) { + req, out := c.PhoneNumberValidateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateGcmChannel = "UpdateGcmChannel" +const opPutEventStream = "PutEventStream" -// UpdateGcmChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGcmChannel operation. The "output" return +// PutEventStreamRequest generates a "aws/request.Request" representing the +// client's request for the PutEventStream operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateGcmChannel for more information on using the UpdateGcmChannel +// See PutEventStream for more information on using the PutEventStream // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateGcmChannelRequest method. -// req, resp := client.UpdateGcmChannelRequest(params) +// // Example sending a request using the PutEventStreamRequest method. +// req, resp := client.PutEventStreamRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateGcmChannel -func (c *Pinpoint) UpdateGcmChannelRequest(input *UpdateGcmChannelInput) (req *request.Request, output *UpdateGcmChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEventStream +func (c *Pinpoint) PutEventStreamRequest(input *PutEventStreamInput) (req *request.Request, output *PutEventStreamOutput) { op := &request.Operation{ - Name: opUpdateGcmChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/gcm", + Name: opPutEventStream, + HTTPMethod: "POST", + HTTPPath: "/v1/apps/{application-id}/eventstream", } if input == nil { - input = &UpdateGcmChannelInput{} + input = &PutEventStreamInput{} } - output = &UpdateGcmChannelOutput{} + output = &PutEventStreamOutput{} req = c.newRequest(op, input, output) return } -// UpdateGcmChannel API operation for Amazon Pinpoint. +// PutEventStream API operation for Amazon Pinpoint. // -// Use to update the GCM channel for an app. +// Creates a new event stream for an application or updates the settings of +// an existing event stream for an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateGcmChannel for usage and error information. +// API operation PutEventStream for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. 
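// A hedged sketch of PutEventStream as described above (create or update
// the Kinesis event stream for an application); every ARN and ID below
// is a placeholder assumption.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.PutEventStream(&pinpoint.PutEventStreamInput{
		ApplicationId: aws.String("application-id"),
		WriteEventStream: &pinpoint.WriteEventStream{
			// Stream receiving the exported events, and the role Pinpoint assumes.
			DestinationStreamArn: aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/example"),
			RoleArn:              aws.String("arn:aws:iam::111122223333:role/example"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}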
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateGcmChannel -func (c *Pinpoint) UpdateGcmChannel(input *UpdateGcmChannelInput) (*UpdateGcmChannelOutput, error) { - req, out := c.UpdateGcmChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEventStream +func (c *Pinpoint) PutEventStream(input *PutEventStreamInput) (*PutEventStreamOutput, error) { + req, out := c.PutEventStreamRequest(input) return out, req.Send() } -// UpdateGcmChannelWithContext is the same as UpdateGcmChannel with the addition of +// PutEventStreamWithContext is the same as PutEventStream with the addition of // the ability to pass a context and additional request options. // -// See UpdateGcmChannel for details on how to use this API operation. +// See PutEventStream for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateGcmChannelWithContext(ctx aws.Context, input *UpdateGcmChannelInput, opts ...request.Option) (*UpdateGcmChannelOutput, error) { - req, out := c.UpdateGcmChannelRequest(input) +func (c *Pinpoint) PutEventStreamWithContext(ctx aws.Context, input *PutEventStreamInput, opts ...request.Option) (*PutEventStreamOutput, error) { + req, out := c.PutEventStreamRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSegment = "UpdateSegment" +const opPutEvents = "PutEvents" -// UpdateSegmentRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSegment operation. The "output" return +// PutEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvents operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSegment for more information on using the UpdateSegment +// See PutEvents for more information on using the PutEvents // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSegmentRequest method. -// req, resp := client.UpdateSegmentRequest(params) +// // Example sending a request using the PutEventsRequest method. 
+// req, resp := client.PutEventsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSegment -func (c *Pinpoint) UpdateSegmentRequest(input *UpdateSegmentInput) (req *request.Request, output *UpdateSegmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEvents +func (c *Pinpoint) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { op := &request.Operation{ - Name: opUpdateSegment, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", + Name: opPutEvents, + HTTPMethod: "POST", + HTTPPath: "/v1/apps/{application-id}/events", } if input == nil { - input = &UpdateSegmentInput{} + input = &PutEventsInput{} } - output = &UpdateSegmentOutput{} + output = &PutEventsOutput{} req = c.newRequest(op, input, output) return } -// UpdateSegment API operation for Amazon Pinpoint. +// PutEvents API operation for Amazon Pinpoint. // -// Used to update a segment. +// Creates a new event to record for endpoints, or creates or updates endpoint +// data that existing events are associated with. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateSegment for usage and error information. +// API operation PutEvents for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSegment -func (c *Pinpoint) UpdateSegment(input *UpdateSegmentInput) (*UpdateSegmentOutput, error) { - req, out := c.UpdateSegmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/PutEvents +func (c *Pinpoint) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) return out, req.Send() } -// UpdateSegmentWithContext is the same as UpdateSegment with the addition of +// PutEventsWithContext is the same as PutEvents with the addition of // the ability to pass a context and additional request options. // -// See UpdateSegment for details on how to use this API operation. +// See PutEvents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
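// A sketch of recording a custom event with PutEvents, assuming the
// batch-item shapes generated in this SDK version; the endpoint ID,
// event ID, and event type are hypothetical placeholders.
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.PutEvents(&pinpoint.PutEventsInput{
		ApplicationId: aws.String("application-id"),
		EventsRequest: &pinpoint.EventsRequest{
			// One batch item per endpoint; the event map key is a caller-chosen ID.
			BatchItem: map[string]*pinpoint.EventsBatch{
				"endpoint-id": {
					Endpoint: &pinpoint.PublicEndpoint{},
					Events: map[string]*pinpoint.Event{
						"event-id": {
							EventType: aws.String("_custom.example"),
							Timestamp: aws.String(time.Now().Format(time.RFC3339)),
						},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}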
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateSegmentWithContext(ctx aws.Context, input *UpdateSegmentInput, opts ...request.Option) (*UpdateSegmentOutput, error) { - req, out := c.UpdateSegmentRequest(input) +func (c *Pinpoint) PutEventsWithContext(ctx aws.Context, input *PutEventsInput, opts ...request.Option) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSmsChannel = "UpdateSmsChannel" +const opRemoveAttributes = "RemoveAttributes" -// UpdateSmsChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSmsChannel operation. The "output" return +// RemoveAttributesRequest generates a "aws/request.Request" representing the +// client's request for the RemoveAttributes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSmsChannel for more information on using the UpdateSmsChannel +// See RemoveAttributes for more information on using the RemoveAttributes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSmsChannelRequest method. -// req, resp := client.UpdateSmsChannelRequest(params) +// // Example sending a request using the RemoveAttributesRequest method. +// req, resp := client.RemoveAttributesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsChannel -func (c *Pinpoint) UpdateSmsChannelRequest(input *UpdateSmsChannelInput) (req *request.Request, output *UpdateSmsChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/RemoveAttributes +func (c *Pinpoint) RemoveAttributesRequest(input *RemoveAttributesInput) (req *request.Request, output *RemoveAttributesOutput) { op := &request.Operation{ - Name: opUpdateSmsChannel, + Name: opRemoveAttributes, HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/sms", + HTTPPath: "/v1/apps/{application-id}/attributes/{attribute-type}", } if input == nil { - input = &UpdateSmsChannelInput{} + input = &RemoveAttributesInput{} } - output = &UpdateSmsChannelOutput{} + output = &RemoveAttributesOutput{} req = c.newRequest(op, input, output) return } -// UpdateSmsChannel API operation for Amazon Pinpoint. +// RemoveAttributes API operation for Amazon Pinpoint. // -// Update an SMS channel. +// Removes one or more attributes, of the same attribute type, from all the +// endpoints that are associated with an application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateSmsChannel for usage and error information. +// API operation RemoveAttributes for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. 
+// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsChannel -func (c *Pinpoint) UpdateSmsChannel(input *UpdateSmsChannelInput) (*UpdateSmsChannelOutput, error) { - req, out := c.UpdateSmsChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/RemoveAttributes +func (c *Pinpoint) RemoveAttributes(input *RemoveAttributesInput) (*RemoveAttributesOutput, error) { + req, out := c.RemoveAttributesRequest(input) return out, req.Send() } -// UpdateSmsChannelWithContext is the same as UpdateSmsChannel with the addition of +// RemoveAttributesWithContext is the same as RemoveAttributes with the addition of // the ability to pass a context and additional request options. // -// See UpdateSmsChannel for details on how to use this API operation. +// See RemoveAttributes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateSmsChannelWithContext(ctx aws.Context, input *UpdateSmsChannelInput, opts ...request.Option) (*UpdateSmsChannelOutput, error) { - req, out := c.UpdateSmsChannelRequest(input) +func (c *Pinpoint) RemoveAttributesWithContext(ctx aws.Context, input *RemoveAttributesInput, opts ...request.Option) (*RemoveAttributesOutput, error) { + req, out := c.RemoveAttributesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateVoiceChannel = "UpdateVoiceChannel" +const opSendMessages = "SendMessages" -// UpdateVoiceChannelRequest generates a "aws/request.Request" representing the -// client's request for the UpdateVoiceChannel operation. The "output" return +// SendMessagesRequest generates a "aws/request.Request" representing the +// client's request for the SendMessages operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateVoiceChannel for more information on using the UpdateVoiceChannel +// See SendMessages for more information on using the SendMessages // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateVoiceChannelRequest method. 
-// req, resp := client.UpdateVoiceChannelRequest(params) +// // Example sending a request using the SendMessagesRequest method. +// req, resp := client.SendMessagesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateVoiceChannel -func (c *Pinpoint) UpdateVoiceChannelRequest(input *UpdateVoiceChannelInput) (req *request.Request, output *UpdateVoiceChannelOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendMessages +func (c *Pinpoint) SendMessagesRequest(input *SendMessagesInput) (req *request.Request, output *SendMessagesOutput) { op := &request.Operation{ - Name: opUpdateVoiceChannel, - HTTPMethod: "PUT", - HTTPPath: "/v1/apps/{application-id}/channels/voice", + Name: opSendMessages, + HTTPMethod: "POST", + HTTPPath: "/v1/apps/{application-id}/messages", } if input == nil { - input = &UpdateVoiceChannelInput{} + input = &SendMessagesInput{} } - output = &UpdateVoiceChannelOutput{} + output = &SendMessagesOutput{} req = c.newRequest(op, input, output) return } -// UpdateVoiceChannel API operation for Amazon Pinpoint. +// SendMessages API operation for Amazon Pinpoint. // -// Update an Voice channel +// Creates and sends a direct message. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Pinpoint's -// API operation UpdateVoiceChannel for usage and error information. +// API operation SendMessages for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeForbiddenException "ForbiddenException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeNotFoundException "NotFoundException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeMethodNotAllowedException "MethodNotAllowedException" -// Simple message object. +// Provides information about an API request or response. // // * ErrCodeTooManyRequestsException "TooManyRequestsException" -// Simple message object. +// Provides information about an API request or response. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateVoiceChannel -func (c *Pinpoint) UpdateVoiceChannel(input *UpdateVoiceChannelInput) (*UpdateVoiceChannelOutput, error) { - req, out := c.UpdateVoiceChannelRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendMessages +func (c *Pinpoint) SendMessages(input *SendMessagesInput) (*SendMessagesOutput, error) { + req, out := c.SendMessagesRequest(input) return out, req.Send() } -// UpdateVoiceChannelWithContext is the same as UpdateVoiceChannel with the addition of +// SendMessagesWithContext is the same as SendMessages with the addition of // the ability to pass a context and additional request options. // -// See UpdateVoiceChannel for details on how to use this API operation. +// See SendMessages for details on how to use this API operation. 
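// A hedged sketch of sending a transactional SMS with SendMessages, as
// documented above; the destination number and application ID are
// placeholders, and the channel and message-type constants are assumed
// from this SDK version.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.SendMessages(&pinpoint.SendMessagesInput{
		ApplicationId: aws.String("application-id"),
		MessageRequest: &pinpoint.MessageRequest{
			// Address the message directly to a phone number on the SMS channel.
			Addresses: map[string]*pinpoint.AddressConfiguration{
				"+12065550100": {ChannelType: aws.String(pinpoint.ChannelTypeSms)},
			},
			MessageConfiguration: &pinpoint.DirectMessageConfiguration{
				SMSMessage: &pinpoint.SMSMessage{
					Body:        aws.String("example body"),
					MessageType: aws.String(pinpoint.MessageTypeTransactional),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}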
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Pinpoint) UpdateVoiceChannelWithContext(ctx aws.Context, input *UpdateVoiceChannelInput, opts ...request.Option) (*UpdateVoiceChannelOutput, error) { - req, out := c.UpdateVoiceChannelRequest(input) +func (c *Pinpoint) SendMessagesWithContext(ctx aws.Context, input *SendMessagesInput, opts ...request.Option) (*SendMessagesOutput, error) { + req, out := c.SendMessagesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Amazon Device Messaging channel definition. -type ADMChannelRequest struct { - _ struct{} `type:"structure"` - - // The Client ID that you obtained from the Amazon App Distribution Portal. - ClientId *string `type:"string"` - - // The Client Secret that you obtained from the Amazon App Distribution Portal. - ClientSecret *string `type:"string"` - - // Indicates whether or not the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` -} +const opSendUsersMessages = "SendUsersMessages" -// String returns the string representation -func (s ADMChannelRequest) String() string { - return awsutil.Prettify(s) -} +// SendUsersMessagesRequest generates a "aws/request.Request" representing the +// client's request for the SendUsersMessages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendUsersMessages for more information on using the SendUsersMessages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendUsersMessagesRequest method. +// req, resp := client.SendUsersMessagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendUsersMessages +func (c *Pinpoint) SendUsersMessagesRequest(input *SendUsersMessagesInput) (req *request.Request, output *SendUsersMessagesOutput) { + op := &request.Operation{ + Name: opSendUsersMessages, + HTTPMethod: "POST", + HTTPPath: "/v1/apps/{application-id}/users-messages", + } -// GoString returns the string representation -func (s ADMChannelRequest) GoString() string { - return s.String() -} + if input == nil { + input = &SendUsersMessagesInput{} + } -// SetClientId sets the ClientId field's value. -func (s *ADMChannelRequest) SetClientId(v string) *ADMChannelRequest { - s.ClientId = &v - return s + output = &SendUsersMessagesOutput{} + req = c.newRequest(op, input, output) + return } -// SetClientSecret sets the ClientSecret field's value. -func (s *ADMChannelRequest) SetClientSecret(v string) *ADMChannelRequest { - s.ClientSecret = &v - return s +// SendUsersMessages API operation for Amazon Pinpoint. +// +// Creates and sends a message to a list of users. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation SendUsersMessages for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/SendUsersMessages +func (c *Pinpoint) SendUsersMessages(input *SendUsersMessagesInput) (*SendUsersMessagesOutput, error) { + req, out := c.SendUsersMessagesRequest(input) + return out, req.Send() } -// SetEnabled sets the Enabled field's value. -func (s *ADMChannelRequest) SetEnabled(v bool) *ADMChannelRequest { - s.Enabled = &v - return s +// SendUsersMessagesWithContext is the same as SendUsersMessages with the addition of +// the ability to pass a context and additional request options. +// +// See SendUsersMessages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) SendUsersMessagesWithContext(ctx aws.Context, input *SendUsersMessagesInput, opts ...request.Option) (*SendUsersMessagesOutput, error) { + req, out := c.SendUsersMessagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// Amazon Device Messaging channel definition. -type ADMChannelResponse struct { - _ struct{} `type:"structure"` - - // The ID of the application to which the channel applies. - ApplicationId *string `type:"string"` - - // The date and time when this channel was created. - CreationDate *string `type:"string"` - - // Indicates whether or not the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` - - // Not used. Retained for backwards compatibility. - HasCredential *bool `type:"boolean"` - - // (Deprecated) An identifier for the channel. Retained for backwards compatibility. - Id *string `type:"string"` - - // Indicates whether or not the channel is archived. - IsArchived *bool `type:"boolean"` - - // The user who last updated this channel. - LastModifiedBy *string `type:"string"` +const opTagResource = "TagResource" - // The date and time when this channel was last modified. - LastModifiedDate *string `type:"string"` +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/TagResource +func (c *Pinpoint) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v1/tags/{resource-arn}", + } - // The platform type. For this channel, the value is always "ADM." - Platform *string `type:"string"` + if input == nil { + input = &TagResourceInput{} + } - // The channel version. - Version *int64 `type:"integer"` + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return } -// String returns the string representation +// TagResource API operation for Amazon Pinpoint. +// +// Adds one or more tags (keys and values) to an application, campaign, journey, +// message template, or segment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation TagResource for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/TagResource +func (c *Pinpoint) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UntagResource +func (c *Pinpoint) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/tags/{resource-arn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Pinpoint. +// +// Removes one or more tags (keys and values) from an application, campaign, +// journey, message template, or segment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UntagResource for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UntagResource +func (c *Pinpoint) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAdmChannel = "UpdateAdmChannel" + +// UpdateAdmChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAdmChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAdmChannel for more information on using the UpdateAdmChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAdmChannelRequest method. 
+// req, resp := client.UpdateAdmChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateAdmChannel +func (c *Pinpoint) UpdateAdmChannelRequest(input *UpdateAdmChannelInput) (req *request.Request, output *UpdateAdmChannelOutput) { + op := &request.Operation{ + Name: opUpdateAdmChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/adm", + } + + if input == nil { + input = &UpdateAdmChannelInput{} + } + + output = &UpdateAdmChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAdmChannel API operation for Amazon Pinpoint. +// +// Enables the ADM channel for an application or updates the status and settings +// of the ADM channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateAdmChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateAdmChannel +func (c *Pinpoint) UpdateAdmChannel(input *UpdateAdmChannelInput) (*UpdateAdmChannelOutput, error) { + req, out := c.UpdateAdmChannelRequest(input) + return out, req.Send() +} + +// UpdateAdmChannelWithContext is the same as UpdateAdmChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAdmChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateAdmChannelWithContext(ctx aws.Context, input *UpdateAdmChannelInput, opts ...request.Option) (*UpdateAdmChannelOutput, error) { + req, out := c.UpdateAdmChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateApnsChannel = "UpdateApnsChannel" + +// UpdateApnsChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApnsChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateApnsChannel for more information on using the UpdateApnsChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApnsChannelRequest method. +// req, resp := client.UpdateApnsChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsChannel +func (c *Pinpoint) UpdateApnsChannelRequest(input *UpdateApnsChannelInput) (req *request.Request, output *UpdateApnsChannelOutput) { + op := &request.Operation{ + Name: opUpdateApnsChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/apns", + } + + if input == nil { + input = &UpdateApnsChannelInput{} + } + + output = &UpdateApnsChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApnsChannel API operation for Amazon Pinpoint. +// +// Enables the APNs channel for an application or updates the status and settings +// of the APNs channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateApnsChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsChannel +func (c *Pinpoint) UpdateApnsChannel(input *UpdateApnsChannelInput) (*UpdateApnsChannelOutput, error) { + req, out := c.UpdateApnsChannelRequest(input) + return out, req.Send() +} + +// UpdateApnsChannelWithContext is the same as UpdateApnsChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApnsChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateApnsChannelWithContext(ctx aws.Context, input *UpdateApnsChannelInput, opts ...request.Option) (*UpdateApnsChannelOutput, error) { + req, out := c.UpdateApnsChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdateApnsSandboxChannel = "UpdateApnsSandboxChannel" + +// UpdateApnsSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApnsSandboxChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApnsSandboxChannel for more information on using the UpdateApnsSandboxChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApnsSandboxChannelRequest method. +// req, resp := client.UpdateApnsSandboxChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsSandboxChannel +func (c *Pinpoint) UpdateApnsSandboxChannelRequest(input *UpdateApnsSandboxChannelInput) (req *request.Request, output *UpdateApnsSandboxChannelOutput) { + op := &request.Operation{ + Name: opUpdateApnsSandboxChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/apns_sandbox", + } + + if input == nil { + input = &UpdateApnsSandboxChannelInput{} + } + + output = &UpdateApnsSandboxChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApnsSandboxChannel API operation for Amazon Pinpoint. +// +// Enables the APNs sandbox channel for an application or updates the status +// and settings of the APNs sandbox channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateApnsSandboxChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsSandboxChannel +func (c *Pinpoint) UpdateApnsSandboxChannel(input *UpdateApnsSandboxChannelInput) (*UpdateApnsSandboxChannelOutput, error) { + req, out := c.UpdateApnsSandboxChannelRequest(input) + return out, req.Send() +} + +// UpdateApnsSandboxChannelWithContext is the same as UpdateApnsSandboxChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApnsSandboxChannel for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateApnsSandboxChannelWithContext(ctx aws.Context, input *UpdateApnsSandboxChannelInput, opts ...request.Option) (*UpdateApnsSandboxChannelOutput, error) { + req, out := c.UpdateApnsSandboxChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateApnsVoipChannel = "UpdateApnsVoipChannel" + +// UpdateApnsVoipChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApnsVoipChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApnsVoipChannel for more information on using the UpdateApnsVoipChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApnsVoipChannelRequest method. +// req, resp := client.UpdateApnsVoipChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipChannel +func (c *Pinpoint) UpdateApnsVoipChannelRequest(input *UpdateApnsVoipChannelInput) (req *request.Request, output *UpdateApnsVoipChannelOutput) { + op := &request.Operation{ + Name: opUpdateApnsVoipChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip", + } + + if input == nil { + input = &UpdateApnsVoipChannelInput{} + } + + output = &UpdateApnsVoipChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApnsVoipChannel API operation for Amazon Pinpoint. +// +// Enables the APNs VoIP channel for an application or updates the status and +// settings of the APNs VoIP channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateApnsVoipChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. 
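+//
+// // A minimal editorial sketch of those runtime type assertions, assuming
+// // the standard awserr package (github.com/aws/aws-sdk-go/aws/awserr):
+// //
+// // _, err := client.UpdateApnsVoipChannel(params)
+// // if aerr, ok := err.(awserr.Error); ok {
+// //     switch aerr.Code() {
+// //     case pinpoint.ErrCodeNotFoundException:
+// //         // the application or channel does not exist
+// //     default:
+// //         fmt.Println(aerr.Code(), aerr.Message())
+// //     }
+// // }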
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipChannel +func (c *Pinpoint) UpdateApnsVoipChannel(input *UpdateApnsVoipChannelInput) (*UpdateApnsVoipChannelOutput, error) { + req, out := c.UpdateApnsVoipChannelRequest(input) + return out, req.Send() +} + +// UpdateApnsVoipChannelWithContext is the same as UpdateApnsVoipChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApnsVoipChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateApnsVoipChannelWithContext(ctx aws.Context, input *UpdateApnsVoipChannelInput, opts ...request.Option) (*UpdateApnsVoipChannelOutput, error) { + req, out := c.UpdateApnsVoipChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateApnsVoipSandboxChannel = "UpdateApnsVoipSandboxChannel" + +// UpdateApnsVoipSandboxChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApnsVoipSandboxChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApnsVoipSandboxChannel for more information on using the UpdateApnsVoipSandboxChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApnsVoipSandboxChannelRequest method. +// req, resp := client.UpdateApnsVoipSandboxChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipSandboxChannel +func (c *Pinpoint) UpdateApnsVoipSandboxChannelRequest(input *UpdateApnsVoipSandboxChannelInput) (req *request.Request, output *UpdateApnsVoipSandboxChannelOutput) { + op := &request.Operation{ + Name: opUpdateApnsVoipSandboxChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/apns_voip_sandbox", + } + + if input == nil { + input = &UpdateApnsVoipSandboxChannelInput{} + } + + output = &UpdateApnsVoipSandboxChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApnsVoipSandboxChannel API operation for Amazon Pinpoint. +// +// Enables the APNs VoIP sandbox channel for an application or updates the status +// and settings of the APNs VoIP sandbox channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateApnsVoipSandboxChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. 
+// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApnsVoipSandboxChannel +func (c *Pinpoint) UpdateApnsVoipSandboxChannel(input *UpdateApnsVoipSandboxChannelInput) (*UpdateApnsVoipSandboxChannelOutput, error) { + req, out := c.UpdateApnsVoipSandboxChannelRequest(input) + return out, req.Send() +} + +// UpdateApnsVoipSandboxChannelWithContext is the same as UpdateApnsVoipSandboxChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApnsVoipSandboxChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateApnsVoipSandboxChannelWithContext(ctx aws.Context, input *UpdateApnsVoipSandboxChannelInput, opts ...request.Option) (*UpdateApnsVoipSandboxChannelOutput, error) { + req, out := c.UpdateApnsVoipSandboxChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateApplicationSettings = "UpdateApplicationSettings" + +// UpdateApplicationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplicationSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateApplicationSettings for more information on using the UpdateApplicationSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateApplicationSettingsRequest method. +// req, resp := client.UpdateApplicationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApplicationSettings +func (c *Pinpoint) UpdateApplicationSettingsRequest(input *UpdateApplicationSettingsInput) (req *request.Request, output *UpdateApplicationSettingsOutput) { + op := &request.Operation{ + Name: opUpdateApplicationSettings, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/settings", + } + + if input == nil { + input = &UpdateApplicationSettingsInput{} + } + + output = &UpdateApplicationSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateApplicationSettings API operation for Amazon Pinpoint. 
+// +// Updates the settings for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateApplicationSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateApplicationSettings +func (c *Pinpoint) UpdateApplicationSettings(input *UpdateApplicationSettingsInput) (*UpdateApplicationSettingsOutput, error) { + req, out := c.UpdateApplicationSettingsRequest(input) + return out, req.Send() +} + +// UpdateApplicationSettingsWithContext is the same as UpdateApplicationSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateApplicationSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateApplicationSettingsWithContext(ctx aws.Context, input *UpdateApplicationSettingsInput, opts ...request.Option) (*UpdateApplicationSettingsOutput, error) { + req, out := c.UpdateApplicationSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBaiduChannel = "UpdateBaiduChannel" + +// UpdateBaiduChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBaiduChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBaiduChannel for more information on using the UpdateBaiduChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBaiduChannelRequest method. 
+// req, resp := client.UpdateBaiduChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateBaiduChannel +func (c *Pinpoint) UpdateBaiduChannelRequest(input *UpdateBaiduChannelInput) (req *request.Request, output *UpdateBaiduChannelOutput) { + op := &request.Operation{ + Name: opUpdateBaiduChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/baidu", + } + + if input == nil { + input = &UpdateBaiduChannelInput{} + } + + output = &UpdateBaiduChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBaiduChannel API operation for Amazon Pinpoint. +// +// Enables the Baidu channel for an application or updates the status and settings +// of the Baidu channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateBaiduChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateBaiduChannel +func (c *Pinpoint) UpdateBaiduChannel(input *UpdateBaiduChannelInput) (*UpdateBaiduChannelOutput, error) { + req, out := c.UpdateBaiduChannelRequest(input) + return out, req.Send() +} + +// UpdateBaiduChannelWithContext is the same as UpdateBaiduChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBaiduChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateBaiduChannelWithContext(ctx aws.Context, input *UpdateBaiduChannelInput, opts ...request.Option) (*UpdateBaiduChannelOutput, error) { + req, out := c.UpdateBaiduChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCampaign = "UpdateCampaign" + +// UpdateCampaignRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCampaign operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateCampaign for more information on using the UpdateCampaign +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCampaignRequest method. +// req, resp := client.UpdateCampaignRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateCampaign +func (c *Pinpoint) UpdateCampaignRequest(input *UpdateCampaignInput) (req *request.Request, output *UpdateCampaignOutput) { + op := &request.Operation{ + Name: opUpdateCampaign, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/campaigns/{campaign-id}", + } + + if input == nil { + input = &UpdateCampaignInput{} + } + + output = &UpdateCampaignOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateCampaign API operation for Amazon Pinpoint. +// +// Updates the configuration and other settings for a campaign. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateCampaign for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateCampaign +func (c *Pinpoint) UpdateCampaign(input *UpdateCampaignInput) (*UpdateCampaignOutput, error) { + req, out := c.UpdateCampaignRequest(input) + return out, req.Send() +} + +// UpdateCampaignWithContext is the same as UpdateCampaign with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCampaign for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateCampaignWithContext(ctx aws.Context, input *UpdateCampaignInput, opts ...request.Option) (*UpdateCampaignOutput, error) { + req, out := c.UpdateCampaignRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEmailChannel = "UpdateEmailChannel" + +// UpdateEmailChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEmailChannel operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEmailChannel for more information on using the UpdateEmailChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEmailChannelRequest method. +// req, resp := client.UpdateEmailChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailChannel +func (c *Pinpoint) UpdateEmailChannelRequest(input *UpdateEmailChannelInput) (req *request.Request, output *UpdateEmailChannelOutput) { + op := &request.Operation{ + Name: opUpdateEmailChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/email", + } + + if input == nil { + input = &UpdateEmailChannelInput{} + } + + output = &UpdateEmailChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEmailChannel API operation for Amazon Pinpoint. +// +// Enables the email channel for an application or updates the status and settings +// of the email channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateEmailChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailChannel +func (c *Pinpoint) UpdateEmailChannel(input *UpdateEmailChannelInput) (*UpdateEmailChannelOutput, error) { + req, out := c.UpdateEmailChannelRequest(input) + return out, req.Send() +} + +// UpdateEmailChannelWithContext is the same as UpdateEmailChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateEmailChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Pinpoint) UpdateEmailChannelWithContext(ctx aws.Context, input *UpdateEmailChannelInput, opts ...request.Option) (*UpdateEmailChannelOutput, error) { + req, out := c.UpdateEmailChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEmailTemplate = "UpdateEmailTemplate" + +// UpdateEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEmailTemplate for more information on using the UpdateEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEmailTemplateRequest method. +// req, resp := client.UpdateEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailTemplate +func (c *Pinpoint) UpdateEmailTemplateRequest(input *UpdateEmailTemplateInput) (req *request.Request, output *UpdateEmailTemplateOutput) { + op := &request.Operation{ + Name: opUpdateEmailTemplate, + HTTPMethod: "PUT", + HTTPPath: "/v1/templates/{template-name}/email", + } + + if input == nil { + input = &UpdateEmailTemplateInput{} + } + + output = &UpdateEmailTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEmailTemplate API operation for Amazon Pinpoint. +// +// Updates an existing message template that you can use in messages that are +// sent through the email channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateEmailTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEmailTemplate +func (c *Pinpoint) UpdateEmailTemplate(input *UpdateEmailTemplateInput) (*UpdateEmailTemplateOutput, error) { + req, out := c.UpdateEmailTemplateRequest(input) + return out, req.Send() +} + +// UpdateEmailTemplateWithContext is the same as UpdateEmailTemplate with the addition of +// the ability to pass a context and additional request options. 
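+//
+// // One editorial sketch of such additional request options, assuming the
+// // SDK's request.WithLogLevel helper from the aws/request package:
+// //
+// // out, err := client.UpdateEmailTemplateWithContext(ctx, params,
+// //     request.WithLogLevel(aws.LogDebugWithHTTPBody))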
+// +// See UpdateEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateEmailTemplateWithContext(ctx aws.Context, input *UpdateEmailTemplateInput, opts ...request.Option) (*UpdateEmailTemplateOutput, error) { + req, out := c.UpdateEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEndpoint = "UpdateEndpoint" + +// UpdateEndpointRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEndpoint for more information on using the UpdateEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEndpointRequest method. +// req, resp := client.UpdateEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpoint +func (c *Pinpoint) UpdateEndpointRequest(input *UpdateEndpointInput) (req *request.Request, output *UpdateEndpointOutput) { + op := &request.Operation{ + Name: opUpdateEndpoint, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/endpoints/{endpoint-id}", + } + + if input == nil { + input = &UpdateEndpointInput{} + } + + output = &UpdateEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEndpoint API operation for Amazon Pinpoint. +// +// Creates a new endpoint for an application or updates the settings and attributes +// of an existing endpoint for an application. You can also use this operation +// to define custom attributes (Attributes, Metrics, and UserAttributes properties) +// for an endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateEndpoint for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. 
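+//
+// // Illustrative only: one possible call shape, assuming the input carries
+// // the application ID, endpoint ID, and an EndpointRequest payload (the
+// // field names are this editor's assumption, not confirmed by this diff):
+// //
+// // out, err := client.UpdateEndpoint(&pinpoint.UpdateEndpointInput{
+// //     ApplicationId:   aws.String("application-id"),
+// //     EndpointId:      aws.String("endpoint-id"),
+// //     EndpointRequest: &pinpoint.EndpointRequest{},
+// // })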
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpoint +func (c *Pinpoint) UpdateEndpoint(input *UpdateEndpointInput) (*UpdateEndpointOutput, error) { + req, out := c.UpdateEndpointRequest(input) + return out, req.Send() +} + +// UpdateEndpointWithContext is the same as UpdateEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateEndpointWithContext(ctx aws.Context, input *UpdateEndpointInput, opts ...request.Option) (*UpdateEndpointOutput, error) { + req, out := c.UpdateEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEndpointsBatch = "UpdateEndpointsBatch" + +// UpdateEndpointsBatchRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEndpointsBatch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEndpointsBatch for more information on using the UpdateEndpointsBatch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEndpointsBatchRequest method. +// req, resp := client.UpdateEndpointsBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpointsBatch +func (c *Pinpoint) UpdateEndpointsBatchRequest(input *UpdateEndpointsBatchInput) (req *request.Request, output *UpdateEndpointsBatchOutput) { + op := &request.Operation{ + Name: opUpdateEndpointsBatch, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/endpoints", + } + + if input == nil { + input = &UpdateEndpointsBatchInput{} + } + + output = &UpdateEndpointsBatchOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateEndpointsBatch API operation for Amazon Pinpoint. +// +// Creates a new batch of endpoints for an application or updates the settings +// and attributes of a batch of existing endpoints for an application. You can +// also use this operation to define custom attributes (Attributes, Metrics, +// and UserAttributes properties) for a batch of endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateEndpointsBatch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. 
+// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateEndpointsBatch +func (c *Pinpoint) UpdateEndpointsBatch(input *UpdateEndpointsBatchInput) (*UpdateEndpointsBatchOutput, error) { + req, out := c.UpdateEndpointsBatchRequest(input) + return out, req.Send() +} + +// UpdateEndpointsBatchWithContext is the same as UpdateEndpointsBatch with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateEndpointsBatch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateEndpointsBatchWithContext(ctx aws.Context, input *UpdateEndpointsBatchInput, opts ...request.Option) (*UpdateEndpointsBatchOutput, error) { + req, out := c.UpdateEndpointsBatchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGcmChannel = "UpdateGcmChannel" + +// UpdateGcmChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGcmChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGcmChannel for more information on using the UpdateGcmChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGcmChannelRequest method. +// req, resp := client.UpdateGcmChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateGcmChannel +func (c *Pinpoint) UpdateGcmChannelRequest(input *UpdateGcmChannelInput) (req *request.Request, output *UpdateGcmChannelOutput) { + op := &request.Operation{ + Name: opUpdateGcmChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/gcm", + } + + if input == nil { + input = &UpdateGcmChannelInput{} + } + + output = &UpdateGcmChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGcmChannel API operation for Amazon Pinpoint. +// +// Enables the GCM channel for an application or updates the status and settings +// of the GCM channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
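+//
+// A minimal error-inspection sketch (svc is an illustrative *Pinpoint client;
+// the handling shown is a sketch, not prescriptive):
+//
+//    out, err := svc.UpdateGcmChannel(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case pinpoint.ErrCodeTooManyRequestsException:
+//            // back off before retrying
+//        default:
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }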
+// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateGcmChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateGcmChannel +func (c *Pinpoint) UpdateGcmChannel(input *UpdateGcmChannelInput) (*UpdateGcmChannelOutput, error) { + req, out := c.UpdateGcmChannelRequest(input) + return out, req.Send() +} + +// UpdateGcmChannelWithContext is the same as UpdateGcmChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGcmChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateGcmChannelWithContext(ctx aws.Context, input *UpdateGcmChannelInput, opts ...request.Option) (*UpdateGcmChannelOutput, error) { + req, out := c.UpdateGcmChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJourney = "UpdateJourney" + +// UpdateJourneyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJourney operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateJourney for more information on using the UpdateJourney +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateJourneyRequest method. +// req, resp := client.UpdateJourneyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateJourney +func (c *Pinpoint) UpdateJourneyRequest(input *UpdateJourneyInput) (req *request.Request, output *UpdateJourneyOutput) { + op := &request.Operation{ + Name: opUpdateJourney, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}", + } + + if input == nil { + input = &UpdateJourneyInput{} + } + + output = &UpdateJourneyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateJourney API operation for Amazon Pinpoint. +// +// Updates the configuration and other settings for a journey. 
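+//
+// A hypothetical call sketch (svc is an illustrative *Pinpoint client; the
+// field names mirror the {application-id} and {journey-id} path parameters,
+// and the full set of required fields is documented on UpdateJourneyInput):
+//
+//    out, err := svc.UpdateJourney(&pinpoint.UpdateJourneyInput{
+//        ApplicationId: aws.String("my-app-id"),      // fills {application-id}
+//        JourneyId:     aws.String("my-journey-id"),  // fills {journey-id}
+//        // ... remaining UpdateJourneyInput fields ...
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }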
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateJourney for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateJourney +func (c *Pinpoint) UpdateJourney(input *UpdateJourneyInput) (*UpdateJourneyOutput, error) { + req, out := c.UpdateJourneyRequest(input) + return out, req.Send() +} + +// UpdateJourneyWithContext is the same as UpdateJourney with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateJourney for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateJourneyWithContext(ctx aws.Context, input *UpdateJourneyInput, opts ...request.Option) (*UpdateJourneyOutput, error) { + req, out := c.UpdateJourneyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJourneyState = "UpdateJourneyState" + +// UpdateJourneyStateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJourneyState operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateJourneyState for more information on using the UpdateJourneyState +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateJourneyStateRequest method. 
+// req, resp := client.UpdateJourneyStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateJourneyState +func (c *Pinpoint) UpdateJourneyStateRequest(input *UpdateJourneyStateInput) (req *request.Request, output *UpdateJourneyStateOutput) { + op := &request.Operation{ + Name: opUpdateJourneyState, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/journeys/{journey-id}/state", + } + + if input == nil { + input = &UpdateJourneyStateInput{} + } + + output = &UpdateJourneyStateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateJourneyState API operation for Amazon Pinpoint. +// +// Cancels an active journey. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateJourneyState for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateJourneyState +func (c *Pinpoint) UpdateJourneyState(input *UpdateJourneyStateInput) (*UpdateJourneyStateOutput, error) { + req, out := c.UpdateJourneyStateRequest(input) + return out, req.Send() +} + +// UpdateJourneyStateWithContext is the same as UpdateJourneyState with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateJourneyState for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateJourneyStateWithContext(ctx aws.Context, input *UpdateJourneyStateInput, opts ...request.Option) (*UpdateJourneyStateOutput, error) { + req, out := c.UpdateJourneyStateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePushTemplate = "UpdatePushTemplate" + +// UpdatePushTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePushTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
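+//
+// A sketch of the two-step request pattern (client, params, and ctx are
+// illustrative):
+//
+//    req, out := client.UpdatePushTemplateRequest(params)
+//    req.SetContext(ctx) // optional: attach a context before sending
+//    err := req.Send()   // out is valid only when err is nil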
+// +// See UpdatePushTemplate for more information on using the UpdatePushTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePushTemplateRequest method. +// req, resp := client.UpdatePushTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdatePushTemplate +func (c *Pinpoint) UpdatePushTemplateRequest(input *UpdatePushTemplateInput) (req *request.Request, output *UpdatePushTemplateOutput) { + op := &request.Operation{ + Name: opUpdatePushTemplate, + HTTPMethod: "PUT", + HTTPPath: "/v1/templates/{template-name}/push", + } + + if input == nil { + input = &UpdatePushTemplateInput{} + } + + output = &UpdatePushTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdatePushTemplate API operation for Amazon Pinpoint. +// +// Updates an existing message template that you can use in messages that are +// sent through a push notification channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdatePushTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdatePushTemplate +func (c *Pinpoint) UpdatePushTemplate(input *UpdatePushTemplateInput) (*UpdatePushTemplateOutput, error) { + req, out := c.UpdatePushTemplateRequest(input) + return out, req.Send() +} + +// UpdatePushTemplateWithContext is the same as UpdatePushTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePushTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdatePushTemplateWithContext(ctx aws.Context, input *UpdatePushTemplateInput, opts ...request.Option) (*UpdatePushTemplateOutput, error) { + req, out := c.UpdatePushTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
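+ // The attached context governs cancellation of the HTTP request made by
+ // Send; out is only populated when Send returns a nil error.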
+ return out, req.Send() +} + +const opUpdateSegment = "UpdateSegment" + +// UpdateSegmentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSegment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSegment for more information on using the UpdateSegment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSegmentRequest method. +// req, resp := client.UpdateSegmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSegment +func (c *Pinpoint) UpdateSegmentRequest(input *UpdateSegmentInput) (req *request.Request, output *UpdateSegmentOutput) { + op := &request.Operation{ + Name: opUpdateSegment, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/segments/{segment-id}", + } + + if input == nil { + input = &UpdateSegmentInput{} + } + + output = &UpdateSegmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSegment API operation for Amazon Pinpoint. +// +// Creates a new segment for an application or updates the configuration, dimension, +// and other settings for an existing segment that's associated with an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateSegment for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSegment +func (c *Pinpoint) UpdateSegment(input *UpdateSegmentInput) (*UpdateSegmentOutput, error) { + req, out := c.UpdateSegmentRequest(input) + return out, req.Send() +} + +// UpdateSegmentWithContext is the same as UpdateSegment with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSegment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
+// See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Pinpoint) UpdateSegmentWithContext(ctx aws.Context, input *UpdateSegmentInput, opts ...request.Option) (*UpdateSegmentOutput, error) {
+ req, out := c.UpdateSegmentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateSmsChannel = "UpdateSmsChannel"
+
+// UpdateSmsChannelRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSmsChannel operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateSmsChannel for more information on using the UpdateSmsChannel
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateSmsChannelRequest method.
+// req, resp := client.UpdateSmsChannelRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsChannel
+func (c *Pinpoint) UpdateSmsChannelRequest(input *UpdateSmsChannelInput) (req *request.Request, output *UpdateSmsChannelOutput) {
+ op := &request.Operation{
+ Name: opUpdateSmsChannel,
+ HTTPMethod: "PUT",
+ HTTPPath: "/v1/apps/{application-id}/channels/sms",
+ }
+
+ if input == nil {
+ input = &UpdateSmsChannelInput{}
+ }
+
+ output = &UpdateSmsChannelOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateSmsChannel API operation for Amazon Pinpoint.
+//
+// Enables the SMS channel for an application or updates the status and settings
+// of the SMS channel for an application.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Pinpoint's
+// API operation UpdateSmsChannel for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBadRequestException "BadRequestException"
+// Provides information about an API request or response.
+//
+// * ErrCodeInternalServerErrorException "InternalServerErrorException"
+// Provides information about an API request or response.
+//
+// * ErrCodeForbiddenException "ForbiddenException"
+// Provides information about an API request or response.
+//
+// * ErrCodeNotFoundException "NotFoundException"
+// Provides information about an API request or response.
+//
+// * ErrCodeMethodNotAllowedException "MethodNotAllowedException"
+// Provides information about an API request or response.
+//
+// * ErrCodeTooManyRequestsException "TooManyRequestsException"
+// Provides information about an API request or response.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsChannel
+func (c *Pinpoint) UpdateSmsChannel(input *UpdateSmsChannelInput) (*UpdateSmsChannelOutput, error) {
+ req, out := c.UpdateSmsChannelRequest(input)
+ return out, req.Send()
+}
+
+// UpdateSmsChannelWithContext is the same as UpdateSmsChannel with the addition of
+// the ability to pass a context and additional request options.
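+//
+// A minimal cancellation sketch (the timeout value is illustrative; aws.Context
+// is satisfied by a standard context.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.UpdateSmsChannelWithContext(ctx, input)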
+// +// See UpdateSmsChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateSmsChannelWithContext(ctx aws.Context, input *UpdateSmsChannelInput, opts ...request.Option) (*UpdateSmsChannelOutput, error) { + req, out := c.UpdateSmsChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSmsTemplate = "UpdateSmsTemplate" + +// UpdateSmsTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSmsTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSmsTemplate for more information on using the UpdateSmsTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSmsTemplateRequest method. +// req, resp := client.UpdateSmsTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsTemplate +func (c *Pinpoint) UpdateSmsTemplateRequest(input *UpdateSmsTemplateInput) (req *request.Request, output *UpdateSmsTemplateOutput) { + op := &request.Operation{ + Name: opUpdateSmsTemplate, + HTTPMethod: "PUT", + HTTPPath: "/v1/templates/{template-name}/sms", + } + + if input == nil { + input = &UpdateSmsTemplateInput{} + } + + output = &UpdateSmsTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSmsTemplate API operation for Amazon Pinpoint. +// +// Updates an existing message template that you can use in messages that are +// sent through the SMS channel. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateSmsTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. +// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateSmsTemplate +func (c *Pinpoint) UpdateSmsTemplate(input *UpdateSmsTemplateInput) (*UpdateSmsTemplateOutput, error) { + req, out := c.UpdateSmsTemplateRequest(input) + return out, req.Send() +} + +// UpdateSmsTemplateWithContext is the same as UpdateSmsTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSmsTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateSmsTemplateWithContext(ctx aws.Context, input *UpdateSmsTemplateInput, opts ...request.Option) (*UpdateSmsTemplateOutput, error) { + req, out := c.UpdateSmsTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateVoiceChannel = "UpdateVoiceChannel" + +// UpdateVoiceChannelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateVoiceChannel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateVoiceChannel for more information on using the UpdateVoiceChannel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateVoiceChannelRequest method. +// req, resp := client.UpdateVoiceChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateVoiceChannel +func (c *Pinpoint) UpdateVoiceChannelRequest(input *UpdateVoiceChannelInput) (req *request.Request, output *UpdateVoiceChannelOutput) { + op := &request.Operation{ + Name: opUpdateVoiceChannel, + HTTPMethod: "PUT", + HTTPPath: "/v1/apps/{application-id}/channels/voice", + } + + if input == nil { + input = &UpdateVoiceChannelInput{} + } + + output = &UpdateVoiceChannelOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateVoiceChannel API operation for Amazon Pinpoint. +// +// Enables the voice channel for an application or updates the status and settings +// of the voice channel for an application. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Pinpoint's +// API operation UpdateVoiceChannel for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// Provides information about an API request or response. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Provides information about an API request or response. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Provides information about an API request or response. 
+// +// * ErrCodeNotFoundException "NotFoundException" +// Provides information about an API request or response. +// +// * ErrCodeMethodNotAllowedException "MethodNotAllowedException" +// Provides information about an API request or response. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Provides information about an API request or response. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateVoiceChannel +func (c *Pinpoint) UpdateVoiceChannel(input *UpdateVoiceChannelInput) (*UpdateVoiceChannelOutput, error) { + req, out := c.UpdateVoiceChannelRequest(input) + return out, req.Send() +} + +// UpdateVoiceChannelWithContext is the same as UpdateVoiceChannel with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateVoiceChannel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Pinpoint) UpdateVoiceChannelWithContext(ctx aws.Context, input *UpdateVoiceChannelInput, opts ...request.Option) (*UpdateVoiceChannelOutput, error) { + req, out := c.UpdateVoiceChannelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the status and settings of the ADM (Amazon Device Messaging) channel +// for an application. +type ADMChannelRequest struct { + _ struct{} `type:"structure"` + + // The Client ID that you received from Amazon to send messages by using ADM. + // + // ClientId is a required field + ClientId *string `type:"string" required:"true"` + + // The Client Secret that you received from Amazon to send messages by using + // ADM. + // + // ClientSecret is a required field + ClientSecret *string `type:"string" required:"true"` + + // Specifies whether to enable the ADM channel for the application. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ADMChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ADMChannelRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ADMChannelRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ADMChannelRequest"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *ADMChannelRequest) SetClientId(v string) *ADMChannelRequest { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *ADMChannelRequest) SetClientSecret(v string) *ADMChannelRequest { + s.ClientSecret = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *ADMChannelRequest) SetEnabled(v bool) *ADMChannelRequest { + s.Enabled = &v + return s +} + +// Provides information about the status and settings of the ADM (Amazon Device +// Messaging) channel for an application. +type ADMChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the ADM channel applies to. 
+ ApplicationId *string `type:"string"`
+
+ // The date and time when the ADM channel was enabled.
+ CreationDate *string `type:"string"`
+
+ // Specifies whether the ADM channel is enabled for the application.
+ Enabled *bool `type:"boolean"`
+
+ // (Not used) This property is retained only for backward compatibility.
+ HasCredential *bool `type:"boolean"`
+
+ // (Deprecated) An identifier for the ADM channel. This property is retained
+ // only for backward compatibility.
+ Id *string `type:"string"`
+
+ // Specifies whether the ADM channel is archived.
+ IsArchived *bool `type:"boolean"`
+
+ // The user who last modified the ADM channel.
+ LastModifiedBy *string `type:"string"`
+
+ // The date and time when the ADM channel was last modified.
+ LastModifiedDate *string `type:"string"`
+
+ // The type of messaging or notification platform for the channel. For the ADM
+ // channel, this value is ADM.
+ //
+ // Platform is a required field
+ Platform *string `type:"string" required:"true"`
+
+ // The current version of the ADM channel.
+ Version *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ADMChannelResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ADMChannelResponse) GoString() string {
+ return s.String()
+}
+
+// SetApplicationId sets the ApplicationId field's value.
+func (s *ADMChannelResponse) SetApplicationId(v string) *ADMChannelResponse {
+ s.ApplicationId = &v
+ return s
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *ADMChannelResponse) SetCreationDate(v string) *ADMChannelResponse {
+ s.CreationDate = &v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *ADMChannelResponse) SetEnabled(v bool) *ADMChannelResponse {
+ s.Enabled = &v
+ return s
+}
+
+// SetHasCredential sets the HasCredential field's value.
+func (s *ADMChannelResponse) SetHasCredential(v bool) *ADMChannelResponse {
+ s.HasCredential = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *ADMChannelResponse) SetId(v string) *ADMChannelResponse {
+ s.Id = &v
+ return s
+}
+
+// SetIsArchived sets the IsArchived field's value.
+func (s *ADMChannelResponse) SetIsArchived(v bool) *ADMChannelResponse {
+ s.IsArchived = &v
+ return s
+}
+
+// SetLastModifiedBy sets the LastModifiedBy field's value.
+func (s *ADMChannelResponse) SetLastModifiedBy(v string) *ADMChannelResponse {
+ s.LastModifiedBy = &v
+ return s
+}
+
+// SetLastModifiedDate sets the LastModifiedDate field's value.
+func (s *ADMChannelResponse) SetLastModifiedDate(v string) *ADMChannelResponse {
+ s.LastModifiedDate = &v
+ return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *ADMChannelResponse) SetPlatform(v string) *ADMChannelResponse {
+ s.Platform = &v
+ return s
+}
+
+// SetVersion sets the Version field's value.
+func (s *ADMChannelResponse) SetVersion(v int64) *ADMChannelResponse {
+ s.Version = &v
+ return s
+}
+
+// Specifies the settings for a one-time message that's sent directly to an
+// endpoint through the ADM (Amazon Device Messaging) channel.
+type ADMMessage struct {
+ _ struct{} `type:"structure"`
+
+ // The action to occur if the recipient taps the push notification. Valid values
+ // are:
+ //
+ // * OPEN_APP - Your app opens or it becomes the foreground app if it was
+ // sent to the background. This is the default action.
+ // + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This action uses the deep-linking features of the Android + // platform. + // + // * URL - The default mobile browser on the recipient's device opens and + // loads the web page at a URL that you specify. + Action *string `type:"string" enum:"Action"` + + // The body of the notification message. + Body *string `type:"string"` + + // An arbitrary string that indicates that multiple messages are logically the + // same and that Amazon Device Messaging (ADM) can drop previously enqueued + // messages in favor of this message. + ConsolidationKey *string `type:"string"` + + // The JSON data payload to use for the push notification, if the notification + // is a silent push notification. This payload is added to the data.pinpoint.jsonBody + // object of the notification. + Data map[string]*string `type:"map"` + + // The amount of time, in seconds, that ADM should store the message if the + // recipient's device is offline. Amazon Pinpoint specifies this value in the + // expiresAfter parameter when it sends the notification message to ADM. + ExpiresAfter *string `type:"string"` + + // The icon image name of the asset saved in your app. + IconReference *string `type:"string"` + + // The URL of the large icon image to display in the content view of the push + // notification. + ImageIconUrl *string `type:"string"` + + // The URL of an image to display in the push notification. + ImageUrl *string `type:"string"` + + // The base64-encoded, MD5 checksum of the value specified by the Data property. + // ADM uses the MD5 value to verify the integrity of the data. + MD5 *string `type:"string"` + + // The raw, JSON-formatted string to use as the payload for the notification + // message. This value overrides the message. + RawContent *string `type:"string"` + + // Specifies whether the notification is a silent push notification, which is + // a push notification that doesn't display on a recipient's device. Silent + // push notifications can be used for cases such as updating an app's configuration + // or supporting phone home functionality. + SilentPush *bool `type:"boolean"` + + // The URL of the small icon image to display in the status bar and the content + // view of the push notification. + SmallImageIconUrl *string `type:"string"` + + // The sound to play when the recipient receives the push notification. You + // can use the default stream or specify the file name of a sound resource that's + // bundled in your app. On an Android platform, the sound file must reside in + // /res/raw/. + Sound *string `type:"string"` + + // The default message variables to use in the notification message. You can + // override the default variables with individual address variables. + Substitutions map[string][]*string `type:"map"` + + // The title to display above the notification message on the recipient's device. + Title *string `type:"string"` + + // The URL to open in the recipient's default mobile browser, if a recipient + // taps the push notification and the value of the Action property is URL. + Url *string `type:"string"` +} + +// String returns the string representation +func (s ADMMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ADMMessage) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. 
+func (s *ADMMessage) SetAction(v string) *ADMMessage { + s.Action = &v + return s +} + +// SetBody sets the Body field's value. +func (s *ADMMessage) SetBody(v string) *ADMMessage { + s.Body = &v + return s +} + +// SetConsolidationKey sets the ConsolidationKey field's value. +func (s *ADMMessage) SetConsolidationKey(v string) *ADMMessage { + s.ConsolidationKey = &v + return s +} + +// SetData sets the Data field's value. +func (s *ADMMessage) SetData(v map[string]*string) *ADMMessage { + s.Data = v + return s +} + +// SetExpiresAfter sets the ExpiresAfter field's value. +func (s *ADMMessage) SetExpiresAfter(v string) *ADMMessage { + s.ExpiresAfter = &v + return s +} + +// SetIconReference sets the IconReference field's value. +func (s *ADMMessage) SetIconReference(v string) *ADMMessage { + s.IconReference = &v + return s +} + +// SetImageIconUrl sets the ImageIconUrl field's value. +func (s *ADMMessage) SetImageIconUrl(v string) *ADMMessage { + s.ImageIconUrl = &v + return s +} + +// SetImageUrl sets the ImageUrl field's value. +func (s *ADMMessage) SetImageUrl(v string) *ADMMessage { + s.ImageUrl = &v + return s +} + +// SetMD5 sets the MD5 field's value. +func (s *ADMMessage) SetMD5(v string) *ADMMessage { + s.MD5 = &v + return s +} + +// SetRawContent sets the RawContent field's value. +func (s *ADMMessage) SetRawContent(v string) *ADMMessage { + s.RawContent = &v + return s +} + +// SetSilentPush sets the SilentPush field's value. +func (s *ADMMessage) SetSilentPush(v bool) *ADMMessage { + s.SilentPush = &v + return s +} + +// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. +func (s *ADMMessage) SetSmallImageIconUrl(v string) *ADMMessage { + s.SmallImageIconUrl = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *ADMMessage) SetSound(v string) *ADMMessage { + s.Sound = &v + return s +} + +// SetSubstitutions sets the Substitutions field's value. +func (s *ADMMessage) SetSubstitutions(v map[string][]*string) *ADMMessage { + s.Substitutions = v + return s +} + +// SetTitle sets the Title field's value. +func (s *ADMMessage) SetTitle(v string) *ADMMessage { + s.Title = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *ADMMessage) SetUrl(v string) *ADMMessage { + s.Url = &v + return s +} + +// Specifies the status and settings of the APNs (Apple Push Notification service) +// channel for an application. +type APNSChannelRequest struct { + _ struct{} `type:"structure"` + + // The bundle identifier that's assigned to your iOS app. This identifier is + // used for APNs tokens. + BundleId *string `type:"string"` + + // The APNs client certificate that you received from Apple, if you want Amazon + // Pinpoint to communicate with APNs by using an APNs certificate. + Certificate *string `type:"string"` + + // The default authentication method that you want Amazon Pinpoint to use when + // authenticating with APNs, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether to enable the APNs channel for the application. + Enabled *bool `type:"boolean"` + + // The private key for the APNs client certificate that you want Amazon Pinpoint + // to use to communicate with APNs. + PrivateKey *string `type:"string"` + + // The identifier that's assigned to your Apple developer account team. This + // identifier is used for APNs tokens. + TeamId *string `type:"string"` + + // The authentication key to use for APNs tokens. 
+ TokenKey *string `type:"string"` + + // The key identifier that's assigned to your APNs signing key, if you want + // Amazon Pinpoint to communicate with APNs by using APNs tokens. + TokenKeyId *string `type:"string"` +} + +// String returns the string representation +func (s APNSChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSChannelRequest) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *APNSChannelRequest) SetBundleId(v string) *APNSChannelRequest { + s.BundleId = &v + return s +} + +// SetCertificate sets the Certificate field's value. +func (s *APNSChannelRequest) SetCertificate(v string) *APNSChannelRequest { + s.Certificate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSChannelRequest { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSChannelRequest) SetEnabled(v bool) *APNSChannelRequest { + s.Enabled = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *APNSChannelRequest) SetPrivateKey(v string) *APNSChannelRequest { + s.PrivateKey = &v + return s +} + +// SetTeamId sets the TeamId field's value. +func (s *APNSChannelRequest) SetTeamId(v string) *APNSChannelRequest { + s.TeamId = &v + return s +} + +// SetTokenKey sets the TokenKey field's value. +func (s *APNSChannelRequest) SetTokenKey(v string) *APNSChannelRequest { + s.TokenKey = &v + return s +} + +// SetTokenKeyId sets the TokenKeyId field's value. +func (s *APNSChannelRequest) SetTokenKeyId(v string) *APNSChannelRequest { + s.TokenKeyId = &v + return s +} + +// Provides information about the status and settings of the APNs (Apple Push +// Notification service) channel for an application. +type APNSChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the APNs channel applies to. + ApplicationId *string `type:"string"` + + // The date and time when the APNs channel was enabled. + CreationDate *string `type:"string"` + + // The default authentication method that Amazon Pinpoint uses to authenticate + // with APNs for this channel, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether the APNs channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // Specifies whether the APNs channel is configured to communicate with APNs + // by using APNs tokens. To provide an authentication key for APNs tokens, set + // the TokenKey property of the channel. + HasTokenKey *bool `type:"boolean"` + + // (Deprecated) An identifier for the APNs channel. This property is retained + // only for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the APNs channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the APNs channel. + LastModifiedBy *string `type:"string"` + + // The date and time when the APNs channel was last modified. + LastModifiedDate *string `type:"string"` + + // The type of messaging or notification platform for the channel. For the APNs + // channel, this value is APNS. 
+ // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The current version of the APNs channel. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s APNSChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *APNSChannelResponse) SetApplicationId(v string) *APNSChannelResponse { + s.ApplicationId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *APNSChannelResponse) SetCreationDate(v string) *APNSChannelResponse { + s.CreationDate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSChannelResponse { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSChannelResponse) SetEnabled(v bool) *APNSChannelResponse { + s.Enabled = &v + return s +} + +// SetHasCredential sets the HasCredential field's value. +func (s *APNSChannelResponse) SetHasCredential(v bool) *APNSChannelResponse { + s.HasCredential = &v + return s +} + +// SetHasTokenKey sets the HasTokenKey field's value. +func (s *APNSChannelResponse) SetHasTokenKey(v bool) *APNSChannelResponse { + s.HasTokenKey = &v + return s +} + +// SetId sets the Id field's value. +func (s *APNSChannelResponse) SetId(v string) *APNSChannelResponse { + s.Id = &v + return s +} + +// SetIsArchived sets the IsArchived field's value. +func (s *APNSChannelResponse) SetIsArchived(v bool) *APNSChannelResponse { + s.IsArchived = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *APNSChannelResponse) SetLastModifiedBy(v string) *APNSChannelResponse { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *APNSChannelResponse) SetLastModifiedDate(v string) *APNSChannelResponse { + s.LastModifiedDate = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *APNSChannelResponse) SetPlatform(v string) *APNSChannelResponse { + s.Platform = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *APNSChannelResponse) SetVersion(v int64) *APNSChannelResponse { + s.Version = &v + return s +} + +// Specifies the settings for a one-time message that's sent directly to an +// endpoint through the APNs (Apple Push Notification service) channel. +type APNSMessage struct { + _ struct{} `type:"structure"` + + // The type of push notification to send. Valid values are: + // + // * alert - For a standard notification that's displayed on recipients' + // devices and prompts a recipient to interact with the notification. + // + // * background - For a silent notification that delivers content in the + // background and isn't displayed on recipients' devices. + // + // * complication - For a notification that contains update information for + // an app’s complication timeline. + // + // * fileprovider - For a notification that signals changes to a File Provider + // extension. + // + // * mdm - For a notification that tells managed devices to contact the MDM + // server. + // + // * voip - For a notification that provides information about an incoming + // VoIP call. 
+ //
+ // Amazon Pinpoint specifies this value in the apns-push-type request header
+ // when it sends the notification message to APNs. If you don't specify a value
+ // for this property, Amazon Pinpoint sets the value to alert or background
+ // automatically, based on the value that you specify for the SilentPush or
+ // RawContent property of the message.
+ //
+ // For more information about the apns-push-type request header, see Sending
+ // Notification Requests to APNs (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/sending_notification_requests_to_apns)
+ // on the Apple Developer website.
+ APNSPushType *string `type:"string"`
+
+ // The action to occur if the recipient taps the push notification. Valid values
+ // are:
+ //
+ // * OPEN_APP - Your app opens or it becomes the foreground app if it was
+ // sent to the background. This is the default action.
+ //
+ // * DEEP_LINK - Your app opens and displays a designated user interface
+ // in the app. This setting uses the deep-linking features of the iOS platform.
+ //
+ // * URL - The default mobile browser on the recipient's device opens and
+ // loads the web page at a URL that you specify.
+ Action *string `type:"string" enum:"Action"`
+
+ // The key that indicates whether and how to modify the badge of your app's
+ // icon when the recipient receives the push notification. If this key isn't
+ // included in the dictionary, the badge doesn't change. To remove the badge,
+ // set this value to 0.
+ Badge *int64 `type:"integer"`
+
+ // The body of the notification message.
+ Body *string `type:"string"`
+
+ // The key that indicates the notification type for the push notification. This
+ // key is a value that's defined by the identifier property of one of your app's
+ // registered categories.
+ Category *string `type:"string"`
+
+ // An arbitrary identifier that, if assigned to multiple messages, APNs uses
+ // to coalesce the messages into a single push notification instead of delivering
+ // each message individually. This value can't exceed 64 bytes.
+ //
+ // Amazon Pinpoint specifies this value in the apns-collapse-id request header
+ // when it sends the notification message to APNs.
+ CollapseId *string `type:"string"`
+
+ // The JSON payload to use for a silent push notification. This payload is added
+ // to the data.pinpoint.jsonBody object of the notification.
+ Data map[string]*string `type:"map"`
+
+ // The URL of an image or video to display in the push notification.
+ MediaUrl *string `type:"string"`
+
+ // The authentication method that you want Amazon Pinpoint to use when authenticating
+ // with APNs, CERTIFICATE or TOKEN.
+ PreferredAuthenticationMethod *string `type:"string"`
+
+ // The priority of the push notification. Valid values are:
+ //
+ // * 5 - Low priority, the notification might be delayed, delivered as part
+ // of a group, or throttled.
+ //
+ // * 10 - High priority, the notification is sent immediately. This is the default
+ // value. A high priority notification should trigger an alert, play a sound,
+ // or badge your app's icon on the recipient's device.
+ //
+ // Amazon Pinpoint specifies this value in the apns-priority request header
+ // when it sends the notification message to APNs.
+ //
+ // The equivalent values for Firebase Cloud Messaging (FCM), formerly Google
+ // Cloud Messaging (GCM), are normal, for 5, and high, for 10. If you specify
+ // an FCM value for this property, Amazon Pinpoint accepts and converts the
+ // value to the corresponding APNs value.
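+ //
+ // For example, with an illustrative message value msg, msg.SetPriority("10")
+ // requests immediate delivery, and an FCM-style msg.SetPriority("high") is
+ // accepted and converted as described above.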
+ Priority *string `type:"string"` + + // The raw, JSON-formatted string to use as the payload for the notification + // message. This value overrides all other content for the message. + // + // If you specify the raw content of an APNs push notification, the message + // payload has to include the content-available key. The value of the content-available + // key has to be an integer, and can only be 0 or 1. If you're sending a standard + // notification, set the value of content-available to 0. If you're sending + // a silent (background) notification, set the value of content-available to + // 1. Additionally, silent notification payloads can't include the alert, badge, + // or sound keys. For more information, see Generating a Remote Notification + // (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification) + // and Pushing Background Updates to Your App (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/pushing_background_updates_to_your_app) + // on the Apple Developer website. + RawContent *string `type:"string"` + + // Specifies whether the notification is a silent push notification. A silent + // (or background) push notification isn't displayed on recipients' devices. + // You can use silent push notifications to make small updates to your app, + // or to display messages in an in-app message center. + // + // Amazon Pinpoint uses this property to determine the correct value for the + // apns-push-type request header when it sends the notification message to APNs. + // If you specify a value of true for this property, Amazon Pinpoint sets the + // value for the apns-push-type header field to background. + // + // If you specify the raw content of an APNs push notification, the message + // payload has to include the content-available key. For silent (background) + // notifications, set the value of content-available to 1. Additionally, the + // message payload for a silent notification can't include the alert, badge, + // or sound keys. For more information, see Generating a Remote Notification + // (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification) + // and Pushing Background Updates to Your App (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/pushing_background_updates_to_your_app) + // on the Apple Developer website. + // + // Apple has indicated that they will throttle "excessive" background notifications + // based on current traffic volumes. To prevent your notifications being throttled, + // Apple recommends that you send no more than 3 silent push notifications to + // each recipient per hour. + SilentPush *bool `type:"boolean"` + + // The key for the sound to play when the recipient receives the push notification. + // The value for this key is the name of a sound file in your app's main bundle + // or the Library/Sounds folder in your app's data container. If the sound file + // can't be found or you specify default for the value, the system plays the + // default alert sound. + Sound *string `type:"string"` + + // The default message variables to use in the notification message. You can + // override these default variables with individual address variables. + Substitutions map[string][]*string `type:"map"` + + // The key that represents your app-specific identifier for grouping notifications. 
+ // If you provide a Notification Content app extension, you can use this value + // to group your notifications together. + ThreadId *string `type:"string"` + + // The amount of time, in seconds, that APNs should store and attempt to deliver + // the push notification, if the service is unable to deliver the notification + // the first time. If this value is 0, APNs treats the notification as if it + // expires immediately and the service doesn't store or try to deliver the notification + // again. + // + // Amazon Pinpoint specifies this value in the apns-expiration request header + // when it sends the notification message to APNs. + TimeToLive *int64 `type:"integer"` + + // The title to display above the notification message on the recipient's device. + Title *string `type:"string"` + + // The URL to open in the recipient's default mobile browser, if a recipient + // taps the push notification and the value of the Action property is URL. + Url *string `type:"string"` +} + +// String returns the string representation +func (s APNSMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSMessage) GoString() string { + return s.String() +} + +// SetAPNSPushType sets the APNSPushType field's value. +func (s *APNSMessage) SetAPNSPushType(v string) *APNSMessage { + s.APNSPushType = &v + return s +} + +// SetAction sets the Action field's value. +func (s *APNSMessage) SetAction(v string) *APNSMessage { + s.Action = &v + return s +} + +// SetBadge sets the Badge field's value. +func (s *APNSMessage) SetBadge(v int64) *APNSMessage { + s.Badge = &v + return s +} + +// SetBody sets the Body field's value. +func (s *APNSMessage) SetBody(v string) *APNSMessage { + s.Body = &v + return s +} + +// SetCategory sets the Category field's value. +func (s *APNSMessage) SetCategory(v string) *APNSMessage { + s.Category = &v + return s +} + +// SetCollapseId sets the CollapseId field's value. +func (s *APNSMessage) SetCollapseId(v string) *APNSMessage { + s.CollapseId = &v + return s +} + +// SetData sets the Data field's value. +func (s *APNSMessage) SetData(v map[string]*string) *APNSMessage { + s.Data = v + return s +} + +// SetMediaUrl sets the MediaUrl field's value. +func (s *APNSMessage) SetMediaUrl(v string) *APNSMessage { + s.MediaUrl = &v + return s +} + +// SetPreferredAuthenticationMethod sets the PreferredAuthenticationMethod field's value. +func (s *APNSMessage) SetPreferredAuthenticationMethod(v string) *APNSMessage { + s.PreferredAuthenticationMethod = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *APNSMessage) SetPriority(v string) *APNSMessage { + s.Priority = &v + return s +} + +// SetRawContent sets the RawContent field's value. +func (s *APNSMessage) SetRawContent(v string) *APNSMessage { + s.RawContent = &v + return s +} + +// SetSilentPush sets the SilentPush field's value. +func (s *APNSMessage) SetSilentPush(v bool) *APNSMessage { + s.SilentPush = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *APNSMessage) SetSound(v string) *APNSMessage { + s.Sound = &v + return s +} + +// SetSubstitutions sets the Substitutions field's value. +func (s *APNSMessage) SetSubstitutions(v map[string][]*string) *APNSMessage { + s.Substitutions = v + return s +} + +// SetThreadId sets the ThreadId field's value. +func (s *APNSMessage) SetThreadId(v string) *APNSMessage { + s.ThreadId = &v + return s +} + +// SetTimeToLive sets the TimeToLive field's value. 
+func (s *APNSMessage) SetTimeToLive(v int64) *APNSMessage { + s.TimeToLive = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *APNSMessage) SetTitle(v string) *APNSMessage { + s.Title = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *APNSMessage) SetUrl(v string) *APNSMessage { + s.Url = &v + return s +} + +// Specifies channel-specific content and settings for a message template that +// can be used in push notifications that are sent through the APNs (Apple Push +// Notification service) channel. +type APNSPushNotificationTemplate struct { + _ struct{} `type:"structure"` + + // The action to occur if a recipient taps a push notification that's based + // on the message template. Valid values are: + // + // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // sent to the background. This is the default action. + // + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This setting uses the deep-linking features of the iOS platform. + // + // * URL - The default mobile browser on the recipient's device opens and + // loads the web page at a URL that you specify. + Action *string `type:"string" enum:"Action"` + + // The message body to use in push notifications that are based on the message + // template. + Body *string `type:"string"` + + // The URL of an image or video to display in push notifications that are based + // on the message template. + MediaUrl *string `type:"string"` + + // The key for the sound to play when the recipient receives a push notification + // that's based on the message template. The value for this key is the name + // of a sound file in your app's main bundle or the Library/Sounds folder in + // your app's data container. If the sound file can't be found or you specify + // default for the value, the system plays the default alert sound. + Sound *string `type:"string"` + + // The title to use in push notifications that are based on the message template. + // This title appears above the notification message on a recipient's device. + Title *string `type:"string"` + + // The URL to open in the recipient's default mobile browser, if a recipient + // taps a push notification that's based on the message template and the value + // of the Action property is URL. + Url *string `type:"string"` +} + +// String returns the string representation +func (s APNSPushNotificationTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSPushNotificationTemplate) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *APNSPushNotificationTemplate) SetAction(v string) *APNSPushNotificationTemplate { + s.Action = &v + return s +} + +// SetBody sets the Body field's value. +func (s *APNSPushNotificationTemplate) SetBody(v string) *APNSPushNotificationTemplate { + s.Body = &v + return s +} + +// SetMediaUrl sets the MediaUrl field's value. +func (s *APNSPushNotificationTemplate) SetMediaUrl(v string) *APNSPushNotificationTemplate { + s.MediaUrl = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *APNSPushNotificationTemplate) SetSound(v string) *APNSPushNotificationTemplate { + s.Sound = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *APNSPushNotificationTemplate) SetTitle(v string) *APNSPushNotificationTemplate { + s.Title = &v + return s +} + +// SetUrl sets the Url field's value. 
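+
+// Editor's illustrative sketch — not part of the vendored SDK change. Every
+// Set* method on APNSMessage returns its receiver, so a message can be built
+// as one chain. Per the SilentPush documentation above, a silent (background)
+// payload must omit alert, badge, and sound content. The map key and value
+// here are invented; aws.String is the SDK's string-pointer helper from the
+// aws package.
+//
+//    msg := (&pinpoint.APNSMessage{}).
+//        SetSilentPush(true).
+//        SetData(map[string]*string{"syncHint": aws.String("inbox")}).
+//        SetTimeToLive(3600)
+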
+func (s *APNSPushNotificationTemplate) SetUrl(v string) *APNSPushNotificationTemplate { + s.Url = &v + return s +} + +// Specifies the status and settings of the APNs (Apple Push Notification service) +// sandbox channel for an application. +type APNSSandboxChannelRequest struct { + _ struct{} `type:"structure"` + + // The bundle identifier that's assigned to your iOS app. This identifier is + // used for APNs tokens. + BundleId *string `type:"string"` + + // The APNs client certificate that you received from Apple, if you want Amazon + // Pinpoint to communicate with the APNs sandbox environment by using an APNs + // certificate. + Certificate *string `type:"string"` + + // The default authentication method that you want Amazon Pinpoint to use when + // authenticating with the APNs sandbox environment, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether to enable the APNs sandbox channel for the application. + Enabled *bool `type:"boolean"` + + // The private key for the APNs client certificate that you want Amazon Pinpoint + // to use to communicate with the APNs sandbox environment. + PrivateKey *string `type:"string"` + + // The identifier that's assigned to your Apple developer account team. This + // identifier is used for APNs tokens. + TeamId *string `type:"string"` + + // The authentication key to use for APNs tokens. + TokenKey *string `type:"string"` + + // The key identifier that's assigned to your APNs signing key, if you want + // Amazon Pinpoint to communicate with the APNs sandbox environment by using + // APNs tokens. + TokenKeyId *string `type:"string"` +} + +// String returns the string representation +func (s APNSSandboxChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSSandboxChannelRequest) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *APNSSandboxChannelRequest) SetBundleId(v string) *APNSSandboxChannelRequest { + s.BundleId = &v + return s +} + +// SetCertificate sets the Certificate field's value. +func (s *APNSSandboxChannelRequest) SetCertificate(v string) *APNSSandboxChannelRequest { + s.Certificate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSSandboxChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSSandboxChannelRequest { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSSandboxChannelRequest) SetEnabled(v bool) *APNSSandboxChannelRequest { + s.Enabled = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *APNSSandboxChannelRequest) SetPrivateKey(v string) *APNSSandboxChannelRequest { + s.PrivateKey = &v + return s +} + +// SetTeamId sets the TeamId field's value. +func (s *APNSSandboxChannelRequest) SetTeamId(v string) *APNSSandboxChannelRequest { + s.TeamId = &v + return s +} + +// SetTokenKey sets the TokenKey field's value. +func (s *APNSSandboxChannelRequest) SetTokenKey(v string) *APNSSandboxChannelRequest { + s.TokenKey = &v + return s +} + +// SetTokenKeyId sets the TokenKeyId field's value. +func (s *APNSSandboxChannelRequest) SetTokenKeyId(v string) *APNSSandboxChannelRequest { + s.TokenKeyId = &v + return s +} + +// Provides information about the status and settings of the APNs (Apple Push +// Notification service) sandbox channel for an application. 
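+
+// Editor's sketch (placeholder credentials, not part of this change): the
+// request type defined above supports token-based APNs authentication. The
+// "KEY" literal follows the "key or certificate" wording in the field docs;
+// the exact accepted casing is an assumption, and p8KeyContents is a
+// hypothetical variable holding the .p8 signing key.
+//
+//    req := (&pinpoint.APNSSandboxChannelRequest{}).
+//        SetDefaultAuthenticationMethod("KEY").
+//        SetTokenKey(p8KeyContents).
+//        SetTokenKeyId("ABC123DEFG").
+//        SetTeamId("TEAM123456").
+//        SetBundleId("com.example.app").
+//        SetEnabled(true)
+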
+type APNSSandboxChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the APNs sandbox channel applies + // to. + ApplicationId *string `type:"string"` + + // The date and time when the APNs sandbox channel was enabled. + CreationDate *string `type:"string"` + + // The default authentication method that Amazon Pinpoint uses to authenticate + // with the APNs sandbox environment for this channel, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether the APNs sandbox channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // Specifies whether the APNs sandbox channel is configured to communicate with + // APNs by using APNs tokens. To provide an authentication key for APNs tokens, + // set the TokenKey property of the channel. + HasTokenKey *bool `type:"boolean"` + + // (Deprecated) An identifier for the APNs sandbox channel. This property is + // retained only for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the APNs sandbox channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the APNs sandbox channel. + LastModifiedBy *string `type:"string"` + + // The date and time when the APNs sandbox channel was last modified. + LastModifiedDate *string `type:"string"` + + // The type of messaging or notification platform for the channel. For the APNs + // sandbox channel, this value is APNS_SANDBOX. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The current version of the APNs sandbox channel. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s APNSSandboxChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSSandboxChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *APNSSandboxChannelResponse) SetApplicationId(v string) *APNSSandboxChannelResponse { + s.ApplicationId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *APNSSandboxChannelResponse) SetCreationDate(v string) *APNSSandboxChannelResponse { + s.CreationDate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSSandboxChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSSandboxChannelResponse { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSSandboxChannelResponse) SetEnabled(v bool) *APNSSandboxChannelResponse { + s.Enabled = &v + return s +} + +// SetHasCredential sets the HasCredential field's value. +func (s *APNSSandboxChannelResponse) SetHasCredential(v bool) *APNSSandboxChannelResponse { + s.HasCredential = &v + return s +} + +// SetHasTokenKey sets the HasTokenKey field's value. +func (s *APNSSandboxChannelResponse) SetHasTokenKey(v bool) *APNSSandboxChannelResponse { + s.HasTokenKey = &v + return s +} + +// SetId sets the Id field's value. +func (s *APNSSandboxChannelResponse) SetId(v string) *APNSSandboxChannelResponse { + s.Id = &v + return s +} + +// SetIsArchived sets the IsArchived field's value. 
+func (s *APNSSandboxChannelResponse) SetIsArchived(v bool) *APNSSandboxChannelResponse { + s.IsArchived = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *APNSSandboxChannelResponse) SetLastModifiedBy(v string) *APNSSandboxChannelResponse { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *APNSSandboxChannelResponse) SetLastModifiedDate(v string) *APNSSandboxChannelResponse { + s.LastModifiedDate = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *APNSSandboxChannelResponse) SetPlatform(v string) *APNSSandboxChannelResponse { + s.Platform = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *APNSSandboxChannelResponse) SetVersion(v int64) *APNSSandboxChannelResponse { + s.Version = &v + return s +} + +// Specifies the status and settings of the APNs (Apple Push Notification service) +// VoIP channel for an application. +type APNSVoipChannelRequest struct { + _ struct{} `type:"structure"` + + // The bundle identifier that's assigned to your iOS app. This identifier is + // used for APNs tokens. + BundleId *string `type:"string"` + + // The APNs client certificate that you received from Apple, if you want Amazon + // Pinpoint to communicate with APNs by using an APNs certificate. + Certificate *string `type:"string"` + + // The default authentication method that you want Amazon Pinpoint to use when + // authenticating with APNs, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether to enable the APNs VoIP channel for the application. + Enabled *bool `type:"boolean"` + + // The private key for the APNs client certificate that you want Amazon Pinpoint + // to use to communicate with APNs. + PrivateKey *string `type:"string"` + + // The identifier that's assigned to your Apple developer account team. This + // identifier is used for APNs tokens. + TeamId *string `type:"string"` + + // The authentication key to use for APNs tokens. + TokenKey *string `type:"string"` + + // The key identifier that's assigned to your APNs signing key, if you want + // Amazon Pinpoint to communicate with APNs by using APNs tokens. + TokenKeyId *string `type:"string"` +} + +// String returns the string representation +func (s APNSVoipChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSVoipChannelRequest) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *APNSVoipChannelRequest) SetBundleId(v string) *APNSVoipChannelRequest { + s.BundleId = &v + return s +} + +// SetCertificate sets the Certificate field's value. +func (s *APNSVoipChannelRequest) SetCertificate(v string) *APNSVoipChannelRequest { + s.Certificate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSVoipChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSVoipChannelRequest { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSVoipChannelRequest) SetEnabled(v bool) *APNSVoipChannelRequest { + s.Enabled = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *APNSVoipChannelRequest) SetPrivateKey(v string) *APNSVoipChannelRequest { + s.PrivateKey = &v + return s +} + +// SetTeamId sets the TeamId field's value. 
+func (s *APNSVoipChannelRequest) SetTeamId(v string) *APNSVoipChannelRequest { + s.TeamId = &v + return s +} + +// SetTokenKey sets the TokenKey field's value. +func (s *APNSVoipChannelRequest) SetTokenKey(v string) *APNSVoipChannelRequest { + s.TokenKey = &v + return s +} + +// SetTokenKeyId sets the TokenKeyId field's value. +func (s *APNSVoipChannelRequest) SetTokenKeyId(v string) *APNSVoipChannelRequest { + s.TokenKeyId = &v + return s +} + +// Provides information about the status and settings of the APNs (Apple Push +// Notification service) VoIP channel for an application. +type APNSVoipChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the APNs VoIP channel applies + // to. + ApplicationId *string `type:"string"` + + // The date and time when the APNs VoIP channel was enabled. + CreationDate *string `type:"string"` + + // The default authentication method that Amazon Pinpoint uses to authenticate + // with APNs for this channel, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether the APNs VoIP channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // Specifies whether the APNs VoIP channel is configured to communicate with + // APNs by using APNs tokens. To provide an authentication key for APNs tokens, + // set the TokenKey property of the channel. + HasTokenKey *bool `type:"boolean"` + + // (Deprecated) An identifier for the APNs VoIP channel. This property is retained + // only for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the APNs VoIP channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the APNs VoIP channel. + LastModifiedBy *string `type:"string"` + + // The date and time when the APNs VoIP channel was last modified. + LastModifiedDate *string `type:"string"` + + // The type of messaging or notification platform for the channel. For the APNs + // VoIP channel, this value is APNS_VOIP. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The current version of the APNs VoIP channel. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s APNSVoipChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSVoipChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *APNSVoipChannelResponse) SetApplicationId(v string) *APNSVoipChannelResponse { + s.ApplicationId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *APNSVoipChannelResponse) SetCreationDate(v string) *APNSVoipChannelResponse { + s.CreationDate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSVoipChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSVoipChannelResponse { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSVoipChannelResponse) SetEnabled(v bool) *APNSVoipChannelResponse { + s.Enabled = &v + return s +} + +// SetHasCredential sets the HasCredential field's value. 
+func (s *APNSVoipChannelResponse) SetHasCredential(v bool) *APNSVoipChannelResponse { + s.HasCredential = &v + return s +} + +// SetHasTokenKey sets the HasTokenKey field's value. +func (s *APNSVoipChannelResponse) SetHasTokenKey(v bool) *APNSVoipChannelResponse { + s.HasTokenKey = &v + return s +} + +// SetId sets the Id field's value. +func (s *APNSVoipChannelResponse) SetId(v string) *APNSVoipChannelResponse { + s.Id = &v + return s +} + +// SetIsArchived sets the IsArchived field's value. +func (s *APNSVoipChannelResponse) SetIsArchived(v bool) *APNSVoipChannelResponse { + s.IsArchived = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *APNSVoipChannelResponse) SetLastModifiedBy(v string) *APNSVoipChannelResponse { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *APNSVoipChannelResponse) SetLastModifiedDate(v string) *APNSVoipChannelResponse { + s.LastModifiedDate = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *APNSVoipChannelResponse) SetPlatform(v string) *APNSVoipChannelResponse { + s.Platform = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *APNSVoipChannelResponse) SetVersion(v int64) *APNSVoipChannelResponse { + s.Version = &v + return s +} + +// Specifies the status and settings of the APNs (Apple Push Notification service) +// VoIP sandbox channel for an application. +type APNSVoipSandboxChannelRequest struct { + _ struct{} `type:"structure"` + + // The bundle identifier that's assigned to your iOS app. This identifier is + // used for APNs tokens. + BundleId *string `type:"string"` + + // The APNs client certificate that you received from Apple, if you want Amazon + // Pinpoint to communicate with the APNs sandbox environment by using an APNs + // certificate. + Certificate *string `type:"string"` + + // The default authentication method that you want Amazon Pinpoint to use when + // authenticating with the APNs sandbox environment for this channel, key or + // certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether the APNs VoIP sandbox channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // The private key for the APNs client certificate that you want Amazon Pinpoint + // to use to communicate with the APNs sandbox environment. + PrivateKey *string `type:"string"` + + // The identifier that's assigned to your Apple developer account team. This + // identifier is used for APNs tokens. + TeamId *string `type:"string"` + + // The authentication key to use for APNs tokens. + TokenKey *string `type:"string"` + + // The key identifier that's assigned to your APNs signing key, if you want + // Amazon Pinpoint to communicate with the APNs sandbox environment by using + // APNs tokens. + TokenKeyId *string `type:"string"` +} + +// String returns the string representation +func (s APNSVoipSandboxChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSVoipSandboxChannelRequest) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *APNSVoipSandboxChannelRequest) SetBundleId(v string) *APNSVoipSandboxChannelRequest { + s.BundleId = &v + return s +} + +// SetCertificate sets the Certificate field's value. 
+func (s *APNSVoipSandboxChannelRequest) SetCertificate(v string) *APNSVoipSandboxChannelRequest { + s.Certificate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSVoipSandboxChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSVoipSandboxChannelRequest { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSVoipSandboxChannelRequest) SetEnabled(v bool) *APNSVoipSandboxChannelRequest { + s.Enabled = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *APNSVoipSandboxChannelRequest) SetPrivateKey(v string) *APNSVoipSandboxChannelRequest { + s.PrivateKey = &v + return s +} + +// SetTeamId sets the TeamId field's value. +func (s *APNSVoipSandboxChannelRequest) SetTeamId(v string) *APNSVoipSandboxChannelRequest { + s.TeamId = &v + return s +} + +// SetTokenKey sets the TokenKey field's value. +func (s *APNSVoipSandboxChannelRequest) SetTokenKey(v string) *APNSVoipSandboxChannelRequest { + s.TokenKey = &v + return s +} + +// SetTokenKeyId sets the TokenKeyId field's value. +func (s *APNSVoipSandboxChannelRequest) SetTokenKeyId(v string) *APNSVoipSandboxChannelRequest { + s.TokenKeyId = &v + return s +} + +// Provides information about the status and settings of the APNs (Apple Push +// Notification service) VoIP sandbox channel for an application. +type APNSVoipSandboxChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the APNs VoIP sandbox channel + // applies to. + ApplicationId *string `type:"string"` + + // The date and time when the APNs VoIP sandbox channel was enabled. + CreationDate *string `type:"string"` + + // The default authentication method that Amazon Pinpoint uses to authenticate + // with the APNs sandbox environment for this channel, key or certificate. + DefaultAuthenticationMethod *string `type:"string"` + + // Specifies whether the APNs VoIP sandbox channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // Specifies whether the APNs VoIP sandbox channel is configured to communicate + // with APNs by using APNs tokens. To provide an authentication key for APNs + // tokens, set the TokenKey property of the channel. + HasTokenKey *bool `type:"boolean"` + + // (Deprecated) An identifier for the APNs VoIP sandbox channel. This property + // is retained only for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the APNs VoIP sandbox channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the APNs VoIP sandbox channel. + LastModifiedBy *string `type:"string"` + + // The date and time when the APNs VoIP sandbox channel was last modified. + LastModifiedDate *string `type:"string"` + + // The type of messaging or notification platform for the channel. For the APNs + // VoIP sandbox channel, this value is APNS_VOIP_SANDBOX. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The current version of the APNs VoIP sandbox channel. 
+ Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s APNSVoipSandboxChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s APNSVoipSandboxChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *APNSVoipSandboxChannelResponse) SetApplicationId(v string) *APNSVoipSandboxChannelResponse { + s.ApplicationId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *APNSVoipSandboxChannelResponse) SetCreationDate(v string) *APNSVoipSandboxChannelResponse { + s.CreationDate = &v + return s +} + +// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. +func (s *APNSVoipSandboxChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSVoipSandboxChannelResponse { + s.DefaultAuthenticationMethod = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *APNSVoipSandboxChannelResponse) SetEnabled(v bool) *APNSVoipSandboxChannelResponse { + s.Enabled = &v + return s +} + +// SetHasCredential sets the HasCredential field's value. +func (s *APNSVoipSandboxChannelResponse) SetHasCredential(v bool) *APNSVoipSandboxChannelResponse { + s.HasCredential = &v + return s +} + +// SetHasTokenKey sets the HasTokenKey field's value. +func (s *APNSVoipSandboxChannelResponse) SetHasTokenKey(v bool) *APNSVoipSandboxChannelResponse { + s.HasTokenKey = &v + return s +} + +// SetId sets the Id field's value. +func (s *APNSVoipSandboxChannelResponse) SetId(v string) *APNSVoipSandboxChannelResponse { + s.Id = &v + return s +} + +// SetIsArchived sets the IsArchived field's value. +func (s *APNSVoipSandboxChannelResponse) SetIsArchived(v bool) *APNSVoipSandboxChannelResponse { + s.IsArchived = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *APNSVoipSandboxChannelResponse) SetLastModifiedBy(v string) *APNSVoipSandboxChannelResponse { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *APNSVoipSandboxChannelResponse) SetLastModifiedDate(v string) *APNSVoipSandboxChannelResponse { + s.LastModifiedDate = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *APNSVoipSandboxChannelResponse) SetPlatform(v string) *APNSVoipSandboxChannelResponse { + s.Platform = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *APNSVoipSandboxChannelResponse) SetVersion(v int64) *APNSVoipSandboxChannelResponse { + s.Version = &v + return s +} + +// Provides information about the activities that were performed by a campaign. +type ActivitiesResponse struct { + _ struct{} `type:"structure"` + + // An array of responses, one for each activity that was performed by the campaign. + // + // Item is a required field + Item []*ActivityResponse `type:"list" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ActivitiesResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivitiesResponse) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. 
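+
+// Editor's sketch of NextToken paging — illustrative only. fetchActivities
+// and handle are hypothetical helpers; fetchActivities stands in for the
+// package operation that returns an *ActivitiesResponse and is not defined in
+// this file.
+//
+//    var token *string
+//    for {
+//        page, err := fetchActivities(token)
+//        if err != nil {
+//            return err
+//        }
+//        handle(page.Item)
+//        if page.NextToken == nil {
+//            break
+//        }
+//        token = page.NextToken
+//    }
+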
+func (s *ActivitiesResponse) SetItem(v []*ActivityResponse) *ActivitiesResponse { + s.Item = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ActivitiesResponse) SetNextToken(v string) *ActivitiesResponse { + s.NextToken = &v + return s +} + +// Specifies the configuration and other settings for an activity in a journey. +type Activity struct { + _ struct{} `type:"structure"` + + // The settings for a yes/no split activity. This type of activity sends participants + // down one of two paths in a journey, based on conditions that you specify. + ConditionalSplit *ConditionalSplitActivity `type:"structure"` + + // The custom description of the activity. + Description *string `type:"string"` + + // The settings for an email activity. This type of activity sends an email + // message to participants. + EMAIL *EmailMessageActivity `type:"structure"` + + // The settings for a holdout activity. This type of activity stops a journey + // for a specified percentage of participants. + Holdout *HoldoutActivity `type:"structure"` + + // The settings for a multivariate split activity. This type of activity sends + // participants down one of as many as five paths in a journey, based on conditions + // that you specify. + MultiCondition *MultiConditionalSplitActivity `type:"structure"` + + // The settings for a random split activity. This type of activity randomly + // sends specified percentages of participants down one of as many as five paths + // in a journey, based on conditions that you specify. + RandomSplit *RandomSplitActivity `type:"structure"` + + // The settings for a wait activity. This type of activity waits for a certain + // amount of time or until a specific date and time before moving participants + // to the next activity in a journey. + Wait *WaitActivity `type:"structure"` +} + +// String returns the string representation +func (s Activity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Activity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Activity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Activity"} + if s.ConditionalSplit != nil { + if err := s.ConditionalSplit.Validate(); err != nil { + invalidParams.AddNested("ConditionalSplit", err.(request.ErrInvalidParams)) + } + } + if s.Holdout != nil { + if err := s.Holdout.Validate(); err != nil { + invalidParams.AddNested("Holdout", err.(request.ErrInvalidParams)) + } + } + if s.MultiCondition != nil { + if err := s.MultiCondition.Validate(); err != nil { + invalidParams.AddNested("MultiCondition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionalSplit sets the ConditionalSplit field's value. +func (s *Activity) SetConditionalSplit(v *ConditionalSplitActivity) *Activity { + s.ConditionalSplit = v + return s +} + +// SetDescription sets the Description field's value. +func (s *Activity) SetDescription(v string) *Activity { + s.Description = &v + return s +} + +// SetEMAIL sets the EMAIL field's value. +func (s *Activity) SetEMAIL(v *EmailMessageActivity) *Activity { + s.EMAIL = v + return s +} + +// SetHoldout sets the Holdout field's value. +func (s *Activity) SetHoldout(v *HoldoutActivity) *Activity { + s.Holdout = v + return s +} + +// SetMultiCondition sets the MultiCondition field's value. 
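+
+// Editor's note, illustrative only: on this editor's reading of the field docs
+// above, an Activity is populated with one branch (ConditionalSplit, EMAIL,
+// Holdout, MultiCondition, RandomSplit, or Wait), and the Validate method
+// above descends only into the ConditionalSplit, Holdout, and MultiCondition
+// branches when they are set. wait here is a hypothetical
+// *pinpoint.WaitActivity built elsewhere.
+//
+//    act := (&pinpoint.Activity{}).
+//        SetDescription("pause before the follow-up message").
+//        SetWait(wait)
+//    if err := act.Validate(); err != nil {
+//        return err
+//    }
+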
+func (s *Activity) SetMultiCondition(v *MultiConditionalSplitActivity) *Activity { + s.MultiCondition = v + return s +} + +// SetRandomSplit sets the RandomSplit field's value. +func (s *Activity) SetRandomSplit(v *RandomSplitActivity) *Activity { + s.RandomSplit = v + return s +} + +// SetWait sets the Wait field's value. +func (s *Activity) SetWait(v *WaitActivity) *Activity { + s.Wait = v + return s +} + +// Provides information about an activity that was performed by a campaign. +type ActivityResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the campaign applies to. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The unique identifier for the campaign that the activity applies to. + // + // CampaignId is a required field + CampaignId *string `type:"string" required:"true"` + + // The actual time, in ISO 8601 format, when the activity was marked CANCELLED + // or COMPLETED. + End *string `type:"string"` + + // The unique identifier for the activity. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies whether the activity succeeded. Possible values are SUCCESS and + // FAIL. + Result *string `type:"string"` + + // The scheduled start time, in ISO 8601 format, for the activity. + ScheduledStart *string `type:"string"` + + // The actual start time, in ISO 8601 format, of the activity. + Start *string `type:"string"` + + // The current status of the activity. Possible values are: PENDING, INITIALIZING, + // RUNNING, PAUSED, CANCELLED, and COMPLETED. + State *string `type:"string"` + + // The total number of endpoints that the campaign successfully delivered messages + // to. + SuccessfulEndpointCount *int64 `type:"integer"` + + // The total number of time zones that were completed. + TimezonesCompletedCount *int64 `type:"integer"` + + // The total number of unique time zones that are in the segment for the campaign. + TimezonesTotalCount *int64 `type:"integer"` + + // The total number of endpoints that the campaign attempted to deliver messages + // to. + TotalEndpointCount *int64 `type:"integer"` + + // The unique identifier for the campaign treatment that the activity applies + // to. A treatment is a variation of a campaign that's used for A/B testing + // of a campaign. + TreatmentId *string `type:"string"` +} + +// String returns the string representation +func (s ActivityResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *ActivityResponse) SetApplicationId(v string) *ActivityResponse { + s.ApplicationId = &v + return s +} + +// SetCampaignId sets the CampaignId field's value. +func (s *ActivityResponse) SetCampaignId(v string) *ActivityResponse { + s.CampaignId = &v + return s +} + +// SetEnd sets the End field's value. +func (s *ActivityResponse) SetEnd(v string) *ActivityResponse { + s.End = &v + return s +} + +// SetId sets the Id field's value. +func (s *ActivityResponse) SetId(v string) *ActivityResponse { + s.Id = &v + return s +} + +// SetResult sets the Result field's value. +func (s *ActivityResponse) SetResult(v string) *ActivityResponse { + s.Result = &v + return s +} + +// SetScheduledStart sets the ScheduledStart field's value. 
+func (s *ActivityResponse) SetScheduledStart(v string) *ActivityResponse { + s.ScheduledStart = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ActivityResponse) SetStart(v string) *ActivityResponse { + s.Start = &v + return s +} + +// SetState sets the State field's value. +func (s *ActivityResponse) SetState(v string) *ActivityResponse { + s.State = &v + return s +} + +// SetSuccessfulEndpointCount sets the SuccessfulEndpointCount field's value. +func (s *ActivityResponse) SetSuccessfulEndpointCount(v int64) *ActivityResponse { + s.SuccessfulEndpointCount = &v + return s +} + +// SetTimezonesCompletedCount sets the TimezonesCompletedCount field's value. +func (s *ActivityResponse) SetTimezonesCompletedCount(v int64) *ActivityResponse { + s.TimezonesCompletedCount = &v + return s +} + +// SetTimezonesTotalCount sets the TimezonesTotalCount field's value. +func (s *ActivityResponse) SetTimezonesTotalCount(v int64) *ActivityResponse { + s.TimezonesTotalCount = &v + return s +} + +// SetTotalEndpointCount sets the TotalEndpointCount field's value. +func (s *ActivityResponse) SetTotalEndpointCount(v int64) *ActivityResponse { + s.TotalEndpointCount = &v + return s +} + +// SetTreatmentId sets the TreatmentId field's value. +func (s *ActivityResponse) SetTreatmentId(v string) *ActivityResponse { + s.TreatmentId = &v + return s +} + +// Specifies address-based configuration settings for a message that's sent +// directly to an endpoint. +type AddressConfiguration struct { + _ struct{} `type:"structure"` + + // The message body to use instead of the default message body. This value overrides + // the default message body. + BodyOverride *string `type:"string"` + + // The channel to use when sending the message. + ChannelType *string `type:"string" enum:"ChannelType"` + + // An object that maps custom attributes to attributes for the address and is + // attached to the message. For a push notification, this payload is added to + // the data.pinpoint object. For an email or text message, this payload is added + // to email/SMS delivery receipt event attributes. + Context map[string]*string `type:"map"` + + // The raw, JSON-formatted string to use as the payload for the notification + // message. This value overrides the message. + RawContent *string `type:"string"` + + // An object that maps variable values for the message. Amazon Pinpoint merges + // these values with the variable values specified by properties of the DefaultMessage + // object. The substitutions in this map take precedence over all other substitutions. + Substitutions map[string][]*string `type:"map"` + + // The message title to use instead of the default message title. This value + // overrides the default message title. + TitleOverride *string `type:"string"` +} + +// String returns the string representation +func (s AddressConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddressConfiguration) GoString() string { + return s.String() +} + +// SetBodyOverride sets the BodyOverride field's value. +func (s *AddressConfiguration) SetBodyOverride(v string) *AddressConfiguration { + s.BodyOverride = &v + return s +} + +// SetChannelType sets the ChannelType field's value. +func (s *AddressConfiguration) SetChannelType(v string) *AddressConfiguration { + s.ChannelType = &v + return s +} + +// SetContext sets the Context field's value. 
+func (s *AddressConfiguration) SetContext(v map[string]*string) *AddressConfiguration {
+ s.Context = v
+ return s
+}
+
+// SetRawContent sets the RawContent field's value.
+func (s *AddressConfiguration) SetRawContent(v string) *AddressConfiguration {
+ s.RawContent = &v
+ return s
+}
+
+// SetSubstitutions sets the Substitutions field's value.
+func (s *AddressConfiguration) SetSubstitutions(v map[string][]*string) *AddressConfiguration {
+ s.Substitutions = v
+ return s
+}
+
+// SetTitleOverride sets the TitleOverride field's value.
+func (s *AddressConfiguration) SetTitleOverride(v string) *AddressConfiguration {
+ s.TitleOverride = &v
+ return s
+}
+
+// Specifies channel-specific content and settings for a message template that
+// can be used in push notifications that are sent through the ADM (Amazon Device
+// Messaging), Baidu (Baidu Cloud Push), or GCM (Firebase Cloud Messaging, formerly
+// Google Cloud Messaging) channel.
+type AndroidPushNotificationTemplate struct {
+ _ struct{} `type:"structure"`
+
+ // The action to occur if a recipient taps a push notification that's based
+ // on the message template. Valid values are:
+ //
+ // * OPEN_APP - Your app opens or it becomes the foreground app if it was
+ // sent to the background. This is the default action.
+ //
+ // * DEEP_LINK - Your app opens and displays a designated user interface
+ // in the app. This action uses the deep-linking features of the Android
+ // platform.
+ //
+ // * URL - The default mobile browser on the recipient's device opens and
+ // loads the web page at a URL that you specify.
+ Action *string `type:"string" enum:"Action"`
+
+ // The message body to use in a push notification that's based on the message
+ // template.
+ Body *string `type:"string"`
+
+ // The URL of the large icon image to display in the content view of a push
+ // notification that's based on the message template.
+ ImageIconUrl *string `type:"string"`
+
+ // The URL of an image to display in a push notification that's based on the
+ // message template.
+ ImageUrl *string `type:"string"`
+
+ // The URL of the small icon image to display in the status bar and the content
+ // view of a push notification that's based on the message template.
+ SmallImageIconUrl *string `type:"string"`
+
+ // The sound to play when a recipient receives a push notification that's based
+ // on the message template. You can use the default stream or specify the file
+ // name of a sound resource that's bundled in your app. On an Android platform,
+ // the sound file must reside in /res/raw/.
+ Sound *string `type:"string"`
+
+ // The title to use in a push notification that's based on the message template.
+ // This title appears above the notification message on a recipient's device.
+ Title *string `type:"string"`
+
+ // The URL to open in a recipient's default mobile browser, if a recipient taps
+ // a push notification that's based on the message template and the value
+ // of the Action property is URL.
+ Url *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AndroidPushNotificationTemplate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AndroidPushNotificationTemplate) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *AndroidPushNotificationTemplate) SetAction(v string) *AndroidPushNotificationTemplate {
+ s.Action = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *AndroidPushNotificationTemplate) SetBody(v string) *AndroidPushNotificationTemplate { + s.Body = &v + return s +} + +// SetImageIconUrl sets the ImageIconUrl field's value. +func (s *AndroidPushNotificationTemplate) SetImageIconUrl(v string) *AndroidPushNotificationTemplate { + s.ImageIconUrl = &v + return s +} + +// SetImageUrl sets the ImageUrl field's value. +func (s *AndroidPushNotificationTemplate) SetImageUrl(v string) *AndroidPushNotificationTemplate { + s.ImageUrl = &v + return s +} + +// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. +func (s *AndroidPushNotificationTemplate) SetSmallImageIconUrl(v string) *AndroidPushNotificationTemplate { + s.SmallImageIconUrl = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *AndroidPushNotificationTemplate) SetSound(v string) *AndroidPushNotificationTemplate { + s.Sound = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *AndroidPushNotificationTemplate) SetTitle(v string) *AndroidPushNotificationTemplate { + s.Title = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *AndroidPushNotificationTemplate) SetUrl(v string) *AndroidPushNotificationTemplate { + s.Url = &v + return s +} + +// Provides the results of a query that retrieved the data for a standard metric +// that applies to an application, and provides information about that query. +type ApplicationDateRangeKpiResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the metric applies to. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // EndTime is a required field + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, also referred to as a key performance indicator (KPI), + // that the data was retrieved for. This value describes the associated metric + // and consists of two or more terms, which are comprised of lowercase alphanumeric + // characters, separated by a hyphen. For a list of possible values, see the + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // + // KpiName is a required field + KpiName *string `type:"string" required:"true"` + + // An array of objects that contains the results of the query. Each object contains + // the value for the metric and metadata about that value. + // + // KpiResult is a required field + KpiResult *BaseKpiResult `type:"structure" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null for the Application Metrics resource + // because the resource returns all results in a single page. + NextToken *string `type:"string"` + + // StartTime is a required field + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s ApplicationDateRangeKpiResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationDateRangeKpiResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *ApplicationDateRangeKpiResponse) SetApplicationId(v string) *ApplicationDateRangeKpiResponse { + s.ApplicationId = &v + return s +} + +// SetEndTime sets the EndTime field's value. 
+func (s *ApplicationDateRangeKpiResponse) SetEndTime(v time.Time) *ApplicationDateRangeKpiResponse { + s.EndTime = &v + return s +} + +// SetKpiName sets the KpiName field's value. +func (s *ApplicationDateRangeKpiResponse) SetKpiName(v string) *ApplicationDateRangeKpiResponse { + s.KpiName = &v + return s +} + +// SetKpiResult sets the KpiResult field's value. +func (s *ApplicationDateRangeKpiResponse) SetKpiResult(v *BaseKpiResult) *ApplicationDateRangeKpiResponse { + s.KpiResult = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ApplicationDateRangeKpiResponse) SetNextToken(v string) *ApplicationDateRangeKpiResponse { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ApplicationDateRangeKpiResponse) SetStartTime(v time.Time) *ApplicationDateRangeKpiResponse { + s.StartTime = &v + return s +} + +// Provides information about an application. +type ApplicationResponse struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the application. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The unique identifier for the application. This identifier is displayed as + // the Project ID on the Amazon Pinpoint console. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The display name of the application. This name is displayed as the Project + // name on the Amazon Pinpoint console. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the application. Each tag consists of a required tag key + // and an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ApplicationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationResponse) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ApplicationResponse) SetArn(v string) *ApplicationResponse { + s.Arn = &v + return s +} + +// SetId sets the Id field's value. +func (s *ApplicationResponse) SetId(v string) *ApplicationResponse { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *ApplicationResponse) SetName(v string) *ApplicationResponse { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ApplicationResponse) SetTags(v map[string]*string) *ApplicationResponse { + s.Tags = v + return s +} + +// Provides information about an application, including the default settings +// for an application. +type ApplicationSettingsResource struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application. This identifier is displayed as + // the Project ID on the Amazon Pinpoint console. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The settings for the AWS Lambda function to use by default as a code hook + // for campaigns in the application. + CampaignHook *CampaignHook `type:"structure"` + + // The date and time, in ISO 8601 format, when the application's settings were + // last modified. + LastModifiedDate *string `type:"string"` + + // The default sending limits for campaigns in the application. + Limits *CampaignLimits `type:"structure"` + + // The default quiet time for campaigns and journeys in the application. 
+ // Quiet time is a specific time range when messages aren't sent to endpoints,
+ // if all the following conditions are met:
+ //
+ // * The EndpointDemographic.Timezone property of the endpoint is set to
+ // a valid value.
+ //
+ // * The current time in the endpoint's time zone is later than or equal
+ // to the time specified by the QuietTime.Start property for the application
+ // (or a campaign or journey that has custom quiet time settings).
+ //
+ // * The current time in the endpoint's time zone is earlier than or equal
+ // to the time specified by the QuietTime.End property for the application
+ // (or a campaign or journey that has custom quiet time settings).
+ //
+ // If any of the preceding conditions isn't met, the endpoint will receive messages
+ // from a campaign or journey, even if quiet time is enabled.
+ QuietTime *QuietTime `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationSettingsResource) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationSettingsResource) GoString() string {
+ return s.String()
+}
+
+// SetApplicationId sets the ApplicationId field's value.
+func (s *ApplicationSettingsResource) SetApplicationId(v string) *ApplicationSettingsResource {
+ s.ApplicationId = &v
+ return s
+}
+
+// SetCampaignHook sets the CampaignHook field's value.
+func (s *ApplicationSettingsResource) SetCampaignHook(v *CampaignHook) *ApplicationSettingsResource {
+ s.CampaignHook = v
+ return s
+}
+
+// SetLastModifiedDate sets the LastModifiedDate field's value.
+func (s *ApplicationSettingsResource) SetLastModifiedDate(v string) *ApplicationSettingsResource {
+ s.LastModifiedDate = &v
+ return s
+}
+
+// SetLimits sets the Limits field's value.
+func (s *ApplicationSettingsResource) SetLimits(v *CampaignLimits) *ApplicationSettingsResource {
+ s.Limits = v
+ return s
+}
+
+// SetQuietTime sets the QuietTime field's value.
+func (s *ApplicationSettingsResource) SetQuietTime(v *QuietTime) *ApplicationSettingsResource {
+ s.QuietTime = v
+ return s
+}
+
+// Provides information about all of your applications.
+type ApplicationsResponse struct {
+ _ struct{} `type:"structure"`
+
+ // An array of responses, one for each application that was returned.
+ Item []*ApplicationResponse `type:"list"`
+
+ // The string to use in a subsequent request to get the next page of results
+ // in a paginated response. This value is null if there are no additional pages.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ApplicationsResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationsResponse) GoString() string {
+ return s.String()
+}
+
+// SetItem sets the Item field's value.
+func (s *ApplicationsResponse) SetItem(v []*ApplicationResponse) *ApplicationsResponse {
+ s.Item = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ApplicationsResponse) SetNextToken(v string) *ApplicationsResponse {
+ s.NextToken = &v
+ return s
+}
+
+// Specifies attribute-based criteria for including or excluding endpoints from
+// a segment.
+type AttributeDimension struct {
+ _ struct{} `type:"structure"`
+
+ // The type of segment dimension to use. Valid values are: INCLUSIVE, endpoints
+ // that match the criteria are included in the segment; and, EXCLUSIVE, endpoints
+ // that match the criteria are excluded from the segment.
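+ //
+ // Editor's sketch (attribute values invented, not part of this change): a
+ // dimension that keeps only endpoints whose attribute matches a listed value.
+ // Note that the Validate method below rejects a dimension without Values;
+ // aws.String is the SDK's string-pointer helper.
+ //
+ //    dim := (&pinpoint.AttributeDimension{}).
+ //        SetAttributeType("INCLUSIVE").
+ //        SetValues([]*string{aws.String("premium")})
+ //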
+ AttributeType *string `type:"string" enum:"AttributeType"` + + // The criteria values to use for the segment dimension. Depending on the value + // of the AttributeType property, endpoints are included or excluded from the + // segment if their attribute values match the criteria values. + // + // Values is a required field + Values []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AttributeDimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeDimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeDimension"} + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AttributeDimension) SetAttributeType(v string) *AttributeDimension { + s.AttributeType = &v + return s +} + +// SetValues sets the Values field's value. +func (s *AttributeDimension) SetValues(v []*string) *AttributeDimension { + s.Values = v + return s +} + +// Provides information about the type and the names of attributes that were +// removed from all the endpoints that are associated with an application. +type AttributesResource struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The type of attribute or attributes that were removed from the endpoints. + // Valid values are: + // + // * endpoint-custom-attributes - Custom attributes that describe endpoints. + // + // * endpoint-metric-attributes - Custom metrics that your app reports to + // Amazon Pinpoint for endpoints. + // + // * endpoint-user-attributes - Custom attributes that describe users. + // + // AttributeType is a required field + AttributeType *string `type:"string" required:"true"` + + // An array that specifies the names of the attributes that were removed from + // the endpoints. + Attributes []*string `type:"list"` +} + +// String returns the string representation +func (s AttributesResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributesResource) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *AttributesResource) SetApplicationId(v string) *AttributesResource { + s.ApplicationId = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AttributesResource) SetAttributeType(v string) *AttributesResource { + s.AttributeType = &v + return s +} + +// SetAttributes sets the Attributes field's value. +func (s *AttributesResource) SetAttributes(v []*string) *AttributesResource { + s.Attributes = v + return s +} + +// Specifies the status and settings of the Baidu (Baidu Cloud Push) channel +// for an application. +type BaiduChannelRequest struct { + _ struct{} `type:"structure"` + + // The API key that you received from the Baidu Cloud Push service to communicate + // with the service. + // + // ApiKey is a required field + ApiKey *string `type:"string" required:"true"` + + // Specifies whether to enable the Baidu channel for the application. 
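+ //
+ // Editor's illustration (placeholder credentials): ApiKey and SecretKey are
+ // the required fields of this request, and the Validate method further below
+ // reports each one that's missing.
+ //
+ //    req := (&pinpoint.BaiduChannelRequest{}).
+ //        SetApiKey("baidu-api-key").
+ //        SetEnabled(true)
+ //    err := req.Validate() // fails: SecretKey is required
+ //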
+ Enabled *bool `type:"boolean"` + + // The secret key that you received from the Baidu Cloud Push service to communicate + // with the service. + // + // SecretKey is a required field + SecretKey *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s BaiduChannelRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaiduChannelRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BaiduChannelRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BaiduChannelRequest"} + if s.ApiKey == nil { + invalidParams.Add(request.NewErrParamRequired("ApiKey")) + } + if s.SecretKey == nil { + invalidParams.Add(request.NewErrParamRequired("SecretKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApiKey sets the ApiKey field's value. +func (s *BaiduChannelRequest) SetApiKey(v string) *BaiduChannelRequest { + s.ApiKey = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *BaiduChannelRequest) SetEnabled(v bool) *BaiduChannelRequest { + s.Enabled = &v + return s +} + +// SetSecretKey sets the SecretKey field's value. +func (s *BaiduChannelRequest) SetSecretKey(v string) *BaiduChannelRequest { + s.SecretKey = &v + return s +} + +// Provides information about the status and settings of the Baidu (Baidu Cloud +// Push) channel for an application. +type BaiduChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the Baidu channel applies + // to. + ApplicationId *string `type:"string"` + + // The date and time when the Baidu channel was enabled. + CreationDate *string `type:"string"` + + // The API key that you received from the Baidu Cloud Push service to communicate + // with the service. + // + // Credential is a required field + Credential *string `type:"string" required:"true"` + + // Specifies whether the Baidu channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // (Deprecated) An identifier for the Baidu channel. This property is retained + // only for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the Baidu channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the Baidu channel. + LastModifiedBy *string `type:"string"` + + // The date and time when the Baidu channel was last modified. + LastModifiedDate *string `type:"string"` + + // The type of messaging or notification platform for the channel. For the Baidu + // channel, this value is BAIDU. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The current version of the Baidu channel. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s BaiduChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaiduChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *BaiduChannelResponse) SetApplicationId(v string) *BaiduChannelResponse { + s.ApplicationId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. 
+func (s *BaiduChannelResponse) SetCreationDate(v string) *BaiduChannelResponse { + s.CreationDate = &v + return s +} + +// SetCredential sets the Credential field's value. +func (s *BaiduChannelResponse) SetCredential(v string) *BaiduChannelResponse { + s.Credential = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *BaiduChannelResponse) SetEnabled(v bool) *BaiduChannelResponse { + s.Enabled = &v + return s +} + +// SetHasCredential sets the HasCredential field's value. +func (s *BaiduChannelResponse) SetHasCredential(v bool) *BaiduChannelResponse { + s.HasCredential = &v + return s +} + +// SetId sets the Id field's value. +func (s *BaiduChannelResponse) SetId(v string) *BaiduChannelResponse { + s.Id = &v + return s +} + +// SetIsArchived sets the IsArchived field's value. +func (s *BaiduChannelResponse) SetIsArchived(v bool) *BaiduChannelResponse { + s.IsArchived = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *BaiduChannelResponse) SetLastModifiedBy(v string) *BaiduChannelResponse { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *BaiduChannelResponse) SetLastModifiedDate(v string) *BaiduChannelResponse { + s.LastModifiedDate = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *BaiduChannelResponse) SetPlatform(v string) *BaiduChannelResponse { + s.Platform = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *BaiduChannelResponse) SetVersion(v int64) *BaiduChannelResponse { + s.Version = &v + return s +} + +// Specifies the settings for a one-time message that's sent directly to an +// endpoint through the Baidu (Baidu Cloud Push) channel. +type BaiduMessage struct { + _ struct{} `type:"structure"` + + // The action to occur if the recipient taps the push notification. Valid values + // are: + // + // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // sent to the background. This is the default action. + // + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This action uses the deep-linking features of the Android + // platform. + // + // * URL - The default mobile browser on the recipient's device opens and + // loads the web page at a URL that you specify. + Action *string `type:"string" enum:"Action"` + + // The body of the notification message. + Body *string `type:"string"` + + // The JSON data payload to use for the push notification, if the notification + // is a silent push notification. This payload is added to the data.pinpoint.jsonBody + // object of the notification. + Data map[string]*string `type:"map"` + + // The icon image name of the asset saved in your app. + IconReference *string `type:"string"` + + // The URL of the large icon image to display in the content view of the push + // notification. + ImageIconUrl *string `type:"string"` + + // The URL of an image to display in the push notification. + ImageUrl *string `type:"string"` + + // The raw, JSON-formatted string to use as the payload for the notification + // message. This value overrides the message. + RawContent *string `type:"string"` + + // Specifies whether the notification is a silent push notification, which is + // a push notification that doesn't display on a recipient's device. Silent + // push notifications can be used for cases such as updating an app's configuration + // or supporting phone home functionality. 
+ SilentPush *bool `type:"boolean"` + + // The URL of the small icon image to display in the status bar and the content + // view of the push notification. + SmallImageIconUrl *string `type:"string"` + + // The sound to play when the recipient receives the push notification. You + // can use the default stream or specify the file name of a sound resource that's + // bundled in your app. On an Android platform, the sound file must reside in + // /res/raw/. + Sound *string `type:"string"` + + // The default message variables to use in the notification message. You can + // override the default variables with individual address variables. + Substitutions map[string][]*string `type:"map"` + + // The amount of time, in seconds, that the Baidu Cloud Push service should + // store the message if the recipient's device is offline. The default value + // and maximum supported time is 604,800 seconds (7 days). + TimeToLive *int64 `type:"integer"` + + // The title to display above the notification message on the recipient's device. + Title *string `type:"string"` + + // The URL to open in the recipient's default mobile browser, if a recipient + // taps the push notification and the value of the Action property is URL. + Url *string `type:"string"` +} + +// String returns the string representation +func (s BaiduMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaiduMessage) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *BaiduMessage) SetAction(v string) *BaiduMessage { + s.Action = &v + return s +} + +// SetBody sets the Body field's value. +func (s *BaiduMessage) SetBody(v string) *BaiduMessage { + s.Body = &v + return s +} + +// SetData sets the Data field's value. +func (s *BaiduMessage) SetData(v map[string]*string) *BaiduMessage { + s.Data = v + return s +} + +// SetIconReference sets the IconReference field's value. +func (s *BaiduMessage) SetIconReference(v string) *BaiduMessage { + s.IconReference = &v + return s +} + +// SetImageIconUrl sets the ImageIconUrl field's value. +func (s *BaiduMessage) SetImageIconUrl(v string) *BaiduMessage { + s.ImageIconUrl = &v + return s +} + +// SetImageUrl sets the ImageUrl field's value. +func (s *BaiduMessage) SetImageUrl(v string) *BaiduMessage { + s.ImageUrl = &v + return s +} + +// SetRawContent sets the RawContent field's value. +func (s *BaiduMessage) SetRawContent(v string) *BaiduMessage { + s.RawContent = &v + return s +} + +// SetSilentPush sets the SilentPush field's value. +func (s *BaiduMessage) SetSilentPush(v bool) *BaiduMessage { + s.SilentPush = &v + return s +} + +// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. +func (s *BaiduMessage) SetSmallImageIconUrl(v string) *BaiduMessage { + s.SmallImageIconUrl = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *BaiduMessage) SetSound(v string) *BaiduMessage { + s.Sound = &v + return s +} + +// SetSubstitutions sets the Substitutions field's value. +func (s *BaiduMessage) SetSubstitutions(v map[string][]*string) *BaiduMessage { + s.Substitutions = v + return s +} + +// SetTimeToLive sets the TimeToLive field's value. +func (s *BaiduMessage) SetTimeToLive(v int64) *BaiduMessage { + s.TimeToLive = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *BaiduMessage) SetTitle(v string) *BaiduMessage { + s.Title = &v + return s +} + +// SetUrl sets the Url field's value. 
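+
+// Editorial note (not generated SDK code): a sketch of a silent Baidu push that
+// carries only a data payload, using the setters above. The payload key and
+// value are hypothetical; aws.String is the SDK's pointer helper.
+//
+//	msg := (&BaiduMessage{}).
+//		SetSilentPush(true).
+//		SetData(map[string]*string{"feature_flag": aws.String("on")}).
+//		SetTimeToLive(3600) // stored by Baidu Cloud Push for up to an hour if the device is offline
+//	_ = msg
+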
+func (s *BaiduMessage) SetUrl(v string) *BaiduMessage { + s.Url = &v + return s +} + +// Provides the results of a query that retrieved the data for a standard metric +// that applies to an application, campaign, or journey. +type BaseKpiResult struct { + _ struct{} `type:"structure"` + + // An array of objects that provides the results of a query that retrieved the + // data for a standard metric that applies to an application, campaign, or journey. + // + // Rows is a required field + Rows []*ResultRow `type:"list" required:"true"` +} + +// String returns the string representation +func (s BaseKpiResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaseKpiResult) GoString() string { + return s.String() +} + +// SetRows sets the Rows field's value. +func (s *BaseKpiResult) SetRows(v []*ResultRow) *BaseKpiResult { + s.Rows = v + return s +} + +// Provides the results of a query that retrieved the data for a standard metric +// that applies to a campaign, and provides information about that query. +type CampaignDateRangeKpiResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application that the metric applies to. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The unique identifier for the campaign that the metric applies to. + // + // CampaignId is a required field + CampaignId *string `type:"string" required:"true"` + + // EndTime is a required field + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, also referred to as a key performance indicator (KPI), + // that the data was retrieved for. This value describes the associated metric + // and consists of two or more terms, which are composed of lowercase alphanumeric + // characters, separated by a hyphen. For a list of possible values, see the + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // + // KpiName is a required field + KpiName *string `type:"string" required:"true"` + + // An array of objects that contains the results of the query. Each object contains + // the value for the metric and metadata about that value. + // + // KpiResult is a required field + KpiResult *BaseKpiResult `type:"structure" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null for the Campaign Metrics resource + // because the resource returns all results in a single page. + NextToken *string `type:"string"` + + // StartTime is a required field + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s CampaignDateRangeKpiResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignDateRangeKpiResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *CampaignDateRangeKpiResponse) SetApplicationId(v string) *CampaignDateRangeKpiResponse { + s.ApplicationId = &v + return s +} + +// SetCampaignId sets the CampaignId field's value. +func (s *CampaignDateRangeKpiResponse) SetCampaignId(v string) *CampaignDateRangeKpiResponse { + s.CampaignId = &v + return s +} + +// SetEndTime sets the EndTime field's value.
+func (s *CampaignDateRangeKpiResponse) SetEndTime(v time.Time) *CampaignDateRangeKpiResponse { + s.EndTime = &v + return s +} + +// SetKpiName sets the KpiName field's value. +func (s *CampaignDateRangeKpiResponse) SetKpiName(v string) *CampaignDateRangeKpiResponse { + s.KpiName = &v + return s +} + +// SetKpiResult sets the KpiResult field's value. +func (s *CampaignDateRangeKpiResponse) SetKpiResult(v *BaseKpiResult) *CampaignDateRangeKpiResponse { + s.KpiResult = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *CampaignDateRangeKpiResponse) SetNextToken(v string) *CampaignDateRangeKpiResponse { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *CampaignDateRangeKpiResponse) SetStartTime(v time.Time) *CampaignDateRangeKpiResponse { + s.StartTime = &v + return s +} + +// Specifies the content and "From" address for an email message that's sent +// to recipients of a campaign. +type CampaignEmailMessage struct { + _ struct{} `type:"structure"` + + // The body of the email for recipients whose email clients don't support HTML + // content. + Body *string `type:"string"` + + // The verified email address to send the email from. The default address is + // the FromAddress specified for the email channel for the application. + FromAddress *string `type:"string"` + + // The body of the email, in HTML format, for recipients whose email clients + // support HTML content. + HtmlBody *string `type:"string"` + + // The subject line, or title, of the email. + // + // Title is a required field + Title *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CampaignEmailMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignEmailMessage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CampaignEmailMessage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CampaignEmailMessage"} + if s.Title == nil { + invalidParams.Add(request.NewErrParamRequired("Title")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *CampaignEmailMessage) SetBody(v string) *CampaignEmailMessage { + s.Body = &v + return s +} + +// SetFromAddress sets the FromAddress field's value. +func (s *CampaignEmailMessage) SetFromAddress(v string) *CampaignEmailMessage { + s.FromAddress = &v + return s +} + +// SetHtmlBody sets the HtmlBody field's value. +func (s *CampaignEmailMessage) SetHtmlBody(v string) *CampaignEmailMessage { + s.HtmlBody = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *CampaignEmailMessage) SetTitle(v string) *CampaignEmailMessage { + s.Title = &v + return s +} + +// Specifies the settings for events that cause a campaign to be sent. +type CampaignEventFilter struct { + _ struct{} `type:"structure"` + + // The dimension settings of the event filter for the campaign. + // + // Dimensions is a required field + Dimensions *EventDimensions `type:"structure" required:"true"` + + // The type of event that causes the campaign to be sent. Valid values are: + // SYSTEM, sends the campaign when a system event occurs; and, ENDPOINT, sends + // the campaign when an endpoint event (Events resource) occurs. 
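+
+// Editorial note (not generated SDK code): the KpiName format described above
+// (two or more terms of lowercase alphanumeric characters, separated by hyphens)
+// can be pre-checked client-side with the standard regexp package. The pattern
+// is an editorial reading of that description, not an official one.
+//
+//	var kpiName = regexp.MustCompile(`^[a-z0-9]+(-[a-z0-9]+)+$`)
+//	kpiName.MatchString("unique-deliveries") // true
+//	kpiName.MatchString("UniqueDeliveries")  // false: no hyphen, not lowercase
+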
+ // + // FilterType is a required field + FilterType *string `type:"string" required:"true" enum:"FilterType"` +} + +// String returns the string representation +func (s CampaignEventFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignEventFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CampaignEventFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CampaignEventFilter"} + if s.Dimensions == nil { + invalidParams.Add(request.NewErrParamRequired("Dimensions")) + } + if s.FilterType == nil { + invalidParams.Add(request.NewErrParamRequired("FilterType")) + } + if s.Dimensions != nil { + if err := s.Dimensions.Validate(); err != nil { + invalidParams.AddNested("Dimensions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *CampaignEventFilter) SetDimensions(v *EventDimensions) *CampaignEventFilter { + s.Dimensions = v + return s +} + +// SetFilterType sets the FilterType field's value. +func (s *CampaignEventFilter) SetFilterType(v string) *CampaignEventFilter { + s.FilterType = &v + return s +} + +// Specifies the AWS Lambda function to use as a code hook for a campaign. +type CampaignHook struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon + // Pinpoint invokes to send messages for a campaign. + LambdaFunctionName *string `type:"string"` + + // Specifies which Lambda mode to use when invoking the AWS Lambda function. + Mode *string `type:"string" enum:"Mode"` + + // The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function + // over HTTPS. + WebUrl *string `type:"string"` +} + +// String returns the string representation +func (s CampaignHook) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignHook) GoString() string { + return s.String() +} + +// SetLambdaFunctionName sets the LambdaFunctionName field's value. +func (s *CampaignHook) SetLambdaFunctionName(v string) *CampaignHook { + s.LambdaFunctionName = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *CampaignHook) SetMode(v string) *CampaignHook { + s.Mode = &v + return s +} + +// SetWebUrl sets the WebUrl field's value. +func (s *CampaignHook) SetWebUrl(v string) *CampaignHook { + s.WebUrl = &v + return s +} + +// Specifies limits on the messages that a campaign can send. +type CampaignLimits struct { + _ struct{} `type:"structure"` + + // The maximum number of messages that a campaign can send to a single endpoint + // during a 24-hour period. The maximum value is 100. + Daily *int64 `type:"integer"` + + // The maximum amount of time, in seconds, that a campaign can attempt to deliver + // a message after the scheduled start time for the campaign. The minimum value + // is 60 seconds. + MaximumDuration *int64 `type:"integer"` + + // The maximum number of messages that a campaign can send each second. The + // minimum value is 50. The maximum value is 20,000. + MessagesPerSecond *int64 `type:"integer"` + + // The maximum number of messages that a campaign can send to a single endpoint + // during the course of the campaign. The maximum value is 100. 
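+
+// Editorial note (not generated SDK code): CampaignEventFilter.Validate above
+// shows the SDK's nested-validation pattern: required-field checks first, then
+// child Validate calls whose failures are wrapped with AddNested. A sketch of
+// triggering it; the empty EventDimensions value is hypothetical.
+//
+//	filter := (&CampaignEventFilter{}).
+//		SetFilterType("ENDPOINT").
+//		SetDimensions(&EventDimensions{})
+//	if err := filter.Validate(); err != nil {
+//		// a nested EventDimensions failure is reported under "Dimensions"
+//	}
+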
+ Total *int64 `type:"integer"` +} + +// String returns the string representation +func (s CampaignLimits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignLimits) GoString() string { + return s.String() +} + +// SetDaily sets the Daily field's value. +func (s *CampaignLimits) SetDaily(v int64) *CampaignLimits { + s.Daily = &v + return s +} + +// SetMaximumDuration sets the MaximumDuration field's value. +func (s *CampaignLimits) SetMaximumDuration(v int64) *CampaignLimits { + s.MaximumDuration = &v + return s +} + +// SetMessagesPerSecond sets the MessagesPerSecond field's value. +func (s *CampaignLimits) SetMessagesPerSecond(v int64) *CampaignLimits { + s.MessagesPerSecond = &v + return s +} + +// SetTotal sets the Total field's value. +func (s *CampaignLimits) SetTotal(v int64) *CampaignLimits { + s.Total = &v + return s +} + +// Provides information about the status, configuration, and other settings +// for a campaign. +type CampaignResponse struct { + _ struct{} `type:"structure"` + + // An array of responses, one for each treatment that you defined for the campaign, + // in addition to the default treatment. + AdditionalTreatments []*TreatmentResource `type:"list"` + + // The unique identifier for the application that the campaign applies to. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the campaign. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The date, in ISO 8601 format, when the campaign was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` + + // The current status of the campaign's default treatment. This value exists + // only for campaigns that have more than one treatment, to support A/B testing. + DefaultState *CampaignState `type:"structure"` + + // The custom description of the campaign. + Description *string `type:"string"` + + // The allocated percentage of users (segment members) who shouldn't receive + // messages from the campaign. + HoldoutPercent *int64 `type:"integer"` + + // The settings for the AWS Lambda function to use as a code hook for the campaign. + Hook *CampaignHook `type:"structure"` + + // The unique identifier for the campaign. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies whether the campaign is paused. A paused campaign doesn't run unless + // you resume it by changing this value to false. + IsPaused *bool `type:"boolean"` + + // The date, in ISO 8601 format, when the campaign was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` + + // The messaging limits for the campaign. + Limits *CampaignLimits `type:"structure"` + + // The message configuration settings for the campaign. + MessageConfiguration *MessageConfiguration `type:"structure"` + + // The name of the campaign. + Name *string `type:"string"` + + // The schedule settings for the campaign. + Schedule *Schedule `type:"structure"` + + // The unique identifier for the segment that's associated with the campaign. + // + // SegmentId is a required field + SegmentId *string `type:"string" required:"true"` + + // The version number of the segment that's associated with the campaign. + // + // SegmentVersion is a required field + SegmentVersion *int64 `type:"integer" required:"true"` + + // The current status of the campaign.
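+
+// Editorial note (not generated SDK code): the limits documented above are
+// enforced server-side, but a sketch makes the bounds concrete; the literal
+// values are hypothetical and sit inside the documented ranges.
+//
+//	limits := (&CampaignLimits{}).
+//		SetDaily(10).              // at most 100 messages per endpoint per 24 hours
+//		SetTotal(50).              // at most 100 per endpoint over the campaign
+//		SetMessagesPerSecond(100). // between 50 and 20,000
+//		SetMaximumDuration(600)    // at least 60 seconds
+//	_ = limits
+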
+ State *CampaignState `type:"structure"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the campaign. Each tag consists of a required tag key and + // an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The message template that's used for the campaign. + TemplateConfiguration *TemplateConfiguration `type:"structure"` + + // The custom description of a variation of the campaign that's used for A/B + // testing. + TreatmentDescription *string `type:"string"` + + // The custom name of a variation of the campaign that's used for A/B testing. + TreatmentName *string `type:"string"` + + // The version number of the campaign. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s CampaignResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignResponse) GoString() string { + return s.String() +} + +// SetAdditionalTreatments sets the AdditionalTreatments field's value. +func (s *CampaignResponse) SetAdditionalTreatments(v []*TreatmentResource) *CampaignResponse { + s.AdditionalTreatments = v + return s +} + // SetApplicationId sets the ApplicationId field's value. -func (s *ADMChannelResponse) SetApplicationId(v string) *ADMChannelResponse { +func (s *CampaignResponse) SetApplicationId(v string) *CampaignResponse { s.ApplicationId = &v return s } +// SetArn sets the Arn field's value. +func (s *CampaignResponse) SetArn(v string) *CampaignResponse { + s.Arn = &v + return s +} + // SetCreationDate sets the CreationDate field's value. -func (s *ADMChannelResponse) SetCreationDate(v string) *ADMChannelResponse { +func (s *CampaignResponse) SetCreationDate(v string) *CampaignResponse { s.CreationDate = &v return s } -// SetEnabled sets the Enabled field's value. -func (s *ADMChannelResponse) SetEnabled(v bool) *ADMChannelResponse { - s.Enabled = &v +// SetDefaultState sets the DefaultState field's value. +func (s *CampaignResponse) SetDefaultState(v *CampaignState) *CampaignResponse { + s.DefaultState = v return s } -// SetHasCredential sets the HasCredential field's value. -func (s *ADMChannelResponse) SetHasCredential(v bool) *ADMChannelResponse { - s.HasCredential = &v +// SetDescription sets the Description field's value. +func (s *CampaignResponse) SetDescription(v string) *CampaignResponse { + s.Description = &v return s } -// SetId sets the Id field's value. -func (s *ADMChannelResponse) SetId(v string) *ADMChannelResponse { - s.Id = &v +// SetHoldoutPercent sets the HoldoutPercent field's value. +func (s *CampaignResponse) SetHoldoutPercent(v int64) *CampaignResponse { + s.HoldoutPercent = &v return s } -// SetIsArchived sets the IsArchived field's value. -func (s *ADMChannelResponse) SetIsArchived(v bool) *ADMChannelResponse { - s.IsArchived = &v +// SetHook sets the Hook field's value. +func (s *CampaignResponse) SetHook(v *CampaignHook) *CampaignResponse { + s.Hook = v return s } -// SetLastModifiedBy sets the LastModifiedBy field's value. -func (s *ADMChannelResponse) SetLastModifiedBy(v string) *ADMChannelResponse { - s.LastModifiedBy = &v +// SetId sets the Id field's value. +func (s *CampaignResponse) SetId(v string) *CampaignResponse { + s.Id = &v + return s +} + +// SetIsPaused sets the IsPaused field's value. +func (s *CampaignResponse) SetIsPaused(v bool) *CampaignResponse { + s.IsPaused = &v return s } // SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *ADMChannelResponse) SetLastModifiedDate(v string) *ADMChannelResponse { +func (s *CampaignResponse) SetLastModifiedDate(v string) *CampaignResponse { s.LastModifiedDate = &v return s } -// SetPlatform sets the Platform field's value. -func (s *ADMChannelResponse) SetPlatform(v string) *ADMChannelResponse { - s.Platform = &v +// SetLimits sets the Limits field's value. +func (s *CampaignResponse) SetLimits(v *CampaignLimits) *CampaignResponse { + s.Limits = v + return s +} + +// SetMessageConfiguration sets the MessageConfiguration field's value. +func (s *CampaignResponse) SetMessageConfiguration(v *MessageConfiguration) *CampaignResponse { + s.MessageConfiguration = v + return s +} + +// SetName sets the Name field's value. +func (s *CampaignResponse) SetName(v string) *CampaignResponse { + s.Name = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *CampaignResponse) SetSchedule(v *Schedule) *CampaignResponse { + s.Schedule = v + return s +} + +// SetSegmentId sets the SegmentId field's value. +func (s *CampaignResponse) SetSegmentId(v string) *CampaignResponse { + s.SegmentId = &v + return s +} + +// SetSegmentVersion sets the SegmentVersion field's value. +func (s *CampaignResponse) SetSegmentVersion(v int64) *CampaignResponse { + s.SegmentVersion = &v + return s +} + +// SetState sets the State field's value. +func (s *CampaignResponse) SetState(v *CampaignState) *CampaignResponse { + s.State = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CampaignResponse) SetTags(v map[string]*string) *CampaignResponse { + s.Tags = v + return s +} + +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *CampaignResponse) SetTemplateConfiguration(v *TemplateConfiguration) *CampaignResponse { + s.TemplateConfiguration = v + return s +} + +// SetTreatmentDescription sets the TreatmentDescription field's value. +func (s *CampaignResponse) SetTreatmentDescription(v string) *CampaignResponse { + s.TreatmentDescription = &v + return s +} + +// SetTreatmentName sets the TreatmentName field's value. +func (s *CampaignResponse) SetTreatmentName(v string) *CampaignResponse { + s.TreatmentName = &v return s } // SetVersion sets the Version field's value. -func (s *ADMChannelResponse) SetVersion(v int64) *ADMChannelResponse { +func (s *CampaignResponse) SetVersion(v int64) *CampaignResponse { s.Version = &v return s } -// ADM Message. -type ADMMessage struct { +// Specifies the content and settings for an SMS message that's sent to recipients +// of a campaign. +type CampaignSmsMessage struct { _ struct{} `type:"structure"` - // The action that occurs if the user taps a push notification delivered by - // the campaign: OPEN_APP - Your app launches, or it becomes the foreground - // app if it has been sent to the background. This is the default action. DEEP_LINK - // - Uses deep linking features in iOS and Android to open your app and display - // a designated user interface within the app. URL - The default mobile browser - // on the user's device launches and opens a web page at the URL you specify. - // Possible values include: OPEN_APP | DEEP_LINK | URL - Action *string `type:"string" enum:"Action"` - - // The message body of the notification. + // The body of the SMS message. Body *string `type:"string"` - // Optional. Arbitrary string used to indicate multiple messages are logically - // the same and that ADM is allowed to drop previously enqueued messages in - // favor of this one. 
- ConsolidationKey *string `type:"string"` + // The type of SMS message. Valid values are: TRANSACTIONAL, the message is + // critical or time-sensitive, such as a one-time password that supports a customer + // transaction; and, PROMOTIONAL, the message isn't critical or time-sensitive, + // such as a marketing message. + MessageType *string `type:"string" enum:"MessageType"` - // The data payload used for a silent push. This payload is added to the notifications' - // data.pinpoint.jsonBody' object - Data map[string]*string `type:"map"` + // The sender ID to display on recipients' devices when they receive the SMS + // message. + SenderId *string `type:"string"` +} - // Optional. Number of seconds ADM should retain the message if the device is - // offline - ExpiresAfter *string `type:"string"` +// String returns the string representation +func (s CampaignSmsMessage) String() string { + return awsutil.Prettify(s) +} - // The icon image name of the asset saved in your application. - IconReference *string `type:"string"` +// GoString returns the string representation +func (s CampaignSmsMessage) GoString() string { + return s.String() +} - // The URL that points to an image used as the large icon to the notification - // content view. - ImageIconUrl *string `type:"string"` +// SetBody sets the Body field's value. +func (s *CampaignSmsMessage) SetBody(v string) *CampaignSmsMessage { + s.Body = &v + return s +} - // The URL that points to an image used in the push notification. - ImageUrl *string `type:"string"` +// SetMessageType sets the MessageType field's value. +func (s *CampaignSmsMessage) SetMessageType(v string) *CampaignSmsMessage { + s.MessageType = &v + return s +} - // Optional. Base-64-encoded MD5 checksum of the data parameter. Used to verify - // data integrity - MD5 *string `type:"string"` +// SetSenderId sets the SenderId field's value. +func (s *CampaignSmsMessage) SetSenderId(v string) *CampaignSmsMessage { + s.SenderId = &v + return s +} - // The Raw JSON formatted string to be used as the payload. This value overrides - // the message. - RawContent *string `type:"string"` +// Provides information about the status of a campaign. +type CampaignState struct { + _ struct{} `type:"structure"` - // Indicates if the message should display on the users device. Silent pushes - // can be used for Remote Configuration and Phone Home use cases. - SilentPush *bool `type:"boolean"` + // The current status of the campaign, or the current status of a treatment + // that belongs to an A/B test campaign. If a campaign uses A/B testing, the + // campaign has a status of COMPLETED only if all campaign treatments have a + // status of COMPLETED. + CampaignStatus *string `type:"string" enum:"CampaignStatus"` +} - // The URL that points to an image used as the small icon for the notification - // which will be used to represent the notification in the status bar and content - // view - SmallImageIconUrl *string `type:"string"` +// String returns the string representation +func (s CampaignState) String() string { + return awsutil.Prettify(s) +} - // Indicates a sound to play when the device receives the notification. Supports - // default, or the filename of a sound resource bundled in the app. Android - // sound files must reside in /res/raw/ - Sound *string `type:"string"` +// GoString returns the string representation +func (s CampaignState) GoString() string { + return s.String() +} - // Default message substitutions. Can be overridden by individual address substitutions. 
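+
+// Editorial note (not generated SDK code): a sketch of the new CampaignSmsMessage
+// defined above; the body text and sender ID are hypothetical.
+//
+//	sms := (&CampaignSmsMessage{}).
+//		SetMessageType("TRANSACTIONAL"). // time-sensitive, e.g. a one-time password
+//		SetSenderId("MyBrand").
+//		SetBody("Your verification code is 123456")
+//	_ = sms
+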
- Substitutions map[string][]*string `type:"map"` +// SetCampaignStatus sets the CampaignStatus field's value. +func (s *CampaignState) SetCampaignStatus(v string) *CampaignState { + s.CampaignStatus = &v + return s +} - // The message title that displays above the message on the user's device. - Title *string `type:"string"` +// Provides information about the configuration and other settings for all the +// campaigns that are associated with an application. +type CampaignsResponse struct { + _ struct{} `type:"structure"` - // The URL to open in the user's mobile browser. Used if the value for Action - // is URL. - Url *string `type:"string"` + // An array of responses, one for each campaign that's associated with the application. + // + // Item is a required field + Item []*CampaignResponse `type:"list" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` } // String returns the string representation -func (s ADMMessage) String() string { +func (s CampaignsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ADMMessage) GoString() string { +func (s CampaignsResponse) GoString() string { return s.String() } -// SetAction sets the Action field's value. -func (s *ADMMessage) SetAction(v string) *ADMMessage { - s.Action = &v +// SetItem sets the Item field's value. +func (s *CampaignsResponse) SetItem(v []*CampaignResponse) *CampaignsResponse { + s.Item = v return s } -// SetBody sets the Body field's value. -func (s *ADMMessage) SetBody(v string) *ADMMessage { - s.Body = &v +// SetNextToken sets the NextToken field's value. +func (s *CampaignsResponse) SetNextToken(v string) *CampaignsResponse { + s.NextToken = &v + return s +} + +// Provides information about the general settings and status of a channel for +// an application. +type ChannelResponse struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application. + ApplicationId *string `type:"string"` + + // The date and time, in ISO 8601 format, when the channel was enabled. + CreationDate *string `type:"string"` + + // Specifies whether the channel is enabled for the application. + Enabled *bool `type:"boolean"` + + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` + + // (Deprecated) An identifier for the channel. This property is retained only + // for backward compatibility. + Id *string `type:"string"` + + // Specifies whether the channel is archived. + IsArchived *bool `type:"boolean"` + + // The user who last modified the channel. + LastModifiedBy *string `type:"string"` + + // The date and time, in ISO 8601 format, when the channel was last modified. + LastModifiedDate *string `type:"string"` + + // The current version of the channel. + Version *int64 `type:"integer"` +} + +// String returns the string representation +func (s ChannelResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChannelResponse) GoString() string { + return s.String() +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *ChannelResponse) SetApplicationId(v string) *ChannelResponse { + s.ApplicationId = &v return s } -// SetConsolidationKey sets the ConsolidationKey field's value. 
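+
+// Editorial note (not generated SDK code): CampaignsResponse above follows the
+// usual token-pagination contract: keep requesting with the returned NextToken
+// until it comes back nil. A sketch of draining Item pages, assuming a
+// hypothetical fetch(token) that returns a *CampaignsResponse.
+//
+//	var all []*CampaignResponse
+//	var token *string
+//	for {
+//		page := fetch(token)
+//		all = append(all, page.Item...)
+//		if page.NextToken == nil {
+//			break
+//		}
+//		token = page.NextToken
+//	}
+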
-func (s *ADMMessage) SetConsolidationKey(v string) *ADMMessage { - s.ConsolidationKey = &v +// SetCreationDate sets the CreationDate field's value. +func (s *ChannelResponse) SetCreationDate(v string) *ChannelResponse { + s.CreationDate = &v return s } -// SetData sets the Data field's value. -func (s *ADMMessage) SetData(v map[string]*string) *ADMMessage { - s.Data = v +// SetEnabled sets the Enabled field's value. +func (s *ChannelResponse) SetEnabled(v bool) *ChannelResponse { + s.Enabled = &v return s } -// SetExpiresAfter sets the ExpiresAfter field's value. -func (s *ADMMessage) SetExpiresAfter(v string) *ADMMessage { - s.ExpiresAfter = &v +// SetHasCredential sets the HasCredential field's value. +func (s *ChannelResponse) SetHasCredential(v bool) *ChannelResponse { + s.HasCredential = &v return s } -// SetIconReference sets the IconReference field's value. -func (s *ADMMessage) SetIconReference(v string) *ADMMessage { - s.IconReference = &v +// SetId sets the Id field's value. +func (s *ChannelResponse) SetId(v string) *ChannelResponse { + s.Id = &v return s } -// SetImageIconUrl sets the ImageIconUrl field's value. -func (s *ADMMessage) SetImageIconUrl(v string) *ADMMessage { - s.ImageIconUrl = &v +// SetIsArchived sets the IsArchived field's value. +func (s *ChannelResponse) SetIsArchived(v bool) *ChannelResponse { + s.IsArchived = &v return s } -// SetImageUrl sets the ImageUrl field's value. -func (s *ADMMessage) SetImageUrl(v string) *ADMMessage { - s.ImageUrl = &v +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *ChannelResponse) SetLastModifiedBy(v string) *ChannelResponse { + s.LastModifiedBy = &v return s } -// SetMD5 sets the MD5 field's value. -func (s *ADMMessage) SetMD5(v string) *ADMMessage { - s.MD5 = &v +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *ChannelResponse) SetLastModifiedDate(v string) *ChannelResponse { + s.LastModifiedDate = &v return s } -// SetRawContent sets the RawContent field's value. -func (s *ADMMessage) SetRawContent(v string) *ADMMessage { - s.RawContent = &v +// SetVersion sets the Version field's value. +func (s *ChannelResponse) SetVersion(v int64) *ChannelResponse { + s.Version = &v return s } -// SetSilentPush sets the SilentPush field's value. -func (s *ADMMessage) SetSilentPush(v bool) *ADMMessage { - s.SilentPush = &v - return s +// Provides information about the general settings and status of all channels +// for an application, including channels that aren't enabled for the application. +type ChannelsResponse struct { + _ struct{} `type:"structure"` + + // A map that contains a multipart response for each channel. For each item + // in this object, the ChannelType is the key and the Channel is the value. + // + // Channels is a required field + Channels map[string]*ChannelResponse `type:"map" required:"true"` } -// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. -func (s *ADMMessage) SetSmallImageIconUrl(v string) *ADMMessage { - s.SmallImageIconUrl = &v - return s +// String returns the string representation +func (s ChannelsResponse) String() string { + return awsutil.Prettify(s) } -// SetSound sets the Sound field's value. -func (s *ADMMessage) SetSound(v string) *ADMMessage { - s.Sound = &v - return s +// GoString returns the string representation +func (s ChannelsResponse) GoString() string { + return s.String() } -// SetSubstitutions sets the Substitutions field's value. 
-func (s *ADMMessage) SetSubstitutions(v map[string][]*string) *ADMMessage { - s.Substitutions = v +// SetChannels sets the Channels field's value. +func (s *ChannelsResponse) SetChannels(v map[string]*ChannelResponse) *ChannelsResponse { + s.Channels = v return s } -// SetTitle sets the Title field's value. -func (s *ADMMessage) SetTitle(v string) *ADMMessage { - s.Title = &v - return s +// Specifies the conditions to evaluate for an activity in a journey, and how +// to evaluate those conditions. +type Condition struct { + _ struct{} `type:"structure"` + + // The conditions to evaluate for the activity. + Conditions []*SimpleCondition `type:"list"` + + // Specifies how to handle multiple conditions for the activity. For example, + // if you specify two conditions for an activity, whether both or only one of + // the conditions must be met for the activity to be performed. + Operator *string `type:"string" enum:"Operator"` } -// SetUrl sets the Url field's value. -func (s *ADMMessage) SetUrl(v string) *ADMMessage { - s.Url = &v - return s +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) } -// Apple Push Notification Service channel definition. -type APNSChannelRequest struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} - // The bundle id used for APNs Tokens. - BundleId *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.Conditions != nil { + for i, v := range s.Conditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Conditions", i), err.(request.ErrInvalidParams)) + } + } + } - // The distribution certificate from Apple. - Certificate *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The default authentication method used for APNs. - DefaultAuthenticationMethod *string `type:"string"` +// SetConditions sets the Conditions field's value. +func (s *Condition) SetConditions(v []*SimpleCondition) *Condition { + s.Conditions = v + return s +} - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` +// SetOperator sets the Operator field's value. +func (s *Condition) SetOperator(v string) *Condition { + s.Operator = &v + return s +} - // The certificate private key. - PrivateKey *string `type:"string"` +// Specifies the settings for a yes/no split activity in a journey. This type +// of activity sends participants down one of two paths in a journey, based +// on conditions that you specify. +type ConditionalSplitActivity struct { + _ struct{} `type:"structure"` - // The team id used for APNs Tokens. - TeamId *string `type:"string"` + // The conditions that define the paths for the activity, and the relationship + // between the conditions. + Condition *Condition `type:"structure"` - // The token key used for APNs Tokens. - TokenKey *string `type:"string"` + // The amount of time to wait before determining whether the conditions are + // met, or the date and time when Amazon Pinpoint determines whether the conditions + // are met. + EvaluationWaitTime *WaitTime `type:"structure"` - // The token key used for APNs Tokens. 
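+
+// Editorial note (not generated SDK code): in ChannelsResponse above, Channels
+// is keyed by channel type. A sketch of scanning for enabled channels, assuming
+// a resp variable of type *ChannelsResponse and the SDK's aws.BoolValue and
+// aws.Int64Value pointer helpers.
+//
+//	for channelType, ch := range resp.Channels {
+//		if aws.BoolValue(ch.Enabled) {
+//			fmt.Printf("%s enabled (version %d)\n", channelType, aws.Int64Value(ch.Version))
+//		}
+//	}
+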
- TokenKeyId *string `type:"string"` + // The unique identifier for the activity to perform if the condition isn't + // met. + FalseActivity *string `type:"string"` + + // The unique identifier for the activity to perform if the condition is met. + TrueActivity *string `type:"string"` } // String returns the string representation -func (s APNSChannelRequest) String() string { +func (s ConditionalSplitActivity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s APNSChannelRequest) GoString() string { +func (s ConditionalSplitActivity) GoString() string { return s.String() } -// SetBundleId sets the BundleId field's value. -func (s *APNSChannelRequest) SetBundleId(v string) *APNSChannelRequest { - s.BundleId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConditionalSplitActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConditionalSplitActivity"} + if s.Condition != nil { + if err := s.Condition.Validate(); err != nil { + invalidParams.AddNested("Condition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCertificate sets the Certificate field's value. -func (s *APNSChannelRequest) SetCertificate(v string) *APNSChannelRequest { - s.Certificate = &v +// SetCondition sets the Condition field's value. +func (s *ConditionalSplitActivity) SetCondition(v *Condition) *ConditionalSplitActivity { + s.Condition = v return s } -// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. -func (s *APNSChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSChannelRequest { - s.DefaultAuthenticationMethod = &v +// SetEvaluationWaitTime sets the EvaluationWaitTime field's value. +func (s *ConditionalSplitActivity) SetEvaluationWaitTime(v *WaitTime) *ConditionalSplitActivity { + s.EvaluationWaitTime = v return s } -// SetEnabled sets the Enabled field's value. -func (s *APNSChannelRequest) SetEnabled(v bool) *APNSChannelRequest { - s.Enabled = &v +// SetFalseActivity sets the FalseActivity field's value. +func (s *ConditionalSplitActivity) SetFalseActivity(v string) *ConditionalSplitActivity { + s.FalseActivity = &v return s } -// SetPrivateKey sets the PrivateKey field's value. -func (s *APNSChannelRequest) SetPrivateKey(v string) *APNSChannelRequest { - s.PrivateKey = &v +// SetTrueActivity sets the TrueActivity field's value. +func (s *ConditionalSplitActivity) SetTrueActivity(v string) *ConditionalSplitActivity { + s.TrueActivity = &v return s } -// SetTeamId sets the TeamId field's value. -func (s *APNSChannelRequest) SetTeamId(v string) *APNSChannelRequest { - s.TeamId = &v - return s +type CreateAppInput struct { + _ struct{} `type:"structure" payload:"CreateApplicationRequest"` + + // Specifies the display name of an application and the tags to associate with + // the application. + // + // CreateApplicationRequest is a required field + CreateApplicationRequest *CreateApplicationRequest `type:"structure" required:"true"` } -// SetTokenKey sets the TokenKey field's value. 
-func (s *APNSChannelRequest) SetTokenKey(v string) *APNSChannelRequest { - s.TokenKey = &v +// String returns the string representation +func (s CreateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAppInput"} + if s.CreateApplicationRequest == nil { + invalidParams.Add(request.NewErrParamRequired("CreateApplicationRequest")) + } + if s.CreateApplicationRequest != nil { + if err := s.CreateApplicationRequest.Validate(); err != nil { + invalidParams.AddNested("CreateApplicationRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreateApplicationRequest sets the CreateApplicationRequest field's value. +func (s *CreateAppInput) SetCreateApplicationRequest(v *CreateApplicationRequest) *CreateAppInput { + s.CreateApplicationRequest = v return s } -// SetTokenKeyId sets the TokenKeyId field's value. -func (s *APNSChannelRequest) SetTokenKeyId(v string) *APNSChannelRequest { - s.TokenKeyId = &v +type CreateAppOutput struct { + _ struct{} `type:"structure" payload:"ApplicationResponse"` + + // Provides information about an application. + // + // ApplicationResponse is a required field + ApplicationResponse *ApplicationResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppOutput) GoString() string { + return s.String() +} + +// SetApplicationResponse sets the ApplicationResponse field's value. +func (s *CreateAppOutput) SetApplicationResponse(v *ApplicationResponse) *CreateAppOutput { + s.ApplicationResponse = v return s } -// Apple Distribution Push Notification Service channel definition. -type APNSChannelResponse struct { +// Specifies the display name of an application and the tags to associate with +// the application. +type CreateApplicationRequest struct { _ struct{} `type:"structure"` - // The ID of the application that the channel applies to. - ApplicationId *string `type:"string"` - - // The date and time when this channel was created. - CreationDate *string `type:"string"` + // The display name of the application. This name is displayed as the Project + // name on the Amazon Pinpoint console. + // + // Name is a required field + Name *string `type:"string" required:"true"` - // The default authentication method used for APNs. - DefaultAuthenticationMethod *string `type:"string"` + // A string-to-string map of key-value pairs that defines the tags to associate + // with the application. Each tag consists of a required tag key and an associated + // tag value. + Tags map[string]*string `locationName:"tags" type:"map"` +} - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` +// String returns the string representation +func (s CreateApplicationRequest) String() string { + return awsutil.Prettify(s) +} - // Not used. Retained for backwards compatibility. 
- HasCredential *bool `type:"boolean"` +// GoString returns the string representation +func (s CreateApplicationRequest) GoString() string { + return s.String() +} - // Indicates whether the channel is configured with a key for APNs token authentication. - // Provide a token key by setting the TokenKey attribute. - HasTokenKey *bool `type:"boolean"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateApplicationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateApplicationRequest"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } - // (Deprecated) An identifier for the channel. Retained for backwards compatibility. - Id *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Indicates whether or not the channel is archived. - IsArchived *bool `type:"boolean"` +// SetName sets the Name field's value. +func (s *CreateApplicationRequest) SetName(v string) *CreateApplicationRequest { + s.Name = &v + return s +} - // The user who last updated this channel. - LastModifiedBy *string `type:"string"` +// SetTags sets the Tags field's value. +func (s *CreateApplicationRequest) SetTags(v map[string]*string) *CreateApplicationRequest { + s.Tags = v + return s +} - // The date and time when this channel was last modified. - LastModifiedDate *string `type:"string"` +type CreateCampaignInput struct { + _ struct{} `type:"structure" payload:"WriteCampaignRequest"` - // The platform type. For this channel, the value is always "ADM." - Platform *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // The channel version. - Version *int64 `type:"integer"` + // Specifies the configuration and other settings for a campaign. + // + // WriteCampaignRequest is a required field + WriteCampaignRequest *WriteCampaignRequest `type:"structure" required:"true"` } // String returns the string representation -func (s APNSChannelResponse) String() string { +func (s CreateCampaignInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s APNSChannelResponse) GoString() string { +func (s CreateCampaignInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCampaignInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCampaignInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.WriteCampaignRequest == nil { + invalidParams.Add(request.NewErrParamRequired("WriteCampaignRequest")) + } + if s.WriteCampaignRequest != nil { + if err := s.WriteCampaignRequest.Validate(); err != nil { + invalidParams.AddNested("WriteCampaignRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetApplicationId sets the ApplicationId field's value. -func (s *APNSChannelResponse) SetApplicationId(v string) *APNSChannelResponse { +func (s *CreateCampaignInput) SetApplicationId(v string) *CreateCampaignInput { s.ApplicationId = &v return s } -// SetCreationDate sets the CreationDate field's value. 
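+
+// Editorial note (not generated SDK code): a sketch of assembling the new
+// CreateAppInput defined above; the project name and tag are hypothetical,
+// and aws.String is the SDK's pointer helper.
+//
+//	input := (&CreateAppInput{}).SetCreateApplicationRequest(
+//		(&CreateApplicationRequest{}).
+//			SetName("my-project").
+//			SetTags(map[string]*string{"team": aws.String("growth")}),
+//	)
+//	err := input.Validate() // fails if CreateApplicationRequest or its Name is missing
+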
-func (s *APNSChannelResponse) SetCreationDate(v string) *APNSChannelResponse { - s.CreationDate = &v +// SetWriteCampaignRequest sets the WriteCampaignRequest field's value. +func (s *CreateCampaignInput) SetWriteCampaignRequest(v *WriteCampaignRequest) *CreateCampaignInput { + s.WriteCampaignRequest = v return s } -// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value. -func (s *APNSChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSChannelResponse { - s.DefaultAuthenticationMethod = &v - return s +type CreateCampaignOutput struct { + _ struct{} `type:"structure" payload:"CampaignResponse"` + + // Provides information about the status, configuration, and other settings + // for a campaign. + // + // CampaignResponse is a required field + CampaignResponse *CampaignResponse `type:"structure" required:"true"` } -// SetEnabled sets the Enabled field's value. -func (s *APNSChannelResponse) SetEnabled(v bool) *APNSChannelResponse { - s.Enabled = &v - return s +// String returns the string representation +func (s CreateCampaignOutput) String() string { + return awsutil.Prettify(s) } -// SetHasCredential sets the HasCredential field's value. -func (s *APNSChannelResponse) SetHasCredential(v bool) *APNSChannelResponse { - s.HasCredential = &v - return s +// GoString returns the string representation +func (s CreateCampaignOutput) GoString() string { + return s.String() } -// SetHasTokenKey sets the HasTokenKey field's value. -func (s *APNSChannelResponse) SetHasTokenKey(v bool) *APNSChannelResponse { - s.HasTokenKey = &v +// SetCampaignResponse sets the CampaignResponse field's value. +func (s *CreateCampaignOutput) SetCampaignResponse(v *CampaignResponse) *CreateCampaignOutput { + s.CampaignResponse = v return s } -// SetId sets the Id field's value. -func (s *APNSChannelResponse) SetId(v string) *APNSChannelResponse { - s.Id = &v - return s +type CreateEmailTemplateInput struct { + _ struct{} `type:"structure" payload:"EmailTemplateRequest"` + + // Specifies the content and settings for a message template that can be used + // in messages that are sent through the email channel. + // + // EmailTemplateRequest is a required field + EmailTemplateRequest *EmailTemplateRequest `type:"structure" required:"true"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` } -// SetIsArchived sets the IsArchived field's value. -func (s *APNSChannelResponse) SetIsArchived(v bool) *APNSChannelResponse { - s.IsArchived = &v - return s +// String returns the string representation +func (s CreateEmailTemplateInput) String() string { + return awsutil.Prettify(s) } -// SetLastModifiedBy sets the LastModifiedBy field's value. -func (s *APNSChannelResponse) SetLastModifiedBy(v string) *APNSChannelResponse { - s.LastModifiedBy = &v - return s +// GoString returns the string representation +func (s CreateEmailTemplateInput) GoString() string { + return s.String() } -// SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *APNSChannelResponse) SetLastModifiedDate(v string) *APNSChannelResponse { - s.LastModifiedDate = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateEmailTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateEmailTemplateInput"}
+	if s.EmailTemplateRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("EmailTemplateRequest"))
+	}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetPlatform sets the Platform field's value.
-func (s *APNSChannelResponse) SetPlatform(v string) *APNSChannelResponse {
-	s.Platform = &v
+// SetEmailTemplateRequest sets the EmailTemplateRequest field's value.
+func (s *CreateEmailTemplateInput) SetEmailTemplateRequest(v *EmailTemplateRequest) *CreateEmailTemplateInput {
+	s.EmailTemplateRequest = v
	return s
}

-// SetVersion sets the Version field's value.
-func (s *APNSChannelResponse) SetVersion(v int64) *APNSChannelResponse {
-	s.Version = &v
+// SetTemplateName sets the TemplateName field's value.
+func (s *CreateEmailTemplateInput) SetTemplateName(v string) *CreateEmailTemplateInput {
+	s.TemplateName = &v
	return s
}

-// APNS Message.
-type APNSMessage struct {
-	_ struct{} `type:"structure"`
-
-	// The action that occurs if the user taps a push notification delivered by
-	// the campaign: OPEN_APP - Your app launches, or it becomes the foreground
-	// app if it has been sent to the background. This is the default action. DEEP_LINK
-	// - Uses deep linking features in iOS and Android to open your app and display
-	// a designated user interface within the app. URL - The default mobile browser
-	// on the user's device launches and opens a web page at the URL you specify.
-	// Possible values include: OPEN_APP | DEEP_LINK | URL
-	Action *string `type:"string" enum:"Action"`
-
-	// Include this key when you want the system to modify the badge of your app
-	// icon. If this key is not included in the dictionary, the badge is not changed.
-	// To remove the badge, set the value of this key to 0.
-	Badge *int64 `type:"integer"`
-
-	// The message body of the notification.
-	Body *string `type:"string"`
-
-	// Provide this key with a string value that represents the notification's type.
-	// This value corresponds to the value in the identifier property of one of
-	// your app's registered categories.
-	Category *string `type:"string"`
-
-	// An ID that, if assigned to multiple messages, causes APNs to coalesce the
-	// messages into a single push notification instead of delivering each message
-	// individually. The value must not exceed 64 bytes. Amazon Pinpoint uses this
-	// value to set the apns-collapse-id request header when it sends the message
-	// to APNs.
-	CollapseId *string `type:"string"`
-
-	// The data payload used for a silent push. This payload is added to the notifications'
-	// data.pinpoint.jsonBody' object
-	Data map[string]*string `type:"map"`
-
-	// A URL that refers to the location of an image or video that you want to display
-	// in the push notification.
-	MediaUrl *string `type:"string"`
-
-	// The preferred authentication method, either "CERTIFICATE" or "TOKEN"
-	PreferredAuthenticationMethod *string `type:"string"`
-
-	// The message priority. Amazon Pinpoint uses this value to set the apns-priority
-	// request header when it sends the message to APNs. Accepts the following values:"5"
-	// - Low priority. Messages might be delayed, delivered in groups, and throttled."10"
-	// - High priority. Messages are sent immediately. High priority messages must
-	// cause an alert, sound, or badge on the receiving device.The default value
-	// is "10".The equivalent values for FCM or GCM messages are "normal" and "high".
-	// Amazon Pinpoint accepts these values for APNs messages and converts them.For
-	// more information about the apns-priority parameter, see Communicating with
-	// APNs in the APNs Local and Remote Notification Programming Guide.
-	Priority *string `type:"string"`
-
-	// The Raw JSON formatted string to be used as the payload. This value overrides
-	// the message.
-	RawContent *string `type:"string"`
+type CreateEmailTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"CreateTemplateMessageBody"`

-	// Indicates if the message should display on the users device. Silent pushes
-	// can be used for Remote Configuration and Phone Home use cases.
-	SilentPush *bool `type:"boolean"`
+	// Provides information about a request to create a message template.
+	//
+	// CreateTemplateMessageBody is a required field
+	CreateTemplateMessageBody *CreateTemplateMessageBody `type:"structure" required:"true"`
+}

-	// Include this key when you want the system to play a sound. The value of this
-	// key is the name of a sound file in your app's main bundle or in the Library/Sounds
-	// folder of your app's data container. If the sound file cannot be found, or
-	// if you specify defaultfor the value, the system plays the default alert sound.
-	Sound *string `type:"string"`
+// String returns the string representation
+func (s CreateEmailTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}

-	// Default message substitutions. Can be overridden by individual address substitutions.
-	Substitutions map[string][]*string `type:"map"`
+// GoString returns the string representation
+func (s CreateEmailTemplateOutput) GoString() string {
+	return s.String()
+}

-	// Provide this key with a string value that represents the app-specific identifier
-	// for grouping notifications. If you provide a Notification Content app extension,
-	// you can use this value to group your notifications together.
-	ThreadId *string `type:"string"`
+// SetCreateTemplateMessageBody sets the CreateTemplateMessageBody field's value.
+func (s *CreateEmailTemplateOutput) SetCreateTemplateMessageBody(v *CreateTemplateMessageBody) *CreateEmailTemplateOutput {
+	s.CreateTemplateMessageBody = v
+	return s
+}
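Template creation pairs a URI-bound TemplateName with a request struct serialized as the HTTP payload (see the payload:"EmailTemplateRequest" tag on the input type). A minimal sketch, assuming the usual session setup; the EmailTemplateRequest field names (Subject, TextPart) and the template name are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// TemplateName fills the {template-name} URI segment; the request
	// struct becomes the body of the POST.
	out, err := client.CreateEmailTemplate(&pinpoint.CreateEmailTemplateInput{
		TemplateName: aws.String("welcome-email"),
		EmailTemplateRequest: &pinpoint.EmailTemplateRequest{
			Subject:  aws.String("Welcome!"),
			TextPart: aws.String("Hello {{User.UserAttributes.FirstName}}"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("template ARN:", aws.StringValue(out.CreateTemplateMessageBody.Arn))
}
```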
-	// The length of time (in seconds) that APNs stores and attempts to deliver
-	// the message. If the value is 0, APNs does not store the message or attempt
-	// to deliver it more than once. Amazon Pinpoint uses this value to set the
-	// apns-expiration request header when it sends the message to APNs.
-	TimeToLive *int64 `type:"integer"`
+type CreateExportJobInput struct {
+	_ struct{} `type:"structure" payload:"ExportJobRequest"`

-	// The message title that displays above the message on the user's device.
-	Title *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`

-	// The URL to open in the user's mobile browser. Used if the value for Action
-	// is URL.
-	Url *string `type:"string"`
+	// Specifies the settings for a job that exports endpoint definitions to an
+	// Amazon Simple Storage Service (Amazon S3) bucket.
+	//
+	// ExportJobRequest is a required field
+	ExportJobRequest *ExportJobRequest `type:"structure" required:"true"`
}

// String returns the string representation
-func (s APNSMessage) String() string {
+func (s CreateExportJobInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSMessage) GoString() string {
+func (s CreateExportJobInput) GoString() string {
	return s.String()
}

-// SetAction sets the Action field's value.
-func (s *APNSMessage) SetAction(v string) *APNSMessage {
-	s.Action = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateExportJobInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateExportJobInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.ExportJobRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExportJobRequest"))
+	}
+	if s.ExportJobRequest != nil {
+		if err := s.ExportJobRequest.Validate(); err != nil {
+			invalidParams.AddNested("ExportJobRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetBadge sets the Badge field's value.
-func (s *APNSMessage) SetBadge(v int64) *APNSMessage {
-	s.Badge = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *CreateExportJobInput) SetApplicationId(v string) *CreateExportJobInput {
+	s.ApplicationId = &v
	return s
}

-// SetBody sets the Body field's value.
-func (s *APNSMessage) SetBody(v string) *APNSMessage {
-	s.Body = &v
+// SetExportJobRequest sets the ExportJobRequest field's value.
+func (s *CreateExportJobInput) SetExportJobRequest(v *ExportJobRequest) *CreateExportJobInput {
+	s.ExportJobRequest = v
	return s
}

-// SetCategory sets the Category field's value.
-func (s *APNSMessage) SetCategory(v string) *APNSMessage {
-	s.Category = &v
-	return s
+type CreateExportJobOutput struct {
+	_ struct{} `type:"structure" payload:"ExportJobResponse"`
+
+	// Provides information about the status and settings of a job that exports
+	// endpoint definitions to a file. The file can be added directly to an Amazon
+	// Simple Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API
+	// or downloaded directly to a computer by using the Amazon Pinpoint console.
+	//
+	// ExportJobResponse is a required field
+	ExportJobResponse *ExportJobResponse `type:"structure" required:"true"`
}

-// SetCollapseId sets the CollapseId field's value.
-func (s *APNSMessage) SetCollapseId(v string) *APNSMessage {
-	s.CollapseId = &v
-	return s
+// String returns the string representation
+func (s CreateExportJobOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetData sets the Data field's value.
-func (s *APNSMessage) SetData(v map[string]*string) *APNSMessage {
-	s.Data = v
-	return s
+// GoString returns the string representation
+func (s CreateExportJobOutput) GoString() string {
+	return s.String()
}

-// SetMediaUrl sets the MediaUrl field's value.
-func (s *APNSMessage) SetMediaUrl(v string) *APNSMessage {
-	s.MediaUrl = &v
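The AddNested calls in the generated validators roll nested failures up into a single composite error. A minimal sketch, assuming the nested ExportJobRequest validator requires its role and S3 fields (left empty here on purpose):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// ExportJobRequest is intentionally incomplete so its own Validate()
	// fails; the parent folds that failure in via AddNested.
	input := &pinpoint.CreateExportJobInput{
		ApplicationId:    aws.String("my-app-id"),
		ExportJobRequest: &pinpoint.ExportJobRequest{},
	}
	if err := input.Validate(); err != nil {
		// The error lists each failed constraint prefixed with its context,
		// e.g. "CreateExportJobInput.ExportJobRequest.<Field>".
		fmt.Println(err)
	}
}
```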
+// SetExportJobResponse sets the ExportJobResponse field's value.
+func (s *CreateExportJobOutput) SetExportJobResponse(v *ExportJobResponse) *CreateExportJobOutput {
+	s.ExportJobResponse = v
	return s
}

-// SetPreferredAuthenticationMethod sets the PreferredAuthenticationMethod field's value.
-func (s *APNSMessage) SetPreferredAuthenticationMethod(v string) *APNSMessage {
-	s.PreferredAuthenticationMethod = &v
-	return s
+type CreateImportJobInput struct {
+	_ struct{} `type:"structure" payload:"ImportJobRequest"`
+
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
+
+	// Specifies the settings for a job that imports endpoint definitions from an
+	// Amazon Simple Storage Service (Amazon S3) bucket.
+	//
+	// ImportJobRequest is a required field
+	ImportJobRequest *ImportJobRequest `type:"structure" required:"true"`
}

-// SetPriority sets the Priority field's value.
-func (s *APNSMessage) SetPriority(v string) *APNSMessage {
-	s.Priority = &v
-	return s
+// String returns the string representation
+func (s CreateImportJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImportJobInput) GoString() string {
+	return s.String()
}

-// SetRawContent sets the RawContent field's value.
-func (s *APNSMessage) SetRawContent(v string) *APNSMessage {
-	s.RawContent = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateImportJobInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateImportJobInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.ImportJobRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("ImportJobRequest"))
+	}
+	if s.ImportJobRequest != nil {
+		if err := s.ImportJobRequest.Validate(); err != nil {
+			invalidParams.AddNested("ImportJobRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetSilentPush sets the SilentPush field's value.
-func (s *APNSMessage) SetSilentPush(v bool) *APNSMessage {
-	s.SilentPush = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *CreateImportJobInput) SetApplicationId(v string) *CreateImportJobInput {
+	s.ApplicationId = &v
	return s
}

-// SetSound sets the Sound field's value.
-func (s *APNSMessage) SetSound(v string) *APNSMessage {
-	s.Sound = &v
+// SetImportJobRequest sets the ImportJobRequest field's value.
+func (s *CreateImportJobInput) SetImportJobRequest(v *ImportJobRequest) *CreateImportJobInput {
+	s.ImportJobRequest = v
	return s
}

-// SetSubstitutions sets the Substitutions field's value.
-func (s *APNSMessage) SetSubstitutions(v map[string][]*string) *APNSMessage {
-	s.Substitutions = v
-	return s
-}
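Import jobs follow the same URI-plus-payload shape. A minimal sketch of driving CreateImportJob, assuming the standard session setup; the ImportJobRequest field names (Format, RoleArn, S3Url) follow the generated shape and the values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// The role must allow Pinpoint to read the endpoint files from S3.
	out, err := client.CreateImportJob(&pinpoint.CreateImportJobInput{
		ApplicationId: aws.String("my-app-id"),
		ImportJobRequest: &pinpoint.ImportJobRequest{
			Format:  aws.String("CSV"),
			RoleArn: aws.String("arn:aws:iam::123456789012:role/pinpoint-import"),
			S3Url:   aws.String("s3://my-bucket/endpoints/"),
		},
	})
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println("job ID:", aws.StringValue(out.ImportJobResponse.Id))
}
```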
+type CreateImportJobOutput struct {
+	_ struct{} `type:"structure" payload:"ImportJobResponse"`

-// SetThreadId sets the ThreadId field's value.
-func (s *APNSMessage) SetThreadId(v string) *APNSMessage {
-	s.ThreadId = &v
-	return s
+	// Provides information about the status and settings of a job that imports
+	// endpoint definitions from one or more files. The files can be stored in an
+	// Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from
+	// a computer by using the Amazon Pinpoint console.
+	//
+	// ImportJobResponse is a required field
+	ImportJobResponse *ImportJobResponse `type:"structure" required:"true"`
}

-// SetTimeToLive sets the TimeToLive field's value.
-func (s *APNSMessage) SetTimeToLive(v int64) *APNSMessage {
-	s.TimeToLive = &v
-	return s
+// String returns the string representation
+func (s CreateImportJobOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetTitle sets the Title field's value.
-func (s *APNSMessage) SetTitle(v string) *APNSMessage {
-	s.Title = &v
-	return s
+// GoString returns the string representation
+func (s CreateImportJobOutput) GoString() string {
+	return s.String()
}

-// SetUrl sets the Url field's value.
-func (s *APNSMessage) SetUrl(v string) *APNSMessage {
-	s.Url = &v
+// SetImportJobResponse sets the ImportJobResponse field's value.
+func (s *CreateImportJobOutput) SetImportJobResponse(v *ImportJobResponse) *CreateImportJobOutput {
+	s.ImportJobResponse = v
	return s
}

-// Apple Development Push Notification Service channel definition.
-type APNSSandboxChannelRequest struct {
-	_ struct{} `type:"structure"`
-
-	// The bundle id used for APNs Tokens.
-	BundleId *string `type:"string"`
-
-	// The distribution certificate from Apple.
-	Certificate *string `type:"string"`
-
-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
-
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
-
-	// The certificate private key.
-	PrivateKey *string `type:"string"`
-
-	// The team id used for APNs Tokens.
-	TeamId *string `type:"string"`
+type CreateJourneyInput struct {
+	_ struct{} `type:"structure" payload:"WriteJourneyRequest"`

-	// The token key used for APNs Tokens.
-	TokenKey *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`

-	// The token key used for APNs Tokens.
-	TokenKeyId *string `type:"string"`
+	// Specifies the configuration and other settings for a journey.
+	//
+	// WriteJourneyRequest is a required field
+	WriteJourneyRequest *WriteJourneyRequest `type:"structure" required:"true"`
}

// String returns the string representation
-func (s APNSSandboxChannelRequest) String() string {
+func (s CreateJourneyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSSandboxChannelRequest) GoString() string {
+func (s CreateJourneyInput) GoString() string {
	return s.String()
}

-// SetBundleId sets the BundleId field's value.
-func (s *APNSSandboxChannelRequest) SetBundleId(v string) *APNSSandboxChannelRequest {
-	s.BundleId = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateJourneyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateJourneyInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.WriteJourneyRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("WriteJourneyRequest"))
+	}
+	if s.WriteJourneyRequest != nil {
+		if err := s.WriteJourneyRequest.Validate(); err != nil {
+			invalidParams.AddNested("WriteJourneyRequest", err.(request.ErrInvalidParams))
+		}
+	}

-// SetCertificate sets the Certificate field's value.
-func (s *APNSSandboxChannelRequest) SetCertificate(v string) *APNSSandboxChannelRequest {
-	s.Certificate = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSSandboxChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSSandboxChannelRequest {
-	s.DefaultAuthenticationMethod = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *CreateJourneyInput) SetApplicationId(v string) *CreateJourneyInput {
+	s.ApplicationId = &v
	return s
}

-// SetEnabled sets the Enabled field's value.
-func (s *APNSSandboxChannelRequest) SetEnabled(v bool) *APNSSandboxChannelRequest {
-	s.Enabled = &v
+// SetWriteJourneyRequest sets the WriteJourneyRequest field's value.
+func (s *CreateJourneyInput) SetWriteJourneyRequest(v *WriteJourneyRequest) *CreateJourneyInput {
+	s.WriteJourneyRequest = v
	return s
}

-// SetPrivateKey sets the PrivateKey field's value.
-func (s *APNSSandboxChannelRequest) SetPrivateKey(v string) *APNSSandboxChannelRequest {
-	s.PrivateKey = &v
-	return s
+type CreateJourneyOutput struct {
+	_ struct{} `type:"structure" payload:"JourneyResponse"`
+
+	// Provides information about the status, configuration, and other settings
+	// for a journey.
+	//
+	// JourneyResponse is a required field
+	JourneyResponse *JourneyResponse `type:"structure" required:"true"`
}

-// SetTeamId sets the TeamId field's value.
-func (s *APNSSandboxChannelRequest) SetTeamId(v string) *APNSSandboxChannelRequest {
-	s.TeamId = &v
-	return s
+// String returns the string representation
+func (s CreateJourneyOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetTokenKey sets the TokenKey field's value.
-func (s *APNSSandboxChannelRequest) SetTokenKey(v string) *APNSSandboxChannelRequest {
-	s.TokenKey = &v
-	return s
+// GoString returns the string representation
+func (s CreateJourneyOutput) GoString() string {
+	return s.String()
}

-// SetTokenKeyId sets the TokenKeyId field's value.
-func (s *APNSSandboxChannelRequest) SetTokenKeyId(v string) *APNSSandboxChannelRequest {
-	s.TokenKeyId = &v
+// SetJourneyResponse sets the JourneyResponse field's value.
+func (s *CreateJourneyOutput) SetJourneyResponse(v *JourneyResponse) *CreateJourneyOutput {
+	s.JourneyResponse = v
	return s
}

-// Apple Development Push Notification Service channel definition.
-type APNSSandboxChannelResponse struct {
-	_ struct{} `type:"structure"`
-
-	// The ID of the application to which the channel applies.
-	ApplicationId *string `type:"string"`
-
-	// When was this segment created
-	CreationDate *string `type:"string"`
+type CreatePushTemplateInput struct {
+	_ struct{} `type:"structure" payload:"PushNotificationTemplateRequest"`

-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
+	// Specifies the content and settings for a message template that can be used
+	// in messages that are sent through a push notification channel.
+	//
+	// PushNotificationTemplateRequest is a required field
+	PushNotificationTemplateRequest *PushNotificationTemplateRequest `type:"structure" required:"true"`

-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
+	// TemplateName is a required field
+	TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"`
+}
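Journeys use the same write-request wrapper as campaigns. A minimal sketch of CreateJourney, assuming the standard session setup; the journey name and application ID are placeholders, and real journeys would also carry activities and a schedule:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// Only the name is set here to keep the sketch minimal; the nested
	// validator (invoked through AddNested above) requires it.
	out, err := client.CreateJourney(&pinpoint.CreateJourneyInput{
		ApplicationId: aws.String("my-app-id"),
		WriteJourneyRequest: &pinpoint.WriteJourneyRequest{
			Name: aws.String("onboarding-journey"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("journey ID:", aws.StringValue(out.JourneyResponse.Id))
}
```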
-	// Not used. Retained for backwards compatibility.
-	HasCredential *bool `type:"boolean"`
+// String returns the string representation
+func (s CreatePushTemplateInput) String() string {
+	return awsutil.Prettify(s)
+}

-	// Indicates whether the channel is configured with a key for APNs token authentication.
-	// Provide a token key by setting the TokenKey attribute.
-	HasTokenKey *bool `type:"boolean"`
+// GoString returns the string representation
+func (s CreatePushTemplateInput) GoString() string {
+	return s.String()
+}

-	// Channel ID. Not used, only for backwards compatibility.
-	Id *string `type:"string"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePushTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreatePushTemplateInput"}
+	if s.PushNotificationTemplateRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("PushNotificationTemplateRequest"))
+	}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}

-	// Is this channel archived
-	IsArchived *bool `type:"boolean"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}

-	// Who last updated this entry
-	LastModifiedBy *string `type:"string"`
+// SetPushNotificationTemplateRequest sets the PushNotificationTemplateRequest field's value.
+func (s *CreatePushTemplateInput) SetPushNotificationTemplateRequest(v *PushNotificationTemplateRequest) *CreatePushTemplateInput {
+	s.PushNotificationTemplateRequest = v
+	return s
+}

-	// Last date this was updated
-	LastModifiedDate *string `type:"string"`
+// SetTemplateName sets the TemplateName field's value.
+func (s *CreatePushTemplateInput) SetTemplateName(v string) *CreatePushTemplateInput {
+	s.TemplateName = &v
+	return s
+}

-	// The platform type. Will be APNS_SANDBOX.
-	Platform *string `type:"string"`
+type CreatePushTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"CreateTemplateMessageBody"`

-	// Version of channel
-	Version *int64 `type:"integer"`
+	// Provides information about a request to create a message template.
+	//
+	// CreateTemplateMessageBody is a required field
+	CreateTemplateMessageBody *CreateTemplateMessageBody `type:"structure" required:"true"`
}

// String returns the string representation
-func (s APNSSandboxChannelResponse) String() string {
+func (s CreatePushTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSSandboxChannelResponse) GoString() string {
+func (s CreatePushTemplateOutput) GoString() string {
	return s.String()
}

-// SetApplicationId sets the ApplicationId field's value.
-func (s *APNSSandboxChannelResponse) SetApplicationId(v string) *APNSSandboxChannelResponse {
-	s.ApplicationId = &v
+// SetCreateTemplateMessageBody sets the CreateTemplateMessageBody field's value.
+func (s *CreatePushTemplateOutput) SetCreateTemplateMessageBody(v *CreateTemplateMessageBody) *CreatePushTemplateOutput {
+	s.CreateTemplateMessageBody = v
	return s
}

-// SetCreationDate sets the CreationDate field's value.
-func (s *APNSSandboxChannelResponse) SetCreationDate(v string) *APNSSandboxChannelResponse {
-	s.CreationDate = &v
-	return s
-}
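A minimal sketch of CreatePushTemplate, assuming the standard session setup. The Default block of PushNotificationTemplateRequest (its field names here are taken from the generated shape; the values are placeholders) applies to every push channel unless a channel-specific override is supplied:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.CreatePushTemplate(&pinpoint.CreatePushTemplateInput{
		TemplateName: aws.String("promo-push"),
		PushNotificationTemplateRequest: &pinpoint.PushNotificationTemplateRequest{
			Default: &pinpoint.DefaultPushNotificationTemplate{
				Title:  aws.String("Sale today"),
				Body:   aws.String("Everything 20% off"),
				Action: aws.String(pinpoint.ActionOpenApp), // "OPEN_APP"
			},
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("template ARN:", aws.StringValue(out.CreateTemplateMessageBody.Arn))
}
```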
+type CreateSegmentInput struct {
+	_ struct{} `type:"structure" payload:"WriteSegmentRequest"`

-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSSandboxChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSSandboxChannelResponse {
-	s.DefaultAuthenticationMethod = &v
-	return s
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
+
+	// Specifies the configuration, dimension, and other settings for a segment.
+	// A WriteSegmentRequest object can include a Dimensions object or a SegmentGroups
+	// object, but not both.
+	//
+	// WriteSegmentRequest is a required field
+	WriteSegmentRequest *WriteSegmentRequest `type:"structure" required:"true"`
}

-// SetEnabled sets the Enabled field's value.
-func (s *APNSSandboxChannelResponse) SetEnabled(v bool) *APNSSandboxChannelResponse {
-	s.Enabled = &v
-	return s
+// String returns the string representation
+func (s CreateSegmentInput) String() string {
+	return awsutil.Prettify(s)
}

-// SetHasCredential sets the HasCredential field's value.
-func (s *APNSSandboxChannelResponse) SetHasCredential(v bool) *APNSSandboxChannelResponse {
-	s.HasCredential = &v
-	return s
+// GoString returns the string representation
+func (s CreateSegmentInput) GoString() string {
+	return s.String()
}

-// SetHasTokenKey sets the HasTokenKey field's value.
-func (s *APNSSandboxChannelResponse) SetHasTokenKey(v bool) *APNSSandboxChannelResponse {
-	s.HasTokenKey = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSegmentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSegmentInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.WriteSegmentRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("WriteSegmentRequest"))
+	}
+	if s.WriteSegmentRequest != nil {
+		if err := s.WriteSegmentRequest.Validate(); err != nil {
+			invalidParams.AddNested("WriteSegmentRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetId sets the Id field's value.
-func (s *APNSSandboxChannelResponse) SetId(v string) *APNSSandboxChannelResponse {
-	s.Id = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *CreateSegmentInput) SetApplicationId(v string) *CreateSegmentInput {
+	s.ApplicationId = &v
	return s
}

-// SetIsArchived sets the IsArchived field's value.
-func (s *APNSSandboxChannelResponse) SetIsArchived(v bool) *APNSSandboxChannelResponse {
-	s.IsArchived = &v
+// SetWriteSegmentRequest sets the WriteSegmentRequest field's value.
+func (s *CreateSegmentInput) SetWriteSegmentRequest(v *WriteSegmentRequest) *CreateSegmentInput {
+	s.WriteSegmentRequest = v
	return s
}

-// SetLastModifiedBy sets the LastModifiedBy field's value.
-func (s *APNSSandboxChannelResponse) SetLastModifiedBy(v string) *APNSSandboxChannelResponse {
-	s.LastModifiedBy = &v
-	return s
+type CreateSegmentOutput struct {
+	_ struct{} `type:"structure" payload:"SegmentResponse"`
+
+	// Provides information about the configuration, dimension, and other settings
+	// for a segment.
+	//
+	// SegmentResponse is a required field
+	SegmentResponse *SegmentResponse `type:"structure" required:"true"`
}
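Per the doc comment on WriteSegmentRequest, a segment carries either Dimensions or SegmentGroups, never both. A minimal sketch using a single demographic dimension; the SegmentDimensions/SegmentDemographics/SetDimension types are not shown in this hunk and are assumed from the generated package, with placeholder values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	// Dimensions is set, so SegmentGroups is deliberately left nil.
	out, err := client.CreateSegment(&pinpoint.CreateSegmentInput{
		ApplicationId: aws.String("my-app-id"),
		WriteSegmentRequest: &pinpoint.WriteSegmentRequest{
			Name: aws.String("ios-users"),
			Dimensions: &pinpoint.SegmentDimensions{
				Demographic: &pinpoint.SegmentDemographics{
					Platform: &pinpoint.SetDimension{
						Values: aws.StringSlice([]string{"iOS"}),
					},
				},
			},
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("segment ID:", aws.StringValue(out.SegmentResponse.Id))
}
```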
-// SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *APNSSandboxChannelResponse) SetLastModifiedDate(v string) *APNSSandboxChannelResponse {
-	s.LastModifiedDate = &v
-	return s
+// String returns the string representation
+func (s CreateSegmentOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetPlatform sets the Platform field's value.
-func (s *APNSSandboxChannelResponse) SetPlatform(v string) *APNSSandboxChannelResponse {
-	s.Platform = &v
-	return s
+// GoString returns the string representation
+func (s CreateSegmentOutput) GoString() string {
+	return s.String()
}

-// SetVersion sets the Version field's value.
-func (s *APNSSandboxChannelResponse) SetVersion(v int64) *APNSSandboxChannelResponse {
-	s.Version = &v
+// SetSegmentResponse sets the SegmentResponse field's value.
+func (s *CreateSegmentOutput) SetSegmentResponse(v *SegmentResponse) *CreateSegmentOutput {
+	s.SegmentResponse = v
	return s
}

-// Apple VoIP Push Notification Service channel definition.
-type APNSVoipChannelRequest struct {
-	_ struct{} `type:"structure"`
-
-	// The bundle id used for APNs Tokens.
-	BundleId *string `type:"string"`
-
-	// The distribution certificate from Apple.
-	Certificate *string `type:"string"`
-
-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
-
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
-
-	// The certificate private key.
-	PrivateKey *string `type:"string"`
-
-	// The team id used for APNs Tokens.
-	TeamId *string `type:"string"`
+type CreateSmsTemplateInput struct {
+	_ struct{} `type:"structure" payload:"SMSTemplateRequest"`

-	// The token key used for APNs Tokens.
-	TokenKey *string `type:"string"`
+	// Specifies the content and settings for a message template that can be used
+	// in text messages that are sent through the SMS channel.
+	//
+	// SMSTemplateRequest is a required field
+	SMSTemplateRequest *SMSTemplateRequest `type:"structure" required:"true"`

-	// The token key used for APNs Tokens.
-	TokenKeyId *string `type:"string"`
+	// TemplateName is a required field
+	TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"`
}

// String returns the string representation
-func (s APNSVoipChannelRequest) String() string {
+func (s CreateSmsTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSVoipChannelRequest) GoString() string {
+func (s CreateSmsTemplateInput) GoString() string {
	return s.String()
}

-// SetBundleId sets the BundleId field's value.
-func (s *APNSVoipChannelRequest) SetBundleId(v string) *APNSVoipChannelRequest {
-	s.BundleId = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSmsTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSmsTemplateInput"}
+	if s.SMSTemplateRequest == nil {
+		invalidParams.Add(request.NewErrParamRequired("SMSTemplateRequest"))
+	}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}

-// SetCertificate sets the Certificate field's value.
-func (s *APNSVoipChannelRequest) SetCertificate(v string) *APNSVoipChannelRequest {
-	s.Certificate = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSVoipChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSVoipChannelRequest {
-	s.DefaultAuthenticationMethod = &v
+// SetSMSTemplateRequest sets the SMSTemplateRequest field's value.
+func (s *CreateSmsTemplateInput) SetSMSTemplateRequest(v *SMSTemplateRequest) *CreateSmsTemplateInput {
+	s.SMSTemplateRequest = v
	return s
}

-// SetEnabled sets the Enabled field's value.
-func (s *APNSVoipChannelRequest) SetEnabled(v bool) *APNSVoipChannelRequest {
-	s.Enabled = &v
+// SetTemplateName sets the TemplateName field's value.
+func (s *CreateSmsTemplateInput) SetTemplateName(v string) *CreateSmsTemplateInput {
+	s.TemplateName = &v
	return s
}

-// SetPrivateKey sets the PrivateKey field's value.
-func (s *APNSVoipChannelRequest) SetPrivateKey(v string) *APNSVoipChannelRequest {
-	s.PrivateKey = &v
-	return s
+type CreateSmsTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"CreateTemplateMessageBody"`
+
+	// Provides information about a request to create a message template.
+	//
+	// CreateTemplateMessageBody is a required field
+	CreateTemplateMessageBody *CreateTemplateMessageBody `type:"structure" required:"true"`
}

-// SetTeamId sets the TeamId field's value.
-func (s *APNSVoipChannelRequest) SetTeamId(v string) *APNSVoipChannelRequest {
-	s.TeamId = &v
-	return s
+// String returns the string representation
+func (s CreateSmsTemplateOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetTokenKey sets the TokenKey field's value.
-func (s *APNSVoipChannelRequest) SetTokenKey(v string) *APNSVoipChannelRequest {
-	s.TokenKey = &v
-	return s
+// GoString returns the string representation
+func (s CreateSmsTemplateOutput) GoString() string {
+	return s.String()
}

-// SetTokenKeyId sets the TokenKeyId field's value.
-func (s *APNSVoipChannelRequest) SetTokenKeyId(v string) *APNSVoipChannelRequest {
-	s.TokenKeyId = &v
+// SetCreateTemplateMessageBody sets the CreateTemplateMessageBody field's value.
+func (s *CreateSmsTemplateOutput) SetCreateTemplateMessageBody(v *CreateTemplateMessageBody) *CreateSmsTemplateOutput {
+	s.CreateTemplateMessageBody = v
	return s
}

-// Apple VoIP Push Notification Service channel definition.
-type APNSVoipChannelResponse struct {
+// Provides information about a request to create a message template.
+type CreateTemplateMessageBody struct {
	_ struct{} `type:"structure"`

-	// Application id
-	ApplicationId *string `type:"string"`
-
-	// When was this segment created
-	CreationDate *string `type:"string"`
+	// The Amazon Resource Name (ARN) of the message template that was created.
+	Arn *string `type:"string"`

-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
+	// The message that's returned from the API for the request to create the message
+	// template.
+	Message *string `type:"string"`

-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
+	// The unique identifier for the request to create the message template.
+	RequestID *string `type:"string"`
+}
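All three Create*Template operations return the same CreateTemplateMessageBody shape defined above: the new template's ARN, a status message, and a request ID. A minimal sketch using the SMS variant, assuming the standard session setup; the template name and body are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.CreateSmsTemplate(&pinpoint.CreateSmsTemplateInput{
		TemplateName: aws.String("otp-sms"),
		SMSTemplateRequest: &pinpoint.SMSTemplateRequest{
			Body: aws.String("Your code is {{Attributes.otp}}"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	// The shared response payload: ARN, status message, and request ID.
	body := out.CreateTemplateMessageBody
	fmt.Println(aws.StringValue(body.Arn), aws.StringValue(body.Message), aws.StringValue(body.RequestID))
}
```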
-	// Not used. Retained for backwards compatibility.
-	HasCredential *bool `type:"boolean"`
+// String returns the string representation
+func (s CreateTemplateMessageBody) String() string {
+	return awsutil.Prettify(s)
+}

-	// If the channel is registered with a token key for authentication.
-	HasTokenKey *bool `type:"boolean"`
+// GoString returns the string representation
+func (s CreateTemplateMessageBody) GoString() string {
+	return s.String()
+}

-	// Channel ID. Not used, only for backwards compatibility.
-	Id *string `type:"string"`
+// SetArn sets the Arn field's value.
+func (s *CreateTemplateMessageBody) SetArn(v string) *CreateTemplateMessageBody {
+	s.Arn = &v
+	return s
+}

-	// Is this channel archived
-	IsArchived *bool `type:"boolean"`
+// SetMessage sets the Message field's value.
+func (s *CreateTemplateMessageBody) SetMessage(v string) *CreateTemplateMessageBody {
+	s.Message = &v
+	return s
+}

-	// Who made the last change
-	LastModifiedBy *string `type:"string"`
+// SetRequestID sets the RequestID field's value.
+func (s *CreateTemplateMessageBody) SetRequestID(v string) *CreateTemplateMessageBody {
+	s.RequestID = &v
+	return s
+}

-	// Last date this was updated
-	LastModifiedDate *string `type:"string"`
+// Specifies the default message to use for all channels.
+type DefaultMessage struct {
+	_ struct{} `type:"structure"`

-	// The platform type. Will be APNS.
-	Platform *string `type:"string"`
+	// The default message body of the push notification, email, or SMS message.
+	Body *string `type:"string"`

-	// Version of channel
-	Version *int64 `type:"integer"`
+	// The default message variables to use in the push notification, email, or
+	// SMS message. You can override these default variables with individual address
+	// variables.
+	Substitutions map[string][]*string `type:"map"`
}

// String returns the string representation
-func (s APNSVoipChannelResponse) String() string {
+func (s DefaultMessage) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSVoipChannelResponse) GoString() string {
+func (s DefaultMessage) GoString() string {
	return s.String()
}

-// SetApplicationId sets the ApplicationId field's value.
-func (s *APNSVoipChannelResponse) SetApplicationId(v string) *APNSVoipChannelResponse {
-	s.ApplicationId = &v
+// SetBody sets the Body field's value.
+func (s *DefaultMessage) SetBody(v string) *DefaultMessage {
+	s.Body = &v
	return s
}

-// SetCreationDate sets the CreationDate field's value.
-func (s *APNSVoipChannelResponse) SetCreationDate(v string) *APNSVoipChannelResponse {
-	s.CreationDate = &v
+// SetSubstitutions sets the Substitutions field's value.
+func (s *DefaultMessage) SetSubstitutions(v map[string][]*string) *DefaultMessage {
+	s.Substitutions = v
	return s
}
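Substitutions maps a variable name to a list of values, and address-level substitutions override these defaults at send time. A minimal sketch of building a DefaultMessage; the variable names and values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// aws.StringSlice converts []string into the []*string the map expects.
	msg := (&pinpoint.DefaultMessage{}).
		SetBody("Hi {{name}}, your order {{order}} shipped.").
		SetSubstitutions(map[string][]*string{
			"name":  aws.StringSlice([]string{"Ada"}),
			"order": aws.StringSlice([]string{"A-1042"}),
		})
	fmt.Println(msg.String())
}
```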
-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSVoipChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSVoipChannelResponse {
-	s.DefaultAuthenticationMethod = &v
-	return s
+// Specifies the default settings and content for a push notification that's
+// sent directly to an endpoint.
+type DefaultPushNotificationMessage struct {
+	_ struct{} `type:"structure"`
+
+	// The default action to occur if a recipient taps the push notification. Valid
+	// values are:
+	//
+	//    * OPEN_APP - Your app opens or it becomes the foreground app if it was
+	//    sent to the background. This is the default action.
+	//
+	//    * DEEP_LINK - Your app opens and displays a designated user interface
+	//    in the app. This setting uses the deep-linking features of the iOS and
+	//    Android platforms.
+	//
+	//    * URL - The default mobile browser on the recipient's device opens and
+	//    loads the web page at a URL that you specify.
+	Action *string `type:"string" enum:"Action"`
+
+	// The default body of the notification message.
+	Body *string `type:"string"`
+
+	// The JSON data payload to use for the default push notification, if the notification
+	// is a silent push notification. This payload is added to the data.pinpoint.jsonBody
+	// object of the notification.
+	Data map[string]*string `type:"map"`
+
+	// Specifies whether the default notification is a silent push notification,
+	// which is a push notification that doesn't display on a recipient's device.
+	// Silent push notifications can be used for cases such as updating an app's
+	// configuration or delivering messages to an in-app notification center.
+	SilentPush *bool `type:"boolean"`
+
+	// The default message variables to use in the notification message. You can
+	// override the default variables with individual address variables.
+	Substitutions map[string][]*string `type:"map"`
+
+	// The default title to display above the notification message on a recipient's
+	// device.
+	Title *string `type:"string"`
+
+	// The default URL to open in a recipient's default mobile browser, if a recipient
+	// taps the push notification and the value of the Action property is URL.
+	Url *string `type:"string"`
}

-// SetEnabled sets the Enabled field's value.
-func (s *APNSVoipChannelResponse) SetEnabled(v bool) *APNSVoipChannelResponse {
-	s.Enabled = &v
-	return s
+// String returns the string representation
+func (s DefaultPushNotificationMessage) String() string {
+	return awsutil.Prettify(s)
}

-// SetHasCredential sets the HasCredential field's value.
-func (s *APNSVoipChannelResponse) SetHasCredential(v bool) *APNSVoipChannelResponse {
-	s.HasCredential = &v
-	return s
+// GoString returns the string representation
+func (s DefaultPushNotificationMessage) GoString() string {
+	return s.String()
}

-// SetHasTokenKey sets the HasTokenKey field's value.
-func (s *APNSVoipChannelResponse) SetHasTokenKey(v bool) *APNSVoipChannelResponse {
-	s.HasTokenKey = &v
+// SetAction sets the Action field's value.
+func (s *DefaultPushNotificationMessage) SetAction(v string) *DefaultPushNotificationMessage {
+	s.Action = &v
	return s
}

-// SetId sets the Id field's value.
-func (s *APNSVoipChannelResponse) SetId(v string) *APNSVoipChannelResponse {
-	s.Id = &v
+// SetBody sets the Body field's value.
+func (s *DefaultPushNotificationMessage) SetBody(v string) *DefaultPushNotificationMessage {
+	s.Body = &v
	return s
}

-// SetIsArchived sets the IsArchived field's value.
-func (s *APNSVoipChannelResponse) SetIsArchived(v bool) *APNSVoipChannelResponse {
-	s.IsArchived = &v
+// SetData sets the Data field's value.
+func (s *DefaultPushNotificationMessage) SetData(v map[string]*string) *DefaultPushNotificationMessage {
+	s.Data = v
	return s
}

-// SetLastModifiedBy sets the LastModifiedBy field's value.
-func (s *APNSVoipChannelResponse) SetLastModifiedBy(v string) *APNSVoipChannelResponse {
-	s.LastModifiedBy = &v
+// SetSilentPush sets the SilentPush field's value.
+func (s *DefaultPushNotificationMessage) SetSilentPush(v bool) *DefaultPushNotificationMessage {
+	s.SilentPush = &v
	return s
}

-// SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *APNSVoipChannelResponse) SetLastModifiedDate(v string) *APNSVoipChannelResponse {
-	s.LastModifiedDate = &v
+// SetSubstitutions sets the Substitutions field's value.
+func (s *DefaultPushNotificationMessage) SetSubstitutions(v map[string][]*string) *DefaultPushNotificationMessage {
+	s.Substitutions = v
	return s
}

-// SetPlatform sets the Platform field's value.
-func (s *APNSVoipChannelResponse) SetPlatform(v string) *APNSVoipChannelResponse {
-	s.Platform = &v
+// SetTitle sets the Title field's value.
+func (s *DefaultPushNotificationMessage) SetTitle(v string) *DefaultPushNotificationMessage {
+	s.Title = &v
	return s
}

-// SetVersion sets the Version field's value.
-func (s *APNSVoipChannelResponse) SetVersion(v int64) *APNSVoipChannelResponse {
-	s.Version = &v
+// SetUrl sets the Url field's value.
+func (s *DefaultPushNotificationMessage) SetUrl(v string) *DefaultPushNotificationMessage {
+	s.Url = &v
	return s
}
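A minimal sketch of a silent push built from the type above: SilentPush suppresses any visible alert, and the Data map is delivered under data.pinpoint.jsonBody on the receiving device, per the field docs. The key and value are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// Only data is carried; no title, body, or sound is shown to the user.
	msg := (&pinpoint.DefaultPushNotificationMessage{}).
		SetSilentPush(true).
		SetData(map[string]*string{
			"configVersion": aws.String("42"),
		})
	fmt.Println(msg.String())
}
```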
-// Apple VoIP Developer Push Notification Service channel definition.
-type APNSVoipSandboxChannelRequest struct {
+// Specifies the default settings and content for a message template that can
+// be used in messages that are sent through a push notification channel.
+type DefaultPushNotificationTemplate struct {
	_ struct{} `type:"structure"`

-	// The bundle id used for APNs Tokens.
-	BundleId *string `type:"string"`
-
-	// The distribution certificate from Apple.
-	Certificate *string `type:"string"`
-
-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
-
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
+	// The action to occur if a recipient taps a push notification that's based
+	// on the message template. Valid values are:
+	//
+	//    * OPEN_APP - Your app opens or it becomes the foreground app if it was
+	//    sent to the background. This is the default action.
+	//
+	//    * DEEP_LINK - Your app opens and displays a designated user interface
+	//    in the app. This setting uses the deep-linking features of the iOS and
+	//    Android platforms.
+	//
+	//    * URL - The default mobile browser on the recipient's device opens and
+	//    loads the web page at a URL that you specify.
+	Action *string `type:"string" enum:"Action"`

-	// The certificate private key.
-	PrivateKey *string `type:"string"`
+	// The message body to use in push notifications that are based on the message
+	// template.
+	Body *string `type:"string"`

-	// The team id used for APNs Tokens.
-	TeamId *string `type:"string"`
+	// The sound to play when a recipient receives a push notification that's based
+	// on the message template. You can use the default stream or specify the file
+	// name of a sound resource that's bundled in your app. On an Android platform,
+	// the sound file must reside in /res/raw/.
+	//
+	// For an iOS platform, this value is the key for the name of a sound file in
+	// your app's main bundle or the Library/Sounds folder in your app's data container.
+	// If the sound file can't be found or you specify default for the value, the
+	// system plays the default alert sound.
+	Sound *string `type:"string"`

-	// The token key used for APNs Tokens.
-	TokenKey *string `type:"string"`
+	// The title to use in push notifications that are based on the message template.
+	// This title appears above the notification message on a recipient's device.
+	Title *string `type:"string"`

-	// The token key used for APNs Tokens.
-	TokenKeyId *string `type:"string"`
+	// The URL to open in a recipient's default mobile browser, if a recipient taps
+	// a push notification that's based on the message template and the value of
+	// the Action property is URL.
+	Url *string `type:"string"`
}

// String returns the string representation
-func (s APNSVoipSandboxChannelRequest) String() string {
+func (s DefaultPushNotificationTemplate) String() string {
	return awsutil.Prettify(s)
}

-// GoString returns the string representation
-func (s APNSVoipSandboxChannelRequest) GoString() string {
-	return s.String()
-}
-
-// SetBundleId sets the BundleId field's value.
-func (s *APNSVoipSandboxChannelRequest) SetBundleId(v string) *APNSVoipSandboxChannelRequest {
-	s.BundleId = &v
-	return s
-}
-
-// SetCertificate sets the Certificate field's value.
-func (s *APNSVoipSandboxChannelRequest) SetCertificate(v string) *APNSVoipSandboxChannelRequest {
-	s.Certificate = &v
-	return s
-}
-
-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSVoipSandboxChannelRequest) SetDefaultAuthenticationMethod(v string) *APNSVoipSandboxChannelRequest {
-	s.DefaultAuthenticationMethod = &v
-	return s
-}
-
-// SetEnabled sets the Enabled field's value.
-func (s *APNSVoipSandboxChannelRequest) SetEnabled(v bool) *APNSVoipSandboxChannelRequest {
-	s.Enabled = &v
	return s
}

-// SetPrivateKey sets the PrivateKey field's value.
-func (s *APNSVoipSandboxChannelRequest) SetPrivateKey(v string) *APNSVoipSandboxChannelRequest {
-	s.PrivateKey = &v
+// GoString returns the string representation
+func (s DefaultPushNotificationTemplate) GoString() string {
+	return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *DefaultPushNotificationTemplate) SetAction(v string) *DefaultPushNotificationTemplate {
+	s.Action = &v
	return s
}

-// SetTeamId sets the TeamId field's value.
-func (s *APNSVoipSandboxChannelRequest) SetTeamId(v string) *APNSVoipSandboxChannelRequest {
-	s.TeamId = &v
+// SetBody sets the Body field's value.
+func (s *DefaultPushNotificationTemplate) SetBody(v string) *DefaultPushNotificationTemplate {
+	s.Body = &v
	return s
}

-// SetTokenKey sets the TokenKey field's value.
-func (s *APNSVoipSandboxChannelRequest) SetTokenKey(v string) *APNSVoipSandboxChannelRequest {
-	s.TokenKey = &v
+// SetSound sets the Sound field's value.
+func (s *DefaultPushNotificationTemplate) SetSound(v string) *DefaultPushNotificationTemplate {
+	s.Sound = &v
	return s
}

-// SetTokenKeyId sets the TokenKeyId field's value.
-func (s *APNSVoipSandboxChannelRequest) SetTokenKeyId(v string) *APNSVoipSandboxChannelRequest {
-	s.TokenKeyId = &v
+// SetTitle sets the Title field's value.
+func (s *DefaultPushNotificationTemplate) SetTitle(v string) *DefaultPushNotificationTemplate {
+	s.Title = &v
	return s
}

+// SetUrl sets the Url field's value.
+func (s *DefaultPushNotificationTemplate) SetUrl(v string) *DefaultPushNotificationTemplate {
+	s.Url = &v
+	return s
+}
+
-// Apple VoIP Developer Push Notification Service channel definition.
-type APNSVoipSandboxChannelResponse struct {
+type DeleteAdmChannelInput struct {
	_ struct{} `type:"structure"`

-	// Application id
-	ApplicationId *string `type:"string"`
-
-	// When was this segment created
-	CreationDate *string `type:"string"`
-
-	// The default authentication method used for APNs.
-	DefaultAuthenticationMethod *string `type:"string"`
-
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
-
-	// Not used. Retained for backwards compatibility.
-	HasCredential *bool `type:"boolean"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
+}

-	// If the channel is registered with a token key for authentication.
-	HasTokenKey *bool `type:"boolean"`
+// String returns the string representation
+func (s DeleteAdmChannelInput) String() string {
+	return awsutil.Prettify(s)
+}

-	// Channel ID. Not used, only for backwards compatibility.
-	Id *string `type:"string"`
+// GoString returns the string representation
+func (s DeleteAdmChannelInput) GoString() string {
+	return s.String()
+}

-	// Is this channel archived
-	IsArchived *bool `type:"boolean"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteAdmChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteAdmChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}

-	// Who made the last change
-	LastModifiedBy *string `type:"string"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}

-	// Last date this was updated
-	LastModifiedDate *string `type:"string"`
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteAdmChannelInput) SetApplicationId(v string) *DeleteAdmChannelInput {
+	s.ApplicationId = &v
+	return s
+}

-	// The platform type. Will be APNS.
-	Platform *string `type:"string"`
+type DeleteAdmChannelOutput struct {
+	_ struct{} `type:"structure" payload:"ADMChannelResponse"`

-	// Version of channel
-	Version *int64 `type:"integer"`
+	// Provides information about the status and settings of the ADM (Amazon Device
+	// Messaging) channel for an application.
+	//
+	// ADMChannelResponse is a required field
+	ADMChannelResponse *ADMChannelResponse `type:"structure" required:"true"`
}

// String returns the string representation
-func (s APNSVoipSandboxChannelResponse) String() string {
+func (s DeleteAdmChannelOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s APNSVoipSandboxChannelResponse) GoString() string {
+func (s DeleteAdmChannelOutput) GoString() string {
	return s.String()
}

-// SetApplicationId sets the ApplicationId field's value.
-func (s *APNSVoipSandboxChannelResponse) SetApplicationId(v string) *APNSVoipSandboxChannelResponse {
-	s.ApplicationId = &v
+// SetADMChannelResponse sets the ADMChannelResponse field's value.
+func (s *DeleteAdmChannelOutput) SetADMChannelResponse(v *ADMChannelResponse) *DeleteAdmChannelOutput {
+	s.ADMChannelResponse = v
	return s
}

-// SetCreationDate sets the CreationDate field's value.
-func (s *APNSVoipSandboxChannelResponse) SetCreationDate(v string) *APNSVoipSandboxChannelResponse {
-	s.CreationDate = &v
-	return s
-}
+type DeleteApnsChannelInput struct {
+	_ struct{} `type:"structure"`

-// SetDefaultAuthenticationMethod sets the DefaultAuthenticationMethod field's value.
-func (s *APNSVoipSandboxChannelResponse) SetDefaultAuthenticationMethod(v string) *APNSVoipSandboxChannelResponse {
-	s.DefaultAuthenticationMethod = &v
-	return s
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
}

-// SetEnabled sets the Enabled field's value.
-func (s *APNSVoipSandboxChannelResponse) SetEnabled(v bool) *APNSVoipSandboxChannelResponse {
-	s.Enabled = &v
-	return s
+// String returns the string representation
+func (s DeleteApnsChannelInput) String() string {
+	return awsutil.Prettify(s)
}

-// SetHasCredential sets the HasCredential field's value.
-func (s *APNSVoipSandboxChannelResponse) SetHasCredential(v bool) *APNSVoipSandboxChannelResponse {
-	s.HasCredential = &v
-	return s
+// GoString returns the string representation
+func (s DeleteApnsChannelInput) GoString() string {
+	return s.String()
}

-// SetHasTokenKey sets the HasTokenKey field's value.
-func (s *APNSVoipSandboxChannelResponse) SetHasTokenKey(v bool) *APNSVoipSandboxChannelResponse {
-	s.HasTokenKey = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteApnsChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteApnsChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}

-// SetId sets the Id field's value.
-func (s *APNSVoipSandboxChannelResponse) SetId(v string) *APNSVoipSandboxChannelResponse {
-	s.Id = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetIsArchived sets the IsArchived field's value.
-func (s *APNSVoipSandboxChannelResponse) SetIsArchived(v bool) *APNSVoipSandboxChannelResponse {
-	s.IsArchived = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteApnsChannelInput) SetApplicationId(v string) *DeleteApnsChannelInput {
+	s.ApplicationId = &v
	return s
}

-// SetLastModifiedBy sets the LastModifiedBy field's value.
-func (s *APNSVoipSandboxChannelResponse) SetLastModifiedBy(v string) *APNSVoipSandboxChannelResponse {
-	s.LastModifiedBy = &v
-	return s
+type DeleteApnsChannelOutput struct {
+	_ struct{} `type:"structure" payload:"APNSChannelResponse"`
+
+	// Provides information about the status and settings of the APNs (Apple Push
+	// Notification service) channel for an application.
+	//
+	// APNSChannelResponse is a required field
+	APNSChannelResponse *APNSChannelResponse `type:"structure" required:"true"`
}

-// SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *APNSVoipSandboxChannelResponse) SetLastModifiedDate(v string) *APNSVoipSandboxChannelResponse {
-	s.LastModifiedDate = &v
-	return s
+// String returns the string representation
+func (s DeleteApnsChannelOutput) String() string {
+	return awsutil.Prettify(s)
}

-// SetPlatform sets the Platform field's value.
-func (s *APNSVoipSandboxChannelResponse) SetPlatform(v string) *APNSVoipSandboxChannelResponse {
-	s.Platform = &v
-	return s
+// GoString returns the string representation
+func (s DeleteApnsChannelOutput) GoString() string {
+	return s.String()
}

-// SetVersion sets the Version field's value.
-func (s *APNSVoipSandboxChannelResponse) SetVersion(v int64) *APNSVoipSandboxChannelResponse {
-	s.Version = &v
+// SetAPNSChannelResponse sets the APNSChannelResponse field's value.
+func (s *DeleteApnsChannelOutput) SetAPNSChannelResponse(v *APNSChannelResponse) *DeleteApnsChannelOutput {
+	s.APNSChannelResponse = v
	return s
}
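Every Delete*Channel operation in this file follows the same shape: the application ID bound into the URI, and the channel's final state echoed back as the response payload. A minimal sketch using the APNs variant, assuming the standard session setup and a placeholder application ID:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	client := pinpoint.New(session.Must(session.NewSession()))

	out, err := client.DeleteApnsChannel(&pinpoint.DeleteApnsChannelInput{
		ApplicationId: aws.String("my-app-id"),
	})
	if err != nil {
		// SDK errors implement awserr.Error, exposing a code and message.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Println(aerr.Code(), aerr.Message())
		}
		return
	}
	fmt.Println("deleted channel version:", aws.Int64Value(out.APNSChannelResponse.Version))
}
```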
-// Activities for campaign.
-type ActivitiesResponse struct {
+type DeleteApnsSandboxChannelInput struct {
	_ struct{} `type:"structure"`

-	// List of campaign activities
-	Item []*ActivityResponse `type:"list"`
-
-	// The string that you use in a subsequent request to get the next page of results
-	// in a paginated response.
-	NextToken *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
}

// String returns the string representation
-func (s ActivitiesResponse) String() string {
+func (s DeleteApnsSandboxChannelInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s ActivitiesResponse) GoString() string {
+func (s DeleteApnsSandboxChannelInput) GoString() string {
	return s.String()
}

-// SetItem sets the Item field's value.
-func (s *ActivitiesResponse) SetItem(v []*ActivityResponse) *ActivitiesResponse {
-	s.Item = v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteApnsSandboxChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteApnsSandboxChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
}

-// SetNextToken sets the NextToken field's value.
-func (s *ActivitiesResponse) SetNextToken(v string) *ActivitiesResponse {
-	s.NextToken = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteApnsSandboxChannelInput) SetApplicationId(v string) *DeleteApnsSandboxChannelInput {
+	s.ApplicationId = &v
	return s
}

-// Activity definition
-type ActivityResponse struct {
-	_ struct{} `type:"structure"`
+type DeleteApnsSandboxChannelOutput struct {
+	_ struct{} `type:"structure" payload:"APNSSandboxChannelResponse"`

-	// The ID of the application to which the campaign applies.
-	ApplicationId *string `type:"string"`
+	// Provides information about the status and settings of the APNs (Apple Push
+	// Notification service) sandbox channel for an application.
+	//
+	// APNSSandboxChannelResponse is a required field
+	APNSSandboxChannelResponse *APNSSandboxChannelResponse `type:"structure" required:"true"`
+}

-	// The ID of the campaign to which the activity applies.
-	CampaignId *string `type:"string"`
+// String returns the string representation
+func (s DeleteApnsSandboxChannelOutput) String() string {
+	return awsutil.Prettify(s)
+}

-	// The actual time the activity was marked CANCELLED or COMPLETED. Provided
-	// in ISO 8601 format.
-	End *string `type:"string"`
+// GoString returns the string representation
+func (s DeleteApnsSandboxChannelOutput) GoString() string {
+	return s.String()
+}

-	// The unique activity ID.
-	Id *string `type:"string"`
+// SetAPNSSandboxChannelResponse sets the APNSSandboxChannelResponse field's value.
+func (s *DeleteApnsSandboxChannelOutput) SetAPNSSandboxChannelResponse(v *APNSSandboxChannelResponse) *DeleteApnsSandboxChannelOutput {
+	s.APNSSandboxChannelResponse = v
+	return s
+}

-	// Indicates whether the activity succeeded.Valid values: SUCCESS, FAIL
-	Result *string `type:"string"`
+type DeleteApnsVoipChannelInput struct {
+	_ struct{} `type:"structure"`

-	// The scheduled start time for the activity in ISO 8601 format.
-	ScheduledStart *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
+}
 
-	// The actual start time of the activity in ISO 8601 format.
-	Start *string `type:"string"`
+// String returns the string representation
+func (s DeleteApnsVoipChannelInput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// The state of the activity.Valid values: PENDING, INITIALIZING, RUNNING, PAUSED,
-	// CANCELLED, COMPLETED
-	State *string `type:"string"`
+// GoString returns the string representation
+func (s DeleteApnsVoipChannelInput) GoString() string {
+	return s.String()
+}
 
-	// The total number of endpoints to which the campaign successfully delivered
-	// messages.
-	SuccessfulEndpointCount *int64 `type:"integer"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteApnsVoipChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteApnsVoipChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
 
-	// The total number of timezones completed.
-	TimezonesCompletedCount *int64 `type:"integer"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
 
-	// The total number of unique timezones present in the segment.
-	TimezonesTotalCount *int64 `type:"integer"`
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteApnsVoipChannelInput) SetApplicationId(v string) *DeleteApnsVoipChannelInput {
+	s.ApplicationId = &v
+	return s
+}
 
-	// The total number of endpoints to which the campaign attempts to deliver messages.
-	TotalEndpointCount *int64 `type:"integer"`
+type DeleteApnsVoipChannelOutput struct {
+	_ struct{} `type:"structure" payload:"APNSVoipChannelResponse"`
 
-	// The ID of a variation of the campaign used for A/B testing.
-	TreatmentId *string `type:"string"`
+	// Provides information about the status and settings of the APNs (Apple Push
+	// Notification service) VoIP channel for an application.
+	//
+	// APNSVoipChannelResponse is a required field
+	APNSVoipChannelResponse *APNSVoipChannelResponse `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s ActivityResponse) String() string {
+func (s DeleteApnsVoipChannelOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s ActivityResponse) GoString() string {
+func (s DeleteApnsVoipChannelOutput) GoString() string {
 	return s.String()
 }
 
-// SetApplicationId sets the ApplicationId field's value.
-func (s *ActivityResponse) SetApplicationId(v string) *ActivityResponse {
-	s.ApplicationId = &v
+// SetAPNSVoipChannelResponse sets the APNSVoipChannelResponse field's value.
+func (s *DeleteApnsVoipChannelOutput) SetAPNSVoipChannelResponse(v *APNSVoipChannelResponse) *DeleteApnsVoipChannelOutput {
+	s.APNSVoipChannelResponse = v
 	return s
 }
 
-// SetCampaignId sets the CampaignId field's value.
-func (s *ActivityResponse) SetCampaignId(v string) *ActivityResponse {
-	s.CampaignId = &v
-	return s
-}
+type DeleteApnsVoipSandboxChannelInput struct {
+	_ struct{} `type:"structure"`
 
-// SetEnd sets the End field's value.
-func (s *ActivityResponse) SetEnd(v string) *ActivityResponse {
-	s.End = &v
-	return s
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
-// SetId sets the Id field's value.
-func (s *ActivityResponse) SetId(v string) *ActivityResponse {
-	s.Id = &v
-	return s
+// String returns the string representation
+func (s DeleteApnsVoipSandboxChannelInput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetResult sets the Result field's value.
-func (s *ActivityResponse) SetResult(v string) *ActivityResponse {
-	s.Result = &v
-	return s
+// GoString returns the string representation
+func (s DeleteApnsVoipSandboxChannelInput) GoString() string {
+	return s.String()
 }
 
-// SetScheduledStart sets the ScheduledStart field's value.
-func (s *ActivityResponse) SetScheduledStart(v string) *ActivityResponse {
-	s.ScheduledStart = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteApnsVoipSandboxChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteApnsVoipSandboxChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
 
-// SetStart sets the Start field's value.
-func (s *ActivityResponse) SetStart(v string) *ActivityResponse {
-	s.Start = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetState sets the State field's value.
-func (s *ActivityResponse) SetState(v string) *ActivityResponse {
-	s.State = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteApnsVoipSandboxChannelInput) SetApplicationId(v string) *DeleteApnsVoipSandboxChannelInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetSuccessfulEndpointCount sets the SuccessfulEndpointCount field's value.
-func (s *ActivityResponse) SetSuccessfulEndpointCount(v int64) *ActivityResponse {
-	s.SuccessfulEndpointCount = &v
-	return s
-}
+type DeleteApnsVoipSandboxChannelOutput struct {
+	_ struct{} `type:"structure" payload:"APNSVoipSandboxChannelResponse"`
 
-// SetTimezonesCompletedCount sets the TimezonesCompletedCount field's value.
-func (s *ActivityResponse) SetTimezonesCompletedCount(v int64) *ActivityResponse {
-	s.TimezonesCompletedCount = &v
-	return s
+	// Provides information about the status and settings of the APNs (Apple Push
+	// Notification service) VoIP sandbox channel for an application.
+	//
+	// APNSVoipSandboxChannelResponse is a required field
+	APNSVoipSandboxChannelResponse *APNSVoipSandboxChannelResponse `type:"structure" required:"true"`
 }
 
-// SetTimezonesTotalCount sets the TimezonesTotalCount field's value.
-func (s *ActivityResponse) SetTimezonesTotalCount(v int64) *ActivityResponse {
-	s.TimezonesTotalCount = &v
-	return s
+// String returns the string representation
+func (s DeleteApnsVoipSandboxChannelOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetTotalEndpointCount sets the TotalEndpointCount field's value.
-func (s *ActivityResponse) SetTotalEndpointCount(v int64) *ActivityResponse {
-	s.TotalEndpointCount = &v
-	return s
+// GoString returns the string representation
+func (s DeleteApnsVoipSandboxChannelOutput) GoString() string {
+	return s.String()
 }
 
-// SetTreatmentId sets the TreatmentId field's value.
-func (s *ActivityResponse) SetTreatmentId(v string) *ActivityResponse {
-	s.TreatmentId = &v
+// SetAPNSVoipSandboxChannelResponse sets the APNSVoipSandboxChannelResponse field's value.
+func (s *DeleteApnsVoipSandboxChannelOutput) SetAPNSVoipSandboxChannelResponse(v *APNSVoipSandboxChannelResponse) *DeleteApnsVoipSandboxChannelOutput {
+	s.APNSVoipSandboxChannelResponse = v
 	return s
 }
 
-// Address configuration.
-type AddressConfiguration struct {
+type DeleteAppInput struct {
 	_ struct{} `type:"structure"`
 
-	// Body override. If specified will override default body.
-	BodyOverride *string `type:"string"`
-
-	// The channel type.Valid values: GCM | APNS | APNS_SANDBOX | APNS_VOIP | APNS_VOIP_SANDBOX
-	// | ADM | SMS | EMAIL | BAIDU
-	ChannelType *string `type:"string" enum:"ChannelType"`
-
-	// A map of custom attributes to attributes to be attached to the message for
-	// this address. This payload is added to the push notification's 'data.pinpoint'
-	// object or added to the email/sms delivery receipt event attributes.
-	Context map[string]*string `type:"map"`
-
-	// The Raw JSON formatted string to be used as the payload. This value overrides
-	// the message.
-	RawContent *string `type:"string"`
-
-	// A map of substitution values for the message to be merged with the DefaultMessage's
-	// substitutions. Substitutions on this map take precedence over the all other
-	// substitutions.
-	Substitutions map[string][]*string `type:"map"`
-
-	// Title override. If specified will override default title if applicable.
-	TitleOverride *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s AddressConfiguration) String() string {
+func (s DeleteAppInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s AddressConfiguration) GoString() string {
+func (s DeleteAppInput) GoString() string {
 	return s.String()
 }
 
-// SetBodyOverride sets the BodyOverride field's value.
-func (s *AddressConfiguration) SetBodyOverride(v string) *AddressConfiguration {
-	s.BodyOverride = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteAppInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteAppInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetChannelType sets the ChannelType field's value.
-func (s *AddressConfiguration) SetChannelType(v string) *AddressConfiguration {
-	s.ChannelType = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteAppInput) SetApplicationId(v string) *DeleteAppInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetContext sets the Context field's value.
-func (s *AddressConfiguration) SetContext(v map[string]*string) *AddressConfiguration {
-	s.Context = v
-	return s
+type DeleteAppOutput struct {
+	_ struct{} `type:"structure" payload:"ApplicationResponse"`
+
+	// Provides information about an application.
+	//
+	// ApplicationResponse is a required field
+	ApplicationResponse *ApplicationResponse `type:"structure" required:"true"`
 }
 
-// SetRawContent sets the RawContent field's value.
-func (s *AddressConfiguration) SetRawContent(v string) *AddressConfiguration {
-	s.RawContent = &v
-	return s
+// String returns the string representation
+func (s DeleteAppOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetSubstitutions sets the Substitutions field's value.
-func (s *AddressConfiguration) SetSubstitutions(v map[string][]*string) *AddressConfiguration {
-	s.Substitutions = v
-	return s
+// GoString returns the string representation
+func (s DeleteAppOutput) GoString() string {
+	return s.String()
 }
 
-// SetTitleOverride sets the TitleOverride field's value.
-func (s *AddressConfiguration) SetTitleOverride(v string) *AddressConfiguration {
-	s.TitleOverride = &v
+// SetApplicationResponse sets the ApplicationResponse field's value.
+func (s *DeleteAppOutput) SetApplicationResponse(v *ApplicationResponse) *DeleteAppOutput {
+	s.ApplicationResponse = v
 	return s
 }
 
-// Application Response.
-type ApplicationResponse struct {
+type DeleteBaiduChannelInput struct {
 	_ struct{} `type:"structure"`
 
-	// The arn for the application.
-	Arn *string `type:"string"`
-
-	// The unique application ID.
-	Id *string `type:"string"`
-
-	// The display name of the application.
-	Name *string `type:"string"`
-
-	// The Tags for the application.
-	Tags map[string]*string `locationName:"tags" type:"map"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s ApplicationResponse) String() string {
+func (s DeleteBaiduChannelInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s ApplicationResponse) GoString() string {
+func (s DeleteBaiduChannelInput) GoString() string {
 	return s.String()
 }
 
-// SetArn sets the Arn field's value.
-func (s *ApplicationResponse) SetArn(v string) *ApplicationResponse {
-	s.Arn = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBaiduChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteBaiduChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetId sets the Id field's value.
-func (s *ApplicationResponse) SetId(v string) *ApplicationResponse {
-	s.Id = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteBaiduChannelInput) SetApplicationId(v string) *DeleteBaiduChannelInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetName sets the Name field's value.
-func (s *ApplicationResponse) SetName(v string) *ApplicationResponse {
-	s.Name = &v
-	return s
+type DeleteBaiduChannelOutput struct {
+	_ struct{} `type:"structure" payload:"BaiduChannelResponse"`
+
+	// Provides information about the status and settings of the Baidu (Baidu Cloud
+	// Push) channel for an application.
+	//
+	// BaiduChannelResponse is a required field
+	BaiduChannelResponse *BaiduChannelResponse `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBaiduChannelOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBaiduChannelOutput) GoString() string {
+	return s.String()
 }
 
-// SetTags sets the Tags field's value.
-func (s *ApplicationResponse) SetTags(v map[string]*string) *ApplicationResponse {
-	s.Tags = v
+// SetBaiduChannelResponse sets the BaiduChannelResponse field's value.
+func (s *DeleteBaiduChannelOutput) SetBaiduChannelResponse(v *BaiduChannelResponse) *DeleteBaiduChannelOutput {
+	s.BaiduChannelResponse = v
 	return s
 }
 
-// Application settings.
-type ApplicationSettingsResource struct {
+type DeleteCampaignInput struct {
 	_ struct{} `type:"structure"`
 
-	// The unique ID for the application.
-	ApplicationId *string `type:"string"`
-
-	// Default campaign hook.
-	CampaignHook *CampaignHook `type:"structure"`
-
-	// The date that the settings were last updated in ISO 8601 format.
-	LastModifiedDate *string `type:"string"`
-
-	// The default campaign limits for the app. These limits apply to each campaign
-	// for the app, unless the campaign overrides the default with limits of its
-	// own.
-	Limits *CampaignLimits `type:"structure"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 
-	// The default quiet time for the app. Campaigns in the app don't send messages
-	// to endpoints during the quiet time.Note: Make sure that your endpoints include
-	// the Demographics.Timezone attribute if you plan to enable a quiet time for
-	// your app. If your endpoints don't include this attribute, they'll receive
-	// the messages that you send them, even if quiet time is enabled.When you set
-	// up an app to use quiet time, campaigns in that app don't send messages during
-	// the time range you specified, as long as all of the following are true:-
-	// The endpoint includes a valid Demographic.Timezone attribute.- The current
-	// time in the endpoint's time zone is later than or equal to the time specified
-	// in the QuietTime.Start attribute for the app (or campaign, if applicable).-
-	// The current time in the endpoint's time zone is earlier than or equal to
-	// the time specified in the QuietTime.End attribute for the app (or campaign,
-	// if applicable).Individual campaigns within the app can have their own quiet
-	// time settings, which override the quiet time settings at the app level.
-	QuietTime *QuietTime `type:"structure"`
+	// CampaignId is a required field
+	CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s ApplicationSettingsResource) String() string {
+func (s DeleteCampaignInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s ApplicationSettingsResource) GoString() string {
+func (s DeleteCampaignInput) GoString() string {
 	return s.String()
 }
 
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCampaignInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteCampaignInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.CampaignId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CampaignId"))
+	}
+	if s.CampaignId != nil && len(*s.CampaignId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
 // SetApplicationId sets the ApplicationId field's value.
-func (s *ApplicationSettingsResource) SetApplicationId(v string) *ApplicationSettingsResource {
+func (s *DeleteCampaignInput) SetApplicationId(v string) *DeleteCampaignInput {
 	s.ApplicationId = &v
 	return s
 }
 
-// SetCampaignHook sets the CampaignHook field's value.
-func (s *ApplicationSettingsResource) SetCampaignHook(v *CampaignHook) *ApplicationSettingsResource {
-	s.CampaignHook = v
+// SetCampaignId sets the CampaignId field's value.
+func (s *DeleteCampaignInput) SetCampaignId(v string) *DeleteCampaignInput {
+	s.CampaignId = &v
 	return s
 }
 
-// SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *ApplicationSettingsResource) SetLastModifiedDate(v string) *ApplicationSettingsResource {
-	s.LastModifiedDate = &v
-	return s
+type DeleteCampaignOutput struct {
+	_ struct{} `type:"structure" payload:"CampaignResponse"`
+
+	// Provides information about the status, configuration, and other settings
+	// for a campaign.
+	//
+	// CampaignResponse is a required field
+	CampaignResponse *CampaignResponse `type:"structure" required:"true"`
 }
 
-// SetLimits sets the Limits field's value.
-func (s *ApplicationSettingsResource) SetLimits(v *CampaignLimits) *ApplicationSettingsResource {
-	s.Limits = v
-	return s
+// String returns the string representation
+func (s DeleteCampaignOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetQuietTime sets the QuietTime field's value.
-func (s *ApplicationSettingsResource) SetQuietTime(v *QuietTime) *ApplicationSettingsResource {
-	s.QuietTime = v
+// GoString returns the string representation
+func (s DeleteCampaignOutput) GoString() string {
+	return s.String()
+}
+
+// SetCampaignResponse sets the CampaignResponse field's value.
+func (s *DeleteCampaignOutput) SetCampaignResponse(v *CampaignResponse) *DeleteCampaignOutput {
+	s.CampaignResponse = v
 	return s
 }
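One detail worth noting about the generated Validate methods above: they accumulate every violation into a single request.ErrInvalidParams rather than stopping at the first failure. A minimal sketch (hypothetical, not part of this change) showing the aggregated error for DeleteCampaignInput, which has two required URI parameters:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// Neither required URI parameter is set, so Validate reports both
	// ApplicationId and CampaignId in one ErrInvalidParams error.
	input := &pinpoint.DeleteCampaignInput{}
	if err := input.Validate(); err != nil {
		fmt.Println(err)
	}
}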
-// Get Applications Result.
-type ApplicationsResponse struct {
+type DeleteEmailChannelInput struct {
 	_ struct{} `type:"structure"`
 
-	// List of applications returned in this page.
-	Item []*ApplicationResponse `type:"list"`
-
-	// The string that you use in a subsequent request to get the next page of results
-	// in a paginated response.
-	NextToken *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s ApplicationsResponse) String() string {
+func (s DeleteEmailChannelInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s ApplicationsResponse) GoString() string {
+func (s DeleteEmailChannelInput) GoString() string {
 	return s.String()
 }
 
-// SetItem sets the Item field's value.
-func (s *ApplicationsResponse) SetItem(v []*ApplicationResponse) *ApplicationsResponse {
-	s.Item = v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEmailChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteEmailChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetNextToken sets the NextToken field's value.
-func (s *ApplicationsResponse) SetNextToken(v string) *ApplicationsResponse {
-	s.NextToken = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteEmailChannelInput) SetApplicationId(v string) *DeleteEmailChannelInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// Custom attibute dimension
-type AttributeDimension struct {
-	_ struct{} `type:"structure"`
-
-	// The type of dimension:INCLUSIVE - Endpoints that match the criteria are included
-	// in the segment.EXCLUSIVE - Endpoints that match the criteria are excluded
-	// from the segment.
-	AttributeType *string `type:"string" enum:"AttributeType"`
+type DeleteEmailChannelOutput struct {
+	_ struct{} `type:"structure" payload:"EmailChannelResponse"`
 
-	// The criteria values for the segment dimension. Endpoints with matching attribute
-	// values are included or excluded from the segment, depending on the setting
-	// for Type.
-	Values []*string `type:"list"`
+	// Provides information about the status and settings of the email channel for
+	// an application.
+	//
+	// EmailChannelResponse is a required field
+	EmailChannelResponse *EmailChannelResponse `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s AttributeDimension) String() string {
+func (s DeleteEmailChannelOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s AttributeDimension) GoString() string {
+func (s DeleteEmailChannelOutput) GoString() string {
 	return s.String()
 }
 
-// SetAttributeType sets the AttributeType field's value.
-func (s *AttributeDimension) SetAttributeType(v string) *AttributeDimension {
-	s.AttributeType = &v
-	return s
-}
-
-// SetValues sets the Values field's value.
-func (s *AttributeDimension) SetValues(v []*string) *AttributeDimension {
-	s.Values = v
+// SetEmailChannelResponse sets the EmailChannelResponse field's value.
+func (s *DeleteEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *DeleteEmailChannelOutput {
+	s.EmailChannelResponse = v
 	return s
 }
 
-// Attributes.
-type AttributesResource struct {
+type DeleteEmailTemplateInput struct {
 	_ struct{} `type:"structure"`
 
-	// The unique ID for the application.
-	ApplicationId *string `type:"string"`
-
-	// The attribute type for the application.
-	AttributeType *string `type:"string"`
-
-	// The attributes for the application.
-	Attributes []*string `type:"list"`
+	// TemplateName is a required field
+	TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s AttributesResource) String() string {
+func (s DeleteEmailTemplateInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s AttributesResource) GoString() string {
+func (s DeleteEmailTemplateInput) GoString() string {
 	return s.String()
 }
 
-// SetApplicationId sets the ApplicationId field's value.
-func (s *AttributesResource) SetApplicationId(v string) *AttributesResource {
-	s.ApplicationId = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEmailTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteEmailTemplateInput"}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
 
-// SetAttributeType sets the AttributeType field's value.
-func (s *AttributesResource) SetAttributeType(v string) *AttributesResource {
-	s.AttributeType = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetAttributes sets the Attributes field's value.
-func (s *AttributesResource) SetAttributes(v []*string) *AttributesResource {
-	s.Attributes = v
+// SetTemplateName sets the TemplateName field's value.
+func (s *DeleteEmailTemplateInput) SetTemplateName(v string) *DeleteEmailTemplateInput {
+	s.TemplateName = &v
 	return s
 }
 
-// Baidu Cloud Push credentials
-type BaiduChannelRequest struct {
-	_ struct{} `type:"structure"`
-
-	// Platform credential API key from Baidu.
-	ApiKey *string `type:"string"`
-
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
+type DeleteEmailTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"MessageBody"`
 
-	// Platform credential Secret key from Baidu.
-	SecretKey *string `type:"string"`
+	// Provides information about an API request or response.
+	//
+	// MessageBody is a required field
+	MessageBody *MessageBody `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s BaiduChannelRequest) String() string {
+func (s DeleteEmailTemplateOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s BaiduChannelRequest) GoString() string {
+func (s DeleteEmailTemplateOutput) GoString() string {
 	return s.String()
 }
 
-// SetApiKey sets the ApiKey field's value.
-func (s *BaiduChannelRequest) SetApiKey(v string) *BaiduChannelRequest {
-	s.ApiKey = &v
-	return s
-}
-
-// SetEnabled sets the Enabled field's value.
-func (s *BaiduChannelRequest) SetEnabled(v bool) *BaiduChannelRequest {
-	s.Enabled = &v
-	return s
-}
-
-// SetSecretKey sets the SecretKey field's value.
-func (s *BaiduChannelRequest) SetSecretKey(v string) *BaiduChannelRequest {
-	s.SecretKey = &v
+// SetMessageBody sets the MessageBody field's value.
+func (s *DeleteEmailTemplateOutput) SetMessageBody(v *MessageBody) *DeleteEmailTemplateOutput {
+	s.MessageBody = v
 	return s
 }
 
-// Baidu Cloud Messaging channel definition
-type BaiduChannelResponse struct {
+type DeleteEndpointInput struct {
 	_ struct{} `type:"structure"`
 
-	// Application id
-	ApplicationId *string `type:"string"`
-
-	// When was this segment created
-	CreationDate *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 
-	// The Baidu API key from Baidu.
-	Credential *string `type:"string"`
+	// EndpointId is a required field
+	EndpointId *string `location:"uri" locationName:"endpoint-id" type:"string" required:"true"`
+}
 
-	// If the channel is enabled for sending messages.
-	Enabled *bool `type:"boolean"`
+// String returns the string representation
+func (s DeleteEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// Not used. Retained for backwards compatibility.
-	HasCredential *bool `type:"boolean"`
+// GoString returns the string representation
+func (s DeleteEndpointInput) GoString() string {
+	return s.String()
+}
 
-	// Channel ID. Not used, only for backwards compatibility.
-	Id *string `type:"string"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.EndpointId == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndpointId"))
+	}
+	if s.EndpointId != nil && len(*s.EndpointId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("EndpointId", 1))
+	}
 
-	// Is this channel archived
-	IsArchived *bool `type:"boolean"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
 
-	// Who made the last change
-	LastModifiedBy *string `type:"string"`
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteEndpointInput) SetApplicationId(v string) *DeleteEndpointInput {
+	s.ApplicationId = &v
+	return s
+}
 
-	// Last date this was updated
-	LastModifiedDate *string `type:"string"`
+// SetEndpointId sets the EndpointId field's value.
+func (s *DeleteEndpointInput) SetEndpointId(v string) *DeleteEndpointInput {
+	s.EndpointId = &v
+	return s
+}
 
-	// The platform type. Will be BAIDU
-	Platform *string `type:"string"`
+type DeleteEndpointOutput struct {
+	_ struct{} `type:"structure" payload:"EndpointResponse"`
 
-	// Version of channel
-	Version *int64 `type:"integer"`
+	// Provides information about the channel type and other settings for an endpoint.
+	//
+	// EndpointResponse is a required field
+	EndpointResponse *EndpointResponse `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s BaiduChannelResponse) String() string {
+func (s DeleteEndpointOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s BaiduChannelResponse) GoString() string {
+func (s DeleteEndpointOutput) GoString() string {
 	return s.String()
 }
 
-// SetApplicationId sets the ApplicationId field's value.
-func (s *BaiduChannelResponse) SetApplicationId(v string) *BaiduChannelResponse {
-	s.ApplicationId = &v
+// SetEndpointResponse sets the EndpointResponse field's value.
+func (s *DeleteEndpointOutput) SetEndpointResponse(v *EndpointResponse) *DeleteEndpointOutput {
+	s.EndpointResponse = v
 	return s
 }
 
-// SetCreationDate sets the CreationDate field's value.
-func (s *BaiduChannelResponse) SetCreationDate(v string) *BaiduChannelResponse {
-	s.CreationDate = &v
-	return s
-}
+type DeleteEventStreamInput struct {
+	_ struct{} `type:"structure"`
 
-// SetCredential sets the Credential field's value.
-func (s *BaiduChannelResponse) SetCredential(v string) *BaiduChannelResponse {
-	s.Credential = &v
-	return s
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
-// SetEnabled sets the Enabled field's value.
-func (s *BaiduChannelResponse) SetEnabled(v bool) *BaiduChannelResponse {
-	s.Enabled = &v
-	return s
+// String returns the string representation
+func (s DeleteEventStreamInput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetHasCredential sets the HasCredential field's value.
-func (s *BaiduChannelResponse) SetHasCredential(v bool) *BaiduChannelResponse {
-	s.HasCredential = &v
-	return s
+// GoString returns the string representation
+func (s DeleteEventStreamInput) GoString() string {
+	return s.String()
 }
 
-// SetId sets the Id field's value.
-func (s *BaiduChannelResponse) SetId(v string) *BaiduChannelResponse {
-	s.Id = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEventStreamInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteEventStreamInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetIsArchived sets the IsArchived field's value.
-func (s *BaiduChannelResponse) SetIsArchived(v bool) *BaiduChannelResponse {
-	s.IsArchived = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteEventStreamInput) SetApplicationId(v string) *DeleteEventStreamInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetLastModifiedBy sets the LastModifiedBy field's value.
-func (s *BaiduChannelResponse) SetLastModifiedBy(v string) *BaiduChannelResponse {
-	s.LastModifiedBy = &v
-	return s
+type DeleteEventStreamOutput struct {
+	_ struct{} `type:"structure" payload:"EventStream"`
+
+	// Specifies settings for publishing event data to an Amazon Kinesis data stream
+	// or an Amazon Kinesis Data Firehose delivery stream.
+	//
+	// EventStream is a required field
+	EventStream *EventStream `type:"structure" required:"true"`
 }
 
-// SetLastModifiedDate sets the LastModifiedDate field's value.
-func (s *BaiduChannelResponse) SetLastModifiedDate(v string) *BaiduChannelResponse {
-	s.LastModifiedDate = &v
-	return s
+// String returns the string representation
+func (s DeleteEventStreamOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetPlatform sets the Platform field's value.
-func (s *BaiduChannelResponse) SetPlatform(v string) *BaiduChannelResponse {
-	s.Platform = &v
-	return s
+// GoString returns the string representation
+func (s DeleteEventStreamOutput) GoString() string {
+	return s.String()
 }
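Each Delete*Output in this diff carries a `payload:"..."` tag, meaning the REST response body unmarshals into that single required field rather than into top-level members. A hedged sketch of reading the payload back from DeleteEventStream (the application ID is hypothetical; DestinationStreamArn and RoleArn are fields of EventStream in this SDK version):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := pinpoint.New(sess)

	out, err := svc.DeleteEventStream(&pinpoint.DeleteEventStreamInput{
		ApplicationId: aws.String("example-app-id"), // hypothetical ID
	})
	if err != nil {
		fmt.Println("DeleteEventStream failed:", err)
		return
	}

	// The response body is unmarshaled into the required EventStream payload,
	// echoing the Kinesis destination that was just detached.
	es := out.EventStream
	fmt.Println(aws.StringValue(es.DestinationStreamArn), aws.StringValue(es.RoleArn))
}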
-// SetVersion sets the Version field's value.
-func (s *BaiduChannelResponse) SetVersion(v int64) *BaiduChannelResponse {
-	s.Version = &v
+// SetEventStream sets the EventStream field's value.
+func (s *DeleteEventStreamOutput) SetEventStream(v *EventStream) *DeleteEventStreamOutput {
+	s.EventStream = v
 	return s
 }
 
-// Baidu Message.
-type BaiduMessage struct {
+type DeleteGcmChannelInput struct {
 	_ struct{} `type:"structure"`
 
-	// The action that occurs if the user taps a push notification delivered by
-	// the campaign: OPEN_APP - Your app launches, or it becomes the foreground
-	// app if it has been sent to the background. This is the default action. DEEP_LINK
-	// - Uses deep linking features in iOS and Android to open your app and display
-	// a designated user interface within the app. URL - The default mobile browser
-	// on the user's device launches and opens a web page at the URL you specify.
-	// Possible values include: OPEN_APP | DEEP_LINK | URL
-	Action *string `type:"string" enum:"Action"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
+}
 
-	// The message body of the notification.
-	Body *string `type:"string"`
+// String returns the string representation
+func (s DeleteGcmChannelInput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// The data payload used for a silent push. This payload is added to the notifications'
-	// data.pinpoint.jsonBody' object
-	Data map[string]*string `type:"map"`
+// GoString returns the string representation
+func (s DeleteGcmChannelInput) GoString() string {
+	return s.String()
+}
 
-	// The icon image name of the asset saved in your application.
-	IconReference *string `type:"string"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteGcmChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteGcmChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
 
-	// The URL that points to an image used as the large icon to the notification
-	// content view.
-	ImageIconUrl *string `type:"string"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
 
-	// The URL that points to an image used in the push notification.
-	ImageUrl *string `type:"string"`
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteGcmChannelInput) SetApplicationId(v string) *DeleteGcmChannelInput {
+	s.ApplicationId = &v
+	return s
+}
 
-	// The Raw JSON formatted string to be used as the payload. This value overrides
-	// the message.
-	RawContent *string `type:"string"`
+type DeleteGcmChannelOutput struct {
+	_ struct{} `type:"structure" payload:"GCMChannelResponse"`
 
-	// Indicates if the message should display on the users device. Silent pushes
-	// can be used for Remote Configuration and Phone Home use cases.
-	SilentPush *bool `type:"boolean"`
+	// Provides information about the status and settings of the GCM channel for
+	// an application. The GCM channel enables Amazon Pinpoint to send push notifications
+	// through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging
+	// (GCM), service.
+	//
+	// GCMChannelResponse is a required field
+	GCMChannelResponse *GCMChannelResponse `type:"structure" required:"true"`
+}
 
-	// The URL that points to an image used as the small icon for the notification
-	// which will be used to represent the notification in the status bar and content
-	// view
-	SmallImageIconUrl *string `type:"string"`
+// String returns the string representation
+func (s DeleteGcmChannelOutput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// Indicates a sound to play when the device receives the notification. Supports
-	// default, or the filename of a sound resource bundled in the app. Android
-	// sound files must reside in /res/raw/
-	Sound *string `type:"string"`
+// GoString returns the string representation
+func (s DeleteGcmChannelOutput) GoString() string {
+	return s.String()
+}
 
-	// Default message substitutions. Can be overridden by individual address substitutions.
-	Substitutions map[string][]*string `type:"map"`
+// SetGCMChannelResponse sets the GCMChannelResponse field's value.
+func (s *DeleteGcmChannelOutput) SetGCMChannelResponse(v *GCMChannelResponse) *DeleteGcmChannelOutput {
+	s.GCMChannelResponse = v
+	return s
+}
 
-	// This parameter specifies how long (in seconds) the message should be kept
-	// in Baidu storage if the device is offline. The and the default value and
-	// the maximum time to live supported is 7 days (604800 seconds)
-	TimeToLive *int64 `type:"integer"`
+type DeleteJourneyInput struct {
+	_ struct{} `type:"structure"`
 
-	// The message title that displays above the message on the user's device.
-	Title *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 
-	// The URL to open in the user's mobile browser. Used if the value for Action
-	// is URL.
-	Url *string `type:"string"`
+	// JourneyId is a required field
+	JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s BaiduMessage) String() string {
+func (s DeleteJourneyInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s BaiduMessage) GoString() string {
+func (s DeleteJourneyInput) GoString() string {
 	return s.String()
 }
 
-// SetAction sets the Action field's value.
-func (s *BaiduMessage) SetAction(v string) *BaiduMessage {
-	s.Action = &v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteJourneyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteJourneyInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.JourneyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("JourneyId"))
+	}
+	if s.JourneyId != nil && len(*s.JourneyId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetBody sets the Body field's value.
-func (s *BaiduMessage) SetBody(v string) *BaiduMessage {
-	s.Body = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteJourneyInput) SetApplicationId(v string) *DeleteJourneyInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetData sets the Data field's value.
-func (s *BaiduMessage) SetData(v map[string]*string) *BaiduMessage {
-	s.Data = v
+// SetJourneyId sets the JourneyId field's value.
+func (s *DeleteJourneyInput) SetJourneyId(v string) *DeleteJourneyInput {
+	s.JourneyId = &v
 	return s
 }
 
-// SetIconReference sets the IconReference field's value.
-func (s *BaiduMessage) SetIconReference(v string) *BaiduMessage {
-	s.IconReference = &v
-	return s
+type DeleteJourneyOutput struct {
+	_ struct{} `type:"structure" payload:"JourneyResponse"`
+
+	// Provides information about the status, configuration, and other settings
+	// for a journey.
+	//
+	// JourneyResponse is a required field
+	JourneyResponse *JourneyResponse `type:"structure" required:"true"`
 }
 
-// SetImageIconUrl sets the ImageIconUrl field's value.
-func (s *BaiduMessage) SetImageIconUrl(v string) *BaiduMessage {
-	s.ImageIconUrl = &v
-	return s
+// String returns the string representation
+func (s DeleteJourneyOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetImageUrl sets the ImageUrl field's value.
-func (s *BaiduMessage) SetImageUrl(v string) *BaiduMessage {
-	s.ImageUrl = &v
-	return s
+// GoString returns the string representation
+func (s DeleteJourneyOutput) GoString() string {
+	return s.String()
 }
 
-// SetRawContent sets the RawContent field's value.
-func (s *BaiduMessage) SetRawContent(v string) *BaiduMessage {
-	s.RawContent = &v
+// SetJourneyResponse sets the JourneyResponse field's value.
+func (s *DeleteJourneyOutput) SetJourneyResponse(v *JourneyResponse) *DeleteJourneyOutput {
+	s.JourneyResponse = v
 	return s
 }
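Service-side failures from these Delete operations surface as awserr.Error values, and the pinpoint package generates ErrCode* constants (for example pinpoint.ErrCodeNotFoundException) that can be matched against the error code. A minimal sketch, assuming the DeleteJourney operation is wired to the input/output types above; the IDs are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := pinpoint.New(sess)

	_, err := svc.DeleteJourney(&pinpoint.DeleteJourneyInput{
		ApplicationId: aws.String("example-app-id"),     // hypothetical ID
		JourneyId:     aws.String("example-journey-id"), // hypothetical ID
	})
	if aerr, ok := err.(awserr.Error); ok {
		// Deleting an already-deleted journey is often safe to treat as success.
		if aerr.Code() == pinpoint.ErrCodeNotFoundException {
			fmt.Println("journey already gone")
			return
		}
		fmt.Println(aerr.Code(), aerr.Message())
	}
}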
-// SetSilentPush sets the SilentPush field's value.
-func (s *BaiduMessage) SetSilentPush(v bool) *BaiduMessage {
-	s.SilentPush = &v
-	return s
+type DeletePushTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// TemplateName is a required field
+	TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"`
 }
 
-// SetSmallImageIconUrl sets the SmallImageIconUrl field's value.
-func (s *BaiduMessage) SetSmallImageIconUrl(v string) *BaiduMessage {
-	s.SmallImageIconUrl = &v
-	return s
+// String returns the string representation
+func (s DeletePushTemplateInput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetSound sets the Sound field's value.
-func (s *BaiduMessage) SetSound(v string) *BaiduMessage {
-	s.Sound = &v
-	return s
+// GoString returns the string representation
+func (s DeletePushTemplateInput) GoString() string {
+	return s.String()
 }
 
-// SetSubstitutions sets the Substitutions field's value.
-func (s *BaiduMessage) SetSubstitutions(v map[string][]*string) *BaiduMessage {
-	s.Substitutions = v
-	return s
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeletePushTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeletePushTemplateInput"}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetTimeToLive sets the TimeToLive field's value.
-func (s *BaiduMessage) SetTimeToLive(v int64) *BaiduMessage {
-	s.TimeToLive = &v
+// SetTemplateName sets the TemplateName field's value.
+func (s *DeletePushTemplateInput) SetTemplateName(v string) *DeletePushTemplateInput {
+	s.TemplateName = &v
 	return s
 }
 
-// SetTitle sets the Title field's value.
-func (s *BaiduMessage) SetTitle(v string) *BaiduMessage {
-	s.Title = &v
-	return s
+type DeletePushTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"MessageBody"`
+
+	// Provides information about an API request or response.
+	//
+	// MessageBody is a required field
+	MessageBody *MessageBody `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeletePushTemplateOutput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetUrl sets the Url field's value.
-func (s *BaiduMessage) SetUrl(v string) *BaiduMessage {
-	s.Url = &v
+// GoString returns the string representation
+func (s DeletePushTemplateOutput) GoString() string {
+	return s.String()
+}
+
+// SetMessageBody sets the MessageBody field's value.
+func (s *DeletePushTemplateOutput) SetMessageBody(v *MessageBody) *DeletePushTemplateOutput {
+	s.MessageBody = v
 	return s
 }
 
-// The email message configuration.
-type CampaignEmailMessage struct {
+type DeleteSegmentInput struct {
 	_ struct{} `type:"structure"`
 
-	// The email text body.
-	Body *string `type:"string"`
-
-	// The email address used to send the email from. Defaults to use FromAddress
-	// specified in the Email Channel.
-	FromAddress *string `type:"string"`
-
-	// The email html body.
-	HtmlBody *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 
-	// The email title (Or subject).
-	Title *string `type:"string"`
+	// SegmentId is a required field
+	SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s CampaignEmailMessage) String() string {
+func (s DeleteSegmentInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s CampaignEmailMessage) GoString() string {
+func (s DeleteSegmentInput) GoString() string {
 	return s.String()
 }
 
-// SetBody sets the Body field's value.
-func (s *CampaignEmailMessage) SetBody(v string) *CampaignEmailMessage {
-	s.Body = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteSegmentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteSegmentInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
+	if s.SegmentId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SegmentId"))
+	}
+	if s.SegmentId != nil && len(*s.SegmentId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1))
+	}
 
-// SetFromAddress sets the FromAddress field's value.
-func (s *CampaignEmailMessage) SetFromAddress(v string) *CampaignEmailMessage {
-	s.FromAddress = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetHtmlBody sets the HtmlBody field's value.
-func (s *CampaignEmailMessage) SetHtmlBody(v string) *CampaignEmailMessage {
-	s.HtmlBody = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteSegmentInput) SetApplicationId(v string) *DeleteSegmentInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// SetTitle sets the Title field's value.
-func (s *CampaignEmailMessage) SetTitle(v string) *CampaignEmailMessage {
-	s.Title = &v
+// SetSegmentId sets the SegmentId field's value.
+func (s *DeleteSegmentInput) SetSegmentId(v string) *DeleteSegmentInput {
+	s.SegmentId = &v
 	return s
 }
 
-// An object that defines the events that cause the campaign to be sent.
-type CampaignEventFilter struct {
-	_ struct{} `type:"structure"`
-
-	// An object that defines the dimensions for the event filter.
-	Dimensions *EventDimensions `type:"structure"`
+type DeleteSegmentOutput struct {
+	_ struct{} `type:"structure" payload:"SegmentResponse"`
 
-	// The type of event that causes the campaign to be sent. Possible values:SYSTEM
-	// - Send the campaign when a system event occurs. See the System resource for
-	// more information.ENDPOINT - Send the campaign when an endpoint event occurs.
-	// See the Event resource for more information.
-	FilterType *string `type:"string" enum:"FilterType"`
+	// Provides information about the configuration, dimension, and other settings
+	// for a segment.
+	//
+	// SegmentResponse is a required field
+	SegmentResponse *SegmentResponse `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s CampaignEventFilter) String() string {
+func (s DeleteSegmentOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s CampaignEventFilter) GoString() string {
+func (s DeleteSegmentOutput) GoString() string {
 	return s.String()
 }
 
-// SetDimensions sets the Dimensions field's value.
-func (s *CampaignEventFilter) SetDimensions(v *EventDimensions) *CampaignEventFilter {
-	s.Dimensions = v
-	return s
-}
-
-// SetFilterType sets the FilterType field's value.
-func (s *CampaignEventFilter) SetFilterType(v string) *CampaignEventFilter {
-	s.FilterType = &v
+// SetSegmentResponse sets the SegmentResponse field's value.
+func (s *DeleteSegmentOutput) SetSegmentResponse(v *SegmentResponse) *DeleteSegmentOutput {
+	s.SegmentResponse = v
 	return s
 }
 
-// Campaign hook information.
-type CampaignHook struct {
+type DeleteSmsChannelInput struct {
 	_ struct{} `type:"structure"`
 
-	// Lambda function name or arn to be called for delivery
-	LambdaFunctionName *string `type:"string"`
-
-	// What mode Lambda should be invoked in.
-	Mode *string `type:"string" enum:"Mode"`
-
-	// Web URL to call for hook. If the URL has authentication specified it will
-	// be added as authentication to the request
-	WebUrl *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s CampaignHook) String() string {
+func (s DeleteSmsChannelInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s CampaignHook) GoString() string {
+func (s DeleteSmsChannelInput) GoString() string {
 	return s.String()
 }
 
-// SetLambdaFunctionName sets the LambdaFunctionName field's value.
-func (s *CampaignHook) SetLambdaFunctionName(v string) *CampaignHook {
-	s.LambdaFunctionName = &v
-	return s
-}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteSmsChannelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteSmsChannelInput"}
+	if s.ApplicationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationId"))
+	}
+	if s.ApplicationId != nil && len(*s.ApplicationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1))
+	}
 
-// SetMode sets the Mode field's value.
-func (s *CampaignHook) SetMode(v string) *CampaignHook {
-	s.Mode = &v
-	return s
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
 }
 
-// SetWebUrl sets the WebUrl field's value.
-func (s *CampaignHook) SetWebUrl(v string) *CampaignHook {
-	s.WebUrl = &v
+// SetApplicationId sets the ApplicationId field's value.
+func (s *DeleteSmsChannelInput) SetApplicationId(v string) *DeleteSmsChannelInput {
+	s.ApplicationId = &v
 	return s
 }
 
-// Campaign Limits are used to limit the number of messages that can be sent
-// to a single endpoint.
-type CampaignLimits struct {
-	_ struct{} `type:"structure"`
-
-	// The maximum number of messages that each campaign can send to a single endpoint
-	// in a 24-hour period.
-	Daily *int64 `type:"integer"`
-
-	// The length of time (in seconds) that the campaign can run before it ends
-	// and message deliveries stop. This duration begins at the scheduled start
-	// time for the campaign. The minimum value is 60.
-	MaximumDuration *int64 `type:"integer"`
-
-	// The number of messages that the campaign can send per second. The minimum
-	// value is 50, and the maximum is 20000.
-	MessagesPerSecond *int64 `type:"integer"`
+type DeleteSmsChannelOutput struct {
+	_ struct{} `type:"structure" payload:"SMSChannelResponse"`
 
-	// The maximum number of messages that an individual campaign can send to a
-	// single endpoint over the course of the campaign.
-	Total *int64 `type:"integer"`
+	// Provides information about the status and settings of the SMS channel for
+	// an application.
+	//
+	// SMSChannelResponse is a required field
+	SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s CampaignLimits) String() string {
+func (s DeleteSmsChannelOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s CampaignLimits) GoString() string {
+func (s DeleteSmsChannelOutput) GoString() string {
 	return s.String()
 }
 
-// SetDaily sets the Daily field's value.
-func (s *CampaignLimits) SetDaily(v int64) *CampaignLimits {
-	s.Daily = &v
+// SetSMSChannelResponse sets the SMSChannelResponse field's value.
+func (s *DeleteSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *DeleteSmsChannelOutput {
+	s.SMSChannelResponse = v
 	return s
 }
 
-// SetMaximumDuration sets the MaximumDuration field's value.
-func (s *CampaignLimits) SetMaximumDuration(v int64) *CampaignLimits {
-	s.MaximumDuration = &v
-	return s
+type DeleteSmsTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// TemplateName is a required field
+	TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"`
 }
 
-// SetMessagesPerSecond sets the MessagesPerSecond field's value.
-func (s *CampaignLimits) SetMessagesPerSecond(v int64) *CampaignLimits {
-	s.MessagesPerSecond = &v
-	return s
+// String returns the string representation
+func (s DeleteSmsTemplateInput) String() string {
+	return awsutil.Prettify(s)
 }
 
-// SetTotal sets the Total field's value.
-func (s *CampaignLimits) SetTotal(v int64) *CampaignLimits {
-	s.Total = &v
-	return s
+// GoString returns the string representation
+func (s DeleteSmsTemplateInput) GoString() string {
+	return s.String()
 }
 
-// Campaign definition
-type CampaignResponse struct {
-	_ struct{} `type:"structure"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteSmsTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteSmsTemplateInput"}
+	if s.TemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TemplateName"))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
 
-	// Treatments that are defined in addition to the default treatment.
-	AdditionalTreatments []*TreatmentResource `type:"list"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
 
-	// The ID of the application to which the campaign applies.
-	ApplicationId *string `type:"string"`
+// SetTemplateName sets the TemplateName field's value.
+func (s *DeleteSmsTemplateInput) SetTemplateName(v string) *DeleteSmsTemplateInput {
+	s.TemplateName = &v
+	return s
+}
 
-	// The arn for the campaign.
-	Arn *string `type:"string"`
+type DeleteSmsTemplateOutput struct {
+	_ struct{} `type:"structure" payload:"MessageBody"`
 
-	// The date the campaign was created in ISO 8601 format.
-	CreationDate *string `type:"string"`
+	// Provides information about an API request or response.
+	//
+	// MessageBody is a required field
+	MessageBody *MessageBody `type:"structure" required:"true"`
+}
 
-	// The status of the campaign's default treatment. Only present for A/B test
-	// campaigns.
-	DefaultState *CampaignState `type:"structure"`
+// String returns the string representation
+func (s DeleteSmsTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// A description of the campaign.
-	Description *string `type:"string"`
+// GoString returns the string representation
+func (s DeleteSmsTemplateOutput) GoString() string {
+	return s.String()
+}
 
-	// The allocated percentage of end users who will not receive messages from
-	// this campaign.
-	HoldoutPercent *int64 `type:"integer"`
+// SetMessageBody sets the MessageBody field's value.
+func (s *DeleteSmsTemplateOutput) SetMessageBody(v *MessageBody) *DeleteSmsTemplateOutput {
+	s.MessageBody = v
+	return s
+}
 
-	// Campaign hook information.
-	Hook *CampaignHook `type:"structure"`
+type DeleteUserEndpointsInput struct {
+	_ struct{} `type:"structure"`
 
-	// The unique campaign ID.
-	Id *string `type:"string"`
+	// ApplicationId is a required field
+	ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"`
 
-	// Indicates whether the campaign is paused. A paused campaign does not send
-	// messages unless you resume it by setting IsPaused to false.
-	IsPaused *bool `type:"boolean"`
+	// UserId is a required field
+	UserId *string `location:"uri" locationName:"user-id" type:"string" required:"true"`
+}
 
-	// The date the campaign was last updated in ISO 8601 format.
-	LastModifiedDate *string `type:"string"`
+// String returns the string representation
+func (s DeleteUserEndpointsInput) String() string {
+	return awsutil.Prettify(s)
+}
 
-	// The campaign limits settings.
-	Limits *CampaignLimits `type:"structure"`
+// GoString returns the string representation
+func (s DeleteUserEndpointsInput) GoString() string {
+	return s.String()
+}
 
-	// The message configuration settings.
-	MessageConfiguration *MessageConfiguration `type:"structure"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteUserEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserEndpointsInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) + } + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) + } - // The custom name of the campaign. - Name *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *DeleteUserEndpointsInput) SetApplicationId(v string) *DeleteUserEndpointsInput { + s.ApplicationId = &v + return s +} - // The campaign schedule. - Schedule *Schedule `type:"structure"` +// SetUserId sets the UserId field's value. +func (s *DeleteUserEndpointsInput) SetUserId(v string) *DeleteUserEndpointsInput { + s.UserId = &v + return s +} - // The ID of the segment to which the campaign sends messages. - SegmentId *string `type:"string"` +type DeleteUserEndpointsOutput struct { + _ struct{} `type:"structure" payload:"EndpointsResponse"` - // The version of the segment to which the campaign sends messages. - SegmentVersion *int64 `type:"integer"` + // Provides information about all the endpoints that are associated with a user + // ID. + // + // EndpointsResponse is a required field + EndpointsResponse *EndpointsResponse `type:"structure" required:"true"` +} - // The campaign status.An A/B test campaign will have a status of COMPLETED - // only when all treatments have a status of COMPLETED. - State *CampaignState `type:"structure"` +// String returns the string representation +func (s DeleteUserEndpointsOutput) String() string { + return awsutil.Prettify(s) +} - // The Tags for the campaign. - Tags map[string]*string `locationName:"tags" type:"map"` +// GoString returns the string representation +func (s DeleteUserEndpointsOutput) GoString() string { + return s.String() +} - // A custom description for the treatment. - TreatmentDescription *string `type:"string"` +// SetEndpointsResponse sets the EndpointsResponse field's value. +func (s *DeleteUserEndpointsOutput) SetEndpointsResponse(v *EndpointsResponse) *DeleteUserEndpointsOutput { + s.EndpointsResponse = v + return s +} - // The custom name of a variation of the campaign used for A/B testing. - TreatmentName *string `type:"string"` +type DeleteVoiceChannelInput struct { + _ struct{} `type:"structure"` - // The campaign version number. - Version *int64 `type:"integer"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s CampaignResponse) String() string { +func (s DeleteVoiceChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CampaignResponse) GoString() string { +func (s DeleteVoiceChannelInput) GoString() string { return s.String() } -// SetAdditionalTreatments sets the AdditionalTreatments field's value. -func (s *CampaignResponse) SetAdditionalTreatments(v []*TreatmentResource) *CampaignResponse { - s.AdditionalTreatments = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteVoiceChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVoiceChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetApplicationId sets the ApplicationId field's value. -func (s *CampaignResponse) SetApplicationId(v string) *CampaignResponse { +func (s *DeleteVoiceChannelInput) SetApplicationId(v string) *DeleteVoiceChannelInput { s.ApplicationId = &v return s } -// SetArn sets the Arn field's value. -func (s *CampaignResponse) SetArn(v string) *CampaignResponse { - s.Arn = &v - return s -} +type DeleteVoiceChannelOutput struct { + _ struct{} `type:"structure" payload:"VoiceChannelResponse"` -// SetCreationDate sets the CreationDate field's value. -func (s *CampaignResponse) SetCreationDate(v string) *CampaignResponse { - s.CreationDate = &v - return s + // Provides information about the status and settings of the voice channel for + // an application. + // + // VoiceChannelResponse is a required field + VoiceChannelResponse *VoiceChannelResponse `type:"structure" required:"true"` } -// SetDefaultState sets the DefaultState field's value. -func (s *CampaignResponse) SetDefaultState(v *CampaignState) *CampaignResponse { - s.DefaultState = v - return s +// String returns the string representation +func (s DeleteVoiceChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetDescription sets the Description field's value. -func (s *CampaignResponse) SetDescription(v string) *CampaignResponse { - s.Description = &v - return s +// GoString returns the string representation +func (s DeleteVoiceChannelOutput) GoString() string { + return s.String() } -// SetHoldoutPercent sets the HoldoutPercent field's value. -func (s *CampaignResponse) SetHoldoutPercent(v int64) *CampaignResponse { - s.HoldoutPercent = &v +// SetVoiceChannelResponse sets the VoiceChannelResponse field's value. +func (s *DeleteVoiceChannelOutput) SetVoiceChannelResponse(v *VoiceChannelResponse) *DeleteVoiceChannelOutput { + s.VoiceChannelResponse = v return s } -// SetHook sets the Hook field's value. -func (s *CampaignResponse) SetHook(v *CampaignHook) *CampaignResponse { - s.Hook = v - return s -} +// Specifies the settings and content for the default message and any default +// messages that you tailored for specific channels. +type DirectMessageConfiguration struct { + _ struct{} `type:"structure"` -// SetId sets the Id field's value. -func (s *CampaignResponse) SetId(v string) *CampaignResponse { - s.Id = &v - return s -} + // The default push notification message for the ADM (Amazon Device Messaging) + // channel. This message overrides the default push notification message (DefaultPushNotificationMessage). + ADMMessage *ADMMessage `type:"structure"` -// SetIsPaused sets the IsPaused field's value. -func (s *CampaignResponse) SetIsPaused(v bool) *CampaignResponse { - s.IsPaused = &v - return s -} + // The default push notification message for the APNs (Apple Push Notification + // service) channel. This message overrides the default push notification message + // (DefaultPushNotificationMessage). + APNSMessage *APNSMessage `type:"structure"` -// SetLastModifiedDate sets the LastModifiedDate field's value. 
-func (s *CampaignResponse) SetLastModifiedDate(v string) *CampaignResponse { - s.LastModifiedDate = &v - return s + // The default push notification message for the Baidu (Baidu Cloud Push) channel. + // This message overrides the default push notification message (DefaultPushNotificationMessage). + BaiduMessage *BaiduMessage `type:"structure"` + + // The default message body for all channels. + DefaultMessage *DefaultMessage `type:"structure"` + + // The default push notification message for all push notification channels. + DefaultPushNotificationMessage *DefaultPushNotificationMessage `type:"structure"` + + // The default message for the email channel. This message overrides the default + // message (DefaultMessage). + EmailMessage *EmailMessage `type:"structure"` + + // The default push notification message for the GCM channel, which is used + // to send notifications through the Firebase Cloud Messaging (FCM), formerly + // Google Cloud Messaging (GCM), service. This message overrides the default + // push notification message (DefaultPushNotificationMessage). + GCMMessage *GCMMessage `type:"structure"` + + // The default message for the SMS channel. This message overrides the default + // message (DefaultMessage). + SMSMessage *SMSMessage `type:"structure"` + + // The default message for the voice channel. This message overrides the default + // message (DefaultMessage). + VoiceMessage *VoiceMessage `type:"structure"` } -// SetLimits sets the Limits field's value. -func (s *CampaignResponse) SetLimits(v *CampaignLimits) *CampaignResponse { - s.Limits = v - return s +// String returns the string representation +func (s DirectMessageConfiguration) String() string { + return awsutil.Prettify(s) } -// SetMessageConfiguration sets the MessageConfiguration field's value. -func (s *CampaignResponse) SetMessageConfiguration(v *MessageConfiguration) *CampaignResponse { - s.MessageConfiguration = v - return s +// GoString returns the string representation +func (s DirectMessageConfiguration) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *CampaignResponse) SetName(v string) *CampaignResponse { - s.Name = &v +// SetADMMessage sets the ADMMessage field's value. +func (s *DirectMessageConfiguration) SetADMMessage(v *ADMMessage) *DirectMessageConfiguration { + s.ADMMessage = v return s } -// SetSchedule sets the Schedule field's value. -func (s *CampaignResponse) SetSchedule(v *Schedule) *CampaignResponse { - s.Schedule = v +// SetAPNSMessage sets the APNSMessage field's value. +func (s *DirectMessageConfiguration) SetAPNSMessage(v *APNSMessage) *DirectMessageConfiguration { + s.APNSMessage = v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *CampaignResponse) SetSegmentId(v string) *CampaignResponse { - s.SegmentId = &v +// SetBaiduMessage sets the BaiduMessage field's value. +func (s *DirectMessageConfiguration) SetBaiduMessage(v *BaiduMessage) *DirectMessageConfiguration { + s.BaiduMessage = v return s } -// SetSegmentVersion sets the SegmentVersion field's value. -func (s *CampaignResponse) SetSegmentVersion(v int64) *CampaignResponse { - s.SegmentVersion = &v +// SetDefaultMessage sets the DefaultMessage field's value. +func (s *DirectMessageConfiguration) SetDefaultMessage(v *DefaultMessage) *DirectMessageConfiguration { + s.DefaultMessage = v return s } -// SetState sets the State field's value. 
-func (s *CampaignResponse) SetState(v *CampaignState) *CampaignResponse { - s.State = v +// SetDefaultPushNotificationMessage sets the DefaultPushNotificationMessage field's value. +func (s *DirectMessageConfiguration) SetDefaultPushNotificationMessage(v *DefaultPushNotificationMessage) *DirectMessageConfiguration { + s.DefaultPushNotificationMessage = v return s } -// SetTags sets the Tags field's value. -func (s *CampaignResponse) SetTags(v map[string]*string) *CampaignResponse { - s.Tags = v +// SetEmailMessage sets the EmailMessage field's value. +func (s *DirectMessageConfiguration) SetEmailMessage(v *EmailMessage) *DirectMessageConfiguration { + s.EmailMessage = v return s } -// SetTreatmentDescription sets the TreatmentDescription field's value. -func (s *CampaignResponse) SetTreatmentDescription(v string) *CampaignResponse { - s.TreatmentDescription = &v +// SetGCMMessage sets the GCMMessage field's value. +func (s *DirectMessageConfiguration) SetGCMMessage(v *GCMMessage) *DirectMessageConfiguration { + s.GCMMessage = v return s } -// SetTreatmentName sets the TreatmentName field's value. -func (s *CampaignResponse) SetTreatmentName(v string) *CampaignResponse { - s.TreatmentName = &v +// SetSMSMessage sets the SMSMessage field's value. +func (s *DirectMessageConfiguration) SetSMSMessage(v *SMSMessage) *DirectMessageConfiguration { + s.SMSMessage = v return s } -// SetVersion sets the Version field's value. -func (s *CampaignResponse) SetVersion(v int64) *CampaignResponse { - s.Version = &v +// SetVoiceMessage sets the VoiceMessage field's value. +func (s *DirectMessageConfiguration) SetVoiceMessage(v *VoiceMessage) *DirectMessageConfiguration { + s.VoiceMessage = v return s } -// SMS message configuration. -type CampaignSmsMessage struct { +// Specifies the status and settings of the email channel for an application. +type EmailChannelRequest struct { _ struct{} `type:"structure"` - // The SMS text body. - Body *string `type:"string"` - - // Is this is a transactional SMS message, otherwise a promotional message. - MessageType *string `type:"string" enum:"MessageType"` - - // Sender ID of sent message. - SenderId *string `type:"string"` -} - -// String returns the string representation -func (s CampaignSmsMessage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CampaignSmsMessage) GoString() string { - return s.String() -} - -// SetBody sets the Body field's value. -func (s *CampaignSmsMessage) SetBody(v string) *CampaignSmsMessage { - s.Body = &v - return s -} + // The configuration set that you want to apply to email that you send through + // the channel by using the Amazon Pinpoint Email API (emailAPIreference.html). + ConfigurationSet *string `type:"string"` -// SetMessageType sets the MessageType field's value. -func (s *CampaignSmsMessage) SetMessageType(v string) *CampaignSmsMessage { - s.MessageType = &v - return s -} + // Specifies whether to enable the email channel for the application. + Enabled *bool `type:"boolean"` -// SetSenderId sets the SenderId field's value. -func (s *CampaignSmsMessage) SetSenderId(v string) *CampaignSmsMessage { - s.SenderId = &v - return s -} + // The verified email address that you want to send email from when you send + // email through the channel. 
+ // + // FromAddress is a required field + FromAddress *string `type:"string" required:"true"` -// State of the Campaign -type CampaignState struct { - _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple + // Email Service (Amazon SES), that you want to use when you send email through + // the channel. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` - // The status of the campaign, or the status of a treatment that belongs to - // an A/B test campaign.Valid values: SCHEDULED, EXECUTING, PENDING_NEXT_RUN, - // COMPLETED, PAUSED - CampaignStatus *string `type:"string" enum:"CampaignStatus"` + // The ARN of the AWS Identity and Access Management (IAM) role that you want + // Amazon Pinpoint to use when it submits email-related event data for the channel. + RoleArn *string `type:"string"` } // String returns the string representation -func (s CampaignState) String() string { +func (s EmailChannelRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CampaignState) GoString() string { +func (s EmailChannelRequest) GoString() string { return s.String() } -// SetCampaignStatus sets the CampaignStatus field's value. -func (s *CampaignState) SetCampaignStatus(v string) *CampaignState { - s.CampaignStatus = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *EmailChannelRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EmailChannelRequest"} + if s.FromAddress == nil { + invalidParams.Add(request.NewErrParamRequired("FromAddress")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// List of available campaigns. -type CampaignsResponse struct { - _ struct{} `type:"structure"` - - // A list of campaigns. - Item []*CampaignResponse `type:"list"` - - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. - NextToken *string `type:"string"` +// SetConfigurationSet sets the ConfigurationSet field's value. +func (s *EmailChannelRequest) SetConfigurationSet(v string) *EmailChannelRequest { + s.ConfigurationSet = &v + return s } -// String returns the string representation -func (s CampaignsResponse) String() string { - return awsutil.Prettify(s) +// SetEnabled sets the Enabled field's value. +func (s *EmailChannelRequest) SetEnabled(v bool) *EmailChannelRequest { + s.Enabled = &v + return s } -// GoString returns the string representation -func (s CampaignsResponse) GoString() string { - return s.String() +// SetFromAddress sets the FromAddress field's value. +func (s *EmailChannelRequest) SetFromAddress(v string) *EmailChannelRequest { + s.FromAddress = &v + return s } -// SetItem sets the Item field's value. -func (s *CampaignsResponse) SetItem(v []*CampaignResponse) *CampaignsResponse { - s.Item = v +// SetIdentity sets the Identity field's value. +func (s *EmailChannelRequest) SetIdentity(v string) *EmailChannelRequest { + s.Identity = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *CampaignsResponse) SetNextToken(v string) *CampaignsResponse { - s.NextToken = &v +// SetRoleArn sets the RoleArn field's value. +func (s *EmailChannelRequest) SetRoleArn(v string) *EmailChannelRequest { + s.RoleArn = &v return s } -// Base definition for channel response. 
-type ChannelResponse struct { +// Provides information about the status and settings of the email channel for +// an application. +type EmailChannelResponse struct { _ struct{} `type:"structure"` - // Application id + // The unique identifier for the application that the email channel applies + // to. ApplicationId *string `type:"string"` - // When was this segment created + // The configuration set that's applied to email that's sent through the channel + // by using the Amazon Pinpoint Email API (emailAPIreference.html). + ConfigurationSet *string `type:"string"` + + // The date and time, in ISO 8601 format, when the email channel was enabled. CreationDate *string `type:"string"` - // If the channel is enabled for sending messages. + // Specifies whether the email channel is enabled for the application. Enabled *bool `type:"boolean"` - // Not used. Retained for backwards compatibility. + // The verified email address that you send email from when you send email through + // the channel. + FromAddress *string `type:"string"` + + // (Not used) This property is retained only for backward compatibility. HasCredential *bool `type:"boolean"` - // Channel ID. Not used, only for backwards compatibility. + // (Deprecated) An identifier for the email channel. This property is retained + // only for backward compatibility. Id *string `type:"string"` - // Is this channel archived + // The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple + // Email Service (Amazon SES), that you use when you send email through the + // channel. + Identity *string `type:"string"` + + // Specifies whether the email channel is archived. IsArchived *bool `type:"boolean"` - // Who made the last change + // The user who last modified the email channel. LastModifiedBy *string `type:"string"` - // Last date this was updated + // The date and time, in ISO 8601 format, when the email channel was last modified. LastModifiedDate *string `type:"string"` - // Version of channel + // The maximum number of emails that you can send through the channel each second. + MessagesPerSecond *int64 `type:"integer"` + + // The type of messaging or notification platform for the channel. For the email + // channel, this value is EMAIL. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` + + // The ARN of the AWS Identity and Access Management (IAM) role that Amazon + // Pinpoint uses to submit email-related event data for the channel. + RoleArn *string `type:"string"` + + // The current version of the email channel. Version *int64 `type:"integer"` } // String returns the string representation -func (s ChannelResponse) String() string { +func (s EmailChannelResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ChannelResponse) GoString() string { +func (s EmailChannelResponse) GoString() string { return s.String() } // SetApplicationId sets the ApplicationId field's value. -func (s *ChannelResponse) SetApplicationId(v string) *ChannelResponse { +func (s *EmailChannelResponse) SetApplicationId(v string) *EmailChannelResponse { s.ApplicationId = &v return s } +// SetConfigurationSet sets the ConfigurationSet field's value. +func (s *EmailChannelResponse) SetConfigurationSet(v string) *EmailChannelResponse { + s.ConfigurationSet = &v + return s +} + // SetCreationDate sets the CreationDate field's value. 
-func (s *ChannelResponse) SetCreationDate(v string) *ChannelResponse { +func (s *EmailChannelResponse) SetCreationDate(v string) *EmailChannelResponse { s.CreationDate = &v return s } // SetEnabled sets the Enabled field's value. -func (s *ChannelResponse) SetEnabled(v bool) *ChannelResponse { +func (s *EmailChannelResponse) SetEnabled(v bool) *EmailChannelResponse { s.Enabled = &v return s } +// SetFromAddress sets the FromAddress field's value. +func (s *EmailChannelResponse) SetFromAddress(v string) *EmailChannelResponse { + s.FromAddress = &v + return s +} + // SetHasCredential sets the HasCredential field's value. -func (s *ChannelResponse) SetHasCredential(v bool) *ChannelResponse { +func (s *EmailChannelResponse) SetHasCredential(v bool) *EmailChannelResponse { s.HasCredential = &v return s } // SetId sets the Id field's value. -func (s *ChannelResponse) SetId(v string) *ChannelResponse { +func (s *EmailChannelResponse) SetId(v string) *EmailChannelResponse { s.Id = &v return s } +// SetIdentity sets the Identity field's value. +func (s *EmailChannelResponse) SetIdentity(v string) *EmailChannelResponse { + s.Identity = &v + return s +} + // SetIsArchived sets the IsArchived field's value. -func (s *ChannelResponse) SetIsArchived(v bool) *ChannelResponse { +func (s *EmailChannelResponse) SetIsArchived(v bool) *EmailChannelResponse { s.IsArchived = &v return s } // SetLastModifiedBy sets the LastModifiedBy field's value. -func (s *ChannelResponse) SetLastModifiedBy(v string) *ChannelResponse { +func (s *EmailChannelResponse) SetLastModifiedBy(v string) *EmailChannelResponse { s.LastModifiedBy = &v return s } // SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *ChannelResponse) SetLastModifiedDate(v string) *ChannelResponse { +func (s *EmailChannelResponse) SetLastModifiedDate(v string) *EmailChannelResponse { s.LastModifiedDate = &v return s } -// SetVersion sets the Version field's value. -func (s *ChannelResponse) SetVersion(v int64) *ChannelResponse { - s.Version = &v +// SetMessagesPerSecond sets the MessagesPerSecond field's value. +func (s *EmailChannelResponse) SetMessagesPerSecond(v int64) *EmailChannelResponse { + s.MessagesPerSecond = &v return s } -// Get channels definition -type ChannelsResponse struct { - _ struct{} `type:"structure"` - - // A map of channels, with the ChannelType as the key and the Channel as the - // value. - Channels map[string]*ChannelResponse `type:"map"` -} - -// String returns the string representation -func (s ChannelsResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ChannelsResponse) GoString() string { - return s.String() -} - -// SetChannels sets the Channels field's value. -func (s *ChannelsResponse) SetChannels(v map[string]*ChannelResponse) *ChannelsResponse { - s.Channels = v +// SetPlatform sets the Platform field's value. +func (s *EmailChannelResponse) SetPlatform(v string) *EmailChannelResponse { + s.Platform = &v return s } -type CreateAppInput struct { - _ struct{} `type:"structure" payload:"CreateApplicationRequest"` - - // Application Request. 
- // - // CreateApplicationRequest is a required field - CreateApplicationRequest *CreateApplicationRequest `type:"structure" required:"true"` -} - -// String returns the string representation -func (s CreateAppInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateAppInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateAppInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAppInput"} - if s.CreateApplicationRequest == nil { - invalidParams.Add(request.NewErrParamRequired("CreateApplicationRequest")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRoleArn sets the RoleArn field's value. +func (s *EmailChannelResponse) SetRoleArn(v string) *EmailChannelResponse { + s.RoleArn = &v + return s } -// SetCreateApplicationRequest sets the CreateApplicationRequest field's value. -func (s *CreateAppInput) SetCreateApplicationRequest(v *CreateApplicationRequest) *CreateAppInput { - s.CreateApplicationRequest = v +// SetVersion sets the Version field's value. +func (s *EmailChannelResponse) SetVersion(v int64) *EmailChannelResponse { + s.Version = &v return s } -type CreateAppOutput struct { - _ struct{} `type:"structure" payload:"ApplicationResponse"` +// Specifies the default settings and content for a one-time email message that's +// sent directly to an endpoint. +type EmailMessage struct { + _ struct{} `type:"structure"` - // Application Response. - // - // ApplicationResponse is a required field - ApplicationResponse *ApplicationResponse `type:"structure" required:"true"` -} + // The body of the email message. + Body *string `type:"string"` -// String returns the string representation -func (s CreateAppOutput) String() string { - return awsutil.Prettify(s) -} + // The email address to forward bounces and complaints to, if feedback forwarding + // is enabled. + FeedbackForwardingAddress *string `type:"string"` -// GoString returns the string representation -func (s CreateAppOutput) GoString() string { - return s.String() -} + // The verified email address to send the email message from. The default value + // is the FromAddress specified for the email channel. + FromAddress *string `type:"string"` -// SetApplicationResponse sets the ApplicationResponse field's value. -func (s *CreateAppOutput) SetApplicationResponse(v *ApplicationResponse) *CreateAppOutput { - s.ApplicationResponse = v - return s -} + // The email message, represented as a raw MIME message. + RawEmail *RawEmail `type:"structure"` -// Application Request. -type CreateApplicationRequest struct { - _ struct{} `type:"structure"` + // The reply-to email address(es) for the email message. If a recipient replies + // to the email, each reply-to address receives the reply. + ReplyToAddresses []*string `type:"list"` - // The display name of the application. Used in the Amazon Pinpoint console. - Name *string `type:"string"` + // The email message, composed of a subject, a text part, and an HTML part. + SimpleEmail *SimpleEmail `type:"structure"` - // The Tags for the app. - Tags map[string]*string `locationName:"tags" type:"map"` + // The default message variables to use in the email message. You can override + // the default variables with individual address variables. 
+ Substitutions map[string][]*string `type:"map"` } // String returns the string representation -func (s CreateApplicationRequest) String() string { +func (s EmailMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateApplicationRequest) GoString() string { +func (s EmailMessage) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *CreateApplicationRequest) SetName(v string) *CreateApplicationRequest { - s.Name = &v +// SetBody sets the Body field's value. +func (s *EmailMessage) SetBody(v string) *EmailMessage { + s.Body = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateApplicationRequest) SetTags(v map[string]*string) *CreateApplicationRequest { - s.Tags = v +// SetFeedbackForwardingAddress sets the FeedbackForwardingAddress field's value. +func (s *EmailMessage) SetFeedbackForwardingAddress(v string) *EmailMessage { + s.FeedbackForwardingAddress = &v return s } -type CreateCampaignInput struct { - _ struct{} `type:"structure" payload:"WriteCampaignRequest"` - - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - - // Used to create a campaign. - // - // WriteCampaignRequest is a required field - WriteCampaignRequest *WriteCampaignRequest `type:"structure" required:"true"` +// SetFromAddress sets the FromAddress field's value. +func (s *EmailMessage) SetFromAddress(v string) *EmailMessage { + s.FromAddress = &v + return s } -// String returns the string representation -func (s CreateCampaignInput) String() string { - return awsutil.Prettify(s) +// SetRawEmail sets the RawEmail field's value. +func (s *EmailMessage) SetRawEmail(v *RawEmail) *EmailMessage { + s.RawEmail = v + return s } -// GoString returns the string representation -func (s CreateCampaignInput) GoString() string { - return s.String() +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *EmailMessage) SetReplyToAddresses(v []*string) *EmailMessage { + s.ReplyToAddresses = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCampaignInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCampaignInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.WriteCampaignRequest == nil { - invalidParams.Add(request.NewErrParamRequired("WriteCampaignRequest")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSimpleEmail sets the SimpleEmail field's value. +func (s *EmailMessage) SetSimpleEmail(v *SimpleEmail) *EmailMessage { + s.SimpleEmail = v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *CreateCampaignInput) SetApplicationId(v string) *CreateCampaignInput { - s.ApplicationId = &v +// SetSubstitutions sets the Substitutions field's value. +func (s *EmailMessage) SetSubstitutions(v map[string][]*string) *EmailMessage { + s.Substitutions = v return s } -// SetWriteCampaignRequest sets the WriteCampaignRequest field's value. -func (s *CreateCampaignInput) SetWriteCampaignRequest(v *WriteCampaignRequest) *CreateCampaignInput { - s.WriteCampaignRequest = v - return s -} +// Specifies the settings for an email activity in a journey. 
This type of activity +// sends an email message to participants. +type EmailMessageActivity struct { + _ struct{} `type:"structure"` -type CreateCampaignOutput struct { - _ struct{} `type:"structure" payload:"CampaignResponse"` + // The "From" address to use for the message. + MessageConfig *JourneyEmailMessage `type:"structure"` - // Campaign definition - // - // CampaignResponse is a required field - CampaignResponse *CampaignResponse `type:"structure" required:"true"` + // The unique identifier for the next activity to perform, after the message + // is sent. + NextActivity *string `type:"string"` + + // The name of the email template to use for the message. + TemplateName *string `type:"string"` } // String returns the string representation -func (s CreateCampaignOutput) String() string { +func (s EmailMessageActivity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCampaignOutput) GoString() string { +func (s EmailMessageActivity) GoString() string { return s.String() } -// SetCampaignResponse sets the CampaignResponse field's value. -func (s *CreateCampaignOutput) SetCampaignResponse(v *CampaignResponse) *CreateCampaignOutput { - s.CampaignResponse = v +// SetMessageConfig sets the MessageConfig field's value. +func (s *EmailMessageActivity) SetMessageConfig(v *JourneyEmailMessage) *EmailMessageActivity { + s.MessageConfig = v return s } -type CreateExportJobInput struct { - _ struct{} `type:"structure" payload:"ExportJobRequest"` +// SetNextActivity sets the NextActivity field's value. +func (s *EmailMessageActivity) SetNextActivity(v string) *EmailMessageActivity { + s.NextActivity = &v + return s +} - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` +// SetTemplateName sets the TemplateName field's value. +func (s *EmailMessageActivity) SetTemplateName(v string) *EmailMessageActivity { + s.TemplateName = &v + return s +} - // Export job request. - // - // ExportJobRequest is a required field - ExportJobRequest *ExportJobRequest `type:"structure" required:"true"` +// Specifies the content and settings for a message template that can be used +// in messages that are sent through the email channel. +type EmailTemplateRequest struct { + _ struct{} `type:"structure"` + + // The message body, in HTML format, to use in email messages that are based + // on the message template. We recommend using HTML format for email clients + // that support HTML. You can include links, formatted text, and more in an + // HTML message. + HtmlPart *string `type:"string"` + + // The subject line, or title, to use in email messages that are based on the + // message template. + Subject *string `type:"string"` + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The message body, in text format, to use in email messages that are based + // on the message template. We recommend using text format for email clients + // that don't support HTML and clients that are connected to high-latency networks, + // such as mobile devices. 
+ TextPart *string `type:"string"` } // String returns the string representation -func (s CreateExportJobInput) String() string { +func (s EmailTemplateRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateExportJobInput) GoString() string { +func (s EmailTemplateRequest) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateExportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateExportJobInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.ExportJobRequest == nil { - invalidParams.Add(request.NewErrParamRequired("ExportJobRequest")) - } +// SetHtmlPart sets the HtmlPart field's value. +func (s *EmailTemplateRequest) SetHtmlPart(v string) *EmailTemplateRequest { + s.HtmlPart = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSubject sets the Subject field's value. +func (s *EmailTemplateRequest) SetSubject(v string) *EmailTemplateRequest { + s.Subject = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *CreateExportJobInput) SetApplicationId(v string) *CreateExportJobInput { - s.ApplicationId = &v +// SetTags sets the Tags field's value. +func (s *EmailTemplateRequest) SetTags(v map[string]*string) *EmailTemplateRequest { + s.Tags = v return s } -// SetExportJobRequest sets the ExportJobRequest field's value. -func (s *CreateExportJobInput) SetExportJobRequest(v *ExportJobRequest) *CreateExportJobInput { - s.ExportJobRequest = v +// SetTextPart sets the TextPart field's value. +func (s *EmailTemplateRequest) SetTextPart(v string) *EmailTemplateRequest { + s.TextPart = &v return s } -type CreateExportJobOutput struct { - _ struct{} `type:"structure" payload:"ExportJobResponse"` +// Provides information about the content and settings for a message template +// that can be used in messages that are sent through the email channel. +type EmailTemplateResponse struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the message template. + Arn *string `type:"string"` - // Export job response. + // The date when the message template was created. // - // ExportJobResponse is a required field - ExportJobResponse *ExportJobResponse `type:"structure" required:"true"` -} + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` -// String returns the string representation -func (s CreateExportJobOutput) String() string { - return awsutil.Prettify(s) -} + // The message body, in HTML format, that's used in email messages that are + // based on the message template. + HtmlPart *string `type:"string"` -// GoString returns the string representation -func (s CreateExportJobOutput) GoString() string { - return s.String() -} + // The date when the message template was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` -// SetExportJobResponse sets the ExportJobResponse field's value. -func (s *CreateExportJobOutput) SetExportJobResponse(v *ExportJobResponse) *CreateExportJobOutput { - s.ExportJobResponse = v - return s -} + // The subject line, or title, that's used in email messages that are based + // on the message template. 
+ Subject *string `type:"string"` -type CreateImportJobInput struct { - _ struct{} `type:"structure" payload:"ImportJobRequest"` + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the message template. Each tag consists of a required tag + // key and an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The name of the message template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` - // Import job request. + // The type of channel that the message template is designed for. For an email + // template, this value is EMAIL. // - // ImportJobRequest is a required field - ImportJobRequest *ImportJobRequest `type:"structure" required:"true"` + // TemplateType is a required field + TemplateType *string `type:"string" required:"true" enum:"TemplateType"` + + // The message body, in text format, that's used in email messages that are + // based on the message template. + TextPart *string `type:"string"` } // String returns the string representation -func (s CreateImportJobInput) String() string { +func (s EmailTemplateResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateImportJobInput) GoString() string { +func (s EmailTemplateResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateImportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateImportJobInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.ImportJobRequest == nil { - invalidParams.Add(request.NewErrParamRequired("ImportJobRequest")) - } +// SetArn sets the Arn field's value. +func (s *EmailTemplateResponse) SetArn(v string) *EmailTemplateResponse { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreationDate sets the CreationDate field's value. +func (s *EmailTemplateResponse) SetCreationDate(v string) *EmailTemplateResponse { + s.CreationDate = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *CreateImportJobInput) SetApplicationId(v string) *CreateImportJobInput { - s.ApplicationId = &v +// SetHtmlPart sets the HtmlPart field's value. +func (s *EmailTemplateResponse) SetHtmlPart(v string) *EmailTemplateResponse { + s.HtmlPart = &v return s } -// SetImportJobRequest sets the ImportJobRequest field's value. -func (s *CreateImportJobInput) SetImportJobRequest(v *ImportJobRequest) *CreateImportJobInput { - s.ImportJobRequest = v +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *EmailTemplateResponse) SetLastModifiedDate(v string) *EmailTemplateResponse { + s.LastModifiedDate = &v return s } -type CreateImportJobOutput struct { - _ struct{} `type:"structure" payload:"ImportJobResponse"` +// SetSubject sets the Subject field's value. +func (s *EmailTemplateResponse) SetSubject(v string) *EmailTemplateResponse { + s.Subject = &v + return s +} - // Import job response. 
- // - // ImportJobResponse is a required field - ImportJobResponse *ImportJobResponse `type:"structure" required:"true"` +// SetTags sets the Tags field's value. +func (s *EmailTemplateResponse) SetTags(v map[string]*string) *EmailTemplateResponse { + s.Tags = v + return s } -// String returns the string representation -func (s CreateImportJobOutput) String() string { - return awsutil.Prettify(s) +// SetTemplateName sets the TemplateName field's value. +func (s *EmailTemplateResponse) SetTemplateName(v string) *EmailTemplateResponse { + s.TemplateName = &v + return s } -// GoString returns the string representation -func (s CreateImportJobOutput) GoString() string { - return s.String() +// SetTemplateType sets the TemplateType field's value. +func (s *EmailTemplateResponse) SetTemplateType(v string) *EmailTemplateResponse { + s.TemplateType = &v + return s } -// SetImportJobResponse sets the ImportJobResponse field's value. -func (s *CreateImportJobOutput) SetImportJobResponse(v *ImportJobResponse) *CreateImportJobOutput { - s.ImportJobResponse = v +// SetTextPart sets the TextPart field's value. +func (s *EmailTemplateResponse) SetTextPart(v string) *EmailTemplateResponse { + s.TextPart = &v return s } -type CreateSegmentInput struct { - _ struct{} `type:"structure" payload:"WriteSegmentRequest"` +// Specifies an endpoint to create or update and the settings and attributes +// to set or change for the endpoint. +type EndpointBatchItem struct { + _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The destination address for messages or push notifications that you send + // to the endpoint. The address varies by channel. For a push-notification channel, + // use the token provided by the push notification service, such as an Apple + // Push Notification service (APNs) device token or a Firebase Cloud Messaging + // (FCM) registration token. For the SMS channel, use a phone number in E.164 + // format, such as +12065550100. For the email channel, use an email address. + Address *string `type:"string"` - // Segment definition. + // One or more custom attributes that describe the endpoint by associating a + // name with an array of values. For example, the value of a custom attribute + // named Interests might be: ["science", "music", "travel"]. You can use these + // attributes as filter criteria when you create segments. // - // WriteSegmentRequest is a required field - WriteSegmentRequest *WriteSegmentRequest `type:"structure" required:"true"` + // When you define the name of a custom attribute, avoid using the following + // characters: number sign (#), colon (:), question mark (?), backslash (\), + // and slash (/). The Amazon Pinpoint console can't display attribute names + // that contain these characters. This limitation doesn't apply to attribute + // values. + Attributes map[string][]*string `type:"map"` + + // The channel to use when sending messages or push notifications to the endpoint. + ChannelType *string `type:"string" enum:"ChannelType"` + + // The demographic information for the endpoint, such as the time zone and platform. + Demographic *EndpointDemographic `type:"structure"` + + // The date and time, in ISO 8601 format, when the endpoint was created or updated. + EffectiveDate *string `type:"string"` + + // Specifies whether to send messages or push notifications to the endpoint. 
+ // Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, + // messages aren’t sent to the endpoint. + // + // Amazon Pinpoint automatically sets this value to ACTIVE when you create an + // endpoint or update an existing endpoint. Amazon Pinpoint automatically sets + // this value to INACTIVE if you update another endpoint that has the same address + // specified by the Address property. + EndpointStatus *string `type:"string"` + + // The unique identifier for the endpoint in the context of the batch. + Id *string `type:"string"` + + // The geographic information for the endpoint. + Location *EndpointLocation `type:"structure"` + + // One or more custom metrics that your app reports to Amazon Pinpoint for the + // endpoint. + Metrics map[string]*float64 `type:"map"` + + // Specifies whether the user who's associated with the endpoint has opted out + // of receiving messages and push notifications from you. Possible values are: + // ALL, the user has opted out and doesn't want to receive any messages or push + // notifications; and, NONE, the user hasn't opted out and wants to receive + // all messages and push notifications. + OptOut *string `type:"string"` + + // The unique identifier for the request to create or update the endpoint. + RequestId *string `type:"string"` + + // One or more custom user attributes that your app reports to Amazon Pinpoint + // for the user who's associated with the endpoint. + User *EndpointUser `type:"structure"` } // String returns the string representation -func (s CreateSegmentInput) String() string { +func (s EndpointBatchItem) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSegmentInput) GoString() string { +func (s EndpointBatchItem) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSegmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSegmentInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.WriteSegmentRequest == nil { - invalidParams.Add(request.NewErrParamRequired("WriteSegmentRequest")) - } +// SetAddress sets the Address field's value. +func (s *EndpointBatchItem) SetAddress(v string) *EndpointBatchItem { + s.Address = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAttributes sets the Attributes field's value. +func (s *EndpointBatchItem) SetAttributes(v map[string][]*string) *EndpointBatchItem { + s.Attributes = v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *CreateSegmentInput) SetApplicationId(v string) *CreateSegmentInput { - s.ApplicationId = &v +// SetChannelType sets the ChannelType field's value. +func (s *EndpointBatchItem) SetChannelType(v string) *EndpointBatchItem { + s.ChannelType = &v return s } -// SetWriteSegmentRequest sets the WriteSegmentRequest field's value. -func (s *CreateSegmentInput) SetWriteSegmentRequest(v *WriteSegmentRequest) *CreateSegmentInput { - s.WriteSegmentRequest = v +// SetDemographic sets the Demographic field's value. 
+func (s *EndpointBatchItem) SetDemographic(v *EndpointDemographic) *EndpointBatchItem { + s.Demographic = v return s } -type CreateSegmentOutput struct { - _ struct{} `type:"structure" payload:"SegmentResponse"` +// SetEffectiveDate sets the EffectiveDate field's value. +func (s *EndpointBatchItem) SetEffectiveDate(v string) *EndpointBatchItem { + s.EffectiveDate = &v + return s +} - // Segment definition. - // - // SegmentResponse is a required field - SegmentResponse *SegmentResponse `type:"structure" required:"true"` +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *EndpointBatchItem) SetEndpointStatus(v string) *EndpointBatchItem { + s.EndpointStatus = &v + return s +} + +// SetId sets the Id field's value. +func (s *EndpointBatchItem) SetId(v string) *EndpointBatchItem { + s.Id = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *EndpointBatchItem) SetLocation(v *EndpointLocation) *EndpointBatchItem { + s.Location = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *EndpointBatchItem) SetMetrics(v map[string]*float64) *EndpointBatchItem { + s.Metrics = v + return s } -// String returns the string representation -func (s CreateSegmentOutput) String() string { - return awsutil.Prettify(s) +// SetOptOut sets the OptOut field's value. +func (s *EndpointBatchItem) SetOptOut(v string) *EndpointBatchItem { + s.OptOut = &v + return s } -// GoString returns the string representation -func (s CreateSegmentOutput) GoString() string { - return s.String() +// SetRequestId sets the RequestId field's value. +func (s *EndpointBatchItem) SetRequestId(v string) *EndpointBatchItem { + s.RequestId = &v + return s } -// SetSegmentResponse sets the SegmentResponse field's value. -func (s *CreateSegmentOutput) SetSegmentResponse(v *SegmentResponse) *CreateSegmentOutput { - s.SegmentResponse = v +// SetUser sets the User field's value. +func (s *EndpointBatchItem) SetUser(v *EndpointUser) *EndpointBatchItem { + s.User = v return s } -// The default message to use across all channels. -type DefaultMessage struct { +// Specifies a batch of endpoints to create or update and the settings and attributes +// to set or change for each endpoint. +type EndpointBatchRequest struct { _ struct{} `type:"structure"` - // The message body of the notification, the email body or the text message. - Body *string `type:"string"` - - // Default message substitutions. Can be overridden by individual address substitutions. - Substitutions map[string][]*string `type:"map"` + // An array that defines the endpoints to create or update and, for each endpoint, + // the property values to set or change. An array can contain a maximum of 100 + // items. + // + // Item is a required field + Item []*EndpointBatchItem `type:"list" required:"true"` } // String returns the string representation -func (s DefaultMessage) String() string { +func (s EndpointBatchRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DefaultMessage) GoString() string { +func (s EndpointBatchRequest) GoString() string { return s.String() } -// SetBody sets the Body field's value. -func (s *DefaultMessage) SetBody(v string) *DefaultMessage { - s.Body = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EndpointBatchRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EndpointBatchRequest"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSubstitutions sets the Substitutions field's value. -func (s *DefaultMessage) SetSubstitutions(v map[string][]*string) *DefaultMessage { - s.Substitutions = v +// SetItem sets the Item field's value. +func (s *EndpointBatchRequest) SetItem(v []*EndpointBatchItem) *EndpointBatchRequest { + s.Item = v return s } -// Default Push Notification Message. -type DefaultPushNotificationMessage struct { +// Specifies demographic information about an endpoint, such as the applicable +// time zone and platform. +type EndpointDemographic struct { _ struct{} `type:"structure"` - // The action that occurs if the user taps a push notification delivered by - // the campaign: OPEN_APP - Your app launches, or it becomes the foreground - // app if it has been sent to the background. This is the default action. DEEP_LINK - // - Uses deep linking features in iOS and Android to open your app and display - // a designated user interface within the app. URL - The default mobile browser - // on the user's device launches and opens a web page at the URL you specify. - // Possible values include: OPEN_APP | DEEP_LINK | URL - Action *string `type:"string" enum:"Action"` + // The version of the app that's associated with the endpoint. + AppVersion *string `type:"string"` - // The message body of the notification. - Body *string `type:"string"` + // The locale of the endpoint, in the following format: the ISO 639-1 alpha-2 + // code, followed by an underscore (_), followed by an ISO 3166-1 alpha-2 value. + Locale *string `type:"string"` - // The data payload used for a silent push. This payload is added to the notifications' - // data.pinpoint.jsonBody' object - Data map[string]*string `type:"map"` + // The manufacturer of the endpoint device, such as Apple or Samsung. + Make *string `type:"string"` - // Indicates if the message should display on the recipient's device. You can - // use silent pushes for remote configuration or to deliver messages to in-app - // notification centers. - SilentPush *bool `type:"boolean"` + // The model name or number of the endpoint device, such as iPhone. + Model *string `type:"string"` - // Default message substitutions. Can be overridden by individual address substitutions. - Substitutions map[string][]*string `type:"map"` + // The model version of the endpoint device. + ModelVersion *string `type:"string"` - // The message title that displays above the message on the user's device. - Title *string `type:"string"` + // The platform of the endpoint device, such as iOS or Android. + Platform *string `type:"string"` - // The URL to open in the user's mobile browser. Used if the value for Action - // is URL. - Url *string `type:"string"` + // The platform version of the endpoint device. + PlatformVersion *string `type:"string"` + + // The time zone of the endpoint, specified as a tz database name value, such + // as America/Los_Angeles. 
+ Timezone *string `type:"string"` } // String returns the string representation -func (s DefaultPushNotificationMessage) String() string { +func (s EndpointDemographic) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DefaultPushNotificationMessage) GoString() string { +func (s EndpointDemographic) GoString() string { return s.String() } -// SetAction sets the Action field's value. -func (s *DefaultPushNotificationMessage) SetAction(v string) *DefaultPushNotificationMessage { - s.Action = &v +// SetAppVersion sets the AppVersion field's value. +func (s *EndpointDemographic) SetAppVersion(v string) *EndpointDemographic { + s.AppVersion = &v return s } -// SetBody sets the Body field's value. -func (s *DefaultPushNotificationMessage) SetBody(v string) *DefaultPushNotificationMessage { - s.Body = &v +// SetLocale sets the Locale field's value. +func (s *EndpointDemographic) SetLocale(v string) *EndpointDemographic { + s.Locale = &v return s } -// SetData sets the Data field's value. -func (s *DefaultPushNotificationMessage) SetData(v map[string]*string) *DefaultPushNotificationMessage { - s.Data = v +// SetMake sets the Make field's value. +func (s *EndpointDemographic) SetMake(v string) *EndpointDemographic { + s.Make = &v return s } -// SetSilentPush sets the SilentPush field's value. -func (s *DefaultPushNotificationMessage) SetSilentPush(v bool) *DefaultPushNotificationMessage { - s.SilentPush = &v +// SetModel sets the Model field's value. +func (s *EndpointDemographic) SetModel(v string) *EndpointDemographic { + s.Model = &v return s } -// SetSubstitutions sets the Substitutions field's value. -func (s *DefaultPushNotificationMessage) SetSubstitutions(v map[string][]*string) *DefaultPushNotificationMessage { - s.Substitutions = v +// SetModelVersion sets the ModelVersion field's value. +func (s *EndpointDemographic) SetModelVersion(v string) *EndpointDemographic { + s.ModelVersion = &v return s } -// SetTitle sets the Title field's value. -func (s *DefaultPushNotificationMessage) SetTitle(v string) *DefaultPushNotificationMessage { - s.Title = &v +// SetPlatform sets the Platform field's value. +func (s *EndpointDemographic) SetPlatform(v string) *EndpointDemographic { + s.Platform = &v return s } -// SetUrl sets the Url field's value. -func (s *DefaultPushNotificationMessage) SetUrl(v string) *DefaultPushNotificationMessage { - s.Url = &v +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *EndpointDemographic) SetPlatformVersion(v string) *EndpointDemographic { + s.PlatformVersion = &v return s } -type DeleteAdmChannelInput struct { +// SetTimezone sets the Timezone field's value. +func (s *EndpointDemographic) SetTimezone(v string) *EndpointDemographic { + s.Timezone = &v + return s +} + +// Provides the status code and message that result from processing data for +// an endpoint. +type EndpointItemResponse struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The custom message that's returned in the response as a result of processing + // the endpoint data. + Message *string `type:"string"` + + // The status code that's returned in the response as a result of processing + // the endpoint data. 
+ StatusCode *int64 `type:"integer"` } // String returns the string representation -func (s DeleteAdmChannelInput) String() string { +func (s EndpointItemResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAdmChannelInput) GoString() string { +func (s EndpointItemResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAdmChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAdmChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMessage sets the Message field's value. +func (s *EndpointItemResponse) SetMessage(v string) *EndpointItemResponse { + s.Message = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteAdmChannelInput) SetApplicationId(v string) *DeleteAdmChannelInput { - s.ApplicationId = &v +// SetStatusCode sets the StatusCode field's value. +func (s *EndpointItemResponse) SetStatusCode(v int64) *EndpointItemResponse { + s.StatusCode = &v return s } -type DeleteAdmChannelOutput struct { - _ struct{} `type:"structure" payload:"ADMChannelResponse"` +// Specifies geographic information about an endpoint. +type EndpointLocation struct { + _ struct{} `type:"structure"` - // Amazon Device Messaging channel definition. - // - // ADMChannelResponse is a required field - ADMChannelResponse *ADMChannelResponse `type:"structure" required:"true"` + // The name of the city where the endpoint is located. + City *string `type:"string"` + + // The two-character code, in ISO 3166-1 alpha-2 format, for the country or + // region where the endpoint is located. For example, US for the United States. + Country *string `type:"string"` + + // The latitude coordinate of the endpoint location, rounded to one decimal + // place. + Latitude *float64 `type:"double"` + + // The longitude coordinate of the endpoint location, rounded to one decimal + // place. + Longitude *float64 `type:"double"` + + // The postal or ZIP code for the area where the endpoint is located. + PostalCode *string `type:"string"` + + // The name of the region where the endpoint is located. For locations in the + // United States, this value is the name of a state. + Region *string `type:"string"` } // String returns the string representation -func (s DeleteAdmChannelOutput) String() string { +func (s EndpointLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAdmChannelOutput) GoString() string { +func (s EndpointLocation) GoString() string { return s.String() } -// SetADMChannelResponse sets the ADMChannelResponse field's value. -func (s *DeleteAdmChannelOutput) SetADMChannelResponse(v *ADMChannelResponse) *DeleteAdmChannelOutput { - s.ADMChannelResponse = v +// SetCity sets the City field's value. +func (s *EndpointLocation) SetCity(v string) *EndpointLocation { + s.City = &v return s } -type DeleteApnsChannelInput struct { - _ struct{} `type:"structure"` - - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` +// SetCountry sets the Country field's value. 
+func (s *EndpointLocation) SetCountry(v string) *EndpointLocation { + s.Country = &v + return s } -// String returns the string representation -func (s DeleteApnsChannelInput) String() string { - return awsutil.Prettify(s) +// SetLatitude sets the Latitude field's value. +func (s *EndpointLocation) SetLatitude(v float64) *EndpointLocation { + s.Latitude = &v + return s } -// GoString returns the string representation -func (s DeleteApnsChannelInput) GoString() string { - return s.String() +// SetLongitude sets the Longitude field's value. +func (s *EndpointLocation) SetLongitude(v float64) *EndpointLocation { + s.Longitude = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteApnsChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteApnsChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetPostalCode sets the PostalCode field's value. +func (s *EndpointLocation) SetPostalCode(v string) *EndpointLocation { + s.PostalCode = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteApnsChannelInput) SetApplicationId(v string) *DeleteApnsChannelInput { - s.ApplicationId = &v +// SetRegion sets the Region field's value. +func (s *EndpointLocation) SetRegion(v string) *EndpointLocation { + s.Region = &v return s } -type DeleteApnsChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSChannelResponse"` +// Provides information about the delivery status and results of sending a message +// directly to an endpoint. +type EndpointMessageResult struct { + _ struct{} `type:"structure"` - // Apple Distribution Push Notification Service channel definition. - // - // APNSChannelResponse is a required field - APNSChannelResponse *APNSChannelResponse `type:"structure" required:"true"` -} + // The endpoint address that the message was delivered to. + Address *string `type:"string"` -// String returns the string representation -func (s DeleteApnsChannelOutput) String() string { - return awsutil.Prettify(s) -} + // The delivery status of the message. Possible values are: + // + // * DUPLICATE - The endpoint address is a duplicate of another endpoint + // address. Amazon Pinpoint won't attempt to send the message again. + // + // * OPT_OUT - The user who's associated with the endpoint has opted out + // of receiving messages from you. Amazon Pinpoint won't attempt to send + // the message again. + // + // * PERMANENT_FAILURE - An error occurred when delivering the message to + // the endpoint. Amazon Pinpoint won't attempt to send the message again. + // + // * SUCCESSFUL - The message was successfully delivered to the endpoint. + // + // * TEMPORARY_FAILURE - A temporary error occurred. Amazon Pinpoint will + // attempt to deliver the message again later. + // + // * THROTTLED - Amazon Pinpoint throttled the operation to send the message + // to the endpoint. + // + // * TIMEOUT - The message couldn't be sent within the timeout period. + // + // * UNKNOWN_FAILURE - An unknown error occurred. 
+ // + // DeliveryStatus is a required field + DeliveryStatus *string `type:"string" required:"true" enum:"DeliveryStatus"` -// GoString returns the string representation -func (s DeleteApnsChannelOutput) GoString() string { - return s.String() -} + // The unique identifier for the message that was sent. + MessageId *string `type:"string"` -// SetAPNSChannelResponse sets the APNSChannelResponse field's value. -func (s *DeleteApnsChannelOutput) SetAPNSChannelResponse(v *APNSChannelResponse) *DeleteApnsChannelOutput { - s.APNSChannelResponse = v - return s -} + // The downstream service status code for delivering the message. + // + // StatusCode is a required field + StatusCode *int64 `type:"integer" required:"true"` -type DeleteApnsSandboxChannelInput struct { - _ struct{} `type:"structure"` + // The status message for delivering the message. + StatusMessage *string `type:"string"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // For push notifications that are sent through the GCM channel, specifies whether + // the endpoint's device registration token was updated as part of delivering + // the message. + UpdatedToken *string `type:"string"` } // String returns the string representation -func (s DeleteApnsSandboxChannelInput) String() string { +func (s EndpointMessageResult) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteApnsSandboxChannelInput) GoString() string { +func (s EndpointMessageResult) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteApnsSandboxChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteApnsSandboxChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAddress sets the Address field's value. +func (s *EndpointMessageResult) SetAddress(v string) *EndpointMessageResult { + s.Address = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteApnsSandboxChannelInput) SetApplicationId(v string) *DeleteApnsSandboxChannelInput { - s.ApplicationId = &v +// SetDeliveryStatus sets the DeliveryStatus field's value. +func (s *EndpointMessageResult) SetDeliveryStatus(v string) *EndpointMessageResult { + s.DeliveryStatus = &v return s } -type DeleteApnsSandboxChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSSandboxChannelResponse"` - - // Apple Development Push Notification Service channel definition. - // - // APNSSandboxChannelResponse is a required field - APNSSandboxChannelResponse *APNSSandboxChannelResponse `type:"structure" required:"true"` +// SetMessageId sets the MessageId field's value. +func (s *EndpointMessageResult) SetMessageId(v string) *EndpointMessageResult { + s.MessageId = &v + return s } -// String returns the string representation -func (s DeleteApnsSandboxChannelOutput) String() string { - return awsutil.Prettify(s) +// SetStatusCode sets the StatusCode field's value. 
+func (s *EndpointMessageResult) SetStatusCode(v int64) *EndpointMessageResult { + s.StatusCode = &v + return s } -// GoString returns the string representation -func (s DeleteApnsSandboxChannelOutput) GoString() string { - return s.String() +// SetStatusMessage sets the StatusMessage field's value. +func (s *EndpointMessageResult) SetStatusMessage(v string) *EndpointMessageResult { + s.StatusMessage = &v + return s } -// SetAPNSSandboxChannelResponse sets the APNSSandboxChannelResponse field's value. -func (s *DeleteApnsSandboxChannelOutput) SetAPNSSandboxChannelResponse(v *APNSSandboxChannelResponse) *DeleteApnsSandboxChannelOutput { - s.APNSSandboxChannelResponse = v +// SetUpdatedToken sets the UpdatedToken field's value. +func (s *EndpointMessageResult) SetUpdatedToken(v string) *EndpointMessageResult { + s.UpdatedToken = &v return s } -type DeleteApnsVoipChannelInput struct { +// Specifies the channel type and other settings for an endpoint. +type EndpointRequest struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` -} + // The destination address for messages or push notifications that you send + // to the endpoint. The address varies by channel. For a push-notification channel, + // use the token provided by the push notification service, such as an Apple + // Push Notification service (APNs) device token or a Firebase Cloud Messaging + // (FCM) registration token. For the SMS channel, use a phone number in E.164 + // format, such as +12065550100. For the email channel, use an email address. + Address *string `type:"string"` -// String returns the string representation -func (s DeleteApnsVoipChannelInput) String() string { - return awsutil.Prettify(s) -} + // One or more custom attributes that describe the endpoint by associating a + // name with an array of values. For example, the value of a custom attribute + // named Interests might be: ["science", "music", "travel"]. You can use these + // attributes as filter criteria when you create segments. + // + // When you define the name of a custom attribute, avoid using the following + // characters: number sign (#), colon (:), question mark (?), backslash (\), + // and slash (/). The Amazon Pinpoint console can't display attribute names + // that contain these characters. This limitation doesn't apply to attribute + // values. + Attributes map[string][]*string `type:"map"` -// GoString returns the string representation -func (s DeleteApnsVoipChannelInput) GoString() string { - return s.String() -} + // The channel to use when sending messages or push notifications to the endpoint. + ChannelType *string `type:"string" enum:"ChannelType"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteApnsVoipChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteApnsVoipChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } + // The demographic information for the endpoint, such as the time zone and platform. + Demographic *EndpointDemographic `type:"structure"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // The date and time, in ISO 8601 format, when the endpoint is updated. 
+ EffectiveDate *string `type:"string"` -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteApnsVoipChannelInput) SetApplicationId(v string) *DeleteApnsVoipChannelInput { - s.ApplicationId = &v - return s -} + // Specifies whether to send messages or push notifications to the endpoint. + // Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, + // messages aren’t sent to the endpoint. + // + // Amazon Pinpoint automatically sets this value to ACTIVE when you create an + // endpoint or update an existing endpoint. Amazon Pinpoint automatically sets + // this value to INACTIVE if you update another endpoint that has the same address + // specified by the Address property. + EndpointStatus *string `type:"string"` -type DeleteApnsVoipChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSVoipChannelResponse"` + // The geographic information for the endpoint. + Location *EndpointLocation `type:"structure"` - // Apple VoIP Push Notification Service channel definition. - // - // APNSVoipChannelResponse is a required field - APNSVoipChannelResponse *APNSVoipChannelResponse `type:"structure" required:"true"` + // One or more custom metrics that your app reports to Amazon Pinpoint for the + // endpoint. + Metrics map[string]*float64 `type:"map"` + + // Specifies whether the user who's associated with the endpoint has opted out + // of receiving messages and push notifications from you. Possible values are: + // ALL, the user has opted out and doesn't want to receive any messages or push + // notifications; and, NONE, the user hasn't opted out and wants to receive + // all messages and push notifications. + OptOut *string `type:"string"` + + // The unique identifier for the most recent request to update the endpoint. + RequestId *string `type:"string"` + + // One or more custom user attributes that describe the user who's associated + // with the endpoint. + User *EndpointUser `type:"structure"` } // String returns the string representation -func (s DeleteApnsVoipChannelOutput) String() string { +func (s EndpointRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteApnsVoipChannelOutput) GoString() string { +func (s EndpointRequest) GoString() string { return s.String() } -// SetAPNSVoipChannelResponse sets the APNSVoipChannelResponse field's value. -func (s *DeleteApnsVoipChannelOutput) SetAPNSVoipChannelResponse(v *APNSVoipChannelResponse) *DeleteApnsVoipChannelOutput { - s.APNSVoipChannelResponse = v +// SetAddress sets the Address field's value. +func (s *EndpointRequest) SetAddress(v string) *EndpointRequest { + s.Address = &v return s } -type DeleteApnsVoipSandboxChannelInput struct { - _ struct{} `type:"structure"` - - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` +// SetAttributes sets the Attributes field's value. +func (s *EndpointRequest) SetAttributes(v map[string][]*string) *EndpointRequest { + s.Attributes = v + return s } -// String returns the string representation -func (s DeleteApnsVoipSandboxChannelInput) String() string { - return awsutil.Prettify(s) +// SetChannelType sets the ChannelType field's value. 
+func (s *EndpointRequest) SetChannelType(v string) *EndpointRequest { + s.ChannelType = &v + return s } -// GoString returns the string representation -func (s DeleteApnsVoipSandboxChannelInput) GoString() string { - return s.String() +// SetDemographic sets the Demographic field's value. +func (s *EndpointRequest) SetDemographic(v *EndpointDemographic) *EndpointRequest { + s.Demographic = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteApnsVoipSandboxChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteApnsVoipSandboxChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEffectiveDate sets the EffectiveDate field's value. +func (s *EndpointRequest) SetEffectiveDate(v string) *EndpointRequest { + s.EffectiveDate = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteApnsVoipSandboxChannelInput) SetApplicationId(v string) *DeleteApnsVoipSandboxChannelInput { - s.ApplicationId = &v +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *EndpointRequest) SetEndpointStatus(v string) *EndpointRequest { + s.EndpointStatus = &v return s } -type DeleteApnsVoipSandboxChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSVoipSandboxChannelResponse"` +// SetLocation sets the Location field's value. +func (s *EndpointRequest) SetLocation(v *EndpointLocation) *EndpointRequest { + s.Location = v + return s +} - // Apple VoIP Developer Push Notification Service channel definition. - // - // APNSVoipSandboxChannelResponse is a required field - APNSVoipSandboxChannelResponse *APNSVoipSandboxChannelResponse `type:"structure" required:"true"` +// SetMetrics sets the Metrics field's value. +func (s *EndpointRequest) SetMetrics(v map[string]*float64) *EndpointRequest { + s.Metrics = v + return s } -// String returns the string representation -func (s DeleteApnsVoipSandboxChannelOutput) String() string { - return awsutil.Prettify(s) +// SetOptOut sets the OptOut field's value. +func (s *EndpointRequest) SetOptOut(v string) *EndpointRequest { + s.OptOut = &v + return s } -// GoString returns the string representation -func (s DeleteApnsVoipSandboxChannelOutput) GoString() string { - return s.String() +// SetRequestId sets the RequestId field's value. +func (s *EndpointRequest) SetRequestId(v string) *EndpointRequest { + s.RequestId = &v + return s } -// SetAPNSVoipSandboxChannelResponse sets the APNSVoipSandboxChannelResponse field's value. -func (s *DeleteApnsVoipSandboxChannelOutput) SetAPNSVoipSandboxChannelResponse(v *APNSVoipSandboxChannelResponse) *DeleteApnsVoipSandboxChannelOutput { - s.APNSVoipSandboxChannelResponse = v +// SetUser sets the User field's value. +func (s *EndpointRequest) SetUser(v *EndpointUser) *EndpointRequest { + s.User = v return s } -type DeleteAppInput struct { +// Provides information about the channel type and other settings for an endpoint. 
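For context, the generated fluent setters above compose naturally; here is a minimal sketch (illustrative, not part of the vendored file) of registering an endpoint through this service's UpdateEndpoint operation, assuming the standard aws, session, and pinpoint packages from this SDK. The application ID, endpoint ID, and device token are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// A shared session; the region choice is illustrative.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := pinpoint.New(sess)

	// Build the EndpointRequest with the generated fluent setters shown above.
	req := (&pinpoint.EndpointRequest{}).
		SetChannelType(pinpoint.ChannelTypeGcm).
		SetAddress("example-fcm-registration-token"). // placeholder token
		SetOptOut("NONE").
		SetDemographic((&pinpoint.EndpointDemographic{}).
			SetPlatform("android").
			SetTimezone("America/Los_Angeles")).
		SetLocation((&pinpoint.EndpointLocation{}).
			SetCountry("US"))

	out, err := svc.UpdateEndpoint(&pinpoint.UpdateEndpointInput{
		ApplicationId:   aws.String("my-app-id"),  // placeholder
		EndpointId:      aws.String("endpoint-1"), // placeholder
		EndpointRequest: req,
	})
	if err != nil {
		fmt.Println("UpdateEndpoint failed:", err)
		return
	}
	fmt.Println(out)
}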
+type EndpointResponse struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteAppInput) String() string { - return awsutil.Prettify(s) -} + // The destination address for messages or push notifications that you send + // to the endpoint. The address varies by channel. For example, the address + // for a push-notification channel is typically the token provided by a push + // notification service, such as an Apple Push Notification service (APNs) device + // token or a Firebase Cloud Messaging (FCM) registration token. The address + // for the SMS channel is a phone number in E.164 format, such as +12065550100. + // The address for the email channel is an email address. + Address *string `type:"string"` -// GoString returns the string representation -func (s DeleteAppInput) GoString() string { - return s.String() -} + // The unique identifier for the application that's associated with the endpoint. + ApplicationId *string `type:"string"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAppInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAppInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } + // One or more custom attributes that describe the endpoint by associating a + // name with an array of values. For example, the value of a custom attribute + // named Interests might be: ["science", "music", "travel"]. You can use these + // attributes as filter criteria when you create segments. + Attributes map[string][]*string `type:"map"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // The channel that's used when sending messages or push notifications to the + // endpoint. + ChannelType *string `type:"string" enum:"ChannelType"` -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteAppInput) SetApplicationId(v string) *DeleteAppInput { - s.ApplicationId = &v - return s -} + // A number from 0-99 that represents the cohort that the endpoint is assigned + // to. Endpoints are grouped into cohorts randomly, and each cohort contains + // approximately 1 percent of the endpoints for an application. Amazon Pinpoint + // assigns cohorts to the holdout or treatment allocations for campaigns. + CohortId *string `type:"string"` -type DeleteAppOutput struct { - _ struct{} `type:"structure" payload:"ApplicationResponse"` + // The date and time, in ISO 8601 format, when the endpoint was created. + CreationDate *string `type:"string"` + + // The demographic information for the endpoint, such as the time zone and platform. + Demographic *EndpointDemographic `type:"structure"` + + // The date and time, in ISO 8601 format, when the endpoint was last updated. + EffectiveDate *string `type:"string"` - // Application Response. + // Specifies whether messages or push notifications are sent to the endpoint. + // Possible values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, + // messages aren’t sent to the endpoint. 
// - // ApplicationResponse is a required field - ApplicationResponse *ApplicationResponse `type:"structure" required:"true"` -} + // Amazon Pinpoint automatically sets this value to ACTIVE when you create an + // endpoint or update an existing endpoint. Amazon Pinpoint automatically sets + // this value to INACTIVE if you update another endpoint that has the same address + // specified by the Address property. + EndpointStatus *string `type:"string"` -// String returns the string representation -func (s DeleteAppOutput) String() string { - return awsutil.Prettify(s) -} + // The unique identifier that you assigned to the endpoint. The identifier should + // be a globally unique identifier (GUID) to ensure that it doesn't conflict + // with other endpoint identifiers that are associated with the application. + Id *string `type:"string"` -// GoString returns the string representation -func (s DeleteAppOutput) GoString() string { - return s.String() -} + // The geographic information for the endpoint. + Location *EndpointLocation `type:"structure"` -// SetApplicationResponse sets the ApplicationResponse field's value. -func (s *DeleteAppOutput) SetApplicationResponse(v *ApplicationResponse) *DeleteAppOutput { - s.ApplicationResponse = v - return s -} + // One or more custom metrics that your app reports to Amazon Pinpoint for the + // endpoint. + Metrics map[string]*float64 `type:"map"` -type DeleteBaiduChannelInput struct { - _ struct{} `type:"structure"` + // Specifies whether the user who's associated with the endpoint has opted out + // of receiving messages and push notifications from you. Possible values are: + // ALL, the user has opted out and doesn't want to receive any messages or push + // notifications; and, NONE, the user hasn't opted out and wants to receive + // all messages and push notifications. + OptOut *string `type:"string"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The unique identifier for the most recent request to update the endpoint. + RequestId *string `type:"string"` + + // One or more custom user attributes that your app reports to Amazon Pinpoint + // for the user who's associated with the endpoint. + User *EndpointUser `type:"structure"` } // String returns the string representation -func (s DeleteBaiduChannelInput) String() string { +func (s EndpointResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBaiduChannelInput) GoString() string { +func (s EndpointResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBaiduChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBaiduChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAddress sets the Address field's value. +func (s *EndpointResponse) SetAddress(v string) *EndpointResponse { + s.Address = &v + return s } // SetApplicationId sets the ApplicationId field's value. 
-func (s *DeleteBaiduChannelInput) SetApplicationId(v string) *DeleteBaiduChannelInput { +func (s *EndpointResponse) SetApplicationId(v string) *EndpointResponse { s.ApplicationId = &v return s } -type DeleteBaiduChannelOutput struct { - _ struct{} `type:"structure" payload:"BaiduChannelResponse"` - - // Baidu Cloud Messaging channel definition - // - // BaiduChannelResponse is a required field - BaiduChannelResponse *BaiduChannelResponse `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeleteBaiduChannelOutput) String() string { - return awsutil.Prettify(s) +// SetAttributes sets the Attributes field's value. +func (s *EndpointResponse) SetAttributes(v map[string][]*string) *EndpointResponse { + s.Attributes = v + return s } -// GoString returns the string representation -func (s DeleteBaiduChannelOutput) GoString() string { - return s.String() +// SetChannelType sets the ChannelType field's value. +func (s *EndpointResponse) SetChannelType(v string) *EndpointResponse { + s.ChannelType = &v + return s } -// SetBaiduChannelResponse sets the BaiduChannelResponse field's value. -func (s *DeleteBaiduChannelOutput) SetBaiduChannelResponse(v *BaiduChannelResponse) *DeleteBaiduChannelOutput { - s.BaiduChannelResponse = v +// SetCohortId sets the CohortId field's value. +func (s *EndpointResponse) SetCohortId(v string) *EndpointResponse { + s.CohortId = &v return s } -type DeleteCampaignInput struct { - _ struct{} `type:"structure"` - - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - - // CampaignId is a required field - CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` +// SetCreationDate sets the CreationDate field's value. +func (s *EndpointResponse) SetCreationDate(v string) *EndpointResponse { + s.CreationDate = &v + return s } -// String returns the string representation -func (s DeleteCampaignInput) String() string { - return awsutil.Prettify(s) +// SetDemographic sets the Demographic field's value. +func (s *EndpointResponse) SetDemographic(v *EndpointDemographic) *EndpointResponse { + s.Demographic = v + return s } -// GoString returns the string representation -func (s DeleteCampaignInput) GoString() string { - return s.String() +// SetEffectiveDate sets the EffectiveDate field's value. +func (s *EndpointResponse) SetEffectiveDate(v string) *EndpointResponse { + s.EffectiveDate = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCampaignInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCampaignInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.CampaignId == nil { - invalidParams.Add(request.NewErrParamRequired("CampaignId")) - } - if s.CampaignId != nil && len(*s.CampaignId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *EndpointResponse) SetEndpointStatus(v string) *EndpointResponse { + s.EndpointStatus = &v + return s } -// SetApplicationId sets the ApplicationId field's value. 
-func (s *DeleteCampaignInput) SetApplicationId(v string) *DeleteCampaignInput { - s.ApplicationId = &v +// SetId sets the Id field's value. +func (s *EndpointResponse) SetId(v string) *EndpointResponse { + s.Id = &v return s } -// SetCampaignId sets the CampaignId field's value. -func (s *DeleteCampaignInput) SetCampaignId(v string) *DeleteCampaignInput { - s.CampaignId = &v +// SetLocation sets the Location field's value. +func (s *EndpointResponse) SetLocation(v *EndpointLocation) *EndpointResponse { + s.Location = v return s } -type DeleteCampaignOutput struct { - _ struct{} `type:"structure" payload:"CampaignResponse"` - - // Campaign definition - // - // CampaignResponse is a required field - CampaignResponse *CampaignResponse `type:"structure" required:"true"` +// SetMetrics sets the Metrics field's value. +func (s *EndpointResponse) SetMetrics(v map[string]*float64) *EndpointResponse { + s.Metrics = v + return s } -// String returns the string representation -func (s DeleteCampaignOutput) String() string { - return awsutil.Prettify(s) +// SetOptOut sets the OptOut field's value. +func (s *EndpointResponse) SetOptOut(v string) *EndpointResponse { + s.OptOut = &v + return s } -// GoString returns the string representation -func (s DeleteCampaignOutput) GoString() string { - return s.String() +// SetRequestId sets the RequestId field's value. +func (s *EndpointResponse) SetRequestId(v string) *EndpointResponse { + s.RequestId = &v + return s } -// SetCampaignResponse sets the CampaignResponse field's value. -func (s *DeleteCampaignOutput) SetCampaignResponse(v *CampaignResponse) *DeleteCampaignOutput { - s.CampaignResponse = v +// SetUser sets the User field's value. +func (s *EndpointResponse) SetUser(v *EndpointUser) *EndpointResponse { + s.User = v return s } -type DeleteEmailChannelInput struct { +// Specifies the content, including message variables and attributes, to use +// in a message that's sent directly to an endpoint. +type EndpointSendConfiguration struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The body of the message. If specified, this value overrides the default message + // body. + BodyOverride *string `type:"string"` + + // A map of custom attributes to attach to the message for the address. For + // a push notification, this payload is added to the data.pinpoint object. For + // an email or text message, this payload is added to email/SMS delivery receipt + // event attributes. + Context map[string]*string `type:"map"` + + // The raw, JSON-formatted string to use as the payload for the message. If + // specified, this value overrides the message. + RawContent *string `type:"string"` + + // A map of the message variables to merge with the variables specified for + // the default message (DefaultMessage.Substitutions). The variables specified + // in this map take precedence over all other variables. + Substitutions map[string][]*string `type:"map"` + + // The title or subject line of the message. If specified, this value overrides + // the default message title or subject line. 
+ TitleOverride *string `type:"string"` } // String returns the string representation -func (s DeleteEmailChannelInput) String() string { +func (s EndpointSendConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEmailChannelInput) GoString() string { +func (s EndpointSendConfiguration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEmailChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEmailChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteEmailChannelInput) SetApplicationId(v string) *DeleteEmailChannelInput { - s.ApplicationId = &v +// SetBodyOverride sets the BodyOverride field's value. +func (s *EndpointSendConfiguration) SetBodyOverride(v string) *EndpointSendConfiguration { + s.BodyOverride = &v return s } -type DeleteEmailChannelOutput struct { - _ struct{} `type:"structure" payload:"EmailChannelResponse"` - - // Email Channel Response. - // - // EmailChannelResponse is a required field - EmailChannelResponse *EmailChannelResponse `type:"structure" required:"true"` +// SetContext sets the Context field's value. +func (s *EndpointSendConfiguration) SetContext(v map[string]*string) *EndpointSendConfiguration { + s.Context = v + return s } -// String returns the string representation -func (s DeleteEmailChannelOutput) String() string { - return awsutil.Prettify(s) +// SetRawContent sets the RawContent field's value. +func (s *EndpointSendConfiguration) SetRawContent(v string) *EndpointSendConfiguration { + s.RawContent = &v + return s } -// GoString returns the string representation -func (s DeleteEmailChannelOutput) GoString() string { - return s.String() +// SetSubstitutions sets the Substitutions field's value. +func (s *EndpointSendConfiguration) SetSubstitutions(v map[string][]*string) *EndpointSendConfiguration { + s.Substitutions = v + return s } -// SetEmailChannelResponse sets the EmailChannelResponse field's value. -func (s *DeleteEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *DeleteEmailChannelOutput { - s.EmailChannelResponse = v +// SetTitleOverride sets the TitleOverride field's value. +func (s *EndpointSendConfiguration) SetTitleOverride(v string) *EndpointSendConfiguration { + s.TitleOverride = &v return s } -type DeleteEndpointInput struct { +// Specifies data for one or more attributes that describe the user who's associated +// with an endpoint. +type EndpointUser struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // One or more custom attributes that describe the user by associating a name + // with an array of values. For example, the value of an attribute named Interests + // might be: ["science", "music", "travel"]. You can use these attributes as + // filter criteria when you create segments. + // + // When you define the name of a custom attribute, avoid using the following + // characters: number sign (#), colon (:), question mark (?), backslash (\), + // and slash (/). 
The Amazon Pinpoint console can't display attribute names + // that contain these characters. This limitation doesn't apply to attribute + // values. + UserAttributes map[string][]*string `type:"map"` - // EndpointId is a required field - EndpointId *string `location:"uri" locationName:"endpoint-id" type:"string" required:"true"` + // The unique identifier for the user. + UserId *string `type:"string"` } // String returns the string representation -func (s DeleteEndpointInput) String() string { +func (s EndpointUser) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEndpointInput) GoString() string { +func (s EndpointUser) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.EndpointId == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointId")) - } - if s.EndpointId != nil && len(*s.EndpointId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EndpointId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteEndpointInput) SetApplicationId(v string) *DeleteEndpointInput { - s.ApplicationId = &v +// SetUserAttributes sets the UserAttributes field's value. +func (s *EndpointUser) SetUserAttributes(v map[string][]*string) *EndpointUser { + s.UserAttributes = v return s } -// SetEndpointId sets the EndpointId field's value. -func (s *DeleteEndpointInput) SetEndpointId(v string) *DeleteEndpointInput { - s.EndpointId = &v +// SetUserId sets the UserId field's value. +func (s *EndpointUser) SetUserId(v string) *EndpointUser { + s.UserId = &v return s } -type DeleteEndpointOutput struct { - _ struct{} `type:"structure" payload:"EndpointResponse"` +// Provides information about all the endpoints that are associated with a user +// ID. +type EndpointsResponse struct { + _ struct{} `type:"structure"` - // Endpoint response + // An array of responses, one for each endpoint that's associated with the user + // ID. // - // EndpointResponse is a required field - EndpointResponse *EndpointResponse `type:"structure" required:"true"` + // Item is a required field + Item []*EndpointResponse `type:"list" required:"true"` } // String returns the string representation -func (s DeleteEndpointOutput) String() string { +func (s EndpointsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEndpointOutput) GoString() string { +func (s EndpointsResponse) GoString() string { return s.String() } -// SetEndpointResponse sets the EndpointResponse field's value. -func (s *DeleteEndpointOutput) SetEndpointResponse(v *EndpointResponse) *DeleteEndpointOutput { - s.EndpointResponse = v +// SetItem sets the Item field's value. +func (s *EndpointsResponse) SetItem(v []*EndpointResponse) *EndpointsResponse { + s.Item = v return s } -type DeleteEventStreamInput struct { +// Specifies information about an event that reports data to Amazon Pinpoint. 
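The EndpointsResponse wrapper defined above is what user-scoped lookups return; a minimal sketch (illustrative, not part of the vendored file) of listing a user's endpoints via the service's GetUserEndpoints operation follows, with placeholder identifiers.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	svc := pinpoint.New(session.Must(session.NewSession()))

	out, err := svc.GetUserEndpoints(&pinpoint.GetUserEndpointsInput{
		ApplicationId: aws.String("my-app-id"), // placeholder
		UserId:        aws.String("user-123"),  // placeholder
	})
	if err != nil {
		fmt.Println("GetUserEndpoints failed:", err)
		return
	}

	// EndpointsResponse.Item holds one EndpointResponse per endpoint
	// associated with the user ID.
	for _, ep := range out.EndpointsResponse.Item {
		fmt.Println(aws.StringValue(ep.Id), aws.StringValue(ep.ChannelType), aws.StringValue(ep.Address))
	}
}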
+type Event struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The package name of the app that's recording the event. + AppPackageName *string `type:"string"` + + // The title of the app that's recording the event. + AppTitle *string `type:"string"` + + // The version number of the app that's recording the event. + AppVersionCode *string `type:"string"` + + // One or more custom attributes that are associated with the event. + Attributes map[string]*string `type:"map"` + + // The version of the SDK that's running on the client device. + ClientSdkVersion *string `type:"string"` + + // The name of the event. + // + // EventType is a required field + EventType *string `type:"string" required:"true"` + + // One or more custom metrics that are associated with the event. + Metrics map[string]*float64 `type:"map"` + + // The name of the SDK that's being used to record the event. + SdkName *string `type:"string"` + + // Information about the session in which the event occurred. + Session *Session `type:"structure"` + + // The date and time, in ISO 8601 format, when the event occurred. + // + // Timestamp is a required field + Timestamp *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteEventStreamInput) String() string { +func (s Event) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteEventStreamInput) GoString() string { +func (s Event) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEventStreamInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEventStreamInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) +func (s *Event) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Event"} + if s.EventType == nil { + invalidParams.Add(request.NewErrParamRequired("EventType")) } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + if s.Session != nil { + if err := s.Session.Validate(); err != nil { + invalidParams.AddNested("Session", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11218,62 +17427,101 @@ func (s *DeleteEventStreamInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteEventStreamInput) SetApplicationId(v string) *DeleteEventStreamInput { - s.ApplicationId = &v +// SetAppPackageName sets the AppPackageName field's value. +func (s *Event) SetAppPackageName(v string) *Event { + s.AppPackageName = &v return s } -type DeleteEventStreamOutput struct { - _ struct{} `type:"structure" payload:"EventStream"` +// SetAppTitle sets the AppTitle field's value. +func (s *Event) SetAppTitle(v string) *Event { + s.AppTitle = &v + return s +} - // Model for an event publishing subscription export. - // - // EventStream is a required field - EventStream *EventStream `type:"structure" required:"true"` +// SetAppVersionCode sets the AppVersionCode field's value. 
+func (s *Event) SetAppVersionCode(v string) *Event { + s.AppVersionCode = &v + return s } -// String returns the string representation -func (s DeleteEventStreamOutput) String() string { - return awsutil.Prettify(s) +// SetAttributes sets the Attributes field's value. +func (s *Event) SetAttributes(v map[string]*string) *Event { + s.Attributes = v + return s } -// GoString returns the string representation -func (s DeleteEventStreamOutput) GoString() string { - return s.String() +// SetClientSdkVersion sets the ClientSdkVersion field's value. +func (s *Event) SetClientSdkVersion(v string) *Event { + s.ClientSdkVersion = &v + return s } -// SetEventStream sets the EventStream field's value. -func (s *DeleteEventStreamOutput) SetEventStream(v *EventStream) *DeleteEventStreamOutput { - s.EventStream = v +// SetEventType sets the EventType field's value. +func (s *Event) SetEventType(v string) *Event { + s.EventType = &v return s } -type DeleteGcmChannelInput struct { +// SetMetrics sets the Metrics field's value. +func (s *Event) SetMetrics(v map[string]*float64) *Event { + s.Metrics = v + return s +} + +// SetSdkName sets the SdkName field's value. +func (s *Event) SetSdkName(v string) *Event { + s.SdkName = &v + return s +} + +// SetSession sets the Session field's value. +func (s *Event) SetSession(v *Session) *Event { + s.Session = v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *Event) SetTimestamp(v string) *Event { + s.Timestamp = &v + return s +} + +// Specifies the conditions to evaluate for an event that applies to an activity +// in a journey. +type EventCondition struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The dimensions for the event filter to use for the activity. + // + // Dimensions is a required field + Dimensions *EventDimensions `type:"structure" required:"true"` + + // The message identifier (message_id) for the message to use when determining + // whether message events meet the condition. + MessageActivity *string `type:"string"` } // String returns the string representation -func (s DeleteGcmChannelInput) String() string { +func (s EventCondition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteGcmChannelInput) GoString() string { +func (s EventCondition) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteGcmChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGcmChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) +func (s *EventCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventCondition"} + if s.Dimensions == nil { + invalidParams.Add(request.NewErrParamRequired("Dimensions")) + } + if s.Dimensions != nil { + if err := s.Dimensions.Validate(); err != nil { + invalidParams.AddNested("Dimensions", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -11282,71 +17530,77 @@ func (s *DeleteGcmChannelInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. 
-func (s *DeleteGcmChannelInput) SetApplicationId(v string) *DeleteGcmChannelInput { - s.ApplicationId = &v +// SetDimensions sets the Dimensions field's value. +func (s *EventCondition) SetDimensions(v *EventDimensions) *EventCondition { + s.Dimensions = v return s } -type DeleteGcmChannelOutput struct { - _ struct{} `type:"structure" payload:"GCMChannelResponse"` - - // Google Cloud Messaging channel definition - // - // GCMChannelResponse is a required field - GCMChannelResponse *GCMChannelResponse `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeleteGcmChannelOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGcmChannelOutput) GoString() string { - return s.String() -} - -// SetGCMChannelResponse sets the GCMChannelResponse field's value. -func (s *DeleteGcmChannelOutput) SetGCMChannelResponse(v *GCMChannelResponse) *DeleteGcmChannelOutput { - s.GCMChannelResponse = v +// SetMessageActivity sets the MessageActivity field's value. +func (s *EventCondition) SetMessageActivity(v string) *EventCondition { + s.MessageActivity = &v return s } -type DeleteSegmentInput struct { +// Specifies the dimensions for an event filter that determines when a campaign +// is sent or a journey activity is performed. +type EventDimensions struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // One or more custom attributes that your application reports to Amazon Pinpoint. + // You can use these attributes as selection criteria when you create an event + // filter. + Attributes map[string]*AttributeDimension `type:"map"` - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + // The name of the event that causes the campaign to be sent or the journey + // activity to be performed. This can be a standard type of event that Amazon + // Pinpoint generates, such as _email.delivered, or a custom event that's specific + // to your application. + EventType *SetDimension `type:"structure"` + + // One or more custom metrics that your application reports to Amazon Pinpoint. + // You can use these metrics as selection criteria when you create an event + // filter. + Metrics map[string]*MetricDimension `type:"map"` } // String returns the string representation -func (s DeleteSegmentInput) String() string { +func (s EventDimensions) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSegmentInput) GoString() string { +func (s EventDimensions) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteSegmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSegmentInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) +func (s *EventDimensions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventDimensions"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) + if s.EventType != nil { + if err := s.EventType.Validate(); err != nil { + invalidParams.AddNested("EventType", err.(request.ErrInvalidParams)) + } } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + if s.Metrics != nil { + for i, v := range s.Metrics { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metrics", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11355,141 +17609,189 @@ func (s *DeleteSegmentInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteSegmentInput) SetApplicationId(v string) *DeleteSegmentInput { - s.ApplicationId = &v +// SetAttributes sets the Attributes field's value. +func (s *EventDimensions) SetAttributes(v map[string]*AttributeDimension) *EventDimensions { + s.Attributes = v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *DeleteSegmentInput) SetSegmentId(v string) *DeleteSegmentInput { - s.SegmentId = &v +// SetEventType sets the EventType field's value. +func (s *EventDimensions) SetEventType(v *SetDimension) *EventDimensions { + s.EventType = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *EventDimensions) SetMetrics(v map[string]*MetricDimension) *EventDimensions { + s.Metrics = v return s } -type DeleteSegmentOutput struct { - _ struct{} `type:"structure" payload:"SegmentResponse"` +// Provides the status code and message that result from processing an event. +type EventItemResponse struct { + _ struct{} `type:"structure"` + + // A custom message that's returned in the response as a result of processing + // the event. + Message *string `type:"string"` - // Segment definition. - // - // SegmentResponse is a required field - SegmentResponse *SegmentResponse `type:"structure" required:"true"` + // The status code that's returned in the response as a result of processing + // the event. Possible values are: 202, for events that were accepted; and, + // 400, for events that weren't valid. + StatusCode *int64 `type:"integer"` } // String returns the string representation -func (s DeleteSegmentOutput) String() string { +func (s EventItemResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSegmentOutput) GoString() string { +func (s EventItemResponse) GoString() string { return s.String() } -// SetSegmentResponse sets the SegmentResponse field's value. -func (s *DeleteSegmentOutput) SetSegmentResponse(v *SegmentResponse) *DeleteSegmentOutput { - s.SegmentResponse = v +// SetMessage sets the Message field's value. 
+func (s *EventItemResponse) SetMessage(v string) *EventItemResponse { + s.Message = &v return s } -type DeleteSmsChannelInput struct { +// SetStatusCode sets the StatusCode field's value. +func (s *EventItemResponse) SetStatusCode(v int64) *EventItemResponse { + s.StatusCode = &v + return s +} + +// Specifies settings for publishing event data to an Amazon Kinesis data stream +// or an Amazon Kinesis Data Firehose delivery stream. +type EventStream struct { _ struct{} `type:"structure"` + // The unique identifier for the application to publish event data for. + // // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + ApplicationId *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon + // Kinesis Data Firehose delivery stream to publish event data to. + // + // For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name + // + // For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name + // + // DestinationStreamArn is a required field + DestinationStreamArn *string `type:"string" required:"true"` + + // (Deprecated) Your AWS account ID, which you assigned to an external ID key + // in an IAM trust policy. Amazon Pinpoint previously used this value to assume + // an IAM role when publishing event data, but we removed this requirement. + // We don't recommend use of external IDs for IAM roles that are assumed by + // Amazon Pinpoint. + ExternalId *string `type:"string"` + + // The date, in ISO 8601 format, when the event stream was last modified. + LastModifiedDate *string `type:"string"` + + // The IAM user who last modified the event stream. + LastUpdatedBy *string `type:"string"` + + // The AWS Identity and Access Management (IAM) role that authorizes Amazon + // Pinpoint to publish event data to the stream in your AWS account. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteSmsChannelInput) String() string { +func (s EventStream) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSmsChannelInput) GoString() string { +func (s EventStream) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSmsChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSmsChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetApplicationId sets the ApplicationId field's value. -func (s *DeleteSmsChannelInput) SetApplicationId(v string) *DeleteSmsChannelInput { +func (s *EventStream) SetApplicationId(v string) *EventStream { s.ApplicationId = &v return s } -type DeleteSmsChannelOutput struct { - _ struct{} `type:"structure" payload:"SMSChannelResponse"` +// SetDestinationStreamArn sets the DestinationStreamArn field's value. +func (s *EventStream) SetDestinationStreamArn(v string) *EventStream { + s.DestinationStreamArn = &v + return s +} - // SMS Channel Response. 
- // - // SMSChannelResponse is a required field - SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"` +// SetExternalId sets the ExternalId field's value. +func (s *EventStream) SetExternalId(v string) *EventStream { + s.ExternalId = &v + return s } -// String returns the string representation -func (s DeleteSmsChannelOutput) String() string { - return awsutil.Prettify(s) +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *EventStream) SetLastModifiedDate(v string) *EventStream { + s.LastModifiedDate = &v + return s } -// GoString returns the string representation -func (s DeleteSmsChannelOutput) GoString() string { - return s.String() +// SetLastUpdatedBy sets the LastUpdatedBy field's value. +func (s *EventStream) SetLastUpdatedBy(v string) *EventStream { + s.LastUpdatedBy = &v + return s } -// SetSMSChannelResponse sets the SMSChannelResponse field's value. -func (s *DeleteSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *DeleteSmsChannelOutput { - s.SMSChannelResponse = v +// SetRoleArn sets the RoleArn field's value. +func (s *EventStream) SetRoleArn(v string) *EventStream { + s.RoleArn = &v return s } -type DeleteUserEndpointsInput struct { +// Specifies a batch of endpoints and events to process. +type EventsBatch struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // A set of properties and attributes that are associated with the endpoint. + // + // Endpoint is a required field + Endpoint *PublicEndpoint `type:"structure" required:"true"` - // UserId is a required field - UserId *string `location:"uri" locationName:"user-id" type:"string" required:"true"` + // A set of properties that are associated with the event. + // + // Events is a required field + Events map[string]*Event `type:"map" required:"true"` } // String returns the string representation -func (s DeleteUserEndpointsInput) String() string { +func (s EventsBatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteUserEndpointsInput) GoString() string { +func (s EventsBatch) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
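// [editor's illustration, not part of the vendored diff] A sketch of wiring an
// application to a stream using the two DestinationStreamArn formats documented
// above. It assumes the package's PutEventStream operation and WriteEventStream
// type (neither appears in this hunk); all IDs and ARNs are placeholders.
func examplePutEventStream(svc *pinpoint.Pinpoint, appID string) error {
	// Kinesis data stream ARN:  arn:aws:kinesis:region:account-id:stream/stream_name
	// Firehose delivery stream: arn:aws:firehose:region:account-id:deliverystream/stream_name
	_, err := svc.PutEventStream(&pinpoint.PutEventStreamInput{
		ApplicationId: aws.String(appID),
		WriteEventStream: &pinpoint.WriteEventStream{
			DestinationStreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
			RoleArn:              aws.String("arn:aws:iam::123456789012:role/example-pinpoint-role"),
		},
	})
	return err
}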
-func (s *DeleteUserEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUserEndpointsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.UserId == nil { - invalidParams.Add(request.NewErrParamRequired("UserId")) - } - if s.UserId != nil && len(*s.UserId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) +func (s *EventsBatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventsBatch"} + if s.Endpoint == nil { + invalidParams.Add(request.NewErrParamRequired("Endpoint")) + } + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.Events != nil { + for i, v := range s.Events { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Events", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11498,68 +17800,54 @@ func (s *DeleteUserEndpointsInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteUserEndpointsInput) SetApplicationId(v string) *DeleteUserEndpointsInput { - s.ApplicationId = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *DeleteUserEndpointsInput) SetUserId(v string) *DeleteUserEndpointsInput { - s.UserId = &v +// SetEndpoint sets the Endpoint field's value. +func (s *EventsBatch) SetEndpoint(v *PublicEndpoint) *EventsBatch { + s.Endpoint = v return s } -type DeleteUserEndpointsOutput struct { - _ struct{} `type:"structure" payload:"EndpointsResponse"` - - // List of endpoints - // - // EndpointsResponse is a required field - EndpointsResponse *EndpointsResponse `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DeleteUserEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteUserEndpointsOutput) GoString() string { - return s.String() -} - -// SetEndpointsResponse sets the EndpointsResponse field's value. -func (s *DeleteUserEndpointsOutput) SetEndpointsResponse(v *EndpointsResponse) *DeleteUserEndpointsOutput { - s.EndpointsResponse = v +// SetEvents sets the Events field's value. +func (s *EventsBatch) SetEvents(v map[string]*Event) *EventsBatch { + s.Events = v return s } -type DeleteVoiceChannelInput struct { +// Specifies a batch of events to process. +type EventsRequest struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The batch of events to process. For each item in a batch, the endpoint ID + // acts as a key that has an EventsBatch object as its value. + // + // BatchItem is a required field + BatchItem map[string]*EventsBatch `type:"map" required:"true"` } // String returns the string representation -func (s DeleteVoiceChannelInput) String() string { +func (s EventsRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteVoiceChannelInput) GoString() string { +func (s EventsRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteVoiceChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteVoiceChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) +func (s *EventsRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventsRequest"} + if s.BatchItem == nil { + invalidParams.Add(request.NewErrParamRequired("BatchItem")) + } + if s.BatchItem != nil { + for i, v := range s.BatchItem { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BatchItem", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -11568,2332 +17856,2143 @@ func (s *DeleteVoiceChannelInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *DeleteVoiceChannelInput) SetApplicationId(v string) *DeleteVoiceChannelInput { - s.ApplicationId = &v +// SetBatchItem sets the BatchItem field's value. +func (s *EventsRequest) SetBatchItem(v map[string]*EventsBatch) *EventsRequest { + s.BatchItem = v return s } -type DeleteVoiceChannelOutput struct { - _ struct{} `type:"structure" payload:"VoiceChannelResponse"` +// Provides information about endpoints and the events that they're associated +// with. +type EventsResponse struct { + _ struct{} `type:"structure"` - // Voice Channel Response. - // - // VoiceChannelResponse is a required field - VoiceChannelResponse *VoiceChannelResponse `type:"structure" required:"true"` + // A map that contains a multipart response for each endpoint. For each item + // in this object, the endpoint ID is the key and the item response is the value. + // If no item response exists, the value can also be one of the following: 202, + // the request was processed successfully; or 400, the payload wasn't valid + // or required fields were missing. + Results map[string]*ItemResponse `type:"map"` } // String returns the string representation -func (s DeleteVoiceChannelOutput) String() string { +func (s EventsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteVoiceChannelOutput) GoString() string { +func (s EventsResponse) GoString() string { return s.String() } -// SetVoiceChannelResponse sets the VoiceChannelResponse field's value. -func (s *DeleteVoiceChannelOutput) SetVoiceChannelResponse(v *VoiceChannelResponse) *DeleteVoiceChannelOutput { - s.VoiceChannelResponse = v +// SetResults sets the Results field's value. +func (s *EventsResponse) SetResults(v map[string]*ItemResponse) *EventsResponse { + s.Results = v return s } -// Message definitions for the default message and any messages that are tailored -// for specific channels. -type DirectMessageConfiguration struct { +// Specifies the settings for a job that exports endpoint definitions to an +// Amazon Simple Storage Service (Amazon S3) bucket. +type ExportJobRequest struct { _ struct{} `type:"structure"` - // The message to ADM channels. Overrides the default push notification message. - ADMMessage *ADMMessage `type:"structure"` - - // The message to APNS channels. Overrides the default push notification message. - APNSMessage *APNSMessage `type:"structure"` - - // The message to Baidu GCM channels. Overrides the default push notification - // message. 
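// [editor's illustration, not part of the vendored diff] A sketch of the
// BatchItem shape documented above: each endpoint ID keys an EventsBatch, and
// each event ID keys an Event. The Event and PublicEndpoint field sets are
// assumed from the same package (only their names appear in this hunk); all
// IDs and values are placeholders. Assumes the standard imports
// "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/pinpoint".
func exampleEventsRequest() (*pinpoint.EventsRequest, error) {
	req := &pinpoint.EventsRequest{
		BatchItem: map[string]*pinpoint.EventsBatch{
			"example-endpoint-id": {
				Endpoint: &pinpoint.PublicEndpoint{},
				Events: map[string]*pinpoint.Event{
					"example-event-id": {
						EventType: aws.String("session.start"),
						Timestamp: aws.String("2019-11-14T00:00:00Z"),
					},
				},
			},
		},
	}
	// Validate walks every batch and event, aggregating nested failures
	// under paths such as "BatchItem[example-endpoint-id]".
	if err := req.Validate(); err != nil {
		return nil, err
	}
	return req, nil
}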
- BaiduMessage *BaiduMessage `type:"structure"` - - // The default message for all channels. - DefaultMessage *DefaultMessage `type:"structure"` - - // The default push notification message for all push channels. - DefaultPushNotificationMessage *DefaultPushNotificationMessage `type:"structure"` - - // The message to Email channels. Overrides the default message. - EmailMessage *EmailMessage `type:"structure"` + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorizes Amazon Pinpoint to access the Amazon S3 location + // where you want to export endpoint definitions to. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` - // The message to GCM channels. Overrides the default push notification message. - GCMMessage *GCMMessage `type:"structure"` + // The URL of the location in an Amazon Simple Storage Service (Amazon S3) bucket + // where you want to export endpoint definitions to. This location is typically + // a folder that contains multiple files. The URL should be in the following + // format: s3://bucket-name/folder-name/. + // + // S3UrlPrefix is a required field + S3UrlPrefix *string `type:"string" required:"true"` - // The message to SMS channels. Overrides the default message. - SMSMessage *SMSMessage `type:"structure"` + // The identifier for the segment to export endpoint definitions from. If you + // don't specify this value, Amazon Pinpoint exports definitions for all the + // endpoints that are associated with the application. + SegmentId *string `type:"string"` - // The message to Voice channels. Overrides the default message. - VoiceMessage *VoiceMessage `type:"structure"` + // The version of the segment to export endpoint definitions from, if specified. + SegmentVersion *int64 `type:"integer"` } // String returns the string representation -func (s DirectMessageConfiguration) String() string { +func (s ExportJobRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DirectMessageConfiguration) GoString() string { +func (s ExportJobRequest) GoString() string { return s.String() } -// SetADMMessage sets the ADMMessage field's value. -func (s *DirectMessageConfiguration) SetADMMessage(v *ADMMessage) *DirectMessageConfiguration { - s.ADMMessage = v - return s -} - -// SetAPNSMessage sets the APNSMessage field's value. -func (s *DirectMessageConfiguration) SetAPNSMessage(v *APNSMessage) *DirectMessageConfiguration { - s.APNSMessage = v - return s -} - -// SetBaiduMessage sets the BaiduMessage field's value. -func (s *DirectMessageConfiguration) SetBaiduMessage(v *BaiduMessage) *DirectMessageConfiguration { - s.BaiduMessage = v - return s -} - -// SetDefaultMessage sets the DefaultMessage field's value. -func (s *DirectMessageConfiguration) SetDefaultMessage(v *DefaultMessage) *DirectMessageConfiguration { - s.DefaultMessage = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportJobRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportJobRequest"} + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.S3UrlPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("S3UrlPrefix")) + } -// SetDefaultPushNotificationMessage sets the DefaultPushNotificationMessage field's value. 
-func (s *DirectMessageConfiguration) SetDefaultPushNotificationMessage(v *DefaultPushNotificationMessage) *DirectMessageConfiguration { - s.DefaultPushNotificationMessage = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetEmailMessage sets the EmailMessage field's value. -func (s *DirectMessageConfiguration) SetEmailMessage(v *EmailMessage) *DirectMessageConfiguration { - s.EmailMessage = v +// SetRoleArn sets the RoleArn field's value. +func (s *ExportJobRequest) SetRoleArn(v string) *ExportJobRequest { + s.RoleArn = &v return s } -// SetGCMMessage sets the GCMMessage field's value. -func (s *DirectMessageConfiguration) SetGCMMessage(v *GCMMessage) *DirectMessageConfiguration { - s.GCMMessage = v +// SetS3UrlPrefix sets the S3UrlPrefix field's value. +func (s *ExportJobRequest) SetS3UrlPrefix(v string) *ExportJobRequest { + s.S3UrlPrefix = &v return s } -// SetSMSMessage sets the SMSMessage field's value. -func (s *DirectMessageConfiguration) SetSMSMessage(v *SMSMessage) *DirectMessageConfiguration { - s.SMSMessage = v +// SetSegmentId sets the SegmentId field's value. +func (s *ExportJobRequest) SetSegmentId(v string) *ExportJobRequest { + s.SegmentId = &v return s } -// SetVoiceMessage sets the VoiceMessage field's value. -func (s *DirectMessageConfiguration) SetVoiceMessage(v *VoiceMessage) *DirectMessageConfiguration { - s.VoiceMessage = v +// SetSegmentVersion sets the SegmentVersion field's value. +func (s *ExportJobRequest) SetSegmentVersion(v int64) *ExportJobRequest { + s.SegmentVersion = &v return s } -// Email Channel Request -type EmailChannelRequest struct { +// Provides information about the resource settings for a job that exports endpoint +// definitions to a file. The file can be added directly to an Amazon Simple +// Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API or downloaded +// directly to a computer by using the Amazon Pinpoint console. +type ExportJobResource struct { _ struct{} `type:"structure"` - // The configuration set that you want to use when you send email using the - // Pinpoint Email API. - ConfigurationSet *string `type:"string"` - - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorized Amazon Pinpoint to access the Amazon S3 location + // where the endpoint definitions were exported to. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` - // The email address used to send emails from. - FromAddress *string `type:"string"` + // The URL of the location in an Amazon Simple Storage Service (Amazon S3) bucket + // where the endpoint definitions were exported to. This location is typically + // a folder that contains multiple files. The URL should be in the following + // format: s3://bucket-name/folder-name/. + // + // S3UrlPrefix is a required field + S3UrlPrefix *string `type:"string" required:"true"` - // The ARN of an identity verified with SES. - Identity *string `type:"string"` + // The identifier for the segment that the endpoint definitions were exported + // from. If this value isn't present, Amazon Pinpoint exported definitions for + // all the endpoints that are associated with the application. 
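// [editor's illustration, not part of the vendored diff] A sketch of an
// ExportJobRequest using the documented s3://bucket-name/folder-name/ prefix
// format; the role ARN and bucket are placeholders. Omitting SegmentId exports
// every endpoint in the application, as described above.
func exampleExportJobRequest() (*pinpoint.ExportJobRequest, error) {
	req := &pinpoint.ExportJobRequest{
		RoleArn:     aws.String("arn:aws:iam::123456789012:role/example-export-role"),
		S3UrlPrefix: aws.String("s3://example-bucket/endpoint-exports/"),
	}
	if err := req.Validate(); err != nil { // RoleArn and S3UrlPrefix are required
		return nil, err
	}
	return req, nil
}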
+ SegmentId *string `type:"string"` - // The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion - // service - RoleArn *string `type:"string"` + // The version of the segment that the endpoint definitions were exported from. + SegmentVersion *int64 `type:"integer"` } // String returns the string representation -func (s EmailChannelRequest) String() string { +func (s ExportJobResource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EmailChannelRequest) GoString() string { +func (s ExportJobResource) GoString() string { return s.String() } -// SetConfigurationSet sets the ConfigurationSet field's value. -func (s *EmailChannelRequest) SetConfigurationSet(v string) *EmailChannelRequest { - s.ConfigurationSet = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *EmailChannelRequest) SetEnabled(v bool) *EmailChannelRequest { - s.Enabled = &v +// SetRoleArn sets the RoleArn field's value. +func (s *ExportJobResource) SetRoleArn(v string) *ExportJobResource { + s.RoleArn = &v return s } -// SetFromAddress sets the FromAddress field's value. -func (s *EmailChannelRequest) SetFromAddress(v string) *EmailChannelRequest { - s.FromAddress = &v +// SetS3UrlPrefix sets the S3UrlPrefix field's value. +func (s *ExportJobResource) SetS3UrlPrefix(v string) *ExportJobResource { + s.S3UrlPrefix = &v return s } -// SetIdentity sets the Identity field's value. -func (s *EmailChannelRequest) SetIdentity(v string) *EmailChannelRequest { - s.Identity = &v +// SetSegmentId sets the SegmentId field's value. +func (s *ExportJobResource) SetSegmentId(v string) *ExportJobResource { + s.SegmentId = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *EmailChannelRequest) SetRoleArn(v string) *EmailChannelRequest { - s.RoleArn = &v +// SetSegmentVersion sets the SegmentVersion field's value. +func (s *ExportJobResource) SetSegmentVersion(v int64) *ExportJobResource { + s.SegmentVersion = &v return s } -// Email Channel Response. -type EmailChannelResponse struct { +// Provides information about the status and settings of a job that exports +// endpoint definitions to a file. The file can be added directly to an Amazon +// Simple Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API +// or downloaded directly to a computer by using the Amazon Pinpoint console. +type ExportJobResponse struct { _ struct{} `type:"structure"` - // The unique ID of the application to which the email channel belongs. - ApplicationId *string `type:"string"` - - // The configuration set that you want to use when you send email using the - // Pinpoint Email API. - ConfigurationSet *string `type:"string"` - - // The date that the settings were last updated in ISO 8601 format. - CreationDate *string `type:"string"` + // The unique identifier for the application that's associated with the export + // job. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` + // The number of pieces that were processed successfully (completed) by the + // export job, as of the time of the request. + CompletedPieces *int64 `type:"integer"` - // The email address used to send emails from. - FromAddress *string `type:"string"` + // The date, in ISO 8601 format, when the export job was completed. + CompletionDate *string `type:"string"` - // Not used. Retained for backwards compatibility. 
- HasCredential *bool `type:"boolean"` + // The date, in ISO 8601 format, when the export job was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` - // Channel ID. Not used, only for backwards compatibility. - Id *string `type:"string"` + // The resource settings that apply to the export job. + // + // Definition is a required field + Definition *ExportJobResource `type:"structure" required:"true"` - // The ARN of an identity verified with SES. - Identity *string `type:"string"` + // The number of pieces that weren't processed successfully (failed) by the + // export job, as of the time of the request. + FailedPieces *int64 `type:"integer"` - // Is this channel archived - IsArchived *bool `type:"boolean"` + // An array of entries, one for each of the first 100 entries that weren't processed + // successfully (failed) by the export job, if any. + Failures []*string `type:"list"` - // Who last updated this entry - LastModifiedBy *string `type:"string"` + // The unique identifier for the export job. + // + // Id is a required field + Id *string `type:"string" required:"true"` - // Last date this was updated - LastModifiedDate *string `type:"string"` + // The status of the export job. The job status is FAILED if Amazon Pinpoint + // wasn't able to process one or more pieces in the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"JobStatus"` - // Messages per second that can be sent - MessagesPerSecond *int64 `type:"integer"` + // The total number of endpoint definitions that weren't processed successfully + // (failed) by the export job, typically because an error, such as a syntax + // error, occurred. + TotalFailures *int64 `type:"integer"` - // Platform type. Will be "EMAIL" - Platform *string `type:"string"` + // The total number of pieces that must be processed to complete the export + // job. Each piece consists of an approximately equal portion of the endpoint + // definitions that are part of the export job. + TotalPieces *int64 `type:"integer"` - // The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion - // service - RoleArn *string `type:"string"` + // The total number of endpoint definitions that were processed by the export + // job. + TotalProcessed *int64 `type:"integer"` - // Version of channel - Version *int64 `type:"integer"` + // The job type. This value is EXPORT for export jobs. + // + // Type is a required field + Type *string `type:"string" required:"true"` } // String returns the string representation -func (s EmailChannelResponse) String() string { +func (s ExportJobResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EmailChannelResponse) GoString() string { +func (s ExportJobResponse) GoString() string { return s.String() } // SetApplicationId sets the ApplicationId field's value. -func (s *EmailChannelResponse) SetApplicationId(v string) *EmailChannelResponse { +func (s *ExportJobResponse) SetApplicationId(v string) *ExportJobResponse { s.ApplicationId = &v return s } -// SetConfigurationSet sets the ConfigurationSet field's value. -func (s *EmailChannelResponse) SetConfigurationSet(v string) *EmailChannelResponse { - s.ConfigurationSet = &v +// SetCompletedPieces sets the CompletedPieces field's value. 
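// [editor's illustration, not part of the vendored diff] A sketch of reading
// the piece counters documented above: CompletedPieces out of TotalPieces
// tracks progress, and a JobStatus of "FAILED" means one or more pieces could
// not be processed.
func exportJobProgress(job *pinpoint.ExportJobResponse) (done, total int64, failed bool) {
	if job.CompletedPieces != nil {
		done = *job.CompletedPieces
	}
	if job.TotalPieces != nil {
		total = *job.TotalPieces
	}
	failed = job.JobStatus != nil && *job.JobStatus == "FAILED"
	return done, total, failed
}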
+func (s *ExportJobResponse) SetCompletedPieces(v int64) *ExportJobResponse { + s.CompletedPieces = &v + return s +} + +// SetCompletionDate sets the CompletionDate field's value. +func (s *ExportJobResponse) SetCompletionDate(v string) *ExportJobResponse { + s.CompletionDate = &v return s } // SetCreationDate sets the CreationDate field's value. -func (s *EmailChannelResponse) SetCreationDate(v string) *EmailChannelResponse { +func (s *ExportJobResponse) SetCreationDate(v string) *ExportJobResponse { s.CreationDate = &v return s } -// SetEnabled sets the Enabled field's value. -func (s *EmailChannelResponse) SetEnabled(v bool) *EmailChannelResponse { - s.Enabled = &v +// SetDefinition sets the Definition field's value. +func (s *ExportJobResponse) SetDefinition(v *ExportJobResource) *ExportJobResponse { + s.Definition = v return s } -// SetFromAddress sets the FromAddress field's value. -func (s *EmailChannelResponse) SetFromAddress(v string) *EmailChannelResponse { - s.FromAddress = &v +// SetFailedPieces sets the FailedPieces field's value. +func (s *ExportJobResponse) SetFailedPieces(v int64) *ExportJobResponse { + s.FailedPieces = &v return s } -// SetHasCredential sets the HasCredential field's value. -func (s *EmailChannelResponse) SetHasCredential(v bool) *EmailChannelResponse { - s.HasCredential = &v +// SetFailures sets the Failures field's value. +func (s *ExportJobResponse) SetFailures(v []*string) *ExportJobResponse { + s.Failures = v return s } // SetId sets the Id field's value. -func (s *EmailChannelResponse) SetId(v string) *EmailChannelResponse { +func (s *ExportJobResponse) SetId(v string) *ExportJobResponse { s.Id = &v return s } -// SetIdentity sets the Identity field's value. -func (s *EmailChannelResponse) SetIdentity(v string) *EmailChannelResponse { - s.Identity = &v - return s -} - -// SetIsArchived sets the IsArchived field's value. -func (s *EmailChannelResponse) SetIsArchived(v bool) *EmailChannelResponse { - s.IsArchived = &v - return s -} - -// SetLastModifiedBy sets the LastModifiedBy field's value. -func (s *EmailChannelResponse) SetLastModifiedBy(v string) *EmailChannelResponse { - s.LastModifiedBy = &v - return s -} - -// SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *EmailChannelResponse) SetLastModifiedDate(v string) *EmailChannelResponse { - s.LastModifiedDate = &v +// SetJobStatus sets the JobStatus field's value. +func (s *ExportJobResponse) SetJobStatus(v string) *ExportJobResponse { + s.JobStatus = &v return s } -// SetMessagesPerSecond sets the MessagesPerSecond field's value. -func (s *EmailChannelResponse) SetMessagesPerSecond(v int64) *EmailChannelResponse { - s.MessagesPerSecond = &v +// SetTotalFailures sets the TotalFailures field's value. +func (s *ExportJobResponse) SetTotalFailures(v int64) *ExportJobResponse { + s.TotalFailures = &v return s } -// SetPlatform sets the Platform field's value. -func (s *EmailChannelResponse) SetPlatform(v string) *EmailChannelResponse { - s.Platform = &v +// SetTotalPieces sets the TotalPieces field's value. +func (s *ExportJobResponse) SetTotalPieces(v int64) *ExportJobResponse { + s.TotalPieces = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *EmailChannelResponse) SetRoleArn(v string) *EmailChannelResponse { - s.RoleArn = &v +// SetTotalProcessed sets the TotalProcessed field's value. +func (s *ExportJobResponse) SetTotalProcessed(v int64) *ExportJobResponse { + s.TotalProcessed = &v return s } -// SetVersion sets the Version field's value. 
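// [editor's illustration, not part of the vendored diff] A sketch of draining
// the paginated listing defined just below: a nil NextToken signals the last
// page. The GetExportJobs operation and its Token field are assumed from the
// same package (they do not appear in this hunk).
func listAllExportJobs(svc *pinpoint.Pinpoint, appID string) ([]*pinpoint.ExportJobResponse, error) {
	var jobs []*pinpoint.ExportJobResponse
	var token *string
	for {
		out, err := svc.GetExportJobs(&pinpoint.GetExportJobsInput{
			ApplicationId: aws.String(appID),
			Token:         token,
		})
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, out.ExportJobsResponse.Item...)
		token = out.ExportJobsResponse.NextToken
		if token == nil {
			return jobs, nil
		}
	}
}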
-func (s *EmailChannelResponse) SetVersion(v int64) *EmailChannelResponse { - s.Version = &v +// SetType sets the Type field's value. +func (s *ExportJobResponse) SetType(v string) *ExportJobResponse { + s.Type = &v return s } -// Email Message. -type EmailMessage struct { +// Provides information about all the export jobs that are associated with an +// application or segment. An export job is a job that exports endpoint definitions +// to a file. +type ExportJobsResponse struct { _ struct{} `type:"structure"` - // The body of the email message. - Body *string `type:"string"` - - // The email address that bounces and complaints will be forwarded to when feedback - // forwarding is enabled. - FeedbackForwardingAddress *string `type:"string"` - - // The email address used to send the email from. Defaults to use FromAddress - // specified in the Email Channel. - FromAddress *string `type:"string"` - - // An email represented as a raw MIME message. - RawEmail *RawEmail `type:"structure"` - - // The reply-to email address(es) for the email. If the recipient replies to - // the email, each reply-to address will receive the reply. - ReplyToAddresses []*string `type:"list"` - - // An email composed of a subject, a text part and a html part. - SimpleEmail *SimpleEmail `type:"structure"` + // An array of responses, one for each export job that's associated with the + // application (Export Jobs resource) or segment (Segment Export Jobs resource). + // + // Item is a required field + Item []*ExportJobResponse `type:"list" required:"true"` - // Default message substitutions. Can be overridden by individual address substitutions. - Substitutions map[string][]*string `type:"map"` + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` } // String returns the string representation -func (s EmailMessage) String() string { +func (s ExportJobsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EmailMessage) GoString() string { +func (s ExportJobsResponse) GoString() string { return s.String() } -// SetBody sets the Body field's value. -func (s *EmailMessage) SetBody(v string) *EmailMessage { - s.Body = &v +// SetItem sets the Item field's value. +func (s *ExportJobsResponse) SetItem(v []*ExportJobResponse) *ExportJobsResponse { + s.Item = v return s } -// SetFeedbackForwardingAddress sets the FeedbackForwardingAddress field's value. -func (s *EmailMessage) SetFeedbackForwardingAddress(v string) *EmailMessage { - s.FeedbackForwardingAddress = &v +// SetNextToken sets the NextToken field's value. +func (s *ExportJobsResponse) SetNextToken(v string) *ExportJobsResponse { + s.NextToken = &v return s } -// SetFromAddress sets the FromAddress field's value. -func (s *EmailMessage) SetFromAddress(v string) *EmailMessage { - s.FromAddress = &v - return s +// Specifies the status and settings of the GCM channel for an application. +// This channel enables Amazon Pinpoint to send push notifications through the +// Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. +type GCMChannelRequest struct { + _ struct{} `type:"structure"` + + // The Web API Key, also referred to as an API_KEY or server key, that you received + // from Google to communicate with Google services. 
+ // + // ApiKey is a required field + ApiKey *string `type:"string" required:"true"` + + // Specifies whether to enable the GCM channel for the application. + Enabled *bool `type:"boolean"` } -// SetRawEmail sets the RawEmail field's value. -func (s *EmailMessage) SetRawEmail(v *RawEmail) *EmailMessage { - s.RawEmail = v - return s +// String returns the string representation +func (s GCMChannelRequest) String() string { + return awsutil.Prettify(s) } -// SetReplyToAddresses sets the ReplyToAddresses field's value. -func (s *EmailMessage) SetReplyToAddresses(v []*string) *EmailMessage { - s.ReplyToAddresses = v - return s +// GoString returns the string representation +func (s GCMChannelRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GCMChannelRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GCMChannelRequest"} + if s.ApiKey == nil { + invalidParams.Add(request.NewErrParamRequired("ApiKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSimpleEmail sets the SimpleEmail field's value. -func (s *EmailMessage) SetSimpleEmail(v *SimpleEmail) *EmailMessage { - s.SimpleEmail = v +// SetApiKey sets the ApiKey field's value. +func (s *GCMChannelRequest) SetApiKey(v string) *GCMChannelRequest { + s.ApiKey = &v return s } -// SetSubstitutions sets the Substitutions field's value. -func (s *EmailMessage) SetSubstitutions(v map[string][]*string) *EmailMessage { - s.Substitutions = v +// SetEnabled sets the Enabled field's value. +func (s *GCMChannelRequest) SetEnabled(v bool) *GCMChannelRequest { + s.Enabled = &v return s } -// Endpoint update request -type EndpointBatchItem struct { +// Provides information about the status and settings of the GCM channel for +// an application. The GCM channel enables Amazon Pinpoint to send push notifications +// through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging +// (GCM), service. +type GCMChannelResponse struct { _ struct{} `type:"structure"` - // The destination for messages that you send to this endpoint. The address - // varies by channel. For mobile push channels, use the token provided by the - // push notification service, such as the APNs device token or the FCM registration - // token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. - // For the email channel, use an email address. - Address *string `type:"string"` - - // Custom attributes that describe the endpoint by associating a name with an - // array of values. For example, an attribute named "interests" might have the - // values ["science", "politics", "travel"]. You can use these attributes as - // selection criteria when you create a segment of users to engage with a messaging - // campaign.The following characters are not recommended in attribute names: - // # : ? \ /. The Amazon Pinpoint console does not display attributes that include - // these characters in the name. This limitation does not apply to attribute - // values. - Attributes map[string][]*string `type:"map"` + // The unique identifier for the application that the GCM channel applies to. + ApplicationId *string `type:"string"` - // The channel type.Valid values: GCM | APNS | APNS_SANDBOX | APNS_VOIP | APNS_VOIP_SANDBOX - // | ADM | SMS | EMAIL | BAIDU - ChannelType *string `type:"string" enum:"ChannelType"` + // The date and time when the GCM channel was enabled. 
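// [editor's illustration, not part of the vendored diff] A sketch of enabling
// the GCM/FCM channel with the request type defined above; the server key is a
// placeholder, and the UpdateGcmChannel operation is assumed from the same
// package (it does not appear in this hunk).
func exampleEnableGcmChannel(svc *pinpoint.Pinpoint, appID string) error {
	_, err := svc.UpdateGcmChannel(&pinpoint.UpdateGcmChannelInput{
		ApplicationId: aws.String(appID),
		GCMChannelRequest: &pinpoint.GCMChannelRequest{
			ApiKey:  aws.String("example-fcm-server-key"), // required; Validate rejects nil
			Enabled: aws.Bool(true),
		},
	})
	return err
}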
+ CreationDate *string `type:"string"` - // The endpoint demographic attributes. - Demographic *EndpointDemographic `type:"structure"` + // The Web API Key, also referred to as an API_KEY or server key, that you received + // from Google to communicate with Google services. + // + // Credential is a required field + Credential *string `type:"string" required:"true"` - // The last time the endpoint was updated. Provided in ISO 8601 format. - EffectiveDate *string `type:"string"` + // Specifies whether the GCM channel is enabled for the application. + Enabled *bool `type:"boolean"` - // Unused. - EndpointStatus *string `type:"string"` + // (Not used) This property is retained only for backward compatibility. + HasCredential *bool `type:"boolean"` - // The unique Id for the Endpoint in the batch. + // (Deprecated) An identifier for the GCM channel. This property is retained + // only for backward compatibility. Id *string `type:"string"` - // The endpoint location attributes. - Location *EndpointLocation `type:"structure"` + // Specifies whether the GCM channel is archived. + IsArchived *bool `type:"boolean"` - // Custom metrics that your app reports to Amazon Pinpoint. - Metrics map[string]*float64 `type:"map"` + // The user who last modified the GCM channel. + LastModifiedBy *string `type:"string"` - // Indicates whether a user has opted out of receiving messages with one of - // the following values:ALL - User has opted out of all messages.NONE - Users - // has not opted out and receives all messages. - OptOut *string `type:"string"` + // The date and time when the GCM channel was last modified. + LastModifiedDate *string `type:"string"` - // The unique ID for the most recent request to update the endpoint. - RequestId *string `type:"string"` + // The type of messaging or notification platform for the channel. For the GCM + // channel, this value is GCM. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` - // Custom user-specific attributes that your app reports to Amazon Pinpoint. - User *EndpointUser `type:"structure"` + // The current version of the GCM channel. + Version *int64 `type:"integer"` } // String returns the string representation -func (s EndpointBatchItem) String() string { +func (s GCMChannelResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointBatchItem) GoString() string { +func (s GCMChannelResponse) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *EndpointBatchItem) SetAddress(v string) *EndpointBatchItem { - s.Address = &v - return s -} - -// SetAttributes sets the Attributes field's value. -func (s *EndpointBatchItem) SetAttributes(v map[string][]*string) *EndpointBatchItem { - s.Attributes = v +// SetApplicationId sets the ApplicationId field's value. +func (s *GCMChannelResponse) SetApplicationId(v string) *GCMChannelResponse { + s.ApplicationId = &v return s } -// SetChannelType sets the ChannelType field's value. -func (s *EndpointBatchItem) SetChannelType(v string) *EndpointBatchItem { - s.ChannelType = &v +// SetCreationDate sets the CreationDate field's value. +func (s *GCMChannelResponse) SetCreationDate(v string) *GCMChannelResponse { + s.CreationDate = &v return s } -// SetDemographic sets the Demographic field's value. -func (s *EndpointBatchItem) SetDemographic(v *EndpointDemographic) *EndpointBatchItem { - s.Demographic = v +// SetCredential sets the Credential field's value. 
+func (s *GCMChannelResponse) SetCredential(v string) *GCMChannelResponse { + s.Credential = &v return s } -// SetEffectiveDate sets the EffectiveDate field's value. -func (s *EndpointBatchItem) SetEffectiveDate(v string) *EndpointBatchItem { - s.EffectiveDate = &v +// SetEnabled sets the Enabled field's value. +func (s *GCMChannelResponse) SetEnabled(v bool) *GCMChannelResponse { + s.Enabled = &v return s } -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *EndpointBatchItem) SetEndpointStatus(v string) *EndpointBatchItem { - s.EndpointStatus = &v +// SetHasCredential sets the HasCredential field's value. +func (s *GCMChannelResponse) SetHasCredential(v bool) *GCMChannelResponse { + s.HasCredential = &v return s } // SetId sets the Id field's value. -func (s *EndpointBatchItem) SetId(v string) *EndpointBatchItem { +func (s *GCMChannelResponse) SetId(v string) *GCMChannelResponse { s.Id = &v return s } -// SetLocation sets the Location field's value. -func (s *EndpointBatchItem) SetLocation(v *EndpointLocation) *EndpointBatchItem { - s.Location = v +// SetIsArchived sets the IsArchived field's value. +func (s *GCMChannelResponse) SetIsArchived(v bool) *GCMChannelResponse { + s.IsArchived = &v return s } -// SetMetrics sets the Metrics field's value. -func (s *EndpointBatchItem) SetMetrics(v map[string]*float64) *EndpointBatchItem { - s.Metrics = v +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *GCMChannelResponse) SetLastModifiedBy(v string) *GCMChannelResponse { + s.LastModifiedBy = &v return s } -// SetOptOut sets the OptOut field's value. -func (s *EndpointBatchItem) SetOptOut(v string) *EndpointBatchItem { - s.OptOut = &v +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *GCMChannelResponse) SetLastModifiedDate(v string) *GCMChannelResponse { + s.LastModifiedDate = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *EndpointBatchItem) SetRequestId(v string) *EndpointBatchItem { - s.RequestId = &v +// SetPlatform sets the Platform field's value. +func (s *GCMChannelResponse) SetPlatform(v string) *GCMChannelResponse { + s.Platform = &v return s } -// SetUser sets the User field's value. -func (s *EndpointBatchItem) SetUser(v *EndpointUser) *EndpointBatchItem { - s.User = v +// SetVersion sets the Version field's value. +func (s *GCMChannelResponse) SetVersion(v int64) *GCMChannelResponse { + s.Version = &v return s } -// Endpoint batch update request. -type EndpointBatchRequest struct { +// Specifies the settings for a one-time message that's sent directly to an +// endpoint through the GCM channel. The GCM channel enables Amazon Pinpoint +// to send messages to the Firebase Cloud Messaging (FCM), formerly Google Cloud +// Messaging (GCM), service. +type GCMMessage struct { _ struct{} `type:"structure"` - // List of items to update. Maximum 100 items - Item []*EndpointBatchItem `type:"list"` -} + // The action to occur if the recipient taps the push notification. Valid values + // are: + // + // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // sent to the background. This is the default action. + // + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This action uses the deep-linking features of the Android + // platform. + // + // * URL - The default mobile browser on the recipient's device opens and + // loads the web page at a URL that you specify. 
+	Action *string `type:"string" enum:"Action"`

-// String returns the string representation
-func (s EndpointBatchRequest) String() string {
-	return awsutil.Prettify(s)
-}
+	// The body of the notification message.
+	Body *string `type:"string"`

-// GoString returns the string representation
-func (s EndpointBatchRequest) GoString() string {
-	return s.String()
-}
+	// An arbitrary string that identifies a group of messages that can be collapsed
+	// to ensure that only the last message is sent when delivery can resume. This
+	// helps avoid sending too many instances of the same messages when the recipient's
+	// device comes online again or becomes active.
+	//
+	// Amazon Pinpoint specifies this value in the Firebase Cloud Messaging (FCM)
+	// collapse_key parameter when it sends the notification message to FCM.
+	CollapseKey *string `type:"string"`

-// SetItem sets the Item field's value.
-func (s *EndpointBatchRequest) SetItem(v []*EndpointBatchItem) *EndpointBatchRequest {
-	s.Item = v
-	return s
-}
+	// The JSON data payload to use for the push notification, if the notification
+	// is a silent push notification. This payload is added to the data.pinpoint.jsonBody
+	// object of the notification.
+	Data map[string]*string `type:"map"`

-// Demographic information about the endpoint.
-type EndpointDemographic struct {
-	_ struct{} `type:"structure"`
+	// The icon image name of the asset saved in your app.
+	IconReference *string `type:"string"`

-	// The version of the application associated with the endpoint.
-	AppVersion *string `type:"string"`
+	// The URL of the large icon image to display in the content view of the push
+	// notification.
+	ImageIconUrl *string `type:"string"`

-	// The endpoint locale in the following format: The ISO 639-1 alpha-2 code,
-	// followed by an underscore, followed by an ISO 3166-1 alpha-2 value.
-	Locale *string `type:"string"`
+	// The URL of an image to display in the push notification.
+	ImageUrl *string `type:"string"`

-	// The manufacturer of the endpoint device, such as Apple or Samsung.
-	Make *string `type:"string"`
+	// The delivery priority of the notification. Valid values are:
+	//
+	// * normal - The notification might be delayed. Delivery is optimized for
+	// battery usage on the recipient's device. Use this value unless immediate
+	// delivery is required.
+	//
+	// * high - The notification is sent immediately and might wake a sleeping
+	// device.
+	//
+	// Amazon Pinpoint specifies this value in the FCM priority parameter when it
+	// sends the notification message to FCM.
+	//
+	// The equivalent values for Apple Push Notification service (APNs) are 5, for
+	// normal, and 10, for high. If you specify an APNs value for this property,
+	// Amazon Pinpoint accepts and converts the value to the corresponding FCM value.
+	Priority *string `type:"string"`

-	// The model name or number of the endpoint device, such as iPhone.
-	Model *string `type:"string"`
+	// The raw, JSON-formatted string to use as the payload for the notification
+	// message. This value overrides the message.
+	RawContent *string `type:"string"`

-	// The model version of the endpoint device.
-	ModelVersion *string `type:"string"`
+	// The package name of the application where registration tokens must match
+	// in order for the recipient to receive the message.
+	RestrictedPackageName *string `type:"string"`

-	// The platform of the endpoint device, such as iOS or Android. 
- Platform *string `type:"string"` + // Specifies whether the notification is a silent push notification, which is + // a push notification that doesn't display on a recipient's device. Silent + // push notifications can be used for cases such as updating an app's configuration + // or supporting phone home functionality. + SilentPush *bool `type:"boolean"` - // The platform version of the endpoint device. - PlatformVersion *string `type:"string"` + // The URL of the small icon image to display in the status bar and the content + // view of the push notification. + SmallImageIconUrl *string `type:"string"` - // The timezone of the endpoint. Specified as a tz database value, such as Americas/Los_Angeles. - Timezone *string `type:"string"` + // The sound to play when the recipient receives the push notification. You + // can use the default stream or specify the file name of a sound resource that's + // bundled in your app. On an Android platform, the sound file must reside in + // /res/raw/. + Sound *string `type:"string"` + + // The default message variables to use in the notification message. You can + // override the default variables with individual address variables. + Substitutions map[string][]*string `type:"map"` + + // The amount of time, in seconds, that FCM should store and attempt to deliver + // the push notification, if the service is unable to deliver the notification + // the first time. If you don't specify this value, FCM defaults to the maximum + // value, which is 2,419,200 seconds (28 days). + // + // Amazon Pinpoint specifies this value in the FCM time_to_live parameter when + // it sends the notification message to FCM. + TimeToLive *int64 `type:"integer"` + + // The title to display above the notification message on the recipient's device. + Title *string `type:"string"` + + // The URL to open in the recipient's default mobile browser, if a recipient + // taps the push notification and the value of the Action property is URL. + Url *string `type:"string"` } // String returns the string representation -func (s EndpointDemographic) String() string { +func (s GCMMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointDemographic) GoString() string { +func (s GCMMessage) GoString() string { return s.String() } -// SetAppVersion sets the AppVersion field's value. -func (s *EndpointDemographic) SetAppVersion(v string) *EndpointDemographic { - s.AppVersion = &v +// SetAction sets the Action field's value. +func (s *GCMMessage) SetAction(v string) *GCMMessage { + s.Action = &v return s } -// SetLocale sets the Locale field's value. -func (s *EndpointDemographic) SetLocale(v string) *EndpointDemographic { - s.Locale = &v +// SetBody sets the Body field's value. +func (s *GCMMessage) SetBody(v string) *GCMMessage { + s.Body = &v return s } -// SetMake sets the Make field's value. -func (s *EndpointDemographic) SetMake(v string) *EndpointDemographic { - s.Make = &v +// SetCollapseKey sets the CollapseKey field's value. +func (s *GCMMessage) SetCollapseKey(v string) *GCMMessage { + s.CollapseKey = &v return s } -// SetModel sets the Model field's value. -func (s *EndpointDemographic) SetModel(v string) *EndpointDemographic { - s.Model = &v +// SetData sets the Data field's value. +func (s *GCMMessage) SetData(v map[string]*string) *GCMMessage { + s.Data = v return s } -// SetModelVersion sets the ModelVersion field's value. 
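// [editor's illustration, not part of the vendored diff] A sketch of a
// GCMMessage using the priority and time-to-live semantics documented above;
// every value is a placeholder. Assumes the standard imports
// "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/pinpoint".
func exampleGCMMessage() *pinpoint.GCMMessage {
	return &pinpoint.GCMMessage{
		Action:     aws.String("OPEN_APP"),
		Title:      aws.String("Example title"),
		Body:       aws.String("Example body"),
		Priority:   aws.String("normal"), // or "high"; APNs values 5/10 are converted
		TimeToLive: aws.Int64(3600),      // seconds; if unset, FCM applies its 2,419,200-second maximum
		SilentPush: aws.Bool(false),
	}
}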
-func (s *EndpointDemographic) SetModelVersion(v string) *EndpointDemographic { - s.ModelVersion = &v +// SetIconReference sets the IconReference field's value. +func (s *GCMMessage) SetIconReference(v string) *GCMMessage { + s.IconReference = &v return s } -// SetPlatform sets the Platform field's value. -func (s *EndpointDemographic) SetPlatform(v string) *EndpointDemographic { - s.Platform = &v +// SetImageIconUrl sets the ImageIconUrl field's value. +func (s *GCMMessage) SetImageIconUrl(v string) *GCMMessage { + s.ImageIconUrl = &v return s } -// SetPlatformVersion sets the PlatformVersion field's value. -func (s *EndpointDemographic) SetPlatformVersion(v string) *EndpointDemographic { - s.PlatformVersion = &v +// SetImageUrl sets the ImageUrl field's value. +func (s *GCMMessage) SetImageUrl(v string) *GCMMessage { + s.ImageUrl = &v return s } -// SetTimezone sets the Timezone field's value. -func (s *EndpointDemographic) SetTimezone(v string) *EndpointDemographic { - s.Timezone = &v +// SetPriority sets the Priority field's value. +func (s *GCMMessage) SetPriority(v string) *GCMMessage { + s.Priority = &v return s } -// A complex object that holds the status code and message as a result of processing -// an endpoint. -type EndpointItemResponse struct { - _ struct{} `type:"structure"` +// SetRawContent sets the RawContent field's value. +func (s *GCMMessage) SetRawContent(v string) *GCMMessage { + s.RawContent = &v + return s +} - // A custom message associated with the registration of an endpoint when issuing - // a response. - Message *string `type:"string"` +// SetRestrictedPackageName sets the RestrictedPackageName field's value. +func (s *GCMMessage) SetRestrictedPackageName(v string) *GCMMessage { + s.RestrictedPackageName = &v + return s +} - // The status code associated with the merging of an endpoint when issuing a - // response. - StatusCode *int64 `type:"integer"` +// SetSilentPush sets the SilentPush field's value. +func (s *GCMMessage) SetSilentPush(v bool) *GCMMessage { + s.SilentPush = &v + return s } -// String returns the string representation -func (s EndpointItemResponse) String() string { - return awsutil.Prettify(s) +// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. +func (s *GCMMessage) SetSmallImageIconUrl(v string) *GCMMessage { + s.SmallImageIconUrl = &v + return s +} + +// SetSound sets the Sound field's value. +func (s *GCMMessage) SetSound(v string) *GCMMessage { + s.Sound = &v + return s +} + +// SetSubstitutions sets the Substitutions field's value. +func (s *GCMMessage) SetSubstitutions(v map[string][]*string) *GCMMessage { + s.Substitutions = v + return s } -// GoString returns the string representation -func (s EndpointItemResponse) GoString() string { - return s.String() +// SetTimeToLive sets the TimeToLive field's value. +func (s *GCMMessage) SetTimeToLive(v int64) *GCMMessage { + s.TimeToLive = &v + return s } -// SetMessage sets the Message field's value. -func (s *EndpointItemResponse) SetMessage(v string) *EndpointItemResponse { - s.Message = &v +// SetTitle sets the Title field's value. +func (s *GCMMessage) SetTitle(v string) *GCMMessage { + s.Title = &v return s } -// SetStatusCode sets the StatusCode field's value. -func (s *EndpointItemResponse) SetStatusCode(v int64) *EndpointItemResponse { - s.StatusCode = &v +// SetUrl sets the Url field's value. +func (s *GCMMessage) SetUrl(v string) *GCMMessage { + s.Url = &v return s } -// Location data for the endpoint. 
-type EndpointLocation struct { +// Specifies the GPS coordinates of a location. +type GPSCoordinates struct { _ struct{} `type:"structure"` - // The city where the endpoint is located. - City *string `type:"string"` - - // The two-letter code for the country or region of the endpoint. Specified - // as an ISO 3166-1 alpha-2 code, such as "US" for the United States. - Country *string `type:"string"` - - // The latitude of the endpoint location, rounded to one decimal place. - Latitude *float64 `type:"double"` - - // The longitude of the endpoint location, rounded to one decimal place. - Longitude *float64 `type:"double"` - - // The postal code or zip code of the endpoint. - PostalCode *string `type:"string"` + // The latitude coordinate of the location. + // + // Latitude is a required field + Latitude *float64 `type:"double" required:"true"` - // The region of the endpoint location. For example, in the United States, this - // corresponds to a state. - Region *string `type:"string"` + // The longitude coordinate of the location. + // + // Longitude is a required field + Longitude *float64 `type:"double" required:"true"` } // String returns the string representation -func (s EndpointLocation) String() string { +func (s GPSCoordinates) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointLocation) GoString() string { +func (s GPSCoordinates) GoString() string { return s.String() } -// SetCity sets the City field's value. -func (s *EndpointLocation) SetCity(v string) *EndpointLocation { - s.City = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GPSCoordinates) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GPSCoordinates"} + if s.Latitude == nil { + invalidParams.Add(request.NewErrParamRequired("Latitude")) + } + if s.Longitude == nil { + invalidParams.Add(request.NewErrParamRequired("Longitude")) + } -// SetCountry sets the Country field's value. -func (s *EndpointLocation) SetCountry(v string) *EndpointLocation { - s.Country = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetLatitude sets the Latitude field's value. -func (s *EndpointLocation) SetLatitude(v float64) *EndpointLocation { +func (s *GPSCoordinates) SetLatitude(v float64) *GPSCoordinates { s.Latitude = &v return s } // SetLongitude sets the Longitude field's value. -func (s *EndpointLocation) SetLongitude(v float64) *EndpointLocation { +func (s *GPSCoordinates) SetLongitude(v float64) *GPSCoordinates { s.Longitude = &v return s } -// SetPostalCode sets the PostalCode field's value. -func (s *EndpointLocation) SetPostalCode(v string) *EndpointLocation { - s.PostalCode = &v - return s -} - -// SetRegion sets the Region field's value. -func (s *EndpointLocation) SetRegion(v string) *EndpointLocation { - s.Region = &v - return s -} - -// The result from sending a message to an endpoint. -type EndpointMessageResult struct { +// Specifies GPS-based criteria for including or excluding endpoints from a +// segment. +type GPSPointDimension struct { _ struct{} `type:"structure"` - // Address that endpoint message was delivered to. - Address *string `type:"string"` - - // The delivery status of the message. Possible values:SUCCESS - The message - // was successfully delivered to the endpoint.TRANSIENT_FAILURE - A temporary - // error occurred. 
Amazon Pinpoint will attempt to deliver the message again - // later.FAILURE_PERMANENT - An error occurred when delivering the message to - // the endpoint. Amazon Pinpoint won't attempt to send the message again.TIMEOUT - // - The message couldn't be sent within the timeout period.QUIET_TIME - The - // local time for the endpoint was within the QuietTime for the campaign or - // app.DAILY_CAP - The endpoint has received the maximum number of messages - // it can receive within a 24-hour period.HOLDOUT - The endpoint was in a hold - // out treatment for the campaign.THROTTLED - Amazon Pinpoint throttled sending - // to this endpoint.EXPIRED - The endpoint address is expired.CAMPAIGN_CAP - - // The endpoint received the maximum number of messages allowed by the campaign.SERVICE_FAILURE - // - A service-level failure prevented Amazon Pinpoint from delivering the message.UNKNOWN - // - An unknown error occurred. - DeliveryStatus *string `type:"string" enum:"DeliveryStatus"` - - // Unique message identifier associated with the message that was sent. - MessageId *string `type:"string"` - - // Downstream service status code. - StatusCode *int64 `type:"integer"` - - // Status message for message delivery. - StatusMessage *string `type:"string"` + // The GPS coordinates to measure distance from. + // + // Coordinates is a required field + Coordinates *GPSCoordinates `type:"structure" required:"true"` - // If token was updated as part of delivery. (This is GCM Specific) - UpdatedToken *string `type:"string"` + // The range, in kilometers, from the GPS coordinates. + RangeInKilometers *float64 `type:"double"` } // String returns the string representation -func (s EndpointMessageResult) String() string { +func (s GPSPointDimension) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointMessageResult) GoString() string { +func (s GPSPointDimension) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *EndpointMessageResult) SetAddress(v string) *EndpointMessageResult { - s.Address = &v - return s -} - -// SetDeliveryStatus sets the DeliveryStatus field's value. -func (s *EndpointMessageResult) SetDeliveryStatus(v string) *EndpointMessageResult { - s.DeliveryStatus = &v - return s -} - -// SetMessageId sets the MessageId field's value. -func (s *EndpointMessageResult) SetMessageId(v string) *EndpointMessageResult { - s.MessageId = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GPSPointDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GPSPointDimension"} + if s.Coordinates == nil { + invalidParams.Add(request.NewErrParamRequired("Coordinates")) + } + if s.Coordinates != nil { + if err := s.Coordinates.Validate(); err != nil { + invalidParams.AddNested("Coordinates", err.(request.ErrInvalidParams)) + } + } -// SetStatusCode sets the StatusCode field's value. -func (s *EndpointMessageResult) SetStatusCode(v int64) *EndpointMessageResult { - s.StatusCode = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatusMessage sets the StatusMessage field's value. -func (s *EndpointMessageResult) SetStatusMessage(v string) *EndpointMessageResult { - s.StatusMessage = &v +// SetCoordinates sets the Coordinates field's value. 
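// [editor's illustration, not part of the vendored diff] A sketch of a
// GPS-based segment criterion built from the two types above; the coordinates
// and radius are placeholders.
func exampleGPSPointDimension() (*pinpoint.GPSPointDimension, error) {
	point := &pinpoint.GPSPointDimension{
		Coordinates: &pinpoint.GPSCoordinates{
			Latitude:  aws.Float64(47.61),
			Longitude: aws.Float64(-122.33),
		},
		RangeInKilometers: aws.Float64(25),
	}
	// Validate requires Coordinates and surfaces nested latitude/longitude
	// failures under the "Coordinates" path, as defined above.
	if err := point.Validate(); err != nil {
		return nil, err
	}
	return point, nil
}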
+func (s *GPSPointDimension) SetCoordinates(v *GPSCoordinates) *GPSPointDimension { + s.Coordinates = v return s } -// SetUpdatedToken sets the UpdatedToken field's value. -func (s *EndpointMessageResult) SetUpdatedToken(v string) *EndpointMessageResult { - s.UpdatedToken = &v +// SetRangeInKilometers sets the RangeInKilometers field's value. +func (s *GPSPointDimension) SetRangeInKilometers(v float64) *GPSPointDimension { + s.RangeInKilometers = &v return s } -// An endpoint update request. -type EndpointRequest struct { +type GetAdmChannelInput struct { _ struct{} `type:"structure"` - // The destination for messages that you send to this endpoint. The address - // varies by channel. For mobile push channels, use the token provided by the - // push notification service, such as the APNs device token or the FCM registration - // token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. - // For the email channel, use an email address. - Address *string `type:"string"` - - // Custom attributes that describe the endpoint by associating a name with an - // array of values. For example, an attribute named "interests" might have the - // values ["science", "politics", "travel"]. You can use these attributes as - // selection criteria when you create a segment of users to engage with a messaging - // campaign.The following characters are not recommended in attribute names: - // # : ? \ /. The Amazon Pinpoint console does not display attributes that include - // these characters in the name. This limitation does not apply to attribute - // values. - Attributes map[string][]*string `type:"map"` - - // The channel type.Valid values: GCM | APNS | APNS_SANDBOX | APNS_VOIP | APNS_VOIP_SANDBOX - // | ADM | SMS | EMAIL | BAIDU - ChannelType *string `type:"string" enum:"ChannelType"` - - // Demographic attributes for the endpoint. - Demographic *EndpointDemographic `type:"structure"` - - // The date and time when the endpoint was updated, shown in ISO 8601 format. - EffectiveDate *string `type:"string"` - - // Unused. - EndpointStatus *string `type:"string"` - - // The endpoint location attributes. - Location *EndpointLocation `type:"structure"` - - // Custom metrics that your app reports to Amazon Pinpoint. - Metrics map[string]*float64 `type:"map"` - - // Indicates whether a user has opted out of receiving messages with one of - // the following values:ALL - User has opted out of all messages.NONE - Users - // has not opted out and receives all messages. - OptOut *string `type:"string"` - - // The unique ID for the most recent request to update the endpoint. - RequestId *string `type:"string"` - - // Custom user-specific attributes that your app reports to Amazon Pinpoint. - User *EndpointUser `type:"structure"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EndpointRequest) String() string { +func (s GetAdmChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointRequest) GoString() string { +func (s GetAdmChannelInput) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *EndpointRequest) SetAddress(v string) *EndpointRequest { - s.Address = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
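The GPSCoordinates/GPSPointDimension pair above illustrates the generated SDK's nested-validation convention: the parent's Validate() checks its own required members first, then delegates to the child and wraps any failure with AddNested. A minimal usage sketch built only from the setters and Validate methods shown above (the coordinates and radius are illustrative values):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// Each Set* method returns its receiver, so construction chains.
	dim := (&pinpoint.GPSPointDimension{}).
		SetCoordinates((&pinpoint.GPSCoordinates{}).
			SetLatitude(47.61).
			SetLongitude(-122.33)).
		SetRangeInKilometers(25)

	// Validate reports missing required fields (Coordinates, Latitude,
	// Longitude) client-side, before any request would be built or sent.
	if err := dim.Validate(); err != nil {
		fmt.Println("invalid dimension:", err)
	}
}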
+func (s *GetAdmChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAdmChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } -// SetAttributes sets the Attributes field's value. -func (s *EndpointRequest) SetAttributes(v map[string][]*string) *EndpointRequest { - s.Attributes = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetChannelType sets the ChannelType field's value. -func (s *EndpointRequest) SetChannelType(v string) *EndpointRequest { - s.ChannelType = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetAdmChannelInput) SetApplicationId(v string) *GetAdmChannelInput { + s.ApplicationId = &v return s } -// SetDemographic sets the Demographic field's value. -func (s *EndpointRequest) SetDemographic(v *EndpointDemographic) *EndpointRequest { - s.Demographic = v - return s -} +type GetAdmChannelOutput struct { + _ struct{} `type:"structure" payload:"ADMChannelResponse"` -// SetEffectiveDate sets the EffectiveDate field's value. -func (s *EndpointRequest) SetEffectiveDate(v string) *EndpointRequest { - s.EffectiveDate = &v - return s + // Provides information about the status and settings of the ADM (Amazon Device + // Messaging) channel for an application. + // + // ADMChannelResponse is a required field + ADMChannelResponse *ADMChannelResponse `type:"structure" required:"true"` } -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *EndpointRequest) SetEndpointStatus(v string) *EndpointRequest { - s.EndpointStatus = &v - return s +// String returns the string representation +func (s GetAdmChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetLocation sets the Location field's value. -func (s *EndpointRequest) SetLocation(v *EndpointLocation) *EndpointRequest { - s.Location = v - return s +// GoString returns the string representation +func (s GetAdmChannelOutput) GoString() string { + return s.String() } -// SetMetrics sets the Metrics field's value. -func (s *EndpointRequest) SetMetrics(v map[string]*float64) *EndpointRequest { - s.Metrics = v +// SetADMChannelResponse sets the ADMChannelResponse field's value. +func (s *GetAdmChannelOutput) SetADMChannelResponse(v *ADMChannelResponse) *GetAdmChannelOutput { + s.ADMChannelResponse = v return s } -// SetOptOut sets the OptOut field's value. -func (s *EndpointRequest) SetOptOut(v string) *EndpointRequest { - s.OptOut = &v - return s -} +type GetApnsChannelInput struct { + _ struct{} `type:"structure"` -// SetRequestId sets the RequestId field's value. -func (s *EndpointRequest) SetRequestId(v string) *EndpointRequest { - s.RequestId = &v - return s + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } -// SetUser sets the User field's value. -func (s *EndpointRequest) SetUser(v *EndpointUser) *EndpointRequest { - s.User = v - return s +// String returns the string representation +func (s GetApnsChannelInput) String() string { + return awsutil.Prettify(s) } -// Endpoint response -type EndpointResponse struct { - _ struct{} `type:"structure"` - - // The address of the endpoint as provided by your push provider. For example, - // the DeviceToken or RegistrationId. 
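Every Get*ChannelInput introduced in this block has the same shape: a single ApplicationId bound to the application-id URI segment and validated as both required and non-empty, so a bad value fails locally instead of producing a malformed request path. A hedged sketch of the call side, assuming the usual session setup and the generated GetAdmChannel operation defined elsewhere in this file:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func admChannel(appID string) (*pinpoint.ADMChannelResponse, error) {
	svc := pinpoint.New(session.Must(session.NewSession()))

	// ApplicationId is interpolated into the request URI; the SDK's
	// validation handler runs Validate before the HTTP request is built.
	out, err := svc.GetAdmChannel(&pinpoint.GetAdmChannelInput{
		ApplicationId: aws.String(appID),
	})
	if err != nil {
		return nil, err
	}
	return out.ADMChannelResponse, nil
}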
- Address *string `type:"string"` - - // The ID of the application that is associated with the endpoint. - ApplicationId *string `type:"string"` - - // Custom attributes that describe the endpoint by associating a name with an - // array of values. For example, an attribute named "interests" might have the - // following values: ["science", "politics", "travel"]. You can use these attributes - // as selection criteria when you create segments.The Amazon Pinpoint console - // can't display attribute names that include the following characters: hash/pound - // sign (#), colon (:), question mark (?), backslash (\), and forward slash - // (/). For this reason, you should avoid using these characters in the names - // of custom attributes. - Attributes map[string][]*string `type:"map"` - - // The channel type.Valid values: GCM | APNS | APNS_SANDBOX | APNS_VOIP | APNS_VOIP_SANDBOX - // | ADM | SMS | EMAIL | BAIDU - ChannelType *string `type:"string" enum:"ChannelType"` - - // A number from 0-99 that represents the cohort the endpoint is assigned to. - // Endpoints are grouped into cohorts randomly, and each cohort contains approximately - // 1 percent of the endpoints for an app. Amazon Pinpoint assigns cohorts to - // the holdout or treatment allocations for a campaign. - CohortId *string `type:"string"` +// GoString returns the string representation +func (s GetApnsChannelInput) GoString() string { + return s.String() +} - // The date and time when the endpoint was created, shown in ISO 8601 format. - CreationDate *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApnsChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApnsChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } - // The endpoint demographic attributes. - Demographic *EndpointDemographic `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The date and time when the endpoint was last updated, shown in ISO 8601 format. - EffectiveDate *string `type:"string"` +// SetApplicationId sets the ApplicationId field's value. +func (s *GetApnsChannelInput) SetApplicationId(v string) *GetApnsChannelInput { + s.ApplicationId = &v + return s +} - // Unused. - EndpointStatus *string `type:"string"` +type GetApnsChannelOutput struct { + _ struct{} `type:"structure" payload:"APNSChannelResponse"` - // The unique ID that you assigned to the endpoint. The ID should be a globally - // unique identifier (GUID) to ensure that it doesn't conflict with other endpoint - // IDs associated with the application. - Id *string `type:"string"` + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) channel for an application. + // + // APNSChannelResponse is a required field + APNSChannelResponse *APNSChannelResponse `type:"structure" required:"true"` +} - // The endpoint location attributes. - Location *EndpointLocation `type:"structure"` +// String returns the string representation +func (s GetApnsChannelOutput) String() string { + return awsutil.Prettify(s) +} - // Custom metrics that your app reports to Amazon Pinpoint. 
- Metrics map[string]*float64 `type:"map"` +// GoString returns the string representation +func (s GetApnsChannelOutput) GoString() string { + return s.String() +} - // Indicates whether a user has opted out of receiving messages with one of - // the following values:ALL - User has opted out of all messages.NONE - Users - // has not opted out and receives all messages. - OptOut *string `type:"string"` +// SetAPNSChannelResponse sets the APNSChannelResponse field's value. +func (s *GetApnsChannelOutput) SetAPNSChannelResponse(v *APNSChannelResponse) *GetApnsChannelOutput { + s.APNSChannelResponse = v + return s +} - // The unique ID for the most recent request to update the endpoint. - RequestId *string `type:"string"` +type GetApnsSandboxChannelInput struct { + _ struct{} `type:"structure"` - // Custom user-specific attributes that your app reports to Amazon Pinpoint. - User *EndpointUser `type:"structure"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EndpointResponse) String() string { +func (s GetApnsSandboxChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointResponse) GoString() string { +func (s GetApnsSandboxChannelInput) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *EndpointResponse) SetAddress(v string) *EndpointResponse { - s.Address = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApnsSandboxChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApnsSandboxChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetApplicationId sets the ApplicationId field's value. -func (s *EndpointResponse) SetApplicationId(v string) *EndpointResponse { +func (s *GetApnsSandboxChannelInput) SetApplicationId(v string) *GetApnsSandboxChannelInput { s.ApplicationId = &v return s } -// SetAttributes sets the Attributes field's value. -func (s *EndpointResponse) SetAttributes(v map[string][]*string) *EndpointResponse { - s.Attributes = v - return s +type GetApnsSandboxChannelOutput struct { + _ struct{} `type:"structure" payload:"APNSSandboxChannelResponse"` + + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) sandbox channel for an application. + // + // APNSSandboxChannelResponse is a required field + APNSSandboxChannelResponse *APNSSandboxChannelResponse `type:"structure" required:"true"` } -// SetChannelType sets the ChannelType field's value. -func (s *EndpointResponse) SetChannelType(v string) *EndpointResponse { - s.ChannelType = &v - return s +// String returns the string representation +func (s GetApnsSandboxChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetCohortId sets the CohortId field's value. -func (s *EndpointResponse) SetCohortId(v string) *EndpointResponse { - s.CohortId = &v - return s +// GoString returns the string representation +func (s GetApnsSandboxChannelOutput) GoString() string { + return s.String() } -// SetCreationDate sets the CreationDate field's value. 
-func (s *EndpointResponse) SetCreationDate(v string) *EndpointResponse { - s.CreationDate = &v +// SetAPNSSandboxChannelResponse sets the APNSSandboxChannelResponse field's value. +func (s *GetApnsSandboxChannelOutput) SetAPNSSandboxChannelResponse(v *APNSSandboxChannelResponse) *GetApnsSandboxChannelOutput { + s.APNSSandboxChannelResponse = v return s } -// SetDemographic sets the Demographic field's value. -func (s *EndpointResponse) SetDemographic(v *EndpointDemographic) *EndpointResponse { - s.Demographic = v - return s +type GetApnsVoipChannelInput struct { + _ struct{} `type:"structure"` + + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } -// SetEffectiveDate sets the EffectiveDate field's value. -func (s *EndpointResponse) SetEffectiveDate(v string) *EndpointResponse { - s.EffectiveDate = &v - return s +// String returns the string representation +func (s GetApnsVoipChannelInput) String() string { + return awsutil.Prettify(s) } -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *EndpointResponse) SetEndpointStatus(v string) *EndpointResponse { - s.EndpointStatus = &v - return s +// GoString returns the string representation +func (s GetApnsVoipChannelInput) GoString() string { + return s.String() } -// SetId sets the Id field's value. -func (s *EndpointResponse) SetId(v string) *EndpointResponse { - s.Id = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApnsVoipChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApnsVoipChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLocation sets the Location field's value. -func (s *EndpointResponse) SetLocation(v *EndpointLocation) *EndpointResponse { - s.Location = v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetApnsVoipChannelInput) SetApplicationId(v string) *GetApnsVoipChannelInput { + s.ApplicationId = &v return s } -// SetMetrics sets the Metrics field's value. -func (s *EndpointResponse) SetMetrics(v map[string]*float64) *EndpointResponse { - s.Metrics = v - return s +type GetApnsVoipChannelOutput struct { + _ struct{} `type:"structure" payload:"APNSVoipChannelResponse"` + + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) VoIP channel for an application. + // + // APNSVoipChannelResponse is a required field + APNSVoipChannelResponse *APNSVoipChannelResponse `type:"structure" required:"true"` } -// SetOptOut sets the OptOut field's value. -func (s *EndpointResponse) SetOptOut(v string) *EndpointResponse { - s.OptOut = &v - return s +// String returns the string representation +func (s GetApnsVoipChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetRequestId sets the RequestId field's value. -func (s *EndpointResponse) SetRequestId(v string) *EndpointResponse { - s.RequestId = &v - return s +// GoString returns the string representation +func (s GetApnsVoipChannelOutput) GoString() string { + return s.String() } -// SetUser sets the User field's value. 
-func (s *EndpointResponse) SetUser(v *EndpointUser) *EndpointResponse { - s.User = v +// SetAPNSVoipChannelResponse sets the APNSVoipChannelResponse field's value. +func (s *GetApnsVoipChannelOutput) SetAPNSVoipChannelResponse(v *APNSVoipChannelResponse) *GetApnsVoipChannelOutput { + s.APNSVoipChannelResponse = v return s } -// Endpoint send configuration. -type EndpointSendConfiguration struct { +type GetApnsVoipSandboxChannelInput struct { _ struct{} `type:"structure"` - // Body override. If specified will override default body. - BodyOverride *string `type:"string"` - - // A map of custom attributes to attributes to be attached to the message for - // this address. This payload is added to the push notification's 'data.pinpoint' - // object or added to the email/sms delivery receipt event attributes. - Context map[string]*string `type:"map"` - - // The Raw JSON formatted string to be used as the payload. This value overrides - // the message. - RawContent *string `type:"string"` - - // A map of substitution values for the message to be merged with the DefaultMessage's - // substitutions. Substitutions on this map take precedence over the all other - // substitutions. - Substitutions map[string][]*string `type:"map"` - - // Title override. If specified will override default title if applicable. - TitleOverride *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EndpointSendConfiguration) String() string { +func (s GetApnsVoipSandboxChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointSendConfiguration) GoString() string { +func (s GetApnsVoipSandboxChannelInput) GoString() string { return s.String() } -// SetBodyOverride sets the BodyOverride field's value. -func (s *EndpointSendConfiguration) SetBodyOverride(v string) *EndpointSendConfiguration { - s.BodyOverride = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApnsVoipSandboxChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApnsVoipSandboxChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetContext sets the Context field's value. -func (s *EndpointSendConfiguration) SetContext(v map[string]*string) *EndpointSendConfiguration { - s.Context = v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetApnsVoipSandboxChannelInput) SetApplicationId(v string) *GetApnsVoipSandboxChannelInput { + s.ApplicationId = &v return s } -// SetRawContent sets the RawContent field's value. -func (s *EndpointSendConfiguration) SetRawContent(v string) *EndpointSendConfiguration { - s.RawContent = &v - return s +type GetApnsVoipSandboxChannelOutput struct { + _ struct{} `type:"structure" payload:"APNSVoipSandboxChannelResponse"` + + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) VoIP sandbox channel for an application. 
+ // + // APNSVoipSandboxChannelResponse is a required field + APNSVoipSandboxChannelResponse *APNSVoipSandboxChannelResponse `type:"structure" required:"true"` } -// SetSubstitutions sets the Substitutions field's value. -func (s *EndpointSendConfiguration) SetSubstitutions(v map[string][]*string) *EndpointSendConfiguration { - s.Substitutions = v - return s +// String returns the string representation +func (s GetApnsVoipSandboxChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetTitleOverride sets the TitleOverride field's value. -func (s *EndpointSendConfiguration) SetTitleOverride(v string) *EndpointSendConfiguration { - s.TitleOverride = &v - return s +// GoString returns the string representation +func (s GetApnsVoipSandboxChannelOutput) GoString() string { + return s.String() } -// Endpoint user specific custom userAttributes -type EndpointUser struct { - _ struct{} `type:"structure"` +// SetAPNSVoipSandboxChannelResponse sets the APNSVoipSandboxChannelResponse field's value. +func (s *GetApnsVoipSandboxChannelOutput) SetAPNSVoipSandboxChannelResponse(v *APNSVoipSandboxChannelResponse) *GetApnsVoipSandboxChannelOutput { + s.APNSVoipSandboxChannelResponse = v + return s +} - // Custom attributes that describe the user by associating a name with an array - // of values. For example, an attribute named "interests" might have the following - // values: ["science", "politics", "travel"]. You can use these attributes as - // selection criteria when you create segments.The Amazon Pinpoint console can't - // display attribute names that include the following characters: hash/pound - // sign (#), colon (:), question mark (?), backslash (\), and forward slash - // (/). For this reason, you should avoid using these characters in the names - // of custom attributes. - UserAttributes map[string][]*string `type:"map"` +type GetAppInput struct { + _ struct{} `type:"structure"` - // The unique ID of the user. - UserId *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EndpointUser) String() string { +func (s GetAppInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointUser) GoString() string { +func (s GetAppInput) GoString() string { return s.String() } -// SetUserAttributes sets the UserAttributes field's value. -func (s *EndpointUser) SetUserAttributes(v map[string][]*string) *EndpointUser { - s.UserAttributes = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAppInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetUserId sets the UserId field's value. -func (s *EndpointUser) SetUserId(v string) *EndpointUser { - s.UserId = &v +// SetApplicationId sets the ApplicationId field's value. 
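The four APNs getters (channel, sandbox, VoIP, and VoIP sandbox) are structural clones of the ADM one: same one-field input, same required single-payload output, differing only in operation and type names. A compact, illustrative fragment reusing svc and appID from the sketch above, with error handling elided for brevity:

apns, _ := svc.GetApnsChannel(&pinpoint.GetApnsChannelInput{
	ApplicationId: aws.String(appID)})
sandbox, _ := svc.GetApnsSandboxChannel(&pinpoint.GetApnsSandboxChannelInput{
	ApplicationId: aws.String(appID)})
voip, _ := svc.GetApnsVoipChannel(&pinpoint.GetApnsVoipChannelInput{
	ApplicationId: aws.String(appID)})

// Each response wraps its own payload type: apns.APNSChannelResponse,
// sandbox.APNSSandboxChannelResponse, voip.APNSVoipChannelResponse.
fmt.Println(apns, sandbox, voip)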
+func (s *GetAppInput) SetApplicationId(v string) *GetAppInput { + s.ApplicationId = &v return s } -// List of endpoints -type EndpointsResponse struct { - _ struct{} `type:"structure"` +type GetAppOutput struct { + _ struct{} `type:"structure" payload:"ApplicationResponse"` - // The list of endpoints. - Item []*EndpointResponse `type:"list"` + // Provides information about an application. + // + // ApplicationResponse is a required field + ApplicationResponse *ApplicationResponse `type:"structure" required:"true"` } // String returns the string representation -func (s EndpointsResponse) String() string { +func (s GetAppOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EndpointsResponse) GoString() string { +func (s GetAppOutput) GoString() string { return s.String() } -// SetItem sets the Item field's value. -func (s *EndpointsResponse) SetItem(v []*EndpointResponse) *EndpointsResponse { - s.Item = v +// SetApplicationResponse sets the ApplicationResponse field's value. +func (s *GetAppOutput) SetApplicationResponse(v *ApplicationResponse) *GetAppOutput { + s.ApplicationResponse = v return s } -// Model for creating or updating events. -type Event struct { +type GetApplicationDateRangeKpiInput struct { _ struct{} `type:"structure"` - // The package name associated with the app that's recording the event. - AppPackageName *string `type:"string"` - - // The title of the app that's recording the event. - AppTitle *string `type:"string"` - - // The version number of the app that's recording the event. - AppVersionCode *string `type:"string"` - - // Custom attributes that are associated with the event you're adding or updating. - Attributes map[string]*string `type:"map"` - - // The version of the SDK that's running on the client device. - ClientSdkVersion *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // The name of the custom event that you're recording. - EventType *string `type:"string"` + EndTime *time.Time `location:"querystring" locationName:"end-time" type:"timestamp" timestampFormat:"iso8601"` - // Custom metrics related to the event. - Metrics map[string]*float64 `type:"map"` + // KpiName is a required field + KpiName *string `location:"uri" locationName:"kpi-name" type:"string" required:"true"` - // The name of the SDK that's being used to record the event. - SdkName *string `type:"string"` + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // Information about the session in which the event occurred. - Session *Session `type:"structure"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - // The date and time when the event occurred, in ISO 8601 format. - Timestamp *string `type:"string"` + StartTime *time.Time `location:"querystring" locationName:"start-time" type:"timestamp" timestampFormat:"iso8601"` } // String returns the string representation -func (s Event) String() string { +func (s GetApplicationDateRangeKpiInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Event) GoString() string { +func (s GetApplicationDateRangeKpiInput) GoString() string { return s.String() } -// SetAppPackageName sets the AppPackageName field's value. -func (s *Event) SetAppPackageName(v string) *Event { - s.AppPackageName = &v - return s -} - -// SetAppTitle sets the AppTitle field's value. 
-func (s *Event) SetAppTitle(v string) *Event { - s.AppTitle = &v - return s -} - -// SetAppVersionCode sets the AppVersionCode field's value. -func (s *Event) SetAppVersionCode(v string) *Event { - s.AppVersionCode = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApplicationDateRangeKpiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApplicationDateRangeKpiInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.KpiName == nil { + invalidParams.Add(request.NewErrParamRequired("KpiName")) + } + if s.KpiName != nil && len(*s.KpiName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KpiName", 1)) + } -// SetAttributes sets the Attributes field's value. -func (s *Event) SetAttributes(v map[string]*string) *Event { - s.Attributes = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetClientSdkVersion sets the ClientSdkVersion field's value. -func (s *Event) SetClientSdkVersion(v string) *Event { - s.ClientSdkVersion = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetApplicationDateRangeKpiInput) SetApplicationId(v string) *GetApplicationDateRangeKpiInput { + s.ApplicationId = &v return s } -// SetEventType sets the EventType field's value. -func (s *Event) SetEventType(v string) *Event { - s.EventType = &v +// SetEndTime sets the EndTime field's value. +func (s *GetApplicationDateRangeKpiInput) SetEndTime(v time.Time) *GetApplicationDateRangeKpiInput { + s.EndTime = &v return s } -// SetMetrics sets the Metrics field's value. -func (s *Event) SetMetrics(v map[string]*float64) *Event { - s.Metrics = v +// SetKpiName sets the KpiName field's value. +func (s *GetApplicationDateRangeKpiInput) SetKpiName(v string) *GetApplicationDateRangeKpiInput { + s.KpiName = &v return s } -// SetSdkName sets the SdkName field's value. -func (s *Event) SetSdkName(v string) *Event { - s.SdkName = &v +// SetNextToken sets the NextToken field's value. +func (s *GetApplicationDateRangeKpiInput) SetNextToken(v string) *GetApplicationDateRangeKpiInput { + s.NextToken = &v return s } -// SetSession sets the Session field's value. -func (s *Event) SetSession(v *Session) *Event { - s.Session = v +// SetPageSize sets the PageSize field's value. +func (s *GetApplicationDateRangeKpiInput) SetPageSize(v string) *GetApplicationDateRangeKpiInput { + s.PageSize = &v return s } -// SetTimestamp sets the Timestamp field's value. -func (s *Event) SetTimestamp(v string) *Event { - s.Timestamp = &v +// SetStartTime sets the StartTime field's value. +func (s *GetApplicationDateRangeKpiInput) SetStartTime(v time.Time) *GetApplicationDateRangeKpiInput { + s.StartTime = &v return s } -// Event dimensions. -type EventDimensions struct { - _ struct{} `type:"structure"` - - // Custom attributes that your app reports to Amazon Pinpoint. You can use these - // attributes as selection criteria when you create an event filter. - Attributes map[string]*AttributeDimension `type:"map"` +type GetApplicationDateRangeKpiOutput struct { + _ struct{} `type:"structure" payload:"ApplicationDateRangeKpiResponse"` - // The name of the event that causes the campaign to be sent. 
This can be a - // standard event type that Amazon Pinpoint generates, such as _session.start, - // or a custom event that's specific to your app. - EventType *SetDimension `type:"structure"` - - // Custom metrics that your app reports to Amazon Pinpoint. You can use these - // attributes as selection criteria when you create an event filter. - Metrics map[string]*MetricDimension `type:"map"` + // Provides the results of a query that retrieved the data for a standard metric + // that applies to an application, and provides information about that query. + // + // ApplicationDateRangeKpiResponse is a required field + ApplicationDateRangeKpiResponse *ApplicationDateRangeKpiResponse `type:"structure" required:"true"` } // String returns the string representation -func (s EventDimensions) String() string { +func (s GetApplicationDateRangeKpiOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventDimensions) GoString() string { +func (s GetApplicationDateRangeKpiOutput) GoString() string { return s.String() } -// SetAttributes sets the Attributes field's value. -func (s *EventDimensions) SetAttributes(v map[string]*AttributeDimension) *EventDimensions { - s.Attributes = v - return s -} - -// SetEventType sets the EventType field's value. -func (s *EventDimensions) SetEventType(v *SetDimension) *EventDimensions { - s.EventType = v - return s -} - -// SetMetrics sets the Metrics field's value. -func (s *EventDimensions) SetMetrics(v map[string]*MetricDimension) *EventDimensions { - s.Metrics = v +// SetApplicationDateRangeKpiResponse sets the ApplicationDateRangeKpiResponse field's value. +func (s *GetApplicationDateRangeKpiOutput) SetApplicationDateRangeKpiResponse(v *ApplicationDateRangeKpiResponse) *GetApplicationDateRangeKpiOutput { + s.ApplicationDateRangeKpiResponse = v return s } -// A complex object that holds the status code and message as a result of processing -// an event. -type EventItemResponse struct { +type GetApplicationSettingsInput struct { _ struct{} `type:"structure"` - // A custom message that is associated with the processing of an event. - Message *string `type:"string"` - - // The status returned in the response as a result of processing the event.Possible - // values: 400 (for invalid events) and 202 (for events that were accepted). - StatusCode *int64 `type:"integer"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EventItemResponse) String() string { +func (s GetApplicationSettingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventItemResponse) GoString() string { +func (s GetApplicationSettingsInput) GoString() string { return s.String() } -// SetMessage sets the Message field's value. -func (s *EventItemResponse) SetMessage(v string) *EventItemResponse { - s.Message = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
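GetApplicationDateRangeKpiInput above mixes the three parameter bindings this REST API uses: ApplicationId and KpiName travel as URI path segments, while StartTime, EndTime, NextToken, and PageSize are query-string values, with the timestamps serialized as ISO 8601. The corresponding Output carries a payload:"..." struct tag, which tells the REST unmarshaler that the entire response body decodes into that single member. A hedged fragment (svc as before; the KPI name is illustrative):

end := time.Now()
start := end.AddDate(0, 0, -7) // query the last seven days

in := (&pinpoint.GetApplicationDateRangeKpiInput{}).
	SetApplicationId(appID).         // URI: application-id
	SetKpiName("unique-deliveries"). // URI: kpi-name (illustrative)
	SetStartTime(start).             // querystring, ISO 8601
	SetEndTime(end)

out, err := svc.GetApplicationDateRangeKpi(in)
if err != nil {
	log.Fatal(err)
}
// The whole HTTP body unmarshals into this one payload member.
fmt.Println(out.ApplicationDateRangeKpiResponse)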
+func (s *GetApplicationSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApplicationSettingsInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatusCode sets the StatusCode field's value. -func (s *EventItemResponse) SetStatusCode(v int64) *EventItemResponse { - s.StatusCode = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetApplicationSettingsInput) SetApplicationId(v string) *GetApplicationSettingsInput { + s.ApplicationId = &v return s } -// Model for an event publishing subscription export. -type EventStream struct { - _ struct{} `type:"structure"` - - // The ID of the application from which events should be published. - ApplicationId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery - // stream to which you want to publish events. Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME - // Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME - DestinationStreamArn *string `type:"string"` - - // (Deprecated) Your AWS account ID, which you assigned to the ExternalID key - // in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This - // requirement is removed, and external IDs are not recommended for IAM roles - // assumed by Amazon Pinpoint. - ExternalId *string `type:"string"` - - // The date the event stream was last updated in ISO 8601 format. - LastModifiedDate *string `type:"string"` - - // The IAM user who last modified the event stream. - LastUpdatedBy *string `type:"string"` +type GetApplicationSettingsOutput struct { + _ struct{} `type:"structure" payload:"ApplicationSettingsResource"` - // The IAM role that authorizes Amazon Pinpoint to publish events to the stream - // in your account. - RoleArn *string `type:"string"` + // Provides information about an application, including the default settings + // for an application. + // + // ApplicationSettingsResource is a required field + ApplicationSettingsResource *ApplicationSettingsResource `type:"structure" required:"true"` } // String returns the string representation -func (s EventStream) String() string { +func (s GetApplicationSettingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventStream) GoString() string { +func (s GetApplicationSettingsOutput) GoString() string { return s.String() } -// SetApplicationId sets the ApplicationId field's value. -func (s *EventStream) SetApplicationId(v string) *EventStream { - s.ApplicationId = &v - return s -} - -// SetDestinationStreamArn sets the DestinationStreamArn field's value. -func (s *EventStream) SetDestinationStreamArn(v string) *EventStream { - s.DestinationStreamArn = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *EventStream) SetExternalId(v string) *EventStream { - s.ExternalId = &v - return s -} - -// SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *EventStream) SetLastModifiedDate(v string) *EventStream { - s.LastModifiedDate = &v - return s -} - -// SetLastUpdatedBy sets the LastUpdatedBy field's value. 
-func (s *EventStream) SetLastUpdatedBy(v string) *EventStream { - s.LastUpdatedBy = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *EventStream) SetRoleArn(v string) *EventStream { - s.RoleArn = &v +// SetApplicationSettingsResource sets the ApplicationSettingsResource field's value. +func (s *GetApplicationSettingsOutput) SetApplicationSettingsResource(v *ApplicationSettingsResource) *GetApplicationSettingsOutput { + s.ApplicationSettingsResource = v return s } -// A batch of PublicEndpoints and Events to process. -type EventsBatch struct { +type GetAppsInput struct { _ struct{} `type:"structure"` - // The PublicEndpoint attached to the EndpointId from the request. - Endpoint *PublicEndpoint `type:"structure"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - // An object that contains a set of events associated with the endpoint. - Events map[string]*Event `type:"map"` + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s EventsBatch) String() string { +func (s GetAppsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventsBatch) GoString() string { +func (s GetAppsInput) GoString() string { return s.String() } -// SetEndpoint sets the Endpoint field's value. -func (s *EventsBatch) SetEndpoint(v *PublicEndpoint) *EventsBatch { - s.Endpoint = v +// SetPageSize sets the PageSize field's value. +func (s *GetAppsInput) SetPageSize(v string) *GetAppsInput { + s.PageSize = &v return s } -// SetEvents sets the Events field's value. -func (s *EventsBatch) SetEvents(v map[string]*Event) *EventsBatch { - s.Events = v +// SetToken sets the Token field's value. +func (s *GetAppsInput) SetToken(v string) *GetAppsInput { + s.Token = &v return s } -// A set of events to process. -type EventsRequest struct { - _ struct{} `type:"structure"` +type GetAppsOutput struct { + _ struct{} `type:"structure" payload:"ApplicationsResponse"` - // A batch of events to process. Each BatchItem consists of an endpoint ID as - // the key, and an EventsBatch object as the value. - BatchItem map[string]*EventsBatch `type:"map"` + // Provides information about all of your applications. + // + // ApplicationsResponse is a required field + ApplicationsResponse *ApplicationsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s EventsRequest) String() string { +func (s GetAppsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventsRequest) GoString() string { +func (s GetAppsOutput) GoString() string { return s.String() } -// SetBatchItem sets the BatchItem field's value. -func (s *EventsRequest) SetBatchItem(v map[string]*EventsBatch) *EventsRequest { - s.BatchItem = v +// SetApplicationsResponse sets the ApplicationsResponse field's value. +func (s *GetAppsOutput) SetApplicationsResponse(v *ApplicationsResponse) *GetAppsOutput { + s.ApplicationsResponse = v return s } -// Custom messages associated with events. -type EventsResponse struct { +type GetBaiduChannelInput struct { _ struct{} `type:"structure"` - // A map that contains a multipart response for each endpoint. 
Each item in - // this object uses the endpoint ID as the key, and the item response as the - // value.If no item response exists, the value can also be one of the following: - // 202 (if the request was processed successfully) or 400 (if the payload was - // invalid, or required fields were missing). - Results map[string]*ItemResponse `type:"map"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s EventsResponse) String() string { +func (s GetBaiduChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EventsResponse) GoString() string { +func (s GetBaiduChannelInput) GoString() string { return s.String() } -// SetResults sets the Results field's value. -func (s *EventsResponse) SetResults(v map[string]*ItemResponse) *EventsResponse { - s.Results = v - return s -} - -// Export job request. -type ExportJobRequest struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBaiduChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBaiduChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } - // The Amazon Resource Name (ARN) of an IAM role that grants Amazon Pinpoint - // access to the Amazon S3 location that endpoints will be exported to. - RoleArn *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // A URL that points to the location within an Amazon S3 bucket that will receive - // the export. The location is typically a folder with multiple files.The URL - // should follow this format: s3://bucket-name/folder-name/Amazon Pinpoint will - // export endpoints to this location. - S3UrlPrefix *string `type:"string"` +// SetApplicationId sets the ApplicationId field's value. +func (s *GetBaiduChannelInput) SetApplicationId(v string) *GetBaiduChannelInput { + s.ApplicationId = &v + return s +} - // The ID of the segment to export endpoints from. If not present, Amazon Pinpoint - // exports all of the endpoints that belong to the application. - SegmentId *string `type:"string"` +type GetBaiduChannelOutput struct { + _ struct{} `type:"structure" payload:"BaiduChannelResponse"` - // The version of the segment to export if specified. - SegmentVersion *int64 `type:"integer"` + // Provides information about the status and settings of the Baidu (Baidu Cloud + // Push) channel for an application. + // + // BaiduChannelResponse is a required field + BaiduChannelResponse *BaiduChannelResponse `type:"structure" required:"true"` } // String returns the string representation -func (s ExportJobRequest) String() string { +func (s GetBaiduChannelOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportJobRequest) GoString() string { +func (s GetBaiduChannelOutput) GoString() string { return s.String() } -// SetRoleArn sets the RoleArn field's value. -func (s *ExportJobRequest) SetRoleArn(v string) *ExportJobRequest { - s.RoleArn = &v - return s -} - -// SetS3UrlPrefix sets the S3UrlPrefix field's value. 
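GetAppsInput just above is the list-style variant: it has no required members, so no Validate method is generated for it, only optional page-size and token query parameters. A pagination sketch; note that it assumes ApplicationsResponse exposes Item and NextToken members, which this hunk does not show:

var token *string
for {
	out, err := svc.GetApps(&pinpoint.GetAppsInput{
		PageSize: aws.String("50"),
		Token:    token,
	})
	if err != nil {
		log.Fatal(err)
	}
	apps := out.ApplicationsResponse
	fmt.Println("fetched", len(apps.Item), "applications") // assumed field

	if apps.NextToken == nil { // assumed continuation token
		break
	}
	token = apps.NextToken
}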
-func (s *ExportJobRequest) SetS3UrlPrefix(v string) *ExportJobRequest { - s.S3UrlPrefix = &v - return s -} - -// SetSegmentId sets the SegmentId field's value. -func (s *ExportJobRequest) SetSegmentId(v string) *ExportJobRequest { - s.SegmentId = &v - return s -} - -// SetSegmentVersion sets the SegmentVersion field's value. -func (s *ExportJobRequest) SetSegmentVersion(v int64) *ExportJobRequest { - s.SegmentVersion = &v +// SetBaiduChannelResponse sets the BaiduChannelResponse field's value. +func (s *GetBaiduChannelOutput) SetBaiduChannelResponse(v *BaiduChannelResponse) *GetBaiduChannelOutput { + s.BaiduChannelResponse = v return s } -// Export job resource. -type ExportJobResource struct { +type GetCampaignActivitiesInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of an IAM role that grants Amazon Pinpoint - // access to the Amazon S3 location that endpoints will be exported to. - RoleArn *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // A URL that points to the location within an Amazon S3 bucket that will receive - // the export. The location is typically a folder with multiple files.The URL - // should follow this format: s3://bucket-name/folder-name/Amazon Pinpoint will - // export endpoints to this location. - S3UrlPrefix *string `type:"string"` + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` - // The ID of the segment to export endpoints from. If not present, Amazon Pinpoint - // exports all of the endpoints that belong to the application. - SegmentId *string `type:"string"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - // The version of the segment to export if specified. - SegmentVersion *int64 `type:"integer"` + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s ExportJobResource) String() string { +func (s GetCampaignActivitiesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportJobResource) GoString() string { +func (s GetCampaignActivitiesInput) GoString() string { return s.String() } -// SetRoleArn sets the RoleArn field's value. -func (s *ExportJobResource) SetRoleArn(v string) *ExportJobResource { - s.RoleArn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCampaignActivitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignActivitiesInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.CampaignId == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignId")) + } + if s.CampaignId != nil && len(*s.CampaignId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetS3UrlPrefix sets the S3UrlPrefix field's value. -func (s *ExportJobResource) SetS3UrlPrefix(v string) *ExportJobResource { - s.S3UrlPrefix = &v +// SetApplicationId sets the ApplicationId field's value. 
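GetCampaignActivitiesInput widens the pattern to two URI parameters, and its Validate shows that the generated checks accumulate rather than short-circuit: an empty input reports ApplicationId and CampaignId together in one ErrInvalidParams. Grounded entirely in the code above:

in := &pinpoint.GetCampaignActivitiesInput{} // neither URI field set

if err := in.Validate(); err != nil {
	// Both missing fields are reported at once, along the lines of:
	//   InvalidParameter: 2 validation error(s) found.
	fmt.Println(err)
}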
+func (s *GetCampaignActivitiesInput) SetApplicationId(v string) *GetCampaignActivitiesInput { + s.ApplicationId = &v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *ExportJobResource) SetSegmentId(v string) *ExportJobResource { - s.SegmentId = &v +// SetCampaignId sets the CampaignId field's value. +func (s *GetCampaignActivitiesInput) SetCampaignId(v string) *GetCampaignActivitiesInput { + s.CampaignId = &v return s } -// SetSegmentVersion sets the SegmentVersion field's value. -func (s *ExportJobResource) SetSegmentVersion(v int64) *ExportJobResource { - s.SegmentVersion = &v +// SetPageSize sets the PageSize field's value. +func (s *GetCampaignActivitiesInput) SetPageSize(v string) *GetCampaignActivitiesInput { + s.PageSize = &v return s } -// Export job response. -type ExportJobResponse struct { - _ struct{} `type:"structure"` +// SetToken sets the Token field's value. +func (s *GetCampaignActivitiesInput) SetToken(v string) *GetCampaignActivitiesInput { + s.Token = &v + return s +} - // The unique ID of the application associated with the export job. - ApplicationId *string `type:"string"` +type GetCampaignActivitiesOutput struct { + _ struct{} `type:"structure" payload:"ActivitiesResponse"` - // The number of pieces that have successfully completed as of the time of the - // request. - CompletedPieces *int64 `type:"integer"` + // Provides information about the activities that were performed by a campaign. + // + // ActivitiesResponse is a required field + ActivitiesResponse *ActivitiesResponse `type:"structure" required:"true"` +} - // The date the job completed in ISO 8601 format. - CompletionDate *string `type:"string"` +// String returns the string representation +func (s GetCampaignActivitiesOutput) String() string { + return awsutil.Prettify(s) +} - // The date the job was created in ISO 8601 format. - CreationDate *string `type:"string"` +// GoString returns the string representation +func (s GetCampaignActivitiesOutput) GoString() string { + return s.String() +} - // The export job settings. - Definition *ExportJobResource `type:"structure"` +// SetActivitiesResponse sets the ActivitiesResponse field's value. +func (s *GetCampaignActivitiesOutput) SetActivitiesResponse(v *ActivitiesResponse) *GetCampaignActivitiesOutput { + s.ActivitiesResponse = v + return s +} - // The number of pieces that failed to be processed as of the time of the request. - FailedPieces *int64 `type:"integer"` +type GetCampaignDateRangeKpiInput struct { + _ struct{} `type:"structure"` - // Provides up to 100 of the first failed entries for the job, if any exist. - Failures []*string `type:"list"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // The unique ID of the job. - Id *string `type:"string"` + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` - // The status of the job.Valid values: CREATED, INITIALIZING, PROCESSING, COMPLETING, - // COMPLETED, FAILING, FAILEDThe job status is FAILED if one or more pieces - // failed. - JobStatus *string `type:"string" enum:"JobStatus"` + EndTime *time.Time `location:"querystring" locationName:"end-time" type:"timestamp" timestampFormat:"iso8601"` - // The number of endpoints that were not processed; for example, because of - // syntax errors. 
- TotalFailures *int64 `type:"integer"` + // KpiName is a required field + KpiName *string `location:"uri" locationName:"kpi-name" type:"string" required:"true"` - // The total number of pieces that must be processed to finish the job. Each - // piece is an approximately equal portion of the endpoints. - TotalPieces *int64 `type:"integer"` + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // The number of endpoints that were processed by the job. - TotalProcessed *int64 `type:"integer"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - // The job type. Will be 'EXPORT'. - Type *string `type:"string"` + StartTime *time.Time `location:"querystring" locationName:"start-time" type:"timestamp" timestampFormat:"iso8601"` } // String returns the string representation -func (s ExportJobResponse) String() string { +func (s GetCampaignDateRangeKpiInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportJobResponse) GoString() string { +func (s GetCampaignDateRangeKpiInput) GoString() string { return s.String() } -// SetApplicationId sets the ApplicationId field's value. -func (s *ExportJobResponse) SetApplicationId(v string) *ExportJobResponse { - s.ApplicationId = &v - return s -} - -// SetCompletedPieces sets the CompletedPieces field's value. -func (s *ExportJobResponse) SetCompletedPieces(v int64) *ExportJobResponse { - s.CompletedPieces = &v - return s -} - -// SetCompletionDate sets the CompletionDate field's value. -func (s *ExportJobResponse) SetCompletionDate(v string) *ExportJobResponse { - s.CompletionDate = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *ExportJobResponse) SetCreationDate(v string) *ExportJobResponse { - s.CreationDate = &v - return s -} - -// SetDefinition sets the Definition field's value. -func (s *ExportJobResponse) SetDefinition(v *ExportJobResource) *ExportJobResponse { - s.Definition = v - return s -} - -// SetFailedPieces sets the FailedPieces field's value. -func (s *ExportJobResponse) SetFailedPieces(v int64) *ExportJobResponse { - s.FailedPieces = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCampaignDateRangeKpiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignDateRangeKpiInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.CampaignId == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignId")) + } + if s.CampaignId != nil && len(*s.CampaignId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + } + if s.KpiName == nil { + invalidParams.Add(request.NewErrParamRequired("KpiName")) + } + if s.KpiName != nil && len(*s.KpiName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KpiName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetFailures sets the Failures field's value. -func (s *ExportJobResponse) SetFailures(v []*string) *ExportJobResponse { - s.Failures = v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetCampaignDateRangeKpiInput) SetApplicationId(v string) *GetCampaignDateRangeKpiInput { + s.ApplicationId = &v return s } -// SetId sets the Id field's value. 
-func (s *ExportJobResponse) SetId(v string) *ExportJobResponse { - s.Id = &v +// SetCampaignId sets the CampaignId field's value. +func (s *GetCampaignDateRangeKpiInput) SetCampaignId(v string) *GetCampaignDateRangeKpiInput { + s.CampaignId = &v return s } -// SetJobStatus sets the JobStatus field's value. -func (s *ExportJobResponse) SetJobStatus(v string) *ExportJobResponse { - s.JobStatus = &v +// SetEndTime sets the EndTime field's value. +func (s *GetCampaignDateRangeKpiInput) SetEndTime(v time.Time) *GetCampaignDateRangeKpiInput { + s.EndTime = &v return s } -// SetTotalFailures sets the TotalFailures field's value. -func (s *ExportJobResponse) SetTotalFailures(v int64) *ExportJobResponse { - s.TotalFailures = &v +// SetKpiName sets the KpiName field's value. +func (s *GetCampaignDateRangeKpiInput) SetKpiName(v string) *GetCampaignDateRangeKpiInput { + s.KpiName = &v return s } -// SetTotalPieces sets the TotalPieces field's value. -func (s *ExportJobResponse) SetTotalPieces(v int64) *ExportJobResponse { - s.TotalPieces = &v +// SetNextToken sets the NextToken field's value. +func (s *GetCampaignDateRangeKpiInput) SetNextToken(v string) *GetCampaignDateRangeKpiInput { + s.NextToken = &v return s } -// SetTotalProcessed sets the TotalProcessed field's value. -func (s *ExportJobResponse) SetTotalProcessed(v int64) *ExportJobResponse { - s.TotalProcessed = &v +// SetPageSize sets the PageSize field's value. +func (s *GetCampaignDateRangeKpiInput) SetPageSize(v string) *GetCampaignDateRangeKpiInput { + s.PageSize = &v return s } -// SetType sets the Type field's value. -func (s *ExportJobResponse) SetType(v string) *ExportJobResponse { - s.Type = &v +// SetStartTime sets the StartTime field's value. +func (s *GetCampaignDateRangeKpiInput) SetStartTime(v time.Time) *GetCampaignDateRangeKpiInput { + s.StartTime = &v return s } -// Export job list. -type ExportJobsResponse struct { - _ struct{} `type:"structure"` - - // A list of export jobs for the application. - Item []*ExportJobResponse `type:"list"` +type GetCampaignDateRangeKpiOutput struct { + _ struct{} `type:"structure" payload:"CampaignDateRangeKpiResponse"` - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. - NextToken *string `type:"string"` + // Provides the results of a query that retrieved the data for a standard metric + // that applies to a campaign, and provides information about that query. + // + // CampaignDateRangeKpiResponse is a required field + CampaignDateRangeKpiResponse *CampaignDateRangeKpiResponse `type:"structure" required:"true"` } // String returns the string representation -func (s ExportJobsResponse) String() string { +func (s GetCampaignDateRangeKpiOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportJobsResponse) GoString() string { +func (s GetCampaignDateRangeKpiOutput) GoString() string { return s.String() } -// SetItem sets the Item field's value. -func (s *ExportJobsResponse) SetItem(v []*ExportJobResponse) *ExportJobsResponse { - s.Item = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ExportJobsResponse) SetNextToken(v string) *ExportJobsResponse { - s.NextToken = &v +// SetCampaignDateRangeKpiResponse sets the CampaignDateRangeKpiResponse field's value. 
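GetCampaignDateRangeKpiInput is the campaign-scoped twin of the application-level KPI input earlier in this hunk: the same time-range and paging members plus one extra campaign-id URI segment, with Validate growing a matching required/min-length pair. Reusing the earlier KPI fragment, only the construction and operation name change:

in := (&pinpoint.GetCampaignDateRangeKpiInput{}).
	SetApplicationId(appID).
	SetCampaignId(campaignID). // the extra URI segment vs. the app-level call
	SetKpiName("unique-deliveries").
	SetStartTime(start).
	SetEndTime(end)

out, err := svc.GetCampaignDateRangeKpi(in)
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.CampaignDateRangeKpiResponse)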
+func (s *GetCampaignDateRangeKpiOutput) SetCampaignDateRangeKpiResponse(v *CampaignDateRangeKpiResponse) *GetCampaignDateRangeKpiOutput { + s.CampaignDateRangeKpiResponse = v return s } -// Google Cloud Messaging credentials -type GCMChannelRequest struct { +type GetCampaignInput struct { _ struct{} `type:"structure"` - // Platform credential API key from Google. - ApiKey *string `type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` } // String returns the string representation -func (s GCMChannelRequest) String() string { +func (s GetCampaignInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GCMChannelRequest) GoString() string { +func (s GetCampaignInput) GoString() string { return s.String() } -// SetApiKey sets the ApiKey field's value. -func (s *GCMChannelRequest) SetApiKey(v string) *GCMChannelRequest { - s.ApiKey = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCampaignInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.CampaignId == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignId")) + } + if s.CampaignId != nil && len(*s.CampaignId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetEnabled sets the Enabled field's value. -func (s *GCMChannelRequest) SetEnabled(v bool) *GCMChannelRequest { - s.Enabled = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetCampaignInput) SetApplicationId(v string) *GetCampaignInput { + s.ApplicationId = &v return s } -// Google Cloud Messaging channel definition -type GCMChannelResponse struct { - _ struct{} `type:"structure"` - - // The ID of the application to which the channel applies. - ApplicationId *string `type:"string"` - - // When was this segment created - CreationDate *string `type:"string"` - - // The GCM API key from Google. - Credential *string `type:"string"` - - // If the channel is enabled for sending messages. - Enabled *bool `type:"boolean"` - - // Not used. Retained for backwards compatibility. - HasCredential *bool `type:"boolean"` - - // Channel ID. Not used. Present only for backwards compatibility. - Id *string `type:"string"` - - // Is this channel archived - IsArchived *bool `type:"boolean"` - - // Who last updated this entry - LastModifiedBy *string `type:"string"` - - // Last date this was updated - LastModifiedDate *string `type:"string"` +// SetCampaignId sets the CampaignId field's value. +func (s *GetCampaignInput) SetCampaignId(v string) *GetCampaignInput { + s.CampaignId = &v + return s +} - // The platform type. 
Will be GCM - Platform *string `type:"string"` +type GetCampaignOutput struct { + _ struct{} `type:"structure" payload:"CampaignResponse"` - // Version of channel - Version *int64 `type:"integer"` + // Provides information about the status, configuration, and other settings + // for a campaign. + // + // CampaignResponse is a required field + CampaignResponse *CampaignResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GCMChannelResponse) String() string { +func (s GetCampaignOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GCMChannelResponse) GoString() string { +func (s GetCampaignOutput) GoString() string { return s.String() } -// SetApplicationId sets the ApplicationId field's value. -func (s *GCMChannelResponse) SetApplicationId(v string) *GCMChannelResponse { - s.ApplicationId = &v +// SetCampaignResponse sets the CampaignResponse field's value. +func (s *GetCampaignOutput) SetCampaignResponse(v *CampaignResponse) *GetCampaignOutput { + s.CampaignResponse = v return s } -// SetCreationDate sets the CreationDate field's value. -func (s *GCMChannelResponse) SetCreationDate(v string) *GCMChannelResponse { - s.CreationDate = &v - return s -} +type GetCampaignVersionInput struct { + _ struct{} `type:"structure"` -// SetCredential sets the Credential field's value. -func (s *GCMChannelResponse) SetCredential(v string) *GCMChannelResponse { - s.Credential = &v - return s -} + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` -// SetEnabled sets the Enabled field's value. -func (s *GCMChannelResponse) SetEnabled(v bool) *GCMChannelResponse { - s.Enabled = &v - return s -} + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` -// SetHasCredential sets the HasCredential field's value. -func (s *GCMChannelResponse) SetHasCredential(v bool) *GCMChannelResponse { - s.HasCredential = &v - return s + // Version is a required field + Version *string `location:"uri" locationName:"version" type:"string" required:"true"` } -// SetId sets the Id field's value. -func (s *GCMChannelResponse) SetId(v string) *GCMChannelResponse { - s.Id = &v - return s +// String returns the string representation +func (s GetCampaignVersionInput) String() string { + return awsutil.Prettify(s) } -// SetIsArchived sets the IsArchived field's value. -func (s *GCMChannelResponse) SetIsArchived(v bool) *GCMChannelResponse { - s.IsArchived = &v - return s +// GoString returns the string representation +func (s GetCampaignVersionInput) GoString() string { + return s.String() } -// SetLastModifiedBy sets the LastModifiedBy field's value. -func (s *GCMChannelResponse) SetLastModifiedBy(v string) *GCMChannelResponse { - s.LastModifiedBy = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
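GetCampaignInput and GetCampaignOutput above are thin wrappers over a GET on the campaign resource. For reference, a minimal call through the generated client might look like the following; the region and IDs are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := pinpoint.New(sess)

	out, err := svc.GetCampaign(&pinpoint.GetCampaignInput{
		ApplicationId: aws.String("example-app-id"),
		CampaignId:    aws.String("example-campaign-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// CampaignResponse is the required payload field on the output shape.
	fmt.Println(aws.StringValue(out.CampaignResponse.Name))
}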
+func (s *GetCampaignVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignVersionInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.CampaignId == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignId")) + } + if s.CampaignId != nil && len(*s.CampaignId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastModifiedDate sets the LastModifiedDate field's value. -func (s *GCMChannelResponse) SetLastModifiedDate(v string) *GCMChannelResponse { - s.LastModifiedDate = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetCampaignVersionInput) SetApplicationId(v string) *GetCampaignVersionInput { + s.ApplicationId = &v return s } -// SetPlatform sets the Platform field's value. -func (s *GCMChannelResponse) SetPlatform(v string) *GCMChannelResponse { - s.Platform = &v +// SetCampaignId sets the CampaignId field's value. +func (s *GetCampaignVersionInput) SetCampaignId(v string) *GetCampaignVersionInput { + s.CampaignId = &v return s } // SetVersion sets the Version field's value. -func (s *GCMChannelResponse) SetVersion(v int64) *GCMChannelResponse { +func (s *GetCampaignVersionInput) SetVersion(v string) *GetCampaignVersionInput { s.Version = &v return s } -// GCM Message. -type GCMMessage struct { - _ struct{} `type:"structure"` - - // The action that occurs if the user taps a push notification delivered by - // the campaign: OPEN_APP - Your app launches, or it becomes the foreground - // app if it has been sent to the background. This is the default action. DEEP_LINK - // - Uses deep linking features in iOS and Android to open your app and display - // a designated user interface within the app. URL - The default mobile browser - // on the user's device launches and opens a web page at the URL you specify. - // Possible values include: OPEN_APP | DEEP_LINK | URL - Action *string `type:"string" enum:"Action"` - - // The message body of the notification. - Body *string `type:"string"` - - // This parameter identifies a group of messages (e.g., with collapse_key: "Updates - // Available") that can be collapsed, so that only the last message gets sent - // when delivery can be resumed. This is intended to avoid sending too many - // of the same messages when the device comes back online or becomes active. - CollapseKey *string `type:"string"` - - // The data payload used for a silent push. This payload is added to the notifications' - // data.pinpoint.jsonBody' object - Data map[string]*string `type:"map"` - - // The icon image name of the asset saved in your application. - IconReference *string `type:"string"` - - // The URL that points to an image used as the large icon to the notification - // content view. - ImageIconUrl *string `type:"string"` - - // The URL that points to an image used in the push notification. - ImageUrl *string `type:"string"` - - // The message priority. Amazon Pinpoint uses this value to set the FCM or GCM - // priority parameter when it sends the message. 
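Note the SetVersion signature in this hunk: campaign versions are addressed as strings because the value is spliced into the request URI, even though the service reports versions numerically. A sketch, assuming an already-constructed *pinpoint.Pinpoint client:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// fetchCampaignVersion retrieves one historical version of a campaign.
func fetchCampaignVersion(svc *pinpoint.Pinpoint, appID, campaignID, version string) (*pinpoint.CampaignResponse, error) {
	out, err := svc.GetCampaignVersion(&pinpoint.GetCampaignVersionInput{
		ApplicationId: aws.String(appID),
		CampaignId:    aws.String(campaignID),
		Version:       aws.String(version), // e.g. "2"; carried as a URI segment
	})
	if err != nil {
		return nil, err
	}
	return out.CampaignResponse, nil
}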
Accepts the following values:"Normal" - // - Messages might be delayed. Delivery is optimized for battery usage on the - // receiving device. Use normal priority unless immediate delivery is required."High" - // - Messages are sent immediately and might wake a sleeping device.The equivalent - // values for APNs messages are "5" and "10". Amazon Pinpoint accepts these - // values here and converts them.For more information, see About FCM Messages - // in the Firebase documentation. - Priority *string `type:"string"` - - // The Raw JSON formatted string to be used as the payload. This value overrides - // the message. - RawContent *string `type:"string"` - - // This parameter specifies the package name of the application where the registration - // tokens must match in order to receive the message. - RestrictedPackageName *string `type:"string"` - - // Indicates if the message should display on the users device. Silent pushes - // can be used for Remote Configuration and Phone Home use cases. - SilentPush *bool `type:"boolean"` - - // The URL that points to an image used as the small icon for the notification - // which will be used to represent the notification in the status bar and content - // view - SmallImageIconUrl *string `type:"string"` - - // Indicates a sound to play when the device receives the notification. Supports - // default, or the filename of a sound resource bundled in the app. Android - // sound files must reside in /res/raw/ - Sound *string `type:"string"` - - // Default message substitutions. Can be overridden by individual address substitutions. - Substitutions map[string][]*string `type:"map"` - - // The length of time (in seconds) that FCM or GCM stores and attempts to deliver - // the message. If unspecified, the value defaults to the maximum, which is - // 2,419,200 seconds (28 days). Amazon Pinpoint uses this value to set the FCM - // or GCM time_to_live parameter. - TimeToLive *int64 `type:"integer"` - - // The message title that displays above the message on the user's device. - Title *string `type:"string"` +type GetCampaignVersionOutput struct { + _ struct{} `type:"structure" payload:"CampaignResponse"` - // The URL to open in the user's mobile browser. Used if the value for Action - // is URL. - Url *string `type:"string"` + // Provides information about the status, configuration, and other settings + // for a campaign. + // + // CampaignResponse is a required field + CampaignResponse *CampaignResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GCMMessage) String() string { +func (s GetCampaignVersionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GCMMessage) GoString() string { +func (s GetCampaignVersionOutput) GoString() string { return s.String() } -// SetAction sets the Action field's value. -func (s *GCMMessage) SetAction(v string) *GCMMessage { - s.Action = &v +// SetCampaignResponse sets the CampaignResponse field's value. +func (s *GetCampaignVersionOutput) SetCampaignResponse(v *CampaignResponse) *GetCampaignVersionOutput { + s.CampaignResponse = v return s } -// SetBody sets the Body field's value. -func (s *GCMMessage) SetBody(v string) *GCMMessage { - s.Body = &v - return s -} +type GetCampaignVersionsInput struct { + _ struct{} `type:"structure"` -// SetCollapseKey sets the CollapseKey field's value. 
-func (s *GCMMessage) SetCollapseKey(v string) *GCMMessage { - s.CollapseKey = &v - return s -} + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` -// SetData sets the Data field's value. -func (s *GCMMessage) SetData(v map[string]*string) *GCMMessage { - s.Data = v - return s -} + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` -// SetIconReference sets the IconReference field's value. -func (s *GCMMessage) SetIconReference(v string) *GCMMessage { - s.IconReference = &v - return s -} + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` -// SetImageIconUrl sets the ImageIconUrl field's value. -func (s *GCMMessage) SetImageIconUrl(v string) *GCMMessage { - s.ImageIconUrl = &v - return s + Token *string `location:"querystring" locationName:"token" type:"string"` } -// SetImageUrl sets the ImageUrl field's value. -func (s *GCMMessage) SetImageUrl(v string) *GCMMessage { - s.ImageUrl = &v - return s +// String returns the string representation +func (s GetCampaignVersionsInput) String() string { + return awsutil.Prettify(s) } -// SetPriority sets the Priority field's value. -func (s *GCMMessage) SetPriority(v string) *GCMMessage { - s.Priority = &v - return s +// GoString returns the string representation +func (s GetCampaignVersionsInput) GoString() string { + return s.String() } -// SetRawContent sets the RawContent field's value. -func (s *GCMMessage) SetRawContent(v string) *GCMMessage { - s.RawContent = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCampaignVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignVersionsInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.CampaignId == nil { + invalidParams.Add(request.NewErrParamRequired("CampaignId")) + } + if s.CampaignId != nil && len(*s.CampaignId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRestrictedPackageName sets the RestrictedPackageName field's value. -func (s *GCMMessage) SetRestrictedPackageName(v string) *GCMMessage { - s.RestrictedPackageName = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetCampaignVersionsInput) SetApplicationId(v string) *GetCampaignVersionsInput { + s.ApplicationId = &v return s } -// SetSilentPush sets the SilentPush field's value. -func (s *GCMMessage) SetSilentPush(v bool) *GCMMessage { - s.SilentPush = &v +// SetCampaignId sets the CampaignId field's value. +func (s *GetCampaignVersionsInput) SetCampaignId(v string) *GetCampaignVersionsInput { + s.CampaignId = &v return s } -// SetSmallImageIconUrl sets the SmallImageIconUrl field's value. -func (s *GCMMessage) SetSmallImageIconUrl(v string) *GCMMessage { - s.SmallImageIconUrl = &v +// SetPageSize sets the PageSize field's value. +func (s *GetCampaignVersionsInput) SetPageSize(v string) *GetCampaignVersionsInput { + s.PageSize = &v return s } -// SetSound sets the Sound field's value. -func (s *GCMMessage) SetSound(v string) *GCMMessage { - s.Sound = &v +// SetToken sets the Token field's value. 
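GetCampaignVersionsInput exposes the raw page-size and token query-string parameters rather than a typed paginator, so callers loop by hand. A sketch of that loop, on the assumption that CampaignsResponse carries Item and NextToken like the other list shapes in this file:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// listCampaignVersions walks every page of a campaign's versions.
func listCampaignVersions(svc *pinpoint.Pinpoint, appID, campaignID string) ([]*pinpoint.CampaignResponse, error) {
	in := &pinpoint.GetCampaignVersionsInput{
		ApplicationId: aws.String(appID),
		CampaignId:    aws.String(campaignID),
		PageSize:      aws.String("100"), // numeric value, but a string on the wire
	}
	var all []*pinpoint.CampaignResponse
	for {
		out, err := svc.GetCampaignVersions(in)
		if err != nil {
			return nil, err
		}
		all = append(all, out.CampaignsResponse.Item...)
		next := aws.StringValue(out.CampaignsResponse.NextToken)
		if next == "" {
			return all, nil
		}
		in.Token = aws.String(next)
	}
}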
+func (s *GetCampaignVersionsInput) SetToken(v string) *GetCampaignVersionsInput { + s.Token = &v return s } -// SetSubstitutions sets the Substitutions field's value. -func (s *GCMMessage) SetSubstitutions(v map[string][]*string) *GCMMessage { - s.Substitutions = v - return s +type GetCampaignVersionsOutput struct { + _ struct{} `type:"structure" payload:"CampaignsResponse"` + + // Provides information about the configuration and other settings for all the + // campaigns that are associated with an application. + // + // CampaignsResponse is a required field + CampaignsResponse *CampaignsResponse `type:"structure" required:"true"` } -// SetTimeToLive sets the TimeToLive field's value. -func (s *GCMMessage) SetTimeToLive(v int64) *GCMMessage { - s.TimeToLive = &v - return s +// String returns the string representation +func (s GetCampaignVersionsOutput) String() string { + return awsutil.Prettify(s) } -// SetTitle sets the Title field's value. -func (s *GCMMessage) SetTitle(v string) *GCMMessage { - s.Title = &v - return s +// GoString returns the string representation +func (s GetCampaignVersionsOutput) GoString() string { + return s.String() } -// SetUrl sets the Url field's value. -func (s *GCMMessage) SetUrl(v string) *GCMMessage { - s.Url = &v +// SetCampaignsResponse sets the CampaignsResponse field's value. +func (s *GetCampaignVersionsOutput) SetCampaignsResponse(v *CampaignsResponse) *GetCampaignVersionsOutput { + s.CampaignsResponse = v return s } -// GPS coordinates -type GPSCoordinates struct { +type GetCampaignsInput struct { _ struct{} `type:"structure"` - // Latitude - Latitude *float64 `type:"double"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Longitude - Longitude *float64 `type:"double"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GPSCoordinates) String() string { +func (s GetCampaignsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GPSCoordinates) GoString() string { +func (s GetCampaignsInput) GoString() string { return s.String() } -// SetLatitude sets the Latitude field's value. -func (s *GPSCoordinates) SetLatitude(v float64) *GPSCoordinates { - s.Latitude = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCampaignsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCampaignsInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *GetCampaignsInput) SetApplicationId(v string) *GetCampaignsInput { + s.ApplicationId = &v return s } -// SetLongitude sets the Longitude field's value. -func (s *GPSCoordinates) SetLongitude(v float64) *GPSCoordinates { - s.Longitude = &v +// SetPageSize sets the PageSize field's value. 
+func (s *GetCampaignsInput) SetPageSize(v string) *GetCampaignsInput { + s.PageSize = &v return s } -// GPS point location dimension -type GPSPointDimension struct { - _ struct{} `type:"structure"` +// SetToken sets the Token field's value. +func (s *GetCampaignsInput) SetToken(v string) *GetCampaignsInput { + s.Token = &v + return s +} - // Coordinate to measure distance from. - Coordinates *GPSCoordinates `type:"structure"` +type GetCampaignsOutput struct { + _ struct{} `type:"structure" payload:"CampaignsResponse"` - // Range in kilometers from the coordinate. - RangeInKilometers *float64 `type:"double"` + // Provides information about the configuration and other settings for all the + // campaigns that are associated with an application. + // + // CampaignsResponse is a required field + CampaignsResponse *CampaignsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GPSPointDimension) String() string { +func (s GetCampaignsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GPSPointDimension) GoString() string { +func (s GetCampaignsOutput) GoString() string { return s.String() } -// SetCoordinates sets the Coordinates field's value. -func (s *GPSPointDimension) SetCoordinates(v *GPSCoordinates) *GPSPointDimension { - s.Coordinates = v - return s -} - -// SetRangeInKilometers sets the RangeInKilometers field's value. -func (s *GPSPointDimension) SetRangeInKilometers(v float64) *GPSPointDimension { - s.RangeInKilometers = &v +// SetCampaignsResponse sets the CampaignsResponse field's value. +func (s *GetCampaignsOutput) SetCampaignsResponse(v *CampaignsResponse) *GetCampaignsOutput { + s.CampaignsResponse = v return s } -type GetAdmChannelInput struct { +type GetChannelsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field @@ -13901,18 +20000,18 @@ type GetAdmChannelInput struct { } // String returns the string representation -func (s GetAdmChannelInput) String() string { +func (s GetChannelsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAdmChannelInput) GoString() string { +func (s GetChannelsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAdmChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAdmChannelInput"} +func (s *GetChannelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChannelsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } @@ -13927,37 +20026,38 @@ func (s *GetAdmChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetAdmChannelInput) SetApplicationId(v string) *GetAdmChannelInput { +func (s *GetChannelsInput) SetApplicationId(v string) *GetChannelsInput { s.ApplicationId = &v return s } -type GetAdmChannelOutput struct { - _ struct{} `type:"structure" payload:"ADMChannelResponse"` +type GetChannelsOutput struct { + _ struct{} `type:"structure" payload:"ChannelsResponse"` - // Amazon Device Messaging channel definition. + // Provides information about the general settings and status of all channels + // for an application, including channels that aren't enabled for the application. 
// - // ADMChannelResponse is a required field - ADMChannelResponse *ADMChannelResponse `type:"structure" required:"true"` + // ChannelsResponse is a required field + ChannelsResponse *ChannelsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetAdmChannelOutput) String() string { +func (s GetChannelsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAdmChannelOutput) GoString() string { +func (s GetChannelsOutput) GoString() string { return s.String() } -// SetADMChannelResponse sets the ADMChannelResponse field's value. -func (s *GetAdmChannelOutput) SetADMChannelResponse(v *ADMChannelResponse) *GetAdmChannelOutput { - s.ADMChannelResponse = v +// SetChannelsResponse sets the ChannelsResponse field's value. +func (s *GetChannelsOutput) SetChannelsResponse(v *ChannelsResponse) *GetChannelsOutput { + s.ChannelsResponse = v return s } -type GetApnsChannelInput struct { +type GetEmailChannelInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field @@ -13965,18 +20065,18 @@ type GetApnsChannelInput struct { } // String returns the string representation -func (s GetApnsChannelInput) String() string { +func (s GetEmailChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsChannelInput) GoString() string { +func (s GetEmailChannelInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetApnsChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApnsChannelInput"} +func (s *GetEmailChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEmailChannelInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } @@ -13991,61 +20091,62 @@ func (s *GetApnsChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetApnsChannelInput) SetApplicationId(v string) *GetApnsChannelInput { +func (s *GetEmailChannelInput) SetApplicationId(v string) *GetEmailChannelInput { s.ApplicationId = &v return s } -type GetApnsChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSChannelResponse"` +type GetEmailChannelOutput struct { + _ struct{} `type:"structure" payload:"EmailChannelResponse"` - // Apple Distribution Push Notification Service channel definition. + // Provides information about the status and settings of the email channel for + // an application. // - // APNSChannelResponse is a required field - APNSChannelResponse *APNSChannelResponse `type:"structure" required:"true"` + // EmailChannelResponse is a required field + EmailChannelResponse *EmailChannelResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetApnsChannelOutput) String() string { +func (s GetEmailChannelOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsChannelOutput) GoString() string { +func (s GetEmailChannelOutput) GoString() string { return s.String() } -// SetAPNSChannelResponse sets the APNSChannelResponse field's value. -func (s *GetApnsChannelOutput) SetAPNSChannelResponse(v *APNSChannelResponse) *GetApnsChannelOutput { - s.APNSChannelResponse = v +// SetEmailChannelResponse sets the EmailChannelResponse field's value. 
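GetChannels is one of the genuinely new operations in this version bump: a single call that reports the status of every channel on an application, including channels that are not enabled. Assuming ChannelsResponse exposes its per-channel map as Channels (consistent with the generated model), usage could look like:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// printChannelStatus lists each channel type and whether it is enabled.
func printChannelStatus(svc *pinpoint.Pinpoint, appID string) error {
	out, err := svc.GetChannels(&pinpoint.GetChannelsInput{
		ApplicationId: aws.String(appID),
	})
	if err != nil {
		return err
	}
	for channelType, ch := range out.ChannelsResponse.Channels {
		fmt.Printf("%s: enabled=%t\n", channelType, aws.BoolValue(ch.Enabled))
	}
	return nil
}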
+func (s *GetEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *GetEmailChannelOutput { + s.EmailChannelResponse = v return s } -type GetApnsSandboxChannelInput struct { +type GetEmailTemplateInput struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` } // String returns the string representation -func (s GetApnsSandboxChannelInput) String() string { +func (s GetEmailTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsSandboxChannelInput) GoString() string { +func (s GetEmailTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetApnsSandboxChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApnsSandboxChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) +func (s *GetEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEmailTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } if invalidParams.Len() > 0 { @@ -14054,63 +20155,73 @@ func (s *GetApnsSandboxChannelInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetApnsSandboxChannelInput) SetApplicationId(v string) *GetApnsSandboxChannelInput { - s.ApplicationId = &v +// SetTemplateName sets the TemplateName field's value. +func (s *GetEmailTemplateInput) SetTemplateName(v string) *GetEmailTemplateInput { + s.TemplateName = &v return s } -type GetApnsSandboxChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSSandboxChannelResponse"` +type GetEmailTemplateOutput struct { + _ struct{} `type:"structure" payload:"EmailTemplateResponse"` - // Apple Development Push Notification Service channel definition. + // Provides information about the content and settings for a message template + // that can be used in messages that are sent through the email channel. // - // APNSSandboxChannelResponse is a required field - APNSSandboxChannelResponse *APNSSandboxChannelResponse `type:"structure" required:"true"` + // EmailTemplateResponse is a required field + EmailTemplateResponse *EmailTemplateResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetApnsSandboxChannelOutput) String() string { +func (s GetEmailTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsSandboxChannelOutput) GoString() string { +func (s GetEmailTemplateOutput) GoString() string { return s.String() } -// SetAPNSSandboxChannelResponse sets the APNSSandboxChannelResponse field's value. -func (s *GetApnsSandboxChannelOutput) SetAPNSSandboxChannelResponse(v *APNSSandboxChannelResponse) *GetApnsSandboxChannelOutput { - s.APNSSandboxChannelResponse = v +// SetEmailTemplateResponse sets the EmailTemplateResponse field's value. 
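Unlike the other Get* inputs in this file, GetEmailTemplateInput is keyed by TemplateName alone, since message templates live outside any single application. A sketch, with a hypothetical template name and on the assumption that EmailTemplateResponse carries the template's Subject:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// showEmailTemplate fetches a template by name and prints its subject line.
func showEmailTemplate(svc *pinpoint.Pinpoint, name string) error {
	out, err := svc.GetEmailTemplate(&pinpoint.GetEmailTemplateInput{
		TemplateName: aws.String(name), // e.g. "welcome-email" (hypothetical)
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.EmailTemplateResponse.Subject))
	return nil
}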
+func (s *GetEmailTemplateOutput) SetEmailTemplateResponse(v *EmailTemplateResponse) *GetEmailTemplateOutput { + s.EmailTemplateResponse = v return s } -type GetApnsVoipChannelInput struct { +type GetEndpointInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // EndpointId is a required field + EndpointId *string `location:"uri" locationName:"endpoint-id" type:"string" required:"true"` } // String returns the string representation -func (s GetApnsVoipChannelInput) String() string { +func (s GetEndpointInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsVoipChannelInput) GoString() string { +func (s GetEndpointInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetApnsVoipChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApnsVoipChannelInput"} +func (s *GetEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEndpointInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.EndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointId")) + } + if s.EndpointId != nil && len(*s.EndpointId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EndpointId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14119,37 +20230,43 @@ func (s *GetApnsVoipChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetApnsVoipChannelInput) SetApplicationId(v string) *GetApnsVoipChannelInput { +func (s *GetEndpointInput) SetApplicationId(v string) *GetEndpointInput { s.ApplicationId = &v return s } -type GetApnsVoipChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSVoipChannelResponse"` +// SetEndpointId sets the EndpointId field's value. +func (s *GetEndpointInput) SetEndpointId(v string) *GetEndpointInput { + s.EndpointId = &v + return s +} + +type GetEndpointOutput struct { + _ struct{} `type:"structure" payload:"EndpointResponse"` - // Apple VoIP Push Notification Service channel definition. + // Provides information about the channel type and other settings for an endpoint. // - // APNSVoipChannelResponse is a required field - APNSVoipChannelResponse *APNSVoipChannelResponse `type:"structure" required:"true"` + // EndpointResponse is a required field + EndpointResponse *EndpointResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetApnsVoipChannelOutput) String() string { +func (s GetEndpointOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsVoipChannelOutput) GoString() string { +func (s GetEndpointOutput) GoString() string { return s.String() } -// SetAPNSVoipChannelResponse sets the APNSVoipChannelResponse field's value. -func (s *GetApnsVoipChannelOutput) SetAPNSVoipChannelResponse(v *APNSVoipChannelResponse) *GetApnsVoipChannelOutput { - s.APNSVoipChannelResponse = v +// SetEndpointResponse sets the EndpointResponse field's value. 
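GetEndpointInput adds EndpointId as a second required URI field, and the generated Validate covers both. For reference, a lookup might read:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// describeEndpoint prints the channel type of a single endpoint.
func describeEndpoint(svc *pinpoint.Pinpoint, appID, endpointID string) error {
	out, err := svc.GetEndpoint(&pinpoint.GetEndpointInput{
		ApplicationId: aws.String(appID),
		EndpointId:    aws.String(endpointID),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.EndpointResponse.ChannelType))
	return nil
}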
+func (s *GetEndpointOutput) SetEndpointResponse(v *EndpointResponse) *GetEndpointOutput { + s.EndpointResponse = v return s } -type GetApnsVoipSandboxChannelInput struct { +type GetEventStreamInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field @@ -14157,18 +20274,18 @@ type GetApnsVoipSandboxChannelInput struct { } // String returns the string representation -func (s GetApnsVoipSandboxChannelInput) String() string { +func (s GetEventStreamInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsVoipSandboxChannelInput) GoString() string { +func (s GetEventStreamInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetApnsVoipSandboxChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApnsVoipSandboxChannelInput"} +func (s *GetEventStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEventStreamInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } @@ -14183,62 +20300,72 @@ func (s *GetApnsVoipSandboxChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetApnsVoipSandboxChannelInput) SetApplicationId(v string) *GetApnsVoipSandboxChannelInput { +func (s *GetEventStreamInput) SetApplicationId(v string) *GetEventStreamInput { s.ApplicationId = &v return s } -type GetApnsVoipSandboxChannelOutput struct { - _ struct{} `type:"structure" payload:"APNSVoipSandboxChannelResponse"` +type GetEventStreamOutput struct { + _ struct{} `type:"structure" payload:"EventStream"` - // Apple VoIP Developer Push Notification Service channel definition. + // Specifies settings for publishing event data to an Amazon Kinesis data stream + // or an Amazon Kinesis Data Firehose delivery stream. // - // APNSVoipSandboxChannelResponse is a required field - APNSVoipSandboxChannelResponse *APNSVoipSandboxChannelResponse `type:"structure" required:"true"` + // EventStream is a required field + EventStream *EventStream `type:"structure" required:"true"` } // String returns the string representation -func (s GetApnsVoipSandboxChannelOutput) String() string { +func (s GetEventStreamOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApnsVoipSandboxChannelOutput) GoString() string { +func (s GetEventStreamOutput) GoString() string { return s.String() } -// SetAPNSVoipSandboxChannelResponse sets the APNSVoipSandboxChannelResponse field's value. -func (s *GetApnsVoipSandboxChannelOutput) SetAPNSVoipSandboxChannelResponse(v *APNSVoipSandboxChannelResponse) *GetApnsVoipSandboxChannelOutput { - s.APNSVoipSandboxChannelResponse = v +// SetEventStream sets the EventStream field's value. 
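GetEventStream returns the Kinesis or Kinesis Data Firehose destination wired up for an application's events, and the service responds with a not-found error when none is configured. A sketch that handles that case explicitly, using the error-code constant this package publishes:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// eventStreamArn returns the configured destination ARN, or "" if no
// event stream has been set up for the application.
func eventStreamArn(svc *pinpoint.Pinpoint, appID string) (string, error) {
	out, err := svc.GetEventStream(&pinpoint.GetEventStreamInput{
		ApplicationId: aws.String(appID),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == pinpoint.ErrCodeNotFoundException {
		return "", nil
	}
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.EventStream.DestinationStreamArn), nil
}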
+func (s *GetEventStreamOutput) SetEventStream(v *EventStream) *GetEventStreamOutput { + s.EventStream = v return s } -type GetAppInput struct { +type GetExportJobInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // JobId is a required field + JobId *string `location:"uri" locationName:"job-id" type:"string" required:"true"` } // String returns the string representation -func (s GetAppInput) String() string { +func (s GetExportJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAppInput) GoString() string { +func (s GetExportJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAppInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAppInput"} +func (s *GetExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetExportJobInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14247,56 +20374,69 @@ func (s *GetAppInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetAppInput) SetApplicationId(v string) *GetAppInput { +func (s *GetExportJobInput) SetApplicationId(v string) *GetExportJobInput { s.ApplicationId = &v return s } -type GetAppOutput struct { - _ struct{} `type:"structure" payload:"ApplicationResponse"` +// SetJobId sets the JobId field's value. +func (s *GetExportJobInput) SetJobId(v string) *GetExportJobInput { + s.JobId = &v + return s +} + +type GetExportJobOutput struct { + _ struct{} `type:"structure" payload:"ExportJobResponse"` - // Application Response. + // Provides information about the status and settings of a job that exports + // endpoint definitions to a file. The file can be added directly to an Amazon + // Simple Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API + // or downloaded directly to a computer by using the Amazon Pinpoint console. // - // ApplicationResponse is a required field - ApplicationResponse *ApplicationResponse `type:"structure" required:"true"` + // ExportJobResponse is a required field + ExportJobResponse *ExportJobResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetAppOutput) String() string { +func (s GetExportJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAppOutput) GoString() string { +func (s GetExportJobOutput) GoString() string { return s.String() } -// SetApplicationResponse sets the ApplicationResponse field's value. -func (s *GetAppOutput) SetApplicationResponse(v *ApplicationResponse) *GetAppOutput { - s.ApplicationResponse = v +// SetExportJobResponse sets the ExportJobResponse field's value. 
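Export jobs are asynchronous, so GetExportJob is typically polled until ExportJobResponse.JobStatus reaches a terminal state. A rough polling loop; the status strings follow the Pinpoint docs and the sleep interval is arbitrary:

package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// waitForExportJob polls until the export job completes or fails.
func waitForExportJob(svc *pinpoint.Pinpoint, appID, jobID string) error {
	for {
		out, err := svc.GetExportJob(&pinpoint.GetExportJobInput{
			ApplicationId: aws.String(appID),
			JobId:         aws.String(jobID),
		})
		if err != nil {
			return err
		}
		status := aws.StringValue(out.ExportJobResponse.JobStatus)
		switch status {
		case "COMPLETED":
			return nil
		case "FAILED":
			return fmt.Errorf("export job %s failed", jobID)
		}
		time.Sleep(5 * time.Second)
	}
}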
+func (s *GetExportJobOutput) SetExportJobResponse(v *ExportJobResponse) *GetExportJobOutput { + s.ExportJobResponse = v return s } -type GetApplicationSettingsInput struct { +type GetExportJobsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetApplicationSettingsInput) String() string { +func (s GetExportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApplicationSettingsInput) GoString() string { +func (s GetExportJobsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetApplicationSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApplicationSettingsInput"} +func (s *GetExportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetExportJobsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } @@ -14311,117 +20451,152 @@ func (s *GetApplicationSettingsInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetApplicationSettingsInput) SetApplicationId(v string) *GetApplicationSettingsInput { +func (s *GetExportJobsInput) SetApplicationId(v string) *GetExportJobsInput { s.ApplicationId = &v return s } -type GetApplicationSettingsOutput struct { - _ struct{} `type:"structure" payload:"ApplicationSettingsResource"` +// SetPageSize sets the PageSize field's value. +func (s *GetExportJobsInput) SetPageSize(v string) *GetExportJobsInput { + s.PageSize = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetExportJobsInput) SetToken(v string) *GetExportJobsInput { + s.Token = &v + return s +} + +type GetExportJobsOutput struct { + _ struct{} `type:"structure" payload:"ExportJobsResponse"` - // Application settings. + // Provides information about all the export jobs that are associated with an + // application or segment. An export job is a job that exports endpoint definitions + // to a file. // - // ApplicationSettingsResource is a required field - ApplicationSettingsResource *ApplicationSettingsResource `type:"structure" required:"true"` + // ExportJobsResponse is a required field + ExportJobsResponse *ExportJobsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetApplicationSettingsOutput) String() string { +func (s GetExportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApplicationSettingsOutput) GoString() string { +func (s GetExportJobsOutput) GoString() string { return s.String() } -// SetApplicationSettingsResource sets the ApplicationSettingsResource field's value. -func (s *GetApplicationSettingsOutput) SetApplicationSettingsResource(v *ApplicationSettingsResource) *GetApplicationSettingsOutput { - s.ApplicationSettingsResource = v +// SetExportJobsResponse sets the ExportJobsResponse field's value. 
+func (s *GetExportJobsOutput) SetExportJobsResponse(v *ExportJobsResponse) *GetExportJobsOutput { + s.ExportJobsResponse = v return s } -type GetAppsInput struct { +type GetGcmChannelInput struct { _ struct{} `type:"structure"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - - Token *string `location:"querystring" locationName:"token" type:"string"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` } // String returns the string representation -func (s GetAppsInput) String() string { +func (s GetGcmChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAppsInput) GoString() string { +func (s GetGcmChannelInput) GoString() string { return s.String() } -// SetPageSize sets the PageSize field's value. -func (s *GetAppsInput) SetPageSize(v string) *GetAppsInput { - s.PageSize = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGcmChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGcmChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetToken sets the Token field's value. -func (s *GetAppsInput) SetToken(v string) *GetAppsInput { - s.Token = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetGcmChannelInput) SetApplicationId(v string) *GetGcmChannelInput { + s.ApplicationId = &v return s } -type GetAppsOutput struct { - _ struct{} `type:"structure" payload:"ApplicationsResponse"` +type GetGcmChannelOutput struct { + _ struct{} `type:"structure" payload:"GCMChannelResponse"` - // Get Applications Result. + // Provides information about the status and settings of the GCM channel for + // an application. The GCM channel enables Amazon Pinpoint to send push notifications + // through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging + // (GCM), service. // - // ApplicationsResponse is a required field - ApplicationsResponse *ApplicationsResponse `type:"structure" required:"true"` + // GCMChannelResponse is a required field + GCMChannelResponse *GCMChannelResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetAppsOutput) String() string { +func (s GetGcmChannelOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAppsOutput) GoString() string { +func (s GetGcmChannelOutput) GoString() string { return s.String() -} - -// SetApplicationsResponse sets the ApplicationsResponse field's value. -func (s *GetAppsOutput) SetApplicationsResponse(v *ApplicationsResponse) *GetAppsOutput { - s.ApplicationsResponse = v +} + +// SetGCMChannelResponse sets the GCMChannelResponse field's value. 
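The GCM channel getter keeps its legacy name even though, as the new doc comment notes, the underlying service is Firebase Cloud Messaging. A minimal status check:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// gcmChannelEnabled reports whether push via FCM/GCM is enabled for the app.
func gcmChannelEnabled(svc *pinpoint.Pinpoint, appID string) (bool, error) {
	out, err := svc.GetGcmChannel(&pinpoint.GetGcmChannelInput{
		ApplicationId: aws.String(appID),
	})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(out.GCMChannelResponse.Enabled), nil
}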
+func (s *GetGcmChannelOutput) SetGCMChannelResponse(v *GCMChannelResponse) *GetGcmChannelOutput { + s.GCMChannelResponse = v return s } -type GetBaiduChannelInput struct { +type GetImportJobInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // JobId is a required field + JobId *string `location:"uri" locationName:"job-id" type:"string" required:"true"` } // String returns the string representation -func (s GetBaiduChannelInput) String() string { +func (s GetImportJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBaiduChannelInput) GoString() string { +func (s GetImportJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBaiduChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBaiduChannelInput"} +func (s *GetImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetImportJobInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14430,75 +20605,75 @@ func (s *GetBaiduChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetBaiduChannelInput) SetApplicationId(v string) *GetBaiduChannelInput { +func (s *GetImportJobInput) SetApplicationId(v string) *GetImportJobInput { s.ApplicationId = &v return s } -type GetBaiduChannelOutput struct { - _ struct{} `type:"structure" payload:"BaiduChannelResponse"` +// SetJobId sets the JobId field's value. +func (s *GetImportJobInput) SetJobId(v string) *GetImportJobInput { + s.JobId = &v + return s +} + +type GetImportJobOutput struct { + _ struct{} `type:"structure" payload:"ImportJobResponse"` - // Baidu Cloud Messaging channel definition + // Provides information about the status and settings of a job that imports + // endpoint definitions from one or more files. The files can be stored in an + // Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from + // a computer by using the Amazon Pinpoint console. // - // BaiduChannelResponse is a required field - BaiduChannelResponse *BaiduChannelResponse `type:"structure" required:"true"` + // ImportJobResponse is a required field + ImportJobResponse *ImportJobResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetBaiduChannelOutput) String() string { +func (s GetImportJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBaiduChannelOutput) GoString() string { +func (s GetImportJobOutput) GoString() string { return s.String() } -// SetBaiduChannelResponse sets the BaiduChannelResponse field's value. -func (s *GetBaiduChannelOutput) SetBaiduChannelResponse(v *BaiduChannelResponse) *GetBaiduChannelOutput { - s.BaiduChannelResponse = v +// SetImportJobResponse sets the ImportJobResponse field's value. 
+func (s *GetImportJobOutput) SetImportJobResponse(v *ImportJobResponse) *GetImportJobOutput { + s.ImportJobResponse = v return s } -type GetCampaignActivitiesInput struct { +type GetImportJobsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // CampaignId is a required field - CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetCampaignActivitiesInput) String() string { +func (s GetImportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignActivitiesInput) GoString() string { +func (s GetImportJobsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCampaignActivitiesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCampaignActivitiesInput"} +func (s *GetImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetImportJobsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.CampaignId == nil { - invalidParams.Add(request.NewErrParamRequired("CampaignId")) - } - if s.CampaignId != nil && len(*s.CampaignId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -14507,88 +20682,101 @@ func (s *GetCampaignActivitiesInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetCampaignActivitiesInput) SetApplicationId(v string) *GetCampaignActivitiesInput { +func (s *GetImportJobsInput) SetApplicationId(v string) *GetImportJobsInput { s.ApplicationId = &v return s } -// SetCampaignId sets the CampaignId field's value. -func (s *GetCampaignActivitiesInput) SetCampaignId(v string) *GetCampaignActivitiesInput { - s.CampaignId = &v - return s -} - // SetPageSize sets the PageSize field's value. -func (s *GetCampaignActivitiesInput) SetPageSize(v string) *GetCampaignActivitiesInput { +func (s *GetImportJobsInput) SetPageSize(v string) *GetImportJobsInput { s.PageSize = &v return s } // SetToken sets the Token field's value. -func (s *GetCampaignActivitiesInput) SetToken(v string) *GetCampaignActivitiesInput { +func (s *GetImportJobsInput) SetToken(v string) *GetImportJobsInput { s.Token = &v return s } -type GetCampaignActivitiesOutput struct { - _ struct{} `type:"structure" payload:"ActivitiesResponse"` +type GetImportJobsOutput struct { + _ struct{} `type:"structure" payload:"ImportJobsResponse"` - // Activities for campaign. + // Provides information about the status and settings of all the import jobs + // that are associated with an application or segment. An import job is a job + // that imports endpoint definitions from one or more files. 
// - // ActivitiesResponse is a required field - ActivitiesResponse *ActivitiesResponse `type:"structure" required:"true"` + // ImportJobsResponse is a required field + ImportJobsResponse *ImportJobsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetCampaignActivitiesOutput) String() string { +func (s GetImportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignActivitiesOutput) GoString() string { +func (s GetImportJobsOutput) GoString() string { return s.String() } -// SetActivitiesResponse sets the ActivitiesResponse field's value. -func (s *GetCampaignActivitiesOutput) SetActivitiesResponse(v *ActivitiesResponse) *GetCampaignActivitiesOutput { - s.ActivitiesResponse = v +// SetImportJobsResponse sets the ImportJobsResponse field's value. +func (s *GetImportJobsOutput) SetImportJobsResponse(v *ImportJobsResponse) *GetImportJobsOutput { + s.ImportJobsResponse = v return s } -type GetCampaignInput struct { +type GetJourneyDateRangeKpiInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // CampaignId is a required field - CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` + EndTime *time.Time `location:"querystring" locationName:"end-time" type:"timestamp" timestampFormat:"iso8601"` + + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` + + // KpiName is a required field + KpiName *string `location:"uri" locationName:"kpi-name" type:"string" required:"true"` + + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + StartTime *time.Time `location:"querystring" locationName:"start-time" type:"timestamp" timestampFormat:"iso8601"` } // String returns the string representation -func (s GetCampaignInput) String() string { +func (s GetJourneyDateRangeKpiInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignInput) GoString() string { +func (s GetJourneyDateRangeKpiInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetCampaignInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCampaignInput"} +func (s *GetJourneyDateRangeKpiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJourneyDateRangeKpiInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.CampaignId == nil { - invalidParams.Add(request.NewErrParamRequired("CampaignId")) + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) } - if s.CampaignId != nil && len(*s.CampaignId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) + } + if s.KpiName == nil { + invalidParams.Add(request.NewErrParamRequired("KpiName")) + } + if s.KpiName != nil && len(*s.KpiName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KpiName", 1)) } if invalidParams.Len() > 0 { @@ -14598,85 +20786,120 @@ func (s *GetCampaignInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetCampaignInput) SetApplicationId(v string) *GetCampaignInput { +func (s *GetJourneyDateRangeKpiInput) SetApplicationId(v string) *GetJourneyDateRangeKpiInput { s.ApplicationId = &v return s } -// SetCampaignId sets the CampaignId field's value. -func (s *GetCampaignInput) SetCampaignId(v string) *GetCampaignInput { - s.CampaignId = &v +// SetEndTime sets the EndTime field's value. +func (s *GetJourneyDateRangeKpiInput) SetEndTime(v time.Time) *GetJourneyDateRangeKpiInput { + s.EndTime = &v return s } -type GetCampaignOutput struct { - _ struct{} `type:"structure" payload:"CampaignResponse"` +// SetJourneyId sets the JourneyId field's value. +func (s *GetJourneyDateRangeKpiInput) SetJourneyId(v string) *GetJourneyDateRangeKpiInput { + s.JourneyId = &v + return s +} - // Campaign definition +// SetKpiName sets the KpiName field's value. +func (s *GetJourneyDateRangeKpiInput) SetKpiName(v string) *GetJourneyDateRangeKpiInput { + s.KpiName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetJourneyDateRangeKpiInput) SetNextToken(v string) *GetJourneyDateRangeKpiInput { + s.NextToken = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *GetJourneyDateRangeKpiInput) SetPageSize(v string) *GetJourneyDateRangeKpiInput { + s.PageSize = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetJourneyDateRangeKpiInput) SetStartTime(v time.Time) *GetJourneyDateRangeKpiInput { + s.StartTime = &v + return s +} + +type GetJourneyDateRangeKpiOutput struct { + _ struct{} `type:"structure" payload:"JourneyDateRangeKpiResponse"` + + // Provides the results of a query that retrieved the data for a standard engagement + // metric that applies to a journey, and provides information about that query. 
// - // CampaignResponse is a required field - CampaignResponse *CampaignResponse `type:"structure" required:"true"` + // JourneyDateRangeKpiResponse is a required field + JourneyDateRangeKpiResponse *JourneyDateRangeKpiResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetCampaignOutput) String() string { +func (s GetJourneyDateRangeKpiOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignOutput) GoString() string { +func (s GetJourneyDateRangeKpiOutput) GoString() string { return s.String() } -// SetCampaignResponse sets the CampaignResponse field's value. -func (s *GetCampaignOutput) SetCampaignResponse(v *CampaignResponse) *GetCampaignOutput { - s.CampaignResponse = v +// SetJourneyDateRangeKpiResponse sets the JourneyDateRangeKpiResponse field's value. +func (s *GetJourneyDateRangeKpiOutput) SetJourneyDateRangeKpiResponse(v *JourneyDateRangeKpiResponse) *GetJourneyDateRangeKpiOutput { + s.JourneyDateRangeKpiResponse = v return s } -type GetCampaignVersionInput struct { +type GetJourneyExecutionActivityMetricsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // CampaignId is a required field - CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` + // JourneyActivityId is a required field + JourneyActivityId *string `location:"uri" locationName:"journey-activity-id" type:"string" required:"true"` - // Version is a required field - Version *string `location:"uri" locationName:"version" type:"string" required:"true"` + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` + + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` } // String returns the string representation -func (s GetCampaignVersionInput) String() string { +func (s GetJourneyExecutionActivityMetricsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignVersionInput) GoString() string { +func (s GetJourneyExecutionActivityMetricsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetCampaignVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCampaignVersionInput"} +func (s *GetJourneyExecutionActivityMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJourneyExecutionActivityMetricsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.CampaignId == nil { - invalidParams.Add(request.NewErrParamRequired("CampaignId")) + if s.JourneyActivityId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyActivityId")) } - if s.CampaignId != nil && len(*s.CampaignId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + if s.JourneyActivityId != nil && len(*s.JourneyActivityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyActivityId", 1)) } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) } if invalidParams.Len() > 0 { @@ -14686,86 +20909,100 @@ func (s *GetCampaignVersionInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetCampaignVersionInput) SetApplicationId(v string) *GetCampaignVersionInput { +func (s *GetJourneyExecutionActivityMetricsInput) SetApplicationId(v string) *GetJourneyExecutionActivityMetricsInput { s.ApplicationId = &v return s } -// SetCampaignId sets the CampaignId field's value. -func (s *GetCampaignVersionInput) SetCampaignId(v string) *GetCampaignVersionInput { - s.CampaignId = &v +// SetJourneyActivityId sets the JourneyActivityId field's value. +func (s *GetJourneyExecutionActivityMetricsInput) SetJourneyActivityId(v string) *GetJourneyExecutionActivityMetricsInput { + s.JourneyActivityId = &v return s } -// SetVersion sets the Version field's value. -func (s *GetCampaignVersionInput) SetVersion(v string) *GetCampaignVersionInput { - s.Version = &v +// SetJourneyId sets the JourneyId field's value. +func (s *GetJourneyExecutionActivityMetricsInput) SetJourneyId(v string) *GetJourneyExecutionActivityMetricsInput { + s.JourneyId = &v return s } -type GetCampaignVersionOutput struct { - _ struct{} `type:"structure" payload:"CampaignResponse"` +// SetNextToken sets the NextToken field's value. +func (s *GetJourneyExecutionActivityMetricsInput) SetNextToken(v string) *GetJourneyExecutionActivityMetricsInput { + s.NextToken = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *GetJourneyExecutionActivityMetricsInput) SetPageSize(v string) *GetJourneyExecutionActivityMetricsInput { + s.PageSize = &v + return s +} - // Campaign definition +type GetJourneyExecutionActivityMetricsOutput struct { + _ struct{} `type:"structure" payload:"JourneyExecutionActivityMetricsResponse"` + + // Provides the results of a query that retrieved the data for a standard execution + // metric that applies to a journey activity, and provides information about + // that query. 
// - // CampaignResponse is a required field - CampaignResponse *CampaignResponse `type:"structure" required:"true"` + // JourneyExecutionActivityMetricsResponse is a required field + JourneyExecutionActivityMetricsResponse *JourneyExecutionActivityMetricsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetCampaignVersionOutput) String() string { +func (s GetJourneyExecutionActivityMetricsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignVersionOutput) GoString() string { +func (s GetJourneyExecutionActivityMetricsOutput) GoString() string { return s.String() } -// SetCampaignResponse sets the CampaignResponse field's value. -func (s *GetCampaignVersionOutput) SetCampaignResponse(v *CampaignResponse) *GetCampaignVersionOutput { - s.CampaignResponse = v +// SetJourneyExecutionActivityMetricsResponse sets the JourneyExecutionActivityMetricsResponse field's value. +func (s *GetJourneyExecutionActivityMetricsOutput) SetJourneyExecutionActivityMetricsResponse(v *JourneyExecutionActivityMetricsResponse) *GetJourneyExecutionActivityMetricsOutput { + s.JourneyExecutionActivityMetricsResponse = v return s } -type GetCampaignVersionsInput struct { +type GetJourneyExecutionMetricsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // CampaignId is a required field - CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - Token *string `location:"querystring" locationName:"token" type:"string"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` } // String returns the string representation -func (s GetCampaignVersionsInput) String() string { +func (s GetJourneyExecutionMetricsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignVersionsInput) GoString() string { +func (s GetJourneyExecutionMetricsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetCampaignVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCampaignVersionsInput"} +func (s *GetJourneyExecutionMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJourneyExecutionMetricsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.CampaignId == nil { - invalidParams.Add(request.NewErrParamRequired("CampaignId")) + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) } - if s.CampaignId != nil && len(*s.CampaignId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CampaignId", 1)) + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) } if invalidParams.Len() > 0 { @@ -14775,84 +21012,90 @@ func (s *GetCampaignVersionsInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetCampaignVersionsInput) SetApplicationId(v string) *GetCampaignVersionsInput { +func (s *GetJourneyExecutionMetricsInput) SetApplicationId(v string) *GetJourneyExecutionMetricsInput { s.ApplicationId = &v return s } -// SetCampaignId sets the CampaignId field's value. -func (s *GetCampaignVersionsInput) SetCampaignId(v string) *GetCampaignVersionsInput { - s.CampaignId = &v +// SetJourneyId sets the JourneyId field's value. +func (s *GetJourneyExecutionMetricsInput) SetJourneyId(v string) *GetJourneyExecutionMetricsInput { + s.JourneyId = &v return s } -// SetPageSize sets the PageSize field's value. -func (s *GetCampaignVersionsInput) SetPageSize(v string) *GetCampaignVersionsInput { - s.PageSize = &v +// SetNextToken sets the NextToken field's value. +func (s *GetJourneyExecutionMetricsInput) SetNextToken(v string) *GetJourneyExecutionMetricsInput { + s.NextToken = &v return s } -// SetToken sets the Token field's value. -func (s *GetCampaignVersionsInput) SetToken(v string) *GetCampaignVersionsInput { - s.Token = &v +// SetPageSize sets the PageSize field's value. +func (s *GetJourneyExecutionMetricsInput) SetPageSize(v string) *GetJourneyExecutionMetricsInput { + s.PageSize = &v return s } -type GetCampaignVersionsOutput struct { - _ struct{} `type:"structure" payload:"CampaignsResponse"` +type GetJourneyExecutionMetricsOutput struct { + _ struct{} `type:"structure" payload:"JourneyExecutionMetricsResponse"` - // List of available campaigns. + // Provides the results of a query that retrieved the data for a standard execution + // metric that applies to a journey. // - // CampaignsResponse is a required field - CampaignsResponse *CampaignsResponse `type:"structure" required:"true"` + // JourneyExecutionMetricsResponse is a required field + JourneyExecutionMetricsResponse *JourneyExecutionMetricsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetCampaignVersionsOutput) String() string { +func (s GetJourneyExecutionMetricsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignVersionsOutput) GoString() string { +func (s GetJourneyExecutionMetricsOutput) GoString() string { return s.String() } -// SetCampaignsResponse sets the CampaignsResponse field's value. 
-func (s *GetCampaignVersionsOutput) SetCampaignsResponse(v *CampaignsResponse) *GetCampaignVersionsOutput { - s.CampaignsResponse = v +// SetJourneyExecutionMetricsResponse sets the JourneyExecutionMetricsResponse field's value. +func (s *GetJourneyExecutionMetricsOutput) SetJourneyExecutionMetricsResponse(v *JourneyExecutionMetricsResponse) *GetJourneyExecutionMetricsOutput { + s.JourneyExecutionMetricsResponse = v return s } -type GetCampaignsInput struct { +type GetJourneyInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - - Token *string `location:"querystring" locationName:"token" type:"string"` + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` } // String returns the string representation -func (s GetCampaignsInput) String() string { +func (s GetJourneyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignsInput) GoString() string { +func (s GetJourneyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCampaignsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCampaignsInput"} +func (s *GetJourneyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJourneyInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) + } + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14860,74 +21103,69 @@ func (s *GetCampaignsInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetCampaignsInput) SetApplicationId(v string) *GetCampaignsInput { - s.ApplicationId = &v - return s -} - -// SetPageSize sets the PageSize field's value. -func (s *GetCampaignsInput) SetPageSize(v string) *GetCampaignsInput { - s.PageSize = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *GetJourneyInput) SetApplicationId(v string) *GetJourneyInput { + s.ApplicationId = &v return s } -// SetToken sets the Token field's value. -func (s *GetCampaignsInput) SetToken(v string) *GetCampaignsInput { - s.Token = &v +// SetJourneyId sets the JourneyId field's value. +func (s *GetJourneyInput) SetJourneyId(v string) *GetJourneyInput { + s.JourneyId = &v return s } -type GetCampaignsOutput struct { - _ struct{} `type:"structure" payload:"CampaignsResponse"` +type GetJourneyOutput struct { + _ struct{} `type:"structure" payload:"JourneyResponse"` - // List of available campaigns. + // Provides information about the status, configuration, and other settings + // for a journey. 
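The GetJourney input/output pair above mirrors the campaign getters it replaces in this hunk. A sketch of fetching a single journey definition under the same assumed client setup; both IDs are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	svc := pinpoint.New(session.Must(session.NewSession()))

	// Fetch one journey definition; both IDs are placeholders.
	out, err := svc.GetJourney(&pinpoint.GetJourneyInput{
		ApplicationId: aws.String("APPLICATION_ID"),
		JourneyId:     aws.String("JOURNEY_ID"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// JourneyResponse carries the journey's status, configuration, and settings.
	fmt.Println(out.JourneyResponse)
}
```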
// - // CampaignsResponse is a required field - CampaignsResponse *CampaignsResponse `type:"structure" required:"true"` + // JourneyResponse is a required field + JourneyResponse *JourneyResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetCampaignsOutput) String() string { +func (s GetJourneyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCampaignsOutput) GoString() string { +func (s GetJourneyOutput) GoString() string { return s.String() } -// SetCampaignsResponse sets the CampaignsResponse field's value. -func (s *GetCampaignsOutput) SetCampaignsResponse(v *CampaignsResponse) *GetCampaignsOutput { - s.CampaignsResponse = v +// SetJourneyResponse sets the JourneyResponse field's value. +func (s *GetJourneyOutput) SetJourneyResponse(v *JourneyResponse) *GetJourneyOutput { + s.JourneyResponse = v return s } -type GetChannelsInput struct { +type GetPushTemplateInput struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` } // String returns the string representation -func (s GetChannelsInput) String() string { +func (s GetPushTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetChannelsInput) GoString() string { +func (s GetPushTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetChannelsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetChannelsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) +func (s *GetPushTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPushTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } if invalidParams.Len() > 0 { @@ -14936,63 +21174,77 @@ func (s *GetChannelsInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetChannelsInput) SetApplicationId(v string) *GetChannelsInput { - s.ApplicationId = &v +// SetTemplateName sets the TemplateName field's value. +func (s *GetPushTemplateInput) SetTemplateName(v string) *GetPushTemplateInput { + s.TemplateName = &v return s } -type GetChannelsOutput struct { - _ struct{} `type:"structure" payload:"ChannelsResponse"` +type GetPushTemplateOutput struct { + _ struct{} `type:"structure" payload:"PushNotificationTemplateResponse"` - // Get channels definition + // Provides information about the content and settings for a message template + // that can be used in messages that are sent through a push notification channel. 
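Note that GetPushTemplateInput validates only TemplateName: unlike the channel getters around it there is no ApplicationId, since message templates are addressed by name rather than by application. A sketch, again assuming the standard v1 client and a hypothetical template name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	svc := pinpoint.New(session.Must(session.NewSession()))

	// Templates are keyed by name alone; no application-id path parameter.
	out, err := svc.GetPushTemplate(&pinpoint.GetPushTemplateInput{
		TemplateName: aws.String("MyPushTemplate"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.PushNotificationTemplateResponse)
}
```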
// - // ChannelsResponse is a required field - ChannelsResponse *ChannelsResponse `type:"structure" required:"true"` + // PushNotificationTemplateResponse is a required field + PushNotificationTemplateResponse *PushNotificationTemplateResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetChannelsOutput) String() string { +func (s GetPushTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetChannelsOutput) GoString() string { +func (s GetPushTemplateOutput) GoString() string { return s.String() } -// SetChannelsResponse sets the ChannelsResponse field's value. -func (s *GetChannelsOutput) SetChannelsResponse(v *ChannelsResponse) *GetChannelsOutput { - s.ChannelsResponse = v +// SetPushNotificationTemplateResponse sets the PushNotificationTemplateResponse field's value. +func (s *GetPushTemplateOutput) SetPushNotificationTemplateResponse(v *PushNotificationTemplateResponse) *GetPushTemplateOutput { + s.PushNotificationTemplateResponse = v return s } -type GetEmailChannelInput struct { +type GetSegmentExportJobsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + // SegmentId is a required field + SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetEmailChannelInput) String() string { +func (s GetSegmentExportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEmailChannelInput) GoString() string { +func (s GetSegmentExportJobsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetEmailChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetEmailChannelInput"} +func (s *GetSegmentExportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentExportJobsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) + } + if s.SegmentId != nil && len(*s.SegmentId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15001,70 +21253,94 @@ func (s *GetEmailChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetEmailChannelInput) SetApplicationId(v string) *GetEmailChannelInput { +func (s *GetSegmentExportJobsInput) SetApplicationId(v string) *GetSegmentExportJobsInput { s.ApplicationId = &v return s } -type GetEmailChannelOutput struct { - _ struct{} `type:"structure" payload:"EmailChannelResponse"` +// SetPageSize sets the PageSize field's value. +func (s *GetSegmentExportJobsInput) SetPageSize(v string) *GetSegmentExportJobsInput { + s.PageSize = &v + return s +} + +// SetSegmentId sets the SegmentId field's value. 
+func (s *GetSegmentExportJobsInput) SetSegmentId(v string) *GetSegmentExportJobsInput { + s.SegmentId = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetSegmentExportJobsInput) SetToken(v string) *GetSegmentExportJobsInput { + s.Token = &v + return s +} + +type GetSegmentExportJobsOutput struct { + _ struct{} `type:"structure" payload:"ExportJobsResponse"` - // Email Channel Response. + // Provides information about all the export jobs that are associated with an + // application or segment. An export job is a job that exports endpoint definitions + // to a file. // - // EmailChannelResponse is a required field - EmailChannelResponse *EmailChannelResponse `type:"structure" required:"true"` + // ExportJobsResponse is a required field + ExportJobsResponse *ExportJobsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetEmailChannelOutput) String() string { +func (s GetSegmentExportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEmailChannelOutput) GoString() string { +func (s GetSegmentExportJobsOutput) GoString() string { return s.String() } -// SetEmailChannelResponse sets the EmailChannelResponse field's value. -func (s *GetEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *GetEmailChannelOutput { - s.EmailChannelResponse = v +// SetExportJobsResponse sets the ExportJobsResponse field's value. +func (s *GetSegmentExportJobsOutput) SetExportJobsResponse(v *ExportJobsResponse) *GetSegmentExportJobsOutput { + s.ExportJobsResponse = v return s } -type GetEndpointInput struct { +type GetSegmentImportJobsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // EndpointId is a required field - EndpointId *string `location:"uri" locationName:"endpoint-id" type:"string" required:"true"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + // SegmentId is a required field + SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetEndpointInput) String() string { +func (s GetSegmentImportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEndpointInput) GoString() string { +func (s GetSegmentImportJobsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetEndpointInput"} +func (s *GetSegmentImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentImportJobsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.EndpointId == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointId")) + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) } - if s.EndpointId != nil && len(*s.EndpointId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EndpointId", 1)) + if s.SegmentId != nil && len(*s.SegmentId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) } if invalidParams.Len() > 0 { @@ -15074,68 +21350,91 @@ func (s *GetEndpointInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetEndpointInput) SetApplicationId(v string) *GetEndpointInput { +func (s *GetSegmentImportJobsInput) SetApplicationId(v string) *GetSegmentImportJobsInput { s.ApplicationId = &v return s } -// SetEndpointId sets the EndpointId field's value. -func (s *GetEndpointInput) SetEndpointId(v string) *GetEndpointInput { - s.EndpointId = &v +// SetPageSize sets the PageSize field's value. +func (s *GetSegmentImportJobsInput) SetPageSize(v string) *GetSegmentImportJobsInput { + s.PageSize = &v return s } -type GetEndpointOutput struct { - _ struct{} `type:"structure" payload:"EndpointResponse"` +// SetSegmentId sets the SegmentId field's value. +func (s *GetSegmentImportJobsInput) SetSegmentId(v string) *GetSegmentImportJobsInput { + s.SegmentId = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetSegmentImportJobsInput) SetToken(v string) *GetSegmentImportJobsInput { + s.Token = &v + return s +} + +type GetSegmentImportJobsOutput struct { + _ struct{} `type:"structure" payload:"ImportJobsResponse"` - // Endpoint response + // Provides information about the status and settings of all the import jobs + // that are associated with an application or segment. An import job is a job + // that imports endpoint definitions from one or more files. // - // EndpointResponse is a required field - EndpointResponse *EndpointResponse `type:"structure" required:"true"` + // ImportJobsResponse is a required field + ImportJobsResponse *ImportJobsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetEndpointOutput) String() string { +func (s GetSegmentImportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEndpointOutput) GoString() string { +func (s GetSegmentImportJobsOutput) GoString() string { return s.String() } -// SetEndpointResponse sets the EndpointResponse field's value. -func (s *GetEndpointOutput) SetEndpointResponse(v *EndpointResponse) *GetEndpointOutput { - s.EndpointResponse = v +// SetImportJobsResponse sets the ImportJobsResponse field's value. 
+func (s *GetSegmentImportJobsOutput) SetImportJobsResponse(v *ImportJobsResponse) *GetSegmentImportJobsOutput { + s.ImportJobsResponse = v return s } -type GetEventStreamInput struct { +type GetSegmentInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // SegmentId is a required field + SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` } // String returns the string representation -func (s GetEventStreamInput) String() string { +func (s GetSegmentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEventStreamInput) GoString() string { +func (s GetSegmentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetEventStreamInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetEventStreamInput"} +func (s *GetSegmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) + } + if s.SegmentId != nil && len(*s.SegmentId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15144,70 +21443,86 @@ func (s *GetEventStreamInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetEventStreamInput) SetApplicationId(v string) *GetEventStreamInput { +func (s *GetSegmentInput) SetApplicationId(v string) *GetSegmentInput { s.ApplicationId = &v return s } -type GetEventStreamOutput struct { - _ struct{} `type:"structure" payload:"EventStream"` +// SetSegmentId sets the SegmentId field's value. +func (s *GetSegmentInput) SetSegmentId(v string) *GetSegmentInput { + s.SegmentId = &v + return s +} + +type GetSegmentOutput struct { + _ struct{} `type:"structure" payload:"SegmentResponse"` - // Model for an event publishing subscription export. + // Provides information about the configuration, dimension, and other settings + // for a segment. // - // EventStream is a required field - EventStream *EventStream `type:"structure" required:"true"` + // SegmentResponse is a required field + SegmentResponse *SegmentResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetEventStreamOutput) String() string { +func (s GetSegmentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetEventStreamOutput) GoString() string { +func (s GetSegmentOutput) GoString() string { return s.String() } -// SetEventStream sets the EventStream field's value. -func (s *GetEventStreamOutput) SetEventStream(v *EventStream) *GetEventStreamOutput { - s.EventStream = v +// SetSegmentResponse sets the SegmentResponse field's value. 
+func (s *GetSegmentOutput) SetSegmentResponse(v *SegmentResponse) *GetSegmentOutput { + s.SegmentResponse = v return s } -type GetExportJobInput struct { +type GetSegmentVersionInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // JobId is a required field - JobId *string `location:"uri" locationName:"job-id" type:"string" required:"true"` + // SegmentId is a required field + SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + + // Version is a required field + Version *string `location:"uri" locationName:"version" type:"string" required:"true"` } // String returns the string representation -func (s GetExportJobInput) String() string { +func (s GetSegmentVersionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetExportJobInput) GoString() string { +func (s GetSegmentVersionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetExportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetExportJobInput"} +func (s *GetSegmentVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentVersionInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.JobId == nil { - invalidParams.Add(request.NewErrParamRequired("JobId")) + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) } - if s.JobId != nil && len(*s.JobId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + if s.SegmentId != nil && len(*s.SegmentId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) } if invalidParams.Len() > 0 { @@ -15217,43 +21532,50 @@ func (s *GetExportJobInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetExportJobInput) SetApplicationId(v string) *GetExportJobInput { +func (s *GetSegmentVersionInput) SetApplicationId(v string) *GetSegmentVersionInput { s.ApplicationId = &v return s } -// SetJobId sets the JobId field's value. -func (s *GetExportJobInput) SetJobId(v string) *GetExportJobInput { - s.JobId = &v +// SetSegmentId sets the SegmentId field's value. +func (s *GetSegmentVersionInput) SetSegmentId(v string) *GetSegmentVersionInput { + s.SegmentId = &v return s } -type GetExportJobOutput struct { - _ struct{} `type:"structure" payload:"ExportJobResponse"` +// SetVersion sets the Version field's value. +func (s *GetSegmentVersionInput) SetVersion(v string) *GetSegmentVersionInput { + s.Version = &v + return s +} + +type GetSegmentVersionOutput struct { + _ struct{} `type:"structure" payload:"SegmentResponse"` - // Export job response. + // Provides information about the configuration, dimension, and other settings + // for a segment. 
// - // ExportJobResponse is a required field - ExportJobResponse *ExportJobResponse `type:"structure" required:"true"` + // SegmentResponse is a required field + SegmentResponse *SegmentResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetExportJobOutput) String() string { +func (s GetSegmentVersionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetExportJobOutput) GoString() string { +func (s GetSegmentVersionOutput) GoString() string { return s.String() } -// SetExportJobResponse sets the ExportJobResponse field's value. -func (s *GetExportJobOutput) SetExportJobResponse(v *ExportJobResponse) *GetExportJobOutput { - s.ExportJobResponse = v +// SetSegmentResponse sets the SegmentResponse field's value. +func (s *GetSegmentVersionOutput) SetSegmentResponse(v *SegmentResponse) *GetSegmentVersionOutput { + s.SegmentResponse = v return s } -type GetExportJobsInput struct { +type GetSegmentVersionsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field @@ -15261,28 +21583,37 @@ type GetExportJobsInput struct { PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + // SegmentId is a required field + SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetExportJobsInput) String() string { +func (s GetSegmentVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetExportJobsInput) GoString() string { +func (s GetSegmentVersionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetExportJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetExportJobsInput"} +func (s *GetSegmentVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentVersionsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) + } + if s.SegmentId != nil && len(*s.SegmentId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15291,68 +21622,78 @@ func (s *GetExportJobsInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetExportJobsInput) SetApplicationId(v string) *GetExportJobsInput { +func (s *GetSegmentVersionsInput) SetApplicationId(v string) *GetSegmentVersionsInput { s.ApplicationId = &v return s } // SetPageSize sets the PageSize field's value. -func (s *GetExportJobsInput) SetPageSize(v string) *GetExportJobsInput { +func (s *GetSegmentVersionsInput) SetPageSize(v string) *GetSegmentVersionsInput { s.PageSize = &v return s } +// SetSegmentId sets the SegmentId field's value. +func (s *GetSegmentVersionsInput) SetSegmentId(v string) *GetSegmentVersionsInput { + s.SegmentId = &v + return s +} + // SetToken sets the Token field's value. 
-func (s *GetExportJobsInput) SetToken(v string) *GetExportJobsInput { +func (s *GetSegmentVersionsInput) SetToken(v string) *GetSegmentVersionsInput { s.Token = &v return s } -type GetExportJobsOutput struct { - _ struct{} `type:"structure" payload:"ExportJobsResponse"` +type GetSegmentVersionsOutput struct { + _ struct{} `type:"structure" payload:"SegmentsResponse"` - // Export job list. + // Provides information about all the segments that are associated with an application. // - // ExportJobsResponse is a required field - ExportJobsResponse *ExportJobsResponse `type:"structure" required:"true"` + // SegmentsResponse is a required field + SegmentsResponse *SegmentsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetExportJobsOutput) String() string { +func (s GetSegmentVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetExportJobsOutput) GoString() string { +func (s GetSegmentVersionsOutput) GoString() string { return s.String() } -// SetExportJobsResponse sets the ExportJobsResponse field's value. -func (s *GetExportJobsOutput) SetExportJobsResponse(v *ExportJobsResponse) *GetExportJobsOutput { - s.ExportJobsResponse = v +// SetSegmentsResponse sets the SegmentsResponse field's value. +func (s *GetSegmentVersionsOutput) SetSegmentsResponse(v *SegmentsResponse) *GetSegmentVersionsOutput { + s.SegmentsResponse = v return s } -type GetGcmChannelInput struct { +type GetSegmentsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetGcmChannelInput) String() string { +func (s GetSegmentsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetGcmChannelInput) GoString() string { +func (s GetSegmentsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetGcmChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetGcmChannelInput"} +func (s *GetSegmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSegmentsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } @@ -15367,71 +21708,74 @@ func (s *GetGcmChannelInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetGcmChannelInput) SetApplicationId(v string) *GetGcmChannelInput { +func (s *GetSegmentsInput) SetApplicationId(v string) *GetSegmentsInput { s.ApplicationId = &v return s } -type GetGcmChannelOutput struct { - _ struct{} `type:"structure" payload:"GCMChannelResponse"` +// SetPageSize sets the PageSize field's value. +func (s *GetSegmentsInput) SetPageSize(v string) *GetSegmentsInput { + s.PageSize = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetSegmentsInput) SetToken(v string) *GetSegmentsInput { + s.Token = &v + return s +} - // Google Cloud Messaging channel definition +type GetSegmentsOutput struct { + _ struct{} `type:"structure" payload:"SegmentsResponse"` + + // Provides information about all the segments that are associated with an application. 
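The segment listing calls above keep the older token/page-size query-string pair rather than the next-token style used by the new journey operations. A paging sketch, assuming SegmentsResponse carries Item and NextToken fields as defined elsewhere in this file; the application ID and page size are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	svc := pinpoint.New(session.Must(session.NewSession()))

	in := &pinpoint.GetSegmentsInput{
		ApplicationId: aws.String("APPLICATION_ID"), // placeholder
		PageSize:      aws.String("20"),
	}
	for {
		out, err := svc.GetSegments(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, seg := range out.SegmentsResponse.Item {
			fmt.Println(aws.StringValue(seg.Id))
		}
		// Assumes SegmentsResponse exposes Item/NextToken, per the shapes
		// defined later in this generated file.
		if out.SegmentsResponse.NextToken == nil {
			break
		}
		in.Token = out.SegmentsResponse.NextToken
	}
}
```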
// - // GCMChannelResponse is a required field - GCMChannelResponse *GCMChannelResponse `type:"structure" required:"true"` + // SegmentsResponse is a required field + SegmentsResponse *SegmentsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetGcmChannelOutput) String() string { +func (s GetSegmentsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetGcmChannelOutput) GoString() string { +func (s GetSegmentsOutput) GoString() string { return s.String() } -// SetGCMChannelResponse sets the GCMChannelResponse field's value. -func (s *GetGcmChannelOutput) SetGCMChannelResponse(v *GCMChannelResponse) *GetGcmChannelOutput { - s.GCMChannelResponse = v +// SetSegmentsResponse sets the SegmentsResponse field's value. +func (s *GetSegmentsOutput) SetSegmentsResponse(v *SegmentsResponse) *GetSegmentsOutput { + s.SegmentsResponse = v return s } -type GetImportJobInput struct { +type GetSmsChannelInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - - // JobId is a required field - JobId *string `location:"uri" locationName:"job-id" type:"string" required:"true"` } // String returns the string representation -func (s GetImportJobInput) String() string { +func (s GetSmsChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetImportJobInput) GoString() string { +func (s GetSmsChannelInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetImportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetImportJobInput"} +func (s *GetSmsChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSmsChannelInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.JobId == nil { - invalidParams.Add(request.NewErrParamRequired("JobId")) - } - if s.JobId != nil && len(*s.JobId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -15440,71 +21784,62 @@ func (s *GetImportJobInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetImportJobInput) SetApplicationId(v string) *GetImportJobInput { +func (s *GetSmsChannelInput) SetApplicationId(v string) *GetSmsChannelInput { s.ApplicationId = &v return s } -// SetJobId sets the JobId field's value. -func (s *GetImportJobInput) SetJobId(v string) *GetImportJobInput { - s.JobId = &v - return s -} - -type GetImportJobOutput struct { - _ struct{} `type:"structure" payload:"ImportJobResponse"` +type GetSmsChannelOutput struct { + _ struct{} `type:"structure" payload:"SMSChannelResponse"` - // Import job response. + // Provides information about the status and settings of the SMS channel for + // an application. 
// - // ImportJobResponse is a required field - ImportJobResponse *ImportJobResponse `type:"structure" required:"true"` + // SMSChannelResponse is a required field + SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetImportJobOutput) String() string { +func (s GetSmsChannelOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetImportJobOutput) GoString() string { +func (s GetSmsChannelOutput) GoString() string { return s.String() } -// SetImportJobResponse sets the ImportJobResponse field's value. -func (s *GetImportJobOutput) SetImportJobResponse(v *ImportJobResponse) *GetImportJobOutput { - s.ImportJobResponse = v +// SetSMSChannelResponse sets the SMSChannelResponse field's value. +func (s *GetSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *GetSmsChannelOutput { + s.SMSChannelResponse = v return s } -type GetImportJobsInput struct { +type GetSmsTemplateInput struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - - Token *string `location:"querystring" locationName:"token" type:"string"` + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` } // String returns the string representation -func (s GetImportJobsInput) String() string { +func (s GetSmsTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetImportJobsInput) GoString() string { +func (s GetSmsTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetImportJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetImportJobsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) +func (s *GetSmsTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSmsTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } if invalidParams.Len() > 0 { @@ -15513,87 +21848,72 @@ func (s *GetImportJobsInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetImportJobsInput) SetApplicationId(v string) *GetImportJobsInput { - s.ApplicationId = &v - return s -} - -// SetPageSize sets the PageSize field's value. -func (s *GetImportJobsInput) SetPageSize(v string) *GetImportJobsInput { - s.PageSize = &v - return s -} - -// SetToken sets the Token field's value. -func (s *GetImportJobsInput) SetToken(v string) *GetImportJobsInput { - s.Token = &v +// SetTemplateName sets the TemplateName field's value. 
+func (s *GetSmsTemplateInput) SetTemplateName(v string) *GetSmsTemplateInput { + s.TemplateName = &v return s } -type GetImportJobsOutput struct { - _ struct{} `type:"structure" payload:"ImportJobsResponse"` +type GetSmsTemplateOutput struct { + _ struct{} `type:"structure" payload:"SMSTemplateResponse"` - // Import job list. + // Provides information about the content and settings for a message template + // that can be used in text messages that are sent through the SMS channel. // - // ImportJobsResponse is a required field - ImportJobsResponse *ImportJobsResponse `type:"structure" required:"true"` + // SMSTemplateResponse is a required field + SMSTemplateResponse *SMSTemplateResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetImportJobsOutput) String() string { +func (s GetSmsTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetImportJobsOutput) GoString() string { +func (s GetSmsTemplateOutput) GoString() string { return s.String() } -// SetImportJobsResponse sets the ImportJobsResponse field's value. -func (s *GetImportJobsOutput) SetImportJobsResponse(v *ImportJobsResponse) *GetImportJobsOutput { - s.ImportJobsResponse = v +// SetSMSTemplateResponse sets the SMSTemplateResponse field's value. +func (s *GetSmsTemplateOutput) SetSMSTemplateResponse(v *SMSTemplateResponse) *GetSmsTemplateOutput { + s.SMSTemplateResponse = v return s } -type GetSegmentExportJobsInput struct { +type GetUserEndpointsInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` - - Token *string `location:"querystring" locationName:"token" type:"string"` + // UserId is a required field + UserId *string `location:"uri" locationName:"user-id" type:"string" required:"true"` } // String returns the string representation -func (s GetSegmentExportJobsInput) String() string { +func (s GetUserEndpointsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentExportJobsInput) GoString() string { +func (s GetUserEndpointsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetSegmentExportJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentExportJobsInput"} +func (s *GetUserEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserEndpointsInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) } if invalidParams.Len() > 0 { @@ -15603,93 +21923,69 @@ func (s *GetSegmentExportJobsInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetSegmentExportJobsInput) SetApplicationId(v string) *GetSegmentExportJobsInput { +func (s *GetUserEndpointsInput) SetApplicationId(v string) *GetUserEndpointsInput { s.ApplicationId = &v return s } -// SetPageSize sets the PageSize field's value. -func (s *GetSegmentExportJobsInput) SetPageSize(v string) *GetSegmentExportJobsInput { - s.PageSize = &v - return s -} - -// SetSegmentId sets the SegmentId field's value. -func (s *GetSegmentExportJobsInput) SetSegmentId(v string) *GetSegmentExportJobsInput { - s.SegmentId = &v - return s -} - -// SetToken sets the Token field's value. -func (s *GetSegmentExportJobsInput) SetToken(v string) *GetSegmentExportJobsInput { - s.Token = &v +// SetUserId sets the UserId field's value. +func (s *GetUserEndpointsInput) SetUserId(v string) *GetUserEndpointsInput { + s.UserId = &v return s } -type GetSegmentExportJobsOutput struct { - _ struct{} `type:"structure" payload:"ExportJobsResponse"` +type GetUserEndpointsOutput struct { + _ struct{} `type:"structure" payload:"EndpointsResponse"` - // Export job list. + // Provides information about all the endpoints that are associated with a user + // ID. // - // ExportJobsResponse is a required field - ExportJobsResponse *ExportJobsResponse `type:"structure" required:"true"` + // EndpointsResponse is a required field + EndpointsResponse *EndpointsResponse `type:"structure" required:"true"` } // String returns the string representation -func (s GetSegmentExportJobsOutput) String() string { +func (s GetUserEndpointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentExportJobsOutput) GoString() string { +func (s GetUserEndpointsOutput) GoString() string { return s.String() } -// SetExportJobsResponse sets the ExportJobsResponse field's value. -func (s *GetSegmentExportJobsOutput) SetExportJobsResponse(v *ExportJobsResponse) *GetSegmentExportJobsOutput { - s.ExportJobsResponse = v +// SetEndpointsResponse sets the EndpointsResponse field's value. 
+func (s *GetUserEndpointsOutput) SetEndpointsResponse(v *EndpointsResponse) *GetUserEndpointsOutput { + s.EndpointsResponse = v return s } -type GetSegmentImportJobsInput struct { +type GetVoiceChannelInput struct { _ struct{} `type:"structure"` // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` - - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` - - Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s GetSegmentImportJobsInput) String() string { +func (s GetVoiceChannelInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentImportJobsInput) GoString() string { +func (s GetVoiceChannelInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetSegmentImportJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentImportJobsInput"} +func (s *GetVoiceChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetVoiceChannelInput"} if s.ApplicationId == nil { invalidParams.Add(request.NewErrParamRequired("ApplicationId")) } if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) - } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -15698,88 +21994,166 @@ func (s *GetSegmentImportJobsInput) Validate() error { } // SetApplicationId sets the ApplicationId field's value. -func (s *GetSegmentImportJobsInput) SetApplicationId(v string) *GetSegmentImportJobsInput { +func (s *GetVoiceChannelInput) SetApplicationId(v string) *GetVoiceChannelInput { s.ApplicationId = &v return s } -// SetPageSize sets the PageSize field's value. -func (s *GetSegmentImportJobsInput) SetPageSize(v string) *GetSegmentImportJobsInput { - s.PageSize = &v - return s +type GetVoiceChannelOutput struct { + _ struct{} `type:"structure" payload:"VoiceChannelResponse"` + + // Provides information about the status and settings of the voice channel for + // an application. + // + // VoiceChannelResponse is a required field + VoiceChannelResponse *VoiceChannelResponse `type:"structure" required:"true"` } -// SetSegmentId sets the SegmentId field's value. -func (s *GetSegmentImportJobsInput) SetSegmentId(v string) *GetSegmentImportJobsInput { - s.SegmentId = &v - return s +// String returns the string representation +func (s GetVoiceChannelOutput) String() string { + return awsutil.Prettify(s) } -// SetToken sets the Token field's value. -func (s *GetSegmentImportJobsInput) SetToken(v string) *GetSegmentImportJobsInput { - s.Token = &v +// GoString returns the string representation +func (s GetVoiceChannelOutput) GoString() string { + return s.String() +} + +// SetVoiceChannelResponse sets the VoiceChannelResponse field's value. 
+func (s *GetVoiceChannelOutput) SetVoiceChannelResponse(v *VoiceChannelResponse) *GetVoiceChannelOutput { + s.VoiceChannelResponse = v return s } -type GetSegmentImportJobsOutput struct { - _ struct{} `type:"structure" payload:"ImportJobsResponse"` +// Specifies the settings for a holdout activity in a journey. This type of +// activity stops a journey for a specified percentage of participants. +type HoldoutActivity struct { + _ struct{} `type:"structure"` + + // The unique identifier for the next activity to perform, after performing + // the holdout activity. + NextActivity *string `type:"string"` - // Import job list. + // The percentage of participants who shouldn't continue the journey. // - // ImportJobsResponse is a required field - ImportJobsResponse *ImportJobsResponse `type:"structure" required:"true"` + // Percentage is a required field + Percentage *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s GetSegmentImportJobsOutput) String() string { +func (s HoldoutActivity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentImportJobsOutput) GoString() string { +func (s HoldoutActivity) GoString() string { return s.String() } -// SetImportJobsResponse sets the ImportJobsResponse field's value. -func (s *GetSegmentImportJobsOutput) SetImportJobsResponse(v *ImportJobsResponse) *GetSegmentImportJobsOutput { - s.ImportJobsResponse = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *HoldoutActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HoldoutActivity"} + if s.Percentage == nil { + invalidParams.Add(request.NewErrParamRequired("Percentage")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextActivity sets the NextActivity field's value. +func (s *HoldoutActivity) SetNextActivity(v string) *HoldoutActivity { + s.NextActivity = &v return s } -type GetSegmentInput struct { +// SetPercentage sets the Percentage field's value. +func (s *HoldoutActivity) SetPercentage(v int64) *HoldoutActivity { + s.Percentage = &v + return s +} + +// Specifies the settings for a job that imports endpoint definitions from an +// Amazon Simple Storage Service (Amazon S3) bucket. +type ImportJobRequest struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // Specifies whether to create a segment that contains the endpoints, when the + // endpoint definitions are imported. + DefineSegment *bool `type:"boolean"` - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + // (Deprecated) Your AWS account ID, which you assigned to an external ID key + // in an IAM trust policy. Amazon Pinpoint previously used this value to assume + // an IAM role when importing endpoint definitions, but we removed this requirement. + // We don't recommend use of external IDs for IAM roles that are assumed by + // Amazon Pinpoint. + ExternalId *string `type:"string"` + + // The format of the files that contain the endpoint definitions to import. + // Valid values are: CSV, for comma-separated values format; and, JSON, for + // newline-delimited JSON format. If the Amazon S3 location stores multiple + // files that use different formats, Amazon Pinpoint imports data only from + // the files that use the specified format. 
+ // + // Format is a required field + Format *string `type:"string" required:"true" enum:"Format"` + + // Specifies whether to register the endpoints with Amazon Pinpoint, when the + // endpoint definitions are imported. + RegisterEndpoints *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorizes Amazon Pinpoint to access the Amazon S3 location + // to import endpoint definitions from. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` + + // The URL of the Amazon Simple Storage Service (Amazon S3) bucket that contains + // the endpoint definitions to import. This location can be a folder or a single + // file. If the location is a folder, Amazon Pinpoint imports endpoint definitions + // from the files in this location, including any subfolders that the folder + // contains. + // + // The URL should be in the following format: s3://bucket-name/folder-name/file-name. + // The location can end with the key for an individual object or a prefix that + // qualifies multiple objects. + // + // S3Url is a required field + S3Url *string `type:"string" required:"true"` + + // The identifier for the segment to update or add the imported endpoint definitions + // to, if the import job is meant to update an existing segment. + SegmentId *string `type:"string"` + + // The custom name for the segment that's created by the import job, if the + // value of the DefineSegment property is true. + SegmentName *string `type:"string"` } // String returns the string representation -func (s GetSegmentInput) String() string { +func (s ImportJobRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentInput) GoString() string { +func (s ImportJobRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetSegmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) +func (s *ImportJobRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportJobRequest"} + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) + if s.S3Url == nil { + invalidParams.Add(request.NewErrParamRequired("S3Url")) } if invalidParams.Len() > 0 { @@ -15788,918 +22162,1135 @@ func (s *GetSegmentInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetSegmentInput) SetApplicationId(v string) *GetSegmentInput { - s.ApplicationId = &v +// SetDefineSegment sets the DefineSegment field's value. +func (s *ImportJobRequest) SetDefineSegment(v bool) *ImportJobRequest { + s.DefineSegment = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *ImportJobRequest) SetExternalId(v string) *ImportJobRequest { + s.ExternalId = &v + return s +} + +// SetFormat sets the Format field's value. 
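For reference, a minimal sketch of how the new HoldoutActivity type above might be built and validated. The 40 percent split and the "step-2" activity identifier are made-up values; the import path is the SDK package this file belongs to.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/pinpoint"
)

func main() {
	// Percentage is the only required field; Validate reports it when missing.
	holdout := &pinpoint.HoldoutActivity{}
	holdout.SetPercentage(40)         // stop the journey for 40% of participants (example value)
	holdout.SetNextActivity("step-2") // hypothetical ID of the activity that follows the holdout
	if err := holdout.Validate(); err != nil {
		fmt.Println("invalid holdout:", err)
	}
}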
+func (s *ImportJobRequest) SetFormat(v string) *ImportJobRequest { + s.Format = &v + return s +} + +// SetRegisterEndpoints sets the RegisterEndpoints field's value. +func (s *ImportJobRequest) SetRegisterEndpoints(v bool) *ImportJobRequest { + s.RegisterEndpoints = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *ImportJobRequest) SetRoleArn(v string) *ImportJobRequest { + s.RoleArn = &v + return s +} + +// SetS3Url sets the S3Url field's value. +func (s *ImportJobRequest) SetS3Url(v string) *ImportJobRequest { + s.S3Url = &v return s } // SetSegmentId sets the SegmentId field's value. -func (s *GetSegmentInput) SetSegmentId(v string) *GetSegmentInput { +func (s *ImportJobRequest) SetSegmentId(v string) *ImportJobRequest { s.SegmentId = &v return s } -type GetSegmentOutput struct { - _ struct{} `type:"structure" payload:"SegmentResponse"` +// SetSegmentName sets the SegmentName field's value. +func (s *ImportJobRequest) SetSegmentName(v string) *ImportJobRequest { + s.SegmentName = &v + return s +} + +// Provides information about the resource settings for a job that imports endpoint +// definitions from one or more files. The files can be stored in an Amazon +// Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer +// by using the Amazon Pinpoint console. +type ImportJobResource struct { + _ struct{} `type:"structure"` - // Segment definition. + // Specifies whether the import job creates a segment that contains the endpoints, + // when the endpoint definitions are imported. + DefineSegment *bool `type:"boolean"` + + // (Deprecated) Your AWS account ID, which you assigned to an external ID key + // in an IAM trust policy. Amazon Pinpoint previously used this value to assume + // an IAM role when importing endpoint definitions, but we removed this requirement. + // We don't recommend use of external IDs for IAM roles that are assumed by + // Amazon Pinpoint. + ExternalId *string `type:"string"` + + // The format of the files that contain the endpoint definitions to import. + // Valid values are: CSV, for comma-separated values format; and, JSON, for + // newline-delimited JSON format. // - // SegmentResponse is a required field - SegmentResponse *SegmentResponse `type:"structure" required:"true"` + // If the files are stored in an Amazon S3 location and that location contains + // multiple files that use different formats, Amazon Pinpoint imports data only + // from the files that use the specified format. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"Format"` + + // Specifies whether the import job registers the endpoints with Amazon Pinpoint, + // when the endpoint definitions are imported. + RegisterEndpoints *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorizes Amazon Pinpoint to access the Amazon S3 location + // to import endpoint definitions from. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` + + // The URL of the Amazon Simple Storage Service (Amazon S3) bucket that contains + // the endpoint definitions to import. This location can be a folder or a single + // file. If the location is a folder, Amazon Pinpoint imports endpoint definitions + // from the files in this location, including any subfolders that the folder + // contains. + // + // The URL should be in the following format: s3://bucket-name/folder-name/file-name. 
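A similar sketch for the new ImportJobRequest shape, assuming the same imports as above; the bucket, role ARN, and segment name are hypothetical. Format, RoleArn, and S3Url are the three required fields that Validate checks.

// Sketch: a CSV endpoint import from an S3 folder that also creates a segment.
req := &pinpoint.ImportJobRequest{}
req.SetFormat("CSV")
req.SetRoleArn("arn:aws:iam::111122223333:role/pinpoint-s3-import")
req.SetS3Url("s3://example-bucket/endpoints/") // a folder: files in subfolders are imported too
req.SetDefineSegment(true)
req.SetSegmentName("imported-endpoints")
if err := req.Validate(); err != nil {
	fmt.Println("invalid import job request:", err)
}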
+ // The location can end with the key for an individual object or a prefix that + // qualifies multiple objects. + // + // S3Url is a required field + S3Url *string `type:"string" required:"true"` + + // The identifier for the segment that the import job updates or adds endpoint + // definitions to, if the import job updates an existing segment. + SegmentId *string `type:"string"` + + // The custom name for the segment that's created by the import job, if the + // value of the DefineSegment property is true. + SegmentName *string `type:"string"` } // String returns the string representation -func (s GetSegmentOutput) String() string { +func (s ImportJobResource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentOutput) GoString() string { +func (s ImportJobResource) GoString() string { return s.String() } -// SetSegmentResponse sets the SegmentResponse field's value. -func (s *GetSegmentOutput) SetSegmentResponse(v *SegmentResponse) *GetSegmentOutput { - s.SegmentResponse = v +// SetDefineSegment sets the DefineSegment field's value. +func (s *ImportJobResource) SetDefineSegment(v bool) *ImportJobResource { + s.DefineSegment = &v return s } -type GetSegmentVersionInput struct { +// SetExternalId sets the ExternalId field's value. +func (s *ImportJobResource) SetExternalId(v string) *ImportJobResource { + s.ExternalId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *ImportJobResource) SetFormat(v string) *ImportJobResource { + s.Format = &v + return s +} + +// SetRegisterEndpoints sets the RegisterEndpoints field's value. +func (s *ImportJobResource) SetRegisterEndpoints(v bool) *ImportJobResource { + s.RegisterEndpoints = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *ImportJobResource) SetRoleArn(v string) *ImportJobResource { + s.RoleArn = &v + return s +} + +// SetS3Url sets the S3Url field's value. +func (s *ImportJobResource) SetS3Url(v string) *ImportJobResource { + s.S3Url = &v + return s +} + +// SetSegmentId sets the SegmentId field's value. +func (s *ImportJobResource) SetSegmentId(v string) *ImportJobResource { + s.SegmentId = &v + return s +} + +// SetSegmentName sets the SegmentName field's value. +func (s *ImportJobResource) SetSegmentName(v string) *ImportJobResource { + s.SegmentName = &v + return s +} + +// Provides information about the status and settings of a job that imports +// endpoint definitions from one or more files. The files can be stored in an +// Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from +// a computer by using the Amazon Pinpoint console. +type ImportJobResponse struct { _ struct{} `type:"structure"` + // The unique identifier for the application that's associated with the import + // job. + // // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + ApplicationId *string `type:"string" required:"true"` - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` + // The number of pieces that were processed successfully (completed) by the + // import job, as of the time of the request. + CompletedPieces *int64 `type:"integer"` + + // The date, in ISO 8601 format, when the import job was completed. + CompletionDate *string `type:"string"` + + // The date, in ISO 8601 format, when the import job was created. 
+ // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` + + // The resource settings that apply to the import job. + // + // Definition is a required field + Definition *ImportJobResource `type:"structure" required:"true"` + + // The number of pieces that weren't processed successfully (failed) by the + // import job, as of the time of the request. + FailedPieces *int64 `type:"integer"` + + // An array of entries, one for each of the first 100 entries that weren't processed + // successfully (failed) by the import job, if any. + Failures []*string `type:"list"` + + // The unique identifier for the import job. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The status of the import job. The job status is FAILED if Amazon Pinpoint + // wasn't able to process one or more pieces in the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"JobStatus"` + + // The total number of endpoint definitions that weren't processed successfully + // (failed) by the import job, typically because an error, such as a syntax + // error, occurred. + TotalFailures *int64 `type:"integer"` + + // The total number of pieces that must be processed to complete the import + // job. Each piece consists of an approximately equal portion of the endpoint + // definitions that are part of the import job. + TotalPieces *int64 `type:"integer"` + + // The total number of endpoint definitions that were processed by the import + // job. + TotalProcessed *int64 `type:"integer"` - // Version is a required field - Version *string `location:"uri" locationName:"version" type:"string" required:"true"` + // The job type. This value is IMPORT for import jobs. + // + // Type is a required field + Type *string `type:"string" required:"true"` } // String returns the string representation -func (s GetSegmentVersionInput) String() string { +func (s ImportJobResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentVersionInput) GoString() string { +func (s ImportJobResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSegmentVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentVersionInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) - } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) - } - if s.Version == nil { - invalidParams.Add(request.NewErrParamRequired("Version")) - } - if s.Version != nil && len(*s.Version) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Version", 1)) - } +// SetApplicationId sets the ApplicationId field's value. +func (s *ImportJobResponse) SetApplicationId(v string) *ImportJobResponse { + s.ApplicationId = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCompletedPieces sets the CompletedPieces field's value. +func (s *ImportJobResponse) SetCompletedPieces(v int64) *ImportJobResponse { + s.CompletedPieces = &v + return s } -// SetApplicationId sets the ApplicationId field's value. 
-func (s *GetSegmentVersionInput) SetApplicationId(v string) *GetSegmentVersionInput { - s.ApplicationId = &v +// SetCompletionDate sets the CompletionDate field's value. +func (s *ImportJobResponse) SetCompletionDate(v string) *ImportJobResponse { + s.CompletionDate = &v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *GetSegmentVersionInput) SetSegmentId(v string) *GetSegmentVersionInput { - s.SegmentId = &v +// SetCreationDate sets the CreationDate field's value. +func (s *ImportJobResponse) SetCreationDate(v string) *ImportJobResponse { + s.CreationDate = &v return s } -// SetVersion sets the Version field's value. -func (s *GetSegmentVersionInput) SetVersion(v string) *GetSegmentVersionInput { - s.Version = &v +// SetDefinition sets the Definition field's value. +func (s *ImportJobResponse) SetDefinition(v *ImportJobResource) *ImportJobResponse { + s.Definition = v return s } -type GetSegmentVersionOutput struct { - _ struct{} `type:"structure" payload:"SegmentResponse"` +// SetFailedPieces sets the FailedPieces field's value. +func (s *ImportJobResponse) SetFailedPieces(v int64) *ImportJobResponse { + s.FailedPieces = &v + return s +} - // Segment definition. - // - // SegmentResponse is a required field - SegmentResponse *SegmentResponse `type:"structure" required:"true"` +// SetFailures sets the Failures field's value. +func (s *ImportJobResponse) SetFailures(v []*string) *ImportJobResponse { + s.Failures = v + return s } -// String returns the string representation -func (s GetSegmentVersionOutput) String() string { - return awsutil.Prettify(s) +// SetId sets the Id field's value. +func (s *ImportJobResponse) SetId(v string) *ImportJobResponse { + s.Id = &v + return s } -// GoString returns the string representation -func (s GetSegmentVersionOutput) GoString() string { - return s.String() +// SetJobStatus sets the JobStatus field's value. +func (s *ImportJobResponse) SetJobStatus(v string) *ImportJobResponse { + s.JobStatus = &v + return s } -// SetSegmentResponse sets the SegmentResponse field's value. -func (s *GetSegmentVersionOutput) SetSegmentResponse(v *SegmentResponse) *GetSegmentVersionOutput { - s.SegmentResponse = v +// SetTotalFailures sets the TotalFailures field's value. +func (s *ImportJobResponse) SetTotalFailures(v int64) *ImportJobResponse { + s.TotalFailures = &v return s } -type GetSegmentVersionsInput struct { - _ struct{} `type:"structure"` +// SetTotalPieces sets the TotalPieces field's value. +func (s *ImportJobResponse) SetTotalPieces(v int64) *ImportJobResponse { + s.TotalPieces = &v + return s +} - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` +// SetTotalProcessed sets the TotalProcessed field's value. +func (s *ImportJobResponse) SetTotalProcessed(v int64) *ImportJobResponse { + s.TotalProcessed = &v + return s +} - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` +// SetType sets the Type field's value. +func (s *ImportJobResponse) SetType(v string) *ImportJobResponse { + s.Type = &v + return s +} - // SegmentId is a required field - SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` +// Provides information about the status and settings of all the import jobs +// that are associated with an application or segment. An import job is a job +// that imports endpoint definitions from one or more files. 
+type ImportJobsResponse struct { + _ struct{} `type:"structure"` - Token *string `location:"querystring" locationName:"token" type:"string"` + // An array of responses, one for each import job that's associated with the + // application (Import Jobs resource) or segment (Segment Import Jobs resource). + // + // Item is a required field + Item []*ImportJobResponse `type:"list" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` } // String returns the string representation -func (s GetSegmentVersionsInput) String() string { +func (s ImportJobsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentVersionsInput) GoString() string { +func (s ImportJobsResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSegmentVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentVersionsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.SegmentId == nil { - invalidParams.Add(request.NewErrParamRequired("SegmentId")) - } - if s.SegmentId != nil && len(*s.SegmentId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SegmentId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApplicationId sets the ApplicationId field's value. -func (s *GetSegmentVersionsInput) SetApplicationId(v string) *GetSegmentVersionsInput { - s.ApplicationId = &v - return s -} - -// SetPageSize sets the PageSize field's value. -func (s *GetSegmentVersionsInput) SetPageSize(v string) *GetSegmentVersionsInput { - s.PageSize = &v +// SetItem sets the Item field's value. +func (s *ImportJobsResponse) SetItem(v []*ImportJobResponse) *ImportJobsResponse { + s.Item = v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *GetSegmentVersionsInput) SetSegmentId(v string) *GetSegmentVersionsInput { - s.SegmentId = &v +// SetNextToken sets the NextToken field's value. +func (s *ImportJobsResponse) SetNextToken(v string) *ImportJobsResponse { + s.NextToken = &v return s } -// SetToken sets the Token field's value. -func (s *GetSegmentVersionsInput) SetToken(v string) *GetSegmentVersionsInput { - s.Token = &v - return s -} +// Provides information about the results of a request to create or update an +// endpoint that's associated with an event. +type ItemResponse struct { + _ struct{} `type:"structure"` -type GetSegmentVersionsOutput struct { - _ struct{} `type:"structure" payload:"SegmentsResponse"` + // The response that was received after the endpoint data was accepted. + EndpointItemResponse *EndpointItemResponse `type:"structure"` - // Segments in your account. - // - // SegmentsResponse is a required field - SegmentsResponse *SegmentsResponse `type:"structure" required:"true"` + // A multipart response object that contains a key and a value for each event + // in the request. In each object, the event ID is the key and an EventItemResponse + // object is the value. 
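A sketch of consuming the ImportJobResponse/ImportJobsResponse pair above, assuming jobs is a *pinpoint.ImportJobsResponse returned by one of the import-jobs operations and that the aws helper package (github.com/aws/aws-sdk-go/aws) is imported for pointer dereferencing.

for _, job := range jobs.Item {
	// JobStatus is FAILED if Amazon Pinpoint couldn't process one or more pieces.
	fmt.Printf("job %s: %s (%d of %d pieces done)\n",
		aws.StringValue(job.Id),
		aws.StringValue(job.JobStatus),
		aws.Int64Value(job.CompletedPieces),
		aws.Int64Value(job.TotalPieces))
}
if jobs.NextToken != nil {
	// Pass aws.StringValue(jobs.NextToken) as the token for the next page of results.
}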
+ EventsItemResponse map[string]*EventItemResponse `type:"map"` } // String returns the string representation -func (s GetSegmentVersionsOutput) String() string { +func (s ItemResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentVersionsOutput) GoString() string { +func (s ItemResponse) GoString() string { return s.String() } -// SetSegmentsResponse sets the SegmentsResponse field's value. -func (s *GetSegmentVersionsOutput) SetSegmentsResponse(v *SegmentsResponse) *GetSegmentVersionsOutput { - s.SegmentsResponse = v +// SetEndpointItemResponse sets the EndpointItemResponse field's value. +func (s *ItemResponse) SetEndpointItemResponse(v *EndpointItemResponse) *ItemResponse { + s.EndpointItemResponse = v return s } -type GetSegmentsInput struct { +// SetEventsItemResponse sets the EventsItemResponse field's value. +func (s *ItemResponse) SetEventsItemResponse(v map[string]*EventItemResponse) *ItemResponse { + s.EventsItemResponse = v + return s +} + +// Provides the results of a query that retrieved the data for a standard engagement +// metric that applies to a journey, and provides information about that query. +type JourneyDateRangeKpiResponse struct { _ struct{} `type:"structure"` + // The unique identifier for the application that the metric applies to. + // // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + ApplicationId *string `type:"string" required:"true"` - PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + // EndTime is a required field + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` - Token *string `location:"querystring" locationName:"token" type:"string"` + // The unique identifier for the journey that the metric applies to. + // + // JourneyId is a required field + JourneyId *string `type:"string" required:"true"` + + // The name of the metric, also referred to as a key performance indicator (KPI), + // that the data was retrieved for. This value describes the associated metric + // and consists of two or more terms, which are comprised of lowercase alphanumeric + // characters, separated by a hyphen. For a list of possible values, see the + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // + // KpiName is a required field + KpiName *string `type:"string" required:"true"` + + // An array of objects that contains the results of the query. Each object contains + // the value for the metric and metadata about that value. + // + // KpiResult is a required field + KpiResult *BaseKpiResult `type:"structure" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null for the Journey Engagement Metrics + // resource because the resource returns all results in a single page. 
+ NextToken *string `type:"string"` + + // StartTime is a required field + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` } // String returns the string representation -func (s GetSegmentsInput) String() string { +func (s JourneyDateRangeKpiResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentsInput) GoString() string { +func (s JourneyDateRangeKpiResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSegmentsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSegmentsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } +// SetApplicationId sets the ApplicationId field's value. +func (s *JourneyDateRangeKpiResponse) SetApplicationId(v string) *JourneyDateRangeKpiResponse { + s.ApplicationId = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEndTime sets the EndTime field's value. +func (s *JourneyDateRangeKpiResponse) SetEndTime(v time.Time) *JourneyDateRangeKpiResponse { + s.EndTime = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetSegmentsInput) SetApplicationId(v string) *GetSegmentsInput { - s.ApplicationId = &v +// SetJourneyId sets the JourneyId field's value. +func (s *JourneyDateRangeKpiResponse) SetJourneyId(v string) *JourneyDateRangeKpiResponse { + s.JourneyId = &v return s } -// SetPageSize sets the PageSize field's value. -func (s *GetSegmentsInput) SetPageSize(v string) *GetSegmentsInput { - s.PageSize = &v +// SetKpiName sets the KpiName field's value. +func (s *JourneyDateRangeKpiResponse) SetKpiName(v string) *JourneyDateRangeKpiResponse { + s.KpiName = &v return s } -// SetToken sets the Token field's value. -func (s *GetSegmentsInput) SetToken(v string) *GetSegmentsInput { - s.Token = &v +// SetKpiResult sets the KpiResult field's value. +func (s *JourneyDateRangeKpiResponse) SetKpiResult(v *BaseKpiResult) *JourneyDateRangeKpiResponse { + s.KpiResult = v return s } -type GetSegmentsOutput struct { - _ struct{} `type:"structure" payload:"SegmentsResponse"` +// SetNextToken sets the NextToken field's value. +func (s *JourneyDateRangeKpiResponse) SetNextToken(v string) *JourneyDateRangeKpiResponse { + s.NextToken = &v + return s +} - // Segments in your account. - // - // SegmentsResponse is a required field - SegmentsResponse *SegmentsResponse `type:"structure" required:"true"` +// SetStartTime sets the StartTime field's value. +func (s *JourneyDateRangeKpiResponse) SetStartTime(v time.Time) *JourneyDateRangeKpiResponse { + s.StartTime = &v + return s +} + +// Specifies the "From" address for an email message that's sent to participants +// in a journey. +type JourneyEmailMessage struct { + _ struct{} `type:"structure"` + + // The verified email address to send the email message from. The default address + // is the FromAddress specified for the email channel for the application. 
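A short sketch of reading the JourneyDateRangeKpiResponse above; out is a hypothetical result from a journey KPI query, and the time package is assumed imported.

kpi := out.JourneyDateRangeKpiResponse // hypothetical: the payload of a journey KPI query
fmt.Printf("%s for journey %s, %s through %s\n",
	aws.StringValue(kpi.KpiName), // a hyphenated metric name, per the developer guide
	aws.StringValue(kpi.JourneyId),
	aws.TimeValue(kpi.StartTime).Format(time.RFC3339),
	aws.TimeValue(kpi.EndTime).Format(time.RFC3339))
// NextToken is always nil here: this resource returns all results in a single page.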
+ FromAddress *string `type:"string"` } // String returns the string representation -func (s GetSegmentsOutput) String() string { +func (s JourneyEmailMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSegmentsOutput) GoString() string { +func (s JourneyEmailMessage) GoString() string { return s.String() } -// SetSegmentsResponse sets the SegmentsResponse field's value. -func (s *GetSegmentsOutput) SetSegmentsResponse(v *SegmentsResponse) *GetSegmentsOutput { - s.SegmentsResponse = v +// SetFromAddress sets the FromAddress field's value. +func (s *JourneyEmailMessage) SetFromAddress(v string) *JourneyEmailMessage { + s.FromAddress = &v return s } -type GetSmsChannelInput struct { +// Provides the results of a query that retrieved the data for a standard execution +// metric that applies to a journey activity, and provides information about +// that query. +type JourneyExecutionActivityMetricsResponse struct { _ struct{} `type:"structure"` + // The type of activity that the metric applies to. Possible values are: + // + // * CONDITIONAL_SPLIT - For a yes/no split activity, which is an activity + // that sends participants down one of two paths in a journey. + // + // * HOLDOUT - For a holdout activity, which is an activity that stops a + // journey for a specified percentage of participants. + // + // * MESSAGE - For an email activity, which is an activity that sends an + // email message to participants. + // + // * MULTI_CONDITIONAL_SPLIT - For a multivariate split activity, which is + // an activity that sends participants down one of as many as five paths + // in a journey. + // + // * RANDOM_SPLIT - For a random split activity, which is an activity that + // sends specified percentages of participants down one of as many as five + // paths in a journey. + // + // * WAIT - For a wait activity, which is an activity that waits for a certain + // amount of time or until a specific date and time before moving participants + // to the next activity in a journey. + // + // ActivityType is a required field + ActivityType *string `type:"string" required:"true"` + + // The unique identifier for the application that the metric applies to. + // // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + ApplicationId *string `type:"string" required:"true"` + + // The unique identifier for the activity that the metric applies to. + // + // JourneyActivityId is a required field + JourneyActivityId *string `type:"string" required:"true"` + + // The unique identifier for the journey that the metric applies to. + // + // JourneyId is a required field + JourneyId *string `type:"string" required:"true"` + + // The date and time, in ISO 8601 format, when Amazon Pinpoint last evaluated + // the execution status of the activity and updated the data for the metric. + // + // LastEvaluatedTime is a required field + LastEvaluatedTime *string `type:"string" required:"true"` + + // A JSON object that contains the results of the query. The results vary depending + // on the type of activity (ActivityType). For information about the structure + // and contents of the results, see the Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). 
+ // + // Metrics is a required field + Metrics map[string]*string `type:"map" required:"true"` } // String returns the string representation -func (s GetSmsChannelInput) String() string { +func (s JourneyExecutionActivityMetricsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSmsChannelInput) GoString() string { +func (s JourneyExecutionActivityMetricsResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSmsChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSmsChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetActivityType sets the ActivityType field's value. +func (s *JourneyExecutionActivityMetricsResponse) SetActivityType(v string) *JourneyExecutionActivityMetricsResponse { + s.ActivityType = &v + return s } // SetApplicationId sets the ApplicationId field's value. -func (s *GetSmsChannelInput) SetApplicationId(v string) *GetSmsChannelInput { +func (s *JourneyExecutionActivityMetricsResponse) SetApplicationId(v string) *JourneyExecutionActivityMetricsResponse { s.ApplicationId = &v return s } -type GetSmsChannelOutput struct { - _ struct{} `type:"structure" payload:"SMSChannelResponse"` - - // SMS Channel Response. - // - // SMSChannelResponse is a required field - SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"` +// SetJourneyActivityId sets the JourneyActivityId field's value. +func (s *JourneyExecutionActivityMetricsResponse) SetJourneyActivityId(v string) *JourneyExecutionActivityMetricsResponse { + s.JourneyActivityId = &v + return s } -// String returns the string representation -func (s GetSmsChannelOutput) String() string { - return awsutil.Prettify(s) +// SetJourneyId sets the JourneyId field's value. +func (s *JourneyExecutionActivityMetricsResponse) SetJourneyId(v string) *JourneyExecutionActivityMetricsResponse { + s.JourneyId = &v + return s } -// GoString returns the string representation -func (s GetSmsChannelOutput) GoString() string { - return s.String() +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value. +func (s *JourneyExecutionActivityMetricsResponse) SetLastEvaluatedTime(v string) *JourneyExecutionActivityMetricsResponse { + s.LastEvaluatedTime = &v + return s } -// SetSMSChannelResponse sets the SMSChannelResponse field's value. -func (s *GetSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *GetSmsChannelOutput { - s.SMSChannelResponse = v +// SetMetrics sets the Metrics field's value. +func (s *JourneyExecutionActivityMetricsResponse) SetMetrics(v map[string]*string) *JourneyExecutionActivityMetricsResponse { + s.Metrics = v return s } -type GetUserEndpointsInput struct { +// Provides the results of a query that retrieved the data for a standard execution +// metric that applies to a journey. +type JourneyExecutionMetricsResponse struct { _ struct{} `type:"structure"` + // The unique identifier for the application that the metric applies to. 
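A sketch of walking the Metrics map on the activity-metrics shape above, assuming m is a *pinpoint.JourneyExecutionActivityMetricsResponse.

// Metrics is a string-to-string map; which keys appear depends on the
// ActivityType, as described in the field documentation above.
for name, value := range m.Metrics {
	fmt.Printf("%s: %s = %s\n",
		aws.StringValue(m.JourneyActivityId), name, aws.StringValue(value))
}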
+ // // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + ApplicationId *string `type:"string" required:"true"` - // UserId is a required field - UserId *string `location:"uri" locationName:"user-id" type:"string" required:"true"` + // The unique identifier for the journey that the metric applies to. + // + // JourneyId is a required field + JourneyId *string `type:"string" required:"true"` + + // The date and time, in ISO 8601 format, when Amazon Pinpoint last evaluated + // the journey and updated the data for the metric. + // + // LastEvaluatedTime is a required field + LastEvaluatedTime *string `type:"string" required:"true"` + + // A JSON object that contains the results of the query. For information about + // the structure and contents of the results, see the Amazon Pinpoint Developer + // Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // + // Metrics is a required field + Metrics map[string]*string `type:"map" required:"true"` } // String returns the string representation -func (s GetUserEndpointsInput) String() string { +func (s JourneyExecutionMetricsResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetUserEndpointsInput) GoString() string { +func (s JourneyExecutionMetricsResponse) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetUserEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetUserEndpointsInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } - if s.UserId == nil { - invalidParams.Add(request.NewErrParamRequired("UserId")) - } - if s.UserId != nil && len(*s.UserId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetApplicationId sets the ApplicationId field's value. -func (s *GetUserEndpointsInput) SetApplicationId(v string) *GetUserEndpointsInput { +func (s *JourneyExecutionMetricsResponse) SetApplicationId(v string) *JourneyExecutionMetricsResponse { s.ApplicationId = &v return s } -// SetUserId sets the UserId field's value. -func (s *GetUserEndpointsInput) SetUserId(v string) *GetUserEndpointsInput { - s.UserId = &v +// SetJourneyId sets the JourneyId field's value. +func (s *JourneyExecutionMetricsResponse) SetJourneyId(v string) *JourneyExecutionMetricsResponse { + s.JourneyId = &v return s } -type GetUserEndpointsOutput struct { - _ struct{} `type:"structure" payload:"EndpointsResponse"` - - // List of endpoints - // - // EndpointsResponse is a required field - EndpointsResponse *EndpointsResponse `type:"structure" required:"true"` -} - -// String returns the string representation -func (s GetUserEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetUserEndpointsOutput) GoString() string { - return s.String() +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value. +func (s *JourneyExecutionMetricsResponse) SetLastEvaluatedTime(v string) *JourneyExecutionMetricsResponse { + s.LastEvaluatedTime = &v + return s } -// SetEndpointsResponse sets the EndpointsResponse field's value. 
-func (s *GetUserEndpointsOutput) SetEndpointsResponse(v *EndpointsResponse) *GetUserEndpointsOutput { - s.EndpointsResponse = v +// SetMetrics sets the Metrics field's value. +func (s *JourneyExecutionMetricsResponse) SetMetrics(v map[string]*string) *JourneyExecutionMetricsResponse { + s.Metrics = v return s } -type GetVoiceChannelInput struct { +// Specifies limits on the messages that a journey can send and the number of +// times participants can enter a journey. +type JourneyLimits struct { _ struct{} `type:"structure"` - // ApplicationId is a required field - ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + // The maximum number of messages that the journey can send to a single participant + // during a 24-hour period. The maximum value is 100. + DailyCap *int64 `type:"integer"` + + // The maximum number of times that a participant can enter the journey. The + // maximum value is 100. To allow participants to enter the journey an unlimited + // number of times, set this value to 0. + EndpointReentryCap *int64 `type:"integer"` + + // The maximum number of messages that the journey can send each second. + MessagesPerSecond *int64 `type:"integer"` } // String returns the string representation -func (s GetVoiceChannelInput) String() string { +func (s JourneyLimits) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetVoiceChannelInput) GoString() string { +func (s JourneyLimits) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetVoiceChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetVoiceChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) - } +// SetDailyCap sets the DailyCap field's value. +func (s *JourneyLimits) SetDailyCap(v int64) *JourneyLimits { + s.DailyCap = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEndpointReentryCap sets the EndpointReentryCap field's value. +func (s *JourneyLimits) SetEndpointReentryCap(v int64) *JourneyLimits { + s.EndpointReentryCap = &v + return s } -// SetApplicationId sets the ApplicationId field's value. -func (s *GetVoiceChannelInput) SetApplicationId(v string) *GetVoiceChannelInput { - s.ApplicationId = &v +// SetMessagesPerSecond sets the MessagesPerSecond field's value. +func (s *JourneyLimits) SetMessagesPerSecond(v int64) *JourneyLimits { + s.MessagesPerSecond = &v return s } -type GetVoiceChannelOutput struct { - _ struct{} `type:"structure" payload:"VoiceChannelResponse"` +// Provides information about the status, configuration, and other settings +// for a journey. +type JourneyResponse struct { + _ struct{} `type:"structure"` - // Voice Channel Response. + // The configuration and other settings for the activities that comprise the + // journey. + Activities map[string]*Activity `type:"map"` + + // The unique identifier for the application that the journey applies to. 
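A sketch of configuring the JourneyLimits type above; the cap values are examples chosen within the documented bounds.

limits := &pinpoint.JourneyLimits{}
limits.SetDailyCap(10)          // at most 10 messages per participant per 24-hour period (documented max is 100)
limits.SetEndpointReentryCap(1) // each participant can enter once; 0 would allow unlimited re-entry
limits.SetMessagesPerSecond(50) // journey-wide send rate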
// - // VoiceChannelResponse is a required field - VoiceChannelResponse *VoiceChannelResponse `type:"structure" required:"true"` -} + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` -// String returns the string representation -func (s GetVoiceChannelOutput) String() string { - return awsutil.Prettify(s) -} + // The date, in ISO 8601 format, when the journey was created. + CreationDate *string `type:"string"` -// GoString returns the string representation -func (s GetVoiceChannelOutput) GoString() string { - return s.String() -} + // The unique identifier for the journey. + // + // Id is a required field + Id *string `type:"string" required:"true"` -// SetVoiceChannelResponse sets the VoiceChannelResponse field's value. -func (s *GetVoiceChannelOutput) SetVoiceChannelResponse(v *VoiceChannelResponse) *GetVoiceChannelOutput { - s.VoiceChannelResponse = v - return s -} + // The date, in ISO 8601 format, when the journey was last modified. + LastModifiedDate *string `type:"string"` -// Import job request. -type ImportJobRequest struct { - _ struct{} `type:"structure"` + // The messaging and entry limits for the journey. + Limits *JourneyLimits `type:"structure"` - // Sets whether the endpoints create a segment when they are imported. - DefineSegment *bool `type:"boolean"` + // Specifies whether the journey's scheduled start and end times use each participant's + // local time. If this value is true, the schedule uses each participant's local + // time. + LocalTime *bool `type:"boolean"` - // (Deprecated) Your AWS account ID, which you assigned to the ExternalID key - // in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This - // requirement is removed, and external IDs are not recommended for IAM roles - // assumed by Amazon Pinpoint. - ExternalId *string `type:"string"` + // The name of the journey. + // + // Name is a required field + Name *string `type:"string" required:"true"` - // The format of the files that contain the endpoint definitions.Valid values: - // CSV, JSON - Format *string `type:"string" enum:"Format"` + // The quiet time settings for the journey. Quiet time is a specific time range + // when a journey doesn't send messages to participants, if all the following + // conditions are met: + // + // * The EndpointDemographic.Timezone property of the endpoint for the participant + // is set to a valid value. + // + // * The current time in the participant's time zone is later than or equal + // to the time specified by the QuietTime.Start property for the journey. + // + // * The current time in the participant's time zone is earlier than or equal + // to the time specified by the QuietTime.End property for the journey. + // + // If any of the preceding conditions isn't met, the participant will receive + // messages from the journey, even if quiet time is enabled. + QuietTime *QuietTime `type:"structure"` - // Sets whether the endpoints are registered with Amazon Pinpoint when they - // are imported. - RegisterEndpoints *bool `type:"boolean"` + // The frequency with which Amazon Pinpoint evaluates segment and event data + // for the journey, as a duration in ISO 8601 format. + RefreshFrequency *string `type:"string"` - // The Amazon Resource Name (ARN) of an IAM role that grants Amazon Pinpoint - // access to the Amazon S3 location that contains the endpoints to import. - RoleArn *string `type:"string"` + // The schedule settings for the journey. 
+ Schedule *JourneySchedule `type:"structure"` - // The URL of the S3 bucket that contains the segment information to import. - // The location can be a folder or a single file. The URL should use the following - // format: s3://bucket-name/folder-name/file-nameAmazon Pinpoint imports endpoints - // from this location and any subfolders it contains. - S3Url *string `type:"string"` + // The unique identifier for the first activity in the journey. + StartActivity *string `type:"string"` - // The ID of the segment to update if the import job is meant to update an existing - // segment. - SegmentId *string `type:"string"` + // The segment that defines which users are participants in the journey. + StartCondition *StartCondition `type:"structure"` - // A custom name for the segment created by the import job. Use if DefineSegment - // is true. - SegmentName *string `type:"string"` + // The current status of the journey. Possible values are: + // + // * DRAFT - The journey is being developed and hasn't been published yet. + // + // * ACTIVE - The journey has been developed and published. Depending on + // the journey's schedule, the journey may currently be running or scheduled + // to start running at a later time. If a journey's status is ACTIVE, you + // can't add, change, or remove activities from it. + // + // * COMPLETED - The journey has been published and has finished running. + // All participants have entered the journey and no participants are waiting + // to complete the journey or any activities in the journey. + // + // * CANCELLED - The journey has been stopped. If a journey's status is CANCELLED, + // you can't add, change, or remove activities or segment settings from the + // journey. + // + // * CLOSED - The journey has been published and has started running. It + // may have also passed its scheduled end time, or passed its scheduled start + // time and a refresh frequency hasn't been specified for it. If a journey's + // status is CLOSED, you can't add participants to it, and no existing participants + // can enter the journey for the first time. However, any existing participants + // who are currently waiting to start an activity may resume the journey. + State *string `type:"string" enum:"State"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the journey. Each tag consists of a required tag key and + // an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation -func (s ImportJobRequest) String() string { +func (s JourneyResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportJobRequest) GoString() string { +func (s JourneyResponse) GoString() string { return s.String() } -// SetDefineSegment sets the DefineSegment field's value. -func (s *ImportJobRequest) SetDefineSegment(v bool) *ImportJobRequest { - s.DefineSegment = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *ImportJobRequest) SetExternalId(v string) *ImportJobRequest { - s.ExternalId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *ImportJobRequest) SetFormat(v string) *ImportJobRequest { - s.Format = &v - return s -} - -// SetRegisterEndpoints sets the RegisterEndpoints field's value. -func (s *ImportJobRequest) SetRegisterEndpoints(v bool) *ImportJobRequest { - s.RegisterEndpoints = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. 
-func (s *ImportJobRequest) SetRoleArn(v string) *ImportJobRequest { - s.RoleArn = &v +// SetActivities sets the Activities field's value. +func (s *JourneyResponse) SetActivities(v map[string]*Activity) *JourneyResponse { + s.Activities = v return s } -// SetS3Url sets the S3Url field's value. -func (s *ImportJobRequest) SetS3Url(v string) *ImportJobRequest { - s.S3Url = &v +// SetApplicationId sets the ApplicationId field's value. +func (s *JourneyResponse) SetApplicationId(v string) *JourneyResponse { + s.ApplicationId = &v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *ImportJobRequest) SetSegmentId(v string) *ImportJobRequest { - s.SegmentId = &v +// SetCreationDate sets the CreationDate field's value. +func (s *JourneyResponse) SetCreationDate(v string) *JourneyResponse { + s.CreationDate = &v return s } -// SetSegmentName sets the SegmentName field's value. -func (s *ImportJobRequest) SetSegmentName(v string) *ImportJobRequest { - s.SegmentName = &v +// SetId sets the Id field's value. +func (s *JourneyResponse) SetId(v string) *JourneyResponse { + s.Id = &v return s } -// Import job resource -type ImportJobResource struct { - _ struct{} `type:"structure"` - - // Sets whether the endpoints create a segment when they are imported. - DefineSegment *bool `type:"boolean"` - - // (Deprecated) Your AWS account ID, which you assigned to the ExternalID key - // in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This - // requirement is removed, and external IDs are not recommended for IAM roles - // assumed by Amazon Pinpoint. - ExternalId *string `type:"string"` - - // The format of the files that contain the endpoint definitions.Valid values: - // CSV, JSON - Format *string `type:"string" enum:"Format"` - - // Sets whether the endpoints are registered with Amazon Pinpoint when they - // are imported. - RegisterEndpoints *bool `type:"boolean"` - - // The Amazon Resource Name (ARN) of an IAM role that grants Amazon Pinpoint - // access to the Amazon S3 location that contains the endpoints to import. - RoleArn *string `type:"string"` - - // The URL of the S3 bucket that contains the segment information to import. - // The location can be a folder or a single file. The URL should use the following - // format: s3://bucket-name/folder-name/file-nameAmazon Pinpoint imports endpoints - // from this location and any subfolders it contains. - S3Url *string `type:"string"` - - // The ID of the segment to update if the import job is meant to update an existing - // segment. - SegmentId *string `type:"string"` - - // A custom name for the segment created by the import job. Use if DefineSegment - // is true. - SegmentName *string `type:"string"` +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *JourneyResponse) SetLastModifiedDate(v string) *JourneyResponse { + s.LastModifiedDate = &v + return s } -// String returns the string representation -func (s ImportJobResource) String() string { - return awsutil.Prettify(s) +// SetLimits sets the Limits field's value. +func (s *JourneyResponse) SetLimits(v *JourneyLimits) *JourneyResponse { + s.Limits = v + return s } -// GoString returns the string representation -func (s ImportJobResource) GoString() string { - return s.String() +// SetLocalTime sets the LocalTime field's value. +func (s *JourneyResponse) SetLocalTime(v bool) *JourneyResponse { + s.LocalTime = &v + return s } -// SetDefineSegment sets the DefineSegment field's value. 
-func (s *ImportJobResource) SetDefineSegment(v bool) *ImportJobResource { - s.DefineSegment = &v +// SetName sets the Name field's value. +func (s *JourneyResponse) SetName(v string) *JourneyResponse { + s.Name = &v return s } -// SetExternalId sets the ExternalId field's value. -func (s *ImportJobResource) SetExternalId(v string) *ImportJobResource { - s.ExternalId = &v +// SetQuietTime sets the QuietTime field's value. +func (s *JourneyResponse) SetQuietTime(v *QuietTime) *JourneyResponse { + s.QuietTime = v return s } -// SetFormat sets the Format field's value. -func (s *ImportJobResource) SetFormat(v string) *ImportJobResource { - s.Format = &v +// SetRefreshFrequency sets the RefreshFrequency field's value. +func (s *JourneyResponse) SetRefreshFrequency(v string) *JourneyResponse { + s.RefreshFrequency = &v return s } -// SetRegisterEndpoints sets the RegisterEndpoints field's value. -func (s *ImportJobResource) SetRegisterEndpoints(v bool) *ImportJobResource { - s.RegisterEndpoints = &v +// SetSchedule sets the Schedule field's value. +func (s *JourneyResponse) SetSchedule(v *JourneySchedule) *JourneyResponse { + s.Schedule = v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *ImportJobResource) SetRoleArn(v string) *ImportJobResource { - s.RoleArn = &v +// SetStartActivity sets the StartActivity field's value. +func (s *JourneyResponse) SetStartActivity(v string) *JourneyResponse { + s.StartActivity = &v return s } -// SetS3Url sets the S3Url field's value. -func (s *ImportJobResource) SetS3Url(v string) *ImportJobResource { - s.S3Url = &v +// SetStartCondition sets the StartCondition field's value. +func (s *JourneyResponse) SetStartCondition(v *StartCondition) *JourneyResponse { + s.StartCondition = v return s } -// SetSegmentId sets the SegmentId field's value. -func (s *ImportJobResource) SetSegmentId(v string) *ImportJobResource { - s.SegmentId = &v +// SetState sets the State field's value. +func (s *JourneyResponse) SetState(v string) *JourneyResponse { + s.State = &v return s } -// SetSegmentName sets the SegmentName field's value. -func (s *ImportJobResource) SetSegmentName(v string) *ImportJobResource { - s.SegmentName = &v +// SetTags sets the Tags field's value. +func (s *JourneyResponse) SetTags(v map[string]*string) *JourneyResponse { + s.Tags = v return s } -// Import job response. -type ImportJobResponse struct { +// Specifies the schedule settings for a journey. +type JourneySchedule struct { _ struct{} `type:"structure"` - // The unique ID of the application to which the import job applies. - ApplicationId *string `type:"string"` - - // The number of pieces that have successfully imported as of the time of the - // request. - CompletedPieces *int64 `type:"integer"` - - // The date the import job completed in ISO 8601 format. - CompletionDate *string `type:"string"` - - // The date the import job was created in ISO 8601 format. - CreationDate *string `type:"string"` - - // The import job settings. - Definition *ImportJobResource `type:"structure"` - - // The number of pieces that have failed to import as of the time of the request. - FailedPieces *int64 `type:"integer"` - - // Provides up to 100 of the first failed entries for the job, if any exist. - Failures []*string `type:"list"` - - // The unique ID of the import job. - Id *string `type:"string"` - - // The status of the import job.Valid values: CREATED, INITIALIZING, PROCESSING, - // COMPLETING, COMPLETED, FAILING, FAILEDThe job status is FAILED if one or - // more pieces failed to import. 
- JobStatus *string `type:"string" enum:"JobStatus"` + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The number of endpoints that failed to import; for example, because of syntax - // errors. - TotalFailures *int64 `type:"integer"` - - // The total number of pieces that must be imported to finish the job. Each - // piece is an approximately equal portion of the endpoints to import. - TotalPieces *int64 `type:"integer"` - - // The number of endpoints that were processed by the import job. - TotalProcessed *int64 `type:"integer"` + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The job type. Will be Import. - Type *string `type:"string"` + // The starting UTC offset for the journey schedule, if the value of the journey's + // LocalTime property is true. Valid values are: UTC, UTC+01, UTC+02, UTC+03, + // UTC+03:30, UTC+04, UTC+04:30, UTC+05, UTC+05:30, UTC+05:45, UTC+06, UTC+06:30, + // UTC+07, UTC+08, UTC+08:45, UTC+09, UTC+09:30, UTC+10, UTC+10:30, UTC+11, + // UTC+12, UTC+12:45, UTC+13, UTC+13:45, UTC-02, UTC-02:30, UTC-03, UTC-03:30, + // UTC-04, UTC-05, UTC-06, UTC-07, UTC-08, UTC-09, UTC-09:30, UTC-10, and UTC-11. + Timezone *string `type:"string"` } // String returns the string representation -func (s ImportJobResponse) String() string { +func (s JourneySchedule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportJobResponse) GoString() string { +func (s JourneySchedule) GoString() string { return s.String() } -// SetApplicationId sets the ApplicationId field's value. -func (s *ImportJobResponse) SetApplicationId(v string) *ImportJobResponse { - s.ApplicationId = &v +// SetEndTime sets the EndTime field's value. +func (s *JourneySchedule) SetEndTime(v time.Time) *JourneySchedule { + s.EndTime = &v return s } -// SetCompletedPieces sets the CompletedPieces field's value. -func (s *ImportJobResponse) SetCompletedPieces(v int64) *ImportJobResponse { - s.CompletedPieces = &v +// SetStartTime sets the StartTime field's value. +func (s *JourneySchedule) SetStartTime(v time.Time) *JourneySchedule { + s.StartTime = &v return s } -// SetCompletionDate sets the CompletionDate field's value. -func (s *ImportJobResponse) SetCompletionDate(v string) *ImportJobResponse { - s.CompletionDate = &v +// SetTimezone sets the Timezone field's value. +func (s *JourneySchedule) SetTimezone(v string) *JourneySchedule { + s.Timezone = &v return s } -// SetCreationDate sets the CreationDate field's value. -func (s *ImportJobResponse) SetCreationDate(v string) *ImportJobResponse { - s.CreationDate = &v - return s -} +// Changes the status of a journey. +type JourneyStateRequest struct { + _ struct{} `type:"structure"` -// SetDefinition sets the Definition field's value. -func (s *ImportJobResponse) SetDefinition(v *ImportJobResource) *ImportJobResponse { - s.Definition = v - return s + // The status of the journey. Currently, the only supported value is CANCELLED. + // + // If you cancel a journey, Amazon Pinpoint continues to perform activities + // that are currently in progress, until those activities are complete. Amazon + // Pinpoint also continues to collect and aggregate analytics data for those + // activities, until they are complete, and any activities that were complete + // when you cancelled the journey. + // + // After you cancel a journey, you can't add, change, or remove any activities + // from the journey. 
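A sketch of filling in the JourneySchedule type above; the dates are arbitrary examples, and the Timezone value must come from the list in the field documentation.

sched := &pinpoint.JourneySchedule{}
sched.SetStartTime(time.Date(2019, time.December, 1, 9, 0, 0, 0, time.UTC))
sched.SetEndTime(time.Date(2019, time.December, 31, 9, 0, 0, 0, time.UTC))
sched.SetTimezone("UTC+01") // starting offset; only consulted when the journey's LocalTime is true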
In addition, Amazon Pinpoint stops evaluating the journey + // and doesn't perform any activities that haven't started. + State *string `type:"string" enum:"State"` } -// SetFailedPieces sets the FailedPieces field's value. -func (s *ImportJobResponse) SetFailedPieces(v int64) *ImportJobResponse { - s.FailedPieces = &v - return s +// String returns the string representation +func (s JourneyStateRequest) String() string { + return awsutil.Prettify(s) } -// SetFailures sets the Failures field's value. -func (s *ImportJobResponse) SetFailures(v []*string) *ImportJobResponse { - s.Failures = v - return s +// GoString returns the string representation +func (s JourneyStateRequest) GoString() string { + return s.String() } -// SetId sets the Id field's value. -func (s *ImportJobResponse) SetId(v string) *ImportJobResponse { - s.Id = &v +// SetState sets the State field's value. +func (s *JourneyStateRequest) SetState(v string) *JourneyStateRequest { + s.State = &v return s } -// SetJobStatus sets the JobStatus field's value. -func (s *ImportJobResponse) SetJobStatus(v string) *ImportJobResponse { - s.JobStatus = &v - return s +// Provides information about the status, configuration, and other settings +// for all the journeys that are associated with an application. +type JourneysResponse struct { + _ struct{} `type:"structure"` + + // An array of responses, one for each journey that's associated with the application. + // + // Item is a required field + Item []*JourneyResponse `type:"list" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` } -// SetTotalFailures sets the TotalFailures field's value. -func (s *ImportJobResponse) SetTotalFailures(v int64) *ImportJobResponse { - s.TotalFailures = &v - return s +// String returns the string representation +func (s JourneysResponse) String() string { + return awsutil.Prettify(s) } -// SetTotalPieces sets the TotalPieces field's value. -func (s *ImportJobResponse) SetTotalPieces(v int64) *ImportJobResponse { - s.TotalPieces = &v - return s +// GoString returns the string representation +func (s JourneysResponse) GoString() string { + return s.String() } -// SetTotalProcessed sets the TotalProcessed field's value. -func (s *ImportJobResponse) SetTotalProcessed(v int64) *ImportJobResponse { - s.TotalProcessed = &v +// SetItem sets the Item field's value. +func (s *JourneysResponse) SetItem(v []*JourneyResponse) *JourneysResponse { + s.Item = v return s } -// SetType sets the Type field's value. -func (s *ImportJobResponse) SetType(v string) *ImportJobResponse { - s.Type = &v +// SetNextToken sets the NextToken field's value. +func (s *JourneysResponse) SetNextToken(v string) *JourneysResponse { + s.NextToken = &v return s } -// Import job list. -type ImportJobsResponse struct { +type ListJourneysInput struct { _ struct{} `type:"structure"` - // A list of import jobs for the application. - Item []*ImportJobResponse `type:"list"` + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // The string that you use in a subsequent request to get the next page of results - // in a paginated response. 
- NextToken *string `type:"string"` + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + Token *string `location:"querystring" locationName:"token" type:"string"` } // String returns the string representation -func (s ImportJobsResponse) String() string { +func (s ListJourneysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportJobsResponse) GoString() string { +func (s ListJourneysInput) GoString() string { return s.String() } -// SetItem sets the Item field's value. -func (s *ImportJobsResponse) SetItem(v []*ImportJobResponse) *ImportJobsResponse { - s.Item = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJourneysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJourneysInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *ListJourneysInput) SetApplicationId(v string) *ListJourneysInput { + s.ApplicationId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ImportJobsResponse) SetNextToken(v string) *ImportJobsResponse { - s.NextToken = &v +// SetPageSize sets the PageSize field's value. +func (s *ListJourneysInput) SetPageSize(v string) *ListJourneysInput { + s.PageSize = &v return s } -// The response that's provided after registering the endpoint. -type ItemResponse struct { - _ struct{} `type:"structure"` +// SetToken sets the Token field's value. +func (s *ListJourneysInput) SetToken(v string) *ListJourneysInput { + s.Token = &v + return s +} - // The response received after the endpoint was accepted. - EndpointItemResponse *EndpointItemResponse `type:"structure"` +type ListJourneysOutput struct { + _ struct{} `type:"structure" payload:"JourneysResponse"` - // A multipart response object that contains a key and value for each event - // ID in the request. In each object, the event ID is the key, and an EventItemResponse - // object is the value. - EventsItemResponse map[string]*EventItemResponse `type:"map"` + // Provides information about the status, configuration, and other settings + // for all the journeys that are associated with an application. + // + // JourneysResponse is a required field + JourneysResponse *JourneysResponse `type:"structure" required:"true"` } // String returns the string representation -func (s ItemResponse) String() string { +func (s ListJourneysOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ItemResponse) GoString() string { +func (s ListJourneysOutput) GoString() string { return s.String() } -// SetEndpointItemResponse sets the EndpointItemResponse field's value. -func (s *ItemResponse) SetEndpointItemResponse(v *EndpointItemResponse) *ItemResponse { - s.EndpointItemResponse = v - return s -} - -// SetEventsItemResponse sets the EventsItemResponse field's value. -func (s *ItemResponse) SetEventsItemResponse(v map[string]*EventItemResponse) *ItemResponse { - s.EventsItemResponse = v +// SetJourneysResponse sets the JourneysResponse field's value. 
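The new ListJourneysInput/ListJourneysOutput pair follows the SDK's usual token-based pagination: pass the previous page's JourneysResponse.NextToken back as Token until it comes back nil. A rough usage sketch, assuming the standard aws-sdk-go session setup ("github.com/aws/aws-sdk-go/aws", ".../aws/session", ".../service/pinpoint") and a hypothetical application ID:

func listAllJourneys(appID string) ([]*pinpoint.JourneyResponse, error) {
	svc := pinpoint.New(session.Must(session.NewSession()))
	var journeys []*pinpoint.JourneyResponse
	var token *string
	for {
		out, err := svc.ListJourneys(&pinpoint.ListJourneysInput{
			ApplicationId: aws.String(appID), // hypothetical app ID
			PageSize:      aws.String("20"),
			Token:         token,
		})
		if err != nil {
			return nil, err
		}
		journeys = append(journeys, out.JourneysResponse.Item...)
		token = out.JourneysResponse.NextToken
		if token == nil {
			break // NextToken is null when no pages remain
		}
	}
	return journeys, nil
}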
+func (s *ListJourneysOutput) SetJourneysResponse(v *JourneysResponse) *ListJourneysOutput { + s.JourneysResponse = v return s } @@ -16745,6 +23336,9 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource type ListTagsForResourceOutput struct { _ struct{} `type:"structure" payload:"TagsModel"` + // Specifies the tags (keys and values) for an application, campaign, journey, + // message template, or segment. + // // TagsModel is a required field TagsModel *TagsModel `type:"structure" required:"true"` } @@ -16765,60 +23359,144 @@ func (s *ListTagsForResourceOutput) SetTagsModel(v *TagsModel) *ListTagsForResou return s } -// Message to send +type ListTemplatesInput struct { + _ struct{} `type:"structure"` + + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + PageSize *string `location:"querystring" locationName:"page-size" type:"string"` + + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + TemplateType *string `location:"querystring" locationName:"template-type" type:"string"` +} + +// String returns the string representation +func (s ListTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTemplatesInput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTemplatesInput) SetNextToken(v string) *ListTemplatesInput { + s.NextToken = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListTemplatesInput) SetPageSize(v string) *ListTemplatesInput { + s.PageSize = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListTemplatesInput) SetPrefix(v string) *ListTemplatesInput { + s.Prefix = &v + return s +} + +// SetTemplateType sets the TemplateType field's value. +func (s *ListTemplatesInput) SetTemplateType(v string) *ListTemplatesInput { + s.TemplateType = &v + return s +} + +type ListTemplatesOutput struct { + _ struct{} `type:"structure" payload:"TemplatesResponse"` + + // Provides information about all the message templates that are associated + // with your Amazon Pinpoint account. + // + // TemplatesResponse is a required field + TemplatesResponse *TemplatesResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTemplatesOutput) GoString() string { + return s.String() +} + +// SetTemplatesResponse sets the TemplatesResponse field's value. +func (s *ListTemplatesOutput) SetTemplatesResponse(v *TemplatesResponse) *ListTemplatesOutput { + s.TemplatesResponse = v + return s +} + +// Specifies the content and settings for a push notification that's sent to +// recipients of a campaign. type Message struct { _ struct{} `type:"structure"` - // The action that occurs if the user taps a push notification delivered by - // the campaign:OPEN_APP - Your app launches, or it becomes the foreground app - // if it has been sent to the background. This is the default action.DEEP_LINK - // - Uses deep linking features in iOS and Android to open your app and display - // a designated user interface within the app.URL - The default mobile browser - // on the user's device launches and opens a web page at the URL you specify. + // The action to occur if a recipient taps the push notification. 
Valid values + // are: + // + // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // sent to the background. This is the default action. + // + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This setting uses the deep-linking features of iOS and Android. + // + // * URL - The default mobile browser on the recipient's device opens and + // loads the web page at a URL that you specify. Action *string `type:"string" enum:"Action"` - // The message body. Can include up to 140 characters. + // The body of the notification message. The maximum number of characters is + // 200. Body *string `type:"string"` - // The URL that points to the icon image for the push notification icon, for - // example, the app icon. + // The URL of the image to display as the push-notification icon, such as the + // icon for the app. ImageIconUrl *string `type:"string"` - // The URL that points to the small icon image for the push notification icon, - // for example, the app icon. + // The URL of the image to display as the small, push-notification icon, such + // as a small version of the icon for the app. ImageSmallIconUrl *string `type:"string"` - // The URL that points to an image used in the push notification. + // The URL of an image to display in the push notification. ImageUrl *string `type:"string"` - // The JSON payload used for a silent push. + // The JSON payload to use for a silent push notification. JsonBody *string `type:"string"` - // A URL that refers to the location of an image or video that you want to display - // in the push notification. + // The URL of the image or video to display in the push notification. MediaUrl *string `type:"string"` - // The Raw JSON formatted string to be used as the payload. This value overrides - // the message. + // The raw, JSON-formatted string to use as the payload for the notification + // message. This value overrides other values for the message. RawContent *string `type:"string"` - // Indicates if the message should display on the users device.Silent pushes - // can be used for Remote Configuration and Phone Home use cases. + // Specifies whether the notification is a silent push notification, which is + // a push notification that doesn't display on a recipient's device. Silent + // push notifications can be used for cases such as updating an app's configuration, + // displaying messages in an in-app message center, or supporting phone home + // functionality. SilentPush *bool `type:"boolean"` - // This parameter specifies how long (in seconds) the message should be kept - // if the service is unable to deliver the notification the first time. If the - // value is 0, it treats the notification as if it expires immediately and does - // not store the notification or attempt to redeliver it. This value is converted - // to the expiration field when sent to the service. It only applies to APNs - // and GCM + // The number of seconds that the push-notification service should keep the + // message, if the service is unable to deliver the notification the first time. + // This value is converted to an expiration value when it's sent to a push-notification + // service. If this value is 0, the service treats the notification as if it + // expires immediately and the service doesn't store or try to deliver the notification + // again. + // + // This value doesn't apply to messages that are sent through the Amazon Device + // Messaging (ADM) service. 
TimeToLive *int64 `type:"integer"` - // The message title that displays above the message on the user's device. + // The title to display above the notification message on a recipient's device. Title *string `type:"string"` - // The URL to open in the user's mobile browser. Used if the value for Action - // is URL. + // The URL to open in a recipient's default mobile browser, if a recipient taps + // the push notification and the value of the Action property is URL. Url *string `type:"string"` } @@ -16904,14 +23582,14 @@ func (s *Message) SetUrl(v string) *Message { return s } -// Simple message object. +// Provides information about an API request or response. type MessageBody struct { _ struct{} `type:"structure"` - // The error message that's returned from the API. + // The message that's returned from the API. Message *string `type:"string"` - // The unique message body ID. + // The unique identifier for the request or response. RequestID *string `type:"string"` } @@ -16937,33 +23615,36 @@ func (s *MessageBody) SetRequestID(v string) *MessageBody { return s } -// Message configuration for a campaign. +// Specifies the message configuration settings for a campaign. type MessageConfiguration struct { _ struct{} `type:"structure"` - // The message that the campaign delivers to ADM channels. Overrides the default - // message. + // The message that the campaign sends through the ADM (Amazon Device Messaging) + // channel. This message overrides the default message. ADMMessage *Message `type:"structure"` - // The message that the campaign delivers to APNS channels. Overrides the default - // message. + // The message that the campaign sends through the APNs (Apple Push Notification + // service) channel. This message overrides the default message. APNSMessage *Message `type:"structure"` - // The message that the campaign delivers to Baidu channels. Overrides the default - // message. + // The message that the campaign sends through the Baidu (Baidu Cloud Push) + // channel. This message overrides the default message. BaiduMessage *Message `type:"structure"` - // The default message for all channels. + // The default message that the campaign sends through all the channels that + // are configured for the campaign. DefaultMessage *Message `type:"structure"` - // The email message configuration. + // The message that the campaign sends through the email channel. EmailMessage *CampaignEmailMessage `type:"structure"` - // The message that the campaign delivers to GCM channels. Overrides the default - // message. + // The message that the campaign sends through the GCM channel, which enables + // Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging + // (FCM), formerly Google Cloud Messaging (GCM), service. This message overrides + // the default message. GCMMessage *Message `type:"structure"` - // The SMS message configuration. + // The message that the campaign sends through the SMS channel. SMSMessage *CampaignSmsMessage `type:"structure"` } @@ -16977,6 +23658,21 @@ func (s MessageConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
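Tying the Message fields above together, a minimal push payload for a campaign's MessageConfiguration might look like the sketch below. The copy and deep-link URI are placeholders, only the GCM override is shown, and the ActionDeepLink constant name assumes the SDK's usual generated Action* enum values:

msg := &pinpoint.Message{
	Action:     aws.String(pinpoint.ActionDeepLink), // "DEEP_LINK"
	Title:      aws.String("Spring sale"),
	Body:       aws.String("Tap to see today's offers."),
	Url:        aws.String("myapp://offers"), // hypothetical deep link
	SilentPush: aws.Bool(false),
	TimeToLive: aws.Int64(3600), // keep undeliverable messages for an hour (not honored by ADM)
}
cfg := &pinpoint.MessageConfiguration{GCMMessage: msg}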
+func (s *MessageConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageConfiguration"} + if s.EmailMessage != nil { + if err := s.EmailMessage.Validate(); err != nil { + invalidParams.AddNested("EmailMessage", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetADMMessage sets the ADMMessage field's value. func (s *MessageConfiguration) SetADMMessage(v *Message) *MessageConfiguration { s.ADMMessage = v @@ -17019,30 +23715,39 @@ func (s *MessageConfiguration) SetSMSMessage(v *CampaignSmsMessage) *MessageConf return s } -// Send message request. +// Specifies the objects that define configuration and other settings for a +// message. type MessageRequest struct { _ struct{} `type:"structure"` // A map of key-value pairs, where each key is an address and each value is // an AddressConfiguration object. An address can be a push notification token, - // a phone number, or an email address. + // a phone number, or an email address. You can use an AddressConfiguration + // object to tailor the message for an address by specifying settings such as + // content overrides and message variables. Addresses map[string]*AddressConfiguration `type:"map"` - // A map of custom attributes to attributes to be attached to the message. This - // payload is added to the push notification's 'data.pinpoint' object or added - // to the email/sms delivery receipt event attributes. + // A map of custom attributes to attach to the message. For a push notification, + // this payload is added to the data.pinpoint object. For an email or text message, + // this payload is added to email/SMS delivery receipt event attributes. Context map[string]*string `type:"map"` // A map of key-value pairs, where each key is an endpoint ID and each value - // is an EndpointSendConfiguration object. Within an EndpointSendConfiguration - // object, you can tailor the message for an endpoint by specifying message - // overrides or substitutions. + // is an EndpointSendConfiguration object. You can use an EndpointSendConfiguration + // object to tailor the message for an endpoint by specifying settings such + // as content overrides and message variables. Endpoints map[string]*EndpointSendConfiguration `type:"map"` - // Message configuration. - MessageConfiguration *DirectMessageConfiguration `type:"structure"` + // The set of properties that defines the configuration settings for the message. + // + // MessageConfiguration is a required field + MessageConfiguration *DirectMessageConfiguration `type:"structure" required:"true"` + + // The message template to use for the message. + TemplateConfiguration *TemplateConfiguration `type:"structure"` - // A unique ID that you can use to trace a message. This ID is visible to recipients. + // The unique identifier for tracing the message. This identifier is visible + // to message recipients. TraceId *string `type:"string"` } @@ -17056,6 +23761,19 @@ func (s MessageRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageRequest"} + if s.MessageConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MessageConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAddresses sets the Addresses field's value. 
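Because MessageConfiguration is now a required field, MessageRequest.Validate above rejects a request without one. A hedged sketch of a direct SMS send; the SendMessages operation, SendMessagesInput, and SMSMessage shapes are assumed from elsewhere in this file, and the message body is a placeholder:

func sendCode(svc *pinpoint.Pinpoint, appID, phone string) error {
	req := &pinpoint.MessageRequest{
		Addresses: map[string]*pinpoint.AddressConfiguration{
			phone: {ChannelType: aws.String(pinpoint.ChannelTypeSms)},
		},
		MessageConfiguration: &pinpoint.DirectMessageConfiguration{
			SMSMessage: &pinpoint.SMSMessage{Body: aws.String("Your code is 123456")},
		},
		TraceId: aws.String("signup-sms"), // visible to recipients
	}
	if err := req.Validate(); err != nil { // catches a missing MessageConfiguration
		return err
	}
	_, err := svc.SendMessages(&pinpoint.SendMessagesInput{
		ApplicationId:  aws.String(appID),
		MessageRequest: req,
	})
	return err
}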
func (s *MessageRequest) SetAddresses(v map[string]*AddressConfiguration) *MessageRequest { s.Addresses = v @@ -17080,29 +23798,39 @@ func (s *MessageRequest) SetMessageConfiguration(v *DirectMessageConfiguration) return s } +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *MessageRequest) SetTemplateConfiguration(v *TemplateConfiguration) *MessageRequest { + s.TemplateConfiguration = v + return s +} + // SetTraceId sets the TraceId field's value. func (s *MessageRequest) SetTraceId(v string) *MessageRequest { s.TraceId = &v return s } -// Send message response. +// Provides information about the results of a request to send a message to +// an endpoint address. type MessageResponse struct { _ struct{} `type:"structure"` - // Application id of the message. - ApplicationId *string `type:"string"` + // The unique identifier for the application that was used to send the message. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` - // A map containing a multi part response for each address, with the endpointId - // as the key and the result as the value. + // A map that contains a multipart response for each address that the message + // was sent to. In the map, the endpoint ID is the key and the result is the + // value. EndpointResult map[string]*EndpointMessageResult `type:"map"` - // Original request Id for which this message was delivered. + // The identifier for the original request that the message was delivered for. RequestId *string `type:"string"` - // A map containing a multi part response for each address, with the address - // as the key(Email address, phone number or push token) and the result as the - // value. + // A map that contains a multipart response for each address (email address, + // phone number, or push notification token) that the message was sent to. In + // the map, the address is the key and the result is the value. Result map[string]*MessageResult `type:"map"` } @@ -17140,36 +23868,54 @@ func (s *MessageResponse) SetResult(v map[string]*MessageResult) *MessageRespons return s } -// The result from sending a message to an address. +// Provides information about the results of sending a message directly to an +// endpoint address. type MessageResult struct { _ struct{} `type:"structure"` - // The delivery status of the message. Possible values:SUCCESS - The message - // was successfully delivered to the endpoint.TRANSIENT_FAILURE - A temporary - // error occurred. Amazon Pinpoint will attempt to deliver the message again - // later.FAILURE_PERMANENT - An error occurred when delivering the message to - // the endpoint. Amazon Pinpoint won't attempt to send the message again.TIMEOUT - // - The message couldn't be sent within the timeout period.QUIET_TIME - The - // local time for the endpoint was within the QuietTime for the campaign or - // app.DAILY_CAP - The endpoint has received the maximum number of messages - // it can receive within a 24-hour period.HOLDOUT - The endpoint was in a hold - // out treatment for the campaign.THROTTLED - Amazon Pinpoint throttled sending - // to this endpoint.EXPIRED - The endpoint address is expired.CAMPAIGN_CAP - - // The endpoint received the maximum number of messages allowed by the campaign.SERVICE_FAILURE - // - A service-level failure prevented Amazon Pinpoint from delivering the message.UNKNOWN - // - An unknown error occurred. 
- DeliveryStatus *string `type:"string" enum:"DeliveryStatus"` - - // Unique message identifier associated with the message that was sent. + // The delivery status of the message. Possible values are: + // + // * DUPLICATE - The endpoint address is a duplicate of another endpoint + // address. Amazon Pinpoint won't attempt to send the message again. + // + // * OPT_OUT - The user who's associated with the endpoint address has opted + // out of receiving messages from you. Amazon Pinpoint won't attempt to send + // the message again. + // + // * PERMANENT_FAILURE - An error occurred when delivering the message to + // the endpoint address. Amazon Pinpoint won't attempt to send the message + // again. + // + // * SUCCESSFUL - The message was successfully delivered to the endpoint + // address. + // + // * TEMPORARY_FAILURE - A temporary error occurred. Amazon Pinpoint will + // attempt to deliver the message again later. + // + // * THROTTLED - Amazon Pinpoint throttled the operation to send the message + // to the endpoint address. + // + // * TIMEOUT - The message couldn't be sent within the timeout period. + // + // * UNKNOWN_FAILURE - An unknown error occurred. + // + // DeliveryStatus is a required field + DeliveryStatus *string `type:"string" required:"true" enum:"DeliveryStatus"` + + // The unique identifier for the message that was sent. MessageId *string `type:"string"` - // Downstream service status code. - StatusCode *int64 `type:"integer"` + // The downstream service status code for delivering the message. + // + // StatusCode is a required field + StatusCode *int64 `type:"integer" required:"true"` - // Status message for message delivery. + // The status message for delivering the message. StatusMessage *string `type:"string"` - // If token was updated as part of delivery. (This is GCM Specific) + // For push notifications that are sent through the GCM channel, specifies whether + // the endpoint's device registration token was updated as part of delivering + // the message. UpdatedToken *string `type:"string"` } @@ -17213,16 +23959,22 @@ func (s *MessageResult) SetUpdatedToken(v string) *MessageResult { return s } -// Custom metric dimension +// Specifies metric-based criteria for including or excluding endpoints from +// a segment. These criteria derive from custom metrics that you define for +// endpoints. type MetricDimension struct { _ struct{} `type:"structure"` - // The operator that you're using to compare metric values. Possible values: - // GREATER_THAN, LESS_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN_OR_EQUAL, or EQUAL - ComparisonOperator *string `type:"string"` + // The operator to use when comparing metric values. Valid values are: GREATER_THAN, + // LESS_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN_OR_EQUAL, and EQUAL. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true"` - // The value to be compared. - Value *float64 `type:"double"` + // The value to compare. + // + // Value is a required field + Value *float64 `type:"double" required:"true"` } // String returns the string representation @@ -17235,6 +23987,22 @@ func (s MetricDimension) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
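The reworked DeliveryStatus documentation above implies that only TEMPORARY_FAILURE is worth retrying; every other status is terminal for that address. A sketch of triaging a MessageResponse (constant names assume the SDK's generated DeliveryStatus* enum values):

func retryableAddresses(resp *pinpoint.MessageResponse) []string {
	var retry []string
	for addr, res := range resp.Result {
		switch aws.StringValue(res.DeliveryStatus) {
		case pinpoint.DeliveryStatusSuccessful:
			// delivered; nothing to do
		case pinpoint.DeliveryStatusTemporaryFailure:
			retry = append(retry, addr) // transient; safe to resend later
		default:
			// DUPLICATE, OPT_OUT, PERMANENT_FAILURE, THROTTLED,
			// TIMEOUT, UNKNOWN_FAILURE: don't resend to this address
		}
	}
	return retry
}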
+func (s *MetricDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDimension"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetComparisonOperator sets the ComparisonOperator field's value. func (s *MetricDimension) SetComparisonOperator(v string) *MetricDimension { s.ComparisonOperator = &v @@ -17247,17 +24015,132 @@ func (s *MetricDimension) SetValue(v float64) *MetricDimension { return s } -// Phone Number Validate request. +// Specifies a condition to evaluate for an activity path in a journey. +type MultiConditionalBranch struct { + _ struct{} `type:"structure"` + + // The condition to evaluate for the activity path. + Condition *SimpleCondition `type:"structure"` + + // The unique identifier for the next activity to perform, after completing + // the activity for the path. + NextActivity *string `type:"string"` +} + +// String returns the string representation +func (s MultiConditionalBranch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultiConditionalBranch) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MultiConditionalBranch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MultiConditionalBranch"} + if s.Condition != nil { + if err := s.Condition.Validate(); err != nil { + invalidParams.AddNested("Condition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *MultiConditionalBranch) SetCondition(v *SimpleCondition) *MultiConditionalBranch { + s.Condition = v + return s +} + +// SetNextActivity sets the NextActivity field's value. +func (s *MultiConditionalBranch) SetNextActivity(v string) *MultiConditionalBranch { + s.NextActivity = &v + return s +} + +// Specifies the settings for a multivariate split activity in a journey. This +// type of activity sends participants down one of as many as five paths in +// a journey, based on conditions that you specify. +type MultiConditionalSplitActivity struct { + _ struct{} `type:"structure"` + + // The paths for the activity, including the conditions for entering each path + // and the activity to perform for each path. + Branches []*MultiConditionalBranch `type:"list"` + + // The activity to perform by default for any path in the activity. + DefaultActivity *string `type:"string"` + + // The amount of time to wait or the date and time when Amazon Pinpoint determines + // whether the conditions are met. + EvaluationWaitTime *WaitTime `type:"structure"` +} + +// String returns the string representation +func (s MultiConditionalSplitActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultiConditionalSplitActivity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
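A multivariate split as described above wires each branch's Condition to a path and names a fallback. A minimal sketch with the conditions elided (the activity identifiers are hypothetical; the nested Validate calls check any branches that do carry conditions):

split := &pinpoint.MultiConditionalSplitActivity{
	Branches: []*pinpoint.MultiConditionalBranch{
		{NextActivity: aws.String("activity-opened-email")},  // Condition omitted in this sketch
		{NextActivity: aws.String("activity-clicked-link")},
	},
	DefaultActivity: aws.String("activity-fallback"),
}
if err := split.Validate(); err != nil {
	// surfaces nested Branches[i].Condition errors, if any
	return err
}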
+func (s *MultiConditionalSplitActivity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MultiConditionalSplitActivity"} + if s.Branches != nil { + for i, v := range s.Branches { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Branches", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranches sets the Branches field's value. +func (s *MultiConditionalSplitActivity) SetBranches(v []*MultiConditionalBranch) *MultiConditionalSplitActivity { + s.Branches = v + return s +} + +// SetDefaultActivity sets the DefaultActivity field's value. +func (s *MultiConditionalSplitActivity) SetDefaultActivity(v string) *MultiConditionalSplitActivity { + s.DefaultActivity = &v + return s +} + +// SetEvaluationWaitTime sets the EvaluationWaitTime field's value. +func (s *MultiConditionalSplitActivity) SetEvaluationWaitTime(v *WaitTime) *MultiConditionalSplitActivity { + s.EvaluationWaitTime = v + return s +} + +// Specifies a phone number to validate and retrieve information about. type NumberValidateRequest struct { _ struct{} `type:"structure"` - // (Optional) The two-character ISO country code for the country or region where - // the phone number was originally registered. + // The two-character code, in ISO 3166-1 alpha-2 format, for the country or + // region where the phone number was originally registered. IsoCountryCode *string `type:"string"` - // The phone number to get information about. The phone number that you provide - // should include a country code. If the number doesn't include a valid country - // code, the operation might result in an error. + // The phone number to retrieve information about. The phone number that you + // provide should include a valid numeric country code. Otherwise, the operation + // might result in an error. PhoneNumber *string `type:"string"` } @@ -17283,56 +24166,60 @@ func (s *NumberValidateRequest) SetPhoneNumber(v string) *NumberValidateRequest return s } -// Phone Number Validate response. +// Provides information about a phone number. type NumberValidateResponse struct { _ struct{} `type:"structure"` - // The carrier or servive provider that the phone number is currently registered - // with. + // The carrier or service provider that the phone number is currently registered + // with. In some countries and regions, this value may be the carrier or service + // provider that the phone number was originally registered with. Carrier *string `type:"string"` - // The city where the phone number was originally registered. + // The name of the city where the phone number was originally registered. City *string `type:"string"` - // The cleansed phone number, shown in E.164 format. + // The cleansed phone number, in E.164 format, for the location where the phone + // number was originally registered. CleansedPhoneNumberE164 *string `type:"string"` - // The cleansed phone number, shown in the local phone number format. + // The cleansed phone number, in the format for the location where the phone + // number was originally registered. CleansedPhoneNumberNational *string `type:"string"` - // The country or region where the phone number was originally registered. + // The name of the country or region where the phone number was originally registered. Country *string `type:"string"` - // The two-character ISO code for the country or region where the phone number - // was originally registered. 
+ // The two-character code, in ISO 3166-1 alpha-2 format, for the country or + // region where the phone number was originally registered. CountryCodeIso2 *string `type:"string"` // The numeric code for the country or region where the phone number was originally // registered. CountryCodeNumeric *string `type:"string"` - // The county where the phone number was originally registered. + // The name of the county where the phone number was originally registered. County *string `type:"string"` - // The two-character code (in ISO 3166-1 alpha-2 format) for the country or - // region in the request body. + // The two-character code, in ISO 3166-1 alpha-2 format, that was sent in the + // request body. OriginalCountryCodeIso2 *string `type:"string"` - // The phone number that you included in the request body. + // The phone number that was sent in the request body. OriginalPhoneNumber *string `type:"string"` - // A description of the phone type. Possible values are MOBILE, LANDLINE, VOIP, + // The description of the phone type. Valid values are: MOBILE, LANDLINE, VOIP, // INVALID, PREPAID, and OTHER. PhoneType *string `type:"string"` - // The phone type, represented by an integer. Possible values include 0 (MOBILE), - // 1 (LANDLINE), 2 (VOIP), 3 (INVALID), 4 (OTHER), and 5 (PREPAID). + // The phone type, represented by an integer. Valid values are: 0 (mobile), + // 1 (landline), 2 (VoIP), 3 (invalid), 4 (other), and 5 (prepaid). PhoneTypeCode *int64 `type:"integer"` // The time zone for the location where the phone number was originally registered. Timezone *string `type:"string"` - // The postal code for the location where the phone number was originally registered. + // The postal or ZIP code for the location where the phone number was originally + // registered. ZipCode *string `type:"string"` } @@ -17433,7 +24320,7 @@ func (s *NumberValidateResponse) SetZipCode(v string) *NumberValidateResponse { type PhoneNumberValidateInput struct { _ struct{} `type:"structure" payload:"NumberValidateRequest"` - // Phone Number Validate request. + // Specifies a phone number to validate and retrieve information about. // // NumberValidateRequest is a required field NumberValidateRequest *NumberValidateRequest `type:"structure" required:"true"` @@ -17471,7 +24358,7 @@ func (s *PhoneNumberValidateInput) SetNumberValidateRequest(v *NumberValidateReq type PhoneNumberValidateOutput struct { _ struct{} `type:"structure" payload:"NumberValidateResponse"` - // Phone Number Validate response. + // Provides information about a phone number. // // NumberValidateResponse is a required field NumberValidateResponse *NumberValidateResponse `type:"structure" required:"true"` @@ -17493,122 +24380,351 @@ func (s *PhoneNumberValidateOutput) SetNumberValidateResponse(v *NumberValidateR return s } -// Public endpoint attributes. -type PublicEndpoint struct { +// Specifies the properties and attributes of an endpoint that's associated +// with an event. +type PublicEndpoint struct { + _ struct{} `type:"structure"` + + // The unique identifier for the recipient, such as a device token, email address, + // or mobile phone number. + Address *string `type:"string"` + + // One or more custom attributes that describe the endpoint by associating a + // name with an array of values. You can use these attributes as filter criteria + // when you create segments. + Attributes map[string][]*string `type:"map"` + + // The channel that's used when sending messages or push notifications to the + // endpoint. 
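Putting NumberValidateRequest and NumberValidateResponse together: a single PhoneNumberValidate call returns the cleansed number plus the PhoneType fields documented above. A sketch with a placeholder number, assuming "fmt" is imported alongside the SDK packages:

out, err := svc.PhoneNumberValidate(&pinpoint.PhoneNumberValidateInput{
	NumberValidateRequest: &pinpoint.NumberValidateRequest{
		IsoCountryCode: aws.String("US"),
		PhoneNumber:    aws.String("+12065550100"), // hypothetical
	},
})
if err != nil {
	return err
}
resp := out.NumberValidateResponse
fmt.Printf("cleansed=%s type=%s (code %d)\n",
	aws.StringValue(resp.CleansedPhoneNumberE164),
	aws.StringValue(resp.PhoneType),    // e.g. MOBILE
	aws.Int64Value(resp.PhoneTypeCode)) // e.g. 0 for mobile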
+ ChannelType *string `type:"string" enum:"ChannelType"` + + // The demographic information for the endpoint, such as the time zone and platform. + Demographic *EndpointDemographic `type:"structure"` + + // The date and time, in ISO 8601 format, when the endpoint was last updated. + EffectiveDate *string `type:"string"` + + // Specifies whether to send messages or push notifications to the endpoint. + // Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, + // messages aren’t sent to the endpoint. + // + // Amazon Pinpoint automatically sets this value to ACTIVE when you create an + // endpoint or update an existing endpoint. Amazon Pinpoint automatically sets + // this value to INACTIVE if you update another endpoint that has the same address + // specified by the Address property. + EndpointStatus *string `type:"string"` + + // The geographic information for the endpoint. + Location *EndpointLocation `type:"structure"` + + // One or more custom metrics that your app reports to Amazon Pinpoint for the + // endpoint. + Metrics map[string]*float64 `type:"map"` + + // Specifies whether the user who's associated with the endpoint has opted out + // of receiving messages and push notifications from you. Possible values are: + // ALL, the user has opted out and doesn't want to receive any messages or push + // notifications; and, NONE, the user hasn't opted out and wants to receive + // all messages and push notifications. + OptOut *string `type:"string"` + + // A unique identifier that's generated each time the endpoint is updated. + RequestId *string `type:"string"` + + // One or more custom user attributes that your app reports to Amazon Pinpoint + // for the user who's associated with the endpoint. + User *EndpointUser `type:"structure"` +} + +// String returns the string representation +func (s PublicEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicEndpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *PublicEndpoint) SetAddress(v string) *PublicEndpoint { + s.Address = &v + return s +} + +// SetAttributes sets the Attributes field's value. +func (s *PublicEndpoint) SetAttributes(v map[string][]*string) *PublicEndpoint { + s.Attributes = v + return s +} + +// SetChannelType sets the ChannelType field's value. +func (s *PublicEndpoint) SetChannelType(v string) *PublicEndpoint { + s.ChannelType = &v + return s +} + +// SetDemographic sets the Demographic field's value. +func (s *PublicEndpoint) SetDemographic(v *EndpointDemographic) *PublicEndpoint { + s.Demographic = v + return s +} + +// SetEffectiveDate sets the EffectiveDate field's value. +func (s *PublicEndpoint) SetEffectiveDate(v string) *PublicEndpoint { + s.EffectiveDate = &v + return s +} + +// SetEndpointStatus sets the EndpointStatus field's value. +func (s *PublicEndpoint) SetEndpointStatus(v string) *PublicEndpoint { + s.EndpointStatus = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *PublicEndpoint) SetLocation(v *EndpointLocation) *PublicEndpoint { + s.Location = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *PublicEndpoint) SetMetrics(v map[string]*float64) *PublicEndpoint { + s.Metrics = v + return s +} + +// SetOptOut sets the OptOut field's value. +func (s *PublicEndpoint) SetOptOut(v string) *PublicEndpoint { + s.OptOut = &v + return s +} + +// SetRequestId sets the RequestId field's value. 
+func (s *PublicEndpoint) SetRequestId(v string) *PublicEndpoint { + s.RequestId = &v + return s +} + +// SetUser sets the User field's value. +func (s *PublicEndpoint) SetUser(v *EndpointUser) *PublicEndpoint { + s.User = v + return s +} + +// Specifies the content and settings for a message template that can be used +// in messages that are sent through a push notification channel. +type PushNotificationTemplateRequest struct { + _ struct{} `type:"structure"` + + // The message template to use for the ADM (Amazon Device Messaging) channel. + // This message template overrides the default template for push notification + // channels (DefaultPushNotificationTemplate). + ADM *AndroidPushNotificationTemplate `type:"structure"` + + // The message template to use for the APNs (Apple Push Notification service) + // channel. This message template overrides the default template for push notification + // channels (DefaultPushNotificationTemplate). + APNS *APNSPushNotificationTemplate `type:"structure"` + + // The message template to use for the Baidu (Baidu Cloud Push) channel. This + // message template overrides the default template for push notification channels + // (DefaultPushNotificationTemplate). + Baidu *AndroidPushNotificationTemplate `type:"structure"` + + // The default message template to use for push notification channels. + Default *DefaultPushNotificationTemplate `type:"structure"` + + // The message template to use for the GCM channel, which is used to send notifications + // through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging + // (GCM), service. This message template overrides the default template for + // push notification channels (DefaultPushNotificationTemplate). + GCM *AndroidPushNotificationTemplate `type:"structure"` + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s PushNotificationTemplateRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PushNotificationTemplateRequest) GoString() string { + return s.String() +} + +// SetADM sets the ADM field's value. +func (s *PushNotificationTemplateRequest) SetADM(v *AndroidPushNotificationTemplate) *PushNotificationTemplateRequest { + s.ADM = v + return s +} + +// SetAPNS sets the APNS field's value. +func (s *PushNotificationTemplateRequest) SetAPNS(v *APNSPushNotificationTemplate) *PushNotificationTemplateRequest { + s.APNS = v + return s +} + +// SetBaidu sets the Baidu field's value. +func (s *PushNotificationTemplateRequest) SetBaidu(v *AndroidPushNotificationTemplate) *PushNotificationTemplateRequest { + s.Baidu = v + return s +} + +// SetDefault sets the Default field's value. +func (s *PushNotificationTemplateRequest) SetDefault(v *DefaultPushNotificationTemplate) *PushNotificationTemplateRequest { + s.Default = v + return s +} + +// SetGCM sets the GCM field's value. +func (s *PushNotificationTemplateRequest) SetGCM(v *AndroidPushNotificationTemplate) *PushNotificationTemplateRequest { + s.GCM = v + return s +} + +// SetTags sets the Tags field's value. 
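The new PushNotificationTemplateRequest above is the payload for the template operations this SDK bump introduces. A sketch of creating a template with only the channel-agnostic default; the CreatePushTemplate operation and its input shape are assumed from elsewhere in this file, the template name is a placeholder, and DefaultPushNotificationTemplate is assumed to carry the usual Title/Body fields:

_, err := svc.CreatePushTemplate(&pinpoint.CreatePushTemplateInput{
	TemplateName: aws.String("welcome-push"), // hypothetical
	PushNotificationTemplateRequest: &pinpoint.PushNotificationTemplateRequest{
		Default: &pinpoint.DefaultPushNotificationTemplate{
			Title: aws.String("Welcome!"),
			Body:  aws.String("Thanks for installing the app."),
		},
		Tags: map[string]*string{"team": aws.String("growth")},
	},
})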
+func (s *PushNotificationTemplateRequest) SetTags(v map[string]*string) *PushNotificationTemplateRequest { + s.Tags = v + return s +} + +// Provides information about the content and settings for a message template +// that can be used in messages that are sent through a push notification channel. +type PushNotificationTemplateResponse struct { _ struct{} `type:"structure"` - // The unique identifier for the recipient. For example, an address could be - // a device token, email address, or mobile phone number. - Address *string `type:"string"` + // The message template that's used for the ADM (Amazon Device Messaging) channel. + // This message template overrides the default template for push notification + // channels (DefaultPushNotificationTemplate). + ADM *AndroidPushNotificationTemplate `type:"structure"` - // Custom attributes that your app reports to Amazon Pinpoint. You can use these - // attributes as selection criteria when you create a segment. - Attributes map[string][]*string `type:"map"` + // The message template that's used for the APNs (Apple Push Notification service) + // channel. This message template overrides the default template for push notification + // channels (DefaultPushNotificationTemplate). + APNS *APNSPushNotificationTemplate `type:"structure"` - // The channel type.Valid values: APNS, GCM - ChannelType *string `type:"string" enum:"ChannelType"` + // The Amazon Resource Name (ARN) of the message template. + Arn *string `type:"string"` - // The endpoint demographic attributes. - Demographic *EndpointDemographic `type:"structure"` + // The message template that's used for the Baidu (Baidu Cloud Push) channel. + // This message template overrides the default template for push notification + // channels (DefaultPushNotificationTemplate). + Baidu *AndroidPushNotificationTemplate `type:"structure"` - // The date and time when the endpoint was last updated, in ISO 8601 format. - EffectiveDate *string `type:"string"` + // The date when the message template was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` - // The status of the endpoint. If the update fails, the value is INACTIVE. If - // the endpoint is updated successfully, the value is ACTIVE. - EndpointStatus *string `type:"string"` + // The default message template that's used for push notification channels. + Default *DefaultPushNotificationTemplate `type:"structure"` - // The endpoint location attributes. - Location *EndpointLocation `type:"structure"` + // The message template that's used for the GCM channel, which is used to send + // notifications through the Firebase Cloud Messaging (FCM), formerly Google + // Cloud Messaging (GCM), service. This message template overrides the default + // template for push notification channels (DefaultPushNotificationTemplate). + GCM *AndroidPushNotificationTemplate `type:"structure"` - // Custom metrics that your app reports to Amazon Pinpoint. - Metrics map[string]*float64 `type:"map"` + // The date when the message template was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` - // Indicates whether a user has opted out of receiving messages with one of - // the following values:ALL - User has opted out of all messages.NONE - Users - // has not opted out and receives all messages. - OptOut *string `type:"string"` + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the message template. 
Each tag consists of a required tag + // key and an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` - // A unique identifier that is generated each time the endpoint is updated. - RequestId *string `type:"string"` + // The name of the message template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` - // Custom user-specific attributes that your app reports to Amazon Pinpoint. - User *EndpointUser `type:"structure"` + // The type of channel that the message template is designed for. For a push + // notification template, this value is PUSH. + // + // TemplateType is a required field + TemplateType *string `type:"string" required:"true" enum:"TemplateType"` } // String returns the string representation -func (s PublicEndpoint) String() string { +func (s PushNotificationTemplateResponse) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PublicEndpoint) GoString() string { +func (s PushNotificationTemplateResponse) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *PublicEndpoint) SetAddress(v string) *PublicEndpoint { - s.Address = &v +// SetADM sets the ADM field's value. +func (s *PushNotificationTemplateResponse) SetADM(v *AndroidPushNotificationTemplate) *PushNotificationTemplateResponse { + s.ADM = v return s } -// SetAttributes sets the Attributes field's value. -func (s *PublicEndpoint) SetAttributes(v map[string][]*string) *PublicEndpoint { - s.Attributes = v +// SetAPNS sets the APNS field's value. +func (s *PushNotificationTemplateResponse) SetAPNS(v *APNSPushNotificationTemplate) *PushNotificationTemplateResponse { + s.APNS = v return s } -// SetChannelType sets the ChannelType field's value. -func (s *PublicEndpoint) SetChannelType(v string) *PublicEndpoint { - s.ChannelType = &v +// SetArn sets the Arn field's value. +func (s *PushNotificationTemplateResponse) SetArn(v string) *PushNotificationTemplateResponse { + s.Arn = &v return s } -// SetDemographic sets the Demographic field's value. -func (s *PublicEndpoint) SetDemographic(v *EndpointDemographic) *PublicEndpoint { - s.Demographic = v +// SetBaidu sets the Baidu field's value. +func (s *PushNotificationTemplateResponse) SetBaidu(v *AndroidPushNotificationTemplate) *PushNotificationTemplateResponse { + s.Baidu = v return s } -// SetEffectiveDate sets the EffectiveDate field's value. -func (s *PublicEndpoint) SetEffectiveDate(v string) *PublicEndpoint { - s.EffectiveDate = &v +// SetCreationDate sets the CreationDate field's value. +func (s *PushNotificationTemplateResponse) SetCreationDate(v string) *PushNotificationTemplateResponse { + s.CreationDate = &v return s } -// SetEndpointStatus sets the EndpointStatus field's value. -func (s *PublicEndpoint) SetEndpointStatus(v string) *PublicEndpoint { - s.EndpointStatus = &v +// SetDefault sets the Default field's value. +func (s *PushNotificationTemplateResponse) SetDefault(v *DefaultPushNotificationTemplate) *PushNotificationTemplateResponse { + s.Default = v return s } -// SetLocation sets the Location field's value. -func (s *PublicEndpoint) SetLocation(v *EndpointLocation) *PublicEndpoint { - s.Location = v +// SetGCM sets the GCM field's value. +func (s *PushNotificationTemplateResponse) SetGCM(v *AndroidPushNotificationTemplate) *PushNotificationTemplateResponse { + s.GCM = v return s } -// SetMetrics sets the Metrics field's value. 
-func (s *PublicEndpoint) SetMetrics(v map[string]*float64) *PublicEndpoint { - s.Metrics = v +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *PushNotificationTemplateResponse) SetLastModifiedDate(v string) *PushNotificationTemplateResponse { + s.LastModifiedDate = &v return s } -// SetOptOut sets the OptOut field's value. -func (s *PublicEndpoint) SetOptOut(v string) *PublicEndpoint { - s.OptOut = &v +// SetTags sets the Tags field's value. +func (s *PushNotificationTemplateResponse) SetTags(v map[string]*string) *PushNotificationTemplateResponse { + s.Tags = v return s } -// SetRequestId sets the RequestId field's value. -func (s *PublicEndpoint) SetRequestId(v string) *PublicEndpoint { - s.RequestId = &v +// SetTemplateName sets the TemplateName field's value. +func (s *PushNotificationTemplateResponse) SetTemplateName(v string) *PushNotificationTemplateResponse { + s.TemplateName = &v return s } -// SetUser sets the User field's value. -func (s *PublicEndpoint) SetUser(v *EndpointUser) *PublicEndpoint { - s.User = v +// SetTemplateType sets the TemplateType field's value. +func (s *PushNotificationTemplateResponse) SetTemplateType(v string) *PushNotificationTemplateResponse { + s.TemplateType = &v return s } @@ -17618,7 +24734,9 @@ type PutEventStreamInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Request to save an EventStream. + // Specifies the Amazon Resource Name (ARN) of an event stream to publish events + // to and the AWS Identity and Access Management (IAM) role to use when publishing + // those events. // // WriteEventStream is a required field WriteEventStream *WriteEventStream `type:"structure" required:"true"` @@ -17646,6 +24764,11 @@ func (s *PutEventStreamInput) Validate() error { if s.WriteEventStream == nil { invalidParams.Add(request.NewErrParamRequired("WriteEventStream")) } + if s.WriteEventStream != nil { + if err := s.WriteEventStream.Validate(); err != nil { + invalidParams.AddNested("WriteEventStream", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -17668,7 +24791,8 @@ func (s *PutEventStreamInput) SetWriteEventStream(v *WriteEventStream) *PutEvent type PutEventStreamOutput struct { _ struct{} `type:"structure" payload:"EventStream"` - // Model for an event publishing subscription export. + // Specifies settings for publishing event data to an Amazon Kinesis data stream + // or an Amazon Kinesis Data Firehose delivery stream. // // EventStream is a required field EventStream *EventStream `type:"structure" required:"true"` @@ -17696,7 +24820,7 @@ type PutEventsInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // A set of events to process. + // Specifies a batch of events to process. 
// // EventsRequest is a required field EventsRequest *EventsRequest `type:"structure" required:"true"` @@ -17724,6 +24848,11 @@ func (s *PutEventsInput) Validate() error { if s.EventsRequest == nil { invalidParams.Add(request.NewErrParamRequired("EventsRequest")) } + if s.EventsRequest != nil { + if err := s.EventsRequest.Validate(); err != nil { + invalidParams.AddNested("EventsRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -17746,7 +24875,8 @@ func (s *PutEventsInput) SetEventsRequest(v *EventsRequest) *PutEventsInput { type PutEventsOutput struct { _ struct{} `type:"structure" payload:"EventsResponse"` - // Custom messages associated with events. + // Provides information about endpoints and the events that they're associated + // with. // // EventsResponse is a required field EventsResponse *EventsResponse `type:"structure" required:"true"` @@ -17768,20 +24898,21 @@ func (s *PutEventsOutput) SetEventsResponse(v *EventsResponse) *PutEventsOutput return s } -// Quiet Time +// Specifies the start and end times that define a time range when messages +// aren't sent to endpoints. type QuietTime struct { _ struct{} `type:"structure"` - // The time at which quiet time should end. The value that you specify has to - // be in HH:mm format, where HH is the hour in 24-hour format (with a leading - // zero, if applicable), and mm is the minutes. For example, use 02:30 to represent - // 2:30 AM, or 14:30 to represent 2:30 PM. + // The specific time when quiet time ends. This value has to use 24-hour notation + // and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) + // and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 + // to represent 2:30 PM. End *string `type:"string"` - // The time at which quiet time should begin. The value that you specify has - // to be in HH:mm format, where HH is the hour in 24-hour format (with a leading - // zero, if applicable), and mm is the minutes. For example, use 02:30 to represent - // 2:30 AM, or 14:30 to represent 2:30 PM. + // The specific time when quiet time begins. This value has to use 24-hour notation + // and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) + // and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 + // to represent 2:30 PM. Start *string `type:"string"` } @@ -17807,12 +24938,71 @@ func (s *QuietTime) SetStart(v string) *QuietTime { return s } -// An email represented as a raw MIME message. +// Specifies the settings for a random split activity in a journey. This type +// of activity randomly sends specified percentages of participants down one +// of as many as five paths in a journey, based on conditions that you specify. +type RandomSplitActivity struct { + _ struct{} `type:"structure"` + + // The paths for the activity, including the percentage of participants to enter + // each path and the activity to perform for each path. + Branches []*RandomSplitEntry `type:"list"` +} + +// String returns the string representation +func (s RandomSplitActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RandomSplitActivity) GoString() string { + return s.String() +} + +// SetBranches sets the Branches field's value. +func (s *RandomSplitActivity) SetBranches(v []*RandomSplitEntry) *RandomSplitActivity { + s.Branches = v + return s +} + +// Specifies the settings for a path in a random split activity in a journey. 
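The QuietTime contract above is purely lexical: both bounds are 24-hour HH:MM strings. A 9 PM to 8 AM window, for example, would be expressed as in the sketch below (the random-split entry type continues after it):

qt := &pinpoint.QuietTime{
	Start: aws.String("21:00"), // 9:00 PM, 24-hour HH:MM
	End:   aws.String("08:00"), // 8:00 AM the next morning
}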
+type RandomSplitEntry struct { + _ struct{} `type:"structure"` + + // The unique identifier for the next activity to perform, after completing + // the activity for the path. + NextActivity *string `type:"string"` + + // The percentage of participants to send down the activity path. + Percentage *int64 `type:"integer"` +} + +// String returns the string representation +func (s RandomSplitEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RandomSplitEntry) GoString() string { + return s.String() +} + +// SetNextActivity sets the NextActivity field's value. +func (s *RandomSplitEntry) SetNextActivity(v string) *RandomSplitEntry { + s.NextActivity = &v + return s +} + +// SetPercentage sets the Percentage field's value. +func (s *RandomSplitEntry) SetPercentage(v int64) *RandomSplitEntry { + s.Percentage = &v + return s +} + +// Specifies the contents of an email message, represented as a raw MIME message. type RawEmail struct { _ struct{} `type:"structure"` - // The raw email message itself. Then entire message must be base64-encoded. - // // Data is automatically base64 encoded/decoded by the SDK. Data []byte `type:"blob"` } @@ -17833,18 +25023,23 @@ func (s *RawEmail) SetData(v []byte) *RawEmail { return s } -// Define how a segment based on recency of use. +// Specifies criteria for including or excluding endpoints from a segment based +// on how recently an endpoint was active. type RecencyDimension struct { _ struct{} `type:"structure"` - // The length of time during which users have been active or inactive with your - // app.Valid values: HR_24, DAY_7, DAY_14, DAY_30 - Duration *string `type:"string" enum:"Duration"` + // The duration to use when determining whether an endpoint is active or inactive. + // + // Duration is a required field + Duration *string `type:"string" required:"true" enum:"Duration"` - // The recency dimension type:ACTIVE - Users who have used your app within the - // specified duration are included in the segment.INACTIVE - Users who have - // not used your app within the specified duration are included in the segment. - RecencyType *string `type:"string" enum:"RecencyType"` + // The type of recency dimension to use for the segment. Valid values are: ACTIVE, + // endpoints that were active within the specified duration are included in + // the segment; and, INACTIVE, endpoints that weren't active within the specified + // duration are included in the segment. + // + // RecencyType is a required field + RecencyType *string `type:"string" required:"true" enum:"RecencyType"` } // String returns the string representation @@ -17857,6 +25052,22 @@ func (s RecencyDimension) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RecencyDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RecencyDimension"} + if s.Duration == nil { + invalidParams.Add(request.NewErrParamRequired("Duration")) + } + if s.RecencyType == nil { + invalidParams.Add(request.NewErrParamRequired("RecencyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDuration sets the Duration field's value. 
func (s *RecencyDimension) SetDuration(v string) *RecencyDimension { s.Duration = &v @@ -17878,7 +25089,8 @@ type RemoveAttributesInput struct { // AttributeType is a required field AttributeType *string `location:"uri" locationName:"attribute-type" type:"string" required:"true"` - // Update attributes request + // Specifies one or more attributes to remove from all the endpoints that are + // associated with an application. // // UpdateAttributesRequest is a required field UpdateAttributesRequest *UpdateAttributesRequest `type:"structure" required:"true"` @@ -17940,7 +25152,8 @@ func (s *RemoveAttributesInput) SetUpdateAttributesRequest(v *UpdateAttributesRe type RemoveAttributesOutput struct { _ struct{} `type:"structure" payload:"AttributesResource"` - // Attributes. + // Provides information about the type and the names of attributes that were + // removed from all the endpoints that are associated with an application. // // AttributesResource is a required field AttributesResource *AttributesResource `type:"structure" required:"true"` @@ -17962,17 +25175,112 @@ func (s *RemoveAttributesOutput) SetAttributesResource(v *AttributesResource) *R return s } -// SMS Channel Request +// Provides the results of a query that retrieved the data for a standard metric +// that applies to an application, campaign, or journey. +type ResultRow struct { + _ struct{} `type:"structure"` + + // An array of objects that defines the field and field values that were used + // to group data in a result set that contains multiple results. This value + // is null if the data in a result set isn’t grouped. + // + // GroupedBys is a required field + GroupedBys []*ResultRowValue `type:"list" required:"true"` + + // An array of objects that provides pre-aggregated values for a standard metric + // that applies to an application, campaign, or journey. + // + // Values is a required field + Values []*ResultRowValue `type:"list" required:"true"` +} + +// String returns the string representation +func (s ResultRow) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResultRow) GoString() string { + return s.String() +} + +// SetGroupedBys sets the GroupedBys field's value. +func (s *ResultRow) SetGroupedBys(v []*ResultRowValue) *ResultRow { + s.GroupedBys = v + return s +} + +// SetValues sets the Values field's value. +func (s *ResultRow) SetValues(v []*ResultRowValue) *ResultRow { + s.Values = v + return s +} + +// Provides a single value and metadata about that value as part of an array +// of query results for a standard metric that applies to an application, campaign, +// or journey. +type ResultRowValue struct { + _ struct{} `type:"structure"` + + // The friendly name of the metric whose value is specified by the Value property. + // + // Key is a required field + Key *string `type:"string" required:"true"` + + // The data type of the value specified by the Value property. + // + // Type is a required field + Type *string `type:"string" required:"true"` + + // In a Values object, the value for the metric that the query retrieved data + // for. In a GroupedBys object, the value for the field that was used to group + // data in a result set that contains multiple results (Values objects). 
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResultRowValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResultRowValue) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *ResultRowValue) SetKey(v string) *ResultRowValue { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *ResultRowValue) SetType(v string) *ResultRowValue { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ResultRowValue) SetValue(v string) *ResultRowValue { + s.Value = &v + return s +} + +// Specifies the status and settings of the SMS channel for an application. type SMSChannelRequest struct { _ struct{} `type:"structure"` - // If the channel is enabled for sending messages. + // Specifies whether to enable the SMS channel for the application. Enabled *bool `type:"boolean"` - // Sender identifier of your messages. + // The identity that you want to display on recipients' devices when they receive + // messages from the SMS channel. SenderId *string `type:"string"` - // ShortCode registered with phone provider. + // The registered short code that you want to use when you send messages through + // the SMS channel. ShortCode *string `type:"string"` } @@ -18004,50 +25312,58 @@ func (s *SMSChannelRequest) SetShortCode(v string) *SMSChannelRequest { return s } -// SMS Channel Response. +// Provides information about the status and settings of the SMS channel for +// an application. type SMSChannelResponse struct { _ struct{} `type:"structure"` - // The unique ID of the application to which the SMS channel belongs. + // The unique identifier for the application that the SMS channel applies to. ApplicationId *string `type:"string"` - // The date that the settings were last updated in ISO 8601 format. + // The date and time, in ISO 8601 format, when the SMS channel was enabled. CreationDate *string `type:"string"` - // If the channel is enabled for sending messages. + // Specifies whether the SMS channel is enabled for the application. Enabled *bool `type:"boolean"` - // Not used. Retained for backwards compatibility. + // (Not used) This property is retained only for backward compatibility. HasCredential *bool `type:"boolean"` - // Channel ID. Not used, only for backwards compatibility. + // (Deprecated) An identifier for the SMS channel. This property is retained + // only for backward compatibility. Id *string `type:"string"` - // Is this channel archived + // Specifies whether the SMS channel is archived. IsArchived *bool `type:"boolean"` - // Who last updated this entry + // The user who last modified the SMS channel. LastModifiedBy *string `type:"string"` - // Last date this was updated + // The date and time, in ISO 8601 format, when the SMS channel was last modified. LastModifiedDate *string `type:"string"` - // Platform type. Will be "SMS" - Platform *string `type:"string"` + // The type of messaging or notification platform for the channel. For the SMS + // channel, this value is SMS. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` - // Promotional messages per second that can be sent + // The maximum number of promotional messages that you can send through the + // SMS channel each second. PromotionalMessagesPerSecond *int64 `type:"integer"` - // Sender identifier of your messages. 
+ // The identity that displays on recipients' devices when they receive messages + // from the SMS channel. SenderId *string `type:"string"` - // The short code registered with the phone provider. + // The registered short code to use when you send messages through the SMS channel. ShortCode *string `type:"string"` - // Transactional messages per second that can be sent + // The maximum number of transactional messages that you can send through the + // SMS channel each second. TransactionalMessagesPerSecond *int64 `type:"integer"` - // Version of channel + // The current version of the SMS channel. Version *int64 `type:"integer"` } @@ -18145,7 +25461,8 @@ func (s *SMSChannelResponse) SetVersion(v int64) *SMSChannelResponse { return s } -// SMS Message. +// Specifies the default settings for a one-time SMS message that's sent directly +// to an endpoint. type SMSMessage struct { _ struct{} `type:"structure"` @@ -18156,20 +25473,24 @@ type SMSMessage struct { // your dedicated number. Keyword *string `type:"string"` - // Is this a transaction priority message or lower priority. + // The SMS message type. Valid values are: TRANSACTIONAL, the message is critical + // or time-sensitive, such as a one-time password that supports a customer transaction; + // and, PROMOTIONAL, the message is not critical or time-sensitive, such as + // a marketing message. MessageType *string `type:"string" enum:"MessageType"` - // The phone number that the SMS message originates from. Specify one of the - // dedicated long codes or short codes that you requested from AWS Support and - // that is assigned to your account. If this attribute is not specified, Amazon - // Pinpoint randomly assigns a long code. + // The number to send the SMS message from. This value should be one of the + // dedicated long or short codes that's assigned to your AWS account. If you + // don't specify a long or short code, Amazon Pinpoint assigns a random long + // code to the SMS message and sends the message from that code. OriginationNumber *string `type:"string"` - // The sender ID that is shown as the message sender on the recipient's device. + // The sender ID to display as the sender of the message on a recipient's device. // Support for sender IDs varies by country or region. SenderId *string `type:"string"` - // Default message substitutions. Can be overridden by individual address substitutions. + // The message variables to use in the SMS message. You can override the default + // variables with individual address variables. Substitutions map[string][]*string `type:"map"` } @@ -18195,67 +25516,204 @@ func (s *SMSMessage) SetKeyword(v string) *SMSMessage { return s } -// SetMessageType sets the MessageType field's value. -func (s *SMSMessage) SetMessageType(v string) *SMSMessage { - s.MessageType = &v +// SetMessageType sets the MessageType field's value. +func (s *SMSMessage) SetMessageType(v string) *SMSMessage { + s.MessageType = &v + return s +} + +// SetOriginationNumber sets the OriginationNumber field's value. +func (s *SMSMessage) SetOriginationNumber(v string) *SMSMessage { + s.OriginationNumber = &v + return s +} + +// SetSenderId sets the SenderId field's value. +func (s *SMSMessage) SetSenderId(v string) *SMSMessage { + s.SenderId = &v + return s +} + +// SetSubstitutions sets the Substitutions field's value. 
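// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff): a
// one-off SMSMessage built with the setters from this hunk. TRANSACTIONAL and
// the substitution semantics follow the documentation above:
msg := &pinpoint.SMSMessage{}
msg.SetMessageType("TRANSACTIONAL") // critical or time-sensitive, e.g. a one-time password
msg.SetSenderId("MyBrand")          // sender ID support varies by country or region
msg.SetSubstitutions(map[string][]*string{
	"Code": {aws.String("123456")}, // overrides the default message variable
})
// ---------------------------------------------------------------------------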
+func (s *SMSMessage) SetSubstitutions(v map[string][]*string) *SMSMessage { + s.Substitutions = v + return s +} + +// Specifies the content and settings for a message template that can be used +// in text messages that are sent through the SMS channel. +type SMSTemplateRequest struct { + _ struct{} `type:"structure"` + + // The message body to use in text messages that are based on the message template. + Body *string `type:"string"` + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s SMSTemplateRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SMSTemplateRequest) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *SMSTemplateRequest) SetBody(v string) *SMSTemplateRequest { + s.Body = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SMSTemplateRequest) SetTags(v map[string]*string) *SMSTemplateRequest { + s.Tags = v + return s +} + +// Provides information about the content and settings for a message template +// that can be used in text messages that are sent through the SMS channel. +type SMSTemplateResponse struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the message template. + Arn *string `type:"string"` + + // The message body that's used in text messages that are based on the message + // template. + Body *string `type:"string"` + + // The date when the message template was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` + + // The date when the message template was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the message template. Each tag consists of a required tag + // key and an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The name of the message template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The type of channel that the message template is designed for. For an SMS + // template, this value is SMS. + // + // TemplateType is a required field + TemplateType *string `type:"string" required:"true" enum:"TemplateType"` +} + +// String returns the string representation +func (s SMSTemplateResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SMSTemplateResponse) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *SMSTemplateResponse) SetArn(v string) *SMSTemplateResponse { + s.Arn = &v + return s +} + +// SetBody sets the Body field's value. +func (s *SMSTemplateResponse) SetBody(v string) *SMSTemplateResponse { + s.Body = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *SMSTemplateResponse) SetCreationDate(v string) *SMSTemplateResponse { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. 
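// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff): a
// minimal SMSTemplateRequest with the body and tag map described above; the
// tag key and value here are hypothetical:
tmpl := &pinpoint.SMSTemplateRequest{}
tmpl.SetBody("Your verification code is 123456.")
tmpl.SetTags(map[string]*string{"team": aws.String("growth")})
// ---------------------------------------------------------------------------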
+func (s *SMSTemplateResponse) SetLastModifiedDate(v string) *SMSTemplateResponse { + s.LastModifiedDate = &v return s } -// SetOriginationNumber sets the OriginationNumber field's value. -func (s *SMSMessage) SetOriginationNumber(v string) *SMSMessage { - s.OriginationNumber = &v +// SetTags sets the Tags field's value. +func (s *SMSTemplateResponse) SetTags(v map[string]*string) *SMSTemplateResponse { + s.Tags = v return s } -// SetSenderId sets the SenderId field's value. -func (s *SMSMessage) SetSenderId(v string) *SMSMessage { - s.SenderId = &v +// SetTemplateName sets the TemplateName field's value. +func (s *SMSTemplateResponse) SetTemplateName(v string) *SMSTemplateResponse { + s.TemplateName = &v return s } -// SetSubstitutions sets the Substitutions field's value. -func (s *SMSMessage) SetSubstitutions(v map[string][]*string) *SMSMessage { - s.Substitutions = v +// SetTemplateType sets the TemplateType field's value. +func (s *SMSTemplateResponse) SetTemplateType(v string) *SMSTemplateResponse { + s.TemplateType = &v return s } -// Shcedule that defines when a campaign is run. +// Specifies the schedule settings for a campaign. type Schedule struct { _ struct{} `type:"structure"` - // The scheduled time that the campaign ends in ISO 8601 format. + // The scheduled time, in ISO 8601 format, when the campaign ended or will end. EndTime *string `type:"string"` - // Defines the type of events that can trigger the campaign. Used when the Frequency - // is set to EVENT. + // The type of event that causes the campaign to be sent, if the value of the + // Frequency property is EVENT. EventFilter *CampaignEventFilter `type:"structure"` - // How often the campaign delivers messages.Valid values:ONCEHOURLYDAILYWEEKLYMONTHLYEVENT + // Specifies how often the campaign is sent or whether the campaign is sent + // in response to a specific event. Frequency *string `type:"string" enum:"Frequency"` - // Indicates whether the campaign schedule takes effect according to each user's - // local time. + // Specifies whether the start and end times for the campaign schedule use each + // recipient's local time. To base the schedule on each recipient's local time, + // set this value to true. IsLocalTime *bool `type:"boolean"` - // The default quiet time for the campaign. The campaign doesn't send messages - // to endpoints during the quiet time.Note: Make sure that your endpoints include - // the Demographics.Timezone attribute if you plan to enable a quiet time for - // your campaign. If your endpoints don't include this attribute, they'll receive - // the messages that you send them, even if quiet time is enabled.When you set - // up a campaign to use quiet time, the campaign doesn't send messages during - // the time range you specified, as long as all of the following are true:- - // The endpoint includes a valid Demographic.Timezone attribute.- The current - // time in the endpoint's time zone is later than or equal to the time specified - // in the QuietTime.Start attribute for the campaign.- The current time in the - // endpoint's time zone is earlier than or equal to the time specified in the - // QuietTime.End attribute for the campaign. + // The default quiet time for the campaign. Quiet time is a specific time range + // when a campaign doesn't send messages to endpoints, if all the following + // conditions are met: + // + // * The EndpointDemographic.Timezone property of the endpoint is set to + // a valid value. 
+ // + // * The current time in the endpoint's time zone is later than or equal + // to the time specified by the QuietTime.Start property for the campaign. + // + // * The current time in the endpoint's time zone is earlier than or equal + // to the time specified by the QuietTime.End property for the campaign. + // + // If any of the preceding conditions isn't met, the endpoint will receive messages + // from the campaign, even if quiet time is enabled. QuietTime *QuietTime `type:"structure"` - // The scheduled time that the campaign begins in ISO 8601 format. - StartTime *string `type:"string"` - - // The starting UTC offset for the schedule if the value for isLocalTime is - // trueValid values: UTCUTC+01UTC+02UTC+03UTC+03:30UTC+04UTC+04:30UTC+05UTC+05:30UTC+05:45UTC+06UTC+06:30UTC+07UTC+08UTC+09UTC+09:30UTC+10UTC+10:30UTC+11UTC+12UTC+13UTC-02UTC-03UTC-04UTC-05UTC-06UTC-07UTC-08UTC-09UTC-10UTC-11 + // The scheduled time, in ISO 8601 format, when the campaign began or will begin. + // + // StartTime is a required field + StartTime *string `type:"string" required:"true"` + + // The starting UTC offset for the campaign schedule, if the value of the IsLocalTime + // property is true. Valid values are: UTC, UTC+01, UTC+02, UTC+03, UTC+03:30, + // UTC+04, UTC+04:30, UTC+05, UTC+05:30, UTC+05:45, UTC+06, UTC+06:30, UTC+07, + // UTC+08, UTC+09, UTC+09:30, UTC+10, UTC+10:30, UTC+11, UTC+12, UTC+13, UTC-02, + // UTC-03, UTC-04, UTC-05, UTC-06, UTC-07, UTC-08, UTC-09, UTC-10, and UTC-11. Timezone *string `type:"string"` } @@ -18269,6 +25727,24 @@ func (s Schedule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Schedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Schedule"} + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.EventFilter != nil { + if err := s.EventFilter.Validate(); err != nil { + invalidParams.AddNested("EventFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetEndTime sets the EndTime field's value. func (s *Schedule) SetEndTime(v string) *Schedule { s.EndTime = &v @@ -18311,11 +25787,12 @@ func (s *Schedule) SetTimezone(v string) *Schedule { return s } -// Segment behavior dimensions +// Specifies dimension settings for including or excluding endpoints from a +// segment based on how recently an endpoint was active. type SegmentBehaviors struct { _ struct{} `type:"structure"` - // The recency of use. + // The dimension settings that are based on how recently an endpoint was active. Recency *RecencyDimension `type:"structure"` } @@ -18329,13 +25806,69 @@ func (s SegmentBehaviors) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentBehaviors) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentBehaviors"} + if s.Recency != nil { + if err := s.Recency.Validate(); err != nil { + invalidParams.AddNested("Recency", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetRecency sets the Recency field's value. func (s *SegmentBehaviors) SetRecency(v *RecencyDimension) *SegmentBehaviors { s.Recency = v return s } -// Segment demographic dimensions +// Specifies a segment to associate with an activity in a journey. 
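// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff):
// StartTime is now required, so an empty Schedule no longer validates. A
// schedule combining the local-time and quiet-time settings documented above
// might look like this:
sched := &pinpoint.Schedule{
	StartTime:   aws.String("2019-12-01T09:00:00Z"), // ISO 8601
	Frequency:   aws.String("DAILY"),
	IsLocalTime: aws.Bool(true),
	Timezone:    aws.String("UTC+01"), // honored because IsLocalTime is true
	QuietTime: &pinpoint.QuietTime{
		Start: aws.String("21:30"),
		End:   aws.String("06:00"),
	},
}
_ = sched.Validate() // nil: StartTime is set; EventFilter is only checked when present
// ---------------------------------------------------------------------------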
+type SegmentCondition struct { + _ struct{} `type:"structure"` + + // The unique identifier for the segment to associate with the activity. + // + // SegmentId is a required field + SegmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SegmentCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SegmentCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentCondition"} + if s.SegmentId == nil { + invalidParams.Add(request.NewErrParamRequired("SegmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSegmentId sets the SegmentId field's value. +func (s *SegmentCondition) SetSegmentId(v string) *SegmentCondition { + s.SegmentId = &v + return s +} + +// Specifies demographic-based dimension settings for including or excluding +// endpoints from a segment. These settings derive from characteristics of endpoint +// devices, such as platform, make, and model. type SegmentDemographics struct { _ struct{} `type:"structure"` @@ -18368,6 +25901,46 @@ func (s SegmentDemographics) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentDemographics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentDemographics"} + if s.AppVersion != nil { + if err := s.AppVersion.Validate(); err != nil { + invalidParams.AddNested("AppVersion", err.(request.ErrInvalidParams)) + } + } + if s.Channel != nil { + if err := s.Channel.Validate(); err != nil { + invalidParams.AddNested("Channel", err.(request.ErrInvalidParams)) + } + } + if s.DeviceType != nil { + if err := s.DeviceType.Validate(); err != nil { + invalidParams.AddNested("DeviceType", err.(request.ErrInvalidParams)) + } + } + if s.Make != nil { + if err := s.Make.Validate(); err != nil { + invalidParams.AddNested("Make", err.(request.ErrInvalidParams)) + } + } + if s.Model != nil { + if err := s.Model.Validate(); err != nil { + invalidParams.AddNested("Model", err.(request.ErrInvalidParams)) + } + } + if s.Platform != nil { + if err := s.Platform.Validate(); err != nil { + invalidParams.AddNested("Platform", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAppVersion sets the AppVersion field's value. func (s *SegmentDemographics) SetAppVersion(v *SetDimension) *SegmentDemographics { s.AppVersion = v @@ -18404,26 +25977,27 @@ func (s *SegmentDemographics) SetPlatform(v *SetDimension) *SegmentDemographics return s } -// Segment dimensions +// Specifies the dimension settings for a segment. type SegmentDimensions struct { _ struct{} `type:"structure"` - // Custom segment attributes. + // One or more custom attributes to use as criteria for the segment. Attributes map[string]*AttributeDimension `type:"map"` - // The segment behaviors attributes. + // The behavior-based criteria, such as how recently users have used your app, + // for the segment. Behavior *SegmentBehaviors `type:"structure"` - // The segment demographics attributes. + // The demographic-based criteria, such as device platform, for the segment. Demographic *SegmentDemographics `type:"structure"` - // The segment location attributes. 
+ // The location-based criteria, such as region or GPS coordinates, for the segment. Location *SegmentLocation `type:"structure"` - // Custom segment metrics. + // One or more custom metrics to use as criteria for the segment. Metrics map[string]*MetricDimension `type:"map"` - // Custom segment user attributes. + // One or more custom user attributes to use as criteria for the segment. UserAttributes map[string]*AttributeDimension `type:"map"` } @@ -18437,6 +26011,61 @@ func (s SegmentDimensions) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentDimensions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentDimensions"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Behavior != nil { + if err := s.Behavior.Validate(); err != nil { + invalidParams.AddNested("Behavior", err.(request.ErrInvalidParams)) + } + } + if s.Demographic != nil { + if err := s.Demographic.Validate(); err != nil { + invalidParams.AddNested("Demographic", err.(request.ErrInvalidParams)) + } + } + if s.Location != nil { + if err := s.Location.Validate(); err != nil { + invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + for i, v := range s.Metrics { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metrics", i), err.(request.ErrInvalidParams)) + } + } + } + if s.UserAttributes != nil { + for i, v := range s.UserAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAttributes sets the Attributes field's value. func (s *SegmentDimensions) SetAttributes(v map[string]*AttributeDimension) *SegmentDimensions { s.Attributes = v @@ -18473,31 +26102,33 @@ func (s *SegmentDimensions) SetUserAttributes(v map[string]*AttributeDimension) return s } -// Segment group definition. +// Specifies the base segments and dimensions for a segment, and the relationships +// between these base segments and dimensions. type SegmentGroup struct { _ struct{} `type:"structure"` - // List of dimensions to include or exclude. + // An array that defines the dimensions for the segment. Dimensions []*SegmentDimensions `type:"list"` - // The base segment that you build your segment on. The source segment defines - // the starting "universe" of endpoints. When you add dimensions to the segment, - // it filters the source segment based on the dimensions that you specify. You - // can specify more than one dimensional segment. You can only specify one imported - // segment.NOTE: If you specify an imported segment for this attribute, the - // segment size estimate that appears in the Amazon Pinpoint console shows the - // size of the imported segment, without any filters applied to it. + // The base segment to build the segment on. A base segment, also referred to + // as a source segment, defines the initial population of endpoints for a segment. + // When you add dimensions to a segment, Amazon Pinpoint filters the base segment + // by using the dimensions that you specify. 
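// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff): the
// new SegmentDimensions.Validate walks every nested dimension, so a single
// invalid entry fails the whole set. A behavior-only dimension set:
dims := &pinpoint.SegmentDimensions{
	Behavior: &pinpoint.SegmentBehaviors{
		Recency: &pinpoint.RecencyDimension{
			Duration:    aws.String("DAY_14"),
			RecencyType: aws.String("ACTIVE"),
		},
	},
}
_ = dims.Validate() // nested errors are reported per field, e.g. Behavior.Recency.Duration
// ---------------------------------------------------------------------------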
+ // + // You can specify more than one dimensional segment or only one imported segment. + // If you specify an imported segment, the Amazon Pinpoint console displays + // a segment size estimate that indicates the size of the imported segment without + // any filters applied to it. SourceSegments []*SegmentReference `type:"list"` - // Specify how to handle multiple source segments. For example, if you specify - // three source segments, should the resulting segment be based on any or all - // of the segments? Acceptable values: ANY or ALL. + // Specifies how to handle multiple base segments for the segment. For example, + // if you specify three base segments for the segment, whether the resulting + // segment is based on all, any, or none of the base segments. SourceType *string `type:"string" enum:"SourceType"` - // Specify how to handle multiple segment dimensions. For example, if you specify - // three dimensions, should the resulting segment include endpoints that are - // matched by all, any, or none of the dimensions? Acceptable values: ALL, ANY, - // or NONE. + // Specifies how to handle multiple dimensions for the segment. For example, + // if you specify three dimensions for the segment, whether the resulting segment + // includes endpoints that match all, any, or none of the dimensions. Type *string `type:"string" enum:"Type"` } @@ -18511,6 +26142,36 @@ func (s SegmentGroup) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentGroup"} + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceSegments != nil { + for i, v := range s.SourceSegments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SourceSegments", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDimensions sets the Dimensions field's value. func (s *SegmentGroup) SetDimensions(v []*SegmentDimensions) *SegmentGroup { s.Dimensions = v @@ -18535,17 +26196,18 @@ func (s *SegmentGroup) SetType(v string) *SegmentGroup { return s } -// Segment group definition. +// Specifies the settings that define the relationships between segment groups +// for a segment. type SegmentGroupList struct { _ struct{} `type:"structure"` - // A set of segment criteria to evaluate. + // An array that defines the set of segment criteria to evaluate when handling + // segment groups for the segment. Groups []*SegmentGroup `type:"list"` - // Specify how to handle multiple segment groups. For example, if the segment - // includes three segment groups, should the resulting segment include endpoints - // that are matched by all, any, or none of the segment groups you created. - // Acceptable values: ALL, ANY, or NONE. + // Specifies how to handle multiple segment groups for the segment. For example, + // if the segment includes three segment groups, whether the resulting segment + // includes endpoints that match all, any, or none of the segment groups. 
Include *string `type:"string" enum:"Include"` } @@ -18559,6 +26221,26 @@ func (s SegmentGroupList) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentGroupList) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentGroupList"} + if s.Groups != nil { + for i, v := range s.Groups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Groups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetGroups sets the Groups field's value. func (s *SegmentGroupList) SetGroups(v []*SegmentGroup) *SegmentGroupList { s.Groups = v @@ -18571,32 +26253,49 @@ func (s *SegmentGroupList) SetInclude(v string) *SegmentGroupList { return s } -// Segment import definition. +// Provides information about the import job that created a segment. An import +// job is a job that creates a user segment by importing endpoint definitions. type SegmentImportResource struct { _ struct{} `type:"structure"` - // The number of channel types in the imported segment. + // The number of channel types in the endpoint definitions that were imported + // to create the segment. ChannelCounts map[string]*int64 `type:"map"` - // (Deprecated) Your AWS account ID, which you assigned to the ExternalID key - // in an IAM trust policy. Used by Amazon Pinpoint to assume an IAM role. This - // requirement is removed, and external IDs are not recommended for IAM roles - // assumed by Amazon Pinpoint. - ExternalId *string `type:"string"` + // (Deprecated) Your AWS account ID, which you assigned to an external ID key + // in an IAM trust policy. Amazon Pinpoint previously used this value to assume + // an IAM role when importing endpoint definitions, but we removed this requirement. + // We don't recommend use of external IDs for IAM roles that are assumed by + // Amazon Pinpoint. + // + // ExternalId is a required field + ExternalId *string `type:"string" required:"true"` - // The format of the endpoint files that were imported to create this segment.Valid - // values: CSV, JSON - Format *string `type:"string" enum:"Format"` + // The format of the files that were imported to create the segment. Valid values + // are: CSV, for comma-separated values format; and, JSON, for newline-delimited + // JSON format. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"Format"` - // The Amazon Resource Name (ARN) of an IAM role that grants Amazon Pinpoint - // access to the endpoints in Amazon S3. - RoleArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorized Amazon Pinpoint to access the Amazon S3 location + // to import endpoint definitions from. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` - // The URL of the S3 bucket that the segment was imported from. - S3Url *string `type:"string"` + // The URL of the Amazon Simple Storage Service (Amazon S3) bucket that the + // endpoint definitions were imported from to create the segment. + // + // S3Url is a required field + S3Url *string `type:"string" required:"true"` - // The number of endpoints that were successfully imported to create this segment. - Size *int64 `type:"integer"` + // The number of endpoint definitions that were imported successfully to create + // the segment. 
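// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff): one
// segment group that filters a single imported base segment by the dimension
// set sketched above, wrapped in a group list. The ALL/ANY/NONE values come
// from the documentation in this hunk:
groups := &pinpoint.SegmentGroupList{
	Groups: []*pinpoint.SegmentGroup{{
		SourceSegments: []*pinpoint.SegmentReference{{Id: aws.String("segment-id")}},
		SourceType:     aws.String("ALL"),
		Dimensions:     []*pinpoint.SegmentDimensions{dims},
		Type:           aws.String("ALL"),
	}},
	Include: aws.String("ALL"),
}
_ = groups.Validate() // recurses into every group and source segment
// ---------------------------------------------------------------------------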
+ // + // Size is a required field + Size *int64 `type:"integer" required:"true"` } // String returns the string representation @@ -18645,14 +26344,14 @@ func (s *SegmentImportResource) SetSize(v int64) *SegmentImportResource { return s } -// Segment location dimensions +// Specifies geographical dimension settings for a segment. type SegmentLocation struct { _ struct{} `type:"structure"` - // The country or region, in ISO 3166-1 alpha-2 format. + // The country or region code, in ISO 3166-1 alpha-2 format, for the segment. Country *SetDimension `type:"structure"` - // The GPS Point dimension. + // The GPS location and range for the segment. GPSPoint *GPSPointDimension `type:"structure"` } @@ -18666,6 +26365,26 @@ func (s SegmentLocation) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentLocation"} + if s.Country != nil { + if err := s.Country.Validate(); err != nil { + invalidParams.AddNested("Country", err.(request.ErrInvalidParams)) + } + } + if s.GPSPoint != nil { + if err := s.GPSPoint.Validate(); err != nil { + invalidParams.AddNested("GPSPoint", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetCountry sets the Country field's value. func (s *SegmentLocation) SetCountry(v *SetDimension) *SegmentLocation { s.Country = v @@ -18678,14 +26397,16 @@ func (s *SegmentLocation) SetGPSPoint(v *GPSPointDimension) *SegmentLocation { return s } -// Segment reference. +// Specifies the segment identifier and version of a segment. type SegmentReference struct { _ struct{} `type:"structure"` - // A unique identifier for the segment. - Id *string `type:"string"` + // The unique identifier for the segment. + // + // Id is a required field + Id *string `type:"string" required:"true"` - // If specified contains a specific version of the segment included. + // The version number of the segment. Version *int64 `type:"integer"` } @@ -18699,6 +26420,19 @@ func (s SegmentReference) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SegmentReference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SegmentReference"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetId sets the Id field's value. func (s *SegmentReference) SetId(v string) *SegmentReference { s.Id = &v @@ -18711,26 +26445,36 @@ func (s *SegmentReference) SetVersion(v int64) *SegmentReference { return s } -// Segment definition. +// Provides information about the configuration, dimension, and other settings +// for a segment. type SegmentResponse struct { _ struct{} `type:"structure"` - // The ID of the application that the segment applies to. - ApplicationId *string `type:"string"` + // The unique identifier for the application that the segment is associated + // with. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` - // The arn for the segment. - Arn *string `type:"string"` + // The Amazon Resource Name (ARN) of the segment. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` // The date and time when the segment was created. 
- CreationDate *string `type:"string"` + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` - // The segment dimensions attributes. + // The dimension settings for the segment. Dimensions *SegmentDimensions `type:"structure"` - // The unique segment ID. - Id *string `type:"string"` + // The unique identifier for the segment. + // + // Id is a required field + Id *string `type:"string" required:"true"` - // The import job settings. + // The settings for the import job that's associated with the segment. ImportDefinition *SegmentImportResource `type:"structure"` // The date and time when the segment was last modified. @@ -18739,23 +26483,30 @@ type SegmentResponse struct { // The name of the segment. Name *string `type:"string"` - // A segment group, which consists of zero or more source segments, plus dimensions - // that are applied to those source segments. + // A list of one or more segment groups that apply to the segment. Each segment + // group consists of zero or more base segments and the dimensions that are + // applied to those base segments. SegmentGroups *SegmentGroupList `type:"structure"` - // The segment type:DIMENSIONAL - A dynamic segment built from selection criteria - // based on endpoint data reported by your app. You create this type of segment - // by using the segment builder in the Amazon Pinpoint console or by making - // a POST request to the segments resource.IMPORT - A static segment built from - // an imported set of endpoint definitions. You create this type of segment - // by importing a segment in the Amazon Pinpoint console or by making a POST - // request to the jobs/import resource. - SegmentType *string `type:"string" enum:"SegmentType"` + // The segment type. Valid values are: + // + // * DIMENSIONAL - A dynamic segment, which is a segment that uses selection + // criteria that you specify and is based on endpoint data that's reported + // by your app. Dynamic segments can change over time. + // + // * IMPORT - A static segment, which is a segment that uses selection criteria + // that you specify and is based on endpoint definitions that you import + // from a file. Imported segments are static; they don't change over time. + // + // SegmentType is a required field + SegmentType *string `type:"string" required:"true" enum:"SegmentType"` - // The Tags for the segment. + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the segment. Each tag consists of a required tag key and + // an associated tag value. Tags map[string]*string `locationName:"tags" type:"map"` - // The segment version number. + // The version number of the segment. Version *int64 `type:"integer"` } @@ -18841,15 +26592,19 @@ func (s *SegmentResponse) SetVersion(v int64) *SegmentResponse { return s } -// Segments in your account. +// Provides information about all the segments that are associated with an application. type SegmentsResponse struct { _ struct{} `type:"structure"` - // The list of segments. - Item []*SegmentResponse `type:"list"` + // An array of responses, one for each segment that's associated with the application + // (Segments resource) or each version of a segment that's associated with the + // application (Segment Versions resource). + // + // Item is a required field + Item []*SegmentResponse `type:"list" required:"true"` - // An identifier used to retrieve the next page of results. The token is null - // if no additional pages exist. 
+ // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. NextToken *string `type:"string"` } @@ -18881,7 +26636,8 @@ type SendMessagesInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Send message request. + // Specifies the objects that define configuration and other settings for a + // message. // // MessageRequest is a required field MessageRequest *MessageRequest `type:"structure" required:"true"` @@ -18909,6 +26665,11 @@ func (s *SendMessagesInput) Validate() error { if s.MessageRequest == nil { invalidParams.Add(request.NewErrParamRequired("MessageRequest")) } + if s.MessageRequest != nil { + if err := s.MessageRequest.Validate(); err != nil { + invalidParams.AddNested("MessageRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -18931,7 +26692,8 @@ func (s *SendMessagesInput) SetMessageRequest(v *MessageRequest) *SendMessagesIn type SendMessagesOutput struct { _ struct{} `type:"structure" payload:"MessageResponse"` - // Send message response. + // Provides information about the results of a request to send a message to + // an endpoint address. // // MessageResponse is a required field MessageResponse *MessageResponse `type:"structure" required:"true"` @@ -18953,27 +26715,36 @@ func (s *SendMessagesOutput) SetMessageResponse(v *MessageResponse) *SendMessage return s } -// Send message request. +// Specifies the configuration and other settings for a message to send to all +// the endpoints that are associated with a list of users. type SendUsersMessageRequest struct { _ struct{} `type:"structure"` - // A map of custom attribute-value pairs. Amazon Pinpoint adds these attributes - // to the data.pinpoint object in the body of the push notification payload. - // Amazon Pinpoint also provides these attributes in the events that it generates - // for users-messages deliveries. + // A map of custom attribute-value pairs. For a push notification, Amazon Pinpoint + // adds these attributes to the data.pinpoint object in the body of the notification + // payload. Amazon Pinpoint also provides these attributes in the events that + // it generates for users-messages deliveries. Context map[string]*string `type:"map"` - // Message definitions for the default message and any messages that are tailored - // for specific channels. - MessageConfiguration *DirectMessageConfiguration `type:"structure"` + // The message definitions for the default message and any default messages + // that you defined for specific channels. + // + // MessageConfiguration is a required field + MessageConfiguration *DirectMessageConfiguration `type:"structure" required:"true"` + + // The message template to use for the message. + TemplateConfiguration *TemplateConfiguration `type:"structure"` - // A unique ID that you can use to trace a message. This ID is visible to recipients. + // The unique identifier for tracing the message. This identifier is visible + // to message recipients. TraceId *string `type:"string"` - // A map that associates user IDs with EndpointSendConfiguration objects. Within - // an EndpointSendConfiguration object, you can tailor the message for a user - // by specifying message overrides or substitutions. - Users map[string]*EndpointSendConfiguration `type:"map"` + // A map that associates user IDs with EndpointSendConfiguration objects. 
You + // can use an EndpointSendConfiguration object to tailor the message for a user + // by specifying settings such as content overrides and message variables. + // + // Users is a required field + Users map[string]*EndpointSendConfiguration `type:"map" required:"true"` } // String returns the string representation @@ -18986,6 +26757,22 @@ func (s SendUsersMessageRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendUsersMessageRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendUsersMessageRequest"} + if s.MessageConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MessageConfiguration")) + } + if s.Users == nil { + invalidParams.Add(request.NewErrParamRequired("Users")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetContext sets the Context field's value. func (s *SendUsersMessageRequest) SetContext(v map[string]*string) *SendUsersMessageRequest { s.Context = v @@ -18998,6 +26785,12 @@ func (s *SendUsersMessageRequest) SetMessageConfiguration(v *DirectMessageConfig return s } +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *SendUsersMessageRequest) SetTemplateConfiguration(v *TemplateConfiguration) *SendUsersMessageRequest { + s.TemplateConfiguration = v + return s +} + // SetTraceId sets the TraceId field's value. func (s *SendUsersMessageRequest) SetTraceId(v string) *SendUsersMessageRequest { s.TraceId = &v @@ -19010,19 +26803,21 @@ func (s *SendUsersMessageRequest) SetUsers(v map[string]*EndpointSendConfigurati return s } -// User send message response. +// Provides information about which users and endpoints a message was sent to. type SendUsersMessageResponse struct { _ struct{} `type:"structure"` - // The unique ID of the Amazon Pinpoint project used to send the message. - ApplicationId *string `type:"string"` + // The unique identifier for the application that was used to send the message. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` - // The unique ID assigned to the users-messages request. + // The unique identifier that was assigned to the message request. RequestId *string `type:"string"` - // An object that shows the endpoints that were messaged for each user. The - // object provides a list of user IDs. For each user ID, it provides the endpoint - // IDs that were messaged. For each endpoint ID, it provides an EndpointMessageResult + // An object that indicates which endpoints the message was sent to, for each + // user. The object lists user IDs and, for each user ID, provides the endpoint + // IDs that the message was sent to. For each endpoint ID, it provides an EndpointMessageResult // object. Result map[string]map[string]*EndpointMessageResult `type:"map"` } @@ -19061,7 +26856,8 @@ type SendUsersMessagesInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Send message request. + // Specifies the configuration and other settings for a message to send to all + // the endpoints that are associated with a list of users. 
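// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff):
// MessageConfiguration and Users are now required. Assumes a pinpoint client
// (svc), the msg sketched earlier, and DirectMessageConfiguration's
// SMSMessage field, which is defined elsewhere in this file:
out, err := svc.SendUsersMessages(&pinpoint.SendUsersMessagesInput{
	ApplicationId: aws.String("application-id"),
	SendUsersMessageRequest: &pinpoint.SendUsersMessageRequest{
		MessageConfiguration: &pinpoint.DirectMessageConfiguration{SMSMessage: msg},
		Users: map[string]*pinpoint.EndpointSendConfiguration{
			"user-123": {}, // empty config: no per-user overrides
		},
	},
})
if err == nil {
	fmt.Println(*out.SendUsersMessageResponse.RequestId)
}
// ---------------------------------------------------------------------------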
// // SendUsersMessageRequest is a required field SendUsersMessageRequest *SendUsersMessageRequest `type:"structure" required:"true"` @@ -19089,6 +26885,11 @@ func (s *SendUsersMessagesInput) Validate() error { if s.SendUsersMessageRequest == nil { invalidParams.Add(request.NewErrParamRequired("SendUsersMessageRequest")) } + if s.SendUsersMessageRequest != nil { + if err := s.SendUsersMessageRequest.Validate(); err != nil { + invalidParams.AddNested("SendUsersMessageRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -19111,7 +26912,7 @@ func (s *SendUsersMessagesInput) SetSendUsersMessageRequest(v *SendUsersMessageR type SendUsersMessagesOutput struct { _ struct{} `type:"structure" payload:"SendUsersMessageResponse"` - // User send message response. + // Provides information about which users and endpoints a message was sent to. // // SendUsersMessageResponse is a required field SendUsersMessageResponse *SendUsersMessageResponse `type:"structure" required:"true"` @@ -19133,18 +26934,22 @@ func (s *SendUsersMessagesOutput) SetSendUsersMessageResponse(v *SendUsersMessag return s } -// Information about a session. +// Provides information about a session. type Session struct { _ struct{} `type:"structure"` // The duration of the session, in milliseconds. Duration *int64 `type:"integer"` - // A unique identifier for the session. - Id *string `type:"string"` + // The unique identifier for the session. + // + // Id is a required field + Id *string `type:"string" required:"true"` // The date and time when the session began. - StartTimestamp *string `type:"string"` + // + // StartTimestamp is a required field + StartTimestamp *string `type:"string" required:"true"` // The date and time when the session ended. StopTimestamp *string `type:"string"` @@ -19160,6 +26965,22 @@ func (s Session) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Session) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Session"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StartTimestamp == nil { + invalidParams.Add(request.NewErrParamRequired("StartTimestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDuration sets the Duration field's value. func (s *Session) SetDuration(v int64) *Session { s.Duration = &v @@ -19172,70 +26993,153 @@ func (s *Session) SetId(v string) *Session { return s } -// SetStartTimestamp sets the StartTimestamp field's value. -func (s *Session) SetStartTimestamp(v string) *Session { - s.StartTimestamp = &v +// SetStartTimestamp sets the StartTimestamp field's value. +func (s *Session) SetStartTimestamp(v string) *Session { + s.StartTimestamp = &v + return s +} + +// SetStopTimestamp sets the StopTimestamp field's value. +func (s *Session) SetStopTimestamp(v string) *Session { + s.StopTimestamp = &v + return s +} + +// Specifies the dimension type and values for a segment dimension. +type SetDimension struct { + _ struct{} `type:"structure"` + + // The type of segment dimension to use. Valid values are: INCLUSIVE, endpoints + // that match the criteria are included in the segment; and, EXCLUSIVE, endpoints + // that match the criteria are excluded from the segment. + DimensionType *string `type:"string" enum:"DimensionType"` + + // The criteria values to use for the segment dimension. 
Depending on the value + // of the DimensionType property, endpoints are included or excluded from the + // segment if their values match the criteria values. + // + // Values is a required field + Values []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetDimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetDimension"} + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensionType sets the DimensionType field's value. +func (s *SetDimension) SetDimensionType(v string) *SetDimension { + s.DimensionType = &v return s } -// SetStopTimestamp sets the StopTimestamp field's value. -func (s *Session) SetStopTimestamp(v string) *Session { - s.StopTimestamp = &v +// SetValues sets the Values field's value. +func (s *SetDimension) SetValues(v []*string) *SetDimension { + s.Values = v return s } -// Dimension specification of a segment. -type SetDimension struct { +// Specifies a condition to evaluate for an activity in a journey. +type SimpleCondition struct { _ struct{} `type:"structure"` - // The type of dimension:INCLUSIVE - Endpoints that match the criteria are included - // in the segment.EXCLUSIVE - Endpoints that match the criteria are excluded - // from the segment. - DimensionType *string `type:"string" enum:"DimensionType"` + // The dimension settings for the event that's associated with the activity. + EventCondition *EventCondition `type:"structure"` - // The criteria values for the segment dimension. Endpoints with matching attribute - // values are included or excluded from the segment, depending on the setting - // for Type. - Values []*string `type:"list"` + // The segment that's associated with the activity. + SegmentCondition *SegmentCondition `type:"structure"` + + // The dimension settings for the segment that's associated with the activity. + SegmentDimensions *SegmentDimensions `locationName:"segmentDimensions" type:"structure"` } // String returns the string representation -func (s SetDimension) String() string { +func (s SimpleCondition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SetDimension) GoString() string { +func (s SimpleCondition) GoString() string { return s.String() } -// SetDimensionType sets the DimensionType field's value. -func (s *SetDimension) SetDimensionType(v string) *SetDimension { - s.DimensionType = &v +// Validate inspects the fields of the type to determine if they are valid. 
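// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated diff): Values
// is now required while DimensionType stays optional. The INCLUSIVE/EXCLUSIVE
// semantics follow the documentation above:
dim := &pinpoint.SetDimension{}
dim.SetDimensionType("EXCLUSIVE") // matching endpoints are excluded from the segment
dim.SetValues([]*string{aws.String("Android"), aws.String("iOS")})
_ = dim.Validate() // fails only when Values is nil
// ---------------------------------------------------------------------------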
+func (s *SimpleCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SimpleCondition"} + if s.EventCondition != nil { + if err := s.EventCondition.Validate(); err != nil { + invalidParams.AddNested("EventCondition", err.(request.ErrInvalidParams)) + } + } + if s.SegmentCondition != nil { + if err := s.SegmentCondition.Validate(); err != nil { + invalidParams.AddNested("SegmentCondition", err.(request.ErrInvalidParams)) + } + } + if s.SegmentDimensions != nil { + if err := s.SegmentDimensions.Validate(); err != nil { + invalidParams.AddNested("SegmentDimensions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventCondition sets the EventCondition field's value. +func (s *SimpleCondition) SetEventCondition(v *EventCondition) *SimpleCondition { + s.EventCondition = v return s } -// SetValues sets the Values field's value. -func (s *SetDimension) SetValues(v []*string) *SetDimension { - s.Values = v +// SetSegmentCondition sets the SegmentCondition field's value. +func (s *SimpleCondition) SetSegmentCondition(v *SegmentCondition) *SimpleCondition { + s.SegmentCondition = v + return s +} + +// SetSegmentDimensions sets the SegmentDimensions field's value. +func (s *SimpleCondition) SetSegmentDimensions(v *SegmentDimensions) *SimpleCondition { + s.SegmentDimensions = v return s } -// An email composed of a subject, a text part and a html part. +// Specifies the contents of an email message, composed of a subject, a text +// part, and an HTML part. type SimpleEmail struct { _ struct{} `type:"structure"` - // The content of the message, in HTML format. Use this for email clients that - // can process HTML. You can include clickable links, formatted text, and much - // more in an HTML message. + // The body of the email message, in HTML format. We recommend using an HTML + // part for email clients that support HTML. You can include links, formatted + // text, and more in an HTML message. HtmlPart *SimpleEmailPart `type:"structure"` - // The subject of the message: A short summary of the content, which will appear - // in the recipient's inbox. + // The subject line, or title, of the email. Subject *SimpleEmailPart `type:"structure"` - // The content of the message, in text format. Use this for text-based email - // clients, or clients on high-latency networks (such as mobile devices). + // The body of the email message, in text format. We recommend using a text + // part for email clients that don't support HTML and clients that are connected + // to high-latency networks, such as mobile devices. TextPart *SimpleEmailPart `type:"structure"` } @@ -19267,14 +27171,15 @@ func (s *SimpleEmail) SetTextPart(v *SimpleEmailPart) *SimpleEmail { return s } -// Textual email data, plus an optional character set specification. +// Specifies the subject or body of an email message, represented as textual +// email data and the applicable character set. type SimpleEmailPart struct { _ struct{} `type:"structure"` - // The character set of the content. + // The applicable character set for the message content. Charset *string `type:"string"` - // The textual data of the content. + // The textual data of the message content. Data *string `type:"string"` } @@ -19300,12 +27205,65 @@ func (s *SimpleEmailPart) SetData(v string) *SimpleEmailPart { return s } +// Specifies the conditions for the first activity in a journey. This activity +// and its conditions determine which users are participants in a journey. 
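// Editorial sketch (not part of the upstream diff): composing a SimpleEmail
// from SimpleEmailPart values with the generated setters described above; the
// subject and body text are hypothetical placeholders.
// (import: "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleSimpleEmail() *pinpoint.SimpleEmail {
	return (&pinpoint.SimpleEmail{}).
		SetSubject((&pinpoint.SimpleEmailPart{}).SetData("Welcome")).
		SetHtmlPart((&pinpoint.SimpleEmailPart{}).SetCharset("UTF-8").SetData("<p>Hello!</p>")).
		SetTextPart((&pinpoint.SimpleEmailPart{}).SetData("Hello!")) // fallback for text-only clients
}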
+type StartCondition struct { + _ struct{} `type:"structure"` + + // The custom description of the condition. + Description *string `type:"string"` + + // The segment that's associated with the first activity in the journey. This + // segment determines which users are participants in the journey. + SegmentStartCondition *SegmentCondition `type:"structure"` +} + +// String returns the string representation +func (s StartCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartCondition"} + if s.SegmentStartCondition != nil { + if err := s.SegmentStartCondition.Validate(); err != nil { + invalidParams.AddNested("SegmentStartCondition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *StartCondition) SetDescription(v string) *StartCondition { + s.Description = &v + return s +} + +// SetSegmentStartCondition sets the SegmentStartCondition field's value. +func (s *StartCondition) SetSegmentStartCondition(v *SegmentCondition) *StartCondition { + s.SegmentStartCondition = v + return s +} + type TagResourceInput struct { _ struct{} `type:"structure" payload:"TagsModel"` // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + // Specifies the tags (keys and values) for an application, campaign, journey, + // message template, or segment. + // // TagsModel is a required field TagsModel *TagsModel `type:"structure" required:"true"` } @@ -19370,9 +27328,19 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// Specifies the tags (keys and values) for an application, campaign, journey, +// message template, or segment. type TagsModel struct { _ struct{} `type:"structure"` + // A string-to-string map of key-value pairs that defines the tags for an application, + // campaign, journey, message template, or segment. Each of these resources + // can have a maximum of 50 tags. + // + // Each tag consists of a required tag key and an associated tag value. The + // maximum length of a tag key is 128 characters. The maximum length of a tag + // value is 256 characters. + // // Tags is a required field Tags map[string]*string `locationName:"tags" type:"map" required:"true"` } @@ -19406,29 +27374,225 @@ func (s *TagsModel) SetTags(v map[string]*string) *TagsModel { return s } -// Treatment resource +// Specifies the name of the message template to use for the message. +type Template struct { + _ struct{} `type:"structure"` + + // The name of the message template to use for the message. If specified, this + // value must match the name of an existing message template. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Template) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Template) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Template) SetName(v string) *Template { + s.Name = &v + return s +} + +// Specifies the message template for each type of channel. 
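// Editorial sketch (not part of the upstream diff): tagging a resource with
// the TagsModel payload defined above. The ARN parameter and tag values are
// hypothetical placeholders; svc is an existing *pinpoint.Pinpoint client.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleTagResource(svc *pinpoint.Pinpoint, resourceArn string) error {
	_, err := svc.TagResource(&pinpoint.TagResourceInput{
		ResourceArn: aws.String(resourceArn),
		TagsModel: &pinpoint.TagsModel{
			// Up to 50 tags per resource, per the TagsModel documentation above.
			Tags: map[string]*string{"team": aws.String("growth")},
		},
	})
	return err
}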
+type TemplateConfiguration struct { + _ struct{} `type:"structure"` + + // The email template to use for the message. + EmailTemplate *Template `type:"structure"` + + // The push notification template to use for the message. + PushTemplate *Template `type:"structure"` + + // The SMS template to use for the message. + SMSTemplate *Template `type:"structure"` +} + +// String returns the string representation +func (s TemplateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateConfiguration) GoString() string { + return s.String() +} + +// SetEmailTemplate sets the EmailTemplate field's value. +func (s *TemplateConfiguration) SetEmailTemplate(v *Template) *TemplateConfiguration { + s.EmailTemplate = v + return s +} + +// SetPushTemplate sets the PushTemplate field's value. +func (s *TemplateConfiguration) SetPushTemplate(v *Template) *TemplateConfiguration { + s.PushTemplate = v + return s +} + +// SetSMSTemplate sets the SMSTemplate field's value. +func (s *TemplateConfiguration) SetSMSTemplate(v *Template) *TemplateConfiguration { + s.SMSTemplate = v + return s +} + +// Provides information about a message template that's associated with your +// Amazon Pinpoint account. +type TemplateResponse struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the message template. + Arn *string `type:"string"` + + // The date when the message template was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` + + // The date when the message template was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the message template. Each tag consists of a required tag + // key and an associated tag value. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The name of the message template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The type of channel that the message template is designed for. + // + // TemplateType is a required field + TemplateType *string `type:"string" required:"true" enum:"TemplateType"` +} + +// String returns the string representation +func (s TemplateResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateResponse) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *TemplateResponse) SetArn(v string) *TemplateResponse { + s.Arn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *TemplateResponse) SetCreationDate(v string) *TemplateResponse { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *TemplateResponse) SetLastModifiedDate(v string) *TemplateResponse { + s.LastModifiedDate = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TemplateResponse) SetTags(v map[string]*string) *TemplateResponse { + s.Tags = v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *TemplateResponse) SetTemplateName(v string) *TemplateResponse { + s.TemplateName = &v + return s +} + +// SetTemplateType sets the TemplateType field's value. 
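// Editorial sketch (not part of the upstream diff): a TemplateConfiguration
// only names existing message templates per channel, as the Template type
// above shows; "welcome-email" is a hypothetical template name.
// (import: "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleTemplateConfiguration() *pinpoint.TemplateConfiguration {
	return (&pinpoint.TemplateConfiguration{}).
		SetEmailTemplate((&pinpoint.Template{}).SetName("welcome-email"))
}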
+func (s *TemplateResponse) SetTemplateType(v string) *TemplateResponse { + s.TemplateType = &v + return s +} + +// Provides information about all the message templates that are associated +// with your Amazon Pinpoint account. +type TemplatesResponse struct { + _ struct{} `type:"structure"` + + // An array of responses, one for each message template that's associated with + // your Amazon Pinpoint account and meets any filter criteria that you specified + // in the request. + // + // Item is a required field + Item []*TemplateResponse `type:"list" required:"true"` + + // The string to use in a subsequent request to get the next page of results + // in a paginated response. This value is null if there are no additional pages. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s TemplatesResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplatesResponse) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *TemplatesResponse) SetItem(v []*TemplateResponse) *TemplatesResponse { + s.Item = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *TemplatesResponse) SetNextToken(v string) *TemplatesResponse { + s.NextToken = &v + return s +} + +// Specifies the settings for a campaign treatment. A treatment is a variation +// of a campaign that's used for A/B testing of a campaign. type TreatmentResource struct { _ struct{} `type:"structure"` - // The unique treatment ID. - Id *string `type:"string"` + // The unique identifier for the treatment. + // + // Id is a required field + Id *string `type:"string" required:"true"` - // The message configuration settings. + // The message configuration settings for the treatment. MessageConfiguration *MessageConfiguration `type:"structure"` - // The campaign schedule. + // The schedule settings for the treatment. Schedule *Schedule `type:"structure"` - // The allocated percentage of users for this treatment. - SizePercent *int64 `type:"integer"` + // The allocated percentage of users (segment members) that the treatment is + // sent to. + // + // SizePercent is a required field + SizePercent *int64 `type:"integer" required:"true"` - // The treatment status. + // The current status of the treatment. State *CampaignState `type:"structure"` - // A custom description for the treatment. + // The message template to use for the treatment. + TemplateConfiguration *TemplateConfiguration `type:"structure"` + + // The custom description of the treatment. TreatmentDescription *string `type:"string"` - // The custom name of a variation of the campaign used for A/B testing. + // The custom name of the treatment. A treatment is a variation of a campaign + // that's used for A/B testing of a campaign. TreatmentName *string `type:"string"` } @@ -19472,6 +27636,12 @@ func (s *TreatmentResource) SetState(v *CampaignState) *TreatmentResource { return s } +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *TreatmentResource) SetTemplateConfiguration(v *TemplateConfiguration) *TreatmentResource { + s.TemplateConfiguration = v + return s +} + // SetTreatmentDescription sets the TreatmentDescription field's value. 
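// Editorial sketch (not part of the upstream diff): TemplatesResponse is
// paginated via NextToken. This assumes ListTemplates is the operation that
// returns it and that ListTemplatesInput carries a NextToken field, matching
// the pagination pattern used elsewhere in this package.
// (import: "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleListAllTemplates(svc *pinpoint.Pinpoint) ([]*pinpoint.TemplateResponse, error) {
	var all []*pinpoint.TemplateResponse
	in := &pinpoint.ListTemplatesInput{}
	for {
		out, err := svc.ListTemplates(in)
		if err != nil {
			return nil, err
		}
		all = append(all, out.TemplatesResponse.Item...)
		if out.TemplatesResponse.NextToken == nil {
			return all, nil // no more pages
		}
		in.NextToken = out.TemplatesResponse.NextToken // request the next page
	}
}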
func (s *TreatmentResource) SetTreatmentDescription(v string) *TreatmentResource { s.TreatmentDescription = &v @@ -19552,7 +27722,8 @@ func (s UntagResourceOutput) GoString() string { type UpdateAdmChannelInput struct { _ struct{} `type:"structure" payload:"ADMChannelRequest"` - // Amazon Device Messaging channel definition. + // Specifies the status and settings of the ADM (Amazon Device Messaging) channel + // for an application. // // ADMChannelRequest is a required field ADMChannelRequest *ADMChannelRequest `type:"structure" required:"true"` @@ -19583,6 +27754,11 @@ func (s *UpdateAdmChannelInput) Validate() error { if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) } + if s.ADMChannelRequest != nil { + if err := s.ADMChannelRequest.Validate(); err != nil { + invalidParams.AddNested("ADMChannelRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -19605,7 +27781,8 @@ func (s *UpdateAdmChannelInput) SetApplicationId(v string) *UpdateAdmChannelInpu type UpdateAdmChannelOutput struct { _ struct{} `type:"structure" payload:"ADMChannelResponse"` - // Amazon Device Messaging channel definition. + // Provides information about the status and settings of the ADM (Amazon Device + // Messaging) channel for an application. // // ADMChannelResponse is a required field ADMChannelResponse *ADMChannelResponse `type:"structure" required:"true"` @@ -19630,7 +27807,8 @@ func (s *UpdateAdmChannelOutput) SetADMChannelResponse(v *ADMChannelResponse) *U type UpdateApnsChannelInput struct { _ struct{} `type:"structure" payload:"APNSChannelRequest"` - // Apple Push Notification Service channel definition. + // Specifies the status and settings of the APNs (Apple Push Notification service) + // channel for an application. // // APNSChannelRequest is a required field APNSChannelRequest *APNSChannelRequest `type:"structure" required:"true"` @@ -19683,7 +27861,8 @@ func (s *UpdateApnsChannelInput) SetApplicationId(v string) *UpdateApnsChannelIn type UpdateApnsChannelOutput struct { _ struct{} `type:"structure" payload:"APNSChannelResponse"` - // Apple Distribution Push Notification Service channel definition. + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) channel for an application. // // APNSChannelResponse is a required field APNSChannelResponse *APNSChannelResponse `type:"structure" required:"true"` @@ -19708,7 +27887,8 @@ func (s *UpdateApnsChannelOutput) SetAPNSChannelResponse(v *APNSChannelResponse) type UpdateApnsSandboxChannelInput struct { _ struct{} `type:"structure" payload:"APNSSandboxChannelRequest"` - // Apple Development Push Notification Service channel definition. + // Specifies the status and settings of the APNs (Apple Push Notification service) + // sandbox channel for an application. // // APNSSandboxChannelRequest is a required field APNSSandboxChannelRequest *APNSSandboxChannelRequest `type:"structure" required:"true"` @@ -19761,7 +27941,8 @@ func (s *UpdateApnsSandboxChannelInput) SetApplicationId(v string) *UpdateApnsSa type UpdateApnsSandboxChannelOutput struct { _ struct{} `type:"structure" payload:"APNSSandboxChannelResponse"` - // Apple Development Push Notification Service channel definition. + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) sandbox channel for an application. 
// // APNSSandboxChannelResponse is a required field APNSSandboxChannelResponse *APNSSandboxChannelResponse `type:"structure" required:"true"` @@ -19786,7 +27967,8 @@ func (s *UpdateApnsSandboxChannelOutput) SetAPNSSandboxChannelResponse(v *APNSSa type UpdateApnsVoipChannelInput struct { _ struct{} `type:"structure" payload:"APNSVoipChannelRequest"` - // Apple VoIP Push Notification Service channel definition. + // Specifies the status and settings of the APNs (Apple Push Notification service) + // VoIP channel for an application. // // APNSVoipChannelRequest is a required field APNSVoipChannelRequest *APNSVoipChannelRequest `type:"structure" required:"true"` @@ -19839,7 +28021,8 @@ func (s *UpdateApnsVoipChannelInput) SetApplicationId(v string) *UpdateApnsVoipC type UpdateApnsVoipChannelOutput struct { _ struct{} `type:"structure" payload:"APNSVoipChannelResponse"` - // Apple VoIP Push Notification Service channel definition. + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) VoIP channel for an application. // // APNSVoipChannelResponse is a required field APNSVoipChannelResponse *APNSVoipChannelResponse `type:"structure" required:"true"` @@ -19864,7 +28047,8 @@ func (s *UpdateApnsVoipChannelOutput) SetAPNSVoipChannelResponse(v *APNSVoipChan type UpdateApnsVoipSandboxChannelInput struct { _ struct{} `type:"structure" payload:"APNSVoipSandboxChannelRequest"` - // Apple VoIP Developer Push Notification Service channel definition. + // Specifies the status and settings of the APNs (Apple Push Notification service) + // VoIP sandbox channel for an application. // // APNSVoipSandboxChannelRequest is a required field APNSVoipSandboxChannelRequest *APNSVoipSandboxChannelRequest `type:"structure" required:"true"` @@ -19917,7 +28101,8 @@ func (s *UpdateApnsVoipSandboxChannelInput) SetApplicationId(v string) *UpdateAp type UpdateApnsVoipSandboxChannelOutput struct { _ struct{} `type:"structure" payload:"APNSVoipSandboxChannelResponse"` - // Apple VoIP Developer Push Notification Service channel definition. + // Provides information about the status and settings of the APNs (Apple Push + // Notification service) VoIP sandbox channel for an application. // // APNSVoipSandboxChannelResponse is a required field APNSVoipSandboxChannelResponse *APNSVoipSandboxChannelResponse `type:"structure" required:"true"` @@ -19945,7 +28130,7 @@ type UpdateApplicationSettingsInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Creating application setting request + // Specifies the default settings for an application. // // WriteApplicationSettingsRequest is a required field WriteApplicationSettingsRequest *WriteApplicationSettingsRequest `type:"structure" required:"true"` @@ -19995,7 +28180,8 @@ func (s *UpdateApplicationSettingsInput) SetWriteApplicationSettingsRequest(v *W type UpdateApplicationSettingsOutput struct { _ struct{} `type:"structure" payload:"ApplicationSettingsResource"` - // Application settings. + // Provides information about an application, including the default settings + // for an application. 
// // ApplicationSettingsResource is a required field ApplicationSettingsResource *ApplicationSettingsResource `type:"structure" required:"true"` @@ -20017,11 +28203,15 @@ func (s *UpdateApplicationSettingsOutput) SetApplicationSettingsResource(v *Appl return s } -// Update attributes request +// Specifies one or more attributes to remove from all the endpoints that are +// associated with an application. type UpdateAttributesRequest struct { _ struct{} `type:"structure"` - // The GLOB wildcard for removing the attributes in the application + // An array of the attributes to remove from all the endpoints that are associated + // with the application. The array can specify the complete, exact name of each + // attribute to remove or it can specify a glob pattern that an attribute name + // must match in order for the attribute to be removed. Blacklist []*string `type:"list"` } @@ -20047,7 +28237,8 @@ type UpdateBaiduChannelInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Baidu Cloud Push credentials + // Specifies the status and settings of the Baidu (Baidu Cloud Push) channel + // for an application. // // BaiduChannelRequest is a required field BaiduChannelRequest *BaiduChannelRequest `type:"structure" required:"true"` @@ -20075,6 +28266,11 @@ func (s *UpdateBaiduChannelInput) Validate() error { if s.BaiduChannelRequest == nil { invalidParams.Add(request.NewErrParamRequired("BaiduChannelRequest")) } + if s.BaiduChannelRequest != nil { + if err := s.BaiduChannelRequest.Validate(); err != nil { + invalidParams.AddNested("BaiduChannelRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20097,7 +28293,8 @@ func (s *UpdateBaiduChannelInput) SetBaiduChannelRequest(v *BaiduChannelRequest) type UpdateBaiduChannelOutput struct { _ struct{} `type:"structure" payload:"BaiduChannelResponse"` - // Baidu Cloud Messaging channel definition + // Provides information about the status and settings of the Baidu (Baidu Cloud + // Push) channel for an application. // // BaiduChannelResponse is a required field BaiduChannelResponse *BaiduChannelResponse `type:"structure" required:"true"` @@ -20128,7 +28325,7 @@ type UpdateCampaignInput struct { // CampaignId is a required field CampaignId *string `location:"uri" locationName:"campaign-id" type:"string" required:"true"` - // Used to create a campaign. + // Specifies the configuration and other settings for a campaign. // // WriteCampaignRequest is a required field WriteCampaignRequest *WriteCampaignRequest `type:"structure" required:"true"` @@ -20162,6 +28359,11 @@ func (s *UpdateCampaignInput) Validate() error { if s.WriteCampaignRequest == nil { invalidParams.Add(request.NewErrParamRequired("WriteCampaignRequest")) } + if s.WriteCampaignRequest != nil { + if err := s.WriteCampaignRequest.Validate(); err != nil { + invalidParams.AddNested("WriteCampaignRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20190,7 +28392,8 @@ func (s *UpdateCampaignInput) SetWriteCampaignRequest(v *WriteCampaignRequest) * type UpdateCampaignOutput struct { _ struct{} `type:"structure" payload:"CampaignResponse"` - // Campaign definition + // Provides information about the status, configuration, and other settings + // for a campaign. 
// // CampaignResponse is a required field CampaignResponse *CampaignResponse `type:"structure" required:"true"` @@ -20218,7 +28421,7 @@ type UpdateEmailChannelInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Email Channel Request + // Specifies the status and settings of the email channel for an application. // // EmailChannelRequest is a required field EmailChannelRequest *EmailChannelRequest `type:"structure" required:"true"` @@ -20246,6 +28449,11 @@ func (s *UpdateEmailChannelInput) Validate() error { if s.EmailChannelRequest == nil { invalidParams.Add(request.NewErrParamRequired("EmailChannelRequest")) } + if s.EmailChannelRequest != nil { + if err := s.EmailChannelRequest.Validate(); err != nil { + invalidParams.AddNested("EmailChannelRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20268,25 +28476,105 @@ func (s *UpdateEmailChannelInput) SetEmailChannelRequest(v *EmailChannelRequest) type UpdateEmailChannelOutput struct { _ struct{} `type:"structure" payload:"EmailChannelResponse"` - // Email Channel Response. + // Provides information about the status and settings of the email channel for + // an application. // // EmailChannelResponse is a required field EmailChannelResponse *EmailChannelResponse `type:"structure" required:"true"` } // String returns the string representation -func (s UpdateEmailChannelOutput) String() string { +func (s UpdateEmailChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEmailChannelOutput) GoString() string { + return s.String() +} + +// SetEmailChannelResponse sets the EmailChannelResponse field's value. +func (s *UpdateEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *UpdateEmailChannelOutput { + s.EmailChannelResponse = v + return s +} + +type UpdateEmailTemplateInput struct { + _ struct{} `type:"structure" payload:"EmailTemplateRequest"` + + // Specifies the content and settings for a message template that can be used + // in messages that are sent through the email channel. + // + // EmailTemplateRequest is a required field + EmailTemplateRequest *EmailTemplateRequest `type:"structure" required:"true"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEmailTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEmailTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateEmailTemplateInput"} + if s.EmailTemplateRequest == nil { + invalidParams.Add(request.NewErrParamRequired("EmailTemplateRequest")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmailTemplateRequest sets the EmailTemplateRequest field's value. 
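// Editorial sketch (not part of the upstream diff): updating an email template
// by name via the UpdateEmailTemplate types defined above. The template name
// and contents are hypothetical placeholders, and the EmailTemplateRequest
// field names (Subject, TextPart) are assumed from this SDK version.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleUpdateEmailTemplate(svc *pinpoint.Pinpoint) error {
	_, err := svc.UpdateEmailTemplate(&pinpoint.UpdateEmailTemplateInput{
		TemplateName: aws.String("welcome-email"),
		EmailTemplateRequest: &pinpoint.EmailTemplateRequest{
			Subject:  aws.String("Welcome!"),
			TextPart: aws.String("Hello and welcome."),
		},
	})
	return err
}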
+func (s *UpdateEmailTemplateInput) SetEmailTemplateRequest(v *EmailTemplateRequest) *UpdateEmailTemplateInput { + s.EmailTemplateRequest = v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *UpdateEmailTemplateInput) SetTemplateName(v string) *UpdateEmailTemplateInput { + s.TemplateName = &v + return s +} + +type UpdateEmailTemplateOutput struct { + _ struct{} `type:"structure" payload:"MessageBody"` + + // Provides information about an API request or response. + // + // MessageBody is a required field + MessageBody *MessageBody `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateEmailTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateEmailChannelOutput) GoString() string { +func (s UpdateEmailTemplateOutput) GoString() string { return s.String() } -// SetEmailChannelResponse sets the EmailChannelResponse field's value. -func (s *UpdateEmailChannelOutput) SetEmailChannelResponse(v *EmailChannelResponse) *UpdateEmailChannelOutput { - s.EmailChannelResponse = v +// SetMessageBody sets the MessageBody field's value. +func (s *UpdateEmailTemplateOutput) SetMessageBody(v *MessageBody) *UpdateEmailTemplateOutput { + s.MessageBody = v return s } @@ -20299,7 +28587,7 @@ type UpdateEndpointInput struct { // EndpointId is a required field EndpointId *string `location:"uri" locationName:"endpoint-id" type:"string" required:"true"` - // An endpoint update request. + // Specifies the channel type and other settings for an endpoint. // // EndpointRequest is a required field EndpointRequest *EndpointRequest `type:"structure" required:"true"` @@ -20361,7 +28649,7 @@ func (s *UpdateEndpointInput) SetEndpointRequest(v *EndpointRequest) *UpdateEndp type UpdateEndpointOutput struct { _ struct{} `type:"structure" payload:"MessageBody"` - // Simple message object. + // Provides information about an API request or response. // // MessageBody is a required field MessageBody *MessageBody `type:"structure" required:"true"` @@ -20389,7 +28677,8 @@ type UpdateEndpointsBatchInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Endpoint batch update request. + // Specifies a batch of endpoints to create or update and the settings and attributes + // to set or change for each endpoint. // // EndpointBatchRequest is a required field EndpointBatchRequest *EndpointBatchRequest `type:"structure" required:"true"` @@ -20417,6 +28706,11 @@ func (s *UpdateEndpointsBatchInput) Validate() error { if s.EndpointBatchRequest == nil { invalidParams.Add(request.NewErrParamRequired("EndpointBatchRequest")) } + if s.EndpointBatchRequest != nil { + if err := s.EndpointBatchRequest.Validate(); err != nil { + invalidParams.AddNested("EndpointBatchRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20439,7 +28733,7 @@ func (s *UpdateEndpointsBatchInput) SetEndpointBatchRequest(v *EndpointBatchRequ type UpdateEndpointsBatchOutput struct { _ struct{} `type:"structure" payload:"MessageBody"` - // Simple message object. + // Provides information about an API request or response. 
// // MessageBody is a required field MessageBody *MessageBody `type:"structure" required:"true"` @@ -20467,7 +28761,9 @@ type UpdateGcmChannelInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Google Cloud Messaging credentials + // Specifies the status and settings of the GCM channel for an application. + // This channel enables Amazon Pinpoint to send push notifications through the + // Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. // // GCMChannelRequest is a required field GCMChannelRequest *GCMChannelRequest `type:"structure" required:"true"` @@ -20495,6 +28791,11 @@ func (s *UpdateGcmChannelInput) Validate() error { if s.GCMChannelRequest == nil { invalidParams.Add(request.NewErrParamRequired("GCMChannelRequest")) } + if s.GCMChannelRequest != nil { + if err := s.GCMChannelRequest.Validate(); err != nil { + invalidParams.AddNested("GCMChannelRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20517,7 +28818,10 @@ func (s *UpdateGcmChannelInput) SetGCMChannelRequest(v *GCMChannelRequest) *Upda type UpdateGcmChannelOutput struct { _ struct{} `type:"structure" payload:"GCMChannelResponse"` - // Google Cloud Messaging channel definition + // Provides information about the status and settings of the GCM channel for + // an application. The GCM channel enables Amazon Pinpoint to send push notifications + // through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging + // (GCM), service. // // GCMChannelResponse is a required field GCMChannelResponse *GCMChannelResponse `type:"structure" required:"true"` @@ -20539,6 +28843,278 @@ func (s *UpdateGcmChannelOutput) SetGCMChannelResponse(v *GCMChannelResponse) *U return s } +type UpdateJourneyInput struct { + _ struct{} `type:"structure" payload:"WriteJourneyRequest"` + + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` + + // Specifies the configuration and other settings for a journey. + // + // WriteJourneyRequest is a required field + WriteJourneyRequest *WriteJourneyRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateJourneyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateJourneyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
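// Editorial sketch (not part of the upstream diff): updating a single endpoint
// with the UpdateEndpoint operation shown above. The endpoint ID and address
// are hypothetical placeholders; ChannelTypeEmail is the generated ChannelType
// enum value for the email channel.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleUpdateEndpoint(svc *pinpoint.Pinpoint, appID string) error {
	_, err := svc.UpdateEndpoint(&pinpoint.UpdateEndpointInput{
		ApplicationId: aws.String(appID),
		EndpointId:    aws.String("example-endpoint-id"),
		EndpointRequest: (&pinpoint.EndpointRequest{}).
			SetChannelType(pinpoint.ChannelTypeEmail).
			SetAddress("user@example.com"),
	})
	return err
}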
+func (s *UpdateJourneyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateJourneyInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) + } + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) + } + if s.WriteJourneyRequest == nil { + invalidParams.Add(request.NewErrParamRequired("WriteJourneyRequest")) + } + if s.WriteJourneyRequest != nil { + if err := s.WriteJourneyRequest.Validate(); err != nil { + invalidParams.AddNested("WriteJourneyRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *UpdateJourneyInput) SetApplicationId(v string) *UpdateJourneyInput { + s.ApplicationId = &v + return s +} + +// SetJourneyId sets the JourneyId field's value. +func (s *UpdateJourneyInput) SetJourneyId(v string) *UpdateJourneyInput { + s.JourneyId = &v + return s +} + +// SetWriteJourneyRequest sets the WriteJourneyRequest field's value. +func (s *UpdateJourneyInput) SetWriteJourneyRequest(v *WriteJourneyRequest) *UpdateJourneyInput { + s.WriteJourneyRequest = v + return s +} + +type UpdateJourneyOutput struct { + _ struct{} `type:"structure" payload:"JourneyResponse"` + + // Provides information about the status, configuration, and other settings + // for a journey. + // + // JourneyResponse is a required field + JourneyResponse *JourneyResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateJourneyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateJourneyOutput) GoString() string { + return s.String() +} + +// SetJourneyResponse sets the JourneyResponse field's value. +func (s *UpdateJourneyOutput) SetJourneyResponse(v *JourneyResponse) *UpdateJourneyOutput { + s.JourneyResponse = v + return s +} + +type UpdateJourneyStateInput struct { + _ struct{} `type:"structure" payload:"JourneyStateRequest"` + + // ApplicationId is a required field + ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` + + // JourneyId is a required field + JourneyId *string `location:"uri" locationName:"journey-id" type:"string" required:"true"` + + // Changes the status of a journey. + // + // JourneyStateRequest is a required field + JourneyStateRequest *JourneyStateRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateJourneyStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateJourneyStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
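// Editorial sketch (not part of the upstream diff): the nested Validate call
// added above means client-side validation now covers the journey definition
// itself, not just the URI parameters. A WriteJourneyRequest with only a
// hypothetical name set (Name is assumed to be its required field) shows the
// shape of the call.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleUpdateJourney(svc *pinpoint.Pinpoint, appID, journeyID string) error {
	_, err := svc.UpdateJourney(&pinpoint.UpdateJourneyInput{
		ApplicationId:       aws.String(appID),
		JourneyId:           aws.String(journeyID),
		WriteJourneyRequest: (&pinpoint.WriteJourneyRequest{}).SetName("example-journey"),
	})
	return err
}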
+func (s *UpdateJourneyStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateJourneyStateInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.JourneyId == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyId")) + } + if s.JourneyId != nil && len(*s.JourneyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JourneyId", 1)) + } + if s.JourneyStateRequest == nil { + invalidParams.Add(request.NewErrParamRequired("JourneyStateRequest")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *UpdateJourneyStateInput) SetApplicationId(v string) *UpdateJourneyStateInput { + s.ApplicationId = &v + return s +} + +// SetJourneyId sets the JourneyId field's value. +func (s *UpdateJourneyStateInput) SetJourneyId(v string) *UpdateJourneyStateInput { + s.JourneyId = &v + return s +} + +// SetJourneyStateRequest sets the JourneyStateRequest field's value. +func (s *UpdateJourneyStateInput) SetJourneyStateRequest(v *JourneyStateRequest) *UpdateJourneyStateInput { + s.JourneyStateRequest = v + return s +} + +type UpdateJourneyStateOutput struct { + _ struct{} `type:"structure" payload:"JourneyResponse"` + + // Provides information about the status, configuration, and other settings + // for a journey. + // + // JourneyResponse is a required field + JourneyResponse *JourneyResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateJourneyStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateJourneyStateOutput) GoString() string { + return s.String() +} + +// SetJourneyResponse sets the JourneyResponse field's value. +func (s *UpdateJourneyStateOutput) SetJourneyResponse(v *JourneyResponse) *UpdateJourneyStateOutput { + s.JourneyResponse = v + return s +} + +type UpdatePushTemplateInput struct { + _ struct{} `type:"structure" payload:"PushNotificationTemplateRequest"` + + // Specifies the content and settings for a message template that can be used + // in messages that are sent through a push notification channel. + // + // PushNotificationTemplateRequest is a required field + PushNotificationTemplateRequest *PushNotificationTemplateRequest `type:"structure" required:"true"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePushTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePushTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
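// Editorial sketch (not part of the upstream diff): changing a journey's
// status through the JourneyStateRequest payload, assuming the generated State
// enum constant StateCancelled ("CANCELLED") from this SDK version.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleCancelJourney(svc *pinpoint.Pinpoint, appID, journeyID string) error {
	_, err := svc.UpdateJourneyState(&pinpoint.UpdateJourneyStateInput{
		ApplicationId: aws.String(appID),
		JourneyId:     aws.String(journeyID),
		JourneyStateRequest: (&pinpoint.JourneyStateRequest{}).
			SetState(pinpoint.StateCancelled),
	})
	return err
}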
+func (s *UpdatePushTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePushTemplateInput"} + if s.PushNotificationTemplateRequest == nil { + invalidParams.Add(request.NewErrParamRequired("PushNotificationTemplateRequest")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPushNotificationTemplateRequest sets the PushNotificationTemplateRequest field's value. +func (s *UpdatePushTemplateInput) SetPushNotificationTemplateRequest(v *PushNotificationTemplateRequest) *UpdatePushTemplateInput { + s.PushNotificationTemplateRequest = v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *UpdatePushTemplateInput) SetTemplateName(v string) *UpdatePushTemplateInput { + s.TemplateName = &v + return s +} + +type UpdatePushTemplateOutput struct { + _ struct{} `type:"structure" payload:"MessageBody"` + + // Provides information about an API request or response. + // + // MessageBody is a required field + MessageBody *MessageBody `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePushTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePushTemplateOutput) GoString() string { + return s.String() +} + +// SetMessageBody sets the MessageBody field's value. +func (s *UpdatePushTemplateOutput) SetMessageBody(v *MessageBody) *UpdatePushTemplateOutput { + s.MessageBody = v + return s +} + type UpdateSegmentInput struct { _ struct{} `type:"structure" payload:"WriteSegmentRequest"` @@ -20548,7 +29124,9 @@ type UpdateSegmentInput struct { // SegmentId is a required field SegmentId *string `location:"uri" locationName:"segment-id" type:"string" required:"true"` - // Segment definition. + // Specifies the configuration, dimension, and other settings for a segment. + // A WriteSegmentRequest object can include a Dimensions object or a SegmentGroups + // object, but not both. // // WriteSegmentRequest is a required field WriteSegmentRequest *WriteSegmentRequest `type:"structure" required:"true"` @@ -20582,6 +29160,11 @@ func (s *UpdateSegmentInput) Validate() error { if s.WriteSegmentRequest == nil { invalidParams.Add(request.NewErrParamRequired("WriteSegmentRequest")) } + if s.WriteSegmentRequest != nil { + if err := s.WriteSegmentRequest.Validate(); err != nil { + invalidParams.AddNested("WriteSegmentRequest", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20610,7 +29193,8 @@ func (s *UpdateSegmentInput) SetWriteSegmentRequest(v *WriteSegmentRequest) *Upd type UpdateSegmentOutput struct { _ struct{} `type:"structure" payload:"SegmentResponse"` - // Segment definition. + // Provides information about the configuration, dimension, and other settings + // for a segment. // // SegmentResponse is a required field SegmentResponse *SegmentResponse `type:"structure" required:"true"` @@ -20638,33 +29222,113 @@ type UpdateSmsChannelInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // SMS Channel Request + // Specifies the status and settings of the SMS channel for an application. 
+ // + // SMSChannelRequest is a required field + SMSChannelRequest *SMSChannelRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateSmsChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSmsChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSmsChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSmsChannelInput"} + if s.ApplicationId == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationId")) + } + if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + } + if s.SMSChannelRequest == nil { + invalidParams.Add(request.NewErrParamRequired("SMSChannelRequest")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplicationId sets the ApplicationId field's value. +func (s *UpdateSmsChannelInput) SetApplicationId(v string) *UpdateSmsChannelInput { + s.ApplicationId = &v + return s +} + +// SetSMSChannelRequest sets the SMSChannelRequest field's value. +func (s *UpdateSmsChannelInput) SetSMSChannelRequest(v *SMSChannelRequest) *UpdateSmsChannelInput { + s.SMSChannelRequest = v + return s +} + +type UpdateSmsChannelOutput struct { + _ struct{} `type:"structure" payload:"SMSChannelResponse"` + + // Provides information about the status and settings of the SMS channel for + // an application. + // + // SMSChannelResponse is a required field + SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateSmsChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSmsChannelOutput) GoString() string { + return s.String() +} + +// SetSMSChannelResponse sets the SMSChannelResponse field's value. +func (s *UpdateSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *UpdateSmsChannelOutput { + s.SMSChannelResponse = v + return s +} + +type UpdateSmsTemplateInput struct { + _ struct{} `type:"structure" payload:"SMSTemplateRequest"` + + // Specifies the content and settings for a message template that can be used + // in text messages that are sent through the SMS channel. // - // SMSChannelRequest is a required field - SMSChannelRequest *SMSChannelRequest `type:"structure" required:"true"` + // SMSTemplateRequest is a required field + SMSTemplateRequest *SMSTemplateRequest `type:"structure" required:"true"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` } // String returns the string representation -func (s UpdateSmsChannelInput) String() string { +func (s UpdateSmsTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateSmsChannelInput) GoString() string { +func (s UpdateSmsTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
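// Editorial sketch (not part of the upstream diff): enabling the SMS channel
// for an application through the UpdateSmsChannel types defined above; appID
// is a hypothetical application identifier.
// (imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleEnableSmsChannel(svc *pinpoint.Pinpoint, appID string) error {
	_, err := svc.UpdateSmsChannel(&pinpoint.UpdateSmsChannelInput{
		ApplicationId:     aws.String(appID),
		SMSChannelRequest: (&pinpoint.SMSChannelRequest{}).SetEnabled(true),
	})
	return err
}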
-func (s *UpdateSmsChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateSmsChannelInput"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) +func (s *UpdateSmsTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSmsTemplateInput"} + if s.SMSTemplateRequest == nil { + invalidParams.Add(request.NewErrParamRequired("SMSTemplateRequest")) } - if s.ApplicationId != nil && len(*s.ApplicationId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApplicationId", 1)) + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) } - if s.SMSChannelRequest == nil { - invalidParams.Add(request.NewErrParamRequired("SMSChannelRequest")) + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } if invalidParams.Len() > 0 { @@ -20673,40 +29337,40 @@ func (s *UpdateSmsChannelInput) Validate() error { return nil } -// SetApplicationId sets the ApplicationId field's value. -func (s *UpdateSmsChannelInput) SetApplicationId(v string) *UpdateSmsChannelInput { - s.ApplicationId = &v +// SetSMSTemplateRequest sets the SMSTemplateRequest field's value. +func (s *UpdateSmsTemplateInput) SetSMSTemplateRequest(v *SMSTemplateRequest) *UpdateSmsTemplateInput { + s.SMSTemplateRequest = v return s } -// SetSMSChannelRequest sets the SMSChannelRequest field's value. -func (s *UpdateSmsChannelInput) SetSMSChannelRequest(v *SMSChannelRequest) *UpdateSmsChannelInput { - s.SMSChannelRequest = v +// SetTemplateName sets the TemplateName field's value. +func (s *UpdateSmsTemplateInput) SetTemplateName(v string) *UpdateSmsTemplateInput { + s.TemplateName = &v return s } -type UpdateSmsChannelOutput struct { - _ struct{} `type:"structure" payload:"SMSChannelResponse"` +type UpdateSmsTemplateOutput struct { + _ struct{} `type:"structure" payload:"MessageBody"` - // SMS Channel Response. + // Provides information about an API request or response. // - // SMSChannelResponse is a required field - SMSChannelResponse *SMSChannelResponse `type:"structure" required:"true"` + // MessageBody is a required field + MessageBody *MessageBody `type:"structure" required:"true"` } // String returns the string representation -func (s UpdateSmsChannelOutput) String() string { +func (s UpdateSmsTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateSmsChannelOutput) GoString() string { +func (s UpdateSmsTemplateOutput) GoString() string { return s.String() } -// SetSMSChannelResponse sets the SMSChannelResponse field's value. -func (s *UpdateSmsChannelOutput) SetSMSChannelResponse(v *SMSChannelResponse) *UpdateSmsChannelOutput { - s.SMSChannelResponse = v +// SetMessageBody sets the MessageBody field's value. +func (s *UpdateSmsTemplateOutput) SetMessageBody(v *MessageBody) *UpdateSmsTemplateOutput { + s.MessageBody = v return s } @@ -20716,7 +29380,7 @@ type UpdateVoiceChannelInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Voice Channel Request + // Specifies the status and settings of the voice channel for an application. 
// // VoiceChannelRequest is a required field VoiceChannelRequest *VoiceChannelRequest `type:"structure" required:"true"` @@ -20766,7 +29430,8 @@ func (s *UpdateVoiceChannelInput) SetVoiceChannelRequest(v *VoiceChannelRequest) type UpdateVoiceChannelOutput struct { _ struct{} `type:"structure" payload:"VoiceChannelResponse"` - // Voice Channel Response. + // Provides information about the status and settings of the voice channel for + // an application. // // VoiceChannelResponse is a required field VoiceChannelResponse *VoiceChannelResponse `type:"structure" required:"true"` @@ -20788,11 +29453,11 @@ func (s *UpdateVoiceChannelOutput) SetVoiceChannelResponse(v *VoiceChannelRespon return s } -// Voice Channel Request +// Specifies the status and settings of the voice channel for an application. type VoiceChannelRequest struct { _ struct{} `type:"structure"` - // If the channel is enabled for sending messages. + // Specifies whether to enable the voice channel for the application. Enabled *bool `type:"boolean"` } @@ -20812,37 +29477,44 @@ func (s *VoiceChannelRequest) SetEnabled(v bool) *VoiceChannelRequest { return s } -// Voice Channel Response. +// Provides information about the status and settings of the voice channel for +// an application. type VoiceChannelResponse struct { _ struct{} `type:"structure"` - // Application id + // The unique identifier for the application that the voice channel applies + // to. ApplicationId *string `type:"string"` - // The date that the settings were last updated in ISO 8601 format. + // The date and time, in ISO 8601 format, when the voice channel was enabled. CreationDate *string `type:"string"` - // If the channel is enabled for sending messages. + // Specifies whether the voice channel is enabled for the application. Enabled *bool `type:"boolean"` + // (Not used) This property is retained only for backward compatibility. HasCredential *bool `type:"boolean"` - // Channel ID. Not used, only for backwards compatibility. + // (Deprecated) An identifier for the voice channel. This property is retained + // only for backward compatibility. Id *string `type:"string"` - // Is this channel archived + // Specifies whether the voice channel is archived. IsArchived *bool `type:"boolean"` - // Who made the last change + // The user who last modified the voice channel. LastModifiedBy *string `type:"string"` - // Last date this was updated + // The date and time, in ISO 8601 format, when the voice channel was last modified. LastModifiedDate *string `type:"string"` - // Platform type. Will be "Voice" - Platform *string `type:"string"` + // The type of messaging or notification platform for the channel. For the voice + // channel, this value is VOICE. + // + // Platform is a required field + Platform *string `type:"string" required:"true"` - // Version of channel + // The current version of the voice channel. Version *int64 `type:"integer"` } @@ -20916,23 +29588,30 @@ func (s *VoiceChannelResponse) SetVersion(v int64) *VoiceChannelResponse { return s } -// Voice Message. +// Specifies the settings for a one-time voice message that's sent directly +// to an endpoint through the voice channel. type VoiceMessage struct { _ struct{} `type:"structure"` - // The message body of the notification, the email body or the text message. + // The text script for the voice message. Body *string `type:"string"` - // Language of sent message + // The language to use when delivering the message. 
For a list of supported + // languages, see the Amazon Polly Developer Guide (AmazonPollyDG.html). LanguageCode *string `type:"string"` - // Is the number from the pool or messaging service to send from. + // The long code to send the voice message from. This value should be one of + // the dedicated long codes that's assigned to your AWS account. Although it + // isn't required, we recommend that you specify the long code in E.164 format, + // for example +12065550100, to ensure prompt and accurate delivery of the message. OriginationNumber *string `type:"string"` - // Default message substitutions. Can be overridden by individual address substitutions. + // The default message variables to use in the voice message. You can override + // the default variables with individual address variables. Substitutions map[string][]*string `type:"map"` - // Voice ID of sent message. + // The name of the voice to use when delivering the message. For a list of supported + // voices, see the Amazon Polly Developer Guide (AmazonPollyDG.html). VoiceId *string `type:"string"` } @@ -20976,35 +29655,121 @@ func (s *VoiceMessage) SetVoiceId(v string) *VoiceMessage { return s } -// Creating application setting request +// Specifies the settings for a wait activity in a journey. This type of activity +// waits for a certain amount of time or until a specific date and time before +// moving participants to the next activity in a journey. +type WaitActivity struct { + _ struct{} `type:"structure"` + + // The unique identifier for the next activity to perform, after performing + // the wait activity. + NextActivity *string `type:"string"` + + // The amount of time to wait or the date and time when the activity moves participants + // to the next activity in the journey. + WaitTime *WaitTime `type:"structure"` +} + +// String returns the string representation +func (s WaitActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WaitActivity) GoString() string { + return s.String() +} + +// SetNextActivity sets the NextActivity field's value. +func (s *WaitActivity) SetNextActivity(v string) *WaitActivity { + s.NextActivity = &v + return s +} + +// SetWaitTime sets the WaitTime field's value. +func (s *WaitActivity) SetWaitTime(v *WaitTime) *WaitActivity { + s.WaitTime = v + return s +} + +// Specifies a duration or a date and time that indicates when Amazon Pinpoint +// determines whether an activity's conditions have been met or an activity +// moves participants to the next activity in a journey. +type WaitTime struct { + _ struct{} `type:"structure"` + + // The amount of time, as a duration in ISO 8601 format, to wait before determining + // whether the activity's conditions have been met or moving participants to + // the next activity in the journey. + WaitFor *string `type:"string"` + + // The date and time, in ISO 8601 format, when Amazon Pinpoint determines whether + // the activity's conditions have been met or the activity moves participants + // to the next activity in the journey. + WaitUntil *string `type:"string"` +} + +// String returns the string representation +func (s WaitTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WaitTime) GoString() string { + return s.String() +} + +// SetWaitFor sets the WaitFor field's value. +func (s *WaitTime) SetWaitFor(v string) *WaitTime { + s.WaitFor = &v + return s +} + +// SetWaitUntil sets the WaitUntil field's value. 
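// Editorial sketch (not part of the upstream diff): WaitFor takes an ISO 8601
// duration per the WaitTime documentation above, so a three-day pause between
// journey activities can be expressed as follows; "next-activity-id" is a
// hypothetical identifier.
// (import: "github.com/aws/aws-sdk-go/service/pinpoint")
func exampleWaitActivity() *pinpoint.WaitActivity {
	return (&pinpoint.WaitActivity{}).
		SetNextActivity("next-activity-id").
		SetWaitTime((&pinpoint.WaitTime{}).SetWaitFor("P3D")) // wait three days
}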
+func (s *WaitTime) SetWaitUntil(v string) *WaitTime { + s.WaitUntil = &v + return s +} + +// Specifies the default settings for an application. type WriteApplicationSettingsRequest struct { _ struct{} `type:"structure"` - // Default campaign hook information. + // The settings for the AWS Lambda function to use by default as a code hook + // for campaigns in the application. To override these settings for a specific + // campaign, use the Campaign resource to define custom Lambda function settings + // for the campaign. CampaignHook *CampaignHook `type:"structure"` - // The CloudWatchMetrics settings for the app. + // Specifies whether to enable application-related alarms in Amazon CloudWatch. CloudWatchMetricsEnabled *bool `type:"boolean"` - // The limits that apply to each campaign in the project by default. Campaigns - // can also have their own limits, which override the settings at the project - // level. + // The default sending limits for campaigns in the application. To override + // these limits for a specific campaign, use the Campaign resource to define + // custom limits for the campaign. Limits *CampaignLimits `type:"structure"` - // The default quiet time for the app. Campaigns in the app don't send messages - // to endpoints during the quiet time.Note: Make sure that your endpoints include - // the Demographics.Timezone attribute if you plan to enable a quiet time for - // your app. If your endpoints don't include this attribute, they'll receive - // the messages that you send them, even if quiet time is enabled.When you set - // up an app to use quiet time, campaigns in that app don't send messages during - // the time range you specified, as long as all of the following are true:- - // The endpoint includes a valid Demographic.Timezone attribute.- The current - // time in the endpoint's time zone is later than or equal to the time specified - // in the QuietTime.Start attribute for the app (or campaign, if applicable).- - // The current time in the endpoint's time zone is earlier than or equal to - // the time specified in the QuietTime.End attribute for the app (or campaign, - // if applicable).Individual campaigns within the app can have their own quiet - // time settings, which override the quiet time settings at the app level. + // The default quiet time for campaigns and journeys in the application. Quiet + // time is a specific time range when messages aren't sent to endpoints, if + // all the following conditions are met: + // + // * The EndpointDemographic.Timezone property of the endpoint is set to + // a valid value. + // + // * The current time in the endpoint's time zone is later than or equal + // to the time specified by the QuietTime.Start property for the application + // (or a campaign or journey that has custom quiet time settings). + // + // * The current time in the endpoint's time zone is earlier than or equal + // to the time specified by the QuietTime.End property for the application + // (or a campaign or journey that has custom quiet time settings). + // + // If any of the preceding conditions isn't met, the endpoint will receive messages + // from a campaign or journey, even if quiet time is enabled. + // + // To override the default quiet time settings for a specific campaign or journey, + // use the Campaign resource or the Journey resource to define a custom quiet + // time for the campaign or journey. 
QuietTime *QuietTime `type:"structure"` } @@ -21042,52 +29807,58 @@ func (s *WriteApplicationSettingsRequest) SetQuietTime(v *QuietTime) *WriteAppli return s } -// Used to create a campaign. +// Specifies the configuration and other settings for a campaign. type WriteCampaignRequest struct { _ struct{} `type:"structure"` - // Treatments that are defined in addition to the default treatment. + // An array of requests that defines additional treatments for the campaign, + // in addition to the default treatment for the campaign. AdditionalTreatments []*WriteTreatmentResource `type:"list"` - // A description of the campaign. + // The custom description of the campaign. Description *string `type:"string"` - // The allocated percentage of end users who will not receive messages from - // this campaign. + // The allocated percentage of users (segment members) who shouldn't receive + // messages from the campaign. HoldoutPercent *int64 `type:"integer"` - // Campaign hook information. + // The settings for the AWS Lambda function to use as a code hook for the campaign. Hook *CampaignHook `type:"structure"` - // Indicates whether the campaign is paused. A paused campaign does not send - // messages unless you resume it by setting IsPaused to false. + // Specifies whether to pause the campaign. A paused campaign doesn't run unless + // you resume it by setting this value to false. IsPaused *bool `type:"boolean"` - // The campaign limits settings. + // The messaging limits for the campaign. Limits *CampaignLimits `type:"structure"` - // The message configuration settings. + // The message configuration settings for the campaign. MessageConfiguration *MessageConfiguration `type:"structure"` // The custom name of the campaign. Name *string `type:"string"` - // The campaign schedule. + // The schedule settings for the campaign. Schedule *Schedule `type:"structure"` - // The ID of the segment to which the campaign sends messages. + // The unique identifier for the segment to associate with the campaign. SegmentId *string `type:"string"` - // The version of the segment to which the campaign sends messages. + // The version of the segment to associate with the campaign. SegmentVersion *int64 `type:"integer"` - // The Tags for the campaign. + // A string-to-string map of key-value pairs that defines the tags to associate + // with the campaign. Each tag consists of a required tag key and an associated + // tag value. Tags map[string]*string `locationName:"tags" type:"map"` - // A custom description for the treatment. + // The message template to use for the campaign. + TemplateConfiguration *TemplateConfiguration `type:"structure"` + + // The custom description of a variation of the campaign to use for A/B testing. TreatmentDescription *string `type:"string"` - // The custom name of a variation of the campaign used for A/B testing. + // The custom name of a variation of the campaign to use for A/B testing. TreatmentName *string `type:"string"` } @@ -21101,6 +29872,36 @@ func (s WriteCampaignRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WriteCampaignRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteCampaignRequest"} + if s.AdditionalTreatments != nil { + for i, v := range s.AdditionalTreatments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalTreatments", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MessageConfiguration != nil { + if err := s.MessageConfiguration.Validate(); err != nil { + invalidParams.AddNested("MessageConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAdditionalTreatments sets the AdditionalTreatments field's value. func (s *WriteCampaignRequest) SetAdditionalTreatments(v []*WriteTreatmentResource) *WriteCampaignRequest { s.AdditionalTreatments = v @@ -21173,6 +29974,12 @@ func (s *WriteCampaignRequest) SetTags(v map[string]*string) *WriteCampaignReque return s } +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *WriteCampaignRequest) SetTemplateConfiguration(v *TemplateConfiguration) *WriteCampaignRequest { + s.TemplateConfiguration = v + return s +} + // SetTreatmentDescription sets the TreatmentDescription field's value. func (s *WriteCampaignRequest) SetTreatmentDescription(v string) *WriteCampaignRequest { s.TreatmentDescription = &v @@ -21185,18 +29992,28 @@ func (s *WriteCampaignRequest) SetTreatmentName(v string) *WriteCampaignRequest return s } -// Request to save an EventStream. +// Specifies the Amazon Resource Name (ARN) of an event stream to publish events +// to and the AWS Identity and Access Management (IAM) role to use when publishing +// those events. type WriteEventStream struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery - // stream to which you want to publish events. Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME - // Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME - DestinationStreamArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon + // Kinesis Data Firehose delivery stream that you want to publish event data + // to. + // + // For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name + // + // For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name + // + // DestinationStreamArn is a required field + DestinationStreamArn *string `type:"string" required:"true"` - // The IAM role that authorizes Amazon Pinpoint to publish events to the stream - // in your account. - RoleArn *string `type:"string"` + // The AWS Identity and Access Management (IAM) role that authorizes Amazon + // Pinpoint to publish event data to the stream in your AWS account. + // + // RoleArn is a required field + RoleArn *string `type:"string" required:"true"` } // String returns the string representation @@ -21209,6 +30026,22 @@ func (s WriteEventStream) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
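+// For example (a sketch, not generated code), calling Validate on a zero
+// value reports both required fields before any request is sent:
+//
+//    err := (&pinpoint.WriteEventStream{}).Validate()
+//    // err is non-nil and names DestinationStreamArn and RoleArn as missing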
+func (s *WriteEventStream) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteEventStream"} + if s.DestinationStreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationStreamArn")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDestinationStreamArn sets the DestinationStreamArn field's value. func (s *WriteEventStream) SetDestinationStreamArn(v string) *WriteEventStream { s.DestinationStreamArn = &v @@ -21221,23 +30054,210 @@ func (s *WriteEventStream) SetRoleArn(v string) *WriteEventStream { return s } -// Segment definition. +// Specifies the configuration and other settings for a journey. +type WriteJourneyRequest struct { + _ struct{} `type:"structure"` + + // The configuration and other settings for the activities that comprise the + // journey. + Activities map[string]*Activity `type:"map"` + + // The date, in ISO 8601 format, when the journey was created. + CreationDate *string `type:"string"` + + // The date, in ISO 8601 format, when the journey was last modified. + LastModifiedDate *string `type:"string"` + + // The messaging and entry limits for the journey. + Limits *JourneyLimits `type:"structure"` + + // Specifies whether the journey's scheduled start and end times use each participant's + // local time. To base the schedule on each participant's local time, set this + // value to true. + LocalTime *bool `type:"boolean"` + + // The name of the journey. A journey name can contain a maximum of 150 characters. + // The characters can be alphanumeric characters or symbols, such as underscores + // (_) or hyphens (-). A journey name can't contain any spaces. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The quiet time settings for the journey. Quiet time is a specific time range + // when a journey doesn't send messages to participants, if all the following + // conditions are met: + // + // * The EndpointDemographic.Timezone property of the endpoint for the participant + // is set to a valid value. + // + // * The current time in the participant's time zone is later than or equal + // to the time specified by the QuietTime.Start property for the journey. + // + // * The current time in the participant's time zone is earlier than or equal + // to the time specified by the QuietTime.End property for the journey. + // + // If any of the preceding conditions isn't met, the participant will receive + // messages from the journey, even if quiet time is enabled. + QuietTime *QuietTime `type:"structure"` + + // The frequency with which Amazon Pinpoint evaluates segment and event data + // for the journey, as a duration in ISO 8601 format. + RefreshFrequency *string `type:"string"` + + // The schedule settings for the journey. + Schedule *JourneySchedule `type:"structure"` + + // The unique identifier for the first activity in the journey. + StartActivity *string `type:"string"` + + // The segment that defines which users are participants in the journey. + StartCondition *StartCondition `type:"structure"` + + // The status of the journey. Valid values are: + // + // * DRAFT - Saves the journey and doesn't publish it. + // + // * ACTIVE - Saves and publishes the journey. Depending on the journey's + // schedule, the journey starts running immediately or at the scheduled start + // time. 
If a journey's status is ACTIVE, you can't add, change, or remove + // activities from it. + // + // The CANCELLED, COMPLETED, and CLOSED values are not supported in requests + // to create or update a journey. To cancel a journey, use the Journey State + // resource. + State *string `type:"string" enum:"State"` +} + +// String returns the string representation +func (s WriteJourneyRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteJourneyRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteJourneyRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteJourneyRequest"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Activities != nil { + for i, v := range s.Activities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Activities", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StartCondition != nil { + if err := s.StartCondition.Validate(); err != nil { + invalidParams.AddNested("StartCondition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActivities sets the Activities field's value. +func (s *WriteJourneyRequest) SetActivities(v map[string]*Activity) *WriteJourneyRequest { + s.Activities = v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *WriteJourneyRequest) SetCreationDate(v string) *WriteJourneyRequest { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *WriteJourneyRequest) SetLastModifiedDate(v string) *WriteJourneyRequest { + s.LastModifiedDate = &v + return s +} + +// SetLimits sets the Limits field's value. +func (s *WriteJourneyRequest) SetLimits(v *JourneyLimits) *WriteJourneyRequest { + s.Limits = v + return s +} + +// SetLocalTime sets the LocalTime field's value. +func (s *WriteJourneyRequest) SetLocalTime(v bool) *WriteJourneyRequest { + s.LocalTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *WriteJourneyRequest) SetName(v string) *WriteJourneyRequest { + s.Name = &v + return s +} + +// SetQuietTime sets the QuietTime field's value. +func (s *WriteJourneyRequest) SetQuietTime(v *QuietTime) *WriteJourneyRequest { + s.QuietTime = v + return s +} + +// SetRefreshFrequency sets the RefreshFrequency field's value. +func (s *WriteJourneyRequest) SetRefreshFrequency(v string) *WriteJourneyRequest { + s.RefreshFrequency = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *WriteJourneyRequest) SetSchedule(v *JourneySchedule) *WriteJourneyRequest { + s.Schedule = v + return s +} + +// SetStartActivity sets the StartActivity field's value. +func (s *WriteJourneyRequest) SetStartActivity(v string) *WriteJourneyRequest { + s.StartActivity = &v + return s +} + +// SetStartCondition sets the StartCondition field's value. +func (s *WriteJourneyRequest) SetStartCondition(v *StartCondition) *WriteJourneyRequest { + s.StartCondition = v + return s +} + +// SetState sets the State field's value. +func (s *WriteJourneyRequest) SetState(v string) *WriteJourneyRequest { + s.State = &v + return s +} + +// Specifies the configuration, dimension, and other settings for a segment. 
+// A WriteSegmentRequest object can include a Dimensions object or a SegmentGroups +// object, but not both. type WriteSegmentRequest struct { _ struct{} `type:"structure"` - // The segment dimensions attributes. + // The criteria that define the dimensions for the segment. Dimensions *SegmentDimensions `type:"structure"` - // The name of segment + // The name of the segment. Name *string `type:"string"` - // A segment group, which consists of zero or more source segments, plus dimensions - // that are applied to those source segments. Your request can only include - // one segment group. Your request can include either a SegmentGroups object - // or a Dimensions object, but not both. + // The segment group to use and the dimensions to apply to the group's base + // segments in order to build the segment. A segment group can consist of zero + // or more base segments. Your request can include only one segment group. SegmentGroups *SegmentGroupList `type:"structure"` - // The Tags for the segments. + // A string-to-string map of key-value pairs that defines the tags to associate + // with the segment. Each tag consists of a required tag key and an associated + // tag value. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -21251,6 +30271,26 @@ func (s WriteSegmentRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteSegmentRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteSegmentRequest"} + if s.Dimensions != nil { + if err := s.Dimensions.Validate(); err != nil { + invalidParams.AddNested("Dimensions", err.(request.ErrInvalidParams)) + } + } + if s.SegmentGroups != nil { + if err := s.SegmentGroups.Validate(); err != nil { + invalidParams.AddNested("SegmentGroups", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDimensions sets the Dimensions field's value. func (s *WriteSegmentRequest) SetDimensions(v *SegmentDimensions) *WriteSegmentRequest { s.Dimensions = v @@ -21275,23 +30315,31 @@ func (s *WriteSegmentRequest) SetTags(v map[string]*string) *WriteSegmentRequest return s } -// Used to create a campaign treatment. +// Specifies the settings for a campaign treatment. A treatment is a variation +// of a campaign that's used for A/B testing of a campaign. type WriteTreatmentResource struct { _ struct{} `type:"structure"` - // The message configuration settings. + // The message configuration settings for the treatment. MessageConfiguration *MessageConfiguration `type:"structure"` - // The campaign schedule. + // The schedule settings for the treatment. Schedule *Schedule `type:"structure"` - // The allocated percentage of users for this treatment. - SizePercent *int64 `type:"integer"` + // The allocated percentage of users (segment members) to send the treatment + // to. + // + // SizePercent is a required field + SizePercent *int64 `type:"integer" required:"true"` + + // The message template to use for the treatment. + TemplateConfiguration *TemplateConfiguration `type:"structure"` - // A custom description for the treatment. + // The custom description of the treatment. TreatmentDescription *string `type:"string"` - // The custom name of a variation of the campaign used for A/B testing. + // The custom name of the treatment. A treatment is a variation of a campaign + // that's used for A/B testing of a campaign. 
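+	//
+	// For illustration (ours, not generated): a treatment claims a slice of the
+	// segment via the required SizePercent field, e.g.
+	//
+	//    t := &pinpoint.WriteTreatmentResource{}
+	//    t.SetSizePercent(10)            // required; send to 10% of segment members
+	//    t.SetTreatmentName("variant-b") // hypothetical treatment name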
TreatmentName *string `type:"string"` } @@ -21305,6 +30353,29 @@ func (s WriteTreatmentResource) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteTreatmentResource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteTreatmentResource"} + if s.SizePercent == nil { + invalidParams.Add(request.NewErrParamRequired("SizePercent")) + } + if s.MessageConfiguration != nil { + if err := s.MessageConfiguration.Validate(); err != nil { + invalidParams.AddNested("MessageConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetMessageConfiguration sets the MessageConfiguration field's value. func (s *WriteTreatmentResource) SetMessageConfiguration(v *MessageConfiguration) *WriteTreatmentResource { s.MessageConfiguration = v @@ -21323,6 +30394,12 @@ func (s *WriteTreatmentResource) SetSizePercent(v int64) *WriteTreatmentResource return s } +// SetTemplateConfiguration sets the TemplateConfiguration field's value. +func (s *WriteTreatmentResource) SetTemplateConfiguration(v *TemplateConfiguration) *WriteTreatmentResource { + s.TemplateConfiguration = v + return s +} + // SetTreatmentDescription sets the TreatmentDescription field's value. func (s *WriteTreatmentResource) SetTreatmentDescription(v string) *WriteTreatmentResource { s.TreatmentDescription = &v @@ -21505,12 +30582,18 @@ const ( // JobStatusCreated is a JobStatus enum value JobStatusCreated = "CREATED" + // JobStatusPreparingForInitialization is a JobStatus enum value + JobStatusPreparingForInitialization = "PREPARING_FOR_INITIALIZATION" + // JobStatusInitializing is a JobStatus enum value JobStatusInitializing = "INITIALIZING" // JobStatusProcessing is a JobStatus enum value JobStatusProcessing = "PROCESSING" + // JobStatusPendingJob is a JobStatus enum value + JobStatusPendingJob = "PENDING_JOB" + // JobStatusCompleting is a JobStatus enum value JobStatusCompleting = "COMPLETING" @@ -21540,6 +30623,14 @@ const ( ModeFilter = "FILTER" ) +const ( + // OperatorAll is a Operator enum value + OperatorAll = "ALL" + + // OperatorAny is a Operator enum value + OperatorAny = "ANY" +) + const ( // RecencyTypeActive is a RecencyType enum value RecencyTypeActive = "ACTIVE" @@ -21567,6 +30658,37 @@ const ( SourceTypeNone = "NONE" ) +const ( + // StateDraft is a State enum value + StateDraft = "DRAFT" + + // StateActive is a State enum value + StateActive = "ACTIVE" + + // StateCompleted is a State enum value + StateCompleted = "COMPLETED" + + // StateCancelled is a State enum value + StateCancelled = "CANCELLED" + + // StateClosed is a State enum value + StateClosed = "CLOSED" +) + +const ( + // TemplateTypeEmail is a TemplateType enum value + TemplateTypeEmail = "EMAIL" + + // TemplateTypeSms is a TemplateType enum value + TemplateTypeSms = "SMS" + + // TemplateTypeVoice is a TemplateType enum value + TemplateTypeVoice = "VOICE" + + // TemplateTypePush is a TemplateType enum value + TemplateTypePush = "PUSH" +) + const ( // TypeAll is a Type enum value TypeAll = "ALL" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/doc.go index f41551af66e..93d640dfb7c 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/doc.go @@ -3,6 +3,8 @@ // Package pinpoint provides the client and types for making API // requests to Amazon Pinpoint. // +// Doc Engage API - Amazon Pinpoint API +// // See https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01 for more information on this service. // // See pinpoint package documentation for more information. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go index 7bd7525b6a6..e70fd0b8bf9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go @@ -7,36 +7,36 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeBadRequestException = "BadRequestException" // ErrCodeForbiddenException for service response error code // "ForbiddenException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeForbiddenException = "ForbiddenException" // ErrCodeInternalServerErrorException for service response error code // "InternalServerErrorException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeInternalServerErrorException = "InternalServerErrorException" // ErrCodeMethodNotAllowedException for service response error code // "MethodNotAllowedException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeMethodNotAllowedException = "MethodNotAllowedException" // ErrCodeNotFoundException for service response error code // "NotFoundException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeNotFoundException = "NotFoundException" // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". // - // Simple message object. + // Provides information about an API request or response. ErrCodeTooManyRequestsException = "TooManyRequestsException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go index 487964c89cc..2a27113746c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pinpoint { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mobiletargeting" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pinpoint { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pinpoint { svc := &Pinpoint{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-12-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go index 395d8c6e50d..e85726fe6f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go @@ -124,7 +124,7 @@ func (c *Pricing) DescribeServicesWithContext(ctx aws.Context, input *DescribeSe // // Example iterating over at most 3 pages of a DescribeServices operation. // pageNum := 0 // err := client.DescribeServicesPages(params, -// func(page *DescribeServicesOutput, lastPage bool) bool { +// func(page *pricing.DescribeServicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -156,10 +156,12 @@ func (c *Pricing) DescribeServicesPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeServicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeServicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -275,7 +277,7 @@ func (c *Pricing) GetAttributeValuesWithContext(ctx aws.Context, input *GetAttri // // Example iterating over at most 3 pages of a GetAttributeValues operation. // pageNum := 0 // err := client.GetAttributeValuesPages(params, -// func(page *GetAttributeValuesOutput, lastPage bool) bool { +// func(page *pricing.GetAttributeValuesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -307,10 +309,12 @@ func (c *Pricing) GetAttributeValuesPagesWithContext(ctx aws.Context, input *Get }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetAttributeValuesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetAttributeValuesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -423,7 +427,7 @@ func (c *Pricing) GetProductsWithContext(ctx aws.Context, input *GetProductsInpu // // Example iterating over at most 3 pages of a GetProducts operation. // pageNum := 0 // err := client.GetProductsPages(params, -// func(page *GetProductsOutput, lastPage bool) bool { +// func(page *pricing.GetProductsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -455,15 +459,17 @@ func (c *Pricing) GetProductsPagesWithContext(ctx aws.Context, input *GetProduct }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetProductsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetProductsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } // The values of a given attribute, such as Throughput Optimized HDD or Provisioned -// IOPS for the Amazon EC2volumeType attribute. +// IOPS for the Amazon EC2 volumeType attribute. 
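+//
+// For illustration (a sketch, not part of the generated docs): attribute
+// values like these are retrieved with GetAttributeValues, where client is
+// an assumed *pricing.Pricing, e.g.
+//
+//    out, err := client.GetAttributeValues(&pricing.GetAttributeValuesInput{
+//        ServiceCode:   aws.String("AmazonEC2"),
+//        AttributeName: aws.String("volumeType"),
+//    })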
type AttributeValue struct { _ struct{} `type:"structure"` @@ -758,7 +764,7 @@ type GetAttributeValuesOutput struct { _ struct{} `type:"structure"` // The list of values for an attribute. For example, Throughput Optimized HDD - // and Provisioned IOPS are two available values for the AmazonEC2volumeType. + // and Provisioned IOPS are two available values for the AmazonEC2 volumeType. AttributeValues []*AttributeValue `type:"list"` // The pagination token that indicates the next set of results to retrieve. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/doc.go index 0555bc4c3cb..50e997ecdd6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/doc.go @@ -18,7 +18,7 @@ // you can use GetAttributeValues to see what values are available for an attribute. // With the service code and an attribute name and value, you can use GetProducts // to find specific products that you're interested in, such as an AmazonEC2 -// instance, with a Provisioned IOPSvolumeType. +// instance, with a Provisioned IOPS volumeType. // // Service Endpoint // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go index 90ff33d0a08..90f54acd02f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pricing { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "pricing" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pricing { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pricing { svc := &Pricing{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-15", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go new file mode 100644 index 00000000000..a332e1d761c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go @@ -0,0 +1,3364 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package qldb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateLedger = "CreateLedger" + +// CreateLedgerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLedger operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLedger for more information on using the CreateLedger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLedgerRequest method. +// req, resp := client.CreateLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/CreateLedger +func (c *QLDB) CreateLedgerRequest(input *CreateLedgerInput) (req *request.Request, output *CreateLedgerOutput) { + op := &request.Operation{ + Name: opCreateLedger, + HTTPMethod: "POST", + HTTPPath: "/ledgers", + } + + if input == nil { + input = &CreateLedgerInput{} + } + + output = &CreateLedgerOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLedger API operation for Amazon QLDB. +// +// Creates a new ledger in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation CreateLedger for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// You have reached the limit on the maximum number of resources allowed. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource can't be modified at this time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/CreateLedger +func (c *QLDB) CreateLedger(input *CreateLedgerInput) (*CreateLedgerOutput, error) { + req, out := c.CreateLedgerRequest(input) + return out, req.Send() +} + +// CreateLedgerWithContext is the same as CreateLedger with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) CreateLedgerWithContext(ctx aws.Context, input *CreateLedgerInput, opts ...request.Option) (*CreateLedgerOutput, error) { + req, out := c.CreateLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLedger = "DeleteLedger" + +// DeleteLedgerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLedger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLedger for more information on using the DeleteLedger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLedgerRequest method. +// req, resp := client.DeleteLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DeleteLedger +func (c *QLDB) DeleteLedgerRequest(input *DeleteLedgerInput) (req *request.Request, output *DeleteLedgerOutput) { + op := &request.Operation{ + Name: opDeleteLedger, + HTTPMethod: "DELETE", + HTTPPath: "/ledgers/{name}", + } + + if input == nil { + input = &DeleteLedgerInput{} + } + + output = &DeleteLedgerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLedger API operation for Amazon QLDB. +// +// Deletes a ledger and all of its contents. This action is irreversible. +// +// If deletion protection is enabled, you must first disable it before you can +// delete the ledger using the QLDB API or the AWS Command Line Interface (AWS +// CLI). You can disable it by calling the UpdateLedger operation to set the +// flag to false. The QLDB console disables deletion protection for you when +// you use it to delete a ledger. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation DeleteLedger for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource can't be modified at this time. +// +// * ErrCodeResourcePreconditionNotMetException "ResourcePreconditionNotMetException" +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DeleteLedger +func (c *QLDB) DeleteLedger(input *DeleteLedgerInput) (*DeleteLedgerOutput, error) { + req, out := c.DeleteLedgerRequest(input) + return out, req.Send() +} + +// DeleteLedgerWithContext is the same as DeleteLedger with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) DeleteLedgerWithContext(ctx aws.Context, input *DeleteLedgerInput, opts ...request.Option) (*DeleteLedgerOutput, error) { + req, out := c.DeleteLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeJournalS3Export = "DescribeJournalS3Export" + +// DescribeJournalS3ExportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeJournalS3Export operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeJournalS3Export for more information on using the DescribeJournalS3Export +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeJournalS3ExportRequest method. +// req, resp := client.DescribeJournalS3ExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeJournalS3Export +func (c *QLDB) DescribeJournalS3ExportRequest(input *DescribeJournalS3ExportInput) (req *request.Request, output *DescribeJournalS3ExportOutput) { + op := &request.Operation{ + Name: opDescribeJournalS3Export, + HTTPMethod: "GET", + HTTPPath: "/ledgers/{name}/journal-s3-exports/{exportId}", + } + + if input == nil { + input = &DescribeJournalS3ExportInput{} + } + + output = &DescribeJournalS3ExportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeJournalS3Export API operation for Amazon QLDB. +// +// Returns information about a journal export job, including the ledger name, +// export ID, when it was created, current status, and its start and end time +// export parameters. +// +// If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException. +// +// If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation DescribeJournalS3Export for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeJournalS3Export +func (c *QLDB) DescribeJournalS3Export(input *DescribeJournalS3ExportInput) (*DescribeJournalS3ExportOutput, error) { + req, out := c.DescribeJournalS3ExportRequest(input) + return out, req.Send() +} + +// DescribeJournalS3ExportWithContext is the same as DescribeJournalS3Export with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeJournalS3Export for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QLDB) DescribeJournalS3ExportWithContext(ctx aws.Context, input *DescribeJournalS3ExportInput, opts ...request.Option) (*DescribeJournalS3ExportOutput, error) { + req, out := c.DescribeJournalS3ExportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLedger = "DescribeLedger" + +// DescribeLedgerRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLedger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLedger for more information on using the DescribeLedger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLedgerRequest method. +// req, resp := client.DescribeLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeLedger +func (c *QLDB) DescribeLedgerRequest(input *DescribeLedgerInput) (req *request.Request, output *DescribeLedgerOutput) { + op := &request.Operation{ + Name: opDescribeLedger, + HTTPMethod: "GET", + HTTPPath: "/ledgers/{name}", + } + + if input == nil { + input = &DescribeLedgerInput{} + } + + output = &DescribeLedgerOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLedger API operation for Amazon QLDB. +// +// Returns information about a ledger, including its state and when it was created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation DescribeLedger for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeLedger +func (c *QLDB) DescribeLedger(input *DescribeLedgerInput) (*DescribeLedgerOutput, error) { + req, out := c.DescribeLedgerRequest(input) + return out, req.Send() +} + +// DescribeLedgerWithContext is the same as DescribeLedger with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) DescribeLedgerWithContext(ctx aws.Context, input *DescribeLedgerInput, opts ...request.Option) (*DescribeLedgerOutput, error) { + req, out := c.DescribeLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opExportJournalToS3 = "ExportJournalToS3" + +// ExportJournalToS3Request generates a "aws/request.Request" representing the +// client's request for the ExportJournalToS3 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportJournalToS3 for more information on using the ExportJournalToS3 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportJournalToS3Request method. +// req, resp := client.ExportJournalToS3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ExportJournalToS3 +func (c *QLDB) ExportJournalToS3Request(input *ExportJournalToS3Input) (req *request.Request, output *ExportJournalToS3Output) { + op := &request.Operation{ + Name: opExportJournalToS3, + HTTPMethod: "POST", + HTTPPath: "/ledgers/{name}/journal-s3-exports", + } + + if input == nil { + input = &ExportJournalToS3Input{} + } + + output = &ExportJournalToS3Output{} + req = c.newRequest(op, input, output) + return +} + +// ExportJournalToS3 API operation for Amazon QLDB. +// +// Exports journal contents within a date and time range from a ledger into +// a specified Amazon Simple Storage Service (Amazon S3) bucket. The data is +// written as files in Amazon Ion format. +// +// If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException. +// +// If the ledger with the given Name is in CREATING status, then throws ResourcePreconditionNotMetException. +// +// You can initiate up to two concurrent journal export requests for each ledger. +// Beyond this limit, journal export requests throw LimitExceededException. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ExportJournalToS3 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// * ErrCodeResourcePreconditionNotMetException "ResourcePreconditionNotMetException" +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ExportJournalToS3 +func (c *QLDB) ExportJournalToS3(input *ExportJournalToS3Input) (*ExportJournalToS3Output, error) { + req, out := c.ExportJournalToS3Request(input) + return out, req.Send() +} + +// ExportJournalToS3WithContext is the same as ExportJournalToS3 with the addition of +// the ability to pass a context and additional request options. +// +// See ExportJournalToS3 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ExportJournalToS3WithContext(ctx aws.Context, input *ExportJournalToS3Input, opts ...request.Option) (*ExportJournalToS3Output, error) { + req, out := c.ExportJournalToS3Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBlock = "GetBlock" + +// GetBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBlock for more information on using the GetBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBlockRequest method. +// req, resp := client.GetBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetBlock +func (c *QLDB) GetBlockRequest(input *GetBlockInput) (req *request.Request, output *GetBlockOutput) { + op := &request.Operation{ + Name: opGetBlock, + HTTPMethod: "POST", + HTTPPath: "/ledgers/{name}/block", + } + + if input == nil { + input = &GetBlockInput{} + } + + output = &GetBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBlock API operation for Amazon QLDB. +// +// Returns a journal block object at a specified address in a ledger. Also returns +// a proof of the specified block for verification if DigestTipAddress is provided. +// +// If the specified ledger doesn't exist or is in DELETING status, then throws +// ResourceNotFoundException. +// +// If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException. +// +// If no block exists with the specified address, then throws InvalidParameterException. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation GetBlock for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// * ErrCodeResourcePreconditionNotMetException "ResourcePreconditionNotMetException" +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetBlock +func (c *QLDB) GetBlock(input *GetBlockInput) (*GetBlockOutput, error) { + req, out := c.GetBlockRequest(input) + return out, req.Send() +} + +// GetBlockWithContext is the same as GetBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) GetBlockWithContext(ctx aws.Context, input *GetBlockInput, opts ...request.Option) (*GetBlockOutput, error) { + req, out := c.GetBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDigest = "GetDigest" + +// GetDigestRequest generates a "aws/request.Request" representing the +// client's request for the GetDigest operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDigest for more information on using the GetDigest +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDigestRequest method. +// req, resp := client.GetDigestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetDigest +func (c *QLDB) GetDigestRequest(input *GetDigestInput) (req *request.Request, output *GetDigestOutput) { + op := &request.Operation{ + Name: opGetDigest, + HTTPMethod: "POST", + HTTPPath: "/ledgers/{name}/digest", + } + + if input == nil { + input = &GetDigestInput{} + } + + output = &GetDigestOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDigest API operation for Amazon QLDB. +// +// Returns the digest of a ledger at the latest committed block in the journal. +// The response includes a 256-bit hash value and a block address. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation GetDigest for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// * ErrCodeResourcePreconditionNotMetException "ResourcePreconditionNotMetException" +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetDigest +func (c *QLDB) GetDigest(input *GetDigestInput) (*GetDigestOutput, error) { + req, out := c.GetDigestRequest(input) + return out, req.Send() +} + +// GetDigestWithContext is the same as GetDigest with the addition of +// the ability to pass a context and additional request options. +// +// See GetDigest for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QLDB) GetDigestWithContext(ctx aws.Context, input *GetDigestInput, opts ...request.Option) (*GetDigestOutput, error) { + req, out := c.GetDigestRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRevision = "GetRevision" + +// GetRevisionRequest generates a "aws/request.Request" representing the +// client's request for the GetRevision operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRevision for more information on using the GetRevision +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRevisionRequest method. +// req, resp := client.GetRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetRevision +func (c *QLDB) GetRevisionRequest(input *GetRevisionInput) (req *request.Request, output *GetRevisionOutput) { + op := &request.Operation{ + Name: opGetRevision, + HTTPMethod: "POST", + HTTPPath: "/ledgers/{name}/revision", + } + + if input == nil { + input = &GetRevisionInput{} + } + + output = &GetRevisionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRevision API operation for Amazon QLDB. +// +// Returns a revision data object for a specified document ID and block address. +// Also returns a proof of the specified revision for verification if DigestTipAddress +// is provided. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation GetRevision for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// * ErrCodeResourcePreconditionNotMetException "ResourcePreconditionNotMetException" +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/GetRevision +func (c *QLDB) GetRevision(input *GetRevisionInput) (*GetRevisionOutput, error) { + req, out := c.GetRevisionRequest(input) + return out, req.Send() +} + +// GetRevisionWithContext is the same as GetRevision with the addition of +// the ability to pass a context and additional request options. +// +// See GetRevision for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
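+// A sketch of the verification flow (ours, not part of the generated docs):
+// request a digest first, then pass its tip address when fetching a revision
+// so the response includes a proof; client is an assumed *qldb.QLDB, and
+// blockAddr and docID are placeholders from your own query results:
+//
+//    dg, err := client.GetDigest(&qldb.GetDigestInput{Name: aws.String("my-ledger")})
+//    if err == nil {
+//        rev, _ := client.GetRevision(&qldb.GetRevisionInput{
+//            Name:             aws.String("my-ledger"),
+//            BlockAddress:     blockAddr,          // *qldb.ValueHolder (placeholder)
+//            DocumentId:       aws.String(docID),  // docID is a placeholder
+//            DigestTipAddress: dg.DigestTipAddress,
+//        })
+//        _ = rev
+//    }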
+func (c *QLDB) GetRevisionWithContext(ctx aws.Context, input *GetRevisionInput, opts ...request.Option) (*GetRevisionOutput, error) { + req, out := c.GetRevisionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListJournalS3Exports = "ListJournalS3Exports" + +// ListJournalS3ExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListJournalS3Exports operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListJournalS3Exports for more information on using the ListJournalS3Exports +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListJournalS3ExportsRequest method. +// req, resp := client.ListJournalS3ExportsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalS3Exports +func (c *QLDB) ListJournalS3ExportsRequest(input *ListJournalS3ExportsInput) (req *request.Request, output *ListJournalS3ExportsOutput) { + op := &request.Operation{ + Name: opListJournalS3Exports, + HTTPMethod: "GET", + HTTPPath: "/journal-s3-exports", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJournalS3ExportsInput{} + } + + output = &ListJournalS3ExportsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListJournalS3Exports API operation for Amazon QLDB. +// +// Returns an array of journal export job descriptions for all ledgers that +// are associated with the current AWS account and Region. +// +// This action returns a maximum of MaxResults items, and is paginated so that +// you can retrieve all the items by calling ListJournalS3Exports multiple times. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ListJournalS3Exports for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalS3Exports +func (c *QLDB) ListJournalS3Exports(input *ListJournalS3ExportsInput) (*ListJournalS3ExportsOutput, error) { + req, out := c.ListJournalS3ExportsRequest(input) + return out, req.Send() +} + +// ListJournalS3ExportsWithContext is the same as ListJournalS3Exports with the addition of +// the ability to pass a context and additional request options. +// +// See ListJournalS3Exports for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
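+//
+// A sketch of draining all pages manually with NextToken (illustrative; the
+// JournalS3Exports and NextToken output fields are assumed from this API,
+// and the Pages helpers below perform the same loop for you):
+//
+//    var exports []*qldb.JournalS3ExportDescription
+//    in := &qldb.ListJournalS3ExportsInput{MaxResults: aws.Int64(10)}
+//    for {
+//        out, err := svc.ListJournalS3ExportsWithContext(ctx, in)
+//        if err != nil {
+//            return err
+//        }
+//        exports = append(exports, out.JournalS3Exports...)
+//        if out.NextToken == nil {
+//            break
+//        }
+//        in.NextToken = out.NextToken
+//    }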
+func (c *QLDB) ListJournalS3ExportsWithContext(ctx aws.Context, input *ListJournalS3ExportsInput, opts ...request.Option) (*ListJournalS3ExportsOutput, error) { + req, out := c.ListJournalS3ExportsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListJournalS3ExportsPages iterates over the pages of a ListJournalS3Exports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJournalS3Exports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJournalS3Exports operation. +// pageNum := 0 +// err := client.ListJournalS3ExportsPages(params, +// func(page *qldb.ListJournalS3ExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QLDB) ListJournalS3ExportsPages(input *ListJournalS3ExportsInput, fn func(*ListJournalS3ExportsOutput, bool) bool) error { + return c.ListJournalS3ExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJournalS3ExportsPagesWithContext same as ListJournalS3ExportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListJournalS3ExportsPagesWithContext(ctx aws.Context, input *ListJournalS3ExportsInput, fn func(*ListJournalS3ExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJournalS3ExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJournalS3ExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJournalS3ExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListJournalS3ExportsForLedger = "ListJournalS3ExportsForLedger" + +// ListJournalS3ExportsForLedgerRequest generates a "aws/request.Request" representing the +// client's request for the ListJournalS3ExportsForLedger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListJournalS3ExportsForLedger for more information on using the ListJournalS3ExportsForLedger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListJournalS3ExportsForLedgerRequest method. 
+// req, resp := client.ListJournalS3ExportsForLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalS3ExportsForLedger +func (c *QLDB) ListJournalS3ExportsForLedgerRequest(input *ListJournalS3ExportsForLedgerInput) (req *request.Request, output *ListJournalS3ExportsForLedgerOutput) { + op := &request.Operation{ + Name: opListJournalS3ExportsForLedger, + HTTPMethod: "GET", + HTTPPath: "/ledgers/{name}/journal-s3-exports", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJournalS3ExportsForLedgerInput{} + } + + output = &ListJournalS3ExportsForLedgerOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListJournalS3ExportsForLedger API operation for Amazon QLDB. +// +// Returns an array of journal export job descriptions for a specified ledger. +// +// This action returns a maximum of MaxResults items, and is paginated so that +// you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple +// times. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ListJournalS3ExportsForLedger for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalS3ExportsForLedger +func (c *QLDB) ListJournalS3ExportsForLedger(input *ListJournalS3ExportsForLedgerInput) (*ListJournalS3ExportsForLedgerOutput, error) { + req, out := c.ListJournalS3ExportsForLedgerRequest(input) + return out, req.Send() +} + +// ListJournalS3ExportsForLedgerWithContext is the same as ListJournalS3ExportsForLedger with the addition of +// the ability to pass a context and additional request options. +// +// See ListJournalS3ExportsForLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListJournalS3ExportsForLedgerWithContext(ctx aws.Context, input *ListJournalS3ExportsForLedgerInput, opts ...request.Option) (*ListJournalS3ExportsForLedgerOutput, error) { + req, out := c.ListJournalS3ExportsForLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListJournalS3ExportsForLedgerPages iterates over the pages of a ListJournalS3ExportsForLedger operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJournalS3ExportsForLedger method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJournalS3ExportsForLedger operation. 
+// pageNum := 0 +// err := client.ListJournalS3ExportsForLedgerPages(params, +// func(page *qldb.ListJournalS3ExportsForLedgerOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QLDB) ListJournalS3ExportsForLedgerPages(input *ListJournalS3ExportsForLedgerInput, fn func(*ListJournalS3ExportsForLedgerOutput, bool) bool) error { + return c.ListJournalS3ExportsForLedgerPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJournalS3ExportsForLedgerPagesWithContext same as ListJournalS3ExportsForLedgerPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListJournalS3ExportsForLedgerPagesWithContext(ctx aws.Context, input *ListJournalS3ExportsForLedgerInput, fn func(*ListJournalS3ExportsForLedgerOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJournalS3ExportsForLedgerInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJournalS3ExportsForLedgerRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJournalS3ExportsForLedgerOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListLedgers = "ListLedgers" + +// ListLedgersRequest generates a "aws/request.Request" representing the +// client's request for the ListLedgers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListLedgers for more information on using the ListLedgers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListLedgersRequest method. +// req, resp := client.ListLedgersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListLedgers +func (c *QLDB) ListLedgersRequest(input *ListLedgersInput) (req *request.Request, output *ListLedgersOutput) { + op := &request.Operation{ + Name: opListLedgers, + HTTPMethod: "GET", + HTTPPath: "/ledgers", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListLedgersInput{} + } + + output = &ListLedgersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListLedgers API operation for Amazon QLDB. +// +// Returns an array of ledger summaries that are associated with the current +// AWS account and Region. +// +// This action returns a maximum of 100 items and is paginated so that you can +// retrieve all the items by calling ListLedgers multiple times. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ListLedgers for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListLedgers +func (c *QLDB) ListLedgers(input *ListLedgersInput) (*ListLedgersOutput, error) { + req, out := c.ListLedgersRequest(input) + return out, req.Send() +} + +// ListLedgersWithContext is the same as ListLedgers with the addition of +// the ability to pass a context and additional request options. +// +// See ListLedgers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListLedgersWithContext(ctx aws.Context, input *ListLedgersInput, opts ...request.Option) (*ListLedgersOutput, error) { + req, out := c.ListLedgersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListLedgersPages iterates over the pages of a ListLedgers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListLedgers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListLedgers operation. +// pageNum := 0 +// err := client.ListLedgersPages(params, +// func(page *qldb.ListLedgersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QLDB) ListLedgersPages(input *ListLedgersInput, fn func(*ListLedgersOutput, bool) bool) error { + return c.ListLedgersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListLedgersPagesWithContext same as ListLedgersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListLedgersPagesWithContext(ctx aws.Context, input *ListLedgersInput, fn func(*ListLedgersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListLedgersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListLedgersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListLedgersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListTagsForResource +func (c *QLDB) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon QLDB. +// +// Returns all tags for a specified Amazon QLDB resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListTagsForResource +func (c *QLDB) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
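+//
+// For instance, a sketch of attaching a custom header before sending (the
+// header name is illustrative):
+//
+//    req, resp := client.TagResourceRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Trace", "demo")
+//
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }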
+// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/TagResource +func (c *QLDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon QLDB. +// +// Adds one or more tags to a specified Amazon QLDB resource. +// +// A resource can have up to 50 tags. If you try to create more than 50 tags +// for a resource, your request fails and returns an error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/TagResource +func (c *QLDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/UntagResource +func (c *QLDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon QLDB. +// +// Removes one or more tags from a specified Amazon QLDB resource. You can specify +// up to 50 tag keys to remove. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/UntagResource +func (c *QLDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateLedger = "UpdateLedger" + +// UpdateLedgerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLedger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateLedger for more information on using the UpdateLedger +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateLedgerRequest method. 
+// req, resp := client.UpdateLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/UpdateLedger +func (c *QLDB) UpdateLedgerRequest(input *UpdateLedgerInput) (req *request.Request, output *UpdateLedgerOutput) { + op := &request.Operation{ + Name: opUpdateLedger, + HTTPMethod: "PATCH", + HTTPPath: "/ledgers/{name}", + } + + if input == nil { + input = &UpdateLedgerInput{} + } + + output = &UpdateLedgerOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateLedger API operation for Amazon QLDB. +// +// Updates properties on a ledger. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation UpdateLedger for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// One or more parameters in the request aren't valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/UpdateLedger +func (c *QLDB) UpdateLedger(input *UpdateLedgerInput) (*UpdateLedgerOutput, error) { + req, out := c.UpdateLedgerRequest(input) + return out, req.Send() +} + +// UpdateLedgerWithContext is the same as UpdateLedger with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) UpdateLedgerWithContext(ctx aws.Context, input *UpdateLedgerInput, opts ...request.Option) (*UpdateLedgerOutput, error) { + req, out := c.UpdateLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type CreateLedgerInput struct { + _ struct{} `type:"structure"` + + // The flag that prevents a ledger from being deleted by any user. If not provided + // on ledger creation, this feature is enabled (true) by default. + // + // If deletion protection is enabled, you must first disable it before you can + // delete the ledger using the QLDB API or the AWS Command Line Interface (AWS + // CLI). You can disable it by calling the UpdateLedger operation to set the + // flag to false. The QLDB console disables deletion protection for you when + // you use it to delete a ledger. + DeletionProtection *bool `type:"boolean"` + + // The name of the ledger that you want to create. The name must be unique among + // all of your ledgers in the current AWS Region. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The permissions mode to assign to the ledger that you want to create. + // + // PermissionsMode is a required field + PermissionsMode *string `type:"string" required:"true" enum:"PermissionsMode"` + + // The key-value pairs to add as tags to the ledger that you want to create. + // Tag keys are case sensitive. Tag values are case sensitive and can be null. 
+ Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s CreateLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLedgerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.PermissionsMode == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionsMode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *CreateLedgerInput) SetDeletionProtection(v bool) *CreateLedgerInput { + s.DeletionProtection = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateLedgerInput) SetName(v string) *CreateLedgerInput { + s.Name = &v + return s +} + +// SetPermissionsMode sets the PermissionsMode field's value. +func (s *CreateLedgerInput) SetPermissionsMode(v string) *CreateLedgerInput { + s.PermissionsMode = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLedgerInput) SetTags(v map[string]*string) *CreateLedgerInput { + s.Tags = v + return s +} + +type CreateLedgerOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the ledger. + Arn *string `min:"20" type:"string"` + + // The date and time, in epoch time format, when the ledger was created. (Epoch + // time format is the number of seconds elapsed since 12:00:00 AM January 1, + // 1970 UTC.) + CreationDateTime *time.Time `type:"timestamp"` + + // The flag that prevents a ledger from being deleted by any user. If not provided + // on ledger creation, this feature is enabled (true) by default. + // + // If deletion protection is enabled, you must first disable it before you can + // delete the ledger using the QLDB API or the AWS Command Line Interface (AWS + // CLI). You can disable it by calling the UpdateLedger operation to set the + // flag to false. The QLDB console disables deletion protection for you when + // you use it to delete a ledger. + DeletionProtection *bool `type:"boolean"` + + // The name of the ledger. + Name *string `min:"1" type:"string"` + + // The current status of the ledger. + State *string `type:"string" enum:"LedgerState"` +} + +// String returns the string representation +func (s CreateLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLedgerOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateLedgerOutput) SetArn(v string) *CreateLedgerOutput { + s.Arn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *CreateLedgerOutput) SetCreationDateTime(v time.Time) *CreateLedgerOutput { + s.CreationDateTime = &v + return s +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *CreateLedgerOutput) SetDeletionProtection(v bool) *CreateLedgerOutput { + s.DeletionProtection = &v + return s +} + +// SetName sets the Name field's value. 
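+//
+// Setters return the receiver, so fields can be chained when building values;
+// a sketch using the input type above (the permissions-mode constant is
+// assumed for this API version, and the ledger name is illustrative):
+//
+//    in := (&qldb.CreateLedgerInput{}).
+//        SetName("my-ledger").
+//        SetPermissionsMode(qldb.PermissionsModeAllowAll).
+//        SetDeletionProtection(false)
+//
+//    out, err := svc.CreateLedger(in)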
+func (s *CreateLedgerOutput) SetName(v string) *CreateLedgerOutput { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *CreateLedgerOutput) SetState(v string) *CreateLedgerOutput { + s.State = &v + return s +} + +type DeleteLedgerInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger that you want to delete. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLedgerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteLedgerInput) SetName(v string) *DeleteLedgerInput { + s.Name = &v + return s +} + +type DeleteLedgerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLedgerOutput) GoString() string { + return s.String() +} + +type DescribeJournalS3ExportInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the journal export job that you want to describe. + // + // ExportId is a required field + ExportId *string `location:"uri" locationName:"exportId" min:"22" type:"string" required:"true"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeJournalS3ExportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJournalS3ExportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeJournalS3ExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeJournalS3ExportInput"} + if s.ExportId == nil { + invalidParams.Add(request.NewErrParamRequired("ExportId")) + } + if s.ExportId != nil && len(*s.ExportId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("ExportId", 22)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportId sets the ExportId field's value. +func (s *DescribeJournalS3ExportInput) SetExportId(v string) *DescribeJournalS3ExportInput { + s.ExportId = &v + return s +} + +// SetName sets the Name field's value. 
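+//
+// A sketch of describing one export job (the ledger name and the
+// 22-character export ID are placeholders):
+//
+//    out, err := svc.DescribeJournalS3Export(&qldb.DescribeJournalS3ExportInput{
+//        Name:     aws.String("my-ledger"),
+//        ExportId: aws.String("AaEpIcSsRqXsqWeasdjiZb"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.ExportDescription.Status))
+//    }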
+func (s *DescribeJournalS3ExportInput) SetName(v string) *DescribeJournalS3ExportInput { + s.Name = &v + return s +} + +type DescribeJournalS3ExportOutput struct { + _ struct{} `type:"structure"` + + // Information about the journal export job returned by a DescribeJournalS3Export + // request. + // + // ExportDescription is a required field + ExportDescription *JournalS3ExportDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeJournalS3ExportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJournalS3ExportOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeJournalS3ExportOutput) SetExportDescription(v *JournalS3ExportDescription) *DescribeJournalS3ExportOutput { + s.ExportDescription = v + return s +} + +type DescribeLedgerInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger that you want to describe. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLedgerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DescribeLedgerInput) SetName(v string) *DescribeLedgerInput { + s.Name = &v + return s +} + +type DescribeLedgerOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the ledger. + Arn *string `min:"20" type:"string"` + + // The date and time, in epoch time format, when the ledger was created. (Epoch + // time format is the number of seconds elapsed since 12:00:00 AM January 1, + // 1970 UTC.) + CreationDateTime *time.Time `type:"timestamp"` + + // The flag that prevents a ledger from being deleted by any user. If not provided + // on ledger creation, this feature is enabled (true) by default. + // + // If deletion protection is enabled, you must first disable it before you can + // delete the ledger using the QLDB API or the AWS Command Line Interface (AWS + // CLI). You can disable it by calling the UpdateLedger operation to set the + // flag to false. The QLDB console disables deletion protection for you when + // you use it to delete a ledger. + DeletionProtection *bool `type:"boolean"` + + // The name of the ledger. + Name *string `min:"1" type:"string"` + + // The current status of the ledger. + State *string `type:"string" enum:"LedgerState"` +} + +// String returns the string representation +func (s DescribeLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLedgerOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. 
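+//
+// A sketch of polling DescribeLedger until the ledger is active
+// (illustrative: the state constant and the polling interval are
+// assumptions, not generated code):
+//
+//    for {
+//        out, err := svc.DescribeLedger(&qldb.DescribeLedgerInput{
+//            Name: aws.String("my-ledger"),
+//        })
+//        if err != nil {
+//            return err
+//        }
+//        if aws.StringValue(out.State) == qldb.LedgerStateActive {
+//            break
+//        }
+//        time.Sleep(3 * time.Second)
+//    }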
+func (s *DescribeLedgerOutput) SetArn(v string) *DescribeLedgerOutput { + s.Arn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *DescribeLedgerOutput) SetCreationDateTime(v time.Time) *DescribeLedgerOutput { + s.CreationDateTime = &v + return s +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *DescribeLedgerOutput) SetDeletionProtection(v bool) *DescribeLedgerOutput { + s.DeletionProtection = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeLedgerOutput) SetName(v string) *DescribeLedgerOutput { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeLedgerOutput) SetState(v string) *DescribeLedgerOutput { + s.State = &v + return s +} + +type ExportJournalToS3Input struct { + _ struct{} `type:"structure"` + + // The exclusive end date and time for the range of journal contents that you + // want to export. + // + // The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal + // Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z + // + // The ExclusiveEndTime must be less than or equal to the current UTC date and + // time. + // + // ExclusiveEndTime is a required field + ExclusiveEndTime *time.Time `type:"timestamp" required:"true"` + + // The inclusive start date and time for the range of journal contents that + // you want to export. + // + // The InclusiveStartTime must be in ISO 8601 date and time format and in Universal + // Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z + // + // The InclusiveStartTime must be before ExclusiveEndTime. + // + // If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, + // Amazon QLDB defaults it to the ledger's CreationDateTime. + // + // InclusiveStartTime is a required field + InclusiveStartTime *time.Time `type:"timestamp" required:"true"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions + // for a journal export job to do the following: + // + // * Write objects into your Amazon Simple Storage Service (Amazon S3) bucket. + // + // * (Optional) Use your customer master key (CMK) in AWS Key Management + // Service (AWS KMS) for server-side encryption of your exported data. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The configuration settings of the Amazon S3 bucket destination for your export + // request. + // + // S3ExportConfiguration is a required field + S3ExportConfiguration *S3ExportConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExportJournalToS3Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportJournalToS3Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
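+//
+// A sketch of starting an export with this input type (illustrative: the
+// bucket, prefix, role ARN, and time range are placeholders, and the
+// S3ExportConfiguration/S3EncryptionConfiguration field names are assumed
+// from this API version):
+//
+//    out, err := svc.ExportJournalToS3(&qldb.ExportJournalToS3Input{
+//        Name:               aws.String("my-ledger"),
+//        InclusiveStartTime: aws.Time(time.Date(2019, 6, 1, 0, 0, 0, 0, time.UTC)),
+//        ExclusiveEndTime:   aws.Time(time.Date(2019, 6, 13, 21, 36, 34, 0, time.UTC)),
+//        RoleArn:            aws.String("arn:aws:iam::123456789012:role/qldb-export"),
+//        S3ExportConfiguration: &qldb.S3ExportConfiguration{
+//            Bucket: aws.String("my-export-bucket"),
+//            Prefix: aws.String("journal/"),
+//            EncryptionConfiguration: &qldb.S3EncryptionConfiguration{
+//                ObjectEncryptionType: aws.String("SSE_S3"),
+//            },
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.ExportId))
+//    }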
+func (s *ExportJournalToS3Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportJournalToS3Input"} + if s.ExclusiveEndTime == nil { + invalidParams.Add(request.NewErrParamRequired("ExclusiveEndTime")) + } + if s.InclusiveStartTime == nil { + invalidParams.Add(request.NewErrParamRequired("InclusiveStartTime")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.S3ExportConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportConfiguration")) + } + if s.S3ExportConfiguration != nil { + if err := s.S3ExportConfiguration.Validate(); err != nil { + invalidParams.AddNested("S3ExportConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveEndTime sets the ExclusiveEndTime field's value. +func (s *ExportJournalToS3Input) SetExclusiveEndTime(v time.Time) *ExportJournalToS3Input { + s.ExclusiveEndTime = &v + return s +} + +// SetInclusiveStartTime sets the InclusiveStartTime field's value. +func (s *ExportJournalToS3Input) SetInclusiveStartTime(v time.Time) *ExportJournalToS3Input { + s.InclusiveStartTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *ExportJournalToS3Input) SetName(v string) *ExportJournalToS3Input { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *ExportJournalToS3Input) SetRoleArn(v string) *ExportJournalToS3Input { + s.RoleArn = &v + return s +} + +// SetS3ExportConfiguration sets the S3ExportConfiguration field's value. +func (s *ExportJournalToS3Input) SetS3ExportConfiguration(v *S3ExportConfiguration) *ExportJournalToS3Input { + s.S3ExportConfiguration = v + return s +} + +type ExportJournalToS3Output struct { + _ struct{} `type:"structure"` + + // The unique ID that QLDB assigns to each journal export job. + // + // To describe your export request and check the status of the job, you can + // use ExportId to call DescribeJournalS3Export. + // + // ExportId is a required field + ExportId *string `min:"22" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportJournalToS3Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportJournalToS3Output) GoString() string { + return s.String() +} + +// SetExportId sets the ExportId field's value. +func (s *ExportJournalToS3Output) SetExportId(v string) *ExportJournalToS3Output { + s.ExportId = &v + return s +} + +type GetBlockInput struct { + _ struct{} `type:"structure"` + + // The location of the block that you want to request. An address is an Amazon + // Ion structure that has two fields: strandId and sequenceNo. + // + // For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:14} + // + // BlockAddress is a required field + BlockAddress *ValueHolder `type:"structure" required:"true" sensitive:"true"` + + // The latest block location covered by the digest for which to request a proof. + // An address is an Amazon Ion structure that has two fields: strandId and sequenceNo. 
+ // + // For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:49} + DigestTipAddress *ValueHolder `type:"structure" sensitive:"true"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBlockInput"} + if s.BlockAddress == nil { + invalidParams.Add(request.NewErrParamRequired("BlockAddress")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.BlockAddress != nil { + if err := s.BlockAddress.Validate(); err != nil { + invalidParams.AddNested("BlockAddress", err.(request.ErrInvalidParams)) + } + } + if s.DigestTipAddress != nil { + if err := s.DigestTipAddress.Validate(); err != nil { + invalidParams.AddNested("DigestTipAddress", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockAddress sets the BlockAddress field's value. +func (s *GetBlockInput) SetBlockAddress(v *ValueHolder) *GetBlockInput { + s.BlockAddress = v + return s +} + +// SetDigestTipAddress sets the DigestTipAddress field's value. +func (s *GetBlockInput) SetDigestTipAddress(v *ValueHolder) *GetBlockInput { + s.DigestTipAddress = v + return s +} + +// SetName sets the Name field's value. +func (s *GetBlockInput) SetName(v string) *GetBlockInput { + s.Name = &v + return s +} + +type GetBlockOutput struct { + _ struct{} `type:"structure"` + + // The block data object in Amazon Ion format. + // + // Block is a required field + Block *ValueHolder `type:"structure" required:"true" sensitive:"true"` + + // The proof object in Amazon Ion format returned by a GetBlock request. A proof + // contains the list of hash values required to recalculate the specified digest + // using a Merkle tree, starting with the specified block. + Proof *ValueHolder `type:"structure" sensitive:"true"` +} + +// String returns the string representation +func (s GetBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockOutput) GoString() string { + return s.String() +} + +// SetBlock sets the Block field's value. +func (s *GetBlockOutput) SetBlock(v *ValueHolder) *GetBlockOutput { + s.Block = v + return s +} + +// SetProof sets the Proof field's value. +func (s *GetBlockOutput) SetProof(v *ValueHolder) *GetBlockOutput { + s.Proof = v + return s +} + +type GetDigestInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDigestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDigestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
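+//
+// Validation also runs automatically when a request is sent; a sketch of
+// calling it directly (the empty name is deliberately invalid and trips the
+// minimum-length check):
+//
+//    in := &qldb.GetDigestInput{Name: aws.String("")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the MinLen violation on Name
+//    }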
+func (s *GetDigestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDigestInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetDigestInput) SetName(v string) *GetDigestInput { + s.Name = &v + return s +} + +type GetDigestOutput struct { + _ struct{} `type:"structure"` + + // The 256-bit hash value representing the digest returned by a GetDigest request. + // + // Digest is automatically base64 encoded/decoded by the SDK. + // + // Digest is a required field + Digest []byte `min:"32" type:"blob" required:"true"` + + // The latest block location covered by the digest that you requested. An address + // is an Amazon Ion structure that has two fields: strandId and sequenceNo. + // + // DigestTipAddress is a required field + DigestTipAddress *ValueHolder `type:"structure" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s GetDigestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDigestOutput) GoString() string { + return s.String() +} + +// SetDigest sets the Digest field's value. +func (s *GetDigestOutput) SetDigest(v []byte) *GetDigestOutput { + s.Digest = v + return s +} + +// SetDigestTipAddress sets the DigestTipAddress field's value. +func (s *GetDigestOutput) SetDigestTipAddress(v *ValueHolder) *GetDigestOutput { + s.DigestTipAddress = v + return s +} + +type GetRevisionInput struct { + _ struct{} `type:"structure"` + + // The block location of the document revision to be verified. An address is + // an Amazon Ion structure that has two fields: strandId and sequenceNo. + // + // For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:14} + // + // BlockAddress is a required field + BlockAddress *ValueHolder `type:"structure" required:"true" sensitive:"true"` + + // The latest block location covered by the digest for which to request a proof. + // An address is an Amazon Ion structure that has two fields: strandId and sequenceNo. + // + // For example: {strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:49} + DigestTipAddress *ValueHolder `type:"structure" sensitive:"true"` + + // The unique ID of the document to be verified. + // + // DocumentId is a required field + DocumentId *string `min:"22" type:"string" required:"true"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRevisionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetRevisionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRevisionInput"} + if s.BlockAddress == nil { + invalidParams.Add(request.NewErrParamRequired("BlockAddress")) + } + if s.DocumentId == nil { + invalidParams.Add(request.NewErrParamRequired("DocumentId")) + } + if s.DocumentId != nil && len(*s.DocumentId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("DocumentId", 22)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.BlockAddress != nil { + if err := s.BlockAddress.Validate(); err != nil { + invalidParams.AddNested("BlockAddress", err.(request.ErrInvalidParams)) + } + } + if s.DigestTipAddress != nil { + if err := s.DigestTipAddress.Validate(); err != nil { + invalidParams.AddNested("DigestTipAddress", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockAddress sets the BlockAddress field's value. +func (s *GetRevisionInput) SetBlockAddress(v *ValueHolder) *GetRevisionInput { + s.BlockAddress = v + return s +} + +// SetDigestTipAddress sets the DigestTipAddress field's value. +func (s *GetRevisionInput) SetDigestTipAddress(v *ValueHolder) *GetRevisionInput { + s.DigestTipAddress = v + return s +} + +// SetDocumentId sets the DocumentId field's value. +func (s *GetRevisionInput) SetDocumentId(v string) *GetRevisionInput { + s.DocumentId = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetRevisionInput) SetName(v string) *GetRevisionInput { + s.Name = &v + return s +} + +type GetRevisionOutput struct { + _ struct{} `type:"structure"` + + // The proof object in Amazon Ion format returned by a GetRevision request. + // A proof contains the list of hash values that are required to recalculate + // the specified digest using a Merkle tree, starting with the specified document + // revision. + Proof *ValueHolder `type:"structure" sensitive:"true"` + + // The document revision data object in Amazon Ion format. + // + // Revision is a required field + Revision *ValueHolder `type:"structure" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s GetRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRevisionOutput) GoString() string { + return s.String() +} + +// SetProof sets the Proof field's value. +func (s *GetRevisionOutput) SetProof(v *ValueHolder) *GetRevisionOutput { + s.Proof = v + return s +} + +// SetRevision sets the Revision field's value. +func (s *GetRevisionOutput) SetRevision(v *ValueHolder) *GetRevisionOutput { + s.Revision = v + return s +} + +// The information about a journal export job, including the ledger name, export +// ID, when it was created, current status, and its start and end time export +// parameters. +type JournalS3ExportDescription struct { + _ struct{} `type:"structure"` + + // The exclusive end date and time for the range of journal contents that are + // specified in the original export request. + // + // ExclusiveEndTime is a required field + ExclusiveEndTime *time.Time `type:"timestamp" required:"true"` + + // The date and time, in epoch time format, when the export job was created. + // (Epoch time format is the number of seconds elapsed since 12:00:00 AM January + // 1, 1970 UTC.) 
+ // + // ExportCreationTime is a required field + ExportCreationTime *time.Time `type:"timestamp" required:"true"` + + // The unique ID of the journal export job. + // + // ExportId is a required field + ExportId *string `min:"22" type:"string" required:"true"` + + // The inclusive start date and time for the range of journal contents that + // are specified in the original export request. + // + // InclusiveStartTime is a required field + InclusiveStartTime *time.Time `type:"timestamp" required:"true"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions + // for a journal export job to do the following: + // + // * Write objects into your Amazon Simple Storage Service (Amazon S3) bucket. + // + // * (Optional) Use your customer master key (CMK) in AWS Key Management + // Service (AWS KMS) for server-side encryption of your exported data. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Simple Storage Service (Amazon S3) bucket location in which a + // journal export job writes the journal contents. + // + // S3ExportConfiguration is a required field + S3ExportConfiguration *S3ExportConfiguration `type:"structure" required:"true"` + + // The current state of the journal export job. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExportStatus"` +} + +// String returns the string representation +func (s JournalS3ExportDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JournalS3ExportDescription) GoString() string { + return s.String() +} + +// SetExclusiveEndTime sets the ExclusiveEndTime field's value. +func (s *JournalS3ExportDescription) SetExclusiveEndTime(v time.Time) *JournalS3ExportDescription { + s.ExclusiveEndTime = &v + return s +} + +// SetExportCreationTime sets the ExportCreationTime field's value. +func (s *JournalS3ExportDescription) SetExportCreationTime(v time.Time) *JournalS3ExportDescription { + s.ExportCreationTime = &v + return s +} + +// SetExportId sets the ExportId field's value. +func (s *JournalS3ExportDescription) SetExportId(v string) *JournalS3ExportDescription { + s.ExportId = &v + return s +} + +// SetInclusiveStartTime sets the InclusiveStartTime field's value. +func (s *JournalS3ExportDescription) SetInclusiveStartTime(v time.Time) *JournalS3ExportDescription { + s.InclusiveStartTime = &v + return s +} + +// SetLedgerName sets the LedgerName field's value. +func (s *JournalS3ExportDescription) SetLedgerName(v string) *JournalS3ExportDescription { + s.LedgerName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *JournalS3ExportDescription) SetRoleArn(v string) *JournalS3ExportDescription { + s.RoleArn = &v + return s +} + +// SetS3ExportConfiguration sets the S3ExportConfiguration field's value. +func (s *JournalS3ExportDescription) SetS3ExportConfiguration(v *S3ExportConfiguration) *JournalS3ExportDescription { + s.S3ExportConfiguration = v + return s +} + +// SetStatus sets the Status field's value. +func (s *JournalS3ExportDescription) SetStatus(v string) *JournalS3ExportDescription { + s.Status = &v + return s +} + +// Information about a ledger, including its name, state, and when it was created. 
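+// LedgerSummary values are returned in the Ledgers field of a ListLedgersOutput.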
+type LedgerSummary struct { + _ struct{} `type:"structure"` + + // The date and time, in epoch time format, when the ledger was created. (Epoch + // time format is the number of seconds elapsed since 12:00:00 AM January 1, + // 1970 UTC.) + CreationDateTime *time.Time `type:"timestamp"` + + // The name of the ledger. + Name *string `min:"1" type:"string"` + + // The current status of the ledger. + State *string `type:"string" enum:"LedgerState"` +} + +// String returns the string representation +func (s LedgerSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LedgerSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *LedgerSummary) SetCreationDateTime(v time.Time) *LedgerSummary { + s.CreationDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *LedgerSummary) SetName(v string) *LedgerSummary { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *LedgerSummary) SetState(v string) *LedgerSummary { + s.State = &v + return s +} + +type ListJournalS3ExportsForLedgerInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single ListJournalS3ExportsForLedger + // request. (The actual number of results returned might be fewer.) + MaxResults *int64 `location:"querystring" locationName:"max_results" min:"1" type:"integer"` + + // The name of the ledger. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // A pagination token, indicating that you want to retrieve the next page of + // results. If you received a value for NextToken in the response from a previous + // ListJournalS3ExportsForLedger call, then you should use that value as input + // here. + NextToken *string `location:"querystring" locationName:"next_token" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJournalS3ExportsForLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalS3ExportsForLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJournalS3ExportsForLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJournalS3ExportsForLedgerInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListJournalS3ExportsForLedgerInput) SetMaxResults(v int64) *ListJournalS3ExportsForLedgerInput { + s.MaxResults = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListJournalS3ExportsForLedgerInput) SetName(v string) *ListJournalS3ExportsForLedgerInput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
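+// Because each generated setter returns its receiver, setters can be chained
+// when building an input. A minimal sketch (illustrative only):
+//
+//    input := &ListJournalS3ExportsForLedgerInput{}
+//    input.SetName("exampleLedger").SetMaxResults(10)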
+func (s *ListJournalS3ExportsForLedgerInput) SetNextToken(v string) *ListJournalS3ExportsForLedgerInput { + s.NextToken = &v + return s +} + +type ListJournalS3ExportsForLedgerOutput struct { + _ struct{} `type:"structure"` + + // The array of journal export job descriptions that are associated with the + // specified ledger. + JournalS3Exports []*JournalS3ExportDescription `type:"list"` + + // * If NextToken is empty, then the last page of results has been processed + // and there are no more results to be retrieved. + // + // * If NextToken is not empty, then there are more results available. To + // retrieve the next page of results, use the value of NextToken in a subsequent + // ListJournalS3ExportsForLedger call. + NextToken *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJournalS3ExportsForLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalS3ExportsForLedgerOutput) GoString() string { + return s.String() +} + +// SetJournalS3Exports sets the JournalS3Exports field's value. +func (s *ListJournalS3ExportsForLedgerOutput) SetJournalS3Exports(v []*JournalS3ExportDescription) *ListJournalS3ExportsForLedgerOutput { + s.JournalS3Exports = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJournalS3ExportsForLedgerOutput) SetNextToken(v string) *ListJournalS3ExportsForLedgerOutput { + s.NextToken = &v + return s +} + +type ListJournalS3ExportsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single ListJournalS3Exports + // request. (The actual number of results returned might be fewer.) + MaxResults *int64 `location:"querystring" locationName:"max_results" min:"1" type:"integer"` + + // A pagination token, indicating that you want to retrieve the next page of + // results. If you received a value for NextToken in the response from a previous + // ListJournalS3Exports call, then you should use that value as input here. + NextToken *string `location:"querystring" locationName:"next_token" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJournalS3ExportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalS3ExportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJournalS3ExportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJournalS3ExportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListJournalS3ExportsInput) SetMaxResults(v int64) *ListJournalS3ExportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJournalS3ExportsInput) SetNextToken(v string) *ListJournalS3ExportsInput { + s.NextToken = &v + return s +} + +type ListJournalS3ExportsOutput struct { + _ struct{} `type:"structure"` + + // The array of journal export job descriptions for all ledgers that are associated + // with the current AWS account and Region. 
+ JournalS3Exports []*JournalS3ExportDescription `type:"list"` + + // * If NextToken is empty, then the last page of results has been processed + // and there are no more results to be retrieved. + // + // * If NextToken is not empty, then there are more results available. To + // retrieve the next page of results, use the value of NextToken in a subsequent + // ListJournalS3Exports call. + NextToken *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJournalS3ExportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalS3ExportsOutput) GoString() string { + return s.String() +} + +// SetJournalS3Exports sets the JournalS3Exports field's value. +func (s *ListJournalS3ExportsOutput) SetJournalS3Exports(v []*JournalS3ExportDescription) *ListJournalS3ExportsOutput { + s.JournalS3Exports = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJournalS3ExportsOutput) SetNextToken(v string) *ListJournalS3ExportsOutput { + s.NextToken = &v + return s +} + +type ListLedgersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single ListLedgers request. + // (The actual number of results returned might be fewer.) + MaxResults *int64 `location:"querystring" locationName:"max_results" min:"1" type:"integer"` + + // A pagination token, indicating that you want to retrieve the next page of + // results. If you received a value for NextToken in the response from a previous + // ListLedgers call, then you should use that value as input here. + NextToken *string `location:"querystring" locationName:"next_token" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListLedgersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLedgersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListLedgersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLedgersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListLedgersInput) SetMaxResults(v int64) *ListLedgersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListLedgersInput) SetNextToken(v string) *ListLedgersInput { + s.NextToken = &v + return s +} + +type ListLedgersOutput struct { + _ struct{} `type:"structure"` + + // The array of ledger summaries that are associated with the current AWS account + // and Region. + Ledgers []*LedgerSummary `type:"list"` + + // A pagination token, indicating whether there are more results available: + // + // * If NextToken is empty, then the last page of results has been processed + // and there are no more results to be retrieved. + // + // * If NextToken is not empty, then there are more results available. To + // retrieve the next page of results, use the value of NextToken in a subsequent + // ListLedgers call. 
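+	//
+	// A minimal manual pagination sketch (assuming svc is an initialized QLDB
+	// client; illustrative only):
+	//
+	//    input := &ListLedgersInput{}
+	//    for {
+	//        out, err := svc.ListLedgers(input)
+	//        if err != nil {
+	//            break
+	//        }
+	//        // ... consume out.Ledgers ...
+	//        if out.NextToken == nil {
+	//            break
+	//        }
+	//        input.NextToken = out.NextToken
+	//    }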
+ NextToken *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s ListLedgersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLedgersOutput) GoString() string { + return s.String() +} + +// SetLedgers sets the Ledgers field's value. +func (s *ListLedgersOutput) SetLedgers(v []*LedgerSummary) *ListLedgersOutput { + s.Ledgers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListLedgersOutput) SetNextToken(v string) *ListLedgersOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for which you want to list the tags. For example: + // + // arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags that are currently associated with the specified Amazon QLDB resource. + Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// The encryption settings that are used by a journal export job to write data +// in an Amazon Simple Storage Service (Amazon S3) bucket. +type S3EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for a customer master key (CMK) in AWS Key + // Management Service (AWS KMS). + // + // You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType. + // + // KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType. + KmsKeyArn *string `min:"20" type:"string"` + + // The Amazon S3 object encryption type. + // + // To learn more about server-side encryption options in Amazon S3, see Protecting + // Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 Developer Guide. 
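+	//
+	// Valid values correspond to the S3ObjectEncryptionType enum values at the
+	// bottom of this file: SSE_KMS, SSE_S3, and NO_ENCRYPTION.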
+ // + // ObjectEncryptionType is a required field + ObjectEncryptionType *string `type:"string" required:"true" enum:"S3ObjectEncryptionType"` +} + +// String returns the string representation +func (s S3EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3EncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3EncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3EncryptionConfiguration"} + if s.KmsKeyArn != nil && len(*s.KmsKeyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyArn", 20)) + } + if s.ObjectEncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectEncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *S3EncryptionConfiguration) SetKmsKeyArn(v string) *S3EncryptionConfiguration { + s.KmsKeyArn = &v + return s +} + +// SetObjectEncryptionType sets the ObjectEncryptionType field's value. +func (s *S3EncryptionConfiguration) SetObjectEncryptionType(v string) *S3EncryptionConfiguration { + s.ObjectEncryptionType = &v + return s +} + +// The Amazon Simple Storage Service (Amazon S3) bucket location in which a +// journal export job writes the journal contents. +type S3ExportConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket name in which a journal export job writes the journal + // contents. + // + // The bucket name must comply with the Amazon S3 bucket naming conventions. + // For more information, see Bucket Restrictions and Limitations (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) + // in the Amazon S3 Developer Guide. + // + // Bucket is a required field + Bucket *string `min:"3" type:"string" required:"true"` + + // The encryption settings that are used by a journal export job to write data + // in an Amazon S3 bucket. + // + // EncryptionConfiguration is a required field + EncryptionConfiguration *S3EncryptionConfiguration `type:"structure" required:"true"` + + // The prefix for the Amazon S3 bucket in which a journal export job writes + // the journal contents. + // + // The prefix must comply with Amazon S3 key naming rules and restrictions. + // For more information, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 Developer Guide. + // + // The following are examples of valid Prefix values: + // + // * JournalExports-ForMyLedger/Testing/ + // + // * JournalExports + // + // * My:Tests/ + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s S3ExportConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ExportConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
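+// Validation recurses into EncryptionConfiguration; a nested violation is
+// attached to the returned request.ErrInvalidParams value via AddNested.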
+func (s *S3ExportConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3ExportConfiguration"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.EncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionConfiguration")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *S3ExportConfiguration) SetBucket(v string) *S3ExportConfiguration { + s.Bucket = &v + return s +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *S3ExportConfiguration) SetEncryptionConfiguration(v *S3EncryptionConfiguration) *S3ExportConfiguration { + s.EncryptionConfiguration = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *S3ExportConfiguration) SetPrefix(v string) *S3ExportConfiguration { + s.Prefix = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) to which you want to add the tags. For example: + // + // arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" min:"20" type:"string" required:"true"` + + // The key-value pairs to add as tags to the specified QLDB resource. Tag keys + // are case sensitive. If you specify a key that already exists for the resource, + // your request fails and returns an error. Tag values are case sensitive and + // can be null. + // + // Tags is a required field + Tags map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. 
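+// Because tag values are *string, literal maps are typically built with the
+// aws.String helper. A minimal sketch (illustrative only):
+//
+//    input := &TagResourceInput{}
+//    input.SetResourceArn("arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger")
+//    input.SetTags(map[string]*string{"Environment": aws.String("test")})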
+func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) from which you want to remove the tags. For + // example: + // + // arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" min:"20" type:"string" required:"true"` + + // The list of tag keys that you want to remove. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateLedgerInput struct { + _ struct{} `type:"structure"` + + // The flag that prevents a ledger from being deleted by any user. If not provided + // on ledger creation, this feature is enabled (true) by default. + // + // If deletion protection is enabled, you must first disable it before you can + // delete the ledger using the QLDB API or the AWS Command Line Interface (AWS + // CLI). You can disable it by calling the UpdateLedger operation to set the + // flag to false. The QLDB console disables deletion protection for you when + // you use it to delete a ledger. + DeletionProtection *bool `type:"boolean"` + + // The name of the ledger. 
+ // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLedgerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *UpdateLedgerInput) SetDeletionProtection(v bool) *UpdateLedgerInput { + s.DeletionProtection = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateLedgerInput) SetName(v string) *UpdateLedgerInput { + s.Name = &v + return s +} + +type UpdateLedgerOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the ledger. + Arn *string `min:"20" type:"string"` + + // The date and time, in epoch time format, when the ledger was created. (Epoch + // time format is the number of seconds elapsed since 12:00:00 AM January 1, + // 1970 UTC.) + CreationDateTime *time.Time `type:"timestamp"` + + // The flag that prevents a ledger from being deleted by any user. If not provided + // on ledger creation, this feature is enabled (true) by default. + // + // If deletion protection is enabled, you must first disable it before you can + // delete the ledger using the QLDB API or the AWS Command Line Interface (AWS + // CLI). You can disable it by calling the UpdateLedger operation to set the + // flag to false. The QLDB console disables deletion protection for you when + // you use it to delete a ledger. + DeletionProtection *bool `type:"boolean"` + + // The name of the ledger. + Name *string `min:"1" type:"string"` + + // The current status of the ledger. + State *string `type:"string" enum:"LedgerState"` +} + +// String returns the string representation +func (s UpdateLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLedgerOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateLedgerOutput) SetArn(v string) *UpdateLedgerOutput { + s.Arn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *UpdateLedgerOutput) SetCreationDateTime(v time.Time) *UpdateLedgerOutput { + s.CreationDateTime = &v + return s +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *UpdateLedgerOutput) SetDeletionProtection(v bool) *UpdateLedgerOutput { + s.DeletionProtection = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateLedgerOutput) SetName(v string) *UpdateLedgerOutput { + s.Name = &v + return s +} + +// SetState sets the State field's value. +func (s *UpdateLedgerOutput) SetState(v string) *UpdateLedgerOutput { + s.State = &v + return s +} + +// A structure that can contain an Amazon Ion value in multiple encoding formats. 
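+// A minimal sketch of supplying a block address as Ion text (illustrative
+// only; the strandId shown is the placeholder value from the field docs above):
+//
+//    addr := &ValueHolder{}
+//    addr.SetIonText(`{strandId:"BlFTjlSXze9BIh1KOszcE3",sequenceNo:14}`)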
+type ValueHolder struct { + _ struct{} `type:"structure" sensitive:"true"` + + // An Amazon Ion plaintext value contained in a ValueHolder structure. + IonText *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s ValueHolder) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValueHolder) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValueHolder) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValueHolder"} + if s.IonText != nil && len(*s.IonText) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IonText", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIonText sets the IonText field's value. +func (s *ValueHolder) SetIonText(v string) *ValueHolder { + s.IonText = &v + return s +} + +const ( + // ExportStatusInProgress is a ExportStatus enum value + ExportStatusInProgress = "IN_PROGRESS" + + // ExportStatusCompleted is a ExportStatus enum value + ExportStatusCompleted = "COMPLETED" + + // ExportStatusCancelled is a ExportStatus enum value + ExportStatusCancelled = "CANCELLED" +) + +const ( + // LedgerStateCreating is a LedgerState enum value + LedgerStateCreating = "CREATING" + + // LedgerStateActive is a LedgerState enum value + LedgerStateActive = "ACTIVE" + + // LedgerStateDeleting is a LedgerState enum value + LedgerStateDeleting = "DELETING" + + // LedgerStateDeleted is a LedgerState enum value + LedgerStateDeleted = "DELETED" +) + +const ( + // PermissionsModeAllowAll is a PermissionsMode enum value + PermissionsModeAllowAll = "ALLOW_ALL" +) + +const ( + // S3ObjectEncryptionTypeSseKms is a S3ObjectEncryptionType enum value + S3ObjectEncryptionTypeSseKms = "SSE_KMS" + + // S3ObjectEncryptionTypeSseS3 is a S3ObjectEncryptionType enum value + S3ObjectEncryptionTypeSseS3 = "SSE_S3" + + // S3ObjectEncryptionTypeNoEncryption is a S3ObjectEncryptionType enum value + S3ObjectEncryptionTypeNoEncryption = "NO_ENCRYPTION" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/doc.go new file mode 100644 index 00000000000..41d3555afa5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/doc.go @@ -0,0 +1,28 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package qldb provides the client and types for making API +// requests to Amazon QLDB. +// +// The control plane for Amazon QLDB +// +// See https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02 for more information on this service. +// +// See qldb package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/qldb/ +// +// Using the Client +// +// To contact Amazon QLDB with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon QLDB client QLDB for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/qldb/#New
+package qldb
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/errors.go
new file mode 100644
index 00000000000..f6d45d03d47
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/errors.go
@@ -0,0 +1,42 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package qldb
+
+const (
+
+	// ErrCodeInvalidParameterException for service response error code
+	// "InvalidParameterException".
+	//
+	// One or more parameters in the request aren't valid.
+	ErrCodeInvalidParameterException = "InvalidParameterException"
+
+	// ErrCodeLimitExceededException for service response error code
+	// "LimitExceededException".
+	//
+	// You have reached the limit on the maximum number of resources allowed.
+	ErrCodeLimitExceededException = "LimitExceededException"
+
+	// ErrCodeResourceAlreadyExistsException for service response error code
+	// "ResourceAlreadyExistsException".
+	//
+	// The specified resource already exists.
+	ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"
+
+	// ErrCodeResourceInUseException for service response error code
+	// "ResourceInUseException".
+	//
+	// The specified resource can't be modified at this time.
+	ErrCodeResourceInUseException = "ResourceInUseException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource doesn't exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeResourcePreconditionNotMetException for service response error code
+	// "ResourcePreconditionNotMetException".
+	//
+	// The operation failed because a condition wasn't satisfied in advance.
+	ErrCodeResourcePreconditionNotMetException = "ResourcePreconditionNotMetException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go
new file mode 100644
index 00000000000..ea80bc02e1f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package qldb
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// QLDB provides the API operation methods for making requests to
+// Amazon QLDB. See this package's package overview docs
+// for details on the service.
+//
+// QLDB methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type QLDB struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "QLDB" // Name of service.
+	EndpointsID = "qldb" // ID to lookup a service endpoint with.
+	ServiceID   = "QLDB" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the QLDB client with a session.
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a QLDB client from just a session. +// svc := qldb.New(mySession) +// +// // Create a QLDB client with additional configuration +// svc := qldb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *QLDB { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "qldb" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QLDB { + svc := &QLDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-01-02", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a QLDB operation and runs any +// custom request initialization. +func (c *QLDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go index e3ae95b7f14..9722c6d884d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go @@ -54,7 +54,7 @@ func (c *QuickSight) CreateGroupRequest(input *CreateGroupInput) (req *request.R // // Creates an Amazon QuickSight group. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The response is a group object. // @@ -169,7 +169,7 @@ func (c *QuickSight) CreateGroupMembershipRequest(input *CreateGroupMembershipIn // // Adds an Amazon QuickSight user to an Amazon QuickSight group. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The condition resource is the user name. // @@ -282,7 +282,7 @@ func (c *QuickSight) DeleteGroupRequest(input *DeleteGroupInput) (req *request.R // // Removes a user group from Amazon QuickSight. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . 
// // CLI Sample: // @@ -390,7 +390,7 @@ func (c *QuickSight) DeleteGroupMembershipRequest(input *DeleteGroupMembershipIn // Removes a user from a group so that the user is no longer a member of the // group. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The condition resource is the user name. // @@ -503,8 +503,7 @@ func (c *QuickSight) DeleteUserRequest(input *DeleteUserInput) (req *request.Req // the AWS Identity and Access Management (IAM) user or role that's making the // call. The IAM user isn't deleted as a result of this call. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ -// . +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . // // CLI Sample: // @@ -606,7 +605,14 @@ func (c *QuickSight) DeleteUserByPrincipalIdRequest(input *DeleteUserByPrincipal // DeleteUserByPrincipalId API operation for Amazon QuickSight. // -// Deletes a user after locating the user by its principal ID. +// Deletes a user identified by its principal ID. +// +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . +// +// CLI Sample: +// +// aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 +// --namespace=default --principal-id=ABCDEFJA26JLI7EUUOEHS // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -706,7 +712,7 @@ func (c *QuickSight) DescribeGroupRequest(input *DescribeGroupInput) (req *reque // Returns an Amazon QuickSight group's description and Amazon Resource Name // (ARN). // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The response is the group object. // @@ -815,7 +821,7 @@ func (c *QuickSight) DescribeUserRequest(input *DescribeUserInput) (req *request // // Returns information about a user, given the user name. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/. +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . // // The response is a user object that contains the user's Amazon Resource Name // (ARN), AWS Identity and Access Management (IAM) role, and email address. @@ -922,7 +928,7 @@ func (c *QuickSight) GetDashboardEmbedUrlRequest(input *GetDashboardEmbedUrlInpu // // Generates a server-side embeddable URL and authorization code. Before this // can work properly, first you need to configure the dashboards and user permissions. -// For more information, see Embedding Amazon QuickSight Dashboards (https://docs.aws.amazon.com/en_us/quicksight/latest/user/embedding.html). +// For more information, see Embedding Amazon QuickSight Dashboards (https://docs.aws.amazon.com/en_us/quicksight/latest/user/embedding.html). // // Currently, you can use GetDashboardEmbedURL only from the server, not from // the user’s browser. @@ -1074,7 +1080,7 @@ func (c *QuickSight) ListGroupMembershipsRequest(input *ListGroupMembershipsInpu // // Lists member users in a group. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The response is a list of group member objects. 
// @@ -1297,7 +1303,7 @@ func (c *QuickSight) ListUserGroupsRequest(input *ListUserGroupsInput) (req *req // Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member // of. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/. +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . // // The response is a one or more group objects. // @@ -1403,7 +1409,7 @@ func (c *QuickSight) ListUsersRequest(input *ListUsersInput) (req *request.Reque // // Returns a list of all of the Amazon QuickSight users belonging to this account. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/*. +// The permission resource is arn:aws:quicksight:us-east-1::user/default/* . // // The response is a list of user objects, containing each user's Amazon Resource // Name (ARN), AWS Identity and Access Management (IAM) role, and email address. @@ -1514,7 +1520,7 @@ func (c *QuickSight) RegisterUserRequest(input *RegisterUserInput) (req *request // AWS Identity and Access Management (IAM) identity or role specified in the // request. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/. +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . // // The condition resource is the Amazon Resource Name (ARN) for the IAM user // or role, and the session name. @@ -1632,7 +1638,7 @@ func (c *QuickSight) UpdateGroupRequest(input *UpdateGroupInput) (req *request.R // // Changes a group description. // -// The permissions resource is arn:aws:quicksight:us-east-1::group/default/. +// The permissions resource is arn:aws:quicksight:us-east-1::group/default/ . // // The response is a group object. // @@ -1741,7 +1747,7 @@ func (c *QuickSight) UpdateUserRequest(input *UpdateUserInput) (req *request.Req // // Updates an Amazon QuickSight user. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/. +// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . // // The response is a user object that contains the user's Amazon QuickSight // user name, email address, active or inactive status in Amazon QuickSight, @@ -2780,6 +2786,20 @@ type GetDashboardEmbedUrlInput struct { // Remove the undo/redo button on embedded dashboard. The default is FALSE, // which enables the undo/redo button. UndoRedoDisabled *bool `location:"querystring" locationName:"undo-redo-disabled" type:"boolean"` + + // The Amazon QuickSight user's ARN, for use with QUICKSIGHT identity type. + // You can use this for any of the following: + // + // * Amazon QuickSight users in your account (readers, authors, or admins) + // + // * AD users + // + // * Invited non-federated users + // + // * Federated IAM users + // + // * Federated IAM role-based sessions + UserArn *string `location:"querystring" locationName:"user-arn" type:"string"` } // String returns the string representation @@ -2856,6 +2876,12 @@ func (s *GetDashboardEmbedUrlInput) SetUndoRedoDisabled(v bool) *GetDashboardEmb return s } +// SetUserArn sets the UserArn field's value. +func (s *GetDashboardEmbedUrlInput) SetUserArn(v string) *GetDashboardEmbedUrlInput { + s.UserArn = &v + return s +} + type GetDashboardEmbedUrlOutput struct { _ struct{} `type:"structure"` @@ -3574,10 +3600,12 @@ type RegisterUserInput struct { // Namespace is a required field Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - // The name of the session with the assumed IAM role. 
By using this parameter, - // you can register multiple users with the same IAM role, provided that each - // has a different session name. For more information on assuming IAM roles, - // see assume-role (https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html) + // You need to use this parameter only when you register one or more users using + // an assumed IAM role. You don't need to provide the session name for other + // scenarios, for example when you are registering an IAM user or an Amazon + // QuickSight user. You can register multiple users using the same IAM role + // if each user has a different session name. For more information on assuming + // IAM roles, see assume-role (https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html) // in the AWS CLI Reference. SessionName *string `min:"2" type:"string"` @@ -4032,9 +4060,9 @@ func (s *UpdateUserOutput) SetUser(v *User) *UpdateUserOutput { type User struct { _ struct{} `type:"structure"` - // Active status of user. When you create an Amazon QuickSight user that’s not - // an IAM user or an AD user, that user is inactive until they sign in and provide - // a password + // Active status of user. When you create an Amazon QuickSight user that’s + // not an IAM user or an AD user, that user is inactive until they sign in and + // provide a password Active *bool `type:"boolean"` // The Amazon Resource Name (ARN) for the user. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go index 3ba978ad6a5..28623f680ce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go @@ -46,11 +46,11 @@ const ( // svc := quicksight.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *QuickSight { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *QuickSight { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QuickSight { svc := &QuickSight{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/api.go index 0359d03b244..459ec84ca34 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/api.go @@ -90,6 +90,14 @@ func (c *RAM) AcceptResourceShareInvitationRequest(input *AcceptResourceShareInv // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The service is not available. 
// +// * ErrCodeInvalidClientTokenException "InvalidClientTokenException" +// A client token is not valid. +// +// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" +// A client token input parameter was reused with an operation, but at least +// one of the other input parameters is different from the previous call to +// the operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/AcceptResourceShareInvitation func (c *RAM) AcceptResourceShareInvitation(input *AcceptResourceShareInvitationInput) (*AcceptResourceShareInvitationOutput, error) { req, out := c.AcceptResourceShareInvitationRequest(input) @@ -307,6 +315,9 @@ func (c *RAM) CreateResourceShareRequest(input *CreateResourceShareInput) (req * // * ErrCodeResourceShareLimitExceededException "ResourceShareLimitExceededException" // The requested resource share exceeds the limit for your account. // +// * ErrCodeTagPolicyViolationException "TagPolicyViolationException" +// The specified tag is a reserved word and cannot be used. +// // * ErrCodeServerInternalException "ServerInternalException" // The service could not respond to the request due to an internal problem. // @@ -593,7 +604,9 @@ func (c *RAM) EnableSharingWithAwsOrganizationRequest(input *EnableSharingWithAw // EnableSharingWithAwsOrganization API operation for AWS Resource Access Manager. // -// Enables resource sharing within your organization. +// Enables resource sharing within your AWS Organization. +// +// The caller must be the master account for the AWS Organization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -684,7 +697,7 @@ func (c *RAM) GetResourcePoliciesRequest(input *GetResourcePoliciesInput) (req * // GetResourcePolicies API operation for AWS Resource Access Manager. // -// Gets the policies for the specifies resources. +// Gets the policies for the specified resources that you own and have shared. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -742,7 +755,7 @@ func (c *RAM) GetResourcePoliciesWithContext(ctx aws.Context, input *GetResource // // Example iterating over at most 3 pages of a GetResourcePolicies operation. // pageNum := 0 // err := client.GetResourcePoliciesPages(params, -// func(page *GetResourcePoliciesOutput, lastPage bool) bool { +// func(page *ram.GetResourcePoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -774,10 +787,12 @@ func (c *RAM) GetResourcePoliciesPagesWithContext(ctx aws.Context, input *GetRes }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetResourcePoliciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetResourcePoliciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -831,7 +846,7 @@ func (c *RAM) GetResourceShareAssociationsRequest(input *GetResourceShareAssocia // GetResourceShareAssociations API operation for AWS Resource Access Manager. // -// Gets the associations for the specified resource share. +// Gets the resources or principals for the resource shares that you own. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -895,7 +910,7 @@ func (c *RAM) GetResourceShareAssociationsWithContext(ctx aws.Context, input *Ge // // Example iterating over at most 3 pages of a GetResourceShareAssociations operation. // pageNum := 0 // err := client.GetResourceShareAssociationsPages(params, -// func(page *GetResourceShareAssociationsOutput, lastPage bool) bool { +// func(page *ram.GetResourceShareAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -927,10 +942,12 @@ func (c *RAM) GetResourceShareAssociationsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetResourceShareAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetResourceShareAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -984,7 +1001,7 @@ func (c *RAM) GetResourceShareInvitationsRequest(input *GetResourceShareInvitati // GetResourceShareInvitations API operation for AWS Resource Access Manager. // -// Gets the specified invitations for resource sharing. +// Gets the invitations for resource sharing that you've received. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1048,7 +1065,7 @@ func (c *RAM) GetResourceShareInvitationsWithContext(ctx aws.Context, input *Get // // Example iterating over at most 3 pages of a GetResourceShareInvitations operation. // pageNum := 0 // err := client.GetResourceShareInvitationsPages(params, -// func(page *GetResourceShareInvitationsOutput, lastPage bool) bool { +// func(page *ram.GetResourceShareInvitationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1080,10 +1097,12 @@ func (c *RAM) GetResourceShareInvitationsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetResourceShareInvitationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetResourceShareInvitationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1137,7 +1156,8 @@ func (c *RAM) GetResourceSharesRequest(input *GetResourceSharesInput) (req *requ // GetResourceShares API operation for AWS Resource Access Manager. // -// Gets the specified resource shares or all of your resource shares. +// Gets the resource shares that you own or the resource shares that are shared +// with you. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1198,7 +1218,7 @@ func (c *RAM) GetResourceSharesWithContext(ctx aws.Context, input *GetResourceSh // // Example iterating over at most 3 pages of a GetResourceShares operation. 
 //    pageNum := 0
 //    err := client.GetResourceSharesPages(params,
-//        func(page *GetResourceSharesOutput, lastPage bool) bool {
+//        func(page *ram.GetResourceSharesOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -1230,10 +1250,174 @@ func (c *RAM) GetResourceSharesPagesWithContext(ctx aws.Context, input *GetResou
 		},
 	}

-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*GetResourceSharesOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*GetResourceSharesOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
+	return p.Err()
+}
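The rewritten pagination loop keeps the documented callback contract: fn is invoked once per page, returning false stops further requests, and p.Err() still surfaces any request error afterward. A minimal sketch of driving the updated GetResourceSharesPages; the region and the ResourceOwner value "SELF" are illustrative assumptions:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ram"
    )

    func main() {
    	// Assumed setup: credentials resolved from the environment.
    	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
    	client := ram.New(sess)

    	pageNum := 0
    	err := client.GetResourceSharesPages(
    		&ram.GetResourceSharesInput{ResourceOwner: aws.String("SELF")},
    		func(page *ram.GetResourceSharesOutput, lastPage bool) bool {
    			pageNum++
    			fmt.Printf("page %d: %d shares (last=%v)\n", pageNum, len(page.ResourceShares), lastPage)
    			return pageNum < 3 // returning false stops the for p.Next() loop early
    		})
    	if err != nil {
    		log.Fatal(err)
    	}
    }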
+
+const opListPendingInvitationResources = "ListPendingInvitationResources"
+
+// ListPendingInvitationResourcesRequest generates a "aws/request.Request" representing the
+// client's request for the ListPendingInvitationResources operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListPendingInvitationResources for more information on using the ListPendingInvitationResources
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListPendingInvitationResourcesRequest method.
+//    req, resp := client.ListPendingInvitationResourcesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/ListPendingInvitationResources
+func (c *RAM) ListPendingInvitationResourcesRequest(input *ListPendingInvitationResourcesInput) (req *request.Request, output *ListPendingInvitationResourcesOutput) {
+	op := &request.Operation{
+		Name:       opListPendingInvitationResources,
+		HTTPMethod: "POST",
+		HTTPPath:   "/listpendinginvitationresources",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "maxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListPendingInvitationResourcesInput{}
+	}
+
+	output = &ListPendingInvitationResourcesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListPendingInvitationResources API operation for AWS Resource Access Manager.
+//
+// Lists the resources in a resource share that is shared with you but that
+// the invitation is still pending for.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Resource Access Manager's
+// API operation ListPendingInvitationResources for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedArnException "MalformedArnException"
+//   The format of an Amazon Resource Name (ARN) is not valid.
+//
+//   * ErrCodeInvalidNextTokenException "InvalidNextTokenException"
+//   The specified value for NextToken is not valid.
+//
+//   * ErrCodeInvalidParameterException "InvalidParameterException"
+//   A parameter is not valid.
+//
+//   * ErrCodeServerInternalException "ServerInternalException"
+//   The service could not respond to the request due to an internal problem.
+//
+//   * ErrCodeServiceUnavailableException "ServiceUnavailableException"
+//   The service is not available.
+//
+//   * ErrCodeResourceShareInvitationArnNotFoundException "ResourceShareInvitationArnNotFoundException"
+//   The Amazon Resource Name (ARN) for an invitation was not found.
+//
+//   * ErrCodeMissingRequiredParameterException "MissingRequiredParameterException"
+//   A required input parameter is missing.
+//
+//   * ErrCodeResourceShareInvitationAlreadyRejectedException "ResourceShareInvitationAlreadyRejectedException"
+//   The invitation was already rejected.
+//
+//   * ErrCodeResourceShareInvitationExpiredException "ResourceShareInvitationExpiredException"
+//   The invitation is expired.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/ListPendingInvitationResources
+func (c *RAM) ListPendingInvitationResources(input *ListPendingInvitationResourcesInput) (*ListPendingInvitationResourcesOutput, error) {
+	req, out := c.ListPendingInvitationResourcesRequest(input)
+	return out, req.Send()
+}
+
+// ListPendingInvitationResourcesWithContext is the same as ListPendingInvitationResources with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListPendingInvitationResources for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *RAM) ListPendingInvitationResourcesWithContext(ctx aws.Context, input *ListPendingInvitationResourcesInput, opts ...request.Option) (*ListPendingInvitationResourcesOutput, error) {
+	req, out := c.ListPendingInvitationResourcesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
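A hedged sketch of calling the new single-page API and checking one of the documented error codes via awserr; the invitation ARN and the Resources field names are taken from this diff, while the ARN value itself is a placeholder:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ram"
    )

    func main() {
    	client := ram.New(session.Must(session.NewSession()))

    	out, err := client.ListPendingInvitationResources(&ram.ListPendingInvitationResourcesInput{
    		// Placeholder ARN; substitute a real pending invitation ARN.
    		ResourceShareInvitationArn: aws.String("arn:aws:ram:us-west-2:111122223333:resource-share-invitation/EXAMPLE"),
    		MaxResults:                 aws.Int64(10),
    	})
    	if err != nil {
    		if aerr, ok := err.(awserr.Error); ok &&
    			aerr.Code() == ram.ErrCodeResourceShareInvitationExpiredException {
    			log.Fatalf("invitation expired: %s", aerr.Message())
    		}
    		log.Fatal(err)
    	}
    	for _, r := range out.Resources {
    		fmt.Println(aws.StringValue(r.Arn))
    	}
    }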
+
+// ListPendingInvitationResourcesPages iterates over the pages of a ListPendingInvitationResources operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListPendingInvitationResources method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListPendingInvitationResources operation.
+//    pageNum := 0
+//    err := client.ListPendingInvitationResourcesPages(params,
+//        func(page *ram.ListPendingInvitationResourcesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *RAM) ListPendingInvitationResourcesPages(input *ListPendingInvitationResourcesInput, fn func(*ListPendingInvitationResourcesOutput, bool) bool) error {
+	return c.ListPendingInvitationResourcesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPendingInvitationResourcesPagesWithContext same as ListPendingInvitationResourcesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *RAM) ListPendingInvitationResourcesPagesWithContext(ctx aws.Context, input *ListPendingInvitationResourcesInput, fn func(*ListPendingInvitationResourcesOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListPendingInvitationResourcesInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListPendingInvitationResourcesRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListPendingInvitationResourcesOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
 	return p.Err()
 }

@@ -1287,7 +1471,8 @@ func (c *RAM) ListPrincipalsRequest(input *ListPrincipalsInput) (req *request.Re

 // ListPrincipals API operation for AWS Resource Access Manager.
 //
-// Lists the principals with access to the specified resource.
+// Lists the principals that you have shared resources with or the principals
+// that have shared resources with you.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1348,7 +1533,7 @@ func (c *RAM) ListPrincipalsWithContext(ctx aws.Context, input *ListPrincipalsIn
 //
 //    // Example iterating over at most 3 pages of a ListPrincipals operation.
 //    pageNum := 0
 //    err := client.ListPrincipalsPages(params,
-//        func(page *ListPrincipalsOutput, lastPage bool) bool {
+//        func(page *ram.ListPrincipalsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -1380,10 +1565,12 @@ func (c *RAM) ListPrincipalsPagesWithContext(ctx aws.Context, input *ListPrincip
 		},
 	}

-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListPrincipalsOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*ListPrincipalsOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }

@@ -1437,7 +1624,8 @@ func (c *RAM) ListResourcesRequest(input *ListResourcesInput) (req *request.Requ

 // ListResources API operation for AWS Resource Access Manager.
 //
-// Lists the resources that the specified principal can access.
+// Lists the resources that you added to a resource share or the resources
+// that are shared with you.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1501,7 +1689,7 @@ func (c *RAM) ListResourcesWithContext(ctx aws.Context, input *ListResourcesInpu
 //
 //    // Example iterating over at most 3 pages of a ListResources operation.
 //    pageNum := 0
 //    err := client.ListResourcesPages(params,
-//        func(page *ListResourcesOutput, lastPage bool) bool {
+//        func(page *ram.ListResourcesOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -1533,10 +1721,12 @@ func (c *RAM) ListResourcesPagesWithContext(ctx aws.Context, input *ListResource
 		},
 	}

-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*ListResourcesOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*ListResourcesOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
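The PagesWithContext variants let a caller bound an entire pagination run with one context. A minimal sketch against ListResources; the ResourceOwner value "SELF" and the Resource field names are assumptions based on the RAM API shapes in this diff:

    package main

    import (
    	"context"
    	"fmt"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ram"
    )

    func main() {
    	client := ram.New(session.Must(session.NewSession()))

    	// Bound the whole pagination run; every page request inherits ctx.
    	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    	defer cancel()

    	err := client.ListResourcesPagesWithContext(ctx,
    		&ram.ListResourcesInput{ResourceOwner: aws.String("SELF")},
    		func(page *ram.ListResourcesOutput, lastPage bool) bool {
    			for _, res := range page.Resources {
    				fmt.Println(aws.StringValue(res.Arn), aws.StringValue(res.Type))
    			}
    			return true // keep paging until lastPage
    		})
    	if err != nil {
    		log.Fatal(err) // includes context deadline errors
    	}
    }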
@@ -1618,6 +1808,14 @@ func (c *RAM) RejectResourceShareInvitationRequest(input *RejectResourceShareInv
 //   * ErrCodeServiceUnavailableException "ServiceUnavailableException"
 //   The service is not available.
 //
+//   * ErrCodeInvalidClientTokenException "InvalidClientTokenException"
+//   A client token is not valid.
+//
+//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
+//   A client token input parameter was reused with an operation, but at least
+//   one of the other input parameters is different from the previous call to
+//   the operation.
+//
 // See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/RejectResourceShareInvitation
 func (c *RAM) RejectResourceShareInvitation(input *RejectResourceShareInvitationInput) (*RejectResourceShareInvitationOutput, error) {
 	req, out := c.RejectResourceShareInvitationRequest(input)
@@ -1685,7 +1883,7 @@ func (c *RAM) TagResourceRequest(input *TagResourceInput) (req *request.Request,

 // TagResource API operation for AWS Resource Access Manager.
 //
-// Adds the specified tags to the specified resource share.
+// Adds the specified tags to the specified resource share that you own.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1707,6 +1905,9 @@ func (c *RAM) TagResourceRequest(input *TagResourceInput) (req *request.Request,
 //   * ErrCodeResourceArnNotFoundException "ResourceArnNotFoundException"
 //   An Amazon Resource Name (ARN) was not found.
 //
+//   * ErrCodeTagPolicyViolationException "TagPolicyViolationException"
+//   The specified tag is a reserved word and cannot be used.
+//
 //   * ErrCodeServerInternalException "ServerInternalException"
 //   The service could not respond to the request due to an internal problem.
 //
@@ -1780,7 +1981,7 @@ func (c *RAM) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ

 // UntagResource API operation for AWS Resource Access Manager.
 //
-// Removes the specified tags from the specified resource share.
+// Removes the specified tags from the specified resource share that you own.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1865,7 +2066,7 @@ func (c *RAM) UpdateResourceShareRequest(input *UpdateResourceShareInput) (req *

 // UpdateResourceShare API operation for AWS Resource Access Manager.
 //
-// Updates the specified resource share.
+// Updates the specified resource share that you own.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -2109,7 +2310,7 @@ func (s *AssociateResourceShareOutput) SetResourceShareAssociations(v []*Resourc
 type CreateResourceShareInput struct {
 	_ struct{} `type:"structure"`

-	// Indicates whether principals outside your organization can be associated
+	// Indicates whether principals outside your AWS organization can be associated
 	// with a resource share.
 	AllowExternalPrincipals *bool `locationName:"allowExternalPrincipals" type:"boolean"`
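The new TagPolicyViolationException surfaces on CreateResourceShare and TagResource when a tag key is reserved. A hedged sketch of creating an organization-internal share with tags; the input field names follow the RAM shapes in this diff, and the tag values are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ram"
    )

    func main() {
    	client := ram.New(session.Must(session.NewSession()))

    	out, err := client.CreateResourceShare(&ram.CreateResourceShareInput{
    		Name: aws.String("example-share"),
    		// Keep the share restricted to the AWS organization.
    		AllowExternalPrincipals: aws.Bool(false),
    		Tags: []*ram.Tag{{
    			Key:   aws.String("team"),     // placeholder tag key
    			Value: aws.String("platform"), // placeholder tag value
    		}},
    	})
    	if err != nil {
    		if aerr, ok := err.(awserr.Error); ok &&
    			aerr.Code() == ram.ErrCodeTagPolicyViolationException {
    			log.Fatalf("reserved tag rejected: %s", aerr.Message())
    		}
    		log.Fatal(err)
    	}
    	fmt.Println(aws.StringValue(out.ResourceShare.ResourceShareArn))
    }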
@@ -2548,7 +2749,7 @@ func (s *GetResourcePoliciesOutput) SetPolicies(v []*string) *GetResourcePolicie
 type GetResourceShareAssociationsInput struct {
 	_ struct{} `type:"structure"`

-	// The status of the association.
+	// The association status.
 	AssociationStatus *string `locationName:"associationStatus" type:"string" enum:"ResourceShareAssociationStatus"`

 	// The association type.
@@ -2563,10 +2764,12 @@ type GetResourceShareAssociationsInput struct {
 	// The token for the next page of results.
 	NextToken *string `locationName:"nextToken" type:"string"`

-	// The principal.
+	// The principal. You cannot specify this parameter if the association type
+	// is RESOURCE.
 	Principal *string `locationName:"principal" type:"string"`

-	// The Amazon Resource Name (ARN) of the resource.
+	// The Amazon Resource Name (ARN) of the resource. You cannot specify this parameter
+	// if the association type is PRINCIPAL.
 	ResourceArn *string `locationName:"resourceArn" type:"string"`

 	// The Amazon Resource Names (ARN) of the resource shares.
@@ -2648,7 +2851,7 @@ type GetResourceShareAssociationsOutput struct {
 	// when there are no more results to return.
 	NextToken *string `locationName:"nextToken" type:"string"`

-	// Information about the association.
+	// Information about the associations.
 	ResourceShareAssociations []*ResourceShareAssociation `locationName:"resourceShareAssociations" type:"list"`
 }

@@ -2900,6 +3103,99 @@ func (s *GetResourceSharesOutput) SetResourceShares(v []*ResourceShare) *GetReso
 	return s
 }

+type ListPendingInvitationResourcesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of results to return with a single call. To retrieve the
+	// remaining results, make another call with the returned nextToken value.
+	MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+	// The token for the next page of results.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the invitation.
+	//
+	// ResourceShareInvitationArn is a required field
+	ResourceShareInvitationArn *string `locationName:"resourceShareInvitationArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPendingInvitationResourcesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPendingInvitationResourcesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPendingInvitationResourcesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListPendingInvitationResourcesInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.ResourceShareInvitationArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceShareInvitationArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListPendingInvitationResourcesInput) SetMaxResults(v int64) *ListPendingInvitationResourcesInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListPendingInvitationResourcesInput) SetNextToken(v string) *ListPendingInvitationResourcesInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetResourceShareInvitationArn sets the ResourceShareInvitationArn field's value.
+func (s *ListPendingInvitationResourcesInput) SetResourceShareInvitationArn(v string) *ListPendingInvitationResourcesInput {
+	s.ResourceShareInvitationArn = &v
+	return s
+}
+
+type ListPendingInvitationResourcesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use to retrieve the next page of results. This value is null
+	// when there are no more results to return.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Information about the resources included in the resource share.
+ Resources []*Resource `locationName:"resources" type:"list"` +} + +// String returns the string representation +func (s ListPendingInvitationResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPendingInvitationResourcesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPendingInvitationResourcesOutput) SetNextToken(v string) *ListPendingInvitationResourcesOutput { + s.NextToken = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *ListPendingInvitationResourcesOutput) SetResources(v []*Resource) *ListPendingInvitationResourcesOutput { + s.Resources = v + return s +} + type ListPrincipalsInput struct { _ struct{} `type:"structure"` @@ -2925,6 +3221,9 @@ type ListPrincipalsInput struct { ResourceShareArns []*string `locationName:"resourceShareArns" type:"list"` // The resource type. + // + // Valid values: route53resolver:ResolverRule | ec2:TransitGateway | ec2:Subnet + // | license-manager:LicenseConfiguration ResourceType *string `locationName:"resourceType" type:"string"` } @@ -3054,6 +3353,9 @@ type ListResourcesInput struct { ResourceShareArns []*string `locationName:"resourceShareArns" type:"list"` // The resource type. + // + // Valid values: route53resolver:ResolverRule | ec2:TransitGateway | ec2:Subnet + // | license-manager:LicenseConfiguration ResourceType *string `locationName:"resourceType" type:"string"` } @@ -3165,8 +3467,8 @@ type Principal struct { // The time when the principal was associated with the resource share. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` - // Indicates whether the principal belongs to the same organization as the AWS - // account that owns the resource share. + // Indicates whether the principal belongs to the same AWS organization as the + // AWS account that owns the resource share. External *bool `locationName:"external" type:"boolean"` // The ID of the principal. @@ -3382,7 +3684,7 @@ func (s *Resource) SetType(v string) *Resource { type ResourceShare struct { _ struct{} `type:"structure"` - // Indicates whether principals outside your organization can be associated + // Indicates whether principals outside your AWS organization can be associated // with a resource share. AllowExternalPrincipals *bool `locationName:"allowExternalPrincipals" type:"boolean"` @@ -3490,8 +3792,8 @@ type ResourceShareAssociation struct { // The time when the association was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` - // Indicates whether the principal belongs to the same organization as the AWS - // account that owns the resource share. + // Indicates whether the principal belongs to the same AWS organization as the + // AWS account that owns the resource share. External *bool `locationName:"external" type:"boolean"` // The time when the association was last updated. @@ -3500,6 +3802,9 @@ type ResourceShareAssociation struct { // The Amazon Resource Name (ARN) of the resource share. ResourceShareArn *string `locationName:"resourceShareArn" type:"string"` + // The name of the resource share. + ResourceShareName *string `locationName:"resourceShareName" type:"string"` + // The status of the association. 
Status *string `locationName:"status" type:"string" enum:"ResourceShareAssociationStatus"` @@ -3553,6 +3858,12 @@ func (s *ResourceShareAssociation) SetResourceShareArn(v string) *ResourceShareA return s } +// SetResourceShareName sets the ResourceShareName field's value. +func (s *ResourceShareAssociation) SetResourceShareName(v string) *ResourceShareAssociation { + s.ResourceShareName = &v + return s +} + // SetStatus sets the Status field's value. func (s *ResourceShareAssociation) SetStatus(v string) *ResourceShareAssociation { s.Status = &v @@ -3578,8 +3889,11 @@ type ResourceShareInvitation struct { // The Amazon Resource Name (ARN) of the resource share. ResourceShareArn *string `locationName:"resourceShareArn" type:"string"` - // The resources associated with the resource share. - ResourceShareAssociations []*ResourceShareAssociation `locationName:"resourceShareAssociations" type:"list"` + // To view the resources associated with a pending resource share invitation, + // use ListPendingInvitationResources (https://docs.aws.amazon.com/ram/latest/APIReference/API_ListPendingInvitationResources.html). + // + // Deprecated: This member has been deprecated. Use ListPendingInvitationResources. + ResourceShareAssociations []*ResourceShareAssociation `locationName:"resourceShareAssociations" deprecated:"true" type:"list"` // The Amazon Resource Name (ARN) of the invitation. ResourceShareInvitationArn *string `locationName:"resourceShareInvitationArn" type:"string"` @@ -3853,7 +4167,7 @@ func (s UntagResourceOutput) GoString() string { type UpdateResourceShareInput struct { _ struct{} `type:"structure"` - // Indicates whether principals outside your organization can be associated + // Indicates whether principals outside your AWS organization can be associated // with a resource share. AllowExternalPrincipals *bool `locationName:"allowExternalPrincipals" type:"boolean"` @@ -4026,4 +4340,7 @@ const ( // ResourceStatusUnavailable is a ResourceStatus enum value ResourceStatusUnavailable = "UNAVAILABLE" + + // ResourceStatusPending is a ResourceStatus enum value + ResourceStatusPending = "PENDING" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/doc.go index 234b180b8b9..59569af3d4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/doc.go @@ -5,21 +5,12 @@ // // Use AWS Resource Access Manager to share AWS resources between AWS accounts. // To share a resource, you create a resource share, associate the resource -// with the resource share, and specify the principals that can access the resource. -// The following principals are supported: +// with the resource share, and specify the principals that can access the resources +// associated with the resource share. The following principals are supported: +// AWS accounts, organizational units (OU) from AWS Organizations, and organizations +// from AWS Organizations. // -// * The ID of an AWS account -// -// * The Amazon Resource Name (ARN) of an OU from AWS Organizations -// -// * The Amazon Resource Name (ARN) of an organization from AWS Organizations -// -// If you specify an AWS account that doesn't exist in the same organization -// as the account that owns the resource share, the owner of the specified account -// receives an invitation to accept the resource share. 
After the owner accepts -// the invitation, they can access the resources in the resource share. An administrator -// of the specified account can use IAM policies to restrict access resources -// in the resource share. +// For more information, see the AWS Resource Access Manager User Guide (https://docs.aws.amazon.com/ram/latest/userguide/). // // See https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/errors.go index e0f0af48c15..147a3a7d0bd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/errors.go @@ -120,6 +120,12 @@ const ( // The requested tags exceed the limit for your account. ErrCodeTagLimitExceededException = "TagLimitExceededException" + // ErrCodeTagPolicyViolationException for service response error code + // "TagPolicyViolationException". + // + // The specified tag is a reserved word and cannot be used. + ErrCodeTagPolicyViolationException = "TagPolicyViolationException" + // ErrCodeUnknownResourceException for service response error code // "UnknownResourceException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/service.go index e1e1dc6683e..cf09e5ce963 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ram/service.go @@ -46,11 +46,11 @@ const ( // svc := ram.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RAM { svc := &RAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-04", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index eff5f448a54..f34121026ad 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -534,8 +534,8 @@ func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityG // The state of the DB security group doesn't allow deletion. // // * ErrCodeAuthorizationAlreadyExistsFault "AuthorizationAlreadyExists" -// The specified CIDRIP or Amazon EC2 security group is already authorized for -// the specified DB security group. 
+// The specified CIDR IP range or Amazon EC2 security group is already authorized +// for the specified DB security group. // // * ErrCodeAuthorizationQuotaExceededFault "AuthorizationQuotaExceeded" // The DB security group authorization quota has been reached. @@ -608,7 +608,7 @@ func (c *RDS) BacktrackDBClusterRequest(input *BacktrackDBClusterInput) (req *re // // Backtracks a DB cluster to a specific time, without creating a new DB cluster. // -// For more information on backtracking, see Backtracking an Aurora DB Cluster +// For more information on backtracking, see Backtracking an Aurora DB Cluster // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Backtrack.html) // in the Amazon Aurora User Guide. // @@ -802,29 +802,26 @@ func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (r // where the DB cluster snapshot is copied from. The pre-signed URL must // be a valid request for the CopyDBClusterSnapshot API action that can be // executed in the source AWS Region that contains the encrypted DB cluster -// snapshot to be copied. -// -// The pre-signed URL request must contain the following parameter values: -// -// KmsKeyId - The KMS key identifier for the key to use to encrypt the copy -// of the DB cluster snapshot in the destination AWS Region. This is the -// same identifier for both the CopyDBClusterSnapshot action that is called -// in the destination AWS Region, and the action contained in the pre-signed -// URL. -// -// DestinationRegion - The name of the AWS Region that the DB cluster snapshot -// will be created in. -// -// SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for -// the encrypted DB cluster snapshot to be copied. This identifier must be -// in the Amazon Resource Name (ARN) format for the source AWS Region. For -// example, if you are copying an encrypted DB cluster snapshot from the -// us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks -// like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115. -// -// To learn how to generate a Signature Version 4 signed request, see Authenticating +// snapshot to be copied. The pre-signed URL request must contain the following +// parameter values: KmsKeyId - The KMS key identifier for the key to use +// to encrypt the copy of the DB cluster snapshot in the destination AWS +// Region. This is the same identifier for both the CopyDBClusterSnapshot +// action that is called in the destination AWS Region, and the action contained +// in the pre-signed URL. DestinationRegion - The name of the AWS Region +// that the DB cluster snapshot will be created in. SourceDBClusterSnapshotIdentifier +// - The DB cluster snapshot identifier for the encrypted DB cluster snapshot +// to be copied. This identifier must be in the Amazon Resource Name (ARN) +// format for the source AWS Region. For example, if you are copying an encrypted +// DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier +// looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115. 
+// To learn how to generate a Signature Version 4 signed request, see Authenticating // Requests: Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) -// and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion +// (or --source-region for the AWS CLI) instead of specifying PreSignedUrl +// manually. Specifying SourceRegion autogenerates a pre-signed URL that +// is a valid request for the operation that can be executed in the source +// AWS Region. // // * TargetDBClusterSnapshotIdentifier - The identifier for the new copy // of the DB cluster snapshot in the destination AWS Region. @@ -839,10 +836,10 @@ func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (r // DB cluster snapshot is in "copying" status. // // For more information on copying encrypted DB cluster snapshots from one AWS -// Region to another, see Copying a Snapshot (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html) +// Region to another, see Copying a Snapshot (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html) // in the Amazon Aurora User Guide. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -1165,6 +1162,98 @@ func (c *RDS) CopyOptionGroupWithContext(ctx aws.Context, input *CopyOptionGroup return out, req.Send() } +const opCreateCustomAvailabilityZone = "CreateCustomAvailabilityZone" + +// CreateCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCustomAvailabilityZone for more information on using the CreateCustomAvailabilityZone +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCustomAvailabilityZoneRequest method. 
+// req, resp := client.CreateCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZoneRequest(input *CreateCustomAvailabilityZoneInput) (req *request.Request, output *CreateCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opCreateCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomAvailabilityZoneInput{} + } + + output = &CreateCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Creates a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation CreateCustomAvailabilityZone for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneAlreadyExistsFault "CustomAvailabilityZoneAlreadyExists" +// CustomAvailabilityZoneName is already used by an existing custom Availability +// Zone. +// +// * ErrCodeCustomAvailabilityZoneQuotaExceededFault "CustomAvailabilityZoneQuotaExceeded" +// You have exceeded the maximum number of custom Availability Zones. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZone(input *CreateCustomAvailabilityZoneInput) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// CreateCustomAvailabilityZoneWithContext is the same as CreateCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) CreateCustomAvailabilityZoneWithContext(ctx aws.Context, input *CreateCustomAvailabilityZoneInput, opts ...request.Option) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDBCluster = "CreateDBCluster" // CreateDBClusterRequest generates a "aws/request.Request" representing the @@ -1216,7 +1305,7 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request. // For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier // is encrypted, you must also specify the PreSignedUrl parameter. 
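The reflowed CopyDBClusterSnapshot comment above now points out that SDK callers can set SourceRegion and let the SDK presign the cross-region request instead of building PreSignedUrl by hand. A hedged sketch of that path; the snapshot identifiers are the placeholders from the doc text and the KMS alias is an assumption:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
    	// Client in the destination region of the copy.
    	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    	client := rds.New(sess)

    	out, err := client.CopyDBClusterSnapshot(&rds.CopyDBClusterSnapshotInput{
    		SourceDBClusterSnapshotIdentifier: aws.String(
    			"arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115"),
    		TargetDBClusterSnapshotIdentifier: aws.String("aurora-cluster1-snapshot-copy"),
    		KmsKeyId:                          aws.String("alias/aws/rds"), // assumed key alias
    		// SourceRegion makes the SDK autogenerate the pre-signed URL.
    		SourceRegion: aws.String("us-west-2"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(aws.StringValue(out.DBClusterSnapshot.DBClusterSnapshotArn))
    }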
// -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -1286,6 +1375,9 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request. // // * ErrCodeInvalidGlobalClusterStateFault "InvalidGlobalClusterStateFault" // +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBCluster func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutput, error) { req, out := c.CreateDBClusterRequest(input) @@ -1472,10 +1564,10 @@ func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParamet // character set for the default database defined by the character_set_database // parameter. You can use the Parameter Groups option of the Amazon RDS console // (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters -// command to verify that your DB cluster parameter group has been created or +// action to verify that your DB cluster parameter group has been created or // modified. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -1562,7 +1654,7 @@ func (c *RDS) CreateDBClusterSnapshotRequest(input *CreateDBClusterSnapshotInput // CreateDBClusterSnapshot API operation for Amazon Relational Database Service. // // Creates a snapshot of a DB cluster. For more information on Amazon Aurora, -// see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -1717,11 +1809,11 @@ func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -1881,6 +1973,9 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. 
// +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBInstanceReadReplica func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInput) (*CreateDBInstanceReadReplicaOutput, error) { req, out := c.CreateDBInstanceReadReplicaRequest(input) @@ -2439,6 +2534,7 @@ func (c *RDS) CreateGlobalClusterRequest(input *CreateGlobalClusterInput) (req * // CreateGlobalCluster API operation for Amazon Relational Database Service. // +// // Creates an Aurora global database spread across multiple regions. The global // database contains a single primary cluster with read-write capability, and // a read-only secondary cluster that receives data from the primary cluster @@ -2573,6 +2669,95 @@ func (c *RDS) CreateOptionGroupWithContext(ctx aws.Context, input *CreateOptionG return out, req.Send() } +const opDeleteCustomAvailabilityZone = "DeleteCustomAvailabilityZone" + +// DeleteCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCustomAvailabilityZone for more information on using the DeleteCustomAvailabilityZone +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCustomAvailabilityZoneRequest method. +// req, resp := client.DeleteCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZoneRequest(input *DeleteCustomAvailabilityZoneInput) (req *request.Request, output *DeleteCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opDeleteCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomAvailabilityZoneInput{} + } + + output = &DeleteCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Deletes a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteCustomAvailabilityZone for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZone(input *DeleteCustomAvailabilityZoneInput) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// DeleteCustomAvailabilityZoneWithContext is the same as DeleteCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteCustomAvailabilityZoneWithContext(ctx aws.Context, input *DeleteCustomAvailabilityZoneInput, opts ...request.Option) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDBCluster = "DeleteDBCluster" // DeleteDBClusterRequest generates a "aws/request.Request" representing the @@ -2622,7 +2807,8 @@ func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request. // and can't be recovered. Manual DB cluster snapshots of the specified DB cluster // are not deleted. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)in the Amazon Aurora User Guide. +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. // @@ -2807,7 +2993,7 @@ func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParamet // Deletes a specified DB cluster parameter group. The DB cluster parameter // group to be deleted can't be associated with any DB clusters. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -2899,7 +3085,7 @@ func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput // // The DB cluster snapshot must be in the available state to be deleted. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. 
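A minimal sketch of invoking the new DeleteCustomAvailabilityZone operation through its WithContext variant and tolerating an already-deleted zone; the CustomAvailabilityZoneId field name is inferred from the error text above, and the ID value is a placeholder:

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
    	client := rds.New(session.Must(session.NewSession()))

    	// Cancel the request if RDS does not answer within 15 seconds.
    	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
    	defer cancel()

    	_, err := client.DeleteCustomAvailabilityZoneWithContext(ctx,
    		&rds.DeleteCustomAvailabilityZoneInput{
    			CustomAvailabilityZoneId: aws.String("rds-caz-EXAMPLE"), // placeholder ID
    		})
    	if aerr, ok := err.(awserr.Error); ok &&
    		aerr.Code() == rds.ErrCodeCustomAvailabilityZoneNotFoundFault {
    		log.Printf("custom AZ already gone: %s", aerr.Message())
    	} else if err != nil {
    		log.Fatal(err)
    	}
    }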
@@ -2996,7 +3182,7 @@ func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *reques // // Note that when a DB instance is in a failure state and has a status of failed, // incompatible-restore, or incompatible-network, you can only delete it when -// the SkipFinalSnapshot parameter is set to true. +// you skip creation of the final snapshot with the SkipFinalSnapshot parameter. // // If the specified DB instance is part of an Amazon Aurora DB cluster, you // can't delete the DB instance if both of the following conditions are true: @@ -3654,6 +3840,86 @@ func (c *RDS) DeleteGlobalClusterWithContext(ctx aws.Context, input *DeleteGloba return out, req.Send() } +const opDeleteInstallationMedia = "DeleteInstallationMedia" + +// DeleteInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInstallationMedia for more information on using the DeleteInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInstallationMediaRequest method. +// req, resp := client.DeleteInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMediaRequest(input *DeleteInstallationMediaInput) (req *request.Request, output *DeleteInstallationMediaOutput) { + op := &request.Operation{ + Name: opDeleteInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstallationMediaInput{} + } + + output = &DeleteInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInstallationMedia API operation for Amazon Relational Database Service. +// +// Deletes the installation medium for a DB engine that requires an on-premises +// customer provided license, such as Microsoft SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation medium. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMedia(input *DeleteInstallationMediaInput) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + return out, req.Send() +} + +// DeleteInstallationMediaWithContext is the same as DeleteInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInstallationMedia for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteInstallationMediaWithContext(ctx aws.Context, input *DeleteInstallationMediaInput, opts ...request.Option) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteOptionGroup = "DeleteOptionGroup" // DeleteOptionGroupRequest generates a "aws/request.Request" representing the @@ -3895,6 +4161,150 @@ func (c *RDS) DescribeCertificatesWithContext(ctx aws.Context, input *DescribeCe return out, req.Send() } +const opDescribeCustomAvailabilityZones = "DescribeCustomAvailabilityZones" + +// DescribeCustomAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomAvailabilityZones operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCustomAvailabilityZones for more information on using the DescribeCustomAvailabilityZones +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCustomAvailabilityZonesRequest method. +// req, resp := client.DescribeCustomAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZonesRequest(input *DescribeCustomAvailabilityZonesInput) (req *request.Request, output *DescribeCustomAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeCustomAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCustomAvailabilityZonesInput{} + } + + output = &DescribeCustomAvailabilityZonesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCustomAvailabilityZones API operation for Amazon Relational Database Service. +// +// Returns information about custom Availability Zones (AZs). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeCustomAvailabilityZones for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZones(input *DescribeCustomAvailabilityZonesInput) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesWithContext is the same as DescribeCustomAvailabilityZones with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCustomAvailabilityZones for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, opts ...request.Option) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesPages iterates over the pages of a DescribeCustomAvailabilityZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCustomAvailabilityZones method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCustomAvailabilityZones operation. +// pageNum := 0 +// err := client.DescribeCustomAvailabilityZonesPages(params, +// func(page *rds.DescribeCustomAvailabilityZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeCustomAvailabilityZonesPages(input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool) error { + return c.DescribeCustomAvailabilityZonesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCustomAvailabilityZonesPagesWithContext same as DescribeCustomAvailabilityZonesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesPagesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCustomAvailabilityZonesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCustomAvailabilityZonesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCustomAvailabilityZonesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterBacktracks = "DescribeDBClusterBacktracks" // DescribeDBClusterBacktracksRequest generates a "aws/request.Request" representing the @@ -3941,7 +4351,7 @@ func (c *RDS) DescribeDBClusterBacktracksRequest(input *DescribeDBClusterBacktra // // Returns information about backtracks for a DB cluster. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -4111,7 +4521,7 @@ func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterPa // parameter is specified, the list will contain only the description of the // specified DB cluster parameter group. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -4196,7 +4606,7 @@ func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParamet // Returns the detailed parameter list for a particular DB cluster parameter // group. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -4373,7 +4783,7 @@ func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshot // Returns information about DB cluster snapshots. This API action supports // pagination. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -4464,7 +4874,7 @@ func (c *RDS) DescribeDBClustersRequest(input *DescribeDBClustersInput) (req *re // Returns information about provisioned Aurora DB clusters. This API supports // pagination. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -4513,7 +4923,7 @@ func (c *RDS) DescribeDBClustersWithContext(ctx aws.Context, input *DescribeDBCl // // Example iterating over at most 3 pages of a DescribeDBClusters operation. 
// pageNum := 0 // err := client.DescribeDBClustersPages(params, -// func(page *DescribeDBClustersOutput, lastPage bool) bool { +// func(page *rds.DescribeDBClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4545,10 +4955,12 @@ func (c *RDS) DescribeDBClustersPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4643,7 +5055,7 @@ func (c *RDS) DescribeDBEngineVersionsWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeDBEngineVersions operation. // pageNum := 0 // err := client.DescribeDBEngineVersionsPages(params, -// func(page *DescribeDBEngineVersionsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBEngineVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4675,10 +5087,12 @@ func (c *RDS) DescribeDBEngineVersionsPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBEngineVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4784,7 +5198,7 @@ func (c *RDS) DescribeDBInstanceAutomatedBackupsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeDBInstanceAutomatedBackups operation. // pageNum := 0 // err := client.DescribeDBInstanceAutomatedBackupsPages(params, -// func(page *DescribeDBInstanceAutomatedBackupsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBInstanceAutomatedBackupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4816,10 +5230,12 @@ func (c *RDS) DescribeDBInstanceAutomatedBackupsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBInstanceAutomatedBackupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBInstanceAutomatedBackupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4919,7 +5335,7 @@ func (c *RDS) DescribeDBInstancesWithContext(ctx aws.Context, input *DescribeDBI // // Example iterating over at most 3 pages of a DescribeDBInstances operation. // pageNum := 0 // err := client.DescribeDBInstancesPages(params, -// func(page *DescribeDBInstancesOutput, lastPage bool) bool { +// func(page *rds.DescribeDBInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4951,10 +5367,12 @@ func (c *RDS) DescribeDBInstancesPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5054,7 +5472,7 @@ func (c *RDS) DescribeDBLogFilesWithContext(ctx aws.Context, input *DescribeDBLo // // Example iterating over at most 3 pages of a DescribeDBLogFiles operation. 
// pageNum := 0 // err := client.DescribeDBLogFilesPages(params, -// func(page *DescribeDBLogFilesOutput, lastPage bool) bool { +// func(page *rds.DescribeDBLogFilesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5086,10 +5504,12 @@ func (c *RDS) DescribeDBLogFilesPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBLogFilesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBLogFilesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5191,7 +5611,7 @@ func (c *RDS) DescribeDBParameterGroupsWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribeDBParameterGroups operation. // pageNum := 0 // err := client.DescribeDBParameterGroupsPages(params, -// func(page *DescribeDBParameterGroupsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBParameterGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5223,10 +5643,12 @@ func (c *RDS) DescribeDBParameterGroupsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBParameterGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBParameterGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5326,7 +5748,7 @@ func (c *RDS) DescribeDBParametersWithContext(ctx aws.Context, input *DescribeDB // // Example iterating over at most 3 pages of a DescribeDBParameters operation. // pageNum := 0 // err := client.DescribeDBParametersPages(params, -// func(page *DescribeDBParametersOutput, lastPage bool) bool { +// func(page *rds.DescribeDBParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5358,10 +5780,12 @@ func (c *RDS) DescribeDBParametersPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5463,7 +5887,7 @@ func (c *RDS) DescribeDBSecurityGroupsWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeDBSecurityGroups operation. // pageNum := 0 // err := client.DescribeDBSecurityGroupsPages(params, -// func(page *DescribeDBSecurityGroupsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5495,10 +5919,12 @@ func (c *RDS) DescribeDBSecurityGroupsPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5688,7 +6114,7 @@ func (c *RDS) DescribeDBSnapshotsWithContext(ctx aws.Context, input *DescribeDBS // // Example iterating over at most 3 pages of a DescribeDBSnapshots operation. 
// pageNum := 0 // err := client.DescribeDBSnapshotsPages(params, -// func(page *DescribeDBSnapshotsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBSnapshotsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5720,10 +6146,12 @@ func (c *RDS) DescribeDBSnapshotsPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBSnapshotsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBSnapshotsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5826,7 +6254,7 @@ func (c *RDS) DescribeDBSubnetGroupsWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeDBSubnetGroups operation. // pageNum := 0 // err := client.DescribeDBSubnetGroupsPages(params, -// func(page *DescribeDBSubnetGroupsOutput, lastPage bool) bool { +// func(page *rds.DescribeDBSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5858,10 +6286,12 @@ func (c *RDS) DescribeDBSubnetGroupsPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDBSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5912,7 +6342,7 @@ func (c *RDS) DescribeEngineDefaultClusterParametersRequest(input *DescribeEngin // Returns the default engine and system parameter information for the cluster // database engine. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6035,7 +6465,7 @@ func (c *RDS) DescribeEngineDefaultParametersWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. // pageNum := 0 // err := client.DescribeEngineDefaultParametersPages(params, -// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// func(page *rds.DescribeEngineDefaultParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6067,10 +6497,12 @@ func (c *RDS) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6120,7 +6552,7 @@ func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput // // Displays a list of categories for all event source types, or, if specified, // for a specified source type. You can see a list of the event categories and -// source types in the Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// source types in the Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) // topic in the Amazon RDS User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -6251,7 +6683,7 @@ func (c *RDS) DescribeEventSubscriptionsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. // pageNum := 0 // err := client.DescribeEventSubscriptionsPages(params, -// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// func(page *rds.DescribeEventSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6283,10 +6715,12 @@ func (c *RDS) DescribeEventSubscriptionsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6385,7 +6819,7 @@ func (c *RDS) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsIn // // Example iterating over at most 3 pages of a DescribeEvents operation. // pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *rds.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6417,10 +6851,12 @@ func (c *RDS) DescribeEventsPagesWithContext(ctx aws.Context, input *DescribeEve }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6477,7 +6913,7 @@ func (c *RDS) DescribeGlobalClustersRequest(input *DescribeGlobalClustersInput) // Returns information about Aurora global database clusters. This API supports // pagination. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -6525,7 +6961,7 @@ func (c *RDS) DescribeGlobalClustersWithContext(ctx aws.Context, input *Describe // // Example iterating over at most 3 pages of a DescribeGlobalClusters operation. // pageNum := 0 // err := client.DescribeGlobalClustersPages(params, -// func(page *DescribeGlobalClustersOutput, lastPage bool) bool { +// func(page *rds.DescribeGlobalClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6557,10 +6993,150 @@ func (c *RDS) DescribeGlobalClustersPagesWithContext(ctx aws.Context, input *Des }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeGlobalClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeGlobalClustersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstallationMedia = "DescribeInstallationMedia" + +// DescribeInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
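As a concrete reading of the "inject custom logic" note above, a sketch that applies a request option before Send. A request.Option is just func(*request.Request), so the underlying *http.Request can be mutated before the call goes out; svc is an assumed client and the header is purely illustrative.

    req, resp := svc.DescribeInstallationMediaRequest(&rds.DescribeInstallationMediaInput{})
    req.ApplyOptions(func(r *request.Request) {
        // Hypothetical header, shown only to demonstrate option injection.
        r.HTTPRequest.Header.Set("X-Trace-Id", "example")
    })
    if err := req.Send(); err == nil {
        fmt.Println(resp) // resp is now filled
    }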
+// +// See DescribeInstallationMedia for more information on using the DescribeInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstallationMediaRequest method. +// req, resp := client.DescribeInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMediaRequest(input *DescribeInstallationMediaInput) (req *request.Request, output *DescribeInstallationMediaOutput) { + op := &request.Operation{ + Name: opDescribeInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstallationMediaInput{} + } + + output = &DescribeInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstallationMedia API operation for Amazon Relational Database Service. +// +// Describes the available installation media for a DB engine that requires +// an on-premises customer provided license, such as Microsoft SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation medium. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMedia(input *DescribeInstallationMediaInput) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + return out, req.Send() +} + +// DescribeInstallationMediaWithContext is the same as DescribeInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, opts ...request.Option) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstallationMediaPages iterates over the pages of a DescribeInstallationMedia operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstallationMedia method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
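Alongside the generated example below, a sketch of the more common use of this pagination contract: accumulate every page instead of stopping after a fixed count. It assumes an initialized client svc and relies on the output's InstallationMedia field as generated later in this file.

    var all []*rds.InstallationMedia
    err := svc.DescribeInstallationMediaPages(&rds.DescribeInstallationMediaInput{},
        func(page *rds.DescribeInstallationMediaOutput, lastPage bool) bool {
            all = append(all, page.InstallationMedia...)
            return true // returning false stops the iteration early
        })
    if err != nil {
        fmt.Println(err)
    }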
+// +// // Example iterating over at most 3 pages of a DescribeInstallationMedia operation. +// pageNum := 0 +// err := client.DescribeInstallationMediaPages(params, +// func(page *rds.DescribeInstallationMediaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeInstallationMediaPages(input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool) error { + return c.DescribeInstallationMediaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstallationMediaPagesWithContext same as DescribeInstallationMediaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaPagesWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstallationMediaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstallationMediaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstallationMediaOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6655,7 +7231,7 @@ func (c *RDS) DescribeOptionGroupOptionsWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeOptionGroupOptions operation. // pageNum := 0 // err := client.DescribeOptionGroupOptionsPages(params, -// func(page *DescribeOptionGroupOptionsOutput, lastPage bool) bool { +// func(page *rds.DescribeOptionGroupOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6687,10 +7263,12 @@ func (c *RDS) DescribeOptionGroupOptionsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOptionGroupOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOptionGroupOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6790,7 +7368,7 @@ func (c *RDS) DescribeOptionGroupsWithContext(ctx aws.Context, input *DescribeOp // // Example iterating over at most 3 pages of a DescribeOptionGroups operation. // pageNum := 0 // err := client.DescribeOptionGroupsPages(params, -// func(page *DescribeOptionGroupsOutput, lastPage bool) bool { +// func(page *rds.DescribeOptionGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6822,10 +7400,12 @@ func (c *RDS) DescribeOptionGroupsPagesWithContext(ctx aws.Context, input *Descr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOptionGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOptionGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6920,7 +7500,7 @@ func (c *RDS) DescribeOrderableDBInstanceOptionsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeOrderableDBInstanceOptions operation. 
// pageNum := 0 // err := client.DescribeOrderableDBInstanceOptionsPages(params, -// func(page *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { +// func(page *rds.DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6952,10 +7532,12 @@ func (c *RDS) DescribeOrderableDBInstanceOptionsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOrderableDBInstanceOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7136,7 +7718,7 @@ func (c *RDS) DescribeReservedDBInstancesWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeReservedDBInstances operation. // pageNum := 0 // err := client.DescribeReservedDBInstancesPages(params, -// func(page *DescribeReservedDBInstancesOutput, lastPage bool) bool { +// func(page *rds.DescribeReservedDBInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7168,10 +7750,12 @@ func (c *RDS) DescribeReservedDBInstancesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedDBInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedDBInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7271,7 +7855,7 @@ func (c *RDS) DescribeReservedDBInstancesOfferingsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeReservedDBInstancesOfferings operation. // pageNum := 0 // err := client.DescribeReservedDBInstancesOfferingsPages(params, -// func(page *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) bool { +// func(page *rds.DescribeReservedDBInstancesOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7303,10 +7887,12 @@ func (c *RDS) DescribeReservedDBInstancesOfferingsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedDBInstancesOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedDBInstancesOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7569,7 +8155,7 @@ func (c *RDS) DownloadDBLogFilePortionWithContext(ctx aws.Context, input *Downlo // // Example iterating over at most 3 pages of a DownloadDBLogFilePortion operation. // pageNum := 0 // err := client.DownloadDBLogFilePortionPages(params, -// func(page *DownloadDBLogFilePortionOutput, lastPage bool) bool { +// func(page *rds.DownloadDBLogFilePortionOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7601,10 +8187,12 @@ func (c *RDS) DownloadDBLogFilePortionPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DownloadDBLogFilePortionOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DownloadDBLogFilePortionOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7664,7 +8252,7 @@ func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *requ // re-establish any existing connections that use those endpoint addresses when // the failover is complete. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? 
(https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -7708,6 +8296,90 @@ func (c *RDS) FailoverDBClusterWithContext(ctx aws.Context, input *FailoverDBClu return out, req.Send() } +const opImportInstallationMedia = "ImportInstallationMedia" + +// ImportInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportInstallationMedia for more information on using the ImportInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportInstallationMediaRequest method. +// req, resp := client.ImportInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMediaRequest(input *ImportInstallationMediaInput) (req *request.Request, output *ImportInstallationMediaOutput) { + op := &request.Operation{ + Name: opImportInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstallationMediaInput{} + } + + output = &ImportInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportInstallationMedia API operation for Amazon Relational Database Service. +// +// Imports the installation media for a DB engine that requires an on-premises +// customer provided license, such as SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation ImportInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeInstallationMediaAlreadyExistsFault "InstallationMediaAlreadyExists" +// The specified installation medium has already been imported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMedia(input *ImportInstallationMediaInput) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + return out, req.Send() +} + +// ImportInstallationMediaWithContext is the same as ImportInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See ImportInstallationMedia for details on how to use this API operation. 
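To make the context contract spelled out below concrete, a sketch that bounds the call with a deadline. aws.Context is satisfied by a standard library context.Context, so the context can be passed directly; every identifier and path below is a placeholder, and the field names (mirroring the import-installation-media CLI parameters) are assumptions rather than values taken from this diff.

    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()
    _, err := svc.ImportInstallationMediaWithContext(ctx, &rds.ImportInstallationMediaInput{
        CustomAvailabilityZoneId:    aws.String("custom-az-1"),                // placeholder
        Engine:                      aws.String("sqlserver-ee"),               // placeholder
        EngineVersion:               aws.String("13.00.5292.0.v1"),            // placeholder
        EngineInstallationMediaPath: aws.String("SQLServerISO/SQLServer.iso"), // placeholder
        OSInstallationMediaPath:     aws.String("WindowsISO/Windows.iso"),     // placeholder
    })
    if err != nil {
        fmt.Println(err) // a cancelled or expired ctx also surfaces here
    }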
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) ImportInstallationMediaWithContext(ctx aws.Context, input *ImportInstallationMediaInput, opts ...request.Option) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -7858,7 +8530,7 @@ func (c *RDS) ModifyCurrentDBClusterCapacityRequest(input *ModifyCurrentDBCluste // // If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, // connections that prevent Aurora Serverless from finding a scaling point might -// be dropped. For more information about scaling points, see Autoscaling for +// be dropped. For more information about scaling points, see Autoscaling for // Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) // in the Amazon Aurora User Guide. // @@ -7950,7 +8622,7 @@ func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request. // // Modify a setting for an Amazon Aurora DB cluster. You can change one or more // database configuration parameters by specifying these parameters and the -// new values in the request. For more information on Amazon Aurora, see What +// new values in the request. For more information on Amazon Aurora, see What // Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // @@ -8001,6 +8673,9 @@ func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request. // * ErrCodeDBClusterAlreadyExistsFault "DBClusterAlreadyExistsFault" // The user already has a DB cluster with the given identifier. // +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBCluster func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutput, error) { req, out := c.ModifyDBClusterRequest(input) @@ -8165,7 +8840,7 @@ func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParamet // one parameter, submit a list of the following: ParameterName, ParameterValue, // and ApplyMethod. A maximum of 20 parameters can be modified in a single request. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // Changes to dynamic parameters are applied immediately. Changes to static @@ -8180,7 +8855,7 @@ func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParamet // when creating the default database for a DB cluster, such as the character // set for the default database defined by the character_set_database parameter. 
// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) -// or the DescribeDBClusterParameters command to verify that your DB cluster +// or the DescribeDBClusterParameters action to verify that your DB cluster // parameter group has been created or modified. // // This action only applies to Aurora DB clusters. @@ -8427,11 +9102,11 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeCertificateNotFoundFault "CertificateNotFound" // CertificateIdentifier doesn't refer to an existing certificate. @@ -8992,7 +9667,7 @@ func (c *RDS) ModifyGlobalClusterRequest(input *ModifyGlobalClusterInput) (req * // Modify a setting for an Amazon Aurora global cluster. You can change one // or more database configuration parameters by specifying these parameters // and the new values in the request. For more information on Amazon Aurora, -// see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -9159,15 +9834,16 @@ func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *re // // Promotes a Read Replica DB instance to a standalone DB instance. // -// Backup duration is a function of the amount of changes to the database since -// the previous backup. If you plan to promote a Read Replica to a standalone -// instance, we recommend that you enable backups and complete at least one -// backup prior to promotion. In addition, a Read Replica cannot be promoted -// to a standalone instance when it is in the backing-up status. If you have -// enabled backups on your Read Replica, configure the automated backup window -// so that daily backups do not interfere with Read Replica promotion. +// * Backup duration is a function of the amount of changes to the database +// since the previous backup. If you plan to promote a Read Replica to a +// standalone instance, we recommend that you enable backups and complete +// at least one backup prior to promotion. In addition, a Read Replica cannot +// be promoted to a standalone instance when it is in the backing-up status. +// If you have enabled backups on your Read Replica, configure the automated +// backup window so that daily backups do not interfere with Read Replica +// promotion. // -// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. +// * This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9601,7 +10277,7 @@ func (c *RDS) RemoveRoleFromDBClusterRequest(input *RemoveRoleFromDBClusterInput // // Disassociates an AWS Identity and Access Management (IAM) role from an Amazon // Aurora DB cluster. For more information, see Authorizing Amazon Aurora MySQL -// to Access Other AWS Services on Your Behalf (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.Authorizing.html) +// to Access Other AWS Services on Your Behalf (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.Authorizing.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -9706,7 +10382,7 @@ func (c *RDS) RemoveRoleFromDBInstanceRequest(input *RemoveRoleFromDBInstanceInp // DBInstanceIdentifier doesn't refer to an existing DB instance. // // * ErrCodeDBInstanceRoleNotFoundFault "DBInstanceRoleNotFound" -// The specified RoleArn value doesn't match the specifed feature for the DB +// The specified RoleArn value doesn't match the specified feature for the DB // instance. // // * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" @@ -9961,7 +10637,7 @@ func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameter // for every DB instance in your DB cluster that you want the updated static // parameter to apply to. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -10140,7 +10816,7 @@ func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input) // // Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. // Amazon RDS must be authorized to access the Amazon S3 bucket and the data -// must be created using the Percona XtraBackup utility as described in Migrating +// must be created using the Percona XtraBackup utility as described in Migrating // Data to an Amazon Aurora MySQL DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.html) // in the Amazon Aurora User Guide. // @@ -10197,6 +10873,9 @@ func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input) // * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" // DBClusterIdentifier doesn't refer to an existing DB cluster. // +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // * ErrCodeInsufficientStorageClusterCapacityFault "InsufficientStorageClusterCapacity" // There is insufficient storage available for the current action. You might // be able to resolve this error by updating your subnet group to use different @@ -10275,10 +10954,10 @@ func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSna // // If a DB cluster snapshot is specified, the target DB cluster is created from // the source DB cluster restore point with the same configuration as the original -// source DB cluster, except that the new DB cluster is created with the default -// security group. +// source DB cluster. 
If you don't specify a security group, the new DB cluster +// is associated with the default security group. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -10349,6 +11028,9 @@ func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSna // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. // +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // * ErrCodeDBClusterParameterGroupNotFoundFault "DBClusterParameterGroupNotFound" // DBClusterParameterGroupName doesn't refer to an existing DB cluster parameter // group. @@ -10432,7 +11114,7 @@ func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPoin // RestoreDBClusterToPointInTime action has completed and the DB cluster is // available. // -// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -10499,6 +11181,9 @@ func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPoin // The request would result in the user exceeding the allowed amount of storage // available across all DB instances. // +// * ErrCodeDomainNotFoundFault "DomainNotFoundFault" +// Domain doesn't refer to an existing Active Directory domain. +// // * ErrCodeDBClusterParameterGroupNotFoundFault "DBClusterParameterGroupNotFound" // DBClusterParameterGroupName doesn't refer to an existing DB cluster parameter // group. @@ -10648,11 +11333,11 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFro // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10800,11 +11485,11 @@ func (c *RDS) RestoreDBInstanceFromS3Request(input *RestoreDBInstanceFromS3Input // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. 
// -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10953,11 +11638,11 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -11059,11 +11744,11 @@ func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIn // DBSecurityGroupName doesn't refer to an existing DB security group. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeInvalidDBSecurityGroupStateFault "InvalidDBSecurityGroupState" // The state of the DB security group doesn't allow deletion. @@ -11090,6 +11775,102 @@ func (c *RDS) RevokeDBSecurityGroupIngressWithContext(ctx aws.Context, input *Re return out, req.Send() } +const opStartActivityStream = "StartActivityStream" + +// StartActivityStreamRequest generates a "aws/request.Request" representing the +// client's request for the StartActivityStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartActivityStream for more information on using the StartActivityStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartActivityStreamRequest method. 
+// req, resp := client.StartActivityStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartActivityStream +func (c *RDS) StartActivityStreamRequest(input *StartActivityStreamInput) (req *request.Request, output *StartActivityStreamOutput) { + op := &request.Operation{ + Name: opStartActivityStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartActivityStreamInput{} + } + + output = &StartActivityStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartActivityStream API operation for Amazon Relational Database Service. +// +// Starts a database activity stream to monitor activity on the database. For +// more information, see Database Activity Streams (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html) +// in the Amazon Aurora User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation StartActivityStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The DB instance isn't in a valid state. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The requested operation can't be performed while the cluster is in this state. +// +// * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" +// The specified resource ID was not found. +// +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" +// DBInstanceIdentifier doesn't refer to an existing DB instance. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartActivityStream +func (c *RDS) StartActivityStream(input *StartActivityStreamInput) (*StartActivityStreamOutput, error) { + req, out := c.StartActivityStreamRequest(input) + return out, req.Send() +} + +// StartActivityStreamWithContext is the same as StartActivityStream with the addition of +// the ability to pass a context and additional request options. +// +// See StartActivityStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) StartActivityStreamWithContext(ctx aws.Context, input *StartActivityStreamInput, opts ...request.Option) (*StartActivityStreamOutput, error) { + req, out := c.StartActivityStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartDBCluster = "StartDBCluster" // StartDBClusterRequest generates a "aws/request.Request" representing the @@ -11137,7 +11918,7 @@ func (c *RDS) StartDBClusterRequest(input *StartDBClusterInput) (req *request.Re // Starts an Amazon Aurora DB cluster that was stopped using the AWS console, // the stop-db-cluster AWS CLI command, or the StopDBCluster action. 
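A one-call sketch of the start path just described, assuming an initialized client svc and a hypothetical cluster identifier:

    _, err := svc.StartDBCluster(&rds.StartDBClusterInput{
        DBClusterIdentifier: aws.String("my-aurora-cluster"), // hypothetical identifier
    })
    if err != nil {
        fmt.Println(err)
    }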
 //
-// For more information, see Stopping and Starting an Aurora Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html)
+// For more information, see Stopping and Starting an Aurora Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html)
 // in the Amazon Aurora User Guide.
 //
 // This action only applies to Aurora DB clusters.
@@ -11228,7 +12009,7 @@ func (c *RDS) StartDBInstanceRequest(input *StartDBInstanceInput) (req *request.
 // Starts an Amazon RDS DB instance that was stopped using the AWS console,
 // the stop-db-instance AWS CLI command, or the StopDBInstance action.
 //
-// For more information, see Starting an Amazon RDS DB instance That Was Previously
+// For more information, see Starting an Amazon RDS DB instance That Was Previously
 // Stopped (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StartInstance.html)
 // in the Amazon RDS User Guide.
 //
@@ -11275,11 +12056,11 @@ func (c *RDS) StartDBInstanceRequest(input *StartDBInstanceInput) (req *request.
 // DBClusterIdentifier doesn't refer to an existing DB cluster.
 //
 //    * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound"
-//    The specified CIDRIP or Amazon EC2 security group isn't authorized for the
-//    specified DB security group.
+//    The specified CIDR IP range or Amazon EC2 security group might not be authorized
+//    for the specified DB security group.
 //
-//    RDS also may not be authorized by using IAM to perform necessary actions
-//    on your behalf.
+//    Or, RDS might not be authorized to perform necessary actions using IAM on
+//    your behalf.
 //
 //    * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault"
 //    An error occurred accessing an AWS KMS key.
@@ -11306,6 +12087,101 @@ func (c *RDS) StartDBInstanceWithContext(ctx aws.Context, input *StartDBInstance
 	return out, req.Send()
 }
 
+const opStopActivityStream = "StopActivityStream"
+
+// StopActivityStreamRequest generates a "aws/request.Request" representing the
+// client's request for the StopActivityStream operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See StopActivityStream for more information on using the StopActivityStream
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the StopActivityStreamRequest method.
+//    req, resp := client.StopActivityStreamRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StopActivityStream
+func (c *RDS) StopActivityStreamRequest(input *StopActivityStreamInput) (req *request.Request, output *StopActivityStreamOutput) {
+	op := &request.Operation{
+		Name:       opStopActivityStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopActivityStreamInput{}
+	}
+
+	output = &StopActivityStreamOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// StopActivityStream API operation for Amazon Relational Database Service.
+// +// Stops a database activity stream that was started using the AWS console, +// the start-activity-stream AWS CLI command, or the StartActivityStream action. +// +// For more information, see Database Activity Streams (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html) +// in the Amazon Aurora User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation StopActivityStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The DB instance isn't in a valid state. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The requested operation can't be performed while the cluster is in this state. +// +// * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" +// The specified resource ID was not found. +// +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" +// DBInstanceIdentifier doesn't refer to an existing DB instance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StopActivityStream +func (c *RDS) StopActivityStream(input *StopActivityStreamInput) (*StopActivityStreamOutput, error) { + req, out := c.StopActivityStreamRequest(input) + return out, req.Send() +} + +// StopActivityStreamWithContext is the same as StopActivityStream with the addition of +// the ability to pass a context and additional request options. +// +// See StopActivityStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) StopActivityStreamWithContext(ctx aws.Context, input *StopActivityStreamInput, opts ...request.Option) (*StopActivityStreamOutput, error) { + req, out := c.StopActivityStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStopDBCluster = "StopDBCluster" // StopDBClusterRequest generates a "aws/request.Request" representing the @@ -11355,7 +12231,7 @@ func (c *RDS) StopDBClusterRequest(input *StopDBClusterInput) (req *request.Requ // Aurora also retains the transaction logs so you can do a point-in-time restore // if necessary. // -// For more information, see Stopping and Starting an Aurora Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html) +// For more information, see Stopping and Starting an Aurora Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-cluster-stop-start.html) // in the Amazon Aurora User Guide. // // This action only applies to Aurora DB clusters. @@ -11448,7 +12324,7 @@ func (c *RDS) StopDBInstanceRequest(input *StopDBInstanceInput) (req *request.Re // group, and option group membership. Amazon RDS also retains the transaction // logs so you can do a point-in-time restore if necessary. 
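A sketch of the stop path just described, including the optional snapshot that can be taken as the instance stops; both identifiers are hypothetical and svc is an assumed client.

    _, err := svc.StopDBInstance(&rds.StopDBInstanceInput{
        DBInstanceIdentifier: aws.String("my-db-instance"),          // hypothetical identifier
        DBSnapshotIdentifier: aws.String("my-db-instance-pre-stop"), // optional snapshot before stopping
    })
    if err != nil {
        fmt.Println(err)
    }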
// -// For more information, see Stopping an Amazon RDS DB Instance Temporarily +// For more information, see Stopping an Amazon RDS DB Instance Temporarily // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html) // in the Amazon RDS User Guide. // @@ -11500,8 +12376,81 @@ func (c *RDS) StopDBInstanceWithContext(ctx aws.Context, input *StopDBInstanceIn return out, req.Send() } -// Describes a quota for an AWS account, for example, the number of DB instances -// allowed. +// Describes a quota for an AWS account. +// +// The following are account quotas: +// +// * AllocatedStorage - The total allocated storage per account, in GiB. +// The used value is the total allocated storage in the account, in GiB. +// +// * AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB +// security group. The used value is the highest number of ingress rules +// in a DB security group in the account. Other DB security groups in the +// account might have a lower number of ingress rules. +// +// * CustomEndpointsPerDBCluster - The number of custom endpoints per DB +// cluster. The used value is the highest number of custom endpoints in a +// DB clusters in the account. Other DB clusters in the account might have +// a lower number of custom endpoints. +// +// * DBClusterParameterGroups - The number of DB cluster parameter groups +// per account, excluding default parameter groups. The used value is the +// count of nondefault DB cluster parameter groups in the account. +// +// * DBClusterRoles - The number of associated AWS Identity and Access Management +// (IAM) roles per DB cluster. The used value is the highest number of associated +// IAM roles for a DB cluster in the account. Other DB clusters in the account +// might have a lower number of associated IAM roles. +// +// * DBClusters - The number of DB clusters per account. The used value is +// the count of DB clusters in the account. +// +// * DBInstanceRoles - The number of associated IAM roles per DB instance. +// The used value is the highest number of associated IAM roles for a DB +// instance in the account. Other DB instances in the account might have +// a lower number of associated IAM roles. +// +// * DBInstances - The number of DB instances per account. The used value +// is the count of the DB instances in the account. +// +// * DBParameterGroups - The number of DB parameter groups per account, excluding +// default parameter groups. The used value is the count of nondefault DB +// parameter groups in the account. +// +// * DBSecurityGroups - The number of DB security groups (not VPC security +// groups) per account, excluding the default security group. The used value +// is the count of nondefault DB security groups in the account. +// +// * DBSubnetGroups - The number of DB subnet groups per account. The used +// value is the count of the DB subnet groups in the account. +// +// * EventSubscriptions - The number of event subscriptions per account. +// The used value is the count of the event subscriptions in the account. +// +// * ManualSnapshots - The number of manual DB snapshots per account. The +// used value is the count of the manual DB snapshots in the account. +// +// * OptionGroups - The number of DB option groups per account, excluding +// default option groups. The used value is the count of nondefault DB option +// groups in the account. +// +// * ReadReplicasPerMaster - The number of Read Replicas per DB instance. 
+// The used value is the highest number of Read Replicas for a DB instance +// in the account. Other DB instances in the account might have a lower number +// of Read Replicas. +// +// * ReservedDBInstances - The number of reserved DB instances per account. +// The used value is the count of the active reserved DB instances in the +// account. +// +// * SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. +// The used value is highest number of subnets for a DB subnet group in the +// account. Other DB subnet groups in the account might have a lower number +// of subnets. +// +// For more information, see Limits (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html) +// in the Amazon RDS User Guide and Limits (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html) +// in the Amazon Aurora User Guide. type AccountQuota struct { _ struct{} `type:"structure"` @@ -11797,7 +12746,7 @@ type AddTagsToResourceInput struct { _ struct{} `type:"structure"` // The Amazon RDS resource that the tags are added to. This value is an Amazon - // Resource Name (ARN). For information about creating an ARN, see Constructing + // Resource Name (ARN). For information about creating an ARN, see Constructing // an RDS Amazon Resource Name (ARN) (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing). // // ResourceName is a required field @@ -11887,7 +12836,7 @@ type ApplyPendingMaintenanceActionInput struct { OptInType *string `type:"string" required:"true"` // The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance - // action applies to. For information about creating an ARN, see Constructing + // action applies to. For information about creating an ARN, see Constructing // an RDS Amazon Resource Name (ARN) (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing). // // ResourceIdentifier is a required field @@ -11986,7 +12935,7 @@ type AuthorizeDBSecurityGroupIngressInput struct { EC2SecurityGroupName *string `type:"string"` // AWS account number of the owner of the EC2 security group specified in the - // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // EC2SecurityGroupName parameter. The AWS access key ID isn't an acceptable // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId // must be provided. @@ -12074,9 +13023,8 @@ func (s *AuthorizeDBSecurityGroupIngressOutput) SetDBSecurityGroup(v *DBSecurity // Contains Availability Zone information. // -// This data type is used as an element in the following data type: -// -// * OrderableDBInstanceOption +// This data type is used as an element in the OrderableDBInstanceOption data +// type. type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -12154,7 +13102,7 @@ type BacktrackDBClusterInput struct { // 8601 format. For more information about ISO 8601, see the ISO8601 Wikipedia // page. (http://en.wikipedia.org/wiki/ISO_8601) // - // If the specified time is not a consistent time for the DB cluster, Aurora + // If the specified time isn't a consistent time for the DB cluster, Aurora // automatically chooses the nearest possible consistent time for the DB cluster. 
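The BacktrackTo semantics described above, together with the Force and UseEarliestTimeOnPointInTimeUnavailable flags rewritten just below, are easiest to see in a call. A rough sketch with a placeholder cluster identifier and default credentials:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	out, err := svc.BacktrackDBCluster(&rds.BacktrackDBClusterInput{
		DBClusterIdentifier: aws.String("my-aurora-cluster"), // placeholder
		// Rewind one hour; the target must be earlier than the current time.
		BacktrackTo: aws.Time(time.Now().UTC().Add(-1 * time.Hour)),
		// Backtrack even when binary logging is enabled, which would
		// otherwise cause an error.
		Force: aws.Bool(true),
		// If the target is before the earliest available backtrack time,
		// fall back to that earliest time instead of failing.
		UseEarliestTimeOnPointInTimeUnavailable: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("BacktrackDBCluster failed:", err)
		return
	}
	fmt.Println("backtrack status:", aws.StringValue(out.Status))
}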
// // Constraints: @@ -12184,13 +13132,16 @@ type BacktrackDBClusterInput struct { // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` - // A value that, if specified, forces the DB cluster to backtrack when binary - // logging is enabled. Otherwise, an error occurs when binary logging is enabled. + // A value that indicates whether to force the DB cluster to backtrack when + // binary logging is enabled. Otherwise, an error occurs when binary logging + // is enabled. Force *bool `type:"boolean"` - // If BacktrackTo is set to a timestamp earlier than the earliest backtrack - // time, this value backtracks the DB cluster to the earliest possible backtrack - // time. Otherwise, an error occurs. + // A value that indicates whether to backtrack the DB cluster to the earliest + // possible backtrack time when BacktrackTo is set to a timestamp earlier than + // the earliest backtrack time. When this parameter is disabled and BacktrackTo + // is set to a timestamp earlier than the earliest backtrack time, an error + // occurs. UseEarliestTimeOnPointInTimeUnavailable *bool `type:"boolean"` } @@ -12435,7 +13386,7 @@ func (s *CharacterSet) SetCharacterSetName(v string) *CharacterSet { // The EnableLogTypes and DisableLogTypes arrays determine which logs will be // exported (or not exported) to CloudWatch Logs. The values within these arrays // depend on the DB engine being used. For more information, see Publishing -// Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) +// Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon RDS User Guide. type CloudwatchLogsExportConfiguration struct { _ struct{} `type:"structure"` @@ -12473,7 +13424,7 @@ type CopyDBClusterParameterGroupInput struct { _ struct{} `type:"structure"` // The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter - // group. For information about creating an ARN, see Constructing an ARN for + // group. For information about creating an ARN, see Constructing an ARN for // Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon Aurora User Guide. // @@ -12600,8 +13551,8 @@ func (s *CopyDBClusterParameterGroupOutput) SetDBClusterParameterGroup(v *DBClus type CopyDBClusterSnapshotInput struct { _ struct{} `type:"structure"` - // True to copy all tags from the source DB cluster snapshot to the target DB - // cluster snapshot, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the source DB cluster + // snapshot to the target DB cluster snapshot. By default, tags are not copied. CopyTags *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -12632,7 +13583,8 @@ type CopyDBClusterSnapshotInput struct { // The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot // API action in the AWS Region that contains the source DB cluster snapshot // to copy. The PreSignedUrl parameter must be used when copying an encrypted - // DB cluster snapshot from another AWS Region. + // DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when + // you are copying an encrypted DB cluster snapshot in the same AWS Region. 
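For the common same-Region case just mentioned, no pre-signed URL is involved at all. A short sketch with placeholder snapshot identifiers; the cross-region variant is detailed in the lines that follow:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Same-Region copy: neither PreSignedUrl nor SourceRegion is needed.
	// CopyTags carries the source snapshot's tags over to the copy.
	out, err := svc.CopyDBClusterSnapshot(&rds.CopyDBClusterSnapshotInput{
		SourceDBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot1"), // placeholder
		TargetDBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot2"), // placeholder
		CopyTags:                          aws.Bool(true),
	})
	if err != nil {
		fmt.Println("CopyDBClusterSnapshot failed:", err)
		return
	}
	fmt.Println("copy status:", aws.StringValue(out.DBClusterSnapshot.Status))
}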
// // The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot // API action that can be executed in the source AWS Region that contains the @@ -12655,13 +13607,17 @@ type CopyDBClusterSnapshotInput struct { // the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier // looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115. // - // To learn how to generate a Signature Version 4 signed request, see Authenticating + // To learn how to generate a Signature Version 4 signed request, see Authenticating // Requests: Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // + // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion + // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. + // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // for the operation that can be executed in the source AWS Region. PreSignedUrl *string `type:"string"` - // The identifier of the DB cluster snapshot to copy. This parameter is not - // case-sensitive. + // The identifier of the DB cluster snapshot to copy. This parameter isn't case-sensitive. // // You can't copy an encrypted, shared DB cluster snapshot from one AWS Region // to another. @@ -12674,8 +13630,8 @@ type CopyDBClusterSnapshotInput struct { // a valid DB snapshot identifier. // // * If the source snapshot is in a different AWS Region than the copy, specify - // a valid DB cluster snapshot ARN. For more information, go to Copying - // Snapshots Across AWS Regions (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html#USER_CopySnapshot.AcrossRegions) + // a valid DB cluster snapshot ARN. For more information, go to Copying Snapshots + // Across AWS Regions (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html#USER_CopySnapshot.AcrossRegions) // in the Amazon Aurora User Guide. // // Example: my-cluster-snapshot1 @@ -12693,7 +13649,7 @@ type CopyDBClusterSnapshotInput struct { Tags []*Tag `locationNameList:"Tag" type:"list"` // The identifier of the new DB cluster snapshot to create from the source DB - // cluster snapshot. This parameter is not case-sensitive. + // cluster snapshot. This parameter isn't case-sensitive. // // Constraints: // @@ -12813,7 +13769,7 @@ type CopyDBParameterGroupInput struct { _ struct{} `type:"structure"` // The identifier or ARN for the source DB parameter group. For information - // about creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) + // about creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. // // Constraints: @@ -12935,8 +13891,8 @@ func (s *CopyDBParameterGroupOutput) SetDBParameterGroup(v *DBParameterGroup) *C type CopyDBSnapshotInput struct { _ struct{} `type:"structure"` - // True to copy all tags from the source DB snapshot to the target DB snapshot, - // and otherwise false. The default is false. 
+ // A value that indicates whether to copy all tags from the source DB snapshot + // to the target DB snapshot. By default, tags are not copied. CopyTags *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -12978,9 +13934,8 @@ type CopyDBSnapshotInput struct { // to copy. // // You must specify this parameter when you copy an encrypted DB snapshot from - // another AWS Region by using the Amazon RDS API. You can specify the --source-region - // option instead of this parameter when you copy an encrypted DB snapshot from - // another AWS Region by using the AWS CLI. + // another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl + // when you are copying an encrypted DB snapshot in the same AWS Region. // // The presigned URL must be a valid request for the CopyDBSnapshot API action // that can be executed in the source AWS Region that contains the encrypted @@ -12989,31 +13944,32 @@ type CopyDBSnapshotInput struct { // // * DestinationRegion - The AWS Region that the encrypted DB snapshot is // copied to. This AWS Region is the same one where the CopyDBSnapshot action - // is called that contains this presigned URL. - // - // For example, if you copy an encrypted DB snapshot from the us-west-2 AWS - // Region to the us-east-1 AWS Region, then you call the CopyDBSnapshot action - // in the us-east-1 AWS Region and provide a presigned URL that contains - // a call to the CopyDBSnapshot action in the us-west-2 AWS Region. For this - // example, the DestinationRegion in the presigned URL must be set to the - // us-east-1 AWS Region. + // is called that contains this presigned URL. For example, if you copy an + // encrypted DB snapshot from the us-west-2 AWS Region to the us-east-1 AWS + // Region, then you call the CopyDBSnapshot action in the us-east-1 AWS Region + // and provide a presigned URL that contains a call to the CopyDBSnapshot + // action in the us-west-2 AWS Region. For this example, the DestinationRegion + // in the presigned URL must be set to the us-east-1 AWS Region. // // * KmsKeyId - The AWS KMS key identifier for the key to use to encrypt // the copy of the DB snapshot in the destination AWS Region. This is the // same identifier for both the CopyDBSnapshot action that is called in the // destination AWS Region, and the action contained in the presigned URL. // - // // * SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted // snapshot to be copied. This identifier must be in the Amazon Resource // Name (ARN) format for the source AWS Region. For example, if you are copying // an encrypted DB snapshot from the us-west-2 AWS Region, then your SourceDBSnapshotIdentifier // looks like the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115. // - // // To learn how to generate a Signature Version 4 signed request, see Authenticating // Requests: Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // + // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion + // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. + // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // for the operation that can be executed in the source AWS Region. 
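The SourceRegion shortcut described above reduces to one field on the SDK side. A sketch of the us-west-2 to us-east-1 copy used as the running example, with a placeholder account ID and KMS key alias; the client is constructed in the destination Region and the SDK presigns the source-Region request:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// The client runs in the destination Region (us-east-1).
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := rds.New(sess)

	out, err := svc.CopyDBSnapshot(&rds.CopyDBSnapshotInput{
		// Cross-region sources must be identified by ARN.
		SourceDBSnapshotIdentifier: aws.String("arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115"),
		TargetDBSnapshotIdentifier: aws.String("mysql-instance1-snapshot-copy"),
		// Encrypted copies need a KMS key in the destination Region.
		KmsKeyId: aws.String("alias/my-destination-key"), // placeholder
		// With SourceRegion set, the SDK autogenerates PreSignedUrl.
		SourceRegion: aws.String("us-west-2"),
	})
	if err != nil {
		fmt.Println("CopyDBSnapshot failed:", err)
		return
	}
	fmt.Println("copy status:", aws.StringValue(out.DBSnapshot.Status))
}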
PreSignedUrl *string `type:"string"` // The identifier for the source DB snapshot. @@ -13178,7 +14134,7 @@ type CopyOptionGroupInput struct { _ struct{} `type:"structure"` // The identifier or ARN for the source option group. For information about - // creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) + // creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. // // Constraints: @@ -13297,6 +14253,105 @@ func (s *CopyOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CopyOptionGroupO return s } +type CreateCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The name of the custom Availability Zone (AZ). + // + // CustomAvailabilityZoneName is a required field + CustomAvailabilityZoneName *string `type:"string" required:"true"` + + // The ID of an existing virtual private network (VPN) between the Amazon RDS + // website and the VMware vSphere cluster. + ExistingVpnId *string `type:"string"` + + // The name of a new VPN tunnel between the Amazon RDS website and the VMware + // vSphere cluster. + // + // Specify this parameter only if ExistingVpnId isn't specified. + NewVpnTunnelName *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + // + // Specify this parameter only if ExistingVpnId isn't specified. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneName == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetCustomAvailabilityZoneName(v string) *CreateCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetExistingVpnId sets the ExistingVpnId field's value. +func (s *CreateCustomAvailabilityZoneInput) SetExistingVpnId(v string) *CreateCustomAvailabilityZoneInput { + s.ExistingVpnId = &v + return s +} + +// SetNewVpnTunnelName sets the NewVpnTunnelName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetNewVpnTunnelName(v string) *CreateCustomAvailabilityZoneInput { + s.NewVpnTunnelName = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *CreateCustomAvailabilityZoneInput) SetVpnTunnelOriginatorIP(v string) *CreateCustomAvailabilityZoneInput { + s.VpnTunnelOriginatorIP = &v + return s +} + +type CreateCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. 
+ // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *CreateCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *CreateCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type CreateDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -13312,7 +14367,7 @@ type CreateDBClusterEndpointInput struct { // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` - // The type of the endpoint. One of: READER, ANY. + // The type of the endpoint. One of: READER, WRITER, ANY. // // EndpointType is a required field EndpointType *string `type:"string" required:"true"` @@ -13402,7 +14457,7 @@ func (s *CreateDBClusterEndpointInput) SetStaticMembers(v []*string) *CreateDBCl type CreateDBClusterEndpointOutput struct { _ struct{} `type:"structure"` - // The type associated with a custom endpoint. One of: READER, ANY. + // The type associated with a custom endpoint. One of: READER, WRITER, ANY. CustomEndpointType *string `type:"string"` // The Amazon Resource Name (ARN) for the endpoint. @@ -13512,8 +14567,8 @@ func (s *CreateDBClusterEndpointOutput) SetStatus(v string) *CreateDBClusterEndp type CreateDBClusterInput struct { _ struct{} `type:"structure"` - // A list of EC2 Availability Zones that instances in the DB cluster can be - // created in. For information on AWS Regions and Availability Zones, see Choosing + // A list of Availability Zones (AZs) where instances in the DB cluster can + // be created. For information on AWS Regions and Availability Zones, see Choosing // the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) // in the Amazon Aurora User Guide. AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` @@ -13529,8 +14584,7 @@ type CreateDBClusterInput struct { // hours). BacktrackWindow *int64 `type:"long"` - // The number of days for which automated backups are retained. You must specify - // a minimum value of 1. + // The number of days for which automated backups are retained. // // Default: 1 // @@ -13543,8 +14597,8 @@ type CreateDBClusterInput struct { // specified CharacterSet. CharacterSetName *string `type:"string"` - // True to copy all tags from the DB cluster to snapshots of the DB cluster, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB cluster to snapshots + // of the DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The DB cluster identifier. This parameter is stored as a lowercase string. @@ -13563,7 +14617,8 @@ type CreateDBClusterInput struct { DBClusterIdentifier *string `type:"string" required:"true"` // The name of the DB cluster parameter group to associate with this DB cluster. - // If this argument is omitted, default.aurora5.6 is used. 
+ // If you do not specify a value, then the default DB cluster parameter group + // for the specified DB engine and version is used. // // Constraints: // @@ -13584,9 +14639,9 @@ type CreateDBClusterInput struct { // you are creating. DatabaseName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -13598,10 +14653,22 @@ type CreateDBClusterInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable the HTTP endpoint for an Aurora + // Serverless DB cluster. By default, the HTTP endpoint is disabled. + // + // When enabled, the HTTP endpoint provides a connectionless web service API + // for running SQL queries on the Aurora Serverless DB cluster. You can also + // query your database from inside the RDS console with the query editor. // - // Default: false + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + // in the Amazon Aurora User Guide. + EnableHttpEndpoint *bool `type:"boolean"` + + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for this DB cluster. @@ -13613,18 +14680,33 @@ type CreateDBClusterInput struct { Engine *string `type:"string" required:"true"` // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, - // or global. + // global, or multimaster. EngineMode *string `type:"string"` // The version number of the database engine to use. 
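The lines below add aws CLI commands for listing valid engine versions; the equivalent lookup from Go goes through the DescribeDBEngineVersions operation. A small sketch that pages through the aurora-mysql versions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Mirrors: aws rds describe-db-engine-versions --engine aurora-mysql
	//          --query "DBEngineVersions[].EngineVersion"
	err := svc.DescribeDBEngineVersionsPages(
		&rds.DescribeDBEngineVersionsInput{Engine: aws.String("aurora-mysql")},
		func(page *rds.DescribeDBEngineVersionsOutput, lastPage bool) bool {
			for _, v := range page.DBEngineVersions {
				fmt.Println(aws.StringValue(v.EngineVersion))
			}
			return true // keep paging
		})
	if err != nil {
		fmt.Println("DescribeDBEngineVersions failed:", err)
	}
}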
// + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // // Aurora MySQL // - // Example: 5.6.10a, 5.7.12 + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 // // Aurora PostgreSQL // - // Example: 9.6.3 + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The global cluster ID of an Aurora cluster that becomes the primary cluster @@ -13638,14 +14720,14 @@ type CreateDBClusterInput struct { // the KMS encryption key used to encrypt the new DB cluster, then you can use // the KMS key alias instead of the ARN for the KMS encryption key. // - // If an encryption key is not specified in KmsKeyId: + // If an encryption key isn't specified in KmsKeyId: // // * If ReplicationSourceIdentifier identifies an encrypted source, then // Amazon RDS will use the encryption key used to encrypt the source. Otherwise, // Amazon RDS will use your default encryption key. // - // * If the StorageEncrypted parameter is true and ReplicationSourceIdentifier - // is not specified, then Amazon RDS will use your default encryption key. + // * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier + // isn't specified, then Amazon RDS will use your default encryption key. // // AWS KMS creates the default encryption key for your AWS account. Your AWS // account has a different default encryption key for each AWS Region. @@ -13710,16 +14792,21 @@ type CreateDBClusterInput struct { // an encrypted DB cluster from the us-west-2 AWS Region, then your ReplicationSourceIdentifier // would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1. // - // To learn how to generate a Signature Version 4 signed request, see Authenticating + // To learn how to generate a Signature Version 4 signed request, see Authenticating // Requests: Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // + // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion + // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. + // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // for the operation that can be executed in the source AWS Region. PreSignedUrl *string `type:"string"` // The daily time range during which automated backups are created if automated // backups are enabled using the BackupRetentionPeriod parameter. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. 
To see the time blocks available, see Adjusting // the Preferred DB Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // @@ -13741,7 +14828,7 @@ type CreateDBClusterInput struct { // // The default is a 30-minute window selected at random from an 8-hour block // of time for each AWS Region, occurring on a random day of the week. To see - // the time blocks available, see Adjusting the Preferred DB Cluster Maintenance + // the time blocks available, see Adjusting the Preferred DB Cluster Maintenance // Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // @@ -13763,11 +14850,10 @@ type CreateDBClusterInput struct { // have the same region as the source ARN. SourceRegion *string `type:"string" ignore:"true"` - // Specifies whether the DB cluster is encrypted. + // A value that indicates whether the DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB cluster. Tags []*Tag `locationNameList:"Tag" type:"list"` // A list of EC2 VPC security groups to associate with this DB cluster. @@ -13872,6 +14958,12 @@ func (s *CreateDBClusterInput) SetEnableCloudwatchLogsExports(v []*string) *Crea return s } +// SetEnableHttpEndpoint sets the EnableHttpEndpoint field's value. +func (s *CreateDBClusterInput) SetEnableHttpEndpoint(v bool) *CreateDBClusterInput { + s.EnableHttpEndpoint = &v + return s +} + // SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value. func (s *CreateDBClusterInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBClusterInput { s.EnableIAMDatabaseAuthentication = &v @@ -14047,8 +15139,7 @@ type CreateDBClusterParameterGroupInput struct { // Description is a required field Description *string `type:"string" required:"true"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB cluster parameter group. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -14135,7 +15226,7 @@ type CreateDBClusterSnapshotInput struct { _ struct{} `type:"structure"` // The identifier of the DB cluster to create a snapshot for. This parameter - // is not case-sensitive. + // isn't case-sensitive. // // Constraints: // @@ -14253,9 +15344,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14263,9 +15354,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. 
+ // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14273,9 +15364,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14283,9 +15374,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 10 to 3072. // @@ -14293,32 +15384,25 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): - // - // Enterprise and Standard editions: Must be an integer from 200 to 16384. + // * General Purpose (SSD) storage (gp2): Enterprise and Standard editions: + // Must be an integer from 200 to 16384. Web and Express editions: Must be + // an integer from 20 to 16384. // - // Web and Express editions: Must be an integer from 20 to 16384. + // * Provisioned IOPS storage (io1): Enterprise and Standard editions: Must + // be an integer from 200 to 16384. Web and Express editions: Must be an + // integer from 100 to 16384. // - // * Provisioned IOPS storage (io1): - // - // Enterprise and Standard editions: Must be an integer from 200 to 16384. - // - // Web and Express editions: Must be an integer from 100 to 16384. - // - // * Magnetic storage (standard): - // - // Enterprise and Standard editions: Must be an integer from 200 to 1024. - // - // Web and Express editions: Must be an integer from 20 to 1024. + // * Magnetic storage (standard): Enterprise and Standard editions: Must + // be an integer from 200 to 1024. Web and Express editions: Must be an integer + // from 20 to 1024. AllocatedStorage *int64 `type:"integer"` - // Indicates that minor engine upgrades are applied automatically to the DB - // instance during the maintenance window. - // - // Default: true + // A value that indicates whether minor engine upgrades are applied automatically + // to the DB instance during the maintenance window. By default, minor engine + // upgrades are applied automatically. AutoMinorVersionUpgrade *bool `type:"boolean"` - // The EC2 Availability Zone that the DB instance is created in. For information + // The Availability Zone (AZ) where the database will be created. For information // on AWS Regions and Availability Zones, see Regions and Availability Zones // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). 
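A plain CreateDBInstance call exercises the storage bounds above (gp2 is now 20 to 65536 GiB for MySQL) and the AvailabilityZone placement rules being documented here. A minimal sketch with placeholder identifiers and password; AvailabilityZone is only valid because this instance is not a Multi-AZ deployment, per the constraint stated just after this sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	out, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
		DBInstanceClass:      aws.String("db.m4.large"),
		Engine:               aws.String("mysql"),
		// gp2 storage for MySQL must be an integer from 20 to 65536 (GiB).
		AllocatedStorage: aws.Int64(20),
		StorageType:      aws.String("gp2"),
		// Valid only because MultiAZ is not enabled on this instance.
		AvailabilityZone:   aws.String("us-east-1d"),
		MasterUsername:     aws.String("admin"),            // placeholder
		MasterUserPassword: aws.String("change-me-please"), // placeholder
	})
	if err != nil {
		fmt.Println("CreateDBInstance failed:", err)
		return
	}
	fmt.Println("instance status:", aws.StringValue(out.DBInstance.DBInstanceStatus))
}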
// @@ -14327,9 +15411,16 @@ type CreateDBInstanceInput struct { // // Example: us-east-1d // - // Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ - // parameter is set to true. The specified Availability Zone must be in the + // Constraint: The AvailabilityZone parameter can't be specified if the DB instance + // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. + // + // If you're creating a DB instance in an RDS on VMware environment, specify + // the identifier of the custom Availability Zone to create the DB instance + // in. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) AvailabilityZone *string `type:"string"` // The number of days for which automated backups are retained. Setting this @@ -14339,7 +15430,7 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The retention period for automated backups is managed by - // the DB cluster. For more information, see CreateDBCluster. + // the DB cluster. // // Default: 1 // @@ -14359,21 +15450,16 @@ type CreateDBInstanceInput struct { // information, see CreateDBCluster. CharacterSetName *string `type:"string"` - // True to copy all tags from the DB instance to snapshots of the DB instance, - // and otherwise false. The default is false. + // A value that indicates whether to copy tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. // // Amazon Aurora // // Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting // this value for an Aurora DB instance has no effect on the DB cluster setting. - // For more information, see CreateDBCluster. CopyTagsToSnapshot *bool `type:"boolean"` // The identifier of the DB cluster that the instance will belong to. - // - // For information on creating a DB cluster, see CreateDBCluster. - // - // Type: String DBClusterIdentifier *string `type:"string"` // The compute and memory capacity of the DB instance, for example, db.m4.large. @@ -14403,12 +15489,10 @@ type CreateDBInstanceInput struct { // The meaning of this parameter differs according to the database engine you // use. // - // Type: String - // // MySQL // // The name of the database to create when the DB instance is created. If this - // parameter is not specified, no database is created in the DB instance. + // parameter isn't specified, no database is created in the DB instance. // // Constraints: // @@ -14419,7 +15503,7 @@ type CreateDBInstanceInput struct { // MariaDB // // The name of the database to create when the DB instance is created. If this - // parameter is not specified, no database is created in the DB instance. + // parameter isn't specified, no database is created in the DB instance. // // Constraints: // @@ -14430,7 +15514,7 @@ type CreateDBInstanceInput struct { // PostgreSQL // // The name of the database to create when the DB instance is created. If this - // parameter is not specified, the default "postgres" database is created in + // parameter isn't specified, the default "postgres" database is created in // the DB instance. // // Constraints: @@ -14461,7 +15545,7 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // The name of the database to create when the primary instance of the DB cluster - // is created. If this parameter is not specified, no database is created in + // is created. 
If this parameter isn't specified, no database is created in // the DB instance. // // Constraints: @@ -14472,8 +15556,8 @@ type CreateDBInstanceInput struct { DBName *string `type:"string"` // The name of the DB parameter group to associate with this DB instance. If - // this argument is omitted, the default DBParameterGroup for the specified - // engine is used. + // you do not specify a value, then the default DB parameter group for the specified + // DB engine and version is used. // // Constraints: // @@ -14494,12 +15578,26 @@ type CreateDBInstanceInput struct { // If there is no DB subnet group, then it is a non-VPC DB instance. DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` - // Specify the Active Directory Domain to create the instance in. + // The Active Directory directory ID to create the DB instance in. Currently, + // only Microsoft SQL Server and Oracle DB instances can be created in an Active + // Directory Domain. + // + // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication + // to authenticate users that connect to the DB instance. For more information, + // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft + // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) + // in the Amazon RDS User Guide. + // + // For Oracle DB instance, Amazon RDS can use Kerberos Authentication to authenticate + // users that connect to the DB instance. For more information, see Using Kerberos + // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // in the Amazon RDS User Guide. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the @@ -14508,19 +15606,19 @@ type CreateDBInstanceInput struct { // The list of log types that need to be enabled for exporting to CloudWatch // Logs. The values in the list depend on the DB engine being used. For more - // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Relational Database Service User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. 
// // You can enable IAM database authentication for the following database engines: // // Amazon Aurora // // Not applicable. Mapping AWS IAM accounts to database accounts is managed - // by the DB cluster. For more information, see CreateDBCluster. + // by the DB cluster. // // MySQL // @@ -14528,10 +15626,23 @@ type CreateDBInstanceInput struct { // // * For MySQL 5.7, minor version 5.7.16 or higher // - // Default: false + // * For MySQL 8.0, minor version 8.0.16 or higher + // + // PostgreSQL + // + // * For PostgreSQL 9.5, minor version 9.5.15 or higher + // + // * For PostgreSQL 9.6, minor version 9.6.11 or higher + // + // * PostgreSQL 10.6, 10.7, and 10.9 + // + // For more information, see IAM Database Authentication for MySQL and PostgreSQL + // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the DB instance, and otherwise false. + // A value that indicates whether to enable Performance Insights for the DB + // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon Relational Database Service User Guide. @@ -14576,7 +15687,7 @@ type CreateDBInstanceInput struct { // The version number of the database engine to use. // - // For a list of valid engine versions, call DescribeDBEngineVersions. + // For a list of valid engine versions, use the DescribeDBEngineVersions action. // // The following are the database engines and links to information about the // major and minor versions that are available with Amazon RDS. Not every database @@ -14585,7 +15696,7 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The version number of the database engine to be used by the - // DB instance is managed by the DB cluster. For more information, see CreateDBCluster. + // DB instance is managed by the DB cluster. // // MariaDB // @@ -14615,8 +15726,7 @@ type CreateDBInstanceInput struct { // The amount of Provisioned IOPS (input/output operations per second) to be // initially allocated for the DB instance. For information about valid Iops - // values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance - // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + // values, see Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) // in the Amazon RDS User Guide. // // Constraints: Must be a multiple between 1 and 50 of the storage amount for @@ -14635,10 +15745,10 @@ type CreateDBInstanceInput struct { // Not applicable. The KMS key identifier is managed by the DB cluster. For // more information, see CreateDBCluster. // - // If the StorageEncrypted parameter is true, and you do not specify a value - // for the KmsKeyId parameter, then Amazon RDS will use your default encryption - // key. AWS KMS creates the default encryption key for your AWS account. Your - // AWS account has a different default encryption key for each AWS Region. + // If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId + // parameter, then Amazon RDS will use your default encryption key. AWS KMS + // creates the default encryption key for your AWS account. Your AWS account + // has a different default encryption key for each AWS Region. 
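Once a DB instance is created with the IAM database authentication option described earlier in this hunk, clients log in with a short-lived token instead of a password. A sketch using the SDK's rdsutils helper, with a placeholder endpoint, Region, and database user; the token is presented as the SQL password over TLS:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds/rdsutils"
)

func main() {
	sess := session.Must(session.NewSession())

	// BuildAuthToken signs a connection request for the instance endpoint
	// using the session's credentials. All values below are placeholders.
	token, err := rdsutils.BuildAuthToken(
		"mydbinstance.abc123xyz.us-east-1.rds.amazonaws.com:3306", // host:port
		"us-east-1",
		"iam_db_user",
		sess.Config.Credentials,
	)
	if err != nil {
		fmt.Println("BuildAuthToken failed:", err)
		return
	}
	fmt.Println("connect with this token as the password:", token)
}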
KmsKeyId *string `type:"string"` // License model information for this DB instance. @@ -14652,7 +15762,6 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The password for the master user is managed by the DB cluster. - // For more information, see CreateDBCluster. // // MariaDB // @@ -14680,7 +15789,6 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The name for the master user is managed by the DB cluster. - // For more information, see CreateDBCluster. // // MariaDB // @@ -14741,6 +15849,10 @@ type CreateDBInstanceInput struct { // * Can't be a reserved word for the chosen database engine. MasterUsername *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collecting Enhanced Monitoring // metrics, specify 0. The default is 0. @@ -14761,9 +15873,9 @@ type CreateDBInstanceInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // A value that specifies whether the DB instance is a Multi-AZ deployment. - // You can't set the AvailabilityZone parameter if the MultiAZ parameter is - // set to true. + // A value that indicates whether the DB instance is a Multi-AZ deployment. + // You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ + // deployment. MultiAZ *bool `type:"boolean"` // Indicates that the DB instance should be associated with the specified option @@ -14777,6 +15889,11 @@ type CreateDBInstanceInput struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the // KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -14839,10 +15956,10 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The daily time range for creating automated backups is managed - // by the DB cluster. For more information, see CreateDBCluster. + // by the DB cluster. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. To see the time blocks available, see Adjusting // the Preferred DB Instance Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) // in the Amazon RDS User Guide. // @@ -14877,7 +15994,7 @@ type CreateDBInstanceInput struct { // A value that specifies the order in which an Aurora Replica is promoted to // the primary instance after a failure of the existing primary instance. 
For - // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) // in the Amazon Aurora User Guide. // // Default: 1 @@ -14885,15 +16002,16 @@ type CreateDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. // // Default: The default behavior varies depending on whether DBSubnetGroupName // is specified. // - // If DBSubnetGroupName is not specified, and PubliclyAccessible is not specified, + // If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, // the following applies: // // * If the default VPC in the target region doesn’t have an Internet gateway @@ -14902,7 +16020,7 @@ type CreateDBInstanceInput struct { // * If the default VPC in the target region has an Internet gateway attached // to it, the DB instance is public. // - // If DBSubnetGroupName is specified, and PubliclyAccessible is not specified, + // If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, // the following applies: // // * If the subnets are part of a VPC that doesn’t have an Internet gateway @@ -14912,14 +16030,12 @@ type CreateDBInstanceInput struct { // to it, the DB instance is public. PubliclyAccessible *bool `type:"boolean"` - // Specifies whether the DB instance is encrypted. + // A value that indicates whether the DB instance is encrypted. By default, + // it isn't encrypted. // // Amazon Aurora // // Not applicable. The encryption for DB instances is managed by the DB cluster. - // For more information, see CreateDBCluster. - // - // Default: false StorageEncrypted *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -14928,11 +16044,10 @@ type CreateDBInstanceInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB instance. Tags []*Tag `locationNameList:"Tag" type:"list"` // The ARN from the key store with which to associate the instance for TDE encryption. @@ -14951,7 +16066,7 @@ type CreateDBInstanceInput struct { // Amazon Aurora // // Not applicable. The associated list of EC2 VPC security groups is managed - // by the DB cluster. 
For more information, see CreateDBCluster. + // by the DB cluster. // // Default: The default EC2 VPC security group for the DB subnet group's VPC. VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` @@ -15142,6 +16257,12 @@ func (s *CreateDBInstanceInput) SetMasterUsername(v string) *CreateDBInstanceInp return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *CreateDBInstanceInput) SetMaxAllocatedStorage(v int64) *CreateDBInstanceInput { + s.MaxAllocatedStorage = &v + return s +} + // SetMonitoringInterval sets the MonitoringInterval field's value. func (s *CreateDBInstanceInput) SetMonitoringInterval(v int64) *CreateDBInstanceInput { s.MonitoringInterval = &v @@ -15284,13 +16405,13 @@ func (s *CreateDBInstanceOutput) SetDBInstance(v *DBInstance) *CreateDBInstanceO type CreateDBInstanceReadReplicaInput struct { _ struct{} `type:"structure"` - // Indicates that minor engine upgrades are applied automatically to the Read - // Replica during the maintenance window. + // A value that indicates whether minor engine upgrades are applied automatically + // to the Read Replica during the maintenance window. // // Default: Inherits from the source DB instance AutoMinorVersionUpgrade *bool `type:"boolean"` - // The Amazon EC2 Availability Zone that the Read Replica is created in. + // The Availability Zone (AZ) where the Read Replica will be created. // // Default: A random, system-chosen Availability Zone in the endpoint's AWS // Region. @@ -15298,8 +16419,8 @@ type CreateDBInstanceReadReplicaInput struct { // Example: us-east-1d AvailabilityZone *string `type:"string"` - // True to copy all tags from the Read Replica to snapshots of the Read Replica, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the Read Replica to + // snapshots of the Read Replica. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Read Replica, for example, db.m4.large. @@ -15318,9 +16439,28 @@ type CreateDBInstanceReadReplicaInput struct { // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` + // The name of the DB parameter group to associate with this DB instance. + // + // If you do not specify a value for DBParameterGroupName, then Amazon RDS uses + // the DBParameterGroup of source DB instance for a same region Read Replica, + // or the default DBParameterGroup for the specified DB engine for a cross region + // Read Replica. + // + // Currently, specifying a parameter group for this operation is only supported + // for Oracle DB instances. + // + // Constraints: + // + // * Must be 1 to 255 letters, numbers, or hyphens. + // + // * First character must be a letter + // + // * Can't end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + // Specifies a DB subnet group for the DB instance. The new DB instance is created // in the VPC associated with the DB subnet group. If no DB subnet group is - // specified, then the new DB instance is not created in a VPC. + // specified, then the new DB instance isn't created in a VPC. // // Constraints: // @@ -15333,46 +16473,51 @@ type CreateDBInstanceReadReplicaInput struct { // the operation is running. // // * All Read Replicas in one AWS Region that are created from the same source - // DB instance must either:> - // - // Specify DB subnet groups from the same VPC. 
All these Read Replicas are created - // in the same VPC. - // - // Not specify a DB subnet group. All these Read Replicas are created outside - // of any VPC. + // DB instance must either:> Specify DB subnet groups from the same VPC. + // All these Read Replicas are created in the same VPC. Not specify a DB + // subnet group. All these Read Replicas are created outside of any VPC. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. DestinationRegion *string `type:"string"` + // The Active Directory directory ID to create the DB instance in. + // + // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate + // users that connect to the DB instance. For more information, see Using Kerberos + // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // in the Amazon RDS User Guide. + Domain *string `type:"string"` + + // Specify the name of the IAM role to be used when making API calls to the + // Directory Service. + DomainIAMRoleName *string `type:"string"` + // The list of logs that the new DB instance is to export to CloudWatch Logs. // The values in the list depend on the DB engine being used. For more information, - // see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // * For MySQL 5.7, minor version 5.7.16 or higher - // - // * Aurora MySQL 5.6 or higher - // - // Default: false + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the Read Replica, and otherwise false. 
+ // A value that indicates whether to enable Performance Insights for the Read + // Replica. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon RDS User Guide. @@ -15418,7 +16563,7 @@ type CreateDBInstanceReadReplicaInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // Specifies whether the Read Replica is in a Multi-AZ deployment. + // A value that indicates whether the Read Replica is in a Multi-AZ deployment. // // You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby // of your replica in another Availability Zone for failover support for the @@ -15433,6 +16578,11 @@ type CreateDBInstanceReadReplicaInput struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the // KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -15450,9 +16600,8 @@ type CreateDBInstanceReadReplicaInput struct { // API action in the source AWS Region that contains the source DB instance. // // You must specify this parameter when you create an encrypted Read Replica - // from another AWS Region by using the Amazon RDS API. You can specify the - // --source-region option instead of this parameter when you create an encrypted - // Read Replica from another AWS Region by using the AWS CLI. + // from another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl + // when you are creating an encrypted Read Replica in the same AWS Region. // // The presigned URL must be a valid request for the CreateDBInstanceReadReplica // API action that can be executed in the source AWS Region that contains the @@ -15461,21 +16610,19 @@ type CreateDBInstanceReadReplicaInput struct { // // * DestinationRegion - The AWS Region that the encrypted Read Replica is // created in. This AWS Region is the same one where the CreateDBInstanceReadReplica - // action is called that contains this presigned URL. - // - // For example, if you create an encrypted DB instance in the us-west-1 AWS - // Region, from a source DB instance in the us-east-2 AWS Region, then you - // call the CreateDBInstanceReadReplica action in the us-east-1 AWS Region - // and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica - // action in the us-west-2 AWS Region. For this example, the DestinationRegion - // in the presigned URL must be set to the us-east-1 AWS Region. + // action is called that contains this presigned URL. For example, if you + // create an encrypted DB instance in the us-west-1 AWS Region, from a source + // DB instance in the us-east-2 AWS Region, then you call the CreateDBInstanceReadReplica + // action in the us-east-1 AWS Region and provide a presigned URL that contains + // a call to the CreateDBInstanceReadReplica action in the us-west-2 AWS + // Region. For this example, the DestinationRegion in the presigned URL must + // be set to the us-east-1 AWS Region. 
// // * KmsKeyId - The AWS KMS key identifier for the key to use to encrypt // the Read Replica in the destination AWS Region. This is the same identifier // for both the CreateDBInstanceReadReplica action that is called in the // destination AWS Region, and the action contained in the presigned URL. // - // // * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted // DB instance to be replicated. This identifier must be in the Amazon Resource // Name (ARN) format for the source AWS Region. For example, if you are creating @@ -15483,21 +16630,26 @@ type CreateDBInstanceReadReplicaInput struct { // then your SourceDBInstanceIdentifier looks like the following example: // arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. // - // // To learn how to generate a Signature Version 4 signed request, see Authenticating // Requests: Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // + // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion + // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. + // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // for the operation that can be executed in the source AWS Region. PreSignedUrl *string `type:"string"` // The number of CPU cores and the number of threads per core for the DB instance // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The identifier of the DB instance that will act as the source for the Read @@ -15527,7 +16679,7 @@ type CreateDBInstanceReadReplicaInput struct { // // * If the source DB instance is in a different AWS Region than the Read // Replica, specify a valid DB instance ARN. For more information, go to - // Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) + // Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. // // SourceDBInstanceIdentifier is a required field @@ -15544,14 +16696,14 @@ type CreateDBInstanceReadReplicaInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. 
For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -15617,6 +16769,12 @@ func (s *CreateDBInstanceReadReplicaInput) SetDBInstanceIdentifier(v string) *Cr return s } +// SetDBParameterGroupName sets the DBParameterGroupName field's value. +func (s *CreateDBInstanceReadReplicaInput) SetDBParameterGroupName(v string) *CreateDBInstanceReadReplicaInput { + s.DBParameterGroupName = &v + return s +} + // SetDBSubnetGroupName sets the DBSubnetGroupName field's value. func (s *CreateDBInstanceReadReplicaInput) SetDBSubnetGroupName(v string) *CreateDBInstanceReadReplicaInput { s.DBSubnetGroupName = &v @@ -15635,6 +16793,18 @@ func (s *CreateDBInstanceReadReplicaInput) SetDestinationRegion(v string) *Creat return s } +// SetDomain sets the Domain field's value. +func (s *CreateDBInstanceReadReplicaInput) SetDomain(v string) *CreateDBInstanceReadReplicaInput { + s.Domain = &v + return s +} + +// SetDomainIAMRoleName sets the DomainIAMRoleName field's value. +func (s *CreateDBInstanceReadReplicaInput) SetDomainIAMRoleName(v string) *CreateDBInstanceReadReplicaInput { + s.DomainIAMRoleName = &v + return s +} + // SetEnableCloudwatchLogsExports sets the EnableCloudwatchLogsExports field's value. func (s *CreateDBInstanceReadReplicaInput) SetEnableCloudwatchLogsExports(v []*string) *CreateDBInstanceReadReplicaInput { s.EnableCloudwatchLogsExports = v @@ -15824,8 +16994,7 @@ type CreateDBParameterGroupInput struct { // Description is a required field Description *string `type:"string" required:"true"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB parameter group. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -15933,8 +17102,7 @@ type CreateDBSecurityGroupInput struct { // DBSecurityGroupName is a required field DBSecurityGroupName *string `type:"string" required:"true"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB security group. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -16134,8 +17302,7 @@ type CreateDBSubnetGroupInput struct { // SubnetIds is a required field SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the DB subnet group. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -16221,8 +17388,9 @@ func (s *CreateDBSubnetGroupOutput) SetDBSubnetGroup(v *DBSubnetGroup) *CreateDB type CreateEventSubscriptionInput struct { _ struct{} `type:"structure"` - // A Boolean value; set to true to activate the subscription, set to false to - // create the subscription but not active it. + // A value that indicates whether to activate the subscription. 
If the event + // notification subscription isn't activated, the subscription is created but + // not active. Enabled *bool `type:"boolean"` // A list of event categories for a SourceType that you want to subscribe to. @@ -16263,7 +17431,7 @@ type CreateEventSubscriptionInput struct { // The type of source that is generating the events. For example, if you want // to be notified of events generated by a DB instance, you would set this parameter - // to db-instance. if this value is not specified, all events are returned. + // to db-instance. if this value isn't specified, all events are returned. // // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group // | db-snapshot | db-cluster-snapshot @@ -16382,7 +17550,7 @@ type CreateGlobalClusterInput struct { DatabaseName *string `type:"string"` // The deletion protection setting for the new global database. The global database - // can't be deleted when this value is set to true. + // can't be deleted when deletion protection is enabled. DeletionProtection *bool `type:"boolean"` // Provides the name of the database engine to be used for this DB cluster. @@ -16512,8 +17680,7 @@ type CreateOptionGroupInput struct { // OptionGroupName is a required field OptionGroupName *string `type:"string" required:"true"` - // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) - // in the Amazon RDS User Guide. + // Tags to assign to the option group. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -16601,6 +17768,64 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr return s } +// A custom Availability Zone (AZ) is an on-premises AZ that is integrated with +// a VMware vSphere cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type CustomAvailabilityZone struct { + _ struct{} `type:"structure"` + + // The identifier of the custom AZ. + // + // Amazon RDS generates a unique identifier when a custom AZ is created. + CustomAvailabilityZoneId *string `type:"string"` + + // The name of the custom AZ. + CustomAvailabilityZoneName *string `type:"string"` + + // The status of the custom AZ. + CustomAvailabilityZoneStatus *string `type:"string"` + + // Information about the virtual private network (VPN) between the VMware vSphere + // cluster and the AWS website. + VpnDetails *VpnDetails `type:"structure"` +} + +// String returns the string representation +func (s CustomAvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomAvailabilityZone) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneId(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneName(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetCustomAvailabilityZoneStatus sets the CustomAvailabilityZoneStatus field's value. 
+func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneStatus(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneStatus = &v + return s +} + +// SetVpnDetails sets the VpnDetails field's value. +func (s *CustomAvailabilityZone) SetVpnDetails(v *VpnDetails) *CustomAvailabilityZone { + s.VpnDetails = v + return s +} + // Contains the details of an Amazon Aurora DB cluster. // // This data type is used as a response element in the DescribeDBClusters, StopDBCluster, @@ -16608,9 +17833,25 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr type DBCluster struct { _ struct{} `type:"structure"` + // The name of the Amazon Kinesis data stream used for the database activity + // stream. + ActivityStreamKinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier used for encrypting messages in the database activity + // stream. + ActivityStreamKmsKeyId *string `type:"string"` + + // The mode of the database activity stream. Database events such as a change + // or access generate an activity stream event. The database session can handle + // these events either synchronously or asynchronously. + ActivityStreamMode *string `type:"string" enum:"ActivityStreamMode"` + + // The status of the database activity stream. + ActivityStreamStatus *string `type:"string" enum:"ActivityStreamStatus"` + // For all database engines except Amazon Aurora, AllocatedStorage specifies // the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage - // always returns 1, because Aurora DB cluster storage size is not fixed, but + // always returns 1, because Aurora DB cluster storage size isn't fixed, but // instead automatically adjusts as needed. AllocatedStorage *int64 `type:"integer"` @@ -16620,8 +17861,8 @@ type DBCluster struct { // on your behalf. AssociatedRoles []*DBClusterRole `locationNameList:"DBClusterRole" type:"list"` - // Provides the list of EC2 Availability Zones that instances in the DB cluster - // can be created in. + // Provides the list of Availability Zones (AZs) where instances in the DB cluster + // can be created. AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` // The number of change records stored for Backtrack. @@ -16657,6 +17898,10 @@ type DBCluster struct { // DB cluster. CopyTagsToSnapshot *bool `type:"boolean"` + // Specifies whether the DB cluster is a clone of a DB cluster owned by a different + // AWS account. + CrossAccountClone *bool `type:"boolean"` + // Identifies all custom endpoints associated with the cluster. CustomEndpoints []*string `type:"list"` @@ -16691,7 +17936,7 @@ type DBCluster struct { DbClusterResourceId *string `type:"string"` // Indicates if the DB cluster has deletion protection enabled. The database - // can't be deleted when this value is set to true. + // can't be deleted when deletion protection is enabled. DeletionProtection *bool `type:"boolean"` // The earliest time to which a DB cluster can be backtracked. @@ -16715,8 +17960,8 @@ type DBCluster struct { // Provides the name of the database engine to be used for this DB cluster. Engine *string `type:"string"` - // The DB engine mode of the DB cluster, either provisioned, serverless, or - // parallelquery. + // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, + // global, or multimaster. EngineMode *string `type:"string"` // Indicates the database engine version. 
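The hunks above add activity-stream metadata (ActivityStreamKinesisStreamName, ActivityStreamKmsKeyId, ActivityStreamMode, ActivityStreamStatus) and the CrossAccountClone flag to DBCluster. A minimal sketch of reading the new fields, assuming only the API surface visible in this diff plus the SDK's standard session helper; the cluster identifier is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// Credentials and region are taken from the environment.
	svc := rds.New(session.Must(session.NewSession()))

	out, err := svc.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: aws.String("example-cluster"), // placeholder
	})
	if err != nil {
		fmt.Println("DescribeDBClusters:", err)
		return
	}
	for _, c := range out.DBClusters {
		// ActivityStreamStatus and CrossAccountClone are the fields introduced above.
		fmt.Printf("%s activity-stream=%s cross-account-clone=%t\n",
			aws.StringValue(c.DBClusterIdentifier),
			aws.StringValue(c.ActivityStreamStatus),
			aws.BoolValue(c.CrossAccountClone))
	}
}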
@@ -16725,26 +17970,22 @@ type DBCluster struct { // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. HostedZoneId *string `type:"string"` - // HTTP endpoint functionality is in beta for Aurora Serverless and is subject - // to change. - // - // Value that is true if the HTTP endpoint for an Aurora Serverless DB cluster - // is enabled and false otherwise. + // A value that indicates whether the HTTP endpoint for an Aurora Serverless + // DB cluster is enabled. // // When enabled, the HTTP endpoint provides a connectionless web service API // for running SQL queries on the Aurora Serverless DB cluster. You can also // query your database from inside the RDS console with the query editor. // - // For more information about Aurora Serverless, see Using Amazon Aurora Serverless - // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. HttpEndpointEnabled *bool `type:"boolean"` - // True if mapping of AWS Identity and Access Management (IAM) accounts to database - // accounts is enabled, and otherwise false. + // A value that indicates whether the mapping of AWS Identity and Access Management + // (IAM) accounts to database accounts is enabled. IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` - // If StorageEncrypted is true, the AWS KMS key identifier for the encrypted + // If StorageEncrypted is enabled, the AWS KMS key identifier for the encrypted // DB cluster. KmsKeyId *string `type:"string"` @@ -16820,6 +18061,30 @@ func (s DBCluster) GoString() string { return s.String() } +// SetActivityStreamKinesisStreamName sets the ActivityStreamKinesisStreamName field's value. +func (s *DBCluster) SetActivityStreamKinesisStreamName(v string) *DBCluster { + s.ActivityStreamKinesisStreamName = &v + return s +} + +// SetActivityStreamKmsKeyId sets the ActivityStreamKmsKeyId field's value. +func (s *DBCluster) SetActivityStreamKmsKeyId(v string) *DBCluster { + s.ActivityStreamKmsKeyId = &v + return s +} + +// SetActivityStreamMode sets the ActivityStreamMode field's value. +func (s *DBCluster) SetActivityStreamMode(v string) *DBCluster { + s.ActivityStreamMode = &v + return s +} + +// SetActivityStreamStatus sets the ActivityStreamStatus field's value. +func (s *DBCluster) SetActivityStreamStatus(v string) *DBCluster { + s.ActivityStreamStatus = &v + return s +} + // SetAllocatedStorage sets the AllocatedStorage field's value. func (s *DBCluster) SetAllocatedStorage(v int64) *DBCluster { s.AllocatedStorage = &v @@ -16886,6 +18151,12 @@ func (s *DBCluster) SetCopyTagsToSnapshot(v bool) *DBCluster { return s } +// SetCrossAccountClone sets the CrossAccountClone field's value. +func (s *DBCluster) SetCrossAccountClone(v bool) *DBCluster { + s.CrossAccountClone = &v + return s +} + // SetCustomEndpoints sets the CustomEndpoints field's value. func (s *DBCluster) SetCustomEndpoints(v []*string) *DBCluster { s.CustomEndpoints = v @@ -17113,7 +18384,7 @@ func (s *DBCluster) SetVpcSecurityGroups(v []*VpcSecurityGroupMembership) *DBClu type DBClusterEndpoint struct { _ struct{} `type:"structure"` - // The type associated with a custom endpoint. One of: READER, ANY. + // The type associated with a custom endpoint. One of: READER, WRITER, ANY. CustomEndpointType *string `type:"string"` // The Amazon Resource Name (ARN) for the endpoint. 
@@ -17237,7 +18508,7 @@ type DBClusterMember struct { // A value that specifies the order in which an Aurora Replica is promoted to // the primary instance after a failure of the existing primary instance. For - // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) // in the Amazon Aurora User Guide. PromotionTier *int64 `type:"integer"` } @@ -17463,8 +18734,8 @@ type DBClusterSnapshot struct { // Specifies the allocated storage size in gibibytes (GiB). AllocatedStorage *int64 `type:"integer"` - // Provides the list of EC2 Availability Zones that instances in the DB cluster - // snapshot can be restored in. + // Provides the list of Availability Zones (AZs) where instances in the DB cluster + // snapshot can be restored. AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` // Specifies the time when the DB cluster was created, in Universal Coordinated @@ -17760,7 +19031,7 @@ type DBEngineVersion struct { DBParameterGroupFamily *string `type:"string"` // The default character set for new instances of this engine version, if the - // CharacterSetName parameter of the CreateDBInstance API is not specified. + // CharacterSetName parameter of the CreateDBInstance API isn't specified. DefaultCharacterSet *CharacterSet `type:"structure"` // The name of the database engine. @@ -17773,6 +19044,9 @@ type DBEngineVersion struct { // Logs. ExportableLogTypes []*string `type:"list"` + // The status of the DB engine version, either available or deprecated. + Status *string `type:"string"` + // A list of the character sets supported by this engine for the CharacterSetName // parameter of the CreateDBInstance action. SupportedCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` @@ -17854,6 +19128,12 @@ func (s *DBEngineVersion) SetExportableLogTypes(v []*string) *DBEngineVersion { return s } +// SetStatus sets the Status field's value. +func (s *DBEngineVersion) SetStatus(v string) *DBEngineVersion { + s.Status = &v + return s +} + // SetSupportedCharacterSets sets the SupportedCharacterSets field's value. func (s *DBEngineVersion) SetSupportedCharacterSets(v []*CharacterSet) *DBEngineVersion { s.SupportedCharacterSets = v @@ -17972,8 +19252,8 @@ type DBInstance struct { // Provides the list of DB parameter groups applied to this DB instance. DBParameterGroups []*DBParameterGroupStatus `locationNameList:"DBParameterGroup" type:"list"` - // Provides List of DB security group elements containing only DBSecurityGroup.Name - // and DBSecurityGroup.Status subelements. + // A list of DB security group elements containing DBSecurityGroup.Name and + // DBSecurityGroup.Status subelements. DBSecurityGroups []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` // Specifies information on the subnet group associated with the DB instance, @@ -17990,8 +19270,8 @@ type DBInstance struct { DbiResourceId *string `type:"string"` // Indicates if the DB instance has deletion protection enabled. The database - // can't be deleted when this value is set to true. For more information, see - // Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). 
+ // can't be deleted when deletion protection is enabled. For more information, + // see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // The Active Directory Domain membership records associated with the DB instance. @@ -18054,6 +19334,10 @@ type DBInstance struct { // Contains the master username for the DB instance. MasterUsername *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. MonitoringInterval *int64 `type:"integer"` @@ -18099,7 +19383,7 @@ type DBInstance struct { // A value that specifies the order in which an Aurora Replica is promoted to // the primary instance after a failure of the existing primary instance. For - // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) // in the Amazon Aurora User Guide. PromotionTier *int64 `type:"integer"` @@ -18130,7 +19414,7 @@ type DBInstance struct { // instance with multi-AZ support. SecondaryAvailabilityZone *string `type:"string"` - // The status of a Read Replica. If the instance is not a Read Replica, this + // The status of a Read Replica. If the instance isn't a Read Replica, this // is blank. StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` @@ -18368,6 +19652,12 @@ func (s *DBInstance) SetMasterUsername(v string) *DBInstance { return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *DBInstance) SetMaxAllocatedStorage(v int64) *DBInstance { + s.MaxAllocatedStorage = &v + return s +} + // SetMonitoringInterval sets the MonitoringInterval field's value. func (s *DBInstance) SetMonitoringInterval(v int64) *DBInstance { s.MonitoringInterval = &v @@ -18812,7 +20102,7 @@ type DBInstanceStatusInfo struct { _ struct{} `type:"structure"` // Details of the error if there is an error for the instance. If the instance - // is not in an error state, this value is blank. + // isn't in an error state, this value is blank. Message *string `type:"string"` // Boolean value that is true if the instance is operating normally, or false @@ -18960,7 +20250,7 @@ func (s *DBParameterGroupNameMessage) SetDBParameterGroupName(v string) *DBParam type DBParameterGroupStatus struct { _ struct{} `type:"structure"` - // The name of the DP parameter group. + // The name of the DB parameter group. DBParameterGroupName *string `type:"string"` // The status of parameter updates. @@ -19548,6 +20838,71 @@ func (s *DBSubnetGroup) SetVpcId(v string) *DBSubnetGroup { return s } +type DeleteCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. 
+ // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteCustomAvailabilityZoneInput) SetCustomAvailabilityZoneId(v string) *DeleteCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneId = &v + return s +} + +type DeleteCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *DeleteCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *DeleteCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type DeleteDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -19604,7 +20959,7 @@ func (s *DeleteDBClusterEndpointInput) SetDBClusterEndpointIdentifier(v string) type DeleteDBClusterEndpointOutput struct { _ struct{} `type:"structure"` - // The type associated with a custom endpoint. One of: READER, ANY. + // The type associated with a custom endpoint. One of: READER, WRITER, ANY. CustomEndpointType *string `type:"string"` // The Amazon Resource Name (ARN) for the endpoint. @@ -19725,10 +21080,10 @@ type DeleteDBClusterInput struct { DBClusterIdentifier *string `type:"string" required:"true"` // The DB cluster snapshot identifier of the new DB cluster snapshot created - // when SkipFinalSnapshot is set to false. + // when SkipFinalSnapshot is disabled. // - // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. + // Specifying this parameter and also skipping the creation of a final DB cluster + // snapshot with the SkipFinalSnapshot parameter results in an error. // // Constraints: // @@ -19739,14 +21094,14 @@ type DeleteDBClusterInput struct { // * Can't end with a hyphen or contain two consecutive hyphens FinalDBSnapshotIdentifier *string `type:"string"` - // Determines whether a final DB cluster snapshot is created before the DB cluster - // is deleted. If true is specified, no DB cluster snapshot is created.
If false - // is specified, a DB cluster snapshot is created before the DB cluster is deleted. + // A value that indicates whether to skip the creation of a final DB cluster + // snapshot before the DB cluster is deleted. If skip is specified, no DB cluster + // snapshot is created. If skip isn't specified, a DB cluster snapshot is created + // before the DB cluster is deleted. By default, skip isn't specified, and the + // DB cluster snapshot is created. // // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot - // is false. - // - // Default: false + // is disabled. SkipFinalSnapshot *bool `type:"boolean"` } @@ -20023,15 +21378,15 @@ type DeleteDBInstanceInput struct { DBInstanceIdentifier *string `type:"string" required:"true"` // A value that indicates whether to remove automated backups immediately after - // the DB instance is deleted. This parameter isn't case-sensitive. This parameter - // defaults to true. + // the DB instance is deleted. This parameter isn't case-sensitive. The default + // is to remove automated backups immediately after the DB instance is deleted. DeleteAutomatedBackups *bool `type:"boolean"` - // The DBSnapshotIdentifier of the new DB snapshot created when SkipFinalSnapshot - // is set to false. + // The DBSnapshotIdentifier of the new DBSnapshot created when the SkipFinalSnapshot + // parameter is disabled. // - // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. + // Specifying this parameter and also specifying to skip final DB snapshot creation + // in SkipFinalSnapshot results in an error. // // Constraints: // @@ -20044,21 +21399,19 @@ type DeleteDBInstanceInput struct { // * Can't be specified when deleting a Read Replica. FinalDBSnapshotIdentifier *string `type:"string"` - // A value that indicates whether a final DB snapshot is created before the - // DB instance is deleted. If true is specified, no DB snapshot is created. - // If false is specified, a DB snapshot is created before the DB instance is - // deleted. - // - // When a DB instance is in a failure state and has a status of failed, incompatible-restore, - // or incompatible-network, you can only delete it when the SkipFinalSnapshot - // parameter is set to true. + // A value that indicates whether to skip the creation of a final DB snapshot + // before the DB instance is deleted. If skip is specified, no DB snapshot is + // created. If skip isn't specified, a DB snapshot is created before the DB + // instance is deleted. By default, skip isn't specified, and the DB snapshot + // is created. // - // Specify true when deleting a Read Replica. + // Note that when a DB instance is in a failure state and has a status of 'failed', + // 'incompatible-restore', or 'incompatible-network', it can only be deleted + // when skip is specified. // - // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot - // is false. + // Specify skip when deleting a Read Replica. // - // Default: false + // The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified. SkipFinalSnapshot *bool `type:"boolean"` } @@ -20508,6 +21861,133 @@ func (s *DeleteGlobalClusterOutput) SetGlobalCluster(v *GlobalCluster) *DeleteGl return s } +type DeleteInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The installation medium ID.
+ // + // InstallationMediaId is a required field + InstallationMediaId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstallationMediaInput"} + if s.InstallationMediaId == nil { + invalidParams.Add(request.NewErrParamRequired("InstallationMediaId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DeleteInstallationMediaInput) SetInstallationMediaId(v string) *DeleteInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// Contains the installation media for a DB engine that requires an on-premises +// customer provided license, such as Microsoft SQL Server. +type DeleteInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation medium for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation medium ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation medium for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation medium. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DeleteInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *DeleteInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *DeleteInstallationMediaOutput) SetEngine(v string) *DeleteInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *DeleteInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *DeleteInstallationMediaOutput) SetEngineVersion(v string) *DeleteInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *DeleteInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *DeleteInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. 
+func (s *DeleteInstallationMediaOutput) SetInstallationMediaId(v string) *DeleteInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *DeleteInstallationMediaOutput) SetOSInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteInstallationMediaOutput) SetStatus(v string) *DeleteInstallationMediaOutput { + s.Status = &v + return s +} + type DeleteOptionGroupInput struct { _ struct{} `type:"structure"` @@ -20613,7 +22093,7 @@ type DescribeCertificatesInput struct { // * Must match an existing CertificateIdentifier. CertificateIdentifier *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeCertificates @@ -20623,7 +22103,7 @@ type DescribeCertificatesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -20720,6 +22200,119 @@ func (s *DescribeCertificatesOutput) SetMarker(v string) *DescribeCertificatesOu return s } +type DescribeCustomAvailabilityZonesInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. If this parameter is specified, information from + // only the specific custom AZ is returned. + CustomAvailabilityZoneId *string `type:"string"` + + // A filter that specifies one or more custom AZs to describe. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so you can retrieve the remaining results. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCustomAvailabilityZonesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCustomAvailabilityZonesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. 
+func (s *DescribeCustomAvailabilityZonesInput) SetCustomAvailabilityZoneId(v string) *DescribeCustomAvailabilityZonesInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetFilters(v []*Filter) *DescribeCustomAvailabilityZonesInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMarker(v string) *DescribeCustomAvailabilityZonesInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMaxRecords(v int64) *DescribeCustomAvailabilityZonesInput { + s.MaxRecords = &v + return s +} + +type DescribeCustomAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // The list of CustomAvailabilityZone objects for the AWS account. + CustomAvailabilityZones []*CustomAvailabilityZone `locationNameList:"CustomAvailabilityZone" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZones sets the CustomAvailabilityZones field's value. +func (s *DescribeCustomAvailabilityZonesOutput) SetCustomAvailabilityZones(v []*CustomAvailabilityZone) *DescribeCustomAvailabilityZonesOutput { + s.CustomAvailabilityZones = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCustomAvailabilityZonesOutput) SetMarker(v string) *DescribeCustomAvailabilityZonesOutput { + s.Marker = &v + return s +} + type DescribeDBClusterBacktracksInput struct { _ struct{} `type:"structure"` @@ -20759,19 +22352,8 @@ type DescribeDBClusterBacktracksInput struct { // identifiers. // // * db-cluster-backtrack-status - Accepts any of the following backtrack - // status values: - // - // applying - // - // completed - // - // failed - // - // pending - // - // The results list includes information about only the backtracks identified - // by these values. For more information about backtrack status values, see - // DBClusterBacktrack. + // status values: applying completed failed pending The results list includes + // information about only the backtracks identified by these values. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBClusterBacktracks @@ -20781,7 +22363,7 @@ type DescribeDBClusterBacktracksInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -20901,7 +22483,7 @@ type DescribeDBClusterEndpointsInput struct { // A set of name-value pairs that define which endpoints to include in the output. // The filters are specified as name-value pairs, in the format Name=endpoint_type,Values=endpoint_type1,endpoint_type2,.... 
// Name can be one of: db-cluster-endpoint-type, db-cluster-endpoint-custom-type, - // db-cluster-endpoint-id, db-cluster-endpoint-status. Values for the db-cluster-endpoint-type + // db-cluster-endpoint-id, db-cluster-endpoint-status. Values for the db-cluster-endpoint-type // filter can be one or more of: reader, writer, custom. Values for the db-cluster-endpoint-custom-type // filter can be one or more of: reader, any. Values for the db-cluster-endpoint-status // filter can be one or more of: available, creating, deleting, modifying. @@ -20914,7 +22496,7 @@ type DescribeDBClusterEndpointsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -21027,7 +22609,7 @@ type DescribeDBClusterParameterGroupsInput struct { // * If supplied, must match the name of an existing DBClusterParameterGroup. DBClusterParameterGroupName *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBClusterParameterGroups @@ -21037,7 +22619,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -21146,7 +22728,7 @@ type DescribeDBClusterParametersInput struct { // DBClusterParameterGroupName is a required field DBClusterParameterGroupName *string `type:"string" required:"true"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBClusterParameters @@ -21156,7 +22738,7 @@ type DescribeDBClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -21338,7 +22920,7 @@ type DescribeDBClusterSnapshotsInput struct { // The ID of the DB cluster to retrieve the list of DB cluster snapshots for. // This parameter can't be used in conjunction with the DBClusterSnapshotIdentifier - // parameter. This parameter is not case-sensitive. + // parameter. This parameter isn't case-sensitive. // // Constraints: // @@ -21357,20 +22939,31 @@ type DescribeDBClusterSnapshotsInput struct { // must also be specified. DBClusterSnapshotIdentifier *string `type:"string"` - // This parameter is not currently supported. + // A filter that specifies one or more DB cluster snapshots to describe. + // + // Supported filters: + // + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). + // + // * db-cluster-snapshot-id - Accepts DB cluster snapshot identifiers. 
+ // + // * snapshot-type - Accepts types of DB cluster snapshots. + // + // * engine - Accepts names of database engines. Filters []*Filter `locationNameList:"Filter" type:"list"` - // True to include manual DB cluster snapshots that are public and can be copied - // or restored by any AWS account, and otherwise false. The default is false. - // The default is false. + // A value that indicates whether to include manual DB cluster snapshots that + // are public and can be copied or restored by any AWS account. By default, + // the public snapshots are not included. // // You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute // API action. IncludePublic *bool `type:"boolean"` - // True to include shared manual DB cluster snapshots from other AWS accounts - // that this AWS account has been given permission to copy or restore, and otherwise - // false. The default is false. + // A value that indicates whether to include shared manual DB cluster snapshots + // from other AWS accounts that this AWS account has been given permission to + // copy or restore. By default, these snapshots are not included. // // You can give an AWS account permission to restore a manual DB cluster snapshot // from another AWS account by the ModifyDBClusterSnapshotAttribute API action. @@ -21383,7 +22976,7 @@ type DescribeDBClusterSnapshotsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -21406,9 +22999,9 @@ type DescribeDBClusterSnapshotsInput struct { // // If you don't specify a SnapshotType value, then both automated and manual // DB cluster snapshots are returned. You can include shared DB cluster snapshots - // with these results by setting the IncludeShared parameter to true. You can - // include public DB cluster snapshots with these results by setting the IncludePublic - // parameter to true. + // with these results by enabling the IncludeShared parameter. You can include + // public DB cluster snapshots with these results by enabling the IncludePublic + // parameter. // // The IncludeShared and IncludePublic parameters don't apply for SnapshotType // values of manual or automated. The IncludePublic parameter doesn't apply @@ -21552,6 +23145,10 @@ type DescribeDBClustersInput struct { // about the DB clusters identified by these ARNs. Filters []*Filter `locationNameList:"Filter" type:"list"` + // Optional Boolean parameter that specifies whether the output includes information + // about clusters shared from other AWS accounts. + IncludeShared *bool `type:"boolean"` + // An optional pagination token provided by a previous DescribeDBClusters request. // If this parameter is specified, the response includes only records beyond // the marker, up to the value specified by MaxRecords. @@ -21559,7 +23156,7 @@ type DescribeDBClustersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. 
// // Default: 100 // @@ -21609,6 +23206,12 @@ func (s *DescribeDBClustersInput) SetFilters(v []*Filter) *DescribeDBClustersInp return s } +// SetIncludeShared sets the IncludeShared field's value. +func (s *DescribeDBClustersInput) SetIncludeShared(v bool) *DescribeDBClustersInput { + s.IncludeShared = &v + return s +} + // SetMarker sets the Marker field's value. func (s *DescribeDBClustersInput) SetMarker(v string) *DescribeDBClustersInput { s.Marker = &v @@ -21665,8 +23268,8 @@ type DescribeDBEngineVersionsInput struct { // * If supplied, must match an existing DBParameterGroupFamily. DBParameterGroupFamily *string `type:"string"` - // Indicates that only the default version of the specified engine or engine - // and major version combination is returned. + // A value that indicates whether only the default version of the specified + // engine or engine and major version combination is returned. DefaultOnly *bool `type:"boolean"` // The database engine to return. @@ -21677,15 +23280,25 @@ type DescribeDBEngineVersionsInput struct { // Example: 5.1.49 EngineVersion *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` - // If this parameter is specified and the requested engine supports the CharacterSetName + // A value that indicates whether to include engine versions that aren't available + // in the list. The default is to list only available engine versions. + IncludeAll *bool `type:"boolean"` + + // A value that indicates whether to list the supported character sets for each + // engine version. + // + // If this parameter is enabled and the requested engine supports the CharacterSetName // parameter for CreateDBInstance, the response includes a list of supported // character sets for each engine version. ListSupportedCharacterSets *bool `type:"boolean"` - // If this parameter is specified and the requested engine supports the TimeZone + // A value that indicates whether to list the supported time zones for each + // engine version. + // + // If this parameter is enabled and the requested engine supports the TimeZone // parameter for CreateDBInstance, the response includes a list of supported // time zones for each engine version. ListSupportedTimezones *bool `type:"boolean"` @@ -21697,7 +23310,7 @@ type DescribeDBEngineVersionsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so you can retrieve the remaining results. // // Default: 100 // @@ -21765,6 +23378,12 @@ func (s *DescribeDBEngineVersionsInput) SetFilters(v []*Filter) *DescribeDBEngin return s } +// SetIncludeAll sets the IncludeAll field's value. +func (s *DescribeDBEngineVersionsInput) SetIncludeAll(v bool) *DescribeDBEngineVersionsInput { + s.IncludeAll = &v + return s +} + // SetListSupportedCharacterSets sets the ListSupportedCharacterSets field's value. 
func (s *DescribeDBEngineVersionsInput) SetListSupportedCharacterSets(v bool) *DescribeDBEngineVersionsInput { s.ListSupportedCharacterSets = &v @@ -21842,14 +23461,9 @@ type DescribeDBInstanceAutomatedBackupsInput struct { // // Supported filters are the following: // - // * status - // - // active - automated backups for current instances - // - // retained - automated backups for deleted instances - // - // creating - automated backups that are waiting for the first automated snapshot - // to be available + // * status - Accepts one of: active (automated backups for current instances), + // retained (automated backups for deleted instances), or creating (automated + // backups that are waiting for the first automated snapshot to be available) // // * db-instance-id - Accepts DB instance identifiers and Amazon Resource // Names (ARNs) for DB instances. The results list includes only information @@ -21870,7 +23484,7 @@ type DescribeDBInstanceAutomatedBackupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -21993,6 +23607,17 @@ type DescribeDBInstancesInput struct { // * db-instance-id - Accepts DB instance identifiers and DB instance Amazon // Resource Names (ARNs). The results list will only include information // about the DB instances identified by these ARNs. + // + // * dbi-resource-id - Accepts DB instance resource identifiers. The results + // list will only include information about the DB instances identified by + // these DB instance resource identifiers. + // + // * domain - Accepts Active Directory directory IDs. The results list will + // only include information about the DB instances associated with these + // domains. + // + // * engine - Accepts engine names. The results list will only include information + // about the DB instances for these engines. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBInstances request. @@ -22002,7 +23627,7 @@ type DescribeDBInstancesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22166,7 +23791,7 @@ type DescribeDBLogFilesInput struct { // string. FilenameContains *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // The pagination token provided in the previous request. If this parameter @@ -22176,7 +23801,7 @@ type DescribeDBLogFilesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -22298,7 +23923,7 @@ type DescribeDBParameterGroupsInput struct { // * If supplied, must match the name of an existing DBClusterParameterGroup.
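The dbi-resource-id, domain, and engine filters added above can be used like any other DescribeDBInstances filter; a hedged sketch reusing the same client, with the engine value passed in by the caller:

// listInstancesByEngine uses the newly documented "engine" filter to
// restrict DescribeDBInstances to a single database engine.
func listInstancesByEngine(svc *rds.RDS, engine string) error {
	out, err := svc.DescribeDBInstances(&rds.DescribeDBInstancesInput{
		Filters: []*rds.Filter{{
			Name:   aws.String("engine"),
			Values: []*string{aws.String(engine)},
		}},
	})
	if err != nil {
		return err
	}
	for _, db := range out.DBInstances {
		fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
	}
	return nil
}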
DBParameterGroupName *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBParameterGroups @@ -22308,7 +23933,7 @@ type DescribeDBParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22418,7 +24043,7 @@ type DescribeDBParametersInput struct { // DBParameterGroupName is a required field DBParameterGroupName *string `type:"string" required:"true"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBParameters @@ -22428,7 +24053,7 @@ type DescribeDBParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22548,7 +24173,7 @@ type DescribeDBSecurityGroupsInput struct { // The name of the DB security group to return details for. DBSecurityGroupName *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBSecurityGroups @@ -22558,7 +24183,7 @@ type DescribeDBSecurityGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22727,7 +24352,7 @@ type DescribeDBSnapshotsInput struct { // The ID of the DB instance to retrieve the list of DB snapshots for. This // parameter can't be used in conjunction with DBSnapshotIdentifier. This parameter - // is not case-sensitive. + // isn't case-sensitive. // // Constraints: // @@ -22749,19 +24374,33 @@ type DescribeDBSnapshotsInput struct { // A specific DB resource ID to describe. DbiResourceId *string `type:"string"` - // This parameter is not currently supported. + // A filter that specifies one or more DB snapshots to describe. + // + // Supported filters: + // + // * db-instance-id - Accepts DB instance identifiers and DB instance Amazon + // Resource Names (ARNs). + // + // * db-snapshot-id - Accepts DB snapshot identifiers. + // + // * dbi-resource-id - Accepts identifiers of source DB instances. + // + // * snapshot-type - Accepts types of DB snapshots. + // + // * engine - Accepts names of database engines. Filters []*Filter `locationNameList:"Filter" type:"list"` - // True to include manual DB snapshots that are public and can be copied or - // restored by any AWS account, and otherwise false. The default is false. 
+ // A value that indicates whether to include manual DB snapshots that are public + // and can be copied or restored by any AWS account. By default, the public + // snapshots are not included. // // You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute // API. IncludePublic *bool `type:"boolean"` - // True to include shared manual DB snapshots from other AWS accounts that this - // AWS account has been given permission to copy or restore, and otherwise false. - // The default is false. + // A value that indicates whether to include shared manual DB snapshots from + // other AWS accounts that this AWS account has been given permission to copy + // or restore. By default, these snapshots are not included. // // You can give an AWS account permission to restore a manual DB snapshot from // another AWS account by using the ModifyDBSnapshotAttribute API action. @@ -22774,7 +24413,7 @@ type DescribeDBSnapshotsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22795,16 +24434,15 @@ type DescribeDBSnapshotsInput struct { // * public - Return all DB snapshots that have been marked as public. // // * awsbackup - Return the DB snapshots managed by the AWS Backup service. - // - // For information about AWS Backup, see the AWS Backup Developer Guide. (https://docs.aws.amazon.com/aws-backup/latest/devguide/whatisbackup.html) - // - // The awsbackup type does not apply to Aurora. + // For information about AWS Backup, see the AWS Backup Developer Guide. + // (https://docs.aws.amazon.com/aws-backup/latest/devguide/whatisbackup.html) + // The awsbackup type does not apply to Aurora. // // If you don't specify a SnapshotType value, then both automated and manual // snapshots are returned. Shared and public DB snapshots are not included in // the returned results by default. You can include shared snapshots with these - // results by setting the IncludeShared parameter to true. You can include public - // snapshots with these results by setting the IncludePublic parameter to true. + // results by enabling the IncludeShared parameter. You can include public snapshots + // with these results by enabling the IncludePublic parameter. // // The IncludeShared and IncludePublic parameters don't apply for SnapshotType // values of manual or automated. The IncludePublic parameter doesn't apply @@ -22939,7 +24577,7 @@ type DescribeDBSubnetGroupsInput struct { // The name of the DB subnet group to return details for. DBSubnetGroupName *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBSubnetGroups @@ -22949,7 +24587,7 @@ type DescribeDBSubnetGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results.
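DescribeDBSnapshots gains the same filter set, and its include flags behave like their cluster-snapshot counterparts; a small sketch under the same client assumptions, with a placeholder filter value:

// listSharedMySQLSnapshots lists this account's MySQL DB snapshots plus
// manual snapshots that other accounts have shared with it.
func listSharedMySQLSnapshots(svc *rds.RDS) error {
	out, err := svc.DescribeDBSnapshots(&rds.DescribeDBSnapshotsInput{
		Filters: []*rds.Filter{{
			Name:   aws.String("engine"),
			Values: []*string{aws.String("mysql")},
		}},
		IncludeShared: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	for _, snap := range out.DBSnapshots {
		fmt.Println(aws.StringValue(snap.DBSnapshotIdentifier))
	}
	return nil
}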
// // Default: 100 // @@ -23056,7 +24694,7 @@ type DescribeEngineDefaultClusterParametersInput struct { // DBParameterGroupFamily is a required field DBParameterGroupFamily *string `type:"string" required:"true"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters @@ -23066,7 +24704,7 @@ type DescribeEngineDefaultClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -23163,7 +24801,7 @@ type DescribeEngineDefaultParametersInput struct { // DBParameterGroupFamily is a required field DBParameterGroupFamily *string `type:"string" required:"true"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeEngineDefaultParameters @@ -23173,7 +24811,7 @@ type DescribeEngineDefaultParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -23265,7 +24903,7 @@ func (s *DescribeEngineDefaultParametersOutput) SetEngineDefaults(v *EngineDefau type DescribeEventCategoriesInput struct { _ struct{} `type:"structure"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // The type of source that is generating the events. @@ -23343,7 +24981,7 @@ func (s *DescribeEventCategoriesOutput) SetEventCategoriesMapList(v []*EventCate type DescribeEventSubscriptionsInput struct { _ struct{} `type:"structure"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions @@ -23353,7 +24991,7 @@ type DescribeEventSubscriptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23472,7 +25110,7 @@ type DescribeEventsInput struct { // subscription. EventCategories []*string `locationNameList:"EventCategory" type:"list"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeEvents request. @@ -23482,7 +25120,7 @@ type DescribeEventsInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23669,7 +25307,7 @@ type DescribeGlobalClustersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23765,6 +25403,123 @@ func (s *DescribeGlobalClustersOutput) SetMarker(v string) *DescribeGlobalCluste return s } +type DescribeInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // A filter that specifies one or more installation media to describe. Supported + // filters include the following: + // + // * custom-availability-zone-id - Accepts custom Availability Zone (AZ) + // identifiers. The results list includes information about only the custom + // AZs identified by these identifiers. + // + // * engine - Accepts database engines. The results list includes information + // about only the database engines identified by these identifiers. For more + // information about the valid engines for installation media, see ImportInstallationMedia. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The installation medium ID. + InstallationMediaId *string `type:"string"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that you can retrieve the remaining results. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstallationMediaInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstallationMediaInput) SetFilters(v []*Filter) *DescribeInstallationMediaInput { + s.Filters = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DescribeInstallationMediaInput) SetInstallationMediaId(v string) *DescribeInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaInput) SetMarker(v string) *DescribeInstallationMediaInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value.
+func (s *DescribeInstallationMediaInput) SetMaxRecords(v int64) *DescribeInstallationMediaInput { + s.MaxRecords = &v + return s +} + +type DescribeInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The list of InstallationMedia objects for the AWS account. + InstallationMedia []*InstallationMedia `locationNameList:"InstallationMedia" type:"list"` + + // An optional pagination token provided by a previous DescribeInstallationMedia + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetInstallationMedia sets the InstallationMedia field's value. +func (s *DescribeInstallationMediaOutput) SetInstallationMedia(v []*InstallationMedia) *DescribeInstallationMediaOutput { + s.InstallationMedia = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaOutput) SetMarker(v string) *DescribeInstallationMediaOutput { + s.Marker = &v + return s +} + type DescribeOptionGroupOptionsInput struct { _ struct{} `type:"structure"` @@ -23773,7 +25528,7 @@ type DescribeOptionGroupOptionsInput struct { // EngineName is a required field EngineName *string `type:"string" required:"true"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // If specified, filters the results to include only options for the specified @@ -23787,7 +25542,7 @@ type DescribeOptionGroupOptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23899,7 +25654,7 @@ type DescribeOptionGroupsInput struct { // a specific database engine. EngineName *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // Filters the list of option groups to only include groups associated with @@ -23914,7 +25669,7 @@ type DescribeOptionGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24043,7 +25798,7 @@ type DescribeOrderableDBInstanceOptionsInput struct { // available offerings matching the specified engine version. EngineVersion *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // The license model filter value. Specify this parameter to show only the available @@ -24057,15 +25812,14 @@ type DescribeOrderableDBInstanceOptionsInput struct { // The maximum number of records to include in the response. 
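Since DescribeInstallationMedia pages with Marker and MaxRecords like the other Describe calls, a manual pagination loop is enough; a sketch with the same client assumption as the earlier examples:

// listInstallationMedia walks every page of DescribeInstallationMedia
// results, following the Marker token until it is exhausted.
func listInstallationMedia(svc *rds.RDS) error {
	input := &rds.DescribeInstallationMediaInput{}
	for {
		out, err := svc.DescribeInstallationMedia(input)
		if err != nil {
			return err
		}
		for _, m := range out.InstallationMedia {
			fmt.Println(aws.StringValue(m.InstallationMediaId), aws.StringValue(m.Status))
		}
		if out.Marker == nil {
			return nil
		}
		input.Marker = out.Marker
	}
}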
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The VPC filter value. Specify this parameter to show only the available VPC - // or non-VPC offerings. + // A value that indicates whether to show only VPC or non-VPC offerings. Vpc *bool `type:"boolean"` } @@ -24211,7 +25965,7 @@ type DescribePendingMaintenanceActionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24324,9 +26078,16 @@ type DescribeReservedDBInstancesInput struct { // Valid Values: 1 | 3 | 31536000 | 94608000 Duration *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` + // The lease identifier filter value. Specify this parameter to show only the + // reservation that matches the specified lease ID. + // + // AWS Support might request the lease ID for an issue related to a reserved + // DB instance. + LeaseId *string `type:"string"` + // An optional pagination token provided by a previous request. If this parameter // is specified, the response includes only records beyond the marker, up to // the value specified by MaxRecords. @@ -24334,15 +26095,15 @@ type DescribeReservedDBInstancesInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so you can retrieve the remaining results. // // Default: 100 // // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The Multi-AZ filter value. Specify this parameter to show only those reservations - // matching the specified Multi-AZ parameter. + // A value that indicates whether to show only those reservations that support + // Multi-AZ. MultiAZ *bool `type:"boolean"` // The offering type filter value. Specify this parameter to show only the available @@ -24412,6 +26173,12 @@ func (s *DescribeReservedDBInstancesInput) SetFilters(v []*Filter) *DescribeRese return s } +// SetLeaseId sets the LeaseId field's value. +func (s *DescribeReservedDBInstancesInput) SetLeaseId(v string) *DescribeReservedDBInstancesInput { + s.LeaseId = &v + return s +} + // SetMarker sets the Marker field's value. func (s *DescribeReservedDBInstancesInput) SetMarker(v string) *DescribeReservedDBInstancesInput { s.Marker = &v @@ -24467,7 +26234,7 @@ type DescribeReservedDBInstancesOfferingsInput struct { // Valid Values: 1 | 3 | 31536000 | 94608000 Duration *string `type:"string"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous request. 
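The new LeaseId filter narrows DescribeReservedDBInstances to a single reservation; the lease ID below is a placeholder of the kind AWS Support might ask for:

// findReservationByLease looks up a reserved DB instance by its lease ID.
func findReservationByLease(svc *rds.RDS, leaseID string) error {
	out, err := svc.DescribeReservedDBInstances(&rds.DescribeReservedDBInstancesInput{
		LeaseId: aws.String(leaseID),
	})
	if err != nil {
		return err
	}
	for _, r := range out.ReservedDBInstances {
		fmt.Println(aws.StringValue(r.ReservedDBInstanceId), aws.StringValue(r.State))
	}
	return nil
}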
If this parameter @@ -24477,15 +26244,15 @@ type DescribeReservedDBInstancesOfferingsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so you can retrieve the remaining results. // // Default: 100 // // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The Multi-AZ filter value. Specify this parameter to show only the available - // offerings matching the specified Multi-AZ parameter. + // A value that indicates whether to show only those reservations that support + // Multi-AZ. MultiAZ *bool `type:"boolean"` // The offering type filter value. Specify this parameter to show only the available @@ -24666,7 +26433,7 @@ func (s *DescribeReservedDBInstancesOutput) SetReservedDBInstances(v []*Reserved type DescribeSourceRegionsInput struct { _ struct{} `type:"structure"` - // This parameter is not currently supported. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeSourceRegions @@ -24676,7 +26443,7 @@ type DescribeSourceRegionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so you can retrieve the remaining results. // // Default: 100 // @@ -24965,8 +26732,8 @@ type DownloadDBLogFilePortionInput struct { // is returned up to a maximum of 10000 lines, starting with the most recent // log entries first. // - // * If NumberOfLines is specified and Marker is not specified, then the - // most recent lines from the end of the log file are returned. + // * If NumberOfLines is specified and Marker isn't specified, then the most + // recent lines from the end of the log file are returned. // // * If Marker is specified as "0", then the specified number of lines from // the beginning of the log file are returned. @@ -25452,7 +27219,7 @@ func (s *EventSubscription) SetSubscriptionCreationTime(v string) *EventSubscrip type FailoverDBClusterInput struct { _ struct{} `type:"structure"` - // A DB cluster identifier to force a failover for. This parameter is not case-sensitive. + // A DB cluster identifier to force a failover for. This parameter isn't case-sensitive. // // Constraints: // @@ -25788,14 +27555,342 @@ func (s *IPRange) SetStatus(v string) *IPRange { return s } +type ImportInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The identifier of the custom Availability Zone (AZ) to import the installation + // media to. + // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` + + // The name of the database engine to be used for this instance. + // + // The list only includes supported DB engines that require an on-premises customer + // provided license. + // + // Valid Values: + // + // * sqlserver-ee + // + // * sqlserver-se + // + // * sqlserver-ex + // + // * sqlserver-web + // + // Engine is a required field + Engine *string `type:"string" required:"true"` + + // The path to the installation medium for the specified DB engine. 
+ // + // Example: SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso + // + // EngineInstallationMediaPath is a required field + EngineInstallationMediaPath *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // For a list of valid engine versions, call DescribeDBEngineVersions. + // + // The following are the database engines and links to information about the + // major and minor versions. The list only includes DB engines that require + // an on-premises customer provided license. + // + // Microsoft SQL Server + // + // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // in the Amazon RDS User Guide. + // + // EngineVersion is a required field + EngineVersion *string `type:"string" required:"true"` + + // The path to the installation medium for the operating system associated with + // the specified DB engine. + // + // Example: WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso + // + // OSInstallationMediaPath is a required field + OSInstallationMediaPath *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportInstallationMediaInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.EngineInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("EngineInstallationMediaPath")) + } + if s.EngineVersion == nil { + invalidParams.Add(request.NewErrParamRequired("EngineVersion")) + } + if s.OSInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("OSInstallationMediaPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *ImportInstallationMediaInput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaInput) SetEngine(v string) *ImportInstallationMediaInput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaInput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaInput) SetEngineVersion(v string) *ImportInstallationMediaInput { + s.EngineVersion = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. 
+func (s *ImportInstallationMediaInput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.OSInstallationMediaPath = &v + return s +} + +// Contains the installation media for a DB engine that requires an on-premises +// customer provided license, such as Microsoft SQL Server. +type ImportInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation medium for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation medium ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation medium for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation medium. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ImportInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *ImportInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaOutput) SetEngine(v string) *ImportInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaOutput) SetEngineVersion(v string) *ImportInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *ImportInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *ImportInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *ImportInstallationMediaOutput) SetInstallationMediaId(v string) *ImportInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImportInstallationMediaOutput) SetStatus(v string) *ImportInstallationMediaOutput { + s.Status = &v + return s +} + +// Contains the installation media for a DB engine that requires an on-premises +// customer provided license, such as Microsoft SQL Server. +type InstallationMedia struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. 
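Putting the required ImportInstallationMedia fields together, a hedged sketch: the custom AZ ID and engine version are invented placeholders, while the two media paths reuse the examples given in the field docs above:

// importSQLServerMedia registers SQL Server and Windows installation media
// in a custom Availability Zone for an on-premises-licensed engine.
func importSQLServerMedia(svc *rds.RDS) error {
	out, err := svc.ImportInstallationMedia(&rds.ImportInstallationMediaInput{
		CustomAvailabilityZoneId:    aws.String("custom-az-example"),  // placeholder
		Engine:                      aws.String("sqlserver-ee"),
		EngineVersion:               aws.String("13.00.5292.0.v1"), // placeholder version
		EngineInstallationMediaPath: aws.String("SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso"),
		OSInstallationMediaPath:     aws.String("WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso"),
	})
	if err != nil {
		return err
	}
	fmt.Println("import status:", aws.StringValue(out.Status))
	return nil
}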
+ CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation medium for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation medium ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation medium for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation medium. + Status *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMedia) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMedia) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *InstallationMedia) SetCustomAvailabilityZoneId(v string) *InstallationMedia { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *InstallationMedia) SetEngine(v string) *InstallationMedia { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *InstallationMedia) SetEngineInstallationMediaPath(v string) *InstallationMedia { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *InstallationMedia) SetEngineVersion(v string) *InstallationMedia { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *InstallationMedia) SetFailureCause(v *InstallationMediaFailureCause) *InstallationMedia { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *InstallationMedia) SetInstallationMediaId(v string) *InstallationMedia { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *InstallationMedia) SetOSInstallationMediaPath(v string) *InstallationMedia { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstallationMedia) SetStatus(v string) *InstallationMedia { + s.Status = &v + return s +} + +// Contains the cause of an installation media failure. Installation media is +// used for a DB engine that requires an on-premises customer provided license, +// such as Microsoft SQL Server. +type InstallationMediaFailureCause struct { + _ struct{} `type:"structure"` + + // The reason that an installation media import failed. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMediaFailureCause) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMediaFailureCause) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *InstallationMediaFailureCause) SetMessage(v string) *InstallationMediaFailureCause { + s.Message = &v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // This parameter is not currently supported. + // This parameter isn't currently supported. 
Filters []*Filter `locationNameList:"Filter" type:"list"` // The Amazon RDS resource with tags to be listed. This value is an Amazon Resource - // Name (ARN). For information about creating an ARN, see Constructing an ARN + // Name (ARN). For information about creating an ARN, see Constructing an ARN // for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. // @@ -25915,11 +28010,11 @@ type ModifyCurrentDBClusterCapacityInput struct { // // Constraints: // - // * Value must be 2, 4, 8, 16, 32, 64, 128, or 256. + // * Value must be 1, 2, 4, 8, 16, 32, 64, 128, or 256. Capacity *int64 `type:"integer"` // The DB cluster identifier for the cluster being modified. This parameter - // is not case-sensitive. + // isn't case-sensitive. // // Constraints: // @@ -25941,8 +28036,8 @@ type ModifyCurrentDBClusterCapacityInput struct { // ForceApplyCapacityChange, the default, sets the capacity to the specified // value as soon as possible. // - // RollbackCapacityChange ignores the capacity change if a scaling point is - // not found in the timeout period. + // RollbackCapacityChange ignores the capacity change if a scaling point isn't + // found in the timeout period. TimeoutAction *string `type:"string"` } @@ -26064,7 +28159,7 @@ type ModifyDBClusterEndpointInput struct { // DBClusterEndpointIdentifier is a required field DBClusterEndpointIdentifier *string `type:"string" required:"true"` - // The type of the endpoint. One of: READER, ANY. + // The type of the endpoint. One of: READER, WRITER, ANY. EndpointType *string `type:"string"` // List of DB instance identifiers that aren't part of the custom endpoint group. @@ -26140,7 +28235,7 @@ func (s *ModifyDBClusterEndpointInput) SetStaticMembers(v []*string) *ModifyDBCl type ModifyDBClusterEndpointOutput struct { _ struct{} `type:"structure"` - // The type associated with a custom endpoint. One of: READER, ANY. + // The type associated with a custom endpoint. One of: READER, WRITER, ANY. CustomEndpointType *string `type:"string"` // The Amazon Resource Name (ARN) for the endpoint. @@ -26250,20 +28345,27 @@ func (s *ModifyDBClusterEndpointOutput) SetStatus(v string) *ModifyDBClusterEndp type ModifyDBClusterInput struct { _ struct{} `type:"structure"` - // A value that specifies whether the modifications in this request and any + // A value that indicates whether major version upgrades are allowed. + // + // Constraints: You must allow major version upgrades when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB cluster's current version. + AllowMajorVersionUpgrade *bool `type:"boolean"` + + // A value that indicates whether the modifications in this request and any // pending modifications are asynchronously applied as soon as possible, regardless // of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter - // is set to false, changes to the DB cluster are applied during the next maintenance + // is disabled, changes to the DB cluster are applied during the next maintenance // window. // // The ApplyImmediately parameter only affects the EnableIAMDatabaseAuthentication, - // MasterUserPassword, and NewDBClusterIdentifier values. If you set the ApplyImmediately - // parameter value to false, then changes to the EnableIAMDatabaseAuthentication, + // MasterUserPassword, and NewDBClusterIdentifier values. 
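Note that 1 joins the valid Aurora Serverless capacity values above; a sketch of scaling a cluster down to it, with a placeholder cluster identifier (SecondsBeforeTimeout is assumed from the existing input shape, not shown in this hunk):

// scaleServerlessClusterToOne requests capacity 1 and rolls the change
// back if no scaling point is found within the timeout.
func scaleServerlessClusterToOne(svc *rds.RDS) error {
	_, err := svc.ModifyCurrentDBClusterCapacity(&rds.ModifyCurrentDBClusterCapacityInput{
		DBClusterIdentifier:  aws.String("example-serverless-cluster"),
		Capacity:             aws.Int64(1), // newly valid alongside 2..256
		SecondsBeforeTimeout: aws.Int64(300),
		TimeoutAction:        aws.String("RollbackCapacityChange"),
	})
	return err
}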
If the ApplyImmediately + // parameter is disabled, then changes to the EnableIAMDatabaseAuthentication, // MasterUserPassword, and NewDBClusterIdentifier values are applied during // the next maintenance window. All other changes are applied immediately, regardless // of the value of the ApplyImmediately parameter. // - // Default: false + // By default, this parameter is disabled. ApplyImmediately *bool `type:"boolean"` // The target backtrack window, in seconds. To disable backtracking, set this @@ -26291,16 +28393,15 @@ type ModifyDBClusterInput struct { // Logs for a specific DB cluster. CloudwatchLogsExportConfiguration *CloudwatchLogsExportConfiguration `type:"structure"` - // True to copy all tags from the DB cluster to snapshots of the DB cluster, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB cluster to snapshots + // of the DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The DB cluster identifier for the cluster being modified. This parameter - // is not case-sensitive. - // - // Constraints: + // isn't case-sensitive. // - // * Must match the identifier of an existing DBCluster. + // Constraints: This identifier must match the identifier of an existing DB + // cluster. // // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` @@ -26308,13 +28409,28 @@ type ModifyDBClusterInput struct { // The name of the DB cluster parameter group to use for the DB cluster. DBClusterParameterGroupName *string `type:"string"` - // Indicates if the DB cluster has deletion protection enabled. The database - // can't be deleted when this value is set to true. + // The name of the DB parameter group to apply to all instances of the DB cluster. + // + // When you apply a parameter group using the DBInstanceParameterGroupName parameter, + // the DB cluster isn't rebooted automatically. Also, parameter changes aren't + // applied during the next maintenance window but instead are applied immediately. + // + // Default: The existing name setting + // + // Constraints: + // + // * The DB parameter group must be in the same DB parameter group family + // as this DB cluster. + // + // * The DBInstanceParameterGroupName parameter is only valid in combination + // with the AllowMajorVersionUpgrade parameter. + DBInstanceParameterGroupName *string `type:"string"` + + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` - // HTTP endpoint functionality is in beta for Aurora Serverless and is subject - // to change. - // // A value that indicates whether to enable the HTTP endpoint for an Aurora // Serverless DB cluster. By default, the HTTP endpoint is disabled. // @@ -26322,22 +28438,35 @@ type ModifyDBClusterInput struct { // for running SQL queries on the Aurora Serverless DB cluster. You can also // query your database from inside the RDS console with the query editor. // - // For more information about Aurora Serverless, see Using Amazon Aurora Serverless - // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. 
EnableHttpEndpoint *bool `type:"boolean"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // - // Default: false + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The version number of the database engine to which you want to upgrade. Changing // this parameter results in an outage. The change is applied during the next - // maintenance window unless the ApplyImmediately parameter is set to true. + // maintenance window unless ApplyImmediately is enabled. + // + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: // - // For a list of valid engine versions, see CreateDBCluster, or call DescribeDBEngineVersions. + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" EngineVersion *string `type:"string"` // The new password for the master database user. This password can contain @@ -26363,10 +28492,10 @@ type ModifyDBClusterInput struct { // A value that indicates that the DB cluster should be associated with the // specified option group. Changing this parameter doesn't result in an outage // except in the following case, and the change is applied during the next maintenance - // window unless the ApplyImmediately parameter is set to true for this request. - // If the parameter change results in an option group that enables OEM, this - // change can cause a brief (sub-second) period during which new connections - // are rejected but existing connections are not interrupted. + // window unless the ApplyImmediately parameter is enabled for this request. + // If the parameter change results in an option group that enables OEM, this + // change can cause a brief (sub-second) period during which new connections + // are rejected but existing connections are not interrupted. // // Permanent options can't be removed from an option group. The option group // can't be removed from a DB cluster once it is associated with a DB cluster. @@ -26383,7 +28512,7 @@ type ModifyDBClusterInput struct { // backups are enabled, using the BackupRetentionPeriod parameter. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. To see the time blocks available, see Adjusting // the Preferred DB Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide.
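Tying the new AllowMajorVersionUpgrade and DBInstanceParameterGroupName fields to the EngineVersion change they gate, a sketch in which the identifiers and the target version are placeholders:

// upgradeAuroraMajorVersion moves a cluster to a new major engine version,
// which requires AllowMajorVersionUpgrade and, optionally, a DB parameter
// group applied to every instance in the cluster.
func upgradeAuroraMajorVersion(svc *rds.RDS) error {
	_, err := svc.ModifyDBCluster(&rds.ModifyDBClusterInput{
		DBClusterIdentifier:          aws.String("example-aurora-cluster"),
		EngineVersion:                aws.String("5.7.mysql_aurora.2.07.1"), // placeholder target
		AllowMajorVersionUpgrade:     aws.Bool(true),
		DBInstanceParameterGroupName: aws.String("example-aurora-mysql57-params"),
		ApplyImmediately:             aws.Bool(true),
	})
	return err
}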
// @@ -26405,7 +28534,7 @@ type ModifyDBClusterInput struct { // // The default is a 30-minute window selected at random from an 8-hour block // of time for each AWS Region, occurring on a random day of the week. To see - // the time blocks available, see Adjusting the Preferred DB Cluster Maintenance + // the time blocks available, see Adjusting the Preferred DB Cluster Maintenance // Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // @@ -26445,6 +28574,12 @@ func (s *ModifyDBClusterInput) Validate() error { return nil } +// SetAllowMajorVersionUpgrade sets the AllowMajorVersionUpgrade field's value. +func (s *ModifyDBClusterInput) SetAllowMajorVersionUpgrade(v bool) *ModifyDBClusterInput { + s.AllowMajorVersionUpgrade = &v + return s +} + // SetApplyImmediately sets the ApplyImmediately field's value. func (s *ModifyDBClusterInput) SetApplyImmediately(v bool) *ModifyDBClusterInput { s.ApplyImmediately = &v @@ -26487,6 +28622,12 @@ func (s *ModifyDBClusterInput) SetDBClusterParameterGroupName(v string) *ModifyD return s } +// SetDBInstanceParameterGroupName sets the DBInstanceParameterGroupName field's value. +func (s *ModifyDBClusterInput) SetDBInstanceParameterGroupName(v string) *ModifyDBClusterInput { + s.DBInstanceParameterGroupName = &v + return s +} + // SetDeletionProtection sets the DeletionProtection field's value. func (s *ModifyDBClusterInput) SetDeletionProtection(v bool) *ModifyDBClusterInput { s.DeletionProtection = &v @@ -26766,36 +28907,34 @@ type ModifyDBInstanceInput struct { // For the valid values for allocated storage for each engine, see CreateDBInstance. AllocatedStorage *int64 `type:"integer"` - // Indicates that major version upgrades are allowed. Changing this parameter - // doesn't result in an outage and the change is asynchronously applied as soon - // as possible. + // A value that indicates whether major version upgrades are allowed. Changing + // this parameter doesn't result in an outage and the change is asynchronously + // applied as soon as possible. // - // Constraints: This parameter must be set to true when specifying a value for - // the EngineVersion parameter that is a different major version than the DB - // instance's current version. + // Constraints: Major version upgrades must be allowed when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB instance's current version. AllowMajorVersionUpgrade *bool `type:"boolean"` - // Specifies whether the modifications in this request and any pending modifications - // are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow - // setting for the DB instance. - // - // If this parameter is set to false, changes to the DB instance are applied - // during the next maintenance window. Some parameter changes can cause an outage - // and are applied on the next call to RebootDBInstance, or the next failure - // reboot. Review the table of parameters in Modifying a DB Instance and Using - // the Apply Immediately Parameter (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) - // in the Amazon RDS User Guide. to see the impact that setting ApplyImmediately - // to true or false has for each modified parameter and to determine when the - // changes are applied. 
- // - // Default: false + // A value that indicates whether the modifications in this request and any + // pending modifications are asynchronously applied as soon as possible, regardless + // of the PreferredMaintenanceWindow setting for the DB instance. By default, + // this parameter is disabled. + // + // If this parameter is disabled, changes to the DB instance are applied during + // the next maintenance window. Some parameter changes can cause an outage and + // are applied on the next call to RebootDBInstance, or the next failure reboot. + // Review the table of parameters in Modifying a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) + // in the Amazon RDS User Guide to see the impact of enabling or disabling + // ApplyImmediately for each modified parameter and to determine when the changes + // are applied. ApplyImmediately *bool `type:"boolean"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. Changing this parameter doesn't result - // in an outage except in the following case and the change is asynchronously - // applied as soon as possible. An outage will result if this parameter is set - // to true during the maintenance window, and a newer minor version is available, + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. Changing this parameter + // doesn't result in an outage except in the following case and the change is + // asynchronously applied as soon as possible. An outage results if this parameter + // is enabled during the maintenance window, and a newer minor version is available, // and RDS has enabled auto patching for that engine version. AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -26805,10 +28944,9 @@ type ModifyDBInstanceInput struct { // // Changing this parameter can result in an outage if you change from 0 to a // non-zero value or from a non-zero value to 0. These changes are applied during - // the next maintenance window unless the ApplyImmediately parameter is set - // to true for this request. If you change the parameter from one non-zero value - // to another non-zero value, the change is asynchronously applied as soon as - // possible. + // the next maintenance window unless the ApplyImmediately parameter is enabled + // for this request. If you change the parameter from one non-zero value to + // another non-zero value, the change is asynchronously applied as soon as possible. // // Amazon Aurora // @@ -26841,8 +28979,8 @@ type ModifyDBInstanceInput struct { // has no effect. CloudwatchLogsExportConfiguration *CloudwatchLogsExportConfiguration `type:"structure"` - // True to copy all tags from the DB instance to snapshots of the DB instance, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. // // Amazon Aurora // @@ -26859,7 +28997,7 @@ type ModifyDBInstanceInput struct { // // If you modify the DB instance class, an outage occurs during the change. // The change is applied during the next maintenance window, unless ApplyImmediately - // is specified as true for this request. + // is enabled for this request.
// // Default: Uses existing setting DBInstanceClass *string `type:"string"` @@ -26876,8 +29014,8 @@ type ModifyDBInstanceInput struct { // The name of the DB parameter group to apply to the DB instance. Changing // this setting doesn't result in an outage. The parameter group name itself // is changed immediately, but the actual parameter changes are not applied - // until you reboot the instance without failover. The db instance will NOT - // be rebooted automatically and the parameter changes will NOT be applied during + // until you reboot the instance without failover. In this case, the DB instance + // isn't rebooted automatically and the parameter changes aren't applied during // the next maintenance window. // // Default: Uses existing setting @@ -26944,54 +29082,56 @@ type ModifyDBInstanceInput struct { DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` // The new DB subnet group for the DB instance. You can use this parameter to - // move your DB instance to a different VPC. If your DB instance is not in a + // move your DB instance to a different VPC. If your DB instance isn't in a // VPC, you can also use this parameter to move your DB instance into a VPC. // For more information, see Updating the VPC for a DB Instance (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html#USER_VPC.Non-VPC2VPC) // in the Amazon RDS User Guide. // // Changing the subnet group causes an outage during the change. The change - // is applied during the next maintenance window, unless you specify true for - // the ApplyImmediately parameter. + // is applied during the next maintenance window, unless you enable ApplyImmediately. // // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetGroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance has deletion protection enabled. The database - // can't be deleted when this value is set to true. For more information, see - // Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` - // The Active Directory Domain to move the instance to. Specify none to remove - // the instance from its current domain. The domain must be created prior to - // this operation. Currently only a Microsoft SQL Server instance can be created - // in a Active Directory Domain. + // The Active Directory directory ID to move the DB instance to. Specify none + // to remove the instance from its current domain. The domain must be created + // prior to this operation. Currently, only Microsoft SQL Server and Oracle + // DB instances can be created in an Active Directory Domain. + // + // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication + // to authenticate users that connect to the DB instance. For more information, + // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft + // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) + // in the Amazon RDS User Guide.
+ //
+ // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate
+ // users that connect to the DB instance. For more information, see Using Kerberos
+ // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html)
+ // in the Amazon RDS User Guide.
Domain *string `type:"string"`

// The name of the IAM role to use when making API calls to the Directory Service.
DomainIAMRoleName *string `type:"string"`

- // True to enable mapping of AWS Identity and Access Management (IAM) accounts
- // to database accounts, and otherwise false.
- //
- // You can enable IAM database authentication for the following database engines
+ // A value that indicates whether to enable mapping of AWS Identity and Access
+ // Management (IAM) accounts to database accounts. By default, mapping is disabled.
+ // For information about the supported DB engines, see CreateDBInstance.
//
- // Amazon Aurora
- //
- // Not applicable. Mapping AWS IAM accounts to database accounts is managed
- // by the DB cluster. For more information, see ModifyDBCluster.
- //
- // MySQL
- //
- // * For MySQL 5.6, minor version 5.6.34 or higher
- //
- // * For MySQL 5.7, minor version 5.7.16 or higher
- //
- // Default: false
+ // For more information about IAM database authentication, see IAM Database
+ // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html)
+ // in the Amazon RDS User Guide.
EnableIAMDatabaseAuthentication *bool `type:"boolean"`

- // True to enable Performance Insights for the DB instance, and otherwise false.
+ // A value that indicates whether to enable Performance Insights for the DB
+ // instance.
//
// For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html)
// in the Amazon Relational Database Service User Guide.
@@ -26999,7 +29139,7 @@ type ModifyDBInstanceInput struct {
// The version number of the database engine to upgrade to. Changing this parameter
// results in an outage and the change is applied during the next maintenance
- // window unless the ApplyImmediately parameter is set to true for this request.
+ // window unless the ApplyImmediately parameter is enabled for this request.
//
// For major version upgrades, if a nondefault DB parameter group is currently
// in use, a new DB parameter group in the DB parameter group family for the
@@ -27014,9 +29154,9 @@ type ModifyDBInstanceInput struct {
//
// Changing this setting doesn't result in an outage and the change is applied
// during the next maintenance window unless the ApplyImmediately parameter
- // is set to true for this request. If you are migrating from Provisioned IOPS
- // to standard storage, set this value to 0. The DB instance will require a
- // reboot for the change in storage type to take effect.
+ // is enabled for this request. If you are migrating from Provisioned IOPS to
+ // standard storage, set this value to 0. The DB instance will require a reboot
+ // for the change in storage type to take effect.
//
// If you choose to migrate your DB instance from using standard storage to
// using Provisioned IOPS, or from using Provisioned IOPS to using standard
@@ -27084,6 +29224,10 @@ type ModifyDBInstanceInput struct {
// This includes restoring privileges that might have been accidentally revoked.
MasterUserPassword *string `type:"string"`

+ // The upper limit to which Amazon RDS can automatically scale the storage of
+ // the DB instance.
+ MaxAllocatedStorage *int64 `type:"integer"`
+
// The interval, in seconds, between points when Enhanced Monitoring metrics
// are collected for the DB instance. To disable collecting Enhanced Monitoring
// metrics, specify 0. The default is 0.
@@ -27104,16 +29248,17 @@ type ModifyDBInstanceInput struct {
// a MonitoringRoleArn value.
MonitoringRoleArn *string `type:"string"`

- // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter
- // doesn't result in an outage and the change is applied during the next maintenance
- // window unless the ApplyImmediately parameter is set to true for this request.
+ // A value that indicates whether the DB instance is a Multi-AZ deployment.
+ // Changing this parameter doesn't result in an outage and the change is applied
+ // during the next maintenance window unless the ApplyImmediately parameter
+ // is enabled for this request.
MultiAZ *bool `type:"boolean"`

// The new DB instance identifier for the DB instance when renaming a DB instance.
- // When you change the DB instance identifier, an instance reboot will occur
- // immediately if you set Apply Immediately to true, or will occur during the
- // next maintenance window if Apply Immediately to false. This value is stored
- // as a lowercase string.
+ // When you change the DB instance identifier, an instance reboot occurs
+ // immediately if you enable ApplyImmediately, or during the next maintenance
+ // window if you disable ApplyImmediately. This value is stored as a lowercase
+ // string.
//
// Constraints:
//
@@ -27129,8 +29274,8 @@ type ModifyDBInstanceInput struct {
// Indicates that the DB instance should be associated with the specified option
// group. Changing this parameter doesn't result in an outage except in the
// following case and the change is applied during the next maintenance window
- // unless the ApplyImmediately parameter is set to true for this request. If
- // the parameter change results in an option group that enables OEM, this change
+ // unless the ApplyImmediately parameter is enabled for this request. If the
+ // parameter change results in an option group that enables OEM, this change
// can cause a brief (sub-second) period during which new connections are rejected
// but existing connections are not interrupted.
//
@@ -27142,6 +29287,11 @@ type ModifyDBInstanceInput struct {
// The AWS KMS key identifier for encryption of Performance Insights data. The
// KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the
// KMS key alias for the KMS encryption key.
+ //
+ // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon
+ // RDS uses your default encryption key. AWS KMS creates the default encryption
+ // key for your AWS account. Your AWS account has a different default encryption
+ // key for each AWS Region.
PerformanceInsightsKMSKeyId *string `type:"string"`

// The amount of time, in days, to retain Performance Insights data. Valid values
@@ -27193,7 +29343,7 @@ type ModifyDBInstanceInput struct {
// A value that specifies the order in which an Aurora Replica is promoted to
// the primary instance after a failure of the existing primary instance.
For - // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) // in the Amazon Aurora User Guide. // // Default: 1 @@ -27201,20 +29351,18 @@ type ModifyDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // Boolean value that indicates if the DB instance has a publicly resolvable - // DNS name. Set to True to make the DB instance Internet-facing with a publicly - // resolvable DNS name, which resolves to a public IP address. Set to False - // to make the DB instance internal with a DNS name that resolves to a private - // IP address. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. // // PubliclyAccessible only applies to DB instances in a VPC. The DB instance - // must be part of a public subnet and PubliclyAccessible must be true in order - // for it to be publicly accessible. + // must be part of a public subnet and PubliclyAccessible must be enabled for + // it to be publicly accessible. // // Changes to the PubliclyAccessible parameter are applied immediately regardless // of the value of the ApplyImmediately parameter. - // - // Default: false PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -27237,7 +29385,7 @@ type ModifyDBInstanceInput struct { // // Valid values: standard | gp2 | io1 // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // The ARN from the key store with which to associate the instance for TDE encryption. @@ -27247,7 +29395,7 @@ type ModifyDBInstanceInput struct { // device. TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -27426,6 +29574,12 @@ func (s *ModifyDBInstanceInput) SetMasterUserPassword(v string) *ModifyDBInstanc return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *ModifyDBInstanceInput) SetMaxAllocatedStorage(v int64) *ModifyDBInstanceInput { + s.MaxAllocatedStorage = &v + return s +} + // SetMonitoringInterval sets the MonitoringInterval field's value. func (s *ModifyDBInstanceInput) SetMonitoringInterval(v int64) *ModifyDBInstanceInput { s.MonitoringInterval = &v @@ -27930,7 +30084,7 @@ func (s *ModifyDBSubnetGroupOutput) SetDBSubnetGroup(v *DBSubnetGroup) *ModifyDB type ModifyEventSubscriptionInput struct { _ struct{} `type:"structure"` - // A Boolean value; set to true to activate the subscription. + // A value that indicates whether to activate the subscription. Enabled *bool `type:"boolean"` // A list of event categories for a SourceType that you want to subscribe to. 
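The ModifyDBInstanceInput hunks above do two things: they reword the boolean parameter docs from true/false to enabled/disabled, and they add the MaxAllocatedStorage field together with its SetMaxAllocatedStorage setter, which is what turns on storage autoscaling for an existing instance. A minimal sketch of driving both through the generated setters; the region, the instance identifier, and the 1000 GiB ceiling are placeholder values, not anything this diff prescribes:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := rds.New(sess)

	// Apply the change right away instead of waiting for the maintenance
	// window, and let RDS grow storage automatically up to 1000 GiB.
	// "mydb" is a placeholder identifier.
	input := (&rds.ModifyDBInstanceInput{}).
		SetDBInstanceIdentifier("mydb").
		SetApplyImmediately(true).
		SetMaxAllocatedStorage(1000)

	out, err := svc.ModifyDBInstance(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DBInstance.DBInstanceStatus))
}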
@@ -27947,7 +30101,7 @@ type ModifyEventSubscriptionInput struct { // The type of source that is generating the events. For example, if you want // to be notified of events generated by a DB instance, you would set this parameter - // to db-instance. if this value is not specified, all events are returned. + // to db-instance. If this value isn't specified, all events are returned. // // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot SourceType *string `type:"string"` @@ -28039,11 +30193,12 @@ type ModifyGlobalClusterInput struct { _ struct{} `type:"structure"` // Indicates if the global database cluster has deletion protection enabled. - // The global database cluster can't be deleted when this value is set to true. + // The global database cluster can't be deleted when deletion protection is + // enabled. DeletionProtection *bool `type:"boolean"` // The DB cluster identifier for the global cluster being modified. This parameter - // is not case-sensitive. + // isn't case-sensitive. // // Constraints: // @@ -28119,8 +30274,9 @@ func (s *ModifyGlobalClusterOutput) SetGlobalCluster(v *GlobalCluster) *ModifyGl type ModifyOptionGroupInput struct { _ struct{} `type:"structure"` - // Indicates whether the changes should be applied immediately, or during the - // next maintenance window for each instance associated with the option group. + // A value that indicates whether to apply the change immediately or during + // the next maintenance window for each instance associated with the option + // group. ApplyImmediately *bool `type:"boolean"` // The name of the option group to be modified. @@ -28988,9 +31144,16 @@ type OrderableDBInstanceOption struct { // Indicates whether a DB instance supports provisioned IOPS. SupportsIops *bool `type:"boolean"` + // Whether a DB instance supports Kerberos Authentication. + SupportsKerberosAuthentication *bool `type:"boolean"` + // True if a DB instance supports Performance Insights, otherwise false. SupportsPerformanceInsights *bool `type:"boolean"` + // Whether or not Amazon RDS can automatically scale storage for DB instances + // that use the specified instance class. + SupportsStorageAutoscaling *bool `type:"boolean"` + // Indicates whether a DB instance supports encrypted storage. SupportsStorageEncryption *bool `type:"boolean"` @@ -29122,12 +31285,24 @@ func (s *OrderableDBInstanceOption) SetSupportsIops(v bool) *OrderableDBInstance return s } +// SetSupportsKerberosAuthentication sets the SupportsKerberosAuthentication field's value. +func (s *OrderableDBInstanceOption) SetSupportsKerberosAuthentication(v bool) *OrderableDBInstanceOption { + s.SupportsKerberosAuthentication = &v + return s +} + // SetSupportsPerformanceInsights sets the SupportsPerformanceInsights field's value. func (s *OrderableDBInstanceOption) SetSupportsPerformanceInsights(v bool) *OrderableDBInstanceOption { s.SupportsPerformanceInsights = &v return s } +// SetSupportsStorageAutoscaling sets the SupportsStorageAutoscaling field's value. +func (s *OrderableDBInstanceOption) SetSupportsStorageAutoscaling(v bool) *OrderableDBInstanceOption { + s.SupportsStorageAutoscaling = &v + return s +} + // SetSupportsStorageEncryption sets the SupportsStorageEncryption field's value. func (s *OrderableDBInstanceOption) SetSupportsStorageEncryption(v bool) *OrderableDBInstanceOption { s.SupportsStorageEncryption = &v @@ -29306,8 +31481,7 @@ type PendingMaintenanceAction struct { // The date of the maintenance window when the action is applied. 
The maintenance // action is applied to the resource during its first maintenance window after - // this date. If this date is specified, any next-maintenance opt-in requests - // are ignored. + // this date. AutoAppliedAfterDate *time.Time `type:"timestamp"` // The effective date when the pending maintenance action is applied to the @@ -29322,8 +31496,7 @@ type PendingMaintenanceAction struct { // The date when the maintenance action is automatically applied. The maintenance // action is applied to the resource on this date regardless of the maintenance - // window for the resource. If this date is specified, any immediate opt-in - // requests are ignored. + // window for the resource. ForcedApplyDate *time.Time `type:"timestamp"` // Indicates the type of opt-in request that has been received for the resource. @@ -29606,7 +31779,7 @@ type PromoteReadReplicaDBClusterInput struct { _ struct{} `type:"structure"` // The identifier of the DB cluster Read Replica to promote. This parameter - // is not case-sensitive. + // isn't case-sensitive. // // Constraints: // @@ -29676,15 +31849,17 @@ func (s *PromoteReadReplicaDBClusterOutput) SetDBCluster(v *DBCluster) *PromoteR type PromoteReadReplicaInput struct { _ struct{} `type:"structure"` - // The number of days to retain automated backups. Setting this parameter to - // a positive number enables backups. Setting this parameter to 0 disables automated - // backups. + // The number of days for which automated backups are retained. Setting this + // parameter to a positive number enables backups. Setting this parameter to + // 0 disables automated backups. // // Default: 1 // // Constraints: // - // * Must be a value from 0 to 8 + // * Must be a value from 0 to 35. + // + // * Can't be set to 0 if the DB instance is a source to Read Replicas. BackupRetentionPeriod *int64 `type:"integer"` // The DB instance identifier. This value is stored as a lowercase string. @@ -29702,7 +31877,7 @@ type PromoteReadReplicaInput struct { // backups are enabled, using the BackupRetentionPeriod parameter. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. To see the time blocks available, see Adjusting // the Preferred Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) // in the Amazon RDS User Guide. // @@ -29937,10 +32112,11 @@ type RebootDBInstanceInput struct { // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` - // When true, the reboot is conducted through a MultiAZ failover. + // A value that indicates whether the reboot is conducted through a Multi-AZ + // failover. // - // Constraint: You can't specify true if the instance is not configured for - // MultiAZ. + // Constraint: You can't enable force failover if the instance isn't configured + // for Multi-AZ. ForceFailover *bool `type:"boolean"` } @@ -30335,7 +32511,7 @@ type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` // The Amazon RDS resource that the tags are removed from. This value is an - // Amazon Resource Name (ARN). For information about creating an ARN, see Constructing + // Amazon Resource Name (ARN). For information about creating an ARN, see Constructing // an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. 
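The RebootDBInstanceInput hunk above rewords ForceFailover without changing its semantics: the flag reboots through a Multi-AZ failover and is rejected for single-AZ instances. A small sketch under those assumptions, with the identifier as a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := rds.New(sess)

	// Reboot through a Multi-AZ failover. Per the constraint in the doc
	// comment, this fails if "mydb" (a placeholder) isn't Multi-AZ.
	out, err := svc.RebootDBInstance((&rds.RebootDBInstanceInput{}).
		SetDBInstanceIdentifier("mydb").
		SetForceFailover(true))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DBInstance.DBInstanceStatus))
}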
// @@ -30420,6 +32596,12 @@ type ReservedDBInstance struct { // The fixed price charged for this reserved DB instance. FixedPrice *float64 `type:"double"` + // The unique identifier for the lease associated with the reserved DB instance. + // + // AWS Support might request the lease ID for an issue related to a reserved + // DB instance. + LeaseId *string `type:"string"` + // Indicates if the reservation applies to Multi-AZ deployments. MultiAZ *bool `type:"boolean"` @@ -30491,6 +32673,12 @@ func (s *ReservedDBInstance) SetFixedPrice(v float64) *ReservedDBInstance { return s } +// SetLeaseId sets the LeaseId field's value. +func (s *ReservedDBInstance) SetLeaseId(v string) *ReservedDBInstance { + s.LeaseId = &v + return s +} + // SetMultiAZ sets the MultiAZ field's value. func (s *ReservedDBInstance) SetMultiAZ(v bool) *ReservedDBInstance { s.MultiAZ = &v @@ -30667,12 +32855,12 @@ type ResetDBClusterParameterGroupInput struct { // A list of parameter names in the DB cluster parameter group to reset to the // default values. You can't use this parameter if the ResetAllParameters parameter - // is set to true. + // is enabled. Parameters []*Parameter `locationNameList:"Parameter" type:"list"` - // A value that is set to true to reset all parameters in the DB cluster parameter - // group to their default values, and false otherwise. You can't use this parameter - // if there is a list of parameter names specified for the Parameters parameter. + // A value that indicates whether to reset all parameters in the DB cluster + // parameter group to their default values. You can't use this parameter if + // there is a list of parameter names specified for the Parameters parameter. ResetAllParameters *bool `type:"boolean"` } @@ -30755,10 +32943,9 @@ type ResetDBParameterGroupInput struct { // Valid Values (for Apply method): pending-reboot Parameters []*Parameter `locationNameList:"Parameter" type:"list"` - // Specifies whether (true) or not (false) to reset all parameters in the DB - // parameter group to default values. - // - // Default: true + // A value that indicates whether to reset all parameters in the DB parameter + // group to default values. By default, all parameters in the DB parameter group + // are reset to default values. ResetAllParameters *bool `type:"boolean"` } @@ -30840,8 +33027,8 @@ func (s *ResourcePendingMaintenanceActions) SetResourceIdentifier(v string) *Res type RestoreDBClusterFromS3Input struct { _ struct{} `type:"structure"` - // A list of EC2 Availability Zones that instances in the restored DB cluster - // can be created in. + // A list of Availability Zones (AZs) where instances in the restored DB cluster + // can be created. AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` // The target backtrack window, in seconds. To disable backtracking, set this @@ -30869,8 +33056,8 @@ type RestoreDBClusterFromS3Input struct { // with the specified CharacterSet. CharacterSetName *string `type:"string"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the DB cluster to create from the source data in the Amazon S3 @@ -30907,9 +33094,9 @@ type RestoreDBClusterFromS3Input struct { // The database name for the restored DB cluster. 
DatabaseName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to CloudWatch @@ -30918,10 +33105,11 @@ type RestoreDBClusterFromS3Input struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // - // Default: false + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for the restored DB cluster. @@ -30933,13 +33121,28 @@ type RestoreDBClusterFromS3Input struct { // The version number of the database engine to use. // + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // // Aurora MySQL // - // Example: 5.6.10a + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 // // Aurora PostgreSQL // - // Example: 9.6.3 + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The AWS KMS key identifier for an encrypted DB cluster. @@ -30949,7 +33152,7 @@ type RestoreDBClusterFromS3Input struct { // the KMS encryption key used to encrypt the new DB cluster, then you can use // the KMS key alias instead of the ARN for the KM encryption key. // - // If the StorageEncrypted parameter is true, and you do not specify a value + // If the StorageEncrypted parameter is enabled, and you do not specify a value // for the KmsKeyId parameter, then Amazon RDS will use your default encryption // key. AWS KMS creates the default encryption key for your AWS account. Your // AWS account has a different default encryption key for each AWS Region. @@ -30993,7 +33196,7 @@ type RestoreDBClusterFromS3Input struct { // backups are enabled using the BackupRetentionPeriod parameter. // // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. To see the time blocks available, see Adjusting + // of time for each AWS Region. 
To see the time blocks available, see Adjusting // the Preferred Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // @@ -31015,7 +33218,7 @@ type RestoreDBClusterFromS3Input struct { // // The default is a 30-minute window selected at random from an 8-hour block // of time for each AWS Region, occurring on a random day of the week. To see - // the time blocks available, see Adjusting the Preferred Maintenance Window + // the time blocks available, see Adjusting the Preferred Maintenance Window // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // @@ -31060,7 +33263,7 @@ type RestoreDBClusterFromS3Input struct { // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` - // Specifies whether the restored DB cluster is encrypted. + // A value that indicates whether the restored DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -31318,8 +33521,8 @@ func (s *RestoreDBClusterFromS3Output) SetDBCluster(v *DBCluster) *RestoreDBClus type RestoreDBClusterFromSnapshotInput struct { _ struct{} `type:"structure"` - // Provides the list of Amazon EC2 Availability Zones that instances in the - // restored DB cluster can be created in. + // Provides the list of Availability Zones (AZs) where instances in the restored + // DB cluster can be created. AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` // The target backtrack window, in seconds. To disable backtracking, set this @@ -31333,8 +33536,8 @@ type RestoreDBClusterFromSnapshotInput struct { // hours). BacktrackWindow *int64 `type:"long"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. @@ -31379,21 +33582,22 @@ type RestoreDBClusterFromSnapshotInput struct { // The database name for the restored DB cluster. DatabaseName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to Amazon CloudWatch // Logs. The values in the list depend on the DB engine being used. 
For more - // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // - // Default: false + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new DB cluster. @@ -31405,11 +33609,37 @@ type RestoreDBClusterFromSnapshotInput struct { // Engine is a required field Engine *string `type:"string" required:"true"` - // The DB engine mode of the DB cluster, either provisioned, serverless, or - // parallelquery. + // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, + // global, or multimaster. EngineMode *string `type:"string"` // The version of the database engine to use for the new DB cluster. + // + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // + // If you aren't using the default engine version, then you must specify the + // engine version. + // + // Aurora MySQL + // + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 + // + // Aurora PostgreSQL + // + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The AWS KMS key identifier to use when restoring an encrypted DB cluster @@ -31427,8 +33657,8 @@ type RestoreDBClusterFromSnapshotInput struct { // then the restored DB cluster is encrypted using the KMS key that was used // to encrypt the DB snapshot or DB cluster snapshot. // - // * If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is not - // encrypted, then the restored DB cluster is not encrypted. + // * If the DB snapshot or DB cluster snapshot in SnapshotIdentifier isn't + // encrypted, then the restored DB cluster isn't encrypted. KmsKeyId *string `type:"string"` // The name of the option group to use for the restored DB cluster. @@ -31654,8 +33884,8 @@ type RestoreDBClusterToPointInTimeInput struct { // hours). BacktrackWindow *int64 `type:"long"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. 
+ // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the new DB cluster to be created. @@ -31694,9 +33924,9 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to CloudWatch @@ -31705,10 +33935,11 @@ type RestoreDBClusterToPointInTimeInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // - // Default: false + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The AWS KMS key identifier to use when restoring an encrypted DB cluster @@ -31730,10 +33961,10 @@ type RestoreDBClusterToPointInTimeInput struct { // * If the DB cluster is encrypted, then the restored DB cluster is encrypted // using the KMS key that was used to encrypt the source DB cluster. // - // * If the DB cluster is not encrypted, then the restored DB cluster is - // not encrypted. + // * If the DB cluster isn't encrypted, then the restored DB cluster isn't + // encrypted. // - // If DBClusterIdentifier refers to a DB cluster that is not encrypted, then + // If DBClusterIdentifier refers to a DB cluster that isn't encrypted, then // the restore request is rejected. KmsKeyId *string `type:"string"` @@ -31755,11 +33986,11 @@ type RestoreDBClusterToPointInTimeInput struct { // // * Must be before the latest restorable time for the DB instance // - // * Must be specified if UseLatestRestorableTime parameter is not provided + // * Must be specified if UseLatestRestorableTime parameter isn't provided // - // * Can't be specified if UseLatestRestorableTime parameter is true + // * Can't be specified if the UseLatestRestorableTime parameter is enabled // - // * Can't be specified if RestoreType parameter is copy-on-write + // * Can't be specified if the RestoreType parameter is copy-on-write // // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time `type:"timestamp"` @@ -31793,10 +34024,9 @@ type RestoreDBClusterToPointInTimeInput struct { // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that is set to true to restore the DB cluster to the latest restorable - // backup time, and false otherwise. - // - // Default: false + // A value that indicates whether to restore the DB cluster to the latest restorable + // backup time. By default, the DB cluster isn't restored to the latest restorable + // backup time. 
// // Constraints: Can't be specified if RestoreToTime parameter is provided. UseLatestRestorableTime *bool `type:"boolean"` @@ -31962,22 +34192,22 @@ func (s *RestoreDBClusterToPointInTimeOutput) SetDBCluster(v *DBCluster) *Restor type RestoreDBInstanceFromDBSnapshotInput struct { _ struct{} `type:"structure"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. AutoMinorVersionUpgrade *bool `type:"boolean"` - // The EC2 Availability Zone that the DB instance is created in. + // The Availability Zone (AZ) where the DB instance will be created. // // Default: A random, system-chosen Availability Zone. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. // // Example: us-east-1a AvailabilityZone *string `type:"string"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB instance + // to snapshots of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Amazon RDS DB instance, for example, @@ -32010,9 +34240,10 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines. DBName *string `type:"string"` - // The name of the DB parameter group to associate with this DB instance. If - // this argument is omitted, the default DBParameterGroup for the specified - // engine is used. + // The name of the DB parameter group to associate with this DB instance. + // + // If you do not specify a value for DBParameterGroupName, then the default + // DBParameterGroup for the specified DB engine is used. // // Constraints: // @@ -32044,12 +34275,27 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` - // Specify the Active Directory Domain to restore the instance in. + // Specify the Active Directory directory ID to restore the DB instance in. + // The domain must be created prior to this operation. Currently, only Microsoft + // SQL Server and Oracle DB instances can be created in an Active Directory + // Domain. + // + // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication + // to authenticate users that connect to the DB instance. 
For more information, + // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft + // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) + // in the Amazon RDS User Guide. + // + // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate + // users that connect to the DB instance. For more information, see Using Kerberos + // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // in the Amazon RDS User Guide. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the @@ -32062,16 +34308,13 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // * For MySQL 5.7, minor version 5.7.16 or higher - // - // Default: false + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. @@ -32107,7 +34350,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { Engine *string `type:"string"` // Specifies the amount of provisioned IOPS for the DB instance, expressed in - // I/O operations per second. If this parameter is not specified, the IOPS value + // I/O operations per second. If this parameter isn't specified, the IOPS value // is taken from the backup. If this parameter is set to 0, the new instance // is converted to a non-PIOPS instance. The conversion takes additional time, // though your DB instance is available for connections before the conversion @@ -32128,10 +34371,10 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` - // Specifies if the DB instance is a Multi-AZ deployment. + // A value that indicates whether the DB instance is a Multi-AZ deployment. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. MultiAZ *bool `type:"boolean"` // The name of the option group to be used for the restored DB instance. @@ -32152,11 +34395,12 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. 
A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -32165,7 +34409,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -32179,7 +34423,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // device. TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -32419,10 +34663,9 @@ type RestoreDBInstanceFromS3Input struct { // growth. AllocatedStorage *int64 `type:"integer"` - // True to indicate that minor engine upgrades are applied automatically to - // the DB instance during the maintenance window, and otherwise false. - // - // Default: true + // A value that indicates whether minor engine upgrades are applied automatically + // to the DB instance during the maintenance window. By default, minor engine + // upgrades are not applied automatically. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone that the DB instance is created in. For information @@ -32435,8 +34678,8 @@ type RestoreDBInstanceFromS3Input struct { // // Example: us-east-1d // - // Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ - // parameter is set to true. The specified Availability Zone must be in the + // Constraint: The AvailabilityZone parameter can't be specified if the DB instance + // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. AvailabilityZone *string `type:"string"` @@ -32445,10 +34688,8 @@ type RestoreDBInstanceFromS3Input struct { // CreateDBInstance. BackupRetentionPeriod *int64 `type:"integer"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. - // - // Default: false. + // A value that indicates whether to copy all tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the DB instance, for example, db.m4.large. @@ -32457,8 +34698,7 @@ type RestoreDBInstanceFromS3Input struct { // for your engine, see DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) // in the Amazon RDS User Guide. 
// - // Importing from Amazon S3 is not supported on the db.t2.micro DB instance - // class. + // Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. // // DBInstanceClass is a required field DBInstanceClass *string `type:"string" required:"true"` @@ -32482,9 +34722,10 @@ type RestoreDBInstanceFromS3Input struct { // the naming rules specified in CreateDBInstance. DBName *string `type:"string"` - // The name of the DB parameter group to associate with this DB instance. If - // this argument is omitted, the default parameter group for the specified engine - // is used. + // The name of the DB parameter group to associate with this DB instance. + // + // If you do not specify a value for DBParameterGroupName, then the default + // DBParameterGroup for the specified DB engine is used. DBParameterGroupName *string `type:"string"` // A list of DB security groups to associate with this DB instance. @@ -32495,9 +34736,10 @@ type RestoreDBInstanceFromS3Input struct { // A DB subnet group to associate with this DB instance. DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB instance is to export to CloudWatch @@ -32506,13 +34748,17 @@ type RestoreDBInstanceFromS3Input struct { // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // Default: false + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the DB instance, and otherwise false. + // A value that indicates whether to enable Performance Insights for the DB + // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon Relational Database Service User Guide. @@ -32532,7 +34778,7 @@ type RestoreDBInstanceFromS3Input struct { // The amount of Provisioned IOPS (input/output operations per second) to allocate // initially for the DB instance. 
For information about valid Iops values, see - // see Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + // Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) // in the Amazon RDS User Guide. Iops *int64 `type:"integer"` @@ -32543,7 +34789,7 @@ type RestoreDBInstanceFromS3Input struct { // the KMS encryption key used to encrypt the new DB instance, then you can // use the KMS key alias instead of the ARN for the KM encryption key. // - // If the StorageEncrypted parameter is true, and you do not specify a value + // If the StorageEncrypted parameter is enabled, and you do not specify a value // for the KmsKeyId parameter, then Amazon RDS will use your default encryption // key. AWS KMS creates the default encryption key for your AWS account. Your // AWS account has a different default encryption key for each AWS Region. @@ -32591,8 +34837,9 @@ type RestoreDBInstanceFromS3Input struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // Specifies whether the DB instance is a Multi-AZ deployment. If MultiAZ is - // set to true, you can't set the AvailabilityZone parameter. + // A value that indicates whether the DB instance is a Multi-AZ deployment. + // If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone + // parameter. MultiAZ *bool `type:"boolean"` // The name of the option group to associate with this DB instance. If this @@ -32603,6 +34850,11 @@ type RestoreDBInstanceFromS3Input struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or // the KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -32655,11 +34907,12 @@ type RestoreDBInstanceFromS3Input struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The name of your Amazon S3 bucket that contains your database backup file. 
@@ -32690,7 +34943,7 @@ type RestoreDBInstanceFromS3Input struct { // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` - // Specifies whether the new DB instance is encrypted or not. + // A value that indicates whether the new DB instance is encrypted or not. StorageEncrypted *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -32699,7 +34952,7 @@ type RestoreDBInstanceFromS3Input struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified; otherwise standard + // Default: io1 if the Iops parameter is specified; otherwise gp2 StorageType *string `type:"string"` // A list of tags to associate with this DB instance. For more information, @@ -32707,7 +34960,7 @@ type RestoreDBInstanceFromS3Input struct { // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -33042,22 +35295,22 @@ func (s *RestoreDBInstanceFromS3Output) SetDBInstance(v *DBInstance) *RestoreDBI type RestoreDBInstanceToPointInTimeInput struct { _ struct{} `type:"structure"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. AutoMinorVersionUpgrade *bool `type:"boolean"` - // The EC2 Availability Zone that the DB instance is created in. + // The Availability Zone (AZ) where the DB instance will be created. // // Default: A random, system-chosen Availability Zone. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. // // Example: us-east-1a AvailabilityZone *string `type:"string"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB instance + // to snapshots of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Amazon RDS DB instance, for example, @@ -33071,12 +35324,13 @@ type RestoreDBInstanceToPointInTimeInput struct { // The database name for the restored DB instance. // - // This parameter is not used for the MySQL or MariaDB engines. + // This parameter isn't used for the MySQL or MariaDB engines. DBName *string `type:"string"` - // The name of the DB parameter group to associate with this DB instance. If - // this argument is omitted, the default DBParameterGroup for the specified - // engine is used. + // The name of the DB parameter group to associate with this DB instance. + // + // If you do not specify a value for DBParameterGroupName, then the default + // DBParameterGroup for the specified DB engine is used. 
// // Constraints: // @@ -33096,12 +35350,27 @@ type RestoreDBInstanceToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` - // Specify the Active Directory Domain to restore the instance in. + // Specify the Active Directory directory ID to restore the DB instance in. + // The domain must be created prior to this operation. Currently, only Microsoft + // SQL Server and Oracle DB instances can be created in an Active Directory + // Domain. + // + // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication + // to authenticate users that connect to the DB instance. For more information, + // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft + // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) + // in the Amazon RDS User Guide. + // + // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate + // users that connect to the DB instance. For more information, see Using Kerberos + // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // in the Amazon RDS User Guide. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the @@ -33114,16 +35383,13 @@ type RestoreDBInstanceToPointInTimeInput struct { // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // Default: false + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. @@ -33164,7 +35430,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // SQL Server // - // Setting the IOPS value for the SQL Server database engine is not supported. + // Setting the IOPS value for the SQL Server database engine isn't supported. Iops *int64 `type:"integer"` // License model information for the restored DB instance. 
@@ -33174,10 +35440,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` - // Specifies if the DB instance is a Multi-AZ deployment. + // A value that indicates whether the DB instance is a Multi-AZ deployment. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. MultiAZ *bool `type:"boolean"` // The name of the option group to be used for the restored DB instance. @@ -33198,11 +35464,12 @@ type RestoreDBInstanceToPointInTimeInput struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance isn't publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The date and time to restore from. @@ -33213,7 +35480,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // * Must be before the latest restorable time for the DB instance // - // * Can't be specified if UseLatestRestorableTime parameter is true + // * Can't be specified if the UseLatestRestorableTime parameter is enabled // // Example: 2009-09-07T23:45:00Z RestoreTime *time.Time `type:"timestamp"` @@ -33234,7 +35501,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -33261,16 +35528,15 @@ type RestoreDBInstanceToPointInTimeInput struct { // device. TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` - // Specifies whether (true) or not (false) the DB instance is restored from - // the latest backup time. + // A value that indicates whether the DB instance is restored from the latest + // backup time. By default, the DB instance isn't restored from the latest backup + // time. // - // Default: false - // - // Constraints: Can't be specified if RestoreTime parameter is provided. + // Constraints: Can't be specified if the RestoreTime parameter is provided. UseLatestRestorableTime *bool `type:"boolean"` // A list of EC2 VPC security groups to associate with this DB instance. 
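// Editor's aside, not part of the vendored diff: per the RestoreTime and
// UseLatestRestorableTime docs above, the two parameters are mutually exclusive.
// This sketch restores to an explicit timestamp (the example value from the docs)
// and then blocks on the DBInstanceAvailable waiter defined later in this vendor
// update. Identifiers are hypothetical placeholders.
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := rds.New(sess)

	// Parse error ignored only because the literal is known-good RFC 3339.
	restoreAt, _ := time.Parse(time.RFC3339, "2009-09-07T23:45:00Z")

	_, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
		SourceDBInstanceIdentifier: aws.String("source-db"),   // hypothetical
		TargetDBInstanceIdentifier: aws.String("restored-db"), // hypothetical
		RestoreTime:                aws.Time(restoreAt),       // omit when UseLatestRestorableTime is set
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll DescribeDBInstances until the new instance reports "available".
	err = svc.WaitUntilDBInstanceAvailable(&rds.DescribeDBInstancesInput{
		DBInstanceIdentifier: aws.String("restored-db"),
	})
	if err != nil {
		log.Fatal(err)
	}
}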
@@ -33569,8 +35835,8 @@ type RevokeDBSecurityGroupIngressInput struct { // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided. EC2SecurityGroupName *string `type:"string"` - // The AWS Account Number of the owner of the EC2 security group specified in - // the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // The AWS account number of the owner of the EC2 security group specified in + // the EC2SecurityGroupName parameter. The AWS access key ID isn't an acceptable // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId // must be provided. @@ -33663,7 +35929,7 @@ func (s *RevokeDBSecurityGroupIngressOutput) SetDBSecurityGroup(v *DBSecurityGro type ScalingConfiguration struct { _ struct{} `type:"structure"` - // A value that specifies whether to allow or disallow automatic pause for an + // A value that indicates whether to allow or disallow automatic pause for an // Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused // only when it's idle (it has no connections). // @@ -33674,14 +35940,14 @@ type ScalingConfiguration struct { // The maximum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256. + // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. // // The maximum capacity must be greater than or equal to the minimum capacity. MaxCapacity *int64 `type:"integer"` // The minimum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256. + // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. // // The minimum capacity must be less than or equal to the maximum capacity. MinCapacity *int64 `type:"integer"` @@ -33692,13 +35958,16 @@ type ScalingConfiguration struct { // The action to take when the timeout is reached, either ForceApplyCapacityChange // or RollbackCapacityChange. // - // ForceApplyCapacityChange, the default, sets the capacity to the specified - // value as soon as possible. + // ForceApplyCapacityChange sets the capacity to the specified value as soon + // as possible. // - // RollbackCapacityChange ignores the capacity change if a scaling point is - // not found in the timeout period. + // RollbackCapacityChange, the default, ignores the capacity change if a scaling + // point isn't found in the timeout period. // - // For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) + // If you specify ForceApplyCapacityChange, connections that prevent Aurora + // Serverless from finding a scaling point might be dropped. + // + // For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) // in the Amazon Aurora User Guide. TimeoutAction *string `type:"string"` } @@ -33857,6 +36126,147 @@ func (s *SourceRegion) SetStatus(v string) *SourceRegion { return s } +type StartActivityStreamInput struct { + _ struct{} `type:"structure"` + + // Specifies whether or not the database activity stream is to start as soon + // as possible, regardless of the maintenance window for the database. 
+ ApplyImmediately *bool `type:"boolean"` + + // The AWS KMS key identifier for encrypting messages in the database activity + // stream. The key identifier can be either a key ID, a key ARN, or a key alias. + // + // KmsKeyId is a required field + KmsKeyId *string `type:"string" required:"true"` + + // Specifies the mode of the database activity stream. Database events such + // as a change or access generate an activity stream event. The database session + // can handle these events either synchronously or asynchronously. + // + // Mode is a required field + Mode *string `type:"string" required:"true" enum:"ActivityStreamMode"` + + // The Amazon Resource Name (ARN) of the DB cluster, for example arn:aws:rds:us-east-1:12345667890:cluster:das-cluster. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartActivityStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartActivityStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartActivityStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartActivityStreamInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + if s.Mode == nil { + invalidParams.Add(request.NewErrParamRequired("Mode")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *StartActivityStreamInput) SetApplyImmediately(v bool) *StartActivityStreamInput { + s.ApplyImmediately = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *StartActivityStreamInput) SetKmsKeyId(v string) *StartActivityStreamInput { + s.KmsKeyId = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *StartActivityStreamInput) SetMode(v string) *StartActivityStreamInput { + s.Mode = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *StartActivityStreamInput) SetResourceArn(v string) *StartActivityStreamInput { + s.ResourceArn = &v + return s +} + +type StartActivityStreamOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not the database activity stream will start as soon + // as possible, regardless of the maintenance window for the database. + ApplyImmediately *bool `type:"boolean"` + + // The name of the Amazon Kinesis data stream to be used for the database activity + // stream. + KinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier for encryption of messages in the database activity + // stream. + KmsKeyId *string `type:"string"` + + // The mode of the database activity stream. + Mode *string `type:"string" enum:"ActivityStreamMode"` + + // The status of the database activity stream. + Status *string `type:"string" enum:"ActivityStreamStatus"` +} + +// String returns the string representation +func (s StartActivityStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartActivityStreamOutput) GoString() string { + return s.String() +} + +// SetApplyImmediately sets the ApplyImmediately field's value. 
+func (s *StartActivityStreamOutput) SetApplyImmediately(v bool) *StartActivityStreamOutput { + s.ApplyImmediately = &v + return s +} + +// SetKinesisStreamName sets the KinesisStreamName field's value. +func (s *StartActivityStreamOutput) SetKinesisStreamName(v string) *StartActivityStreamOutput { + s.KinesisStreamName = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *StartActivityStreamOutput) SetKmsKeyId(v string) *StartActivityStreamOutput { + s.KmsKeyId = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *StartActivityStreamOutput) SetMode(v string) *StartActivityStreamOutput { + s.Mode = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartActivityStreamOutput) SetStatus(v string) *StartActivityStreamOutput { + s.Status = &v + return s +} + type StartDBClusterInput struct { _ struct{} `type:"structure"` @@ -33985,6 +36395,98 @@ func (s *StartDBInstanceOutput) SetDBInstance(v *DBInstance) *StartDBInstanceOut return s } +type StopActivityStreamInput struct { + _ struct{} `type:"structure"` + + // Specifies whether or not the database activity stream is to stop as soon + // as possible, regardless of the maintenance window for the database. + ApplyImmediately *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the DB cluster for the database activity + // stream. For example, arn:aws:rds:us-east-1:12345667890:cluster:das-cluster. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopActivityStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopActivityStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopActivityStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopActivityStreamInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *StopActivityStreamInput) SetApplyImmediately(v bool) *StopActivityStreamInput { + s.ApplyImmediately = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *StopActivityStreamInput) SetResourceArn(v string) *StopActivityStreamInput { + s.ResourceArn = &v + return s +} + +type StopActivityStreamOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon Kinesis data stream used for the database activity + // stream. + KinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier used for encrypting messages in the database activity + // stream. + KmsKeyId *string `type:"string"` + + // The status of the database activity stream. + Status *string `type:"string" enum:"ActivityStreamStatus"` +} + +// String returns the string representation +func (s StopActivityStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopActivityStreamOutput) GoString() string { + return s.String() +} + +// SetKinesisStreamName sets the KinesisStreamName field's value. +func (s *StopActivityStreamOutput) SetKinesisStreamName(v string) *StopActivityStreamOutput { + s.KinesisStreamName = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
+func (s *StopActivityStreamOutput) SetKmsKeyId(v string) *StopActivityStreamOutput { + s.KmsKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StopActivityStreamOutput) SetStatus(v string) *StopActivityStreamOutput { + s.Status = &v + return s +} + type StopDBClusterInput struct { _ struct{} `type:"structure"` @@ -34130,9 +36632,8 @@ type Subnet struct { // Contains Availability Zone information. // - // This data type is used as an element in the following data type: - // - // * OrderableDBInstanceOption + // This data type is used as an element in the OrderableDBInstanceOption data + // type. SubnetAvailabilityZone *AvailabilityZone `type:"structure"` // Specifies the identifier of the subnet. @@ -34350,6 +36851,10 @@ type ValidStorageOptions struct { // The valid storage types for your DB instance. For example, gp2, io1. StorageType *string `type:"string"` + + // Whether or not Amazon RDS can automatically scale storage for DB instances + // that use the new instance class. + SupportsStorageAutoscaling *bool `type:"boolean"` } // String returns the string representation @@ -34386,6 +36891,12 @@ func (s *ValidStorageOptions) SetStorageType(v string) *ValidStorageOptions { return s } +// SetSupportsStorageAutoscaling sets the SupportsStorageAutoscaling field's value. +func (s *ValidStorageOptions) SetSupportsStorageAutoscaling(v bool) *ValidStorageOptions { + s.SupportsStorageAutoscaling = &v + return s +} + // This data type is used as a response element for queries on VPC security // group membership. type VpcSecurityGroupMembership struct { @@ -34420,6 +36931,102 @@ func (s *VpcSecurityGroupMembership) SetVpcSecurityGroupId(v string) *VpcSecurit return s } +// Information about the virtual private network (VPN) between the VMware vSphere +// cluster and the AWS website. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type VpnDetails struct { + _ struct{} `type:"structure"` + + // The IP address of network traffic from AWS to your on-premises data center. + VpnGatewayIp *string `type:"string"` + + // The ID of the VPN. + VpnId *string `type:"string"` + + // The name of the VPN. + VpnName *string `type:"string"` + + // The preshared key (PSK) for the VPN. + VpnPSK *string `type:"string" sensitive:"true"` + + // The state of the VPN. + VpnState *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s VpnDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnDetails) GoString() string { + return s.String() +} + +// SetVpnGatewayIp sets the VpnGatewayIp field's value. +func (s *VpnDetails) SetVpnGatewayIp(v string) *VpnDetails { + s.VpnGatewayIp = &v + return s +} + +// SetVpnId sets the VpnId field's value. +func (s *VpnDetails) SetVpnId(v string) *VpnDetails { + s.VpnId = &v + return s +} + +// SetVpnName sets the VpnName field's value. +func (s *VpnDetails) SetVpnName(v string) *VpnDetails { + s.VpnName = &v + return s +} + +// SetVpnPSK sets the VpnPSK field's value. +func (s *VpnDetails) SetVpnPSK(v string) *VpnDetails { + s.VpnPSK = &v + return s +} + +// SetVpnState sets the VpnState field's value. 
+func (s *VpnDetails) SetVpnState(v string) *VpnDetails { + s.VpnState = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *VpnDetails) SetVpnTunnelOriginatorIP(v string) *VpnDetails { + s.VpnTunnelOriginatorIP = &v + return s +} + +const ( + // ActivityStreamModeSync is a ActivityStreamMode enum value + ActivityStreamModeSync = "sync" + + // ActivityStreamModeAsync is a ActivityStreamMode enum value + ActivityStreamModeAsync = "async" +) + +const ( + // ActivityStreamStatusStopped is a ActivityStreamStatus enum value + ActivityStreamStatusStopped = "stopped" + + // ActivityStreamStatusStarting is a ActivityStreamStatus enum value + ActivityStreamStatusStarting = "starting" + + // ActivityStreamStatusStarted is a ActivityStreamStatus enum value + ActivityStreamStatusStarted = "started" + + // ActivityStreamStatusStopping is a ActivityStreamStatus enum value + ActivityStreamStatusStopping = "stopping" +) + const ( // ApplyMethodImmediate is a ApplyMethod enum value ApplyMethodImmediate = "immediate" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go index d412fb282ba..cee03588640 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go @@ -48,6 +48,12 @@ func copyDBSnapshotPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CopyDBSnapshotInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -60,6 +66,11 @@ func createDBInstanceReadReplicaPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CreateDBInstanceReadReplicaInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -72,6 +83,11 @@ func copyDBClusterSnapshotPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CopyDBClusterSnapshotInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -84,6 +100,11 @@ func createDBClusterPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. 
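+	// Editor's note: SourceRegion is known to be non-nil at this point (the guard
+	// at the top of this function returns early when it is nil), and
+	// DestinationRegion was just assigned from r.Config.Region above, so this
+	// compares the two region strings by value rather than by pointer identity.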
+ if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CreateDBClusterInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go index 7edf6b51882..6a263d8704d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go @@ -3,11 +3,12 @@ // Package rds provides the client and types for making API // requests to Amazon Relational Database Service. // +// // Amazon Relational Database Service (Amazon RDS) is a web service that makes // it easier to set up, operate, and scale a relational database in the cloud. -// It provides cost-efficient, resizable capacity for an industry-standard relational -// database and manages common database administration tasks, freeing up developers -// to focus on what makes their applications and businesses unique. +// It provides cost-efficient, resizeable capacity for an industry-standard +// relational database and manages common database administration tasks, freeing +// up developers to focus on what makes their applications and businesses unique. // // Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, // Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go index 86ee24482b0..25495cfe65a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go @@ -7,18 +7,18 @@ const ( // ErrCodeAuthorizationAlreadyExistsFault for service response error code // "AuthorizationAlreadyExists". // - // The specified CIDRIP or Amazon EC2 security group is already authorized for - // the specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group is already authorized + // for the specified DB security group. ErrCodeAuthorizationAlreadyExistsFault = "AuthorizationAlreadyExists" // ErrCodeAuthorizationNotFoundFault for service response error code // "AuthorizationNotFound". // - // The specified CIDRIP or Amazon EC2 security group isn't authorized for the - // specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group might not be authorized + // for the specified DB security group. // - // RDS also may not be authorized by using IAM to perform necessary actions - // on your behalf. + // Or, RDS might not be authorized to perform necessary actions using IAM on + // your behalf. ErrCodeAuthorizationNotFoundFault = "AuthorizationNotFound" // ErrCodeAuthorizationQuotaExceededFault for service response error code @@ -37,6 +37,26 @@ const ( // CertificateIdentifier doesn't refer to an existing certificate. ErrCodeCertificateNotFoundFault = "CertificateNotFound" + // ErrCodeCustomAvailabilityZoneAlreadyExistsFault for service response error code + // "CustomAvailabilityZoneAlreadyExists". + // + // CustomAvailabilityZoneName is already used by an existing custom Availability + // Zone. 
+ ErrCodeCustomAvailabilityZoneAlreadyExistsFault = "CustomAvailabilityZoneAlreadyExists" + + // ErrCodeCustomAvailabilityZoneNotFoundFault for service response error code + // "CustomAvailabilityZoneNotFound". + // + // CustomAvailabilityZoneId doesn't refer to an existing custom Availability + // Zone identifier. + ErrCodeCustomAvailabilityZoneNotFoundFault = "CustomAvailabilityZoneNotFound" + + // ErrCodeCustomAvailabilityZoneQuotaExceededFault for service response error code + // "CustomAvailabilityZoneQuotaExceeded". + // + // You have exceeded the maximum number of custom Availability Zones. + ErrCodeCustomAvailabilityZoneQuotaExceededFault = "CustomAvailabilityZoneQuotaExceeded" + // ErrCodeDBClusterAlreadyExistsFault for service response error code // "DBClusterAlreadyExistsFault". // @@ -156,7 +176,7 @@ const ( // ErrCodeDBInstanceRoleNotFoundFault for service response error code // "DBInstanceRoleNotFound". // - // The specified RoleArn value doesn't match the specifed feature for the DB + // The specified RoleArn value doesn't match the specified feature for the DB // instance. ErrCodeDBInstanceRoleNotFoundFault = "DBInstanceRoleNotFound" @@ -300,6 +320,18 @@ const ( // "GlobalClusterQuotaExceededFault". ErrCodeGlobalClusterQuotaExceededFault = "GlobalClusterQuotaExceededFault" + // ErrCodeInstallationMediaAlreadyExistsFault for service response error code + // "InstallationMediaAlreadyExists". + // + // The specified installation medium has already been imported. + ErrCodeInstallationMediaAlreadyExistsFault = "InstallationMediaAlreadyExists" + + // ErrCodeInstallationMediaNotFoundFault for service response error code + // "InstallationMediaNotFound". + // + // InstallationMediaID doesn't refer to an existing installation medium. + ErrCodeInstallationMediaNotFoundFault = "InstallationMediaNotFound" + // ErrCodeInstanceQuotaExceededFault for service response error code // "InstanceQuotaExceeded". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/service.go index f2d0efaf7d0..3ae523764c1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/service.go @@ -46,11 +46,11 @@ const ( // svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RDS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RDS { svc := &RDS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go index 0c21e88bbae..c5e905cec45 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go @@ -9,6 +9,148 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) +// WaitUntilDBClusterSnapshotAvailable uses the Amazon RDS API operation +// DescribeDBClusterSnapshots to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *RDS) WaitUntilDBClusterSnapshotAvailable(input *DescribeDBClusterSnapshotsInput) error { + return c.WaitUntilDBClusterSnapshotAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilDBClusterSnapshotAvailableWithContext is an extended version of WaitUntilDBClusterSnapshotAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) WaitUntilDBClusterSnapshotAvailableWithContext(ctx aws.Context, input *DescribeDBClusterSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilDBClusterSnapshotAvailable", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "deleted", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "deleting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "incompatible-restore", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "incompatible-parameters", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeDBClusterSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilDBClusterSnapshotDeleted uses the Amazon RDS API operation +// DescribeDBClusterSnapshots to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *RDS) WaitUntilDBClusterSnapshotDeleted(input *DescribeDBClusterSnapshotsInput) error { + return c.WaitUntilDBClusterSnapshotDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilDBClusterSnapshotDeletedWithContext is an extended version of WaitUntilDBClusterSnapshotDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) WaitUntilDBClusterSnapshotDeletedWithContext(ctx aws.Context, input *DescribeDBClusterSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilDBClusterSnapshotDeleted", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(DBClusterSnapshots) == `0`", + Expected: true, + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "DBClusterSnapshotNotFoundFault", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "creating", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "modifying", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "rebooting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBClusterSnapshots[].Status", + Expected: "resetting-master-credentials", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeDBClusterSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + // WaitUntilDBInstanceAvailable uses the Amazon RDS API operation // DescribeDBInstances to wait for a condition to be met before returning. 
// If the condition is not met within the max attempt window, an error will @@ -104,8 +246,8 @@ func (c *RDS) WaitUntilDBInstanceDeletedWithContext(ctx aws.Context, input *Desc Acceptors: []request.WaiterAcceptor{ { State: request.SuccessWaiterState, - Matcher: request.PathAllWaiterMatch, Argument: "DBInstances[].DBInstanceStatus", - Expected: "deleted", + Matcher: request.PathWaiterMatch, Argument: "length(DBInstances) == `0`", + Expected: true, }, { State: request.SuccessWaiterState, @@ -246,8 +388,8 @@ func (c *RDS) WaitUntilDBSnapshotDeletedWithContext(ctx aws.Context, input *Desc Acceptors: []request.WaiterAcceptor{ { State: request.SuccessWaiterState, - Matcher: request.PathAllWaiterMatch, Argument: "DBSnapshots[].Status", - Expected: "deleted", + Matcher: request.PathWaiterMatch, Argument: "length(DBSnapshots) == `0`", + Expected: true, }, { State: request.SuccessWaiterState, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go index cba187dd347..ebb79a96e02 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go @@ -3130,7 +3130,7 @@ func (c *Redshift) DescribeClusterParameterGroupsWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeClusterParameterGroups operation. // pageNum := 0 // err := client.DescribeClusterParameterGroupsPages(params, -// func(page *DescribeClusterParameterGroupsOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterParameterGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3162,10 +3162,12 @@ func (c *Redshift) DescribeClusterParameterGroupsPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterParameterGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterParameterGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3276,7 +3278,7 @@ func (c *Redshift) DescribeClusterParametersWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a DescribeClusterParameters operation. // pageNum := 0 // err := client.DescribeClusterParametersPages(params, -// func(page *DescribeClusterParametersOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3308,10 +3310,12 @@ func (c *Redshift) DescribeClusterParametersPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3431,7 +3435,7 @@ func (c *Redshift) DescribeClusterSecurityGroupsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeClusterSecurityGroups operation. 
// pageNum := 0 // err := client.DescribeClusterSecurityGroupsPages(params, -// func(page *DescribeClusterSecurityGroupsOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3463,10 +3467,12 @@ func (c *Redshift) DescribeClusterSecurityGroupsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterSecurityGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterSecurityGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3586,7 +3592,7 @@ func (c *Redshift) DescribeClusterSnapshotsWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeClusterSnapshots operation. // pageNum := 0 // err := client.DescribeClusterSnapshotsPages(params, -// func(page *DescribeClusterSnapshotsOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterSnapshotsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3618,10 +3624,12 @@ func (c *Redshift) DescribeClusterSnapshotsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterSnapshotsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterSnapshotsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3737,7 +3745,7 @@ func (c *Redshift) DescribeClusterSubnetGroupsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeClusterSubnetGroups operation. // pageNum := 0 // err := client.DescribeClusterSubnetGroupsPages(params, -// func(page *DescribeClusterSubnetGroupsOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3769,10 +3777,12 @@ func (c *Redshift) DescribeClusterSubnetGroupsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterSubnetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterSubnetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3953,7 +3963,7 @@ func (c *Redshift) DescribeClusterVersionsWithContext(ctx aws.Context, input *De // // Example iterating over at most 3 pages of a DescribeClusterVersions operation. // pageNum := 0 // err := client.DescribeClusterVersionsPages(params, -// func(page *DescribeClusterVersionsOutput, lastPage bool) bool { +// func(page *redshift.DescribeClusterVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3985,10 +3995,12 @@ func (c *Redshift) DescribeClusterVersionsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClusterVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClusterVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4104,7 +4116,7 @@ func (c *Redshift) DescribeClustersWithContext(ctx aws.Context, input *DescribeC // // Example iterating over at most 3 pages of a DescribeClusters operation. 
// pageNum := 0 // err := client.DescribeClustersPages(params, -// func(page *DescribeClustersOutput, lastPage bool) bool { +// func(page *redshift.DescribeClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4136,10 +4148,12 @@ func (c *Redshift) DescribeClustersPagesWithContext(ctx aws.Context, input *Desc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeClustersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeClustersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4238,7 +4252,7 @@ func (c *Redshift) DescribeDefaultClusterParametersWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeDefaultClusterParameters operation. // pageNum := 0 // err := client.DescribeDefaultClusterParametersPages(params, -// func(page *DescribeDefaultClusterParametersOutput, lastPage bool) bool { +// func(page *redshift.DescribeDefaultClusterParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4270,10 +4284,12 @@ func (c *Redshift) DescribeDefaultClusterParametersPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeDefaultClusterParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeDefaultClusterParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4465,7 +4481,7 @@ func (c *Redshift) DescribeEventSubscriptionsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. // pageNum := 0 // err := client.DescribeEventSubscriptionsPages(params, -// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// func(page *redshift.DescribeEventSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4497,10 +4513,12 @@ func (c *Redshift) DescribeEventSubscriptionsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4598,7 +4616,7 @@ func (c *Redshift) DescribeEventsWithContext(ctx aws.Context, input *DescribeEve // // Example iterating over at most 3 pages of a DescribeEvents operation. // pageNum := 0 // err := client.DescribeEventsPages(params, -// func(page *DescribeEventsOutput, lastPage bool) bool { +// func(page *redshift.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4630,10 +4648,12 @@ func (c *Redshift) DescribeEventsPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4748,7 +4768,7 @@ func (c *Redshift) DescribeHsmClientCertificatesWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeHsmClientCertificates operation. 
// pageNum := 0 // err := client.DescribeHsmClientCertificatesPages(params, -// func(page *DescribeHsmClientCertificatesOutput, lastPage bool) bool { +// func(page *redshift.DescribeHsmClientCertificatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4780,10 +4800,12 @@ func (c *Redshift) DescribeHsmClientCertificatesPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeHsmClientCertificatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeHsmClientCertificatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4898,7 +4920,7 @@ func (c *Redshift) DescribeHsmConfigurationsWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a DescribeHsmConfigurations operation. // pageNum := 0 // err := client.DescribeHsmConfigurationsPages(params, -// func(page *DescribeHsmConfigurationsOutput, lastPage bool) bool { +// func(page *redshift.DescribeHsmConfigurationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4930,10 +4952,12 @@ func (c *Redshift) DescribeHsmConfigurationsPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeHsmConfigurationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeHsmConfigurationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5017,6 +5041,148 @@ func (c *Redshift) DescribeLoggingStatusWithContext(ctx aws.Context, input *Desc return out, req.Send() } +const opDescribeNodeConfigurationOptions = "DescribeNodeConfigurationOptions" + +// DescribeNodeConfigurationOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNodeConfigurationOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNodeConfigurationOptions for more information on using the DescribeNodeConfigurationOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNodeConfigurationOptionsRequest method. +// req, resp := client.DescribeNodeConfigurationOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeNodeConfigurationOptions +func (c *Redshift) DescribeNodeConfigurationOptionsRequest(input *DescribeNodeConfigurationOptionsInput) (req *request.Request, output *DescribeNodeConfigurationOptionsOutput) { + op := &request.Operation{ + Name: opDescribeNodeConfigurationOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNodeConfigurationOptionsInput{} + } + + output = &DescribeNodeConfigurationOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNodeConfigurationOptions API operation for Amazon Redshift. 
+// +// Returns properties of possible node configurations such as node type, number +// of nodes, and disk usage for the specified action type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation DescribeNodeConfigurationOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterSnapshotNotFoundFault "ClusterSnapshotNotFound" +// The snapshot identifier does not refer to an existing cluster snapshot. +// +// * ErrCodeInvalidClusterSnapshotStateFault "InvalidClusterSnapshotState" +// The specified cluster snapshot is not in the available state, or other accounts +// are authorized to access the snapshot. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeNodeConfigurationOptions +func (c *Redshift) DescribeNodeConfigurationOptions(input *DescribeNodeConfigurationOptionsInput) (*DescribeNodeConfigurationOptionsOutput, error) { + req, out := c.DescribeNodeConfigurationOptionsRequest(input) + return out, req.Send() +} + +// DescribeNodeConfigurationOptionsWithContext is the same as DescribeNodeConfigurationOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNodeConfigurationOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) DescribeNodeConfigurationOptionsWithContext(ctx aws.Context, input *DescribeNodeConfigurationOptionsInput, opts ...request.Option) (*DescribeNodeConfigurationOptionsOutput, error) { + req, out := c.DescribeNodeConfigurationOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNodeConfigurationOptionsPages iterates over the pages of a DescribeNodeConfigurationOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNodeConfigurationOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNodeConfigurationOptions operation. +// pageNum := 0 +// err := client.DescribeNodeConfigurationOptionsPages(params, +// func(page *redshift.DescribeNodeConfigurationOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeNodeConfigurationOptionsPages(input *DescribeNodeConfigurationOptionsInput, fn func(*DescribeNodeConfigurationOptionsOutput, bool) bool) error { + return c.DescribeNodeConfigurationOptionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNodeConfigurationOptionsPagesWithContext same as DescribeNodeConfigurationOptionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) DescribeNodeConfigurationOptionsPagesWithContext(ctx aws.Context, input *DescribeNodeConfigurationOptionsInput, fn func(*DescribeNodeConfigurationOptionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNodeConfigurationOptionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNodeConfigurationOptionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNodeConfigurationOptionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeOrderableClusterOptions = "DescribeOrderableClusterOptions" // DescribeOrderableClusterOptionsRequest generates a "aws/request.Request" representing the @@ -5116,7 +5282,7 @@ func (c *Redshift) DescribeOrderableClusterOptionsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a DescribeOrderableClusterOptions operation. // pageNum := 0 // err := client.DescribeOrderableClusterOptionsPages(params, -// func(page *DescribeOrderableClusterOptionsOutput, lastPage bool) bool { +// func(page *redshift.DescribeOrderableClusterOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5148,10 +5314,12 @@ func (c *Redshift) DescribeOrderableClusterOptionsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeOrderableClusterOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeOrderableClusterOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5267,7 +5435,7 @@ func (c *Redshift) DescribeReservedNodeOfferingsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a DescribeReservedNodeOfferings operation. // pageNum := 0 // err := client.DescribeReservedNodeOfferingsPages(params, -// func(page *DescribeReservedNodeOfferingsOutput, lastPage bool) bool { +// func(page *redshift.DescribeReservedNodeOfferingsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5299,10 +5467,12 @@ func (c *Redshift) DescribeReservedNodeOfferingsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedNodeOfferingsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedNodeOfferingsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5406,7 +5576,7 @@ func (c *Redshift) DescribeReservedNodesWithContext(ctx aws.Context, input *Desc // // Example iterating over at most 3 pages of a DescribeReservedNodes operation. 
// pageNum := 0 // err := client.DescribeReservedNodesPages(params, -// func(page *DescribeReservedNodesOutput, lastPage bool) bool { +// func(page *redshift.DescribeReservedNodesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5438,10 +5608,12 @@ func (c *Redshift) DescribeReservedNodesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeReservedNodesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeReservedNodesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5739,8 +5911,7 @@ func (c *Redshift) DescribeStorageRequest(input *DescribeStorageInput) (req *req // DescribeStorage API operation for Amazon Redshift. // -// Returns the total amount of snapshot usage and provisioned storage for a -// user in megabytes. +// Returns the total amount of snapshot usage and provisioned storage in megabytes. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7930,15 +8101,8 @@ func (c *Redshift) ResizeClusterRequest(input *ResizeClusterInput) (req *request // // Elastic resize operations have the following restrictions: // -// * You can only resize clusters of the following types: -// -// dc2.large -// -// dc2.8xlarge -// -// ds2.xlarge -// -// ds2.8xlarge +// * You can only resize clusters of the following types: dc2.large dc2.8xlarge +// ds2.xlarge ds2.8xlarge // // * The type of nodes that you add must match the node type for the cluster. // @@ -8151,6 +8315,12 @@ func (c *Redshift) RestoreFromClusterSnapshotRequest(input *RestoreFromClusterSn // * ErrCodeSnapshotScheduleNotFoundFault "SnapshotScheduleNotFound" // We could not find the specified snapshot schedule. // +// * ErrCodeTagLimitExceededFault "TagLimitExceededFault" +// You have exceeded the number of tags allowed. +// +// * ErrCodeInvalidTagFault "InvalidTagFault" +// The tag is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/RestoreFromClusterSnapshot func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotInput) (*RestoreFromClusterSnapshotOutput, error) { req, out := c.RestoreFromClusterSnapshotRequest(input) @@ -9353,6 +9523,22 @@ type Cluster struct { // The name of the Availability Zone in which the cluster is located. AvailabilityZone *string `type:"string"` + // The availability status of the cluster for queries. Possible values are the + // following: + // + // * Available - The cluster is available for queries. + // + // * Unavailable - The cluster is not available for queries. + // + // * Maintenance - The cluster is intermittently available for queries due + // to maintenance activities. + // + // * Modifying - The cluster is intermittently available for queries due + // to changes that modify the cluster. + // + // * Failed - The cluster failed and is not available for queries. + ClusterAvailabilityStatus *string `type:"string"` + // The date and time that the cluster was created. ClusterCreateTime *time.Time `type:"timestamp"` @@ -9470,6 +9656,18 @@ type Cluster struct { // Default: false EnhancedVpcRouting *bool `type:"boolean"` + // The date and time when the next snapshot is expected to be taken for clusters + // with a valid snapshot schedule and backups enabled. 
+ ExpectedNextSnapshotScheduleTime *time.Time `type:"timestamp"` + + // The status of next expected snapshot for clusters having a valid snapshot + // schedule and backups enabled. Possible values are the following: + // + // * OnTrack - The next snapshot is expected to be taken on time. + // + // * Pending - The next snapshot is pending to be taken. + ExpectedNextSnapshotScheduleTimeStatus *string `type:"string"` + // A value that reports whether the Amazon Redshift cluster has finished applying // any hardware security module (HSM) settings changes specified in a modify // cluster command. @@ -9502,6 +9700,9 @@ type Cluster struct { // The status of a modify operation, if any, initiated for the cluster. ModifyStatus *string `type:"string"` + // The date and time in UTC when system maintenance can begin. + NextMaintenanceWindowStartTime *time.Time `type:"timestamp"` + // The node type for the nodes in the cluster. NodeType *string `type:"string"` @@ -9581,6 +9782,12 @@ func (s *Cluster) SetAvailabilityZone(v string) *Cluster { return s } +// SetClusterAvailabilityStatus sets the ClusterAvailabilityStatus field's value. +func (s *Cluster) SetClusterAvailabilityStatus(v string) *Cluster { + s.ClusterAvailabilityStatus = &v + return s +} + // SetClusterCreateTime sets the ClusterCreateTime field's value. func (s *Cluster) SetClusterCreateTime(v time.Time) *Cluster { s.ClusterCreateTime = &v @@ -9695,6 +9902,18 @@ func (s *Cluster) SetEnhancedVpcRouting(v bool) *Cluster { return s } +// SetExpectedNextSnapshotScheduleTime sets the ExpectedNextSnapshotScheduleTime field's value. +func (s *Cluster) SetExpectedNextSnapshotScheduleTime(v time.Time) *Cluster { + s.ExpectedNextSnapshotScheduleTime = &v + return s +} + +// SetExpectedNextSnapshotScheduleTimeStatus sets the ExpectedNextSnapshotScheduleTimeStatus field's value. +func (s *Cluster) SetExpectedNextSnapshotScheduleTimeStatus(v string) *Cluster { + s.ExpectedNextSnapshotScheduleTimeStatus = &v + return s +} + // SetHsmStatus sets the HsmStatus field's value. func (s *Cluster) SetHsmStatus(v *HsmStatus) *Cluster { s.HsmStatus = v @@ -9737,6 +9956,12 @@ func (s *Cluster) SetModifyStatus(v string) *Cluster { return s } +// SetNextMaintenanceWindowStartTime sets the NextMaintenanceWindowStartTime field's value. +func (s *Cluster) SetNextMaintenanceWindowStartTime(v time.Time) *Cluster { + s.NextMaintenanceWindowStartTime = &v + return s +} + // SetNodeType sets the NodeType field's value. func (s *Cluster) SetNodeType(v string) *Cluster { s.NodeType = &v @@ -10786,7 +11011,7 @@ type CreateClusterInput struct { MasterUsername *string `type:"string" required:"true"` // The node type to be provisioned for the cluster. For information about node - // types, go to Working with Clusters (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // types, go to Working with Clusters (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) // in the Amazon Redshift Cluster Management Guide. // // Valid Values: ds2.xlarge | ds2.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large @@ -10798,7 +11023,7 @@ type CreateClusterInput struct { // The number of compute nodes in the cluster. This parameter is required when // the ClusterType parameter is specified as multi-node. 
// - // For information about determining how many nodes you need, go to Working + // For information about determining how many nodes you need, go to Working // with Clusters (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) // in the Amazon Redshift Cluster Management Guide. // @@ -14989,6 +15214,139 @@ func (s *DescribeLoggingStatusInput) SetClusterIdentifier(v string) *DescribeLog return s } +type DescribeNodeConfigurationOptionsInput struct { + _ struct{} `type:"structure"` + + // The action type to evaluate for possible node configurations. Currently, + // it must be "restore-cluster". + // + // ActionType is a required field + ActionType *string `type:"string" required:"true" enum:"ActionType"` + + // A set of name, operator, and value items to filter the results. + Filters []*NodeConfigurationOptionsFilter `locationName:"Filter" locationNameList:"NodeConfigurationOptionsFilter" type:"list"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeNodeConfigurationOptions + // request exceed the value specified in MaxRecords, AWS returns a value in + // the Marker field of the response. You can retrieve the next set of response + // records by providing the returned marker value in the Marker parameter and + // retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 500 + // + // Constraints: minimum 100, maximum 500. + MaxRecords *int64 `type:"integer"` + + // The AWS customer account used to create or copy the snapshot. Required if + // you are restoring a snapshot you do not own, optional if you own the snapshot. + OwnerAccount *string `type:"string"` + + // The identifier of the snapshot to evaluate for possible node configurations. + SnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNodeConfigurationOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNodeConfigurationOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNodeConfigurationOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNodeConfigurationOptionsInput"} + if s.ActionType == nil { + invalidParams.Add(request.NewErrParamRequired("ActionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionType sets the ActionType field's value. +func (s *DescribeNodeConfigurationOptionsInput) SetActionType(v string) *DescribeNodeConfigurationOptionsInput { + s.ActionType = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeNodeConfigurationOptionsInput) SetFilters(v []*NodeConfigurationOptionsFilter) *DescribeNodeConfigurationOptionsInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeNodeConfigurationOptionsInput) SetMarker(v string) *DescribeNodeConfigurationOptionsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. 
+func (s *DescribeNodeConfigurationOptionsInput) SetMaxRecords(v int64) *DescribeNodeConfigurationOptionsInput { + s.MaxRecords = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *DescribeNodeConfigurationOptionsInput) SetOwnerAccount(v string) *DescribeNodeConfigurationOptionsInput { + s.OwnerAccount = &v + return s +} + +// SetSnapshotIdentifier sets the SnapshotIdentifier field's value. +func (s *DescribeNodeConfigurationOptionsInput) SetSnapshotIdentifier(v string) *DescribeNodeConfigurationOptionsInput { + s.SnapshotIdentifier = &v + return s +} + +type DescribeNodeConfigurationOptionsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of valid node configurations. + NodeConfigurationOptionList []*NodeConfigurationOption `locationNameList:"NodeConfigurationOption" type:"list"` +} + +// String returns the string representation +func (s DescribeNodeConfigurationOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNodeConfigurationOptionsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeNodeConfigurationOptionsOutput) SetMarker(v string) *DescribeNodeConfigurationOptionsOutput { + s.Marker = &v + return s +} + +// SetNodeConfigurationOptionList sets the NodeConfigurationOptionList field's value. +func (s *DescribeNodeConfigurationOptionsOutput) SetNodeConfigurationOptionList(v []*NodeConfigurationOption) *DescribeNodeConfigurationOptionsOutput { + s.NodeConfigurationOptionList = v + return s +} + type DescribeOrderableClusterOptionsInput struct { _ struct{} `type:"structure"` @@ -16239,17 +16597,7 @@ type EnableLoggingInput struct { // // * Cannot contain spaces( ), double quotes ("), single quotes ('), a backslash // (\), or control characters. The hexadecimal codes for invalid characters - // are: - // - // x00 to x20 - // - // x22 - // - // x27 - // - // x5c - // - // x7f or larger + // are: x00 to x20 x22 x27 x5c x7f or larger S3KeyPrefix *string `type:"string"` } @@ -17581,7 +17929,7 @@ type ModifyClusterInput struct { // cluster is deleted and your connection is switched to the new cluster. You // can use DescribeResize to track the progress of the resize request. // - // Valid Values: multi-node | single-node + // Valid Values: multi-node | single-node ClusterType *string `type:"string"` // The new version number of the Amazon Redshift engine to upgrade to. @@ -18717,6 +19065,96 @@ func (s *ModifySnapshotScheduleOutput) SetTags(v []*Tag) *ModifySnapshotSchedule return s } +// A list of node configurations. +type NodeConfigurationOption struct { + _ struct{} `type:"structure"` + + // The estimated disk utilization percentage. + EstimatedDiskUtilizationPercent *float64 `type:"double"` + + // The node type, such as "ds2.8xlarge". + NodeType *string `type:"string"` + + // The number of nodes.
+ NumberOfNodes *int64 `type:"integer"` +} + +// String returns the string representation +func (s NodeConfigurationOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeConfigurationOption) GoString() string { + return s.String() +} + +// SetEstimatedDiskUtilizationPercent sets the EstimatedDiskUtilizationPercent field's value. +func (s *NodeConfigurationOption) SetEstimatedDiskUtilizationPercent(v float64) *NodeConfigurationOption { + s.EstimatedDiskUtilizationPercent = &v + return s +} + +// SetNodeType sets the NodeType field's value. +func (s *NodeConfigurationOption) SetNodeType(v string) *NodeConfigurationOption { + s.NodeType = &v + return s +} + +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *NodeConfigurationOption) SetNumberOfNodes(v int64) *NodeConfigurationOption { + s.NumberOfNodes = &v + return s +} + +// A set of elements to filter the returned node configurations. +type NodeConfigurationOptionsFilter struct { + _ struct{} `type:"structure"` + + // The name of the element to filter. + Name *string `type:"string" enum:"NodeConfigurationOptionsFilterName"` + + // The filter operator. If filter Name is NodeType only the 'in' operator is + // supported. Provide one value to evaluate for 'eq', 'lt', 'le', 'gt', and + // 'ge'. Provide two values to evaluate for 'between'. Provide a list of values + // for 'in'. + Operator *string `type:"string" enum:"OperatorType"` + + // List of values. Compare Name using Operator to Values. If filter Name is + // NumberOfNodes, then values can range from 0 to 200. If filter Name is EstimatedDiskUtilizationPercent, + // then values can range from 0 to 100. For example, filter NumberOfNodes (name) + // GT (operator) 3 (values). + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s NodeConfigurationOptionsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeConfigurationOptionsFilter) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *NodeConfigurationOptionsFilter) SetName(v string) *NodeConfigurationOptionsFilter { + s.Name = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *NodeConfigurationOptionsFilter) SetOperator(v string) *NodeConfigurationOptionsFilter { + s.Operator = &v + return s +} + +// SetValues sets the Values field's value. +func (s *NodeConfigurationOptionsFilter) SetValues(v []*string) *NodeConfigurationOptionsFilter { + s.Values = v + return s +} + // Describes an orderable cluster option. type OrderableClusterOption struct { _ struct{} `type:"structure"` @@ -19491,7 +19929,8 @@ type ResizeClusterInput struct { // The new cluster type for the specified cluster. ClusterType *string `type:"string"` - // The new node type for the nodes you are adding. + // The new node type for the nodes you are adding. If not specified, the cluster's + // current node type is used. NodeType *string `type:"string"` // The new number of nodes for the cluster. @@ -19747,10 +20186,13 @@ type RestoreFromClusterSnapshotInput struct { // type into another dc1.large instance type or dc2.large instance type. You // can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlareg // cluster, then resize to a dc2.8large cluster. 
For more information about - // node types, see About Clusters and Nodes (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes) + // node types, see About Clusters and Nodes (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes) // in the Amazon Redshift Cluster Management Guide. NodeType *string `type:"string"` + // The number of nodes specified when provisioning the restored cluster. + NumberOfNodes *int64 `type:"integer"` + // The AWS customer account used to create or copy the snapshot. Required if // you are restoring a snapshot you do not own, optional if you own the snapshot. OwnerAccount *string `type:"string"` @@ -19933,6 +20375,12 @@ func (s *RestoreFromClusterSnapshotInput) SetNodeType(v string) *RestoreFromClus return s } +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *RestoreFromClusterSnapshotInput) SetNumberOfNodes(v int64) *RestoreFromClusterSnapshotInput { + s.NumberOfNodes = &v + return s +} + // SetOwnerAccount sets the OwnerAccount field's value. func (s *RestoreFromClusterSnapshotInput) SetOwnerAccount(v string) *RestoreFromClusterSnapshotInput { s.OwnerAccount = &v @@ -20635,7 +21083,6 @@ type Snapshot struct { // // * CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating". // - // // * DescribeClusterSnapshots returns status as "creating", "available", // "final snapshot", or "failed". // @@ -21498,6 +21945,45 @@ func (s *VpcSecurityGroupMembership) SetVpcSecurityGroupId(v string) *VpcSecurit return s } +const ( + // ActionTypeRestoreCluster is a ActionType enum value + ActionTypeRestoreCluster = "restore-cluster" +) + +const ( + // NodeConfigurationOptionsFilterNameNodeType is a NodeConfigurationOptionsFilterName enum value + NodeConfigurationOptionsFilterNameNodeType = "NodeType" + + // NodeConfigurationOptionsFilterNameNumberOfNodes is a NodeConfigurationOptionsFilterName enum value + NodeConfigurationOptionsFilterNameNumberOfNodes = "NumberOfNodes" + + // NodeConfigurationOptionsFilterNameEstimatedDiskUtilizationPercent is a NodeConfigurationOptionsFilterName enum value + NodeConfigurationOptionsFilterNameEstimatedDiskUtilizationPercent = "EstimatedDiskUtilizationPercent" +) + +const ( + // OperatorTypeEq is a OperatorType enum value + OperatorTypeEq = "eq" + + // OperatorTypeLt is a OperatorType enum value + OperatorTypeLt = "lt" + + // OperatorTypeGt is a OperatorType enum value + OperatorTypeGt = "gt" + + // OperatorTypeLe is a OperatorType enum value + OperatorTypeLe = "le" + + // OperatorTypeGe is a OperatorType enum value + OperatorTypeGe = "ge" + + // OperatorTypeIn is a OperatorType enum value + OperatorTypeIn = "in" + + // OperatorTypeBetween is a OperatorType enum value + OperatorTypeBetween = "between" +) + const ( // ParameterApplyTypeStatic is a ParameterApplyType enum value ParameterApplyTypeStatic = "static" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go index a750d141c62..fa2fea7a89f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go @@ -46,11 +46,11 @@ const ( // svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift { c := 
p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Redshift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Redshift { svc := &Redshift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-12-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go index 99f023eb561..84d3fee94ac 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go @@ -601,7 +601,7 @@ func (c *ResourceGroups) ListGroupResourcesWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListGroupResources operation. // pageNum := 0 // err := client.ListGroupResourcesPages(params, -// func(page *ListGroupResourcesOutput, lastPage bool) bool { +// func(page *resourcegroups.ListGroupResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -633,10 +633,12 @@ func (c *ResourceGroups) ListGroupResourcesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -749,7 +751,7 @@ func (c *ResourceGroups) ListGroupsWithContext(ctx aws.Context, input *ListGroup // // Example iterating over at most 3 pages of a ListGroups operation. // pageNum := 0 // err := client.ListGroupsPages(params, -// func(page *ListGroupsOutput, lastPage bool) bool { +// func(page *resourcegroups.ListGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -781,10 +783,12 @@ func (c *ResourceGroups) ListGroupsPagesWithContext(ctx aws.Context, input *List }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -903,7 +907,7 @@ func (c *ResourceGroups) SearchResourcesWithContext(ctx aws.Context, input *Sear // // Example iterating over at most 3 pages of a SearchResources operation. 
// pageNum := 0 // err := client.SearchResourcesPages(params, -// func(page *SearchResourcesOutput, lastPage bool) bool { +// func(page *resourcegroups.SearchResourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -935,10 +939,12 @@ func (c *ResourceGroups) SearchResourcesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SearchResourcesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SearchResourcesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2290,14 +2296,14 @@ type ResourceQuery struct { // The type of the query. The valid values in this release are TAG_FILTERS_1_0 // and CLOUDFORMATION_STACK_1_0. // - // TAG_FILTERS_1_0: A JSON syntax that lets you specify a collection of simple - // tag filters for resource types and tags, as supported by the AWS Tagging - // API GetResources (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html) - // operation. If you specify more than one tag key, only resources that match - // all tag keys, and at least one value of each specified tag key, are returned - // in your query. If you specify more than one value for a tag key, a resource - // matches the filter if it has a tag key value that matches any of the specified - // values. + // TAG_FILTERS_1_0: A JSON syntax that lets you specify a collection of simple + // tag filters for resource types and tags, as supported by the AWS Tagging + // API GetResources (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html) + // operation. If you specify more than one tag key, only resources that match + // all tag keys, and at least one value of each specified tag key, are returned + // in your query. If you specify more than one value for a tag key, a resource + // matches the filter if it has a tag key value that matches any of the specified + // values. // // For example, consider the following sample query for resources that have // two tags, Stage and Version, with two values each. ([{"Key":"Stage","Values":["Test","Deploy"]},{"Key":"Version","Values":["1","2"]}]) @@ -2319,8 +2325,8 @@ type ResourceQuery struct { // * An RDS database that has the following two tags: {"Key":"Stage","Value":"Archived"}, // and {"Key":"Version","Value":"4"} // - // CLOUDFORMATION_STACK_1_0: A JSON syntax that lets you specify a CloudFormation - // stack ARN. + // CLOUDFORMATION_STACK_1_0: A JSON syntax that lets you specify a CloudFormation + // stack ARN. 
// // Type is a required field Type *string `min:"1" type:"string" required:"true" enum:"QueryType"` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go index 46a19ff2316..82f50ca25d7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ResourceGroups { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "resource-groups" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ResourceGroups { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ResourceGroups { svc := &ResourceGroups{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index 842c1c3c978..c17f25d357d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -624,11 +624,10 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // SOA record and four NS records for the zone. For more information about // SOA and NS records, see NS and SOA Records that Route 53 Creates for a // Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) -// in the Amazon Route 53 Developer Guide. -// -// If you want to use the same name servers for multiple public hosted zones, -// you can optionally associate a reusable delegation set with the hosted -// zone. See the DelegationSetId element. +// in the Amazon Route 53 Developer Guide. If you want to use the same name +// servers for multiple public hosted zones, you can optionally associate +// a reusable delegation set with the hosted zone. See the DelegationSetId +// element. // // * If your domain is registered with a registrar other than Route 53, you // must update the name servers with your registrar to make Route 53 the @@ -794,8 +793,9 @@ func (c *Route53) CreateQueryLoggingConfigRequest(input *CreateQueryLoggingConfi // // * DNS response code, such as NoError or ServFail // -// Log Group and Resource PolicyBefore you create a query logging configuration, -// perform the following operations. +// Log Group and Resource Policy +// +// Before you create a query logging configuration, perform the following operations. // // If you create a query logging configuration using the Route 53 console, Route // 53 performs these operations automatically. 
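Not part of the vendored patch: the hunks on either side of this point rewrite the CreateQueryLoggingConfig doc comment, so a minimal caller-side sketch of that operation may help orient the reflowed text. It assumes the CloudWatch Logs log group already exists in us-east-1 with a resource policy that lets Route 53 create log streams and put log events; the ARN and hosted-zone ID below are placeholders, not values taken from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	// Assumes default credentials and region resolution via the shared config.
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.CreateQueryLoggingConfig(&route53.CreateQueryLoggingConfigInput{
		// Placeholder ARN; the log group must live in us-east-1.
		CloudWatchLogsLogGroupArn: aws.String("arn:aws:logs:us-east-1:111122223333:log-group:/aws/route53/example.com"),
		// Placeholder hosted-zone ID.
		HostedZoneId: aws.String("Z3M3LMPEXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response's Location header identifies the new configuration.
	fmt.Println(aws.StringValue(out.Location))
}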
@@ -803,22 +803,19 @@ func (c *Route53) CreateQueryLoggingConfigRequest(input *CreateQueryLoggingConfi // Create a CloudWatch Logs log group, and make note of the ARN, which you specify // when you create a query logging configuration. Note the following: // -// You must create the log group in the us-east-1 region. -// -// You must use the same AWS account to create the log group and the hosted -// zone that you want to configure query logging for. +// * You must create the log group in the us-east-1 region. // -// When you create log groups for query logging, we recommend that you use a -// consistent prefix, for example: +// * You must use the same AWS account to create the log group and the hosted +// zone that you want to configure query logging for. // -// /aws/route53/hosted zone name -// -// In the next step, you'll create a resource policy, which controls access -// to one or more log groups and the associated AWS resources, such as Route -// 53 hosted zones. There's a limit on the number of resource policies that -// you can create, so we recommend that you use a consistent prefix so you can -// use the same resource policy for all the log groups that you create for query -// logging. +// * When you create log groups for query logging, we recommend that you +// use a consistent prefix, for example: /aws/route53/hosted zone name In +// the next step, you'll create a resource policy, which controls access +// to one or more log groups and the associated AWS resources, such as Route +// 53 hosted zones. There's a limit on the number of resource policies that +// you can create, so we recommend that you use a consistent prefix so you +// can use the same resource policy for all the log groups that you create +// for query logging. // // Create a CloudWatch Logs resource policy, and give it the permissions that // Route 53 needs to create log streams and to send query logs to log streams. @@ -832,14 +829,17 @@ func (c *Route53) CreateQueryLoggingConfigRequest(input *CreateQueryLoggingConfi // You can't use the CloudWatch console to create or edit a resource policy. // You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI. // -// Log Streams and Edge LocationsWhen Route 53 finishes creating the configuration -// for DNS query logging, it does the following: +// Log Streams and Edge Locations // -// Creates a log stream for an edge location the first time that the edge location -// responds to DNS queries for the specified hosted zone. That log stream is -// used to log all queries that Route 53 responds to for that edge location. +// When Route 53 finishes creating the configuration for DNS query logging, +// it does the following: // -// Begins to send query logs to the applicable log stream. +// * Creates a log stream for an edge location the first time that the edge +// location responds to DNS queries for the specified hosted zone. That log +// stream is used to log all queries that Route 53 responds to for that edge +// location. +// +// * Begins to send query logs to the applicable log stream. // // The name of each log stream is in the following format: // @@ -852,28 +852,35 @@ func (c *Route53) CreateQueryLoggingConfigRequest(input *CreateQueryLoggingConfi // a list of edge locations, see "The Route 53 Global Network" on the Route // 53 Product Details (http://aws.amazon.com/route53/details/) page. // -// Queries That Are LoggedQuery logs contain only the queries that DNS resolvers -// forward to Route 53. 
If a DNS resolver has already cached the response to -// a query (such as the IP address for a load balancer for example.com), the -// resolver will continue to return the cached response. It doesn't forward -// another query to Route 53 until the TTL for the corresponding resource record -// set expires. Depending on how many DNS queries are submitted for a resource -// record set, and depending on the TTL for that resource record set, query -// logs might contain information about only one query out of every several -// thousand queries that are submitted to DNS. For more information about how -// DNS works, see Routing Internet Traffic to Your Website or Web Application -// (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) +// Queries That Are Logged +// +// Query logs contain only the queries that DNS resolvers forward to Route 53. +// If a DNS resolver has already cached the response to a query (such as the +// IP address for a load balancer for example.com), the resolver will continue +// to return the cached response. It doesn't forward another query to Route +// 53 until the TTL for the corresponding resource record set expires. Depending +// on how many DNS queries are submitted for a resource record set, and depending +// on the TTL for that resource record set, query logs might contain information +// about only one query out of every several thousand queries that are submitted +// to DNS. For more information about how DNS works, see Routing Internet Traffic +// to Your Website or Web Application (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) // in the Amazon Route 53 Developer Guide. // -// Log File FormatFor a list of the values in each query log and the format -// of each value, see Logging DNS Queries (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) +// Log File Format +// +// For a list of the values in each query log and the format of each value, +// see Logging DNS Queries (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) // in the Amazon Route 53 Developer Guide. // -// PricingFor information about charges for query logs, see Amazon CloudWatch -// Pricing (http://aws.amazon.com/cloudwatch/pricing/). +// Pricing // -// How to Stop LoggingIf you want Route 53 to stop sending query logs to CloudWatch -// Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig +// For information about charges for query logs, see Amazon CloudWatch Pricing +// (http://aws.amazon.com/cloudwatch/pricing/). +// +// How to Stop Logging +// +// If you want Route 53 to stop sending query logs to CloudWatch Logs, delete +// the query logging configuration. For more information, see DeleteQueryLoggingConfig // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2544,6 +2551,7 @@ func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req // GetCheckerIpRanges API operation for Amazon Route 53. // +// // GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, // which includes IP address ranges for all AWS services. 
For more information, // see IP Address Ranges of Amazon Route 53 Servers (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/route-53-ip-addresses.html) @@ -3943,7 +3951,7 @@ func (c *Route53) ListHealthChecksWithContext(ctx aws.Context, input *ListHealth // // Example iterating over at most 3 pages of a ListHealthChecks operation. // pageNum := 0 // err := client.ListHealthChecksPages(params, -// func(page *ListHealthChecksOutput, lastPage bool) bool { +// func(page *route53.ListHealthChecksOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3975,10 +3983,12 @@ func (c *Route53) ListHealthChecksPagesWithContext(ctx aws.Context, input *ListH }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListHealthChecksOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListHealthChecksOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4090,7 +4100,7 @@ func (c *Route53) ListHostedZonesWithContext(ctx aws.Context, input *ListHostedZ // // Example iterating over at most 3 pages of a ListHostedZones operation. // pageNum := 0 // err := client.ListHostedZonesPages(params, -// func(page *ListHostedZonesOutput, lastPage bool) bool { +// func(page *route53.ListHostedZonesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4122,10 +4132,12 @@ func (c *Route53) ListHostedZonesPagesWithContext(ctx aws.Context, input *ListHo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListHostedZonesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListHostedZonesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4211,10 +4223,9 @@ func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput // the current response. // // * If the value of IsTruncated in the response is true, there are more -// hosted zones associated with the current AWS account. -// -// If IsTruncated is false, this response includes the last hosted zone that -// is associated with the current account. The NextDNSName element and NextHostedZoneId +// hosted zones associated with the current AWS account. If IsTruncated is +// false, this response includes the last hosted zone that is associated +// with the current account. The NextDNSName element and NextHostedZoneId // elements are omitted from the response. // // * The NextDNSName and NextHostedZoneId elements in the response contain @@ -4427,18 +4438,25 @@ func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInp // You can use the name and type elements to specify the resource record set // that the list begins with: // -// If you do not specify Name or TypeThe results begin with the first resource -// record set that the hosted zone contains. +// If you do not specify Name or Type // -// If you specify Name but not TypeThe results begin with the first resource -// record set in the list whose name is greater than or equal to Name. +// The results begin with the first resource record set that the hosted zone +// contains. // -// If you specify Type but not NameAmazon Route 53 returns the InvalidInput -// error. +// If you specify Name but not Type +// +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name. +// +// If you specify Type but not Name +// +// Amazon Route 53 returns the InvalidInput error. 
// -// If you specify both Name and TypeThe results begin with the first resource -// record set in the list whose name is greater than or equal to Name, and whose -// type is greater than or equal to Type. +// If you specify both Name and Type +// +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name, and whose type is greater than or equal +// to Type. // // Resource record sets that are PENDING // @@ -4509,7 +4527,7 @@ func (c *Route53) ListResourceRecordSetsWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListResourceRecordSets operation. // pageNum := 0 // err := client.ListResourceRecordSetsPages(params, -// func(page *ListResourceRecordSetsOutput, lastPage bool) bool { +// func(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4541,10 +4559,12 @@ func (c *Route53) ListResourceRecordSetsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResourceRecordSetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResourceRecordSetsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5887,12 +5907,12 @@ type AlarmIdentifier struct { // // Route 53 supports CloudWatch alarms with the following features: // - // Standard-resolution metrics. High-resolution metrics aren't supported. For - // more information, see High-Resolution Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#high-resolution-metrics) - // in the Amazon CloudWatch User Guide. + // * Standard-resolution metrics. High-resolution metrics aren't supported. + // For more information, see High-Resolution Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#high-resolution-metrics) + // in the Amazon CloudWatch User Guide. // - // Statistics: Average, Minimum, Maximum, Sum, and SampleCount. Extended statistics - // aren't supported. + // * Statistics: Average, Minimum, Maximum, Sum, and SampleCount. Extended + // statistics aren't supported. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -5970,26 +5990,29 @@ type AliasTarget struct { // Alias resource record sets only: The value that you specify depends on where // you want to route queries: // - // Amazon API Gateway custom regional APIs and edge-optimized APIsSpecify the - // applicable domain name for your API. You can get the applicable value using - // the AWS CLI command get-domain-names (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): + // Amazon API Gateway custom regional APIs and edge-optimized APIs // - // For regional APIs, specify the value of regionalDomainName. + // Specify the applicable domain name for your API. You can get the applicable + // value using the AWS CLI command get-domain-names (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): // - // For edge-optimized APIs, specify the value of distributionDomainName. This - // is the name of the associated CloudFront distribution, such as da1b2c3d4e5.cloudfront.net. + // * For regional APIs, specify the value of regionalDomainName. + // + // * For edge-optimized APIs, specify the value of distributionDomainName. + // This is the name of the associated CloudFront distribution, such as da1b2c3d4e5.cloudfront.net. 
// // The name of the record that you're creating must match a custom domain name // for your API, such as api.example.com. // - // Amazon Virtual Private Cloud interface VPC endpointEnter the API endpoint - // for the interface endpoint, such as vpce-123456789abcdef01-example-us-east-1a.elasticloadbalancing.us-east-1.vpce.amazonaws.com. + // Amazon Virtual Private Cloud interface VPC endpoint + // + // Enter the API endpoint for the interface endpoint, such as vpce-123456789abcdef01-example-us-east-1a.elasticloadbalancing.us-east-1.vpce.amazonaws.com. // For edge-optimized APIs, this is the domain name for the corresponding CloudFront // distribution. You can get the value of DnsName using the AWS CLI command // describe-vpc-endpoints (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html). // - // CloudFront distributionSpecify the domain name that CloudFront assigned when - // you created your distribution. + // CloudFront distribution + // + // Specify the domain name that CloudFront assigned when you created your distribution. // // Your CloudFront distribution must include an alternate domain name that matches // the name of the resource record set. For example, if the name of the resource @@ -6007,11 +6030,12 @@ type AliasTarget struct { // secondary records have the same name, and you can't include the same alternate // domain name in more than one distribution. // - // Elastic Beanstalk environmentIf the domain name for your Elastic Beanstalk - // environment includes the region that you deployed the environment in, you - // can create an alias record that routes traffic to the environment. For example, - // the domain name my-environment.us-west-2.elasticbeanstalk.com is a regionalized - // domain name. + // Elastic Beanstalk environment + // + // If the domain name for your Elastic Beanstalk environment includes the region + // that you deployed the environment in, you can create an alias record that + // routes traffic to the environment. For example, the domain name my-environment.us-west-2.elasticbeanstalk.com + // is a regionalized domain name. // // For environments that were created before early 2016, the domain name doesn't // include the region. To route traffic to these environments, you must create @@ -6025,55 +6049,56 @@ type AliasTarget struct { // the CNAME attribute for the environment. You can use the following methods // to get the value of the CNAME attribute: // - // AWS Management Console: For information about how to get the value by using - // the console, see Using Custom Domains with AWS Elastic Beanstalk (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html) - // in the AWS Elastic Beanstalk Developer Guide. - // - // Elastic Beanstalk API: Use the DescribeEnvironments action to get the value - // of the CNAME attribute. For more information, see DescribeEnvironments (http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) - // in the AWS Elastic Beanstalk API Reference. - // - // AWS CLI: Use the describe-environments command to get the value of the CNAME - // attribute. For more information, see describe-environments (http://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html) - // in the AWS Command Line Interface Reference. - // - // ELB load balancerSpecify the DNS name that is associated with the load balancer. - // Get the DNS name by using the AWS Management Console, the ELB API, or the - // AWS CLI. 
- // - // AWS Management Console: Go to the EC2 page, choose Load Balancers in the - // navigation pane, choose the load balancer, choose the Description tab, and - // get the value of the DNS name field. - // - // If you're routing traffic to a Classic Load Balancer, get the value that - // begins with dualstack. If you're routing traffic to another type of load - // balancer, get the value that applies to the record type, A or AAAA. - // - // Elastic Load Balancing API: Use DescribeLoadBalancers to get the value of - // DNSName. For more information, see the applicable guide: - // - // Classic Load Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) - // - // Application and Network Load Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) - // - // AWS CLI: Use describe-load-balancers to get the value of DNSName. For more - // information, see the applicable guide: - // - // Classic Load Balancers: describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) - // - // Application and Network Load Balancers: describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) - // - // Amazon S3 bucket that is configured as a static websiteSpecify the domain - // name of the Amazon S3 website endpoint that you created the bucket in, for - // example, s3-website.us-east-2.amazonaws.com. For more information about valid - // values, see the table Amazon Simple Storage Service (S3) Website Endpoints - // (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the - // Amazon Web Services General Reference. For more information about using S3 - // buckets for websites, see Getting Started with Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html) + // * AWS Management Console: For information about how to get the value by + // using the console, see Using Custom Domains with AWS Elastic Beanstalk + // (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html) + // in the AWS Elastic Beanstalk Developer Guide. + // + // * Elastic Beanstalk API: Use the DescribeEnvironments action to get the + // value of the CNAME attribute. For more information, see DescribeEnvironments + // (http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) + // in the AWS Elastic Beanstalk API Reference. + // + // * AWS CLI: Use the describe-environments command to get the value of the + // CNAME attribute. For more information, see describe-environments (http://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html) + // in the AWS Command Line Interface Reference. + // + // ELB load balancer + // + // Specify the DNS name that is associated with the load balancer. Get the DNS + // name by using the AWS Management Console, the ELB API, or the AWS CLI. + // + // * AWS Management Console: Go to the EC2 page, choose Load Balancers in + // the navigation pane, choose the load balancer, choose the Description + // tab, and get the value of the DNS name field. If you're routing traffic + // to a Classic Load Balancer, get the value that begins with dualstack. + // If you're routing traffic to another type of load balancer, get the value + // that applies to the record type, A or AAAA. 
+ // + // * Elastic Load Balancing API: Use DescribeLoadBalancers to get the value + // of DNSName. For more information, see the applicable guide: Classic Load + // Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) + // Application and Network Load Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // + // * AWS CLI: Use describe-load-balancers to get the value of DNSName. For + // more information, see the applicable guide: Classic Load Balancers: describe-load-balancers + // (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) + // Application and Network Load Balancers: describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) + // + // Amazon S3 bucket that is configured as a static website + // + // Specify the domain name of the Amazon S3 website endpoint that you created + // the bucket in, for example, s3-website.us-east-2.amazonaws.com. For more + // information about valid values, see the table Amazon Simple Storage Service + // (S3) Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. For more information about + // using S3 buckets for websites, see Getting Started with Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html) // in the Amazon Route 53 Developer Guide. // - // Another Route 53 resource record setSpecify the value of the Name element - // for a resource record set in the current hosted zone. + // Another Route 53 resource record set + // + // Specify the value of the Name element for a resource record set in the current + // hosted zone. // // If you're creating an alias record that has the same name as the hosted zone // (known as the zone apex), you can't specify the domain name for a record @@ -6092,55 +6117,63 @@ type AliasTarget struct { // // Note the following: // - // CloudFront distributionsYou can't set EvaluateTargetHealth to true when the - // alias target is a CloudFront distribution. + // CloudFront distributions + // + // You can't set EvaluateTargetHealth to true when the alias target is a CloudFront + // distribution. // - // Elastic Beanstalk environments that have regionalized subdomainsIf you specify - // an Elastic Beanstalk environment in DNSName and the environment contains - // an ELB load balancer, Elastic Load Balancing routes queries only to the healthy - // Amazon EC2 instances that are registered with the load balancer. (An environment - // automatically contains an ELB load balancer if it includes more than one - // Amazon EC2 instance.) If you set EvaluateTargetHealth to true and either - // no Amazon EC2 instances are healthy or the load balancer itself is unhealthy, - // Route 53 routes queries to other available resources that are healthy, if - // any. + // Elastic Beanstalk environments that have regionalized subdomains + // + // If you specify an Elastic Beanstalk environment in DNSName and the environment + // contains an ELB load balancer, Elastic Load Balancing routes queries only + // to the healthy Amazon EC2 instances that are registered with the load balancer. + // (An environment automatically contains an ELB load balancer if it includes + // more than one Amazon EC2 instance.) 
If you set EvaluateTargetHealth to true + // and either no Amazon EC2 instances are healthy or the load balancer itself + // is unhealthy, Route 53 routes queries to other available resources that are + // healthy, if any. // // If the environment contains a single Amazon EC2 instance, there are no special // requirements. // - // ELB load balancersHealth checking behavior depends on the type of load balancer: - // - // Classic Load Balancers: If you specify an ELB Classic Load Balancer in DNSName, - // Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances - // that are registered with the load balancer. If you set EvaluateTargetHealth - // to true and either no EC2 instances are healthy or the load balancer itself - // is unhealthy, Route 53 routes queries to other resources. + // ELB load balancers // - // Application and Network Load Balancers: If you specify an ELB Application - // or Network Load Balancer and you set EvaluateTargetHealth to true, Route - // 53 routes queries to the load balancer based on the health of the target - // groups that are associated with the load balancer: + // Health checking behavior depends on the type of load balancer: // - // For an Application or Network Load Balancer to be considered healthy, every - // target group that contains targets must contain at least one healthy target. - // If any target group contains only unhealthy targets, the load balancer is - // considered unhealthy, and Route 53 routes queries to other resources. + // * Classic Load Balancers: If you specify an ELB Classic Load Balancer + // in DNSName, Elastic Load Balancing routes queries only to the healthy + // Amazon EC2 instances that are registered with the load balancer. If you + // set EvaluateTargetHealth to true and either no EC2 instances are healthy + // or the load balancer itself is unhealthy, Route 53 routes queries to other + // resources. // - // A target group that has no registered targets is considered unhealthy. + // * Application and Network Load Balancers: If you specify an ELB Application + // or Network Load Balancer and you set EvaluateTargetHealth to true, Route + // 53 routes queries to the load balancer based on the health of the target + // groups that are associated with the load balancer: For an Application + // or Network Load Balancer to be considered healthy, every target group + // that contains targets must contain at least one healthy target. If any + // target group contains only unhealthy targets, the load balancer is considered + // unhealthy, and Route 53 routes queries to other resources. A target group + // that has no registered targets is considered unhealthy. // // When you create a load balancer, you configure settings for Elastic Load // Balancing health checks; they're not Route 53 health checks, but they perform // a similar function. Do not create Route 53 health checks for the EC2 instances // that you register with an ELB load balancer. // - // S3 bucketsThere are no special requirements for setting EvaluateTargetHealth - // to true when the alias target is an S3 bucket. + // S3 buckets + // + // There are no special requirements for setting EvaluateTargetHealth to true + // when the alias target is an S3 bucket. 
// - // Other records in the same hosted zoneIf the AWS resource that you specify - // in DNSName is a record or a group of records (for example, a group of weighted - // records) but is not another alias record, we recommend that you associate - // a health check with all of the records in the alias target. For more information, - // see What Happens When You Omit Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) + // Other records in the same hosted zone + // + // If the AWS resource that you specify in DNSName is a record or a group of + // records (for example, a group of weighted records) but is not another alias + // record, we recommend that you associate a health check with all of the records + // in the alias target. For more information, see What Happens When You Omit + // Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) // in the Amazon Route 53 Developer Guide. // // For more information and examples, see Amazon Route 53 Health Checks and @@ -6153,69 +6186,75 @@ type AliasTarget struct { // Alias resource records sets only: The value used depends on where you want // to route traffic: // - // Amazon API Gateway custom regional APIs and edge-optimized APIsSpecify the - // hosted zone ID for your API. You can get the applicable value using the AWS - // CLI command get-domain-names (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): + // Amazon API Gateway custom regional APIs and edge-optimized APIs + // + // Specify the hosted zone ID for your API. You can get the applicable value + // using the AWS CLI command get-domain-names (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): + // + // * For regional APIs, specify the value of regionalHostedZoneId. + // + // * For edge-optimized APIs, specify the value of distributionHostedZoneId. // - // For regional APIs, specify the value of regionalHostedZoneId. + // Amazon Virtual Private Cloud interface VPC endpoint // - // For edge-optimized APIs, specify the value of distributionHostedZoneId. + // Specify the hosted zone ID for your interface endpoint. You can get the value + // of HostedZoneId using the AWS CLI command describe-vpc-endpoints (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html). // - // Amazon Virtual Private Cloud interface VPC endpointSpecify the hosted zone - // ID for your interface endpoint. You can get the value of HostedZoneId using - // the AWS CLI command describe-vpc-endpoints (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html). + // CloudFront distribution // - // CloudFront distributionSpecify Z2FDTNDATAQYW2. + // Specify Z2FDTNDATAQYW2. // // Alias resource record sets for CloudFront can't be created in a private zone. // - // Elastic Beanstalk environmentSpecify the hosted zone ID for the region that - // you created the environment in. The environment must have a regionalized - // subdomain. For a list of regions and the corresponding hosted zone IDs, see - // AWS Elastic Beanstalk (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region) + // Elastic Beanstalk environment + // + // Specify the hosted zone ID for the region that you created the environment + // in. The environment must have a regionalized subdomain. 
For a list of regions + // and the corresponding hosted zone IDs, see AWS Elastic Beanstalk (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region) // in the "AWS Regions and Endpoints" chapter of the Amazon Web Services General // Reference. // - // ELB load balancerSpecify the value of the hosted zone ID for the load balancer. - // Use the following methods to get the hosted zone ID: - // - // Elastic Load Balancing (https://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region) - // table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services - // General Reference: Use the value that corresponds with the region that you - // created your load balancer in. Note that there are separate columns for Application - // and Classic Load Balancers and for Network Load Balancers. - // - // AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers - // in the navigation pane, select the load balancer, and get the value of the - // Hosted zone field on the Description tab. + // ELB load balancer // - // Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable - // value. For more information, see the applicable guide: + // Specify the value of the hosted zone ID for the load balancer. Use the following + // methods to get the hosted zone ID: // - // Classic Load Balancers: Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) - // to get the value of CanonicalHostedZoneNameId. + // * Elastic Load Balancing (https://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region) + // table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services + // General Reference: Use the value that corresponds with the region that + // you created your load balancer in. Note that there are separate columns + // for Application and Classic Load Balancers and for Network Load Balancers. // - // Application and Network Load Balancers: Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) - // to get the value of CanonicalHostedZoneId. + // * AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers + // in the navigation pane, select the load balancer, and get the value of + // the Hosted zone field on the Description tab. // - // AWS CLI: Use describe-load-balancers to get the applicable value. For more - // information, see the applicable guide: + // * Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable + // value. For more information, see the applicable guide: Classic Load Balancers: + // Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) + // to get the value of CanonicalHostedZoneNameId. Application and Network + // Load Balancers: Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // to get the value of CanonicalHostedZoneId. // - // Classic Load Balancers: Use describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) - // to get the value of CanonicalHostedZoneNameId. + // * AWS CLI: Use describe-load-balancers to get the applicable value. 
For
+ // more information, see the applicable guide: Classic Load Balancers: Use
+ // describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html)
+ // to get the value of CanonicalHostedZoneNameId. Application and Network
+ // Load Balancers: Use describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html)
+ // to get the value of CanonicalHostedZoneId.
//
- // An Amazon S3 bucket configured as a static websiteSpecify the hosted zone
- // ID for the region that you created the bucket in. For more information about
- // valid values, see the Amazon Simple Storage Service Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // An Amazon S3 bucket configured as a static website
+ //
+ // Specify the hosted zone ID for the region that you created the bucket in.
+ // For more information about valid values, see the Amazon Simple Storage Service
+ // Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services
// General Reference.
//
- // Another Route 53 resource record set in your hosted zoneSpecify the hosted
- // zone ID of your hosted zone. (An alias resource record set can't reference
- // a resource record set in a different hosted zone.)
+ // Another Route 53 resource record set in your hosted zone
+ //
+ // Specify the hosted zone ID of your hosted zone. (An alias resource record
+ // set can't reference a resource record set in a different hosted zone.)
//
// HostedZoneId is a required field
HostedZoneId *string `type:"string" required:"true"`
@@ -6379,16 +6418,14 @@ type Change struct {
//
// * CREATE: Creates a resource record set that has the specified values.
//
- // * DELETE: Deletes a existing resource record set.
- //
- // To delete the resource record set that is associated with a traffic policy
- // instance, use DeleteTrafficPolicyInstance (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteTrafficPolicyInstance.html).
+ // * DELETE: Deletes an existing resource record set. To delete the resource
+ // record set that is associated with a traffic policy instance, use DeleteTrafficPolicyInstance
+ // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteTrafficPolicyInstance.html).
// Amazon Route 53 will delete the resource record set automatically. If
// you delete the resource record set by using ChangeResourceRecordSets,
// Route 53 doesn't automatically delete the traffic policy instance, and
// you'll continue to be charged for it even though it's no longer in use.
//
- //
// * UPSERT: If a resource record set doesn't already exist, Route 53 creates
// it. If a resource record set does exist, Route 53 updates it with the
// values in the request.
@@ -7885,7 +7922,7 @@ func (s *DelegationSet) SetNameServers(v []*string) *DelegationSet {
// This action deletes a health check.
type DeleteHealthCheckInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"DeleteHealthCheckRequest" type:"structure"`
// The ID of the health check that you want to delete.
//
@@ -7942,7 +7979,7 @@ func (s DeleteHealthCheckOutput) GoString() string {
// A request to delete a hosted zone.
type DeleteHostedZoneInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteHostedZoneRequest" type:"structure"` // The ID of the hosted zone you want to delete. // @@ -8010,7 +8047,7 @@ func (s *DeleteHostedZoneOutput) SetChangeInfo(v *ChangeInfo) *DeleteHostedZoneO } type DeleteQueryLoggingConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteQueryLoggingConfigRequest" type:"structure"` // The ID of the configuration that you want to delete. // @@ -8066,7 +8103,7 @@ func (s DeleteQueryLoggingConfigOutput) GoString() string { // A request to delete a reusable delegation set. type DeleteReusableDelegationSetInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteReusableDelegationSetRequest" type:"structure"` // The ID of the reusable delegation set that you want to delete. // @@ -8123,7 +8160,7 @@ func (s DeleteReusableDelegationSetOutput) GoString() string { // A request to delete a specified traffic policy version. type DeleteTrafficPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteTrafficPolicyRequest" type:"structure"` // The ID of the traffic policy that you want to delete. // @@ -8182,7 +8219,7 @@ func (s *DeleteTrafficPolicyInput) SetVersion(v int64) *DeleteTrafficPolicyInput // A request to delete a specified traffic policy instance. type DeleteTrafficPolicyInstanceInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteTrafficPolicyInstanceRequest" type:"structure"` // The ID of the traffic policy instance that you want to delete. // @@ -8620,7 +8657,7 @@ func (s *GeoLocationDetails) SetSubdivisionName(v string) *GeoLocationDetails { // A complex type that contains information about the request to create a hosted // zone. type GetAccountLimitInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetAccountLimitRequest" type:"structure"` // The limit that you want to get. Valid values include the following: // @@ -8722,7 +8759,7 @@ func (s *GetAccountLimitOutput) SetLimit(v *AccountLimit) *GetAccountLimitOutput // The input for a GetChange request. type GetChangeInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetChangeRequest" type:"structure"` // The ID of the change batch request. The value that you specify here is the // value that ChangeResourceRecordSets returned in the Id element when you submitted @@ -8792,7 +8829,7 @@ func (s *GetChangeOutput) SetChangeInfo(v *ChangeInfo) *GetChangeOutput { // Empty request. type GetCheckerIpRangesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetCheckerIpRangesRequest" type:"structure"` } // String returns the string representation @@ -8835,7 +8872,7 @@ func (s *GetCheckerIpRangesOutput) SetCheckerIpRanges(v []*string) *GetCheckerIp // A request for information about whether a specified geographic location is // supported for Amazon Route 53 geolocation resource record sets. type GetGeoLocationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetGeoLocationRequest" type:"structure"` // Amazon Route 53 supports the following continent codes: // @@ -8943,7 +8980,7 @@ func (s *GetGeoLocationOutput) SetGeoLocationDetails(v *GeoLocationDetails) *Get // A request for the number of health checks that are associated with the current // AWS account. 
type GetHealthCheckCountInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHealthCheckCountRequest" type:"structure"` } // String returns the string representation @@ -8984,7 +9021,7 @@ func (s *GetHealthCheckCountOutput) SetHealthCheckCount(v int64) *GetHealthCheck // A request to get information about a specified health check. type GetHealthCheckInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHealthCheckRequest" type:"structure"` // The identifier that Amazon Route 53 assigned to the health check when you // created it. When you add or update a resource record set, you use this value @@ -9029,7 +9066,7 @@ func (s *GetHealthCheckInput) SetHealthCheckId(v string) *GetHealthCheckInput { // A request for the reason that a health check failed most recently. type GetHealthCheckLastFailureReasonInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHealthCheckLastFailureReasonRequest" type:"structure"` // The ID for the health check for which you want the last failure reason. When // you created the health check, CreateHealthCheck returned the ID in the response, @@ -9132,7 +9169,7 @@ func (s *GetHealthCheckOutput) SetHealthCheck(v *HealthCheck) *GetHealthCheckOut // A request to get the status for a health check. type GetHealthCheckStatusInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHealthCheckStatusRequest" type:"structure"` // The ID for the health check that you want the current status for. When you // created the health check, CreateHealthCheck returned the ID in the response, @@ -9208,7 +9245,7 @@ func (s *GetHealthCheckStatusOutput) SetHealthCheckObservations(v []*HealthCheck // A request to retrieve a count of all the hosted zones that are associated // with the current AWS account. type GetHostedZoneCountInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHostedZoneCountRequest" type:"structure"` } // String returns the string representation @@ -9250,7 +9287,7 @@ func (s *GetHostedZoneCountOutput) SetHostedZoneCount(v int64) *GetHostedZoneCou // A request to get information about a specified hosted zone. type GetHostedZoneInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHostedZoneRequest" type:"structure"` // The ID of the hosted zone that you want to get information about. // @@ -9293,7 +9330,7 @@ func (s *GetHostedZoneInput) SetId(v string) *GetHostedZoneInput { // A complex type that contains information about the request to create a hosted // zone. type GetHostedZoneLimitInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetHostedZoneLimitRequest" type:"structure"` // The ID of the hosted zone that you want to get a limit for. // @@ -9447,7 +9484,7 @@ func (s *GetHostedZoneOutput) SetVPCs(v []*VPC) *GetHostedZoneOutput { } type GetQueryLoggingConfigInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetQueryLoggingConfigRequest" type:"structure"` // The ID of the configuration for DNS query logging that you want to get information // about. @@ -9517,7 +9554,7 @@ func (s *GetQueryLoggingConfigOutput) SetQueryLoggingConfig(v *QueryLoggingConfi // A request to get information about a specified reusable delegation set. type GetReusableDelegationSetInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetReusableDelegationSetRequest" type:"structure"` // The ID of the reusable delegation set that you want to get a list of name // servers for. 
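Every input struct in the surrounding hunks picks up the same change: the blank `_ struct{}` marker field gains a locationName tag naming the operation's request element (GetHealthCheckRequest, GetHostedZoneRequest, and so on), presumably so the REST-XML marshaler can take the root element name from the tag rather than deriving it from the Go type. A minimal sketch of reading such a tag back through reflection, using a stand-in struct rather than the SDK's generated type:

package main

import (
	"fmt"
	"reflect"
)

// Stand-in mirroring the tag pattern added throughout this diff;
// not the SDK's real GetHostedZoneInput.
type getHostedZoneInput struct {
	_  struct{} `locationName:"GetHostedZoneRequest" type:"structure"`
	Id *string  `location:"uri" locationName:"Id" type:"string" required:"true"`
}

func main() {
	// Field(0) is the blank marker field; its tag carries the root
	// element name a REST-XML marshaler would emit for the request.
	f := reflect.TypeOf(getHostedZoneInput{}).Field(0)
	fmt.Println(f.Tag.Get("locationName")) // GetHostedZoneRequest
}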
@@ -9561,7 +9598,7 @@ func (s *GetReusableDelegationSetInput) SetId(v string) *GetReusableDelegationSe
// A complex type that contains information about the request to create a hosted
// zone.
type GetReusableDelegationSetLimitInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"GetReusableDelegationSetLimitRequest" type:"structure"`
// The ID of the delegation set that you want to get the limit for.
//
@@ -9688,7 +9725,7 @@ func (s *GetReusableDelegationSetOutput) SetDelegationSet(v *DelegationSet) *Get
// Gets information about a specific traffic policy version.
type GetTrafficPolicyInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"GetTrafficPolicyRequest" type:"structure"`
// The ID of the traffic policy that you want to get information about.
//
@@ -9749,7 +9786,7 @@ func (s *GetTrafficPolicyInput) SetVersion(v int64) *GetTrafficPolicyInput {
// Request to get the number of traffic policy instances that are associated
// with the current AWS account.
type GetTrafficPolicyInstanceCountInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"GetTrafficPolicyInstanceCountRequest" type:"structure"`
}
// String returns the string representation
@@ -9792,7 +9829,7 @@ func (s *GetTrafficPolicyInstanceCountOutput) SetTrafficPolicyInstanceCount(v in
// Gets information about a specified traffic policy instance.
type GetTrafficPolicyInstanceInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"GetTrafficPolicyInstanceRequest" type:"structure"`
// The ID of the traffic policy instance that you want to get information about.
//
@@ -10040,7 +10077,7 @@ type HealthCheckConfig struct {
// Amazon Route 53 behavior depends on whether you specify a value for IPAddress.
//
- // If you specify a value forIPAddress:
+ // If you specify a value for IPAddress:
//
// Amazon Route 53 sends health check requests to the specified IPv4 or IPv6
// address and passes the value of FullyQualifiedDomainName in the Host header
@@ -10066,7 +10103,7 @@ type HealthCheckConfig struct {
// If you don't specify a value for FullyQualifiedDomainName, Route 53 substitutes
// the value of IPAddress in the Host header in each of the preceding cases.
//
- // If you don't specify a value for IPAddress:
+ // If you don't specify a value for IPAddress:
//
// Route 53 sends a DNS request to the domain that you specify for FullyQualifiedDomainName
// at the interval that you specify for RequestInterval. Using an IPv4 address
@@ -10229,10 +10266,8 @@ type HealthCheckConfig struct {
//
// * HTTPS: Route 53 tries to establish a TCP connection. If successful,
// Route 53 submits an HTTPS request and waits for an HTTP status code of
- // 200 or greater and less than 400.
- //
- // If you specify HTTPS for the value of Type, the endpoint must support TLS
- // v1.0 or later.
+ // 200 or greater and less than 400. If you specify HTTPS for the value of
+ // Type, the endpoint must support TLS v1.0 or later.
//
// * HTTP_STR_MATCH: Route 53 tries to establish a TCP connection. If successful,
// Route 53 submits an HTTP request and searches the first 5,120 bytes of
@@ -10661,7 +10696,7 @@ func (s *LinkedService) SetServicePrincipal(v string) *LinkedService {
// A request to get a list of geographic locations that Amazon Route 53 supports
// for geolocation resource record sets.
type ListGeoLocationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListGeoLocationsRequest" type:"structure"` // (Optional) The maximum number of geolocations to be included in the response // body for this request. If more than maxitems geolocations remain to be listed, @@ -10842,7 +10877,7 @@ func (s *ListGeoLocationsOutput) SetNextSubdivisionCode(v string) *ListGeoLocati // A request to retrieve a list of the health checks that are associated with // the current AWS account. type ListHealthChecksInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListHealthChecksRequest" type:"structure"` // If the value of IsTruncated in the previous response was true, you have more // health checks. To get another group, submit another ListHealthChecks request. @@ -10963,7 +10998,7 @@ func (s *ListHealthChecksOutput) SetNextMarker(v string) *ListHealthChecksOutput // Retrieves a list of the public and private hosted zones that are associated // with the current AWS account in ASCII order by domain name. type ListHostedZonesByNameInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListHostedZonesByNameRequest" type:"structure"` // (Optional) For your first request to ListHostedZonesByName, include the dnsname // parameter only if you want to specify the name of the first hosted zone in @@ -11125,7 +11160,7 @@ func (s *ListHostedZonesByNameOutput) SetNextHostedZoneId(v string) *ListHostedZ // A request to retrieve a list of the public and private hosted zones that // are associated with the current AWS account. type ListHostedZonesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListHostedZonesRequest" type:"structure"` // If you're using reusable delegation sets and you want to list all of the // hosted zones that are associated with a reusable delegation set, specify @@ -11257,7 +11292,7 @@ func (s *ListHostedZonesOutput) SetNextMarker(v string) *ListHostedZonesOutput { } type ListQueryLoggingConfigsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListQueryLoggingConfigsRequest" type:"structure"` // (Optional) If you want to list the query logging configuration that is associated // with a hosted zone, specify the ID in HostedZoneId. @@ -11359,7 +11394,7 @@ func (s *ListQueryLoggingConfigsOutput) SetQueryLoggingConfigs(v []*QueryLogging // A request for the resource record sets that are associated with a specified // hosted zone. type ListResourceRecordSetsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListResourceRecordSetsRequest" type:"structure"` // The ID of the hosted zone that contains the resource record sets that you // want to list. @@ -11375,10 +11410,10 @@ type ListResourceRecordSetsInput struct { // of maxitems resource record sets. MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` - // Weighted resource record sets only: If results were truncated for a given - // DNS name and type, specify the value of NextRecordIdentifier from the previous - // response to get the next resource record set that has the current DNS name - // and type. + // Resource record sets that have a routing policy other than simple: If results + // were truncated for a given DNS name and type, specify the value of NextRecordIdentifier + // from the previous response to get the next resource record set that has the + // current DNS name and type. 
StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` // The first name in the lexicographic ordering of resource record sets that @@ -11563,7 +11598,7 @@ func (s *ListResourceRecordSetsOutput) SetResourceRecordSets(v []*ResourceRecord // A request to get a list of the reusable delegation sets that are associated // with the current AWS account. type ListReusableDelegationSetsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListReusableDelegationSetsRequest" type:"structure"` // If the value of IsTruncated in the previous response was true, you have more // reusable delegation sets. To get another group, submit another ListReusableDelegationSets @@ -11684,7 +11719,7 @@ func (s *ListReusableDelegationSetsOutput) SetNextMarker(v string) *ListReusable // A complex type containing information about a request for a list of the tags // that are associated with an individual resource. type ListTagsForResourceInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTagsForResourceRequest" type:"structure"` // The ID of the resource for which you want to retrieve tags. // @@ -11866,7 +11901,7 @@ func (s *ListTagsForResourcesOutput) SetResourceTagSets(v []*ResourceTagSet) *Li // A complex type that contains the information about the request to list the // traffic policies that are associated with the current AWS account. type ListTrafficPoliciesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTrafficPoliciesRequest" type:"structure"` // (Optional) The maximum number of traffic policies that you want Amazon Route // 53 to return in response to this request. If you have more than MaxItems @@ -11989,7 +12024,7 @@ func (s *ListTrafficPoliciesOutput) SetTrafficPolicySummaries(v []*TrafficPolicy // A request for the traffic policy instances that you created in a specified // hosted zone. type ListTrafficPolicyInstancesByHostedZoneInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTrafficPolicyInstancesByHostedZoneRequest" type:"structure"` // The ID of the hosted zone that you want to list traffic policy instances // for. @@ -12153,7 +12188,7 @@ func (s *ListTrafficPolicyInstancesByHostedZoneOutput) SetTrafficPolicyInstances // A complex type that contains the information about the request to list your // traffic policy instances. type ListTrafficPolicyInstancesByPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTrafficPolicyInstancesByPolicyRequest" type:"structure"` // If the value of IsTruncated in the previous response was true, you have more // traffic policy instances. To get more traffic policy instances, submit another @@ -12370,7 +12405,7 @@ func (s *ListTrafficPolicyInstancesByPolicyOutput) SetTrafficPolicyInstances(v [ // A request to get information about the traffic policy instances that you // created by using the current AWS account. type ListTrafficPolicyInstancesInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTrafficPolicyInstancesRequest" type:"structure"` // If the value of IsTruncated in the previous response was true, you have more // traffic policy instances. To get more traffic policy instances, submit another @@ -12539,7 +12574,7 @@ func (s *ListTrafficPolicyInstancesOutput) SetTrafficPolicyInstances(v []*Traffi // A complex type that contains the information about the request to list your // traffic policies. 
type ListTrafficPolicyVersionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListTrafficPolicyVersionsRequest" type:"structure"` // Specify the value of Id of the traffic policy for which you want to list // all versions. @@ -12681,7 +12716,7 @@ func (s *ListTrafficPolicyVersionsOutput) SetTrafficPolicyVersionMarker(v string // A complex type that contains information about that can be associated with // your hosted zone. type ListVPCAssociationAuthorizationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListVPCAssociationAuthorizationsRequest" type:"structure"` // The ID of the hosted zone for which you want a list of VPCs that can be associated // with the hosted zone. @@ -13038,43 +13073,29 @@ type ResourceRecordSet struct { // of non-alias resource record sets that have the same routing policy, name, // and type (such as multiple weighted records named www.example.com with // a type of A) and you specify health check IDs for all the resource record - // sets. - // - // If the health check status for a resource record set is healthy, Route 53 - // includes the record among the records that it responds to DNS queries - // with. - // - // If the health check status for a resource record set is unhealthy, Route - // 53 stops responding to DNS queries using the value for that resource record - // set. - // - // If the health check status for all resource record sets in the group is unhealthy, - // Route 53 considers all resource record sets in the group healthy and responds - // to DNS queries accordingly. - // - // * Alias resource record sets: You specify the following settings: - // - // You set EvaluateTargetHealth to true for an alias resource record set in - // a group of resource record sets that have the same routing policy, name, + // sets. If the health check status for a resource record set is healthy, + // Route 53 includes the record among the records that it responds to DNS + // queries with. If the health check status for a resource record set is + // unhealthy, Route 53 stops responding to DNS queries using the value for + // that resource record set. If the health check status for all resource + // record sets in the group is unhealthy, Route 53 considers all resource + // record sets in the group healthy and responds to DNS queries accordingly. + // + // * Alias resource record sets: You specify the following settings: You + // set EvaluateTargetHealth to true for an alias resource record set in a + // group of resource record sets that have the same routing policy, name, // and type (such as multiple weighted records named www.example.com with - // a type of A). - // - // You configure the alias resource record set to route traffic to a non-alias - // resource record set in the same hosted zone. - // - // You specify a health check ID for the non-alias resource record set. - // - // If the health check status is healthy, Route 53 considers the alias resource - // record set to be healthy and includes the alias record among the records - // that it responds to DNS queries with. - // - // If the health check status is unhealthy, Route 53 stops responding to DNS - // queries using the alias resource record set. - // - // The alias resource record set can also route traffic to a group of non-alias - // resource record sets that have the same routing policy, name, and type. - // In that configuration, associate health checks with all of the resource - // record sets in the group of non-alias resource record sets. 
+ // a type of A). You configure the alias resource record set to route traffic
+ // to a non-alias resource record set in the same hosted zone. You specify
+ // a health check ID for the non-alias resource record set. If the health
+ // check status is healthy, Route 53 considers the alias resource record
+ // set to be healthy and includes the alias record among the records that
+ // it responds to DNS queries with. If the health check status is unhealthy,
+ // Route 53 stops responding to DNS queries using the alias resource record
+ // set. The alias resource record set can also route traffic to a group of
+ // non-alias resource record sets that have the same routing policy, name,
+ // and type. In that configuration, associate health checks with all of the
+ // resource record sets in the group of non-alias resource record sets.
//
// Geolocation Routing
//
@@ -13104,10 +13125,10 @@ type ResourceRecordSet struct {
//
// Health check results will be unpredictable if you do the following:
//
- // Create a health check that has the same value for FullyQualifiedDomainName
- // as the name of a resource record set.
+ // * Create a health check that has the same value for FullyQualifiedDomainName
+ // as the name of a resource record set.
//
- // Associate that health check with the resource record set.
+ // * Associate that health check with the resource record set.
HealthCheckId *string `type:"string"`
// Multivalue answer resource record sets only: To route traffic approximately
@@ -13165,9 +13186,8 @@ type ResourceRecordSet struct {
//
// * If you include * in any position other than the leftmost label in a
// domain name, DNS treats it as an * character (ASCII 42), not as a wildcard.
- //
- // You can't use the * wildcard for resource records sets that have a type of
- // NS.
+ // You can't use the * wildcard for resource record sets that have a type
+ // of NS.
//
// You can use the * wildcard as the leftmost label in a domain name, for example,
// *.example.com. You can't use an * for one of the middle labels, for example,
@@ -13283,32 +13303,29 @@ type ResourceRecordSet struct {
//
// Values for alias resource record sets:
//
- // * Amazon API Gateway custom regional APIs and edge-optimized APIs:A
- //
- // * CloudFront distributions:A
+ // * Amazon API Gateway custom regional APIs and edge-optimized APIs: A
//
- // If IPv6 is enabled for the distribution, create two resource record sets
- // to route traffic to your distribution, one with a value of A and one with
- // a value of AAAA.
+ // * CloudFront distributions: A If IPv6 is enabled for the distribution,
+ // create two resource record sets to route traffic to your distribution,
+ // one with a value of A and one with a value of AAAA.
//
// * AWS Elastic Beanstalk environment that has a regionalized subdomain:
// A
//
- // * ELB load balancers:A | AAAA
+ // * ELB load balancers: A | AAAA
//
- // * Amazon S3 buckets:A
+ // * Amazon S3 buckets: A
//
- // * Amazon Virtual Private Cloud interface VPC endpointsA
+ // * Amazon Virtual Private Cloud interface VPC endpoints: A
//
// * Another resource record set in this hosted zone: Specify the type of
// the resource record set that you're creating the alias for. All values
- // are supported except NS and SOA.
- //
- // If you're creating an alias record that has the same name as the hosted zone
- // (known as the zone apex), you can't route traffic to a record for which
- // the value of Type is CNAME.
This is because the alias record must have - // the same type as the record you're routing traffic to, and creating a - // CNAME record for the zone apex isn't supported even for an alias record. + // are supported except NS and SOA. If you're creating an alias record that + // has the same name as the hosted zone (known as the zone apex), you can't + // route traffic to a record for which the value of Type is CNAME. This is + // because the alias record must have the same type as the record you're + // routing traffic to, and creating a CNAME record for the zone apex isn't + // supported even for an alias record. // // Type is a required field Type *string `type:"string" required:"true" enum:"RRType"` @@ -13339,8 +13356,7 @@ type ResourceRecordSet struct { // with the applicable value for that resource record set. However, if you // set Weight to 0 for all resource record sets that have the same combination // of DNS name and type, traffic is routed to all resources with equal probability. - // - // The effect of setting Weight to 0 is different when you associate health + // The effect of setting Weight to 0 is different when you associate health // checks with weighted resource record sets. For more information, see Options // for Configuring Route 53 Active-Active and Active-Passive Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) // in the Amazon Route 53 Developer Guide. @@ -13620,7 +13636,7 @@ type Tag struct { // * Edit a tag: Key is the name of the tag that you want to change the Value // for. // - // * Delete a key: Key is the name of the tag you want to remove. + // * Delete a key: Key is the name of the tag you want to remove. // // * Give a name to a health check: Edit the default Name tag. In the Amazon // Route 53 console, the list of your health checks includes a Name column @@ -13662,7 +13678,7 @@ func (s *Tag) SetValue(v string) *Tag { // for a specified record name and type. You can optionally specify the IP address // of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask. type TestDNSAnswerInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"TestDNSAnswerRequest" type:"structure"` // If the resolver that you specified for resolverip supports EDNS0, specify // the IPv4 or IPv6 address of a client in the applicable location, for example, @@ -13974,15 +13990,21 @@ type TrafficPolicyInstance struct { // The value of State is one of the following values: // - // AppliedAmazon Route 53 has finished creating resource record sets, and changes - // have propagated to all Route 53 edge locations. + // Applied // - // CreatingRoute 53 is creating the resource record sets. Use GetTrafficPolicyInstance + // Amazon Route 53 has finished creating resource record sets, and changes have + // propagated to all Route 53 edge locations. + // + // Creating + // + // Route 53 is creating the resource record sets. Use GetTrafficPolicyInstance // to confirm that the CreateTrafficPolicyInstance request completed successfully. // - // FailedRoute 53 wasn't able to create or update the resource record sets. - // When the value of State is Failed, see Message for an explanation of what - // caused the request to fail. + // Failed + // + // Route 53 wasn't able to create or update the resource record sets. When the + // value of State is Failed, see Message for an explanation of what caused the + // request to fail. 
// // State is a required field State *string `type:"string" required:"true"` @@ -14222,7 +14244,7 @@ type UpdateHealthCheckInput struct { // However, you can't update an existing health check to add or remove the value // of IPAddress. // - // If you specify a value forIPAddress: + // If you specify a value for IPAddress: // // Route 53 sends health check requests to the specified IPv4 or IPv6 address // and passes the value of FullyQualifiedDomainName in the Host header for all @@ -14247,7 +14269,7 @@ type UpdateHealthCheckInput struct { // If you don't specify a value for FullyQualifiedDomainName, Route 53 substitutes // the value of IPAddress in the Host header in each of the above cases. // - // If you don't specify a value forIPAddress: + // If you don't specify a value for IPAddress: // // If you don't specify a value for IPAddress, Route 53 sends a DNS request // to the domain that you specify in FullyQualifiedDomainName at the interval @@ -15010,6 +15032,9 @@ const ( // CloudWatchRegionApEast1 is a CloudWatchRegion enum value CloudWatchRegionApEast1 = "ap-east-1" + // CloudWatchRegionMeSouth1 is a CloudWatchRegion enum value + CloudWatchRegionMeSouth1 = "me-south-1" + // CloudWatchRegionApSouth1 is a CloudWatchRegion enum value CloudWatchRegionApSouth1 = "ap-south-1" @@ -15241,6 +15266,9 @@ const ( // ResourceRecordSetRegionApEast1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionApEast1 = "ap-east-1" + // ResourceRecordSetRegionMeSouth1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionMeSouth1 = "me-south-1" + // ResourceRecordSetRegionApSouth1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionApSouth1 = "ap-south-1" ) @@ -15303,6 +15331,9 @@ const ( // VPCRegionApEast1 is a VPCRegion enum value VPCRegionApEast1 = "ap-east-1" + // VPCRegionMeSouth1 is a VPCRegion enum value + VPCRegionMeSouth1 = "me-south-1" + // VPCRegionApSoutheast1 is a VPCRegion enum value VPCRegionApSoutheast1 = "ap-southeast-1" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go index efe2d6e7c0a..7aca8722e99 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go @@ -33,7 +33,7 @@ func sanitizeURL(r *request.Request) { // Update Path so that it reflects the cleaned RawPath updated, err := url.Parse(r.HTTPRequest.URL.RawPath) if err != nil { - r.Error = awserr.New("SerializationError", "failed to clean Route53 URL", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to clean Route53 URL", err) return } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/service.go index dd22cb2cd84..391c3e28615 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -46,11 +46,11 @@ const ( // svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53 { svc := &Route53{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-04-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go index 266e9a8ba43..b3b95a126e2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go @@ -1,77 +1,106 @@ package route53 import ( - "bytes" "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) -type baseXMLErrorResponse struct { - XMLName xml.Name -} +const errorRespTag = "ErrorResponse" +const invalidChangeTag = "InvalidChangeBatch" type standardXMLErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +func (e standardXMLErrorResponse) FillCommon(c *xmlErrorResponse) { + c.Code = e.Code + c.Message = e.Message + c.RequestID = e.RequestID } type invalidChangeBatchXMLErrorResponse struct { - XMLName xml.Name `xml:"InvalidChangeBatch"` - Messages []string `xml:"Messages>Message"` + Messages []string `xml:"Messages>Message"` + RequestID string `xml:"RequestId"` } -func unmarshalChangeResourceRecordSetsError(r *request.Request) { - defer r.HTTPResponse.Body.Close() +func (e invalidChangeBatchXMLErrorResponse) FillCommon(c *xmlErrorResponse) { + c.Code = invalidChangeTag + c.Message = "ChangeBatch errors occurred" + c.Messages = e.Messages + c.RequestID = e.RequestID +} - responseBody, err := ioutil.ReadAll(r.HTTPResponse.Body) +type xmlErrorResponse struct { + Code string + Message string + Messages []string + RequestID string +} - if err != nil { - r.Error = awserr.New("SerializationError", "failed to read Route53 XML error response", err) - return +func (e *xmlErrorResponse) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type commonFiller interface { + FillCommon(*xmlErrorResponse) } - baseError := &baseXMLErrorResponse{} + var errResp commonFiller + switch start.Name.Local { + case errorRespTag: + errResp = &standardXMLErrorResponse{} - if err := xml.Unmarshal(responseBody, baseError); err != nil { - r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", err) - return - } + case invalidChangeTag: + errResp = &invalidChangeBatchXMLErrorResponse{} - switch baseError.XMLName.Local { - case 
"InvalidChangeBatch": - unmarshalInvalidChangeBatchError(r, responseBody) default: - r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(responseBody)) - restxml.UnmarshalError(r) + return fmt.Errorf("unknown error message, %v", start.Name.Local) } + + if err := d.DecodeElement(errResp, &start); err != nil { + return err + } + + errResp.FillCommon(e) + return nil } -func unmarshalInvalidChangeBatchError(r *request.Request, requestBody []byte) { - resp := &invalidChangeBatchXMLErrorResponse{} - err := xml.Unmarshal(requestBody, resp) +func unmarshalChangeResourceRecordSetsError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } - const errorCode = "InvalidChangeBatch" - errors := []error{} - - for _, msg := range resp.Messages { - errors = append(errors, awserr.New(errorCode, msg, nil)) + var baseErr awserr.Error + if len(errResp.Messages) != 0 { + var errs []error + for _, msg := range errResp.Messages { + errs = append(errs, awserr.New(invalidChangeTag, msg, nil)) + } + baseErr = awserr.NewBatchError(errResp.Code, errResp.Message, errs) + } else { + baseErr = awserr.New(errResp.Code, errResp.Message, nil) } + reqID := errResp.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } r.Error = awserr.NewRequestFailure( - awserr.NewBatchError(errorCode, "ChangeBatch errors occurred", errors), + baseErr, r.HTTPResponse.StatusCode, - r.RequestID, + reqID, ) - } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go index 6bdbd18bf0e..3e1dc08be48 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go @@ -1265,7 +1265,7 @@ func (c *Route53Resolver) ListResolverEndpointIpAddressesWithContext(ctx aws.Con // // Example iterating over at most 3 pages of a ListResolverEndpointIpAddresses operation. // pageNum := 0 // err := client.ListResolverEndpointIpAddressesPages(params, -// func(page *ListResolverEndpointIpAddressesOutput, lastPage bool) bool { +// func(page *route53resolver.ListResolverEndpointIpAddressesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1297,10 +1297,12 @@ func (c *Route53Resolver) ListResolverEndpointIpAddressesPagesWithContext(ctx aw }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResolverEndpointIpAddressesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResolverEndpointIpAddressesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1413,7 +1415,7 @@ func (c *Route53Resolver) ListResolverEndpointsWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListResolverEndpoints operation. 
// pageNum := 0 // err := client.ListResolverEndpointsPages(params, -// func(page *ListResolverEndpointsOutput, lastPage bool) bool { +// func(page *route53resolver.ListResolverEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1445,10 +1447,12 @@ func (c *Route53Resolver) ListResolverEndpointsPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResolverEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResolverEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1561,7 +1565,7 @@ func (c *Route53Resolver) ListResolverRuleAssociationsWithContext(ctx aws.Contex // // Example iterating over at most 3 pages of a ListResolverRuleAssociations operation. // pageNum := 0 // err := client.ListResolverRuleAssociationsPages(params, -// func(page *ListResolverRuleAssociationsOutput, lastPage bool) bool { +// func(page *route53resolver.ListResolverRuleAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1593,10 +1597,12 @@ func (c *Route53Resolver) ListResolverRuleAssociationsPagesWithContext(ctx aws.C }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResolverRuleAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResolverRuleAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1708,7 +1714,7 @@ func (c *Route53Resolver) ListResolverRulesWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListResolverRules operation. // pageNum := 0 // err := client.ListResolverRulesPages(params, -// func(page *ListResolverRulesOutput, lastPage bool) bool { +// func(page *route53resolver.ListResolverRulesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1740,10 +1746,12 @@ func (c *Route53Resolver) ListResolverRulesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResolverRulesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResolverRulesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go index 367e933ebb9..7b8dd4bf715 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go @@ -46,11 +46,11 @@ const ( // svc := route53resolver.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53Resolver { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53Resolver { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53Resolver { svc := &Route53Resolver{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 83a42d249b4..a979c59f1bb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -545,6 +545,10 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1071,7 +1075,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // DeleteBucketReplication API operation for Amazon Simple Storage Service. // // Deletes the replication configuration from the bucket. For information about -// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3335,8 +3339,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object +// Gets the object lock configuration for a bucket. The rule specified in the +// object lock configuration will be applied by default to every new object // placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4210,7 +4214,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa // // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
// pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4242,10 +4246,12 @@ func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4340,7 +4346,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer // // Example iterating over at most 3 pages of a ListObjectVersions operation. // pageNum := 0 // err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4372,10 +4378,12 @@ func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObje }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4477,7 +4485,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op // // Example iterating over at most 3 pages of a ListObjects operation. // pageNum := 0 // err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { +// func(page *s3.ListObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4509,10 +4517,12 @@ func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4615,7 +4625,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input // // Example iterating over at most 3 pages of a ListObjectsV2 operation. // pageNum := 0 // err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4647,10 +4657,12 @@ func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2 }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4745,7 +4757,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts . // // Example iterating over at most 3 pages of a ListParts operation. 
// pageNum := 0 // err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { +// func(page *s3.ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4777,10 +4789,12 @@ func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, f }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5754,8 +5768,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5831,7 +5844,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // PutBucketReplication API operation for Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. For more -// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6439,8 +6452,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new +// Places an object lock configuration on the specified bucket. The rule specified +// in the object lock configuration will be applied by default to every new // object placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7010,13 +7023,16 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. 
DaysAfterInitiation *int64 `type:"integer"` } @@ -7037,11 +7053,15 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI } type AbortMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key of the object for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -7051,6 +7071,8 @@ type AbortMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Upload ID that identifies the multipart upload. + // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -7145,10 +7167,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. + // Specifies the transfer acceleration status of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -7168,12 +7193,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } +// Contains the elements that set the ACL permissions for an object per grantee. type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -7223,7 +7250,9 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { type AccessControlTranslation struct { _ struct{} `type:"structure"` - // The override value for the owner of the replica object. + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -7258,10 +7287,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. type AnalyticsAndOperator struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate. + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. 
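The AbortIncompleteMultipartUpload hunk above documents DaysAfterInitiation but never shows the type in use. A hedged sketch of attaching it to a bucket lifecycle rule through PutBucketLifecycleConfiguration; the bucket name, rule ID, and seven-day window are illustrative placeholders, not values taken from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule: abort any multipart upload that is still incomplete
	// seven days after it was initiated, so stray parts stop accruing cost.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-multipart-uploads"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("PutBucketLifecycleConfiguration:", err)
	}
}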
@@ -7310,6 +7343,11 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +// +// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) +// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -7318,13 +7356,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. // // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -7384,6 +7422,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } +// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -7492,7 +7531,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon resource name (ARN) of the bucket to which data is exported. + // The Amazon Resource Name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -7501,13 +7540,12 @@ type AnalyticsS3BucketDestination struct { // the owner will not be validated prior to exporting data. BucketAccountId *string `type:"string"` - // The file format used when exporting data to Amazon S3. + // Specifies the file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The exported data begins with this - // prefix. + // The prefix to use when exporting data. The prefix is prepended to all results. Prefix *string `type:"string"` } @@ -7600,9 +7638,14 @@ func (s *Bucket) SetName(v string) *Bucket { return s } +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -7649,9 +7692,10 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec type BucketLoggingStatus struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. 
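// Editor's sketch of the AnalyticsConfiguration shape described above: the ID
// appears both on the input and inside the configuration, and the required
// StorageClassAnalysis exports its findings as CSV to a second bucket. The
// bucket names, ARN, and ID are placeholders.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putAnalytics(svc *s3.S3) error {
	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("my-bucket"),     // placeholder
		Id:     aws.String("docs-analysis"), // placeholder
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id: aws.String("docs-analysis"), // matches the request ID
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::my-analytics-bucket"), // placeholder ARN
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
							Prefix: aws.String("analysis/"), // prepended to all results
						},
					},
				},
			},
		},
	})
	return err
}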
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -7686,9 +7730,15 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` + // A set of allowed origins and methods. + // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -7732,14 +7782,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } +// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -8040,7 +8094,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { } type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8290,6 +8344,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } +// Specifies a condition that must be met for a redirect to apply. type Condition struct { _ struct{} `type:"structure"` @@ -8359,7 +8414,7 @@ func (s *ContinuationEvent) UnmarshalEvent( } type CopyObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -8409,7 +8464,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. 
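// Editor's sketch matching the CORSRule fields documented above: one rule that
// lets a placeholder origin issue GET and PUT requests, allows any requested
// header in the preflight, and exposes ETag to browser scripts.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putCors(svc *s3.S3) error {
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("https://example.com")}, // placeholder origin
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				ExposeHeaders:  []*string{aws.String("ETag")},
				MaxAgeSeconds:  aws.Int64(3000), // how long browsers may cache the preflight
			}},
		},
	})
	return err
}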
- CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8444,10 +8499,10 @@ type CopyObjectInput struct { // Specifies whether you want to apply a Legal Hold to the copied object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode that you want to apply to the copied object. + // The object lock mode that you want to apply to the copied object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want the copied object's Object Lock to expire. + // The date and time when you want the copied object's object lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -8464,13 +8519,18 @@ type CopyObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported @@ -8735,6 +8795,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -8795,6 +8861,11 @@ type CopyObjectOutput struct { // verification of the customer-provided encryption key. 
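// Editor's sketch of the SSEKMSEncryptionContext header added above: per the
// new doc comment, the value is a base64-encoded UTF-8 JSON document of
// encryption-context key-value pairs. Bucket names, key, and the KMS alias
// are placeholders.
package main

import (
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func copyWithKMSContext(svc *s3.S3) error {
	ctx := base64.StdEncoding.EncodeToString([]byte(`{"department":"docs"}`))
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:                  aws.String("dest-bucket"),           // placeholder
		Key:                     aws.String("report-copy.pdf"),       // placeholder
		CopySource:              aws.String("src-bucket/report.pdf"), // URL-encoded source
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:             aws.String("alias/my-key"), // placeholder
		SSEKMSEncryptionContext: aws.String(ctx),
	})
	return err
}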
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -8853,6 +8924,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -8958,7 +9035,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke } type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` @@ -8984,7 +9061,8 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // Specifies whether you want Amazon S3 object lock to be enabled for the new + // bucket. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } @@ -9098,7 +9176,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { } type CreateMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -9147,10 +9225,10 @@ type CreateMultipartUploadInput struct { // Specifies whether you want to apply a Legal Hold to the uploaded object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // Specifies the Object Lock mode that you want to apply to the uploaded object. + // Specifies the object lock mode that you want to apply to the uploaded object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // Specifies the date and time when you want the Object Lock to expire. + // Specifies the date and time when you want the object lock to expire. 
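// Editor's sketch of the reworded ObjectLockEnabledForBucket flag above. At
// this SDK version the flag can only be set when the bucket is created, and
// S3 enables versioning on the bucket as part of it; the bucket name is a
// placeholder.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func createLockedBucket(svc *s3.S3) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket:                     aws.String("my-locked-bucket"), // placeholder
		ObjectLockEnabledForBucket: aws.Bool(true),
	})
	return err
}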
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -9167,13 +9245,18 @@ type CreateMultipartUploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported @@ -9368,6 +9451,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -9428,6 +9517,11 @@ type CreateMultipartUploadOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -9499,6 +9593,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
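// Editor's sketch: the same SSEKMSEncryptionContext header is now accepted
// when starting a multipart upload, and CreateMultipartUploadOutput echoes it
// back. Names and the context payload are placeholders.
package main

import (
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func startEncryptedUpload(svc *s3.S3) (string, error) {
	ctx := base64.StdEncoding.EncodeToString([]byte(`{"project":"demo"}`))
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:                  aws.String("my-bucket"), // placeholder
		Key:                     aws.String("big.bin"),   // placeholder
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSEncryptionContext: aws.String(ctx),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.UploadId), nil // pass to UploadPart / Complete
}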
func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { s.SSEKMSKeyId = &v @@ -9517,7 +9617,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// The container element for specifying the default Object Lock retention settings +// The container element for specifying the default object lock retention settings // for new objects placed in the specified bucket. type DefaultRetention struct { _ struct{} `type:"structure"` @@ -9525,7 +9625,7 @@ type DefaultRetention struct { // The number of days that you want to specify for the default retention period. Days *int64 `type:"integer"` - // The default Object Lock retention mode you want to apply to new objects placed + // The default object lock retention mode you want to apply to new objects placed // in the specified bucket. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` @@ -9618,14 +9718,14 @@ func (s *Delete) SetQuiet(v bool) *Delete { } type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is deleted. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -9694,7 +9794,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { } type DeleteBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9754,7 +9854,7 @@ func (s DeleteBucketCorsOutput) GoString() string { } type DeleteBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` // The name of the bucket containing the server-side encryption configuration // to delete. @@ -9817,7 +9917,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string { } type DeleteBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9863,7 +9963,7 @@ func (s *DeleteBucketInput) getBucket() (v string) { } type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to delete. 
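// Editor's sketch of the DefaultRetention container described above: every
// new object in the bucket receives a 30-day GOVERNANCE-mode retention period
// unless the request overrides it. The bucket is a placeholder and must have
// been created with object lock enabled.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func setDefaultRetention(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	return err
}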
// @@ -9939,7 +10039,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { } type DeleteBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9999,7 +10099,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { } type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to delete. // @@ -10089,7 +10189,7 @@ func (s DeleteBucketOutput) GoString() string { } type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10149,7 +10249,7 @@ func (s DeleteBucketPolicyOutput) GoString() string { } type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` // The bucket name. // @@ -10214,7 +10314,7 @@ func (s DeleteBucketReplicationOutput) GoString() string { } type DeleteBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10274,7 +10374,7 @@ func (s DeleteBucketTaggingOutput) GoString() string { } type DeleteBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10420,12 +10520,12 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { } type DeleteObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` @@ -10566,7 +10666,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { } type DeleteObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10659,13 +10759,13 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO } type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // Object Lock in place. 
You must have sufficient permissions to perform this + // object lock in place. You must have sufficient permissions to perform this // operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` @@ -10795,7 +10895,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { } type DeletePublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. // @@ -10902,33 +11002,33 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// A container for information about the replication destination. +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket. type Destination struct { _ struct{} `type:"structure"` - // A container for information about access control for replicas. - // - // Use this element only in a cross-account scenario where source and destination - // bucket owners are not the same to change replica ownership to the AWS account - // that owns the destination bucket. If you don't add this element to the replication - // configuration, the replicas are owned by same AWS account that owns the source - // object. + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. AccessControlTranslation *AccessControlTranslation `type:"structure"` - // The account ID of the destination bucket. Currently, Amazon S3 verifies this - // value only if Access Control Translation is enabled. - // - // In a cross-account scenario, if you change replica ownership to the AWS account - // that owns the destination bucket by adding the AccessControlTranslation element, - // this is the account ID of the owner of the destination bucket. + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Cross-Region Replication Additional Configuration: Change Replica Owner + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in + // the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to // store replicas of the object identified by the rule. // - // If there are multiple rules in your replication configuration, all rules - // must specify the same bucket as the destination. A replication configuration - // can replicate objects to only one destination bucket. + // A replication configuration can replicate objects to only one destination + // bucket. If there are multiple rules in your replication configuration, all + // rules must specify the same destination bucket. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -10937,8 +11037,13 @@ type Destination struct { // is specified, you must specify this element. 
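// Editor's sketch combining the delete-related notes above: a batch delete
// that bypasses GOVERNANCE-mode object locks, which also requires the
// s3:BypassGovernanceRetention permission. The bucket name is a placeholder.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func forceDelete(svc *s3.S3, keys []string) error {
	ids := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket:                    aws.String("my-locked-bucket"), // placeholder
		BypassGovernanceRetention: aws.Bool(true),
		Delete: &s3.Delete{
			Objects: ids,
			Quiet:   aws.Bool(true), // report only failures
		},
	})
	return err
}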
EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The class of storage used to store the object. By default Amazon S3 uses - // storage class of the source object when creating a replica. + // The storage class to use when replicating objects, such as standard or reduced + // redundancy. By default, Amazon S3 uses the storage class of the source object + // to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -11068,13 +11173,13 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { return s } -// A container for information about the encryption-based configuration for -// replicas. +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The ID of the AWS KMS key for the AWS Region where the destination bucket - // resides. Amazon S3 uses this key to encrypt the replica object. + // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. + // Amazon S3 uses this key to encrypt replica objects. ReplicaKmsKeyID *string `type:"string"` } @@ -11207,18 +11312,19 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } -// A container for a key value pair that defines the criteria for the filter -// rule. +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. type FilterRule struct { _ struct{} `type:"structure"` // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum prefix length is 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` + // The value that the filter searches for in object key names. Value *string `type:"string"` } @@ -11245,7 +11351,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { } type GetBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` // Name of the bucket for which the accelerate configuration is retrieved. 
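// Editor's sketch of the Destination and EncryptionConfiguration containers
// described above, using the legacy prefix-style rule for brevity. The role
// ARN, bucket ARN, and KMS alias are placeholders; as the docs note, every
// rule in one configuration must point at the same destination bucket, and
// replicating SSE-KMS objects also needs SourceSelectionCriteria enabled.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putReplication(svc *s3.S3) error {
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("src-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status: aws.String(s3.ReplicationRuleStatusEnabled),
				Prefix: aws.String(""), // legacy V1 filter: replicate everything
				SourceSelectionCriteria: &s3.SourceSelectionCriteria{
					SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
						Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
					},
				},
				Destination: &s3.Destination{
					Bucket:       aws.String("arn:aws:s3:::dest-bucket"), // placeholder ARN
					StorageClass: aws.String(s3.StorageClassStandardIa),
					EncryptionConfiguration: &s3.EncryptionConfiguration{
						ReplicaKmsKeyID: aws.String("alias/replica-key"), // placeholder
					},
				},
			}},
		},
	})
	return err
}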
// @@ -11316,7 +11422,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA } type GetBucketAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11393,14 +11499,14 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { } type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -11478,7 +11584,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana } type GetBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11546,7 +11652,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { } type GetBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` // The name of the bucket from which the server-side encryption configuration // is retrieved. @@ -11597,8 +11703,7 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } @@ -11619,7 +11724,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv } type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to retrieve. 
// @@ -11704,7 +11809,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv } type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11772,7 +11877,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge } type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11840,7 +11945,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput } type GetBucketLocationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11908,7 +12013,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca } type GetBucketLoggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11956,9 +12061,10 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -11979,7 +12085,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket } type GetBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to retrieve. // @@ -12064,7 +12170,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics } type GetBucketNotificationConfigurationRequest struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` // Name of the bucket to get the notification configuration for. // @@ -12112,7 +12218,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { } type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12181,7 +12287,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { } type GetBucketPolicyStatusInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` // The name of the Amazon S3 bucket whose policy status you want to retrieve. 
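// Editor's sketch: reading back what GetBucketEncryptionOutput now calls "the
// default server-side-encryption configuration". The bucket name is a
// placeholder; an unset configuration surfaces as an error.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func showDefaultEncryption(svc *s3.S3) error {
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // placeholder
	})
	if err != nil {
		return err // e.g. ServerSideEncryptionConfigurationNotFoundError
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println("algorithm:", aws.StringValue(def.SSEAlgorithm),
				"kms key:", aws.StringValue(def.KMSMasterKeyID))
		}
	}
	return nil
}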
// @@ -12252,7 +12358,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke } type GetBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12322,7 +12428,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC } type GetBucketRequestPaymentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12391,7 +12497,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym } type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12460,7 +12566,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { } type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12540,7 +12646,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp } type GetBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12592,6 +12698,8 @@ type GetBucketWebsiteOutput struct { IndexDocument *IndexDocument `type:"structure"` + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` @@ -12632,7 +12740,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb } type GetObjectAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12755,7 +12863,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { } type GetObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12820,7 +12928,7 @@ type GetObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
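// Editor's sketch of the SSECustomerKey tag change above: with
// `marshal-as:"blob"`, the caller passes the raw 256-bit key and the SDK
// base64-encodes it on the wire (its SSE customization also derives the
// key-MD5 header when unset). The key bytes and names are placeholders; a
// real key must be generated and stored securely.
package main

import (
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func getWithSSEC(svc *s3.S3, rawKey []byte) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:               aws.String("my-bucket"),  // placeholder
		Key:                  aws.String("secret.bin"), // placeholder
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(string(rawKey)), // raw 32-byte key, not base64
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return ioutil.ReadAll(out.Body)
}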
// Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -12992,7 +13100,7 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { } type GetObjectLegalHoldInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` // The bucket containing the object whose Legal Hold status you want to retrieve. // @@ -13101,9 +13209,9 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje } type GetObjectLockConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - // The bucket whose Object Lock configuration you want to retrieve. + // The bucket whose object lock configuration you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13151,7 +13259,7 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) { type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - // The specified bucket's Object Lock configuration. + // The specified bucket's object lock configuration. ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } @@ -13235,10 +13343,10 @@ type GetObjectOutput struct { // returned if you have permission to view an object's legal hold status. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode currently in place for this object. + // The object lock mode currently in place for this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's Object Lock will expire. + // The date and time when this object's object lock will expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. @@ -13483,7 +13591,7 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput } type GetObjectRetentionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` // The bucket containing the object whose retention settings you want to retrieve. 
// @@ -13592,7 +13700,7 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje } type GetObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13692,7 +13800,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput } type GetObjectTorrentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13797,7 +13905,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu } type GetPublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you // want to retrieve. @@ -14028,7 +14136,7 @@ func (s *Grantee) SetURI(v string) *Grantee { } type HeadBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14088,7 +14196,7 @@ func (s HeadBucketOutput) GoString() string { } type HeadObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14136,7 +14244,7 @@ type HeadObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -14328,10 +14436,10 @@ type HeadObjectOutput struct { // The Legal Hold status for the specified object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode currently in place for this object. + // The object lock mode currently in place for this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's Object Lock will expire. + // The date and time when this object's object lock expires. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. @@ -14680,6 +14788,9 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// Specifies the inventory configuration for an Amazon S3 bucket. 
For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -14697,12 +14808,16 @@ type InventoryConfiguration struct { // Id is a required field Id *string `type:"string" required:"true"` - // Specifies which object version(s) to included in the inventory results. + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. // // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -15145,11 +15260,15 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -15157,8 +15276,8 @@ type LambdaFunctionConfiguration struct { // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 - // can invoke when it detects events of the specified type. + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. // // LambdaFunctionArn is a required field LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` @@ -15309,8 +15428,11 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp type LifecycleRule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. 
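// Editor's sketch of the LambdaFunctionConfiguration described above: invoke
// a function (placeholder ARN) for every object created under uploads/. The
// prefix FilterRule mirrors the reworded key-name filtering docs.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putLambdaNotification(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:on-upload"), // placeholder
				Events:            []*string{aws.String("s3:ObjectCreated:*")},
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{{
							Name:  aws.String("prefix"),
							Value: aws.String("uploads/"),
						}},
					},
				},
			}},
		},
	})
	return err
}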
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` Expiration *LifecycleExpiration `type:"structure"` @@ -15549,7 +15671,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { } type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` // The name of the bucket from which analytics configurations are retrieved. // @@ -15661,7 +15783,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketInventoryConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` // The name of the bucket containing the inventory configurations to retrieve. // @@ -15775,7 +15897,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketMetricsConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` // The name of the bucket containing the metrics configurations to retrieve. // @@ -15935,7 +16057,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { } type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16179,7 +16301,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti } type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16412,7 +16534,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe } type ListObjectsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16624,7 +16746,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { } type ListObjectsV2Input struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` // Name of the bucket to list. // @@ -16885,7 +17007,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { } type ListPartsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListPartsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -17267,9 +17389,10 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Container for logging information. Presence of this element indicates that -// logging is enabled. Parameters TargetBucket and TargetPrefix are required -// in this case. +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. 
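// Editor's sketch tying LifecycleRule to the AbortIncompleteMultipartUpload
// container above: permanently remove the parts of any upload still
// incomplete seven days after initiation. The empty-prefix filter applies the
// rule bucket-wide; the bucket name is a placeholder.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func putAbortMPURule(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-mpu"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	return err
}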
For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -17285,8 +17408,9 @@ type LoggingEnabled struct { TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. // // TargetPrefix is a required field TargetPrefix *string `type:"string" required:"true"` @@ -17429,6 +17553,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -17624,7 +17755,7 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -17646,11 +17777,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or -// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning // is suspended), you can set this action to request that Amazon S3 transition // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, -// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's // lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` @@ -17693,10 +17824,16 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. 
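// Editor's sketch of the reworked LoggingEnabled docs above: both TargetBucket
// and TargetPrefix are required, and a per-source prefix keeps log files from
// several buckets distinguishable in one shared log bucket. Names are
// placeholders.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func enableAccessLogs(svc *s3.S3) error {
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("my-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("my-log-bucket"),   // placeholder
				TargetPrefix: aws.String("logs/my-bucket/"), // one prefix per source bucket
			},
		},
	})
	return err
}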
QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // The topic to which notifications are sent and the events for which notifications + // are generated. TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -17806,8 +17943,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// A container for object key name filtering rules. For information about key -// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -17945,14 +18082,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// The container element for Object Lock configuration parameters. +// The container element for object lock configuration parameters. type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an Object Lock configuration enabled. + // Indicates whether this bucket has an object lock configuration enabled. ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // The Object Lock rule in place for the specified object. + // The object lock rule in place for the specified object. Rule *ObjectLockRule `type:"structure"` } @@ -18009,7 +18146,7 @@ type ObjectLockRetention struct { // Indicates the Retention mode for the specified object. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - // The date on which this Object Lock Retention will expire. + // The date on which this object lock retention expires. RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -18035,7 +18172,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti return s } -// The container element for an Object Lock rule. +// The container element for an object lock rule. type ObjectLockRule struct { _ struct{} `type:"structure"` @@ -18418,6 +18555,7 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } +// Specifies the Block Public Access configuration for an Amazon S3 bucket. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -18494,7 +18632,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi } type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` // Specifies the Accelerate Configuration you want to set for the bucket. // @@ -18570,11 +18708,12 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { } type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. 
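// Editor's sketch of the PublicAccessBlockConfiguration noted above, turning
// on all four Block Public Access switches; the bucket name is a placeholder.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func blockAllPublicAccess(svc *s3.S3) error {
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("my-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}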
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field @@ -18698,7 +18837,7 @@ func (s PutBucketAclOutput) GoString() string { } type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // @@ -18710,7 +18849,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -18793,11 +18932,16 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { } type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -18870,16 +19014,18 @@ func (s PutBucketCorsOutput) GoString() string { } type PutBucketEncryptionInput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket for which the server-side encryption configuration - // is set. + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. 
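// For illustration, a minimal sketch that sets SSE-S3 (AES256) as the bucket
// default (the bucket name is a placeholder):
//
//    _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
//        Bucket: aws.String("my-bucket"),
//        ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
//            Rules: []*s3.ServerSideEncryptionRule{{
//                ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
//                    SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
//                },
//            }},
//        },
//    })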
// // ServerSideEncryptionConfiguration is a required field ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -18953,7 +19099,7 @@ func (s PutBucketEncryptionOutput) GoString() string { } type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` // The name of the bucket where the inventory configuration will be stored. // @@ -19048,11 +19194,14 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { } type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. + // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) + // in the Amazon Simple Storage Service Developer Guide. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19121,7 +19270,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { } type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19194,7 +19343,7 @@ func (s PutBucketLifecycleOutput) GoString() string { } type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19271,7 +19420,7 @@ func (s PutBucketLoggingOutput) GoString() string { } type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` // The name of the bucket for which the metrics configuration is set. 
// @@ -19366,7 +19515,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { } type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19446,7 +19595,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { } type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19518,7 +19667,7 @@ func (s PutBucketNotificationOutput) GoString() string { } type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19602,7 +19751,7 @@ func (s PutBucketPolicyOutput) GoString() string { } type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19612,6 +19761,9 @@ type PutBucketReplicationInput struct { // // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token that allows Amazon S3 object lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -19667,6 +19819,12 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } +// SetToken sets the Token field's value. 
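// For illustration, a minimal sketch of supplying the token when configuring
// replication on an object-lock-enabled bucket (the bucket name and token
// value are placeholders, and cfg is assumed to be a *s3.ReplicationConfiguration
// built elsewhere):
//
//    input := &s3.PutBucketReplicationInput{
//        Bucket:                   aws.String("my-bucket"),
//        ReplicationConfiguration: cfg,
//    }
//    input.SetToken("my-object-lock-token")
//    _, err := svc.PutBucketReplication(input)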
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -19682,7 +19840,7 @@ func (s PutBucketReplicationOutput) GoString() string { } type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19759,7 +19917,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string { } type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19836,7 +19994,7 @@ func (s PutBucketTaggingOutput) GoString() string { } type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19845,6 +20003,10 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Describes the versioning state of an Amazon S3 bucket. For more information, + // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) + // in the Amazon Simple Storage Service API Reference. + // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19918,11 +20080,13 @@ func (s PutBucketVersioningOutput) GoString() string { } type PutBucketWebsiteInput struct { - _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies website configuration parameters for an Amazon S3 bucket. + // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19995,11 +20159,12 @@ func (s PutBucketWebsiteOutput) GoString() string { } type PutObjectAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. 
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field
@@ -20169,7 +20334,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { } type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@@ -20201,7 +20366,8 @@ type PutObjectInput struct { ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the object data.
@@ -20233,10 +20399,10 @@ type PutObjectInput struct { // The Legal Hold status that you want to apply to the specified object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode that you want to apply to this object. + // The object lock mode that you want to apply to this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want this object's Object Lock to expire. + // The date and time when you want this object's object lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the
@@ -20253,13 +20419,18 @@ type PutObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4.
Documentation on configuring any of the officially supported @@ -20473,6 +20644,12 @@ func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { s.SSEKMSKeyId = &v @@ -20504,7 +20681,7 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { } type PutObjectLegalHoldInput struct { - _ struct{} `type:"structure" payload:"LegalHold"` + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` // The bucket containing the object that you want to place a Legal Hold on. // @@ -20624,14 +20801,14 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo } type PutObjectLockConfigurationInput struct { - _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - // The bucket whose Object Lock configuration you want to create or replace. + // The bucket whose object lock configuration you want to create or replace. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The Object Lock configuration that you want to apply to the specified bucket. + // The object lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the @@ -20640,7 +20817,7 @@ type PutObjectLockConfigurationInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // A token to allow Object Lock to be enabled for an existing bucket. + // A token to allow Amazon S3 object lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -20749,6 +20926,11 @@ type PutObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -20801,6 +20983,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. 
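// For illustration, a minimal sketch of supplying an encryption context on
// upload and reading it back from the response (bucket, key, and context
// values are placeholders):
//
//    ctx := base64.StdEncoding.EncodeToString([]byte(`{"purpose":"demo"}`))
//    out, err := svc.PutObject(&s3.PutObjectInput{
//        Bucket:                  aws.String("my-bucket"),
//        Key:                     aws.String("report.csv"),
//        Body:                    strings.NewReader("hello"),
//        ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
//        SSEKMSEncryptionContext: aws.String(ctx),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.SSEKMSEncryptionContext))
//    }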
+func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { s.SSEKMSKeyId = &v @@ -20820,7 +21008,7 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { } type PutObjectRetentionInput struct { - _ struct{} `type:"structure" payload:"Retention"` + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` // The bucket that contains the object you want to apply this Object Retention // configuration to. @@ -20951,7 +21139,7 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti } type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21059,7 +21247,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput } type PutPublicAccessBlockInput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you // want to set. @@ -21139,17 +21327,16 @@ func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects -// specified events. +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. type QueueConfiguration struct { _ struct{} `type:"structure"` // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -21158,7 +21345,7 @@ type QueueConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // will publish a message when it detects events of the specified type. + // publishes a message when it detects events of the specified type. // // QueueArn is a required field QueueArn *string `locationName:"Queue" type:"string" required:"true"` @@ -21304,6 +21491,8 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. type Redirect struct { _ struct{} `type:"structure"` @@ -21314,8 +21503,8 @@ type Redirect struct { // siblings is present. HttpRedirectCode *string `type:"string"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. 
+ // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` // The object key prefix to use in the redirect request. For example, to redirect @@ -21327,7 +21516,7 @@ type Redirect struct { ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can + // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. ReplaceKeyWith *string `type:"string"` } @@ -21372,16 +21561,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` - // Name of the host where requests will be redirected. + // Name of the host where requests are redirected. // // HostName is a required field HostName *string `type:"string" required:"true"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` } @@ -21426,7 +21617,9 @@ type ReplicationConfiguration struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that Amazon S3 can assume when replicating the objects. + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. // // Role is a required field Role *string `type:"string" required:"true"` @@ -21486,7 +21679,7 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// A container for information about a specific replication rule. +// Specifies which Amazon S3 objects to replicate and where to store the replicas. type ReplicationRule struct { _ struct{} `type:"structure"` @@ -21506,7 +21699,8 @@ type ReplicationRule struct { ID *string `type:"string"` // An object keyname prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. + // rule applies. The maximum prefix length is 1,024 characters. To include all + // objects in a bucket, specify an empty string. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -21522,7 +21716,7 @@ type ReplicationRule struct { // * Same object qualify tag based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. Priority *int64 `type:"integer"` @@ -21531,12 +21725,9 @@ type ReplicationRule struct { // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using // an AWS KMS-Managed Key (SSE-KMS). 
- // - // If you want Amazon S3 to replicate objects created with server-side encryption - // using AWS KMS-Managed Keys. SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // If status isn't enabled, the rule is ignored. + // Specifies whether the rule is enabled. // // Status is a required field Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` @@ -21818,7 +22009,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { } type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -22051,6 +22242,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the redirect behavior and when a redirect is applied. type RoutingRule struct { _ struct{} `type:"structure"` @@ -22103,16 +22295,22 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. type Rule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` Expiration *LifecycleExpiration `type:"structure"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. + // Unique identifier for the rule. The value can't be longer than 255 characters. ID *string `type:"string"` // Specifies when noncurrent object versions expire. Upon expiration, Amazon @@ -22123,25 +22321,27 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or - // DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning // is suspended), you can set this action to request that Amazon S3 transition // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - // GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's // lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - // Prefix identifying one or more objects to which the rule applies. + // Object key prefix that identifies one or more objects to which this rule + // applies. 
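// For illustration, a minimal sketch of a rule that transitions everything
// under the "logs/" prefix to GLACIER after 30 days (the prefix and day count
// are placeholders):
//
//    rule := &s3.Rule{
//        Prefix: aws.String("logs/"),
//        Status: aws.String(s3.ExpirationStatusEnabled),
//        Transition: &s3.Transition{
//            Days:         aws.Int64(30),
//            StorageClass: aws.String(s3.TransitionStorageClassGlacier),
//        },
//    }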
// // Prefix is a required field Prefix *string `type:"string" required:"true"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. // // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an object transitions to a specified storage class. Transition *Transition `type:"structure"` } @@ -22274,6 +22474,41 @@ func (s SSES3) GoString() string { return s.String() } +type ScanRange struct { + _ struct{} `type:"structure"` + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. + Start *int64 `type:"long"` +} + +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} + +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s +} + // SelectObjectContentEventStream provides handling of EventStreams for // the SelectObjectContent API. // @@ -22313,6 +22548,8 @@ type SelectObjectContentEventStream struct { // may result in resource leaks. func (es *SelectObjectContentEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -22322,8 +22559,6 @@ func (es *SelectObjectContentEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } @@ -22537,17 +22772,23 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, see - // Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Algorithm used to encrypt the object. For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // The SSE Customer Key. For more information, see Server-Side Encryption (Using // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // The SSE Customer Key MD5. 
For more information, see Server-Side Encryption // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -22668,6 +22909,12 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` @@ -22792,13 +23039,15 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec } // Describes the default server-side encryption to apply to new objects in the -// bucket. If Put Object request does not specify any server-side encryption, -// this default encryption will be applied. +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` // KMS master key ID to use for the default encryption. This parameter is allowed - // if SSEAlgorithm is aws:kms. + // if and only if SSEAlgorithm is set to aws:kms. KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -22842,8 +23091,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc return s } -// Container for server-side encryption configuration rules. Currently S3 supports -// one rule only. +// Specifies the default server-side-encryption configuration. type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -22893,13 +23141,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu return s } -// Container for information about a particular server-side encryption configuration -// rule. +// Specifies the default server-side encryption configuration. type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // Describes the default server-side encryption to apply to new objects in the - // bucket. If Put Object request does not specify any server-side encryption, + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } @@ -22935,13 +23182,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } -// A container for filters that define which source objects should be replicated. +// A container that describes additional filters for identifying the source +// objects that you want to replicate. 
You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// an AWS KMS-Managed Key (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // A container for filter information for the selection of S3 objects encrypted - // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, - // this element is required. + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } @@ -22981,8 +23232,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // If the status is not Enabled, replication for S3 objects encrypted with AWS - // KMS is disabled. + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an AWS KMS-managed key. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -23098,11 +23349,14 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // A container used to describe how data related to the storage class analysis - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -23342,16 +23596,20 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { } // A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3 +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -23360,7 +23618,7 @@ type TopicConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // will publish a message when it detects events of the specified type. + // publishes a message when it detects events of the specified type. 
// // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -23469,18 +23727,19 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } +// Specifies when an object transitions to a specified storage class. type Transition struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. Days *int64 `type:"integer"` - // The class of storage used to store the object. + // The storage class to which you want the object to transition. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -23513,7 +23772,7 @@ func (s *Transition) SetStorageClass(v string) *Transition { } type UploadPartCopyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23550,7 +23809,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -23581,7 +23840,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -23843,7 +24102,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy } type UploadPartInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` // Object data. Body io.ReadSeeker `type:"blob"` @@ -23857,7 +24116,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. 
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // Object key for which the multipart upload was initiated.
@@ -23886,7 +24147,7 @@ type UploadPartInput struct { // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -24092,6 +24353,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"`
@@ -24126,15 +24390,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } +// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior.
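// For illustration, a minimal sketch of a website configuration with index
// and error documents (bucket and document names are placeholders):
//
//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
//        Bucket: aws.String("my-bucket"),
//        WebsiteConfiguration: &s3.WebsiteConfiguration{
//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
//        },
//    })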
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -24443,6 +24714,9 @@ const ( // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index bc68a46acfa..9ba8a788720 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 95f2456363e..23d386b16c8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -17,7 +17,8 @@ func defaultInitClientFn(c *client.Client) { // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 39b912c260b..4b65f71531a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -63,6 +63,20 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. 
+// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index d17dcc9dadc..07e1297371b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -46,11 +46,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 8010c4fa196..b71c835deef 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,6 +3,7 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) { } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := 
base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or set the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // In the backwards-compatible case, the header's value is not base64 encoded + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update the key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) } }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index fde3050f95b..f6a69aed11b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "unable to read response body", err), + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), r.HTTPResponse.StatusCode, r.RequestID, )
@@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { + if err.Code() == request.ErrCodeSerialization { r.Error = nil return }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 1db7e133baf..5b63fac72ff 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct {
@@ -42,29 +43,34 @@ func unmarshalError(r *request.Request) { return } - var errCode, errMsg string - // Attempt to parse error from body if it is known - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - errCode = "SerializationError" - errMsg = "failed to decode S3 XML error response" - } else { - errCode = resp.Code - errMsg = resp.Message + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + if err == io.EOF { + // Only capture the error if an unmarshal error occurs that is not EOF, + // because S3 might send an error without an error message, which causes + // the XML unmarshal to fail with EOF.
err = nil } + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { + if len(errResp.Code) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), + awserr.New(errResp.Code, errResp.Message, err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go index 2d13335b45d..9ec8ca04541 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go @@ -443,7 +443,7 @@ func (c *S3Control) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, o // // Example iterating over at most 3 pages of a ListJobs operation. // pageNum := 0 // err := client.ListJobsPages(params, -// func(page *ListJobsOutput, lastPage bool) bool { +// func(page *s3control.ListJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -475,10 +475,12 @@ func (c *S3Control) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -931,7 +933,7 @@ func (s *CreateJobOutput) SetJobId(v string) *CreateJobOutput { } type DeletePublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` // The account ID for the AWS account whose block public access configuration // you want to delete. 
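// For illustration, a sketch of the pagination pattern described in the
// ListJobsPages doc comment above, stopping after three pages (the account ID
// is a placeholder):
//
//    svc := s3control.New(session.Must(session.NewSession()))
//    pageNum := 0
//    err := svc.ListJobsPages(&s3control.ListJobsInput{
//        AccountId: aws.String("123456789012"),
//    }, func(page *s3control.ListJobsOutput, lastPage bool) bool {
//        pageNum++
//        fmt.Println(page)
//        return pageNum <= 3
//    })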
@@ -993,7 +995,7 @@ func (s DeletePublicAccessBlockOutput) GoString() string {
}

type DescribeJobInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DescribeJobRequest" type:"structure"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
@@ -1079,7 +1081,7 @@ func (s *DescribeJobOutput) SetJob(v *JobDescriptor) *DescribeJobOutput {
}

type GetPublicAccessBlockInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
@@ -1896,7 +1898,7 @@ func (s *LambdaInvokeOperation) SetFunctionArn(v string) *LambdaInvokeOperation
}

type ListJobsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListJobsRequest" type:"structure"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
@@ -2059,7 +2061,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi
}

type PutPublicAccessBlockInput struct {
-	_ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
+	_ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
@@ -2874,7 +2876,7 @@ func (s *S3Tag) SetValue(v string) *S3Tag {
}

type UpdateJobPriorityInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"UpdateJobPriorityRequest" type:"structure"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
@@ -2986,7 +2988,7 @@ func (s *UpdateJobPriorityOutput) SetPriority(v int64) *UpdateJobPriorityOutput
}

type UpdateJobStatusInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"UpdateJobStatusRequest" type:"structure"`

	// AccountId is a required field
	AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"`
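The hunks above attach locationName tags to the request shapes, naming the wire-level root element for each operation (for example DeletePublicAccessBlockRequest). These are ordinary struct tags that the SDK's protocol marshalers read via reflection; a sketch of how such a tag can be inspected, using only the standard reflect package — the mirror type and rootName helper are illustrative, not SDK code:

```go
package main

import (
	"fmt"
	"reflect"
)

// deleteRequest mimics the generated input shape: the leading blank field
// carries protocol metadata in its struct tags.
type deleteRequest struct {
	_         struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`
	AccountId *string  `location:"header" locationName:"x-amz-account-id" type:"string"`
}

// rootName is a hypothetical helper: it reads the locationName tag off the
// leading metadata field, the way a marshaler would discover the root element.
func rootName(v interface{}) string {
	f := reflect.TypeOf(v).Field(0) // the leading metadata field
	return f.Tag.Get("locationName")
}

func main() {
	fmt.Println(rootName(deleteRequest{})) // DeletePublicAccessBlockRequest
}
```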
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go
index 377c9d55d55..827741ce235 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go
@@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3Control {
	if c.SigningNameDerived || len(c.SigningName) == 0 {
		c.SigningName = "s3"
	}
-	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3Control {
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3Control {
	svc := &S3Control{
		Client: client.New(
			cfg,
@@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2018-08-20",
			},
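The service.go change above threads the resolved endpoint's partition ID into the client metadata next to the signing name and region; the caller-facing constructor is unchanged. A hedged usage sketch — the region is a placeholder, and reading the promoted ClientInfo fields (including the new PartitionID) assumes the SDK version vendored by this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	// session.Must panics on configuration errors; acceptable for a sketch.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

	// New resolves the endpoint and, after this change, records the partition
	// ID (e.g. "aws") alongside the signing metadata.
	svc := s3control.New(sess)
	fmt.Println(svc.ClientInfo.ServiceName, svc.ClientInfo.PartitionID)
}
```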
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go
index 82e34d591ac..3a9ea70adc4 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go
@@ -59,7 +59,8 @@ func (c *SageMaker) AddTagsRequest(input *AddTagsInput) (req *request.Request, o
//
// Adds or overwrites one or more tags for the specified Amazon SageMaker resource.
// You can add tags to notebook instances, training jobs, hyperparameter tuning
-// jobs, models, endpoint configurations, and endpoints.
+// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,
+// and endpoints.
//
// Each tag consists of a key and an optional value. Tag keys must be unique
// per resource. For more information about tags, see For more information,
@@ -227,7 +228,7 @@ func (c *SageMaker) CreateCodeRepositoryRequest(input *CreateCodeRepositoryInput
// more than one notebook instance, and it persists independently from the lifecycle
// of any notebook instances it is associated with.
//
-// The repository can be hosted either in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
+// The repository can be hosted either in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
// or in any other Git repository.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -315,7 +316,7 @@ func (c *SageMaker) CreateCompilationJobRequest(input *CreateCompilationJobInput
//
//    * A name for the compilation job
//
-//    * Information about the input model artifacts
+//    * Information about the input model artifacts
//
//    * The output location for the compiled model and the device (target) that
//    the model runs on
@@ -419,6 +420,10 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ
//
// Use this API only for hosting models using Amazon SageMaker hosting services.
//
+// You must not delete an EndpointConfig in use by an endpoint that is live
+// or while the UpdateEndpoint or CreateEndpoint operations are being performed
+// on the endpoint. To update an endpoint, you must create a new EndpointConfig.
+//
// The endpoint name must be unique within an AWS Region in your AWS account.
//
// When it receives the request, Amazon SageMaker creates the endpoint, launches
@@ -438,7 +443,7 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ
// model artifacts from the S3 path you provided. AWS STS is activated in your
// IAM user account by default. If you previously deactivated AWS STS for a
// region, you need to reactivate AWS STS for that region. For more information,
-// see Activating and Deactivating AWS STS i an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// see Activating and Deactivating AWS STS in an AWS Region (IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the AWS Identity and Access Management User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -724,11 +729,11 @@ func (c *SageMaker) CreateLabelingJobRequest(input *CreateLabelingJobInput) (req
// that need to be labeled by a human. Automated data labeling uses active learning
// to determine if a data object can be labeled by machine or if it needs to
// be sent to a human worker. For more information, see Using Automated Data
-// Labeling (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html).
+// Labeling (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html).
//
// The data objects to be labeled are contained in an Amazon S3 bucket. You
// create a manifest file that describes the location of each object. For more
-// information, see Using Input and Output Data (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html).
+// information, see Using Input and Output Data (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html).
//
// The output can be used as the manifest file for another labeling job or as
// training data for your machine learning models.
@@ -1031,7 +1036,8 @@ func (c *SageMaker) CreateNotebookInstanceRequest(input *CreateNotebookInstanceI
// groups allow it.
//
// After creating the notebook instance, Amazon SageMaker returns its Amazon
-// Resource Name (ARN).
+// Resource Name (ARN). You can't change the name of a notebook instance after
+// you create it.
//
// After Amazon SageMaker creates the notebook instance, you can connect to
// the Jupyter server and work in Jupyter notebooks. For example, you can write
@@ -1221,15 +1227,18 @@ func (c *SageMaker) CreatePresignedNotebookInstanceUrlRequest(input *CreatePresi
// home page from the notebook instance. The console uses this API to get the
// URL and show the page.
//
-// You can restrict access to this API and to the URL that it returns to a list
-// of IP addresses that you specify. To restrict access, attach an IAM policy
-// that denies access to this API unless the call comes from an IP address in
-// the specified list to every AWS Identity and Access Management user, group,
-// or role used to access the notebook instance. Use the NotIpAddress condition
+// IAM authorization policies for this API are also enforced for every HTTP
+// request and WebSocket frame that attempts to connect to the notebook instance.
+// For example, you can restrict access to this API and to the URL that it returns
+// to a list of IP addresses that you specify. Use the NotIpAddress condition
// operator and the aws:SourceIP condition context key to specify the list of
// IP addresses that you want to have access to the notebook instance. For more
// information, see Limit Access to a Notebook Instance by IP Address (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-ip-filter.html).
//
+// The URL that you get from a call to this API is valid only for 5 minutes. If you try
+// to use the URL after the 5-minute limit expires, you are directed to the
+// AWS console sign-in page.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
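A usage sketch for the CreatePresignedNotebookInstanceUrl behavior just described: per the notes above, IAM policies are enforced per HTTP request and WebSocket frame, and the returned URL lapses after 5 minutes if unused. The instance name and expiry value are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	// SessionExpirationDurationInSeconds bounds the console session; the URL
	// itself must still be used within 5 minutes of being minted.
	out, err := svc.CreatePresignedNotebookInstanceUrl(&sagemaker.CreatePresignedNotebookInstanceUrlInput{
		NotebookInstanceName:               aws.String("my-notebook"),
		SessionExpirationDurationInSeconds: aws.Int64(1800),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AuthorizedUrl))
}
```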
@@ -1314,30 +1323,37 @@ func (c *SageMaker) CreateTrainingJobRequest(input *CreateTrainingJobInput) (req
//
//    * AlgorithmSpecification - Identifies the training algorithm to use.
//
-//    * HyperParameters - Specify these algorithm-specific parameters to influence
-//    the quality of the final model. For a list of hyperparameters for each
-//    training algorithm provided by Amazon SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
+//    * HyperParameters - Specify these algorithm-specific parameters to enable
+//    the estimation of model parameters during training. Hyperparameters can
+//    be tuned to optimize this learning process. For a list of hyperparameters
+//    for each training algorithm provided by Amazon SageMaker, see Algorithms
+//    (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
//
+//    * InputDataConfig - Describes the training dataset and the Amazon S3,
+//    EFS, or FSx location where it is stored.
//
-//    * InputDataConfig - Describes the training dataset and the Amazon S3 location
-//    where it is stored.
-//
-//    * OutputDataConfig - Identifies the Amazon S3 location where you want
-//    Amazon SageMaker to save the results of model training.
+//    * OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon
+//    SageMaker to save the results of model training.
//
//    * ResourceConfig - Identifies the resources, ML compute instances, and
//    ML storage volumes to deploy for model training. In distributed training,
//    you specify more than one instance.
//
+//    * EnableManagedSpotTraining - Optimize the cost of training machine learning
+//    models by up to 80% by using Amazon EC2 Spot instances. For more information,
+//    see Managed Spot Training (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html).
+//
//    * RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes
//    to perform tasks on your behalf during model training. You must grant
//    this role the necessary permissions so that Amazon SageMaker can successfully
//    complete model training.
//
-//    * StoppingCondition - Sets a duration for training. Use this parameter
-//    to cap model training costs.
+//    * StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds
+//    to set a time limit for training. Use MaxWaitTimeInSeconds to specify
+//    how long you are willing to wait for a managed spot training job to
+//    complete.
//
-// For more information about Amazon SageMaker, see How It Works (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html)
+// For more information about Amazon SageMaker, see How It Works (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3819,7 +3835,7 @@ func (c *SageMaker) ListCompilationJobsWithContext(ctx aws.Context, input *ListC
//
// Example iterating over at most 3 pages of a ListCompilationJobs operation.
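Before the paging examples continue below, a sketch tying together the managed spot training fields enumerated above (EnableManagedSpotTraining, CheckpointConfig, and the MaxRuntimeInSeconds/MaxWaitTimeInSeconds stopping bounds). Every name, ARN, image URI, and S3 path is a placeholder, and input channels are omitted for brevity:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	_, err := svc.CreateTrainingJob(&sagemaker.CreateTrainingJobInput{
		TrainingJobName: aws.String("spot-example"),
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/SageMakerRole"),
		AlgorithmSpecification: &sagemaker.AlgorithmSpecification{
			TrainingImage:     aws.String("111122223333.dkr.ecr.us-west-2.amazonaws.com/algo:latest"),
			TrainingInputMode: aws.String("File"),
		},
		ResourceConfig: &sagemaker.ResourceConfig{
			InstanceCount:  aws.Int64(1),
			InstanceType:   aws.String("ml.m5.xlarge"),
			VolumeSizeInGB: aws.Int64(50),
		},
		OutputDataConfig: &sagemaker.OutputDataConfig{
			S3OutputPath: aws.String("s3://my-bucket/output/"),
		},
		// Managed spot training: MaxWaitTimeInSeconds bounds how long to wait
		// for Spot capacity, and CheckpointConfig lets interrupted jobs resume.
		EnableManagedSpotTraining: aws.Bool(true),
		CheckpointConfig: &sagemaker.CheckpointConfig{
			S3Uri: aws.String("s3://my-bucket/checkpoints/"),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds:  aws.Int64(3600),
			MaxWaitTimeInSeconds: aws.Int64(7200),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```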
// pageNum := 0 // err := client.ListCompilationJobsPages(params, -// func(page *ListCompilationJobsOutput, lastPage bool) bool { +// func(page *sagemaker.ListCompilationJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3851,10 +3867,12 @@ func (c *SageMaker) ListCompilationJobsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCompilationJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCompilationJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3949,7 +3967,7 @@ func (c *SageMaker) ListEndpointConfigsWithContext(ctx aws.Context, input *ListE // // Example iterating over at most 3 pages of a ListEndpointConfigs operation. // pageNum := 0 // err := client.ListEndpointConfigsPages(params, -// func(page *ListEndpointConfigsOutput, lastPage bool) bool { +// func(page *sagemaker.ListEndpointConfigsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3981,10 +3999,12 @@ func (c *SageMaker) ListEndpointConfigsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEndpointConfigsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEndpointConfigsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4079,7 +4099,7 @@ func (c *SageMaker) ListEndpointsWithContext(ctx aws.Context, input *ListEndpoin // // Example iterating over at most 3 pages of a ListEndpoints operation. // pageNum := 0 // err := client.ListEndpointsPages(params, -// func(page *ListEndpointsOutput, lastPage bool) bool { +// func(page *sagemaker.ListEndpointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4111,10 +4131,12 @@ func (c *SageMaker) ListEndpointsPagesWithContext(ctx aws.Context, input *ListEn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEndpointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEndpointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4210,7 +4232,7 @@ func (c *SageMaker) ListHyperParameterTuningJobsWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a ListHyperParameterTuningJobs operation. // pageNum := 0 // err := client.ListHyperParameterTuningJobsPages(params, -// func(page *ListHyperParameterTuningJobsOutput, lastPage bool) bool { +// func(page *sagemaker.ListHyperParameterTuningJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4242,10 +4264,12 @@ func (c *SageMaker) ListHyperParameterTuningJobsPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListHyperParameterTuningJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListHyperParameterTuningJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4340,7 +4364,7 @@ func (c *SageMaker) ListLabelingJobsWithContext(ctx aws.Context, input *ListLabe // // Example iterating over at most 3 pages of a ListLabelingJobs operation. 
// pageNum := 0 // err := client.ListLabelingJobsPages(params, -// func(page *ListLabelingJobsOutput, lastPage bool) bool { +// func(page *sagemaker.ListLabelingJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4372,10 +4396,12 @@ func (c *SageMaker) ListLabelingJobsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListLabelingJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListLabelingJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4475,7 +4501,7 @@ func (c *SageMaker) ListLabelingJobsForWorkteamWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListLabelingJobsForWorkteam operation. // pageNum := 0 // err := client.ListLabelingJobsForWorkteamPages(params, -// func(page *ListLabelingJobsForWorkteamOutput, lastPage bool) bool { +// func(page *sagemaker.ListLabelingJobsForWorkteamOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4507,10 +4533,12 @@ func (c *SageMaker) ListLabelingJobsForWorkteamPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListLabelingJobsForWorkteamOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListLabelingJobsForWorkteamOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4680,7 +4708,7 @@ func (c *SageMaker) ListModelsWithContext(ctx aws.Context, input *ListModelsInpu // // Example iterating over at most 3 pages of a ListModels operation. // pageNum := 0 // err := client.ListModelsPages(params, -// func(page *ListModelsOutput, lastPage bool) bool { +// func(page *sagemaker.ListModelsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4712,10 +4740,12 @@ func (c *SageMaker) ListModelsPagesWithContext(ctx aws.Context, input *ListModel }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListModelsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListModelsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4811,7 +4841,7 @@ func (c *SageMaker) ListNotebookInstanceLifecycleConfigsWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a ListNotebookInstanceLifecycleConfigs operation. // pageNum := 0 // err := client.ListNotebookInstanceLifecycleConfigsPages(params, -// func(page *ListNotebookInstanceLifecycleConfigsOutput, lastPage bool) bool { +// func(page *sagemaker.ListNotebookInstanceLifecycleConfigsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4843,10 +4873,12 @@ func (c *SageMaker) ListNotebookInstanceLifecycleConfigsPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNotebookInstanceLifecycleConfigsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNotebookInstanceLifecycleConfigsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4942,7 +4974,7 @@ func (c *SageMaker) ListNotebookInstancesWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListNotebookInstances operation. 
// pageNum := 0 // err := client.ListNotebookInstancesPages(params, -// func(page *ListNotebookInstancesOutput, lastPage bool) bool { +// func(page *sagemaker.ListNotebookInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4974,10 +5006,12 @@ func (c *SageMaker) ListNotebookInstancesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNotebookInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNotebookInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5074,7 +5108,7 @@ func (c *SageMaker) ListSubscribedWorkteamsWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListSubscribedWorkteams operation. // pageNum := 0 // err := client.ListSubscribedWorkteamsPages(params, -// func(page *ListSubscribedWorkteamsOutput, lastPage bool) bool { +// func(page *sagemaker.ListSubscribedWorkteamsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5106,10 +5140,12 @@ func (c *SageMaker) ListSubscribedWorkteamsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSubscribedWorkteamsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSubscribedWorkteamsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5204,7 +5240,7 @@ func (c *SageMaker) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, o // // Example iterating over at most 3 pages of a ListTags operation. // pageNum := 0 // err := client.ListTagsPages(params, -// func(page *ListTagsOutput, lastPage bool) bool { +// func(page *sagemaker.ListTagsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5236,10 +5272,12 @@ func (c *SageMaker) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5334,7 +5372,7 @@ func (c *SageMaker) ListTrainingJobsWithContext(ctx aws.Context, input *ListTrai // // Example iterating over at most 3 pages of a ListTrainingJobs operation. // pageNum := 0 // err := client.ListTrainingJobsPages(params, -// func(page *ListTrainingJobsOutput, lastPage bool) bool { +// func(page *sagemaker.ListTrainingJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5366,10 +5404,12 @@ func (c *SageMaker) ListTrainingJobsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTrainingJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTrainingJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5470,7 +5510,7 @@ func (c *SageMaker) ListTrainingJobsForHyperParameterTuningJobWithContext(ctx aw // // Example iterating over at most 3 pages of a ListTrainingJobsForHyperParameterTuningJob operation. 
// pageNum := 0 // err := client.ListTrainingJobsForHyperParameterTuningJobPages(params, -// func(page *ListTrainingJobsForHyperParameterTuningJobOutput, lastPage bool) bool { +// func(page *sagemaker.ListTrainingJobsForHyperParameterTuningJobOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5502,10 +5542,12 @@ func (c *SageMaker) ListTrainingJobsForHyperParameterTuningJobPagesWithContext(c }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTrainingJobsForHyperParameterTuningJobOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTrainingJobsForHyperParameterTuningJobOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5600,7 +5642,7 @@ func (c *SageMaker) ListTransformJobsWithContext(ctx aws.Context, input *ListTra // // Example iterating over at most 3 pages of a ListTransformJobs operation. // pageNum := 0 // err := client.ListTransformJobsPages(params, -// func(page *ListTransformJobsOutput, lastPage bool) bool { +// func(page *sagemaker.ListTransformJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5632,10 +5674,12 @@ func (c *SageMaker) ListTransformJobsPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTransformJobsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTransformJobsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5732,7 +5776,7 @@ func (c *SageMaker) ListWorkteamsWithContext(ctx aws.Context, input *ListWorktea // // Example iterating over at most 3 pages of a ListWorkteams operation. // pageNum := 0 // err := client.ListWorkteamsPages(params, -// func(page *ListWorkteamsOutput, lastPage bool) bool { +// func(page *sagemaker.ListWorkteamsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5764,10 +5808,12 @@ func (c *SageMaker) ListWorkteamsPagesWithContext(ctx aws.Context, input *ListWo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListWorkteamsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListWorkteamsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5942,7 +5988,7 @@ func (c *SageMaker) SearchWithContext(ctx aws.Context, input *SearchInput, opts // // Example iterating over at most 3 pages of a Search operation. // pageNum := 0 // err := client.SearchPages(params, -// func(page *SearchOutput, lastPage bool) bool { +// func(page *sagemaker.SearchOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5974,10 +6020,12 @@ func (c *SageMaker) SearchPagesWithContext(ctx aws.Context, input *SearchInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SearchOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SearchOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6370,7 +6418,8 @@ func (c *SageMaker) StopNotebookInstanceRequest(input *StopNotebookInstanceInput // // Terminates the ML compute instance. Before terminating the instance, Amazon // SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves -// the ML storage volume. +// the ML storage volume. Amazon SageMaker stops charging you for the ML compute +// instance when you call StopNotebookInstance. // // To access data on the ML storage volume for a notebook instance that has // been terminated, call the StartNotebookInstance API. 
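As an aside to the StopNotebookInstance doc change above — billing for the ML compute instance stops at the stop call, while the ML storage volume is preserved — a minimal usage sketch with a placeholder instance name:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	// Stopping disconnects the instance from its ML storage volume but keeps
	// the volume; restart later with StartNotebookInstance.
	if _, err := svc.StopNotebookInstance(&sagemaker.StopNotebookInstanceInput{
		NotebookInstanceName: aws.String("my-notebook"),
	}); err != nil {
		log.Fatal(err)
	}
}
```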
StartNotebookInstance @@ -6704,8 +6753,9 @@ func (c *SageMaker) UpdateEndpointRequest(input *UpdateEndpointInput) (req *requ // check the status of an endpoint, use the DescribeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) // API. // -// You cannot update an endpoint with the current EndpointConfig. To update -// an endpoint, you must create a new EndpointConfig. +// You must not delete an EndpointConfig in use by an endpoint that is live +// or while the UpdateEndpoint or CreateEndpoint operations are being performed +// on the endpoint. To update an endpoint, you must create a new EndpointConfig. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6873,8 +6923,7 @@ func (c *SageMaker) UpdateNotebookInstanceRequest(input *UpdateNotebookInstanceI // // Updates a notebook instance. NotebookInstance updates include upgrading or // downgrading the ML compute instance used for your notebook instance to accommodate -// changes in your workload requirements. You can also update the VPC security -// groups. +// changes in your workload requirements. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7585,61 +7634,66 @@ type AnnotationConsolidationConfig struct { // the following Lambda functions: // // * Bounding box - Finds the most similar boxes from different workers based - // on the Jaccard index of the boxes. - // - // arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox - // - // arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox - // - // arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox - // - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox - // - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox + // on the Jaccard index of the boxes. arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox + // arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox // // * Image classification - Uses a variant of the Expectation Maximization // approach to estimate the true class of an image based on annotations from - // individual workers. - // - // arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass - // - // arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass - // - // arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass - // - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass - // - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass + // individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass + // arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass // // * Semantic segmentation - Treats each pixel in an image as a multi-class // classification and treats pixel annotations from workers as "votes" for - // the correct label. - // - // arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation - // - // arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation - // - // arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation - // - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation - // - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation + // the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation + // arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation + // arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation // // * Text classification - Uses a variant of the Expectation Maximization // approach to estimate the true class of text based on annotations from - // individual workers. - // - // arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass - // - // arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass - // - // arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass - // - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass - // - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass - // - // For more information, see Annotation Consolidation (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). + // individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass
+	//    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass
+	//    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass
+	//    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass
+	//    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass
+	//    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass
+	//    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass
+	//    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass
+	//
+	//    * Named entity recognition - Groups similar selections and calculates
+	//    aggregate boundaries, resolving to most-assigned label. arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition
+	//    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition
+	//
+	// For more information, see Annotation Consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html).
	//
	// AnnotationConsolidationLambdaArn is a required field
	AnnotationConsolidationLambdaArn *string `type:"string" required:"true"`
@@ -7811,7 +7865,9 @@ type Channel struct {
	// algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps
	// each individual S3 object in a RecordIO record. If the input data is already
	// in RecordIO format, you don't need to set this attribute. For more information,
-	// see Create a Dataset Using RecordIO (https://mxnet.incubator.apache.org/architecture/note_data_loading.html#data-format)
+	// see Create a Dataset Using RecordIO (https://mxnet.incubator.apache.org/architecture/note_data_loading.html#data-format).
+	//
+	// In File mode, leave this field unset or set it to None.
	RecordWrapperType *string `type:"string" enum:"RecordWrapper"`

	// A configuration for a shuffle option for input data in a channel. If you
@@ -8019,6 +8075,57 @@ func (s *ChannelSpecification) SetSupportedInputModes(v []*string) *ChannelSpeci
	return s
}

+// Contains information about the output location for managed spot training
+// checkpoint data.
+type CheckpointConfig struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) The local directory where checkpoints are written. The default
+	// directory is /opt/ml/checkpoints/.
+	LocalPath *string `type:"string"`
+
+	// Identifies the S3 path where you want Amazon SageMaker to store checkpoints.
+	// For example, s3://bucket-name/key-name-prefix.
+	//
+	// S3Uri is a required field
+	S3Uri *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CheckpointConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CheckpointConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CheckpointConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CheckpointConfig"}
+	if s.S3Uri == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3Uri"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetLocalPath sets the LocalPath field's value.
+func (s *CheckpointConfig) SetLocalPath(v string) *CheckpointConfig {
+	s.LocalPath = &v
+	return s
+}
+
+// SetS3Uri sets the S3Uri field's value.
+func (s *CheckpointConfig) SetS3Uri(v string) *CheckpointConfig {
+	s.S3Uri = &v
+	return s
+}
+
// Specifies summary information about a Git repository.
type CodeRepositorySummary struct {
	_ struct{} `type:"structure"`
@@ -8269,7 +8376,18 @@ func (s *CompilationJobSummary) SetLastModifiedTime(v time.Time) *CompilationJob
type ContainerDefinition struct {
	_ struct{} `type:"structure"`

-	// This parameter is ignored.
+	// This parameter is ignored for models that contain only a PrimaryContainer.
+	//
+	// When a ContainerDefinition is part of an inference pipeline, the value of
+	// this parameter uniquely identifies the container for the purposes of logging
+	// and metrics. For information, see Use Logs and Metrics to Monitor an Inference
+	// Pipeline (https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html).
+	// If you don't specify a value for this parameter for a ContainerDefinition
+	// that is part of an inference pipeline, a unique name is automatically assigned
+	// based on the position of the ContainerDefinition in the pipeline. If you
+	// specify a value for the ContainerHostName for any ContainerDefinition that
+	// is part of an inference pipeline, you must specify a value for the ContainerHostName
+	// parameter of every ContainerDefinition in that pipeline.
	ContainerHostname *string `type:"string"`

	// The environment variables to set in the Docker container. Each key and value
@@ -8287,18 +8405,24 @@ type ContainerDefinition struct {
	// The S3 path where the model artifacts, which result from model training,
	// are stored. This path must point to a single gzip compressed tar archive
-	// (.tar.gz suffix).
+	// (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms,
+	// but not if you use your own algorithms. For more information on built-in
+	// algorithms, see Common Parameters (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html).
	//
	// If you provide a value for this parameter, Amazon SageMaker uses AWS Security
	// Token Service to download model artifacts from the S3 path you provide. AWS
	// STS is activated in your IAM user account by default. If you previously deactivated
	// AWS STS for a region, you need to reactivate AWS STS for that region.
For
	// more information, see Activating and Deactivating AWS STS in an AWS Region
-	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
	// in the AWS Identity and Access Management User Guide.
+	//
+	// If you use a built-in algorithm to create a model, Amazon SageMaker requires
+	// that you provide an S3 path to the model artifacts in ModelDataUrl.
	ModelDataUrl *string `type:"string"`

-	// The name of the model package to use to create the model.
+	// The name or Amazon Resource Name (ARN) of the model package to use to create
+	// the model.
	ModelPackageName *string `min:"1" type:"string"`
}
@@ -8378,23 +8502,30 @@ type ContinuousParameterRange struct {
	// The scale that hyperparameter tuning uses to search the hyperparameter range.
	// For information about choosing a hyperparameter scale, see Hyperparameter
-	// Range Scaling (http://docs.aws.amazon.com//sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type).
+	// Scaling (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type).
	// One of the following values:
	//
-	// AutoAmazon SageMaker hyperparameter tuning chooses the best scale for the
-	// hyperparameter.
+	// Auto
	//
-	// LinearHyperparameter tuning searches the values in the hyperparameter range
-	// by using a linear scale.
+	// Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.
	//
-	// LogarithmicHyperparemeter tuning searches the values in the hyperparameter
-	// range by using a logarithmic scale.
+	// Linear
+	//
+	// Hyperparameter tuning searches the values in the hyperparameter range by
+	// using a linear scale.
+	//
+	// Logarithmic
+	//
+	// Hyperparameter tuning searches the values in the hyperparameter range by
+	// using a logarithmic scale.
	//
	// Logarithmic scaling works only for ranges that have only values greater than
	// 0.
	//
-	// ReverseLogarithmicHyperparemeter tuning searches the values in the hyperparameter
-	// range by using a reverse logarithmic scale.
+	// ReverseLogarithmic
+	//
+	// Hyperparameter tuning searches the values in the hyperparameter range by
+	// using a reverse logarithmic scale.
	//
	// Reverse logarithmic scaling works only for ranges that are entirely within
	// the range 0<=x<1.0.
@@ -8777,7 +8908,7 @@ type CreateCompilationJobInput struct {
	// OutputConfig is a required field
	OutputConfig *OutputConfig `type:"structure" required:"true"`

-	// The Amazon Resource Name (ARN) of an IIAMAM role that enables Amazon SageMaker
+	// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker
	// to perform tasks on your behalf.
	//
	// During model compilation, Amazon SageMaker needs your permission to:
@@ -8797,7 +8928,9 @@ type CreateCompilationJobInput struct {
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

-	// The duration allowed for model compilation.
+	// Specifies a limit to how long a model compilation job can run. When the job
+	// reaches the time limit, Amazon SageMaker ends the compilation job. Use this
+	// API to cap model training costs.
	//
	// StoppingCondition is a required field
	StoppingCondition *StoppingCondition `type:"structure" required:"true"`
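Given the scaling types spelled out above, a sketch of a ContinuousParameterRange searched on a logarithmic scale; the parameter name and bounds are illustrative, and the ScalingType string is taken verbatim from the documentation:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Learning rates span orders of magnitude, so a logarithmic search scale
	// samples the range more usefully than a linear one.
	lr := &sagemaker.ContinuousParameterRange{
		Name:        aws.String("learning_rate"),
		MinValue:    aws.String("0.0001"),
		MaxValue:    aws.String("0.1"),
		ScalingType: aws.String("Logarithmic"),
	}
	fmt.Println(lr)
}
```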
@@ -8930,15 +9063,27 @@ type CreateEndpointConfigInput struct {
	// The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon
	// SageMaker uses to encrypt data on the storage volume attached to the ML compute
	// instance that hosts the endpoint.
+	//
+	// Nitro-based instances do not support encryption with AWS KMS. If any of the
+	// models that you specify in the ProductionVariants parameter use nitro-based
+	// instances, do not specify a value for the KmsKeyId parameter. If you specify
+	// a value for KmsKeyId when using any nitro-based instances, the call to CreateEndpointConfig
+	// fails.
+	//
+	// For a list of nitro-based instances, see Nitro-based Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances)
+	// in the Amazon Elastic Compute Cloud User Guide for Linux Instances.
+	//
+	// For more information about storage volumes on nitro-based instances, see
+	// Amazon EBS and NVMe on Linux Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html).
	KmsKeyId *string `type:"string"`

-	// An array of ProductionVariant objects, one for each model that you want to
+	// A list of ProductionVariant objects, one for each model that you want to
	// host at this endpoint.
	//
	// ProductionVariants is a required field
	ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"`

-	// An array of key-value pairs. For more information, see Using Cost Allocation
+	// A list of key-value pairs. For more information, see Using Cost Allocation
	// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what)
	// in the AWS Billing and Cost Management User Guide.
	Tags []*Tag `type:"list"`
@@ -9164,7 +9309,7 @@ type CreateHyperParameterTuningJobInput struct {
	// An array of key-value pairs. You can use tags to categorize your AWS resources
	// in different ways, for example, by purpose, owner, or environment. For more
-	// information, see AWS Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/).
+	// information, see AWS Tagging Strategies (https://docs.aws.amazon.com/https:/aws.amazon.com/answers/account-management/aws-tagging-strategies/).
	//
	// Tags that you specify for the tuning job are also added to all training jobs
	// that the tuning job launches.
@@ -9174,9 +9319,7 @@ type CreateHyperParameterTuningJobInput struct {
	// jobs that this tuning job launches, including static hyperparameters, input
	// data configuration, output data configuration, resource configuration, and
	// stopping condition.
-	//
-	// TrainingJobDefinition is a required field
-	TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure" required:"true"`
+	TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure"`

	// Specifies the configuration for starting the hyperparameter tuning job using
	// one or more previous tuning jobs as a starting point.
The results of previous @@ -9219,9 +9362,6 @@ func (s *CreateHyperParameterTuningJobInput) Validate() error { if s.HyperParameterTuningJobName != nil && len(*s.HyperParameterTuningJobName) < 1 { invalidParams.Add(request.NewErrParamMinLen("HyperParameterTuningJobName", 1)) } - if s.TrainingJobDefinition == nil { - invalidParams.Add(request.NewErrParamRequired("TrainingJobDefinition")) - } if s.HyperParameterTuningJobConfig != nil { if err := s.HyperParameterTuningJobConfig.Validate(); err != nil { invalidParams.AddNested("HyperParameterTuningJobConfig", err.(request.ErrInvalidParams)) @@ -9399,7 +9539,7 @@ type CreateLabelingJobInput struct { StoppingConditions *LabelingJobStoppingConditions `type:"structure"` // An array of key/value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -9611,9 +9751,10 @@ type CreateModelInput struct { // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` - // A VpcConfig object that specifies the VPC that you want your model to connect - // to. Control access to and from your model container by configuring the VPC. - // VpcConfig is used in hosting services and in batch transform. For more information, + // A VpcConfig (https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html) + // object that specifies the VPC that you want your model to connect to. Control + // access to and from your model container by configuring the VPC. VpcConfig + // is used in hosting services and in batch transform. For more information, // see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) // and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private // Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). @@ -9890,24 +10031,24 @@ type CreateNotebookInstanceInput struct { // A list of Elastic Inference (EI) instance types to associate with this notebook // instance. Currently, only one instance type can be associated with a notebook // instance. For more information, see Using Elastic Inference in Amazon SageMaker - // (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A Git repository to associate with the notebook instance as its default code // repository. 
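A sketch of the CreateModel call the documentation above describes, pinning the primary container into a VPC via the linked VpcConfig shape; all names, ARNs, image URIs, and network IDs are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	_, err := svc.CreateModel(&sagemaker.CreateModelInput{
		ModelName:        aws.String("example-model"),
		ExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/SageMakerRole"),
		PrimaryContainer: &sagemaker.ContainerDefinition{
			Image:        aws.String("111122223333.dkr.ecr.us-west-2.amazonaws.com/inference:latest"),
			ModelDataUrl: aws.String("s3://my-bucket/model.tar.gz"),
		},
		// VpcConfig confines the model container's traffic to the given
		// subnets and security groups, per the doc text above.
		VpcConfig: &sagemaker.VpcConfig{
			SecurityGroupIds: []*string{aws.String("sg-0123456789abcdef0")},
			Subnets:          []*string{aws.String("subnet-0123456789abcdef0")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```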
This can be either the name of a Git repository stored as a resource
-	// in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
+	// in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html)
	// or in any other Git repository. When you open a notebook instance, it opens
	// in the directory that contains this repository. For more information, see
-	// Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html).
+	// Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html).
	DefaultCodeRepository *string `min:"1" type:"string"`

	// Sets whether Amazon SageMaker provides internet access to the notebook instance.
@@ -9927,10 +10068,10 @@ type CreateNotebookInstanceInput struct {
	// InstanceType is a required field
	InstanceType *string `type:"string" required:"true" enum:"InstanceType"`

-	// If you provide a AWS KMS key ID, Amazon SageMaker uses it to encrypt data
-	// at rest on the ML storage volume that is attached to your notebook instance.
-	// The KMS key you provide must be enabled. For information, see Enabling and
-	// Disabling Keys (http://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html)
+	// The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon
+	// SageMaker uses to encrypt data on the storage volume attached to your notebook
+	// instance. The KMS key you provide must be enabled. For information, see Enabling
+	// and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html)
	// in the AWS Key Management Service Developer Guide.
	KmsKeyId *string `type:"string"`
@@ -9948,7 +10089,7 @@ type CreateNotebookInstanceInput struct {
	// SageMaker assumes this role to perform tasks on your behalf. You must grant
	// this role necessary permissions so Amazon SageMaker can perform these tasks.
	// The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com)
-	// permissions to assume this role. For more information, see Amazon SageMaker
+	// permissions to assume this role. For more information, see Amazon SageMaker
	// Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).
	//
	// To be able to pass this role to Amazon SageMaker, the caller of this API
@@ -10325,6 +10466,10 @@ type CreateTrainingJobInput struct {
	// AlgorithmSpecification is a required field
	AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"`

+	// Contains information about the output location for managed spot training
+	// checkpoint data.
+	CheckpointConfig *CheckpointConfig `type:"structure"`
+
	// To encrypt all communications between ML compute instances in distributed
	// training, choose True. Encryption provides greater security for distributed
	// training, but training might take longer. How long it takes depends on the
@@ -10334,6 +10479,18 @@
	// Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html).
	EnableInterContainerTrafficEncryption *bool `type:"boolean"`

+	// To train models using managed spot training, choose True. Managed spot training
+	// provides a fully managed and scalable infrastructure for training machine
+	// learning models. This option is useful when training jobs can be interrupted
+	// and when there is flexibility in when the training job is run.
+	//
+	// The complete and intermediate results of jobs are stored in an Amazon S3
+	// bucket, and can be used as a starting point to train models incrementally.
+	// Amazon SageMaker provides metrics and logs in CloudWatch. They can be used
+	// to see when managed spot training jobs are running, interrupted, resumed,
+	// or completed.
+	EnableManagedSpotTraining *bool `type:"boolean"`
+
	// Isolates the training container. No inbound or outbound network calls can
	// be made, except for calls between peers within a training cluster for distributed
	// training. If you enable network isolation for training jobs that are configured
@@ -10359,16 +10516,19 @@ type CreateTrainingJobInput struct {
	//
	// Algorithms can accept input data from one or more channels. For example,
	// an algorithm might have two channels of input data, training_data and validation_data.
-	// The configuration for each channel provides the S3 location where the input
-	// data is stored. It also provides information about the stored data: the MIME
-	// type, compression method, and whether the data is wrapped in RecordIO format.
+	// The configuration for each channel provides the S3, EFS, or FSx location
+	// where the input data is stored. It also provides information about the stored
+	// data: the MIME type, compression method, and whether the data is wrapped
+	// in RecordIO format.
	//
	// Depending on the input mode that the algorithm supports, Amazon SageMaker
	// either copies input data files from an S3 bucket to a local directory in
-	// the Docker container, or makes it available as input streams.
+	// the Docker container, or makes it available as input streams. For example,
+	// if you specify an EFS location, input data files will be made available as
+	// input streams. They do not need to be downloaded.
	InputDataConfig []*Channel `min:"1" type:"list"`

-	// Specifies the path to the S3 bucket where you want to store model artifacts.
+	// Specifies the path to the S3 location where you want to store model artifacts.
	// Amazon SageMaker creates subfolders for the artifacts.
	//
	// OutputDataConfig is a required field
@@ -10402,15 +10562,13 @@ type CreateTrainingJobInput struct {
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

-	// Sets a duration for training. Use this parameter to cap model training costs.
-	// To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which
-	// delays job termination for 120 seconds. Algorithms might use this 120-second
-	// window to save the model artifacts.
+	// Specifies a limit to how long a model training job can run. When the job
+	// reaches the time limit, Amazon SageMaker ends the training job. Use this
+	// API to cap model training costs.
	//
-	// When Amazon SageMaker terminates a job because the stopping condition has
-	// been met, training algorithms provided by Amazon SageMaker save the intermediate
-	// results of the job. This intermediate data is a valid model artifact. You
-	// can use it to create a model using the CreateModel API.
+	// To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which
+	// delays job termination for 120 seconds. Algorithms can use this 120-second
+	// window to save the model artifacts, so the results of training are not lost.
// // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` @@ -10478,6 +10636,11 @@ func (s *CreateTrainingJobInput) Validate() error { invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) } } + if s.CheckpointConfig != nil { + if err := s.CheckpointConfig.Validate(); err != nil { + invalidParams.AddNested("CheckpointConfig", err.(request.ErrInvalidParams)) + } + } if s.InputDataConfig != nil { for i, v := range s.InputDataConfig { if v == nil { @@ -10531,12 +10694,24 @@ func (s *CreateTrainingJobInput) SetAlgorithmSpecification(v *AlgorithmSpecifica return s } +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *CreateTrainingJobInput) SetCheckpointConfig(v *CheckpointConfig) *CreateTrainingJobInput { + s.CheckpointConfig = v + return s +} + // SetEnableInterContainerTrafficEncryption sets the EnableInterContainerTrafficEncryption field's value. func (s *CreateTrainingJobInput) SetEnableInterContainerTrafficEncryption(v bool) *CreateTrainingJobInput { s.EnableInterContainerTrafficEncryption = &v return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *CreateTrainingJobInput) SetEnableManagedSpotTraining(v bool) *CreateTrainingJobInput { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. func (s *CreateTrainingJobInput) SetEnableNetworkIsolation(v bool) *CreateTrainingJobInput { s.EnableNetworkIsolation = &v @@ -10626,25 +10801,39 @@ type CreateTransformJobInput struct { _ struct{} `type:"structure"` // Specifies the number of records to include in a mini-batch for an HTTP inference - // request. A recordis a single unit of input data that inference can be made on. For example, - // a single line in a CSV file is a record. + // request. A record is a single unit of input data that inference can be made + // on. For example, a single line in a CSV file is a record. // - // To enable the batch strategy, you must set SplitTypeto Line, RecordIO, or TFRecord. + // To enable the batch strategy, you must set SplitType to Line, RecordIO, or + // TFRecord. // // To use only one record when making an HTTP invocation request to a container, - // set BatchStrategyto SingleRecordand SplitTypeto Line. + // set BatchStrategy to SingleRecord and SplitType to Line. // - // To fit as many records in a mini-batch as can fit within the MaxPayloadInMBlimit, set BatchStrategyto MultiRecordand SplitTypeto Line + // To fit as many records in a mini-batch as can fit within the MaxPayloadInMB + // limit, set BatchStrategy to MultiRecord and SplitType to Line. BatchStrategy *string `type:"string" enum:"BatchStrategy"` + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + DataProcessing *DataProcessing `type:"structure"` + // The environment variables to set in the Docker container. 
We support up to // 16 key and value entries in the map. Environment map[string]*string `type:"map"` // The maximum number of parallel requests that can be sent to each instance - // in a transform job. The default value is 1. To allow Amazon SageMaker to - // determine the appropriate number for MaxConcurrentTransforms, set the value - // to 0. + // in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, + // Amazon SageMaker checks the optional execution-parameters to determine the + // optimal settings for your chosen algorithm. If the execution-parameters endpoint + // is not enabled, the default value is 1. For more information on execution-parameters, + // see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). + // For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. MaxConcurrentTransforms *int64 `type:"integer"` // The maximum allowed size of the payload, in MB. A payload is the data portion @@ -10764,6 +10953,12 @@ func (s *CreateTransformJobInput) SetBatchStrategy(v string) *CreateTransformJob return s } +// SetDataProcessing sets the DataProcessing field's value. +func (s *CreateTransformJobInput) SetDataProcessing(v *DataProcessing) *CreateTransformJobInput { + s.DataProcessing = v + return s +} + // SetEnvironment sets the Environment field's value. func (s *CreateTransformJobInput) SetEnvironment(v map[string]*string) *CreateTransformJobInput { s.Environment = v @@ -10853,7 +11048,7 @@ type CreateWorkteamInput struct { // A list of MemberDefinition objects that contains objects that identify the // Amazon Cognito user pool that makes up the work team. For more information, - // see Amazon Cognito User Pools (http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // see Amazon Cognito User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). // // All of the CognitoMemberDefinition objects that make up the member definition // must have the same ClientId and UserPool values. @@ -10861,6 +11056,14 @@ type CreateWorkteamInput struct { // MemberDefinitions is a required field MemberDefinitions []*MemberDefinition `min:"1" type:"list" required:"true"` + // Configures notification of workers regarding available or expiring work items. + NotificationConfiguration *NotificationConfiguration `type:"structure"` + + // An array of key-value pairs. + // + // For more information, see Resource Tag (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) + // and Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` // The name of the work team. Use this name to identify the work team. @@ -10939,6 +11142,12 @@ func (s *CreateWorkteamInput) SetMemberDefinitions(v []*MemberDefinition) *Creat return s } +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *CreateWorkteamInput) SetNotificationConfiguration(v *NotificationConfiguration) *CreateWorkteamInput { + s.NotificationConfiguration = v + return s +} + // SetTags sets the Tags field's value. 
func (s *CreateWorkteamInput) SetTags(v []*Tag) *CreateWorkteamInput { s.Tags = v @@ -10975,14 +11184,92 @@ func (s *CreateWorkteamOutput) SetWorkteamArn(v string) *CreateWorkteamOutput { return s } +// The data structure used to specify the data to be used for inference in a +// batch transform job and to associate the data that is relevant to the prediction +// results in the output. The input filter provided allows you to exclude input +// data that is not needed for inference in a batch transform job. The output +// filter provided allows you to include input data relevant to interpreting +// the predictions in the output from the job. For more information, see Associate +// Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). +type DataProcessing struct { + _ struct{} `type:"structure"` + + // A JSONPath (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) + // expression used to select a portion of the input data to pass to the algorithm. + // Use the InputFilter parameter to exclude fields, such as an ID column, from + // the input. If you want Amazon SageMaker to pass the entire input dataset + // to the algorithm, accept the default value $. + // + // Examples: "$", "$[1:]", "$.features" + InputFilter *string `type:"string"` + + // Specifies the source of the data to join with the transformed data. The valid + // values are None and Input. The default value is None, which specifies not to + // join the input with the transformed data. If you want the batch transform + // job to join the original input data with the transformed data, set JoinSource + // to Input. + // + // For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds + // the transformed data to the input JSON object in an attribute called SageMakerOutput. + // The joined result for JSON must be a key-value pair object. If the input + // is not a key-value pair object, Amazon SageMaker creates a new JSON file. + // In the new JSON file, the input data is stored under the SageMakerInput + // key and the results are stored in SageMakerOutput. + // + // For CSV files, Amazon SageMaker combines the transformed data with the input + // data at the end of the input data and stores it in the output file. The joined + // data has the joined input data followed by the transformed data and the output + // is a CSV file. + JoinSource *string `type:"string" enum:"JoinSource"` + + // A JSONPath (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) + // expression used to select a portion of the joined dataset to save in the + // output file for a batch transform job. If you want Amazon SageMaker to store + // the entire input dataset in the output file, leave the default value, $. + // If you specify indexes that aren't within the dimension size of the joined + // dataset, you get an error. + // + // Examples: "$", "$[0,5:]", "$['id','SageMakerOutput']" + OutputFilter *string `type:"string"` +} + +// String returns the string representation +func (s DataProcessing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataProcessing) GoString() string { + return s.String() +} + +// SetInputFilter sets the InputFilter field's value. 
+func (s *DataProcessing) SetInputFilter(v string) *DataProcessing { + s.InputFilter = &v + return s +} + +// SetJoinSource sets the JoinSource field's value. +func (s *DataProcessing) SetJoinSource(v string) *DataProcessing { + s.JoinSource = &v + return s +} + +// SetOutputFilter sets the OutputFilter field's value. +func (s *DataProcessing) SetOutputFilter(v string) *DataProcessing { + s.OutputFilter = &v + return s +} + // Describes the location of the channel data. type DataSource struct { _ struct{} `type:"structure"` + // The file system that is associated with a channel. + FileSystemDataSource *FileSystemDataSource `type:"structure"` + // The S3 location of the data source that is associated with a channel. - // - // S3DataSource is a required field - S3DataSource *S3DataSource `type:"structure" required:"true"` + S3DataSource *S3DataSource `type:"structure"` } // String returns the string representation @@ -10998,8 +11285,10 @@ func (s DataSource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DataSource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DataSource"} - if s.S3DataSource == nil { - invalidParams.Add(request.NewErrParamRequired("S3DataSource")) + if s.FileSystemDataSource != nil { + if err := s.FileSystemDataSource.Validate(); err != nil { + invalidParams.AddNested("FileSystemDataSource", err.(request.ErrInvalidParams)) + } } if s.S3DataSource != nil { if err := s.S3DataSource.Validate(); err != nil { @@ -11013,6 +11302,12 @@ func (s *DataSource) Validate() error { return nil } +// SetFileSystemDataSource sets the FileSystemDataSource field's value. +func (s *DataSource) SetFileSystemDataSource(v *FileSystemDataSource) *DataSource { + s.FileSystemDataSource = v + return s +} + // SetS3DataSource sets the S3DataSource field's value. func (s *DataSource) SetS3DataSource(v *S3DataSource) *DataSource { s.S3DataSource = v @@ -11588,7 +11883,7 @@ func (s *DeleteWorkteamOutput) SetSuccess(v bool) *DeleteWorkteamOutput { // of the primary container when you created the model hosted in this ProductionVariant, // the path resolves to a path of the form registry/repository[@digest]. A digest // is a hash value that identifies a specific version of an image. For information -// about Amazon ECR paths, see Pulling an Image (http://docs.aws.amazon.com//AmazonECR/latest/userguide/docker-pull-ecr-image.html) +// about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) // in the Amazon ECR User Guide. type DeployedImage struct { _ struct{} `type:"structure"` @@ -12023,7 +12318,9 @@ type DescribeCompilationJobOutput struct { // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` - // The duration allowed for model compilation. + // Specifies a limit to how long a model compilation job can run. When the job + // reaches the time limit, Amazon SageMaker ends the compilation job. Use this + // API to cap model compilation costs. // // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` @@ -12487,9 +12784,7 @@ type DescribeHyperParameterTuningJobOutput struct { // The HyperParameterTrainingJobDefinition object that specifies the definition // of the training jobs that this tuning job launches. 
- // - // TrainingJobDefinition is a required field - TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure" required:"true"` + TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure"` // The TrainingJobStatusCounters object that specifies the number of training // jobs, categorized by status, that this tuning job launched. @@ -12757,7 +13052,7 @@ type DescribeLabelingJobOutput struct { StoppingConditions *LabelingJobStoppingConditions `type:"structure"` // An array of key/value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -13327,16 +13622,16 @@ type DescribeNotebookInstanceOutput struct { // A list of the Elastic Inference (EI) instance types associated with this // notebook instance. Currently only one EI instance type can be associated // with a notebook instance. For more information, see Using Elastic Inference - // in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A timestamp. Use this parameter to return the time when the notebook instance @@ -13345,10 +13640,10 @@ type DescribeNotebookInstanceOutput struct { // The Git repository associated with the notebook instance as its default code // repository. This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). 
DefaultCodeRepository *string `min:"1" type:"string"` // Describes whether Amazon SageMaker provides internet access to the notebook @@ -13660,6 +13955,18 @@ type DescribeTrainingJobOutput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + // The billable time in seconds. + // + // You can calculate the savings from using managed spot training using the + // formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, + // if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings + // is 80%. + BillableTimeInSeconds *int64 `min:"1" type:"integer"` + + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `type:"structure"` + // A timestamp that indicates when the training job was created. // // CreationTime is a required field @@ -13669,9 +13976,13 @@ type DescribeTrainingJobOutput struct { // training, choose True. Encryption provides greater security for distributed // training, but training might take longer. How long it takes depends on the // amount of communication between compute instances, especially if you use - // a deep learning algorithm in distributed training. + // a deep learning algorithm in distributed training. EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // A Boolean indicating whether managed spot training is enabled (True) or not + // (False). + EnableManagedSpotTraining *bool `type:"boolean"` + // If you want to allow inbound or outbound network calls, except for calls // between peers within a training cluster for distributed training, choose // True. If you enable network isolation for training jobs that are configured @@ -13729,28 +14040,44 @@ type DescribeTrainingJobOutput struct { // Amazon SageMaker provides primary statuses and secondary statuses that apply // to each of them: // - // InProgressStarting - Starting the training job. + // InProgress + // + // * Starting - Starting the training job. + // + // * Downloading - An optional stage for algorithms that support File training + // input mode. It indicates that data is being downloaded to the ML storage + // volumes. + // + // * Training - Training is in progress. + // + // * Interrupted - The job stopped because the managed spot training instances + // were interrupted. // - // Downloading - An optional stage for algorithms that support File training - // input mode. It indicates that data is being downloaded to the ML storage - // volumes. + // * Uploading - Training is complete and the model artifacts are being uploaded + // to the S3 location. // - // Training - Training is in progress. + // Completed // - // Uploading - Training is complete and the model artifacts are being uploaded - // to the S3 location. + // * Completed - The training job has completed. + // + // Failed + // + // * Failed - The training job has failed. The reason for the failure is + // returned in the FailureReason field of DescribeTrainingJobResponse. + // + // Stopped // - // CompletedCompleted - The training job has completed. + // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum + // allowed runtime. // - // FailedFailed - The training job has failed. The reason for the failure is - // returned in the FailureReason field of DescribeTrainingJobResponse. + // * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum + // allowed wait time. 
// - // StoppedMaxRuntimeExceeded - The job stopped because it exceeded the maximum - // allowed runtime. + // * Stopped - The training job has stopped. // - // Stopped - The training job has stopped. + // Stopping // - // StoppingStopping - Stopping the training job. + // * Stopping - Stopping the training job. // // Valid values for SecondaryStatus are subject to change. // @@ -13769,7 +14096,14 @@ type DescribeTrainingJobOutput struct { // through. SecondaryStatusTransitions []*SecondaryStatusTransition `type:"list"` - // The condition under which to stop the training job. + // Specifies a limit to how long a model training job can run. It also specifies + // the maximum time to wait for a spot instance. When the job reaches the time + // limit, Amazon SageMaker ends the training job. Use this API to cap model + // training costs. + // + // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. // // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` @@ -13819,6 +14153,9 @@ type DescribeTrainingJobOutput struct { // of the training container. TrainingStartTime *time.Time `type:"timestamp"` + // The training time in seconds. + TrainingTimeInSeconds *int64 `min:"1" type:"integer"` + // The Amazon Resource Name (ARN) of the associated hyperparameter tuning job // if the training job was launched by a hyperparameter tuning job. TuningJobArn *string `type:"string"` @@ -13845,6 +14182,18 @@ func (s *DescribeTrainingJobOutput) SetAlgorithmSpecification(v *AlgorithmSpecif return s } +// SetBillableTimeInSeconds sets the BillableTimeInSeconds field's value. +func (s *DescribeTrainingJobOutput) SetBillableTimeInSeconds(v int64) *DescribeTrainingJobOutput { + s.BillableTimeInSeconds = &v + return s +} + +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *DescribeTrainingJobOutput) SetCheckpointConfig(v *CheckpointConfig) *DescribeTrainingJobOutput { + s.CheckpointConfig = v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *DescribeTrainingJobOutput) SetCreationTime(v time.Time) *DescribeTrainingJobOutput { s.CreationTime = &v @@ -13857,6 +14206,12 @@ func (s *DescribeTrainingJobOutput) SetEnableInterContainerTrafficEncryption(v b return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *DescribeTrainingJobOutput) SetEnableManagedSpotTraining(v bool) *DescribeTrainingJobOutput { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. func (s *DescribeTrainingJobOutput) SetEnableNetworkIsolation(v bool) *DescribeTrainingJobOutput { s.EnableNetworkIsolation = &v @@ -13971,6 +14326,12 @@ func (s *DescribeTrainingJobOutput) SetTrainingStartTime(v time.Time) *DescribeT return s } +// SetTrainingTimeInSeconds sets the TrainingTimeInSeconds field's value. +func (s *DescribeTrainingJobOutput) SetTrainingTimeInSeconds(v int64) *DescribeTrainingJobOutput { + s.TrainingTimeInSeconds = &v + return s +} + // SetTuningJobArn sets the TuningJobArn field's value. 
func (s *DescribeTrainingJobOutput) SetTuningJobArn(v string) *DescribeTrainingJobOutput { s.TuningJobArn = &v @@ -14028,10 +14389,11 @@ type DescribeTransformJobOutput struct { _ struct{} `type:"structure"` // Specifies the number of records to include in a mini-batch for an HTTP inference - // request. A recordis a single unit of input data that inference can be made on. For example, - // a single line in a CSV file is a record. + // request. A record is a single unit of input data that inference can be made + // on. For example, a single line in a CSV file is a record. // - // To enable the batch strategy, you must set SplitTypeto Line, RecordIO, or TFRecord + // To enable the batch strategy, you must set SplitType to Line, RecordIO, or + // TFRecord. BatchStrategy *string `type:"string" enum:"BatchStrategy"` // A timestamp that shows when the transform job was created. // // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + DataProcessing *DataProcessing `type:"structure"` + // The environment variables to set in the Docker container. We support up to // 16 key and value entries in the map. Environment map[string]*string `type:"map"` @@ -14046,7 +14417,7 @@ type DescribeTransformJobOutput struct { // If the transform job failed, FailureReason describes why it failed. A transform // job creates a log file, which includes error messages, and stores it as an // Amazon S3 object. For more information, see Log Amazon SageMaker Events with - // Amazon CloudWatch (http://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). + // Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). FailureReason *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling @@ -14128,6 +14499,12 @@ func (s *DescribeTransformJobOutput) SetCreationTime(v time.Time) *DescribeTrans return s } +// SetDataProcessing sets the DataProcessing field's value. +func (s *DescribeTransformJobOutput) SetDataProcessing(v *DataProcessing) *DescribeTransformJobOutput { + s.DataProcessing = v + return s +} + // SetEnvironment sets the Environment field's value. func (s *DescribeTransformJobOutput) SetEnvironment(v map[string]*string) *DescribeTransformJobOutput { s.Environment = v @@ -14486,20 +14863,107 @@ func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { return s } +// Specifies a file system data source for a channel. +type FileSystemDataSource struct { + _ struct{} `type:"structure"` + + // The full path to the directory to associate with the channel. + // + // DirectoryPath is a required field + DirectoryPath *string `type:"string" required:"true"` + + // The access mode of the mount of the directory associated with the channel. 
+ // A directory can be mounted either in ro (read-only) or rw (read-write) mode. + // + // FileSystemAccessMode is a required field + FileSystemAccessMode *string `type:"string" required:"true" enum:"FileSystemAccessMode"` + + // The file system ID. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // The file system type. + // + // FileSystemType is a required field + FileSystemType *string `type:"string" required:"true" enum:"FileSystemType"` +} + +// String returns the string representation +func (s FileSystemDataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemDataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FileSystemDataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FileSystemDataSource"} + if s.DirectoryPath == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryPath")) + } + if s.FileSystemAccessMode == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemAccessMode")) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.FileSystemType == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryPath sets the DirectoryPath field's value. +func (s *FileSystemDataSource) SetDirectoryPath(v string) *FileSystemDataSource { + s.DirectoryPath = &v + return s +} + +// SetFileSystemAccessMode sets the FileSystemAccessMode field's value. +func (s *FileSystemDataSource) SetFileSystemAccessMode(v string) *FileSystemDataSource { + s.FileSystemAccessMode = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *FileSystemDataSource) SetFileSystemId(v string) *FileSystemDataSource { + s.FileSystemId = &v + return s +} + +// SetFileSystemType sets the FileSystemType field's value. +func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource { + s.FileSystemType = &v + return s +} + // A conditional statement for a search expression that includes a Boolean operator, // a resource property, and a value. // // If you don't specify an Operator and a Value, the filter searches for only // the specified property. For example, defining a Filter for the FailureReason -// for the TrainingJobResource searches for training job objects that have a -// value in the FailureReason field. +// for the TrainingJob Resource searches for training job objects that have +// a value in the FailureReason field. // // If you specify a Value, but not an Operator, Amazon SageMaker uses the equals // operator as the default. // // In search, there are several property types: // -// MetricsTo define a metric filter, enter a value using the form "Metrics.", +// Metrics +// +// To define a metric filter, enter a value using the form "Metrics.<name>", // where <name> is a metric name. For example, the following filter searches // for training jobs with an "accuracy" metric greater than "0.9": // @@ -14513,26 +14977,30 @@ func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { // // } // -// HyperParametersTo define a hyperparameter filter, enter a value with the -// form "HyperParameters.". 
Decimal hyperparameter values are treated -// as a decimal in a comparison if the specified Value is also a decimal value. -// If the specified Value is an integer, the decimal hyperparameter values are -// treated as integers. For example, the following filter is satisfied by training -// jobs with a "learning_rate" hyperparameter that is less than "0.5": +// HyperParameters // -// { +// To define a hyperparameter filter, enter a value with the form "HyperParameters.<name>". +// Decimal hyperparameter values are treated as a decimal in a comparison if +// the specified Value is also a decimal value. If the specified Value is an +// integer, the decimal hyperparameter values are treated as integers. For example, +// the following filter is satisfied by training jobs with a "learning_rate" +// hyperparameter that is less than "0.5": // -// "Name": "HyperParameters.learning_rate", +// { // -// "Operator": "LESS_THAN", +// "Name": "HyperParameters.learning_rate", // -// "Value": "0.5" +// "Operator": "LESS_THAN", // -// } +// "Value": "0.5" // -// TagsTo define a tag filter, enter a value with the form "Tags.". -type Filter struct { - _ struct{} `type:"structure"` +// } +// +// Tags +// +// To define a tag filter, enter a value with the form "Tags.<key>". +type Filter struct { + _ struct{} `type:"structure"` // A property name. For example, TrainingJobName. For the list of valid property // names returned in a search result for each supported resource, see TrainingJob @@ -14544,24 +15012,38 @@ type Filter struct { // A Boolean binary operator that is used to evaluate the filter. The operator // field contains one of the following values: // - // EqualsThe specified resource in Name equals the specified Value. + // Equals // - // NotEqualsThe specified resource in Name does not equal the specified Value. + // The specified resource in Name equals the specified Value. // - // GreaterThanThe specified resource in Name is greater than the specified Value. - // Not supported for text-based properties. + // NotEquals + // + // The specified resource in Name does not equal the specified Value. + // + // GreaterThan + // + // The specified resource in Name is greater than the specified Value. Not supported + // for text-based properties. + // + // GreaterThanOrEqualTo // - // GreaterThanOrEqualToThe specified resource in Name is greater than or equal - // to the specified Value. Not supported for text-based properties. + // The specified resource in Name is greater than or equal to the specified + // Value. Not supported for text-based properties. // - // LessThanThe specified resource in Name is less than the specified Value. + // LessThan + // + // The specified resource in Name is less than the specified Value. Not supported + // for text-based properties. + // + // LessThanOrEqualTo + // + // The specified resource in Name is less than or equal to the specified Value. // Not supported for text-based properties. // - // LessThanOrEqualToThe specified resource in Name is less than or equal to - // the specified Value. Not supported for text-based properties. + // Contains // - // ContainsOnly supported for text-based properties. The word-list of the property - // contains the specified Value. + // Only supported for text-based properties. The word-list of the property contains + // the specified Value. // // If you have specified a filter Value, the default is Equals. 
Operator *string `type:"string" enum:"Operator"` @@ -14890,6 +15372,8 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition + // // US East (Ohio) (us-east-2): // // * arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox @@ -14900,6 +15384,8 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition + // // US West (Oregon) (us-west-2): // // * arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox @@ -14910,6 +15396,20 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition + // + // Canada (Central) (ca-central-1): + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition + // // EU (Ireland) (eu-west-1): // // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox @@ -14920,7 +15420,33 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass // - // Asia Pacific (Tokyo (ap-northeast-1): + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition + // + // EU (London) (eu-west-2): + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition + // + // EU (Frankfurt) (eu-central-1): + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Tokyo) (ap-northeast-1): // // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox // @@ -14930,13 +15456,66 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Seoul) (ap-northeast-2): + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Mumbai) (ap-south-1): + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox + // + // 
* arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Singapore) (ap-southeast-1): + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Sydney) (ap-southeast-2): + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition + // // PreHumanTaskLambdaArn is a required field PreHumanTaskLambdaArn *string `type:"string" required:"true"` - // The price that you pay for each task performed by a public worker. + // The price that you pay for each task performed by an Amazon Mechanical Turk + // worker. PublicWorkforceTaskPrice *PublicWorkforceTaskPrice `type:"structure"` - // The length of time that a task remains available for labelling by human workers. + // The length of time that a task remains available for labeling by human workers. + // If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours + // (43200 seconds). For private and vendor workforces, the maximum is as listed. TaskAvailabilityLifetimeInSeconds *int64 `min:"1" type:"integer"` // A description of the task for your human workers. @@ -14951,7 +15530,7 @@ type HumanTaskConfig struct { // The amount of time that a worker has to complete a task. // // TaskTimeLimitInSeconds is a required field - TaskTimeLimitInSeconds *int64 `min:"1" type:"integer" required:"true"` + TaskTimeLimitInSeconds *int64 `min:"30" type:"integer" required:"true"` // A title for the task for your human workers. // @@ -15014,8 +15593,8 @@ func (s *HumanTaskConfig) Validate() error { if s.TaskTimeLimitInSeconds == nil { invalidParams.Add(request.NewErrParamRequired("TaskTimeLimitInSeconds")) } - if s.TaskTimeLimitInSeconds != nil && *s.TaskTimeLimitInSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("TaskTimeLimitInSeconds", 1)) + if s.TaskTimeLimitInSeconds != nil && *s.TaskTimeLimitInSeconds < 30 { + invalidParams.Add(request.NewErrParamMinValue("TaskTimeLimitInSeconds", 30)) } if s.TaskTitle == nil { invalidParams.Add(request.NewErrParamRequired("TaskTitle")) @@ -15150,7 +15729,7 @@ type HyperParameterAlgorithmSpecification struct { // the training data downloaded from Amazon S3, the model artifacts, and intermediate // information. // - // For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html) + // For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). 
// // TrainingInputMode is a required field TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"` @@ -15332,6 +15911,10 @@ type HyperParameterTrainingJobDefinition struct { // AlgorithmSpecification is a required field AlgorithmSpecification *HyperParameterAlgorithmSpecification `type:"structure" required:"true"` + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `type:"structure"` + // To encrypt all communications between ML compute instances in distributed // training, choose True. Encryption provides greater security for distributed // training, but training might take longer. How long it takes depends on the @@ -15339,6 +15922,10 @@ type HyperParameterTrainingJobDefinition struct { // a deep learning algorithm in distributed training. EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // A Boolean indicating whether managed spot training is enabled (True) or not + // (False). + EnableManagedSpotTraining *bool `type:"boolean"` + // Isolates the training container. No inbound or outbound network calls can // be made, except for calls between peers within a training cluster for distributed // training. If network isolation is used for training jobs that are configured @@ -15381,16 +15968,10 @@ type HyperParameterTrainingJobDefinition struct { // job. StaticHyperParameters map[string]*string `type:"map"` - // Sets a maximum duration for the training jobs that the tuning job launches. - // Use this parameter to limit model training costs. - // - // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This - // delays job termination for 120 seconds. Algorithms might use this 120-second - // window to save the model artifacts. - // - // When Amazon SageMaker terminates a job because the stopping condition has - // been met, training algorithms provided by Amazon SageMaker save the intermediate - // results of the job. + // Specifies a limit to how long a model hyperparameter training job can run. + // It also specifies how long you are willing to wait for a managed spot training + // job to complete. When the job reaches the time limit, Amazon SageMaker ends + // the training job. Use this API to cap model training costs. // // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` @@ -15441,6 +16022,11 @@ func (s *HyperParameterTrainingJobDefinition) Validate() error { invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) } } + if s.CheckpointConfig != nil { + if err := s.CheckpointConfig.Validate(); err != nil { + invalidParams.AddNested("CheckpointConfig", err.(request.ErrInvalidParams)) + } + } if s.InputDataConfig != nil { for i, v := range s.InputDataConfig { if v == nil { @@ -15484,12 +16070,24 @@ func (s *HyperParameterTrainingJobDefinition) SetAlgorithmSpecification(v *Hyper return s } +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *HyperParameterTrainingJobDefinition) SetCheckpointConfig(v *CheckpointConfig) *HyperParameterTrainingJobDefinition { + s.CheckpointConfig = v + return s +} + // SetEnableInterContainerTrafficEncryption sets the EnableInterContainerTrafficEncryption field's value. 
func (s *HyperParameterTrainingJobDefinition) SetEnableInterContainerTrafficEncryption(v bool) *HyperParameterTrainingJobDefinition { s.EnableInterContainerTrafficEncryption = &v return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *HyperParameterTrainingJobDefinition) SetEnableManagedSpotTraining(v bool) *HyperParameterTrainingJobDefinition { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. func (s *HyperParameterTrainingJobDefinition) SetEnableNetworkIsolation(v bool) *HyperParameterTrainingJobDefinition { s.EnableNetworkIsolation = &v @@ -15685,15 +16283,11 @@ type HyperParameterTuningJobConfig struct { // The HyperParameterTuningJobObjective object that specifies the objective // metric for this tuning job. - // - // HyperParameterTuningJobObjective is a required field - HyperParameterTuningJobObjective *HyperParameterTuningJobObjective `type:"structure" required:"true"` + HyperParameterTuningJobObjective *HyperParameterTuningJobObjective `type:"structure"` // The ParameterRanges object that specifies the ranges of hyperparameters that // this tuning job searches. - // - // ParameterRanges is a required field - ParameterRanges *ParameterRanges `type:"structure" required:"true"` + ParameterRanges *ParameterRanges `type:"structure"` // The ResourceLimits object that specifies the maximum number of training jobs // and parallel training jobs for this tuning job. @@ -15705,7 +16299,7 @@ type HyperParameterTuningJobConfig struct { // values to use for the training job it launches. To use the Bayesian search // strategy, set this to Bayesian. To randomly search, set it to Random. For // information about search strategies, see How Hyperparameter Tuning Works - // (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). // // Strategy is a required field Strategy *string `type:"string" required:"true" enum:"HyperParameterTuningJobStrategyType"` @@ -15714,12 +16308,16 @@ type HyperParameterTuningJobConfig struct { // hyperparameter tuning job. This can be one of the following values (the default // value is OFF): // - // OFFTraining jobs launched by the hyperparameter tuning job do not use early + // OFF + // + // Training jobs launched by the hyperparameter tuning job do not use early // stopping. // - // AUTOAmazon SageMaker stops training jobs launched by the hyperparameter tuning + // AUTO + // + // Amazon SageMaker stops training jobs launched by the hyperparameter tuning // job when they are unlikely to perform better than previously completed training - // jobs. For more information, see Stop Training Jobs Early (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html). + // jobs. For more information, see Stop Training Jobs Early (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html). TrainingJobEarlyStoppingType *string `type:"string" enum:"TrainingJobEarlyStoppingType"` } @@ -15736,12 +16334,6 @@ func (s HyperParameterTuningJobConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *HyperParameterTuningJobConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "HyperParameterTuningJobConfig"} - if s.HyperParameterTuningJobObjective == nil { - invalidParams.Add(request.NewErrParamRequired("HyperParameterTuningJobObjective")) - } - if s.ParameterRanges == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterRanges")) - } if s.ResourceLimits == nil { invalidParams.Add(request.NewErrParamRequired("ResourceLimits")) } @@ -16003,7 +16595,7 @@ type HyperParameterTuningJobWarmStartConfig struct { // An array of hyperparameter tuning jobs that are used as the starting point // for the new hyperparameter tuning job. For more information about warm starting // a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job - // as a Starting Point (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-warm-start.html). + // as a Starting Point (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-warm-start.html). // // Hyperparameter tuning jobs created before October 1, 2018 cannot be used // as parent jobs for warm start tuning jobs. @@ -16013,21 +16605,24 @@ type HyperParameterTuningJobWarmStartConfig struct { // Specifies one of the following: // - // IDENTICAL_DATA_AND_ALGORITHMThe new hyperparameter tuning job uses the same - // input data and training image as the parent tuning jobs. You can change the - // hyperparameter ranges to search and the maximum number of training jobs that - // the hyperparameter tuning job launches. You cannot use a new version of the - // training algorithm, unless the changes in the new version do not affect the - // algorithm itself. For example, changes that improve logging or adding support - // for a different data format are allowed. You can also change hyperparameters - // from tunable to static, and from static to tunable, but the total number - // of static plus tunable hyperparameters must remain the same as it is in all - // parent jobs. The objective metric for the new tuning job must be the same - // as for all parent jobs. - // - // TRANSFER_LEARNINGThe new hyperparameter tuning job can include input data, - // hyperparameter ranges, maximum number of concurrent training jobs, and maximum - // number of training jobs that are different than those of its parent hyperparameter + // IDENTICAL_DATA_AND_ALGORITHM + // + // The new hyperparameter tuning job uses the same input data and training image + // as the parent tuning jobs. You can change the hyperparameter ranges to search + // and the maximum number of training jobs that the hyperparameter tuning job + // launches. You cannot use a new version of the training algorithm, unless + // the changes in the new version do not affect the algorithm itself. For example, + // changes that improve logging or adding support for a different data format + // are allowed. You can also change hyperparameters from tunable to static, + // and from static to tunable, but the total number of static plus tunable hyperparameters + // must remain the same as it is in all parent jobs. The objective metric for + // the new tuning job must be the same as for all parent jobs. + // + // TRANSFER_LEARNING + // + // The new hyperparameter tuning job can include input data, hyperparameter + // ranges, maximum number of concurrent training jobs, and maximum number of + // training jobs that are different than those of its parent hyperparameter // tuning jobs. 
The training image can also be a different version from the // version used in the parent hyperparameter tuning job. You can also change // hyperparameters from tunable to static, and from static to tunable, but the @@ -16216,56 +16811,29 @@ type InputConfig struct { // * TensorFlow: You must specify the name and shape (NHWC format) of the // expected data inputs using a dictionary format for your trained model. // The dictionary formats required for the console and CLI are different. - // - // Examples for one input: - // - // If using the console, {"input":[1,1024,1024,3]} - // - // If using the CLI, {\"input\":[1,1024,1024,3]} - // - // Examples for two inputs: - // - // If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} - // - // If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} + // Examples for one input: If using the console, {"input":[1,1024,1024,3]} + // If using the CLI, {\"input\":[1,1024,1024,3]} Examples for two inputs: + // If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} If using + // the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} // // * MXNET/ONNX: You must specify the name and shape (NCHW format) of the // expected data inputs in order using a dictionary format for your trained // model. The dictionary formats required for the console and CLI are different. - // - // Examples for one input: - // - // If using the console, {"data":[1,3,1024,1024]} - // - // If using the CLI, {\"data\":[1,3,1024,1024]} - // - // Examples for two inputs: - // - // If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]} - // - // If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} + // Examples for one input: If using the console, {"data":[1,3,1024,1024]} + // If using the CLI, {\"data\":[1,3,1024,1024]} Examples for two inputs: + // If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]} If using + // the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} // // * PyTorch: You can either specify the name and shape (NCHW format) of // expected data inputs in order using a dictionary format for your trained // model or you can specify the shape only using a list format. The dictionary // formats required for the console and CLI are different. The list formats - // for the console and CLI are the same. - // - // Examples for one input in dictionary format: - // - // If using the console, {"input0":[1,3,224,224]} - // - // If using the CLI, {\"input0\":[1,3,224,224]} - // - // Example for one input in list format: [[1,3,224,224]] - // - // Examples for two inputs in dictionary format: - // - // If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]} - // - // If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]} - // - // Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]] + // for the console and CLI are the same. Examples for one input in dictionary + // format: If using the console, {"input0":[1,3,224,224]} If using the CLI, + // {\"input0\":[1,3,224,224]} Example for one input in list format: [[1,3,224,224]] + // Examples for two inputs in dictionary format: If using the console, {"input0":[1,3,224,224], + // "input1":[1,3,224,224]} If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]} + // Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]] // // * XGBOOST: input data name and shape are not needed. 
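// Editor's illustrative sketch (not part of the generated API): building an
// InputConfig for a TensorFlow model with the single NHWC input from the
// console-format example above. It assumes InputConfig exposes the usual
// generated setters (SetS3Uri, SetDataInputConfig, SetFramework), following
// the pattern used throughout this file; the bucket and key are hypothetical.
func exampleTensorFlowInputConfig() *InputConfig {
	cfg := &InputConfig{}
	cfg.SetS3Uri("s3://my-bucket/model.tar.gz")         // hypothetical model artifact location
	cfg.SetDataInputConfig(`{"input":[1,1024,1024,3]}`) // one input named "input", NHWC shape
	cfg.SetFramework("TENSORFLOW")
	return cfg
}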
// @@ -16357,17 +16925,22 @@ type IntegerParameterRange struct { // The scale that hyperparameter tuning uses to search the hyperparameter range. // For information about choosing a hyperparameter scale, see Hyperparameter - // Range Scaling (http://docs.aws.amazon.com//sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). + // Scaling (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). // One of the following values: // - // AutoAmazon SageMaker hyperparameter tuning chooses the best scale for the - // hyperparameter. + // Auto // - // LinearHyperparameter tuning searches the values in the hyperparameter range - // by using a linear scale. + // Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter. // - // LogarithmicHyperparemeter tuning searches the values in the hyperparameter - // range by using a logarithmic scale. + // Linear + // + // Hyperparameter tuning searches the values in the hyperparameter range by + // using a linear scale. + // + // Logarithmic + // + // Hyperparameter tuning searches the values in the hyperparameter range by + // using a logarithmic scale. // // Logarithmic scaling works only for ranges that have only values greater than // 0. @@ -16596,17 +17169,13 @@ type LabelingJobAlgorithmsConfig struct { // Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. // You must select one of the following ARNs: // - // * Image classification // - // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification - // - // * Text classification + // * Image classification arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification // - // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification + // * Text classification arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification // - // * Object detection + // * Object detection arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection // - // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection + // * Semantic Segmentation arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation // // LabelingJobAlgorithmSpecificationArn is a required field LabelingJobAlgorithmSpecificationArn *string `type:"string" required:"true"` @@ -16751,6 +17320,9 @@ type LabelingJobForWorkteamSummary struct { // The name of the labeling job that the work team is assigned to. LabelingJobName *string `min:"1" type:"string"` + // The configured number of workers per data object. + NumberOfHumanWorkersPerDataObject *int64 `min:"1" type:"integer"` + // WorkRequesterAccountId is a required field WorkRequesterAccountId *string `type:"string" required:"true"` } @@ -16789,6 +17361,12 @@ func (s *LabelingJobForWorkteamSummary) SetLabelingJobName(v string) *LabelingJo return s } +// SetNumberOfHumanWorkersPerDataObject sets the NumberOfHumanWorkersPerDataObject field's value. +func (s *LabelingJobForWorkteamSummary) SetNumberOfHumanWorkersPerDataObject(v int64) *LabelingJobForWorkteamSummary { + s.NumberOfHumanWorkersPerDataObject = &v + return s +} + // SetWorkRequesterAccountId sets the WorkRequesterAccountId field's value. 
func (s *LabelingJobForWorkteamSummary) SetWorkRequesterAccountId(v string) *LabelingJobForWorkteamSummary { s.WorkRequesterAccountId = &v @@ -16890,6 +17468,21 @@ type LabelingJobOutputConfig struct { // The AWS Key Management Service ID of the key used to encrypt the output data, // if any. + // + // If you use a KMS key ID or an alias of your master key, the Amazon SageMaker + // execution role must include permissions to call kms:Encrypt. If you don't + // provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon + // S3 for your role's account. Amazon SageMaker uses server-side encryption + // with KMS-managed keys for LabelingJobOutputConfig. If you use a bucket policy + // with an s3:PutObject permission that only allows objects with server-side + // encryption, set the condition key of s3:x-amz-server-side-encryption to "aws:kms". + // For more information, see KMS-Managed Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // The KMS key policy must grant permission to the IAM role that you specify + // in your CreateLabelingJob request. For more information, see Using Key Policies + // in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` // The Amazon S3 location to write output data. @@ -16937,8 +17530,14 @@ func (s *LabelingJobOutputConfig) SetS3OutputPath(v string) *LabelingJobOutputCo type LabelingJobResourceConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service key ID for the key used to encrypt the output - // data, if any. + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to + // encrypt data on the storage volume attached to the ML compute instance(s) + // that run the training job. The VolumeKmsKeyId can be any of the following + // formats: + // + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" + // + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string `type:"string"` } @@ -17055,7 +17654,7 @@ type LabelingJobSummary struct { // The Amazon Resource Name (ARN) of the Lambda function used to consolidate // the annotations from individual workers into a label for a data object. For - // more information, see Annotation Consolidation (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). + // more information, see Annotation Consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). AnnotationConsolidationLambdaArn *string `type:"string"` // The date and time that the job was created (timestamp). @@ -17649,8 +18248,8 @@ func (s *ListCompilationJobsOutput) SetNextToken(v string) *ListCompilationJobsO type ListEndpointConfigsInput struct { _ struct{} `type:"structure"` - // A filter that returns only endpoint configurations created after the specified - // time (timestamp). + // A filter that returns only endpoint configurations with a creation time greater + // than or equal to the specified time (timestamp). 
CreationTimeAfter *time.Time `type:"timestamp"` // A filter that returns only endpoint configurations created before the specified @@ -17779,8 +18378,8 @@ func (s *ListEndpointConfigsOutput) SetNextToken(v string) *ListEndpointConfigsO type ListEndpointsInput struct { _ struct{} `type:"structure"` - // A filter that returns only endpoints that were created after the specified - // time (timestamp). + // A filter that returns only endpoints with a creation time greater than or + // equal to the specified time (timestamp). CreationTimeAfter *time.Time `type:"timestamp"` // A filter that returns only endpoints that were created before the specified @@ -18534,7 +19133,8 @@ func (s *ListModelPackagesOutput) SetNextToken(v string) *ListModelPackagesOutpu type ListModelsInput struct { _ struct{} `type:"structure"` - // A filter that returns only models created after the specified time (timestamp). + // A filter that returns only models with a creation time greater than or equal + // to the specified time (timestamp). CreationTimeAfter *time.Time `type:"timestamp"` // A filter that returns only models created before the specified time (timestamp). @@ -19836,10 +20436,10 @@ func (s *MetricData) SetValue(v float64) *MetricData { return s } -// Specifies a metric that the training algorithm writes to stderr or stdout. -// Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify -// one metric that a hyperparameter tuning job uses as its objective metric -// to choose the best training job. +// Specifies a metric that the training algorithm writes to stderr or stdout. +// Amazon SageMaker hyperparameter tuning captures all defined metrics. You +// specify one metric that a hyperparameter tuning job uses as its objective +// metric to choose the best training job. type MetricDefinition struct { _ struct{} `type:"structure"` @@ -20374,7 +20974,7 @@ type NestedFilters struct { Filters []*Filter `min:"1" type:"list" required:"true"` // The name of the property to use in the nested filters. The value must match - // a listed property name, such as InputDataConfig. + // a listed property name, such as InputDataConfig. // // NestedPropertyName is a required field NestedPropertyName *string `min:"1" type:"string" required:"true"` @@ -20548,11 +21148,11 @@ type NotebookInstanceSummary struct { // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A timestamp that shows when the notebook instance was created. @@ -20560,10 +21160,10 @@ type NotebookInstanceSummary struct { // The Git repository associated with the notebook instance as its default code // repository.
This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // The type of ML compute instance that the notebook instance is running on. @@ -20667,6 +21267,31 @@ func (s *NotebookInstanceSummary) SetUrl(v string) *NotebookInstanceSummary { return s } +// Configures SNS notifications of available or expiring work items for work +// teams. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN for the SNS topic to which notifications should be published. + NotificationTopicArn *string `type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// SetNotificationTopicArn sets the NotificationTopicArn field's value. +func (s *NotificationConfiguration) SetNotificationTopicArn(v string) *NotificationConfiguration { + s.NotificationTopicArn = &v + return s +} + // Specifies the number of training jobs that this hyperparameter tuning job // launched, categorized by the status of their objective metric. The objective // metric status shows whether the final objective metric for the training job @@ -20781,30 +21406,27 @@ type OutputDataConfig struct { // encrypt the model artifacts at rest using Amazon S3 server-side encryption. // The KmsKeyId can be any of the following formats: // - // * // KMS Key ID - // - // "1234abcd-12ab-34cd-56ef-1234567890ab" + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a KMS Key + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // - // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + // * // KMS Key Alias "alias/ExampleAlias" // - // * // KMS Key Alias + // * // Amazon Resource Name (ARN) of a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // - // "alias/ExampleAlias" - // - // * // Amazon Resource Name (ARN) of a KMS Key Alias - // - // "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" - // - // If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS - // key for Amazon S3 for your role's account. For more information, see KMS-Managed - // Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // If you use a KMS key ID or an alias of your master key, the Amazon SageMaker + // execution role must include permissions to call kms:Encrypt. If you don't + // provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon + // S3 for your role's account. 
Amazon SageMaker uses server-side encryption + // with KMS-managed keys for OutputDataConfig. If you use a bucket policy with + // an s3:PutObject permission that only allows objects with server-side encryption, + // set the condition key of s3:x-amz-server-side-encryption to "aws:kms". For + // more information, see KMS-Managed Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) // in the Amazon Simple Storage Service Developer Guide. // // The KMS key policy must grant permission to the IAM role that you specify - // in your CreateTramsformJob request. For more information, see Using Key Policies - // in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob + // requests. For more information, see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` @@ -21062,8 +21684,7 @@ type ProductionVariant struct { // The size of the Elastic Inference (EI) instance to use for the production // variant. EI instances provide on-demand GPU computing for inference. For - // more information, see Using Elastic Inference in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). - // For more information, see Using Elastic Inference in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // more information, see Using Elastic Inference in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorType *string `type:"string" enum:"ProductionVariantAcceleratorType"` // Number of instances to launch initially. @@ -21313,7 +21934,8 @@ func (s *PropertyNameSuggestion) SetPropertyName(v string) *PropertyNameSuggesti // each task performed. // // Use one of the following prices for bounding box tasks. Prices are in US -// dollars. +// dollars and should be based on the complexity of the task; the longer it +// takes in your initial testing, the more you should offer. // // * 0.036 // @@ -21391,7 +22013,8 @@ func (s *PropertyNameSuggestion) SetPropertyName(v string) *PropertyNameSuggesti type PublicWorkforceTaskPrice struct { _ struct{} `type:"structure"` - // Defines the amount of money paid to a worker in United States dollars. + // Defines the amount of money paid to an Amazon Mechanical Turk worker in United + // States dollars. AmountInUsd *USD `type:"structure"` } @@ -21631,13 +22254,9 @@ type ResourceConfig struct { // that run the training job. The VolumeKmsKeyId can be any of the following // formats: // - // * // KMS Key ID - // - // "1234abcd-12ab-34cd-56ef-1234567890ab" + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a KMS Key - // - // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string `type:"string"` // The size of the ML storage volume that you want to provision. @@ -21823,37 +22442,17 @@ type S3DataSource struct { // Depending on the value specified for the S3DataType, identifies either a // key name prefix or a manifest. For example: // - // * A key name prefix might look like this: s3://bucketname/exampleprefix.
- // - // - // * A manifest might look like this: s3://bucketname/example.manifest - // - // The manifest is an S3 object which is a JSON file with the following format: - // - // - // [ - // - // {"prefix": "s3://customer_bucket/some/prefix/"}, - // - // "relative/path/to/custdata-1", - // - // "relative/path/custdata-2", - // - // ... - // - // ] - // - // The preceding JSON matches the following s3Uris: - // - // s3://customer_bucket/some/prefix/relative/path/to/custdata-1 - // - // s3://customer_bucket/some/prefix/relative/path/custdata-2 + // * A key name prefix might look like this: s3://bucketname/exampleprefix. // - // ... - // - // The complete set of s3uris in this manifest is the input data for the channel - // for this datasource. The object that each s3uris points to must be readable - // by the IAM role that Amazon SageMaker uses to perform tasks on your behalf. + // * A manifest might look like this: s3://bucketname/example.manifest The + // manifest is an S3 object which is a JSON file with the following format: + // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", + // "relative/path/custdata-2", ... ] The preceding JSON matches the following + // s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 + // ... The complete set of s3uris in this manifest is the input data for + // the channel for this datasource. The object that each s3uris points to + // must be readable by the IAM role that Amazon SageMaker uses to perform + // tasks on your behalf. // // S3Uri is a required field S3Uri *string `type:"string" required:"true"` @@ -22217,28 +22816,38 @@ type SecondaryStatusTransition struct { // // Status might be one of the following secondary statuses: // - // InProgressStarting - Starting the training job. + // InProgress + // + // * Starting - Starting the training job. + // + // * Downloading - An optional stage for algorithms that support File training + // input mode. It indicates that data is being downloaded to the ML storage + // volumes. + // + // * Training - Training is in progress. + // + // * Uploading - Training is complete and the model artifacts are being uploaded + // to the S3 location. + // + // Completed // - // Downloading - An optional stage for algorithms that support File training - // input mode. It indicates that data is being downloaded to the ML storage - // volumes. + // * Completed - The training job has completed. // - // Training - Training is in progress. + // Failed // - // Uploading - Training is complete and the model artifacts are being uploaded - // to the S3 location. + // * Failed - The training job has failed. The reason for the failure is + // returned in the FailureReason field of DescribeTrainingJobResponse. // - // CompletedCompleted - The training job has completed. + // Stopped // - // FailedFailed - The training job has failed. The reason for the failure is - // returned in the FailureReason field of DescribeTrainingJobResponse. + // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum + // allowed runtime. // - // StoppedMaxRuntimeExceeded - The job stopped because it exceeded the maximum - // allowed runtime. + // * Stopped - The training job has stopped. // - // Stopped - The training job has stopped. + // Stopping // - // StoppingStopping - Stopping the training job. + // * Stopping - Stopping the training job. 
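The reflowed status list above stresses that only the SecondaryStatus enum values are stable. A hedged polling sketch (job name is a placeholder) that branches on the enum rather than on the free-form StatusMessage, which the comment below warns is subject to change:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	out, err := svc.DescribeTrainingJob(&sagemaker.DescribeTrainingJobInput{
		TrainingJobName: aws.String("example-training-job"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Branch on the enum value, never on the human-readable StatusMessage.
	switch aws.StringValue(out.SecondaryStatus) {
	case sagemaker.SecondaryStatusDownloading, sagemaker.SecondaryStatusTraining:
		fmt.Println("still running")
	case sagemaker.SecondaryStatusCompleted:
		fmt.Println("done")
	case sagemaker.SecondaryStatusFailed:
		fmt.Println("failed:", aws.StringValue(out.FailureReason))
	}
}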
// // We no longer support the following secondary statuses: // @@ -22256,27 +22865,31 @@ type SecondaryStatusTransition struct { // Amazon SageMaker provides secondary statuses and status messages that apply // to each of them: // - // StartingStarting the training job. + // Starting + // + // * Starting the training job. // - // Launching requested ML instances. + // * Launching requested ML instances. // - // Insufficient capacity error from EC2 while launching instances, retrying! + // * Insufficient capacity error from EC2 while launching instances, retrying! // - // Launched instance was unhealthy, replacing it! + // * Launched instance was unhealthy, replacing it! // - // Preparing the instances for training. + // * Preparing the instances for training. // - // TrainingDownloading the training image. + // Training // - // Training image download completed. Training in progress. + // * Downloading the training image. + // + // * Training image download completed. Training in progress. // // Status messages are subject to change. Therefore, we recommend not including // them in code that programmatically initiates actions. For example, don't // use status messages in if statements. // // To have an overview of your training job's progress, view TrainingJobStatus - // and SecondaryStatus in DescribeTrainingJobResponse, and StatusMessage together. - // For example, at the start of a training job, you might see the following: + // and SecondaryStatus in DescribeTrainingJob, and StatusMessage together. For + // example, at the start of a training job, you might see the following: // // * TrainingJobStatus - InProgress // @@ -22860,27 +23473,39 @@ func (s StopTransformJobOutput) GoString() string { return s.String() } -// Specifies how long model training can run. When model training reaches the -// limit, Amazon SageMaker ends the training job. Use this API to cap model -// training cost. +// Specifies a limit to how long a model training or compilation job can run. +// It also specifies how long you are willing to wait for a managed spot training +// job to complete. When the job reaches the time limit, Amazon SageMaker ends +// the training or compilation job. Use this API to cap model training costs. // // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which -// delays job termination for120 seconds. Algorithms might use this 120-second -// window to save the model artifacts, so the results of training is not lost. -// -// Training algorithms provided by Amazon SageMaker automatically saves the -// intermediate results of a model training job (it is best effort case, as -// model might not be ready to save as some stages, for example training just -// started). This intermediate data is a valid model artifact. You can use it -// to create a model (CreateModel). +// delays job termination for 120 seconds. Algorithms can use this 120-second +// window to save the model artifacts, so the results of training are not lost. +// +// The training algorithms provided by Amazon SageMaker automatically save the +// intermediate results of a model training job when possible. This attempt +// to save artifacts is only a best-effort case, as the model might not be in a state +// from which it can be saved. For example, if training has just started, the +// model might not be ready to save. When saved, this intermediate data is a +// valid model artifact. You can use it to create a model with CreateModel.
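To make the new managed spot field concrete, a small sketch (the durations are arbitrary assumptions) showing the service-side constraint that MaxWaitTimeInSeconds must be greater than or equal to MaxRuntimeInSeconds:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// MaxWaitTimeInSeconds applies only to managed spot training: it covers
	// actual runtime plus time spent waiting for Spot capacity, so it must be
	// >= MaxRuntimeInSeconds.
	cond := &sagemaker.StoppingCondition{
		MaxRuntimeInSeconds:  aws.Int64(3600),
		MaxWaitTimeInSeconds: aws.Int64(7200),
	}
	// Validate (shown in the hunk below) only enforces the per-field minimums
	// (>= 1); the relative constraint is checked service-side.
	if err := cond.Validate(); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("stopping condition OK")
}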
+// +// The Neural Topic Model (NTM) currently does not support saving intermediate +// model artifacts. When training NTMs, make sure that the maximum runtime is +// sufficient for the training job to complete. type StoppingCondition struct { _ struct{} `type:"structure"` - // The maximum length of time, in seconds, that the training job can run. If - // model training does not complete during this time, Amazon SageMaker ends - // the job. If value is not specified, default value is 1 day. Maximum value - // is 28 days. + // The maximum length of time, in seconds, that the training or compilation + // job can run. If the job does not complete during this time, Amazon SageMaker + // ends the job. If the value is not specified, the default value is 1 day. The maximum + // value is 28 days. MaxRuntimeInSeconds *int64 `min:"1" type:"integer"` + + // The maximum length of time, in seconds, that you are willing to wait + // for a managed spot training job to complete. It is the amount of time spent + // waiting for Spot capacity plus the amount of time the training job runs. + // It must be equal to or greater than MaxRuntimeInSeconds. + MaxWaitTimeInSeconds *int64 `min:"1" type:"integer"` } // String returns the string representation @@ -22899,6 +23524,9 @@ func (s *StoppingCondition) Validate() error { if s.MaxRuntimeInSeconds != nil && *s.MaxRuntimeInSeconds < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxRuntimeInSeconds", 1)) } + if s.MaxWaitTimeInSeconds != nil && *s.MaxWaitTimeInSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxWaitTimeInSeconds", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -22912,6 +23540,12 @@ func (s *StoppingCondition) SetMaxRuntimeInSeconds(v int64) *StoppingCondition { return s } +// SetMaxWaitTimeInSeconds sets the MaxWaitTimeInSeconds field's value. +func (s *StoppingCondition) SetMaxWaitTimeInSeconds(v int64) *StoppingCondition { + s.MaxWaitTimeInSeconds = &v + return s +} + // Describes a work team of a vendor that does the labelling job. type SubscribedWorkteam struct { _ struct{} `type:"structure"` @@ -23133,28 +23767,38 @@ type TrainingJob struct { // Amazon SageMaker provides primary statuses and secondary statuses that apply // to each of them: // - // InProgressStarting - Starting the training job. + // InProgress // - // Downloading - An optional stage for algorithms that support File training - // input mode. It indicates that data is being downloaded to the ML storage - // volumes. + // * Starting - Starting the training job. // - // Training - Training is in progress. + // * Downloading - An optional stage for algorithms that support File training + // input mode. It indicates that data is being downloaded to the ML storage + // volumes. // - // Uploading - Training is complete and the model artifacts are being uploaded - // to the S3 location. + // * Training - Training is in progress. // - // CompletedCompleted - The training job has completed. + // * Uploading - Training is complete and the model artifacts are being uploaded + // to the S3 location. // - // FailedFailed - The training job has failed. The reason for the failure is - // returned in the FailureReason field of DescribeTrainingJobResponse. + // Completed // - // StoppedMaxRuntimeExceeded - The job stopped because it exceeded the maximum - // allowed runtime. + // * Completed - The training job has completed. + // + // Failed + // + // * Failed - The training job has failed.
The reason for the failure is + // returned in the FailureReason field of DescribeTrainingJobResponse. // - // Stopped - The training job has stopped. + // Stopped // - // StoppingStopping - Stopping the training job. + // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum + // allowed runtime. + // + // * Stopped - The training job has stopped. + // + // Stopping + // + // * Stopping - Stopping the training job. // // Valid values for SecondaryStatus are subject to change. // @@ -23171,7 +23815,13 @@ type TrainingJob struct { // through. SecondaryStatusTransitions []*SecondaryStatusTransition `type:"list"` - // The condition under which to stop the training job. + // Specifies a limit to how long a model training job can run. When the job + // reaches the time limit, Amazon SageMaker ends the training job. Use this + // API to cap model training costs. + // + // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. StoppingCondition *StoppingCondition `type:"structure"` // An array of key-value pairs. For more information, see Using Cost Allocation @@ -23412,10 +24062,12 @@ type TrainingJobDefinition struct { // ResourceConfig is a required field ResourceConfig *ResourceConfig `type:"structure" required:"true"` - // Sets a duration for training. Use this parameter to cap model training costs. + // Specifies a limit to how long a model training job can run. When the job + // reaches the time limit, Amazon SageMaker ends the training job. Use this + // API to cap model training costs. // // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which - // delays job termination for 120 seconds. Algorithms might use this 120-second + // delays job termination for 120 seconds. Algorithms can use this 120-second // window to save the model artifacts. // // StoppingCondition is a required field @@ -24221,21 +24873,13 @@ type TransformOutput struct { // encrypt the model artifacts at rest using Amazon S3 server-side encryption. // The KmsKeyId can be any of the following formats: // - // * // KMS Key ID + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // - // "1234abcd-12ab-34cd-56ef-1234567890ab" + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a KMS Key + // * // KMS Key Alias "alias/ExampleAlias" // - // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" - // - // * // KMS Key Alias - // - // "alias/ExampleAlias" - // - // * // Amazon Resource Name (ARN) of a KMS Key Alias - // - // "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" + // * // Amazon Resource Name (ARN) of a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // // If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS // key for Amazon S3 for your role's account. For more information, see KMS-Managed @@ -24320,14 +24964,14 @@ type TransformResources struct { _ struct{} `type:"structure"` // The number of ML compute instances to use in the transform job. For distributed - // transform, provide a value greater than 1. The default value is 1. + // transform jobs, specify a value greater than 1. The default value is 1. 
// // InstanceCount is a required field InstanceCount *int64 `min:"1" type:"integer" required:"true"` - // The ML compute instance type for the transform job. For using built-in algorithms - // to transform moderately sized datasets, ml.m4.xlarge or ml.m5.large should - // suffice. There is no default value for InstanceType. + // The ML compute instance type for the transform job. If you are using built-in + // algorithms to transform moderately sized datasets, we recommend using ml.m4.xlarge + // or ml.m5.large instance types. // // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"TransformInstanceType"` @@ -24337,13 +24981,9 @@ type TransformResources struct { // that run the batch transform job. The VolumeKmsKeyId can be any of the following // formats: // - // * // KMS Key ID - // - // "1234abcd-12ab-34cd-56ef-1234567890ab" + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a KMS Key - // - // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string `type:"string"` } @@ -24405,44 +25045,27 @@ type TransformS3DataSource struct { // file containing a list of object keys that you want Amazon SageMaker to use // for batch transform. // + // The following values are compatible: ManifestFile, S3Prefix + // + // The following value is not compatible: AugmentedManifestFile + // // S3DataType is a required field S3DataType *string `type:"string" required:"true" enum:"S3DataType"` // Depending on the value specified for the S3DataType, identifies either a // key name prefix or a manifest. For example: // - // * A key name prefix might look like this: s3://bucketname/exampleprefix. - // + // * A key name prefix might look like this: s3://bucketname/exampleprefix. // - // * A manifest might look like this: s3://bucketname/example.manifest - // - // The manifest is an S3 object which is a JSON file with the following format: - // - // - // [ - // - // {"prefix": "s3://customer_bucket/some/prefix/"}, - // - // "relative/path/to/custdata-1", - // - // "relative/path/custdata-2", - // - // ... - // - // ] - // - // The preceding JSON matches the following S3Uris: - // - // s3://customer_bucket/some/prefix/relative/path/to/custdata-1 - // - // s3://customer_bucket/some/prefix/relative/path/custdata-1 - // - // ... - // - // The complete set of S3Uris in this manifest constitutes the input data for - // the channel for this datasource. The object that each S3Uris points to - // must be readable by the IAM role that Amazon SageMaker uses to perform - // tasks on your behalf. + // * A manifest might look like this: s3://bucketname/example.manifest The + // manifest is an S3 object which is a JSON file with the following format: + // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", + // "relative/path/custdata-2", ... ] The preceding JSON matches the following + // S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 + // ... The complete set of S3Uris in this manifest constitutes the input + // data for the channel for this datasource. The object that each S3Uris + // points to must be readable by the IAM role that Amazon SageMaker uses + // to perform tasks on your behalf.
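A sketch of wiring the manifest-based data source just described into a transform input; the bucket and manifest key are the doc comment's own examples, everything else is an assumption:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// ManifestFile and S3Prefix are the S3DataType values the doc above calls
	// compatible with batch transform; AugmentedManifestFile is not.
	input := &sagemaker.TransformInput{
		DataSource: &sagemaker.TransformDataSource{
			S3DataSource: &sagemaker.TransformS3DataSource{
				S3DataType: aws.String(sagemaker.S3DataTypeManifestFile),
				S3Uri:      aws.String("s3://bucketname/example.manifest"),
			},
		},
	}
	fmt.Println(input)
}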
// // S3Uri is a required field S3Uri *string `type:"string" required:"true"` @@ -24533,8 +25156,8 @@ type UiConfig struct { _ struct{} `type:"structure"` // The Amazon S3 bucket location of the UI template. For more information about - // the contents of a UI template, see Creating Your Custom Labeling Task Template - // (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). + // the contents of a UI template, see Creating Your Custom Labeling Task Template + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). // // UiTemplateS3Uri is a required field UiTemplateS3Uri *string `type:"string" required:"true"` @@ -24869,41 +25492,49 @@ type UpdateNotebookInstanceInput struct { // A list of the Elastic Inference (EI) instance types to associate with this // notebook instance. Currently only one EI instance type can be associated // with a notebook instance. For more information, see Using Elastic Inference - // in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // The Git repository to associate with the notebook instance as its default // code repository. This can be either the name of a Git repository stored as // a resource in your account, or the URL of a Git repository in AWS CodeCommit - // (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or // in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // A list of the Elastic Inference (EI) instance types to remove from this notebook - // instance. + // instance. This operation is idempotent. If you specify an accelerator type + // that is not associated with the notebook instance when you call this method, + // it does not throw an error. DisassociateAcceleratorTypes *bool `type:"boolean"` // A list of names or URLs of the default Git repositories to remove from this - // notebook instance. + // notebook instance. This operation is idempotent.
If you specify a Git repository + // that is not associated with the notebook instance when you call this method, + // it does not throw an error. DisassociateAdditionalCodeRepositories *bool `type:"boolean"` // The name or URL of the default Git repository to remove from this notebook - // instance. + // instance. This operation is idempotent. If you specify a Git repository that + // is not associated with the notebook instance when you call this method, it + // does not throw an error. DisassociateDefaultCodeRepository *bool `type:"boolean"` // Set to true to remove the notebook instance lifecycle configuration currently - // associated with the notebook instance. + // associated with the notebook instance. This operation is idempotent. If you + // specify a lifecycle configuration that is not associated with the notebook + // instance when you call this method, it does not throw an error. DisassociateLifecycleConfig *bool `type:"boolean"` // The Amazon ML compute instance type. @@ -24935,7 +25566,12 @@ type UpdateNotebookInstanceInput struct { RootAccess *string `type:"string" enum:"RootAccess"` // The size, in GB, of the ML storage volume to attach to the notebook instance. - // The default value is 5 GB. + // The default value is 5 GB. ML storage volumes are encrypted, so Amazon SageMaker + // can't determine the amount of available free space on the volume. Because + // of this, you can increase the volume size when you update a notebook instance, + // but you can't decrease the volume size. If you want to decrease the size + // of the ML storage volume in use, create a new notebook instance with the + // desired size. VolumeSizeInGB *int64 `min:"5" type:"integer"` } @@ -25057,11 +25693,13 @@ type UpdateNotebookInstanceLifecycleConfigInput struct { // NotebookInstanceLifecycleConfigName is a required field NotebookInstanceLifecycleConfigName *string `type:"string" required:"true"` - // The shell script that runs only once, when you create a notebook instance + // The shell script that runs only once, when you create a notebook instance. + // The shell script must be a base64-encoded string. OnCreate []*NotebookInstanceLifecycleHook `type:"list"` // The shell script that runs every time you start a notebook instance, including - // when you create the notebook instance. + // when you create the notebook instance. The shell script must be a base64-encoded + // string. OnStart []*NotebookInstanceLifecycleHook `type:"list"` } @@ -25163,6 +25801,9 @@ type UpdateWorkteamInput struct { // A list of MemberDefinition objects that contain the updated work team members. MemberDefinitions []*MemberDefinition `min:"1" type:"list"` + // Configures SNS topic notifications for available or expiring work items + NotificationConfiguration *NotificationConfiguration `type:"structure"` + // The name of the work team to update. // // WorkteamName is a required field @@ -25223,6 +25864,12 @@ func (s *UpdateWorkteamInput) SetMemberDefinitions(v []*MemberDefinition) *Updat return s } +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *UpdateWorkteamInput) SetNotificationConfiguration(v *NotificationConfiguration) *UpdateWorkteamInput { + s.NotificationConfiguration = v + return s +} + // SetWorkteamName sets the WorkteamName field's value. 
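The UpdateWorkteamInput hunk above adds an optional NotificationConfiguration. A hedged sketch of the call (the work team name and SNS topic ARN are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	// Point work-item notifications at an SNS topic (placeholder ARN).
	_, err := svc.UpdateWorkteam(&sagemaker.UpdateWorkteamInput{
		WorkteamName: aws.String("example-workteam"),
		NotificationConfiguration: &sagemaker.NotificationConfiguration{
			NotificationTopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}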
func (s *UpdateWorkteamInput) SetWorkteamName(v string) *UpdateWorkteamInput { s.WorkteamName = &v @@ -25271,6 +25918,11 @@ type VpcConfig struct { // The ID of the subnets in the VPC to which you want to connect your training // job or model. // + // Amazon EC2 P3 accelerated computing instances are not available in the c/d/e + // availability zones of region us-east-1. If you want to create endpoints with + // P3 instances in VPC mode in region us-east-1, create subnets in a/b/f availability + // zones instead. + // // Subnets is a required field Subnets []*string `min:"1" type:"list" required:"true"` } @@ -25339,6 +25991,10 @@ type Workteam struct { // MemberDefinitions is a required field MemberDefinitions []*MemberDefinition `min:"1" type:"list" required:"true"` + // Configures SNS notifications of available or expiring work items for work + // teams. + NotificationConfiguration *NotificationConfiguration `type:"structure"` + // The Amazon Marketplace identifier for a vendor's work team. ProductListingIds []*string `type:"list"` @@ -25391,6 +26047,12 @@ func (s *Workteam) SetMemberDefinitions(v []*MemberDefinition) *Workteam { return s } +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *Workteam) SetNotificationConfiguration(v *NotificationConfiguration) *Workteam { + s.NotificationConfiguration = v + return s +} + // SetProductListingIds sets the ProductListingIds field's value. func (s *Workteam) SetProductListingIds(v []*string) *Workteam { s.ProductListingIds = v @@ -25600,6 +26262,22 @@ const ( EndpointStatusFailed = "Failed" ) +const ( + // FileSystemAccessModeRw is a FileSystemAccessMode enum value + FileSystemAccessModeRw = "rw" + + // FileSystemAccessModeRo is a FileSystemAccessMode enum value + FileSystemAccessModeRo = "ro" +) + +const ( + // FileSystemTypeEfs is a FileSystemType enum value + FileSystemTypeEfs = "EFS" + + // FileSystemTypeFsxLustre is a FileSystemType enum value + FileSystemTypeFsxLustre = "FSxLustre" +) + const ( // FrameworkTensorflow is a Framework enum value FrameworkTensorflow = "TENSORFLOW" @@ -25801,6 +26479,14 @@ const ( InstanceTypeMlP316xlarge = "ml.p3.16xlarge" ) +const ( + // JoinSourceInput is a JoinSource enum value + JoinSourceInput = "Input" + + // JoinSourceNone is a JoinSource enum value + JoinSourceNone = "None" +) + const ( // LabelingJobStatusInProgress is a LabelingJobStatus enum value LabelingJobStatusInProgress = "InProgress" @@ -25884,6 +26570,15 @@ const ( // NotebookInstanceAcceleratorTypeMlEia1Xlarge is a NotebookInstanceAcceleratorType enum value NotebookInstanceAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + // NotebookInstanceAcceleratorTypeMlEia2Medium is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // NotebookInstanceAcceleratorTypeMlEia2Large is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // NotebookInstanceAcceleratorTypeMlEia2Xlarge is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26012,6 +26707,15 @@ const ( // ProductionVariantAcceleratorTypeMlEia1Xlarge is a ProductionVariantAcceleratorType enum value ProductionVariantAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + // ProductionVariantAcceleratorTypeMlEia2Medium is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // 
ProductionVariantAcceleratorTypeMlEia2Large is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // ProductionVariantAcceleratorTypeMlEia2Xlarge is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26060,6 +26764,24 @@ const ( // ProductionVariantInstanceTypeMlM524xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlM524xlarge = "ml.m5.24xlarge" + // ProductionVariantInstanceTypeMlM5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dLarge = "ml.m5d.large" + + // ProductionVariantInstanceTypeMlM5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dXlarge = "ml.m5d.xlarge" + + // ProductionVariantInstanceTypeMlM5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d2xlarge = "ml.m5d.2xlarge" + + // ProductionVariantInstanceTypeMlM5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d4xlarge = "ml.m5d.4xlarge" + + // ProductionVariantInstanceTypeMlM5d12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d12xlarge = "ml.m5d.12xlarge" + + // ProductionVariantInstanceTypeMlM5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d24xlarge = "ml.m5d.24xlarge" + // ProductionVariantInstanceTypeMlC4Large is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC4Large = "ml.c4.large" @@ -26110,6 +26832,78 @@ const ( // ProductionVariantInstanceTypeMlC518xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC518xlarge = "ml.c5.18xlarge" + + // ProductionVariantInstanceTypeMlC5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dLarge = "ml.c5d.large" + + // ProductionVariantInstanceTypeMlC5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dXlarge = "ml.c5d.xlarge" + + // ProductionVariantInstanceTypeMlC5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d2xlarge = "ml.c5d.2xlarge" + + // ProductionVariantInstanceTypeMlC5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d4xlarge = "ml.c5d.4xlarge" + + // ProductionVariantInstanceTypeMlC5d9xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d9xlarge = "ml.c5d.9xlarge" + + // ProductionVariantInstanceTypeMlC5d18xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d18xlarge = "ml.c5d.18xlarge" + + // ProductionVariantInstanceTypeMlG4dnXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dnXlarge = "ml.g4dn.xlarge" + + // ProductionVariantInstanceTypeMlG4dn2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dn2xlarge = "ml.g4dn.2xlarge" + + // ProductionVariantInstanceTypeMlG4dn4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dn4xlarge = "ml.g4dn.4xlarge" + + // ProductionVariantInstanceTypeMlG4dn8xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dn8xlarge = "ml.g4dn.8xlarge" + + // ProductionVariantInstanceTypeMlG4dn12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dn12xlarge = "ml.g4dn.12xlarge" + + // 
ProductionVariantInstanceTypeMlG4dn16xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlG4dn16xlarge = "ml.g4dn.16xlarge" + + // ProductionVariantInstanceTypeMlR5Large is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5Large = "ml.r5.large" + + // ProductionVariantInstanceTypeMlR5Xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5Xlarge = "ml.r5.xlarge" + + // ProductionVariantInstanceTypeMlR52xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR52xlarge = "ml.r5.2xlarge" + + // ProductionVariantInstanceTypeMlR54xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR54xlarge = "ml.r5.4xlarge" + + // ProductionVariantInstanceTypeMlR512xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR512xlarge = "ml.r5.12xlarge" + + // ProductionVariantInstanceTypeMlR524xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR524xlarge = "ml.r5.24xlarge" + + // ProductionVariantInstanceTypeMlR5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dLarge = "ml.r5d.large" + + // ProductionVariantInstanceTypeMlR5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dXlarge = "ml.r5d.xlarge" + + // ProductionVariantInstanceTypeMlR5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d2xlarge = "ml.r5d.2xlarge" + + // ProductionVariantInstanceTypeMlR5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d4xlarge = "ml.r5d.4xlarge" + + // ProductionVariantInstanceTypeMlR5d12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d12xlarge = "ml.r5d.12xlarge" + + // ProductionVariantInstanceTypeMlR5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d24xlarge = "ml.r5d.24xlarge" ) const ( @@ -26196,6 +26990,12 @@ const ( // SecondaryStatusFailed is a SecondaryStatus enum value SecondaryStatusFailed = "Failed" + + // SecondaryStatusInterrupted is a SecondaryStatus enum value + SecondaryStatusInterrupted = "Interrupted" + + // SecondaryStatusMaxWaitTimeExceeded is a SecondaryStatus enum value + SecondaryStatusMaxWaitTimeExceeded = "MaxWaitTimeExceeded" ) const ( @@ -26232,6 +27032,9 @@ const ( ) const ( + // TargetDeviceLambda is a TargetDevice enum value + TargetDeviceLambda = "lambda" + // TargetDeviceMlM4 is a TargetDevice enum value TargetDeviceMlM4 = "ml_m4" @@ -26256,6 +27059,9 @@ const ( // TargetDeviceJetsonTx2 is a TargetDevice enum value TargetDeviceJetsonTx2 = "jetson_tx2" + // TargetDeviceJetsonNano is a TargetDevice enum value + TargetDeviceJetsonNano = "jetson_nano" + // TargetDeviceRasp3b is a TargetDevice enum value TargetDeviceRasp3b = "rasp3b" @@ -26267,6 +27073,18 @@ const ( // TargetDeviceRk3288 is a TargetDevice enum value TargetDeviceRk3288 = "rk3288" + + // TargetDeviceAisage is a TargetDevice enum value + TargetDeviceAisage = "aisage" + + // TargetDeviceSbeC is a TargetDevice enum value + TargetDeviceSbeC = "sbe_c" + + // TargetDeviceQcs605 is a TargetDevice enum value + TargetDeviceQcs605 = "qcs605" + + // TargetDeviceQcs603 is a TargetDevice enum value + TargetDeviceQcs603 = "qcs603" ) const ( @@ -26341,6 +27159,9 @@ const ( // TrainingInstanceTypeMlP316xlarge is a TrainingInstanceType enum value TrainingInstanceTypeMlP316xlarge = "ml.p3.16xlarge" + 
// TrainingInstanceTypeMlP3dn24xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP3dn24xlarge = "ml.p3dn.24xlarge" + // TrainingInstanceTypeMlC5Xlarge is a TrainingInstanceType enum value TrainingInstanceTypeMlC5Xlarge = "ml.c5.xlarge" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go index 7ae1df73414..ae95a939639 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SageMaker { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "sagemaker" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SageMaker { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SageMaker { svc := &SageMaker{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-24", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go index 9ff532ab77d..f4233729fa4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go @@ -213,27 +213,27 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // also creates an initial secret version and automatically attaches the staging // label AWSCURRENT to the new version. // -// If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses -// the account's default AWS managed customer master key (CMK) with the alias -// aws/secretsmanager. If this key doesn't already exist in your account then -// Secrets Manager creates it for you automatically. All users and roles in -// the same AWS account automatically have access to use the default CMK. Note -// that if an Secrets Manager API call results in AWS having to create the account's -// AWS-managed CMK, it can result in a one-time significant delay in returning -// the result. -// -// If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then you -// must create and use a custom AWS KMS CMK because you can't access the default -// CMK for the account using credentials from a different AWS account. Store -// the ARN of the CMK in the secret when you create the secret or when you update -// it by including it in the KMSKeyId. 
If you call an API that must encrypt -// or decrypt SecretString or SecretBinary using credentials from a different -// account then the AWS KMS key policy must grant cross-account access to that -// other account's user or role for both the kms:GenerateDataKey and kms:Decrypt -// operations. +// * If you call an operation that needs to encrypt or decrypt the SecretString +// or SecretBinary for a secret in the same account as the calling user and +// that secret doesn't specify an AWS KMS encryption key, Secrets Manager +// uses the account's default AWS managed customer master key (CMK) with +// the alias aws/secretsmanager. If this key doesn't already exist in your +// account then Secrets Manager creates it for you automatically. All users +// and roles in the same AWS account automatically have access to use the +// default CMK. Note that if a Secrets Manager API call results in AWS having +// to create the account's AWS-managed CMK, it can result in a one-time significant +// delay in returning the result. +// +// * If the secret is in a different AWS account from the credentials calling +// an API that requires encryption or decryption of the secret value then +// you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. // // Minimum permissions // @@ -251,7 +250,6 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // // * secretsmanager:TagResource - needed only if you include the Tags parameter. // - // // Related operations // // * To delete a secret, use DeleteSecret. @@ -509,15 +508,15 @@ func (c *SecretsManager) DeleteSecretRequest(input *DeleteSecretInput) (req *req // scheduled for deletion. If you need to access that information, you must // cancel the deletion with RestoreSecret and then retrieve the information. // -// There is no explicit operation to delete a version of a secret. Instead, -// remove all staging labels from the VersionStage field of a version. That -// marks the version as deprecated and allows Secrets Manager to delete it as -// needed. Versions that do not have any staging labels do not show up in ListSecretVersionIds -// unless you specify IncludeDeprecated. +// * There is no explicit operation to delete a version of a secret. Instead, +// remove all staging labels from the VersionStage field of a version. That +// marks the version as deprecated and allows Secrets Manager to delete it +// as needed. Versions that do not have any staging labels do not show up +// in ListSecretVersionIds unless you specify IncludeDeprecated. +// +// * The permanent secret deletion at the end of the waiting period is performed +// as a background task with low priority. There is no guarantee of a specific +// time after the recovery window for the actual delete operation to occur.
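As a concrete companion to the recovery-window behavior described above, a sketch of a DeleteSecret call; the secret name is a placeholder and the window (which must be between 7 and 30 days) is an arbitrary assumption:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := secretsmanager.New(sess)

	out, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
		SecretId:             aws.String("example/app/credentials"), // placeholder
		RecoveryWindowInDays: aws.Int64(7),
	})
	if err != nil {
		log.Fatal(err)
	}
	// After DeletionDate the actual removal happens as a low-priority
	// background task, with no guaranteed completion time.
	fmt.Println("scheduled for deletion on", aws.TimeValue(out.DeletionDate))
}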
// // Minimum permissions // @@ -1142,7 +1141,7 @@ func (c *SecretsManager) ListSecretVersionIdsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a ListSecretVersionIds operation. // pageNum := 0 // err := client.ListSecretVersionIdsPages(params, -// func(page *ListSecretVersionIdsOutput, lastPage bool) bool { +// func(page *secretsmanager.ListSecretVersionIdsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1174,10 +1173,12 @@ func (c *SecretsManager) ListSecretVersionIdsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSecretVersionIdsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSecretVersionIdsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1302,7 +1303,7 @@ func (c *SecretsManager) ListSecretsWithContext(ctx aws.Context, input *ListSecr // // Example iterating over at most 3 pages of a ListSecrets operation. // pageNum := 0 // err := client.ListSecretsPages(params, -// func(page *ListSecretsOutput, lastPage bool) bool { +// func(page *secretsmanager.ListSecretsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1334,10 +1335,12 @@ func (c *SecretsManager) ListSecretsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSecretsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSecretsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1539,27 +1542,27 @@ func (c *SecretsManager) PutSecretValueRequest(input *PutSecretValueInput) (req // However, if the secret data is different, then the operation fails because // you cannot modify an existing version; you can only create new ones. // -// If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses -// the account's default AWS managed customer master key (CMK) with the alias -// aws/secretsmanager. If this key doesn't already exist in your account then -// Secrets Manager creates it for you automatically. All users and roles in -// the same AWS account automatically have access to use the default CMK. Note -// that if an Secrets Manager API call results in AWS having to create the account's -// AWS-managed CMK, it can result in a one-time significant delay in returning -// the result. -// -// If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then you -// must create and use a custom AWS KMS CMK because you can't access the default -// CMK for the account using credentials from a different AWS account. Store -// the ARN of the CMK in the secret when you create the secret or when you update -// it by including it in the KMSKeyId. If you call an API that must encrypt -// or decrypt SecretString or SecretBinary using credentials from a different -// account then the AWS KMS key policy must grant cross-account access to that -// other account's user or role for both the kms:GenerateDataKey and kms:Decrypt -// operations. 
+// * If you call an operation that needs to encrypt or decrypt the SecretString +// or SecretBinary for a secret in the same account as the calling user and +// that secret doesn't specify an AWS KMS encryption key, Secrets Manager +// uses the account's default AWS managed customer master key (CMK) with +// the alias aws/secretsmanager. If this key doesn't already exist in your +// account then Secrets Manager creates it for you automatically. All users +// and roles in the same AWS account automatically have access to use the +// default CMK. Note that if a Secrets Manager API call results in AWS having +// to create the account's AWS-managed CMK, it can result in a one-time significant +// delay in returning the result. +// +// * If the secret is in a different AWS account from the credentials calling +// an API that requires encryption or decryption of the secret value then +// you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. // // Minimum permissions // @@ -2215,7 +2218,7 @@ func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *req // UpdateSecret API operation for AWS Secrets Manager. // // Modifies many of the details of the specified secret. If you include a ClientRequestToken -// and eitherSecretString or SecretBinary then it also creates a new version +// and either SecretString or SecretBinary then it also creates a new version // attached to the secret. // // To modify the rotation configuration of a secret, use RotateSecret instead. @@ -2233,27 +2236,27 @@ func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *req // Secrets Manager automatically attaches the staging label AWSCURRENT to // the new version. // -// If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses -// the account's default AWS managed customer master key (CMK) with the alias -// aws/secretsmanager. If this key doesn't already exist in your account then -// Secrets Manager creates it for you automatically. All users and roles in -// the same AWS account automatically have access to use the default CMK. Note -// that if an Secrets Manager API call results in AWS having to create the account's -// AWS-managed CMK, it can result in a one-time significant delay in returning -// the result. -// -// If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then you -// must create and use a custom AWS KMS CMK because you can't access the default -// CMK for the account using credentials from a different AWS account. Store -// the ARN of the CMK in the secret when you create the secret or when you update -// it by including it in the KMSKeyId.
If you call an API that must encrypt -// or decrypt SecretString or SecretBinary using credentials from a different -// account then the AWS KMS key policy must grant cross-account access to that -// other account's user or role for both the kms:GenerateDataKey and kms:Decrypt -// operations. +// * If you call an operation that needs to encrypt or decrypt the SecretString +// or SecretBinary for a secret in the same account as the calling user and +// that secret doesn't specify an AWS KMS encryption key, Secrets Manager +// uses the account's default AWS managed customer master key (CMK) with +// the alias aws/secretsmanager. If this key doesn't already exist in your +// account then Secrets Manager creates it for you automatically. All users +// and roles in the same AWS account automatically have access to use the +// default CMK. Note that if a Secrets Manager API call results in AWS having +// to create the account's AWS-managed CMK, it can result in a one-time significant +// delay in returning the result. +// +// * If the secret is in a different AWS account from the credentials calling +// an API that requires encryption or decryption of the secret value then +// you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. // // Minimum permissions // @@ -2696,14 +2699,14 @@ type CreateSecretInput struct { // secret. Each tag is a "Key" and "Value" pair of strings. This operation only // appends tags to the existing list of tags. To remove tags, you must use UntagResource. // - // Secrets Manager tag key names are case sensitive. A tag with the key "ABC" - // is a different tag from one with key "abc". + // * Secrets Manager tag key names are case sensitive. A tag with the key + // "ABC" is a different tag from one with key "abc". // - // If you check tags in IAM policy Condition elements as part of your security - // strategy, then adding or removing a tag can change permissions. If the successful - // completion of this operation would result in you losing your permissions - // for this secret, then this operation is blocked and returns an Access Denied - // error. + // * If you check tags in IAM policy Condition elements as part of your security + // strategy, then adding or removing a tag can change permissions. If the + // successful completion of this operation would result in you losing your + // permissions for this secret, then this operation is blocked and returns + // an Access Denied error. // // This parameter requires a JSON text string argument. For information on how // to format a JSON parameter for the various command line tool environments, @@ -3182,6 +3185,8 @@ type DescribeSecretOutput struct { // The user-provided friendly name of the secret. Name *string `min:"1" type:"string"` + OwningService *string `min:"1" type:"string"` + // Specifies whether automatic rotation is enabled for this secret.
// // To enable rotation, use RotateSecret with AutomaticallyRotateAfterDays set @@ -3267,6 +3272,12 @@ func (s *DescribeSecretOutput) SetName(v string) *DescribeSecretOutput { return s } +// SetOwningService sets the OwningService field's value. +func (s *DescribeSecretOutput) SetOwningService(v string) *DescribeSecretOutput { + s.OwningService = &v + return s +} + // SetRotationEnabled sets the RotationEnabled field's value. func (s *DescribeSecretOutput) SetRotationEnabled(v bool) *DescribeSecretOutput { s.RotationEnabled = &v @@ -3420,7 +3431,7 @@ type GetRandomPasswordOutput struct { _ struct{} `type:"structure"` // A string with the generated password. - RandomPassword *string `type:"string"` + RandomPassword *string `type:"string" sensitive:"true"` } // String returns the string representation @@ -4648,6 +4659,8 @@ type SecretListEntry struct { // in the folder prod. Name *string `min:"1" type:"string"` + OwningService *string `min:"1" type:"string"` + // Indicated whether automatic, scheduled rotation is enabled for this secret. RotationEnabled *bool `type:"boolean"` @@ -4730,6 +4743,12 @@ func (s *SecretListEntry) SetName(v string) *SecretListEntry { return s } +// SetOwningService sets the OwningService field's value. +func (s *SecretListEntry) SetOwningService(v string) *SecretListEntry { + s.OwningService = &v + return s +} + // SetRotationEnabled sets the RotationEnabled field's value. func (s *SecretListEntry) SetRotationEnabled(v bool) *SecretListEntry { s.RotationEnabled = &v diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go index c4758e96dac..eeca2d9fd68 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecretsManager { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "secretsmanager" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecretsManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecretsManager { svc := &SecretsManager{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-17", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go index f430ea47702..53ea5acaef2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go @@ -58,7 +58,10 @@ func (c *SecurityHub) AcceptInvitationRequest(input *AcceptInvitationInput) (req // AcceptInvitation API operation for AWS SecurityHub. 
// -// Accepts the invitation to be monitored by a master SecurityHub account. +// Accepts the invitation to be a member account and be monitored by the Security +// Hub master account that the invitation was sent from. When the member account +// accepts the invitation, permission is granted to the master account to view +// findings generated in the member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -72,18 +75,18 @@ func (c *SecurityHub) AcceptInvitationRequest(input *AcceptInvitationInput) (req // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/AcceptInvitation func (c *SecurityHub) AcceptInvitation(input *AcceptInvitationInput) (*AcceptInvitationOutput, error) { @@ -151,10 +154,8 @@ func (c *SecurityHub) BatchDisableStandardsRequest(input *BatchDisableStandardsI // BatchDisableStandards API operation for AWS SecurityHub. // -// Disables the standards specified by the standards subscription ARNs. In the -// context of Security Hub, supported standards (for example, CIS AWS Foundations) -// are automated and continuous checks that help determine your compliance status -// against security industry (including AWS) best practices. +// Disables the standards specified by the provided StandardsSubscriptionArns. +// For more information, see Standards Supported in AWS Security Hub (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -168,11 +169,11 @@ func (c *SecurityHub) BatchDisableStandardsRequest(input *BatchDisableStandardsI // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. 
// // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -244,10 +245,9 @@ func (c *SecurityHub) BatchEnableStandardsRequest(input *BatchEnableStandardsInp // BatchEnableStandards API operation for AWS SecurityHub. // -// Enables the standards specified by the standards ARNs. In the context of -// Security Hub, supported standards (for example, CIS AWS Foundations) are -// automated and continuous checks that help determine your compliance status -// against security industry (including AWS) best practices. +// Enables the standards specified by the provided standardsArn. In this release, +// only CIS AWS Foundations standards are supported. For more information, see +// Standards Supported in AWS Security Hub (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -261,11 +261,11 @@ func (c *SecurityHub) BatchEnableStandardsRequest(input *BatchEnableStandardsInp // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -337,8 +337,10 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // BatchImportFindings API operation for AWS SecurityHub. // -// Imports security findings that are generated by the integrated third-party -// products into Security Hub. +// Imports security findings generated from an integrated third-party product +// into Security Hub. This action is requested by the integrated product to +// import its findings into Security Hub. The maximum allowed size for a finding +// is 240 Kb. An error is returned for any finding larger than 240 Kb. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -352,15 +354,15 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchImportFindings func (c *SecurityHub) BatchImportFindings(input *BatchImportFindingsInput) (*BatchImportFindingsOutput, error) { @@ -384,6 +386,101 @@ func (c *SecurityHub) BatchImportFindingsWithContext(ctx aws.Context, input *Bat return out, req.Send() } +const opCreateActionTarget = "CreateActionTarget" + +// CreateActionTargetRequest generates a "aws/request.Request" representing the +// client's request for the CreateActionTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateActionTarget for more information on using the CreateActionTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateActionTargetRequest method. +// req, resp := client.CreateActionTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateActionTarget +func (c *SecurityHub) CreateActionTargetRequest(input *CreateActionTargetInput) (req *request.Request, output *CreateActionTargetOutput) { + op := &request.Operation{ + Name: opCreateActionTarget, + HTTPMethod: "POST", + HTTPPath: "/actionTargets", + } + + if input == nil { + input = &CreateActionTargetInput{} + } + + output = &CreateActionTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateActionTarget API operation for AWS SecurityHub. +// +// Creates a custom action target in Security Hub. You can use custom actions +// on findings and insights in Security Hub to trigger target actions in Amazon +// CloudWatch Events. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation CreateActionTarget for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// The resource specified in the request conflicts with an existing resource. 
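A minimal caller-side sketch of the new CreateActionTarget operation added above (assuming an initialized session; the Name, Description, and Id values are illustrative, not taken from this change):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Credentials and region come from the environment or shared config.
	svc := securityhub.New(session.Must(session.NewSession()))

	// Register a custom action target; invoking it on findings or insights
	// then emits an event to Amazon CloudWatch Events.
	out, err := svc.CreateActionTarget(&securityhub.CreateActionTargetInput{
		Name:        aws.String("SendToRemediation"),                        // illustrative
		Description: aws.String("Route findings to a remediation workflow"), // illustrative
		Id:          aws.String("Remediation"),                              // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ActionTargetArn))
}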
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateActionTarget +func (c *SecurityHub) CreateActionTarget(input *CreateActionTargetInput) (*CreateActionTargetOutput, error) { + req, out := c.CreateActionTargetRequest(input) + return out, req.Send() +} + +// CreateActionTargetWithContext is the same as CreateActionTarget with the addition of +// the ability to pass a context and additional request options. +// +// See CreateActionTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) CreateActionTargetWithContext(ctx aws.Context, input *CreateActionTargetInput, opts ...request.Option) (*CreateActionTargetOutput, error) { + req, out := c.CreateActionTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateInsight = "CreateInsight" // CreateInsightRequest generates a "aws/request.Request" representing the @@ -428,8 +525,9 @@ func (c *SecurityHub) CreateInsightRequest(input *CreateInsightInput) (req *requ // CreateInsight API operation for AWS SecurityHub. // -// Creates an insight, which is a consolidation of findings that identifies -// a security area that requires attention or intervention. +// Creates a custom insight in Security Hub. An insight is a consolidation of +// findings that relate to a security issue that requires attention or remediation. +// Use the GroupByAttribute to group the related findings in the insight. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -443,15 +541,15 @@ func (c *SecurityHub) CreateInsightRequest(input *CreateInsightInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. @@ -522,8 +620,23 @@ func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *requ // CreateMembers API operation for AWS SecurityHub. // -// Creates member Security Hub accounts in the current AWS account (which becomes -// the master Security Hub account) that has Security Hub enabled. +// Creates a member association in Security Hub between the specified accounts +// and the account used to make the request, which is the master account. To +// successfully create a member, you must use this action from an account that +// already has Security Hub enabled. You can use the EnableSecurityHub to enable +// Security Hub. 
+// +// After you use CreateMembers to create member account associations in Security +// Hub, you need to use the InviteMembers action, which invites the accounts +// to enable Security Hub and become member accounts in Security Hub. If the +// invitation is accepted by the account owner, the account becomes a member +// account in Security Hub, and a permission policy is added that permits the +// master account to view the findings generated in the member account. When +// Security Hub is enabled in the invited account, findings start being sent +// to both the member and master accounts. +// +// You can remove the association between the master and member accounts by +// using the DisassociateFromMasterAccount or DisassociateMembers operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -537,15 +650,15 @@ func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. @@ -616,8 +729,7 @@ func (c *SecurityHub) DeclineInvitationsRequest(input *DeclineInvitationsInput) // DeclineInvitations API operation for AWS SecurityHub. // -// Declines invitations that are sent to this AWS account (invitee) by the AWS -// accounts (inviters) that are specified by the account IDs. +// Declines invitations to become a member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -631,14 +743,14 @@ func (c *SecurityHub) DeclineInvitationsRequest(input *DeclineInvitationsInput) // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. 
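The CreateMembers documentation above describes a two-step flow driven from the master account; a hedged sketch of that flow (the account ID and email are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))
	memberID := "111122223333" // placeholder account ID

	// Step 1: associate the target account with this (master) account.
	if _, err := svc.CreateMembers(&securityhub.CreateMembersInput{
		AccountDetails: []*securityhub.AccountDetails{
			{AccountId: aws.String(memberID), Email: aws.String("security@example.com")},
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Step 2: invite the account; findings flow only after it accepts.
	if _, err := svc.InviteMembers(&securityhub.InviteMembersInput{
		AccountIds: []*string{aws.String(memberID)},
	}); err != nil {
		log.Fatal(err)
	}
}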
// // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeclineInvitations func (c *SecurityHub) DeclineInvitations(input *DeclineInvitationsInput) (*DeclineInvitationsOutput, error) { @@ -662,6 +774,97 @@ func (c *SecurityHub) DeclineInvitationsWithContext(ctx aws.Context, input *Decl return out, req.Send() } +const opDeleteActionTarget = "DeleteActionTarget" + +// DeleteActionTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteActionTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteActionTarget for more information on using the DeleteActionTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteActionTargetRequest method. +// req, resp := client.DeleteActionTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteActionTarget +func (c *SecurityHub) DeleteActionTargetRequest(input *DeleteActionTargetInput) (req *request.Request, output *DeleteActionTargetOutput) { + op := &request.Operation{ + Name: opDeleteActionTarget, + HTTPMethod: "DELETE", + HTTPPath: "/actionTargets/{ActionTargetArn+}", + } + + if input == nil { + input = &DeleteActionTargetInput{} + } + + output = &DeleteActionTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteActionTarget API operation for AWS SecurityHub. +// +// Deletes a custom action target from Security Hub. Deleting a custom action +// target doesn't affect any findings or insights that were already sent to +// Amazon CloudWatch Events using the custom action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation DeleteActionTarget for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteActionTarget +func (c *SecurityHub) DeleteActionTarget(input *DeleteActionTargetInput) (*DeleteActionTargetOutput, error) { + req, out := c.DeleteActionTargetRequest(input) + return out, req.Send() +} + +// DeleteActionTargetWithContext is the same as DeleteActionTarget with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteActionTarget for details on how to use this API operation. 
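A short sketch of the WithContext variant described above, using a standard-library timeout context (the ARN is a placeholder; a plain context.Context satisfies aws.Context):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	// The request is cancelled if it outlives the timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Placeholder ARN; per the doc above, deleting the target does not
	// touch findings or insights already sent to CloudWatch Events.
	arn := "arn:aws:securityhub:us-west-2:111122223333:action/custom/Remediation"
	if _, err := svc.DeleteActionTargetWithContext(ctx, &securityhub.DeleteActionTargetInput{
		ActionTargetArn: aws.String(arn),
	}); err != nil {
		log.Fatal(err)
	}
}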
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) DeleteActionTargetWithContext(ctx aws.Context, input *DeleteActionTargetInput, opts ...request.Option) (*DeleteActionTargetOutput, error) { + req, out := c.DeleteActionTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteInsight = "DeleteInsight" // DeleteInsightRequest generates a "aws/request.Request" representing the @@ -706,7 +909,7 @@ func (c *SecurityHub) DeleteInsightRequest(input *DeleteInsightInput) (req *requ // DeleteInsight API operation for AWS SecurityHub. // -// Deletes an insight that is specified by the insight ARN. +// Deletes the insight specified by the InsightArn. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -720,18 +923,18 @@ func (c *SecurityHub) DeleteInsightRequest(input *DeleteInsightInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInsight func (c *SecurityHub) DeleteInsight(input *DeleteInsightInput) (*DeleteInsightOutput, error) { @@ -799,8 +1002,7 @@ func (c *SecurityHub) DeleteInvitationsRequest(input *DeleteInvitationsInput) (r // DeleteInvitations API operation for AWS SecurityHub. // -// Deletes invitations that are sent to this AWS account (invitee) by the AWS -// accounts (inviters) that are specified by their account IDs. +// Deletes invitations received by the AWS account to become a member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -814,18 +1016,18 @@ func (c *SecurityHub) DeleteInvitationsRequest(input *DeleteInvitationsInput) (r // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. 
The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInvitations func (c *SecurityHub) DeleteInvitations(input *DeleteInvitationsInput) (*DeleteInvitationsOutput, error) { @@ -893,8 +1095,7 @@ func (c *SecurityHub) DeleteMembersRequest(input *DeleteMembersInput) (req *requ // DeleteMembers API operation for AWS SecurityHub. // -// Deletes the Security Hub member accounts that are specified by the account -// IDs. +// Deletes the specified member accounts from Security Hub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -908,18 +1109,18 @@ func (c *SecurityHub) DeleteMembersRequest(input *DeleteMembersInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteMembers func (c *SecurityHub) DeleteMembers(input *DeleteMembersInput) (*DeleteMembersOutput, error) { @@ -943,154 +1144,206 @@ func (c *SecurityHub) DeleteMembersWithContext(ctx aws.Context, input *DeleteMem return out, req.Send() } -const opDisableImportFindingsForProduct = "DisableImportFindingsForProduct" +const opDescribeActionTargets = "DescribeActionTargets" -// DisableImportFindingsForProductRequest generates a "aws/request.Request" representing the -// client's request for the DisableImportFindingsForProduct operation. The "output" return +// DescribeActionTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActionTargets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisableImportFindingsForProduct for more information on using the DisableImportFindingsForProduct +// See DescribeActionTargets for more information on using the DescribeActionTargets // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisableImportFindingsForProductRequest method. -// req, resp := client.DisableImportFindingsForProductRequest(params) +// // Example sending a request using the DescribeActionTargetsRequest method. +// req, resp := client.DescribeActionTargetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct -func (c *SecurityHub) DisableImportFindingsForProductRequest(input *DisableImportFindingsForProductInput) (req *request.Request, output *DisableImportFindingsForProductOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeActionTargets +func (c *SecurityHub) DescribeActionTargetsRequest(input *DescribeActionTargetsInput) (req *request.Request, output *DescribeActionTargetsOutput) { op := &request.Operation{ - Name: opDisableImportFindingsForProduct, - HTTPMethod: "DELETE", - HTTPPath: "/productSubscriptions/{ProductSubscriptionArn+}", + Name: opDescribeActionTargets, + HTTPMethod: "POST", + HTTPPath: "/actionTargets/get", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DisableImportFindingsForProductInput{} + input = &DescribeActionTargetsInput{} } - output = &DisableImportFindingsForProductOutput{} + output = &DescribeActionTargetsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisableImportFindingsForProduct API operation for AWS SecurityHub. +// DescribeActionTargets API operation for AWS SecurityHub. // -// Cancels the subscription that allows a findings-generating solution (product) -// to import its findings into Security Hub. +// Returns a list of the custom action targets in Security Hub in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation DisableImportFindingsForProduct for usage and error information. +// API operation DescribeActionTargets for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. -// -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // -// * ErrCodeLimitExceededException "LimitExceededException" -// The request was rejected because it attempted to create resources beyond -// the current AWS account limits. 
The error code describes the limit exceeded. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct -func (c *SecurityHub) DisableImportFindingsForProduct(input *DisableImportFindingsForProductInput) (*DisableImportFindingsForProductOutput, error) { - req, out := c.DisableImportFindingsForProductRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeActionTargets +func (c *SecurityHub) DescribeActionTargets(input *DescribeActionTargetsInput) (*DescribeActionTargetsOutput, error) { + req, out := c.DescribeActionTargetsRequest(input) return out, req.Send() } -// DisableImportFindingsForProductWithContext is the same as DisableImportFindingsForProduct with the addition of +// DescribeActionTargetsWithContext is the same as DescribeActionTargets with the addition of // the ability to pass a context and additional request options. // -// See DisableImportFindingsForProduct for details on how to use this API operation. +// See DescribeActionTargets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) DisableImportFindingsForProductWithContext(ctx aws.Context, input *DisableImportFindingsForProductInput, opts ...request.Option) (*DisableImportFindingsForProductOutput, error) { - req, out := c.DisableImportFindingsForProductRequest(input) +func (c *SecurityHub) DescribeActionTargetsWithContext(ctx aws.Context, input *DescribeActionTargetsInput, opts ...request.Option) (*DescribeActionTargetsOutput, error) { + req, out := c.DescribeActionTargetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDisableSecurityHub = "DisableSecurityHub" +// DescribeActionTargetsPages iterates over the pages of a DescribeActionTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeActionTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeActionTargets operation. +// pageNum := 0 +// err := client.DescribeActionTargetsPages(params, +// func(page *securityhub.DescribeActionTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) DescribeActionTargetsPages(input *DescribeActionTargetsInput, fn func(*DescribeActionTargetsOutput, bool) bool) error { + return c.DescribeActionTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DisableSecurityHubRequest generates a "aws/request.Request" representing the -// client's request for the DisableSecurityHub operation. The "output" return +// DescribeActionTargetsPagesWithContext same as DescribeActionTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) DescribeActionTargetsPagesWithContext(ctx aws.Context, input *DescribeActionTargetsInput, fn func(*DescribeActionTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeActionTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeActionTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeActionTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeHub = "DescribeHub" + +// DescribeHubRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHub operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisableSecurityHub for more information on using the DisableSecurityHub +// See DescribeHub for more information on using the DescribeHub // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisableSecurityHubRequest method. -// req, resp := client.DisableSecurityHubRequest(params) +// // Example sending a request using the DescribeHubRequest method. +// req, resp := client.DescribeHubRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub -func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) (req *request.Request, output *DisableSecurityHubOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeHub +func (c *SecurityHub) DescribeHubRequest(input *DescribeHubInput) (req *request.Request, output *DescribeHubOutput) { op := &request.Operation{ - Name: opDisableSecurityHub, - HTTPMethod: "DELETE", + Name: opDescribeHub, + HTTPMethod: "GET", HTTPPath: "/accounts", } if input == nil { - input = &DisableSecurityHubInput{} + input = &DescribeHubInput{} } - output = &DisableSecurityHubOutput{} + output = &DescribeHubOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisableSecurityHub API operation for AWS SecurityHub. +// DescribeHub API operation for AWS SecurityHub. // -// Disables the AWS Security Hub Service. +// Returns details about the Hub resource in your account, including the HubArn +// and the time when you enabled Security Hub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation DisableSecurityHub for usage and error information. +// API operation DescribeHub for usage and error information. 
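The rewritten pager loop above (the same shape this change applies to the Secrets Manager pagers) breaks as soon as the callback returns false, where the old `p.Next() && cont` form could still issue one more page request. A minimal caller-side sketch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	// Returning false from the callback stops pagination immediately.
	pageNum := 0
	err := svc.DescribeActionTargetsPages(&securityhub.DescribeActionTargetsInput{},
		func(page *securityhub.DescribeActionTargetsOutput, lastPage bool) bool {
			pageNum++
			for _, t := range page.ActionTargets {
				fmt.Println(aws.StringValue(t.ActionTargetArn))
			}
			return pageNum <= 3 // stop after a few pages
		})
	if err != nil {
		log.Fatal(err)
	}
}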
// // Returned Error Codes: // * ErrCodeInternalException "InternalException" @@ -1101,770 +1354,884 @@ func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub -func (c *SecurityHub) DisableSecurityHub(input *DisableSecurityHubInput) (*DisableSecurityHubOutput, error) { - req, out := c.DisableSecurityHubRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeHub +func (c *SecurityHub) DescribeHub(input *DescribeHubInput) (*DescribeHubOutput, error) { + req, out := c.DescribeHubRequest(input) return out, req.Send() } -// DisableSecurityHubWithContext is the same as DisableSecurityHub with the addition of +// DescribeHubWithContext is the same as DescribeHub with the addition of // the ability to pass a context and additional request options. // -// See DisableSecurityHub for details on how to use this API operation. +// See DescribeHub for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) DisableSecurityHubWithContext(ctx aws.Context, input *DisableSecurityHubInput, opts ...request.Option) (*DisableSecurityHubOutput, error) { - req, out := c.DisableSecurityHubRequest(input) +func (c *SecurityHub) DescribeHubWithContext(ctx aws.Context, input *DescribeHubInput, opts ...request.Option) (*DescribeHubOutput, error) { + req, out := c.DescribeHubRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDisassociateFromMasterAccount = "DisassociateFromMasterAccount" +const opDescribeProducts = "DescribeProducts" -// DisassociateFromMasterAccountRequest generates a "aws/request.Request" representing the -// client's request for the DisassociateFromMasterAccount operation. The "output" return +// DescribeProductsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProducts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisassociateFromMasterAccount for more information on using the DisassociateFromMasterAccount +// See DescribeProducts for more information on using the DescribeProducts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the DisassociateFromMasterAccountRequest method. -// req, resp := client.DisassociateFromMasterAccountRequest(params) +// // Example sending a request using the DescribeProductsRequest method. +// req, resp := client.DescribeProductsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount -func (c *SecurityHub) DisassociateFromMasterAccountRequest(input *DisassociateFromMasterAccountInput) (req *request.Request, output *DisassociateFromMasterAccountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeProducts +func (c *SecurityHub) DescribeProductsRequest(input *DescribeProductsInput) (req *request.Request, output *DescribeProductsOutput) { op := &request.Operation{ - Name: opDisassociateFromMasterAccount, - HTTPMethod: "POST", - HTTPPath: "/master/disassociate", + Name: opDescribeProducts, + HTTPMethod: "GET", + HTTPPath: "/products", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DisassociateFromMasterAccountInput{} + input = &DescribeProductsInput{} } - output = &DisassociateFromMasterAccountOutput{} + output = &DescribeProductsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisassociateFromMasterAccount API operation for AWS SecurityHub. +// DescribeProducts API operation for AWS SecurityHub. // -// Disassociates the current Security Hub member account from its master account. +// Returns information about the products available that you can subscribe to +// and integrate with Security Hub to consolidate findings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation DisassociateFromMasterAccount for usage and error information. +// API operation DescribeProducts for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // -// * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. -// -// * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. -// // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. 
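A hedged sketch combining the new DescribeProducts paginator with a context, assuming an initialized client:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Walk every page of subscribable findings products.
	err := svc.DescribeProductsPagesWithContext(ctx, &securityhub.DescribeProductsInput{},
		func(page *securityhub.DescribeProductsOutput, lastPage bool) bool {
			for _, p := range page.Products {
				fmt.Println(aws.StringValue(p.ProductArn))
			}
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}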
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount -func (c *SecurityHub) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) { - req, out := c.DisassociateFromMasterAccountRequest(input) +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeProducts +func (c *SecurityHub) DescribeProducts(input *DescribeProductsInput) (*DescribeProductsOutput, error) { + req, out := c.DescribeProductsRequest(input) return out, req.Send() } -// DisassociateFromMasterAccountWithContext is the same as DisassociateFromMasterAccount with the addition of +// DescribeProductsWithContext is the same as DescribeProducts with the addition of // the ability to pass a context and additional request options. // -// See DisassociateFromMasterAccount for details on how to use this API operation. +// See DescribeProducts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) DisassociateFromMasterAccountWithContext(ctx aws.Context, input *DisassociateFromMasterAccountInput, opts ...request.Option) (*DisassociateFromMasterAccountOutput, error) { - req, out := c.DisassociateFromMasterAccountRequest(input) +func (c *SecurityHub) DescribeProductsWithContext(ctx aws.Context, input *DescribeProductsInput, opts ...request.Option) (*DescribeProductsOutput, error) { + req, out := c.DescribeProductsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDisassociateMembers = "DisassociateMembers" +// DescribeProductsPages iterates over the pages of a DescribeProducts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeProducts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeProducts operation. +// pageNum := 0 +// err := client.DescribeProductsPages(params, +// func(page *securityhub.DescribeProductsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) DescribeProductsPages(input *DescribeProductsInput, fn func(*DescribeProductsOutput, bool) bool) error { + return c.DescribeProductsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DisassociateMembersRequest generates a "aws/request.Request" representing the -// client's request for the DisassociateMembers operation. The "output" return +// DescribeProductsPagesWithContext same as DescribeProductsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SecurityHub) DescribeProductsPagesWithContext(ctx aws.Context, input *DescribeProductsInput, fn func(*DescribeProductsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeProductsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeProductsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeProductsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisableImportFindingsForProduct = "DisableImportFindingsForProduct" + +// DisableImportFindingsForProductRequest generates a "aws/request.Request" representing the +// client's request for the DisableImportFindingsForProduct operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisassociateMembers for more information on using the DisassociateMembers +// See DisableImportFindingsForProduct for more information on using the DisableImportFindingsForProduct // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisassociateMembersRequest method. -// req, resp := client.DisassociateMembersRequest(params) +// // Example sending a request using the DisableImportFindingsForProductRequest method. +// req, resp := client.DisableImportFindingsForProductRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers -func (c *SecurityHub) DisassociateMembersRequest(input *DisassociateMembersInput) (req *request.Request, output *DisassociateMembersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct +func (c *SecurityHub) DisableImportFindingsForProductRequest(input *DisableImportFindingsForProductInput) (req *request.Request, output *DisableImportFindingsForProductOutput) { op := &request.Operation{ - Name: opDisassociateMembers, - HTTPMethod: "POST", - HTTPPath: "/members/disassociate", + Name: opDisableImportFindingsForProduct, + HTTPMethod: "DELETE", + HTTPPath: "/productSubscriptions/{ProductSubscriptionArn+}", } if input == nil { - input = &DisassociateMembersInput{} + input = &DisableImportFindingsForProductInput{} } - output = &DisassociateMembersOutput{} + output = &DisableImportFindingsForProductOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisassociateMembers API operation for AWS SecurityHub. +// DisableImportFindingsForProduct API operation for AWS SecurityHub. // -// Disassociates the Security Hub member accounts that are specified by the -// account IDs from their master account. +// Disables the integration of the specified product with Security Hub. Findings +// from that product are no longer sent to Security Hub after the integration +// is disabled. // // Returns awserr.Error for service API and SDK errors. 
+const opDisableImportFindingsForProduct = "DisableImportFindingsForProduct"
+
+// DisableImportFindingsForProductRequest generates a "aws/request.Request" representing the
+// client's request for the DisableImportFindingsForProduct operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See DisassociateMembers for more information on using the DisassociateMembers
+// See DisableImportFindingsForProduct for more information on using the DisableImportFindingsForProduct
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the DisassociateMembersRequest method.
-//    req, resp := client.DisassociateMembersRequest(params)
+//    // Example sending a request using the DisableImportFindingsForProductRequest method.
+//    req, resp := client.DisableImportFindingsForProductRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers
-func (c *SecurityHub) DisassociateMembersRequest(input *DisassociateMembersInput) (req *request.Request, output *DisassociateMembersOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct
+func (c *SecurityHub) DisableImportFindingsForProductRequest(input *DisableImportFindingsForProductInput) (req *request.Request, output *DisableImportFindingsForProductOutput) {
 	op := &request.Operation{
-		Name:       opDisassociateMembers,
-		HTTPMethod: "POST",
-		HTTPPath:   "/members/disassociate",
+		Name:       opDisableImportFindingsForProduct,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/productSubscriptions/{ProductSubscriptionArn+}",
 	}
 
 	if input == nil {
-		input = &DisassociateMembersInput{}
+		input = &DisableImportFindingsForProductInput{}
 	}
 
-	output = &DisassociateMembersOutput{}
+	output = &DisableImportFindingsForProductOutput{}
 	req = c.newRequest(op, input, output)
 	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
-// DisassociateMembers API operation for AWS SecurityHub.
+// DisableImportFindingsForProduct API operation for AWS SecurityHub.
 //
-// Disassociates the Security Hub member accounts that are specified by the
-// account IDs from their master account.
+// Disables the integration of the specified product with Security Hub. Findings
+// from that product are no longer sent to Security Hub after the integration
+// is disabled.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS SecurityHub's
-// API operation DisassociateMembers for usage and error information.
+// API operation DisableImportFindingsForProduct for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeInternalException "InternalException"
 //   Internal server error.
 //
 //   * ErrCodeInvalidInputException "InvalidInputException"
-//   The request was rejected because an invalid or out-of-range value was supplied
-//   for an input parameter.
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The request was rejected because we can't find the specified resource.
 //
 //   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
+//   AWS Security Hub isn't enabled for the account used to make this request.
 //
 //   * ErrCodeLimitExceededException "LimitExceededException"
 //   The request was rejected because it attempted to create resources beyond
 //   the current AWS account limits. The error code describes the limit exceeded.
 //
-//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
-//   The request was rejected because the specified resource cannot be found.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers
-func (c *SecurityHub) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) {
-	req, out := c.DisassociateMembersRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct
+func (c *SecurityHub) DisableImportFindingsForProduct(input *DisableImportFindingsForProductInput) (*DisableImportFindingsForProductOutput, error) {
+	req, out := c.DisableImportFindingsForProductRequest(input)
 	return out, req.Send()
 }
 
-// DisassociateMembersWithContext is the same as DisassociateMembers with the addition of
+// DisableImportFindingsForProductWithContext is the same as DisableImportFindingsForProduct with the addition of
 // the ability to pass a context and additional request options.
 //
-// See DisassociateMembers for details on how to use this API operation.
+// See DisableImportFindingsForProduct for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) DisassociateMembersWithContext(ctx aws.Context, input *DisassociateMembersInput, opts ...request.Option) (*DisassociateMembersOutput, error) {
-	req, out := c.DisassociateMembersRequest(input)
+func (c *SecurityHub) DisableImportFindingsForProductWithContext(ctx aws.Context, input *DisableImportFindingsForProductInput, opts ...request.Option) (*DisableImportFindingsForProductOutput, error) {
+	req, out := c.DisableImportFindingsForProductRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
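A sketch of disabling one product integration with the new operation; the subscription ARN is a made-up placeholder, real values come from EnableImportFindingsForProduct or DescribeProducts output:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// DisableProduct removes one product subscription so its findings stop
// flowing into Security Hub.
func DisableProduct(client *securityhub.SecurityHub, subscriptionArn string) error {
	_, err := client.DisableImportFindingsForProduct(&securityhub.DisableImportFindingsForProductInput{
		ProductSubscriptionArn: aws.String(subscriptionArn),
	})
	return err
}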
-const opEnableImportFindingsForProduct = "EnableImportFindingsForProduct"
+const opDisableSecurityHub = "DisableSecurityHub"
 
-// EnableImportFindingsForProductRequest generates a "aws/request.Request" representing the
-// client's request for the EnableImportFindingsForProduct operation. The "output" return
+// DisableSecurityHubRequest generates a "aws/request.Request" representing the
+// client's request for the DisableSecurityHub operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See EnableImportFindingsForProduct for more information on using the EnableImportFindingsForProduct
+// See DisableSecurityHub for more information on using the DisableSecurityHub
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the EnableImportFindingsForProductRequest method.
-//    req, resp := client.EnableImportFindingsForProductRequest(params)
+//    // Example sending a request using the DisableSecurityHubRequest method.
+//    req, resp := client.DisableSecurityHubRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
-func (c *SecurityHub) EnableImportFindingsForProductRequest(input *EnableImportFindingsForProductInput) (req *request.Request, output *EnableImportFindingsForProductOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub
+func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) (req *request.Request, output *DisableSecurityHubOutput) {
 	op := &request.Operation{
-		Name:       opEnableImportFindingsForProduct,
-		HTTPMethod: "POST",
-		HTTPPath:   "/productSubscriptions",
+		Name:       opDisableSecurityHub,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/accounts",
 	}
 
 	if input == nil {
-		input = &EnableImportFindingsForProductInput{}
+		input = &DisableSecurityHubInput{}
 	}
 
-	output = &EnableImportFindingsForProductOutput{}
+	output = &DisableSecurityHubOutput{}
 	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
-// EnableImportFindingsForProduct API operation for AWS SecurityHub.
+// DisableSecurityHub API operation for AWS SecurityHub.
+//
+// Disables Security Hub in your account only in the current Region. To disable
+// Security Hub in all Regions, you must submit one request per Region where
+// you have enabled Security Hub. When you disable Security Hub for a master
+// account, it doesn't disable Security Hub for any associated member accounts.
 //
-// Sets up the subscription that enables a findings-generating solution (product)
-// to import its findings into Security Hub.
+// When you disable Security Hub, your existing findings and insights and any
+// Security Hub configuration settings are deleted after 90 days and can't be
+// recovered. Any standards that were enabled are disabled, and your master
+// and member account associations are removed. If you want to save your existing
+// findings, you must export them before you disable Security Hub.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS SecurityHub's
-// API operation EnableImportFindingsForProduct for usage and error information.
+// API operation DisableSecurityHub for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeInternalException "InternalException"
 //   Internal server error.
 //
-//   * ErrCodeInvalidInputException "InvalidInputException"
-//   The request was rejected because an invalid or out-of-range value was supplied
-//   for an input parameter.
-//
-//   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
-//
-//   * ErrCodeResourceConflictException "ResourceConflictException"
-//   The resource specified in the request conflicts with an existing resource.
-//
 //   * ErrCodeLimitExceededException "LimitExceededException"
 //   The request was rejected because it attempted to create resources beyond
 //   the current AWS account limits. The error code describes the limit exceeded.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
-func (c *SecurityHub) EnableImportFindingsForProduct(input *EnableImportFindingsForProductInput) (*EnableImportFindingsForProductOutput, error) {
-	req, out := c.EnableImportFindingsForProductRequest(input)
+//   * ErrCodeInvalidAccessException "InvalidAccessException"
+//   AWS Security Hub isn't enabled for the account used to make this request.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The request was rejected because we can't find the specified resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub
+func (c *SecurityHub) DisableSecurityHub(input *DisableSecurityHubInput) (*DisableSecurityHubOutput, error) {
+	req, out := c.DisableSecurityHubRequest(input)
 	return out, req.Send()
 }
 
-// EnableImportFindingsForProductWithContext is the same as EnableImportFindingsForProduct with the addition of
+// DisableSecurityHubWithContext is the same as DisableSecurityHub with the addition of
 // the ability to pass a context and additional request options.
 //
-// See EnableImportFindingsForProduct for details on how to use this API operation.
+// See DisableSecurityHub for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) EnableImportFindingsForProductWithContext(ctx aws.Context, input *EnableImportFindingsForProductInput, opts ...request.Option) (*EnableImportFindingsForProductOutput, error) {
-	req, out := c.EnableImportFindingsForProductRequest(input)
+func (c *SecurityHub) DisableSecurityHubWithContext(ctx aws.Context, input *DisableSecurityHubInput, opts ...request.Option) (*DisableSecurityHubOutput, error) {
+	req, out := c.DisableSecurityHubRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
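Since the godoc above stresses inspecting awserr.Error codes at runtime, a hedged sketch of calling DisableSecurityHub and branching on the documented codes (treating "not enabled" as a no-op is this sketch's choice, not the SDK's):

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// DisableHub disables Security Hub in the client's Region, mapping the error
// codes listed above onto behavior via an awserr.Error type assertion.
func DisableHub(client *securityhub.SecurityHub) error {
	_, err := client.DisableSecurityHub(&securityhub.DisableSecurityHubInput{})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case securityhub.ErrCodeResourceNotFoundException, securityhub.ErrCodeInvalidAccessException:
			fmt.Println("Security Hub does not appear to be enabled:", aerr.Message())
			return nil
		}
	}
	return err
}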
-const opEnableSecurityHub = "EnableSecurityHub"
+const opDisassociateFromMasterAccount = "DisassociateFromMasterAccount"
 
-// EnableSecurityHubRequest generates a "aws/request.Request" representing the
-// client's request for the EnableSecurityHub operation. The "output" return
+// DisassociateFromMasterAccountRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateFromMasterAccount operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See EnableSecurityHub for more information on using the EnableSecurityHub
+// See DisassociateFromMasterAccount for more information on using the DisassociateFromMasterAccount
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the EnableSecurityHubRequest method.
-//    req, resp := client.EnableSecurityHubRequest(params)
+//    // Example sending a request using the DisassociateFromMasterAccountRequest method.
+//    req, resp := client.DisassociateFromMasterAccountRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
-func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (req *request.Request, output *EnableSecurityHubOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount
+func (c *SecurityHub) DisassociateFromMasterAccountRequest(input *DisassociateFromMasterAccountInput) (req *request.Request, output *DisassociateFromMasterAccountOutput) {
 	op := &request.Operation{
-		Name:       opEnableSecurityHub,
+		Name:       opDisassociateFromMasterAccount,
 		HTTPMethod: "POST",
-		HTTPPath:   "/accounts",
+		HTTPPath:   "/master/disassociate",
 	}
 
 	if input == nil {
-		input = &EnableSecurityHubInput{}
+		input = &DisassociateFromMasterAccountInput{}
 	}
 
-	output = &EnableSecurityHubOutput{}
+	output = &DisassociateFromMasterAccountOutput{}
 	req = c.newRequest(op, input, output)
 	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
-// EnableSecurityHub API operation for AWS SecurityHub.
+// DisassociateFromMasterAccount API operation for AWS SecurityHub.
 //
-// Enables the AWS Security Hub service.
+// Disassociates the current Security Hub member account from the associated
+// master account.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS SecurityHub's
-// API operation EnableSecurityHub for usage and error information.
+// API operation DisassociateFromMasterAccount for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeInternalException "InternalException"
 //   Internal server error.
 //
-//   * ErrCodeLimitExceededException "LimitExceededException"
-//   The request was rejected because it attempted to create resources beyond
-//   the current AWS account limits. The error code describes the limit exceeded.
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
 //
 //   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
+//   AWS Security Hub isn't enabled for the account used to make this request.
 //
-//   * ErrCodeResourceConflictException "ResourceConflictException"
-//   The resource specified in the request conflicts with an existing resource.
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error code describes the limit exceeded.
 //
-//   * ErrCodeAccessDeniedException "AccessDeniedException"
-//   You do not have permission to to perform the action specified in the request.
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The request was rejected because we can't find the specified resource.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
-func (c *SecurityHub) EnableSecurityHub(input *EnableSecurityHubInput) (*EnableSecurityHubOutput, error) {
-	req, out := c.EnableSecurityHubRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount
+func (c *SecurityHub) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) {
+	req, out := c.DisassociateFromMasterAccountRequest(input)
 	return out, req.Send()
 }
 
-// EnableSecurityHubWithContext is the same as EnableSecurityHub with the addition of
+// DisassociateFromMasterAccountWithContext is the same as DisassociateFromMasterAccount with the addition of
 // the ability to pass a context and additional request options.
 //
-// See EnableSecurityHub for details on how to use this API operation.
+// See DisassociateFromMasterAccount for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) EnableSecurityHubWithContext(ctx aws.Context, input *EnableSecurityHubInput, opts ...request.Option) (*EnableSecurityHubOutput, error) {
-	req, out := c.EnableSecurityHubRequest(input)
+func (c *SecurityHub) DisassociateFromMasterAccountWithContext(ctx aws.Context, input *DisassociateFromMasterAccountInput, opts ...request.Option) (*DisassociateFromMasterAccountOutput, error) {
+	req, out := c.DisassociateFromMasterAccountRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
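The WithContext variants above require a non-nil context; a minimal sketch of bounding the disassociation call with a deadline (the 30-second timeout is an arbitrary illustration):

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

// LeaveMaster runs the disassociation under a deadline. A context.Context
// satisfies aws.Context, and per the contract above it must be non-nil.
func LeaveMaster(client *securityhub.SecurityHub) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := client.DisassociateFromMasterAccountWithContext(ctx,
		&securityhub.DisassociateFromMasterAccountInput{})
	return err
}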
// the "output" return value is not valid until after Send returns without error. // -// See GetEnabledStandards for more information on using the GetEnabledStandards +// See DisassociateMembers for more information on using the DisassociateMembers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetEnabledStandardsRequest method. -// req, resp := client.GetEnabledStandardsRequest(params) +// // Example sending a request using the DisassociateMembersRequest method. +// req, resp := client.DisassociateMembersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards -func (c *SecurityHub) GetEnabledStandardsRequest(input *GetEnabledStandardsInput) (req *request.Request, output *GetEnabledStandardsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers +func (c *SecurityHub) DisassociateMembersRequest(input *DisassociateMembersInput) (req *request.Request, output *DisassociateMembersOutput) { op := &request.Operation{ - Name: opGetEnabledStandards, + Name: opDisassociateMembers, HTTPMethod: "POST", - HTTPPath: "/standards/get", + HTTPPath: "/members/disassociate", } if input == nil { - input = &GetEnabledStandardsInput{} + input = &DisassociateMembersInput{} } - output = &GetEnabledStandardsOutput{} + output = &DisassociateMembersOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetEnabledStandards API operation for AWS SecurityHub. +// DisassociateMembers API operation for AWS SecurityHub. // -// Lists and describes enabled standards. +// Disassociates the specified member accounts from the associated master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation GetEnabledStandards for usage and error information. +// API operation DisassociateMembers for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards -func (c *SecurityHub) GetEnabledStandards(input *GetEnabledStandardsInput) (*GetEnabledStandardsOutput, error) { - req, out := c.GetEnabledStandardsRequest(input) +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers +func (c *SecurityHub) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) { + req, out := c.DisassociateMembersRequest(input) return out, req.Send() } -// GetEnabledStandardsWithContext is the same as GetEnabledStandards with the addition of +// DisassociateMembersWithContext is the same as DisassociateMembers with the addition of // the ability to pass a context and additional request options. // -// See GetEnabledStandards for details on how to use this API operation. +// See DisassociateMembers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) GetEnabledStandardsWithContext(ctx aws.Context, input *GetEnabledStandardsInput, opts ...request.Option) (*GetEnabledStandardsOutput, error) { - req, out := c.GetEnabledStandardsRequest(input) +func (c *SecurityHub) DisassociateMembersWithContext(ctx aws.Context, input *DisassociateMembersInput, opts ...request.Option) (*DisassociateMembersOutput, error) { + req, out := c.DisassociateMembersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetFindings = "GetFindings" +const opEnableImportFindingsForProduct = "EnableImportFindingsForProduct" -// GetFindingsRequest generates a "aws/request.Request" representing the -// client's request for the GetFindings operation. The "output" return +// EnableImportFindingsForProductRequest generates a "aws/request.Request" representing the +// client's request for the EnableImportFindingsForProduct operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetFindings for more information on using the GetFindings +// See EnableImportFindingsForProduct for more information on using the EnableImportFindingsForProduct // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetFindingsRequest method. -// req, resp := client.GetFindingsRequest(params) +// // Example sending a request using the EnableImportFindingsForProductRequest method. 
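A short sketch of the master-side counterpart, disassociating member accounts by ID (the twelve-digit IDs are placeholders):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// DropMembers disassociates member accounts from the calling master account.
func DropMembers(client *securityhub.SecurityHub) error {
	_, err := client.DisassociateMembers(&securityhub.DisassociateMembersInput{
		AccountIds: aws.StringSlice([]string{"111122223333", "444455556666"}),
	})
	return err
}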
-const opGetFindings = "GetFindings"
+const opEnableImportFindingsForProduct = "EnableImportFindingsForProduct"
 
-// GetFindingsRequest generates a "aws/request.Request" representing the
-// client's request for the GetFindings operation. The "output" return
+// EnableImportFindingsForProductRequest generates a "aws/request.Request" representing the
+// client's request for the EnableImportFindingsForProduct operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetFindings for more information on using the GetFindings
+// See EnableImportFindingsForProduct for more information on using the EnableImportFindingsForProduct
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetFindingsRequest method.
-//    req, resp := client.GetFindingsRequest(params)
+//    // Example sending a request using the EnableImportFindingsForProductRequest method.
+//    req, resp := client.EnableImportFindingsForProductRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings
-func (c *SecurityHub) GetFindingsRequest(input *GetFindingsInput) (req *request.Request, output *GetFindingsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
+func (c *SecurityHub) EnableImportFindingsForProductRequest(input *EnableImportFindingsForProductInput) (req *request.Request, output *EnableImportFindingsForProductOutput) {
 	op := &request.Operation{
-		Name:       opGetFindings,
+		Name:       opEnableImportFindingsForProduct,
 		HTTPMethod: "POST",
-		HTTPPath:   "/findings",
-		Paginator: &request.Paginator{
-			InputTokens:     []string{"NextToken"},
-			OutputTokens:    []string{"NextToken"},
-			LimitToken:      "MaxResults",
-			TruncationToken: "",
-		},
+		HTTPPath:   "/productSubscriptions",
 	}
 
 	if input == nil {
-		input = &GetFindingsInput{}
+		input = &EnableImportFindingsForProductInput{}
 	}
 
-	output = &GetFindingsOutput{}
+	output = &EnableImportFindingsForProductOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetFindings API operation for AWS SecurityHub.
+// EnableImportFindingsForProduct API operation for AWS SecurityHub.
 //
-// Lists and describes Security Hub-aggregated findings that are specified by
-// filter attributes.
+// Enables the integration of a partner product with Security Hub. Integrated
+// products send findings to Security Hub. When you enable a product integration,
+// a permission policy that grants permission for the product to send findings
+// to Security Hub is applied.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS SecurityHub's
-// API operation GetFindings for usage and error information.
+// API operation EnableImportFindingsForProduct for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeInternalException "InternalException"
 //   Internal server error.
 //
 //   * ErrCodeInvalidInputException "InvalidInputException"
-//   The request was rejected because an invalid or out-of-range value was supplied
-//   for an input parameter.
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
 //
 //   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
+//   AWS Security Hub isn't enabled for the account used to make this request.
+//
+//   * ErrCodeResourceConflictException "ResourceConflictException"
+//   The resource specified in the request conflicts with an existing resource.
 //
 //   * ErrCodeLimitExceededException "LimitExceededException"
 //   The request was rejected because it attempted to create resources beyond
 //   the current AWS account limits. The error code describes the limit exceeded.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings
-func (c *SecurityHub) GetFindings(input *GetFindingsInput) (*GetFindingsOutput, error) {
-	req, out := c.GetFindingsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
+func (c *SecurityHub) EnableImportFindingsForProduct(input *EnableImportFindingsForProductInput) (*EnableImportFindingsForProductOutput, error) {
+	req, out := c.EnableImportFindingsForProductRequest(input)
 	return out, req.Send()
 }
 
-// GetFindingsWithContext is the same as GetFindings with the addition of
+// EnableImportFindingsForProductWithContext is the same as EnableImportFindingsForProduct with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetFindings for details on how to use this API operation.
+// See EnableImportFindingsForProduct for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) GetFindingsWithContext(ctx aws.Context, input *GetFindingsInput, opts ...request.Option) (*GetFindingsOutput, error) {
-	req, out := c.GetFindingsRequest(input)
+func (c *SecurityHub) EnableImportFindingsForProductWithContext(ctx aws.Context, input *EnableImportFindingsForProductInput, opts ...request.Option) (*EnableImportFindingsForProductOutput, error) {
+	req, out := c.EnableImportFindingsForProductRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
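A hedged sketch of enabling a product integration; the product ARN argument is a placeholder, and the subscription ARN read from the output is the handle a later DisableImportFindingsForProduct call takes:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// EnableProduct subscribes the account to a partner product integration.
func EnableProduct(client *securityhub.SecurityHub, productArn string) error {
	out, err := client.EnableImportFindingsForProduct(&securityhub.EnableImportFindingsForProductInput{
		ProductArn: aws.String(productArn),
	})
	if err != nil {
		return err
	}
	fmt.Println("subscribed:", aws.StringValue(out.ProductSubscriptionArn))
	return nil
}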
-// GetFindingsPages iterates over the pages of a GetFindings operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
+const opEnableSecurityHub = "EnableSecurityHub"
+
+// EnableSecurityHubRequest generates a "aws/request.Request" representing the
+// client's request for the EnableSecurityHub operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetFindings method for more information on how to use this operation.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
 //
-// Note: This operation can generate multiple requests to a service.
+// See EnableSecurityHub for more information on using the EnableSecurityHub
+// API call, and error handling.
 //
-//    // Example iterating over at most 3 pages of a GetFindings operation.
-//    pageNum := 0
-//    err := client.GetFindingsPages(params,
-//        func(page *GetFindingsOutput, lastPage bool) bool {
-//            pageNum++
-//            fmt.Println(page)
-//            return pageNum <= 3
-//        })
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
-func (c *SecurityHub) GetFindingsPages(input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool) error {
-	return c.GetFindingsPagesWithContext(aws.BackgroundContext(), input, fn)
+//
+//    // Example sending a request using the EnableSecurityHubRequest method.
+//    req, resp := client.EnableSecurityHubRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
+func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (req *request.Request, output *EnableSecurityHubOutput) {
+	op := &request.Operation{
+		Name:       opEnableSecurityHub,
+		HTTPMethod: "POST",
+		HTTPPath:   "/accounts",
+	}
+
+	if input == nil {
+		input = &EnableSecurityHubInput{}
+	}
+
+	output = &EnableSecurityHubOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
 }
 
-// GetFindingsPagesWithContext same as GetFindingsPages except
-// it takes a Context and allows setting request options on the pages.
+// EnableSecurityHub API operation for AWS SecurityHub.
+//
+// Enables Security Hub for your account in the current Region or the Region
+// you specify in the request. When you enable Security Hub, you grant to Security
+// Hub the permissions necessary to gather findings from AWS Config, Amazon
+// GuardDuty, Amazon Inspector, and Amazon Macie. To learn more, see Setting
+// Up AWS Security Hub (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-settingup.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SecurityHub's
+// API operation EnableSecurityHub for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInternalException "InternalException"
+//   Internal server error.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error code describes the limit exceeded.
+//
+//   * ErrCodeInvalidAccessException "InvalidAccessException"
+//   AWS Security Hub isn't enabled for the account used to make this request.
+//
+//   * ErrCodeResourceConflictException "ResourceConflictException"
+//   The resource specified in the request conflicts with an existing resource.
+//
+//   * ErrCodeAccessDeniedException "AccessDeniedException"
+//   You don't have permission to perform the action specified in the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
+func (c *SecurityHub) EnableSecurityHub(input *EnableSecurityHubInput) (*EnableSecurityHubOutput, error) {
+	req, out := c.EnableSecurityHubRequest(input)
+	return out, req.Send()
+}
+
+// EnableSecurityHubWithContext is the same as EnableSecurityHub with the addition of
+// the ability to pass a context and additional request options.
+//
+// See EnableSecurityHub for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) GetFindingsPagesWithContext(ctx aws.Context, input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool, opts ...request.Option) error {
-	p := request.Pagination{
-		NewRequest: func() (*request.Request, error) {
-			var inCpy *GetFindingsInput
-			if input != nil {
-				tmp := *input
-				inCpy = &tmp
-			}
-			req, _ := c.GetFindingsRequest(inCpy)
-			req.SetContext(ctx)
-			req.ApplyOptions(opts...)
-			return req, nil
-		},
-	}
-
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*GetFindingsOutput), !p.HasNextPage())
-	}
-	return p.Err()
+func (c *SecurityHub) EnableSecurityHubWithContext(ctx aws.Context, input *EnableSecurityHubInput, opts ...request.Option) (*EnableSecurityHubOutput, error) {
+	req, out := c.EnableSecurityHubRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }
 
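A sketch of an idempotent enable, treating the ResourceConflictException documented above (Security Hub already on) as success; that interpretation is this sketch's assumption:

package example

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// EnsureHub enables Security Hub in the client's Region if it is not already enabled.
func EnsureHub(client *securityhub.SecurityHub) error {
	_, err := client.EnableSecurityHub(&securityhub.EnableSecurityHubInput{})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == securityhub.ErrCodeResourceConflictException {
		return nil // already enabled
	}
	return err
}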
-const opGetInsightResults = "GetInsightResults"
+const opGetEnabledStandards = "GetEnabledStandards"
 
-// GetInsightResultsRequest generates a "aws/request.Request" representing the
-// client's request for the GetInsightResults operation. The "output" return
+// GetEnabledStandardsRequest generates a "aws/request.Request" representing the
+// client's request for the GetEnabledStandards operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
 // the "output" return value is not valid until after Send returns without error.
 //
-// See GetInsightResults for more information on using the GetInsightResults
+// See GetEnabledStandards for more information on using the GetEnabledStandards
 // API call, and error handling.
 //
 // This method is useful when you want to inject custom logic or configuration
 // into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
 //
-//    // Example sending a request using the GetInsightResultsRequest method.
-//    req, resp := client.GetInsightResultsRequest(params)
+//    // Example sending a request using the GetEnabledStandardsRequest method.
+//    req, resp := client.GetEnabledStandardsRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
-func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (req *request.Request, output *GetInsightResultsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards
+func (c *SecurityHub) GetEnabledStandardsRequest(input *GetEnabledStandardsInput) (req *request.Request, output *GetEnabledStandardsOutput) {
 	op := &request.Operation{
-		Name:       opGetInsightResults,
-		HTTPMethod: "GET",
-		HTTPPath:   "/insights/results/{InsightArn+}",
+		Name:       opGetEnabledStandards,
+		HTTPMethod: "POST",
+		HTTPPath:   "/standards/get",
 	}
 
 	if input == nil {
-		input = &GetInsightResultsInput{}
+		input = &GetEnabledStandardsInput{}
 	}
 
-	output = &GetInsightResultsOutput{}
+	output = &GetEnabledStandardsOutput{}
 	req = c.newRequest(op, input, output)
 	return
 }
 
-// GetInsightResults API operation for AWS SecurityHub.
+// GetEnabledStandards API operation for AWS SecurityHub.
 //
-// Lists the results of the Security Hub insight specified by the insight ARN.
+// Returns a list of the standards that are currently enabled.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for AWS SecurityHub's
-// API operation GetInsightResults for usage and error information.
+// API operation GetEnabledStandards for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeInternalException "InternalException"
 //   Internal server error.
 //
 //   * ErrCodeInvalidInputException "InvalidInputException"
-//   The request was rejected because an invalid or out-of-range value was supplied
-//   for an input parameter.
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
 //
 //   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
+//   AWS Security Hub isn't enabled for the account used to make this request.
 //
 //   * ErrCodeLimitExceededException "LimitExceededException"
 //   The request was rejected because it attempted to create resources beyond
 //   the current AWS account limits. The error code describes the limit exceeded.
 //
-//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
-//   The request was rejected because the specified resource cannot be found.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
-func (c *SecurityHub) GetInsightResults(input *GetInsightResultsInput) (*GetInsightResultsOutput, error) {
-	req, out := c.GetInsightResultsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards
+func (c *SecurityHub) GetEnabledStandards(input *GetEnabledStandardsInput) (*GetEnabledStandardsOutput, error) {
+	req, out := c.GetEnabledStandardsRequest(input)
 	return out, req.Send()
 }
 
-// GetInsightResultsWithContext is the same as GetInsightResults with the addition of
+// GetEnabledStandardsWithContext is the same as GetEnabledStandards with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetInsightResults for details on how to use this API operation.
+// See GetEnabledStandards for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *SecurityHub) GetInsightResultsWithContext(ctx aws.Context, input *GetInsightResultsInput, opts ...request.Option) (*GetInsightResultsOutput, error) {
-	req, out := c.GetInsightResultsRequest(input)
+func (c *SecurityHub) GetEnabledStandardsWithContext(ctx aws.Context, input *GetEnabledStandardsInput, opts ...request.Option) (*GetEnabledStandardsOutput, error) {
+	req, out := c.GetEnabledStandardsRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
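A minimal sketch of listing the enabled standards; an empty input returns all subscriptions for the account, and the printed fields are the ones I expect on StandardsSubscription:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// ListStandards prints every enabled standards subscription with its status.
func ListStandards(client *securityhub.SecurityHub) error {
	out, err := client.GetEnabledStandards(&securityhub.GetEnabledStandardsInput{})
	if err != nil {
		return err
	}
	for _, s := range out.StandardsSubscriptions {
		fmt.Println(aws.StringValue(s.StandardsArn), aws.StringValue(s.StandardsStatus))
	}
	return nil
}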
// the "output" return value is not valid until after Send returns without error. // -// See GetInsights for more information on using the GetInsights +// See GetFindings for more information on using the GetFindings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInsightsRequest method. -// req, resp := client.GetInsightsRequest(params) +// // Example sending a request using the GetFindingsRequest method. +// req, resp := client.GetFindingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights -func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request.Request, output *GetInsightsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings +func (c *SecurityHub) GetFindingsRequest(input *GetFindingsInput) (req *request.Request, output *GetFindingsOutput) { op := &request.Operation{ - Name: opGetInsights, + Name: opGetFindings, HTTPMethod: "POST", - HTTPPath: "/insights/get", + HTTPPath: "/findings", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -1874,77 +2241,318 @@ func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request. } if input == nil { - input = &GetInsightsInput{} + input = &GetFindingsInput{} } - output = &GetInsightsOutput{} + output = &GetFindingsOutput{} req = c.newRequest(op, input, output) return } -// GetInsights API operation for AWS SecurityHub. +// GetFindings API operation for AWS SecurityHub. // -// Lists and describes insights that are specified by insight ARNs. +// Returns a list of findings that match the specified criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation GetInsights for usage and error information. +// API operation GetFindings for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights -func (c *SecurityHub) GetInsights(input *GetInsightsInput) (*GetInsightsOutput, error) { - req, out := c.GetInsightsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings +func (c *SecurityHub) GetFindings(input *GetFindingsInput) (*GetFindingsOutput, error) { + req, out := c.GetFindingsRequest(input) return out, req.Send() } -// GetInsightsWithContext is the same as GetInsights with the addition of +// GetFindingsWithContext is the same as GetFindings with the addition of // the ability to pass a context and additional request options. // -// See GetInsights for details on how to use this API operation. +// See GetFindings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) GetInsightsWithContext(ctx aws.Context, input *GetInsightsInput, opts ...request.Option) (*GetInsightsOutput, error) { - req, out := c.GetInsightsRequest(input) +func (c *SecurityHub) GetFindingsWithContext(ctx aws.Context, input *GetFindingsInput, opts ...request.Option) (*GetFindingsOutput, error) { + req, out := c.GetFindingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetInsightsPages iterates over the pages of a GetInsights operation, +// GetFindingsPages iterates over the pages of a GetFindings operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See GetInsights method for more information on how to use this operation. +// See GetFindings method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a GetInsights operation. +// // Example iterating over at most 3 pages of a GetFindings operation. +// pageNum := 0 +// err := client.GetFindingsPages(params, +// func(page *securityhub.GetFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) GetFindingsPages(input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool) error { + return c.GetFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetFindingsPagesWithContext same as GetFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) GetFindingsPagesWithContext(ctx aws.Context, input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetFindingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetInsightResults = "GetInsightResults" + +// GetInsightResultsRequest generates a "aws/request.Request" representing the +// client's request for the GetInsightResults operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInsightResults for more information on using the GetInsightResults +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetInsightResultsRequest method. +// req, resp := client.GetInsightResultsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults +func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (req *request.Request, output *GetInsightResultsOutput) { + op := &request.Operation{ + Name: opGetInsightResults, + HTTPMethod: "GET", + HTTPPath: "/insights/results/{InsightArn+}", + } + + if input == nil { + input = &GetInsightResultsInput{} + } + + output = &GetInsightResultsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInsightResults API operation for AWS SecurityHub. +// +// Lists the results of the Security Hub insight that the insight ARN specifies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation GetInsightResults for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults +func (c *SecurityHub) GetInsightResults(input *GetInsightResultsInput) (*GetInsightResultsOutput, error) { + req, out := c.GetInsightResultsRequest(input) + return out, req.Send() +} + +// GetInsightResultsWithContext is the same as GetInsightResults with the addition of +// the ability to pass a context and additional request options. +// +// See GetInsightResults for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
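A sketch of driving the new context-aware findings paginator with a filter. The filter fields shown here (RecordState as a StringFilter with the EQUALS comparison) are assumptions about AwsSecurityFindingFilters, which carries many more fields:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// CountActiveFindings pages through findings whose record state is ACTIVE.
func CountActiveFindings(ctx context.Context, client *securityhub.SecurityHub) (int, error) {
	input := &securityhub.GetFindingsInput{
		Filters: &securityhub.AwsSecurityFindingFilters{
			RecordState: []*securityhub.StringFilter{{
				Comparison: aws.String(securityhub.StringFilterComparisonEquals),
				Value:      aws.String("ACTIVE"),
			}},
		},
		MaxResults: aws.Int64(100),
	}
	n := 0
	err := client.GetFindingsPagesWithContext(ctx, input,
		func(page *securityhub.GetFindingsOutput, lastPage bool) bool {
			n += len(page.Findings)
			return true // keep paging until NextToken runs out
		})
	return n, err
}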
+const opGetInsightResults = "GetInsightResults"
+
+// GetInsightResultsRequest generates a "aws/request.Request" representing the
+// client's request for the GetInsightResults operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInsightResults for more information on using the GetInsightResults
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetInsightResultsRequest method.
+//    req, resp := client.GetInsightResultsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
+func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (req *request.Request, output *GetInsightResultsOutput) {
+	op := &request.Operation{
+		Name:       opGetInsightResults,
+		HTTPMethod: "GET",
+		HTTPPath:   "/insights/results/{InsightArn+}",
+	}
+
+	if input == nil {
+		input = &GetInsightResultsInput{}
+	}
+
+	output = &GetInsightResultsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetInsightResults API operation for AWS SecurityHub.
+//
+// Lists the results of the Security Hub insight that the insight ARN specifies.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SecurityHub's
+// API operation GetInsightResults for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInternalException "InternalException"
+//   Internal server error.
+//
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
+//
+//   * ErrCodeInvalidAccessException "InvalidAccessException"
+//   AWS Security Hub isn't enabled for the account used to make this request.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error code describes the limit exceeded.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The request was rejected because we can't find the specified resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
+func (c *SecurityHub) GetInsightResults(input *GetInsightResultsInput) (*GetInsightResultsOutput, error) {
+	req, out := c.GetInsightResultsRequest(input)
+	return out, req.Send()
+}
+
+// GetInsightResultsWithContext is the same as GetInsightResults with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInsightResults for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SecurityHub) GetInsightResultsWithContext(ctx aws.Context, input *GetInsightResultsInput, opts ...request.Option) (*GetInsightResultsOutput, error) {
+	req, out := c.GetInsightResultsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
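A sketch of fetching one insight's aggregation buckets; the insight ARN would normally come from a prior GetInsights call, so the argument here is a placeholder:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// PrintInsightResults prints each result bucket's group-by value and count.
func PrintInsightResults(client *securityhub.SecurityHub, insightArn string) error {
	out, err := client.GetInsightResults(&securityhub.GetInsightResultsInput{
		InsightArn: aws.String(insightArn),
	})
	if err != nil {
		return err
	}
	for _, v := range out.InsightResults.ResultValues {
		fmt.Println(aws.StringValue(v.GroupByAttributeValue), aws.Int64Value(v.Count))
	}
	return nil
}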
+const opGetInsights = "GetInsights"
+
+// GetInsightsRequest generates a "aws/request.Request" representing the
+// client's request for the GetInsights operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInsights for more information on using the GetInsights
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetInsightsRequest method.
+//    req, resp := client.GetInsightsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights
+func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request.Request, output *GetInsightsOutput) {
+	op := &request.Operation{
+		Name:       opGetInsights,
+		HTTPMethod: "POST",
+		HTTPPath:   "/insights/get",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &GetInsightsInput{}
+	}
+
+	output = &GetInsightsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetInsights API operation for AWS SecurityHub.
+//
+// Lists and describes insights that insight ARNs specify.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SecurityHub's
+// API operation GetInsights for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInternalException "InternalException"
+//   Internal server error.
+//
+//   * ErrCodeInvalidInputException "InvalidInputException"
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
+//
+//   * ErrCodeInvalidAccessException "InvalidAccessException"
+//   AWS Security Hub isn't enabled for the account used to make this request.
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error code describes the limit exceeded.
+//
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The request was rejected because we can't find the specified resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights
+func (c *SecurityHub) GetInsights(input *GetInsightsInput) (*GetInsightsOutput, error) {
+	req, out := c.GetInsightsRequest(input)
+	return out, req.Send()
+}
+
+// GetInsightsWithContext is the same as GetInsights with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInsights for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SecurityHub) GetInsightsWithContext(ctx aws.Context, input *GetInsightsInput, opts ...request.Option) (*GetInsightsOutput, error) {
+	req, out := c.GetInsightsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// GetInsightsPages iterates over the pages of a GetInsights operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetInsights method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a GetInsights operation.
 //    pageNum := 0
 //    err := client.GetInsightsPages(params,
-//        func(page *GetInsightsOutput, lastPage bool) bool {
+//        func(page *securityhub.GetInsightsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -1976,10 +2584,12 @@ func (c *SecurityHub) GetInsightsPagesWithContext(ctx aws.Context, input *GetIns
 		},
 	}
 
-	cont := true
-	for p.Next() && cont {
-		cont = fn(p.Page().(*GetInsightsOutput), !p.HasNextPage())
+	for p.Next() {
+		if !fn(p.Page().(*GetInsightsOutput), !p.HasNextPage()) {
+			break
+		}
 	}
+
 	return p.Err()
 }
 
@@ -2042,642 +2652,999 @@ func (c *SecurityHub) GetInvitationsCountRequest(input *GetInvitationsCountInput
 //   Internal server error.
 //
 //   * ErrCodeInvalidInputException "InvalidInputException"
-//   The request was rejected because an invalid or out-of-range value was supplied
-//   for an input parameter.
+//   The request was rejected because you supplied an invalid or out-of-range
+//   value for an input parameter.
 //
 //   * ErrCodeInvalidAccessException "InvalidAccessException"
-//   AWS Security Hub is not enabled for the account used to make this request.
+//   AWS Security Hub isn't enabled for the account used to make this request.
 //
 //   * ErrCodeLimitExceededException "LimitExceededException"
 //   The request was rejected because it attempted to create resources beyond
 //   the current AWS account limits. The error code describes the limit exceeded.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInvitationsCount
-func (c *SecurityHub) GetInvitationsCount(input *GetInvitationsCountInput) (*GetInvitationsCountOutput, error) {
-	req, out := c.GetInvitationsCountRequest(input)
-	return out, req.Send()
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInvitationsCount
+func (c *SecurityHub) GetInvitationsCount(input *GetInvitationsCountInput) (*GetInvitationsCountOutput, error) {
+	req, out := c.GetInvitationsCountRequest(input)
+	return out, req.Send()
+}
+
+// GetInvitationsCountWithContext is the same as GetInvitationsCount with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInvitationsCount for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SecurityHub) GetInvitationsCountWithContext(ctx aws.Context, input *GetInvitationsCountInput, opts ...request.Option) (*GetInvitationsCountOutput, error) {
+	req, out := c.GetInvitationsCountRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
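A one-call sketch of the invitations counter; the input struct takes no fields, and the count is read from the output's InvitationsCount:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// PendingInvitations reports how many unaccepted membership invitations
// the calling account has.
func PendingInvitations(client *securityhub.SecurityHub) (int64, error) {
	out, err := client.GetInvitationsCount(&securityhub.GetInvitationsCountInput{})
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.InvitationsCount), nil
}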
+// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount +func (c *SecurityHub) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) { + req, out := c.GetMasterAccountRequest(input) + return out, req.Send() +} + +// GetMasterAccountWithContext is the same as GetMasterAccount with the addition of +// the ability to pass a context and additional request options. +// +// See GetMasterAccount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) GetMasterAccountWithContext(ctx aws.Context, input *GetMasterAccountInput, opts ...request.Option) (*GetMasterAccountOutput, error) { + req, out := c.GetMasterAccountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMembers = "GetMembers" + +// GetMembersRequest generates a "aws/request.Request" representing the +// client's request for the GetMembers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMembers for more information on using the GetMembers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMembersRequest method. +// req, resp := client.GetMembersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers +func (c *SecurityHub) GetMembersRequest(input *GetMembersInput) (req *request.Request, output *GetMembersOutput) { + op := &request.Operation{ + Name: opGetMembers, + HTTPMethod: "POST", + HTTPPath: "/members/get", + } + + if input == nil { + input = &GetMembersInput{} + } + + output = &GetMembersOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMembers API operation for AWS SecurityHub. +// +// Returns the details on the Security Hub member accounts that the account +// IDs specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
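The Request/Send split documented above is what enables the "custom logic" the generated comment mentions: GetMembersRequest builds the request but sends nothing, so handlers and headers can still be adjusted. A sketch, reusing the svc client and imports from the first example; the extra header is purely hypothetical and not required by the API.

// inspectMembers demonstrates the two-step Request form: build, decorate, send.
// (Illustrative helper; not part of the vendored SDK.)
func inspectMembers(svc *securityhub.SecurityHub, accountIDs []string) error {
	req, resp := svc.GetMembersRequest(&securityhub.GetMembersInput{
		AccountIds: aws.StringSlice(accountIDs),
	})
	// Nothing has gone over the wire yet, so the request can still be adjusted.
	req.HTTPRequest.Header.Set("X-Example-Trace", "vendor-audit") // hypothetical header
	if err := req.Send(); err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Println(aws.StringValue(m.AccountId), aws.StringValue(m.MemberStatus))
	}
	return nil
}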
+// +// See the AWS API reference guide for AWS SecurityHub's +// API operation GetMembers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers +func (c *SecurityHub) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) { + req, out := c.GetMembersRequest(input) + return out, req.Send() +} + +// GetMembersWithContext is the same as GetMembers with the addition of +// the ability to pass a context and additional request options. +// +// See GetMembers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) GetMembersWithContext(ctx aws.Context, input *GetMembersInput, opts ...request.Option) (*GetMembersOutput, error) { + req, out := c.GetMembersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opInviteMembers = "InviteMembers" + +// InviteMembersRequest generates a "aws/request.Request" representing the +// client's request for the InviteMembers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See InviteMembers for more information on using the InviteMembers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the InviteMembersRequest method. +// req, resp := client.InviteMembersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers +func (c *SecurityHub) InviteMembersRequest(input *InviteMembersInput) (req *request.Request, output *InviteMembersOutput) { + op := &request.Operation{ + Name: opInviteMembers, + HTTPMethod: "POST", + HTTPPath: "/members/invite", + } + + if input == nil { + input = &InviteMembersInput{} + } + + output = &InviteMembersOutput{} + req = c.newRequest(op, input, output) + return +} + +// InviteMembers API operation for AWS SecurityHub. +// +// Invites other AWS accounts to become member accounts for the Security Hub +// master account that the invitation is sent from. 
Before you can use this
+// action to invite a member, you must first create the member account in Security
+// Hub by using the CreateMembers action. When the account owner accepts the
+// invitation to become a member account and enables Security Hub, the master
+// account can view the findings generated from member account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SecurityHub's
+// API operation InviteMembers for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalException "InternalException"
+// Internal server error.
+//
+// * ErrCodeInvalidInputException "InvalidInputException"
+// The request was rejected because you supplied an invalid or out-of-range
+// value for an input parameter.
+//
+// * ErrCodeInvalidAccessException "InvalidAccessException"
+// AWS Security Hub isn't enabled for the account used to make this request.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// The request was rejected because it attempted to create resources beyond
+// the current AWS account limits. The error code describes the limit exceeded.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The request was rejected because we can't find the specified resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers
+func (c *SecurityHub) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) {
+ req, out := c.InviteMembersRequest(input)
+ return out, req.Send()
+}
+
+// InviteMembersWithContext is the same as InviteMembers with the addition of
+// the ability to pass a context and additional request options.
+//
+// See InviteMembers for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SecurityHub) InviteMembersWithContext(ctx aws.Context, input *InviteMembersInput, opts ...request.Option) (*InviteMembersOutput, error) {
+ req, out := c.InviteMembersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListEnabledProductsForImport = "ListEnabledProductsForImport"
+
+// ListEnabledProductsForImportRequest generates a "aws/request.Request" representing the
+// client's request for the ListEnabledProductsForImport operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListEnabledProductsForImport for more information on using the ListEnabledProductsForImport
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListEnabledProductsForImportRequest method.
+// req, resp := client.ListEnabledProductsForImportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport +func (c *SecurityHub) ListEnabledProductsForImportRequest(input *ListEnabledProductsForImportInput) (req *request.Request, output *ListEnabledProductsForImportOutput) { + op := &request.Operation{ + Name: opListEnabledProductsForImport, + HTTPMethod: "GET", + HTTPPath: "/productSubscriptions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEnabledProductsForImportInput{} + } + + output = &ListEnabledProductsForImportOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEnabledProductsForImport API operation for AWS SecurityHub. +// +// Lists all findings-generating solutions (products) whose findings you have +// subscribed to receive in Security Hub. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation ListEnabledProductsForImport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport +func (c *SecurityHub) ListEnabledProductsForImport(input *ListEnabledProductsForImportInput) (*ListEnabledProductsForImportOutput, error) { + req, out := c.ListEnabledProductsForImportRequest(input) + return out, req.Send() +} + +// ListEnabledProductsForImportWithContext is the same as ListEnabledProductsForImport with the addition of +// the ability to pass a context and additional request options. +// +// See ListEnabledProductsForImport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) ListEnabledProductsForImportWithContext(ctx aws.Context, input *ListEnabledProductsForImportInput, opts ...request.Option) (*ListEnabledProductsForImportOutput, error) { + req, out := c.ListEnabledProductsForImportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEnabledProductsForImportPages iterates over the pages of a ListEnabledProductsForImport operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEnabledProductsForImport method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
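The Paginator block in ListEnabledProductsForImportRequest above is declarative: InputTokens and OutputTokens name the cursor field (NextToken) and LimitToken names the page-size field (MaxResults). Following the cursor by hand makes that mapping concrete; a sketch assuming the svc client from earlier, with the page size chosen arbitrarily.

// listProductsManually follows NextToken by hand, mirroring the Paginator
// declaration above. (Illustrative helper; not part of the vendored SDK.)
func listProductsManually(svc *securityhub.SecurityHub) error {
	input := &securityhub.ListEnabledProductsForImportInput{
		MaxResults: aws.Int64(50), // the LimitToken field; 50 is an arbitrary page size
	}
	for {
		out, err := svc.ListEnabledProductsForImport(input)
		if err != nil {
			return err
		}
		for _, productArn := range out.ProductSubscriptions {
			fmt.Println(aws.StringValue(productArn))
		}
		if out.NextToken == nil { // the OutputTokens field; nil marks the last page
			return nil
		}
		input.NextToken = out.NextToken // fed back through the InputTokens field
	}
}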
+// +// // Example iterating over at most 3 pages of a ListEnabledProductsForImport operation. +// pageNum := 0 +// err := client.ListEnabledProductsForImportPages(params, +// func(page *securityhub.ListEnabledProductsForImportOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) ListEnabledProductsForImportPages(input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool) error { + return c.ListEnabledProductsForImportPagesWithContext(aws.BackgroundContext(), input, fn) } -// GetInvitationsCountWithContext is the same as GetInvitationsCount with the addition of -// the ability to pass a context and additional request options. -// -// See GetInvitationsCount for details on how to use this API operation. +// ListEnabledProductsForImportPagesWithContext same as ListEnabledProductsForImportPages except +// it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) GetInvitationsCountWithContext(ctx aws.Context, input *GetInvitationsCountInput, opts ...request.Option) (*GetInvitationsCountOutput, error) { - req, out := c.GetInvitationsCountRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() +func (c *SecurityHub) ListEnabledProductsForImportPagesWithContext(ctx aws.Context, input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEnabledProductsForImportInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEnabledProductsForImportRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEnabledProductsForImportOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() } -const opGetMasterAccount = "GetMasterAccount" +const opListInvitations = "ListInvitations" -// GetMasterAccountRequest generates a "aws/request.Request" representing the -// client's request for the GetMasterAccount operation. The "output" return +// ListInvitationsRequest generates a "aws/request.Request" representing the +// client's request for the ListInvitations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetMasterAccount for more information on using the GetMasterAccount +// See ListInvitations for more information on using the ListInvitations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetMasterAccountRequest method. -// req, resp := client.GetMasterAccountRequest(params) +// // Example sending a request using the ListInvitationsRequest method. 
+// req, resp := client.ListInvitationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount -func (c *SecurityHub) GetMasterAccountRequest(input *GetMasterAccountInput) (req *request.Request, output *GetMasterAccountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations +func (c *SecurityHub) ListInvitationsRequest(input *ListInvitationsInput) (req *request.Request, output *ListInvitationsOutput) { op := &request.Operation{ - Name: opGetMasterAccount, + Name: opListInvitations, HTTPMethod: "GET", - HTTPPath: "/master", + HTTPPath: "/invitations", } if input == nil { - input = &GetMasterAccountInput{} + input = &ListInvitationsInput{} } - output = &GetMasterAccountOutput{} + output = &ListInvitationsOutput{} req = c.newRequest(op, input, output) return } -// GetMasterAccount API operation for AWS SecurityHub. +// ListInvitations API operation for AWS SecurityHub. // -// Provides the details for the Security Hub master account to the current member -// account. +// Lists all Security Hub membership invitations that were sent to the current +// AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation GetMasterAccount for usage and error information. +// API operation ListInvitations for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount -func (c *SecurityHub) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) { - req, out := c.GetMasterAccountRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations +func (c *SecurityHub) ListInvitations(input *ListInvitationsInput) (*ListInvitationsOutput, error) { + req, out := c.ListInvitationsRequest(input) return out, req.Send() } -// GetMasterAccountWithContext is the same as GetMasterAccount with the addition of +// ListInvitationsWithContext is the same as ListInvitations with the addition of // the ability to pass a context and additional request options. // -// See GetMasterAccount for details on how to use this API operation. +// See ListInvitations for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) GetMasterAccountWithContext(ctx aws.Context, input *GetMasterAccountInput, opts ...request.Option) (*GetMasterAccountOutput, error) { - req, out := c.GetMasterAccountRequest(input) +func (c *SecurityHub) ListInvitationsWithContext(ctx aws.Context, input *ListInvitationsInput, opts ...request.Option) (*ListInvitationsOutput, error) { + req, out := c.ListInvitationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetMembers = "GetMembers" +const opListMembers = "ListMembers" -// GetMembersRequest generates a "aws/request.Request" representing the -// client's request for the GetMembers operation. The "output" return +// ListMembersRequest generates a "aws/request.Request" representing the +// client's request for the ListMembers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetMembers for more information on using the GetMembers +// See ListMembers for more information on using the ListMembers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetMembersRequest method. -// req, resp := client.GetMembersRequest(params) +// // Example sending a request using the ListMembersRequest method. +// req, resp := client.ListMembersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers -func (c *SecurityHub) GetMembersRequest(input *GetMembersInput) (req *request.Request, output *GetMembersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers +func (c *SecurityHub) ListMembersRequest(input *ListMembersInput) (req *request.Request, output *ListMembersOutput) { op := &request.Operation{ - Name: opGetMembers, - HTTPMethod: "POST", - HTTPPath: "/members/get", + Name: opListMembers, + HTTPMethod: "GET", + HTTPPath: "/members", } if input == nil { - input = &GetMembersInput{} + input = &ListMembersInput{} } - output = &GetMembersOutput{} + output = &ListMembersOutput{} req = c.newRequest(op, input, output) return } -// GetMembers API operation for AWS SecurityHub. +// ListMembers API operation for AWS SecurityHub. // -// Returns the details on the Security Hub member accounts that are specified -// by the account IDs. +// Lists details about all member accounts for the current Security Hub master +// account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation GetMembers for usage and error information. +// API operation ListMembers for usage and error information. 
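The *WithContext variants introduced through this region all follow the boilerplate shown for ListInvitationsWithContext: a non-nil aws.Context is mandatory, and a plain context.Context satisfies it. A sketch that bounds the call with a deadline, assuming the earlier svc client plus the standard context and time packages.

// listPendingInvitations bounds the call with a 10-second deadline.
// (Illustrative helper; not part of the vendored SDK.)
func listPendingInvitations(svc *securityhub.SecurityHub) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.ListInvitationsWithContext(ctx, &securityhub.ListInvitationsInput{})
	if err != nil {
		return err // a hit deadline surfaces as a request-canceled error
	}
	for _, inv := range out.Invitations {
		fmt.Println(aws.StringValue(inv.AccountId), aws.StringValue(inv.MemberStatus))
	}
	return nil
}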
// // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // -// * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers -func (c *SecurityHub) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) { - req, out := c.GetMembersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers +func (c *SecurityHub) ListMembers(input *ListMembersInput) (*ListMembersOutput, error) { + req, out := c.ListMembersRequest(input) return out, req.Send() } -// GetMembersWithContext is the same as GetMembers with the addition of +// ListMembersWithContext is the same as ListMembers with the addition of // the ability to pass a context and additional request options. // -// See GetMembers for details on how to use this API operation. +// See ListMembers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) GetMembersWithContext(ctx aws.Context, input *GetMembersInput, opts ...request.Option) (*GetMembersOutput, error) { - req, out := c.GetMembersRequest(input) +func (c *SecurityHub) ListMembersWithContext(ctx aws.Context, input *ListMembersInput, opts ...request.Option) (*ListMembersOutput, error) { + req, out := c.ListMembersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInviteMembers = "InviteMembers" +const opListTagsForResource = "ListTagsForResource" -// InviteMembersRequest generates a "aws/request.Request" representing the -// client's request for the InviteMembers operation. The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InviteMembers for more information on using the InviteMembers +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
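ListMembers, documented above as listing details about all member accounts for the current master account, takes an optional filter on its input. A sketch; note that OnlyAssociated is a field of ListMembersInput in this SDK revision whose semantics are paraphrased here as an assumption, not quoted from this diff.

// associatedMembers lists member accounts that have accepted an invitation.
// (Illustrative helper; not part of the vendored SDK.)
func associatedMembers(svc *securityhub.SecurityHub) ([]*securityhub.Member, error) {
	out, err := svc.ListMembers(&securityhub.ListMembersInput{
		OnlyAssociated: aws.Bool(true), // assumed semantics: skip never-associated accounts
	})
	if err != nil {
		return nil, err
	}
	return out.Members, nil
}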
// // -// // Example sending a request using the InviteMembersRequest method. -// req, resp := client.InviteMembersRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers -func (c *SecurityHub) InviteMembersRequest(input *InviteMembersInput) (req *request.Request, output *InviteMembersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListTagsForResource +func (c *SecurityHub) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opInviteMembers, - HTTPMethod: "POST", - HTTPPath: "/members/invite", + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{ResourceArn}", } if input == nil { - input = &InviteMembersInput{} + input = &ListTagsForResourceInput{} } - output = &InviteMembersOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// InviteMembers API operation for AWS SecurityHub. +// ListTagsForResource API operation for AWS SecurityHub. // -// Invites other AWS accounts to enable Security Hub and become Security Hub -// member accounts. When an account accepts the invitation and becomes a member -// account, the master account can view Security Hub findings of the member -// account. +// Returns a list of tags associated with a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation InviteMembers for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. -// -// * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error code describes the limit exceeded. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. 
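ListTagsForResource above issues GET /tags/{ResourceArn}, with the ARN marshaled into the URI path by the rest-json protocol. A sketch assuming the earlier svc client; the hub ARN shown is a made-up placeholder.

// listHubTags prints every tag on the given Security Hub resource.
// (Illustrative helper; not part of the vendored SDK.)
func listHubTags(svc *securityhub.SecurityHub, arn string) error {
	out, err := svc.ListTagsForResource(&securityhub.ListTagsForResourceInput{
		ResourceArn: aws.String(arn), // e.g. a hub ARN; placeholder in these sketches
	})
	if err != nil {
		return err
	}
	for key, value := range out.Tags {
		fmt.Printf("%s=%s\n", key, aws.StringValue(value))
	}
	return nil
}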
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers -func (c *SecurityHub) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) { - req, out := c.InviteMembersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListTagsForResource +func (c *SecurityHub) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// InviteMembersWithContext is the same as InviteMembers with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See InviteMembers for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) InviteMembersWithContext(ctx aws.Context, input *InviteMembersInput, opts ...request.Option) (*InviteMembersOutput, error) { - req, out := c.InviteMembersRequest(input) +func (c *SecurityHub) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListEnabledProductsForImport = "ListEnabledProductsForImport" +const opTagResource = "TagResource" -// ListEnabledProductsForImportRequest generates a "aws/request.Request" representing the -// client's request for the ListEnabledProductsForImport operation. The "output" return +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListEnabledProductsForImport for more information on using the ListEnabledProductsForImport +// See TagResource for more information on using the TagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListEnabledProductsForImportRequest method. -// req, resp := client.ListEnabledProductsForImportRequest(params) +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport -func (c *SecurityHub) ListEnabledProductsForImportRequest(input *ListEnabledProductsForImportInput) (req *request.Request, output *ListEnabledProductsForImportOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/TagResource +func (c *SecurityHub) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { op := &request.Operation{ - Name: opListEnabledProductsForImport, - HTTPMethod: "GET", - HTTPPath: "/productSubscriptions", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{ResourceArn}", } if input == nil { - input = &ListEnabledProductsForImportInput{} + input = &TagResourceInput{} } - output = &ListEnabledProductsForImportOutput{} + output = &TagResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListEnabledProductsForImport API operation for AWS SecurityHub. +// TagResource API operation for AWS SecurityHub. // -// Lists all findings-generating solutions (products) whose findings you've -// subscribed to receive in Security Hub. +// Adds one or more tags to a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation ListEnabledProductsForImport for usage and error information. +// API operation TagResource for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // -// * ErrCodeLimitExceededException "LimitExceededException" -// The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error code describes the limit exceeded. +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // -// * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. 
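TagResourceRequest above swaps in protocol.UnmarshalDiscardBodyHandler, which reflects that the operation returns an empty body: TagResourceOutput carries no fields, so a nil error is the only success signal. A sketch assuming the earlier svc client; tag names and values are placeholders.

// tagHub adds tags to a resource; success is signalled solely by a nil error.
// (Illustrative helper; not part of the vendored SDK.)
func tagHub(svc *securityhub.SecurityHub, arn string) error {
	_, err := svc.TagResource(&securityhub.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags: map[string]*string{
			"team":  aws.String("security"), // placeholder tags
			"stack": aws.String("prod"),
		},
	})
	return err
}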
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers
-func (c *SecurityHub) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) {
- req, out := c.InviteMembersRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/TagResource
+func (c *SecurityHub) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
return out, req.Send()
}

-// InviteMembersWithContext is the same as InviteMembers with the addition of
+// TagResourceWithContext is the same as TagResource with the addition of
// the ability to pass a context and additional request options.
//
-// See InviteMembers for details on how to use this API operation.
+// See TagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *SecurityHub) InviteMembersWithContext(ctx aws.Context, input *InviteMembersInput, opts ...request.Option) (*InviteMembersOutput, error) {
- req, out := c.InviteMembersRequest(input)
+func (c *SecurityHub) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListEnabledProductsForImportPages iterates over the pages of a ListEnabledProductsForImport operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListEnabledProductsForImport method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListEnabledProductsForImport operation.
-// pageNum := 0
-// err := client.ListEnabledProductsForImportPages(params,
-// func(page *ListEnabledProductsForImportOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-//
-func (c *SecurityHub) ListEnabledProductsForImportPages(input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool) error {
- return c.ListEnabledProductsForImportPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListEnabledProductsForImportPagesWithContext same as ListEnabledProductsForImportPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SecurityHub) ListEnabledProductsForImportPagesWithContext(ctx aws.Context, input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListEnabledProductsForImportInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListEnabledProductsForImportRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEnabledProductsForImportOutput), !p.HasNextPage()) - } - return p.Err() + req.ApplyOptions(opts...) + return out, req.Send() } -const opListInvitations = "ListInvitations" +const opUntagResource = "UntagResource" -// ListInvitationsRequest generates a "aws/request.Request" representing the -// client's request for the ListInvitations operation. The "output" return +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListInvitations for more information on using the ListInvitations +// See UntagResource for more information on using the UntagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListInvitationsRequest method. -// req, resp := client.ListInvitationsRequest(params) +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations -func (c *SecurityHub) ListInvitationsRequest(input *ListInvitationsInput) (req *request.Request, output *ListInvitationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UntagResource +func (c *SecurityHub) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { op := &request.Operation{ - Name: opListInvitations, - HTTPMethod: "GET", - HTTPPath: "/invitations", + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{ResourceArn}", } if input == nil { - input = &ListInvitationsInput{} + input = &UntagResourceInput{} } - output = &ListInvitationsOutput{} + output = &UntagResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListInvitations API operation for AWS SecurityHub. +// UntagResource API operation for AWS SecurityHub. // -// Lists all Security Hub membership invitations that were sent to the current -// AWS account. +// Removes one or more tags from a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
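UntagResource, introduced above with HTTP DELETE on /tags/{ResourceArn}, is the inverse of TagResource and likewise discards its response body. A sketch assuming the earlier svc client; in this SDK revision the tag keys are carried in the query string, a detail read from the model rather than from the hunks shown here.

// untagHub removes the named tag keys from a resource.
// (Illustrative helper; not part of the vendored SDK.)
func untagHub(svc *securityhub.SecurityHub, arn string, keys []string) error {
	_, err := svc.UntagResource(&securityhub.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     aws.StringSlice(keys),
	})
	return err
}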
// // See the AWS API reference guide for AWS SecurityHub's -// API operation ListInvitations for usage and error information. +// API operation UntagResource for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. -// -// * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // -// * ErrCodeLimitExceededException "LimitExceededException" -// The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error code describes the limit exceeded. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations -func (c *SecurityHub) ListInvitations(input *ListInvitationsInput) (*ListInvitationsOutput, error) { - req, out := c.ListInvitationsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UntagResource +func (c *SecurityHub) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) return out, req.Send() } -// ListInvitationsWithContext is the same as ListInvitations with the addition of +// UntagResourceWithContext is the same as UntagResource with the addition of // the ability to pass a context and additional request options. // -// See ListInvitations for details on how to use this API operation. +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) ListInvitationsWithContext(ctx aws.Context, input *ListInvitationsInput, opts ...request.Option) (*ListInvitationsOutput, error) { - req, out := c.ListInvitationsRequest(input) +func (c *SecurityHub) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListMembers = "ListMembers" +const opUpdateActionTarget = "UpdateActionTarget" -// ListMembersRequest generates a "aws/request.Request" representing the -// client's request for the ListMembers operation. The "output" return +// UpdateActionTargetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateActionTarget operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListMembers for more information on using the ListMembers +// See UpdateActionTarget for more information on using the UpdateActionTarget // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListMembersRequest method. -// req, resp := client.ListMembersRequest(params) +// // Example sending a request using the UpdateActionTargetRequest method. +// req, resp := client.UpdateActionTargetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers -func (c *SecurityHub) ListMembersRequest(input *ListMembersInput) (req *request.Request, output *ListMembersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateActionTarget +func (c *SecurityHub) UpdateActionTargetRequest(input *UpdateActionTargetInput) (req *request.Request, output *UpdateActionTargetOutput) { op := &request.Operation{ - Name: opListMembers, - HTTPMethod: "GET", - HTTPPath: "/members", + Name: opUpdateActionTarget, + HTTPMethod: "PATCH", + HTTPPath: "/actionTargets/{ActionTargetArn+}", } if input == nil { - input = &ListMembersInput{} + input = &UpdateActionTargetInput{} } - output = &ListMembersOutput{} + output = &UpdateActionTargetOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ListMembers API operation for AWS SecurityHub. +// UpdateActionTarget API operation for AWS SecurityHub. // -// Lists details about all member accounts for the current Security Hub master -// account. +// Updates the name and description of a custom action target in Security Hub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS SecurityHub's -// API operation ListMembers for usage and error information. +// API operation UpdateActionTarget for usage and error information. // // Returned Error Codes: // * ErrCodeInternalException "InternalException" // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // -// * ErrCodeLimitExceededException "LimitExceededException" -// The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error code describes the limit exceeded. +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. 
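UpdateActionTarget above PATCHes /actionTargets/{ActionTargetArn+} and, per its comment, updates the name and description of a custom action target. A sketch assuming the earlier svc client; the ARN and strings are invented placeholders.

// renameActionTarget updates the mutable fields of a custom action target.
// (Illustrative helper; not part of the vendored SDK.)
func renameActionTarget(svc *securityhub.SecurityHub) error {
	_, err := svc.UpdateActionTarget(&securityhub.UpdateActionTargetInput{
		ActionTargetArn: aws.String("arn:aws:securityhub:us-west-2:111122223333:action/custom/triage"), // placeholder
		Name:            aws.String("Send to triage"),
		Description:     aws.String("Forwards selected findings to the triage queue"),
	})
	return err
}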
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers -func (c *SecurityHub) ListMembers(input *ListMembersInput) (*ListMembersOutput, error) { - req, out := c.ListMembersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateActionTarget +func (c *SecurityHub) UpdateActionTarget(input *UpdateActionTargetInput) (*UpdateActionTargetOutput, error) { + req, out := c.UpdateActionTargetRequest(input) return out, req.Send() } -// ListMembersWithContext is the same as ListMembers with the addition of +// UpdateActionTargetWithContext is the same as UpdateActionTarget with the addition of // the ability to pass a context and additional request options. // -// See ListMembers for details on how to use this API operation. +// See UpdateActionTarget for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *SecurityHub) ListMembersWithContext(ctx aws.Context, input *ListMembersInput, opts ...request.Option) (*ListMembersOutput, error) { - req, out := c.ListMembersRequest(input) +func (c *SecurityHub) UpdateActionTargetWithContext(ctx aws.Context, input *UpdateActionTargetInput, opts ...request.Option) (*UpdateActionTargetOutput, error) { + req, out := c.UpdateActionTargetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -2728,8 +3695,9 @@ func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *re // UpdateFindings API operation for AWS SecurityHub. // -// Updates the AWS Security Hub-aggregated findings specified by the filter -// attributes. +// Updates the Note and RecordState of the Security Hub-aggregated findings +// that the filter attributes specify. Any member account that can view the +// finding also sees the update to the finding. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2743,18 +3711,18 @@ func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *re // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. 
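The reworded comment above narrows UpdateFindings to what it actually does: update the Note and RecordState of every finding matched by the filters, visible to any member account that can see the finding. A sketch that archives findings from a hypothetical generator; the filter types, enum constants, and note text come from this SDK revision's model and are assumptions here, not quotations from the diff.

// archiveScannerFindings notes and archives findings from one generator.
// (Illustrative helper; not part of the vendored SDK.)
func archiveScannerFindings(svc *securityhub.SecurityHub) error {
	_, err := svc.UpdateFindings(&securityhub.UpdateFindingsInput{
		Filters: &securityhub.AwsSecurityFindingFilters{
			GeneratorId: []*securityhub.StringFilter{{
				Comparison: aws.String(securityhub.StringFilterComparisonEquals),
				Value:      aws.String("custom/legacy-scanner"), // hypothetical generator ID
			}},
		},
		Note: &securityhub.NoteUpdate{
			Text:      aws.String("Scanner retired; findings archived."),
			UpdatedBy: aws.String("secops"),
		},
		RecordState: aws.String(securityhub.RecordStateArchived),
	})
	return err
}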
// // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateFindings func (c *SecurityHub) UpdateFindings(input *UpdateFindingsInput) (*UpdateFindingsOutput, error) { @@ -2823,7 +3791,7 @@ func (c *SecurityHub) UpdateInsightRequest(input *UpdateInsightInput) (req *requ // UpdateInsight API operation for AWS SecurityHub. // -// Updates the AWS Security Hub insight specified by the insight ARN. +// Updates the Security Hub insight that the insight ARN specifies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2837,18 +3805,18 @@ func (c *SecurityHub) UpdateInsightRequest(input *UpdateInsightInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateInsight func (c *SecurityHub) UpdateInsight(input *UpdateInsightInput) (*UpdateInsightOutput, error) { @@ -2875,13 +3843,15 @@ func (c *SecurityHub) UpdateInsightWithContext(ctx aws.Context, input *UpdateIns type AcceptInvitationInput struct { _ struct{} `type:"structure"` - // The ID of the invitation that is sent to the AWS account by the Security - // Hub master account. - InvitationId *string `type:"string"` + // The ID of the invitation sent from the Security Hub master account. + // + // InvitationId is a required field + InvitationId *string `type:"string" required:"true"` - // The account ID of the master Security Hub account whose invitation you're - // accepting. - MasterId *string `type:"string"` + // The account ID of the Security Hub master account that sent the invitation. + // + // MasterId is a required field + MasterId *string `type:"string" required:"true"` } // String returns the string representation @@ -2894,6 +3864,22 @@ func (s AcceptInvitationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptInvitationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptInvitationInput"} + if s.InvitationId == nil { + invalidParams.Add(request.NewErrParamRequired("InvitationId")) + } + if s.MasterId == nil { + invalidParams.Add(request.NewErrParamRequired("MasterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetInvitationId sets the InvitationId field's value. 
func (s *AcceptInvitationInput) SetInvitationId(v string) *AcceptInvitationInput {
s.InvitationId = &v
@@ -2953,7 +3939,55 @@ func (s *AccountDetails) SetEmail(v string) *AccountDetails {
return s
}

-// The details of an AWS EC2 instance.
+// An ActionTarget object.
+type ActionTarget struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN for the target action.
+ //
+ // ActionTargetArn is a required field
+ ActionTargetArn *string `type:"string" required:"true"`
+
+ // The description of the target action.
+ //
+ // Description is a required field
+ Description *string `type:"string" required:"true"`
+
+ // The name of the action target.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ActionTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActionTarget) GoString() string {
+ return s.String()
+}
+
+// SetActionTargetArn sets the ActionTargetArn field's value.
+func (s *ActionTarget) SetActionTargetArn(v string) *ActionTarget {
+ s.ActionTargetArn = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ActionTarget) SetDescription(v string) *ActionTarget {
+ s.Description = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ActionTarget) SetName(v string) *ActionTarget {
+ s.Name = &v
+ return s
+}
+
+// The details of an Amazon EC2 instance.
type AwsEc2InstanceDetails struct {
_ struct{} `type:"structure"`
@@ -2975,13 +4009,13 @@ type AwsEc2InstanceDetails struct {
// The date/time the instance was launched.
LaunchedAt *string `type:"string"`

- // The identifier of the subnet in which the instance was launched.
+ // The identifier of the subnet that the instance was launched in.
SubnetId *string `type:"string"`

// The instance type of the instance.
Type *string `type:"string"`

- // The identifier of the VPC in which the instance was launched.
+ // The identifier of the VPC that the instance was launched in.
VpcId *string `type:"string"`
}
@@ -3049,7 +4083,7 @@ func (s *AwsEc2InstanceDetails) SetVpcId(v string) *AwsEc2InstanceDetails {
return s
}

-// AWS IAM access key details related to a finding.
+// IAM access key details related to a finding.
type AwsIamAccessKeyDetails struct {
_ struct{} `type:"structure"`
@@ -3091,7 +4125,7 @@ func (s *AwsIamAccessKeyDetails) SetUserName(v string) *AwsIamAccessKeyDetails {
return s
}

-// The details of an AWS S3 Bucket.
+// The details of an Amazon S3 bucket.
type AwsS3BucketDetails struct {
_ struct{} `type:"structure"`
@@ -3129,51 +4163,52 @@ func (s *AwsS3BucketDetails) SetOwnerName(v string) *AwsS3BucketDetails {
// AWS security services and third-party solutions, and compliance checks.
//
// A finding is a potential security issue generated either by AWS services
-// (GuardDuty, Inspector, Macie) or by the integrated third-party solutions
-// and compliance checks.
+// (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated
+// third-party solutions and compliance checks.
type AwsSecurityFinding struct {
_ struct{} `type:"structure"`

- // The AWS account ID in which a finding is generated.
+ // The AWS account ID that a finding is generated in.
//
// AwsAccountId is a required field
AwsAccountId *string `type:"string" required:"true"`

// This data type is exclusive to findings that are generated as the result
// of a check run against a specific rule in a supported standard (for example,
- // AWS CIS Foundations). Contains compliance-related finding details.
Contains compliance-related finding details. + // CIS AWS Foundations). Contains compliance-related finding details. Compliance *Compliance `type:"structure"` // A finding's confidence. Confidence is defined as the likelihood that a finding // accurately identifies the behavior or issue that it was intended to identify. - // Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero - // percent confidence and 100 equates to 100 percent confidence. + // Confidence is scored on a 0-100 basis using a ratio scale, where 0 means + // zero percent confidence and 100 means 100 percent confidence. Confidence *int64 `type:"integer"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was created by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider created the potential security issue that a finding captured. // // CreatedAt is a required field CreatedAt *string `type:"string" required:"true"` // The level of importance assigned to the resources associated with the finding. - // A score of 0 means the underlying resources have no criticality, and a score - // of 100 is reserved for the most critical resources. + // A score of 0 means that the underlying resources have no criticality, and + // a score of 100 is reserved for the most critical resources. Criticality *int64 `type:"integer"` // A finding's description. // // In this release, Description is a required property. - Description *string `type:"string"` + // + // Description is a required field + Description *string `type:"string" required:"true"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was first observed by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider first observed the potential security issue that a finding captured. FirstObservedAt *string `type:"string"` - // This is the identifier for the solution-specific component (a discrete unit - // of logic) that generated a finding. In various security findings provider's - // solutions, this generator can be called a rule, a check, a detector, a plug-in, - // etc. + // The identifier for the solution-specific component (a discrete unit of logic) + // that generated a finding. In various security-findings providers' solutions, + // this generator can be called a rule, a check, a detector, a plug-in, etc. // // GeneratorId is a required field GeneratorId *string `type:"string" required:"true"` @@ -3183,9 +4218,9 @@ type AwsSecurityFinding struct { // Id is a required field Id *string `type:"string" required:"true"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was most recently observed by the security findings - // provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider most recently observed the potential security issue that a finding + // captured. LastObservedAt *string `type:"string"` // A list of malware related to a finding. 
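Editor's note: this revision promotes Description (above) and Title (a little further down) from optional to required on AwsSecurityFinding, and the corresponding Validate checks appear below. Under that required-field list, a sketch of the smallest finding that should still pass client-side validation; every ARN, identifier, and timestamp here is a placeholder:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// minimalFinding builds a finding that carries only the fields this
// revision's Validate method requires; all values are placeholders.
func minimalFinding() *securityhub.AwsSecurityFinding {
	now := "2019-11-01T00:00:00Z" // ISO8601, as the doc comments require
	return &securityhub.AwsSecurityFinding{
		AwsAccountId:  aws.String("123456789012"),
		CreatedAt:     aws.String(now),
		UpdatedAt:     aws.String(now),
		Description:   aws.String("Example finding description"), // newly required
		Title:         aws.String("Example finding title"),       // newly required
		GeneratorId:   aws.String("example-generator"),
		Id:            aws.String("example-finding-id"),
		ProductArn:    aws.String("arn:aws:securityhub:us-west-2:123456789012:product/123456789012/default"),
		SchemaVersion: aws.String("2018-10-08"),
		Severity:      &securityhub.Severity{Normalized: aws.Int64(0)},
		Types:         []*string{aws.String("Software and Configuration Checks")},
		Resources: []*securityhub.Resource{{
			Id:   aws.String("arn:aws:ec2:us-west-2:123456789012:instance/i-1234567890abcdef0"),
			Type: aws.String("AwsEc2Instance"),
		}},
	}
}

func main() { _ = minimalFinding() }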
@@ -3201,14 +4236,14 @@ type AwsSecurityFinding struct { Process *ProcessDetails `type:"structure"` // The ARN generated by Security Hub that uniquely identifies a third-party - // company (security findings provider) once this provider's product (solution + // company (security-findings provider) after this provider's product (solution // that generates findings) is registered with Security Hub. // // ProductArn is a required field ProductArn *string `type:"string" required:"true"` - // A data type where security findings providers can include additional solution-specific - // details that are not part of the defined AwsSecurityFinding format. + // A data type where security-findings providers can include additional solution-specific + // details that aren't part of the defined AwsSecurityFinding format. ProductFields map[string]*string `type:"map"` // The record state of a finding. @@ -3217,16 +4252,16 @@ type AwsSecurityFinding struct { // A list of related findings. RelatedFindings []*RelatedFinding `type:"list"` - // An data type that describes the remediation options for a finding. + // A data type that describes the remediation options for a finding. Remediation *Remediation `type:"structure"` - // A set of resource data types that describe the resources to which the finding - // refers. + // A set of resource data types that describe the resources that the finding + // refers to. // // Resources is a required field Resources []*Resource `type:"list" required:"true"` - // The schema version for which a finding is formatted. + // The schema version that a finding is formatted for. // // SchemaVersion is a required field SchemaVersion *string `type:"string" required:"true"` @@ -3236,7 +4271,7 @@ type AwsSecurityFinding struct { // Severity is a required field Severity *Severity `type:"structure" required:"true"` - // A URL that links to a page about the current finding in the security findings + // A URL that links to a page about the current finding in the security-findings // provider's solution. SourceUrl *string `type:"string"` @@ -3246,9 +4281,11 @@ type AwsSecurityFinding struct { // A finding's title. // // In this release, Title is a required property. - Title *string `type:"string"` + // + // Title is a required field + Title *string `type:"string" required:"true"` - // One or more finding types in the format of 'namespace/category/classifier' + // One or more finding types in the format of namespace/category/classifier // that classify a finding. // // Valid namespace values are: Software and Configuration Checks | TTPs | Effects @@ -3257,8 +4294,8 @@ type AwsSecurityFinding struct { // Types is a required field Types []*string `type:"list" required:"true"` - // An ISO8601-formatted timestamp that indicates when the finding record was - // last updated by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider last updated the finding record. 
// // UpdatedAt is a required field UpdatedAt *string `type:"string" required:"true"` @@ -3293,6 +4330,9 @@ func (s *AwsSecurityFinding) Validate() error { if s.CreatedAt == nil { invalidParams.Add(request.NewErrParamRequired("CreatedAt")) } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } if s.GeneratorId == nil { invalidParams.Add(request.NewErrParamRequired("GeneratorId")) } @@ -3311,6 +4351,9 @@ func (s *AwsSecurityFinding) Validate() error { if s.Severity == nil { invalidParams.Add(request.NewErrParamRequired("Severity")) } + if s.Title == nil { + invalidParams.Add(request.NewErrParamRequired("Title")) + } if s.Types == nil { invalidParams.Add(request.NewErrParamRequired("Types")) } @@ -3550,7 +4593,7 @@ func (s *AwsSecurityFinding) SetWorkflowState(v string) *AwsSecu type AwsSecurityFindingFilters struct { _ struct{} `type:"structure"` - // The AWS account ID in which a finding is generated. + // The AWS account ID that a finding is generated in. AwsAccountId []*StringFilter `type:"list"` // The name of the findings provider (company) that owns the solution (product) @@ -3558,36 +4601,35 @@ type AwsSecurityFindingFilters struct { CompanyName []*StringFilter `type:"list"` // Exclusive to findings that are generated as the result of a check run against - // a specific rule in a supported standard (for example, AWS CIS Foundations). + // a specific rule in a supported standard (for example, CIS AWS Foundations). // Contains compliance-related finding details. ComplianceStatus []*StringFilter `type:"list"` // A finding's confidence. Confidence is defined as the likelihood that a finding // accurately identifies the behavior or issue that it was intended to identify. - // Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero - // percent confidence and 100 equates to 100 percent confidence. + // Confidence is scored on a 0-100 basis using a ratio scale, where 0 means + // zero percent confidence and 100 means 100 percent confidence. Confidence []*NumberFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was created by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider created the potential security issue that a finding captured. CreatedAt []*DateFilter `type:"list"` // The level of importance assigned to the resources associated with the finding. - // A score of 0 means the underlying resources have no criticality, and a score - // of 100 is reserved for the most critical resources. + // A score of 0 means that the underlying resources have no criticality, and + // a score of 100 is reserved for the most critical resources. Criticality []*NumberFilter `type:"list"` // A finding's description. Description []*StringFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was first observed by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider first observed the potential security issue that a finding captured. FirstObservedAt []*DateFilter `type:"list"` - // This is the identifier for the solution-specific component (a discrete unit - // of logic) that generated a finding. In various security findings provider's - // solutions, this generator can be called a rule, a check, a detector, a plug-in, - // etc.
+ // The identifier for the solution-specific component (a discrete unit of logic) + // that generated a finding. In various security-findings providers' solutions, + // this generator can be called a rule, a check, a detector, a plug-in, etc. GeneratorId []*StringFilter `type:"list"` // The security findings provider-specific identifier for a finding. @@ -3596,9 +4638,9 @@ type AwsSecurityFindingFilters struct { // A keyword for a finding. Keyword []*KeywordFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was most recently observed by the security findings - // provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider most recently observed the potential security issue that a finding + // captured. LastObservedAt []*DateFilter `type:"list"` // The name of the malware that was observed. @@ -3675,12 +4717,12 @@ type AwsSecurityFindingFilters struct { ProcessTerminatedAt []*DateFilter `type:"list"` // The ARN generated by Security Hub that uniquely identifies a third-party - // company (security findings provider) once this provider's product (solution + // company (security findings provider) after this provider's product (solution // that generates findings) is registered with Security Hub. ProductArn []*StringFilter `type:"list"` - // A data type where security findings providers can include additional solution-specific - // details that are not part of the defined AwsSecurityFinding format. + // A data type where security-findings providers can include additional solution-specific + // details that aren't part of the defined AwsSecurityFinding format. ProductFields []*MapFilter `type:"list"` // The name of the solution (product) that generates findings. @@ -3716,13 +4758,13 @@ type AwsSecurityFindingFilters struct { // The date/time the instance was launched. ResourceAwsEc2InstanceLaunchedAt []*DateFilter `type:"list"` - // The identifier of the subnet in which the instance was launched. + // The identifier of the subnet that the instance was launched in. ResourceAwsEc2InstanceSubnetId []*StringFilter `type:"list"` // The instance type of the instance. ResourceAwsEc2InstanceType []*StringFilter `type:"list"` - // The identifier of the VPC in which the instance was launched. + // The identifier of the VPC that the instance was launched in. ResourceAwsEc2InstanceVpcId []*StringFilter `type:"list"` // The creation date/time of the IAM access key related to a finding. @@ -3752,24 +4794,24 @@ type AwsSecurityFindingFilters struct { // The name of the container related to a finding. ResourceContainerName []*StringFilter `type:"list"` - // The details of a resource that does not have a specific sub-field for the - // resource type defined. + // The details of a resource that doesn't have a specific subfield for the resource + // type defined. ResourceDetailsOther []*MapFilter `type:"list"` // The canonical identifier for the given resource type. ResourceId []*StringFilter `type:"list"` - // The canonical AWS partition name to which the region is assigned. + // The canonical AWS partition name that the Region is assigned to. ResourcePartition []*StringFilter `type:"list"` - // The canonical AWS external region name where this resource is located. + // The canonical AWS external Region name where this resource is located. ResourceRegion []*StringFilter `type:"list"` // A list of AWS tags associated with a resource at the time the finding was // processed. 
ResourceTags []*MapFilter `type:"list"` - // Specifies the type of the resource for which details are provided. + // Specifies the type of the resource that details are provided for. ResourceType []*StringFilter `type:"list"` // The label of a finding's severity. @@ -3778,11 +4820,11 @@ type AwsSecurityFindingFilters struct { // The normalized severity of a finding. SeverityNormalized []*NumberFilter `type:"list"` - // The native severity as defined by the security findings provider's solution + // The native severity as defined by the security-findings provider's solution // that generated the finding. SeverityProduct []*NumberFilter `type:"list"` - // A URL that links to a page about the current finding in the security findings + // A URL that links to a page about the current finding in the security-findings // provider's solution. SourceUrl []*StringFilter `type:"list"` @@ -3807,19 +4849,19 @@ type AwsSecurityFindingFilters struct { // A finding's title. Title []*StringFilter `type:"list"` - // A finding type in the format of 'namespace/category/classifier' that classifies + // A finding type in the format of namespace/category/classifier that classifies // a finding. Type []*StringFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the finding record was - // last updated by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider last updated the finding record. UpdatedAt []*DateFilter `type:"list"` // A list of name/value string pairs associated with the finding. These are // custom, user-defined fields added to a finding. UserDefinedFields []*MapFilter `type:"list"` - // Indicates the veracity of a finding. + // The veracity of a finding. VerificationState []*StringFilter `type:"list"` // The workflow state of a finding. @@ -4337,7 +5379,7 @@ func (s *AwsSecurityFindingFilters) SetWorkflowState(v []*StringFilter) *AwsSecu type BatchDisableStandardsInput struct { _ struct{} `type:"structure"` - // The ARNS of the standards subscriptions that you want to disable. + // The ARNs of the standards subscriptions to disable. // // StandardsSubscriptionArns is a required field StandardsSubscriptionArns []*string `min:"1" type:"list" required:"true"` @@ -4401,11 +5443,11 @@ func (s *BatchDisableStandardsOutput) SetStandardsSubscriptions(v []*StandardsSu type BatchEnableStandardsInput struct { _ struct{} `type:"structure"` - // The list of standards that you want to enable. + // The list of standards compliance checks to enable. // - // In this release, Security Hub only supports the CIS AWS Foundations standard. + // In this release, Security Hub supports only the CIS AWS Foundations standard. // - // Its ARN is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. + // The ARN for the standard is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. // // StandardsSubscriptionRequests is a required field StandardsSubscriptionRequests []*StandardsSubscriptionRequest `min:"1" type:"list" required:"true"` @@ -4479,8 +5521,8 @@ func (s *BatchEnableStandardsOutput) SetStandardsSubscriptions(v []*StandardsSub type BatchImportFindingsInput struct { _ struct{} `type:"structure"` - // A list of findings that you want to import. Must be submitted in the AWSSecurityFinding - // format. + // A list of findings to import. 
To successfully import a finding, it must follow + // the AWS Security Finding Format (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format.html). // // Findings is a required field Findings []*AwsSecurityFinding `type:"list" required:"true"` @@ -4528,15 +5570,15 @@ func (s *BatchImportFindingsInput) SetFindings(v []*AwsSecurityFinding) *BatchIm type BatchImportFindingsOutput struct { _ struct{} `type:"structure"` - // The number of findings that cannot be imported. + // The number of findings that failed to import. // // FailedCount is a required field FailedCount *int64 `type:"integer" required:"true"` - // The list of the findings that cannot be imported. + // The list of the findings that failed to import. FailedFindings []*ImportFindingsError `type:"list"` - // The number of findings that were successfully imported + // The number of findings that were successfully imported. // // SuccessCount is a required field SuccessCount *int64 `type:"integer" required:"true"` @@ -4571,12 +5613,12 @@ func (s *BatchImportFindingsOutput) SetSuccessCount(v int64) *BatchImportFinding } // Exclusive to findings that are generated as the result of a check run against -// a specific rule in a supported standard (for example, AWS CIS Foundations). +// a specific rule in a supported standard (for example, CIS AWS Foundations). // Contains compliance-related finding details. type Compliance struct { _ struct{} `type:"structure"` - // Indicates the result of a compliance check. + // The result of a compliance check. Status *string `type:"string" enum:"ComplianceStatus"` } @@ -4606,7 +5648,7 @@ type ContainerDetails struct { // The name of the image related to a finding. ImageName *string `type:"string"` - // The date/time that the container was started. + // The date and time when the container started. LaunchedAt *string `type:"string"` // The name of the container related to a finding. @@ -4647,24 +5689,113 @@ func (s *ContainerDetails) SetName(v string) *ContainerDetails { return s } +type CreateActionTargetInput struct { + _ struct{} `type:"structure"` + + // The description for the custom action target. + // + // Description is a required field + Description *string `type:"string" required:"true"` + + // The ID for the custom action target. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The name of the custom action target. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateActionTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActionTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateActionTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateActionTargetInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateActionTargetInput) SetDescription(v string) *CreateActionTargetInput { + s.Description = &v + return s +} + +// SetId sets the Id field's value. 
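Editor's note on the BatchImportFindings shapes above: the output separates SuccessCount from FailedCount, and FailedFindings carries a per-finding error code and message, so a caller can surface partial failures rather than treating the batch as all-or-nothing. A hedged sketch of that pattern; the helper name is ours, not part of the SDK:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// importFindings submits a batch and logs any findings that were rejected.
func importFindings(svc *securityhub.SecurityHub, findings []*securityhub.AwsSecurityFinding) error {
	out, err := svc.BatchImportFindings(&securityhub.BatchImportFindingsInput{
		Findings: findings,
	})
	if err != nil {
		return err // request-level failure (for example, validation or access errors)
	}
	// Per-finding failures are reported alongside the overall counts.
	for _, f := range out.FailedFindings {
		log.Printf("finding %s not imported: %s (%s)",
			aws.StringValue(f.Id), aws.StringValue(f.ErrorMessage), aws.StringValue(f.ErrorCode))
	}
	log.Printf("imported %d findings, %d failed",
		aws.Int64Value(out.SuccessCount), aws.Int64Value(out.FailedCount))
	return nil
}

func main() {}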
+func (s *CreateActionTargetInput) SetId(v string) *CreateActionTargetInput { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateActionTargetInput) SetName(v string) *CreateActionTargetInput { + s.Name = &v + return s +} + +type CreateActionTargetOutput struct { + _ struct{} `type:"structure"` + + // The ARN for the custom action target. + // + // ActionTargetArn is a required field + ActionTargetArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateActionTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActionTargetOutput) GoString() string { + return s.String() +} + +// SetActionTargetArn sets the ActionTargetArn field's value. +func (s *CreateActionTargetOutput) SetActionTargetArn(v string) *CreateActionTargetOutput { + s.ActionTargetArn = &v + return s +} + type CreateInsightInput struct { _ struct{} `type:"structure"` - // A collection of attributes that are applied to all active Security Hub-aggregated - // findings and that result in a subset of findings that are included in this - // insight. + // One or more attributes used to filter the findings included in the insight. + // Only findings that match the criteria defined in the filters are included + // in the insight. // // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` - // The attribute by which the insight's findings are grouped. This attribute - // is used as a findings aggregator for the purposes of viewing and managing - // multiple related findings under a single operand. + // The attribute used as the aggregator to group related findings for the insight. // // GroupByAttribute is a required field GroupByAttribute *string `type:"string" required:"true"` - // The user-defined name that identifies the insight that you want to create. + // The name of the custom insight to create. // // Name is a required field Name *string `type:"string" required:"true"` @@ -4720,7 +5851,7 @@ func (s *CreateInsightInput) SetName(v string) *CreateInsightInput { type CreateInsightOutput struct { _ struct{} `type:"structure"` - // The ARN Of the created insight. + // The ARN of the insight created. // // InsightArn is a required field InsightArn *string `type:"string" required:"true"` @@ -4745,8 +5876,8 @@ func (s *CreateInsightOutput) SetInsightArn(v string) *CreateInsightOutput { type CreateMembersInput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the accounts that you want - // to associate with the master Security Hub account. + // A list of account ID and email address pairs of the accounts to associate + // with the Security Hub master account. AccountDetails []*AccountDetails `type:"list"` } @@ -4769,8 +5900,8 @@ func (s *CreateMembersInput) SetAccountDetails(v []*AccountDetails) *CreateMembe type CreateMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that weren't + // processed. UnprocessedAccounts []*Result `type:"list"` } @@ -4832,91 +5963,172 @@ func (s *DateFilter) SetStart(v string) *DateFilter { return s } -// A date range for the date filter. -type DateRange struct { +// A date range for the date filter. 
+type DateRange struct { + _ struct{} `type:"structure"` + + // A date range unit for the date filter. + Unit *string `type:"string" enum:"DateRangeUnit"` + + // A date range value for the date filter. + Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s DateRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateRange) GoString() string { + return s.String() +} + +// SetUnit sets the Unit field's value. +func (s *DateRange) SetUnit(v string) *DateRange { + s.Unit = &v + return s +} + +// SetValue sets the Value field's value. +func (s *DateRange) SetValue(v int64) *DateRange { + s.Value = &v + return s +} + +type DeclineInvitationsInput struct { + _ struct{} `type:"structure"` + + // A list of account IDs for the accounts from which to decline the invitations + // to Security Hub. + // + // AccountIds is a required field + AccountIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeclineInvitationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeclineInvitationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeclineInvitationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeclineInvitationsInput"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. +func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitationsInput { + s.AccountIds = v + return s +} + +type DeclineInvitationsOutput struct { _ struct{} `type:"structure"` - // A date range unit for the date filter. - Unit *string `type:"string" enum:"DateRangeUnit"` - - // A date range value for the date filter. - Value *int64 `type:"integer"` + // A list of account ID and email address pairs of the AWS accounts that weren't + // processed. + UnprocessedAccounts []*Result `type:"list"` } // String returns the string representation -func (s DateRange) String() string { +func (s DeclineInvitationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DateRange) GoString() string { +func (s DeclineInvitationsOutput) GoString() string { return s.String() } -// SetUnit sets the Unit field's value. -func (s *DateRange) SetUnit(v string) *DateRange { - s.Unit = &v - return s -} - -// SetValue sets the Value field's value. -func (s *DateRange) SetValue(v int64) *DateRange { - s.Value = &v +// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. +func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeclineInvitationsOutput { + s.UnprocessedAccounts = v return s } -type DeclineInvitationsInput struct { +type DeleteActionTargetInput struct { _ struct{} `type:"structure"` - // A list of account IDs specifying accounts whose invitations to Security Hub - // you want to decline. - AccountIds []*string `type:"list"` + // The ARN of the custom action target to delete.
+ // + // ActionTargetArn is a required field + ActionTargetArn *string `location:"uri" locationName:"ActionTargetArn" type:"string" required:"true"` } // String returns the string representation -func (s DeclineInvitationsInput) String() string { +func (s DeleteActionTargetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeclineInvitationsInput) GoString() string { +func (s DeleteActionTargetInput) GoString() string { return s.String() } -// SetAccountIds sets the AccountIds field's value. -func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitationsInput { - s.AccountIds = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteActionTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteActionTargetInput"} + if s.ActionTargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("ActionTargetArn")) + } + if s.ActionTargetArn != nil && len(*s.ActionTargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionTargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionTargetArn sets the ActionTargetArn field's value. +func (s *DeleteActionTargetInput) SetActionTargetArn(v string) *DeleteActionTargetInput { + s.ActionTargetArn = &v return s } -type DeclineInvitationsOutput struct { +type DeleteActionTargetOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. - UnprocessedAccounts []*Result `type:"list"` + // The ARN of the custom action target that was deleted. + // + // ActionTargetArn is a required field + ActionTargetArn *string `type:"string" required:"true"` } // String returns the string representation -func (s DeclineInvitationsOutput) String() string { +func (s DeleteActionTargetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeclineInvitationsOutput) GoString() string { +func (s DeleteActionTargetOutput) GoString() string { return s.String() } -// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. -func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeclineInvitationsOutput { - s.UnprocessedAccounts = v +// SetActionTargetArn sets the ActionTargetArn field's value. +func (s *DeleteActionTargetOutput) SetActionTargetArn(v string) *DeleteActionTargetOutput { + s.ActionTargetArn = &v return s } type DeleteInsightInput struct { _ struct{} `type:"structure"` - // The ARN of the insight that you want to delete. + // The ARN of the insight to delete. // // InsightArn is a required field InsightArn *string `location:"uri" locationName:"InsightArn" type:"string" required:"true"` @@ -4982,9 +6194,10 @@ func (s *DeleteInsightOutput) SetInsightArn(v string) *DeleteInsightOutput { type DeleteInvitationsInput struct { _ struct{} `type:"structure"` - // A list of account IDs specifying accounts whose invitations to Security Hub - // you want to delete. - AccountIds []*string `type:"list"` + // A list of the account IDs that sent the invitations to delete. + // + // AccountIds is a required field + AccountIds []*string `type:"list" required:"true"` } // String returns the string representation @@ -4997,6 +6210,19 @@ func (s DeleteInvitationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
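Editor's note tying the new custom action target shapes together: CreateActionTarget returns the ARN that DeleteActionTarget later takes as its URI parameter, which is why DeleteActionTargetInput validates the ARN for presence and minimum length. A sketch of the round trip; the ID, name, and description are invented for illustration:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// actionTargetRoundTrip creates a custom action target and then deletes it
// again using the ARN returned by the create call.
func actionTargetRoundTrip(svc *securityhub.SecurityHub) error {
	created, err := svc.CreateActionTarget(&securityhub.CreateActionTargetInput{
		Id:          aws.String("Remediate"),                        // hypothetical ID
		Name:        aws.String("Send to remediation"),              // hypothetical name
		Description: aws.String("Forward findings to remediation."), // hypothetical description
	})
	if err != nil {
		return err
	}
	log.Printf("created action target %s", aws.StringValue(created.ActionTargetArn))

	// The ARN is bound to the request URI, so Validate rejects an empty value.
	_, err = svc.DeleteActionTarget(&securityhub.DeleteActionTargetInput{
		ActionTargetArn: created.ActionTargetArn,
	})
	return err
}

func main() {}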
+func (s *DeleteInvitationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInvitationsInput"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAccountIds sets the AccountIds field's value. func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsInput { s.AccountIds = v @@ -5006,8 +6232,8 @@ func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsIn type DeleteInvitationsOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that invitations + // weren't deleted for. UnprocessedAccounts []*Result `type:"list"` } @@ -5030,8 +6256,7 @@ func (s *DeleteInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeleteInv type DeleteMembersInput struct { _ struct{} `type:"structure"` - // A list of account IDs of the Security Hub member accounts that you want to - // delete. + // A list of account IDs of the member accounts to delete. AccountIds []*string `type:"list"` } @@ -5054,8 +6279,8 @@ func (s *DeleteMembersInput) SetAccountIds(v []*string) *DeleteMembersInput { type DeleteMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that weren't + // deleted. UnprocessedAccounts []*Result `type:"list"` } @@ -5075,10 +6300,233 @@ func (s *DeleteMembersOutput) SetUnprocessedAccounts(v []*Result) *DeleteMembers return s } +type DescribeActionTargetsInput struct { + _ struct{} `type:"structure"` + + // A list of custom action target ARNs for the custom action targets to retrieve. + ActionTargetArns []*string `type:"list"` + + // The maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // The token that is required for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeActionTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActionTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeActionTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeActionTargetsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionTargetArns sets the ActionTargetArns field's value. +func (s *DescribeActionTargetsInput) SetActionTargetArns(v []*string) *DescribeActionTargetsInput { + s.ActionTargetArns = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeActionTargetsInput) SetMaxResults(v int64) *DescribeActionTargetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeActionTargetsInput) SetNextToken(v string) *DescribeActionTargetsInput { + s.NextToken = &v + return s +} + +type DescribeActionTargetsOutput struct { + _ struct{} `type:"structure"` + + // A list of ActionTarget objects. 
Each object includes the ActionTargetArn, + // Description, and Name of a custom action target available in Security Hub. + // + // ActionTargets is a required field + ActionTargets []*ActionTarget `type:"list" required:"true"` + + // The token that is required for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeActionTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActionTargetsOutput) GoString() string { + return s.String() +} + +// SetActionTargets sets the ActionTargets field's value. +func (s *DescribeActionTargetsOutput) SetActionTargets(v []*ActionTarget) *DescribeActionTargetsOutput { + s.ActionTargets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeActionTargetsOutput) SetNextToken(v string) *DescribeActionTargetsOutput { + s.NextToken = &v + return s +} + +type DescribeHubInput struct { + _ struct{} `type:"structure"` + + // The ARN of the Hub resource to retrieve. + HubArn *string `location:"querystring" locationName:"HubArn" type:"string"` +} + +// String returns the string representation +func (s DescribeHubInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHubInput) GoString() string { + return s.String() +} + +// SetHubArn sets the HubArn field's value. +func (s *DescribeHubInput) SetHubArn(v string) *DescribeHubInput { + s.HubArn = &v + return s +} + +type DescribeHubOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the Hub resource retrieved. + HubArn *string `type:"string"` + + // The date and time when Security Hub was enabled in the account. + SubscribedAt *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHubOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHubOutput) GoString() string { + return s.String() +} + +// SetHubArn sets the HubArn field's value. +func (s *DescribeHubOutput) SetHubArn(v string) *DescribeHubOutput { + s.HubArn = &v + return s +} + +// SetSubscribedAt sets the SubscribedAt field's value. +func (s *DescribeHubOutput) SetSubscribedAt(v string) *DescribeHubOutput { + s.SubscribedAt = &v + return s +} + +type DescribeProductsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // The token that is required for pagination. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeProductsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProductsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProductsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
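Editor's note: the MaxResults/NextToken pair on the new Describe* inputs above follows the usual SDK pagination contract, that is, keep re-issuing the request with the returned token until it comes back nil. A minimal sketch against DescribeActionTargets; the page size is an arbitrary choice:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// allActionTargets drains every page of DescribeActionTargets results.
func allActionTargets(svc *securityhub.SecurityHub) ([]*securityhub.ActionTarget, error) {
	var targets []*securityhub.ActionTarget
	input := &securityhub.DescribeActionTargetsInput{MaxResults: aws.Int64(10)}
	for {
		out, err := svc.DescribeActionTargets(input)
		if err != nil {
			return nil, err
		}
		targets = append(targets, out.ActionTargets...)
		if out.NextToken == nil {
			return targets, nil // no further pages
		}
		input.NextToken = out.NextToken
	}
}

func main() {}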
+func (s *DescribeProductsInput) SetMaxResults(v int64) *DescribeProductsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeProductsInput) SetNextToken(v string) *DescribeProductsInput { + s.NextToken = &v + return s +} + +type DescribeProductsOutput struct { + _ struct{} `type:"structure"` + + // The token that is required for pagination. + NextToken *string `type:"string"` + + // A list of products, including details for each product. + // + // Products is a required field + Products []*Product `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeProductsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeProductsOutput) SetNextToken(v string) *DescribeProductsOutput { + s.NextToken = &v + return s +} + +// SetProducts sets the Products field's value. +func (s *DescribeProductsOutput) SetProducts(v []*Product) *DescribeProductsOutput { + s.Products = v + return s +} + type DisableImportFindingsForProductInput struct { _ struct{} `type:"structure"` - // The ARN of a resource that represents your subscription to a supported product. + // The ARN of the integrated product to disable the integration for. // // ProductSubscriptionArn is a required field ProductSubscriptionArn *string `location:"uri" locationName:"ProductSubscriptionArn" type:"string" required:"true"` @@ -5189,8 +6637,7 @@ func (s DisassociateFromMasterAccountOutput) GoString() string { type DisassociateMembersInput struct { _ struct{} `type:"structure"` - // The account IDs of the member accounts that you want to disassociate from - // the master account. + // The account IDs of the member accounts to disassociate from the master account. AccountIds []*string `type:"list"` } @@ -5227,8 +6674,7 @@ func (s DisassociateMembersOutput) GoString() string { type EnableImportFindingsForProductInput struct { _ struct{} `type:"structure"` - // The ARN of the product that generates findings that you want to import into - // Security Hub. + // The ARN of the product to enable the integration for. // // ProductArn is a required field ProductArn *string `type:"string" required:"true"` @@ -5266,8 +6712,7 @@ func (s *EnableImportFindingsForProductInput) SetProductArn(v string) *EnableImp type EnableImportFindingsForProductOutput struct { _ struct{} `type:"structure"` - // The ARN of a resource that represents your subscription to the product that - // generates the findings that you want to import into Security Hub. + // The ARN of your subscription to the product to enable integrations for. ProductSubscriptionArn *string `type:"string"` } @@ -5289,6 +6734,9 @@ func (s *EnableImportFindingsForProductOutput) SetProductSubscriptionArn(v strin type EnableSecurityHubInput struct { _ struct{} `type:"structure"` + + // The tags to add to the Hub resource when you enable Security Hub. + Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -5301,6 +6749,25 @@ func (s EnableSecurityHubInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
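Editor's note on the new Tags field of EnableSecurityHubInput above: enabling Security Hub can now tag the Hub resource in the same call, and the Validate method below enforces client-side that a non-nil map holds at least one entry. A hedged sketch; the region, tag key, and tag value are invented:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := securityhub.New(sess)

	// Tags is optional, but when present it must contain at least one entry.
	_, err := svc.EnableSecurityHub(&securityhub.EnableSecurityHubInput{
		Tags: map[string]*string{"team": aws.String("security")}, // hypothetical tag
	})
	if err != nil {
		log.Fatal(err)
	}
}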
+func (s *EnableSecurityHubInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableSecurityHubInput"} + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTags sets the Tags field's value. +func (s *EnableSecurityHubInput) SetTags(v map[string]*string) *EnableSecurityHubInput { + s.Tags = v + return s +} + type EnableSecurityHubOutput struct { _ struct{} `type:"structure"` } @@ -5318,16 +6785,16 @@ func (s EnableSecurityHubOutput) GoString() string { type GetEnabledStandardsInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of results to return in the response. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetEnabledStandards operation. For subsequent calls to the operation, + // Paginates results. On your first call to the GetEnabledStandards operation, + // set the value of this parameter to NULL. For subsequent calls to the operation, // fill nextToken in the request with the value of nextToken from the previous // response to continue listing data. NextToken *string `type:"string"` - // The list of standards subscription ARNS that you want to list and describe. + // A list of the standards subscription ARNs for the standards to retrieve. StandardsSubscriptionArns []*string `min:"1" type:"list"` } @@ -5381,7 +6848,8 @@ type GetEnabledStandardsOutput struct { // The token that is required for pagination. NextToken *string `type:"string"` - // The standards subscription details returned by the operation. + // A list of StandardsSubscriptions objects that include information about the + // enabled standards. StandardsSubscriptions []*StandardsSubscription `type:"list"` } @@ -5410,19 +6878,20 @@ func (s *GetEnabledStandardsOutput) SetStandardsSubscriptions(v []*StandardsSubs type GetFindingsInput struct { _ struct{} `type:"structure"` - // A collection of attributes that is use for querying findings. + // The findings attributes used to define a condition to filter the findings + // returned. Filters *AwsSecurityFindingFilters `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of findings to return. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetFindings operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of nextToken from the previous - // response to continue listing data. + // Paginates results. On your first call to the GetFindings operation, set the + // value of this parameter to NULL. For subsequent calls to the operation, fill + // nextToken in the request with the value of nextToken from the previous response + // to continue listing data. NextToken *string `type:"string"` - // A collection of attributes used for sorting findings. + // Findings attributes used to sort the list of findings returned. SortCriteria []*SortCriterion `type:"list"` } @@ -5476,7 +6945,7 @@ func (s *GetFindingsInput) SetSortCriteria(v []*SortCriterion) *GetFindingsInput type GetFindingsOutput struct { _ struct{} `type:"structure"` - // Findings details returned by the operation. + // The findings that matched the filters specified in the request. 
// // Findings is a required field Findings []*AwsSecurityFinding `type:"list" required:"true"` @@ -5576,16 +7045,16 @@ func (s *GetInsightResultsOutput) SetInsightResults(v *InsightResults) *GetInsig type GetInsightsInput struct { _ struct{} `type:"structure"` - // The ARNS of the insights that you want to describe. + // The ARNs of the insights that you want to describe. InsightArns []*string `type:"list"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetInsights operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of nextToken from the previous - // response to continue listing data. + // Paginates results. On your first call to the GetInsights operation, set the + // value of this parameter to NULL. For subsequent calls to the operation, fill + // nextToken in the request with the value of nextToken from the previous response + // to continue listing data. NextToken *string `type:"string"` } @@ -5743,8 +7212,8 @@ func (s *GetMasterAccountOutput) SetMaster(v *Invitation) *GetMasterAccountOutpu type GetMembersInput struct { _ struct{} `type:"structure"` - // A list of account IDs for the Security Hub member accounts on which you want - // to return the details. + // A list of account IDs for the Security Hub member accounts that you want + // to return the details for. // // AccountIds is a required field AccountIds []*string `type:"list" required:"true"` @@ -5785,8 +7254,8 @@ type GetMembersOutput struct { // A list of details about the Security Hub member accounts. Members []*Member `type:"list"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -5812,7 +7281,7 @@ func (s *GetMembersOutput) SetUnprocessedAccounts(v []*Result) *GetMembersOutput return s } -// Includes details of the list of the findings that cannot be imported. +// Includes details of the list of the findings that can't be imported. type ImportFindingsError struct { _ struct{} `type:"structure"` @@ -5826,7 +7295,7 @@ type ImportFindingsError struct { // ErrorMessage is a required field ErrorMessage *string `type:"string" required:"true"` - // The id of the error made during the BatchImportFindings operation. + // The ID of the error made during the BatchImportFindings operation. // // Id is a required field Id *string `type:"string" required:"true"` @@ -5864,14 +7333,14 @@ func (s *ImportFindingsError) SetId(v string) *ImportFindingsError { type Insight struct { _ struct{} `type:"structure"` - // A collection of attributes that are applied to all active Security Hub-aggregated - // findings and that result in a subset of findings that are included in this - // insight. + // One or more attributes used to filter the findings included in the insight. + // Only findings that match the criteria defined in the filters are included + // in the insight. // // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` - // The attribute by which the insight's findings are grouped. This attribute + // The attribute that the insight's findings are grouped by. 
This attribute // is used as a findings aggregator for the purposes of viewing and managing // multiple related findings under a single operand. // @@ -5932,7 +7401,7 @@ type InsightResultValue struct { // Count is a required field Count *int64 `type:"integer" required:"true"` - // The value of the attribute by which the findings are grouped for the insight's + // The value of the attribute that the findings are grouped by for the insight // whose results are returned by the GetInsightResults operation. // // GroupByAttributeValue is a required field @@ -5965,7 +7434,7 @@ func (s *InsightResultValue) SetGroupByAttributeValue(v string) *InsightResultVa type InsightResults struct { _ struct{} `type:"structure"` - // The attribute by which the findings are grouped for the insight's whose results + // The attribute that the findings are grouped by for the insight whose results // are returned by the GetInsightResults operation. // // GroupByAttribute is a required field @@ -6011,21 +7480,21 @@ func (s *InsightResults) SetResultValues(v []*InsightResultValue) *InsightResult return s } -// The details of an invitation sent to an AWS account by the Security Hub master -// account. +// Details about an invitation. type Invitation struct { _ struct{} `type:"structure"` - // The account ID of the master Security Hub account who sent the invitation. + // The account ID of the Security Hub master account that the invitation was + // sent from. AccountId *string `type:"string"` - // The ID of the invitation sent by the master Security Hub account. + // The ID of the invitation sent to the member account. InvitationId *string `type:"string"` // The timestamp of when the invitation was sent. InvitedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The current relationship status between the inviter and invitee accounts. + // The current status of the association between member and master accounts. MemberStatus *string `type:"string"` } @@ -6090,8 +7559,8 @@ func (s *InviteMembersInput) SetAccountIds(v []*string) *InviteMembersInput { type InviteMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -6111,11 +7580,11 @@ func (s *InviteMembersOutput) SetUnprocessedAccounts(v []*Result) *InviteMembers return s } -// The IP filter for querying findings.> +// The IP filter for querying findings. type IpFilter struct { _ struct{} `type:"structure"` - // Finding's CIDR value. + // A finding's CIDR value. Cidr *string `type:"string"` } @@ -6162,11 +7631,11 @@ func (s *KeywordFilter) SetValue(v string) *KeywordFilter { type ListEnabledProductsForImportInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the ListEnabledProductsForImport operation. For subsequent calls + // Paginates results. On your first call to the ListEnabledProductsForImport + // operation, set the value of this parameter to NULL. 
For subsequent calls // to the operation, fill nextToken in the request with the value of NextToken // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` @@ -6242,11 +7711,11 @@ func (s *ListEnabledProductsForImportOutput) SetProductSubscriptions(v []*string type ListInvitationsInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the ListInvitations operation. For subsequent calls to the operation, + // Paginates results. On your first call to the ListInvitations operation, set + // the value of this parameter to NULL. For subsequent calls to the operation, // fill nextToken in the request with the value of NextToken from the previous // response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` @@ -6322,16 +7791,16 @@ func (s *ListInvitationsOutput) SetNextToken(v string) *ListInvitationsOutput { type ListMembersInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` // Paginates results. Set the value of this parameter to NULL on your first // call to the ListMembers operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of NextToken from the previous + // fill nextToken in the request with the value of nextToken from the previous // response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` - // Specifies what member accounts the response includes based on their relationship + // Specifies which member accounts the response includes based on their relationship // status with the master account. The default value is TRUE. If onlyAssociated // is set to TRUE, the response includes member accounts whose relationship // status with the master is set to ENABLED or DISABLED. If onlyAssociated is @@ -6412,6 +7881,70 @@ func (s *ListMembersOutput) SetNextToken(v string) *ListMembersOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource to retrieve tags for. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
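Editor's note: the new ListTagsForResource operation below pairs naturally with the tagging support added to EnableSecurityHub, since it reads the tags back off the Hub resource. A sketch under those assumptions; the region is hypothetical and the ResourceArn is a placeholder (DescribeHub can return the real Hub resource ARN):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := securityhub.New(sess)

	out, err := svc.ListTagsForResource(&securityhub.ListTagsForResourceInput{
		// Placeholder ARN; substitute the Hub ARN for your account.
		ResourceArn: aws.String("arn:aws:securityhub:us-west-2:123456789012:hub/default"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range out.Tags {
		fmt.Printf("%s=%s\n", k, aws.StringValue(v))
	}
}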
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags associated with a resource. + Tags map[string]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // A list of malware related to a finding. type Malware struct { _ struct{} `type:"structure"` @@ -6421,7 +7954,7 @@ type Malware struct { // Name is a required field Name *string `type:"string" required:"true"` - // The filesystem path of the malware that was observed. + // The file system path of the malware that was observed. Path *string `type:"string"` // The state of the malware that was observed. @@ -6482,8 +8015,8 @@ func (s *Malware) SetType(v string) *Malware { type MapFilter struct { _ struct{} `type:"structure"` - // Represents the condition to be applied to a key value when querying for findings - // with a map filter. + // The condition to apply to a key value when querying for findings with a map + // filter. Comparison *string `type:"string" enum:"MapFilterComparison"` // The key of the map filter. @@ -6521,27 +8054,29 @@ func (s *MapFilter) SetValue(v string) *MapFilter { return s } -// The details for a Security Hub member account. +// The details about a member account. type Member struct { _ struct{} `type:"structure"` - // The AWS account ID of a Security Hub member account. + // The AWS account ID of the member account. AccountId *string `type:"string"` - // The email of a Security Hub member account. + // The email address of the member account. Email *string `type:"string"` - // Time stamp at which the member account was invited to Security Hub. + // A timestamp for the date and time when the invitation was sent to the member + // account. InvitedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The AWS account ID of the master Security Hub account to this member account. + // The AWS account ID of the Security Hub master account associated with this + // member account. MasterId *string `type:"string"` // The status of the relationship between the member account and its master // account. MemberStatus *string `type:"string"` - // Time stamp at which this member account was updated. + // The timestamp for the date and time when the member account was updated. UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -6607,7 +8142,7 @@ type Network struct { // The destination port of network-related information about a finding. DestinationPort *int64 `type:"integer"` - // Indicates the direction of network traffic associated with a finding. 
+ // The direction of network traffic associated with a finding. Direction *string `type:"string" enum:"NetworkDirection"` // The protocol of network-related information about a finding. @@ -6830,16 +8365,16 @@ func (s *NoteUpdate) SetUpdatedBy(v string) *NoteUpdate { type NumberFilter struct { _ struct{} `type:"structure"` - // Represents the "equal to" condition to be applied to a single field when - // querying for findings. + // The equal-to condition to be applied to a single field when querying for + // findings. Eq *float64 `type:"double"` - // Represents the "greater than equal" condition to be applied to a single field - // when querying for findings. + // The greater-than-equal condition to be applied to a single field when querying + // for findings. Gte *float64 `type:"double"` - // Represents the "less than equal" condition to be applied to a single field - // when querying for findings. + // The less-than-equal condition to be applied to a single field when querying + // for findings. Lte *float64 `type:"double"` } @@ -6890,7 +8425,7 @@ type ProcessDetails struct { // The process ID. Pid *int64 `type:"integer"` - // The date/time that the process was terminated. + // The date and time when the process was terminated. TerminatedAt *string `type:"string"` } @@ -6940,15 +8475,104 @@ func (s *ProcessDetails) SetTerminatedAt(v string) *ProcessDetails { return s } -// Provides a recommendation on how to remediate the issue identified within -// a finding. +// Contains details about a product. +type Product struct { + _ struct{} `type:"structure"` + + // The URL used to activate the product. + ActivationUrl *string `type:"string"` + + // The categories assigned to the product. + Categories []*string `type:"list"` + + // The name of the company that provides the product. + CompanyName *string `type:"string"` + + // A description of the product. + Description *string `type:"string"` + + // The URL for the page that contains more information about the product. + MarketplaceUrl *string `type:"string"` + + // The ARN assigned to the product. + // + // ProductArn is a required field + ProductArn *string `type:"string" required:"true"` + + // The name of the product. + ProductName *string `type:"string"` + + // The resource policy associated with the product. + ProductSubscriptionResourcePolicy *string `type:"string"` +} + +// String returns the string representation +func (s Product) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Product) GoString() string { + return s.String() +} + +// SetActivationUrl sets the ActivationUrl field's value. +func (s *Product) SetActivationUrl(v string) *Product { + s.ActivationUrl = &v + return s +} + +// SetCategories sets the Categories field's value. +func (s *Product) SetCategories(v []*string) *Product { + s.Categories = v + return s +} + +// SetCompanyName sets the CompanyName field's value. +func (s *Product) SetCompanyName(v string) *Product { + s.CompanyName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Product) SetDescription(v string) *Product { + s.Description = &v + return s +} + +// SetMarketplaceUrl sets the MarketplaceUrl field's value. +func (s *Product) SetMarketplaceUrl(v string) *Product { + s.MarketplaceUrl = &v + return s +} + +// SetProductArn sets the ProductArn field's value. +func (s *Product) SetProductArn(v string) *Product { + s.ProductArn = &v + return s +} + +// SetProductName sets the ProductName field's value. 
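A sketch of feeding a NumberFilter into a findings query, assuming the AwsSecurityFindingFilters type and its SeverityNormalized field from the same generated package (neither appears in this hunk):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	// Gte expresses the greater-than-equal condition documented on NumberFilter.
	out, err := svc.GetFindings(&securityhub.GetFindingsInput{
		Filters: &securityhub.AwsSecurityFindingFilters{
			SeverityNormalized: []*securityhub.NumberFilter{{Gte: aws.Float64(70)}},
		},
	})
	if err != nil {
		fmt.Println("GetFindings:", err)
		return
	}
	fmt.Println("matched findings:", len(out.Findings))
}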
+func (s *Product) SetProductName(v string) *Product { + s.ProductName = &v + return s +} + +// SetProductSubscriptionResourcePolicy sets the ProductSubscriptionResourcePolicy field's value. +func (s *Product) SetProductSubscriptionResourcePolicy(v string) *Product { + s.ProductSubscriptionResourcePolicy = &v + return s +} + +// A recommendation on how to remediate the issue identified in a finding. type Recommendation struct { _ struct{} `type:"structure"` - // The recommendation of what to do about the issue described in a finding. + // Describes the recommended steps to take to remediate an issue identified + // in a finding. Text *string `type:"string"` - // A URL to link to general remediation information for the finding type of + // A URL to a page or site that contains information about how to remediate // a finding. Url *string `type:"string"` } @@ -6975,16 +8599,16 @@ func (s *Recommendation) SetUrl(v string) *Recommendation { return s } -// Related finding's details. +// Details about a related finding. type RelatedFinding struct { _ struct{} `type:"structure"` - // The solution-generated identifier for a related finding. + // The product-generated identifier for a related finding. // // Id is a required field Id *string `type:"string" required:"true"` - // The ARN of the solution that generated a related finding. + // The ARN of the product that generated a related finding. // // ProductArn is a required field ProductArn *string `type:"string" required:"true"` @@ -7028,11 +8652,11 @@ func (s *RelatedFinding) SetProductArn(v string) *RelatedFinding { return s } -// The remediation options for a finding. +// Details about the remediation steps for a finding. type Remediation struct { _ struct{} `type:"structure"` - // Provides a recommendation on how to remediate the issue identified within + // A recommendation on the steps to take to remediate the issue identified by // a finding. Recommendation *Recommendation `type:"structure"` } @@ -7053,11 +8677,11 @@ func (s *Remediation) SetRecommendation(v *Recommendation) *Remediation { return s } -// A resource data type that describes a resource to which the finding refers. +// A resource related to a finding. type Resource struct { _ struct{} `type:"structure"` - // Provides additional details about the resource. + // Additional details about the resource related to a finding. Details *ResourceDetails `type:"structure"` // The canonical identifier for the given resource type. @@ -7065,17 +8689,17 @@ type Resource struct { // Id is a required field Id *string `type:"string" required:"true"` - // The canonical AWS partition name to which the region is assigned. + // The canonical AWS partition name that the Region is assigned to. Partition *string `type:"string" enum:"Partition"` - // The canonical AWS external region name where this resource is located. + // The canonical AWS external Region name where this resource is located. Region *string `type:"string"` // A list of AWS tags associated with a resource at the time the finding was // processed. Tags map[string]*string `type:"map"` - // Specifies the type of the resource for which details are provided. + // The type of the resource that details are provided for. // // Type is a required field Type *string `type:"string" required:"true"` @@ -7143,24 +8767,23 @@ func (s *Resource) SetType(v string) *Resource { return s } -// Provides additional details about the resource. +// Additional details about a resource related to a finding. 
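Because each generated Set* method above returns its receiver, Product values can be built fluently; a small illustration with made-up values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	p := (&securityhub.Product{}).
		SetProductArn("arn:aws:securityhub:us-west-2::product/example/example-product"). // illustrative ARN
		SetProductName("example-product").
		SetCompanyName("Example Corp")
	fmt.Println(p) // String/GoString render via awsutil.Prettify, as shown above
}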
type ResourceDetails struct { _ struct{} `type:"structure"` - // The details of an AWS EC2 instance. + // Details about an Amazon EC2 instance related to a finding. AwsEc2Instance *AwsEc2InstanceDetails `type:"structure"` - // AWS IAM access key details related to a finding. + // Details about an IAM access key related to a finding. AwsIamAccessKey *AwsIamAccessKeyDetails `type:"structure"` - // The details of an AWS S3 Bucket. + // Details about an Amazon S3 Bucket related to a finding. AwsS3Bucket *AwsS3BucketDetails `type:"structure"` - // Container details related to a finding. + // Details about a container resource related to a finding. Container *ContainerDetails `type:"structure"` - // The details of a resource that does not have a specific sub-field for the - // resource type defined. + // Details about a resource that doesn't have a specific type defined. Other map[string]*string `type:"map"` } @@ -7204,14 +8827,14 @@ func (s *ResourceDetails) SetOther(v map[string]*string) *ResourceDetails { return s } -// The account details that could not be processed. +// Details about the account that wasn't processed. type Result struct { _ struct{} `type:"structure"` - // An ID of the AWS account that could not be processed. + // An AWS account ID of the account that wasn't processed. AccountId *string `type:"string"` - // The reason for why an account could not be processed. + // The reason that the account wasn't processed. ProcessingResult *string `type:"string"` } @@ -7237,7 +8860,7 @@ func (s *Result) SetProcessingResult(v string) *Result { return s } -// A finding's severity. +// The severity of the finding. type Severity struct { _ struct{} `type:"structure"` @@ -7246,7 +8869,7 @@ type Severity struct { // Normalized is a required field Normalized *int64 `type:"integer" required:"true"` - // The native severity as defined by the security findings provider's solution + // The native severity as defined by the AWS service or integrated partner product // that generated the finding. Product *float64 `type:"double"` } @@ -7286,14 +8909,14 @@ func (s *Severity) SetProduct(v float64) *Severity { return s } -// A collection of attributes used for sorting findings. +// A collection of finding attributes used to sort findings. type SortCriterion struct { _ struct{} `type:"structure"` - // The finding attribute used for sorting findings. + // The finding attribute used to sort findings. Field *string `type:"string"` - // The order used for sorting findings. + // The order used to sort findings. SortOrder *string `type:"string" enum:"SortOrder"` } @@ -7325,17 +8948,18 @@ type StandardsSubscription struct { // The ARN of a standard. // - // In this release, Security Hub only supports the CIS AWS Foundations standard. - // - // Its ARN is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. + // In this release, Security Hub supports only the CIS AWS Foundations standard, + // which uses the following ARN: arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. // // StandardsArn is a required field StandardsArn *string `type:"string" required:"true"` + // A key-value pair of input for the standard. + // // StandardsInput is a required field StandardsInput map[string]*string `type:"map" required:"true"` - // The standard's status. + // The status of the standards subscription.
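A sketch of using SortCriterion with GetFindings, assuming GetFindingsInput exposes a SortCriteria list in the same package (the sort field name here is illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	out, err := svc.GetFindings(&securityhub.GetFindingsInput{
		SortCriteria: []*securityhub.SortCriterion{{
			Field:     aws.String("SeverityNormalized"), // illustrative sort field
			SortOrder: aws.String(securityhub.SortOrderDesc),
		}},
	})
	if err != nil {
		fmt.Println("GetFindings:", err)
		return
	}
	for _, f := range out.Findings {
		if f.Severity != nil { // Severity.Normalized is the 0-100 score above
			fmt.Println(aws.Int64Value(f.Severity.Normalized), aws.StringValue(f.Id))
		}
	}
}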
// // StandardsStatus is a required field StandardsStatus *string `type:"string" required:"true" enum:"StandardsStatus"` @@ -7393,6 +9017,7 @@ type StandardsSubscriptionRequest struct { // StandardsArn is a required field StandardsArn *string `type:"string" required:"true"` + // A key-value pair of input for the standard. StandardsInput map[string]*string `type:"map"` } @@ -7435,8 +9060,7 @@ func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) * type StringFilter struct { _ struct{} `type:"structure"` - // Represents the condition to be applied to a string value when querying for - // findings. + // The condition to be applied to a string value when querying for findings. Comparison *string `type:"string" enum:"StringFilterComparison"` // The string filter value. @@ -7465,20 +9089,94 @@ func (s *StringFilter) SetValue(v string) *StringFilter { return s } -// Threat intel details related to a finding. +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource to apply the tags to. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + + // The tags to add to the resource. + // + // Tags is a required field + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Details about the threat intel related to a finding. type ThreatIntelIndicator struct { _ struct{} `type:"structure"` // The category of a threat intel indicator. Category *string `type:"string" enum:"ThreatIntelIndicatorCategory"` - // The date/time of the last observation of a threat intel indicator. + // The date and time when the most recent instance of a threat intel indicator + // was observed. LastObservedAt *string `type:"string"` - // The source of the threat intel. + // The source of the threat intel indicator. Source *string `type:"string"` - // The URL for more details from the source of the threat intel. 
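A sketch of subscribing to the CIS standard named above, assuming the package's BatchEnableStandards operation (only the StandardsSubscriptionRequest type appears in this hunk):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	// The CIS ARN is the one the StandardsSubscription docs call out.
	out, err := svc.BatchEnableStandards(&securityhub.BatchEnableStandardsInput{
		StandardsSubscriptionRequests: []*securityhub.StandardsSubscriptionRequest{{
			StandardsArn: aws.String("arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0"),
		}},
	})
	if err != nil {
		fmt.Println("BatchEnableStandards:", err)
		return
	}
	for _, sub := range out.StandardsSubscriptions {
		// StandardsStatus cycles through the enum values, including the
		// INCOMPLETE value this revision adds.
		fmt.Println(aws.StringValue(sub.StandardsArn), aws.StringValue(sub.StandardsStatus))
	}
}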
+ // The URL to the page or site where you can get more information about the + // threat intel indicator. SourceUrl *string `type:"string"` // The type of a threat intel indicator. @@ -7534,10 +9232,155 @@ func (s *ThreatIntelIndicator) SetValue(v string) *ThreatIntelIndicator { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource to remove the tags from. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + + // The tag keys associated with the tags to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateActionTargetInput struct { + _ struct{} `type:"structure"` + + // The ARN of the custom action target to update. + // + // ActionTargetArn is a required field + ActionTargetArn *string `location:"uri" locationName:"ActionTargetArn" type:"string" required:"true"` + + // The updated description for the custom action target. + Description *string `type:"string"` + + // The updated name of the custom action target. + Name *string `type:"string"` +} + +// String returns the string representation +func (s UpdateActionTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateActionTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
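A short sketch pairing the new TagResource and UntagResource inputs defined above (the resource ARN and tag values are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	arn := "arn:aws:securityhub:us-west-2:123456789012:hub/default" // hypothetical
	_, err := svc.TagResource(&securityhub.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags:        map[string]*string{"team": aws.String("security")},
	})
	if err != nil {
		fmt.Println("TagResource:", err)
		return
	}
	// Removal uses the TagKeys list from UntagResourceInput, not a full map.
	_, err = svc.UntagResource(&securityhub.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     []*string{aws.String("team")},
	})
	if err != nil {
		fmt.Println("UntagResource:", err)
	}
}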
+func (s *UpdateActionTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateActionTargetInput"} + if s.ActionTargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("ActionTargetArn")) + } + if s.ActionTargetArn != nil && len(*s.ActionTargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionTargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionTargetArn sets the ActionTargetArn field's value. +func (s *UpdateActionTargetInput) SetActionTargetArn(v string) *UpdateActionTargetInput { + s.ActionTargetArn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateActionTargetInput) SetDescription(v string) *UpdateActionTargetInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateActionTargetInput) SetName(v string) *UpdateActionTargetInput { + s.Name = &v + return s +} + +type UpdateActionTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateActionTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateActionTargetOutput) GoString() string { + return s.String() +} + type UpdateFindingsInput struct { _ struct{} `type:"structure"` - // A collection of attributes that specify what findings you want to update. + // A collection of attributes that specify which findings you want to update. // // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` @@ -7777,8 +9620,8 @@ const ( ) const ( - // MapFilterComparisonContains is a MapFilterComparison enum value - MapFilterComparisonContains = "CONTAINS" + // MapFilterComparisonEquals is a MapFilterComparison enum value + MapFilterComparisonEquals = "EQUALS" ) const ( @@ -7828,15 +9671,15 @@ const ( // StandardsStatusDeleting is a StandardsStatus enum value StandardsStatusDeleting = "DELETING" + + // StandardsStatusIncomplete is a StandardsStatus enum value + StandardsStatusIncomplete = "INCOMPLETE" ) const ( // StringFilterComparisonEquals is a StringFilterComparison enum value StringFilterComparisonEquals = "EQUALS" - // StringFilterComparisonContains is a StringFilterComparison enum value - StringFilterComparisonContains = "CONTAINS" - // StringFilterComparisonPrefix is a StringFilterComparison enum value StringFilterComparisonPrefix = "PREFIX" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go index 3ba53ebde2e..266765f2379 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go @@ -3,14 +3,24 @@ // Package securityhub provides the client and types for making API // requests to AWS SecurityHub. // -// AWS Security Hub provides you with a comprehensive view of your security -// state within AWS and your compliance with the security industry standards -// and best practices. Security Hub collects security data from across AWS accounts, -// services, and supported third-party partners and helps you analyze your security -// trends and identify the highest priority security issues. 
For more information, -// see AWS Security Hub User Guide (https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html). -// -// Currently, AWS Security Hub is in Preview release. +// Security Hub provides you with a comprehensive view of the security state +// of your AWS environment and resources. It also provides you with the compliance +// status of your environment based on CIS AWS Foundations compliance checks. +// Security Hub collects security data from AWS accounts, services, and integrated +// third-party products and helps you analyze security trends in your environment +// to identify the highest priority security issues. For more information about +// Security Hub, see the AWS Security Hub User Guide (https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html) . +// +// When you use operations in the Security Hub API, the requests are executed +// only in the AWS Region that is currently active or in the specific AWS Region +// that you specify in your request. Any configuration or settings change that +// results from the operation is applied only to that Region. To make the same +// change in other Regions, execute the same command for each Region to apply +// the change to. For example, if your Region is set to us-west-2, when you +// use CreateMembers to add a member account to Security Hub, the association +// of the member account with the master account is created only in the us-west-2 +// Region. Security Hub must be enabled for the member account in the same Region +// that the invite was sent from. // // See https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go index 6c2941282c4..ea15ffb030d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go @@ -7,7 +7,7 @@ const ( // ErrCodeAccessDeniedException for service response error code // "AccessDeniedException". // - // You do not have permission to to perform the action specified in the request. + // You don't have permission to perform the action specified in the request. ErrCodeAccessDeniedException = "AccessDeniedException" // ErrCodeInternalException for service response error code @@ -19,14 +19,14 @@ const ( // ErrCodeInvalidAccessException for service response error code // "InvalidAccessException". // - // AWS Security Hub is not enabled for the account used to make this request. + // AWS Security Hub isn't enabled for the account used to make this request. ErrCodeInvalidAccessException = "InvalidAccessException" // ErrCodeInvalidInputException for service response error code // "InvalidInputException". // - // The request was rejected because an invalid or out-of-range value was supplied - // for an input parameter. + // The request was rejected because you supplied an invalid or out-of-range + // value for an input parameter. ErrCodeInvalidInputException = "InvalidInputException" // ErrCodeLimitExceededException for service response error code @@ -45,6 +45,6 @@ const ( // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // The request was rejected because the specified resource cannot be found. 
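Per the new doc.go text, every call is scoped to a single Region, so the same change must be repeated per Region; a sketch with one client per Region (the Region list is illustrative, and GetEnabledStandards is assumed from the same package):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	sess := session.Must(session.NewSession())
	for _, region := range []string{"us-west-2", "us-east-1"} {
		svc := securityhub.New(sess, aws.NewConfig().WithRegion(region))
		// Each call below applies only in its own Region, per doc.go.
		out, err := svc.GetEnabledStandards(&securityhub.GetEnabledStandardsInput{})
		if err != nil {
			fmt.Println(region, "GetEnabledStandards:", err)
			continue
		}
		fmt.Println(region, "enabled standards:", len(out.StandardsSubscriptions))
	}
}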
+ // The request was rejected because we can't find the specified resource. ErrCodeResourceNotFoundException = "ResourceNotFoundException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go index 113ce37f3e1..cdb48ddf0c8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecurityHub { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "securityhub" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecurityHub { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecurityHub { svc := &SecurityHub{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-10-26", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go index e324d238f98..ee652493001 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go @@ -874,7 +874,7 @@ func (c *ServerlessApplicationRepository) ListApplicationDependenciesWithContext // // Example iterating over at most 3 pages of a ListApplicationDependencies operation. // pageNum := 0 // err := client.ListApplicationDependenciesPages(params, -// func(page *ListApplicationDependenciesOutput, lastPage bool) bool { +// func(page *serverlessapplicationrepository.ListApplicationDependenciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -906,10 +906,12 @@ func (c *ServerlessApplicationRepository) ListApplicationDependenciesPagesWithCo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListApplicationDependenciesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListApplicationDependenciesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1024,7 +1026,7 @@ func (c *ServerlessApplicationRepository) ListApplicationVersionsWithContext(ctx // // Example iterating over at most 3 pages of a ListApplicationVersions operation. 
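A sketch of the awserr-based error handling these ErrCode* constants exist for (the ARN is hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	_, err := svc.ListTagsForResource(&securityhub.ListTagsForResourceInput{
		ResourceArn: aws.String("arn:aws:securityhub:us-west-2:123456789012:hub/missing"), // hypothetical
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case securityhub.ErrCodeResourceNotFoundException:
			fmt.Println("resource not found:", aerr.Message())
		case securityhub.ErrCodeInvalidAccessException:
			fmt.Println("Security Hub isn't enabled for this account")
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}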
// pageNum := 0 // err := client.ListApplicationVersionsPages(params, -// func(page *ListApplicationVersionsOutput, lastPage bool) bool { +// func(page *serverlessapplicationrepository.ListApplicationVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1056,10 +1058,12 @@ func (c *ServerlessApplicationRepository) ListApplicationVersionsPagesWithContex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListApplicationVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListApplicationVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1170,7 +1174,7 @@ func (c *ServerlessApplicationRepository) ListApplicationsWithContext(ctx aws.Co // // Example iterating over at most 3 pages of a ListApplications operation. // pageNum := 0 // err := client.ListApplicationsPages(params, -// func(page *ListApplicationsOutput, lastPage bool) bool { +// func(page *serverlessapplicationrepository.ListApplicationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1202,10 +1206,12 @@ func (c *ServerlessApplicationRepository) ListApplicationsPagesWithContext(ctx a }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1254,8 +1260,7 @@ func (c *ServerlessApplicationRepository) PutApplicationPolicyRequest(input *Put // PutApplicationPolicy API operation for AWSServerlessApplicationRepository. // // Sets the permission policy for an application. For the list of actions supported -// for this operation, see Application Permissions (https://docs.aws.amazon.com/serverlessrepo/latest/devguide/access-control-resource-based.html#application-permissions) -// . +// for this operation, see Application Permissions (https://docs.aws.amazon.com/serverlessrepo/latest/devguide/access-control-resource-based.html#application-permissions) . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3469,7 +3474,7 @@ type Tag struct { Key *string `locationName:"key" type:"string" required:"true"` // This property corresponds to the content of the same name for the AWS CloudFormation - // Tag (https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/Tag) + // Tag (https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/Tag) // Data Type. // // Value is a required field diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/doc.go index dde60865eee..f2f6c12983f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/doc.go @@ -30,10 +30,9 @@ // // * Consuming Applications – Browse for applications and view information // about them, including source code and readme files. Also install, configure, -// and deploy applications of your choosing. -// -// Publishing Applications – Configure and upload applications to make them -// available to other developers, and publish new versions of applications. +// and deploy applications of your choosing. 
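The rewritten paging loops throughout this file are behavior-preserving with one practical improvement: the old `for p.Next() && cont` form evaluated p.Next() first, fetching one extra page after the callback returned false, while the new form breaks before that fetch. Caller usage is unchanged, as in this sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/serverlessapplicationrepository"
)

func main() {
	client := serverlessapplicationrepository.New(session.Must(session.NewSession()))

	pageNum := 0
	err := client.ListApplicationsPages(&serverlessapplicationrepository.ListApplicationsInput{},
		func(page *serverlessapplicationrepository.ListApplicationsOutput, lastPage bool) bool {
			pageNum++
			fmt.Println("page", pageNum, "applications:", len(page.Applications))
			return pageNum <= 3 // returning false now stops without an extra fetch
		})
	if err != nil {
		fmt.Println("ListApplicationsPages:", err)
	}
}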
Publishing Applications – +// Configure and upload applications to make them available to other developers, +// and publish new versions of applications. // // See https://docs.aws.amazon.com/goto/WebAPI/serverlessrepo-2017-09-08 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go index 5702ab93313..7ceaec7a2fd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServerlessApplicationRep if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "serverlessrepo" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { svc := &ServerlessApplicationRepository{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-08", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go index 31d0756f635..7fd6793ab7a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go @@ -1055,6 +1055,11 @@ func (c *ServiceCatalog) CreatePortfolioShareRequest(input *CreatePortfolioShare // * ErrCodeOperationNotSupportedException "OperationNotSupportedException" // The operation is not supported. // +// * ErrCodeInvalidStateException "InvalidStateException" +// An attempt was made to modify a resource that is in a state that is not valid. +// Check your resources to ensure that they are in valid states before retrying +// the operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/CreatePortfolioShare func (c *ServiceCatalog) CreatePortfolioShare(input *CreatePortfolioShareInput) (*CreatePortfolioShareOutput, error) { req, out := c.CreatePortfolioShareRequest(input) @@ -1768,6 +1773,11 @@ func (c *ServiceCatalog) DeletePortfolioShareRequest(input *DeletePortfolioShare // * ErrCodeOperationNotSupportedException "OperationNotSupportedException" // The operation is not supported. // +// * ErrCodeInvalidStateException "InvalidStateException" +// An attempt was made to modify a resource that is in a state that is not valid. +// Check your resources to ensure that they are in valid states before retrying +// the operation. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DeletePortfolioShare func (c *ServiceCatalog) DeletePortfolioShare(input *DeletePortfolioShareInput) (*DeletePortfolioShareOutput, error) { req, out := c.DeletePortfolioShareRequest(input) @@ -3182,6 +3192,12 @@ func (c *ServiceCatalog) DescribeRecordRequest(input *DescribeRecordInput) (req // Use this operation after calling a request operation (for example, ProvisionProduct, // TerminateProvisionedProduct, or UpdateProvisionedProduct). // +// If a provisioned product was transferred to a new owner using UpdateProvisionedProductProperties, +// the new owner will be able to describe all past records for that product. +// The previous owner will no longer be able to describe the records, but will +// be able to use ListRecordHistory to see the product's history from when he +// was the owner. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3294,6 +3310,86 @@ func (c *ServiceCatalog) DescribeServiceActionWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeServiceActionExecutionParameters = "DescribeServiceActionExecutionParameters" + +// DescribeServiceActionExecutionParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServiceActionExecutionParameters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeServiceActionExecutionParameters for more information on using the DescribeServiceActionExecutionParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeServiceActionExecutionParametersRequest method. +// req, resp := client.DescribeServiceActionExecutionParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeServiceActionExecutionParameters +func (c *ServiceCatalog) DescribeServiceActionExecutionParametersRequest(input *DescribeServiceActionExecutionParametersInput) (req *request.Request, output *DescribeServiceActionExecutionParametersOutput) { + op := &request.Operation{ + Name: opDescribeServiceActionExecutionParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServiceActionExecutionParametersInput{} + } + + output = &DescribeServiceActionExecutionParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeServiceActionExecutionParameters API operation for AWS Service Catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Service Catalog's +// API operation DescribeServiceActionExecutionParameters for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInvalidParametersException "InvalidParametersException" +// One or more parameters provided to the operation are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeServiceActionExecutionParameters +func (c *ServiceCatalog) DescribeServiceActionExecutionParameters(input *DescribeServiceActionExecutionParametersInput) (*DescribeServiceActionExecutionParametersOutput, error) { + req, out := c.DescribeServiceActionExecutionParametersRequest(input) + return out, req.Send() +} + +// DescribeServiceActionExecutionParametersWithContext is the same as DescribeServiceActionExecutionParameters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeServiceActionExecutionParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceCatalog) DescribeServiceActionExecutionParametersWithContext(ctx aws.Context, input *DescribeServiceActionExecutionParametersInput, opts ...request.Option) (*DescribeServiceActionExecutionParametersOutput, error) { + req, out := c.DescribeServiceActionExecutionParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeTagOption = "DescribeTagOption" // DescribeTagOptionRequest generates a "aws/request.Request" representing the @@ -4338,7 +4434,7 @@ func (c *ServiceCatalog) ListAcceptedPortfolioSharesWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a ListAcceptedPortfolioShares operation. // pageNum := 0 // err := client.ListAcceptedPortfolioSharesPages(params, -// func(page *ListAcceptedPortfolioSharesOutput, lastPage bool) bool { +// func(page *servicecatalog.ListAcceptedPortfolioSharesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4370,10 +4466,12 @@ func (c *ServiceCatalog) ListAcceptedPortfolioSharesPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAcceptedPortfolioSharesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAcceptedPortfolioSharesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4476,7 +4574,7 @@ func (c *ServiceCatalog) ListBudgetsForResourceWithContext(ctx aws.Context, inpu // // Example iterating over at most 3 pages of a ListBudgetsForResource operation. 
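A sketch of the two-step Request/Send form the generated docs describe, which exposes the request for custom headers before it is sent; both IDs and the input field names are assumptions, since the input type's fields are not shown in this hunk:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	svc := servicecatalog.New(session.Must(session.NewSession()))

	req, resp := svc.DescribeServiceActionExecutionParametersRequest(
		&servicecatalog.DescribeServiceActionExecutionParametersInput{
			ProvisionedProductId: aws.String("pp-example"),  // hypothetical
			ServiceActionId:      aws.String("act-example"), // hypothetical
		})

	// The two-step form allows injecting custom logic before sending,
	// e.g. a tracing header on the underlying *http.Request.
	req.HTTPRequest.Header.Set("X-Example-Trace", "review")

	if err := req.Send(); err != nil { // resp is valid only after Send succeeds
		fmt.Println("Send:", err)
		return
	}
	fmt.Println(resp)
}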
// pageNum := 0 // err := client.ListBudgetsForResourcePages(params, -// func(page *ListBudgetsForResourceOutput, lastPage bool) bool { +// func(page *servicecatalog.ListBudgetsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4508,10 +4606,12 @@ func (c *ServiceCatalog) ListBudgetsForResourcePagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListBudgetsForResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListBudgetsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4614,7 +4714,7 @@ func (c *ServiceCatalog) ListConstraintsForPortfolioWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a ListConstraintsForPortfolio operation. // pageNum := 0 // err := client.ListConstraintsForPortfolioPages(params, -// func(page *ListConstraintsForPortfolioOutput, lastPage bool) bool { +// func(page *servicecatalog.ListConstraintsForPortfolioOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4646,10 +4746,12 @@ func (c *ServiceCatalog) ListConstraintsForPortfolioPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListConstraintsForPortfolioOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListConstraintsForPortfolioOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4754,7 +4856,7 @@ func (c *ServiceCatalog) ListLaunchPathsWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListLaunchPaths operation. // pageNum := 0 // err := client.ListLaunchPathsPages(params, -// func(page *ListLaunchPathsOutput, lastPage bool) bool { +// func(page *servicecatalog.ListLaunchPathsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4786,10 +4888,12 @@ func (c *ServiceCatalog) ListLaunchPathsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListLaunchPathsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListLaunchPathsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4896,7 +5000,7 @@ func (c *ServiceCatalog) ListOrganizationPortfolioAccessWithContext(ctx aws.Cont // // Example iterating over at most 3 pages of a ListOrganizationPortfolioAccess operation. // pageNum := 0 // err := client.ListOrganizationPortfolioAccessPages(params, -// func(page *ListOrganizationPortfolioAccessOutput, lastPage bool) bool { +// func(page *servicecatalog.ListOrganizationPortfolioAccessOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4928,10 +5032,12 @@ func (c *ServiceCatalog) ListOrganizationPortfolioAccessPagesWithContext(ctx aws }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOrganizationPortfolioAccessOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOrganizationPortfolioAccessOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5110,7 +5216,7 @@ func (c *ServiceCatalog) ListPortfoliosWithContext(ctx aws.Context, input *ListP // // Example iterating over at most 3 pages of a ListPortfolios operation. 
// pageNum := 0 // err := client.ListPortfoliosPages(params, -// func(page *ListPortfoliosOutput, lastPage bool) bool { +// func(page *servicecatalog.ListPortfoliosOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5142,10 +5248,12 @@ func (c *ServiceCatalog) ListPortfoliosPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPortfoliosOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPortfoliosOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5248,7 +5356,7 @@ func (c *ServiceCatalog) ListPortfoliosForProductWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a ListPortfoliosForProduct operation. // pageNum := 0 // err := client.ListPortfoliosForProductPages(params, -// func(page *ListPortfoliosForProductOutput, lastPage bool) bool { +// func(page *servicecatalog.ListPortfoliosForProductOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5280,10 +5388,12 @@ func (c *ServiceCatalog) ListPortfoliosForProductPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPortfoliosForProductOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPortfoliosForProductOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5386,7 +5496,7 @@ func (c *ServiceCatalog) ListPrincipalsForPortfolioWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a ListPrincipalsForPortfolio operation. // pageNum := 0 // err := client.ListPrincipalsForPortfolioPages(params, -// func(page *ListPrincipalsForPortfolioOutput, lastPage bool) bool { +// func(page *servicecatalog.ListPrincipalsForPortfolioOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5418,10 +5528,12 @@ func (c *ServiceCatalog) ListPrincipalsForPortfolioPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPrincipalsForPortfolioOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPrincipalsForPortfolioOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5691,7 +5803,7 @@ func (c *ServiceCatalog) ListProvisioningArtifactsForServiceActionWithContext(ct // // Example iterating over at most 3 pages of a ListProvisioningArtifactsForServiceAction operation. // pageNum := 0 // err := client.ListProvisioningArtifactsForServiceActionPages(params, -// func(page *ListProvisioningArtifactsForServiceActionOutput, lastPage bool) bool { +// func(page *servicecatalog.ListProvisioningArtifactsForServiceActionOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5723,10 +5835,12 @@ func (c *ServiceCatalog) ListProvisioningArtifactsForServiceActionPagesWithConte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListProvisioningArtifactsForServiceActionOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListProvisioningArtifactsForServiceActionOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5913,7 +6027,7 @@ func (c *ServiceCatalog) ListResourcesForTagOptionWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a ListResourcesForTagOption operation. 
// pageNum := 0 // err := client.ListResourcesForTagOptionPages(params, -// func(page *ListResourcesForTagOptionOutput, lastPage bool) bool { +// func(page *servicecatalog.ListResourcesForTagOptionOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5945,10 +6059,12 @@ func (c *ServiceCatalog) ListResourcesForTagOptionPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListResourcesForTagOptionOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListResourcesForTagOptionOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6048,7 +6164,7 @@ func (c *ServiceCatalog) ListServiceActionsWithContext(ctx aws.Context, input *L // // Example iterating over at most 3 pages of a ListServiceActions operation. // pageNum := 0 // err := client.ListServiceActionsPages(params, -// func(page *ListServiceActionsOutput, lastPage bool) bool { +// func(page *servicecatalog.ListServiceActionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6080,10 +6196,12 @@ func (c *ServiceCatalog) ListServiceActionsPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServiceActionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServiceActionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6187,7 +6305,7 @@ func (c *ServiceCatalog) ListServiceActionsForProvisioningArtifactWithContext(ct // // Example iterating over at most 3 pages of a ListServiceActionsForProvisioningArtifact operation. // pageNum := 0 // err := client.ListServiceActionsForProvisioningArtifactPages(params, -// func(page *ListServiceActionsForProvisioningArtifactOutput, lastPage bool) bool { +// func(page *servicecatalog.ListServiceActionsForProvisioningArtifactOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6219,13 +6337,99 @@ func (c *ServiceCatalog) ListServiceActionsForProvisioningArtifactPagesWithConte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServiceActionsForProvisioningArtifactOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServiceActionsForProvisioningArtifactOutput), !p.HasNextPage()) { + break + } } + return p.Err() } +const opListStackInstancesForProvisionedProduct = "ListStackInstancesForProvisionedProduct" + +// ListStackInstancesForProvisionedProductRequest generates a "aws/request.Request" representing the +// client's request for the ListStackInstancesForProvisionedProduct operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListStackInstancesForProvisionedProduct for more information on using the ListStackInstancesForProvisionedProduct +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListStackInstancesForProvisionedProductRequest method. 
+// req, resp := client.ListStackInstancesForProvisionedProductRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListStackInstancesForProvisionedProduct +func (c *ServiceCatalog) ListStackInstancesForProvisionedProductRequest(input *ListStackInstancesForProvisionedProductInput) (req *request.Request, output *ListStackInstancesForProvisionedProductOutput) { + op := &request.Operation{ + Name: opListStackInstancesForProvisionedProduct, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackInstancesForProvisionedProductInput{} + } + + output = &ListStackInstancesForProvisionedProductOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackInstancesForProvisionedProduct API operation for AWS Service Catalog. +// +// Returns summary information about stack instances that are associated with +// the specified CFN_STACKSET type provisioned product. You can filter for stack +// instances that are associated with a specific AWS account name or region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Service Catalog's +// API operation ListStackInstancesForProvisionedProduct for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParametersException "InvalidParametersException" +// One or more parameters provided to the operation are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListStackInstancesForProvisionedProduct +func (c *ServiceCatalog) ListStackInstancesForProvisionedProduct(input *ListStackInstancesForProvisionedProductInput) (*ListStackInstancesForProvisionedProductOutput, error) { + req, out := c.ListStackInstancesForProvisionedProductRequest(input) + return out, req.Send() +} + +// ListStackInstancesForProvisionedProductWithContext is the same as ListStackInstancesForProvisionedProduct with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackInstancesForProvisionedProduct for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceCatalog) ListStackInstancesForProvisionedProductWithContext(ctx aws.Context, input *ListStackInstancesForProvisionedProductInput, opts ...request.Option) (*ListStackInstancesForProvisionedProductOutput, error) { + req, out := c.ListStackInstancesForProvisionedProductRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagOptions = "ListTagOptions" // ListTagOptionsRequest generates a "aws/request.Request" representing the @@ -6327,7 +6531,7 @@ func (c *ServiceCatalog) ListTagOptionsWithContext(ctx aws.Context, input *ListT // // Example iterating over at most 3 pages of a ListTagOptions operation. 
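A minimal usage sketch for the new ListStackInstancesForProvisionedProduct operation (the provisioned product ID is hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	svc := servicecatalog.New(session.Must(session.NewSession()))

	out, err := svc.ListStackInstancesForProvisionedProduct(
		&servicecatalog.ListStackInstancesForProvisionedProductInput{
			ProvisionedProductId: aws.String("pp-example"), // hypothetical CFN_STACKSET product
		})
	if err != nil {
		fmt.Println("ListStackInstancesForProvisionedProduct:", err)
		return
	}
	fmt.Println(out) // per-account/per-Region stack instance summaries
}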
// pageNum := 0 // err := client.ListTagOptionsPages(params, -// func(page *ListTagOptionsOutput, lastPage bool) bool { +// func(page *servicecatalog.ListTagOptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6359,10 +6563,12 @@ func (c *ServiceCatalog) ListTagOptionsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagOptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagOptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6718,7 +6924,7 @@ func (c *ServiceCatalog) SearchProductsWithContext(ctx aws.Context, input *Searc // // Example iterating over at most 3 pages of a SearchProducts operation. // pageNum := 0 // err := client.SearchProductsPages(params, -// func(page *SearchProductsOutput, lastPage bool) bool { +// func(page *servicecatalog.SearchProductsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6750,10 +6956,12 @@ func (c *ServiceCatalog) SearchProductsPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SearchProductsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SearchProductsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6856,7 +7064,7 @@ func (c *ServiceCatalog) SearchProductsAsAdminWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a SearchProductsAsAdmin operation. // pageNum := 0 // err := client.SearchProductsAsAdminPages(params, -// func(page *SearchProductsAsAdminOutput, lastPage bool) bool { +// func(page *servicecatalog.SearchProductsAsAdminOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6888,10 +7096,12 @@ func (c *ServiceCatalog) SearchProductsAsAdminPagesWithContext(ctx aws.Context, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SearchProductsAsAdminOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SearchProductsAsAdminOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6991,7 +7201,7 @@ func (c *ServiceCatalog) SearchProvisionedProductsWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a SearchProvisionedProducts operation. // pageNum := 0 // err := client.SearchProvisionedProductsPages(params, -// func(page *SearchProvisionedProductsOutput, lastPage bool) bool { +// func(page *servicecatalog.SearchProvisionedProductsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7023,10 +7233,12 @@ func (c *ServiceCatalog) SearchProvisionedProductsPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SearchProvisionedProductsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SearchProvisionedProductsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7466,6 +7678,93 @@ func (c *ServiceCatalog) UpdateProvisionedProductWithContext(ctx aws.Context, in return out, req.Send() } +const opUpdateProvisionedProductProperties = "UpdateProvisionedProductProperties" + +// UpdateProvisionedProductPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the UpdateProvisionedProductProperties operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateProvisionedProductProperties for more information on using the UpdateProvisionedProductProperties +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateProvisionedProductPropertiesRequest method. +// req, resp := client.UpdateProvisionedProductPropertiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/UpdateProvisionedProductProperties +func (c *ServiceCatalog) UpdateProvisionedProductPropertiesRequest(input *UpdateProvisionedProductPropertiesInput) (req *request.Request, output *UpdateProvisionedProductPropertiesOutput) { + op := &request.Operation{ + Name: opUpdateProvisionedProductProperties, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateProvisionedProductPropertiesInput{} + } + + output = &UpdateProvisionedProductPropertiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateProvisionedProductProperties API operation for AWS Service Catalog. +// +// Requests updates to the properties of the specified provisioned product. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Service Catalog's +// API operation UpdateProvisionedProductProperties for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParametersException "InvalidParametersException" +// One or more parameters provided to the operation are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeInvalidStateException "InvalidStateException" +// An attempt was made to modify a resource that is in a state that is not valid. +// Check your resources to ensure that they are in valid states before retrying +// the operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/UpdateProvisionedProductProperties +func (c *ServiceCatalog) UpdateProvisionedProductProperties(input *UpdateProvisionedProductPropertiesInput) (*UpdateProvisionedProductPropertiesOutput, error) { + req, out := c.UpdateProvisionedProductPropertiesRequest(input) + return out, req.Send() +} + +// UpdateProvisionedProductPropertiesWithContext is the same as UpdateProvisionedProductProperties with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateProvisionedProductProperties for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ServiceCatalog) UpdateProvisionedProductPropertiesWithContext(ctx aws.Context, input *UpdateProvisionedProductPropertiesInput, opts ...request.Option) (*UpdateProvisionedProductPropertiesOutput, error) { + req, out := c.UpdateProvisionedProductPropertiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateProvisioningArtifact = "UpdateProvisioningArtifact" // UpdateProvisioningArtifactRequest generates a "aws/request.Request" representing the @@ -8785,7 +9084,9 @@ type CreateConstraintInput struct { // The constraint parameters, in JSON format. The syntax depends on the constraint // type as follows: // - // LAUNCHSpecify the RoleArn property as follows: + // LAUNCH + // + // Specify the RoleArn property as follows: // // {"RoleArn" : "arn:aws:iam::123456789012:role/LaunchRole"} // @@ -8793,18 +9094,24 @@ type CreateConstraintInput struct { // // You also cannot have more than one LAUNCH constraint on a product and portfolio. // - // NOTIFICATIONSpecify the NotificationArns property as follows: + // NOTIFICATION + // + // Specify the NotificationArns property as follows: // // {"NotificationArns" : ["arn:aws:sns:us-east-1:123456789012:Topic"]} // - // RESOURCE_UPDATESpecify the TagUpdatesOnProvisionedProduct property as follows: + // RESOURCE_UPDATE + // + // Specify the TagUpdatesOnProvisionedProduct property as follows: // // {"Version":"2.0","Properties":{"TagUpdateOnProvisionedProduct":"String"}} // // The TagUpdatesOnProvisionedProduct property accepts a string value of ALLOWED // or NOT_ALLOWED. // - // STACKSETSpecify the Parameters property as follows: + // STACKSET + // + // Specify the Parameters property as follows: // // {"Version": "String", "Properties": {"AccountList": [ "String" ], "RegionList": // [ "String" ], "AdminRole": "String", "ExecutionRole": "String"}} @@ -8816,7 +9123,9 @@ type CreateConstraintInput struct { // Products with a STACKSET constraint will launch an AWS CloudFormation stack // set. // - // TEMPLATESpecify the Rules property. For more information, see Template Constraint + // TEMPLATE + // + // Specify the Rules property. For more information, see Template Constraint // Rules (http://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html). // // Parameters is a required field @@ -9500,6 +9809,10 @@ type CreateProvisionedProductPlanInput struct { ProvisioningParameters []*UpdateProvisioningParameter `type:"list"` // One or more tags. + // + // If the plan is for an existing provisioned product, the product must have + // a RESOURCE_UPDATE constraint with TagUpdatesOnProvisionedProduct set to ALLOWED + // to allow tag updates. Tags []*Tag `type:"list"` } @@ -9842,17 +10155,25 @@ type CreateServiceActionInput struct { // The self-service action definition. Can be one of the following: // - // NameThe name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance. + // Name // - // VersionThe AWS Systems Manager automation document version. For example, - // "Version": "1" + // The name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance. // - // AssumeRoleThe Amazon Resource Name (ARN) of the role that performs the self-service + // Version + // + // The AWS Systems Manager automation document version. For example, "Version": + // "1" + // + // AssumeRole + // + // The Amazon Resource Name (ARN) of the role that performs the self-service // actions on your behalf. 
For example, "AssumeRole": "arn:aws:iam::12345678910:role/ActionRole". // // To reuse the provisioned product launch role, set to "AssumeRole": "LAUNCH_ROLE". // - // ParametersThe list of parameters in JSON format. + // Parameters + // + // The list of parameters in JSON format. // // For example: [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]. // @@ -11955,25 +12276,109 @@ func (s *DescribeRecordOutput) SetRecordOutputs(v []*RecordOutput) *DescribeReco return s } -type DescribeServiceActionInput struct { +type DescribeServiceActionExecutionParametersInput struct { _ struct{} `type:"structure"` - // The language code. - // - // * en - English (default) - // - // * jp - Japanese - // - // * zh - Chinese AcceptLanguage *string `type:"string"` - // The self-service action identifier. - // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` -} + // ProvisionedProductId is a required field + ProvisionedProductId *string `min:"1" type:"string" required:"true"` -// String returns the string representation + // ServiceActionId is a required field + ServiceActionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeServiceActionExecutionParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceActionExecutionParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeServiceActionExecutionParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeServiceActionExecutionParametersInput"} + if s.ProvisionedProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductId")) + } + if s.ProvisionedProductId != nil && len(*s.ProvisionedProductId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductId", 1)) + } + if s.ServiceActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceActionId")) + } + if s.ServiceActionId != nil && len(*s.ServiceActionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceActionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptLanguage sets the AcceptLanguage field's value. +func (s *DescribeServiceActionExecutionParametersInput) SetAcceptLanguage(v string) *DescribeServiceActionExecutionParametersInput { + s.AcceptLanguage = &v + return s +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. +func (s *DescribeServiceActionExecutionParametersInput) SetProvisionedProductId(v string) *DescribeServiceActionExecutionParametersInput { + s.ProvisionedProductId = &v + return s +} + +// SetServiceActionId sets the ServiceActionId field's value. 
+func (s *DescribeServiceActionExecutionParametersInput) SetServiceActionId(v string) *DescribeServiceActionExecutionParametersInput { + s.ServiceActionId = &v + return s +} + +type DescribeServiceActionExecutionParametersOutput struct { + _ struct{} `type:"structure"` + + ServiceActionParameters []*ExecutionParameter `type:"list"` +} + +// String returns the string representation +func (s DescribeServiceActionExecutionParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceActionExecutionParametersOutput) GoString() string { + return s.String() +} + +// SetServiceActionParameters sets the ServiceActionParameters field's value. +func (s *DescribeServiceActionExecutionParametersOutput) SetServiceActionParameters(v []*ExecutionParameter) *DescribeServiceActionExecutionParametersOutput { + s.ServiceActionParameters = v + return s +} + +type DescribeServiceActionInput struct { + _ struct{} `type:"structure"` + + // The language code. + // + // * en - English (default) + // + // * jp - Japanese + // + // * zh - Chinese + AcceptLanguage *string `type:"string"` + + // The self-service action identifier. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation func (s DescribeServiceActionInput) String() string { return awsutil.Prettify(s) } @@ -12682,6 +13087,8 @@ type ExecuteProvisionedProductServiceActionInput struct { // An idempotency token that uniquely identifies the execute request. ExecuteToken *string `min:"1" type:"string" idempotencyToken:"true"` + Parameters map[string][]*string `min:"1" type:"map"` + // The identifier of the provisioned product. // // ProvisionedProductId is a required field @@ -12709,6 +13116,9 @@ func (s *ExecuteProvisionedProductServiceActionInput) Validate() error { if s.ExecuteToken != nil && len(*s.ExecuteToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("ExecuteToken", 1)) } + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } if s.ProvisionedProductId == nil { invalidParams.Add(request.NewErrParamRequired("ProvisionedProductId")) } @@ -12740,6 +13150,12 @@ func (s *ExecuteProvisionedProductServiceActionInput) SetExecuteToken(v string) return s } +// SetParameters sets the Parameters field's value. +func (s *ExecuteProvisionedProductServiceActionInput) SetParameters(v map[string][]*string) *ExecuteProvisionedProductServiceActionInput { + s.Parameters = v + return s +} + // SetProvisionedProductId sets the ProvisionedProductId field's value. func (s *ExecuteProvisionedProductServiceActionInput) SetProvisionedProductId(v string) *ExecuteProvisionedProductServiceActionInput { s.ProvisionedProductId = &v @@ -12776,6 +13192,44 @@ func (s *ExecuteProvisionedProductServiceActionOutput) SetRecordDetail(v *Record return s } +type ExecutionParameter struct { + _ struct{} `type:"structure"` + + DefaultValues []*string `type:"list"` + + Name *string `min:"1" type:"string"` + + Type *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ExecutionParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionParameter) GoString() string { + return s.String() +} + +// SetDefaultValues sets the DefaultValues field's value. 
+func (s *ExecutionParameter) SetDefaultValues(v []*string) *ExecutionParameter { + s.DefaultValues = v + return s +} + +// SetName sets the Name field's value. +func (s *ExecutionParameter) SetName(v string) *ExecutionParameter { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *ExecutionParameter) SetType(v string) *ExecutionParameter { + s.Type = &v + return s +} + // An object containing information about the error, along with identifying // information about the self-service action and its associations. type FailedServiceActionAssociation struct { @@ -14634,6 +15088,114 @@ func (s *ListServiceActionsOutput) SetServiceActionSummaries(v []*ServiceActionS return s } +type ListStackInstancesForProvisionedProductInput struct { + _ struct{} `type:"structure"` + + // The language code. + // + // * en - English (default) + // + // * jp - Japanese + // + // * zh - Chinese + AcceptLanguage *string `type:"string"` + + // The maximum number of items to return with this call. + PageSize *int64 `type:"integer"` + + // The page token for the next set of results. To retrieve the first set of + // results, use null. + PageToken *string `type:"string"` + + // The identifier of the provisioned product. + // + // ProvisionedProductId is a required field + ProvisionedProductId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListStackInstancesForProvisionedProductInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesForProvisionedProductInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStackInstancesForProvisionedProductInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStackInstancesForProvisionedProductInput"} + if s.ProvisionedProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductId")) + } + if s.ProvisionedProductId != nil && len(*s.ProvisionedProductId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptLanguage sets the AcceptLanguage field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetAcceptLanguage(v string) *ListStackInstancesForProvisionedProductInput { + s.AcceptLanguage = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetPageSize(v int64) *ListStackInstancesForProvisionedProductInput { + s.PageSize = &v + return s +} + +// SetPageToken sets the PageToken field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetPageToken(v string) *ListStackInstancesForProvisionedProductInput { + s.PageToken = &v + return s +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetProvisionedProductId(v string) *ListStackInstancesForProvisionedProductInput { + s.ProvisionedProductId = &v + return s +} + +type ListStackInstancesForProvisionedProductOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next set of results. If there are no + // additional results, this value is null. + NextPageToken *string `type:"string"` + + // List of stack instances. 
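+	// Each entry identifies the AWS account and region of one stack instance
+	// and reports its synchronization status with the stack set; see the
+	// StackInstance type later in this file.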
+ StackInstances []*StackInstance `type:"list"` +} + +// String returns the string representation +func (s ListStackInstancesForProvisionedProductOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesForProvisionedProductOutput) GoString() string { + return s.String() +} + +// SetNextPageToken sets the NextPageToken field's value. +func (s *ListStackInstancesForProvisionedProductOutput) SetNextPageToken(v string) *ListStackInstancesForProvisionedProductOutput { + s.NextPageToken = &v + return s +} + +// SetStackInstances sets the StackInstances field's value. +func (s *ListStackInstancesForProvisionedProductOutput) SetStackInstances(v []*StackInstance) *ListStackInstancesForProvisionedProductOutput { + s.StackInstances = v + return s +} + // Filters to use when listing TagOptions. type ListTagOptionsFilters struct { _ struct{} `type:"structure"` @@ -15903,6 +16465,10 @@ type ProvisioningArtifact struct { // The description of the provisioning artifact. Description *string `type:"string"` + // Information set by the administrator to provide guidance to end users about + // which provisioning artifacts to use. + Guidance *string `type:"string" enum:"ProvisioningArtifactGuidance"` + // The identifier of the provisioning artifact. Id *string `min:"1" type:"string"` @@ -15932,6 +16498,12 @@ func (s *ProvisioningArtifact) SetDescription(v string) *ProvisioningArtifact { return s } +// SetGuidance sets the Guidance field's value. +func (s *ProvisioningArtifact) SetGuidance(v string) *ProvisioningArtifact { + s.Guidance = &v + return s +} + // SetId sets the Id field's value. func (s *ProvisioningArtifact) SetId(v string) *ProvisioningArtifact { s.Id = &v @@ -15958,6 +16530,10 @@ type ProvisioningArtifactDetail struct { // The description of the provisioning artifact. Description *string `type:"string"` + // Information set by the administrator to provide guidance to end users about + // which provisioning artifacts to use. + Guidance *string `type:"string" enum:"ProvisioningArtifactGuidance"` + // The identifier of the provisioning artifact. Id *string `min:"1" type:"string"` @@ -16002,6 +16578,12 @@ func (s *ProvisioningArtifactDetail) SetDescription(v string) *ProvisioningArtif return s } +// SetGuidance sets the Guidance field's value. +func (s *ProvisioningArtifactDetail) SetGuidance(v string) *ProvisioningArtifactDetail { + s.Guidance = &v + return s +} + // SetId sets the Id field's value. func (s *ProvisioningArtifactDetail) SetId(v string) *ProvisioningArtifactDetail { s.Id = &v @@ -17817,6 +18399,68 @@ func (s *ShareError) SetMessage(v string) *ShareError { return s } +// An AWS CloudFormation stack, in a specific account and region, that's part +// of a stack set operation. A stack instance is a reference to an attempted +// or actual stack in a given account within a given region. A stack instance +// can exist without a stack—for example, if the stack couldn't be created +// for some reason. A stack instance is associated with only one stack set. +// Each stack instance contains the ID of its associated stack set, as well +// as the ID of the actual stack and the stack status. +type StackInstance struct { + _ struct{} `type:"structure"` + + // The name of the AWS account that the stack instance is associated with. + Account *string `type:"string"` + + // The name of the AWS region that the stack instance is associated with. 
+ Region *string `type:"string"` + + // The status of the stack instance, in terms of its synchronization with its + // associated stack set. + // + // * INOPERABLE: A DeleteStackInstances operation has failed and left the + // stack in an unstable state. Stacks in this state are excluded from further + // UpdateStackSet operations. You might need to perform a DeleteStackInstances + // operation, with RetainStacks set to true, to delete the stack instance, + // and then delete the stack manually. + // + // * OUTDATED: The stack isn't currently up to date with the stack set because + // either the associated stack failed during a CreateStackSet or UpdateStackSet + // operation, or the stack was part of a CreateStackSet or UpdateStackSet + // operation that failed or was stopped before the stack was created or updated. + // + // * CURRENT: The stack is currently up to date with the stack set. + StackInstanceStatus *string `type:"string" enum:"StackInstanceStatus"` +} + +// String returns the string representation +func (s StackInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstance) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *StackInstance) SetAccount(v string) *StackInstance { + s.Account = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *StackInstance) SetRegion(v string) *StackInstance { + s.Region = &v + return s +} + +// SetStackInstanceStatus sets the StackInstanceStatus field's value. +func (s *StackInstance) SetStackInstanceStatus(v string) *StackInstance { + s.StackInstanceStatus = &v + return s +} + // Information about a tag. A tag is a key-value pair. Tags are propagated to // the resources created when provisioning a product. type Tag struct { @@ -18093,6 +18737,54 @@ type UpdateConstraintInput struct { // // Id is a required field Id *string `min:"1" type:"string" required:"true"` + + // The constraint parameters, in JSON format. The syntax depends on the constraint + // type as follows: + // + // LAUNCH + // + // Specify the RoleArn property as follows: + // + // {"RoleArn" : "arn:aws:iam::123456789012:role/LaunchRole"} + // + // You cannot have both a LAUNCH and a STACKSET constraint. + // + // You also cannot have more than one LAUNCH constraint on a product and portfolio. + // + // NOTIFICATION + // + // Specify the NotificationArns property as follows: + // + // {"NotificationArns" : ["arn:aws:sns:us-east-1:123456789012:Topic"]} + // + // RESOURCE_UPDATE + // + // Specify the TagUpdatesOnProvisionedProduct property as follows: + // + // {"Version":"2.0","Properties":{"TagUpdateOnProvisionedProduct":"String"}} + // + // The TagUpdatesOnProvisionedProduct property accepts a string value of ALLOWED + // or NOT_ALLOWED. + // + // STACKSET + // + // Specify the Parameters property as follows: + // + // {"Version": "String", "Properties": {"AccountList": [ "String" ], "RegionList": + // [ "String" ], "AdminRole": "String", "ExecutionRole": "String"}} + // + // You cannot have both a LAUNCH and a STACKSET constraint. + // + // You also cannot have more than one STACKSET constraint on a product and portfolio. + // + // Products with a STACKSET constraint will launch an AWS CloudFormation stack + // set. + // + // TEMPLATE + // + // Specify the Rules property. For more information, see Template Constraint + // Rules (http://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html). 
+ Parameters *string `type:"string"` } // String returns the string representation @@ -18139,6 +18831,12 @@ func (s *UpdateConstraintInput) SetId(v string) *UpdateConstraintInput { return s } +// SetParameters sets the Parameters field's value. +func (s *UpdateConstraintInput) SetParameters(v string) *UpdateConstraintInput { + s.Parameters = &v + return s +} + type UpdateConstraintOutput struct { _ struct{} `type:"structure"` @@ -18531,7 +19229,7 @@ type UpdateProvisionedProductInput struct { // and ProvisionedProductId. ProvisionedProductId *string `min:"1" type:"string"` - // The updated name of the provisioned product. You cannot specify both ProvisionedProductName + // The name of the provisioned product. You cannot specify both ProvisionedProductName // and ProvisionedProductId. ProvisionedProductName *string `min:"1" type:"string"` @@ -18699,6 +19397,160 @@ func (s *UpdateProvisionedProductOutput) SetRecordDetail(v *RecordDetail) *Updat return s } +type UpdateProvisionedProductPropertiesInput struct { + _ struct{} `type:"structure"` + + // The language code. + // + // * en - English (default) + // + // * jp - Japanese + // + // * zh - Chinese + AcceptLanguage *string `type:"string"` + + // The idempotency token that uniquely identifies the provisioning product update + // request. + IdempotencyToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The identifier of the provisioned product. + // + // ProvisionedProductId is a required field + ProvisionedProductId *string `min:"1" type:"string" required:"true"` + + // A map that contains the provisioned product properties to be updated. + // + // The OWNER key only accepts user ARNs. The owner is the user that is allowed + // to see, update, terminate, and execute service actions in the provisioned + // product. + // + // The administrator can change the owner of a provisioned product to another + // IAM user within the same account. Both end user owners and administrators + // can see ownership history of the provisioned product using the ListRecordHistory + // API. The new owner can describe all past records for the provisioned product + // using the DescribeRecord API. The previous owner can no longer use DescribeRecord, + // but can still see the product's history from when he was an owner using ListRecordHistory. + // + // If a provisioned product ownership is assigned to an end user, they can see + // and perform any action through the API or Service Catalog console such as + // update, terminate, and execute service actions. If an end user provisions + // a product and the owner is updated to someone else, they will no longer be + // able to see or perform any actions through API or the Service Catalog console + // on that provisioned product. + // + // ProvisionedProductProperties is a required field + ProvisionedProductProperties map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s UpdateProvisionedProductPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProvisionedProductPropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
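+//
+// The SDK invokes Validate before sending the request, but it can also be
+// called directly; a minimal sketch, assuming an otherwise empty input:
+//
+//    in := &UpdateProvisionedProductPropertiesInput{}
+//    if err := in.Validate(); err != nil {
+//        // err reports ProvisionedProductId and ProvisionedProductProperties
+//        // as missing required parameters.
+//    }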
+func (s *UpdateProvisionedProductPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateProvisionedProductPropertiesInput"} + if s.IdempotencyToken != nil && len(*s.IdempotencyToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdempotencyToken", 1)) + } + if s.ProvisionedProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductId")) + } + if s.ProvisionedProductId != nil && len(*s.ProvisionedProductId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductId", 1)) + } + if s.ProvisionedProductProperties == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductProperties")) + } + if s.ProvisionedProductProperties != nil && len(s.ProvisionedProductProperties) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductProperties", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptLanguage sets the AcceptLanguage field's value. +func (s *UpdateProvisionedProductPropertiesInput) SetAcceptLanguage(v string) *UpdateProvisionedProductPropertiesInput { + s.AcceptLanguage = &v + return s +} + +// SetIdempotencyToken sets the IdempotencyToken field's value. +func (s *UpdateProvisionedProductPropertiesInput) SetIdempotencyToken(v string) *UpdateProvisionedProductPropertiesInput { + s.IdempotencyToken = &v + return s +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. +func (s *UpdateProvisionedProductPropertiesInput) SetProvisionedProductId(v string) *UpdateProvisionedProductPropertiesInput { + s.ProvisionedProductId = &v + return s +} + +// SetProvisionedProductProperties sets the ProvisionedProductProperties field's value. +func (s *UpdateProvisionedProductPropertiesInput) SetProvisionedProductProperties(v map[string]*string) *UpdateProvisionedProductPropertiesInput { + s.ProvisionedProductProperties = v + return s +} + +type UpdateProvisionedProductPropertiesOutput struct { + _ struct{} `type:"structure"` + + // The provisioned product identifier. + ProvisionedProductId *string `min:"1" type:"string"` + + // A map that contains the properties updated. + ProvisionedProductProperties map[string]*string `min:"1" type:"map"` + + // The identifier of the record. + RecordId *string `min:"1" type:"string"` + + // The status of the request. + Status *string `type:"string" enum:"RecordStatus"` +} + +// String returns the string representation +func (s UpdateProvisionedProductPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProvisionedProductPropertiesOutput) GoString() string { + return s.String() +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. +func (s *UpdateProvisionedProductPropertiesOutput) SetProvisionedProductId(v string) *UpdateProvisionedProductPropertiesOutput { + s.ProvisionedProductId = &v + return s +} + +// SetProvisionedProductProperties sets the ProvisionedProductProperties field's value. +func (s *UpdateProvisionedProductPropertiesOutput) SetProvisionedProductProperties(v map[string]*string) *UpdateProvisionedProductPropertiesOutput { + s.ProvisionedProductProperties = v + return s +} + +// SetRecordId sets the RecordId field's value. +func (s *UpdateProvisionedProductPropertiesOutput) SetRecordId(v string) *UpdateProvisionedProductPropertiesOutput { + s.RecordId = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *UpdateProvisionedProductPropertiesOutput) SetStatus(v string) *UpdateProvisionedProductPropertiesOutput { + s.Status = &v + return s +} + type UpdateProvisioningArtifactInput struct { _ struct{} `type:"structure"` @@ -18717,6 +19569,17 @@ type UpdateProvisioningArtifactInput struct { // The updated description of the provisioning artifact. Description *string `type:"string"` + // Information set by the administrator to provide guidance to end users about + // which provisioning artifacts to use. + // + // The DEFAULT value indicates that the product version is active. + // + // The administrator can set the guidance to DEPRECATED to inform users that + // the product version is deprecated. Users are able to make updates to a provisioned + // product of a deprecated version but cannot launch new provisioned products + // using a deprecated version. + Guidance *string `type:"string" enum:"ProvisioningArtifactGuidance"` + // The updated name of the provisioning artifact. Name *string `type:"string"` @@ -18781,6 +19644,12 @@ func (s *UpdateProvisioningArtifactInput) SetDescription(v string) *UpdateProvis return s } +// SetGuidance sets the Guidance field's value. +func (s *UpdateProvisioningArtifactInput) SetGuidance(v string) *UpdateProvisioningArtifactInput { + s.Guidance = &v + return s +} + // SetName sets the Name field's value. func (s *UpdateProvisioningArtifactInput) SetName(v string) *UpdateProvisioningArtifactInput { s.Name = &v @@ -18977,15 +19846,21 @@ type UpdateProvisioningPreferences struct { // // Applicable only to a CFN_STACKSET provisioned product type. // - // CREATECreates a new stack instance in the stack set represented by the provisioned + // CREATE + // + // Creates a new stack instance in the stack set represented by the provisioned // product. In this case, only new stack instances are created based on accounts // and regions; if new ProductId or ProvisioningArtifactID are passed, they // will be ignored. // - // UPDATEUpdates the stack set represented by the provisioned product and also - // its stack instances. + // UPDATE + // + // Updates the stack set represented by the provisioned product and also its + // stack instances. + // + // DELETE // - // DELETEDeletes a stack instance in the stack set represented by the provisioned + // Deletes a stack instance in the stack set represented by the provisioned // product. 
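+	//
+	// The accepted values correspond to the StackSetOperationType* constants
+	// declared near the end of this file (CREATE, UPDATE, and DELETE).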
StackSetOperationType *string `type:"string" enum:"StackSetOperationType"` @@ -19422,6 +20297,11 @@ const ( ProductViewSortByCreationDate = "CreationDate" ) +const ( + // PropertyKeyOwner is a PropertyKey enum value + PropertyKeyOwner = "OWNER" +) + const ( // ProvisionedProductPlanStatusCreateInProgress is a ProvisionedProductPlanStatus enum value ProvisionedProductPlanStatusCreateInProgress = "CREATE_IN_PROGRESS" @@ -19469,6 +20349,14 @@ const ( ProvisionedProductViewFilterBySearchQuery = "SearchQuery" ) +const ( + // ProvisioningArtifactGuidanceDefault is a ProvisioningArtifactGuidance enum value + ProvisioningArtifactGuidanceDefault = "DEFAULT" + + // ProvisioningArtifactGuidanceDeprecated is a ProvisioningArtifactGuidance enum value + ProvisioningArtifactGuidanceDeprecated = "DEPRECATED" +) + const ( // ProvisioningArtifactPropertyNameId is a ProvisioningArtifactPropertyName enum value ProvisioningArtifactPropertyNameId = "Id" @@ -19605,6 +20493,17 @@ const ( SortOrderDescending = "DESCENDING" ) +const ( + // StackInstanceStatusCurrent is a StackInstanceStatus enum value + StackInstanceStatusCurrent = "CURRENT" + + // StackInstanceStatusOutdated is a StackInstanceStatus enum value + StackInstanceStatusOutdated = "OUTDATED" + + // StackInstanceStatusInoperable is a StackInstanceStatus enum value + StackInstanceStatusInoperable = "INOPERABLE" +) + const ( // StackSetOperationTypeCreate is a StackSetOperationType enum value StackSetOperationTypeCreate = "CREATE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go index f15a5b8024d..718d9a1486d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go @@ -46,11 +46,11 @@ const ( // svc := servicecatalog.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceCatalog { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
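+// The signature now also takes the resolved partition ID so it can be stored
+// in the client metadata (see the PartitionID field below).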
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceCatalog { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceCatalog { svc := &ServiceCatalog{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-10", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go index bca3bcb654f..796c561766d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go @@ -351,17 +351,7 @@ func (c *ServiceDiscovery) CreateServiceRequest(input *CreateServiceInput) (req // Creates a service, which defines the configuration for the following entities: // // * For public and private DNS namespaces, one of the following combinations -// of DNS records in Amazon Route 53: -// -// A -// -// AAAA -// -// A and AAAA -// -// SRV -// -// CNAME +// of DNS records in Amazon Route 53: A AAAA A and AAAA SRV CNAME // // * Optionally, a health check // @@ -983,7 +973,7 @@ func (c *ServiceDiscovery) GetInstancesHealthStatusWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a GetInstancesHealthStatus operation. // pageNum := 0 // err := client.GetInstancesHealthStatusPages(params, -// func(page *GetInstancesHealthStatusOutput, lastPage bool) bool { +// func(page *servicediscovery.GetInstancesHealthStatusOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1015,10 +1005,12 @@ func (c *ServiceDiscovery) GetInstancesHealthStatusPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetInstancesHealthStatusOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetInstancesHealthStatusOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1379,7 +1371,7 @@ func (c *ServiceDiscovery) ListInstancesWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListInstances operation. // pageNum := 0 // err := client.ListInstancesPages(params, -// func(page *ListInstancesOutput, lastPage bool) bool { +// func(page *servicediscovery.ListInstancesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1411,10 +1403,12 @@ func (c *ServiceDiscovery) ListInstancesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListInstancesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListInstancesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1517,7 +1511,7 @@ func (c *ServiceDiscovery) ListNamespacesWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListNamespaces operation. 
// pageNum := 0 // err := client.ListNamespacesPages(params, -// func(page *ListNamespacesOutput, lastPage bool) bool { +// func(page *servicediscovery.ListNamespacesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1549,10 +1543,12 @@ func (c *ServiceDiscovery) ListNamespacesPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListNamespacesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListNamespacesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1654,7 +1650,7 @@ func (c *ServiceDiscovery) ListOperationsWithContext(ctx aws.Context, input *Lis // // Example iterating over at most 3 pages of a ListOperations operation. // pageNum := 0 // err := client.ListOperationsPages(params, -// func(page *ListOperationsOutput, lastPage bool) bool { +// func(page *servicediscovery.ListOperationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1686,10 +1682,12 @@ func (c *ServiceDiscovery) ListOperationsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListOperationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListOperationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1792,7 +1790,7 @@ func (c *ServiceDiscovery) ListServicesWithContext(ctx aws.Context, input *ListS // // Example iterating over at most 3 pages of a ListServices operation. // pageNum := 0 // err := client.ListServicesPages(params, -// func(page *ListServicesOutput, lastPage bool) bool { +// func(page *servicediscovery.ListServicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1824,10 +1822,12 @@ func (c *ServiceDiscovery) ListServicesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3132,23 +3132,15 @@ type DnsRecord struct { // // * The values of priority and weight are both set to 1 and can't be changed. // - // // * The value of port comes from the value that you specify for the AWS_INSTANCE_PORT // attribute when you submit a RegisterInstance request. // // * The value of service-hostname is a concatenation of the following values: - // - // The value that you specify for InstanceId when you register an instance. - // - // The name of the service. - // - // The name of the namespace. - // - // For example, if the value of InstanceId is test, the name of the service - // is backend, and the name of the namespace is example.com, the value of - // service-hostname is: - // - // test.backend.example.com + // The value that you specify for InstanceId when you register an instance. + // The name of the service. The name of the namespace. For example, if the + // value of InstanceId is test, the name of the service is backend, and the + // name of the namespace is example.com, the value of service-hostname is: + // test.backend.example.com // // If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4, // AWS_INSTANCE_IPV6, or both in the RegisterInstance request, AWS Cloud Map @@ -3664,14 +3656,11 @@ type HealthCheckConfig struct { // // * HTTPS: Route 53 tries to establish a TCP connection. 
If successful, // Route 53 submits an HTTPS request and waits for an HTTP status code of - // 200 or greater and less than 400. - // - // If you specify HTTPS for the value of Type, the endpoint must support TLS - // v1.0 or later. + // 200 or greater and less than 400. If you specify HTTPS for the value of + // Type, the endpoint must support TLS v1.0 or later. // - // * TCP: Route 53 tries to establish a TCP connection. - // - // If you specify TCP for Type, don't specify a value for ResourcePath. + // * TCP: Route 53 tries to establish a TCP connection. If you specify TCP + // for Type, don't specify a value for ResourcePath. // // For more information, see How Route 53 Determines Whether an Endpoint Is // Healthy (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) @@ -3919,7 +3908,6 @@ type Instance struct { // // * The attributes that apply to the records that are defined in the service. // - // // * For each attribute, the applicable value. // // Supported attribute keys include the following: @@ -3933,8 +3921,8 @@ type Instance struct { // // Note the following: // - // The configuration for the service that is specified by ServiceId must include - // settings for an A record, an AAAA record, or both. + // * The configuration for the service that is specified by ServiceId must + // include settings for an A record, an AAAA record, or both. // // * In the service that is specified by ServiceId, the value of RoutingPolicy // must be WEIGHTED. @@ -3954,30 +3942,37 @@ type Instance struct { // If the service configuration includes a CNAME record, the domain name that // you want Route 53 to return in response to DNS queries, for example, example.com. // - // This value is required if the service specified by ServiceIdincludes settings for an CNAME record. + // This value is required if the service specified by ServiceId includes settings + // for an CNAME record. // // AWS_INSTANCE_IPV4 // // If the service configuration includes an A record, the IPv4 address that // you want Route 53 to return in response to DNS queries, for example, 192.0.2.44. // - // This value is required if the service specified by ServiceIdincludes settings for an A record. If the service includes settings for an - // SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. + // This value is required if the service specified by ServiceId includes settings + // for an A record. If the service includes settings for an SRV record, you + // must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. // // AWS_INSTANCE_IPV6 // // If the service configuration includes an AAAA record, the IPv6 address that // you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. // - // This value is required if the service specified by ServiceIdincludes settings for an AAAA record. If the service includes settings for - // an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. + // This value is required if the service specified by ServiceId includes settings + // for an AAAA record. If the service includes settings for an SRV record, you + // must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. // // AWS_INSTANCE_PORT // // If the service includes an SRV record, the value that you want Route 53 to // return for the port. 
// - // If the service includes HealthCheckConfig + // If the service includes HealthCheckConfig, the port on the endpoint that + // you want Route 53 to send requests to. + // + // This value is required if you specified settings for an SRV record when you + // created the service. Attributes map[string]*string `type:"map"` // A unique string that identifies the request and that allows failed RegisterInstance @@ -4001,10 +3996,9 @@ type Instance struct { // // * If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates // the existing DNS records. If there's also an existing health check, AWS - // Cloud Map deletes the old health check and creates a new one. - // - // The health check isn't deleted immediately, so it will still appear for a - // while if you submit a ListHealthChecks request, for example. + // Cloud Map deletes the old health check and creates a new one. The health + // check isn't deleted immediately, so it will still appear for a while if + // you submit a ListHealthChecks request, for example. // // Id is a required field Id *string `type:"string" required:"true"` @@ -5129,7 +5123,6 @@ type RegisterInstanceInput struct { // // * The attributes that apply to the records that are defined in the service. // - // // * For each attribute, the applicable value. // // Supported attribute keys include the following: @@ -5139,12 +5132,13 @@ type RegisterInstanceInput struct { // If you want AWS Cloud Map to create an Amazon Route 53 alias record that // routes traffic to an Elastic Load Balancing load balancer, specify the DNS // name that is associated with the load balancer. For information about how - // to get the DNS name, see "DNSName" in the topic AliasTarget (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html)in the Route 53 API Reference. + // to get the DNS name, see "DNSName" in the topic AliasTarget (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html) + // in the Route 53 API Reference. // // Note the following: // - // The configuration for the service that is specified by ServiceId must include - // settings for an A record, an AAAA record, or both. + // * The configuration for the service that is specified by ServiceId must + // include settings for an A record, an AAAA record, or both. // // * In the service that is specified by ServiceId, the value of RoutingPolicy // must be WEIGHTED. @@ -5161,43 +5155,54 @@ type RegisterInstanceInput struct { // // AWS_INIT_HEALTH_STATUS // - // If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUSto specify the initial status of the custom health check, HEALTHYor UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY. + // If the service configuration includes HealthCheckCustomConfig, you can optionally + // use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health + // check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, + // the initial status is HEALTHY. // // AWS_INSTANCE_CNAME // // If the service configuration includes a CNAME record, the domain name that // you want Route 53 to return in response to DNS queries, for example, example.com. // - // This value is required if the service specified by ServiceIdincludes settings for an CNAME record. + // This value is required if the service specified by ServiceId includes settings + // for an CNAME record. 
// // AWS_INSTANCE_IPV4 // // If the service configuration includes an A record, the IPv4 address that // you want Route 53 to return in response to DNS queries, for example, 192.0.2.44. // - // This value is required if the service specified by ServiceIdincludes settings for an A record. If the service includes settings for an - // SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. + // This value is required if the service specified by ServiceId includes settings + // for an A record. If the service includes settings for an SRV record, you + // must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. // // AWS_INSTANCE_IPV6 // // If the service configuration includes an AAAA record, the IPv6 address that // you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. // - // This value is required if the service specified by ServiceIdincludes settings for an AAAA record. If the service includes settings for - // an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. + // This value is required if the service specified by ServiceId includes settings + // for an AAAA record. If the service includes settings for an SRV record, you + // must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. // // AWS_INSTANCE_PORT // // If the service includes an SRV record, the value that you want Route 53 to // return for the port. // - // If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to. + // If the service includes HealthCheckConfig, the port on the endpoint that + // you want Route 53 to send requests to. // // This value is required if you specified settings for an SRV record when you // created the service. // // Custom attributes // + // You can add up to 30 custom attributes. For each key-value pair, the maximum + // length of the attribute name is 255 characters, and the maximum length of + // the attribute value is 1,024 characters. + // // Attributes is a required field Attributes map[string]*string `type:"map" required:"true"` @@ -5222,9 +5227,8 @@ type RegisterInstanceInput struct { // // * If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates // the existing DNS records, if any. If there's also an existing health check, - // AWS Cloud Map deletes the old health check and creates a new one. - // - // The health check isn't deleted immediately, so it will still appear for a + // AWS Cloud Map deletes the old health check and creates a new one. The + // health check isn't deleted immediately, so it will still appear for a // while if you submit a ListHealthChecks request, for example. // // InstanceId is a required field diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go index 3463e12c241..77d77772c90 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go @@ -46,11 +46,11 @@ const ( // svc := servicediscovery.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceDiscovery { c := p.ClientConfig(EndpointsID, cfgs...) 
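+	// In this SDK version ClientConfig also resolves the endpoint's partition
+	// ID, which is passed through to newClient below.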
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceDiscovery { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceDiscovery { svc := &ServiceDiscovery{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-14", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go new file mode 100644 index 00000000000..efc3cf18659 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go @@ -0,0 +1,3991 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package servicequotas + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssociateServiceQuotaTemplate = "AssociateServiceQuotaTemplate" + +// AssociateServiceQuotaTemplateRequest generates a "aws/request.Request" representing the +// client's request for the AssociateServiceQuotaTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateServiceQuotaTemplate for more information on using the AssociateServiceQuotaTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateServiceQuotaTemplateRequest method. +// req, resp := client.AssociateServiceQuotaTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/AssociateServiceQuotaTemplate +func (c *ServiceQuotas) AssociateServiceQuotaTemplateRequest(input *AssociateServiceQuotaTemplateInput) (req *request.Request, output *AssociateServiceQuotaTemplateOutput) { + op := &request.Operation{ + Name: opAssociateServiceQuotaTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateServiceQuotaTemplateInput{} + } + + output = &AssociateServiceQuotaTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateServiceQuotaTemplate API operation for Service Quotas. 
+// +// Associates the Service Quotas template with your organization so that when +// new accounts are created in your organization, the template submits increase +// requests for the specified service quotas. Use the Service Quotas template +// to request an increase for any adjustable quota value. After you define the +// Service Quotas template, use this operation to associate, or enable, the +// template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation AssociateServiceQuotaTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeOrganizationNotInAllFeaturesModeException "OrganizationNotInAllFeaturesModeException" +// The organization that your account belongs to, is not in All Features mode. +// To enable all features mode, see EnableAllFeatures (https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnableAllFeatures.html). +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/AssociateServiceQuotaTemplate +func (c *ServiceQuotas) AssociateServiceQuotaTemplate(input *AssociateServiceQuotaTemplateInput) (*AssociateServiceQuotaTemplateOutput, error) { + req, out := c.AssociateServiceQuotaTemplateRequest(input) + return out, req.Send() +} + +// AssociateServiceQuotaTemplateWithContext is the same as AssociateServiceQuotaTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateServiceQuotaTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) AssociateServiceQuotaTemplateWithContext(ctx aws.Context, input *AssociateServiceQuotaTemplateInput, opts ...request.Option) (*AssociateServiceQuotaTemplateOutput, error) { + req, out := c.AssociateServiceQuotaTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
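+	// Send transmits the request using the caller's context and any applied
+	// options.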
+ return out, req.Send() +} + +const opDeleteServiceQuotaIncreaseRequestFromTemplate = "DeleteServiceQuotaIncreaseRequestFromTemplate" + +// DeleteServiceQuotaIncreaseRequestFromTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServiceQuotaIncreaseRequestFromTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteServiceQuotaIncreaseRequestFromTemplate for more information on using the DeleteServiceQuotaIncreaseRequestFromTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServiceQuotaIncreaseRequestFromTemplateRequest method. +// req, resp := client.DeleteServiceQuotaIncreaseRequestFromTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/DeleteServiceQuotaIncreaseRequestFromTemplate +func (c *ServiceQuotas) DeleteServiceQuotaIncreaseRequestFromTemplateRequest(input *DeleteServiceQuotaIncreaseRequestFromTemplateInput) (req *request.Request, output *DeleteServiceQuotaIncreaseRequestFromTemplateOutput) { + op := &request.Operation{ + Name: opDeleteServiceQuotaIncreaseRequestFromTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceQuotaIncreaseRequestFromTemplateInput{} + } + + output = &DeleteServiceQuotaIncreaseRequestFromTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteServiceQuotaIncreaseRequestFromTemplate API operation for Service Quotas. +// +// Removes a service quota increase request from the Service Quotas template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation DeleteServiceQuotaIncreaseRequestFromTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. 
+// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/DeleteServiceQuotaIncreaseRequestFromTemplate +func (c *ServiceQuotas) DeleteServiceQuotaIncreaseRequestFromTemplate(input *DeleteServiceQuotaIncreaseRequestFromTemplateInput) (*DeleteServiceQuotaIncreaseRequestFromTemplateOutput, error) { + req, out := c.DeleteServiceQuotaIncreaseRequestFromTemplateRequest(input) + return out, req.Send() +} + +// DeleteServiceQuotaIncreaseRequestFromTemplateWithContext is the same as DeleteServiceQuotaIncreaseRequestFromTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteServiceQuotaIncreaseRequestFromTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) DeleteServiceQuotaIncreaseRequestFromTemplateWithContext(ctx aws.Context, input *DeleteServiceQuotaIncreaseRequestFromTemplateInput, opts ...request.Option) (*DeleteServiceQuotaIncreaseRequestFromTemplateOutput, error) { + req, out := c.DeleteServiceQuotaIncreaseRequestFromTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateServiceQuotaTemplate = "DisassociateServiceQuotaTemplate" + +// DisassociateServiceQuotaTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateServiceQuotaTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateServiceQuotaTemplate for more information on using the DisassociateServiceQuotaTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateServiceQuotaTemplateRequest method. 
+// req, resp := client.DisassociateServiceQuotaTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/DisassociateServiceQuotaTemplate +func (c *ServiceQuotas) DisassociateServiceQuotaTemplateRequest(input *DisassociateServiceQuotaTemplateInput) (req *request.Request, output *DisassociateServiceQuotaTemplateOutput) { + op := &request.Operation{ + Name: opDisassociateServiceQuotaTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateServiceQuotaTemplateInput{} + } + + output = &DisassociateServiceQuotaTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateServiceQuotaTemplate API operation for Service Quotas. +// +// Disables the Service Quotas template. Once the template is disabled, it does +// not request quota increases for new accounts in your organization. Disabling +// the quota template does not apply the quota increase requests from the template. +// +// Related operations +// +// * To enable the quota template, call AssociateServiceQuotaTemplate. +// +// * To delete a specific service quota from the template, use DeleteServiceQuotaIncreaseRequestFromTemplate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation DisassociateServiceQuotaTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeServiceQuotaTemplateNotInUseException "ServiceQuotaTemplateNotInUseException" +// The quota request template is not associated with your organization. +// +// To use the template, call AssociateServiceQuotaTemplate. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. 
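+//
+// A minimal error-handling sketch for the codes above (the client value is
+// assumed to be a *ServiceQuotas constructed elsewhere, e.g. with New):
+//
+//    if _, err := client.DisassociateServiceQuotaTemplate(&servicequotas.DisassociateServiceQuotaTemplateInput{}); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == servicequotas.ErrCodeServiceQuotaTemplateNotInUseException {
+//            // The template was never associated, so there is nothing to disable.
+//        }
+//    }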
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/DisassociateServiceQuotaTemplate
+func (c *ServiceQuotas) DisassociateServiceQuotaTemplate(input *DisassociateServiceQuotaTemplateInput) (*DisassociateServiceQuotaTemplateOutput, error) {
+	req, out := c.DisassociateServiceQuotaTemplateRequest(input)
+	return out, req.Send()
+}
+
+// DisassociateServiceQuotaTemplateWithContext is the same as DisassociateServiceQuotaTemplate with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisassociateServiceQuotaTemplate for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ServiceQuotas) DisassociateServiceQuotaTemplateWithContext(ctx aws.Context, input *DisassociateServiceQuotaTemplateInput, opts ...request.Option) (*DisassociateServiceQuotaTemplateOutput, error) {
+	req, out := c.DisassociateServiceQuotaTemplateRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetAWSDefaultServiceQuota = "GetAWSDefaultServiceQuota"
+
+// GetAWSDefaultServiceQuotaRequest generates a "aws/request.Request" representing the
+// client's request for the GetAWSDefaultServiceQuota operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAWSDefaultServiceQuota for more information on using the GetAWSDefaultServiceQuota
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the GetAWSDefaultServiceQuotaRequest method.
+//    req, resp := client.GetAWSDefaultServiceQuotaRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetAWSDefaultServiceQuota
+func (c *ServiceQuotas) GetAWSDefaultServiceQuotaRequest(input *GetAWSDefaultServiceQuotaInput) (req *request.Request, output *GetAWSDefaultServiceQuotaOutput) {
+	op := &request.Operation{
+		Name:       opGetAWSDefaultServiceQuota,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAWSDefaultServiceQuotaInput{}
+	}
+
+	output = &GetAWSDefaultServiceQuotaOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAWSDefaultServiceQuota API operation for Service Quotas.
+//
+// Retrieves the default service quota values. The Value returned for each
+// quota is the AWS default value, even if the quota has been increased.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Service Quotas's
+// API operation GetAWSDefaultServiceQuota for usage and error information.
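+//
+// A hedged sketch of a typical call (the service and quota codes are
+// illustrative placeholders, and the output's Quota.Value shape follows the
+// structs generated later in this file):
+//
+//    out, err := client.GetAWSDefaultServiceQuota(&servicequotas.GetAWSDefaultServiceQuotaInput{
+//        ServiceCode: aws.String("ec2"),
+//        QuotaCode:   aws.String("L-1216C47A"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Float64Value(out.Quota.Value))
+//    }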
+// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetAWSDefaultServiceQuota +func (c *ServiceQuotas) GetAWSDefaultServiceQuota(input *GetAWSDefaultServiceQuotaInput) (*GetAWSDefaultServiceQuotaOutput, error) { + req, out := c.GetAWSDefaultServiceQuotaRequest(input) + return out, req.Send() +} + +// GetAWSDefaultServiceQuotaWithContext is the same as GetAWSDefaultServiceQuota with the addition of +// the ability to pass a context and additional request options. +// +// See GetAWSDefaultServiceQuota for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) GetAWSDefaultServiceQuotaWithContext(ctx aws.Context, input *GetAWSDefaultServiceQuotaInput, opts ...request.Option) (*GetAWSDefaultServiceQuotaOutput, error) { + req, out := c.GetAWSDefaultServiceQuotaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAssociationForServiceQuotaTemplate = "GetAssociationForServiceQuotaTemplate" + +// GetAssociationForServiceQuotaTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociationForServiceQuotaTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociationForServiceQuotaTemplate for more information on using the GetAssociationForServiceQuotaTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociationForServiceQuotaTemplateRequest method. 
+// req, resp := client.GetAssociationForServiceQuotaTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetAssociationForServiceQuotaTemplate +func (c *ServiceQuotas) GetAssociationForServiceQuotaTemplateRequest(input *GetAssociationForServiceQuotaTemplateInput) (req *request.Request, output *GetAssociationForServiceQuotaTemplateOutput) { + op := &request.Operation{ + Name: opGetAssociationForServiceQuotaTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAssociationForServiceQuotaTemplateInput{} + } + + output = &GetAssociationForServiceQuotaTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociationForServiceQuotaTemplate API operation for Service Quotas. +// +// Retrieves the ServiceQuotaTemplateAssociationStatus value from the service. +// Use this action to determine if the Service Quota template is associated, +// or enabled. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation GetAssociationForServiceQuotaTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeServiceQuotaTemplateNotInUseException "ServiceQuotaTemplateNotInUseException" +// The quota request template is not associated with your organization. +// +// To use the template, call AssociateServiceQuotaTemplate. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetAssociationForServiceQuotaTemplate +func (c *ServiceQuotas) GetAssociationForServiceQuotaTemplate(input *GetAssociationForServiceQuotaTemplateInput) (*GetAssociationForServiceQuotaTemplateOutput, error) { + req, out := c.GetAssociationForServiceQuotaTemplateRequest(input) + return out, req.Send() +} + +// GetAssociationForServiceQuotaTemplateWithContext is the same as GetAssociationForServiceQuotaTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociationForServiceQuotaTemplate for details on how to use this API operation. 
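+//
+// Sketch of checking whether the template is currently associated (the status
+// constant below is assumed to match this package's generated enum values):
+//
+//    out, err := client.GetAssociationForServiceQuotaTemplateWithContext(ctx, &servicequotas.GetAssociationForServiceQuotaTemplateInput{})
+//    if err == nil && aws.StringValue(out.ServiceQuotaTemplateAssociationStatus) == servicequotas.ServiceQuotaTemplateAssociationStatusAssociated {
+//        // Quota increase requests in the template will apply to new accounts.
+//    }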
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) GetAssociationForServiceQuotaTemplateWithContext(ctx aws.Context, input *GetAssociationForServiceQuotaTemplateInput, opts ...request.Option) (*GetAssociationForServiceQuotaTemplateOutput, error) { + req, out := c.GetAssociationForServiceQuotaTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRequestedServiceQuotaChange = "GetRequestedServiceQuotaChange" + +// GetRequestedServiceQuotaChangeRequest generates a "aws/request.Request" representing the +// client's request for the GetRequestedServiceQuotaChange operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRequestedServiceQuotaChange for more information on using the GetRequestedServiceQuotaChange +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRequestedServiceQuotaChangeRequest method. +// req, resp := client.GetRequestedServiceQuotaChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetRequestedServiceQuotaChange +func (c *ServiceQuotas) GetRequestedServiceQuotaChangeRequest(input *GetRequestedServiceQuotaChangeInput) (req *request.Request, output *GetRequestedServiceQuotaChangeOutput) { + op := &request.Operation{ + Name: opGetRequestedServiceQuotaChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRequestedServiceQuotaChangeInput{} + } + + output = &GetRequestedServiceQuotaChangeOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRequestedServiceQuotaChange API operation for Service Quotas. +// +// Retrieves the details for a particular increase request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation GetRequestedServiceQuotaChange for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. 
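+//
+// Sketch of retrieving one request (the request ID is a placeholder; in
+// practice it comes from an earlier quota increase request):
+//
+//    out, err := client.GetRequestedServiceQuotaChange(&servicequotas.GetRequestedServiceQuotaChangeInput{
+//        RequestId: aws.String("example-request-id"),
+//    })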
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetRequestedServiceQuotaChange +func (c *ServiceQuotas) GetRequestedServiceQuotaChange(input *GetRequestedServiceQuotaChangeInput) (*GetRequestedServiceQuotaChangeOutput, error) { + req, out := c.GetRequestedServiceQuotaChangeRequest(input) + return out, req.Send() +} + +// GetRequestedServiceQuotaChangeWithContext is the same as GetRequestedServiceQuotaChange with the addition of +// the ability to pass a context and additional request options. +// +// See GetRequestedServiceQuotaChange for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) GetRequestedServiceQuotaChangeWithContext(ctx aws.Context, input *GetRequestedServiceQuotaChangeInput, opts ...request.Option) (*GetRequestedServiceQuotaChangeOutput, error) { + req, out := c.GetRequestedServiceQuotaChangeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceQuota = "GetServiceQuota" + +// GetServiceQuotaRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceQuota operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceQuota for more information on using the GetServiceQuota +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceQuotaRequest method. +// req, resp := client.GetServiceQuotaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetServiceQuota +func (c *ServiceQuotas) GetServiceQuotaRequest(input *GetServiceQuotaInput) (req *request.Request, output *GetServiceQuotaOutput) { + op := &request.Operation{ + Name: opGetServiceQuota, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceQuotaInput{} + } + + output = &GetServiceQuotaOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceQuota API operation for Service Quotas. +// +// Returns the details for the specified service quota. This operation provides +// a different Value than the GetAWSDefaultServiceQuota operation. This operation +// returns the applied value for each quota. GetAWSDefaultServiceQuota returns +// the default AWS value for each quota. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation GetServiceQuota for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. 
+// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetServiceQuota +func (c *ServiceQuotas) GetServiceQuota(input *GetServiceQuotaInput) (*GetServiceQuotaOutput, error) { + req, out := c.GetServiceQuotaRequest(input) + return out, req.Send() +} + +// GetServiceQuotaWithContext is the same as GetServiceQuota with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceQuota for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) GetServiceQuotaWithContext(ctx aws.Context, input *GetServiceQuotaInput, opts ...request.Option) (*GetServiceQuotaOutput, error) { + req, out := c.GetServiceQuotaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceQuotaIncreaseRequestFromTemplate = "GetServiceQuotaIncreaseRequestFromTemplate" + +// GetServiceQuotaIncreaseRequestFromTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceQuotaIncreaseRequestFromTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceQuotaIncreaseRequestFromTemplate for more information on using the GetServiceQuotaIncreaseRequestFromTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceQuotaIncreaseRequestFromTemplateRequest method. +// req, resp := client.GetServiceQuotaIncreaseRequestFromTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetServiceQuotaIncreaseRequestFromTemplate +func (c *ServiceQuotas) GetServiceQuotaIncreaseRequestFromTemplateRequest(input *GetServiceQuotaIncreaseRequestFromTemplateInput) (req *request.Request, output *GetServiceQuotaIncreaseRequestFromTemplateOutput) { + op := &request.Operation{ + Name: opGetServiceQuotaIncreaseRequestFromTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceQuotaIncreaseRequestFromTemplateInput{} + } + + output = &GetServiceQuotaIncreaseRequestFromTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceQuotaIncreaseRequestFromTemplate API operation for Service Quotas. 
+// +// Returns the details of the service quota increase request in your template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation GetServiceQuotaIncreaseRequestFromTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/GetServiceQuotaIncreaseRequestFromTemplate +func (c *ServiceQuotas) GetServiceQuotaIncreaseRequestFromTemplate(input *GetServiceQuotaIncreaseRequestFromTemplateInput) (*GetServiceQuotaIncreaseRequestFromTemplateOutput, error) { + req, out := c.GetServiceQuotaIncreaseRequestFromTemplateRequest(input) + return out, req.Send() +} + +// GetServiceQuotaIncreaseRequestFromTemplateWithContext is the same as GetServiceQuotaIncreaseRequestFromTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceQuotaIncreaseRequestFromTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) GetServiceQuotaIncreaseRequestFromTemplateWithContext(ctx aws.Context, input *GetServiceQuotaIncreaseRequestFromTemplateInput, opts ...request.Option) (*GetServiceQuotaIncreaseRequestFromTemplateOutput, error) { + req, out := c.GetServiceQuotaIncreaseRequestFromTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAWSDefaultServiceQuotas = "ListAWSDefaultServiceQuotas" + +// ListAWSDefaultServiceQuotasRequest generates a "aws/request.Request" representing the +// client's request for the ListAWSDefaultServiceQuotas operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAWSDefaultServiceQuotas for more information on using the ListAWSDefaultServiceQuotas +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAWSDefaultServiceQuotasRequest method. +// req, resp := client.ListAWSDefaultServiceQuotasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListAWSDefaultServiceQuotas +func (c *ServiceQuotas) ListAWSDefaultServiceQuotasRequest(input *ListAWSDefaultServiceQuotasInput) (req *request.Request, output *ListAWSDefaultServiceQuotasOutput) { + op := &request.Operation{ + Name: opListAWSDefaultServiceQuotas, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAWSDefaultServiceQuotasInput{} + } + + output = &ListAWSDefaultServiceQuotasOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAWSDefaultServiceQuotas API operation for Service Quotas. +// +// Lists all default service quotas for the specified AWS service or all AWS +// services. ListAWSDefaultServiceQuotas is similar to ListServiceQuotas except +// for the Value object. The Value object returned by ListAWSDefaultServiceQuotas +// is the default value assigned by AWS. This request returns a list of all +// service quotas for the specified service. The listing of each you'll see +// the default values are the values that AWS provides for the quotas. +// +// Always check the NextToken response parameter when calling any of the List* +// operations. These operations can return an unexpected list of results, even +// when there are more results available. When this happens, the NextToken response +// parameter contains a value to pass the next call to the same API to request +// the next part of the list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation ListAWSDefaultServiceQuotas for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. 
Slow down the rate of request +// calls, or request an increase for this quota. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListAWSDefaultServiceQuotas +func (c *ServiceQuotas) ListAWSDefaultServiceQuotas(input *ListAWSDefaultServiceQuotasInput) (*ListAWSDefaultServiceQuotasOutput, error) { + req, out := c.ListAWSDefaultServiceQuotasRequest(input) + return out, req.Send() +} + +// ListAWSDefaultServiceQuotasWithContext is the same as ListAWSDefaultServiceQuotas with the addition of +// the ability to pass a context and additional request options. +// +// See ListAWSDefaultServiceQuotas for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListAWSDefaultServiceQuotasWithContext(ctx aws.Context, input *ListAWSDefaultServiceQuotasInput, opts ...request.Option) (*ListAWSDefaultServiceQuotasOutput, error) { + req, out := c.ListAWSDefaultServiceQuotasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAWSDefaultServiceQuotasPages iterates over the pages of a ListAWSDefaultServiceQuotas operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAWSDefaultServiceQuotas method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAWSDefaultServiceQuotas operation. +// pageNum := 0 +// err := client.ListAWSDefaultServiceQuotasPages(params, +// func(page *servicequotas.ListAWSDefaultServiceQuotasOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceQuotas) ListAWSDefaultServiceQuotasPages(input *ListAWSDefaultServiceQuotasInput, fn func(*ListAWSDefaultServiceQuotasOutput, bool) bool) error { + return c.ListAWSDefaultServiceQuotasPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAWSDefaultServiceQuotasPagesWithContext same as ListAWSDefaultServiceQuotasPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListAWSDefaultServiceQuotasPagesWithContext(ctx aws.Context, input *ListAWSDefaultServiceQuotasInput, fn func(*ListAWSDefaultServiceQuotasOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAWSDefaultServiceQuotasInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAWSDefaultServiceQuotasRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAWSDefaultServiceQuotasOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListRequestedServiceQuotaChangeHistory = "ListRequestedServiceQuotaChangeHistory" + +// ListRequestedServiceQuotaChangeHistoryRequest generates a "aws/request.Request" representing the +// client's request for the ListRequestedServiceQuotaChangeHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRequestedServiceQuotaChangeHistory for more information on using the ListRequestedServiceQuotaChangeHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRequestedServiceQuotaChangeHistoryRequest method. +// req, resp := client.ListRequestedServiceQuotaChangeHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListRequestedServiceQuotaChangeHistory +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryRequest(input *ListRequestedServiceQuotaChangeHistoryInput) (req *request.Request, output *ListRequestedServiceQuotaChangeHistoryOutput) { + op := &request.Operation{ + Name: opListRequestedServiceQuotaChangeHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRequestedServiceQuotaChangeHistoryInput{} + } + + output = &ListRequestedServiceQuotaChangeHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRequestedServiceQuotaChangeHistory API operation for Service Quotas. +// +// Requests a list of the changes to quotas for a service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation ListRequestedServiceQuotaChangeHistory for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. 
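+//
+// Sketch of a filtered history call (field values are illustrative; Status
+// accepts the request-status enum strings such as "PENDING"):
+//
+//    out, err := client.ListRequestedServiceQuotaChangeHistory(&servicequotas.ListRequestedServiceQuotaChangeHistoryInput{
+//        ServiceCode: aws.String("ec2"),
+//        Status:      aws.String("PENDING"),
+//        MaxResults:  aws.Int64(10),
+//    })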
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListRequestedServiceQuotaChangeHistory +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistory(input *ListRequestedServiceQuotaChangeHistoryInput) (*ListRequestedServiceQuotaChangeHistoryOutput, error) { + req, out := c.ListRequestedServiceQuotaChangeHistoryRequest(input) + return out, req.Send() +} + +// ListRequestedServiceQuotaChangeHistoryWithContext is the same as ListRequestedServiceQuotaChangeHistory with the addition of +// the ability to pass a context and additional request options. +// +// See ListRequestedServiceQuotaChangeHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryWithContext(ctx aws.Context, input *ListRequestedServiceQuotaChangeHistoryInput, opts ...request.Option) (*ListRequestedServiceQuotaChangeHistoryOutput, error) { + req, out := c.ListRequestedServiceQuotaChangeHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRequestedServiceQuotaChangeHistoryPages iterates over the pages of a ListRequestedServiceQuotaChangeHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRequestedServiceQuotaChangeHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRequestedServiceQuotaChangeHistory operation. +// pageNum := 0 +// err := client.ListRequestedServiceQuotaChangeHistoryPages(params, +// func(page *servicequotas.ListRequestedServiceQuotaChangeHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryPages(input *ListRequestedServiceQuotaChangeHistoryInput, fn func(*ListRequestedServiceQuotaChangeHistoryOutput, bool) bool) error { + return c.ListRequestedServiceQuotaChangeHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRequestedServiceQuotaChangeHistoryPagesWithContext same as ListRequestedServiceQuotaChangeHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryPagesWithContext(ctx aws.Context, input *ListRequestedServiceQuotaChangeHistoryInput, fn func(*ListRequestedServiceQuotaChangeHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRequestedServiceQuotaChangeHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRequestedServiceQuotaChangeHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+		return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListRequestedServiceQuotaChangeHistoryOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opListRequestedServiceQuotaChangeHistoryByQuota = "ListRequestedServiceQuotaChangeHistoryByQuota"
+
+// ListRequestedServiceQuotaChangeHistoryByQuotaRequest generates a "aws/request.Request" representing the
+// client's request for the ListRequestedServiceQuotaChangeHistoryByQuota operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListRequestedServiceQuotaChangeHistoryByQuota for more information on using the ListRequestedServiceQuotaChangeHistoryByQuota
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListRequestedServiceQuotaChangeHistoryByQuotaRequest method.
+//    req, resp := client.ListRequestedServiceQuotaChangeHistoryByQuotaRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListRequestedServiceQuotaChangeHistoryByQuota
+func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryByQuotaRequest(input *ListRequestedServiceQuotaChangeHistoryByQuotaInput) (req *request.Request, output *ListRequestedServiceQuotaChangeHistoryByQuotaOutput) {
+	op := &request.Operation{
+		Name:       opListRequestedServiceQuotaChangeHistoryByQuota,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListRequestedServiceQuotaChangeHistoryByQuotaInput{}
+	}
+
+	output = &ListRequestedServiceQuotaChangeHistoryByQuotaOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListRequestedServiceQuotaChangeHistoryByQuota API operation for Service Quotas.
+//
+// Requests a list of the changes to specific service quotas. This command provides
+// additional granularity over the ListRequestedServiceQuotaChangeHistory command.
+// Once a quota change request has reached CASE_CLOSED, APPROVED, or DENIED,
+// the history is kept for 90 days.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Service Quotas's
+// API operation ListRequestedServiceQuotaChangeHistoryByQuota for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeAccessDeniedException "AccessDeniedException"
+//   You do not have sufficient access to perform this action.
+//
+//   * ErrCodeNoSuchResourceException "NoSuchResourceException"
+//   The specified resource does not exist.
+//
+//   * ErrCodeIllegalArgumentException "IllegalArgumentException"
+//   Invalid input was provided.
+//
+//   * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
+//   Invalid input was provided.
+//
+//   * ErrCodeServiceException "ServiceException"
+//   Something went wrong.
+// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListRequestedServiceQuotaChangeHistoryByQuota +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryByQuota(input *ListRequestedServiceQuotaChangeHistoryByQuotaInput) (*ListRequestedServiceQuotaChangeHistoryByQuotaOutput, error) { + req, out := c.ListRequestedServiceQuotaChangeHistoryByQuotaRequest(input) + return out, req.Send() +} + +// ListRequestedServiceQuotaChangeHistoryByQuotaWithContext is the same as ListRequestedServiceQuotaChangeHistoryByQuota with the addition of +// the ability to pass a context and additional request options. +// +// See ListRequestedServiceQuotaChangeHistoryByQuota for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryByQuotaWithContext(ctx aws.Context, input *ListRequestedServiceQuotaChangeHistoryByQuotaInput, opts ...request.Option) (*ListRequestedServiceQuotaChangeHistoryByQuotaOutput, error) { + req, out := c.ListRequestedServiceQuotaChangeHistoryByQuotaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRequestedServiceQuotaChangeHistoryByQuotaPages iterates over the pages of a ListRequestedServiceQuotaChangeHistoryByQuota operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRequestedServiceQuotaChangeHistoryByQuota method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRequestedServiceQuotaChangeHistoryByQuota operation. +// pageNum := 0 +// err := client.ListRequestedServiceQuotaChangeHistoryByQuotaPages(params, +// func(page *servicequotas.ListRequestedServiceQuotaChangeHistoryByQuotaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryByQuotaPages(input *ListRequestedServiceQuotaChangeHistoryByQuotaInput, fn func(*ListRequestedServiceQuotaChangeHistoryByQuotaOutput, bool) bool) error { + return c.ListRequestedServiceQuotaChangeHistoryByQuotaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRequestedServiceQuotaChangeHistoryByQuotaPagesWithContext same as ListRequestedServiceQuotaChangeHistoryByQuotaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
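+//
+// Sketch of bounding the full pagination with a deadline (the context wiring
+// on the caller's side is an assumption, using the standard library):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := client.ListRequestedServiceQuotaChangeHistoryByQuotaPagesWithContext(ctx, input,
+//        func(page *servicequotas.ListRequestedServiceQuotaChangeHistoryByQuotaOutput, lastPage bool) bool {
+//            return true // keep paging until exhausted or the deadline fires
+//        })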
+func (c *ServiceQuotas) ListRequestedServiceQuotaChangeHistoryByQuotaPagesWithContext(ctx aws.Context, input *ListRequestedServiceQuotaChangeHistoryByQuotaInput, fn func(*ListRequestedServiceQuotaChangeHistoryByQuotaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRequestedServiceQuotaChangeHistoryByQuotaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRequestedServiceQuotaChangeHistoryByQuotaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListRequestedServiceQuotaChangeHistoryByQuotaOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListServiceQuotaIncreaseRequestsInTemplate = "ListServiceQuotaIncreaseRequestsInTemplate" + +// ListServiceQuotaIncreaseRequestsInTemplateRequest generates a "aws/request.Request" representing the +// client's request for the ListServiceQuotaIncreaseRequestsInTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListServiceQuotaIncreaseRequestsInTemplate for more information on using the ListServiceQuotaIncreaseRequestsInTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListServiceQuotaIncreaseRequestsInTemplateRequest method. +// req, resp := client.ListServiceQuotaIncreaseRequestsInTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServiceQuotaIncreaseRequestsInTemplate +func (c *ServiceQuotas) ListServiceQuotaIncreaseRequestsInTemplateRequest(input *ListServiceQuotaIncreaseRequestsInTemplateInput) (req *request.Request, output *ListServiceQuotaIncreaseRequestsInTemplateOutput) { + op := &request.Operation{ + Name: opListServiceQuotaIncreaseRequestsInTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServiceQuotaIncreaseRequestsInTemplateInput{} + } + + output = &ListServiceQuotaIncreaseRequestsInTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListServiceQuotaIncreaseRequestsInTemplate API operation for Service Quotas. +// +// Returns a list of the quota increase requests in the template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation ListServiceQuotaIncreaseRequestsInTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. 
+// +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServiceQuotaIncreaseRequestsInTemplate +func (c *ServiceQuotas) ListServiceQuotaIncreaseRequestsInTemplate(input *ListServiceQuotaIncreaseRequestsInTemplateInput) (*ListServiceQuotaIncreaseRequestsInTemplateOutput, error) { + req, out := c.ListServiceQuotaIncreaseRequestsInTemplateRequest(input) + return out, req.Send() +} + +// ListServiceQuotaIncreaseRequestsInTemplateWithContext is the same as ListServiceQuotaIncreaseRequestsInTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See ListServiceQuotaIncreaseRequestsInTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListServiceQuotaIncreaseRequestsInTemplateWithContext(ctx aws.Context, input *ListServiceQuotaIncreaseRequestsInTemplateInput, opts ...request.Option) (*ListServiceQuotaIncreaseRequestsInTemplateOutput, error) { + req, out := c.ListServiceQuotaIncreaseRequestsInTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListServiceQuotaIncreaseRequestsInTemplatePages iterates over the pages of a ListServiceQuotaIncreaseRequestsInTemplate operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServiceQuotaIncreaseRequestsInTemplate method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServiceQuotaIncreaseRequestsInTemplate operation. 
+// pageNum := 0 +// err := client.ListServiceQuotaIncreaseRequestsInTemplatePages(params, +// func(page *servicequotas.ListServiceQuotaIncreaseRequestsInTemplateOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceQuotas) ListServiceQuotaIncreaseRequestsInTemplatePages(input *ListServiceQuotaIncreaseRequestsInTemplateInput, fn func(*ListServiceQuotaIncreaseRequestsInTemplateOutput, bool) bool) error { + return c.ListServiceQuotaIncreaseRequestsInTemplatePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListServiceQuotaIncreaseRequestsInTemplatePagesWithContext same as ListServiceQuotaIncreaseRequestsInTemplatePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListServiceQuotaIncreaseRequestsInTemplatePagesWithContext(ctx aws.Context, input *ListServiceQuotaIncreaseRequestsInTemplateInput, fn func(*ListServiceQuotaIncreaseRequestsInTemplateOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListServiceQuotaIncreaseRequestsInTemplateInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListServiceQuotaIncreaseRequestsInTemplateRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListServiceQuotaIncreaseRequestsInTemplateOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListServiceQuotas = "ListServiceQuotas" + +// ListServiceQuotasRequest generates a "aws/request.Request" representing the +// client's request for the ListServiceQuotas operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListServiceQuotas for more information on using the ListServiceQuotas +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListServiceQuotasRequest method. +// req, resp := client.ListServiceQuotasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServiceQuotas +func (c *ServiceQuotas) ListServiceQuotasRequest(input *ListServiceQuotasInput) (req *request.Request, output *ListServiceQuotasOutput) { + op := &request.Operation{ + Name: opListServiceQuotas, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServiceQuotasInput{} + } + + output = &ListServiceQuotasOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListServiceQuotas API operation for Service Quotas. 
+//
+// Lists all service quotas for the specified AWS service. This request returns
+// a list of the service quotas for the specified service. You'll see that the
+// default values are the values that AWS provides for the quotas.
+//
+// Always check the NextToken response parameter when calling any of the List*
+// operations. These operations can return an unexpected list of results, even
+// when there are more results available. When this happens, the NextToken response
+// parameter contains a value to pass in the next call to the same API to request
+// the next part of the list.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Service Quotas's
+// API operation ListServiceQuotas for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeAccessDeniedException "AccessDeniedException"
+//   You do not have sufficient access to perform this action.
+//
+//   * ErrCodeNoSuchResourceException "NoSuchResourceException"
+//   The specified resource does not exist.
+//
+//   * ErrCodeIllegalArgumentException "IllegalArgumentException"
+//   Invalid input was provided.
+//
+//   * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
+//   Invalid input was provided.
+//
+//   * ErrCodeServiceException "ServiceException"
+//   Something went wrong.
+//
+//   * ErrCodeTooManyRequestsException "TooManyRequestsException"
+//   Due to throttling, the request was denied. Slow down the rate of request
+//   calls, or request an increase for this quota.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServiceQuotas
+func (c *ServiceQuotas) ListServiceQuotas(input *ListServiceQuotasInput) (*ListServiceQuotasOutput, error) {
+	req, out := c.ListServiceQuotasRequest(input)
+	return out, req.Send()
+}
+
+// ListServiceQuotasWithContext is the same as ListServiceQuotas with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListServiceQuotas for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ServiceQuotas) ListServiceQuotasWithContext(ctx aws.Context, input *ListServiceQuotasInput, opts ...request.Option) (*ListServiceQuotasOutput, error) {
+	req, out := c.ListServiceQuotasRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// ListServiceQuotasPages iterates over the pages of a ListServiceQuotas operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListServiceQuotas method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListServiceQuotas operation.
+//    pageNum := 0
+//    err := client.ListServiceQuotasPages(params,
+//        func(page *servicequotas.ListServiceQuotasOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *ServiceQuotas) ListServiceQuotasPages(input *ListServiceQuotasInput, fn func(*ListServiceQuotasOutput, bool) bool) error {
+	return c.ListServiceQuotasPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListServiceQuotasPagesWithContext same as ListServiceQuotasPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ServiceQuotas) ListServiceQuotasPagesWithContext(ctx aws.Context, input *ListServiceQuotasInput, fn func(*ListServiceQuotasOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListServiceQuotasInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListServiceQuotasRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListServiceQuotasOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opListServices = "ListServices"
+
+// ListServicesRequest generates a "aws/request.Request" representing the
+// client's request for the ListServices operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListServices for more information on using the ListServices
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the ListServicesRequest method.
+//    req, resp := client.ListServicesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServices
+func (c *ServiceQuotas) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) {
+	op := &request.Operation{
+		Name:       opListServices,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListServicesInput{}
+	}
+
+	output = &ListServicesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListServices API operation for Service Quotas.
+//
+// Lists the AWS services available in Service Quotas. Not all AWS services
+// are available in Service Quotas. To see the list of the service quotas
+// for a specific service, use ListServiceQuotas.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
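+//
+// As a rough sketch of how this pairs with ListServiceQuotas (assuming an
+// initialized *ServiceQuotas client named "svc"; the Services and ServiceCode
+// field names below belong to types defined elsewhere in this package):
+//
+//    var codes []string
+//    err := svc.ListServicesPages(&servicequotas.ListServicesInput{},
+//        func(page *servicequotas.ListServicesOutput, lastPage bool) bool {
+//            for _, info := range page.Services {
+//                codes = append(codes, aws.StringValue(info.ServiceCode))
+//            }
+//            return true
+//        })
+//    // each collected code can then be passed as the ServiceCode of a
+//    // ListServiceQuotasInput to enumerate that service's quotas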
+// +// See the AWS API reference guide for Service Quotas's +// API operation ListServices for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException" +// Invalid input was provided. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/ListServices +func (c *ServiceQuotas) ListServices(input *ListServicesInput) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + return out, req.Send() +} + +// ListServicesWithContext is the same as ListServices with the addition of +// the ability to pass a context and additional request options. +// +// See ListServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListServicesWithContext(ctx aws.Context, input *ListServicesInput, opts ...request.Option) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListServicesPages iterates over the pages of a ListServices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServices operation. +// pageNum := 0 +// err := client.ListServicesPages(params, +// func(page *servicequotas.ListServicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceQuotas) ListServicesPages(input *ListServicesInput, fn func(*ListServicesOutput, bool) bool) error { + return c.ListServicesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListServicesPagesWithContext same as ListServicesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) ListServicesPagesWithContext(ctx aws.Context, input *ListServicesInput, fn func(*ListServicesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListServicesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListServicesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
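+			// Each page is built from the fresh copy of the caller's input made
+			// above (inCpy), and it reuses the same ctx and options, so cancelling
+			// ctx halts iteration between pages.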
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutServiceQuotaIncreaseRequestIntoTemplate = "PutServiceQuotaIncreaseRequestIntoTemplate" + +// PutServiceQuotaIncreaseRequestIntoTemplateRequest generates a "aws/request.Request" representing the +// client's request for the PutServiceQuotaIncreaseRequestIntoTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutServiceQuotaIncreaseRequestIntoTemplate for more information on using the PutServiceQuotaIncreaseRequestIntoTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutServiceQuotaIncreaseRequestIntoTemplateRequest method. +// req, resp := client.PutServiceQuotaIncreaseRequestIntoTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/PutServiceQuotaIncreaseRequestIntoTemplate +func (c *ServiceQuotas) PutServiceQuotaIncreaseRequestIntoTemplateRequest(input *PutServiceQuotaIncreaseRequestIntoTemplateInput) (req *request.Request, output *PutServiceQuotaIncreaseRequestIntoTemplateOutput) { + op := &request.Operation{ + Name: opPutServiceQuotaIncreaseRequestIntoTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutServiceQuotaIncreaseRequestIntoTemplateInput{} + } + + output = &PutServiceQuotaIncreaseRequestIntoTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutServiceQuotaIncreaseRequestIntoTemplate API operation for Service Quotas. +// +// Defines and adds a quota to the service quota template. To add a quota to +// the template, you must provide the ServiceCode, QuotaCode, AwsRegion, and +// DesiredValue. Once you add a quota to the template, use ListServiceQuotaIncreaseRequestsInTemplate +// to see the list of quotas in the template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Service Quotas's +// API operation PutServiceQuotaIncreaseRequestIntoTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDeniedException "AccessDeniedException" +// You do not have sufficient access to perform this action. +// +// * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException" +// You can't perform this action because a dependency does not have access. +// +// * ErrCodeServiceException "ServiceException" +// Something went wrong. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Due to throttling, the request was denied. Slow down the rate of request +// calls, or request an increase for this quota. +// +// * ErrCodeIllegalArgumentException "IllegalArgumentException" +// Invalid input was provided. +// +// * ErrCodeQuotaExceededException "QuotaExceededException" +// You have exceeded your service quota. 
To perform the requested action, remove +// some of the relevant resources, or use Service Quotas to request a service +// quota increase. +// +// * ErrCodeNoSuchResourceException "NoSuchResourceException" +// The specified resource does not exist. +// +// * ErrCodeAWSServiceAccessNotEnabledException "AWSServiceAccessNotEnabledException" +// The action you attempted is not allowed unless Service Access with Service +// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. +// +// * ErrCodeTemplatesNotAvailableInRegionException "TemplatesNotAvailableInRegionException" +// The Service Quotas template is not available in the Region where you are +// making the request. Please make the request in us-east-1. +// +// * ErrCodeNoAvailableOrganizationException "NoAvailableOrganizationException" +// The account making this call is not a member of an organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/PutServiceQuotaIncreaseRequestIntoTemplate +func (c *ServiceQuotas) PutServiceQuotaIncreaseRequestIntoTemplate(input *PutServiceQuotaIncreaseRequestIntoTemplateInput) (*PutServiceQuotaIncreaseRequestIntoTemplateOutput, error) { + req, out := c.PutServiceQuotaIncreaseRequestIntoTemplateRequest(input) + return out, req.Send() +} + +// PutServiceQuotaIncreaseRequestIntoTemplateWithContext is the same as PutServiceQuotaIncreaseRequestIntoTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See PutServiceQuotaIncreaseRequestIntoTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceQuotas) PutServiceQuotaIncreaseRequestIntoTemplateWithContext(ctx aws.Context, input *PutServiceQuotaIncreaseRequestIntoTemplateInput, opts ...request.Option) (*PutServiceQuotaIncreaseRequestIntoTemplateOutput, error) { + req, out := c.PutServiceQuotaIncreaseRequestIntoTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRequestServiceQuotaIncrease = "RequestServiceQuotaIncrease" + +// RequestServiceQuotaIncreaseRequest generates a "aws/request.Request" representing the +// client's request for the RequestServiceQuotaIncrease operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RequestServiceQuotaIncrease for more information on using the RequestServiceQuotaIncrease +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RequestServiceQuotaIncreaseRequest method. 
+//    req, resp := client.RequestServiceQuotaIncreaseRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/RequestServiceQuotaIncrease
+func (c *ServiceQuotas) RequestServiceQuotaIncreaseRequest(input *RequestServiceQuotaIncreaseInput) (req *request.Request, output *RequestServiceQuotaIncreaseOutput) {
+	op := &request.Operation{
+		Name:       opRequestServiceQuotaIncrease,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RequestServiceQuotaIncreaseInput{}
+	}
+
+	output = &RequestServiceQuotaIncreaseOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RequestServiceQuotaIncrease API operation for Service Quotas.
+//
+// Submits a request to increase a service quota. The response to this command
+// provides the details of the request in the RequestedServiceQuotaChange object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Service Quotas's
+// API operation RequestServiceQuotaIncrease for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeDependencyAccessDeniedException "DependencyAccessDeniedException"
+//   You can't perform this action because a dependency does not have access.
+//
+//   * ErrCodeQuotaExceededException "QuotaExceededException"
+//   You have exceeded your service quota. To perform the requested action, remove
+//   some of the relevant resources, or use Service Quotas to request a service
+//   quota increase.
+//
+//   * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+//   The specified resource already exists.
+//
+//   * ErrCodeAccessDeniedException "AccessDeniedException"
+//   You do not have sufficient access to perform this action.
+//
+//   * ErrCodeNoSuchResourceException "NoSuchResourceException"
+//   The specified resource does not exist.
+//
+//   * ErrCodeIllegalArgumentException "IllegalArgumentException"
+//   Invalid input was provided.
+//
+//   * ErrCodeInvalidResourceStateException "InvalidResourceStateException"
+//   The resource is in an invalid state.
+//
+//   * ErrCodeServiceException "ServiceException"
+//   Something went wrong.
+//
+//   * ErrCodeTooManyRequestsException "TooManyRequestsException"
+//   Due to throttling, the request was denied. Slow down the rate of request
+//   calls, or request an increase for this quota.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24/RequestServiceQuotaIncrease
+func (c *ServiceQuotas) RequestServiceQuotaIncrease(input *RequestServiceQuotaIncreaseInput) (*RequestServiceQuotaIncreaseOutput, error) {
+	req, out := c.RequestServiceQuotaIncreaseRequest(input)
+	return out, req.Send()
+}
+
+// RequestServiceQuotaIncreaseWithContext is the same as RequestServiceQuotaIncrease with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RequestServiceQuotaIncrease for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
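+//
+// A hypothetical sketch of the awserr handling pattern described above
+// ("svc", ctx, and the input values are assumptions; the field names follow
+// the RequestServiceQuotaIncreaseInput shape defined later in this file):
+//
+//    out, err := svc.RequestServiceQuotaIncreaseWithContext(ctx,
+//        &servicequotas.RequestServiceQuotaIncreaseInput{
+//            ServiceCode:  aws.String("ec2"),
+//            QuotaCode:    aws.String("L-EXAMPLE"),
+//            DesiredValue: aws.Float64(100),
+//        })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case servicequotas.ErrCodeQuotaExceededException:
+//            // remove unused resources or lower the requested value
+//        case servicequotas.ErrCodeTooManyRequestsException:
+//            // back off and retry the call later
+//        }
+//    }
+//    _ = out // on success, out holds the RequestedServiceQuotaChange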
+func (c *ServiceQuotas) RequestServiceQuotaIncreaseWithContext(ctx aws.Context, input *RequestServiceQuotaIncreaseInput, opts ...request.Option) (*RequestServiceQuotaIncreaseOutput, error) { + req, out := c.RequestServiceQuotaIncreaseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssociateServiceQuotaTemplateInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateServiceQuotaTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateServiceQuotaTemplateInput) GoString() string { + return s.String() +} + +type AssociateServiceQuotaTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateServiceQuotaTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateServiceQuotaTemplateOutput) GoString() string { + return s.String() +} + +type DeleteServiceQuotaIncreaseRequestFromTemplateInput struct { + _ struct{} `type:"structure"` + + // Specifies the AWS Region for the quota that you want to delete. + // + // AwsRegion is a required field + AwsRegion *string `min:"1" type:"string" required:"true"` + + // Specifies the code for the quota that you want to delete. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the code for the service that you want to delete. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceQuotaIncreaseRequestFromTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceQuotaIncreaseRequestFromTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceQuotaIncreaseRequestFromTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceQuotaIncreaseRequestFromTemplateInput"} + if s.AwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("AwsRegion")) + } + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) + } + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *DeleteServiceQuotaIncreaseRequestFromTemplateInput) SetAwsRegion(v string) *DeleteServiceQuotaIncreaseRequestFromTemplateInput { + s.AwsRegion = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *DeleteServiceQuotaIncreaseRequestFromTemplateInput) SetQuotaCode(v string) *DeleteServiceQuotaIncreaseRequestFromTemplateInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. 
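+// Like the other generated setters it returns the receiver, so required
+// fields can be populated in one chained expression (the values here are
+// placeholders):
+//
+//    input := (&servicequotas.DeleteServiceQuotaIncreaseRequestFromTemplateInput{}).
+//        SetAwsRegion("us-east-1").
+//        SetQuotaCode("L-EXAMPLE").
+//        SetServiceCode("ec2")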
+func (s *DeleteServiceQuotaIncreaseRequestFromTemplateInput) SetServiceCode(v string) *DeleteServiceQuotaIncreaseRequestFromTemplateInput { + s.ServiceCode = &v + return s +} + +type DeleteServiceQuotaIncreaseRequestFromTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceQuotaIncreaseRequestFromTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceQuotaIncreaseRequestFromTemplateOutput) GoString() string { + return s.String() +} + +type DisassociateServiceQuotaTemplateInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateServiceQuotaTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateServiceQuotaTemplateInput) GoString() string { + return s.String() +} + +type DisassociateServiceQuotaTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateServiceQuotaTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateServiceQuotaTemplateOutput) GoString() string { + return s.String() +} + +// Returns an error that explains why the action did not succeed. +type ErrorReason struct { + _ struct{} `type:"structure"` + + // Service Quotas returns the following error values. + // + // DEPENDENCY_ACCESS_DENIED_ERROR is returned when the caller does not have + // permission to call the service or service quota. To resolve the error, you + // need permission to access the service or service quota. + // + // DEPENDENCY_THROTTLING_ERROR is returned when the service being called is + // throttling Service Quotas. + // + // DEPENDENCY_SERVICE_ERROR is returned when the service being called has availability + // issues. + // + // SERVICE_QUOTA_NOT_AVAILABLE_ERROR is returned when there was an error in + // Service Quotas. + ErrorCode *string `type:"string" enum:"ErrorCode"` + + // The error message that provides more detail. + ErrorMessage *string `type:"string"` +} + +// String returns the string representation +func (s ErrorReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorReason) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ErrorReason) SetErrorCode(v string) *ErrorReason { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ErrorReason) SetErrorMessage(v string) *ErrorReason { + s.ErrorMessage = &v + return s +} + +type GetAWSDefaultServiceQuotaInput struct { + _ struct{} `type:"structure"` + + // Identifies the service quota you want to select. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the service that you want to use. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAWSDefaultServiceQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAWSDefaultServiceQuotaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
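+// The SDK's request handlers normally run this validation before a request
+// is sent, but it can also be called directly to surface missing required
+// fields early. A small sketch:
+//
+//    input := &servicequotas.GetAWSDefaultServiceQuotaInput{}
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // reports both QuotaCode and ServiceCode as missing
+//    }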
+func (s *GetAWSDefaultServiceQuotaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAWSDefaultServiceQuotaInput"} + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *GetAWSDefaultServiceQuotaInput) SetQuotaCode(v string) *GetAWSDefaultServiceQuotaInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *GetAWSDefaultServiceQuotaInput) SetServiceCode(v string) *GetAWSDefaultServiceQuotaInput { + s.ServiceCode = &v + return s +} + +type GetAWSDefaultServiceQuotaOutput struct { + _ struct{} `type:"structure"` + + // Returns the ServiceQuota object which contains all values for a quota. + Quota *ServiceQuota `type:"structure"` +} + +// String returns the string representation +func (s GetAWSDefaultServiceQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAWSDefaultServiceQuotaOutput) GoString() string { + return s.String() +} + +// SetQuota sets the Quota field's value. +func (s *GetAWSDefaultServiceQuotaOutput) SetQuota(v *ServiceQuota) *GetAWSDefaultServiceQuotaOutput { + s.Quota = v + return s +} + +type GetAssociationForServiceQuotaTemplateInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAssociationForServiceQuotaTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociationForServiceQuotaTemplateInput) GoString() string { + return s.String() +} + +type GetAssociationForServiceQuotaTemplateOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the template is ASSOCIATED or DISASSOCIATED. If the template + // is ASSOCIATED, then it requests service quota increases for all new accounts + // created in your organization. + ServiceQuotaTemplateAssociationStatus *string `type:"string" enum:"ServiceQuotaTemplateAssociationStatus"` +} + +// String returns the string representation +func (s GetAssociationForServiceQuotaTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociationForServiceQuotaTemplateOutput) GoString() string { + return s.String() +} + +// SetServiceQuotaTemplateAssociationStatus sets the ServiceQuotaTemplateAssociationStatus field's value. +func (s *GetAssociationForServiceQuotaTemplateOutput) SetServiceQuotaTemplateAssociationStatus(v string) *GetAssociationForServiceQuotaTemplateOutput { + s.ServiceQuotaTemplateAssociationStatus = &v + return s +} + +type GetRequestedServiceQuotaChangeInput struct { + _ struct{} `type:"structure"` + + // Identifies the quota increase request. 
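+	// Typically this is the Id of the RequestedServiceQuotaChange returned by
+	// an earlier RequestServiceQuotaIncrease call or by one of the
+	// ListRequestedServiceQuotaChangeHistory operations.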
+ // + // RequestId is a required field + RequestId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRequestedServiceQuotaChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRequestedServiceQuotaChangeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRequestedServiceQuotaChangeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRequestedServiceQuotaChangeInput"} + if s.RequestId == nil { + invalidParams.Add(request.NewErrParamRequired("RequestId")) + } + if s.RequestId != nil && len(*s.RequestId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestId sets the RequestId field's value. +func (s *GetRequestedServiceQuotaChangeInput) SetRequestId(v string) *GetRequestedServiceQuotaChangeInput { + s.RequestId = &v + return s +} + +type GetRequestedServiceQuotaChangeOutput struct { + _ struct{} `type:"structure"` + + // Returns the RequestedServiceQuotaChange object for the specific increase + // request. + RequestedQuota *RequestedServiceQuotaChange `type:"structure"` +} + +// String returns the string representation +func (s GetRequestedServiceQuotaChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRequestedServiceQuotaChangeOutput) GoString() string { + return s.String() +} + +// SetRequestedQuota sets the RequestedQuota field's value. +func (s *GetRequestedServiceQuotaChangeOutput) SetRequestedQuota(v *RequestedServiceQuotaChange) *GetRequestedServiceQuotaChangeOutput { + s.RequestedQuota = v + return s +} + +type GetServiceQuotaIncreaseRequestFromTemplateInput struct { + _ struct{} `type:"structure"` + + // Specifies the AWS Region for the quota that you want to use. + // + // AwsRegion is a required field + AwsRegion *string `min:"1" type:"string" required:"true"` + + // Specifies the quota you want. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the service that you want to use. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceQuotaIncreaseRequestFromTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceQuotaIncreaseRequestFromTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetServiceQuotaIncreaseRequestFromTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceQuotaIncreaseRequestFromTemplateInput"} + if s.AwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("AwsRegion")) + } + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) + } + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *GetServiceQuotaIncreaseRequestFromTemplateInput) SetAwsRegion(v string) *GetServiceQuotaIncreaseRequestFromTemplateInput { + s.AwsRegion = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *GetServiceQuotaIncreaseRequestFromTemplateInput) SetQuotaCode(v string) *GetServiceQuotaIncreaseRequestFromTemplateInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *GetServiceQuotaIncreaseRequestFromTemplateInput) SetServiceCode(v string) *GetServiceQuotaIncreaseRequestFromTemplateInput { + s.ServiceCode = &v + return s +} + +type GetServiceQuotaIncreaseRequestFromTemplateOutput struct { + _ struct{} `type:"structure"` + + // This object contains the details about the quota increase request. + ServiceQuotaIncreaseRequestInTemplate *ServiceQuotaIncreaseRequestInTemplate `type:"structure"` +} + +// String returns the string representation +func (s GetServiceQuotaIncreaseRequestFromTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceQuotaIncreaseRequestFromTemplateOutput) GoString() string { + return s.String() +} + +// SetServiceQuotaIncreaseRequestInTemplate sets the ServiceQuotaIncreaseRequestInTemplate field's value. +func (s *GetServiceQuotaIncreaseRequestFromTemplateOutput) SetServiceQuotaIncreaseRequestInTemplate(v *ServiceQuotaIncreaseRequestInTemplate) *GetServiceQuotaIncreaseRequestFromTemplateOutput { + s.ServiceQuotaIncreaseRequestInTemplate = v + return s +} + +type GetServiceQuotaInput struct { + _ struct{} `type:"structure"` + + // Identifies the service quota you want to select. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the service that you want to use. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceQuotaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetServiceQuotaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceQuotaInput"} + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *GetServiceQuotaInput) SetQuotaCode(v string) *GetServiceQuotaInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *GetServiceQuotaInput) SetServiceCode(v string) *GetServiceQuotaInput { + s.ServiceCode = &v + return s +} + +type GetServiceQuotaOutput struct { + _ struct{} `type:"structure"` + + // Returns the ServiceQuota object which contains all values for a quota. + Quota *ServiceQuota `type:"structure"` +} + +// String returns the string representation +func (s GetServiceQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceQuotaOutput) GoString() string { + return s.String() +} + +// SetQuota sets the Quota field's value. +func (s *GetServiceQuotaOutput) SetQuota(v *ServiceQuota) *GetServiceQuotaOutput { + s.Quota = v + return s +} + +type ListAWSDefaultServiceQuotasInput struct { + _ struct{} `type:"structure"` + + // (Optional) Limits the number of results that you want to include in the response. + // If you don't include this parameter, the response defaults to a value that's + // specific to the operation. If additional items exist beyond the specified + // maximum, the NextToken element is present and has a value (isn't null). Include + // that value as the NextToken request parameter in the call to the operation + // to get the next part of the results. You should check NextToken after every + // operation to ensure that you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) Use this parameter in a request if you receive a NextToken response + // in a previous request that indicates that there's more output available. + // In a subsequent call, set it to the value of the previous call's NextToken + // response to indicate where the output should continue from. If additional + // items exist beyond the specified maximum, the NextToken element is present + // and has a value (isn't null). Include that value as the NextToken request + // parameter in the call to the operation to get the next part of the results. + // You should check NextToken after every operation to ensure that you receive + // all of the results. + NextToken *string `type:"string"` + + // Specifies the service that you want to use. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAWSDefaultServiceQuotasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAWSDefaultServiceQuotasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAWSDefaultServiceQuotasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAWSDefaultServiceQuotasInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAWSDefaultServiceQuotasInput) SetMaxResults(v int64) *ListAWSDefaultServiceQuotasInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAWSDefaultServiceQuotasInput) SetNextToken(v string) *ListAWSDefaultServiceQuotasInput { + s.NextToken = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *ListAWSDefaultServiceQuotasInput) SetServiceCode(v string) *ListAWSDefaultServiceQuotasInput { + s.ServiceCode = &v + return s +} + +type ListAWSDefaultServiceQuotasOutput struct { + _ struct{} `type:"structure"` + + // (Optional) Use this parameter in a request if you receive a NextToken response + // in a previous request that indicates that there's more output available. + // In a subsequent call, set it to the value of the previous call's NextToken + // response to indicate where the output should continue from. + NextToken *string `type:"string"` + + // A list of the quotas in the account with the AWS default values. + Quotas []*ServiceQuota `type:"list"` +} + +// String returns the string representation +func (s ListAWSDefaultServiceQuotasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAWSDefaultServiceQuotasOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAWSDefaultServiceQuotasOutput) SetNextToken(v string) *ListAWSDefaultServiceQuotasOutput { + s.NextToken = &v + return s +} + +// SetQuotas sets the Quotas field's value. +func (s *ListAWSDefaultServiceQuotasOutput) SetQuotas(v []*ServiceQuota) *ListAWSDefaultServiceQuotasOutput { + s.Quotas = v + return s +} + +type ListRequestedServiceQuotaChangeHistoryByQuotaInput struct { + _ struct{} `type:"structure"` + + // (Optional) Limits the number of results that you want to include in the response. + // If you don't include this parameter, the response defaults to a value that's + // specific to the operation. If additional items exist beyond the specified + // maximum, the NextToken element is present and has a value (isn't null). Include + // that value as the NextToken request parameter in the call to the operation + // to get the next part of the results. You should check NextToken after every + // operation to ensure that you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) Use this parameter in a request if you receive a NextToken response + // in a previous request that indicates that there's more output available. + // In a subsequent call, set it to the value of the previous call's NextToken + // response to indicate where the output should continue from. 
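+	//
+	// A hand-rolled paging loop equivalent to what the Pages helpers automate
+	// might look like the following sketch ("svc", "input", and the process
+	// helper are assumptions):
+	//
+	//    for {
+	//        out, err := svc.ListRequestedServiceQuotaChangeHistoryByQuota(input)
+	//        if err != nil {
+	//            return err
+	//        }
+	//        process(out.RequestedQuotas) // hypothetical page handler
+	//        if aws.StringValue(out.NextToken) == "" {
+	//            break
+	//        }
+	//        input.NextToken = out.NextToken
+	//    }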
+	NextToken *string `type:"string"`
+
+	// Specifies the service quota that you want to use.
+	//
+	// QuotaCode is a required field
+	QuotaCode *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the service that you want to use.
+	//
+	// ServiceCode is a required field
+	ServiceCode *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the status value of the quota increase request.
+	Status *string `type:"string" enum:"RequestStatus"`
+}
+
+// String returns the string representation
+func (s ListRequestedServiceQuotaChangeHistoryByQuotaInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRequestedServiceQuotaChangeHistoryByQuotaInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListRequestedServiceQuotaChangeHistoryByQuotaInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.QuotaCode == nil {
+		invalidParams.Add(request.NewErrParamRequired("QuotaCode"))
+	}
+	if s.QuotaCode != nil && len(*s.QuotaCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1))
+	}
+	if s.ServiceCode == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServiceCode"))
+	}
+	if s.ServiceCode != nil && len(*s.ServiceCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) SetMaxResults(v int64) *ListRequestedServiceQuotaChangeHistoryByQuotaInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) SetNextToken(v string) *ListRequestedServiceQuotaChangeHistoryByQuotaInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetQuotaCode sets the QuotaCode field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) SetQuotaCode(v string) *ListRequestedServiceQuotaChangeHistoryByQuotaInput {
+	s.QuotaCode = &v
+	return s
+}
+
+// SetServiceCode sets the ServiceCode field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) SetServiceCode(v string) *ListRequestedServiceQuotaChangeHistoryByQuotaInput {
+	s.ServiceCode = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryByQuotaInput) SetStatus(v string) *ListRequestedServiceQuotaChangeHistoryByQuotaInput {
+	s.Status = &v
+	return s
+}
+
+type ListRequestedServiceQuotaChangeHistoryByQuotaOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present in the response, this value indicates there's more output available
+	// than what's included in the current response. This can occur even when the
+	// response includes no values at all, such as when you ask for a filtered view
+	// of a very long list. Use this value in the NextToken request parameter in
+	// a subsequent call to the operation to continue processing and get the next
+	// part of the output. You should repeat this until the NextToken response element
+	// comes back empty (as null).
+	NextToken *string `type:"string"`
+
+	// Returns a list of service quota requests.
+ RequestedQuotas []*RequestedServiceQuotaChange `type:"list"` +} + +// String returns the string representation +func (s ListRequestedServiceQuotaChangeHistoryByQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRequestedServiceQuotaChangeHistoryByQuotaOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRequestedServiceQuotaChangeHistoryByQuotaOutput) SetNextToken(v string) *ListRequestedServiceQuotaChangeHistoryByQuotaOutput { + s.NextToken = &v + return s +} + +// SetRequestedQuotas sets the RequestedQuotas field's value. +func (s *ListRequestedServiceQuotaChangeHistoryByQuotaOutput) SetRequestedQuotas(v []*RequestedServiceQuotaChange) *ListRequestedServiceQuotaChangeHistoryByQuotaOutput { + s.RequestedQuotas = v + return s +} + +type ListRequestedServiceQuotaChangeHistoryInput struct { + _ struct{} `type:"structure"` + + // (Optional) Limits the number of results that you want to include in the response. + // If you don't include this parameter, the response defaults to a value that's + // specific to the operation. If additional items exist beyond the specified + // maximum, the NextToken element is present and has a value (isn't null). Include + // that value as the NextToken request parameter in the call to the operation + // to get the next part of the results. You should check NextToken after every + // operation to ensure that you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) Use this parameter in a request if you receive a NextToken response + // in a previous request that indicates that there's more output available. + // In a subsequent call, set it to the value of the previous call's NextToken + // response to indicate where the output should continue from. + NextToken *string `type:"string"` + + // Specifies the service that you want to use. + ServiceCode *string `min:"1" type:"string"` + + // Specifies the status value of the quota increase request. + Status *string `type:"string" enum:"RequestStatus"` +} + +// String returns the string representation +func (s ListRequestedServiceQuotaChangeHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRequestedServiceQuotaChangeHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRequestedServiceQuotaChangeHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRequestedServiceQuotaChangeHistoryInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListRequestedServiceQuotaChangeHistoryInput) SetMaxResults(v int64) *ListRequestedServiceQuotaChangeHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRequestedServiceQuotaChangeHistoryInput) SetNextToken(v string) *ListRequestedServiceQuotaChangeHistoryInput { + s.NextToken = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. 
+func (s *ListRequestedServiceQuotaChangeHistoryInput) SetServiceCode(v string) *ListRequestedServiceQuotaChangeHistoryInput {
+	s.ServiceCode = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryInput) SetStatus(v string) *ListRequestedServiceQuotaChangeHistoryInput {
+	s.Status = &v
+	return s
+}
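+
+// Illustrative sketch (assumption, not generated code): the Status field of
+// the history inputs takes one of the RequestStatus enum values defined at
+// the end of this file, e.g. to list only pending requests:
+//
+//    input := &servicequotas.ListRequestedServiceQuotaChangeHistoryInput{
+//        ServiceCode: aws.String("ec2"),
+//        Status:      aws.String(servicequotas.RequestStatusPending),
+//    }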
+
+type ListRequestedServiceQuotaChangeHistoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present in the response, this value indicates there's more output available
+	// than what's included in the current response. This can occur even when the
+	// response includes no values at all, such as when you ask for a filtered view
+	// of a very long list. Use this value in the NextToken request parameter in
+	// a subsequent call to the operation to continue processing and get the next
+	// part of the output. You should repeat this until the NextToken response element
+	// comes back empty (as null).
+	NextToken *string `type:"string"`
+
+	// Returns a list of service quota requests.
+	RequestedQuotas []*RequestedServiceQuotaChange `type:"list"`
+}
+
+// String returns the string representation
+func (s ListRequestedServiceQuotaChangeHistoryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRequestedServiceQuotaChangeHistoryOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryOutput) SetNextToken(v string) *ListRequestedServiceQuotaChangeHistoryOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetRequestedQuotas sets the RequestedQuotas field's value.
+func (s *ListRequestedServiceQuotaChangeHistoryOutput) SetRequestedQuotas(v []*RequestedServiceQuotaChange) *ListRequestedServiceQuotaChangeHistoryOutput {
+	s.RequestedQuotas = v
+	return s
+}
+
+type ListServiceQuotaIncreaseRequestsInTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the AWS Region for the quota that you want to use.
+	AwsRegion *string `min:"1" type:"string"`
+
+	// (Optional) Limits the number of results that you want to include in the response.
+	// If you don't include this parameter, the response defaults to a value that's
+	// specific to the operation. If additional items exist beyond the specified
+	// maximum, the NextToken element is present and has a value (isn't null). Include
+	// that value as the NextToken request parameter in the call to the operation
+	// to get the next part of the results. You should check NextToken after every
+	// operation to ensure that you receive all of the results.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// (Optional) Use this parameter in a request if you receive a NextToken response
+	// in a previous request that indicates that there's more output available.
+	// In a subsequent call, set it to the value of the previous call's NextToken
+	// response to indicate where the output should continue from.
+	NextToken *string `type:"string"`
+
+	// The identifier for a service. When performing an operation, use the ServiceCode
+	// to specify a particular service.
+	ServiceCode *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListServiceQuotaIncreaseRequestsInTemplateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServiceQuotaIncreaseRequestsInTemplateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListServiceQuotaIncreaseRequestsInTemplateInput"}
+	if s.AwsRegion != nil && len(*s.AwsRegion) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1))
+	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.ServiceCode != nil && len(*s.ServiceCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAwsRegion sets the AwsRegion field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateInput) SetAwsRegion(v string) *ListServiceQuotaIncreaseRequestsInTemplateInput {
+	s.AwsRegion = &v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateInput) SetMaxResults(v int64) *ListServiceQuotaIncreaseRequestsInTemplateInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateInput) SetNextToken(v string) *ListServiceQuotaIncreaseRequestsInTemplateInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetServiceCode sets the ServiceCode field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateInput) SetServiceCode(v string) *ListServiceQuotaIncreaseRequestsInTemplateInput {
+	s.ServiceCode = &v
+	return s
+}
+
+type ListServiceQuotaIncreaseRequestsInTemplateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present in the response, this value indicates there's more output available
+	// than what's included in the current response. This can occur even when the
+	// response includes no values at all, such as when you ask for a filtered view
+	// of a very long list. Use this value in the NextToken request parameter in
+	// a subsequent call to the operation to continue processing and get the next
+	// part of the output. You should repeat this until the NextToken response element
+	// comes back empty (as null).
+	NextToken *string `type:"string"`
+
+	// Returns the list of values of the quota increase request in the template.
+	ServiceQuotaIncreaseRequestInTemplateList []*ServiceQuotaIncreaseRequestInTemplate `type:"list"`
+}
+
+// String returns the string representation
+func (s ListServiceQuotaIncreaseRequestsInTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServiceQuotaIncreaseRequestsInTemplateOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateOutput) SetNextToken(v string) *ListServiceQuotaIncreaseRequestsInTemplateOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetServiceQuotaIncreaseRequestInTemplateList sets the ServiceQuotaIncreaseRequestInTemplateList field's value.
+func (s *ListServiceQuotaIncreaseRequestsInTemplateOutput) SetServiceQuotaIncreaseRequestInTemplateList(v []*ServiceQuotaIncreaseRequestInTemplate) *ListServiceQuotaIncreaseRequestsInTemplateOutput {
+	s.ServiceQuotaIncreaseRequestInTemplateList = v
+	return s
+}
+
+type ListServiceQuotasInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) Limits the number of results that you want to include in the response.
+	// If you don't include this parameter, the response defaults to a value that's
+	// specific to the operation. If additional items exist beyond the specified
+	// maximum, the NextToken element is present and has a value (isn't null). Include
+	// that value as the NextToken request parameter in the call to the operation
+	// to get the next part of the results. You should check NextToken after every
+	// operation to ensure that you receive all of the results.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// (Optional) Use this parameter in a request if you receive a NextToken response
+	// in a previous request that indicates that there's more output available.
+	// In a subsequent call, set it to the value of the previous call's NextToken
+	// response to indicate where the output should continue from.
+	NextToken *string `type:"string"`
+
+	// The identifier for a service. When performing an operation, use the ServiceCode
+	// to specify a particular service.
+	//
+	// ServiceCode is a required field
+	ServiceCode *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListServiceQuotasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServiceQuotasInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListServiceQuotasInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListServiceQuotasInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.ServiceCode == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServiceCode"))
+	}
+	if s.ServiceCode != nil && len(*s.ServiceCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListServiceQuotasInput) SetMaxResults(v int64) *ListServiceQuotasInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListServiceQuotasInput) SetNextToken(v string) *ListServiceQuotasInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetServiceCode sets the ServiceCode field's value.
+func (s *ListServiceQuotasInput) SetServiceCode(v string) *ListServiceQuotasInput {
+	s.ServiceCode = &v
+	return s
+}
+
+type ListServiceQuotasOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present in the response, this value indicates there's more output available
+	// than what's included in the current response. This can occur even when the
+	// response includes no values at all, such as when you ask for a filtered view
+	// of a very long list. Use this value in the NextToken request parameter in
+	// a subsequent call to the operation to continue processing and get the next
+	// part of the output. You should repeat this until the NextToken response element
+	// comes back empty (as null).
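+	//
+	// As an illustrative sketch (not part of the generated API; client setup
+	// and error handling assumed), callers typically loop until NextToken
+	// comes back empty:
+	//
+	//    var quotas []*servicequotas.ServiceQuota
+	//    input := &servicequotas.ListServiceQuotasInput{ServiceCode: aws.String("vpc")}
+	//    for {
+	//        out, err := svc.ListServiceQuotas(input)
+	//        if err != nil {
+	//            break // handle the error
+	//        }
+	//        quotas = append(quotas, out.Quotas...)
+	//        if out.NextToken == nil || *out.NextToken == "" {
+	//            break
+	//        }
+	//        input.NextToken = out.NextToken
+	//    }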
+	NextToken *string `type:"string"`
+
+	// The response information for a quota lists all attribute information for
+	// the quota.
+	Quotas []*ServiceQuota `type:"list"`
+}
+
+// String returns the string representation
+func (s ListServiceQuotasOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServiceQuotasOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListServiceQuotasOutput) SetNextToken(v string) *ListServiceQuotasOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetQuotas sets the Quotas field's value.
+func (s *ListServiceQuotasOutput) SetQuotas(v []*ServiceQuota) *ListServiceQuotasOutput {
+	s.Quotas = v
+	return s
+}
+
+type ListServicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) Limits the number of results that you want to include in the response.
+	// If you don't include this parameter, the response defaults to a value that's
+	// specific to the operation. If additional items exist beyond the specified
+	// maximum, the NextToken element is present and has a value (isn't null). Include
+	// that value as the NextToken request parameter in the call to the operation
+	// to get the next part of the results. You should check NextToken after every
+	// operation to ensure that you receive all of the results.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// (Optional) Use this parameter in a request if you receive a NextToken response
+	// in a previous request that indicates that there's more output available.
+	// In a subsequent call, set it to the value of the previous call's NextToken
+	// response to indicate where the output should continue from.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListServicesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServicesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListServicesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListServicesInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListServicesInput) SetMaxResults(v int64) *ListServicesInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListServicesInput) SetNextToken(v string) *ListServicesInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListServicesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present in the response, this value indicates there's more output available
+	// than what's included in the current response. This can occur even when the
+	// response includes no values at all, such as when you ask for a filtered view
+	// of a very long list. Use this value in the NextToken request parameter in
+	// a subsequent call to the operation to continue processing and get the next
+	// part of the output. You should repeat this until the NextToken response element
+	// comes back empty (as null).
+	NextToken *string `type:"string"`
+
+	// Returns a list of services.
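+	// Each entry pairs a ServiceCode with its human-readable ServiceName. As
+	// an illustrative sketch (variable names assumed, not generated code):
+	//
+	//    for _, info := range out.Services {
+	//        fmt.Println(aws.StringValue(info.ServiceCode), aws.StringValue(info.ServiceName))
+	//    }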
+ Services []*ServiceInfo `type:"list"` +} + +// String returns the string representation +func (s ListServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListServicesOutput) SetNextToken(v string) *ListServicesOutput { + s.NextToken = &v + return s +} + +// SetServices sets the Services field's value. +func (s *ListServicesOutput) SetServices(v []*ServiceInfo) *ListServicesOutput { + s.Services = v + return s +} + +// A structure that uses CloudWatch metrics to gather data about the service +// quota. +type MetricInfo struct { + _ struct{} `type:"structure"` + + // A dimension is a name/value pair that is part of the identity of a metric. + // Every metric has specific characteristics that describe it, and you can think + // of dimensions as categories for those characteristics. These dimensions are + // part of the CloudWatch Metric Identity that measures usage against a particular + // service quota. + MetricDimensions map[string]*string `type:"map"` + + // The name of the CloudWatch metric that measures usage of a service quota. + // This is a required field. + MetricName *string `type:"string"` + + // The namespace of the metric. The namespace is a container for CloudWatch + // metrics. You can specify a name for the namespace when you create a metric. + MetricNamespace *string `type:"string"` + + // Statistics are metric data aggregations over specified periods of time. This + // is the recommended statistic to use when comparing usage in the CloudWatch + // Metric against your Service Quota. + MetricStatisticRecommendation *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricInfo) GoString() string { + return s.String() +} + +// SetMetricDimensions sets the MetricDimensions field's value. +func (s *MetricInfo) SetMetricDimensions(v map[string]*string) *MetricInfo { + s.MetricDimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *MetricInfo) SetMetricName(v string) *MetricInfo { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *MetricInfo) SetMetricNamespace(v string) *MetricInfo { + s.MetricNamespace = &v + return s +} + +// SetMetricStatisticRecommendation sets the MetricStatisticRecommendation field's value. +func (s *MetricInfo) SetMetricStatisticRecommendation(v string) *MetricInfo { + s.MetricStatisticRecommendation = &v + return s +} + +type PutServiceQuotaIncreaseRequestIntoTemplateInput struct { + _ struct{} `type:"structure"` + + // Specifies the AWS Region for the quota. + // + // AwsRegion is a required field + AwsRegion *string `min:"1" type:"string" required:"true"` + + // Specifies the new, increased value for the quota. + // + // DesiredValue is a required field + DesiredValue *float64 `type:"double" required:"true"` + + // Specifies the service quota that you want to use. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the service that you want to use. 
+ // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutServiceQuotaIncreaseRequestIntoTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutServiceQuotaIncreaseRequestIntoTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutServiceQuotaIncreaseRequestIntoTemplateInput"} + if s.AwsRegion == nil { + invalidParams.Add(request.NewErrParamRequired("AwsRegion")) + } + if s.AwsRegion != nil && len(*s.AwsRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsRegion", 1)) + } + if s.DesiredValue == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredValue")) + } + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsRegion sets the AwsRegion field's value. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateInput) SetAwsRegion(v string) *PutServiceQuotaIncreaseRequestIntoTemplateInput { + s.AwsRegion = &v + return s +} + +// SetDesiredValue sets the DesiredValue field's value. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateInput) SetDesiredValue(v float64) *PutServiceQuotaIncreaseRequestIntoTemplateInput { + s.DesiredValue = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateInput) SetQuotaCode(v string) *PutServiceQuotaIncreaseRequestIntoTemplateInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateInput) SetServiceCode(v string) *PutServiceQuotaIncreaseRequestIntoTemplateInput { + s.ServiceCode = &v + return s +} + +type PutServiceQuotaIncreaseRequestIntoTemplateOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about one service quota increase request. + ServiceQuotaIncreaseRequestInTemplate *ServiceQuotaIncreaseRequestInTemplate `type:"structure"` +} + +// String returns the string representation +func (s PutServiceQuotaIncreaseRequestIntoTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutServiceQuotaIncreaseRequestIntoTemplateOutput) GoString() string { + return s.String() +} + +// SetServiceQuotaIncreaseRequestInTemplate sets the ServiceQuotaIncreaseRequestInTemplate field's value. +func (s *PutServiceQuotaIncreaseRequestIntoTemplateOutput) SetServiceQuotaIncreaseRequestInTemplate(v *ServiceQuotaIncreaseRequestInTemplate) *PutServiceQuotaIncreaseRequestIntoTemplateOutput { + s.ServiceQuotaIncreaseRequestInTemplate = v + return s +} + +// A structure that contains information about the quota period. +type QuotaPeriod struct { + _ struct{} `type:"structure"` + + // The time unit of a period. 
+ PeriodUnit *string `type:"string" enum:"PeriodUnit"` + + // The value of a period. + PeriodValue *int64 `type:"integer"` +} + +// String returns the string representation +func (s QuotaPeriod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QuotaPeriod) GoString() string { + return s.String() +} + +// SetPeriodUnit sets the PeriodUnit field's value. +func (s *QuotaPeriod) SetPeriodUnit(v string) *QuotaPeriod { + s.PeriodUnit = &v + return s +} + +// SetPeriodValue sets the PeriodValue field's value. +func (s *QuotaPeriod) SetPeriodValue(v int64) *QuotaPeriod { + s.PeriodValue = &v + return s +} + +type RequestServiceQuotaIncreaseInput struct { + _ struct{} `type:"structure"` + + // Specifies the value submitted in the service quota increase request. + // + // DesiredValue is a required field + DesiredValue *float64 `type:"double" required:"true"` + + // Specifies the service quota that you want to use. + // + // QuotaCode is a required field + QuotaCode *string `min:"1" type:"string" required:"true"` + + // Specifies the service that you want to use. + // + // ServiceCode is a required field + ServiceCode *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestServiceQuotaIncreaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestServiceQuotaIncreaseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestServiceQuotaIncreaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestServiceQuotaIncreaseInput"} + if s.DesiredValue == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredValue")) + } + if s.QuotaCode == nil { + invalidParams.Add(request.NewErrParamRequired("QuotaCode")) + } + if s.QuotaCode != nil && len(*s.QuotaCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QuotaCode", 1)) + } + if s.ServiceCode == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceCode")) + } + if s.ServiceCode != nil && len(*s.ServiceCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDesiredValue sets the DesiredValue field's value. +func (s *RequestServiceQuotaIncreaseInput) SetDesiredValue(v float64) *RequestServiceQuotaIncreaseInput { + s.DesiredValue = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *RequestServiceQuotaIncreaseInput) SetQuotaCode(v string) *RequestServiceQuotaIncreaseInput { + s.QuotaCode = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *RequestServiceQuotaIncreaseInput) SetServiceCode(v string) *RequestServiceQuotaIncreaseInput { + s.ServiceCode = &v + return s +} + +type RequestServiceQuotaIncreaseOutput struct { + _ struct{} `type:"structure"` + + // Returns a list of service quota requests. + RequestedQuota *RequestedServiceQuotaChange `type:"structure"` +} + +// String returns the string representation +func (s RequestServiceQuotaIncreaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestServiceQuotaIncreaseOutput) GoString() string { + return s.String() +} + +// SetRequestedQuota sets the RequestedQuota field's value. 
+func (s *RequestServiceQuotaIncreaseOutput) SetRequestedQuota(v *RequestedServiceQuotaChange) *RequestServiceQuotaIncreaseOutput { + s.RequestedQuota = v + return s +} + +// A structure that contains information about a requested change for a quota. +type RequestedServiceQuotaChange struct { + _ struct{} `type:"structure"` + + // The case Id for the service quota increase request. + CaseId *string `type:"string"` + + // The date and time when the service quota increase request was received and + // the case Id was created. + Created *time.Time `type:"timestamp"` + + // New increased value for the service quota. + DesiredValue *float64 `type:"double"` + + // Identifies if the quota is global. + GlobalQuota *bool `type:"boolean"` + + // The unique identifier of a requested service quota change. + Id *string `min:"1" type:"string"` + + // The date and time of the most recent change in the service quota increase + // request. + LastUpdated *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the service quota. + QuotaArn *string `type:"string"` + + // Specifies the service quota that you want to use. + QuotaCode *string `min:"1" type:"string"` + + // Name of the service quota. + QuotaName *string `type:"string"` + + // The IAM identity who submitted the service quota increase request. + Requester *string `type:"string"` + + // Specifies the service that you want to use. + ServiceCode *string `min:"1" type:"string"` + + // The name of the AWS service specified in the increase request. + ServiceName *string `type:"string"` + + // State of the service quota increase request. + Status *string `type:"string" enum:"RequestStatus"` + + // Specifies the unit used for the quota. + Unit *string `type:"string"` +} + +// String returns the string representation +func (s RequestedServiceQuotaChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestedServiceQuotaChange) GoString() string { + return s.String() +} + +// SetCaseId sets the CaseId field's value. +func (s *RequestedServiceQuotaChange) SetCaseId(v string) *RequestedServiceQuotaChange { + s.CaseId = &v + return s +} + +// SetCreated sets the Created field's value. +func (s *RequestedServiceQuotaChange) SetCreated(v time.Time) *RequestedServiceQuotaChange { + s.Created = &v + return s +} + +// SetDesiredValue sets the DesiredValue field's value. +func (s *RequestedServiceQuotaChange) SetDesiredValue(v float64) *RequestedServiceQuotaChange { + s.DesiredValue = &v + return s +} + +// SetGlobalQuota sets the GlobalQuota field's value. +func (s *RequestedServiceQuotaChange) SetGlobalQuota(v bool) *RequestedServiceQuotaChange { + s.GlobalQuota = &v + return s +} + +// SetId sets the Id field's value. +func (s *RequestedServiceQuotaChange) SetId(v string) *RequestedServiceQuotaChange { + s.Id = &v + return s +} + +// SetLastUpdated sets the LastUpdated field's value. +func (s *RequestedServiceQuotaChange) SetLastUpdated(v time.Time) *RequestedServiceQuotaChange { + s.LastUpdated = &v + return s +} + +// SetQuotaArn sets the QuotaArn field's value. +func (s *RequestedServiceQuotaChange) SetQuotaArn(v string) *RequestedServiceQuotaChange { + s.QuotaArn = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *RequestedServiceQuotaChange) SetQuotaCode(v string) *RequestedServiceQuotaChange { + s.QuotaCode = &v + return s +} + +// SetQuotaName sets the QuotaName field's value. 
+func (s *RequestedServiceQuotaChange) SetQuotaName(v string) *RequestedServiceQuotaChange { + s.QuotaName = &v + return s +} + +// SetRequester sets the Requester field's value. +func (s *RequestedServiceQuotaChange) SetRequester(v string) *RequestedServiceQuotaChange { + s.Requester = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *RequestedServiceQuotaChange) SetServiceCode(v string) *RequestedServiceQuotaChange { + s.ServiceCode = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *RequestedServiceQuotaChange) SetServiceName(v string) *RequestedServiceQuotaChange { + s.ServiceName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *RequestedServiceQuotaChange) SetStatus(v string) *RequestedServiceQuotaChange { + s.Status = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *RequestedServiceQuotaChange) SetUnit(v string) *RequestedServiceQuotaChange { + s.Unit = &v + return s +} + +// A structure that contains the ServiceName and ServiceCode. It does not include +// all details of the service quota. To get those values, use the ListServiceQuotas +// operation. +type ServiceInfo struct { + _ struct{} `type:"structure"` + + // Specifies the service that you want to use. + ServiceCode *string `min:"1" type:"string"` + + // The name of the AWS service specified in the increase request. + ServiceName *string `type:"string"` +} + +// String returns the string representation +func (s ServiceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceInfo) GoString() string { + return s.String() +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *ServiceInfo) SetServiceCode(v string) *ServiceInfo { + s.ServiceCode = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceInfo) SetServiceName(v string) *ServiceInfo { + s.ServiceName = &v + return s +} + +// A structure that contains the full set of details that define the service +// quota. +type ServiceQuota struct { + _ struct{} `type:"structure"` + + // Specifies if the quota value can be increased. + Adjustable *bool `type:"boolean"` + + // Specifies the ErrorCode and ErrorMessage when success isn't achieved. + ErrorReason *ErrorReason `type:"structure"` + + // Specifies if the quota is global. + GlobalQuota *bool `type:"boolean"` + + // Identifies the unit and value of how time is measured. + Period *QuotaPeriod `type:"structure"` + + // The Amazon Resource Name (ARN) of the service quota. + QuotaArn *string `type:"string"` + + // The code identifier for the service quota specified. + QuotaCode *string `min:"1" type:"string"` + + // The name identifier of the service quota. + QuotaName *string `type:"string"` + + // Specifies the service that you want to use. + ServiceCode *string `min:"1" type:"string"` + + // The name of the AWS service specified in the increase request. + ServiceName *string `type:"string"` + + // The unit of measurement for the value of the service quota. + Unit *string `type:"string"` + + // Specifies the details about the measurement. + UsageMetric *MetricInfo `type:"structure"` + + // The value of service quota. 
+ Value *float64 `type:"double"` +} + +// String returns the string representation +func (s ServiceQuota) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceQuota) GoString() string { + return s.String() +} + +// SetAdjustable sets the Adjustable field's value. +func (s *ServiceQuota) SetAdjustable(v bool) *ServiceQuota { + s.Adjustable = &v + return s +} + +// SetErrorReason sets the ErrorReason field's value. +func (s *ServiceQuota) SetErrorReason(v *ErrorReason) *ServiceQuota { + s.ErrorReason = v + return s +} + +// SetGlobalQuota sets the GlobalQuota field's value. +func (s *ServiceQuota) SetGlobalQuota(v bool) *ServiceQuota { + s.GlobalQuota = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *ServiceQuota) SetPeriod(v *QuotaPeriod) *ServiceQuota { + s.Period = v + return s +} + +// SetQuotaArn sets the QuotaArn field's value. +func (s *ServiceQuota) SetQuotaArn(v string) *ServiceQuota { + s.QuotaArn = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *ServiceQuota) SetQuotaCode(v string) *ServiceQuota { + s.QuotaCode = &v + return s +} + +// SetQuotaName sets the QuotaName field's value. +func (s *ServiceQuota) SetQuotaName(v string) *ServiceQuota { + s.QuotaName = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *ServiceQuota) SetServiceCode(v string) *ServiceQuota { + s.ServiceCode = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceQuota) SetServiceName(v string) *ServiceQuota { + s.ServiceName = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *ServiceQuota) SetUnit(v string) *ServiceQuota { + s.Unit = &v + return s +} + +// SetUsageMetric sets the UsageMetric field's value. +func (s *ServiceQuota) SetUsageMetric(v *MetricInfo) *ServiceQuota { + s.UsageMetric = v + return s +} + +// SetValue sets the Value field's value. +func (s *ServiceQuota) SetValue(v float64) *ServiceQuota { + s.Value = &v + return s +} + +// A structure that contains information about one service quota increase request. +type ServiceQuotaIncreaseRequestInTemplate struct { + _ struct{} `type:"structure"` + + // The AWS Region where the increase request occurs. + AwsRegion *string `min:"1" type:"string"` + + // Identifies the new, increased value of the service quota in the increase + // request. + DesiredValue *float64 `type:"double"` + + // Specifies if the quota is a global quota. + GlobalQuota *bool `type:"boolean"` + + // The code identifier for the service quota specified in the increase request. + QuotaCode *string `min:"1" type:"string"` + + // The name of the service quota in the increase request. + QuotaName *string `type:"string"` + + // The code identifier for the AWS service specified in the increase request. + ServiceCode *string `min:"1" type:"string"` + + // The name of the AWS service specified in the increase request. + ServiceName *string `type:"string"` + + // The unit of measure for the increase request. + Unit *string `type:"string"` +} + +// String returns the string representation +func (s ServiceQuotaIncreaseRequestInTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceQuotaIncreaseRequestInTemplate) GoString() string { + return s.String() +} + +// SetAwsRegion sets the AwsRegion field's value. 
+func (s *ServiceQuotaIncreaseRequestInTemplate) SetAwsRegion(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.AwsRegion = &v + return s +} + +// SetDesiredValue sets the DesiredValue field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetDesiredValue(v float64) *ServiceQuotaIncreaseRequestInTemplate { + s.DesiredValue = &v + return s +} + +// SetGlobalQuota sets the GlobalQuota field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetGlobalQuota(v bool) *ServiceQuotaIncreaseRequestInTemplate { + s.GlobalQuota = &v + return s +} + +// SetQuotaCode sets the QuotaCode field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetQuotaCode(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.QuotaCode = &v + return s +} + +// SetQuotaName sets the QuotaName field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetQuotaName(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.QuotaName = &v + return s +} + +// SetServiceCode sets the ServiceCode field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetServiceCode(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.ServiceCode = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetServiceName(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.ServiceName = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *ServiceQuotaIncreaseRequestInTemplate) SetUnit(v string) *ServiceQuotaIncreaseRequestInTemplate { + s.Unit = &v + return s +} + +const ( + // ErrorCodeDependencyAccessDeniedError is a ErrorCode enum value + ErrorCodeDependencyAccessDeniedError = "DEPENDENCY_ACCESS_DENIED_ERROR" + + // ErrorCodeDependencyThrottlingError is a ErrorCode enum value + ErrorCodeDependencyThrottlingError = "DEPENDENCY_THROTTLING_ERROR" + + // ErrorCodeDependencyServiceError is a ErrorCode enum value + ErrorCodeDependencyServiceError = "DEPENDENCY_SERVICE_ERROR" + + // ErrorCodeServiceQuotaNotAvailableError is a ErrorCode enum value + ErrorCodeServiceQuotaNotAvailableError = "SERVICE_QUOTA_NOT_AVAILABLE_ERROR" +) + +const ( + // PeriodUnitMicrosecond is a PeriodUnit enum value + PeriodUnitMicrosecond = "MICROSECOND" + + // PeriodUnitMillisecond is a PeriodUnit enum value + PeriodUnitMillisecond = "MILLISECOND" + + // PeriodUnitSecond is a PeriodUnit enum value + PeriodUnitSecond = "SECOND" + + // PeriodUnitMinute is a PeriodUnit enum value + PeriodUnitMinute = "MINUTE" + + // PeriodUnitHour is a PeriodUnit enum value + PeriodUnitHour = "HOUR" + + // PeriodUnitDay is a PeriodUnit enum value + PeriodUnitDay = "DAY" + + // PeriodUnitWeek is a PeriodUnit enum value + PeriodUnitWeek = "WEEK" +) + +const ( + // RequestStatusPending is a RequestStatus enum value + RequestStatusPending = "PENDING" + + // RequestStatusCaseOpened is a RequestStatus enum value + RequestStatusCaseOpened = "CASE_OPENED" + + // RequestStatusApproved is a RequestStatus enum value + RequestStatusApproved = "APPROVED" + + // RequestStatusDenied is a RequestStatus enum value + RequestStatusDenied = "DENIED" + + // RequestStatusCaseClosed is a RequestStatus enum value + RequestStatusCaseClosed = "CASE_CLOSED" +) + +const ( + // ServiceQuotaTemplateAssociationStatusAssociated is a ServiceQuotaTemplateAssociationStatus enum value + ServiceQuotaTemplateAssociationStatusAssociated = "ASSOCIATED" + + // ServiceQuotaTemplateAssociationStatusDisassociated is a ServiceQuotaTemplateAssociationStatus enum value + 
	ServiceQuotaTemplateAssociationStatusDisassociated = "DISASSOCIATED"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/doc.go
new file mode 100644
index 00000000000..3f952b88279
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/doc.go
@@ -0,0 +1,40 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package servicequotas provides the client and types for making API
+// requests to Service Quotas.
+//
+// Service Quotas is a web service that you can use to manage many of your AWS
+// service quotas. Quotas, also referred to as limits, are the maximum values
+// for a resource, item, or operation. This guide provides descriptions of the
+// Service Quotas actions that you can call from an API. For the Service Quotas
+// user guide, which explains how to use Service Quotas from the console, see
+// What is Service Quotas (https://docs.aws.amazon.com/servicequotas/latest/userguide/intro.html).
+//
+// AWS provides SDKs that consist of libraries and sample code for programming
+// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
+// provide a convenient way to create programmatic access to Service Quotas
+// and AWS. For information about the AWS SDKs, including how to download and
+// install them, see the Tools for Amazon Web Services (https://docs.aws.amazon.com/aws.amazon.com/tools)
+// page.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/service-quotas-2019-06-24 for more information on this service.
+//
+// See servicequotas package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/servicequotas/
+//
+// Using the Client
+//
+// To contact Service Quotas with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Service Quotas client ServiceQuotas for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/servicequotas/#New
+package servicequotas
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/errors.go
new file mode 100644
index 00000000000..6afc7a200bc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/errors.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package servicequotas
+
+const (
+
+	// ErrCodeAWSServiceAccessNotEnabledException for service response error code
+	// "AWSServiceAccessNotEnabledException".
+	//
+	// The action you attempted is not allowed unless Service Access with Service
+	// Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate.
+	ErrCodeAWSServiceAccessNotEnabledException = "AWSServiceAccessNotEnabledException"
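+
+	// Illustrative sketch (assumption, not generated code): service access
+	// can be enabled from the organization's management account with:
+	//
+	//    _, err := svc.AssociateServiceQuotaTemplate(&servicequotas.AssociateServiceQuotaTemplateInput{})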
+
+	// ErrCodeAccessDeniedException for service response error code
+	// "AccessDeniedException".
+	//
+	// You do not have sufficient access to perform this action.
+	ErrCodeAccessDeniedException = "AccessDeniedException"
+
+	// ErrCodeDependencyAccessDeniedException for service response error code
+	// "DependencyAccessDeniedException".
+	//
+	// You can't perform this action because a dependency does not have access.
+	ErrCodeDependencyAccessDeniedException = "DependencyAccessDeniedException"
+
+	// ErrCodeIllegalArgumentException for service response error code
+	// "IllegalArgumentException".
+	//
+	// Invalid input was provided.
+	ErrCodeIllegalArgumentException = "IllegalArgumentException"
+
+	// ErrCodeInvalidPaginationTokenException for service response error code
+	// "InvalidPaginationTokenException".
+	//
+	// Invalid input was provided.
+	ErrCodeInvalidPaginationTokenException = "InvalidPaginationTokenException"
+
+	// ErrCodeInvalidResourceStateException for service response error code
+	// "InvalidResourceStateException".
+	//
+	// The resource is in an invalid state.
+	ErrCodeInvalidResourceStateException = "InvalidResourceStateException"
+
+	// ErrCodeNoAvailableOrganizationException for service response error code
+	// "NoAvailableOrganizationException".
+	//
+	// The account making this call is not a member of an organization.
+	ErrCodeNoAvailableOrganizationException = "NoAvailableOrganizationException"
+
+	// ErrCodeNoSuchResourceException for service response error code
+	// "NoSuchResourceException".
+	//
+	// The specified resource does not exist.
+	ErrCodeNoSuchResourceException = "NoSuchResourceException"
+
+	// ErrCodeOrganizationNotInAllFeaturesModeException for service response error code
+	// "OrganizationNotInAllFeaturesModeException".
+	//
+	// The organization that your account belongs to is not in All Features mode.
+	// To enable all features mode, see EnableAllFeatures (https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnableAllFeatures.html).
+	ErrCodeOrganizationNotInAllFeaturesModeException = "OrganizationNotInAllFeaturesModeException"
+
+	// ErrCodeQuotaExceededException for service response error code
+	// "QuotaExceededException".
+	//
+	// You have exceeded your service quota. To perform the requested action, remove
+	// some of the relevant resources, or use Service Quotas to request a service
+	// quota increase.
+	ErrCodeQuotaExceededException = "QuotaExceededException"
+
+	// ErrCodeResourceAlreadyExistsException for service response error code
+	// "ResourceAlreadyExistsException".
+	//
+	// The specified resource already exists.
+	ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"
+
+	// ErrCodeServiceException for service response error code
+	// "ServiceException".
+	//
+	// Something went wrong.
+	ErrCodeServiceException = "ServiceException"
+
+	// ErrCodeServiceQuotaTemplateNotInUseException for service response error code
+	// "ServiceQuotaTemplateNotInUseException".
+	//
+	// The quota request template is not associated with your organization.
+	//
+	// To use the template, call AssociateServiceQuotaTemplate.
+	ErrCodeServiceQuotaTemplateNotInUseException = "ServiceQuotaTemplateNotInUseException"
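+
+	// Illustrative sketch (assumption, not generated code): these constants
+	// are matched against the code of an awserr.Error returned by the client:
+	//
+	//    if aerr, ok := err.(awserr.Error); ok {
+	//        switch aerr.Code() {
+	//        case servicequotas.ErrCodeNoSuchResourceException:
+	//            // no applied quota exists; fall back to the default value
+	//        case servicequotas.ErrCodeTooManyRequestsException:
+	//            // throttled; retry with backoff
+	//        }
+	//    }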
+
+	// ErrCodeTemplatesNotAvailableInRegionException for service response error code
+	// "TemplatesNotAvailableInRegionException".
+	//
+	// The Service Quotas template is not available in the Region where you are
+	// making the request. Please make the request in us-east-1.
+	ErrCodeTemplatesNotAvailableInRegionException = "TemplatesNotAvailableInRegionException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Due to throttling, the request was denied. Slow down the rate of request
+	// calls, or request an increase for this quota.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go
new file mode 100644
index 00000000000..4c064181c98
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go
@@ -0,0 +1,98 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package servicequotas
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// ServiceQuotas provides the API operation methods for making requests to
+// Service Quotas. See this package's package overview docs
+// for details on the service.
+//
+// ServiceQuotas methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type ServiceQuotas struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "Service Quotas" // Name of service.
+	EndpointsID = "servicequotas"  // ID to lookup a service endpoint with.
+	ServiceID   = "Service Quotas" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the ServiceQuotas client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a ServiceQuotas client from just a session.
+//     svc := servicequotas.New(mySession)
+//
+//     // Create a ServiceQuotas client with additional configuration
+//     svc := servicequotas.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceQuotas {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceQuotas { + svc := &ServiceQuotas{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-24", + JSONVersion: "1.1", + TargetPrefix: "ServiceQuotasV20190624", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ServiceQuotas operation and runs any +// custom request initialization. +func (c *ServiceQuotas) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/api.go index 691f57e582f..5a2bc39fdbc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -63,7 +63,7 @@ func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req * // independent of the source rule set. // // For information about setting up rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). // // You can execute this operation no more than once per second. // @@ -83,7 +83,7 @@ func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req * // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CloneReceiptRuleSet func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceiptRuleSetOutput, error) { @@ -155,7 +155,7 @@ func (c *SES) CreateConfigurationSetRequest(input *CreateConfigurationSetInput) // Creates a configuration set. // // Configuration sets enable you to publish email sending events. For information -// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// about using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). // // You can execute this operation no more than once per second. 
// @@ -177,7 +177,7 @@ func (c *SES) CreateConfigurationSetRequest(input *CreateConfigurationSetInput) // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSet func (c *SES) CreateConfigurationSet(input *CreateConfigurationSetInput) (*CreateConfigurationSetOutput, error) { @@ -254,7 +254,7 @@ func (c *SES) CreateConfigurationSetEventDestinationRequest(input *CreateConfigu // // An event destination is the AWS service to which Amazon SES publishes the // email sending events associated with a configuration set. For information -// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// about using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). // // You can execute this operation no more than once per second. // @@ -287,7 +287,7 @@ func (c *SES) CreateConfigurationSetEventDestinationRequest(input *CreateConfigu // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSetEventDestination func (c *SES) CreateConfigurationSetEventDestination(input *CreateConfigurationSetEventDestinationInput) (*CreateConfigurationSetEventDestinationOutput, error) { @@ -362,7 +362,7 @@ func (c *SES) CreateConfigurationSetTrackingOptionsRequest(input *CreateConfigur // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. You can configure a subdomain of // your own to handle these events. For information about using custom domains, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -457,7 +457,7 @@ func (c *SES) CreateCustomVerificationEmailTemplateRequest(input *CreateCustomVe // Creates a new custom verification email template. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. 
@@ -484,7 +484,7 @@ func (c *SES) CreateCustomVerificationEmailTemplateRequest(input *CreateCustomVe // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateCustomVerificationEmailTemplate func (c *SES) CreateCustomVerificationEmailTemplate(input *CreateCustomVerificationEmailTemplateInput) (*CreateCustomVerificationEmailTemplateOutput, error) { @@ -556,7 +556,7 @@ func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req * // Creates a new IP address filter. // // For information about setting up IP address filters, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). // // You can execute this operation no more than once per second. // @@ -570,7 +570,7 @@ func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req * // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // * ErrCodeAlreadyExistsException "AlreadyExists" // Indicates that a resource could not be created because of a naming conflict. @@ -645,7 +645,7 @@ func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *requ // Creates a receipt rule. // // For information about setting up receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). // // You can execute this operation no more than once per second. // @@ -660,19 +660,19 @@ func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *requ // * ErrCodeInvalidSnsTopicException "InvalidSnsTopic" // Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES // could not publish to the topic, possibly due to permissions issues. For information -// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// about giving permissions, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeInvalidS3ConfigurationException "InvalidS3Configuration" // Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is // invalid, or that Amazon SES could not publish to the bucket, possibly due // to permissions issues. For information about giving permissions, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
+// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeInvalidLambdaFunctionException "InvalidLambdaFunction" // Indicates that the provided AWS Lambda function is invalid, or that Amazon // SES could not execute the provided function, possibly due to permissions // issues. For information about giving permissions, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeAlreadyExistsException "AlreadyExists" // Indicates that a resource could not be created because of a naming conflict. @@ -685,7 +685,7 @@ func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *requ // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRule func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRuleOutput, error) { @@ -757,7 +757,7 @@ func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req // Creates an empty receipt rule set. // // For information about setting up receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). // // You can execute this operation no more than once per second. // @@ -774,7 +774,7 @@ func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRuleSet func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateReceiptRuleSetOutput, error) { @@ -845,7 +845,7 @@ func (c *SES) CreateTemplateRequest(input *CreateTemplateInput) (req *request.Re // // Creates an email template. Email templates enable you to send personalized // email to one or more destinations in a single API operation. For more information, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). // // You can execute this operation no more than once per second. // @@ -866,7 +866,7 @@ func (c *SES) CreateTemplateRequest(input *CreateTemplateInput) (req *request.Re // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. 
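The CreateTemplate hunk above is again only a link fix, but the operation anchors the templated-email flow used later in this file (SendTemplatedEmail, SendBulkTemplatedEmail), so a small usage sketch may help; the template name and parts are placeholders:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    // {{name}} is a template placeholder substituted at send time.
    _, err := client.CreateTemplate(&ses.CreateTemplateInput{
        Template: &ses.Template{
            TemplateName: aws.String("WelcomeTemplate"),
            SubjectPart:  aws.String("Hello, {{name}}"),
            TextPart:     aws.String("Hi {{name}}, welcome aboard."),
            HtmlPart:     aws.String("<p>Hi <b>{{name}}</b>, welcome aboard.</p>"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}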
-// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplate func (c *SES) CreateTemplate(input *CreateTemplateInput) (*CreateTemplateOutput, error) { @@ -937,7 +937,7 @@ func (c *SES) DeleteConfigurationSetRequest(input *DeleteConfigurationSetInput) // // Deletes a configuration set. Configuration sets enable you to publish email // sending events. For information about using configuration sets, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). // // You can execute this operation no more than once per second. // @@ -1022,7 +1022,7 @@ func (c *SES) DeleteConfigurationSetEventDestinationRequest(input *DeleteConfigu // Deletes a configuration set event destination. Configuration set event destinations // are associated with configuration sets, which enable you to publish email // sending events. For information about using configuration sets, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). // // You can execute this operation no more than once per second. // @@ -1113,7 +1113,7 @@ func (c *SES) DeleteConfigurationSetTrackingOptionsRequest(input *DeleteConfigur // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. You can configure a subdomain of // your own to handle these events. For information about using custom domains, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). // // Deleting this kind of association will result in emails sent using the specified // configuration set to capture open and click events using the standard, Amazon @@ -1203,7 +1203,7 @@ func (c *SES) DeleteCustomVerificationEmailTemplateRequest(input *DeleteCustomVe // Deletes an existing custom verification email template. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -1368,7 +1368,7 @@ func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req // // Sending authorization is a feature that enables an identity owner to authorize // other senders to use its identities. For information about using sending -// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
+// authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // You can execute this operation no more than once per second. // @@ -1448,7 +1448,7 @@ func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req * // Deletes the specified IP address filter. // // For information about managing IP address filters, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). // // You can execute this operation no more than once per second. // @@ -1528,7 +1528,7 @@ func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *requ // Deletes the specified receipt rule. // // For information about managing receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). // // You can execute this operation no more than once per second. // @@ -1615,7 +1615,7 @@ func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req // The currently active rule set cannot be deleted. // // For information about managing receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // You can execute this operation no more than once per second. // @@ -1853,7 +1853,7 @@ func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRu // active. // // For information about setting up receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). // // You can execute this operation no more than once per second. // @@ -1930,7 +1930,7 @@ func (c *SES) DescribeConfigurationSetRequest(input *DescribeConfigurationSetInp // DescribeConfigurationSet API operation for Amazon Simple Email Service. // // Returns the details of the specified configuration set. For information about -// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). // // You can execute this operation no more than once per second. // @@ -2014,7 +2014,7 @@ func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req * // Returns the details of the specified receipt rule. // // For information about setting up receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). // // You can execute this operation no more than once per second. 
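Many doc comments in this file repeat the advice to use runtime type assertions on awserr.Error. A hedged sketch of that pattern against one of the operations above; the set name is a placeholder:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    out, err := client.DescribeConfigurationSet(&ses.DescribeConfigurationSetInput{
        ConfigurationSetName: aws.String("my-config-set"), // placeholder
    })
    if err != nil {
        // Code() lets callers branch on the service error without string matching.
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ses.ErrCodeConfigurationSetDoesNotExistException {
            fmt.Println("no such configuration set:", aerr.Message())
            return
        }
        log.Fatal(err)
    }
    fmt.Println(out)
}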
// @@ -2101,7 +2101,7 @@ func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) // Returns the details of the specified receipt rule set. // // For information about managing receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // You can execute this operation no more than once per second. // @@ -2263,7 +2263,7 @@ func (c *SES) GetCustomVerificationEmailTemplateRequest(input *GetCustomVerifica // specify. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -2367,7 +2367,7 @@ func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesI // attributes for up to 100 identities at a time. // // For more information about creating DNS records using DKIM tokens, go to -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2526,7 +2526,7 @@ func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotific // attributes for up to 100 identities at a time. // // For more information about using notifications with Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2610,7 +2610,7 @@ func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req * // // Sending authorization is a feature that enables an identity owner to authorize // other senders to use its identities. For information about using sending -// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // You can execute this operation no more than once per second. // @@ -3020,7 +3020,7 @@ func (c *SES) ListConfigurationSetsRequest(input *ListConfigurationSetsInput) (r // // Provides a list of the configuration sets associated with your Amazon SES // account in the current AWS Region. For information about using configuration -// sets, see Monitoring Your Amazon SES Sending Activity (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) +// sets, see Monitoring Your Amazon SES Sending Activity (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) // in the Amazon SES Developer Guide. 
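Earlier in this hunk, GetIdentityDkimAttributes documents a 100-identity batch limit and the retrieval of DKIM tokens. A small sketch of reading those attributes, assuming a placeholder domain identity:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    // Up to 100 identities per call, per the doc comment above.
    out, err := client.GetIdentityDkimAttributes(&ses.GetIdentityDkimAttributesInput{
        Identities: []*string{aws.String("example.com")}, // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }
    for identity, attrs := range out.DkimAttributes {
        fmt.Println(identity, aws.StringValue(attrs.DkimVerificationStatus))
        for _, token := range attrs.DkimTokens {
            fmt.Println("  token:", aws.StringValue(token))
        }
    }
}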
// // You can execute this operation no more than once per second. This operation @@ -3112,7 +3112,7 @@ func (c *SES) ListCustomVerificationEmailTemplatesRequest(input *ListCustomVerif // the current AWS Region. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -3156,7 +3156,7 @@ func (c *SES) ListCustomVerificationEmailTemplatesWithContext(ctx aws.Context, i // // Example iterating over at most 3 pages of a ListCustomVerificationEmailTemplates operation. // pageNum := 0 // err := client.ListCustomVerificationEmailTemplatesPages(params, -// func(page *ListCustomVerificationEmailTemplatesOutput, lastPage bool) bool { +// func(page *ses.ListCustomVerificationEmailTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3188,10 +3188,12 @@ func (c *SES) ListCustomVerificationEmailTemplatesPagesWithContext(ctx aws.Conte }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCustomVerificationEmailTemplatesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCustomVerificationEmailTemplatesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3290,7 +3292,7 @@ func (c *SES) ListIdentitiesWithContext(ctx aws.Context, input *ListIdentitiesIn // // Example iterating over at most 3 pages of a ListIdentities operation. // pageNum := 0 // err := client.ListIdentitiesPages(params, -// func(page *ListIdentitiesOutput, lastPage bool) bool { +// func(page *ses.ListIdentitiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3322,10 +3324,12 @@ func (c *SES) ListIdentitiesPagesWithContext(ctx aws.Context, input *ListIdentit }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListIdentitiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListIdentitiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3382,7 +3386,7 @@ func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req // // Sending authorization is a feature that enables an identity owner to authorize // other senders to use its identities. For information about using sending -// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // You can execute this operation no more than once per second. // @@ -3462,7 +3466,7 @@ func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *re // AWS Region. // // For information about managing IP address filters, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). // // You can execute this operation no more than once per second. // @@ -3544,7 +3548,7 @@ func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req * // to retrieve the additional entries. 
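Beyond the link fixes, the two pagination hunks above make a real change: the doc examples now package-qualify the callback's page type (*ses.ListIdentitiesOutput), and the loop body drops the cont flag in favor of an immediate break, which appears to avoid one extra p.Next() call after the callback returns false. The caller-facing contract is unchanged, as in this sketch:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    err := client.ListIdentitiesPages(&ses.ListIdentitiesInput{},
        func(page *ses.ListIdentitiesOutput, lastPage bool) bool {
            for _, identity := range page.Identities {
                fmt.Println(aws.StringValue(identity))
            }
            return true // returning false now breaks out of the loop immediately
        })
    if err != nil { // p.Err() from the rewritten loop surfaces here
        log.Fatal(err)
    }
}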
// // For information about managing receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // You can execute this operation no more than once per second. // @@ -3728,6 +3732,89 @@ func (c *SES) ListVerifiedEmailAddressesWithContext(ctx aws.Context, input *List return out, req.Send() } +const opPutConfigurationSetDeliveryOptions = "PutConfigurationSetDeliveryOptions" + +// PutConfigurationSetDeliveryOptionsRequest generates an "aws/request.Request" representing the +// client's request for the PutConfigurationSetDeliveryOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See PutConfigurationSetDeliveryOptions for more information on using the PutConfigurationSetDeliveryOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the PutConfigurationSetDeliveryOptionsRequest method. +// req, resp := client.PutConfigurationSetDeliveryOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/PutConfigurationSetDeliveryOptions +func (c *SES) PutConfigurationSetDeliveryOptionsRequest(input *PutConfigurationSetDeliveryOptionsInput) (req *request.Request, output *PutConfigurationSetDeliveryOptionsOutput) { + op := &request.Operation{ + Name: opPutConfigurationSetDeliveryOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutConfigurationSetDeliveryOptionsInput{} + } + + output = &PutConfigurationSetDeliveryOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutConfigurationSetDeliveryOptions API operation for Amazon Simple Email Service. +// +// Adds or updates the delivery options for a configuration set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation PutConfigurationSetDeliveryOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeInvalidDeliveryOptionsException "InvalidDeliveryOptions" +// Indicates that the provided delivery option is invalid.
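PutConfigurationSetDeliveryOptions above is a genuinely new operation in this bump, and its request swaps in protocol.UnmarshalDiscardBodyHandler, so the output struct carries no data beyond success or failure. A hedged call sketch; the set name is a placeholder, and TlsPolicyRequire is the ses package's existing enum constant:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    // Require TLS for mail sent with this configuration set; use
    // ses.TlsPolicyOptional to allow plaintext fallback instead.
    _, err := client.PutConfigurationSetDeliveryOptions(&ses.PutConfigurationSetDeliveryOptionsInput{
        ConfigurationSetName: aws.String("my-config-set"), // placeholder
        DeliveryOptions: &ses.DeliveryOptions{
            TlsPolicy: aws.String(ses.TlsPolicyRequire),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}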
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/PutConfigurationSetDeliveryOptions +func (c *SES) PutConfigurationSetDeliveryOptions(input *PutConfigurationSetDeliveryOptionsInput) (*PutConfigurationSetDeliveryOptionsOutput, error) { + req, out := c.PutConfigurationSetDeliveryOptionsRequest(input) + return out, req.Send() +} + +// PutConfigurationSetDeliveryOptionsWithContext is the same as PutConfigurationSetDeliveryOptions with the addition of +// the ability to pass a context and additional request options. +// +// See PutConfigurationSetDeliveryOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) PutConfigurationSetDeliveryOptionsWithContext(ctx aws.Context, input *PutConfigurationSetDeliveryOptionsInput, opts ...request.Option) (*PutConfigurationSetDeliveryOptionsOutput, error) { + req, out := c.PutConfigurationSetDeliveryOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutIdentityPolicy = "PutIdentityPolicy" // PutIdentityPolicyRequest generates a "aws/request.Request" representing the @@ -3781,7 +3868,7 @@ func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *requ // // Sending authorization is a feature that enables an identity owner to authorize // other senders to use its identities. For information about using sending -// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // You can execute this operation no more than once per second. // @@ -3871,7 +3958,7 @@ func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (r // position all of the rules. // // For information about managing receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // You can execute this operation no more than once per second. // @@ -3963,7 +4050,7 @@ func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, o // by Amazon SES. // // For information about receiving email through Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). // // You can execute this operation no more than once per second. // @@ -4059,7 +4146,7 @@ func (c *SES) SendBulkTemplatedEmailRequest(input *SendBulkTemplatedEmailInput) // * If your account is still in the Amazon SES sandbox, you may only send // to verified addresses or domains, or to email addresses associated with // the Amazon SES Mailbox Simulator. 
For more information, see Verifying -// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// Email Addresses and Domains (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // // * The maximum message size is 10 MB. @@ -4096,7 +4183,7 @@ func (c *SES) SendBulkTemplatedEmailRequest(input *SendBulkTemplatedEmailInput) // Indicates that the message could not be sent because Amazon SES could not // read the MX record required to use the specified MAIL FROM domain. For information // about editing the custom MAIL FROM domain settings for an identity, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). // // * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" // Indicates that the configuration set does not exist. @@ -4189,7 +4276,7 @@ func (c *SES) SendCustomVerificationEmailRequest(input *SendCustomVerificationEm // // To use this operation, you must first create a custom verification email // template. For more information about creating and using custom verification -// email templates, see Using Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// email templates, see Using Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -4298,7 +4385,7 @@ func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, out // * If your account is still in the Amazon SES sandbox, you may only send // to verified addresses or domains, or to email addresses associated with // the Amazon SES Mailbox Simulator. For more information, see Verifying -// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// Email Addresses and Domains (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // // * The maximum message size is 10 MB. @@ -4319,7 +4406,7 @@ func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, out // each recipient in the To:, CC: and BCC: fields) is counted against the maximum // number of emails you can send in a 24-hour period (your sending quota). For // more information about sending quotas in Amazon SES, see Managing Your Amazon -// SES Sending Limits (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) +// SES Sending Limits (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) // in the Amazon SES Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4338,7 +4425,7 @@ func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, out // Indicates that the message could not be sent because Amazon SES could not // read the MX record required to use the specified MAIL FROM domain. For information // about editing the custom MAIL FROM domain settings for an identity, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). 
+// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). // // * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" // Indicates that the configuration set does not exist. @@ -4431,14 +4518,14 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // // The SendRawEmail operation has the following requirements: // -// * You can only send email from verified email addresses or domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// * You can only send email from verified email addresses or domains (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // If you try to send email from an address that isn't verified, the operation // results in an "Email address not verified" error. // -// * If your account is still in the Amazon SES sandbox (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html), +// * If your account is still in the Amazon SES sandbox (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html), // you can only send email to other verified addresses in your account, or // to addresses that are associated with the Amazon SES mailbox simulator -// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mailbox-simulator.html). +// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mailbox-simulator.html). // // * The maximum message size, including attachments, is 10 MB. // @@ -4461,7 +4548,7 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // of your message (for example, if you use open and click tracking), 8-bit // content isn't preserved. For this reason, we highly recommend that you // encode all content that isn't 7-bit ASCII. For more information, see MIME -// Encoding (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html#send-email-mime-encoding) +// Encoding (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html#send-email-mime-encoding) // in the Amazon SES Developer Guide. // // Additionally, keep the following considerations in mind when using the SendRawEmail @@ -4477,30 +4564,19 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // the email's Source, From, and Return-Path parameters in one of two ways: // you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn // to the API, or you can include the following X-headers in the header of -// your raw email: -// -// X-SES-SOURCE-ARN -// -// X-SES-FROM-ARN -// -// X-SES-RETURN-PATH-ARN -// -// Do not include these X-headers in the DKIM signature; Amazon SES will remove -// them before sending the email. -// -// For most common sending authorization scenarios, we recommend that you specify -// the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn -// parameters. If you only specify the SourceIdentityArn parameter, Amazon -// SES will set the From and Return Path addresses to the identity specified -// in SourceIdentityArn. For more information about sending authorization, -// see the Using Sending Authorization with Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html) +// your raw email: X-SES-SOURCE-ARN X-SES-FROM-ARN X-SES-RETURN-PATH-ARN +// Don't include these X-headers in the DKIM signature. Amazon SES removes +// these before it sends the email. 
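To make the reworked sending-authorization bullet above concrete: the sending identity can be asserted either with the X-SES-SOURCE-ARN header or with the SourceArn parameter, as in this hedged sketch; addresses and the ARN are placeholders:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    // A minimal 7-bit ASCII MIME message, per the encoding advice above.
    raw := "From: sender@example.com\r\n" +
        "To: recipient@example.com\r\n" +
        "Subject: Test\r\n" +
        "MIME-Version: 1.0\r\n" +
        "Content-Type: text/plain; charset=us-ascii\r\n" +
        "\r\n" +
        "Sent with SendRawEmail.\r\n"

    _, err := client.SendRawEmail(&ses.SendRawEmailInput{
        RawMessage: &ses.RawMessage{Data: []byte(raw)},
        // Equivalent to the X-SES-SOURCE-ARN header; the ARN is a placeholder.
        SourceArn: aws.String("arn:aws:ses:us-west-2:123456789012:identity/example.com"),
    })
    if err != nil {
        log.Fatal(err)
    }
}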
If you only specify the SourceIdentityArn +// parameter, Amazon SES sets the From and Return-Path addresses to the same +// identity that you specified. For more information about sending authorization, +// see the Using Sending Authorization with Amazon SES (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html) // in the Amazon SES Developer Guide. // // * For every message that you send, the total number of recipients (including // each recipient in the To:, CC: and BCC: fields) is counted against the // maximum number of emails you can send in a 24-hour period (your sending // quota). For more information about sending quotas in Amazon SES, see Managing -// Your Amazon SES Sending Limits (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) +// Your Amazon SES Sending Limits (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) // in the Amazon SES Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4519,7 +4595,7 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // Indicates that the message could not be sent because Amazon SES could not // read the MX record required to use the specified MAIL FROM domain. For information // about editing the custom MAIL FROM domain settings for an identity, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). // // * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" // Indicates that the configuration set does not exist. @@ -4615,7 +4691,7 @@ func (c *SES) SendTemplatedEmailRequest(input *SendTemplatedEmailInput) (req *re // * If your account is still in the Amazon SES sandbox, you may only send // to verified addresses or domains, or to email addresses associated with // the Amazon SES Mailbox Simulator. For more information, see Verifying -// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// Email Addresses and Domains (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // // * The maximum message size is 10 MB. @@ -4640,7 +4716,7 @@ func (c *SES) SendTemplatedEmailRequest(input *SendTemplatedEmailInput) (req *re // // For these reasons, we highly recommend that you set up Amazon SES to send // you notifications when Rendering Failure events occur. For more information, -// see Sending Personalized Email Using the Amazon SES API (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html) +// see Sending Personalized Email Using the Amazon SES API (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html) // in the Amazon Simple Email Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4659,7 +4735,7 @@ func (c *SES) SendTemplatedEmailRequest(input *SendTemplatedEmailInput) (req *re // Indicates that the message could not be sent because Amazon SES could not // read the MX record required to use the specified MAIL FROM domain. 
For information // about editing the custom MAIL FROM domain settings for an identity, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). // // * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" // Indicates that the configuration set does not exist. @@ -4752,7 +4828,7 @@ func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput // this API with RuleSetName set to null. // // For information about managing receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // You can execute this operation no more than once per second. // @@ -4834,24 +4910,24 @@ func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) // SetIdentityDkimEnabled API operation for Amazon Simple Email Service. // -// Enables or disables Easy DKIM signing of email sent from an identity: -// -// * If Easy DKIM signing is enabled for a domain name identity (such as -// example.com), then Amazon SES will DKIM-sign all email sent by addresses -// under that domain name (for example, user@example.com). -// -// * If Easy DKIM signing is enabled for an email address, then Amazon SES -// will DKIM-sign all email sent by that email address. +// Enables or disables Easy DKIM signing of email sent from an identity. If +// Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign +// all email that it sends from addresses on that domain. If Easy DKIM signing +// is enabled for an email address, then Amazon SES uses DKIM to sign all email +// it sends from that address. // // For email addresses (for example, user@example.com), you can only enable -// Easy DKIM signing if the corresponding domain (in this case, example.com) -// has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim -// operation. +// DKIM signing if the corresponding domain (in this case, example.com) has +// been set up to use Easy DKIM. +// +// You can enable DKIM signing for an identity at any time after you start the +// verification process for the identity, even if the verification process isn't +// complete. // // You can execute this operation no more than once per second. // // For more information about Easy DKIM signing, go to the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4937,7 +5013,7 @@ func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeed // You can execute this operation no more than once per second. // // For more information about using notifications with Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). // // Returns awserr.Error for service API and SDK errors. 
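The rewritten SetIdentityDkimEnabled documentation above clarifies that DKIM signing can be toggled even before identity verification completes. A minimal call sketch with a placeholder domain:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    // Enabling is allowed even while verification of the identity is pending.
    _, err := client.SetIdentityDkimEnabled(&ses.SetIdentityDkimEnabledInput{
        Identity:    aws.String("example.com"), // placeholder
        DkimEnabled: aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
}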
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5019,7 +5095,7 @@ func (c *SES) SetIdentityHeadersInNotificationsEnabledRequest(input *SetIdentity // You can execute this operation no more than once per second. // // For more information about using notifications with Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5100,7 +5176,7 @@ func (c *SES) SetIdentityMailFromDomainRequest(input *SetIdentityMailFromDomainI // To send emails using the specified MAIL FROM domain, you must add an MX record // to your MAIL FROM domain's DNS settings. If you want your emails to pass // Sender Policy Framework (SPF) checks, you must also add or update an SPF -// record. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-set.html). +// record. For more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-set.html). // // You can execute this operation no more than once per second. // @@ -5187,7 +5263,7 @@ func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotification // You can execute this operation no more than once per second. // // For more information about feedback notification, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5265,7 +5341,7 @@ func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) // Sets the position of the specified receipt rule in the receipt rule set. // // For information about managing receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). // // You can execute this operation no more than once per second. // @@ -5527,7 +5603,7 @@ func (c *SES) UpdateConfigurationSetEventDestinationRequest(input *UpdateConfigu // are associated with configuration sets, which enable you to publish email // sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple // Notification Service (Amazon SNS). For information about using configuration -// sets, see Monitoring Your Amazon SES Sending Activity (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) +// sets, see Monitoring Your Amazon SES Sending Activity (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) // in the Amazon SES Developer Guide. // // When you create or update an event destination, you must provide one, and @@ -5807,7 +5883,7 @@ func (c *SES) UpdateConfigurationSetTrackingOptionsRequest(input *UpdateConfigur // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. 
You can configure a subdomain of // your own to handle these events. For information about using custom domains, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5901,7 +5977,7 @@ func (c *SES) UpdateCustomVerificationEmailTemplateRequest(input *UpdateCustomVe // Updates an existing custom verification email template. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -5996,7 +6072,7 @@ func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *requ // Updates a receipt rule. // // For information about managing receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). // // You can execute this operation no more than once per second. // @@ -6011,19 +6087,19 @@ func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *requ // * ErrCodeInvalidSnsTopicException "InvalidSnsTopic" // Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES // could not publish to the topic, possibly due to permissions issues. For information -// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// about giving permissions, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeInvalidS3ConfigurationException "InvalidS3Configuration" // Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is // invalid, or that Amazon SES could not publish to the bucket, possibly due // to permissions issues. For information about giving permissions, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeInvalidLambdaFunctionException "InvalidLambdaFunction" // Indicates that the provided AWS Lambda function is invalid, or that Amazon // SES could not execute the provided function, possibly due to permissions // issues. For information about giving permissions, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" // Indicates that the provided receipt rule set does not exist. 
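The UpdateReceiptRule error codes above (InvalidSnsTopic, InvalidS3Configuration, InvalidLambdaFunction) all trace back to permissions on the rule's actions. A hedged sketch of an update that stores inbound mail in S3; rule set, rule, and bucket names are placeholders, and the bucket must already grant Amazon SES write access:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    _, err := client.UpdateReceiptRule(&ses.UpdateReceiptRuleInput{
        RuleSetName: aws.String("my-rule-set"), // placeholder
        Rule: &ses.ReceiptRule{
            Name:    aws.String("store-to-s3"),
            Enabled: aws.Bool(true),
            Actions: []*ses.ReceiptAction{{
                // Fails with InvalidS3Configuration if SES cannot write here.
                S3Action: &ses.S3Action{
                    BucketName:      aws.String("my-inbound-mail-bucket"),
                    ObjectKeyPrefix: aws.String("inbox/"),
                },
            }},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}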
@@ -6033,7 +6109,7 @@ func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *requ // // * ErrCodeLimitExceededException "LimitExceeded" // Indicates that a resource could not be created because of service limits. -// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateReceiptRule func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRuleOutput, error) { @@ -6104,7 +6180,7 @@ func (c *SES) UpdateTemplateRequest(input *UpdateTemplateInput) (req *request.Re // // Updates an email template. Email templates enable you to send personalized // email to one or more destinations in a single API operation. For more information, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). // // You can execute this operation no more than once per second. // @@ -6190,21 +6266,35 @@ func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *reques // VerifyDomainDkim API operation for Amazon Simple Email Service. // -// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings -// that represent your domain's identity. Using these tokens, you will need -// to create DNS CNAME records that point to DKIM public keys hosted by Amazon -// SES. Amazon Web Services will eventually detect that you have updated your -// DNS records; this detection process may take up to 72 hours. Upon successful -// detection, Amazon SES will be able to DKIM-sign email originating from that -// domain. +// Returns a set of DKIM tokens for a domain identity. // -// You can execute this operation no more than once per second. +// When you execute the VerifyDomainDkim operation, the domain that you specify +// is added to the list of identities that are associated with your account. +// This is true even if you haven't already associated the domain with your +// account by using the VerifyDomainIdentity operation. However, you can't send +// email from the domain until you either successfully verify it (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html) +// or you successfully set up DKIM for it (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). // -// To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled -// operation. +// You use the tokens that are generated by this operation to create CNAME records. +// When Amazon SES detects that you've added these records to the DNS configuration +// for a domain, you can start sending email from that domain. You can start +// sending email even if you haven't added the TXT record provided by the VerifyDomainIdentity +// operation to the DNS configuration for your domain. All email that you send +// from the domain is authenticated using DKIM. // -// For more information about creating DNS records using DKIM tokens, go to -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). 
+// To create the CNAME records for DKIM authentication, use the following values: +// +// * Name: token._domainkey.example.com +// +// * Type: CNAME +// +// * Value: token.dkim.amazonses.com +// +// In the preceding example, replace token with one of the tokens that are generated +// when you execute this operation. Replace example.com with your domain. Repeat +// this process for each token that's generated by this operation. +// +// You can execute this operation no more than once per second. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6280,7 +6370,7 @@ func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req // // Adds a domain to the list of identities for your Amazon SES account in the // current AWS Region and attempts to verify it. For more information about -// verifying domains, see Verifying Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// verifying domains, see Verifying Email Addresses and Domains (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. @@ -6471,7 +6561,7 @@ func (c *SES) VerifyEmailIdentityWithContext(ctx aws.Context, input *VerifyEmail // email. // // For information about adding a header using a receipt rule, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). type AddHeaderAction struct { _ struct{} `type:"structure"` @@ -6589,7 +6679,7 @@ func (s *Body) SetText(v *Content) *Body { // to Amazon Simple Notification Service (Amazon SNS). // // For information about sending a bounce message in response to a received -// email, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). +// email, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). type BounceAction struct { _ struct{} `type:"structure"` @@ -6615,7 +6705,7 @@ type BounceAction struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the // bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). TopicArn *string `type:"string"` } @@ -6682,7 +6772,7 @@ func (s *BounceAction) SetTopicArn(v string) *BounceAction { // (DSN) when an email that Amazon SES receives on your behalf bounces. // // For information about receiving email through Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). type BouncedRecipientInfo struct { _ struct{} `type:"structure"` @@ -6697,7 +6787,7 @@ type BouncedRecipientInfo struct { // This parameter is used only for sending authorization. 
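The rewritten VerifyDomainDkim documentation above spells out the CNAME records to build from the returned tokens. A sketch that prints them in that Name/Type/Value shape, with example.com as a placeholder:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ses"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    client := ses.New(sess)

    out, err := client.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
        Domain: aws.String("example.com"), // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }
    // Each token yields one CNAME record, per the doc comment above.
    for _, token := range out.DkimTokens {
        t := aws.StringValue(token)
        fmt.Printf("%s._domainkey.example.com CNAME %s.dkim.amazonses.com\n", t, t)
    }
}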
It is the ARN of the // identity that is associated with the sending authorization policy that permits // you to receive email for the recipient of the bounced email. For more information - // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // about sending authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). RecipientArn *string `type:"string"` // Recipient-related DSN fields, most of which would normally be filled in automatically @@ -6927,7 +7017,7 @@ func (s *BulkEmailDestinationStatus) SetStatus(v string) *BulkEmailDestinationSt // Represents a request to create a receipt rule set by cloning an existing // one. You use receipt rule sets to receive email with Amazon SES. For more -// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CloneReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -7007,7 +7097,7 @@ func (s CloneReceiptRuleSetOutput) GoString() string { // // Event destinations, such as Amazon CloudWatch, are associated with configuration // sets, which enable you to publish email sending events. For information about -// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type CloudWatchDestination struct { _ struct{} `type:"structure"` @@ -7061,7 +7151,7 @@ func (s *CloudWatchDestination) SetDimensionConfigurations(v []*CloudWatchDimens // events to Amazon CloudWatch. // // For information about publishing email sending events to Amazon CloudWatch, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type CloudWatchDimensionConfiguration struct { _ struct{} `type:"structure"` @@ -7149,8 +7239,8 @@ func (s *CloudWatchDimensionConfiguration) SetDimensionValueSource(v string) *Cl // // Configuration sets let you create groups of rules that you can apply to the // emails you send using Amazon SES. For more information about using configuration -// sets, see Using Amazon SES Configuration Sets (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-configuration-sets.html) -// in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/). +// sets, see Using Amazon SES Configuration Sets (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-configuration-sets.html) +// in the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/). type ConfigurationSet struct { _ struct{} `type:"structure"` @@ -7250,7 +7340,7 @@ func (s *Content) SetData(v string) *Content { // set event destination, which can be either Amazon CloudWatch or Amazon Kinesis // Firehose, describes an AWS service in which Amazon SES publishes the email // sending events associated with a configuration set. 
For information about -// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type CreateConfigurationSetEventDestinationInput struct { _ struct{} `type:"structure"` @@ -7327,7 +7417,7 @@ func (s CreateConfigurationSetEventDestinationOutput) GoString() string { // Represents a request to create a configuration set. Configuration sets enable // you to publish email sending events. For information about using configuration -// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type CreateConfigurationSetInput struct { _ struct{} `type:"structure"` @@ -7402,7 +7492,7 @@ type CreateConfigurationSetTrackingOptionsInput struct { // emails. // // For more information, see Configuring Custom Domains to Handle Open and Click - // Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) + // Tracking (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide. // // TrackingOptions is a required field @@ -7486,7 +7576,7 @@ type CreateCustomVerificationEmailTemplateInput struct { // The content of the custom verification email. The total size of the email // must be less than 10 MB. The message body may contain HTML, with some limitations. // For more information, see Custom Verification Email Frequently Asked Questions - // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) + // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) // in the Amazon SES Developer Guide. // // TemplateContent is a required field @@ -7593,7 +7683,7 @@ func (s CreateCustomVerificationEmailTemplateOutput) GoString() string { // Represents a request to create a new IP address filter. You use IP address // filters when you receive email with Amazon SES. For more information, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptFilterInput struct { _ struct{} `type:"structure"` @@ -7655,7 +7745,7 @@ func (s CreateReceiptFilterOutput) GoString() string { // Represents a request to create a receipt rule. You use receipt rules to receive // email with Amazon SES. For more information, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -7742,7 +7832,7 @@ func (s CreateReceiptRuleOutput) GoString() string { // Represents a request to create an empty receipt rule set. You use receipt // rule sets to receive email with Amazon SES. For more information, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). 
+// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -7804,7 +7894,7 @@ func (s CreateReceiptRuleSetOutput) GoString() string { } // Represents a request to create an email template. For more information, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). type CreateTemplateInput struct { _ struct{} `type:"structure"` @@ -7928,7 +8018,7 @@ func (s *CustomVerificationEmailTemplate) SetTemplateSubject(v string) *CustomVe // Represents a request to delete a configuration set event destination. Configuration // set event destinations are associated with configuration sets, which enable // you to publish email sending events. For information about using configuration -// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type DeleteConfigurationSetEventDestinationInput struct { _ struct{} `type:"structure"` @@ -7998,7 +8088,7 @@ func (s DeleteConfigurationSetEventDestinationOutput) GoString() string { // Represents a request to delete a configuration set. Configuration sets enable // you to publish email sending events. For information about using configuration -// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type DeleteConfigurationSetInput struct { _ struct{} `type:"structure"` @@ -8219,7 +8309,7 @@ func (s DeleteIdentityOutput) GoString() string { // Represents a request to delete a sending authorization policy for an identity. // Sending authorization is an Amazon SES feature that enables you to authorize // other senders to use your identities. For information, see the Amazon SES -// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type DeleteIdentityPolicyInput struct { _ struct{} `type:"structure"` @@ -8296,7 +8386,7 @@ func (s DeleteIdentityPolicyOutput) GoString() string { // Represents a request to delete an IP address filter. You use IP address filters // when you receive email with Amazon SES. For more information, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DeleteReceiptFilterInput struct { _ struct{} `type:"structure"` @@ -8352,7 +8442,7 @@ func (s DeleteReceiptFilterOutput) GoString() string { // Represents a request to delete a receipt rule. You use receipt rules to receive // email with Amazon SES. For more information, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). 
type DeleteReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -8422,7 +8512,7 @@ func (s DeleteReceiptRuleOutput) GoString() string { // Represents a request to delete a receipt rule set and all of the receipt // rules it contains. You use receipt rule sets to receive email with Amazon -// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// SES. For more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DeleteReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -8477,7 +8567,7 @@ func (s DeleteReceiptRuleSetOutput) GoString() string { } // Represents a request to delete an email template. For more information, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). type DeleteTemplateInput struct { _ struct{} `type:"structure"` @@ -8584,10 +8674,38 @@ func (s DeleteVerifiedEmailAddressOutput) GoString() string { return s.String() } +// Specifies whether messages that use the configuration set are required to +// use Transport Layer Security (TLS). +type DeliveryOptions struct { + _ struct{} `type:"structure"` + + // Specifies whether messages that use the configuration set are required to + // use Transport Layer Security (TLS). If the value is Require, messages are + // only delivered if a TLS connection can be established. If the value is Optional, + // messages can be delivered in plain text if a TLS connection can't be established. + TlsPolicy *string `type:"string" enum:"TlsPolicy"` +} + +// String returns the string representation +func (s DeliveryOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryOptions) GoString() string { + return s.String() +} + +// SetTlsPolicy sets the TlsPolicy field's value. +func (s *DeliveryOptions) SetTlsPolicy(v string) *DeliveryOptions { + s.TlsPolicy = &v + return s +} + // Represents a request to return the metadata and receipt rules for the receipt // rule set that is currently active. You use receipt rule sets to receive email // with Amazon SES. For more information, see the Amazon SES Developer Guide -// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeActiveReceiptRuleSetInput struct { _ struct{} `type:"structure"` } @@ -8639,7 +8757,7 @@ func (s *DescribeActiveReceiptRuleSetOutput) SetRules(v []*ReceiptRule) *Describ // Represents a request to return the details of a configuration set. Configuration // sets enable you to publish email sending events. For information about using -// configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type DescribeConfigurationSetInput struct { _ struct{} `type:"structure"` @@ -8689,7 +8807,7 @@ func (s *DescribeConfigurationSetInput) SetConfigurationSetName(v string) *Descr // Represents the details of a configuration set. 
Configuration sets enable // you to publish email sending events. For information about using configuration -// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type DescribeConfigurationSetOutput struct { _ struct{} `type:"structure"` @@ -8697,6 +8815,10 @@ type DescribeConfigurationSetOutput struct { // set. ConfigurationSet *ConfigurationSet `type:"structure"` + // Specifies whether messages that use the configuration set are required to + // use Transport Layer Security (TLS). + DeliveryOptions *DeliveryOptions `type:"structure"` + // A list of event destinations associated with the configuration set. EventDestinations []*EventDestination `type:"list"` @@ -8724,6 +8846,12 @@ func (s *DescribeConfigurationSetOutput) SetConfigurationSet(v *ConfigurationSet return s } +// SetDeliveryOptions sets the DeliveryOptions field's value. +func (s *DescribeConfigurationSetOutput) SetDeliveryOptions(v *DeliveryOptions) *DescribeConfigurationSetOutput { + s.DeliveryOptions = v + return s +} + // SetEventDestinations sets the EventDestinations field's value. func (s *DescribeConfigurationSetOutput) SetEventDestinations(v []*EventDestination) *DescribeConfigurationSetOutput { s.EventDestinations = v @@ -8744,7 +8872,7 @@ func (s *DescribeConfigurationSetOutput) SetTrackingOptions(v *TrackingOptions) // Represents a request to return the details of a receipt rule. You use receipt // rules to receive email with Amazon SES. For more information, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -8825,7 +8953,7 @@ func (s *DescribeReceiptRuleOutput) SetRule(v *ReceiptRule) *DescribeReceiptRule // Represents a request to return the details of a receipt rule set. You use // receipt rule sets to receive email with Amazon SES. For more information, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -8911,13 +9039,13 @@ func (s *DescribeReceiptRuleSetOutput) SetRules(v []*ReceiptRule) *DescribeRecei type Destination struct { _ struct{} `type:"structure"` - // The BCC: field(s) of the message. + // The recipients to place on the BCC: line of the message. BccAddresses []*string `type:"list"` - // The CC: field(s) of the message. + // The recipients to place on the CC: line of the message. CcAddresses []*string `type:"list"` - // The To: field(s) of the message. + // The recipients to place on the To: line of the message. ToAddresses []*string `type:"list"` } @@ -8959,7 +9087,7 @@ func (s *Destination) SetToAddresses(v []*string) *Destination { // Event destinations are associated with configuration sets, which enable you // to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, // or Amazon Simple Notification Service (Amazon SNS). 
For information about -// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type EventDestination struct { _ struct{} `type:"structure"` @@ -9078,7 +9206,7 @@ func (s *EventDestination) SetSNSDestination(v *SNSDestination) *EventDestinatio // when an email that Amazon SES receives on your behalf bounces. // // For information about receiving email through Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). type ExtensionField struct { _ struct{} `type:"structure"` @@ -9287,7 +9415,7 @@ func (s *GetCustomVerificationEmailTemplateOutput) SetTemplateSubject(v string) // identity. For domain identities, this request also returns the DKIM tokens // that are required for Easy DKIM signing, and whether Amazon SES successfully // verified that these tokens were published. For more information about Easy -// DKIM, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +// DKIM, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). type GetIdentityDkimAttributesInput struct { _ struct{} `type:"structure"` @@ -9358,7 +9486,7 @@ func (s *GetIdentityDkimAttributesOutput) SetDkimAttributes(v map[string]*Identi // Represents a request to return the Amazon SES custom MAIL FROM attributes // for a list of identities. For information about using a custom MAIL FROM -// domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +// domain, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). type GetIdentityMailFromDomainAttributesInput struct { _ struct{} `type:"structure"` @@ -9425,7 +9553,7 @@ func (s *GetIdentityMailFromDomainAttributesOutput) SetMailFromDomainAttributes( // Represents a request to return the notification attributes for a list of // identities you verified with Amazon SES. For information about Amazon SES -// notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// notifications, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). type GetIdentityNotificationAttributesInput struct { _ struct{} `type:"structure"` @@ -9495,7 +9623,7 @@ func (s *GetIdentityNotificationAttributesOutput) SetNotificationAttributes(v ma // Represents a request to return the requested sending authorization policies // for an identity. Sending authorization is an Amazon SES feature that enables // you to authorize other senders to use your identities. For information, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
type GetIdentityPoliciesInput struct { _ struct{} `type:"structure"` @@ -9583,7 +9711,7 @@ func (s *GetIdentityPoliciesOutput) SetPolicies(v map[string]*string) *GetIdenti // Represents a request to return the Amazon SES verification status of a list // of identities. For domain identities, this request also returns the verification // token. For information about verifying identities with Amazon SES, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). type GetIdentityVerificationAttributesInput struct { _ struct{} `type:"structure"` @@ -9816,22 +9944,22 @@ func (s *GetTemplateOutput) SetTemplate(v *Template) *GetTemplateOutput { type IdentityDkimAttributes struct { _ struct{} `type:"structure"` - // True if DKIM signing is enabled for email sent from the identity; false otherwise. - // The default value is true. + // Is true if DKIM signing is enabled for email sent from the identity. It's + // false otherwise. The default value is true. // // DkimEnabled is a required field DkimEnabled *bool `type:"boolean" required:"true"` // A set of character strings that represent the domain's identity. Using these - // tokens, you will need to create DNS CNAME records that point to DKIM public - // keys hosted by Amazon SES. Amazon Web Services will eventually detect that - // you have updated your DNS records; this detection process may take up to - // 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign - // email originating from that domain. (This only applies to domain identities, - // not email address identities.) - // - // For more information about creating DNS records using DKIM tokens, go to - // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + // tokens, you need to create DNS CNAME records that point to DKIM public keys + // that are hosted by Amazon SES. Amazon Web Services eventually detects that + // you've updated your DNS records. This detection process might take up to + // 72 hours. After successful detection, Amazon SES is able to DKIM-sign email + // originating from that domain. (This only applies to domain identities, not + // email address identities.) + // + // For more information about creating DNS records using DKIM tokens, see the + // Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). DkimTokens []*string `type:"list"` // Describes whether Amazon SES has successfully verified the DKIM DNS records @@ -10077,7 +10205,7 @@ func (s *IdentityVerificationAttributes) SetVerificationToken(v string) *Identit // Event destinations, such as Amazon Kinesis Firehose, are associated with // configuration sets, which enable you to publish email sending events. For // information about using configuration sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). 
type KinesisFirehoseDestination struct { _ struct{} `type:"structure"` @@ -10139,17 +10267,17 @@ func (s *KinesisFirehoseDestination) SetIAMRoleARN(v string) *KinesisFirehoseDes // To enable Amazon SES to call your AWS Lambda function or to publish to an // Amazon SNS topic of another account, Amazon SES must have permission to access // those resources. For information about giving permissions, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // For information about using AWS Lambda actions in receipt rules, see the -// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). +// Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). type LambdaAction struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the AWS Lambda function. An example of // an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. // For more information about AWS Lambda, see the AWS Lambda Developer Guide - // (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + // (https://docs.aws.amazon.com/lambda/latest/dg/welcome.html). // // FunctionArn is a required field FunctionArn *string `type:"string" required:"true"` @@ -10158,7 +10286,7 @@ type LambdaAction struct { // means that the execution of the function will immediately result in a response, // and a value of Event means that the function will be invoked asynchronously. // The default value is Event. For information about AWS Lambda invocation types, - // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). + // see the AWS Lambda Developer Guide (https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). // // There is a 30-second timeout on RequestResponse invocations. You should use // Event invocation in most cases. Use RequestResponse only when you want to @@ -10169,7 +10297,7 @@ type LambdaAction struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the // Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). TopicArn *string `type:"string"` } @@ -10217,7 +10345,7 @@ func (s *LambdaAction) SetTopicArn(v string) *LambdaAction { // Represents a request to list the configuration sets associated with your // AWS account. Configuration sets enable you to publish email sending events. // For information about using configuration sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type ListConfigurationSetsInput struct { _ struct{} `type:"structure"` @@ -10253,7 +10381,7 @@ func (s *ListConfigurationSetsInput) SetNextToken(v string) *ListConfigurationSe // A list of configuration sets associated with your AWS account. Configuration // sets enable you to publish email sending events. 
For information about using -// configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type ListConfigurationSetsOutput struct { _ struct{} `type:"structure"` @@ -10291,7 +10419,7 @@ func (s *ListConfigurationSetsOutput) SetNextToken(v string) *ListConfigurationS // for your account. // // For more information about custom verification email templates, see Using -// Custom Verification Email Templates (ses/latest/DeveloperGuide/custom-verification-emails.html) +// Custom Verification Email Templates (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) // in the Amazon SES Developer Guide. type ListCustomVerificationEmailTemplatesInput struct { _ struct{} `type:"structure"` @@ -10461,7 +10589,7 @@ func (s *ListIdentitiesOutput) SetNextToken(v string) *ListIdentitiesOutput { // Represents a request to return a list of sending authorization policies that // are attached to an identity. Sending authorization is an Amazon SES feature // that enables you to authorize other senders to use your identities. For information, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type ListIdentityPoliciesInput struct { _ struct{} `type:"structure"` @@ -10532,7 +10660,7 @@ func (s *ListIdentityPoliciesOutput) SetPolicyNames(v []*string) *ListIdentityPo // Represents a request to list the IP address filters that exist under your // AWS account. You use IP address filters when you receive email with Amazon -// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// SES. For more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ListReceiptFiltersInput struct { _ struct{} `type:"structure"` } @@ -10574,7 +10702,7 @@ func (s *ListReceiptFiltersOutput) SetFilters(v []*ReceiptFilter) *ListReceiptFi // Represents a request to list the receipt rule sets that exist under your // AWS account. You use receipt rule sets to receive email with Amazon SES. -// For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// For more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ListReceiptRuleSetsInput struct { _ struct{} `type:"structure"` @@ -10813,7 +10941,7 @@ func (s *Message) SetSubject(v *Content) *Message { // (DSN) when an email that Amazon SES receives on your behalf bounces. // // For information about receiving email through Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). type MessageDsn struct { _ struct{} `type:"structure"` @@ -10888,7 +11016,7 @@ func (s *MessageDsn) SetReportingMta(v string) *MessageDsn { // // Message tags, which you use with configuration sets, enable you to publish // email sending events. 
For information about using configuration sets, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type MessageTag struct { _ struct{} `type:"structure"` @@ -10951,10 +11079,76 @@ func (s *MessageTag) SetValue(v string) *MessageTag { return s } +// A request to modify the delivery options for a configuration set. +type PutConfigurationSetDeliveryOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to specify the delivery options + // for. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // Specifies whether messages that use the configuration set are required to + // use Transport Layer Security (TLS). + DeliveryOptions *DeliveryOptions `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetDeliveryOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationSetDeliveryOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationSetDeliveryOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutConfigurationSetDeliveryOptionsInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *PutConfigurationSetDeliveryOptionsInput) SetConfigurationSetName(v string) *PutConfigurationSetDeliveryOptionsInput { + s.ConfigurationSetName = &v + return s +} + +// SetDeliveryOptions sets the DeliveryOptions field's value. +func (s *PutConfigurationSetDeliveryOptionsInput) SetDeliveryOptions(v *DeliveryOptions) *PutConfigurationSetDeliveryOptionsInput { + s.DeliveryOptions = v + return s +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutConfigurationSetDeliveryOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetDeliveryOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationSetDeliveryOptionsOutput) GoString() string { + return s.String() +} + // Represents a request to add or update a sending authorization policy for // an identity. Sending authorization is an Amazon SES feature that enables // you to authorize other senders to use your identities. For information, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type PutIdentityPolicyInput struct { _ struct{} `type:"structure"` @@ -10970,7 +11164,7 @@ type PutIdentityPolicyInput struct { // The text of the policy in JSON format. The policy cannot exceed 4 KB. // // For information about the syntax of sending authorization policies, see the - // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). 
+ // Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). // // Policy is a required field Policy *string `min:"1" type:"string" required:"true"` @@ -11072,7 +11266,7 @@ type RawMessage struct { // Do not include these X-headers in the DKIM signature, because they are removed // by Amazon SES before sending the email. // - // For more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + // For more information, go to the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). // // Data is automatically base64 encoded/decoded by the SDK. // @@ -11114,7 +11308,7 @@ func (s *RawMessage) SetData(v []byte) *RawMessage { // data type can represent only one action. // // For information about setting up receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). type ReceiptAction struct { _ struct{} `type:"structure"` @@ -11247,7 +11441,7 @@ func (s *ReceiptAction) SetWorkmailAction(v *WorkmailAction) *ReceiptAction { // mail originating from an IP address or range of IP addresses. // // For information about setting up IP address filters, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). type ReceiptFilter struct { _ struct{} `type:"structure"` @@ -11317,7 +11511,7 @@ func (s *ReceiptFilter) SetName(v string) *ReceiptFilter { // mail originating from an IP address or range of IP addresses. // // For information about setting up IP address filters, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). type ReceiptIpFilter struct { _ struct{} `type:"structure"` @@ -11383,7 +11577,7 @@ func (s *ReceiptIpFilter) SetPolicy(v string) *ReceiptIpFilter { // the message. // // For information about setting up receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). type ReceiptRule struct { _ struct{} `type:"structure"` @@ -11497,7 +11691,7 @@ func (s *ReceiptRule) SetTlsPolicy(v string) *ReceiptRule { // should do with mail it receives on behalf of your account's verified domains. // // For information about setting up receipt rule sets, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). type ReceiptRuleSetMetadata struct { _ struct{} `type:"structure"` @@ -11541,7 +11735,7 @@ func (s *ReceiptRuleSetMetadata) SetName(v string) *ReceiptRuleSetMetadata { // (DSN) when an email that Amazon SES receives on your behalf bounces. // // For information about receiving email through Amazon SES, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). 
+// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). type RecipientDsnFields struct { _ struct{} `type:"structure"` @@ -11667,7 +11861,7 @@ func (s *RecipientDsnFields) SetStatus(v string) *RecipientDsnFields { // Represents a request to reorder the receipt rules within a receipt rule set. // You use receipt rule sets to receive email with Amazon SES. For more information, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ReorderReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -11801,13 +11995,13 @@ func (s *ReputationOptions) SetSendingEnabled(v bool) *ReputationOptions { // To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS // KMS key to encrypt your emails, or publish to an Amazon SNS topic of another // account, Amazon SES must have permission to access those resources. For information -// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// about giving permissions, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // When you save your emails to an Amazon S3 bucket, the maximum email size // (including headers) is 30 MB. Emails larger than that will bounce. // // For information about specifying Amazon S3 actions in receipt rules, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). type S3Action struct { _ struct{} `type:"structure"` @@ -11830,10 +12024,10 @@ type S3Action struct { // * To use a custom master key you created in AWS KMS, provide the ARN of // the master key and ensure that you add a statement to your key's policy // to give Amazon SES permission to use it. For more information about giving - // permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // permissions, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // For more information about key policies, see the AWS KMS Developer Guide - // (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If // you do not specify a master key, Amazon SES will not encrypt your emails. // // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client @@ -11844,7 +12038,7 @@ type S3Action struct { // This encryption client is currently available with the AWS SDK for Java (http://aws.amazon.com/sdk-for-java/) // and AWS SDK for Ruby (http://aws.amazon.com/sdk-for-ruby/) only. For more // information about client-side encryption using AWS KMS master keys, see the - // Amazon S3 Developer Guide (http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). + // Amazon S3 Developer Guide (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). KmsKeyArn *string `type:"string"` // The key prefix of the Amazon S3 bucket. 
The key prefix is similar to a directory @@ -11855,7 +12049,7 @@ type S3Action struct { // The ARN of the Amazon SNS topic to notify when the message is saved to the // Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). TopicArn *string `type:"string"` } @@ -11916,14 +12110,14 @@ func (s *S3Action) SetTopicArn(v string) *S3Action { // SES permission to publish emails to it. However, if you don't own the Amazon // SNS topic, you need to attach a policy to the topic to give Amazon SES permissions // to access it. For information about giving permissions, see the Amazon SES -// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // // You can only publish emails that are 150 KB or less (including the header) // to Amazon SNS. Larger emails will bounce. If you anticipate emails larger // than 150 KB, use the S3 action instead. // // For information about using a receipt rule to publish an Amazon SNS notification, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). type SNSAction struct { _ struct{} `type:"structure"` @@ -11936,7 +12130,7 @@ type SNSAction struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example // of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). // // TopicArn is a required field TopicArn *string `type:"string" required:"true"` @@ -11982,14 +12176,14 @@ func (s *SNSAction) SetTopicArn(v string) *SNSAction { // // Event destinations, such as Amazon SNS, are associated with configuration // sets, which enable you to publish email sending events. For information about -// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type SNSDestination struct { _ struct{} `type:"structure"` // The ARN of the Amazon SNS topic that email sending events will be published // to. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). // // TopicARN is a required field TopicARN *string `type:"string" required:"true"` @@ -12038,7 +12232,7 @@ type SendBounceInput struct { // This parameter is used only for sending authorization. It is the ARN of the // identity that is associated with the sending authorization policy that permits // you to use the address in the "From" header of the bounce. 
For more information - // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // about sending authorization, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). BounceSenderArn *string `type:"string"` // A list of recipients of the bounced message, including the information required @@ -12168,7 +12362,7 @@ func (s *SendBounceOutput) SetMessageId(v string) *SendBounceOutput { } // Represents a request to send a templated email to multiple destinations using -// Amazon SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// Amazon SES. For more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). type SendBulkTemplatedEmailInput struct { _ struct{} `type:"structure"` @@ -12217,18 +12411,18 @@ type SendBulkTemplatedEmailInput struct { // and the ReturnPath to be feedback@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). ReturnPathArn *string `type:"string"` // The email address that is sending the email. This email address must be either // individually verified with Amazon SES, or from a domain that has been verified // with Amazon SES. For information about verifying identities, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // // If you are sending on behalf of another user and have been permitted to do // so by a sending authorization policy, then you must also specify the SourceArn // parameter. For more information about sending authorization, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of @@ -12254,7 +12448,7 @@ type SendBulkTemplatedEmailInput struct { // and the Source to be user@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). SourceArn *string `type:"string"` // The template to use when sending this email. @@ -12556,7 +12750,7 @@ func (s *SendDataPoint) SetTimestamp(v time.Time) *SendDataPoint { } // Represents a request to send a single formatted email using Amazon SES. For -// more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-formatted.html). +// more information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-formatted.html). 
type SendEmailInput struct { _ struct{} `type:"structure"` @@ -12596,18 +12790,18 @@ type SendEmailInput struct { // and the ReturnPath to be feedback@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). ReturnPathArn *string `type:"string"` // The email address that is sending the email. This email address must be either // individually verified with Amazon SES, or from a domain that has been verified // with Amazon SES. For information about verifying identities, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // // If you are sending on behalf of another user and have been permitted to do // so by a sending authorization policy, then you must also specify the SourceArn // parameter. For more information about sending authorization, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of @@ -12633,7 +12827,7 @@ type SendEmailInput struct { // and the Source to be user@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). SourceArn *string `type:"string"` // A list of tags, in the form of name/value pairs, to apply to an email that @@ -12767,7 +12961,7 @@ func (s *SendEmailOutput) SetMessageId(v string) *SendEmailOutput { } // Represents a request to send a single raw email using Amazon SES. For more -// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). +// information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). type SendRawEmailInput struct { _ struct{} `type:"structure"` @@ -12787,7 +12981,7 @@ type SendRawEmailInput struct { // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. // // For information about when to use this parameter, see the description of - // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). FromArn *string `type:"string"` // The raw email message itself. The message has to meet the following criteria: @@ -12801,14 +12995,14 @@ type SendRawEmailInput struct { // // * Attachments must be of a content type that Amazon SES supports. 
For // a list on unsupported content types, see Unsupported Attachment Types - // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html) + // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html) // in the Amazon SES Developer Guide. // // * The entire message must be base64-encoded. // // * If any of the MIME parts in your message contain content that is outside // of the 7-bit ASCII character range, we highly recommend that you encode - // that content. For more information, see Sending Raw Email (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html) + // that content. For more information, see Sending Raw Email (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html) // in the Amazon SES Developer Guide. // // * Per RFC 5321 (https://tools.ietf.org/html/rfc5321#section-4.5.3.1.6), @@ -12833,7 +13027,7 @@ type SendRawEmailInput struct { // parameter. // // For information about when to use this parameter, see the description of - // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). ReturnPathArn *string `type:"string"` // The identity's email address. If you do not provide a value for this parameter, @@ -12872,7 +13066,7 @@ type SendRawEmailInput struct { // parameter. // // For information about when to use this parameter, see the description of - // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). SourceArn *string `type:"string"` // A list of tags, in the form of name/value pairs, to apply to an email that @@ -12994,7 +13188,7 @@ func (s *SendRawEmailOutput) SetMessageId(v string) *SendRawEmailOutput { } // Represents a request to send a templated email using Amazon SES. For more -// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). type SendTemplatedEmailInput struct { _ struct{} `type:"structure"` @@ -13030,18 +13224,18 @@ type SendTemplatedEmailInput struct { // and the ReturnPath to be feedback@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). ReturnPathArn *string `type:"string"` // The email address that is sending the email. This email address must be either // individually verified with Amazon SES, or from a domain that has been verified // with Amazon SES. For information about verifying identities, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). 
+ // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // // If you are sending on behalf of another user and have been permitted to do // so by a sending authorization policy, then you must also specify the SourceArn // parameter. For more information about sending authorization, see the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of @@ -13067,7 +13261,7 @@ type SendTemplatedEmailInput struct { // and the Source to be user@example.com. // // For more information about sending authorization, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). SourceArn *string `type:"string"` // A list of tags, in the form of name/value pairs, to apply to an email that @@ -13226,7 +13420,7 @@ func (s *SendTemplatedEmailOutput) SetMessageId(v string) *SendTemplatedEmailOut // Represents a request to set a receipt rule set as the active receipt rule // set. You use receipt rule sets to receive email with Amazon SES. For more -// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type SetActiveReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -13268,7 +13462,7 @@ func (s SetActiveReceiptRuleSetOutput) GoString() string { // Represents a request to enable or disable Amazon SES Easy DKIM signing for // an identity. For more information about setting up Easy DKIM, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). type SetIdentityDkimEnabledInput struct { _ struct{} `type:"structure"` @@ -13339,7 +13533,7 @@ func (s SetIdentityDkimEnabledOutput) GoString() string { // Represents a request to enable or disable whether Amazon SES forwards you // bounce and complaint notifications through email. For information about email -// feedback forwarding, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-email.html). +// feedback forwarding, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-email.html). type SetIdentityFeedbackForwardingEnabledInput struct { _ struct{} `type:"structure"` @@ -13415,7 +13609,7 @@ func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { // Represents a request to set whether Amazon SES includes the original email // headers in the Amazon SNS notifications of a specified type. For information -// about notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). +// about notifications, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). 
type SetIdentityHeadersInNotificationsEnabledInput struct { _ struct{} `type:"structure"` @@ -13506,7 +13700,7 @@ func (s SetIdentityHeadersInNotificationsEnabledOutput) GoString() string { // Represents a request to enable or disable the Amazon SES custom MAIL FROM // domain setup for a verified identity. For information about using a custom -// MAIL FROM domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +// MAIL FROM domain, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). type SetIdentityMailFromDomainInput struct { _ struct{} `type:"structure"` @@ -13530,7 +13724,7 @@ type SetIdentityMailFromDomainInput struct { // MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not // be used in a "From" address if the MAIL FROM domain is the destination of // email feedback forwarding (for more information, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html)), + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html)), // and 3) not be used to receive emails. A value of null disables the custom // MAIL FROM setting for the identity. MailFromDomain *string `type:"string"` @@ -13595,7 +13789,7 @@ func (s SetIdentityMailFromDomainOutput) GoString() string { // Represents a request to specify the Amazon SNS topic to which Amazon SES // will publish bounce, complaint, or delivery notifications for emails sent // with that identity as the Source. For information about Amazon SES notifications, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). +// see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). type SetIdentityNotificationTopicInput struct { _ struct{} `type:"structure"` @@ -13684,7 +13878,7 @@ func (s SetIdentityNotificationTopicOutput) GoString() string { // Represents a request to set the position of a receipt rule in a receipt rule // set. You use receipt rule sets to receive email with Amazon SES. For more -// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// information, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type SetReceiptRulePositionInput struct { _ struct{} `type:"structure"` @@ -13766,11 +13960,11 @@ func (s SetReceiptRulePositionOutput) GoString() string { // Simple Notification Service (Amazon SNS). // // For information about setting a stop action in a receipt rule, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). type StopAction struct { _ struct{} `type:"structure"` - // The name of the RuleSet that is being stopped. + // The scope of the StopAction. The only acceptable value is RuleSet. // // Scope is a required field Scope *string `type:"string" required:"true" enum:"StopScope"` @@ -13778,7 +13972,7 @@ type StopAction struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the // stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. 
// For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). TopicArn *string `type:"string"` } @@ -14002,7 +14196,7 @@ func (s *TestRenderTemplateOutput) SetRenderedTemplate(v string) *TestRenderTemp // emails. // // For more information, see Configuring Custom Domains to Handle Open and Click -// Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) +// Tracking (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide. type TrackingOptions struct { _ struct{} `type:"structure"` @@ -14070,7 +14264,7 @@ func (s UpdateAccountSendingEnabledOutput) GoString() string { // Represents a request to update the event destination of a configuration set. // Configuration sets enable you to publish email sending events. For information -// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// about using configuration sets, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). type UpdateConfigurationSetEventDestinationInput struct { _ struct{} `type:"structure"` @@ -14298,7 +14492,7 @@ type UpdateConfigurationSetTrackingOptionsInput struct { // emails. // // For more information, see Configuring Custom Domains to Handle Open and Click - // Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) + // Tracking (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide. // // TrackingOptions is a required field @@ -14376,7 +14570,7 @@ type UpdateCustomVerificationEmailTemplateInput struct { // The content of the custom verification email. The total size of the email // must be less than 10 MB. The message body may contain HTML, with some limitations. // For more information, see Custom Verification Email Frequently Asked Questions - // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) + // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) // in the Amazon SES Developer Guide. TemplateContent *string `type:"string"` @@ -14464,7 +14658,7 @@ func (s UpdateCustomVerificationEmailTemplateOutput) GoString() string { // Represents a request to update a receipt rule. You use receipt rules to receive // email with Amazon SES. For more information, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type UpdateReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -14597,7 +14791,7 @@ func (s UpdateTemplateOutput) GoString() string { // Represents a request to generate the CNAME records needed to set up Easy // DKIM with Amazon SES. For more information about setting up Easy DKIM, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). 
type VerifyDomainDkimInput struct { _ struct{} `type:"structure"` @@ -14644,14 +14838,15 @@ type VerifyDomainDkimOutput struct { // A set of character strings that represent the domain's identity. If the identity // is an email address, the tokens represent the domain of that address. // - // Using these tokens, you will need to create DNS CNAME records that point - // to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually - // detect that you have updated your DNS records; this detection process may - // take up to 72 hours. Upon successful detection, Amazon SES will be able to - // DKIM-sign emails originating from that domain. + // Using these tokens, you need to create DNS CNAME records that point to DKIM + // public keys that are hosted by Amazon SES. Amazon Web Services eventually + // detects that you've updated your DNS records. This detection process might + // take up to 72 hours. After successful detection, Amazon SES is able to DKIM-sign + // email originating from that domain. (This only applies to domain identities, + // not email address identities.) // - // For more information about creating DNS records using DKIM tokens, go to - // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + // For more information about creating DNS records using DKIM tokens, see the + // Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). // // DkimTokens is a required field DkimTokens []*string `type:"list" required:"true"` @@ -14676,7 +14871,7 @@ func (s *VerifyDomainDkimOutput) SetDkimTokens(v []*string) *VerifyDomainDkimOut // Represents a request to begin Amazon SES domain verification and to generate // the TXT records that you must publish to the DNS server of your domain to // complete the verification. For information about domain verification, see -// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html). +// the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html). type VerifyDomainIdentityInput struct { _ struct{} `type:"structure"` @@ -14752,7 +14947,7 @@ func (s *VerifyDomainIdentityOutput) SetVerificationToken(v string) *VerifyDomai // Represents a request to begin email address verification with Amazon SES. // For information about email address verification, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). type VerifyEmailAddressInput struct { _ struct{} `type:"structure"` @@ -14807,7 +15002,7 @@ func (s VerifyEmailAddressOutput) GoString() string { // Represents a request to begin email address verification with Amazon SES. // For information about email address verification, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +// Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). type VerifyEmailIdentityInput struct { _ struct{} `type:"structure"` @@ -14867,14 +15062,14 @@ func (s VerifyEmailIdentityOutput) GoString() string { // the rule automatically during its setup procedure. 
// // For information about using a receipt rule to call Amazon WorkMail, see the Amazon -// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html). +// SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html). type WorkmailAction struct { _ struct{} `type:"structure"` // The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail // organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. // For information about Amazon WorkMail organizations, see the Amazon WorkMail - // Administrator Guide (http://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html). + // Administrator Guide (https://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html). // // OrganizationArn is a required field OrganizationArn *string `type:"string" required:"true"` @@ -14882,7 +15077,7 @@ type WorkmailAction struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the // WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer - // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). TopicArn *string `type:"string"` } @@ -15000,6 +15195,9 @@ const ( // ConfigurationSetAttributeTrackingOptions is a ConfigurationSetAttribute enum value ConfigurationSetAttributeTrackingOptions = "trackingOptions" + // ConfigurationSetAttributeDeliveryOptions is a ConfigurationSetAttribute enum value + ConfigurationSetAttributeDeliveryOptions = "deliveryOptions" + // ConfigurationSetAttributeReputationOptions is a ConfigurationSetAttribute enum value ConfigurationSetAttributeReputationOptions = "reputationOptions" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go index 6ba270a7578..00aec63b347 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go @@ -6,11 +6,11 @@ // This document contains reference information for the Amazon Simple Email // Service (https://aws.amazon.com/ses/) (Amazon SES) API, version 2010-12-01. // This document is best used in conjunction with the Amazon SES Developer Guide -// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // // For a list of Amazon SES endpoints to use in service requests, see Regions -// and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) -// in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// and Amazon SES (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) +// in the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // // See https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01 for more information on this service.
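The rewritten DKIM documentation above describes turning VerifyDomainDkim tokens into CNAME records. As a minimal sketch of that flow against this vendored SDK — the domain and region are placeholders, and the token._domainkey record shape follows the usual Easy DKIM convention, stated here as an assumption rather than something this diff defines:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := ses.New(sess)

	out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
		Domain: aws.String("example.com"), // placeholder domain
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each token becomes a CNAME record pointing at a DKIM public key hosted
	// by Amazon SES; per the docs above, detection can take up to 72 hours.
	for _, t := range out.DkimTokens {
		fmt.Printf("%s._domainkey.example.com CNAME %s.dkim.amazonses.com\n",
			aws.StringValue(t), aws.StringValue(t))
	}
}
```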
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go index dd94d63518b..d54b69fa74e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go @@ -101,6 +101,12 @@ const ( // details. ErrCodeInvalidConfigurationSetException = "InvalidConfigurationSet" + // ErrCodeInvalidDeliveryOptionsException for service response error code + // "InvalidDeliveryOptions". + // + // Indicates that the provided delivery option is invalid. + ErrCodeInvalidDeliveryOptionsException = "InvalidDeliveryOptions" + // ErrCodeInvalidFirehoseDestinationException for service response error code // "InvalidFirehoseDestination". // @@ -114,7 +120,7 @@ const ( // Indicates that the provided AWS Lambda function is invalid, or that Amazon // SES could not execute the provided function, possibly due to permissions // issues. For information about giving permissions, see the Amazon SES Developer - // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). ErrCodeInvalidLambdaFunctionException = "InvalidLambdaFunction" // ErrCodeInvalidPolicyException for service response error code @@ -137,7 +143,7 @@ const ( // Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is // invalid, or that Amazon SES could not publish to the bucket, possibly due // to permissions issues. For information about giving permissions, see the - // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). ErrCodeInvalidS3ConfigurationException = "InvalidS3Configuration" // ErrCodeInvalidSNSDestinationException for service response error code @@ -152,7 +158,7 @@ const ( // // Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES // could not publish to the topic, possibly due to permissions issues. For information - // about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // about giving permissions, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). ErrCodeInvalidSnsTopicException = "InvalidSnsTopic" // ErrCodeInvalidTemplateException for service response error code @@ -177,7 +183,7 @@ const ( // "LimitExceeded". // // Indicates that a resource could not be created because of service limits. - // For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). + // For a list of Amazon SES limits, see the Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). ErrCodeLimitExceededException = "LimitExceeded" // ErrCodeMailFromDomainNotVerifiedException for service response error code @@ -186,7 +192,7 @@ const ( // Indicates that the message could not be sent because Amazon SES could not // read the MX record required to use the specified MAIL FROM domain.
For information // about editing the custom MAIL FROM domain settings for an identity, see the - // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). + // Amazon SES Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). ErrCodeMailFromDomainNotVerifiedException = "MailFromDomainNotVerifiedException" // ErrCodeMessageRejected for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/service.go index 0e33b771f53..09028e10453 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ses" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SES { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SES { svc := &SES{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go index 142ce93e9da..dfc0e5d41d4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go @@ -67,6 +67,13 @@ func (c *SFN) CreateActivityRequest(input *CreateActivityInput) (req *request.Re // This operation is eventually consistent. The results are best effort and // may not reflect very recent updates and changes. // +// CreateActivity is an idempotent API. Subsequent requests won’t create a +// duplicate resource if it was already created. CreateActivity's idempotency +// check is based on the activity name. If a following request has different +// tag values, Step Functions will ignore these differences and treat it as +// an idempotent request of the previous one. In this case, tags will not be updated, +// even if they are different. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -83,7 +90,7 @@ func (c *SFN) CreateActivityRequest(input *CreateActivityInput) (req *request.Re // The provided name is invalid. // // * ErrCodeTooManyTags "TooManyTags" -// You've exceeded the number of tags allowed for a resource. See the Limits +// You've exceeded the number of tags allowed for a resource. See the Limits // Topic (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html) // in the AWS Step Functions Developer Guide.
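The idempotency note added above is observable from the client side: calling CreateActivity twice with the same name yields the same ARN instead of an error. A hedged sketch (activity name and region are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	in := &sfn.CreateActivityInput{Name: aws.String("order-worker")} // placeholder name

	first, err := svc.CreateActivity(in)
	if err != nil {
		log.Fatal(err)
	}
	// Per the documentation above, the retry is treated as idempotent: no
	// duplicate activity is created, and differing tags would be ignored.
	second, err := svc.CreateActivity(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(first.ActivityArn) == aws.StringValue(second.ActivityArn)) // true
}
```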
// @@ -161,6 +168,13 @@ func (c *SFN) CreateStateMachineRequest(input *CreateStateMachineInput) (req *re // This operation is eventually consistent. The results are best effort and // may not reflect very recent updates and changes. // +// CreateStateMachine is an idempotent API. Subsequent requests won’t create +// a duplicate resource if it was already created. CreateStateMachine's idempotency +// check is based on the state machine name and definition. If a following request +// has a different roleArn or tags, Step Functions will ignore these differences +// and treat it as an idempotent request of the previous one. In this case, roleArn +// and tags will not be updated, even if they are different. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -190,7 +204,7 @@ func (c *SFN) CreateStateMachineRequest(input *CreateStateMachineInput) (req *re // must be deleted before a new state machine can be created. // // * ErrCodeTooManyTags "TooManyTags" -// You've exceeded the number of tags allowed for a resource. See the Limits +// You've exceeded the number of tags allowed for a resource. See the Limits // Topic (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html) // in the AWS Step Functions Developer Guide. // @@ -930,7 +944,7 @@ func (c *SFN) GetExecutionHistoryWithContext(ctx aws.Context, input *GetExecutio // // Example iterating over at most 3 pages of a GetExecutionHistory operation. // pageNum := 0 // err := client.GetExecutionHistoryPages(params, -// func(page *GetExecutionHistoryOutput, lastPage bool) bool { +// func(page *sfn.GetExecutionHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -962,10 +976,12 @@ func (c *SFN) GetExecutionHistoryPagesWithContext(ctx aws.Context, input *GetExe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetExecutionHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetExecutionHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1074,7 +1090,7 @@ func (c *SFN) ListActivitiesWithContext(ctx aws.Context, input *ListActivitiesIn // // Example iterating over at most 3 pages of a ListActivities operation. // pageNum := 0 // err := client.ListActivitiesPages(params, -// func(page *ListActivitiesOutput, lastPage bool) bool { +// func(page *sfn.ListActivitiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1106,10 +1122,12 @@ func (c *SFN) ListActivitiesPagesWithContext(ctx aws.Context, input *ListActivit }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListActivitiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListActivitiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1225,7 +1243,7 @@ func (c *SFN) ListExecutionsWithContext(ctx aws.Context, input *ListExecutionsIn // // Example iterating over at most 3 pages of a ListExecutions operation.
// pageNum := 0 // err := client.ListExecutionsPages(params, -// func(page *ListExecutionsOutput, lastPage bool) bool { +// func(page *sfn.ListExecutionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1257,10 +1275,12 @@ func (c *SFN) ListExecutionsPagesWithContext(ctx aws.Context, input *ListExecuti }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListExecutionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListExecutionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1369,7 +1389,7 @@ func (c *SFN) ListStateMachinesWithContext(ctx aws.Context, input *ListStateMach // // Example iterating over at most 3 pages of a ListStateMachines operation. // pageNum := 0 // err := client.ListStateMachinesPages(params, -// func(page *ListStateMachinesOutput, lastPage bool) bool { +// func(page *sfn.ListStateMachinesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1401,10 +1421,12 @@ func (c *SFN) ListStateMachinesPagesWithContext(ctx aws.Context, input *ListStat }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListStateMachinesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListStateMachinesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1454,6 +1476,9 @@ func (c *SFN) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * // // List tags for a given resource. // +// Tags may only contain Unicode letters, digits, white space, or these symbols: +// _ . : / = + - @. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1466,7 +1491,7 @@ func (c *SFN) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * // The provided Amazon Resource Name (ARN) is invalid. // // * ErrCodeResourceNotFound "ResourceNotFound" -// Could not fine the referenced resource. Only state machine and activity ARNs +// Could not find the referenced resource. Only state machine and activity ARNs // are supported. // // See also, https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ListTagsForResource @@ -1536,7 +1561,8 @@ func (c *SFN) SendTaskFailureRequest(input *SendTaskFailureInput) (req *request. // SendTaskFailure API operation for AWS Step Functions. // -// Used by workers to report that the task identified by the taskToken failed. +// Used by activity workers and task states using the callback (https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token) +// pattern to report that the task identified by the taskToken failed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1620,19 +1646,21 @@ func (c *SFN) SendTaskHeartbeatRequest(input *SendTaskHeartbeatInput) (req *requ // SendTaskHeartbeat API operation for AWS Step Functions. // -// Used by workers to report to the service that the task represented by the -// specified taskToken is still making progress. This action resets the Heartbeat -// clock. The Heartbeat threshold is specified in the state machine's Amazon -// States Language definition. This action does not in itself create an event -// in the execution history. However, if the task times out, the execution history -// contains an ActivityTimedOut event. 
+// Used by activity workers and task states using the callback (https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token) +// pattern to report to Step Functions that the task represented by the specified +// taskToken is still making progress. This action resets the Heartbeat clock. +// The Heartbeat threshold is specified in the state machine's Amazon States +// Language definition (HeartbeatSeconds). This action does not in itself create +// an event in the execution history. However, if the task times out, the execution +// history contains an ActivityTimedOut entry for activities, or a TaskTimedOut +// entry for tasks using the job run (https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-sync) +// or callback (https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token) +// pattern. // // The Timeout of a task, defined in the state machine's Amazon States Language // definition, is its maximum allowed duration, regardless of the number of -// SendTaskHeartbeat requests received. -// -// This operation is only useful for long-lived tasks to report the liveliness -// of the task. +// SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the +// timeout interval for heartbeats. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1716,8 +1744,8 @@ func (c *SFN) SendTaskSuccessRequest(input *SendTaskSuccessInput) (req *request. // SendTaskSuccess API operation for AWS Step Functions. // -// Used by workers to report that the task identified by the taskToken completed -// successfully. +// Used by activity workers and task states using the callback (https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token) +// pattern to report that the task identified by the taskToken completed successfully. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1994,6 +2022,14 @@ func (c *SFN) TagResourceRequest(input *TagResourceInput) (req *request.Request, // // Add a tag to a Step Functions resource. // +// An array of key-value pairs. For more information, see Using Cost Allocation +// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// in the AWS Billing and Cost Management User Guide, and Controlling Access +// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). +// +// Tags may only contain Unicode letters, digits, white space, or these symbols: +// _ . : / = + - @. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2006,11 +2042,11 @@ func (c *SFN) TagResourceRequest(input *TagResourceInput) (req *request.Request, // The provided Amazon Resource Name (ARN) is invalid. // // * ErrCodeResourceNotFound "ResourceNotFound" -// Could not fine the referenced resource. Only state machine and activity ARNs +// Could not find the referenced resource. Only state machine and activity ARNs // are supported. // // * ErrCodeTooManyTags "TooManyTags" -// You've exceeded the number of tags allowed for a resource. See the Limits +// You've exceeded the number of tags allowed for a resource.
See the Limits // Topic (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html) // in the AWS Step Functions Developer Guide. // @@ -2095,7 +2131,7 @@ func (c *SFN) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // The provided Amazon Resource Name (ARN) is invalid. // // * ErrCodeResourceNotFound "ResourceNotFound" -// Could not fine the referenced resource. Only state machine and activity ARNs +// Could not find the referenced resource. Only state machine and activity ARNs // are supported. // // See also, https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/UntagResource @@ -2270,7 +2306,7 @@ type ActivityListItem struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -2486,13 +2522,13 @@ type CreateActivityInput struct { _ struct{} `type:"structure"` // The name of the activity to create. This name must be unique for your AWS - // account and region for 90 days. For more information, see Limits Related + // account and region for 90 days. For more information, see Limits Related // to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) // in the AWS Step Functions Developer Guide. // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -2506,6 +2542,14 @@ type CreateActivityInput struct { Name *string `locationName:"name" min:"1" type:"string" required:"true"` // The list of tags to add to a resource. + // + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + // in the AWS Billing and Cost Management User Guide, and Controlling Access + // Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). + // + // Tags may only contain Unicode letters, digits, white space, or these symbols: + // _ . : / = + - @. Tags []*Tag `locationName:"tags" type:"list"` } @@ -2606,7 +2650,7 @@ type CreateStateMachineInput struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -2625,6 +2669,14 @@ type CreateStateMachineInput struct { RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` // Tags to be added when creating a state machine. + // + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + // in the AWS Billing and Cost Management User Guide, and Controlling Access + // Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). + // + // Tags may only contain Unicode letters, digits, white space, or these symbols: + // _ . : / = + - @. 
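The tag constraints repeated through these hunks (Unicode letters, digits, white space, and _ . : / = + - @) apply to the Tags field declared next. A sketch of tagging a state machine at creation time; the name, role ARN, and definition below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession()))

	out, err := svc.CreateStateMachine(&sfn.CreateStateMachineInput{
		Name:       aws.String("order-pipeline"),                                          // placeholder
		Definition: aws.String(`{"StartAt":"Done","States":{"Done":{"Type":"Succeed"}}}`), // minimal ASL
		RoleArn:    aws.String("arn:aws:iam::123456789012:role/StepFunctionsRole"),        // placeholder
		Tags: []*sfn.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},    // placeholder values
			{Key: aws.String("cost:center"), Value: aws.String("1234")}, // ':' is an allowed symbol
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.StateMachineArn))
}
```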
Tags []*Tag `locationName:"tags" type:"list"` } @@ -2904,7 +2956,7 @@ type DescribeActivityOutput struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -3004,7 +3056,7 @@ type DescribeExecutionOutput struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -3271,7 +3323,7 @@ type DescribeStateMachineOutput struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -3425,7 +3477,7 @@ type ExecutionListItem struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -3866,6 +3918,21 @@ type HistoryEvent struct { // execution. LambdaFunctionTimedOutEventDetails *LambdaFunctionTimedOutEventDetails `locationName:"lambdaFunctionTimedOutEventDetails" type:"structure"` + // Contains details about an iteration of a Map state that was aborted. + MapIterationAbortedEventDetails *MapIterationEventDetails `locationName:"mapIterationAbortedEventDetails" type:"structure"` + + // Contains details about an iteration of a Map state that failed. + MapIterationFailedEventDetails *MapIterationEventDetails `locationName:"mapIterationFailedEventDetails" type:"structure"` + + // Contains details about an iteration of a Map state that was started. + MapIterationStartedEventDetails *MapIterationEventDetails `locationName:"mapIterationStartedEventDetails" type:"structure"` + + // Contains details about an iteration of a Map state that succeeded. + MapIterationSucceededEventDetails *MapIterationEventDetails `locationName:"mapIterationSucceededEventDetails" type:"structure"` + + // Contains details about Map state that was started. + MapStateStartedEventDetails *MapStateStartedEventDetails `locationName:"mapStateStartedEventDetails" type:"structure"` + // The id of the previous event. PreviousEventId *int64 `locationName:"previousEventId" type:"long"` @@ -4028,6 +4095,36 @@ func (s *HistoryEvent) SetLambdaFunctionTimedOutEventDetails(v *LambdaFunctionTi return s } +// SetMapIterationAbortedEventDetails sets the MapIterationAbortedEventDetails field's value. +func (s *HistoryEvent) SetMapIterationAbortedEventDetails(v *MapIterationEventDetails) *HistoryEvent { + s.MapIterationAbortedEventDetails = v + return s +} + +// SetMapIterationFailedEventDetails sets the MapIterationFailedEventDetails field's value. +func (s *HistoryEvent) SetMapIterationFailedEventDetails(v *MapIterationEventDetails) *HistoryEvent { + s.MapIterationFailedEventDetails = v + return s +} + +// SetMapIterationStartedEventDetails sets the MapIterationStartedEventDetails field's value. +func (s *HistoryEvent) SetMapIterationStartedEventDetails(v *MapIterationEventDetails) *HistoryEvent { + s.MapIterationStartedEventDetails = v + return s +} + +// SetMapIterationSucceededEventDetails sets the MapIterationSucceededEventDetails field's value. +func (s *HistoryEvent) SetMapIterationSucceededEventDetails(v *MapIterationEventDetails) *HistoryEvent { + s.MapIterationSucceededEventDetails = v + return s +} + +// SetMapStateStartedEventDetails sets the MapStateStartedEventDetails field's value. +func (s *HistoryEvent) SetMapStateStartedEventDetails(v *MapStateStartedEventDetails) *HistoryEvent { + s.MapStateStartedEventDetails = v + return s +} + // SetPreviousEventId sets the PreviousEventId field's value. 
func (s *HistoryEvent) SetPreviousEventId(v int64) *HistoryEvent { s.PreviousEventId = &v @@ -4674,6 +4771,63 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput return s } +// Contains details about an iteration of a Map state. +type MapIterationEventDetails struct { + _ struct{} `type:"structure"` + + // The index of the array belonging to the Map state iteration. + Index *int64 `locationName:"index" type:"integer"` + + // The name of the iteration’s parent Map state. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s MapIterationEventDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MapIterationEventDetails) GoString() string { + return s.String() +} + +// SetIndex sets the Index field's value. +func (s *MapIterationEventDetails) SetIndex(v int64) *MapIterationEventDetails { + s.Index = &v + return s +} + +// SetName sets the Name field's value. +func (s *MapIterationEventDetails) SetName(v string) *MapIterationEventDetails { + s.Name = &v + return s +} + +// Details about a Map state that was started. +type MapStateStartedEventDetails struct { + _ struct{} `type:"structure"` + + // The size of the array for Map state iterations. + Length *int64 `locationName:"length" type:"integer"` +} + +// String returns the string representation +func (s MapStateStartedEventDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MapStateStartedEventDetails) GoString() string { + return s.String() +} + +// SetLength sets the Length field's value. +func (s *MapStateStartedEventDetails) SetLength(v int64) *MapStateStartedEventDetails { + s.Length = &v + return s +} + type SendTaskFailureInput struct { _ struct{} `type:"structure"` @@ -4683,8 +4837,9 @@ type SendTaskFailureInput struct { // The error code of the failure. Error *string `locationName:"error" type:"string" sensitive:"true"` - // The token that represents this task. Task tokens are generated by the service - // when the tasks are assigned to a worker (see GetActivityTask::taskToken). + // The token that represents this task. Task tokens are generated by Step Functions + // when tasks are assigned to a worker, or in the context object (https://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html) + // when a workflow enters a task state. See GetActivityTaskOutput$taskToken. // // TaskToken is a required field TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` @@ -4751,8 +4906,9 @@ func (s SendTaskFailureOutput) GoString() string { type SendTaskHeartbeatInput struct { _ struct{} `type:"structure"` - // The token that represents this task. Task tokens are generated by the service - // when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken). + // The token that represents this task. Task tokens are generated by Step Functions + // when tasks are assigned to a worker, or in the context object (https://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html) + // when a workflow enters a task state. See GetActivityTaskOutput$taskToken. 
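The task-token wording updated above is the heart of the callback pattern: a worker obtains a token from GetActivityTask, keeps the heartbeat clock reset, and reports the outcome with SendTaskSuccess or SendTaskFailure. A sketch of the worker side (the activity ARN is a placeholder, and the 30-second ticker assumes a HeartbeatSeconds comfortably above that):

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession()))

	// Long-poll for work; the token identifies this task in the SendTask* calls.
	task, err := svc.GetActivityTask(&sfn.GetActivityTaskInput{
		ActivityArn: aws.String("arn:aws:states:us-west-2:123456789012:activity:order-worker"), // placeholder
	})
	if err != nil || aws.StringValue(task.TaskToken) == "" {
		log.Fatal("no task available: ", err)
	}

	done := make(chan struct{})
	go func() {
		t := time.NewTicker(30 * time.Second) // must fire more often than HeartbeatSeconds
		defer t.Stop()
		for {
			select {
			case <-done:
				return
			case <-t.C:
				// Resets the heartbeat clock, per the documentation above.
				svc.SendTaskHeartbeat(&sfn.SendTaskHeartbeatInput{TaskToken: task.TaskToken})
			}
		}
	}()

	// ... perform the actual work here ...
	close(done)

	if _, err := svc.SendTaskSuccess(&sfn.SendTaskSuccessInput{
		TaskToken: task.TaskToken,
		Output:    aws.String(`{"ok":true}`),
	}); err != nil {
		log.Fatal(err)
	}
}
```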
// // TaskToken is a required field TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` @@ -4812,8 +4968,9 @@ type SendTaskSuccessInput struct { // Output is a required field Output *string `locationName:"output" type:"string" required:"true" sensitive:"true"` - // The token that represents this task. Task tokens are generated by the service - // when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken). + // The token that represents this task. Task tokens are generated by Step Functions + // when tasks are assigned to a worker, or in the context object (https://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html) + // when a workflow enters a task state. See GetActivityTaskOutput$taskToken. // // TaskToken is a required field TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` @@ -4886,13 +5043,13 @@ type StartExecutionInput struct { Input *string `locationName:"input" type:"string" sensitive:"true"` // The name of the execution. This name must be unique for your AWS account, - // region, and state machine for 90 days. For more information, see Limits - // Related to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) + // region, and state machine for 90 days. For more information, see Limits Related + // to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) // in the AWS Step Functions Developer Guide. // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -5035,7 +5192,7 @@ type StateExitedEventDetails struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -5087,7 +5244,7 @@ type StateMachineListItem struct { // // A name must not contain: // - // * whitespace + // * white space // // * brackets < > { } [ ] // @@ -5220,6 +5377,14 @@ func (s *StopExecutionOutput) SetStopDate(v time.Time) *StopExecutionOutput { // Tags are key-value pairs that can be associated with Step Functions state // machines and activities. +// +// An array of key-value pairs. For more information, see Using Cost Allocation +// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// in the AWS Billing and Cost Management User Guide, and Controlling Access +// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). +// +// Tags may only contain Unicode letters, digits, white space, or these symbols: +// _ . : / = + - @. type Tag struct { _ struct{} `type:"structure"` @@ -5275,7 +5440,7 @@ type TagResourceInput struct { // The list of tags to add to a resource. // - // Tags may only contain unicode letters, digits, whitespace, or these symbols: + // Tags may only contain Unicode letters, digits, white space, or these symbols: // _ . : / = + - @. 
// // Tags is a required field @@ -5946,12 +6111,12 @@ const ( // HistoryEventTypeActivityFailed is a HistoryEventType enum value HistoryEventTypeActivityFailed = "ActivityFailed" - // HistoryEventTypeActivityScheduleFailed is a HistoryEventType enum value - HistoryEventTypeActivityScheduleFailed = "ActivityScheduleFailed" - // HistoryEventTypeActivityScheduled is a HistoryEventType enum value HistoryEventTypeActivityScheduled = "ActivityScheduled" + // HistoryEventTypeActivityScheduleFailed is a HistoryEventType enum value + HistoryEventTypeActivityScheduleFailed = "ActivityScheduleFailed" + // HistoryEventTypeActivityStarted is a HistoryEventType enum value HistoryEventTypeActivityStarted = "ActivityStarted" @@ -5967,29 +6132,8 @@ const ( // HistoryEventTypeChoiceStateExited is a HistoryEventType enum value HistoryEventTypeChoiceStateExited = "ChoiceStateExited" - // HistoryEventTypeTaskFailed is a HistoryEventType enum value - HistoryEventTypeTaskFailed = "TaskFailed" - - // HistoryEventTypeTaskScheduled is a HistoryEventType enum value - HistoryEventTypeTaskScheduled = "TaskScheduled" - - // HistoryEventTypeTaskStartFailed is a HistoryEventType enum value - HistoryEventTypeTaskStartFailed = "TaskStartFailed" - - // HistoryEventTypeTaskStarted is a HistoryEventType enum value - HistoryEventTypeTaskStarted = "TaskStarted" - - // HistoryEventTypeTaskSubmitFailed is a HistoryEventType enum value - HistoryEventTypeTaskSubmitFailed = "TaskSubmitFailed" - - // HistoryEventTypeTaskSubmitted is a HistoryEventType enum value - HistoryEventTypeTaskSubmitted = "TaskSubmitted" - - // HistoryEventTypeTaskSucceeded is a HistoryEventType enum value - HistoryEventTypeTaskSucceeded = "TaskSucceeded" - - // HistoryEventTypeTaskTimedOut is a HistoryEventType enum value - HistoryEventTypeTaskTimedOut = "TaskTimedOut" + // HistoryEventTypeExecutionAborted is a HistoryEventType enum value + HistoryEventTypeExecutionAborted = "ExecutionAborted" // HistoryEventTypeExecutionFailed is a HistoryEventType enum value HistoryEventTypeExecutionFailed = "ExecutionFailed" @@ -6000,9 +6144,6 @@ const ( // HistoryEventTypeExecutionSucceeded is a HistoryEventType enum value HistoryEventTypeExecutionSucceeded = "ExecutionSucceeded" - // HistoryEventTypeExecutionAborted is a HistoryEventType enum value - HistoryEventTypeExecutionAborted = "ExecutionAborted" - // HistoryEventTypeExecutionTimedOut is a HistoryEventType enum value HistoryEventTypeExecutionTimedOut = "ExecutionTimedOut" @@ -6012,44 +6153,53 @@ const ( // HistoryEventTypeLambdaFunctionFailed is a HistoryEventType enum value HistoryEventTypeLambdaFunctionFailed = "LambdaFunctionFailed" - // HistoryEventTypeLambdaFunctionScheduleFailed is a HistoryEventType enum value - HistoryEventTypeLambdaFunctionScheduleFailed = "LambdaFunctionScheduleFailed" - // HistoryEventTypeLambdaFunctionScheduled is a HistoryEventType enum value HistoryEventTypeLambdaFunctionScheduled = "LambdaFunctionScheduled" - // HistoryEventTypeLambdaFunctionStartFailed is a HistoryEventType enum value - HistoryEventTypeLambdaFunctionStartFailed = "LambdaFunctionStartFailed" + // HistoryEventTypeLambdaFunctionScheduleFailed is a HistoryEventType enum value + HistoryEventTypeLambdaFunctionScheduleFailed = "LambdaFunctionScheduleFailed" // HistoryEventTypeLambdaFunctionStarted is a HistoryEventType enum value HistoryEventTypeLambdaFunctionStarted = "LambdaFunctionStarted" + // HistoryEventTypeLambdaFunctionStartFailed is a HistoryEventType enum value + HistoryEventTypeLambdaFunctionStartFailed = 
"LambdaFunctionStartFailed" + // HistoryEventTypeLambdaFunctionSucceeded is a HistoryEventType enum value HistoryEventTypeLambdaFunctionSucceeded = "LambdaFunctionSucceeded" // HistoryEventTypeLambdaFunctionTimedOut is a HistoryEventType enum value HistoryEventTypeLambdaFunctionTimedOut = "LambdaFunctionTimedOut" - // HistoryEventTypeSucceedStateEntered is a HistoryEventType enum value - HistoryEventTypeSucceedStateEntered = "SucceedStateEntered" + // HistoryEventTypeMapIterationAborted is a HistoryEventType enum value + HistoryEventTypeMapIterationAborted = "MapIterationAborted" - // HistoryEventTypeSucceedStateExited is a HistoryEventType enum value - HistoryEventTypeSucceedStateExited = "SucceedStateExited" + // HistoryEventTypeMapIterationFailed is a HistoryEventType enum value + HistoryEventTypeMapIterationFailed = "MapIterationFailed" - // HistoryEventTypeTaskStateAborted is a HistoryEventType enum value - HistoryEventTypeTaskStateAborted = "TaskStateAborted" + // HistoryEventTypeMapIterationStarted is a HistoryEventType enum value + HistoryEventTypeMapIterationStarted = "MapIterationStarted" - // HistoryEventTypeTaskStateEntered is a HistoryEventType enum value - HistoryEventTypeTaskStateEntered = "TaskStateEntered" + // HistoryEventTypeMapIterationSucceeded is a HistoryEventType enum value + HistoryEventTypeMapIterationSucceeded = "MapIterationSucceeded" - // HistoryEventTypeTaskStateExited is a HistoryEventType enum value - HistoryEventTypeTaskStateExited = "TaskStateExited" + // HistoryEventTypeMapStateAborted is a HistoryEventType enum value + HistoryEventTypeMapStateAborted = "MapStateAborted" - // HistoryEventTypePassStateEntered is a HistoryEventType enum value - HistoryEventTypePassStateEntered = "PassStateEntered" + // HistoryEventTypeMapStateEntered is a HistoryEventType enum value + HistoryEventTypeMapStateEntered = "MapStateEntered" - // HistoryEventTypePassStateExited is a HistoryEventType enum value - HistoryEventTypePassStateExited = "PassStateExited" + // HistoryEventTypeMapStateExited is a HistoryEventType enum value + HistoryEventTypeMapStateExited = "MapStateExited" + + // HistoryEventTypeMapStateFailed is a HistoryEventType enum value + HistoryEventTypeMapStateFailed = "MapStateFailed" + + // HistoryEventTypeMapStateStarted is a HistoryEventType enum value + HistoryEventTypeMapStateStarted = "MapStateStarted" + + // HistoryEventTypeMapStateSucceeded is a HistoryEventType enum value + HistoryEventTypeMapStateSucceeded = "MapStateSucceeded" // HistoryEventTypeParallelStateAborted is a HistoryEventType enum value HistoryEventTypeParallelStateAborted = "ParallelStateAborted" @@ -6069,6 +6219,51 @@ const ( // HistoryEventTypeParallelStateSucceeded is a HistoryEventType enum value HistoryEventTypeParallelStateSucceeded = "ParallelStateSucceeded" + // HistoryEventTypePassStateEntered is a HistoryEventType enum value + HistoryEventTypePassStateEntered = "PassStateEntered" + + // HistoryEventTypePassStateExited is a HistoryEventType enum value + HistoryEventTypePassStateExited = "PassStateExited" + + // HistoryEventTypeSucceedStateEntered is a HistoryEventType enum value + HistoryEventTypeSucceedStateEntered = "SucceedStateEntered" + + // HistoryEventTypeSucceedStateExited is a HistoryEventType enum value + HistoryEventTypeSucceedStateExited = "SucceedStateExited" + + // HistoryEventTypeTaskFailed is a HistoryEventType enum value + HistoryEventTypeTaskFailed = "TaskFailed" + + // HistoryEventTypeTaskScheduled is a HistoryEventType enum value + 
HistoryEventTypeTaskScheduled = "TaskScheduled" + + // HistoryEventTypeTaskStarted is a HistoryEventType enum value + HistoryEventTypeTaskStarted = "TaskStarted" + + // HistoryEventTypeTaskStartFailed is a HistoryEventType enum value + HistoryEventTypeTaskStartFailed = "TaskStartFailed" + + // HistoryEventTypeTaskStateAborted is a HistoryEventType enum value + HistoryEventTypeTaskStateAborted = "TaskStateAborted" + + // HistoryEventTypeTaskStateEntered is a HistoryEventType enum value + HistoryEventTypeTaskStateEntered = "TaskStateEntered" + + // HistoryEventTypeTaskStateExited is a HistoryEventType enum value + HistoryEventTypeTaskStateExited = "TaskStateExited" + + // HistoryEventTypeTaskSubmitFailed is a HistoryEventType enum value + HistoryEventTypeTaskSubmitFailed = "TaskSubmitFailed" + + // HistoryEventTypeTaskSubmitted is a HistoryEventType enum value + HistoryEventTypeTaskSubmitted = "TaskSubmitted" + + // HistoryEventTypeTaskSucceeded is a HistoryEventType enum value + HistoryEventTypeTaskSucceeded = "TaskSucceeded" + + // HistoryEventTypeTaskTimedOut is a HistoryEventType enum value + HistoryEventTypeTaskTimedOut = "TaskTimedOut" + // HistoryEventTypeWaitStateAborted is a HistoryEventType enum value HistoryEventTypeWaitStateAborted = "WaitStateAborted" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/doc.go index d1faff58d7e..f3880014fdd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/doc.go @@ -20,7 +20,7 @@ // own servers, or any system that has access to AWS. You can access and use // Step Functions using the console, the AWS SDKs, or an HTTP API. For more // information about Step Functions, see the AWS Step Functions Developer Guide -// (https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html). +// (https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html) . // // See https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go index f73333973a5..b05b4021a24 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go @@ -91,7 +91,7 @@ const ( // ErrCodeResourceNotFound for service response error code // "ResourceNotFound". // - // Could not fine the referenced resource. Only state machine and activity ARNs + // Could not find the referenced resource. Only state machine and activity ARNs // are supported. ErrCodeResourceNotFound = "ResourceNotFound" @@ -132,7 +132,7 @@ const ( // ErrCodeTooManyTags for service response error code // "TooManyTags". // - // You've exceeded the number of tags allowed for a resource. See the Limits + // You've exceeded the number of tags allowed for a resource. See the Limits // Topic (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html) // in the AWS Step Functions Developer Guide. 
ErrCodeTooManyTags = "TooManyTags" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go index 2436268f075..f21c6d9d82a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go @@ -46,11 +46,11 @@ const ( // svc := sfn.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SFN { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SFN { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SFN { svc := &SFN{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-23", JSONVersion: "1.0", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/api.go index 10411bdb738..aa23ff9860e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/api.go @@ -59,8 +59,8 @@ func (c *Shield) AssociateDRTLogBucketRequest(input *AssociateDRTLogBucketInput) // AssociateDRTLogBucket API operation for AWS Shield. // // Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 -// bucket containing your flow logs. You can associate up to 10 Amazon S3 buckets -// with your subscription. +// bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 +// buckets with your subscription. // // To use the services of the DRT and make an AssociateDRTLogBucket request, // you must be subscribed to the Business Support plan (https://aws.amazon.com/premiumsupport/business-support/) @@ -187,8 +187,8 @@ func (c *Shield) AssociateDRTRoleRequest(input *AssociateDRTRoleInput) (req *req // Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy // (https://console.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) // managed policy to the role you will specify in the request. For more information -// see Attaching and Detaching IAM Policies ( https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html). -// The role must also trust the service principal drt.shield.amazonaws.com. +// see Attaching and Detaching IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html). +// The role must also trust the service principal drt.shield.amazonaws.com. // For more information, see IAM JSON Policy Elements: Principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html). // // The DRT will have access only to your AWS WAF and Shield resources. 
By submitting @@ -424,6 +424,10 @@ func (c *Shield) CreateSubscriptionRequest(input *CreateSubscriptionInput) (req // a suspected DDoS attack. For more information see Authorize the DDoS Response // Team to Create Rules and Web ACLs on Your Behalf (https://docs.aws.amazon.com/waf/latest/developerguide/authorize-DRT.html). // +// To use the services of the DRT, you must be subscribed to the Business Support +// plan (https://aws.amazon.com/premiumsupport/business-support/) or the Enterprise +// Support plan (https://aws.amazon.com/premiumsupport/enterprise-support/). +// // When you initially create a subscription, your subscription is set to be automatically // renewed at the end of the existing subscription period. You can change this // by submitting an UpdateSubscription request. @@ -1122,7 +1126,7 @@ func (c *Shield) DisassociateDRTLogBucketRequest(input *DisassociateDRTLogBucket // DisassociateDRTLogBucket API operation for AWS Shield. // // Removes the DDoS Response team's (DRT) access to the specified Amazon S3 -// bucket containing your flow logs. +// bucket containing your AWS WAF logs. // // To make a DisassociateDRTLogBucket request, you must be subscribed to the // Business Support plan (https://aws.amazon.com/premiumsupport/business-support/) @@ -1732,7 +1736,7 @@ func (c *Shield) UpdateSubscriptionWithContext(ctx aws.Context, input *UpdateSub type AssociateDRTLogBucketInput struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket that contains your flow logs. + // The Amazon S3 bucket that contains your AWS WAF logs. // // LogBucket is a required field LogBucket *string `min:"3" type:"string" required:"true"` @@ -1793,7 +1797,7 @@ type AssociateDRTRoleInput struct { // Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy // (https://console.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) // managed policy to this role. For more information see Attaching and Detaching - // IAM Policies ( https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html). + // IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html). // // RoleArn is a required field RoleArn *string `min:"1" type:"string" required:"true"` @@ -1939,11 +1943,14 @@ func (s *AttackDetail) SetSubResources(v []*SubResourceSummary) *AttackDetail { type AttackProperty struct { _ struct{} `type:"structure"` - // The type of DDoS event that was observed. NETWORK indicates layer 3 and layer - // 4 events and APPLICATION indicates layer 7 events. + // The type of distributed denial of service (DDoS) event that was observed. + // NETWORK indicates layer 3 and layer 4 events and APPLICATION indicates layer + // 7 events. AttackLayer *string `type:"string" enum:"AttackLayer"` - // Defines the DDoS attack property information that is provided. + // Defines the DDoS attack property information that is provided. The WORDPRESS_PINGBACK_REFLECTOR + // and WORDPRESS_PINGBACK_SOURCE values are valid only for WordPress reflective +// pingback DDoS attacks.
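The expanded AttackProperty documentation above, including the new WordPress pingback identifiers, surfaces through DescribeAttack. A sketch that prints each property of a known attack; the attack ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/shield"
)

func main() {
	svc := shield.New(session.Must(session.NewSession()))

	out, err := svc.DescribeAttack(&shield.DescribeAttackInput{
		AttackId: aws.String("a1b2c3d4-5678-90ab-cdef-EXAMPLE11111"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Attack.AttackProperties {
		// AttackLayer is NETWORK or APPLICATION; the identifier may now be
		// WORDPRESS_PINGBACK_REFLECTOR or WORDPRESS_PINGBACK_SOURCE.
		fmt.Printf("%s/%s total=%d %s\n",
			aws.StringValue(p.AttackLayer),
			aws.StringValue(p.AttackPropertyIdentifier),
			aws.Int64Value(p.Total),
			aws.StringValue(p.Unit))
		for _, c := range p.TopContributors {
			fmt.Printf("  %s: %d\n", aws.StringValue(c.Name), aws.Int64Value(c.Value))
		}
	}
}
```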
AttackPropertyIdentifier *string `type:"string" enum:"AttackPropertyIdentifier"` // The array of Contributor objects that includes the top five contributors @@ -2096,6 +2103,12 @@ type AttackVectorDescription struct { // // * REQUEST_FLOOD // + // * HTTP_REFLECTION + // + // * UDS_REFLECTION + // + // * MEMCACHED_REFLECTION + // // VectorType is a required field VectorType *string `type:"string" required:"true"` } @@ -2624,7 +2637,7 @@ func (s *DescribeSubscriptionOutput) SetSubscription(v *Subscription) *DescribeS type DisassociateDRTLogBucketInput struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket that contains your flow logs. + // The Amazon S3 bucket that contains your AWS WAF logs. // // LogBucket is a required field LogBucket *string `min:"3" type:"string" required:"true"` @@ -3486,6 +3499,12 @@ const ( // AttackPropertyIdentifierSourceUserAgent is a AttackPropertyIdentifier enum value AttackPropertyIdentifierSourceUserAgent = "SOURCE_USER_AGENT" + + // AttackPropertyIdentifierWordpressPingbackReflector is a AttackPropertyIdentifier enum value + AttackPropertyIdentifierWordpressPingbackReflector = "WORDPRESS_PINGBACK_REFLECTOR" + + // AttackPropertyIdentifierWordpressPingbackSource is a AttackPropertyIdentifier enum value + AttackPropertyIdentifierWordpressPingbackSource = "WORDPRESS_PINGBACK_SOURCE" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/service.go index b7a62ef92f6..499dea15df5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/shield/service.go @@ -46,11 +46,11 @@ const ( // svc := shield.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Shield { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Shield { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Shield { svc := &Shield{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-06-02", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go index 8304b0635e0..a00895099ba 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go @@ -62,23 +62,24 @@ func (c *SimpleDB) BatchDeleteAttributesRequest(input *BatchDeleteAttributesInpu // If you specify BatchDeleteAttributes without attributes or values, all the // attributes for the item are deleted. 
// -// BatchDeleteAttributes is an idempotent operation; running it multiple times +// BatchDeleteAttributes is an idempotent operation; running it multiple times // on the same item or attribute doesn't result in an error. // -// The BatchDeleteAttributes operation succeeds or fails in its entirety. There +// The BatchDeleteAttributes operation succeeds or fails in its entirety. There // are no partial deletes. You can execute multiple BatchDeleteAttributes operations // and other operations in parallel. However, large numbers of concurrent BatchDeleteAttributes // calls can result in Service Unavailable (503) responses. // -// This operation is vulnerable to exceeding the maximum URL size when making +// This operation is vulnerable to exceeding the maximum URL size when making // a REST request using the HTTP GET method. // -// This operation does not support conditions using Expected.X.Name, Expected.X.Value, +// This operation does not support conditions using Expected.X.Name, Expected.X.Value, // or Expected.X.Exists. // -// The following limitations are enforced for this operation: 1 MB request size +// The following limitations are enforced for this operation: +// * 1 MB request size // -// 25 item limit per BatchDeleteAttributes operation +// * 25 item limit per BatchDeleteAttributes operation // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -177,22 +178,33 @@ func (c *SimpleDB) BatchPutAttributesRequest(input *BatchPutAttributesInput) (re // a BatchPutAttributes of {'I', 'b', '4' } with the Replace parameter set to // true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' // }, replacing the previous values of the 'b' attribute with the new value. -// -// You cannot specify an empty string as an item or as an attribute name. The -// BatchPutAttributes operation succeeds or fails in its entirety. There are -// no partial puts. This operation is vulnerable to exceeding the maximum URL size when making -// a REST request using the HTTP GET method. This operation does not support -// conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists. +// You cannot specify an empty string as an item or as an attribute name. +// The +// BatchPutAttributes +// operation succeeds or fails in its entirety. There are no partial puts. +// This operation is vulnerable to exceeding the maximum URL size when making +// a REST request using the HTTP GET method. This operation does not support +// conditions using +// Expected.X.Name +// , +// Expected.X.Value +// , or +// Expected.X.Exists +// . // You can execute multiple BatchPutAttributes operations and other operations // in parallel. However, large numbers of concurrent BatchPutAttributes calls // can result in Service Unavailable (503) responses. // -// The following limitations are enforced for this operation: 256 attribute -// name-value pairs per item -// 1 MB request size -// 1 billion attributes per domain -// 10 GB of total user data storage per domain -// 25 item limit per BatchPutAttributes operation +// The following limitations are enforced for this operation: +// * 256 attribute name-value pairs per item +// +// * 1 MB request size +// +// * 1 billion attributes per domain +// +// * 10 GB of total user data storage per domain +// +// * 25 item limit per BatchPutAttributes operation // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -296,11 +308,11 @@ func (c *SimpleDB) CreateDomainRequest(input *CreateDomainInput) (req *request.R // The CreateDomain operation creates a new domain. The domain name should be // unique among the domains associated with the Access Key ID provided in the // request. The CreateDomain operation may take 10 or more seconds to complete. +// CreateDomain is an idempotent operation; running it multiple times using +// the same domain name will not result in an error response. +// The client can create up to 100 domains per account. // -// CreateDomain is an idempotent operation; running it multiple times using -// the same domain name will not result in an error response. The client can create up to 100 domains per account. // -// If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/ +// If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/ // (http://aws.amazon.com/contact-us/simpledb-limit-request/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -386,9 +398,11 @@ func (c *SimpleDB) DeleteAttributesRequest(input *DeleteAttributesInput) (req *r // // Deletes one or more attributes associated with an item. If all attributes // of the item are deleted, the item is deleted. -// -// If DeleteAttributes is called without being passed any attributes or values -// specified, all the attributes for the item are deleted. DeleteAttributes is an idempotent operation; running it multiple times on +// If DeleteAttributes is called without being passed any attributes or values +// specified, all the attributes for the item are deleted. +// DeleteAttributes is an idempotent operation; running it multiple times on // the same item or attribute does not result in an error response. // // Because Amazon SimpleDB makes multiple copies of item data and uses an eventual @@ -483,9 +497,10 @@ func (c *SimpleDB) DeleteDomainRequest(input *DeleteDomainInput) (req *request.R // The DeleteDomain operation deletes a domain. Any items (and their attributes) // in the domain are deleted as well. The DeleteDomain operation might take // 10 or more seconds to complete. -// -// Running DeleteDomain on a domain that does not exist or running the function -// multiple times using the same domain name will not result in an error response. +// Running DeleteDomain on a domain that does not exist or running the function +// multiple times using the same domain name will not result in an error response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -649,9 +664,8 @@ func (c *SimpleDB) GetAttributesRequest(input *GetAttributesInput) (req *request // If the item does not exist on the replica that was accessed for this operation, // an empty set is returned. The system does not return an error as it cannot // guarantee the item does not exist on other replicas. -// -// If GetAttributes is called without being passed any attribute names, all -// the attributes for the item are returned. +// If GetAttributes is called without being passed any attribute names, all +// the attributes for the item are returned. // // Returns awserr.Error for service API and SDK errors.
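Every operation in this file reports failures the same way, so one hedged sketch of the recommended error handling is enough; the helper below is illustrative and not part of the SDK.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// inspectErr demonstrates the runtime type assertions these doc comments
// recommend for errors returned by any of the SimpleDB operations.
func inspectErr(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
		// Request failures additionally carry the HTTP status and request ID.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println("status:", reqErr.StatusCode(), "request id:", reqErr.RequestID())
		}
		return
	}
	fmt.Println("non-API error:", err)
}

func main() {
	inspectErr(awserr.New("MissingError", "missing error code in SimpleDB XML error response", nil))
}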
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -792,7 +806,7 @@ func (c *SimpleDB) ListDomainsWithContext(ctx aws.Context, input *ListDomainsInp // // Example iterating over at most 3 pages of a ListDomains operation. // pageNum := 0 // err := client.ListDomainsPages(params, -// func(page *ListDomainsOutput, lastPage bool) bool { +// func(page *simpledb.ListDomainsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -824,10 +838,12 @@ func (c *SimpleDB) ListDomainsPagesWithContext(ctx aws.Context, input *ListDomai }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -892,19 +908,23 @@ func (c *SimpleDB) PutAttributesRequest(input *PutAttributesInput) (req *request // using the attributes { 'b', '4' } with the Replace parameter set to true, // the final attributes of the item are changed to { 'a', '1' } and { 'b', '4' // }, which replaces the previous values of the 'b' attribute with the new value. -// -// Using PutAttributes to replace attribute values that do not exist will not -// result in an error response. You cannot specify an empty string as an attribute name. +// Using PutAttributes to replace attribute values that do not exist will not +// result in an error response. +// You cannot specify an empty string as an attribute name. // // Because Amazon SimpleDB makes multiple copies of client data and uses an // eventual consistency update model, an immediate GetAttributes or Select operation // (read) immediately after a PutAttributes or DeleteAttributes operation (write) // might not return the updated data. // -// The following limitations are enforced for this operation: 256 total attribute -// name-value pairs per item -// One billion attributes per domain -// 10 GB of total user data storage per domain +// The following limitations are enforced for this operation: +// * 256 total attribute name-value pairs per item +// +// * One billion attributes per domain +// +// * 10 GB of total user data storage per domain // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1084,7 +1104,7 @@ func (c *SimpleDB) SelectWithContext(ctx aws.Context, input *SelectInput, opts . // // Example iterating over at most 3 pages of a Select operation. // pageNum := 0 // err := client.SelectPages(params, -// func(page *SelectOutput, lastPage bool) bool { +// func(page *simpledb.SelectOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1116,10 +1136,12 @@ func (c *SimpleDB) SelectPagesWithContext(ctx aws.Context, input *SelectInput, f }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*SelectOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*SelectOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1753,7 +1775,11 @@ type GetAttributesInput struct { AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // Determines whether or not strong consistency should be enforced when data - // is read from SimpleDB. If true + // is read from SimpleDB. If true, any data previously written to SimpleDB will be returned.
Otherwise, results + // will be consistent eventually, and the client may not see data that was written + // immediately before your read. ConsistentRead *bool `type:"boolean"` // The name of the domain in which to perform the operation. @@ -1925,7 +1951,8 @@ type ListDomainsOutput struct { DomainNames []*string `locationNameList:"DomainName" type:"list" flattened:"true"` // An opaque token indicating that there are more domains than the specified - // MaxNumberOfDomains + // MaxNumberOfDomains still available. NextToken *string `type:"string"` } @@ -2061,7 +2088,9 @@ type ReplaceableAttribute struct { Name *string `type:"string" required:"true"` // A flag specifying whether or not to replace the attribute/value pair or to - // add a new attribute/value pair. The default setting is false + // add a new attribute/value pair. The default setting is false. Replace *bool `type:"boolean"` // The value of the replaceable attribute. @@ -2180,10 +2209,16 @@ type SelectInput struct { _ struct{} `type:"structure"` // Determines whether or not strong consistency should be enforced when data - // is read from SimpleDB. If true + // is read from SimpleDB. If true, any data previously written to SimpleDB will + // be returned. Otherwise, results will be consistent eventually, and the client + // may not see data that was written immediately before your read. ConsistentRead *bool `type:"boolean"` - // A string informing Amazon SimpleDB where to start the next list of ItemNames + // A string informing Amazon SimpleDB where to start the next list of ItemNames. NextToken *string `type:"string"` // The expression used to query the domain. @@ -2239,7 +2274,10 @@ type SelectOutput struct { // A list of items that match the select expression. Items []*Item `locationNameList:"Item" type:"list" flattened:"true"` - // An opaque token indicating that more items than MaxNumberOfItems + // An opaque token indicating that more items than MaxNumberOfItems were matched, + // the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds. NextToken *string `type:"string"` } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/doc.go index f0d59281d71..f1cb0133dfd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/doc.go @@ -6,15 +6,15 @@ // Amazon SimpleDB is a web service providing the core database functions of // data indexing and querying in the cloud. By offloading the time and effort // associated with building and operating a web-scale database, SimpleDB provides -// developers the freedom to focus on application development. A traditional, -// clustered relational database requires a sizable upfront capital outlay, -// is complex to design, and often requires extensive and repetitive database -// administration. Amazon SimpleDB is dramatically simpler, requiring no schema, -// automatically indexing your data and providing a simple API for storage and -// access. This approach eliminates the administrative burden of data modeling, -// index maintenance, and performance tuning. Developers gain access to this -// functionality within Amazon's proven computing environment, are able to scale -// instantly, and pay only for what they use. +// developers the freedom to focus on application development.
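A hedged sketch of the ConsistentRead flag documented in the structs above, trading read latency for read-after-write consistency; the domain and item names are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	svc := simpledb.New(session.Must(session.NewSession()))

	// ConsistentRead=true returns any data previously written; leaving it
	// unset gives the eventually consistent behavior described above.
	out, err := svc.GetAttributes(&simpledb.GetAttributesInput{
		DomainName:     aws.String("MyDomain"), // hypothetical
		ItemName:       aws.String("item-1"),   // hypothetical
		ConsistentRead: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("GetAttributes failed:", err)
		return
	}
	for _, a := range out.Attributes {
		fmt.Printf("%s = %s\n", aws.StringValue(a.Name), aws.StringValue(a.Value))
	}
}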
+// A traditional, clustered relational database requires a sizable upfront capital +// outlay, is complex to design, and often requires extensive and repetitive +// database administration. Amazon SimpleDB is dramatically simpler, requiring +// no schema, automatically indexing your data and providing a simple API for +// storage and access. This approach eliminates the administrative burden of +// data modeling, index maintenance, and performance tuning. Developers gain +// access to this functionality within Amazon's proven computing environment, +// are able to scale instantly, and pay only for what they use. // // Visit http://aws.amazon.com/simpledb/ (http://aws.amazon.com/simpledb/) for // more information. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go index d4de27413cb..75fd3d60108 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -47,11 +47,11 @@ const ( // svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SimpleDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SimpleDB { svc := &SimpleDB{ Client: client.New( cfg, @@ -60,6 +60,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-04-15", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go index acc8a86eb7c..f64f6cc19f2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go @@ -8,19 +8,45 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorDetail struct { Code string `xml:"Code"` Message string `xml:"Message"` } - -type xmlErrorResponse struct { +type xmlErrorMessage struct { XMLName xml.Name `xml:"Response"` Errors []xmlErrorDetail `xml:"Errors>Error"` RequestID string `xml:"RequestID"` } +type xmlErrorResponse struct { + Code string + Message string + RequestID string + OtherErrors []xmlErrorDetail +} + +func (r *xmlErrorResponse) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var errResp xmlErrorMessage + if err := d.DecodeElement(&errResp, &start); err != nil { + return err + } + + r.RequestID = errResp.RequestID + if len(errResp.Errors) == 0 { + r.Code = "MissingError" + r.Message = "missing error code in SimpleDB XML error response" + } else { + r.Code = 
errResp.Errors[0].Code + r.Message = errResp.Errors[0].Message + r.OtherErrors = errResp.Errors[1:] + } + + return nil +} + func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) @@ -30,24 +56,32 @@ func unmarshalError(r *request.Request) { r.Error = awserr.NewRequestFailure( awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), r.HTTPResponse.StatusCode, - "", + r.RequestID, ) return } - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode SimpleDB XML error response", nil) - } else if len(resp.Errors) == 0 { - r.Error = awserr.New("MissingError", "missing error code in SimpleDB XML error response", nil) - } else { - // If there are multiple error codes, return only the first as the aws.Error interface only supports - // one error code. + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(resp.Errors[0].Code, resp.Errors[0].Message, nil), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, - resp.RequestID, + r.RequestID, ) + return + } + + var otherErrs []error + for _, e := range errResp.OtherErrors { + otherErrs = append(otherErrs, awserr.New(e.Code, e.Message, nil)) } + + // If there are multiple error codes, return only the first as the + // aws.Error interface only supports one error code. + r.Error = awserr.NewRequestFailure( + awserr.NewBatchError(errResp.Code, errResp.Message, otherErrs), + r.HTTPResponse.StatusCode, + errResp.RequestID, + ) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 316ee2f9298..94783de2c2b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -1467,7 +1467,7 @@ func (c *SNS) ListEndpointsByPlatformApplicationWithContext(ctx aws.Context, inp // // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. // pageNum := 0 // err := client.ListEndpointsByPlatformApplicationPages(params, -// func(page *ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { +// func(page *sns.ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1499,10 +1499,12 @@ func (c *SNS) ListEndpointsByPlatformApplicationPagesWithContext(ctx aws.Context }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListEndpointsByPlatformApplicationOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListEndpointsByPlatformApplicationOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1714,7 +1716,7 @@ func (c *SNS) ListPlatformApplicationsWithContext(ctx aws.Context, input *ListPl // // Example iterating over at most 3 pages of a ListPlatformApplications operation. 
// pageNum := 0 // err := client.ListPlatformApplicationsPages(params, -// func(page *ListPlatformApplicationsOutput, lastPage bool) bool { +// func(page *sns.ListPlatformApplicationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1746,10 +1748,12 @@ func (c *SNS) ListPlatformApplicationsPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPlatformApplicationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPlatformApplicationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1860,7 +1864,7 @@ func (c *SNS) ListSubscriptionsWithContext(ctx aws.Context, input *ListSubscript // // Example iterating over at most 3 pages of a ListSubscriptions operation. // pageNum := 0 // err := client.ListSubscriptionsPages(params, -// func(page *ListSubscriptionsOutput, lastPage bool) bool { +// func(page *sns.ListSubscriptionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1892,10 +1896,12 @@ func (c *SNS) ListSubscriptionsPagesWithContext(ctx aws.Context, input *ListSubs }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSubscriptionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSubscriptionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2009,7 +2015,7 @@ func (c *SNS) ListSubscriptionsByTopicWithContext(ctx aws.Context, input *ListSu // // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. // pageNum := 0 // err := client.ListSubscriptionsByTopicPages(params, -// func(page *ListSubscriptionsByTopicOutput, lastPage bool) bool { +// func(page *sns.ListSubscriptionsByTopicOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2041,10 +2047,12 @@ func (c *SNS) ListSubscriptionsByTopicPagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListSubscriptionsByTopicOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListSubscriptionsByTopicOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2249,7 +2257,7 @@ func (c *SNS) ListTopicsWithContext(ctx aws.Context, input *ListTopicsInput, opt // // Example iterating over at most 3 pages of a ListTopics operation. // pageNum := 0 // err := client.ListTopicsPages(params, -// func(page *ListTopicsOutput, lastPage bool) bool { +// func(page *sns.ListTopicsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2281,10 +2289,12 @@ func (c *SNS) ListTopicsPagesWithContext(ctx aws.Context, input *ListTopicsInput }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTopicsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTopicsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3656,8 +3666,8 @@ type CheckIfPhoneNumberIsOptedOutOutput struct { // * true – The phone number is opted out, meaning you cannot publish SMS // messages to it. // - // * false – The phone number is opted in, meaning you can publish SMS messages - // to it. + // * false – The phone number is opted in, meaning you can publish SMS + // messages to it. IsOptedOut *bool `locationName:"isOptedOut" type:"boolean"` } @@ -4522,9 +4532,9 @@ type GetSubscriptionAttributesOutput struct { // * DeliveryPolicy – The JSON serialization of the subscription's delivery // policy. 
// - // * EffectiveDeliveryPolicy – The JSON serialization of the effective delivery - // policy that takes into account the topic delivery policy and account system - // defaults. + // * EffectiveDeliveryPolicy – The JSON serialization of the effective + // delivery policy that takes into account the topic delivery policy and + // account system defaults. // // * FilterPolicy – The filter policy JSON that is assigned to the subscription. // @@ -4611,21 +4621,22 @@ type GetTopicAttributesOutput struct { // // * Policy – the JSON serialization of the topic's access control policy // - // * DisplayName – the human-readable name used in the "From" field for notifications - // to email and email-json endpoints + // * DisplayName – the human-readable name used in the "From" field for + // notifications to email and email-json endpoints // // * SubscriptionsPending – the number of subscriptions pending confirmation // on this topic // - // * SubscriptionsConfirmed – the number of confirmed subscriptions on this - // topic + // * SubscriptionsConfirmed – the number of confirmed subscriptions on + // this topic // - // * SubscriptionsDeleted – the number of deleted subscriptions on this topic + // * SubscriptionsDeleted – the number of deleted subscriptions on this + // topic // // * DeliveryPolicy – the JSON serialization of the topic's delivery policy // - // * EffectiveDeliveryPolicy – the JSON serialization of the effective delivery - // policy that takes into account system defaults + // * EffectiveDeliveryPolicy – the JSON serialization of the effective + // delivery policy that takes into account system defaults Attributes map[string]*string `type:"map"` } @@ -5285,18 +5296,16 @@ type PublishInput struct { // // Constraints: // - // With the exception of SMS, messages must be UTF-8 encoded strings and at - // most 256 KB in size (262,144 bytes, not 262,144 characters). + // * With the exception of SMS, messages must be UTF-8 encoded strings and + // at most 256 KB in size (262,144 bytes, not 262,144 characters). // // * For SMS, each message can contain up to 140 characters. This character // limit depends on the encoding schema. For example, an SMS message can // contain 160 GSM characters, 140 ASCII characters, or 70 UCS-2 characters. - // - // * If you publish a message that exceeds this size limit, Amazon SNS sends + // If you publish a message that exceeds this size limit, Amazon SNS sends // the message as multiple messages, each fitting within the size limit. // Messages aren't truncated mid-word but are cut off at whole-word boundaries. - // - // * The total size limit for a single SMS Publish action is 1,600 characters. + // The total size limit for a single SMS Publish action is 1,600 characters. // // JSON-specific constraints: // @@ -5631,9 +5640,10 @@ type SetPlatformApplicationAttributesInput struct { // A map of the platform application attributes. Attributes in this map include // the following: // - // * PlatformCredential – The credential received from the notification service. - // For APNS/APNS_SANDBOX, PlatformCredential is private key. For GCM, PlatformCredential - // is "API key". For ADM, PlatformCredential is "client secret". + // * PlatformCredential – The credential received from the notification + // service. For APNS/APNS_SANDBOX, PlatformCredential is private key. For + // GCM, PlatformCredential is "API key". For ADM, PlatformCredential is "client + // secret". // // * PlatformPrincipal – The principal received from the notification service. 
// For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. For GCM, @@ -5745,8 +5755,8 @@ type SetSMSAttributesInput struct { // Description field, explain that you are requesting an SMS monthly spend limit // increase. // - // DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS to - // write logs about SMS deliveries in CloudWatch Logs. For each SMS message + // DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS + // to write logs about SMS deliveries in CloudWatch Logs. For each SMS message // that you send, Amazon SNS writes a log that includes the message price, the // success or failure status, the reason for failure (if the message failed), // the message dwell time, and other information. @@ -5762,8 +5772,8 @@ type SetSMSAttributesInput struct { // The sender ID can be 1 - 11 alphanumeric characters, and it must contain // at least one letter. // - // DefaultSMSType – The type of SMS message that you will send by default. You - // can assign the following values: + // DefaultSMSType – The type of SMS message that you will send by default. + // You can assign the following values: // // * Promotional – (Default) Noncritical messages, such as marketing messages. // Amazon SNS optimizes the message delivery to incur the lowest cost. @@ -5772,8 +5782,8 @@ type SetSMSAttributesInput struct { // such as one-time passcodes for multi-factor authentication. Amazon SNS // optimizes the message delivery to achieve the highest reliability. // - // UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily SMS - // usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage + // UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily + // SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage // report as a CSV file to the bucket. The report includes the following information // for each SMS message that was successfully delivered by your account: // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index 96d7c8ba05c..aa8aff7d6e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -46,11 +46,11 @@ const ( // svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
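The same partitionID plumbing shown for shield and simpledb above repeats here; nothing changes for callers, since New still resolves everything from ClientConfig (now including the partition ID) and hands it to the unexported constructor that follows. A minimal sketch of the unchanged caller side, assuming a default session:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	sess := session.Must(session.NewSession())
	// Identical to the pre-refactor call; the partition ID is threaded
	// through internally via the client metadata.
	svc := sns.New(sess, aws.NewConfig().WithRegion("us-west-2"))
	_ = svc
}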
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SNS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SNS { svc := &SNS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-03-31", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index 16f82a4b28d..bc087b5b62e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -57,22 +57,25 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // AddPermission API operation for Amazon Simple Queue Service. // -// Adds a permission to a queue for a specific principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P). +// Adds a permission to a queue for a specific principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P). // This allows sharing access to the queue. // // When you create a queue, you have full control access rights for the queue. // Only you, the owner of the queue, can grant or deny permissions to the queue. // For more information about these permissions, see Allow Developers to Write -// Messages to a Shared Queue (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) +// Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) // in the Amazon Simple Queue Service Developer Guide. // -// AddPermission writes an Amazon-SQS-generated policy. If you want to write -// your own policy, use SetQueueAttributes to upload your policy. For more information -// about writing your own policy, see Using Custom Policies with the Amazon -// SQS Access Policy Language (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) -// in the Amazon Simple Queue Service Developer Guide. +// * AddPermission generates a policy for you. You can use SetQueueAttributes +// to upload your policy. For more information, see Using Custom Policies +// with the Amazon SQS Access Policy Language (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) +// in the Amazon Simple Queue Service Developer Guide. // -// An Amazon SQS policy can have a maximum of 7 actions. +// * An Amazon SQS policy can have a maximum of 7 actions. +// +// * To remove the ability to change queue permissions, you must deny permission +// to the AddPermission, RemovePermission, and SetQueueAttributes actions +// in your IAM policy. // // Some actions take lists of parameters. These lists are specified using the // param.n notation. Values of n are integers starting from 1. For example, @@ -83,7 +86,7 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // &Attribute.2=second // // Cross-account permissions don't apply to this action. 
For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -168,27 +171,46 @@ func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput // ChangeMessageVisibility API operation for Amazon Simple Queue Service. // // Changes the visibility timeout of a specified message in a queue to a new -// value. The maximum allowed timeout value is 12 hours. For more information, -// see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) +// value. The default visibility timeout for a message is 30 seconds. The minimum +// is 0 seconds. The maximum is 12 hours. For more information, see Visibility +// Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // For example, you have a message with a visibility timeout of 5 minutes. After // 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. // You can continue to call ChangeMessageVisibility to extend the visibility -// timeout to a maximum of 12 hours. If you try to extend the visibility timeout -// beyond 12 hours, your request is rejected. +// timeout to the maximum allowed time. If you try to extend the visibility +// timeout beyond the maximum, your request is rejected. +// +// An Amazon SQS message has three basic states: +// +// 1. Sent to a queue by a producer. +// +// 2. Received from the queue by a consumer. +// +// 3. Deleted from the queue. +// +// A message is considered to be stored after it is sent to a queue by a producer, +// but not yet received from the queue by a consumer (that is, between states +// 1 and 2). There is no limit to the number of stored messages. A message is +// considered to be in flight after it is received from a queue by a consumer, +// but not yet deleted from the queue (that is, between states 2 and 3). There +// is a limit to the number of inflight messages. // -// A message is considered to be in flight after it's received from a queue -// by a consumer, but not yet deleted from the queue. +// Limits that apply to inflight messages are unrelated to the unlimited number +// of stored messages. // -// For standard queues, there can be a maximum of 120,000 inflight messages -// per queue. If you reach this limit, Amazon SQS returns the OverLimit error -// message. To avoid reaching the limit, you should delete messages from the -// queue after they're processed. You can also increase the number of queues -// you use to process your messages. +// For most standard queues (depending on queue traffic and message backlog), +// there can be a maximum of approximately 120,000 inflight messages (received +// from a queue by a consumer, but not yet deleted from the queue). If you reach +// this limit, Amazon SQS returns the OverLimit error message.
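Before the guidance on avoiding the inflight limit continues below, a hedged sketch of the visibility-extension call this section describes; the queue URL and receipt handle are placeholders, and the receipt handle would come from a prior ReceiveMessage.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	svc := sqs.New(session.Must(session.NewSession()))

	// Extend the message's visibility timeout to 10 minutes, as in the
	// 5-minute/10-minute example above.
	_, err := svc.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
		QueueUrl:          aws.String("https://sqs.us-west-2.amazonaws.com/123456789012/MyQueue"), // placeholder
		ReceiptHandle:     aws.String("AQEB...placeholder-receipt-handle"),
		VisibilityTimeout: aws.Int64(600), // seconds
	})
	if err != nil {
		fmt.Println("ChangeMessageVisibility failed:", err)
	}
}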
To avoid reaching +// the limit, you should delete messages from the queue after they're processed. +// You can also increase the number of queues you use to process your messages. +// To request a limit increase, file a support request (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs). // -// For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. -// If you reach this limit, Amazon SQS returns no error messages. +// For FIFO queues, there can be a maximum of 20,000 inflight messages (received +// from a queue by a consumer, but not yet deleted from the queue). If you reach +// this limit, Amazon SQS returns no error messages. // // If you attempt to set the VisibilityTimeout to a value greater than the maximum // time left, Amazon SQS returns an error. Amazon SQS doesn't automatically @@ -388,13 +410,11 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // in the request. Keep the following caveats in mind: // // * If you don't specify the FifoQueue attribute, Amazon SQS creates a standard -// queue. -// -// You can't change the queue type after you create it and you can't convert -// an existing standard queue into a FIFO queue. You must either create a -// new FIFO queue for your application or delete your existing standard queue -// and recreate it as a FIFO queue. For more information, see Moving From -// a Standard Queue to a FIFO Queue (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) +// queue. You can't change the queue type after you create it and you can't +// convert an existing standard queue into a FIFO queue. You must either +// create a new FIFO queue for your application or delete your existing standard +// queue and recreate it as a FIFO queue. For more information, see Moving +// From a Standard Queue to a FIFO Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) // in the Amazon Simple Queue Service Developer Guide. // // * If you don't provide a value for an attribute, the queue is created @@ -404,7 +424,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // a queue with the same name. // // To successfully create a new queue, you must provide a queue name that adheres -// to the limits related to queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) +// to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) // and is unique within the scope of your queues. // // To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only @@ -426,7 +446,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // &Attribute.2=second // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. 
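To make the FIFO caveat above concrete, a hedged sketch of creating a FIFO queue; the queue name is a placeholder, and the attribute names follow the CreateQueue documentation in this file.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	svc := sqs.New(session.Must(session.NewSession()))

	// FifoQueue must be set at creation time; a standard queue cannot be
	// converted to FIFO later.
	out, err := svc.CreateQueue(&sqs.CreateQueueInput{
		QueueName: aws.String("my-queue.fifo"), // placeholder; FIFO names need the .fifo suffix
		Attributes: map[string]*string{
			"FifoQueue":                 aws.String("true"),
			"ContentBasedDeduplication": aws.String("true"),
		},
	})
	if err != nil {
		fmt.Println("CreateQueue failed:", err)
		return
	}
	fmt.Println("queue url:", aws.StringValue(out.QueueUrl))
}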
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -732,7 +752,7 @@ func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, // a queue with the same name. // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -809,7 +829,7 @@ func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *re // // Gets attributes for the specified queue. // -// To determine whether a queue is FIFO (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), +// To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // // Some actions take lists of parameters. These lists are specified using the @@ -903,7 +923,7 @@ func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, // parameter to specify the account ID of the queue's owner. The queue's owner // must grant you permission to access the queue. For more information about // shared queue access, see AddPermission or see Allow Developers to Write Messages -// to a Shared Queue (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) +// to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -987,7 +1007,7 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue // configured with a dead-letter queue. // // For more information about using dead-letter queues, see Using Amazon SQS -// Dead-Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) +// Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1068,29 +1088,11 @@ func (c *SQS) ListQueueTagsRequest(input *ListQueueTagsInput) (req *request.Requ // ListQueueTags API operation for Amazon Simple Queue Service. // // List all cost allocation tags added to the specified Amazon SQS queue. For -// an overview, see Tagging Your Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// When you use queue tags, keep the following guidelines in mind: -// -// * Adding more than 50 tags to a queue isn't recommended. -// -// * Tags don't have any semantic meaning. 
Amazon SQS interprets tags as -// character strings. -// -// * Tags are case-sensitive. -// -// * A new tag with a key identical to that of an existing tag overwrites -// the existing tag. -// -// * Tagging actions are limited to 5 TPS per AWS account. If your application -// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). -// -// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) +// an overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) // in the Amazon Simple Queue Service Developer Guide. // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1170,7 +1172,7 @@ func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, o // only queues with a name that begins with the specified value are returned. // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1344,7 +1346,7 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re // // Retrieves one or more messages (up to 10), from the specified queue. Using // the WaitTimeSeconds parameter enables long-poll support. For more information, -// see Amazon SQS Long Polling (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) +// see Amazon SQS Long Polling (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) // in the Amazon Simple Queue Service Developer Guide. // // Short poll is the default behavior where a weighted random set of machines @@ -1371,14 +1373,14 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re // * An MD5 digest of the message attributes. // // The receipt handle is the identifier you must provide when deleting the message. 
-// For more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) +// For more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) // in the Amazon Simple Queue Service Developer Guide. // // You can provide the VisibilityTimeout parameter in your request. The parameter // is applied to the messages that Amazon SQS returns in the response. If you // don't include the parameter, the overall visibility timeout for the queue // is used for the returned messages. For more information, see Visibility Timeout -// (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) +// (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // A message that isn't deleted or a message whose visibility isn't extended @@ -1474,11 +1476,15 @@ func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *reques // Revokes any permissions in the queue policy that matches the specified Label // parameter. // -// Only the owner of a queue can remove permissions from it. +// * Only the owner of a queue can remove permissions from it. // -// Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// * Cross-account permissions don't apply to this action. For more information, +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// in the Amazon Simple Queue Service Developer Guide. +// +// * To remove the ability to change queue permissions, you must deny permission +// to the AddPermission, RemovePermission, and SetQueueAttributes actions +// in your IAM policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1772,13 +1778,17 @@ func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *re // to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod // attribute can take up to 15 minutes. // -// In the future, new attributes might be added. If you write code that calls -// this action, we recommend that you structure your code so that it can handle -// new attributes gracefully. +// * In the future, new attributes might be added. If you write code that +// calls this action, we recommend that you structure your code so that it +// can handle new attributes gracefully. // -// Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. 
+// * Cross-account permissions don't apply to this action. For more information, +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// in the Amazon Simple Queue Service Developer Guide. +// +// * To remove the ability to change queue permissions, you must deny permission +// to the AddPermission, RemovePermission, and SetQueueAttributes actions +// in your IAM policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1859,7 +1869,7 @@ func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, outpu // TagQueue API operation for Amazon Simple Queue Service. // // Add cost allocation tags to the specified Amazon SQS queue. For an overview, -// see Tagging Your Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) +// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) // in the Amazon Simple Queue Service Developer Guide. // // When you use queue tags, keep the following guidelines in mind: @@ -1874,14 +1884,11 @@ func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, outpu // * A new tag with a key identical to that of an existing tag overwrites // the existing tag. // -// * Tagging actions are limited to 5 TPS per AWS account. If your application -// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). -// -// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) +// For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) // in the Amazon Simple Queue Service Developer Guide. // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1958,29 +1965,11 @@ func (c *SQS) UntagQueueRequest(input *UntagQueueInput) (req *request.Request, o // UntagQueue API operation for Amazon Simple Queue Service. // // Remove cost allocation tags from the specified Amazon SQS queue. For an overview, -// see Tagging Your Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. -// -// When you use queue tags, keep the following guidelines in mind: -// -// * Adding more than 50 tags to a queue isn't recommended. -// -// * Tags don't have any semantic meaning. 
Amazon SQS interprets tags as -// character strings. -// -// * Tags are case-sensitive. -// -// * A new tag with a key identical to that of an existing tag overwrites -// the existing tag. -// -// * Tagging actions are limited to 5 TPS per AWS account. If your application -// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). -// -// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) +// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) // in the Amazon Simple Queue Service Developer Guide. // // Cross-account permissions don't apply to this action. For more information, -// see see Grant Cross-Account Permissions to a Role and a User Name (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2014,10 +2003,10 @@ func (c *SQS) UntagQueueWithContext(ctx aws.Context, input *UntagQueueInput, opt type AddPermissionInput struct { _ struct{} `type:"structure"` - // The AWS account number of the principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) + // The AWS account number of the principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) // who is given permission. The principal must have an AWS account, but does // not need to be signed up for Amazon SQS. For information about locating the - // AWS account identification, see Your AWS Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) + // AWS account identification, see Your AWS Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) // in the Amazon Simple Queue Service Developer Guide. // // AWSAccountIds is a required field @@ -2027,7 +2016,7 @@ type AddPermissionInput struct { // the name of any action or *. // // For more information about these actions, see Overview of Managing Access - // Permissions to Your Amazon Simple Queue Service Resource (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html) + // Permissions to Your Amazon Simple Queue Service Resource (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html) // in the Amazon Simple Queue Service Developer Guide. // // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n @@ -2490,7 +2479,7 @@ type CreateQueueInput struct { // to 1,209,600 seconds (14 days). Default: 345,600 (4 days). // // * Policy - The queue's policy. A valid AWS policy. 
For more information - // about policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for @@ -2500,84 +2489,69 @@ type CreateQueueInput struct { // * RedrivePolicy - The string that includes the parameters for the dead-letter // queue functionality of the source queue. For more information about the // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue - // to which Amazon SQS moves messages after the value of maxReceiveCount - // is exceeded. - // - // maxReceiveCount - The number of times a message is delivered to the source - // queue before being moved to the dead-letter queue. When the ReceiveCount - // for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves - // the message to the dead-letter-queue. - // - // The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, - // the dead-letter queue of a standard queue must also be a standard queue. + // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn + // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // - The number of times a message is delivered to the source queue before + // being moved to the dead-letter queue. When the ReceiveCount for a message + // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message + // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also + // be a FIFO queue. Similarly, the dead-letter queue of a standard queue + // must also be a standard queue. // // * VisibilityTimeout - The visibility timeout for the queue, in seconds. // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // - // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): + // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // for Amazon SQS or a custom CMK. 
For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, - // the alias of a custom CMK can, for example, be alias/MyAlias. For more - // examples, see KeyId (http://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) + // the alias of a custom CMK can, for example, be alias/MyAlias . For more + // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better // security but results in more calls to KMS which might incur charges after // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). - // + // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // - // The following attributes apply only to FIFO (first-in-first-out) queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): + // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * FifoQueue - Designates a queue as FIFO. Valid values: true, false. You - // can provide this attribute only during queue creation. You can't change - // it for an existing queue. When you set this attribute, you must also provide - // the MessageGroupId for your messages explicitly. - // - // For more information, see FIFO Queue Logic (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) + // * FifoQueue - Designates a queue as FIFO. Valid values: true, false. If + // you don't specify the FifoQueue attribute, Amazon SQS creates a standard + // queue. You can provide this attribute only during queue creation. You + // can't change it for an existing queue. When you set this attribute, you + // must also provide the MessageGroupId for your messages explicitly. For + // more information, see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. // // * ContentBasedDeduplication - Enables content-based deduplication. Valid // values: true, false. For more information, see Exactly-Once Processing - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. 
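// Editorial sketch (not part of the generated SDK): the FIFO and
// server-side-encryption attributes documented above, applied through
// CreateQueue. The region, queue name, and key alias are placeholder values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sqs.New(sess)

	out, err := svc.CreateQueue(&sqs.CreateQueueInput{
		QueueName: aws.String("example-queue.fifo"), // FIFO queue names must end in .fifo
		Attributes: map[string]*string{
			"FifoQueue":                    aws.String("true"), // settable only at queue creation
			"ContentBasedDeduplication":    aws.String("true"), // SHA-256 of the body becomes the MessageDeduplicationId
			"KmsMasterKeyId":               aws.String("alias/aws/sqs"),
			"KmsDataKeyReusePeriodSeconds": aws.String("300"), // the 5-minute default
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.QueueUrl))
}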
- // - // Every message must have a unique MessageDeduplicationId, - // - // You may provide a MessageDeduplicationId explicitly. - // - // If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication - // for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). - // - // - // If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. - // - // If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. - // - // When ContentBasedDeduplication is in effect, messages with identical content - // sent within the deduplication interval are treated as duplicates and only - // one copy of the message is delivered. - // - // If you send one message with ContentBasedDeduplication enabled and then another - // message with a MessageDeduplicationId that is the same as the one generated - // for the first MessageDeduplicationId, the two messages are treated as - // duplicates and only one copy of the message is delivered. + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) + // in the Amazon Simple Queue Service Developer Guide. Every message must + // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId + // explicitly. If you aren't able to provide a MessageDeduplicationId and + // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a + // SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). If you don't provide + // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication + // set, the action fails with an error. If the queue has ContentBasedDeduplication + // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication + // is in effect, messages with identical content sent within the deduplication + // interval are treated as duplicates and only one copy of the message is + // delivered. If you send one message with ContentBasedDeduplication enabled + // and then another message with a MessageDeduplicationId that is the same + // as the one generated for the first MessageDeduplicationId, the two messages + // are treated as duplicates and only one copy of the message is delivered. Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The name of the new queue. The following limits apply to this name: @@ -2593,6 +2567,33 @@ type CreateQueueInput struct { // // QueueName is a required field QueueName *string `type:"string" required:"true"` + + // Add cost allocation tags to the specified Amazon SQS queue. For an overview, + // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) + // in the Amazon Simple Queue Service Developer Guide. + // + // When you use queue tags, keep the following guidelines in mind: + // + // * Adding more than 50 tags to a queue isn't recommended. + // + // * Tags don't have any semantic meaning. Amazon SQS interprets tags as + // character strings. + // + // * Tags are case-sensitive. + // + // * A new tag with a key identical to that of an existing tag overwrites + // the existing tag. 
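// Editorial sketch (not part of the generated SDK): the Tags field added to
// CreateQueueInput above lets cost-allocation tags be applied at creation
// time, which requires both sqs:CreateQueue and sqs:TagQueue. Tag keys and
// values here are placeholders; the client is assumed to exist already.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"
)

// createTaggedQueue is an illustrative helper, not generated code.
func createTaggedQueue(svc *sqs.SQS) (string, error) {
	out, err := svc.CreateQueue(&sqs.CreateQueueInput{
		QueueName: aws.String("example-queue"),
		Tags: map[string]*string{
			"team": aws.String("payments"),
			"env":  aws.String("prod"),
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.QueueUrl), nil
}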
+ // + // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) + // in the Amazon Simple Queue Service Developer Guide. + // + // To be able to tag a queue on creation, you must have the sqs:CreateQueue + // and sqs:TagQueue permissions. + // + // Cross-account permissions don't apply to this action. For more information, + // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) + // in the Amazon Simple Queue Service Developer Guide. + Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` } // String returns the string representation @@ -2630,6 +2631,12 @@ func (s *CreateQueueInput) SetQueueName(v string) *CreateQueueInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput { + s.Tags = v + return s +} + // Returns the QueueUrl attribute of the created queue. type CreateQueueOutput struct { _ struct{} `type:"structure"` @@ -3011,48 +3018,42 @@ type GetQueueAttributesInput struct { // * RedrivePolicy - Returns the string that includes the parameters for // dead-letter queue functionality of the source queue. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue - // to which Amazon SQS moves messages after the value of maxReceiveCount - // is exceeded. - // - // maxReceiveCount - The number of times a message is delivered to the source - // queue before being moved to the dead-letter queue. When the ReceiveCount - // for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves - // the message to the dead-letter-queue. + // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn + // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // - The number of times a message is delivered to the source queue before + // being moved to the dead-letter queue. When the ReceiveCount for a message + // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message + // to the dead-letter-queue. // // * VisibilityTimeout - Returns the visibility timeout for the queue. For // more information about the visibility timeout, see Visibility Timeout - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. 
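// Editorial sketch (not part of the generated SDK): reading the RedrivePolicy
// and VisibilityTimeout attributes described above. RedrivePolicy comes back
// as a JSON string carrying deadLetterTargetArn and maxReceiveCount; the
// queue URL is a placeholder supplied by the caller.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func printRedrivePolicy(svc *sqs.SQS, queueURL string) error {
	out, err := svc.GetQueueAttributes(&sqs.GetQueueAttributesInput{
		QueueUrl: aws.String(queueURL),
		AttributeNames: []*string{
			aws.String(sqs.QueueAttributeNameRedrivePolicy),
			aws.String(sqs.QueueAttributeNameVisibilityTimeout),
		},
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Attributes[sqs.QueueAttributeNameRedrivePolicy]))
	return nil
}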
// - // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): + // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // // * KmsMasterKeyId - Returns the ID of an AWS-managed customer master key // (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). - // + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // // * KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, // for which Amazon SQS can reuse a data key to encrypt or decrypt messages // before calling AWS KMS again. For more information, see How Does the Data - // Key Reuse Period Work? (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). + // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // - // - // The following attributes apply only to FIFO (first-in-first-out) queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): + // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // // * FifoQueue - Returns whether the queue is FIFO. For more information, - // see FIFO Queue Logic (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) - // in the Amazon Simple Queue Service Developer Guide. - // - // To determine whether a queue is FIFO (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), + // see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) + // in the Amazon Simple Queue Service Developer Guide. To determine whether + // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // // * ContentBasedDeduplication - Returns whether content-based deduplication // is enabled for the queue. For more information, see Exactly-Once Processing - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. 
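// Editorial sketch (not part of the generated SDK): the FIFO check quoted
// above reduces to a plain suffix test on the queue name.
package example

import "strings"

// isFifoQueue reports whether a queue name denotes a FIFO queue, which is
// exactly when it ends with the .fifo suffix.
func isFifoQueue(queueName string) bool {
	return strings.HasSuffix(queueName, ".fifo")
}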
AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` @@ -3173,7 +3174,7 @@ func (s *GetQueueUrlInput) SetQueueOwnerAWSAccountId(v string) *GetQueueUrlInput return s } -// For more information, see Interpreting Responses (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html) +// For more information, see Interpreting Responses (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html) // in the Amazon Simple Queue Service Developer Guide. type GetQueueUrlOutput struct { _ struct{} `type:"structure"` @@ -3415,7 +3416,7 @@ type Message struct { MD5OfMessageAttributes *string `type:"string"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` @@ -3504,7 +3505,7 @@ type MessageAttributeValue struct { // Binary. For the Number data type, you must use StringValue. // // You can also append custom labels. For more information, see Amazon SQS Message - // Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. // // DataType is a required field @@ -3571,6 +3572,94 @@ func (s *MessageAttributeValue) SetStringValue(v string) *MessageAttributeValue return s } +// The user-specified message system attribute value. For string data types, +// the Value attribute has the same restrictions on the content as the message +// body. For more information, see SendMessage. +// +// Name, type, value and the message body must not be empty or null. +type MessageSystemAttributeValue struct { + _ struct{} `type:"structure"` + + // Not implemented. Reserved for future use. + BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"` + + // Binary type attributes can store any binary data, such as compressed data, + // encrypted data, or images. + // + // BinaryValue is automatically base64 encoded/decoded by the SDK. + BinaryValue []byte `type:"blob"` + + // Amazon SQS supports the following logical data types: String, Number, and + // Binary. For the Number data type, you must use StringValue. + // + // You can also append custom labels. For more information, see Amazon SQS Message + // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // in the Amazon Simple Queue Service Developer Guide. + // + // DataType is a required field + DataType *string `type:"string" required:"true"` + + // Not implemented. Reserved for future use. + StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"` + + // Strings are Unicode with UTF-8 binary encoding. 
For a list of code values, + // see ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageSystemAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageSystemAttributeValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageSystemAttributeValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageSystemAttributeValue"} + if s.DataType == nil { + invalidParams.Add(request.NewErrParamRequired("DataType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBinaryListValues sets the BinaryListValues field's value. +func (s *MessageSystemAttributeValue) SetBinaryListValues(v [][]byte) *MessageSystemAttributeValue { + s.BinaryListValues = v + return s +} + +// SetBinaryValue sets the BinaryValue field's value. +func (s *MessageSystemAttributeValue) SetBinaryValue(v []byte) *MessageSystemAttributeValue { + s.BinaryValue = v + return s +} + +// SetDataType sets the DataType field's value. +func (s *MessageSystemAttributeValue) SetDataType(v string) *MessageSystemAttributeValue { + s.DataType = &v + return s +} + +// SetStringListValues sets the StringListValues field's value. +func (s *MessageSystemAttributeValue) SetStringListValues(v []*string) *MessageSystemAttributeValue { + s.StringListValues = v + return s +} + +// SetStringValue sets the StringValue field's value. +func (s *MessageSystemAttributeValue) SetStringValue(v string) *MessageSystemAttributeValue { + s.StringValue = &v + return s +} + type PurgeQueueInput struct { _ struct{} `type:"structure"` @@ -3628,8 +3717,8 @@ func (s PurgeQueueOutput) GoString() string { type ReceiveMessageInput struct { _ struct{} `type:"structure"` - // A list of s that need to be returned along with each message. These attributes - // include: + // A list of attributes that need to be returned along with each message. These + // attributes include: // // * All - Returns all values. // @@ -3640,11 +3729,10 @@ type ReceiveMessageInput struct { // * ApproximateReceiveCount - Returns the number of times a message has // been received from the queue but not deleted. // - // * SenderId - // - // For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. + // * AWSTraceHeader - Returns the AWS X-Ray trace header string. // - // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. + // * SenderId For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. + // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. // // * SentTimestamp - Returns the time the message was sent to the queue (epoch // time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). @@ -3717,19 +3805,16 @@ type ReceiveMessageInput struct { // * During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId // return the same messages and receipt handles. If a retry occurs within // the deduplication interval, it resets the visibility timeout. For more - // information, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. 
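// Editorial sketch (not part of the generated SDK): a long-polling
// ReceiveMessage call requesting the message system attributes discussed
// above, including the new AWSTraceHeader. The queue URL is a placeholder.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func drainOnce(svc *sqs.SQS, queueURL string) error {
	out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
		QueueUrl:            aws.String(queueURL),
		MaxNumberOfMessages: aws.Int64(10),
		WaitTimeSeconds:     aws.Int64(20), // long polling
		AttributeNames: []*string{
			aws.String(sqs.MessageSystemAttributeNameAwstraceHeader),
			aws.String(sqs.MessageSystemAttributeNameSentTimestamp),
		},
	})
	if err != nil {
		return err
	}
	for _, m := range out.Messages {
		fmt.Println(aws.StringValue(m.MessageId),
			aws.StringValue(m.Attributes[sqs.MessageSystemAttributeNameSentTimestamp]))
	}
	return nil
}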
- // - // If a caller of the ReceiveMessage action still processes messages when the - // visibility timeout expires and messages become visible, another worker - // consuming from the same queue can receive the same messages and therefore - // process duplicates. Also, if a consumer whose message processing time - // is longer than the visibility timeout tries to delete the processed messages, - // the action fails with an error. - // - // To mitigate this effect, ensure that your application observes a safe threshold - // before the visibility timeout expires and extend the visibility timeout - // as necessary. + // information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) + // in the Amazon Simple Queue Service Developer Guide. If a caller of the + // ReceiveMessage action still processes messages when the visibility timeout + // expires and messages become visible, another worker consuming from the + // same queue can receive the same messages and therefore process duplicates. + // Also, if a consumer whose message processing time is longer than the visibility + // timeout tries to delete the processed messages, the action fails with + // an error. To mitigate this effect, ensure that your application observes + // a safe threshold before the visibility timeout expires and extend the + // visibility timeout as necessary. // // * While messages with a particular MessageGroupId are invisible, no more // messages belonging to the same MessageGroupId are returned until the visibility @@ -3744,7 +3829,7 @@ type ReceiveMessageInput struct { // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId - // Request Parameter (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html) + // Request Parameter (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html) // in the Amazon Simple Queue Service Developer Guide. ReceiveRequestAttemptId *string `type:"string"` @@ -4045,7 +4130,7 @@ type SendMessageBatchRequestEntry struct { Id *string `type:"string" required:"true"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` @@ -4059,24 +4144,17 @@ type SendMessageBatchRequestEntry struct { // The token used for deduplication of messages within a 5-minute minimum deduplication // interval. If a message with a particular MessageDeduplicationId is sent successfully, // subsequent messages with the same MessageDeduplicationId are accepted successfully - // but aren't delivered. For more information, see Exactly-Once Processing - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) + // but aren't delivered. 
For more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message must have a unique MessageDeduplicationId, - // - // You may provide a MessageDeduplicationId explicitly. - // - // If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication - // for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). - // - // - // If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. - // - // If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * Every message must have a unique MessageDeduplicationId, You may provide + // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId + // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses + // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). If you don't provide + // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication + // set, the action fails with an error. If the queue has ContentBasedDeduplication + // set, your MessageDeduplicationId overrides the generated one. // // * When ContentBasedDeduplication is in effect, messages with identical // content sent within the deduplication interval are treated as duplicates @@ -4101,7 +4179,7 @@ type SendMessageBatchRequestEntry struct { // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId - // Property (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) + // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) // in the Amazon Simple Queue Service Developer Guide. MessageDeduplicationId *string `type:"string"` @@ -4126,12 +4204,23 @@ type SendMessageBatchRequestEntry struct { // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageGroupId, see Using the MessageGroupId - // Property (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) + // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) // in the Amazon Simple Queue Service Developer Guide. // // MessageGroupId is required for FIFO queues. You can't use it for Standard // queues. MessageGroupId *string `type:"string"` + + // The message system attribute to send Each message system attribute consists + // of a Name, Type, and Value. + // + // * Currently, the only supported message system attribute is AWSTraceHeader. + // Its type must be String and its value must be a correctly formatted AWS + // X-Ray trace string. + // + // * The size of a message system attribute doesn't count towards the total + // size of a message. 
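// Editorial sketch (not part of the generated SDK): attaching the
// AWSTraceHeader message system attribute to a batch entry, per the field
// documented above. The trace header value and queue URL are placeholders;
// per the docs, this attribute does not count toward the message size.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func sendTraced(svc *sqs.SQS, queueURL string) error {
	_, err := svc.SendMessageBatch(&sqs.SendMessageBatchInput{
		QueueUrl: aws.String(queueURL),
		Entries: []*sqs.SendMessageBatchRequestEntry{{
			Id:          aws.String("msg-1"),
			MessageBody: aws.String("hello"),
			MessageSystemAttributes: map[string]*sqs.MessageSystemAttributeValue{
				sqs.MessageSystemAttributeNameForSendsAwstraceHeader: {
					DataType:    aws.String("String"),
					StringValue: aws.String("Root=1-00000000-000000000000000000000000;Sampled=1"), // placeholder X-Ray header
				},
			},
		}},
	})
	return err
}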
+ MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` } // String returns the string representation @@ -4163,6 +4252,16 @@ func (s *SendMessageBatchRequestEntry) Validate() error { } } } + if s.MessageSystemAttributes != nil { + for i, v := range s.MessageSystemAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4206,6 +4305,12 @@ func (s *SendMessageBatchRequestEntry) SetMessageGroupId(v string) *SendMessageB return s } +// SetMessageSystemAttributes sets the MessageSystemAttributes field's value. +func (s *SendMessageBatchRequestEntry) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageBatchRequestEntry { + s.MessageSystemAttributes = v + return s +} + // Encloses a MessageId for a successfully-enqueued message in a SendMessageBatch. type SendMessageBatchResultEntry struct { _ struct{} `type:"structure"` @@ -4229,6 +4334,12 @@ type SendMessageBatchResultEntry struct { // MD5OfMessageBody is a required field MD5OfMessageBody *string `type:"string" required:"true"` + // An MD5 digest of the non-URL-encoded message system attribute string. You + // can use this attribute to verify that Amazon SQS received the message correctly. + // Amazon SQS URL-decodes the message before creating the MD5 digest. For information + // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). + MD5OfMessageSystemAttributes *string `type:"string"` + // An identifier for the message. // // MessageId is a required field @@ -4271,6 +4382,12 @@ func (s *SendMessageBatchResultEntry) SetMD5OfMessageBody(v string) *SendMessage return s } +// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value. +func (s *SendMessageBatchResultEntry) SetMD5OfMessageSystemAttributes(v string) *SendMessageBatchResultEntry { + s.MD5OfMessageSystemAttributes = &v + return s +} + // SetMessageId sets the MessageId field's value. func (s *SendMessageBatchResultEntry) SetMessageId(v string) *SendMessageBatchResultEntry { s.MessageId = &v @@ -4296,7 +4413,7 @@ type SendMessageInput struct { DelaySeconds *int64 `type:"integer"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` @@ -4318,24 +4435,18 @@ type SendMessageInput struct { // The token used for deduplication of sent messages. If a message with a particular // MessageDeduplicationId is sent successfully, any messages sent with the same // MessageDeduplicationId are accepted successfully but aren't delivered during - // the 5-minute deduplication interval. 
For more information, see Exactly-Once - // Processing (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) + // the 5-minute deduplication interval. For more information, see Exactly-Once + // Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message must have a unique MessageDeduplicationId, - // - // You may provide a MessageDeduplicationId explicitly. - // - // If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication - // for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). - // - // - // If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. - // - // If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * Every message must have a unique MessageDeduplicationId, You may provide + // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId + // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses + // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). If you don't provide + // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication + // set, the action fails with an error. If the queue has ContentBasedDeduplication + // set, your MessageDeduplicationId overrides the generated one. // // * When ContentBasedDeduplication is in effect, messages with identical // content sent within the deduplication interval are treated as duplicates @@ -4360,7 +4471,7 @@ type SendMessageInput struct { // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId - // Property (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) + // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) // in the Amazon Simple Queue Service Developer Guide. MessageDeduplicationId *string `type:"string"` @@ -4385,13 +4496,24 @@ type SendMessageInput struct { // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageGroupId, see Using the MessageGroupId - // Property (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) + // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) // in the Amazon Simple Queue Service Developer Guide. // // MessageGroupId is required for FIFO queues. You can't use it for Standard // queues. MessageGroupId *string `type:"string"` + // The message system attribute to send. Each message system attribute consists + // of a Name, Type, and Value. + // + // * Currently, the only supported message system attribute is AWSTraceHeader. + // Its type must be String and its value must be a correctly formatted AWS + // X-Ray trace string. 
+ // + // * The size of a message system attribute doesn't count towards the total + // size of a message. + MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + // The URL of the Amazon SQS queue to which a message is sent. // // Queue URLs and names are case-sensitive. @@ -4429,6 +4551,16 @@ func (s *SendMessageInput) Validate() error { } } } + if s.MessageSystemAttributes != nil { + for i, v := range s.MessageSystemAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4466,6 +4598,12 @@ func (s *SendMessageInput) SetMessageGroupId(v string) *SendMessageInput { return s } +// SetMessageSystemAttributes sets the MessageSystemAttributes field's value. +func (s *SendMessageInput) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageInput { + s.MessageSystemAttributes = v + return s +} + // SetQueueUrl sets the QueueUrl field's value. func (s *SendMessageInput) SetQueueUrl(v string) *SendMessageInput { s.QueueUrl = &v @@ -4488,8 +4626,13 @@ type SendMessageOutput struct { // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). MD5OfMessageBody *string `type:"string"` + // An MD5 digest of the non-URL-encoded message system attribute string. You + // can use this attribute to verify that Amazon SQS received the message correctly. + // Amazon SQS URL-decodes the message before creating the MD5 digest. + MD5OfMessageSystemAttributes *string `type:"string"` + // An attribute containing the MessageId of the message sent to the queue. For - // more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) + // more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) // in the Amazon Simple Queue Service Developer Guide. MessageId *string `type:"string"` @@ -4524,6 +4667,12 @@ func (s *SendMessageOutput) SetMD5OfMessageBody(v string) *SendMessageOutput { return s } +// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value. +func (s *SendMessageOutput) SetMD5OfMessageSystemAttributes(v string) *SendMessageOutput { + s.MD5OfMessageSystemAttributes = &v + return s +} + // SetMessageId sets the MessageId field's value. func (s *SendMessageOutput) SetMessageId(v string) *SendMessageOutput { s.MessageId = &v @@ -4556,9 +4705,8 @@ type SetQueueAttributesInput struct { // SQS retains a message. Valid values: An integer representing seconds, // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). // - // // * Policy - The queue's policy. A valid AWS policy. For more information - // about policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. 
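// Editorial sketch (not part of the generated SDK): the FIFO-only fields of
// SendMessageInput shown above. MessageGroupId is required for FIFO queues;
// MessageDeduplicationId can be omitted when ContentBasedDeduplication is
// enabled on the queue. All literal values are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func sendOrder(svc *sqs.SQS, queueURL string) error {
	_, err := svc.SendMessage(&sqs.SendMessageInput{
		QueueUrl:               aws.String(queueURL),
		MessageBody:            aws.String(`{"order":42}`),
		MessageGroupId:         aws.String("orders"),      // messages in a group are delivered in order
		MessageDeduplicationId: aws.String("order-42-v1"), // overrides any content-based hash
	})
	return err
}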
// // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for @@ -4568,76 +4716,61 @@ type SetQueueAttributesInput struct { // * RedrivePolicy - The string that includes the parameters for the dead-letter // queue functionality of the source queue. For more information about the // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. - // - // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue - // to which Amazon SQS moves messages after the value of maxReceiveCount - // is exceeded. - // - // maxReceiveCount - The number of times a message is delivered to the source - // queue before being moved to the dead-letter queue. When the ReceiveCount - // for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves - // the message to the dead-letter-queue. - // - // The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, - // the dead-letter queue of a standard queue must also be a standard queue. + // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn + // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // - The number of times a message is delivered to the source queue before + // being moved to the dead-letter queue. When the ReceiveCount for a message + // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message + // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also + // be a FIFO queue. Similarly, the dead-letter queue of a standard queue + // must also be a standard queue. // // * VisibilityTimeout - The visibility timeout for the queue, in seconds. // Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // - // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): + // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, - // the alias of a custom CMK can, for example, be alias/MyAlias. 
For more - // examples, see KeyId (http://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) + // the alias of a custom CMK can, for example, be alias/MyAlias . For more + // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better // security but results in more calls to KMS which might incur charges after // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). - // + // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // // The following attribute applies only to FIFO (first-in-first-out) queues - // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // // * ContentBasedDeduplication - Enables content-based deduplication. For - // more information, see Exactly-Once Processing (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon Simple Queue Service Developer Guide. - // - // Every message must have a unique MessageDeduplicationId, - // - // You may provide a MessageDeduplicationId explicitly. - // - // If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication - // for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). - // - // - // If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. - // - // If the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. - // - // When ContentBasedDeduplication is in effect, messages with identical content - // sent within the deduplication interval are treated as duplicates and only - // one copy of the message is delivered. - // - // If you send one message with ContentBasedDeduplication enabled and then another - // message with a MessageDeduplicationId that is the same as the one generated - // for the first MessageDeduplicationId, the two messages are treated as - // duplicates and only one copy of the message is delivered. + // more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) + // in the Amazon Simple Queue Service Developer Guide. 
Every message must + // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId + // explicitly. If you aren't able to provide a MessageDeduplicationId and + // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a + // SHA-256 hash to generate the MessageDeduplicationId using the body of + // the message (but not the attributes of the message). If you don't provide + // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication + // set, the action fails with an error. If the queue has ContentBasedDeduplication + // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication + // is in effect, messages with identical content sent within the deduplication + // interval are treated as duplicates and only one copy of the message is + // delivered. If you send one message with ContentBasedDeduplication enabled + // and then another message with a MessageDeduplicationId that is the same + // as the one generated for the first MessageDeduplicationId, the two messages + // are treated as duplicates and only one copy of the message is delivered. // // Attributes is a required field Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` @@ -4855,6 +4988,14 @@ const ( // MessageSystemAttributeNameMessageGroupId is a MessageSystemAttributeName enum value MessageSystemAttributeNameMessageGroupId = "MessageGroupId" + + // MessageSystemAttributeNameAwstraceHeader is a MessageSystemAttributeName enum value + MessageSystemAttributeNameAwstraceHeader = "AWSTraceHeader" +) + +const ( + // MessageSystemAttributeNameForSendsAwstraceHeader is a MessageSystemAttributeNameForSends enum value + MessageSystemAttributeNameForSendsAwstraceHeader = "AWSTraceHeader" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go index 6f338035969..3a3f55f0980 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go @@ -10,11 +10,6 @@ // Amazon SQS moves data between distributed application components and helps // you decouple these components. // -// Standard queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/standard-queues.html) -// are available in all regions. FIFO queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) -// are available in the US East (N. Virginia), US East (Ohio), US West (Oregon), -// and EU (Ireland) regions. -// // You can use AWS SDKs (http://aws.amazon.com/tools/#sdk) to access Amazon // SQS using your favorite programming language. 
The SDKs perform tasks such // as the following automatically: @@ -29,20 +24,13 @@ // // * Amazon SQS Product Page (http://aws.amazon.com/sqs/) // -// * Amazon Simple Queue Service Developer Guide -// -// Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) -// -// Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) -// -// Amazon SQS Dead-Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// -// * Amazon SQS in the (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html)AWS -// CLI Command Reference +// * Amazon Simple Queue Service Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) +// Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) +// Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // -// * Amazon Web Services General Reference +// * Amazon SQS in the AWS CLI Command Reference (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) // -// Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) +// * Amazon Web Services General Reference Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) // // See https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go index d463ecf0ddb..7bac89c4ae9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -46,11 +46,11 @@ const ( // svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
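// Editorial sketch (not part of the generated SDK): the construction path the
// comments above describe, with per-client configuration layered over the
// shared session. The region and retry count are placeholder choices.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession())
	// Per-client overrides; as the package documentation above notes, the SDK
	// signs requests and retries errors automatically.
	svc := sqs.New(sess, aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(5))
	_ = svc
}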
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SQS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SQS { svc := &SQS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-11-05", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index f01c8d537df..767f80602d2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -59,8 +59,8 @@ func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ // AddTagsToResource API operation for Amazon Simple Systems Manager (SSM). // // Adds or overwrites one or more tags for the specified resource. Tags are -// metadata that you can assign to your documents, managed instances, Maintenance -// Windows, Parameter Store parameters, and patch baselines. Tags enable you +// metadata that you can assign to your documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. Tags enable you // to categorize your resources in different ways, for example, by purpose, // owner, or environment. Each tag consists of a key and an optional value, // both of which you define. For example, you could define a set of tags for @@ -194,12 +194,9 @@ func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Requ // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -273,7 +270,7 @@ func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWi // CancelMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM). // -// Stops a Maintenance Window execution that is already in progress and cancels +// Stops a maintenance window execution that is already in progress and cancels // any tasks in the window that have not already starting running. (Tasks already // in progress will continue to completion.) // @@ -289,8 +286,8 @@ func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWi // An error occurred on the server side. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. 
// // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -364,8 +361,8 @@ func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *reques // Registers your on-premises server or virtual machine with Amazon EC2 so that // you can manage these resources using Run Command. An on-premises server or // virtual machine that has been registered with EC2 is called a managed instance. -// For more information about activations, see Setting Up Systems Manager in -// Hybrid Environments (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). +// For more information about activations, see Setting Up AWS Systems Manager +// for Hybrid Environments (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -482,12 +479,9 @@ func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *requ // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -609,12 +603,9 @@ func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -736,7 +727,7 @@ func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Re // The content for the document is not valid. // // * ErrCodeDocumentLimitExceeded "DocumentLimitExceeded" -// You can have at most 200 active Systems Manager documents. +// You can have at most 500 active Systems Manager documents. // // * ErrCodeInvalidDocumentSchemaVersion "InvalidDocumentSchemaVersion" // The version of the document schema is not supported. @@ -807,7 +798,14 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // CreateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Creates a new Maintenance Window. +// Creates a new maintenance window. +// +// The value you specify for Duration determines the specific end time for the +// maintenance window based on the time it begins. 
No maintenance window tasks +// are permitted to start after the resulting endtime minus the number of hours +// you specify for Cutoff. For example, if the maintenance window starts at +// 3 PM, the duration is three hours, and the value you specify for Cutoff is +// one hour, no maintenance window tasks can start after 5 PM. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -823,7 +821,7 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -853,6 +851,105 @@ func (c *SSM) CreateMaintenanceWindowWithContext(ctx aws.Context, input *CreateM return out, req.Send() } +const opCreateOpsItem = "CreateOpsItem" + +// CreateOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the CreateOpsItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateOpsItem for more information on using the CreateOpsItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateOpsItemRequest method. +// req, resp := client.CreateOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateOpsItem +func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Request, output *CreateOpsItemOutput) { + op := &request.Operation{ + Name: opCreateOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpsItemInput{} + } + + output = &CreateOpsItemOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Creates a new OpsItem. You must have permission in AWS Identity and Access +// Management (IAM) to create a new OpsItem. For more information, see Getting +// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateOpsItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeOpsItemAlreadyExistsException "OpsItemAlreadyExistsException" +// The OpsItem already exists. +// +// * ErrCodeOpsItemLimitExceededException "OpsItemLimitExceededException" +// The request caused OpsItems to exceed one or more limits. For information +// about OpsItem limits, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// +// * ErrCodeOpsItemInvalidParameterException "OpsItemInvalidParameterException" +// A specified parameter argument isn't valid. Verify the available arguments +// and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateOpsItem +func (c *SSM) CreateOpsItem(input *CreateOpsItemInput) (*CreateOpsItemOutput, error) { + req, out := c.CreateOpsItemRequest(input) + return out, req.Send() +} + +// CreateOpsItemWithContext is the same as CreateOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateOpsItemWithContext(ctx aws.Context, input *CreateOpsItemInput, opts ...request.Option) (*CreateOpsItemOutput, error) { + req, out := c.CreateOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreatePatchBaseline = "CreatePatchBaseline" // CreatePatchBaselineRequest generates a "aws/request.Request" representing the @@ -916,7 +1013,7 @@ func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req * // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -1214,12 +1311,9 @@ func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *requ // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. 
Invalid states are: Shutting-down and Terminated. @@ -1484,7 +1578,7 @@ func (c *SSM) DeleteMaintenanceWindowRequest(input *DeleteMaintenanceWindowInput // DeleteMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Deletes a Maintenance Window. +// Deletes a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1646,8 +1740,7 @@ func (c *SSM) DeleteParametersRequest(input *DeleteParametersInput) (req *reques // DeleteParameters API operation for Amazon Simple Systems Manager (SSM). // -// Delete a list of parameters. This API is used to delete parameters by using -// the Amazon EC2 console. +// Delete a list of parameters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1913,12 +2006,9 @@ func (c *SSM) DeregisterManagedInstanceRequest(input *DeregisterManagedInstanceI // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -2075,7 +2165,7 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // DeregisterTargetFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Removes a target from a Maintenance Window. +// Removes a target from a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2086,8 +2176,8 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -2165,7 +2255,7 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // DeregisterTaskFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Removes a task from a Maintenance Window. +// Removes a task from a maintenance window. // // Returns awserr.Error for service API and SDK errors. 
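//
// A sketch of the call itself (both IDs are placeholders; WindowTaskId is the
// ID that was returned when the task was registered with the window):
//
//    _, err := client.DeregisterTaskFromMaintenanceWindow(
//        &ssm.DeregisterTaskFromMaintenanceWindowInput{
//            WindowId:     aws.String("mw-0c50858d01EXAMPLE"),
//            WindowTaskId: aws.String("0e2b8f5e-e6ed-4f1a-b6c4-1111EXAMPLE"),
//        })
//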
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2176,8 +2266,8 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -2257,9 +2347,9 @@ func (c *SSM) DescribeActivationsRequest(input *DescribeActivationsInput) (req * // DescribeActivations API operation for Amazon Simple Systems Manager (SSM). // -// Details about the activation, including: the date and time the activation -// was created, the expiration date, the IAM role assigned to the instances -// in the activation, and the number of instances activated by this registration. +// Describes details about the activation, such as the date and time the activation +// was created, its expiration date, the IAM role assigned to the instances +// in the activation, and the number of instances registered by using this activation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2312,7 +2402,7 @@ func (c *SSM) DescribeActivationsWithContext(ctx aws.Context, input *DescribeAct // // Example iterating over at most 3 pages of a DescribeActivations operation. // pageNum := 0 // err := client.DescribeActivationsPages(params, -// func(page *DescribeActivationsOutput, lastPage bool) bool { +// func(page *ssm.DescribeActivationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2344,10 +2434,12 @@ func (c *SSM) DescribeActivationsPagesWithContext(ctx aws.Context, input *Descri }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeActivationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeActivationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2428,12 +2520,9 @@ func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req * // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -2859,7 +2948,7 @@ func (c *SSM) DescribeAvailablePatchesRequest(input *DescribeAvailablePatchesInp // DescribeAvailablePatches API operation for Amazon Simple Systems Manager (SSM). // -// Lists all patches that could possibly be included in a patch baseline. +// Lists all patches eligible to be included in a patch baseline. // // Returns awserr.Error for service API and SDK errors. 
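//
// A sketch of narrowing the listing with a filter (the key and value are
// placeholders; DescribePatchProperties, later in this file, lists the
// properties each operating system supports):
//
//    out, err := client.DescribeAvailablePatches(&ssm.DescribeAvailablePatchesInput{
//        Filters: []*ssm.PatchOrchestratorFilter{{
//            Key:    aws.String("PRODUCT"),
//            Values: []*string{aws.String("WindowsServer2016")},
//        }},
//        MaxResults: aws.Int64(50), // page size; NextToken continues the listing
//    })
//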
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3129,12 +3218,9 @@ func (c *SSM) DescribeEffectiveInstanceAssociationsRequest(input *DescribeEffect // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -3225,8 +3311,8 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -3323,12 +3409,9 @@ func (c *SSM) DescribeInstanceAssociationsStatusRequest(input *DescribeInstanceA // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -3435,12 +3518,9 @@ func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformat // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -3487,7 +3567,7 @@ func (c *SSM) DescribeInstanceInformationWithContext(ctx aws.Context, input *Des // // Example iterating over at most 3 pages of a DescribeInstanceInformation operation. 
// pageNum := 0 // err := client.DescribeInstanceInformationPages(params, -// func(page *DescribeInstanceInformationOutput, lastPage bool) bool { +// func(page *ssm.DescribeInstanceInformationOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3519,10 +3599,12 @@ func (c *SSM) DescribeInstanceInformationPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeInstanceInformationOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeInstanceInformationOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3758,12 +3840,9 @@ func (c *SSM) DescribeInstancePatchesRequest(input *DescribeInstancePatchesInput // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -3855,7 +3934,7 @@ func (c *SSM) DescribeInventoryDeletionsRequest(input *DescribeInventoryDeletion // An error occurred on the server side. // // * ErrCodeInvalidDeletionIdException "InvalidDeletionIdException" -// The ID specified for the delete operation does not exist or is not valide. +// The ID specified for the delete operation does not exist or is not valid. // Verify the ID and try again. // // * ErrCodeInvalidNextToken "InvalidNextToken" @@ -3928,7 +4007,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // DescribeMaintenanceWindowExecutionTaskInvocations API operation for Amazon Simple Systems Manager (SSM). // // Retrieves the individual task executions (one per target) for a particular -// task run as part of a Maintenance Window execution. +// task run as part of a maintenance window execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3939,8 +4018,8 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4014,7 +4093,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // DescribeMaintenanceWindowExecutionTasks API operation for Amazon Simple Systems Manager (SSM). // -// For a given Maintenance Window execution, lists the tasks that were run. +// For a given maintenance window execution, lists the tasks that were run. // // Returns awserr.Error for service API and SDK errors. 
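//
// A sketch pairing this call with its WithContext variant, using the standard
// context and time packages, so a hung request can be cancelled (the timeout
// and execution ID are placeholders):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := client.DescribeMaintenanceWindowExecutionTasksWithContext(ctx,
//        &ssm.DescribeMaintenanceWindowExecutionTasksInput{
//            WindowExecutionId: aws.String("518d5565-5969-4cca-8f0e-2222EXAMPLE"),
//        })
//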
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4025,8 +4104,8 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4100,9 +4179,9 @@ func (c *SSM) DescribeMaintenanceWindowExecutionsRequest(input *DescribeMaintena // DescribeMaintenanceWindowExecutions API operation for Amazon Simple Systems Manager (SSM). // -// Lists the executions of a Maintenance Window. This includes information about -// when the Maintenance Window was scheduled to be active, and information about -// tasks registered and run with the Maintenance Window. +// Lists the executions of a maintenance window. This includes information about +// when the maintenance window was scheduled to be active, and information about +// tasks registered and run with the maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4181,7 +4260,7 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc // DescribeMaintenanceWindowSchedule API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves information about upcoming executions of a Maintenance Window. +// Retrieves information about upcoming executions of a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4195,8 +4274,8 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc // An error occurred on the server side. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4267,7 +4346,7 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // DescribeMaintenanceWindowTargets API operation for Amazon Simple Systems Manager (SSM). // -// Lists the targets registered with the Maintenance Window. +// Lists the targets registered with the maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4278,8 +4357,8 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. 
+// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4353,7 +4432,7 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // DescribeMaintenanceWindowTasks API operation for Amazon Simple Systems Manager (SSM). // -// Lists the tasks in a Maintenance Window. +// Lists the tasks in a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4364,8 +4443,8 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4439,7 +4518,7 @@ func (c *SSM) DescribeMaintenanceWindowsRequest(input *DescribeMaintenanceWindow // DescribeMaintenanceWindows API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves the Maintenance Windows in an AWS account. +// Retrieves the maintenance windows in an AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4518,7 +4597,7 @@ func (c *SSM) DescribeMaintenanceWindowsForTargetRequest(input *DescribeMaintena // DescribeMaintenanceWindowsForTarget API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves information about the Maintenance Windows targets or tasks that +// Retrieves information about the maintenance window targets or tasks that // an instance is associated with. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4554,6 +4633,94 @@ func (c *SSM) DescribeMaintenanceWindowsForTargetWithContext(ctx aws.Context, in return out, req.Send() } +const opDescribeOpsItems = "DescribeOpsItems" + +// DescribeOpsItemsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOpsItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeOpsItems for more information on using the DescribeOpsItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeOpsItemsRequest method. 
+// req, resp := client.DescribeOpsItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeOpsItems +func (c *SSM) DescribeOpsItemsRequest(input *DescribeOpsItemsInput) (req *request.Request, output *DescribeOpsItemsOutput) { + op := &request.Operation{ + Name: opDescribeOpsItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeOpsItemsInput{} + } + + output = &DescribeOpsItemsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeOpsItems API operation for Amazon Simple Systems Manager (SSM). +// +// Query a set of OpsItems. You must have permission in AWS Identity and Access +// Management (IAM) to query a list of OpsItems. For more information, see Getting +// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeOpsItems for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeOpsItems +func (c *SSM) DescribeOpsItems(input *DescribeOpsItemsInput) (*DescribeOpsItemsOutput, error) { + req, out := c.DescribeOpsItemsRequest(input) + return out, req.Send() +} + +// DescribeOpsItemsWithContext is the same as DescribeOpsItems with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeOpsItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeOpsItemsWithContext(ctx aws.Context, input *DescribeOpsItemsInput, opts ...request.Option) (*DescribeOpsItemsOutput, error) { + req, out := c.DescribeOpsItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeParameters = "DescribeParameters" // DescribeParametersRequest generates a "aws/request.Request" representing the @@ -4671,7 +4838,7 @@ func (c *SSM) DescribeParametersWithContext(ctx aws.Context, input *DescribePara // // Example iterating over at most 3 pages of a DescribeParameters operation. 
// pageNum := 0 // err := client.DescribeParametersPages(params, -// func(page *DescribeParametersOutput, lastPage bool) bool { +// func(page *ssm.DescribeParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4703,10 +4870,12 @@ func (c *SSM) DescribeParametersPagesWithContext(ctx aws.Context, input *Describ }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeParametersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeParametersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5003,19 +5172,33 @@ func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput // The following section lists the properties that can be used in filters for // each major operating system type: // -// WINDOWSValid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY +// WINDOWS +// +// Valid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY +// +// AMAZON_LINUX +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// AMAZON_LINUX_2 +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY // -// AMAZON_LINUXValid properties: PRODUCT, CLASSIFICATION, SEVERITY +// UBUNTU // -// AMAZON_LINUX_2Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// Valid properties: PRODUCT, PRIORITY // -// UBUNTU Valid properties: PRODUCT, PRIORITY +// REDHAT_ENTERPRISE_LINUX // -// REDHAT_ENTERPRISE_LINUXValid properties: PRODUCT, CLASSIFICATION, SEVERITY +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY // -// SUSEValid properties: PRODUCT, CLASSIFICATION, SEVERITY +// SUSE // -// CENTOSValid properties: PRODUCT, CLASSIFICATION, SEVERITY +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// CENTOS +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5284,12 +5467,9 @@ func (c *SSM) GetCommandInvocationRequest(input *GetCommandInvocationInput) (req // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -5299,7 +5479,7 @@ func (c *SSM) GetCommandInvocationRequest(input *GetCommandInvocationInput) (req // // * ErrCodeInvocationDoesNotExist "InvocationDoesNotExist" // The command ID and instance ID you specified did not match any invocations. -// Verify the command ID adn the instance ID and try again. +// Verify the command ID and the instance ID and try again. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCommandInvocation func (c *SSM) GetCommandInvocation(input *GetCommandInvocationInput) (*GetCommandInvocationOutput, error) { @@ -5551,6 +5731,11 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP // Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. 
// // * ErrCodeUnsupportedFeatureRequiredException "UnsupportedFeatureRequiredException" +// Microsoft application patching is only available on EC2 instances and Advanced +// Instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable Advanced Instances. For more information, see Using the Advanced-Instances +// Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// in the AWS Systems Manager User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance func (c *SSM) GetDeployablePatchSnapshotForInstance(input *GetDeployablePatchSnapshotForInstanceInput) (*GetDeployablePatchSnapshotForInstanceOutput, error) { @@ -5888,7 +6073,7 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // GetMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves a Maintenance Window. +// Retrieves a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5899,8 +6084,8 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -5974,8 +6159,7 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // GetMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves details about a specific task run as part of a Maintenance Window -// execution. +// Retrieves details about a specific maintenance window execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5986,8 +6170,8 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6061,8 +6245,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // GetMaintenanceWindowExecutionTask API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves the details about a specific task run as part of a Maintenance -// Window execution. +// Retrieves the details about a specific task run as part of a maintenance +// window execution. // // Returns awserr.Error for service API and SDK errors.
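//
// A sketch of drilling into a single task (both IDs are placeholders,
// typically taken from a prior DescribeMaintenanceWindowExecutionTasks call):
//
//    out, err := client.GetMaintenanceWindowExecutionTask(
//        &ssm.GetMaintenanceWindowExecutionTaskInput{
//            WindowExecutionId: aws.String("518d5565-5969-4cca-8f0e-2222EXAMPLE"),
//            TaskId:            aws.String("ac0c6ae1-daa3-4a89-832e-3333EXAMPLE"),
//        })
//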
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6073,8 +6257,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6148,8 +6332,7 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // GetMaintenanceWindowExecutionTaskInvocation API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves a task invocation. A task invocation is a specific task running -// on a specific target. Maintenance Windows report status for all invocations. +// Retrieves information about a specific task running on a specific target. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6160,8 +6343,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6235,7 +6418,7 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // GetMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). // -// Lists the tasks in a Maintenance Window. +// Lists the tasks in a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6246,8 +6429,8 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6277,6 +6460,190 @@ func (c *SSM) GetMaintenanceWindowTaskWithContext(ctx aws.Context, input *GetMai return out, req.Send() } +const opGetOpsItem = "GetOpsItem" + +// GetOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the GetOpsItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetOpsItem for more information on using the GetOpsItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOpsItemRequest method. +// req, resp := client.GetOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsItem +func (c *SSM) GetOpsItemRequest(input *GetOpsItemInput) (req *request.Request, output *GetOpsItemOutput) { + op := &request.Operation{ + Name: opGetOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpsItemInput{} + } + + output = &GetOpsItemOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Get information about an OpsItem by using the ID. You must have permission +// in AWS Identity and Access Management (IAM) to view information about an +// OpsItem. For more information, see Getting Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetOpsItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeOpsItemNotFoundException "OpsItemNotFoundException" +// The specified OpsItem ID doesn't exist. Verify the ID and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsItem +func (c *SSM) GetOpsItem(input *GetOpsItemInput) (*GetOpsItemOutput, error) { + req, out := c.GetOpsItemRequest(input) + return out, req.Send() +} + +// GetOpsItemWithContext is the same as GetOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetOpsItemWithContext(ctx aws.Context, input *GetOpsItemInput, opts ...request.Option) (*GetOpsItemOutput, error) { + req, out := c.GetOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpsSummary = "GetOpsSummary" + +// GetOpsSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetOpsSummary operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOpsSummary for more information on using the GetOpsSummary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOpsSummaryRequest method. +// req, resp := client.GetOpsSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsSummary +func (c *SSM) GetOpsSummaryRequest(input *GetOpsSummaryInput) (req *request.Request, output *GetOpsSummaryOutput) { + op := &request.Operation{ + Name: opGetOpsSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpsSummaryInput{} + } + + output = &GetOpsSummaryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpsSummary API operation for Amazon Simple Systems Manager (SSM). +// +// View a summary of OpsItems based on specified filters and aggregators. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetOpsSummary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeInvalidFilter "InvalidFilter" +// The filter name is not valid. Verify the you entered the correct name and +// try again. +// +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The specified token is not valid. +// +// * ErrCodeInvalidTypeNameException "InvalidTypeNameException" +// The parameter type name is not valid. +// +// * ErrCodeInvalidAggregatorException "InvalidAggregatorException" +// The specified aggregator is not valid for inventory groups. Verify that the +// aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsSummary +func (c *SSM) GetOpsSummary(input *GetOpsSummaryInput) (*GetOpsSummaryOutput, error) { + req, out := c.GetOpsSummaryRequest(input) + return out, req.Send() +} + +// GetOpsSummaryWithContext is the same as GetOpsSummary with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpsSummary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetOpsSummaryWithContext(ctx aws.Context, input *GetOpsSummaryInput, opts ...request.Option) (*GetOpsSummaryOutput, error) { + req, out := c.GetOpsSummaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetParameter = "GetParameter" // GetParameterRequest generates a "aws/request.Request" representing the @@ -6472,7 +6839,7 @@ func (c *SSM) GetParameterHistoryWithContext(ctx aws.Context, input *GetParamete // // Example iterating over at most 3 pages of a GetParameterHistory operation. // pageNum := 0 // err := client.GetParameterHistoryPages(params, -// func(page *GetParameterHistoryOutput, lastPage bool) bool { +// func(page *ssm.GetParameterHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6504,10 +6871,12 @@ func (c *SSM) GetParameterHistoryPagesWithContext(ctx aws.Context, input *GetPar }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetParameterHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetParameterHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6718,7 +7087,7 @@ func (c *SSM) GetParametersByPathWithContext(ctx aws.Context, input *GetParamete // // Example iterating over at most 3 pages of a GetParametersByPath operation. // pageNum := 0 // err := client.GetParametersByPathPages(params, -// func(page *GetParametersByPathOutput, lastPage bool) bool { +// func(page *ssm.GetParametersByPathOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -6750,10 +7119,12 @@ func (c *SSM) GetParametersByPathPagesWithContext(ctx aws.Context, input *GetPar }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetParametersByPathOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetParametersByPathOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -6812,8 +7183,8 @@ func (c *SSM) GetPatchBaselineRequest(input *GetPatchBaselineInput) (req *reques // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -7329,7 +7700,7 @@ func (c *SSM) ListAssociationsWithContext(ctx aws.Context, input *ListAssociatio // // Example iterating over at most 3 pages of a ListAssociations operation. // pageNum := 0 // err := client.ListAssociationsPages(params, -// func(page *ListAssociationsOutput, lastPage bool) bool { +// func(page *ssm.ListAssociationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7361,10 +7732,12 @@ func (c *SSM) ListAssociationsPagesWithContext(ctx aws.Context, input *ListAssoc }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListAssociationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListAssociationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7442,12 +7815,9 @@ func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput) // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. 
// -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -7491,7 +7861,7 @@ func (c *SSM) ListCommandInvocationsWithContext(ctx aws.Context, input *ListComm // // Example iterating over at most 3 pages of a ListCommandInvocations operation. // pageNum := 0 // err := client.ListCommandInvocationsPages(params, -// func(page *ListCommandInvocationsOutput, lastPage bool) bool { +// func(page *ssm.ListCommandInvocationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7523,10 +7893,12 @@ func (c *SSM) ListCommandInvocationsPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCommandInvocationsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCommandInvocationsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -7600,12 +7972,9 @@ func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Reques // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -7649,7 +8018,7 @@ func (c *SSM) ListCommandsWithContext(ctx aws.Context, input *ListCommandsInput, // // Example iterating over at most 3 pages of a ListCommands operation. // pageNum := 0 // err := client.ListCommandsPages(params, -// func(page *ListCommandsOutput, lastPage bool) bool { +// func(page *ssm.ListCommandsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -7681,10 +8050,12 @@ func (c *SSM) ListCommandsPagesWithContext(ctx aws.Context, input *ListCommandsI }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListCommandsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListCommandsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8060,7 +8431,7 @@ func (c *SSM) ListDocumentsWithContext(ctx aws.Context, input *ListDocumentsInpu // // Example iterating over at most 3 pages of a ListDocuments operation. 
// pageNum := 0 // err := client.ListDocumentsPages(params, -// func(page *ListDocumentsOutput, lastPage bool) bool { +// func(page *ssm.ListDocumentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -8092,10 +8463,12 @@ func (c *SSM) ListDocumentsPagesWithContext(ctx aws.Context, input *ListDocument }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDocumentsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDocumentsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -8161,12 +8534,9 @@ func (c *SSM) ListInventoryEntriesRequest(input *ListInventoryEntriesInput) (req // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -8543,7 +8913,7 @@ func (c *SSM) ModifyDocumentPermissionRequest(input *ModifyDocumentPermissionInp // documents. If you need to increase this limit, contact AWS Support. // // * ErrCodeDocumentLimitExceeded "DocumentLimitExceeded" -// You can have at most 200 active Systems Manager documents. +// You can have at most 500 active Systems Manager documents. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ModifyDocumentPermission func (c *SSM) ModifyDocumentPermission(input *ModifyDocumentPermissionInput) (*ModifyDocumentPermissionOutput, error) { @@ -8773,12 +9143,9 @@ func (c *SSM) PutInventoryRequest(input *PutInventoryInput) (req *request.Reques // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -9016,7 +9383,12 @@ func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBas // RegisterDefaultPatchBaseline API operation for Amazon Simple Systems Manager (SSM). // -// Defines the default patch baseline. +// Defines the default patch baseline for the relevant operating system. +// +// To reset the AWS predefined patch baseline as the default, specify the full +// patch baseline ARN as the baseline ID value. For example, for CentOS, specify +// arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead +// of pb-0574b43a65ea646ed. // // Returns awserr.Error for service API and SDK errors. 
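//
// A sketch of both forms (the custom baseline ID is a placeholder; the ARN is
// the CentOS example above):
//
//    // Make a custom baseline the default for its operating system.
//    _, err := client.RegisterDefaultPatchBaseline(&ssm.RegisterDefaultPatchBaselineInput{
//        BaselineId: aws.String("pb-0123456789abcdef0"),
//    })
//
//    // Reset the AWS predefined CentOS baseline as the default.
//    _, err = client.RegisterDefaultPatchBaseline(&ssm.RegisterDefaultPatchBaselineInput{
//        BaselineId: aws.String("arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed"),
//    })
//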
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9031,8 +9403,8 @@ func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBas // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9121,8 +9493,8 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // baseline that is already registered with a different patch baseline. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9133,7 +9505,7 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9207,7 +9579,7 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // RegisterTargetWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Registers a target with a Maintenance Window. +// Registers a target with a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9222,15 +9594,15 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. 
// // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9304,7 +9676,7 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // RegisterTaskWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Adds a new task to a Maintenance Window. +// Adds a new task to a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9319,21 +9691,21 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeFeatureNotAvailableException "FeatureNotAvailableException" -// You attempted to register a LAMBDA or STEP_FUNCTION task in a region where +// You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where // the corresponding service is not available. // // * ErrCodeInternalServerError "InternalServerError" @@ -9406,7 +9778,7 @@ func (c *SSM) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) // RemoveTagsFromResource API operation for Amazon Simple Systems Manager (SSM). // -// Removes all tags from the specified resource. +// Removes tag keys from the specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9614,8 +9986,8 @@ func (c *SSM) ResumeSessionRequest(input *ResumeSessionInput) (req *request.Requ // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9802,12 +10174,9 @@ func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. 
On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -10102,10 +10471,12 @@ func (c *SSM) StartSessionRequest(input *StartSessionInput) (req *request.Reques // // AWS CLI usage: start-session is an interactive command that requires the // Session Manager plugin to be installed on the client machine making the call. -// For information, see Install the Session Manager Plugin for the AWS CLI -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) +// For information, see Install the Session Manager Plugin for the AWS CLI (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) // in the AWS Systems Manager User Guide. // +// AWS Tools for PowerShell usage: Start-SSMSession is not currently supported +// by AWS Tools for PowerShell on Windows local machines. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10292,8 +10663,8 @@ func (c *SSM) TerminateSessionRequest(input *TerminateSessionInput) (req *reques // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10370,6 +10741,13 @@ func (c *SSM) UpdateAssociationRequest(input *UpdateAssociationInput) (req *requ // Updates an association. You can update the association name and version, // the document version, schedule, parameters, and Amazon S3 output. // +// In order to call this API action, your IAM user account, group, or role must +// be configured with permission to call the DescribeAssociation API action. +// If you don't have permission to call DescribeAssociation, then you receive +// the following error: An error occurred (AccessDeniedException) when calling +// the UpdateAssociation operation: User: is not authorized to perform: +// ssm:DescribeAssociation on resource: +// // When you update an association, the association immediately runs against // the specified targets. // @@ -10509,12 +10887,9 @@ func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. 
Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -10598,7 +10973,7 @@ func (c *SSM) UpdateDocumentRequest(input *UpdateDocumentInput) (req *request.Re // UpdateDocument API operation for Amazon Simple Systems Manager (SSM). // -// The document you want to update. +// Updates one or more values for an SSM document. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10796,7 +11171,14 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // UpdateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Updates an existing Maintenance Window. Only specified parameters are modified. +// Updates an existing maintenance window. Only specified parameters are modified. +// +// The value you specify for Duration determines the specific end time for the +// maintenance window based on the time it begins. No maintenance window tasks +// are permitted to start after the resulting end time minus the number of hours +// you specify for Cutoff. For example, if the maintenance window starts at +// 3 PM, the duration is three hours, and the value you specify for Cutoff is +// one hour, no maintenance window tasks can start after 5 PM. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10807,8 +11189,8 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10882,21 +11264,22 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // UpdateMaintenanceWindowTarget API operation for Amazon Simple Systems Manager (SSM). // -// Modifies the target of an existing Maintenance Window. You can't change the -// target type, but you can change the following: +// Modifies the target of an existing maintenance window. You can change the +// following: // -// The target from being an ID target to a Tag target, or a Tag target to an -// ID target. +// * Name // -// IDs for an ID target. +// * Description // -// Tags for a Tag target. +// * Owner // -// Owner. +// * IDs for an ID target // -// Name. +// * Tags for a Tag target // -// Description. +// * From any supported tag type to another. The three supported tag types +// are ID target, Tag target, and resource group. For more information, see +// Target. // // If a parameter is null, then the corresponding field is not modified. // @@ -10909,8 +11292,8 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist.
+// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10984,7 +11367,7 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // UpdateMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). // -// Modifies a task assigned to a Maintenance Window. You can't change the task +// Modifies a task assigned to a maintenance window. You can't change the task // type, but you can change the following values: // // * TaskARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript @@ -11014,8 +11397,8 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -11090,7 +11473,7 @@ func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleI // UpdateManagedInstanceRole API operation for Amazon Simple Systems Manager (SSM). // -// Assigns or changes an Amazon Identity and Access Management (IAM) role to +// Assigns or changes an AWS Identity and Access Management (IAM) role for // the managed instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11106,12 +11489,9 @@ func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleI // // You do not have permission to access the instance. // -// SSM Agent is not running. On managed instances and Linux instances, verify -// that the SSM Agent is running. On EC2 Windows instances, verify that the -// EC2Config service is running. +// SSM Agent is not running. Verify that SSM Agent is running. // -// SSM Agent or EC2Config service is not registered to the SSM endpoint. Try -// reinstalling SSM Agent or EC2Config service. +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -11141,6 +11521,109 @@ func (c *SSM) UpdateManagedInstanceRoleWithContext(ctx aws.Context, input *Updat return out, req.Send() } +const opUpdateOpsItem = "UpdateOpsItem" + +// UpdateOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOpsItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See UpdateOpsItem for more information on using the UpdateOpsItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic.
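+//
+// Editor's sketch (header name is hypothetical): one way to inject a custom
+// header into the request lifecycle before sending, via the request's
+// underlying *http.Request:
+//
+//    req, resp := client.UpdateOpsItemRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Header", "example-value")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }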
+// +// +// // Example sending a request using the UpdateOpsItemRequest method. +// req, resp := client.UpdateOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateOpsItem +func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Request, output *UpdateOpsItemOutput) { + op := &request.Operation{ + Name: opUpdateOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateOpsItemInput{} + } + + output = &UpdateOpsItemOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Edit or change an OpsItem. You must have permission in AWS Identity and Access +// Management (IAM) to update an OpsItem. For more information, see Getting +// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateOpsItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeOpsItemNotFoundException "OpsItemNotFoundException" +// The specified OpsItem ID doesn't exist. Verify the ID and try again. +// +// * ErrCodeOpsItemAlreadyExistsException "OpsItemAlreadyExistsException" +// The OpsItem already exists. +// +// * ErrCodeOpsItemLimitExceededException "OpsItemLimitExceededException" +// The request caused OpsItems to exceed one or more limits. For information +// about OpsItem limits, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// +// * ErrCodeOpsItemInvalidParameterException "OpsItemInvalidParameterException" +// A specified parameter argument isn't valid. Verify the available arguments +// and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateOpsItem +func (c *SSM) UpdateOpsItem(input *UpdateOpsItemInput) (*UpdateOpsItemOutput, error) { + req, out := c.UpdateOpsItemRequest(input) + return out, req.Send() +} + +// UpdateOpsItemWithContext is the same as UpdateOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
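+//
+// Editor's sketch (OpsItem ID and status are hypothetical): a context.Context
+// from the standard library satisfies aws.Context, so a timeout can be
+// attached like this:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.UpdateOpsItemWithContext(ctx, &ssm.UpdateOpsItemInput{
+//        OpsItemId: aws.String("oi-0123456789ab"),
+//        Status:    aws.String(ssm.OpsItemStatusInProgress),
+//    })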
+func (c *SSM) UpdateOpsItemWithContext(ctx aws.Context, input *UpdateOpsItemInput, opts ...request.Option) (*UpdateOpsItemOutput, error) { + req, out := c.UpdateOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdatePatchBaseline = "UpdatePatchBaseline" // UpdatePatchBaselineRequest generates a "aws/request.Request" representing the @@ -11200,8 +11683,8 @@ func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req * // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -11458,7 +11941,7 @@ type AddTagsToResourceInput struct { // For the Document and Parameter values, use the name of the resource. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following + // instances. You must specify the name of the managed instance in the following // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceId is a required field @@ -11467,7 +11950,7 @@ type AddTagsToResourceInput struct { // Specifies the type of resource you are tagging. // // The ManagedInstance type for this API action is for on-premises managed instances. - // You must specify the the name of the managed instance in the following format: + // You must specify the name of the managed instance in the following format: // mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceType is a required field @@ -12606,16 +13089,21 @@ func (s *AttachmentInformation) SetName(v string) *AttachmentInformation { return s } -// A key and value pair that identifies the location of an attachment to a document. +// Identifying information about a document attachment, including the file name +// and a key-value pair that identifies the location of an attachment to a document. type AttachmentsSource struct { _ struct{} `type:"structure"` - // The key of a key and value pair that identifies the location of an attachment + // The key of a key-value pair that identifies the location of an attachment // to a document. Key *string `type:"string" enum:"AttachmentsSourceKey"` - // The URL of the location of a document attachment, such as the URL of an Amazon - // S3 bucket. + // The name of the document attachment file. + Name *string `type:"string"` + + // The value of a key-value pair that identifies the location of an attachment + // to a document. The format is the URL of the location of a document attachment, + // such as the URL of an Amazon S3 bucket. Values []*string `min:"1" type:"list"` } @@ -12648,6 +13136,12 @@ func (s *AttachmentsSource) SetKey(v string) *AttachmentsSource { return s } +// SetName sets the Name field's value. +func (s *AttachmentsSource) SetName(v string) *AttachmentsSource { + s.Name = &v + return s +} + // SetValues sets the Values field's value. 
func (s *AttachmentsSource) SetValues(v []*string) *AttachmentsSource { s.Values = v @@ -13252,7 +13746,7 @@ func (s CancelCommandOutput) GoString() string { type CancelMaintenanceWindowExecutionInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution to stop. + // The ID of the maintenance window execution to stop. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -13293,7 +13787,7 @@ func (s *CancelMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) * type CancelMaintenanceWindowExecutionOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution that has been stopped. + // The ID of the maintenance window execution that has been stopped. WindowExecutionId *string `min:"36" type:"string"` } @@ -13660,33 +14154,17 @@ type CommandFilter struct { // before July 7, 2018. // // * Status: Specify a valid command status to see a list of all command - // executions with that status. Status values you can specify include: - // - // Pending - // - // InProgress - // - // Success - // - // Cancelled - // - // Failed - // - // TimedOut - // - // Cancelling + // executions with that status. Status values you can specify include: Pending + // InProgress Success Cancelled Failed TimedOut Cancelling // // * DocumentName: Specify name of the SSM document for which you want to // see command execution results. For example, specify AWS-RunPatchBaseline // to see command executions that used this SSM document to perform security // patching operations on instances. // - // * ExecutionStage: Specify one of the following values: - // - // Executing: Returns a list of command executions that are currently still - // running. - // - // Complete: Returns a list of command executions that have already completed. + // * ExecutionStage: Specify one of the following values: Executing: Returns + // a list of command executions that are currently still running. Complete: + // Returns a list of command executions that have already completed. // // Value is a required field Value *string `locationName:"value" min:"1" type:"string" required:"true"` @@ -14999,6 +15477,12 @@ type CreateAssociationInput struct { DocumentVersion *string `type:"string"` // The instance ID. + // + // InstanceId has been deprecated. To specify an instance ID for an association, + // use the Targets parameter. If you use the parameter InstanceId, you cannot + // use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, + // OutputLocation, or ScheduleExpression. To use these parameters, you must + // use the Targets parameter. InstanceId *string `type:"string"` // The maximum number of targets allowed to run the association at the same @@ -15058,7 +15542,8 @@ type CreateAssociationInput struct { // A cron expression when the association will be applied to the target(s). ScheduleExpression *string `min:"1" type:"string"` - // The targets (either instances or tags) for the association. + // The targets (either instances or tags) for the association. You must specify + // a value for Targets if you don't specify a value for InstanceId. Targets []*Target `type:"list"` } @@ -15232,11 +15717,11 @@ type CreateDocumentInput struct { // Do not use the following to begin the names of documents you create. 
They // are reserved by AWS for use as document prefixes: // - // aws + // * aws // - // amazon + // * amazon // - // amzn + // * amzn // // Name is a required field Name *string `type:"string" required:"true"` @@ -15392,13 +15877,13 @@ func (s *CreateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *C type CreateMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // Enables a Maintenance Window task to run on managed instances, even if you + // Enables a maintenance window task to run on managed instances, even if you // have not registered those instances as targets. If enabled, then you must // specify the unregistered instances (by instance ID) when you register a task - // with the Maintenance Window + // with the maintenance window. // // If you don't enable this option, then you must specify previously-registered - // targets when you register a task with the Maintenance Window. + // targets when you register a task with the maintenance window. // // AllowUnassociatedTargets is a required field AllowUnassociatedTargets *bool `type:"boolean" required:"true"` @@ -15406,50 +15891,50 @@ type CreateMaintenanceWindowInput struct { // User-provided idempotency token. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. // // Cutoff is a required field Cutoff *int64 `type:"integer" required:"true"` - // An optional description for the Maintenance Window. We recommend specifying - // a description to help you organize your Maintenance Windows. + // An optional description for the maintenance window. We recommend specifying + // a description to help you organize your maintenance windows. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. // // Duration is a required field Duration *int64 `min:"1" type:"integer" required:"true"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become inactive. EndDate allows you to set a date and time in the - // future when the Maintenance Window will no longer run. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become inactive. EndDate allows you to set a date and time in the + // future when the maintenance window will no longer run. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. // // Name is a required field Name *string `min:"3" type:"string" required:"true"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. // // Schedule is a required field Schedule *string `min:"1" type:"string" required:"true"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. 
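//
// Editor's sketch (values are illustrative): a window that opens every Sunday
// at 02:00 Pacific time for three hours, with a one-hour cutoff:
//
//    Schedule:         aws.String("cron(0 2 ? * SUN *)"),
//    ScheduleTimezone: aws.String("America/Los_Angeles"),
//    Duration:         aws.Int64(3),
//    Cutoff:           aws.Int64(1),
//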
ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become active. StartDate allows you to delay activation of the - // Maintenance Window until the specified future date. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become active. StartDate allows you to delay activation of the + // maintenance window until the specified future date. StartDate *string `type:"string"` // Optional metadata that you assign to a resource. Tags enable you to categorize // a resource in different ways, such as by purpose, owner, or environment. - // For example, you might want to tag a Maintenance Window to identify the type + // For example, you might want to tag a maintenance window to identify the type // of tasks it will run, the types of targets, and the environment it will run // in. In this case, you could specify the following key name/value pairs: // @@ -15459,7 +15944,7 @@ type CreateMaintenanceWindowInput struct { // // * Key=Environment,Value=Production // - // To add tags to an existing Maintenance Window, use the AddTagsToResource + // To add tags to an existing maintenance window, use the AddTagsToResource // action. Tags []*Tag `type:"list"` } @@ -15593,7 +16078,7 @@ func (s *CreateMaintenanceWindowInput) SetTags(v []*Tag) *CreateMaintenanceWindo type CreateMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -15613,6 +16098,205 @@ func (s *CreateMaintenanceWindowOutput) SetWindowId(v string) *CreateMaintenance return s } +type CreateOpsItemInput struct { + _ struct{} `type:"structure"` + + // Information about the OpsItem. + // + // Description is a required field + Description *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Operational data is custom data that provides useful reference details about + // the OpsItem. For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. + // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. 
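+ //
+ // Editor's sketch (key and value are illustrative): operational data is
+ // passed as a map of OpsItemDataValue entries:
+ //
+ //    OperationalData: map[string]*ssm.OpsItemDataValue{
+ //        "failedResource": {
+ //            Type:  aws.String(ssm.OpsItemDataTypeSearchableString),
+ //            Value: aws.String("i-0123456789abcdef0"),
+ //        },
+ //    },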
+ OperationalData map[string]*OpsItemDataValue `type:"map"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItems. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. + RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. + // + // Source is a required field + Source *string `min:"1" type:"string" required:"true"` + + // Optional metadata that you assign to a resource. You can restrict access + // to OpsItems by using an inline IAM policy that specifies tags. For more information, + // see Getting Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html#OpsCenter-getting-started-user-permissions) + // in the AWS Systems Manager User Guide. + // + // Tags use a key-value pair. For example: + // + // Key=Department,Value=Finance + // + // To add tags to an existing OpsItem, use the AddTagsToResource action. + Tags []*Tag `type:"list"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + // + // Title is a required field + Title *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOpsItemInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Source != nil && len(*s.Source) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Source", 1)) + } + if s.Title == nil { + invalidParams.Add(request.NewErrParamRequired("Title")) + } + if s.Title != nil && len(*s.Title) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Title", 1)) + } + if s.RelatedOpsItems != nil { + for i, v := range s.RelatedOpsItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedOpsItems", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateOpsItemInput) SetDescription(v string) *CreateOpsItemInput { + s.Description = &v + return s +} + +// SetNotifications sets the Notifications field's value. 
+func (s *CreateOpsItemInput) SetNotifications(v []*OpsItemNotification) *CreateOpsItemInput { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *CreateOpsItemInput) SetOperationalData(v map[string]*OpsItemDataValue) *CreateOpsItemInput { + s.OperationalData = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *CreateOpsItemInput) SetPriority(v int64) *CreateOpsItemInput { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *CreateOpsItemInput) SetRelatedOpsItems(v []*RelatedOpsItem) *CreateOpsItemInput { + s.RelatedOpsItems = v + return s +} + +// SetSource sets the Source field's value. +func (s *CreateOpsItemInput) SetSource(v string) *CreateOpsItemInput { + s.Source = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateOpsItemInput) SetTags(v []*Tag) *CreateOpsItemInput { + s.Tags = v + return s +} + +// SetTitle sets the Title field's value. +func (s *CreateOpsItemInput) SetTitle(v string) *CreateOpsItemInput { + s.Title = &v + return s +} + +type CreateOpsItemOutput struct { + _ struct{} `type:"structure"` + + // The ID of the OpsItem. + OpsItemId *string `type:"string"` +} + +// String returns the string representation +func (s CreateOpsItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpsItemOutput) GoString() string { + return s.String() +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *CreateOpsItemOutput) SetOpsItemId(v string) *CreateOpsItemOutput { + s.OpsItemId = &v + return s +} + type CreatePatchBaselineInput struct { _ struct{} `type:"structure"` @@ -16043,10 +16727,18 @@ func (s DeleteAssociationOutput) GoString() string { type DeleteDocumentInput struct { _ struct{} `type:"structure"` + // The version of the document that you want to delete. If not provided, all + // versions of the document are deleted. + DocumentVersion *string `type:"string"` + // The name of the document. // // Name is a required field Name *string `type:"string" required:"true"` + + // The version name of the document that you want to delete. If not provided, + // all versions of the document are deleted. + VersionName *string `type:"string"` } // String returns the string representation @@ -16072,12 +16764,24 @@ func (s *DeleteDocumentInput) Validate() error { return nil } +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *DeleteDocumentInput) SetDocumentVersion(v string) *DeleteDocumentInput { + s.DocumentVersion = &v + return s +} + // SetName sets the Name field's value. func (s *DeleteDocumentInput) SetName(v string) *DeleteDocumentInput { s.Name = &v return s } +// SetVersionName sets the VersionName field's value. +func (s *DeleteDocumentInput) SetVersionName(v string) *DeleteDocumentInput { + s.VersionName = &v + return s +} + type DeleteDocumentOutput struct { _ struct{} `type:"structure"` } @@ -16113,7 +16817,7 @@ type DeleteInventoryInput struct { // DisableSchema: If you choose this option, the system ignores all inventory // data for the specified version, and any earlier versions. To enable this // schema again, you must call the PutInventory action for a version greater - // than the disbled version. + // than the disabled version. // // DeleteSchema: This option deletes the specified custom type from the Inventory // service. You can recreate the schema later, if you want. 
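// Editor's sketch for the DeleteDocumentInput fields added above (document
// name and version are hypothetical): deleting a single version of a document
// rather than every version:
//
//    _, err := client.DeleteDocument(&ssm.DeleteDocumentInput{
//        Name:            aws.String("ExampleCustomDocument"),
//        DocumentVersion: aws.String("2"),
//    })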
@@ -16228,7 +16932,7 @@ func (s *DeleteInventoryOutput) SetTypeName(v string) *DeleteInventoryOutput { type DeleteMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window to delete. + // The ID of the maintenance window to delete. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -16269,7 +16973,7 @@ func (s *DeleteMaintenanceWindowInput) SetWindowId(v string) *DeleteMaintenanceW type DeleteMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the deleted Maintenance Window. + // The ID of the deleted maintenance window. WindowId *string `min:"20" type:"string"` } @@ -16685,10 +17389,10 @@ type DeregisterTargetFromMaintenanceWindowInput struct { // The system checks if the target is being referenced by a task. If the target // is being referenced, the system returns an error and does not deregister - // the target from the Maintenance Window. + // the target from the maintenance window. Safe *bool `type:"boolean"` - // The ID of the Maintenance Window the target should be removed from. + // The ID of the maintenance window the target should be removed from. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -16752,7 +17456,7 @@ func (s *DeregisterTargetFromMaintenanceWindowInput) SetWindowTargetId(v string) type DeregisterTargetFromMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the target was removed from. + // The ID of the maintenance window the target was removed from. WindowId *string `min:"20" type:"string"` // The ID of the removed target definition. @@ -16784,12 +17488,12 @@ func (s *DeregisterTargetFromMaintenanceWindowOutput) SetWindowTargetId(v string type DeregisterTaskFromMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the task should be removed from. + // The ID of the maintenance window the task should be removed from. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` - // The ID of the task to remove from the Maintenance Window. + // The ID of the task to remove from the maintenance window. // // WindowTaskId is a required field WindowTaskId *string `min:"36" type:"string" required:"true"` @@ -16842,10 +17546,10 @@ func (s *DeregisterTaskFromMaintenanceWindowInput) SetWindowTaskId(v string) *De type DeregisterTaskFromMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the task was removed from. + // The ID of the maintenance window the task was removed from. WindowId *string `min:"20" type:"string"` - // The ID of the task removed from the Maintenance Window. + // The ID of the task removed from the maintenance window. WindowTaskId *string `min:"36" type:"string"` } @@ -18450,11 +19154,9 @@ func (s *DescribeInstancePatchStatesOutput) SetNextToken(v string) *DescribeInst type DescribeInstancePatchesInput struct { _ struct{} `type:"structure"` - // Each entry in the array is a structure containing: - // - // Key (string, between 1 and 128 characters) - // - // Values (array of strings, each string between 1 and 256 characters) + // An array of structures. Each entry in the array is a structure containing + // a Key, Value combination. Valid values for Key are Classification | KBId + // | Severity | State. 
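+ //
+ // Editor's sketch (filter values are illustrative): limiting the results to
+ // critical patches that are missing from the instance:
+ //
+ //    Filters: []*ssm.PatchOrchestratorFilter{
+ //        {Key: aws.String("State"), Values: []*string{aws.String("Missing")}},
+ //        {Key: aws.String("Severity"), Values: []*string{aws.String("Critical")}},
+ //    },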
Filters []*PatchOrchestratorFilter `type:"list"` // The ID of the instance whose patch state information should be retrieved. @@ -18684,13 +19386,13 @@ type DescribeMaintenanceWindowExecutionTaskInvocationsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the specific task in the Maintenance Window task that should be + // The ID of the specific task in the maintenance window task that should be // retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution the task is part of. + // The ID of the maintenance window execution the task is part of. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -18821,7 +19523,7 @@ type DescribeMaintenanceWindowExecutionTasksInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window execution whose task executions should be + // The ID of the maintenance window execution whose task executions should be // retrieved. // // WindowExecutionId is a required field @@ -18946,7 +19648,7 @@ type DescribeMaintenanceWindowExecutionsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose executions should be retrieved. + // The ID of the maintenance window whose executions should be retrieved. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19022,7 +19724,7 @@ type DescribeMaintenanceWindowExecutionsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the Maintenance Windows execution. + // Information about the maintenance window executions. WindowExecutions []*MaintenanceWindowExecution `type:"list"` } @@ -19051,8 +19753,8 @@ func (s *DescribeMaintenanceWindowExecutionsOutput) SetWindowExecutions(v []*Mai type DescribeMaintenanceWindowScheduleInput struct { _ struct{} `type:"structure"` - // Filters used to limit the range of results. For example, you can limit Maintenance - // Window executions to only those scheduled before or after a certain date + // Filters used to limit the range of results. For example, you can limit maintenance + // window executions to only those scheduled before or after a certain date // and time. Filters []*PatchOrchestratorFilter `type:"list"` @@ -19072,7 +19774,7 @@ type DescribeMaintenanceWindowScheduleInput struct { // The instance ID or key/value pair to retrieve information about. Targets []*Target `type:"list"` - // The ID of the Maintenance Window to retrieve information about. + // The ID of the maintenance window to retrieve information about. WindowId *string `min:"20" type:"string"` } @@ -19165,7 +19867,7 @@ type DescribeMaintenanceWindowScheduleOutput struct { // next call.) NextToken *string `type:"string"` - // Information about Maintenance Window executions scheduled for the specified + // Information about maintenance window executions scheduled for the specified // time range. ScheduledWindowExecutions []*ScheduledWindowExecution `type:"list"` } @@ -19208,7 +19910,7 @@ type DescribeMaintenanceWindowTargetsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose targets should be retrieved. + // The ID of the maintenance window whose targets should be retrieved. 
// // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19284,7 +19986,7 @@ type DescribeMaintenanceWindowTargetsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the targets in the Maintenance Window. + // Information about the targets in the maintenance window. Targets []*MaintenanceWindowTarget `type:"list"` } @@ -19326,7 +20028,7 @@ type DescribeMaintenanceWindowTasksInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose tasks should be retrieved. + // The ID of the maintenance window whose tasks should be retrieved. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19402,7 +20104,7 @@ type DescribeMaintenanceWindowTasksOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the tasks in the Maintenance Window. + // Information about the tasks in the maintenance window. Tasks []*MaintenanceWindowTask `type:"list"` } @@ -19522,7 +20224,7 @@ type DescribeMaintenanceWindowsForTargetOutput struct { // next call.) NextToken *string `type:"string"` - // Information about the Maintenance Window targets and tasks an instance is + // Information about the maintenance window targets and tasks an instance is // associated with. WindowIdentities []*MaintenanceWindowIdentityForTarget `type:"list"` } @@ -19552,8 +20254,8 @@ type DescribeMaintenanceWindowsInput struct { _ struct{} `type:"structure"` - // Optional filters used to narrow down the scope of the returned Maintenance - // Windows. Supported filter keys are Name and Enabled. + // Optional filters used to narrow down the scope of the returned maintenance + // windows. Supported filter keys are Name and Enabled. Filters []*MaintenanceWindowFilter `type:"list"` // The maximum number of items to return for this call. The call also returns @@ -19624,7 +20326,7 @@ type DescribeMaintenanceWindowsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the Maintenance Windows. + // Information about the maintenance windows. WindowIdentities []*MaintenanceWindowIdentity `type:"list"` } @@ -19650,6 +20352,134 @@ func (s *DescribeMaintenanceWindowsOutput) SetWindowIdentities(v []*MaintenanceW return s } +type DescribeOpsItemsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` + + // One or more filters to limit the response.
+ // + // * Key: CreatedTime Operations: GreaterThan, LessThan + // + // * Key: LastModifiedBy Operations: Contains, Equals + // + // * Key: LastModifiedTime Operations: GreaterThan, LessThan + // + // * Key: Priority Operations: Equals + // + // * Key: Source Operations: Contains, Equals + // + // * Key: Status Operations: Equals + // + // * Key: Title Operations: Contains + // + // * Key: OperationalData* Operations: Equals + // + // * Key: OperationalDataKey Operations: Equals + // + // * Key: OperationalDataValue Operations: Equals, Contains + // + // * Key: OpsItemId Operations: Equals + // + // * Key: ResourceId Operations: Contains + // + // * Key: AutomationId Operations: Equals + // + // *If you filter the response by using the OperationalData operator, specify + // a key-value pair by using the following JSON format: {"key":"key_name","value":"a_value"} + OpsItemFilters []*OpsItemFilter `type:"list"` +} + +// String returns the string representation +func (s DescribeOpsItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOpsItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOpsItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOpsItemsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.OpsItemFilters != nil { + for i, v := range s.OpsItemFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OpsItemFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeOpsItemsInput) SetMaxResults(v int64) *DescribeOpsItemsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeOpsItemsInput) SetNextToken(v string) *DescribeOpsItemsInput { + s.NextToken = &v + return s +} + +// SetOpsItemFilters sets the OpsItemFilters field's value. +func (s *DescribeOpsItemsInput) SetOpsItemFilters(v []*OpsItemFilter) *DescribeOpsItemsInput { + s.OpsItemFilters = v + return s +} + +type DescribeOpsItemsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` + + // A list of OpsItems. + OpsItemSummaries []*OpsItemSummary `type:"list"` +} + +// String returns the string representation +func (s DescribeOpsItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOpsItemsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeOpsItemsOutput) SetNextToken(v string) *DescribeOpsItemsOutput { + s.NextToken = &v + return s +} + +// SetOpsItemSummaries sets the OpsItemSummaries field's value. 
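+
+// Editor's sketch (filter choice is illustrative): listing open OpsItems with
+// one of the filter keys documented on DescribeOpsItemsInput above:
+//
+//    out, err := client.DescribeOpsItems(&ssm.DescribeOpsItemsInput{
+//        OpsItemFilters: []*ssm.OpsItemFilter{{
+//            Key:      aws.String(ssm.OpsItemFilterKeyStatus),
+//            Operator: aws.String(ssm.OpsItemFilterOperatorEqual),
+//            Values:   []*string{aws.String(ssm.OpsItemStatusOpen)},
+//        }},
+//    })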
+func (s *DescribeOpsItemsOutput) SetOpsItemSummaries(v []*OpsItemSummary) *DescribeOpsItemsOutput { + s.OpsItemSummaries = v + return s +} + type DescribeParametersInput struct { _ struct{} `type:"structure"` @@ -22153,7 +22983,7 @@ func (s *GetInventorySchemaOutput) SetSchemas(v []*InventoryItemSchema) *GetInve type GetMaintenanceWindowExecutionInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22194,22 +23024,22 @@ func (s *GetMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) *Get type GetMaintenanceWindowExecutionOutput struct { _ struct{} `type:"structure"` - // The time the Maintenance Window finished running. + // The time the maintenance window finished running. EndTime *time.Time `type:"timestamp"` - // The time the Maintenance Window started running. + // The time the maintenance window started running. StartTime *time.Time `type:"timestamp"` - // The status of the Maintenance Window execution. + // The status of the maintenance window execution. Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` // The details explaining the Status. Only available for certain status values. StatusDetails *string `type:"string"` - // The ID of the task executions from the Maintenance Window execution. + // The ID of the task executions from the maintenance window execution. TaskIds []*string `type:"list"` - // The ID of the Maintenance Window execution. + // The ID of the maintenance window execution. WindowExecutionId *string `min:"36" type:"string"` } @@ -22262,13 +23092,13 @@ func (s *GetMaintenanceWindowExecutionOutput) SetWindowExecutionId(v string) *Ge type GetMaintenanceWindowExecutionTaskInput struct { _ struct{} `type:"structure"` - // The ID of the specific task execution in the Maintenance Window task that + // The ID of the specific task execution in the maintenance window task that // should be retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22326,13 +23156,13 @@ type GetMaintenanceWindowExecutionTaskInvocationInput struct { // InvocationId is a required field InvocationId *string `min:"36" type:"string" required:"true"` - // The ID of the specific task in the Maintenance Window task that should be + // The ID of the specific task in the maintenance window task that should be // retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution for which the task is a part. + // The ID of the maintenance window execution for which the task is a part. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22407,7 +23237,7 @@ type GetMaintenanceWindowExecutionTaskInvocationOutput struct { InvocationId *string `min:"36" type:"string"` // User-provided value to be included in any CloudWatch events raised while - // running tasks for these targets in this Maintenance Window. + // running tasks for these targets in this maintenance window. 
OwnerInformation *string `min:"1" type:"string" sensitive:"true"` // The parameters used at the time that the task ran. @@ -22426,14 +23256,14 @@ type GetMaintenanceWindowExecutionTaskInvocationOutput struct { // The task execution ID. TaskExecutionId *string `min:"36" type:"string"` - // Retrieves the task type for a Maintenance Window. Task types include the - // following: LAMBDA, STEP_FUNCTION, AUTOMATION, RUN_COMMAND. + // Retrieves the task type for a maintenance window. Task types include the + // following: LAMBDA, STEP_FUNCTIONS, AUTOMATION, RUN_COMMAND. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The Maintenance Window execution ID. + // The maintenance window execution ID. WindowExecutionId *string `min:"36" type:"string"` - // The Maintenance Window target ID. + // The maintenance window target ID. WindowTargetId *string `type:"string"` } @@ -22550,7 +23380,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // The ARN of the task that ran. TaskArn *string `min:"1" type:"string"` - // The ID of the specific task execution in the Maintenance Window task that + // The ID of the specific task execution in the maintenance window task that // was retrieved. TaskExecutionId *string `min:"36" type:"string"` @@ -22559,7 +23389,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // The map has the following format: // @@ -22571,7 +23401,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // The type of task that was run. Type *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. WindowExecutionId *string `min:"36" type:"string"` } @@ -22666,7 +23496,7 @@ func (s *GetMaintenanceWindowExecutionTaskOutput) SetWindowExecutionId(v string) type GetMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the desired Maintenance Window. + // The ID of the maintenance window for which you want to retrieve information. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -22707,56 +23537,56 @@ func (s *GetMaintenanceWindowInput) SetWindowId(v string) *GetMaintenanceWindowI type GetMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The date the Maintenance Window was created. + // The date the maintenance window was created. CreatedDate *time.Time `type:"timestamp"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` - // The description of the Maintenance Window. + // The description of the maintenance window. 
Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Windows is enabled. + // Indicates whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. The maintenance window will not run // after this specified time. EndDate *string `type:"string"` - // The date the Maintenance Window was last modified. + // The date the maintenance window was last modified. ModifiedDate *time.Time `type:"timestamp"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The next time the Maintenance Window will actually run, taking into account - // any specified times for the Maintenance Window to become active or inactive. + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. NextExecutionTime *string `type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. The maintenance window will not run // before this specified time. StartDate *string `type:"string"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -22857,12 +23687,12 @@ func (s *GetMaintenanceWindowOutput) SetWindowId(v string) *GetMaintenanceWindow type GetMaintenanceWindowTaskInput struct { _ struct{} `type:"structure"` - // The Maintenance Window ID that includes the task to retrieve. + // The maintenance window ID that includes the task to retrieve. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` - // The Maintenance Window task ID to retrieve. + // The maintenance window task ID to retrieve. // // WindowTaskId is a required field WindowTaskId *string `min:"36" type:"string" required:"true"` @@ -22923,7 +23753,7 @@ type GetMaintenanceWindowTaskOutput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. 
For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets allowed to run this task in parallel. @@ -22939,7 +23769,8 @@ type GetMaintenanceWindowTaskOutput struct { // priority. Tasks that have the same priority are scheduled in parallel. Priority *int64 `type:"integer"` - // The IAM service role to assume during task execution. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The targets where the task should run. @@ -22947,8 +23778,8 @@ type GetMaintenanceWindowTaskOutput struct { // The resource that the task used during execution. For RUN_COMMAND and AUTOMATION // task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA - // tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value - // is the state machine ARN. + // tasks, the value is the function name/ARN. For STEP_FUNCTIONS tasks, the + // value is the state machine ARN. TaskArn *string `min:"1" type:"string"` // The parameters to pass to the task when it runs. @@ -22959,16 +23790,16 @@ type GetMaintenanceWindowTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task to run. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The retrieved Maintenance Window ID. + // The retrieved maintenance window ID. WindowId *string `min:"20" type:"string"` - // The retrieved Maintenance Window task ID. + // The retrieved maintenance window task ID. WindowTaskId *string `min:"36" type:"string"` } @@ -23066,6 +23897,197 @@ func (s *GetMaintenanceWindowTaskOutput) SetWindowTaskId(v string) *GetMaintenan return s } +type GetOpsItemInput struct { + _ struct{} `type:"structure"` + + // The ID of the OpsItem that you want to get. + // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpsItemInput"} + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsItemId sets the OpsItemId field's value. 
+func (s *GetOpsItemInput) SetOpsItemId(v string) *GetOpsItemInput { + s.OpsItemId = &v + return s +} + +type GetOpsItemOutput struct { + _ struct{} `type:"structure"` + + // The OpsItem. + OpsItem *OpsItem `type:"structure"` +} + +// String returns the string representation +func (s GetOpsItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsItemOutput) GoString() string { + return s.String() +} + +// SetOpsItem sets the OpsItem field's value. +func (s *GetOpsItemOutput) SetOpsItem(v *OpsItem) *GetOpsItemOutput { + s.OpsItem = v + return s +} + +type GetOpsSummaryInput struct { + _ struct{} `type:"structure"` + + // Optional aggregators that return counts of OpsItems based on one or more + // expressions. + // + // Aggregators is a required field + Aggregators []*OpsAggregator `min:"1" type:"list" required:"true"` + + // Optional filters used to scope down the returned OpsItems. + Filters []*OpsFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetOpsSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsSummaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpsSummaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpsSummaryInput"} + if s.Aggregators == nil { + invalidParams.Add(request.NewErrParamRequired("Aggregators")) + } + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregators sets the Aggregators field's value. +func (s *GetOpsSummaryInput) SetAggregators(v []*OpsAggregator) *GetOpsSummaryInput { + s.Aggregators = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetOpsSummaryInput) SetFilters(v []*OpsFilter) *GetOpsSummaryInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetOpsSummaryInput) SetMaxResults(v int64) *GetOpsSummaryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
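Since GetOpsItemInput and GetOpsItemOutput above are self-contained, fetching a single OpsItem takes only a few lines. A sketch under the same assumptions as the previous one (default credential chain, placeholder region and OpsItem ID):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// OpsItemId is the only field, and it is required; "oi-..." is a placeholder.
	out, err := svc.GetOpsItem(&ssm.GetOpsItemInput{
		OpsItemId: aws.String("oi-0123456789ab"),
	})
	if err != nil {
		log.Fatal(err)
	}

	item := out.OpsItem
	fmt.Printf("%s: %q status=%s priority=%d\n",
		aws.StringValue(item.OpsItemId), aws.StringValue(item.Title),
		aws.StringValue(item.Status), aws.Int64Value(item.Priority))
}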
+func (s *GetOpsSummaryInput) SetNextToken(v string) *GetOpsSummaryInput { + s.NextToken = &v + return s +} + +type GetOpsSummaryOutput struct { + _ struct{} `type:"structure"` + + // The list of aggregated and filtered OpsItems. + Entities []*OpsEntity `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetOpsSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsSummaryOutput) GoString() string { + return s.String() +} + +// SetEntities sets the Entities field's value. +func (s *GetOpsSummaryOutput) SetEntities(v []*OpsEntity) *GetOpsSummaryOutput { + s.Entities = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetOpsSummaryOutput) SetNextToken(v string) *GetOpsSummaryOutput { + s.NextToken = &v + return s +} + type GetParameterHistoryInput struct { _ struct{} `type:"structure"` @@ -23447,8 +24469,8 @@ func (s *GetParametersInput) SetWithDecryption(v bool) *GetParametersInput { type GetParametersOutput struct { _ struct{} `type:"structure"` - // A list of parameters that are not formatted correctly or do not run when - // executed. + // A list of parameters that are not formatted correctly or do not run during + // an execution. InvalidParameters []*string `min:"1" type:"list"` // A list of details for a parameter. @@ -23994,7 +25016,7 @@ type InstanceAssociationStatusInfo struct { // Detailed status information about the instance association. DetailedStatus *string `type:"string"` - // The association document verions. + // The association document versions. DocumentVersion *string `type:"string"` // An error code returned by the request to create the association. @@ -24132,10 +25154,10 @@ type InstanceInformation struct { // The instance ID. InstanceId *string `type:"string"` - // Indicates whether latest version of SSM Agent is running on your instance. - // Some older versions of Windows Server use the EC2Config service to process - // SSM requests. For this reason, this field does not indicate whether or not - // the latest version is installed on Windows managed instances. + // Indicates whether the latest version of SSM Agent is running on your Linux + // Managed Instance. This field does not indicate whether or not the latest + // version is installed on Windows managed instances, because some older versions + // of Windows Server use the EC2Config service to process SSM requests. IsLatestVersion *bool `type:"boolean"` // The date the association was last run. @@ -24783,7 +25805,7 @@ type InventoryDeletionStatusItem struct { DeletionStartTime *time.Time `type:"timestamp"` // Information about the delete operation. For more information about this summary, - // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-delete.html#sysman-inventory-delete-summary) + // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete) // in the AWS Systems Manager User Guide. DeletionSummary *InventoryDeletionSummary `type:"structure"` @@ -25406,7 +26428,7 @@ type LabelParameterVersionInput struct { // The specific version of the parameter on which you want to attach one or // more labels. 
If no version is specified, the system attaches the label to - // the latest version.) + // the latest version. ParameterVersion *int64 `type:"long"` } @@ -25467,6 +26489,9 @@ type LabelParameterVersionOutput struct { // label requirements, see Labeling Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html) // in the AWS Systems Manager User Guide. InvalidLabels []*string `min:"1" type:"list"` + + // The version of the parameter that has been labeled. + ParameterVersion *int64 `type:"long"` } // String returns the string representation @@ -25485,6 +26510,12 @@ func (s *LabelParameterVersionOutput) SetInvalidLabels(v []*string) *LabelParame return s } +// SetParameterVersion sets the ParameterVersion field's value. +func (s *LabelParameterVersionOutput) SetParameterVersion(v int64) *LabelParameterVersionOutput { + s.ParameterVersion = &v + return s +} + type ListAssociationVersionsInput struct { _ struct{} `type:"structure"` @@ -25694,7 +26725,7 @@ type ListCommandInvocationsInput struct { Details *bool `type:"boolean"` // (Optional) One or more filters. Use a filter to return a more specific list - // of results. + // of results. Note that the DocumentName filter is not supported for ListCommandInvocations. Filters []*CommandFilter `min:"1" type:"list"` // (Optional) The command execution details for a specific instance ID. @@ -26819,7 +27850,7 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. type LoggingInfo struct { _ struct{} `type:"structure"` @@ -26902,12 +27933,12 @@ type MaintenanceWindowAutomationParameters struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For AUTOMATION task types, Systems Manager ignores any values specified for // these parameters. @@ -26949,7 +27980,7 @@ func (s *MaintenanceWindowAutomationParameters) SetParameters(v map[string][]*st return s } -// Describes the information about an execution of a Maintenance Window. +// Describes the information about an execution of a maintenance window. type MaintenanceWindowExecution struct { _ struct{} `type:"structure"` @@ -26965,10 +27996,10 @@ type MaintenanceWindowExecution struct { // The details explaining the Status. 
Only available for certain status values. StatusDetails *string `type:"string"` - // The ID of the Maintenance Window execution. + // The ID of the maintenance window execution. WindowExecutionId *string `min:"36" type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27018,7 +28049,7 @@ func (s *MaintenanceWindowExecution) SetWindowId(v string) *MaintenanceWindowExe return s } -// Information about a task execution performed as part of a Maintenance Window +// Information about a task execution performed as part of a maintenance window // execution. type MaintenanceWindowExecutionTaskIdentity struct { _ struct{} `type:"structure"` @@ -27039,13 +28070,13 @@ type MaintenanceWindowExecutionTaskIdentity struct { // The ARN of the task that ran. TaskArn *string `min:"1" type:"string"` - // The ID of the specific task execution in the Maintenance Window execution. + // The ID of the specific task execution in the maintenance window execution. TaskExecutionId *string `min:"36" type:"string"` // The type of task that ran. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that ran the task. + // The ID of the maintenance window execution that ran the task. WindowExecutionId *string `min:"36" type:"string"` } @@ -27108,7 +28139,7 @@ func (s *MaintenanceWindowExecutionTaskIdentity) SetWindowExecutionId(v string) } // Describes the information about a task invocation for a particular target -// as part of a task execution performed as part of a Maintenance Window execution. +// as part of a task execution performed as part of a maintenance window execution. type MaintenanceWindowExecutionTaskInvocationIdentity struct { _ struct{} `type:"structure"` @@ -27123,7 +28154,7 @@ type MaintenanceWindowExecutionTaskInvocationIdentity struct { InvocationId *string `min:"36" type:"string"` // User-provided value that was specified when the target was registered with - // the Maintenance Window. This was also included in any CloudWatch events raised + // the maintenance window. This was also included in any CloudWatch events raised // during the task invocation. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` @@ -27140,16 +28171,16 @@ type MaintenanceWindowExecutionTaskInvocationIdentity struct { // for certain Status values. StatusDetails *string `type:"string"` - // The ID of the specific task execution in the Maintenance Window execution. + // The ID of the specific task execution in the maintenance window execution. TaskExecutionId *string `min:"36" type:"string"` // The task type. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that ran the task. + // The ID of the maintenance window execution that ran the task. WindowExecutionId *string `min:"36" type:"string"` - // The ID of the target definition in this Maintenance Window the invocation + // The ID of the target definition in this maintenance window the invocation // was performed for. WindowTargetId *string `type:"string"` } @@ -27282,46 +28313,46 @@ func (s *MaintenanceWindowFilter) SetValues(v []*string) *MaintenanceWindowFilte return s } -// Information about the Maintenance Window. +// Information about the maintenance window. 
type MaintenanceWindowIdentity struct { _ struct{} `type:"structure"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` - // A description of the Maintenance Window. + // A description of the maintenance window. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Indicates whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The next time the Maintenance Window will actually run, taking into account - // any specified times for the Maintenance Window to become active or inactive. + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. NextExecutionTime *string `type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. StartDate *string `type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27401,14 +28432,14 @@ func (s *MaintenanceWindowIdentity) SetWindowId(v string) *MaintenanceWindowIden return s } -// The Maintenance Window to which the specified target belongs. +// The maintenance window to which the specified target belongs. type MaintenanceWindowIdentityForTarget struct { _ struct{} `type:"structure"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27442,12 +28473,12 @@ func (s *MaintenanceWindowIdentityForTarget) SetWindowId(v string) *MaintenanceW // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. 
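MaintenanceWindowIdentity above is the element type returned when listing windows. A sketch of such a listing; the DescribeMaintenanceWindows action and its WindowIdentities result field are assumed from the same SDK release, since neither appears in this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.DescribeMaintenanceWindows(&ssm.DescribeMaintenanceWindowsInput{})
	if err != nil {
		log.Fatal(err)
	}

	for _, w := range out.WindowIdentities {
		// Schedule is a cron or rate expression; NextExecutionTime accounts for
		// the window's StartDate/EndDate activation bounds, per the doc comments above.
		fmt.Printf("%s %-20s enabled=%t schedule=%q next=%s\n",
			aws.StringValue(w.WindowId), aws.StringValue(w.Name), aws.BoolValue(w.Enabled),
			aws.StringValue(w.Schedule), aws.StringValue(w.NextExecutionTime))
	}
}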
// // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Lambda tasks, Systems Manager ignores any values specified for TaskParameters // and LoggingInfo. @@ -27523,19 +28554,19 @@ func (s *MaintenanceWindowLambdaParameters) SetQualifier(v string) *MaintenanceW // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Run Command tasks, Systems Manager uses specified values for TaskParameters // and LoggingInfo only if no values are specified for TaskInvocationParameters. type MaintenanceWindowRunCommandParameters struct { _ struct{} `type:"structure"` - // Information about the command(s) to run. + // Information about the commands to run. Comment *string `type:"string"` // The SHA-256 or SHA-1 hash created by the system when the document was created. @@ -27558,7 +28589,8 @@ type MaintenanceWindowRunCommandParameters struct { // The parameters for the RUN_COMMAND task execution. Parameters map[string][]*string `type:"map"` - // The IAM service role to assume during task execution. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // If this time is reached and the command has not already started running, @@ -27646,7 +28678,7 @@ func (s *MaintenanceWindowRunCommandParameters) SetTimeoutSeconds(v int64) *Main return s } -// The parameters for a STEP_FUNCTION task. +// The parameters for a STEP_FUNCTIONS task. // // For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow // and UpdateMaintenanceWindowTask. @@ -27654,22 +28686,22 @@ func (s *MaintenanceWindowRunCommandParameters) SetTimeoutSeconds(v int64) *Main // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. 
To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Step Functions tasks, Systems Manager ignores any values specified for // TaskParameters and LoggingInfo. type MaintenanceWindowStepFunctionsParameters struct { _ struct{} `type:"structure"` - // The inputs for the STEP_FUNCTION task. + // The inputs for the STEP_FUNCTIONS task. Input *string `type:"string" sensitive:"true"` - // The name of the STEP_FUNCTION task. + // The name of the STEP_FUNCTIONS task. Name *string `min:"1" type:"string"` } @@ -27708,21 +28740,21 @@ func (s *MaintenanceWindowStepFunctionsParameters) SetName(v string) *Maintenanc return s } -// The target registered with the Maintenance Window. +// The target registered with the maintenance window. type MaintenanceWindowTarget struct { _ struct{} `type:"structure"` // A description for the target. Description *string `min:"1" type:"string" sensitive:"true"` - // The target name. + // The name for the maintenance window target. Name *string `min:"3" type:"string"` // A user-provided value that will be included in any CloudWatch events that - // are raised while running tasks for these targets in this Maintenance Window. + // are raised while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` - // The type of target that is being registered with the Maintenance Window. + // The type of target that is being registered with the maintenance window. ResourceType *string `type:"string" enum:"MaintenanceWindowResourceType"` // The targets, either instances or tags. @@ -27736,7 +28768,7 @@ type MaintenanceWindowTarget struct { // Key=,Values=. Targets []*Target `type:"list"` - // The ID of the Maintenance Window to register the target with. + // The ID of the maintenance window to register the target with. WindowId *string `min:"20" type:"string"` // The ID of the target. @@ -27795,7 +28827,7 @@ func (s *MaintenanceWindowTarget) SetWindowTargetId(v string) *MaintenanceWindow return s } -// Information about a task defined for a Maintenance Window. +// Information about a task defined for a maintenance window. type MaintenanceWindowTask struct { _ struct{} `type:"structure"` @@ -27807,7 +28839,7 @@ type MaintenanceWindowTask struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets this task can be run for, in parallel. @@ -27819,12 +28851,13 @@ type MaintenanceWindowTask struct { // The task name. Name *string `min:"3" type:"string"` - // The priority of the task in the Maintenance Window. The lower the number, + // The priority of the task in the maintenance window. The lower the number, // the higher the priority. 
Tasks that have the same priority are scheduled // in parallel. Priority *int64 `type:"integer"` - // The role that should be assumed when running the task. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The targets (either instances or tags). Instances are specified using Key=instanceids,Values=,. @@ -27833,8 +28866,8 @@ type MaintenanceWindowTask struct { // The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION // task types, TaskArn is the Systems Manager document name or ARN. For LAMBDA - // tasks, it's the function name or ARN. For STEP_FUNCTION tasks, it's the state - // machine ARN. + // tasks, it's the function name or ARN. For STEP_FUNCTIONS tasks, it's the + // state machine ARN. TaskArn *string `min:"1" type:"string"` // The parameters that should be passed to the task when it is run. @@ -27842,14 +28875,14 @@ type MaintenanceWindowTask struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, - // LAMBDA, or STEP_FUNCTION. + // LAMBDA, or STEP_FUNCTIONS. Type *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window where the task is registered. + // The ID of the maintenance window where the task is registered. WindowId *string `min:"20" type:"string"` // The task ID. @@ -27957,7 +28990,7 @@ type MaintenanceWindowTaskInvocationParameters struct { // The parameters for a RUN_COMMAND task type. RunCommand *MaintenanceWindowRunCommandParameters `type:"structure"` - // The parameters for a STEP_FUNCTION task type. + // The parameters for a STEP_FUNCTIONS task type. StepFunctions *MaintenanceWindowStepFunctionsParameters `type:"structure"` } @@ -28176,14 +29209,15 @@ func (s *NonCompliantSummary) SetSeveritySummary(v *SeveritySummary) *NonComplia type NotificationConfig struct { _ struct{} `type:"structure"` - // An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. - // Run Command pushes notifications about command status changes to this topic. + // An Amazon Resource Name (ARN) for an Amazon Simple Notification Service (Amazon + // SNS) topic. Run Command pushes notifications about command status changes + // to this topic. NotificationArn *string `type:"string"` // The different events for which you can receive notifications. These events // include the following: All (events), InProgress, Success, TimedOut, Cancelled, // Failed. To learn more about these events, see Configuring Amazon SNS Notifications - // for Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) + // for AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html) // in the AWS Systems Manager User Guide. 
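NotificationConfig is most often attached to a Run Command invocation. A sketch of wiring the SNS fields described above into SendCommand; the instance ID, role ARN, and topic ARN are placeholders, and SendCommand itself is assumed from the same SDK since it is not part of this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.SendCommand(&ssm.SendCommandInput{
		DocumentName: aws.String("AWS-RunShellScript"),
		InstanceIds:  []*string{aws.String("i-0123456789abcdef0")},
		Parameters:   map[string][]*string{"commands": {aws.String("uptime")}},
		// The service role is what publishes to the topic; both ARNs are placeholders.
		ServiceRoleArn: aws.String("arn:aws:iam::111122223333:role/SnsNotificationRole"),
		NotificationConfig: &ssm.NotificationConfig{
			NotificationArn: aws.String("arn:aws:sns:us-east-1:111122223333:run-command-status"),
			// Event names are taken from the doc comment above.
			NotificationEvents: []*string{aws.String("Success"), aws.String("TimedOut"), aws.String("Failed")},
			NotificationType:   aws.String("Command"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("command:", aws.StringValue(out.Command.CommandId))
}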
NotificationEvents []*string `type:"list"` @@ -28221,6 +29255,649 @@ func (s *NotificationConfig) SetNotificationType(v string) *NotificationConfig { return s } +// One or more aggregators for viewing counts of OpsItems using different dimensions +// such as Source, CreatedTime, or Source and CreatedTime, to name a few. +type OpsAggregator struct { + _ struct{} `type:"structure"` + + // Either a Range or Count aggregator for limiting an OpsItem summary. + AggregatorType *string `min:"1" type:"string"` + + // A nested aggregator for viewing counts of OpsItems. + Aggregators []*OpsAggregator `min:"1" type:"list"` + + // The name of an OpsItem attribute on which to limit the count of OpsItems. + AttributeName *string `type:"string"` + + // The aggregator filters. + Filters []*OpsFilter `min:"1" type:"list"` + + // The data type name to use for viewing counts of OpsItems. + TypeName *string `min:"1" type:"string"` + + // The aggregator value. + Values map[string]*string `type:"map"` +} + +// String returns the string representation +func (s OpsAggregator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsAggregator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsAggregator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsAggregator"} + if s.AggregatorType != nil && len(*s.AggregatorType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AggregatorType", 1)) + } + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregatorType sets the AggregatorType field's value. +func (s *OpsAggregator) SetAggregatorType(v string) *OpsAggregator { + s.AggregatorType = &v + return s +} + +// SetAggregators sets the Aggregators field's value. +func (s *OpsAggregator) SetAggregators(v []*OpsAggregator) *OpsAggregator { + s.Aggregators = v + return s +} + +// SetAttributeName sets the AttributeName field's value. +func (s *OpsAggregator) SetAttributeName(v string) *OpsAggregator { + s.AttributeName = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *OpsAggregator) SetFilters(v []*OpsFilter) *OpsAggregator { + s.Filters = v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *OpsAggregator) SetTypeName(v string) *OpsAggregator { + s.TypeName = &v + return s +} + +// SetValues sets the Values field's value. +func (s *OpsAggregator) SetValues(v map[string]*string) *OpsAggregator { + s.Values = v + return s +} + +// The result of the query. +type OpsEntity struct { + _ struct{} `type:"structure"` + + // The data returned by the query. 
+ Data map[string]*OpsEntityItem `type:"map"` + + // The query ID. + Id *string `type:"string"` +} + +// String returns the string representation +func (s OpsEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsEntity) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. +func (s *OpsEntity) SetData(v map[string]*OpsEntityItem) *OpsEntity { + s.Data = v + return s +} + +// SetId sets the Id field's value. +func (s *OpsEntity) SetId(v string) *OpsEntity { + s.Id = &v + return s +} + +// The OpsItem summaries result item. +type OpsEntityItem struct { + _ struct{} `type:"structure"` + + // The detailed data content for an OpsItem summaries result item. + Content []map[string]*string `type:"list"` +} + +// String returns the string representation +func (s OpsEntityItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsEntityItem) GoString() string { + return s.String() +} + +// SetContent sets the Content field's value. +func (s *OpsEntityItem) SetContent(v []map[string]*string) *OpsEntityItem { + s.Content = v + return s +} + +// A filter for viewing OpsItem summaries. +type OpsFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The type of filter. + Type *string `type:"string" enum:"OpsFilterOperatorType"` + + // The filter value. + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s OpsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *OpsFilter) SetKey(v string) *OpsFilter { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *OpsFilter) SetType(v string) *OpsFilter { + s.Type = &v + return s +} + +// SetValues sets the Values field's value. +func (s *OpsFilter) SetValues(v []*string) *OpsFilter { + s.Values = v + return s +} + +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +type OpsItem struct { + _ struct{} `type:"structure"` + + // The ARN of the AWS account that created the OpsItem. + CreatedBy *string `type:"string"` + + // The date and time the OpsItem was created. + CreatedTime *time.Time `type:"timestamp"` + + // The OpsItem description. 
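OpsAggregator and OpsFilter above drive GetOpsSummary. A sketch that counts OpsItems grouped by status; the "Count" aggregator type matches the doc comment, but the "AWS:OpsItem" type name and the "Status" attribute name are assumptions made for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.GetOpsSummary(&ssm.GetOpsSummaryInput{
		Aggregators: []*ssm.OpsAggregator{{
			AggregatorType: aws.String("Count"),       // "Range" or "Count", per the doc comment
			TypeName:       aws.String("AWS:OpsItem"), // assumed data type name
			AttributeName:  aws.String("Status"),      // assumed attribute to group by
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each OpsEntity carries a map of OpsEntityItems whose Content holds the counts.
	for _, e := range out.Entities {
		for name, item := range e.Data {
			fmt.Println(name, awsutil.Prettify(item.Content))
		}
	}
}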
+ Description *string `min:"1" type:"string"` + + // The ARN of the AWS account that last updated the OpsItem. + LastModifiedBy *string `type:"string"` + + // The date and time the OpsItem was last updated. + LastModifiedTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Operational data is custom data that provides useful reference details about + // the OpsItem. For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. + // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // The ID of the OpsItem. + OpsItemId *string `type:"string"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItem. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. + RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. The + // impacted resource is a subset of source. + Source *string `min:"1" type:"string"` + + // The OpsItem status. Status can be Open, In Progress, or Resolved. For more + // information, see Editing OpsItem Details (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) + // in the AWS Systems Manager User Guide. + Status *string `type:"string" enum:"OpsItemStatus"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + Title *string `min:"1" type:"string"` + + // The version of this OpsItem. Each time the OpsItem is edited the version + // number increments by one. + Version *string `type:"string"` +} + +// String returns the string representation +func (s OpsItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItem) GoString() string { + return s.String() +} + +// SetCreatedBy sets the CreatedBy field's value. 
+func (s *OpsItem) SetCreatedBy(v string) *OpsItem { + s.CreatedBy = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *OpsItem) SetCreatedTime(v time.Time) *OpsItem { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *OpsItem) SetDescription(v string) *OpsItem { + s.Description = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *OpsItem) SetLastModifiedBy(v string) *OpsItem { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OpsItem) SetLastModifiedTime(v time.Time) *OpsItem { + s.LastModifiedTime = &v + return s +} + +// SetNotifications sets the Notifications field's value. +func (s *OpsItem) SetNotifications(v []*OpsItemNotification) *OpsItem { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *OpsItem) SetOperationalData(v map[string]*OpsItemDataValue) *OpsItem { + s.OperationalData = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *OpsItem) SetOpsItemId(v string) *OpsItem { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *OpsItem) SetPriority(v int64) *OpsItem { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *OpsItem) SetRelatedOpsItems(v []*RelatedOpsItem) *OpsItem { + s.RelatedOpsItems = v + return s +} + +// SetSource sets the Source field's value. +func (s *OpsItem) SetSource(v string) *OpsItem { + s.Source = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *OpsItem) SetStatus(v string) *OpsItem { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *OpsItem) SetTitle(v string) *OpsItem { + s.Title = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *OpsItem) SetVersion(v string) *OpsItem { + s.Version = &v + return s +} + +// An object that defines the value of the key and its type in the OperationalData +// map. +type OpsItemDataValue struct { + _ struct{} `type:"structure"` + + // The type of key-value pair. Valid types include SearchableString and String. + Type *string `type:"string" enum:"OpsItemDataType"` + + // The value of the OperationalData key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s OpsItemDataValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemDataValue) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *OpsItemDataValue) SetType(v string) *OpsItemDataValue { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *OpsItemDataValue) SetValue(v string) *OpsItemDataValue { + s.Value = &v + return s +} + +// Describes an OpsItem filter. +type OpsItemFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"OpsItemFilterKey"` + + // The operator used by the filter call. + // + // Operator is a required field + Operator *string `type:"string" required:"true" enum:"OpsItemFilterOperator"` + + // The filter value. 
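OpsItemDataValue above types each entry of the OperationalData map described earlier. A sketch of populating it at creation time, assuming the CreateOpsItem action from the same OpsCenter release (not shown in this hunk); the title, source, description, and key name are invented.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.CreateOpsItem(&ssm.CreateOpsItemInput{
		Title:       aws.String("Disk usage above threshold"),
		Source:      aws.String("Amazon EC2"),
		Description: aws.String("Root volume on the web tier exceeded 90% utilization."),
		OperationalData: map[string]*ssm.OpsItemDataValue{
			// "SearchableString" makes the value visible to users who can only
			// see the OpsItem overview, per the OperationalData doc comment above.
			"failureStage": {
				Type:  aws.String("SearchableString"),
				Value: aws.String("disk-allocation"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", aws.StringValue(out.OpsItemId))
}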
+	//
+	// Values is a required field
+	Values []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s OpsItemFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpsItemFilter) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OpsItemFilter) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OpsItemFilter"}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Operator == nil {
+		invalidParams.Add(request.NewErrParamRequired("Operator"))
+	}
+	if s.Values == nil {
+		invalidParams.Add(request.NewErrParamRequired("Values"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *OpsItemFilter) SetKey(v string) *OpsItemFilter {
+	s.Key = &v
+	return s
+}
+
+// SetOperator sets the Operator field's value.
+func (s *OpsItemFilter) SetOperator(v string) *OpsItemFilter {
+	s.Operator = &v
+	return s
+}
+
+// SetValues sets the Values field's value.
+func (s *OpsItemFilter) SetValues(v []*string) *OpsItemFilter {
+	s.Values = v
+	return s
+}
+
+// A notification about the OpsItem.
+type OpsItemNotification struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of an SNS topic where notifications are sent
+	// when this OpsItem is edited or changed.
+	Arn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OpsItemNotification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpsItemNotification) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *OpsItemNotification) SetArn(v string) *OpsItemNotification {
+	s.Arn = &v
+	return s
+}
+
+// A count of OpsItems.
+type OpsItemSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the IAM entity that created the OpsItem.
+	CreatedBy *string `type:"string"`
+
+	// The date and time the OpsItem was created.
+	CreatedTime *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) of the IAM entity that last updated the OpsItem.
+	LastModifiedBy *string `type:"string"`
+
+	// The date and time the OpsItem was last updated.
+	LastModifiedTime *time.Time `type:"timestamp"`
+
+	// Operational data is custom data that provides useful reference details about
+	// the OpsItem.
+	OperationalData map[string]*OpsItemDataValue `type:"map"`
+
+	// The ID of the OpsItem.
+	OpsItemId *string `type:"string"`
+
+	// The importance of this OpsItem in relation to other OpsItems in the system.
+	Priority *int64 `min:"1" type:"integer"`
+
+	// The impacted AWS resource.
+	Source *string `min:"1" type:"string"`
+
+	// The OpsItem status. Status can be Open, In Progress, or Resolved.
+	Status *string `type:"string" enum:"OpsItemStatus"`
+
+	// A short heading that describes the nature of the OpsItem and the impacted
+	// resource.
+	Title *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s OpsItemSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpsItemSummary) GoString() string {
+	return s.String()
+}
+
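OpsItemFilter above feeds DescribeOpsItems, whose output setter opened this section. A sketch listing open OpsItems; the OpsItemFilters field name on DescribeOpsItemsInput and the "Status"/"Equal" values are assumptions inferred from the enum names in the struct tags.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.DescribeOpsItems(&ssm.DescribeOpsItemsInput{
		OpsItemFilters: []*ssm.OpsItemFilter{{
			Key:      aws.String("Status"), // assumed OpsItemFilterKey value
			Operator: aws.String("Equal"),  // assumed OpsItemFilterOperator value
			Values:   []*string{aws.String("Open")},
		}},
		MaxResults: aws.Int64(20),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range out.OpsItemSummaries {
		fmt.Printf("%s [%s] %s\n",
			aws.StringValue(s.OpsItemId), aws.StringValue(s.Status), aws.StringValue(s.Title))
	}
}

+// SetCreatedBy sets the CreatedBy field's value.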
+func (s *OpsItemSummary) SetCreatedBy(v string) *OpsItemSummary { + s.CreatedBy = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *OpsItemSummary) SetCreatedTime(v time.Time) *OpsItemSummary { + s.CreatedTime = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *OpsItemSummary) SetLastModifiedBy(v string) *OpsItemSummary { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OpsItemSummary) SetLastModifiedTime(v time.Time) *OpsItemSummary { + s.LastModifiedTime = &v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *OpsItemSummary) SetOperationalData(v map[string]*OpsItemDataValue) *OpsItemSummary { + s.OperationalData = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *OpsItemSummary) SetOpsItemId(v string) *OpsItemSummary { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *OpsItemSummary) SetPriority(v int64) *OpsItemSummary { + s.Priority = &v + return s +} + +// SetSource sets the Source field's value. +func (s *OpsItemSummary) SetSource(v string) *OpsItemSummary { + s.Source = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *OpsItemSummary) SetStatus(v string) *OpsItemSummary { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *OpsItemSummary) SetTitle(v string) *OpsItemSummary { + s.Title = &v + return s +} + // Information about the source where the association execution details are // stored. type OutputSource struct { @@ -28379,6 +30056,9 @@ type ParameterHistory struct { Name *string `min:"1" type:"string"` // Information about the policies assigned to a parameter. + // + // Working with Parameter Policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) + // in the AWS Systems Manager User Guide. Policies []*ParameterInlinePolicy `type:"list"` // The parameter tier. @@ -28522,7 +30202,7 @@ func (s *ParameterInlinePolicy) SetPolicyType(v string) *ParameterInlinePolicy { return s } -// Metada includes information like the ARN of the last user and the date/time +// Metadata includes information like the ARN of the last user and the date/time // the parameter was last used. type ParameterMetadata struct { _ struct{} `type:"structure"` @@ -29722,7 +31402,7 @@ func (s PutComplianceItemsOutput) GoString() string { type PutInventoryInput struct { _ struct{} `type:"structure"` - // One or more instance IDs where you want to add or update inventory items. + // An instance ID where you want to add or update inventory items. // // InstanceId is a required field InstanceId *string `type:"string" required:"true"` @@ -29911,29 +31591,66 @@ type PutParameterInput struct { // action. Tags []*Tag `type:"list"` + // The parameter tier to assign to a parameter. + // // Parameter Store offers a standard tier and an advanced tier for parameters. - // Standard parameters have a value limit of 4 KB and can't be configured to - // use parameter policies. You can create a maximum of 10,000 standard parameters - // per account and per Region. Standard parameters are offered at no additional - // cost. - // - // Advanced parameters have a value limit of 8 KB and can be configured to use - // parameter policies. You can create a maximum of 100,000 advanced parameters - // per account and per Region. Advanced parameters incur a charge. 
-	//
-	// If you don't specify a parameter tier when you create a new parameter, the
-	// parameter defaults to using the standard tier. You can change a standard
-	// parameter to an advanced parameter at any time. But you can't revert an advanced
-	// parameter to a standard parameter. Reverting an advanced parameter to a standard
-	// parameter would result in data loss because the system would truncate the
-	// size of the parameter from 8 KB to 4 KB. Reverting would also remove any
-	// policies attached to the parameter. Lastly, advanced parameters use a different
-	// form of encryption than standard parameters.
+	// Standard parameters have a content size limit of 4 KB and can't be configured
+	// to use parameter policies. You can create a maximum of 10,000 standard parameters
+	// for each Region in an AWS account. Standard parameters are offered at no
+	// additional cost.
+	//
+	// Advanced parameters have a content size limit of 8 KB and can be configured
+	// to use parameter policies. You can create a maximum of 100,000 advanced parameters
+	// for each Region in an AWS account. Advanced parameters incur a charge. For
+	// more information, see About Advanced Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html)
+	// in the AWS Systems Manager User Guide.
+	//
+	// You can change a standard parameter to an advanced parameter any time. But
+	// you can't revert an advanced parameter to a standard parameter. Reverting
+	// an advanced parameter to a standard parameter would result in data loss because
+	// the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting
+	// would also remove any policies attached to the parameter. Lastly, advanced
+	// parameters use a different form of encryption than standard parameters.
+	//
	// If you no longer need an advanced parameter, or if you no longer want to
	// incur charges for an advanced parameter, you must delete it and recreate
-	// it as a new standard parameter. For more information, see About Advanced
-	// Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html)
+	// it as a new standard parameter.
+	//
+	// Using the Default Tier Configuration
+	//
+	// In PutParameter requests, you can specify the tier to create the parameter
+	// in. Whenever you specify a tier in the request, Parameter Store creates or
+	// updates the parameter according to that request. However, if you do not specify
+	// a tier in a request, Parameter Store assigns the tier based on the current
+	// Parameter Store default tier configuration.
+	//
+	// The default tier when you begin using Parameter Store is the standard-parameter
+	// tier. If you use the advanced-parameter tier, you can specify one of the
+	// following as the default:
+	//
+	//    * Advanced: With this option, Parameter Store evaluates all requests as
+	//    advanced parameters.
+	//
+	//    * Intelligent-Tiering: With this option, Parameter Store evaluates each
+	//    request to determine if the parameter is standard or advanced. If the
+	//    request doesn't include any options that require an advanced parameter,
+	//    the parameter is created in the standard-parameter tier. If one or more
+	//    options requiring an advanced parameter are included in the request, Parameter
+	//    Store creates a parameter in the advanced-parameter tier.
This approach + // helps control your parameter-related costs by always creating standard + // parameters unless an advanced parameter is necessary. + // + // Options that require an advanced parameter include the following: + // + // * The content size of the parameter is more than 4 KB. + // + // * The parameter uses a parameter policy. + // + // * More than 10,000 parameters already exist in your AWS account in the + // current Region. + // + // For more information about configuring the default tier option, see Specifying + // a Default Parameter Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html) // in the AWS Systems Manager User Guide. Tier *string `type:"string" enum:"ParameterTier"` @@ -30067,6 +31784,9 @@ func (s *PutParameterInput) SetValue(v string) *PutParameterInput { type PutParameterOutput struct { _ struct{} `type:"structure"` + // The tier assigned to the parameter. + Tier *string `type:"string" enum:"ParameterTier"` + // The new version number of a parameter. If you edit a parameter value, Parameter // Store automatically creates a new version and assigns this new version a // unique ID. You can reference a parameter version ID in API actions or in @@ -30086,6 +31806,12 @@ func (s PutParameterOutput) GoString() string { return s.String() } +// SetTier sets the Tier field's value. +func (s *PutParameterOutput) SetTier(v string) *PutParameterOutput { + s.Tier = &v + return s +} + // SetVersion sets the Version field's value. func (s *PutParameterOutput) SetVersion(v int64) *PutParameterOutput { s.Version = &v @@ -30259,30 +31985,54 @@ type RegisterTargetWithMaintenanceWindowInput struct { Name *string `min:"3" type:"string"` // User-provided value that will be included in any CloudWatch events raised - // while running tasks for these targets in this Maintenance Window. + // while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` - // The type of target being registered with the Maintenance Window. + // The type of target being registered with the maintenance window. // // ResourceType is a required field ResourceType *string `type:"string" required:"true" enum:"MaintenanceWindowResourceType"` - // The targets (either instances or tags). + // The targets to register with the maintenance window. In other words, the + // instances to run commands on when the maintenance window runs. // - // Specify instances using the following format: + // You can specify targets using instance IDs, resource group names, or tags + // that have been applied to instances. 
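+ //
+ // As a hedged Go-level sketch (the window ID and tag values are assumptions,
+ // and svc is assumed to be an *ssm.SSM client), registering a tag-based target
+ // might look like:
+ //
+ //    out, err := svc.RegisterTargetWithMaintenanceWindow(&ssm.RegisterTargetWithMaintenanceWindowInput{
+ //        WindowId:     aws.String("mw-0c50858d01EXAMPLE"), // hypothetical window ID
+ //        ResourceType: aws.String(ssm.MaintenanceWindowResourceTypeInstance),
+ //        Targets: []*ssm.Target{{
+ //            Key:    aws.String("tag:Environment"),
+ //            Values: []*string{aws.String("Production")},
+ //        }},
+ //    })
+ //
+ // The equivalent string formats are shown in the examples that follow.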
// - // Key=InstanceIds,Values=, + // Example 1: Specify instance IDs + // + // Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 + // + // Example 2: Use tag key-pairs applied to instances + // + // Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 + // + // Example 3: Use tag-keys applied to instances + // + // Key=tag-key,Values=my-tag-key-1,my-tag-key-2 // - // Specify tags using either of the following formats: + // Example 4: Use resource group names // - // Key=tag:,Values=, + // Key=resource-groups:Name,Values=resource-group-name // - // Key=tag-key,Values=, + // Example 5: Use filters for resource group types + // + // Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 + // + // For Key=resource-groups:ResourceTypeFilters, specify resource types in the + // following format + // + // Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC + // + // For more information about these example formats, including the best use + // case for each one, see Examples: Register Targets with a Maintenance Window + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html) + // in the AWS Systems Manager User Guide. // // Targets is a required field Targets []*Target `type:"list" required:"true"` - // The ID of the Maintenance Window the target should be registered with. + // The ID of the maintenance window the target should be registered with. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -30387,7 +32137,7 @@ func (s *RegisterTargetWithMaintenanceWindowInput) SetWindowId(v string) *Regist type RegisterTargetWithMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the target definition in this Maintenance Window. + // The ID of the target definition in this maintenance window. WindowTargetId *string `min:"36" type:"string"` } @@ -30422,7 +32172,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets this task can be run for in parallel. @@ -30438,35 +32188,34 @@ type RegisterTaskWithMaintenanceWindowInput struct { // An optional name for the task. Name *string `min:"3" type:"string"` - // The priority of the task in the Maintenance Window, the lower the number - // the higher the priority. Tasks in a Maintenance Window are scheduled in priority + // The priority of the task in the maintenance window, the lower the number + // the higher the priority. Tasks in a maintenance window are scheduled in priority // order with tasks that have the same priority scheduled in parallel. Priority *int64 `type:"integer"` - // The role to assume when running the Maintenance Window task. + // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. 
If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. // - // If you do not specify a service role ARN, Systems Manager will use your account's - // service-linked role for Systems Manager by default. If no service-linked - // role for Systems Manager exists in your account, it will be created when - // you run RegisterTaskWithMaintenanceWindow without specifying a service role - // ARN. + // For more information, see the following topics in the AWS Systems + // Manager User Guide: // - // For more information, see Service-Linked Role Permissions for Systems Manager - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) - // and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) - // in the AWS Systems Manager User Guide. + // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance + // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` - // The targets (either instances or Maintenance Window targets). + // The targets (either instances or maintenance window targets). // // Specify instances using the following format: // // Key=InstanceIds,Values=, // - // Specify Maintenance Window targets using the following format: + // Specify maintenance window targets using the following format: // - // Key=,Values=, + // Key=WindowTargetIds,Values=, // // Targets is a required field Targets []*Target `type:"list" required:"true"` @@ -30485,7 +32234,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task being registered. @@ -30493,7 +32242,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // TaskType is a required field TaskType *string `type:"string" required:"true" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window the task should be added to. + // The ID of the maintenance window the task should be added to. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -30665,7 +32414,7 @@ func (s *RegisterTaskWithMaintenanceWindowInput) SetWindowId(v string) *Register type RegisterTaskWithMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the task in the Maintenance Window. + // The ID of the task in the maintenance window. 
WindowTaskId *string `min:"36" type:"string"` } @@ -30685,11 +32434,51 @@ func (s *RegisterTaskWithMaintenanceWindowOutput) SetWindowTaskId(v string) *Reg return s } +// An OpsItem that shares something in common with the current OpsItem. For +// example, related OpsItems can include OpsItems with similar error messages, +// impacted resources, or statuses for the impacted resource. +type RelatedOpsItem struct { + _ struct{} `type:"structure"` + + // The ID of an OpsItem related to the current OpsItem. + // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RelatedOpsItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RelatedOpsItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RelatedOpsItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RelatedOpsItem"} + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *RelatedOpsItem) SetOpsItemId(v string) *RelatedOpsItem { + s.OpsItemId = &v + return s +} + type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` - // The resource ID for which you want to remove tags. Use the ID of the resource. - // Here are some examples: + // The ID of the resource from which you want to remove tags. For example: // // ManagedInstance: mi-012345abcde // @@ -30700,17 +32489,17 @@ type RemoveTagsFromResourceInput struct { // For the Document and Parameter values, use the name of the resource. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following - // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. + // instances. Specify the name of the managed instance in the following format: + // mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceId is a required field ResourceId *string `type:"string" required:"true"` - // The type of resource of which you want to remove a tag. + // The type of resource from which you want to remove a tag. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following - // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. + // instances. Specify the name of the managed instance in the following format: + // mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceType is a required field ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"` @@ -31055,7 +32844,7 @@ type ResourceDataSyncS3Destination struct { _ struct{} `type:"structure"` // The ARN of an encryption key for a destination in Amazon S3. Must belong - // to the same region as the destination Amazon S3 bucket. + // to the same Region as the destination Amazon S3 bucket. AWSKMSKeyARN *string `min:"1" type:"string"` // The name of the Amazon S3 bucket where the aggregated data is stored. @@ -31183,7 +33028,7 @@ type ResumeSessionOutput struct { SessionId *string `min:"1" type:"string"` // A URL back to SSM Agent on the instance that the Session Manager client uses - // to send commands and receive output from the instance. 
Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output). + // to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output). // // region represents the Region identifier for an AWS Region supported by AWS // Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list @@ -31369,18 +33158,18 @@ func (s *S3OutputUrl) SetOutputUrl(v string) *S3OutputUrl { return s } -// Information about a scheduled execution for a Maintenance Window. +// Information about a scheduled execution for a maintenance window. type ScheduledWindowExecution struct { _ struct{} `type:"structure"` - // The time, in ISO-8601 Extended format, that the Maintenance Window is scheduled + // The time, in ISO-8601 Extended format, that the maintenance window is scheduled // to be run. ExecutionTime *string `type:"string"` - // The name of the Maintenance Window to be run. + // The name of the maintenance window to be run. Name *string `min:"3" type:"string"` - // The ID of the Maintenance Window to be run. + // The ID of the maintenance window to be run. WindowId *string `min:"20" type:"string"` } @@ -31423,10 +33212,24 @@ type SendAutomationSignalInput struct { // The data sent with the signal. The data schema depends on the type of signal // used in the request. + // + // For Approve and Reject signal types, the payload is an optional comment that + // you can send with the signal type. For example: + // + // Comment="Looks good" + // + // For StartStep and Resume signal types, you must send the name of the Automation + // step to start or resume as the payload. For example: + // + // StepName="step1" + // + // For the StopStep signal type, you must send the step execution ID as the + // payload. For example: + // + // StepExecutionId="97fff367-fc5a-4299-aed8-0123456789ab" Payload map[string][]*string `min:"1" type:"map"` - // The type of signal. Valid signal types include the following: Approve and - // Reject + // The type of signal to send to an Automation execution. // // SignalType is a required field SignalType *string `type:"string" required:"true" enum:"SignalType"` @@ -31576,7 +33379,8 @@ type SendCommandInput struct { // The required and optional parameters specified in the document being run. Parameters map[string][]*string `type:"map"` - // The IAM role that Systems Manager uses to send notifications. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for Run Command commands. ServiceRoleArn *string `type:"string"` // (Optional) An array of search criteria that targets instances using a Key,Value @@ -31986,19 +33790,8 @@ type SessionFilter struct { // by that user. // // * Status: Specify a valid session status to see a list of all sessions - // with that status. Status values you can specify include: - // - // Connected - // - // Connecting - // - // Disconnected - // - // Terminated - // - // Terminating - // - // Failed + // with that status. Status values you can specify include: Connected, Connecting, + // Disconnected, Terminated, Terminating, and Failed. // // Value is a required field Value *string `locationName:"value" min:"1" type:"string" required:"true"` @@ -32496,7 +34289,7 @@ type StartSessionOutput struct { SessionId *string `min:"1" type:"string"` // A URL back to SSM Agent on the instance that the Session Manager client uses - // to send commands and receive output from the instance. 
Format: wss://ssm-messages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output) + // to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output) // // region represents the Region identifier for an AWS Region supported by AWS // Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list @@ -32891,8 +34684,8 @@ func (s StopAutomationExecutionOutput) GoString() string { // Metadata that you assign to your AWS resources. Tags enable you to categorize // your resources in different ways, for example, by purpose, owner, or environment. -// In Systems Manager, you can apply tags to documents, managed instances, Maintenance -// Windows, Parameter Store parameters, and patch baselines. +// In Systems Manager, you can apply tags to documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. type Tag struct { _ struct{} `type:"structure"` @@ -32952,24 +34745,54 @@ func (s *Tag) SetValue(v string) *Tag { } // An array of search criteria that targets instances using a Key,Value combination -// that you specify. Targets is required if you don't provide one or more instance -// IDs in the call. +// that you specify. +// +// Supported formats include the following. +// +// * Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 +// +// * Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 +// +// * Key=tag-key,Values=my-tag-key-1,my-tag-key-2 +// +// * (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name +// +// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 +// +// For example: +// +// * Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE +// +// * Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3 +// +// * Key=tag-key,Values=Name,Instance-Type,CostCenter +// +// * (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup +// This example demonstrates how to target all resources in the resource +// group ProductionResourceGroup in your maintenance window. +// +// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC +// This example demonstrates how to target only Amazon EC2 instances and +// VPCs in your maintenance window. +// +// * (State Manager association targets only) Key=InstanceIds,Values=* This +// example demonstrates how to target all managed instances in the AWS Region +// where the association was created. +// +// For information about how to send commands that target instances using Key,Value +// parameters, see Using Targets and Rate Controls to Send Commands to a Fleet +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) +// in the AWS Systems Manager User Guide. type Target struct { _ struct{} `type:"structure"` // User-defined criteria for sending commands that target instances that meet - // the criteria. Key can be tag: or InstanceIds. For more information - // about how to send commands that target instances using Key,Value parameters, - // see Using Targets and Rate Controls to Send Commands to a Fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) - // in the AWS Systems Manager User Guide. + // the criteria. 
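+ //
+ // As an illustrative sketch (document name and tag values are assumptions,
+ // and svc is assumed to be an *ssm.SSM client), the Key,Value formats above
+ // map onto this struct in a SendCommand call:
+ //
+ //    out, err := svc.SendCommand(&ssm.SendCommandInput{
+ //        DocumentName: aws.String("AWS-RunShellScript"),
+ //        Targets: []*ssm.Target{{
+ //            Key:    aws.String("tag:ServerRole"),
+ //            Values: []*string{aws.String("WebServer")},
+ //        }},
+ //        Parameters: map[string][]*string{
+ //            "commands": {aws.String("uptime")},
+ //        },
+ //    })
+ //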
Key *string `min:"1" type:"string"` // User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, // you could specify value:WebServer to run a command on instances that include - // Amazon EC2 tags of ServerRole,WebServer. For more information about how to - // send commands that target instances using Key,Value parameters, see Using - // Targets and Rate Controls to Send Commands to a Fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) - // in the AWS Systems Manager User Guide. + // Amazon EC2 tags of ServerRole,WebServer. Values []*string `type:"list"` } @@ -33022,11 +34845,11 @@ type TargetLocation struct { // The AWS Regions targeted by the current Automation execution. Regions []*string `min:"1" type:"list"` - // The maxium number of AWS accounts and AWS regions allowed to run the Automation + // The maximum number of AWS accounts and AWS regions allowed to run the Automation // concurrently TargetLocationMaxConcurrency *string `min:"1" type:"string"` - // The maxium number of errors allowed before the system stops queueing additional + // The maximum number of errors allowed before the system stops queueing additional // Automation executions for the currently running Automation. TargetLocationMaxErrors *string `min:"1" type:"string"` } @@ -33583,7 +35406,7 @@ type UpdateDocumentInput struct { // supports JSON and YAML documents. JSON is the default format. DocumentFormat *string `type:"string" enum:"DocumentFormat"` - // The version of the document that you want to update. + // (Required) The version of the document that you want to update. DocumentVersion *string `type:"string"` // The name of the document that you want to update. @@ -33707,29 +35530,29 @@ func (s *UpdateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *U type UpdateMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` // An optional description for the update request. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become inactive. EndDate allows you to set a date and time in the - // future when the Maintenance Window will no longer run. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become inactive. EndDate allows you to set a date and time in the + // future when the maintenance window will no longer run. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. 
Name *string `min:"3" type:"string"` // If True, then all fields that are required by the CreateMaintenanceWindow @@ -33737,22 +35560,22 @@ type UpdateMaintenanceWindowInput struct { // specified are set to null. Replace *bool `type:"boolean"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. StartDate *string `type:"string"` - // The ID of the Maintenance Window to update. + // The ID of the maintenance window to update. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -33871,46 +35694,46 @@ func (s *UpdateMaintenanceWindowInput) SetWindowId(v string) *UpdateMaintenanceW type UpdateMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` // An optional description of the update. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. The maintenance window will not run // after this specified time. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". 
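+ //
+ // A hedged usage sketch of updating the schedule and time zone through
+ // UpdateMaintenanceWindow (the window ID and cron expression are assumptions;
+ // svc is assumed to be an *ssm.SSM client):
+ //
+ //    out, err := svc.UpdateMaintenanceWindow(&ssm.UpdateMaintenanceWindowInput{
+ //        WindowId:         aws.String("mw-0c50858d01EXAMPLE"), // hypothetical window ID
+ //        Schedule:         aws.String("cron(0 2 ? * SUN *)"),
+ //        ScheduleTimezone: aws.String("America/Los_Angeles"),
+ //        Enabled:          aws.Bool(true),
+ //    })
+ //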
For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. The maintenance window will not run // before this specified time. StartDate *string `type:"string"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -34000,7 +35823,7 @@ type UpdateMaintenanceWindowTargetInput struct { Name *string `min:"3" type:"string"` // User-provided value that will be included in any CloudWatch events raised - // while running tasks for these targets in this Maintenance Window. + // while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` // If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow @@ -34011,7 +35834,7 @@ type UpdateMaintenanceWindowTargetInput struct { // The targets to add or replace. Targets []*Target `type:"list"` - // The Maintenance Window ID with which to modify the target. + // The maintenance window ID with which to modify the target. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -34130,7 +35953,7 @@ type UpdateMaintenanceWindowTargetOutput struct { // The updated targets. Targets []*Target `type:"list"` - // The Maintenance Window ID specified in the update request. + // The maintenance window ID specified in the update request. WindowId *string `min:"20" type:"string"` // The target ID specified in the update request. @@ -34194,7 +36017,7 @@ type UpdateMaintenanceWindowTaskInput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The new MaxConcurrency value you want to specify. MaxConcurrency is the number @@ -34217,20 +36040,18 @@ type UpdateMaintenanceWindowTaskInput struct { // specified are set to null. Replace *bool `type:"boolean"` - // The IAM service role ARN to modify. The system assumes this role during task - // execution. + // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. // - // If you do not specify a service role ARN, Systems Manager will use your account's - // service-linked role for Systems Manager by default. If no service-linked - // role for Systems Manager exists in your account, it will be created when - // you run RegisterTaskWithMaintenanceWindow without specifying a service role - // ARN. 
+ // For more information, see the following topics in the AWS Systems + // Manager User Guide: // - // For more information, see Service-Linked Role Permissions for Systems Manager - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) - // and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) - // in the AWS Systems Manager User Guide. + // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance + // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` // The targets (either instances or tags) to modify. Instances are specified @@ -34250,7 +36071,7 @@ type UpdateMaintenanceWindowTaskInput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // The map has the following format: // // Key: string, between 1 and 255 characters // // Value: an array of strings, each string is between 1 and 255 characters TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` - // The Maintenance Window ID that contains the task to modify. + // The maintenance window ID that contains the task to modify. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -34432,7 +36253,7 @@ type UpdateMaintenanceWindowTaskOutput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The updated MaxConcurrency value. @@ -34447,7 +36268,8 @@ type UpdateMaintenanceWindowTaskOutput struct { // The updated priority value. Priority *int64 `type:"integer"` - // The updated service role ARN value. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The updated target values. @@ -34464,13 +36286,13 @@ type UpdateMaintenanceWindowTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. 
For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` - // The ID of the Maintenance Window that was updated. + // The ID of the maintenance window that was updated. WindowId *string `min:"20" type:"string"` - // The task ID of the Maintenance Window that was updated. + // The task ID of the maintenance window that was updated. WindowTaskId *string `min:"36" type:"string"` } @@ -34628,6 +36450,179 @@ func (s UpdateManagedInstanceRoleOutput) GoString() string { return s.String() } +type UpdateOpsItemInput struct { + _ struct{} `type:"structure"` + + // Update the information about the OpsItem. Provide enough information so that + // users reading this OpsItem for the first time understand the issue. + Description *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Add new keys or edit existing key-value pairs of the OperationalData map + // in the OpsItem object. + // + // Operational data is custom data that provides useful reference details about + // the OpsItem. For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. + // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // Keys that you want to remove from the OperationalData map. + OperationalDataToDelete []*string `type:"list"` + + // The ID of the OpsItem. + // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItem. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. + RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // The OpsItem status. 
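+ // As an illustrative sketch (the OpsItem ID is an assumption; svc is assumed
+ // to be an *ssm.SSM client), resolving an OpsItem might look like:
+ //
+ //    _, err := svc.UpdateOpsItem(&ssm.UpdateOpsItemInput{
+ //        OpsItemId: aws.String("oi-0123456789ab"), // hypothetical OpsItem ID
+ //        Status:    aws.String(ssm.OpsItemStatusResolved),
+ //    })
+ //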
Status can be Open, In Progress, or Resolved. For more + // information, see Editing OpsItem Details (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) + // in the AWS Systems Manager User Guide. + Status *string `type:"string" enum:"OpsItemStatus"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + Title *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOpsItemInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + if s.Title != nil && len(*s.Title) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Title", 1)) + } + if s.RelatedOpsItems != nil { + for i, v := range s.RelatedOpsItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedOpsItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateOpsItemInput) SetDescription(v string) *UpdateOpsItemInput { + s.Description = &v + return s +} + +// SetNotifications sets the Notifications field's value. +func (s *UpdateOpsItemInput) SetNotifications(v []*OpsItemNotification) *UpdateOpsItemInput { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *UpdateOpsItemInput) SetOperationalData(v map[string]*OpsItemDataValue) *UpdateOpsItemInput { + s.OperationalData = v + return s +} + +// SetOperationalDataToDelete sets the OperationalDataToDelete field's value. +func (s *UpdateOpsItemInput) SetOperationalDataToDelete(v []*string) *UpdateOpsItemInput { + s.OperationalDataToDelete = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *UpdateOpsItemInput) SetOpsItemId(v string) *UpdateOpsItemInput { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *UpdateOpsItemInput) SetPriority(v int64) *UpdateOpsItemInput { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *UpdateOpsItemInput) SetRelatedOpsItems(v []*RelatedOpsItem) *UpdateOpsItemInput { + s.RelatedOpsItems = v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateOpsItemInput) SetStatus(v string) *UpdateOpsItemInput { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. 
+func (s *UpdateOpsItemInput) SetTitle(v string) *UpdateOpsItemInput { + s.Title = &v + return s +} + +type UpdateOpsItemOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpsItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpsItemOutput) GoString() string { + return s.String() +} + type UpdatePatchBaselineInput struct { _ struct{} `type:"structure"` @@ -35133,6 +37128,9 @@ const ( const ( // AttachmentsSourceKeySourceUrl is a AttachmentsSourceKey enum value AttachmentsSourceKeySourceUrl = "SourceUrl" + + // AttachmentsSourceKeyS3fileUrl is a AttachmentsSourceKey enum value + AttachmentsSourceKeyS3fileUrl = "S3FileUrl" ) const ( @@ -35566,6 +37564,9 @@ const ( const ( // MaintenanceWindowResourceTypeInstance is a MaintenanceWindowResourceType enum value MaintenanceWindowResourceTypeInstance = "INSTANCE" + + // MaintenanceWindowResourceTypeResourceGroup is a MaintenanceWindowResourceType enum value + MaintenanceWindowResourceTypeResourceGroup = "RESOURCE_GROUP" ) const ( @@ -35633,12 +37634,109 @@ const ( OperatingSystemCentos = "CENTOS" ) +const ( + // OpsFilterOperatorTypeEqual is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeEqual = "Equal" + + // OpsFilterOperatorTypeNotEqual is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeNotEqual = "NotEqual" + + // OpsFilterOperatorTypeBeginWith is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeBeginWith = "BeginWith" + + // OpsFilterOperatorTypeLessThan is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeLessThan = "LessThan" + + // OpsFilterOperatorTypeGreaterThan is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeGreaterThan = "GreaterThan" + + // OpsFilterOperatorTypeExists is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeExists = "Exists" +) + +const ( + // OpsItemDataTypeSearchableString is a OpsItemDataType enum value + OpsItemDataTypeSearchableString = "SearchableString" + + // OpsItemDataTypeString is a OpsItemDataType enum value + OpsItemDataTypeString = "String" +) + +const ( + // OpsItemFilterKeyStatus is a OpsItemFilterKey enum value + OpsItemFilterKeyStatus = "Status" + + // OpsItemFilterKeyCreatedBy is a OpsItemFilterKey enum value + OpsItemFilterKeyCreatedBy = "CreatedBy" + + // OpsItemFilterKeySource is a OpsItemFilterKey enum value + OpsItemFilterKeySource = "Source" + + // OpsItemFilterKeyPriority is a OpsItemFilterKey enum value + OpsItemFilterKeyPriority = "Priority" + + // OpsItemFilterKeyTitle is a OpsItemFilterKey enum value + OpsItemFilterKeyTitle = "Title" + + // OpsItemFilterKeyOpsItemId is a OpsItemFilterKey enum value + OpsItemFilterKeyOpsItemId = "OpsItemId" + + // OpsItemFilterKeyCreatedTime is a OpsItemFilterKey enum value + OpsItemFilterKeyCreatedTime = "CreatedTime" + + // OpsItemFilterKeyLastModifiedTime is a OpsItemFilterKey enum value + OpsItemFilterKeyLastModifiedTime = "LastModifiedTime" + + // OpsItemFilterKeyOperationalData is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalData = "OperationalData" + + // OpsItemFilterKeyOperationalDataKey is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalDataKey = "OperationalDataKey" + + // OpsItemFilterKeyOperationalDataValue is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalDataValue = "OperationalDataValue" + + // OpsItemFilterKeyResourceId is a OpsItemFilterKey enum value + OpsItemFilterKeyResourceId = "ResourceId" + + // 
OpsItemFilterKeyAutomationId is a OpsItemFilterKey enum value + OpsItemFilterKeyAutomationId = "AutomationId" +) + +const ( + // OpsItemFilterOperatorEqual is a OpsItemFilterOperator enum value + OpsItemFilterOperatorEqual = "Equal" + + // OpsItemFilterOperatorContains is a OpsItemFilterOperator enum value + OpsItemFilterOperatorContains = "Contains" + + // OpsItemFilterOperatorGreaterThan is a OpsItemFilterOperator enum value + OpsItemFilterOperatorGreaterThan = "GreaterThan" + + // OpsItemFilterOperatorLessThan is a OpsItemFilterOperator enum value + OpsItemFilterOperatorLessThan = "LessThan" +) + +const ( + // OpsItemStatusOpen is a OpsItemStatus enum value + OpsItemStatusOpen = "Open" + + // OpsItemStatusInProgress is a OpsItemStatus enum value + OpsItemStatusInProgress = "InProgress" + + // OpsItemStatusResolved is a OpsItemStatus enum value + OpsItemStatusResolved = "Resolved" +) + const ( // ParameterTierStandard is a ParameterTier enum value ParameterTierStandard = "Standard" // ParameterTierAdvanced is a ParameterTier enum value ParameterTierAdvanced = "Advanced" + + // ParameterTierIntelligentTiering is a ParameterTier enum value + ParameterTierIntelligentTiering = "Intelligent-Tiering" ) const ( @@ -35840,6 +37938,9 @@ const ( // ResourceTypeForTaggingPatchBaseline is a ResourceTypeForTagging enum value ResourceTypeForTaggingPatchBaseline = "PatchBaseline" + + // ResourceTypeForTaggingOpsItem is a ResourceTypeForTagging enum value + ResourceTypeForTaggingOpsItem = "OpsItem" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go index 6964adba01b..48d6d3ee3e3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go @@ -15,7 +15,7 @@ // (http://docs.aws.amazon.com/systems-manager/latest/userguide/). // // To get started, verify prerequisites and configure managed instances. For -// more information, see Systems Manager Prerequisites (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) +// more information, see Setting Up AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) // in the AWS Systems Manager User Guide. // // For information about other API actions you can perform on Amazon EC2 instances, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go index cf1f3eb0e3b..90c4a777131 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -105,7 +105,7 @@ const ( // ErrCodeDocumentLimitExceeded for service response error code // "DocumentLimitExceeded". // - // You can have at most 200 active Systems Manager documents. + // You can have at most 500 active Systems Manager documents. ErrCodeDocumentLimitExceeded = "DocumentLimitExceeded" // ErrCodeDocumentPermissionLimit for service response error code @@ -126,8 +126,8 @@ const ( // ErrCodeDoesNotExistException for service response error code // "DoesNotExistException". // - // Error returned when the ID specified for a resource, such as a Maintenance - // Window or Patch baseline, doesn't exist. 
+ // Error returned when the ID specified for a resource, such as a maintenance + // window or patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -156,7 +156,7 @@ const ( // ErrCodeFeatureNotAvailableException for service response error code // "FeatureNotAvailableException". // - // You attempted to register a LAMBDA or STEP_FUNCTION task in a region where + // You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where // the corresponding service is not available. ErrCodeFeatureNotAvailableException = "FeatureNotAvailableException" @@ -272,7 +272,7 @@ const ( // ErrCodeInvalidDeletionIdException for service response error code // "InvalidDeletionIdException". // - // The ID specified for the delete operation does not exist or is not valide. + // The ID specified for the delete operation does not exist or is not valid. // Verify the ID and try again. ErrCodeInvalidDeletionIdException = "InvalidDeletionIdException" @@ -340,12 +340,9 @@ const ( // // You do not have permission to access the instance. // - // SSM Agent is not running. On managed instances and Linux instances, verify - // that the SSM Agent is running. On EC2 Windows instances, verify that the - // EC2Config service is running. + // SSM Agent is not running. Verify that SSM Agent is running. // - // SSM Agent or EC2Config service is not registered to the SSM endpoint. Try - // reinstalling SSM Agent or EC2Config service. + // SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. // // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. @@ -513,7 +510,7 @@ const ( // "InvocationDoesNotExist". // // The command ID and instance ID you specified did not match any invocations. - // Verify the command ID adn the instance ID and try again. + // Verify the command ID and the instance ID and try again. ErrCodeInvocationDoesNotExist = "InvocationDoesNotExist" // ErrCodeItemContentMismatchException for service response error code @@ -534,6 +531,32 @@ const ( // The size limit of a document is 64 KB. ErrCodeMaxDocumentSizeExceeded = "MaxDocumentSizeExceeded" + // ErrCodeOpsItemAlreadyExistsException for service response error code + // "OpsItemAlreadyExistsException". + // + // The OpsItem already exists. + ErrCodeOpsItemAlreadyExistsException = "OpsItemAlreadyExistsException" + + // ErrCodeOpsItemInvalidParameterException for service response error code + // "OpsItemInvalidParameterException". + // + // A specified parameter argument isn't valid. Verify the available arguments + // and try again. + ErrCodeOpsItemInvalidParameterException = "OpsItemInvalidParameterException" + + // ErrCodeOpsItemLimitExceededException for service response error code + // "OpsItemLimitExceededException". + // + // The request caused OpsItems to exceed one or more limits. For information + // about OpsItem limits, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). + ErrCodeOpsItemLimitExceededException = "OpsItemLimitExceededException" + + // ErrCodeOpsItemNotFoundException for service response error code + // "OpsItemNotFoundException". + // + // The specified OpsItem ID doesn't exist. Verify the ID and try again. 
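+ //
+ // A hedged sketch of checking for this error code with awserr (the OpsItem
+ // ID is an assumption; svc is assumed to be an *ssm.SSM client):
+ //
+ //    _, err := svc.GetOpsItem(&ssm.GetOpsItemInput{
+ //        OpsItemId: aws.String("oi-0123456789ab"), // hypothetical OpsItem ID
+ //    })
+ //    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ssm.ErrCodeOpsItemNotFoundException {
+ //        // handle the missing OpsItem here
+ //    }
+ //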
+ ErrCodeOpsItemNotFoundException = "OpsItemNotFoundException" + // ErrCodeParameterAlreadyExists for service response error code // "ParameterAlreadyExists". // @@ -620,7 +643,7 @@ const ( // "ResourceLimitExceededException". // // Error returned when the caller has exceeded the default resource limits. - // For example, too many Maintenance Windows or Patch baselines have been created. + // For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -683,6 +706,12 @@ const ( // ErrCodeUnsupportedFeatureRequiredException for service response error code // "UnsupportedFeatureRequiredException". + // + // Microsoft application patching is only available on EC2 instances and Advanced + // Instances. To patch Microsoft applications on on-premises servers and VMs, + // you must enable Advanced Instances. For more information, see Using the Advanced-Instances + // Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) + // in the AWS Systems Manager User Guide. ErrCodeUnsupportedFeatureRequiredException = "UnsupportedFeatureRequiredException" // ErrCodeUnsupportedInventoryItemContextException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go index 9a6b8f71c22..c66bfba90cc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go @@ -46,11 +46,11 @@ const ( // svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SSM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSM { svc := &SSM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-06", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go index e8cbefb205f..59410b750da 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go @@ -56,11 +56,11 @@ func (c *StorageGateway) ActivateGatewayRequest(input *ActivateGatewayInput) (re // ActivateGateway API operation for AWS Storage Gateway. // // Activates the gateway you previously deployed on your host. 
In the activation -// process, you specify information such as the region you want to use for storing -// snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot -// schedule window, an activation key, and a name for your gateway. The activation -// process also associates your gateway with your account; for more information, -// see UpdateGatewayInformation. +// process, you specify information such as the AWS Region that you want to +// use for storing snapshots or tapes, the time zone for scheduled snapshots, +// the gateway snapshot schedule window, an activation key, and a name for your +// gateway. The activation process also associates your gateway with your account; +// for more information, see UpdateGatewayInformation. // // You must turn on the gateway VM before you can activate your gateway. // @@ -475,6 +475,96 @@ func (c *StorageGateway) AddWorkingStorageWithContext(ctx aws.Context, input *Ad return out, req.Send() } +const opAssignTapePool = "AssignTapePool" + +// AssignTapePoolRequest generates a "aws/request.Request" representing the +// client's request for the AssignTapePool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssignTapePool for more information on using the AssignTapePool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssignTapePoolRequest method. +// req, resp := client.AssignTapePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AssignTapePool
func (c *StorageGateway) AssignTapePoolRequest(input *AssignTapePoolInput) (req *request.Request, output *AssignTapePoolOutput) { + op := &request.Operation{ + Name: opAssignTapePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignTapePoolInput{} + } + + output = &AssignTapePoolOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssignTapePool API operation for AWS Storage Gateway. +// +// Assigns a tape to a tape pool for archiving. The tape assigned to a pool +// is archived in the S3 storage class that is associated with the pool. When +// you use your backup application to eject the tape, the tape is archived directly +// into the S3 storage class (Glacier or Deep Archive) that corresponds to the +// pool. +// +// Valid values: "GLACIER", "DEEP_ARCHIVE" +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation AssignTapePool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidGatewayRequestException "InvalidGatewayRequestException" +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * ErrCodeInternalServerError "InternalServerError" +// An internal server error has occurred during the request. 
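+//
+// A hedged usage sketch for AssignTapePool (the tape ARN is an assumption;
+// svc is assumed to be a *storagegateway.StorageGateway client):
+//
+//    out, err := svc.AssignTapePool(&storagegateway.AssignTapePoolInput{
+//        TapeARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:tape/TEST04A2A1"), // hypothetical ARN
+//        PoolId:  aws.String("DEEP_ARCHIVE"),
+//    })
+//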
For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AssignTapePool +func (c *StorageGateway) AssignTapePool(input *AssignTapePoolInput) (*AssignTapePoolOutput, error) { + req, out := c.AssignTapePoolRequest(input) + return out, req.Send() +} + +// AssignTapePoolWithContext is the same as AssignTapePool with the addition of +// the ability to pass a context and additional request options. +// +// See AssignTapePool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) AssignTapePoolWithContext(ctx aws.Context, input *AssignTapePoolInput, opts ...request.Option) (*AssignTapePoolOutput, error) { + req, out := c.AssignTapePoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAttachVolume = "AttachVolume" // AttachVolumeRequest generates a "aws/request.Request" representing the @@ -886,10 +976,10 @@ func (c *StorageGateway) CreateNFSFileShareRequest(input *CreateNFSFileShareInpu // // File gateway requires AWS Security Token Service (AWS STS) to be activated // to enable you create a file share. Make sure AWS STS is activated in the -// region you are creating your file gateway in. If AWS STS is not activated -// in the region, activate it. For information about how to activate AWS STS, -// see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity -// and Access Management User Guide. +// AWS Region you are creating your file gateway in. If AWS STS is not activated +// in the AWS Region, activate it. For information about how to activate AWS +// STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS +// Identity and Access Management User Guide. // // File gateway does not support creating hard or symbolic links on a file share. // @@ -1831,10 +1921,10 @@ func (c *StorageGateway) DeleteGatewayRequest(input *DeleteGatewayInput) (req *r // // You no longer pay software charges after the gateway is deleted; however, // your existing Amazon EBS snapshots persist and you will continue to be billed -// for these snapshots. You can choose to remove all remaining Amazon EBS snapshots -// by canceling your Amazon EC2 subscription.  If you prefer not to cancel your +// for these snapshots. You can choose to remove all remaining Amazon EBS snapshots +// by canceling your Amazon EC2 subscription. If you prefer not to cancel your // Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 -// console. For more information, see the AWS Storage Gateway Detail Page (http://aws.amazon.com/storagegateway). +// console. For more information, see the AWS Storage Gateway Detail Page (http://aws.amazon.com/storagegateway). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3294,7 +3384,7 @@ func (c *StorageGateway) DescribeTapeArchivesWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeTapeArchives operation. 
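The AssignTapePool operation added above is callable like any other generated client method, with an AssignTapePoolWithContext variant for cancellation. A minimal sketch of the call, with a placeholder tape ARN (not a real resource):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := storagegateway.New(sess)

	// PoolId must be one of the documented values: "GLACIER" or "DEEP_ARCHIVE".
	out, err := svc.AssignTapePool(&storagegateway.AssignTapePoolInput{
		PoolId:  aws.String("DEEP_ARCHIVE"),
		TapeARN: aws.String("arn:aws:storagegateway:us-west-2:123456789012:tape/EXAMPLE1"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.TapeARN))
}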
// pageNum := 0 // err := client.DescribeTapeArchivesPages(params, -// func(page *DescribeTapeArchivesOutput, lastPage bool) bool { +// func(page *storagegateway.DescribeTapeArchivesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3326,10 +3416,12 @@ func (c *StorageGateway) DescribeTapeArchivesPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTapeArchivesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTapeArchivesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3440,7 +3532,7 @@ func (c *StorageGateway) DescribeTapeRecoveryPointsWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a DescribeTapeRecoveryPoints operation. // pageNum := 0 // err := client.DescribeTapeRecoveryPointsPages(params, -// func(page *DescribeTapeRecoveryPointsOutput, lastPage bool) bool { +// func(page *storagegateway.DescribeTapeRecoveryPointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3472,10 +3564,12 @@ func (c *StorageGateway) DescribeTapeRecoveryPointsPagesWithContext(ctx aws.Cont }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTapeRecoveryPointsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTapeRecoveryPointsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3583,7 +3677,7 @@ func (c *StorageGateway) DescribeTapesWithContext(ctx aws.Context, input *Descri // // Example iterating over at most 3 pages of a DescribeTapes operation. // pageNum := 0 // err := client.DescribeTapesPages(params, -// func(page *DescribeTapesOutput, lastPage bool) bool { +// func(page *storagegateway.DescribeTapesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3615,10 +3709,12 @@ func (c *StorageGateway) DescribeTapesPagesWithContext(ctx aws.Context, input *D }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeTapesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeTapesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -3814,7 +3910,7 @@ func (c *StorageGateway) DescribeVTLDevicesWithContext(ctx aws.Context, input *D // // Example iterating over at most 3 pages of a DescribeVTLDevices operation. // pageNum := 0 // err := client.DescribeVTLDevicesPages(params, -// func(page *DescribeVTLDevicesOutput, lastPage bool) bool { +// func(page *storagegateway.DescribeVTLDevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -3846,10 +3942,12 @@ func (c *StorageGateway) DescribeVTLDevicesPagesWithContext(ctx aws.Context, inp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeVTLDevicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeVTLDevicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4312,7 +4410,7 @@ func (c *StorageGateway) ListFileSharesWithContext(ctx aws.Context, input *ListF // // Example iterating over at most 3 pages of a ListFileShares operation. 
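The pagination hunks in this stretch make two changes: the example comments now package-qualify the callback's output type, and the Pages loop replaces the old cont flag with an immediate break. The flag version re-evaluated p.Next() before checking cont, so the SDK appears to have fetched one extra page after the callback returned false; the rewritten loop stops first and still surfaces p.Err(). A caller-side sketch mirroring the updated example comment (gateway ARN is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := storagegateway.New(sess)
	gatewayARN := "arn:aws:storagegateway:us-west-2:123456789012:gateway/sgw-12345678" // placeholder

	pageNum := 0
	err := svc.DescribeTapesPages(
		&storagegateway.DescribeTapesInput{GatewayARN: aws.String(gatewayARN)},
		func(page *storagegateway.DescribeTapesOutput, lastPage bool) bool {
			pageNum++
			for _, tape := range page.Tapes {
				fmt.Println(aws.StringValue(tape.TapeARN))
			}
			// Returning false now breaks out immediately instead of
			// letting the loop call p.Next() once more.
			return pageNum <= 3
		})
	if err != nil {
		log.Fatal(err)
	}
}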
// pageNum := 0 // err := client.ListFileSharesPages(params, -// func(page *ListFileSharesOutput, lastPage bool) bool { +// func(page *storagegateway.ListFileSharesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4344,10 +4442,12 @@ func (c *StorageGateway) ListFileSharesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFileSharesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFileSharesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4401,8 +4501,8 @@ func (c *StorageGateway) ListGatewaysRequest(input *ListGatewaysInput) (req *req // ListGateways API operation for AWS Storage Gateway. // -// Lists gateways owned by an AWS account in a region specified in the request. -// The returned list is ordered by gateway Amazon Resource Name (ARN). +// Lists gateways owned by an AWS account in an AWS Region specified in the +// request. The returned list is ordered by gateway Amazon Resource Name (ARN). // // By default, the operation returns a maximum of 100 gateways. This operation // supports pagination that allows you to optionally reduce the number of gateways @@ -4462,7 +4562,7 @@ func (c *StorageGateway) ListGatewaysWithContext(ctx aws.Context, input *ListGat // // Example iterating over at most 3 pages of a ListGateways operation. // pageNum := 0 // err := client.ListGatewaysPages(params, -// func(page *ListGatewaysOutput, lastPage bool) bool { +// func(page *storagegateway.ListGatewaysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4494,10 +4594,12 @@ func (c *StorageGateway) ListGatewaysPagesWithContext(ctx aws.Context, input *Li }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListGatewaysOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListGatewaysOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4696,7 +4798,7 @@ func (c *StorageGateway) ListTagsForResourceWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a ListTagsForResource operation. // pageNum := 0 // err := client.ListTagsForResourcePages(params, -// func(page *ListTagsForResourceOutput, lastPage bool) bool { +// func(page *storagegateway.ListTagsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4728,10 +4830,12 @@ func (c *StorageGateway) ListTagsForResourcePagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4846,7 +4950,7 @@ func (c *StorageGateway) ListTapesWithContext(ctx aws.Context, input *ListTapesI // // Example iterating over at most 3 pages of a ListTapes operation. 
// pageNum := 0 // err := client.ListTapesPages(params, -// func(page *ListTapesOutput, lastPage bool) bool { +// func(page *storagegateway.ListTapesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4878,10 +4982,12 @@ func (c *StorageGateway) ListTapesPagesWithContext(ctx aws.Context, input *ListT }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTapesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTapesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5174,7 +5280,7 @@ func (c *StorageGateway) ListVolumesWithContext(ctx aws.Context, input *ListVolu // // Example iterating over at most 3 pages of a ListVolumes operation. // pageNum := 0 // err := client.ListVolumesPages(params, -// func(page *ListVolumesOutput, lastPage bool) bool { +// func(page *storagegateway.ListVolumesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -5206,10 +5312,12 @@ func (c *StorageGateway) ListVolumesPagesWithContext(ctx aws.Context, input *Lis }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListVolumesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListVolumesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -5258,12 +5366,12 @@ func (c *StorageGateway) NotifyWhenUploadedRequest(input *NotifyWhenUploadedInpu // NotifyWhenUploaded API operation for AWS Storage Gateway. // // Sends you notification through CloudWatch Events when all files written to -// your NFS file share have been uploaded to Amazon S3. +// your file share have been uploaded to Amazon S3. // // AWS Storage Gateway can send a notification through Amazon CloudWatch Events // when all files written to your file share up to that point in time have been -// uploaded to Amazon S3. These files include files written to the NFS file -// share up to the time that you make a request for notification. When the upload +// uploaded to Amazon S3. These files include files written to the file share +// up to the time that you make a request for notification. When the upload // is done, Storage Gateway sends you notification through an Amazon CloudWatch // Event. You can configure CloudWatch Events to send the notification through // event targets such as Amazon SNS or AWS Lambda function. This operation is @@ -6798,6 +6906,95 @@ func (c *StorageGateway) UpdateSMBFileShareWithContext(ctx aws.Context, input *U return out, req.Send() } +const opUpdateSMBSecurityStrategy = "UpdateSMBSecurityStrategy" + +// UpdateSMBSecurityStrategyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSMBSecurityStrategy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSMBSecurityStrategy for more information on using the UpdateSMBSecurityStrategy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSMBSecurityStrategyRequest method. 
+// req, resp := client.UpdateSMBSecurityStrategyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBSecurityStrategy +func (c *StorageGateway) UpdateSMBSecurityStrategyRequest(input *UpdateSMBSecurityStrategyInput) (req *request.Request, output *UpdateSMBSecurityStrategyOutput) { + op := &request.Operation{ + Name: opUpdateSMBSecurityStrategy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSMBSecurityStrategyInput{} + } + + output = &UpdateSMBSecurityStrategyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSMBSecurityStrategy API operation for AWS Storage Gateway. +// +// Updates the SMB security strategy on a file gateway. This action is only +// supported in file gateways. +// +// This API is called Security level in the User Guide. +// +// A higher security level can affect performance of the gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation UpdateSMBSecurityStrategy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidGatewayRequestException "InvalidGatewayRequestException" +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * ErrCodeInternalServerError "InternalServerError" +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBSecurityStrategy +func (c *StorageGateway) UpdateSMBSecurityStrategy(input *UpdateSMBSecurityStrategyInput) (*UpdateSMBSecurityStrategyOutput, error) { + req, out := c.UpdateSMBSecurityStrategyRequest(input) + return out, req.Send() +} + +// UpdateSMBSecurityStrategyWithContext is the same as UpdateSMBSecurityStrategy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSMBSecurityStrategy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) UpdateSMBSecurityStrategyWithContext(ctx aws.Context, input *UpdateSMBSecurityStrategyInput, opts ...request.Option) (*UpdateSMBSecurityStrategyOutput, error) { + req, out := c.UpdateSMBSecurityStrategyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateSnapshotSchedule = "UpdateSnapshotSchedule" // UpdateSnapshotScheduleRequest generates a "aws/request.Request" representing the @@ -7016,11 +7213,12 @@ type ActivateGatewayInput struct { // GatewayName is a required field GatewayName *string `min:"2" type:"string" required:"true"` - // A value that indicates the region where you want to store your data. The - // gateway region specified must be the same region as the region in your Host - // header in the request. 
For more information about available regions and endpoints - // for AWS Storage Gateway, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) - // in the Amazon Web Services Glossary. + // A value that indicates the AWS Region where you want to store your data. + // The gateway AWS Region specified must be the same AWS Region as the AWS Region + // in your Host header in the request. For more information about available + // AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints + // (https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) in the + // Amazon Web Services Glossary. // // Valid Values: See AWS Storage Gateway Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) // in the AWS General Reference. @@ -7050,13 +7248,13 @@ type ActivateGatewayInput struct { // Valid Values: "STK-L700", "AWS-Gateway-VTL" MediumChangerType *string `min:"2" type:"string"` - // A list of up to 50 tags that can be assigned to the gateway. Each tag is - // a key-value pair. + // A list of up to 50 tags that you can assign to the gateway. Each tag is a + // key-value pair. // - // Valid characters for key and value are letters, spaces, and numbers representable - // in UTF-8 format, and the following special characters: + - = . _ : / @. The - // maximum length of a tag's key is 128 characters, and the maximum length for - // a tag's value is 256. + // Valid characters for key and value are letters, spaces, and numbers that + // can be represented in UTF-8 format, and the following special characters: + // + - = . _ : / @. The maximum length of a tag's key is 128 characters, and + // the maximum length for a tag's value is 256 characters. Tags []*Tag `type:"list"` // The value that indicates the type of tape drive to use for tape gateway. @@ -7179,8 +7377,8 @@ func (s *ActivateGatewayInput) SetTapeDriveType(v string) *ActivateGatewayInput // AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated // gateway. It is a string made of information such as your account, gateway -// name, and region. This ARN is used to reference the gateway in other API -// operations as well as resource-based authorization. +// name, and AWS Region. This ARN is used to reference the gateway in other +// API operations as well as resource-based authorization. // // For gateways activated prior to September 02, 2015, the gateway ARN contains // the gateway name rather than the gateway ID. Changing the name of the gateway @@ -7189,7 +7387,7 @@ type ActivateGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -7220,7 +7418,7 @@ type AddCacheInput struct { DiskIds []*string `type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -7271,7 +7469,7 @@ type AddCacheOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. 
+ // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -7398,7 +7596,7 @@ type AddUploadBufferInput struct { DiskIds []*string `type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -7449,7 +7647,7 @@ type AddUploadBufferOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -7483,7 +7681,7 @@ type AddWorkingStorageInput struct { DiskIds []*string `type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -7536,7 +7734,7 @@ type AddWorkingStorageOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -7556,6 +7754,95 @@ func (s *AddWorkingStorageOutput) SetGatewayARN(v string) *AddWorkingStorageOutp return s } +type AssignTapePoolInput struct { + _ struct{} `type:"structure"` + + // The ID of the pool that you want to add your tape to for archiving. The tape + // in this pool is archived in the S3 storage class that is associated with + // the pool. When you use your backup application to eject the tape, the tape + // is archived directly into the storage class (Glacier or Deep Archive) that + // corresponds to the pool. + // + // Valid values: "GLACIER", "DEEP_ARCHIVE" + // + // PoolId is a required field + PoolId *string `min:"1" type:"string" required:"true"` + + // The unique Amazon Resource Name (ARN) of the virtual tape that you want to + // add to the tape pool. + // + // TapeARN is a required field + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssignTapePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignTapePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssignTapePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignTapePoolInput"} + if s.PoolId == nil { + invalidParams.Add(request.NewErrParamRequired("PoolId")) + } + if s.PoolId != nil && len(*s.PoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PoolId", 1)) + } + if s.TapeARN == nil { + invalidParams.Add(request.NewErrParamRequired("TapeARN")) + } + if s.TapeARN != nil && len(*s.TapeARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPoolId sets the PoolId field's value. 
+func (s *AssignTapePoolInput) SetPoolId(v string) *AssignTapePoolInput { + s.PoolId = &v + return s +} + +// SetTapeARN sets the TapeARN field's value. +func (s *AssignTapePoolInput) SetTapeARN(v string) *AssignTapePoolInput { + s.TapeARN = &v + return s +} + +type AssignTapePoolOutput struct { + _ struct{} `type:"structure"` + + // The unique Amazon Resource Names (ARN) of the virtual tape that was added + // to the tape pool. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AssignTapePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignTapePoolOutput) GoString() string { + return s.String() +} + +// SetTapeARN sets the TapeARN field's value. +func (s *AssignTapePoolOutput) SetTapeARN(v string) *AssignTapePoolOutput { + s.TapeARN = &v + return s +} + // AttachVolumeInput type AttachVolumeInput struct { _ struct{} `type:"structure"` @@ -7859,7 +8146,7 @@ type CancelArchivalInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -7945,7 +8232,7 @@ type CancelRetrievalInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -8036,11 +8323,11 @@ type ChapInfo struct { // The secret key that the initiator (for example, the Windows client) must // provide to participate in mutual CHAP with the target. - SecretToAuthenticateInitiator *string `min:"1" type:"string"` + SecretToAuthenticateInitiator *string `min:"1" type:"string" sensitive:"true"` // The secret key that the target must provide to participate in mutual CHAP // with the initiator (e.g. Windows client). - SecretToAuthenticateTarget *string `min:"1" type:"string"` + SecretToAuthenticateTarget *string `min:"1" type:"string" sensitive:"true"` // The Amazon Resource Name (ARN) of the volume. // @@ -8093,7 +8380,7 @@ type CreateCachediSCSIVolumeInput struct { ClientToken *string `min:"5" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -8128,13 +8415,13 @@ type CreateCachediSCSIVolumeInput struct { // than the size of the existing volume, in bytes. SourceVolumeARN *string `min:"50" type:"string"` - // A list of up to 50 tags that can be assigned to a cached volume. Each tag + // A list of up to 50 tags that you can assign to a cached volume. Each tag // is a key-value pair. // - // Valid characters for key and value are letters, spaces, and numbers representable - // in UTF-8 format, and the following special characters: + - = . _ : / @. The - // maximum length of a tag's key is 128 characters, and the maximum length for - // a tag's value is 256. 
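The generated Validate method above aggregates client-side parameter violations into a request.ErrInvalidParams before any network call; the SDK's handler stack runs the same check when the request is sent. A small sketch of invoking it directly:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	in := &storagegateway.AssignTapePoolInput{
		PoolId: aws.String("GLACIER"),
		// TapeARN intentionally omitted to trigger a validation error.
	}
	if err := in.Validate(); err != nil {
		// Prints an aggregated ErrInvalidParams naming the missing TapeARN.
		fmt.Println(err)
	}
}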
+ // Valid characters for key and value are letters, spaces, and numbers that + // you can represent in UTF-8 format, and the following special characters: + // + - = . _ : / @. The maximum length of a tag's key is 128 characters, and + // the maximum length for a tag's value is 256 characters. Tags []*Tag `type:"list"` // The name of the iSCSI target used by an initiator to connect to a volume @@ -8585,9 +8872,12 @@ func (s *CreateNFSFileShareOutput) SetFileShareARN(v string) *CreateNFSFileShare type CreateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users or groups in the Active Directory that have administrator - // rights to the file share. A group must be prefixed with the @ character. - // For example @group1. Can only be set if Authentication is set to ActiveDirectory. + // A list of users in the Active Directory that will be granted administrator + // privileges on the file share. These users can do all file operations as the + // super-user. + // + // Use this option very carefully, because any user in this list can do anything + // they like on the file share, regardless of file permissions. AdminUserList []*string `type:"list"` // The authentication method that users use to access the file share. @@ -8662,6 +8952,9 @@ type CreateSMBFileShareInput struct { // Set this value to "true to enable ACL (access control list) on the SMB file // share. Set it to "false" to map file and directory permissions to the POSIX // permissions. + // + // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html + // in the Storage Gateway User Guide. SMBACLEnabled *bool `type:"boolean"` // A list of up to 50 tags that can be assigned to the NFS file share. Each @@ -8878,6 +9171,15 @@ type CreateSnapshotFromVolumeRecoveryPointInput struct { // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` + // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a + // key-value pair. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. The + // maximum length of a tag's key is 128 characters, and the maximum length for + // a tag's value is 256. + Tags []*Tag `type:"list"` + // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to return to retrieve the TargetARN for specified VolumeARN. // @@ -8910,6 +9212,16 @@ func (s *CreateSnapshotFromVolumeRecoveryPointInput) Validate() error { if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8923,6 +9235,12 @@ func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetSnapshotDescription(v st return s } +// SetTags sets the Tags field's value. +func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetTags(v []*Tag) *CreateSnapshotFromVolumeRecoveryPointInput { + s.Tags = v + return s +} + // SetVolumeARN sets the VolumeARN field's value. 
func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetVolumeARN(v string) *CreateSnapshotFromVolumeRecoveryPointInput { s.VolumeARN = &v @@ -8986,6 +9304,15 @@ type CreateSnapshotInput struct { // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` + // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a + // key-value pair. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. The + // maximum length of a tag's key is 128 characters, and the maximum length for + // a tag's value is 256. + Tags []*Tag `type:"list"` + // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes. // @@ -9018,6 +9345,16 @@ func (s *CreateSnapshotInput) Validate() error { if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9031,6 +9368,12 @@ func (s *CreateSnapshotInput) SetSnapshotDescription(v string) *CreateSnapshotIn return s } +// SetTags sets the Tags field's value. +func (s *CreateSnapshotInput) SetTags(v []*Tag) *CreateSnapshotInput { + s.Tags = v + return s +} + // SetVolumeARN sets the VolumeARN field's value. func (s *CreateSnapshotInput) SetVolumeARN(v string) *CreateSnapshotInput { s.VolumeARN = &v @@ -9094,7 +9437,7 @@ type CreateStorediSCSIVolumeInput struct { DiskId *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -9312,7 +9655,7 @@ type CreateTapeWithBarcodeInput struct { // The unique Amazon Resource Name (ARN) that represents the gateway to associate // the virtual tape with. Use the ListGateways operation to return a list of - // gateways for your account and region. + // gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -9491,7 +9834,7 @@ type CreateTapesInput struct { // The unique Amazon Resource Name (ARN) that represents the gateway to associate // the virtual tapes with. Use the ListGateways operation to return a list of - // gateways for your account and region. + // gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -9701,7 +10044,7 @@ type DeleteBandwidthRateLimitInput struct { BandwidthType *string `min:"3" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -9757,7 +10100,7 @@ type DeleteBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. 
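These hunks add a Tags field to both snapshot-creation inputs and wire per-element validation into Validate, so a malformed Tag is rejected before the request leaves the client. A sketch of tagging a snapshot at creation time; the volume ARN is a placeholder and the Tag type's Key/Value fields are assumed from the SDK's usual generated shape, since the struct itself is not shown in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := storagegateway.New(sess)

	out, err := svc.CreateSnapshot(&storagegateway.CreateSnapshotInput{
		VolumeARN:           aws.String("arn:aws:storagegateway:us-west-2:123456789012:gateway/sgw-12345678/volume/vol-EXAMPLE"), // placeholder
		SnapshotDescription: aws.String("nightly backup"),
		Tags: []*storagegateway.Tag{
			{Key: aws.String("team"), Value: aws.String("storage")}, // Key/Value assumed per the generated Tag type
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}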
Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -9957,7 +10300,7 @@ type DeleteGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10000,7 +10343,7 @@ type DeleteGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -10158,7 +10501,7 @@ type DeleteTapeInput struct { // The unique Amazon Resource Name (ARN) of the gateway that the virtual tape // to delete is associated with. Use the ListGateways operation to return a - // list of gateways for your account and region. + // list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10310,7 +10653,7 @@ type DescribeBandwidthRateLimitInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10361,7 +10704,7 @@ type DescribeBandwidthRateLimitOutput struct { AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -10397,7 +10740,7 @@ type DescribeCacheInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10465,7 +10808,7 @@ type DescribeCacheOutput struct { DiskIds []*string `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -10673,7 +11016,7 @@ type DescribeGatewayInformationInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. 
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10715,6 +11058,10 @@ func (s *DescribeGatewayInformationInput) SetGatewayARN(v string) *DescribeGatew type DescribeGatewayInformationOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was + // used to monitor and log events in the gateway. + CloudWatchLogGroupARN *string `type:"string"` + // The ID of the Amazon EC2 instance that was used to launch the gateway. Ec2InstanceId *string `type:"string"` @@ -10722,7 +11069,7 @@ type DescribeGatewayInformationOutput struct { Ec2InstanceRegion *string `type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The unique identifier assigned to your gateway during activation. This ID @@ -10760,6 +11107,10 @@ type DescribeGatewayInformationOutput struct { // key name. Each tag is a key-value pair. For a gateway with more than 10 tags // assigned, you can view all tags using the ListTagsForResource API operation. Tags []*Tag `type:"list"` + + // The configuration settings for the virtual private cloud (VPC) endpoint for + // your gateway. + VPCEndpoint *string `type:"string"` } // String returns the string representation @@ -10772,6 +11123,12 @@ func (s DescribeGatewayInformationOutput) GoString() string { return s.String() } +// SetCloudWatchLogGroupARN sets the CloudWatchLogGroupARN field's value. +func (s *DescribeGatewayInformationOutput) SetCloudWatchLogGroupARN(v string) *DescribeGatewayInformationOutput { + s.CloudWatchLogGroupARN = &v + return s +} + // SetEc2InstanceId sets the Ec2InstanceId field's value. func (s *DescribeGatewayInformationOutput) SetEc2InstanceId(v string) *DescribeGatewayInformationOutput { s.Ec2InstanceId = &v @@ -10844,12 +11201,18 @@ func (s *DescribeGatewayInformationOutput) SetTags(v []*Tag) *DescribeGatewayInf return s } +// SetVPCEndpoint sets the VPCEndpoint field's value. +func (s *DescribeGatewayInformationOutput) SetVPCEndpoint(v string) *DescribeGatewayInformationOutput { + s.VPCEndpoint = &v + return s +} + // A JSON object containing the of the gateway. type DescribeMaintenanceStartTimeInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -10914,7 +11277,7 @@ type DescribeMaintenanceStartTimeOutput struct { DayOfWeek *int64 `type:"integer"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The hour component of the maintenance start time represented as hh, where @@ -11116,7 +11479,7 @@ type DescribeSMBSettingsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. 
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -11161,12 +11524,28 @@ type DescribeSMBSettingsOutput struct { DomainName *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` - // This value is true if a password for the guest user “smbguest” is set, and - // otherwise false. + // This value is true if a password for the guest user “smbguest” is set, + // and otherwise false. SMBGuestPasswordSet *bool `type:"boolean"` + + // The type of security strategy that was specified for file gateway. + // + // ClientSpecified: if you use this option, requests are established based on + // what is negotiated by the client. This option is recommended when you want + // to maximize compatibility across different clients in your environment. + // + // MandatorySigning: if you use this option, file gateway only allows connections + // from SMBv2 or SMBv3 clients that have signing enabled. This option works + // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. + // + // MandatoryEncryption: if you use this option, file gateway only allows connections + // from SMBv3 clients that have encryption enabled. This option is highly recommended + // for environments that handle sensitive data. This option works with SMB clients + // on Microsoft Windows 8, Windows Server 2012 or newer. + SMBSecurityStrategy *string `type:"string" enum:"SMBSecurityStrategy"` } // String returns the string representation @@ -11197,6 +11576,12 @@ func (s *DescribeSMBSettingsOutput) SetSMBGuestPasswordSet(v bool) *DescribeSMBS return s } +// SetSMBSecurityStrategy sets the SMBSecurityStrategy field's value. +func (s *DescribeSMBSettingsOutput) SetSMBSecurityStrategy(v string) *DescribeSMBSettingsOutput { + s.SMBSecurityStrategy = &v + return s +} + // A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the // volume. type DescribeSnapshotScheduleInput struct { @@ -11255,6 +11640,11 @@ type DescribeSnapshotScheduleOutput struct { // of the gateway. StartAt *int64 `type:"integer"` + // A list of up to 50 tags assigned to the snapshot schedule, sorted alphabetically + // by key name. Each tag is a key-value pair. For a gateway with more than 10 + // tags assigned, you can view all tags using the ListTagsForResource API operation. + Tags []*Tag `type:"list"` + // A value that indicates the time zone of the gateway. Timezone *string `min:"3" type:"string"` @@ -11290,6 +11680,12 @@ func (s *DescribeSnapshotScheduleOutput) SetStartAt(v int64) *DescribeSnapshotSc return s } +// SetTags sets the Tags field's value. +func (s *DescribeSnapshotScheduleOutput) SetTags(v []*Tag) *DescribeSnapshotScheduleOutput { + s.Tags = v + return s +} + // SetTimezone sets the Timezone field's value. func (s *DescribeSnapshotScheduleOutput) SetTimezone(v string) *DescribeSnapshotScheduleOutput { s.Timezone = &v @@ -11518,7 +11914,7 @@ type DescribeTapeRecoveryPointsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. 
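The new SMBSecurityStrategy field on DescribeSMBSettingsOutput surfaces which of the three documented strategies the gateway enforces. A sketch that reads it back and branches on the documented values (string literals are used here; the SDK typically also generates enum constants for these):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := storagegateway.New(sess)
	gatewayARN := "arn:aws:storagegateway:us-west-2:123456789012:gateway/sgw-12345678" // placeholder

	out, err := svc.DescribeSMBSettings(&storagegateway.DescribeSMBSettingsInput{
		GatewayARN: aws.String(gatewayARN),
	})
	if err != nil {
		log.Fatal(err)
	}
	switch aws.StringValue(out.SMBSecurityStrategy) {
	case "MandatoryEncryption":
		fmt.Println("SMBv3 clients with encryption only")
	case "MandatorySigning":
		fmt.Println("SMBv2/v3 clients with signing only")
	default: // "ClientSpecified"
		fmt.Println("negotiated by the client")
	}
}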
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -11587,7 +11983,7 @@ type DescribeTapeRecoveryPointsOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // An opaque string that indicates the position at which the virtual tape recovery @@ -11635,7 +12031,7 @@ type DescribeTapesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -11756,7 +12152,7 @@ type DescribeUploadBufferInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -11804,7 +12200,7 @@ type DescribeUploadBufferOutput struct { DiskIds []*string `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The total number of bytes allocated in the gateway's as upload buffer. @@ -11853,7 +12249,7 @@ type DescribeVTLDevicesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -11936,7 +12332,7 @@ type DescribeVTLDevicesOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // An opaque string that indicates the position at which the VTL devices that @@ -11983,7 +12379,7 @@ type DescribeWorkingStorageInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -12032,7 +12428,7 @@ type DescribeWorkingStorageOutput struct { DiskIds []*string `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The total working storage in bytes allocated for the gateway. 
If no working @@ -12212,7 +12608,7 @@ type DisableGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -12418,7 +12814,7 @@ type FileShareInfo struct { FileShareType *string `type:"string" enum:"FileShareType"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -12473,7 +12869,7 @@ type GatewayInfo struct { Ec2InstanceRegion *string `type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The unique identifier assigned to your gateway during activation. This ID @@ -12560,13 +12956,13 @@ type JoinDomainInput struct { DomainName *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // The organizational unit (OU) is a container with an Active Directory that - // can hold users, groups, computers, and other OUs and this parameter specifies + // The organizational unit (OU) is a container in an Active Directory that can + // hold users, groups, computers, and other OUs and this parameter specifies // the OU that the gateway will join within the AD domain. OrganizationalUnit *string `min:"1" type:"string"` @@ -12894,7 +13290,7 @@ type ListLocalDisksInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -12941,7 +13337,7 @@ type ListLocalDisksOutput struct { Disks []*Disk `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -13261,7 +13657,7 @@ type ListVolumeRecoveryPointsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -13303,7 +13699,7 @@ type ListVolumeRecoveryPointsOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. 
GatewayARN *string `min:"50" type:"string"` // An array of VolumeRecoveryPointInfo objects. @@ -13341,7 +13737,7 @@ type ListVolumesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // Specifies that the list of volumes returned be limited to the specified number @@ -13410,7 +13806,7 @@ type ListVolumesOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // Use the marker in your next request to continue pagination of iSCSI volumes. @@ -13555,7 +13951,7 @@ type NFSFileShareInfo struct { FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // A value that enables guessing of the MIME type for uploaded objects based @@ -14052,7 +14448,7 @@ type ResetCacheInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -14094,7 +14490,7 @@ type ResetCacheOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -14120,7 +14516,7 @@ type RetrieveTapeArchiveInput struct { // The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual // tape to. Use the ListGateways operation to return a list of gateways for - // your account and region. + // your account and AWS Region. // // You retrieve archived virtual tapes to only one gateway and the gateway must // be a tape gateway. @@ -14208,7 +14604,7 @@ type RetrieveTapeRecoveryPointInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -14321,7 +14717,7 @@ type SMBFileShareInfo struct { FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // A value that enables guessing of the MIME type for uploaded objects based @@ -14373,6 +14769,9 @@ type SMBFileShareInfo struct { // If this value is set to "true", indicates that ACL (access control list) // is enabled on the SMB file share. 
If it is set to "false", it indicates that // file and directory permissions are mapped to the POSIX permission. + // + // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html + // in the Storage Gateway User Guide. SMBACLEnabled *bool `type:"boolean"` // A list of up to 50 tags assigned to the SMB file share, sorted alphabetically @@ -14521,7 +14920,7 @@ type SetLocalConsolePasswordInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -14580,7 +14979,7 @@ type SetLocalConsolePasswordOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -14664,7 +15063,7 @@ type SetSMBGuestPasswordOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -14689,7 +15088,7 @@ type ShutdownGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -14732,7 +15131,7 @@ type ShutdownGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -14757,7 +15156,7 @@ type StartGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -14800,7 +15199,7 @@ type StartGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -15294,7 +15693,7 @@ type TapeInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The ID of the pool that you want to add your tape to for archiving. 
The tape @@ -15435,7 +15834,7 @@ type UpdateBandwidthRateLimitInput struct { AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -15497,7 +15896,7 @@ type UpdateBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -15540,7 +15939,7 @@ type UpdateChapCredentialsInput struct { // The secret key must be between 12 and 16 bytes when encoded in UTF-8. // // SecretToAuthenticateInitiator is a required field - SecretToAuthenticateInitiator *string `min:"1" type:"string" required:"true"` + SecretToAuthenticateInitiator *string `min:"1" type:"string" required:"true" sensitive:"true"` // The secret key that the target must provide to participate in mutual CHAP // with the initiator (e.g. Windows client). @@ -15548,7 +15947,7 @@ type UpdateChapCredentialsInput struct { // Byte constraints: Minimum bytes of 12. Maximum bytes of 16. // // The secret key must be between 12 and 16 bytes when encoded in UTF-8. - SecretToAuthenticateTarget *string `min:"1" type:"string"` + SecretToAuthenticateTarget *string `min:"1" type:"string" sensitive:"true"` // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to return the TargetARN for specified VolumeARN. @@ -15660,8 +16059,14 @@ func (s *UpdateChapCredentialsOutput) SetTargetARN(v string) *UpdateChapCredenti type UpdateGatewayInformationInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you + // want to use to monitor and log events in the gateway. + // + // For more information, see What Is Amazon CloudWatch Logs? (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html). + CloudWatchLogGroupARN *string `type:"string"` + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -15705,6 +16110,12 @@ func (s *UpdateGatewayInformationInput) Validate() error { return nil } +// SetCloudWatchLogGroupARN sets the CloudWatchLogGroupARN field's value. +func (s *UpdateGatewayInformationInput) SetCloudWatchLogGroupARN(v string) *UpdateGatewayInformationInput { + s.CloudWatchLogGroupARN = &v + return s +} + // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateGatewayInformationInput) SetGatewayARN(v string) *UpdateGatewayInformationInput { s.GatewayARN = &v @@ -15728,7 +16139,7 @@ type UpdateGatewayInformationOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The name you configured for your gateway. 
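The hunk above adds the CloudWatchLogGroupARN field and its SetCloudWatchLogGroupARN setter to UpdateGatewayInformationInput. A minimal usage sketch follows; it is not part of this diff, and the session setup and both ARN values are illustrative assumptions only:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	// Credentials and region are assumed to come from the environment.
	sess := session.Must(session.NewSession())
	svc := storagegateway.New(sess)

	// Point an existing gateway at a CloudWatch log group using the
	// generated fluent setters; both ARNs below are placeholders.
	input := (&storagegateway.UpdateGatewayInformationInput{}).
		SetGatewayARN("arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B").
		SetCloudWatchLogGroupARN("arn:aws:logs:us-west-2:111122223333:log-group:example-gateway-logs")

	out, err := svc.UpdateGatewayInformation(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}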
@@ -15762,7 +16173,7 @@ type UpdateGatewaySoftwareNowInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -15805,7 +16216,7 @@ type UpdateGatewaySoftwareNowOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -15849,7 +16260,7 @@ type UpdateMaintenanceStartTimeInput struct { DayOfWeek *int64 `type:"integer"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` @@ -15940,7 +16351,7 @@ type UpdateMaintenanceStartTimeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } @@ -16159,9 +16570,9 @@ func (s *UpdateNFSFileShareOutput) SetFileShareARN(v string) *UpdateNFSFileShare type UpdateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users or groups in the Active Directory that have administrator - // rights to the file share. A group must be prefixed with the @ character. - // For example @group1. Can only be set if Authentication is set to ActiveDirectory. + // A list of users in the Active Directory that have administrator rights to + // the file share. A group must be prefixed with the @ character. For example + // @group1. Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the @@ -16213,6 +16624,9 @@ type UpdateSMBFileShareInput struct { // Set this value to "true" to enable ACL (access control list) on the SMB file // share. Set it to "false" to map file and directory permissions to the POSIX // permissions. + // + // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html + // in the Storage Gateway User Guide. SMBACLEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are allowed to access @@ -16349,6 +16763,99 @@ func (s *UpdateSMBFileShareOutput) SetFileShareARN(v string) *UpdateSMBFileShare return s } +type UpdateSMBSecurityStrategyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies the type of security strategy. + // + // ClientSpecified: if you use this option, requests are established based on + // what is negotiated by the client. This option is recommended when you want + // to maximize compatibility across different clients in your environment.
+ // + // MandatorySigning: if you use this option, file gateway only allows connections + // from SMBv2 or SMBv3 clients that have signing enabled. This option works + // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. + // + // MandatoryEncryption: if you use this option, file gateway only allows connections + // from SMBv3 clients that have encryption enabled. This option is highly recommended + // for environments that handle sensitive data. This option works with SMB clients + // on Microsoft Windows 8, Windows Server 2012 or newer. + // + // SMBSecurityStrategy is a required field + SMBSecurityStrategy *string `type:"string" required:"true" enum:"SMBSecurityStrategy"` +} + +// String returns the string representation +func (s UpdateSMBSecurityStrategyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSMBSecurityStrategyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSMBSecurityStrategyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSMBSecurityStrategyInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.SMBSecurityStrategy == nil { + invalidParams.Add(request.NewErrParamRequired("SMBSecurityStrategy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *UpdateSMBSecurityStrategyInput) SetGatewayARN(v string) *UpdateSMBSecurityStrategyInput { + s.GatewayARN = &v + return s +} + +// SetSMBSecurityStrategy sets the SMBSecurityStrategy field's value. +func (s *UpdateSMBSecurityStrategyInput) SetSMBSecurityStrategy(v string) *UpdateSMBSecurityStrategyInput { + s.SMBSecurityStrategy = &v + return s +} + +type UpdateSMBSecurityStrategyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateSMBSecurityStrategyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSMBSecurityStrategyOutput) GoString() string { + return s.String() +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *UpdateSMBSecurityStrategyOutput) SetGatewayARN(v string) *UpdateSMBSecurityStrategyOutput { + s.GatewayARN = &v + return s +} + // A JSON object containing one or more of the following fields: // // * UpdateSnapshotScheduleInput$Description @@ -16376,6 +16883,15 @@ type UpdateSnapshotScheduleInput struct { // StartAt is a required field StartAt *int64 `type:"integer" required:"true"` + // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a + // key-value pair. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. The + // maximum length of a tag's key is 128 characters, and the maximum length for + // a tag's value is 256. + Tags []*Tag `type:"list"` + // The Amazon Resource Name (ARN) of the volume. 
Use the ListVolumes operation // to return a list of gateway volumes. // @@ -16414,6 +16930,16 @@ func (s *UpdateSnapshotScheduleInput) Validate() error { if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -16439,6 +16965,12 @@ func (s *UpdateSnapshotScheduleInput) SetStartAt(v int64) *UpdateSnapshotSchedul return s } +// SetTags sets the Tags field's value. +func (s *UpdateSnapshotScheduleInput) SetTags(v []*Tag) *UpdateSnapshotScheduleInput { + s.Tags = v + return s +} + // SetVolumeARN sets the VolumeARN field's value. func (s *UpdateSnapshotScheduleInput) SetVolumeARN(v string) *UpdateSnapshotScheduleInput { s.VolumeARN = &v @@ -16620,7 +17152,7 @@ type VolumeInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation - // to return a list of gateways for your account and region. + // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The unique identifier assigned to your gateway during activation. This ID @@ -17044,3 +17576,14 @@ const ( // ObjectACLAwsExecRead is a ObjectACL enum value ObjectACLAwsExecRead = "aws-exec-read" ) + +const ( + // SMBSecurityStrategyClientSpecified is a SMBSecurityStrategy enum value + SMBSecurityStrategyClientSpecified = "ClientSpecified" + + // SMBSecurityStrategyMandatorySigning is a SMBSecurityStrategy enum value + SMBSecurityStrategyMandatorySigning = "MandatorySigning" + + // SMBSecurityStrategyMandatoryEncryption is a SMBSecurityStrategy enum value + SMBSecurityStrategyMandatoryEncryption = "MandatoryEncryption" +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go index 9ed0cd4578b..c3eb54cd5a6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go @@ -29,8 +29,8 @@ // of requests and responses. // // * AWS Storage Gateway Regions and Endpoints: (http://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) -// Provides a list of each AWS region and endpoints available for use with -// AWS Storage Gateway. +// Provides a list of each AWS Region and the endpoints available for use +// with AWS Storage Gateway. // // AWS Storage Gateway resource IDs are in uppercase. When you use these resource // IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must @@ -46,8 +46,8 @@ // systems with the new format. For more information, see Longer EC2 and EBS // Resource IDs (https://aws.amazon.com/ec2/faqs/#longer-ids). // -// For example, a volume Amazon Resource Name (ARN) with the longer volume -// ID format looks like the following: +// For example, a volume Amazon Resource Name (ARN) with the longer volume ID +// format looks like the following: // // arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. 
// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go index 9a0c08f6962..1e4f312be8c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go @@ -46,11 +46,11 @@ const ( // svc := storagegateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *StorageGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *StorageGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *StorageGateway { svc := &StorageGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-06-30", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index fc05210ac98..9c5ed454536 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -3,6 +3,7 @@ package sts import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -58,9 +59,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Returns a set of temporary security credentials that you can use to access // AWS resources that you might not normally have access to. These temporary // credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole for cross-account access or federation. -// For a comparison of AssumeRole with other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // @@ -71,21 +72,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // to access resources in each account. You could create long-term credentials // in each account to access those resources. However, managing all those credentials // and remembering which one can access which account can be time consuming. 
-// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts // by assuming roles in those accounts. For more information about roles, see -// IAM Roles (Delegation and Federation) (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) -// in the IAM User Guide. -// -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your network, you don't have to recreate identities in AWS in order -// to grant them access to AWS. Instead, after a user has been authenticated, -// you call AssumeRole (and specify the role with the appropriate permissions) -// to get temporary security credentials for that user. With those temporary -// security credentials, you construct a sign-in URL from which users can access -// the console. For more information, see Common Scenarios for Temporary Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // // By default, the temporary security credentials created by AssumeRole last @@ -103,51 +93,54 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: You cannot call -// the AWS STS service's GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass an IAM permissions policy to this operation. If you -// pass a policy to this operation, the resulting temporary credentials have -// the permissions of the assumed role and the policy that you pass. This gives -// you a way to further restrict the permissions for the resulting temporary -// security credentials. You cannot use the passed policy to grant permissions -// that are in excess of those allowed by the permissions policy of the role -// that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. 
For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. // -// The user who wants to access the role must also have permissions delegated -// from the role's administrator. If the user and the role are in a different -// account, then the user's administrator must attach a policy. That attached -// policy must allow the user to call AssumeRole for the ARN of the role in -// the other account. If the user is in the same account as the role, then you -// can do either of the following: +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: // // * Attach a policy to the user (identical to the previous user in a different -// account) +// account). // // * Add the user as a principal directly in the role's trust policy. // -// In this case, the trust policy acts as the only resource-based policy in -// IAM. Users in the same account as the role do not need explicit permission -// to assume the role. For more information about trust policies and resource-based -// policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // // Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios in which -// you want to make sure that the user who is assuming the role has been authenticated -// using an AWS MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication. If the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. 
The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. // // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // @@ -280,16 +273,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken API operations. -// -// Optionally, you can pass an IAM permissions policy to this operation. If -// you pass a policy to this operation, the resulting temporary credentials -// have the permissions of the assumed role and the policy that you pass. This -// gives you a way to further restrict the permissions for the resulting temporary -// security credentials. You cannot use the passed policy to grant permissions -// that are in excess of those allowed by the permissions policy of the role -// that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithSAML, you must configure your @@ -440,8 +437,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // identity throughout the lifetime of an application. // // To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview -// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. 
// // Calling AssumeRoleWithWebIdentity does not require the use of AWS security @@ -475,17 +471,20 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: -// you cannot call the STS service's GetFederationToken or GetSessionToken API -// operations. -// -// (Optional) You can pass an IAM permissions policy to this operation. If you -// pass a policy to this operation, the resulting temporary credentials have -// the permissions of the assumed role and the policy that you pass. This gives -// you a way to further restrict the permissions for the resulting temporary -// security credentials. You cannot use the passed policy to grant permissions -// that are in excess of those allowed by the permissions policy of the role -// that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithWebIdentity, you must have @@ -507,8 +506,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) // and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). // Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then // using those credentials to make a request to AWS. 
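The rewritten session-policy documentation above pairs with the PolicyArns parameter that this diff adds to AssumeRoleInput further down. A hedged sketch of how a caller might pass a managed session policy (not part of this diff; the role ARN and session name are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// The resulting session's permissions are the intersection of the
	// role's identity-based policy and the managed session policy below.
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/example-role"),
		RoleSessionName: aws.String("session-policy-demo"),
		// Up to 10 managed policy ARNs may be passed as session policies.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Credentials)
}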
@@ -678,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // Returned Error Codes: // * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// This error is returned if the message passed to DecodeAuthorizationMessage +// was invalid. This can happen if the token contains invalid characters, such +// as linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -704,6 +702,102 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco return out, req.Send() } +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. 
To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the @@ -748,8 +842,15 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // GetCallerIdentity API operation for AWS Security Token Service. // -// Returns details about the IAM identity whose credentials are used to call -// the API. +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -866,18 +967,25 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // Permissions // -// You must pass an IAM permissions policy to GetFederationToken. When you pass -// a policy to this operation, the resulting temporary credentials are defined -// by the intersection of your IAM user policies and the passed policy . The -// passed policy defines the permissions of the federated user. 
AWS allows the -// federated user's request only when both the attached policy and the IAM user -// policy explicitly allow the federated user to perform the requested action. -// The passed policy cannot grant more permissions than those that are defined -// in the IAM user policy. For more information about how permissions work, -// see Permissions for GetFederationToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. The only exception +// is when the credentials are used to access a resource that has a resource-based +// policy that specifically references the federated user session in the Principal +// element of the policy. When you pass session policies, the session permissions +// are the intersection of the IAM user policies and the session policies that +// you pass. This gives you a way to further restrict the permissions for a +// federated user. You cannot use session policies to grant more permissions +// than those that are defined in the permissions policy of the IAM user. For +// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -984,12 +1092,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // in the IAM User Guide. // // The GetSessionToken operation must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify. This duration can -// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 -// hours), with a default of 43,200 seconds (12 hours). Credentials that are -// created by using account credentials can range from 900 seconds (15 minutes) -// up to a maximum of 3,600 seconds (1 hour), with a default of 1 hour. +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). 
Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: @@ -997,20 +1105,19 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // * You cannot call any IAM API operations unless MFA authentication information // is included in the request. // -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. // // We recommend that you do not call GetSessionToken with AWS account root user // credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, // and using IAM users for everyday interaction with AWS. // -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with account or -// IAM user whose credentials are used to call the operation. If GetSessionToken -// is called using AWS account root user credentials, the temporary credentials -// have root user permissions. Similarly, if GetSessionToken is called using -// the credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. // // For more information about using GetSessionToken to create temporary credentials, // go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) @@ -1080,13 +1187,13 @@ type AssumeRoleInput struct { // A unique identifier that might be required when you assume a role in another // account. If the administrator of the account to which the role belongs provided // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. Because - // a cross-account role is usually set up to trust everyone in an account, the - // administrator of the trusting account might send an external ID to the administrator - // of the trusted account. That way, only someone with the ID can assume the - // role, rather than everyone in the account. For more information about the - // external ID, see How to Use an External ID When Granting Access to Your AWS - // Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. 
For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. // // The regex used to validate this parameter is a string of characters consisting @@ -1094,30 +1201,56 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@:/- ExternalId *string `min:"2" type:"string"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // This parameter is optional. If you pass a policy to this operation, the resulting - // temporary credentials have the permissions of the assumed role and the policy - // that you pass. This gives you a way to further restrict the permissions for - // the resulting temporary security credentials. You cannot use the passed policy - // to grant permissions that are in excess of those allowed by the permissions - // policy of the role that is being assumed. For more information, see Permissions - // for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plaintext must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, where 100 percent is the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. 
The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1201,6 +1334,16 @@ func (s *AssumeRoleInput) Validate() error { if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1226,6 +1369,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1267,8 +1416,6 @@ type AssumeRoleOutput struct { // // The size of the security token that STS API operations return is not fixed. // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1332,31 +1479,56 @@ type AssumeRoleWithSAMLInput struct { // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy parameter is optional. If you pass a policy to this operation, - // the resulting temporary credentials have the permissions of the assumed role - // and the policy that you pass. 
This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the permissions policy of the role that is being assumed. For more information, - // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plaintext must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, where 100 percent is the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. 
The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes // the IdP. // @@ -1414,6 +1586,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error { if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1433,6 +1615,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + // SetPrincipalArn sets the PrincipalArn field's value. func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { s.PrincipalArn = &v @@ -1469,8 +1657,6 @@ type AssumeRoleWithSAMLOutput struct { // // The size of the security token that STS API operations return is not fixed. // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // The value of the Issuer element of the SAML assertion. @@ -1588,31 +1774,56 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy parameter is optional. If you pass a policy to this operation, - // the resulting temporary credentials have the permissions of the assumed role - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the permissions policy of the role that is being assumed. For more information, - // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. 
You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plaintext must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, where 100 percent is the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
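To make the intersection semantics concrete, here is a minimal, hypothetical sketch of an AssumeRoleWithWebIdentity call that combines an inline session policy with one managed session policy; the role ARN, session name, token variable, and policy values are illustrative assumptions, not part of this generated file:

	// The effective permissions of the returned credentials are the
	// intersection of the role's identity-based policy, the inline
	// Policy document, and the managed policies listed in PolicyArns.
	input := &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo-role"), // assumed value
		RoleSessionName:  aws.String("demo-session"),
		WebIdentityToken: aws.String(webToken), // token obtained from the identity provider
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[
			{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
	}
	out, err := svc.AssumeRoleWithWebIdentity(input)

Note that the linefeeds inside the raw-string policy document are legal per the character-set rules described above.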
+ PolicyArns []*PolicyDescriptorType `type:"list"` + // The fully qualified host component of the domain name of the identity provider. // // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com @@ -1689,6 +1900,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error { if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1708,6 +1929,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + // SetProviderId sets the ProviderId field's value. func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { s.ProviderId = &v @@ -1754,8 +1981,6 @@ type AssumeRoleWithWebIdentityOutput struct { // // The size of the security token that STS API operations return is not fixed. // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -2034,6 +2259,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercased letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. 
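The new GetAccessKeyInfo operation above amounts to a simple account lookup for an access key. A hedged sketch (the service client svc is assumed; AKIAIOSFODNN7EXAMPLE is the standard documentation placeholder key):

	// Look up which AWS account owns a given access key.
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err == nil {
		fmt.Println(aws.StringValue(out.Account)) // the owning AWS account ID
	}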
+ Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2118,30 +2410,73 @@ type GetFederationTokenInput struct { // Name is a required field Name *string `min:"2" type:"string" required:"true"` - // An IAM policy in JSON format. You must pass an IAM permissions policy to - // GetFederationToken. When you pass a policy to this operation, the resulting - // temporary credentials are defined by the intersection of your IAM user policies - // and the policy that you pass. The passed policy defines the permissions of - // the federated user. AWS allows the federated user's request only when both - // the attached policy and the IAM user policy explicitly allow the federated - // user to perform the requested action. The passed policy cannot grant more - // permissions than those that are defined in the IAM user policy. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy plaintext must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, where 100 percent is the maximum allowed - // size. + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
// - // For more information about how permissions work, see Permissions for GetFederationToken - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies shouldn't exceed 2048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. 
+ PolicyArns []*PolicyDescriptorType `type:"list"` } // String returns the string representation @@ -2169,6 +2504,16 @@ func (s *GetFederationTokenInput) Validate() error { if s.Policy != nil && len(*s.Policy) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2194,6 +2539,12 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2204,8 +2555,6 @@ type GetFederationTokenOutput struct { // // The size of the security token that STS API operations return is not fixed. // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // Identifiers for the federated user associated with the credentials (such @@ -2340,8 +2689,6 @@ type GetSessionTokenOutput struct { // // The size of the security token that STS API operations return is not fixed. // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` } @@ -2360,3 +2707,44 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO s.Credentials = v return s } + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. 
+func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index fa7a4c667c1..fcb720dcac6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -9,14 +9,6 @@ // This guide provides descriptions of the STS API. For more detailed information // about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// // For information about setting up signatures and authorization through the // API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about the Query API, @@ -53,11 +45,11 @@ // in the IAM User Guide. // // After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you use both the setRegion -// and setEndpoint methods to make calls to a Regional endpoint. You can use -// the setRegion method alone for manually enabled Regions, such as Asia Pacific -// (Hong Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you use the setRegion method alone for Regions enabled by default, +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, // the calls are directed to the global endpoint of https://sts.amazonaws.com. 
// // To view the list of AWS STS endpoints and whether they are active by default, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c356c..a3e378edad3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -34,9 +34,9 @@ const ( // ErrCodeInvalidAuthorizationMessageException for service response error code // "InvalidAuthorizationMessageException". // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. + // This error is returned if the message passed to DecodeAuthorizationMessage + // was invalid. This can happen if the token contains invalid characters, such + // as linebreaks. ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" // ErrCodeInvalidIdentityTokenException for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d1b3..2c3c3d2c1ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -46,11 +46,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 00000000000..e2e1d6efe55 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. 
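Consistent with the mocking pattern that the generated package documentation below describes, a test can embed STSAPI and stub only the operations it needs. This sketch (mockSTSClient and the canned account ID are illustrative assumptions) stubs the new GetAccessKeyInfo operation:

	// mockSTSClient satisfies stsiface.STSAPI via embedding; only the
	// methods exercised by the test need real implementations.
	type mockSTSClient struct {
		stsiface.STSAPI
	}

	func (m *mockSTSClient) GetAccessKeyInfo(in *sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) {
		account := "123456789012" // canned response for the test
		return &sts.GetAccessKeyInfoOutput{Account: &account}, nil
	}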
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls for unit testing your code with the SDK, without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput,
...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/api.go index 97f9b8228f7..864d2401090 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/api.go @@ -72,19 +72,15 @@ func (c *SWF) CountClosedWorkflowExecutionsRequest(input *CountClosedWorkflowExe // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// the appropriate keys. tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// typeFilter.name: String constraint. The key is swf:typeFilter.name. typeFilter.version: +// String constraint. The key is swf:typeFilter.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -184,19 +180,15 @@ func (c *SWF) CountOpenWorkflowExecutionsRequest(input *CountOpenWorkflowExecuti // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// the appropriate keys. tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// typeFilter.name: String constraint. The key is swf:typeFilter.name. typeFilter.version: +// String constraint. The key is swf:typeFilter.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. 
// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -302,7 +294,7 @@ func (c *SWF) CountPendingActivityTasksRequest(input *CountPendingActivityTasksI // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -408,7 +400,7 @@ func (c *SWF) CountPendingDecisionTasksRequest(input *CountPendingDecisionTasksI // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -510,17 +502,15 @@ func (c *SWF) DeprecateActivityTypeRequest(input *DeprecateActivityTypeInput) (r // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name: String constraint. The key is swf:activityType.name. -// -// activityType.version: String constraint. The key is swf:activityType.version. +// the appropriate keys. activityType.name: String constraint. The key is +// swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -633,7 +623,7 @@ func (c *SWF) DeprecateDomainRequest(input *DeprecateDomainInput) (req *request. // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -739,17 +729,15 @@ func (c *SWF) DeprecateWorkflowTypeRequest(input *DeprecateWorkflowTypeInput) (r // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. -// -// workflowType.version: String constraint. The key is swf:workflowType.version. +// the appropriate keys. workflowType.name: String constraint. The key is +// swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -850,17 +838,15 @@ func (c *SWF) DescribeActivityTypeRequest(input *DescribeActivityTypeInput) (req // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name: String constraint. The key is swf:activityType.name. -// -// activityType.version: String constraint. The key is swf:activityType.version. +// the appropriate keys. activityType.name: String constraint. The key is +// swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -962,7 +948,7 @@ func (c *SWF) DescribeDomainRequest(input *DescribeDomainInput) (req *request.Re // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1067,7 +1053,7 @@ func (c *SWF) DescribeWorkflowExecutionRequest(input *DescribeWorkflowExecutionI // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1165,17 +1151,15 @@ func (c *SWF) DescribeWorkflowTypeRequest(input *DescribeWorkflowTypeInput) (req // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. -// -// workflowType.version: String constraint. The key is swf:workflowType.version. +// the appropriate keys. workflowType.name: String constraint. The key is +// swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1287,7 +1271,7 @@ func (c *SWF) GetWorkflowExecutionHistoryRequest(input *GetWorkflowExecutionHist // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1339,7 +1323,7 @@ func (c *SWF) GetWorkflowExecutionHistoryWithContext(ctx aws.Context, input *Get // // Example iterating over at most 3 pages of a GetWorkflowExecutionHistory operation. 
// pageNum := 0 // err := client.GetWorkflowExecutionHistoryPages(params, -// func(page *GetWorkflowExecutionHistoryOutput, lastPage bool) bool { +// func(page *swf.GetWorkflowExecutionHistoryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1371,10 +1355,12 @@ func (c *SWF) GetWorkflowExecutionHistoryPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetWorkflowExecutionHistoryOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetWorkflowExecutionHistoryOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1448,7 +1434,7 @@ func (c *SWF) ListActivityTypesRequest(input *ListActivityTypesInput) (req *requ // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1500,7 +1486,7 @@ func (c *SWF) ListActivityTypesWithContext(ctx aws.Context, input *ListActivityT // // Example iterating over at most 3 pages of a ListActivityTypes operation. // pageNum := 0 // err := client.ListActivityTypesPages(params, -// func(page *ListActivityTypesOutput, lastPage bool) bool { +// func(page *swf.ListActivityTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1532,10 +1518,12 @@ func (c *SWF) ListActivityTypesPagesWithContext(ctx aws.Context, input *ListActi }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListActivityTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListActivityTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1606,19 +1594,15 @@ func (c *SWF) ListClosedWorkflowExecutionsRequest(input *ListClosedWorkflowExecu // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// the appropriate keys. tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// typeFilter.name: String constraint. The key is swf:typeFilter.name. typeFilter.version: +// String constraint. The key is swf:typeFilter.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1670,7 +1654,7 @@ func (c *SWF) ListClosedWorkflowExecutionsWithContext(ctx aws.Context, input *Li // // Example iterating over at most 3 pages of a ListClosedWorkflowExecutions operation. // pageNum := 0 // err := client.ListClosedWorkflowExecutionsPages(params, -// func(page *WorkflowExecutionInfos, lastPage bool) bool { +// func(page *swf.WorkflowExecutionInfos, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1702,10 +1686,12 @@ func (c *SWF) ListClosedWorkflowExecutionsPagesWithContext(ctx aws.Context, inpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1781,7 +1767,7 @@ func (c *SWF) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1828,7 +1814,7 @@ func (c *SWF) ListDomainsWithContext(ctx aws.Context, input *ListDomainsInput, o // // Example iterating over at most 3 pages of a ListDomains operation. // pageNum := 0 // err := client.ListDomainsPages(params, -// func(page *ListDomainsOutput, lastPage bool) bool { +// func(page *swf.ListDomainsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1860,10 +1846,12 @@ func (c *SWF) ListDomainsPagesWithContext(ctx aws.Context, input *ListDomainsInp }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1934,19 +1922,15 @@ func (c *SWF) ListOpenWorkflowExecutionsRequest(input *ListOpenWorkflowExecution // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// typeFilter.name: String constraint. The key is swf:typeFilter.name. -// -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// the appropriate keys. tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// typeFilter.name: String constraint. The key is swf:typeFilter.name. typeFilter.version: +// String constraint. The key is swf:typeFilter.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1998,7 +1982,7 @@ func (c *SWF) ListOpenWorkflowExecutionsWithContext(ctx aws.Context, input *List // // Example iterating over at most 3 pages of a ListOpenWorkflowExecutions operation. // pageNum := 0 // err := client.ListOpenWorkflowExecutionsPages(params, -// func(page *WorkflowExecutionInfos, lastPage bool) bool { +// func(page *swf.WorkflowExecutionInfos, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2030,13 +2014,102 @@ func (c *SWF) ListOpenWorkflowExecutionsPagesWithContext(ctx aws.Context, input }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*WorkflowExecutionInfos), !p.HasNextPage()) { + break + } } + return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Simple Workflow Service. +// +// List tags for a given domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeLimitExceededFault "LimitExceededFault" +// Returned by any operation if a system imposed limitation has been reached. +// To address this fault you should either clean up unused resources or increase +// the limit by contacting AWS. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. 
+// +func (c *SWF) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListWorkflowTypes = "ListWorkflowTypes" // ListWorkflowTypesRequest generates a "aws/request.Request" representing the @@ -2105,7 +2178,7 @@ func (c *SWF) ListWorkflowTypesRequest(input *ListWorkflowTypesInput) (req *requ // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2157,7 +2230,7 @@ func (c *SWF) ListWorkflowTypesWithContext(ctx aws.Context, input *ListWorkflowT // // Example iterating over at most 3 pages of a ListWorkflowTypes operation. // pageNum := 0 // err := client.ListWorkflowTypesPages(params, -// func(page *ListWorkflowTypesOutput, lastPage bool) bool { +// func(page *swf.ListWorkflowTypesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2189,10 +2262,12 @@ func (c *SWF) ListWorkflowTypesPagesWithContext(ctx aws.Context, input *ListWork }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListWorkflowTypesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListWorkflowTypesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2268,7 +2343,7 @@ func (c *SWF) PollForActivityTaskRequest(input *PollForActivityTaskInput) (req * // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2402,7 +2477,7 @@ func (c *SWF) PollForDecisionTaskRequest(input *PollForDecisionTaskInput) (req * // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2459,7 +2534,7 @@ func (c *SWF) PollForDecisionTaskWithContext(ctx aws.Context, input *PollForDeci // // Example iterating over at most 3 pages of a PollForDecisionTask operation. // pageNum := 0 // err := client.PollForDecisionTaskPages(params, -// func(page *PollForDecisionTaskOutput, lastPage bool) bool { +// func(page *swf.PollForDecisionTaskOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -2491,10 +2566,12 @@ func (c *SWF) PollForDecisionTaskPagesWithContext(ctx aws.Context, input *PollFo }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*PollForDecisionTaskOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*PollForDecisionTaskOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2583,7 +2660,7 @@ func (c *SWF) RecordActivityTaskHeartbeatRequest(input *RecordActivityTaskHeartb // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2685,19 +2762,15 @@ func (c *SWF) RegisterActivityTypeRequest(input *RegisterActivityTypeInput) (req // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. -// -// name: String constraint. The key is swf:name. -// -// version: String constraint. The key is swf:version. +// the appropriate keys. defaultTaskList.name: String constraint. The key +// is swf:defaultTaskList.name. name: String constraint. The key is swf:name. +// version: String constraint. The key is swf:version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2709,10 +2782,9 @@ func (c *SWF) RegisterActivityTypeRequest(input *RegisterActivityTypeInput) (req // // Returned Error Codes: // * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" -// Returned if the type already exists in the specified domain. You get this -// fault even if the existing type is in deprecated status. 
You can specify -// another version if the intent is to create a new distinct version of the -// type. +// Returned if the type already exists in the specified domain. You may get +// this fault if you are registering a type that is either already registered +// or deprecated, or if you undeprecate a type that is currently registered. // // * ErrCodeLimitExceededFault "LimitExceededFault" // Returned by any operation if a system imposed limitation has been reached. @@ -2811,7 +2883,7 @@ func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Re // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2823,8 +2895,9 @@ func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Re // // Returned Error Codes: // * ErrCodeDomainAlreadyExistsFault "DomainAlreadyExistsFault" -// Returned if the specified domain already exists. You get this fault even -// if the existing domain is in deprecated status. +// Returned if the domain already exists. You may get this fault if you are +// registering a domain that is either already registered or deprecated, or +// if you undeprecate a domain that is currently registered. // // * ErrCodeLimitExceededFault "LimitExceededFault" // Returned by any operation if a system imposed limitation has been reached. @@ -2835,6 +2908,9 @@ func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Re // Returned when the caller doesn't have sufficient permissions to invoke the // action. // +// * ErrCodeTooManyTagsFault "TooManyTagsFault" +// You've exceeded the number of tags allowed for a domain. +// func (c *SWF) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) { req, out := c.RegisterDomainRequest(input) return out, req.Send() @@ -2920,19 +2996,15 @@ func (c *SWF) RegisterWorkflowTypeRequest(input *RegisterWorkflowTypeInput) (req // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. -// -// name: String constraint. The key is swf:name. -// -// version: String constraint. The key is swf:version. +// the appropriate keys. defaultTaskList.name: String constraint. The key +// is swf:defaultTaskList.name. name: String constraint. The key is swf:name. +// version: String constraint. The key is swf:version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
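The reworded fault text above ("You may get this fault if you are registering a domain that is either already registered or deprecated...") pairs with the awserr type-assertion pattern these comments keep referencing. A hedged caller-side sketch of tolerating DomainAlreadyExistsFault on RegisterDomain; the domain name and retention period are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	client := swf.New(session.Must(session.NewSession()))

	_, err := client.RegisterDomain(&swf.RegisterDomainInput{
		Name:                                   aws.String("example-domain"), // hypothetical
		WorkflowExecutionRetentionPeriodInDays: aws.String("30"),
	})
	// Per the docs above, this fault is also returned for a deprecated domain.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == swf.ErrCodeDomainAlreadyExistsFault {
		fmt.Println("domain already registered or deprecated:", aerr.Message())
	} else if err != nil {
		log.Fatal(err)
	}
}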
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2944,10 +3016,9 @@ func (c *SWF) RegisterWorkflowTypeRequest(input *RegisterWorkflowTypeInput) (req // // Returned Error Codes: // * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" -// Returned if the type already exists in the specified domain. You get this -// fault even if the existing type is in deprecated status. You can specify -// another version if the intent is to create a new distinct version of the -// type. +// Returned if the type already exists in the specified domain. You may get +// this fault if you are registering a type that is either already registered +// or deprecated, or if you undeprecate a type that is currently registered. // // * ErrCodeLimitExceededFault "LimitExceededFault" // Returned by any operation if a system imposed limitation has been reached. @@ -3056,7 +3127,7 @@ func (c *SWF) RequestCancelWorkflowExecutionRequest(input *RequestCancelWorkflow // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3154,7 +3225,7 @@ func (c *SWF) RespondActivityTaskCanceledRequest(input *RespondActivityTaskCance // closed. Therefore a task is reported as open while a worker is processing // it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, // RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// out (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). // // Access Control // @@ -3172,7 +3243,7 @@ func (c *SWF) RespondActivityTaskCanceledRequest(input *RespondActivityTaskCance // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3269,7 +3340,7 @@ func (c *SWF) RespondActivityTaskCompletedRequest(input *RespondActivityTaskComp // closed. Therefore a task is reported as open while a worker is processing // it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, // RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// out (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). 
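The comments around the Respond* operations describe the activity-task lifecycle: a task stays open while a worker holds it and is closed by RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or a timeout. A minimal worker sketch of that cycle, for illustration only; the domain and task-list names are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	client := swf.New(session.Must(session.NewSession()))

	// PollForActivityTask long-polls; an empty task token means the poll
	// timed out with no work available.
	task, err := client.PollForActivityTask(&swf.PollForActivityTaskInput{
		Domain:   aws.String("example-domain"),                     // hypothetical
		TaskList: &swf.TaskList{Name: aws.String("example-tasks")}, // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	if aws.StringValue(task.TaskToken) == "" {
		return
	}

	// ... perform the work, calling RecordActivityTaskHeartbeat as needed ...

	// Exactly one Respond* call (or a timeout) closes the task.
	if _, err := client.RespondActivityTaskCompleted(&swf.RespondActivityTaskCompletedInput{
		TaskToken: task.TaskToken,
		Result:    aws.String("done"),
	}); err != nil {
		log.Fatal(err)
	}
}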
// // Access Control // @@ -3287,7 +3358,7 @@ func (c *SWF) RespondActivityTaskCompletedRequest(input *RespondActivityTaskComp // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3379,7 +3450,7 @@ func (c *SWF) RespondActivityTaskFailedRequest(input *RespondActivityTaskFailedI // closed. Therefore a task is reported as open while a worker is processing // it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, // RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed -// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// out (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). // // Access Control // @@ -3397,7 +3468,7 @@ func (c *SWF) RespondActivityTaskFailedRequest(input *RespondActivityTaskFailedI // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3496,7 +3567,7 @@ func (c *SWF) RespondDecisionTaskCompletedRequest(input *RespondDecisionTaskComp // call. To allow for policies to be as readable as possible, you can express // permissions on decisions as if they were actual API calls, including applying // conditions to some parameters. For more information, see Using IAM to Manage -// Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3607,7 +3678,7 @@ func (c *SWF) SignalWorkflowExecutionRequest(input *SignalWorkflowExecutionInput // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3706,29 +3777,19 @@ func (c *SWF) StartWorkflowExecutionRequest(input *StartWorkflowExecutionInput) // * Use an Action element to allow or deny permission to call this action. 
// // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagList.member.0: The key is swf:tagList.member.0. -// -// tagList.member.1: The key is swf:tagList.member.1. -// -// tagList.member.2: The key is swf:tagList.member.2. -// -// tagList.member.3: The key is swf:tagList.member.3. -// -// tagList.member.4: The key is swf:tagList.member.4. -// -// taskList: String constraint. The key is swf:taskList.name. -// -// workflowType.name: String constraint. The key is swf:workflowType.name. -// -// workflowType.version: String constraint. The key is swf:workflowType.version. +// the appropriate keys. tagList.member.0: The key is swf:tagList.member.0. +// tagList.member.1: The key is swf:tagList.member.1. tagList.member.2: The +// key is swf:tagList.member.2. tagList.member.3: The key is swf:tagList.member.3. +// tagList.member.4: The key is swf:tagList.member.4. taskList: String constraint. +// The key is swf:taskList.name. workflowType.name: String constraint. The +// key is swf:workflowType.name. workflowType.version: String constraint. +// The key is swf:workflowType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3794,6 +3855,99 @@ func (c *SWF) StartWorkflowExecutionWithContext(ctx aws.Context, input *StartWor return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Simple Workflow Service. +// +// Add a tag to an Amazon SWF domain. +// +// Amazon SWF supports a maximum of 50 tags per resource. +// +// Returns awserr.Error for service API and SDK errors.
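The new TagResource operation introduced above can be exercised as follows; this is an illustrative sketch with a placeholder domain ARN and tag, and the 50-tags-per-resource limit noted in the docs applies. UntagResource (added further below) takes the same ARN plus the list of tag keys to remove.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	client := swf.New(session.Must(session.NewSession()))

	// Hypothetical domain ARN; DomainInfo now exposes the real one via its
	// new Arn field (see the DomainInfo change later in this diff).
	arn := "arn:aws:swf:us-east-1:123456789012:/domain/example-domain"

	_, err := client.TagResource(&swf.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags: []*swf.ResourceTag{
			{Key: aws.String("team"), Value: aws.String("platform")}, // placeholder tag
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. TooManyTagsFault past the 50-tag limit
	}
}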
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeTooManyTagsFault "TooManyTagsFault" +// You've exceeded the number of tags allowed for a domain. +// +// * ErrCodeLimitExceededFault "LimitExceededFault" +// Returned by any operation if a system imposed limitation has been reached. +// To address this fault you should either clean up unused resources or increase +// the limit by contacting AWS. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. +// +func (c *SWF) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTerminateWorkflowExecution = "TerminateWorkflowExecution" // TerminateWorkflowExecutionRequest generates a "aws/request.Request" representing the @@ -3869,7 +4023,7 @@ func (c *SWF) TerminateWorkflowExecutionRequest(input *TerminateWorkflowExecutio // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3910,53 +4064,480 @@ func (c *SWF) TerminateWorkflowExecutionWithContext(ctx aws.Context, input *Term return out, req.Send() } -// Provides the details of the ActivityTaskCancelRequested event. -type ActivityTaskCancelRequestedEventAttributes struct { - _ struct{} `type:"structure"` - - // The unique ID of the task. - // - // ActivityId is a required field - ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` - - // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the RequestCancelActivityTask decision for this cancellation - // request. 
This information can be useful for diagnosing problems by tracing - // back the chain of events leading up to this event. - // - // DecisionTaskCompletedEventId is a required field - DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` -} - -// String returns the string representation -func (s ActivityTaskCancelRequestedEventAttributes) String() string { - return awsutil.Prettify(s) -} +const opUndeprecateActivityType = "UndeprecateActivityType" -// GoString returns the string representation -func (s ActivityTaskCancelRequestedEventAttributes) GoString() string { - return s.String() -} +// UndeprecateActivityTypeRequest generates a "aws/request.Request" representing the +// client's request for the UndeprecateActivityType operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UndeprecateActivityType for more information on using the UndeprecateActivityType +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UndeprecateActivityTypeRequest method. +// req, resp := client.UndeprecateActivityTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) UndeprecateActivityTypeRequest(input *UndeprecateActivityTypeInput) (req *request.Request, output *UndeprecateActivityTypeOutput) { + op := &request.Operation{ + Name: opUndeprecateActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } -// SetActivityId sets the ActivityId field's value. -func (s *ActivityTaskCancelRequestedEventAttributes) SetActivityId(v string) *ActivityTaskCancelRequestedEventAttributes { - s.ActivityId = &v - return s -} + if input == nil { + input = &UndeprecateActivityTypeInput{} + } -// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. -func (s *ActivityTaskCancelRequestedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ActivityTaskCancelRequestedEventAttributes { - s.DecisionTaskCompletedEventId = &v - return s + output = &UndeprecateActivityTypeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return } -// Provides the details of the ActivityTaskCanceled event. -type ActivityTaskCanceledEventAttributes struct { - _ struct{} `type:"structure"` - - // Details of the cancellation. - Details *string `locationName:"details" type:"string"` - +// UndeprecateActivityType API operation for Amazon Simple Workflow Service. +// +// Undeprecates a previously deprecated activity type. After an activity type +// has been undeprecated, you can create new tasks of that activity type. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// * Use a Resource element with the domain name to limit the action to only +// specified domains. 
+// +// * Use an Action element to allow or deny permission to call this action. +// +// * Constrain the following parameters by using a Condition element with +// the appropriate keys. activityType.name: String constraint. The key is +// swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. +// +// If the caller doesn't have sufficient permissions to invoke the action, or +// the parameter values fall outside the specified constraints, the action fails. +// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// in the Amazon SWF Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation UndeprecateActivityType for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" +// Returned if the type already exists in the specified domain. You may get +// this fault if you are registering a type that is either already registered +// or deprecated, or if you undeprecate a type that is currently registered. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. +// +func (c *SWF) UndeprecateActivityType(input *UndeprecateActivityTypeInput) (*UndeprecateActivityTypeOutput, error) { + req, out := c.UndeprecateActivityTypeRequest(input) + return out, req.Send() +} + +// UndeprecateActivityTypeWithContext is the same as UndeprecateActivityType with the addition of +// the ability to pass a context and additional request options. +// +// See UndeprecateActivityType for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) UndeprecateActivityTypeWithContext(ctx aws.Context, input *UndeprecateActivityTypeInput, opts ...request.Option) (*UndeprecateActivityTypeOutput, error) { + req, out := c.UndeprecateActivityTypeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUndeprecateDomain = "UndeprecateDomain" + +// UndeprecateDomainRequest generates a "aws/request.Request" representing the +// client's request for the UndeprecateDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UndeprecateDomain for more information on using the UndeprecateDomain +// API call, and error handling. 
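UndeprecateActivityType, documented just above, reports TypeAlreadyExistsFault when the type is already registered. A hedged caller-side sketch; the domain, type name, and version are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	client := swf.New(session.Must(session.NewSession()))

	_, err := client.UndeprecateActivityType(&swf.UndeprecateActivityTypeInput{
		Domain: aws.String("example-domain"), // hypothetical
		ActivityType: &swf.ActivityType{
			Name:    aws.String("example-activity"), // hypothetical
			Version: aws.String("1.0"),
		},
	})
	// Per the docs above, this fault also covers the already-registered case.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == swf.ErrCodeTypeAlreadyExistsFault {
		fmt.Println("activity type is already registered:", aerr.Message())
	} else if err != nil {
		log.Fatal(err)
	}
}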
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UndeprecateDomainRequest method. +// req, resp := client.UndeprecateDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) UndeprecateDomainRequest(input *UndeprecateDomainInput) (req *request.Request, output *UndeprecateDomainOutput) { + op := &request.Operation{ + Name: opUndeprecateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UndeprecateDomainInput{} + } + + output = &UndeprecateDomainOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UndeprecateDomain API operation for Amazon Simple Workflow Service. +// +// Undeprecates a previously deprecated domain. After a domain has been undeprecated +// it can be used to create new workflow executions or register new types. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// * Use a Resource element with the domain name to limit the action to only +// specified domains. +// +// * Use an Action element to allow or deny permission to call this action. +// +// * You cannot use an IAM policy to constrain this action's parameters. +// +// If the caller doesn't have sufficient permissions to invoke the action, or +// the parameter values fall outside the specified constraints, the action fails. +// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// in the Amazon SWF Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation UndeprecateDomain for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeDomainAlreadyExistsFault "DomainAlreadyExistsFault" +// Returned if the domain already exists. You may get this fault if you are +// registering a domain that is either already registered or deprecated, or +// if you undeprecate a domain that is currently registered. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. +// +func (c *SWF) UndeprecateDomain(input *UndeprecateDomainInput) (*UndeprecateDomainOutput, error) { + req, out := c.UndeprecateDomainRequest(input) + return out, req.Send() +} + +// UndeprecateDomainWithContext is the same as UndeprecateDomain with the addition of +// the ability to pass a context and additional request options. 
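Every *WithContext variant in this file carries the same contract spelled out in these comments: the context must be non-nil and is used for request cancellation. A short sketch with UndeprecateDomainWithContext under a deadline; aws.Context is satisfied by a standard context.Context, and the domain name is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	client := swf.New(session.Must(session.NewSession()))

	// The call fails with a context error if the deadline passes first.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if _, err := client.UndeprecateDomainWithContext(ctx, &swf.UndeprecateDomainInput{
		Name: aws.String("example-domain"), // hypothetical
	}); err != nil {
		log.Fatal(err)
	}
}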
+// +// See UndeprecateDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) UndeprecateDomainWithContext(ctx aws.Context, input *UndeprecateDomainInput, opts ...request.Option) (*UndeprecateDomainOutput, error) { + req, out := c.UndeprecateDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUndeprecateWorkflowType = "UndeprecateWorkflowType" + +// UndeprecateWorkflowTypeRequest generates a "aws/request.Request" representing the +// client's request for the UndeprecateWorkflowType operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UndeprecateWorkflowType for more information on using the UndeprecateWorkflowType +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UndeprecateWorkflowTypeRequest method. +// req, resp := client.UndeprecateWorkflowTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) UndeprecateWorkflowTypeRequest(input *UndeprecateWorkflowTypeInput) (req *request.Request, output *UndeprecateWorkflowTypeOutput) { + op := &request.Operation{ + Name: opUndeprecateWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UndeprecateWorkflowTypeInput{} + } + + output = &UndeprecateWorkflowTypeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UndeprecateWorkflowType API operation for Amazon Simple Workflow Service. +// +// Undeprecates a previously deprecated workflow type. After a workflow type +// has been undeprecated, you can create new executions of that type. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// * Use a Resource element with the domain name to limit the action to only +// specified domains. +// +// * Use an Action element to allow or deny permission to call this action. +// +// * Constrain the following parameters by using a Condition element with +// the appropriate keys. workflowType.name: String constraint. The key is +// swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. +// +// If the caller doesn't have sufficient permissions to invoke the action, or +// the parameter values fall outside the specified constraints, the action fails. +// The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// in the Amazon SWF Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation UndeprecateWorkflowType for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeTypeAlreadyExistsFault "TypeAlreadyExistsFault" +// Returned if the type already exists in the specified domain. You may get +// this fault if you are registering a type that is either already registered +// or deprecated, or if you undeprecate a type that is currently registered. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. +// +func (c *SWF) UndeprecateWorkflowType(input *UndeprecateWorkflowTypeInput) (*UndeprecateWorkflowTypeOutput, error) { + req, out := c.UndeprecateWorkflowTypeRequest(input) + return out, req.Send() +} + +// UndeprecateWorkflowTypeWithContext is the same as UndeprecateWorkflowType with the addition of +// the ability to pass a context and additional request options. +// +// See UndeprecateWorkflowType for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) UndeprecateWorkflowTypeWithContext(ctx aws.Context, input *UndeprecateWorkflowTypeInput, opts ...request.Option) (*UndeprecateWorkflowTypeOutput, error) { + req, out := c.UndeprecateWorkflowTypeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *SWF) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Simple Workflow Service. +// +// Remove a tag from an Amazon SWF domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Workflow Service's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnknownResourceFault "UnknownResourceFault" +// Returned when the named resource cannot be found with in the scope of this +// operation (region or domain). This could happen if the named resource was +// never created or is no longer available for this operation. +// +// * ErrCodeLimitExceededFault "LimitExceededFault" +// Returned by any operation if a system imposed limitation has been reached. +// To address this fault you should either clean up unused resources or increase +// the limit by contacting AWS. +// +// * ErrCodeOperationNotPermittedFault "OperationNotPermittedFault" +// Returned when the caller doesn't have sufficient permissions to invoke the +// action. +// +func (c *SWF) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SWF) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides the details of the ActivityTaskCancelRequested event. +type ActivityTaskCancelRequestedEventAttributes struct { + _ struct{} `type:"structure"` + + // The unique ID of the task. + // + // ActivityId is a required field + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelActivityTask decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event.
+ // + // DecisionTaskCompletedEventId is a required field + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// SetActivityId sets the ActivityId field's value. +func (s *ActivityTaskCancelRequestedEventAttributes) SetActivityId(v string) *ActivityTaskCancelRequestedEventAttributes { + s.ActivityId = &v + return s +} + +// SetDecisionTaskCompletedEventId sets the DecisionTaskCompletedEventId field's value. +func (s *ActivityTaskCancelRequestedEventAttributes) SetDecisionTaskCompletedEventId(v int64) *ActivityTaskCancelRequestedEventAttributes { + s.DecisionTaskCompletedEventId = &v + return s +} + +// Provides the details of the ActivityTaskCanceled event. +type ActivityTaskCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // Details of the cancellation. + Details *string `locationName:"details" type:"string"` + // If set, contains the ID of the last ActivityTaskCancelRequested event recorded // for this activity task. This information can be useful for diagnosing problems // by tracing back the chain of events leading up to this event. @@ -4177,7 +4758,7 @@ type ActivityTaskScheduledEventAttributes struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` } @@ -4431,11 +5012,11 @@ type ActivityTypeConfiguration struct { // task must report progress by calling RecordActivityTaskHeartbeat. // // You can specify this value only when registering an activity type. The registered - // default value can be overridden when you schedule a task through the ScheduleActivityTaskDecision. - // If the activity worker subsequently attempts to record a heartbeat or returns - // a result, the activity worker receives an UnknownResource fault. In this - // case, Amazon SWF no longer considers the activity task to be valid; the activity - // worker should clean up the activity task. + // default value can be overridden when you schedule a task through the ScheduleActivityTask + // Decision. If the activity worker subsequently attempts to record a heartbeat + // or returns a result, the activity worker receives an UnknownResource fault. + // In this case, Amazon SWF no longer considers the activity task to be valid; + // the activity worker should clean up the activity task. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -4443,8 +5024,8 @@ type ActivityTypeConfiguration struct { // The default task list specified for this activity type at registration. This // default is used if a task list isn't provided when a task is scheduled through - // the ScheduleActivityTaskDecision. You can override the default registered - // task list when scheduling a task through the ScheduleActivityTaskDecision. + // the ScheduleActivityTask Decision. 
You can override the default registered + // task list when scheduling a task through the ScheduleActivityTask Decision. DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` // The default task priority for tasks of this activity type, specified at registration. @@ -4455,13 +5036,13 @@ type ActivityTypeConfiguration struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` // The default maximum duration, specified when registering the activity type, // for tasks of this activity type. You can override this default when scheduling - // a task through the ScheduleActivityTaskDecision. + // a task through the ScheduleActivityTask Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -4469,7 +5050,8 @@ type ActivityTypeConfiguration struct { // The default maximum duration, specified when registering the activity type, // that a task of an activity type can wait before being assigned to a worker. - // You can override this default when scheduling a task through the ScheduleActivityTaskDecision. + // You can override this default when scheduling a task through the ScheduleActivityTask + // Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -4477,7 +5059,7 @@ type ActivityTypeConfiguration struct { // The default maximum duration for tasks of an activity type specified when // registering the activity type. You can override this default when scheduling - // a task through the ScheduleActivityTaskDecision. + // a task through the ScheduleActivityTask Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -4614,7 +5196,7 @@ func (s *ActivityTypeInfo) SetStatus(v string) *ActivityTypeInfo { // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type CancelTimerDecisionAttributes struct { _ struct{} `type:"structure"` @@ -4666,7 +5248,7 @@ type CancelTimerFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
// // Cause is a required field @@ -4732,7 +5314,7 @@ func (s *CancelTimerFailedEventAttributes) SetTimerId(v string) *CancelTimerFail // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type CancelWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -4766,7 +5348,7 @@ type CancelWorkflowExecutionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -4811,7 +5393,7 @@ type ChildWorkflowExecutionCanceledEventAttributes struct { Details *string `locationName:"details" type:"string"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. + // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -4881,7 +5463,7 @@ type ChildWorkflowExecutionCompletedEventAttributes struct { _ struct{} `type:"structure"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. + // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -4957,7 +5539,7 @@ type ChildWorkflowExecutionFailedEventAttributes struct { Details *string `locationName:"details" type:"string"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. + // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -5036,7 +5618,7 @@ type ChildWorkflowExecutionStartedEventAttributes struct { _ struct{} `type:"structure"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. + // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -5087,7 +5669,7 @@ type ChildWorkflowExecutionTerminatedEventAttributes struct { _ struct{} `type:"structure"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. 
+ // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -5151,7 +5733,7 @@ type ChildWorkflowExecutionTimedOutEventAttributes struct { _ struct{} `type:"structure"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this child workflow execution. + // the StartChildWorkflowExecution Decision to start this child workflow execution. // This information can be useful for diagnosing problems by tracing back the // chain of events leading up to this event. // @@ -5281,7 +5863,7 @@ func (s *CloseStatusFilter) SetStatus(v string) *CloseStatusFilter { // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type CompleteWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -5316,7 +5898,7 @@ type CompleteWorkflowExecutionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -5366,19 +5948,15 @@ func (s *CompleteWorkflowExecutionFailedEventAttributes) SetDecisionTaskComplete // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tag – A tag used to identify the workflow execution -// -// taskList – String constraint. The key is swf:taskList.name. -// -// workflowType.version – String constraint. The key is swf:workflowType.version. +// the appropriate keys. tag – A tag used to identify the workflow execution +// taskList – String constraint. The key is swf:taskList.name. workflowType.version +// – String constraint. The key is swf:workflowType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type ContinueAsNewWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -5440,7 +6018,7 @@ type ContinueAsNewWorkflowExecutionDecisionAttributes struct { // Higher numbers indicate higher priority. 
// // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` @@ -5555,7 +6133,7 @@ type ContinueAsNewWorkflowExecutionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -5986,13 +6564,13 @@ func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingD // * CompleteWorkflowExecution – Closes the workflow execution and records // a WorkflowExecutionCompleted event in the history . // -// * ContinueAsNewWorkflowExecution – Closes the workflow execution and starts -// a new workflow execution of the same type using the same workflow ID and -// a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded in -// the history. +// * ContinueAsNewWorkflowExecution – Closes the workflow execution and +// starts a new workflow execution of the same type using the same workflow +// ID and a unique run Id. A WorkflowExecutionContinuedAsNew event is recorded +// in the history. // -// * FailWorkflowExecution – Closes the workflow execution and records a -// WorkflowExecutionFailed event in the history. +// * FailWorkflowExecution – Closes the workflow execution and records +// a WorkflowExecutionFailed event in the history. // // * RecordMarker – Records a MarkerRecorded event in the history. Markers // can be used for adding custom information in the history for instance @@ -6031,7 +6609,7 @@ func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingD // this action as if they were members of the API. Treating decisions as a pseudo // API maintains a uniform conceptual model and helps keep policies readable. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Decision Failure @@ -6050,7 +6628,7 @@ func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingD // error. The event attribute's cause parameter indicates the cause. If cause // is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked // sufficient permissions. For details and example IAM policies, see Using IAM -// to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // * ScheduleActivityTaskFailed – A ScheduleActivityTask decision failed. @@ -6061,15 +6639,15 @@ func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingD // failed. 
This could happen if there is no open activity task with the specified // activityId. // -// * StartTimerFailed – A StartTimer decision failed. This could happen if -// there is another open timer with the same timerId. +// * StartTimerFailed – A StartTimer decision failed. This could happen +// if there is another open timer with the same timerId. // // * CancelTimerFailed – A CancelTimer decision failed. This could happen // if there is no open timer with the specified timerId. // -// * StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution decision -// failed. This could happen if the workflow type specified isn't registered, -// is deprecated, or the decision isn't properly configured. +// * StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution +// decision failed. This could happen if the workflow type specified isn't +// registered, is deprecated, or the decision isn't properly configured. // // * SignalExternalWorkflowExecutionFailed – A SignalExternalWorkflowExecution // decision failed. This could happen if the workflowID specified in the @@ -6079,9 +6657,9 @@ func (s *CountPendingDecisionTasksInput) SetTaskList(v *TaskList) *CountPendingD // decision failed. This could happen if the workflowID specified in the // decision was incorrect. // -// * CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision failed. -// This could happen if there is an unhandled decision task pending in the -// workflow execution. +// * CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision +// failed. This could happen if there is an unhandled decision task pending +// in the workflow execution. // // * CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution decision // failed. This could happen if there is an unhandled decision task pending @@ -6430,7 +7008,7 @@ type DecisionTaskScheduledEventAttributes struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` } @@ -6834,8 +7412,8 @@ type DescribeActivityTypeOutput struct { // * REGISTERED – The type is registered and available. Workers supporting // this type should be running. // - // * DEPRECATED – The type was deprecated using DeprecateActivityType, but - // is still in use. You should keep workers supporting this type running. + // * DEPRECATED – The type was deprecated using DeprecateActivityType, + // but is still in use. You should keep workers supporting this type running. // You cannot create new tasks of this type. // // TypeInfo is a required field @@ -7152,8 +7730,8 @@ type DescribeWorkflowTypeOutput struct { // * REGISTERED – The type is registered and available. Workers supporting // this type should be running. // - // * DEPRECATED – The type was deprecated using DeprecateWorkflowType, but - // is still in use. You should keep workers supporting this type running. + // * DEPRECATED – The type was deprecated using DeprecateWorkflowType, + // but is still in use. You should keep workers supporting this type running. // You cannot create new workflow executions of this type. 
// // TypeInfo is a required field @@ -7212,6 +7790,9 @@ func (s *DomainConfiguration) SetWorkflowExecutionRetentionPeriodInDays(v string type DomainInfo struct { _ struct{} `type:"structure"` + // The ARN of the domain. + Arn *string `locationName:"arn" min:"1" type:"string"` + // The description of the domain provided through RegisterDomain. Description *string `locationName:"description" type:"string"` @@ -7222,12 +7803,12 @@ type DomainInfo struct { // The status of the domain: // - // * REGISTERED – The domain is properly registered and available. You can - // use this domain for registering types and creating new workflow executions. - // + // * REGISTERED – The domain is properly registered and available. You + // can use this domain for registering types and creating new workflow executions. // - // * DEPRECATED – The domain was deprecated using DeprecateDomain, but is - // still in use. You should not create new workflow executions in this domain. + // * DEPRECATED – The domain was deprecated using DeprecateDomain, but + // is still in use. You should not create new workflow executions in this + // domain. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` @@ -7243,6 +7824,12 @@ func (s DomainInfo) GoString() string { return s.String() } +// SetArn sets the Arn field's value. +func (s *DomainInfo) SetArn(v string) *DomainInfo { + s.Arn = &v + return s +} + // SetDescription sets the Description field's value. func (s *DomainInfo) SetDescription(v string) *DomainInfo { s.Description = &v @@ -7411,7 +7998,7 @@ func (s *ExternalWorkflowExecutionSignaledEventAttributes) SetWorkflowExecution( // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type FailWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -7454,7 +8041,7 @@ type FailWorkflowExecutionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -7504,18 +8091,16 @@ type GetWorkflowExecutionHistoryInput struct { // Execution is a required field Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. 
MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -7634,16 +8219,16 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // Event within a workflow execution. A history event can be one of these types: // -// * ActivityTaskCancelRequested – A RequestCancelActivityTask decision was -// received by the system. +// * ActivityTaskCancelRequested – A RequestCancelActivityTask decision +// was received by the system. // // * ActivityTaskCanceled – The activity task was successfully canceled. // -// * ActivityTaskCompleted – An activity worker successfully completed an -// activity task by calling RespondActivityTaskCompleted. +// * ActivityTaskCompleted – An activity worker successfully completed +// an activity task by calling RespondActivityTaskCompleted. // -// * ActivityTaskFailed – An activity worker failed an activity task by calling -// RespondActivityTaskFailed. +// * ActivityTaskFailed – An activity worker failed an activity task by +// calling RespondActivityTaskFailed. // // * ActivityTaskScheduled – An activity task was scheduled for execution. // @@ -7665,8 +8250,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * ChildWorkflowExecutionCompleted – A child workflow execution, started // by this workflow execution, completed successfully and was closed. // -// * ChildWorkflowExecutionFailed – A child workflow execution, started by -// this workflow execution, failed to complete successfully and was closed. +// * ChildWorkflowExecutionFailed – A child workflow execution, started +// by this workflow execution, failed to complete successfully and was closed. // // * ChildWorkflowExecutionStarted – A child workflow execution was successfully // started. @@ -7677,7 +8262,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * ChildWorkflowExecutionTimedOut – A child workflow execution, started // by this workflow execution, timed out and was closed. // -// * CompleteWorkflowExecutionFailed – The workflow execution failed to complete. +// * CompleteWorkflowExecutionFailed – The workflow execution failed to +// complete. // // * ContinueAsNewWorkflowExecutionFailed – The workflow execution failed // to complete after being continued as a new workflow execution. @@ -7702,8 +8288,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * FailWorkflowExecutionFailed – A request to mark a workflow execution // as failed, itself failed. // -// * MarkerRecorded – A marker was recorded in the workflow history as the -// result of a RecordMarker decision. +// * MarkerRecorded – A marker was recorded in the workflow history as +// the result of a RecordMarker decision. 
// // * RecordMarkerFailed – A RecordMarker decision was returned as failed. // @@ -7723,8 +8309,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * SignalExternalWorkflowExecutionFailed – The request to signal an external // workflow execution failed. // -// * SignalExternalWorkflowExecutionInitiated – A request to signal an external -// workflow was made. +// * SignalExternalWorkflowExecutionInitiated – A request to signal an +// external workflow was made. // // * StartActivityTaskFailed – A scheduled activity task failed to start. // @@ -7732,8 +8318,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // decision. This happens when the decision isn't configured properly, for // example the workflow type specified isn't registered. // -// * StartChildWorkflowExecutionInitiated – A request was made to start a -// child workflow execution. +// * StartChildWorkflowExecutionInitiated – A request was made to start +// a child workflow execution. // // * StartTimerFailed – Failed to process StartTimer decision. This happens // when the decision isn't configured properly, for example a timer already @@ -7745,8 +8331,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * TimerFired – A timer, previously started for this workflow execution, // fired. // -// * TimerStarted – A timer was started for the workflow execution due to -// a StartTimer decision. +// * TimerStarted – A timer was started for the workflow execution due +// to a StartTimer decision. // // * WorkflowExecutionCancelRequested – A request to cancel this workflow // execution was made. @@ -7754,8 +8340,8 @@ func (s *GetWorkflowExecutionHistoryOutput) SetNextPageToken(v string) *GetWorkf // * WorkflowExecutionCanceled – The workflow execution was successfully // canceled and closed. // -// * WorkflowExecutionCompleted – The workflow execution was closed due to -// successful completion. +// * WorkflowExecutionCompleted – The workflow execution was closed due +// to successful completion. // // * WorkflowExecutionContinuedAsNew – The workflow execution was closed // and a new execution of the same type was created with the same workflowId. @@ -8674,21 +9260,19 @@ type ListActivityTypesInput struct { // Domain is a required field Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` // If specified, only lists the activity types that have this name. Name *string `locationName:"name" min:"1" type:"string"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. 
Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -8844,18 +9428,16 @@ type ListClosedWorkflowExecutionsInput struct { // exclusive. You can specify at most one of these in a request. ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -9006,18 +9588,16 @@ func (s *ListClosedWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) type ListDomainsInput struct { _ struct{} `type:"structure"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. 
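A minimal sketch (not part of the vendored diff) of how the nextPageToken contract documented in the hunks above is consumed from this SDK. ListDomains is used as the example call, and the region is an illustrative assumption; every paginated SWF operation follows the same shape.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	// Region is an assumption for this sketch.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := swf.New(sess)

	input := &swf.ListDomainsInput{
		RegistrationStatus: aws.String(swf.RegistrationStatusRegistered),
		// Upper limit only; a page may hold fewer results.
		MaximumPageSize: aws.Int64(100),
	}
	for {
		out, err := client.ListDomains(input)
		if err != nil {
			// An expired pagination token surfaces here as a 400 error.
			log.Fatal(err)
		}
		for _, d := range out.DomainInfos {
			fmt.Println(aws.StringValue(d.Name))
		}
		if out.NextPageToken == nil {
			break
		}
		// Tokens expire after 60 seconds: request the next page promptly,
		// keeping all other arguments unchanged.
		input.NextPageToken = out.NextPageToken
	}
}

The SDK's generated ListDomainsPages helper wraps the same loop; the explicit form is shown only to make the token handling described above visible.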
@@ -9135,18 +9715,16 @@ type ListOpenWorkflowExecutionsInput struct { // specify at most one of these in a request. ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -9272,6 +9850,70 @@ func (s *ListOpenWorkflowExecutionsInput) SetTypeFilter(v *WorkflowTypeFilter) * return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the Amazon SWF domain. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // An array of tags associated with the domain. + Tags []*ResourceTag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
+func (s *ListTagsForResourceOutput) SetTags(v []*ResourceTag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type ListWorkflowTypesInput struct { _ struct{} `type:"structure"` @@ -9280,21 +9922,19 @@ type ListWorkflowTypesInput struct { // Domain is a required field Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. - // - // This is an upper limit only; the actual number of results returned per call - // may be fewer than the specified maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` // If specified, lists the workflow type with this name. Name *string `locationName:"name" min:"1" type:"string"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -9521,7 +10161,7 @@ type PollForActivityTaskInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // TaskList is a required field TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` @@ -9674,18 +10314,19 @@ type PollForDecisionTaskInput struct { // arise. The form of this identity is user defined. Identity *string `locationName:"identity" type:"string"` - // The maximum number of results that are returned per call. nextPageToken can - // be used to obtain futher pages of results. The default is 1000, which is - // the maximum allowed page size. You can, however, specify a page size smaller - // than the maximum. + // The maximum number of results that are returned per call. Use nextPageToken + // to obtain further pages of results. // // This is an upper limit only; the actual number of results returned per call // may be fewer than the specified maximum. MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` - // If a NextPageToken was returned by a previous call, there are more results - // available. To retrieve the next page of results, make the call again using - // the returned token in nextPageToken. Keep all other arguments unchanged. + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. 
Keep all other + // arguments unchanged. Each pagination token expires after 60 seconds. Using + // an expired pagination token will return a 400 error: "Specified token has + // exceeded its maximum lifetime". // // The configured maximumPageSize determines how many results can be returned // in a single call. @@ -9704,7 +10345,7 @@ type PollForDecisionTaskInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // TaskList is a required field TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` @@ -9978,7 +10619,7 @@ func (s *RecordActivityTaskHeartbeatOutput) SetCancelRequested(v bool) *RecordAc // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type RecordMarkerDecisionAttributes struct { _ struct{} `type:"structure"` @@ -10039,7 +10680,7 @@ type RecordMarkerFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -10094,7 +10735,7 @@ type RegisterActivityTypeInput struct { // a task of this type must report progress by calling RecordActivityTaskHeartbeat. // If the timeout is exceeded, the activity task is automatically timed out. // This default can be overridden when scheduling an activity task using the - // ScheduleActivityTaskDecision. If the activity worker subsequently attempts + // ScheduleActivityTask Decision. If the activity worker subsequently attempts // to record a heartbeat or returns a result, the activity worker receives an // UnknownResource fault. In this case, Amazon SWF no longer considers the activity // task to be valid; the activity worker should clean up the activity task. @@ -10105,7 +10746,7 @@ type RegisterActivityTypeInput struct { // If set, specifies the default task list to use for scheduling tasks of this // activity type. This default task list is used if a task list isn't provided - // when a task is scheduled through the ScheduleActivityTaskDecision. + // when a task is scheduled through the ScheduleActivityTask Decision. DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` // The default task priority to assign to the activity type. If not assigned, @@ -10114,13 +10755,13 @@ type RegisterActivityTypeInput struct { // higher priority. 
// // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the in the Amazon SWF Developer Guide.. DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` // If set, specifies the default maximum duration for a task of this activity // type. This default can be overridden when scheduling an activity task using - // the ScheduleActivityTaskDecision. + // the ScheduleActivityTask Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -10128,7 +10769,7 @@ type RegisterActivityTypeInput struct { // If set, specifies the default maximum duration that a task of this activity // type can wait before being assigned to a worker. This default can be overridden - // when scheduling an activity task using the ScheduleActivityTaskDecision. + // when scheduling an activity task using the ScheduleActivityTask Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -10136,7 +10777,7 @@ type RegisterActivityTypeInput struct { // If set, specifies the default maximum duration that a worker can take to // process tasks of this activity type. This default can be overridden when - // scheduling an activity task using the ScheduleActivityTaskDecision. + // scheduling an activity task using the ScheduleActivityTask Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -10154,7 +10795,7 @@ type RegisterActivityTypeInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -10166,7 +10807,7 @@ type RegisterActivityTypeInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // Version is a required field Version *string `locationName:"version" min:"1" type:"string" required:"true"` @@ -10300,11 +10941,17 @@ type RegisterDomainInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // Tags to be added when registering a domain. + // + // Tags may only contain unicode letters, digits, whitespace, or these symbols: + // _ . : / = + - @. 
+ Tags []*ResourceTag `locationName:"tags" type:"list"` + // The duration (in days) that records and histories of workflow executions // on the domain should be kept by the service. After the retention period, // the workflow execution isn't available in the results of visibility calls. @@ -10314,7 +10961,7 @@ type RegisterDomainInput struct { // record and its history are deleted. // // The maximum workflow execution retention period is 90 days. For more information - // about Amazon SWF service limits, see: Amazon SWF Service Limits (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html) + // about Amazon SWF service limits, see: Amazon SWF Service Limits (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html) // in the Amazon SWF Developer Guide. // // WorkflowExecutionRetentionPeriodInDays is a required field @@ -10346,6 +10993,16 @@ func (s *RegisterDomainInput) Validate() error { if s.WorkflowExecutionRetentionPeriodInDays != nil && len(*s.WorkflowExecutionRetentionPeriodInDays) < 1 { invalidParams.Add(request.NewErrParamMinLen("WorkflowExecutionRetentionPeriodInDays", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10365,6 +11022,12 @@ func (s *RegisterDomainInput) SetName(v string) *RegisterDomainInput { return s } +// SetTags sets the Tags field's value. +func (s *RegisterDomainInput) SetTags(v []*ResourceTag) *RegisterDomainInput { + s.Tags = v + return s +} + // SetWorkflowExecutionRetentionPeriodInDays sets the WorkflowExecutionRetentionPeriodInDays field's value. func (s *RegisterDomainInput) SetWorkflowExecutionRetentionPeriodInDays(v string) *RegisterDomainInput { s.WorkflowExecutionRetentionPeriodInDays = &v @@ -10392,7 +11055,7 @@ type RegisterWorkflowTypeInput struct { // when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution // action explicitly or due to an expired timeout. This default can be overridden // when starting a workflow execution using the StartWorkflowExecution action - // or the StartChildWorkflowExecutionDecision. + // or the StartChildWorkflowExecution Decision. // // The supported child policies are: // @@ -10408,7 +11071,7 @@ type RegisterWorkflowTypeInput struct { // If set, specifies the default maximum duration for executions of this workflow // type. You can override this default when starting an execution through the - // StartWorkflowExecution Action or StartChildWorkflowExecutionDecision. + // StartWorkflowExecution Action or StartChildWorkflowExecution Decision. // // The duration is specified in seconds; an integer greater than or equal to // 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot @@ -10421,15 +11084,15 @@ type RegisterWorkflowTypeInput struct { // // Executions of this workflow type need IAM roles to invoke Lambda functions. // If you don't specify an IAM role when you start this workflow type, the default - // Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) + // Lambda role is attached to the execution. 
For more information, see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) // in the Amazon SWF Developer Guide. DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` // If set, specifies the default task list to use for scheduling decision tasks // for executions of this workflow type. This default is used only if a task // list isn't provided when starting the execution through the StartWorkflowExecution - // Action or StartChildWorkflowExecutionDecision. + // Action or StartChildWorkflowExecution Decision. DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` // The default task priority to assign to the workflow type. If not assigned, @@ -10438,13 +11101,14 @@ type RegisterWorkflowTypeInput struct { // higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` // If set, specifies the default maximum duration of decision tasks for this // workflow type. This default can be overridden when starting a workflow execution - // using the StartWorkflowExecution action or the StartChildWorkflowExecutionDecision. + // using the StartWorkflowExecution action or the StartChildWorkflowExecution + // Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -10462,7 +11126,7 @@ type RegisterWorkflowTypeInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -10475,7 +11139,7 @@ type RegisterWorkflowTypeInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // Version is a required field Version *string `locationName:"version" min:"1" type:"string" required:"true"` @@ -10619,7 +11283,7 @@ func (s RegisterWorkflowTypeOutput) GoString() string { // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
 type RequestCancelActivityTaskDecisionAttributes struct {
 	_ struct{} `type:"structure"`
@@ -10676,7 +11340,7 @@ type RequestCancelActivityTaskFailedEventAttributes struct {
 	//
 	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it
 	// lacked sufficient permissions. For details and example IAM policies, see
-	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
+	// Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
 	// in the Amazon SWF Developer Guide.
 	//
 	// Cause is a required field
@@ -10737,7 +11401,7 @@ func (s *RequestCancelActivityTaskFailedEventAttributes) SetDecisionTaskComplete
 // the parameter values fall outside the specified constraints, the action fails.
 // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED.
 // For details and example IAM policies, see Using IAM to Manage Access to Amazon
-// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
+// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
 // in the Amazon SWF Developer Guide.
 type RequestCancelExternalWorkflowExecutionDecisionAttributes struct {
 	_ struct{} `type:"structure"`
@@ -10809,7 +11473,7 @@ type RequestCancelExternalWorkflowExecutionFailedEventAttributes struct {
 	//
 	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it
 	// lacked sufficient permissions. For details and example IAM policies, see
-	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
+	// Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
 	// in the Amazon SWF Developer Guide.
 	//
 	// Cause is a required field
@@ -11032,6 +11696,61 @@ func (s RequestCancelWorkflowExecutionOutput) GoString() string {
 	return s.String()
 }
 
+// Tags are key-value pairs that can be associated with Amazon SWF domains.
+//
+// Tags may only contain unicode letters, digits, whitespace, or these symbols:
+// _ . : / = + - @.
+type ResourceTag struct {
+	_ struct{} `type:"structure"`
+
+	// The key of a tag.
+	//
+	// Key is a required field
+	Key *string `locationName:"key" min:"1" type:"string" required:"true"`
+
+	// The value of a tag.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceTag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceTag) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResourceTag) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ResourceTag"}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ResourceTag) SetKey(v string) *ResourceTag {
+	s.Key = &v
+	return s
+}
+
+// SetValue sets the Value field's value. 
+func (s *ResourceTag) SetValue(v string) *ResourceTag { + s.Value = &v + return s +} + type RespondActivityTaskCanceledInput struct { _ struct{} `type:"structure"` @@ -11348,19 +12067,16 @@ func (s RespondDecisionTaskCompletedOutput) GoString() string { // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// activityType.name – String constraint. The key is swf:activityType.name. -// -// activityType.version – String constraint. The key is swf:activityType.version. -// -// taskList – String constraint. The key is swf:taskList.name. +// the appropriate keys. activityType.name – String constraint. The key +// is swf:activityType.name. activityType.version – String constraint. +// The key is swf:activityType.version. taskList – String constraint. The +// key is swf:taskList.name. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type ScheduleActivityTaskDecisionAttributes struct { _ struct{} `type:"structure"` @@ -11455,7 +12171,7 @@ type ScheduleActivityTaskDecisionAttributes struct { // Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` } @@ -11578,7 +12294,7 @@ type ScheduleActivityTaskFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -11727,7 +12443,7 @@ type ScheduleLambdaFunctionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -11803,7 +12519,7 @@ func (s *ScheduleLambdaFunctionFailedEventAttributes) SetName(v string) *Schedul // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type SignalExternalWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -11902,7 +12618,7 @@ type SignalExternalWorkflowExecutionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -12185,22 +12901,17 @@ func (s SignalWorkflowExecutionOutput) GoString() string { // * Use an Action element to allow or deny permission to call this action. // // * Constrain the following parameters by using a Condition element with -// the appropriate keys. -// -// tagList.member.N – The key is "swf:tagList.N" where N is the tag number from -// 0 to 4, inclusive. -// -// taskList – String constraint. The key is swf:taskList.name. -// -// workflowType.name – String constraint. The key is swf:workflowType.name. -// -// workflowType.version – String constraint. The key is swf:workflowType.version. +// the appropriate keys. tagList.member.N – The key is "swf:tagList.N" +// where N is the tag number from 0 to 4, inclusive. taskList – String +// constraint. The key is swf:taskList.name. workflowType.name – String +// constraint. The key is swf:workflowType.name. workflowType.version – +// String constraint. The key is swf:workflowType.version. // // If the caller doesn't have sufficient permissions to invoke the action, or // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type StartChildWorkflowExecutionDecisionAttributes struct { _ struct{} `type:"structure"` @@ -12275,7 +12986,7 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` @@ -12424,7 +13135,7 @@ type StartChildWorkflowExecutionFailedEventAttributes struct { // // When cause is set to OPERATION_NOT_PERMITTED, the decision fails because // it lacks sufficient permissions. 
For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -12435,7 +13146,7 @@ type StartChildWorkflowExecutionFailedEventAttributes struct { Control *string `locationName:"control" type:"string"` // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartChildWorkflowExecutionDecision to request this + // that resulted in the StartChildWorkflowExecution Decision to request this // child workflow execution. This information can be useful for diagnosing problems // by tracing back the chain of events. // @@ -12443,9 +13154,10 @@ type StartChildWorkflowExecutionFailedEventAttributes struct { DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` // When the cause is WORKFLOW_ALREADY_RUNNING, initiatedEventId is the ID of - // the StartChildWorkflowExecutionInitiated event that corresponds to the StartChildWorkflowExecutionDecision - // to start the workflow execution. You can use this information to diagnose - // problems by tracing back the chain of events leading up to this event. + // the StartChildWorkflowExecutionInitiated event that corresponds to the StartChildWorkflowExecution + // Decision to start the workflow execution. You can use this information to + // diagnose problems by tracing back the chain of events leading up to this + // event. // // When the cause isn't WORKFLOW_ALREADY_RUNNING, initiatedEventId is set to // 0 because the StartChildWorkflowExecutionInitiated event doesn't exist. @@ -12458,7 +13170,7 @@ type StartChildWorkflowExecutionFailedEventAttributes struct { // WorkflowId is a required field WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` - // The workflow type provided in the StartChildWorkflowExecutionDecision that + // The workflow type provided in the StartChildWorkflowExecution Decision that // failed. // // WorkflowType is a required field @@ -12538,7 +13250,7 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { Control *string `locationName:"control" type:"string"` // The ID of the DecisionTaskCompleted event corresponding to the decision task - // that resulted in the StartChildWorkflowExecutionDecision to request this + // that resulted in the StartChildWorkflowExecution Decision to request this // child workflow execution. This information can be useful for diagnosing problems // by tracing back the cause of events. // @@ -12572,7 +13284,7 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` @@ -12685,7 +13397,7 @@ type StartLambdaFunctionFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because the // IAM role attached to the execution lacked sufficient permissions. 
For details - // and example IAM policies, see Lambda Tasks (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) + // and example IAM policies, see Lambda Tasks (https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) // in the Amazon SWF Developer Guide. Cause *string `locationName:"cause" type:"string" enum:"StartLambdaFunctionFailedCause"` @@ -12744,7 +13456,7 @@ func (s *StartLambdaFunctionFailedEventAttributes) SetScheduledEventId(v int64) // the parameter values fall outside the specified constraints, the action fails. // The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. // For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) +// SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type StartTimerDecisionAttributes struct { _ struct{} `type:"structure"` @@ -12830,7 +13542,7 @@ type StartTimerFailedEventAttributes struct { // // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it // lacked sufficient permissions. For details and example IAM policies, see - // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) + // Using IAM to Manage Access to Amazon SWF Workflows (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // // Cause is a required field @@ -12933,8 +13645,8 @@ type StartWorkflowExecutionInput struct { // Executions of this workflow type need IAM roles to invoke Lambda functions. // If you don't attach an IAM role, any attempt to schedule a Lambda task fails. // This results in a ScheduleLambdaFunctionFailed history event. For more information, - // see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) + // see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) // in the Amazon SWF Developer Guide. LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` @@ -12955,7 +13667,7 @@ type StartWorkflowExecutionInput struct { // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. TaskList *TaskList `locationName:"taskList" type:"structure"` // The task priority to use for this workflow execution. This overrides any @@ -12965,7 +13677,7 @@ type StartWorkflowExecutionInput struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` @@ -12986,11 +13698,11 @@ type StartWorkflowExecutionInput struct { // use this to associate a custom identifier with the workflow execution. 
You // may specify the same identifier if a workflow execution is logically a restart // of a previous execution. You cannot have two open workflow executions with - // the same workflowId at the same time. + // the same workflowId at the same time within the same domain. // // The specified string must not start or end with whitespace. It must not contain // a : (colon), / (slash), | (vertical bar), or any control characters (\u0000-\u001f - // | \u007f-\u009f). Also, it must not contain the literal string arn. + // | \u007f-\u009f). Also, it must not be the literal string arn. // // WorkflowId is a required field WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` @@ -13140,32 +13852,97 @@ func (s *StartWorkflowExecutionOutput) SetRunId(v string) *StartWorkflowExecutio return s } -// Used to filter the workflow executions in visibility APIs based on a tag. -type TagFilter struct { +// Used to filter the workflow executions in visibility APIs based on a tag. +type TagFilter struct { + _ struct{} `type:"structure"` + + // Specifies the tag that must be associated with the execution for it to meet + // the filter criteria. + // + // Tags may only contain unicode letters, digits, whitespace, or these symbols: + // _ . : / = + - @. + // + // Tag is a required field + Tag *string `locationName:"tag" type:"string" required:"true"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagFilter"} + if s.Tag == nil { + invalidParams.Add(request.NewErrParamRequired("Tag")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTag sets the Tag field's value. +func (s *TagFilter) SetTag(v string) *TagFilter { + s.Tag = &v + return s +} + +type TagResourceInput struct { _ struct{} `type:"structure"` - // Specifies the tag that must be associated with the execution for it to meet - // the filter criteria. + // The Amazon Resource Name (ARN) for the Amazon SWF domain. // - // Tag is a required field - Tag *string `locationName:"tag" type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The list of tags to add to a domain. + // + // Tags may only contain unicode letters, digits, whitespace, or these symbols: + // _ . : / = + - @. + // + // Tags is a required field + Tags []*ResourceTag `locationName:"tags" type:"list" required:"true"` } // String returns the string representation -func (s TagFilter) String() string { +func (s TagResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagFilter) GoString() string { +func (s TagResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TagFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagFilter"} - if s.Tag == nil { - invalidParams.Add(request.NewErrParamRequired("Tag")) +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -13174,12 +13951,32 @@ func (s *TagFilter) Validate() error { return nil } -// SetTag sets the Tag field's value. -func (s *TagFilter) SetTag(v string) *TagFilter { - s.Tag = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*ResourceTag) *TagResourceInput { + s.Tags = v return s } +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Represents a task list. type TaskList struct { _ struct{} `type:"structure"` @@ -13505,6 +14302,278 @@ func (s *TimerStartedEventAttributes) SetTimerId(v string) *TimerStartedEventAtt return s } +type UndeprecateActivityTypeInput struct { + _ struct{} `type:"structure"` + + // The activity type to undeprecate. + // + // ActivityType is a required field + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The name of the domain of the deprecated activity type. + // + // Domain is a required field + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UndeprecateActivityTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateActivityTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UndeprecateActivityTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UndeprecateActivityTypeInput"} + if s.ActivityType == nil { + invalidParams.Add(request.NewErrParamRequired("ActivityType")) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.ActivityType != nil { + if err := s.ActivityType.Validate(); err != nil { + invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActivityType sets the ActivityType field's value. +func (s *UndeprecateActivityTypeInput) SetActivityType(v *ActivityType) *UndeprecateActivityTypeInput { + s.ActivityType = v + return s +} + +// SetDomain sets the Domain field's value. 
+func (s *UndeprecateActivityTypeInput) SetDomain(v string) *UndeprecateActivityTypeInput { + s.Domain = &v + return s +} + +type UndeprecateActivityTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UndeprecateActivityTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateActivityTypeOutput) GoString() string { + return s.String() +} + +type UndeprecateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the deprecated domain to undeprecate. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UndeprecateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UndeprecateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UndeprecateDomainInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *UndeprecateDomainInput) SetName(v string) *UndeprecateDomainInput { + s.Name = &v + return s +} + +type UndeprecateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UndeprecateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateDomainOutput) GoString() string { + return s.String() +} + +type UndeprecateWorkflowTypeInput struct { + _ struct{} `type:"structure"` + + // The name of the domain of the deprecated workflow type. + // + // Domain is a required field + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The workflow type to undeprecate. + // + // WorkflowType is a required field + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UndeprecateWorkflowTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateWorkflowTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UndeprecateWorkflowTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UndeprecateWorkflowTypeInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.WorkflowType == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowType")) + } + if s.WorkflowType != nil { + if err := s.WorkflowType.Validate(); err != nil { + invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomain sets the Domain field's value.
+func (s *UndeprecateWorkflowTypeInput) SetDomain(v string) *UndeprecateWorkflowTypeInput { + s.Domain = &v + return s +} + +// SetWorkflowType sets the WorkflowType field's value. +func (s *UndeprecateWorkflowTypeInput) SetWorkflowType(v *WorkflowType) *UndeprecateWorkflowTypeInput { + s.WorkflowType = v + return s +} + +type UndeprecateWorkflowTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UndeprecateWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UndeprecateWorkflowTypeOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the Amazon SWF domain. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The list of tags to remove from the Amazon SWF domain. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Represents a workflow execution. type WorkflowExecution struct { _ struct{} `type:"structure"` @@ -13734,7 +14803,7 @@ type WorkflowExecutionConfiguration struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide. TaskPriority *string `locationName:"taskPriority" type:"string"` @@ -14085,8 +15154,8 @@ type WorkflowExecutionInfo struct { // // * FAILED – the execution failed to complete. // - // * TIMED_OUT – the execution did not complete in the allotted time and was - // automatically timed out. + // * TIMED_OUT – the execution did not complete in the allotted time and + // was automatically timed out.
// // * CONTINUED_AS_NEW – the execution is logically continued. This means // the current execution was completed and a new execution was started to @@ -14403,7 +15472,7 @@ type WorkflowExecutionStartedEventAttributes struct { LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` // The ID of the StartChildWorkflowExecutionInitiated event corresponding to - // the StartChildWorkflowExecutionDecision to start this workflow execution. + // the StartChildWorkflowExecution Decision to start this workflow execution. // The source event with this ID can be found in the history of the source workflow // execution. This information can be useful for diagnosing problems by tracing // back the chain of events leading up to this event. @@ -14707,7 +15776,7 @@ type WorkflowTypeConfiguration struct { // execution of this type is terminated, by calling the TerminateWorkflowExecution // action explicitly or due to an expired timeout. This default can be overridden // when starting a workflow execution using the StartWorkflowExecution action - // or the StartChildWorkflowExecutionDecision. + // or the StartChildWorkflowExecution Decision. // // The supported child policies are: // @@ -14724,7 +15793,7 @@ type WorkflowTypeConfiguration struct { // The default maximum duration, specified when registering the workflow type, // for executions of this workflow type. This default can be overridden when // starting a workflow execution using the StartWorkflowExecution action or - // the StartChildWorkflowExecutionDecision. + // the StartChildWorkflowExecution Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. @@ -14734,15 +15803,15 @@ type WorkflowTypeConfiguration struct { // // Executions of this workflow type need IAM roles to invoke Lambda functions. // If you don't specify an IAM role when starting this workflow type, the default - // Lambda role is attached to the execution. For more information, see http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) + // Lambda role is attached to the execution. For more information, see https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html) // in the Amazon SWF Developer Guide. DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` // The default task list, specified when registering the workflow type, for // decision tasks scheduled for workflow executions of this type. This default // can be overridden when starting a workflow execution using the StartWorkflowExecution - // action or the StartChildWorkflowExecutionDecision. + // action or the StartChildWorkflowExecution Decision. DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` // The default task priority, specified when registering the workflow type, @@ -14754,7 +15823,7 @@ type WorkflowTypeConfiguration struct { // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. // // For more information about setting task priority, see Setting Task Priority - // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) // in the Amazon SWF Developer Guide.
DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` @@ -14764,7 +15833,7 @@ type WorkflowTypeConfiguration struct { // time then the task is automatically timed out and rescheduled. If the decider // eventually reports a completion or failure, it is ignored. This default can // be overridden when starting a workflow execution using the StartWorkflowExecution - // action or the StartChildWorkflowExecutionDecision. + // action or the StartChildWorkflowExecution Decision. // // The duration is specified in seconds, an integer greater than or equal to // 0. You can use NONE to specify unlimited duration. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go index bbb8f45ab5c..b1f55c0dbc7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/doc.go @@ -15,7 +15,7 @@ // progress and maintaining their state. // // This documentation serves as reference only. For a broader overview of the -// Amazon SWF programming model, see the Amazon SWF Developer Guide (http://docs.aws.amazon.com/amazonswf/latest/developerguide/). +// Amazon SWF programming model, see the Amazon SWF Developer Guide (https://docs.aws.amazon.com/amazonswf/latest/developerguide/) . // // See swf package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/swf/ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go index 7baff0daf68..95e3d26ef00 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/errors.go @@ -23,8 +23,9 @@ const ( // ErrCodeDomainAlreadyExistsFault for service response error code // "DomainAlreadyExistsFault". // - // Returned if the specified domain already exists. You get this fault even - // if the existing domain is in deprecated status. + // Returned if the domain already exists. You may get this fault if you are + // registering a domain that is either already registered or deprecated, or + // if you undeprecate a domain that is currently registered. ErrCodeDomainAlreadyExistsFault = "DomainAlreadyExistsFault" // ErrCodeDomainDeprecatedFault for service response error code @@ -48,13 +49,18 @@ const ( // action. ErrCodeOperationNotPermittedFault = "OperationNotPermittedFault" + // ErrCodeTooManyTagsFault for service response error code + // "TooManyTagsFault". + // + // You've exceeded the number of tags allowed for a domain. + ErrCodeTooManyTagsFault = "TooManyTagsFault" + // ErrCodeTypeAlreadyExistsFault for service response error code // "TypeAlreadyExistsFault". // - // Returned if the type already exists in the specified domain. You get this - // fault even if the existing type is in deprecated status. You can specify - // another version if the intent is to create a new distinct version of the - // type. + // Returned if the type already exists in the specified domain. You may get + // this fault if you are registering a type that is either already registered + // or deprecated, or if you undeprecate a type that is currently registered. 
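// Illustrative sketch (not part of the vendored diff): how a caller might use
// the new SWF UndeprecateDomain API and the fault semantics described above.
// The session setup mirrors the SDK's own doc examples; the domain name is
// hypothetical.
//
// svc := swf.New(session.Must(session.NewSession()))
// // UndeprecateDomainInput.Validate() runs client-side first: a nil or empty
// // Name fails with ErrInvalidParams before any request is signed or sent.
// _, err := svc.UndeprecateDomain(&swf.UndeprecateDomainInput{
//     Name: aws.String("my-domain"),
// })
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == swf.ErrCodeDomainAlreadyExistsFault {
//     // Undeprecating a domain that is currently registered returns
//     // DomainAlreadyExistsFault, per the updated error comment above.
//     fmt.Println("domain already registered:", aerr.Message())
// }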
ErrCodeTypeAlreadyExistsFault = "TypeAlreadyExistsFault" // ErrCodeTypeDeprecatedFault for service response error code diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/service.go index 014d89a5241..c30e411bd0f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/swf/service.go @@ -46,11 +46,11 @@ const ( // svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SWF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SWF { svc := &SWF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-01-25", JSONVersion: "1.0", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go index 063a1fa9810..8f5c78af454 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go @@ -58,11 +58,9 @@ func (c *Transfer) CreateServerRequest(input *CreateServerInput) (req *request.R // CreateServer API operation for AWS Transfer for SFTP. // // Instantiates an autoscaling virtual server based on Secure File Transfer -// Protocol (SFTP) in AWS. The call returns the ServerId property assigned by -// the service to the newly created server. Reference this ServerId property -// when you make updates to your server, or work with users. -// -// The response returns the ServerId value for the newly created server. +// Protocol (SFTP) in AWS. When you make updates to your server or when you +// work with users, use the service-generated ServerId property that is assigned +// to the newly created server. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -151,15 +149,13 @@ func (c *Transfer) CreateUserRequest(input *CreateUserInput) (req *request.Reque // CreateUser API operation for AWS Transfer for SFTP. // -// Adds a user and associate them with an existing Secure File Transfer Protocol -// (SFTP) server. Using parameters for CreateUser, you can specify the user -// name, set the home directory, store the user's public key, and assign the -// user's AWS Identity and Access Management (IAM) role. You can also optionally -// add a scope-down policy, and assign metadata with tags that can be used to -// group and search for users. -// -// The response returns the UserName and ServerId values of the new user for -// that server. 
+// Creates a user and associates them with an existing Secure File Transfer +// Protocol (SFTP) server. You can only create and associate users with SFTP +// servers that have the IdentityProviderType set to SERVICE_MANAGED. Using +// parameters for CreateUser, you can specify the user name, set the home directory, +// store the user's public key, and assign the user's AWS Identity and Access +// Management (IAM) role. You can also optionally add a scope-down policy, and +// assign metadata with tags that can be used to group and search for users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -254,10 +250,8 @@ func (c *Transfer) DeleteServerRequest(input *DeleteServerInput) (req *request.R // DeleteServer API operation for AWS Transfer for SFTP. // // Deletes the Secure File Transfer Protocol (SFTP) server that you specify. -// If you used SERVICE_MANAGED as your IdentityProviderType, you need to delete -// all users associated with this server before deleting the server itself // -// No response returns from this call. +// No response returns from this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -350,7 +344,7 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // // Deletes a user's Secure Shell (SSH) public key. // -// No response is returned from this call. +// No response is returned from this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -374,6 +368,11 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DeleteSshPublicKey func (c *Transfer) DeleteSshPublicKey(input *DeleteSshPublicKeyInput) (*DeleteSshPublicKeyOutput, error) { req, out := c.DeleteSshPublicKeyRequest(input) @@ -443,7 +442,7 @@ func (c *Transfer) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // // Deletes the user belonging to the server you specify. // -// No response returns from this call. +// No response returns from this operation. // // When you delete a user from a server, the user's information is lost. // @@ -752,6 +751,11 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ImportSshPublicKey func (c *Transfer) ImportSshPublicKey(input *ImportSshPublicKeyInput) (*ImportSshPublicKeyOutput, error) { req, out := c.ImportSshPublicKeyRequest(input) @@ -881,7 +885,7 @@ func (c *Transfer) ListServersWithContext(ctx aws.Context, input *ListServersInp // // Example iterating over at most 3 pages of a ListServers operation. 
// pageNum := 0 // err := client.ListServersPages(params, -// func(page *ListServersOutput, lastPage bool) bool { +// func(page *transfer.ListServersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -913,10 +917,12 @@ func (c *Transfer) ListServersPagesWithContext(ctx aws.Context, input *ListServe }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListServersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListServersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1027,7 +1033,7 @@ func (c *Transfer) ListTagsForResourceWithContext(ctx aws.Context, input *ListTa // // Example iterating over at most 3 pages of a ListTagsForResource operation. // pageNum := 0 // err := client.ListTagsForResourcePages(params, -// func(page *ListTagsForResourceOutput, lastPage bool) bool { +// func(page *transfer.ListTagsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1059,10 +1065,12 @@ func (c *Transfer) ListTagsForResourcePagesWithContext(ctx aws.Context, input *L }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1176,7 +1184,7 @@ func (c *Transfer) ListUsersWithContext(ctx aws.Context, input *ListUsersInput, // // Example iterating over at most 3 pages of a ListUsers operation. // pageNum := 0 // err := client.ListUsersPages(params, -// func(page *ListUsersOutput, lastPage bool) bool { +// func(page *transfer.ListUsersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1208,10 +1216,12 @@ func (c *Transfer) ListUsersPagesWithContext(ctx aws.Context, input *ListUsersIn }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1292,6 +1302,11 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StartServer func (c *Transfer) StartServer(input *StartServerInput) (*StartServerOutput, error) { req, out := c.StartServerRequest(input) @@ -1365,7 +1380,7 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // Stopping a server will not reduce or impact your Secure File Transfer Protocol // (SFTP) endpoint billing. // -// The states of STOPPING indicates that the server is in an intermediate state, +// The state of STOPPING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully offline. The values of STOP_FAILED // can indicate an error condition. // @@ -1393,6 +1408,11 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
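// Illustrative sketch (not part of the vendored diff): the rewritten paging
// loops above change no caller-visible behavior; returning false from the
// callback still stops iteration early, and any paging error is surfaced by
// the returned error. The ServerId below is hypothetical.
//
// svc := transfer.New(session.Must(session.NewSession()))
// err := svc.ListUsersPages(&transfer.ListUsersInput{
//     ServerId: aws.String("s-0123456789abcdef0"),
// }, func(page *transfer.ListUsersOutput, lastPage bool) bool {
//     fmt.Println(len(page.Users), "users on this page")
//     return !lastPage // keep paging until the final page
// })
// if err != nil {
//     fmt.Println(err)
// }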
+// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StopServer func (c *Transfer) StopServer(input *StopServerInput) (*StopServerOutput, error) { req, out := c.StopServerRequest(input) @@ -1483,6 +1503,10 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TagResource func (c *Transfer) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -1551,7 +1575,7 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) // // If the IdentityProviderType of the server is API_Gateway, tests whether your // API Gateway is set up successfully. We highly recommend that you call this -// method to test your authentication method as soon as you create your server. +// operation to test your authentication method as soon as you create your server. // By doing so, you can troubleshoot issues with the API Gateway integration // to ensure that your users can successfully use the service. // @@ -1667,6 +1691,10 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UntagResource func (c *Transfer) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -1756,10 +1784,18 @@ func (c *Transfer) UpdateServerRequest(input *UpdateServerInput) (req *request.R // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceExistsException "ResourceExistsException" +// The requested resource does not exist. +// // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateServer func (c *Transfer) UpdateServer(input *UpdateServerInput) (*UpdateServerOutput, error) { req, out := c.UpdateServerRequest(input) @@ -1854,6 +1890,11 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
+// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateUser func (c *Transfer) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { req, out := c.UpdateUserRequest(input) @@ -1880,38 +1921,43 @@ type CreateServerInput struct { _ struct{} `type:"structure"` // The virtual private cloud (VPC) endpoint settings that you want to configure - // for your SFTP server. + // for your SFTP server. This parameter is required when you specify a value + // for the EndpointType parameter. EndpointDetails *EndpointDetails `type:"structure"` - // The type of VPC endpoint that you want your SFTP server connect to. If you - // connect to a VPC endpoint, your SFTP server isn't accessible over the public - // internet. + // The type of VPC endpoint that you want your SFTP server to connect to. If + // you connect to a VPC endpoint, your SFTP server isn't accessible over the + // public internet. EndpointType *string `type:"string" enum:"EndpointType"` - // The RSA private key as generated by ssh-keygen -N "" -f my-new-server-key + // The RSA private key as generated by the ssh-keygen -N "" -f my-new-server-key // command. // // If you aren't planning to migrate existing users from an existing SFTP server // to a new AWS SFTP server, don't update the host key. Accidentally changing - // a server's host key can be disruptive. For more information, see change-host-key + // a server's host key can be disruptive. + // + // For more information, see "https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" // in the AWS SFTP User Guide. - HostKey *string `type:"string"` + HostKey *string `type:"string" sensitive:"true"` - // An array containing all of the information required to call a customer-supplied - // authentication API. This parameter is not required when the IdentityProviderType - // value of server that is created uses the SERVICE_MANAGED authentication method. + // This parameter is required when the IdentityProviderType is set to API_GATEWAY. + // Accepts an array containing all of the information required to call a customer-supplied + // authentication API, including the API Gateway URL. This property is not required + // when the IdentityProviderType is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // The mode of authentication enabled for this service. The default value is - // SERVICE_MANAGED, which allows you to store and access SFTP user credentials - // within the service. An IdentityProviderType value of API_GATEWAY indicates - // that user authentication requires a call to an API Gateway endpoint URL provided - // by you to integrate an identity provider of your choice. + // Specifies the mode of authentication for the SFTP server. The default value + // is SERVICE_MANAGED, which allows you to store and access SFTP user credentials + // within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate + // with an identity provider of your choosing. The API_GATEWAY setting requires + // you to provide an API Gateway endpoint URL to call for authentication using + // the IdentityProviderDetails parameter. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // A value that allows the service to write your SFTP users' activity to your // Amazon CloudWatch logs for monitoring and auditing purposes. 
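// Illustrative sketch (not part of the vendored diff): creating a server with
// the default SERVICE_MANAGED identity provider, per the CreateServerInput
// notes below. With SERVICE_MANAGED, IdentityProviderDetails may be omitted;
// the logging role ARN is hypothetical and must satisfy the new minimum
// length of 20 enforced by client-side validation.
//
// svc := transfer.New(session.Must(session.NewSession()))
// out, err := svc.CreateServer(&transfer.CreateServerInput{
//     IdentityProviderType: aws.String(transfer.IdentityProviderTypeServiceManaged),
//     LoggingRole:          aws.String("arn:aws:iam::123456789012:role/sftp-logs"),
// })
// if err == nil {
//     fmt.Println("created server:", aws.StringValue(out.ServerId))
// }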
- LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // Key-value pairs that can be used to group and search for servers. Tags []*Tag `min:"1" type:"list"` @@ -1930,9 +1976,22 @@ func (s CreateServerInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateServerInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateServerInput"} + if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LoggingRole", 20)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1998,7 +2057,7 @@ type CreateServerOutput struct { // The service-assigned ID of the SFTP server that is created. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2021,13 +2080,47 @@ type CreateUserInput struct { _ struct{} `type:"structure"` // The landing directory (folder) for a user when they log in to the server - // using their SFTP client. An example is /home/username. + // using their SFTP client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. + // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP server. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // A scope-down policy for your user so you can use the same IAM role across // multiple users. This policy scopes down user access to portions of their - // Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, + // Amazon S3 bucket. 
Variables that you can use inside this policy include ${Transfer:UserName}, // ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. + // + // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the + // policy as a JSON blob and pass it in the Policy argument. + // + // For an example of a scope-down policy, see Creating a Scope-Down Policy + // (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). + // + // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // in the AWS Security Token Service API Reference. Policy *string `type:"string"` // The IAM role that controls your user's access to your Amazon S3 bucket. The @@ -2038,15 +2131,15 @@ type CreateUserInput struct { // SFTP user's transfer requests. // // Role is a required field - Role *string `type:"string" required:"true"` + Role *string `min:"20" type:"string" required:"true"` // A system-assigned unique identifier for an SFTP server instance. This is // the specific SFTP server that you added your user to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` - // The public portion of the Secure Shall (SSH) key used to authenticate the + // The public portion of the Secure Shell (SSH) key used to authenticate the // user to the SFTP server. SshPublicKeyBody *string `type:"string"` @@ -2060,7 +2153,7 @@ type CreateUserInput struct { // underscore, and hyphen. The user name can't start with a hyphen. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2076,18 +2169,40 @@ func (s CreateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } if s.Role == nil { invalidParams.Add(request.NewErrParamRequired("Role")) } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2111,6 +2226,18 @@ func (s *CreateUserInput) SetHomeDirectory(v string) *CreateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value.
+func (s *CreateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *CreateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *CreateUserInput) SetHomeDirectoryType(v string) *CreateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *CreateUserInput) SetPolicy(v string) *CreateUserInput { s.Policy = &v @@ -2153,12 +2280,12 @@ type CreateUserOutput struct { // The ID of the SFTP server that the user is attached to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user account associated with an SFTP server. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2189,7 +2316,7 @@ type DeleteServerInput struct { // A unique system-assigned identifier for an SFTP server instance. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2208,6 +2335,9 @@ func (s *DeleteServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2242,17 +2372,17 @@ type DeleteSshPublicKeyInput struct { // server instance that has the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique identifier used to reference your user's specific SSH key. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A unique string that identifies a user whose public key is being deleted. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2271,12 +2401,21 @@ func (s *DeleteSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyId == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyId")) } + if s.SshPublicKeyId != nil && len(*s.SshPublicKeyId) < 21 { + invalidParams.Add(request.NewErrParamMinLen("SshPublicKeyId", 21)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2323,12 +2462,12 @@ type DeleteUserInput struct { // the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user that is being deleted from the server. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2347,9 +2486,15 @@ func (s *DeleteUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2389,7 +2534,7 @@ type DescribeServerInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2408,6 +2553,9 @@ func (s *DescribeServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2453,14 +2601,14 @@ type DescribeUserInput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The name of the user assigned to one or more servers. User names are part - // of the sign-in credentials to use the AWS Transfer service and perform file - // transfer tasks. + // of the sign-in credentials to use the AWS Transfer for SFTP service and perform + // file transfer tasks. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2479,9 +2627,15 @@ func (s *DescribeUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2508,7 +2662,7 @@ type DescribeUserOutput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // An array containing the properties of the user account for the ServerID value // that you specified. @@ -2539,9 +2693,10 @@ func (s *DescribeUserOutput) SetUser(v *DescribedUser) *DescribeUserOutput { return s } -// Describe the properties of the server that was specified. Information returned -// includes: the server Amazon Resource Name (ARN), the authentication configuration -// and type, the logging role, server Id and state, and assigned tags or metadata. +// Describes the properties of the server that was specified. Information returned +// includes the following: the server Amazon Resource Name (ARN), the authentication +// configuration and type, the logging role, the server ID and state, and assigned +// tags or metadata. 
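// Illustrative sketch (not part of the vendored diff): fetching the
// DescribedServer record defined below. The ServerId is hypothetical; the new
// min:"19" constraint is validated client-side before the request is sent.
//
// svc := transfer.New(session.Must(session.NewSession()))
// out, err := svc.DescribeServer(&transfer.DescribeServerInput{
//     ServerId: aws.String("s-0123456789abcdef0"),
// })
// if err == nil {
//     // DescribedServer carries the ARN, endpoint and identity-provider
//     // configuration, logging role, state, and tags for the server.
//     fmt.Println(aws.StringValue(out.Server.ServerId), aws.StringValue(out.Server.State))
// }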
type DescribedServer struct { _ struct{} `type:"structure"` @@ -2559,9 +2714,9 @@ type DescribedServer struct { // the public internet. EndpointType *string `type:"string" enum:"EndpointType"` - // This value contains the Message-Digest Algorithm (MD5) hash of the server's - // host key. This value is equivalent to the output of ssh-keygen -l -E md5 - // -f my-new-server-key command. + // This value contains the message-digest algorithm (MD5) hash of the server's + // host key. This value is equivalent to the output of the ssh-keygen -l -E + // md5 -f my-new-server-key command. HostKeyFingerprint *string `type:"string"` // Specifies information to call a customer-supplied authentication API. This @@ -2569,7 +2724,7 @@ type DescribedServer struct { IdentityProviderDetails *IdentityProviderDetails `type:"structure"` // This property defines the mode of authentication method enabled for this - // service. A value of SERVICE_MANAGED, means that you are using this Server + // service. A value of SERVICE_MANAGED means that you are using this server // to store and access SFTP user credentials within the service. A value of // API_GATEWAY indicates that you have integrated an API Gateway endpoint that // will be invoked for authenticating your user into the service. @@ -2577,19 +2732,19 @@ type DescribedServer struct { // This property is an AWS Identity and Access Management (IAM) entity that // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. - // When set, user activity can be view in your CloudWatch logs. - LoggingRole *string `type:"string"` + // When set, user activity can be viewed in your CloudWatch logs. + LoggingRole *string `min:"20" type:"string"` - // This property is a unique system assigned identifier for the SFTP server + // This property is a unique system-assigned identifier for the SFTP server // that you instantiate. - ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // The condition of the SFTP server for the server that was described. A value // of ONLINE indicates that the server can accept jobs and transfer files. A // State value of OFFLINE means that the server cannot perform file transfer // operations. // - // The states of STARTING and STOPPING indicated that the server is in an intermediate + // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` @@ -2679,7 +2834,7 @@ func (s *DescribedServer) SetUserCount(v int64) *DescribedServer { return s } -// Returns properties of the user that you wish to describe. +// Returns properties of the user that you want to describe. type DescribedUser struct { _ struct{} `type:"structure"` @@ -2689,11 +2844,34 @@ type DescribedUser struct { // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This property specifies the landing directory (or folder) which is the location + // This property specifies the landing directory (or folder), which is the location // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example would be: /bucket_name/home/username. + // user. An example is /your s3 bucket name/home/username . 
HomeDirectory *string `type:"string"` + // Logical directory mappings that you specified for what S3 paths and keys + // should be visible to your user and how you want to make them visible. You + // will need to specify the "Entry" and "Target" pair, where Entry shows how + // the path is made visible and Target is the actual S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your AWS IAM Role provides access to paths in Target. + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you mapped for your users to see + // when they log into the SFTP server. If you set it to PATH, the user will + // see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you + // set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Specifies the name of the policy in use for the described user. Policy *string `type:"string"` @@ -2703,7 +2881,7 @@ type DescribedUser struct { // into and out of your Amazon S3 bucket or buckets. The IAM role should also // contain a trust relationship that allows the SFTP server to access your resources // when servicing your SFTP user's transfer requests. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This property contains the public key portion of the Secure Shell (SSH) keys // stored for the described user. @@ -2716,7 +2894,7 @@ type DescribedUser struct { // This property is the name of the user that was requested to be described. // User names are used for authentication purposes. This is the string that // will be used by your user when they log in to your SFTP server. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -2741,6 +2919,18 @@ func (s *DescribedUser) SetHomeDirectory(v string) *DescribedUser { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *DescribedUser) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *DescribedUser { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *DescribedUser) SetHomeDirectoryType(v string) *DescribedUser { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *DescribedUser) SetPolicy(v string) *DescribedUser { s.Policy = &v @@ -2777,7 +2967,7 @@ type EndpointDetails struct { _ struct{} `type:"structure"` // The ID of the VPC endpoint. - VpcEndpointId *string `type:"string"` + VpcEndpointId *string `min:"22" type:"string"` } // String returns the string representation @@ -2790,23 +2980,89 @@ func (s EndpointDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid.
+func (s *EndpointDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EndpointDetails"} + if s.VpcEndpointId != nil && len(*s.VpcEndpointId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("VpcEndpointId", 22)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetVpcEndpointId sets the VpcEndpointId field's value. func (s *EndpointDetails) SetVpcEndpointId(v string) *EndpointDetails { s.VpcEndpointId = &v return s } +// Represents an object that contains entries and targets for HomeDirectoryMappings. +type HomeDirectoryMapEntry struct { + _ struct{} `type:"structure"` + + // Represents an entry and a target for HomeDirectoryMappings. + // + // Entry is a required field + Entry *string `type:"string" required:"true"` + + // Represents the map target that is used in a HomeDirectoryMapEntry. + // + // Target is a required field + Target *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HomeDirectoryMapEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HomeDirectoryMapEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HomeDirectoryMapEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HomeDirectoryMapEntry"} + if s.Entry == nil { + invalidParams.Add(request.NewErrParamRequired("Entry")) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntry sets the Entry field's value. +func (s *HomeDirectoryMapEntry) SetEntry(v string) *HomeDirectoryMapEntry { + s.Entry = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *HomeDirectoryMapEntry) SetTarget(v string) *HomeDirectoryMapEntry { + s.Target = &v + return s +} + // Returns information related to the type of user authentication that is in -// use for a server's users. A server can only have one method of authentication. +// use for a server's users. A server can have only one method of authentication. type IdentityProviderDetails struct { _ struct{} `type:"structure"` - // The Role parameter provides the type of InvocationRole used to authenticate - // the user account. - InvocationRole *string `type:"string"` + // The InvocationRole parameter provides the type of InvocationRole used to + // authenticate the user account. + InvocationRole *string `min:"20" type:"string"` - // The IdentityProviderDetail parameter contains the location of the service - // endpoint used to authenticate users. + // The Url parameter contains the location of the service endpoint + // used to authenticate users. Url *string `type:"string"` } @@ -2820,6 +3076,19 @@ func (s IdentityProviderDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *IdentityProviderDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IdentityProviderDetails"} + if s.InvocationRole != nil && len(*s.InvocationRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("InvocationRole", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetInvocationRole sets the InvocationRole field's value.
func (s *IdentityProviderDetails) SetInvocationRole(v string) *IdentityProviderDetails { s.InvocationRole = &v @@ -2838,7 +3107,7 @@ type ImportSshPublicKeyInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The public key portion of an SSH key pair. // @@ -2848,7 +3117,7 @@ type ImportSshPublicKeyInput struct { // The name of the user account that is assigned to one or more servers. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2867,12 +3136,18 @@ func (s *ImportSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyBody == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyBody")) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2898,27 +3173,27 @@ func (s *ImportSshPublicKeyInput) SetUserName(v string) *ImportSshPublicKeyInput return s } -// This response identifies the user, server they belong to, and the identifier +// This response identifies the user, the server they belong to, and the identifier // of the SSH public key associated with that user. A user can have more than -// one key on each server that they are associate with. +// one key on each server that they are associated with. type ImportSshPublicKeyOutput struct { _ struct{} `type:"structure"` // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // This identifier is the name given to a public key by the system that was // imported. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A user name assigned to the ServerID value that you specified. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -3050,9 +3325,9 @@ type ListTagsForResourceInput struct { // request. MaxResults *int64 `min:"1" type:"integer"` - // When you request additional results from the ListTagsForResource call, a - // NextToken parameter is returned in the input. You can then pass in a subsequent - // command the NextToken parameter to continue listing additional tags. + // When you request additional results from the ListTagsForResource operation, + // a NextToken parameter is returned in the input. You can then pass in a subsequent + // command to the NextToken parameter to continue listing additional tags. NextToken *string `min:"1" type:"string"` } @@ -3114,12 +3389,11 @@ type ListTagsForResourceOutput struct { // When you can get additional results from the ListTagsForResource call, a // NextToken parameter is returned in the output. 
You can then pass in a subsequent - // command the NextToken parameter to continue listing additional tags. + // command to the NextToken parameter to continue listing additional tags. NextToken *string `min:"1" type:"string"` // Key-value pairs that are assigned to a resource, usually for the purpose - // of grouping and searching for items. Tags are metadata that you define that - // you can use for any purpose. + // of grouping and searching for items. Tags are metadata that you define. Tags []*Tag `min:"1" type:"list"` } @@ -3159,14 +3433,14 @@ type ListUsersInput struct { // When you can get additional results from the ListUsers call, a NextToken // parameter is returned in the output. You can then pass in a subsequent command - // the NextToken parameter to continue listing additional users. + // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) - // server that has users are assigned to it. + // server that has users assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3191,6 +3465,9 @@ func (s *ListUsersInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3221,14 +3498,14 @@ type ListUsersOutput struct { // When you can get additional results from the ListUsers call, a NextToken // parameter is returned in the output. You can then pass in a subsequent command - // the NextToken parameter to continue listing additional users. + // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` // A system-assigned unique identifier for an SFTP server that the users are // assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // Returns the user accounts and their properties for the ServerId value that // you specify. @@ -3280,25 +3557,25 @@ type ListedServer struct { EndpointType *string `type:"string" enum:"EndpointType"` // The authentication method used to validate a user for the server that was - // specified. listed. This can include Secure Shell (SSH), user name and password - // combinations, or your own custom authentication method. Valid values include - // SERVICE_MANAGED or API_GATEWAY. + // specified. This can include Secure Shell (SSH), user name and password combinations, + // or your own custom authentication method. Valid values include SERVICE_MANAGED + // or API_GATEWAY. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // The AWS Identity and Access Management entity that allows the server to turn // on Amazon CloudWatch logging. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // This value is the unique system assigned identifier for the SFTP servers // that were listed. - ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // This property describes the condition of the SFTP server for the server that // was described. A value of ONLINE indicates that the server can accept jobs // and transfer files.
A State value of OFFLINE means that the server cannot // perform file transfer operations. // - // The states of STARTING and STOPPING indicated that the server is in an intermediate + // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` @@ -3365,7 +3642,7 @@ type ListedUser struct { _ struct{} `type:"structure"` // This property is the unique Amazon Resource Name (ARN) for the user that - // you wish to learn about. + // you want to learn about. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -3374,18 +3651,25 @@ type ListedUser struct { // an Amazon S3 bucket for the user you specify by their ARN. HomeDirectory *string `type:"string"` + // The type of landing directory (folder) you mapped for your users' home directory. + // If you set it to PATH, the user will see the absolute Amazon S3 bucket paths + // as is in their SFTP clients. If you set it to LOGICAL, you will need to provide + // mappings in the HomeDirectoryMappings for how you want to make S3 paths visible + // to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // The role in use by this user. A role is an AWS Identity and Access Management - // (IAM) entity that in this case allows the SFTP server to act on a user's + // (IAM) entity that, in this case, allows the SFTP server to act on a user's // behalf. It allows the server to inherit the trust relationship that enables // that user to perform file operations to their Amazon S3 bucket. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This value is the number of SSH public keys stored for the user you specified. SshPublicKeyCount *int64 `type:"integer"` // The name of the user whose ARN was specified. User names are used for authentication // purposes. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -3410,6 +3694,12 @@ func (s *ListedUser) SetHomeDirectory(v string) *ListedUser { return s } +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *ListedUser) SetHomeDirectoryType(v string) *ListedUser { + s.HomeDirectoryType = &v + return s +} + // SetRole sets the Role field's value. func (s *ListedUser) SetRole(v string) *ListedUser { s.Role = &v @@ -3449,7 +3739,7 @@ type SshPublicKey struct { // The SshPublicKeyId parameter contains the identifier of the public key. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` } // String returns the string representation @@ -3486,7 +3776,7 @@ type StartServerInput struct { // A system-assigned unique identifier for an SFTP server that you start.
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3505,6 +3795,9 @@ func (s *StartServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3538,7 +3831,7 @@ type StopServerInput struct { // A system-assigned unique identifier for an SFTP server that you stopped. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3557,6 +3850,9 @@ func (s *StopServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3731,19 +4027,19 @@ func (s TagResourceOutput) GoString() string { type TestIdentityProviderInput struct { _ struct{} `type:"structure"` - // A system assigned identifier for a specific server. That server's user authentication + // A system-assigned identifier for a specific server. That server's user authentication // method is tested with a user name and password. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` - // This request parameter is name of the user account to be tested. + // This request parameter is the name of the user account to be tested. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` // The password of the user account to be tested. - UserPassword *string `type:"string"` + UserPassword *string `type:"string" sensitive:"true"` } // String returns the string representation @@ -3762,9 +4058,15 @@ func (s *TestIdentityProviderInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3793,9 +4095,12 @@ func (s *TestIdentityProviderInput) SetUserPassword(v string) *TestIdentityProvi type TestIdentityProviderOutput struct { _ struct{} `type:"structure"` - // The result of the authorization test as a message. + // A message that indicates whether the test was successful or not. Message *string `type:"string"` + // The response that is returned from your API Gateway. + Response *string `type:"string"` + // The HTTP status code that is the response from your API Gateway. // // StatusCode is a required field @@ -3823,6 +4128,12 @@ func (s *TestIdentityProviderOutput) SetMessage(v string) *TestIdentityProviderO return s } +// SetResponse sets the Response field's value. +func (s *TestIdentityProviderOutput) SetResponse(v string) *TestIdentityProviderOutput { + s.Response = &v + return s +} + // SetStatusCode sets the StatusCode field's value. 
func (s *TestIdentityProviderOutput) SetStatusCode(v int64) *TestIdentityProviderOutput { s.StatusCode = &v @@ -3929,9 +4240,11 @@ type UpdateServerInput struct { // // If you aren't planning to migrate existing users from an existing SFTP server // to a new AWS SFTP server, don't update the host key. Accidentally changing - // a server's host key can be disruptive. For more information, see change-host-key + // a server's host key can be disruptive. + // + // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" // in the AWS SFTP User Guide. - HostKey *string `type:"string"` + HostKey *string `type:"string" sensitive:"true"` // This response parameter is an array containing all of the information required // to call a customer's authentication API method. @@ -3946,7 +4259,7 @@ type UpdateServerInput struct { // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3965,6 +4278,19 @@ func (s *UpdateServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4015,7 +4341,7 @@ type UpdateServerOutput struct { // is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -4037,16 +4363,49 @@ func (s *UpdateServerOutput) SetServerId(v string) *UpdateServerOutput { type UpdateUserInput struct { _ struct{} `type:"structure"` - // The HomeDirectory parameter specifies the landing directory (folder) for - // a user when they log in to the server using their client. An example would - // be: /home/username. + // A parameter that specifies the landing directory (folder) for a user when + // they log in to the server using their client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. + // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. 
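// The paragraph above describes a "chroot"-style mapping. A minimal sketch
// of that pattern with this vendored SDK follows; it is illustrative only,
// and the session setup, server ID, user name, and bucket path are
// assumptions rather than values taken from this change:
//
//    svc := transfer.New(session.Must(session.NewSession()))
//    _, err := svc.UpdateUser(&transfer.UpdateUserInput{
//        ServerId:          aws.String("s-0123456789abcdef0"),
//        UserName:          aws.String("alice"),
//        HomeDirectoryType: aws.String(transfer.HomeDirectoryTypeLogical),
//        HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
//            // Entry "/" pins the user's visible root to the Target path.
//            {Entry: aws.String("/"), Target: aws.String("/my-bucket/home/alice")},
//        },
//    })
//    if err != nil {
//        // Validate runs client-side first, so a too-short ServerId or a
//        // missing Entry/Target surfaces here before any request is sent.
//    }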
+ HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP server. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it to LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Allows you to supply a scope-down policy for your user so you can use the // same AWS Identity and Access Management (IAM) role across multiple users. - // The policy scopes down users access to portions of your Amazon S3 bucket. + // The policy scopes down user access to portions of your Amazon S3 bucket. // Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, // and ${Transfer:HomeBucket}. + // + // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the + // policy as a JSON blob and pass it in the Policy argument. + // + // For an example of a scope-down policy, see Creating a Scope-Down Policy + // (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). + // + // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // in the AWS Security Token Service API Reference. Policy *string `type:"string"` // The IAM role that controls your user's access to your Amazon S3 bucket. The @@ -4055,13 +4414,13 @@ type UpdateUserInput struct { // S3 bucket or buckets. The IAM role should also contain a trust relationship // that allows the Secure File Transfer Protocol (SFTP) server to access your // resources when servicing your SFTP user's transfer requests. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // A system-assigned unique identifier for an SFTP server instance that the // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user and is associated with a server as // specified by the ServerId. This is the string that will be used by your user @@ -4070,7 +4429,7 @@ type UpdateUserInput struct { // A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4086,12 +4445,34 @@ func (s UpdateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4105,6 +4486,18 @@ func (s *UpdateUserInput) SetHomeDirectory(v string) *UpdateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *UpdateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *UpdateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *UpdateUserInput) SetHomeDirectoryType(v string) *UpdateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *UpdateUserInput) SetPolicy(v string) *UpdateUserInput { s.Policy = &v @@ -4138,13 +4531,13 @@ type UpdateUserOutput struct { // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The unique identifier for a user that is assigned to the SFTP server instance // that was specified in the request. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4177,11 +4570,19 @@ const ( EndpointTypeVpcEndpoint = "VPC_ENDPOINT" ) +const ( + // HomeDirectoryTypePath is a HomeDirectoryType enum value + HomeDirectoryTypePath = "PATH" + + // HomeDirectoryTypeLogical is a HomeDirectoryType enum value + HomeDirectoryTypeLogical = "LOGICAL" +) + // Returns information related to the type of user authentication that is in // use for a server's users. For SERVICE_MANAGED authentication, the Secure // Shell (SSH) public keys are stored with a user on an SFTP server instance. // For API_GATEWAY authentication, your custom authentication method is implemented -// by using an API call. A server can only have one method of authentication. +// by using an API call. A server can have only one method of authentication. 
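// A hedged sketch of wiring up the API_GATEWAY method described above; the
// endpoint URL and role ARN are invented placeholders, and svc is a
// *transfer.Transfer client as in the earlier sketch. The InvocationRole
// value must satisfy the 20-character minimum that IdentityProviderDetails'
// new Validate method enforces before the request is sent:
//
//    _, err := svc.UpdateServer(&transfer.UpdateServerInput{
//        ServerId: aws.String("s-0123456789abcdef0"),
//        IdentityProviderDetails: &transfer.IdentityProviderDetails{
//            Url:            aws.String("https://example.execute-api.us-east-1.amazonaws.com/prod"),
//            InvocationRole: aws.String("arn:aws:iam::123456789012:role/sftp-invocation-role"),
//        },
//    })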
const ( // IdentityProviderTypeServiceManaged is a IdentityProviderType enum value IdentityProviderTypeServiceManaged = "SERVICE_MANAGED" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go index b741ff05542..51a9c01b3df 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go @@ -8,11 +8,11 @@ // Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. // AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer // for SFTP—by integrating with existing authentication systems, and providing -// DNS routing with Amazon Route 53—so nothing changes for your customers and -// partners, or their applications. With your data in S3, you can use it with -// AWS services for processing, analytics, machine learning, and archiving. +// DNS routing with Amazon Route 53—so nothing changes for your customers +// and partners, or their applications. With your data in S3, you can use it +// with AWS services for processing, analytics, machine learning, and archiving. // Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no -// infrastructure to buy and setup. +// infrastructure to buy and set up. // // See https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05 for more information on this service. // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go index 0734c873b55..60b6a6269fb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go @@ -41,4 +41,12 @@ const ( // // The request has failed because the AWS Transfer for SFTP service is not available. ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was denied due to request throttling. + // + // HTTP Status Code: 400 + ErrCodeThrottlingException = "ThrottlingException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go index 0fcea8665a0..90791826dac 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Transfer { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "transfer" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Transfer { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Transfer { svc := &Transfer{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-05", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/api.go index f6b917a941f..7dc45ecc8fc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/api.go @@ -110,16 +110,16 @@ func (c *WAF) CreateByteMatchSetRequest(input *CreateByteMatchSetInput) (req *re // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -260,16 +260,16 @@ func (c *WAF) CreateGeoMatchSetRequest(input *CreateGeoMatchSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -407,16 +407,16 @@ func (c *WAF) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Request, // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. 
// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -589,16 +589,16 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -613,6 +613,12 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRateBasedRule func (c *WAF) CreateRateBasedRule(input *CreateRateBasedRuleInput) (*CreateRateBasedRuleOutput, error) { req, out := c.CreateRateBasedRuleRequest(input) @@ -972,16 +978,16 @@ func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, o // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -996,6 +1002,12 @@ func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, o // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. 
// +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRule func (c *WAF) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { req, out := c.CreateRuleRequest(input) @@ -1102,6 +1114,12 @@ func (c *WAF) CreateRuleGroupRequest(input *CreateRuleGroupInput) (req *request. // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRuleGroup func (c *WAF) CreateRuleGroup(input *CreateRuleGroupInput) (*CreateRuleGroupOutput, error) { req, out := c.CreateRuleGroupRequest(input) @@ -1226,16 +1244,16 @@ func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1366,16 +1384,16 @@ func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSe // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1526,16 +1544,16 @@ func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Reques // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. 
// -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1550,6 +1568,12 @@ func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Reques // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACL func (c *WAF) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { req, out := c.CreateWebACLRequest(input) @@ -1667,16 +1691,16 @@ func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -2380,6 +2404,10 @@ func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req * // // * You tried to delete an IPSet that references one or more IP addresses. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRule func (c *WAF) DeleteRateBasedRule(input *DeleteRateBasedRuleInput) (*DeleteRateBasedRuleOutput, error) { req, out := c.DeleteRateBasedRuleRequest(input) @@ -2748,6 +2776,10 @@ func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, o // // * You tried to delete an IPSet that references one or more IP addresses. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRule func (c *WAF) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { req, out := c.DeleteRuleRequest(input) @@ -2887,6 +2919,10 @@ func (c *WAF) DeleteRuleGroupRequest(input *DeleteRuleGroupInput) (req *request. // * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple // already exists in the specified WebACL. 
// +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRuleGroup func (c *WAF) DeleteRuleGroup(input *DeleteRuleGroupInput) (*DeleteRuleGroupOutput, error) { req, out := c.DeleteRuleGroupRequest(input) @@ -3265,6 +3301,10 @@ func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Reques // // * You tried to delete an IPSet that references one or more IP addresses. // +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACL func (c *WAF) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { req, out := c.DeleteWebACLRequest(input) @@ -4195,16 +4235,16 @@ func (c *WAF) GetRateBasedRuleManagedKeysRequest(input *GetRateBasedRuleManagedK // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -5096,16 +5136,16 @@ func (c *WAF) ListActivatedRulesInRuleGroupRequest(input *ListActivatedRulesInRu // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -5460,16 +5500,16 @@ func (c *WAF) ListLoggingConfigurationsRequest(input *ListLoggingConfigurationsI // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. 
// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -6167,6 +6207,122 @@ func (c *WAF) ListSubscribedRuleGroupsWithContext(ctx aws.Context, input *ListSu return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListTagsForResource +func (c *WAF) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS WAF. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. 
+// +// * ErrCodeNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListTagsForResource +func (c *WAF) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListWebACLs = "ListWebACLs" // ListWebACLsRequest generates a "aws/request.Request" representing the @@ -6384,12 +6540,14 @@ func (c *WAF) PutLoggingConfigurationRequest(input *PutLoggingConfigurationInput // You can access information about all traffic that AWS WAF inspects using // the following steps: // -// Create an Amazon Kinesis Data Firehose . +// Create an Amazon Kinesis Data Firehose. // // Create the data firehose with a PUT source and in the region that you are // operating. However, if you are capturing logs for Amazon CloudFront, always // create the firehose in US East (N. Virginia). // +// Do not create the data firehose using a Kinesis stream as your source. +// // Associate that firehose to your web ACL using a PutLoggingConfiguration request. // // When you successfully enable logging using a PutLoggingConfiguration request, @@ -6587,6 +6745,246 @@ func (c *WAF) PutPermissionPolicyWithContext(ctx aws.Context, input *PutPermissi return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/TagResource +func (c *WAF) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS WAF. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// * ErrCodeNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/TagResource +func (c *WAF) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UntagResource +func (c *WAF) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS WAF. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. 
+// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeBadRequestException "WAFBadRequestException" +// +// * ErrCodeTagOperationException "WAFTagOperationException" +// +// * ErrCodeTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UntagResource +func (c *WAF) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateByteMatchSet = "UpdateByteMatchSet" // UpdateByteMatchSetRequest generates a "aws/request.Request" representing the @@ -6714,16 +7112,16 @@ func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *re // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -6905,16 +7303,16 @@ func (c *WAF) UpdateGeoMatchSetRequest(input *UpdateGeoMatchSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. 
// // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7124,16 +7522,16 @@ func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7329,16 +7727,16 @@ func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req * // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7790,7 +8188,7 @@ func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, o // // You then add the Rule to a WebACL and specify that you want to block requests // that satisfy the Rule. For a request to be blocked, the User-Agent header -// in the request must contain the value BadBotand the request must originate +// in the request must contain the value BadBot and the request must originate // from the IP address 192.0.2.44. // // To create and configure a Rule, perform the following steps: @@ -7861,16 +8259,16 @@ func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, o // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8072,16 +8470,16 @@ func (c *WAF) UpdateRuleGroupRequest(input *UpdateRuleGroupInput) (req *request. 
// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8169,9 +8567,8 @@ func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput // * Whether to perform any transformations on the request, such as converting // it to lowercase, before checking its length. Note that transformations // of the request body are not supported because the AWS resource forwards -// only the first 8192 bytes of your request to AWS WAF. -// -// You can only specify a single type of TextTransformation. +// only the first 8192 bytes of your request to AWS WAF. You can only specify +// a single type of TextTransformation. // // * A ComparisonOperator used for evaluating the selected part of the request // against the specified Size, such as equals, greater than, less than, and @@ -8247,16 +8644,16 @@ func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8377,9 +8774,7 @@ func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSe // // * TextTransformation: Which text transformation, if any, to perform on // the web request before inspecting the request for snippets of malicious -// SQL code. -// -// You can only specify a single type of TextTransformation. +// SQL code. You can only specify a single type of TextTransformation. // // You use SqlInjectionMatchSet objects to specify which CloudFront requests // that you want to allow, block, or count. For example, if you're receiving @@ -8445,16 +8840,16 @@ func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSe // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. 
// // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8664,16 +9059,16 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8797,9 +9192,7 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // // * TextTransformation: Which text transformation, if any, to perform on // the web request before inspecting the request for cross-site scripting -// attacks. -// -// You can only specify a single type of TextTransformation. +// attacks. You can only specify a single type of TextTransformation. // // You use XssMatchSet objects to specify which CloudFront requests that you // want to allow, block, or count. For example, if you're receiving requests @@ -8865,16 +9258,16 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8984,15 +9377,15 @@ type ActivatedRule struct { // // Submit an UpdateWebACL request that has two actions: // - // The first action deletes the existing rule group from the web ACL. That is, - // in the UpdateWebACL request, the first Updates:Action should be DELETE and - // Updates:ActivatedRule:RuleId should be the rule group that contains the rules - // that you want to exclude. + // * The first action deletes the existing rule group from the web ACL. 
That + // is, in the UpdateWebACL request, the first Updates:Action should be DELETE + // and Updates:ActivatedRule:RuleId should be the rule group that contains + // the rules that you want to exclude. // - // The second action inserts the same rule group back in, but specifying the - // rules to exclude. That is, the second Updates:Action should be INSERT, Updates:ActivatedRule:RuleId - // should be the rule group that you just removed, and ExcludedRules should - // contain the rules that you want to exclude. + // * The second action inserts the same rule group back in, but specifying + // the rules to exclude. That is, the second Updates:Action should be INSERT, + // Updates:ActivatedRule:RuleId should be the rule group that you just removed, + // and ExcludedRules should contain the rules that you want to exclude. ExcludedRules []*ExcludedRule `type:"list"` // Use the OverrideAction to test your RuleGroup. @@ -9839,9 +10232,10 @@ type CreateRateBasedRuleInput struct { ChangeToken *string `min:"1" type:"string" required:"true"` // A friendly name or description for the metrics for this RateBasedRule. The - // name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RateBasedRule. + // name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum + // length 128 and minimum length one. It can't contain whitespace or metric + // names reserved for AWS WAF, including "All" and "Default_Action." You can't + // change the name of the metric after you create the RateBasedRule. // // MetricName is a required field MetricName *string `type:"string" required:"true"` @@ -9867,7 +10261,9 @@ type CreateRateBasedRuleInput struct { // rule. // // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` + RateLimit *int64 `min:"100" type:"long" required:"true"` + + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -9904,8 +10300,21 @@ func (s *CreateRateBasedRuleInput) Validate() error { if s.RateLimit == nil { invalidParams.Add(request.NewErrParamRequired("RateLimit")) } - if s.RateLimit != nil && *s.RateLimit < 2000 { - invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) + if s.RateLimit != nil && *s.RateLimit < 100 { + invalidParams.Add(request.NewErrParamMinValue("RateLimit", 100)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9944,6 +10353,12 @@ func (s *CreateRateBasedRuleInput) SetRateLimit(v int64) *CreateRateBasedRuleInp return s } +// SetTags sets the Tags field's value. +func (s *CreateRateBasedRuleInput) SetTags(v []*Tag) *CreateRateBasedRuleInput { + s.Tags = v + return s +} + type CreateRateBasedRuleOutput struct { _ struct{} `type:"structure"` @@ -10173,9 +10588,10 @@ type CreateRuleGroupInput struct { ChangeToken *string `min:"1" type:"string" required:"true"` // A friendly name or description for the metrics for this RuleGroup. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RuleGroup. 
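// The CreateRateBasedRule hunks above relax the RateLimit floor from 2000 to
// 100 and add the optional Tags list. A sketch of creating a rule against the
// new minimum (illustrative; assumes the imports and client construction from
// the earlier sketch, and placeholder names):
//
//    func createRateRule(svc *waf.WAF) error {
//        // Every WAF mutation needs a fresh change token.
//        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//        if err != nil {
//            return err
//        }
//        _, err = svc.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
//            ChangeToken: token.ChangeToken,
//            Name:        aws.String("ExampleRateRule"), // placeholder
//            MetricName:  aws.String("ExampleRateRule"), // alphanumeric, 1-128 chars, no reserved names
//            RateKey:     aws.String(waf.RateKeyIp),     // IP is the only supported RateKey
//            RateLimit:   aws.Int64(100),                // now valid; the old floor was 2000
//            Tags: []*waf.Tag{{
//                Key:   aws.String("Environment"),
//                Value: aws.String("test"),
//            }},
//        })
//        return err
//    }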
+ // can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change the name + // of the metric after you create the RuleGroup. // // MetricName is a required field MetricName *string `type:"string" required:"true"` @@ -10185,6 +10601,8 @@ type CreateRuleGroupInput struct { // // Name is a required field Name *string `min:"1" type:"string" required:"true"` + + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -10215,6 +10633,19 @@ func (s *CreateRuleGroupInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10240,6 +10671,12 @@ func (s *CreateRuleGroupInput) SetName(v string) *CreateRuleGroupInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateRuleGroupInput) SetTags(v []*Tag) *CreateRuleGroupInput { + s.Tags = v + return s +} + type CreateRuleGroupOutput struct { _ struct{} `type:"structure"` @@ -10283,9 +10720,10 @@ type CreateRuleInput struct { ChangeToken *string `min:"1" type:"string" required:"true"` // A friendly name or description for the metrics for this Rule. The name can - // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain - // white space. You can't change the name of the metric after you create the - // Rule. + // contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change the name + // of the metric after you create the Rule. // // MetricName is a required field MetricName *string `type:"string" required:"true"` @@ -10295,6 +10733,8 @@ type CreateRuleInput struct { // // Name is a required field Name *string `min:"1" type:"string" required:"true"` + + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -10325,6 +10765,19 @@ func (s *CreateRuleInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10350,6 +10803,12 @@ func (s *CreateRuleInput) SetName(v string) *CreateRuleInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateRuleInput) SetTags(v []*Tag) *CreateRuleInput { + s.Tags = v + return s +} + type CreateRuleOutput struct { _ struct{} `type:"structure"` @@ -10587,9 +11046,11 @@ type CreateWebACLInput struct { // DefaultAction is a required field DefaultAction *WafAction `type:"structure" required:"true"` - // A friendly name or description for the metrics for this WebACL. 
The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain white space. You can't change MetricName after you create the WebACL. + // A friendly name or description for the metrics for this WebACL. The name can + // contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change MetricName + // after you create the WebACL. // // MetricName is a required field MetricName *string `type:"string" required:"true"` @@ -10599,6 +11060,8 @@ type CreateWebACLInput struct { // // Name is a required field Name *string `min:"1" type:"string" required:"true"` + + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -10632,11 +11095,24 @@ func (s *CreateWebACLInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.DefaultAction != nil { if err := s.DefaultAction.Validate(); err != nil { invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10668,6 +11144,12 @@ func (s *CreateWebACLInput) SetName(v string) *CreateWebACLInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateWebACLInput) SetTags(v []*Tag) *CreateWebACLInput { + s.Tags = v + return s +} + type CreateWebACLOutput struct { _ struct{} `type:"structure"` @@ -13707,7 +14189,7 @@ func (s *HTTPRequest) SetURI(v string) *HTTPRequest { // ranges: /24, /32, /48, /56, /64, and /128. // // To specify an individual IP address, you specify the four-part IP address -// followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, +// followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, // you can specify /8 or any range between /16 through /32 (for IPv4) or /24, // /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, // see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). @@ -14221,11 +14703,10 @@ type ListIPSetsInput struct { // another batch of IPSet objects. Limit *int64 `type:"integer"` - // If you specify a value for Limit and you have more IPSets than the value - // of Limit, AWS WAF returns a NextMarker value in the response that allows - // you to list another group of IPSets. For the second and subsequent ListIPSets - // requests, specify the value of NextMarker from the previous response to get - // information about another batch of IPSets. + // AWS WAF returns a NextMarker value in the response that allows you to list + // another group of IPSets. For the second and subsequent ListIPSets requests, + // specify the value of NextMarker from the previous response to get information + // about another batch of IPSets. NextMarker *string `min:"1" type:"string"` } @@ -14270,10 +14751,8 @@ type ListIPSetsOutput struct { // An array of IPSetSummary objects.
IPSets []*IPSetSummary `type:"list"` - // If you have more IPSet objects than the number that you specified for Limit - // in the request, the response includes a NextMarker value. To list more IPSet - // objects, submit another ListIPSets request, and specify the NextMarker value - // from the response in the NextMarker value in the next request. + // To list more IPSet objects, submit another ListIPSets request, and in the + // next request use the NextMarker response value as the NextMarker value. NextMarker *string `min:"1" type:"string"` } @@ -15091,6 +15570,94 @@ func (s *ListSubscribedRuleGroupsOutput) SetRuleGroups(v []*SubscribedRuleGroupS return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + Limit *int64 `type:"integer"` + + NextMarker *string `min:"1" type:"string"` + + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) + } + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { + s.Limit = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListTagsForResourceInput) SetNextMarker(v string) *ListTagsForResourceInput { + s.NextMarker = &v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + NextMarker *string `min:"1" type:"string"` + + TagInfoForResource *TagInfoForResource `type:"structure"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListTagsForResourceOutput) SetNextMarker(v string) *ListTagsForResourceOutput { + s.NextMarker = &v + return s +} + +// SetTagInfoForResource sets the TagInfoForResource field's value. +func (s *ListTagsForResourceOutput) SetTagInfoForResource(v *TagInfoForResource) *ListTagsForResourceOutput { + s.TagInfoForResource = v + return s +} + type ListWebACLsInput struct { _ struct{} `type:"structure"` @@ -15374,7 +15941,7 @@ type Predicate struct { // on the negation of the settings in the ByteMatchSet, IPSet, SqlInjectionMatchSet, // XssMatchSet, RegexMatchSet, GeoMatchSet, or SizeConstraintSet. 
For example, // if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow, block, - // or count requests based on all IP addresses except192.0.2.44. + // or count requests based on all IP addresses except 192.0.2.44. // // Negated is a required field Negated *bool `type:"boolean" required:"true"` @@ -15442,6 +16009,9 @@ type PutLoggingConfigurationInput struct { // the redacted fields details, and the Amazon Resource Name (ARN) of the web // ACL to monitor. // + // When specifying Type in RedactedFields, you must use one of the following + // values: URI, QUERY_STRING, HEADER, or METHOD. + // // LoggingConfiguration is a required field LoggingConfiguration *LoggingConfiguration `type:"structure" required:"true"` } @@ -15601,9 +16171,10 @@ type RateBasedRule struct { MatchPredicates []*Predicate `type:"list" required:"true"` // A friendly name or description for the metrics for a RateBasedRule. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RateBasedRule. + // can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change the name + // of the metric after you create the RateBasedRule. MetricName *string `type:"string"` // A friendly name or description for a RateBasedRule. You can't change the @@ -15625,7 +16196,7 @@ type RateBasedRule struct { // rule. // // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` + RateLimit *int64 `min:"100" type:"long" required:"true"` // A unique identifier for a RateBasedRule. You use RuleId to get more information // about a RateBasedRule (see GetRateBasedRule), update a RateBasedRule (see @@ -16191,8 +16762,10 @@ type Rule struct { _ struct{} `type:"structure"` // A friendly name or description for the metrics for this Rule. The name can - // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain - // whitespace. You can't change MetricName after you create the Rule. + // contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change MetricName + // after you create the Rule. MetricName *string `type:"string"` // The friendly name or description for the Rule. You can't change the name @@ -16264,9 +16837,10 @@ type RuleGroup struct { _ struct{} `type:"structure"` // A friendly name or description for the metrics for this RuleGroup. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RuleGroup. + // can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change the name + // of the metric after you create the RuleGroup. MetricName *string `type:"string"` // The friendly name or description for the RuleGroup. You can't change the @@ -16596,14 +17170,14 @@ func (s *SampledHTTPRequest) SetWeight(v int64) *SampledHTTPRequest { // Specifies a constraint on the size of a part of the web request. 
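// The note added to PutLoggingConfigurationInput above restricts RedactedFields
// to the URI, QUERY_STRING, HEADER, and METHOD field types. A sketch that
// respects that restriction (illustrative; both ARNs are placeholders, and the
// Firehose delivery stream name must begin with "aws-waf-logs-"):
//
//    func enableLogging(svc *waf.WAF) error {
//        _, err := svc.PutLoggingConfiguration(&waf.PutLoggingConfigurationInput{
//            LoggingConfiguration: &waf.LoggingConfiguration{
//                ResourceArn: aws.String("arn:aws:waf::123456789012:webacl/example"),
//                LogDestinationConfigs: []*string{
//                    aws.String("arn:aws:firehose:us-east-1:123456789012:deliverystream/aws-waf-logs-example"),
//                },
//                // Redact the Authorization header; HEADER is one of the
//                // four permitted RedactedFields types.
//                RedactedFields: []*waf.FieldToMatch{{
//                    Type: aws.String("HEADER"),
//                    Data: aws.String("authorization"),
//                }},
//            },
//        })
//        return err
//    }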
AWS WAF // uses the Size, ComparisonOperator, and FieldToMatch to build an expression -// in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If +// in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If // that expression is true, the SizeConstraint is considered to match. type SizeConstraint struct { _ struct{} `type:"structure"` // The type of comparison you want AWS WAF to perform. AWS WAF uses this in // combination with the provided Size and FieldToMatch to build an expression - // in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If // that expression is true, the SizeConstraint is considered to match. // // EQ: Used to test if the Size is equal to the size of the FieldToMatch @@ -16629,7 +17203,7 @@ type SizeConstraint struct { // The size in bytes that you want AWS WAF to compare against the size of the // specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator - // and FieldToMatch to build an expression in the form of "SizeComparisonOperator + // and FieldToMatch to build an expression in the form of "Size ComparisonOperator // size in bytes of FieldToMatch". If that expression is true, the SizeConstraint // is considered to match. // @@ -16895,7 +17469,7 @@ type SizeConstraintSetUpdate struct { // Specifies a constraint on the size of a part of the web request. AWS WAF // uses the Size, ComparisonOperator, and FieldToMatch to build an expression - // in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If // that expression is true, the SizeConstraint is considered to match. // // SizeConstraint is a required field @@ -17245,9 +17819,10 @@ type SubscribedRuleGroupSummary struct { _ struct{} `type:"structure"` // A friendly name or description for the metrics for this RuleGroup. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RuleGroup. + // can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change the name + // of the metric after you create the RuleGroup. // // MetricName is a required field MetricName *string `type:"string" required:"true"` @@ -17292,6 +17867,157 @@ func (s *SubscribedRuleGroupSummary) SetRuleGroupId(v string) *SubscribedRuleGro return s } +type Tag struct { + _ struct{} `type:"structure"` + + Key *string `min:"1" type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagInfoForResource struct { + _ struct{} `type:"structure"` + + ResourceARN *string `min:"1" type:"string"` + + TagList []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s TagInfoForResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagInfoForResource) GoString() string { + return s.String() +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagInfoForResource) SetResourceARN(v string) *TagInfoForResource { + s.ResourceARN = &v + return s +} + +// SetTagList sets the TagList field's value. +func (s *TagInfoForResource) SetTagList(v []*Tag) *TagInfoForResource { + s.TagList = v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // In a GetSampledRequests request, the StartTime and EndTime objects specify // the time range for which you want AWS WAF to return a sample of web requests. 
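// With the Tag, TagInfoForResource, and TagResource types above in place, a
// round trip of tagging a resource and reading the tags back looks like this
// (illustrative; assumes the earlier imports plus "fmt", and a caller-supplied
// ARN):
//
//    func tagAndList(svc *waf.WAF, arn string) error {
//        _, err := svc.TagResource(&waf.TagResourceInput{
//            ResourceARN: aws.String(arn),
//            Tags: []*waf.Tag{{
//                Key:   aws.String("Team"),
//                Value: aws.String("security"),
//            }},
//        })
//        if err != nil {
//            return err
//        }
//        out, err := svc.ListTagsForResource(&waf.ListTagsForResourceInput{
//            ResourceARN: aws.String(arn),
//        })
//        if err != nil {
//            return err
//        }
//        for _, t := range out.TagInfoForResource.TagList {
//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
//        }
//        return nil
//    }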
// @@ -17360,6 +18086,74 @@ func (s *TimeWindow) SetStartTime(v time.Time) *TimeWindow { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateByteMatchSetInput struct { _ struct{} `type:"structure"` @@ -17498,10 +18292,9 @@ type UpdateGeoMatchSetInput struct { // // * GeoMatchSetUpdate: Contains Action and GeoMatchConstraint // - // * GeoMatchConstraint: Contains Type and Value - // - // You can have only one Type and Value per GeoMatchConstraint. To add multiple - // countries, include multiple GeoMatchSetUpdate objects in your request. + // * GeoMatchConstraint: Contains Type and Value You can have only one Type + // and Value per GeoMatchConstraint. To add multiple countries, include multiple + // GeoMatchSetUpdate objects in your request. // // Updates is a required field Updates []*GeoMatchSetUpdate `min:"1" type:"list" required:"true"` @@ -17731,7 +18524,7 @@ type UpdateRateBasedRuleInput struct { // rule. // // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` + RateLimit *int64 `min:"100" type:"long" required:"true"` // The RuleId of the RateBasedRule that you want to update. RuleId is returned // by CreateRateBasedRule and by ListRateBasedRules. 
@@ -17768,8 +18561,8 @@ func (s *UpdateRateBasedRuleInput) Validate() error { if s.RateLimit == nil { invalidParams.Add(request.NewErrParamRequired("RateLimit")) } - if s.RateLimit != nil && *s.RateLimit < 2000 { - invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) + if s.RateLimit != nil && *s.RateLimit < 100 { + invalidParams.Add(request.NewErrParamMinValue("RateLimit", 100)) } if s.RuleId == nil { invalidParams.Add(request.NewErrParamRequired("RuleId")) @@ -18907,8 +19700,10 @@ type WebACL struct { DefaultAction *WafAction `type:"structure" required:"true"` // A friendly name or description for the metrics for this WebACL. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change MetricName after you create the WebACL. + // can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length + // 128 and minimum length one. It can't contain whitespace or metric names reserved + // for AWS WAF, including "All" and "Default_Action." You can't change MetricName + // after you create the WebACL. MetricName *string `type:"string"` // A friendly name or description of the WebACL. You can't change the name of @@ -20248,6 +21043,12 @@ const ( // ParameterExceptionFieldResourceArn is a ParameterExceptionField enum value ParameterExceptionFieldResourceArn = "RESOURCE_ARN" + + // ParameterExceptionFieldTags is a ParameterExceptionField enum value + ParameterExceptionFieldTags = "TAGS" + + // ParameterExceptionFieldTagKeys is a ParameterExceptionField enum value + ParameterExceptionFieldTagKeys = "TAG_KEYS" ) const ( @@ -20259,6 +21060,9 @@ const ( // ParameterExceptionReasonIllegalArgument is a ParameterExceptionReason enum value ParameterExceptionReasonIllegalArgument = "ILLEGAL_ARGUMENT" + + // ParameterExceptionReasonInvalidTagKey is a ParameterExceptionReason enum value + ParameterExceptionReasonInvalidTagKey = "INVALID_TAG_KEY" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go index ee66380259f..32407c2a087 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go @@ -4,6 +4,10 @@ package waf const ( + // ErrCodeBadRequestException for service response error code + // "WAFBadRequestException". + ErrCodeBadRequestException = "WAFBadRequestException" + // ErrCodeDisallowedNameException for service response error code // "WAFDisallowedNameException". // @@ -58,16 +62,16 @@ const ( // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // - // * You tried to create a WebACL with a DefaultActionType other than ALLOW, + // * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // - // * You tried to update a WebACL with a WafActionType other than ALLOW, + // * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // - // * You tried to update a ByteMatchSet with a FieldToMatchType other than + // * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. 
// // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -197,4 +201,12 @@ const ( // // The specified subscription does not exist. ErrCodeSubscriptionNotFoundException = "WAFSubscriptionNotFoundException" + + // ErrCodeTagOperationException for service response error code + // "WAFTagOperationException". + ErrCodeTagOperationException = "WAFTagOperationException" + + // ErrCodeTagOperationInternalErrorException for service response error code + // "WAFTagOperationInternalErrorException". + ErrCodeTagOperationInternalErrorException = "WAFTagOperationInternalErrorException" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/service.go index 09bf43d9eeb..81b9b1c93d0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/waf/service.go @@ -46,11 +46,11 @@ const ( // svc := waf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAF { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAF { svc := &WAF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-24", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go index b9d1b5b5424..f3ff4114127 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go @@ -86,16 +86,16 @@ func (c *WAFRegional) AssociateWebACLRequest(input *AssociateWebACLInput) (req * // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. 
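// The errors.go hunks above introduce WAFBadRequestException,
// WAFTagOperationException, and WAFTagOperationInternalErrorException. A
// sketch of telling them apart at runtime with awserr (illustrative; the
// handling policy is an assumption, and it requires importing
// "github.com/aws/aws-sdk-go/aws/awserr"):
//
//    func tagWithDiagnostics(svc *waf.WAF, input *waf.TagResourceInput) error {
//        _, err := svc.TagResource(input)
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case waf.ErrCodeTagOperationException:
//                // the tagging request itself was rejected; fix the input
//            case waf.ErrCodeTagOperationInternalErrorException:
//                // service-side failure; likely transient, a retry may succeed
//            case waf.ErrCodeBadRequestException:
//                // malformed request
//            }
//        }
//        return err
//    }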
// // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -230,16 +230,16 @@ func (c *WAFRegional) CreateByteMatchSetRequest(input *waf.CreateByteMatchSetInp // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -380,16 +380,16 @@ func (c *WAFRegional) CreateGeoMatchSetRequest(input *waf.CreateGeoMatchSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -527,16 +527,16 @@ func (c *WAFRegional) CreateIPSetRequest(input *waf.CreateIPSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -709,16 +709,16 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. 
// -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -733,6 +733,12 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRateBasedRule func (c *WAFRegional) CreateRateBasedRule(input *waf.CreateRateBasedRuleInput) (*waf.CreateRateBasedRuleOutput, error) { req, out := c.CreateRateBasedRuleRequest(input) @@ -1092,16 +1098,16 @@ func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *reques // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1116,6 +1122,12 @@ func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *reques // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule func (c *WAFRegional) CreateRule(input *waf.CreateRuleInput) (*waf.CreateRuleOutput, error) { req, out := c.CreateRuleRequest(input) @@ -1222,6 +1234,12 @@ func (c *WAFRegional) CreateRuleGroupRequest(input *waf.CreateRuleGroupInput) (r // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. 
// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRuleGroup func (c *WAFRegional) CreateRuleGroup(input *waf.CreateRuleGroupInput) (*waf.CreateRuleGroupOutput, error) { req, out := c.CreateRuleGroupRequest(input) @@ -1346,16 +1364,16 @@ func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstr // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1486,16 +1504,16 @@ func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInje // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1646,16 +1664,16 @@ func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *re // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -1670,6 +1688,12 @@ func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *re // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. 
// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL func (c *WAFRegional) CreateWebACL(input *waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) { req, out := c.CreateWebACLRequest(input) @@ -1787,16 +1811,16 @@ func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -2500,6 +2524,10 @@ func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleI // // * You tried to delete an IPSet that references one or more IP addresses. // +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRateBasedRule func (c *WAFRegional) DeleteRateBasedRule(input *waf.DeleteRateBasedRuleInput) (*waf.DeleteRateBasedRuleOutput, error) { req, out := c.DeleteRateBasedRuleRequest(input) @@ -2868,6 +2896,10 @@ func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *reques // // * You tried to delete an IPSet that references one or more IP addresses. // +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule func (c *WAFRegional) DeleteRule(input *waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) { req, out := c.DeleteRuleRequest(input) @@ -3007,6 +3039,10 @@ func (c *WAFRegional) DeleteRuleGroupRequest(input *waf.DeleteRuleGroupInput) (r // * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple // already exists in the specified WebACL. // +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRuleGroup func (c *WAFRegional) DeleteRuleGroup(input *waf.DeleteRuleGroupInput) (*waf.DeleteRuleGroupOutput, error) { req, out := c.DeleteRuleGroupRequest(input) @@ -3385,6 +3421,10 @@ func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *re // // * You tried to delete an IPSet that references one or more IP addresses. 
// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// // See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL func (c *WAFRegional) DeleteWebACL(input *waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) { req, out := c.DeleteWebACLRequest(input) @@ -3609,16 +3649,16 @@ func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -4433,16 +4473,16 @@ func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBased // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -5252,16 +5292,16 @@ func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInp // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -5455,16 +5495,16 @@ func (c *WAFRegional) ListActivatedRulesInRuleGroupRequest(input *waf.ListActiva // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. 
// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -5819,16 +5859,16 @@ func (c *WAFRegional) ListLoggingConfigurationsRequest(input *waf.ListLoggingCon // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -6187,16 +6227,16 @@ func (c *WAFRegional) ListResourcesForWebACLRequest(input *ListResourcesForWebAC // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -6642,6 +6682,122 @@ func (c *WAFRegional) ListSubscribedRuleGroupsWithContext(ctx aws.Context, input return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
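+//
+// // A minimal sketch, assuming svc is a *WAFRegional client and webACLARN
+// // holds a valid web ACL ARN (both names are placeholders):
+// out, err := svc.ListTagsForResource(&waf.ListTagsForResourceInput{
+//     ResourceARN: aws.String(webACLARN),
+// })
+// if err == nil {
+//     fmt.Println(out.TagInfoForResource)
+// }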
+// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListTagsForResource +func (c *WAFRegional) ListTagsForResourceRequest(input *waf.ListTagsForResourceInput) (req *request.Request, output *waf.ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListTagsForResourceInput{} + } + + output = &waf.ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS WAF Regional. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListTagsForResource +func (c *WAFRegional) ListTagsForResource(input *waf.ListTagsForResourceInput) (*waf.ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListTagsForResourceWithContext(ctx aws.Context, input *waf.ListTagsForResourceInput, opts ...request.Option) (*waf.ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListWebACLs = "ListWebACLs" // ListWebACLsRequest generates a "aws/request.Request" representing the @@ -6859,12 +7015,14 @@ func (c *WAFRegional) PutLoggingConfigurationRequest(input *waf.PutLoggingConfig // You can access information about all traffic that AWS WAF inspects using // the following steps: // -// Create an Amazon Kinesis Data Firehose . +// Create an Amazon Kinesis Data Firehose. // // Create the data firehose with a PUT source and in the region that you are // operating. However, if you are capturing logs for Amazon CloudFront, always // create the firehose in US East (N. Virginia). // +// Do not create the data firehose using a Kinesis stream as your source. +// // Associate that firehose to your web ACL using a PutLoggingConfiguration request. // // When you successfully enable logging using a PutLoggingConfiguration request, @@ -7062,6 +7220,246 @@ func (c *WAFRegional) PutPermissionPolicyWithContext(ctx aws.Context, input *waf return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/TagResource +func (c *WAFRegional) TagResourceRequest(input *waf.TagResourceInput) (req *request.Request, output *waf.TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.TagResourceInput{} + } + + output = &waf.TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS WAF Regional. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
+// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/TagResource +func (c *WAFRegional) TagResource(input *waf.TagResourceInput) (*waf.TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) TagResourceWithContext(ctx aws.Context, input *waf.TagResourceInput, opts ...request.Option) (*waf.TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
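+//
+// // A minimal sketch, assuming svc is a *WAFRegional client, webACLARN holds
+// // a valid web ACL ARN, and "env" is a tag key you previously applied:
+// _, err := svc.UntagResource(&waf.UntagResourceInput{
+//     ResourceARN: aws.String(webACLARN),
+//     TagKeys:     []*string{aws.String("env")},
+// })
+// if awsErr, ok := err.(awserr.Error); ok {
+//     fmt.Println(awsErr.Code(), awsErr.Message()) // e.g. WAFTagOperationException
+// }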
+// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UntagResource +func (c *WAFRegional) UntagResourceRequest(input *waf.UntagResourceInput) (req *request.Request, output *waf.UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UntagResourceInput{} + } + + output = &waf.UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS WAF Regional. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFBadRequestException "WAFBadRequestException" +// +// * ErrCodeWAFTagOperationException "WAFTagOperationException" +// +// * ErrCodeWAFTagOperationInternalErrorException "WAFTagOperationInternalErrorException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UntagResource +func (c *WAFRegional) UntagResource(input *waf.UntagResourceInput) (*waf.UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UntagResourceWithContext(ctx aws.Context, input *waf.UntagResourceInput, opts ...request.Option) (*waf.UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateByteMatchSet = "UpdateByteMatchSet" // UpdateByteMatchSetRequest generates a "aws/request.Request" representing the @@ -7189,16 +7587,16 @@ func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInp // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7380,16 +7778,16 @@ func (c *WAFRegional) UpdateGeoMatchSetRequest(input *waf.UpdateGeoMatchSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7599,16 +7997,16 @@ func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *requ // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -7804,16 +8202,16 @@ func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleI // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. 
// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8265,7 +8663,7 @@ func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *reques // // You then add the Rule to a WebACL and specify that you want to block requests // that satisfy the Rule. For a request to be blocked, the User-Agent header -// in the request must contain the value BadBotand the request must originate +// in the request must contain the value BadBot and the request must originate // from the IP address 192.0.2.44. // // To create and configure a Rule, perform the following steps: @@ -8336,16 +8734,16 @@ func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *reques // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8547,16 +8945,16 @@ func (c *WAFRegional) UpdateRuleGroupRequest(input *waf.UpdateRuleGroupInput) (r // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8644,9 +9042,8 @@ func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstr // * Whether to perform any transformations on the request, such as converting // it to lowercase, before checking its length. Note that transformations // of the request body are not supported because the AWS resource forwards -// only the first 8192 bytes of your request to AWS WAF. 
-// -// You can only specify a single type of TextTransformation. +// only the first 8192 bytes of your request to AWS WAF. You can only specify +// a single type of TextTransformation. // // * A ComparisonOperator used for evaluating the selected part of the request // against the specified Size, such as equals, greater than, less than, and @@ -8722,16 +9119,16 @@ func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstr // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -8852,9 +9249,7 @@ func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInje // // * TextTransformation: Which text transformation, if any, to perform on // the web request before inspecting the request for snippets of malicious -// SQL code. -// -// You can only specify a single type of TextTransformation. +// SQL code. You can only specify a single type of TextTransformation. // // You use SqlInjectionMatchSet objects to specify which CloudFront requests // that you want to allow, block, or count. For example, if you're receiving @@ -8920,16 +9315,16 @@ func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInje // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -9139,16 +9534,16 @@ func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *re // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. 
// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -9272,9 +9667,7 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // // * TextTransformation: Which text transformation, if any, to perform on // the web request before inspecting the request for cross-site scripting -// attacks. -// -// You can only specify a single type of TextTransformation. +// attacks. You can only specify a single type of TextTransformation. // // You use XssMatchSet objects to specify which CloudFront requests that you // want to allow, block, or count. For example, if you're receiving requests @@ -9340,16 +9733,16 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, +// * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -10572,6 +10965,12 @@ const ( // ParameterExceptionFieldResourceArn is a ParameterExceptionField enum value ParameterExceptionFieldResourceArn = "RESOURCE_ARN" + + // ParameterExceptionFieldTags is a ParameterExceptionField enum value + ParameterExceptionFieldTags = "TAGS" + + // ParameterExceptionFieldTagKeys is a ParameterExceptionField enum value + ParameterExceptionFieldTagKeys = "TAG_KEYS" ) const ( @@ -10583,6 +10982,9 @@ const ( // ParameterExceptionReasonIllegalArgument is a ParameterExceptionReason enum value ParameterExceptionReasonIllegalArgument = "ILLEGAL_ARGUMENT" + + // ParameterExceptionReasonInvalidTagKey is a ParameterExceptionReason enum value + ParameterExceptionReasonInvalidTagKey = "INVALID_TAG_KEY" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go index 15380ca2ec9..76315ecfbba 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go @@ -4,6 +4,10 @@ package wafregional const ( + // ErrCodeWAFBadRequestException for service response error code + // "WAFBadRequestException". + ErrCodeWAFBadRequestException = "WAFBadRequestException" + // ErrCodeWAFDisallowedNameException for service response error code // "WAFDisallowedNameException". // @@ -58,16 +62,16 @@ const ( // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) // using an action other than INSERT or DELETE. 
// - // * You tried to create a WebACL with a DefaultActionType other than ALLOW, + // * You tried to create a WebACL with a DefaultAction Type other than ALLOW, // BLOCK, or COUNT. // // * You tried to create a RateBasedRule with a RateKey value other than // IP. // - // * You tried to update a WebACL with a WafActionType other than ALLOW, + // * You tried to update a WebACL with a WafAction Type other than ALLOW, // BLOCK, or COUNT. // - // * You tried to update a ByteMatchSet with a FieldToMatchType other than + // * You tried to update a ByteMatchSet with a FieldToMatch Type other than // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value @@ -198,6 +202,14 @@ const ( // The specified subscription does not exist. ErrCodeWAFSubscriptionNotFoundException = "WAFSubscriptionNotFoundException" + // ErrCodeWAFTagOperationException for service response error code + // "WAFTagOperationException". + ErrCodeWAFTagOperationException = "WAFTagOperationException" + + // ErrCodeWAFTagOperationInternalErrorException for service response error code + // "WAFTagOperationInternalErrorException". + ErrCodeWAFTagOperationInternalErrorException = "WAFTagOperationInternalErrorException" + // ErrCodeWAFUnavailableEntityException for service response error code // "WAFUnavailableEntityException". // diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go index 3a267ae6360..1eeabd40f0f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go @@ -46,11 +46,11 @@ const ( // svc := wafregional.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAFRegional { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAFRegional { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAFRegional { svc := &WAFRegional{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go index 1c6c372ad19..337f6a89afc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go @@ -107,6 +107,101 @@ func (c *WorkLink) AssociateDomainWithContext(ctx aws.Context, input *AssociateD return out, req.Send() } +const opAssociateWebsiteAuthorizationProvider = "AssociateWebsiteAuthorizationProvider" + +// AssociateWebsiteAuthorizationProviderRequest generates a "aws/request.Request" representing the +// client's request for the AssociateWebsiteAuthorizationProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateWebsiteAuthorizationProvider for more information on using the AssociateWebsiteAuthorizationProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateWebsiteAuthorizationProviderRequest method. +// req, resp := client.AssociateWebsiteAuthorizationProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/AssociateWebsiteAuthorizationProvider +func (c *WorkLink) AssociateWebsiteAuthorizationProviderRequest(input *AssociateWebsiteAuthorizationProviderInput) (req *request.Request, output *AssociateWebsiteAuthorizationProviderOutput) { + op := &request.Operation{ + Name: opAssociateWebsiteAuthorizationProvider, + HTTPMethod: "POST", + HTTPPath: "/associateWebsiteAuthorizationProvider", + } + + if input == nil { + input = &AssociateWebsiteAuthorizationProviderInput{} + } + + output = &AssociateWebsiteAuthorizationProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateWebsiteAuthorizationProvider API operation for Amazon WorkLink. +// +// Associates a website authorization provider with a specified fleet. This +// is used to authorize users against associated websites in the company network. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation AssociateWebsiteAuthorizationProvider for usage and error information. 
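+//
+// // A minimal sketch, assuming svc is a *WorkLink client, fleetARN holds a
+// // valid fleet ARN, and SAML is the provider type in use (DomainName applies
+// // only to SAML-based providers):
+// out, err := svc.AssociateWebsiteAuthorizationProvider(&worklink.AssociateWebsiteAuthorizationProviderInput{
+//     AuthorizationProviderType: aws.String("SAML"),
+//     DomainName:                aws.String("idp.example.com"),
+//     FleetArn:                  aws.String(fleetARN),
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.AuthorizationProviderId))
+// }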
+// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this action. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// The service is temporarily unavailable. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The requested resource was not found. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The number of requests exceeds the limit. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/AssociateWebsiteAuthorizationProvider +func (c *WorkLink) AssociateWebsiteAuthorizationProvider(input *AssociateWebsiteAuthorizationProviderInput) (*AssociateWebsiteAuthorizationProviderOutput, error) { + req, out := c.AssociateWebsiteAuthorizationProviderRequest(input) + return out, req.Send() +} + +// AssociateWebsiteAuthorizationProviderWithContext is the same as AssociateWebsiteAuthorizationProvider with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateWebsiteAuthorizationProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) AssociateWebsiteAuthorizationProviderWithContext(ctx aws.Context, input *AssociateWebsiteAuthorizationProviderInput, opts ...request.Option) (*AssociateWebsiteAuthorizationProviderOutput, error) { + req, out := c.AssociateWebsiteAuthorizationProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateWebsiteCertificateAuthority = "AssociateWebsiteCertificateAuthority" // AssociateWebsiteCertificateAuthorityRequest generates a "aws/request.Request" representing the @@ -1214,6 +1309,103 @@ func (c *WorkLink) DisassociateDomainWithContext(ctx aws.Context, input *Disasso return out, req.Send() } +const opDisassociateWebsiteAuthorizationProvider = "DisassociateWebsiteAuthorizationProvider" + +// DisassociateWebsiteAuthorizationProviderRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateWebsiteAuthorizationProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateWebsiteAuthorizationProvider for more information on using the DisassociateWebsiteAuthorizationProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateWebsiteAuthorizationProviderRequest method. 
+// req, resp := client.DisassociateWebsiteAuthorizationProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DisassociateWebsiteAuthorizationProvider +func (c *WorkLink) DisassociateWebsiteAuthorizationProviderRequest(input *DisassociateWebsiteAuthorizationProviderInput) (req *request.Request, output *DisassociateWebsiteAuthorizationProviderOutput) { + op := &request.Operation{ + Name: opDisassociateWebsiteAuthorizationProvider, + HTTPMethod: "POST", + HTTPPath: "/disassociateWebsiteAuthorizationProvider", + } + + if input == nil { + input = &DisassociateWebsiteAuthorizationProviderInput{} + } + + output = &DisassociateWebsiteAuthorizationProviderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateWebsiteAuthorizationProvider API operation for Amazon WorkLink. +// +// Disassociates a website authorization provider from a specified fleet. After +// the disassociation, users can't load any associated websites that require +// this authorization provider. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation DisassociateWebsiteAuthorizationProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this action. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// The service is temporarily unavailable. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The requested resource was not found. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The resource already exists. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The number of requests exceeds the limit. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DisassociateWebsiteAuthorizationProvider +func (c *WorkLink) DisassociateWebsiteAuthorizationProvider(input *DisassociateWebsiteAuthorizationProviderInput) (*DisassociateWebsiteAuthorizationProviderOutput, error) { + req, out := c.DisassociateWebsiteAuthorizationProviderRequest(input) + return out, req.Send() +} + +// DisassociateWebsiteAuthorizationProviderWithContext is the same as DisassociateWebsiteAuthorizationProvider with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateWebsiteAuthorizationProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *WorkLink) DisassociateWebsiteAuthorizationProviderWithContext(ctx aws.Context, input *DisassociateWebsiteAuthorizationProviderInput, opts ...request.Option) (*DisassociateWebsiteAuthorizationProviderOutput, error) { + req, out := c.DisassociateWebsiteAuthorizationProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisassociateWebsiteCertificateAuthority = "DisassociateWebsiteCertificateAuthority" // DisassociateWebsiteCertificateAuthorityRequest generates a "aws/request.Request" representing the @@ -1414,7 +1606,7 @@ func (c *WorkLink) ListDevicesWithContext(ctx aws.Context, input *ListDevicesInp // // Example iterating over at most 3 pages of a ListDevices operation. // pageNum := 0 // err := client.ListDevicesPages(params, -// func(page *ListDevicesOutput, lastPage bool) bool { +// func(page *worklink.ListDevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1446,10 +1638,12 @@ func (c *WorkLink) ListDevicesPagesWithContext(ctx aws.Context, input *ListDevic }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDevicesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDevicesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1558,7 +1752,7 @@ func (c *WorkLink) ListDomainsWithContext(ctx aws.Context, input *ListDomainsInp // // Example iterating over at most 3 pages of a ListDomains operation. // pageNum := 0 // err := client.ListDomainsPages(params, -// func(page *ListDomainsOutput, lastPage bool) bool { +// func(page *worklink.ListDomainsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1590,10 +1784,12 @@ func (c *WorkLink) ListDomainsPagesWithContext(ctx aws.Context, input *ListDomai }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListDomainsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1702,7 +1898,7 @@ func (c *WorkLink) ListFleetsWithContext(ctx aws.Context, input *ListFleetsInput // // Example iterating over at most 3 pages of a ListFleets operation. // pageNum := 0 // err := client.ListFleetsPages(params, -// func(page *ListFleetsOutput, lastPage bool) bool { +// func(page *worklink.ListFleetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1734,10 +1930,162 @@ func (c *WorkLink) ListFleetsPagesWithContext(ctx aws.Context, input *ListFleets }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListFleetsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListFleetsOutput), !p.HasNextPage()) { + break + } } + + return p.Err() +} + +const opListWebsiteAuthorizationProviders = "ListWebsiteAuthorizationProviders" + +// ListWebsiteAuthorizationProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListWebsiteAuthorizationProviders operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWebsiteAuthorizationProviders for more information on using the ListWebsiteAuthorizationProviders +// API call, and error handling. 
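+//
+// // A minimal paging sketch using the Pages helper defined later in this
+// // file, assuming svc is a *WorkLink client, fleetARN holds a valid fleet
+// // ARN, and the output's provider slice is WebsiteAuthorizationProviders:
+// err := svc.ListWebsiteAuthorizationProvidersPages(
+//     &worklink.ListWebsiteAuthorizationProvidersInput{FleetArn: aws.String(fleetARN)},
+//     func(page *worklink.ListWebsiteAuthorizationProvidersOutput, lastPage bool) bool {
+//         fmt.Println(page.WebsiteAuthorizationProviders)
+//         return true // returning false would stop the iteration early
+//     })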
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWebsiteAuthorizationProvidersRequest method. +// req, resp := client.ListWebsiteAuthorizationProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListWebsiteAuthorizationProviders +func (c *WorkLink) ListWebsiteAuthorizationProvidersRequest(input *ListWebsiteAuthorizationProvidersInput) (req *request.Request, output *ListWebsiteAuthorizationProvidersOutput) { + op := &request.Operation{ + Name: opListWebsiteAuthorizationProviders, + HTTPMethod: "POST", + HTTPPath: "/listWebsiteAuthorizationProviders", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListWebsiteAuthorizationProvidersInput{} + } + + output = &ListWebsiteAuthorizationProvidersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWebsiteAuthorizationProviders API operation for Amazon WorkLink. +// +// Retrieves a list of website authorization providers associated with a specified +// fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation ListWebsiteAuthorizationProviders for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// You are not authorized to perform this action. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// The service is temporarily unavailable. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// The request is not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The requested resource was not found. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The number of requests exceeds the limit. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListWebsiteAuthorizationProviders +func (c *WorkLink) ListWebsiteAuthorizationProviders(input *ListWebsiteAuthorizationProvidersInput) (*ListWebsiteAuthorizationProvidersOutput, error) { + req, out := c.ListWebsiteAuthorizationProvidersRequest(input) + return out, req.Send() +} + +// ListWebsiteAuthorizationProvidersWithContext is the same as ListWebsiteAuthorizationProviders with the addition of +// the ability to pass a context and additional request options. +// +// See ListWebsiteAuthorizationProviders for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) ListWebsiteAuthorizationProvidersWithContext(ctx aws.Context, input *ListWebsiteAuthorizationProvidersInput, opts ...request.Option) (*ListWebsiteAuthorizationProvidersOutput, error) { + req, out := c.ListWebsiteAuthorizationProvidersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
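+ // Any request.Option values (for example, request.WithLogLevel) are applied
+ // before the request is sent on the following line.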
+ return out, req.Send() +} + +// ListWebsiteAuthorizationProvidersPages iterates over the pages of a ListWebsiteAuthorizationProviders operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWebsiteAuthorizationProviders method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWebsiteAuthorizationProviders operation. +// pageNum := 0 +// err := client.ListWebsiteAuthorizationProvidersPages(params, +// func(page *worklink.ListWebsiteAuthorizationProvidersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkLink) ListWebsiteAuthorizationProvidersPages(input *ListWebsiteAuthorizationProvidersInput, fn func(*ListWebsiteAuthorizationProvidersOutput, bool) bool) error { + return c.ListWebsiteAuthorizationProvidersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListWebsiteAuthorizationProvidersPagesWithContext same as ListWebsiteAuthorizationProvidersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) ListWebsiteAuthorizationProvidersPagesWithContext(ctx aws.Context, input *ListWebsiteAuthorizationProvidersInput, fn func(*ListWebsiteAuthorizationProvidersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListWebsiteAuthorizationProvidersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListWebsiteAuthorizationProvidersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListWebsiteAuthorizationProvidersOutput), !p.HasNextPage()) { + break + } + } + return p.Err() } @@ -1847,7 +2195,7 @@ func (c *WorkLink) ListWebsiteCertificateAuthoritiesWithContext(ctx aws.Context, // // Example iterating over at most 3 pages of a ListWebsiteCertificateAuthorities operation. // pageNum := 0 // err := client.ListWebsiteCertificateAuthoritiesPages(params, -// func(page *ListWebsiteCertificateAuthoritiesOutput, lastPage bool) bool { +// func(page *worklink.ListWebsiteCertificateAuthoritiesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1879,10 +2227,12 @@ func (c *WorkLink) ListWebsiteCertificateAuthoritiesPagesWithContext(ctx aws.Con }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListWebsiteCertificateAuthoritiesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListWebsiteCertificateAuthoritiesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2810,6 +3160,97 @@ func (s AssociateDomainOutput) GoString() string { return s.String() } +type AssociateWebsiteAuthorizationProviderInput struct { + _ struct{} `type:"structure"` + + // The authorization provider type. + // + // AuthorizationProviderType is a required field + AuthorizationProviderType *string `type:"string" required:"true" enum:"AuthorizationProviderType"` + + // The domain name of the authorization provider. 
This applies only to SAML-based + // authorization providers. + DomainName *string `min:"1" type:"string"` + + // The ARN of the fleet. + // + // FleetArn is a required field + FleetArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateWebsiteAuthorizationProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateWebsiteAuthorizationProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateWebsiteAuthorizationProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateWebsiteAuthorizationProviderInput"} + if s.AuthorizationProviderType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationProviderType")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.FleetArn == nil { + invalidParams.Add(request.NewErrParamRequired("FleetArn")) + } + if s.FleetArn != nil && len(*s.FleetArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("FleetArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationProviderType sets the AuthorizationProviderType field's value. +func (s *AssociateWebsiteAuthorizationProviderInput) SetAuthorizationProviderType(v string) *AssociateWebsiteAuthorizationProviderInput { + s.AuthorizationProviderType = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *AssociateWebsiteAuthorizationProviderInput) SetDomainName(v string) *AssociateWebsiteAuthorizationProviderInput { + s.DomainName = &v + return s +} + +// SetFleetArn sets the FleetArn field's value. +func (s *AssociateWebsiteAuthorizationProviderInput) SetFleetArn(v string) *AssociateWebsiteAuthorizationProviderInput { + s.FleetArn = &v + return s +} + +type AssociateWebsiteAuthorizationProviderOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the authorization provider. + AuthorizationProviderId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AssociateWebsiteAuthorizationProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateWebsiteAuthorizationProviderOutput) GoString() string { + return s.String() +} + +// SetAuthorizationProviderId sets the AuthorizationProviderId field's value. +func (s *AssociateWebsiteAuthorizationProviderOutput) SetAuthorizationProviderId(v string) *AssociateWebsiteAuthorizationProviderOutput { + s.AuthorizationProviderId = &v + return s +} + type AssociateWebsiteCertificateAuthorityInput struct { _ struct{} `type:"structure"` @@ -3463,12 +3904,18 @@ func (s *DescribeDomainInput) SetFleetArn(v string) *DescribeDomainInput { type DescribeDomainOutput struct { _ struct{} `type:"structure"` + // The ARN of an issued ACM certificate that is valid for the domain being associated. + AcmCertificateArn *string `type:"string"` + // The time that the domain was added. CreatedTime *time.Time `type:"timestamp"` // The name to display. DisplayName *string `type:"string"` + // The name of the domain. + DomainName *string `min:"1" type:"string"` + // The current state for the domain. 
DomainStatus *string `type:"string" enum:"DomainStatus"` } @@ -3483,6 +3930,12 @@ func (s DescribeDomainOutput) GoString() string { return s.String() } +// SetAcmCertificateArn sets the AcmCertificateArn field's value. +func (s *DescribeDomainOutput) SetAcmCertificateArn(v string) *DescribeDomainOutput { + s.AcmCertificateArn = &v + return s +} + // SetCreatedTime sets the CreatedTime field's value. func (s *DescribeDomainOutput) SetCreatedTime(v time.Time) *DescribeDomainOutput { s.CreatedTime = &v @@ -3495,6 +3948,12 @@ func (s *DescribeDomainOutput) SetDisplayName(v string) *DescribeDomainOutput { return s } +// SetDomainName sets the DomainName field's value. +func (s *DescribeDomainOutput) SetDomainName(v string) *DescribeDomainOutput { + s.DomainName = &v + return s +} + // SetDomainStatus sets the DomainStatus field's value. func (s *DescribeDomainOutput) SetDomainStatus(v string) *DescribeDomainOutput { s.DomainStatus = &v @@ -3906,6 +4365,78 @@ func (s DisassociateDomainOutput) GoString() string { return s.String() } +type DisassociateWebsiteAuthorizationProviderInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the authorization provider. + // + // AuthorizationProviderId is a required field + AuthorizationProviderId *string `min:"1" type:"string" required:"true"` + + // The ARN of the fleet. + // + // FleetArn is a required field + FleetArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateWebsiteAuthorizationProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateWebsiteAuthorizationProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateWebsiteAuthorizationProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateWebsiteAuthorizationProviderInput"} + if s.AuthorizationProviderId == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationProviderId")) + } + if s.AuthorizationProviderId != nil && len(*s.AuthorizationProviderId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AuthorizationProviderId", 1)) + } + if s.FleetArn == nil { + invalidParams.Add(request.NewErrParamRequired("FleetArn")) + } + if s.FleetArn != nil && len(*s.FleetArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("FleetArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationProviderId sets the AuthorizationProviderId field's value. +func (s *DisassociateWebsiteAuthorizationProviderInput) SetAuthorizationProviderId(v string) *DisassociateWebsiteAuthorizationProviderInput { + s.AuthorizationProviderId = &v + return s +} + +// SetFleetArn sets the FleetArn field's value. 
+func (s *DisassociateWebsiteAuthorizationProviderInput) SetFleetArn(v string) *DisassociateWebsiteAuthorizationProviderInput { + s.FleetArn = &v + return s +} + +type DisassociateWebsiteAuthorizationProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateWebsiteAuthorizationProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateWebsiteAuthorizationProviderOutput) GoString() string { + return s.String() +} + type DisassociateWebsiteCertificateAuthorityInput struct { _ struct{} `type:"structure"` @@ -4393,6 +4924,105 @@ func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { return s } +type ListWebsiteAuthorizationProvidersInput struct { + _ struct{} `type:"structure"` + + // The ARN of the fleet. + // + // FleetArn is a required field + FleetArn *string `min:"20" type:"string" required:"true"` + + // The maximum number of results to be included in the next page. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListWebsiteAuthorizationProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebsiteAuthorizationProvidersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWebsiteAuthorizationProvidersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWebsiteAuthorizationProvidersInput"} + if s.FleetArn == nil { + invalidParams.Add(request.NewErrParamRequired("FleetArn")) + } + if s.FleetArn != nil && len(*s.FleetArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("FleetArn", 20)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetArn sets the FleetArn field's value. +func (s *ListWebsiteAuthorizationProvidersInput) SetFleetArn(v string) *ListWebsiteAuthorizationProvidersInput { + s.FleetArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListWebsiteAuthorizationProvidersInput) SetMaxResults(v int64) *ListWebsiteAuthorizationProvidersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWebsiteAuthorizationProvidersInput) SetNextToken(v string) *ListWebsiteAuthorizationProvidersInput { + s.NextToken = &v + return s +} + +type ListWebsiteAuthorizationProvidersOutput struct { + _ struct{} `type:"structure"` + + // The pagination token to use to retrieve the next page of results for this + // operation. If this value is null, it retrieves the first page. + NextToken *string `min:"1" type:"string"` + + // The website authorization providers. 
+ WebsiteAuthorizationProviders []*WebsiteAuthorizationProviderSummary `type:"list"` +} + +// String returns the string representation +func (s ListWebsiteAuthorizationProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebsiteAuthorizationProvidersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWebsiteAuthorizationProvidersOutput) SetNextToken(v string) *ListWebsiteAuthorizationProvidersOutput { + s.NextToken = &v + return s +} + +// SetWebsiteAuthorizationProviders sets the WebsiteAuthorizationProviders field's value. +func (s *ListWebsiteAuthorizationProvidersOutput) SetWebsiteAuthorizationProviders(v []*WebsiteAuthorizationProviderSummary) *ListWebsiteAuthorizationProvidersOutput { + s.WebsiteAuthorizationProviders = v + return s +} + type ListWebsiteCertificateAuthoritiesInput struct { _ struct{} `type:"structure"` @@ -5174,6 +5804,60 @@ func (s UpdateIdentityProviderConfigurationOutput) GoString() string { return s.String() } +// The summary of the website authorization provider. +type WebsiteAuthorizationProviderSummary struct { + _ struct{} `type:"structure"` + + // A unique identifier for the authorization provider. + AuthorizationProviderId *string `min:"1" type:"string"` + + // The authorization provider type. + // + // AuthorizationProviderType is a required field + AuthorizationProviderType *string `type:"string" required:"true" enum:"AuthorizationProviderType"` + + // The time of creation. + CreatedTime *time.Time `type:"timestamp"` + + // The domain name of the authorization provider. This applies only to SAML-based + // authorization providers. + DomainName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s WebsiteAuthorizationProviderSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteAuthorizationProviderSummary) GoString() string { + return s.String() +} + +// SetAuthorizationProviderId sets the AuthorizationProviderId field's value. +func (s *WebsiteAuthorizationProviderSummary) SetAuthorizationProviderId(v string) *WebsiteAuthorizationProviderSummary { + s.AuthorizationProviderId = &v + return s +} + +// SetAuthorizationProviderType sets the AuthorizationProviderType field's value. +func (s *WebsiteAuthorizationProviderSummary) SetAuthorizationProviderType(v string) *WebsiteAuthorizationProviderSummary { + s.AuthorizationProviderType = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *WebsiteAuthorizationProviderSummary) SetCreatedTime(v time.Time) *WebsiteAuthorizationProviderSummary { + s.CreatedTime = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *WebsiteAuthorizationProviderSummary) SetDomainName(v string) *WebsiteAuthorizationProviderSummary { + s.DomainName = &v + return s +} + // The summary of the certificate authority (CA). 
type WebsiteCaSummary struct { _ struct{} `type:"structure"` @@ -5216,6 +5900,11 @@ func (s *WebsiteCaSummary) SetWebsiteCaId(v string) *WebsiteCaSummary { return s } +const ( + // AuthorizationProviderTypeSaml is a AuthorizationProviderType enum value + AuthorizationProviderTypeSaml = "SAML" +) + const ( // DeviceStatusActive is a DeviceStatus enum value DeviceStatusActive = "ACTIVE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go index c8a7fc0067e..5fae1688d08 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkLink { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "worklink" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkLink { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkLink { svc := &WorkLink{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-25", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go index c6c22917b4a..cc276d42831 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -203,6 +203,103 @@ func (c *WorkSpaces) AuthorizeIpRulesWithContext(ctx aws.Context, input *Authori return out, req.Send() } +const opCopyWorkspaceImage = "CopyWorkspaceImage" + +// CopyWorkspaceImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyWorkspaceImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyWorkspaceImage for more information on using the CopyWorkspaceImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyWorkspaceImageRequest method. 
+// req, resp := client.CopyWorkspaceImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CopyWorkspaceImage +func (c *WorkSpaces) CopyWorkspaceImageRequest(input *CopyWorkspaceImageInput) (req *request.Request, output *CopyWorkspaceImageOutput) { + op := &request.Operation{ + Name: opCopyWorkspaceImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyWorkspaceImageInput{} + } + + output = &CopyWorkspaceImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyWorkspaceImage API operation for Amazon WorkSpaces. +// +// Copies the specified image from the specified Region to the current Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation CopyWorkspaceImage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" +// Your resource limits have been exceeded. +// +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeResourceUnavailableException "ResourceUnavailableException" +// The specified resource is not available. +// +// * ErrCodeOperationNotSupportedException "OperationNotSupportedException" +// This operation is not supported. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// The user is not authorized to access a resource. +// +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CopyWorkspaceImage +func (c *WorkSpaces) CopyWorkspaceImage(input *CopyWorkspaceImageInput) (*CopyWorkspaceImageOutput, error) { + req, out := c.CopyWorkspaceImageRequest(input) + return out, req.Send() +} + +// CopyWorkspaceImageWithContext is the same as CopyWorkspaceImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyWorkspaceImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) CopyWorkspaceImageWithContext(ctx aws.Context, input *CopyWorkspaceImageInput, opts ...request.Option) (*CopyWorkspaceImageOutput, error) { + req, out := c.CopyWorkspaceImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateIpGroup = "CreateIpGroup" // CreateIpGroupRequest generates a "aws/request.Request" representing the @@ -696,7 +793,8 @@ func (c *WorkSpaces) DeleteWorkspaceImageRequest(input *DeleteWorkspaceImageInpu // DeleteWorkspaceImage API operation for Amazon WorkSpaces. // // Deletes the specified image from your account. To delete an image, you must -// first delete any bundles that are associated with the image. 
+// first delete any bundles that are associated with the image and un-share +// the image if it is shared with other accounts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1241,7 +1339,7 @@ func (c *WorkSpaces) DescribeWorkspaceBundlesWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a DescribeWorkspaceBundles operation. // pageNum := 0 // err := client.DescribeWorkspaceBundlesPages(params, -// func(page *DescribeWorkspaceBundlesOutput, lastPage bool) bool { +// func(page *workspaces.DescribeWorkspaceBundlesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1273,10 +1371,12 @@ func (c *WorkSpaces) DescribeWorkspaceBundlesPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspaceBundlesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeWorkspaceBundlesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1377,7 +1477,7 @@ func (c *WorkSpaces) DescribeWorkspaceDirectoriesWithContext(ctx aws.Context, in // // Example iterating over at most 3 pages of a DescribeWorkspaceDirectories operation. // pageNum := 0 // err := client.DescribeWorkspaceDirectoriesPages(params, -// func(page *DescribeWorkspaceDirectoriesOutput, lastPage bool) bool { +// func(page *workspaces.DescribeWorkspaceDirectoriesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1409,10 +1509,12 @@ func (c *WorkSpaces) DescribeWorkspaceDirectoriesPagesWithContext(ctx aws.Contex }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspaceDirectoriesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeWorkspaceDirectoriesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1496,6 +1598,91 @@ func (c *WorkSpaces) DescribeWorkspaceImagesWithContext(ctx aws.Context, input * return out, req.Send() } +const opDescribeWorkspaceSnapshots = "DescribeWorkspaceSnapshots" + +// DescribeWorkspaceSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspaceSnapshots for more information on using the DescribeWorkspaceSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspaceSnapshotsRequest method. 
+// req, resp := client.DescribeWorkspaceSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceSnapshots +func (c *WorkSpaces) DescribeWorkspaceSnapshotsRequest(input *DescribeWorkspaceSnapshotsInput) (req *request.Request, output *DescribeWorkspaceSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkspaceSnapshotsInput{} + } + + output = &DescribeWorkspaceSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspaceSnapshots API operation for Amazon WorkSpaces. +// +// Describes the snapshots for the specified WorkSpace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspaceSnapshots for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// The user is not authorized to access a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceSnapshots +func (c *WorkSpaces) DescribeWorkspaceSnapshots(input *DescribeWorkspaceSnapshotsInput) (*DescribeWorkspaceSnapshotsOutput, error) { + req, out := c.DescribeWorkspaceSnapshotsRequest(input) + return out, req.Send() +} + +// DescribeWorkspaceSnapshotsWithContext is the same as DescribeWorkspaceSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspaceSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceSnapshotsWithContext(ctx aws.Context, input *DescribeWorkspaceSnapshotsInput, opts ...request.Option) (*DescribeWorkspaceSnapshotsOutput, error) { + req, out := c.DescribeWorkspaceSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeWorkspaces = "DescribeWorkspaces" // DescribeWorkspacesRequest generates a "aws/request.Request" representing the @@ -1598,7 +1785,7 @@ func (c *WorkSpaces) DescribeWorkspacesWithContext(ctx aws.Context, input *Descr // // Example iterating over at most 3 pages of a DescribeWorkspaces operation. 
// pageNum := 0 // err := client.DescribeWorkspacesPages(params, -// func(page *DescribeWorkspacesOutput, lastPage bool) bool { +// func(page *workspaces.DescribeWorkspacesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1630,10 +1817,12 @@ func (c *WorkSpaces) DescribeWorkspacesPagesWithContext(ctx aws.Context, input * }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*DescribeWorkspacesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*DescribeWorkspacesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -2318,8 +2507,9 @@ func (c *WorkSpaces) ModifyWorkspaceStateRequest(input *ModifyWorkspaceStateInpu // // To maintain a WorkSpace without being interrupted, set the WorkSpace state // to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests -// to reboot, stop, start, or rebuild. An AutoStop WorkSpace in this state is -// not stopped. Users can log into a WorkSpace in the ADMIN_MAINTENANCE state. +// to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this +// state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE +// state. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2520,6 +2710,100 @@ func (c *WorkSpaces) RebuildWorkspacesWithContext(ctx aws.Context, input *Rebuil return out, req.Send() } +const opRestoreWorkspace = "RestoreWorkspace" + +// RestoreWorkspaceRequest generates a "aws/request.Request" representing the +// client's request for the RestoreWorkspace operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreWorkspace for more information on using the RestoreWorkspace +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreWorkspaceRequest method. +// req, resp := client.RestoreWorkspaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RestoreWorkspace +func (c *WorkSpaces) RestoreWorkspaceRequest(input *RestoreWorkspaceInput) (req *request.Request, output *RestoreWorkspaceOutput) { + op := &request.Operation{ + Name: opRestoreWorkspace, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreWorkspaceInput{} + } + + output = &RestoreWorkspaceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RestoreWorkspace API operation for Amazon WorkSpaces. +// +// Restores the specified WorkSpace to its last known healthy state. +// +// You cannot restore a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY. +// +// Restoring a WorkSpace is a potentially destructive action that can result +// in the loss of data. For more information, see Restore a WorkSpace (https://docs.aws.amazon.com/workspaces/latest/adminguide/restore-workspace.html). 
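+//
+// A minimal usage sketch (the WorkSpace ID is a hypothetical placeholder and
+// a configured session is assumed):
+//
+//    svc := workspaces.New(session.Must(session.NewSession()))
+//    _, err := svc.RestoreWorkspace(&workspaces.RestoreWorkspaceInput{
+//        WorkspaceId: aws.String("ws-0123456789abc"), // placeholder ID
+//    })
+//    if err != nil {
+//        // use awserr.Error's Code and Message methods for details
+//    }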
+// +// This operation is asynchronous and returns before the WorkSpace is completely +// restored. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation RestoreWorkspace for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// The user is not authorized to access a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RestoreWorkspace +func (c *WorkSpaces) RestoreWorkspace(input *RestoreWorkspaceInput) (*RestoreWorkspaceOutput, error) { + req, out := c.RestoreWorkspaceRequest(input) + return out, req.Send() +} + +// RestoreWorkspaceWithContext is the same as RestoreWorkspace with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreWorkspace for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) RestoreWorkspaceWithContext(ctx aws.Context, input *RestoreWorkspaceInput, opts ...request.Option) (*RestoreWorkspaceOutput, error) { + req, out := c.RestoreWorkspaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRevokeIpRules = "RevokeIpRules" // RevokeIpRulesRequest generates a "aws/request.Request" representing the @@ -3226,6 +3510,132 @@ func (s *ComputeType) SetName(v string) *ComputeType { return s } +type CopyWorkspaceImageInput struct { + _ struct{} `type:"structure"` + + // A description of the image. + Description *string `min:"1" type:"string"` + + // The name of the image. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The identifier of the source image. + // + // SourceImageId is a required field + SourceImageId *string `type:"string" required:"true"` + + // The identifier of the source Region. + // + // SourceRegion is a required field + SourceRegion *string `min:"1" type:"string" required:"true"` + + // The tags for the image. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CopyWorkspaceImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyWorkspaceImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
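+//
+// A brief sketch of client-side validation before sending the request (all
+// field values below are hypothetical placeholders):
+//
+//    in := &workspaces.CopyWorkspaceImageInput{
+//        Name:          aws.String("copied-image"),
+//        SourceImageId: aws.String("wsi-0123456789abc"),
+//        SourceRegion:  aws.String("us-west-2"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams describing the invalid fields
+//    }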
+func (s *CopyWorkspaceImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyWorkspaceImageInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SourceImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + if s.SourceRegion != nil && len(*s.SourceRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceRegion", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CopyWorkspaceImageInput) SetDescription(v string) *CopyWorkspaceImageInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyWorkspaceImageInput) SetName(v string) *CopyWorkspaceImageInput { + s.Name = &v + return s +} + +// SetSourceImageId sets the SourceImageId field's value. +func (s *CopyWorkspaceImageInput) SetSourceImageId(v string) *CopyWorkspaceImageInput { + s.SourceImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyWorkspaceImageInput) SetSourceRegion(v string) *CopyWorkspaceImageInput { + s.SourceRegion = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CopyWorkspaceImageInput) SetTags(v []*Tag) *CopyWorkspaceImageInput { + s.Tags = v + return s +} + +type CopyWorkspaceImageOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the image. + ImageId *string `type:"string"` +} + +// String returns the string representation +func (s CopyWorkspaceImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyWorkspaceImageOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *CopyWorkspaceImageOutput) SetImageId(v string) *CopyWorkspaceImageOutput { + s.ImageId = &v + return s +} + type CreateIpGroupInput struct { _ struct{} `type:"structure"` @@ -3503,7 +3913,16 @@ type DefaultWorkspaceCreationProperties struct { // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` - // The public IP address to attach to all WorkSpaces that are created or rebuilt. + // Specifies whether to automatically assign a public IP address to WorkSpaces + // in this directory by default. If enabled, the public IP address allows outbound + // internet access from your WorkSpaces when you’re using an internet gateway + // in the Amazon VPC in which your WorkSpaces are located. If you're using a + // Network Address Translation (NAT) gateway for outbound internet access from + // your VPC, or if your WorkSpaces are in public subnets and you manually assign + // them Elastic IP addresses, you should disable this setting. This setting + // applies to new WorkSpaces that you launch or to existing WorkSpaces that + // you rebuild. 
For more information, see Configure a VPC for Amazon WorkSpaces + // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). EnableInternetAccess *bool `type:"boolean"` // Specifies whether the directory is enabled for Amazon WorkDocs. @@ -4345,6 +4764,78 @@ func (s *DescribeWorkspaceImagesOutput) SetNextToken(v string) *DescribeWorkspac return s } +type DescribeWorkspaceSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkspaceSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceSnapshotsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspaceSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceSnapshotsInput"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *DescribeWorkspaceSnapshotsInput) SetWorkspaceId(v string) *DescribeWorkspaceSnapshotsInput { + s.WorkspaceId = &v + return s +} + +type DescribeWorkspaceSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // Information about the snapshots that can be used to rebuild a WorkSpace. + // These snapshots include the user volume. + RebuildSnapshots []*Snapshot `type:"list"` + + // Information about the snapshots that can be used to restore a WorkSpace. + // These snapshots include both the root volume and the user volume. + RestoreSnapshots []*Snapshot `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspaceSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceSnapshotsOutput) GoString() string { + return s.String() +} + +// SetRebuildSnapshots sets the RebuildSnapshots field's value. +func (s *DescribeWorkspaceSnapshotsOutput) SetRebuildSnapshots(v []*Snapshot) *DescribeWorkspaceSnapshotsOutput { + s.RebuildSnapshots = v + return s +} + +// SetRestoreSnapshots sets the RestoreSnapshots field's value. +func (s *DescribeWorkspaceSnapshotsOutput) SetRestoreSnapshots(v []*Snapshot) *DescribeWorkspaceSnapshotsOutput { + s.RestoreSnapshots = v + return s +} + type DescribeWorkspacesConnectionStatusInput struct { _ struct{} `type:"structure"` @@ -4672,8 +5163,8 @@ func (s *FailedCreateWorkspaceRequest) SetWorkspaceRequest(v *WorkspaceRequest) } // Describes a WorkSpace that could not be rebooted. (RebootWorkspaces), rebuilt -// (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), -// or stopped (StopWorkspaces). +// (RebuildWorkspaces), restored (RestoreWorkspace), terminated (TerminateWorkspaces), +// started (StartWorkspaces), or stopped (StopWorkspaces). type FailedWorkspaceChangeRequest struct { _ struct{} `type:"structure"` @@ -5490,9 +5981,6 @@ func (s *RebuildRequest) SetWorkspaceId(v string) *RebuildRequest { type RebuildWorkspacesInput struct { _ struct{} `type:"structure"` - // Reserved. - AdditionalInfo *string `type:"string"` - // The WorkSpace to rebuild. You can specify a single WorkSpace. 
// // RebuildWorkspaceRequests is a required field @@ -5535,12 +6023,6 @@ func (s *RebuildWorkspacesInput) Validate() error { return nil } -// SetAdditionalInfo sets the AdditionalInfo field's value. -func (s *RebuildWorkspacesInput) SetAdditionalInfo(v string) *RebuildWorkspacesInput { - s.AdditionalInfo = &v - return s -} - // SetRebuildWorkspaceRequests sets the RebuildWorkspaceRequests field's value. func (s *RebuildWorkspacesInput) SetRebuildWorkspaceRequests(v []*RebuildRequest) *RebuildWorkspacesInput { s.RebuildWorkspaceRequests = v @@ -5570,6 +6052,58 @@ func (s *RebuildWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRe return s } +type RestoreWorkspaceInput struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreWorkspaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreWorkspaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreWorkspaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreWorkspaceInput"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *RestoreWorkspaceInput) SetWorkspaceId(v string) *RestoreWorkspaceInput { + s.WorkspaceId = &v + return s +} + +type RestoreWorkspaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestoreWorkspaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreWorkspaceOutput) GoString() string { + return s.String() +} + type RevokeIpRulesInput struct { _ struct{} `type:"structure"` @@ -5660,6 +6194,30 @@ func (s *RootStorage) SetCapacity(v string) *RootStorage { return s } +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The time when the snapshot was created. + SnapshotTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// SetSnapshotTime sets the SnapshotTime field's value. +func (s *Snapshot) SetSnapshotTime(v time.Time) *Snapshot { + s.SnapshotTime = &v + return s +} + // Information used to start a WorkSpace. type StartRequest struct { _ struct{} `type:"structure"` @@ -6609,7 +7167,7 @@ type WorkspaceProperties struct { RunningMode *string `type:"string" enum:"RunningMode"` // The time after a user logs off when WorkSpaces are automatically stopped. - // Configured in 60 minute intervals. + // Configured in 60-minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` // The size of the user storage. 
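Alongside the pagination doc-comment fixes in this file (the page type is now qualified with its package name), the loops themselves are restructured: an explicit break replaces the old cont flag, and p.Err() is still returned afterward, so behavior is unchanged. A minimal sketch of the corrected callback shape, assuming a configured session and the usual aws-sdk-go imports; this example is illustrative and not part of the vendored diff:

	svc := workspaces.New(session.Must(session.NewSession()))
	err := svc.DescribeWorkspacesPages(&workspaces.DescribeWorkspacesInput{},
		func(page *workspaces.DescribeWorkspacesOutput, lastPage bool) bool {
			fmt.Println(len(page.Workspaces)) // process one page of results
			return true                       // returning false would stop early
		})
	if err != nil {
		log.Fatal(err) // iteration errors surface from request.Pagination's Err()
	}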
@@ -6970,6 +7528,9 @@ const ( // WorkspaceStateRebuilding is a WorkspaceState enum value WorkspaceStateRebuilding = "REBUILDING" + // WorkspaceStateRestoring is a WorkspaceState enum value + WorkspaceStateRestoring = "RESTORING" + // WorkspaceStateMaintenance is a WorkspaceState enum value WorkspaceStateMaintenance = "MAINTENANCE" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go index 38e1cc2ee1f..63ae2bf7457 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -46,11 +46,11 @@ const ( // svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkSpaces { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkSpaces { svc := &WorkSpaces{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-08", JSONVersion: "1.1", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/api.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/api.go index 8ea57822e3d..a8aa010a6c6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/api.go @@ -114,7 +114,7 @@ func (c *XRay) BatchGetTracesWithContext(ctx aws.Context, input *BatchGetTracesI // // Example iterating over at most 3 pages of a BatchGetTraces operation. // pageNum := 0 // err := client.BatchGetTracesPages(params, -// func(page *BatchGetTracesOutput, lastPage bool) bool { +// func(page *xray.BatchGetTracesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -146,10 +146,12 @@ func (c *XRay) BatchGetTracesPagesWithContext(ctx aws.Context, input *BatchGetTr }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*BatchGetTracesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*BatchGetTracesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -754,7 +756,7 @@ func (c *XRay) GetGroupsWithContext(ctx aws.Context, input *GetGroupsInput, opts // // Example iterating over at most 3 pages of a GetGroups operation. 
// pageNum := 0 // err := client.GetGroupsPages(params, -// func(page *GetGroupsOutput, lastPage bool) bool { +// func(page *xray.GetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -786,10 +788,12 @@ func (c *XRay) GetGroupsPagesWithContext(ctx aws.Context, input *GetGroupsInput, }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetGroupsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetGroupsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -892,7 +896,7 @@ func (c *XRay) GetSamplingRulesWithContext(ctx aws.Context, input *GetSamplingRu // // Example iterating over at most 3 pages of a GetSamplingRules operation. // pageNum := 0 // err := client.GetSamplingRulesPages(params, -// func(page *GetSamplingRulesOutput, lastPage bool) bool { +// func(page *xray.GetSamplingRulesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -924,10 +928,12 @@ func (c *XRay) GetSamplingRulesPagesWithContext(ctx aws.Context, input *GetSampl }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSamplingRulesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetSamplingRulesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1030,7 +1036,7 @@ func (c *XRay) GetSamplingStatisticSummariesWithContext(ctx aws.Context, input * // // Example iterating over at most 3 pages of a GetSamplingStatisticSummaries operation. // pageNum := 0 // err := client.GetSamplingStatisticSummariesPages(params, -// func(page *GetSamplingStatisticSummariesOutput, lastPage bool) bool { +// func(page *xray.GetSamplingStatisticSummariesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1062,10 +1068,12 @@ func (c *XRay) GetSamplingStatisticSummariesPagesWithContext(ctx aws.Context, in }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetSamplingStatisticSummariesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetSamplingStatisticSummariesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1254,7 +1262,7 @@ func (c *XRay) GetServiceGraphWithContext(ctx aws.Context, input *GetServiceGrap // // Example iterating over at most 3 pages of a GetServiceGraph operation. // pageNum := 0 // err := client.GetServiceGraphPages(params, -// func(page *GetServiceGraphOutput, lastPage bool) bool { +// func(page *xray.GetServiceGraphOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1286,10 +1294,12 @@ func (c *XRay) GetServiceGraphPagesWithContext(ctx aws.Context, input *GetServic }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetServiceGraphOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetServiceGraphOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1392,7 +1402,7 @@ func (c *XRay) GetTimeSeriesServiceStatisticsWithContext(ctx aws.Context, input // // Example iterating over at most 3 pages of a GetTimeSeriesServiceStatistics operation. 
// pageNum := 0 // err := client.GetTimeSeriesServiceStatisticsPages(params, -// func(page *GetTimeSeriesServiceStatisticsOutput, lastPage bool) bool { +// func(page *xray.GetTimeSeriesServiceStatisticsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1424,10 +1434,12 @@ func (c *XRay) GetTimeSeriesServiceStatisticsPagesWithContext(ctx aws.Context, i }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTimeSeriesServiceStatisticsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTimeSeriesServiceStatisticsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1530,7 +1542,7 @@ func (c *XRay) GetTraceGraphWithContext(ctx aws.Context, input *GetTraceGraphInp // // Example iterating over at most 3 pages of a GetTraceGraph operation. // pageNum := 0 // err := client.GetTraceGraphPages(params, -// func(page *GetTraceGraphOutput, lastPage bool) bool { +// func(page *xray.GetTraceGraphOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1562,10 +1574,12 @@ func (c *XRay) GetTraceGraphPagesWithContext(ctx aws.Context, input *GetTraceGra }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTraceGraphOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTraceGraphOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -1684,7 +1698,7 @@ func (c *XRay) GetTraceSummariesWithContext(ctx aws.Context, input *GetTraceSumm // // Example iterating over at most 3 pages of a GetTraceSummaries operation. // pageNum := 0 // err := client.GetTraceSummariesPages(params, -// func(page *GetTraceSummariesOutput, lastPage bool) bool { +// func(page *xray.GetTraceSummariesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -1716,10 +1730,12 @@ func (c *XRay) GetTraceSummariesPagesWithContext(ctx aws.Context, input *GetTrac }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetTraceSummariesOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*GetTraceSummariesOutput), !p.HasNextPage()) { + break + } } + return p.Err() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/service.go b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/service.go index fdc5ea32958..9a34ccedc9b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/aws/aws-sdk-go/service/xray/service.go @@ -46,11 +46,11 @@ const ( // svc := xray.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *XRay { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *XRay { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *XRay { svc := &XRay{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-04-12", }, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/LICENSE new file mode 100644 index 00000000000..aade9a58b13 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/LICENSE @@ -0,0 +1,20 @@ +Original version Copyright © 2010 Fazlul Shahriar . Newer +portions Copyright © 2014 Blake Gentry . + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/netrc/netrc.go new file mode 100644 index 00000000000..ea49987c081 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/go-netrc/netrc/netrc.go @@ -0,0 +1,510 @@ +package netrc + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type tkType int + +const ( + tkMachine tkType = iota + tkDefault + tkLogin + tkPassword + tkAccount + tkMacdef + tkComment + tkWhitespace +) + +var keywords = map[string]tkType{ + "machine": tkMachine, + "default": tkDefault, + "login": tkLogin, + "password": tkPassword, + "account": tkAccount, + "macdef": tkMacdef, + "#": tkComment, +} + +type Netrc struct { + tokens []*token + machines []*Machine + macros Macros + updateLock sync.Mutex +} + +// FindMachine returns the Machine in n named by name. If a machine named by +// name exists, it is returned. If no Machine with name name is found and there +// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil +// is returned. +func (n *Netrc) FindMachine(name string) (m *Machine) { + // TODO(bgentry): not safe for concurrency + var def *Machine + for _, m = range n.machines { + if m.Name == name { + return m + } + if m.IsDefault() { + def = m + } + } + if def == nil { + return nil + } + return def +} + +// MarshalText implements the encoding.TextMarshaler interface to encode a +// Netrc into text format. 
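+//
+// A small round-trip sketch (ParseFile is assumed to be this package's
+// exported file parser; paths and credentials below are placeholders):
+//
+//    n, err := netrc.ParseFile("/home/user/.netrc")
+//    if err == nil {
+//        if m := n.FindMachine("api.example.com"); m != nil {
+//            m.UpdatePassword("new-secret")
+//        }
+//        if text, err := n.MarshalText(); err == nil {
+//            _ = ioutil.WriteFile("/home/user/.netrc", text, 0600)
+//        }
+//    }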
+func (n *Netrc) MarshalText() (text []byte, err error) { + // TODO(bgentry): not safe for concurrency + for i := range n.tokens { + switch n.tokens[i].kind { + case tkComment, tkDefault, tkWhitespace: // always append these types + text = append(text, n.tokens[i].rawkind...) + default: + if n.tokens[i].value != "" { // skip empty-value tokens + text = append(text, n.tokens[i].rawkind...) + } + } + if n.tokens[i].kind == tkMacdef { + text = append(text, ' ') + text = append(text, n.tokens[i].macroName...) + } + text = append(text, n.tokens[i].rawvalue...) + } + return +} + +func (n *Netrc) NewMachine(name, login, password, account string) *Machine { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + prefix := "\n" + if len(n.tokens) == 0 { + prefix = "" + } + m := &Machine{ + Name: name, + Login: login, + Password: password, + Account: account, + + nametoken: &token{ + kind: tkMachine, + rawkind: []byte(prefix + "machine"), + value: name, + rawvalue: []byte(" " + name), + }, + logintoken: &token{ + kind: tkLogin, + rawkind: []byte("\n\tlogin"), + value: login, + rawvalue: []byte(" " + login), + }, + passtoken: &token{ + kind: tkPassword, + rawkind: []byte("\n\tpassword"), + value: password, + rawvalue: []byte(" " + password), + }, + accounttoken: &token{ + kind: tkAccount, + rawkind: []byte("\n\taccount"), + value: account, + rawvalue: []byte(" " + account), + }, + } + n.insertMachineTokensBeforeDefault(m) + for i := range n.machines { + if n.machines[i].IsDefault() { + n.machines = append(append(n.machines[:i], m), n.machines[i:]...) + return m + } + } + n.machines = append(n.machines, m) + return m +} + +func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) { + newtokens := []*token{m.nametoken} + if m.logintoken.value != "" { + newtokens = append(newtokens, m.logintoken) + } + if m.passtoken.value != "" { + newtokens = append(newtokens, m.passtoken) + } + if m.accounttoken.value != "" { + newtokens = append(newtokens, m.accounttoken) + } + for i := range n.tokens { + if n.tokens[i].kind == tkDefault { + // found the default, now insert tokens before it + n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...) + return + } + } + // didn't find a default, just add the newtokens to the end + n.tokens = append(n.tokens, newtokens...) + return +} + +func (n *Netrc) RemoveMachine(name string) { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + for i := range n.machines { + if n.machines[i] != nil && n.machines[i].Name == name { + m := n.machines[i] + for _, t := range []*token{ + m.nametoken, m.logintoken, m.passtoken, m.accounttoken, + } { + n.removeToken(t) + } + n.machines = append(n.machines[:i], n.machines[i+1:]...) + return + } + } +} + +func (n *Netrc) removeToken(t *token) { + if t != nil { + for i := range n.tokens { + if n.tokens[i] == t { + n.tokens = append(n.tokens[:i], n.tokens[i+1:]...) + return + } + } + } +} + +// Machine contains information about a remote machine. +type Machine struct { + Name string + Login string + Password string + Account string + + nametoken *token + logintoken *token + passtoken *token + accounttoken *token +} + +// IsDefault returns true if the machine is a "default" token, denoted by an +// empty name. +func (m *Machine) IsDefault() bool { + return m.Name == "" +} + +// UpdatePassword sets the password for the Machine m. +func (m *Machine) UpdatePassword(newpass string) { + m.Password = newpass + updateTokenValue(m.passtoken, newpass) +} + +// UpdateLogin sets the login for the Machine m. 
+func (m *Machine) UpdateLogin(newlogin string) {
+	m.Login = newlogin
+	updateTokenValue(m.logintoken, newlogin)
+}
+
+// UpdateAccount sets the account for the Machine m.
+func (m *Machine) UpdateAccount(newaccount string) {
+	m.Account = newaccount
+	updateTokenValue(m.accounttoken, newaccount)
+}
+
+func updateTokenValue(t *token, value string) {
+	oldvalue := t.value
+	t.value = value
+	newraw := make([]byte, len(t.rawvalue))
+	copy(newraw, t.rawvalue)
+	t.rawvalue = append(
+		bytes.TrimSuffix(newraw, []byte(oldvalue)),
+		[]byte(value)...,
+	)
+}
+
+// Macros contains all the macro definitions in a netrc file.
+type Macros map[string]string
+
+type token struct {
+	kind      tkType
+	macroName string
+	value     string
+	rawkind   []byte
+	rawvalue  []byte
+}
+
+// Error represents a netrc file parse error.
+type Error struct {
+	LineNum int    // Line number
+	Msg     string // Error message
+}
+
+// Error returns a string representation of error e.
+func (e *Error) Error() string {
+	return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
+}
+
+func (e *Error) BadDefaultOrder() bool {
+	return e.Msg == errBadDefaultOrder
+}
+
+const errBadDefaultOrder = "default token must appear after all machine tokens"
+
+// scanLinesKeepPrefix is a split function for a Scanner that returns each line
+// of text. The returned token may include newlines if they are before the
+// first non-space character. The returned line may be empty. The end-of-line
+// marker is one optional carriage return followed by one mandatory newline. In
+// regular expression notation, it is `\r?\n`. The last non-empty line of
+// input will be returned even if it has no newline.
+func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+	}
+	if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
+		// We have a full newline-terminated line.
+		return start + i, data[0 : start+i], nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// scanTokensKeepPrefix is a split function for a Scanner that returns each
+// space-separated word of text, with prefixing spaces included. It will never
+// return an empty string. The definition of space is set by unicode.IsSpace.
+//
+// Adapted from bufio.ScanWords().
+func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+	}
+	if atEOF && len(data) == 0 || start == len(data) {
+		return len(data), data, nil
+	}
+	if len(data) > start && data[start] == '#' {
+		return scanLinesKeepPrefix(data, atEOF)
+	}
+	// Scan until space, marking end of word.
+	for width, i := 0, start; i < len(data); i += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[i:])
+		if unicode.IsSpace(r) {
+			return i, data[:i], nil
+		}
+	}
+	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+	if atEOF && len(data) > start {
+		return len(data), data, nil
+	}
+	// Request more data.
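+	// (For a bufio.SplitFunc, returning advance 0 with a nil token asks the
+	// Scanner to read more input and call the split function again.)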
+ return 0, nil, nil +} + +func newToken(rawb []byte) (*token, error) { + _, tkind, err := bufio.ScanWords(rawb, true) + if err != nil { + return nil, err + } + var ok bool + t := token{rawkind: rawb} + t.kind, ok = keywords[string(tkind)] + if !ok { + trimmed := strings.TrimSpace(string(tkind)) + if trimmed == "" { + t.kind = tkWhitespace // whitespace-only, should happen only at EOF + return &t, nil + } + if strings.HasPrefix(trimmed, "#") { + t.kind = tkComment // this is a comment + return &t, nil + } + return &t, fmt.Errorf("keyword expected; got " + string(tkind)) + } + return &t, nil +} + +func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { + if scanner.Scan() { + raw := scanner.Bytes() + pos += bytes.Count(raw, []byte{'\n'}) + return raw, strings.TrimSpace(string(raw)), pos, nil + } + if err := scanner.Err(); err != nil { + return nil, "", pos, &Error{pos, err.Error()} + } + return nil, "", pos, nil +} + +func parse(r io.Reader, pos int) (*Netrc, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} + + defaultSeen := false + var currentMacro *token + var m *Machine + var t *token + scanner := bufio.NewScanner(bytes.NewReader(b)) + scanner.Split(scanTokensKeepPrefix) + + for scanner.Scan() { + rawb := scanner.Bytes() + if len(rawb) == 0 { + break + } + pos += bytes.Count(rawb, []byte{'\n'}) + t, err = newToken(rawb) + if err != nil { + if currentMacro == nil { + return nil, &Error{pos, err.Error()} + } + currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) + continue + } + + if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) { + // if macro rawvalue + rawb would contain \n\n, then macro def is over + currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n") + nrc.macros[currentMacro.macroName] = currentMacro.value + currentMacro = nil + } + + switch t.kind { + case tkMacdef: + if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + currentMacro = t + case tkDefault: + if defaultSeen { + return nil, &Error{pos, "multiple default token"} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + m.Name = "" + defaultSeen = true + case tkMachine: + if defaultSeen { + return nil, &Error{pos, errBadDefaultOrder} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Name + m.nametoken = t + case tkLogin: + if m == nil || m.Login != "" { + return nil, &Error{pos, "unexpected token login "} + } + if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Login + m.logintoken = t + case tkPassword: + if m == nil || m.Password != "" { + return nil, &Error{pos, "unexpected token password"} + } + if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Password + m.passtoken = t + case tkAccount: + if m == nil || m.Account != "" { + return nil, &Error{pos, "unexpected token account"} + } + if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Account + m.accounttoken = t + } + + nrc.tokens = append(nrc.tokens, t) + } + + if err := scanner.Err(); 
err != nil { + return nil, err + } + + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + return &nrc, nil +} + +// ParseFile opens the file at filename and then passes its io.Reader to +// Parse(). +func ParseFile(filename string) (*Netrc, error) { + fd, err := os.Open(filename) + if err != nil { + return nil, err + } + defer fd.Close() + return Parse(fd) +} + +// Parse parses from the the Reader r as a netrc file and returns the set of +// machine information and macros defined in it. The ``default'' machine, +// which is intended to be used when no machine name matches, is identified +// by an empty machine name. There can be only one ``default'' machine. +// +// If there is a parsing error, an Error is returned. +func Parse(r io.Reader) (*Netrc, error) { + return parse(r, 1) +} + +// FindMachine parses the netrc file identified by filename and returns the +// Machine named by name. If a problem occurs parsing the file at filename, an +// error is returned. If a machine named by name exists, it is returned. If no +// Machine with name name is found and there is a ``default'' machine, the +// ``default'' machine is returned. Otherwise, nil is returned. +func FindMachine(filename, name string) (m *Machine, err error) { + n, err := ParseFile(filename) + if err != nil { + return nil, err + } + return n.FindMachine(name), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE new file mode 100644 index 00000000000..37d60fc3541 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2017 Blake Gentry + +This license applies to the non-Windows portions of this library. The Windows +portion maintains its own Apache 2.0 license. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 00000000000..ff177f61243 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
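Reviewer note: a minimal, hypothetical sketch of how the go-netrc API vendored above (ParseFile, FindMachine, UpdatePassword, MarshalText) might be driven, assuming the import path github.com/bgentry/go-netrc/netrc used by this dependency set. The file path, host name, and password below are illustrative placeholders only, not values taken from this change.

	package main

	import (
		"io/ioutil"
		"log"

		"github.com/bgentry/go-netrc/netrc"
	)

	func main() {
		// Parse an existing netrc file (placeholder path).
		n, err := netrc.ParseFile("/home/user/.netrc")
		if err != nil {
			log.Fatal(err)
		}

		// FindMachine falls back to the "default" machine (empty name) when
		// no entry matches, and returns nil only if there is no default.
		m := n.FindMachine("api.example.com")
		if m == nil {
			log.Fatal("no matching machine and no default entry")
		}
		log.Printf("login for api.example.com: %s", m.Login)

		// Updates go through token-aware helpers, so MarshalText can
		// round-trip the file with comments and whitespace preserved.
		m.UpdatePassword("s3cret")
		text, err := n.MarshalText()
		if err != nil {
			log.Fatal(err)
		}
		if err := ioutil.WriteFile("/home/user/.netrc", text, 0600); err != nil {
			log.Fatal(err)
		}
	}
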
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy.go b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy.go
new file mode 100644
index 00000000000..71c1dd1b969
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy.go
@@ -0,0 +1,49 @@
+package speakeasy
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Ask prompts the user to enter a password with input hidden. prompt is a
+// string to display before the user's input. It returns the entered password,
+// or an error if reading the input failed.
+func Ask(prompt string) (password string, err error) {
+	return FAsk(os.Stdout, prompt)
+}
+
+// FAsk is the same as Ask, except it is possible to specify the file to write
+// the prompt to. If 'nil' is passed as the writer, no prompt will be written.
+func FAsk(wr io.Writer, prompt string) (password string, err error) {
+	if wr != nil && prompt != "" {
+		fmt.Fprint(wr, prompt) // Display the prompt.
+	}
+	password, err = getPassword()
+
+	// Print a newline after the user's hidden input.
+	if wr != nil {
+		fmt.Fprintln(wr, "")
+	}
+	return
+}
+
+func readline() (value string, err error) {
+	var valb []byte
+	var n int
+	b := make([]byte, 1)
+	for {
+		// read one byte at a time so we don't accidentally read extra bytes
+		n, err = os.Stdin.Read(b)
+		if err != nil && err != io.EOF {
+			return "", err
+		}
+		if n == 0 || b[0] == '\n' {
+			break
+		}
+		valb = append(valb, b[0])
+	}
+
+	return strings.TrimSuffix(string(valb), "\r"), nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
new file mode 100644
index 00000000000..d99fda19190
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
@@ -0,0 +1,93 @@
+// based on https://code.google.com/p/gopass
+// Author: johnsiilver@gmail.com (John Doak)
+//
+// Original code is based on code by RogerV in the golang-nuts thread:
+// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package speakeasy
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"strings"
+	"syscall"
+)
+
+const sttyArg0 = "/bin/stty"
+
+var (
+	sttyArgvEOff = []string{"stty", "-echo"}
+	sttyArgvEOn  = []string{"stty", "echo"}
+)
+
+// getPassword gets input hidden from the terminal from a user. This is
+// accomplished by turning off terminal echo, reading input from the user and
+// finally turning on terminal echo.
+func getPassword() (password string, err error) {
+	sig := make(chan os.Signal, 10)
+	brk := make(chan bool)
+
+	// File descriptors for stdin, stdout, and stderr.
+	fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}
+
+	// Set up notification of termination signals on channel sig, and start a
+	// goroutine that watches for them so we can turn echo back on if need be.
+	signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
+		syscall.SIGTERM)
+	go catchSignal(fd, sig, brk)
+
+	// Turn off the terminal echo.
+	pid, err := echoOff(fd)
+	if err != nil {
+		return "", err
+	}
+
+	// Turn on the terminal echo and stop listening for signals.
+	defer signal.Stop(sig)
+	defer close(brk)
+	defer echoOn(fd)
+
+	syscall.Wait4(pid, nil, 0, nil)
+
+	line, err := readline()
+	if err == nil {
+		password = strings.TrimSpace(line)
+	} else {
+		err = fmt.Errorf("failed during password entry: %s", err)
+	}
+
+	return password, err
+}
+
+// echoOff turns off the terminal echo.
+func echoOff(fd []uintptr) (int, error) {
+	pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
+	if err != nil {
+		return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
+	}
+	return pid, nil
+}
+
+// echoOn turns back on the terminal echo.
+func echoOn(fd []uintptr) {
+	// Turn on the terminal echo.
+	pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
+	if e == nil {
+		syscall.Wait4(pid, nil, 0, nil)
+	}
+}
+
+// catchSignal watches for SIGQUIT, SIGINT, and the other termination signals
+// registered above so that terminal echo can be turned back on before the
+// program ends; otherwise the user is left with echo off in their terminal.
+// (Note that SIGKILL cannot actually be caught; it is listed in the Notify
+// call above but delivery of it terminates the process directly.)
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
+	select {
+	case <-sig:
+		echoOn(fd)
+		os.Exit(-1)
+	case <-brk:
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
new file mode 100644
index 00000000000..c2093a8091f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+
+package speakeasy
+
+import (
+	"syscall"
+)
+
+// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+const ENABLE_ECHO_INPUT = 0x0004
+
+func getPassword() (password string, err error) {
+	var oldMode uint32
+
+	err = syscall.GetConsoleMode(syscall.Stdin, &oldMode)
+	if err != nil {
+		return
+	}
+
+	var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
+
+	err = setConsoleMode(syscall.Stdin, newMode)
+	defer setConsoleMode(syscall.Stdin, oldMode)
+	if err != nil {
+		return
+	}
+
+	return readline()
+}
+
+func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
+	dll := syscall.MustLoadDLL("kernel32")
+	proc := dll.MustFindProc("SetConsoleMode")
+	r, _, err := proc.Call(uintptr(console), uintptr(mode))
+
+	if r == 0 {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/LICENSE.md b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/LICENSE.md
new file mode 100644
index 00000000000..25fdaf639df
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/color.go b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/color.go
new file mode 100644
index 00000000000..91c8e9f0620
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,603 @@
+package color
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+var (
+	// NoColor defines if the output is colorized or not. It is dynamically
+	// set based on whether stdout's file descriptor refers to a terminal.
+	// This is a global option and affects all colors. For more control over
+	// each color block, use the DisableColor() method on individual Colors.
+	NoColor = os.Getenv("TERM") == "dumb" ||
+		(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
+
+	// Output defines the standard output of the print functions. By default
+	// os.Stdout is used.
+	Output = colorable.NewColorableStdout()
+
+	// Error defines a color supporting writer for os.Stderr.
+	Error = colorable.NewColorableStderr()
+
+	// colorsCache is used to reduce the count of created Color objects and
+	// allows reusing already-created objects with the required Attribute.
+	colorsCache   = make(map[Attribute]*Color)
+	colorsCacheMu sync.Mutex // protects colorsCache
+)
+
+// Color defines a custom color object which is defined by SGR parameters.
+type Color struct {
+	params  []Attribute
+	noColor *bool
+}
+
+// Attribute defines a single SGR code.
+type Attribute int
+
+const escape = "\x1b"
+
+// Base attributes
+const (
+	Reset Attribute = iota
+	Bold
+	Faint
+	Italic
+	Underline
+	BlinkSlow
+	BlinkRapid
+	ReverseVideo
+	Concealed
+	CrossedOut
+)
+
+// Foreground text colors
+const (
+	FgBlack Attribute = iota + 30
+	FgRed
+	FgGreen
+	FgYellow
+	FgBlue
+	FgMagenta
+	FgCyan
+	FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+	FgHiBlack Attribute = iota + 90
+	FgHiRed
+	FgHiGreen
+	FgHiYellow
+	FgHiBlue
+	FgHiMagenta
+	FgHiCyan
+	FgHiWhite
+)
+
+// Background text colors
+const (
+	BgBlack Attribute = iota + 40
+	BgRed
+	BgGreen
+	BgYellow
+	BgBlue
+	BgMagenta
+	BgCyan
+	BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+	BgHiBlack Attribute = iota + 100
+	BgHiRed
+	BgHiGreen
+	BgHiYellow
+	BgHiBlue
+	BgHiMagenta
+	BgHiCyan
+	BgHiWhite
+)
+
+// New returns a newly created color object.
+func New(value ...Attribute) *Color {
+	c := &Color{params: make([]Attribute, 0)}
+	c.Add(value...)
+	return c
+}
+
+// Set sets the given parameters immediately. It will change the color of
+// output with the given SGR parameters until color.Unset() is called.
+func Set(p ...Attribute) *Color {
+	c := New(p...)
+	c.Set()
+	return c
+}
+
+// Unset resets all escape attributes and clears the output. Usually should
+// be called after Set().
+func Unset() {
+	if NoColor {
+		return
+	}
+
+	fmt.Fprintf(Output, "%s[%dm", escape, Reset)
+}
+
+// Set sets the SGR sequence.
+func (c *Color) Set() *Color {
+	if c.isNoColorSet() {
+		return c
+	}
+
+	// c.format() is already a fully rendered escape sequence, so print it
+	// verbatim rather than treating it as a format string.
+	fmt.Fprint(Output, c.format())
+	return c
+}
+
+func (c *Color) unset() {
+	if c.isNoColorSet() {
+		return
+	}
+
+	Unset()
+}
+
+func (c *Color) setWriter(w io.Writer) *Color {
+	if c.isNoColorSet() {
+		return c
+	}
+
+	fmt.Fprint(w, c.format())
+	return c
+}
+
+func (c *Color) unsetWriter(w io.Writer) {
+	if c.isNoColorSet() {
+		return
+	}
+
+	if NoColor {
+		return
+	}
+
+	fmt.Fprintf(w, "%s[%dm", escape, Reset)
+}
+
+// Add is used to chain SGR parameters. Use as many parameters as needed to
+// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
+func (c *Color) Add(value ...Attribute) *Color {
+	c.params = append(c.params, value...)
+	return c
+}
+
+func (c *Color) prepend(value Attribute) {
+	c.params = append(c.params, 0)
+	copy(c.params[1:], c.params[0:])
+	c.params[0] = value
+}
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	c.setWriter(w)
+	defer c.unsetWriter(w)
+
+	return fmt.Fprint(w, a...)
+}
+
+// Print formats using the default formats for its operands and writes to
+// standard output. Spaces are added between operands when neither is a
+// string. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Print(a ...interface{}) (n int, err error) {
+	c.Set()
+	defer c.unset()
+
+	return fmt.Fprint(Output, a...)
+}
+
+// Fprintf formats according to a format specifier and writes to w.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	c.setWriter(w)
+	defer c.unsetWriter(w)
+
+	return fmt.Fprintf(w, format, a...)
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// This is the standard fmt.Printf() method wrapped with the given color.
+func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
+	c.Set()
+	defer c.unset()
+
+	return fmt.Fprintf(Output, format, a...)
+}
+
+// Fprintln formats using the default formats for its operands and writes to w.
+// Spaces are always added between operands and a newline is appended.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	c.setWriter(w)
+	defer c.unsetWriter(w)
+
+	return fmt.Fprintln(w, a...)
+}
+
+// Println formats using the default formats for its operands and writes to
+// standard output. Spaces are always added between operands and a newline is
+// appended. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Println(a ...interface{}) (n int, err error) {
+	c.Set()
+	defer c.unset()
+
+	return fmt.Fprintln(Output, a...)
+}
+
+// Sprint is just like Print, but returns a string instead of printing it.
+func (c *Color) Sprint(a ...interface{}) string {
+	return c.wrap(fmt.Sprint(a...))
+}
+
+// Sprintln is just like Println, but returns a string instead of printing it.
+func (c *Color) Sprintln(a ...interface{}) string {
+	return c.wrap(fmt.Sprintln(a...))
+}
+
+// Sprintf is just like Printf, but returns a string instead of printing it.
+func (c *Color) Sprintf(format string, a ...interface{}) string {
+	return c.wrap(fmt.Sprintf(format, a...))
+}
+
+// FprintFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprint().
+func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
+	return func(w io.Writer, a ...interface{}) {
+		c.Fprint(w, a...)
+	}
+}
+
+// PrintFunc returns a new function that prints the passed arguments as
+// colorized with color.Print().
+func (c *Color) PrintFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Print(a...)
+	}
+}
+
+// FprintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintf().
+func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
+	return func(w io.Writer, format string, a ...interface{}) {
+		c.Fprintf(w, format, a...)
+	}
+}
+
+// PrintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Printf().
+func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
+	return func(format string, a ...interface{}) {
+		c.Printf(format, a...)
+	}
+}
+
+// FprintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Fprintln().
+func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
+	return func(w io.Writer, a ...interface{}) {
+		c.Fprintln(w, a...)
+	}
+}
+
+// PrintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Println().
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful for mixing into other strings.
+// Windows users should use this in conjunction with color.Output, example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful for mixing into other strings.
+// Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful for mixing into other strings.
+// Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// escape; an example output might be: "1;36" -> bold cyan
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the colors attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful for implementing flags like
+// "--no-color" without changing any existing code. To re-enable, use the
+// EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// check first whether the user explicitly set a per-color override
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not, return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
+func (c *Color) Equals(c2 *Color) bool {
+	if len(c.params) != len(c2.params) {
+		return false
+	}
+
+	for _, attr := range c.params {
+		if !c2.attrExists(attr) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (c *Color) attrExists(a Attribute) bool {
+	for _, attr := range c.params {
+		if attr == a {
+			return true
+		}
+	}
+
+	return false
+}
+
+func boolPtr(v bool) *bool {
+	return &v
+}
+
+func getCachedColor(p Attribute) *Color {
+	colorsCacheMu.Lock()
+	defer colorsCacheMu.Unlock()
+
+	c, ok := colorsCache[p]
+	if !ok {
+		c = New(p)
+		colorsCache[p] = c
+	}
+
+	return c
+}
+
+func colorPrint(format string, p Attribute, a ...interface{}) {
+	c := getCachedColor(p)
+
+	if !strings.HasSuffix(format, "\n") {
+		format += "\n"
+	}
+
+	if len(a) == 0 {
+		c.Print(format)
+	} else {
+		c.Printf(format, a...)
+	}
+}
+
+func colorString(format string, p Attribute, a ...interface{}) string {
+	c := getCachedColor(p)
+
+	if len(a) == 0 {
+		return c.SprintFunc()(format)
+	}
+
+	return c.SprintfFunc()(format, a...)
+}
+
+// Black is a convenient helper function to print with black foreground. A
+// newline is appended to format by default.
+func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
+
+// Red is a convenient helper function to print with red foreground. A
+// newline is appended to format by default.
+func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
+
+// Green is a convenient helper function to print with green foreground. A
+// newline is appended to format by default.
+func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
+
+// Yellow is a convenient helper function to print with yellow foreground.
+// A newline is appended to format by default.
+func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...)
} + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
}
+
+// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
+// A newline is appended to format by default.
+func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
+
+// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
+// newline is appended to format by default.
+func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
+
+// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
+// newline is appended to format by default.
+func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
+
+// HiBlackString is a convenient helper function to return a string with hi-intensity black
+// foreground.
+func HiBlackString(format string, a ...interface{}) string {
+	return colorString(format, FgHiBlack, a...)
+}
+
+// HiRedString is a convenient helper function to return a string with hi-intensity red
+// foreground.
+func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
+
+// HiGreenString is a convenient helper function to return a string with hi-intensity green
+// foreground.
+func HiGreenString(format string, a ...interface{}) string {
+	return colorString(format, FgHiGreen, a...)
+}
+
+// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
+// foreground.
+func HiYellowString(format string, a ...interface{}) string {
+	return colorString(format, FgHiYellow, a...)
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000000..cf1e96500f4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+    color.Cyan("Prints text in cyan.")
+
+    // a newline will be appended automatically
+    color.Blue("Prints %s in blue.", "text")
+
+    // More default foreground colors..
+    color.Red("We have red")
+    color.Yellow("Yellow color too!")
+    color.Magenta("And many others ..")
+
+    // Hi-intensity colors
+    color.HiGreen("Bright green color.")
+    color.HiBlack("Bright black means gray..")
+    color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+    // Create a new color object
+    c := color.New(color.FgCyan).Add(color.Underline)
+    c.Println("Prints cyan text with an underline.")
+
+    // Or just add them to New()
+    d := color.New(color.FgCyan, color.Bold)
+    d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+    // Mix up foreground and background colors, create new mixes!
+    red := color.New(color.FgRed)
+
+    boldRed := red.Add(color.Bold)
+    boldRed.Println("This will print text in bold red.")
+
+    whiteBackground := red.Add(color.BgWhite)
+    whiteBackground.Println("Red text with White background.")
+
+    // Use your own io.Writer output
+    color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+    blue := color.New(color.FgBlue)
+    blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+    // Create a custom print function for convenience
+    red := color.New(color.FgRed).PrintfFunc()
+    red("warning")
+    red("error: %s", err)
+
+    // Mix up multiple attributes
+    notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+    notice("don't forget this...")
+
+You can also use the FprintXxx functions to pass your own io.Writer:
+
+    blue := color.New(FgBlue).FprintfFunc()
+    blue(myWriter, "important notice: %s", stars)
+
+    // Mix up with multiple attributes
+    success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+    success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+    yellow := New(FgYellow).SprintFunc()
+    red := New(FgRed).SprintFunc()
+
+    fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions, users should use fmt.FprintXXX and
+set the output to color.Output:
+
+    fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+    // Use handy standard colors.
+    color.Set(color.FgYellow)
+
+    fmt.Println("Existing text will be now in Yellow")
+    fmt.Printf("This one %s\n", "too")
+
+    color.Unset() // don't forget to unset
+
+    // You can mix up parameters
+    color.Set(color.FgMagenta, color.Bold)
+    defer color.Unset() // use it in your function
+
+    fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+    var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+    if *flagNoColor {
+        color.NoColor = true // disables colorized output
+    }
+
+It also has support for single color definitions (local).
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/AUTHORS b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000000..bcfa19520af --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/CONTRIBUTORS b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000000..931ae31606f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000000..72efb0353dd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000000..fcd192b849e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000000..e6179f65e35 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. 
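+ //
+ // Each []byte argument is a 24-byte slice header (base, len, cap), so in
+ // the argument frame dst sits at FP+0, src at FP+24 and the returned int
+ // at FP+48, matching the 56-byte argument size in the TEXT line above.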
+ MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. 
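+ // runtime·memmove makes no guarantees about preserving registers, so
+ // everything live across the CALL was spilled beforehand and R8-R13 are
+ // reloaded or recomputed from the arguments on the frame.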
+ MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ //   goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ //   abxxxxxxxxxxxx
+ //   [------]           d-offset
+ //     [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //   ababxxxxxxxxxx
+ //   [------]           d-offset
+ //       [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ //   goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ //   copy 8 bytes from dst[d-offset:] to dst[d:]
+ //   length -= offset
+ //   d += offset
+ //   offset += offset
+ //   // The two previous lines together means that d-offset, and therefore
+ //   // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0.
Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_other.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000000..8c9f2049bc7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000000..8d393e904bb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. 
For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. 
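+//
+// For a buffered Writer, a typical pattern (f is any io.Writer and data
+// any []byte) is:
+//
+//	w := snappy.NewBufferedWriter(f)
+//	defer w.Close()
+//	if _, err := w.Write(data); err != nil {
+//		return err
+//	}
+//	// Flush here only if the bytes must reach f before w is closed.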
+func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000000..150d91bc8be --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. 
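+//
+// In the TEXT directive below, $24-56 declares a 24-byte local frame (room
+// for the three 8-byte arguments pushed when calling runtime·memmove) and
+// 56 bytes of caller arguments: two 24-byte slice headers plus the 8-byte
+// int result.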
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
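+ // The offsets 56, 64 and 88 used below are the spill slots given in the
+ // register allocation table at the top of this function.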
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
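+ //
+ // That is: update the hash table at s-1 and at s, and only fall through
+ // to computing nextHash at s+1 when the candidate at s does not match,
+ // using a single load64 instead of three separate load32 calls.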
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_other.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000000..dbcae905e6e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. 
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
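+ // (load64(src, s-1) reads src[s-1 : s+7], so uint32(x>>0), uint32(x>>8)
+ // and uint32(x>>16) are exactly load32 at s-1, s and s+1 respectively.)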
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/snappy.go b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000000..ece692ea461 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. 
It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000000..32017f8fa1d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/compare.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000000..2133562b01c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
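The masked checksum defined in the crc helper above uses only the standard library, so it is easy to reproduce. A minimal sketch, assuming nothing beyond hash/crc32 (the sample payload is arbitrary):

package main

import (
	"fmt"
	"hash/crc32"
)

// maskedCRC mirrors the vendored crc helper: CRC-32C (Castagnoli) with the
// rotate-and-offset masking required by the snappy framing format.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, crc32.MakeTable(crc32.Castagnoli), b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("0x%08x\n", maskedCRC([]byte("hello, snappy")))
}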
+ +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. 
+// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more +// human-readable output. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs.
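The Equal and Diff entry points above are easiest to understand through use. A minimal sketch of the tolerance-based comparison mentioned in the package documentation (Point and the 1e-9 tolerance are invented for illustration):

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

type Point struct{ X, Y float64 }

func main() {
	// A symmetric, deterministic Comparer that equates floats within an
	// absolute tolerance, as the package documentation suggests.
	approx := cmp.Comparer(func(a, b float64) bool {
		return math.Abs(a-b) < 1e-9
	})

	a := Point{1, 2}
	b := Point{1, 2 + 1e-12}
	fmt.Println(cmp.Equal(a, b))         // false: plain == sees the difference
	fmt.Println(cmp.Equal(a, b, approx)) // true: within tolerance
	fmt.Println(cmp.Diff(a, b))          // "-"/"+" report, empty iff Equal
}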
+ s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
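Rule 2 above (tryMethod) is what lets a type own its equality. A small sketch, with CaseInsensitive invented for illustration: because it has a method of the form "(T) Equal(T) bool", cmp.Equal defers to it instead of comparing the struct field by field:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

type CaseInsensitive struct{ S string }

// Equal satisfies the "(T) Equal(T) bool" shape that tryMethod looks for.
func (c CaseInsensitive) Equal(o CaseInsensitive) bool {
	return strings.EqualFold(c.S, o.S)
}

func main() {
	fmt.Println(cmp.Equal(CaseInsensitive{"Hello"}, CaseInsensitive{"HELLO"})) // true
}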
+ vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The indexes of non-discarded elements + // are stored in a separate slice, on which diffing is then performed. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two maps' keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function.
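The slice rules spelled out earlier (nil and empty-but-non-nil compare unequal) are a common surprise in practice. A sketch of the documented escape hatch; note that cmpopts ships in the same go-cmp module but is not among the packages vendored by this change:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	var nilSlice []int
	empty := []int{}

	fmt.Println(cmp.Equal(nilSlice, empty))                        // false
	fmt.Println(cmp.Equal(nilSlice, empty, cmpopts.EquateEmpty())) // true
}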
If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
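The triangular-number schedule documented for dynChecker above can be replayed in isolation. This sketch copies the Next logic into a standalone type (checker is an invented name) and prints which calls trigger a check:

package main

import "fmt"

// checker replicates dynChecker.Next: a check fires whenever the call
// counter reaches the next triangular number.
type checker struct{ curr, next int }

func (dc *checker) Next() bool {
	ok := dc.curr == dc.next
	if ok {
		dc.curr = 0
		dc.next++
	}
	dc.curr++
	return ok
}

func main() {
	var dc checker
	for call := 0; call < 25; call++ {
		if dc.Next() {
			fmt.Printf("check fires on call %d\n", call)
		}
	}
	// Prints 0, 1, 3, 6, 10, 15, 21: the triangular numbers listed above.
}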
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_panic.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000000..abc3a1c3e76 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000000..59d4ee91b47 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000000..fe98dcc6774 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000000..597b6ae56b1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
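retrieveUnexportedField above leans on reflect.NewAt plus a field offset. A minimal standalone sketch of the same trick (secret and its fields are invented); the parent struct must be addressable, which is why the vendored code pairs it with makeAddressable:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type secret struct {
	visible string
	hidden  int
}

func main() {
	s := secret{"ok", 42}
	v := reflect.ValueOf(&s).Elem() // addressable, as the helper requires
	f, _ := v.Type().FieldByName("hidden")

	// Same pointer arithmetic as retrieveUnexportedField: a read-write
	// view of an unexported field, bypassing reflect's visibility flag.
	h := reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
	fmt.Println(h.Int()) // 42
}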
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000000..3d2e42662ca --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. 
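The EditScript bookkeeping above (Dist, LenX, LenY) follows from simple counting. Since the diff package is internal and cannot be imported from outside cmp, this sketch re-declares a tiny replica to show the arithmetic:

package main

import "fmt"

type editType uint8

const (
	identity editType = iota // '.'
	uniqueX                  // 'X'
	uniqueY                  // 'Y'
	modified                 // 'M'
)

func main() {
	// X = [A B C A], Y = [A B A]: one deletion from X.
	es := []editType{identity, identity, uniqueX, identity}

	var ni, nx, ny int
	for _, e := range es {
		switch e {
		case identity:
			ni++
		case uniqueX:
			nx++
		case uniqueY:
			ny++
		}
	}
	fmt.Println(len(es) - ni) // Dist = 1
	fmt.Println(len(es) - ny) // LenX = 4
	fmt.Println(len(es) - nx) // LenY = 3
}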
+func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. 
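The Similar rule above is a one-liner, but its consequences (binary results count as similar, BoolResult(false) does not) are worth tabulating. A small sketch replaying the NumSame+1 >= NumDiff test:

package main

import "fmt"

// similar replays Result.Similar from above.
func similar(numSame, numDiff int) bool { return numSame+1 >= numDiff }

func main() {
	fmt.Println(similar(1, 0)) // true:  Result{1, 0}, a binary "equal"
	fmt.Println(similar(0, 1)) // true:  Result{0, 1}, unequal yet similar
	fmt.Println(similar(0, 2)) // false: BoolResult(false), not similar
	fmt.Println(similar(3, 4)) // true:  mostly-matching composite value
}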
+ // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. + // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // unless it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is to connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists differ because elements were added to the front + // or end of one of the lists. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] 
+func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000000..a9e7fc0b5b3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000000..01aed0a1532 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000000..c0b667f58b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000000..ace1dbe86e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. 
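zigzag above is the heart of the diagonal probing: consecutive integers fan out as offsets alternating around zero. Copying the function verbatim into a sketch makes the mapping in its comment concrete:

package main

import "fmt"

// zigzag is copied from the vendored diff package.
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x
	}
	return x >> 1
}

func main() {
	for i := 0; i < 7; i++ {
		fmt.Printf("%d -> %+d\n", i, zigzag(i))
	}
	// 0 -> +0, 1 -> -1, 2 -> +1, 3 -> -2, 4 -> +2, 5 -> -3, 6 -> +3
}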
+func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000000..0a01c4796f1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000000..da134ae2a80 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. 
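IsType above classifies functions purely by shape. This sketch (isTTB is an invented name) mirrors its ttbFunc case, the "func(T, T) bool" shape used for Comparer, ValueFilter, and Less:

package main

import (
	"fmt"
	"reflect"
)

var boolType = reflect.TypeOf(true)

// isTTB mirrors IsType's ttbFunc case: a non-variadic func(T, T) bool
// with both inputs of the same type.
func isTTB(t reflect.Type) bool {
	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
		return false
	}
	return t.NumIn() == 2 && t.NumOut() == 1 &&
		t.In(0) == t.In(1) && t.Out(0) == boolType
}

func main() {
	eq := func(a, b string) bool { return a == b }
	mixed := func(int, string) bool { return false }
	fmt.Println(isTTB(reflect.TypeOf(eq)))    // true
	fmt.Println(isTTB(reflect.TypeOf(mixed))) // false: input types differ
}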
+type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000000..24fbae6e3c5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. 
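SortKeys above exists because Go map iteration order is deliberately unspecified, so cmp must impose its own order before walking entries. A standalone sketch of the same idea for plain string keys (the real value package is internal and handles arbitrary comparable key types):

package main

import (
	"fmt"
	"sort"
)

func main() {
	x := map[string]int{"b": 2, "a": 1}
	y := map[string]int{"c": 3, "a": 1}

	// Deduplicated union of both key sets, then sorted: a deterministic
	// visiting order regardless of map iteration order.
	seen := map[string]bool{}
	var keys []string
	for _, m := range []map[string]int{x, y} {
		for k := range m {
			if !seen[k] {
				seen[k] = true
				keys = append(keys, k)
			}
		}
	}
	sort.Strings(keys)
	fmt.Println(keys) // [a b c]
}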
+ fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000000..06a8ffd036d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
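The vendored IsZero below predates the standard library equivalent; since Go 1.13, reflect.Value.IsZero offers essentially the same recursive semantics. A quick sketch (config is an invented type) showing the behavior this helper implements:

package main

import (
	"fmt"
	"reflect"
)

type config struct {
	Name string
	Port int
	Tags []string
}

func main() {
	fmt.Println(reflect.ValueOf(config{}).IsZero())         // true: all fields zero
	fmt.Println(reflect.ValueOf(config{Port: 80}).IsZero()) // false
}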
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/options.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000000..793448160ee --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
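+//
+// For example (illustrative), to treat NaN float64 values as equal:
+//
+//	FilterValues(
+//		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
+//		Comparer(func(x, y float64) bool { return true }),
+//	)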
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. 
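+//
+// For example (illustrative), to compare strings case-insensitively:
+//
+//	Transformer("ToLower", strings.ToLower)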
+// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
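+//
+// For example (illustrative):
+//
+//	Comparer(func(x, y net.IP) bool { return x.Equal(y) })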
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. 
+func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/path.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000000..96fffd291f7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. 
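+//
+// For example, pa.Index(-1) is equivalent to pa.Last(), and pa.Index(0)
+// returns the operation-less PathStep that identifies the root type.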
+func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. 
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. 
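+//
+// For example, isExported("Name") reports true, while isExported("name")
+// and isExported("_name") report false.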
+func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000000..6ddf29993e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_compare.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000000..17a05eede48 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. 
+ // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. 
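+	// (Structs yield "field", slices and arrays "element", and maps
+	// "entry"; the name later appears in diff statistics such as
+	// "6 identical fields".)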
+ var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000000..2761b628921 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,278 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. 
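+	// (For example, an unnamed *T is printed as "(*T)" so that a value
+	// that follows, as in "(*T)(...)", binds unambiguously.)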
+ typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. 
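+	// (For example, byte(255) is printed as 0xff rather than 255.)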
+ if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. 
+ rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_slices.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000000..eafcf2e4c0b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. + return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. 
+// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
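+	// (Concretely: 16 elements per row for bool and 8-bit kinds, 12 for
+	// 16-bit, and 8 for 32-bit and wider, keeping rows a similar width.)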
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. 
+ appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_text.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000000..8b8fcab7bdf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. 
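+	// (Concretely, a per-process random bit chooses between regular spaces
+	// and visually identical non-breaking spaces (U+00A0) for indentation.)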
+ if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. 
+// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) 
+ } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_value.go b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000000..83031a7f507 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+	parent *valueNode
+
+	Type   reflect.Type
+	ValueX reflect.Value
+	ValueY reflect.Value
+
+	// NumSame is the number of leaf nodes that are equal.
+	// All descendants are equal only if NumDiff is 0.
+	NumSame int
+	// NumDiff is the number of leaf nodes that are not equal.
+	NumDiff int
+	// NumIgnored is the number of leaf nodes that are ignored.
+	NumIgnored int
+	// NumCompared is the number of leaf nodes that were compared
+	// using an Equal method or Comparer function.
+	NumCompared int
+	// NumTransformed is the number of non-leaf nodes that were transformed.
+	NumTransformed int
+	// NumChildren is the number of transitive descendants of this node.
+	// This counts from zero; thus, leaf nodes have no descendants.
+	NumChildren int
+	// MaxDepth is the maximum depth of the tree. This counts from zero;
+	// thus, leaf nodes have a depth of zero.
+	MaxDepth int
+
+	// Records is a list of struct fields, slice elements, or map entries.
+	Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirection, or
+	// type assertion.
+	Value *valueNode // If populated, implies Records is not populated
+
+	// TransformerName is the name of the transformer.
+	TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+	Key   reflect.Value // Invalid for slice element
+	Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+	vx, vy := ps.Values()
+	child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+	switch s := ps.(type) {
+	case StructField:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+	case SliceIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Value: child})
+	case MapIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+	case Indirect:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case TypeAssertion:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case Transform:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+		parent.TransformerName = s.Name()
+		parent.NumTransformed++
+	default:
+		assert(parent == nil) // Must be the root step
+	}
+	return child
+}
+
+func (r *valueNode) Report(rs Result) {
+	assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+	if rs.ByIgnore() {
+		r.NumIgnored++
+	} else {
+		if rs.Equal() {
+			r.NumSame++
+		} else {
+			r.NumDiff++
+		}
+	}
+	assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+	if rs.ByMethod() {
+		r.NumCompared++
+	}
+	if rs.ByFunc() {
+		r.NumCompared++
+	}
+	assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+	if child.parent == nil {
+		return nil
+	}
+	parent = child.parent
+	parent.NumSame += child.NumSame
+	parent.NumDiff += child.NumDiff
+	parent.NumIgnored += child.NumIgnored
+	parent.NumCompared += child.NumCompared
+	parent.NumTransformed += child.NumTransformed
+	parent.NumChildren += child.NumChildren + 1
+	if parent.MaxDepth < child.MaxDepth+1 {
+		parent.MaxDepth = child.MaxDepth + 1
+	}
+	return parent
+} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/checksum.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/checksum.go new file mode 100644 index 00000000000..bea7ed13c63 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/checksum.go @@ -0,0 +1,314 @@ +package getter + +import ( + "bufio" + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "hash" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// fileChecksum helps verifying the checksum for a file. +type fileChecksum struct { + Type string + Hash hash.Hash + Value []byte + Filename string +} + +// A ChecksumError is returned when a checksum differs +type ChecksumError struct { + Hash hash.Hash + Actual []byte + Expected []byte + File string +} + +func (cerr *ChecksumError) Error() string { + if cerr == nil { + return "" + } + return fmt.Sprintf( + "Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T", + cerr.File, + hex.EncodeToString(cerr.Expected), + hex.EncodeToString(cerr.Actual), + cerr.Hash, // ex: *sha256.digest + ) +} + +// checksum is a simple method to compute the checksum of a source file +// and compare it to the given expected value. +func (c *fileChecksum) checksum(source string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("Failed to open file for checksum: %s", err) + } + defer f.Close() + + c.Hash.Reset() + if _, err := io.Copy(c.Hash, f); err != nil { + return fmt.Errorf("Failed to hash: %s", err) + } + + if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) { + return &ChecksumError{ + Hash: c.Hash, + Actual: actual, + Expected: c.Value, + File: source, + } + } + + return nil +} + +// extractChecksum will return a fileChecksum based on the 'checksum' +// parameter of u. +// ex: +// http://hashicorp.com/terraform?checksum= +// http://hashicorp.com/terraform?checksum=: +// http://hashicorp.com/terraform?checksum=file: +// when checksumming from a file, extractChecksum will go get checksum_url +// in a temporary directory, parse the content of the file then delete it. +// Content of files are expected to be BSD style or GNU style. 
+// +// BSD-style checksum: +// MD5 (file1) = +// MD5 (file2) = +// +// GNU-style: +// file1 +// *file2 +// +// see parseChecksumLine for more detail on checksum file parsing +func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) { + q := u.Query() + v := q.Get("checksum") + + if v == "" { + return nil, nil + } + + vs := strings.SplitN(v, ":", 2) + switch len(vs) { + case 2: + break // good + default: + // here, we try to guess the checksum from it's length + // if the type was not passed + return newChecksumFromValue(v, filepath.Base(u.EscapedPath())) + } + + checksumType, checksumValue := vs[0], vs[1] + + switch checksumType { + case "file": + return c.checksumFromFile(checksumValue, u) + default: + return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) + } +} + +func newChecksum(checksumValue, filename string) (*fileChecksum, error) { + c := &fileChecksum{ + Filename: filename, + } + var err error + c.Value, err = hex.DecodeString(checksumValue) + if err != nil { + return nil, fmt.Errorf("invalid checksum: %s", err) + } + return c, nil +} + +func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + c.Type = strings.ToLower(checksumType) + switch c.Type { + case "md5": + c.Hash = md5.New() + case "sha1": + c.Hash = sha1.New() + case "sha256": + c.Hash = sha256.New() + case "sha512": + c.Hash = sha512.New() + default: + return nil, fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + return c, nil +} + +func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + switch len(c.Value) { + case md5.Size: + c.Hash = md5.New() + c.Type = "md5" + case sha1.Size: + c.Hash = sha1.New() + c.Type = "sha1" + case sha256.Size: + c.Hash = sha256.New() + c.Type = "sha256" + case sha512.Size: + c.Hash = sha512.New() + c.Type = "sha512" + default: + return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue) + } + + return c, nil +} + +// checksumsFromFile will return all the fileChecksums found in file +// +// checksumsFromFile will try to guess the hashing algorithm based on content +// of checksum file +// +// checksumsFromFile will only return checksums for files that match file +// behind src +func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) { + checksumFileURL, err := urlhelper.Parse(checksumFile) + if err != nil { + return nil, err + } + + tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path)) + if err != nil { + return nil, err + } + defer os.Remove(tempfile) + + c2 := &Client{ + Ctx: c.Ctx, + Getters: c.Getters, + Decompressors: c.Decompressors, + Detectors: c.Detectors, + Pwd: c.Pwd, + Dir: false, + Src: checksumFile, + Dst: tempfile, + ProgressListener: c.ProgressListener, + } + if err = c2.Get(); err != nil { + return nil, fmt.Errorf( + "Error downloading checksum file: %s", err) + } + + filename := filepath.Base(src.Path) + absPath, err := filepath.Abs(src.Path) + if err != nil { + return nil, err + } + checksumFileDir := filepath.Dir(checksumFileURL.Path) + relpath, err := filepath.Rel(checksumFileDir, absPath) + switch { + case err == nil || + err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir: + // ex: on windows C:\gopath\...\content.txt cannot be relative to \ + // which is okay, may be another 
expected path will work. + break + default: + return nil, err + } + + // possible file identifiers: + options := []string{ + filename, // ubuntu-14.04.1-server-amd64.iso + "*" + filename, // *ubuntu-14.04.1-server-amd64.iso Standard checksum + "?" + filename, // ?ubuntu-14.04.1-server-amd64.iso shasum -p + relpath, // dir/ubuntu-14.04.1-server-amd64.iso + "./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso + absPath, // fullpath; set if local + } + + f, err := os.Open(tempfile) + if err != nil { + return nil, fmt.Errorf( + "Error opening downloaded file: %s", err) + } + defer f.Close() + rd := bufio.NewReader(f) + for { + line, err := rd.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, fmt.Errorf( + "Error reading checksum file: %s", err) + } + break + } + checksum, err := parseChecksumLine(line) + if err != nil || checksum == nil { + continue + } + if checksum.Filename == "" { + // filename not sure, let's try + return checksum, nil + } + // make sure the checksum is for the right file + for _, option := range options { + if option != "" && checksum.Filename == option { + // any checksum will work so we return the first one + return checksum, nil + } + } + } + return nil, fmt.Errorf("no checksum found in: %s", checksumFile) +} + +// parseChecksumLine takes a line from a checksum file and returns +// checksumType, checksumValue and filename parseChecksumLine guesses the style +// of the checksum BSD vs GNU by splitting the line and by counting the parts. +// of a line. +// for BSD type sums parseChecksumLine guesses the hashing algorithm +// by checking the length of the checksum. +func parseChecksumLine(line string) (*fileChecksum, error) { + parts := strings.Fields(line) + + switch len(parts) { + case 4: + // BSD-style checksum: + // MD5 (file1) = + // MD5 (file2) = + if len(parts[1]) <= 2 || + parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' { + return nil, fmt.Errorf( + "Unexpected BSD-style-checksum filename format: %s", line) + } + filename := parts[1][1 : len(parts[1])-1] + return newChecksumFromType(parts[0], parts[3], filename) + case 2: + // GNU-style: + // file1 + // *file2 + return newChecksumFromValue(parts[0], parts[1]) + case 0: + return nil, nil // empty line + default: + return newChecksumFromValue(parts[0], "") + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client.go new file mode 100644 index 00000000000..007a78ba7c8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client.go @@ -0,0 +1,298 @@ +package getter + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" +) + +// Client is a client for downloading things. +// +// Top-level functions such as Get are shortcuts for interacting with a client. +// Using a client directly allows more fine-grained control over how downloading +// is done, as well as customizing the protocols supported. +type Client struct { + // Ctx for cancellation + Ctx context.Context + + // Src is the source URL to get. + // + // Dst is the path to save the downloaded thing as. If Dir is set to + // true, then this should be a directory. If the directory doesn't exist, + // it will be created for you. + // + // Pwd is the working directory for detection. If this isn't set, some + // detection may fail. 
Client will not default pwd to the current
+	// working directory for security reasons.
+	Src string
+	Dst string
+	Pwd string
+
+	// Mode is the method of download the client will use. See ClientMode
+	// for documentation.
+	Mode ClientMode
+
+	// Detectors is the list of detectors that are tried on the source.
+	// If this is nil, then the default Detectors will be used.
+	Detectors []Detector
+
+	// Decompressors is the map of decompressors supported by this client.
+	// If this is nil, then the default value is the Decompressors global.
+	Decompressors map[string]Decompressor
+
+	// Getters is the map of protocols supported by this client. If this
+	// is nil, then the default Getters variable will be used.
+	Getters map[string]Getter
+
+	// Dir, if true, tells the Client it is downloading a directory (versus
+	// a single file). This distinction is necessary since filenames and
+	// directory names follow the same format so disambiguating is impossible
+	// without knowing ahead of time.
+	//
+	// WARNING: deprecated. If Mode is set, that will take precedence.
+	Dir bool
+
+	// ProgressListener allows tracking file downloads.
+	// By default a no-op progress listener is used.
+	ProgressListener ProgressTracker
+
+	Options []ClientOption
+}
+
+// Get downloads the configured source to the destination.
+func (c *Client) Get() error {
+	if err := c.Configure(c.Options...); err != nil {
+		return err
+	}
+
+	// Store this locally since there are cases we swap this
+	mode := c.Mode
+	if mode == ClientModeInvalid {
+		if c.Dir {
+			mode = ClientModeDir
+		} else {
+			mode = ClientModeFile
+		}
+	}
+
+	src, err := Detect(c.Src, c.Pwd, c.Detectors)
+	if err != nil {
+		return err
+	}
+
+	// Determine if we have a forced protocol, i.e. "git::http://..."
+	force, src := getForcedGetter(src)
+
+	// If there is a subdir component, then we download the root separately
+	// and then copy over the proper subdir.
+	var realDst string
+	dst := c.Dst
+	src, subDir := SourceDirSubdir(src)
+	if subDir != "" {
+		td, tdcloser, err := safetemp.Dir("", "getter")
+		if err != nil {
+			return err
+		}
+		defer tdcloser.Close()
+
+		realDst = dst
+		dst = td
+	}
+
+	u, err := urlhelper.Parse(src)
+	if err != nil {
+		return err
+	}
+	if force == "" {
+		force = u.Scheme
+	}
+
+	g, ok := c.Getters[force]
+	if !ok {
+		return fmt.Errorf(
+			"download not supported for scheme '%s'", force)
+	}
+
+	// We have magic query parameters that we use to signal different features
+	q := u.Query()
+
+	// Determine if we have an archive type
+	archiveV := q.Get("archive")
+	if archiveV != "" {
+		// Delete the parameter since it is a magic parameter we don't
+		// want to pass on to the Getter
+		q.Del("archive")
+		u.RawQuery = q.Encode()
+
+		// If we can parse the value as a bool and it is false, then
+		// set the archive to "-" which should never map to a decompressor
+		if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+			archiveV = "-"
+		}
+	}
+	if archiveV == "" {
+		// We don't appear to... but is it part of the filename?
+		matchingLen := 0
+		for k := range c.Decompressors {
+			if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+				archiveV = k
+				matchingLen = len(k)
+			}
+		}
+	}
+
+	// If we have a decompressor, then we need to change the destination
+	// to download to a temporary path. We unarchive this into the final,
+	// real path.
+	var decompressDst string
+	var decompressDir bool
+	decompressor := c.Decompressors[archiveV]
+	if decompressor != nil {
+		// Create a temporary directory to store our archive.
We delete + // this at the end of everything. + td, err := ioutil.TempDir("", "getter") + if err != nil { + return fmt.Errorf( + "Error creating temporary directory for archive: %s", err) + } + defer os.RemoveAll(td) + + // Swap the download directory to be our temporary path and + // store the old values. + decompressDst = dst + decompressDir = mode != ClientModeFile + dst = filepath.Join(td, "archive") + mode = ClientModeFile + } + + // Determine checksum if we have one + checksum, err := c.extractChecksum(u) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) + } + + // Delete the query parameter if we have it. + q.Del("checksum") + u.RawQuery = q.Encode() + + if mode == ClientModeAny { + // Ask the getter which client mode to use + mode, err = g.ClientMode(u) + if err != nil { + return err + } + + // Destination is the base name of the URL path in "any" mode when + // a file source is detected. + if mode == ClientModeFile { + filename := filepath.Base(u.Path) + + // Determine if we have a custom file name + if v := q.Get("filename"); v != "" { + // Delete the query parameter if we have it. + q.Del("filename") + u.RawQuery = q.Encode() + + filename = v + } + + dst = filepath.Join(dst, filename) + } + } + + // If we're not downloading a directory, then just download the file + // and return. + if mode == ClientModeFile { + getFile := true + if checksum != nil { + if err := checksum.checksum(dst); err == nil { + // don't get the file if the checksum of dst is correct + getFile = false + } + } + if getFile { + err := g.GetFile(dst, u) + if err != nil { + return err + } + + if checksum != nil { + if err := checksum.checksum(dst); err != nil { + return err + } + } + } + + if decompressor != nil { + // We have a decompressor, so decompress the current destination + // into the final destination with the proper mode. + err := decompressor.Decompress(decompressDst, dst, decompressDir) + if err != nil { + return err + } + + // Swap the information back + dst = decompressDst + if decompressDir { + mode = ClientModeAny + } else { + mode = ClientModeFile + } + } + + // We check the dir value again because it can be switched back + // if we were unarchiving. If we're still only Get-ing a file, then + // we're done. + if mode == ClientModeFile { + return nil + } + } + + // If we're at this point we're either downloading a directory or we've + // downloaded and unarchived a directory and we're just checking subdir. + // In the case we have a decompressor we don't Get because it was Get + // above. + if decompressor == nil { + // If we're getting a directory, then this is an error. You cannot + // checksum a directory. TODO: test + if checksum != nil { + return fmt.Errorf( + "checksum cannot be specified for directory download") + } + + // We're downloading a directory, which might require a bit more work + // if we're specifying a subdir. 
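Before the final directory fetch below, the magic query-parameter plumbing above is easiest to see end to end. A hedged usage sketch (the URL, hash, and paths are illustrative, not from this change; a real sha256 hex digest goes in place of the zeros):

	package main

	import (
		"log"

		getter "github.com/hashicorp/go-getter"
	)

	func main() {
		client := &getter.Client{
			// "archive" is inferred from the .tar.gz suffix; "checksum"
			// is the magic parameter stripped and verified above.
			Src:  "https://example.com/app.tar.gz?checksum=sha256:0000000000000000000000000000000000000000000000000000000000000000",
			Dst:  "./app",
			Mode: getter.ClientModeDir,
		}
		if err := client.Get(); err != nil {
			log.Fatal(err) // a *ChecksumError reports expected vs. actual digests
		}
	}

Note the ordering implemented above: the checksum, when present, is verified against the downloaded archive before it is unpacked, and a destination file that already matches the checksum short-circuits the download entirely.
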
+ err := g.Get(dst, u) + if err != nil { + err = fmt.Errorf("error downloading '%s': %s", src, err) + return err + } + } + + // If we have a subdir, copy that over + if subDir != "" { + if err := os.RemoveAll(realDst); err != nil { + return err + } + if err := os.MkdirAll(realDst, 0755); err != nil { + return err + } + + // Process any globs + subDir, err := SubdirGlob(dst, subDir) + if err != nil { + return err + } + + return copyDir(c.Ctx, realDst, subDir, false) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_mode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_mode.go new file mode 100644 index 00000000000..7f02509a789 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_mode.go @@ -0,0 +1,24 @@ +package getter + +// ClientMode is the mode that the client operates in. +type ClientMode uint + +const ( + ClientModeInvalid ClientMode = iota + + // ClientModeAny downloads anything it can. In this mode, dst must + // be a directory. If src is a file, it is saved into the directory + // with the basename of the URL. If src is a directory or archive, + // it is unpacked directly into dst. + ClientModeAny + + // ClientModeFile downloads a single file. In this mode, dst must + // be a file path (doesn't have to exist). src must point to a single + // file. It is saved as dst. + ClientModeFile + + // ClientModeDir downloads a directory. In this mode, dst must be + // a directory path (doesn't have to exist). src must point to an + // archive or directory (such as in s3). + ClientModeDir +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option.go new file mode 100644 index 00000000000..c1ee413b055 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option.go @@ -0,0 +1,46 @@ +package getter + +import "context" + +// A ClientOption allows to configure a client +type ClientOption func(*Client) error + +// Configure configures a client with options. +func (c *Client) Configure(opts ...ClientOption) error { + if c.Ctx == nil { + c.Ctx = context.Background() + } + c.Options = opts + for _, opt := range opts { + err := opt(c) + if err != nil { + return err + } + } + // Default decompressor values + if c.Decompressors == nil { + c.Decompressors = Decompressors + } + // Default detector values + if c.Detectors == nil { + c.Detectors = Detectors + } + // Default getter values + if c.Getters == nil { + c.Getters = Getters + } + + for _, getter := range c.Getters { + getter.SetClient(c) + } + return nil +} + +// WithContext allows to pass a context to operation +// in order to be able to cancel a download in progress. +func WithContext(ctx context.Context) func(*Client) error { + return func(c *Client) error { + c.Ctx = ctx + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option_progress.go new file mode 100644 index 00000000000..9b185f71de6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/client_option_progress.go @@ -0,0 +1,38 @@ +package getter + +import ( + "io" +) + +// WithProgress allows for a user to track +// the progress of a download. +// For example by displaying a progress bar with +// current download. 
+// Not all getters have progress support yet. +func WithProgress(pl ProgressTracker) func(*Client) error { + return func(c *Client) error { + c.ProgressListener = pl + return nil + } +} + +// ProgressTracker allows to track the progress of downloads. +type ProgressTracker interface { + // TrackProgress should be called when + // a new object is being downloaded. + // src is the location the file is + // downloaded from. + // currentSize is the current size of + // the file in case it is a partial + // download. + // totalSize is the total size in bytes, + // size can be zero if the file size + // is not known. + // stream is the file being downloaded, every + // written byte will add up to processed size. + // + // TrackProgress returns a ReadCloser that wraps the + // download in progress ( stream ). + // When the download is finished, body shall be closed. + TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/common.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/common.go new file mode 100644 index 00000000000..d2afd8ad888 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/common.go @@ -0,0 +1,14 @@ +package getter + +import ( + "io/ioutil" +) + +func tmpFile(dir, pattern string) (string, error) { + f, err := ioutil.TempFile(dir, pattern) + if err != nil { + return "", err + } + f.Close() + return f.Name(), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/copy_dir.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/copy_dir.go new file mode 100644 index 00000000000..641fe6d0f10 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -0,0 +1,78 @@ +package getter + +import ( + "context" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +// +// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. +func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. 
+ srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := Copy(ctx, dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress.go new file mode 100644 index 00000000000..198bb0edd01 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress.go @@ -0,0 +1,58 @@ +package getter + +import ( + "strings" +) + +// Decompressor defines the interface that must be implemented to add +// support for decompressing a type. +// +// Important: if you're implementing a decompressor, please use the +// containsDotDot helper in this file to ensure that files can't be +// decompressed outside of the specified directory. +type Decompressor interface { + // Decompress should decompress src to dst. dir specifies whether dst + // is a directory or single file. src is guaranteed to be a single file + // that exists. dst is not guaranteed to exist already. + Decompress(dst, src string, dir bool) error +} + +// Decompressors is the mapping of extension to the Decompressor implementation +// that will decompress that extension/type. +var Decompressors map[string]Decompressor + +func init() { + tbzDecompressor := new(TarBzip2Decompressor) + tgzDecompressor := new(TarGzipDecompressor) + txzDecompressor := new(TarXzDecompressor) + + Decompressors = map[string]Decompressor{ + "bz2": new(Bzip2Decompressor), + "gz": new(GzipDecompressor), + "xz": new(XzDecompressor), + "tar.bz2": tbzDecompressor, + "tar.gz": tgzDecompressor, + "tar.xz": txzDecompressor, + "tbz2": tbzDecompressor, + "tgz": tgzDecompressor, + "txz": txzDecompressor, + "zip": new(ZipDecompressor), + } +} + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. +func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." { + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go new file mode 100644 index 00000000000..339f4cf7af2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go @@ -0,0 +1,45 @@ +package getter + +import ( + "compress/bzip2" + "fmt" + "io" + "os" + "path/filepath" +) + +// Bzip2Decompressor is an implementation of Decompressor that can +// decompress bz2 files. 
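Before the individual decompressors, note that containsDotDot above is the entire path-traversal ("zip slip") defense: ".." only counts when it is a complete path element, split on either slash. A few illustrative evaluations (an in-package sketch, since the helper is unexported; behavior matches the net/http logic it is copied from):

	// Inside package getter:
	containsDotDot("foo/bar.txt")       // false
	containsDotDot(`..\evil.txt`)       // true: backslash is a separator too
	containsDotDot("a..b/ok.txt")       // false: "a..b" is a single element
	containsDotDot("../../etc/passwd")  // true
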
+type Bzip2Decompressor struct{} + +func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, bzipR) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_gzip.go new file mode 100644 index 00000000000..5ebf709b4f9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -0,0 +1,49 @@ +package getter + +import ( + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" +) + +// GzipDecompressor is an implementation of Decompressor that can +// decompress gzip files. +type GzipDecompressor struct{} + +func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("gzip-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // gzip compression is second + gzipR, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gzipR.Close() + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, gzipR) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tar.go new file mode 100644 index 00000000000..b6986a25aec --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -0,0 +1,160 @@ +package getter + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// untar is a shared helper for untarring an archive. The reader should provide +// an uncompressed view of the tar archive. +func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + dirHdrs := []*tar.Header{} + now := time.Now() + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + break + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... 
+ if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + // Record the directory information so that we may set its attributes + // after all files have been extracted + dirHdrs = append(dirHdrs, hdr) + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + + // Set the access and modification time if valid, otherwise default to current time + aTime := now + mTime := now + if hdr.AccessTime.Unix() > 0 { + aTime = hdr.AccessTime + } + if hdr.ModTime.Unix() > 0 { + mTime = hdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + // Perform a final pass over extracted directories to update metadata + for _, dirHdr := range dirHdrs { + path := filepath.Join(dst, dirHdr.Name) + // Chmod the directory since they might be created before we know the mode flags + if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil { + return err + } + // Set the mtime/atime attributes since they would have been changed during extraction + aTime := now + mTime := now + if dirHdr.AccessTime.Unix() > 0 { + aTime = dirHdr.AccessTime + } + if dirHdr.ModTime.Unix() > 0 { + mTime = dirHdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + return nil +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. +type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go new file mode 100644 index 00000000000..5391b5c8c52 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -0,0 +1,33 @@ +package getter + +import ( + "compress/bzip2" + "os" + "path/filepath" +) + +// TarBzip2Decompressor is an implementation of Decompressor that can +// decompress tar.bz2 files. 
+type TarBzip2Decompressor struct{}
+
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Bzip2 compression is second
+	bzipR := bzip2.NewReader(f)
+	return untar(bzipR, dst, src, dir)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644
index 00000000000..b2f662a89df
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -0,0 +1,171 @@
+package getter
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/mitchellh/go-testing-interface"
+)
+
+// TestDecompressCase is a single test case for testing decompressors
+type TestDecompressCase struct {
+	Input   string     // Input is the complete path to the input file
+	Dir     bool       // Dir is whether or not we're testing directory mode
+	Err     bool       // Err is whether we expect an error or not
+	DirList []string   // DirList is the list of files for Dir mode
+	FileMD5 string     // FileMD5 is the expected MD5 for a single file
+	Mtime   *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode)
+}
+
+// TestDecompressor is a helper function for testing generic decompressors.
+func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
+	t.Helper()
+
+	for _, tc := range cases {
+		t.Logf("Testing: %s", tc.Input)
+
+		// Temporary dir to store stuff
+		td, err := ioutil.TempDir("", "getter")
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		// Destination is always joining result so that we have a new path
+		dst := filepath.Join(td, "subdir", "result")
+
+		// We use a function so defers work
+		func() {
+			defer os.RemoveAll(td)
+
+			// Decompress
+			err := d.Decompress(dst, tc.Input, tc.Dir)
+			if (err != nil) != tc.Err {
+				t.Fatalf("err %s: %s", tc.Input, err)
+			}
+			if tc.Err {
+				return
+			}
+
+			// If it isn't a directory, then check for a single file
+			if !tc.Dir {
+				fi, err := os.Stat(dst)
+				if err != nil {
+					t.Fatalf("err %s: %s", tc.Input, err)
+				}
+				if fi.IsDir() {
+					t.Fatalf("err %s: expected file, got directory", tc.Input)
+				}
+				if tc.FileMD5 != "" {
+					actual := testMD5(t, dst)
+					expected := tc.FileMD5
+					if actual != expected {
+						t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
+					}
+				}
+
+				if tc.Mtime != nil {
+					actual := fi.ModTime()
+					if tc.Mtime.Unix() > 0 {
+						expected := *tc.Mtime
+						if actual != expected {
+							t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String())
+						}
+					} else if actual.Unix() <= 0 {
+						t.Fatalf("err %s: expected mtime to be > 0, got '%s'", tc.Input, actual.String())
+					}
+				}
+
+				return
+			}
+
+			// Convert expected for windows
+			expected := tc.DirList
+			if runtime.GOOS == "windows" {
+				for i, v := range expected {
+					expected[i] = strings.Replace(v, "/", "\\", -1)
+				}
+			}
+
+			// Directory, check for the correct contents
+			actual := testListDir(t, dst)
+			if !reflect.DeepEqual(actual, expected) {
+				t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
+			}
+			// Check for correct atime/mtime
+			for _, dir := range actual {
+				path := filepath.Join(dst, dir)
+				if tc.Mtime != nil {
+					fi, err := os.Stat(path)
+					if err != nil {
+						t.Fatalf("err: %s", err)
+					}
+					actual := fi.ModTime()
+					if tc.Mtime.Unix() > 0 {
+						expected := *tc.Mtime
+						if actual != expected {
+							t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String())
+						}
+					} else if actual.Unix() <= 0 {
+						t.Fatalf("err %s: expected mtime to be > 0, got '%s'", tc.Input, actual.String())
+					}
+
+				}
+			}
+		}()
+	}
+}
+
+func testListDir(t testing.T, path string) []string {
+	var result []string
+	err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		sub = strings.TrimPrefix(sub, path)
+		if sub == "" {
+			return nil
+		}
+		sub = sub[1:] // Trim the leading path sep.
+
+		// If it is a dir, add trailing sep
+		if info.IsDir() {
+			sub += string(os.PathSeparator)
+		}
+
+		result = append(result, sub)
+		return nil
+	})
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	sort.Strings(result)
+	return result
+}
+
+func testMD5(t testing.T, path string) string {
+	f, err := os.Open(path)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer f.Close()
+
+	h := md5.New()
+	_, err = io.Copy(h, f)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	result := h.Sum(nil)
+	return hex.EncodeToString(result)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 00000000000..65eb70dd2c2
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,39 @@
+package getter
+
+import (
+	"compress/gzip"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gzip files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Gzip compression is second
+	gzipR, err := gzip.NewReader(f)
+	if err != nil {
+		return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+	}
+	defer gzipR.Close()
+
+	return untar(gzipR, dst, src, dir)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_txz.go
new file mode 100644
index 00000000000..5e151c127df
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_txz.go
@@ -0,0 +1,39 @@
+package getter
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/ulikunitz/xz"
+)
+
+// TarXzDecompressor is an implementation of Decompressor that can
+// decompress tar.xz files.
+type TarXzDecompressor struct{}
+
+func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// xz compression is second
+	txzR, err := xz.NewReader(f)
+	if err != nil {
+		return fmt.Errorf("Error opening an xz reader for %s: %s", src, err)
+	}
+
+	return untar(txzR, dst, src, dir)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_xz.go
new file mode 100644
index 00000000000..4e37abab108
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_xz.go
@@ -0,0 +1,49 @@
+package getter
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/ulikunitz/xz"
+)
+
+// XzDecompressor is an implementation of Decompressor that can
+// decompress xz files.
+type XzDecompressor struct{}
+
+func (d *XzDecompressor) Decompress(dst, src string, dir bool) error {
+	// Directory isn't supported at all
+	if dir {
+		return fmt.Errorf("xz-compressed files can only unarchive to a single file")
+	}
+
+	// If we're going into a directory we should make that first
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// File first
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// xz compression is second
+	xzR, err := xz.NewReader(f)
+	if err != nil {
+		return err
+	}
+
+	// Copy it out
+	dstF, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstF.Close()
+
+	_, err = io.Copy(dstF, xzR)
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 00000000000..b0e70cac35c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,101 @@
+package getter
+
+import (
+	"archive/zip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+	// If we're going into a directory we should make that first
+	mkdir := dst
+	if !dir {
+		mkdir = filepath.Dir(dst)
+	}
+	if err := os.MkdirAll(mkdir, 0755); err != nil {
+		return err
+	}
+
+	// Open the zip
+	zipR, err := zip.OpenReader(src)
+	if err != nil {
+		return err
+	}
+	defer zipR.Close()
+
+	// Check the zip integrity
+	if len(zipR.File) == 0 {
+		// Empty archive
+		return fmt.Errorf("empty archive: %s", src)
+	}
+	if !dir && len(zipR.File) > 1 {
+		return fmt.Errorf("expected a single file: %s", src)
+	}
+
+	// Go through and unarchive
+	for _, f := range zipR.File {
+		path := dst
+		if dir {
+			// Disallow parent traversal
+			if containsDotDot(f.Name) {
+				return fmt.Errorf("entry contains '..': %s", f.Name)
+			}
+
+			path = filepath.Join(path, f.Name)
+		}
+
+		if f.FileInfo().IsDir() {
+			if !dir {
+				return fmt.Errorf("expected a single file: %s", src)
+			}
+
+			// A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } + + // Create the enclosing directories if we must. ZIP files aren't + // required to contain entries for just the directories so this + // can happen. + if dir { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + } + + // Open the file for reading + srcF, err := f.Open() + if err != nil { + return err + } + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + srcF.Close() + return err + } + _, err = io.Copy(dstF, srcF) + srcF.Close() + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, f.Mode()); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect.go new file mode 100644 index 00000000000..5bb750c9f2c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect.go @@ -0,0 +1,105 @@ +package getter + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/go-getter/helper/url" +) + +// Detector defines the interface that an invalid URL or a URL with a blank +// scheme is passed through in order to determine if its shorthand for +// something else well-known. +type Detector interface { + // Detect will detect whether the string matches a known pattern to + // turn it into a proper URL. + Detect(string, string) (string, bool, error) +} + +// Detectors is the list of detectors that are tried on an invalid URL. +// This is also the order they're tried (index 0 is first). +var Detectors []Detector + +func init() { + Detectors = []Detector{ + new(GitHubDetector), + new(GitDetector), + new(BitBucketDetector), + new(S3Detector), + new(GCSDetector), + new(FileDetector), + } +} + +// Detect turns a source string into another source string if it is +// detected to be of a known pattern. +// +// The third parameter should be the list of detectors to use in the +// order to try them. If you don't want to configure this, just use +// the global Detectors variable. +// +// This is safe to be called with an already valid source string: Detect +// will just return it. +func Detect(src string, pwd string, ds []Detector) (string, error) { + getForce, getSrc := getForcedGetter(src) + + // Separate out the subdir if there is one, we don't pass that to detect + getSrc, subDir := SourceDirSubdir(getSrc) + + u, err := url.Parse(getSrc) + if err == nil && u.Scheme != "" { + // Valid URL + return src, nil + } + + for _, d := range ds { + result, ok, err := d.Detect(getSrc, pwd) + if err != nil { + return "", err + } + if !ok { + continue + } + + var detectForce string + detectForce, result = getForcedGetter(result) + result, detectSubdir := SourceDirSubdir(result) + + // If we have a subdir from the detection, then prepend it to our + // requested subdir. + if detectSubdir != "" { + if subDir != "" { + subDir = filepath.Join(detectSubdir, subDir) + } else { + subDir = detectSubdir + } + } + + if subDir != "" { + u, err := url.Parse(result) + if err != nil { + return "", fmt.Errorf("Error parsing URL: %s", err) + } + u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. + u.RawPath = u.Path + + result = u.String() + } + + // Preserve the forced getter if it exists. 
		// Preserve the forced getter if it exists. We try to use the
+		// originally set force first, followed by any force set by the
+		// detector.
+		if getForce != "" {
+			result = fmt.Sprintf("%s::%s", getForce, result)
+		} else if detectForce != "" {
+			result = fmt.Sprintf("%s::%s", detectForce, result)
+		}
+
+		return result, nil
+	}
+
+	return "", fmt.Errorf("invalid source string: %s", src)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644
index 00000000000..a183a17dfe7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -0,0 +1,68 @@
+package getter
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// BitBucketDetector implements Detector to detect BitBucket URLs and turn
+// them into URLs that the Git or Hg Getter can understand.
+type BitBucketDetector struct{}
+
+func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
+	if len(src) == 0 {
+		return "", false, nil
+	}
+
+	if strings.HasPrefix(src, "bitbucket.org/") {
+		return d.detectHTTP(src)
+	}
+
+	return "", false, nil
+}
+
+func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
+	u, err := url.Parse("https://" + src)
+	if err != nil {
+		return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
+	}
+
+	// We need to get info on this BitBucket repository to determine whether
+	// it is Git or Hg.
+	var info struct {
+		SCM string `json:"scm"`
+	}
+	infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+	resp, err := http.Get(infoUrl)
+	if err != nil {
+		return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+	// Close the response body so the connection isn't leaked.
+	defer resp.Body.Close()
+	if resp.StatusCode == 403 {
+		// A private repo
+		return "", true, fmt.Errorf(
+			"shorthand BitBucket URL can't be used for private repos, " +
+				"please use a full URL")
+	}
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&info); err != nil {
+		return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+
+	switch info.SCM {
+	case "git":
+		if !strings.HasSuffix(u.Path, ".git") {
+			u.Path += ".git"
+		}
+
+		return "git::" + u.String(), true, nil
+	case "hg":
+		return "hg::" + u.String(), true, nil
+	default:
+		return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_file.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 00000000000..4ef41ea73fa
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+	if len(src) == 0 {
+		return "", false, nil
+	}
+
+	if !filepath.IsAbs(src) {
+		if pwd == "" {
+			return "", true, fmt.Errorf(
+				"relative paths require a module with a pwd")
+		}
+
+		// Stat the pwd to determine if it's a symbolic link. If it is,
+		// then the pwd becomes the original directory. Otherwise,
+		// `filepath.Join` below does some weird stuff.
+		//
+		// We just ignore if the pwd doesn't exist. That error will be
+		// caught later when we try to use the URL.
+ if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { + if err != nil { + return "", true, err + } + if fi.Mode()&os.ModeSymlink != 0 { + pwd, err = filepath.EvalSymlinks(pwd) + if err != nil { + return "", true, err + } + + // The symlink itself might be a relative path, so we have to + // resolve this to have a correctly rooted URL. + pwd, err = filepath.Abs(pwd) + if err != nil { + return "", true, err + } + } + } + + src = filepath.Join(pwd, src) + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_gcs.go new file mode 100644 index 00000000000..11363737c7d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_gcs.go @@ -0,0 +1,43 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GCSDetector implements Detector to detect GCS URLs and turn +// them into URLs that the GCSGetter can understand. +type GCSDetector struct{} + +func (d *GCSDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, "googleapis.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GCSDetector) detectHTTP(src string) (string, bool, error) { + + parts := strings.Split(src, "/") + if len(parts) < 5 { + return "", false, fmt.Errorf( + "URL is not a valid GCS URL") + } + version := parts[2] + bucket := parts[3] + object := strings.Join(parts[4:], "/") + + url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s", + version, bucket, object)) + if err != nil { + return "", false, fmt.Errorf("error parsing GCS URL: %s", err) + } + + return "gcs::" + url.String(), true, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_git.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_git.go new file mode 100644 index 00000000000..eeb8a04c5e9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_git.go @@ -0,0 +1,26 @@ +package getter + +// GitDetector implements Detector to detect Git SSH URLs such as +// git@host.com:dir1/dir2 and converts them to proper URLs. 
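//
// A rough sketch of the conversion (expected, not verified, output; it
// relies on the detectSSH helper in detect_ssh.go):
//
//	d := new(GitDetector)
//	s, ok, err := d.Detect("git@github.com:hashicorp/go-getter.git", "")
//	// ok == true, s == "git::ssh://git@github.com/hashicorp/go-getter.git"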
+type GitDetector struct{} + +func (d *GitDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + u, err := detectSSH(src) + if err != nil { + return "", true, err + } + if u == nil { + return "", false, nil + } + + // We require the username to be "git" to assume that this is a Git URL + if u.User.Username() != "git" { + return "", false, nil + } + + return "git::" + u.String(), true, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_github.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_github.go new file mode 100644 index 00000000000..4bf4daf238d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -0,0 +1,47 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GitHubDetector implements Detector to detect GitHub URLs and turn +// them into URLs that the Git Getter can understand. +type GitHubDetector struct{} + +func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "github.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitHub URLs should be github.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) + } + + if !strings.HasSuffix(url.Path, ".git") { + url.Path += ".git" + } + + if len(parts) > 3 { + url.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + url.String(), true, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_s3.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_s3.go new file mode 100644 index 00000000000..8e0f4a03b46 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_s3.go @@ -0,0 +1,61 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// S3Detector implements Detector to detect S3 URLs and turn +// them into URLs that the S3 getter can understand. 
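//
// A rough sketch of the two host styles handled below (hypothetical bucket,
// expected output):
//
//	d := new(S3Detector)
//	// path style: s3.amazonaws.com/bucket/key
//	s, _, _ := d.Detect("s3.amazonaws.com/mybucket/dir/file", "")
//	// s == "s3::https://s3.amazonaws.com/mybucket/dir/file"
//	// vhost style: bucket.s3.amazonaws.com/key
//	s, _, _ = d.Detect("mybucket.s3.amazonaws.com/dir/file", "")
//	// s == "s3::https://s3.amazonaws.com/mybucket/dir/file"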
+type S3Detector struct{} + +func (d *S3Detector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, ".amazonaws.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *S3Detector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 2 { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } + + hostParts := strings.Split(parts[0], ".") + if len(hostParts) == 3 { + return d.detectPathStyle(hostParts[0], parts[1:]) + } else if len(hostParts) == 4 { + return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:]) + } else { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } +} + +func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} + +func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_ssh.go new file mode 100644 index 00000000000..c0dbe9d4754 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/detect_ssh.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "net/url" + "regexp" + "strings" +) + +// Note that we do not have an SSH-getter currently so this file serves +// only to hold the detectSSH helper that is used by other detectors. + +// sshPattern matches SCP-like SSH patterns (user@host:path) +var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$") + +// detectSSH determines if the src string matches an SSH-like URL and +// converts it into a net.URL compatible string. This returns nil if the +// string doesn't match the SSH pattern. 
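// A rough sketch of the SCP-style rewrite this performs (expected output):
//
//	u, err := detectSSH("git@example.com:foo/bar.git?ref=v1")
//	// u.String() == "ssh://git@example.com/foo/bar.git?ref=v1"
//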
+// +// This function is tested indirectly via detect_git_test.go +func detectSSH(src string) (*url.URL, error) { + matched := sshPattern.FindStringSubmatch(src) + if matched == nil { + return nil, nil + } + + user := matched[1] + host := matched[2] + path := matched[3] + qidx := strings.Index(path, "?") + if qidx == -1 { + qidx = len(path) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User(user) + u.Host = host + u.Path = path[0:qidx] + if qidx < len(path) { + q, err := url.ParseQuery(path[qidx+1:]) + if err != nil { + return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + u.RawQuery = q.Encode() + } + + return &u, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/folder_storage.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/folder_storage.go new file mode 100644 index 00000000000..647ccf45928 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/folder_storage.go @@ -0,0 +1,65 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "os" + "path/filepath" +) + +// FolderStorage is an implementation of the Storage interface that manages +// modules on the disk. +type FolderStorage struct { + // StorageDir is the directory where the modules will be stored. + StorageDir string +} + +// Dir implements Storage.Dir +func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { + d = s.dir(key) + _, err = os.Stat(d) + if err == nil { + // Directory exists + e = true + return + } + if os.IsNotExist(err) { + // Directory doesn't exist + d = "" + e = false + err = nil + return + } + + // An error + d = "" + e = false + return +} + +// Get implements Storage.Get +func (s *FolderStorage) Get(key string, source string, update bool) error { + dir := s.dir(key) + if !update { + if _, err := os.Stat(dir); err == nil { + // If the directory already exists, then we're done since + // we're not updating. + return nil + } else if !os.IsNotExist(err) { + // If the error we got wasn't a file-not-exist error, then + // something went wrong and we should report it. + return fmt.Errorf("Error reading module directory: %s", err) + } + } + + // Get the source. This always forces an update. + return Get(dir, source) +} + +// dir returns the directory name internally that we'll use to map to +// internally. +func (s *FolderStorage) dir(key string) string { + sum := md5.Sum([]byte(key)) + return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get.go new file mode 100644 index 00000000000..c233763c67f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get.go @@ -0,0 +1,152 @@ +// getter is a package for downloading files or directories from a variety of +// protocols. +// +// getter is unique in its ability to download both directories and files. +// It also detects certain source strings to be protocol-specific URLs. For +// example, "github.com/hashicorp/go-getter" would turn into a Git URL and +// use the Git protocol. +// +// Protocols and detectors are extensible. +// +// To get started, see Client. +package getter + +import ( + "bytes" + "fmt" + "net/url" + "os/exec" + "regexp" + "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +// Getter defines the interface that schemes must implement to download +// things. 
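//
// A rough sketch of how a scheme resolves to one of these implementations
// (the mapping is the Getters variable defined below; u is some previously
// parsed *url.URL):
//
//	g := Getters["git"] // a *GitGetter
//	mode, err := g.ClientMode(u)
//	// git sources are always directories, so mode == ClientModeDir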
+type Getter interface {
+	// Get downloads the given URL into the given directory. This always
+	// assumes that we're updating and gets the latest version that it can.
+	//
+	// The directory may already exist (if we're updating). If it is in a
+	// format that isn't understood, an error should be returned. Get shouldn't
+	// simply nuke the directory.
+	Get(string, *url.URL) error
+
+	// GetFile downloads the given URL into the given path. The URL must
+	// reference a single file. If possible, the Getter should check if
+	// the remote end contains the same file and no-op this operation.
+	GetFile(string, *url.URL) error
+
+	// ClientMode returns the mode based on the given URL. This is used to
+	// allow clients to let the getters decide which mode to use.
+	ClientMode(*url.URL) (ClientMode, error)
+
+	// SetClient allows a getter to know its client
+	// in order to access the client's Get functions or
+	// progress tracking.
+	SetClient(*Client)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. This
+// syntax is scheme::url, example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+// httpClient is the default client to be used by HttpGetters.
+var httpClient = cleanhttp.DefaultClient()
+
+func init() {
+	httpGetter := &HttpGetter{
+		Netrc: true,
+	}
+
+	Getters = map[string]Getter{
+		"file":  new(FileGetter),
+		"git":   new(GitGetter),
+		"gcs":   new(GCSGetter),
+		"hg":    new(HgGetter),
+		"s3":    new(S3Getter),
+		"http":  httpGetter,
+		"https": httpGetter,
+	}
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist. It will be created if it doesn't exist.
+func Get(dst, src string, opts ...ClientOption) error {
+	return (&Client{
+		Src:     src,
+		Dst:     dst,
+		Dir:     true,
+		Options: opts,
+	}).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string, opts ...ClientOption) error {
+	return (&Client{
+		Src:     src,
+		Dst:     dst,
+		Mode:    ClientModeAny,
+		Options: opts,
+	}).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
+func GetFile(dst, src string, opts ...ClientOption) error {
+	return (&Client{
+		Src:     src,
+		Dst:     dst,
+		Dir:     false,
+		Options: opts,
+	}).Get()
+}
+
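// Illustrative sketch of the three helpers above (destination paths are
// hypothetical; the git:: prefix exercises the forced-getter syntax
// matched by forcedRegexp):
//
//	// an entire directory tree
//	err := Get("/tmp/dst", "git::https://github.com/hashicorp/go-getter.git")
//	// a single file
//	err = GetFile("/tmp/main.tf", "https://example.com/main.tf")
//	// let the getter decide between file and directory mode
//	err = GetAny("/tmp/any", "https://example.com/archive.zip")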
// getRunCommand is a helper that will run a command and capture the output
+// in case an error happens.
+func getRunCommand(cmd *exec.Cmd) error {
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	cmd.Stderr = &buf
+	err := cmd.Run()
+	if err == nil {
+		return nil
+	}
+	if exiterr, ok := err.(*exec.ExitError); ok {
+		// The program has exited with an exit code != 0
+		if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+			return fmt.Errorf(
+				"%s exited with %d: %s",
+				cmd.Path,
+				status.ExitStatus(),
+				buf.String())
+		}
+	}
+
+	return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
+}
+
+// getForcedGetter takes a source and returns the tuple of the forced
+// getter and the raw URL (without the force syntax).
+func getForcedGetter(src string) (string, string) {
+	var forced string
+	if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
+		forced = ms[1]
+		src = ms[2]
+	}
+
+	return forced, src
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_base.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_base.go
new file mode 100644
index 00000000000..09e9b6313b1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_base.go
@@ -0,0 +1,20 @@
+package getter
+
+import "context"
+
+// getter is our base getter; it groups the
+// fields all getters have in common.
+type getter struct {
+	client *Client
+}
+
+func (g *getter) SetClient(c *Client) { g.client = c }
+
+// Context tries to return the Context from the getter's
+// client; otherwise context.Background() is returned.
+func (g *getter) Context() context.Context {
+	if g == nil || g.client == nil {
+		return context.Background()
+	}
+	return g.client.Ctx
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file.go
new file mode 100644
index 00000000000..78660839a07
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -0,0 +1,36 @@
+package getter
+
+import (
+	"net/url"
+	"os"
+)
+
+// FileGetter is a Getter implementation that will download a module from
+// a file scheme.
+type FileGetter struct {
+	getter
+
+	// Copy, if set to true, will copy data instead of using a symlink. If
+	// false, attempts to symlink to speed up the operation and to lower the
+	// disk space usage. If the symlink fails, it may attempt to copy on Windows.
+	Copy bool
+}
+
+func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) {
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	fi, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+
+	// Check if the source is a directory.
+	if fi.IsDir() {
+		return ClientModeDir, nil
+	}
+
+	return ClientModeFile, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_copy.go
new file mode 100644
index 00000000000..d70fb495128
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_copy.go
@@ -0,0 +1,29 @@
+package getter
+
+import (
+	"context"
+	"io"
+)
+
+// readerFunc is syntactic sugar for the io.Reader interface.
+type readerFunc func(p []byte) (n int, err error)
+
+func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
+
+// Copy is an io.Copy cancellable by context
+func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
+	// Copy will call the Reader and Writer interfaces multiple times, in order
+	// to copy by chunk (avoiding loading the whole file in memory).
+	return io.Copy(dst, readerFunc(func(p []byte) (int, error) {
+
+		select {
+		case <-ctx.Done():
+			// context has been canceled
+			// stop process and propagate "context canceled" error
+			return 0, ctx.Err()
+		default:
+			// otherwise just run default io.Reader implementation
+			return src.Read(p)
+		}
+	}))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_unix.go
new file mode 100644
index 00000000000..c3b28ae517a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -0,0 +1,103 @@
+// +build !windows
+
+package getter
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a directory to be usable.
+	if fi, err := os.Stat(path); err != nil {
+		return fmt.Errorf("source path error: %s", err)
+	} else if !fi.IsDir() {
+		return fmt.Errorf("source path must be a directory")
+	}
+
+	fi, err := os.Lstat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// If the destination already exists, it must be a symlink
+	if err == nil {
+		mode := fi.Mode()
+		if mode&os.ModeSymlink == 0 {
+			return fmt.Errorf("destination exists and is not a symlink")
+		}
+
+		// Remove the destination
+		if err := os.Remove(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	return os.Symlink(path, dst)
+}
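// Illustrative sketch of the context-aware Copy from get_file_copy.go,
// which GetFile below uses so a download can be cancelled mid-copy
// (the reader/writer values here are hypothetical):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	n, err := Copy(ctx, dstF, srcF)
//	// once cancel() is called, Copy returns ctx.Err() on the next chunk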
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+	ctx := g.Context()
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a file to be usable.
+	if fi, err := os.Stat(path); err != nil {
+		return fmt.Errorf("source path error: %s", err)
+	} else if fi.IsDir() {
+		return fmt.Errorf("source path must be a file")
+	}
+
+	_, err := os.Lstat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// If the destination already exists, it must be a symlink
+	if err == nil {
+		// Remove the destination
+		if err := os.Remove(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// If we're not copying, just symlink and we're done
+	if !g.Copy {
+		return os.Symlink(path, dst)
+	}
+
+	// Copy
+	srcF, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer srcF.Close()
+
+	dstF, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstF.Close()
+
+	_, err = Copy(ctx, dstF, srcF)
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 00000000000..24f1acb1762
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,136 @@
+// +build windows
+
+package getter
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"syscall"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+	ctx := g.Context()
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a directory to be usable.
+	if fi, err := os.Stat(path); err != nil {
+		return fmt.Errorf("source path error: %s", err)
+	} else if !fi.IsDir() {
+		return fmt.Errorf("source path must be a directory")
+	}
+
+	fi, err := os.Lstat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	// If the destination already exists, it must be a symlink
+	if err == nil {
+		mode := fi.Mode()
+		if mode&os.ModeSymlink == 0 {
+			return fmt.Errorf("destination exists and is not a symlink")
+		}
+
+		// Remove the destination
+		if err := os.Remove(dst); err != nil {
+			return err
+		}
+	}
+
+	// Create all the parent directories
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	sourcePath := toBackslash(path)
+
+	// Use mklink to create a junction point
+	output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+	}
+
+	return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+	ctx := g.Context()
+	path := u.Path
+	if u.RawPath != "" {
+		path = u.RawPath
+	}
+
+	// The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + if err = os.Symlink(path, dst); err == nil { + return err + } + lerr, ok := err.(*os.LinkError) + if !ok { + return err + } + switch lerr.Err { + case syscall.ERROR_PRIVILEGE_NOT_HELD: + // no symlink privilege, let's + // fallback to a copy to avoid an error. + break + default: + return err + } + } + + // Copy + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = Copy(ctx, dstF, srcF) + return err +} + +// toBackslash returns the result of replacing each slash character +// in path with a backslash ('\') character. Multiple separators are +// replaced by multiple backslashes. +func toBackslash(path string) string { + return strings.Replace(path, "/", "\\", -1) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_gcs.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_gcs.go new file mode 100644 index 00000000000..6faa70f4fcf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_gcs.go @@ -0,0 +1,172 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" +) + +// GCSGetter is a Getter implementation that will download a module from +// a GCS bucket. +type GCSGetter struct { + getter +} + +func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return 0, err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return 0, err + } + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return 0, err + } + + if err == iterator.Done { + break + } + if strings.HasSuffix(obj.Name, "/") { + // A directory matched the prefix search, so this must be a directory + return ClientModeDir, nil + } else if obj.Name != object { + // A file matched the prefix search and doesn't have the same name + // as the query, so this must be a directory + return ClientModeDir, nil + } + } + // There are no directories or subdirectories, and if a match was returned, + // it was exactly equal to the prefix search. 
So return File mode + return ClientModeFile, nil +} + +func (g *GCSGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + // Remove destination if it already exists + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // Remove the destination + if err := os.RemoveAll(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + + // Iterate through all matching objects. + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return err + } + if err == iterator.Done { + break + } + + if !strings.HasSuffix(obj.Name, "/") { + // Get the object destination path + objDst, err := filepath.Rel(object, obj.Name) + if err != nil { + return err + } + objDst = filepath.Join(dst, objDst) + // Download the matching object. + err = g.getObject(ctx, client, objDst, bucket, obj.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (g *GCSGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + return g.getObject(ctx, client, dst, bucket, object) +} + +func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error { + rc, err := client.Bucket(bucket).Object(object).NewReader(ctx) + if err != nil { + return err + } + defer rc.Close() + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = Copy(ctx, f, rc) + return err +} + +func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) { + if strings.Contains(u.Host, "googleapis.com") { + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + + pathParts := strings.SplitN(u.Path, "/", 5) + if len(pathParts) != 5 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + bucket = pathParts[3] + path = pathParts[4] + } + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_git.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_git.go new file mode 100644 index 00000000000..2ff00d20ffd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_git.go @@ -0,0 +1,301 @@ +package getter + +import ( + "context" + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" + version "github.com/hashicorp/go-version" +) + +// GitGetter is a Getter implementation that will download a module from +// a git repository. 
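//
// A rough sketch of the query parameters Get understands (values are
// hypothetical; see the parsing at the top of Get):
//
//	// checkout tag v1.2.0 as a shallow clone, authenticating with a
//	// base64-encoded private key
//	src := "git::ssh://git@example.com/repo.git?ref=v1.2.0&depth=1&sshkey=<base64>"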
+type GitGetter struct { + getter +} + +func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *GitGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + if _, err := exec.LookPath("git"); err != nil { + return fmt.Errorf("git must be available and on the PATH") + } + + // Extract some query parameters we use + var ref, sshKey string + var depth int + q := u.Query() + if len(q) > 0 { + ref = q.Get("ref") + q.Del("ref") + + sshKey = q.Get("sshkey") + q.Del("sshkey") + + if n, err := strconv.Atoi(q.Get("depth")); err == nil { + depth = n + } + q.Del("depth") + + // Copy the URL + var newU url.URL = *u + u = &newU + u.RawQuery = q.Encode() + } + + var sshKeyFile string + if sshKey != "" { + // Check that the git version is sufficiently new. + if err := checkGitVersion("2.3"); err != nil { + return fmt.Errorf("Error using ssh key: %v", err) + } + + // We have an SSH key - decode it. + raw, err := base64.StdEncoding.DecodeString(sshKey) + if err != nil { + return err + } + + // Create a temp file for the key and ensure it is removed. + fh, err := ioutil.TempFile("", "go-getter") + if err != nil { + return err + } + sshKeyFile = fh.Name() + defer os.Remove(sshKeyFile) + + // Set the permissions prior to writing the key material. + if err := os.Chmod(sshKeyFile, 0600); err != nil { + return err + } + + // Write the raw key into the temp file. + _, err = fh.Write(raw) + fh.Close() + if err != nil { + return err + } + } + + // For SSH-style URLs, if they use the SCP syntax of host:path, then + // the URL will be mangled. We detect that here and correct the path. + // Example: host:path/bar will turn into host/path/bar + if u.Scheme == "ssh" { + if idx := strings.Index(u.Host, ":"); idx > -1 { + // Copy the URL so we don't modify the input + var newU url.URL = *u + u = &newU + + // Path includes the part after the ':'. + u.Path = u.Host[idx+1:] + u.Path + if u.Path[0] != '/' { + u.Path = "/" + u.Path + } + + // Host trims up to the : + u.Host = u.Host[:idx] + } + } + + // Clone or update the repository + _, err := os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + err = g.update(ctx, dst, sshKeyFile, ref, depth) + } else { + err = g.clone(ctx, dst, sshKeyFile, u, depth) + } + if err != nil { + return err + } + + // Next: check out the proper tag/branch if it is specified, and checkout + if ref != "" { + if err := g.checkout(dst, ref); err != nil { + return err + } + } + + // Lastly, download any/all submodules. + return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) +} + +// GetFile for Git doesn't support updating at this time. It will download +// the file every time. +func (g *GitGetter) GetFile(dst string, u *url.URL) error { + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. 
+ filename := filepath.Base(u.Path) + u.Path = filepath.Dir(u.Path) + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *GitGetter) checkout(dst string, ref string) error { + cmd := exec.Command("git", "checkout", ref) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error { + args := []string{"clone"} + + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + + args = append(args, u.String(), dst) + cmd := exec.CommandContext(ctx, "git", args...) + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error { + // Determine if we're a branch. If we're NOT a branch, then we just + // switch to master prior to checking out + cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd.Dir = dst + + if getRunCommand(cmd) != nil { + // Not a branch, switch to master. This will also catch non-existent + // branches, in which case we want to switch to master and then + // checkout the proper branch later. + ref = "master" + } + + // We have to be on a branch to pull + if err := g.checkout(dst, ref); err != nil { + return err + } + + if depth > 0 { + cmd = exec.Command("git", "pull", "--depth", strconv.Itoa(depth), "--ff-only") + } else { + cmd = exec.Command("git", "pull", "--ff-only") + } + + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// fetchSubmodules downloads any configured submodules recursively. +func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error { + args := []string{"submodule", "update", "--init", "--recursive"} + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + cmd := exec.CommandContext(ctx, "git", args...) + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// setupGitEnv sets up the environment for the given command. This is used to +// pass configuration data to git and ssh and enables advanced cloning methods. +func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. + env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } + + if sshKeyFile != "" { + // We have an SSH key temp file configured, tell ssh about this. + if runtime.GOOS == "windows" { + sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1) + } + sshCmd = append(sshCmd, "-i", sshKeyFile) + } + + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env +} + +// checkGitVersion is used to check the version of git installed on the system +// against a known minimum version. Returns an error if the installed version +// is older than the given minimum. 
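//
// A rough sketch (expected behavior, given the parsing below):
//
//	err := checkGitVersion("2.3")
//	// nil when `git version` reports e.g. "git version 2.20.1",
//	// an error when the installed git is older than 2.3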
+func checkGitVersion(min string) error {
+	want, err := version.NewVersion(min)
+	if err != nil {
+		return err
+	}
+
+	out, err := exec.Command("git", "version").Output()
+	if err != nil {
+		return err
+	}
+
+	fields := strings.Fields(string(out))
+	if len(fields) < 3 {
+		return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
+	}
+	v := fields[2]
+	if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") {
+		// on windows, git version will return for example:
+		// git version 2.20.1.windows.1
+		// which does not follow the semantic versioning spec
+		// (https://semver.org). We remove that part in order for
+		// go-version to not error.
+		v = v[:strings.Index(v, ".windows.")]
+	}
+
+	have, err := version.NewVersion(v)
+	if err != nil {
+		return err
+	}
+
+	if have.LessThan(want) {
+		return fmt.Errorf("Required git version = %s, have %s", want, have)
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_hg.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_hg.go
new file mode 100644
index 00000000000..290649c9105
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -0,0 +1,135 @@
+package getter
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+
+	urlhelper "github.com/hashicorp/go-getter/helper/url"
+	safetemp "github.com/hashicorp/go-safetemp"
+)
+
+// HgGetter is a Getter implementation that will download a module from
+// a Mercurial repository.
+type HgGetter struct {
+	getter
+}
+
+func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+	return ClientModeDir, nil
+}
+
+func (g *HgGetter) Get(dst string, u *url.URL) error {
+	ctx := g.Context()
+	if _, err := exec.LookPath("hg"); err != nil {
+		return fmt.Errorf("hg must be available and on the PATH")
+	}
+
+	newURL, err := urlhelper.Parse(u.String())
+	if err != nil {
+		return err
+	}
+	if fixWindowsDrivePath(newURL) {
+		// See valid file path form on http://www.selenic.com/hg/help/urls
+		newURL.Path = fmt.Sprintf("/%s", newURL.Path)
+	}
+
+	// Extract some query parameters we use
+	var rev string
+	q := newURL.Query()
+	if len(q) > 0 {
+		rev = q.Get("rev")
+		q.Del("rev")
+
+		newURL.RawQuery = q.Encode()
+	}
+
+	_, err = os.Stat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err != nil {
+		if err := g.clone(dst, newURL); err != nil {
+			return err
+		}
+	}
+
+	if err := g.pull(dst, newURL); err != nil {
+		return err
+	}
+
+	return g.update(ctx, dst, newURL, rev)
+}
+
+// GetFile for Hg doesn't support updating at this time. It will download
+// the file every time.
+func (g *HgGetter) GetFile(dst string, u *url.URL) error {
+	// Create a temporary directory to store the full source. This has to be
+	// a non-existent directory.
+	td, tdcloser, err := safetemp.Dir("", "getter")
+	if err != nil {
+		return err
+	}
+	defer tdcloser.Close()
+
+	// Get the filename, and strip the filename from the URL so we can
+	// just get the repository directly.
+ filename := filepath.Base(u.Path) + u.Path = filepath.ToSlash(filepath.Dir(u.Path)) + + // If we're on Windows, we need to set the host to "localhost" for hg + if runtime.GOOS == "windows" { + u.Host = "localhost" + } + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true, getter: g.getter} + return fg.GetFile(dst, u) +} + +func (g *HgGetter) clone(dst string, u *url.URL) error { + cmd := exec.Command("hg", "clone", "-U", u.String(), dst) + return getRunCommand(cmd) +} + +func (g *HgGetter) pull(dst string, u *url.URL) error { + cmd := exec.Command("hg", "pull") + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error { + args := []string{"update"} + if rev != "" { + args = append(args, rev) + } + + cmd := exec.CommandContext(ctx, "hg", args...) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func fixWindowsDrivePath(u *url.URL) bool { + // hg assumes a file:/// prefix for Windows drive letter file paths. + // (e.g. file:///c:/foo/bar) + // If the URL Path does not begin with a '/' character, the resulting URL + // path will have a file:// prefix. (e.g. file://c:/foo/bar) + // See http://www.selenic.com/hg/help/urls and the examples listed in + // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 + return runtime.GOOS == "windows" && u.Scheme == "file" && + len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_http.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_http.go new file mode 100644 index 00000000000..7c4541c6e95 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_http.go @@ -0,0 +1,322 @@ +package getter + +import ( + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + + safetemp "github.com/hashicorp/go-safetemp" +) + +// HttpGetter is a Getter implementation that will download from an HTTP +// endpoint. +// +// For file downloads, HTTP is used directly. +// +// The protocol for downloading a directory from an HTTP endpoint is as follows: +// +// An HTTP GET request is made to the URL with the additional GET parameter +// "terraform-get=1". This lets you handle that scenario specially if you +// wish. The response must be a 2xx. +// +// First, a header is looked for "X-Terraform-Get" which should contain +// a source URL to download. +// +// If the header is not present, then a meta tag is searched for named +// "terraform-get" and the content should be a source URL. +// +// The source URL, whether from the header or meta tag, must be a fully +// formed URL. The shorthand syntax of "github.com/foo/bar" or relative +// paths are not allowed. +type HttpGetter struct { + getter + + // Netrc, if true, will lookup and use auth information found + // in the user's netrc file if available. + Netrc bool + + // Client is the http.Client to use for Get requests. + // This defaults to a cleanhttp.DefaultClient if left unset. + Client *http.Client + + // Header contains optional request header fields that should be included + // with every HTTP request. 
Note that the zero value of this field is nil, + // and as such it needs to be initialized before use, via something like + // make(http.Header). + Header http.Header +} + +func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { + if strings.HasSuffix(u.Path, "/") { + return ClientModeDir, nil + } + return ClientModeFile, nil +} + +func (g *HttpGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + // Copy the URL so we can modify it + var newU url.URL = *u + u = &newU + + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(u); err != nil { + return err + } + } + + if g.Client == nil { + g.Client = httpClient + } + + // Add terraform-get to the parameter. + q := u.Query() + q.Add("terraform-get", "1") + u.RawQuery = q.Encode() + + // Get the URL + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return err + } + + req.Header = g.Header + resp, err := g.Client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + // Extract the source URL + var source string + if v := resp.Header.Get("X-Terraform-Get"); v != "" { + source = v + } else { + source, err = g.parseMeta(resp.Body) + if err != nil { + return err + } + } + if source == "" { + return fmt.Errorf("no source URL was returned") + } + + // If there is a subdir component, then we download the root separately + // into a temporary directory, then copy over the proper subdir. + source, subDir := SourceDirSubdir(source) + if subDir == "" { + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + return Get(dst, source, opts...) + } + + // We have a subdir, time to jump some hoops + return g.getSubdir(ctx, dst, source, subDir) +} + +func (g *HttpGetter) GetFile(dst string, src *url.URL) error { + ctx := g.Context() + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(src); err != nil { + return err + } + } + + // Create all the parent directories if needed + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) + if err != nil { + return err + } + defer f.Close() + + if g.Client == nil { + g.Client = httpClient + } + + var currentFileSize int64 + + // We first make a HEAD request so we can check + // if the server supports range queries. If the server/URL doesn't + // support HEAD requests, we just fall back to GET. + req, err := http.NewRequest("HEAD", src.String(), nil) + if err != nil { + return err + } + if g.Header != nil { + req.Header = g.Header + } + headResp, err := g.Client.Do(req) + if err == nil && headResp != nil { + headResp.Body.Close() + if headResp.StatusCode == 200 { + // If the HEAD request succeeded, then attempt to set the range + // query if we can. 
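	// (Illustrative: with 1024 bytes of dst already on disk and a server
	// that advertises "Accept-Ranges: bytes", the block below turns the
	// request into a resume, e.g.
	//
	//	req.Header.Set("Range", "bytes=1024-")
	//
	// and a compliant server answers 206 Partial Content.)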
+ if headResp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := f.Stat(); err == nil { + if _, err = f.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + currentFileSize = fi.Size() + totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) + if currentFileSize >= totalFileSize { + // file already present + return nil + } + } + } + } + } + } + req.Method = "GET" + + resp, err := g.Client.Do(req) + if err != nil { + return err + } + switch resp.StatusCode { + case http.StatusOK, http.StatusPartialContent: + // all good + default: + resp.Body.Close() + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + body := resp.Body + + if g.client != nil && g.client.ProgressListener != nil { + // track download + fn := filepath.Base(src.EscapedPath()) + body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) + } + defer body.Close() + + n, err := Copy(ctx, f, body) + if err == nil && n < resp.ContentLength { + err = io.ErrShortWrite + } + return err +} + +// getSubdir downloads the source into the destination, but with +// the proper subdir. +func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) error { + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + // Download that into the given directory + if err := Get(td, source, opts...); err != nil { + return err + } + + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { + return err + } + + // Make sure the subdir path actually exists + if _, err := os.Stat(sourcePath); err != nil { + return fmt.Errorf( + "Error downloading %s: %s", source, err) + } + + // Copy the subdirectory into our actual destination. + if err := os.RemoveAll(dst); err != nil { + return err + } + + // Make the final destination + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + return copyDir(ctx, dst, sourcePath, false) +} + +// parseMeta looks for the first meta tag in the given reader that +// will give us the source URL. +func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var err error + var t xml.Token + for { + t, err = d.Token() + if err != nil { + if err == io.EOF { + err = nil + } + return "", err + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + return "", nil + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + return "", nil + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "terraform-get" { + continue + } + if f := attrValue(e.Attr, "content"); f != "" { + return f, nil + } + } +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. +func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} + +// charsetReader returns a reader for the given charset. Currently +// it only supports UTF-8 and ASCII. 
Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). +func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_mock.go new file mode 100644 index 00000000000..e2a98ea2843 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -0,0 +1,54 @@ +package getter + +import ( + "net/url" +) + +// MockGetter is an implementation of Getter that can be used for tests. +type MockGetter struct { + getter + + // Proxy, if set, will be called after recording the calls below. + // If it isn't set, then the *Err values will be returned. + Proxy Getter + + GetCalled bool + GetDst string + GetURL *url.URL + GetErr error + + GetFileCalled bool + GetFileDst string + GetFileURL *url.URL + GetFileErr error +} + +func (g *MockGetter) Get(dst string, u *url.URL) error { + g.GetCalled = true + g.GetDst = dst + g.GetURL = u + + if g.Proxy != nil { + return g.Proxy.Get(dst, u) + } + + return g.GetErr +} + +func (g *MockGetter) GetFile(dst string, u *url.URL) error { + g.GetFileCalled = true + g.GetFileDst = dst + g.GetFileURL = u + + if g.Proxy != nil { + return g.Proxy.GetFile(dst, u) + } + return g.GetFileErr +} + +func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { + if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { + return ClientModeDir, nil + } + return ClientModeFile, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_s3.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_s3.go new file mode 100644 index 00000000000..93eeb0b817f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -0,0 +1,275 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3Getter is a Getter implementation that will download a module from +// a S3 bucket. +type S3Getter struct { + getter +} + +func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { + // Parse URL + region, bucket, path, _, creds, err := g.parseUrl(u) + if err != nil { + return 0, err + } + + // Create client config + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + + // List the object(s) at the given prefix + req := &s3.ListObjectsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(path), + } + resp, err := client.ListObjects(req) + if err != nil { + return 0, err + } + + for _, o := range resp.Contents { + // Use file mode on exact match. + if *o.Key == path { + return ClientModeFile, nil + } + + // Use dir mode if child keys are found. 
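		// (Illustrative: for a query path "foo/bar", a key exactly equal to
		// "foo/bar" selects ClientModeFile above, while a key such as
		// "foo/bar/baz" matches the prefix check below and selects
		// ClientModeDir.)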
+ if strings.HasPrefix(*o.Key, path+"/") { + return ClientModeDir, nil + } + } + + // There was no match, so just return file mode. The download is going + // to fail but we will let S3 return the proper error later. + return ClientModeFile, nil +} + +func (g *S3Getter) Get(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + region, bucket, path, _, creds, err := g.parseUrl(u) + if err != nil { + return err + } + + // Remove destination if it already exists + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + if err == nil { + // Remove the destination + if err := os.RemoveAll(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + + // List files in path, keep listing until no more objects are found + lastMarker := "" + hasMore := true + for hasMore { + req := &s3.ListObjectsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(path), + } + if lastMarker != "" { + req.Marker = aws.String(lastMarker) + } + + resp, err := client.ListObjects(req) + if err != nil { + return err + } + + hasMore = aws.BoolValue(resp.IsTruncated) + + // Get each object storing each file relative to the destination path + for _, object := range resp.Contents { + lastMarker = aws.StringValue(object.Key) + objPath := aws.StringValue(object.Key) + + // If the key ends with a backslash assume it is a directory and ignore + if strings.HasSuffix(objPath, "/") { + continue + } + + // Get the object destination path + objDst, err := filepath.Rel(path, objPath) + if err != nil { + return err + } + objDst = filepath.Join(dst, objDst) + + if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { + return err + } + } + } + + return nil +} + +func (g *S3Getter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + region, bucket, path, version, creds, err := g.parseUrl(u) + if err != nil { + return err + } + + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + return g.getObject(ctx, client, dst, bucket, path, version) +} + +func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { + req := &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + if version != "" { + req.VersionId = aws.String(version) + } + + resp, err := client.GetObject(req) + if err != nil { + return err + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = Copy(ctx, f, resp.Body) + return err +} + +func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { + conf := &aws.Config{} + if creds == nil { + // Grab the metadata URL + metadataURL := os.Getenv("AWS_METADATA_URL") + if metadataURL == "" { + metadataURL = "http://169.254.169.254:80/latest" + } + + creds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(&aws.Config{ + Endpoint: aws.String(metadataURL), + })), + }, + }) + } + + if creds != nil { + conf.Endpoint = &url.Host + conf.S3ForcePathStyle = 
aws.Bool(true) + if url.Scheme == "http" { + conf.DisableSSL = aws.Bool(true) + } + } + + conf.Credentials = creds + if region != "" { + conf.Region = aws.String(region) + } + + return conf +} + +func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { + // This just checks whether we are dealing with S3 or + // any other S3-compliant service. S3 has a predictable + // URL, while others do not + if strings.Contains(u.Host, "amazonaws.com") { + // Expected host style: s3.amazonaws.com. They always have 3 parts, + // although the first may differ if we're accessing a specific region. + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } + + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + + } else { + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3-compliant URL") + return + } + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + region = u.Query().Get("region") + if region == "" { + region = "us-east-1" + } + } + + _, hasAwsId := u.Query()["aws_access_key_id"] + _, hasAwsSecret := u.Query()["aws_access_key_secret"] + _, hasAwsToken := u.Query()["aws_access_token"] + if hasAwsId || hasAwsSecret || hasAwsToken { + creds = credentials.NewStaticCredentials( + u.Query().Get("aws_access_key_id"), + u.Query().Get("aws_access_key_secret"), + u.Query().Get("aws_access_token"), + ) + } + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/netrc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/netrc.go new file mode 100644 index 00000000000..c7f6a3fb3fb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/netrc.go @@ -0,0 +1,67 @@ +package getter + +import ( + "fmt" + "net/url" + "os" + "runtime" + + "github.com/bgentry/go-netrc/netrc" + "github.com/mitchellh/go-homedir" +) + +// addAuthFromNetrc adds auth information to the URL from the user's +// netrc file if it can be found. This will only add the auth info +// if the URL doesn't already have auth info specified and the +// username is blank. +func addAuthFromNetrc(u *url.URL) error { + // If the URL already has auth information, do nothing + if u.User != nil && u.User.Username() != "" { + return nil + } + + // Get the netrc file path + path := os.Getenv("NETRC") + if path == "" { + filename := ".netrc" + if runtime.GOOS == "windows" { + filename = "_netrc" + } + + var err error + path, err = homedir.Expand("~/" + filename) + if err != nil { + return err + } + } + + // If the path is not a file, then do nothing + if fi, err := os.Stat(path); err != nil { + // File doesn't exist, do nothing + if os.IsNotExist(err) { + return nil + } + + // Some other error!
+ return err + } else if fi.IsDir() { + // Path is a directory, ignore it + return nil + } + + // Load up the netrc file + net, err := netrc.ParseFile(path) + if err != nil { + return fmt.Errorf("Error parsing netrc file at %q: %s", path, err) + } + + machine := net.FindMachine(u.Host) + if machine == nil { + // Machine not found, no problem + return nil + } + + // Set the user info + u.User = url.UserPassword(machine.Login, machine.Password) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/source.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/source.go new file mode 100644 index 00000000000..dab6d400cb7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/source.go @@ -0,0 +1,75 @@ +package getter + +import ( + "fmt" + "path/filepath" + "strings" +) + +// SourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +// +func SourceDirSubdir(src string) (string, string) { + + // The URL might contain another URL in its query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme + // as the dir. + var offset int + if idx := strings.Index(src[:stop], "://"); idx > -1 { + offset = idx + 3 + } + + // First see if we even have an explicit subdir + idx := strings.Index(src[offset:stop], "//") + if idx == -1 { + return src, "" + } + + idx += offset + subdir := src[idx+2:] + src = src[:idx] + + // Next, check if we have query parameters and push them onto the + // URL. + if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} + +// SubdirGlob returns the actual subdir with globbing processed. +// +// dst should be a destination directory that is already populated (the +// download is complete) and subDir should be the configured subdir. If subDir +// is an empty string, this returns an empty string. +// +// The returned path is the full absolute path. +func SubdirGlob(dst, subDir string) (string, error) { + matches, err := filepath.Glob(filepath.Join(dst, subDir)) + if err != nil { + return "", err + } + + if len(matches) == 0 { + return "", fmt.Errorf("subdir %q not found", subDir) + } + + if len(matches) > 1 { + return "", fmt.Errorf("subdir %q matches multiple paths", subDir) + } + + return matches[0], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/storage.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/storage.go new file mode 100644 index 00000000000..2bc6b9ec331 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-getter/storage.go @@ -0,0 +1,13 @@ +package getter + +// Storage is an interface that knows how to look up downloaded directories +// as well as download and update directories from their sources into the +// proper location. +type Storage interface { + // Dir returns the directory on local disk where the directory source + // can be loaded from. + Dir(string) (string, bool, error) + + // Get will download and optionally update the given directory.
+ Get(string, string, bool) error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 00000000000..abaf1e45f2a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 00000000000..44aa9bf2c62 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 00000000000..23486b6d74f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems.
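Colorization is driven entirely by the Color field of LoggerOptions; here is a minimal usage sketch, assuming only the public go-hclog API vendored in this diff (the "installer" name is an invented example):

    package main

    import (
        "os"

        hclog "github.com/hashicorp/go-hclog"
    )

    func main() {
        // AutoColor asks setColorization to probe the writer with isatty,
        // so redirecting stderr to a file quietly turns color back off.
        logger := hclog.New(&hclog.LoggerOptions{
            Name:   "installer",
            Level:  hclog.Debug,
            Output: os.Stderr,
            Color:  hclog.AutoColor,
        })
        logger.Debug("color probe complete")
    }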
+func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/context.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 00000000000..7815f501942 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/global.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 00000000000..22ebc57d877 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Default returns a globally held logger. This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// care should be used if SetDefault() is called at random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. +func Default() Logger { + protect.Do(func() { + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } + }) + + return def +} + +// L is a short alias for Default().
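How these global pieces are meant to compose, as a short sketch (the "app" name and key/value pair are invented):

    package main

    import hclog "github.com/hashicorp/go-hclog"

    func main() {
        // Install the process-wide default before other goroutines call
        // Default() or L(); SetDefault makes no attempt to be race-free.
        hclog.SetDefault(hclog.New(&hclog.LoggerOptions{Name: "app", Level: hclog.Info}))

        hclog.L().Info("starting", "workers", 4) // L() is shorthand for Default()
    }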
+func L() Logger { + return Default() +} + +// SetDefault changes the logger to be returned by Default() and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to set up +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. +func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 00000000000..68f31e42d88 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,216 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...)
+ } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.Lock() + defer i.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.Lock() + defer i.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +// Create a *log.Logger that will send its data through this Logger. This +// allows packages that expect to be using the standard library to log to +// actually use this logger, which will also send to any registered sinks.
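A sketch of the sink fan-out described above, assuming only the constructors in this file and NewSinkAdapter from intlogger.go; the trace.log sink is an invented example:

    package main

    import (
        "os"

        hclog "github.com/hashicorp/go-hclog"
    )

    func main() {
        // The root logger emits at INFO, while the registered sink receives
        // everything down to TRACE from the same calls.
        root := hclog.NewInterceptLogger(&hclog.LoggerOptions{Name: "core", Level: hclog.Info})

        f, _ := os.Create("trace.log") // error handling elided in this sketch
        defer f.Close()

        sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{Level: hclog.Trace, Output: f})
        root.RegisterSink(sink)
        defer root.DeregisterSink(sink)

        root.Trace("only the sink records this")
        root.Info("both outputs record this")
    }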
+func (l *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriterIntercept(opts), "", 0) +} + +func (l *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/intlogger.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 00000000000..5882b87018d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,588 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "log" + "os" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/fatih/color" +) + +// TimeFormat to use for logging. This is a version of RFC3339 that contains +// millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex *sync.Mutex + writer *writer + level *int32 + + implied []interface{} +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + } + + l.setColorization(opts) + + if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe than the threshold configured in the Logger.
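The threshold test at the top of Log is a single atomic load, so disabled levels cost almost nothing; a behavioral sketch (messages and key/value pairs are invented):

    package main

    import hclog "github.com/hashicorp/go-hclog"

    func main() {
        l := hclog.New(&hclog.LoggerOptions{Level: hclog.Warn})

        l.Info("dropped")             // Info (3) is less severe than Warn (4)
        l.Error("kept", "attempt", 1) // Error (5) passes the threshold

        l.SetLevel(hclog.Debug) // atomic.StoreInt32 takes effect immediately
        l.Debug("now visible")
    }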
+func (l *intLogger) Log(name string, level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.json { + l.logJSON(t, name, level, msg, args...) + } else { + l.log(t, name, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`) + +// Non-JSON logging format function +func (l *intLogger) log(t time.Time, name string, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + offset := 3 + if l.caller { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + match := logImplFile.MatchString(file) + if match { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(l.Name(), Debug, msg, args...) 
+} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.Log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.Log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.Log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.Log(l.Name(), Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (l *intLogger) With(args ...interface{}) Logger { + if len(args)%2 != 0 { + panic("With() call requires paired arguments") + } + + sl := *l + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + return &sl +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := *l + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return &sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l + + sl.name = name + + return &sl +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send its data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger.
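A sketch of this stdlib bridge, assuming only StandardLogger and the StandardLoggerOptions declared in logger.go; the "legacy" name and message are invented:

    package main

    import hclog "github.com/hashicorp/go-hclog"

    func main() {
        l := hclog.New(&hclog.LoggerOptions{Name: "legacy"})

        // ForceLevel pins every line from the wrapped logger to one level,
        // stripping any [WARN]-style prefix before reapplying it.
        std := l.StandardLogger(&hclog.StandardLoggerOptions{ForceLevel: hclog.Debug})
        std.Println("[WARN] emitted as DEBUG despite the prefix")
    }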
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. +func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.Log(name, level, msg, args...) +} + +// ImpliedArgs returns the logger's implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the logger's name +func (i *intLogger) Name() string { + return i.name +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/logger.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 00000000000..48d608714f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,240 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" + "sync" +) + +var ( + // DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer low-level analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 +) + +// Format is a simple convenience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convenience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case.
Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +// Logger describes the interface that must be implemented by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // ImpliedArgs returns With key/value pairs + ImpliedArgs() []interface{} + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Returns the Name of the logger + Name() string + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all its own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honors + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer +} + +// StandardLoggerOptions can be used to configure a new standard logger. +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and + // [DEBUG], and strips the level prefix off before reapplying it. + InferLevels bool + + // ForceLevel is used to force all output from the standard logger to be at + // the specified level. Similar to InferLevels, this will strip any level + // prefix contained in the logged string before applying the forced level. + // If set, this overrides InferLevels. + ForceLevel Level +} + +// LoggerOptions can be used to configure a new logger.
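LevelFromString pairs naturally with environment-driven configuration; a minimal sketch (the LOG_LEVEL variable name is an assumption):

    package main

    import (
        "os"

        hclog "github.com/hashicorp/go-hclog"
    )

    func main() {
        // Unknown or empty input yields NoLevel, so fall back explicitly.
        lvl := hclog.LevelFromString(os.Getenv("LOG_LEVEL"))
        if lvl == hclog.NoLevel {
            lvl = hclog.DefaultLevel
        }

        logger := hclog.New(&hclog.LoggerOptions{Level: lvl, JSONFormat: true})
        logger.Info("logger configured")
    }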
+type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is suppressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string + + // Color the output. On Windows, colored logs are only available for io.Writers that + // are concrete instances of *os.File. + Color ColorOption +} + +// InterceptLogger describes the interface for using a logger +// that can register different output sinks. +// This is useful for sending lower level log messages +// to a different output while keeping the root logger +// at a higher one. +type InterceptLogger interface { + // Logger is the root logger for an InterceptLogger + Logger + + // RegisterSink adds a SinkAdapter to the InterceptLogger + RegisterSink(sink SinkAdapter) + + // DeregisterSink removes a SinkAdapter from the InterceptLogger + DeregisterSink(sink SinkAdapter) + + // Create an InterceptLogger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all its own logs + // without losing context. + NamedIntercept(name string) InterceptLogger + + // Create an InterceptLogger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honors + // the current name as well. + ResetNamedIntercept(name string) InterceptLogger + + // Return a value that conforms to the stdlib log.Logger interface + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented + // in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 00000000000..4abdd5583e8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,56 @@ +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes.
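A typical use of the null logger in a test; NewWorker and Run are hypothetical stand-ins for any code that requires a Logger:

    package worker_test // hypothetical test package for this sketch

    import (
        "testing"

        hclog "github.com/hashicorp/go-hclog"
    )

    func TestWorkerRuns(t *testing.T) {
        w := NewWorker(hclog.NewNullLogger()) // hypothetical constructor; all logging is a no-op
        if err := w.Run(); err != nil {
            t.Fatal(err)
        }
    }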
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Name() string { return "" } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 00000000000..9b27bd3d3d9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
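Because a CapturedStacktrace is just a string with a recognized type, it rides along as a trailing argument; a short sketch (logger name and message invented):

    package main

    import hclog "github.com/hashicorp/go-hclog"

    func main() {
        logger := hclog.New(&hclog.LoggerOptions{Name: "db"})

        // The odd trailing CapturedStacktrace is detected by the logger
        // and appended to the entry instead of being read as a key/value pair.
        logger.Error("query failed", "table", "users", hclog.Stacktrace())
    }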
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Callers and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stdlog.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 00000000000..2cf0456a05a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides an io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { + level, str := s.pickLevel(str) + s.dispatch(str, level) + } else { + s.log.Info(str) + } + + return len(data), nil +} + +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is.
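Wiring the adapter under the global stdlib logger shows the level inference in action; a sketch assuming only the APIs vendored here (the "bridge" name and messages are invented):

    package main

    import (
        "log"

        hclog "github.com/hashicorp/go-hclog"
    )

    func main() {
        l := hclog.New(&hclog.LoggerOptions{Name: "bridge"})

        // Bracketed prefixes such as [DEBUG] or [ERR] are stripped by
        // pickLevel and mapped onto the matching hclog level.
        log.SetOutput(l.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
        log.SetFlags(0) // avoid stacking stdlib timestamps on hclog's own

        log.Println("[DEBUG] cache warmed") // re-emitted at Debug
        log.Println("no prefix here")       // defaults to Info
    }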
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/writer.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 00000000000..421a1f06c0b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,82 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. +func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. 
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 00000000000..bc56559c632 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,1025 @@ +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expect network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling CleanupClients. +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureConfigNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. 
It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// call `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // stderrWaitGroup is used to prevent the command's Wait() function from + // being called before we've finished reading from the stderr pipe. + stderrWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It cannot be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. 
+ // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. + // + // If these are left nil (the default), then this package will not + // sync any of these streams; output the plugin writes to them is + // discarded. + SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will use. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying that the checksum of +// the binary matches the expected checksum. Hash specifies the hashing method +// to use when checksumming the file. The configuration is verified by the +// client by calling the SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// cannot be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. 
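+// +// An illustrative sketch (editor's addition, not upstream documentation) of +// wiring a SecureConfig into a ClientConfig; the checksum value and the +// handshake/pluginMap variables are hypothetical placeholders: +// +// sum, _ := hex.DecodeString("86e6...") // expected digest of ./my-plugin +// client := plugin.NewClient(&plugin.ClientConfig{ +// HandshakeConfig: handshake, // your handshake config +// Plugins: pluginMap, // your plugin set +// Cmd: exec.Command("./my-plugin"), +// SecureConfig: &plugin.SecureConfig{ +// Checksum: sum, +// Hash: sha256.New(), +// }, +// }) +// 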
+func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// CleanupClients makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set Killed so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// NewClient creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (ClientConfig.Managed was set) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Exited tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. 
+func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// Kill ends the executing subprocess (if it is running) and performs any +// cleanup tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for all the client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Start starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returns the address to connect to via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // Exactly one of Cmd or Reattach must be set, otherwise it is an error. + // We wrap this in a {} for scoping reasons, hoping that the escape + // analysis will pop the stack here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. 
If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Set up a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. + if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + c.stderrWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // wait to finish reading from stderr since the stderr pipe reader + // will be closed by the subsequent call to cmd.Wait(). + c.stderrWaitGroup.Wait() + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs right away + c.logger.Debug("plugin process exited", debugMsgArgs...) 
+ os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. 
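+ // (Editor's note) For orientation, a complete handshake line as parsed + // above has the shape: + // + // CORE-PROTOCOL-VERSION | APP-PROTOCOL-VERSION | NETWORK | ADDRESS | PROTOCOL | TLS-CERT + // + // for example "1|4|unix|/tmp/plugin123|grpc|<base64 cert>" (values + // hypothetical); the last two fields are optional.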
+ if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. + conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. 
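+// +// A minimal sketch (editor's addition; handshake and pluginMap are +// placeholders) of persisting this information and reattaching later: +// +// rc := client.ReattachConfig() // after Start has succeeded +// // ... store rc.Pid, rc.Addr and rc.Protocol somewhere ... +// reattached := plugin.NewClient(&plugin.ClientConfig{ +// HandshakeConfig: handshake, +// Plugins: pluginMap, +// Reattach: rc, +// }) +// 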
+func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of the server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are suppressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep-alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +var stdErrBufferSize = 64 * 1024 + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + defer c.stderrWaitGroup.Done() + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + reader := bufio.NewReaderSize(r, stdErrBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + + for { + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } + + c.config.Stderr.Write(line) + + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. 
+ if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. + l.Debug(string(line)) + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/discover.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 00000000000..d22c566ed50 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 00000000000..22a7baa6a0d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. 
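+// +// For example (editor's sketch; resp.Error is a hypothetical response field): +// +// resp.Error = NewBasicError(err) // a nil err yields a nil *BasicError +// 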
+func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 00000000000..daf142d1709 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Process send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. 
Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. 
+type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. 
+func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000000..d0d0d8e20b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,117 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. 
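+ // (Editor's note) Both Run and StartStream block until the broker + // stream shuts down, so each gets its own goroutine below.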
+ brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 00000000000..1a8a8e70ea4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// grpcControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It currently performs a hard stop rather +// than a graceful one; see the TODO below. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefulStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 00000000000..d3dbf1cedcb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,142 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. 
+// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger +} + +// ServerProtocol impl. +func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. 
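// [Editor's annotation, not vendored code] With the zero value of the
// struct below, GRPCServer.Config() above would emit this JSON line,
// which the host parses back into a GRPCServerConfig:
//
//	{"stdout_addr":"","stderr_addr":""}
//
// The addresses, when set, point at listeners carrying the plugin's
// stdout/stderr streams.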
+type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 00000000000..aa2fdc81387 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package plugin diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 00000000000..b6850aa59ea --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. 
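// [Editor's annotation, not vendored code] Both directions of the
// StartStream stream carry ConnInfo messages like the sketch below, so
// that a sub-connection for a given service ID can be dialed
// out-of-band; the values here are illustrative:
//
//	&ConnInfo{ServiceId: 7, Network: "tcp", Address: "127.0.0.1:10017"}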
+type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 00000000000..38b4204326e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. 
+type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/log_entry.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 00000000000..fb2ef930caa --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. +func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. 
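	// [Editor's annotation, not vendored code] At this point the
	// well-known hclog fields have been deleted from raw. For an
	// illustrative input line such as
	//
	//	{"@level":"debug","@message":"open","@timestamp":"2019-01-02T15:04:05.000000Z","path":"/tmp/x"}
	//
	// raw now holds only {"path":"/tmp/x"}, which the loop below turns
	// into KVPairs.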
+ for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mtls.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 00000000000..88955245877 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returns in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 00000000000..01c45ad7c68 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. 
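// [Editor's annotation, not vendored code] A hedged usage sketch of the
// methods defined below; the variable names and AddHelperServer are
// hypothetical. One side reserves an ID and accepts, the peer dials the
// same ID after learning it through an ordinary RPC argument:
//
//	id := broker.NextId()
//	go broker.AcceptAndServe(id, &AddHelperServer{}) // serving side
//	// ...send id to the peer in a request or response...
//	conn, err := broker.Dial(id) // peer side connects by the same ID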
+type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/plugin.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 00000000000..79d9674633a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,58 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. 
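// [Editor's annotation, not vendored code] An embedding sketch, with
// MyPlugin hypothetical; only the GRPCPlugin methods then need real
// implementations:
//
//	type MyPlugin struct {
//		plugin.NetRPCUnsupportedPlugin
//		// GRPCServer and GRPCClient from GRPCPlugin go here.
//	}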
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 00000000000..88c999a580d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_posix.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 00000000000..70ba546bf6d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 00000000000..9f7b0180901 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/protocol.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000000..0cfc19e52d6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. 
This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 00000000000..f30a4b1d387 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. 
+// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 00000000000..5bb18dd5db1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. 
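// [Editor's annotation, not vendored code] ServeConn below accepts three
// yamux streams in a fixed order: control (net/rpc), then stdout, then
// stderr. NewRPCClient in rpc_client.go above opens them in the same
// order, so the two sides pair up without any further negotiation.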
+func (s *RPCServer) Serve(lis net.Listener) {
+	for {
+		conn, err := lis.Accept()
+		if err != nil {
+			log.Printf("[ERR] plugin: plugin server: %s", err)
+			return
+		}
+
+		go s.ServeConn(conn)
+	}
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+	// First create the yamux server to wrap this connection
+	mux, err := yamux.Server(conn, nil)
+	if err != nil {
+		conn.Close()
+		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+		return
+	}
+
+	// Accept the control connection
+	control, err := mux.Accept()
+	if err != nil {
+		mux.Close()
+		if err != io.EOF {
+			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+		}
+
+		return
+	}
+
+	// Connect the stdstreams (in, out, err)
+	stdstream := make([]net.Conn, 2)
+	for i, _ := range stdstream {
+		stdstream[i], err = mux.Accept()
+		if err != nil {
+			mux.Close()
+			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+			return
+		}
+	}
+
+	// Copy std streams out to the proper place
+	go copyStream("stdout", stdstream[0], s.Stdout)
+	go copyStream("stderr", stdstream[1], s.Stderr)
+
+	// Create the broker and start it up
+	broker := newMuxBroker(mux)
+	go broker.Run()
+
+	// Use the control connection to build the dispenser and serve the
+	// connection.
+	server := rpc.NewServer()
+	server.RegisterName("Control", &controlServer{
+		server: s,
+	})
+	server.RegisterName("Dispenser", &dispenseServer{
+		broker:  broker,
+		plugins: s.Plugins,
+	})
+	server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.DoneCh != nil {
+		close(s.DoneCh)
+		s.DoneCh = nil
+	}
+}
+
+// controlServer handles the control RPC methods (Ping, Quit) for an
+// RPCServer.
+type controlServer struct {
+	server *RPCServer
+}
+
+// Ping can be called to verify the connection (and likely the binary)
+// is still alive to a plugin.
+func (c *controlServer) Ping(
+	null bool, response *struct{}) error {
+	*response = struct{}{}
+	return nil
+}
+
+func (c *controlServer) Quit(
+	null bool, response *struct{}) error {
+	// End the server
+	c.server.done()
+
+	// Always return true
+	*response = struct{}{}
+
+	return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+	broker  *MuxBroker
+	plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+	name string, response *uint32) error {
+	// Find the function to create this implementation
+	p, ok := d.plugins[name]
+	if !ok {
+		return fmt.Errorf("unknown plugin type: %s", name)
+	}
+
+	// Create the implementation first so we know if there is an error.
+	impl, err := p.Server(d.broker)
+	if err != nil {
+		// We turn the error into an errors error so that it works across RPC
+		return errors.New(err.Error())
+	}
+
+	// Reserve an ID for our implementation
+	id := d.broker.NextId()
+	*response = id
+
+	// Run the rest in a goroutine since it can only happen once this RPC
+	// call returns. We wait for a connection for the plugin implementation
+	// and serve it.
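	// [Editor's annotation, not vendored code] The peer's
	// RPCClient.Dispense receives this ID and calls broker.Dial(id);
	// the Accept in the goroutine below pairs with that Dial, and the
	// resulting stream carries the net/rpc session for this plugin.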
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 00000000000..4c230e3ab4c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,452 @@ +package plugin + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. 
+ // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to os.Stderr. +// +// This is the method that plugins should call in their main() functions. 
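// [Editor's annotation, not vendored code] A minimal plugin main() as a
// hedged sketch; the cookie values, the "kv" name, and KVGRPCPlugin are
// hypothetical:
//
//	func main() {
//		plugin.Serve(&plugin.ServeConfig{
//			HandshakeConfig: plugin.HandshakeConfig{
//				ProtocolVersion:  1,
//				MagicCookieKey:   "BASIC_PLUGIN",
//				MagicCookieValue: "hello",
//			},
//			Plugins:    plugin.PluginSet{"kv": &KVGRPCPlugin{}},
//			GRPCServer: plugin.DefaultGRPCServer,
//		})
//	}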
+func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
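	// [Editor's annotation, not vendored code] This wrap only applies to
	// the net/rpc protocol. In the gRPC branch below, the same tlsConfig
	// is instead handed to GRPCServer.TLS, where Init turns it into a
	// grpc.Creds server option.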
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Accept connections and wait for completion + go server.Serve(listener) + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. 
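// [Editor's annotation, not vendored code] For example, a listener bound
// to /tmp/plugin123456 (a made-up name) would leave that socket file on
// disk after Close without the wrapper below; rmListener.Close unlinks it.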
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server_mux.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 00000000000..033079ea0fc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/stream.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 00000000000..1d547aaaab3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 00000000000..2cf2c26cc5c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. 
+ l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. 
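+//
+// A hedged usage sketch (the "counter" name and CounterPlugin type are
+// hypothetical; Dispense is the go-plugin client lookup method):
+//
+//	client, _ := TestPluginGRPCConn(t, map[string]Plugin{
+//		"counter": &CounterPlugin{},
+//	})
+//	raw, err := client.Dispense("counter")
+//	// raw is the served implementation; assert it to its interface.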
+func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 00000000000..be2cc4dfb60 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/safetemp.go new file mode 100644 index 00000000000..c4ae72b7899 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/go-safetemp/safetemp.go @@ -0,0 +1,40 @@ +package safetemp + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Dir creates a new temporary directory that isn't yet created. This +// can be used with calls that expect a non-existent directory. +// +// The directory is created as a child of a temporary directory created +// within the directory dir starting with prefix. The temporary directory +// returned is always named "temp". The parent directory has the specified +// prefix. +// +// The returned io.Closer should be used to clean up the returned directory. +// This will properly remove the returned directory and any other temporary +// files created. +// +// If an error is returned, the Closer does not need to be called (and will +// be nil). +func Dir(dir, prefix string) (string, io.Closer, error) { + // Create the temporary directory + td, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", nil, err + } + + return filepath.Join(td, "temp"), pathCloser(td), nil +} + +// pathCloser implements io.Closer to remove the given path on Close. +type pathCloser string + +// Close deletes this path. +func (p pathCloser) Close() error { + return os.RemoveAll(string(p)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. 
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/decoder.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 00000000000..bed9ebbe141 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,729 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. +func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. 
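+	// The stack of kinds recorded here is read later by decodeInterface,
+	// which decodes objects at the root (or directly inside a slice) into
+	// map[string]interface{} and nested objects into lists of maps.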
+ if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. 
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode.
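+//
+// Roughly (illustrative; the field names and struct tags below are
+// hypothetical), the ambiguous JSON
+//
+//	{"service": {"web": {"cmd": "run"}}}
+//
+// may arrive as a single ObjectItem carrying the two keys "service" and
+// "web"; expandObject rebuilds the nested ObjectType so the item can
+// decode into a struct such as
+//
+//	type Service struct {
+//		Name string `hcl:",key"`
+//		Cmd  string `hcl:"cmd"`
+//	}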
+func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. + if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 00000000000..575a20b50b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. 
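+//
+// A minimal decoding sketch (the Config type and its field are
+// illustrative, not part of this package):
+//
+//	type Config struct {
+//		Name string `hcl:"name"`
+//	}
+//	var c Config
+//	err := hcl.Decode(&c, `name = "example"`)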
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 00000000000..6e5ef654bb8
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language).
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+	node()
+	Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+	Node     Node            // usually a *ObjectList
+	Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+	return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+	Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+	o.Items = append(o.Items, item)
+}
+
+// Filter returns the objects whose key lists have the given keys as a
+// prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+	// keys is only one length long if it's of type assignment. If it's a
If it's a
+	// nested object it can be larger than one. In that case "assign" is
+	// invalid as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+	// string. If key length is larger than one, val can be only of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL object type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /*-style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 00000000000..ba07ad42b02 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// node it returns can be used to rewrite the AST. Walking stops if the
+// returned bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed to fn.
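+//
+// As an illustrative sketch (not upstream documentation), lower-casing every
+// key in a parsed file could look like this, assuming the caller imports
+// "strings" and holds an *ast.File in file:
+//
+//	Walk(file.Node, func(n Node) (Node, bool) {
+//		if k, ok := n.(*ObjectKey); ok {
+//			k.Token.Text = strings.ToLower(k.Token.Text)
+//		}
+//		return n, true // true: keep descending into children
+//	})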
+func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? + fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 00000000000..5c99381dfbf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 00000000000..64c83bcfb55 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,532 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. 
If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. 
+ p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
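+//
+// For instance (an illustrative note, not upstream documentation): in
+// `port = 8080` the value becomes a *ast.LiteralType, in `tags = ["a", "b"]`
+// it becomes a *ast.ListType, and in `meta = { ... }` it becomes a
+// *ast.ObjectType, matching the token switch below.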
+func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the 
underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 00000000000..624a18fe3a7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,652 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. 
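+// Note (illustrative, not upstream): rune(0) is safe as a sentinel here
+// because next() below treats a literal NUL byte in the input as an error
+// ("unexpected null character (0x00)") rather than returning it as data.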
+const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == utf8.RuneError && size == 1 { + s.err("illegal UTF-8 encoding") + return ch + } + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + if ch == '\x00' { + s.err("unexpected null character (0x00)") + return eof + } + + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
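+//
+// As an illustrative sketch of the token stream (not upstream
+// documentation), scanning the input
+//
+//	enabled = true
+//
+// yields IDENT("enabled"), ASSIGN("="), BOOL("true") and finally EOF, each
+// token carrying the position at which its text starts.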
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start && ch != eof { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 00000000000..5f981eaa2f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + 
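+// An illustrative sketch of the HCL-specific behavior (not upstream
+// documentation):
+//
+//	s, err := Unquote(`"listener ${var.port}"`)
+//	// s == "listener ${var.port}", err == nil: the ${...} interpolation
+//	// is copied through verbatim instead of being unescaped.
+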
+import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 00000000000..59c1bb72d4a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
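+//
+// Illustrative example (not upstream documentation): for p at line 1,
+// offset 4 and u at line 2, offset 10, p.Before(u) is true and p.After(u)
+// is false; a position compares as earlier when the other position lies at
+// a greater byte offset or on a greater line.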
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 00000000000..e37c0664ecd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 00000000000..125a5f07298 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? 
not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 00000000000..fe3f0f09502 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). 
So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + 
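+// As an illustrative sketch (not upstream documentation), scanning the JSON
+// input
+//
+//	{"count": 3}
+//
+// produces LBRACE, STRING(`"count"`), COLON, NUMBER("3"), RBRACE and then
+// EOF.
+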
+// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \123 results in a call to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error at the most recent position to the s.Error
+// function. If that function is not defined, it prints the error to os.Stderr
+// by default.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal (ASCII) digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or
+// hexadecimal digit rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/position.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 00000000000..59c1bb72d4a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Pos is valid if the line number is > 0.
+type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/token.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 00000000000..95a0c3eee65 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. 
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/lex.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 00000000000..d9993c2928a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/parse.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 00000000000..1fca53c4cee
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hclParser "github.com/hashicorp/hcl/hcl/parser"
+	jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/LICENSE
new file mode 100644
index 00000000000..82b4de97c7e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+   If you create software not governed by this License, and you want to
+   create a new license for such software, you may create and use a modified
+   version of this License if you rename the license and remove any
+   references to the name of the license steward (except to note that such
+   modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+   If You choose to distribute Source Code Form that is Incompatible With
+   Secondary Licenses under the terms of this version of the License, the
+   notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+   This Source Code Form is subject to the
+   terms of the Mozilla Public License, v.
+   2.0. If a copy of the MPL was not
+   distributed with this file, You can
+   obtain one at
+   http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+   This Source Code Form is “Incompatible
+   With Secondary Licenses”, as defined by
+   the Mozilla Public License, v. 2.0.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
new file mode 100644
index 00000000000..dd308223983
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
@@ -0,0 +1,262 @@
+package dynblock
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// expandBody wraps another hcl.Body and expands any "dynamic" blocks found
+// inside whenever Content or PartialContent is called.
+type expandBody struct {
+	original   hcl.Body
+	forEachCtx *hcl.EvalContext
+	iteration  *iteration // non-nil if we're nested inside another "dynamic" block
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the transformer.
+	//
+	// Note that this is re-implemented here rather than delegating to the
+	// existing support required by the underlying body because we need to
+	// retain access to the entire original body on subsequent decode operations
+	// so we can retain any "dynamic" blocks for types we didn't consume on
+	// the first pass.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]hcl.BlockHeaderSchema
+}
+
+func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	extSchema := b.extendSchema(schema)
+	rawContent, diags := b.original.Content(extSchema)
+
+	blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false)
+	diags = append(diags, blockDiags...)
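+	// prepareAttributes filters out any attributes already claimed by an
+	// earlier PartialContent call and, when this body is nested in an
+	// iteration, wraps the remaining expressions so that they can see the
+	// iterator variables.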
+ attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + return content, diags +} + +func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + extSchema := b.extendSchema(schema) + rawContent, _, diags := b.original.PartialContent(extSchema) + // We discard the "remain" argument above because we're going to construct + // our own remain that also takes into account remaining "dynamic" blocks. + + blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) + diags = append(diags, blockDiags...) + attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + remain := &expandBody{ + original: b.original, + forEachCtx: b.forEachCtx, + iteration: b.iteration, + hiddenAttrs: make(map[string]struct{}), + hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), + } + for name := range b.hiddenAttrs { + remain.hiddenAttrs[name] = struct{}{} + } + for typeName, blockS := range b.hiddenBlocks { + remain.hiddenBlocks[typeName] = blockS + } + for _, attrS := range schema.Attributes { + remain.hiddenAttrs[attrS.Name] = struct{}{} + } + for _, blockS := range schema.Blocks { + remain.hiddenBlocks[blockS.Type] = blockS + } + + return content, remain, diags +} + +func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + // If we have any hiddenBlocks then we also need to register those here + // so that a call to "Content" on the underlying body won't fail. + // (We'll filter these out again once we process the result of either + // Content or PartialContent.) + for _, blockS := range b.hiddenBlocks { + extSchema.Blocks = append(extSchema.Blocks, blockS) + } + + // If we have any hiddenAttrs then we also need to register these, for + // the same reason as we deal with hiddenBlocks above. + if len(b.hiddenAttrs) != 0 { + newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) + copy(newAttrs, extSchema.Attributes) + for name := range b.hiddenAttrs { + newAttrs = append(newAttrs, hcl.AttributeSchema{ + Name: name, + Required: false, + }) + } + extSchema.Attributes = newAttrs + } + + return extSchema +} + +func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { + if len(b.hiddenAttrs) == 0 && b.iteration == nil { + // Easy path: just pass through the attrs from the original body verbatim + return rawAttrs + } + + // Otherwise we have some work to do: we must filter out any attributes + // that are hidden (since a previous PartialContent call already saw these) + // and wrap the expressions of the inner attributes so that they will + // have access to our iteration variables. 
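+	// (The wrapping is done by exprWrap, which injects the iterator's
+	// key/value object into a child EvalContext at evaluation time.)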
+ attrs := make(hcl.Attributes, len(rawAttrs)) + for name, rawAttr := range rawAttrs { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + if b.iteration != nil { + attr := *rawAttr // shallow copy so we can mutate it + attr.Expr = exprWrap{ + Expression: attr.Expr, + i: b.iteration, + } + attrs[name] = &attr + } else { + // If we have no active iteration then no wrapping is required. + attrs[name] = rawAttr + } + } + return attrs +} + +func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { + var blocks hcl.Blocks + var diags hcl.Diagnostics + + for _, rawBlock := range rawBlocks { + switch rawBlock.Type { + case "dynamic": + realBlockType := rawBlock.Labels[0] + if _, hidden := b.hiddenBlocks[realBlockType]; hidden { + continue + } + + var blockS *hcl.BlockHeaderSchema + for _, candidate := range schema.Blocks { + if candidate.Type == realBlockType { + blockS = &candidate + break + } + } + if blockS == nil { + // Not a block type that the caller requested. + if !partial { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), + Subject: &rawBlock.LabelRanges[0], + }) + } + continue + } + + spec, specDiags := b.decodeSpec(blockS, rawBlock) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + continue + } + + if spec.forEachVal.IsKnown() { + for it := spec.forEachVal.ElementIterator(); it.Next(); { + key, value := it.Element() + i := b.iteration.MakeChild(spec.iteratorName, key, value) + + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + // Attach our new iteration context so that attributes + // and other nested blocks can refer to our iterator. + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + } else { + // If our top-level iteration value isn't known then we're forced + // to compromise since HCL doesn't have any concept of an + // "unknown block". In this case then, we'll produce a single + // dynamic block with the iterator values set to DynamicVal, + // which at least makes the potential for a block visible + // in our result, even though it's not represented in a fully-accurate + // way. + i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + block.Body = b.expandChild(block.Body, i) + + // We additionally force all of the leaf attribute values + // in the result to be unknown so the calling application + // can, if necessary, use that as a heuristic to detect + // when a single nested block might be standing in for + // multiple blocks yet to be expanded. This retains the + // structure of the generated body but forces all of its + // leaf attribute values to be unknown. + block.Body = unknownBody{block.Body} + + blocks = append(blocks, block) + } + } + + default: + if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { + // A static block doesn't create a new iteration context, but + // it does need to inherit _our own_ iteration context in + // case it contains expressions that refer to our inherited + // iterators, or nested "dynamic" blocks. 
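+				// (b.iteration may be nil at the top level; expandChild and
+				// iteration.EvalContext both tolerate a nil iteration.)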
+ expandedBlock := *rawBlock // shallow copy + expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) + blocks = append(blocks, &expandedBlock) + } + } + } + + return blocks, diags +} + +func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { + chiCtx := i.EvalContext(b.forEachCtx) + ret := Expand(child, chiCtx) + ret.(*expandBody).iteration = i + return ret +} + +func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // blocks aren't allowed in JustAttributes mode and this body can + // only produce blocks, so we'll just pass straight through to our + // underlying body here. + return b.original.JustAttributes() +} + +func (b *expandBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go new file mode 100644 index 00000000000..41c0be267ac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go @@ -0,0 +1,215 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type expandSpec struct { + blockType string + blockTypeRange hcl.Range + defRange hcl.Range + forEachVal cty.Value + iteratorName string + labelExprs []hcl.Expression + contentBody hcl.Body + inherited map[string]*iteration +} + +func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var schema *hcl.BodySchema + if len(blockS.LabelNames) != 0 { + schema = dynamicBlockBodySchemaLabels + } else { + schema = dynamicBlockBodySchemaNoLabels + } + + specContent, specDiags := rawSpec.Body.Content(schema) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + return nil, diags + } + + //// for_each attribute + + eachAttr := specContent.Attributes["for_each"] + eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) + diags = append(diags, eachDiags...) + + if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { + // We skip this error for DynamicPseudoType because that means we either + // have a null (which is checked immediately below) or an unknown + // (which is handled in the expandBody Content methods). + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + if eachVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: "Cannot use a null value in for_each.", + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + + //// iterator attribute + + iteratorName := blockS.Type + if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { + itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) + diags = append(diags, itDiags...) 
+ if itDiags.HasErrors() { + return nil, diags + } + + if len(itTraversal) != 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic iterator name", + Detail: "Dynamic iterator must be a single variable name.", + Subject: itTraversal.SourceRange().Ptr(), + }) + return nil, diags + } + + iteratorName = itTraversal.RootName() + } + + var labelExprs []hcl.Expression + if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { + var labelDiags hcl.Diagnostics + labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) + diags = append(diags, labelDiags...) + if labelDiags.HasErrors() { + return nil, diags + } + + if len(labelExprs) > len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic block label", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), + }) + return nil, diags + } else if len(labelExprs) < len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient dynamic block labels", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelsAttr.Expr.Range().Ptr(), + }) + return nil, diags + } + } + + // Since our schema requests only blocks of type "content", we can assume + // that all entries in specContent.Blocks are content blocks. + if len(specContent.Blocks) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing dynamic content block", + Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", + Subject: &specContent.MissingItemRange, + }) + return nil, diags + } + if len(specContent.Blocks) > 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic content block", + Detail: "Only one nested content block is allowed for each dynamic block.", + Subject: &specContent.Blocks[1].DefRange, + }) + return nil, diags + } + + return &expandSpec{ + blockType: blockS.Type, + blockTypeRange: rawSpec.LabelRanges[0], + defRange: rawSpec.DefRange, + forEachVal: eachVal, + iteratorName: iteratorName, + labelExprs: labelExprs, + contentBody: specContent.Blocks[0].Body, + }, diags +} + +func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + lCtx := i.EvalContext(ctx) + for _, labelExpr := range s.labelExprs { + labelVal, labelDiags := labelExpr.Value(lCtx) + diags = append(diags, labelDiags...) 
+ if labelDiags.HasErrors() { + return nil, diags + } + + var convErr error + labelVal, convErr = convert.Convert(labelVal, cty.String) + if convErr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if labelVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "Cannot use a null value as a dynamic block label.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if !labelVal.IsKnown() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + + labels = append(labels, labelVal.AsString()) + labelRanges = append(labelRanges, labelExpr.Range()) + } + + block := &hcl.Block{ + Type: s.blockType, + TypeRange: s.blockTypeRange, + Labels: labels, + LabelRanges: labelRanges, + DefRange: s.defRange, + Body: s.contentBody, + } + + return block, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go new file mode 100644 index 00000000000..6916fc15800 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go @@ -0,0 +1,42 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type exprWrap struct { + hcl.Expression + i *iteration +} + +func (e exprWrap) Variables() []hcl.Traversal { + raw := e.Expression.Variables() + ret := make([]hcl.Traversal, 0, len(raw)) + + // Filter out traversals that refer to our iterator name or any + // iterator we've inherited; we're going to provide those in + // our Value wrapper, so the caller doesn't need to know about them. + for _, traversal := range raw { + rootName := traversal.RootName() + if rootName == e.i.IteratorName { + continue + } + if _, inherited := e.i.Inherited[rootName]; inherited { + continue + } + ret = append(ret, traversal) + } + return ret +} + +func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + extCtx := e.i.EvalContext(ctx) + return e.Expression.Value(extCtx) +} + +// UnwrapExpression returns the expression being wrapped by this instance. +// This allows the original expression to be recovered by hcl.UnwrapExpression. 
+func (e exprWrap) UnwrapExpression() hcl.Expression {
+	return e.Expression
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go
new file mode 100644
index 00000000000..7056d336064
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go
@@ -0,0 +1,66 @@
+package dynblock
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type iteration struct {
+	IteratorName string
+	Key          cty.Value
+	Value        cty.Value
+	Inherited    map[string]*iteration
+}
+
+func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration {
+	return &iteration{
+		IteratorName: s.iteratorName,
+		Key:          key,
+		Value:        value,
+		Inherited:    s.inherited,
+	}
+}
+
+func (i *iteration) Object() cty.Value {
+	return cty.ObjectVal(map[string]cty.Value{
+		"key":   i.Key,
+		"value": i.Value,
+	})
+}
+
+func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext {
+	new := base.NewChild()
+
+	if i != nil {
+		new.Variables = map[string]cty.Value{}
+		for name, otherIt := range i.Inherited {
+			new.Variables[name] = otherIt.Object()
+		}
+		new.Variables[i.IteratorName] = i.Object()
+	}
+
+	return new
+}
+
+func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration {
+	if i == nil {
+		// Create an entirely new root iteration.
+		return &iteration{
+			IteratorName: iteratorName,
+			Key:          key,
+			Value:        value,
+		}
+	}
+
+	inherited := map[string]*iteration{}
+	for name, otherIt := range i.Inherited {
+		inherited[name] = otherIt
+	}
+	inherited[i.IteratorName] = i
+	return &iteration{
+		IteratorName: iteratorName,
+		Key:          key,
+		Value:        value,
+		Inherited:    inherited,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go
new file mode 100644
index 00000000000..b7e8ca95170
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go
@@ -0,0 +1,44 @@
+package dynblock
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Expand "dynamic" blocks in the given body, returning a new body that
+// has those blocks expanded.
+//
+// The given EvalContext is used when evaluating "for_each" and "labels"
+// attributes within dynamic blocks, allowing those expressions access to
+// variables and functions beyond the iterator variable created by the
+// iteration.
+//
+// Expand returns no diagnostics because no blocks are actually expanded
+// until a call to Content or PartialContent on the returned body, which
+// will then expand only the blocks selected by the schema.
+//
+// "dynamic" blocks are also expanded automatically within nested blocks
+// in the given body, including within other dynamic blocks, thus allowing
+// multi-dimensional iteration. However, it is not possible to
+// dynamically-generate the "dynamic" blocks themselves except through nesting.
+//
+//     parent {
+//       dynamic "child" {
+//         for_each = child_objs
+//         content {
+//           dynamic "grandchild" {
+//             for_each = child.value.children
+//             labels   = [grandchild.key]
+//             content {
+//               parent_key = child.key
+//               value      = grandchild.value
+//             }
+//           }
+//         }
+//       }
+//     }
+func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body {
+	return &expandBody{
+		original:   body,
+		forEachCtx: ctx,
+	}
+}
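To make the expansion flow concrete, here is a minimal, self-contained sketch of driving Expand end to end. It is not part of the diff, and it assumes the hclparse package from the same hcl2 module:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/ext/dynblock"
    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hclparse"
    )

    func main() {
    	const src = `
    dynamic "setting" {
      for_each = ["a", "b"]
      content {
        name = setting.value
      }
    }
    `
    	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}

    	// Expansion is lazy: nothing happens until Content is called with a
    	// schema that selects the real block type ("setting").
    	expanded := dynblock.Expand(f.Body, &hcl.EvalContext{})

    	content, diags := expanded.Content(&hcl.BodySchema{
    		Blocks: []hcl.BlockHeaderSchema{{Type: "setting"}},
    	})
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}

    	attrSchema := &hcl.BodySchema{
    		Attributes: []hcl.AttributeSchema{{Name: "name", Required: true}},
    	}
    	for _, block := range content.Blocks {
    		// Each generated block's body carries its iteration, so
    		// setting.value resolves during attribute evaluation.
    		inner, _ := block.Body.Content(attrSchema)
    		val, _ := inner.Attributes["name"].Expr.Value(nil)
    		fmt.Println(val.AsString()) // "a", then "b"
    	}
    }
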
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
new file mode 100644
index 00000000000..dc8ed5a2f40
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
@@ -0,0 +1,50 @@
+package dynblock
+
+import "github.com/hashicorp/hcl2/hcl"
+
+var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{
+	Type:       "dynamic",
+	LabelNames: []string{"type"},
+}
+
+var dynamicBlockBodySchemaLabels = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name:     "for_each",
+			Required: true,
+		},
+		{
+			Name:     "iterator",
+			Required: false,
+		},
+		{
+			Name:     "labels",
+			Required: true,
+		},
+	},
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type:       "content",
+			LabelNames: nil,
+		},
+	},
+}
+
+var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name:     "for_each",
+			Required: true,
+		},
+		{
+			Name:     "iterator",
+			Required: false,
+		},
+	},
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type:       "content",
+			LabelNames: nil,
+		},
+	},
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
new file mode 100644
index 00000000000..932f6a32b06
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
@@ -0,0 +1,84 @@
+package dynblock
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// unknownBody is a funny body that just reports everything inside it as
+// unknown. It uses a given other body as a sort of template for what attributes
+// and blocks are inside -- including source location information -- but
+// substitutes unknown values of unknown type for all attributes.
+//
+// This rather odd process is used to handle expansion of dynamic blocks whose
+// for_each expression is unknown. Since a block cannot itself be unknown,
+// we instead arrange for everything _inside_ the block to be unknown instead,
+// to give the best possible approximation.
+type unknownBody struct {
+	template hcl.Body
+}
+
+var _ hcl.Body = unknownBody{}
+
+func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, diags := b.template.Content(schema)
+	content = b.fixupContent(content)
+
+	// We're intentionally preserving the diagnostics reported from the
+	// inner body so that we can still report where the template body doesn't
+	// match the requested schema.
+	return content, diags
+}
+
+func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+	content, remain, diags := b.template.PartialContent(schema)
+	content = b.fixupContent(content)
+	remain = unknownBody{remain} // remaining content must also be wrapped
+
+	// We're intentionally preserving the diagnostics reported from the
+	// inner body so that we can still report where the template body doesn't
+	// match the requested schema.
+	return content, remain, diags
+}
+
+func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
+	attrs, diags := b.template.JustAttributes()
+	attrs = b.fixupAttrs(attrs)
+
+	// We're intentionally preserving the diagnostics reported from the
+	// inner body so that we can still report where the template body doesn't
+	// match the requested schema.
+	return attrs, diags
+}
+
+func (b unknownBody) MissingItemRange() hcl.Range {
+	return b.template.MissingItemRange()
+}
+
+func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent {
+	ret := &hcl.BodyContent{}
+	ret.Attributes = b.fixupAttrs(got.Attributes)
+	if len(got.Blocks) > 0 {
+		ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks))
+		for _, gotBlock := range got.Blocks {
+			new := *gotBlock                      // shallow copy
+			new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown
+			ret.Blocks = append(ret.Blocks, &new)
+		}
+	}
+
+	return ret
+}
+
+func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes {
+	if len(got) == 0 {
+		return nil
+	}
+	ret := make(hcl.Attributes, len(got))
+	for name, gotAttr := range got {
+		new := *gotAttr // shallow copy
+		new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range())
+		ret[name] = &new
+	}
+	return ret
+}
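A short variation on the earlier sketch (same assumptions) showing the unknown-for_each compromise that unknownBody implements: one placeholder block is produced, and all of its leaf attributes read as unknown:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/ext/dynblock"
    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hclparse"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	const src = `
    dynamic "setting" {
      for_each = servers
      content {
        name = setting.value
      }
    }
    `
    	f, _ := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")

    	// "servers" is not yet known, e.g. it depends on something that has
    	// not been computed yet.
    	ctx := &hcl.EvalContext{
    		Variables: map[string]cty.Value{
    			"servers": cty.UnknownVal(cty.List(cty.String)),
    		},
    	}

    	content, _ := dynblock.Expand(f.Body, ctx).Content(&hcl.BodySchema{
    		Blocks: []hcl.BlockHeaderSchema{{Type: "setting"}},
    	})

    	fmt.Println(len(content.Blocks)) // 1 (a single placeholder block)
    	inner, _ := content.Blocks[0].Body.Content(&hcl.BodySchema{
    		Attributes: []hcl.AttributeSchema{{Name: "name", Required: true}},
    	})
    	val, _ := inner.Attributes["name"].Expr.Value(nil)
    	fmt.Println(val.IsKnown()) // false: the leaf value is forced unknown
    }
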
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go
new file mode 100644
index 00000000000..ad838f3e87e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go
@@ -0,0 +1,209 @@
+package dynblock
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// WalkVariables begins the recursive process of walking all expressions and
+// nested blocks in the given body and its child bodies while taking into
+// account any "dynamic" blocks.
+//
+// This function requires that the caller walk through the nested block
+// structure in the given body level-by-level so that an appropriate schema
+// can be provided at each level to inform further processing. This workflow
+// is thus easiest to use for calling applications that have some higher-level
+// schema representation available with which to drive this multi-step
+// process. If your application uses the hcldec package, you may be able to
+// use VariablesHCLDec instead for a more automatic approach.
+func WalkVariables(body hcl.Body) WalkVariablesNode {
+	return WalkVariablesNode{
+		body:           body,
+		includeContent: true,
+	}
+}
+
+// WalkExpandVariables is like WalkVariables but it includes only the variables
+// required for successful block expansion, ignoring any variables referenced
+// inside block contents. The result is the minimal set of all variables
+// required for a call to Expand, excluding variables that would only be
+// needed to subsequently call Content or PartialContent on the expanded
+// body.
+func WalkExpandVariables(body hcl.Body) WalkVariablesNode {
+	return WalkVariablesNode{
+		body: body,
+	}
+}
+
+type WalkVariablesNode struct {
+	body hcl.Body
+	it   *iteration
+
+	includeContent bool
+}
+
+type WalkVariablesChild struct {
+	BlockTypeName string
+	Node          WalkVariablesNode
+}
+
+// Body returns the HCL Body associated with the child node, in case the caller
+// wants to do some sort of inspection of it in order to decide what schema
+// to pass to Visit.
+// +// Most implementations should just fetch a fixed schema based on the +// BlockTypeName field and not access this. Deciding on a schema dynamically +// based on the body is a strange thing to do and generally necessary only if +// your caller is already doing other bizarre things with HCL bodies. +func (c WalkVariablesChild) Body() hcl.Body { + return c.Node.body +} + +// Visit returns the variable traversals required for any "dynamic" blocks +// directly in the body associated with this node, and also returns any child +// nodes that must be visited in order to continue the walk. +// +// Each child node has its associated block type name given in its BlockTypeName +// field, which the calling application should use to determine the appropriate +// schema for the content of each child node and pass it to the child node's +// own Visit method to continue the walk recursively. +func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { + extSchema := n.extendSchema(schema) + container, _, _ := n.body.PartialContent(extSchema) + if container == nil { + return vars, children + } + + children = make([]WalkVariablesChild, 0, len(container.Blocks)) + + if n.includeContent { + for _, attr := range container.Attributes { + for _, traversal := range attr.Expr.Variables() { + var ours, inherited bool + if n.it != nil { + ours = traversal.RootName() == n.it.IteratorName + _, inherited = n.it.Inherited[traversal.RootName()] + } + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + } + + for _, block := range container.Blocks { + switch block.Type { + + case "dynamic": + blockTypeName := block.Labels[0] + inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) + if inner == nil { + continue + } + + iteratorName := blockTypeName + if attr, exists := inner.Attributes["iterator"]; exists { + iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) + if len(iterTraversal) == 0 { + // Ignore this invalid dynamic block, since it'll produce + // an error if someone tries to extract content from it + // later anyway. + continue + } + iteratorName = iterTraversal.RootName() + } + blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) + + if attr, exists := inner.Attributes["for_each"]; exists { + // Filter out iterator names inherited from parent blocks + for _, traversal := range attr.Expr.Variables() { + if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { + vars = append(vars, traversal) + } + } + } + if attr, exists := inner.Attributes["labels"]; exists { + // Filter out both our own iterator name _and_ those inherited + // from parent blocks, since we provide _both_ of these to the + // label expressions. + for _, traversal := range attr.Expr.Variables() { + ours := traversal.RootName() == iteratorName + _, inherited := blockIt.Inherited[traversal.RootName()] + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + + for _, contentBlock := range inner.Blocks { + // We only request "content" blocks in our schema, so we know + // any blocks we find here will be content blocks. We require + // exactly one content block for actual expansion, but we'll + // be more liberal here so that callers can still collect + // variables from erroneous "dynamic" blocks. 
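+				// Each "content" block found here becomes a child node that
+				// carries the dynamic block's iteration, so the caller can
+				// continue the walk using the real block type's schema.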
+ children = append(children, WalkVariablesChild{ + BlockTypeName: blockTypeName, + Node: WalkVariablesNode{ + body: contentBlock.Body, + it: blockIt, + includeContent: n.includeContent, + }, + }) + } + + default: + children = append(children, WalkVariablesChild{ + BlockTypeName: block.Type, + Node: WalkVariablesNode{ + body: block.Body, + it: n.it, + includeContent: n.includeContent, + }, + }) + + } + } + + return vars, children +} + +func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + return extSchema +} + +// This is a more relaxed schema than what's in schema.go, since we +// want to maximize the amount of variables we can find even if there +// are erroneous blocks. +var variableDetectionInnerSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: false, + }, + { + Name: "labels", + Required: false, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go new file mode 100644 index 00000000000..a078d915c04 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go @@ -0,0 +1,43 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" +) + +// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec +// specification to automatically drive the recursive walk through nested +// blocks in the given body. +// +// This is a drop-in replacement for hcldec.Variables which is able to treat +// blocks of type "dynamic" in the same special way that dynblock.Expand would, +// exposing both the variables referenced in the "for_each" and "labels" +// arguments and variables used in the nested "content" block. +func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the +// minimal set of variables required to call Expand, ignoring variables that +// are referenced only inside normal block contents. See WalkExpandVariables +// for more information. +func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkExpandVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { + vars, children := node.Visit(hcldec.ImpliedSchema(spec)) + + if len(children) > 0 { + childSpecs := hcldec.ChildBlockTypes(spec) + for _, child := range children { + if childSpec, exists := childSpecs[child.BlockTypeName]; exists { + vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
+ } + } + } + + return vars +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go new file mode 100644 index 00000000000..c4b379579d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go @@ -0,0 +1,11 @@ +// Package typeexpr extends HCL with a convention for describing HCL types +// within configuration files. +// +// The type syntax is processed statically from a hcl.Expression, so it cannot +// use any of the usual language operators. This is similar to type expressions +// in statically-typed programming languages. +// +// variable "example" { +// type = list(string) +// } +package typeexpr diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go new file mode 100644 index 00000000000..a84338a85a6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go @@ -0,0 +1,196 @@ +package typeexpr + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +const invalidTypeSummary = "Invalid type specification" + +// getType is the internal implementation of both Type and TypeConstraint, +// using the passed flag to distinguish. When constraint is false, the "any" +// keyword will produce an error. +func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { + // First we'll try for one of our keywords + kw := hcl.ExprAsKeyword(expr) + switch kw { + case "bool": + return cty.Bool, nil + case "string": + return cty.String, nil + case "number": + return cty.Number, nil + case "any": + if constraint { + return cty.DynamicPseudoType, nil + } + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), + Subject: expr.Range().Ptr(), + }} + case "list", "map", "set": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), + Subject: expr.Range().Ptr(), + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: expr.Range().Ptr(), + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: expr.Range().Ptr(), + }} + case "": + // okay! we'll fall through and try processing as a call, then. + default: + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), + Subject: expr.Range().Ptr(), + }} + } + + // If we get down here then our expression isn't just a keyword, so we'll + // try to process it as a call instead. 
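+	// For example: list(string), map(number), set(bool),
+	// object({name=string}), or tuple([string, number]).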
+ call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", + Subject: expr.Range().Ptr(), + }} + } + + switch call.Name { + case "bool", "string", "number", "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} + } + + if len(call.Arguments) != 1 { + contextRange := call.ArgsRange + subjectRange := call.ArgsRange + if len(call.Arguments) > 1 { + // If we have too many arguments (as opposed to too _few_) then + // we'll highlight the extraneous arguments as the diagnostic + // subject. + subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) + } + + switch call.Name { + case "list", "set", "map": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), + Subject: &subjectRange, + Context: &contextRange, + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: &subjectRange, + Context: &contextRange, + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: &subjectRange, + Context: &contextRange, + }} + } + } + + switch call.Name { + + case "list": + ety, diags := getType(call.Arguments[0], constraint) + return cty.List(ety), diags + case "set": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Set(ety), diags + case "map": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Map(ety), diags + case "object": + attrDefs, diags := hcl.ExprMap(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + + atys := make(map[string]cty.Type) + for _, attrDef := range attrDefs { + attrName := hcl.ExprAsKeyword(attrDef.Key) + if attrName == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object constructor map keys must be attribute names.", + Subject: attrDef.Key.Range().Ptr(), + Context: expr.Range().Ptr(), + }) + continue + } + aty, attrDiags := getType(attrDef.Value, constraint) + diags = append(diags, attrDiags...) 
+				atys[attrName] = aty
+			}
+			return cty.Object(atys), diags
+	case "tuple":
+		elemDefs, diags := hcl.ExprList(call.Arguments[0])
+		if diags.HasErrors() {
+			return cty.DynamicPseudoType, hcl.Diagnostics{{
+				Severity: hcl.DiagError,
+				Summary:  invalidTypeSummary,
+				Detail:   "Tuple type constructor requires a list of element types.",
+				Subject:  call.Arguments[0].Range().Ptr(),
+				Context:  expr.Range().Ptr(),
+			}}
+		}
+		etys := make([]cty.Type, len(elemDefs))
+		for i, defExpr := range elemDefs {
+			ety, elemDiags := getType(defExpr, constraint)
+			diags = append(diags, elemDiags...)
+			etys[i] = ety
+		}
+		return cty.Tuple(etys), diags
+	default:
+		// Can't access call.Arguments in this path because we've not validated
+		// that it contains exactly one expression here.
+		return cty.DynamicPseudoType, hcl.Diagnostics{{
+			Severity: hcl.DiagError,
+			Summary:  invalidTypeSummary,
+			Detail:   fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
+			Subject:  expr.Range().Ptr(),
+		}}
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
new file mode 100644
index 00000000000..e3f5eef5929
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
@@ -0,0 +1,129 @@
+package typeexpr
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Type attempts to process the given expression as a type expression and, if
+// successful, returns the resulting type. If unsuccessful, error diagnostics
+// are returned.
+func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+	return getType(expr, false)
+}
+
+// TypeConstraint attempts to parse the given expression as a type constraint
+// and, if successful, returns the resulting type. If unsuccessful, error
+// diagnostics are returned.
+//
+// A type constraint has the same structure as a type, but it additionally
+// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
+// used as a wildcard in type checking and type conversion operations.
+func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+	return getType(expr, true)
+}
+
+// TypeString returns a string rendering of the given type as it would be
+// expected to appear in the HCL native syntax.
+//
+// This is primarily intended for showing types to the user in an application
+// that uses typeexpr, where the user can be assumed to be familiar with the
+// type expression syntax. In applications that do not use typeexpr these
+// results may be confusing to the user and so the cty Type.FriendlyName
+// method may be preferable, even though it's less precise.
+//
+// TypeString produces reasonable results only for types like what would be
+// produced by the Type and TypeConstraint functions. In particular, it cannot
+// support capsule types.
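+//
+// As an illustrative sketch of the expected rendering (derived from the
+// logic below, not from upstream documentation):
+//
+//	TypeString(cty.List(cty.String))  // "list(string)"
+//	TypeString(cty.Map(cty.Bool))     // "map(bool)"
+//	TypeString(cty.DynamicPseudoType) // "any"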
+func TypeString(ty cty.Type) string { + // Easy cases first + switch ty { + case cty.String: + return "string" + case cty.Bool: + return "bool" + case cty.Number: + return "number" + case cty.DynamicPseudoType: + return "any" + } + + if ty.IsCapsuleType() { + panic("TypeString does not support capsule types") + } + + if ty.IsCollectionType() { + ety := ty.ElementType() + etyString := TypeString(ety) + switch { + case ty.IsListType(): + return fmt.Sprintf("list(%s)", etyString) + case ty.IsSetType(): + return fmt.Sprintf("set(%s)", etyString) + case ty.IsMapType(): + return fmt.Sprintf("map(%s)", etyString) + default: + // Should never happen because the above is exhaustive + panic("unsupported collection type") + } + } + + if ty.IsObjectType() { + var buf bytes.Buffer + buf.WriteString("object({") + atys := ty.AttributeTypes() + names := make([]string, 0, len(atys)) + for name := range atys { + names = append(names, name) + } + sort.Strings(names) + first := true + for _, name := range names { + aty := atys[name] + if !first { + buf.WriteByte(',') + } + if !hclsyntax.ValidIdentifier(name) { + // Should never happen for any type produced by this package, + // but we'll do something reasonable here just so we don't + // produce garbage if someone gives us a hand-assembled object + // type that has weird attribute names. + // Using Go-style quoting here isn't perfect, since it doesn't + // exactly match HCL syntax, but it's fine for an edge-case. + buf.WriteString(fmt.Sprintf("%q", name)) + } else { + buf.WriteString(name) + } + buf.WriteByte('=') + buf.WriteString(TypeString(aty)) + first = false + } + buf.WriteString("})") + return buf.String() + } + + if ty.IsTupleType() { + var buf bytes.Buffer + buf.WriteString("tuple([") + etys := ty.TupleElementTypes() + first := true + for _, ety := range etys { + if !first { + buf.WriteByte(',') + } + buf.WriteString(TypeString(ety)) + first = false + } + buf.WriteString("])") + return buf.String() + } + + // Should never happen because we covered all cases above. + panic(fmt.Errorf("unsupported type %#v", ty)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/decode.go new file mode 100644 index 00000000000..3a149a8c2cb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. 
If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	rv := reflect.ValueOf(val)
+	if rv.Kind() != reflect.Ptr {
+		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+	}
+
+	return decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	et := val.Type()
+	switch et.Kind() {
+	case reflect.Struct:
+		return decodeBodyToStruct(body, ctx, val)
+	case reflect.Map:
+		return decodeBodyToMap(body, ctx, val)
+	default:
+		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+	}
+}
+
+func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	schema, partial := ImpliedBodySchema(val.Interface())
+
+	var content *hcl.BodyContent
+	var leftovers hcl.Body
+	var diags hcl.Diagnostics
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+	if content == nil {
+		return diags
+	}
+
+	tags := getFieldTags(val.Type())
+
+	if tags.Remain != nil {
+		fieldIdx := *tags.Remain
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(leftovers))
+		case attrsType.AssignableTo(field.Type):
+			attrs, attrsDiags := leftovers.JustAttributes()
+			if len(attrsDiags) > 0 {
+				diags = append(diags, attrsDiags...)
+			}
+			fieldV.Set(reflect.ValueOf(attrs))
+		default:
+			diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
+		}
+	}
+
+	for name, fieldIdx := range tags.Attributes {
+		attr := content.Attributes[name]
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+
+		if attr == nil {
+			if !exprType.AssignableTo(field.Type) {
+				continue
+			}
+
+			// As a special case, if the target is of type hcl.Expression then
+			// we'll assign an actual expression that evaluates to a cty null,
+			// so the caller can deal with it within the cty realm rather
+			// than within the Go realm.
+			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+			fieldV.Set(reflect.ValueOf(synthExpr))
+			continue
+		}
+
+		switch {
+		case attrType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr))
+		case exprType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr.Expr))
+		default:
+			diags = append(diags, DecodeExpression(
+				attr.Expr, ctx, fieldV.Addr().Interface(),
+			)...)
+		}
+	}
+
+	blocksByType := content.Blocks.ByType()
+
+	for typeName, fieldIdx := range tags.Blocks {
+		blocks := blocksByType[typeName]
+		field := val.Type().Field(fieldIdx)
+
+		ty := field.Type
+		isSlice := false
+		isPtr := false
+		if ty.Kind() == reflect.Slice {
+			isSlice = true
+			ty = ty.Elem()
+		}
+		if ty.Kind() == reflect.Ptr {
+			isPtr = true
+			ty = ty.Elem()
+		}
+
+		if len(blocks) > 1 && !isSlice {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
+				Detail: fmt.Sprintf(
+					"Only one %s block is allowed. Another was defined at %s.",
+					typeName, blocks[0].DefRange.String(),
+				),
+				Subject: &blocks[1].DefRange,
+			})
+			continue
+		}
+
+		if len(blocks) == 0 {
+			if isSlice || isPtr {
+				val.Field(fieldIdx).Set(reflect.Zero(field.Type))
+			} else {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  fmt.Sprintf("Missing %s block", typeName),
+					Detail:   fmt.Sprintf("A %s block is required.", typeName),
+					Subject:  body.MissingItemRange().Ptr(),
+				})
+			}
+			continue
+		}
+
+		switch {
+
+		case isSlice:
+			elemType := ty
+			if isPtr {
+				elemType = reflect.PtrTo(ty)
+			}
+			sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
+
+			for i, block := range blocks {
+				if isPtr {
+					v := reflect.New(ty)
+					diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
+					sli.Index(i).Set(v)
+				} else {
+					diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...)
+				}
+			}
+
+			val.Field(fieldIdx).Set(sli)
+
+		default:
+			block := blocks[0]
+			if isPtr {
+				v := reflect.New(ty)
+				diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
+				val.Field(fieldIdx).Set(v)
+			} else {
+				diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
+			}
+
+		}
+
+	}
+
+	return diags
+}
+
+func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	attrs, diags := body.JustAttributes()
+	if attrs == nil {
+		return diags
+	}
+
+	mv := reflect.MakeMap(v.Type())
+
+	for k, attr := range attrs {
+		switch {
+		case attrType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
+		case exprType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
+		default:
+			ev := reflect.New(v.Type().Elem())
+			diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...)
+			mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
+		}
+	}
+
+	v.Set(mv)
+
+	return diags
+}
+
+func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	ty := v.Type()
+
+	switch {
+	case blockType.AssignableTo(ty):
+		v.Elem().Set(reflect.ValueOf(block))
+	case bodyType.AssignableTo(ty):
+		v.Elem().Set(reflect.ValueOf(block.Body))
+	case attrsType.AssignableTo(ty):
+		attrs, attrsDiags := block.Body.JustAttributes()
+		if len(attrsDiags) > 0 {
+			diags = append(diags, attrsDiags...)
+		}
+		v.Elem().Set(reflect.ValueOf(attrs))
+	default:
+		diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...)
+
+		if len(block.Labels) > 0 {
+			blockTags := getFieldTags(ty)
+			for li, lv := range block.Labels {
+				lfieldIdx := blockTags.Labels[li].FieldIndex
+				v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
+			}
+		}
+
+	}
+
+	return diags
+}
+
+// DecodeExpression extracts the value of the given expression into the given
+// value. This value must be something that gocty is able to decode into,
+// since the final decoding is delegated to that package.
+//
+// The given EvalContext is used to resolve any variables or functions in
+// expressions encountered while decoding. This may be nil to require only
+// constant values, for simple applications that do not support variables or
+// functions.
+//
+// The returned diagnostics should be inspected with its HasErrors method to
+// determine if the populated value is valid and complete. If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
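+//
+// A minimal usage sketch (hypothetical variable name; assumes the
+// expression evaluates to something convertible to an int):
+//
+//	var port int
+//	diags := DecodeExpression(expr, nil, &port)
+//	if diags.HasErrors() {
+//		// report the diagnostics to the user
+//	}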
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	srcVal, diags := expr.Value(ctx)
+
+	convTy, err := gocty.ImpliedType(val)
+	if err != nil {
+		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+	}
+
+	srcVal, err = convert.Convert(srcVal, convTy)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+		return diags
+	}
+
+	err = gocty.FromCtyValue(srcVal, val)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+	}
+
+	return diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
new file mode 100644
index 00000000000..aa3c6ea9ef3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -0,0 +1,53 @@
+// Package gohcl allows decoding HCL configurations into Go data structures.
+//
+// It provides a convenient and concise way of describing the schema for
+// configuration and then accessing the resulting data via native Go
+// types.
+//
+// A struct field tag scheme is used, similar to other decoding and
+// unmarshalling libraries. The tags are formatted as in the following example:
+//
+//	ThingType string `hcl:"thing_type,attr"`
+//
+// Within each tag there are two comma-separated tokens. The first is the
+// name of the corresponding construct in configuration, while the second
+// is a keyword giving the kind of construct expected. The following
+// kind keywords are supported:
+//
+//	attr (the default) indicates that the value is to be populated from an attribute
+//	block indicates that the value is to be populated from a block
+//	label indicates that the value is to be populated from a block label
+//	remain indicates that the value is to be populated from the remaining body after populating other fields
+//
+// "attr" fields may either be of type *hcl.Expression, in which case the raw
+// expression is assigned, or of any type accepted by gocty, in which case
+// gocty will be used to assign the value to a native Go type.
+//
+// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
+// corresponding raw value is assigned, or may be a struct that recursively
+// uses the same tags. Block fields may also be slices of any of these types,
+// in which case multiple blocks of the corresponding type are decoded into
+// the slice.
+//
+// "label" fields are considered only in a struct used as the type of a field
+// marked as "block", and are used sequentially to capture the labels of
+// the blocks being decoded. In this case, the name token is used only as
+// an identifier for the label in diagnostic messages.
+//
+// "remain" can be placed on a single field that may be either of type
+// hcl.Body or hcl.Attributes, in which case any remaining body content is
+// placed into this field for delayed processing. If no "remain" field is
+// present then any attributes or blocks not matched by another valid tag
+// will cause an error diagnostic.
+//
+// Only a subset of this tagging/typing vocabulary is supported for the
+// "Encode" family of functions. See the EncodeIntoBody docs for full details
+// on the constraints there.
+//
+// Broadly-speaking this package deals with two types of error. The first is
+// errors in the configuration itself, which are returned as diagnostics
+// written with the configuration author as the target audience. The second
+// is bugs in the calling program, such as invalid struct tags, which are
+// surfaced via panics since there can be no useful runtime handling of such
+// errors and they should certainly not be returned to the user as diagnostics.
+package gohcl
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/encode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
new file mode 100644
index 00000000000..3cbf7e48af4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
@@ -0,0 +1,191 @@
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hclwrite"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// EncodeIntoBody replaces the contents of the given hclwrite Body with
+// attributes and blocks derived from the given value, which must be a
+// struct value or a pointer to a struct value with the struct tags defined
+// in this package.
+//
+// This function can work only with fully-decoded data. It will ignore any
+// fields tagged as "remain", any fields that decode attributes into either
+// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
+// into hcl.Attributes values. This function does not have enough information
+// to complete the decoding of these types.
+//
+// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
+// to produce a whole hclwrite.Block including block labels.
+//
+// As long as a suitable value is given to encode and the destination body
+// is non-nil, this function will always complete. It will panic in case of
+// any errors in the calling program, such as passing an inappropriate type
+// or a nil body.
+//
+// The layout of the resulting HCL source is derived from the ordering of
+// the struct fields, with blank lines around nested blocks of different types.
+// Fields representing attributes should usually precede those representing
+// blocks so that the attributes can group together in the result. For more
+// control, use the hclwrite API directly.
+func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
+	rv := reflect.ValueOf(val)
+	ty := rv.Type()
+	if ty.Kind() == reflect.Ptr {
+		rv = rv.Elem()
+		ty = rv.Type()
+	}
+	if ty.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
+	}
+
+	tags := getFieldTags(ty)
+	populateBody(rv, ty, tags, dst)
+}
+
+// EncodeAsBlock creates a new hclwrite.Block populated with the data from
+// the given value, which must be a struct or pointer to struct with the
+// struct tags defined in this package.
+//
+// If the given struct type has fields tagged with "label" tags then they
+// will be used in order to annotate the created block with labels.
+//
+// This function has the same constraints as EncodeIntoBody and will panic
+// if they are violated.
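+//
+// A minimal sketch (hypothetical struct type, not from the upstream docs):
+//
+//	type Service struct {
+//		Name string `hcl:"name,label"`
+//		Addr string `hcl:"addr"`
+//	}
+//	block := EncodeAsBlock(&Service{Name: "web", Addr: ":8080"}, "service")
+//	// expected to produce: service "web" { addr = ":8080" }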
+func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + labels := make([]string, len(tags.Labels)) + for i, lf := range tags.Labels { + lv := rv.Field(lf.FieldIndex) + // We just stringify whatever we find. It should always be a string + // but if not then we'll still do something reasonable. + labels[i] = fmt.Sprintf("%s", lv.Interface()) + } + + block := hclwrite.NewBlock(blockType, labels) + populateBody(rv, ty, tags, block.Body()) + return block +} + +func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { + nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) + namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) + for n, i := range tags.Attributes { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + for n, i := range tags.Blocks { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + sort.SliceStable(namesOrder, func(i, j int) bool { + ni, nj := namesOrder[i], namesOrder[j] + return nameIdxs[ni] < nameIdxs[nj] + }) + + dst.Clear() + + prevWasBlock := false + for _, name := range namesOrder { + fieldIdx := nameIdxs[name] + field := ty.Field(fieldIdx) + fieldTy := field.Type + fieldVal := rv.Field(fieldIdx) + + if fieldTy.Kind() == reflect.Ptr { + fieldTy = fieldTy.Elem() + fieldVal = fieldVal.Elem() + } + + if _, isAttr := tags.Attributes[name]; isAttr { + + if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { + continue // ignore undecoded fields + } + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + if prevWasBlock { + dst.AppendNewline() + prevWasBlock = false + } + + valTy, err := gocty.ImpliedType(fieldVal.Interface()) + if err != nil { + panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) + } + + val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) + if err != nil { + // This should never happen, since we should always be able + // to decode into the implied type. 
+ panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) + } + + dst.SetAttributeValue(name, val) + + } else { // must be a block, then + elemTy := fieldTy + isSeq := false + if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { + isSeq = true + elemTy = elemTy.Elem() + } + + if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { + continue // ignore undecoded fields + } + prevWasBlock = false + + if isSeq { + l := fieldVal.Len() + for i := 0; i < l; i++ { + elemVal := fieldVal.Index(i) + if !elemVal.IsValid() { + continue // ignore (elem value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(elemVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } else { + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(fieldVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/schema.go new file mode 100644 index 00000000000..88164cb05dd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/schema.go @@ -0,0 +1,174 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. +// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + optional := tags.Optional[n] + field := ty.Field(idx) + + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absense can be + // indicated via a null value, so we don't specify that + // the field is required during decoding. 
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/types.go new file mode 100644 index 00000000000..a94f275adc5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl2/hcl" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go new file mode 100644 index 00000000000..c320961e11e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -0,0 +1,143 
@@
+package hcl
+
+import (
+	"fmt"
+)
+
+// DiagnosticSeverity represents the severity of a diagnostic.
+type DiagnosticSeverity int
+
+const (
+	// DiagInvalid is the invalid zero value of DiagnosticSeverity
+	DiagInvalid DiagnosticSeverity = iota
+
+	// DiagError indicates that the problem reported by a diagnostic prevents
+	// further progress in parsing and/or evaluating the subject.
+	DiagError
+
+	// DiagWarning indicates that the problem reported by a diagnostic warrants
+	// user attention but does not prevent further progress. It is most
+	// commonly used for showing deprecation notices.
+	DiagWarning
+)
+
+// Diagnostic represents information to be presented to a user about an
+// error or anomaly in parsing or evaluating configuration.
+type Diagnostic struct {
+	Severity DiagnosticSeverity
+
+	// Summary and Detail contain the English-language description of the
+	// problem. Summary is a terse description of the general problem and
+	// detail is a more elaborate, often-multi-sentence description of
+	// the problem and what might be done to solve it.
+	Summary string
+	Detail  string
+
+	// Subject and Context are both source ranges relating to the diagnostic.
+	//
+	// Subject is a tight range referring to exactly the construct that
+	// is problematic, while Context is an optional broader range (which should
+	// fully contain Subject) that ought to be shown around Subject when
+	// generating isolated source-code snippets in diagnostic messages.
+	// If Context is nil, the Subject is also the Context.
+	//
+	// Some diagnostics have no source ranges at all. If Context is set then
+	// Subject should always also be set.
+	Subject *Range
+	Context *Range
+
+	// For diagnostics that occur when evaluating an expression, Expression
+	// may refer to that expression and EvalContext may point to the
+	// EvalContext that was active when evaluating it. This may allow for the
+	// inclusion of additional useful information when rendering a diagnostic
+	// message to the user.
+	//
+	// It is not always possible to select a single EvalContext for a
+	// diagnostic, and so in some cases this field may be nil even when an
+	// expression causes a problem.
+	//
+	// EvalContexts form a tree, so the given EvalContext may refer to a parent
+	// which in turn refers to another parent, etc. For a full picture of all
+	// of the active variables and functions the caller must walk up this
+	// chain, preferring definitions that are "closer" to the expression in
+	// case of colliding names.
+	Expression  Expression
+	EvalContext *EvalContext
+}
+
+// Diagnostics is a list of Diagnostic instances.
+type Diagnostics []*Diagnostic
+
+// error implementation, so that diagnostics can be returned via APIs
+// that normally deal in vanilla Go errors.
+//
+// This presents only minimal context about the error, for compatibility
+// with usual expectations about how errors will present as strings.
+func (d *Diagnostic) Error() string {
+	return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
+}
+
+// error implementation, so that sets of diagnostics can be returned via
+// APIs that normally deal in vanilla Go errors.
+func (d Diagnostics) Error() string {
+	count := len(d)
+	switch {
+	case count == 0:
+		return "no diagnostics"
+	case count == 1:
+		return d[0].Error()
+	default:
+		return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
+	}
+}
+
+// Append appends a new error to a Diagnostics and returns the whole Diagnostics.
+// +// This is provided as a convenience for returning from a function that +// collects and then returns a set of diagnostics: +// +// return nil, diags.Append(&hcl.Diagnostic{ ... }) +// +// Note that this modifies the array underlying the diagnostics slice, so +// must be used carefully within a single codepath. It is incorrect (and rude) +// to extend a diagnostics created by a different subsystem. +func (d Diagnostics) Append(diag *Diagnostic) Diagnostics { + return append(d, diag) +} + +// Extend concatenates the given Diagnostics with the receiver and returns +// the whole new Diagnostics. +// +// This is similar to Append but accepts multiple diagnostics to add. It has +// all the same caveats and constraints. +func (d Diagnostics) Extend(diags Diagnostics) Diagnostics { + return append(d, diags...) +} + +// HasErrors returns true if the receiver contains any diagnostics of +// severity DiagError. +func (d Diagnostics) HasErrors() bool { + for _, diag := range d { + if diag.Severity == DiagError { + return true + } + } + return false +} + +func (d Diagnostics) Errs() []error { + var errs []error + for _, diag := range d { + if diag.Severity == DiagError { + errs = append(errs, diag) + } + } + + return errs +} + +// A DiagnosticWriter emits diagnostics somehow. +type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go new file mode 100644 index 00000000000..0b4a2629b98 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -0,0 +1,311 @@ +package hcl + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sort" + + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. +// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, highlightCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" 
+ } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. + if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) + + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue + } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + + } + + w.wr.Write([]byte{'\n'}) + } + + if diag.Expression != nil && diag.EvalContext != nil { + // We will attempt to render the values for any variables + // referenced in the given expression as additional context, for + // situations where the same expression is evaluated multiple + // times in different scopes. + expr := diag.Expression + ctx := diag.EvalContext + + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + for _, traversal := range vars { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + continue + } + + traversalStr := w.traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val))) + } + seen[traversalStr] = struct{}{} + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
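+			// The loop below renders the collected statements in a form like:
+			//
+			//	with foo.bar as "beep",
+			//	     count.index set to null.
+			//
+			// (An illustrative sketch of the output shape, based on the
+			// byte-writes below.)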
+ last := len(stmts) - 1 + + for i, stmt := range stmts { + switch i { + case 0: + w.wr.Write([]byte{'w', 'i', 't', 'h', ' '}) + default: + w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '}) + } + w.wr.Write([]byte(stmt)) + switch i { + case last: + w.wr.Write([]byte{'.', '\n', '\n'}) + default: + w.wr.Write([]byte{',', '\n'}) + } + } + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case TraverseRoot: + buf.WriteString(tStep.Name) + case TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(w.valueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +func (w *diagnosticTextWriter) valueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
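+		// (For example, a value containing "${" is quoted verbatim here even
+		// though HCL source would need it escaped as "$${"; that's acceptable
+		// for a human-readable message.)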
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go new file mode 100644 index 00000000000..c12833440ad --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/doc.go new file mode 100644 index 00000000000..01318c96f87 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/doc.go @@ -0,0 +1 @@ +package hcl diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go new file mode 100644 index 00000000000..915910ad8a5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. 
+func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go new file mode 100644 index 00000000000..6963fbae361 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. +type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go new file mode 100644 index 00000000000..d05cca0b9af --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go @@ -0,0 +1,37 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. +// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. 
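+//
+// For illustration (hypothetical caller code): given an expression written
+// in source as ["a", "b"], one would expect:
+//
+//	exprs, diags := ExprList(expr)
+//	// on success, len(exprs) == 2 and diags is empty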
+func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprList) + return supported + }) + + if exL, supported := physExpr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go new file mode 100644 index 00000000000..96d1ce4bfaf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go @@ -0,0 +1,44 @@ +package hcl + +// ExprMap tests if the given expression is a static map construct and, +// if so, extracts the expressions that represent the map elements. +// If the given expression is not a static map, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprMap that takes no arguments and returns +// []KeyValuePair. This method should return nil if a static map cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) { + type exprMap interface { + ExprMap() []KeyValuePair + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprMap) + return supported + }) + + if exM, supported := physExpr.(exprMap); supported { + if pairs := exM.ExprMap(); pairs != nil { + return pairs, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static map expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// KeyValuePair represents a pair of expressions that serve as a single item +// within a map or object definition construct. +type KeyValuePair struct { + Key Expression + Value Expression +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go new file mode 100644 index 00000000000..6d5d205c495 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go @@ -0,0 +1,68 @@ +package hcl + +type unwrapExpression interface { + UnwrapExpression() Expression +} + +// UnwrapExpression removes any "wrapper" expressions from the given expression, +// to recover the representation of the physical expression given in source +// code. +// +// Sometimes wrapping expressions are used to modify expression behavior, e.g. +// in extensions that need to make some local variables available to certain +// sub-trees of the configuration. This can make it difficult to reliably +// type-assert on the physical AST types used by the underlying syntax. 
+// +// Unwrapping an expression may modify its behavior by stripping away any +// additional constraints or capabilities being applied to the Value and +// Variables methods, so this function should generally only be used prior +// to operations that concern themselves with the static syntax of the input +// configuration, and not with the effective value of the expression. +// +// Wrapper expression types must support unwrapping by implementing a method +// called UnwrapExpression that takes no arguments and returns the embedded +// Expression. Implementations of this method should peel away only one level +// of wrapping, if multiple are present. This method may return nil to +// indicate _dynamically_ that no wrapped expression is available, for +// expression types that might only behave as wrappers in certain cases. +func UnwrapExpression(expr Expression) Expression { + for { + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return expr + } + innerExpr := unwrap.UnwrapExpression() + if innerExpr == nil { + return expr + } + expr = innerExpr + } +} + +// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the +// caller an opportunity to test each level of unwrapping to see whether a +// particular expression is accepted. +// +// This could be used, for example, to unwrap until a particular other +// interface is satisfied, regardless of which wrapping level it is satisfied +// at. +// +// The given callback function must return false to continue unwrapping, or +// true to accept and return the proposed expression given. If the callback +// function rejects even the final, physical expression then the result of +// this function is nil. +func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression { + for { + if until(expr) { + return expr + } + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return nil + } + expr = unwrap.UnwrapExpression() + if expr == nil { + return nil + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go new file mode 100644 index 00000000000..94eaf589290 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go @@ -0,0 +1,23 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// setDiagEvalContext is an internal helper that will impose a particular +// EvalContext on a set of diagnostics in-place, for any diagnostic that +// does not already have an EvalContext set. +// +// We generally expect diagnostics to be immutable, but this is safe to use +// on any Diagnostics where none of the contained Diagnostic objects have yet +// been seen by a caller. Its purpose is to apply additional context to a +// set of diagnostics produced by a "deeper" component as the stack unwinds +// during expression evaluation.
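Returning to the unwrap helpers above: a hedged sketch of the wrapper contract, using a hypothetical annotatedExpr type (not part of HCL) that decorates another expression without changing its behavior.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// annotatedExpr tags another expression with a note; the embedded interface
// passes Value, Variables, Range and StartRange through unchanged.
type annotatedExpr struct {
	hcl.Expression
	note string
}

// UnwrapExpression peels away exactly one level of wrapping, per the
// contract documented on hcl.UnwrapExpression.
func (e *annotatedExpr) UnwrapExpression() hcl.Expression {
	return e.Expression
}

func main() {
	inner, _ := hclsyntax.ParseExpression([]byte(`1 + 1`), "x.hcl", hcl.Pos{Line: 1, Column: 1})
	wrapped := &annotatedExpr{Expression: inner, note: "demo"}

	// UnwrapExpression recovers the physical expression underneath.
	fmt.Println(hcl.UnwrapExpression(wrapped) == hcl.Expression(inner)) // true
}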
+func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) { + for _, diag := range diags { + if diag.Expression == nil { + diag.Expression = expr + diag.EvalContext = ctx + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go new file mode 100644 index 00000000000..ccc1c0ae2c5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go @@ -0,0 +1,24 @@ +package hclsyntax + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go new file mode 100644 index 00000000000..617bc29dc20 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go @@ -0,0 +1,7 @@ +// Package hclsyntax contains the parser, AST, etc for HCL's native language, +// as opposed to the JSON variant. +// +// In normal use applications should rarely depend on this package directly, +// instead preferring the higher-level interface of the main hcl package and +// its companion package hclparse. +package hclsyntax diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go new file mode 100644 index 00000000000..d3f7a74d399 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go @@ -0,0 +1,1468 @@ +package hclsyntax + +import ( + "fmt" + "sync" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// Expression is the abstract type for nodes that behave as HCL expressions. +type Expression interface { + Node + + // The hcl.Expression methods are duplicated here, rather than simply + // embedded, because both Node and hcl.Expression have a Range method + // and so they conflict. + + Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + Variables() []hcl.Traversal + StartRange() hcl.Range +} + +// Assert that Expression implements hcl.Expression +var assertExprImplExpr hcl.Expression = Expression(nil) + +// LiteralValueExpr is an expression that just always returns a given value. 
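The did-you-mean helper just shown wraps the agext/levenshtein package directly; this standalone sketch (inputs illustrative) shows why the experimentally-chosen threshold of 3 catches common one- and two-character slips while rejecting unrelated names.

package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Distance counts single-character edits; nil selects default parameters.
	fmt.Println(levenshtein.Distance("lenght", "length", nil)) // 2: close enough to suggest
	fmt.Println(levenshtein.Distance("count", "length", nil))  // 5: too far to suggest
}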
+type LiteralValueExpr struct { + Val cty.Value + SrcRange hcl.Range +} + +func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) { + // Literal values have no child nodes +} + +func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Val, nil +} + +func (e *LiteralValueExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *LiteralValueExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *LiteralValueExpr) AsTraversal() hcl.Traversal { + // This one's a little weird: the contract for AsTraversal is to interpret + // an expression as if it were traversal syntax, and traversal syntax + // doesn't have the special keywords "null", "true", and "false" so these + // are expected to be treated like variables in that case. + // Since our parser already turned them into LiteralValueExpr by the time + // we get here, we need to undo this and infer the name that would've + // originally led to our value. + // We don't do anything for any other values, since they don't overlap + // with traversal roots. + + if e.Val.IsNull() { + // In practice the parser only generates null values of the dynamic + // pseudo-type for literals, so we can safely assume that any null + // was originally the keyword "null". + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "null", + SrcRange: e.SrcRange, + }, + } + } + + switch e.Val { + case cty.True: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "true", + SrcRange: e.SrcRange, + }, + } + case cty.False: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "false", + SrcRange: e.SrcRange, + }, + } + default: + // No traversal is possible for any other value. + return nil + } +} + +// ScopeTraversalExpr is an Expression that retrieves a value from the scope +// using a traversal. +type ScopeTraversalExpr struct { + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) { + // Scope traversals have no child nodes +} + +func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, diags := e.Traversal.TraverseAbs(ctx) + setDiagEvalContext(diags, e, ctx) + return val, diags +} + +func (e *ScopeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ScopeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal { + return e.Traversal +} + +// RelativeTraversalExpr is an Expression that retrieves a value from another +// value using a _relative_ traversal. +type RelativeTraversalExpr struct { + Source Expression + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) +} + +func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + src, diags := e.Source.Value(ctx) + ret, travDiags := e.Traversal.TraverseRel(src) + setDiagEvalContext(travDiags, e, ctx) + diags = append(diags, travDiags...) + return ret, diags +} + +func (e *RelativeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *RelativeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our source can.
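+ // Illustration (not in the upstream source): if the source expression is the traversal a.b and this node's own relative traversal is .c, the concatenation below represents a.b.c.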
+ st, diags := hcl.AbsTraversalForExpr(e.Source) + if diags.HasErrors() { + return nil + } + + ret := make(hcl.Traversal, len(st)+len(e.Traversal)) + copy(ret, st) + copy(ret[len(st):], e.Traversal) + return ret +} + +// FunctionCallExpr is an Expression that calls a function from the EvalContext +// and returns its result. +type FunctionCallExpr struct { + Name string + Args []Expression + + // If true, the final argument should be a tuple, list or set which will + // expand to be one argument per element. + ExpandFinal bool + + NameRange hcl.Range + OpenParenRange hcl.Range + CloseParenRange hcl.Range +} + +func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { + for _, arg := range e.Args { + w(arg) + } +} + +func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var f function.Function + exists := false + hasNonNilMap := false + thisCtx := ctx + for thisCtx != nil { + if thisCtx.Functions == nil { + thisCtx = thisCtx.Parent() + continue + } + hasNonNilMap = true + f, exists = thisCtx.Functions[e.Name] + if exists { + break + } + thisCtx = thisCtx.Parent() + } + + if !exists { + if !hasNonNilMap { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + avail := make([]string, 0, len(ctx.Functions)) + for name := range ctx.Functions { + avail = append(avail, name) + } + suggestion := nameSuggestion(e.Name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + params := f.Params() + varParam := f.VarParam() + + args := e.Args + if e.ExpandFinal { + if len(args) < 1 { + // should never happen if the parser is behaving + panic("ExpandFinal set on function call with no arguments") + } + expandExpr := args[len(args)-1] + expandVal, expandDiags := expandExpr.Value(ctx) + diags = append(diags, expandDiags...) + if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) 
must be of a tuple, list, or set type.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +// Implementation for hcl.ExprCall. 
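The parent-walk in Value above is what lets an application register its function table once on a root EvalContext and evaluate against cheap child contexts. A minimal sketch, assuming go-cty's stdlib (the function name and table are illustrative, not mandated by HCL):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Functions are defined on the root context only.
	root := &hcl.EvalContext{
		Functions: map[string]function.Function{
			"upper": stdlib.UpperFunc,
		},
	}
	// The child has no Functions map of its own, so FunctionCallExpr.Value
	// walks Parent() until it finds one.
	child := root.NewChild()

	expr, diags := hclsyntax.ParseExpression([]byte(`upper("hello")`), "x.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, valDiags := expr.Value(child)
	if valDiags.HasErrors() {
		panic(valDiags.Error())
	}
	fmt.Println(val.AsString()) // HELLO
}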
+func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. + for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Condition) + w(e.TrueResult) + w(e.FalseResult) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + resultType := cty.DynamicPseudoType + convs := make([]convert.Conversion, 2) + + switch { + // If either case is a dynamic null value (which would result from a + // literal null in the config), we know that it can convert to the expected + // type of the opposite case, and we don't need to speculatively reduce the + // final result type to DynamicPseudoType. + + // If we know that either Type is a DynamicPseudoType, we can be certain + // that the other value can convert since it's a pass-through, and we don't + // need to unify the types. If the final evaluation results in the dynamic + // value being returned, there's no conversion we can do, so we return the + // value directly. + case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = falseResult.Type() + convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = trueResult.Type() + convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + case trueResult.Type() == cty.DynamicPseudoType, falseResult.Type() == cty.DynamicPseudoType: + // the final resultType type is still unknown + // we don't need to get the conversion, because both are a noop. + + default: + // Try to find a type that both results can be converted to. + resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + } + + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. 
Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) + if convs[0] != nil { + var err error + trueResult, err = convs[0](trueResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The true result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.TrueResult, + EvalContext: ctx, + }) + trueResult = cty.UnknownVal(resultType) + } + } + return trueResult, diags + } else { + diags = append(diags, falseDiags...) + if convs[1] != nil { + var err error + falseResult, err = convs[1](falseResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The false result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.FalseResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.FalseResult, + EvalContext: ctx, + }) + falseResult = cty.UnknownVal(resultType) + } + } + return falseResult, diags + } +} + +func (e *ConditionalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ConditionalExpr) StartRange() hcl.Range { + return e.Condition.StartRange() +} + +type IndexExpr struct { + Collection Expression + Key Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { + w(e.Collection) + w(e.Key) +} + +func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + coll, collDiags := e.Collection.Value(ctx) + key, keyDiags := e.Key.Value(ctx) + diags = append(diags, collDiags...) + diags = append(diags, keyDiags...) + + val, indexDiags := hcl.Index(coll, key, &e.SrcRange) + setDiagEvalContext(indexDiags, e, ctx) + diags = append(diags, indexDiags...) + return val, diags +} + +func (e *IndexExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *IndexExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type TupleConsExpr struct { + Exprs []Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) { + for _, expr := range e.Exprs { + w(expr) + } +} + +func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals []cty.Value + var diags hcl.Diagnostics + + vals = make([]cty.Value, len(e.Exprs)) + for i, expr := range e.Exprs { + val, valDiags := expr.Value(ctx) + vals[i] = val + diags = append(diags, valDiags...) 
+ } + + return cty.TupleVal(vals), diags +} + +func (e *TupleConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TupleConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprList +func (e *TupleConsExpr) ExprList() []hcl.Expression { + ret := make([]hcl.Expression, len(e.Exprs)) + for i, expr := range e.Exprs { + ret[i] = expr + } + return ret +} + +type ObjectConsExpr struct { + Items []ObjectConsItem + + SrcRange hcl.Range + OpenRange hcl.Range +} + +type ObjectConsItem struct { + KeyExpr Expression + ValueExpr Expression +} + +func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { + for _, item := range e.Items { + w(item.KeyExpr) + w(item.ValueExpr) + } +} + +func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals map[string]cty.Value + var diags hcl.Diagnostics + + // This will get set to true if we fail to produce any of our keys, + // either because they are actually unknown or if the evaluation produces + // errors. In all of these cases we must return DynamicPseudoType because + // we're unable to know the full set of keys our object has, and thus + // we can't produce a complete value of the intended type. + // + // We still evaluate all of the item keys and values to make sure that we + // get as complete as possible a set of diagnostics. + known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.KeyExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprMap +func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair { + ret := make([]hcl.KeyValuePair, len(e.Items)) + for i, item := range e.Items { + ret[i] = hcl.KeyValuePair{ + Key: item.KeyExpr, + Value: item.ValueExpr, + } + } + return ret +} + +// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys, +// which deals with the special case that a naked identifier in that position +// must be interpreted as a literal string rather than evaluated directly. +type ObjectConsKeyExpr struct { + Wrapped Expression +} + +func (e *ObjectConsKeyExpr) literalName() string { + // This is our logic for deciding whether to behave like a literal string.
+ // We lean on our AbsTraversalForExpr implementation here, which already + // deals with some awkward cases like the expression being the result + // of the keywords "null", "true" and "false" which we'd want to interpret + // as keys here too. + return hcl.ExprAsKeyword(e.Wrapped) +} + +func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { + // We only treat our wrapped expression as a real expression if we're + // not going to interpret it as a literal. + if e.literalName() == "" { + w(e.Wrapped) + } +} + +func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // Because we accept a naked identifier as a literal key rather than a + // reference, it's confusing to accept a traversal containing periods + // here since we can't tell if the user intends to create a key with + // periods or actually reference something. To avoid confusing downstream + // errors we'll just prohibit a naked multi-step traversal here and + // require the user to state their intent more clearly. + // (This is handled at evaluation time rather than parse time because + // an application using static analysis _can_ accept a naked multi-step + // traversal here, if desired.) + if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 { + var diags hcl.Diagnostics + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous attribute key", + Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.", + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + + if ln := e.literalName(); ln != "" { + return cty.StringVal(ln), nil + } + return e.Wrapped.Value(ctx) +} + +func (e *ObjectConsKeyExpr) Range() hcl.Range { + return e.Wrapped.Range() +} + +func (e *ObjectConsKeyExpr) StartRange() hcl.Range { + return e.Wrapped.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our wrappee can. + st, diags := hcl.AbsTraversalForExpr(e.Wrapped) + if diags.HasErrors() { + return nil + } + + return st +} + +func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { + return e.Wrapped +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key => v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // nil if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...)
+ + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. + if e.CondExpr != nil { + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
+ if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) + if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
+ if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + w(e.CollExpr) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. + _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) + return cty.DynamicVal, diags + } + + sourceTy := sourceVal.Type() + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-item tuple, or as + // an empty tuple if the value is null. 
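+ // For example, applying .*.name to a single object value behaves as if the object were a one-element tuple, yielding a one-element tuple of names, while applying it to a null value yields an empty tuple.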
+ autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType()) + + if sourceVal.IsNull() { + if autoUpgrade { + return cty.EmptyTupleVal, diags + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + Expression: e.Source, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + + if autoUpgrade { + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) + sourceTy = sourceVal.Type() + } + + // We'll compute our result type lazily if we need it. In the normal case + // it's inferred automatically from the value we construct. + resultTy := func() (cty.Type, hcl.Diagnostics) { + chiCtx := ctx.NewChild() + var diags hcl.Diagnostics + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + ety := sourceTy.ElementType() + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + return cty.List(val.Type()), diags + case sourceTy.IsTupleType(): + etys := sourceTy.TupleElementTypes() + resultTys := make([]cty.Type, 0, len(etys)) + for _, ety := range etys { + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + resultTys = append(resultTys, val.Type()) + } + return cty.Tuple(resultTys), diags + default: + // Should never happen because of our promotion to list above. + return cty.DynamicPseudoType, diags + } + } + + if !sourceVal.IsKnown() { + // We can't produce a known result in this case, but we'll still + // indicate what the result type would be, allowing any downstream type + // checking to proceed. + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.UnknownVal(ty), diags + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + // We'll ignore the resultTy diagnostics in this case since they + // will just be the same errors we saw while iterating above. + ty, _ := resultTy() + return cty.UnknownVal(ty), diags + } + + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + if len(vals) == 0 { + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.ListValEmpty(ty.ElementType()), diags + } + return cty.ListVal(vals), diags + default: + return cty.TupleVal(vals), diags + } +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) + w(e.Each) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime.
+// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + + // values and its associated lock are used to isolate concurrent + // evaluations of a symbol from one another. It is the calling application's + // responsibility to ensure that the same splat expression is not evaluated + // concurrently within the _same_ EvalContext, but it is fine and safe to + // do concurrent evaluations with distinct EvalContexts. + values map[*hcl.EvalContext]cty.Value + valuesLock sync.RWMutex +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + + e.valuesLock.RLock() + defer e.valuesLock.RUnlock() + + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. +func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go new file mode 100644 index 00000000000..7f59f1a275d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -0,0 +1,268 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ +
Impl: stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. + binaryOps = []map[TokenType]*Operation{ + { + TokenOr: OpLogicalOr, + }, + { + TokenAnd: OpLogicalAnd, + }, + { + TokenEqualOp: OpEqual, + TokenNotEqual: OpNotEqual, + }, + { + TokenGreaterThan: OpGreaterThan, + TokenGreaterThanEq: OpGreaterThanOrEqual, + TokenLessThan: OpLessThan, + TokenLessThanEq: OpLessThanOrEqual, + }, + { + TokenPlus: OpAdd, + TokenMinus: OpSubtract, + }, + { + TokenStar: OpMultiply, + TokenSlash: OpDivide, + TokenPercent: OpModulo, + }, + } +} + +type BinaryOpExpr struct { + LHS Expression + Op *Operation + RHS Expression + + SrcRange hcl.Range +} + +func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.LHS) + w(e.RHS) +} + +func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly two arguments + params := impl.Params() + lhsParam := params[0] + rhsParam := params[1] + + var diags hcl.Diagnostics + + givenLHSVal, lhsDiags := e.LHS.Value(ctx) + givenRHSVal, rhsDiags := e.RHS.Value(ctx) + diags = append(diags, lhsDiags...) + diags = append(diags, rhsDiags...) + + lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.LHS, + EvalContext: ctx, + }) + } + rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.RHS, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since this + // will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{lhsVal, rhsVal} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless.
+ Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *BinaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *BinaryOpExpr) StartRange() hcl.Range { + return e.LHS.StartRange() +} + +type UnaryOpExpr struct { + Op *Operation + Val Expression + + SrcRange hcl.Range + SymbolRange hcl.Range +} + +func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.Val) +} + +func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly one argument + params := impl.Params() + param := params[0] + + givenVal, diags := e.Val.Value(ctx) + + val, err := convert.Convert(givenVal, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Val, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since this + // will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{val} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *UnaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *UnaryOpExpr) StartRange() hcl.Range { + return e.SymbolRange +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go new file mode 100644 index 00000000000..ca3dae189f4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go @@ -0,0 +1,220 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type TemplateExpr struct { + Parts []Expression + + SrcRange hcl.Range +} + +func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { + for _, part := range e.Parts { + w(part) + } +} + +func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + buf := &bytes.Buffer{} + var diags hcl.Diagnostics + isKnown := true + + for _, part := range e.Parts { + partVal, partDiags := part.Value(ctx) + diags = append(diags, partDiags...) + + if partVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "The expression result is null. Cannot include a null value in a string template.", + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + if !partVal.IsKnown() { + // If any part is unknown then the result as a whole must be + // unknown too.
We'll keep on processing the rest of the parts + // anyway, because we want to still emit any diagnostics resulting + // from evaluating those. + isKnown = false + continue + } + + strVal, err := convert.Convert(partVal, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include the given value in a string template: %s.", + err.Error(), + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + buf.WriteString(strVal.AsString()) + } + + if !isKnown { + return cty.UnknownVal(cty.String), diags + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateExpr) StartRange() hcl.Range { + return e.Parts[0].StartRange() +} + +// IsStringLiteral returns true if and only if the template consists only of +// a single string literal, as would be created for a simple quoted string like +// "foo". +// +// If this function returns true, then calling Value on the same expression +// with a nil EvalContext will return the literal value. +// +// Note that "${"foo"}", "${1}", etc. aren't considered literal values for the +// purposes of this method, because the intent of this method is to identify +// situations where the user seems to be explicitly intending literal string +// interpretation, not situations that result in literals as a technicality +// of the template expression unwrapping behavior. +func (e *TemplateExpr) IsStringLiteral() bool { + if len(e.Parts) != 1 { + return false + } + _, ok := e.Parts[0].(*LiteralValueExpr) + return ok +} + +// TemplateJoinExpr is used to convert tuples of strings produced by template +// constructs (i.e. for loops) into flat strings, by converting the values +// to strings and joining them. This AST node is not used directly; it's +// produced as part of the AST of a "for" loop in a template. +type TemplateJoinExpr struct { + Tuple Expression +} + +func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { + w(e.Tuple) +} + +func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + tuple, diags := e.Tuple.Value(ctx) + + if tuple.IsNull() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null.
Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. +type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + w(e.Wrapped) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go new file mode 100755 index 00000000000..9177092ce4a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go @@ -0,0 +1,76 @@ +package hclsyntax + +// Generated by expression_vars_gen.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here.
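All of the generated methods below delegate to the package-level Variables walk; from calling code the effect is as in this sketch (expression text illustrative), which is how an application can discover what an EvalContext must provide before evaluation:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`a.b + c[0]`), "x.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Variables reports every absolute traversal the expression refers to.
	for _, traversal := range expr.Variables() {
		fmt.Println(traversal.RootName()) // prints "a", then "c"
	}
}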
+ +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go new file mode 100644 index 00000000000..88f198009d1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go @@ -0,0 +1,99 @@ +// This is a 'go generate'-oriented program for producing the "Variables" +// method on every Expression implementation found within this package. +// All expressions share the same implementation for this method, which +// just wraps the package-level function "Variables" and uses an AST walk +// to do its work. + +// +build ignore + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "sort" +) + +func main() { + fs := token.NewFileSet() + pkgs, err := parser.ParseDir(fs, ".", nil, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err) + os.Exit(1) + } + pkg := pkgs["hclsyntax"] + + // Walk all the files and collect the receivers of any "Value" methods + // that look like they are trying to implement Expression. + var recvs []string + for _, f := range pkg.Files { + for _, decl := range f.Decls { + fd, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if fd.Name.Name != "Value" { + continue + } + results := fd.Type.Results.List + if len(results) != 2 { + continue + } + valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident) + diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident) + + if valResult.Name != "cty" && diagsResult.Name != "hcl" { + continue + } + + // If we have a method called Value and it returns something in + // "cty" followed by something in "hcl" then that's specific enough + // for now, even though this is not 100% exact as a correct + // implementation of Value. 
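
The receiver-discovery walk above can be tried out in isolation. The following standalone sketch, with an invented demo source string rather than the real package sources the generator parses from disk, applies the same go/parser technique to find pointer receivers of methods named Value:

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    )

    // demoSrc is an invented input standing in for the package sources.
    const demoSrc = `package demo

    type A struct{}

    func (a *A) Value() (int, error) { return 0, nil }
    `

    func main() {
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "demo.go", demoSrc, 0)
    	if err != nil {
    		panic(err)
    	}
    	for _, decl := range f.Decls {
    		fd, ok := decl.(*ast.FuncDecl)
    		if !ok || fd.Recv == nil || fd.Name.Name != "Value" {
    			continue
    		}
    		// Same receiver extraction as the generator: unwrap the *T star expression.
    		if star, ok := fd.Recv.List[0].Type.(*ast.StarExpr); ok {
    			fmt.Printf("*%s\n", star.X.(*ast.Ident).Name) // *A
    		}
    	}
    }
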
+
+			recvTy := fd.Recv.List[0].Type
+
+			switch rtt := recvTy.(type) {
+			case *ast.StarExpr:
+				name := rtt.X.(*ast.Ident).Name
+				recvs = append(recvs, fmt.Sprintf("*%s", name))
+			default:
+				fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy)
+			}
+
+		}
+	}
+
+	sort.Strings(recvs)
+
+	of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err)
+		os.Exit(1)
+	}
+
+	fmt.Fprint(of, outputPreamble)
+	for _, recv := range recvs {
+		fmt.Fprintf(of, outputMethodFmt, recv)
+	}
+	fmt.Fprint(of, "\n")
+
+}
+
+const outputPreamble = `package hclsyntax
+
+// Generated by expression_vars_gen.go. DO NOT EDIT.
+// Run 'go generate' on this package to update the set of functions here.
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)`
+
+const outputMethodFmt = `
+
+func (e %s) Variables() []hcl.Traversal {
+	return Variables(e)
+}`
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go
new file mode 100644
index 00000000000..490c02556b2
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go
@@ -0,0 +1,20 @@
+package hclsyntax
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// File is the top-level object resulting from parsing a configuration file.
+type File struct {
+	Body  *Body
+	Bytes []byte
+}
+
+func (f *File) AsHCLFile() *hcl.File {
+	return &hcl.File{
+		Body:  f.Body,
+		Bytes: f.Bytes,
+
+		// TODO: The Nav object, once we have an implementation of it
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go
new file mode 100644
index 00000000000..841656a6a17
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go
@@ -0,0 +1,9 @@
+package hclsyntax
+
+//go:generate go run expression_vars_gen.go
+//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl
+//go:generate ragel -Z scan_tokens.rl
+//go:generate gofmt -w scan_tokens.go
+//go:generate ragel -Z scan_string_lit.rl
+//go:generate gofmt -w scan_string_lit.go
+//go:generate stringer -type TokenType -output token_type_string.go
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go
new file mode 100644
index 00000000000..eef8b9626ce
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go
@@ -0,0 +1,21 @@
+package hclsyntax
+
+import (
+	"bytes"
+)
+
+type Keyword []byte
+
+var forKeyword = Keyword([]byte{'f', 'o', 'r'})
+var inKeyword = Keyword([]byte{'i', 'n'})
+var ifKeyword = Keyword([]byte{'i', 'f'})
+var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'})
+var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'})
+var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'})
+
+func (kw Keyword) TokenMatches(token Token) bool {
+	if token.Type != TokenIdent {
+		return false
+	}
+	return bytes.Equal([]byte(kw), token.Bytes)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go new file mode 100644 index 00000000000..c8c97f37cdc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -0,0 +1,59 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" +) + +type navigation struct { + root *Body +} + +// Implementation of hcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! + return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} + +func (n navigation) ContextDefRange(offset int) hcl.Range { + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return hcl.Range{} + } + + return block.DefRange() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go new file mode 100644 index 00000000000..75812e63dd1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Node is the abstract type that every AST node implements. +// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. + // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 00000000000..772ebae2bc6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -0,0 +1,2044 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. 
+ recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The argument %q was already set at %s. Each argument may be set only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. + // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. + continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid argument name", + Detail: "Argument names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident, false) + case TokenOQuote, TokenOBrace, TokenIdent: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +// parseSingleAttrBody is a weird variant of ParseBody that deals with the +// body of a nested block containing only one attribute value all on a single +// line, like foo { bar = baz } . 
It expects to find a single attribute item +// immediately followed by the end token type with no intervening newlines. +func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + var attr *Attribute + var diags hcl.Diagnostics + + next := p.Peek() + + switch next.Type { + case TokenEqual: + node, attrDiags := p.finishParsingBodyAttribute(ident, true) + diags = append(diags, attrDiags...) + attr = node.(*Attribute) + case TokenOQuote, TokenOBrace, TokenIdent: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument definition required", + Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes), + Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(), + }, + } + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return &Body{ + Attributes: Attributes{ + string(ident.Bytes): attr, + }, + + SrcRange: attr.SrcRange, + EndRange: hcl.Range{ + Filename: attr.SrcRange.Filename, + Start: attr.SrcRange.End, + End: attr.SrcRange.End, + }, + }, diags + +} + +func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + if !singleLine { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + summary := "Missing newline after argument" + detail := "An argument definition must end with a newline." + + if end.Type == TokenComma { + summary = "Unexpected comma after argument" + detail = "Argument definitions must be separated by newlines, not commas. 
" + detail + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: summary, + Detail: detail, + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + // parseQuoteStringLiteral recovers up to the closing quote + // if it encounters problems, so we can continue looking for + // more labels and eventually the block body even. + + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: &Body{ + SrcRange: ident.Range, + EndRange: ident.Range, + }, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + var body *Body + var bodyDiags hcl.Diagnostics + switch p.Peek().Type { + case TokenNewline, TokenEOF, TokenCBrace: + body, bodyDiags = p.ParseBody(TokenCBrace) + default: + // Special one-line, single-attribute block parsing mode. + body, bodyDiags = p.parseSingleAttrBody(TokenCBrace) + switch p.Peek().Type { + case TokenCBrace: + p.Read() // the happy path - just consume the closing brace + case TokenComma: + // User seems to be trying to use the object-constructor + // comma-separated style, which isn't permitted for blocks. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + case TokenNewline: + // We don't allow weird mixtures of single and multi-line syntax. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + default: + // Some other weird thing is going on. Since we can't guess a likely + // user intent for this one, we'll skip it if we're already in + // recovery mode. + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenCBrace) + } + } + diags = append(diags, bodyDiags...) + cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline || eol.Type == TokenEOF { + p.Read() // eat newline + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + p.recoverAfterBodyItem() + } + + // We must never produce a nil body, since the caller may attempt to + // do analysis of a partial result when there's an error, so we'll + // insert a placeholder if we otherwise failed to produce a valid + // body due to one of the syntax error paths above. + if body == nil && diags.HasErrors() { + body = &Body{ + SrcRange: hcl.RangeBetween(oBrace.Range, cBraceRange), + EndRange: cBraceRange, + } + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) 
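
For illustration, a conditional like the one parsed here can be evaluated end to end. A minimal sketch; the variable name n is invented:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	expr, _ := hclsyntax.ParseExpression([]byte(`n > 0 ? "some" : "none"`), "c.hcl", hcl.Pos{Line: 1, Column: 1})
    	val, diags := expr.Value(&hcl.EvalContext{
    		Variables: map[string]cty.Value{"n": cty.NumberIntVal(2)},
    	})
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}
    	fmt.Println(val.AsString()) // some
    }

Because the false expression is parsed with a recursive ParseExpression call, a ? b : c ? d : e groups as a ? b : (c ? d : e), matching the right-associative treatment described above.
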
+ if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) + if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) + if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret, moreDiags := p.parseExpressionTraversals(term) + diags = append(diags, moreDiags...) 
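
The left-associative folding implemented above is observable in evaluated results. A minimal sketch using only literals:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    )

    func main() {
    	// 10 - 2 - 3 must fold as (10 - 2) - 3 = 5, not 10 - (2 - 3) = 11.
    	expr, _ := hclsyntax.ParseExpression([]byte(`10 - 2 - 3`), "d.hcl", hcl.Pos{Line: 1, Column: 1})
    	val, _ := expr.Value(nil)
    	f, _ := val.AsBigFloat().Float64()
    	fmt.Println(f) // 5
    }
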
+ return ret, diags +} + +func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := from + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. + numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) + + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. 
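
The legacy numeric-attribute form tolerated here evaluates the same way as ordinary bracket indexing. A minimal sketch; the items tuple is invented:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
    		"items": cty.TupleVal([]cty.Value{cty.StringVal("first"), cty.StringVal("second")}),
    	}}
    	// Both spellings produce an index traversal with a number key.
    	for _, src := range []string{`items[0]`, `items.0`} {
    		expr, _ := hclsyntax.ParseExpression([]byte(src), "e.hcl", hcl.Pos{Line: 1, Column: 1})
    		val, _ := expr.Value(ctx)
    		fmt.Println(val.AsString()) // first, twice
    	}
    }
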
+ if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) + trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + switch p.Peek().Type { + case TokenStar: + // This is a full splat expression, like foo[*], which consumes + // the rest of the traversal steps after it using a recursive + // call to this function. 
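
A full splat consumes every traversal step written after it and applies those steps to each element of the source value. A minimal sketch; the users value is invented:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
    		"users": cty.TupleVal([]cty.Value{
    			cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("ann")}),
    			cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("bob")}),
    		}),
    	}}
    	expr, _ := hclsyntax.ParseExpression([]byte(`users[*].name`), "f.hcl", hcl.Pos{Line: 1, Column: 1})
    	val, _ := expr.Value(ctx)
    	for it := val.ElementIterator(); it.Next(); {
    		_, v := it.Element()
    		fmt.Println(v.AsString()) // ann, bob
    	}
    }
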
+ p.Read() // consume star + close := p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on splat index", + Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + // Splat expressions use a special "anonymous symbol" as a + // placeholder in an expression to be evaluated once for each + // item in the source expression. + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(open.Range, close.Range), + } + // Now we'll recursively call this same function to eat any + // remaining traversal steps against the anonymous symbol. + travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr) + diags = append(diags, nestedDiags...) + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()), + MarkerRange: hcl.RangeBetween(open.Range, close.Range), + } + + default: + + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() { + litKey, _ := tmpl.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. 
+func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. + p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open)) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. 
-46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // The cty.ParseNumberVal is always the same behavior as converting a + // string to a number, ensuring we always interpret decimal numbers in + // the same way. + numVal, err := cty.ParseNumberVal(string(tok.Bytes)) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. + p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) 
must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. + closeTok = p.Read() // eat closing paren + break Token + } + + } + + p.PopIncludeNewlines() + + return &FunctionCallExpr{ + Name: string(name.Bytes), + Args: args, + + ExpandFinal: expandFinal, + + NameRange: name.Range, + OpenParenRange: openTok.Range, + CloseParenRange: closeTok.Range, + }, diags +} + +func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrack { + // Should never happen if callers are behaving + panic("parseTupleCons called without peeker pointing to open bracket") + } + + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var exprs []Expression + + for { + next := p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + expr, exprDiags := p.ParseExpression() + exprs = append(exprs, expr) + diags = append(diags, exprDiags...) + + if p.recovery && exprDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing bracket to allow parsing to continue. + close = p.recover(TokenCBrack) + break + } + + next = p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrack) + break + } + + p.Read() // eat comma + + } + + return &TupleConsExpr{ + Exprs: exprs, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrace { + // Should never happen if callers are behaving + panic("parseObjectCons called without peeker pointing to open brace") + } + + // We must temporarily stop looking at newlines here while we check for + // a "for" keyword, since for expressions are _not_ newline-sensitive, + // even though object constructors are. 
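
Both bracketing forms of for expressions flow through this function: [ ... ] builds a tuple and { ... } builds an object. A minimal sketch evaluating the tuple form with an if filter; the nums value is invented:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
    		"nums": cty.TupleVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2), cty.NumberIntVal(3)}),
    	}}
    	expr, _ := hclsyntax.ParseExpression([]byte(`[for n in nums : n if n != 2]`), "g.hcl", hcl.Pos{Line: 1, Column: 1})
    	val, diags := expr.Value(ctx)
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}
    	fmt.Println(val.LengthInt()) // 2
    }
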
p.PushIncludeNewlines(false)
+	isFor := forKeyword.TokenMatches(p.Peek())
+	p.PopIncludeNewlines()
+	if isFor {
+		return p.finishParsingForExpr(open)
+	}
+
+	p.PushIncludeNewlines(true)
+	defer p.PopIncludeNewlines()
+
+	var close Token
+
+	var diags hcl.Diagnostics
+	var items []ObjectConsItem
+
+	for {
+		next := p.Peek()
+		if next.Type == TokenNewline {
+			p.Read() // eat newline
+			continue
+		}
+
+		if next.Type == TokenCBrace {
+			close = p.Read() // eat closer
+			break
+		}
+
+		var key Expression
+		var keyDiags hcl.Diagnostics
+		key, keyDiags = p.ParseExpression()
+		diags = append(diags, keyDiags...)
+
+		if p.recovery && keyDiags.HasErrors() {
+			// If expression parsing failed then we are probably in a strange
+			// place in the token stream, so we'll bail out and try to reset
+			// to after our closing brace to allow parsing to continue.
+			close = p.recover(TokenCBrace)
+			break
+		}
+
+		// We wrap up the key expression in a special wrapper that deals
+		// with our special case that naked identifiers as object keys
+		// are interpreted as literal strings.
+		key = &ObjectConsKeyExpr{Wrapped: key}
+
+		next = p.Peek()
+		if next.Type != TokenEqual && next.Type != TokenColon {
+			if !p.recovery {
+				switch next.Type {
+				case TokenNewline, TokenComma:
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Missing attribute value",
+						Detail:   "Expected an attribute value, introduced by an equals sign (\"=\").",
+						Subject:  &next.Range,
+						Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
+					})
+				case TokenIdent:
+					// Although this might just be a plain old missing equals
+					// sign before a reference, one way to get here is to try
+					// to write an attribute name containing a period followed
+					// by a digit, which was valid in HCL1, like this:
+					//     foo1.2_bar = "baz"
+					// We can't know exactly what the user intended here, but
+					// we'll augment our message with an extra hint in this case
+					// in case it is helpful.
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Missing key/value separator",
+						Detail:   "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to give an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
+						Subject:  &next.Range,
+						Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
+					})
+				default:
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Missing key/value separator",
+						Detail:   "Expected an equals sign (\"=\") to mark the beginning of the attribute value.",
+						Subject:  &next.Range,
+						Context:  hcl.RangeBetween(open.Range, next.Range).Ptr(),
+					})
+				}
+			}
+			close = p.recover(TokenCBrace)
+			break
+		}
+
+		p.Read() // eat equals sign or colon
+
+		value, valueDiags := p.ParseExpression()
+		diags = append(diags, valueDiags...)
+
+		if p.recovery && valueDiags.HasErrors() {
+			// If expression parsing failed then we are probably in a strange
+			// place in the token stream, so we'll bail out and try to reset
+			// to after our closing brace to allow parsing to continue.
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires the 'in' keyword after its name declarations.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
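
Further down, this function also accepts a grouping ellipsis after the value expression in object mode, which collects values sharing a key into tuples. A minimal sketch of that mode; the words value is invented:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
    		"words": cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("a")}),
    	}}
    	expr, diags := hclsyntax.ParseExpression([]byte(`{for w in words : w => w...}`), "h.hcl", hcl.Pos{Line: 1, Column: 1})
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}
    	val, moreDiags := expr.Value(ctx)
    	if moreDiags.HasErrors() {
    		panic(moreDiags.Error())
    	}
    	// The duplicate key "a" groups its two values into one tuple.
    	fmt.Println(val.GetAttr("a").LengthInt()) // 2
    }
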
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires a colon after the collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := p.decodeStringLit(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "%" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + + // Now that we're returning an error callers won't attempt to use + // the result for any real operations, but they might try to use + // the partial AST for other analyses, so we'll leave a marker + // to indicate that there was something invalid in the string to + // help avoid misinterpretation of the partial result + ret.WriteString(which) + ret.WriteString("{ ... }") + + p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenCQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// decodeStringLit processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
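
The escape decoding implemented by the function below is visible in the evaluated result of a quoted template. A minimal sketch:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    )

    func main() {
    	// \u0041 decodes to "A", and $${ is the escape for a literal ${.
    	expr, diags := hclsyntax.ParseExpression([]byte(`"x\u0041y $${not_interp}"`), "i.hcl", hcl.Pos{Line: 1, Column: 1})
    	if diags.HasErrors() {
    		panic(diags.Error())
    	}
    	val, _ := expr.Value(nil)
    	fmt.Printf("%q\n", val.AsString()) // "xAy ${not_interp}"
    }
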
+func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("decodeStringLit can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + slices := scanStringLit(tok.Bytes, quoted) + + // We will mutate rng constantly as we walk through our token slices below. + // Any diagnostics must take a copy of this rng rather than simply pointing + // to it, e.g. by using rng.Ptr() rather than &rng. + rng := tok.Range + rng.End = rng.Start + +Slices: + for _, slice := range slices { + if len(slice) == 0 { + continue + } + + // Advance the start of our range to where the previous token ended + rng.Start = rng.End + + // Advance the end of our range to after our token. + b := slice + for len(b) > 0 { + adv, ch, _ := textseg.ScanGraphemeClusters(b, true) + rng.End.Byte += adv + switch ch[0] { + case '\r', '\n': + rng.End.Line++ + rng.End.Column = 1 + default: + rng.End.Column++ + } + b = b[adv:] + } + + TokenType: + switch slice[0] { + case '\\': + if !quoted { + // If we're not in quoted mode then just treat this token as + // normal. (Slices can still start with backslash even if we're + // not specifically looking for backslash sequences.) + break TokenType + } + if len(slice) < 2 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "Backslash must be followed by an escape sequence selector character.", + Subject: rng.Ptr(), + }) + break TokenType + } + + switch slice[1] { + + case 'n': + ret = append(ret, '\n') + continue Slices + case 'r': + ret = append(ret, '\r') + continue Slices + case 't': + ret = append(ret, '\t') + continue Slices + case '"': + ret = append(ret, '"') + continue Slices + case '\\': + ret = append(ret, '\\') + continue Slices + case 'u', 'U': + if slice[1] == 'u' && len(slice) != 6 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\u escape sequence must be followed by four hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } else if slice[1] == 'U' && len(slice) != 10 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } + + numHex := string(slice[2:]) + num, err := strconv.ParseUint(numHex, 16, 32) + if err != nil { + // Should never happen because the scanner won't match + // a sequence of digits that isn't valid. + panic(err) + } + + r := rune(num) + l := utf8.RuneLen(r) + if l == -1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num), + Subject: rng.Ptr(), + }) + break TokenType + } + for i := 0; i < l; i++ { + ret = append(ret, 0) + } + rb := ret[len(ret)-l:] + utf8.EncodeRune(rb, r) + + continue Slices + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]), + Subject: rng.Ptr(), + }) + ret = append(ret, slice[1:]...)
+ continue Slices + } + + case '$', '%': + if len(slice) != 3 { + // Not long enough to be our escape sequence, so it's literal. + break TokenType + } + + if slice[1] == slice[0] && slice[2] == '{' { + ret = append(ret, slice[0]) + ret = append(ret, '{') + continue Slices + } + + break TokenType + } + + // If we fall out here or break out of here from the switch above + // then this slice is just a literal. + ret = append(ret, slice...) + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. +// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. 
+ p.recover(end) +} + +func (p *parser) recoverAfterBodyItem() { + p.recovery = true + var open []TokenType + +Token: + for { + tok := p.Read() + + switch tok.Type { + + case TokenNewline: + if len(open) == 0 { + break Token + } + + case TokenEOF: + break Token + + case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl: + open = append(open, tok.Type) + + case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc: + opener := p.oppositeBracket(tok.Type) + for len(open) > 0 && open[len(open)-1] != opener { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + case TokenTemplateSeqEnd: + for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + } + } +} + +// oppositeBracket finds the bracket that opposes the given bracketer, or +// TokenNil if the given token isn't a bracketer. +// +// "Bracketer", for the sake of this function, is one end of a matching +// open/close set of tokens that establish a bracketing context. +func (p *parser) oppositeBracket(ty TokenType) TokenType { + switch ty { + + case TokenOBrace: + return TokenCBrace + case TokenOBrack: + return TokenCBrack + case TokenOParen: + return TokenCParen + case TokenOQuote: + return TokenCQuote + case TokenOHeredoc: + return TokenCHeredoc + + case TokenCBrace: + return TokenOBrace + case TokenCBrack: + return TokenOBrack + case TokenCParen: + return TokenOParen + case TokenCQuote: + return TokenOQuote + case TokenCHeredoc: + return TokenOHeredoc + + case TokenTemplateControl: + return TokenTemplateSeqEnd + case TokenTemplateInterp: + return TokenTemplateSeqEnd + case TokenTemplateSeqEnd: + // This is ambiguous, but we return Interp here because that's + // what's assumed by the "recover" method.
+ return TokenTemplateInterp + + default: + return TokenNil + } +} + +func errPlaceholderExpr(rng hcl.Range) Expression { + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: rng, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go new file mode 100644 index 00000000000..a141626fe91 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go @@ -0,0 +1,799 @@ +package hclsyntax + +import ( + "fmt" + "strings" + "unicode" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { + return p.parseTemplate(TokenEOF, false) +} + +func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc) + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: rng, + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: rng, + }, diags +} + +func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { + parts, diags := p.parseTemplateParts(end) + if flushHeredoc { + flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec + } + tp := templateParser{ + Tokens: parts.Tokens, + SrcRange: parts.SrcRange, + } + exprs, exprsDiags := tp.parseRoot() + diags = append(diags, exprsDiags...) + + passthru := false + if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token + if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp { + passthru = true + } + } + + return exprs, passthru, parts.SrcRange, diags +} + +type templateParser struct { + Tokens []templateToken + SrcRange hcl.Range + + pos int +} + +func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) { + var exprs []Expression + var diags hcl.Diagnostics + + for { + next := p.Peek() + if _, isEnd := next.(*templateEndToken); isEnd { + break + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. 
+ Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
+ *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := p.decodeStringLit(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is %{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +// flushHeredocTemplateParts modifies in-place the line-leading literal strings +// to apply the flush heredoc processing rule: find the line with the smallest +// number of whitespace characters as prefix and then trim that number of +// characters from all of the lines. +// +// This rule is applied to static tokens rather than to the rendered result, +// so interpolating a string with leading whitespace cannot affect the chosen +// prefix length. 
+func flushHeredocTemplateParts(parts *templateParts) { + if len(parts.Tokens) == 0 { + // Nothing to do + return + } + + const maxInt = int((^uint(0)) >> 1) + + minSpaces := maxInt + newline := true + var adjust []*templateLiteralToken + for _, ttok := range parts.Tokens { + if newline { + newline = false + var spaces int + if lit, ok := ttok.(*templateLiteralToken); ok { + orig := lit.Val + trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace) + // If a token is entirely spaces and ends with a newline + // then it's a "blank line" and thus not considered for + // space-prefix-counting purposes. + if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") { + spaces = maxInt + } else { + spaceBytes := len(lit.Val) - len(trimmed) + spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters) + adjust = append(adjust, lit) + } + } else if _, ok := ttok.(*templateEndToken); ok { + break // don't process the end token since it never has spaces before it + } + if spaces < minSpaces { + minSpaces = spaces + } + } + if lit, ok := ttok.(*templateLiteralToken); ok { + if strings.HasSuffix(lit.Val, "\n") { + newline = true // The following token, if any, begins a new line + } + } + } + + for _, lit := range adjust { + // Since we want to count space _characters_ rather than space _bytes_, + // we can't just do a straightforward slice operation here and instead + // need to hunt for the split point with a scanner. + valBytes := []byte(lit.Val) + spaceByteCount := 0 + for i := 0; i < minSpaces; i++ { + adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true) + spaceByteCount += adv + valBytes = valBytes[adv:] + } + lit.Val = lit.Val[spaceByteCount:] + lit.SrcRange.Start.Column += minSpaces + lit.SrcRange.Start.Byte += spaceByteCount + } +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go new file mode 100644 index 00000000000..2ff3ed6c1a1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go new file mode 100644 index 00000000000..5a4b50e2fc5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go @@ -0,0 +1,212 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// This is set to true at init() time in tests, to enable more useful output +// if a stack discipline error is detected. It should not be enabled in +// normal mode since there is a performance penalty from accessing the +// runtime stack to produce the traces, but could be temporarily set to +// true for debugging if desired. +var tracePeekerNewlinesStack = false + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool + + // used only when tracePeekerNewlinesStack is set + newlineStackChanges []peekerNewlineStackChange +} + +// for use in debugging the stack usage only +type peekerNewlineStackChange struct { + Pushing bool // if false, then popping + Frame runtime.Frame + Include bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. 
When we're filtering out comments, we must as a + // special case transform these to newline tokens in order + // to properly parse newline-terminated block items. + + if p.includingNewlines() { + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + fakeNewline := Token{ + Type: TokenNewline, + Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)], + + // We use the whole token range as the newline + // range, even though that's a little... weird, + // because otherwise we'd need to go count + // characters again in order to figure out the + // column of the newline, and that complexity + // isn't justified when ranges of newlines are + // so rarely printed anyway. + Range: tok.Range, + } + return fakeNewline, i + 1 + } + } + + continue + } + case TokenNewline: + if !p.includingNewlines() { + continue + } + } + + return tok, i + 1 + } + + // if we fall out here then we'll return the EOF token, and leave + // our index pointed off the end of the array so we'll keep + // returning EOF in future too. + return p.Tokens[len(p.Tokens)-1], len(p.Tokens) +} + +func (p *peeker) includingNewlines() bool { + return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1] +} + +func (p *peeker) PushIncludeNewlines(include bool) { + if tracePeekerNewlinesStack { + // Record who called us so that we can more easily track down any + // mismanagement of the stack in the parser. + callers := []uintptr{0} + runtime.Callers(2, callers) + frames := runtime.CallersFrames(callers) + frame, _ := frames.Next() + p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{ + true, frame, include, + }) + } + + p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include) +} + +func (p *peeker) PopIncludeNewlines() bool { + stack := p.IncludeNewlinesStack + remain, ret := stack[:len(stack)-1], stack[len(stack)-1] + p.IncludeNewlinesStack = remain + + if tracePeekerNewlinesStack { + // Record who called us so that we can more easily track down any + // mismanagement of the stack in the parser. + callers := []uintptr{0} + runtime.Callers(2, callers) + frames := runtime.CallersFrames(callers) + frame, _ := frames.Next() + p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{ + false, frame, ret, + }) + } + + return ret +} + +// AssertEmptyIncludeNewlinesStack checks that the IncludeNewlinesStack is +// empty, panicking if it is not. This can be used to catch stack mismanagement +// that might otherwise just cause confusing downstream errors. +// +// This function is a no-op if the stack is empty when called. +// +// If newlines stack tracing is enabled by setting the global variable +// tracePeekerNewlinesStack at init time, a full log of all of the push/pop +// calls will be produced to help identify which caller in the parser is +// misbehaving. +func (p *peeker) AssertEmptyIncludeNewlinesStack() { + if len(p.IncludeNewlinesStack) != 1 { + // Should never happen; indicates mismanagement of the stack inside + // the parser.
+ if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above + panic(fmt.Errorf( + "non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s", + len(p.IncludeNewlinesStack)-1, + formatPeekerNewlineStackChanges(p.newlineStackChanges), + )) + } else { + panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack)) + } + } +} + +func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string { + indent := 0 + var buf bytes.Buffer + for _, change := range changes { + funcName := change.Frame.Function + if idx := strings.LastIndexByte(funcName, '.'); idx != -1 { + funcName = funcName[idx+1:] + } + filename := change.Frame.File + if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 { + filename = filename[idx+1:] + } + + switch change.Pushing { + + case true: + buf.WriteString(strings.Repeat(" ", indent)) + fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line) + indent++ + + case false: + indent-- + buf.WriteString(strings.Repeat(" ", indent)) + fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line) + + } + } + return buf.String() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go new file mode 100644 index 00000000000..cf0ee297695 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go @@ -0,0 +1,171 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ParseConfig parses the given buffer as a whole HCL config file, returning +// a *hcl.File representing its contents. If HasErrors called on the returned +// diagnostics returns true, the returned body is likely to be incomplete +// and should therefore be used with care. +// +// The body in the returned file has dynamic type *hclsyntax.Body, so callers +// may freely type-assert this to get access to the full hclsyntax API in +// situations where detailed access is required. However, most common use-cases +// should be served using the hcl.Body interface to ensure compatibility with +// other configuration syntaxes, such as JSON. +func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) { + tokens, diags := LexConfig(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + body, parseDiags := parser.ParseBody(TokenEOF) + diags = append(diags, parseDiags...) + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return &hcl.File{ + Body: body, + Bytes: src, + + Nav: navigation{ + root: body, + }, + }, diags +} + +// ParseExpression parses the given buffer as a standalone HCL expression, +// returning it as an instance of Expression. +func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) { + tokens, diags := LexExpression(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + + // Bare expressions are always parsed in "ignore newlines" mode, as if + // they were wrapped in parentheses. + parser.PushIncludeNewlines(false) + + expr, parseDiags := parser.ParseExpression() + diags = append(diags, parseDiags...)
+ + next := parser.Peek() + if next.Type != TokenEOF && !parser.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after expression", + Detail: "An expression was successfully parsed, but extra characters were found after it.", + Subject: &next.Range, + }) + } + + parser.PopIncludeNewlines() + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// ParseTemplate parses the given buffer as a standalone HCL template, +// returning it as an instance of Expression. +func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) { + tokens, diags := LexTemplate(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + expr, parseDiags := parser.ParseTemplate() + diags = append(diags, parseDiags...) + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// ParseTraversalAbs parses the given buffer as a standalone absolute traversal. +// +// Parsing as a traversal is more limited than parsing as an expression since +// it allows only attribute and indexing operations on variables. Traversals +// are useful as a syntax for referring to objects without necessarily +// evaluating them. +func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) { + tokens, diags := LexExpression(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + + // Bare traversals are always parsed in "ignore newlines" mode, as if + // they were wrapped in parentheses. + parser.PushIncludeNewlines(false) + + expr, parseDiags := parser.ParseTraversalAbs() + diags = append(diags, parseDiags...) + + parser.PopIncludeNewlines() + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// LexConfig performs lexical analysis on the given buffer, treating it as a +// whole HCL config file, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexExpression performs lexical analysis on the given buffer, treating it as +// a standalone HCL expression, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + // This is actually just the same thing as LexConfig, since configs + // and expressions lex in the same way.
+ tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexTemplate performs lexical analysis on the given buffer, treating it as a +// standalone HCL template, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// ValidIdentifier tests if the given string could be a valid identifier in +// a native syntax expression. +// +// This is useful when accepting names from the user that will be used as +// variable or attribute names in the scope, to ensure that any name chosen +// will be traversable using the variable or attribute traversal syntax. +func ValidIdentifier(s string) bool { + // This is a kinda-expensive way to do something pretty simple, but it + // is easiest to do with our existing scanner-related infrastructure here + // and nobody should be validating identifiers in a tight loop. + tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) + return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go new file mode 100644 index 00000000000..2895ade7582 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go @@ -0,0 +1,301 @@ +//line scan_string_lit.rl:1 + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. 
+ +//line scan_string_lit.go:9 +var _hclstrtok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 2, 1, 0, +} + +var _hclstrtok_key_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 10, 14, 18, + 22, 27, 31, 36, 41, 46, 51, 57, + 62, 74, 85, 96, 107, 118, 129, 140, + 151, +} + +var _hclstrtok_trans_keys []byte = []byte{ + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 123, 10, 13, 36, 37, 10, + 13, 36, 37, 92, 10, 13, 36, 37, + 92, 10, 13, 36, 37, 92, 10, 13, + 36, 37, 92, 10, 13, 36, 37, 92, + 123, 10, 13, 36, 37, 92, 85, 117, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 92, 48, + 57, 65, 70, 97, 102, 10, 13, 36, + 37, 92, 48, 57, 65, 70, 97, 102, + 10, 13, 36, 37, 92, 48, 57, 65, + 70, 97, 102, 10, 13, 36, 37, 92, + 48, 57, 65, 70, 97, 102, 10, 13, + 36, 37, 92, 48, 57, 65, 70, 97, + 102, 10, 13, 36, 37, 92, 48, 57, + 65, 70, 97, 102, 10, 13, 36, 37, + 92, 48, 57, 65, 70, 97, 102, 10, + 13, 36, 37, 92, 48, 57, 65, 70, + 97, 102, +} + +var _hclstrtok_single_lengths []byte = []byte{ + 0, 0, 0, 0, 4, 4, 4, 4, + 5, 4, 5, 5, 5, 5, 6, 5, + 2, 5, 5, 5, 5, 5, 5, 5, + 5, +} + +var _hclstrtok_range_lengths []byte = []byte{ + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +var _hclstrtok_index_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 11, 16, 21, + 26, 32, 37, 43, 49, 55, 61, 68, + 74, 82, 91, 100, 109, 118, 127, 136, + 145, +} + +var _hclstrtok_indicies []byte = []byte{ + 0, 1, 2, 1, 3, 1, 5, 6, + 7, 8, 4, 10, 11, 12, 13, 9, + 14, 11, 12, 13, 9, 10, 11, 15, + 13, 9, 10, 11, 12, 13, 14, 9, + 10, 11, 12, 15, 9, 17, 18, 19, + 20, 21, 16, 23, 24, 25, 26, 27, + 22, 0, 24, 25, 26, 27, 22, 23, + 24, 28, 26, 27, 22, 23, 24, 25, + 26, 27, 0, 22, 23, 24, 25, 28, + 27, 22, 29, 30, 22, 2, 3, 31, + 22, 0, 23, 24, 25, 26, 27, 32, + 32, 32, 22, 23, 24, 25, 26, 27, + 33, 33, 33, 22, 23, 24, 25, 26, + 27, 34, 34, 34, 22, 23, 24, 25, + 26, 27, 30, 30, 30, 22, 23, 24, + 25, 26, 27, 35, 35, 35, 22, 23, + 24, 25, 26, 27, 36, 36, 36, 22, + 23, 24, 25, 26, 27, 37, 37, 37, + 22, 23, 24, 25, 26, 27, 0, 0, + 0, 22, +} + +var _hclstrtok_trans_targs []byte = []byte{ + 11, 0, 1, 2, 4, 5, 6, 7, + 9, 4, 5, 6, 7, 9, 5, 8, + 10, 11, 12, 13, 15, 16, 10, 11, + 12, 13, 15, 16, 14, 17, 21, 3, + 18, 19, 20, 22, 23, 24, +} + +var _hclstrtok_trans_actions []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 3, 5, 5, 5, 5, 0, 0, + 0, 1, 1, 1, 1, 1, 3, 5, + 5, 5, 5, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hclstrtok_eof_actions []byte = []byte{ + 0, 0, 0, 0, 0, 3, 3, 3, + 3, 3, 0, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +const hclstrtok_start int = 4 +const hclstrtok_first_final int = 4 +const hclstrtok_error int = 0 + +const hclstrtok_en_quoted int = 10 +const hclstrtok_en_unquoted int = 4 + +//line scan_string_lit.rl:10 + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + +//line scan_string_lit.rl:61 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + +//line scan_string_lit.go:154 + { + } + +//line scan_string_lit.go:158 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } 
+ _resume: + _keys = int(_hclstrtok_key_offsets[cs]) + _trans = int(_hclstrtok_index_offsets[cs]) + + _klen = int(_hclstrtok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hclstrtok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hclstrtok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hclstrtok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hclstrtok_indicies[_trans]) + cs = int(_hclstrtok_trans_targs[_trans]) + + if _hclstrtok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hclstrtok_trans_actions[_trans]) + _nacts = uint(_hclstrtok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hclstrtok_actions[_acts-1] { + case 0: +//line scan_string_lit.rl:40 + + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p + + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:253 + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _hclstrtok_eof_actions[cs] + __nacts := uint(_hclstrtok_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _hclstrtok_actions[__acts-1] { + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:278 + } + } + } + + _out: + { + } + } + +//line scan_string_lit.rl:89 + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go new file mode 100644 index 00000000000..581e35e00a9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go @@ -0,0 +1,5265 @@ +//line scan_tokens.rl:1 + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. 
+ +//line scan_tokens.go:15 +var _hcltok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 3, 1, 4, + 1, 7, 1, 8, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 23, 1, 24, + 1, 25, 1, 26, 1, 27, 1, 28, + 1, 29, 1, 30, 1, 31, 1, 32, + 1, 35, 1, 36, 1, 37, 1, 38, + 1, 39, 1, 40, 1, 41, 1, 42, + 1, 43, 1, 44, 1, 47, 1, 48, + 1, 49, 1, 50, 1, 51, 1, 52, + 1, 53, 1, 56, 1, 57, 1, 58, + 1, 59, 1, 60, 1, 61, 1, 62, + 1, 63, 1, 64, 1, 65, 1, 66, + 1, 67, 1, 68, 1, 69, 1, 70, + 1, 71, 1, 72, 1, 73, 1, 74, + 1, 75, 1, 76, 1, 77, 1, 78, + 1, 79, 1, 80, 1, 81, 1, 82, + 1, 83, 1, 84, 1, 85, 2, 0, + 14, 2, 0, 25, 2, 0, 29, 2, + 0, 37, 2, 0, 41, 2, 1, 2, + 2, 4, 5, 2, 4, 6, 2, 4, + 21, 2, 4, 22, 2, 4, 33, 2, + 4, 34, 2, 4, 45, 2, 4, 46, + 2, 4, 54, 2, 4, 55, +} + +var _hcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 4, 9, 13, 15, + 57, 98, 144, 145, 149, 155, 155, 157, + 159, 168, 174, 181, 182, 185, 186, 190, + 195, 204, 208, 212, 220, 222, 224, 226, + 229, 261, 263, 265, 269, 273, 276, 287, + 300, 319, 332, 348, 360, 376, 391, 412, + 422, 434, 445, 459, 474, 484, 496, 505, + 517, 519, 523, 544, 553, 563, 569, 575, + 576, 625, 627, 631, 633, 639, 646, 654, + 661, 664, 670, 674, 678, 680, 684, 688, + 692, 698, 706, 714, 720, 722, 726, 728, + 734, 738, 742, 746, 750, 755, 762, 768, + 770, 772, 776, 778, 784, 788, 792, 802, + 807, 821, 836, 838, 846, 848, 853, 867, + 872, 874, 878, 879, 883, 889, 895, 905, + 915, 926, 934, 937, 940, 944, 948, 950, + 953, 953, 956, 958, 988, 990, 992, 996, + 1001, 1005, 1010, 1012, 1014, 1016, 1025, 1029, + 1033, 1039, 1041, 1049, 1057, 1069, 1072, 1078, + 1082, 1084, 1088, 1108, 1110, 1112, 1123, 1129, + 1131, 1133, 1135, 1139, 1145, 1151, 1153, 1158, + 1162, 1164, 1172, 1190, 1230, 1240, 1244, 1246, + 1248, 1249, 1253, 1257, 1261, 1265, 1269, 1274, + 1278, 1282, 1286, 1288, 1290, 1294, 1304, 1308, + 1310, 1314, 1318, 1322, 1335, 1337, 1339, 1343, + 1345, 1349, 1351, 1353, 1383, 1387, 1391, 1395, + 1398, 1405, 1410, 1421, 1425, 1441, 1455, 1459, + 1464, 1468, 1472, 1478, 1480, 1486, 1488, 1492, + 1494, 1500, 1505, 1510, 1520, 1522, 1524, 1528, + 1532, 1534, 1547, 1549, 1553, 1557, 1565, 1567, + 1571, 1573, 1574, 1577, 1582, 1584, 1586, 1590, + 1592, 1596, 1602, 1622, 1628, 1634, 1636, 1637, + 1647, 1648, 1656, 1663, 1665, 1668, 1670, 1672, + 1674, 1679, 1683, 1687, 1692, 1702, 1712, 1716, + 1720, 1734, 1760, 1770, 1772, 1774, 1777, 1779, + 1782, 1784, 1788, 1790, 1791, 1795, 1797, 1800, + 1807, 1815, 1817, 1819, 1823, 1825, 1831, 1842, + 1845, 1847, 1851, 1856, 1886, 1891, 1893, 1896, + 1901, 1915, 1922, 1936, 1941, 1954, 1958, 1971, + 1976, 1994, 1995, 2004, 2008, 2020, 2025, 2032, + 2039, 2046, 2048, 2052, 2074, 2079, 2080, 2084, + 2086, 2136, 2139, 2150, 2154, 2156, 2162, 2168, + 2170, 2175, 2177, 2181, 2183, 2184, 2186, 2188, + 2194, 2196, 2198, 2202, 2208, 2221, 2223, 2229, + 2233, 2241, 2252, 2260, 2263, 2293, 2299, 2302, + 2307, 2309, 2313, 2317, 2321, 2323, 2330, 2332, + 2341, 2348, 2356, 2358, 2378, 2390, 2394, 2396, + 2414, 2453, 2455, 2459, 2461, 2468, 2472, 2500, + 2502, 2504, 2506, 2508, 2511, 2513, 2517, 2521, + 2523, 2526, 2528, 2530, 2533, 2535, 2537, 2538, + 2540, 2542, 2546, 2550, 2553, 2566, 2568, 2574, + 2578, 2580, 2584, 2588, 2602, 2605, 2614, 2616, + 2620, 2626, 2626, 2628, 2630, 2639, 2645, 2652, + 2653, 2656, 2657, 2661, 2666, 2675, 2679, 2683, + 2691, 2693, 2695, 2697, 2700, 2732, 2734, 2736, + 2740, 2744, 2747, 2758, 2771, 2790, 2803, 2819, + 2831, 2847, 2862, 2883, 2893, 2905, 2916, 2930, + 2945, 2955, 
2967, 2976, 2988, 2990, 2994, 3015, + 3024, 3034, 3040, 3046, 3047, 3096, 3098, 3102, + 3104, 3110, 3117, 3125, 3132, 3135, 3141, 3145, + 3149, 3151, 3155, 3159, 3163, 3169, 3177, 3185, + 3191, 3193, 3197, 3199, 3205, 3209, 3213, 3217, + 3221, 3226, 3233, 3239, 3241, 3243, 3247, 3249, + 3255, 3259, 3263, 3273, 3278, 3292, 3307, 3309, + 3317, 3319, 3324, 3338, 3343, 3345, 3349, 3350, + 3354, 3360, 3366, 3376, 3386, 3397, 3405, 3408, + 3411, 3415, 3419, 3421, 3424, 3424, 3427, 3429, + 3459, 3461, 3463, 3467, 3472, 3476, 3481, 3483, + 3485, 3487, 3496, 3500, 3504, 3510, 3512, 3520, + 3528, 3540, 3543, 3549, 3553, 3555, 3559, 3579, + 3581, 3583, 3594, 3600, 3602, 3604, 3606, 3610, + 3616, 3622, 3624, 3629, 3633, 3635, 3643, 3661, + 3701, 3711, 3715, 3717, 3719, 3720, 3724, 3728, + 3732, 3736, 3740, 3745, 3749, 3753, 3757, 3759, + 3761, 3765, 3775, 3779, 3781, 3785, 3789, 3793, + 3806, 3808, 3810, 3814, 3816, 3820, 3822, 3824, + 3854, 3858, 3862, 3866, 3869, 3876, 3881, 3892, + 3896, 3912, 3926, 3930, 3935, 3939, 3943, 3949, + 3951, 3957, 3959, 3963, 3965, 3971, 3976, 3981, + 3991, 3993, 3995, 3999, 4003, 4005, 4018, 4020, + 4024, 4028, 4036, 4038, 4042, 4044, 4045, 4048, + 4053, 4055, 4057, 4061, 4063, 4067, 4073, 4093, + 4099, 4105, 4107, 4108, 4118, 4119, 4127, 4134, + 4136, 4139, 4141, 4143, 4145, 4150, 4154, 4158, + 4163, 4173, 4183, 4187, 4191, 4205, 4231, 4241, + 4243, 4245, 4248, 4250, 4253, 4255, 4259, 4261, + 4262, 4266, 4268, 4270, 4277, 4281, 4288, 4295, + 4304, 4320, 4332, 4350, 4361, 4373, 4381, 4399, + 4407, 4437, 4440, 4450, 4460, 4472, 4483, 4492, + 4505, 4517, 4521, 4527, 4554, 4563, 4566, 4571, + 4577, 4582, 4603, 4607, 4613, 4613, 4620, 4629, + 4637, 4640, 4644, 4650, 4656, 4659, 4663, 4670, + 4676, 4685, 4694, 4698, 4702, 4706, 4710, 4717, + 4721, 4725, 4735, 4741, 4745, 4751, 4755, 4758, + 4764, 4770, 4782, 4786, 4790, 4800, 4804, 4815, + 4817, 4819, 4823, 4835, 4840, 4864, 4868, 4874, + 4896, 4905, 4909, 4912, 4913, 4921, 4929, 4935, + 4945, 4952, 4970, 4973, 4976, 4984, 4990, 4994, + 4998, 5002, 5008, 5016, 5021, 5027, 5031, 5039, + 5046, 5050, 5057, 5063, 5071, 5079, 5085, 5091, + 5102, 5106, 5118, 5127, 5144, 5161, 5164, 5168, + 5170, 5176, 5178, 5182, 5197, 5201, 5205, 5209, + 5213, 5217, 5219, 5225, 5230, 5234, 5240, 5247, + 5250, 5268, 5270, 5315, 5321, 5327, 5331, 5335, + 5341, 5345, 5351, 5357, 5364, 5366, 5372, 5378, + 5382, 5386, 5394, 5407, 5413, 5420, 5428, 5434, + 5443, 5449, 5453, 5458, 5462, 5470, 5474, 5478, + 5508, 5514, 5520, 5526, 5532, 5539, 5545, 5552, + 5557, 5567, 5571, 5578, 5584, 5588, 5595, 5599, + 5605, 5608, 5612, 5616, 5620, 5624, 5629, 5634, + 5638, 5649, 5653, 5657, 5663, 5671, 5675, 5692, + 5696, 5702, 5712, 5718, 5724, 5727, 5732, 5741, + 5745, 5749, 5755, 5759, 5765, 5773, 5791, 5792, + 5802, 5803, 5812, 5820, 5822, 5825, 5827, 5829, + 5831, 5836, 5849, 5853, 5868, 5897, 5908, 5910, + 5914, 5918, 5923, 5927, 5929, 5936, 5940, 5948, + 5952, 5964, 5966, 5968, 5970, 5972, 5974, 5975, + 5977, 5979, 5981, 5983, 5985, 5986, 5988, 5990, + 5992, 5994, 5996, 6000, 6006, 6006, 6008, 6010, + 6019, 6025, 6032, 6033, 6036, 6037, 6041, 6046, + 6055, 6059, 6063, 6071, 6073, 6075, 6077, 6080, + 6112, 6114, 6116, 6120, 6124, 6127, 6138, 6151, + 6170, 6183, 6199, 6211, 6227, 6242, 6263, 6273, + 6285, 6296, 6310, 6325, 6335, 6347, 6356, 6368, + 6370, 6374, 6395, 6404, 6414, 6420, 6426, 6427, + 6476, 6478, 6482, 6484, 6490, 6497, 6505, 6512, + 6515, 6521, 6525, 6529, 6531, 6535, 6539, 6543, + 6549, 6557, 6565, 6571, 6573, 6577, 6579, 6585, + 6589, 6593, 
6597, 6601, 6606, 6613, 6619, 6621, + 6623, 6627, 6629, 6635, 6639, 6643, 6653, 6658, + 6672, 6687, 6689, 6697, 6699, 6704, 6718, 6723, + 6725, 6729, 6730, 6734, 6740, 6746, 6756, 6766, + 6777, 6785, 6788, 6791, 6795, 6799, 6801, 6804, + 6804, 6807, 6809, 6839, 6841, 6843, 6847, 6852, + 6856, 6861, 6863, 6865, 6867, 6876, 6880, 6884, + 6890, 6892, 6900, 6908, 6920, 6923, 6929, 6933, + 6935, 6939, 6959, 6961, 6963, 6974, 6980, 6982, + 6984, 6986, 6990, 6996, 7002, 7004, 7009, 7013, + 7015, 7023, 7041, 7081, 7091, 7095, 7097, 7099, + 7100, 7104, 7108, 7112, 7116, 7120, 7125, 7129, + 7133, 7137, 7139, 7141, 7145, 7155, 7159, 7161, + 7165, 7169, 7173, 7186, 7188, 7190, 7194, 7196, + 7200, 7202, 7204, 7234, 7238, 7242, 7246, 7249, + 7256, 7261, 7272, 7276, 7292, 7306, 7310, 7315, + 7319, 7323, 7329, 7331, 7337, 7339, 7343, 7345, + 7351, 7356, 7361, 7371, 7373, 7375, 7379, 7383, + 7385, 7398, 7400, 7404, 7408, 7416, 7418, 7422, + 7424, 7425, 7428, 7433, 7435, 7437, 7441, 7443, + 7447, 7453, 7473, 7479, 7485, 7487, 7488, 7498, + 7499, 7507, 7514, 7516, 7519, 7521, 7523, 7525, + 7530, 7534, 7538, 7543, 7553, 7563, 7567, 7571, + 7585, 7611, 7621, 7623, 7625, 7628, 7630, 7633, + 7635, 7639, 7641, 7642, 7646, 7648, 7650, 7657, + 7661, 7668, 7675, 7684, 7700, 7712, 7730, 7741, + 7753, 7761, 7779, 7787, 7817, 7820, 7830, 7840, + 7852, 7863, 7872, 7885, 7897, 7901, 7907, 7934, + 7943, 7946, 7951, 7957, 7962, 7983, 7987, 7993, + 7993, 8000, 8009, 8017, 8020, 8024, 8030, 8036, + 8039, 8043, 8050, 8056, 8065, 8074, 8078, 8082, + 8086, 8090, 8097, 8101, 8105, 8115, 8121, 8125, + 8131, 8135, 8138, 8144, 8150, 8162, 8166, 8170, + 8180, 8184, 8195, 8197, 8199, 8203, 8215, 8220, + 8244, 8248, 8254, 8276, 8285, 8289, 8292, 8293, + 8301, 8309, 8315, 8325, 8332, 8350, 8353, 8356, + 8364, 8370, 8374, 8378, 8382, 8388, 8396, 8401, + 8407, 8411, 8419, 8426, 8430, 8437, 8443, 8451, + 8459, 8465, 8471, 8482, 8486, 8498, 8507, 8524, + 8541, 8544, 8548, 8550, 8556, 8558, 8562, 8577, + 8581, 8585, 8589, 8593, 8597, 8599, 8605, 8610, + 8614, 8620, 8627, 8630, 8648, 8650, 8695, 8701, + 8707, 8711, 8715, 8721, 8725, 8731, 8737, 8744, + 8746, 8752, 8758, 8762, 8766, 8774, 8787, 8793, + 8800, 8808, 8814, 8823, 8829, 8833, 8838, 8842, + 8850, 8854, 8858, 8888, 8894, 8900, 8906, 8912, + 8919, 8925, 8932, 8937, 8947, 8951, 8958, 8964, + 8968, 8975, 8979, 8985, 8988, 8992, 8996, 9000, + 9004, 9009, 9014, 9018, 9029, 9033, 9037, 9043, + 9051, 9055, 9072, 9076, 9082, 9092, 9098, 9104, + 9107, 9112, 9121, 9125, 9129, 9135, 9139, 9145, + 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202, + 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248, + 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309, + 9316, 9320, 9328, 9332, 9407, 9409, 9410, 9411, + 9412, 9413, 9414, 9416, 9421, 9423, 9425, 9426, + 9470, 9471, 9472, 9474, 9479, 9483, 9483, 9485, + 9487, 9498, 9508, 9516, 9517, 9519, 9520, 9524, + 9528, 9538, 9542, 9549, 9560, 9567, 9571, 9577, + 9588, 9620, 9669, 9684, 9699, 9704, 9706, 9711, + 9743, 9751, 9753, 9775, 9797, 9799, 9815, 9831, + 9833, 9835, 9835, 9836, 9837, 9838, 9840, 9841, + 9853, 9855, 9857, 9859, 9873, 9887, 9889, 9892, + 9895, 9897, 9898, 9899, 9901, 9903, 9905, 9919, + 9933, 9935, 9938, 9941, 9943, 9944, 9945, 9947, + 9949, 9951, 10000, 10044, 10046, 10051, 10055, 10055, + 10057, 10059, 10070, 10080, 10088, 10089, 10091, 10092, + 10096, 10100, 10110, 10114, 10121, 10132, 10139, 10143, + 10149, 10160, 10192, 10241, 10256, 10271, 10276, 10278, + 10283, 10315, 10323, 10325, 10347, 10369, +} + +var _hcltok_trans_keys []byte = 
[]byte{ + 46, 42, 42, 47, 46, 69, 101, 48, + 57, 43, 45, 48, 57, 48, 57, 45, + 95, 194, 195, 198, 199, 203, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 239, 240, 65, + 90, 97, 122, 196, 202, 208, 218, 229, + 236, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 10, 13, 45, 95, 194, 195, + 198, 199, 203, 204, 205, 206, 207, 210, + 212, 213, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, + 233, 234, 237, 239, 240, 243, 48, 57, + 65, 90, 97, 122, 196, 218, 229, 236, + 10, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 
174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 
135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 170, 181, 186, + 191, 176, 180, 182, 183, 186, 189, 134, + 140, 136, 138, 142, 161, 163, 255, 130, + 137, 136, 255, 144, 170, 176, 178, 160, + 191, 128, 138, 174, 175, 177, 255, 148, + 150, 164, 167, 173, 176, 185, 189, 190, + 192, 255, 144, 146, 175, 141, 255, 166, + 176, 178, 255, 186, 138, 170, 180, 181, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 184, 186, + 187, 188, 189, 190, 183, 185, 154, 164, + 168, 128, 149, 128, 152, 189, 132, 185, + 144, 152, 161, 177, 255, 169, 177, 129, + 132, 141, 142, 145, 146, 179, 181, 186, + 188, 190, 255, 142, 156, 157, 159, 161, + 176, 177, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 158, 153, 156, 178, 180, 189, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 144, 185, 160, 161, 189, 133, + 140, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 185, 177, 156, 157, 159, 161, + 131, 156, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 144, 189, 133, 140, 142, 144, + 146, 168, 170, 185, 152, 154, 160, 161, + 128, 189, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 158, 160, 161, 177, + 178, 189, 133, 140, 142, 144, 146, 186, + 142, 148, 150, 
159, 161, 186, 191, 189, + 133, 150, 154, 177, 179, 187, 128, 134, + 129, 176, 178, 179, 132, 138, 141, 165, + 167, 189, 129, 130, 135, 136, 148, 151, + 153, 159, 161, 163, 170, 171, 173, 176, + 178, 179, 134, 128, 132, 156, 159, 128, + 128, 135, 137, 172, 136, 140, 128, 129, + 130, 131, 137, 138, 139, 140, 141, 142, + 143, 144, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 188, 189, 190, 191, 132, 152, 185, 187, + 191, 128, 170, 161, 144, 149, 154, 157, + 165, 166, 174, 176, 181, 255, 130, 141, + 143, 159, 155, 255, 128, 140, 142, 145, + 160, 177, 128, 145, 160, 172, 174, 176, + 151, 156, 170, 128, 168, 176, 255, 138, + 255, 128, 150, 160, 255, 149, 255, 167, + 133, 179, 133, 139, 131, 160, 174, 175, + 186, 255, 166, 255, 128, 163, 141, 143, + 154, 189, 169, 172, 174, 177, 181, 182, + 129, 130, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 177, 191, 165, + 170, 175, 177, 180, 255, 168, 174, 176, + 255, 128, 134, 136, 142, 144, 150, 152, + 158, 128, 129, 130, 131, 132, 133, 134, + 135, 144, 145, 255, 133, 135, 161, 169, + 177, 181, 184, 188, 160, 151, 154, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 143, 160, + 169, 172, 255, 191, 128, 174, 130, 134, + 139, 163, 255, 130, 179, 187, 189, 178, + 183, 138, 165, 176, 255, 135, 159, 189, + 255, 132, 178, 143, 160, 164, 166, 175, + 186, 190, 128, 168, 186, 128, 130, 132, + 139, 160, 182, 190, 255, 176, 178, 180, + 183, 184, 190, 255, 128, 130, 155, 157, + 160, 170, 178, 180, 128, 162, 164, 169, + 171, 172, 173, 174, 175, 180, 181, 182, + 183, 185, 186, 187, 188, 189, 190, 191, + 165, 179, 157, 190, 128, 134, 147, 151, + 159, 168, 170, 182, 184, 188, 176, 180, + 182, 255, 161, 186, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 145, 255, 139, + 143, 182, 255, 158, 175, 128, 144, 147, + 149, 151, 153, 179, 128, 135, 137, 164, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 131, 183, 131, 175, + 144, 168, 131, 166, 182, 144, 178, 131, + 178, 154, 156, 129, 132, 128, 145, 147, + 171, 159, 255, 144, 157, 161, 135, 138, + 128, 175, 135, 132, 133, 128, 174, 152, + 155, 132, 128, 170, 128, 153, 160, 190, + 192, 255, 128, 136, 138, 174, 128, 178, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 144, 173, + 128, 131, 163, 183, 189, 255, 133, 143, + 145, 255, 147, 159, 128, 176, 177, 178, + 128, 136, 144, 153, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 150, 153, 131, 140, 255, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 133, 255, + 170, 181, 183, 186, 128, 150, 152, 182, + 184, 255, 192, 255, 128, 255, 173, 130, + 133, 146, 159, 165, 171, 175, 255, 181, + 190, 184, 185, 192, 255, 140, 134, 138, + 142, 161, 163, 255, 182, 130, 136, 137, + 176, 151, 152, 154, 160, 190, 136, 144, + 192, 255, 135, 129, 130, 132, 133, 144, + 170, 176, 178, 144, 154, 160, 191, 128, + 169, 174, 255, 148, 169, 157, 158, 189, + 190, 192, 255, 144, 255, 139, 140, 178, + 255, 186, 128, 181, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 
172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 128, 173, 128, 155, + 160, 180, 182, 189, 148, 161, 163, 255, + 176, 164, 165, 132, 169, 177, 141, 142, + 145, 146, 179, 181, 186, 187, 158, 133, + 134, 137, 138, 143, 150, 152, 155, 164, + 165, 178, 255, 188, 129, 131, 133, 138, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 182, 184, 185, 190, 255, 157, 131, + 134, 137, 138, 142, 144, 146, 152, 159, + 165, 182, 255, 129, 131, 133, 141, 143, + 145, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 255, 134, 138, 142, 143, 145, + 159, 164, 165, 176, 184, 186, 255, 129, + 131, 133, 140, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 191, 177, + 128, 132, 135, 136, 139, 141, 150, 151, + 156, 157, 159, 163, 166, 175, 156, 130, + 131, 133, 138, 142, 144, 146, 149, 153, + 154, 158, 159, 163, 164, 168, 170, 174, + 185, 190, 191, 144, 151, 128, 130, 134, + 136, 138, 141, 166, 175, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 185, 189, + 255, 133, 137, 151, 142, 148, 155, 159, + 164, 165, 176, 255, 128, 131, 133, 140, + 142, 144, 146, 168, 170, 179, 181, 185, + 188, 191, 158, 128, 132, 134, 136, 138, + 141, 149, 150, 160, 163, 166, 175, 177, + 178, 129, 131, 133, 140, 142, 144, 146, + 186, 189, 255, 133, 137, 143, 147, 152, + 158, 164, 165, 176, 185, 192, 255, 189, + 130, 131, 133, 150, 154, 177, 179, 187, + 138, 150, 128, 134, 143, 148, 152, 159, + 166, 175, 178, 179, 129, 186, 128, 142, + 144, 153, 132, 138, 141, 165, 167, 129, + 130, 135, 136, 148, 151, 153, 159, 161, + 163, 170, 171, 173, 185, 187, 189, 134, + 128, 132, 136, 141, 144, 153, 156, 159, + 128, 181, 183, 185, 152, 153, 160, 169, + 190, 191, 128, 135, 137, 172, 177, 191, + 128, 132, 134, 151, 153, 188, 134, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 173, 175, 176, + 177, 178, 179, 181, 182, 183, 188, 189, + 190, 191, 132, 152, 172, 184, 185, 187, + 128, 191, 128, 137, 144, 255, 158, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 142, + 143, 182, 183, 191, 255, 128, 130, 133, + 136, 150, 152, 255, 145, 150, 151, 155, + 156, 160, 168, 178, 255, 128, 143, 160, + 255, 182, 183, 190, 255, 129, 255, 173, + 174, 192, 255, 129, 154, 160, 255, 171, + 173, 185, 255, 128, 140, 142, 148, 160, + 180, 128, 147, 160, 172, 174, 176, 178, + 179, 148, 150, 152, 155, 158, 159, 170, + 255, 139, 141, 144, 153, 160, 255, 184, + 255, 128, 170, 176, 255, 182, 255, 128, + 158, 160, 171, 176, 187, 134, 173, 176, + 180, 128, 171, 176, 255, 138, 143, 155, + 255, 128, 155, 160, 255, 159, 189, 190, + 192, 255, 167, 128, 137, 144, 153, 176, + 189, 140, 143, 154, 170, 180, 255, 180, + 255, 128, 183, 128, 137, 141, 189, 128, + 136, 144, 146, 148, 182, 184, 185, 128, + 181, 187, 191, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 255, 190, 128, 180, 182, 188, 130, 132, + 134, 140, 144, 147, 150, 155, 160, 172, + 178, 180, 182, 188, 128, 129, 130, 131, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 191, 255, 129, 147, 149, + 176, 178, 190, 192, 255, 144, 156, 161, + 144, 156, 165, 176, 130, 135, 149, 164, + 166, 168, 138, 147, 152, 157, 170, 185, + 188, 191, 142, 133, 137, 160, 255, 137, + 255, 128, 174, 176, 255, 159, 165, 170, + 180, 255, 167, 173, 128, 165, 176, 255, + 168, 174, 176, 190, 192, 255, 128, 150, + 160, 166, 168, 174, 176, 182, 184, 190, + 128, 134, 136, 142, 144, 150, 152, 158, + 160, 191, 128, 129, 130, 
131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 175, 177, 181, 184, 188, 160, 151, 152, + 187, 192, 255, 133, 173, 177, 255, 143, + 159, 187, 255, 176, 191, 182, 183, 184, + 191, 192, 255, 150, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 255, 141, 255, 144, 189, 141, + 143, 172, 255, 191, 128, 175, 180, 189, + 151, 159, 162, 255, 175, 137, 138, 184, + 255, 183, 255, 168, 255, 128, 179, 188, + 134, 143, 154, 159, 184, 186, 190, 255, + 128, 173, 176, 255, 148, 159, 189, 255, + 129, 142, 154, 159, 191, 255, 128, 182, + 128, 141, 144, 153, 160, 182, 186, 255, + 128, 130, 155, 157, 160, 175, 178, 182, + 129, 134, 137, 142, 145, 150, 160, 166, + 168, 174, 176, 255, 155, 166, 175, 128, + 170, 172, 173, 176, 185, 158, 159, 160, + 255, 164, 175, 135, 138, 188, 255, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 165, 186, 174, 175, 154, 255, 190, + 128, 134, 147, 151, 157, 168, 170, 182, + 184, 188, 128, 129, 131, 132, 134, 255, + 147, 255, 190, 255, 144, 145, 136, 175, + 188, 255, 128, 143, 160, 175, 179, 180, + 141, 143, 176, 180, 182, 255, 189, 255, + 191, 144, 153, 161, 186, 129, 154, 166, + 255, 191, 255, 130, 135, 138, 143, 146, + 151, 154, 156, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 161, 169, 128, 129, 130, + 131, 133, 135, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 128, 139, 141, + 166, 168, 186, 188, 189, 191, 255, 142, + 143, 158, 255, 187, 255, 128, 180, 189, + 128, 156, 160, 255, 145, 159, 161, 255, + 128, 159, 176, 255, 139, 143, 187, 255, + 128, 157, 160, 255, 144, 132, 135, 150, + 255, 158, 159, 170, 175, 148, 151, 188, + 255, 128, 167, 176, 255, 164, 255, 183, + 255, 128, 149, 160, 167, 136, 188, 128, + 133, 138, 181, 183, 184, 191, 255, 150, + 159, 183, 255, 128, 158, 160, 178, 180, + 181, 128, 149, 160, 185, 128, 183, 190, + 191, 191, 128, 131, 133, 134, 140, 147, + 149, 151, 153, 179, 184, 186, 160, 188, + 128, 156, 128, 135, 137, 166, 128, 181, + 128, 149, 160, 178, 128, 145, 128, 178, + 129, 130, 131, 132, 133, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 155, 156, 162, 163, + 171, 176, 177, 178, 128, 134, 135, 165, + 176, 190, 144, 168, 176, 185, 128, 180, + 182, 191, 182, 144, 179, 155, 133, 137, + 141, 143, 157, 255, 190, 128, 145, 147, + 183, 136, 128, 134, 138, 141, 143, 157, + 159, 168, 176, 255, 171, 175, 186, 255, + 128, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 144, 151, 128, 132, 135, 136, 139, 141, + 157, 163, 166, 172, 176, 180, 128, 138, + 144, 153, 134, 136, 143, 154, 255, 128, + 181, 184, 255, 129, 151, 158, 255, 129, + 131, 133, 143, 154, 255, 128, 137, 128, + 153, 157, 171, 176, 185, 160, 255, 170, + 190, 192, 255, 128, 184, 128, 136, 138, + 182, 184, 191, 128, 144, 153, 178, 255, + 168, 144, 145, 183, 255, 128, 142, 145, + 149, 129, 141, 144, 146, 147, 148, 175, + 255, 132, 255, 128, 144, 129, 143, 144, + 153, 145, 152, 135, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 161, 167, 185, 255, 128, 158, 160, 169, + 144, 173, 176, 180, 128, 131, 144, 153, + 163, 183, 189, 255, 144, 255, 133, 143, + 191, 255, 143, 159, 160, 128, 129, 255, + 159, 160, 171, 172, 255, 173, 255, 179, + 255, 128, 176, 177, 178, 128, 129, 171, + 175, 189, 
255, 128, 136, 144, 153, 157, + 158, 133, 134, 137, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 168, 169, 170, 150, 153, 165, 169, 173, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 139, 140, 191, 255, 134, + 128, 132, 138, 144, 146, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 141, 192, 255, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 160, 163, 164, 165, + 184, 185, 186, 161, 162, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 133, + 143, 151, 255, 139, 143, 154, 255, 164, + 167, 185, 187, 128, 131, 133, 159, 161, + 162, 169, 178, 180, 183, 130, 135, 137, + 139, 148, 151, 153, 155, 157, 159, 164, + 190, 141, 143, 145, 146, 161, 162, 167, + 170, 172, 178, 180, 183, 185, 188, 128, + 137, 139, 155, 161, 163, 165, 169, 171, + 187, 155, 156, 151, 255, 156, 157, 160, + 181, 255, 186, 187, 255, 162, 255, 160, + 168, 161, 167, 158, 255, 160, 132, 135, + 133, 134, 176, 255, 128, 191, 154, 164, + 168, 128, 149, 150, 191, 128, 152, 153, + 191, 181, 128, 159, 160, 189, 190, 191, + 189, 128, 131, 132, 185, 186, 191, 144, + 128, 151, 152, 161, 162, 176, 177, 255, + 169, 177, 129, 132, 141, 142, 145, 146, + 179, 181, 186, 188, 190, 191, 192, 255, + 142, 158, 128, 155, 156, 161, 162, 175, + 176, 177, 178, 191, 169, 177, 180, 183, + 128, 132, 133, 138, 139, 142, 143, 144, + 145, 146, 147, 185, 186, 191, 157, 128, + 152, 153, 158, 159, 177, 178, 180, 181, + 191, 142, 146, 169, 177, 180, 189, 128, + 132, 133, 185, 186, 191, 144, 185, 128, + 159, 160, 161, 162, 191, 169, 177, 180, + 189, 128, 132, 133, 140, 141, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 158, + 177, 128, 155, 156, 161, 162, 191, 131, + 145, 155, 157, 128, 132, 133, 138, 139, + 141, 142, 149, 150, 152, 153, 159, 160, + 162, 163, 164, 165, 167, 168, 170, 171, + 173, 174, 185, 186, 191, 144, 128, 191, + 141, 145, 169, 189, 128, 132, 133, 185, + 186, 191, 128, 151, 152, 154, 155, 159, + 160, 161, 162, 191, 128, 141, 145, 169, + 180, 189, 129, 132, 133, 185, 186, 191, + 158, 128, 159, 160, 161, 162, 176, 177, + 178, 179, 191, 141, 145, 189, 128, 132, + 133, 186, 187, 191, 142, 128, 147, 148, + 150, 151, 158, 159, 161, 162, 185, 186, + 191, 178, 188, 128, 132, 133, 150, 151, + 153, 154, 189, 190, 191, 128, 134, 135, + 191, 128, 177, 129, 179, 180, 191, 128, + 131, 137, 141, 152, 160, 164, 166, 172, + 177, 189, 129, 132, 133, 134, 135, 138, + 139, 147, 148, 167, 168, 169, 170, 179, + 180, 191, 133, 128, 134, 135, 155, 156, + 159, 160, 191, 128, 129, 191, 136, 128, + 172, 173, 191, 128, 135, 136, 140, 141, + 191, 191, 128, 170, 171, 190, 161, 128, + 143, 144, 149, 150, 153, 154, 157, 158, + 164, 165, 166, 167, 173, 174, 176, 177, + 180, 181, 255, 130, 141, 143, 159, 134, + 187, 136, 140, 142, 143, 137, 151, 153, + 142, 143, 158, 159, 137, 177, 191, 142, + 143, 182, 183, 192, 255, 129, 151, 128, + 133, 134, 135, 136, 255, 145, 150, 151, + 155, 191, 192, 255, 128, 143, 144, 159, + 160, 255, 182, 183, 190, 191, 192, 255, + 128, 129, 255, 173, 174, 192, 255, 128, + 129, 154, 155, 159, 160, 255, 171, 173, + 185, 191, 192, 255, 141, 128, 145, 146, + 159, 160, 177, 178, 191, 173, 128, 145, + 146, 159, 160, 176, 177, 191, 128, 179, + 180, 191, 151, 156, 128, 191, 128, 159, + 160, 255, 184, 191, 192, 255, 169, 128, + 170, 171, 175, 176, 255, 182, 191, 192, + 255, 128, 158, 159, 191, 128, 143, 144, + 173, 174, 175, 176, 180, 181, 191, 128, + 171, 172, 175, 176, 255, 138, 191, 
192, + 255, 128, 150, 151, 159, 160, 255, 149, + 191, 192, 255, 167, 128, 191, 128, 132, + 133, 179, 180, 191, 128, 132, 133, 139, + 140, 191, 128, 130, 131, 160, 161, 173, + 174, 175, 176, 185, 186, 255, 166, 191, + 192, 255, 128, 163, 164, 191, 128, 140, + 141, 143, 144, 153, 154, 189, 190, 191, + 128, 136, 137, 191, 173, 128, 168, 169, + 177, 178, 180, 181, 182, 183, 191, 0, + 127, 192, 255, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 191, 192, 255, 181, 189, 191, 128, 190, + 133, 181, 128, 129, 130, 140, 141, 143, + 144, 147, 148, 149, 150, 155, 156, 159, + 160, 172, 173, 177, 178, 188, 189, 191, + 177, 191, 128, 190, 128, 143, 144, 156, + 157, 191, 130, 135, 148, 164, 166, 168, + 128, 137, 138, 149, 150, 151, 152, 157, + 158, 169, 170, 185, 186, 187, 188, 191, + 142, 128, 132, 133, 137, 138, 159, 160, + 255, 137, 191, 192, 255, 175, 128, 255, + 159, 165, 170, 175, 177, 180, 191, 192, + 255, 166, 173, 128, 167, 168, 175, 176, + 255, 168, 174, 176, 191, 192, 255, 167, + 175, 183, 191, 128, 150, 151, 159, 160, + 190, 135, 143, 151, 128, 158, 159, 191, + 128, 132, 133, 135, 136, 160, 161, 169, + 170, 176, 177, 181, 182, 183, 184, 188, + 189, 191, 160, 151, 154, 187, 192, 255, + 128, 132, 133, 173, 174, 176, 177, 255, + 143, 159, 187, 191, 192, 255, 128, 175, + 176, 191, 150, 191, 192, 255, 141, 191, + 192, 255, 128, 143, 144, 189, 190, 191, + 141, 143, 160, 169, 172, 191, 192, 255, + 191, 128, 174, 175, 190, 128, 157, 158, + 159, 160, 255, 176, 191, 192, 255, 128, + 150, 151, 159, 160, 161, 162, 255, 175, + 137, 138, 184, 191, 192, 255, 128, 182, + 183, 255, 130, 134, 139, 163, 191, 192, + 255, 128, 129, 130, 179, 180, 191, 187, + 189, 128, 177, 178, 183, 184, 191, 128, + 137, 138, 165, 166, 175, 176, 255, 135, + 159, 189, 191, 192, 255, 128, 131, 132, + 178, 179, 191, 143, 165, 191, 128, 159, + 160, 175, 176, 185, 186, 190, 128, 168, + 169, 191, 131, 186, 128, 139, 140, 159, + 160, 182, 183, 189, 190, 255, 176, 178, + 180, 183, 184, 190, 191, 192, 255, 129, + 128, 130, 131, 154, 155, 157, 158, 159, + 160, 170, 171, 177, 178, 180, 181, 191, + 128, 167, 175, 129, 134, 135, 136, 137, + 142, 143, 144, 145, 150, 151, 159, 160, + 255, 155, 166, 175, 128, 162, 163, 191, + 164, 175, 135, 138, 188, 191, 192, 255, + 174, 175, 154, 191, 192, 255, 157, 169, + 183, 189, 191, 128, 134, 135, 146, 147, + 151, 152, 158, 159, 190, 130, 133, 128, + 255, 178, 191, 192, 255, 128, 146, 147, + 255, 190, 191, 192, 255, 128, 143, 144, + 255, 144, 145, 136, 175, 188, 191, 192, + 255, 181, 128, 175, 176, 255, 189, 191, + 192, 255, 128, 160, 161, 186, 187, 191, + 128, 129, 154, 155, 165, 166, 255, 191, + 192, 255, 128, 129, 130, 135, 136, 137, + 138, 143, 144, 145, 146, 151, 152, 153, + 154, 156, 157, 191, 128, 191, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 132, 151, 153, 155, 158, 175, 178, + 179, 180, 191, 140, 167, 187, 190, 128, + 255, 142, 143, 158, 191, 192, 255, 187, + 191, 192, 255, 128, 180, 181, 191, 128, + 156, 157, 159, 160, 255, 145, 191, 192, + 255, 128, 159, 160, 175, 176, 255, 139, + 143, 182, 191, 192, 255, 144, 132, 135, + 150, 191, 192, 255, 158, 175, 148, 151, + 188, 191, 192, 255, 128, 167, 168, 175, + 176, 255, 164, 191, 192, 255, 183, 191, + 192, 255, 128, 149, 150, 159, 160, 167, + 168, 191, 136, 182, 188, 128, 133, 134, + 137, 138, 184, 185, 190, 191, 255, 150, + 159, 183, 191, 192, 255, 179, 128, 159, + 160, 181, 182, 191, 
128, 149, 150, 159, + 160, 185, 186, 191, 128, 183, 184, 189, + 190, 191, 128, 148, 152, 129, 143, 144, + 179, 180, 191, 128, 159, 160, 188, 189, + 191, 128, 156, 157, 191, 136, 128, 164, + 165, 191, 128, 181, 182, 191, 128, 149, + 150, 159, 160, 178, 179, 191, 128, 145, + 146, 191, 128, 178, 179, 191, 128, 130, + 131, 132, 133, 134, 135, 136, 138, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 162, 163, 171, 176, + 177, 178, 129, 191, 128, 130, 131, 183, + 184, 191, 128, 130, 131, 175, 176, 191, + 128, 143, 144, 168, 169, 191, 128, 130, + 131, 166, 167, 191, 182, 128, 143, 144, + 178, 179, 191, 128, 130, 131, 178, 179, + 191, 128, 154, 156, 129, 132, 133, 191, + 146, 128, 171, 172, 191, 135, 137, 142, + 158, 128, 168, 169, 175, 176, 255, 159, + 191, 192, 255, 144, 128, 156, 157, 161, + 162, 191, 128, 134, 135, 138, 139, 191, + 128, 175, 176, 191, 134, 128, 131, 132, + 135, 136, 191, 128, 174, 175, 191, 128, + 151, 152, 155, 156, 191, 132, 128, 191, + 128, 170, 171, 191, 128, 153, 154, 191, + 160, 190, 192, 255, 128, 184, 185, 191, + 137, 128, 174, 175, 191, 128, 129, 177, + 178, 255, 144, 191, 192, 255, 128, 142, + 143, 144, 145, 146, 149, 129, 148, 150, + 191, 175, 191, 192, 255, 132, 191, 192, + 255, 128, 144, 129, 143, 145, 191, 144, + 153, 128, 143, 145, 152, 154, 191, 135, + 191, 192, 255, 160, 168, 169, 171, 172, + 173, 174, 188, 189, 190, 191, 128, 159, + 161, 167, 170, 187, 185, 191, 192, 255, + 128, 143, 144, 173, 174, 191, 128, 131, + 132, 162, 163, 183, 184, 188, 189, 255, + 133, 143, 145, 191, 192, 255, 128, 146, + 147, 159, 160, 191, 160, 128, 191, 128, + 129, 191, 192, 255, 159, 160, 171, 128, + 170, 172, 191, 192, 255, 173, 191, 192, + 255, 179, 191, 192, 255, 128, 176, 177, + 178, 129, 191, 128, 129, 130, 191, 171, + 175, 189, 191, 192, 255, 128, 136, 137, + 143, 144, 153, 154, 191, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 128, 143, 150, 153, 160, 191, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 191, + 139, 140, 192, 255, 133, 145, 128, 134, + 135, 137, 138, 255, 166, 167, 129, 155, + 187, 149, 181, 143, 175, 137, 169, 131, + 140, 191, 192, 255, 160, 163, 164, 165, + 184, 185, 186, 128, 159, 161, 162, 166, + 191, 133, 191, 192, 255, 132, 160, 163, + 167, 179, 184, 186, 128, 164, 165, 168, + 169, 187, 188, 191, 130, 135, 137, 139, + 144, 147, 151, 153, 155, 157, 159, 163, + 171, 179, 184, 189, 191, 128, 140, 141, + 148, 149, 160, 161, 164, 165, 166, 167, + 190, 138, 164, 170, 128, 155, 156, 160, + 161, 187, 188, 191, 128, 191, 155, 156, + 128, 191, 151, 191, 192, 255, 156, 157, + 160, 128, 191, 181, 191, 192, 255, 158, + 159, 186, 128, 185, 187, 191, 192, 255, + 162, 191, 192, 255, 160, 168, 128, 159, + 161, 167, 169, 191, 158, 191, 192, 255, + 10, 13, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 128, 191, 128, 191, + 128, 191, 128, 191, 128, 191, 10, 128, + 191, 128, 191, 128, 191, 36, 123, 37, + 123, 10, 128, 191, 128, 191, 128, 191, + 36, 123, 37, 123, 170, 181, 183, 186, + 128, 150, 152, 182, 184, 255, 192, 255, + 128, 255, 173, 130, 133, 146, 159, 165, + 171, 175, 255, 181, 190, 184, 185, 192, + 255, 140, 134, 138, 142, 161, 163, 255, + 182, 130, 136, 137, 176, 151, 152, 154, + 160, 190, 136, 144, 192, 255, 135, 129, + 130, 132, 133, 144, 170, 176, 178, 144, + 154, 160, 191, 128, 169, 174, 255, 148, + 169, 157, 158, 189, 190, 192, 255, 144, + 255, 139, 140, 178, 255, 186, 128, 181, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 
178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 128, 173, 128, 155, 160, 180, 182, 189, + 148, 161, 163, 255, 176, 164, 165, 132, + 169, 177, 141, 142, 145, 146, 179, 181, + 186, 187, 158, 133, 134, 137, 138, 143, + 150, 152, 155, 164, 165, 178, 255, 188, + 129, 131, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 190, 255, 157, 131, 134, 137, 138, 142, + 144, 146, 152, 159, 165, 182, 255, 129, + 131, 133, 141, 143, 145, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 255, 134, + 138, 142, 143, 145, 159, 164, 165, 176, + 184, 186, 255, 129, 131, 133, 140, 143, + 144, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 191, 177, 128, 132, 135, 136, + 139, 141, 150, 151, 156, 157, 159, 163, + 166, 175, 156, 130, 131, 133, 138, 142, + 144, 146, 149, 153, 154, 158, 159, 163, + 164, 168, 170, 174, 185, 190, 191, 144, + 151, 128, 130, 134, 136, 138, 141, 166, + 175, 128, 131, 133, 140, 142, 144, 146, + 168, 170, 185, 189, 255, 133, 137, 151, + 142, 148, 155, 159, 164, 165, 176, 255, + 128, 131, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 188, 191, 158, 128, + 132, 134, 136, 138, 141, 149, 150, 160, + 163, 166, 175, 177, 178, 129, 131, 133, + 140, 142, 144, 146, 186, 189, 255, 133, + 137, 143, 147, 152, 158, 164, 165, 176, + 185, 192, 255, 189, 130, 131, 133, 150, + 154, 177, 179, 187, 138, 150, 128, 134, + 143, 148, 152, 159, 166, 175, 178, 179, + 129, 186, 128, 142, 144, 153, 132, 138, + 141, 165, 167, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 185, 187, 189, 134, 128, 132, 136, 141, + 144, 153, 156, 159, 128, 181, 183, 185, + 152, 153, 160, 169, 190, 191, 128, 135, + 137, 172, 177, 191, 128, 132, 134, 151, + 153, 188, 134, 128, 129, 130, 131, 137, + 138, 139, 140, 141, 142, 143, 144, 153, + 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 173, 175, 176, 177, 178, 179, 181, + 182, 183, 188, 189, 190, 191, 132, 152, + 172, 184, 185, 187, 128, 191, 128, 137, + 144, 255, 158, 159, 134, 187, 136, 140, + 142, 143, 137, 151, 153, 142, 143, 158, + 159, 137, 177, 142, 143, 182, 183, 191, + 255, 128, 130, 133, 136, 150, 152, 255, + 145, 150, 151, 155, 156, 160, 168, 178, + 255, 128, 143, 160, 255, 182, 183, 190, + 255, 129, 255, 173, 174, 192, 255, 129, + 154, 160, 255, 171, 173, 185, 255, 128, + 140, 142, 148, 160, 180, 128, 147, 160, + 172, 174, 176, 178, 179, 148, 150, 152, + 155, 158, 159, 170, 255, 139, 141, 144, + 153, 160, 255, 184, 255, 128, 170, 176, + 255, 182, 255, 128, 158, 160, 171, 176, + 187, 134, 173, 176, 180, 128, 171, 176, + 255, 138, 143, 155, 255, 128, 155, 160, + 255, 159, 189, 190, 192, 255, 167, 128, + 137, 144, 153, 176, 189, 140, 143, 154, + 170, 180, 255, 180, 255, 128, 183, 128, + 137, 141, 189, 128, 136, 144, 146, 148, + 182, 184, 185, 128, 181, 187, 191, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 255, 190, 128, 180, + 182, 188, 130, 132, 134, 140, 144, 147, + 150, 155, 160, 172, 178, 180, 182, 188, + 128, 129, 130, 131, 132, 133, 134, 176, + 177, 178, 179, 180, 181, 182, 183, 191, + 255, 129, 147, 149, 176, 178, 190, 192, + 255, 144, 156, 161, 144, 156, 165, 176, + 130, 135, 149, 164, 166, 168, 138, 147, + 152, 157, 170, 185, 188, 191, 142, 133, + 137, 160, 255, 137, 255, 128, 174, 176, + 255, 159, 165, 170, 180, 255, 167, 173, + 128, 165, 176, 255, 168, 174, 176, 190, + 192, 255, 128, 150, 160, 166, 168, 174, + 176, 182, 184, 190, 128, 134, 136, 142, + 144, 150, 152, 158, 160, 191, 128, 129, + 130, 131, 132, 133, 134, 135, 144, 
145, + 255, 133, 135, 161, 175, 177, 181, 184, + 188, 160, 151, 152, 187, 192, 255, 133, + 173, 177, 255, 143, 159, 187, 255, 176, + 191, 182, 183, 184, 191, 192, 255, 150, + 255, 128, 146, 147, 148, 152, 153, 154, + 155, 156, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 129, 255, 141, + 255, 144, 189, 141, 143, 172, 255, 191, + 128, 175, 180, 189, 151, 159, 162, 255, + 175, 137, 138, 184, 255, 183, 255, 168, + 255, 128, 179, 188, 134, 143, 154, 159, + 184, 186, 190, 255, 128, 173, 176, 255, + 148, 159, 189, 255, 129, 142, 154, 159, + 191, 255, 128, 182, 128, 141, 144, 153, + 160, 182, 186, 255, 128, 130, 155, 157, + 160, 175, 178, 182, 129, 134, 137, 142, + 145, 150, 160, 166, 168, 174, 176, 255, + 155, 166, 175, 128, 170, 172, 173, 176, + 185, 158, 159, 160, 255, 164, 175, 135, + 138, 188, 255, 164, 169, 171, 172, 173, + 174, 175, 180, 181, 182, 183, 184, 185, + 187, 188, 189, 190, 191, 165, 186, 174, + 175, 154, 255, 190, 128, 134, 147, 151, + 157, 168, 170, 182, 184, 188, 128, 129, + 131, 132, 134, 255, 147, 255, 190, 255, + 144, 145, 136, 175, 188, 255, 128, 143, + 160, 175, 179, 180, 141, 143, 176, 180, + 182, 255, 189, 255, 191, 144, 153, 161, + 186, 129, 154, 166, 255, 191, 255, 130, + 135, 138, 143, 146, 151, 154, 156, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 161, + 169, 128, 129, 130, 131, 133, 135, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 153, 155, 178, + 179, 128, 139, 141, 166, 168, 186, 188, + 189, 191, 255, 142, 143, 158, 255, 187, + 255, 128, 180, 189, 128, 156, 160, 255, + 145, 159, 161, 255, 128, 159, 176, 255, + 139, 143, 187, 255, 128, 157, 160, 255, + 144, 132, 135, 150, 255, 158, 159, 170, + 175, 148, 151, 188, 255, 128, 167, 176, + 255, 164, 255, 183, 255, 128, 149, 160, + 167, 136, 188, 128, 133, 138, 181, 183, + 184, 191, 255, 150, 159, 183, 255, 128, + 158, 160, 178, 180, 181, 128, 149, 160, + 185, 128, 183, 190, 191, 191, 128, 131, + 133, 134, 140, 147, 149, 151, 153, 179, + 184, 186, 160, 188, 128, 156, 128, 135, + 137, 166, 128, 181, 128, 149, 160, 178, + 128, 145, 128, 178, 129, 130, 131, 132, + 133, 135, 136, 138, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 155, 156, 162, 163, 171, 176, 177, 178, + 128, 134, 135, 165, 176, 190, 144, 168, + 176, 185, 128, 180, 182, 191, 182, 144, + 179, 155, 133, 137, 141, 143, 157, 255, + 190, 128, 145, 147, 183, 136, 128, 134, + 138, 141, 143, 157, 159, 168, 176, 255, + 171, 175, 186, 255, 128, 131, 133, 140, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 191, 144, 151, 128, 132, + 135, 136, 139, 141, 157, 163, 166, 172, + 176, 180, 128, 138, 144, 153, 134, 136, + 143, 154, 255, 128, 181, 184, 255, 129, + 151, 158, 255, 129, 131, 133, 143, 154, + 255, 128, 137, 128, 153, 157, 171, 176, + 185, 160, 255, 170, 190, 192, 255, 128, + 184, 128, 136, 138, 182, 184, 191, 128, + 144, 153, 178, 255, 168, 144, 145, 183, + 255, 128, 142, 145, 149, 129, 141, 144, + 146, 147, 148, 175, 255, 132, 255, 128, + 144, 129, 143, 144, 153, 145, 152, 135, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 185, 255, + 128, 158, 160, 169, 144, 173, 176, 180, + 128, 131, 144, 153, 163, 183, 189, 255, + 144, 255, 133, 143, 191, 255, 143, 159, + 160, 128, 129, 255, 159, 160, 171, 172, + 255, 173, 255, 179, 255, 128, 176, 177, + 178, 128, 129, 171, 175, 189, 255, 128, + 136, 144, 153, 157, 
158, 133, 134, 137, + 144, 145, 146, 147, 148, 149, 154, 155, + 156, 157, 158, 159, 168, 169, 170, 150, + 153, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 139, + 140, 191, 255, 134, 128, 132, 138, 144, + 146, 255, 166, 167, 129, 155, 187, 149, + 181, 143, 175, 137, 169, 131, 140, 141, + 192, 255, 128, 182, 187, 255, 173, 180, + 182, 255, 132, 155, 159, 161, 175, 128, + 160, 163, 164, 165, 184, 185, 186, 161, + 162, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 133, 143, 151, 255, 139, + 143, 154, 255, 164, 167, 185, 187, 128, + 131, 133, 159, 161, 162, 169, 178, 180, + 183, 130, 135, 137, 139, 148, 151, 153, + 155, 157, 159, 164, 190, 141, 143, 145, + 146, 161, 162, 167, 170, 172, 178, 180, + 183, 185, 188, 128, 137, 139, 155, 161, + 163, 165, 169, 171, 187, 155, 156, 151, + 255, 156, 157, 160, 181, 255, 186, 187, + 255, 162, 255, 160, 168, 161, 167, 158, + 255, 160, 132, 135, 133, 134, 176, 255, + 128, 191, 154, 164, 168, 128, 149, 150, + 191, 128, 152, 153, 191, 181, 128, 159, + 160, 189, 190, 191, 189, 128, 131, 132, + 185, 186, 191, 144, 128, 151, 152, 161, + 162, 176, 177, 255, 169, 177, 129, 132, + 141, 142, 145, 146, 179, 181, 186, 188, + 190, 191, 192, 255, 142, 158, 128, 155, + 156, 161, 162, 175, 176, 177, 178, 191, + 169, 177, 180, 183, 128, 132, 133, 138, + 139, 142, 143, 144, 145, 146, 147, 185, + 186, 191, 157, 128, 152, 153, 158, 159, + 177, 178, 180, 181, 191, 142, 146, 169, + 177, 180, 189, 128, 132, 133, 185, 186, + 191, 144, 185, 128, 159, 160, 161, 162, + 191, 169, 177, 180, 189, 128, 132, 133, + 140, 141, 142, 143, 144, 145, 146, 147, + 185, 186, 191, 158, 177, 128, 155, 156, + 161, 162, 191, 131, 145, 155, 157, 128, + 132, 133, 138, 139, 141, 142, 149, 150, + 152, 153, 159, 160, 162, 163, 164, 165, + 167, 168, 170, 171, 173, 174, 185, 186, + 191, 144, 128, 191, 141, 145, 169, 189, + 128, 132, 133, 185, 186, 191, 128, 151, + 152, 154, 155, 159, 160, 161, 162, 191, + 128, 141, 145, 169, 180, 189, 129, 132, + 133, 185, 186, 191, 158, 128, 159, 160, + 161, 162, 176, 177, 178, 179, 191, 141, + 145, 189, 128, 132, 133, 186, 187, 191, + 142, 128, 147, 148, 150, 151, 158, 159, + 161, 162, 185, 186, 191, 178, 188, 128, + 132, 133, 150, 151, 153, 154, 189, 190, + 191, 128, 134, 135, 191, 128, 177, 129, + 179, 180, 191, 128, 131, 137, 141, 152, + 160, 164, 166, 172, 177, 189, 129, 132, + 133, 134, 135, 138, 139, 147, 148, 167, + 168, 169, 170, 179, 180, 191, 133, 128, + 134, 135, 155, 156, 159, 160, 191, 128, + 129, 191, 136, 128, 172, 173, 191, 128, + 135, 136, 140, 141, 191, 191, 128, 170, + 171, 190, 161, 128, 143, 144, 149, 150, + 153, 154, 157, 158, 164, 165, 166, 167, + 173, 174, 176, 177, 180, 181, 255, 130, + 141, 143, 159, 134, 187, 136, 140, 142, + 143, 137, 151, 153, 142, 143, 158, 159, + 137, 177, 191, 142, 143, 182, 183, 192, + 255, 129, 151, 128, 133, 134, 135, 136, + 255, 145, 150, 151, 155, 191, 192, 255, + 128, 143, 144, 159, 160, 255, 182, 183, + 190, 191, 192, 255, 128, 129, 255, 173, + 174, 192, 255, 128, 129, 154, 155, 159, + 160, 255, 171, 173, 185, 191, 192, 255, + 141, 128, 145, 146, 159, 160, 177, 178, + 191, 173, 128, 145, 146, 159, 160, 176, + 177, 191, 128, 179, 180, 191, 151, 156, + 128, 191, 128, 159, 160, 255, 184, 191, + 192, 255, 169, 128, 170, 171, 175, 176, + 255, 182, 191, 192, 255, 128, 158, 159, + 191, 128, 143, 144, 173, 174, 175, 176, + 180, 181, 191, 128, 171, 172, 175, 176, + 255, 138, 191, 192, 255, 128, 150, 151, + 159, 
160, 255, 149, 191, 192, 255, 167, + 128, 191, 128, 132, 133, 179, 180, 191, + 128, 132, 133, 139, 140, 191, 128, 130, + 131, 160, 161, 173, 174, 175, 176, 185, + 186, 255, 166, 191, 192, 255, 128, 163, + 164, 191, 128, 140, 141, 143, 144, 153, + 154, 189, 190, 191, 128, 136, 137, 191, + 173, 128, 168, 169, 177, 178, 180, 181, + 182, 183, 191, 0, 127, 192, 255, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 191, 192, 255, 181, + 189, 191, 128, 190, 133, 181, 128, 129, + 130, 140, 141, 143, 144, 147, 148, 149, + 150, 155, 156, 159, 160, 172, 173, 177, + 178, 188, 189, 191, 177, 191, 128, 190, + 128, 143, 144, 156, 157, 191, 130, 135, + 148, 164, 166, 168, 128, 137, 138, 149, + 150, 151, 152, 157, 158, 169, 170, 185, + 186, 187, 188, 191, 142, 128, 132, 133, + 137, 138, 159, 160, 255, 137, 191, 192, + 255, 175, 128, 255, 159, 165, 170, 175, + 177, 180, 191, 192, 255, 166, 173, 128, + 167, 168, 175, 176, 255, 168, 174, 176, + 191, 192, 255, 167, 175, 183, 191, 128, + 150, 151, 159, 160, 190, 135, 143, 151, + 128, 158, 159, 191, 128, 132, 133, 135, + 136, 160, 161, 169, 170, 176, 177, 181, + 182, 183, 184, 188, 189, 191, 160, 151, + 154, 187, 192, 255, 128, 132, 133, 173, + 174, 176, 177, 255, 143, 159, 187, 191, + 192, 255, 128, 175, 176, 191, 150, 191, + 192, 255, 141, 191, 192, 255, 128, 143, + 144, 189, 190, 191, 141, 143, 160, 169, + 172, 191, 192, 255, 191, 128, 174, 175, + 190, 128, 157, 158, 159, 160, 255, 176, + 191, 192, 255, 128, 150, 151, 159, 160, + 161, 162, 255, 175, 137, 138, 184, 191, + 192, 255, 128, 182, 183, 255, 130, 134, + 139, 163, 191, 192, 255, 128, 129, 130, + 179, 180, 191, 187, 189, 128, 177, 178, + 183, 184, 191, 128, 137, 138, 165, 166, + 175, 176, 255, 135, 159, 189, 191, 192, + 255, 128, 131, 132, 178, 179, 191, 143, + 165, 191, 128, 159, 160, 175, 176, 185, + 186, 190, 128, 168, 169, 191, 131, 186, + 128, 139, 140, 159, 160, 182, 183, 189, + 190, 255, 176, 178, 180, 183, 184, 190, + 191, 192, 255, 129, 128, 130, 131, 154, + 155, 157, 158, 159, 160, 170, 171, 177, + 178, 180, 181, 191, 128, 167, 175, 129, + 134, 135, 136, 137, 142, 143, 144, 145, + 150, 151, 159, 160, 255, 155, 166, 175, + 128, 162, 163, 191, 164, 175, 135, 138, + 188, 191, 192, 255, 174, 175, 154, 191, + 192, 255, 157, 169, 183, 189, 191, 128, + 134, 135, 146, 147, 151, 152, 158, 159, + 190, 130, 133, 128, 255, 178, 191, 192, + 255, 128, 146, 147, 255, 190, 191, 192, + 255, 128, 143, 144, 255, 144, 145, 136, + 175, 188, 191, 192, 255, 181, 128, 175, + 176, 255, 189, 191, 192, 255, 128, 160, + 161, 186, 187, 191, 128, 129, 154, 155, + 165, 166, 255, 191, 192, 255, 128, 129, + 130, 135, 136, 137, 138, 143, 144, 145, + 146, 151, 152, 153, 154, 156, 157, 191, + 128, 191, 128, 129, 130, 131, 133, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 132, 151, 153, + 155, 158, 175, 178, 179, 180, 191, 140, + 167, 187, 190, 128, 255, 142, 143, 158, + 191, 192, 255, 187, 191, 192, 255, 128, + 180, 181, 191, 128, 156, 157, 159, 160, + 255, 145, 191, 192, 255, 128, 159, 160, + 175, 176, 255, 139, 143, 182, 191, 192, + 255, 144, 132, 135, 150, 191, 192, 255, + 158, 175, 148, 151, 188, 191, 192, 255, + 128, 167, 168, 175, 176, 255, 164, 191, + 192, 255, 183, 191, 192, 255, 128, 149, + 150, 159, 160, 167, 168, 191, 136, 182, + 188, 128, 133, 134, 137, 138, 184, 185, + 190, 191, 255, 150, 159, 183, 191, 192, + 255, 179, 128, 159, 160, 181, 182, 191, + 128, 149, 150, 159, 160, 185, 
186, 191, + 128, 183, 184, 189, 190, 191, 128, 148, + 152, 129, 143, 144, 179, 180, 191, 128, + 159, 160, 188, 189, 191, 128, 156, 157, + 191, 136, 128, 164, 165, 191, 128, 181, + 182, 191, 128, 149, 150, 159, 160, 178, + 179, 191, 128, 145, 146, 191, 128, 178, + 179, 191, 128, 130, 131, 132, 133, 134, + 135, 136, 138, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 162, 163, 171, 176, 177, 178, 129, 191, + 128, 130, 131, 183, 184, 191, 128, 130, + 131, 175, 176, 191, 128, 143, 144, 168, + 169, 191, 128, 130, 131, 166, 167, 191, + 182, 128, 143, 144, 178, 179, 191, 128, + 130, 131, 178, 179, 191, 128, 154, 156, + 129, 132, 133, 191, 146, 128, 171, 172, + 191, 135, 137, 142, 158, 128, 168, 169, + 175, 176, 255, 159, 191, 192, 255, 144, + 128, 156, 157, 161, 162, 191, 128, 134, + 135, 138, 139, 191, 128, 175, 176, 191, + 134, 128, 131, 132, 135, 136, 191, 128, + 174, 175, 191, 128, 151, 152, 155, 156, + 191, 132, 128, 191, 128, 170, 171, 191, + 128, 153, 154, 191, 160, 190, 192, 255, + 128, 184, 185, 191, 137, 128, 174, 175, + 191, 128, 129, 177, 178, 255, 144, 191, + 192, 255, 128, 142, 143, 144, 145, 146, + 149, 129, 148, 150, 191, 175, 191, 192, + 255, 132, 191, 192, 255, 128, 144, 129, + 143, 145, 191, 144, 153, 128, 143, 145, + 152, 154, 191, 135, 191, 192, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 128, 159, 161, 167, 170, 187, + 185, 191, 192, 255, 128, 143, 144, 173, + 174, 191, 128, 131, 132, 162, 163, 183, + 184, 188, 189, 255, 133, 143, 145, 191, + 192, 255, 128, 146, 147, 159, 160, 191, + 160, 128, 191, 128, 129, 191, 192, 255, + 159, 160, 171, 128, 170, 172, 191, 192, + 255, 173, 191, 192, 255, 179, 191, 192, + 255, 128, 176, 177, 178, 129, 191, 128, + 129, 130, 191, 171, 175, 189, 191, 192, + 255, 128, 136, 137, 143, 144, 153, 154, + 191, 144, 145, 146, 147, 148, 149, 154, + 155, 156, 157, 158, 159, 128, 143, 150, + 153, 160, 191, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 191, 139, 140, 192, 255, + 133, 145, 128, 134, 135, 137, 138, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 191, 192, 255, + 160, 163, 164, 165, 184, 185, 186, 128, + 159, 161, 162, 166, 191, 133, 191, 192, + 255, 132, 160, 163, 167, 179, 184, 186, + 128, 164, 165, 168, 169, 187, 188, 191, + 130, 135, 137, 139, 144, 147, 151, 153, + 155, 157, 159, 163, 171, 179, 184, 189, + 191, 128, 140, 141, 148, 149, 160, 161, + 164, 165, 166, 167, 190, 138, 164, 170, + 128, 155, 156, 160, 161, 187, 188, 191, + 128, 191, 155, 156, 128, 191, 151, 191, + 192, 255, 156, 157, 160, 128, 191, 181, + 191, 192, 255, 158, 159, 186, 128, 185, + 187, 191, 192, 255, 162, 191, 192, 255, + 160, 168, 128, 159, 161, 167, 169, 191, + 158, 191, 192, 255, 9, 10, 13, 32, + 33, 34, 35, 38, 46, 47, 60, 61, + 62, 64, 92, 95, 123, 124, 125, 126, + 127, 194, 195, 198, 199, 203, 204, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 238, 239, + 240, 0, 36, 37, 45, 48, 57, 58, + 63, 65, 90, 91, 96, 97, 122, 192, + 193, 196, 218, 229, 236, 241, 247, 9, + 32, 10, 61, 10, 38, 46, 42, 47, + 46, 69, 101, 48, 57, 60, 61, 61, + 62, 61, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 218, 229, 236, 124, 125, + 128, 191, 170, 181, 186, 128, 191, 151, + 183, 128, 255, 192, 255, 0, 127, 173, + 130, 133, 146, 159, 165, 171, 
175, 191, + 192, 255, 181, 190, 128, 175, 176, 183, + 184, 185, 186, 191, 134, 139, 141, 162, + 128, 135, 136, 255, 182, 130, 137, 176, + 151, 152, 154, 160, 136, 191, 192, 255, + 128, 143, 144, 170, 171, 175, 176, 178, + 179, 191, 128, 159, 160, 191, 176, 128, + 138, 139, 173, 174, 255, 148, 150, 164, + 167, 173, 176, 185, 189, 190, 192, 255, + 144, 128, 145, 146, 175, 176, 191, 128, + 140, 141, 255, 166, 176, 178, 191, 192, + 255, 186, 128, 137, 138, 170, 171, 179, + 180, 181, 182, 191, 160, 161, 162, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 128, 191, 128, 129, 130, 131, + 137, 138, 139, 140, 141, 142, 143, 144, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 182, 183, 184, 188, + 189, 190, 191, 132, 187, 129, 130, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 128, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 136, 143, 145, + 191, 192, 255, 182, 183, 184, 128, 191, + 128, 191, 191, 128, 190, 192, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 191, 192, 255, 158, + 159, 128, 157, 160, 191, 192, 255, 128, + 191, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 128, 163, 165, 186, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 128, + 159, 161, 169, 173, 191, 128, 191, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 92, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 123, 123, 126, 126, 37, 123, + 126, 10, 13, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 128, 191, 128, + 191, 128, 191, 10, 13, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 10, 13, 123, 10, 13, 126, 10, + 13, 126, 126, 128, 191, 128, 191, 128, + 191, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 37, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 10, + 13, 123, 10, 13, 126, 10, 13, 126, + 126, 128, 191, 128, 191, 128, 191, 95, + 194, 195, 198, 199, 203, 204, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 238, 239, 240, + 65, 90, 97, 122, 128, 191, 192, 193, + 196, 218, 229, 236, 241, 247, 248, 255, + 45, 95, 194, 195, 198, 199, 203, 204, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 243, 48, 57, 65, 90, 97, 122, + 196, 218, 229, 236, 128, 191, 170, 181, + 186, 128, 191, 151, 183, 128, 255, 192, + 255, 0, 127, 173, 130, 133, 146, 159, + 165, 171, 175, 191, 192, 255, 181, 190, + 128, 175, 176, 183, 184, 185, 186, 191, + 134, 139, 141, 162, 128, 135, 136, 255, + 182, 130, 137, 176, 151, 152, 154, 160, + 136, 191, 192, 255, 128, 143, 144, 170, + 171, 175, 176, 178, 179, 191, 128, 159, + 160, 191, 176, 128, 138, 139, 173, 174, + 255, 148, 150, 164, 167, 173, 176, 185, + 189, 190, 192, 255, 144, 128, 145, 146, + 175, 176, 191, 128, 140, 141, 255, 166, + 176, 178, 191, 192, 255, 186, 128, 137, + 138, 170, 171, 179, 180, 181, 182, 191, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 
184, + 185, 186, 187, 188, 189, 190, 128, 191, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 182, 183, 184, 188, 189, 190, 191, 132, + 187, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 128, 191, + 128, 129, 130, 131, 132, 133, 134, 135, + 144, 136, 143, 145, 191, 192, 255, 182, + 183, 184, 128, 191, 128, 191, 191, 128, + 190, 192, 255, 128, 146, 147, 148, 152, + 153, 154, 155, 156, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 129, + 191, 192, 255, 158, 159, 128, 157, 160, + 191, 192, 255, 128, 191, 164, 169, 171, + 172, 173, 174, 175, 180, 181, 182, 183, + 184, 185, 187, 188, 189, 190, 191, 128, + 163, 165, 186, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 128, 159, 161, 169, 173, + 191, 128, 191, +} + +var _hcltok_single_lengths []byte = []byte{ + 0, 1, 1, 2, 3, 2, 0, 32, + 31, 36, 1, 4, 0, 0, 0, 0, + 1, 2, 1, 1, 1, 1, 0, 1, + 1, 0, 0, 2, 0, 0, 0, 1, + 32, 0, 0, 0, 0, 1, 3, 1, + 1, 1, 0, 2, 0, 1, 1, 2, + 0, 3, 0, 1, 0, 2, 1, 2, + 0, 0, 5, 1, 4, 0, 0, 1, + 43, 0, 0, 0, 2, 3, 2, 1, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 1, + 0, 15, 0, 0, 0, 1, 6, 1, + 0, 0, 1, 0, 2, 0, 0, 0, + 9, 0, 1, 1, 0, 0, 0, 3, + 0, 1, 0, 28, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 2, + 0, 0, 18, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 16, 36, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 2, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 28, 0, 0, 0, 1, + 1, 1, 1, 0, 0, 2, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 4, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 3, 0, 0, 4, 0, + 0, 0, 18, 0, 0, 0, 1, 4, + 1, 4, 1, 0, 3, 2, 2, 2, + 1, 0, 0, 1, 8, 0, 0, 0, + 4, 12, 0, 2, 0, 3, 0, 1, + 0, 2, 0, 1, 2, 0, 3, 1, + 2, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 1, 28, 3, 0, 1, 1, + 2, 1, 0, 1, 1, 2, 1, 1, + 2, 1, 1, 0, 2, 1, 1, 1, + 1, 0, 0, 6, 1, 1, 0, 0, + 46, 1, 1, 0, 0, 0, 0, 2, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 13, 2, 0, 0, + 0, 9, 0, 1, 28, 0, 1, 3, + 0, 2, 0, 0, 0, 1, 0, 1, + 1, 2, 0, 18, 2, 0, 0, 16, + 35, 0, 0, 0, 1, 0, 28, 0, + 0, 0, 0, 1, 0, 2, 0, 0, + 1, 0, 0, 1, 0, 0, 1, 0, + 0, 0, 0, 1, 11, 0, 0, 0, + 0, 4, 0, 12, 1, 7, 0, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 
3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 1, 1, 3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 2, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 2, 2, 1, 0, 0, 0, + 2, 2, 4, 0, 0, 0, 0, 1, + 2, 1, 1, 1, 1, 0, 1, 1, + 0, 0, 2, 0, 0, 0, 1, 32, + 0, 0, 0, 0, 1, 3, 1, 1, + 1, 0, 2, 0, 1, 1, 2, 0, + 3, 0, 1, 0, 2, 1, 2, 0, + 0, 5, 1, 4, 0, 0, 1, 43, + 0, 0, 0, 2, 3, 2, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 1, 0, + 15, 0, 0, 0, 1, 6, 1, 0, + 0, 1, 0, 2, 0, 0, 0, 9, + 0, 1, 1, 0, 0, 0, 3, 0, + 1, 0, 28, 0, 0, 0, 1, 0, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 18, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 16, 36, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 2, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 28, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 2, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 4, 0, 0, 2, 2, 0, + 11, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 3, 0, 0, 4, 0, 0, + 0, 18, 0, 0, 0, 1, 4, 1, + 4, 1, 0, 3, 2, 2, 2, 1, + 0, 0, 1, 8, 0, 0, 0, 4, + 12, 0, 2, 0, 3, 0, 1, 0, + 2, 0, 1, 2, 0, 0, 3, 0, + 1, 1, 1, 2, 2, 4, 1, 6, + 2, 4, 2, 4, 1, 4, 0, 6, + 1, 3, 1, 2, 0, 2, 11, 1, + 1, 1, 0, 1, 1, 0, 2, 0, + 3, 3, 2, 1, 0, 0, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 4, 3, 2, + 2, 0, 6, 1, 0, 1, 1, 0, + 2, 0, 4, 3, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 1, 0, 3, 0, 2, 0, + 0, 0, 3, 0, 2, 1, 1, 3, + 1, 0, 0, 0, 0, 0, 5, 2, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 1, 1, 0, 0, 35, 4, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 3, 0, 1, + 0, 0, 3, 0, 0, 1, 0, 0, + 0, 0, 28, 0, 0, 0, 0, 1, + 0, 3, 1, 4, 0, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 0, + 1, 1, 0, 7, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 1, + 1, 3, 0, 0, 4, 0, 0, 0, + 12, 1, 4, 1, 5, 2, 0, 3, + 2, 2, 2, 1, 7, 0, 7, 17, + 3, 0, 2, 0, 3, 0, 0, 1, + 0, 2, 0, 53, 2, 1, 1, 1, + 1, 1, 2, 3, 2, 2, 1, 34, + 1, 1, 0, 3, 2, 0, 0, 0, + 1, 2, 4, 1, 0, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 30, 47, 13, 9, 3, 0, 1, 28, + 2, 0, 18, 16, 0, 6, 4, 2, + 2, 0, 1, 1, 1, 2, 1, 2, + 0, 0, 0, 4, 2, 2, 3, 3, + 2, 1, 1, 0, 0, 0, 4, 2, + 2, 3, 3, 2, 1, 1, 0, 0, + 0, 33, 34, 0, 3, 2, 0, 0, + 0, 1, 2, 4, 1, 0, 1, 0, + 0, 0, 0, 1, 1, 1, 0, 0, + 1, 30, 47, 13, 9, 3, 0, 1, + 28, 2, 0, 18, 16, 0, +} + +var _hcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 1, 1, 1, 5, + 5, 5, 0, 0, 3, 0, 1, 1, + 4, 2, 3, 0, 1, 0, 2, 2, + 4, 2, 2, 3, 1, 1, 1, 1, + 0, 1, 1, 2, 2, 1, 4, 6, + 9, 6, 8, 5, 8, 7, 10, 4, + 6, 4, 7, 7, 5, 5, 4, 5, + 1, 2, 8, 4, 3, 3, 3, 0, + 3, 1, 2, 1, 2, 2, 3, 3, + 1, 3, 2, 2, 1, 2, 2, 2, + 3, 4, 4, 3, 1, 2, 1, 3, + 2, 2, 2, 2, 2, 3, 3, 1, + 1, 2, 1, 3, 2, 2, 3, 2, + 7, 0, 1, 4, 1, 2, 4, 2, + 1, 2, 0, 2, 2, 3, 5, 5, + 1, 4, 1, 1, 2, 2, 1, 0, + 0, 1, 1, 1, 1, 1, 2, 2, + 2, 2, 1, 1, 1, 4, 2, 2, + 3, 1, 4, 4, 6, 1, 3, 1, + 1, 2, 1, 1, 1, 5, 3, 1, + 1, 1, 2, 3, 3, 1, 2, 2, + 1, 4, 1, 2, 5, 2, 1, 1, + 0, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 2, 4, 2, 1, + 2, 2, 2, 6, 1, 1, 2, 1, + 2, 1, 1, 1, 2, 2, 2, 1, + 3, 2, 5, 2, 8, 6, 2, 2, + 2, 2, 3, 1, 3, 1, 2, 1, + 3, 2, 2, 3, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 4, 1, 2, + 1, 0, 1, 1, 1, 1, 0, 1, 
+ 2, 3, 1, 3, 3, 1, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 2, 2, 2, 1, 5, 2, 2, + 5, 7, 5, 0, 1, 0, 1, 1, + 1, 1, 1, 0, 1, 1, 0, 3, + 3, 1, 1, 2, 1, 3, 5, 1, + 1, 2, 2, 1, 1, 1, 1, 2, + 6, 3, 7, 2, 6, 1, 6, 2, + 8, 0, 4, 2, 5, 2, 3, 3, + 3, 1, 2, 8, 2, 0, 2, 1, + 2, 1, 5, 2, 1, 3, 3, 0, + 2, 1, 2, 1, 0, 1, 1, 3, + 1, 1, 2, 3, 0, 0, 3, 2, + 4, 1, 4, 1, 1, 3, 1, 1, + 1, 1, 2, 2, 1, 3, 1, 4, + 3, 3, 1, 1, 5, 2, 1, 1, + 2, 1, 2, 1, 3, 2, 0, 1, + 1, 1, 1, 1, 1, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 0, 1, + 1, 2, 2, 1, 1, 1, 3, 2, + 1, 0, 2, 1, 1, 1, 1, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 5, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 3, 0, 1, 1, 4, + 2, 3, 0, 1, 0, 2, 2, 4, + 2, 2, 3, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 4, 6, 9, + 6, 8, 5, 8, 7, 10, 4, 6, + 4, 7, 7, 5, 5, 4, 5, 1, + 2, 8, 4, 3, 3, 3, 0, 3, + 1, 2, 1, 2, 2, 3, 3, 1, + 3, 2, 2, 1, 2, 2, 2, 3, + 4, 4, 3, 1, 2, 1, 3, 2, + 2, 2, 2, 2, 3, 3, 1, 1, + 2, 1, 3, 2, 2, 3, 2, 7, + 0, 1, 4, 1, 2, 4, 2, 1, + 2, 0, 2, 2, 3, 5, 5, 1, + 4, 1, 1, 2, 2, 1, 0, 0, + 1, 1, 1, 1, 1, 2, 2, 2, + 2, 1, 1, 1, 4, 2, 2, 3, + 1, 4, 4, 6, 1, 3, 1, 1, + 2, 1, 1, 1, 5, 3, 1, 1, + 1, 2, 3, 3, 1, 2, 2, 1, + 4, 1, 2, 5, 2, 1, 1, 0, + 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 1, 2, 4, 2, 1, 2, + 2, 2, 6, 1, 1, 2, 1, 2, + 1, 1, 1, 2, 2, 2, 1, 3, + 2, 5, 2, 8, 6, 2, 2, 2, + 2, 3, 1, 3, 1, 2, 1, 3, + 2, 2, 3, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 4, 1, 2, 1, + 0, 1, 1, 1, 1, 0, 1, 2, + 3, 1, 3, 3, 1, 0, 3, 0, + 2, 3, 1, 0, 0, 0, 0, 2, + 2, 2, 2, 1, 5, 2, 2, 5, + 7, 5, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 1, 1, 1, 2, 2, + 3, 3, 4, 7, 5, 7, 5, 3, + 3, 7, 3, 13, 1, 3, 5, 3, + 5, 3, 6, 5, 2, 2, 8, 4, + 1, 2, 3, 2, 10, 2, 2, 0, + 2, 3, 3, 1, 2, 3, 3, 1, + 2, 3, 3, 4, 4, 2, 1, 2, + 2, 3, 2, 2, 5, 3, 2, 3, + 2, 1, 3, 3, 6, 2, 2, 5, + 2, 5, 1, 1, 2, 4, 1, 11, + 1, 3, 8, 4, 2, 1, 0, 4, + 3, 3, 3, 2, 9, 1, 1, 4, + 3, 2, 2, 2, 3, 4, 2, 3, + 2, 4, 3, 2, 2, 3, 3, 4, + 3, 3, 4, 2, 5, 4, 8, 7, + 1, 2, 1, 
3, 1, 2, 5, 1, + 2, 2, 2, 2, 1, 3, 2, 2, + 3, 3, 1, 9, 1, 5, 1, 3, + 2, 2, 3, 2, 3, 3, 3, 1, + 3, 3, 2, 2, 4, 5, 3, 3, + 4, 3, 3, 3, 2, 2, 2, 4, + 2, 2, 1, 3, 3, 3, 3, 3, + 3, 2, 2, 3, 2, 3, 3, 2, + 3, 2, 3, 1, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 3, + 2, 3, 2, 3, 5, 3, 3, 1, + 2, 3, 2, 2, 1, 2, 3, 4, + 3, 0, 3, 0, 2, 3, 1, 0, + 0, 0, 0, 2, 3, 2, 4, 6, + 4, 1, 1, 2, 1, 2, 1, 3, + 2, 3, 2, 11, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 5, + 0, 0, 1, 1, 1, 0, 1, 1, + 5, 4, 2, 0, 1, 0, 2, 2, + 5, 2, 3, 5, 3, 2, 3, 5, + 1, 1, 1, 3, 1, 1, 2, 2, + 3, 1, 2, 3, 1, 5, 6, 0, + 0, 0, 0, 0, 0, 0, 0, 5, + 1, 1, 1, 5, 6, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 0, 0, 0, 0, 1, 1, + 1, 8, 5, 1, 1, 1, 0, 1, + 1, 5, 4, 2, 0, 1, 0, 2, + 2, 5, 2, 3, 5, 3, 2, 3, + 5, 1, 1, 1, 3, 1, 1, 2, + 2, 3, 1, 2, 3, 1, +} + +var _hcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 7, 12, 16, 18, + 56, 93, 135, 137, 142, 146, 147, 149, + 151, 157, 162, 167, 169, 172, 174, 177, + 181, 187, 190, 193, 199, 201, 203, 205, + 208, 241, 243, 245, 248, 251, 254, 262, + 270, 281, 289, 298, 306, 315, 324, 336, + 343, 350, 358, 366, 375, 381, 389, 395, + 403, 405, 408, 422, 428, 436, 440, 444, + 446, 493, 495, 498, 500, 505, 511, 517, + 522, 525, 529, 532, 535, 537, 540, 543, + 546, 550, 555, 560, 564, 566, 569, 571, + 575, 578, 581, 584, 587, 591, 596, 600, + 602, 604, 607, 609, 613, 616, 619, 627, + 631, 639, 655, 657, 662, 664, 668, 679, + 683, 685, 688, 690, 693, 698, 702, 708, + 714, 725, 730, 733, 736, 739, 742, 744, + 748, 749, 752, 754, 784, 786, 788, 791, + 795, 798, 802, 804, 806, 808, 814, 817, + 820, 824, 826, 831, 836, 843, 846, 850, + 854, 856, 859, 879, 881, 883, 890, 894, + 896, 898, 900, 903, 907, 911, 913, 917, + 920, 922, 927, 945, 984, 990, 993, 995, + 997, 999, 1002, 1005, 1008, 1011, 1014, 1018, + 1021, 1024, 1027, 1029, 1031, 1034, 1041, 1044, + 1046, 1049, 1052, 1055, 1063, 1065, 1067, 1070, + 1072, 1075, 1077, 1079, 1109, 1112, 1115, 1118, + 1121, 1126, 1130, 1137, 1140, 1149, 1158, 1161, + 1165, 1168, 1171, 1175, 1177, 1181, 1183, 1186, + 1188, 1192, 1196, 1200, 1208, 1210, 1212, 1216, + 1220, 1222, 1235, 1237, 1240, 1243, 1248, 1250, + 1253, 1255, 1257, 1260, 1265, 1267, 1269, 1274, + 1276, 1279, 1283, 1303, 1307, 1311, 1313, 1315, + 1323, 1325, 1332, 1337, 1339, 1343, 1346, 1349, + 1352, 1356, 1359, 1362, 1366, 1376, 1382, 1385, + 1388, 1398, 1418, 1424, 1427, 1429, 1433, 1435, + 1438, 1440, 1444, 1446, 1448, 1452, 1454, 1458, + 1463, 1469, 1471, 1473, 1476, 1478, 1482, 1489, + 1492, 1494, 1497, 1501, 1531, 1536, 1538, 1541, + 1545, 1554, 1559, 1567, 1571, 1579, 1583, 1591, + 1595, 1606, 1608, 1614, 1617, 1625, 1629, 1634, + 1639, 1644, 1646, 1649, 1664, 1668, 1670, 1673, + 1675, 1724, 1727, 1734, 1737, 1739, 1743, 1747, + 1750, 1754, 1756, 1759, 1761, 1763, 1765, 1767, + 1771, 1773, 1775, 1778, 1782, 1796, 1799, 1803, + 1806, 1811, 1822, 1827, 1830, 1860, 1864, 1867, + 1872, 1874, 1878, 1881, 1884, 1886, 1891, 1893, + 1899, 1904, 1910, 1912, 1932, 1940, 1943, 1945, + 1963, 2001, 2003, 2006, 2008, 2013, 2016, 2045, + 2047, 2049, 2051, 2053, 2056, 2058, 2062, 2065, + 2067, 2070, 2072, 2074, 2077, 2079, 2081, 2083, + 2085, 2087, 2090, 2093, 2096, 2109, 2111, 2115, + 2118, 2120, 2125, 2128, 2142, 2145, 2154, 2156, + 2161, 2165, 2166, 2168, 2170, 2176, 2181, 2186, + 2188, 2191, 2193, 2196, 2200, 2206, 2209, 2212, + 2218, 2220, 2222, 2224, 2227, 2260, 2262, 2264, + 2267, 2270, 2273, 2281, 2289, 2300, 2308, 2317, + 2325, 2334, 2343, 2355, 2362, 2369, 2377, 2385, + 2394, 2400, 2408, 2414, 2422, 2424, 2427, 2441, + 
2447, 2455, 2459, 2463, 2465, 2512, 2514, 2517, + 2519, 2524, 2530, 2536, 2541, 2544, 2548, 2551, + 2554, 2556, 2559, 2562, 2565, 2569, 2574, 2579, + 2583, 2585, 2588, 2590, 2594, 2597, 2600, 2603, + 2606, 2610, 2615, 2619, 2621, 2623, 2626, 2628, + 2632, 2635, 2638, 2646, 2650, 2658, 2674, 2676, + 2681, 2683, 2687, 2698, 2702, 2704, 2707, 2709, + 2712, 2717, 2721, 2727, 2733, 2744, 2749, 2752, + 2755, 2758, 2761, 2763, 2767, 2768, 2771, 2773, + 2803, 2805, 2807, 2810, 2814, 2817, 2821, 2823, + 2825, 2827, 2833, 2836, 2839, 2843, 2845, 2850, + 2855, 2862, 2865, 2869, 2873, 2875, 2878, 2898, + 2900, 2902, 2909, 2913, 2915, 2917, 2919, 2922, + 2926, 2930, 2932, 2936, 2939, 2941, 2946, 2964, + 3003, 3009, 3012, 3014, 3016, 3018, 3021, 3024, + 3027, 3030, 3033, 3037, 3040, 3043, 3046, 3048, + 3050, 3053, 3060, 3063, 3065, 3068, 3071, 3074, + 3082, 3084, 3086, 3089, 3091, 3094, 3096, 3098, + 3128, 3131, 3134, 3137, 3140, 3145, 3149, 3156, + 3159, 3168, 3177, 3180, 3184, 3187, 3190, 3194, + 3196, 3200, 3202, 3205, 3207, 3211, 3215, 3219, + 3227, 3229, 3231, 3235, 3239, 3241, 3254, 3256, + 3259, 3262, 3267, 3269, 3272, 3274, 3276, 3279, + 3284, 3286, 3288, 3293, 3295, 3298, 3302, 3322, + 3326, 3330, 3332, 3334, 3342, 3344, 3351, 3356, + 3358, 3362, 3365, 3368, 3371, 3375, 3378, 3381, + 3385, 3395, 3401, 3404, 3407, 3417, 3437, 3443, + 3446, 3448, 3452, 3454, 3457, 3459, 3463, 3465, + 3467, 3471, 3473, 3475, 3481, 3484, 3489, 3494, + 3500, 3510, 3518, 3530, 3537, 3547, 3553, 3565, + 3571, 3589, 3592, 3600, 3606, 3616, 3623, 3630, + 3638, 3646, 3649, 3654, 3674, 3680, 3683, 3687, + 3691, 3695, 3707, 3710, 3715, 3716, 3722, 3729, + 3735, 3738, 3741, 3745, 3749, 3752, 3755, 3760, + 3764, 3770, 3776, 3779, 3783, 3786, 3789, 3794, + 3797, 3800, 3806, 3810, 3813, 3817, 3820, 3823, + 3827, 3831, 3838, 3841, 3844, 3850, 3853, 3860, + 3862, 3864, 3867, 3876, 3881, 3895, 3899, 3903, + 3918, 3924, 3927, 3930, 3932, 3937, 3943, 3947, + 3955, 3961, 3971, 3974, 3977, 3982, 3986, 3989, + 3992, 3995, 3999, 4004, 4008, 4012, 4015, 4020, + 4025, 4028, 4034, 4038, 4044, 4049, 4053, 4057, + 4065, 4068, 4076, 4082, 4092, 4103, 4106, 4109, + 4111, 4115, 4117, 4120, 4131, 4135, 4138, 4141, + 4144, 4147, 4149, 4153, 4157, 4160, 4164, 4169, + 4172, 4182, 4184, 4225, 4231, 4235, 4238, 4241, + 4245, 4248, 4252, 4256, 4261, 4263, 4267, 4271, + 4274, 4277, 4282, 4291, 4295, 4300, 4305, 4309, + 4316, 4320, 4323, 4327, 4330, 4335, 4338, 4341, + 4371, 4375, 4379, 4383, 4387, 4392, 4396, 4402, + 4406, 4414, 4417, 4422, 4426, 4429, 4434, 4437, + 4441, 4444, 4447, 4450, 4453, 4456, 4460, 4464, + 4467, 4477, 4480, 4483, 4488, 4494, 4497, 4512, + 4515, 4519, 4525, 4529, 4533, 4536, 4540, 4547, + 4550, 4553, 4559, 4562, 4566, 4571, 4587, 4589, + 4597, 4599, 4607, 4613, 4615, 4619, 4622, 4625, + 4628, 4632, 4643, 4646, 4658, 4682, 4690, 4692, + 4696, 4699, 4704, 4707, 4709, 4714, 4717, 4723, + 4726, 4734, 4736, 4738, 4740, 4742, 4744, 4746, + 4748, 4750, 4752, 4755, 4758, 4760, 4762, 4764, + 4766, 4769, 4772, 4777, 4781, 4782, 4784, 4786, + 4792, 4797, 4802, 4804, 4807, 4809, 4812, 4816, + 4822, 4825, 4828, 4834, 4836, 4838, 4840, 4843, + 4876, 4878, 4880, 4883, 4886, 4889, 4897, 4905, + 4916, 4924, 4933, 4941, 4950, 4959, 4971, 4978, + 4985, 4993, 5001, 5010, 5016, 5024, 5030, 5038, + 5040, 5043, 5057, 5063, 5071, 5075, 5079, 5081, + 5128, 5130, 5133, 5135, 5140, 5146, 5152, 5157, + 5160, 5164, 5167, 5170, 5172, 5175, 5178, 5181, + 5185, 5190, 5195, 5199, 5201, 5204, 5206, 5210, + 5213, 5216, 5219, 5222, 5226, 5231, 5235, 5237, + 
5239, 5242, 5244, 5248, 5251, 5254, 5262, 5266, + 5274, 5290, 5292, 5297, 5299, 5303, 5314, 5318, + 5320, 5323, 5325, 5328, 5333, 5337, 5343, 5349, + 5360, 5365, 5368, 5371, 5374, 5377, 5379, 5383, + 5384, 5387, 5389, 5419, 5421, 5423, 5426, 5430, + 5433, 5437, 5439, 5441, 5443, 5449, 5452, 5455, + 5459, 5461, 5466, 5471, 5478, 5481, 5485, 5489, + 5491, 5494, 5514, 5516, 5518, 5525, 5529, 5531, + 5533, 5535, 5538, 5542, 5546, 5548, 5552, 5555, + 5557, 5562, 5580, 5619, 5625, 5628, 5630, 5632, + 5634, 5637, 5640, 5643, 5646, 5649, 5653, 5656, + 5659, 5662, 5664, 5666, 5669, 5676, 5679, 5681, + 5684, 5687, 5690, 5698, 5700, 5702, 5705, 5707, + 5710, 5712, 5714, 5744, 5747, 5750, 5753, 5756, + 5761, 5765, 5772, 5775, 5784, 5793, 5796, 5800, + 5803, 5806, 5810, 5812, 5816, 5818, 5821, 5823, + 5827, 5831, 5835, 5843, 5845, 5847, 5851, 5855, + 5857, 5870, 5872, 5875, 5878, 5883, 5885, 5888, + 5890, 5892, 5895, 5900, 5902, 5904, 5909, 5911, + 5914, 5918, 5938, 5942, 5946, 5948, 5950, 5958, + 5960, 5967, 5972, 5974, 5978, 5981, 5984, 5987, + 5991, 5994, 5997, 6001, 6011, 6017, 6020, 6023, + 6033, 6053, 6059, 6062, 6064, 6068, 6070, 6073, + 6075, 6079, 6081, 6083, 6087, 6089, 6091, 6097, + 6100, 6105, 6110, 6116, 6126, 6134, 6146, 6153, + 6163, 6169, 6181, 6187, 6205, 6208, 6216, 6222, + 6232, 6239, 6246, 6254, 6262, 6265, 6270, 6290, + 6296, 6299, 6303, 6307, 6311, 6323, 6326, 6331, + 6332, 6338, 6345, 6351, 6354, 6357, 6361, 6365, + 6368, 6371, 6376, 6380, 6386, 6392, 6395, 6399, + 6402, 6405, 6410, 6413, 6416, 6422, 6426, 6429, + 6433, 6436, 6439, 6443, 6447, 6454, 6457, 6460, + 6466, 6469, 6476, 6478, 6480, 6483, 6492, 6497, + 6511, 6515, 6519, 6534, 6540, 6543, 6546, 6548, + 6553, 6559, 6563, 6571, 6577, 6587, 6590, 6593, + 6598, 6602, 6605, 6608, 6611, 6615, 6620, 6624, + 6628, 6631, 6636, 6641, 6644, 6650, 6654, 6660, + 6665, 6669, 6673, 6681, 6684, 6692, 6698, 6708, + 6719, 6722, 6725, 6727, 6731, 6733, 6736, 6747, + 6751, 6754, 6757, 6760, 6763, 6765, 6769, 6773, + 6776, 6780, 6785, 6788, 6798, 6800, 6841, 6847, + 6851, 6854, 6857, 6861, 6864, 6868, 6872, 6877, + 6879, 6883, 6887, 6890, 6893, 6898, 6907, 6911, + 6916, 6921, 6925, 6932, 6936, 6939, 6943, 6946, + 6951, 6954, 6957, 6987, 6991, 6995, 6999, 7003, + 7008, 7012, 7018, 7022, 7030, 7033, 7038, 7042, + 7045, 7050, 7053, 7057, 7060, 7063, 7066, 7069, + 7072, 7076, 7080, 7083, 7093, 7096, 7099, 7104, + 7110, 7113, 7128, 7131, 7135, 7141, 7145, 7149, + 7152, 7156, 7163, 7166, 7169, 7175, 7178, 7182, + 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231, + 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274, + 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325, + 7330, 7333, 7339, 7342, 7407, 7410, 7412, 7414, + 7416, 7418, 7420, 7423, 7428, 7431, 7434, 7436, + 7476, 7478, 7480, 7482, 7487, 7491, 7492, 7494, + 7496, 7503, 7510, 7517, 7519, 7521, 7523, 7526, + 7529, 7535, 7538, 7543, 7550, 7555, 7558, 7562, + 7569, 7601, 7650, 7665, 7678, 7683, 7685, 7689, + 7720, 7726, 7728, 7749, 7769, 7771, 7783, 7794, + 7797, 7800, 7801, 7803, 7805, 7807, 7810, 7812, + 7820, 7822, 7824, 7826, 7836, 7845, 7848, 7852, + 7856, 7859, 7861, 7863, 7865, 7867, 7869, 7879, + 7888, 7891, 7895, 7899, 7902, 7904, 7906, 7908, + 7910, 7912, 7954, 7994, 7996, 8001, 8005, 8006, + 8008, 8010, 8017, 8024, 8031, 8033, 8035, 8037, + 8040, 8043, 8049, 8052, 8057, 8064, 8069, 8072, + 8076, 8083, 8115, 8164, 8179, 8192, 8197, 8199, + 8203, 8234, 8240, 8242, 8263, 8283, +} + +var _hcltok_indicies []int16 = []int16{ + 1, 0, 3, 2, 3, 4, 2, 6, + 8, 8, 7, 5, 9, 9, 7, 5, + 7, 5, 10, 11, 12, 
13, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 39, 40, 41, + 42, 43, 11, 11, 14, 14, 38, 0, + 11, 12, 13, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 39, 40, 41, 42, 43, 11, + 11, 14, 14, 38, 0, 44, 45, 11, + 11, 46, 13, 15, 16, 17, 16, 47, + 48, 20, 49, 22, 23, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 37, 39, 63, 41, 64, 65, + 66, 11, 11, 11, 14, 38, 0, 44, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 0, 11, 0, 11, 11, 0, 0, + 0, 0, 0, 0, 11, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 0, + 11, 0, 0, 11, 0, 11, 0, 0, + 11, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 11, 11, 11, 0, + 67, 68, 69, 70, 14, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 0, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 16, + 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 14, 15, 133, 134, 135, 136, + 137, 14, 16, 14, 0, 11, 0, 11, + 11, 0, 0, 11, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 0, + 11, 11, 11, 0, 0, 0, 11, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 0, 0, 11, 11, 11, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 0, 11, 11, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 11, 11, 11, 0, 0, 0, 0, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 0, 11, 11, 11, 0, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 14, 147, 148, 149, 150, 151, 0, 11, + 0, 0, 0, 0, 0, 11, 11, 0, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 0, 152, 109, 153, 154, 155, 14, + 156, 157, 16, 14, 0, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 119, 0, 16, 14, 14, 158, 0, 14, + 0, 11, 16, 159, 160, 16, 161, 162, + 16, 57, 163, 164, 165, 166, 167, 16, + 168, 169, 170, 16, 171, 172, 173, 15, + 174, 175, 176, 15, 177, 16, 14, 0, + 0, 11, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 0, 0, 0, + 0, 11, 11, 0, 0, 11, 11, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 0, 0, 0, 11, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 0, 0, 0, 11, 11, 11, + 11, 0, 178, 179, 0, 14, 0, 11, + 0, 0, 11, 16, 180, 181, 182, 183, + 57, 184, 185, 55, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 14, 0, 0, + 11, 0, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 0, 
11, 0, + 0, 11, 0, 11, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 0, + 11, 11, 11, 11, 0, 11, 11, 0, + 0, 11, 11, 11, 11, 11, 0, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 201, 206, 207, 208, 209, 38, + 0, 210, 211, 16, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 16, 14, 221, + 222, 223, 224, 16, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 16, 144, 14, 240, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 11, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 11, 11, 0, 0, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 11, 0, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 0, 11, 0, 11, 11, 0, 11, 0, + 11, 11, 0, 11, 0, 11, 0, 241, + 212, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 98, 251, 16, 252, 253, 254, + 16, 255, 129, 256, 257, 258, 259, 260, + 261, 262, 263, 16, 0, 0, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 11, 11, + 0, 11, 11, 11, 0, 11, 0, 0, + 0, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 0, 0, 0, 11, + 16, 181, 264, 265, 14, 16, 14, 0, + 0, 11, 0, 11, 16, 264, 14, 0, + 16, 266, 14, 0, 0, 11, 16, 267, + 268, 269, 172, 270, 271, 16, 272, 273, + 274, 14, 0, 0, 11, 11, 11, 0, + 11, 11, 0, 11, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 0, 11, + 0, 16, 14, 0, 275, 16, 276, 0, + 14, 0, 11, 0, 11, 277, 16, 278, + 279, 0, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 280, 281, 282, 16, 283, + 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 295, 296, 14, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 0, 11, 0, 0, 0, + 0, 0, 0, 11, 11, 11, 11, 11, + 0, 0, 11, 0, 0, 0, 11, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 297, 16, + 298, 16, 299, 300, 301, 302, 14, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 0, + 303, 16, 14, 0, 11, 304, 16, 100, + 14, 0, 11, 305, 0, 14, 0, 11, + 16, 306, 14, 0, 0, 11, 307, 0, + 16, 308, 14, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 0, 11, 0, 0, 0, + 11, 11, 11, 11, 0, 309, 310, 69, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 331, 332, 333, 334, 335, + 336, 330, 0, 11, 11, 11, 11, 0, + 11, 0, 11, 11, 0, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 0, 11, 11, 11, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 0, 337, 338, 339, 101, 102, + 103, 104, 105, 340, 107, 108, 109, 110, + 111, 112, 341, 342, 167, 343, 258, 117, + 344, 119, 229, 269, 122, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 131, + 355, 16, 14, 15, 16, 134, 135, 136, + 137, 14, 14, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 
11, 11, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 11, 0, 0, 11, 11, 11, 0, 0, + 11, 11, 0, 11, 0, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 0, 11, + 11, 0, 11, 11, 11, 0, 356, 140, + 142, 143, 144, 145, 146, 14, 357, 148, + 358, 150, 359, 0, 11, 11, 0, 0, + 0, 0, 11, 0, 0, 11, 11, 11, + 11, 11, 0, 360, 109, 361, 154, 155, + 14, 156, 157, 16, 14, 0, 11, 11, + 11, 11, 0, 0, 0, 11, 16, 159, + 160, 16, 362, 363, 219, 308, 163, 164, + 165, 364, 167, 365, 366, 367, 368, 369, + 370, 371, 372, 373, 374, 175, 176, 15, + 375, 16, 14, 0, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 16, 180, 181, 376, 183, 57, 184, 185, + 55, 186, 187, 377, 14, 190, 378, 192, + 193, 194, 14, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 0, 11, + 0, 379, 380, 197, 198, 199, 381, 201, + 202, 382, 383, 384, 201, 206, 207, 208, + 209, 38, 0, 210, 211, 16, 212, 213, + 215, 385, 217, 386, 219, 220, 16, 14, + 387, 222, 223, 224, 16, 225, 226, 227, + 228, 229, 230, 231, 232, 388, 234, 235, + 389, 237, 238, 239, 16, 144, 14, 240, + 0, 0, 11, 0, 0, 11, 0, 11, + 11, 11, 11, 11, 0, 11, 11, 0, + 390, 391, 392, 393, 394, 395, 396, 397, + 247, 398, 319, 399, 213, 400, 401, 402, + 403, 404, 401, 405, 406, 407, 258, 408, + 260, 409, 410, 271, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 11, 11, 11, 0, 11, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 0, 0, + 0, 11, 11, 11, 0, 11, 11, 0, + 16, 267, 229, 411, 401, 412, 271, 16, + 413, 414, 274, 14, 0, 11, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 277, 16, 278, 415, 0, 11, 11, 0, + 16, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 416, 14, 0, 0, 0, + 11, 16, 417, 16, 265, 300, 301, 302, + 14, 0, 0, 11, 419, 419, 419, 419, + 418, 419, 419, 419, 418, 419, 418, 419, + 419, 418, 418, 418, 418, 418, 418, 419, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 418, 418, 419, 418, 418, 419, 418, + 419, 418, 418, 419, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 419, + 419, 419, 418, 421, 422, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 433, + 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 418, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 418, 418, + 418, 418, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 418, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 418, + 418, 418, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 419, + 418, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, + 468, 469, 470, 471, 
472, 473, 474, 475, + 476, 477, 478, 479, 480, 481, 482, 483, + 484, 485, 486, 487, 488, 425, 489, 490, + 491, 492, 493, 494, 425, 470, 425, 418, + 419, 418, 419, 419, 418, 418, 419, 418, + 418, 418, 418, 419, 418, 418, 418, 418, + 418, 419, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 418, 419, 419, 419, 418, 418, + 418, 419, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 418, 418, 418, + 418, 418, 419, 419, 419, 419, 418, 418, + 419, 419, 419, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 418, 419, 419, 418, + 418, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 419, 419, 419, 418, 418, + 418, 418, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 418, 419, 419, 419, 418, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 495, 496, 497, 498, 499, 500, + 501, 502, 503, 425, 504, 505, 506, 507, + 508, 418, 419, 418, 418, 418, 418, 418, + 419, 419, 418, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 418, 418, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 418, 509, 464, 510, + 511, 512, 425, 513, 514, 470, 425, 418, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 418, + 419, 419, 418, 475, 418, 470, 425, 425, + 515, 418, 425, 418, 419, 470, 516, 517, + 470, 518, 519, 470, 520, 521, 522, 523, + 524, 525, 470, 526, 527, 528, 470, 529, + 530, 531, 489, 532, 533, 534, 489, 535, + 470, 425, 418, 418, 419, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 418, 418, 418, 418, 419, 419, 418, 418, + 419, 419, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 418, 418, 418, 419, 418, + 418, 418, 419, 419, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 418, 418, 418, + 419, 419, 419, 419, 418, 536, 537, 418, + 425, 418, 419, 418, 418, 419, 470, 538, + 539, 540, 541, 520, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 425, 418, 418, 419, 418, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 418, 419, 418, 418, 419, 418, 419, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 419, 418, 418, 419, 419, 419, 419, 418, + 419, 419, 418, 418, 419, 419, 419, 419, + 419, 418, 554, 555, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 560, 566, 567, + 568, 569, 565, 418, 570, 571, 470, 572, + 573, 574, 575, 576, 577, 578, 579, 580, + 470, 425, 581, 582, 583, 584, 470, 585, + 586, 587, 588, 589, 590, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 470, 501, + 425, 600, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 419, 419, 418, + 419, 418, 419, 419, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 419, 419, + 418, 418, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 419, + 418, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 418, 419, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 418, 419, 418, 419, 419, + 418, 419, 418, 419, 419, 418, 419, 418, + 419, 418, 601, 572, 602, 603, 604, 605, + 606, 607, 608, 609, 610, 453, 611, 470, + 612, 613, 614, 470, 615, 485, 616, 617, + 618, 619, 620, 621, 622, 623, 470, 418, + 418, 418, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 419, 
419, 419, 419, 419, 419, 419, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 419, 419, 418, 419, 419, 419, 418, + 419, 418, 418, 418, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 418, + 418, 418, 419, 470, 539, 624, 625, 425, + 470, 425, 418, 418, 419, 418, 419, 470, + 624, 425, 418, 470, 626, 425, 418, 418, + 419, 470, 627, 628, 629, 530, 630, 631, + 470, 632, 633, 634, 425, 418, 418, 419, + 419, 419, 418, 419, 419, 418, 419, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 418, 419, 418, 470, 425, 418, 635, + 470, 636, 418, 425, 418, 419, 418, 419, + 637, 470, 638, 639, 418, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 640, 641, + 642, 470, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 425, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 418, 419, + 418, 418, 418, 418, 418, 418, 419, 419, + 419, 419, 419, 418, 418, 419, 418, 418, + 418, 419, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 657, 470, 658, 470, 659, 660, 661, + 662, 425, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 418, 663, 470, 425, 418, 419, + 664, 470, 455, 425, 418, 419, 665, 418, + 425, 418, 419, 470, 666, 425, 418, 418, + 419, 667, 418, 470, 668, 425, 418, 418, + 419, 670, 669, 419, 419, 419, 419, 670, + 669, 419, 670, 669, 670, 670, 419, 670, + 669, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 670, 670, 670, 670, 669, 419, 419, 670, + 670, 419, 670, 419, 670, 669, 670, 670, + 670, 670, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 670, 419, 670, 419, 670, + 669, 670, 670, 670, 670, 670, 419, 670, + 419, 670, 669, 419, 419, 670, 419, 670, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 670, 419, 670, + 419, 670, 669, 419, 670, 670, 670, 670, + 419, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 419, 670, 669, + 670, 670, 670, 419, 670, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 670, 670, 670, 419, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 670, 669, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 670, 419, 669, 670, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 669, 670, 670, 419, 670, 669, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 670, + 419, 419, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 419, 670, 669, 419, + 670, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 670, 670, + 670, 670, 419, 419, 670, 670, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 669, + 419, 670, 670, 419, 670, 419, 669, 670, + 670, 419, 670, 669, 419, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 670, 419, 670, 419, 669, + 670, 670, 669, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 419, 419, 670, 669, 670, + 419, 669, 670, 669, 419, 670, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 419, + 669, 670, 669, 419, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 419, 670, 419, 669, 
670, 669, + 419, 419, 670, 669, 670, 419, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 419, 670, 669, 669, 419, 669, 419, + 670, 670, 419, 670, 670, 670, 670, 670, + 670, 670, 669, 419, 670, 670, 670, 419, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 670, 669, 419, + 419, 670, 669, 670, 419, 670, 669, 419, + 419, 670, 419, 419, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 669, 419, 670, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 669, 670, 419, 670, 670, 670, 669, + 419, 670, 419, 419, 670, 419, 669, 670, + 670, 669, 419, 670, 670, 670, 670, 419, + 670, 419, 669, 670, 670, 670, 419, 670, + 669, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 669, 670, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 669, 670, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 670, + 670, 670, 669, 419, 419, 419, 670, 669, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 669, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 669, 419, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 670, 670, 419, 670, 419, + 669, 419, 670, 669, 670, 419, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 670, 669, 670, 670, 670, 419, + 670, 419, 670, 419, 670, 419, 669, 670, + 670, 419, 419, 670, 669, 670, 419, 670, + 670, 669, 419, 670, 419, 670, 669, 419, + 419, 670, 670, 670, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 669, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 419, 669, 670, 419, 670, 670, 669, + 419, 670, 670, 419, 669, 670, 669, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 669, 419, 670, 419, 670, 419, + 670, 419, 670, 419, 670, 669, 671, 669, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 674, 683, 684, 685, 686, + 687, 674, 688, 689, 690, 691, 692, 693, + 694, 695, 696, 697, 698, 699, 700, 701, + 702, 674, 703, 671, 683, 671, 704, 671, + 669, 670, 670, 670, 670, 419, 669, 670, + 670, 669, 419, 670, 669, 419, 419, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 419, 670, 419, 669, 670, 670, 669, 419, + 670, 670, 670, 669, 419, 670, 419, 670, + 670, 669, 419, 419, 670, 419, 669, 670, + 669, 419, 670, 669, 419, 419, 670, 419, + 670, 669, 419, 670, 419, 419, 670, 419, + 670, 419, 669, 670, 670, 669, 419, 670, + 670, 419, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 419, 669, 419, 670, 670, + 670, 419, 670, 669, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 669, 705, 706, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 676, 717, 718, 719, 720, 721, 718, + 722, 723, 724, 725, 726, 727, 728, 729, + 730, 671, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 670, 419, 670, 669, + 670, 419, 670, 669, 670, 419, 419, 419, + 670, 669, 670, 419, 670, 669, 670, 670, + 670, 670, 419, 670, 419, 669, 670, 669, + 419, 419, 670, 419, 670, 669, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 669, 419, 670, 669, 670, 419, 670, + 669, 419, 670, 669, 419, 670, 669, 419, + 670, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 674, 731, 671, 674, 732, + 674, 733, 683, 671, 669, 670, 669, 419, + 670, 669, 419, 674, 732, 683, 671, 669, + 674, 734, 671, 683, 671, 669, 670, 669, + 419, 674, 735, 692, 736, 718, 737, 730, + 674, 738, 739, 740, 671, 683, 671, 669, + 670, 669, 419, 
670, 419, 670, 669, 419, + 670, 419, 670, 419, 669, 670, 670, 669, + 419, 670, 419, 670, 669, 419, 670, 669, + 674, 683, 425, 669, 741, 674, 742, 683, + 671, 669, 425, 670, 669, 419, 670, 669, + 419, 743, 674, 744, 745, 671, 669, 419, + 670, 669, 670, 670, 669, 419, 419, 670, + 419, 670, 669, 674, 746, 747, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 671, + 683, 671, 669, 670, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 670, 670, 670, 670, 670, 669, 419, 670, + 670, 419, 670, 419, 669, 670, 419, 670, + 670, 670, 419, 670, 670, 419, 670, 670, + 419, 670, 670, 419, 670, 670, 669, 419, + 674, 757, 674, 733, 758, 759, 760, 671, + 683, 671, 669, 670, 669, 419, 670, 670, + 670, 419, 670, 670, 670, 419, 670, 419, + 670, 669, 419, 419, 419, 419, 670, 670, + 419, 419, 419, 419, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 419, 669, 670, 670, 670, 419, 670, 419, + 670, 669, 683, 425, 761, 674, 683, 425, + 670, 669, 419, 762, 674, 763, 683, 425, + 670, 669, 419, 670, 419, 764, 683, 671, + 669, 425, 670, 669, 419, 674, 765, 671, + 683, 671, 669, 670, 669, 419, 766, 766, + 766, 768, 769, 770, 766, 767, 767, 771, + 768, 771, 769, 771, 767, 772, 773, 772, + 775, 774, 776, 774, 777, 774, 779, 778, + 781, 782, 780, 781, 783, 780, 785, 784, + 786, 784, 787, 784, 789, 788, 791, 792, + 790, 791, 793, 790, 795, 795, 795, 795, + 794, 795, 795, 795, 794, 795, 794, 795, + 795, 794, 794, 794, 794, 794, 794, 795, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 794, 794, 795, 794, 794, 795, 794, + 795, 794, 794, 795, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 795, + 795, 795, 794, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 827, 828, 794, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 794, 794, + 794, 794, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 794, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 794, + 794, 794, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 795, + 794, 829, 830, 831, 832, 833, 834, 835, + 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 858, 859, + 860, 861, 862, 863, 864, 801, 865, 866, + 867, 868, 869, 870, 801, 846, 801, 794, + 795, 794, 795, 795, 794, 794, 795, 794, + 794, 794, 794, 795, 794, 794, 794, 794, + 794, 795, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 794, 795, 795, 795, 794, 794, + 794, 795, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 794, 794, 794, + 
794, 794, 795, 795, 795, 795, 794, 794, + 795, 795, 795, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 794, 795, 795, 794, + 794, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 795, 795, 795, 794, 794, + 794, 794, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 794, 795, 795, 795, 794, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 871, 872, 873, 874, 875, 876, + 877, 878, 879, 801, 880, 881, 882, 883, + 884, 794, 795, 794, 794, 794, 794, 794, + 795, 795, 794, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 794, 794, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 794, 885, 840, 886, + 887, 888, 801, 889, 890, 846, 801, 794, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 794, + 795, 795, 794, 851, 794, 846, 801, 801, + 891, 794, 801, 794, 795, 846, 892, 893, + 846, 894, 895, 846, 896, 897, 898, 899, + 900, 901, 846, 902, 903, 904, 846, 905, + 906, 907, 865, 908, 909, 910, 865, 911, + 846, 801, 794, 794, 795, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 794, 794, 794, 794, 795, 795, 794, 794, + 795, 795, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 794, 794, 794, 795, 794, + 794, 794, 795, 795, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 794, 794, 794, + 795, 795, 795, 795, 794, 912, 913, 794, + 801, 794, 795, 794, 794, 795, 846, 914, + 915, 916, 917, 896, 918, 919, 920, 921, + 922, 923, 924, 925, 926, 927, 928, 929, + 801, 794, 794, 795, 794, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 794, 795, 794, 794, 795, 794, 795, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 795, 794, 794, 795, 795, 795, 795, 794, + 795, 795, 794, 794, 795, 795, 795, 795, + 795, 794, 930, 931, 932, 933, 934, 935, + 936, 937, 938, 939, 940, 936, 942, 943, + 944, 945, 941, 794, 946, 947, 846, 948, + 949, 950, 951, 952, 953, 954, 955, 956, + 846, 801, 957, 958, 959, 960, 846, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 846, 877, + 801, 976, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 795, 795, 794, + 795, 794, 795, 795, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 795, 795, + 794, 794, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 795, + 794, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 794, 795, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 794, 795, 794, 795, 795, + 794, 795, 794, 795, 795, 794, 795, 794, + 795, 794, 977, 948, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 829, 987, 846, + 988, 989, 990, 846, 991, 861, 992, 993, + 994, 995, 996, 997, 998, 999, 846, 794, + 794, 794, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 795, 795, 794, 795, 795, 795, 794, + 795, 794, 794, 794, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 794, + 794, 794, 795, 846, 915, 1000, 1001, 801, + 846, 801, 794, 794, 795, 794, 795, 846, + 1000, 801, 794, 846, 1002, 801, 794, 794, + 795, 846, 1003, 1004, 1005, 906, 1006, 1007, + 846, 1008, 1009, 
1010, 801, 794, 794, 795, + 795, 795, 794, 795, 795, 794, 795, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 794, 795, 794, 846, 801, 794, 1011, + 846, 1012, 794, 801, 794, 795, 794, 795, + 1013, 846, 1014, 1015, 794, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 1016, 1017, + 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024, + 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, + 801, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 794, 795, + 794, 794, 794, 794, 794, 794, 795, 795, + 795, 795, 795, 794, 794, 795, 794, 794, + 794, 795, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 1033, 846, 1034, 846, 1035, 1036, 1037, + 1038, 801, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 794, 1039, 846, 801, 794, 795, + 1040, 846, 831, 801, 794, 795, 1041, 794, + 801, 794, 795, 846, 1042, 801, 794, 794, + 795, 1043, 794, 846, 1044, 801, 794, 794, + 795, 1046, 1045, 795, 795, 795, 795, 1046, + 1045, 795, 1046, 1045, 1046, 1046, 795, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1045, 795, 795, 1046, + 1046, 795, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 1046, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046, + 795, 1046, 1045, 795, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 1046, 795, 1046, + 795, 1046, 1045, 795, 1046, 1046, 1046, 1046, + 795, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1046, 1045, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 795, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1046, 1046, + 1046, 1046, 795, 795, 1046, 1046, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1046, 795, 1046, 795, 1045, + 1046, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1045, 1046, 1045, 795, 1046, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 1045, 1046, 795, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 795, 1046, 1045, 1045, 795, 1045, 795, + 1046, 1046, 795, 1046, 1046, 1046, 1046, 
1046, + 1046, 1046, 1045, 795, 1046, 1046, 1046, 795, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 795, 1046, 795, 795, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1045, 1046, 795, 1046, 1046, 1046, 1045, + 795, 1046, 795, 795, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1046, 1046, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1046, 795, 1046, + 1045, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 1045, 1046, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 1046, + 1046, 1046, 1045, 795, 795, 795, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 1046, 1046, 795, 1046, 795, + 1045, 795, 1046, 1045, 1046, 795, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 1045, 1046, 1046, 1046, 795, + 1046, 795, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 795, 1046, 1045, 1046, 795, 1046, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1046, 1046, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1045, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1045, 1046, 795, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 795, 1046, 1045, 1047, 1045, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062, + 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069, + 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047, + 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1045, 795, 795, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 795, 1046, 795, 1045, 1046, 1046, 1045, 795, + 1046, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1046, 1045, 795, 795, 1046, 795, 1045, 1046, + 1045, 795, 1046, 1045, 795, 795, 1046, 795, + 1046, 1045, 795, 1046, 795, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 795, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1045, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, + 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 1045, 1046, 795, 795, 795, + 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 795, 1046, 1045, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 795, 1046, + 1045, 795, 1046, 1045, 795, 1046, 1045, 795, + 1046, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1050, 
1107, 1047, 1050, 1108, + 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045, + 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045, + 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106, + 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1045, 1046, 1046, 1045, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059, + 1047, 1045, 801, 1046, 1045, 795, 1046, 1045, + 795, 1119, 1050, 1120, 1121, 1047, 1045, 795, + 1046, 1045, 1046, 1046, 1045, 795, 795, 1046, + 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047, + 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 795, 1045, 1046, 795, 1046, + 1046, 1046, 795, 1046, 1046, 795, 1046, 1046, + 795, 1046, 1046, 795, 1046, 1046, 1045, 795, + 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 795, 795, 795, 795, 1046, 1046, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1045, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 1059, 801, 1137, 1050, 1059, 801, + 1046, 1045, 795, 1138, 1050, 1139, 1059, 801, + 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, + 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, + 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151, + 1152, 1153, 1154, 670, 670, 419, 1155, 1156, + 1157, 1158, 670, 1161, 1162, 1164, 1165, 1166, + 1160, 1167, 1168, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, + 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190, + 1191, 1192, 1193, 670, 1148, 7, 1148, 419, + 1148, 419, 1160, 1163, 1187, 1194, 1159, 1142, + 1142, 1195, 1143, 1196, 1198, 1197, 4, 1147, + 1200, 1197, 1201, 1197, 2, 1147, 1197, 6, + 8, 8, 7, 1202, 1203, 1204, 1197, 1205, + 1206, 1197, 1207, 1197, 419, 419, 1209, 1210, + 489, 470, 1211, 470, 1212, 1213, 1214, 1215, + 1216, 1217, 1218, 1219, 1220, 1221, 1222, 544, + 1223, 520, 1224, 1225, 1226, 1227, 1228, 1229, + 1230, 1231, 1232, 1233, 1234, 1235, 419, 419, + 419, 425, 565, 1208, 1236, 1197, 1237, 1197, + 670, 1238, 419, 419, 419, 670, 1238, 670, + 670, 419, 1238, 419, 1238, 419, 1238, 419, + 670, 670, 670, 670, 670, 1238, 419, 670, + 670, 670, 419, 670, 419, 1238, 419, 670, + 670, 670, 670, 419, 1238, 670, 419, 670, + 419, 670, 419, 670, 670, 419, 670, 1238, + 419, 670, 419, 670, 419, 670, 1238, 670, + 419, 1238, 670, 419, 670, 419, 1238, 670, + 670, 670, 670, 670, 1238, 419, 419, 670, + 419, 670, 1238, 670, 419, 1238, 670, 670, + 1238, 419, 419, 670, 419, 670, 419, 670, + 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, + 1246, 1247, 1248, 1249, 715, 1250, 1251, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1260, 1262, 1263, 1264, 1265, 1266, 671, + 1238, 1267, 1268, 1269, 1270, 1271, 1272, 1273, + 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, + 1282, 1283, 1284, 1285, 725, 1286, 1287, 1288, + 692, 1289, 1290, 1291, 1292, 1293, 1294, 671, + 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, + 674, 1303, 671, 674, 1304, 1305, 1306, 1307, + 683, 1238, 1308, 1309, 1310, 1311, 703, 1312, + 1313, 683, 1314, 1315, 1316, 1317, 1318, 671, + 1238, 1319, 1278, 1320, 1321, 1322, 683, 1323, + 1324, 674, 671, 683, 425, 1238, 1288, 671, + 674, 683, 425, 
683, 425, 1325, 683, 1238, + 425, 674, 1326, 1327, 674, 1328, 1329, 681, + 1330, 1331, 1332, 1333, 1334, 1284, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1303, 1347, 674, 683, 425, 1238, + 1348, 1349, 683, 671, 1238, 425, 671, 1238, + 674, 1350, 731, 1351, 1352, 1353, 1354, 1355, + 1356, 1357, 1358, 671, 1359, 1360, 1361, 1362, + 1363, 1364, 671, 683, 1238, 1366, 1367, 1368, + 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, + 1372, 1378, 1379, 1380, 1381, 1365, 1377, 1365, + 1238, 1365, 1238, 1382, 1382, 1383, 1384, 1385, + 1386, 1387, 1388, 1389, 1390, 1387, 767, 1391, + 1391, 1391, 1392, 1391, 1391, 768, 769, 770, + 1391, 767, 1382, 1382, 1393, 1396, 1397, 1395, + 1398, 1399, 1398, 1400, 1391, 1402, 1401, 1396, + 1403, 1395, 1405, 1404, 1394, 1394, 1394, 768, + 769, 770, 1394, 767, 767, 1406, 773, 1406, + 1407, 1406, 775, 1408, 1409, 1410, 1411, 1412, + 1413, 1414, 1411, 776, 775, 1408, 1415, 1415, + 777, 779, 1416, 1415, 776, 1418, 1419, 1417, + 1418, 1419, 1420, 1417, 775, 1408, 1421, 1415, + 775, 1408, 1415, 1423, 1422, 1425, 1424, 776, + 1426, 777, 1426, 779, 1426, 785, 1427, 1428, + 1429, 1430, 1431, 1432, 1433, 1430, 786, 785, + 1427, 1434, 1434, 787, 789, 1435, 1434, 786, + 1437, 1438, 1436, 1437, 1438, 1439, 1436, 785, + 1427, 1440, 1434, 785, 1427, 1434, 1442, 1441, + 1444, 1443, 786, 1445, 787, 1445, 789, 1445, + 795, 1448, 1449, 1451, 1452, 1453, 1447, 1454, + 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, + 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, + 1471, 1472, 1473, 1475, 1476, 1477, 1478, 1479, + 1480, 795, 795, 1446, 1447, 1450, 1474, 1481, + 1446, 1046, 795, 795, 1483, 1484, 865, 846, + 1485, 846, 1486, 1487, 1488, 1489, 1490, 1491, + 1492, 1493, 1494, 1495, 1496, 920, 1497, 896, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1509, 795, 795, 795, 801, + 941, 1482, 1046, 1510, 795, 795, 795, 1046, + 1510, 1046, 1046, 795, 1510, 795, 1510, 795, + 1510, 795, 1046, 1046, 1046, 1046, 1046, 1510, + 795, 1046, 1046, 1046, 795, 1046, 795, 1510, + 795, 1046, 1046, 1046, 1046, 795, 1510, 1046, + 795, 1046, 795, 1046, 795, 1046, 1046, 795, + 1046, 1510, 795, 1046, 795, 1046, 795, 1046, + 1510, 1046, 795, 1510, 1046, 795, 1046, 795, + 1510, 1046, 1046, 1046, 1046, 1046, 1510, 795, + 795, 1046, 795, 1046, 1510, 1046, 795, 1510, + 1046, 1046, 1510, 795, 795, 1046, 795, 1046, + 795, 1046, 1510, 1511, 1512, 1513, 1514, 1515, + 1516, 1517, 1518, 1519, 1520, 1521, 1091, 1522, + 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, + 1531, 1532, 1533, 1532, 1534, 1535, 1536, 1537, + 1538, 1047, 1510, 1539, 1540, 1541, 1542, 1543, + 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, + 1552, 1553, 1554, 1555, 1556, 1557, 1101, 1558, + 1559, 1560, 1068, 1561, 1562, 1563, 1564, 1565, + 1566, 1047, 1567, 1568, 1569, 1570, 1571, 1572, + 1573, 1574, 1050, 1575, 1047, 1050, 1576, 1577, + 1578, 1579, 1059, 1510, 1580, 1581, 1582, 1583, + 1079, 1584, 1585, 1059, 1586, 1587, 1588, 1589, + 1590, 1047, 1510, 1591, 1550, 1592, 1593, 1594, + 1059, 1595, 1596, 1050, 1047, 1059, 801, 1510, + 1560, 1047, 1050, 1059, 801, 1059, 801, 1597, + 1059, 1510, 801, 1050, 1598, 1599, 1050, 1600, + 1601, 1057, 1602, 1603, 1604, 1605, 1606, 1556, + 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, + 1615, 1616, 1617, 1618, 1575, 1619, 1050, 1059, + 801, 1510, 1620, 1621, 1059, 1047, 1510, 801, + 1047, 1510, 1050, 1622, 1107, 1623, 1624, 1625, + 1626, 1627, 1628, 1629, 1630, 1047, 1631, 1632, + 1633, 1634, 1635, 1636, 1047, 1059, 1510, 1638, + 1639, 1640, 1641, 1642, 
1643, 1644, 1645, 1646, + 1647, 1648, 1644, 1650, 1651, 1652, 1653, 1637, + 1649, 1637, 1510, 1637, 1510, +} + +var _hcltok_trans_targs []int16 = []int16{ + 1459, 1459, 2, 3, 1459, 1459, 4, 1467, + 5, 6, 8, 9, 286, 12, 13, 14, + 15, 16, 287, 288, 19, 289, 21, 22, + 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299, 328, 348, 353, 127, 128, 129, + 356, 151, 371, 375, 1459, 10, 11, 17, + 18, 20, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 64, 105, 120, 131, + 154, 170, 283, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, + 121, 122, 123, 124, 125, 126, 130, 132, + 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 152, 153, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 171, 203, 227, 230, 231, + 233, 242, 243, 246, 250, 268, 275, 277, + 279, 281, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, + 202, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, + 228, 229, 232, 234, 235, 236, 237, 238, + 239, 240, 241, 244, 245, 247, 248, 249, + 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, + 267, 269, 270, 271, 272, 273, 274, 276, + 278, 280, 282, 284, 285, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 329, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 349, 350, 351, 352, + 354, 355, 357, 358, 359, 360, 361, 362, + 363, 364, 365, 366, 367, 368, 369, 370, + 372, 373, 374, 376, 382, 404, 409, 411, + 413, 377, 378, 379, 380, 381, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, + 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 405, 406, 407, 408, 410, + 412, 414, 1459, 1471, 1459, 437, 438, 439, + 440, 417, 441, 442, 443, 444, 445, 446, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, + 463, 464, 465, 466, 467, 469, 470, 471, + 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 419, 486, + 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 418, 504, 505, 506, 507, 508, 510, + 511, 512, 513, 514, 515, 516, 517, 518, + 519, 520, 521, 522, 523, 525, 526, 527, + 528, 529, 530, 534, 536, 537, 538, 539, + 434, 540, 541, 542, 543, 544, 545, 546, + 547, 548, 549, 550, 551, 552, 553, 554, + 556, 557, 559, 560, 561, 562, 563, 564, + 432, 565, 566, 567, 568, 569, 570, 571, + 572, 573, 575, 607, 631, 634, 635, 637, + 646, 647, 650, 654, 672, 532, 679, 681, + 683, 685, 576, 577, 578, 579, 580, 581, + 582, 583, 584, 585, 586, 587, 588, 589, + 590, 591, 592, 593, 594, 595, 596, 597, + 598, 599, 600, 601, 602, 603, 604, 605, + 606, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, + 623, 624, 625, 626, 627, 628, 629, 630, + 632, 633, 636, 638, 639, 640, 641, 642, + 643, 644, 645, 648, 649, 651, 652, 653, + 655, 656, 657, 658, 659, 660, 661, 662, + 663, 664, 665, 666, 667, 668, 669, 670, + 671, 673, 674, 675, 676, 677, 678, 680, + 682, 
684, 686, 688, 689, 1459, 1459, 690, + 827, 828, 759, 829, 830, 831, 832, 833, + 834, 788, 835, 724, 836, 837, 838, 839, + 840, 841, 842, 843, 744, 844, 845, 846, + 847, 848, 849, 850, 851, 852, 853, 769, + 854, 856, 857, 858, 859, 860, 861, 862, + 863, 864, 865, 702, 866, 867, 868, 869, + 870, 871, 872, 873, 874, 740, 875, 876, + 877, 878, 879, 810, 881, 882, 885, 887, + 888, 889, 890, 891, 892, 895, 896, 898, + 899, 900, 902, 903, 904, 905, 906, 907, + 908, 909, 910, 911, 912, 914, 915, 916, + 917, 920, 922, 923, 925, 927, 1509, 1510, + 929, 930, 931, 1509, 1509, 932, 1523, 1523, + 1524, 935, 1523, 936, 1525, 1526, 1529, 1530, + 1534, 1534, 1535, 941, 1534, 942, 1536, 1537, + 1540, 1541, 1545, 1546, 1545, 968, 969, 970, + 971, 948, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, + 1011, 1012, 1013, 1014, 1015, 1016, 950, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, + 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, + 1034, 949, 1035, 1036, 1037, 1038, 1039, 1041, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058, + 1059, 1060, 1061, 1065, 1067, 1068, 1069, 1070, + 965, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1087, 1088, 1090, 1091, 1092, 1093, 1094, 1095, + 963, 1096, 1097, 1098, 1099, 1100, 1101, 1102, + 1103, 1104, 1106, 1138, 1162, 1165, 1166, 1168, + 1177, 1178, 1181, 1185, 1203, 1063, 1210, 1212, + 1214, 1216, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, + 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, + 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, + 1137, 1139, 1140, 1141, 1142, 1143, 1144, 1145, + 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, + 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, + 1163, 1164, 1167, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1179, 1180, 1182, 1183, 1184, + 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, + 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, + 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211, + 1213, 1215, 1217, 1219, 1220, 1545, 1545, 1221, + 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364, + 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370, + 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377, + 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1300, + 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, + 1394, 1395, 1396, 1233, 1397, 1398, 1399, 1400, + 1401, 1402, 1403, 1404, 1405, 1271, 1406, 1407, + 1408, 1409, 1410, 1341, 1412, 1413, 1416, 1418, + 1419, 1420, 1421, 1422, 1423, 1426, 1427, 1429, + 1430, 1431, 1433, 1434, 1435, 1436, 1437, 1438, + 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447, + 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459, + 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466, + 1468, 1469, 1470, 1459, 1472, 1459, 1473, 1459, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1459, 1459, 1459, 1459, 1459, + 1459, 1, 1459, 7, 1459, 1459, 1459, 1459, + 1459, 415, 416, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 433, + 435, 436, 468, 509, 524, 531, 533, 535, + 555, 558, 574, 687, 1459, 1459, 1459, 691, + 692, 693, 694, 695, 696, 697, 698, 699, + 700, 701, 703, 704, 705, 706, 707, 708, + 709, 710, 711, 712, 713, 714, 715, 716, + 717, 718, 
719, 720, 721, 722, 723, 725, + 726, 727, 728, 729, 730, 731, 732, 733, + 734, 735, 736, 737, 738, 739, 741, 742, + 743, 745, 746, 747, 748, 749, 750, 751, + 752, 753, 754, 755, 756, 757, 758, 760, + 761, 762, 763, 764, 765, 766, 767, 768, + 770, 771, 772, 773, 774, 775, 776, 777, + 778, 779, 780, 781, 782, 783, 784, 785, + 786, 787, 789, 790, 791, 792, 793, 794, + 795, 796, 797, 798, 799, 800, 801, 802, + 803, 804, 805, 806, 807, 808, 809, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 822, 823, 824, 825, 826, 855, + 880, 883, 884, 886, 893, 894, 897, 901, + 913, 918, 919, 921, 924, 926, 1511, 1509, + 1512, 1517, 1519, 1509, 1520, 1521, 1522, 1509, + 928, 1509, 1509, 1513, 1514, 1516, 1509, 1515, + 1509, 1509, 1509, 1518, 1509, 1509, 1509, 933, + 934, 938, 939, 1523, 1531, 1532, 1533, 1523, + 937, 1523, 1523, 934, 1527, 1528, 1523, 1523, + 1523, 1523, 1523, 940, 944, 945, 1534, 1542, + 1543, 1544, 1534, 943, 1534, 1534, 940, 1538, + 1539, 1534, 1534, 1534, 1534, 1534, 1545, 1547, + 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, + 1580, 1581, 1545, 946, 947, 951, 952, 953, + 954, 955, 956, 957, 958, 959, 960, 961, + 962, 964, 966, 967, 999, 1040, 1055, 1062, + 1064, 1066, 1086, 1089, 1105, 1218, 1545, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1234, 1235, 1236, 1237, 1238, 1239, + 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, + 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1256, + 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, + 1265, 1266, 1267, 1268, 1269, 1270, 1272, 1273, + 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282, + 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1291, + 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, + 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, + 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, + 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, + 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, + 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1386, + 1411, 1414, 1415, 1417, 1424, 1425, 1428, 1432, + 1444, 1449, 1450, 1452, 1455, 1457, +} + +var _hcltok_trans_actions []byte = []byte{ + 145, 107, 0, 0, 91, 141, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 121, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 143, 193, 149, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 147, 125, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 31, 169, + 0, 0, 0, 35, 33, 0, 55, 41, + 175, 0, 53, 0, 175, 175, 0, 0, + 75, 61, 181, 0, 73, 0, 181, 181, + 0, 0, 85, 187, 89, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 87, 79, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 93, + 0, 0, 119, 0, 111, 0, 7, 7, + 7, 0, 0, 113, 0, 115, 0, 123, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 7, 7, + 7, 196, 196, 196, 196, 196, 196, 7, + 7, 196, 7, 127, 139, 135, 97, 133, + 103, 0, 129, 0, 101, 95, 109, 99, + 131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 105, 117, 137, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 13, + 0, 0, 172, 17, 0, 7, 7, 23, + 0, 25, 27, 0, 0, 0, 151, 0, + 15, 19, 9, 0, 21, 11, 29, 0, + 0, 0, 0, 43, 0, 178, 178, 49, + 0, 157, 154, 1, 175, 175, 45, 37, + 47, 39, 51, 0, 0, 0, 
63, 0, + 184, 184, 69, 0, 163, 160, 1, 181, + 181, 65, 57, 67, 59, 71, 77, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 7, 7, 7, + 190, 190, 190, 190, 190, 190, 7, 7, + 190, 7, 81, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 83, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 166, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 166, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_eof_trans []int16 = []int16{ + 0, 1, 1, 1, 6, 6, 6, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 419, + 419, 421, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 
670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 767, 772, 772, 772, 773, 773, 775, 775, + 775, 779, 0, 0, 785, 785, 785, 789, + 0, 0, 795, 795, 797, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 
1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 0, 1196, 1197, 1198, 1200, + 1198, 1198, 1198, 1203, 1198, 1198, 1198, 1209, + 1198, 1198, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 0, 1392, 1394, + 1395, 1399, 1399, 1392, 1402, 1395, 1405, 1395, + 1407, 1407, 1407, 0, 1416, 1418, 1418, 1416, + 1416, 1423, 1425, 1427, 1427, 1427, 0, 1435, + 1437, 1437, 1435, 1435, 1442, 1444, 1446, 1446, + 1446, 0, 1483, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, +} + +const hcltok_start int = 1459 +const hcltok_first_final int = 1459 +const hcltok_error int = 0 + +const hcltok_en_stringTemplate int = 1509 +const hcltok_en_heredocTemplate int = 1523 +const hcltok_en_bareTemplate int = 1534 +const hcltok_en_identOnly int = 1545 +const hcltok_en_main int = 1459 + +//line scan_tokens.rl:16 + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, + } + +//line scan_tokens.rl:305 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + +//line scan_tokens.rl:340 + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + +//line scan_tokens.go:4289 + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + +//line scan_tokens.go:4297 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_hcltok_from_state_actions[cs]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 3: +//line NONE:1 + ts = p + +//line scan_tokens.go:4320 + } + } + + _keys = int(_hcltok_key_offsets[cs]) + _trans = int(_hcltok_index_offsets[cs]) + + _klen = int(_hcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen 
- 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hcltok_indicies[_trans]) + _eof_trans: + cs = int(_hcltok_trans_targs[_trans]) + + if _hcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hcltok_trans_actions[_trans]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 0: +//line scan_tokens.rl:224 + p-- + + case 4: +//line NONE:1 + te = p + 1 + + case 5: +//line scan_tokens.rl:248 + act = 4 + case 6: +//line scan_tokens.rl:250 + act = 6 + case 7: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 8: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 9: +//line scan_tokens.rl:84 + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 10: +//line scan_tokens.rl:248 + te = p + 1 + { + token(TokenQuotedLit) + } + case 11: +//line scan_tokens.rl:251 + te = p + 1 + { + token(TokenBadUTF8) + } + case 12: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 13: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 14: +//line scan_tokens.rl:248 + te = p + p-- + { + token(TokenQuotedLit) + } + case 15: +//line scan_tokens.rl:249 + te = p + p-- + { + token(TokenQuotedNewline) + } + case 16: +//line scan_tokens.rl:250 + te = p + p-- + { + token(TokenInvalid) + } + case 17: +//line scan_tokens.rl:251 + te = p + p-- + { + token(TokenBadUTF8) + } + case 18: +//line scan_tokens.rl:248 + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 19: +//line scan_tokens.rl:251 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 20: +//line NONE:1 + switch act { + case 4: + { + p = (te) - 1 + token(TokenQuotedLit) + } + case 6: + { + p = (te) - 1 + token(TokenInvalid) + } + } + + case 21: +//line 
scan_tokens.rl:148 + act = 11 + case 22: +//line scan_tokens.rl:259 + act = 12 + case 23: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 24: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 25: +//line scan_tokens.rl:111 + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. + nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } + token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 26: +//line scan_tokens.rl:259 + te = p + 1 + { + token(TokenBadUTF8) + } + case 27: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 28: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 29: +//line scan_tokens.rl:148 + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 30: +//line scan_tokens.rl:259 + te = p + p-- + { + token(TokenBadUTF8) + } + case 31: +//line scan_tokens.rl:148 + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 32: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 11: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. 
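+ // For example, given heredoc content like `foo ${bar}`, the scanner
+ // stops just before "${": the code below emits "foo " as a
+ // TokenStringLit and clears StartOfLine so the closing marker
+ // cannot match mid-line once scanning resumes.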
+ heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 12: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 33: +//line scan_tokens.rl:156 + act = 15 + case 34: +//line scan_tokens.rl:266 + act = 16 + case 35: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 36: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 37: +//line scan_tokens.rl:156 + te = p + 1 + { + token(TokenStringLit) + } + case 38: +//line scan_tokens.rl:266 + te = p + 1 + { + token(TokenBadUTF8) + } + case 39: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 40: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 41: +//line scan_tokens.rl:156 + te = p + p-- + { + token(TokenStringLit) + } + case 42: +//line scan_tokens.rl:266 + te = p + p-- + { + token(TokenBadUTF8) + } + case 43: +//line scan_tokens.rl:156 + p = (te) - 1 + { + token(TokenStringLit) + } + case 44: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 15: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 16: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 45: +//line scan_tokens.rl:270 + act = 17 + case 46: +//line scan_tokens.rl:271 + act = 18 + case 47: +//line scan_tokens.rl:271 + te = p + 1 + { + token(TokenBadUTF8) + } + case 48: +//line scan_tokens.rl:272 + te = p + 1 + { + token(TokenInvalid) + } + case 49: +//line scan_tokens.rl:270 + te = p + p-- + { + token(TokenIdent) + } + case 50: +//line scan_tokens.rl:271 + te = p + p-- + { + token(TokenBadUTF8) + } + case 51: +//line scan_tokens.rl:270 + p = (te) - 1 + { + token(TokenIdent) + } + case 52: +//line scan_tokens.rl:271 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 53: +//line NONE:1 + switch act { + case 17: + { + p = (te) - 1 + token(TokenIdent) + } + case 18: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 54: +//line scan_tokens.rl:278 + act = 22 + case 55: +//line scan_tokens.rl:301 + act = 39 + case 56: +//line scan_tokens.rl:280 + te = p + 1 + { + token(TokenComment) + } + case 57: +//line scan_tokens.rl:281 + te = p + 1 + { + token(TokenNewline) + } + case 58: +//line scan_tokens.rl:283 + te = p + 1 + { + token(TokenEqualOp) + } + case 59: +//line scan_tokens.rl:284 + te = p + 1 + { + token(TokenNotEqual) + } + case 60: +//line scan_tokens.rl:285 + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 61: +//line scan_tokens.rl:286 + te = p + 1 + { + token(TokenLessThanEq) + } + case 62: +//line scan_tokens.rl:287 + te = p + 1 + { + token(TokenAnd) + } + case 63: +//line scan_tokens.rl:288 + te = p + 1 + { + 
token(TokenOr) + } + case 64: +//line scan_tokens.rl:289 + te = p + 1 + { + token(TokenEllipsis) + } + case 65: +//line scan_tokens.rl:290 + te = p + 1 + { + token(TokenFatArrow) + } + case 66: +//line scan_tokens.rl:291 + te = p + 1 + { + selfToken() + } + case 67: +//line scan_tokens.rl:180 + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 68: +//line scan_tokens.rl:185 + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 69: +//line scan_tokens.rl:197 + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 70: +//line scan_tokens.rl:79 + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1509 + goto _again + } + } + case 71: +//line scan_tokens.rl:89 + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 1: +//line NONE:1 + ts = 0 + + case 2: +//line NONE:1 + act = 0 + +//line scan_tokens.go:5073 + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _hcltok_eof_trans[cs] > 0 { + _trans = int(_hcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line scan_tokens.rl:363 + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
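+ // The EOF token's range is zero-length, anchored at len(data), so a
+ // diagnostic that refers to it points at the very end of the input.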
+ f.emitToken(TokenEOF, len(data), len(data))
+
+ return f.Tokens
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 00000000000..476025d1ba4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,394 @@
+package hclsyntax
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+// AsHCLBlock returns the block data expressed as a *hcl.Block.
+func (b *Block) AsHCLBlock() *hcl.Block {
+ if b == nil {
+ return nil
+ }
+
+ lastHeaderRange := b.TypeRange
+ if len(b.LabelRanges) > 0 {
+ lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
+ }
+
+ return &hcl.Block{
+ Type: b.Type,
+ Labels: b.Labels,
+ Body: b.Body,
+
+ DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange),
+ TypeRange: b.TypeRange,
+ LabelRanges: b.LabelRanges,
+ }
+}
+
+// Body is the implementation of hcl.Body for the HCL native syntax.
+type Body struct {
+ Attributes Attributes
+ Blocks Blocks
+
+ // These are used with PartialContent to produce a "remaining items"
+ // body to return. They are nil on all bodies fresh out of the parser.
+ hiddenAttrs map[string]struct{}
+ hiddenBlocks map[string]struct{}
+
+ SrcRange hcl.Range
+ EndRange hcl.Range // Final token of the body, for reporting missing items
+}
+
+// Assert that *Body implements hcl.Body
+var assertBodyImplBody hcl.Body = &Body{}
+
+func (b *Body) walkChildNodes(w internalWalkFunc) {
+ w(b.Attributes)
+ w(b.Blocks)
+}
+
+func (b *Body) Range() hcl.Range {
+ return b.SrcRange
+}
+
+func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+ content, remainHCL, diags := b.PartialContent(schema)
+
+ // Now we'll see if anything actually remains, to produce errors about
+ // extraneous items.
+ remain := remainHCL.(*Body)
+
+ for name, attr := range b.Attributes {
+ if _, hidden := remain.hiddenAttrs[name]; !hidden {
+ var suggestions []string
+ for _, attrS := range schema.Attributes {
+ if _, defined := content.Attributes[attrS.Name]; defined {
+ continue
+ }
+ suggestions = append(suggestions, attrS.Name)
+ }
+ suggestion := nameSuggestion(name, suggestions)
+ if suggestion != "" {
+ suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+ } else {
+ // Is there a block of the same name?
+ for _, blockS := range schema.Blocks {
+ if blockS.Type == name {
+ suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
+ break
+ }
+ }
+ }
+
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unsupported argument",
+ Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion),
+ Subject: &attr.NameRange,
+ })
+ }
+ }
+
+ for _, block := range b.Blocks {
+ blockTy := block.Type
+ if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
+ var suggestions []string
+ for _, blockS := range schema.Blocks {
+ suggestions = append(suggestions, blockS.Type)
+ }
+ suggestion := nameSuggestion(blockTy, suggestions)
+ if suggestion != "" {
+ suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+ } else {
+ // Is there an attribute of the same name?
+ for _, attrS := range schema.Attributes {
+ if attrS.Name == blockTy {
+ suggestion = fmt.Sprintf(" Did you mean to define argument %q?
If so, use the equals sign to assign it a value.", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < len(blockS.LabelNames) { + name := block.Type + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name), + Detail: fmt.Sprintf( + "All %s blocks must have %d labels (%s).", + name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), + ), + Subject: &block.OpenBraceRange, + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + continue + } + + blocks = append(blocks, block.AsHCLBlock()) + } + + // We hide blocks only after we've processed all of them, since otherwise + // we can't process more than one of the same type. 
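+ // For example, if the schema asks for blocks of type "network" and
+ // the body contains two of them, hiding the type inside the loop
+ // above would have skipped the second block; deferring the hide
+ // returns both here while marking the type as consumed in the
+ // remaining body.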
+ for _, blockS := range schema.Blocks { + hiddenBlocks[blockS.Type] = struct{}{} + } + + remain := &Body{ + Attributes: b.Attributes, + Blocks: b.Blocks, + + hiddenAttrs: hiddenAttrs, + hiddenBlocks: hiddenBlocks, + + SrcRange: b.SrcRange, + EndRange: b.EndRange, + } + + return &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + + MissingItemRange: b.MissingItemRange(), + }, remain, diags +} + +func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if len(b.Blocks) > 0 { + example := b.Blocks[0] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %q block", example.Type), + Detail: "Blocks are not allowed here.", + Subject: &example.TypeRange, + }) + // we will continue processing anyway, and return the attributes + // we are able to find so that certain analyses can still be done + // in the face of errors. + } + + if b.Attributes == nil { + return attrs, diags + } + + for name, attr := range b.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + return attrs, diags +} + +func (b *Body) MissingItemRange() hcl.Range { + return hcl.Range{ + Filename: b.SrcRange.Filename, + Start: b.SrcRange.Start, + End: b.SrcRange.Start, + } +} + +// Attributes is the collection of attribute definitions within a body. +type Attributes map[string]*Attribute + +func (a Attributes) walkChildNodes(w internalWalkFunc) { + for _, attr := range a { + w(attr) + } +} + +// Range returns the range of some arbitrary point within the set of +// attributes, or an invalid range if there are no attributes. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (a Attributes) Range() hcl.Range { + // An attributes doesn't really have a useful range to report, since + // it's just a grouping construct. So we'll arbitrarily take the + // range of one of the attributes, or produce an invalid range if we have + // none. In practice, there's little reason to ask for the range of + // an Attributes. + for _, attr := range a { + return attr.Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Attribute represents a single attribute definition within a body. +type Attribute struct { + Name string + Expr Expression + + SrcRange hcl.Range + NameRange hcl.Range + EqualsRange hcl.Range +} + +func (a *Attribute) walkChildNodes(w internalWalkFunc) { + w(a.Expr) +} + +func (a *Attribute) Range() hcl.Range { + return a.SrcRange +} + +// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. +func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + if a == nil { + return nil + } + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for _, block := range bs { + w(block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. 
+func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + w(b.Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} + +func (b *Block) DefRange() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go new file mode 100644 index 00000000000..d8f023ba052 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go @@ -0,0 +1,118 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ----------------------------------------------------------------------------- +// The methods in this file are all optional extension methods that serve to +// implement the methods of the same name on *hcl.File when its root body +// is provided by this package. +// ----------------------------------------------------------------------------- + +// BlocksAtPos implements the method of the same name for an *hcl.File that +// is backed by a *Body. +func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block { + list, _ := b.blocksAtPos(pos, true) + return list +} + +// InnermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block { + _, innermost := b.blocksAtPos(pos, false) + return innermost.AsHCLBlock() +} + +// OutermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block { + return b.outermostBlockAtPos(pos).AsHCLBlock() +} + +// blocksAtPos is the internal engine of both BlocksAtPos and +// InnermostBlockAtPos, which both need to do the same logic but return a +// differently-shaped result. +// +// list is nil if makeList is false, avoiding an allocation. Innermost is +// always set, and if the returned list is non-nil it will always match the +// final element from that list. +func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) { + current := b + +Blocks: + for current != nil { + for _, block := range current.Blocks { + wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange) + if wholeRange.ContainsPos(pos) { + innermost = block + if makeList { + list = append(list, innermost.AsHCLBlock()) + } + current = block.Body + continue Blocks + } + } + + // If we fall out here then none of the current body's nested blocks + // contain the position we are looking for, and so we're done. + break + } + + return +} + +// outermostBlockAtPos is the internal version of OutermostBlockAtPos that +// returns a hclsyntax.Block rather than an hcl.Block, allowing for further +// analysis if necessary. +func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block { + // This is similar to blocksAtPos, but simpler because we know it only + // ever needs to search the first level of nested blocks. 
+
+	for _, block := range b.Blocks {
+		wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
+		if wholeRange.ContainsPos(pos) {
+			return block
+		}
+	}
+
+	return nil
+}
+
+// AttributeAtPos implements the method of the same name for an *hcl.File
+// that is backed by a *Body.
+func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
+	return b.attributeAtPos(pos).AsHCLAttribute()
+}
+
+// attributeAtPos is the internal version of AttributeAtPos that returns a
+// hclsyntax.Attribute rather than an hcl.Attribute, allowing for further
+// analysis if necessary.
+func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
+	searchBody := b
+	_, block := b.blocksAtPos(pos, false)
+	if block != nil {
+		searchBody = block.Body
+	}
+
+	for _, attr := range searchBody.Attributes {
+		if attr.SrcRange.ContainsPos(pos) {
+			return attr
+		}
+	}
+
+	return nil
+}
+
+// OutermostExprAtPos implements the method of the same name for an *hcl.File
+// that is backed by a *Body.
+func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
+	attr := b.attributeAtPos(pos)
+	if attr == nil {
+		return nil
+	}
+	if !attr.Expr.Range().ContainsPos(pos) {
+		return nil
+	}
+	return attr.Expr
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
new file mode 100644
index 00000000000..3d898fd738e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
@@ -0,0 +1,320 @@
+package hclsyntax
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Token represents a sequence of bytes from some HCL code that has been
+// tagged with a type and its range within the source file.
+type Token struct {
+	Type  TokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// Tokens is a slice of Token.
+type Tokens []Token
+
+// TokenType is an enumeration used for the Type field on Token.
+type TokenType rune
+
+const (
+	// Single-character tokens are represented by their own character, for
+	// convenience in producing these within the scanner. However, the values
+	// are otherwise arbitrary and just intended to be mnemonic for humans
+	// who might see them in debug output.
+
+	TokenOBrace   TokenType = '{'
+	TokenCBrace   TokenType = '}'
+	TokenOBrack   TokenType = '['
+	TokenCBrack   TokenType = ']'
+	TokenOParen   TokenType = '('
+	TokenCParen   TokenType = ')'
+	TokenOQuote   TokenType = '«'
+	TokenCQuote   TokenType = '»'
+	TokenOHeredoc TokenType = 'H'
+	TokenCHeredoc TokenType = 'h'
+
+	TokenStar    TokenType = '*'
+	TokenSlash   TokenType = '/'
+	TokenPlus    TokenType = '+'
+	TokenMinus   TokenType = '-'
+	TokenPercent TokenType = '%'
+
+	TokenEqual         TokenType = '='
+	TokenEqualOp       TokenType = '≔'
+	TokenNotEqual      TokenType = '≠'
+	TokenLessThan      TokenType = '<'
+	TokenLessThanEq    TokenType = '≤'
+	TokenGreaterThan   TokenType = '>'
+	TokenGreaterThanEq TokenType = '≥'
+
+	TokenAnd  TokenType = '∧'
+	TokenOr   TokenType = '∨'
+	TokenBang TokenType = '!'
+
+	TokenDot   TokenType = '.'
+	TokenComma TokenType = ','
+
+	TokenEllipsis TokenType = '…'
+	TokenFatArrow TokenType = '⇒'
+
+	TokenQuestion TokenType = '?'
+ TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. + + TokenBitwiseAnd TokenType = '&' + TokenBitwiseOr TokenType = '|' + TokenBitwiseNot TokenType = '~' + TokenBitwiseXor TokenType = '^' + TokenStarStar TokenType = '➚' + TokenApostrophe TokenType = '\'' + TokenBacktick TokenType = '`' + TokenSemicolon TokenType = ';' + TokenTabs TokenType = '␉' + TokenInvalid TokenType = '�' + TokenBadUTF8 TokenType = '💩' + TokenQuotedNewline TokenType = '␤' + + // TokenNil is a placeholder for when a token is required but none is + // available, e.g. when reporting errors. The scanner will never produce + // this as part of a token stream. + TokenNil TokenType = '\x00' +) + +func (t TokenType) GoString() string { + return fmt.Sprintf("hclsyntax.%s", t.String()) +} + +type scanMode int + +const ( + scanNormal scanMode = iota + scanTemplate + scanIdentOnly +) + +type tokenAccum struct { + Filename string + Bytes []byte + Pos hcl.Pos + Tokens []Token + StartByte int +} + +func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { + // Walk through our buffer to figure out how much we need to adjust + // the start pos to get our end pos. + + start := f.Pos + start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset + start.Byte = startOfs + f.StartByte + + end := start + end.Byte = endOfs + f.StartByte + b := f.Bytes[startOfs:endOfs] + for len(b) > 0 { + advance, seq, _ := textseg.ScanGraphemeClusters(b, true) + if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') { + end.Line++ + end.Column = 1 + } else { + end.Column++ + } + b = b[advance:] + } + + f.Pos = end + + f.Tokens = append(f.Tokens, Token{ + Type: ty, + Bytes: f.Bytes[startOfs:endOfs], + Range: hcl.Range{ + Filename: f.Filename, + Start: start, + End: end, + }, + }) +} + +type heredocInProgress struct { + Marker []byte + StartOfLine bool +} + +func tokenOpensFlushHeredoc(tok Token) bool { + if tok.Type != TokenOHeredoc { + return false + } + return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'}) +} + +// checkInvalidTokens does a simple pass across the given tokens and generates +// diagnostics for tokens that should _never_ appear in HCL source. This +// is intended to avoid the need for the parser to have special support +// for them all over. +// +// Returns a diagnostics with no errors if everything seems acceptable. +// Otherwise, returns zero or more error diagnostics, though tries to limit +// repetition of the same information. 
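+//
+// For example, a file that uses "&" where "&&" was intended yields an
+// "Unsupported operator" error pointing at the "&" token; at most four
+// such bitwise-operator diagnostics are emitted before further ones are
+// suppressed.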
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	toldBitwise := 0
+	toldExponent := 0
+	toldBacktick := 0
+	toldApostrophe := 0
+	toldSemicolon := 0
+	toldTabs := 0
+	toldBadUTF8 := 0
+
+	for _, tok := range tokens {
+		// copy token so it's safe to point to it
+		tok := tok
+
+		switch tok.Type {
+		case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
+			if toldBitwise < 4 {
+				var suggestion string
+				switch tok.Type {
+				case TokenBitwiseAnd:
+					suggestion = " Did you mean boolean AND (\"&&\")?"
+				case TokenBitwiseOr:
+					suggestion = " Did you mean boolean OR (\"||\")?"
+				case TokenBitwiseNot:
+					suggestion = " Did you mean boolean NOT (\"!\")?"
+				}
+
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
+					Subject:  &tok.Range,
+				})
+				toldBitwise++
+			}
+		case TokenStarStar:
+			if toldExponent < 1 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
+					Subject:  &tok.Range,
+				})
+
+				toldExponent++
+			}
+		case TokenBacktick:
+			// Only report for alternating (even) backticks, so we won't report
+			// both the start and end of the same backtick-quoted string.
+			if (toldBacktick % 2) == 0 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid character",
+					Detail:   "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"< 0 && ret[0] == '.' {
+		ret = ret[1:]
+	}
+	return ret
+}
+
+func navigationStepsRev(v node, offset int) []string {
+	switch tv := v.(type) {
+	case *objectVal:
+		// Do any of our properties have an object that contains the target
+		// offset?
+		for _, attr := range tv.Attrs {
+			k := attr.Name
+			av := attr.Value
+
+			switch av.(type) {
+			case *objectVal, *arrayVal:
+				// okay
+			default:
+				continue
+			}
+
+			if av.Range().ContainsOffset(offset) {
+				return append(navigationStepsRev(av, offset), "."+k)
+			}
+		}
+	case *arrayVal:
+		// Do any of our elements contain the target offset?
+ for i, elem := range tv.Values { + + switch elem.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if elem.Range().ContainsOffset(offset) { + return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) + } + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go new file mode 100644 index 00000000000..d368ea8fce5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go @@ -0,0 +1,496 @@ +package json + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{ + Filename: filename, + Pos: hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + }) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseValue(p *peeker) (node, hcl.Diagnostics) { + tok := p.Peek() + + wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { + if n != nil { + return n, diags + } + return invalidVal{tok.Range}, diags + } + + switch tok.Type { + case tokenBraceO: + return wrapInvalid(parseObject(p)) + case tokenBrackO: + return wrapInvalid(parseArray(p)) + case tokenNumber: + return wrapInvalid(parseNumber(p)) + case tokenString: + return wrapInvalid(parseString(p)) + case tokenKeyword: + return wrapInvalid(parseKeyword(p)) + case tokenBraceC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing JSON value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenBrackC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing array element value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenEOF: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing value", + Detail: "The JSON data ends prematurely.", + Subject: &tok.Range, + }, + }) + default: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid start of value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + } +} + +func tokenCanStartValue(tok token) bool { + switch tok.Type { + case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: + return true + default: + return false + } +} + +func parseObject(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + attrs := []*objectAttr{} + + // recover is used to shift the peeker to what seems to be the end of + // our object, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. 
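+	// For example, when a property's colon is missing, recover consumes
+	// tokens until the object's braces balance again, so one malformed
+	// property produces a single diagnostic instead of a cascade of them.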
+ recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBraceO: + open++ + case tokenBraceC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. + return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBraceC { + break Token + } + + keyNode, keyDiags := parseValue(p) + diags = diags.Extend(keyDiags) + if keyNode == nil { + return nil, diags + } + + keyStrNode, ok := keyNode.(*stringVal) + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object property name", + Detail: "A JSON object property name must be a string", + Subject: keyNode.StartRange().Ptr(), + }) + } + + key := keyStrNode.Value + + colon := p.Read() + if colon.Type != tokenColon { + recover(colon) + + if colon.Type == tokenBraceC || colon.Type == tokenComma { + // Catch common mistake of using braces instead of brackets + // for an object. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing object value", + Detail: "A JSON object attribute must have a value, introduced by a colon.", + Subject: &colon.Range, + }) + } + + if colon.Type == tokenEquals { + // Possible confusion with native HCL syntax. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", + Subject: &colon.Range, + }) + } + + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "A colon must appear between an object property's name and its value.", + Subject: &colon.Range, + }) + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + attrs = append(attrs, &objectAttr{ + Name: key, + Value: valNode, + NameRange: keyStrNode.SrcRange, + }) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBraceC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in object", + Detail: "JSON does not permit a trailing comma after the final property in an object.", + Subject: &comma.Range, + }) + } + continue Token + case tokenEOF: + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing brace was found for this JSON object.", + Subject: &open.Range, + }) + case tokenBrackC: + // Consume the bracket anyway, so that we don't return with the peeker + // at a strange place. 
+			p.Read()
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Mismatched braces",
+				Detail:   "A JSON object must be closed with a brace, not a bracket.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenBraceC:
+			break Token
+		default:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing attribute separator comma",
+				Detail:   "A comma must appear between each property definition in an object.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		}
+
+	}
+
+	close := p.Read()
+	return &objectVal{
+		Attrs:      attrs,
+		SrcRange:   hcl.RangeBetween(open.Range, close.Range),
+		OpenRange:  open.Range,
+		CloseRange: close.Range,
+	}, diags
+}
+
+func parseArray(p *peeker) (node, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	open := p.Read()
+	vals := []node{}
+
+	// recover is used to shift the peeker to what seems to be the end of
+	// our array, so that when we encounter an error we leave the peeker
+	// at a reasonable point in the token stream to continue parsing.
+	recover := func(tok token) {
+		open := 1
+		for {
+			switch tok.Type {
+			case tokenBrackO:
+				open++
+			case tokenBrackC:
+				open--
+				if open <= 1 {
+					return
+				}
+			case tokenEOF:
+				// Ran out of source before we were able to recover,
+				// so we'll bail here and let the caller deal with it.
+				return
+			}
+			tok = p.Read()
+		}
+	}
+
+Token:
+	for {
+		if p.Peek().Type == tokenBrackC {
+			break Token
+		}
+
+		valNode, valDiags := parseValue(p)
+		diags = diags.Extend(valDiags)
+		if valNode == nil {
+			return nil, diags
+		}
+
+		vals = append(vals, valNode)
+
+		switch p.Peek().Type {
+		case tokenComma:
+			comma := p.Read()
+			if p.Peek().Type == tokenBrackC {
+				// Special error message for this common mistake
+				return nil, diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Trailing comma in array",
+					Detail:   "JSON does not permit a trailing comma after the final value in an array.",
+					Subject:  &comma.Range,
+				})
+			}
+			continue Token
+		case tokenColon:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid array value",
+				Detail:   "A colon is not used to introduce values in a JSON array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenEOF:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unclosed array",
+				Detail:   "No closing bracket was found for this JSON array.",
+				Subject:  &open.Range,
+			})
+		case tokenBraceC:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Mismatched brackets",
+				Detail:   "A JSON array must be closed with a bracket, not a brace.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenBrackC:
+			break Token
+		default:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing attribute separator comma",
+				Detail:   "A comma must appear between each value in an array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		}
+
+	}
+
+	close := p.Read()
+	return &arrayVal{
+		Values:    vals,
+		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
+		OpenRange: open.Range,
+	}, diags
+}
+
+func parseNumber(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+
+	// Use encoding/json to validate the number syntax.
+	// TODO: Do this more directly to produce better diagnostics.
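+	// For example, encoding/json rejects forms such as "01" or "1." even
+	// though the permissive scanner may have captured them as a single
+	// number token.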
+	var num json.Number
+	err := json.Unmarshal(tok.Bytes, &num)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   "There is a syntax error in the given JSON number.",
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	// We want to guarantee that we parse numbers the same way as cty (and thus
+	// native syntax HCL) would here, so we'll use the cty parser even though
+	// in most other cases we don't actually introduce cty concepts until
+	// decoding time. We'll unwrap the parsed float immediately afterwards, so
+	// the cty value is just a temporary helper.
+	nv, err := cty.ParseNumberVal(string(num))
+	if err != nil {
+		// Should never happen if above passed, since JSON numbers are a subset
+		// of what cty can parse...
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   "There is a syntax error in the given JSON number.",
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	return &numberVal{
+		Value:    nv.AsBigFloat(),
+		SrcRange: tok.Range,
+	}, nil
+}
+
+func parseString(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+	var str string
+	err := json.Unmarshal(tok.Bytes, &str)
+
+	if err != nil {
+		var errRange hcl.Range
+		if serr, ok := err.(*json.SyntaxError); ok {
+			errOfs := serr.Offset
+			errPos := tok.Range.Start
+			errPos.Byte += int(errOfs)
+
+			// TODO: Use the byte offset to properly count unicode
+			// characters for the column, and mark the whole of the
+			// character that was wrong as part of our range.
+			errPos.Column += int(errOfs)
+
+			errEndPos := errPos
+			errEndPos.Byte++
+			errEndPos.Column++
+
+			errRange = hcl.Range{
+				Filename: tok.Range.Filename,
+				Start:    errPos,
+				End:      errEndPos,
+			}
+		} else {
+			errRange = tok.Range
+		}
+
+		var contextRange *hcl.Range
+		if errRange != tok.Range {
+			contextRange = &tok.Range
+		}
+
+		// FIXME: Eventually we should parse strings directly here so
+		// we can produce a more useful error message in the face of things
+		// such as invalid escapes, etc.
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON string",
+				Detail:   "There is a syntax error in the given JSON string.",
+				Subject:  &errRange,
+				Context:  contextRange,
+			},
+		}
+	}
+
+	return &stringVal{
+		Value:    str,
+		SrcRange: tok.Range,
+	}, nil
+}
+
+func parseKeyword(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+	s := string(tok.Bytes)
+
+	switch s {
+	case "true":
+		return &booleanVal{
+			Value:    true,
+			SrcRange: tok.Range,
+		}, nil
+	case "false":
+		return &booleanVal{
+			Value:    false,
+			SrcRange: tok.Range,
+		}, nil
+	case "null":
+		return &nullVal{
+			SrcRange: tok.Range,
+		}, nil
+	case "undefined", "NaN", "Infinity":
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON keyword",
+				Detail:   fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s),
+				Subject:  &tok.Range,
+			},
+		}
+	default:
+		var dym string
+		if suggest := keywordSuggestion(s); suggest != "" {
+			dym = fmt.Sprintf(" Did you mean %q?", suggest)
+		}
+
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON keyword",
+				Detail:   fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym),
+				Subject:  &tok.Range,
+			},
+		}
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
new file mode 100644
index 00000000000..fc7bbf58274
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
@@ -0,0 +1,25 @@
+package json
+
+type peeker struct {
+	tokens []token
+	pos    int
+}
+
+func newPeeker(tokens []token) *peeker {
+	return &peeker{
+		tokens: tokens,
+		pos:    0,
+	}
+}
+
+func (p *peeker) Peek() token {
+	return p.tokens[p.pos]
+}
+
+func (p *peeker) Read() token {
+	ret := p.tokens[p.pos]
+	if ret.Type != tokenEOF {
+		p.pos++
+	}
+	return ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
new file mode 100644
index 00000000000..2728aa13091
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
@@ -0,0 +1,94 @@
+package json
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Parse attempts to parse the given buffer as JSON and, if successful, returns
+// a hcl.File for the HCL configuration represented by it.
+//
+// This is not a generic JSON parser. Instead, it deals only with the profile
+// of JSON used to express HCL configuration.
+//
+// The returned file is valid only if the returned diagnostics returns false
+// from its HasErrors method. If HasErrors returns true, the file represents
+// the subset of data that was able to be parsed, which may be none.
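+//
+// A minimal usage sketch (the file name here is purely illustrative):
+//
+//	f, diags := Parse([]byte("{\"io_mode\": \"async\"}"), "config.tf.json")
+//	if !diags.HasErrors() {
+//		attrs, _ := f.Body.JustAttributes()
+//		_ = attrs["io_mode"]
+//	}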
+func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
+	rootNode, diags := parseFileContent(src, filename)
+
+	switch rootNode.(type) {
+	case *objectVal, *arrayVal:
+		// okay
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Root value must be object",
+			Detail:   "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.",
+			Subject:  rootNode.StartRange().Ptr(),
+		})
+
+		// Since we've already produced an error message for this being
+		// invalid, we'll return an empty placeholder here so that trying to
+		// extract content from our root body won't produce a redundant
+		// error saying the same thing again in more general terms.
+		fakePos := hcl.Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		}
+		fakeRange := hcl.Range{
+			Filename: filename,
+			Start:    fakePos,
+			End:      fakePos,
+		}
+		rootNode = &objectVal{
+			Attrs:     []*objectAttr{},
+			SrcRange:  fakeRange,
+			OpenRange: fakeRange,
+		}
+	}
+
+	file := &hcl.File{
+		Body: &body{
+			val: rootNode,
+		},
+		Bytes: src,
+		Nav:   navigation{rootNode},
+	}
+	return file, diags
+}
+
+// ParseFile is a convenience wrapper around Parse that first attempts to load
+// data from the given filename, passing the result to Parse if successful.
+//
+// If the file cannot be read, an error diagnostic with nil context is returned.
+func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to open file",
+				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
+			},
+		}
+	}
+	defer f.Close()
+
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to read file",
+				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
+			},
+		}
+	}
+
+	return Parse(src, filename)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
new file mode 100644
index 00000000000..da728842391
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -0,0 +1,297 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+//go:generate stringer -type tokenType scanner.go
+type tokenType rune
+
+const (
+	tokenBraceO  tokenType = '{'
+	tokenBraceC  tokenType = '}'
+	tokenBrackO  tokenType = '['
+	tokenBrackC  tokenType = ']'
+	tokenComma   tokenType = ','
+	tokenColon   tokenType = ':'
+	tokenKeyword tokenType = 'K'
+	tokenString  tokenType = 'S'
+	tokenNumber  tokenType = 'N'
+	tokenEOF     tokenType = '␄'
+	tokenInvalid tokenType = 0
+	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
+)
+
+type token struct {
+	Type  tokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// scan returns the primary tokens for the given JSON buffer in sequence.
+//
+// The responsibility of this pass is to just mark the slices of the buffer
+// as being of various types. It is lax in how it interprets the multi-byte
+// token types keyword, string and number, preferring to capture erroneous
+// extra bytes that we presume the user intended to be part of the token
+// so that we can generate more helpful diagnostics in the parser.
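+//
+// For example, scanning the text {"a": 1} produces tokens of types
+// tokenBraceO, tokenString, tokenColon, tokenNumber and tokenBraceC,
+// followed by a final tokenEOF.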
+func scan(buf []byte, start pos) []token { + var tokens []token + p := start + for { + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + buf, p = skipWhitespace(buf, p) + + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + start = p + + first := buf[0] + switch { + case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': + p.Pos.Column++ + p.Pos.Byte++ + tokens = append(tokens, token{ + Type: tokenType(first), + Bytes: buf[0:1], + Range: posRange(start, p), + }) + buf = buf[1:] + case first == '"': + var tokBuf []byte + tokBuf, buf, p = scanString(buf, p) + tokens = append(tokens, token{ + Type: tokenString, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartNumber(first): + var tokBuf []byte + tokBuf, buf, p = scanNumber(buf, p) + tokens = append(tokens, token{ + Type: tokenNumber, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartKeyword(first): + var tokBuf []byte + tokBuf, buf, p = scanKeyword(buf, p) + tokens = append(tokens, token{ + Type: tokenKeyword, + Bytes: tokBuf, + Range: posRange(start, p), + }) + default: + tokens = append(tokens, token{ + Type: tokenInvalid, + Bytes: buf[:1], + Range: start.Range(1, 1), + }) + // If we've encountered an invalid then we might as well stop + // scanning since the parser won't proceed beyond this point. + return tokens + } + } +} + +func byteCanStartNumber(b byte) bool { + switch b { + // We are slightly more tolerant than JSON requires here since we + // expect the parser will make a stricter interpretation of the + // number bytes, but we specifically don't allow 'e' or 'E' here + // since we want the scanner to treat that as the start of an + // invalid keyword instead, to produce more intelligible error messages. + case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + default: + return false + } +} + +func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't check that the sequence of digit-ish bytes is + // in a valid order. The parser must do this when decoding a number + // token. + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func byteCanStartKeyword(b byte) bool { + switch { + // We allow any sequence of alphabetical characters here, even though + // JSON is more constrained, so that we can collect what we presume + // the user intended to be a single keyword and then check its validity + // in the parser, where we can generate better diagnostics. + // So e.g. we want to be able to say: + // unrecognized keyword "True". Did you mean "true"? + case isAlphabetical(b): + return true + default: + return false + } +} + +func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + b := buf[i] + switch { + case isAlphabetical(b) || b == '_': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func scanString(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't validate correct use of escapes, etc. 
It pays + // attention to escapes only for the purpose of identifying the closing + // quote character. It's the parser's responsibility to do proper + // validation. + // + // The scanner also doesn't specifically detect unterminated string + // literals, though they can be identified in the parser by checking if + // the final byte in a string token is the double-quote character. + + // Skip the opening quote symbol + i := 1 + p := start + p.Pos.Byte++ + p.Pos.Column++ + escaping := false +Byte: + for i < len(buf) { + b := buf[i] + + switch { + case b == '\\': + escaping = !escaping + p.Pos.Byte++ + p.Pos.Column++ + i++ + case b == '"': + p.Pos.Byte++ + p.Pos.Column++ + i++ + if !escaping { + break Byte + } + escaping = false + case b < 32: + break Byte + default: + // Advance by one grapheme cluster, so that we consider each + // grapheme to be a "column". + // Ignoring error because this scanner cannot produce errors. + advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) + + p.Pos.Byte += advance + p.Pos.Column++ + i += advance + + escaping = false + } + } + return buf[:i], buf[i:], p +} + +func skipWhitespace(buf []byte, start pos) ([]byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case ' ': + p.Pos.Byte++ + p.Pos.Column++ + case '\n': + p.Pos.Byte++ + p.Pos.Column = 1 + p.Pos.Line++ + case '\r': + // For the purpose of line/column counting we consider a + // carriage return to take up no space, assuming that it will + // be paired up with a newline (on Windows, for example) that + // will account for both of them. + p.Pos.Byte++ + case '\t': + // We arbitrarily count a tab as if it were two spaces, because + // we need to choose _some_ number here. This means any system + // that renders code on-screen with markers must itself treat + // tabs as a pair of spaces for rendering purposes, or instead + // use the byte offset and back into its own column position. + p.Pos.Byte++ + p.Pos.Column += 2 + default: + break Byte + } + } + return buf[i:], p +} + +type pos struct { + Filename string + Pos hcl.Pos +} + +func (p *pos) Range(byteLen, charLen int) hcl.Range { + start := p.Pos + end := p.Pos + end.Byte += byteLen + end.Column += charLen + return hcl.Range{ + Filename: p.Filename, + Start: start, + End: end, + } +} + +func posRange(start, end pos) hcl.Range { + return hcl.Range{ + Filename: start.Filename, + Start: start.Pos, + End: end.Pos, + } +} + +func (t token) GoString() string { + return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) +} + +func isAlphabetical(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go new file mode 100644 index 00000000000..74847c79a55 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go @@ -0,0 +1,637 @@ +package json + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// body is the implementation of "Body" used for files processed with the JSON +// parser. +type body struct { + val node + + // If non-nil, the keys of this map cause the corresponding attributes to + // be treated as non-existing. This is used when Body.PartialContent is + // called, to produce the "remaining content" Body. 
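+	// For example, once PartialContent has consumed an "io_mode" property,
+	// the remaining body it returns records "io_mode" here so that later
+	// Content or JustAttributes calls no longer report it.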
+ hiddenAttrs map[string]struct{} +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + for _, attr := range jsonAttrs { + k := attr.Name + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, ok := hiddenAttrs[k]; !ok { + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + // Create some more convenient data structures for our work below. + attrSchemas := map[string]hcl.AttributeSchema{} + blockSchemas := map[string]hcl.BlockHeaderSchema{} + for _, attrS := range schema.Attributes { + attrSchemas[attrS.Name] = attrS + } + for _, blockS := range schema.Blocks { + blockSchemas[blockS.Type] = blockS + } + + for _, jsonAttr := range jsonAttrs { + attrName := jsonAttr.Name + if _, used := b.hiddenAttrs[attrName]; used { + continue + } + + if attrS, defined := attrSchemas[attrName]; defined { + if existing, exists := content.Attributes[attrName]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), + Subject: &jsonAttr.NameRange, + Context: jsonAttr.Range().Ptr(), + }) + continue + } + + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrName] = struct{}{} + + } else if blockS, defined := blockSchemas[attrName]; defined { + bv := jsonAttr.Value + blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) + diags = append(diags, blockDiags...) 
+ usedNames[attrName] = struct{}{} + } + + // We ignore anything that isn't defined because that's the + // PartialContent contract. The Content method will catch leftovers. + } + + // Make sure we got all the required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + if _, defined := content.Attributes[attrS.Name]; !defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + } + + unusedBody := &body{ + val: b.val, + hiddenAttrs: usedNames, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + attrs := make(map[string]*hcl.Attribute) + + obj, ok := b.val.(*objectVal) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, setting the arguments for this block.", + Subject: b.val.StartRange().Ptr(), + }) + return attrs, diags + } + + for _, jsonAttr := range obj.Attrs { + name := jsonAttr.Name + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + + if existing, exists := attrs[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), + Subject: &jsonAttr.NameRange, + }) + continue + } + + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. + return attrs, diags +} + +func (b *body) MissingItemRange() hcl.Range { + switch tv := b.val.(type) { + case *objectVal: + return tv.CloseRange + case *arrayVal: + return tv.OpenRange + default: + // Should not happen in correct operation, but might show up if the + // input is invalid and we are producing partial results. + return tv.StartRange() + } +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) + diags = append(diags, attrDiags...) 
+
+		if len(jsonAttrs) == 0 {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing block label",
+				Detail:   fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName),
+				Subject:  v.StartRange().Ptr(),
+			})
+			return
+		}
+		labelsUsed := append(labelsUsed, "")
+		labelRanges := append(labelRanges, hcl.Range{})
+		for _, p := range jsonAttrs {
+			pk := p.Name
+			labelsUsed[len(labelsUsed)-1] = pk
+			labelRanges[len(labelRanges)-1] = p.NameRange
+			diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...)
+		}
+		return
+	}
+
+	// By the time we get here, we've peeled off all the labels and we're ready
+	// to deal with the block's actual content.
+
+	// need to copy the label slices because their underlying arrays will
+	// continue to be mutated after we return.
+	labels := make([]string, len(labelsUsed))
+	copy(labels, labelsUsed)
+	labelR := make([]hcl.Range, len(labelRanges))
+	copy(labelR, labelRanges)
+
+	switch tv := v.(type) {
+	case *nullVal:
+		// There is no block content, e.g. the value is null.
+		return
+	case *objectVal:
+		// Single instance of the block
+		*blocks = append(*blocks, &hcl.Block{
+			Type:   typeName,
+			Labels: labels,
+			Body: &body{
+				val: tv,
+			},
+
+			DefRange:    tv.OpenRange,
+			TypeRange:   *typeRange,
+			LabelRanges: labelR,
+		})
+	case *arrayVal:
+		// Multiple instances of the block
+		for _, av := range tv.Values {
+			*blocks = append(*blocks, &hcl.Block{
+				Type:   typeName,
+				Labels: labels,
+				Body: &body{
+					val: av, // might be mistyped; we'll find out when content is requested for this body
+				},
+
+				DefRange:    tv.OpenRange,
+				TypeRange:   *typeRange,
+				LabelRanges: labelR,
+			})
+		}
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Incorrect JSON value type",
+			Detail:   fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName),
+			Subject:  v.StartRange().Ptr(),
+		})
+	}
+	return
+}
+
+// collectDeepAttrs takes either a single object or an array of objects and
+// flattens it into a list of object attributes, collecting attributes from
+// all of the objects in a given array.
+//
+// Ordering is preserved, so a list of objects that each have one property
+// will result in those properties being returned in the same order as the
+// objects appeared in the array.
+//
+// This is appropriate for use only for objects representing bodies or labels
+// within a block.
+//
+// The labelName argument, if non-null, is used to tailor returned error
+// messages to refer to block labels rather than attributes and child blocks.
+// It has no other effect.
+func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	var attrs []*objectAttr
+
+	switch tv := v.(type) {
+	case *nullVal:
+		// If a value is null, then we don't return any attributes or return an error.
+
+	case *objectVal:
+		attrs = append(attrs, tv.Attrs...)
+
+	case *arrayVal:
+		for _, ev := range tv.Values {
+			switch tev := ev.(type) {
+			case *objectVal:
+				attrs = append(attrs, tev.Attrs...)
+ default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), + Subject: ev.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, to define arguments and child blocks.", + Subject: ev.StartRange().Ptr(), + }) + } + } + } + + default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), + Subject: v.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", + Subject: v.StartRange().Ptr(), + }) + } + } + + return attrs, diags +} + +func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + switch v := e.src.(type) { + case *stringVal: + if ctx != nil { + // Parse string contents as a HCL native language expression. + // We only do this if we have a context, so passing a nil context + // is how the caller specifies that interpolations are not allowed + // and that the string should just be returned verbatim. + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, evalDiags := expr.Value(ctx) + diags = append(diags, evalDiags...) + return val, diags + } + + return cty.StringVal(v.Value), nil + case *numberVal: + return cty.NumberVal(v.Value), nil + case *booleanVal: + return cty.BoolVal(v.Value), nil + case *arrayVal: + var diags hcl.Diagnostics + vals := []cty.Value{} + for _, jsonVal := range v.Values { + val, valDiags := (&expression{src: jsonVal}).Value(ctx) + vals = append(vals, val) + diags = append(diags, valDiags...) + } + return cty.TupleVal(vals), diags + case *objectVal: + var diags hcl.Diagnostics + attrs := map[string]cty.Value{} + attrRanges := map[string]hcl.Range{} + known := true + for _, jsonAttr := range v.Attrs { + // In this one context we allow keys to contain interpolation + // expressions too, assuming we're evaluating in interpolation + // mode. This achieves parity with the native syntax where + // object expressions can have dynamic keys, while block contents + // may not. + name, nameDiags := (&expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}).Value(ctx) + valExpr := &expression{src: jsonAttr.Value} + val, valDiags := valExpr.Value(ctx) + diags = append(diags, nameDiags...) + diags = append(diags, valDiags...) 
+ + var err error + name, err = convert.Convert(name, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if name.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: "Cannot use null value as an object key.", + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if !name.IsKnown() { + // This is a bit of a weird case, since our usual rules require + // us to tolerate unknowns and just represent the result as + // best we can but if we don't know the key then we can't + // know the type of our object at all, and thus we must turn + // the whole thing into cty.DynamicVal. This is consistent with + // how this situation is handled in the native syntax. + // We'll keep iterating so we can collect other errors in + // subsequent attributes. + known = false + continue + } + nameStr := name.AsString() + if _, defined := attrs[nameStr]; defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object attribute", + Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), + Subject: &jsonAttr.NameRange, + Expression: e, + EvalContext: ctx, + }) + continue + } + attrs[nameStr] = val + attrRanges[nameStr] = jsonAttr.NameRange + } + if !known { + // We encountered an unknown key somewhere along the way, so + // we can't know what our type will eventually be. + return cty.DynamicVal, diags + } + return cty.ObjectVal(attrs), diags + case *nullVal: + return cty.NullVal(cty.DynamicPseudoType), nil + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + keyExpr := &stringVal{ // we're going to treat key as an expression in this context + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + } + vars = append(vars, (&expression{src: keyExpr}).Variables()...) + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) + } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. 
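+//
+// For example, the JSON string "aws_instance.example.id" yields a
+// three-step traversal, while a string that is not valid traversal
+// syntax makes this method return nil.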
+func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. + + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprCall. +func (e *expression) ExprCall() *hcl.StaticCall { + // In JSON-based syntax a static call is given as a string containing + // an expression in the native syntax that also supports ExprCall. + + switch v := e.src.(type) { + case *stringVal: + expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return nil + } + + return call + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} + +// Implementation for hcl.ExprMap. +func (e *expression) ExprMap() []hcl.KeyValuePair { + switch v := e.src.(type) { + case *objectVal: + ret := make([]hcl.KeyValuePair, len(v.Attrs)) + for i, jsonAttr := range v.Attrs { + ret[i] = hcl.KeyValuePair{ + Key: &expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}, + Value: &expression{src: jsonAttr.Value}, + } + } + return ret + default: + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go new file mode 100644 index 00000000000..bbcce5b306f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. + +package json + +import "strconv" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/merged.go new file mode 100644 index 00000000000..96e62a58d49 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. +// +// The ordering of the given files decides the order in which contained +// elements will be returned. 
If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. +func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. Applications working with merged +// bodies may wish to mark all attributes as optional and then check for +// required attributes afterwards, to produce better diagnostics. +func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) { + // the returned body will always be empty in this case, because mergedContent + // will only ever call Content on the child bodies. + content, _, diags := mb.mergedContent(schema, false) + return content, diags +} + +func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) { + return mb.mergedContent(schema, true) +} + +func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) { + attrs := make(map[string]*Attribute) + var diags Diagnostics + + for _, body := range mb { + thisAttrs, thisDiags := body.JustAttributes() + + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) 
+ } + + if thisAttrs != nil { + for name, attr := range thisAttrs { + if existing := attrs[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + + attrs[name] = attr + } + } + } + + return attrs, diags +} + +func (mb mergedBodies) MissingItemRange() Range { + if len(mb) == 0 { + // Nothing useful to return here, so we'll return some garbage. + return Range{ + Filename: "", + } + } + + // arbitrarily use the first body's missing item range + return mb[0].MissingItemRange() +} + +func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) { + // We need to produce a new schema with none of the attributes marked as + // required, since _any one_ of our bodies can contribute an attribute value. + // We'll separately check that all required attributes are present at + // the end. + mergedSchema := &BodySchema{ + Blocks: schema.Blocks, + } + for _, attrS := range schema.Attributes { + mergedAttrS := attrS + mergedAttrS.Required = false + mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS) + } + + var mergedLeftovers []Body + content := &BodyContent{ + Attributes: map[string]*Attribute{}, + } + + var diags Diagnostics + for _, body := range mb { + var thisContent *BodyContent + var thisLeftovers Body + var thisDiags Diagnostics + + if partial { + thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema) + } else { + thisContent, thisDiags = body.Content(mergedSchema) + } + + if thisLeftovers != nil { + mergedLeftovers = append(mergedLeftovers, thisLeftovers) + } + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) + } + + if thisContent.Attributes != nil { + for name, attr := range thisContent.Attributes { + if existing := content.Attributes[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + content.Attributes[name] = attr + } + } + + if len(thisContent.Blocks) != 0 { + content.Blocks = append(content.Blocks, thisContent.Blocks...) + } + } + + // Finally, we check for required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + + if content.Attributes[attrS.Name] == nil { + // We don't have any context here to produce a good diagnostic, + // which is why we warn in the Content docstring to minimize the + // use of required attributes on merged bodies. 
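+			// (An application that wants richer diagnostics can instead
+			// leave its attributes optional in the schema and check
+			// content.Attributes itself, where it has enough context to
+			// point at a helpful source range.)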
+ diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf( + "The argument %q is required, but was not set.", + attrS.Name, + ), + }) + } + } + + leftoverBody := MergeBodies(mergedLeftovers) + return content, leftoverBody, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/ops.go new file mode 100644 index 00000000000..5d2910c1301 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/ops.go @@ -0,0 +1,288 @@ +package hcl + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Index is a helper function that performs the same operation as the index +// operator in the HCL expression language. That is, the result is the +// same as it would be for collection[key] in a configuration expression. +// +// This is exported so that applications can perform indexing in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. +func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + // We have a more specialized error message for the situation of + // using a fractional number to index into a sequence, because + // that will tend to happen if the user is trying to use division + // to calculate an index and not realizing that HCL does float + // division rather than integer division. 
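+			// For example (an assumed illustration, not from the upstream
+			// source): list[5 / 2] fails here because 5 / 2 evaluates to
+			// 2.5, rather than silently truncating to list[2] as integer
+			// division would.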
+ if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) { + if key.IsKnown() && !key.IsNull() { + bf := key.AsBigFloat() + if _, acc := bf.Int(nil); acc != big.Exact { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf("The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index (%g) has a fractional part.", bf), + Subject: srcRange, + }, + } + } + } + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} + +// GetAttr is a helper function that performs the same operation as the +// attribute access in the HCL expression language. That is, the result is the +// same as it would be for obj.attr in a configuration expression. +// +// This is exported so that applications can access attributes in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. 
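+//
+// A minimal usage sketch (illustrative, not from the upstream source):
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("example"),
+//	})
+//	v, diags := GetAttr(obj, "name", nil)
+//	// v is cty.StringVal("example"); an attribute name the object lacks
+//	// would produce an "Unsupported attribute" diagnostic instead.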
+func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) { + if obj.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to get attribute from null value", + Detail: "This value is null, so it does not have any attributes.", + Subject: srcRange, + }, + } + } + + ty := obj.Type() + switch { + case ty.IsObjectType(): + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName), + Subject: srcRange, + }, + } + } + + if !obj.IsKnown() { + return cty.UnknownVal(ty.AttributeType(attrName)), nil + } + + return obj.GetAttr(attrName), nil + case ty.IsMapType(): + if !obj.IsKnown() { + return cty.UnknownVal(ty.ElementType()), nil + } + + idx := cty.StringVal(attrName) + if obj.HasIndex(idx).False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Missing map element", + Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName), + Subject: srcRange, + }, + } + } + + return obj.Index(idx), nil + case ty == cty.DynamicPseudoType: + return cty.DynamicVal, nil + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: "This value does not have any attributes.", + Subject: srcRange, + }, + } + } + +} + +// ApplyPath is a helper function that applies a cty.Path to a value using the +// indexing and attribute access operations from HCL. +// +// This is similar to calling the path's own Apply method, but ApplyPath uses +// the more relaxed typing rules that apply to these operations in HCL, rather +// than cty's relatively-strict rules. ApplyPath is implemented in terms of +// Index and GetAttr, and so it has the same behavior for individual steps +// but will stop and return any errors returned by intermediate steps. +// +// Diagnostics are produced if the given path cannot be applied to the given +// value. Therefore a pointer to a source range must be provided to use in +// diagnostics, though nil can be provided if the calling application is going +// to ignore the subject of the returned diagnostics anyway. +func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) { + var diags Diagnostics + + for _, step := range path { + var stepDiags Diagnostics + switch ts := step.(type) { + case cty.IndexStep: + val, stepDiags = Index(val, ts.Key, srcRange) + case cty.GetAttrStep: + val, stepDiags = GetAttr(val, ts.Name, srcRange) + default: + // Should never happen because the above are all of the step types. + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Invalid path step", + Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step), + Subject: srcRange, + }) + return cty.DynamicVal, diags + } + + diags = append(diags, stepDiags...) 
+		if stepDiags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+	}
+
+	return val, diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos.go
new file mode 100644
index 00000000000..06db8bfbd4f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos.go
@@ -0,0 +1,275 @@
+package hcl
+
+import "fmt"
+
+// Pos represents a single position in a source file, by addressing the
+// start byte of a unicode character encoded in UTF-8.
+//
+// Pos is generally used only in the context of a Range, which then defines
+// which source file the position is within.
+type Pos struct {
+	// Line is the source code line where this position points. Lines are
+	// counted starting at 1 and incremented for each newline character
+	// encountered.
+	Line int
+
+	// Column is the source code column where this position points, in
+	// unicode characters, with counting starting at 1.
+	//
+	// Column counts characters as they appear visually, so for example a
+	// latin letter with a combining diacritic mark counts as one character.
+	// This is intended for rendering visual markers against source code in
+	// contexts where these diacritics would be rendered in a single character
+	// cell. Technically speaking, Column is counting grapheme clusters as
+	// used in unicode normalization.
+	Column int
+
+	// Byte is the byte offset into the file where the indicated character
+	// begins. This is a zero-based offset to the first byte of the first
+	// UTF-8 codepoint sequence in the character, and thus gives a position
+	// that can be resolved _without_ awareness of Unicode characters.
+	Byte int
+}
+
+// InitialPos is a suitable position to use to mark the start of a file.
+var InitialPos = Pos{Byte: 0, Line: 1, Column: 1}
+
+// Range represents a span of characters between two positions in a source
+// file.
+//
+// This struct is usually used by value in types that represent AST nodes,
+// but by pointer in types that refer to the positions of other objects,
+// such as in diagnostics.
+type Range struct {
+	// Filename is the name of the file into which this range's positions
+	// point.
+	Filename string
+
+	// Start and End represent the bounds of this range. Start is inclusive
+	// and End is exclusive.
+	Start, End Pos
+}
+
+// RangeBetween returns a new range that spans from the beginning of the
+// start range to the end of the end range.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file or if the end range appears before the start range.
+func RangeBetween(start, end Range) Range {
+	return Range{
+		Filename: start.Filename,
+		Start:    start.Start,
+		End:      end.End,
+	}
+}
+
+// RangeOver returns a new range that covers both of the given ranges and
+// possibly additional content between them if the two ranges do not overlap.
+//
+// If either range is empty then it is ignored. The result is empty if both
+// given ranges are empty.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file.
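+//
+// For example (an illustrative sketch, not from the upstream source;
+// Line/Column fields omitted for brevity):
+//
+//	a := Range{Filename: "f.tf", Start: Pos{Byte: 0}, End: Pos{Byte: 4}}
+//	b := Range{Filename: "f.tf", Start: Pos{Byte: 10}, End: Pos{Byte: 12}}
+//	_ = RangeOver(a, b) // spans from byte 0 to byte 12, including the gap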
+func RangeOver(a, b Range) Range {
+	if a.Empty() {
+		return b
+	}
+	if b.Empty() {
+		return a
+	}
+
+	var start, end Pos
+	if a.Start.Byte < b.Start.Byte {
+		start = a.Start
+	} else {
+		start = b.Start
+	}
+	if a.End.Byte > b.End.Byte {
+		end = a.End
+	} else {
+		end = b.End
+	}
+	return Range{
+		Filename: a.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// ContainsPos returns true if and only if the given position is contained within
+// the receiving range.
+//
+// In the unlikely case that the line/column information disagree with the byte
+// offset information in the given position or receiving range, the byte
+// offsets are given priority.
+func (r Range) ContainsPos(pos Pos) bool {
+	return r.ContainsOffset(pos.Byte)
+}
+
+// ContainsOffset returns true if and only if the given byte offset is within
+// the receiving Range.
+func (r Range) ContainsOffset(offset int) bool {
+	return offset >= r.Start.Byte && offset < r.End.Byte
+}
+
+// Ptr returns a pointer to a copy of the receiver. This is a convenience when
+// using ranges in places where pointers are required, such as in Diagnostic,
+// but the range in question is returned from a method. Go would otherwise not
+// allow one to take the address of a function call.
+func (r Range) Ptr() *Range {
+	return &r
+}
+
+// String returns a compact string representation of the receiver.
+// Callers should generally prefer to present a range more visually,
+// e.g. via markers directly on the relevant portion of source code.
+func (r Range) String() string {
+	if r.Start.Line == r.End.Line {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Column,
+		)
+	} else {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d,%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Line, r.End.Column,
+		)
+	}
+}
+
+func (r Range) Empty() bool {
+	return r.Start.Byte == r.End.Byte
+}
+
+// CanSliceBytes returns true if SliceBytes could return an accurate
+// sub-slice of the given slice.
+//
+// This effectively tests whether the start and end offsets of the range
+// are within the bounds of the slice, and thus whether SliceBytes can be
+// trusted to produce an accurate start and end position within that slice.
+func (r Range) CanSliceBytes(b []byte) bool {
+	switch {
+	case r.Start.Byte < 0 || r.Start.Byte > len(b):
+		return false
+	case r.End.Byte < 0 || r.End.Byte > len(b):
+		return false
+	case r.End.Byte < r.Start.Byte:
+		return false
+	default:
+		return true
+	}
+}
+
+// SliceBytes returns a sub-slice of the given slice that is covered by the
+// receiving range, assuming that the given slice is the source code of the
+// file indicated by r.Filename.
+//
+// If the receiver refers to any byte offsets that are outside of the slice
+// then the result is constrained to the overlapping portion only, to avoid
+// a panic. Use CanSliceBytes to determine if the result is guaranteed to
+// be an accurate span of the requested range.
+func (r Range) SliceBytes(b []byte) []byte {
+	start := r.Start.Byte
+	end := r.End.Byte
+	if start < 0 {
+		start = 0
+	} else if start > len(b) {
+		start = len(b)
+	}
+	if end < 0 {
+		end = 0
+	} else if end > len(b) {
+		end = len(b)
+	}
+	if end < start {
+		end = start
+	}
+	return b[start:end]
+}
+
+// Overlaps returns true if the receiver and the other given range share any
+// characters in common.
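+//
+// For instance (an assumed illustration): a range covering bytes 0-10 of a
+// file overlaps one covering bytes 5-15 of the same file, since they share
+// bytes 5-9, while two ranges from different files never overlap.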
+func (r Range) Overlaps(other Range) bool {
+	switch {
+	case r.Filename != other.Filename:
+		// If the ranges are in different files then they can't possibly overlap
+		return false
+	case r.Empty() || other.Empty():
+		// Empty ranges can never overlap
+		return false
+	case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
+		return true
+	case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
+		return true
+	default:
+		return false
+	}
+}
+
+// Overlap finds a range that is either identical to or a sub-range of both
+// the receiver and the other given range. It returns an empty range
+// within the receiver if there is no overlap between the two ranges.
+//
+// A non-empty result is either identical to or a subset of the receiver.
+func (r Range) Overlap(other Range) Range {
+	if !r.Overlaps(other) {
+		// Start == End indicates an empty range
+		return Range{
+			Filename: r.Filename,
+			Start:    r.Start,
+			End:      r.Start,
+		}
+	}
+
+	var start, end Pos
+	if r.Start.Byte > other.Start.Byte {
+		start = r.Start
+	} else {
+		start = other.Start
+	}
+	if r.End.Byte < other.End.Byte {
+		end = r.End
+	} else {
+		end = other.End
+	}
+
+	return Range{
+		Filename: r.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// PartitionAround finds the portion of the given range that overlaps with
+// the receiver and returns three ranges: the portion of the receiver that
+// precedes the overlap, the overlap itself, and then the portion of the
+// receiver that comes after the overlap.
+//
+// If the two ranges do not overlap then all three returned ranges are empty.
+//
+// If the given range aligns with or extends beyond either extent of the
+// receiver then the corresponding outer range will be empty.
+func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
+	overlap = r.Overlap(other)
+	if overlap.Empty() {
+		return overlap, overlap, overlap
+	}
+
+	before = Range{
+		Filename: r.Filename,
+		Start:    r.Start,
+		End:      overlap.Start,
+	}
+	after = Range{
+		Filename: r.Filename,
+		Start:    overlap.End,
+		End:      r.End,
+	}
+
+	return before, overlap, after
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
new file mode 100644
index 00000000000..17c0d7c6b1e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
@@ -0,0 +1,152 @@
+package hcl
+
+import (
+	"bufio"
+	"bytes"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+)
+
+// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
+// and visit a source range for each token matched.
+//
+// For example, this can be used with bufio.ScanLines to find the source range
+// for each line in the file, skipping over the actual newline characters, which
+// may be useful when printing source code snippets as part of diagnostic
+// messages.
+//
+// The line and column information in the returned ranges is produced by
+// counting newline characters and grapheme clusters respectively, which
+// mimics the behavior we expect from a parser when producing ranges.
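+//
+// A minimal usage sketch (illustrative, not from the upstream source):
+//
+//	sc := NewRangeScanner(src, "example.tf", bufio.ScanLines)
+//	for sc.Scan() {
+//		fmt.Printf("%s: %q\n", sc.Range(), sc.Bytes())
+//	}
+//	if err := sc.Err(); err != nil {
+//		// handle the error from the underlying SplitFunc
+//	}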
+type RangeScanner struct { + filename string + b []byte + cb bufio.SplitFunc + + pos Pos // position of next byte to process in b + cur Range // latest range + tok []byte // slice of b that is covered by cur + err error // error from last scan, if any +} + +// NewRangeScanner creates a new RangeScanner for the given buffer, producing +// ranges for the given filename. +// +// Since ranges have grapheme-cluster granularity rather than byte granularity, +// the scanner will produce incorrect results if the given SplitFunc creates +// tokens between grapheme cluster boundaries. In particular, it is incorrect +// to use RangeScanner with bufio.ScanRunes because it will produce tokens +// around individual UTF-8 sequences, which will split any multi-sequence +// grapheme clusters. +func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner { + return NewRangeScannerFragment(b, filename, InitialPos, cb) +} + +// NewRangeScannerFragment is like NewRangeScanner but the ranges it produces +// will be offset by the given starting position, which is appropriate for +// sub-slices of a file, whereas NewRangeScanner assumes it is scanning an +// entire file. +func NewRangeScannerFragment(b []byte, filename string, start Pos, cb bufio.SplitFunc) *RangeScanner { + return &RangeScanner{ + filename: filename, + b: b, + cb: cb, + pos: start, + } +} + +func (sc *RangeScanner) Scan() bool { + if sc.pos.Byte >= len(sc.b) || sc.err != nil { + // All done + return false + } + + // Since we're operating on an in-memory buffer, we always pass the whole + // remainder of the buffer to our SplitFunc and set isEOF to let it know + // that it has the whole thing. + advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true) + + // Since we are setting isEOF to true this should never happen, but + // if it does we will just abort and assume the SplitFunc is misbehaving. + if advance == 0 && token == nil && err == nil { + return false + } + + if err != nil { + sc.err = err + sc.cur = Range{ + Filename: sc.filename, + Start: sc.pos, + End: sc.pos, + } + sc.tok = nil + return false + } + + sc.tok = token + start := sc.pos + end := sc.pos + new := sc.pos + + // adv is similar to token but it also includes any subsequent characters + // we're being asked to skip over by the SplitFunc. + // adv is a slice covering any additional bytes we are skipping over, based + // on what the SplitFunc told us to do with advance. + adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance] + + // We now need to scan over our token to count the grapheme clusters + // so we can correctly advance Column, and count the newlines so we + // can correctly advance Line. + advR := bytes.NewReader(adv) + gsc := bufio.NewScanner(advR) + advanced := 0 + gsc.Split(textseg.ScanGraphemeClusters) + for gsc.Scan() { + gr := gsc.Bytes() + new.Byte += len(gr) + new.Column++ + + // We rely here on the fact that \r\n is considered a grapheme cluster + // and so we don't need to worry about miscounting additional lines + // on files with Windows-style line endings. + if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') { + new.Column = 1 + new.Line++ + } + + if advanced < len(token) { + // If we've not yet found the end of our token then we'll + // also push our "end" marker along. + // (if advance > len(token) then we'll stop moving "end" early + // so that the caller only sees the range covered by token.) 
+ end = new + } + advanced += len(gr) + } + + sc.cur = Range{ + Filename: sc.filename, + Start: start, + End: end, + } + sc.pos = new + return true +} + +// Range returns a range that covers the latest token obtained after a call +// to Scan returns true. +func (sc *RangeScanner) Range() Range { + return sc.cur +} + +// Bytes returns the slice of the input buffer that is covered by the range +// that would be returned by Range. +func (sc *RangeScanner) Bytes() []byte { + return sc.tok +} + +// Err can be called after Scan returns false to determine if the latest read +// resulted in an error, and obtain that error if so. +func (sc *RangeScanner) Err() error { + return sc.err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/schema.go new file mode 100644 index 00000000000..891257acb20 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/schema.go @@ -0,0 +1,21 @@ +package hcl + +// BlockHeaderSchema represents the shape of a block header, and is +// used for matching blocks within bodies. +type BlockHeaderSchema struct { + Type string + LabelNames []string +} + +// AttributeSchema represents the requirements for an attribute, and is used +// for matching attributes within bodies. +type AttributeSchema struct { + Name string + Required bool +} + +// BodySchema represents the desired shallow structure of a body. +type BodySchema struct { + Attributes []AttributeSchema + Blocks []BlockHeaderSchema +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go new file mode 100644 index 00000000000..98ada87b629 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +type staticExpr struct { + val cty.Value + rng Range +} + +// StaticExpr returns an Expression that always evaluates to the given value. +// +// This is useful to substitute default values for expressions that are +// not explicitly given in configuration and thus would otherwise have no +// Expression to return. +// +// Since expressions are expected to have a source range, the caller must +// provide one. Ideally this should be a real source range, but it can +// be a synthetic one (with an empty-string filename) if no suitable range +// is available. +func StaticExpr(val cty.Value, rng Range) Expression { + return staticExpr{val, rng} +} + +func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) { + return e.val, nil +} + +func (e staticExpr) Variables() []Traversal { + return nil +} + +func (e staticExpr) Range() Range { + return e.rng +} + +func (e staticExpr) StartRange() Range { + return e.rng +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure.go new file mode 100644 index 00000000000..aab09457d73 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure.go @@ -0,0 +1,151 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +// File is the top-level node that results from parsing a HCL file. +type File struct { + Body Body + Bytes []byte + + // Nav is used to integrate with the "hcled" editor integration package, + // and with diagnostic information formatters. 
It is not for direct use + // by a calling application. + Nav interface{} +} + +// Block represents a nested block within a Body. +type Block struct { + Type string + Labels []string + Body Body + + DefRange Range // Range that can be considered the "definition" for seeking in an editor + TypeRange Range // Range for the block type declaration specifically. + LabelRanges []Range // Ranges for the label values specifically. +} + +// Blocks is a sequence of Block. +type Blocks []*Block + +// Attributes is a set of attributes keyed by their names. +type Attributes map[string]*Attribute + +// Body is a container for attributes and blocks. It serves as the primary +// unit of hierarchical structure within configuration. +// +// The content of a body cannot be meaningfully interpreted without a schema, +// so Body represents the raw body content and has methods that allow the +// content to be extracted in terms of a given schema. +type Body interface { + // Content verifies that the entire body content conforms to the given + // schema and then returns it, and/or returns diagnostics. The returned + // body content is valid if non-nil, regardless of whether Diagnostics + // are provided, but diagnostics should still be eventually shown to + // the user. + Content(schema *BodySchema) (*BodyContent, Diagnostics) + + // PartialContent is like Content except that it permits the configuration + // to contain additional blocks or attributes not specified in the + // schema. If any are present, the returned Body is non-nil and contains + // the remaining items from the body that were not selected by the schema. + PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) + + // JustAttributes attempts to interpret all of the contents of the body + // as attributes, allowing for the contents to be accessed without a priori + // knowledge of the structure. + // + // The behavior of this method depends on the body's source language. + // Some languages, like JSON, can't distinguish between attributes and + // blocks without schema hints, but for languages that _can_ error + // diagnostics will be generated if any blocks are present in the body. + // + // Diagnostics may be produced for other reasons too, such as duplicate + // declarations of the same attribute. + JustAttributes() (Attributes, Diagnostics) + + // MissingItemRange returns a range that represents where a missing item + // might hypothetically be inserted. This is used when producing + // diagnostics about missing required attributes or blocks. Not all bodies + // will have an obvious single insertion point, so the result here may + // be rather arbitrary. + MissingItemRange() Range +} + +// BodyContent is the result of applying a BodySchema to a Body. +type BodyContent struct { + Attributes Attributes + Blocks Blocks + + MissingItemRange Range +} + +// Attribute represents an attribute from within a body. +type Attribute struct { + Name string + Expr Expression + + Range Range + NameRange Range +} + +// Expression is a literal value or an expression provided in the +// configuration, which can be evaluated within a scope to produce a value. +type Expression interface { + // Value returns the value resulting from evaluating the expression + // in the given evaluation context. + // + // The context may be nil, in which case the expression may contain + // only constants and diagnostics will be produced for any non-constant + // sub-expressions. (The exact definition of this depends on the source + // language.) 
+ // + // The context may instead be set but have either its Variables or + // Functions maps set to nil, in which case only use of these features + // will return diagnostics. + // + // Different diagnostics are provided depending on whether the given + // context maps are nil or empty. In the former case, the message + // tells the user that variables/functions are not permitted at all, + // while in the latter case usage will produce a "not found" error for + // the specific symbol in question. + Value(ctx *EvalContext) (cty.Value, Diagnostics) + + // Variables returns a list of variables referenced in the receiving + // expression. These are expressed as absolute Traversals, so may include + // additional information about how the variable is used, such as + // attribute lookups, which the calling application can potentially use + // to only selectively populate the scope. + Variables() []Traversal + + Range() Range + StartRange() Range +} + +// OfType filters the receiving block sequence by block type name, +// returning a new block sequence including only the blocks of the +// requested type. +func (els Blocks) OfType(typeName string) Blocks { + ret := make(Blocks, 0) + for _, el := range els { + if el.Type == typeName { + ret = append(ret, el) + } + } + return ret +} + +// ByType transforms the receiving block sequence into a map from type +// name to block sequences of only that type. +func (els Blocks) ByType() map[string]Blocks { + ret := make(map[string]Blocks) + for _, el := range els { + ty := el.Type + if ret[ty] == nil { + ret[ty] = make(Blocks, 0, 1) + } + ret[ty] = append(ret[ty], el) + } + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go new file mode 100644 index 00000000000..8521814e5fb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go @@ -0,0 +1,117 @@ +package hcl + +// ----------------------------------------------------------------------------- +// The methods in this file all have the general pattern of making a best-effort +// to find one or more constructs that contain a given source position. +// +// These all operate by delegating to an optional method of the same name and +// signature on the file's root body, allowing each syntax to potentially +// provide its own implementations of these. For syntaxes that don't implement +// them, the result is always nil. +// ----------------------------------------------------------------------------- + +// BlocksAtPos attempts to find all of the blocks that contain the given +// position, ordered so that the outermost block is first and the innermost +// block is last. This is a best-effort method that may not be able to produce +// a complete result for all positions or for all HCL syntaxes. +// +// If the returned slice is non-empty, the first element is guaranteed to +// represent the same block as would be the result of OutermostBlockAtPos and +// the last element the result of InnermostBlockAtPos. However, the +// implementation may return two different objects describing the same block, +// so comparison by pointer identity is not possible. +// +// The result is nil if no blocks at all contain the given position. +func (f *File) BlocksAtPos(pos Pos) []*Block { + // The root body of the file must implement this interface in order + // to support BlocksAtPos. 
+	type Interface interface {
+		BlocksAtPos(pos Pos) []*Block
+	}
+
+	impl, ok := f.Body.(Interface)
+	if !ok {
+		return nil
+	}
+	return impl.BlocksAtPos(pos)
+}
+
+// OutermostBlockAtPos attempts to find a top-level block in the receiving file
+// that contains the given position. This is a best-effort method that may not
+// be able to produce a result for all positions or for all HCL syntaxes.
+//
+// The result is nil if no single block could be selected for any reason.
+func (f *File) OutermostBlockAtPos(pos Pos) *Block {
+	// The root body of the file must implement this interface in order
+	// to support OutermostBlockAtPos.
+	type Interface interface {
+		OutermostBlockAtPos(pos Pos) *Block
+	}
+
+	impl, ok := f.Body.(Interface)
+	if !ok {
+		return nil
+	}
+	return impl.OutermostBlockAtPos(pos)
+}
+
+// InnermostBlockAtPos attempts to find the most deeply-nested block in the
+// receiving file that contains the given position. This is a best-effort
+// method that may not be able to produce a result for all positions or for
+// all HCL syntaxes.
+//
+// The result is nil if no single block could be selected for any reason.
+func (f *File) InnermostBlockAtPos(pos Pos) *Block {
+	// The root body of the file must implement this interface in order
+	// to support InnermostBlockAtPos.
+	type Interface interface {
+		InnermostBlockAtPos(pos Pos) *Block
+	}
+
+	impl, ok := f.Body.(Interface)
+	if !ok {
+		return nil
+	}
+	return impl.InnermostBlockAtPos(pos)
+}
+
+// OutermostExprAtPos attempts to find an expression in the receiving file
+// that contains the given position. This is a best-effort method that may not
+// be able to produce a result for all positions or for all HCL syntaxes.
+//
+// Since expressions are often nested inside one another, this method returns
+// the outermost "root" expression that is not contained by any other.
+//
+// The result is nil if no single expression could be selected for any reason.
+func (f *File) OutermostExprAtPos(pos Pos) Expression {
+	// The root body of the file must implement this interface in order
+	// to support OutermostExprAtPos.
+	type Interface interface {
+		OutermostExprAtPos(pos Pos) Expression
+	}
+
+	impl, ok := f.Body.(Interface)
+	if !ok {
+		return nil
+	}
+	return impl.OutermostExprAtPos(pos)
+}
+
+// AttributeAtPos attempts to find an attribute definition in the receiving
+// file that contains the given position. This is a best-effort method that may
+// not be able to produce a result for all positions or for all HCL syntaxes.
+//
+// The result is nil if no single attribute could be selected for any reason.
+func (f *File) AttributeAtPos(pos Pos) *Attribute {
+	// The root body of the file must implement this interface in order
+	// to support AttributeAtPos.
+	type Interface interface {
+		AttributeAtPos(pos Pos) *Attribute
+	}
+
+	impl, ok := f.Body.(Interface)
+	if !ok {
+		return nil
+	}
+	return impl.AttributeAtPos(pos)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
new file mode 100644
index 00000000000..d710197008c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
@@ -0,0 +1,293 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// A Traversal is a description of traversing through a value through a
+// series of operations such as attribute lookup, index lookup, etc.
+// +// It is used to look up values in scopes, for example. +// +// The traversal operations are implementations of interface Traverser. +// This is a closed set of implementations, so the interface cannot be +// implemented from outside this package. +// +// A traversal can be absolute (its first value is a symbol name) or relative +// (starts from an existing value). +type Traversal []Traverser + +// TraversalJoin appends a relative traversal to an absolute traversal to +// produce a new absolute traversal. +func TraversalJoin(abs Traversal, rel Traversal) Traversal { + if abs.IsRelative() { + panic("first argument to TraversalJoin must be absolute") + } + if !rel.IsRelative() { + panic("second argument to TraversalJoin must be relative") + } + + ret := make(Traversal, len(abs)+len(rel)) + copy(ret, abs) + copy(ret[len(abs):], rel) + return ret +} + +// TraverseRel applies the receiving traversal to the given value, returning +// the resulting value. This is supported only for relative traversals, +// and will panic if applied to an absolute traversal. +func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + if !t.IsRelative() { + panic("can't use TraverseRel on an absolute traversal") + } + + current := val + var diags Diagnostics + for _, tr := range t { + var newDiags Diagnostics + current, newDiags = tr.TraversalStep(current) + diags = append(diags, newDiags...) + if newDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + return current, diags +} + +// TraverseAbs applies the receiving traversal to the given eval context, +// returning the resulting value. This is supported only for absolute +// traversals, and will panic if applied to a relative traversal. +func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { + if t.IsRelative() { + panic("can't use TraverseAbs on a relative traversal") + } + + split := t.SimpleSplit() + root := split.Abs[0].(TraverseRoot) + name := root.Name + + thisCtx := ctx + hasNonNil := false + for thisCtx != nil { + if thisCtx.Variables == nil { + thisCtx = thisCtx.parent + continue + } + hasNonNil = true + val, exists := thisCtx.Variables[name] + if exists { + return split.Rel.TraverseRel(val) + } + thisCtx = thisCtx.parent + } + + if !hasNonNil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Variables not allowed", + Detail: "Variables may not be used here.", + Subject: &root.SrcRange, + }, + } + } + + suggestions := make([]string, 0, len(ctx.Variables)) + thisCtx = ctx + for thisCtx != nil { + for k := range thisCtx.Variables { + suggestions = append(suggestions, k) + } + thisCtx = thisCtx.parent + } + suggestion := nameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unknown variable", + Detail: fmt.Sprintf("There is no variable named %q.%s", name, suggestion), + Subject: &root.SrcRange, + }, + } +} + +// IsRelative returns true if the receiver is a relative traversal, or false +// otherwise. +func (t Traversal) IsRelative() bool { + if len(t) == 0 { + return true + } + if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot { + return false + } + return true +} + +// SimpleSplit returns a TraversalSplit where the name lookup is the absolute +// part and the remainder is the relative part. Supported only for +// absolute traversals, and will panic if applied to a relative traversal. 
+//
+// This can be used by applications that have a relatively-simple variable
+// namespace where only the top-level is directly populated in the scope, with
+// everything else handled by relative lookups from those initial values.
+func (t Traversal) SimpleSplit() TraversalSplit {
+	if t.IsRelative() {
+		panic("can't use SimpleSplit on a relative traversal")
+	}
+	return TraversalSplit{
+		Abs: t[0:1],
+		Rel: t[1:],
+	}
+}
+
+// RootName returns the root name for an absolute traversal. Will panic if
+// called on a relative traversal.
+func (t Traversal) RootName() string {
+	if t.IsRelative() {
+		panic("can't use RootName on a relative traversal")
+	}
+	return t[0].(TraverseRoot).Name
+}
+
+// SourceRange returns the source range for the traversal.
+func (t Traversal) SourceRange() Range {
+	if len(t) == 0 {
+		// Nothing useful to return here, but we'll return something
+		// that's correctly-typed at least.
+		return Range{}
+	}
+
+	return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
+}
+
+// TraversalSplit represents a pair of traversals, the first of which is
+// an absolute traversal and the second of which is relative to the first.
+//
+// This is used by calling applications that only populate prefixes of the
+// traversals in the scope, with Abs representing the part coming from the
+// scope and Rel representing the remaining steps once that part is
+// retrieved.
+type TraversalSplit struct {
+	Abs Traversal
+	Rel Traversal
+}
+
+// TraverseAbs traverses from a scope to the value resulting from the
+// absolute traversal.
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return t.Abs.TraverseAbs(ctx)
+}
+
+// TraverseRel traverses from a given value, assumed to be the result of
+// TraverseAbs on some scope, to a final result for the entire split traversal.
+func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	return t.Rel.TraverseRel(val)
+}
+
+// Traverse is a convenience function to apply TraverseAbs followed by
+// TraverseRel.
+func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
+	v1, diags := t.TraverseAbs(ctx)
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+	v2, newDiags := t.TraverseRel(v1)
+	diags = append(diags, newDiags...)
+	return v2, diags
+}
+
+// Join concatenates together the Abs and Rel parts to produce a single
+// absolute traversal.
+func (t TraversalSplit) Join() Traversal {
+	return TraversalJoin(t.Abs, t.Rel)
+}
+
+// RootName returns the root name for the absolute part of the split.
+func (t TraversalSplit) RootName() string {
+	return t.Abs.RootName()
+}
+
+// A Traverser is a step within a Traversal.
+type Traverser interface {
+	TraversalStep(cty.Value) (cty.Value, Diagnostics)
+	SourceRange() Range
+	isTraverserSigil() isTraverser
+}
+
+// Embed this in a struct to declare it as a Traverser
+type isTraverser struct {
+}
+
+func (tr isTraverser) isTraverserSigil() isTraverser {
+	return isTraverser{}
+}
+
+// TraverseRoot looks up a root name in a scope. It is used as the first step
+// of an absolute Traversal, and cannot itself be traversed directly.
+type TraverseRoot struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+// TraversalStep on a TraverseRoot immediately panics, because absolute
+// traversals cannot be directly traversed.
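+//
+// To apply a whole absolute traversal, use Traversal.TraverseAbs with an
+// EvalContext instead, e.g. (an illustrative sketch, not from the upstream
+// source):
+//
+//	val, diags := traversal.TraverseAbs(ctx)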
+func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
+	panic("Cannot traverse an absolute traversal")
+}
+
+func (tn TraverseRoot) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseAttr looks up an attribute in its initial value.
+type TraverseAttr struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return GetAttr(val, tn.Name, &tn.SrcRange)
+}
+
+func (tn TraverseAttr) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseIndex applies the index operation to its initial value.
+type TraverseIndex struct {
+	isTraverser
+	Key      cty.Value
+	SrcRange Range
+}
+
+func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return Index(val, tn.Key, &tn.SrcRange)
+}
+
+func (tn TraverseIndex) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseSplat applies the splat operation to its initial value.
+type TraverseSplat struct {
+	isTraverser
+	Each     Traversal
+	SrcRange Range
+}
+
+func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	panic("TraverseSplat not yet implemented")
+}
+
+func (tn TraverseSplat) SourceRange() Range {
+	return tn.SrcRange
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
new file mode 100644
index 00000000000..f69d5fe9b28
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -0,0 +1,124 @@
+package hcl
+
+// AbsTraversalForExpr attempts to interpret the given expression as
+// an absolute traversal, or returns error diagnostic(s) if that is
+// not possible for the given expression.
+//
+// A particular Expression implementation can support this function by
+// offering a method called AsTraversal that takes no arguments and
+// returns either a valid absolute traversal or nil to indicate that
+// no traversal is possible. Alternatively, an implementation can support
+// UnwrapExpression to delegate handling of this function to a wrapped
+// Expression object.
+//
+// In most cases the calling application is interested in the value
+// that results from an expression, but in rarer cases the application
+// needs to see the name of the variable and subsequent
+// attributes/indexes itself, for example to allow users to give references
+// to the variables themselves rather than to their values. An implementer
+// of this function should at least support attribute and index steps.
+func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	type asTraversal interface {
+		AsTraversal() Traversal
+	}
+
+	physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
+		_, supported := expr.(asTraversal)
+		return supported
+	})
+
+	if asT, supported := physExpr.(asTraversal); supported {
+		if traversal := asT.AsTraversal(); traversal != nil {
+			return traversal, nil
+		}
+	}
+	return nil, Diagnostics{
+		&Diagnostic{
+			Severity: DiagError,
+			Summary:  "Invalid expression",
+			Detail:   "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.",
+			Subject:  expr.Range().Ptr(),
+		},
+	}
+}
+
+// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
+// a relative traversal instead.
+// Due to the nature of HCL expressions, the first element of the returned
+// traversal is always a TraverseAttr, and then it will be followed by zero
+// or more other steps.
+//
+// Any expression accepted by AbsTraversalForExpr is also accepted by
+// RelTraversalForExpr.
+func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	traversal, diags := AbsTraversalForExpr(expr)
+	if len(traversal) > 0 {
+		ret := make(Traversal, len(traversal))
+		copy(ret, traversal)
+		root := traversal[0].(TraverseRoot)
+		ret[0] = TraverseAttr{
+			Name:     root.Name,
+			SrcRange: root.SrcRange,
+		}
+		return ret, diags
+	}
+	return traversal, diags
+}
+
+// ExprAsKeyword attempts to interpret the given expression as a static keyword,
+// returning the keyword string if possible, and the empty string if not.
+//
+// A static keyword, for the sake of this function, is a single identifier.
+// For example, the following attribute has an expression that would produce
+// the keyword "foo":
+//
+//     example = foo
+//
+// This function is a variant of AbsTraversalForExpr, which uses the same
+// interface on the given expression. This helper constrains the result
+// further by requiring only a single root identifier.
+//
+// This function is intended to be used with the following idiom, to recognize
+// situations where one of a fixed set of keywords is required and arbitrary
+// expressions are not allowed:
+//
+//     switch hcl.ExprAsKeyword(expr) {
+//     case "allow":
+//         // (take suitable action for keyword "allow")
+//     case "deny":
+//         // (take suitable action for keyword "deny")
+//     default:
+//         diags = append(diags, &hcl.Diagnostic{
+//             // ... "invalid keyword" diagnostic message ...
+//         })
+//     }
+//
+// The above approach will generate the same message for both the use of an
+// unrecognized keyword and for not using a keyword at all, which is usually
+// reasonable if the message specifies that the given value must be a keyword
+// from that fixed list.
+//
+// Note that in the native syntax the keywords "true", "false", and "null" are
+// recognized as literal values during parsing and so these reserved words
+// cannot be accepted as keywords by this function.
+//
+// Since interpreting an expression as a keyword bypasses usual expression
+// evaluation, it should be used sparingly for situations where e.g. one of
+// a fixed set of keywords is used in a structural way in a special attribute
+// to affect the further processing of a block.
+func ExprAsKeyword(expr Expression) string { + type asTraversal interface { + AsTraversal() Traversal + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(asTraversal) + return supported + }) + + if asT, supported := physExpr.(asTraversal); supported { + if traversal := asT.AsTraversal(); len(traversal) == 1 { + return traversal.RootName() + } + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go new file mode 100644 index 00000000000..7e652e9bc64 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go @@ -0,0 +1,21 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type blockLabel struct { + Value string + Range hcl.Range +} + +func labelsForBlock(block *hcl.Block) []blockLabel { + ret := make([]blockLabel, len(block.Labels)) + for i := range block.Labels { + ret[i] = blockLabel{ + Value: block.Labels[i], + Range: block.LabelRanges[i], + } + } + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/decode.go new file mode 100644 index 00000000000..6cf93fedd3e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/decode.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { + schema := ImpliedSchema(spec) + + var content *hcl.BodyContent + var diags hcl.Diagnostics + var leftovers hcl.Body + + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + + val, valDiags := spec.decode(content, blockLabels, ctx) + diags = append(diags, valDiags...) + + return val, leftovers, diags +} + +func impliedType(spec Spec) cty.Type { + return spec.impliedType() +} + +func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + return spec.sourceRange(content, blockLabels) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/doc.go new file mode 100644 index 00000000000..23bfe542b2f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/doc.go @@ -0,0 +1,12 @@ +// Package hcldec provides a higher-level API for unpacking the content of +// HCL bodies, implemented in terms of the low-level "Content" API exposed +// by the bodies themselves. +// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. 
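+//
+// A brief usage sketch (illustrative, not from the upstream source; assumes
+// a parsed file and the cty package):
+//
+//	spec := hcldec.ObjectSpec{
+//		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
+//	}
+//	val, diags := hcldec.Decode(file.Body, spec, nil)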
+package hcldec diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/gob.go new file mode 100644 index 00000000000..e2027cfd2d2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. + gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/public.go new file mode 100644 index 00000000000..3c803632d93 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -0,0 +1,81 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. 
It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. +func SourceRange(body hcl.Body, spec Spec) hcl.Range { + return sourceRange(body, nil, spec) +} + +// ChildBlockTypes returns a map of all of the child block types declared +// by the given spec, with block type names as keys and the associated +// nested body specs as values. +func ChildBlockTypes(spec Spec) map[string]Spec { + ret := map[string]Spec{} + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if bs, ok := s.(blockSpec); ok { + for _, blockS := range bs.blockHeaderSchemata() { + nested := bs.nestedSpec() + if nested != nil { // nil can be returned to dynamically opt out of this interface + ret[blockS.Type] = nested + } + } + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/schema.go new file mode 100644 index 00000000000..b57bd969209 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/schema.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. +// This is the schema that the Decode function will use internally to +// access the content of a given body. +func ImpliedSchema(spec Spec) *hcl.BodySchema { + var attrs []hcl.AttributeSchema + var blocks []hcl.BlockHeaderSchema + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if as, ok := s.(attrSpec); ok { + attrs = append(attrs, as.attrSchemata()...) + } + + if bs, ok := s.(blockSpec); ok { + blocks = append(blocks, bs.blockHeaderSchemata()...) + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return &hcl.BodySchema{ + Attributes: attrs, + Blocks: blocks, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/spec.go new file mode 100644 index 00000000000..f9da7f65bcd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/spec.go @@ -0,0 +1,1567 @@ +package hcldec + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// A Spec is a description of how to decode a hcl.Body to a cty.Value. +// +// The various other types in this package whose names end in "Spec" are +// the spec implementations. The most common top-level spec is ObjectSpec, +// which decodes body content into a cty.Value of an object type. +type Spec interface { + // Perform the decode operation on the given body, in the context of + // the given block (which might be null), using the given eval context. + // + // "block" is provided only by the nested calls performed by the spec + // types that work on block bodies. 
+ decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + + // Return the cty.Type that should be returned when decoding a body with + // this spec. + impliedType() cty.Type + + // Call the given callback once for each of the nested specs that would + // get decoded with the same body and block as the receiver. This should + // not descend into the nested specs used when decoding blocks. + visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. + sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. +type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema + nestedSpec() Spec +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) 
+ } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. +type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
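+// +// This is useful, for example, for wiring a value computed elsewhere into a +// larger spec structure. A hedged sketch, where expr is assumed to be an +// hcl.Expression already in hand: +// +// spec := hcldec.ObjectSpec{ +// "replicas": &hcldec.ExprSpec{Expr: expr}, +// "image": &hcldec.AttrSpec{Name: "image", Type: cty.String}, +// } +// +// Here "replicas" is produced by evaluating expr rather than by reading the +// decoded body.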
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ return val, diags +} + +func (s *BlockSpec) impliedType() cty.Type { + return s.Nested.impliedType() +} + +func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockListSpec is a Spec that produces a cty list of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockListSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockListSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.ListValEmpty(s.Nested.impliedType()) + } else { + // Since our target is a list, all of the decoded elements must have the + // same type or cty.ListVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertible to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message.
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.ListVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + + ret = cty.ListVal(elems) + } + + return ret, diags +} + +func (s *BlockListSpec) impliedType() cty.Type { + return cty.List(s.Nested.impliedType()) +} + +func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockTupleSpec is a Spec that produces a cty tuple of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// This is similar to BlockListSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockTupleSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockTupleSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockTupleSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...)
+ elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.EmptyTupleVal + } else { + ret = cty.TupleVal(elems) + } + + return ret, diags +} + +func (s *BlockTupleSpec) impliedType() cty.Type { + // We can't predict our type, because we don't know how many blocks + // there will be until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockSetSpec is a Spec that produces a cty set of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockSetSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSetSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockSetSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...)
+ elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.SetValEmpty(s.Nested.impliedType()) + } else { + // Since our target is a set, all of the decoded elements must have the + // same type or cty.SetVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertible to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.SetVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + + ret = cty.SetVal(elems) + } + + return ret, diags +} + +func (s *BlockSetSpec) impliedType() cty.Type { + return cty.Set(s.Nested.impliedType()) +} + +func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockMapSpec is a Spec that produces a cty map of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of map structure is created for each of the given label names. +// There must be at least one given label name.
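+// +// A sketch of typical usage, with all names hypothetical: a spec such as +// +// &hcldec.BlockMapSpec{ +// TypeName: "service", +// LabelNames: []string{"name"}, +// Nested: hcldec.ObjectSpec{ +// "port": &hcldec.AttrSpec{Name: "port", Type: cty.Number}, +// }, +// } +// +// decodes each service "<name>" { port = ... } block into one element of a +// map of objects, keyed by the block's label.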
+type BlockMapSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockMapSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockMapSpec with no Nested Spec") + } + if ImpliedType(s).HasDynamicTypes() { + panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(s.Nested.impliedType()), diags + } + + var ctyMap func(map[string]interface{}, int) cty.Value + ctyMap = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyMap(v.(map[string]interface{}), depth-1) + } + } + return cty.MapVal(vals) + } + + return ctyMap(elems, len(s.LabelNames)), diags +} + +func (s *BlockMapSpec) impliedType() cty.Type { + ret := s.Nested.impliedType() + for _ = range s.LabelNames { + ret = cty.Map(ret) + } + return ret +} + +func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. 
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockObjectSpec is a Spec that produces a cty object of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of object structure is created for each of the given label names. +// There must be at least one given label name. +// +// This is similar to BlockMapSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockObjectSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockObjectSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockObjectSpec with no Nested Spec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. 
The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.EmptyObjectVal, diags + } + + var ctyObj func(map[string]interface{}, int) cty.Value + ctyObj = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyObj(v.(map[string]interface{}), depth-1) + } + } + return cty.ObjectVal(vals) + } + + return ctyObj(elems, len(s.LabelNames)), diags +} + +func (s *BlockObjectSpec) impliedType() cty.Type { + // We can't predict our type, since we don't know how many blocks are + // present and what labels they have until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockAttrsSpec is a Spec that interprets a single block as if it were +// a map of some element type. That is, each attribute within the block +// becomes a key in the resulting map and the attribute's value becomes the +// element value, after conversion to the given element type. The resulting +// value is a cty.Map of the given element type. +// +// This spec imposes a validation constraint that there be exactly one block +// of the given type name and that this block may contain only attributes. The +// block does not accept any labels. +// +// This is an alternative to an AttrSpec of a map type for situations where +// block syntax is desired. Note that block syntax does not permit dynamic +// keys, construction of the result via a "for" expression, etc. In most cases +// an AttrSpec is preferred if the desired result is a map whose keys are +// chosen by the user rather than by schema. +type BlockAttrsSpec struct { + TypeName string + ElementType cty.Type + Required bool +} + +func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// blockSpec implementation +func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: nil, + }, + } +} + +// blockSpec implementation +func (s *BlockAttrsSpec) nestedSpec() Spec { + // This is an odd case: we aren't actually going to apply a nested spec + // in this case, since we're going to interpret the body directly as + // attributes, but we need to return something non-nil so that the + // decoder will recognize this as a block spec. We won't actually be + // using this for anything at decode time. + return noopSpec{} +} + +// specNeedingVariables implementation +func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + + block, _ := s.findBlock(content) + if block == nil { + return nil + } + + var vars []hcl.Traversal + + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil + } + + for _, attr := range attrs { + vars = append(vars, attr.Expr.Variables()...) 
+ } + + // We'll return the variable references in source order so that any + // error messages that result are also in source order. + sort.Slice(vars, func(i, j int) bool { + return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte + }) + + return vars +} + +func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + block, other := s.findBlock(content) + if block == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(cty.Map(s.ElementType)), diags + } + if other != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, block.DefRange.String(), + ), + Subject: &other.DefRange, + }) + } + + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + if len(attrs) == 0 { + return cty.MapValEmpty(s.ElementType), diags + } + + vals := make(map[string]cty.Value, len(attrs)) + for name, attr := range attrs { + attrVal, attrDiags := attr.Expr.Value(ctx) + diags = append(diags, attrDiags...) + + attrVal, err := convert.Convert(attrVal, s.ElementType) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute value", + Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), + Subject: attr.Expr.Range().Ptr(), + }) + attrVal = cty.UnknownVal(s.ElementType) + } + + vals[name] = attrVal + } + + return cty.MapVal(vals), diags +} + +func (s *BlockAttrsSpec) impliedType() cty.Type { + return cty.Map(s.ElementType) +} + +func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + block, _ := s.findBlock(content) + if block == nil { + return content.MissingItemRange + } + return block.DefRange +} + +func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + if block != nil { + return block, candidate + } + block = candidate + } + + return block, nil +} + +// A BlockLabelSpec is a Spec that returns a cty.String representing the +// label of the block its given body belongs to, if indeed its given body +// belongs to a block. It is a programming error to use this in a non-block +// context, so this spec will panic in that case. +// +// This spec only works in the nested spec within a BlockSpec, BlockListSpec, +// BlockSetSpec or BlockMapSpec. +// +// The full set of label specs used against a particular block must have a +// consecutive set of indices starting at zero. The maximum index found +// defines how many labels the corresponding blocks must have in HCL source.
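+// +// A sketch of how a label spec combines with a block spec, with hypothetical +// names: the BlockLabelSpec at Index 0 makes blocks of this type require +// exactly one label, whose value surfaces in the result as "name": +// +// &hcldec.BlockListSpec{ +// TypeName: "rule", +// Nested: hcldec.ObjectSpec{ +// "name": &hcldec.BlockLabelSpec{Index: 0, Name: "name"}, +// "action": &hcldec.AttrSpec{Name: "action", Type: cty.String}, +// }, +// }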
+type BlockLabelSpec struct { + Index int + Name string +} + +func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if s.Index >= len(blockLabels) { + panic("BlockLabelSpec used in non-block context") + } + + return cty.StringVal(blockLabels[s.Index].Value), nil +} + +func (s *BlockLabelSpec) impliedType() cty.Type { + return cty.String // labels are always strings +} + +func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + if s.Index >= len(blockLabels) { + panic("BlockLabelSpec used in non-block context") + } + + return blockLabels[s.Index].Range +} + +func findLabelSpecs(spec Spec) []string { + maxIdx := -1 + var names map[int]string + + var visit visitFunc + visit = func(s Spec) { + if ls, ok := s.(*BlockLabelSpec); ok { + if maxIdx < ls.Index { + maxIdx = ls.Index + } + if names == nil { + names = make(map[int]string) + } + names[ls.Index] = ls.Name + } + s.visitSameBodyChildren(visit) + } + + visit(spec) + + if maxIdx < 0 { + return nil // no labels at all + } + + ret := make([]string, maxIdx+1) + for i := range ret { + name := names[i] + if name == "" { + // Should never happen if the spec is conformant, since we require + // consecutive indices starting at zero. + name = fmt.Sprintf("missing%02d", i) + } + ret[i] = name + } + + return ret +} + +// DefaultSpec is a spec that wraps two specs, evaluating the primary first +// and then evaluating the default if the primary returns a null value. +// +// The two specifications must have the same implied result type for correct +// operation. If not, the result is undefined. +// +// Any requirements imposed by the "Default" spec apply even if "Primary" does +// not return null. For example, if the "Default" spec is for a required +// attribute then that attribute is always required, regardless of the result +// of the "Primary" spec. +// +// The "Default" spec must not describe a nested block, since otherwise the +// result of ChildBlockTypes would not be decidable without evaluation. If +// the default spec _does_ describe a nested block then the result is +// undefined. +type DefaultSpec struct { + Primary Spec + Default Spec +} + +func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Primary) + cb(s.Default) +} + +func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, diags := s.Primary.decode(content, blockLabels, ctx) + if val.IsNull() { + var moreDiags hcl.Diagnostics + val, moreDiags = s.Default.decode(content, blockLabels, ctx) + diags = append(diags, moreDiags...) + } + return val, diags +} + +func (s *DefaultSpec) impliedType() cty.Type { + return s.Primary.impliedType() +} + +// attrSpec implementation +func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { + // We must pass through the union of both of our nested specs so that + // we'll have both values available in the result. + var ret []hcl.AttributeSchema + if as, ok := s.Primary.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...) + } + if as, ok := s.Default.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...)
+ } + return ret +} + +// blockSpec implementation +func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + // Only the primary spec may describe a block, since otherwise + // our nestedSpec method below can't know which to return. + if bs, ok := s.Primary.(blockSpec); ok { + return bs.blockHeaderSchemata() + } + return nil +} + +// blockSpec implementation +func (s *DefaultSpec) nestedSpec() Spec { + if bs, ok := s.Primary.(blockSpec); ok { + return bs.nestedSpec() + } + return nil +} + +func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We can't tell from here which of the two specs will ultimately be used + // in our result, so we'll just assume the first. This is usually the right + // choice because the default is often a literal spec that doesn't have a + // reasonable source range to return anyway. + return s.Primary.sourceRange(content, blockLabels) +} + +// TransformExprSpec is a spec that wraps another and then evaluates a given +// hcl.Expression on the result. +// +// The implied type of this spec is determined by evaluating the expression +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +type TransformExprSpec struct { + Wrapped Spec + Expr hcl.Expression + TransformCtx *hcl.EvalContext + VarName string +} + +func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: wrappedVal, + } + resultVal, resultDiags := s.Expr.Value(chiCtx) + diags = append(diags, resultDiags...) + return resultVal, diags +} + +func (s *TransformExprSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: cty.UnknownVal(wrappedTy), + } + resultVal, _ := s.Expr.Value(chiCtx) + return resultVal.Type() +} + +func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// TransformFuncSpec is a spec that wraps another and then evaluates a given +// cty function with the result. The given function must expect exactly one +// argument, where the result of the wrapped spec will be passed. +// +// The implied type of this spec is determined by type-checking the function +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +// +// If the given function produces an error when run, this spec will produce +// a non-user-actionable diagnostic message. 
It's the caller's responsibility +// to ensure that the given function cannot fail for any non-error result +// of the wrapped spec. +type TransformFuncSpec struct { + Wrapped Spec + Func function.Function +} + +func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) + if err != nil { + // This is not a good example of a diagnostic because it is reporting + // a programming error in the calling application, rather than something + // an end-user could act on. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Transform function failed", + Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), + Subject: s.sourceRange(content, blockLabels).Ptr(), + }) + return cty.UnknownVal(s.impliedType()), diags + } + + return resultVal, diags +} + +func (s *TransformFuncSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) + if err != nil { + // Should never happen with a correctly-configured spec + return cty.DynamicPseudoType + } + + return resultTy +} + +func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// noopSpec is a placeholder spec that does nothing, used in situations where +// a non-nil placeholder spec is required. It is not exported because there is +// no reason to use it directly; it is always an implementation detail only. +type noopSpec struct { +} + +func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return cty.NullVal(cty.DynamicPseudoType), nil +} + +func (s noopSpec) impliedType() cty.Type { + return cty.DynamicPseudoType +} + +func (s noopSpec) visitSameBodyChildren(cb visitFunc) { + // nothing to do +} + +func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No useful range for a noopSpec, and nobody should be calling this anyway. + return hcl.Range{ + Filename: "noopSpec", + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/variables.go new file mode 100644 index 00000000000..7662516cada --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcldec/variables.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Variables processes the given body with the given spec and returns a +// list of the variable traversals that would be required to decode +// the same pairing of body and spec. +// +// This can be used to conditionally populate the variables in the EvalContext +// passed to Decode, for applications where a static scope is insufficient. 
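+// +// A sketch of that pattern, where lookUpVariable is a hypothetical +// application-defined helper: +// +// needed := hcldec.Variables(body, spec) +// vals := make(map[string]cty.Value) +// for _, traversal := range needed { +// name := traversal.RootName() +// vals[name] = lookUpVariable(name) // hypothetical helper +// } +// ctx := &hcl.EvalContext{Variables: vals} +// val, diags := hcldec.Decode(body, spec, ctx)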
+// +// If the given body is not compliant with the given schema, the result may +// be incomplete, but that's assumed to be okay because the eventual call +// to Decode will produce error diagnostics anyway. +func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + var vars []hcl.Traversal + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + if vs, ok := spec.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + + var visitFn visitFunc + visitFn = func(s Spec) { + if vs, ok := s.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + s.visitSameBodyChildren(visitFn) + } + spec.visitSameBodyChildren(visitFn) + + return vars +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/doc.go new file mode 100644 index 00000000000..1a8014480cf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/doc.go @@ -0,0 +1,4 @@ +// Package hcled provides functionality intended to help an application +// that embeds HCL to deliver relevant information to a text editor or IDE +// for navigating around and analyzing configuration files. +package hcled diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/navigation.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/navigation.go new file mode 100644 index 00000000000..5d10cd86cea --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hcled/navigation.go @@ -0,0 +1,34 @@ +package hcled + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type contextStringer interface { + ContextString(offset int) string +} + +// ContextString returns a string describing the context of the given byte +// offset, if available. An empty string is returned if no such information +// is available, or otherwise the returned string is in a form that depends +// on the language used to write the referenced file. +func ContextString(file *hcl.File, offset int) string { + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} + +type contextDefRanger interface { + ContextDefRange(offset int) hcl.Range +} + +func ContextDefRange(file *hcl.File, offset int) hcl.Range { + if cser, ok := file.Nav.(contextDefRanger); ok { + defRange := cser.ContextDefRange(offset) + if !defRange.Empty() { + return defRange + } + } + return file.Body.MissingItemRange() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclparse/parser.go new file mode 100644 index 00000000000..6d47f1268f4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclparse/parser.go @@ -0,0 +1,123 @@ +package hclparse + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hcl/json" +) + +// NOTE: This is the public interface for parsing. The actual parsers are +// in other packages alongside this one, with this package just wrapping them +// to provide a unified interface for the caller across all supported formats. + +// Parser is the main interface for parsing configuration files. 
As well as +// parsing files, a parser also retains a registry of all of the files it +// has parsed so that multiple attempts to parse the same file will return +// the same object and so the collected files can be used when printing +// diagnostics. +// +// Any diagnostics for parsing a file are only returned once on the first +// call to parse that file. Callers are expected to collect up diagnostics +// and present them together, so returning diagnostics for the same file +// multiple times would create a confusing result. +type Parser struct { + files map[string]*hcl.File +} + +// NewParser creates a new parser, ready to parse configuration files. +func NewParser() *Parser { + return &Parser{ + files: map[string]*hcl.File{}, + } +} + +// ParseHCL parses the given buffer (which is assumed to have been loaded from +// the given filename) as a native-syntax configuration file and returns the +// hcl.File object representing it. +func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + p.files[filename] = file + return file, diags +} + +// ParseHCLFile reads the given filename and parses it as a native-syntax HCL +// configuration file. An error diagnostic is returned if the given file +// cannot be read. +func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), + }, + } + } + + return p.ParseHCL(src, filename) +} + +// ParseJSON parses the given JSON buffer (which is assumed to have been loaded +// from the given filename) and returns the hcl.File object representing it. +func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.Parse(src, filename) + p.files[filename] = file + return file, diags +} + +// ParseJSONFile reads the given filename and parses it as JSON, similarly to +// ParseJSON. An error diagnostic is returned if the given file cannot be read. +func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.ParseFile(filename) + p.files[filename] = file + return file, diags +} + +// AddFile allows a caller to record in a parser a file that was parsed some +// other way, thus allowing it to be included in the registry of sources. +func (p *Parser) AddFile(filename string, file *hcl.File) { + p.files[filename] = file +} + +// Sources returns a map from filenames to the raw source code that was +// read from them. This is intended to be used, for example, to print +// diagnostics with contextual information. +// +// The arrays underlying the returned slices should not be modified. +func (p *Parser) Sources() map[string][]byte { + ret := make(map[string][]byte) + for fn, f := range p.files { + ret[fn] = f.Bytes + } + return ret +} + +// Files returns a map from filenames to the File objects produced from them. 
+//
+// The returned map and all of the objects it refers to directly or indirectly
+// must not be modified.
+func (p *Parser) Files() map[string]*hcl.File {
+	return p.files
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
new file mode 100644
index 00000000000..090416528da
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
@@ -0,0 +1,121 @@
+package hclwrite
+
+import (
+	"bytes"
+	"io"
+)
+
+type File struct {
+	inTree
+
+	srcBytes []byte
+	body     *node
+}
+
+// NewEmptyFile constructs a new file with no content, ready to be mutated
+// by other calls that append to its body.
+func NewEmptyFile() *File {
+	f := &File{
+		inTree: newInTree(),
+	}
+	body := newBody()
+	f.body = f.children.Append(body)
+	return f
+}
+
+// Body returns the root body of the file, which contains the top-level
+// attributes and blocks.
+func (f *File) Body() *Body {
+	return f.body.content.(*Body)
+}
+
+// WriteTo writes the tokens underlying the receiving file to the given writer.
+//
+// The tokens first have a simple formatting pass applied that adjusts only
+// the spaces between them.
+func (f *File) WriteTo(wr io.Writer) (int64, error) {
+	tokens := f.inTree.children.BuildTokens(nil)
+	format(tokens)
+	return tokens.WriteTo(wr)
+}
+
+// Bytes returns a buffer containing the source code resulting from the
+// tokens underlying the receiving file. If any updates have been made via
+// the AST API, these will be reflected in the result.
+func (f *File) Bytes() []byte {
+	buf := &bytes.Buffer{}
+	f.WriteTo(buf)
+	return buf.Bytes()
+}
+
+type comments struct {
+	leafNode
+
+	parent *node
+	tokens Tokens
+}
+
+func newComments(tokens Tokens) *comments {
+	return &comments{
+		tokens: tokens,
+	}
+}
+
+func (c *comments) BuildTokens(to Tokens) Tokens {
+	return c.tokens.BuildTokens(to)
+}
+
+type identifier struct {
+	leafNode
+
+	parent *node
+	token  *Token
+}
+
+func newIdentifier(token *Token) *identifier {
+	return &identifier{
+		token: token,
+	}
+}
+
+func (i *identifier) BuildTokens(to Tokens) Tokens {
+	return append(to, i.token)
+}
+
+func (i *identifier) hasName(name string) bool {
+	return name == string(i.token.Bytes)
+}
+
+type number struct {
+	leafNode
+
+	parent *node
+	token  *Token
+}
+
+func newNumber(token *Token) *number {
+	return &number{
+		token: token,
+	}
+}
+
+func (n *number) BuildTokens(to Tokens) Tokens {
+	return append(to, n.token)
+}
+
+type quoted struct {
+	leafNode
+
+	parent *node
+	tokens Tokens
+}
+
+func newQuoted(tokens Tokens) *quoted {
+	return &quoted{
+		tokens: tokens,
+	}
+}
+
+func (q *quoted) BuildTokens(to Tokens) Tokens {
+	return q.tokens.BuildTokens(to)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
new file mode 100644
index 00000000000..975fa74280d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
@@ -0,0 +1,48 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+type Attribute struct {
+	inTree
+
+	leadComments *node
+	name         *node
+	expr         *node
+	lineComments *node
+}
+
+func newAttribute() *Attribute {
+	return &Attribute{
+		inTree: newInTree(),
+	}
+}
+
+func (a *Attribute) init(name string, expr *Expression) {
+	expr.assertUnattached()
+
+	nameTok := newIdentToken(name)
+	nameObj := newIdentifier(nameTok)
+	a.leadComments = a.children.Append(newComments(nil))
+	a.name = a.children.Append(nameObj)
+	a.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenEqual,
+			Bytes: []byte{'='},
+		},
+	})
+	a.expr = a.children.Append(expr)
+	a.expr.list = a.children
+	a.lineComments = a.children.Append(newComments(nil))
+	a.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
+
+func (a *Attribute) Expr() *Expression {
+	return a.expr.content.(*Expression)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
new file mode 100644
index 00000000000..d5fd32bd51b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
@@ -0,0 +1,74 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Block struct {
+	inTree
+
+	leadComments *node
+	typeName     *node
+	labels       nodeSet
+	open         *node
+	body         *node
+	close        *node
+}
+
+func newBlock() *Block {
+	return &Block{
+		inTree: newInTree(),
+		labels: newNodeSet(),
+	}
+}
+
+// NewBlock constructs a new, empty block with the given type name and labels.
+func NewBlock(typeName string, labels []string) *Block {
+	block := newBlock()
+	block.init(typeName, labels)
+	return block
+}
+
+func (b *Block) init(typeName string, labels []string) {
+	nameTok := newIdentToken(typeName)
+	nameObj := newIdentifier(nameTok)
+	b.leadComments = b.children.Append(newComments(nil))
+	b.typeName = b.children.Append(nameObj)
+	for _, label := range labels {
+		labelToks := TokensForValue(cty.StringVal(label))
+		labelObj := newQuoted(labelToks)
+		labelNode := b.children.Append(labelObj)
+		b.labels.Add(labelNode)
+	}
+	b.open = b.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenOBrace,
+			Bytes: []byte{'{'},
+		},
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+	body := newBody() // initially totally empty; caller can append to it subsequently
+	b.body = b.children.Append(body)
+	b.close = b.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenCBrace,
+			Bytes: []byte{'}'},
+		},
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
+
+// Body returns the body that represents the content of the receiving block.
+//
+// Appending to or otherwise modifying this body will make changes to the
+// tokens that are generated between the block's open and close braces.
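+//
+// For example (an illustrative attribute name and value):
+//
+//	block.Body().SetAttributeValue("ami", cty.StringVal("ami-123456"))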
+func (b *Block) Body() *Body {
+	return b.body.content.(*Body)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
new file mode 100644
index 00000000000..cf69fee215b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
@@ -0,0 +1,153 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Body struct {
+	inTree
+
+	items nodeSet
+}
+
+func newBody() *Body {
+	return &Body{
+		inTree: newInTree(),
+		items:  newNodeSet(),
+	}
+}
+
+func (b *Body) appendItem(c nodeContent) *node {
+	nn := b.children.Append(c)
+	b.items.Add(nn)
+	return nn
+}
+
+func (b *Body) appendItemNode(nn *node) *node {
+	nn.assertUnattached()
+	b.children.AppendNode(nn)
+	b.items.Add(nn)
+	return nn
+}
+
+// Clear removes all of the items from the body, making it empty.
+func (b *Body) Clear() {
+	b.children.Clear()
+}
+
+func (b *Body) AppendUnstructuredTokens(ts Tokens) {
+	b.inTree.children.Append(ts)
+}
+
+// Attributes returns a new map of all of the attributes in the body, with
+// the attribute names as the keys.
+func (b *Body) Attributes() map[string]*Attribute {
+	ret := make(map[string]*Attribute)
+	for n := range b.items {
+		if attr, isAttr := n.content.(*Attribute); isAttr {
+			nameObj := attr.name.content.(*identifier)
+			name := string(nameObj.token.Bytes)
+			ret[name] = attr
+		}
+	}
+	return ret
+}
+
+// Blocks returns a new slice of all the blocks in the body.
+func (b *Body) Blocks() []*Block {
+	ret := make([]*Block, 0, len(b.items))
+	for n := range b.items {
+		if block, isBlock := n.content.(*Block); isBlock {
+			ret = append(ret, block)
+		}
+	}
+	return ret
+}
+
+// GetAttribute returns the attribute from the body that has the given name,
+// or returns nil if there is currently no matching attribute.
+func (b *Body) GetAttribute(name string) *Attribute {
+	for n := range b.items {
+		if attr, isAttr := n.content.(*Attribute); isAttr {
+			nameObj := attr.name.content.(*identifier)
+			if nameObj.hasName(name) {
+				// We've found it!
+				return attr
+			}
+		}
+	}
+
+	return nil
+}
+
+// SetAttributeValue either replaces the expression of an existing attribute
+// of the given name or adds a new attribute definition to the end of the body.
+//
+// The value is given as a cty.Value, and must therefore be a literal. To set
+// a variable reference or other traversal, use SetAttributeTraversal.
+//
+// The return value is the attribute that was either modified in-place or
+// created.
+func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
+	attr := b.GetAttribute(name)
+	expr := NewExpressionLiteral(val)
+	if attr != nil {
+		attr.expr = attr.expr.ReplaceWith(expr)
+	} else {
+		// Assign to the existing attr variable (rather than declaring a new
+		// one that would shadow it) so that the newly-created attribute is
+		// what we return below.
+		attr = newAttribute()
+		attr.init(name, expr)
+		b.appendItem(attr)
+	}
+	return attr
+}
+
+// SetAttributeTraversal either replaces the expression of an existing attribute
+// of the given name or adds a new attribute definition to the end of the body.
+//
+// The new expression is given as an hcl.Traversal, which must be an absolute
+// traversal. To set a literal value, use SetAttributeValue.
+//
+// The return value is the attribute that was either modified in-place or
+// created.
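+//
+// For example (an illustrative reference to a hypothetical variable):
+//
+//	body.SetAttributeTraversal("region", hcl.Traversal{
+//		hcl.TraverseRoot{Name: "var"},
+//		hcl.TraverseAttr{Name: "region"},
++//	})
+//
+// which renders as region = var.region.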
+func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
+	attr := b.GetAttribute(name)
+	expr := NewExpressionAbsTraversal(traversal)
+	if attr != nil {
+		attr.expr = attr.expr.ReplaceWith(expr)
+	} else {
+		// As in SetAttributeValue, assign rather than re-declare so that
+		// the newly-created attribute is returned.
+		attr = newAttribute()
+		attr.init(name, expr)
+		b.appendItem(attr)
+	}
+	return attr
+}
+
+// AppendBlock appends an existing block (which must not be already attached
+// to a body) to the end of the receiving body.
+func (b *Body) AppendBlock(block *Block) *Block {
+	b.appendItem(block)
+	return block
+}
+
+// AppendNewBlock appends a new nested block to the end of the receiving body
+// with the given type name and labels.
+func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
+	block := newBlock()
+	block.init(typeName, labels)
+	b.appendItem(block)
+	return block
+}
+
+// AppendNewline appends a newline token to the end of the receiving body,
+// which generally serves as a separator between different sets of body
+// contents.
+func (b *Body) AppendNewline() {
+	b.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
new file mode 100644
index 00000000000..62d89fbefc2
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
@@ -0,0 +1,201 @@
+package hclwrite
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Expression struct {
+	inTree
+
+	absTraversals nodeSet
+}
+
+func newExpression() *Expression {
+	return &Expression{
+		inTree:        newInTree(),
+		absTraversals: newNodeSet(),
+	}
+}
+
+// NewExpressionLiteral constructs an expression that represents the given
+// literal value.
+//
+// Since an unknown value cannot be represented in source code, this function
+// will panic if the given value is unknown or contains a nested unknown value.
+// Use val.IsWhollyKnown before calling to be sure.
+//
+// HCL native syntax does not directly represent lists, maps, and sets, and
+// instead relies on the automatic conversions to those collection types from
+// either list or tuple constructor syntax. Therefore converting collection
+// values to source code and re-reading them will lose type information, and
+// the reader must provide a suitable type at decode time to recover the
+// original value.
+func NewExpressionLiteral(val cty.Value) *Expression {
+	toks := TokensForValue(val)
+	expr := newExpression()
+	expr.children.AppendUnstructuredTokens(toks)
+	return expr
+}
+
+// NewExpressionAbsTraversal constructs an expression that represents the
+// given traversal, which must be absolute or this function will panic.
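+//
+// An illustrative sketch (the names here are hypothetical):
+//
+//	expr := NewExpressionAbsTraversal(hcl.Traversal{
+//		hcl.TraverseRoot{Name: "aws_instance"},
+//		hcl.TraverseAttr{Name: "example"},
+//		hcl.TraverseAttr{Name: "id"},
+//	})
+//
+// The resulting expression renders as aws_instance.example.id.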
+func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
+	if traversal.IsRelative() {
+		panic("can't construct expression from relative traversal")
+	}
+
+	physT := newTraversal()
+	rootName := traversal.RootName()
+	steps := traversal[1:]
+
+	{
+		tn := newTraverseName()
+		tn.name = tn.children.Append(newIdentifier(&Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(rootName),
+		}))
+		physT.steps.Add(physT.children.Append(tn))
+	}
+
+	for _, step := range steps {
+		switch ts := step.(type) {
+		case hcl.TraverseAttr:
+			tn := newTraverseName()
+			tn.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenDot,
+					Bytes: []byte{'.'},
+				},
+			})
+			tn.name = tn.children.Append(newIdentifier(&Token{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte(ts.Name),
+			}))
+			physT.steps.Add(physT.children.Append(tn))
+		case hcl.TraverseIndex:
+			ti := newTraverseIndex()
+			ti.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenOBrack,
+					Bytes: []byte{'['},
+				},
+			})
+			indexExpr := NewExpressionLiteral(ts.Key)
+			ti.key = ti.children.Append(indexExpr)
+			ti.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenCBrack,
+					Bytes: []byte{']'},
+				},
+			})
+			physT.steps.Add(physT.children.Append(ti))
+		}
+	}
+
+	expr := newExpression()
+	expr.absTraversals.Add(expr.children.Append(physT))
+	return expr
+}
+
+// Variables returns the absolute traversals that exist within the receiving
+// expression.
+func (e *Expression) Variables() []*Traversal {
+	nodes := e.absTraversals.List()
+	ret := make([]*Traversal, len(nodes))
+	for i, node := range nodes {
+		ret[i] = node.content.(*Traversal)
+	}
+	return ret
+}
+
+// RenameVariablePrefix examines each of the absolute traversals in the
+// receiving expression to see if they have the given sequence of names as
+// a prefix. If so, they are updated in place to have the given
+// replacement names instead of that prefix.
+//
+// This can be used to implement symbol renaming. The calling application can
+// visit all relevant expressions in its input and apply the same renaming
+// to implement a global symbol rename.
+//
+// The search and replacement traversals must be the same length, or this
+// method will panic. Only attribute access operations can be matched and
+// replaced. Index steps never match the prefix.
+func (e *Expression) RenameVariablePrefix(search, replacement []string) {
+	if len(search) != len(replacement) {
+		panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
+	}
+Traversals:
+	for node := range e.absTraversals {
+		traversal := node.content.(*Traversal)
+		if len(traversal.steps) < len(search) {
+			// If it's shorter then it can't have our prefix
+			continue
+		}
+
+		stepNodes := traversal.steps.List()
+		for i, name := range search {
+			step, isName := stepNodes[i].content.(*TraverseName)
+			if !isName {
+				continue Traversals // only name nodes can match
+			}
+			foundNameBytes := step.name.content.(*identifier).token.Bytes
+			if len(foundNameBytes) != len(name) {
+				continue Traversals
+			}
+			if string(foundNameBytes) != name {
+				continue Traversals
+			}
+		}
+
+		// If we get here then the prefix matched, so now we'll swap in
+		// the replacement strings.
+		for i, name := range replacement {
+			step := stepNodes[i].content.(*TraverseName)
+			token := step.name.content.(*identifier).token
+			token.Bytes = []byte(name)
+		}
+	}
+}
+
+// Traversal represents a sequence of variable, attribute, and/or index
+// operations.
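+//
+// For example, a.b[0] is a three-step traversal: a root name, an attribute
+// access, and an index.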
+type Traversal struct { + inTree + + steps nodeSet +} + +func newTraversal() *Traversal { + return &Traversal{ + inTree: newInTree(), + steps: newNodeSet(), + } +} + +type TraverseName struct { + inTree + + name *node +} + +func newTraverseName() *TraverseName { + return &TraverseName{ + inTree: newInTree(), + } +} + +type TraverseIndex struct { + inTree + + key *node +} + +func newTraverseIndex() *TraverseIndex { + return &TraverseIndex{ + inTree: newInTree(), + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go new file mode 100644 index 00000000000..56d5b77526d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go @@ -0,0 +1,11 @@ +// Package hclwrite deals with the problem of generating HCL configuration +// and of making specific surgical changes to existing HCL configurations. +// +// It operates at a different level of abstraction than the main HCL parser +// and AST, since details such as the placement of comments and newlines +// are preserved when unchanged. +// +// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes +// to be read out, created and inserted, etc. Nodes represent syntax constructs +// rather than semantic concepts. +package hclwrite diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/format.go new file mode 100644 index 00000000000..7111ebde205 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/format.go @@ -0,0 +1,463 @@ +package hclwrite + +import ( + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'}) + +// placeholder token used when we don't have a token but we don't want +// to pass a real "nil" and complicate things with nil pointer checks +var nilToken = &Token{ + Type: hclsyntax.TokenNil, + Bytes: []byte{}, + SpacesBefore: 0, +} + +// format rewrites tokens within the given sequence, in-place, to adjust the +// whitespace around their content to achieve canonical formatting. +func format(tokens Tokens) { + // Formatting is a multi-pass process. More details on the passes below, + // but this is the overview: + // - adjust the leading space on each line to create appropriate + // indentation + // - adjust spaces between tokens in a single cell using a set of rules + // - adjust the leading space in the "assign" and "comment" cells on each + // line to vertically align with neighboring lines. + // All of these steps operate in-place on the given tokens, so a caller + // may collect a flat sequence of all of the tokens underlying an AST + // and pass it here and we will then indirectly modify the AST itself. + // Formatting must change only whitespace. Specifically, that means + // changing the SpacesBefore attribute on a token while leaving the + // other token attributes unchanged. + + lines := linesForFormat(tokens) + formatIndent(lines) + formatSpaces(lines) + formatCells(lines) +} + +func formatIndent(lines []formatLine) { + // Our methodology for indents is to take the input one line at a time + // and count the bracketing delimiters on each line. If a line has a net + // increase in open brackets, we increase the indent level by one and + // remember how many new openers we had. 
If the line has a net _decrease_, + // we'll compare it to the most recent number of openers and decrease the + // dedent level by one each time we pass an indent level remembered + // earlier. + // The "indent stack" used here allows for us to recognize degenerate + // input where brackets are not symmetrical within lines and avoid + // pushing things too far left or right, creating confusion. + + // We'll start our indent stack at a reasonable capacity to minimize the + // chance of us needing to grow it; 10 here means 10 levels of indent, + // which should be more than enough for reasonable HCL uses. + indents := make([]int, 0, 10) + + for i := range lines { + line := &lines[i] + if len(line.lead) == 0 { + continue + } + + if line.lead[0].Type == hclsyntax.TokenNewline { + // Never place spaces before a newline + line.lead[0].SpacesBefore = 0 + continue + } + + netBrackets := 0 + for _, token := range line.lead { + netBrackets += tokenBracketChange(token) + if token.Type == hclsyntax.TokenOHeredoc { + break + } + } + + for _, token := range line.assign { + netBrackets += tokenBracketChange(token) + } + + switch { + case netBrackets > 0: + line.lead[0].SpacesBefore = 2 * len(indents) + indents = append(indents, netBrackets) + case netBrackets < 0: + closed := -netBrackets + for closed > 0 && len(indents) > 0 { + switch { + + case closed > indents[len(indents)-1]: + closed -= indents[len(indents)-1] + indents = indents[:len(indents)-1] + + case closed < indents[len(indents)-1]: + indents[len(indents)-1] -= closed + closed = 0 + + default: + indents = indents[:len(indents)-1] + closed = 0 + } + } + line.lead[0].SpacesBefore = 2 * len(indents) + default: + line.lead[0].SpacesBefore = 2 * len(indents) + } + } +} + +func formatSpaces(lines []formatLine) { + for _, line := range lines { + for i, token := range line.lead { + var before, after *Token + if i > 0 { + before = line.lead[i-1] + } else { + before = nilToken + } + if i < (len(line.lead) - 1) { + after = line.lead[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + for i, token := range line.assign { + if i == 0 { + // first token in "assign" always has one space before to + // separate the equals sign from what it's assigning. + token.SpacesBefore = 1 + } + + var before, after *Token + if i > 0 { + before = line.assign[i-1] + } else { + before = nilToken + } + if i < (len(line.assign) - 1) { + after = line.assign[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + + } +} + +func formatCells(lines []formatLine) { + + chainStart := -1 + maxColumns := 0 + + // We'll deal with the "assign" cell first, since moving that will + // also impact the "comment" cell. 
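+	//
+	// For example (an illustrative pair of input lines),
+	//
+	//	a = 1
+	//	long_name = 2
+	//
+	// becomes
+	//
+	//	a         = 1
+	//	long_name = 2
+	//
+	// because each equals sign is pushed one space past the widest "lead"
+	// cell in its chain of consecutive assignment lines.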
+ closeAssignChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.assign[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.assign == nil { + if chainStart != -1 { + closeAssignChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeAssignChain(len(lines)) + } + + // Now we'll deal with the comments + closeCommentChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + chainLine.assign.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.comment[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.comment == nil { + if chainStart != -1 { + closeCommentChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + line.assign.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeCommentChain(len(lines)) + } + +} + +// spaceAfterToken decides whether a particular subject token should have a +// space after it when surrounded by the given before and after tokens. +// "before" can be TokenNil, if the subject token is at the start of a sequence. +func spaceAfterToken(subject, before, after *Token) bool { + switch { + + case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: + // Never add spaces before a newline + return false + + case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: + // Don't split a function name from open paren in a call + return false + + case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: + // Don't use spaces around attribute access dots + return false + + case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: + // No space right before a comma or ... in an argument list + return false + + case subject.Type == hclsyntax.TokenComma: + // Always a space after a comma + return true + + case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: + // No extra spaces within templates + return false + + case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: + // This is a special case for inside for expressions where a user + // might want to use a literal tuple constructor: + // [for x in [foo]: x] + // ... in that case, we would normally produce in[foo] thinking that + // in is a reference, but we'll recognize it as a keyword here instead + // to make the result less confusing. + return true + + case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): + return false + + case subject.Type == hclsyntax.TokenMinus: + // Since a minus can either be subtraction or negation, and the latter + // should _not_ have a space after it, we need to use some heuristics + // to decide which case this is. 
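+		// For example, the minus in a - b follows an identifier and is
+		// subtraction, while the minus in [-1] follows an open bracket and
+		// is negation.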
+ // We guess that we have a negation if the token before doesn't look + // like it could be the end of an expression. + + switch before.Type { + + case hclsyntax.TokenNil: + // Minus at the start of input must be a negation + return false + + case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: + // Minus immediately after an opening bracket or separator must be a negation. + return false + + case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: + // Minus immediately after another arithmetic operator must be negation. + return false + + case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: + // Minus immediately after another comparison operator must be negation. + return false + + case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: + // Minus immediately after logical operator doesn't make sense but probably intended as negation. + return false + + default: + return true + } + + case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: + // Unlike other bracket types, braces have spaces on both sides of them, + // both in single-line nested blocks foo { bar = baz } and in object + // constructor expressions foo = { bar = baz }. + if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { + // An open brace followed by a close brace is an exception, however. + // e.g. foo {} rather than foo { } + return false + } + return true + + // In the unlikely event that an interpolation expression is just + // a single object constructor, we'll put a space between the ${ and + // the following { to make this more obvious, and then the same + // thing for the two braces at the end. + case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: + return true + case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: + return true + + // Don't add spaces between interpolated items + case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): + return false + + case tokenBracketChange(subject) > 0: + // No spaces after open brackets + return false + + case tokenBracketChange(after) < 0: + // No spaces before close brackets + return false + + default: + // Most tokens are space-separated + return true + + } +} + +func linesForFormat(tokens Tokens) []formatLine { + if len(tokens) == 0 { + return make([]formatLine, 0) + } + + // first we'll count our lines, so we can allocate the array for them in + // a single block. (We want to minimize memory pressure in this codepath, + // so it can be run somewhat-frequently by editor integrations.) + lineCount := 1 // if there are zero newlines then there is one line + for _, tok := range tokens { + if tokenIsNewline(tok) { + lineCount++ + } + } + + // To start, we'll just put everything in the "lead" cell on each line, + // and then do another pass over the lines afterwards to adjust. + lines := make([]formatLine, lineCount) + li := 0 + lineStart := 0 + for i, tok := range tokens { + if tok.Type == hclsyntax.TokenEOF { + // The EOF token doesn't belong to any line, and terminates the + // token sequence. 
+			lines[li].lead = tokens[lineStart:i]
+			break
+		}
+
+		if tokenIsNewline(tok) {
+			lines[li].lead = tokens[lineStart : i+1]
+			lineStart = i + 1
+			li++
+		}
+	}
+
+	// If a set of tokens doesn't end in TokenEOF (e.g. because it's a
+	// fragment of tokens from the middle of a file) then we might fall
+	// out here with a line still pending.
+	if lineStart < len(tokens) {
+		lines[li].lead = tokens[lineStart:]
+		if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF {
+			lines[li].lead = lines[li].lead[:len(lines[li].lead)-1]
+		}
+	}
+
+	// Now we'll pick off any trailing comments and attribute assignments
+	// to shuffle off into the "comment" and "assign" cells.
+	for i := range lines {
+		line := &lines[i]
+
+		if len(line.lead) == 0 {
+			// if the line is empty then there's nothing for us to do
+			// (this should happen only for the final line, because all other
+			// lines would have a newline token of some kind)
+			continue
+		}
+
+		if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
+			line.comment = line.lead[len(line.lead)-1:]
+			line.lead = line.lead[:len(line.lead)-1]
+		}
+
+		for i, tok := range line.lead {
+			if i > 0 && tok.Type == hclsyntax.TokenEqual {
+				// We only move the tokens into "assign" if the RHS seems to
+				// be a whole expression, which we determine by counting
+				// brackets. If there's a net positive number of brackets
+				// then that suggests we're introducing a multi-line expression.
+				netBrackets := 0
+				for _, token := range line.lead[i:] {
+					netBrackets += tokenBracketChange(token)
+				}
+
+				if netBrackets == 0 {
+					line.assign = line.lead[i:]
+					line.lead = line.lead[:i]
+				}
+				break
+			}
+		}
+	}
+
+	return lines
+}
+
+func tokenIsNewline(tok *Token) bool {
+	if tok.Type == hclsyntax.TokenNewline {
+		return true
+	} else if tok.Type == hclsyntax.TokenComment {
+		// Single line tokens (# and //) consume their terminating newline,
+		// so we need to treat them as newline tokens as well.
+		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+			return true
+		}
+	}
+	return false
+}
+
+func tokenBracketChange(tok *Token) int {
+	switch tok.Type {
+	case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
+		return 1
+	case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
+		return -1
+	default:
+		return 0
+	}
+}
+
+// formatLine represents a single line of source code for formatting purposes,
+// splitting its tokens into up to three "cells":
+//
+//	lead:    always present, representing everything up to one of the others
+//	assign:  if line contains an attribute assignment, represents the tokens
+//	         starting at (and including) the equals symbol
+//	comment: if line contains any non-comment tokens and ends with a
+//	         single-line comment token, represents the comment.
+//
+// When formatting, the leading spaces of the first tokens in each of these
+// cells are adjusted to align their occurrences vertically on consecutive
+// rows.
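+//
+// For example, in the line foo = 1 # x the cells are foo (lead), = 1
+// (assign), and # x (comment).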
+type formatLine struct { + lead Tokens + assign Tokens + comment Tokens +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go new file mode 100644 index 00000000000..d249cfdf9a8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go @@ -0,0 +1,250 @@ +package hclwrite + +import ( + "fmt" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// TokensForValue returns a sequence of tokens that represents the given +// constant value. +// +// This function only supports types that are used by HCL. In particular, it +// does not support capsule types and will panic if given one. +// +// It is not possible to express an unknown value in source code, so this +// function will panic if the given value is unknown or contains any unknown +// values. A caller can call the value's IsWhollyKnown method to verify that +// no unknown values are present before calling TokensForValue. +func TokensForValue(val cty.Value) Tokens { + toks := appendTokensForValue(val, nil) + format(toks) // fiddle with the SpacesBefore field to get canonical spacing + return toks +} + +// TokensForTraversal returns a sequence of tokens that represents the given +// traversal. +// +// If the traversal is absolute then the result is a self-contained, valid +// reference expression. If the traversal is relative then the returned tokens +// could be appended to some other expression tokens to traverse into the +// represented expression. +func TokensForTraversal(traversal hcl.Traversal) Tokens { + toks := appendTokensForTraversal(traversal, nil) + format(toks) // fiddle with the SpacesBefore field to get canonical spacing + return toks +} + +func appendTokensForValue(val cty.Value, toks Tokens) Tokens { + switch { + + case !val.IsKnown(): + panic("cannot produce tokens for unknown value") + + case val.IsNull(): + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(`null`), + }) + + case val.Type() == cty.Bool: + var src []byte + if val.True() { + src = []byte(`true`) + } else { + src = []byte(`false`) + } + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: src, + }) + + case val.Type() == cty.Number: + bf := val.AsBigFloat() + srcStr := bf.Text('f', -1) + toks = append(toks, &Token{ + Type: hclsyntax.TokenNumberLit, + Bytes: []byte(srcStr), + }) + + case val.Type() == cty.String: + // TODO: If it's a multi-line string ending in a newline, format + // it as a HEREDOC instead. 
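+		// For now, every string is rendered as a single quoted literal, so
+		// e.g. a value containing a newline is written with an escaped \n
+		// rather than as a heredoc.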
+		src := escapeQuotedStringLit(val.AsString())
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOQuote,
+			Bytes: []byte{'"'},
+		})
+		if len(src) > 0 {
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenQuotedLit,
+				Bytes: src,
+			})
+		}
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCQuote,
+			Bytes: []byte{'"'},
+		})
+
+	case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			_, eVal := it.Element()
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+
+	case val.Type().IsMapType() || val.Type().IsObjectType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrace,
+			Bytes: []byte{'{'},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			eKey, eVal := it.Element()
+			if hclsyntax.ValidIdentifier(eKey.AsString()) {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte(eKey.AsString()),
+				})
+			} else {
+				toks = appendTokensForValue(eKey, toks)
+			}
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenEqual,
+				Bytes: []byte{'='},
+			})
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrace,
+			Bytes: []byte{'}'},
+		})
+
+	default:
+		panic(fmt.Sprintf("cannot produce tokens for %#v", val))
+	}
+
+	return toks
+}
+
+func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
+	for _, step := range traversal {
+		toks = appendTokensForTraversalStep(step, toks)
+	}
+	return toks
+}
+
+// appendTokensForTraversalStep appends the tokens for a single traversal
+// step and returns the extended sequence, so that the appends made here are
+// visible to the caller.
+func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens {
+	switch ts := step.(type) {
+	case hcl.TraverseRoot:
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(ts.Name),
+		})
+	case hcl.TraverseAttr:
+		toks = append(
+			toks,
+			&Token{
+				Type:  hclsyntax.TokenDot,
+				Bytes: []byte{'.'},
+			},
+			&Token{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte(ts.Name),
+			},
+		)
+	case hcl.TraverseIndex:
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+		toks = appendTokensForValue(ts.Key, toks)
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+	default:
+		panic(fmt.Sprintf("unsupported traversal step type %T", step))
+	}
+	return toks
+}
+
+func escapeQuotedStringLit(s string) []byte {
+	if len(s) == 0 {
+		return nil
+	}
+	buf := make([]byte, 0, len(s))
+	for i, r := range s {
+		switch r {
+		case '\n':
+			buf = append(buf, '\\', 'n')
+		case '\r':
+			buf = append(buf, '\\', 'r')
+		case '\t':
+			buf = append(buf, '\\', 't')
+		case '"':
+			buf = append(buf, '\\', '"')
+		case '\\':
+			buf = append(buf, '\\', '\\')
+		case '$', '%':
+			buf = appendRune(buf, r)
+			remain := s[i+1:]
+			if len(remain) > 0 && remain[0] == '{' {
+				// Double up our template introducer symbol to escape it.
+				buf = appendRune(buf, r)
+			}
+		default:
+			if !unicode.IsPrint(r) {
+				var fmted string
+				if r < 65536 {
+					fmted = fmt.Sprintf("\\u%04x", r)
+				} else {
+					fmted = fmt.Sprintf("\\U%08x", r)
+				}
+				buf = append(buf, fmted...)
+			} else {
+				buf = appendRune(buf, r)
+			}
+		}
+	}
+	return buf
+}
+
+func appendRune(b []byte, r rune) []byte {
+	l := utf8.RuneLen(r)
+	for i := 0; i < l; i++ {
+		b = append(b, 0) // make room at the end of our buffer
+	}
+	ch := b[len(b)-l:]
+	utf8.EncodeRune(ch, r)
+	return b
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
new file mode 100644
index 00000000000..a13c0ec419a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
@@ -0,0 +1,23 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+type nativeNodeSorter struct {
+	Nodes []hclsyntax.Node
+}
+
+func (s nativeNodeSorter) Len() int {
+	return len(s.Nodes)
+}
+
+func (s nativeNodeSorter) Less(i, j int) bool {
+	rangeI := s.Nodes[i].Range()
+	rangeJ := s.Nodes[j].Range()
+	return rangeI.Start.Byte < rangeJ.Start.Byte
+}
+
+func (s nativeNodeSorter) Swap(i, j int) {
+	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/node.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
new file mode 100644
index 00000000000..71fd00faf67
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
@@ -0,0 +1,236 @@
+package hclwrite
+
+import (
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+// node represents a node in the AST.
+type node struct {
+	content nodeContent
+
+	list          *nodes
+	before, after *node
+}
+
+func newNode(c nodeContent) *node {
+	return &node{
+		content: c,
+	}
+}
+
+func (n *node) Equal(other *node) bool {
+	return cmp.Equal(n.content, other.content)
+}
+
+func (n *node) BuildTokens(to Tokens) Tokens {
+	return n.content.BuildTokens(to)
+}
+
+// Detach removes the receiver from the list it currently belongs to. If the
+// node is not currently in a list, this is a no-op.
+func (n *node) Detach() {
+	if n.list == nil {
+		return
+	}
+	if n.before != nil {
+		n.before.after = n.after
+	}
+	if n.after != nil {
+		n.after.before = n.before
+	}
+	if n.list.first == n {
+		n.list.first = n.after
+	}
+	if n.list.last == n {
+		n.list.last = n.before
+	}
+	n.list = nil
+	n.before = nil
+	n.after = nil
+}
+
+// ReplaceWith removes the receiver from the list it currently belongs to and
+// inserts a new node with the given content in its place. If the node is not
+// currently in a list, this function will panic.
+//
+// The return value is the newly-constructed node, containing the given content.
+// After this function returns, the receiver is no longer attached to a list.
+func (n *node) ReplaceWith(c nodeContent) *node {
+	if n.list == nil {
+		panic("can't replace node that is not in a list")
+	}
+
+	before := n.before
+	after := n.after
+	list := n.list
+	n.before, n.after, n.list = nil, nil, nil
+
+	nn := newNode(c)
+	nn.before = before
+	nn.after = after
+	nn.list = list
+	if before != nil {
+		before.after = nn
+	}
+	if after != nil {
+		after.before = nn
+	}
+	return nn
+}
+
+func (n *node) assertUnattached() {
+	if n.list != nil {
+		panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
+	}
+}
+
+// nodeContent is the interface type implemented by all AST content types.
+type nodeContent interface {
+	walkChildNodes(w internalWalkFunc)
+	BuildTokens(to Tokens) Tokens
+}
+
+// nodes is a list of nodes.
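+// It is implemented as a doubly-linked list (via the before/after pointers
+// on each node) so that nodes can be detached or replaced in place without
+// re-slicing.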
+type nodes struct { + first, last *node +} + +func (ns *nodes) BuildTokens(to Tokens) Tokens { + for n := ns.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +func (ns *nodes) Clear() { + ns.first = nil + ns.last = nil +} + +func (ns *nodes) Append(c nodeContent) *node { + n := &node{ + content: c, + } + ns.AppendNode(n) + n.list = ns + return n +} + +func (ns *nodes) AppendNode(n *node) { + if ns.last != nil { + n.before = ns.last + ns.last.after = n + } + n.list = ns + ns.last = n + if ns.first == nil { + ns.first = n + } +} + +func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { + if len(tokens) == 0 { + return nil + } + n := newNode(tokens) + ns.AppendNode(n) + n.list = ns + return n +} + +// nodeSet is an unordered set of nodes. It is used to describe a set of nodes +// that all belong to the same list that have some role or characteristic +// in common. +type nodeSet map[*node]struct{} + +func newNodeSet() nodeSet { + return make(nodeSet) +} + +func (ns nodeSet) Has(n *node) bool { + if ns == nil { + return false + } + _, exists := ns[n] + return exists +} + +func (ns nodeSet) Add(n *node) { + ns[n] = struct{}{} +} + +func (ns nodeSet) Remove(n *node) { + delete(ns, n) +} + +func (ns nodeSet) List() []*node { + if len(ns) == 0 { + return nil + } + + ret := make([]*node, 0, len(ns)) + + // Determine which list we are working with. We assume here that all of + // the nodes belong to the same list, since that is part of the contract + // for nodeSet. + var list *nodes + for n := range ns { + list = n.list + break + } + + // We recover the order by iterating over the whole list. This is not + // the most efficient way to do it, but our node lists should always be + // small so not worth making things more complex. + for n := list.first; n != nil; n = n.after { + if ns.Has(n) { + ret = append(ret, n) + } + } + return ret +} + +type internalWalkFunc func(*node) + +// inTree can be embedded into a content struct that has child nodes to get +// a standard implementation of the NodeContent interface and a record of +// a potential parent node. +type inTree struct { + parent *node + children *nodes +} + +func newInTree() inTree { + return inTree{ + children: &nodes{}, + } +} + +func (it *inTree) assertUnattached() { + if it.parent != nil { + panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) + } +} + +func (it *inTree) walkChildNodes(w internalWalkFunc) { + for n := it.children.first; n != nil; n = n.after { + w(n) + } +} + +func (it *inTree) BuildTokens(to Tokens) Tokens { + for n := it.children.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +// leafNode can be embedded into a content struct to give it a do-nothing +// implementation of walkChildNodes +type leafNode struct { +} + +func (n *leafNode) walkChildNodes(w internalWalkFunc) { +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go new file mode 100644 index 00000000000..0e8952d6746 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go @@ -0,0 +1,599 @@ +package hclwrite + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// Our "parser" here is actually not doing any parsing of its own. 
Instead,
+// it leans on the native parser in hclsyntax, and then uses the source ranges
+// from the AST to partition the raw token sequence to match the raw tokens
+// up to AST nodes.
+//
+// This strategy feels somewhat counter-intuitive, since most of the work the
+// parser does is thrown away here, but it is chosen because the
+// normal parsing work done by hclsyntax is considered to be the "main case",
+// while modifying and re-printing source is more of an edge case, used only
+// in ancillary tools, and so it's good to keep all the main parsing logic
+// with the main case but keep all of the extra complexity of token wrangling
+// out of the main parser, which is already rather complex just serving the
+// use-cases it already serves.
+//
+// If the parsing step produces any errors, the returned File is nil because
+// we can't reliably extract tokens from the partial AST produced by an
+// erroneous parse.
+func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
+	file, diags := hclsyntax.ParseConfig(src, filename, start)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	// To do our work here, we use the "native" tokens (those from hclsyntax)
+	// to match against source ranges in the AST, but ultimately produce
+	// slices from our sequence of "writer" tokens, which contain only
+	// *relative* position information that is more appropriate for
+	// transformation/writing use-cases.
+	nativeTokens, diags := hclsyntax.LexConfig(src, filename, start)
+	if diags.HasErrors() {
+		// should never happen, since we would've caught these diags in
+		// the first call above.
+		return nil, diags
+	}
+	writerTokens := writerTokens(nativeTokens)
+
+	from := inputTokens{
+		nativeTokens: nativeTokens,
+		writerTokens: writerTokens,
+	}
+
+	before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
+	ret := &File{
+		inTree: newInTree(),
+
+		srcBytes: src,
+		body:     root,
+	}
+
+	nodes := ret.inTree.children
+	nodes.Append(before.Tokens())
+	nodes.AppendNode(root)
+	nodes.Append(after.Tokens())
+
+	return ret, diags
+}
+
+type inputTokens struct {
+	nativeTokens hclsyntax.Tokens
+	writerTokens Tokens
+}
+
+func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
+	start, end := partitionTokens(it.nativeTokens, rng)
+	before = it.Slice(0, start)
+	within = it.Slice(start, end)
+	after = it.Slice(end, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
+	for i, t := range it.writerTokens {
+		if t.Type == ty {
+			return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
+		}
+	}
+	panic(fmt.Sprintf("didn't find any token of type %s", ty))
+}
+
+func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
+	before, within, after := it.PartitionType(ty)
+	if within.Len() != 1 {
+		panic("PartitionType found more than one token")
+	}
+	return before, within.Tokens()[0], after
+}
+
+// PartitionIncludingComments is like Partition except the returned "within"
+// range includes any lead and line comments associated with the range.
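+//
+// For example, a comment line immediately above an attribute becomes part
+// of "within" rather than part of "before".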
+func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
+	start, end := partitionTokens(it.nativeTokens, rng)
+	start = partitionLeadCommentTokens(it.nativeTokens[:start])
+	_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
+	end += afterNewline
+
+	before = it.Slice(0, start)
+	within = it.Slice(start, end)
+	after = it.Slice(end, len(it.nativeTokens))
+	return
+}
+
+// PartitionBlockItem is similar to PartitionIncludingComments but it returns
+// the comments as separate token sequences so that they can be captured into
+// AST attributes. It makes assumptions that apply only to block items, so it
+// should not be used for other constructs.
+func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
+	before, within, after = it.Partition(rng)
+	before, leadComments = before.PartitionLeadComments()
+	lineComments, newline, after = after.PartitionLineEndTokens()
+	return
+}
+
+func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
+	start := partitionLeadCommentTokens(it.nativeTokens)
+	before = it.Slice(0, start)
+	within = it.Slice(start, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
+	afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
+	comments = it.Slice(0, afterComments)
+	newline = it.Slice(afterComments, afterNewline)
+	after = it.Slice(afterNewline, len(it.nativeTokens))
+	return
+}
+
+func (it inputTokens) Slice(start, end int) inputTokens {
+	// When we slice, we create a new slice with no additional capacity because
+	// we expect that these slices will be mutated in order to insert
+	// new code into the AST, and we want to ensure that a new underlying
+	// array gets allocated in that case, rather than writing into some
+	// following slice and corrupting it.
+	return inputTokens{
+		nativeTokens: it.nativeTokens[start:end:end],
+		writerTokens: it.writerTokens[start:end:end],
+	}
+}
+
+func (it inputTokens) Len() int {
+	return len(it.nativeTokens)
+}
+
+func (it inputTokens) Tokens() Tokens {
+	return it.writerTokens
+}
+
+func (it inputTokens) Types() []hclsyntax.TokenType {
+	ret := make([]hclsyntax.TokenType, len(it.nativeTokens))
+	for i, tok := range it.nativeTokens {
+		ret[i] = tok.Type
+	}
+	return ret
+}
+
+// parseBody locates the given body within the given input tokens and returns
+// the resulting *Body object as well as the tokens that appeared before and
+// after it.
+func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) {
+	before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange)
+
+	// The main AST doesn't retain the original source ordering of the
+	// body items, so we need to reconstruct that ordering by inspecting
+	// their source ranges.
+ nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) + for _, nativeAttr := range nativeBody.Attributes { + nativeItems = append(nativeItems, nativeAttr) + } + for _, nativeBlock := range nativeBody.Blocks { + nativeItems = append(nativeItems, nativeBlock) + } + sort.Sort(nativeNodeSorter{nativeItems}) + + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + + remain := within + for _, nativeItem := range nativeItems { + beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) + + if beforeItem.Len() > 0 { + body.AppendUnstructuredTokens(beforeItem.Tokens()) + } + body.appendItemNode(item) + + remain = afterItem + } + + if remain.Len() > 0 { + body.AppendUnstructuredTokens(remain.Tokens()) + } + + return before, newNode(body), after +} + +func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { + before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) + + var item *node + + switch tItem := nativeItem.(type) { + case *hclsyntax.Attribute: + item = parseAttribute(tItem, within, leadComments, lineComments, newline) + case *hclsyntax.Block: + item = parseBlock(tItem, within, leadComments, lineComments, newline) + default: + // should never happen if caller is behaving + panic("unsupported native item type") + } + + return before, item, after +} + +func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { + attr := &Attribute{ + inTree: newInTree(), + } + children := attr.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + attr.leadComments = cn + children.AppendNode(cn) + } + + before, nameTokens, from := from.Partition(nativeAttr.NameRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if nameTokens.Len() != 1 { + // Should never happen with valid input + panic("attribute name is not exactly one token") + } + token := nameTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + attr.name = in + children.AppendNode(in) + } + + before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(equalsTokens.Tokens()) + + before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) + { + children.AppendUnstructuredTokens(before.Tokens()) + exprNode := parseExpression(nativeAttr.Expr, exprTokens) + attr.expr = exprNode + children.AppendNode(exprNode) + } + + { + cn := newNode(newComments(lineComments.Tokens())) + attr.lineComments = cn + children.AppendNode(cn) + } + + children.AppendUnstructuredTokens(newline.Tokens()) + + // Collect any stragglers, though there shouldn't be any + children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(attr) +} + +func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { + block := &Block{ + inTree: newInTree(), + labels: newNodeSet(), + } + children := block.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + block.leadComments = cn + children.AppendNode(cn) + } + + before, typeTokens, from := from.Partition(nativeBlock.TypeRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if typeTokens.Len() != 1 { + // Should never happen with valid input + panic("block type name is not exactly one token") + } + token := typeTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + block.typeName = in + children.AppendNode(in) + } 
+ + for _, rng := range nativeBlock.LabelRanges { + var labelTokens inputTokens + before, labelTokens, from = from.Partition(rng) + children.AppendUnstructuredTokens(before.Tokens()) + tokens := labelTokens.Tokens() + var ln *node + if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { + ln = newNode(newIdentifier(tokens[0])) + } else { + ln = newNode(newQuoted(tokens)) + } + block.labels.Add(ln) + children.AppendNode(ln) + } + + before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(oBrace.Tokens()) + + // We go a bit out of order here: we go hunting for the closing brace + // so that we have a delimited body, but then we'll deal with the body + // before we actually append the closing brace and any straggling tokens + // that appear after it. + bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) + before, body, after := parseBody(nativeBlock.Body, bodyTokens) + children.AppendUnstructuredTokens(before.Tokens()) + block.body = body + children.AppendNode(body) + children.AppendUnstructuredTokens(after.Tokens()) + + children.AppendUnstructuredTokens(cBrace.Tokens()) + + // stragglers + children.AppendUnstructuredTokens(from.Tokens()) + if lineComments.Len() > 0 { + // blocks don't actually have line comments, so we'll just treat + // them as extra stragglers + children.AppendUnstructuredTokens(lineComments.Tokens()) + } + children.AppendUnstructuredTokens(newline.Tokens()) + + return newNode(block) +} + +func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { + expr := newExpression() + children := expr.inTree.children + + nativeVars := nativeExpr.Variables() + + for _, nativeTraversal := range nativeVars { + before, traversal, after := parseTraversal(nativeTraversal, from) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(traversal) + expr.absTraversals.Add(traversal) + from = after + } + // Attach any stragglers that don't belong to a traversal to the expression + // itself. In an expression with no traversals at all, this is just the + // entirety of "from". 
+ children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(expr) +} + +func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { + traversal := newTraversal() + children := traversal.inTree.children + before, from, after = from.Partition(nativeTraversal.SourceRange()) + + stepAfter := from + for _, nativeStep := range nativeTraversal { + before, step, after := parseTraversalStep(nativeStep, stepAfter) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(step) + traversal.steps.Add(step) + stepAfter = after + } + + return before, newNode(traversal), after +} + +func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { + var children *nodes + switch tNativeStep := nativeStep.(type) { + + case hcl.TraverseRoot, hcl.TraverseAttr: + step := newTraverseName() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) + name := newIdentifier(token) + children.AppendUnstructuredTokens(inBefore.Tokens()) + step.name = children.Append(name) + children.AppendUnstructuredTokens(inAfter.Tokens()) + return before, newNode(step), after + + case hcl.TraverseIndex: + step := newTraverseIndex() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + + var inBefore, oBrack, keyTokens, cBrack inputTokens + inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) + children.AppendUnstructuredTokens(inBefore.Tokens()) + children.AppendUnstructuredTokens(oBrack.Tokens()) + keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) + + keyVal := tNativeStep.Key + switch keyVal.Type() { + case cty.String: + key := newQuoted(keyTokens.Tokens()) + step.key = children.Append(key) + case cty.Number: + valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) + children.AppendUnstructuredTokens(valBefore.Tokens()) + key := newNumber(valToken) + step.key = children.Append(key) + children.AppendUnstructuredTokens(valAfter.Tokens()) + } + + children.AppendUnstructuredTokens(cBrack.Tokens()) + children.AppendUnstructuredTokens(from.Tokens()) + + return before, newNode(step), after + default: + panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) + } + +} + +// writerTokens takes a sequence of tokens as produced by the main hclsyntax +// package and transforms it into an equivalent sequence of tokens using +// this package's own token model. +// +// The resulting list contains the same number of tokens and uses the same +// indices as the input, allowing the two sets of tokens to be correlated +// by index. +func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { + // Ultimately we want a slice of token _pointers_, but since we can + // predict how much memory we're going to devote to tokens we'll allocate + // it all as a single flat buffer and thus give the GC less work to do. + tokBuf := make([]Token, len(nativeTokens)) + var lastByteOffset int + for i, mainToken := range nativeTokens { + // Create a copy of the bytes so that we can mutate without + // corrupting the original token stream. 
+ bytes := make([]byte, len(mainToken.Bytes)) + copy(bytes, mainToken.Bytes) + + tokBuf[i] = Token{ + Type: mainToken.Type, + Bytes: bytes, + + // We assume here that spaces are always ASCII spaces, since + // that's what the scanner also assumes, and thus the number + // of bytes skipped is also the number of space characters. + SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, + } + + lastByteOffset = mainToken.Range.End.Byte + } + + // Now make a slice of pointers into the previous slice. + ret := make(Tokens, len(tokBuf)) + for i := range ret { + ret[i] = &tokBuf[i] + } + + return ret +} + +// partitionTokens takes a sequence of tokens and an hcl.Range and returns +// two indices within the token sequence that correspond with the range +// boundaries, such that the slice operator could be used to produce +// three token sequences for before, within, and after respectively: +// +// start, end := partitionTokens(toks, rng) +// before := toks[:start] +// within := toks[start:end] +// after := toks[end:] +// +// This works best when the range is aligned with token boundaries (e.g. +// because it was produced in terms of the scanner's result) but if that isn't +// true then it will make a best effort that may produce strange results at +// the boundaries. +// +// Native hclsyntax tokens are used here, because they contain the necessary +// absolute position information. However, since writerTokens produces a +// correlatable sequence of writer tokens, the resulting indices can be +// used also to index into its result, allowing the partitioning of writer +// tokens to be driven by the partitioning of native tokens. +// +// The tokens are assumed to be in source order and non-overlapping, which +// will be true if the token sequence from the scanner is used directly. +func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { + // We use a linear search here because we assume that in most cases our + // target range is close to the beginning of the sequence, and the sequences + // are generally small for most reasonable files anyway. + for i := 0; ; i++ { + if i >= len(toks) { + // No tokens for the given range at all! + return len(toks), len(toks) + } + + if toks[i].Range.Start.Byte >= rng.Start.Byte { + start = i + break + } + } + + for i := start; ; i++ { + if i >= len(toks) { + // The range "hangs off" the end of the token sequence + return start, len(toks) + } + + if toks[i].Range.Start.Byte >= rng.End.Byte { + end = i // end marker is exclusive + break + } + } + + return start, end +} + +// partitionLeadCommentTokens takes a sequence of tokens that is assumed +// to immediately precede a construct that can have lead comment tokens, +// and returns the index into that sequence where the lead comments begin. +// +// Lead comments are defined as whole lines containing only comment tokens +// with no blank lines between. If no such lines are found, the returned +// index will be len(toks). +func partitionLeadCommentTokens(toks hclsyntax.Tokens) int { + // single-line comments (which is what we're interested in here) + // consume their trailing newline, so we can just walk backwards + // until we stop seeing comment tokens.
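+ // + // For example, if toks holds only the tokens of "# a\n# b\n" immediately + // before an attribute, every token is a comment, so the function returns + // 0 and both lines become lead comments for that attribute.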
+ for i := len(toks) - 1; i >= 0; i-- { + if toks[i].Type != hclsyntax.TokenComment { + return i + 1 + } + } + return 0 +} + +// partitionLineEndTokens takes a sequence of tokens that is assumed +// to immediately follow a construct that can have a line comment, and +// returns first the index where any line comments end and then second +// the index immediately after the trailing newline. +// +// Line comments are defined as comments that appear immediately after +// a construct on the same line where its significant tokens ended. +// +// Since single-line comment tokens (# and //) include the newline that +// terminates them, in the presence of these the two returned indices +// will be the same since the comment itself serves as the line end. +func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) { + for i := 0; i < len(toks); i++ { + tok := toks[i] + if tok.Type != hclsyntax.TokenComment { + switch tok.Type { + case hclsyntax.TokenNewline: + return i, i + 1 + case hclsyntax.TokenEOF: + // Although this is valid, we mustn't include the EOF + // itself as our "newline" or else strange things will + // happen when we try to append new items. + return i, i + default: + // If we have well-formed input here then nothing else should be + // possible. This path should never happen, because we only try + // to extract tokens from the sequence if the parser succeeded, + // and it should catch this problem itself. + panic("malformed line trailers: expected only comments and newlines") + } + } + + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + // Newline at the end of a single-line comment serves both as + // the end of comments *and* the end of the line. + return i + 1, i + 1 + } + } + return len(toks), len(toks) +} + +// lexConfig uses the hclsyntax scanner to get a token stream and then +// rewrites it into this package's token model. +// +// Any errors produced during scanning are ignored, so the results of this +// function should be used with care. +func lexConfig(src []byte) Tokens { + mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1}) + return writerTokens(mainTokens) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/public.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/public.go new file mode 100644 index 00000000000..4d5ce2a6ef5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/public.go @@ -0,0 +1,44 @@ +package hclwrite + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// NewFile creates a new file object that is empty and ready to have constructs +// added to it. +func NewFile() *File { + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + file := &File{ + inTree: newInTree(), + } + file.body = file.inTree.children.Append(body) + return file +} + +// ParseConfig interprets the given source bytes into a *hclwrite.File. The +// resulting AST can be used to perform surgical edits on the source code +// before turning it back into bytes again. +func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { + return parse(src, filename, start) +} + +// Format takes source code and performs simple whitespace changes to transform +// it to a canonical layout style. +// +// Format skips constructing an AST and works directly with tokens, so it +// is less expensive than formatting via the AST for situations where no other +// changes will be made.
It also ignores syntax errors and can thus be applied +// to partial source code, although the result in that case may not be +// desirable. +func Format(src []byte) []byte { + tokens := lexConfig(src) + format(tokens) + buf := &bytes.Buffer{} + tokens.WriteTo(buf) + return buf.Bytes() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go new file mode 100644 index 00000000000..d87f81853b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go @@ -0,0 +1,122 @@ +package hclwrite + +import ( + "bytes" + "io" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// Token is a single sequence of bytes annotated with a type. It is similar +// in purpose to hclsyntax.Token, but discards the source position information +// since that is not useful in code generation. +type Token struct { + Type hclsyntax.TokenType + Bytes []byte + + // We record the number of spaces before each token so that we can + // reproduce the exact layout of the original file when we're making + // surgical changes in-place. When _new_ code is created it will always + // be in the canonical style, but we preserve layout of existing code. + SpacesBefore int +} + +// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. +// A complete token is not possible since we don't have source location +// information here, and so this method is unexported so we can be sure it will +// only be used for internal purposes where we know the range isn't important. +// +// This is primarily intended to allow us to re-use certain functionality from +// hclsyntax rather than re-implementing it against our own token type here. +func (t *Token) asHCLSyntax() hclsyntax.Token { + return hclsyntax.Token{ + Type: t.Type, + Bytes: t.Bytes, + Range: hcl.Range{ + Filename: "", + }, + } +} + +// Tokens is a flat list of tokens. +type Tokens []*Token + +func (ts Tokens) Bytes() []byte { + buf := &bytes.Buffer{} + ts.WriteTo(buf) + return buf.Bytes() +} + +func (ts Tokens) testValue() string { + return string(ts.Bytes()) +} + +// Columns returns the number of columns (grapheme clusters) the token sequence +// occupies. The result is not meaningful if there are newline or single-line +// comment tokens in the sequence. +func (ts Tokens) Columns() int { + ret := 0 + for _, token := range ts { + ret += token.SpacesBefore // spaces are always worth one column each + ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) + ret += ct + } + return ret +} + +// WriteTo takes an io.Writer and writes the bytes for each token to it, +// along with the spacing that separates each token. In other words, this +// allows serializing the tokens to a file or other such byte stream. +func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { + // We know we're going to be writing a lot of small chunks of repeated + // space characters, so we'll prepare a buffer of these that we can + // easily pass to wr.Write without any further allocation. 
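+ // Runs of spaces longer than this buffer are written in chunks by the + // loop below, so its size only bounds each individual write, not the + // total amount of spacing that can be reproduced.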
+ spaces := make([]byte, 40) + for i := range spaces { + spaces[i] = ' ' + } + + var n int64 + var err error + for _, token := range ts { + if err != nil { + return n, err + } + + for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { + thisChunk := spacesBefore + if thisChunk > len(spaces) { + thisChunk = len(spaces) + } + var thisN int + thisN, err = wr.Write(spaces[:thisChunk]) + n += int64(thisN) + if err != nil { + return n, err + } + } + + var thisN int + thisN, err = wr.Write(token.Bytes) + n += int64(thisN) + } + + return n, err +} + +func (ts Tokens) walkChildNodes(w internalWalkFunc) { + // Unstructured tokens have no child nodes +} + +func (ts Tokens) BuildTokens(to Tokens) Tokens { + return append(to, ts...) +} + +func newIdentToken(name string) *Token { + return &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(name), + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic.go new file mode 100644 index 00000000000..94dc24f89f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic.go @@ -0,0 +1,43 @@ +package ast + +import ( + "bytes" + "fmt" +) + +// Arithmetic represents a node where the result is arithmetic of +// two or more operands in the order given. 
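+// +// For example, the interpolation "${1 + 2}" parses to an Arithmetic node +// whose Op is ArithmeticOpAdd and whose Exprs are two LiteralNode operands.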
+type Arithmetic struct { + Op ArithmeticOp + Exprs []Node + Posx Pos +} + +func (n *Arithmetic) Accept(v Visitor) Node { + for i, expr := range n.Exprs { + n.Exprs[i] = expr.Accept(v) + } + + return v(n) +} + +func (n *Arithmetic) Pos() Pos { + return n.Posx +} + +func (n *Arithmetic) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *Arithmetic) String() string { + var b bytes.Buffer + for _, expr := range n.Exprs { + b.WriteString(fmt.Sprintf("%s", expr)) + } + + return b.String() +} + +func (n *Arithmetic) Type(Scope) (Type, error) { + return TypeInt, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go new file mode 100644 index 00000000000..18880c60473 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go @@ -0,0 +1,24 @@ +package ast + +// ArithmeticOp is the operation to use for the math. +type ArithmeticOp int + +const ( + ArithmeticOpInvalid ArithmeticOp = 0 + + ArithmeticOpAdd ArithmeticOp = iota + ArithmeticOpSub + ArithmeticOpMul + ArithmeticOpDiv + ArithmeticOpMod + + ArithmeticOpLogicalAnd + ArithmeticOpLogicalOr + + ArithmeticOpEqual + ArithmeticOpNotEqual + ArithmeticOpLessThan + ArithmeticOpLessThanOrEqual + ArithmeticOpGreaterThan + ArithmeticOpGreaterThanOrEqual +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/ast.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/ast.go new file mode 100644 index 00000000000..c6350f8bbae --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/ast.go @@ -0,0 +1,99 @@ +package ast + +import ( + "fmt" +) + +// Node is the interface that all AST nodes must implement. +type Node interface { + // Accept is called to dispatch to the visitors. It must return the + // resulting Node (which might be different in an AST transform). + Accept(Visitor) Node + + // Pos returns the position of this node in some source. + Pos() Pos + + // Type returns the type of this node for the given context. + Type(Scope) (Type, error) +} + +// Pos is the starting position of an AST node +type Pos struct { + Column, Line int // Column/Line number, starting at 1 + Filename string // Optional source filename, if known +} + +func (p Pos) String() string { + if p.Filename == "" { + return fmt.Sprintf("%d:%d", p.Line, p.Column) + } else { + return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column) + } +} + +// InitPos is an initial position value. This should be used as +// the starting position (presets the column and line to 1). +var InitPos = Pos{Column: 1, Line: 1} + +// Visitors are just implementations of this function. +// +// The function must return the Node to replace this node with. "nil" is +// _not_ a valid return value. If there is no replacement, the original node +// should be returned. We build this replacement directly into the visitor +// pattern since AST transformations are a common and useful tool and +// building it into the AST itself makes it required for future Node +// implementations and very easy to do. +// +// Note that this isn't a true implementation of the visitor pattern, which +// generally requires proper type dispatch on the function. However, +// implementing this basic visitor pattern style is still very useful even +// if you have to type switch. +type Visitor func(Node) Node + +//go:generate stringer -type=Type + +// Type is the type of any value.
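+// The non-invalid values are distinct bit flags (TypeAny is 2, TypeBool is +// 4, and so on), as the generated String method in type_string.go reflects.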
+type Type uint32 + +const ( + TypeInvalid Type = 0 + TypeAny Type = 1 << iota + TypeBool + TypeString + TypeInt + TypeFloat + TypeList + TypeMap + + // This is a special type used by Terraform to mark "unknown" values. + // It is impossible for this type to be introduced into your HIL programs + // unless you explicitly set a variable to this value. In that case, + // any operation including the variable will return "TypeUnknown" as the + // type. + TypeUnknown +) + +func (t Type) Printable() string { + switch t { + case TypeInvalid: + return "invalid type" + case TypeAny: + return "any type" + case TypeBool: + return "type bool" + case TypeString: + return "type string" + case TypeInt: + return "type int" + case TypeFloat: + return "type float" + case TypeList: + return "type list" + case TypeMap: + return "type map" + case TypeUnknown: + return "type unknown" + default: + return "unknown type" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/call.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/call.go new file mode 100644 index 00000000000..0557011022f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/call.go @@ -0,0 +1,47 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Call represents a function call. +type Call struct { + Func string + Args []Node + Posx Pos +} + +func (n *Call) Accept(v Visitor) Node { + for i, a := range n.Args { + n.Args[i] = a.Accept(v) + } + + return v(n) +} + +func (n *Call) Pos() Pos { + return n.Posx +} + +func (n *Call) String() string { + args := make([]string, len(n.Args)) + for i, arg := range n.Args { + args[i] = fmt.Sprintf("%s", arg) + } + + return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) +} + +func (n *Call) Type(s Scope) (Type, error) { + f, ok := s.LookupFunc(n.Func) + if !ok { + return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) + } + + return f.ReturnType, nil +} + +func (n *Call) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/conditional.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/conditional.go new file mode 100644 index 00000000000..be48f89d46f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/conditional.go @@ -0,0 +1,36 @@ +package ast + +import ( + "fmt" +) + +type Conditional struct { + CondExpr Node + TrueExpr Node + FalseExpr Node + Posx Pos +} + +// Accept passes the given visitor to the child nodes in this order: +// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. +func (n *Conditional) Accept(v Visitor) Node { + n.CondExpr = n.CondExpr.Accept(v) + n.TrueExpr = n.TrueExpr.Accept(v) + n.FalseExpr = n.FalseExpr.Accept(v) + + return v(n) +} + +func (n *Conditional) Pos() Pos { + return n.Posx +} + +func (n *Conditional) Type(Scope) (Type, error) { + // This is not actually a useful value; the type checker ignores + // this function when analyzing conditionals, just as with Arithmetic. 
+ return TypeInt, nil +} + +func (n *Conditional) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/index.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/index.go new file mode 100644 index 00000000000..860c25fd24d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/index.go @@ -0,0 +1,76 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Index represents an indexing operation into another data structure +type Index struct { + Target Node + Key Node + Posx Pos +} + +func (n *Index) Accept(v Visitor) Node { + n.Target = n.Target.Accept(v) + n.Key = n.Key.Accept(v) + return v(n) +} + +func (n *Index) Pos() Pos { + return n.Posx +} + +func (n *Index) String() string { + return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key) +} + +func (n *Index) Type(s Scope) (Type, error) { + variableAccess, ok := n.Target.(*VariableAccess) + if !ok { + return TypeInvalid, fmt.Errorf("target is not a variable") + } + + variable, ok := s.LookupVar(variableAccess.Name) + if !ok { + return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name) + } + + switch variable.Type { + case TypeList: + return n.typeList(variable, variableAccess.Name) + case TypeMap: + return n.typeMap(variable, variableAccess.Name) + default: + return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type) + } +} + +func (n *Index) typeList(variable Variable, variableName string) (Type, error) { + // We assume type checking has already determined that this is a list + list := variable.Value.([]Variable) + + return VariableListElementTypesAreHomogenous(variableName, list) +} + +func (n *Index) typeMap(variable Variable, variableName string) (Type, error) { + // We assume type checking has already determined that this is a map + vmap := variable.Value.(map[string]Variable) + + return VariableMapValueTypesAreHomogenous(variableName, vmap) +} + +func reportTypes(typesFound map[Type]struct{}) string { + stringTypes := make([]string, len(typesFound)) + i := 0 + for k := range typesFound { + stringTypes[i] = k.String() + i++ + } + return strings.Join(stringTypes, ", ") +} + +func (n *Index) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/literal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/literal.go new file mode 100644 index 00000000000..da6014fee2b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/literal.go @@ -0,0 +1,88 @@ +package ast + +import ( + "fmt" + "reflect" +) + +// LiteralNode represents a single literal value, such as "foo" or +// 42 or 3.14159. Based on the Type, the Value can be safely cast. +type LiteralNode struct { + Value interface{} + Typex Type + Posx Pos +} + +// NewLiteralNode returns a new literal node representing the given +// literal Go value, which must correspond to one of the primitive types +// supported by HIL. Lists and maps cannot currently be constructed via +// this function. +// +// If an inappropriately-typed value is provided, this function will +// return an error. The main intended use of this function is to produce +// "synthetic" literals from constants in code, where the value type is +// well known at compile time. To easily store these in global variables, +// see also MustNewLiteralNode.
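+// +// For example: +// +//     n, err := NewLiteralNode(42, InitPos)   // a TypeInt literal +//     s, err := NewLiteralNode("hi", InitPos) // a TypeString literal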
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { + goType := reflect.TypeOf(value) + var hilType Type + + switch goType.Kind() { + case reflect.Bool: + hilType = TypeBool + case reflect.Int: + hilType = TypeInt + case reflect.Float64: + hilType = TypeFloat + case reflect.String: + hilType = TypeString + default: + return nil, fmt.Errorf("unsupported literal node type: %T", value) + } + + return &LiteralNode{ + Value: value, + Typex: hilType, + Posx: pos, + }, nil +} + +// MustNewLiteralNode wraps NewLiteralNode and panics if an error is +// returned, thus allowing valid literal nodes to be easily assigned to +// global variables. +func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { + node, err := NewLiteralNode(value, pos) + if err != nil { + panic(err) + } + return node +} + +func (n *LiteralNode) Accept(v Visitor) Node { + return v(n) +} + +func (n *LiteralNode) Pos() Pos { + return n.Posx +} + +func (n *LiteralNode) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *LiteralNode) String() string { + return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) +} + +func (n *LiteralNode) Type(Scope) (Type, error) { + return n.Typex, nil +} + +// IsUnknown returns true either if the node's value is itself unknown +// or if it is a collection containing any unknown elements, deeply. +func (n *LiteralNode) IsUnknown() bool { + return IsUnknown(Variable{ + Type: n.Typex, + Value: n.Value, + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/output.go new file mode 100644 index 00000000000..1e27f970b33 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/output.go @@ -0,0 +1,78 @@ +package ast + +import ( + "bytes" + "fmt" +) + +// Output represents the root node of all interpolation evaluations. If the +// output only has one expression which is either a TypeList or TypeMap, the +// Output can be type-asserted to []interface{} or map[string]interface{} +// respectively. Otherwise the Output evaluates as a string, and concatenates +// the evaluation of each expression.
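+// +// For example, "ami-${var.ami}" yields an Output with two expressions, a +// string literal and a variable access, whose results are concatenated into +// a single string.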
+type Output struct { + Exprs []Node + Posx Pos +} + +func (n *Output) Accept(v Visitor) Node { + for i, expr := range n.Exprs { + n.Exprs[i] = expr.Accept(v) + } + + return v(n) +} + +func (n *Output) Pos() Pos { + return n.Posx +} + +func (n *Output) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *Output) String() string { + var b bytes.Buffer + for _, expr := range n.Exprs { + b.WriteString(fmt.Sprintf("%s", expr)) + } + + return b.String() +} + +func (n *Output) Type(s Scope) (Type, error) { + // Special case no expressions for backward compatibility + if len(n.Exprs) == 0 { + return TypeString, nil + } + + // Special case a single expression of types list or map + if len(n.Exprs) == 1 { + exprType, err := n.Exprs[0].Type(s) + if err != nil { + return TypeInvalid, err + } + switch exprType { + case TypeList: + return TypeList, nil + case TypeMap: + return TypeMap, nil + } + } + + // Otherwise ensure all our expressions are strings + for index, expr := range n.Exprs { + exprType, err := expr.Type(s) + if err != nil { + return TypeInvalid, err + } + + // We only look for things we know we can't coerce with an implicit conversion func + if exprType == TypeList || exprType == TypeMap { + return TypeInvalid, fmt.Errorf( + "multi-expression HIL outputs may only have string inputs: %d is type %s", + index, exprType) + } + } + + return TypeString, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/scope.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/scope.go new file mode 100644 index 00000000000..7a975d99930 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/scope.go @@ -0,0 +1,90 @@ +package ast + +import ( + "fmt" + "reflect" +) + +// Scope is the interface used to look up variables and functions while +// evaluating. How these functions/variables are defined is up to the caller. +type Scope interface { + LookupFunc(string) (Function, bool) + LookupVar(string) (Variable, bool) +} + +// Variable is a variable value for execution given as input to the engine. +// It records the value of a variable along with its type. +type Variable struct { + Value interface{} + Type Type +} + +// NewVariable creates a new Variable for the given value. This will +// attempt to infer the correct type. If it can't, an error will be returned. +func NewVariable(v interface{}) (result Variable, err error) { + switch v := reflect.ValueOf(v); v.Kind() { + case reflect.String: + result.Type = TypeString + default: + err = fmt.Errorf("Unknown type: %s", v.Kind()) + } + + result.Value = v + return +} + +// String implements Stringer on Variable, displaying the type and value +// of the Variable. +func (v Variable) String() string { + return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value) +} + +// Function defines a function that can be executed by the engine. +// The type checker will validate that the proper types will be passed +// to the callback. +type Function struct { + // ArgTypes is the list of types in argument order. These are the + // required arguments. + // + // ReturnType is the type of the returned value. The Callback MUST + // return this type. + ArgTypes []Type + ReturnType Type + + // Variadic, if true, says that this function is variadic, meaning + // it takes a variable number of arguments. In this case, the + // VariadicType must be set. + Variadic bool + VariadicType Type + + // Callback is the function called for a function.
The argument + // types are guaranteed to match the spec above by the type checker. + // The length of the args is strictly == len(ArgTypes) unless Variadic + // is true, in which case it's >= len(ArgTypes). + Callback func([]interface{}) (interface{}, error) +} + +// BasicScope is a simple scope that looks up variables and functions +// using a map. +type BasicScope struct { + FuncMap map[string]Function + VarMap map[string]Variable +} + +func (s *BasicScope) LookupFunc(n string) (Function, bool) { + if s == nil { + return Function{}, false + } + + v, ok := s.FuncMap[n] + return v, ok +} + +func (s *BasicScope) LookupVar(n string) (Variable, bool) { + if s == nil { + return Variable{}, false + } + + v, ok := s.VarMap[n] + return v, ok +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/stack.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/stack.go new file mode 100644 index 00000000000..bd2bc157862 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/stack.go @@ -0,0 +1,25 @@ +package ast + +// Stack is a stack of Node. +type Stack struct { + stack []Node +} + +func (s *Stack) Len() int { + return len(s.stack) +} + +func (s *Stack) Push(n Node) { + s.stack = append(s.stack, n) +} + +func (s *Stack) Pop() Node { + x := s.stack[len(s.stack)-1] + s.stack[len(s.stack)-1] = nil + s.stack = s.stack[:len(s.stack)-1] + return x +} + +func (s *Stack) Reset() { + s.stack = nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/type_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/type_string.go new file mode 100644 index 00000000000..1f51a98dd54 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/type_string.go @@ -0,0 +1,54 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT + +package ast + +import "fmt" + +const ( + _Type_name_0 = "TypeInvalid" + _Type_name_1 = "TypeAny" + _Type_name_2 = "TypeBool" + _Type_name_3 = "TypeString" + _Type_name_4 = "TypeInt" + _Type_name_5 = "TypeFloat" + _Type_name_6 = "TypeList" + _Type_name_7 = "TypeMap" + _Type_name_8 = "TypeUnknown" +) + +var ( + _Type_index_0 = [...]uint8{0, 11} + _Type_index_1 = [...]uint8{0, 7} + _Type_index_2 = [...]uint8{0, 8} + _Type_index_3 = [...]uint8{0, 10} + _Type_index_4 = [...]uint8{0, 7} + _Type_index_5 = [...]uint8{0, 9} + _Type_index_6 = [...]uint8{0, 8} + _Type_index_7 = [...]uint8{0, 7} + _Type_index_8 = [...]uint8{0, 11} +) + +func (i Type) String() string { + switch { + case i == 0: + return _Type_name_0 + case i == 2: + return _Type_name_1 + case i == 4: + return _Type_name_2 + case i == 8: + return _Type_name_3 + case i == 16: + return _Type_name_4 + case i == 32: + return _Type_name_5 + case i == 64: + return _Type_name_6 + case i == 128: + return _Type_name_7 + case i == 256: + return _Type_name_8 + default: + return fmt.Sprintf("Type(%d)", i) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/unknown.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/unknown.go new file mode 100644 index 00000000000..d6ddaecc78e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/unknown.go @@ -0,0 +1,30 @@ +package ast + +// IsUnknown reports whether a variable is unknown or contains any value +// that is unknown. This will recurse into lists and maps and so on.
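+// +// For example, a TypeList variable is unknown if any of its element +// Variables has Type TypeUnknown, however deeply nested.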
+func IsUnknown(v Variable) bool { + // If it is unknown itself, return true + if v.Type == TypeUnknown { + return true + } + + // If it is a container type, check the values + switch v.Type { + case TypeList: + for _, el := range v.Value.([]Variable) { + if IsUnknown(el) { + return true + } + } + case TypeMap: + for _, el := range v.Value.(map[string]Variable) { + if IsUnknown(el) { + return true + } + } + default: + } + + // Not a container type, or it survived the above checks + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variable_access.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variable_access.go new file mode 100644 index 00000000000..4c1362d7531 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variable_access.go @@ -0,0 +1,36 @@ +package ast + +import ( + "fmt" +) + +// VariableAccess represents a variable access. +type VariableAccess struct { + Name string + Posx Pos +} + +func (n *VariableAccess) Accept(v Visitor) Node { + return v(n) +} + +func (n *VariableAccess) Pos() Pos { + return n.Posx +} + +func (n *VariableAccess) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *VariableAccess) String() string { + return fmt.Sprintf("Variable(%s)", n.Name) +} + +func (n *VariableAccess) Type(s Scope) (Type, error) { + v, ok := s.LookupVar(n.Name) + if !ok { + return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name) + } + + return v.Type, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variables_helper.go new file mode 100644 index 00000000000..06bd18de2ac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/hil/ast/variables_helper.go @@ -0,0 +1,63 @@ +package ast + +import "fmt" + +func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { + if len(list) == 0 { + return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) + } + + elemType := TypeUnknown + for _, v := range list { + if v.Type == TypeUnknown { + continue + } + + if elemType == TypeUnknown { + elemType = v.Type + continue + } + + if v.Type != elemType { + return TypeInvalid, fmt.Errorf( + "list %q does not have homogenous types. found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type + } + + return elemType, nil +} + +func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { + if len(vmap) == 0 { + return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) + } + + elemType := TypeUnknown + for _, v := range vmap { + if v.Type == TypeUnknown { + continue + } + + if elemType == TypeUnknown { + elemType = v.Type + continue + } + + if v.Type != elemType { + return TypeInvalid, fmt.Errorf( + "map %q does not have homogenous types. found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type + } + + return elemType, nil
} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1.
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/level.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/level.go new file mode 100644 index 00000000000..6381bf1629a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/logutils/level.go @@ -0,0 +1,81 @@ +// Package logutils augments the standard log package with levels. +package logutils + +import ( + "bytes" + "io" + "sync" +) + +type LogLevel string + +// LevelFilter is an io.Writer that can be used with a logger that +// will filter out log messages that aren't at least a certain level. +// +// Once the filter is in use somewhere, it is not safe to modify +// the structure. +type LevelFilter struct { + // Levels is the list of log levels, in increasing order of + // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. + Levels []LogLevel + + // MinLevel is the minimum level allowed through + MinLevel LogLevel + + // The underlying io.Writer where log messages that pass the filter + // will be set. + Writer io.Writer + + badLevels map[LogLevel]struct{} + once sync.Once +} + +// Check will check a given line if it would be included in the level +// filter. +func (f *LevelFilter) Check(line []byte) bool { + f.once.Do(f.init) + + // Check for a log level + var level LogLevel + x := bytes.IndexByte(line, '[') + if x >= 0 { + y := bytes.IndexByte(line[x:], ']') + if y >= 0 { + level = LogLevel(line[x+1 : x+y]) + } + } + + _, ok := f.badLevels[level] + return !ok +} + +func (f *LevelFilter) Write(p []byte) (n int, err error) { + // Note in general that io.Writer can receive any byte sequence + // to write, but the "log" package always guarantees that we only + // get a single line. We use that as a slight optimization within + // this method, assuming we're dealing with a single, complete line + // of log data. 
+ + if !f.Check(p) { + return len(p), nil + } + + return f.Writer.Write(p) +} + +// SetMinLevel is used to update the minimum log level +func (f *LevelFilter) SetMinLevel(min LogLevel) { + f.MinLevel = min + f.init() +} + +func (f *LevelFilter) init() { + badLevels := make(map[LogLevel]struct{}) + for _, level := range f.Levels { + if level == f.MinLevel { + break + } + badLevels[level] = struct{}{} + } + f.badLevels = badLevels +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go new file mode 100644 index 00000000000..8d04ad4de2b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go @@ -0,0 +1,138 @@ +package tfconfig + +import ( + "fmt" + + legacyhclparser "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl2/hcl" +) + +// Diagnostic describes a problem (error or warning) encountered during +// configuration loading. +type Diagnostic struct { + Severity DiagSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + + // Pos is not populated for all diagnostics, but when populated should + // indicate a particular line that the described problem relates to. + Pos *SourcePos `json:"pos,omitempty"` +} + +// Diagnostics represents a sequence of diagnostics. This is the type that +// should be returned from a function that might generate diagnostics. 
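+//
+// An illustrative usage sketch (not part of the upstream documentation):
+// callers usually inspect the Diagnostics returned alongside a module
+// before trusting the result, e.g.
+//
+//	mod, diags := tfconfig.LoadModule("./example-module")
+//	if diags.HasErrors() {
+//		log.Printf("module loaded with errors: %s", diags.Error())
+//	}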
+type Diagnostics []Diagnostic
+
+// HasErrors returns true if there is at least one Diagnostic of severity
+// DiagError in the receiver.
+//
+// If a function returns a Diagnostics without errors then the result can
+// be assumed to be complete within the "best effort" constraints of this
+// library. If errors are present then the caller may wish to employ more
+// caution in relying on the result.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity == DiagError {
+			return true
+		}
+	}
+	return false
+}
+
+func (diags Diagnostics) Error() string {
+	switch len(diags) {
+	case 0:
+		return "no problems"
+	case 1:
+		return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail)
+	default:
+		return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1)
+	}
+}
+
+// Err returns an error representing the receiver if the receiver HasErrors, or
+// nil otherwise.
+//
+// The returned error can be type-asserted back to a Diagnostics if needed.
+func (diags Diagnostics) Err() error {
+	if diags.HasErrors() {
+		return diags
+	}
+	return nil
+}
+
+// DiagSeverity describes the severity of a Diagnostic.
+type DiagSeverity rune
+
+// DiagError indicates a problem that prevented proper processing of the
+// configuration. In the presence of DiagError diagnostics the result is
+// likely to be incomplete.
+const DiagError DiagSeverity = 'E'
+
+// DiagWarning indicates a problem that the user may wish to consider but
+// that did not prevent proper processing of the configuration.
+const DiagWarning DiagSeverity = 'W'
+
+// MarshalJSON is an implementation of encoding/json.Marshaler
+func (s DiagSeverity) MarshalJSON() ([]byte, error) {
+	switch s {
+	case DiagError:
+		return []byte(`"error"`), nil
+	case DiagWarning:
+		return []byte(`"warning"`), nil
+	default:
+		return []byte(`"invalid"`), nil
+	}
+}
+
+func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics {
+	if len(diags) == 0 {
+		return nil
+	}
+	ret := make(Diagnostics, len(diags))
+	for i, diag := range diags {
+		ret[i] = Diagnostic{
+			Summary: diag.Summary,
+			Detail:  diag.Detail,
+		}
+		switch diag.Severity {
+		case hcl.DiagError:
+			ret[i].Severity = DiagError
+		case hcl.DiagWarning:
+			ret[i].Severity = DiagWarning
+		}
+		if diag.Subject != nil {
+			pos := sourcePosHCL(*diag.Subject)
+			ret[i].Pos = &pos
+		}
+	}
+	return ret
+}
+
+func diagnosticsError(err error) Diagnostics {
+	if err == nil {
+		return nil
+	}
+
+	if posErr, ok := err.(*legacyhclparser.PosError); ok {
+		pos := sourcePosLegacyHCL(posErr.Pos, "")
+		return Diagnostics{
+			Diagnostic{
+				Severity: DiagError,
+				Summary:  posErr.Err.Error(),
+				Pos:      &pos,
+			},
+		}
+	}
+
+	return Diagnostics{
+		Diagnostic{
+			Severity: DiagError,
+			Summary:  err.Error(),
+		},
+	}
+}
+
+func diagnosticsErrorf(format string, args ...interface{}) Diagnostics {
+	return diagnosticsError(fmt.Errorf(format, args...))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
new file mode 100644
index 00000000000..1604a6e08a3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
@@ -0,0 +1,21 @@
+// Package tfconfig is a helper library that does careful, shallow parsing of
+// Terraform modules to provide access to high-level metadata while
+// remaining broadly compatible with configurations targeting various
+// different Terraform versions.
+//
+// This package focuses on describing top-level objects only, and in particular
+// does not attempt any sort of processing that would require access to plugins.
+// Currently it allows callers to extract high-level information about
+// variables, outputs, resource blocks, provider dependencies, and Terraform
+// Core dependencies.
+//
+// This package only works at the level of single modules. A full configuration
+// is a tree of potentially several modules, some of which may be references
+// to remote packages. There are some basic helpers for traversing calls to
+// modules at relative local paths, however.
+//
+// This package employs a "best effort" parsing strategy, producing as complete
+// a result as possible even though the input may not be entirely valid. The
+// intended use-case is high-level analysis and indexing of externally-facing
+// module characteristics, as opposed to validating or even applying the module.
+package tfconfig
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
new file mode 100644
index 00000000000..2d13fe12452
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
@@ -0,0 +1,130 @@
+package tfconfig
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// LoadModule reads the directory at the given path and attempts to interpret
+// it as a Terraform module.
+func LoadModule(dir string) (*Module, Diagnostics) {
+
+	// For broad compatibility here we actually have two separate loader
+	// codepaths. The main one uses the new HCL parser and API and is intended
+	// for configurations from Terraform 0.12 onwards (though will work for
+	// many older configurations too), but we'll also fall back on one that
+	// uses the _old_ HCL implementation so we can deal with some edge-cases
+	// that are not valid in new HCL.
+
+	module, diags := loadModule(dir)
+	if diags.HasErrors() {
+		// Try using the legacy HCL parser and see if we fare better.
+		legacyModule, legacyDiags := loadModuleLegacyHCL(dir)
+		if !legacyDiags.HasErrors() {
+			legacyModule.init(legacyDiags)
+			return legacyModule, legacyDiags
+		}
+	}
+
+	module.init(diags)
+	return module, diags
+}
+
+// IsModuleDir checks if the given path contains Terraform configuration files.
+// This allows the caller to decide how to handle directories that do not have tf files.
+func IsModuleDir(dir string) bool {
+	primaryPaths, _ := dirFiles(dir)
+	return len(primaryPaths) != 0
+}
+
+func (m *Module) init(diags Diagnostics) {
+	// Fill in any additional provider requirements that are implied by
+	// resource configurations, so that the caller does not need to apply
+	// this logic itself. Implied requirements don't have version constraints,
+	// but we'll make sure the requirement value is still non-nil in this
+	// case so callers can easily recognize it.
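+	//
+	// Illustrative example (not from upstream): a module whose only block is
+	// `resource "aws_instance" "x" {}` ends up with an implied entry
+	// RequiredProviders["aws"] == []string{}.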
+	for _, r := range m.ManagedResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+	for _, r := range m.DataResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+
+	// We redundantly also reference the diagnostics from inside the module
+	// object, primarily so that they can easily be included in JSON-serialized
+	// versions of the module object.
+	m.Diagnostics = diags
+}
+
+func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) {
+	infos, err := ioutil.ReadDir(dir)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Failed to read module directory",
+			Detail:   fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
+		})
+		return
+	}
+
+	var override []string
+	for _, info := range infos {
+		if info.IsDir() {
+			// We only care about files
+			continue
+		}
+
+		name := info.Name()
+		ext := fileExt(name)
+		if ext == "" || isIgnoredFile(name) {
+			continue
+		}
+
+		baseName := name[:len(name)-len(ext)] // strip extension
+		isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
+
+		fullPath := filepath.Join(dir, name)
+		if isOverride {
+			override = append(override, fullPath)
+		} else {
+			primary = append(primary, fullPath)
+		}
+	}
+
+	// We assume that any _override files are logically named, and we process
+	// the files in alphabetical order: primaries first, then overrides.
+	primary = append(primary, override...)
+
+	return
+}
+
+// fileExt returns the Terraform configuration extension of the given
+// path, or a blank string if it is not a recognized extension.
+func fileExt(path string) string {
+	if strings.HasSuffix(path, ".tf") {
+		return ".tf"
+	} else if strings.HasSuffix(path, ".tf.json") {
+		return ".tf.json"
+	} else {
+		return ""
+	}
+}
+
+// isIgnoredFile returns true if the given filename (which must not have a
+// directory path ahead of it) should be ignored as e.g. an editor swap file.
+func isIgnoredFile(name string) bool {
+	return strings.HasPrefix(name, ".") || // Unix-like hidden files
+		strings.HasSuffix(name, "~") || // vim
+		strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
new file mode 100644
index 00000000000..72b5d4af908
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
@@ -0,0 +1,322 @@
+package tfconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+	"github.com/hashicorp/hcl2/gohcl"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hclparse"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+)
+
+func loadModule(dir string) (*Module, Diagnostics) {
+	mod := newModule(dir)
+	primaryPaths, diags := dirFiles(dir)
+
+	parser := hclparse.NewParser()
+
+	for _, filename := range primaryPaths {
+		var file *hcl.File
+		var fileDiags hcl.Diagnostics
+		if strings.HasSuffix(filename, ".json") {
+			file, fileDiags = parser.ParseJSONFile(filename)
+		} else {
+			file, fileDiags = parser.ParseHCLFile(filename)
+		}
+		diags = append(diags, fileDiags...)
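+		// A nil file means this parse failed outright, so there is no body to
+		// inspect; the diagnostics appended above already describe the failure.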
+ if file == nil { + continue + } + + content, _, contentDiags := file.Body.PartialContent(rootSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) + diags = append(diags, contentDiags...) + + if attr, defined := content.Attributes["required_version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredCore = append(mod.RequiredCore, version) + } + } + + for _, block := range content.Blocks { + // Our schema only allows required_providers here, so we + // assume that we'll only get that block type. + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + for name, attr := range attrs { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + } + + case "variable": + content, _, contentDiags := block.Body.PartialContent(variableSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + v := &Variable{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Variables[name] = v + + if attr, defined := content.Attributes["type"]; defined { + // We handle this particular attribute in a somewhat-tricky way: + // since Terraform may evolve its type expression syntax in + // future versions, we don't want to be overly-strict in how + // we handle it here, and so we'll instead just take the raw + // source provided by the user, using the source location + // information in the expression object. + // + // However, older versions of Terraform expected the type + // to be a string containing a keyword, so we'll need to + // handle that as a special case first for backward compatibility. + + var typeExpr string + + var typeExprAsStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) + if !valDiags.HasErrors() { + typeExpr = typeExprAsStr + } else { + + rng := attr.Expr.Range() + sourceFilename := rng.Filename + source, exists := parser.Sources()[sourceFilename] + if exists { + typeExpr = string(rng.SliceBytes(source)) + } else { + // This should never happen, so we'll just warn about it and leave the type unspecified. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Source code not available", + Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), + Subject: &block.DefRange, + }) + typeExpr = "" + } + + } + + v.Type = typeExpr + } + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + v.Description = description + } + + if attr, defined := content.Attributes["default"]; defined { + // To avoid the caller needing to deal with cty here, we'll + // use its JSON encoding to convert into an + // approximately-equivalent plain Go interface{} value + // to return. + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) 
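+					// Illustrative example (not from upstream): a default such
+					// as `default = ["a", "b"]` round-trips through
+					// ctyjson.Marshal and json.Unmarshal to the plain Go value
+					// []interface{}{"a", "b"}.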
+ if val.IsWhollyKnown() { // should only be false if there are errors in the input + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + // Should never happen, since all possible known + // values have a JSON mapping. + panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) + } + var def interface{} + err = json.Unmarshal(valJSON, &def) + if err != nil { + // Again should never happen, because valJSON is + // guaranteed valid by ctyjson.Marshal. + panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) + } + v.Default = def + } + } + + case "output": + + content, _, contentDiags := block.Body.PartialContent(outputSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + o := &Output{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Outputs[name] = o + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + o.Description = description + } + + case "provider": + + content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + case "resource", "data": + + content, _, contentDiags := block.Body.PartialContent(resourceSchema) + diags = append(diags, contentDiags...) + + typeName := block.Labels[0] + name := block.Labels[1] + + r := &Resource{ + Type: typeName, + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + var resourcesMap map[string]*Resource + + switch block.Type { + case "resource": + r.Mode = ManagedResourceMode + resourcesMap = mod.ManagedResources + case "data": + r.Mode = DataResourceMode + resourcesMap = mod.DataResources + } + + key := r.MapKey() + + resourcesMap[key] = r + + if attr, defined := content.Attributes["provider"]; defined { + // New style here is to provide this as a naked traversal + // expression, but we also support quoted references for + // older configurations that predated this convention. + traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) + if travDiags.HasErrors() { + traversal = nil // in case we got any partial results + + // Fall back on trying to parse as a string + var travStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) + if !valDiags.HasErrors() { + var strDiags hcl.Diagnostics + traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) + if strDiags.HasErrors() { + traversal = nil + } + } + } + + // If we get out here with a nil traversal then we didn't + // succeed in processing the input. 
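+					// e.g. (illustrative) both `provider = aws.west` and the
+					// legacy quoted form `provider = "aws.west"` decode to
+					// ProviderRef{Name: "aws", Alias: "west"}.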
+ if len(traversal) > 0 { + providerName := traversal.RootName() + alias := "" + if len(traversal) > 1 { + if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { + alias = getAttr.Name + } + } + r.Provider = ProviderRef{ + Name: providerName, + Alias: alias, + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider reference", + Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + // If provider _isn't_ set then we'll infer it from the + // resource type. + r.Provider = ProviderRef{ + Name: resourceTypeDefaultProviderName(r.Type), + } + } + + case "module": + + content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + mc := &ModuleCall{ + Name: block.Labels[0], + Pos: sourcePosHCL(block.DefRange), + } + + // check if this is overriding an existing module + var origSource string + if origMod, exists := mod.ModuleCalls[name]; exists { + origSource = origMod.Source + } + + mod.ModuleCalls[name] = mc + + if attr, defined := content.Attributes["source"]; defined { + var source string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) + diags = append(diags, valDiags...) + mc.Source = source + } + + if mc.Source == "" { + mc.Source = origSource + } + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + mc.Version = version + } + + default: + // Should never happen because our cases above should be + // exhaustive for our schema. + panic(fmt.Errorf("unhandled block type %q", block.Type)) + } + } + } + + return mod, diagnosticsHCL(diags) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go new file mode 100644 index 00000000000..86ffdf11dd3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go @@ -0,0 +1,325 @@ +package tfconfig + +import ( + "io/ioutil" + "strings" + + legacyhcl "github.com/hashicorp/hcl" + legacyast "github.com/hashicorp/hcl/hcl/ast" +) + +func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { + // This implementation is intentionally more quick-and-dirty than the + // main loader. In particular, it doesn't bother to keep careful track + // of multiple error messages because we always fall back on returning + // the main parser's error message if our fallback parsing produces + // an error, and thus the errors here are not seen by the end-caller. 
+ mod := newModule(dir) + + primaryPaths, diags := dirFiles(dir) + if diags.HasErrors() { + return mod, diagnosticsHCL(diags) + } + + for _, filename := range primaryPaths { + src, err := ioutil.ReadFile(filename) + if err != nil { + return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) + } + + hclRoot, err := legacyhcl.Parse(string(src)) + if err != nil { + return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) + } + + list, ok := hclRoot.Node.(*legacyast.ObjectList) + if !ok { + return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) + } + + for _, item := range list.Filter("terraform").Items { + if len(item.Keys) > 0 { + item = &legacyast.ObjectItem{ + Val: &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{item}, + }, + }, + } + } + + type TerraformBlock struct { + RequiredVersion string `hcl:"required_version"` + } + var block TerraformBlock + err = legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("terraform block: %s", err) + } + + if block.RequiredVersion != "" { + mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) + } + } + + if vars := list.Filter("variable"); len(vars.Items) > 0 { + vars = vars.Children() + type VariableBlock struct { + Type string `hcl:"type"` + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + for _, item := range vars.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block VariableBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) + } + + // Clean up legacy HCL decoding ambiguity by unwrapping list of maps + if ms, ok := block.Default.([]map[string]interface{}); ok { + def := make(map[string]interface{}) + for _, m := range ms { + for k, v := range m { + def[k] = v + } + } + block.Default = def + } + + v := &Variable{ + Name: name, + Type: block.Type, + Description: block.Description, + Default: block.Default, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Variables[name]; exists { + return nil, diagnosticsErrorf("duplicate variable block for %q", name) + } + mod.Variables[name] = v + + } + } + + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + outputs = outputs.Children() + type OutputBlock struct { + Description string + } + + for _, item := range outputs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block OutputBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) + } + + o := &Output{ + Name: name, + Description: block.Description, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Outputs[name]; exists { + return nil, diagnosticsErrorf("duplicate output block for %q", name) + } + mod.Outputs[name] = o + } + } + + for _, blockType := range []string{"resource", "data"} { + if resources := list.Filter(blockType); len(resources.Items) > 0 { + resources = resources.Children() + type ResourceBlock struct { + Provider string + } + + for _, item := range resources.Items 
{ + unwrapLegacyHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) + } + + typeName := item.Keys[0].Token.Value().(string) + name := item.Keys[1].Token.Value().(string) + var mode ResourceMode + var rMap map[string]*Resource + switch blockType { + case "resource": + mode = ManagedResourceMode + rMap = mod.ManagedResources + case "data": + mode = DataResourceMode + rMap = mod.DataResources + } + + var block ResourceBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) + } + + var providerName, providerAlias string + if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { + providerName = block.Provider[:dotPos] + providerAlias = block.Provider[dotPos+1:] + } else { + providerName = block.Provider + } + if providerName == "" { + providerName = resourceTypeDefaultProviderName(typeName) + } + + r := &Resource{ + Mode: mode, + Type: typeName, + Name: name, + Provider: ProviderRef{ + Name: providerName, + Alias: providerAlias, + }, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + key := r.MapKey() + if _, exists := rMap[key]; exists { + return nil, diagnosticsErrorf("duplicate resource block for %q", key) + } + rMap[key] = r + } + } + + } + + if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { + moduleCalls = moduleCalls.Children() + type ModuleBlock struct { + Source string + Version string + } + + for _, item := range moduleCalls.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ModuleBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) + } + + mc := &ModuleCall{ + Name: name, + Source: block.Source, + Version: block.Version, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + // it's possible this module call is from an override file + if origMod, exists := mod.ModuleCalls[name]; exists { + if mc.Source == "" { + mc.Source = origMod.Source + } + } + mod.ModuleCalls[name] = mc + } + } + + if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { + providerConfigs = providerConfigs.Children() + type ProviderBlock struct { + Version string + } + + for _, item := range providerConfigs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ProviderBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) + } + + if block.Version != "" { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version) + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + } + } + } + + return mod, nil +} + +// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". 
+// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{ + &legacyast.ObjectItem{ + Keys: []*legacyast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go new file mode 100644 index 00000000000..65ddb230735 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go @@ -0,0 +1,35 @@ +package tfconfig + +// Module is the top-level type representing a parsed and processed Terraform +// module. +type Module struct { + // Path is the local filesystem directory where the module was loaded from. + Path string `json:"path"` + + Variables map[string]*Variable `json:"variables"` + Outputs map[string]*Output `json:"outputs"` + + RequiredCore []string `json:"required_core,omitempty"` + RequiredProviders map[string][]string `json:"required_providers"` + + ManagedResources map[string]*Resource `json:"managed_resources"` + DataResources map[string]*Resource `json:"data_resources"` + ModuleCalls map[string]*ModuleCall `json:"module_calls"` + + // Diagnostics records any errors and warnings that were detected during + // loading, primarily for inclusion in serialized forms of the module + // since this slice is also returned as a second argument from LoadModule. + Diagnostics Diagnostics `json:"diagnostics,omitempty"` +} + +func newModule(path string) *Module { + return &Module{ + Path: path, + Variables: make(map[string]*Variable), + Outputs: make(map[string]*Output), + RequiredProviders: make(map[string][]string), + ManagedResources: make(map[string]*Resource), + DataResources: make(map[string]*Resource), + ModuleCalls: make(map[string]*ModuleCall), + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go new file mode 100644 index 00000000000..5e1e05a7268 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go @@ -0,0 +1,11 @@ +package tfconfig + +// ModuleCall represents a "module" block within a module. That is, a +// declaration of a child module from inside its parent. 
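+//
+// For example (an illustrative sketch, not upstream documentation), a
+// configuration containing
+//
+//	module "vpc" {
+//	  source = "./vpc"
+//	}
+//
+// is described by a ModuleCall with Name "vpc" and Source "./vpc"; Version
+// is populated only when the block sets a version argument.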
+type ModuleCall struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go new file mode 100644 index 00000000000..890b25e694c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go @@ -0,0 +1,9 @@ +package tfconfig + +// Output represents a single output from a Terraform module. +type Output struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go new file mode 100644 index 00000000000..d924837785f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go @@ -0,0 +1,9 @@ +package tfconfig + +// ProviderRef is a reference to a provider configuration within a module. +// It represents the contents of a "provider" argument in a resource, or +// a value in the "providers" map for a module call. +type ProviderRef struct { + Name string `json:"name"` + Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go new file mode 100644 index 00000000000..401c8fce97e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go @@ -0,0 +1,64 @@ +package tfconfig + +import ( + "fmt" + "strconv" + "strings" +) + +// Resource represents a single "resource" or "data" block within a module. +type Resource struct { + Mode ResourceMode `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + + Provider ProviderRef `json:"provider"` + + Pos SourcePos `json:"pos"` +} + +// MapKey returns a string that can be used to uniquely identify the receiver +// in a map[string]*Resource. +func (r *Resource) MapKey() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // should never happen + return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) + } +} + +// ResourceMode represents the "mode" of a resource, which is used to +// distinguish between managed resources ("resource" blocks in config) and +// data resources ("data" blocks in config). +type ResourceMode rune + +const InvalidResourceMode ResourceMode = 0 +const ManagedResourceMode ResourceMode = 'M' +const DataResourceMode ResourceMode = 'D' + +func (m ResourceMode) String() string { + switch m { + case ManagedResourceMode: + return "managed" + case DataResourceMode: + return "data" + default: + return "" + } +} + +// MarshalJSON implements encoding/json.Marshaler. 
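+// ManagedResourceMode marshals as "managed" and DataResourceMode as "data",
+// matching the String method above; any other mode marshals as an empty
+// string.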
+func (m ResourceMode) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(m.String())), nil +} + +func resourceTypeDefaultProviderName(typeName string) string { + if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { + return typeName[:underPos] + } + return typeName +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go new file mode 100644 index 00000000000..3af742ff710 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go @@ -0,0 +1,106 @@ +package tfconfig + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +var rootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + LabelNames: nil, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + }, +} + +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "required_providers", + }, + }, +} + +var providerConfigSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "version", + }, + { + Name: "alias", + }, + }, +} + +var variableSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "type", + }, + { + Name: "description", + }, + { + Name: "default", + }, + }, +} + +var outputSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + }, +} + +var moduleCallSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + }, + { + Name: "version", + }, + { + Name: "providers", + }, + }, +} + +var resourceSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "provider", + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go new file mode 100644 index 00000000000..883914eb7b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go @@ -0,0 +1,50 @@ +package tfconfig + +import ( + legacyhcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl2/hcl" +) + +// SourcePos is a pointer to a particular location in a source file. +// +// This type is embedded into other structs to allow callers to locate the +// definition of each described module element. The SourcePos of an element +// is usually the first line of its definition, although the definition can +// be a little "fuzzy" with JSON-based config files. 
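+//
+// In JSON form a SourcePos looks like, for example,
+// {"filename": "main.tf", "line": 12} (illustrative values).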
+type SourcePos struct { + Filename string `json:"filename"` + Line int `json:"line"` +} + +func sourcePos(filename string, line int) SourcePos { + return SourcePos{ + Filename: filename, + Line: line, + } +} + +func sourcePosHCL(rng hcl.Range) SourcePos { + // We intentionally throw away the column information here because + // current and legacy HCL both disagree on the definition of a column + // and so a line-only reference is the best granularity we can do + // such that the result is consistent between both parsers. + return SourcePos{ + Filename: rng.Filename, + Line: rng.Start.Line, + } +} + +func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { + useFilename := pos.Filename + // We'll try to use the filename given in legacy HCL position, but + // in practice there's no way to actually get this populated via + // the HCL API so it's usually empty except in some specialized + // situations, such as positions in error objects. + if useFilename == "" { + useFilename = filename + } + return SourcePos{ + Filename: useFilename, + Line: pos.Line, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go new file mode 100644 index 00000000000..0f73fc995a8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go @@ -0,0 +1,16 @@ +package tfconfig + +// Variable represents a single variable from a Terraform module. +type Variable struct { + Name string `json:"name"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + + // Default is an approximate representation of the default value in + // the native Go type system. The conversion from the value given in + // configuration may be slightly lossy. Only values that can be + // serialized by json.Marshal will be included here. + Default interface{} `json:"default,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. 
“Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. 
This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go new file mode 100644 index 00000000000..b09199953e8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go @@ -0,0 +1,72 @@ +package customdiff + +import ( + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// All returns a CustomizeDiffFunc that runs all of the given +// CustomizeDiffFuncs and returns all of the errors produced. +// +// If one function produces an error, functions after it are still run. +// If this is not desirable, use function Sequence instead. +// +// If multiple functions return errors, the result is a multierror. +// +// For example: +// +// &schema.Resource{ +// // ... +// CustomizeDiff: customdiff.All( +// customdiff.ValidateChange("size", func (old, new, meta interface{}) error { +// // If we are increasing "size" then the new value must be +// // a multiple of the old value. +// if new.(int) <= old.(int) { +// return nil +// } +// if (new.(int) % old.(int)) != 0 { +// return fmt.Errorf("new size value must be an integer multiple of old value %d", old.(int)) +// } +// return nil +// }), +// customdiff.ForceNewIfChange("size", func (old, new, meta interface{}) bool { +// // "size" can only increase in-place, so we must create a new resource +// // if it is decreased. +// return new.(int) < old.(int) +// }), +// customdiff.ComputedIf("version_id", func (d *schema.ResourceDiff, meta interface{}) bool { +// // Any change to "content" causes a new "version_id" to be allocated. +// return d.HasChange("content") +// }), +// ), +// } +// +func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + var err error + for _, f := range funcs { + thisErr := f(d, meta) + if thisErr != nil { + err = multierror.Append(err, thisErr) + } + } + return err + } +} + +// Sequence returns a CustomizeDiffFunc that runs all of the given +// CustomizeDiffFuncs in sequence, stopping at the first one that returns +// an error and returning that error. +// +// If all functions succeed, the combined function also succeeds. 
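+//
+// Unlike All, a function that returns an error prevents the remaining
+// functions from running at all.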
+func Sequence(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + for _, f := range funcs { + err := f(d, meta) + if err != nil { + return err + } + } + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go new file mode 100644 index 00000000000..54ea5c40206 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go @@ -0,0 +1,16 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// ComputedIf returns a CustomizeDiffFunc that sets the given key's new value +// as computed if the given condition function returns true. +func ComputedIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if f(d, meta) { + d.SetNewComputed(key) + } + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go new file mode 100644 index 00000000000..1d8e2bfd655 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go @@ -0,0 +1,60 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// ResourceConditionFunc is a function type that makes a boolean decision based +// on an entire resource diff. +type ResourceConditionFunc func(d *schema.ResourceDiff, meta interface{}) bool + +// ValueChangeConditionFunc is a function type that makes a boolean decision +// by comparing two values. +type ValueChangeConditionFunc func(old, new, meta interface{}) bool + +// ValueConditionFunc is a function type that makes a boolean decision based +// on a given value. +type ValueConditionFunc func(value, meta interface{}) bool + +// If returns a CustomizeDiffFunc that calls the given condition +// function and then calls the given CustomizeDiffFunc only if the condition +// function returns true. +// +// This can be used to include conditional customizations when composing +// customizations using All and Sequence, but should generally be used only in +// simple scenarios. Prefer directly writing a CustomizeDiffFunc containing +// a conditional branch if the given CustomizeDiffFunc is already a +// locally-defined function, since this avoids obscuring the control flow. +func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if cond(d, meta) { + return f(d, meta) + } + return nil + } +} + +// IfValueChange returns a CustomizeDiffFunc that calls the given condition +// function with the old and new values of the given key and then calls the +// given CustomizeDiffFunc only if the condition function returns true. 
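+//
+// For example (an illustrative sketch, where validateShrink is a
+// hypothetical CustomizeDiffFunc):
+//
+//    customdiff.IfValueChange("size",
+//        func(old, new, meta interface{}) bool {
+//            return new.(int) < old.(int)
+//        },
+//        validateShrink,
+//    )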
+func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + if cond(old, new, meta) { + return f(d, meta) + } + return nil + } +} + +// IfValue returns a CustomizeDiffFunc that calls the given condition +// function with the new value of the given key and then calls the +// given CustomizeDiffFunc only if the condition function returns true. +func IfValue(key string, cond ValueConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if cond(d.Get(key), meta) { + return f(d, meta) + } + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go new file mode 100644 index 00000000000..c6ad1199cdc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go @@ -0,0 +1,11 @@ +// Package customdiff provides a set of reusable and composable functions +// to enable more "declarative" use of the CustomizeDiff mechanism available +// for resources in package helper/schema. +// +// The intent of these helpers is to make the intent of a set of diff +// customizations easier to see, rather than lost in a sea of Go function +// boilerplate. They should _not_ be used in situations where they _obscure_ +// intent, e.g. by over-using the composition functions where a single +// function containing normal Go control flow statements would be more +// straightforward. +package customdiff diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go new file mode 100644 index 00000000000..26afa8cb697 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go @@ -0,0 +1,40 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// ForceNewIf returns a CustomizeDiffFunc that flags the given key as +// requiring a new resource if the given condition function returns true. +// +// The return value of the condition function is ignored if the old and new +// values of the field compare equal, since no attribute diff is generated in +// that case. +func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if f(d, meta) { + d.ForceNew(key) + } + return nil + } +} + +// ForceNewIfChange returns a CustomizeDiffFunc that flags the given key as +// requiring a new resource if the given condition function returns true. +// +// The return value of the condition function is ignored if the old and new +// values compare equal, since no attribute diff is generated in that case. +// +// This function is similar to ForceNewIf but provides the condition function +// only the old and new values of the given key, which leads to more compact +// and explicit code in the common case where the decision can be made with +// only the specific field value. 
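+//
+// For example, using the same pattern as the All example in this package:
+//
+//    customdiff.ForceNewIfChange("size", func(old, new, meta interface{}) bool {
+//        // "size" can only increase in-place, so we must create a new
+//        // resource if it is decreased.
+//        return new.(int) < old.(int)
+//    })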
+func ForceNewIfChange(key string, f ValueChangeConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + if f(old, new, meta) { + d.ForceNew(key) + } + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go new file mode 100644 index 00000000000..0bc2c69505b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go @@ -0,0 +1,38 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// ValueChangeValidationFunc is a function type that validates the difference +// (or lack thereof) between two values, returning an error if the change +// is invalid. +type ValueChangeValidationFunc func(old, new, meta interface{}) error + +// ValueValidationFunc is a function type that validates a particular value, +// returning an error if the value is invalid. +type ValueValidationFunc func(value, meta interface{}) error + +// ValidateChange returns a CustomizeDiffFunc that applies the given validation +// function to the change for the given key, returning any error produced. +func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + return f(old, new, meta) + } +} + +// ValidateValue returns a CustomizeDiffFunc that applies the given validation +// function to the value of the given key, returning any error produced. +// +// This should generally not be used since it is functionally equivalent to +// a validation function applied directly to the schema attribute in question, +// but is provided for situations where composing multiple CustomizeDiffFuncs +// together makes intent clearer than spreading that validation across the +// schema. 
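+//
+// For example (an illustrative sketch):
+//
+//    customdiff.ValidateValue("size", func(value, meta interface{}) error {
+//        if value.(int) <= 0 {
+//            return fmt.Errorf("size must be positive")
+//        }
+//        return nil
+//    })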
+func ValidateValue(key string, f ValueValidationFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + val := d.Get(key) + return f(val, meta) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go new file mode 100644 index 00000000000..110ed18cd96 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go @@ -0,0 +1,40 @@ +package encryption + +import ( + "encoding/base64" + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys" +) + +// RetrieveGPGKey returns the PGP key specified as the pgpKey parameter, or queries +// the public key from the keybase service if the parameter is a keybase username +// prefixed with the phrase "keybase:" +func RetrieveGPGKey(pgpKey string) (string, error) { + const keybasePrefix = "keybase:" + + encryptionKey := pgpKey + if strings.HasPrefix(pgpKey, keybasePrefix) { + publicKeys, err := pgpkeys.FetchKeybasePubkeys([]string{pgpKey}) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("Error retrieving Public Key for %s: {{err}}", pgpKey), err) + } + encryptionKey = publicKeys[pgpKey] + } + + return encryptionKey, nil +} + +// EncryptValue encrypts the given value with the given encryption key. Description +// should be set such that errors return a meaningful user-facing response. +func EncryptValue(encryptionKey, value, description string) (string, string, error) { + fingerprints, encryptedValue, err := + pgpkeys.EncryptShares([][]byte{[]byte(value)}, []string{encryptionKey}) + if err != nil { + return "", "", errwrap.Wrapf(fmt.Sprintf("Error encrypting %s: {{err}}", description), err) + } + + return fingerprints[0], base64.StdEncoding.EncodeToString(encryptedValue[0]), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go new file mode 100644 index 00000000000..6ccc5231834 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go @@ -0,0 +1,35 @@ +package hashcode + +import ( + "bytes" + "fmt" + "hash/crc32" +) + +// String hashes a string to a unique hashcode. +// +// crc32 returns a uint32, but for our use we need a +// non-negative integer. Here we cast to an integer +// and invert it if the result is negative. +func String(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} + +// Strings hashes a list of strings to a unique hashcode. 
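+// The inputs are joined with trailing dashes before hashing, so, for
+// example, Strings([]string{"a", "b"}) returns the decimal string form of
+// String("a-b-").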
+func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go new file mode 100644 index 00000000000..6bd92f77784 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go @@ -0,0 +1,100 @@ +package logging + +import ( + "io" + "io/ioutil" + "log" + "os" + "strings" + "syscall" + + "github.com/hashicorp/logutils" +) + +// These are the environmental variables that determine if we log, and if +// we log whether or not the log should go to a file. +const ( + EnvLog = "TF_LOG" // Set to True + EnvLogFile = "TF_LOG_PATH" // Set to a file +) + +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} + +// LogOutput determines where we should send logs (if anywhere) and the log level. +func LogOutput() (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + if logPath := os.Getenv(EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// SetOutput checks for a log destination with LogOutput, and calls +// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses +// ioutil.Discard. Any error from LogOutput is fatal. +func SetOutput() { + out, err := LogOutput() + if err != nil { + log.Fatal(err) + } + + if out == nil { + out = ioutil.Discard + } + + log.SetOutput(out) +} + +// LogLevel returns the current log level string based on the environment vars +func LogLevel() string { + envLevel := os.Getenv(EnvLog) + if envLevel == "" { + return "" + } + + logLevel := "TRACE" + if isValidLogLevel(envLevel) { + // allow following for better ux: info, Info or INFO + logLevel = strings.ToUpper(envLevel) + } else { + log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. 
Valid levels are: %+v", + envLevel, ValidLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level := string(LogLevel()) + return level == "DEBUG" || level == "TRACE" +} + +func isValidLogLevel(level string) bool { + for _, l := range ValidLevels { + if strings.ToUpper(level) == string(l) { + return true + } + } + + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go new file mode 100644 index 00000000000..bddabe647a9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go @@ -0,0 +1,70 @@ +package logging + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "net/http/httputil" + "strings" +) + +type transport struct { + name string + transport http.RoundTripper +} + +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + if IsDebugOrHigher() { + reqData, err := httputil.DumpRequestOut(req, true) + if err == nil { + log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) + } else { + log.Printf("[ERROR] %s API Request error: %#v", t.name, err) + } + } + + resp, err := t.transport.RoundTrip(req) + if err != nil { + return resp, err + } + + if IsDebugOrHigher() { + respData, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) + } else { + log.Printf("[ERROR] %s API Response error: %#v", t.name, err) + } + } + + return resp, nil +} + +func NewTransport(name string, t http.RoundTripper) *transport { + return &transport{name, t} +} + +// prettyPrintJsonLines iterates through a []byte line-by-line, +// transforming any lines that are complete json into pretty-printed json. +func prettyPrintJsonLines(b []byte) string { + parts := strings.Split(string(b), "\n") + for i, p := range parts { + if b := []byte(p); json.Valid(b) { + var out bytes.Buffer + json.Indent(&out, b, "", " ") + parts[i] = out.String() + } + } + return strings.Join(parts, "\n") +} + +const logReqMsg = `%s API Request Details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +const logRespMsg = `%s API Response Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go new file mode 100644 index 00000000000..6917f2142bd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go @@ -0,0 +1,51 @@ +package mutexkv + +import ( + "log" + "sync" +) + +// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to +// serialize changes across arbitrary collaborators that share knowledge of the +// keys they must serialize on. +// +// The initial use case is to let aws_security_group_rule resources serialize +// their access to individual security groups based on SG ID. +type MutexKV struct { + lock sync.Mutex + store map[string]*sync.Mutex +} + +// Locks the mutex for the given key. 
Caller is responsible for calling Unlock +// for the same key +func (m *MutexKV) Lock(key string) { + log.Printf("[DEBUG] Locking %q", key) + m.get(key).Lock() + log.Printf("[DEBUG] Locked %q", key) +} + +// Unlock the mutex for the given key. Caller must have called Lock for the same key first +func (m *MutexKV) Unlock(key string) { + log.Printf("[DEBUG] Unlocking %q", key) + m.get(key).Unlock() + log.Printf("[DEBUG] Unlocked %q", key) +} + +// Returns a mutex for the given key; there is no guarantee of its lock status +func (m *MutexKV) get(key string) *sync.Mutex { + m.lock.Lock() + defer m.lock.Unlock() + mutex, ok := m.store[key] + if !ok { + mutex = &sync.Mutex{} + m.store[key] = mutex + } + return mutex +} + +// Returns a properly initialized MutexKV +func NewMutexKV() *MutexKV { + return &MutexKV{ + store: make(map[string]*sync.Mutex), + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go new file mode 100644 index 00000000000..7ee21614b9f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go @@ -0,0 +1,79 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. 
last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go new file mode 100644 index 00000000000..db12cee2021 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go @@ -0,0 +1,43 @@ +package resource + +import ( + "context" + "net" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + tfplugin "github.com/hashicorp/terraform-plugin-sdk/plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC +// shim and starts it in a grpc server using an inmem connection. It returns a +// GRPCClient for this new server to test the shimmed resource provider. +func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { + listener := bufconn.Listen(256 * 1024) + grpcServer := grpc.NewServer() + + p := plugin.NewGRPCProviderServerShim(rp) + proto.RegisterProviderServer(grpcServer, p) + + go grpcServer.Serve(listener) + + conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { + return listener.Dial() + }), grpc.WithInsecure()) + if err != nil { + panic(err) + } + + var pp tfplugin.GRPCProviderPlugin + client, _ := pp.GRPCClient(context.Background(), nil, conn) + + grpcClient := client.(*tfplugin.GRPCProvider) + grpcClient.TestServer = grpcServer + + return grpcClient +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go new file mode 100644 index 00000000000..44949550e73 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go @@ -0,0 +1,45 @@ +package resource + +import ( + "fmt" + "strings" + "sync" + "time" +) + +const UniqueIdPrefix = `terraform-` + +// idCounter is a monotonic counter for generating ordered unique ids. 
+var idMutex sync.Mutex +var idCounter uint32 + +// Helper for a resource to generate a unique identifier w/ default prefix +func UniqueId() string { + return PrefixedUniqueId(UniqueIdPrefix) +} + +// UniqueIDSuffixLength is the string length of the suffix generated by +// PrefixedUniqueId. This can be used by length validation functions to +// ensure prefixes are the correct length for the target field. +const UniqueIDSuffixLength = 26 + +// Helper for a resource to generate a unique identifier w/ given prefix +// +// After the prefix, the ID consists of a timestamp and an incrementing 8 hex +// digit value, 26 characters in total (matching the length of the previous +// timestamp-only output). The timestamp means that multiple IDs created with +// the same prefix will sort in the order of their creation, even across +// multiple terraform executions, as long as the clock is not turned back +// between calls, and as long as any given terraform execution generates fewer +// than 4 billion IDs. +func PrefixedUniqueId(prefix string) string { + // Be precise to 4 digits of fractional seconds, but remove the dot before the + // fractional seconds. + timestamp := strings.Replace( + time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) + + idMutex.Lock() + defer idMutex.Unlock() + idCounter++ + return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go new file mode 100644 index 00000000000..02a993d6922 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go @@ -0,0 +1,140 @@ +package resource + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// Map is a map of resources that are supported, and provides helpers for +// more easily implementing a ResourceProvider. +type Map struct { + Mapping map[string]Resource +} + +func (m *Map) Validate( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := m.Mapping[t] + if !ok { + return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} + } + + // If there is no validator set, then it is valid + if r.ConfigValidator == nil { + return nil, nil + } + + return r.ConfigValidator.Validate(c) +} + +// Apply performs a create or update depending on the diff, and calls +// the proper function on the matching Resource. +func (m *Map) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource if it is created + err := r.Destroy(s, meta) + if err != nil { + return s, err + } + + s.ID = "" + } + + // If we're only destroying, and not creating, then return now. + // Otherwise, we continue so that we can create a new resource. 
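+	// (The destroy above followed by the create below is how this legacy
+	// helper applies a diff that requires a new resource.)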
+ if !d.RequiresNew() { + return nil, nil + } + } + + var result *terraform.InstanceState + var err error + if s.ID == "" { + result, err = r.Create(s, d, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf( + "Resource type '%s' doesn't support update", + info.Type) + } + + result, err = r.Update(s, d, meta) + } + if result != nil { + if result.Attributes == nil { + result.Attributes = make(map[string]string) + } + + result.Attributes["id"] = result.ID + } + + return result, err +} + +// Diff performs a diff on the proper resource type. +func (m *Map) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, meta) +} + +// Refresh performs a Refresh on the proper resource type. +// +// Refresh on the Resource won't be called if the state represents a +// non-created resource (ID is blank). +// +// An error is returned if the resource isn't registered. +func (m *Map) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the resource isn't created, don't refresh. + if s.ID == "" { + return s, nil + } + + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Refresh(s, meta) +} + +// Resources returns all the resources that are supported by this +// resource map and can be used to satisfy the Resources method of +// a ResourceProvider. +func (m *Map) Resources() []terraform.ResourceType { + ks := make([]string, 0, len(m.Mapping)) + for k, _ := range m.Mapping { + ks = append(ks, k) + } + sort.Strings(ks) + + rs := make([]terraform.ResourceType, 0, len(m.Mapping)) + for _, k := range ks { + rs = append(rs, terraform.ResourceType{ + Name: k, + }) + } + + return rs +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go new file mode 100644 index 00000000000..80782413b43 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go @@ -0,0 +1,49 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/config" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +type Resource struct { + ConfigValidator *config.Validator + Create CreateFunc + Destroy DestroyFunc + Diff DiffFunc + Refresh RefreshFunc + Update UpdateFunc +} + +// CreateFunc is a function that creates a resource that didn't previously +// exist. +type CreateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) + +// DestroyFunc is a function that destroys a resource that previously +// exists using the state. +type DestroyFunc func( + *terraform.InstanceState, + interface{}) error + +// DiffFunc is a function that performs a diff of a resource. +type DiffFunc func( + *terraform.InstanceState, + *terraform.ResourceConfig, + interface{}) (*terraform.InstanceDiff, error) + +// RefreshFunc is a function that performs a refresh of a specific type +// of resource. 
+type RefreshFunc func( + *terraform.InstanceState, + interface{}) (*terraform.InstanceState, error) + +// UpdateFunc is a function that is called to update a resource that +// previously existed. The difference between this and CreateFunc is that +// the diff is guaranteed to only contain attributes that don't require +// a new resource. +type UpdateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go new file mode 100644 index 00000000000..88a839664c1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go @@ -0,0 +1,259 @@ +package resource + +import ( + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. 
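+	// (The one-element buffer gives the refresh goroutine room to publish
+	// its most recent result before the reader catches up.)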
+ resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + time.Sleep(conf.Delay) + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have + // not been finding it for a while, and if so, report an error. + notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last result value from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close.
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go new file mode 100644 index 00000000000..1e322593321 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go @@ -0,0 +1,188 @@ +package resource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// shimNewState takes a new *states.State and reverts it to a legacy state for the provider ACC tests +func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { + state := terraform.NewState() + + // in the odd case of a nil state, let the helper packages handle it + if newState == nil { + return nil, nil + } + + for _, newMod := range newState.Modules { + mod := state.AddModule(newMod.Addr) + + for name, out := range newMod.OutputValues { + outputType := "" + val := hcl2shim.ConfigValueFromHCL2(out.Value) + ty := out.Value.Type() + switch { + case ty == cty.String: + outputType = "string" + case ty.IsTupleType() || ty.IsListType(): + outputType = "list" + case ty.IsMapType(): + outputType = "map" + } + + mod.Outputs[name] = &terraform.OutputState{ + Type: outputType, + Value: val, + Sensitive: out.Sensitive, + } + } + + for _, res := range newMod.Resources { + resType := res.Addr.Type + providerType := res.ProviderConfig.ProviderConfig.Type + + resource := getResource(providers, providerType, res.Addr) + + for key, i := range res.Instances { + resState := &terraform.ResourceState{ + Type: resType, + Provider: res.ProviderConfig.String(), + } + + // We should always have a Current instance here, but be safe about checking.
+ if i.Current != nil { + flatmap, err := shimmedAttributes(i.Current, resource) + if err != nil { + return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if i.Current.Private != nil { + err := json.Unmarshal(i.Current.Private, &meta) + if err != nil { + return nil, err + } + } + + resState.Primary = &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: i.Current.Status == states.ObjectTainted, + Meta: meta, + } + + if i.Current.SchemaVersion != 0 { + if resState.Primary.Meta == nil { + resState.Primary.Meta = map[string]interface{}{} + } + resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion + } + + for _, dep := range i.Current.Dependencies { + resState.Dependencies = append(resState.Dependencies, dep.String()) + } + + // convert the indexes to the old-style flatmap indexes + idx := "" + switch key.(type) { + case addrs.IntKey: + // don't add numeric index values to resources with a count of 0 + if len(res.Instances) > 1 { + idx = fmt.Sprintf(".%d", key) + } + case addrs.StringKey: + idx = "." + key.String() + } + + mod.Resources[res.Addr.String()+idx] = resState + } + + // add any deposed instances + for _, dep := range i.Deposed { + flatmap, err := shimmedAttributes(dep, resource) + if err != nil { + return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if dep.Private != nil { + err := json.Unmarshal(dep.Private, &meta) + if err != nil { + return nil, err + } + } + + deposed := &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: dep.Status == states.ObjectTainted, + Meta: meta, + } + if dep.SchemaVersion != 0 { + deposed.Meta = map[string]interface{}{ + "schema_version": dep.SchemaVersion, + } + } + + resState.Deposed = append(resState.Deposed, deposed) + } + } + } + } + + return state, nil +} + +func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { + p := providers[providerName] + if p == nil { + panic(fmt.Sprintf("provider %q not found in test step", providerName)) + } + + // this is only for tests, so should only see schema.Providers + provider := p.(*schema.Provider) + + switch addr.Mode { + case addrs.ManagedResourceMode: + resource := provider.ResourcesMap[addr.Type] + if resource != nil { + return resource + } + case addrs.DataResourceMode: + resource := provider.DataSourcesMap[addr.Type] + if resource != nil { + return resource + } + } + + panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) +} + +func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { + flatmap := instance.AttrsFlat + if flatmap != nil { + return flatmap, nil + } + + // if we have json attrs, they need to be decoded + rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) + if err != nil { + return nil, err + } + + instanceState, err := res.ShimInstanceStateFromValue(rio.Value) + if err != nil { + return nil, err + } + + return instanceState.Attributes, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go new file mode 100644 index 00000000000..eb0b58c220e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go @@ -0,0
+1,1367 @@ +package resource + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "syscall" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/logutils" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/command/format" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload" + "github.com/hashicorp/terraform-plugin-sdk/internal/initwd" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma-separated list of regions for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best-effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At the time of writing, sweepers are run +// sequentially; however, they can list dependencies to be run first. We track +// the sweepers that have been run, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") +var flagSweepRun = flag.String("sweep-run", "", "Comma separated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be run in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for the sweeper. Must be unique to be run by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be run + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that, when invoked, sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`.
Sweeper names must be unique to help ensure a given sweeper +// is only run once per run. +func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m *testing.M) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + + if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { + os.Exit(1) + } + } else { + os.Exit(m.Run()) + } +} + +func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { + var sweeperErrorFound bool + sweeperRunList := make(map[string]map[string]error) + + for _, region := range regions { + region = strings.TrimSpace(region) + + var regionSweeperErrorFound bool + regionSweeperRunList := make(map[string]error) + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { + if allowFailures { + continue + } + + sweeperRunList[region] = regionSweeperRunList + return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) + } + } + + log.Printf("Sweeper Tests ran successfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr == nil { + fmt.Printf("\t- %s\n", sweeper) + } else { + regionSweeperErrorFound = true + } + } + + if regionSweeperErrorFound { + sweeperErrorFound = true + log.Printf("Sweeper Tests ran unsuccessfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr != nil { + fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + } + } + } + + sweeperRunList[region] = regionSweeperRunList + } + + if sweeperErrorFound { + return sweeperRunList, errors.New("at least one sweeper failed") + } + + return sweeperRunList, nil +} + +// filterSweepers takes a comma-separated string listing the names of sweepers +// to be run, and returns a filtered set from the list of all sweepers to +// run, based on the names given. +func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name, sweeper := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + sweepers[name] = sweeper + } + } + } + return sweepers +} + +// runSweeperWithRegion receives a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper func with the region, and +// add the success/fail status to the sweeperRunList.
+func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { + for _, dep := range s.Dependencies { + if depSweeper, ok := sweepers[dep]; ok { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue + } + + return err + } + } else { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) + + runE := s.F(region) + + sweeperRunList[s.Name] = runE + + if runE != nil { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) + } + + return runE +} + +const TestEnvVar = "TF_ACC" + +// TestProvider can be implemented by any ResourceProvider to provide custom +// reset functionality at the start of an acceptance test. +// The helper/schema Provider implements this interface. +type TestProvider interface { + TestReset() error +} + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or, in the case of a destroy, the last known state from when +// it was created. +type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g. remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are set up. + PreCheck func() + + // Providers is the set of ResourceProviders that will be under test. + // + // Alternatively, ProviderFactories can be specified for the providers + // that are valid. This takes priority over Providers. + // + // The end effect of each is the same: specifying the providers that + // are used within the tests.
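To make the sweeper plumbing above concrete, registration in a provider's test package usually looks roughly like the sketch below. The resource names and sweep functions are hypothetical; a real SweeperFunc would build a regional client and delete leaked test resources.

package example

import (
	"log"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// TestMain hands control to the sweeper runner when `go test` is
// invoked with -sweep, e.g.: go test ./... -sweep=us-west-2
func TestMain(m *testing.M) {
	resource.TestMain(m)
}

func init() {
	resource.AddTestSweepers("example_instance", &resource.Sweeper{
		Name: "example_instance",
		F:    sweepInstances,
	})
	// Dependencies run first: instances are swept before the
	// networks that contain them.
	resource.AddTestSweepers("example_network", &resource.Sweeper{
		Name:         "example_network",
		Dependencies: []string{"example_instance"},
		F:            sweepNetworks,
	})
}

func sweepInstances(region string) error {
	log.Printf("[DEBUG] sweeping leaked instances in %s", region)
	return nil // a real sweeper would delete them here
}

func sweepNetworks(region string) error {
	log.Printf("[DEBUG] sweeping leaked networks in %s", region)
	return nil
}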
+ Providers map[string]terraform.ResourceProvider + ProviderFactories map[string]terraform.ResourceProviderFactory + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that verifies that a resource can be + // refreshed with only an ID and still result in the same attributes. + // This validates the completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simple create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here, but there are now hundreds of tests we don't + // want to re-type, so instead we just determine which step logic + // to run based on which of the settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config is a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true.
+ Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. 
+ ImportStateVerify bool + ImportStateVerifyIgnore []string + + // providers is used internally to maintain a reference to the + // underlying providers during the tests + providers map[string]terraform.ResourceProvider +} + +// Set to a file mask in sprintf format where %s is the test name +const EnvLogPathMask = "TF_LOG_PATH_MASK" + +func LogOutput(t TestT) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := logging.LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + + if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: logging.ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t TestT, c TestCase) { + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environment variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// a long time, we require the verbose flag so users are able to see progress +// output. +func Test(t TestT, c TestCase) { + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. You can opt out + // of this with OverrideEnvVar on individual TestCases. + if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + TestEnvVar)) + return + } + + logWriter, err := LogOutput(t) + if err != nil { + t.Error(fmt.Errorf("error setting up logging: %s", err)) + } + log.SetOutput(logWriter) + + // We require verbose mode so that the user knows what is going on. + if !testTesting && !testing.Verbose() && !c.IsUnitTest { + t.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + // get instances of all providers, so we can use the individual + // resources to shim the state during the tests.
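Putting TestCase and TestStep together, a provider's acceptance test commonly follows the shape sketched below; the example_thing resource, the example provider name, and testAccProvider are hypothetical.

package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// testAccProvider would be initialized with the provider under test.
var testAccProvider terraform.ResourceProvider

func TestAccExampleThing_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		// PreCheck is the place to verify required env vars are set.
		PreCheck: func() {},
		Providers: map[string]terraform.ResourceProvider{
			"example": testAccProvider,
		},
		Steps: []resource.TestStep{
			{
				// Step 1: create, then check an attribute.
				Config: `resource "example_thing" "foo" { name = "foo" }`,
				Check:  resource.TestCheckResourceAttr("example_thing.foo", "name", "foo"),
			},
			{
				// Step 2: update in place within the same state.
				Config: `resource "example_thing" "foo" { name = "bar" }`,
				Check:  resource.TestCheckResourceAttr("example_thing.foo", "name", "bar"),
			},
		},
	})
}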
+ providers := make(map[string]terraform.ResourceProvider) + for name, pf := range testProviderFactories(c) { + p, err := pf() + if err != nil { + t.Fatal(err) + } + providers[name] = p + } + + providerResolver, err := testProviderResolver(c) + if err != nil { + t.Fatal(err) + } + + opts := terraform.ContextOpts{ProviderResolver: providerResolver} + + // A single state variable to track the lifecycle, starting with no state + var state *terraform.State + + // Go through each step and run it + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + errored := false + for i, step := range c.Steps { + // insert the providers into the step so we can get the resources for + // shimming the state + step.providers = providers + + var err error + log.Printf("[DEBUG] Test: Executing step %d", i) + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d", i) + continue + } + } + + if step.Config == "" && !step.ImportState { + err = fmt.Errorf( + "unknown test mode for step. Please see TestStep docs\n\n%#v", + step) + } else { + if step.ImportState { + if step.Config == "" { + step.Config = testProviderConfig(c) + } + + // Can optionally set step.Config in addition to + // step.ImportState, to provide config for the import. + state, err = testStepImportState(opts, state, step) + } else { + state, err = testStepConfig(opts, state, step) + } + } + + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break + } + + // If there was an error, exit + if err != nil { + // Perhaps we expected an error? Check if it matches + if step.ExpectError != nil { + if !step.ExpectError.MatchString(err.Error()) { + errored = true + t.Error(fmt.Sprintf( + "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", + i, err, step.ExpectError)) + break + } + } else { + errored = true + t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) + break + } + } + + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. + if idRefreshCheck != nil { + log.Printf( + "[WARN] Test: Running ID-only refresh check on %s", + idRefreshCheck.Primary.ID) + if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { + log.Printf("[ERROR] Test: ID-only test failed: %s", err) + t.Error(fmt.Sprintf( + "[ERROR] Test: ID-only test failed: %s", err)) + break + } + } + } + } + + // If we never checked an id-only refresh, it is a failure. 
+ if idRefresh { + if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { + t.Error("ID-only refresh check never ran.") + } + } + + // If we have a state, then run the destroy + if state != nil { + lastStep := c.Steps[len(c.Steps)-1] + destroyStep := TestStep{ + Config: lastStep.Config, + Check: c.CheckDestroy, + Destroy: true, + PreventDiskCleanup: lastStep.PreventDiskCleanup, + PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + providers: providers, + } + + log.Printf("[WARN] Test: Executing destroy step") + state, err := testStep(opts, state, destroyStep) + if err != nil { + t.Error(fmt.Sprintf( + "Error destroying resource! WARNING: Dangling resources\n"+ + "may exist. The full state and error is shown below.\n\n"+ + "Error: %s\n\nState: %s", + err, + state)) + } + } else { + log.Printf("[WARN] Skipping destroy test since there is no state.") + } +} + +// testProviderConfig takes the list of Providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func testProviderConfig(c TestCase) string { + var lines []string + for p := range c.Providers { + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + } + + return strings.Join(lines, "") +} + +// testProviderFactories combines the fixed Providers and +// ResourceProviderFactory functions into a single map of +// ResourceProviderFactory functions. +func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { + ctxProviders := make(map[string]terraform.ResourceProviderFactory) + for k, pf := range c.ProviderFactories { + ctxProviders[k] = pf + } + + // add any fixed providers + for k, p := range c.Providers { + ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) + } + return ctxProviders +} + +// testProviderResolver is a helper to build a ResourceProviderResolver +// with pre-instantiated ResourceProviders, so that we can reset them for the +// test, while only calling the factory function once. +// Any errors are stored so that they can be returned by the factory in +// terraform to match non-test behavior. +func testProviderResolver(c TestCase) (providers.Resolver, error) { + ctxProviders := testProviderFactories(c) + + // wrap the old provider factories in the test grpc server so they can be + // called from terraform. + newProviders := make(map[string]providers.Factory) + + for k, pf := range ctxProviders { + factory := pf // must copy to ensure each closure sees its own value + newProviders[k] = func() (providers.Interface, error) { + p, err := factory() + if err != nil { + return nil, err + } + + // The provider is wrapped in a GRPCTestProvider so that it can be + // passed back to terraform core as a providers.Interface, rather + // than the legacy ResourceProvider. + return GRPCTestProvider(p), nil + } + } + + return providers.ResolverFixed(newProviders), nil +} + +// UnitTest is a helper to force the acceptance testing harness to run in the +// normal unit test suite. This should only be used for resources that don't +// have any external dependencies. +func UnitTest(t TestT, c TestCase) { + c.IsUnitTest = true + Test(t, c) +} + +func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { + // TODO: We guard by this right now so master doesn't explode. We + // need to remove this eventually to make this part of the normal tests.
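The `factory := pf` copy inside testProviderResolver above is load-bearing: with Go's pre-1.22 loop semantics, every closure created in a range loop shares the same loop variable, so without the copy each factory would resolve to whichever provider was iterated last. A minimal illustration of the pitfall, independent of the SDK:

package main

import "fmt"

func main() {
	makers := make([]func() int, 0, 3)
	for _, v := range []int{1, 2, 3} {
		v := v // drop this line and all three closures return 3
		makers = append(makers, func() int { return v })
	}
	for _, f := range makers {
		fmt.Println(f()) // prints 1, 2, 3 thanks to the per-iteration copy
	}
}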
+ if os.Getenv("TF_ACC_IDONLY") == "" { + return nil + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: r.Type, + Name: "foo", + }.Instance(addrs.NoKey) + absAddr := addr.Absolute(addrs.RootModuleInstance) + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := states.NewState() + state.RootModule().SetResourceInstanceCurrent( + addr, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: r.Primary.Attributes, + Status: states.ObjectReady, + }, + addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), + ) + + // Create the config module. We use the full config because Refresh + // doesn't have access to it and we may need things like provider + // configurations. The initial implementation of id-only checks used + // an empty config module, but that caused the aforementioned problems. + cfg, err := testConfig(opts, step) + if err != nil { + return err + } + + // Initialize the context + opts.Config = cfg + opts.State = state + ctx, ctxDiags := terraform.NewContext(&opts) + if ctxDiags.HasErrors() { + return ctxDiags.Err() + } + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) + } + + // Refresh! + state, refreshDiags := ctx.Refresh() + if refreshDiags.HasErrors() { + return refreshDiags.Err() + } + + // Verify attribute equivalence. + actualR := state.ResourceInstance(absAddr) + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Current == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Current.AttrsFlat + expected := r.Primary.Attributes + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k, _ := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k, _ := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return fmt.Errorf( + "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + + return nil +} + +func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { + if step.PreConfig != nil { + step.PreConfig() + } + + cfgPath, err := ioutil.TempDir("", "tf-test") + if err != nil { + return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) + } + + if step.PreventDiskCleanup { + log.Printf("[INFO] Skipping defer os.RemoveAll call") + } else { + defer os.RemoveAll(cfgPath) + } + + // Write the main configuration file + err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating temporary file for config: %s", err) + } + + // Create directory for our child modules, if any. 
+ modulesDir := filepath.Join(cfgPath, ".modules") + err = os.Mkdir(modulesDir, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating child modules directory: %s", err) + } + + inst := initwd.NewModuleInstaller(modulesDir, nil) + _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) + if installDiags.HasErrors() { + return nil, installDiags.Err() + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + return nil, fmt.Errorf("failed to create config loader: %s", err) + } + + config, configDiags := loader.LoadConfig(cfgPath) + if configDiags.HasErrors() { + return nil, configDiags + } + + return config, nil +} + +func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { + if c.ResourceName == "" { + return nil, fmt.Errorf("ResourceName must be set in TestStep") + } + + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.ResourceName]; ok { + return v, nil + } + } + } + + return nil, fmt.Errorf( + "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) +} + +// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + for i, f := range fs { + if err := f(s); err != nil { + return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) + } + } + + return nil + } +} + +// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the +// TestCheckFuncs and aggregates failures. +func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result *multierror.Error + + for i, f := range fs { + if err := f(s); err != nil { + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) + } + } + + return result.ErrorOrNil() + } +} + +// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value +// exists in state for the given name/key combination. It is useful when +// testing that computed values were set, when it is not possible to +// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + } + + return nil +} + +// TestCheckResourceAttr is a TestCheckFunc which validates +// the value in state for the given name/key combination. +func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". 
+ emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. +func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
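Since the check helpers above are all plain TestCheckFuncs, they compose; a step's Check field is often built like the sketch below, where the example_thing and example_network addresses and attribute names are hypothetical.

package example

import (
	"regexp"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// checkThing bundles several checks; with the aggregate variant every
// failure is reported rather than just the first one.
var checkThing = resource.ComposeAggregateTestCheckFunc(
	resource.TestCheckResourceAttr("example_thing.foo", "name", "foo"),
	resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
	resource.TestMatchResourceAttr("example_thing.foo", "arn",
		regexp.MustCompile(`^arn:`)),
	resource.TestCheckResourceAttrPair(
		"example_thing.foo", "network_id",
		"example_network.bar", "id"),
)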
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. + return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// TestT is the interface used to handle the test lifecycle of a test. 
+// +// Users should just use a *testing.T object, which implements this. +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) + Name() string + Parallel() +} + +// This is set to true by unit tests to alter some behavior +var testTesting = false + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. +func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// operationError is a specialized implementation of error used to describe +// failures during one of the several operations performed for a particular +// test case. +type operationError struct { + OpName string + Diags tfdiags.Diagnostics +} + +func newOperationError(opName string, diags tfdiags.Diagnostics) error { + return operationError{opName, diags} +} + +// Error returns a terse error string containing just the basic diagnostic +// messages, for situations where normal Go error behavior is appropriate. +func (err operationError) Error() string { + return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +} + +// ErrorDetail is like Error except it includes verbosely-rendered diagnostics +// similar to what would come from a normal Terraform run, which include +// additional context not included in Error(). +func (err operationError) ErrorDetail() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "errors during %s:", err.OpName) + clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} + for _, diag := range err.Diags { + diagStr := format.Diagnostic(diag, nil, clr, 78) + buf.WriteByte('\n') + buf.WriteString(diagStr) + } + return buf.String() +} + +// detailedErrorMessage is a helper for calling ErrorDetail on an error if +// it is an operationError or just taking Error otherwise. 
+func detailedErrorMessage(err error) string { + switch tErr := err.(type) { + case operationError: + return tErr.ErrorDetail() + default: + return err.Error() + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go new file mode 100644 index 00000000000..e21525de869 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go @@ -0,0 +1,404 @@ +package resource + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// testStepConfig runs a config-mode test step +func testStepConfig( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + return testStep(opts, state, step) +} + +func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + if !step.Destroy { + if err := testStepTaint(state, step); err != nil { + return state, err + } + } + + cfg, err := testConfig(opts, step) + if err != nil { + return state, err + } + + var stepDiags tfdiags.Diagnostics + + // Build the context + opts.Config = cfg + opts.State, err = terraform.ShimLegacyState(state) + if err != nil { + return nil, err + } + + opts.Destroy = step.Destroy + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) + } + if stepDiags := ctx.Validate(); len(stepDiags) > 0 { + if stepDiags.HasErrors() { + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", stepDiags) + } + + // Refresh! + newState, stepDiags := ctx.Refresh() + // shim the state first so the test can check the state on errors + + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("refresh", stepDiags) + } + + // If this step is a PlanOnly step, skip over this first Plan and subsequent + // Apply, and use the follow up Plan that checks for perpetual diffs + if !step.PlanOnly { + // Plan! + if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("plan", stepDiags) + } else { + log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) + } + + // We need to keep a copy of the state prior to destroying + // such that destroy steps can verify their behavior in the check + // function + stateBeforeApplication := state.DeepCopy() + + // Apply the diff, creating real resources. 
+ newState, stepDiags = ctx.Apply()
+ // shim the state first so the test can check the state on errors
+ state, err = shimNewState(newState, step.providers)
+ if err != nil {
+ return nil, err
+ }
+ if stepDiags.HasErrors() {
+ return state, newOperationError("apply", stepDiags)
+ }
+
+ // Run any configured checks
+ if step.Check != nil {
+ if step.Destroy {
+ if err := step.Check(stateBeforeApplication); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ } else {
+ if err := step.Check(state); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ }
+ }
+ }
+
+ // Now, verify that Plan is now empty and we don't have a perpetual diff issue
+ // We do this with TWO plans. One without a refresh.
+ var p *plans.Plan
+ if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
+ return state, newOperationError("follow-up plan", stepDiags)
+ }
+ if !p.Changes.Empty() {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
+ }
+ }
+
+ // And another after a Refresh.
+ if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+ newState, stepDiags = ctx.Refresh()
+ if stepDiags.HasErrors() {
+ return state, newOperationError("follow-up refresh", stepDiags)
+ }
+
+ state, err = shimNewState(newState, step.providers)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
+ return state, newOperationError("second follow-up refresh", stepDiags)
+ }
+ empty := p.Changes.Empty()
+
+ // Data resources are tricky because they legitimately get instantiated
+ // during refresh so that they will be already populated during the
+ // plan walk. Because of this, if we have any data resources in the
+ // config we'll end up wanting to destroy them again here. This is
+ // acceptable and expected, and we'll treat it as "empty" for the
+ // sake of this testing.
+ if step.Destroy && !empty {
+ empty = true
+ for _, change := range p.Changes.Resources {
+ if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode {
+ empty = false
+ break
+ }
+ }
+ }
+
+ if !empty {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step and refreshing, "+
+ "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
+ }
+ }
+
+ // Made it here, but expected a non-empty plan, fail!
+ if step.ExpectNonEmptyPlan && empty {
+ return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+ }
+
+ // Made it here? Good job test step!
+ return state, nil
+}
+
+// legacyPlanComparisonString produces a string representation of the changes
+// from a plan and a given state together, as was formerly produced by the
+// String method of terraform.Plan.
+//
+// This is here only for compatibility with existing tests that predate our
+// new plan and state types, and should not be used in new tests. Instead, use
+// a library like "cmp" to do a deep equality and diff on the two
+// data structures.
+func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of terraform.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + requiresReplace := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. + continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + + rr := []string{} + for _, p := range rc.RequiredReplace.List() { + rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) + } + requiresReplace[resourceKey] = rr + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + forceNewAttrs := requiresReplace[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. 
+ crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == hcl2shim.UnknownVariableValue { + v = "" + } + // NOTE: we don't support here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + + // This may not be as precise as in the old diff, as it matches + // everything under the attribute that was originally marked as + // ForceNew, but should help make it easier to determine what + // caused replacement here. 
+ for _, k := range forceNewAttrs {
+ if strings.HasPrefix(attrK, k) {
+ updateMsg = " (forces new resource)"
+ break
+ }
+ }
+
+ fmt.Fprintf(
+ &mBuf, " %s:%s %#v => %#v%s\n",
+ attrK,
+ strings.Repeat(" ", keyLen-len(attrK)),
+ u, v,
+ updateMsg,
+ )
+ }
+ }
+
+ if moduleKey == "" { // root module
+ buf.Write(mBuf.Bytes())
+ buf.WriteByte('\n')
+ continue
+ }
+
+ fmt.Fprintf(&buf, "%s:\n", moduleKey)
+ s := bufio.NewScanner(&mBuf)
+ for s.Scan() {
+ buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
+ }
+ }
+
+ return buf.String()
+}
+
+func testStepTaint(state *terraform.State, step TestStep) error {
+ for _, p := range step.Taint {
+ m := state.RootModule()
+ if m == nil {
+ return errors.New("no state")
+ }
+ rs, ok := m.Resources[p]
+ if !ok {
+ return fmt.Errorf("resource %q not found in state", p)
+ }
+ log.Printf("[WARN] Test: Explicitly tainting resource %q", p)
+ rs.Taint()
+ }
+ return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go
new file mode 100644
index 00000000000..9e547e2a044
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go
@@ -0,0 +1,233 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+ "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+ "github.com/hashicorp/terraform-plugin-sdk/internal/states"
+ "github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+
+ // Determine the ID to import
+ var importId string
+ switch {
+ case step.ImportStateIdFunc != nil:
+ var err error
+ importId, err = step.ImportStateIdFunc(state)
+ if err != nil {
+ return state, err
+ }
+ case step.ImportStateId != "":
+ importId = step.ImportStateId
+ default:
+ resource, err := testResource(step, state)
+ if err != nil {
+ return state, err
+ }
+ importId = resource.Primary.ID
+ }
+
+ importPrefix := step.ImportStateIdPrefix
+ if importPrefix != "" {
+ importId = fmt.Sprintf("%s%s", importPrefix, importId)
+ }
+
+ // Setup the context. We initialize with an empty state. We use the
+ // full config for provider configurations.
+ cfg, err := testConfig(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ opts.Config = cfg
+
+ // import tests start with empty state
+ opts.State = states.NewState()
+
+ ctx, stepDiags := terraform.NewContext(&opts)
+ if stepDiags.HasErrors() {
+ return state, stepDiags.Err()
+ }
+
+ // The test step provides the resource address as a string, so we need
+ // to parse it to get an addrs.AbsResourceAddress to pass in to the
+ // import method.
+ traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) + if hclDiags.HasErrors() { + return nil, hclDiags + } + importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) + if stepDiags.HasErrors() { + return nil, stepDiags.Err() + } + + // Do the import + importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ + // Set the module so that any provider config is loaded + Config: cfg, + + Targets: []*terraform.ImportTarget{ + &terraform.ImportTarget{ + Addr: importAddr, + ID: importId, + }, + }, + }) + if stepDiags.HasErrors() { + log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) + return state, stepDiags.Err() + } + + newState, err := shimNewState(importedState, step.providers) + if err != nil { + return nil, err + } + + // Go through the new state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range newState.RootModule().Resources { + if r.Primary != nil { + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + } + if err := step.ImportStateCheck(states); err != nil { + return state, err + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := newState.RootModule().Resources + old := state.RootModule().Resources + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range old { + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { + oldR = r2 + break + } + } + if oldR == nil { + return state, fmt.Errorf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // We'll try our best to find the schema for this resource type + // so we can ignore Removed fields during validation. If we fail + // to find the schema then we won't ignore them and so the test + // will need to rely on explicit ImportStateVerifyIgnore, though + // this shouldn't happen in any reasonable case. + var rsrcSchema *schema.Resource + if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { + providerType := providerAddr.ProviderConfig.Type + if provider, ok := step.providers[providerType]; ok { + if provider, ok := provider.(*schema.Provider); ok { + rsrcSchema = provider.ResourcesMap[r.Type] + } + } + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + // Also remove any attributes that are marked as "Removed" in the + // schema, if we have a schema to check that against. 
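
A sketch of the import-verification step that exercises this code; the address and the ignored attribute are placeholders:

    resource.TestStep{
        ResourceName:      "example_thing.foo",
        ImportState:       true,
        ImportStateVerify: true,
        // Attributes that legitimately differ between create and import:
        ImportStateVerifyIgnore: []string{"password"},
    }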
+ if rsrcSchema != nil { + for k := range actual { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(actual, k) + break + } + } + } + for k := range expected { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(expected, k) + break + } + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return state, fmt.Errorf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + // Return the old state (non-imported) so we don't change anything. + return state, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go new file mode 100644 index 00000000000..e56a5155d10 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go @@ -0,0 +1,84 @@ +package resource + +import ( + "sync" + "time" +) + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +func Retry(timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForState() + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. +func RetryableError(err error) *RetryError { + if err == nil { + return nil + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. 
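
Typical use of the Retry helpers above, as a sketch; client.DescribeThing and isThrottle are placeholders:

    err := resource.Retry(2*time.Minute, func() *resource.RetryError {
        out, err := client.DescribeThing(id) // hypothetical API call
        if err != nil {
            if isThrottle(err) { // hypothetical retryable-error predicate
                return resource.RetryableError(err)
            }
            return resource.NonRetryableError(err)
        }
        if out == nil {
            return resource.RetryableError(fmt.Errorf("thing %q not ready yet", id))
        }
        return nil
    })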
+func NonRetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: false}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go
new file mode 100644
index 00000000000..609c208b368
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go
@@ -0,0 +1,200 @@
+package schema
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+ "github.com/zclconf/go-cty/cty"
+
+ "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+ "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+ "github.com/hashicorp/terraform-plugin-sdk/terraform"
+ ctyconvert "github.com/zclconf/go-cty/cty/convert"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+ // Schema is the schema for the configuration of this backend. If this
+ // Backend has no configuration this can be omitted.
+ Schema map[string]*Schema
+
+ // ConfigureFunc is called to configure the backend. Use the
+ // FromContext* methods to extract information from the context.
+ // This can be nil, in which case nothing will be called but the
+ // config will still be stored.
+ ConfigureFunc func(context.Context) error
+
+ config *ResourceData
+}
+
+var (
+ backendConfigKey = contextKey("backend config")
+)
+
+// FromContextBackendConfig extracts a ResourceData with the configuration
+// from the context. This should only be called by Backend functions.
+func FromContextBackendConfig(ctx context.Context) *ResourceData {
+ return ctx.Value(backendConfigKey).(*ResourceData)
+}
+
+func (b *Backend) ConfigSchema() *configschema.Block {
+ // This is an alias of CoreConfigSchema just to implement the
+ // backend.Backend interface.
+ return b.CoreConfigSchema()
+}
+
+func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
+ if b == nil {
+ return configVal, nil
+ }
+ var diags tfdiags.Diagnostics
+ var err error
+
+ // In order to use Transform below, this needs to be filled out completely
+ // according to the schema.
+ configVal, err = b.CoreConfigSchema().CoerceValue(configVal)
+ if err != nil {
+ return configVal, diags.Append(err)
+ }
+
+ // lookup any required, top-level attributes that are Null, and see if we
+ // have a Default value available.
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags + } + + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags +} + +func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { + if b == nil { + return nil + } + + var diags tfdiags.Diagnostics + sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, shimRC, nil, nil, true) + if err != nil { + diags = diags.Append(err) + return diags + } + + data, err := sm.Data(nil, diff) + if err != nil { + diags = diags.Append(err) + return diags + } + b.config = data + + if b.ConfigureFunc != nil { + err = b.ConfigureFunc(context.WithValue( + context.Background(), backendConfigKey, data)) + if err != nil { + diags = diags.Append(err) + return diags + } + } + + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *terraform.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. + shimMap = map[string]interface{}{} + } + return &terraform.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } +} + +// Config returns the configuration. This is available after Configure is +// called. 
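
A sketch of the embedding pattern this type supports; the backend and its "bucket" attribute are hypothetical:

    type exampleBackend struct {
        *schema.Backend
        bucket string
    }

    func newExampleBackend() *exampleBackend {
        b := &exampleBackend{}
        b.Backend = &schema.Backend{
            Schema: map[string]*schema.Schema{
                "bucket": {Type: schema.TypeString, Required: true},
            },
            ConfigureFunc: func(ctx context.Context) error {
                // The validated configuration is delivered via the context.
                data := schema.FromContextBackendConfig(ctx)
                b.bucket = data.Get("bucket").(string)
                return nil
            },
        }
        return b
    }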
+func (b *Backend) Config() *ResourceData { + return b.config +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go new file mode 100644 index 00000000000..fa03d83384c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go @@ -0,0 +1,309 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. +// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case SchemaConfigModeBlock: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema, ValueType: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. 
This is appropriate only for primitives or collections whose
+// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
+// whose elem is a whole resource.
+func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
+ // The Schema.DefaultFunc capability adds some extra weirdness here since
+ // it can be combined with "Required: true" to create a situation where
+ // required-ness is conditional. Terraform Core doesn't share this concept,
+ // so we must sniff for this possibility here and conditionally turn
+ // off the "Required" flag if it looks like the DefaultFunc is going
+ // to provide a value.
+ // This is not 100% true to the original interface of DefaultFunc but
+ // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
+ // situations, which are the main cases we care about.
+ //
+ // Note that this also has a consequence for commands that return schema
+ // information for documentation purposes: running those for certain
+ // providers will produce different results depending on which environment
+ // variables are set. We accept that weirdness in order to keep this
+ // interface to core otherwise simple.
+ reqd := s.Required
+ opt := s.Optional
+ if reqd && s.DefaultFunc != nil {
+ v, err := s.DefaultFunc()
+ // We can't report errors from here, so we'll instead just force
+ // "Required" to false and let the provider try calling its
+ // DefaultFunc again during the validate step, where it can then
+ // return the error.
+ if err != nil || (err == nil && v != nil) {
+ reqd = false
+ opt = true
+ }
+ }
+
+ return &configschema.Attribute{
+ Type: s.coreConfigSchemaType(),
+ Optional: opt,
+ Required: reqd,
+ Computed: s.Computed,
+ Sensitive: s.Sensitive,
+ Description: s.Description,
+ }
+}
+
+// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of
+// a schema. This is appropriate only for collections whose Elem is an instance
+// of Resource, and will panic otherwise.
+func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
+ ret := &configschema.NestedBlock{}
+ if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
+ ret.Block = *nested
+ }
+ switch s.Type {
+ case TypeList:
+ ret.Nesting = configschema.NestingList
+ case TypeSet:
+ ret.Nesting = configschema.NestingSet
+ case TypeMap:
+ ret.Nesting = configschema.NestingMap
+ default:
+ // Should never happen for a valid schema
+ panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type))
+ }
+
+ ret.MinItems = s.MinItems
+ ret.MaxItems = s.MaxItems
+
+ if s.Required && s.MinItems == 0 {
+ // configschema doesn't have a "required" representation for nested
+ // blocks, but we can fake it by requiring at least one item.
+ ret.MinItems = 1
+ }
+ if s.Optional && s.MinItems > 0 {
+ // Historically helper/schema would ignore MinItems if Optional were
+ // set, so we must mimic this behavior here to ensure that providers
+ // relying on that undocumented behavior can continue to operate as
+ // they did before.
+ ret.MinItems = 0
+ }
+ if s.Computed && !s.Optional {
+ // MinItems/MaxItems are meaningless for computed nested blocks, since
+ // they are never set by the user anyway. This ensures that we'll never
+ // generate weird errors about them.
+ ret.MinItems = 0
+ ret.MaxItems = 0
+ }
+
+ return ret
+}
+
+// coreConfigSchemaType determines the core config schema type that corresponds
+// to a particular schema's type.
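
To make the DefaultFunc interaction above concrete, a sketch using the package's EnvDefaultFunc helper ("EXAMPLE_TOKEN" is a placeholder variable name):

    s := &schema.Schema{
        Type:        schema.TypeString,
        Required:    true,
        DefaultFunc: schema.EnvDefaultFunc("EXAMPLE_TOKEN", nil),
    }
    // With EXAMPLE_TOKEN set, the derived configschema.Attribute reports
    // Optional: true and Required: false; with it unset (and a nil fallback),
    // the attribute stays Required and validation surfaces any error later.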
+func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() + case *Resource: + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. +func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. 
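
A sketch of the provider-side declaration that triggers the timeouts block constructed just below; the durations are arbitrary:

    r := &schema.Resource{
        Timeouts: &schema.ResourceTimeout{
            Create: schema.DefaultTimeout(10 * time.Minute),
            Delete: schema.DefaultTimeout(30 * time.Minute),
        },
        // Schema and CRUD functions elided.
    }
    // Configuration can then set, for example: timeouts { create = "20m" }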
+ if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + + return block +} + +func (r *Resource) coreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the backends's schema. +func (r *Backend) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go new file mode 100644 index 00000000000..8d93750aede --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go @@ -0,0 +1,59 @@ +package schema + +import ( + "fmt" +) + +// DataSourceResourceShim takes a Resource instance describing a data source +// (with a Read implementation and a Schema, at least) and returns a new +// Resource instance with additional Create and Delete implementations that +// allow the data source to be used as a resource. +// +// This is a backward-compatibility layer for data sources that were formerly +// read-only resources before the data source concept was added. It should not +// be used for any *new* data sources. +// +// The Read function for the data source *must* call d.SetId with a non-empty +// id in order for this shim to function as expected. +// +// The provided Resource instance, and its schema, will be modified in-place +// to make it suitable for use as a full resource. +func DataSourceResourceShim(name string, dataSource *Resource) *Resource { + // Recursively, in-place adjust the schema so that it has ForceNew + // on any user-settable resource. + dataSourceResourceShimAdjustSchema(dataSource.Schema) + + dataSource.Create = CreateFunc(dataSource.Read) + dataSource.Delete = func(d *ResourceData, meta interface{}) error { + d.SetId("") + return nil + } + dataSource.Update = nil // should already be nil, but let's make sure + + // FIXME: Link to some further docs either on the website or in the + // changelog, once such a thing exists. 
+ dataSource.DeprecationMessage = fmt.Sprintf( + "using %s as a resource is deprecated; consider using the data source instead", + name, + ) + + return dataSource +} + +func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { + for _, s := range schema { + // If the attribute is configurable then it must be ForceNew, + // since we have no Update implementation. + if s.Required || s.Optional { + s.ForceNew = true + } + + // If the attribute is a nested resource, we need to recursively + // apply these same adjustments to it. + if s.Elem != nil { + if r, ok := s.Elem.(*Resource); ok { + dataSourceResourceShimAdjustSchema(r.Schema) + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go new file mode 100644 index 00000000000..d5e20e03889 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go @@ -0,0 +1,6 @@ +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go new file mode 100644 index 00000000000..2a66a068fb6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go @@ -0,0 +1,343 @@ +package schema + +import ( + "fmt" + "strconv" + "strings" +) + +// FieldReaders are responsible for decoding fields out of data into +// the proper typed representation. ResourceData uses this to query data +// out of multiple sources: config, state, diffs, etc. +type FieldReader interface { + ReadField([]string) (FieldReadResult, error) +} + +// FieldReadResult encapsulates all the resulting data from reading +// a field. +type FieldReadResult struct { + // Value is the actual read value. NegValue is the _negative_ value + // or the items that should be removed (if they existed). NegValue + // doesn't make sense for primitives but is important for any + // container types such as maps, sets, lists. + Value interface{} + ValueProcessed interface{} + + // Exists is true if the field was found in the data. False means + // it wasn't found if there was no error. + Exists bool + + // Computed is true if the field was found but the value + // is computed. + Computed bool +} + +// ValueOrZero returns the value of this result or the zero value of the +// schema type, ensuring a consistent non-nil return value. +func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { + if r.Value != nil { + return r.Value + } + + return s.ZeroValue() +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through. +func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { + parts := strings.Split(path, ".") + return addrToSchema(parts, schemaMap) +} + +// addrToSchema finds the final element schema for the given address +// and the given schema. It returns all the schemas that led to the final +// schema. These are in order of the address (out to in). 
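
A sketch of the traversal described above; the schema map is illustrative:

    m := map[string]*schema.Schema{
        "rule": {Type: schema.TypeList, Optional: true, Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
                "port": {Type: schema.TypeInt, Required: true},
            },
        }},
    }
    // For the flatmap path "rule.0.port", the walk passes through the list
    // schema, the implicit element object, and finally the TypeInt schema,
    // returned outermost first.
    chain := schema.SchemasForFlatmapPath("rule.0.port", m)
    _ = chain // len(chain) == 3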
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
+ current := &Schema{
+ Type: typeObject,
+ Elem: schemaMap,
+ }
+
+ // If we aren't given an address, then the user is requesting the
+ // full object, so we return the special value which is the full object.
+ if len(addr) == 0 {
+ return []*Schema{current}
+ }
+
+ result := make([]*Schema, 0, len(addr))
+ for len(addr) > 0 {
+ k := addr[0]
+ addr = addr[1:]
+
+ REPEAT:
+ // We want to trim off the first "typeObject" since it's not a
+ // real lookup that people do. i.e. []string{"foo"} in a structure
+ // isn't {typeObject, typeString}, it's just a {typeString}.
+ if len(result) > 0 || current.Type != typeObject {
+ result = append(result, current)
+ }
+
+ switch t := current.Type; t {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ if len(addr) > 0 {
+ return nil
+ }
+ case TypeList, TypeSet:
+ isIndex := len(addr) > 0 && addr[0] == "#"
+
+ switch v := current.Elem.(type) {
+ case *Resource:
+ current = &Schema{
+ Type: typeObject,
+ Elem: v.Schema,
+ }
+ case *Schema:
+ current = v
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // we may not know the Elem type and are just looking for the
+ // index
+ if isIndex {
+ break
+ }
+
+ if len(addr) == 0 {
+ // we've processed the address, so return what we've
+ // collected
+ return result
+ }
+
+ if len(addr) == 1 {
+ if _, err := strconv.Atoi(addr[0]); err == nil {
+ // we're indexing a value without a schema. This can
+ // happen if the list is nested in another schema type.
+ // Default to a TypeString like we do with a map
+ current = &Schema{Type: TypeString}
+ break
+ }
+ }
+
+ return nil
+ }
+
+ // If we only have one more thing and the next thing
+ // is a #, then we're accessing the index which is always
+ // an int.
+ if isIndex {
+ current = &Schema{Type: TypeInt}
+ break
+ }
+
+ case TypeMap:
+ if len(addr) > 0 {
+ switch v := current.Elem.(type) {
+ case ValueType:
+ current = &Schema{Type: v}
+ case *Schema:
+ current, _ = current.Elem.(*Schema)
+ default:
+ // maps default to string values. This is all we can have
+ // if this is nested in another list or map.
+ current = &Schema{Type: TypeString}
+ }
+ }
+ case typeObject:
+ // If we're already in the object, then we want to handle Sets
+ // and Lists specially. Basically, their next key is the lookup
+ // key (the set value or the list element). For these scenarios,
+ // we just want to skip it and move to the next element if there
+ // is one.
+ if len(result) > 0 {
+ lastType := result[len(result)-2].Type
+ if lastType == TypeSet || lastType == TypeList {
+ if len(addr) == 0 {
+ break
+ }
+
+ k = addr[0]
+ addr = addr[1:]
+ }
+ }
+
+ m := current.Elem.(map[string]*Schema)
+ val, ok := m[k]
+ if !ok {
+ return nil
+ }
+
+ current = val
+ goto REPEAT
+ }
+ }
+
+ return result
+}
+
+// readListField is a generic method for reading a list field out of
+// a FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
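
The flatmap layout that readListField (below) and the other readers assume, with illustrative keys:

    tags.#     = "2"    list count
    tags.0     = "a"
    tags.1     = "b"
    labels.%   = "1"    map count
    labels.env = "dev"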
+func readListField( + r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + addrPadded := make([]string, len(addr)+1) + copy(addrPadded, addr) + addrPadded[len(addrPadded)-1] = "#" + + // Get the number of elements in the list + countResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !countResult.Exists { + // No count, means we have no list + countResult.Value = 0 + } + + // If we have an empty list, then return an empty list + if countResult.Computed || countResult.Value.(int) == 0 { + return FieldReadResult{ + Value: []interface{}{}, + Exists: countResult.Exists, + Computed: countResult.Computed, + }, nil + } + + // Go through each count, and get the item value out of it + result := make([]interface{}, countResult.Value.(int)) + for i, _ := range result { + is := strconv.FormatInt(int64(i), 10) + addrPadded[len(addrPadded)-1] = is + rawResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !rawResult.Exists { + // This should never happen, because by the time the data + // gets to the FieldReaders, all the defaults should be set by + // Schema. + rawResult.Value = nil + } + + result[i] = rawResult.Value + } + + return FieldReadResult{ + Value: result, + Exists: true, + }, nil +} + +// readObjectField is a generic method for reading objects out of FieldReaders +// based on the assumption that building an address of []string{k, FIELD} +// will result in the proper field data. +func readObjectField( + r FieldReader, + addr []string, + schema map[string]*Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + exists := false + for field, s := range schema { + addrRead := make([]string, len(addr), len(addr)+1) + copy(addrRead, addr) + addrRead = append(addrRead, field) + rawResult, err := r.ReadField(addrRead) + if err != nil { + return FieldReadResult{}, err + } + if rawResult.Exists { + exists = true + } + + result[field] = rawResult.ValueOrZero(s) + } + + return FieldReadResult{ + Value: result, + Exists: exists, + }, nil +} + +// convert map values to the proper primitive type based on schema.Elem +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err + } + + switch elemType { + case TypeInt, TypeFloat, TypeBool: + for k, v := range m { + vs, ok := v.(string) + if !ok { + continue + } + + v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) + if err != nil { + return err + } + + m[k] = v + } + } + return nil +} + +func stringToPrimitive( + value string, computed bool, schema *Schema) (interface{}, error) { + var returnVal interface{} + switch schema.Type { + case TypeBool: + if value == "" { + returnVal = false + break + } + if computed { + break + } + + v, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + + returnVal = v + case TypeFloat: + if value == "" { + returnVal = 0.0 + break + } + if computed { + break + } + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, err + } + + returnVal = v + case TypeInt: + if value == "" { + returnVal = 0 + break + } + if computed { + break + } + + v, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return nil, err + } + + returnVal = int(v) + case TypeString: + returnVal = value + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } + + return returnVal, nil +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go new file mode 100644 index 00000000000..dc2ae1af5d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go @@ -0,0 +1,353 @@ +package schema + +import ( + "fmt" + "log" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/mapstructure" +) + +// ConfigFieldReader reads fields out of an untyped map[string]string to the +// best of its ability. It also applies defaults from the Schema. (The other +// field readers do not need default handling because they source fully +// populated data structures.) +type ConfigFieldReader struct { + Config *terraform.ResourceConfig + Schema map[string]*Schema + + indexMaps map[string]map[string]int + once sync.Once +} + +func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { + r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) + return r.readField(address, false) +} + +func (r *ConfigFieldReader) readField( + address []string, nested bool) (FieldReadResult, error) { + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + if !nested { + // If we have a set anywhere in the address, then we need to + // read that set out in order and actually replace that part of + // the address with the real list index. i.e. set.50 might actually + // map to set.12 in the config, since it is in list order in the + // config, not indexed by set value. + for i, v := range schemaList { + // Sets are the only thing that cause this issue. + if v.Type != TypeSet { + continue + } + + // If we're at the end of the list, then we don't have to worry + // about this because we're just requesting the whole set. + if i == len(schemaList)-1 { + continue + } + + // If we're looking for the count, then ignore... + if address[i+1] == "#" { + continue + } + + indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] + if !ok { + // Get the set so we can get the index map that tells us the + // mapping of the hash code to the list index + _, err := r.readSet(address[:i+1], v) + if err != nil { + return FieldReadResult{}, err + } + indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] + } + + index, ok := indexMap[address[i+1]] + if !ok { + return FieldReadResult{}, nil + } + + address[i+1] = strconv.FormatInt(int64(index), 10) + } + } + + k := strings.Join(address, ".") + schema := schemaList[len(schemaList)-1] + + // If we're getting the single element of a promoted list, then + // check to see if we have a single element we need to promote. + if address[len(address)-1] == "0" && len(schemaList) > 1 { + lastSchema := schemaList[len(schemaList)-2] + if lastSchema.Type == TypeList && lastSchema.PromoteSingle { + k := strings.Join(address[:len(address)-1], ".") + result, err := r.readPrimitive(k, schema) + if err == nil { + return result, nil + } + } + } + + if protoVersion5 { + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. 
This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil + } + } + } + } + + switch schema.Type { + case TypeBool, TypeFloat, TypeInt, TypeString: + return r.readPrimitive(k, schema) + case TypeList: + // If we support promotion then we first check if we have a lone + // value that we must promote. + // a value that is alone. + if schema.PromoteSingle { + result, err := r.readPrimitive(k, schema.Elem.(*Schema)) + if err == nil && result.Exists { + result.Value = []interface{}{result.Value} + return result, nil + } + } + + return readListField(&nestedConfigFieldReader{r}, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField( + &nestedConfigFieldReader{r}, + address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + // We want both the raw value and the interpolated. We use the interpolated + // to store actual values and we use the raw one to check for + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. + mraw, ok := r.Config.GetRaw(k) + if !ok { + // check if this is from an interpolated field by seeing if it exists + // in the config + _, ok := r.Config.Get(k) + if !ok { + // this really doesn't exist + return FieldReadResult{}, nil + } + + // We couldn't fetch the value from a nested data structure, so treat the + // raw value as an interpolation string. The mraw value is only used + // for the type switch below. + mraw = "${INTERPOLATED}" + } + + result := make(map[string]interface{}) + computed := false + switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. 
+ for i, iv := range mapV {
+ result[i] = iv
+ }
+ case []interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw.(map[string]interface{}) {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case []map[string]interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case map[string]interface{}:
+ for ik := range m {
+ key := fmt.Sprintf("%s.%s", k, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ case nil:
+ // the map may have been empty on the configuration, so we leave the
+ // empty result
+ default:
+ panic(fmt.Sprintf("unknown type: %#v", mraw))
+ }
+
+ err := mapValuesToPrimitive(k, result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var value interface{}
+ if !computed {
+ value = result
+ }
+
+ return FieldReadResult{
+ Value: value,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readPrimitive(
+ k string, schema *Schema) (FieldReadResult, error) {
+ raw, ok := r.Config.Get(k)
+ if !ok {
+ // Nothing in config, but we might still have a default from the schema
+ var err error
+ raw, err = schema.DefaultValue()
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
+ }
+
+ if raw == nil {
+ return FieldReadResult{}, nil
+ }
+ }
+
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return FieldReadResult{}, err
+ }
+
+ computed := r.Config.IsComputed(k)
+ returnVal, err := stringToPrimitive(result, computed, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ indexMap := make(map[string]int)
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ return FieldReadResult{Value: set}, nil
+ }
+
+ // If the list is computed, the set is necessarily computed
+ if raw.Computed {
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ Computed: raw.Computed,
+ }, nil
+ }
+
+ // Build up the set from the list elements
+ for i, v := range raw.Value.([]interface{}) {
+ // Check if any of the keys in this item are computed
+ computed := r.hasComputedSubKeys(
+ fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
+
+ code := set.add(v, computed)
+ indexMap[code] = i
+ }
+
+ r.indexMaps[strings.Join(address, ".")] = indexMap
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// hasComputedSubKeys walks through a schema and returns whether or not the
+// given key contains any subkeys that are computed.
+func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
+ prefix := key + "."
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ for k, schema := range t.Schema {
+ if r.Config.IsComputed(prefix + k) {
+ return true
+ }
+
+ if r.hasComputedSubKeys(prefix+k, schema) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+ Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+ address []string) (FieldReadResult, error) {
+ return r.Reader.readField(address, true)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go
new file mode 100644
index 00000000000..c099029afa7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go
@@ -0,0 +1,244 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-sdk/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structure.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists whether it's from the source, diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+ Diff *terraform.InstanceDiff
+ Source FieldReader
+ Schema map[string]*Schema
+
+ // cache for memoizing ReadField calls.
+ cache map[string]cachedFieldReadResult
+}
+
+type cachedFieldReadResult struct {
+ val FieldReadResult
+ err error
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ if r.cache == nil {
+ r.cache = make(map[string]cachedFieldReadResult)
+ }
+
+ // Create the cache key by joining around a value that isn't a valid part
+ // of an address. This assumes that the Source and Schema are not changed
+ // for the life of this DiffFieldReader.
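
A sketch of composing a DiffFieldReader over prior state; resourceSchema, stateAttrs, and instanceDiff are placeholders:

    src := &schema.MapFieldReader{
        Schema: resourceSchema,                    // map[string]*schema.Schema
        Map:    schema.BasicMapReader(stateAttrs), // the flatmap the diff came from
    }
    r := &schema.DiffFieldReader{
        Diff:   instanceDiff, // *terraform.InstanceDiff
        Source: src,          // MUST be the data the diff was derived from
        Schema: resourceSchema,
    }
    res, err := r.ReadField([]string{"name"}) // the value with the diff applied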
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} + return FieldReadResult{}, nil + } + + var res FieldReadResult + var err error + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + res, err = r.readPrimitive(address, schema) + case TypeList: + res, err = readListField(r, address, schema) + case TypeMap: + res, err = r.readMap(address, schema) + case TypeSet: + res, err = r.readSet(address, schema) + case typeObject: + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err +} + +func (r *DiffFieldReader) readMap( + address []string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // First read the map from the underlying source + source, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if source.Exists { + // readMap may return a nil value, or an unknown value placeholder in + // some cases, causing the type assertion to panic if we don't assign the ok value + result, _ = source.Value.(map[string]interface{}) + resultSet = true + } + + // Next, read all the elements we have in our diff, and apply + // the diff to our result. + prefix := strings.Join(address, ".") + "." + for k, v := range r.Diff.Attributes { + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasPrefix(k, prefix+"%") { + // Ignore the count field + continue + } + + resultSet = true + + k = k[len(prefix):] + if v.NewRemoved { + delete(result, k) + continue + } + + result[k] = v.New + } + + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *DiffFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + + attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] + if !ok { + return result, nil + } + + var resultVal string + if !attrD.NewComputed { + resultVal = attrD.New + if attrD.NewExtra != nil { + result.ValueProcessed = resultVal + if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { + return FieldReadResult{}, err + } + } + } + + result.Computed = attrD.NewComputed + result.Exists = true + result.Value, err = stringToPrimitive(resultVal, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return result, nil +} + +func (r *DiffFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + prefix := strings.Join(address, ".") + "." 
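+ // For example, for address ["ports"] the prefix is "ports." and the diff + // may hold keys such as "ports.#", "ports.1234" or "ports.1234.port" + // (hypothetical attribute), where "1234" is a set item's hash code.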
+ + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // Go through the map and find all the set items + for k, d := range r.Diff.Attributes { + if d.NewRemoved { + // If the field is removed, we always ignore it + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasSuffix(k, "#") { + // Ignore any count field + continue + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + raw, err := r.ReadField(append(address, idx)) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + } + + // Determine if the set "exists". It exists if there are items or if + // the diff explicitly wanted it empty. + exists := set.Len() > 0 + if !exists { + // We could check if the diff value is "0" here but I think the + // existence of "#" on its own is enough to show it existed. This + // protects us in the future from the zero value changing from + // "0" to "" breaking us (if that were to happen). + if _, ok := r.Diff.Attributes[prefix+"#"]; ok { + exists = true + } + } + + if !exists { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if result.Exists { + return result, nil + } + } + + return FieldReadResult{ + Value: set, + Exists: exists, + }, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go new file mode 100644 index 00000000000..53f73b71bb1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go @@ -0,0 +1,235 @@ +package schema + +import ( + "fmt" + "strings" +) + +// MapFieldReader reads fields out of an untyped map[string]string to +// the best of its ability. +type MapFieldReader struct { + Map MapReader + Schema map[string]*Schema +} + +func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { + k := strings.Join(address, ".") + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return r.readPrimitive(address, schema) + case TypeList: + return readListField(r, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // If the name of the map field is directly in the map with an + // empty string, it means that the map is being deleted, so mark + // that it is set. + if v, ok := r.Map.Access(k); ok && v == "" { + resultSet = true + } + + prefix := k + "."
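+ // For example, a TypeMap attribute "tags" (hypothetical) is stored + // flattened as "tags.%" = "2", "tags.env" = "prod", "tags.team" = "infra"; + // the Range below collects every key under the "tags." prefix.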
+ r.Map.Range(func(k, v string) bool { + if strings.HasPrefix(k, prefix) { + resultSet = true + + key := k[len(prefix):] + if key != "%" && key != "#" { + result[key] = v + } + } + + return true + }) + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *MapFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + k := strings.Join(address, ".") + result, ok := r.Map.Access(k) + if !ok { + return FieldReadResult{}, nil + } + + returnVal, err := stringToPrimitive(result, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + }, nil +} + +func (r *MapFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + // Get the number of elements in the list + countRaw, err := r.readPrimitive( + append(address, "#"), &Schema{Type: TypeInt}) + if err != nil { + return FieldReadResult{}, err + } + if !countRaw.Exists { + // No count, means we have no list + countRaw.Value = 0 + } + + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // If we have an empty list, then return an empty list + if countRaw.Computed || countRaw.Value.(int) == 0 { + return FieldReadResult{ + Value: set, + Exists: countRaw.Exists, + Computed: countRaw.Computed, + }, nil + } + + // Go through the map and find all the set items + prefix := strings.Join(address, ".") + "." + countExpected := countRaw.Value.(int) + countActual := make(map[string]struct{}) + completed := r.Map.Range(func(k, _ string) bool { + if !strings.HasPrefix(k, prefix) { + return true + } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + return true + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + var raw FieldReadResult + raw, err = r.ReadField(append(address, idx)) + if err != nil { + return false + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + + // Due to the way multimap readers work, if we've seen the number + // of fields we expect, then exit so that we don't read later values. + // For example: the "set" map might have "ports.#", "ports.0", and + // "ports.1", but the "state" map might have those plus "ports.2". + // We don't want "ports.2" + countActual[idx] = struct{}{} + if len(countActual) >= countExpected { + return false + } + + return true + }) + if !completed && err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// MapReader is an interface that is given to MapFieldReader for accessing +// a "map". This can be used to have alternate implementations. For a basic +// map[string]string, use BasicMapReader. +type MapReader interface { + Access(string) (string, bool) + Range(func(string, string) bool) bool +} + +// BasicMapReader implements MapReader for a single map. 
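+// +// A minimal usage sketch, assuming a schema map s with a TypeString +// attribute "name" (hypothetical values): +// +// r := &MapFieldReader{ +// Map: BasicMapReader(map[string]string{"name": "web"}), +// Schema: s, +// } +// res, err := r.ReadField([]string{"name"}) // res.Value == "web"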
+type BasicMapReader map[string]string + +func (r BasicMapReader) Access(k string) (string, bool) { + v, ok := r[k] + return v, ok +} + +func (r BasicMapReader) Range(f func(string, string) bool) bool { + for k, v := range r { + if cont := f(k, v); !cont { + return false + } + } + + return true +} + +// MultiMapReader reads over multiple maps, preferring keys that are +// found earlier (lower number index) vs. later (higher number index) +type MultiMapReader []map[string]string + +func (r MultiMapReader) Access(k string) (string, bool) { + for _, m := range r { + if v, ok := m[k]; ok { + return v, ok + } + } + + return "", false +} + +func (r MultiMapReader) Range(f func(string, string) bool) bool { + done := make(map[string]struct{}) + for _, m := range r { + for k, v := range m { + if _, ok := done[k]; ok { + continue + } + + if cont := f(k, v); !cont { + return false + } + + done[k] = struct{}{} + } + } + + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go new file mode 100644 index 00000000000..89ad3a86f2b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go @@ -0,0 +1,63 @@ +package schema + +import ( + "fmt" +) + +// MultiLevelFieldReader reads from other field readers, +// merging their results along the way in a specific order. You can specify +// "levels" and name them in order to read only an exact level or up to +// a specific level. +// +// This is useful for saying things such as "read the field from the state +// and config and merge them" or "read the latest value of the field". +type MultiLevelFieldReader struct { + Readers map[string]FieldReader + Levels []string +} + +func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { + return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) +} + +func (r *MultiLevelFieldReader) ReadFieldExact( + address []string, level string) (FieldReadResult, error) { + reader, ok := r.Readers[level] + if !ok { + return FieldReadResult{}, fmt.Errorf( + "Unknown reader level: %s", level) + } + + result, err := reader.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", level, err) + } + + return result, nil +} + +func (r *MultiLevelFieldReader) ReadFieldMerge( + address []string, level string) (FieldReadResult, error) { + var result FieldReadResult + for _, l := range r.Levels { + if r, ok := r.Readers[l]; ok { + out, err := r.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", l, err) + } + + // TODO: computed + if out.Exists { + result = out + } + } + + if l == level { + break + } + } + + return result, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go new file mode 100644 index 00000000000..9abc41b54f4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go @@ -0,0 +1,8 @@ +package schema + +// FieldWriters are responsible for writing fields by address into +// a proper typed representation. ResourceData uses this to write new data +// into existing sources.
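+// +// A minimal usage sketch with MapFieldWriter (defined in field_writer_map.go), +// assuming a schema map s with a TypeString attribute "name" (hypothetical +// values): +// +// w := &MapFieldWriter{Schema: s} // implements FieldWriter +// err := w.WriteField([]string{"name"}, "web") +// m := w.Map() // map[string]string{"name": "web"}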
+type FieldWriter interface { + WriteField([]string, interface{}) error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go new file mode 100644 index 00000000000..c09358b1bb4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go @@ -0,0 +1,357 @@ +package schema + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" +) + +// MapFieldWriter writes data into a single map[string]string structure. +type MapFieldWriter struct { + Schema map[string]*Schema + + lock sync.Mutex + result map[string]string +} + +// Map returns the underlying map that is being written to. +func (w *MapFieldWriter) Map() map[string]string { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + return w.result +} + +func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + w.result[addr] = value +} + +// clearTree clears a field and any sub-fields of the given address out of the +// map. This should be used to reset some kind of complex structures (namely +// sets) before writing to make sure that any conflicting data is removed (for +// example, if the set was previously written to the writer's layer). +func (w *MapFieldWriter) clearTree(addr []string) { + prefix := strings.Join(addr, ".") + "." + for k := range w.result { + if strings.HasPrefix(k, prefix) { + delete(w.result, k) + } + } +} + +func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + // If we're setting anything other than a list root or set root, + // then disallow it. 
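+ // For example, with a hypothetical TypeList attribute "ports", + // WriteField([]string{"ports", "0"}, 80) fails with + // "ports.0: can only set full list"; the caller must write the whole + // value instead: WriteField([]string{"ports"}, []interface{}{80}).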
+ for _, schema := range schemaList[:len(schemaList)-1] { + if schema.Type == TypeList { + return fmt.Errorf( + "%s: can only set full list", + strings.Join(addr, ".")) + } + + if schema.Type == TypeMap { + return fmt.Errorf( + "%s: can only set full map", + strings.Join(addr, ".")) + } + + if schema.Type == TypeSet { + return fmt.Errorf( + "%s: can only set full set", + strings.Join(addr, ".")) + } + } + + return w.set(addr, value) +} + +func (w *MapFieldWriter) set(addr []string, value interface{}) error { + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return w.setPrimitive(addr, value, schema) + case TypeList: + return w.setList(addr, value, schema) + case TypeMap: + return w.setMap(addr, value, schema) + case TypeSet: + return w.setSet(addr, value, schema) + case typeObject: + return w.setObject(addr, value, schema) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } +} + +func (w *MapFieldWriter) setList( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + setElement := func(idx string, value interface{}) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + return w.set(append(addrCopy, idx), value) + } + + var vs []interface{} + if err := mapstructure.Decode(v, &vs); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Wipe the list from the current writer prior to writing if it exists. + // Multiple writes to the same layer are a lot safer for lists than sets due + // to the fact that indexes are always deterministic and the length will + // always be updated with the current length on the last write, but making + // sure we have a clean namespace removes any chance for edge cases to pop up + // and ensures that the last write to the list is the correct value. + w.clearTree(addr) + + // Set the entire list. + var err error + for i, elem := range vs { + is := strconv.FormatInt(int64(i), 10) + err = setElement(is, elem) + if err != nil { + break + } + } + if err != nil { + for i := range vs { + is := strconv.FormatInt(int64(i), 10) + setElement(is, nil) + } + + return err + } + + w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10) + return nil +} + +func (w *MapFieldWriter) setMap( + addr []string, + value interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + v := reflect.ValueOf(value) + vs := make(map[string]interface{}) + + if value == nil { + // The empty string here means the map is removed. + w.result[k] = "" + return nil + } + + if v.Kind() != reflect.Map { + return fmt.Errorf("%s: must be a map", k) + } + if v.Type().Key().Kind() != reflect.String { + return fmt.Errorf("%s: keys must be strings", k) + } + for _, mk := range v.MapKeys() { + mv := v.MapIndex(mk) + vs[mk.String()] = mv.Interface() + } + + // Wipe this address tree. The contents of the map should always reflect the + // last write made to it.
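+ // For example, setting map[string]interface{}{"env": "prod"} at addr + // ["tags"] (hypothetical attribute) writes "tags.env" = "prod" and + // "tags.%" = "1" into w.result below.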
+ w.clearTree(addr) + + // Remove the pure key since we're setting the full map value + delete(w.result, k) + + // Set each subkey + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + for subKey, v := range vs { + if err := w.set(append(addrCopy, subKey), v); err != nil { + return err + } + } + + // Set the count + w.result[k+".%"] = strconv.Itoa(len(vs)) + + return nil +} + +func (w *MapFieldWriter) setObject( + addr []string, + value interface{}, + schema *Schema) error { + // Set the entire object. First decode into a proper structure + var v map[string]interface{} + if err := mapstructure.Decode(value, &v); err != nil { + return fmt.Errorf("%s: %s", strings.Join(addr, "."), err) + } + + // Make space for additional elements in the address + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + + // Set each element in turn + var err error + for k1, v1 := range v { + if err = w.set(append(addrCopy, k1), v1); err != nil { + break + } + } + if err != nil { + for k1 := range v { + w.set(append(addrCopy, k1), nil) + } + } + + return err +} + +func (w *MapFieldWriter) setPrimitive( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + + if v == nil { + // The empty string here means the value is removed. + w.result[k] = "" + return nil + } + + var set string + switch schema.Type { + case TypeBool: + var b bool + if err := mapstructure.Decode(v, &b); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + set = strconv.FormatBool(b) + case TypeString: + if err := mapstructure.Decode(v, &set); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + case TypeInt: + var n int + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatInt(int64(n), 10) + case TypeFloat: + var n float64 + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatFloat(float64(n), 'G', -1, 64) + default: + return fmt.Errorf("Unknown type: %#v", schema.Type) + } + + w.result[k] = set + return nil +} + +func (w *MapFieldWriter) setSet( + addr []string, + value interface{}, + schema *Schema) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + k := strings.Join(addr, ".") + + if value == nil { + w.result[k+".#"] = "0" + return nil + } + + // If it is a slice, then we have to turn it into a *Set so that + // we get the proper order back based on the hash code. + if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { + // Build a temp *ResourceData to use for the conversion + tempAddr := addr[len(addr)-1:] + tempSchema := *schema + tempSchema.Type = TypeList + tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema} + tempW := &MapFieldWriter{Schema: tempSchemaMap} + + // Set the entire list; this lets us get sane values out of it + if err := tempW.WriteField(tempAddr, value); err != nil { + return err + } + + // Build the set by going over the list items in order and + // hashing them into the set. The reason we go over the list and + // not the `value` directly is because this forces all types + // to become []interface{} (generic) instead of []string, which + // most hash functions are expecting.
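+ // For example, a hypothetical TypeSet attribute "ports" written as + // []interface{}{80, 443} is ultimately stored as "ports.#" = "2" plus + // one "ports.<hashcode>" entry per element.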
+ s := schema.ZeroValue().(*Set) + tempR := &MapFieldReader{ + Map: BasicMapReader(tempW.Map()), + Schema: tempSchemaMap, + } + for i := 0; i < v.Len(); i++ { + is := strconv.FormatInt(int64(i), 10) + result, err := tempR.ReadField(append(tempAddr, is)) + if err != nil { + return err + } + if !result.Exists { + panic("set item just set doesn't exist") + } + + s.Add(result.Value) + } + + value = s + } + + // Clear any keys that match the set address first. This is necessary because + // it's always possible and sometimes may be necessary to write to a certain + // writer layer more than once with different set data each time, which will + // lead to different keys being inserted, which can lead to determinism + // problems when the old data isn't wiped first. + w.clearTree(addr) + + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + + for code, elem := range value.(*Set).m { + if err := w.set(append(addrCopy, code), elem); err != nil { + return err + } + } + + w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go new file mode 100644 index 00000000000..0184d7b08ab --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go @@ -0,0 +1,46 @@ +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[getSourceState-1] + _ = x[getSourceConfig-2] + _ = x[getSourceDiff-4] + _ = x[getSourceSet-8] + _ = x[getSourceExact-16] + _ = x[getSourceLevelMask-15] +} + +const ( + _getSource_name_0 = "getSourceStategetSourceConfig" + _getSource_name_1 = "getSourceDiff" + _getSource_name_2 = "getSourceSet" + _getSource_name_3 = "getSourceLevelMaskgetSourceExact" +) + +var ( + _getSource_index_0 = [...]uint8{0, 14, 29} + _getSource_index_3 = [...]uint8{0, 18, 32} +) + +func (i getSource) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] + case i == 4: + return _getSource_name_1 + case i == 8: + return _getSource_name_2 + case 15 <= i && i <= 16: + i -= 15 + return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] + default: + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go new file mode 100644 index 00000000000..bbea5dbd57a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go @@ -0,0 +1,474 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // See the ConfigureFunc documentation for more information. + ConfigureFunc ConfigureFunc + + // MetaReset is called by TestReset to reset any state stored in the meta + // interface. This is especially important if the StopContext is stored by + // the provider. 
+ MetaReset func() error + + meta interface{} + + // a mutex is required because TestReset can directly replace the stopCtx + stopMu sync.Mutex + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. +func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// Stopped reports whether the provider has been stopped or not. +func (p *Provider) Stopped() bool { + ctx := p.StopContext() + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// StopContext returns a context that is canceled once the provider is stopped. +func (p *Provider) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + return p.stopCtx +} + +func (p *Provider) stopInit() { + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of terraform.ResourceProvider interface. +func (p *Provider) Stop() error { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtxCancel() + return nil +} + +// TestReset resets any state stored in the Provider, and will call TestReset +// on Meta if it implements the TestProvider interface. +// This may be used to reset the schema.Provider at the start of a test, and is +// automatically called by resource.Test.
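+// +// A minimal sketch inside a test, assuming a *Provider value p and a +// *testing.T value t (hypothetical): +// +// if err := p.TestReset(); err != nil { +// t.Fatal(err) +// }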
+func (p *Provider) TestReset() error { + p.stopInit() + if p.MetaReset != nil { + return p.MetaReset() + } + return nil +} + +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Input implementation of terraform.ResourceProvider interface. +func (p *Provider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + return schemaMap(p.Schema).Input(input, c) +} + +// Validate implementation of terraform.ResourceProvider interface. +func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.ResourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support resource: %s", t)} + } + + return r.Validate(c) +} + +// Configure implementation of terraform.ResourceProvider interface. +func (p *Provider) Configure(c *terraform.ResourceConfig) error { + // No configuration + if p.ConfigureFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c, nil, p.meta, true) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + + if p.TerraformVersion == "" { + // Terraform 0.12 introduced this field to the protocol + // We can therefore assume that if it's unconfigured at this point, it's 0.10 or 0.11 + p.TerraformVersion = "0.11+compatible" + } + meta, err := p.ConfigureFunc(data) + if err != nil { + return err + } + + p.meta = meta + return nil +} + +// Apply implementation of terraform.ResourceProvider interface. +func (p *Provider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Apply(s, d, p.meta) +} + +// Diff implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, p.meta) +} + +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) +} + +// Refresh implementation of terraform.ResourceProvider interface. +func (p *Provider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Refresh(s, p.meta) +} + +// Resources implementation of terraform.ResourceProvider interface. +func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +func (p *Provider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil { + var err error + results, err = r.Importer.State(data, p.meta) + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource implementation of terraform.ResourceProvider interface. 
+func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.DataSourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support data source: %s", t)} + } + + return r.Validate(c) +} + +// ReadDataDiff implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.Diff(nil, c, p.meta) +} + +// ReadDataApply implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.ReadDataApply(d, p.meta) +} + +// DataSources implementation of terraform.ResourceProvider interface. +func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go new file mode 100644 index 00000000000..406dcdf7123 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go @@ -0,0 +1,831 @@ +package schema + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/zclconf/go-cty/cty" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion is stored in the state for each + // resource.
Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible with the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + // + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's meta interface{}, in case the migration process + // needs to make any remote API calls. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not implemented, + // then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below is used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // If any errors occur during any of the operations, an error should be + // returned. If a resource was partially updated, be careful to enable + // partial state mode for ResourceData and use it accordingly. + // + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified.
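+ // + // For example, a writable resource wires the operations up like this + // sketch (handler functions are hypothetical): + // + // &Resource{ + // Schema: map[string]*Schema{ + // "name": {Type: TypeString, Required: true}, + // }, + // Create: resourceServerCreate, + // Read: resourceServerRead, + // Update: resourceServerUpdate, + // Delete: resourceServerDelete, + // }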
+ Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + Exists ExistsFunc + + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to its size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// See Resource documentation. +type CreateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ReadFunc func(*ResourceData, interface{}) error + +// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type DeleteFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // decoded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + +// Apply creates, updates, and/or deletes a resource. +func (r *Resource) Apply( + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, err + } + + // Instance Diff should have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + if err := r.Delete(data, meta); err != nil { + return r.recordCurrentSchemaVersion(data.State()), err + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, nil + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + if err != nil { + return nil, err + } + } + + err = nil + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + err = r.Create(data, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf("doesn't support update") + } + + err = r.Update(data, meta) + } + + return r.recordCurrentSchemaVersion(data.State()), err +} + +// Diff returns a diff of this resource.
+func (r *Resource) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) simpleDiff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { + warns, errs := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) + } + + return warns, errs +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, error) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), err +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. 
+ data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + // there may be new StateUpgraders that need to be run + s, err := r.upgradeState(s, meta) + if err != nil { + return s, err + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + var err error + + needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) + migrate := needsMigration && r.MigrateState != nil + + if migrate { + s, err = r.MigrateState(stateSchemaVersion, s, meta) + if err != nil { + return s, err + } + } + + if len(r.StateUpgraders) == 0 { + return s, nil + } + + // If we ran MigrateState, then the stateSchemaVersion value is no longer + // correct. We can expect the first upgrade function to be the correct + // schema type version. + if migrate { + stateSchemaVersion = r.StateUpgraders[0].Version + } + + schemaType := r.CoreConfigSchema().ImpliedType() + // find the expected type to convert the state + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion == upgrader.Version { + schemaType = upgrader.Type + } + } + + // StateUpgraders only operate on the new JSON format state, so the state + // needs to be converted. + stateVal, err := StateValueFromInstanceState(s, schemaType) + if err != nil { + return nil, err + } + + jsonState, err := StateValueToJSONMap(stateVal, schemaType) + if err != nil { + return nil, err + } + + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion != upgrader.Version { + continue + } + + jsonState, err = upgrader.Upgrade(jsonState, meta) + if err != nil { + return nil, err + } + stateSchemaVersion++ + } + + // now we need to re-flatmap the new state + stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) + if err != nil { + return nil, err + } + + return r.ShimInstanceStateFromValue(stateVal) +} + +// InternalValidate should be called to validate the structure +// of the resource.
+// + // This should be called in a unit test for any resource to verify + // before release that a resource is properly configured for use with + // this library. + // + // Provider.InternalValidate() will automatically call this for all of + // the resources it manages, so you don't need to call this manually if it + // is part of a Provider. +func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.Create != nil || r.Update != nil || r.Delete != nil { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if r.Update == nil { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Delete, and Read are required + if r.Read == nil { + return fmt.Errorf("Read must be implemented") + } + if r.Delete == nil { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer.
+ if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + return schemaMap(r.Schema).InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value + // is a separate copy and can be safely modified differently. + // + // The data returned from this function has no actual effect on the Resource + // itself (including the state given to this function). + // + // This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData yields a ResourceData filled with this resource's schema for use in unit testing + // + // TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that + // the given dot-delimited attribute path traverses through in the schema + // of the receiving Resource. +func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + +// Returns true if the resource is "top level" i.e. not a sub-resource.
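// Illustrative sketch of the unit-test usage described above; the provider and
// resource names are hypothetical, and Provider.ResourcesMap is assumed to be
// this SDK's usual registration map. Provider.InternalValidate exercises
// Resource.InternalValidate for every registered resource, and
// TestResourceData yields a schema-backed ResourceData without real state.
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func TestProviderInternalValidate(t *testing.T) {
	p := &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"example_thing": resourceExample(), // hypothetical constructor
		},
	}
	if err := p.InternalValidate(); err != nil {
		t.Fatalf("InternalValidate: %s", err)
	}
}

func TestSetAndGet(t *testing.T) {
	d := resourceExample().TestResourceData()
	if err := d.Set("display_name", "demo"); err != nil {
		t.Fatal(err)
	}
	if got := d.Get("display_name").(string); got != "demo" {
		t.Fatalf("got %q", got)
	}
}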
+func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.Create != nil || r.Read != nil) +} + +// Determines if a given InstanceState needs to be migrated by checking the +// stored version number with the current SchemaVersion +func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { + // Get the raw interface{} value for the schema version. If it doesn't + // exist or is nil then set it to zero. + raw := is.Meta["schema_version"] + if raw == nil { + raw = "0" + } + + // Try to convert it to a string. If it isn't a string then we pretend + // that it isn't set at all. It should never not be a string unless it + // was manually tampered with. + rawString, ok := raw.(string) + if !ok { + rawString = "0" + } + + stateSchemaVersion, _ := strconv.Atoi(rawString) + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. +func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go new file mode 100644 index 00000000000..0793524a354 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go @@ -0,0 +1,551 @@ +package schema + +import ( + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// ResourceData is used to query and set the attributes of a resource. +// +// ResourceData is the primary argument received for CRUD operations on +// a resource as well as configuration of a provider. It is a powerful +// structure that can be used to not only query data, but check for changes, +// define partial state updates, etc. +// +// The most relevant methods to take a look at are Get, Set, and Partial. +type ResourceData struct { + // Settable (internally) + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + + // Don't set + multiReader *MultiLevelFieldReader + setWriter *MapFieldWriter + newState *terraform.InstanceState + partial bool + partialMap map[string]struct{} + once sync.Once + isNew bool + + panicOnError bool +} + +// getResult is the internal structure that is generated when a Get +// is called that contains some extra data that might be used. 
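// Illustrative sketch of where the Noop and RemoveFromState helpers (above)
// typically slot in; the resource is hypothetical. A "logical" resource with
// nothing to refresh and nothing to destroy remotely can reuse them directly
// as CRUD handlers.
package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

func resourceLogicalMarker() *schema.Resource {
	return &schema.Resource{
		Create: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId(d.Get("name").(string))
			return nil
		},
		Read:   schema.Noop,            // nothing to refresh
		Delete: schema.RemoveFromState, // blanks the ID, removing it from state
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
	}
}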
+type getResult struct { + Value interface{} + ValueProcessed interface{} + Computed bool + Exists bool + Schema *Schema +} + +// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary + // values, bypassing schema. This MUST NOT be used in normal circumstances - + // it exists only to support the remote_state data source. + // + // Deprecated: Fully define schema attributes and use Set() instead. +func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { + d.once.Do(d.init) + + d.setWriter.unsafeWriteField(key, value) +} + +// Get returns the data for the given key, or nil if the key doesn't exist + // in the schema. + // + // If the key does exist in the schema but doesn't exist in the configuration, + // then the default value for that type will be returned. For strings, this is + // "", for numbers it is 0, etc. + // + // If you want to test if something is set at all in the configuration, + // use GetOk. +func (d *ResourceData) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +// GetChange returns the old and new value for a given key. + // + // HasChange should be used to check if a change exists. It is possible + // that both the old and new value are the same if the old value was not + // set and the new value is. This is common, for example, for boolean + // fields which have a zero value of false. +func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { + o, n := d.getChange(key, getSourceState, getSourceDiff) + return o.Value, n.Value +} + +// GetOk returns the data for the given key and whether or not the key + // has been set to a non-zero value at some point. + // + // The first result will not necessarily be nil if the value doesn't exist. + // The second result should be checked to determine this information. +func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists returns the data for a given key and whether or not the key + // has been set to a non-zero value. This is only useful for determining + // if boolean attributes have been set, if they are Optional but do not + // have a Default value. + // + // This is nearly the same function as GetOk, yet it does not check + // for the zero value of the attribute's type. This allows for attributes + // without a default, to fully check for a literal assignment, regardless + // of the zero-value for that type. + // This should only be used if absolutely required/needed. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual.
An example where this is + // needed is *Set + if eq, ok := o.(Equal); ok { + return !eq.Equal(n) + } + + return !reflect.DeepEqual(o, n) +} + +// Partial turns partial state mode on/off. + // + // When partial state mode is enabled, then only key prefixes specified + // by SetPartial will be in the final state. This allows providers to return + // partial states for partially applied resources (when errors occur). +func (d *ResourceData) Partial(on bool) { + d.partial = on + if on { + if d.partialMap == nil { + d.partialMap = make(map[string]struct{}) + } + } else { + d.partialMap = nil + } +} + +// Set sets the value for the given key. + // + // If the key is invalid or the value is not a correct type, an error + // will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. This allows Set to take a pointer to primitives to + // simplify the interface. + reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as it's not + // a pointer to a struct, since struct pointers are allowed. + reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err +} + +// SetPartial adds the key to the final state output while + // in partial state mode. The key must be a root key in the schema (i.e. + // it cannot be "list.0"). + // + // If partial state mode is disabled, then this has no effect. Additionally, + // whenever partial state mode is toggled, the partial data is cleared. +func (d *ResourceData) SetPartial(k string) { + if d.partial { + d.partialMap[k] = struct{}{} + } +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. +func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the + // resource is destroyed. +func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource.
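// Illustrative sketch (hypothetical attributes) tying together the
// Get/GetOk/GetOkExists semantics documented above with SetId from a Create
// function.
package example

import (
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func createExample(d *schema.ResourceData, meta interface{}) error {
	// Get falls back to the type's zero value ("" for strings) when unset.
	name := d.Get("name").(string)

	// GetOk treats the zero value as "not set", which makes it the wrong
	// tool for an Optional bool that may legitimately be false...
	description := ""
	if v, ok := d.GetOk("description"); ok {
		description = v.(string)
	}

	// ...whereas GetOkExists reports whether the value was assigned at all.
	enabled := false
	if v, assigned := d.GetOkExists("enabled"); assigned {
		enabled = v.(bool)
	}

	log.Printf("[DEBUG] creating %s (description=%q enabled=%t)", name, description, enabled)

	// A blank ID means "does not exist": SetId must be given a non-empty
	// value for the resource to be recorded in state.
	d.SetId(name)
	return nil
}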
+func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *terraform.InstanceState { + var result terraform.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k, _ := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. + rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + if _, ok := d.partialMap[k]; ok { + source = getSourceSet + } + } + + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. 
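// Illustrative sketch of consuming the configured timeouts through the Timeout
// accessor defined just below. The polling helper is hypothetical, and
// resource.Retry is assumed to be the retry helper from this SDK's
// helper/resource package.
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func waitForReady(d *schema.ResourceData) error {
	// Bound the poll loop by whatever the practitioner configured (or the
	// resource's declared default) for the create timeout.
	return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
		ready, err := pollHypotheticalAPI()
		if err != nil {
			return resource.NonRetryableError(err)
		}
		if !ready {
			return resource.RetryableError(fmt.Errorf("still provisioning"))
		}
		return nil
	})
}

func pollHypotheticalAPI() (bool, error) { return true, nil }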
+func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = &copyState + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config.
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + level := "set" + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go new file mode 100644 index 00000000000..8bfb079be60 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go @@ -0,0 +1,17 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). 
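// Illustrative standalone mirror of the bitmask logic in get (above): the
// constants, declared just below in this file, pack a read level and an
// "exact" flag into one byte, and a request such as
// getSourceConfig|getSourceExact decomposes back into the two parts.
package main

import "fmt"

type getSource byte

const (
	getSourceState getSource = 1 << iota
	getSourceConfig
	getSourceDiff
	getSourceSet
	getSourceExact
	getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
)

func main() {
	source := getSourceConfig | getSourceExact
	exact := source&getSourceExact != 0 // read only the named level
	level := source & getSourceLevelMask
	fmt.Printf("level=%04b exact=%t\n", level, exact) // prints: level=0010 exact=true
}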
+type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go new file mode 100644 index 00000000000..f55a66e14fd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go @@ -0,0 +1,559 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // A lock to prevent races on writes. The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapValueWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to MapValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. +func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. 
+ v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. + // It can be used to veto particular changes in the diff, customize the diff + // that has been created, or diff values not controlled by config. + // + // The object functions similarly to ResourceData, however it most notably lacks + // Set, SetPartial, and Partial, as it should be used to change diff values + // only. Most other first-class ResourceData functions exist, namely Get, + // GetOk, HasChange, and GetChange. + // + // All functions in ResourceDiff, save for ForceNew, can only be used on + // computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *terraform.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *terraform.InstanceState + + // The diff created by Terraform. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *terraform.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. + multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that were updated by this ResourceDiff run. + // These are the only keys that a diff should be re-calculated for.
+// +// This is the combined result of both keys for which diff values were updated +// for or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. +func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + for _, l := range s { + if k == l { + break + } + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. 
+func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. +// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function is a no-op/error if there is no diff. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. +func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. 
+func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available + // as its final value at diff time. If the return value is false, this means + // either the value is based on interpolation that was unavailable at diff + // time, or that the value was explicitly marked as computed by SetNewComputed. +func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or + // in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. + // + // Note that technically, ID does not change during diffs (it either has + // already changed in the refresh, or will change on update), hence we do not + // support updating the ID or fetching it from anything else other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in + // diffChange, HasChange, and GetChange. + // + // This implementation differs from ResourceData's in the way that we first get + // results from the exact levels for the new diff, then from state and diff as + // per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized + // diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, + // starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. +func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact.
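// Illustrative sketch of a CustomizeDiff hook, the consumer of the
// ResourceDiff API above. The attribute names are hypothetical and the
// signature assumed here is the SDK's CustomizeDiffFunc,
// func(*schema.ResourceDiff, interface{}) error. It marks a server-computed
// attribute as unknown when its input changes, and forces replacement for an
// unsupported in-place change.
package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

func exampleCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
	if d.HasChange("size") {
		// "fingerprint" is recomputed by the API whenever "size" changes,
		// so blank it out and mark it unknown in the plan.
		if err := d.SetNewComputed("fingerprint"); err != nil {
			return err
		}
		o, n := d.GetChange("size")
		if o.(int) > n.(int) {
			// Shrinking is not supported in place; recreate instead.
			return d.ForceNew("size")
		}
	}
	return nil
}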
+func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. +func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. +func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go new file mode 100644 index 00000000000..5dada3caf32 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go @@ -0,0 +1,52 @@ +package schema + +// ResourceImporter defines how a resource is imported in Terraform. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in Terraform is the process of taking an already-created +// resource and bringing it under Terraform management. This can include +// updating Terraform state, generating Terraform configuration, etc. +type ResourceImporter struct { + // The functions below must all be implemented for importing to work. + + // State is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. + State StateFunc +} + +// StateFunc is the function called to import a resource into the +// Terraform state. It is given a ResourceData with only ID set. This +// ID is going to be an arbitrary value given by the user and may not map +// directly to the ID format that the resource expects, so that should +// be validated. +// +// This should return a slice of ResourceData that turn into the state +// that was imported. This might be as simple as returning only the argument +// that was given to the function. In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. 
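// Illustrative sketch of the wiring ResourceImporter describes (the StateFunc
// documentation continues below); the resource is hypothetical. ID-only
// import via ImportStatePassthrough, defined at the end of this file, works
// whenever Read can rebuild every attribute from the ID alone.
package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

func resourceImportable() *schema.Resource {
	return &schema.Resource{
		Create: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId("example-id") // hypothetical create
			return nil
		},
		Read:   readByID,
		Delete: schema.RemoveFromState,
		Importer: &schema.ResourceImporter{
			// Passthrough import hands Read a ResourceData with only the
			// user-supplied ID set, so Read must rebuild everything from it.
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Computed: true},
		},
	}
}

func readByID(d *schema.ResourceData, meta interface{}) error {
	return d.Set("name", "looked-up-via-"+d.Id()) // hypothetical lookup
}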
+// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. +// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. +func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go new file mode 100644 index 00000000000..f12bf725961 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go @@ -0,0 +1,263 @@ +package schema + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/copystructure" +) + +const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" +const TimeoutsConfigKey = "timeouts" + +const ( + TimeoutCreate = "create" + TimeoutRead = "read" + TimeoutUpdate = "update" + TimeoutDelete = "delete" + TimeoutDefault = "default" +) + +func timeoutKeys() []string { + return []string{ + TimeoutCreate, + TimeoutRead, + TimeoutUpdate, + TimeoutDelete, + TimeoutDefault, + } +} + +// could be time.Duration, int64 or float64 +func DefaultTimeout(tx interface{}) *time.Duration { + var td time.Duration + switch raw := tx.(type) { + case time.Duration: + return &raw + case int64: + td = time.Duration(raw) + case float64: + td = time.Duration(int64(raw)) + default: + log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx) + } + return &td +} + +type ResourceTimeout struct { + Create, Read, Update, Delete, Default *time.Duration +} + +// ConfigDecode takes a schema and the configuration (available in Diff) and +// validates, parses the timeouts into `t` +func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error { + if s.Timeouts != nil { + raw, err := copystructure.Copy(s.Timeouts) + if err != nil { + log.Printf("[DEBUG] Error with deep copy: %s", err) + } + *t = *raw.(*ResourceTimeout) + } + + if raw, ok := c.Config[TimeoutsConfigKey]; ok { + var rawTimeouts []map[string]interface{} + switch raw := raw.(type) { + case map[string]interface{}: + rawTimeouts = append(rawTimeouts, raw) + case []map[string]interface{}: + rawTimeouts = raw + case string: + if raw == hcl2shim.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil + } else { + log.Printf("[ERROR] Invalid timeout value: %q", raw) + return fmt.Errorf("Invalid Timeout value found") + } + case []interface{}: + for _, r := range raw { + if rMap, ok := r.(map[string]interface{}); ok { + rawTimeouts = 
append(rawTimeouts, rMap) + } else { + // Go will not allow a fallthrough + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + } + default: + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + + for _, timeoutValues := range rawTimeouts { + for timeKey, timeValue := range timeoutValues { + // validate that we're dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break + } + } + + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } + + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err) + } + + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + timeout = t.Update + case TimeoutRead: + timeout = t.Read + case TimeoutDelete: + timeout = t.Delete + case TimeoutDefault: + timeout = t.Default + } + + // If the resource has not declared this in the definition, then error + // with an unsupported message + if timeout == nil { + return unsupportedTimeoutKeyError(timeKey) + } + + *timeout = rt + } + return nil + } + } + + return nil +} + +func unsupportedTimeoutKeyError(key string) error { + return fmt.Errorf("Timeout Key (%s) is not supported", key) +} + +// DiffEncode, StateEncode, and MetaDecode are analogous to the Go stdlib JSONEncoder + // interface: they encode/decode a timeouts struct from an instance diff, which is + // where the timeout data is stored after a diff to pass into Apply. + // + // StateEncode encodes the timeout into the ResourceData's InstanceState for + // saving to state + // +func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error { + return t.metaEncode(id) +} + +func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error { + return t.metaEncode(is) +} + +// metaEncode encodes the ResourceTimeout into a map[string]interface{} format + // and stores it in the Meta field of the interface it's given.
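// Illustrative sketch of the producer side of ConfigDecode (above); the
// durations are hypothetical. A resource opts in to configurable timeouts by
// declaring them, and ConfigDecode rejects any `timeouts` key the resource
// left nil.
package example

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceWithTimeouts() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(30 * time.Minute),
			// Read, Update, and Default stay nil here, so a configuration
			// containing `timeouts { read = "5m" }` fails with the
			// unsupported-key error defined above.
		},
	}
}

// Practitioners then override the declared keys in configuration:
//
//	resource "example_thing" "x" {
//	  timeouts {
//	    create = "20m"
//	  }
//	}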
+// Assumes the interface is either *terraform.InstanceState or +// *terraform.InstanceDiff, returns an error otherwise +func (t *ResourceTimeout) metaEncode(ids interface{}) error { + m := make(map[string]interface{}) + + if t.Create != nil { + m[TimeoutCreate] = t.Create.Nanoseconds() + } + if t.Read != nil { + m[TimeoutRead] = t.Read.Nanoseconds() + } + if t.Update != nil { + m[TimeoutUpdate] = t.Update.Nanoseconds() + } + if t.Delete != nil { + m[TimeoutDelete] = t.Delete.Nanoseconds() + } + if t.Default != nil { + m[TimeoutDefault] = t.Default.Nanoseconds() + // for any key above that is nil, if default is specified, we need to + // populate it with the default + for _, k := range timeoutKeys() { + if _, ok := m[k]; !ok { + m[k] = t.Default.Nanoseconds() + } + } + } + + // only add the Timeout to the Meta if we have values + if len(m) > 0 { + switch instance := ids.(type) { + case *terraform.InstanceDiff: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + case *terraform.InstanceState: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + default: + return fmt.Errorf("Error matching type for Diff Encode") + } + } + + return nil +} + +func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error { + return t.metaDecode(id) +} +func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error { + return t.metaDecode(is) +} + +func (t *ResourceTimeout) metaDecode(ids interface{}) error { + var rawMeta interface{} + var ok bool + switch rawInstance := ids.(type) { + case *terraform.InstanceDiff: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + case *terraform.InstanceState: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + default: + return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids) + } + + times := rawMeta.(map[string]interface{}) + if len(times) == 0 { + return nil + } + + if v, ok := times[TimeoutCreate]; ok { + t.Create = DefaultTimeout(v) + } + if v, ok := times[TimeoutRead]; ok { + t.Read = DefaultTimeout(v) + } + if v, ok := times[TimeoutUpdate]; ok { + t.Update = DefaultTimeout(v) + } + if v, ok := times[TimeoutDelete]; ok { + t.Delete = DefaultTimeout(v) + } + if v, ok := times[TimeoutDefault]; ok { + t.Default = DefaultTimeout(v) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go new file mode 100644 index 00000000000..033b06843d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go @@ -0,0 +1,1854 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. 
+package schema + +import ( + "context" + "fmt" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" +) + +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + +// type used for schema package context keys +type contextKey string + +var ( + protoVersionMu sync.Mutex + protoVersion5 = false +) + +func isProto5() bool { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + return protoVersion5 + +} + +// SetProto5 enables a feature flag for any internal changes required +// to work with the new plugin protocol. This should not be called by +// providers. +func SetProto5() { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + protoVersion5 = true +} + +// Schema is used to describe the structure of a value. + // + // Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. + // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration.
+ // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs or asking for user + // input. It should be relatively short (a few sentences max) and should + // be formatted to fit a CLI. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). + // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. 
+	MaxItems int
+	MinItems int
+
+	// PromoteSingle originally allowed for a single element to be assigned
+	// where a primitive list was expected, but this no longer works from
+	// Terraform v0.12 onwards (Terraform Core will require a list to be set
+	// regardless of what this is set to) and so only applies to Terraform v0.11
+	// and earlier, so it should be used only to retain this functionality
+	// for those still using v0.11 with a provider that formerly used this.
+	PromoteSingle bool
+
+	// The following fields are only valid for a TypeSet type.
+	//
+	// Set defines a function to determine the unique ID of an item so that
+	// a proper set can be built.
+	Set SchemaSetFunc
+
+	// ComputedWhen is a set of queries on the configuration. Whenever any
+	// of these things is changed, it will require a recompute (this requires
+	// that Computed is set to true).
+	//
+	// NOTE: This currently does not work.
+	ComputedWhen []string
+
+	// ConflictsWith is a set of schema keys that conflict with this schema.
+	// This will only check that they're set in the _config_. This will not
+	// raise an error for a malfunctioning resource that sets a conflicting
+	// key.
+	ConflictsWith []string
+
+	// When Deprecated is set, this attribute is deprecated.
+	//
+	// A deprecated field still works, but will probably stop working in the
+	// near future. This string is the message shown to the user with
+	// instructions on how to address the deprecation.
+	Deprecated string
+
+	// When Removed is set, this attribute has been removed from the schema.
+	//
+	// Removed attributes can be left in the Schema to generate informative error
+	// messages for the user when they show up in resource configurations.
+	// This string is the message shown to the user with instructions on
+	// what to do about the removed attribute.
+	Removed string
+
+	// ValidateFunc allows individual fields to define arbitrary validation
+	// logic. It is passed the provided config value as an interface{} that is
+	// guaranteed to be of the proper Schema type, and it can yield warnings or
+	// errors based on inspection of that value.
+	//
+	// ValidateFunc is honored only when the schema's Type is set to TypeInt,
+	// TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types.
+	ValidateFunc SchemaValidateFunc
+
+	// Sensitive ensures that the attribute's value does not get displayed in
+	// logs or regular output. It should be used for passwords or other
+	// secret fields. Future versions of Terraform may encrypt these
+	// values.
+	Sensitive bool
+}
+
+// SchemaConfigMode is used to influence how a schema item is mapped into a
+// corresponding configuration construct, using the ConfigMode field of
+// Schema.
+type SchemaConfigMode int
+
+const (
+	SchemaConfigModeAuto SchemaConfigMode = iota
+	SchemaConfigModeAttr
+	SchemaConfigModeBlock
+)
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. +type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. + if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. + // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. 
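For context on how these default helpers are typically consumed, here is a minimal sketch of a provider schema wiring them up. All field and variable names below are hypothetical, not taken from this diff:

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// providerSchema sketches the two default mechanisms: a static Default for
// "region", and a DefaultFunc resolving "token" from environment variables.
func providerSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"region": {
			Type:     schema.TypeString,
			Optional: true,
			Default:  "us-east-1", // static default; cannot be combined with Required
		},
		"token": {
			Type:      schema.TypeString,
			Optional:  true,
			Sensitive: true,
			// The first non-empty environment variable wins, falling back
			// to an empty string when neither is set.
			DefaultFunc: schema.MultiEnvDefaultFunc(
				[]string{"EXAMPLE_TOKEN", "EXAMPLE_API_TOKEN"}, ""),
		},
	}
}
```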
This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. +func (m schemaMap) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. 
+ d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Input implements the terraform.ResourceProvider method by asking +// for input for required configuration keys that don't have a value. +func (m schemaMap) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := m[k] + + // Skip things that don't require config, if that is even valid + // for a provider schema. + // Required XOR Optional must always be true to validate, so we only + // need to check one. + if v.Optional { + continue + } + + // Deprecated fields should never prompt + if v.Deprecated != "" { + continue + } + + // Skip things that have a value of some sort already + if _, ok := c.Raw[k]; ok { + continue + } + + // Skip if it has a default value + defaultValue, err := v.DefaultValue() + if err != nil { + return nil, fmt.Errorf("%s: error loading default: %s", k, err) + } + if defaultValue != nil { + continue + } + + var value interface{} + switch v.Type { + case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: + continue + case TypeString: + value, err = m.inputString(input, k, v) + default: + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) + } + + if err != nil { + return nil, fmt.Errorf( + "%s: %s", k, err) + } + + c.Config[k] = value + } + + return c, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { + return m.validateObject("", m, c) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. 
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. + if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + for _, key := range v.ConflictsWith { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for _, part := range parts { + // Skip index fields + if _, err := strconv.Atoi(part); err == nil { + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s) at part (%s)", k, key, part) + } + + if subResource, ok := target.Elem.(*Resource); ok { + sm = schemaMap(subResource.Schema) + } + } + if target == nil { + return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) + } + if target.Required { + return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) + } + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + 
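To make the validation rules that follow concrete, here is a sketch of a unit test exercising one of them through the exported InternalMap alias defined above; the test and field names are hypothetical:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// TestInternalValidateRejectsRequiredComputed exercises one of the
// internalValidate rules: a field cannot be both Required and Computed.
func TestInternalValidateRejectsRequiredComputed(t *testing.T) {
	m := schema.InternalMap{
		"name": {
			Type:     schema.TypeString,
			Required: true,
			Computed: true, // invalid: Required and Computed are mutually exclusive
		},
	}
	if err := m.InternalValidate(nil); err == nil {
		t.Fatal("expected InternalValidate to reject a Required+Computed field")
	}
}
```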
+				if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil {
+					return err
+				}
+			case *Schema:
+				bad := t.Computed || t.Optional || t.Required
+				if bad {
+					return fmt.Errorf(
+						"%s: Elem must have only Type set", k)
+				}
+			}
+		} else {
+			if v.MaxItems > 0 || v.MinItems > 0 {
+				return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
+			}
+		}
+
+		// Computed-only field
+		if v.Computed && !v.Optional {
+			if v.ValidateFunc != nil {
+				return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
+					"there's nothing to validate on computed-only field", k)
+			}
+			if v.DiffSuppressFunc != nil {
+				return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
+					" between config and state representation. "+
+					"There is no config for computed-only field, nothing to compare.", k)
+			}
+		}
+
+		if v.ValidateFunc != nil {
+			switch v.Type {
+			case TypeList, TypeSet:
+				return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k)
+			}
+		}
+
+		if v.Deprecated == "" && v.Removed == "" {
+			if !isValidFieldName(k) {
+				return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k)
+			}
+		}
+	}
+
+	return nil
+}
+
+func isValidFieldName(name string) bool {
+	re := regexp.MustCompile("^[a-z0-9_]+$")
+	return re.MatchString(name)
+}
+
+// resourceDiffer is an interface that is used by the private diff functions.
+// This helps facilitate diff logic for both ResourceData and ResourceDiff with
+// minimal divergence in code.
+type resourceDiffer interface {
+	diffChange(string) (interface{}, interface{}, bool, bool, bool)
+	Get(string) interface{}
+	GetChange(string) (interface{}, interface{})
+	GetOk(string) (interface{}, bool)
+	HasChange(string) bool
+	Id() string
+}
+
+func (m schemaMap) diff(
+	k string,
+	schema *Schema,
+	diff *terraform.InstanceDiff,
+	d resourceDiffer,
+	all bool) error {
+
+	unsuppressedDiff := new(terraform.InstanceDiff)
+	unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+	var err error
+	switch schema.Type {
+	case TypeBool, TypeInt, TypeFloat, TypeString:
+		err = m.diffString(k, schema, unsuppressedDiff, d, all)
+	case TypeList:
+		err = m.diffList(k, schema, unsuppressedDiff, d, all)
+	case TypeMap:
+		err = m.diffMap(k, schema, unsuppressedDiff, d, all)
+	case TypeSet:
+		err = m.diffSet(k, schema, unsuppressedDiff, d, all)
+	default:
+		err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
+	}
+
+	for attrK, attrV := range unsuppressedDiff.Attributes {
+		switch rd := d.(type) {
+		case *ResourceData:
+			if schema.DiffSuppressFunc != nil && attrV != nil &&
+				schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) {
+				// If this attr diff is suppressed, we may still need it in the
+				// overall diff if it's contained within a set. Rather than
+				// dropping the diff, make it a NOOP.
+				if !all {
+					continue
+				}
+
+				attrV = &terraform.ResourceAttrDiff{
+					Old: attrV.Old,
+					New: attrV.Old,
+				}
+			}
+		}
+		diff.Attributes[attrK] = attrV
+	}
+
+	return err
+}
+
+func (m schemaMap) diffList(
+	k string,
+	schema *Schema,
+	diff *terraform.InstanceDiff,
+	d resourceDiffer,
+	all bool) error {
+	o, n, _, computedList, customized := d.diffChange(k)
+	if computedList {
+		n = nil
+	}
+	nSet := n != nil
+
+	// If we have an old value and no new value is set or will be
+	// computed once all variables can be interpolated, and we're
+	// computed, then nothing has changed.
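The suppression loop in m.diff above is what ultimately honors DiffSuppressFunc. A minimal sketch of such a function, assuming a hypothetical field where letter case is insignificant:

```go
package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// caseInsensitiveSuppress reports a diff as suppressible when the old and
// new values differ only by letter case. Returning true causes the diff
// logic to drop the change (or turn it into a NOOP inside sets).
func caseInsensitiveSuppress(k, old, new string, d *schema.ResourceData) bool {
	return strings.EqualFold(old, new)
}

var exampleField = &schema.Schema{
	Type:             schema.TypeString,
	Optional:         true,
	DiffSuppressFunc: caseInsensitiveSuppress,
}
```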
+ if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." + + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. 
+ computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. + if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. 
+	if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
+		return nil
+	}
+
+	// Get the counts
+	oldLen := os.Len()
+	newLen := ns.Len()
+	oldStr := strconv.Itoa(oldLen)
+	newStr := strconv.Itoa(newLen)
+
+	// Build a schema for our count
+	countSchema := &Schema{
+		Type:     TypeInt,
+		Computed: schema.Computed,
+		ForceNew: schema.ForceNew,
+	}
+
+	// If the set is computed then say that the # is computed
+	if computedSet || schema.Computed && !nSet {
+		// If # already exists, equals 0 and no new set is supplied, there
+		// is nothing to record in the diff
+		count, ok := d.GetOk(k + ".#")
+		if ok && count.(int) == 0 && !nSet && !computedSet {
+			return nil
+		}
+
+		// Set the count but make sure that if # does not exist, we don't
+		// use the zeroed value
+		countStr := strconv.Itoa(count.(int))
+		if !ok {
+			countStr = ""
+		}
+
+		diff.Attributes[k+".#"] = countSchema.finalizeDiff(
+			&terraform.ResourceAttrDiff{
+				Old:         countStr,
+				NewComputed: true,
+			},
+			customized,
+		)
+		return nil
+	}
+
+	// If the counts are not the same, then record that diff
+	changed := oldLen != newLen
+	if changed || all {
+		diff.Attributes[k+".#"] = countSchema.finalizeDiff(
+			&terraform.ResourceAttrDiff{
+				Old: oldStr,
+				New: newStr,
+			},
+			customized,
+		)
+	}
+
+	// Build the list of codes that will make up our set. This is the
+	// removed codes as well as all the codes in the new set.
+	codes := make([][]string, 2)
+	codes[0] = os.Difference(ns).listCode()
+	codes[1] = ns.listCode()
+	for _, list := range codes {
+		for _, code := range list {
+			switch t := schema.Elem.(type) {
+			case *Resource:
+				// This is a complex resource
+				for k2, schema := range t.Schema {
+					subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+					err := m.diff(subK, schema, diff, d, true)
+					if err != nil {
+						return err
+					}
+				}
+			case *Schema:
+				// Copy the schema so that we can set Computed/ForceNew from
+				// the parent schema (the TypeSet).
+				t2 := *t
+				t2.ForceNew = schema.ForceNew
+
+				// This is just a primitive element, so go through each and
+				// just diff each.
+				subK := fmt.Sprintf("%s.%s", k, code)
+				err := m.diff(subK, &t2, diff, d, true)
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("%s: unknown element type (internal)", k)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (m schemaMap) diffString(
+	k string,
+	schema *Schema,
+	diff *terraform.InstanceDiff,
+	d resourceDiffer,
+	all bool) error {
+	var originalN interface{}
+	var os, ns string
+	o, n, _, computed, customized := d.diffChange(k)
+	if schema.StateFunc != nil && n != nil {
+		originalN = n
+		n = schema.StateFunc(n)
+	}
+	nraw := n
+	if nraw == nil && o != nil {
+		nraw = schema.Type.Zero()
+	}
+	if err := mapstructure.WeakDecode(o, &os); err != nil {
+		return fmt.Errorf("%s: %s", k, err)
+	}
+	if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+		return fmt.Errorf("%s: %s", k, err)
+	}
+
+	if os == ns && !all && !computed {
+		// They're the same value. If their old value is not blank or we
+		// have an ID, then return right away since we're already set up.
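diffString above runs the planned value through StateFunc before decoding, keeping the original in NewExtra. That enables the pattern hinted at in the Schema docs of storing only a hash of large strings in state. A sketch, with a hypothetical field name:

```go
package example

import (
	"crypto/sha256"
	"encoding/hex"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// hashContent is a SchemaStateFunc: the state stores only the SHA-256 of
// the configured value, so diffString compares hashes rather than the
// potentially large raw string.
func hashContent(v interface{}) string {
	sum := sha256.Sum256([]byte(v.(string)))
	return hex.EncodeToString(sum[:])
}

var contentField = &schema.Schema{
	Type:      schema.TypeString,
	Optional:  true,
	StateFunc: hashContent,
}
```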
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + + return nil +} + +func (m schemaMap) inputString( + input terraform.UIInput, + k string, + schema *Schema) (interface{}, error) { + result, err := input.Input(context.Background(), &terraform.InputOpts{ + Id: k, + Query: k, + Description: schema.Description, + Default: schema.InputDefault, + }) + + return result, err +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return nil, []error{fmt.Errorf( + "%q, error loading default: %s", k, err)} + } + + // We're okay as long as we had a value set + ok = raw != nil + } + if !ok { + if schema.Required { + return nil, []error{fmt.Errorf( + "%q: required field is not set", k)} + } + + return nil, nil + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return nil, []error{fmt.Errorf( + "%q: this field cannot be set", k)} + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. + if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil + } + return nil, nil + } + + err := m.validateConflictingAttributes(k, schema, c) + if err != nil { + return nil, []error{err} + } + + return m.validateType(k, raw, schema, c) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func (m schemaMap) validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. 
+ continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + // If we support promotion and the raw value isn't a slice, wrap + // it in []interface{} and check again. + if schema.PromoteSingle && rawV.Kind() != reflect.Slice { + raw = []interface{}{raw} + rawV = reflect.ValueOf(raw) + } + + if rawV.Kind() != reflect.Slice { + return nil, []error{fmt.Errorf( + "%s: should be a list", k)} + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. + if isProto5() && !isWhollyKnown(raw) { + return nil, nil + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + var ws []string + var es []error + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + var ws2 []string + var es2 []error + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + ws2, es2 = m.validateObject(key, t.Schema, c) + case *Schema: + ws2, es2 = m.validateType(key, raw, t, c) + } + + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + return ws, es +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. + reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + // Otherwise it's likely raw is an interpolation. 
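The MaxItems/MinItems checks in validateList above are driven entirely by the schema declaration. A sketch of a constrained list field (names hypothetical):

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// endpointsField accepts between one and three string elements; validateList
// rejects configurations outside that range with the errors shown above.
var endpointsField = &schema.Schema{
	Type:     schema.TypeList,
	Optional: true,
	MinItems: 1,
	MaxItems: 3,
	Elem:     &schema.Schema{Type: schema.TypeString},
}
```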
+ return nil, nil + case reflect.Map: + case reflect.Slice: + default: + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + if schema.ValidateFunc != nil { + return schema.ValidateFunc(mapIface, k) + } + return nil, nil + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return nil, []error{fmt.Errorf( + "%s: should be a map", k)} + } + mapIface := v.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + } + + if schema.ValidateFunc != nil { + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + + return nil, nil +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { + for key, raw := range m { + valueType, err := getValueType(k, schema) + if err != nil { + return nil, []error{err} + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return nil, nil +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. + if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return nil, []error{fmt.Errorf( + "%s: expected object, got %s", + k, reflect.ValueOf(raw).Kind())} + } + + var ws []string + var es []error + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + + ws2, es2 := m.validate(key, s, c) + if len(ws2) > 0 { + ws = append(ws, ws2...) 
+ } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk, _ := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + es = append(es, fmt.Errorf( + "%s: invalid or unknown key: %s", k, subk)) + } + } + } + + return ws, es +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return nil, nil + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a list", k), + } + case reflect.Map: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a map", k), + } + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return nil, nil + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeInt: + switch { + case isProto5(): + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. + + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} + } + default: + // Verify that we can parse this as an int + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + } + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + if schema.ValidateFunc != nil { + return schema.ValidateFunc(decoded, k) + } + + return nil, nil +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + var ws []string + var es []error + switch schema.Type { + case TypeSet, TypeList: + ws, es = m.validateList(k, raw, schema, c) + case TypeMap: + ws, es = m.validateMap(k, raw, schema, c) + default: + ws, es = m.validatePrimitive(k, raw, schema, c) + } + + if schema.Deprecated != "" { + ws = append(ws, fmt.Sprintf( + "%q: [DEPRECATED] %s", k, schema.Deprecated)) + } + + if schema.Removed != "" { + es = append(es, fmt.Errorf( + "%q: [REMOVED] %s", k, schema.Removed)) + } + + return ws, es +} + +// Zero returns the zero value for a type. 
+func (t ValueType) Zero() interface{} {
+	switch t {
+	case TypeInvalid:
+		return nil
+	case TypeBool:
+		return false
+	case TypeInt:
+		return 0
+	case TypeFloat:
+		return 0.0
+	case TypeString:
+		return ""
+	case TypeList:
+		return []interface{}{}
+	case TypeMap:
+		return map[string]interface{}{}
+	case TypeSet:
+		return new(Set)
+	case typeObject:
+		return map[string]interface{}{}
+	default:
+		panic(fmt.Sprintf("unknown type %s", t))
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go
new file mode 100644
index 00000000000..fe6d7504c74
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go
@@ -0,0 +1,125 @@
+package schema
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+)
+
+func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
+	if val == nil {
+		buf.WriteRune(';')
+		return
+	}
+
+	switch schema.Type {
+	case TypeBool:
+		if val.(bool) {
+			buf.WriteRune('1')
+		} else {
+			buf.WriteRune('0')
+		}
+	case TypeInt:
+		buf.WriteString(strconv.Itoa(val.(int)))
+	case TypeFloat:
+		buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
+	case TypeString:
+		buf.WriteString(val.(string))
+	case TypeList:
+		buf.WriteRune('(')
+		l := val.([]interface{})
+		for _, innerVal := range l {
+			serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+		}
+		buf.WriteRune(')')
+	case TypeMap:
+		m := val.(map[string]interface{})
+		var keys []string
+		for k := range m {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		buf.WriteRune('[')
+		for _, k := range keys {
+			innerVal := m[k]
+			if innerVal == nil {
+				continue
+			}
+			buf.WriteString(k)
+			buf.WriteRune(':')
+
+			switch innerVal := innerVal.(type) {
+			case int:
+				buf.WriteString(strconv.Itoa(innerVal))
+			case float64:
+				buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
+			case string:
+				buf.WriteString(innerVal)
+			default:
+				panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
+			}
+
+			buf.WriteRune(';')
+		}
+		buf.WriteRune(']')
+	case TypeSet:
+		buf.WriteRune('{')
+		s := val.(*Set)
+		for _, innerVal := range s.List() {
+			serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+		}
+		buf.WriteRune('}')
+	default:
+		panic("unknown schema type to serialize")
+	}
+	buf.WriteRune(';')
+}
+
+// SerializeResourceForHash appends a serialization of the given resource config
+// to the given buffer, guaranteeing deterministic results given the same value
+// and schema.
+//
+// Its primary purpose is as input into a hashing function in order
+// to hash complex substructures when used in sets, and so the serialization
+// is not reversible.
+func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
+	if val == nil {
+		return
+	}
+	sm := resource.Schema
+	m := val.(map[string]interface{})
+	var keys []string
+	for k := range sm {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		innerSchema := sm[k]
+		// Skip attributes that are not user-provided. Computed attributes
+		// do not contribute to the hash since their ultimate value cannot
+		// be known at plan/diff time.
+ if !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go new file mode 100644 index 00000000000..daa431ddb1c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go @@ -0,0 +1,246 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. 
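A short sketch of this Set API in use, with HashString as the SchemaSetFunc; the element values are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func exampleSetUsage() {
	s := schema.NewSet(schema.HashString, []interface{}{"web", "db"})
	s.Add("cache")
	s.Remove("db")

	fmt.Println(s.Contains("web")) // true
	fmt.Println(s.Len())           // 2
	fmt.Println(s.List())          // deterministic order, sorted by hash code
}
```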
+func (s *Set) List() []interface{} {
+	result := make([]interface{}, len(s.m))
+	for i, k := range s.listCode() {
+		result[i] = s.m[k]
+	}
+
+	return result
+}
+
+// Difference performs a set difference of the two sets, returning
+// a new third set that has only the elements unique to this set.
+func (s *Set) Difference(other *Set) *Set {
+	result := &Set{F: s.F}
+	result.once.Do(result.init)
+
+	for k, v := range s.m {
+		if _, ok := other.m[k]; !ok {
+			result.m[k] = v
+		}
+	}
+
+	return result
+}
+
+// Intersection performs the set intersection of the two sets
+// and returns a new third set.
+func (s *Set) Intersection(other *Set) *Set {
+	result := &Set{F: s.F}
+	result.once.Do(result.init)
+
+	for k, v := range s.m {
+		if _, ok := other.m[k]; ok {
+			result.m[k] = v
+		}
+	}
+
+	return result
+}
+
+// Union performs the set union of the two sets and returns a new third
+// set.
+func (s *Set) Union(other *Set) *Set {
+	result := &Set{F: s.F}
+	result.once.Do(result.init)
+
+	for k, v := range s.m {
+		result.m[k] = v
+	}
+	for k, v := range other.m {
+		result.m[k] = v
+	}
+
+	return result
+}
+
+func (s *Set) Equal(raw interface{}) bool {
+	other, ok := raw.(*Set)
+	if !ok {
+		return false
+	}
+
+	return reflect.DeepEqual(s.m, other.m)
+}
+
+// HashEqual simply compares the keys of this set's top-level map to the keys
+// in the other set's top-level map to see if they are equal. This obviously
+// assumes you have a properly working hash function - use HashResource if in
+// doubt.
+func (s *Set) HashEqual(raw interface{}) bool {
+	other, ok := raw.(*Set)
+	if !ok {
+		return false
+	}
+
+	ks1 := make([]string, 0)
+	ks2 := make([]string, 0)
+
+	for k := range s.m {
+		ks1 = append(ks1, k)
+	}
+	for k := range other.m {
+		ks2 = append(ks2, k)
+	}
+
+	sort.Strings(ks1)
+	sort.Strings(ks2)
+
+	return reflect.DeepEqual(ks1, ks2)
+}
+
+func (s *Set) GoString() string {
+	return fmt.Sprintf("*Set(%#v)", s.m)
+}
+
+func (s *Set) init() {
+	s.m = make(map[string]interface{})
+}
+
+func (s *Set) add(item interface{}, computed bool) string {
+	s.once.Do(s.init)
+
+	code := s.hash(item)
+	if computed {
+		code = "~" + code
+
+		if isProto5() {
+			tmpCode := code
+			count := 0
+			for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] {
+				count++
+				tmpCode = fmt.Sprintf("%s%d", code, count)
+			}
+			code = tmpCode
+		}
+	}
+
+	if _, ok := s.m[code]; !ok {
+		s.m[code] = item
+	}
+
+	return code
+}
+
+func (s *Set) hash(item interface{}) string {
+	code := s.F(item)
+	// Always return a nonnegative hash code.
+	if code < 0 {
+		code = -code
+	}
+	return strconv.Itoa(code)
+}
+
+func (s *Set) remove(item interface{}) string {
+	s.once.Do(s.init)
+
+	code := s.hash(item)
+	delete(s.m, code)
+
+	return code
+}
+
+func (s *Set) listCode() []string {
+	// Sort the hash codes so the order of the list is deterministic
+	keys := make([]string, 0, len(s.m))
+	for k := range s.m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go
new file mode 100644
index 00000000000..93c601f80ab
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go
@@ -0,0 +1,115 @@
+package schema
+
+import (
+	"encoding/json"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+// DiffFromValues takes the current state and desired state as cty.Values and
+// derives a terraform.InstanceDiff to give to the legacy providers. This is
+// used to take the states provided by the new ApplyResourceChange method and
+// convert them to a state+diff required for the legacy Apply method.
+func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {
+	return diffFromValues(prior, planned, res, nil)
+}
+
+// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our
+// test fixtures from the legacy tests. In the new provider protocol the diff
+// only needs to be created for the apply operation, and any customizations
+// have already been done.
+func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {
+	instanceState, err := res.ShimInstanceStateFromValue(prior)
+	if err != nil {
+		return nil, err
+	}
+
+	configSchema := res.CoreConfigSchema()
+
+	cfg := terraform.NewResourceConfigShimmed(planned, configSchema)
+	removeConfigUnknowns(cfg.Config)
+	removeConfigUnknowns(cfg.Raw)
+
+	diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return diff, err
+}
+
+// removeConfigUnknowns removes unknown values from the configuration. During
+// apply the only unknown values are those which are to be computed by the
+// resource itself. These may have been marked as unknown config values, and
+// need to be removed to prevent the UnknownVariableValue from appearing in
+// the diff.
+func removeConfigUnknowns(cfg map[string]interface{}) {
+	for k, v := range cfg {
+		switch v := v.(type) {
+		case string:
+			if v == hcl2shim.UnknownVariableValue {
+				delete(cfg, k)
+			}
+		case []interface{}:
+			for _, i := range v {
+				if m, ok := i.(map[string]interface{}); ok {
+					removeConfigUnknowns(m)
+				}
+			}
+		case map[string]interface{}:
+			removeConfigUnknowns(v)
+		}
+	}
+}
+
+// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to
+// get a new cty.Value state. This is used to convert the diff returned from
+// the legacy provider Diff method to the state required for the new
+// PlanResourceChange method.
+func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. +func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a terraform.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go new file mode 100644 index 00000000000..4d0fd7365de --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go @@ -0,0 +1,28 @@ +package schema + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw( + t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go new file mode 100644 index 00000000000..0f65d692f04 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go @@ -0,0 +1,21 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// NOTE: ValueType has more functions defined on it in schema.go. We can't +// put them here because we reference other files. 
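A sketch of how the TestResourceDataRaw helper above is typically used from a provider unit test; the schema and values are hypothetical:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func TestExampleResourceData(t *testing.T) {
	s := map[string]*schema.Schema{
		"name":    {Type: schema.TypeString, Optional: true},
		"enabled": {Type: schema.TypeBool, Optional: true},
	}

	// Build a ResourceData directly from a raw config map, bypassing a
	// real plan/apply cycle.
	d := schema.TestResourceDataRaw(t, s, map[string]interface{}{
		"name":    "example",
		"enabled": true,
	})

	if d.Get("name").(string) != "example" {
		t.Fatalf("unexpected name: %v", d.Get("name"))
	}
}
```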
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go new file mode 100644 index 00000000000..914ca32cbe0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypeBool-1] + _ = x[TypeInt-2] + _ = x[TypeFloat-3] + _ = x[TypeString-4] + _ = x[TypeList-5] + _ = x[TypeMap-6] + _ = x[TypeSet-7] + _ = x[typeObject-8] +} + +const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" + +var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} + +func (i ValueType) String() string { + if i < 0 || i >= ValueType(len(_ValueType_index)-1) { + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go new file mode 100644 index 00000000000..b3eb90fdff5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go @@ -0,0 +1,11 @@ +package structure + +import "encoding/json" + +func ExpandJsonFromString(jsonString string) (map[string]interface{}, error) { + var result map[string]interface{} + + err := json.Unmarshal([]byte(jsonString), &result) + + return result, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go new file mode 100644 index 00000000000..578ad2eade3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go @@ -0,0 +1,16 @@ +package structure + +import "encoding/json" + +func FlattenJsonToString(input map[string]interface{}) (string, error) { + if len(input) == 0 { + return "", nil + } + + result, err := json.Marshal(input) + if err != nil { + return "", err + } + + return string(result), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go new file mode 100644 index 00000000000..3256b476dd0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go @@ -0,0 +1,24 @@ +package structure + +import "encoding/json" + +// Takes a value containing a JSON string and passes it through +// the JSON parser to normalize it, returning either a parsing +// error or the normalized JSON string.
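+// A short illustration (behavior inferred from the implementation below,
+// shown with Go string literals):
+//
+//	s, err := NormalizeJsonString(`{"b": 2,  "a": 1}`)
+//	// on success, s == `{"a":1,"b":2}`: map keys are re-marshaled in
+//	// sorted order and insignificant whitespace is removed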
+func NormalizeJsonString(jsonString interface{}) (string, error) { + var j interface{} + + if jsonString == nil || jsonString.(string) == "" { + return "", nil + } + + s := jsonString.(string) + + err := json.Unmarshal([]byte(s), &j) + if err != nil { + return s, err + } + + bytes, _ := json.Marshal(j) + return string(bytes[:]), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go new file mode 100644 index 00000000000..e23707f574c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go @@ -0,0 +1,21 @@ +package structure + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool { + oldMap, err := ExpandJsonFromString(old) + if err != nil { + return false + } + + newMap, err := ExpandJsonFromString(new) + if err != nil { + return false + } + + return reflect.DeepEqual(oldMap, newMap) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/validation.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/validation.go new file mode 100644 index 00000000000..fd3dbd951bc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/validation.go @@ -0,0 +1,341 @@ +package validation + +import ( + "bytes" + "fmt" + "net" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" +) + +// All returns a SchemaValidateFunc which tests if the provided value +// passes all provided SchemaValidateFunc +func All(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) + } + return allWarnings, allErrors + } +} + +// Any returns a SchemaValidateFunc which tests if the provided value +// passes any of the provided SchemaValidateFunc +func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + if len(validatorWarnings) == 0 && len(validatorErrors) == 0 { + return []string{}, []error{} + } + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) 
+ } + return allWarnings, allErrors + } +} + +// IntBetween returns a SchemaValidateFunc which tests if the provided value +// is of type int and is between min and max (inclusive) +func IntBetween(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v < min || v > max { + es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)) + return + } + + return + } +} + +// IntAtLeast returns a SchemaValidateFunc which tests if the provided value +// is of type int and is at least min (inclusive) +func IntAtLeast(min int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v < min { + es = append(es, fmt.Errorf("expected %s to be at least (%d), got %d", k, min, v)) + return + } + + return + } +} + +// IntAtMost returns a SchemaValidateFunc which tests if the provided value +// is of type int and is at most max (inclusive) +func IntAtMost(max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v > max { + es = append(es, fmt.Errorf("expected %s to be at most (%d), got %d", k, max, v)) + return + } + + return + } +} + +// IntInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type int and matches the value of an element in the valid slice +func IntInSlice(valid []int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be an integer", k)) + return + } + + for _, validInt := range valid { + if v == validInt { + return + } + } + + es = append(es, fmt.Errorf("expected %s to be one of %v, got %d", k, valid, v)) + return + } +} + +// StringInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type string and matches the value of an element in the valid slice; +// the comparison is done in lower case when ignoreCase is true +func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + for _, str := range valid { + if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + return + } + } + + es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v)) + return + } +} + +// StringLenBetween returns a SchemaValidateFunc which tests if the provided value +// is of type string and has length between min and max (inclusive) +func StringLenBetween(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + if len(v) < min || len(v) > max { + es = append(es, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v)) + } + return + } +} + +// StringMatch returns a SchemaValidateFunc which tests if the provided value +// matches a given regexp.
Optionally an error message can be provided to +// return something friendlier than "must match some globby regexp". +func StringMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); !ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to match regular expression %q", k, r)} + } + return nil, nil + } +} + +// NoZeroValues is a SchemaValidateFunc which tests if the provided value is +// not a zero value. It's useful in situations where you want to catch +// explicit zero values on things like required fields during validation. +func NoZeroValues(i interface{}, k string) (s []string, es []error) { + if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { + switch reflect.TypeOf(i).Kind() { + case reflect.String: + es = append(es, fmt.Errorf("%s must not be empty", k)) + case reflect.Int, reflect.Float64: + es = append(es, fmt.Errorf("%s must not be zero", k)) + default: + // this validator should only ever be applied to TypeString, TypeInt and TypeFloat + panic(fmt.Errorf("can't use NoZeroValues with %T attribute %s", i, k)) + } + } + return +} + +// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value +// is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive) +func CIDRNetwork(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + _, ipnet, err := net.ParseCIDR(v) + if err != nil { + es = append(es, fmt.Errorf( + "expected %s to contain a valid CIDR, got: %s with err: %s", k, v, err)) + return + } + + if ipnet == nil || v != ipnet.String() { + es = append(es, fmt.Errorf( + "expected %s to contain a valid network CIDR, expected %s, got %s", + k, ipnet, v)) + } + + sigbits, _ := ipnet.Mask.Size() + if sigbits < min || sigbits > max { + es = append(es, fmt.Errorf( + "expected %q to contain a network CIDR with between %d and %d significant bits, got: %d", + k, min, max, sigbits)) + } + + return + } +} + +// SingleIP returns a SchemaValidateFunc which tests if the provided value +// is of type string, and in valid single IP notation +func SingleIP() schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + ip := net.ParseIP(v) + if ip == nil { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP, got: %s", k, v)) + } + return + } +} + +// IPRange returns a SchemaValidateFunc which tests if the provided value +// is of type string, and in valid IP range notation +func IPRange() schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + ips := strings.Split(v, "-") + if len(ips) != 2 { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP range, got: %s", k, v)) + return + } + ip1 := net.ParseIP(ips[0]) + ip2 := net.ParseIP(ips[1]) + if ip1 == nil || ip2 == nil || 
bytes.Compare(ip1, ip2) > 0 { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP range, got: %s", k, v)) + } + return + } +} + +// ValidateJsonString is a SchemaValidateFunc which tests to make sure the +// supplied string is valid JSON. +func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) { + if _, err := structure.NormalizeJsonString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + return +} + +// ValidateListUniqueStrings is a ValidateFunc that ensures a list has no +// duplicate items in it. It's useful for when a list is needed over a set +// because order matters, yet the items still need to be unique. +func ValidateListUniqueStrings(v interface{}, k string) (ws []string, errors []error) { + for n1, v1 := range v.([]interface{}) { + for n2, v2 := range v.([]interface{}) { + if v1.(string) == v2.(string) && n1 != n2 { + errors = append(errors, fmt.Errorf("%q: duplicate entry - %s", k, v1.(string))) + } + } + } + return +} + +// ValidateRegexp returns a SchemaValidateFunc which tests to make sure the +// supplied string is a valid regular expression. +func ValidateRegexp(v interface{}, k string) (ws []string, errors []error) { + if _, err := regexp.Compile(v.(string)); err != nil { + errors = append(errors, fmt.Errorf("%q: %s", k, err)) + } + return +} + +// ValidateRFC3339TimeString is a ValidateFunc that ensures a string parses +// as time.RFC3339 format +func ValidateRFC3339TimeString(v interface{}, k string) (ws []string, errors []error) { + if _, err := time.Parse(time.RFC3339, v.(string)); err != nil { + errors = append(errors, fmt.Errorf("%q: invalid RFC3339 timestamp", k)) + } + return +} + +// FloatBetween returns a SchemaValidateFunc which tests if the provided value +// is of type float64 and is between min and max (inclusive). 
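+// An illustrative use in a consumer's schema definition (the attribute name
+// "ratio" is assumed, not part of this package):
+//
+//	"ratio": {
+//		Type:         schema.TypeFloat,
+//		Optional:     true,
+//		ValidateFunc: validation.FloatBetween(0.0, 1.0),
+//	}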
+func FloatBetween(min, max float64) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(float64) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be float64", k)) + return + } + + if v < min || v > max { + es = append(es, fmt.Errorf("expected %s to be in the range (%f - %f), got %f", k, min, max, v)) + return + } + + return + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go new file mode 100644 index 00000000000..36b494c0149 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go @@ -0,0 +1,26 @@ +package httpclient + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/meta" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func TerraformUserAgent(version string) string { + ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io) Terraform Plugin SDK/%s", version, meta.SDKVersionString()) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go new file mode 100644 index 00000000000..90a5faf0edf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go @@ -0,0 +1,12 @@ +package addrs + +// CountAttr is the address of an attribute of the "count" object in +// the interpolation scope, like "count.index". +type CountAttr struct { + referenceable + Name string +} + +func (ca CountAttr) String() string { + return "count." + ca.Name +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go new file mode 100644 index 00000000000..46093314fe2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go @@ -0,0 +1,17 @@ +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. 
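+// For example (illustrative addresses): an input variable renders as
+// "var.region", and a fully-qualified resource instance inside a keyed
+// module instance composes as
+//
+//	module.network[0].aws_instance.web["a"]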
+package addrs diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go new file mode 100644 index 00000000000..7a6385035df --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go @@ -0,0 +1,12 @@ +package addrs + +// ForEachAttr is the address of an attribute referencing the current "for_each" object in +// the interpolation scope, addressed using the "each" keyword, e.g. "each.key" and "each.value" +type ForEachAttr struct { + referenceable + Name string +} + +func (f ForEachAttr) String() string { + return "each." + f.Name +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go new file mode 100644 index 00000000000..d2c046c111b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go @@ -0,0 +1,41 @@ +package addrs + +import ( + "fmt" +) + +// InputVariable is the address of an input variable. +type InputVariable struct { + referenceable + Name string +} + +func (v InputVariable) String() string { + return "var." + v.Name +} + +// AbsInputVariableInstance is the address of an input variable within a +// particular module instance. +type AbsInputVariableInstance struct { + Module ModuleInstance + Variable InputVariable +} + +// InputVariable returns the absolute address of the input variable of the +// given name inside the receiving module instance. +func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { + return AbsInputVariableInstance{ + Module: m, + Variable: InputVariable{ + Name: name, + }, + } +} + +func (v AbsInputVariableInstance) String() string { + if len(v.Module) == 0 { + return v.Variable.String() + } + + return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go new file mode 100644 index 00000000000..cef8b279640 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go @@ -0,0 +1,123 @@ +package addrs + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// InstanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// IntKey and StringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type InstanceKey interface { + instanceKeySigil() + String() string +} + +// ParseInstanceKey returns the instance key corresponding to the given value, +// which must be known and non-null. +// +// If an unknown or null value is provided then this function will panic.
This +// function is intended to deal with the values that would naturally be found +// in a hcl.TraverseIndex, which (when parsed from source, at least) can never +// contain unknown or null values. +func ParseInstanceKey(key cty.Value) (InstanceKey, error) { + switch key.Type() { + case cty.String: + return StringKey(key.AsString()), nil + case cty.Number: + var idx int + err := gocty.FromCtyValue(key, &idx) + return IntKey(idx), err + default: + return NoKey, fmt.Errorf("either a string or an integer is required") + } +} + +// NoKey represents the absence of an InstanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey InstanceKey + +// IntKey is the InstanceKey representation for integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type IntKey int + +func (k IntKey) instanceKeySigil() { +} + +func (k IntKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// StringKey is the InstanceKey representation for string indices, as +// used when the "for_each" argument is specified with a map or object type. +type StringKey string + +func (k StringKey) instanceKeySigil() { +} + +func (k StringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. + return fmt.Sprintf("[%q]", string(k)) +} + +// InstanceKeyLess returns true if the first given instance key i should sort +// before the second key j, and false otherwise. +func InstanceKeyLess(i, j InstanceKey) bool { + iTy := instanceKeyType(i) + jTy := instanceKeyType(j) + + switch { + case i == j: + return false + case i == NoKey: + return true + case j == NoKey: + return false + case iTy != jTy: + // The ordering here is arbitrary except that we want NoKeyType + // to sort before the others, so we'll just use the enum values + // of InstanceKeyType here (where NoKey is zero, sorting before + // any other). + return uint32(iTy) < uint32(jTy) + case iTy == IntKeyType: + return int(i.(IntKey)) < int(j.(IntKey)) + case iTy == StringKeyType: + return string(i.(StringKey)) < string(j.(StringKey)) + default: + // Shouldn't be possible to get down here in practice, since the + // above is exhaustive. + return false + } +} + +func instanceKeyType(k InstanceKey) InstanceKeyType { + if _, ok := k.(StringKey); ok { + return StringKeyType + } + if _, ok := k.(IntKey); ok { + return IntKeyType + } + return NoKeyType +} + +// InstanceKeyType represents the different types of instance key that are +// supported. Usually it is sufficient to simply type-assert an InstanceKey +// value to either IntKey or StringKey, but this type and its values can be +// used to represent the types themselves, rather than specific values +// of those types. +type InstanceKeyType rune + +const ( + NoKeyType InstanceKeyType = 0 + IntKeyType InstanceKeyType = 'I' + StringKeyType InstanceKeyType = 'S' +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go new file mode 100644 index 00000000000..61a07b9c75b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go @@ -0,0 +1,48 @@ +package addrs + +import ( + "fmt" +) + +// LocalValue is the address of a local value.
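+// For example (illustrative), LocalValue{Name: "region"} renders as
+// "local.region", and Absolute below scopes it to a particular module
+// instance.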
+type LocalValue struct { + referenceable + Name string +} + +func (v LocalValue) String() string { + return "local." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: v, + } +} + +// AbsLocalValue is the absolute address of a local value within a module instance. +type AbsLocalValue struct { + Module ModuleInstance + LocalValue LocalValue +} + +// LocalValue returns the absolute address of a local value of the given +// name within the receiving module instance. +func (m ModuleInstance) LocalValue(name string) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: LocalValue{ + Name: name, + }, + } +} + +func (v AbsLocalValue) String() string { + if len(v.Module) == 0 { + return v.LocalValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go new file mode 100644 index 00000000000..6420c630182 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go @@ -0,0 +1,75 @@ +package addrs + +import ( + "strings" +) + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string + +// RootModule is the module address representing the root of the static module +// call tree, which is also the zero value of Module. +// +// Note that this is not the root of the dynamic module tree, which is instead +// represented by RootModuleInstance. +var RootModule Module + +// IsRoot returns true if the receiver is the address of the root module, +// or false otherwise. +func (m Module) IsRoot() bool { + return len(m) == 0 +} + +func (m Module) String() string { + if len(m) == 0 { + return "" + } + return strings.Join([]string(m), ".") +} + +// Child returns the address of a child call in the receiver, identified by the +// given name. +func (m Module) Child(name string) Module { + ret := make(Module, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, name) +} + +// Parent returns the address of the parent module of the receiver, or the +// receiver itself if there is no parent (if it's the root module address). +func (m Module) Parent() Module { + if len(m) == 0 { + return m + } + return m[:len(m)-1] +} + +// Call returns the module call address that corresponds to the given module +// instance, along with the address of the module that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCall and then returns a slice of the receiver that excludes that +// last part.
This is just a convenience for situations where a call address +// is required, such as when dealing with *Reference and Referencable values. +func (m Module) Call() (Module, ModuleCall) { + if len(m) == 0 { + panic("cannot produce ModuleCall for root module") + } + + caller, callName := m[:len(m)-1], m[len(m)-1] + return caller, ModuleCall{ + Name: callName, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go new file mode 100644 index 00000000000..09596cc84a3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go @@ -0,0 +1,81 @@ +package addrs + +import ( + "fmt" +) + +// ModuleCall is the address of a call from the current module to a child +// module. +// +// There is no "Abs" version of ModuleCall because an absolute module path +// is represented by ModuleInstance. +type ModuleCall struct { + referenceable + Name string +} + +func (c ModuleCall) String() string { + return "module." + c.Name +} + +// Instance returns the address of an instance of the receiver identified by +// the given key. +func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { + return ModuleCallInstance{ + Call: c, + Key: key, + } +} + +// ModuleCallInstance is the address of one instance of a module created from +// a module call, which might create multiple instances using "count" or +// "for_each" arguments. +type ModuleCallInstance struct { + referenceable + Call ModuleCall + Key InstanceKey +} + +func (c ModuleCallInstance) String() string { + if c.Key == NoKey { + return c.Call.String() + } + return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) +} + +// ModuleInstance returns the address of the module instance that corresponds +// to the receiving call instance when resolved in the given calling module. +// In other words, it returns the child module instance that the receiving +// call instance creates. +func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { + return caller.Child(c.Call.Name, c.Key) +} + +// Output returns the address of an output of the receiver identified by its +// name. +func (c ModuleCallInstance) Output(name string) ModuleCallOutput { + return ModuleCallOutput{ + Call: c, + Name: name, + } +} + +// ModuleCallOutput is the address of a particular named output produced by +// an instance of a module call. +type ModuleCallOutput struct { + referenceable + Call ModuleCallInstance + Name string +} + +func (co ModuleCallOutput) String() string { + return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) +} + +// AbsOutputValue returns the absolute output value address that corresponds +// to the receiving module call output address, once resolved in the given +// calling module.
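+// For example (illustrative names): with caller module.network[0] and a
+// call output addressing output "cidr" of the call "subnet", the result
+// renders as module.network[0].module.subnet.output.cidr.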
+func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { + moduleAddr := co.Call.ModuleInstance(caller) + return moduleAddr.OutputValue(co.Name) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go new file mode 100644 index 00000000000..1353622a0b3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go @@ -0,0 +1,415 @@ +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +var ( + _ Targetable = ModuleInstance(nil) +) + +func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "A module instance address must begin with \"module.\".", + Subject: remain.SourceRange().Ptr(), + }) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "The module instance address is followed by additional invalid content.", + Subject: remain.SourceRange().Ptr(), + }) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseModuleInstance. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid.
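+// For example (illustrative):
+//
+//	addr, diags := ParseModuleInstanceStr(`module.network[0].module.subnet["a"]`)
+//	// on success, addr has two steps: "network" with IntKey(0), then
+//	// "subnet" with StringKey("a")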
+func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := ParseModuleInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Module address prefix must be followed by dot and then a name.", + Subject: remain[0].SourceRange().Ptr(), + }) + break + } + + if next != "module" { + break + } + + kwRange := remain[0].SourceRange() + remain = remain[1:] + // If we have the prefix "module" then we should be followed by a + // module call name, as an attribute, and then optionally an index step + // giving the instance key. + if len(remain) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: &kwRange, + }) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: remain[0].SourceRange().Ptr(), + }) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = StringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = IntKey(idxInt) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: fmt.Sprintf("Invalid module index: %s.", err), + Subject: idx.SourceRange().Ptr(), + }) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Invalid module key: must be either a string or an integer.", + Subject: idx.SourceRange().Ptr(), + }) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot.
+ if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey InstanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// IsRoot returns true if the receiver is the address of the root module instance, +// or false otherwise. +func (m ModuleInstance) IsRoot() bool { + return len(m) == 0 +} + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// Parent returns the address of the parent module instance of the receiver, or +// the receiver itself if there is no parent (if it's the root module address). +func (m ModuleInstance) Parent() ModuleInstance { + if len(m) == 0 { + return m + } + return m[:len(m)-1] +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. +// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} + +// Equal returns true if the receiver and the given other value +// contain the exact same parts. +func (m ModuleInstance) Equal(o ModuleInstance) bool { + return m.String() == o.String() +} + +// Less returns true if the receiver should sort before the given other value +// in a sorted list of addresses. +func (m ModuleInstance) Less(o ModuleInstance) bool { + if len(m) != len(o) { + // Shorter path sorts first. + return len(m) < len(o) + } + + for i := range m { + mS, oS := m[i], o[i] + switch { + case mS.Name != oS.Name: + return mS.Name < oS.Name + case mS.InstanceKey != oS.InstanceKey: + return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) + } + } + + return false +} + +// Ancestors returns a slice containing the receiver and all of its ancestor +// module instances, all the way up to (and including) the root module.
+// The result is ordered by depth, with the root module always first. +// +// Since the result always includes the root module, a caller may choose to +// ignore it by slicing the result with [1:]. +func (m ModuleInstance) Ancestors() []ModuleInstance { + ret := make([]ModuleInstance, 0, len(m)+1) + for i := 0; i <= len(m); i++ { + ret = append(ret, m[:i]) + } + return ret +} + +// IsAncestor returns true if the receiver is an ancestor of the given +// other value. +func (m ModuleInstance) IsAncestor(o ModuleInstance) bool { + // Longer or equal sized paths means the receiver cannot + // be an ancestor of the given module instance. + if len(m) >= len(o) { + return false + } + + for i, ms := range m { + if ms.Name != o[i].Name { + return false + } + if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey { + return false + } + } + + return true +} + +// Call returns the module call address that corresponds to the given module +// instance, along with the address of the module instance that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// A single module call can produce potentially many module instances, so the +// result discards any instance key that might be present on the last step +// of the instance. To retain this, use CallInstance instead. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCall and then returns a slice of the receiver that excludes that +// last part. This is just a convenience for situations where a call address +// is required, such as when dealing with *Reference and Referencable values. +func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { + if len(m) == 0 { + panic("cannot produce ModuleCall for root module") + } + + inst, lastStep := m[:len(m)-1], m[len(m)-1] + return inst, ModuleCall{ + Name: lastStep.Name, + } +} + +// CallInstance returns the module call instance address that corresponds to +// the given module instance, along with the address of the module instance +// that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCallInstance and then returns a slice of the receiver that excludes +// that last part. This is just a convenience for situations where a call +// address is required, such as when dealing with *Reference and Referencable +// values. +func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { + if len(m) == 0 { + panic("cannot produce ModuleCallInstance for root module") + } + + inst, lastStep := m[:len(m)-1], m[len(m)-1] + return inst, ModuleCallInstance{ + Call: ModuleCall{ + Name: lastStep.Name, + }, + Key: lastStep.InstanceKey, + } +} + +// TargetContains implements Targetable by returning true if the given other +// address either matches the receiver, is a sub-module-instance of the +// receiver, or is a targetable absolute address within a module that +// is contained within the receiver. +func (m ModuleInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case ModuleInstance: + if len(to) < len(m) { + // Can't be contained if the path is shorter + return false + } + // Other is contained if its steps match for the length of our own path.
+ for i, ourStep := range m { + otherStep := to[i] + if ourStep != otherStep { + return false + } + } + // If we fall out here then the prefix matched, so it's contained. + return true + + case AbsResource: + return m.TargetContains(to.Module) + + case AbsResourceInstance: + return m.TargetContains(to.Module) + + default: + return false + } +} + +func (m ModuleInstance) targetableSigil() { + // ModuleInstance is targetable +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go new file mode 100644 index 00000000000..bcd923acb76 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go @@ -0,0 +1,75 @@ +package addrs + +import ( + "fmt" +) + +// OutputValue is the address of an output value, in the context of the module +// that is defining it. +// +// This is related to but separate from ModuleCallOutput, which represents +// a module output from the perspective of its parent module. Since output +// values cannot be referenced from the module where they are defined, +// OutputValue is not Referenceable, while ModuleCallOutput is. +type OutputValue struct { + Name string +} + +func (v OutputValue) String() string { + return "output." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: v, + } +} + +// AbsOutputValue is the absolute address of an output value within a module instance. +// +// This represents an output globally within the namespace of a particular +// configuration. It is related to but separate from ModuleCallOutput, which +// represents a module output from the perspective of its parent module. +type AbsOutputValue struct { + Module ModuleInstance + OutputValue OutputValue +} + +// OutputValue returns the absolute address of an output value of the given +// name within the receiving module instance. +func (m ModuleInstance) OutputValue(name string) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: OutputValue{ + Name: name, + }, + } +} + +func (v AbsOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +// ModuleCallOutput converts an AbsOutputValue into a ModuleCallOutput, +// returning also the module instance that the ModuleCallOutput is relative +// to. +// +// The root module does not have a call, and so this method cannot be used +// with outputs in the root module, and will panic in that case.
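+// For example (illustrative): for the absolute output
+// module.network[0].output.cidr, the result is the calling instance (here
+// the root module) together with the call output module.network[0].cidr.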
+func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) { + if v.Module.IsRoot() { + panic("ReferenceFromCall used with root module output") + } + + caller, call := v.Module.CallInstance() + return caller, ModuleCallOutput{ + Call: call, + Name: v.OutputValue.Name, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go new file mode 100644 index 00000000000..eccbcda4c98 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go @@ -0,0 +1,346 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Reference describes a reference to an address with source location +// information. +type Reference struct { + Subject Referenceable + SourceRange tfdiags.SourceRange + Remaining hcl.Traversal +} + +// ParseRef attempts to extract a referencable address from the prefix of the +// given traversal, which must be an absolute traversal or this function +// will panic. +// +// If no error diagnostics are returned, the returned reference includes the +// address that was extracted, the source range it was extracted from, and any +// remaining relative traversal that was not consumed as part of the +// reference. +// +// If error diagnostics are returned then the Reference value is invalid and +// must not be used. +func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + ref, diags := parseRef(traversal) + + // Normalize a little to make life easier for callers. + if ref != nil { + if len(ref.Remaining) == 0 { + ref.Remaining = nil + } + } + + return ref, diags +} + +// ParseRefStr is a helper wrapper around ParseRef that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseRef. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned reference may be nil or incomplete. 
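+// For example (illustrative):
+//
+//	ref, diags := ParseRefStr("aws_instance.web[0].private_ip")
+//	// on success, ref.Subject is the resource instance aws_instance.web[0]
+//	// and ref.Remaining holds the remaining .private_ip traversal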
+func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + ref, targetDiags := ParseRef(traversal) + diags = diags.Append(targetDiags) + return ref, diags +} + +func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + switch root { + + case "count": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: CountAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "each": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: ForEachAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "data": + if len(traversal) < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, + Subject: traversal.SourceRange().Ptr(), + }) + return nil, diags + } + remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser + return parseResourceRef(DataResourceMode, rootRange, remain) + + case "local": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: LocalValue{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "module": + callName, callRange, remain, diags := parseSingleAttrRef(traversal) + if diags.HasErrors() { + return nil, diags + } + + // A traversal starting with "module" can either be a reference to + // an entire module instance or to a single output from a module + // instance, depending on what we find after this introducer. + + callInstance := ModuleCallInstance{ + Call: ModuleCall{ + Name: callName, + }, + Key: NoKey, + } + + if len(remain) == 0 { + // Reference to an entire module instance. Might alternatively + // be a reference to a collection of instances of a particular + // module, but the caller will need to deal with that ambiguity + // since we don't have enough context here. + return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(callRange), + Remaining: remain, + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + callInstance.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + + if len(remain) == 0 { + // Also a reference to an entire module instance, but we have a key + // now. 
+ return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), + Remaining: remain, + }, diags + } + } + + if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { + remain = remain[1:] + return &Reference{ + Subject: ModuleCallOutput{ + Name: attrTrav.Name, + Call: callInstance, + }, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), + Remaining: remain, + }, diags + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: "Module instance objects do not support this operation.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + + case "path": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: PathAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "self": + return &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRangeFromHCL(rootRange), + Remaining: traversal[1:], + }, diags + + case "terraform": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: TerraformAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "var": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: InputVariable{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + default: + return parseResourceRef(ManagedResourceMode, rootRange, traversal) + } +} + +func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, + Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + // If it isn't a TraverseRoot then it must be a "data" reference. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object does not support this operation.`, + Subject: traversal[0].SourceRange().Ptr(), + }) + return nil, diags + } + + attrTrav, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + var what string + switch mode { + case DataResourceMode: + what = "data source" + default: + what = "resource type" + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), + Subject: traversal[1].SourceRange().Ptr(), + }) + return nil, diags + } + name = attrTrav.Name + rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) + remain := traversal[2:] + + resourceAddr := Resource{ + Mode: mode, + Type: typeName, + Name: name, + } + resourceInstAddr := ResourceInstance{ + Resource: resourceAddr, + Key: NoKey, + } + + if len(remain) == 0 { + // This might actually be a reference to the collection of all instances + // of the resource, but we don't have enough context here to decide + // so we'll let the caller resolve that ambiguity. + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + rng = hcl.RangeBetween(rng, idxTrav.SrcRange) + } + + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags +} + +func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object cannot be accessed directly. 
Instead, access one of its attributes.", root), + Subject: &rootRange, + }) + return "", hcl.Range{}, nil, diags + } + if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { + return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object does not support this operation.", root), + Subject: traversal[1].SourceRange().Ptr(), + }) + return "", hcl.Range{}, nil, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go new file mode 100644 index 00000000000..4f0430989d7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go @@ -0,0 +1,318 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Target describes a targeted address with source location information. +type Target struct { + Subject Targetable + SourceRange tfdiags.SourceRange +} + +// ParseTarget attempts to interpret the given traversal as a targetable +// address. The given traversal must be absolute, or this function will +// panic. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the Target value is invalid and +// must not be used. +func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &Target{ + Subject: path, + SourceRange: rng, + }, diags + } + + mode := ManagedResourceMode + if remain.RootName() == "data" { + mode = DataResourceMode + remain = remain[1:] + } + + if len(remain) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource specification must include a resource type and name.", + Subject: remain.SourceRange().Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + switch mode { + case ManagedResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource type name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + case DataResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A data source name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + default: + panic("unknown mode") + } + return nil, diags + } + + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + var subject Targetable + remain = remain[2:] + switch len(remain) { + case 0: + subject = path.Resource(mode, typeName, 
name) + case 1: + if tt, ok := remain[0].(hcl.TraverseIndex); ok { + key, err := ParseInstanceKey(tt.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + + subject = path.ResourceInstance(mode, typeName, name, key) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource instance key must be given in square brackets.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Unexpected extra operators after address.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + return &Target{ + Subject: subject, + SourceRange: rng, + }, diags +} + +// ParseTargetStr is a helper wrapper around ParseTarget that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a target string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseTarget. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned target may be nil or incomplete. +func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + target, targetDiags := ParseTarget(traversal) + diags = diags.Append(targetDiags) + return target, diags +} + +// ParseAbsResource attempts to interpret the given traversal as an absolute +// resource address, using the same syntax as expected by ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResource{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt, diags + + case AbsResourceInstance: // Catch likely user error with specialized message + // Assume that the last element of the traversal must be the index, + // since that's required for a valid resource instance address. + indexStep := traversal[len(traversal)-1] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required. 
This instance key identifies a specific resource instance, which is not expected here.", + Subject: indexStep.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here. The module path must be followed by a resource specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + } +} + +// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a +// string and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. +// +// Since this function has no context about the source of the given string, +// any returned diagnostics will not have meaningful source location +// information. +func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResource{}, diags + } + + addr, addrDiags := ParseAbsResource(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ParseAbsResourceInstance attempts to interpret the given traversal as an +// absolute resource instance address, using the same syntax as expected by +// ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResourceInstance{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt.Instance(NoKey), diags + + case AbsResourceInstance: + return tt, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource instance address is required here. The module path must be followed by a resource instance specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + } +} + +// ParseAbsResourceInstanceStr is a helper wrapper around +// ParseAbsResourceInstance that takes a string and parses it with the HCL +// native syntax traversal parser before interpreting it. 
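The *Str helper wrappers in this file all follow the same two-step pattern: parse the string as an HCL traversal, then hand the traversal to the corresponding analysis function. A minimal usage sketch for ParseTargetStr follows; it is hypothetical, since this addrs package is internal to the SDK and cannot be imported from outside it.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
    )

    func main() {
    	// Parse a "-target"-style address string. The diagnostics cover
    	// both the HCL traversal parse and the address analysis.
    	target, diags := addrs.ParseTargetStr("module.app.aws_instance.web[0]")
    	if diags.HasErrors() {
    		for _, d := range diags {
    			fmt.Println(d.Description().Summary)
    		}
    		return
    	}
    	// Subject is a Targetable; for this input, an AbsResourceInstance.
    	fmt.Println(target.Subject) // module.app.aws_instance.web[0]
    }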
+// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. +// +// Since this function has no context about the source of the given string, +// any returned diagnostics will not have meaningful source location +// information. +func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResourceInstance{}, diags + } + + addr, addrDiags := ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go new file mode 100644 index 00000000000..cfc13f4bcd8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go @@ -0,0 +1,12 @@ +package addrs + +// PathAttr is the address of an attribute of the "path" object in +// the interpolation scope, like "path.module". +type PathAttr struct { + referenceable + Name string +} + +func (pa PathAttr) String() string { + return "path." + pa.Name +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go new file mode 100644 index 00000000000..4d1ed2557cc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go @@ -0,0 +1,297 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// ProviderConfig is the address of a provider configuration. +type ProviderConfig struct { + Type string + + // If not empty, Alias identifies which non-default (aliased) provider + // configuration this address refers to. + Alias string +} + +// NewDefaultProviderConfig returns the address of the default (un-aliased) +// configuration for the provider with the given type name. +func NewDefaultProviderConfig(typeName string) ProviderConfig { + return ProviderConfig{ + Type: typeName, + } +} + +// ParseProviderConfigCompact parses the given absolute traversal as a relative +// provider address in compact form. The following are examples of traversals +// that can be successfully parsed as compact relative provider configuration +// addresses: +// +// aws +// aws.foo +// +// This function will panic if given a relative traversal. +// +// If the returned diagnostics contains errors then the result value is invalid +// and must not be used. +func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := ProviderConfig{ + Type: traversal.RootName(), + } + + if len(traversal) < 2 { + // Just a type name, then. 
+ return ret, diags + } + + aliasStep := traversal[1] + switch ts := aliasStep.(type) { + case hcl.TraverseAttr: + ret.Alias = ts.Name + return ret, diags + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", + Subject: aliasStep.SourceRange().Ptr(), + }) + } + + if len(traversal) > 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous extra operators after provider configuration address.", + Subject: traversal[2:].SourceRange().Ptr(), + }) + } + + return ret, diags +} + +// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return ProviderConfig{}, diags + } + + addr, addrDiags := ParseProviderConfigCompact(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// Absolute returns an AbsProviderConfig from the receiver and the given module +// instance address. +func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig { + return AbsProviderConfig{ + Module: module, + ProviderConfig: pc, + } +} + +func (pc ProviderConfig) String() string { + if pc.Type == "" { + // Should never happen; always indicates a bug + return "provider." + } + + if pc.Alias != "" { + return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias) + } + + return "provider." + pc.Type +} + +// StringCompact is an alternative to String that returns the form that can +// be parsed by ParseProviderConfigCompact, without the "provider." prefix. +func (pc ProviderConfig) StringCompact() string { + if pc.Alias != "" { + return fmt.Sprintf("%s.%s", pc.Type, pc.Alias) + } + return pc.Type +} + +// AbsProviderConfig is the absolute address of a provider configuration +// within a particular module instance. +type AbsProviderConfig struct { + Module ModuleInstance + ProviderConfig ProviderConfig +} + +// ParseAbsProviderConfig parses the given traversal as an absolute provider +// address. 
The following are examples of traversals that can be successfully +// parsed as absolute provider configuration addresses: +// +// provider.aws +// provider.aws.foo +// module.bar.provider.aws +// module.bar.module.baz.provider.aws.foo +// module.foo[1].provider.aws.foo +// +// This type of address is used, for example, to record the relationships +// between resources and provider configurations in the state structure. +// This type of address is not generally used in the UI, except in error +// messages that refer to provider configurations. +func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { + modInst, remain, diags := parseModuleInstancePrefix(traversal) + ret := AbsProviderConfig{ + Module: modInst, + } + if len(remain) < 2 || remain.RootName() != "provider" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + if len(remain) > 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous operators after provider configuration alias.", + Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), + }) + return ret, diags + } + + if tt, ok := remain[1].(hcl.TraverseAttr); ok { + ret.ProviderConfig.Type = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The prefix \"provider.\" must be followed by a provider type name.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ret, diags + } + + if len(remain) == 3 { + if tt, ok := remain[2].(hcl.TraverseAttr); ok { + ret.ProviderConfig.Alias = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider type name must be followed by a configuration alias name.", + Subject: remain[2].SourceRange().Ptr(), + }) + return ret, diags + } + } + + return ret, diags +} + +// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseAbsProviderConfig. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address is invalid. 
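To make the two provider-address grammars concrete, the sketch below shows the fields each parser extracts; it is illustrative only (the package is internal), and the expected values in the comments follow from the parsing code in this file.

    // Compact form, as written in a resource's "provider" meta-argument:
    pc, _ := addrs.ParseProviderConfigCompactStr("aws.foo")
    // pc.Type == "aws", pc.Alias == "foo"

    // Absolute form, as recorded in state:
    apc, _ := addrs.ParseAbsProviderConfigStr("module.bar.provider.aws.foo")
    // apc.Module.String() == "module.bar"
    // apc.ProviderConfig.Type == "aws", apc.ProviderConfig.Alias == "foo"
    // apc.String() == "module.bar.provider.aws.foo"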
+func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsProviderConfig{}, diags + } + + addr, addrDiags := ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ProviderConfigDefault returns the address of the default provider config +// of the given type inside the receiving module instance. +func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig { + return AbsProviderConfig{ + Module: m, + ProviderConfig: ProviderConfig{ + Type: name, + }, + } +} + +// ProviderConfigAliased returns the address of an aliased provider config +// with the given type and alias inside the receiving module instance. +func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig { + return AbsProviderConfig{ + Module: m, + ProviderConfig: ProviderConfig{ + Type: name, + Alias: alias, + }, + } +} + +// Inherited returns an address that the receiving configuration address might +// inherit from in a parent module. The second bool return value indicates if +// such inheritance is possible, and thus whether the returned address is valid. +// +// Inheritance is possible only for default (un-aliased) providers in modules +// other than the root module. Even if a valid address is returned, inheritance +// may not be performed for other reasons, such as if the calling module +// provided explicit provider configurations within the call for this module. +// The ProviderTransformer graph transform in the main terraform module has +// the authoritative logic for provider inheritance, and this method is here +// mainly just for its benefit. +func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { + // Can't inherit if we're already in the root. + if len(pc.Module) == 0 { + return AbsProviderConfig{}, false + } + + // Can't inherit if we have an alias. + if pc.ProviderConfig.Alias != "" { + return AbsProviderConfig{}, false + } + + // Otherwise, we might inherit from a configuration with the same + // provider name in the parent module instance. + parentMod := pc.Module.Parent() + return pc.ProviderConfig.Absolute(parentMod), true +} + +func (pc AbsProviderConfig) String() string { + if len(pc.Module) == 0 { + return pc.ProviderConfig.String() + } + return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go new file mode 100644 index 00000000000..64b8ac869c9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go @@ -0,0 +1,7 @@ +package addrs + +// ProviderType encapsulates a single provider type.
In the future this will be +// extended to include additional fields including Namespace and SourceHost. +type ProviderType struct { + Name string +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go new file mode 100644 index 00000000000..211083a5f45 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go @@ -0,0 +1,20 @@ +package addrs + +// Referenceable is an interface implemented by all address types that can +// appear as references in configuration language expressions. +type Referenceable interface { + // All implementations of this interface must be covered by the type switch + // in lang.Scope.buildEvalContext. + referenceableSigil() + + // String produces a string representation of the address that could be + // parsed as an HCL traversal and passed to ParseRef to produce an identical + // result. + String() string +} + +type referenceable struct { +} + +func (r referenceable) referenceableSigil() { +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go new file mode 100644 index 00000000000..b075a6d1d2f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go @@ -0,0 +1,270 @@ +package addrs + +import ( + "fmt" + "strings" +) + +// Resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type Resource struct { + referenceable + Mode ResourceMode + Type string + Name string +} + +func (r Resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +func (r Resource) Equal(o Resource) bool { + return r.String() == o.String() } + +// Instance produces the address for a specific instance of the receiver +// that is identified by the given key. +func (r Resource) Instance(key InstanceKey) ResourceInstance { + return ResourceInstance{ + Resource: r, + Key: key, + } +} + +// Absolute returns an AbsResource from the receiver and the given module +// instance address. +func (r Resource) Absolute(module ModuleInstance) AbsResource { + return AbsResource{ + Module: module, + Resource: r, + } +} + +// DefaultProviderConfig returns the address of the provider configuration +// that should be used for the resource identified by the receiver if it +// does not have a provider configuration address explicitly set in +// configuration. +// +// This method is not able to verify that such a configuration exists, nor +// represent the behavior of automatically inheriting certain provider +// configurations from parent modules. It just does a static analysis of the +// receiving address and returns an address to start from, relative to the +// same module that contains the resource.
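Concretely, the static rule implemented by the method that follows is "everything before the first underscore in the resource type name". A hypothetical illustration, assuming the addrs package were importable:

    r := addrs.Resource{
    	Mode: addrs.ManagedResourceMode,
    	Type: "google_compute_instance",
    	Name: "vm",
    }
    fmt.Println(r.String())                     // google_compute_instance.vm
    fmt.Println(r.DefaultProviderConfig().Type) // google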
+func (r Resource) DefaultProviderConfig() ProviderConfig { + typeName := r.Type + if under := strings.Index(typeName, "_"); under != -1 { + typeName = typeName[:under] + } + return ProviderConfig{ + Type: typeName, + } +} + +// ResourceInstance is an address for a specific instance of a resource. +// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type ResourceInstance struct { + referenceable + Resource Resource + Key InstanceKey +} + +func (r ResourceInstance) ContainingResource() Resource { + return r.Resource +} + +func (r ResourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +func (r ResourceInstance) Equal(o ResourceInstance) bool { + return r.String() == o.String() +} + +// Absolute returns an AbsResourceInstance from the receiver and the given module +// instance address. +func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { + return AbsResourceInstance{ + Module: module, + Resource: r, + } +} + +// AbsResource is an absolute address for a resource under a given module path. +type AbsResource struct { + targetable + Module ModuleInstance + Resource Resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { + return AbsResource{ + Module: m, + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +// Instance produces the address for a specific instance of the receiver +// that is identified by the given key. +func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: r.Module, + Resource: r.Resource.Instance(key), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is either equal to the receiver or is an instance of the +// receiver. +func (r AbsResource) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResource: + // We'll use our stringification as a cheat-ish way to test for equality. + return to.String() == r.String() + + case AbsResourceInstance: + return r.TargetContains(to.ContainingResource()) + + default: + return false + + } +} + +func (r AbsResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +func (r AbsResource) Equal(o AbsResource) bool { + return r.String() == o.String() +} + +// AbsResourceInstance is an absolute address for a resource instance under a +// given module path. +type AbsResourceInstance struct { + targetable + Module ModuleInstance + Resource ResourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: m, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receiving resource instance. In other words, it discards the key portion +// of the address to produce an AbsResource value.
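Taken together with TargetContains above, this is what gives targeting its "container implies contents" behavior: targeting a resource selects all of its instances, but not the reverse. A sketch using the method defined just below, assuming RootModuleInstance and IntKey from elsewhere in this package:

    res := addrs.RootModuleInstance.Resource(
    	addrs.ManagedResourceMode, "aws_instance", "web",
    )
    inst := res.Instance(addrs.IntKey(0))

    fmt.Println(inst.ContainingResource()) // aws_instance.web
    fmt.Println(res.TargetContains(inst))  // true
    fmt.Println(inst.TargetContains(res))  // false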
+func (r AbsResourceInstance) ContainingResource() AbsResource { + return AbsResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is equal to the receiver. +func (r AbsResourceInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResourceInstance: + // We'll use our stringification as a cheat-ish way to test for equality. + return to.String() == r.String() + + default: + return false + + } +} + +func (r AbsResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool { + return r.String() == o.String() +} + +// Less returns true if the receiver should sort before the given other value +// in a sorted list of addresses. +func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { + switch { + + case len(r.Module) != len(o.Module): + return len(r.Module) < len(o.Module) + + case r.Module.String() != o.Module.String(): + return r.Module.Less(o.Module) + + case r.Resource.Resource.Mode != o.Resource.Resource.Mode: + return r.Resource.Resource.Mode == DataResourceMode + + case r.Resource.Resource.Type != o.Resource.Resource.Type: + return r.Resource.Resource.Type < o.Resource.Resource.Type + + case r.Resource.Resource.Name != o.Resource.Resource.Name: + return r.Resource.Resource.Name < o.Resource.Resource.Name + + case r.Resource.Key != o.Resource.Key: + return InstanceKeyLess(r.Resource.Key, o.Resource.Key) + + default: + return false + + } +} + +// ResourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type ResourceMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode ResourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode ResourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode ResourceMode = 'D' +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go new file mode 100644 index 00000000000..9bdbdc421a7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go @@ -0,0 +1,105 @@ +package addrs + +import "fmt" + +// ResourceInstancePhase is a special kind of reference used only internally +// during graph building to represent resource instances that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via an instance phase +// or can declare that they reference an instance phase in order to accommodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI.
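As a hypothetical example of the string form produced by the type declared just below: a destroy-phase reference renders with a "#" separator precisely so that it can never round-trip through ParseRef.

    ri := addrs.Resource{
    	Mode: addrs.ManagedResourceMode,
    	Type: "aws_instance",
    	Name: "web",
    }.Instance(addrs.IntKey(0))

    fmt.Println(ri.Phase(addrs.ResourceInstancePhaseDestroy))
    // aws_instance.web[0]#destroy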
+type ResourceInstancePhase struct { + referenceable + ResourceInstance ResourceInstance + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourceInstancePhase{} + +// Phase returns a special "phase address" for the receiving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { + return ResourceInstancePhase{ + ResourceInstance: r, + Phase: rpt, + } +} + +// ContainingResource returns an address for the same phase of the resource +// that this instance belongs to. +func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { + return rp.ResourceInstance.Resource.Phase(rp.Phase) +} + +func (rp ResourceInstancePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) +} + +// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. +type ResourceInstancePhaseType string + +const ( + // ResourceInstancePhaseDestroy represents the "destroy" phase of a + // resource instance. + ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" + + // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy + // but is used for resources that have "create_before_destroy" set, thus + // requiring a different dependency ordering. + ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" +) + +func (rpt ResourceInstancePhaseType) String() string { + return string(rpt) +} + +// ResourcePhase is a special kind of reference used only internally +// during graph building to represent resources that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via a resource phase +// or can declare that they reference a resource phase in order to accommodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// Since resources (as opposed to instances) aren't actually phased, this +// address type is used only as an approximation during initial construction +// of the resource-oriented plan graph, under the assumption that resource +// instances with ResourceInstancePhase addresses will be created in dynamic +// subgraphs during the graph walk. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourcePhase struct { + referenceable + Resource Resource + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourcePhase{} + +// Phase returns a special "phase address" for the receiving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { + return ResourcePhase{ + Resource: r, + Phase: rpt, + } +} + +func (rp ResourcePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI.
+ return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go new file mode 100644 index 00000000000..0b5c33f8ee2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _ResourceMode_name_0 = "InvalidResourceMode" + _ResourceMode_name_1 = "DataResourceMode" + _ResourceMode_name_2 = "ManagedResourceMode" +) + +func (i ResourceMode) String() string { + switch { + case i == 0: + return _ResourceMode_name_0 + case i == 68: + return _ResourceMode_name_1 + case i == 77: + return _ResourceMode_name_2 + default: + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go new file mode 100644 index 00000000000..7f24eaf085b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go @@ -0,0 +1,14 @@ +package addrs + +// Self is the address of the special object "self" that behaves as an alias +// for a containing object currently in scope. +const Self selfT = 0 + +type selfT int + +func (s selfT) referenceableSigil() { +} + +func (s selfT) String() string { + return "self" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go new file mode 100644 index 00000000000..16819a5afbb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go @@ -0,0 +1,26 @@ +package addrs + +// Targetable is an interface implemented by all address types that can be +// used as "targets" for selecting sub-graphs of a graph. +type Targetable interface { + targetableSigil() + + // TargetContains returns true if the receiver is considered to contain + // the given other address. Containment, for the purpose of targeting, + // means that if a container address is targeted then all of the + // addresses within it are also implicitly targeted. + // + // A targetable address always contains at least itself. + TargetContains(other Targetable) bool + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseTarget to produce an + // identical result. 
+ String() string +} + +type targetable struct { +} + +func (r targetable) targetableSigil() { +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go new file mode 100644 index 00000000000..a880182ae2a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go @@ -0,0 +1,12 @@ +package addrs + +// TerraformAttr is the address of an attribute of the "terraform" object in +// the interpolation scope, like "terraform.workspace". +type TerraformAttr struct { + referenceable + Name string +} + +func (ta TerraformAttr) String() string { + return "terraform." + ta.Name +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go new file mode 100644 index 00000000000..6f64f6c9241 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go @@ -0,0 +1,295 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcled" + "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + case tfdiags.Warning: + buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) + + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. 
+ snippetRange = hcl.RangeOver(snippetRange, highlightRange) + if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + var src []byte + if sources != nil { + src = sources[snippetRange.Filename] + } + if src == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. + fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) + } else { + file, offset := parseRange(src, highlightRange) + + headerRange := highlightRange + + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + contextStr = ", in " + contextStr + } + + fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) + + // Config snippet rendering + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snippetRange) { + continue + } + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), + lineRange.Start.Line, + before, highlighted, after, + ) + } + + } + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue Traversals + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) + } + seen[traversalStr] = struct{}{} + } + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
+ + if len(stmts) > 0 { + fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) + } + for _, stmt := range stmts { + fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) + } + } + + buf.WriteByte('\n') + } + + if desc.Detail != "" { + detail := desc.Detail + if width != 0 { + detail = wordwrap.WrapString(detail, uint(width)) + } + fmt.Fprintf(&buf, "%s\n", detail) + } + + return buf.String() +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. + parser := hclparse.NewParser() + var file *hcl.File + var diags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, diags = parser.ParseJSON(src, filename) + } else { + file, diags = parser.ParseHCL(src, filename) + } + if diags.HasErrors() { + return file, offset + } + + return file, offset +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go new file mode 100644 index 00000000000..0a2aa7d02e6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go @@ -0,0 +1,1192 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ResourceChange returns a string representation of a change to a particular +// resource, for inclusion in user-facing plan output. +// +// The resource schema must be provided along with the change so that the +// formatted change can reflect the configuration structure for the associated +// resource. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. 
+func ResourceChange( + change *plans.ResourceInstanceChangeSrc, + tainted bool, + schema *configschema.Block, + color *colorstring.Colorize, +) string { + addr := change.Addr + var buf bytes.Buffer + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: false, + } + } + + dispAddr := addr.String() + if change.DeposedKey != states.NotDeposed { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) + } + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) + case plans.Read: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) + case plans.Update: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) + case plans.CreateThenDelete, plans.DeleteThenCreate: + if tainted { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) + } else { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) + } + case plans.Delete: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString(color.Color("[reset]\n")) + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color("[green] +[reset] ")) + case plans.Read: + buf.WriteString(color.Color("[cyan] <=[reset] ")) + case plans.Update: + buf.WriteString(color.Color("[yellow] ~[reset] ")) + case plans.DeleteThenCreate: + buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) + case plans.CreateThenDelete: + buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) + case plans.Delete: + buf.WriteString(color.Color("[red] -[reset] ")) + default: + buf.WriteString(color.Color("??? ")) + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + buf.WriteString(fmt.Sprintf( + "resource %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + case addrs.DataResourceMode: + buf.WriteString(fmt.Sprintf( + "data %q %q ", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + default: + // should never happen, since the above is exhaustive + buf.WriteString(addr.String()) + } + + buf.WriteString(" {") + + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: change.Action, + requiredReplace: change.RequiredReplace, + } + + // Most commonly-used resources have nested blocks that result in us + // going at least three traversals deep while we recurse here, so we'll + // start with that much capacity and then grow as needed for deeper + // structures. + path := make(cty.Path, 0, 3) + + changeV, err := change.Decode(schema.ImpliedType()) + if err != nil { + // Should never happen in here, since we've already been through + // loads of layers of encode/decode of the planned changes before now. + panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) + } + + // We currently have an opt-out that permits the legacy SDK to return values + // that defy our usual conventions around handling of nesting blocks. 
To + // avoid the rendering code from needing to handle all of these, we'll + // normalize first. + // (Ideally we'd do this as part of the SDK opt-out implementation in core, + // but we've added it here for now to reduce risk of unexpected impacts + // on other code in core.) + changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) + changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) + + bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) + if bodyWritten { + buf.WriteString("\n") + buf.WriteString(strings.Repeat(" ", 4)) + } + buf.WriteString("}\n") + + return buf.String() +} + +type blockBodyDiffPrinter struct { + buf *bytes.Buffer + color *colorstring.Colorize + action plans.Action + requiredReplace cty.PathSet +} + +const forcesNewResourceCaption = " [red]# forces replacement[reset]" + +// writeBlockBodyDiff writes attribute or block differences +// and returns true if any differences were found and written +func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { + path = ctyEnsurePathCapacity(path, 1) + + bodyWritten := false + blankBeforeBlocks := false + { + attrNames := make([]string, 0, len(schema.Attributes)) + attrNameLen := 0 + for name := range schema.Attributes { + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + if oldVal.IsNull() && newVal.IsNull() { + // Skip attributes where both old and new values are null + // (we do this early here so that we'll do our value alignment + // based on the longest attribute name that has a change, rather + // than the longest attribute name in the full set.) + continue + } + + attrNames = append(attrNames, name) + if len(name) > attrNameLen { + attrNameLen = len(name) + } + } + sort.Strings(attrNames) + if len(attrNames) > 0 { + blankBeforeBlocks = true + } + + for _, name := range attrNames { + attrS := schema.Attributes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) + } + } + + { + blockTypeNames := make([]string, 0, len(schema.BlockTypes)) + for name := range schema.BlockTypes { + blockTypeNames = append(blockTypeNames, name) + } + sort.Strings(blockTypeNames) + + for _, name := range blockTypeNames { + blockS := schema.BlockTypes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) + + // Always include a blank for any subsequent block types. 
+ // Always include a blank for any subsequent block types. + blankBeforeBlocks = true + } + } + + return bodyWritten +} + +func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) { + path = append(path, cty.GetAttrStep{Name: name}) + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + showJustNew := false + var action plans.Action + switch { + case old.IsNull(): + action = plans.Create + showJustNew = true + case new.IsNull(): + action = plans.Delete + case ctyEqualWithUnknown(old, new): + action = plans.NoOp + showJustNew = true + default: + action = plans.Update + } + + p.writeActionSymbol(action) + + p.buf.WriteString(p.color.Color("[bold]")) + p.buf.WriteString(name) + p.buf.WriteString(p.color.Color("[reset]")) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) + p.buf.WriteString(" = ") + + if attrS.Sensitive { + p.buf.WriteString("(sensitive value)") + } else { + switch { + case showJustNew: + p.writeValue(new, action, indent+2) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + default: + // We show new even if it is null to emphasize the fact + // that it is being unset, since otherwise it is easy to + // misunderstand that the value is still set to the old value. + p.writeValueDiff(old, new, indent+2, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) { + path = append(path, cty.GetAttrStep{Name: name}) + if old.IsNull() && new.IsNull() { + // Nothing to do if both old and new are null + return + } + + // Where old/new are collections representing a nesting mode other than + // NestingSingle, we assume the collection value can never be unknown + // since we always produce the container for the nested objects, even if + // the objects within are computed. + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + var action plans.Action + eqV := new.Equals(old) + switch { + case old.IsNull(): + action = plans.Create + case new.IsNull(): + action = plans.Delete + case !new.IsWhollyKnown() || !old.IsWhollyKnown(): + // "old" should actually always be known due to our contract + // that old values must never be unknown, but we'll allow it + // anyway to be robust. + action = plans.Update + case !eqV.IsKnown() || !eqV.True(): + action = plans.Update + } + + if blankBefore { + p.buf.WriteRune('\n') + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path) + case configschema.NestingList: + // For the sake of handling nested blocks, we'll treat a null list + // the same as an empty list since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockListAsEmpty(old) + new = ctyNullBlockListAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + // Here we intentionally preserve the index-based correspondence + // between old and new, rather than trying to detect insertions + // and removals in the list, because this more accurately reflects + // how Terraform Core and providers will understand the change, + // particularly when the nested block contains computed attributes + // that will themselves maintain correspondence by index. + + // commonLen is the number of elements that exist in both lists, which + // will be presented as updates (~).
Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. + var commonLen int + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + default: + commonLen = len(newItems) + } + + if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { + p.buf.WriteRune('\n') + } + + for i := 0; i < commonLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := newItems[i] + action := plans.Update + if oldItem.RawEquals(newItem) { + action = plans.NoOp + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(oldItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := cty.NullVal(oldItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(newItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + newItem := newItems[i] + oldItem := cty.NullVal(newItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) + } + case configschema.NestingSet: + // For the sake of handling nested blocks, we'll treat a null set + // the same as an empty set since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockSetAsEmpty(old) + new = ctyNullBlockSetAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both sets are empty + return + } + + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) + all := cty.SetVal(allItems) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + path := append(path, cty.IndexStep{Key: val}) + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. 
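// Illustrative sketch (not part of the vendored source): the
// ctyNullBlockMapAsEmpty/ctyNullBlockListAsEmpty helpers defined near the
// end of this file replace a null collection with an empty one so the code
// below can iterate it safely. With go-cty directly:
func nullBlockAsEmptyExample() int {
	nullList := cty.NullVal(cty.List(cty.String)) // what a provider may return
	if nullList.IsNull() {
		// Calling LengthInt or ElementIterator on a null value would panic,
		// so normalize to an empty collection of the same element type first.
		nullList = cty.ListValEmpty(cty.String)
	}
	return nullList.LengthInt() // 0, and iteration is now a safe no-op
}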
+ old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + newItems := new.AsValueMap() + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both maps are empty + return + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + if label != nil { + fmt.Fprintf(p.buf, "%s %q {", name, *label) + } else { + fmt.Fprintf(p.buf, "%s {", name) + } + + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) + if bodyWritten { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + } + p.buf.WriteString("}") +} + +func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { + if !val.IsKnown() { + p.buf.WriteString("(known after apply)") + return + } + if val.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) + return + } + + ty := val.Type() + + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + { + // Special behavior for JSON strings containing array or object + src := []byte(val.AsString()) + ty, err := ctyjson.ImpliedType(src) + // check for the special case of "null", which decodes to nil, + // and just allow it to be printed out directly + if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { + jv, err := ctyjson.Unmarshal(src, ty) + if err == nil { + p.buf.WriteString("jsonencode(") + if jv.LengthInt() == 0 { + p.writeValue(jv, action, 0) + } else { + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(jv, action, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteByte(')') + break // don't *also* do the normal behavior below + } + } + } + fmt.Fprintf(p.buf, "%q", val.AsString()) + case cty.Bool: + if val.True() { + p.buf.WriteString("true") + } else { + p.buf.WriteString("false") + } + case cty.Number: + bf := val.AsBigFloat() + p.buf.WriteString(bf.Text('f', -1)) + default: + // should never happen, since the above is exhaustive + fmt.Fprintf(p.buf, "%#v", val) + } + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + p.buf.WriteString("[") + + it := val.ElementIterator() + for it.Next() { + _, val 
:= it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",") + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("]") + case ty.IsMapType(): + p.buf.WriteString("{") + + keyLen := 0 + for it := val.ElementIterator(); it.Next(); { + key, _ := it.Element() + if keyStr := key.AsString(); len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + for it := val.ElementIterator(); it.Next(); { + key, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(key, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + case ty.IsObjectType(): + p.buf.WriteString("{") + + atys := ty.AttributeTypes() + attrNames := make([]string, 0, len(atys)) + nameLen := 0 + for attrName := range atys { + attrNames = append(attrNames, attrName) + if len(attrName) > nameLen { + nameLen = len(attrName) + } + } + sort.Strings(attrNames) + + for _, attrName := range attrNames { + val := val.GetAttr(attrName) + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(attrName) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if len(attrNames) > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + } +} + +func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { + ty := old.Type() + typesEqual := ctyTypesEqual(ty, new.Type()) + + // We have some specialized diff implementations for certain complex + // values where it's useful to see a visualization of the diff of + // the nested elements rather than just showing the entire old and + // new values verbatim. + // However, these specialized implementations can apply only if both + // values are known and non-null. + if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { + switch { + case ty == cty.String: + // We have special behavior for both multi-line strings in general + // and for strings that can parse as JSON. For the JSON handling + // to apply, both old and new must be valid JSON. + // For single-line strings that don't parse as JSON we just fall + // out of this switch block and do the default old -> new rendering. + oldS := old.AsString() + newS := new.AsString() + + { + // Special behavior for JSON strings containing object or + // list values. 
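// Illustrative sketch (hypothetical helper, not part of the vendored
// source): the block below relies on ctyjson.ImpliedType and
// ctyjson.Unmarshal to decide whether two strings are the same JSON
// document modulo formatting. Stand-alone, the check looks like this:
func jsonValuesEqualExample(a, b string) bool {
	aTy, errA := ctyjson.ImpliedType([]byte(a))
	bTy, errB := ctyjson.ImpliedType([]byte(b))
	if errA != nil || errB != nil {
		return false // not both valid JSON
	}
	av, errA := ctyjson.Unmarshal([]byte(a), aTy)
	bv, errB := ctyjson.Unmarshal([]byte(b), bTy)
	if errA != nil || errB != nil {
		return false
	}
	// RawEquals ignores the original formatting, so for example
	// jsonValuesEqualExample(`{"a":1}`, `{ "a": 1 }`) returns true.
	return av.RawEquals(bv)
}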
+ oldBytes := []byte(oldS) + newBytes := []byte(newS) + oldType, oldErr := ctyjson.ImpliedType(oldBytes) + newType, newErr := ctyjson.ImpliedType(newBytes) + if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { + oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) + newJV, newErr := ctyjson.Unmarshal(newBytes, newType) + if oldErr == nil && newErr == nil { + if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace + p.buf.WriteString("jsonencode(") + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(plans.Update) + p.writeValueDiff(oldJV, newJV, indent+4, path) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } else { + // if they differ only in insignificant whitespace + // then we'll note that but still expand out the + // effective value. + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) + } else { + p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) + } + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(oldJV, plans.NoOp, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } + return + } + } + } + + if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { + break + } + + p.buf.WriteString("<<~EOT") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var oldLines, newLines []cty.Value + { + r := strings.NewReader(oldS) + sc := bufio.NewScanner(r) + for sc.Scan() { + oldLines = append(oldLines, cty.StringVal(sc.Text())) + } + } + { + r := strings.NewReader(newS) + sc := bufio.NewScanner(r) + for sc.Scan() { + newLines = append(newLines, cty.StringVal(sc.Text())) + } + } + + diffLines := ctySequenceDiff(oldLines, newLines) + for _, diffLine := range diffLines { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(diffLine.Action) + + switch diffLine.Action { + case plans.NoOp, plans.Delete: + p.buf.WriteString(diffLine.Before.AsString()) + case plans.Create: + p.buf.WriteString(diffLine.After.AsString()) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return for strings + p.buf.WriteString(diffLine.After.AsString()) + + } + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) // no action symbol before the closing marker, so just the base indent + p.buf.WriteString("EOT") + + return + + case ty.IsSetType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var addedVals, removedVals, allVals []cty.Value + for it := old.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if new.HasElement(val).False() { + removedVals = append(removedVals, val) + } + } + for it := new.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if val.IsKnown() && old.HasElement(val).False() { + addedVals = append(addedVals, val) + } + } + + var all, added, removed cty.Value + if len(allVals) > 0 { + all = cty.SetVal(allVals) + } else { + all = cty.SetValEmpty(ty.ElementType()) + } + if len(addedVals) > 0 { + added = cty.SetVal(addedVals) + } else { + added = cty.SetValEmpty(ty.ElementType()) + } +
if len(removedVals) > 0 { + removed = cty.SetVal(removedVals) + } else { + removed = cty.SetValEmpty(ty.ElementType()) + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + + var action plans.Action + switch { + case !val.IsKnown(): + action = plans.Update + case added.HasElement(val).True(): + action = plans.Create + case removed.HasElement(val).True(): + action = plans.Delete + default: + action = plans.NoOp + } + + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + case ty.IsListType() || ty.IsTupleType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) + for _, elemDiff := range elemDiffs { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(elemDiff.Action) + switch elemDiff.Action { + case plans.NoOp, plans.Delete: + p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) + case plans.Update: + p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) + case plans.Create: + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return. + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + } + + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + + case ty.IsMapType(): + p.buf.WriteString("{") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := cty.StringVal(k) + var action plans.Action + if old.HasIndex(kV).False() { + action = plans.Create + } else if new.HasIndex(kV).False() { + action = plans.Delete + } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.IndexStep{Key: kV}) + + p.writeActionSymbol(action) + p.writeValue(kV, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + switch action { + case plans.Create, plans.NoOp: + v := new.Index(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.Index(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.Index(kV) + newV := new.Index(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteByte('\n') + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + return + case ty.IsObjectType(): + p.buf.WriteString("{") + p.buf.WriteString("\n") + + forcesNewResource := 
p.pathForcesNewResource(path) + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := k + var action plans.Action + if !old.Type().HasAttribute(kV) { + action = plans.Create + } else if !new.Type().HasAttribute(kV) { + action = plans.Delete + } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.GetAttrStep{Name: kV}) + + p.writeActionSymbol(action) + p.buf.WriteString(k) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + + switch action { + case plans.Create, plans.NoOp: + v := new.GetAttr(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.GetAttr(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.GetAttr(kV) + newV := new.GetAttr(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + + if forcesNewResource { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + } + + // In all other cases, we just show the new and old values as-is + p.writeValue(old, plans.Delete, indent) + if new.IsNull() { + p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) + } else { + p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) + } + + p.writeValue(new, plans.Create, indent) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } +} + +// writeActionSymbol writes a symbol to represent the given action, followed +// by a space. +// +// It only supports the actions that can be represented with a single character: +// Create, Delete, Update and NoOp. +func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { + switch action { + case plans.Create: + p.buf.WriteString(p.color.Color("[green]+[reset] ")) + case plans.Delete: + p.buf.WriteString(p.color.Color("[red]-[reset] ")) + case plans.Update: + p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) + case plans.NoOp: + p.buf.WriteString(" ") + default: + // Should never happen + p.buf.WriteString(p.color.Color("? ")) + } +} + +func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { + if !p.action.IsReplace() { + // "requiredReplace" only applies when the instance is being replaced + return false + } + return p.requiredReplace.Has(path) +} + +func ctyEmptyString(value cty.Value) bool { + if !value.IsNull() && value.IsKnown() { + valueType := value.Type() + if valueType == cty.String && value.AsString() == "" { + return true + } + } + return false +} + +func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { + attrType := val.Type().AttributeType(name) + + if val.IsNull() { + return cty.NullVal(attrType) + } + + // We treat "" as null here + // as the existing SDK doesn't support null yet.
+ // This allows us to avoid spurious diffs + // until we introduce null to the SDK. + attrValue := val.GetAttr(name) + if ctyEmptyString(attrValue) { + return cty.NullVal(attrType) + } + + return attrValue +} + +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + ret := make([]cty.Value, 0, val.LengthInt()) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + return ret +} + +// ctySequenceDiff returns differences between given sequences of cty.Value(s) +// in the form of Create, Delete, or Update actions (for objects). +func ctySequenceDiff(old, new []cty.Value) []*plans.Change { + var ret []*plans.Change + lcs := objchange.LongestCommonSubsequence(old, new) + var oldI, newI, lcsI int + for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { + for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { + isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) + if isObjectDiff && newI < len(new) { + ret = append(ret, &plans.Change{ + Action: plans.Update, + Before: old[oldI], + After: new[newI], + }) + oldI++ + newI++ // we also consume the next "new" in this case + continue + } + + ret = append(ret, &plans.Change{ + Action: plans.Delete, + Before: old[oldI], + After: cty.NullVal(old[oldI].Type()), + }) + oldI++ + } + for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { + ret = append(ret, &plans.Change{ + Action: plans.Create, + Before: cty.NullVal(new[newI].Type()), + After: new[newI], + }) + newI++ + } + if lcsI < len(lcs) { + ret = append(ret, &plans.Change{ + Action: plans.NoOp, + Before: lcs[lcsI], + After: lcs[lcsI], + }) + + // All of our indexes advance together now, since the line + // is common to all three sequences. + lcsI++ + oldI++ + newI++ + } + } + return ret +} + +func ctyEqualWithUnknown(old, new cty.Value) bool { + if !old.IsWhollyKnown() || !new.IsWhollyKnown() { + return false + } + return old.Equals(new).True() +} + +// ctyTypesEqual checks equality of two types more loosely +// by avoiding checks of object/tuple elements +// as we render differences on element-by-element basis anyway +func ctyTypesEqual(oldT, newT cty.Type) bool { + if oldT.IsObjectType() && newT.IsObjectType() { + return true + } + if oldT.IsTupleType() && newT.IsTupleType() { + return true + } + return oldT.Equals(newT) +} + +func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { + if cap(path)-len(path) >= minExtra { + return path + } + newCap := cap(path) * 2 + if newCap < (len(path) + minExtra) { + newCap = len(path) + minExtra + } + newPath := make(cty.Path, len(path), newCap) + copy(newPath, path) + return newPath +} + +// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "list" is +// actually represented as a tuple type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsListType() { + return cty.ListValEmpty(ty.ElementType()) + } + return cty.EmptyTupleVal // must need a tuple, then +} + +// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. 
+// +// In particular, this function handles the special situation where a "map" is +// actually represented as an object type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsMapType() { + return cty.MapValEmpty(ty.ElementType()) + } + return cty.EmptyObjectVal // must need an object, then +} + +// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + // Dynamically-typed attributes are not supported inside blocks backed by + // sets, so our result here is always a set. + return cty.SetValEmpty(in.Type().ElementType()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go new file mode 100644 index 00000000000..aa8d7deb2a2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go @@ -0,0 +1,8 @@ +// Package format contains helpers for formatting various Terraform +// structures for human-readable output. +// +// This package is used by the official Terraform CLI in formatting any +// output and is exported to encourage non-official frontends to mimic the +// output formatting as much as possible so that text formats of Terraform +// structures have a consistent look and feel. +package format diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go new file mode 100644 index 00000000000..85ebbfec5ea --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go @@ -0,0 +1,123 @@ +package format + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ObjectValueID takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a unique identifier in the remote +// system that it belongs to, which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the Terraform configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// This function will panic if the given value is not of an object type.
+func ObjectValueID(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["id"] == cty.String: + v := obj.GetAttr("id") + if v.IsKnown() && !v.IsNull() { + return "id", v.AsString() + } + + case atys["name"] == cty.String: + // "name" isn't always globally unique, but if there isn't also an + // "id" then it _often_ is, in practice. + v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + } + + return "", "" +} + +// ObjectValueName takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a human-friendly name in the remote +// system that it belongs to, which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the Terraform configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// Callers that use both ObjectValueName and ObjectValueID at the same time +// should be prepared to get the same attribute key and value from both in +// some cases, since there is overlap between the id-extraction and +// name-extraction heuristics. +// +// This function will panic if the given value is not of an object type. +func ObjectValueName(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["name"] == cty.String: + v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + + case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: + tags := obj.GetAttr("tags") + if tags.IsNull() || !tags.IsWhollyKnown() { + break + } + + switch { + case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): + v := tags.Index(cty.StringVal("name")) + if v.IsKnown() && !v.IsNull() { + return "tags.name", v.AsString() + } + case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): + // AWS-style naming convention + v := tags.Index(cty.StringVal("Name")) + if v.IsKnown() && !v.IsNull() { + return "tags.Name", v.AsString() + } + } + } + + return "", "" +}
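// Illustrative usage sketch (hypothetical values, not part of the vendored
// source): for an object that carries both an "id" attribute and an
// AWS-style "tags" map, ObjectValueIDOrName (defined next) prefers the
// id-extraction heuristic:
func objectValueExample() (k, v string) {
	obj := cty.ObjectVal(map[string]cty.Value{
		"id": cty.StringVal("i-0abc123"),
		"tags": cty.MapVal(map[string]cty.Value{
			"Name": cty.StringVal("web-server"),
		}),
	})
	return ObjectValueIDOrName(obj) // "id", "i-0abc123"
}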
// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID +// and ObjectValueName (in that preference order) to try to extract some sort +// of human-friendly descriptive string value for an object as additional +// context about an object when it is being displayed in a compact way (where +// not all of the attributes are visible). +// +// Just as with the two functions it wraps, it is a best-effort and may return +// two empty strings if no suitable attribute can be found for a given object. +func ObjectValueIDOrName(obj cty.Value) (k, v string) { + k, v = ObjectValueID(obj) + if k != "" { + return + } + k, v = ObjectValueName(obj) + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/plan.go new file mode 100644 index 00000000000..7d303004147 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/plan.go @@ -0,0 +1,306 @@ +package format + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" + + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// Plan is a representation of a plan optimized for display to +// an end-user, as opposed to terraform.Plan which is for internal use. +// +// Plan excludes implementation details that may otherwise appear +// in the main plan, such as destroy actions on data sources (which are +// there only to clean up the state). +type Plan struct { + Resources []*InstanceDiff +} + +// InstanceDiff is a representation of an instance diff optimized +// for display, in conjunction with Plan. +type InstanceDiff struct { + Addr *terraform.ResourceAddress + Action plans.Action + + // Attributes describes changes to the attributes of the instance. + // + // For destroy diffs this is always nil. + Attributes []*AttributeDiff + + Tainted bool + Deposed bool +} + +// AttributeDiff is a representation of an attribute diff optimized +// for display, in conjunction with InstanceDiff. +type AttributeDiff struct { + // Path is a dot-delimited traversal through possibly many levels of list and map structure, + // intended for display purposes only. + Path string + + Action plans.Action + + OldValue string + NewValue string + + NewComputed bool + Sensitive bool + ForcesNew bool +} + +// PlanStats gives summary counts for a Plan. +type PlanStats struct { + ToAdd, ToChange, ToDestroy int +} + +// NewPlan produces a display-oriented Plan from a set of planned changes. +func NewPlan(changes *plans.Changes) *Plan { + log.Printf("[TRACE] NewPlan for %#v", changes) + ret := &Plan{} + if changes == nil { + // Nothing to do! + return ret + } + + for _, rc := range changes.Resources { + addr := rc.Addr + log.Printf("[TRACE] NewPlan found %s (%s)", addr, rc.Action) + dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode + + // We create "delete" actions for data resources so we can clean + // up their entries in state, but this is an implementation detail + // that users shouldn't see. + if dataSource && rc.Action == plans.Delete { + continue + } + + if rc.Action == plans.NoOp { + continue + } + + // For now we'll shim this to work with our old types. + // TODO: Update for the new plan types, ideally also switching over to + // a structural diff renderer instead of a flat renderer. + did := &InstanceDiff{ + Addr: terraform.NewLegacyResourceInstanceAddress(addr), + Action: rc.Action, + } + + if rc.DeposedKey != states.NotDeposed { + did.Deposed = true + } + + // Since this is just a temporary stub implementation on the way + // to replacing this with the structural diff renderer, we currently + // don't include any attributes here.
+ // FIXME: Implement the structural diff renderer to replace this + // codepath altogether. + + ret.Resources = append(ret.Resources, did) + } + + // Sort the instance diffs by their addresses for display. + sort.Slice(ret.Resources, func(i, j int) bool { + iAddr := ret.Resources[i].Addr + jAddr := ret.Resources[j].Addr + return iAddr.Less(jAddr) + }) + + return ret +} + +// Format produces and returns a text representation of the receiving plan +// intended for display in a terminal. +// +// If color is not nil, it is used to colorize the output. +func (p *Plan) Format(color *colorstring.Colorize) string { + if p.Empty() { + return "This plan does nothing." + } + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: false, + } + } + + // Find the longest path length of all the paths that are changing, + // so we can align them all. + keyLen := 0 + for _, r := range p.Resources { + for _, attr := range r.Attributes { + key := attr.Path + + if len(key) > keyLen { + keyLen = len(key) + } + } + } + + buf := new(bytes.Buffer) + for _, r := range p.Resources { + formatPlanInstanceDiff(buf, r, keyLen, color) + } + + return strings.TrimSpace(buf.String()) +} + +// Stats returns statistics about the plan +func (p *Plan) Stats() PlanStats { + var ret PlanStats + for _, r := range p.Resources { + switch r.Action { + case plans.Create: + ret.ToAdd++ + case plans.Update: + ret.ToChange++ + case plans.DeleteThenCreate, plans.CreateThenDelete: + ret.ToAdd++ + ret.ToDestroy++ + case plans.Delete: + ret.ToDestroy++ + } + } + return ret +} + +// ActionCounts returns the number of diffs for each action type +func (p *Plan) ActionCounts() map[plans.Action]int { + ret := map[plans.Action]int{} + for _, r := range p.Resources { + ret[r.Action]++ + } + return ret +} + +// Empty returns true if there are no resource diffs in the receiving plan. +func (p *Plan) Empty() bool { + return len(p.Resources) == 0 +} + +// DiffActionSymbol returns a string that, once passed through a +// colorstring.Colorize, will produce a result that can be written +// to a terminal to produce a symbol made of three printable +// characters, possibly interspersed with VT100 color codes. +func DiffActionSymbol(action plans.Action) string { + switch action { + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset]" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset]" + case plans.Create: + return " [green]+[reset]" + case plans.Delete: + return " [red]-[reset]" + case plans.Read: + return " [cyan]<=[reset]" + case plans.Update: + return " [yellow]~[reset]" + default: + return " ?" + } +}
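// Illustrative sketch (not part of the vendored source): DiffActionSymbol
// output is meant to pass through a colorstring.Colorize before being
// written out; with Disable set, the [red]/[green] codes are stripped, which
// suits non-TTY output:
func actionSymbolExample() string {
	colorizer := &colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true, // strip color codes instead of expanding them
	}
	// Renders the three-character replace symbol as plain "-/+".
	return colorizer.Color(DiffActionSymbol(plans.DeleteThenCreate))
}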
+ color := "yellow" + symbol := DiffActionSymbol(r.Action) + oldValues := true + switch r.Action { + case plans.DeleteThenCreate, plans.CreateThenDelete: + color = "yellow" + case plans.Create: + color = "green" + oldValues = false + case plans.Delete: + color = "red" + case plans.Read: + color = "cyan" + oldValues = false + } + + var extraStr string + if r.Tainted { + extraStr = extraStr + " (tainted)" + } + if r.Deposed { + extraStr = extraStr + " (deposed)" + } + if r.Action.IsReplace() { + extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)") + } + + buf.WriteString( + colorizer.Color(fmt.Sprintf( + "[%s]%s [%s]%s%s\n", + color, symbol, color, addrStr, extraStr, + )), + ) + + for _, attr := range r.Attributes { + + v := attr.NewValue + var dispV string + switch { + case v == "" && attr.NewComputed: + dispV = "" + case attr.Sensitive: + dispV = "" + default: + dispV = fmt.Sprintf("%q", v) + } + + updateMsg := "" + switch { + case attr.ForcesNew && r.Action.IsReplace(): + updateMsg = colorizer.Color(" [red](forces new resource)") + case attr.Sensitive && oldValues: + updateMsg = colorizer.Color(" [yellow](attribute changed)") + } + + if oldValues { + u := attr.OldValue + var dispU string + switch { + case attr.Sensitive: + dispU = "" + default: + dispU = fmt.Sprintf("%q", u) + } + buf.WriteString(fmt.Sprintf( + " %s:%s %s => %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispU, dispV, + updateMsg, + )) + } else { + buf.WriteString(fmt.Sprintf( + " %s:%s %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispV, + updateMsg, + )) + } + } + + // Write the reset color so we don't bleed color into later text + buf.WriteString(colorizer.Color("[reset]\n")) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go new file mode 100644 index 00000000000..14869ad3ca3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go @@ -0,0 +1,208 @@ +package format + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" +) + +// StateOpts are the options for formatting a state. +type StateOpts struct { + // State is the state to format. This is required. + State *states.State + + // Schemas are used to decode attributes. This is required. + Schemas *terraform.Schemas + + // Color is the colorizer. This is optional. + Color *colorstring.Colorize +} + +// State takes a state and returns a string +func State(opts *StateOpts) string { + if opts.Color == nil { + panic("colorize not given") + } + + if opts.Schemas == nil { + panic("schemas not given") + } + + s := opts.State + if len(s.Modules) == 0 { + return "The state file is empty. No resources are represented." 
+ } + + buf := bytes.NewBufferString("[reset]") + p := blockBodyDiffPrinter{ + buf: buf, + color: opts.Color, + action: plans.NoOp, + } + + // Format all the modules + for _, m := range s.Modules { + formatStateModule(p, m, opts.Schemas) + } + + // Write the outputs for the root module + m := s.RootModule() + + if m.OutputValues != nil { + if len(m.OutputValues) > 0 { + p.buf.WriteString("Outputs:\n\n") + } + + // Sort the outputs + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + // Output each output k/v pair + for _, k := range ks { + v := m.OutputValues[k] + p.buf.WriteString(fmt.Sprintf("%s = ", k)) + p.writeValue(v.Value, plans.NoOp, 0) + p.buf.WriteString("\n") + } + } + + trimmedOutput := strings.TrimSpace(p.buf.String()) + trimmedOutput += "[reset]" + + return opts.Color.Color(trimmedOutput) + +} + +func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { + // First get the names of all the resources so we can show them + // in alphabetical order. + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + sort.Strings(names) + + // Go through each resource and begin building up the output. + for _, key := range names { + for k, v := range m.Resources[key].Instances { + // keep these in order to keep the current object first, and + // provide deterministic output for the deposed objects + type obj struct { + header string + instance *states.ResourceInstanceObjectSrc + } + instances := []obj{} + + addr := m.Resources[key].Addr + + taintStr := "" + if v.Current != nil && v.Current.Status == 'T' { + taintStr = " (tainted)" + } + + instances = append(instances, + obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current}) + + for dk, v := range v.Deposed { + instances = append(instances, + obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v}) + } + + // Sort the instances for consistent output. + // Starting the sort from the second index, so the current instance + // is always first. + sort.Slice(instances[1:], func(i, j int) bool { + return instances[i+1].header < instances[j+1].header + }) + + for _, obj := range instances { + header := obj.header + instance := obj.instance + p.buf.WriteString(header) + if instance == nil { + // this shouldn't happen, but there's nothing to do here so + // don't panic below. + continue + } + + var schema *configschema.Block + provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() + if _, exists := schemas.Providers[provider]; !exists { + // This should never happen in normal use because we should've + // loaded all of the schemas and checked things prior to this + // point. We can't return errors here, but since this is UI code + // we will try to do _something_ reasonable. 
+ p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) + continue + } + + switch addr.Mode { + case addrs.ManagedResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "resource %q %q {", + addr.Type, + addr.Name, + )) + case addrs.DataResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "data %q %q {", + addr.Type, + addr.Name, + )) + default: + // should never happen, since the above is exhaustive + p.buf.WriteString(addr.String()) + } + + val, err := instance.Decode(schema.ImpliedType()) + if err != nil { + fmt.Println(err.Error()) + break + } + + path := make(cty.Path, 0, 3) + bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) + if bodyWritten { + p.buf.WriteString("\n") + } + + p.buf.WriteString("}\n\n") + } + } + } + p.buf.WriteString("\n") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go new file mode 100644 index 00000000000..6c7848d23ac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go @@ -0,0 +1,55 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// Backend represents a "backend" block inside a "terraform" block in a module +// or file. +type Backend struct { + Type string + Config hcl.Body + + TypeRange hcl.Range + DeclRange hcl.Range +} + +func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { + return &Backend{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + Config: block.Body, + DeclRange: block.DefRange, + }, nil +} + +// Hash produces a hash value for the reciever that covers the type and the +// portions of the config that conform to the given schema. +// +// If the config does not conform to the schema then the result is not +// meaningful for comparison since it will be based on an incomplete result. +// +// As an exception, required attributes in the schema are treated as optional +// for the purpose of hashing, so that an incomplete configuration can still +// be hashed. Other errors, such as extraneous attributes, have no such special +// case. +func (b *Backend) Hash(schema *configschema.Block) int { + // Don't fail if required attributes are not set. Instead, we'll just + // hash them as nulls. 
+ schema = schema.NoneRequired() + spec := schema.DecoderSpec() + val, _ := hcldec.Decode(b.Config, spec, nil) + if val == cty.NilVal { + val = cty.UnknownVal(schema.ImpliedType()) + } + + toHash := cty.TupleVal([]cty.Value{ + cty.StringVal(b.Type), + val, + }) + + return toHash.Hash() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go new file mode 100644 index 00000000000..66037fcdce4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go @@ -0,0 +1,116 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// ------------------------------------------------------------------------- +// Functions in this file are compatibility shims intended to ease conversion +// from the old configuration loader. Any use of these functions that makes +// a change should generate a deprecation warning explaining to the user how +// to update their code for new patterns. +// +// Shims are particularly important for any patterns that have been widely +// documented in books, tutorials, etc. Users will still be starting from +// these examples and we want to help them adopt the latest patterns rather +// than leave them stranded. +// ------------------------------------------------------------------------- + +// shimTraversalInString takes any arbitrary expression and checks if it is +// a quoted string in the native syntax. If it _is_, then it is parsed as a +// traversal and re-wrapped into a synthetic traversal expression and a +// warning is generated. Otherwise, the given expression is just returned +// verbatim. +// +// This function has no effect on expressions from the JSON syntax, since +// traversals in strings are the required pattern in that syntax. +// +// If wantKeyword is set, the generated warning diagnostic will talk about +// keywords rather than references. The behavior is otherwise unchanged, and +// the caller remains responsible for checking that the result is indeed +// a keyword, e.g. using hcl.ExprAsKeyword. +func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { + // ObjectConsKeyExpr is a special wrapper type used for keys on object + // constructors to deal with the fact that naked identifiers are normally + // handled as "bareword" strings rather than as variable references. Since + // we know we're interpreting as a traversal anyway (and thus it won't + // matter whether it's a string or an identifier) we can safely just unwrap + // here and then process whatever we find inside as normal. + if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { + expr = ocke.Wrapped + } + + if !exprIsNativeQuotedString(expr) { + return expr, nil + } + + strVal, diags := expr.Value(nil) + if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { + // Since we're not even able to attempt a shim here, we'll discard + // the diagnostics we saw so far and let the caller's own error + // handling take care of reporting the invalid expression. + return expr, nil + } + + // The position handling here isn't _quite_ right because it won't + // take into account any escape sequences in the literal string, but + // it should be close enough for any error reporting to make sense. 
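// Illustrative sketch (hypothetical input, not part of the vendored
// source): hclsyntax.ParseTraversalAbs, used just below, is what turns the
// string contents of a legacy quoted reference into a real traversal:
func parseTraversalExample() (hcl.Traversal, hcl.Diagnostics) {
	// "aws_instance.web" as it would appear inside a quoted reference.
	return hclsyntax.ParseTraversalAbs(
		[]byte("aws_instance.web"),
		"example.tf",
		hcl.Pos{Line: 1, Column: 2, Byte: 1}, // position just after the opening quote
	)
}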
+ srcRange := expr.Range() + startPos := srcRange.Start // copy + startPos.Column++ // skip initial quote + startPos.Byte++ // skip initial quote + + traversal, tDiags := hclsyntax.ParseTraversalAbs( + []byte(strVal.AsString()), + srcRange.Filename, + startPos, + ) + diags = append(diags, tDiags...) + + // For initial release our deprecation warnings are disabled to allow + // a period where modules can be compatible with both old and new + // conventions. + // FIXME: Re-enable these deprecation warnings in a release prior to + // Terraform 0.13 and then remove the shims altogether for 0.13. + /* + if wantKeyword { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted keywords are deprecated", + Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", + Subject: &srcRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted references are deprecated", + Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", + Subject: &srcRange, + }) + } + */ + + return &hclsyntax.ScopeTraversalExpr{ + Traversal: traversal, + SrcRange: srcRange, + }, diags +} + +// shimIsIgnoreChangesStar returns true if the given expression seems to be +// a string literal whose value is "*". This is used to support a legacy +// form of ignore_changes = all. +// +// This function does not itself emit any diagnostics, so it's the caller's +// responsibility to emit a warning diagnostic when this function returns true. +func shimIsIgnoreChangesStar(expr hcl.Expression) bool { + val, valDiags := expr.Value(nil) + if valDiags.HasErrors() { + return false + } + if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { + return false + } + return val.AsString() == "*" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go new file mode 100644 index 00000000000..c90b51a1132 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go @@ -0,0 +1,205 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // Root points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // Parent points to the Config for the module that directly calls +// this module.
If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // Children points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// NewEmptyConfig constructs a single-node configuration tree with an empty +// root module. This is generally a pretty useless thing to do, so most callers +// should instead use BuildConfig. +func NewEmptyConfig() *Config { + ret := &Config{} + ret.Root = ret + ret.Children = make(map[string]*Config) + ret.Module = &Module{} + return ret +} + +// Depth returns the number of "hops" the receiver is from the root of its +// module tree, with the root module having a depth of zero. +func (c *Config) Depth() int { + ret := 0 + this := c + for this.Parent != nil { + ret++ + this = this.Parent + } + return ret +} + +// DeepEach calls the given function once for each module in the tree, starting +// with the receiver. +// +// A parent is always called before its children and children of a particular +// node are visited in lexicographic order by their names. +func (c *Config) DeepEach(cb func(c *Config)) { + cb(c) + + names := make([]string, 0, len(c.Children)) + for name := range c.Children { + names = append(names, name) + } + sort.Strings(names) // visit children in lexicographic order, as documented above + + for _, name := range names { + c.Children[name].DeepEach(cb) + } +} + +// AllModules returns a slice containing the receiver and all of its +// descendent nodes in the module tree, in the same order they would be +// visited by DeepEach.
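// Illustrative usage sketch (hypothetical tree, not part of the vendored
// source): DeepEach visits a parent before its children, so collecting the
// paths from a two-node tree yields the root (empty path) first:
func deepEachExample() []string {
	child := &Config{Module: &Module{}, Path: addrs.Module{"vpc"}}
	root := &Config{Module: &Module{}, Children: map[string]*Config{"vpc": child}}
	root.Root = root
	child.Root = root
	child.Parent = root

	var seen []string
	root.DeepEach(func(c *Config) {
		seen = append(seen, c.Path.String())
	})
	return seen // ["", "vpc"]
}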
+func (c *Config) AllModules() []*Config { + var ret []*Config + c.DeepEach(func(c *Config) { + ret = append(ret, c) + }) + return ret +} + +// Descendent returns the descendent config that has the given path beneath +// the receiver, or nil if there is no such module. +// +// The path traverses the static module tree, prior to any expansion to handle +// count and for_each arguments. +// +// An empty path will just return the receiver, and is therefore pointless. +func (c *Config) Descendent(path addrs.Module) *Config { + current := c + for _, name := range path { + current = current.Children[name] + if current == nil { + return nil + } + } + return current +} + +// DescendentForInstance is like Descendent except that it accepts a path +// to a particular module instance in the dynamic module graph, returning +// the node from the static module graph that corresponds to it. +// +// All instances created by a particular module call share the same +// configuration, so the keys within the given path are disregarded. +func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { + current := c + for _, step := range path { + current = current.Children[step.Name] + if current == nil { + return nil + } + } + return current +} + +// ProviderTypes returns the names of each distinct provider type referenced +// in the receiving configuration. +// +// This is a helper for easily determining which provider types are required +// to fully interpret the configuration, though it does not include version +// information and so callers are expected to have already dealt with +// provider version selection in an earlier step and have identified suitable +// versions for each provider. +func (c *Config) ProviderTypes() []string { + m := make(map[string]struct{}) + c.gatherProviderTypes(m) + + ret := make([]string, 0, len(m)) + for k := range m { + ret = append(ret, k) + } + sort.Strings(ret) + return ret +} +func (c *Config) gatherProviderTypes(m map[string]struct{}) { + if c == nil { + return + } + + for _, pc := range c.Module.ProviderConfigs { + m[pc.Name] = struct{}{} + } + for _, rc := range c.Module.ManagedResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + for _, rc := range c.Module.DataResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + + // Must also visit our child modules, recursively. + for _, cc := range c.Children { + cc.gatherProviderTypes(m) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go new file mode 100644 index 00000000000..f1129bc19a0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go @@ -0,0 +1,180 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. 
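+//
+// A minimal sketch of a call (an editor-added illustration, not upstream
+// code), using the DisabledModuleWalker defined below for a configuration
+// that makes no module calls:
+//
+//	cfg, diags := BuildConfig(rootMod, DisabledModuleWalker)
+//	if diags.HasErrors() {
+//		// handle configuration errors
+//	}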
+func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := map[string]*Config{} + + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + VersionConstraint: call.Version, + Parent: parent, + CallRange: call.DeclRange, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. + continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallRange: call.DeclRange, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = append(diags, modDiags...) + + ret[call.Name] = child + } + + return ret, diags +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. + LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. 
It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source range for the header of the "module" block + // in configuration that prompted this request. This can be used as the + // subject of an error diagnostic that relates to the module call itself, + // rather than to either its source address or its version number. + CallRange hcl.Range +} + +// DisabledModuleWalker is a ModuleWalker that doesn't support +// child modules at all, and so will return an error if asked to load one. +// +// This is provided primarily for testing. There is no good reason to use this +// in the main application. +var DisabledModuleWalker ModuleWalker + +func init() { + DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Child modules are not supported", + Detail: "Child module calls are not allowed in this context.", + Subject: &req.CallRange, + }, + } + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go new file mode 100644 index 00000000000..ebbeb3b629f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go @@ -0,0 +1,125 @@ +package configload + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. 
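+//
+// For example (an editor-added sketch; the directory paths are hypothetical):
+//
+//	if err := copyDir("/tmp/work/dst", "/tmp/work/src"); err != nil {
+//		// handle copy failure
+//	}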
+func copyDir(dst, src string) error {
+	src, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if path == src {
+			return nil
+		}
+
+		if strings.HasPrefix(filepath.Base(path), ".") {
+			// Skip any dot files
+			if info.IsDir() {
+				return filepath.SkipDir
+			} else {
+				return nil
+			}
+		}
+
+		// The "path" has the src prefixed to it. We need to join our
+		// destination with the path without the src on it.
+		dstPath := filepath.Join(dst, path[len(src):])
+
+		// We don't want to try to copy the same file over itself.
+		if eq, err := sameFile(path, dstPath); eq {
+			return nil
+		} else if err != nil {
+			return err
+		}
+
+		// If we have a directory, make that subdirectory, then continue
+		// the walk.
+		if info.IsDir() {
+			if path == filepath.Join(src, dst) {
+				// dst is in src; don't walk it.
+				return nil
+			}
+
+			if err := os.MkdirAll(dstPath, 0755); err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		// If the current path is a symlink, recreate the symlink relative to
+		// the dst directory
+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			target, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			return os.Symlink(target, dstPath)
+		}
+
+		// If we have a file, copy the contents.
+		srcF, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		dstF, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer dstF.Close()
+
+		if _, err := io.Copy(dstF, srcF); err != nil {
+			return err
+		}
+
+		// Chmod it
+		return os.Chmod(dstPath, info.Mode())
+	}
+
+	return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine whether two paths refer to the same file.
+// If the paths don't match directly, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+	if a == b {
+		return true, nil
+	}
+
+	aIno, err := inode(a)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	bIno, err := inode(b)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+
+	if aIno > 0 && aIno == bIno {
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
new file mode 100644
index 00000000000..8b615f90260
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
@@ -0,0 +1,4 @@
+// Package configload knows how to install modules into the .terraform/modules
+// directory and to load modules from those installed locations. It is used
+// in conjunction with the LoadConfig function in the parent package.
+package configload diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go new file mode 100644 index 00000000000..57df04145a0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go new file mode 100644 index 00000000000..4dc28eaa897 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go new file mode 100644 index 00000000000..0d22e672642 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package configload + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go new file mode 100644 index 00000000000..302b6b5d7dc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go @@ -0,0 +1,150 @@ +package configload + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco" + "github.com/spf13/afero" +) + +// A Loader instance is the main entry-point for loading configurations via +// this package. +// +// It extends the general config-loading functionality in the parent package +// "configs" to support installation of modules from remote sources and +// loading full configurations using modules that were previously installed. 
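+//
+// A typical sequence of calls (an editor-added sketch; the directory names
+// are assumptions, not requirements of this package):
+//
+//	loader, err := NewLoader(&Config{
+//		ModulesDir: ".terraform/modules",
+//	})
+//	if err != nil {
+//		// the module manifest could not be read
+//	}
+//	cfg, diags := loader.LoadConfig(".")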
+type Loader struct { + // parser is used to read configuration + parser *configs.Parser + + // modules is used to install and locate descendent modules that are + // referenced (directly or indirectly) from the root module. + modules moduleMgr +} + +// Config is used with NewLoader to specify configuration arguments for the +// loader. +type Config struct { + // ModulesDir is a path to a directory where descendent modules are + // (or should be) installed. (This is usually the + // .terraform/modules directory, in the common case where this package + // is being loaded from the main Terraform CLI package.) + ModulesDir string + + // Services is the service discovery client to use when locating remote + // module registry endpoints. If this is nil then registry sources are + // not supported, which should be true only in specialized circumstances + // such as in tests. + Services *disco.Disco +} + +// NewLoader creates and returns a loader that reads configuration from the +// real OS filesystem. +// +// The loader has some internal state about the modules that are currently +// installed, which is read from disk as part of this function. If that +// manifest cannot be read then an error will be returned. +func NewLoader(config *Config) (*Loader, error) { + fs := afero.NewOsFs() + parser := configs.NewParser(fs) + reg := registry.NewClient(config.Services, nil) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: true, + Dir: config.ModulesDir, + Services: config.Services, + Registry: reg, + }, + } + + err := ret.modules.readModuleManifestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %s", err) + } + + return ret, nil +} + +// ModulesDir returns the path to the directory where the loader will look for +// the local cache of remote module packages. +func (l *Loader) ModulesDir() string { + return l.modules.Dir +} + +// RefreshModules updates the in-memory cache of the module manifest from the +// module manifest file on disk. This is not necessary in normal use because +// module installation and configuration loading are separate steps, but it +// can be useful in tests where module installation is done as a part of +// configuration loading by a helper function. +// +// Call this function after any module installation where an existing loader +// is already alive and may be used again later. +// +// An error is returned if the manifest file cannot be read. +func (l *Loader) RefreshModules() error { + if l == nil { + // Nothing to do, then. + return nil + } + return l.modules.readModuleManifestSnapshot() +} + +// Parser returns the underlying parser for this loader. +// +// This is useful for loading other sorts of files than the module directories +// that a loader deals with, since then they will share the source code cache +// for this loader and can thus be shown as snippets in diagnostic messages. +func (l *Loader) Parser() *configs.Parser { + return l.parser +} + +// Sources returns the source code cache for the underlying parser of this +// loader. This is a shorthand for l.Parser().Sources(). +func (l *Loader) Sources() map[string][]byte { + return l.parser.Sources() +} + +// IsConfigDir returns true if and only if the given directory contains at +// least one Terraform configuration file. This is a wrapper around calling +// the same method name on the loader's parser. 
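+//
+// For example (an editor-added sketch):
+//
+//	if !loader.IsConfigDir(dir) {
+//		// report that dir contains no Terraform configuration files
+//	}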
+func (l *Loader) IsConfigDir(path string) bool {
+	return l.parser.IsConfigDir(path)
+}
+
+// ImportSources writes into the receiver's source code the given source
+// code buffers.
+//
+// This is useful in the situation where an ancillary loader is created for
+// some reason (e.g. loading config from a plan file) but the cached source
+// code from that loader must be imported into the "main" loader in order
+// to return source code snapshots in diagnostic messages.
+//
+// loader.ImportSources(otherLoader.Sources())
+func (l *Loader) ImportSources(sources map[string][]byte) {
+	p := l.Parser()
+	for name, src := range sources {
+		p.ForceFileSource(name, src)
+	}
+}
+
+// ImportSourcesFromSnapshot writes into the receiver's source code the
+// source files from the given snapshot.
+//
+// This is similar to ImportSources but knows how to unpack and flatten a
+// snapshot data structure to get the corresponding flat source file map.
+func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) {
+	p := l.Parser()
+	for _, m := range snap.Modules {
+		baseDir := m.Dir
+		for fn, src := range m.Files {
+			fullPath := filepath.Join(baseDir, fn)
+			p.ForceFileSource(fullPath, src)
+		}
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go
new file mode 100644
index 00000000000..64219bb7793
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go
@@ -0,0 +1,105 @@
+package configload
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+)
+
+// LoadConfig reads the Terraform module in the given directory and uses it as the
+// root module to build the static module tree that represents a configuration,
+// assuming that all required descendent modules have already been installed.
+//
+// If error diagnostics are returned, the returned configuration may be either
+// nil or incomplete. In the latter case, cautious static analysis is possible
+// in spite of the errors.
+//
+// LoadConfig performs the basic syntax and uniqueness validations that are
+// required to process the individual modules, and also detects any
+// disagreements between the configuration and the modules recorded as
+// installed in the manifest.
+func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
+	rootMod, diags := l.parser.LoadConfigDir(rootDir)
+	if rootMod == nil {
+		return nil, diags
+	}
+
+	cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
+	diags = append(diags, cDiags...)
+
+	return cfg, diags
+}
+
+// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
+// are presumed to have already been installed. A different function
+// (moduleWalkerInstall) is used for installation.
+func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
+	// Since we're just loading here, we expect that all referenced modules
+	// will be already installed and described in our manifest. However, we
+	// do verify that the manifest and the configuration are in agreement
+	// so that we can prompt the user to run "terraform init" if not.
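+	//
+	// The lookup below is keyed by the module's path in the static module
+	// tree (with no instance keys), so every instance created by a given
+	// module call resolves to the same installed directory record.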
+ + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.CallRange, + }, + } + } + + var diags hcl.Diagnostics + + // Check for inconsistencies between manifest and config + if req.SourceAddr != record.SourceAddr { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module source has changed", + Detail: "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if len(req.VersionConstraint.Required) > 0 && record.Version == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: fmt.Sprintf( + "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + record.Version, + ), + Subject: &req.SourceAddrRange, + }) + } + + mod, mDiags := l.parser.LoadConfigDir(record.Dir) + diags = append(diags, mDiags...) + if mod == nil { + // nil specifically indicates that the directory does not exist or + // cannot be read, so in this case we'll discard any generic diagnostics + // returned from LoadConfigDir and produce our own context-sensitive + // error message. + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), + Subject: &req.CallRange, + }, + } + } + + return mod, record.Version, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go new file mode 100644 index 00000000000..0eb2c376d47 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go @@ -0,0 +1,504 @@ +package configload + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/spf13/afero" +) + +// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously +// creates an in-memory snapshot of the configuration files used, which can +// be later used to create a loader that may read only from this snapshot. 
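+//
+// An illustrative round trip (an editor-added sketch, not upstream code,
+// assuming the snapshot was created from the same root directory):
+//
+//	cfg, snap, diags := loader.LoadConfigWithSnapshot(".")
+//	// ... perhaps after writing snap into a plan file and reading it back:
+//	snapLoader := NewLoaderFromSnapshot(snap)
+//	cfg2, diags2 := snapLoader.LoadConfig(".")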
+func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) {
+	rootMod, diags := l.parser.LoadConfigDir(rootDir)
+	if rootMod == nil {
+		return nil, nil, diags
+	}
+
+	snap := &Snapshot{
+		Modules: map[string]*SnapshotModule{},
+	}
+	walker := l.makeModuleWalkerSnapshot(snap)
+	cfg, cDiags := configs.BuildConfig(rootMod, walker)
+	diags = append(diags, cDiags...)
+
+	addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil)
+	diags = append(diags, addDiags...)
+
+	return cfg, snap, diags
+}
+
+// NewLoaderFromSnapshot creates a Loader that reads files only from the
+// given snapshot.
+//
+// A snapshot-based loader cannot install modules, so calling InstallModules
+// on the return value will cause a panic.
+//
+// A snapshot-based loader also has access only to configuration files. Its
+// underlying parser does not have access to other files in the native
+// filesystem, such as values files. For those, either use a normal loader
+// (created by NewLoader) or use the configs.Parser API directly.
+func NewLoaderFromSnapshot(snap *Snapshot) *Loader {
+	fs := snapshotFS{snap}
+	parser := configs.NewParser(fs)
+
+	ret := &Loader{
+		parser: parser,
+		modules: moduleMgr{
+			FS:         afero.Afero{Fs: fs},
+			CanInstall: false,
+			manifest:   snap.moduleManifest(),
+		},
+	}
+
+	return ret
+}
+
+// Snapshot is an in-memory representation of the source files from a
+// configuration, which can be used as an alternative configuration source
+// for a loader with NewLoaderFromSnapshot.
+//
+// The primary purpose of a Snapshot is to build the configuration portion
+// of a plan file (see ../../plans/planfile) so that it can later be reloaded
+// and used to recover the exact configuration that the plan was built from.
+type Snapshot struct {
+	// Modules is a map from opaque module keys (suitable for use as directory
+	// names on all supported operating systems) to the snapshot information
+	// about each module.
+	Modules map[string]*SnapshotModule
+}
+
+// NewEmptySnapshot constructs and returns a snapshot containing only an empty
+// root module. This is not useful for anything except placeholders in tests.
+func NewEmptySnapshot() *Snapshot {
+	return &Snapshot{
+		Modules: map[string]*SnapshotModule{
+			"": &SnapshotModule{
+				Files: map[string][]byte{},
+			},
+		},
+	}
+}
+
+// SnapshotModule represents a single module within a Snapshot.
+type SnapshotModule struct {
+	// Dir is the path, relative to the root directory given when the
+	// snapshot was created, where the module appears in the snapshot's
+	// virtual filesystem.
+	Dir string
+
+	// Files is a map from each configuration file filename for the
+	// module to a raw byte representation of the source file contents.
+	Files map[string][]byte
+
+	// SourceAddr is the source address given for this module in configuration.
+	SourceAddr string `json:"Source"`
+
+	// Version is the version of the module that is installed, or nil if
+	// the module is installed from a source that does not support versions.
+	Version *version.Version `json:"-"`
+}
+
+// moduleManifest constructs a module manifest based on the contents of
+// the receiving snapshot.
+func (s *Snapshot) moduleManifest() modsdir.Manifest {
+	ret := make(modsdir.Manifest)
+
+	for k, modSnap := range s.Modules {
+		ret[k] = modsdir.Record{
+			Key:        k,
+			Dir:        modSnap.Dir,
+			SourceAddr: modSnap.SourceAddr,
+			Version:    modSnap.Version,
+		}
+	}
+
+	return ret
+}
+
+// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit
+// the same lookup behaviors as l.moduleWalkerLoad but will additionally write
+// source files from the referenced modules into the given snapshot.
+func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker {
+	return configs.ModuleWalkerFunc(
+		func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
+			mod, v, diags := l.moduleWalkerLoad(req)
+			if diags.HasErrors() {
+				return mod, v, diags
+			}
+
+			key := l.modules.manifest.ModuleKey(req.Path)
+			record, exists := l.modules.manifest[key]
+
+			if !exists {
+				// Should never happen, since otherwise moduleWalkerLoad would've
+				// returned an error and we would've returned already.
+				panic(fmt.Sprintf("module %s is not present in manifest", key))
+			}
+
+			addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version)
+			diags = append(diags, addDiags...)
+
+			return mod, v, diags
+		},
+	)
+}
+
+func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir)
+	if moreDiags.HasErrors() {
+		// Any diagnostics we get here should already be present in diags,
+		// so it would be weird to get here, but we'll tolerate it anyway
+		// and return a general error message in that case.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Failed to read directory for module",
+			Detail:   fmt.Sprintf("The source directory %s could not be read", dir),
+		})
+		return diags
+	}
+
+	snapMod := &SnapshotModule{
+		Dir:        dir,
+		Files:      map[string][]byte{},
+		SourceAddr: sourceAddr,
+		Version:    v,
+	}
+
+	files := make([]string, 0, len(primaryFiles)+len(overrideFiles))
+	files = append(files, primaryFiles...)
+	files = append(files, overrideFiles...)
+	sources := l.Sources() // should be populated with all the files we need by now
+	for _, filePath := range files {
+		filename := filepath.Base(filePath)
+		src, exists := sources[filePath]
+		if !exists {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing source file for snapshot",
+				Detail:   fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath),
+			})
+			continue
+		}
+		snapMod.Files[filepath.Clean(filename)] = src
+	}
+
+	snap.Modules[key] = snapMod
+
+	return diags
+}
+
+// snapshotFS is an implementation of afero.Fs that reads from a snapshot.
+//
+// This is not intended as a general-purpose filesystem implementation. Instead,
+// it just supports the minimal functionality required to support the
+// configuration loader and parser as an implementation detail of creating
+// a loader from a snapshot.
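+//
+// For example (an editor-added illustration with a hypothetical path),
+// opening ".terraform/modules/foo/main.tf" succeeds only if some
+// SnapshotModule has Dir ".terraform/modules/foo" and a Files entry named
+// "main.tf".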
+type snapshotFS struct { + snap *Snapshot +} + +var _ afero.Fs = snapshotFS{} + +func (fs snapshotFS) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("cannot create file inside configuration snapshot") +} + +func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directory inside configuration snapshot") +} + +func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directories inside configuration snapshot") +} + +func (fs snapshotFS) Open(name string) (afero.File, error) { + + // Our "filesystem" is sparsely populated only with the directories + // mentioned by modules in our snapshot, so the high-level process + // for opening a file is: + // - Find the module snapshot corresponding to the containing directory + // - Find the file within that snapshot + // - Wrap the resulting byte slice in a snapshotFile to return + // + // The other possibility handled here is if the given name is for the + // module directory itself, in which case we'll return a snapshotDir + // instead. + // + // This function doesn't try to be incredibly robust in supporting + // different permutations of paths, etc because in practice we only + // need to support the path forms that our own loader and parser will + // generate. + + dir := filepath.Dir(name) + fn := filepath.Base(name) + directDir := filepath.Clean(name) + + // First we'll check to see if this is an exact path for a module directory. + // We need to do this first (rather than as part of the next loop below) + // because a module in a child directory of another module can otherwise + // appear to be a file in that parent directory. + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == directDir { + // We've matched the module directory itself + filenames := make([]string, 0, len(candidate.Files)) + for n := range candidate.Files { + filenames = append(filenames, n) + } + sort.Strings(filenames) + return snapshotDir{ + filenames: filenames, + }, nil + } + } + + // If we get here then the given path isn't a module directory exactly, so + // we'll treat it as a file path and try to find a module directory it + // could be located in. 
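+	// For a hypothetical name like ".terraform/modules/foo/main.tf", dir is
+	// ".terraform/modules/foo" and fn is "main.tf"; we look for a module
+	// snapshot whose Dir matches dir and then look up fn within it.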
+ var modSnap *SnapshotModule + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == dir { + modSnap = candidate + break + } + } + if modSnap == nil { + return nil, os.ErrNotExist + } + + src, exists := modSnap.Files[fn] + if !exists { + return nil, os.ErrNotExist + } + + return &snapshotFile{ + src: src, + }, nil +} + +func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return fs.Open(name) +} + +func (fs snapshotFS) Remove(name string) error { + return fmt.Errorf("cannot remove file inside configuration snapshot") +} + +func (fs snapshotFS) RemoveAll(path string) error { + return fmt.Errorf("cannot remove files inside configuration snapshot") +} + +func (fs snapshotFS) Rename(old, new string) error { + return fmt.Errorf("cannot rename file inside configuration snapshot") +} + +func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + _, isDir := f.(snapshotDir) + return snapshotFileInfo{ + name: filepath.Base(name), + isDir: isDir, + }, nil +} + +func (fs snapshotFS) Name() string { + return "ConfigSnapshotFS" +} + +func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("cannot set file mode inside configuration snapshot") +} + +func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("cannot set file times inside configuration snapshot") +} + +type snapshotFile struct { + snapshotFileStub + src []byte + at int64 +} + +var _ afero.File = (*snapshotFile)(nil) + +func (f *snapshotFile) Read(p []byte) (n int, err error) { + if len(p) > 0 && f.at == int64(len(f.src)) { + return 0, io.EOF + } + if f.at > int64(len(f.src)) { + return 0, io.ErrUnexpectedEOF + } + if int64(len(f.src))-f.at >= int64(len(p)) { + n = len(p) + } else { + n = int(int64(len(f.src)) - f.at) + } + copy(p, f.src[f.at:f.at+int64(n)]) + f.at += int64(n) + return +} + +func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { + f.at = off + return f.Read(p) +} + +func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + f.at = offset + case 1: + f.at += offset + case 2: + f.at = int64(len(f.src)) + offset + } + return f.at, nil +} + +type snapshotDir struct { + snapshotFileStub + filenames []string + at int +} + +var _ afero.File = snapshotDir{} + +func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { + names, err := f.Readdirnames(count) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(names)) + for i, name := range names { + ret[i] = snapshotFileInfo{ + name: name, + isDir: false, + } + } + return ret, nil +} + +func (f snapshotDir) Readdirnames(count int) ([]string, error) { + var outLen int + names := f.filenames[f.at:] + if count > 0 { + if len(names) < count { + outLen = len(names) + } else { + outLen = count + } + if len(names) == 0 { + return nil, io.EOF + } + } else { + outLen = len(names) + } + f.at += outLen + + return names[:outLen], nil +} + +// snapshotFileInfo is a minimal implementation of os.FileInfo to support our +// virtual filesystem from snapshots. 
+type snapshotFileInfo struct { + name string + isDir bool +} + +var _ os.FileInfo = snapshotFileInfo{} + +func (fi snapshotFileInfo) Name() string { + return fi.name +} + +func (fi snapshotFileInfo) Size() int64 { + // In practice, our parser and loader never call Size + return -1 +} + +func (fi snapshotFileInfo) Mode() os.FileMode { + return os.ModePerm +} + +func (fi snapshotFileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi snapshotFileInfo) IsDir() bool { + return fi.isDir +} + +func (fi snapshotFileInfo) Sys() interface{} { + return nil +} + +type snapshotFileStub struct{} + +func (f snapshotFileStub) Close() error { + return nil +} + +func (f snapshotFileStub) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("cannot seek") +} + +func (f snapshotFileStub) Write(p []byte) (n int, err error) { + return f.WriteAt(p, 0) +} + +func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) WriteString(s string) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) Name() string { + // in practice, the loader and parser never use this + return "" +} + +func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Stat() (os.FileInfo, error) { + return nil, fmt.Errorf("cannot stat") +} + +func (f snapshotFileStub) Sync() error { + return nil +} + +func (f snapshotFileStub) Truncate(size int64) error { + return fmt.Errorf("cannot write to file in snapshot") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go new file mode 100644 index 00000000000..17032026623 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go @@ -0,0 +1,62 @@ +package configload + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco" + "github.com/spf13/afero" +) + +type moduleMgr struct { + FS afero.Afero + + // CanInstall is true for a module manager that can support installation. + // + // This must be set only if FS is an afero.OsFs, because the installer + // (which uses go-getter) is not aware of the virtual filesystem + // abstraction and will always write into the "real" filesystem. + CanInstall bool + + // Dir is the path where descendent modules are (or will be) installed. + Dir string + + // Services is a service discovery client that will be used to find + // remote module registry endpoints. This object may be pre-loaded with + // cached discovery information. 
+	Services *disco.Disco
+
+	// Registry is a client for the module registry protocol, which is used
+	// when a module is requested from a registry source.
+	Registry *registry.Client
+
+	// manifest tracks the currently-installed modules for this manager.
+	//
+	// The loader may read this. Only the installer may write to it, and
+	// after a set of updates are completed the installer must call
+	// writeModuleManifestSnapshot to persist a snapshot of the manifest
+	// to disk for use on subsequent runs.
+	manifest modsdir.Manifest
+}
+
+func (m *moduleMgr) manifestSnapshotPath() string {
+	return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename)
+}
+
+// readModuleManifestSnapshot loads a manifest snapshot from the filesystem.
+func (m *moduleMgr) readModuleManifestSnapshot() error {
+	r, err := m.FS.Open(m.manifestSnapshotPath())
+	if err != nil {
+		if os.IsNotExist(err) {
+			// We'll treat a missing file as an empty manifest
+			m.manifest = make(modsdir.Manifest)
+			return nil
+		}
+		return err
+	}
+
+	m.manifest, err = modsdir.ReadManifestSnapshot(r)
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go
new file mode 100644
index 00000000000..86ca9d10b77
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go
@@ -0,0 +1,43 @@
+package configload
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// NewLoaderForTests is a variant of NewLoader that is intended to be more
+// convenient for unit tests.
+//
+// The loader's modules directory is a separate temporary directory created
+// for each call. Along with the created loader, this function returns a
+// cleanup function that should be called before the test completes in order
+// to remove that temporary directory.
+//
+// In the case of any errors, t.Fatal (or similar) will be called to halt
+// execution of the test, so the calling test does not need to handle errors
+// itself.
+func NewLoaderForTests(t *testing.T) (*Loader, func()) {
+	t.Helper()
+
+	modulesDir, err := ioutil.TempDir("", "tf-configs")
+	if err != nil {
+		t.Fatalf("failed to create temporary modules dir: %s", err)
+		return nil, func() {}
+	}
+
+	cleanup := func() {
+		os.RemoveAll(modulesDir)
+	}
+
+	loader, err := NewLoader(&Config{
+		ModulesDir: modulesDir,
+	})
+	if err != nil {
+		cleanup()
+		t.Fatalf("failed to create config loader: %s", err)
+		return nil, func() {}
+	}
+
+	return loader, cleanup
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go
new file mode 100644
index 00000000000..41a533745c3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go
@@ -0,0 +1,250 @@
+package configschema
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// CoerceValue attempts to force the given value to conform to the type
+// implied by the receiver.
+//
+// This is useful in situations where a configuration must be derived from
+// an already-decoded value.
It is always better to decode directly from +// configuration where possible since then source location information is +// still available to produce diagnostics, but in special situations this +// function allows a compatible result to be obtained even if the +// configuration objects are not available. +// +// If the given value cannot be converted to conform to the receiving schema +// then an error is returned describing one of possibly many problems. This +// error may be a cty.PathError indicating a position within the nested +// data structure where the problem applies. +func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + default: + attrs[typeName] = blockS.EmptyValue() + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + default: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = 
cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + default: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go new file mode 100644 index 00000000000..c4bc3be03bc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go @@ -0,0 +1,123 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. 
+// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null values if any of the +// block attributes are defined as optional and/or computed respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + ret[name] = attrS.decoderSpec(name) + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. + continue + } + + childSpec := blockS.Block.DecoderSpec() + + // We can only validate 0 or 1 for MinItems, because a dynamic block + // may satisfy any number of min items while only having a single + // block in the config. We cannot validate MaxItems because a + // configuration may have any number of dynamic blocks + minItems := 0 + if blockS.MinItems > 1 { + minItems = 1 + } + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1, + } + if blockS.Nesting == NestingGroup { + ret[name] = &hcldec.DefaultSpec{ + Primary: ret[name], + Default: &hcldec.LiteralSpec{ + Value: blockS.EmptyValue(), + }, + } + } + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, at the expense of our type then not being predictable. + if blockS.Block.ImpliedType().HasDynamicTypes() { + ret[name] = &hcldec.BlockTupleSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + } else { + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + } + case NestingSet: + // We forbid dynamically-typed attributes inside NestingSet in + // InternalValidate, so we don't do anything special to handle + // that here. (There is no set analog to tuple and object types, + // because cty's set implementation depends on knowing the static + // type in order to properly compute its internal hashes.) + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + case NestingMap: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, at the expense of our type then not being predictable. + if blockS.Block.ImpliedType().HasDynamicTypes() { + ret[name] = &hcldec.BlockObjectSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } else { + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. 
+			continue
+		}
+	}
+
+	return ret
+}
+
+func (a *Attribute) decoderSpec(name string) hcldec.Spec {
+	return &hcldec.AttrSpec{
+		Name:     name,
+		Type:     a.Type,
+		Required: a.Required,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go
new file mode 100644
index 00000000000..caf8d730c1e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go
@@ -0,0 +1,14 @@
+// Package configschema contains types for describing the expected structure
+// of a configuration block whose shape is not known until runtime.
+//
+// For example, this is used to describe the expected contents of a resource
+// configuration block, which is defined by the corresponding provider plugin
+// and thus not compiled into Terraform core.
+//
+// A configschema primarily describes the shape of configuration, but it is
+// also suitable for use with other structures derived from the configuration,
+// such as the cached state of a resource or a resource diff.
+//
+// This package should not be confused with the package helper/schema, which
+// is the higher-level helper library used to implement providers themselves.
+package configschema
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go
new file mode 100644
index 00000000000..005da56bf5e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go
@@ -0,0 +1,59 @@
+package configschema
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// EmptyValue returns the "empty value" for the receiving block, which for
+// a block type is a non-null object where all of the attribute values are
+// the empty values of the block's attributes and nested block types.
+//
+// In other words, it returns the value that would be returned if an empty
+// block were decoded against the receiving schema, assuming that no required
+// attribute or block constraints were honored.
+func (b *Block) EmptyValue() cty.Value {
+	vals := make(map[string]cty.Value)
+	for name, attrS := range b.Attributes {
+		vals[name] = attrS.EmptyValue()
+	}
+	for name, blockS := range b.BlockTypes {
+		vals[name] = blockS.EmptyValue()
+	}
+	return cty.ObjectVal(vals)
+}
+
+// EmptyValue returns the "empty value" for the receiving attribute, which is
+// the value that would be returned if there were no definition of the attribute
+// at all, ignoring any required constraint.
+func (a *Attribute) EmptyValue() cty.Value {
+	return cty.NullVal(a.Type)
+}
+
+// EmptyValue returns the "empty value" for when there are zero nested blocks
+// present of the receiving type.
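+//
+// For example (an editor-added note restating the cases below): a
+// NestingSingle block type yields cty.NullVal of its implied object type,
+// while a NestingList block type with a statically-known element type yields
+// cty.ListValEmpty of that type.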
+func (b *NestedBlock) EmptyValue() cty.Value {
+    switch b.Nesting {
+    case NestingSingle:
+        return cty.NullVal(b.Block.ImpliedType())
+    case NestingGroup:
+        return b.Block.EmptyValue()
+    case NestingList:
+        if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
+            return cty.EmptyTupleVal
+        } else {
+            return cty.ListValEmpty(ty)
+        }
+    case NestingMap:
+        if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
+            return cty.EmptyObjectVal
+        } else {
+            return cty.MapValEmpty(ty)
+        }
+    case NestingSet:
+        return cty.SetValEmpty(b.Block.ImpliedType())
+    default:
+        // Should never get here because the above is intended to be exhaustive,
+        // but we'll be robust and return a result nonetheless.
+        return cty.NullVal(cty.DynamicPseudoType)
+    }
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go
new file mode 100644
index 00000000000..c0ee8419d35
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go
@@ -0,0 +1,42 @@
+package configschema
+
+import (
+    "github.com/hashicorp/hcl2/hcldec"
+    "github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType returns the cty.Type that would result from decoding a
+// configuration block using the receiving block schema.
+//
+// ImpliedType always returns a result, even if the given schema is
+// inconsistent. Code that creates configschema.Block objects should be
+// tested using the InternalValidate method to detect any inconsistencies
+// that would cause this method to fall back on defaults and assumptions.
+func (b *Block) ImpliedType() cty.Type {
+    if b == nil {
+        return cty.EmptyObject
+    }
+
+    return hcldec.ImpliedType(b.DecoderSpec())
+}
+
+// ContainsSensitive returns true if any of the attributes of the receiving
+// block or any of its descendent blocks are marked as sensitive.
+//
+// Blocks themselves cannot be sensitive as a whole -- sensitivity is a
+// per-attribute idea -- but sometimes we want to include a whole object
+// decoded from a block in some UI output, and that is safe to do only if
+// none of the contained attributes are sensitive.
+func (b *Block) ContainsSensitive() bool {
+    for _, attrS := range b.Attributes {
+        if attrS.Sensitive {
+            return true
+        }
+    }
+    for _, blockS := range b.BlockTypes {
+        if blockS.ContainsSensitive() {
+            return true
+        }
+    }
+    return false
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go
new file mode 100644
index 00000000000..ebf1abbab16
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go
@@ -0,0 +1,105 @@
+package configschema
+
+import (
+    "fmt"
+    "regexp"
+
+    "github.com/zclconf/go-cty/cty"
+
+    multierror "github.com/hashicorp/go-multierror"
+)
+
+var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
+
+// InternalValidate returns an error if the receiving block and its child
+// schema definitions have any inconsistencies with the documented rules for
+// valid schema.
+//
+// This is intended to be used within unit tests to detect when a given
+// schema is invalid.
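Reviewer note: the comment above says InternalValidate is meant for unit tests. A minimal sketch of such a test, assuming the vendored configschema and cty packages are importable from the test's location; the schema below is hypothetical and deliberately invalid (it sets both Optional and Required):

```go
package configschema_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func TestSchemaInternalValidate(t *testing.T) {
	b := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			// Invalid on purpose: Optional and Required conflict.
			"mode": {Type: cty.String, Optional: true, Required: true},
		},
	}
	if err := b.InternalValidate(); err == nil {
		t.Fatal("expected InternalValidate to report the Optional+Required conflict")
	}
}
```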
+func (b *Block) InternalValidate() error {
+    if b == nil {
+        return fmt.Errorf("top-level block schema is nil")
+    }
+    return b.internalValidate("", nil)
+}
+
+func (b *Block) internalValidate(prefix string, err error) error {
+    for name, attrS := range b.Attributes {
+        if attrS == nil {
+            err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
+            continue
+        }
+        if !validName.MatchString(name) {
+            err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+        }
+        if !attrS.Optional && !attrS.Required && !attrS.Computed {
+            err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
+        }
+        if attrS.Optional && attrS.Required {
+            err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
+        }
+        if attrS.Computed && attrS.Required {
+            err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
+        }
+        if attrS.Type == cty.NilType {
+            err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
+        }
+    }
+
+    for name, blockS := range b.BlockTypes {
+        if blockS == nil {
+            err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
+            continue
+        }
+
+        if _, isAttr := b.Attributes[name]; isAttr {
+            err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
+        } else if !validName.MatchString(name) {
+            err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+        }
+
+        if blockS.MinItems < 0 || blockS.MaxItems < 0 {
+            err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than or equal to zero", prefix, name))
+        }
+
+        switch blockS.Nesting {
+        case NestingSingle:
+            switch {
+            case blockS.MinItems != blockS.MaxItems:
+                err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
+            case blockS.MinItems < 0 || blockS.MinItems > 1:
+                err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
+            }
+        case NestingGroup:
+            if blockS.MinItems != 0 || blockS.MaxItems != 0 {
+                err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name))
+            }
+        case NestingList, NestingSet:
+            if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
+                err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
+            }
+            if blockS.Nesting == NestingSet {
+                ety := blockS.Block.ImpliedType()
+                if ety.HasDynamicTypes() {
+                    // This is not permitted because the HCL (cty) set implementation
+                    // needs to know the exact type of set elements in order to
+                    // properly hash them, and so can't support mixed types.
+                    err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name))
+                }
+            }
+        case NestingMap:
+            if blockS.MinItems != 0 || blockS.MaxItems != 0 {
+                err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
+            }
+        default:
+            err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting))
+        }
+
+        subPrefix := prefix + name + "."
+ err = blockS.Block.internalValidate(subPrefix, err) + } + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go new file mode 100644 index 00000000000..febe743e11a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[nestingModeInvalid-0] + _ = x[NestingSingle-1] + _ = x[NestingGroup-2] + _ = x[NestingList-3] + _ = x[NestingSet-4] + _ = x[NestingMap-5] +} + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go new file mode 100644 index 00000000000..0be3b8fa357 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go @@ -0,0 +1,38 @@ +package configschema + +// NoneRequired returns a deep copy of the receiver with any required +// attributes translated to optional. +func (b *Block) NoneRequired() *Block { + ret := &Block{} + + if b.Attributes != nil { + ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) + } + for name, attrS := range b.Attributes { + ret.Attributes[name] = attrS.forceOptional() + } + + if b.BlockTypes != nil { + ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) + } + for name, blockS := range b.BlockTypes { + ret.BlockTypes[name] = blockS.noneRequired() + } + + return ret +} + +func (b *NestedBlock) noneRequired() *NestedBlock { + ret := *b + ret.Block = *(ret.Block.NoneRequired()) + ret.MinItems = 0 + ret.MaxItems = 0 + return &ret +} + +func (a *Attribute) forceOptional() *Attribute { + ret := *a + ret.Optional = true + ret.Required = false + return &ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go new file mode 100644 index 00000000000..f4702d369ed --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go @@ -0,0 +1,130 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. 
It may be more a matter of convention in other syntaxes, such as
+// JSON.
+//
+// When converted to a value, a Block always becomes an instance of an object
+// type derived from its defined attributes and nested blocks.
+type Block struct {
+    // Attributes describes any attributes that may appear directly inside
+    // the block.
+    Attributes map[string]*Attribute
+
+    // BlockTypes describes any nested block types that may appear directly
+    // inside the block.
+    BlockTypes map[string]*NestedBlock
+}
+
+// Attribute represents a configuration attribute, within a block.
+type Attribute struct {
+    // Type is a type specification that the attribute's value must conform to.
+    Type cty.Type
+
+    // Description is an English-language description of the purpose and
+    // usage of the attribute. A description should be concise and use only
+    // one or two sentences, leaving full definition to longer-form
+    // documentation defined elsewhere.
+    Description string
+
+    // Required, if set to true, specifies that an omitted or null value is
+    // not permitted.
+    Required bool
+
+    // Optional, if set to true, specifies that an omitted or null value is
+    // permitted. This field conflicts with Required.
+    Optional bool
+
+    // Computed, if set to true, specifies that the value comes from the
+    // provider rather than from configuration. If combined with Optional,
+    // then the config may optionally provide an overridden value.
+    Computed bool
+
+    // Sensitive, if set to true, indicates that an attribute may contain
+    // sensitive information.
+    //
+    // At present nothing is done with this information, but callers are
+    // encouraged to set it where appropriate so that it may be used in the
+    // future to help Terraform mask sensitive information. (Terraform
+    // currently achieves this in a limited sense via other mechanisms.)
+    Sensitive bool
+}
+
+// NestedBlock represents the embedding of one block within another.
+type NestedBlock struct {
+    // Block is the description of the block that's nested.
+    Block
+
+    // Nesting provides the nesting mode for the child block, which determines
+    // how many instances of the block are allowed, how many labels it expects,
+    // and how the resulting data will be converted into a data structure.
+    Nesting NestingMode
+
+    // MinItems and MaxItems set, for the NestingList and NestingSet nesting
+    // modes, lower and upper limits on the number of child blocks allowed
+    // of the given type. If both are left at zero, no limit is applied.
+    //
+    // As a special case, both values can be set to 1 for NestingSingle in
+    // order to indicate that a particular single block is required.
+    //
+    // These fields are ignored for other nesting modes and must both be left
+    // at zero.
+    MinItems, MaxItems int
+}
+
+// NestingMode is an enumeration of modes for nesting blocks inside other
+// blocks.
+type NestingMode int
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode
+
+const (
+    nestingModeInvalid NestingMode = iota
+
+    // NestingSingle indicates that only a single instance of a given
+    // block type is permitted, with no labels, and its content should be
+    // provided directly as an object value.
+    NestingSingle
+
+    // NestingGroup is similar to NestingSingle in that it calls for only a
+    // single instance of a given block type with no labels, but it additionally
+    // guarantees that its result will never be null, even if the block is
+    // absent, and instead the nested attributes and blocks will be treated
+    // as absent in that case.
+    // (Any required attributes or blocks within the nested block are not
+    // enforced unless the block is explicitly present in the configuration,
+    // so they are all effectively optional when the block is not present.)
+    //
+    // This is useful for the situation where a remote API has a feature that
+    // is always enabled but has a group of settings related to that feature
+    // that themselves have default values. By using NestingGroup instead of
+    // NestingSingle in that case, generated plans will show the block as
+    // present even when not present in configuration, thus allowing any
+    // default values within to be displayed to the user.
+    NestingGroup
+
+    // NestingList indicates that multiple blocks of the given type are
+    // permitted, with no labels, and that their corresponding objects should
+    // be provided in a list.
+    NestingList
+
+    // NestingSet indicates that multiple blocks of the given type are
+    // permitted, with no labels, and that their corresponding objects should
+    // be provided in a set.
+    NestingSet
+
+    // NestingMap indicates that multiple blocks of the given type are
+    // permitted, each with a single label, and that their corresponding
+    // objects should be provided in a map whose keys are the labels.
+    //
+    // It's an error, therefore, to use the same label value on multiple
+    // blocks.
+    NestingMap
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go
new file mode 100644
index 00000000000..2873830c235
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go
@@ -0,0 +1,173 @@
+package configschema
+
+import (
+    "fmt"
+    "sort"
+
+    "github.com/hashicorp/hcl2/hcl"
+    "github.com/hashicorp/hcl2/hcl/hclsyntax"
+    "github.com/zclconf/go-cty/cty"
+
+    "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean"
+    "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// StaticValidateTraversal checks whether the given traversal (which must be
+// relative) refers to a construct in the receiving schema, returning error
+// diagnostics if any problems are found.
+//
+// This method is "optimistic" in that it will not return errors for possible
+// problems that cannot be detected statically. It is possible that a
+// traversal which passed static validation will still fail when evaluated.
+func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
+    if !traversal.IsRelative() {
+        panic("StaticValidateTraversal on absolute traversal")
+    }
+    if len(traversal) == 0 {
+        return nil
+    }
+
+    var diags tfdiags.Diagnostics
+
+    next := traversal[0]
+    after := traversal[1:]
+
+    var name string
+    switch step := next.(type) {
+    case hcl.TraverseAttr:
+        name = step.Name
+    case hcl.TraverseIndex:
+        // No other traversal step types are allowed directly at a block.
+        // If it looks like the user was trying to use index syntax to
+        // access an attribute then we'll produce a specialized message.
+        key := step.Key
+        if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
+            maybeName := key.AsString()
+            if hclsyntax.ValidIdentifier(maybeName) {
+                diags = diags.Append(&hcl.Diagnostic{
+                    Severity: hcl.DiagError,
+                    Summary:  `Invalid index operation`,
+                    Detail:   fmt.Sprintf(`Only attribute access is allowed here. 
Did you mean to access attribute %q using the dot operator?`, maybeName), + Subject: &step.SrcRange, + }) + return diags + } + } + // If it looks like some other kind of index then we'll use a generic error. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid index operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: &step.SrcRange, + }) + return diags + default: + // No other traversal types should appear in a normal valid traversal, + // but we'll handle this with a generic error anyway to be robust. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: next.SourceRange().Ptr(), + }) + return diags + } + + if attrS, exists := b.Attributes[name]; exists { + // For attribute validation we will just apply the rest of the + // traversal to an unknown value of the attribute type and pass + // through HCL's own errors, since we don't want to replicate all of + // HCL's type checking rules here. + val := cty.UnknownVal(attrS.Type) + _, hclDiags := after.TraverseRel(val) + diags = diags.Append(hclDiags) + return diags + } + + if blockS, exists := b.BlockTypes[name]; exists { + moreDiags := blockS.staticValidateTraversal(name, after) + diags = diags.Append(moreDiags) + return diags + } + + // If we get here then the name isn't valid at all. We'll collect up + // all of the names that _are_ valid to use as suggestions. + var suggestions []string + for name := range b.Attributes { + suggestions = append(suggestions, name) + } + for name := range b.BlockTypes { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unsupported attribute`, + Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), + Subject: next.SourceRange().Ptr(), + }) + + return diags +} + +func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { + if b.Nesting == NestingSingle || b.Nesting == NestingGroup { + // Single blocks are easy: just pass right through. + return b.Block.StaticValidateTraversal(traversal) + } + + if len(traversal) == 0 { + // It's always valid to access a nested block's attribute directly. + return nil + } + + var diags tfdiags.Diagnostics + next := traversal[0] + after := traversal[1:] + + switch b.Nesting { + + case NestingSet: + // Can't traverse into a set at all, since it does not have any keys + // to index with. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Cannot index a set value`, + Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), + Subject: next.SourceRange().Ptr(), + }) + return diags + + case NestingList: + if _, ok := next.(hcl.TraverseIndex); ok { + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), + Subject: next.SourceRange().Ptr(), + }) + } + return diags + + case NestingMap: + // Both attribute and index steps are valid for maps, so we'll just + // pass through here and let normal evaluation catch an + // incorrectly-typed index key later, if present. + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + return diags + + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. (Note that we handled NestingSingle separately + // back at the start of this function.) + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go new file mode 100644 index 00000000000..b1984768fe3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go @@ -0,0 +1,23 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { + var ret []hcl.Traversal + exprs, diags := hcl.ExprList(attr.Expr) + + for _, expr := range exprs { + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + ret = append(ret, traversal) + } + } + + return ret, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go new file mode 100644 index 00000000000..f01eb79f400 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go @@ -0,0 +1,19 @@ +// Package configs contains types that represent Terraform configurations and +// the different elements thereof. +// +// The functionality in this package can be used for some static analyses of +// Terraform configurations, but this package generally exposes representations +// of the configuration source code rather than the result of evaluating these +// objects. The sibling package "lang" deals with evaluation of structures +// and expressions in the configuration. +// +// Due to its close relationship with HCL, this package makes frequent use +// of types from the HCL API, including raw HCL diagnostic messages. Such +// diagnostics can be converted into Terraform-flavored diagnostics, if needed, +// using functions in the sibling package tfdiags. +// +// The Parser type is the main entry-point into this package. The LoadConfigDir +// method can be used to load a single module directory, and then a full +// configuration (including any descendent modules) can be produced using +// the top-level BuildConfig method. 
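Reviewer note: a sketch of the traversal decoding that the unexported `decodeDependsOn` above performs, rebuilt from the public hcl2 helpers it uses (`hcl.ExprList` plus `hcl.AbsTraversalForExpr`); the string-shim step is omitted for brevity. The configuration snippet and file name are hypothetical, and diagnostics are ignored here only to keep the sketch short.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	// A hypothetical depends_on expression, as it would appear in a module.
	src := []byte(`depends_on = [aws_instance.example, module.network]`)
	f, _ := hclparse.NewParser().ParseHCL(src, "example.tf")
	attrs, _ := f.Body.JustAttributes()

	// Each list element becomes one traversal, mirroring decodeDependsOn.
	exprs, _ := hcl.ExprList(attrs["depends_on"].Expr)
	for _, expr := range exprs {
		trav, _ := hcl.AbsTraversalForExpr(expr)
		fmt.Println(trav.RootName()) // "aws_instance", then "module"
	}
}
```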
+package configs diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go new file mode 100644 index 00000000000..bb4228d98c4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go @@ -0,0 +1,424 @@ +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. +func FlatmapValueFromHCL2(v cty.Value) map[string]string { + if v.IsNull() { + return nil + } + + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) + } + + m := make(map[string]string) + flatmapValueFromHCL2Map(m, "", v) + return m +} + +func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { + ty := val.Type() + switch { + case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: + flatmapValueFromHCL2Primitive(m, key, val) + case ty.IsObjectType() || ty.IsMapType(): + flatmapValueFromHCL2Map(m, key+".", val) + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + flatmapValueFromHCL2Seq(m, key+".", val) + default: + panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) + } +} + +func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { + if !val.IsKnown() { + m[key] = UnknownVariableValue + return + } + if val.IsNull() { + // Omit entirely + return + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + // Should not be possible, since all primitive types can convert to string. + panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) + } + m[key] = val.AsString() +} + +func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + switch { + case val.Type().IsObjectType(): + // Whole objects can't be unknown in flatmap, so instead we'll + // just write all of the attribute values out as unknown. 
+ for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + len := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + len++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(len) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. 
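Reviewer note: a sketch of the flatmap round trip these two functions implement, assuming the vendored hcl2shim and cty packages; the object type and values are hypothetical. A list becomes a `"#"` count key plus one key per element, and HCL2ValueFromFlatmap reverses the encoding given the intended type.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"tags": cty.List(cty.String),
	})
	v := cty.ObjectVal(map[string]cty.Value{
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})

	// Encodes to map[tags.#:2 tags.0:a tags.1:b]; the top-level object
	// itself gets no "%" count key because its attribute count is fixed.
	m := hcl2shim.FlatmapValueFromHCL2(v)
	fmt.Println(m)

	// Decoding with the same object type recovers the original value.
	back, err := hcl2shim.HCL2ValueFromFlatmap(m, ty)
	fmt.Println(back, err)
}
```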
+func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. + return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care 
about the "count" of a map for our + // purposes here, but we do need to check if it _exists_ in order to + // recognize the difference between null (not set at all) and empty. + if strCount, exists := m[prefix+"%"]; !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. + key := fullKey[len(prefix):] + if key == "%" { + // Ignore the "count" key + continue + } + + val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[key] = val + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + return cty.MapVal(vals), nil +} + +func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + + ety := ty.ElementType() + if count == 0 { + return cty.ListValEmpty(ety), nil + } + + vals = make([]cty.Value, count) + for i := 0; i < count; i++ { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + + return cty.ListVal(vals), nil +} + +func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + strCount, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // Keep track of keys we've seen, se we don't add the same set value + // multiple times. The cty.Set will normally de-duplicate values, but we may + // have unknown values that would not show as equivalent. + seen := map[string]bool{} + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + subKey := fullKey[len(prefix):] + if subKey == "#" { + // Ignore the "count" key + continue + } + key := fullKey + if dot := strings.IndexByte(subKey, '.'); dot != -1 { + key = fullKey[:dot+len(prefix)] + } + + if seen[key] { + continue + } + + seen[key] = true + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. 
+ + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals = append(vals, val) + } + + if len(vals) == 0 && strCount == "1" { + // An empty set wouldn't be represented in the flatmap, so this must be + // a single empty object since the count is actually 1. + // Add an appropriately typed null value to the set. + var val cty.Value + switch { + case ety.IsMapType(): + val = cty.MapValEmpty(ety) + case ety.IsListType(): + val = cty.ListValEmpty(ety) + case ety.IsSetType(): + val = cty.SetValEmpty(ety) + case ety.IsObjectType(): + // TODO: cty.ObjectValEmpty + objectMap := map[string]cty.Value{} + for attr, ty := range ety.AttributeTypes() { + objectMap[attr] = cty.NullVal(ty) + } + val = cty.ObjectVal(objectMap) + default: + val = cty.NullVal(ety) + } + vals = append(vals, val) + + } else if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go new file mode 100644 index 00000000000..3403c026bf8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go @@ -0,0 +1,276 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. 
+// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff. +func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if 
err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go new file mode 100644 index 00000000000..19651c81cf2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl2/hcl" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go new file mode 100644 index 00000000000..c0c816418a2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go @@ -0,0 +1,353 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/hil/ast" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
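Reviewer note: a sketch of the cty-to-legacy conversion described above, assuming the vendored packages. ConfigValueFromHCL2 (which ConfigValueFromHCL2Block builds on) maps known primitives to plain Go scalars, unknowns to the UnknownVariableValue sentinel, and nulls to nil.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	fmt.Println(hcl2shim.ConfigValueFromHCL2(cty.NumberIntVal(5)))        // 5 (Go int)
	fmt.Println(hcl2shim.ConfigValueFromHCL2(cty.StringVal("ok")))        // "ok"
	fmt.Println(hcl2shim.ConfigValueFromHCL2(cty.UnknownVal(cty.String))) // sentinel UUID string
	fmt.Println(hcl2shim.ConfigValueFromHCL2(cty.NullVal(cty.String)))    // <nil>
}
```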
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} + +func HILVariableFromHCL2Value(v cty.Value) ast.Variable { + if v.IsNull() { + // Caller should guarantee/check this before calling + panic("Null values cannot be represented in HIL") + } + if !v.IsKnown() { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: UnknownVariableValue, + } + } + + switch v.Type() { + case cty.Bool: + return ast.Variable{ + Type: ast.TypeBool, + Value: v.True(), + } + case cty.Number: + v := ConfigValueFromHCL2(v) + switch tv := v.(type) { + case int: + return ast.Variable{ + Type: ast.TypeInt, + Value: tv, + } + case float64: + return ast.Variable{ + Type: ast.TypeFloat, + Value: tv, + } + default: + // should never happen + panic("invalid return value for configValueFromHCL2") + } + case cty.String: + return ast.Variable{ + Type: ast.TypeString, + Value: v.AsString(), + } + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]ast.Variable, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, HILVariableFromHCL2Value(ev)) + } + // If we were given a tuple then this could actually produce an invalid + // list with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous list would be. 
+        return ast.Variable{
+            Type:  ast.TypeList,
+            Value: l,
+        }
+    }
+
+    if v.Type().IsMapType() || v.Type().IsObjectType() {
+        l := make(map[string]ast.Variable)
+        it := v.ElementIterator()
+        for it.Next() {
+            ek, ev := it.Element()
+            l[ek.AsString()] = HILVariableFromHCL2Value(ev)
+        }
+        // If we were given an object then this could actually produce an invalid
+        // map with non-homogenous types, which we expect to be caught inside
+        // HIL just like a user-supplied non-homogenous map would be.
+        return ast.Variable{
+            Type:  ast.TypeMap,
+            Value: l,
+        }
+    }
+
+    // If we fall out here then we have some weird type that we haven't
+    // accounted for. This should never happen unless the caller is using
+    // capsule types, and we don't currently have any such types defined.
+    panic(fmt.Errorf("can't convert %#v to HIL variable", v))
+}
+
+func HCL2ValueFromHILVariable(v ast.Variable) cty.Value {
+    switch v.Type {
+    case ast.TypeList:
+        vals := make([]cty.Value, len(v.Value.([]ast.Variable)))
+        for i, ev := range v.Value.([]ast.Variable) {
+            vals[i] = HCL2ValueFromHILVariable(ev)
+        }
+        return cty.TupleVal(vals)
+    case ast.TypeMap:
+        vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable)))
+        for k, ev := range v.Value.(map[string]ast.Variable) {
+            vals[k] = HCL2ValueFromHILVariable(ev)
+        }
+        return cty.ObjectVal(vals)
+    default:
+        return HCL2ValueFromConfigValue(v.Value)
+    }
+}
+
+func HCL2TypeForHILType(hilType ast.Type) cty.Type {
+    switch hilType {
+    case ast.TypeAny:
+        return cty.DynamicPseudoType
+    case ast.TypeUnknown:
+        return cty.DynamicPseudoType
+    case ast.TypeBool:
+        return cty.Bool
+    case ast.TypeInt:
+        return cty.Number
+    case ast.TypeFloat:
+        return cty.Number
+    case ast.TypeString:
+        return cty.String
+    case ast.TypeList:
+        return cty.List(cty.DynamicPseudoType)
+    case ast.TypeMap:
+        return cty.Map(cty.DynamicPseudoType)
+    default:
+        return cty.NilType // equivalent to ast.TypeInvalid
+    }
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go
new file mode 100644
index 00000000000..92f0213d724
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go
@@ -0,0 +1,214 @@
+package hcl2shim
+
+import (
+    "github.com/zclconf/go-cty/cty"
+)
+
+// ValuesSDKEquivalent returns true if both of the given values seem equivalent
+// as far as the legacy SDK diffing code would be concerned.
+//
+// Since SDK diffing is a fuzzy, inexact operation, this function is also
+// fuzzy and inexact. It will err on the side of returning false if it
+// encounters an ambiguous situation. Ambiguity is most common in the presence
+// of sets because in practice it is impossible to exactly correlate
+// nonequal-but-equivalent set elements because they have no identity separate
+// from their value.
+//
+// This must be used _only_ for comparing values for equivalence within the
+// SDK planning code. It is only meaningful to compare the "prior state"
+// provided by Terraform Core with the "planned new state" produced by the
+// legacy SDK code via shims. In particular it is not valid to use this
+// function with the config value or the "proposed new state" value
+// because they contain only the subset of data that Terraform Core itself is
+// able to determine.
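Reviewer note: a sketch of the fuzzy equivalence rules documented above, assuming the vendored packages. Null and "zero" values of the same kind compare as equivalent, while a nonzero value never matches a zero one; the input values here are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Null string vs empty string: both count as "zero", so equivalent.
	fmt.Println(hcl2shim.ValuesSDKEquivalent(
		cty.NullVal(cty.String), cty.StringVal(""))) // true

	// Nonzero number vs zero: not equivalent.
	fmt.Println(hcl2shim.ValuesSDKEquivalent(
		cty.NumberIntVal(1), cty.Zero)) // false
}
```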
+func ValuesSDKEquivalent(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + // We don't generally expect nils to appear, but we'll allow them + // for robustness since the data structures produced by legacy SDK code + // can sometimes be non-ideal. + return a == b // equivalent if they are _both_ nil + } + if a.RawEquals(b) { + // Easy case. We use RawEquals because we want two unknowns to be + // considered equal here, whereas "Equals" would return unknown. + return true + } + if !a.IsKnown() || !b.IsKnown() { + // Two unknown values are equivalent regardless of type. A known is + // never equivalent to an unknown. + return a.IsKnown() == b.IsKnown() + } + if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { + // Two null/zero values are equivalent regardless of type. A non-zero is + // never equivalent to a zero. + return aZero == bZero + } + + // If we get down here then we are guaranteed that both a and b are known, + // non-null values. + + aTy := a.Type() + bTy := b.Type() + switch { + case aTy.IsSetType() && bTy.IsSetType(): + return valuesSDKEquivalentSets(a, b) + case aTy.IsListType() && bTy.IsListType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsTupleType() && bTy.IsTupleType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsMapType() && bTy.IsMapType(): + return valuesSDKEquivalentMappings(a, b) + case aTy.IsObjectType() && bTy.IsObjectType(): + return valuesSDKEquivalentMappings(a, b) + case aTy == cty.Number && bTy == cty.Number: + return valuesSDKEquivalentNumbers(a, b) + default: + // We've now covered all the interesting cases, so anything that falls + // down here cannot be equivalent. + return false + } +} + +// valuesSDKEquivalentIsNullOrZero returns true if the given value is either +// null or is the "zero value" (in the SDK/Go sense) for its type. +func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { + if v == cty.NilVal { + return true + } + + ty := v.Type() + switch { + case !v.IsKnown(): + return false + case v.IsNull(): + return true + + // After this point, v is always known and non-null + case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): + return v.LengthInt() == 0 + case ty == cty.String: + return v.RawEquals(cty.StringVal("")) + case ty == cty.Number: + return v.RawEquals(cty.Zero) + case ty == cty.Bool: + return v.RawEquals(cty.False) + default: + // The above is exhaustive, but for robustness we'll consider anything + // else to _not_ be zero unless it is null. + return false + } +} + +// valuesSDKEquivalentSets returns true only if each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa. +// This is a fuzzy operation that prefers to signal non-equivalence if it cannot +// be certain that all elements are accounted for. +func valuesSDKEquivalentSets(a, b cty.Value) bool { + if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { + return false + } + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. + // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. 
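+ // For example, the string sets {"", "x"} and {null, "x"} are equivalent:
+ // "" and null are both "zero" so they pair with each other, and "x"
+ // pairs with itself, leaving no element of either set unaccounted for.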
+ as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if ValuesSDKEquivalent(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + for _, eq := range aeqs { + if !eq { + return false + } + } + for _, eq := range beqs { + if !eq { + return false + } + } + return true +} + +// valuesSDKEquivalentSequences decides equivalence for two sequence values +// (lists or tuples). +func valuesSDKEquivalentSequences(a, b cty.Value) bool { + as := a.AsValueSlice() + bs := b.AsValueSlice() + if len(as) != len(bs) { + return false + } + + for i := range as { + if !ValuesSDKEquivalent(as[i], bs[i]) { + return false + } + } + return true +} + +// valuesSDKEquivalentMappings decides equivalence for two mapping values +// (maps or objects). +func valuesSDKEquivalentMappings(a, b cty.Value) bool { + as := a.AsValueMap() + bs := b.AsValueMap() + if len(as) != len(bs) { + return false + } + + for k, av := range as { + bv, ok := bs[k] + if !ok { + return false + } + if !ValuesSDKEquivalent(av, bv) { + return false + } + } + return true +} + +// valuesSDKEquivalentNumbers decides equivalence for two number values based +// on the fact that the SDK uses int and float64 representations while +// cty (and thus Terraform Core) uses big.Float, and so we expect to lose +// precision in the round-trip. +// +// This does _not_ attempt to allow for an epsilon difference that may be +// caused by accumulated inaccuracy in a float calculation, under the +// expectation that providers generally do not actually do computations on +// floats and instead just pass string representations of them on verbatim +// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's +// a problem for the provider itself to deal with, based on its knowledge of +// the remote system, e.g. using DiffSuppressFunc. +func valuesSDKEquivalentNumbers(a, b cty.Value) bool { + if a.RawEquals(b) { + return true // easy + } + + af := a.AsBigFloat() + bf := b.AsBigFloat() + + if af.IsInt() != bf.IsInt() { + return false + } + if af.IsInt() && bf.IsInt() { + return false // a.RawEquals(b) test above is good enough for integers + } + + // The SDK supports only int and float64, so if it's not an integer + // we know that only a float64-level of precision can possibly be + // significant. + af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go new file mode 100644 index 00000000000..d45ed8a9c46 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go @@ -0,0 +1,404 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Module is a container for a set of configuration constructs that are +// evaluated within a common namespace. +type Module struct { + // SourceDir is the filesystem directory that the module was loaded from. + // + // This is populated automatically only for configurations loaded with + // LoadConfigDir. If the parser is using a virtual filesystem then the + // path here will be in terms of that virtual filesystem.
+ + // Any other caller that constructs a module directly with NewModule may + // assign a suitable value to this attribute before using it for other + // purposes. It should be treated as immutable by all consumers of Module + // values. + SourceDir string + + CoreVersionConstraints []VersionConstraint + + Backend *Backend + ProviderConfigs map[string]*Provider + ProviderRequirements map[string][]VersionConstraint + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. +// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. +type File struct { + CoreVersionConstraints []VersionConstraint + + Backends []*Backend + ProviderConfigs []*Provider + ProviderRequirements []*ProviderRequirement + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderRequirements: map[string][]VersionConstraint{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) + } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + for _, constraint := range file.CoreVersionConstraints { + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. 
+ m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), + Subject: &b.DeclRange, + }) + continue + } + m.Backend = b + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + if existing, exists := m.ProviderConfigs[key]; exists { + if existing.Alias == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } + continue + } + m.ProviderConfigs[key] = pc + } + + for _, reqd := range file.ProviderRequirements { + m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) + } + + for _, v := range file.Variables { + if existing, exists := m.Variables[v.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate variable declaration", + Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &v.DeclRange, + }) + } + m.Variables[v.Name] = v + } + + for _, l := range file.Locals { + if existing, exists := m.Locals[l.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate local value definition", + Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &l.DeclRange, + }) + } + m.Locals[l.Name] = l + } + + for _, o := range file.Outputs { + if existing, exists := m.Outputs[o.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate output definition", + Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &o.DeclRange, + }) + } + m.Outputs[o.Name] = o + } + + for _, mc := range file.ModuleCalls { + if existing, exists := m.ModuleCalls[mc.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate module call", + Detail: fmt.Sprintf("A module call named %q was already defined at %s. 
Module calls must have unique names within a module.", existing.Name, existing.DeclRange), + Subject: &mc.DeclRange, + }) + } + m.ModuleCalls[mc.Name] = mc + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + if existing, exists := m.ManagedResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + } + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. + m.CoreVersionConstraints = nil + for _, constraint := range file.CoreVersionConstraints { + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) 
+ } + } + + if len(file.ProviderRequirements) != 0 { + mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) + } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) 
+ } + + return diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go new file mode 100644 index 00000000000..8c3ba67ce6e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go @@ -0,0 +1,188 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// ModuleCall represents a "module" block in a module or file. +type ModuleCall struct { + Name string + + SourceAddr string + SourceAddrRange hcl.Range + SourceSet bool + + Config hcl.Body + + Version VersionConstraint + + Count hcl.Expression + ForEach hcl.Expression + + Providers []PassedProviderConfig + + DependsOn []hcl.Traversal + + DeclRange hcl.Range +} + +func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { + mc := &ModuleCall{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := moduleBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, remain, diags := block.Body.PartialContent(schema) + mc.Config = remain + + if !hclsyntax.ValidIdentifier(mc.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["source"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) + diags = append(diags, valDiags...) + mc.SourceAddrRange = attr.Expr.Range() + mc.SourceSet = true + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + mc.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + if attr, exists := content.Attributes["count"]; exists { + mc.Count = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["for_each"]; exists { + mc.ForEach = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + mc.DependsOn = append(mc.DependsOn, deps...) + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["providers"]; exists { + seen := make(map[string]hcl.Range) + pairs, pDiags := hcl.ExprMap(attr.Expr) + diags = append(diags, pDiags...) 
+ for _, pair := range pairs { + key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") + diags = append(diags, keyDiags...) + value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") + diags = append(diags, valueDiags...) + if keyDiags.HasErrors() || valueDiags.HasErrors() { + continue + } + + matchKey := key.String() + if prev, exists := seen[matchKey]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider address", + Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), + Subject: pair.Value.Range().Ptr(), + }) + continue + } + + rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) + seen[matchKey] = rng + mc.Providers = append(mc.Providers, PassedProviderConfig{ + InChild: key, + InParent: value, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in module block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return mc, diags +} + +// PassedProviderConfig represents a provider config explicitly passed down to +// a child module, possibly giving it a new local address in the process. +type PassedProviderConfig struct { + InChild *ProviderConfigRef + InParent *ProviderConfigRef +} + +var moduleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + Required: true, + }, + { + Name: "version", + }, + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "depends_on", + }, + { + Name: "providers", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + // These are all reserved for future use. + {Type: "lifecycle"}, + {Type: "locals"}, + {Type: "provider", LabelNames: []string{"type"}}, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go new file mode 100644 index 00000000000..12916ef8c97 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go @@ -0,0 +1,247 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// The methods in this file are used by Module.mergeFile to apply overrides +// to our different configuration elements. These methods all follow the +// pattern of mutating the receiver to incorporate settings from the parameter, +// returning error diagnostics if any aspect of the parameter cannot be merged +// into the receiver for some reason. +// +// User expectation is that anything _explicitly_ set in the given object +// should take precedence over the corresponding settings in the receiver, +// but that anything omitted in the given object should be left unchanged. +// In some cases it may be reasonable to do a "deep merge" of certain nested +// features, if it is possible to unambiguously correlate the nested elements +// and their behaviors are orthogonal to each other. 
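A minimal sketch of that pattern, using an invented type rather than any real one from this package:

// Invented illustration of the merge pattern described above.
type widget struct {
	Size    int
	SizeSet bool
}

// merge mutates the receiver: fields explicitly set on the override
// win, while fields left unset keep the receiver's existing value.
func (w *widget) merge(ow *widget) {
	if ow.SizeSet {
		w.Size = ow.Size
		w.SizeSet = true
	}
}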
+ +func (p *Provider) merge(op *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if op.Version.Required != nil { + p.Version = op.Version + } + + p.Config = MergeBodies(p.Config, op.Config) + + return diags +} + +func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { + // Any provider name that's mentioned in the override gets nilled out in + // our map so that we'll rebuild it below. Any provider not mentioned is + // left unchanged. + for _, reqd := range ovrd { + delete(recv, reqd.Name) + } + for _, reqd := range ovrd { + recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) + } +} + +func (v *Variable) merge(ov *Variable) hcl.Diagnostics { + var diags hcl.Diagnostics + + if ov.DescriptionSet { + v.Description = ov.Description + v.DescriptionSet = ov.DescriptionSet + } + if ov.Default != cty.NilVal { + v.Default = ov.Default + } + if ov.Type != cty.NilType { + v.Type = ov.Type + } + if ov.ParsingMode != 0 { + v.ParsingMode = ov.ParsingMode + } + + // If the override file overrode type without default or vice-versa then + // it may have created an invalid situation, which we'll catch now by + // attempting to re-convert the value. + // + // Note that here we may be re-converting an already-converted base value + // from the base config. This will be a no-op if the type was not changed, + // but in particular might be user-observable in the edge case where the + // literal value in config could've been converted to the overridden type + // constraint but the already-converted value cannot be. In practice, this + // situation should be rare since most of our conversions are interchangeable. + if v.Default != cty.NilVal { + val, err := convert.Convert(v.Default, v.Type) + if err != nil { + // What exactly we'll say in the error message here depends on whether + // it was Default or Type that was overridden. + switch { + case ov.Type != cty.NilType && ov.Default == cty.NilVal: + // If only the type was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), + Subject: &ov.DeclRange, + }) + case ov.Type == cty.NilType && ov.Default != cty.NilVal: + // Only the default was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + } + } else { + v.Default = val + } + } + + return diags +} + +func (l *Local) merge(ol *Local) hcl.Diagnostics { + var diags hcl.Diagnostics + + // Since a local is just a single expression in configuration, the + // override definition entirely replaces the base definition, including + // the source range so that we'll send the user to the right place if + // there is an error.
+ l.Expr = ol.Expr + l.DeclRange = ol.DeclRange + + return diags +} + +func (o *Output) merge(oo *Output) hcl.Diagnostics { + var diags hcl.Diagnostics + + if oo.Description != "" { + o.Description = oo.Description + } + if oo.Expr != nil { + o.Expr = oo.Expr + } + if oo.SensitiveSet { + o.Sensitive = oo.Sensitive + o.SensitiveSet = oo.SensitiveSet + } + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(oo.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { + var diags hcl.Diagnostics + + if omc.SourceSet { + mc.SourceAddr = omc.SourceAddr + mc.SourceAddrRange = omc.SourceAddrRange + mc.SourceSet = omc.SourceSet + } + + if omc.Count != nil { + mc.Count = omc.Count + } + + if omc.ForEach != nil { + mc.ForEach = omc.ForEach + } + + if len(omc.Version.Required) != 0 { + mc.Version = omc.Version + } + + mc.Config = MergeBodies(mc.Config, omc.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(mc.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (r *Resource) merge(or *Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + + if r.Mode != or.Mode { + // This is always a programming error, since managed and data resources + // are kept in separate maps in the configuration structures. + panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) + } + + if or.Count != nil { + r.Count = or.Count + } + if or.ForEach != nil { + r.ForEach = or.ForEach + } + if or.ProviderConfigRef != nil { + r.ProviderConfigRef = or.ProviderConfigRef + } + if r.Mode == addrs.ManagedResourceMode { + // or.Managed is always non-nil for managed resource mode + + if or.Managed.Connection != nil { + r.Managed.Connection = or.Managed.Connection + } + if or.Managed.CreateBeforeDestroySet { + r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy + r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet + } + if len(or.Managed.IgnoreChanges) != 0 { + r.Managed.IgnoreChanges = or.Managed.IgnoreChanges + } + if or.Managed.PreventDestroySet { + r.Managed.PreventDestroy = or.Managed.PreventDestroy + r.Managed.PreventDestroySet = or.Managed.PreventDestroySet + } + if len(or.Managed.Provisioners) != 0 { + r.Managed.Provisioners = or.Managed.Provisioners + } + } + + r.Config = MergeBodies(r.Config, or.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. 
+ if len(or.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go new file mode 100644 index 00000000000..0ed561eeee4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go @@ -0,0 +1,143 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// MergeBodies creates a new HCL body that contains a combination of the +// given base and override bodies. Attributes and blocks defined in the +// override body take precedence over those of the same name defined in +// the base body. +// +// If any block of a particular type appears in "override" then it will +// replace _all_ of the blocks of the same type in "base" in the new +// body. +func MergeBodies(base, override hcl.Body) hcl.Body { + return mergeBody{ + Base: base, + Override: override, + } +} + +// mergeBody is a hcl.Body implementation that wraps a pair of other bodies +// and allows attributes and blocks within the override to take precedence +// over those defined in the base body. +// +// This is used to deal with dynamically-processed bodies in Module.mergeFile. +// It uses a shallow-only merging strategy where direct attributes defined +// in Override will override attributes of the same name in Base, while any +// blocks defined in Override will hide all blocks of the same type in Base. +// +// This cannot possibly "do the right thing" in all cases, because we don't +// have enough information about user intent. However, this behavior is intended +// to be reasonable for simple overriding use-cases. +type mergeBody struct { + Base hcl.Body + Override hcl.Body +} + +var _ hcl.Body = mergeBody{} + +func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, _, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + return content, diags +} + +func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) 
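+ // Note that the override body was checked against a relaxed schema
+ // (via schemaForOverrides), so attributes marked Required in the
+ // base schema may legitimately be absent from the override.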
+ + content := b.prepareContent(baseContent, overrideContent) + + remain := MergeBodies(baseRemain, overrideRemain) + + return content, remain, diags +} + +func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + } + + // For attributes we just assign from each map in turn and let the override + // map clobber any matching entries from base. + for k, a := range base.Attributes { + content.Attributes[k] = a + } + for k, a := range override.Attributes { + content.Attributes[k] = a + } + + // Things are a little more interesting for blocks because they arrive + // as a flat list. Our merging semantics call for us to suppress blocks + // from base if at least one block of the same type appears in override. + // We explicitly do not try to correlate and deeply merge nested blocks, + // since we don't have enough context here to infer user intent. + + overriddenBlockTypes := make(map[string]bool) + for _, block := range override.Blocks { + if block.Type == "dynamic" { + overriddenBlockTypes[block.Labels[0]] = true + continue + } + overriddenBlockTypes[block.Type] = true + } + for _, block := range base.Blocks { + // We skip over dynamic blocks whose type label is an overridden type + // but note that below we do still leave them as dynamic blocks in + // the result because expanding the dynamic blocks that are left is + // done much later during the core graph walks, where we can safely + // evaluate the expressions. + if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { + continue + } + if overriddenBlockTypes[block.Type] { + continue + } + content.Blocks = append(content.Blocks, block) + } + for _, block := range override.Blocks { + content.Blocks = append(content.Blocks, block) + } + + return content +} + +func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := make(hcl.Attributes) + + baseAttrs, aDiags := b.Base.JustAttributes() + diags = append(diags, aDiags...) + overrideAttrs, aDiags := b.Override.JustAttributes() + diags = append(diags, aDiags...) + + for k, a := range baseAttrs { + ret[k] = a + } + for k, a := range overrideAttrs { + ret[k] = a + } + + return ret, diags +} + +func (b mergeBody) MissingItemRange() hcl.Range { + return b.Base.MissingItemRange() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go new file mode 100644 index 00000000000..66468c3bdac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go @@ -0,0 +1,364 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/ext/typeexpr" + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// A consistent detail message for all "not a valid identifier" diagnostics. +const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." + +// Variable represents a "variable" block in a module or file. 
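As a quick illustration of what the Variable type below ends up holding, an invented block like the one in this string, decoded through this package's config loader, yields the field values shown in the comments:

const exampleVariable = `
variable "image_id" {
  type        = string
  description = "AMI ID to deploy"
  default     = "ami-0abc"
}
`
// Decoding this block produces a Variable with:
//   Name        = "image_id"
//   Type        = cty.String
//   ParsingMode = VariableParseLiteral
//   Default     = cty.StringVal("ami-0abc")
//   Description = "AMI ID to deploy", DescriptionSet = true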
+type Variable struct { + Name string + Description string + Default cty.Value + Type cty.Type + ParsingMode VariableParsingMode + + DescriptionSet bool + + DeclRange hcl.Range +} + +func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { + v := &Variable{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + // Unless we're building an override, we'll set some defaults + // which we might override with attributes below. We leave these + // as zero-value in the override case so we can recognize whether + // or not they are set when we merge. + if !override { + v.Type = cty.DynamicPseudoType + v.ParsingMode = VariableParseLiteral + } + + content, diags := block.Body.Content(variableBlockSchema) + + if !hclsyntax.ValidIdentifier(v.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + // Don't allow declaration of variables that would conflict with the + // reserved attribute and block type names in a "module" block, since + // these won't be usable for child modules. + for _, attr := range moduleBlockSchema.Attributes { + if attr.Name == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), + Subject: &block.LabelRanges[0], + }) + } + } + for _, blockS := range moduleBlockSchema.Blocks { + if blockS.Type == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), + Subject: &block.LabelRanges[0], + }) + } + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) + diags = append(diags, valDiags...) + v.DescriptionSet = true + } + + if attr, exists := content.Attributes["type"]; exists { + ty, parseMode, tyDiags := decodeVariableType(attr.Expr) + diags = append(diags, tyDiags...) + v.Type = ty + v.ParsingMode = parseMode + } + + if attr, exists := content.Attributes["default"]; exists { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + + // Convert the default to the expected type so we can catch invalid + // defaults early and allow later code to assume validity. + // Note that this depends on us having already processed any "type" + // attribute above. + // However, we can't do this if we're in an override file where + // the type might not be set; we'll catch that during merge. 
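+ // For example, default = "5" with type = number converts to
+ // cty.NumberIntVal(5) here, while default = "hello" with type = number
+ // fails the conversion and produces the diagnostic below.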
+ if v.Type != cty.NilType { + var err error + val, err = convert.Convert(val, v.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), + Subject: attr.Expr.Range().Ptr(), + }) + val = cty.DynamicVal + } + } + + v.Default = val + } + + return v, diags +} + +func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { + if exprIsNativeQuotedString(expr) { + // Here we're accepting the pre-0.12 form of variable type argument where + // the string values "string", "list" and "map" are accepted as a hint + // about the type used primarily for deciding how to parse values + // given on the command line and in environment variables. + // Only the native syntax ends up in this codepath; we handle the + // JSON syntax (which is, of course, quoted even in the new format) + // in the normal codepath below. + val, diags := expr.Value(nil) + if diags.HasErrors() { + return cty.DynamicPseudoType, VariableParseHCL, diags + } + str := val.AsString() + switch str { + case "string": + return cty.String, VariableParseLiteral, diags + case "list": + return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags + case "map": + return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags + default: + return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: "Invalid legacy variable type hint", + Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, + Subject: expr.Range().Ptr(), + }} + } + } + + // First we'll deal with some shorthand forms that the HCL-level type + // expression parser doesn't include. These both emulate pre-0.12 behavior + // of allowing a list or map of any element type as long as all of the + // elements are consistent. This is the same as list(any) or map(any). + switch hcl.ExprAsKeyword(expr) { + case "list": + return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil + case "map": + return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil + } + + ty, diags := typeexpr.TypeConstraint(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, VariableParseHCL, diags + } + + switch { + case ty.IsPrimitiveType(): + // Primitive types use literal parsing. + return ty, VariableParseLiteral, diags + default: + // Everything else uses HCL parsing. + return ty, VariableParseHCL, diags + } +} + +// VariableParsingMode defines how values of a particular variable given by +// text-only mechanisms (command line arguments and environment variables) +// should be parsed to produce the final value. +type VariableParsingMode rune + +// VariableParseLiteral is a variable parsing mode that just takes the given +// string directly as a cty.String value. +const VariableParseLiteral VariableParsingMode = 'L' + +// VariableParseHCL is a variable parsing mode that attempts to parse the given +// string as an HCL expression and returns the result. +const VariableParseHCL VariableParsingMode = 'H' + +// Parse uses the receiving parsing mode to process the given variable value +// string, returning the result along with any diagnostics.
+// + // A VariableParsingMode does not know the expected type of the corresponding + // variable, so it's the caller's responsibility to attempt to convert the + // result to the appropriate type and return to the user any diagnostics that + // conversion may produce. + // + // The given name is used to create a synthetic filename in case any diagnostics + // must be generated about the given string value. This should be the name + // of the root module variable whose value will be populated from the given + // string. + // + // If the returned diagnostics has errors, the returned value may not be + // valid. +func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { + switch m { + case VariableParseLiteral: + return cty.StringVal(value), nil + case VariableParseHCL: + fakeFilename := fmt.Sprintf("<value for var.%s>", name) + expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, valDiags := expr.Value(nil) + diags = append(diags, valDiags...) + return val, diags + default: + // Should never happen + panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) + } +} + +// Output represents an "output" block in a module or file. +type Output struct { + Name string + Description string + Expr hcl.Expression + DependsOn []hcl.Traversal + Sensitive bool + + DescriptionSet bool + SensitiveSet bool + + DeclRange hcl.Range +} + +func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { + o := &Output{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := outputBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, diags := block.Body.Content(schema) + + if !hclsyntax.ValidIdentifier(o.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid output name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) + diags = append(diags, valDiags...) + o.DescriptionSet = true + } + + if attr, exists := content.Attributes["value"]; exists { + o.Expr = attr.Expr + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) + diags = append(diags, valDiags...) + o.SensitiveSet = true + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + o.DependsOn = append(o.DependsOn, deps...) + } + + return o, diags +} + +// Local represents a single entry from a "locals" block in a module or file. +// The "locals" block itself is not represented, because it serves only to +// provide context for us to interpret its contents.
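To make the two variable parsing modes defined earlier in this file concrete, a small sketch with invented inputs:

lit, _ := VariableParseLiteral.Parse("region", "us-east-1")
// lit is cty.StringVal("us-east-1"): the raw string, taken verbatim.

obj, diags := VariableParseHCL.Parse("tags", `{ env = "prod" }`)
// obj is an object value with attribute env = "prod", because the
// string was parsed as an HCL expression; diags carries parse errors.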
+type Local struct { + Name string + Expr hcl.Expression + + DeclRange hcl.Range +} + +func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if len(attrs) == 0 { + return nil, diags + } + + locals := make([]*Local, 0, len(attrs)) + for name, attr := range attrs { + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local value name", + Detail: badIdentifierDetail, + Subject: &attr.NameRange, + }) + } + + locals = append(locals, &Local{ + Name: name, + Expr: attr.Expr, + DeclRange: attr.Range, + }) + } + return locals, diags +} + +// Addr returns the address of the local value declared by the receiver, +// relative to its containing module. +func (l *Local) Addr() addrs.LocalValue { + return addrs.LocalValue{ + Name: l.Name, + } +} + +var variableBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "default", + }, + { + Name: "type", + }, + }, +} + +var outputBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "value", + Required: true, + }, + { + Name: "depends_on", + }, + { + Name: "sensitive", + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go new file mode 100644 index 00000000000..8176fa1b775 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go @@ -0,0 +1,100 @@ +package configs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hclparse" + "github.com/spf13/afero" +) + +// Parser is the main interface to read configuration files and other related +// files from disk. +// +// It retains a cache of all files that are loaded so that they can be used +// to create source code snippets in diagnostics, etc. +type Parser struct { + fs afero.Afero + p *hclparse.Parser +} + +// NewParser creates and returns a new Parser that reads files from the given +// filesystem. If a nil filesystem is passed then the system's "real" filesystem +// will be used, via afero.OsFs. +func NewParser(fs afero.Fs) *Parser { + if fs == nil { + fs = afero.OsFs{} + } + + return &Parser{ + fs: afero.Afero{Fs: fs}, + p: hclparse.NewParser(), + } +} + +// LoadHCLFile is a low-level method that reads the file at the given path, +// parses it, and returns the hcl.Body representing its root. In many cases +// it is better to use one of the other Load*File methods on this type, +// which additionally decode the root body in some way and return a higher-level +// construct. +// +// If the file cannot be read at all -- e.g. because it does not exist -- then +// this method will return a nil body and error diagnostics. In this case +// callers may wish to ignore the provided error diagnostics and produce +// a more context-sensitive error instead. +// +// The file will be parsed using the HCL native syntax unless the filename +// ends with ".json", in which case the HCL JSON syntax will be used. 
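A minimal sketch of reaching this entry point through an in-memory filesystem (file name and contents invented; assumes the afero import already present in this file):

fs := afero.NewMemMapFs()
_ = afero.WriteFile(fs, "main.tf", []byte(`locals { greeting = "hello" }`), 0644)

p := NewParser(fs)
body, diags := p.LoadHCLFile("main.tf")
// body is the root hcl.Body of main.tf, parsed with the native
// syntax since the name does not end in ".json"; diags reports errors.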
+func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { + src, err := p.fs.ReadFile(path) + + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The file %q could not be read.", path), + }, + } + } + + var file *hcl.File + var diags hcl.Diagnostics + switch { + case strings.HasSuffix(path, ".json"): + file, diags = p.p.ParseJSON(src, path) + default: + file, diags = p.p.ParseHCL(src, path) + } + + // If the returned file or body is nil, then we'll return a non-nil empty + // body so we'll meet our contract that nil means an error reading the file. + if file == nil || file.Body == nil { + return hcl.EmptyBody(), diags + } + + return file.Body, diags +} + +// Sources returns a map of the cached source buffers for all files that +// have been loaded through this parser, with source filenames (as requested +// when each file was opened) as the keys. +func (p *Parser) Sources() map[string][]byte { + return p.p.Sources() +} + +// ForceFileSource artificially adds source code to the cache of file sources, +// as if it had been loaded from the given filename. +// +// This should be used only in special situations where configuration is loaded +// some other way. Most callers should load configuration via methods of +// Parser, which will update the sources cache automatically. +func (p *Parser) ForceFileSource(filename string, src []byte) { + // We'll make a synthetic hcl.File here just so we can reuse the + // existing cache. + p.p.AddFile(filename, &hcl.File{ + Body: hcl.EmptyBody(), + Bytes: src, + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go new file mode 100644 index 00000000000..7f2ff271420 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go @@ -0,0 +1,247 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// LoadConfigFile reads the file at the given path and parses it as a config +// file. +// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil *File will be returned along with error diagnostics. Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil *File is returned +// then the file may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, false) +} + +// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes +// certain required attribute constraints in order to interpret the given +// file as an overrides file.
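For example (invented paths), a primary file and an override file load through different entry points and then merge via NewModule:

p := NewParser(nil) // nil selects the real OS filesystem
base, diags := p.LoadConfigFile("main.tf")
ovr, moreDiags := p.LoadConfigFileOverride("main_override.tf")
diags = append(diags, moreDiags...)

// The override loader tolerates omitted attributes (e.g. a module
// block without "source"), so partial blocks in ovr can be merged
// over the corresponding blocks from base.
mod, modDiags := NewModule([]*File{base}, []*File{ovr})
diags = append(diags, modDiags...)
_ = mod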
+func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, true) +} + +func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { + + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + file := &File{} + + var reqDiags hcl.Diagnostics + file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) + diags = append(diags, reqDiags...) + + content, contentDiags := body.Content(configFileSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, contentDiags := block.Body.Content(terraformBlockSchema) + diags = append(diags, contentDiags...) + + // We ignore the "terraform_version" attribute here because + // sniffCoreVersionRequirements already dealt with that above. + + for _, innerBlock := range content.Blocks { + switch innerBlock.Type { + + case "backend": + backendCfg, cfgDiags := decodeBackendBlock(innerBlock) + diags = append(diags, cfgDiags...) + if backendCfg != nil { + file.Backends = append(file.Backends, backendCfg) + } + + case "required_providers": + reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) + diags = append(diags, reqsDiags...) + file.ProviderRequirements = append(file.ProviderRequirements, reqs...) + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + case "provider": + cfg, cfgDiags := decodeProviderBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ProviderConfigs = append(file.ProviderConfigs, cfg) + } + + case "variable": + cfg, cfgDiags := decodeVariableBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Variables = append(file.Variables, cfg) + } + + case "locals": + defs, defsDiags := decodeLocalsBlock(block) + diags = append(diags, defsDiags...) + file.Locals = append(file.Locals, defs...) + + case "output": + cfg, cfgDiags := decodeOutputBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Outputs = append(file.Outputs, cfg) + } + + case "module": + cfg, cfgDiags := decodeModuleBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ModuleCalls = append(file.ModuleCalls, cfg) + } + + case "resource": + cfg, cfgDiags := decodeResourceBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ManagedResources = append(file.ManagedResources, cfg) + } + + case "data": + cfg, cfgDiags := decodeDataBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.DataResources = append(file.DataResources, cfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + return file, diags +} + +// sniffCoreVersionRequirements does minimal parsing of the given body for +// "terraform" blocks with "required_version" attributes, returning the +// requirements found. +// +// This is intended to maximize the chance that we'll be able to read the +// requirements (syntax errors notwithstanding) even if the config file contains +// constructs that might've been added in future Terraform versions +// +// This is a "best effort" sort of method which will return constraints it is +// able to find, but may return no constraints at all if the given body is +// so invalid that it cannot be decoded at all. 
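A sketch of what the sniffing below extracts, assuming an hclsyntax import and an invented config string:

src := []byte(`terraform { required_version = ">= 0.12" }`)
f, _ := hclsyntax.ParseConfig(src, "versions.tf", hcl.Pos{Line: 1, Column: 1})
reqs, diags := sniffCoreVersionRequirements(f.Body)
// reqs holds a single VersionConstraint for ">= 0.12"; everything
// else in the body is ignored by the partial-content schemas below.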
+func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) + + var constraints []VersionConstraint + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) + diags = append(diags, blockDiags...) + + attr, exists := content.Attributes["required_version"] + if !exists { + continue + } + + constraint, constraintDiags := decodeVersionConstraint(attr) + diags = append(diags, constraintDiags...) + if !constraintDiags.HasErrors() { + constraints = append(constraints, constraint) + } + } + + return constraints, diags +} + +// configFileSchema is the schema for the top-level of a config file. We use +// the low-level HCL API for this level so we can easily deal with each +// block type separately with its own decoding logic. +var configFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "locals", + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + }, +} + +// terraformBlockSchema is the schema for a top-level "terraform" block in +// a configuration file. +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "backend", + LabelNames: []string{"type"}, + }, + { + Type: "required_providers", + }, + }, +} + +// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffRootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + }, +} + +// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go new file mode 100644 index 00000000000..752d6d9cabf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go @@ -0,0 +1,163 @@ +package configs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// LoadConfigDir reads the .tf and .tf.json files in the given directory +// as config files (using LoadConfigFile) and then combines these files into +// a single Module. +// +// If this method returns nil, that indicates that the given directory does not +// exist at all or could not be opened for some reason. Callers may wish to +// detect this case and ignore the returned diagnostics so that they can +// produce a more context-aware error message in that case. +// +// If this method returns a non-nil module while error diagnostics are returned +// then the module may be incomplete but can be used carefully for static +// analysis. 
+//
+// This method does not consider a directory with no files to be an error, and
+// will simply return an empty module in that case. Callers should first call
+// Parser.IsConfigDir if they wish to recognize that situation.
+//
+// .tf files are parsed using the HCL native syntax while .tf.json files are
+// parsed using the HCL JSON syntax.
+func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) {
+	primaryPaths, overridePaths, diags := p.dirFiles(path)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	primary, fDiags := p.loadFiles(primaryPaths, false)
+	diags = append(diags, fDiags...)
+	override, fDiags := p.loadFiles(overridePaths, true)
+	diags = append(diags, fDiags...)
+
+	mod, modDiags := NewModule(primary, override)
+	diags = append(diags, modDiags...)
+
+	mod.SourceDir = path
+
+	return mod, diags
+}
+
+// ConfigDirFiles returns lists of the primary and override configuration
+// files in the given directory.
+//
+// If the given directory does not exist or cannot be read, error diagnostics
+// are returned. If errors are returned, the resulting lists may be incomplete.
+func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
+	return p.dirFiles(dir)
+}
+
+// IsConfigDir determines whether the given path refers to a directory that
+// exists and contains at least one Terraform config file (with a .tf or
+// .tf.json extension).
+func (p *Parser) IsConfigDir(path string) bool {
+	primaryPaths, overridePaths, _ := p.dirFiles(path)
+	return (len(primaryPaths) + len(overridePaths)) > 0
+}
+
+func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) {
+	var files []*File
+	var diags hcl.Diagnostics
+
+	for _, path := range paths {
+		var f *File
+		var fDiags hcl.Diagnostics
+		if override {
+			f, fDiags = p.LoadConfigFileOverride(path)
+		} else {
+			f, fDiags = p.LoadConfigFile(path)
+		}
+		diags = append(diags, fDiags...)
+		if f != nil {
+			files = append(files, f)
+		}
+	}
+
+	return files, diags
+}
+
+func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
+	infos, err := p.fs.ReadDir(dir)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Failed to read module directory",
+			Detail:   fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
+		})
+		return
+	}
+
+	for _, info := range infos {
+		if info.IsDir() {
+			// We only care about files
+			continue
+		}
+
+		name := info.Name()
+		ext := fileExt(name)
+		if ext == "" || IsIgnoredFile(name) {
+			continue
+		}
+
+		baseName := name[:len(name)-len(ext)] // strip extension
+		isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
+
+		fullPath := filepath.Join(dir, name)
+		if isOverride {
+			override = append(override, fullPath)
+		} else {
+			primary = append(primary, fullPath)
+		}
+	}
+
+	return
+}
+
+// fileExt returns the Terraform configuration extension of the given
+// path, or a blank string if it is not a recognized extension.
+func fileExt(path string) string {
+	if strings.HasSuffix(path, ".tf") {
+		return ".tf"
+	} else if strings.HasSuffix(path, ".tf.json") {
+		return ".tf.json"
+	} else {
+		return ""
+	}
+}
+
+// IsIgnoredFile returns true if the given filename (which must not have a
+// directory path ahead of it) should be ignored as e.g. an editor swap file.
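+//
+// For example (illustrative names, based on the rules below): ".hidden.tf"
+// (dot-prefixed), "main.tf~" (vim swap), and "#main.tf#" (emacs autosave)
+// would all be ignored.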
+func IsIgnoredFile(name string) bool {
+	return strings.HasPrefix(name, ".") || // Unix-like hidden files
+		strings.HasSuffix(name, "~") || // vim
+		strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
+
+// IsEmptyDir returns true if the given filesystem path contains no Terraform
+// configuration files.
+//
+// Unlike the methods of the Parser type, this function always consults the
+// real filesystem, and thus it isn't appropriate to use when working with
+// configuration loaded from a plan file.
+func IsEmptyDir(path string) (bool, error) {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		return true, nil
+	}
+
+	p := NewParser(nil)
+	fs, os, err := p.dirFiles(path)
+	if err != nil {
+		return false, err
+	}
+
+	return len(fs) == 0 && len(os) == 0, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go
new file mode 100644
index 00000000000..b7f1c1c5de5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go
@@ -0,0 +1,43 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// LoadValuesFile reads the file at the given path and parses it as a "values
+// file", which is an HCL config file whose top-level attributes are treated
+// as arbitrary key/value pairs.
+//
+// If the file cannot be read -- for example, if it does not exist -- then
+// a nil map will be returned along with error diagnostics. Callers may wish
+// to disregard the returned diagnostics in this case and instead generate
+// their own error message(s) with additional context.
+//
+// If the returned diagnostics has errors when a non-nil map is returned
+// then the map may be incomplete but should be valid enough for careful
+// static analysis.
+//
+// This method wraps LoadHCLFile, and so it inherits the syntax selection
+// behaviors documented for that method.
+func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) {
+	body, diags := p.LoadHCLFile(path)
+	if body == nil {
+		return nil, diags
+	}
+
+	vals := make(map[string]cty.Value)
+	attrs, attrDiags := body.JustAttributes()
+	diags = append(diags, attrDiags...)
+	if attrs == nil {
+		return vals, diags
+	}
+
+	for name, attr := range attrs {
+		val, valDiags := attr.Expr.Value(nil)
+		diags = append(diags, valDiags...)
+		vals[name] = val
+	}
+
+	return vals, diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go
new file mode 100644
index 00000000000..99f0f26de5f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go
@@ -0,0 +1,144 @@
+package configs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/gohcl"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+)
+
+// Provider represents a "provider" block in a module or file. A provider
+// block is a provider configuration, and there can be zero or more
+// configurations for each actual provider.
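+//
+// For illustration, such a block might look like this in configuration
+// (hypothetical example; "alias" and "version" are the attributes decoded
+// below):
+//
+//     provider "aws" {
+//       alias   = "west"
+//       version = "~> 2.0"
+//     }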
+type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + content, config, diags := block.Body.PartialContent(providerBlockSchema) + + provider := &Provider{ + Name: block.Labels[0], + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. +func (p *Provider) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ProviderRequirement represents a declaration of a dependency on a particular +// provider version without actually configuring that provider. This is used in +// child modules that expect a provider to be passed in from their parent. +type ProviderRequirement struct { + Name string + Requirement VersionConstraint +} + +func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + var reqs []*ProviderRequirement + for name, attr := range attrs { + req, reqDiags := decodeVersionConstraint(attr) + diags = append(diags, reqDiags...) + if !diags.HasErrors() { + reqs = append(reqs, &ProviderRequirement{ + Name: name, + Requirement: req, + }) + } + } + return reqs, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + // _All_ of these are reserved for future expansion. 
+ {Type: "lifecycle"}, + {Type: "locals"}, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go new file mode 100644 index 00000000000..311f107f86f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go @@ -0,0 +1,150 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" +) + +// Provisioner represents a "provisioner" block when used within a +// "resource" block in a module or file. +type Provisioner struct { + Type string + Config hcl.Body + Connection *Connection + When ProvisionerWhen + OnFailure ProvisionerOnFailure + + DeclRange hcl.Range + TypeRange hcl.Range +} + +func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { + pv := &Provisioner{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + DeclRange: block.DefRange, + When: ProvisionerWhenCreate, + OnFailure: ProvisionerOnFailureFail, + } + + content, config, diags := block.Body.PartialContent(provisionerBlockSchema) + pv.Config = config + + if attr, exists := content.Attributes["when"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "create": + pv.When = ProvisionerWhenCreate + case "destroy": + pv.When = ProvisionerWhenDestroy + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"when\" keyword", + Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", + Subject: expr.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["on_failure"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "continue": + pv.OnFailure = ProvisionerOnFailureContinue + case "fail": + pv.OnFailure = ProvisionerOnFailureFail + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"on_failure\" keyword", + Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + //conn, connDiags := decodeConnectionBlock(block) + //diags = append(diags, connDiags...) + pv.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provisioner block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return pv, diags +} + +// Connection represents a "connection" block when used within either a +// "resource" or "provisioner" block in a module or file. 
+type Connection struct { + Config hcl.Body + + DeclRange hcl.Range +} + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. +type ProvisionerOnFailure int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when"}, + {Name: "on_failure"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "connection"}, + {Type: "lifecycle"}, // reserved for future use + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go new file mode 100644 index 00000000000..7ff5a6e00b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ProvisionerOnFailureInvalid-0] + _ = x[ProvisionerOnFailureContinue-1] + _ = x[ProvisionerOnFailureFail-2] +} + +const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" + +var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} + +func (i ProvisionerOnFailure) String() string { + if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { + return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go new file mode 100644 index 00000000000..9f21b3ac636 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ProvisionerWhenInvalid-0] + _ = x[ProvisionerWhenCreate-1] + _ = x[ProvisionerWhenDestroy-2] +} + +const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" + +var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} + +func (i ProvisionerWhen) String() string { + if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { + return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go new file mode 100644 index 00000000000..22657fad016 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go @@ -0,0 +1,490 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Resource represents a "resource" or "data" block in a module or file. +type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + + DependsOn []hcl.Traversal + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. +type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration +// that should be used for this resource. This function implements the +// default behavior of extracting the type from the resource type name if +// an explicit "provider" argument was not provided. 
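+//
+// For example (illustrative): a resource of type "google_compute_instance"
+// with no explicit "provider" argument is associated with the default
+// configuration of the "google" provider, the type being inferred from the
+// prefix of the resource type name.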
+func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { + if r.ProviderConfigRef == nil { + return r.Addr().DefaultProviderConfig() + } + + return addrs.ProviderConfig{ + Type: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.ManagedResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + Managed: &ManagedResource{}, + } + + content, remain, diags := block.Body.PartialContent(resourceBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same resource block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + var seenLifecycle *hcl.Block + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) + diags = append(diags, valDiags...) + r.Managed.CreateBeforeDestroySet = true + } + + if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) + diags = append(diags, valDiags...) + r.Managed.PreventDestroySet = true + } + + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { + + // ignore_changes can either be a list of relative traversals + // or it can be just the keyword "all" to ignore changes to this + // resource entirely. 
+ // ignore_changes = [ami, instance_type] + // ignore_changes = all + // We also allow two legacy forms for compatibility with earlier + // versions: + // ignore_changes = ["ami", "instance_type"] + // ignore_changes = ["*"] + + kw := hcl.ExprAsKeyword(attr.Expr) + + switch { + case kw == "all": + r.Managed.IgnoreAllChanges = true + default: + exprs, listDiags := hcl.ExprList(attr.Expr) + diags = append(diags, listDiags...) + + var ignoreAllRange hcl.Range + + for _, expr := range exprs { + + // our expr might be the literal string "*", which + // we accept as a deprecated way of saying "all". + if shimIsIgnoreChangesStar(expr) { + r.Managed.IgnoreAllChanges = true + ignoreAllRange = expr.Range() + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Deprecated ignore_changes wildcard", + Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.RelTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) + } + } + + if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes ruleset", + Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", + Subject: &ignoreAllRange, + Context: attr.Expr.Range().Ptr(), + }) + } + + } + + } + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + r.Managed.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + case "provisioner": + pv, pvDiags := decodeProvisionerBlock(block) + diags = append(diags, pvDiags...) + if pv != nil { + r.Managed.Provisioners = append(r.Managed.Provisioners, pv) + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in resource block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.DataResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + } + + content, remain, diags := block.Body.PartialContent(dataBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same data block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + for _, block := range content.Blocks { + // All of the block types we accept are just reserved for future use, but some get a specialized error message. + switch block.Type { + case "lifecycle": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported lifecycle block", + Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", + Subject: &block.DefRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in data block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +type ProviderConfigRef struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if alias not set +} + +func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var shimDiags hcl.Diagnostics + expr, shimDiags = shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + + // AbsTraversalForExpr produces only generic errors, so we'll discard + // the errors given and produce our own with extra context. If we didn't + // get any errors then we might still have warnings, though. 
+ if !travDiags.HasErrors() { + diags = append(diags, travDiags...) + } + + if len(traversal) < 1 || len(traversal) > 2 { + // A provider reference was given as a string literal in the legacy + // configuration language and there are lots of examples out there + // showing that usage, so we'll sniff for that situation here and + // produce a specialized error message for it to help users find + // the new correct form. + if exprIsNativeQuotedString(expr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "A provider configuration reference must not be given in quotes.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := &ProviderConfigRef{ + Name: traversal.RootName(), + NameRange: traversal[0].SourceRange(), + } + + if len(traversal) > 1 { + aliasStep, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", + Subject: traversal[1].SourceRange().Ptr(), + }) + return ret, diags + } + + ret.Alias = aliasStep.Name + ret.AliasRange = aliasStep.SourceRange().Ptr() + } + + return ret, diags +} + +// Addr returns the provider config address corresponding to the receiving +// config reference. +// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. 
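+//
+// For example (illustrative): a reference written as aws.west yields
+// addrs.ProviderConfig{Type: "aws", Alias: "west"}.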
+func (r *ProviderConfigRef) Addr() addrs.ProviderConfig {
+	return addrs.ProviderConfig{
+		Type:  r.Name,
+		Alias: r.Alias,
+	}
+}
+
+func (r *ProviderConfigRef) String() string {
+	if r == nil {
+		return "<nil>"
+	}
+	if r.Alias != "" {
+		return fmt.Sprintf("%s.%s", r.Name, r.Alias)
+	}
+	return r.Name
+}
+
+var commonResourceAttributes = []hcl.AttributeSchema{
+	{
+		Name: "count",
+	},
+	{
+		Name: "for_each",
+	},
+	{
+		Name: "provider",
+	},
+	{
+		Name: "depends_on",
+	},
+}
+
+var resourceBlockSchema = &hcl.BodySchema{
+	Attributes: commonResourceAttributes,
+	Blocks: []hcl.BlockHeaderSchema{
+		{Type: "locals"}, // reserved for future use
+		{Type: "lifecycle"},
+		{Type: "connection"},
+		{Type: "provisioner", LabelNames: []string{"type"}},
+	},
+}
+
+var dataBlockSchema = &hcl.BodySchema{
+	Attributes: commonResourceAttributes,
+	Blocks: []hcl.BlockHeaderSchema{
+		{Type: "lifecycle"}, // reserved for future use
+		{Type: "locals"},    // reserved for future use
+	},
+}
+
+var resourceLifecycleBlockSchema = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name: "create_before_destroy",
+		},
+		{
+			Name: "prevent_destroy",
+		},
+		{
+			Name: "ignore_changes",
+		},
+	},
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go
new file mode 100644
index 00000000000..3ae1bff6ad1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go
@@ -0,0 +1,118 @@
+package configs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes
+// corresponding to the elements given in the values map.
+//
+// This is useful in situations where, for example, values provided on the
+// command line can override values given in configuration, using MergeBodies.
+//
+// The given filename is used in case any diagnostics are returned. Since
+// the created body is synthetic, it is likely that this will not be a "real"
+// filename. For example, if the body is constructed from a command line
+// argument it could be a representation of that argument's name, such as
+// "-var=...".
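+//
+// A hypothetical usage sketch (configBody is an invented placeholder for a
+// body loaded elsewhere):
+//
+//     cli := SynthBody("-var=...", map[string]cty.Value{
+//         "region": cty.StringVal("us-east-1"),
+//     })
+//     merged := hcl.MergeBodies([]hcl.Body{configBody, cli})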
+func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return synthBody{ + Filename: filename, + Values: values, + } +} + +type synthBody struct { + Filename string + Values map[string]cty.Value +} + +func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remain, diags := b.PartialContent(schema) + remainS := remain.(synthBody) + for name := range remainS.Values { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), + Subject: b.synthRange().Ptr(), + }) + } + return content, diags +} + +func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + MissingItemRange: b.synthRange(), + } + + remainValues := make(map[string]cty.Value) + for attrName, val := range b.Values { + remainValues[attrName] = val + } + + for _, attrS := range schema.Attributes { + delete(remainValues, attrS.Name) + val, defined := b.Values[attrS.Name] + if !defined { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.synthRange().Ptr(), + }) + } + continue + } + content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) + } + + // We just ignore blocks altogether, because this body type never has + // nested blocks. + + remain := synthBody{ + Filename: b.Filename, + Values: remainValues, + } + + return content, remain, diags +} + +func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + ret := make(hcl.Attributes) + for name, val := range b.Values { + ret[name] = b.synthAttribute(name, val) + } + return ret, nil +} + +func (b synthBody) MissingItemRange() hcl.Range { + return b.synthRange() +} + +func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { + rng := b.synthRange() + return &hcl.Attribute{ + Name: name, + Expr: &hclsyntax.LiteralValueExpr{ + Val: val, + SrcRange: rng, + }, + NameRange: rng, + Range: rng, + } +} + +func (b synthBody) synthRange() hcl.Range { + return hcl.Range{ + Filename: b.Filename, + Start: hcl.Pos{Line: 1, Column: 1}, + End: hcl.Pos{Line: 1, Column: 1}, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go new file mode 100644 index 00000000000..5fbde43109a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go @@ -0,0 +1,63 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// exprIsNativeQuotedString determines whether the given expression looks like +// it's a quoted string in the HCL native syntax. +// +// This should be used sparingly only for situations where our legacy HCL +// decoding would've expected a keyword or reference in quotes but our new +// decoding expects the keyword or reference to be provided directly as +// an identifier-based expression. 
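+//
+// For example (illustrative): this lets the decoder distinguish the legacy
+// quoted form provider = "aws" from the expected identifier-based form
+// provider = aws, and report a specialized error for the former.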
+func exprIsNativeQuotedString(expr hcl.Expression) bool { + _, ok := expr.(*hclsyntax.TemplateExpr) + return ok +} + +// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is +// equivalent except that any required attributes are forced to not be required. +// +// This is useful for dealing with "override" config files, which are allowed +// to omit things that they don't wish to override from the main configuration. +// +// The returned schema may have some pointers in common with the given schema, +// so neither the given schema nor the returned schema should be modified after +// using this function in order to avoid confusion. +// +// Overrides are rarely used, so it's recommended to just create the override +// schema on the fly only when it's needed, rather than storing it in a global +// variable as we tend to do for a primary schema. +func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} + +// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that +// is equivalent except that it accepts an additional block type "dynamic" with +// a single label, used to recognize usage of the HCL dynamic block extension. +func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + + copy(ret.Blocks, schema.Blocks) + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: "dynamic", + LabelNames: []string{"type"}, + }) + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go new file mode 100644 index 00000000000..c02ad4b5526 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go @@ -0,0 +1,45 @@ +package configs + +// VariableTypeHint is an enumeration used for the Variable.TypeHint field, +// which is an incompletely-specified type for the variable which is used +// as a hint for whether a value provided in an ambiguous context (on the +// command line or in an environment variable) should be taken literally as a +// string or parsed as an HCL expression to produce a data structure. +// +// The type hint is applied to runtime values as well, but since it does not +// accurately describe a precise type it is not fully-sufficient to infer +// the dynamic type of a value passed through a variable. +// +// These hints use inaccurate terminology for historical reasons. Full details +// are in the documentation for each constant in this enumeration, but in +// summary: +// +// TypeHintString requires a primitive type +// TypeHintList requires a type that could be converted to a tuple +// TypeHintMap requires a type that could be converted to an object +type VariableTypeHint rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint + +// TypeHintNone indicates the absence of a type hint. 
+// Values specified in ambiguous contexts will be treated as literal strings,
+// as if TypeHintString were selected, but no runtime value checks will be
+// applied. This is a reasonable type hint for a module that is never intended
+// to be used at the top-level of a configuration, since descendent modules
+// never receive values from ambiguous contexts.
+const TypeHintNone VariableTypeHint = 0
+
+// TypeHintString indicates that a value provided in an ambiguous context
+// should be treated as a literal string, and additionally requires that the
+// runtime value for the variable is of a primitive type (string, number, bool).
+const TypeHintString VariableTypeHint = 'S'
+
+// TypeHintList indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of a tuple, list, or set type.
+const TypeHintList VariableTypeHint = 'L'
+
+// TypeHintMap indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of an object or map type.
+const TypeHintMap VariableTypeHint = 'M'
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go
new file mode 100644
index 00000000000..2b50428ce12
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go
@@ -0,0 +1,39 @@
+// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
+
+package configs
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[TypeHintNone-0]
+	_ = x[TypeHintString-83]
+	_ = x[TypeHintList-76]
+	_ = x[TypeHintMap-77]
+}
+
+const (
+	_VariableTypeHint_name_0 = "TypeHintNone"
+	_VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
+	_VariableTypeHint_name_2 = "TypeHintString"
+)
+
+var (
+	_VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
+)
+
+func (i VariableTypeHint) String() string {
+	switch {
+	case i == 0:
+		return _VariableTypeHint_name_0
+	case 76 <= i && i <= 77:
+		i -= 76
+		return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
+	case i == 83:
+		return _VariableTypeHint_name_2
+	default:
+		return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go
new file mode 100644
index 00000000000..e40ce163969
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go
@@ -0,0 +1,71 @@
+package configs
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// VersionConstraint represents a version constraint on some resource
+// (e.g. Terraform Core, a provider, a module, ...) that carries with it
+// a source range so that a helpful diagnostic can be printed in the event
+// that a particular constraint does not match.
+type VersionConstraint struct {
+	Required  version.Constraints
+	DeclRange hcl.Range
+}
+
+func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) {
+	ret := VersionConstraint{
+		DeclRange: attr.Range,
+	}
+
+	val, diags := attr.Expr.Value(nil)
+	if diags.HasErrors() {
+		return ret, diags
+	}
+	var err error
+	val, err = convert.Convert(val, cty.String)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid version constraint",
+			Detail:   fmt.Sprintf("A string value is required for %s.", attr.Name),
+			Subject:  attr.Expr.Range().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if val.IsNull() {
+		// A null version constraint is strange, but we'll just treat it
+		// like an empty constraint set.
+		return ret, diags
+	}
+
+	if !val.IsWhollyKnown() {
+		// If there is a syntax error, HCL sets the value of the given attribute
+		// to cty.DynamicVal. A diagnostic for the syntax error will already
+		// bubble up, so we will move forward gracefully here.
+		return ret, diags
+	}
+
+	constraintStr := val.AsString()
+	constraints, err := version.NewConstraint(constraintStr)
+	if err != nil {
+		// NewConstraint doesn't return user-friendly errors, so we'll just
+		// ignore the provided error and produce our own generic one.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid version constraint",
+			Detail:   "This string does not use correct version constraint syntax.", // Not very actionable :(
+			Subject:  attr.Expr.Range().Ptr(),
+		})
+		return ret, diags
+	}
+
+	ret.Required = constraints
+	return ret, diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go
new file mode 100644
index 00000000000..a150af96190
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go
@@ -0,0 +1,301 @@
+package dag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// AcyclicGraph is a specialization of Graph that cannot have cycles. With
+// this property, we get sane graph traversal.
+type AcyclicGraph struct {
+	Graph
+}
+
+// WalkFunc is the callback used for walking the graph.
+type WalkFunc func(Vertex) tfdiags.Diagnostics
+
+// DepthWalkFunc is a walk function that also receives the current depth of
+// the walk as an argument.
+type DepthWalkFunc func(Vertex, int) error
+
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+	return g
+}
+
+// Ancestors returns a Set that includes every Vertex yielded by walking down
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
+	s := new(Set)
+	start := AsVertexList(g.DownEdges(v))
+	memoFunc := func(v Vertex, d int) error {
+		s.Add(v)
+		return nil
+	}
+
+	if err := g.DepthFirstWalk(start, memoFunc); err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Descendents returns a Set that includes every Vertex yielded by walking up
+// from the provided starting Vertex v.
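+//
+// For example (illustrative): in a graph with edges A -> B -> C, walking up
+// from C visits B and then A, so the result is {A, B}; as with Ancestors,
+// the starting vertex itself is not included in the result.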
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { + s := new(Set) + start := AsVertexList(g.UpEdges(v)) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Root returns the root of the DAG, or an error. +// +// Complexity: O(V) +func (g *AcyclicGraph) Root() (Vertex, error) { + roots := make([]Vertex, 0, 1) + for _, v := range g.Vertices() { + if g.UpEdges(v).Len() == 0 { + roots = append(roots, v) + } + } + + if len(roots) > 1 { + // TODO(mitchellh): make this error message a lot better + return nil, fmt.Errorf("multiple roots: %#v", roots) + } + + if len(roots) == 0 { + return nil, fmt.Errorf("no roots found") + } + + return roots[0], nil +} + +// TransitiveReduction performs the transitive reduction of graph g in place. +// The transitive reduction of a graph is a graph with as few edges as +// possible with the same reachability as the original graph. This means +// that if there are three nodes A => B => C, and A connects to both +// B and C, and B connects to C, then the transitive reduction is the +// same graph with only a single edge between A and B, and a single edge +// between B and C. +// +// The graph must be valid for this operation to behave properly. If +// Validate() returns an error, the behavior is undefined and the results +// will likely be unexpected. +// +// Complexity: O(V(V+E)), or asymptotically O(VE) +func (g *AcyclicGraph) TransitiveReduction() { + // For each vertex u in graph g, do a DFS starting from each vertex + // v such that the edge (u,v) exists (v is a direct descendant of u). + // + // For each v-prime reachable from v, remove the edge (u, v-prime). + defer g.debug.BeginOperation("TransitiveReduction", "").End("") + + for _, u := range g.Vertices() { + uTargets := g.DownEdges(u) + vs := AsVertexList(g.DownEdges(u)) + + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { + shared := uTargets.Intersection(g.DownEdges(v)) + for _, vPrime := range AsVertexList(shared) { + g.RemoveEdge(BasicEdge(u, vPrime)) + } + + return nil + }) + } +} + +// Validate validates the DAG. A DAG is valid if it has a single root +// with no cycles. +func (g *AcyclicGraph) Validate() error { + if _, err := g.Root(); err != nil { + return err + } + + // Look for cycles of more than 1 component + var err error + cycles := g.Cycles() + if len(cycles) > 0 { + for _, cycle := range cycles { + cycleStr := make([]string, len(cycle)) + for j, vertex := range cycle { + cycleStr[j] = VertexName(vertex) + } + + err = multierror.Append(err, fmt.Errorf( + "Cycle: %s", strings.Join(cycleStr, ", "))) + } + } + + // Look for cycles to self + for _, e := range g.Edges() { + if e.Source() == e.Target() { + err = multierror.Append(err, fmt.Errorf( + "Self reference: %s", VertexName(e.Source()))) + } + } + + return err +} + +func (g *AcyclicGraph) Cycles() [][]Vertex { + var cycles [][]Vertex + for _, cycle := range StronglyConnected(&g.Graph) { + if len(cycle) > 1 { + cycles = append(cycles, cycle) + } + } + return cycles +} + +// Walk walks the graph, calling your callback as each node is visited. +// This will walk nodes in parallel if it can. The resulting diagnostics +// contains problems from all graphs visited, in no particular order. 
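+//
+// A hypothetical usage sketch:
+//
+//     diags := g.Walk(func(v Vertex) tfdiags.Diagnostics {
+//         // process v; any returned diagnostics are accumulated
+//         return nil
+//     })
+//     if diags.HasErrors() {
+//         // at least one callback reported an error
+//     }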
+func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics {
+	defer g.debug.BeginOperation(typeWalk, "").End("")
+
+	w := &Walker{Callback: cb, Reverse: true}
+	w.Update(g)
+	return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set to a
+// []Vertex.
+func AsVertexList(s *Set) []Vertex {
+	rawList := s.List()
+	vertexList := make([]Vertex, len(rawList))
+	for i, raw := range rawList {
+		vertexList[i] = raw.(Vertex)
+	}
+	return vertexList
+}
+
+type vertexAtDepth struct {
+	Vertex Vertex
+	Depth  int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	return g.depthFirstWalk(start, true, f)
+}
+
+// This internal method provides the option of not sorting the vertices during
+// the walk, which we use for the transitive reduction.
+// Some configurations can lead to fully-connected subgraphs, which makes our
+// transitive reduction algorithm O(n^3). This is still passable for the size
+// of our graphs, but the additional n^2 sort operations would make this
+// uncomputable in a reasonable amount of time.
+func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+
+		// Visit targets of this in a consistent order.
+		targets := AsVertexList(g.DownEdges(current.Vertex))
+
+		if sorted {
+			sort.Sort(byVertexName(targets))
+		}
+
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+	}
+
+	return nil
+}
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Add next set of targets in a consistent order.
+		targets := AsVertexList(g.UpEdges(current.Vertex))
+		sort.Sort(byVertexName(targets))
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName.
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int      { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+	return VertexName(b[i]) < VertexName(b[j])
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go
new file mode 100644
index 00000000000..7e6d2af3b1d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go
@@ -0,0 +1,282 @@
+package dag
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+	// Allows some nodes to decide to only show themselves when the user has
+	// requested the "verbose" graph.
+	Verbose bool
+
+	// Highlight Cycles
+	DrawCycles bool
+
+	// How many levels to expand modules as we draw
+	MaxDepth int
+
+	// use this to keep the cluster_ naming convention from the previous dot writer
+	cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The Dot method will be called which is expected to
+// return a representation of this node.
+type GraphNodeDotter interface {
+	// Dot is called to return the dot formatting for the node.
+	// The first parameter is the title of the node.
+	// The second parameter includes user-specified options that affect the dot
+	// graph. See DotOpts above for details.
+	DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+	Name  string
+	Attrs map[string]string
+}
+
+// Dot returns the DOT representation of this Graph.
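+//
+// A hypothetical usage sketch (marshalGraph is unexported, so this is only
+// reachable from within this package; rendering with Graphviz is assumed):
+//
+//     dot := g.Dot(&DotOpts{DrawCycles: true, MaxDepth: -1, Verbose: true})
+//     // write dot to a .dot file and render it, e.g. with `dot -Tsvg`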
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+	if opts == nil {
+		opts = &DotOpts{
+			DrawCycles: true,
+			MaxDepth:   -1,
+			Verbose:    true,
+		}
+	}
+
+	var w indentWriter
+	w.WriteString("digraph {\n")
+	w.Indent()
+
+	// some dot defaults
+	w.WriteString(`compound = "true"` + "\n")
+	w.WriteString(`newrank = "true"` + "\n")
+
+	// the top level graph is written as the first subgraph
+	w.WriteString(`subgraph "root" {` + "\n")
+	g.writeBody(opts, &w)
+
+	// cluster isn't really used other than for naming purposes in some graphs
+	opts.cluster = opts.MaxDepth != 0
+	maxDepth := opts.MaxDepth
+	if maxDepth == 0 {
+		maxDepth = -1
+	}
+
+	for _, s := range g.Subgraphs {
+		g.writeSubgraph(s, opts, maxDepth, &w)
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+	return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	name := v.Name
+	attrs := v.Attrs
+	if v.graphNodeDotter != nil {
+		node := v.graphNodeDotter.DotNode(name, opts)
+		if node == nil {
+			return []byte{}
+		}
+
+		newAttrs := make(map[string]string)
+		for k, v := range attrs {
+			newAttrs[k] = v
+		}
+		for k, v := range node.Attrs {
+			newAttrs[k] = v
+		}
+
+		name = node.Name
+		attrs = newAttrs
+	}
+
+	buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+	writeAttrs(&buf, attrs)
+	buf.WriteByte('\n')
+
+	return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	sourceName := g.vertexByID(e.Source).Name
+	targetName := g.vertexByID(e.Target).Name
+	s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+	buf.WriteString(s)
+	writeAttrs(&buf, e.Attrs)
+
+	return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+	return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
+
+// Write the subgraph body. This is recursive, and the depth argument is used
+// to record the current depth of iteration.
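Before the subgraph writer that the comment above introduces, here is a standalone sketch of the statement shapes the dot methods above emit: vertices render as `"[graph] name"` and edges as `"[graph] src" -> "[graph] tgt"`, with attributes appended in sorted `key = "value"` form. The resource-style names are hypothetical examples, not output from this code.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// nodeStmt reproduces the vertex-statement format used above, for
// illustration only.
func nodeStmt(graph, name string, attrs map[string]string) string {
	s := fmt.Sprintf("%q", "["+graph+"] "+name)
	if len(attrs) > 0 {
		kvs := make([]string, 0, len(attrs))
		for k, v := range attrs {
			kvs = append(kvs, fmt.Sprintf("%s = %q", k, v))
		}
		sort.Strings(kvs) // writeAttrs sorts attributes the same way
		s += " [" + strings.Join(kvs, ", ") + "]"
	}
	return s
}

func main() {
	fmt.Println(nodeStmt("root", "aws_instance.web", map[string]string{"shape": "box"}))
	// "[root] aws_instance.web" [shape = "box"]
	fmt.Printf("%q -> %q\n", "[root] aws_instance.web", "[root] provider.aws")
	// "[root] aws_instance.web" -> "[root] provider.aws"
}
```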
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+	if depth == 0 {
+		return
+	}
+	depth--
+
+	name := sg.Name
+	if opts.cluster {
+		// we prefix with cluster_ to match the old dot output
+		name = "cluster_" + name
+		sg.Attrs["label"] = sg.Name
+	}
+	w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+	sg.writeBody(opts, w)
+
+	for _, sg := range sg.Subgraphs {
+		g.writeSubgraph(sg, opts, depth, w)
+	}
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+	w.Indent()
+
+	for _, as := range attrStrings(g.Attrs) {
+		w.WriteString(as + "\n")
+	}
+
+	// list of Vertices that aren't to be included in the dot output
+	skip := map[string]bool{}
+
+	for _, v := range g.Vertices {
+		if v.graphNodeDotter == nil {
+			skip[v.ID] = true
+			continue
+		}
+
+		w.Write(v.dot(g, opts))
+	}
+
+	var dotEdges []string
+
+	if opts.DrawCycles {
+		for _, c := range g.Cycles {
+			if len(c) < 2 {
+				continue
+			}
+
+			for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+				if j >= len(c) {
+					j = 0
+				}
+				src := c[i]
+				tgt := c[j]
+
+				if skip[src.ID] || skip[tgt.ID] {
+					continue
+				}
+
+				e := &marshalEdge{
+					Name:   fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+					Source: src.ID,
+					Target: tgt.ID,
+					Attrs:  make(map[string]string),
+				}
+
+				dotEdges = append(dotEdges, cycleDot(e, g))
+				src = tgt
+			}
+		}
+	}
+
+	for _, e := range g.Edges {
+		dotEdges = append(dotEdges, e.dot(g))
+	}
+
+	// sort these again to match the old output
+	sort.Strings(dotEdges)
+
+	for _, e := range dotEdges {
+		w.WriteString(e + "\n")
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+	if len(attrs) > 0 {
+		buf.WriteString(" [")
+		buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+		buf.WriteString("]")
+	}
+}
+
+func attrStrings(attrs map[string]string) []string {
+	strings := make([]string, 0, len(attrs))
+	for k, v := range attrs {
+		strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+	}
+	sort.Strings(strings)
+	return strings
+}
+
+// Provide a bytes.Buffer-like structure, which will indent when starting a
+// newline.
+type indentWriter struct {
+	bytes.Buffer
+	level int
+}
+
+func (w *indentWriter) indent() {
+	newline := []byte("\n")
+	if !bytes.HasSuffix(w.Bytes(), newline) {
+		return
+	}
+	for i := 0; i < w.level; i++ {
+		w.Buffer.WriteString("\t")
+	}
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// the following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
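As the comment above says, the Write variants that follow intercept buffer writes and indent at line starts. A minimal re-implementation of that idea, assuming nothing beyond the standard library, shows why the check is "does the buffer currently end in a newline" rather than "does the incoming write start one":

```go
package main

import (
	"bytes"
	"fmt"
)

// indenter is a sketch of the indentWriter pattern above: before each write,
// if the buffer ends in '\n', emit one tab per indent level. It is not the
// vendored type itself.
type indenter struct {
	bytes.Buffer
	level int
}

func (w *indenter) maybeIndent() {
	if bytes.HasSuffix(w.Bytes(), []byte("\n")) {
		for i := 0; i < w.level; i++ {
			w.Buffer.WriteByte('\t')
		}
	}
}

func (w *indenter) WriteString(s string) (int, error) {
	w.maybeIndent()
	return w.Buffer.WriteString(s)
}

func main() {
	var w indenter
	w.WriteString("digraph {\n")
	w.level++
	w.WriteString("a -> b\n")
	w.level--
	w.WriteString("}\n")
	fmt.Print(w.String())
	// digraph {
	// 	a -> b
	// }
}
```

Note that, as in the vendored code, indentation is applied only at the start of a write, so a single write containing embedded newlines is not re-indented mid-string.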
+func (w *indentWriter) Write(b []byte) (int, error) { + w.indent() + return w.Buffer.Write(b) +} + +func (w *indentWriter) WriteString(s string) (int, error) { + w.indent() + return w.Buffer.WriteString(s) +} +func (w *indentWriter) WriteByte(b byte) error { + w.indent() + return w.Buffer.WriteByte(b) +} +func (w *indentWriter) WriteRune(r rune) (int, error) { + w.indent() + return w.Buffer.WriteRune(r) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go new file mode 100644 index 00000000000..f0d99ee3a61 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go @@ -0,0 +1,37 @@ +package dag + +import ( + "fmt" +) + +// Edge represents an edge in the graph, with a source and target vertex. +type Edge interface { + Source() Vertex + Target() Vertex + + Hashable +} + +// BasicEdge returns an Edge implementation that simply tracks the source +// and target given as-is. +func BasicEdge(source, target Vertex) Edge { + return &basicEdge{S: source, T: target} +} + +// basicEdge is a basic implementation of Edge that has the source and +// target vertex. +type basicEdge struct { + S, T Vertex +} + +func (e *basicEdge) Hashcode() interface{} { + return fmt.Sprintf("%p-%p", e.S, e.T) +} + +func (e *basicEdge) Source() Vertex { + return e.S +} + +func (e *basicEdge) Target() Vertex { + return e.T +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go new file mode 100644 index 00000000000..e7517a2062b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go @@ -0,0 +1,391 @@ +package dag + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "sort" +) + +// Graph is used to represent a dependency graph. +type Graph struct { + vertices *Set + edges *Set + downEdges map[interface{}]*Set + upEdges map[interface{}]*Set + + // JSON encoder for recording debug information + debug *encoder +} + +// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. +type Subgrapher interface { + Subgraph() Grapher +} + +// A Grapher is any type that returns a Grapher, mainly used to identify +// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they +// return themselves. +type Grapher interface { + DirectedGraph() Grapher +} + +// Vertex of the graph. +type Vertex interface{} + +// NamedVertex is an optional interface that can be implemented by Vertex +// to give it a human-friendly name that is used for outputting the graph. +type NamedVertex interface { + Vertex + Name() string +} + +func (g *Graph) DirectedGraph() Grapher { + return g +} + +// Vertices returns the list of all the vertices in the graph. +func (g *Graph) Vertices() []Vertex { + list := g.vertices.List() + result := make([]Vertex, len(list)) + for i, v := range list { + result[i] = v.(Vertex) + } + + return result +} + +// Edges returns the list of all the edges in the graph. +func (g *Graph) Edges() []Edge { + list := g.edges.List() + result := make([]Edge, len(list)) + for i, v := range list { + result[i] = v.(Edge) + } + + return result +} + +// EdgesFrom returns the list of edges from the given source. 
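An aside on edge identity from edge.go above, before EdgesFrom continues below: basicEdge hashes by formatting the addresses of its two endpoints, so connecting the same pair of vertex values twice produces the same code and a set keeps only one edge. A self-contained sketch of that scheme (the vertex type here is hypothetical):

```go
package main

import "fmt"

type vertex struct{ name string }

// edgeHashcode mirrors basicEdge.Hashcode above: identity comes from the
// endpoint pointers, not from the edge value itself.
func edgeHashcode(s, t *vertex) string {
	return fmt.Sprintf("%p-%p", s, t)
}

func main() {
	a, b := &vertex{"a"}, &vertex{"b"}
	set := map[string]bool{}
	set[edgeHashcode(a, b)] = true
	set[edgeHashcode(a, b)] = true // same endpoints, same code: still one entry
	fmt.Println(len(set))          // 1
}
```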
+func (g *Graph) EdgesFrom(v Vertex) []Edge { + var result []Edge + from := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Source()) == from { + result = append(result, e) + } + } + + return result +} + +// EdgesTo returns the list of edges to the given target. +func (g *Graph) EdgesTo(v Vertex) []Edge { + var result []Edge + search := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Target()) == search { + result = append(result, e) + } + } + + return result +} + +// HasVertex checks if the given Vertex is present in the graph. +func (g *Graph) HasVertex(v Vertex) bool { + return g.vertices.Include(v) +} + +// HasEdge checks if the given Edge is present in the graph. +func (g *Graph) HasEdge(e Edge) bool { + return g.edges.Include(e) +} + +// Add adds a vertex to the graph. This is safe to call multiple time with +// the same Vertex. +func (g *Graph) Add(v Vertex) Vertex { + g.init() + g.vertices.Add(v) + g.debug.Add(v) + return v +} + +// Remove removes a vertex from the graph. This will also remove any +// edges with this vertex as a source or target. +func (g *Graph) Remove(v Vertex) Vertex { + // Delete the vertex itself + g.vertices.Delete(v) + g.debug.Remove(v) + + // Delete the edges to non-existent things + for _, target := range g.DownEdges(v).List() { + g.RemoveEdge(BasicEdge(v, target)) + } + for _, source := range g.UpEdges(v).List() { + g.RemoveEdge(BasicEdge(source, v)) + } + + return nil +} + +// Replace replaces the original Vertex with replacement. If the original +// does not exist within the graph, then false is returned. Otherwise, true +// is returned. +func (g *Graph) Replace(original, replacement Vertex) bool { + // If we don't have the original, we can't do anything + if !g.vertices.Include(original) { + return false + } + + defer g.debug.BeginOperation("Replace", "").End("") + + // If they're the same, then don't do anything + if original == replacement { + return true + } + + // Add our new vertex, then copy all the edges + g.Add(replacement) + for _, target := range g.DownEdges(original).List() { + g.Connect(BasicEdge(replacement, target)) + } + for _, source := range g.UpEdges(original).List() { + g.Connect(BasicEdge(source, replacement)) + } + + // Remove our old vertex, which will also remove all the edges + g.Remove(original) + + return true +} + +// RemoveEdge removes an edge from the graph. +func (g *Graph) RemoveEdge(edge Edge) { + g.init() + g.debug.RemoveEdge(edge) + + // Delete the edge from the set + g.edges.Delete(edge) + + // Delete the up/down edges + if s, ok := g.downEdges[hashcode(edge.Source())]; ok { + s.Delete(edge.Target()) + } + if s, ok := g.upEdges[hashcode(edge.Target())]; ok { + s.Delete(edge.Source()) + } +} + +// DownEdges returns the outward edges from the source Vertex v. +func (g *Graph) DownEdges(v Vertex) *Set { + g.init() + return g.downEdges[hashcode(v)] +} + +// UpEdges returns the inward edges to the destination Vertex v. +func (g *Graph) UpEdges(v Vertex) *Set { + g.init() + return g.upEdges[hashcode(v)] +} + +// Connect adds an edge with the given source and target. This is safe to +// call multiple times with the same value. Note that the same value is +// verified through pointer equality of the vertices, not through the +// value of the edge itself. +func (g *Graph) Connect(edge Edge) { + g.init() + g.debug.Connect(edge) + + source := edge.Source() + target := edge.Target() + sourceCode := hashcode(source) + targetCode := hashcode(target) + + // Do we have this already? If so, don't add it again. 
+ if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { + return + } + + // Add the edge to the set + g.edges.Add(edge) + + // Add the down edge + s, ok := g.downEdges[sourceCode] + if !ok { + s = new(Set) + g.downEdges[sourceCode] = s + } + s.Add(target) + + // Add the up edge + s, ok = g.upEdges[targetCode] + if !ok { + s = new(Set) + g.upEdges[targetCode] = s + } + s.Add(source) +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) StringWithNodeTypes() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + targetNodes := make(map[string]Vertex) + for _, target := range targets.List() { + dep := VertexName(target) + deps = append(deps, dep) + targetNodes[dep] = target + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) + } + } + + return buf.String() +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) String() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s\n", name)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + for _, target := range targets.List() { + deps = append(deps, VertexName(target)) + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s\n", d)) + } + } + + return buf.String() +} + +func (g *Graph) init() { + if g.vertices == nil { + g.vertices = new(Set) + } + if g.edges == nil { + g.edges = new(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]*Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]*Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// MarshalJSON returns a JSON representation of the entire Graph. +func (g *Graph) MarshalJSON() ([]byte, error) { + dg := newMarshalGraph("root", g) + return json.MarshalIndent(dg, "", " ") +} + +// SetDebugWriter sets the io.Writer where the Graph will record debug +// information. After this is set, the graph will immediately encode itself to +// the stream, and continue to record all subsequent operations. 
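Before SetDebugWriter below, it is worth making the String format above concrete: vertex names are sorted, and each vertex's dependencies are sorted beneath it with two spaces of indentation, which is what keeps the output deterministic. A standalone sketch over a plain adjacency map (the data is hypothetical):

```go
package main

import (
	"fmt"
	"sort"
)

// graphString reproduces the sorted, two-level listing that Graph.String
// above produces, for illustration only.
func graphString(down map[string][]string) string {
	names := make([]string, 0, len(down))
	for name := range down {
		names = append(names, name)
	}
	sort.Strings(names)

	out := ""
	for _, name := range names {
		out += fmt.Sprintf("%s\n", name)
		deps := append([]string(nil), down[name]...)
		sort.Strings(deps)
		for _, d := range deps {
			out += fmt.Sprintf("  %s\n", d)
		}
	}
	return out
}

func main() {
	fmt.Print(graphString(map[string][]string{
		"b": {"c"},
		"a": {"c", "b"},
		"c": nil,
	}))
	// a
	//   b
	//   c
	// b
	//   c
	// c
}
```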
+func (g *Graph) SetDebugWriter(w io.Writer) { + g.debug = &encoder{w: w} + g.debug.Encode(newMarshalGraph("root", g)) +} + +// DebugVertexInfo encodes arbitrary information about a vertex in the graph +// debug logs. +func (g *Graph) DebugVertexInfo(v Vertex, info string) { + va := newVertexInfo(typeVertexInfo, v, info) + g.debug.Encode(va) +} + +// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug +// logs. +func (g *Graph) DebugEdgeInfo(e Edge, info string) { + ea := newEdgeInfo(typeEdgeInfo, e, info) + g.debug.Encode(ea) +} + +// DebugVisitInfo records a visit to a Vertex during a walk operation. +func (g *Graph) DebugVisitInfo(v Vertex, info string) { + vi := newVertexInfo(typeVisitInfo, v, info) + g.debug.Encode(vi) +} + +// DebugOperation marks the start of a set of graph transformations in +// the debug log, and returns a DebugOperationEnd func, which marks the end of +// the operation in the log. Additional information can be added to the log via +// the info parameter. +// +// The returned func's End method allows this method to be called from a single +// defer statement: +// defer g.DebugOperationBegin("OpName", "operating").End("") +// +// The returned function must be called to properly close the logical operation +// in the logs. +func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { + return g.debug.BeginOperation(operation, info) +} + +// VertexName returns the name of a vertex. +func VertexName(raw Vertex) string { + switch v := raw.(type) { + case NamedVertex: + return v.Name() + case fmt.Stringer: + return fmt.Sprintf("%s", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go new file mode 100644 index 00000000000..c567d27194f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go @@ -0,0 +1,474 @@ +package dag + +import ( + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +const ( + typeOperation = "Operation" + typeTransform = "Transform" + typeWalk = "Walk" + typeDepthFirstWalk = "DepthFirstWalk" + typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" + typeTransitiveReduction = "TransitiveReduction" + typeEdgeInfo = "EdgeInfo" + typeVertexInfo = "VertexInfo" + typeVisitInfo = "VisitInfo" +) + +// the marshal* structs are for serialization of the graph data. +type marshalGraph struct { + // Type is always "Graph", for identification as a top level object in the + // JSON stream. + Type string + + // Each marshal structure requires a unique ID so that it can be referenced + // by other structures. + ID string `json:",omitempty"` + + // Human readable name for this graph. + Name string `json:",omitempty"` + + // Arbitrary attributes that can be added to the output. + Attrs map[string]string `json:",omitempty"` + + // List of graph vertices, sorted by ID. + Vertices []*marshalVertex `json:",omitempty"` + + // List of edges, sorted by Source ID. + Edges []*marshalEdge `json:",omitempty"` + + // Any number of subgraphs. A subgraph itself is considered a vertex, and + // may be referenced by either end of an edge. + Subgraphs []*marshalGraph `json:",omitempty"` + + // Any lists of vertices that are included in cycles. 
+ Cycles [][]*marshalVertex `json:",omitempty"` +} + +// The add, remove, connect, removeEdge methods mirror the basic Graph +// manipulations to reconstruct a marshalGraph from a debug log. +func (g *marshalGraph) add(v *marshalVertex) { + g.Vertices = append(g.Vertices, v) + sort.Sort(vertices(g.Vertices)) +} + +func (g *marshalGraph) remove(v *marshalVertex) { + for i, existing := range g.Vertices { + if v.ID == existing.ID { + g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) + return + } + } +} + +func (g *marshalGraph) connect(e *marshalEdge) { + g.Edges = append(g.Edges, e) + sort.Sort(edges(g.Edges)) +} + +func (g *marshalGraph) removeEdge(e *marshalEdge) { + for i, existing := range g.Edges { + if e.Source == existing.Source && e.Target == existing.Target { + g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) + return + } + } +} + +func (g *marshalGraph) vertexByID(id string) *marshalVertex { + for _, v := range g.Vertices { + if id == v.ID { + return v + } + } + return nil +} + +type marshalVertex struct { + // Unique ID, used to reference this vertex from other structures. + ID string + + // Human readable name + Name string `json:",omitempty"` + + Attrs map[string]string `json:",omitempty"` + + // This is to help transition from the old Dot interfaces. We record if the + // node was a GraphNodeDotter here, so we can call it to get attributes. + graphNodeDotter GraphNodeDotter +} + +func newMarshalVertex(v Vertex) *marshalVertex { + dn, ok := v.(GraphNodeDotter) + if !ok { + dn = nil + } + + return &marshalVertex{ + ID: marshalVertexID(v), + Name: VertexName(v), + Attrs: make(map[string]string), + graphNodeDotter: dn, + } +} + +// vertices is a sort.Interface implementation for sorting vertices by ID +type vertices []*marshalVertex + +func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } +func (v vertices) Len() int { return len(v) } +func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +type marshalEdge struct { + // Human readable name + Name string + + // Source and Target Vertices by ID + Source string + Target string + + Attrs map[string]string `json:",omitempty"` +} + +func newMarshalEdge(e Edge) *marshalEdge { + return &marshalEdge{ + Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), + Source: marshalVertexID(e.Source()), + Target: marshalVertexID(e.Target()), + Attrs: make(map[string]string), + } +} + +// edges is a sort.Interface implementation for sorting edges by Source ID +type edges []*marshalEdge + +func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } +func (e edges) Len() int { return len(e) } +func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } + +// build a marshalGraph structure from a *Graph +func newMarshalGraph(name string, g *Graph) *marshalGraph { + mg := &marshalGraph{ + Type: "Graph", + Name: name, + Attrs: make(map[string]string), + } + + for _, v := range g.Vertices() { + id := marshalVertexID(v) + if sg, ok := marshalSubgrapher(v); ok { + smg := newMarshalGraph(VertexName(v), sg) + smg.ID = id + mg.Subgraphs = append(mg.Subgraphs, smg) + } + + mv := newMarshalVertex(v) + mg.Vertices = append(mg.Vertices, mv) + } + + sort.Sort(vertices(mg.Vertices)) + + for _, e := range g.Edges() { + mg.Edges = append(mg.Edges, newMarshalEdge(e)) + } + + sort.Sort(edges(mg.Edges)) + + for _, c := range (&AcyclicGraph{*g}).Cycles() { + var cycle []*marshalVertex + for _, v := range c { + mv := newMarshalVertex(v) + cycle = append(cycle, mv) + } + mg.Cycles = append(mg.Cycles, cycle) + } + + 
return mg +} + +// Attempt to return a unique ID for any vertex. +func marshalVertexID(v Vertex) string { + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return strconv.Itoa(int(val.Pointer())) + case reflect.Interface: + return strconv.Itoa(int(val.InterfaceData()[1])) + } + + if v, ok := v.(Hashable); ok { + h := v.Hashcode() + if h, ok := h.(string); ok { + return h + } + } + + // fallback to a name, which we hope is unique. + return VertexName(v) + + // we could try harder by attempting to read the arbitrary value from the + // interface, but we shouldn't get here from terraform right now. +} + +// check for a Subgrapher, and return the underlying *Graph. +func marshalSubgrapher(v Vertex) (*Graph, bool) { + sg, ok := v.(Subgrapher) + if !ok { + return nil, false + } + + switch g := sg.Subgraph().DirectedGraph().(type) { + case *Graph: + return g, true + case *AcyclicGraph: + return &g.Graph, true + } + + return nil, false +} + +// The DebugOperationEnd func type provides a way to call an End function via a +// method call, allowing for the chaining of methods in a defer statement. +type DebugOperationEnd func(string) + +// End calls function e with the info parameter, marking the end of this +// operation in the logs. +func (e DebugOperationEnd) End(info string) { e(info) } + +// encoder provides methods to write debug data to an io.Writer, and is a noop +// when no writer is present +type encoder struct { + sync.Mutex + w io.Writer +} + +// Encode is analogous to json.Encoder.Encode +func (e *encoder) Encode(i interface{}) { + if e == nil || e.w == nil { + return + } + e.Lock() + defer e.Unlock() + + js, err := json.Marshal(i) + if err != nil { + log.Println("[ERROR] dag:", err) + return + } + js = append(js, '\n') + + _, err = e.w.Write(js) + if err != nil { + log.Println("[ERROR] dag:", err) + return + } +} + +func (e *encoder) Add(v Vertex) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + AddVertex: newMarshalVertex(v), + }) +} + +// Remove records the removal of Vertex v. +func (e *encoder) Remove(v Vertex) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + RemoveVertex: newMarshalVertex(v), + }) +} + +func (e *encoder) Connect(edge Edge) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + AddEdge: newMarshalEdge(edge), + }) +} + +func (e *encoder) RemoveEdge(edge Edge) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + RemoveEdge: newMarshalEdge(edge), + }) +} + +// BeginOperation marks the start of set of graph transformations, and returns +// an EndDebugOperation func to be called once the opration is complete. 
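The DebugOperationEnd type above exists so that a begin/end pair can be written as a single defer statement, as the BeginOperation implementation below does. A minimal sketch of the pattern with illustrative names:

```go
package main

import "fmt"

// opEnd plays the role of DebugOperationEnd: a func type with an End method
// that simply invokes itself.
type opEnd func(info string)

func (e opEnd) End(info string) { e(info) }

func begin(name string) opEnd {
	fmt.Println("begin:", name)
	return func(info string) { fmt.Println("end:", name, info) }
}

func doWork() {
	// begin runs immediately; End is deferred until doWork returns.
	defer begin("Transform").End("ok")
	fmt.Println("working")
}

func main() {
	doWork()
	// begin: Transform
	// working
	// end: Transform ok
}
```

The trick is Go's defer evaluation order: the call producing the func value runs at the defer statement, while only the final .End call is postponed.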
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+	if e == nil {
+		return func(string) {}
+	}
+
+	e.Encode(marshalOperation{
+		Type:  typeOperation,
+		Begin: op,
+		Info:  info,
+	})
+
+	return func(info string) {
+		e.Encode(marshalOperation{
+			Type: typeOperation,
+			End:  op,
+			Info: info,
+		})
+	}
+}
+
+// structure for recording graph transformations
+type marshalTransform struct {
+	// Type: "Transform"
+	Type         string
+	AddEdge      *marshalEdge   `json:",omitempty"`
+	RemoveEdge   *marshalEdge   `json:",omitempty"`
+	AddVertex    *marshalVertex `json:",omitempty"`
+	RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+	switch {
+	case t.AddEdge != nil:
+		g.connect(t.AddEdge)
+	case t.RemoveEdge != nil:
+		g.removeEdge(t.RemoveEdge)
+	case t.AddVertex != nil:
+		g.add(t.AddVertex)
+	case t.RemoveVertex != nil:
+		g.remove(t.RemoveVertex)
+	}
+}
+
+// this structure allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+	Type string
+	Map  map[string]interface{}
+	JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+	s.JSON = d
+	err := json.Unmarshal(d, &s.Map)
+	if err != nil {
+		return err
+	}
+
+	if t, ok := s.Map["Type"]; ok {
+		s.Type, _ = t.(string)
+	}
+	return nil
+}
+
+// structure for recording the beginning and end of any multi-step
+// transformations. These are informational, and not required to reproduce the
+// graph state.
+type marshalOperation struct {
+	Type  string
+	Begin string `json:",omitempty"`
+	End   string `json:",omitempty"`
+	Info  string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+	dec := json.NewDecoder(r)
+
+	// a stream should always start with a graph
+	g := &marshalGraph{}
+
+	err := dec.Decode(g)
+	if err != nil {
+		return nil, err
+	}
+
+	// now replay any operations that occurred on the original graph
+	for dec.More() {
+		s := &streamDecode{}
+		err := dec.Decode(s)
+		if err != nil {
+			return g, err
+		}
+
+		// the only Type we're concerned with here is Transform to complete the
+		// Graph
+		if s.Type != typeTransform {
+			continue
+		}
+
+		t := &marshalTransform{}
+		err = json.Unmarshal(s.JSON, t)
+		if err != nil {
+			return g, err
+		}
+		t.Transform(g)
+	}
+	return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+	Type   string
+	Vertex *marshalVertex
+	Info   string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+	return &marshalVertexInfo{
+		Type:   infoType,
+		Vertex: newMarshalVertex(v),
+		Info:   info,
+	}
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+	Type string
+	Edge *marshalEdge
+	Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+	return &marshalEdgeInfo{
+		Type: infoType,
+		Edge: newMarshalEdge(e),
+		Info: info,
+	}
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
+// graph to dot format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+// Encode extra information from the json log into the Dot. +func JSON2Dot(r io.Reader) ([]byte, error) { + g, err := decodeGraph(r) + if err != nil { + return nil, err + } + + return g.Dot(nil), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go new file mode 100644 index 00000000000..92b42151d75 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go @@ -0,0 +1,123 @@ +package dag + +import ( + "sync" +) + +// Set is a set data structure. +type Set struct { + m map[interface{}]interface{} + once sync.Once +} + +// Hashable is the interface used by set to get the hash code of a value. +// If this isn't given, then the value of the item being added to the set +// itself is used as the comparison value. +type Hashable interface { + Hashcode() interface{} +} + +// hashcode returns the hashcode used for set elements. +func hashcode(v interface{}) interface{} { + if h, ok := v.(Hashable); ok { + return h.Hashcode() + } + + return v +} + +// Add adds an item to the set +func (s *Set) Add(v interface{}) { + s.once.Do(s.init) + s.m[hashcode(v)] = v +} + +// Delete removes an item from the set. +func (s *Set) Delete(v interface{}) { + s.once.Do(s.init) + delete(s.m, hashcode(v)) +} + +// Include returns true/false of whether a value is in the set. +func (s *Set) Include(v interface{}) bool { + s.once.Do(s.init) + _, ok := s.m[hashcode(v)] + return ok +} + +// Intersection computes the set intersection with other. +func (s *Set) Intersection(other *Set) *Set { + result := new(Set) + if s == nil { + return result + } + if other != nil { + for _, v := range s.m { + if other.Include(v) { + result.Add(v) + } + } + } + + return result +} + +// Difference returns a set with the elements that s has but +// other doesn't. +func (s *Set) Difference(other *Set) *Set { + result := new(Set) + if s != nil { + for k, v := range s.m { + var ok bool + if other != nil { + _, ok = other.m[k] + } + if !ok { + result.Add(v) + } + } + } + + return result +} + +// Filter returns a set that contains the elements from the receiver +// where the given callback returns true. +func (s *Set) Filter(cb func(interface{}) bool) *Set { + result := new(Set) + + for _, v := range s.m { + if cb(v) { + result.Add(v) + } + } + + return result +} + +// Len is the number of items in the set. +func (s *Set) Len() int { + if s == nil { + return 0 + } + + return len(s.m) +} + +// List returns the list of set elements. +func (s *Set) List() []interface{} { + if s == nil { + return nil + } + + r := make([]interface{}, 0, len(s.m)) + for _, v := range s.m { + r = append(r, v) + } + + return r +} + +func (s *Set) init() { + s.m = make(map[interface{}]interface{}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go new file mode 100644 index 00000000000..9d8b25ce2ca --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go @@ -0,0 +1,107 @@ +package dag + +// StronglyConnected returns the list of strongly connected components +// within the Graph g. This information is primarily used by this package +// for cycle detection, but strongly connected components have widespread +// use. 
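Before the strongly-connected-components implementation that the comment above introduces, a brief aside on the Set type from set.go: values implementing Hashcode are keyed by that code, so two distinct values with the same code occupy one slot, while everything else is keyed by the value itself. A self-contained sketch of that contract (the named type is hypothetical):

```go
package main

import "fmt"

type hashable interface{ Hashcode() interface{} }

// named implements the Hashable contract above: identity is the id field,
// not the whole value.
type named struct{ id, label string }

func (n named) Hashcode() interface{} { return n.id }

// hashcode mirrors the package-level helper in set.go.
func hashcode(v interface{}) interface{} {
	if h, ok := v.(hashable); ok {
		return h.Hashcode()
	}
	return v
}

func main() {
	set := map[interface{}]interface{}{}
	set[hashcode(named{"n1", "first"})] = named{"n1", "first"}
	set[hashcode(named{"n1", "second"})] = named{"n1", "second"} // same id: replaces
	set[hashcode("plain")] = "plain"
	fmt.Println(len(set)) // 2
}
```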
+func StronglyConnected(g *Graph) [][]Vertex {
+	vs := g.Vertices()
+	acct := sccAcct{
+		NextIndex:   1,
+		VertexIndex: make(map[Vertex]int, len(vs)),
+	}
+	for _, v := range vs {
+		// Recurse on any non-visited nodes
+		if acct.VertexIndex[v] == 0 {
+			stronglyConnected(&acct, g, v)
+		}
+	}
+	return acct.SCC
+}
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+	// Initial vertex visit
+	index := acct.visit(v)
+	minIdx := index
+
+	for _, raw := range g.DownEdges(v).List() {
+		target := raw.(Vertex)
+		targetIdx := acct.VertexIndex[target]
+
+		// Recurse on successor if not yet visited
+		if targetIdx == 0 {
+			minIdx = min(minIdx, stronglyConnected(acct, g, target))
+		} else if acct.inStack(target) {
+			// Check if the vertex is in the stack
+			minIdx = min(minIdx, targetIdx)
+		}
+	}
+
+	// Pop the strongly connected components off the stack if
+	// this is a root vertex
+	if index == minIdx {
+		var scc []Vertex
+		for {
+			v2 := acct.pop()
+			scc = append(scc, v2)
+			if v2 == v {
+				break
+			}
+		}
+
+		acct.SCC = append(acct.SCC, scc)
+	}
+
+	return minIdx
+}
+
+func min(a, b int) int {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnectedComponents algorithm
+type sccAcct struct {
+	NextIndex   int
+	VertexIndex map[Vertex]int
+	Stack       []Vertex
+	SCC         [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+	idx := s.NextIndex
+	s.VertexIndex[v] = idx
+	s.NextIndex++
+	s.push(v)
+	return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+	s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+	n := len(s.Stack)
+	if n == 0 {
+		return nil
+	}
+	vertex := s.Stack[n-1]
+	s.Stack = s.Stack[:n-1]
+	return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+	for _, n := range s.Stack {
+		if n == needle {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go
new file mode 100644
index 00000000000..5ddf8ef34c1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go
@@ -0,0 +1,454 @@
+package dag
+
+import (
+	"errors"
+	"log"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and dependency
+// waiter for each vertex).
In general this should be of no concern unless +// there are a huge number of vertices. +// +// The walk is depth first by default. This can be changed with the Reverse +// option. +// +// A single walker is only valid for one graph walk. After the walk is complete +// you must construct a new walker to walk again. State for the walk is never +// deleted in case vertices or edges are changed. +type Walker struct { + // Callback is what is called for each vertex + Callback WalkFunc + + // Reverse, if true, causes the source of an edge to depend on a target. + // When false (default), the target depends on the source. + Reverse bool + + // changeLock must be held to modify any of the fields below. Only Update + // should modify these fields. Modifying them outside of Update can cause + // serious problems. + changeLock sync.Mutex + vertices Set + edges Set + vertexMap map[Vertex]*walkerVertex + + // wait is done when all vertices have executed. It may become "undone" + // if new vertices are added. + wait sync.WaitGroup + + // diagsMap contains the diagnostics recorded so far for execution, + // and upstreamFailed contains all the vertices whose problems were + // caused by upstream failures, and thus whose diagnostics should be + // excluded from the final set. + // + // Readers and writers of either map must hold diagsLock. + diagsMap map[Vertex]tfdiags.Diagnostics + upstreamFailed map[Vertex]struct{} + diagsLock sync.Mutex +} + +type walkerVertex struct { + // These should only be set once on initialization and never written again. + // They are not protected by a lock since they don't need to be since + // they are write-once. + + // DoneCh is closed when this vertex has completed execution, regardless + // of success. + // + // CancelCh is closed when the vertex should cancel execution. If execution + // is already complete (DoneCh is closed), this has no effect. Otherwise, + // execution is cancelled as quickly as possible. + DoneCh chan struct{} + CancelCh chan struct{} + + // Dependency information. Any changes to any of these fields requires + // holding DepsLock. + // + // DepsCh is sent a single value that denotes whether the upstream deps + // were successful (no errors). Any value sent means that the upstream + // dependencies are complete. No other values will ever be sent again. + // + // DepsUpdateCh is closed when there is a new DepsCh set. + DepsCh chan bool + DepsUpdateCh chan struct{} + DepsLock sync.Mutex + + // Below is not safe to read/write in parallel. This behavior is + // enforced by changes only happening in Update. Nothing else should + // ever modify these. + deps map[Vertex]chan struct{} + depsCancelCh chan struct{} +} + +// Wait waits for the completion of the walk and returns diagnostics describing +// any problems that arose. Update should be called to populate the walk with +// vertices and edges prior to calling this. +// +// Wait will return as soon as all currently known vertices are complete. +// If you plan on calling Update with more vertices in the future, you +// should not call Wait until after this is done. +func (w *Walker) Wait() tfdiags.Diagnostics { + // Wait for completion + w.wait.Wait() + + var diags tfdiags.Diagnostics + w.diagsLock.Lock() + for v, vDiags := range w.diagsMap { + if _, upstream := w.upstreamFailed[v]; upstream { + // Ignore diagnostics for nodes that had failed upstreams, since + // the downstream diagnostics are likely to be redundant. 
+ continue + } + diags = diags.Append(vDiags) + } + w.diagsLock.Unlock() + + return diags +} + +// Update updates the currently executing walk with the given graph. +// This will perform a diff of the vertices and edges and update the walker. +// Already completed vertices remain completed (including any errors during +// their execution). +// +// This returns immediately once the walker is updated; it does not wait +// for completion of the walk. +// +// Multiple Updates can be called in parallel. Update can be called at any +// time during a walk. +func (w *Walker) Update(g *AcyclicGraph) { + log.Print("[TRACE] dag/walk: updating graph") + var v, e *Set + if g != nil { + v, e = g.vertices, g.edges + } + + // Grab the change lock so no more updates happen but also so that + // no new vertices are executed during this time since we may be + // removing them. + w.changeLock.Lock() + defer w.changeLock.Unlock() + + // Initialize fields + if w.vertexMap == nil { + w.vertexMap = make(map[Vertex]*walkerVertex) + } + + // Calculate all our sets + newEdges := e.Difference(&w.edges) + oldEdges := w.edges.Difference(e) + newVerts := v.Difference(&w.vertices) + oldVerts := w.vertices.Difference(v) + + // Add the new vertices + for _, raw := range newVerts.List() { + v := raw.(Vertex) + + // Add to the waitgroup so our walk is not done until everything finishes + w.wait.Add(1) + + // Add to our own set so we know about it already + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) + w.vertices.Add(raw) + + // Initialize the vertex info + info := &walkerVertex{ + DoneCh: make(chan struct{}), + CancelCh: make(chan struct{}), + deps: make(map[Vertex]chan struct{}), + } + + // Add it to the map and kick off the walk + w.vertexMap[v] = info + } + + // Remove the old vertices + for _, raw := range oldVerts.List() { + v := raw.(Vertex) + + // Get the vertex info so we can cancel it + info, ok := w.vertexMap[v] + if !ok { + // This vertex for some reason was never in our map. This + // shouldn't be possible. + continue + } + + // Cancel the vertex + close(info.CancelCh) + + // Delete it out of the map + delete(w.vertexMap, v) + + log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) + w.vertices.Delete(raw) + } + + // Add the new edges + var changedDeps Set + for _, raw := range newEdges.List() { + edge := raw.(Edge) + waiter, dep := w.edgeParts(edge) + + // Get the info for the waiter + waiterInfo, ok := w.vertexMap[waiter] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Get the info for the dep + depInfo, ok := w.vertexMap[dep] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Add the dependency to our waiter + waiterInfo.deps[dep] = depInfo.DoneCh + + // Record that the deps changed for this waiter + changedDeps.Add(waiter) + + log.Printf( + "[TRACE] dag/walk: added edge: %q waiting on %q", + VertexName(waiter), VertexName(dep)) + w.edges.Add(raw) + } + + // Process reoved edges + for _, raw := range oldEdges.List() { + edge := raw.(Edge) + waiter, dep := w.edgeParts(edge) + + // Get the info for the waiter + waiterInfo, ok := w.vertexMap[waiter] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. 
+ continue + } + + // Delete the dependency from the waiter + delete(waiterInfo.deps, dep) + + // Record that the deps changed for this waiter + changedDeps.Add(waiter) + + log.Printf( + "[TRACE] dag/walk: removed edge: %q waiting on %q", + VertexName(waiter), VertexName(dep)) + w.edges.Delete(raw) + } + + // For each vertex with changed dependencies, we need to kick off + // a new waiter and notify the vertex of the changes. + for _, raw := range changedDeps.List() { + v := raw.(Vertex) + info, ok := w.vertexMap[v] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Create a new done channel + doneCh := make(chan bool, 1) + + // Create the channel we close for cancellation + cancelCh := make(chan struct{}) + + // Build a new deps copy + deps := make(map[Vertex]<-chan struct{}) + for k, v := range info.deps { + deps[k] = v + } + + // Update the update channel + info.DepsLock.Lock() + if info.DepsUpdateCh != nil { + close(info.DepsUpdateCh) + } + info.DepsCh = doneCh + info.DepsUpdateCh = make(chan struct{}) + info.DepsLock.Unlock() + + // Cancel the older waiter + if info.depsCancelCh != nil { + close(info.depsCancelCh) + } + info.depsCancelCh = cancelCh + + log.Printf( + "[TRACE] dag/walk: dependencies changed for %q, sending new deps", + VertexName(v)) + + // Start the waiter + go w.waitDeps(v, deps, doneCh, cancelCh) + } + + // Start all the new vertices. We do this at the end so that all + // the edge waiters and changes are setup above. + for _, raw := range newVerts.List() { + v := raw.(Vertex) + go w.walkVertex(v, w.vertexMap[v]) + } +} + +// edgeParts returns the waiter and the dependency, in that order. +// The waiter is waiting on the dependency. +func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { + if w.Reverse { + return e.Source(), e.Target() + } + + return e.Target(), e.Source() +} + +// walkVertex walks a single vertex, waiting for any dependencies before +// executing the callback. +func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { + // When we're done executing, lower the waitgroup count + defer w.wait.Done() + + // When we're done, always close our done channel + defer close(info.DoneCh) + + // Wait for our dependencies. We create a [closed] deps channel so + // that we can immediately fall through to load our actual DepsCh. + var depsSuccess bool + var depsUpdateCh chan struct{} + depsCh := make(chan bool, 1) + depsCh <- true + close(depsCh) + for { + select { + case <-info.CancelCh: + // Cancel + return + + case depsSuccess = <-depsCh: + // Deps complete! Mark as nil to trigger completion handling. + depsCh = nil + + case <-depsUpdateCh: + // New deps, reloop + } + + // Check if we have updated dependencies. This can happen if the + // dependencies were satisfied exactly prior to an Update occurring. + // In that case, we'd like to take into account new dependencies + // if possible. + info.DepsLock.Lock() + if info.DepsCh != nil { + depsCh = info.DepsCh + info.DepsCh = nil + } + if info.DepsUpdateCh != nil { + depsUpdateCh = info.DepsUpdateCh + } + info.DepsLock.Unlock() + + // If we still have no deps channel set, then we're done! + if depsCh == nil { + break + } + } + + // If we passed dependencies, we just want to check once more that + // we're not cancelled, since this can happen just as dependencies pass. + select { + case <-info.CancelCh: + // Cancelled during an update while dependencies completed. 
+ return + default: + } + + // Run our callback or note that our upstream failed + var diags tfdiags.Diagnostics + var upstreamFailed bool + if depsSuccess { + log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) + diags = w.Callback(v) + } else { + log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) + // This won't be displayed to the user because we'll set upstreamFailed, + // but we need to ensure there's at least one error in here so that + // the failures will cascade downstream. + diags = diags.Append(errors.New("upstream dependencies failed")) + upstreamFailed = true + } + + // Record the result (we must do this after execution because we mustn't + // hold diagsLock while visiting a vertex.) + w.diagsLock.Lock() + if w.diagsMap == nil { + w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) + } + w.diagsMap[v] = diags + if w.upstreamFailed == nil { + w.upstreamFailed = make(map[Vertex]struct{}) + } + if upstreamFailed { + w.upstreamFailed[v] = struct{}{} + } + w.diagsLock.Unlock() +} + +func (w *Walker) waitDeps( + v Vertex, + deps map[Vertex]<-chan struct{}, + doneCh chan<- bool, + cancelCh <-chan struct{}) { + + // For each dependency given to us, wait for it to complete + for dep, depCh := range deps { + DepSatisfied: + for { + select { + case <-depCh: + // Dependency satisfied! + break DepSatisfied + + case <-cancelCh: + // Wait cancelled. Note that we didn't satisfy dependencies + // so that anything waiting on us also doesn't run. + doneCh <- false + return + + case <-time.After(time.Second * 5): + log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", + VertexName(v), VertexName(dep)) + } + } + } + + // Dependencies satisfied! We need to check if any errored + w.diagsLock.Lock() + defer w.diagsLock.Unlock() + for dep := range deps { + if w.diagsMap[dep].HasErrors() { + // One of our dependencies failed, so return false + doneCh <- false + return + } + } + + // All dependencies satisfied and successful + doneCh <- true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go new file mode 100644 index 00000000000..98b159b4739 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go @@ -0,0 +1,123 @@ +package earlyconfig + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. 
+	Parent *Config
+
+	// Path is a sequence of module logical names that traverse from the root
+	// module to this config. Path is empty for the root module.
+	//
+	// This should only be used to display paths to the end-user in rare cases
+	// where we are talking about the static module tree, before module calls
+	// have been resolved. In most cases, an addrs.ModuleInstance describing
+	// a node in the dynamic module tree is better, since it will then include
+	// any keys resulting from evaluating "count" and "for_each" arguments.
+	Path addrs.Module
+
+	// ChildModules points to the Config for each of the direct child modules
+	// called from this module. The keys in this map match the keys in
+	// Module.ModuleCalls.
+	Children map[string]*Config
+
+	// Module points to the object describing the configuration for the
+	// various elements (variables, resources, etc) defined by this module.
+	Module *tfconfig.Module
+
+	// CallPos is the source position for the header of the module block that
+	// requested this module.
+	//
+	// This field is meaningless for the root module, where its contents are undefined.
+	CallPos tfconfig.SourcePos
+
+	// SourceAddr is the source address that the referenced module was requested
+	// from, as specified in configuration.
+	//
+	// This field is meaningless for the root module, where its contents are undefined.
+	SourceAddr string
+
+	// Version is the specific version that was selected for this module,
+	// based on version constraints given in configuration.
+	//
+	// This field is nil if the module was loaded from a non-registry source,
+	// since versions are not supported for other sources.
+	//
+	// This field is meaningless for the root module, where it will always
+	// be nil.
+	Version *version.Version
+}
+
+// ProviderDependencies returns the provider dependencies for the receiving
+// config, including all of its descendant modules.
+func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	var name string
+	if len(c.Path) > 0 {
+		name = c.Path[len(c.Path)-1]
+	}
+
+	ret := &moduledeps.Module{
+		Name: name,
+	}
+
+	providers := make(moduledeps.Providers)
+	for name, reqs := range c.Module.RequiredProviders {
+		inst := moduledeps.ProviderInstance(name)
+		var constraints version.Constraints
+		for _, reqStr := range reqs {
+			if reqStr != "" {
+				constraint, err := version.NewConstraint(reqStr)
+				if err != nil {
+					diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
+						Severity: tfconfig.DiagError,
+						Summary:  "Invalid provider version constraint",
+						Detail:   fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name),
+					}))
+					continue
+				}
+				constraints = append(constraints, constraint...)
+ } + } + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.NewConstraints(constraints), + Reason: moduledeps.ProviderDependencyExplicit, + } + } + ret.Providers = providers + + childNames := make([]string, 0, len(c.Children)) + for name := range c.Children { + childNames = append(childNames, name) + } + sort.Strings(childNames) + + for _, name := range childNames { + child, childDiags := c.Children[name].ProviderDependencies() + ret.Children = append(ret.Children, child) + diags = diags.Append(childDiags) + } + + return ret, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go new file mode 100644 index 00000000000..3707f2738df --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go @@ -0,0 +1,144 @@ +package earlyconfig + +import ( + "fmt" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := map[string]*Config{} + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + var vc version.Constraints + if strings.TrimSpace(call.Version) != "" { + var err error + vc, err = version.NewConstraint(call.Version) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), + })) + continue + } + } + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.Source, + VersionConstraints: vc, + Parent: parent, + CallPos: call.Pos, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. 
+ continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallPos: call.Pos, + SourceAddr: call.Source, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = diags.Append(modDiags) + + ret[call.Name] = child + } + + return ret, diags +} + +// ModuleRequest is used as part of the ModuleWalker interface used with +// function BuildConfig. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // VersionConstraint is the version constraint applied to the module in + // configuration. + VersionConstraints version.Constraints + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source position for the header of the "module" block + // in configuration that prompted this request. + CallPos tfconfig.SourcePos +} + +// ModuleWalker is an interface used with BuildConfig. +type ModuleWalker interface { + LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. 
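The comment above introduces the func-to-interface adapter declared just below: a named func type satisfies the one-method ModuleWalker interface by calling itself. A self-contained sketch of the same pattern with hypothetical names:

```go
package main

import "fmt"

type moduleWalker interface {
	LoadModule(source string) (string, error)
}

// moduleWalkerFunc adapts a bare function to the interface, exactly like
// ModuleWalkerFunc below (types here are simplified stand-ins).
type moduleWalkerFunc func(source string) (string, error)

func (f moduleWalkerFunc) LoadModule(source string) (string, error) {
	return f(source)
}

func install(w moduleWalker, source string) {
	mod, err := w.LoadModule(source)
	fmt.Println(mod, err)
}

func main() {
	install(moduleWalkerFunc(func(source string) (string, error) {
		return "loaded " + source, nil // hypothetical loader
	}), "./modules/vpc")
}
```

This mirrors the standard library's http.HandlerFunc idiom, letting callers pass a closure anywhere an implementation is required.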
+type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) + +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + return f(req) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go new file mode 100644 index 00000000000..b2e1807eb7a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go @@ -0,0 +1,78 @@ +package earlyconfig + +import ( + "fmt" + + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { + ret := make(tfdiags.Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = wrapDiagnostic(diag) + } + return ret +} + +func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { + return wrappedDiagnostic{ + d: diag, + } +} + +type wrappedDiagnostic struct { + d tfconfig.Diagnostic +} + +func (d wrappedDiagnostic) Severity() tfdiags.Severity { + switch d.d.Severity { + case tfconfig.DiagError: + return tfdiags.Error + case tfconfig.DiagWarning: + return tfdiags.Warning + default: + // Should never happen since there are no other severities + return 0 + } +} + +func (d wrappedDiagnostic) Description() tfdiags.Description { + // Since the inspect library doesn't produce precise source locations, + // we include the position information as part of the error message text. + // See the comment inside method "Source" for more information. + switch { + case d.d.Pos == nil: + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: d.d.Detail, + } + case d.d.Detail != "": + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), + } + default: + return tfdiags.Description{ + Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), + } + } +} + +func (d wrappedDiagnostic) Source() tfdiags.Source { + // Since the inspect library is constrained by the lowest common denominator + // between legacy HCL and modern HCL, it only returns ranges at whole-line + // granularity, and that isn't sufficient to populate a tfdiags.Source + // and so we'll just omit ranges altogether and include the line number in + // the Description text. + // + // Callers that want to return nicer errors should consider reacting to + // earlyconfig errors by attempting a follow-up parse with the normal + // config loader, which can produce more precise source location + // information. 
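Before returning to the diagnostic helpers below, it may help to see how the pieces of this package fit together. The following is a minimal sketch of driving BuildConfig with a ModuleWalkerFunc; the local-path-only resolution is an invented example, and since earlyconfig is internal to the SDK, outside code cannot actually import it:

import (
	"path/filepath"

	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/terraform-config-inspect/tfconfig"
	"github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig"
	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
)

func loadExample() {
	// Walker that only understands relative local paths (an assumption
	// for illustration; real installers also handle registry addresses).
	walker := earlyconfig.ModuleWalkerFunc(func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
		mod, diags := earlyconfig.LoadModule(filepath.FromSlash(req.SourceAddr))
		return mod, nil, diags // local modules carry no version
	})

	root, diags := earlyconfig.LoadModule(".")
	cfg, moreDiags := earlyconfig.BuildConfig(root, walker)
	diags = append(diags, moreDiags...)
	_ = cfg
}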
+ return tfdiags.Source{} +} + +func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go new file mode 100644 index 00000000000..a9cf10f37c2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go @@ -0,0 +1,20 @@ +// Package earlyconfig is a specialized alternative to the top-level "configs" +// package that does only shallow processing of configuration and is therefore +// able to be much more liberal than the full config loader in what it accepts. +// +// In particular, it can accept both current and legacy HCL syntax, and it +// ignores top-level blocks that it doesn't recognize. These two characteristics +// make this package ideal for dependency-checking use-cases so that we are +// more likely to be able to return an error message about an explicit +// incompatibility than to return a less-actionable message about a construct +// not being supported. +// +// However, its liberal approach also means it should be used sparingly. It +// exists primarily for "terraform init", so that it is able to detect +// incompatibilities more robustly when installing dependencies. For most +// other use-cases, use the "configs" and "configs/configload" packages. +// +// Package earlyconfig is a wrapper around the terraform-config-inspect +// codebase, adding to it just some helper functionality for Terraform's own +// use-cases. +package earlyconfig diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go new file mode 100644 index 00000000000..11eff2eb692 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go @@ -0,0 +1,13 @@ +package earlyconfig + +import ( + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// LoadModule loads some top-level metadata for the module in the given +// directory. +func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { + mod, diags := tfconfig.LoadModule(dir) + return mod, wrapDiagnostics(diags) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go new file mode 100644 index 00000000000..1bb7b9f2f96 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go @@ -0,0 +1,152 @@ +package flatmap + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" +) + +// Expand takes a map and a key (prefix) and expands that value into +// a more complex structure. This is the reverse of the Flatten operation. 
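As a concrete illustration of the flatmap encoding that Expand reverses (keys and values invented for the example):

flat := map[string]string{
	"name":     "web",
	"ports.#":  "2",
	"ports.0":  "80",
	"ports.1":  "443",
	"tags.%":   "1",
	"tags.env": "prod",
}

flatmap.Expand(flat, "name")  // "web"
flatmap.Expand(flat, "ports") // []interface{}{"80", "443"}
flatmap.Expand(flat, "tags")  // map[string]interface{}{"env": "prod"}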
+func Expand(m map[string]string, key string) interface{} {
+	// If the key is exactly a key in the map, just return it
+	if v, ok := m[key]; ok {
+		if v == "true" {
+			return true
+		} else if v == "false" {
+			return false
+		}
+
+		return v
+	}
+
+	// Check if the key is an array, and if so, expand the array
+	if v, ok := m[key+".#"]; ok {
+		// If the count of the key is unknown, then just put the unknown
+		// value in the value itself. This will be detected by Terraform
+		// core later.
+		if v == hcl2shim.UnknownVariableValue {
+			return v
+		}
+
+		return expandArray(m, key)
+	}
+
+	// Check if this is a prefix in the map
+	prefix := key + "."
+	for k := range m {
+		if strings.HasPrefix(k, prefix) {
+			return expandMap(m, prefix)
+		}
+	}
+
+	return nil
+}
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+	num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
+	// with state, so the following code should not crash when given a
+	// number of items more or less than what's given in num. The
+	// num key is mainly just a hint that this is a list or set.
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
+	// See GH-11042 for more details.
+	keySet := map[int]bool{}
+	computed := map[string]bool{}
+	for k := range m {
+		if !strings.HasPrefix(k, prefix+".") {
+			continue
+		}
+
+		key := k[len(prefix)+1:]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+
+		// skip the count value
+		if key == "#" {
+			continue
+		}
+
+		// strip the computed flag if there is one
+		if strings.HasPrefix(key, "~") {
+			key = key[1:]
+			computed[key] = true
+		}
+
+		k, err := strconv.Atoi(key)
+		if err != nil {
+			panic(err)
+		}
+		keySet[int(k)] = true
+	}
+
+	keysList := make([]int, 0, num)
+	for key := range keySet {
+		keysList = append(keysList, key)
+	}
+	sort.Ints(keysList)
+
+	result := make([]interface{}, len(keysList))
+	for i, key := range keysList {
+		keyString := strconv.Itoa(key)
+		if computed[keyString] {
+			keyString = "~" + keyString
+		}
+		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+	}
+
+	return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value being
+	// here. If we don't have a count, just proceed as if we have a map.
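The set handling in expandArray above is easiest to see with invented hash keys; this is an editor's illustration, not from the SDK's tests:

flat := map[string]string{
	"ingress.#":         "2",
	"ingress.1234.from": "80", // 1234 and 5678 stand in for schema set hashes
	"ingress.5678.from": "443",
}

// Expand(flat, "ingress") visits keys 1234 and 5678 in numeric order and
// returns []interface{}{
//	map[string]interface{}{"from": "80"},
//	map[string]interface{}{"from": "443"},
// }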
+	if count, ok := m[prefix+"%"]; ok && count == "0" {
+		return map[string]interface{}{}
+	}
+
+	result := make(map[string]interface{})
+	for k := range m {
+		if !strings.HasPrefix(k, prefix) {
+			continue
+		}
+
+		key := k[len(prefix):]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+		if _, ok := result[key]; ok {
+			continue
+		}
+
+		// skip the map count value
+		if key == "%" {
+			continue
+		}
+
+		result[key] = Expand(m, k[:len(prefix)+len(key)])
+	}
+
+	return result
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go
new file mode 100644
index 00000000000..9ff6e426526
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
+func Flatten(thing map[string]interface{}) Map {
+	result := make(map[string]string)
+
+	for k, raw := range thing {
+		flatten(result, k, reflect.ValueOf(raw))
+	}
+
+	return Map(result)
+}
+
+func flatten(result map[string]string, prefix string, v reflect.Value) {
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			result[prefix] = "true"
+		} else {
+			result[prefix] = "false"
+		}
+	case reflect.Int:
+		result[prefix] = fmt.Sprintf("%d", v.Int())
+	case reflect.Map:
+		flattenMap(result, prefix, v)
+	case reflect.Slice:
+		flattenSlice(result, prefix, v)
+	case reflect.String:
+		result[prefix] = v.String()
+	default:
+		panic(fmt.Sprintf("Unknown: %s", v))
+	}
+}
+
+func flattenMap(result map[string]string, prefix string, v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			k = k.Elem()
+		}
+
+		if k.Kind() != reflect.String {
+			panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
+		}
+
+		flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
+	}
+}
+
+func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
+	prefix = prefix + "."
+
+	result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
+	for i := 0; i < v.Len(); i++ {
+		flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go
new file mode 100644
index 00000000000..46b72c4014a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go
@@ -0,0 +1,82 @@
+package flatmap
+
+import (
+	"strings"
+)
+
+// Map is a wrapper around map[string]string that provides some helpers
+// above it that assume the map is in the format that flatmap expects
+// (the result of Flatten).
+//
+// All modifying functions such as Delete are done in-place unless
+// otherwise noted.
+type Map map[string]string
+
+// Contains returns true if the map contains the given key.
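Flatten, shown above, produces exactly the encoding Expand consumes; a small round-trip sketch with invented values:

m := flatmap.Flatten(map[string]interface{}{
	"name":  "web",
	"ports": []interface{}{80, 443},
})
// m is now:
// map[string]string{
//	"name":    "web",
//	"ports.#": "2",
//	"ports.0": "80",
//	"ports.1": "443",
// }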
+func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. +func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/decode.go new file mode 100644 index 00000000000..f470c9b4bee --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/decode.go @@ -0,0 +1,28 @@ +package config + +import ( + "github.com/mitchellh/mapstructure" +) + +func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { + var md mapstructure.Metadata + decoderConfig := &mapstructure.DecoderConfig{ + Metadata: &md, + Result: target, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decoderConfig) + if err != nil { + return nil, err + } + + for _, raw := range raws { + err := decoder.Decode(raw) + if err != nil { + return nil, err + } + } + + return &md, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go new file mode 100644 index 00000000000..35a3e7a4989 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go @@ -0,0 +1,214 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/flatmap" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// Validator is a helper that helps you validate the configuration +// of your resource, resource provider, etc. +// +// At the most basic level, set the Required and Optional lists to be +// specifiers of keys that are required or optional. If a key shows up +// that isn't in one of these two lists, then an error is generated. +// +// The "specifiers" allowed in this is a fairly rich syntax to help +// describe the format of your configuration: +// +// * Basic keys are just strings. For example: "foo" will match the +// "foo" key. 
+//
+// * Nested structure keys can be matched by doing
+//   "listener.*.foo". This will verify that there is at least one
+//   listener element that has the "foo" key set.
+//
+// * The existence of a nested structure can be checked by simply
+//   doing "listener.*" which will verify that there is at least
+//   one element in the "listener" structure. This is NOT
+//   validating that "listener" is an array. It is validating
+//   that it is a nested structure in the configuration.
+//
+type Validator struct {
+	Required []string
+	Optional []string
+}
+
+func (v *Validator) Validate(
+	c *terraform.ResourceConfig) (ws []string, es []error) {
+	// Flatten the configuration so it is easier to reason about
+	flat := flatmap.Flatten(c.Raw)
+
+	keySet := make(map[string]validatorKey)
+	for i, vs := range [][]string{v.Required, v.Optional} {
+		req := i == 0
+		for _, k := range vs {
+			vk, err := newValidatorKey(k, req)
+			if err != nil {
+				es = append(es, err)
+				continue
+			}
+
+			keySet[k] = vk
+		}
+	}
+
+	purged := make([]string, 0)
+	for _, kv := range keySet {
+		p, w, e := kv.Validate(flat)
+		if len(w) > 0 {
+			ws = append(ws, w...)
+		}
+		if len(e) > 0 {
+			es = append(es, e...)
+		}
+
+		purged = append(purged, p...)
+	}
+
+	// Delete all the keys we processed in order to find
+	// the unknown keys.
+	for _, p := range purged {
+		delete(flat, p)
+	}
+
+	// The rest are unknown
+	for k, _ := range flat {
+		es = append(es, fmt.Errorf("Unknown configuration: %s", k))
+	}
+
+	return
+}
+
+type validatorKey interface {
+	// Validate validates the given configuration and returns viewed keys,
+	// warnings, and errors.
+	Validate(map[string]string) ([]string, []string, []error)
+}
+
+func newValidatorKey(k string, req bool) (validatorKey, error) {
+	var result validatorKey
+
+	parts := strings.Split(k, ".")
+	if len(parts) > 1 && parts[1] == "*" {
+		result = &nestedValidatorKey{
+			Parts:    parts,
+			Required: req,
+		}
+	} else {
+		result = &basicValidatorKey{
+			Key:      k,
+			Required: req,
+		}
+	}
+
+	return result, nil
+}
+
+// basicValidatorKey validates keys that are basic such as "foo"
+type basicValidatorKey struct {
+	Key      string
+	Required bool
+}
+
+func (v *basicValidatorKey) Validate(
+	m map[string]string) ([]string, []string, []error) {
+	for k, _ := range m {
+		// If we have the exact key it's a match
+		if k == v.Key {
+			return []string{k}, nil, nil
+		}
+	}
+
+	if !v.Required {
+		return nil, nil, nil
+	}
+
+	return nil, nil, []error{fmt.Errorf(
+		"Key not found: %s", v.Key)}
+}
+
+type nestedValidatorKey struct {
+	Parts    []string
+	Required bool
+}
+
+func (v *nestedValidatorKey) validate(
+	m map[string]string,
+	prefix string,
+	offset int) ([]string, []string, []error) {
+	if offset >= len(v.Parts) {
+		// We're at the end. Look for a specific key.
+		v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
+		return v2.Validate(m)
+	}
+
+	current := v.Parts[offset]
+
+	// If we're at offset 0, special case to start at the next one.
+	if offset == 0 {
+		return v.validate(m, current, offset+1)
+	}
+
+	// Determine if we're doing a "for all" or a specific key
+	if current != "*" {
+		// We're looking at a specific key, continue on.
+		return v.validate(m, prefix+"."+current, offset+1)
+	}
+
+	// We're doing a "for all", so we loop over.
+	countStr, ok := m[prefix+".#"]
+	if !ok {
+		if !v.Required {
+			// It wasn't required, so it's no problem.
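For reference, the specifier syntax described in the Validator doc comment above is exercised like this; a hedged sketch, with invented keys, where resourceConfig is a *terraform.ResourceConfig obtained elsewhere:

v := &config.Validator{
	Required: []string{
		"name",            // top-level key must be present
		"listener.*.port", // every listener element must set "port"
	},
	Optional: []string{
		"tags.*", // a tags substructure may be present
	},
}

warns, errs := v.Validate(resourceConfig)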
+ return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", prefix)} + } + + count, err := strconv.ParseInt(countStr, 0, 0) + if err != nil { + // This shouldn't happen if flatmap works properly + panic("invalid flatmap array") + } + + var e []error + var w []string + u := make([]string, 1, count+1) + u[0] = prefix + ".#" + for i := 0; i < int(count); i++ { + prefix := fmt.Sprintf("%s.%d", prefix, i) + + // Mark that we saw this specific key + u = append(u, prefix) + + // Mark all prefixes of this + for k, _ := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + u = append(u, k) + } + + // If we have more parts, then validate deeper + if offset+1 < len(v.Parts) { + u2, w2, e2 := v.validate(m, prefix, offset+1) + + u = append(u, u2...) + w = append(w, w2...) + e = append(e, e2...) + } + } + + return u, w, e +} + +func (v *nestedValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + return v.validate(m, "", 0) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go new file mode 100644 index 00000000000..54899bc6522 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go @@ -0,0 +1,24 @@ +package didyoumean + +import ( + "github.com/agext/levenshtein" +) + +// NameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func NameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go new file mode 100644 index 00000000000..82b5937bfe2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go @@ -0,0 +1,6 @@ +// Package plugin contains types and functions to help Terraform plugins +// implement the plugin rpc interface. +// The primary Provider type will be responsible for converting from the grpc +// wire protocol to the types and methods known to the provider +// implementations. 
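NameSuggestion's fixed distance threshold of 3 behaves like this (illustrative inputs; "subent" is two edits away from "subnet"):

didyoumean.NameSuggestion("subent", []string{"subnet", "vpc"})  // "subnet"
didyoumean.NameSuggestion("cluster", []string{"subnet", "vpc"}) // "" (nothing close enough)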
+package plugin
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go
new file mode 100644
index 00000000000..388f1ed59d9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go
@@ -0,0 +1,1398 @@
+package plugin
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyconvert "github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	context "golang.org/x/net/context"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert"
+	proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+const newExtraKey = "_new_extra_shim"
+
+// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a
+// proto.ProviderServer implementation. If the provided provider is not a
+// *schema.Provider, this will return nil.
+func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer {
+	sp, ok := p.(*schema.Provider)
+	if !ok {
+		return nil
+	}
+
+	return &GRPCProviderServer{
+		provider: sp,
+	}
+}
+
+// GRPCProviderServer handles the server, or plugin side of the rpc connection.
+type GRPCProviderServer struct {
+	provider *schema.Provider
+}
+
+func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) {
+	// Here we are certain that the provider is being called through grpc, so
+	// make sure the feature flag for helper/schema is set
+	schema.SetProto5()
+
+	resp := &proto.GetProviderSchema_Response{
+		ResourceSchemas:   make(map[string]*proto.Schema),
+		DataSourceSchemas: make(map[string]*proto.Schema),
+	}
+
+	resp.Provider = &proto.Schema{
+		Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()),
+	}
+
+	for typ, res := range s.provider.ResourcesMap {
+		resp.ResourceSchemas[typ] = &proto.Schema{
+			Version: int64(res.SchemaVersion),
+			Block:   convert.ConfigSchemaToProto(res.CoreConfigSchema()),
+		}
+	}
+
+	for typ, dat := range s.provider.DataSourcesMap {
+		resp.DataSourceSchemas[typ] = &proto.Schema{
+			Version: int64(dat.SchemaVersion),
+			Block:   convert.ConfigSchemaToProto(dat.CoreConfigSchema()),
+		}
+	}
+
+	return resp, nil
+}
+
+func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block {
+	return schema.InternalMap(s.provider.Schema).CoreConfigSchema()
+}
+
+func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block {
+	res := s.provider.ResourcesMap[name]
+	return res.CoreConfigSchema()
+}
+
+func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block {
+	dat := s.provider.DataSourcesMap[name]
+	return dat.CoreConfigSchema()
+}
+
+func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) {
+	resp := &proto.PrepareProviderConfig_Response{}
+
+	schemaBlock := s.getProviderSchemaBlock()
+
+	configVal, err :=
msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. + configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.Validate(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { + resp := &proto.ValidateResourceTypeConfig_Response{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateResource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { + resp := &proto.ValidateDataSourceConfig_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateDataSource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { + resp := &proto.UpgradeResourceState_Response{} + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. + case len(req.RawState.Flatmap) > 0: + jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. 
+	case len(req.RawState.Json) > 0:
+		err = json.Unmarshal(req.RawState.Json, &jsonMap)
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	default:
+		log.Println("[DEBUG] no state provided to upgrade")
+		return resp, nil
+	}
+
+	// complete the upgrade of the JSON states
+	jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// The provider isn't required to clean out removed fields
+	s.removeAttributes(jsonMap, schemaBlock.ImpliedType())
+
+	// now we need to turn the state into the default json representation, so
+	// that it can be re-decoded using the actual schema.
+	val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// Now we need to make sure blocks are represented correctly, which means
+	// that missing blocks are empty collections, rather than null.
+	// First we need to CoerceValue to ensure that all object types match.
+	val, err = schemaBlock.CoerceValue(val)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+	// Normalize the value and fill in any missing blocks.
+	val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)
+
+	// encode the final state to the expected msgpack format
+	newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
+	return resp, nil
+}
+
+// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate
+// state if necessary, and converts it to the new JSON state format decoded as a
+// map[string]interface{}.
+// upgradeFlatmapState returns the json map along with the corresponding schema
+// version.
+func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
+	// this will be the version we've upgraded to, defaulting to the given
+	// version in case no migration was called.
+	upgradedVersion := version
+
+	// first determine if we need to call the legacy MigrateState func
+	requiresMigrate := version < res.SchemaVersion
+
+	schemaType := res.CoreConfigSchema().ImpliedType()
+
+	// if there are any StateUpgraders, then we need to only compare
+	// against the first version there
+	if len(res.StateUpgraders) > 0 {
+		requiresMigrate = version < res.StateUpgraders[0].Version
+	}
+
+	if requiresMigrate && res.MigrateState == nil {
+		// Providers were previously allowed to bump the version
+		// without declaring MigrateState.
+		// If there are further upgraders, then we've only updated that far.
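The decision logic above keys off res.SchemaVersion, res.MigrateState, and res.StateUpgraders. As a hedged sketch of the provider-side declaration it consumes (the resource, schema helper, and upgrade functions are invented stand-ins, not real SDK names):

resource := &schema.Resource{
	SchemaVersion: 2,
	MigrateState:  migrateExampleV0, // legacy flatmap migration, below version 1
	StateUpgraders: []schema.StateUpgrader{{
		Version: 1, // applies to JSON state stored at version 1
		Type:    exampleSchemaV1().CoreConfigSchema().ImpliedType(),
		Upgrade: upgradeExampleV1toV2,
	}},
}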
+		if len(res.StateUpgraders) > 0 {
+			schemaType = res.StateUpgraders[0].Type
+			upgradedVersion = res.StateUpgraders[0].Version
+		}
+	} else if requiresMigrate {
+		is := &terraform.InstanceState{
+			ID:         m["id"],
+			Attributes: m,
+			Meta: map[string]interface{}{
+				"schema_version": strconv.Itoa(version),
+			},
+		}
+
+		is, err := res.MigrateState(version, is, s.provider.Meta())
+		if err != nil {
+			return nil, 0, err
+		}
+
+		// re-assign the map in case there was a copy made, making sure to keep
+		// the ID
+		m := is.Attributes
+		m["id"] = is.ID
+
+		// if there are further upgraders, then we've only updated that far
+		if len(res.StateUpgraders) > 0 {
+			schemaType = res.StateUpgraders[0].Type
+			upgradedVersion = res.StateUpgraders[0].Version
+		}
+	} else {
+		// the schema version may be newer than the MigrateState functions
+		// handled and older than the current, but still stored in the flatmap
+		// form. If that's the case, we need to find the correct schema type to
+		// convert the state.
+		for _, upgrader := range res.StateUpgraders {
+			if upgrader.Version == version {
+				schemaType = upgrader.Type
+				break
+			}
+		}
+	}
+
+	// now we know the state is up to the latest version that handled the
+	// flatmap format state. Now we can upgrade the format and continue from
+	// there.
+	newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType)
+	return jsonMap, upgradedVersion, err
+}
+
+func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) {
+	var err error
+
+	for _, upgrader := range res.StateUpgraders {
+		if version != upgrader.Version {
+			continue
+		}
+
+		m, err = upgrader.Upgrade(m, s.provider.Meta())
+		if err != nil {
+			return nil, err
+		}
+		version++
+	}
+
+	return m, nil
+}
+
+// Remove any attributes no longer present in the schema, so that the json can
+// be correctly decoded.
+func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) {
+	// we're only concerned with finding maps that correspond to object
+	// attributes
+	switch v := v.(type) {
+	case []interface{}:
+		// If these aren't blocks the next call will be a noop
+		if ty.IsListType() || ty.IsSetType() {
+			eTy := ty.ElementType()
+			for _, eV := range v {
+				s.removeAttributes(eV, eTy)
+			}
+		}
+		return
+	case map[string]interface{}:
+		// map blocks aren't yet supported, but handle this just in case
+		if ty.IsMapType() {
+			eTy := ty.ElementType()
+			for _, eV := range v {
+				s.removeAttributes(eV, eTy)
+			}
+			return
+		}
+
+		if ty == cty.DynamicPseudoType {
+			log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v)
+			return
+		}
+
+		if !ty.IsObjectType() {
+			// This shouldn't happen, and will fail to decode further on, so
+			// there's no need to handle it here.
+ log.Printf("[WARN] unexpected type %#v for map in json state", ty) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + delete(v, attr) + continue + } + + s.removeAttributes(attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} + +func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { + resp := &proto.Configure_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + err = s.provider.Configure(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { + resp := &proto.ReadResource_Response{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state. + Private: req.Private, + } + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. 
+ newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + resp := &proto.PlanResourceChange_Response{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + // Ensure there are no nulls that will cause helper/schema to panic. 
+	if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// turn the proposed state into a legacy configuration
+	cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock)
+
+	diff, err := s.provider.SimpleDiff(info, priorState, cfg)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// if this is a new instance, we need to make sure ID is going to be computed
+	if create {
+		if diff == nil {
+			diff = terraform.NewInstanceDiff()
+		}
+
+		diff.Attributes["id"] = &terraform.ResourceAttrDiff{
+			NewComputed: true,
+		}
+	}
+
+	if diff == nil || len(diff.Attributes) == 0 {
+		// schema.Provider.Diff returns nil if it ends up making a diff with no
+		// changes, but our new interface wants us to return an actual change
+		// description that _shows_ there are no changes. This is always the
+		// prior state, because we force a diff above if this is a new instance.
+		resp.PlannedState = req.PriorState
+		resp.PlannedPrivate = req.PriorPrivate
+		return resp, nil
+	}
+
+	if priorState == nil {
+		priorState = &terraform.InstanceState{}
+	}
+
+	// now we need to apply the diff to the prior state, so get the planned state
+	plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock)
+
+	plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false)
+
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)
+
+	// The old SDK code has some imprecisions that cause it to sometimes
+	// generate differences that the SDK itself does not consider significant
+	// but Terraform Core would. To avoid producing weird do-nothing diffs
+	// in that case, we'll check if the provider has produced something we
+	// think is "equivalent" to the prior state and just return the prior state
+	// itself if so, thus ensuring that Terraform Core will treat this as
+	// a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
+	// accuracy.
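A minimal sketch of the kind of difference ValuesSDKEquivalent is meant to forgive, assuming (as its name and usage here suggest) that null and empty collections are among its equivalence rules:

prior := cty.NullVal(cty.List(cty.String)) // prior state recorded null
planned := cty.ListValEmpty(cty.String)    // legacy diff produced empty
if hcl2shim.ValuesSDKEquivalent(prior, planned) {
	// treated as no change: the prior state is returned as the plan
}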
+ forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &proto.DynamicValue{ + Msgpack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &schema.ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. + var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". 
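For reference, hcl2shim.RequiresReplace (used just below) converts legacy flatmap-style attribute addresses into cty.Paths against the schema's implied type; a sketch with an invented address:

paths, err := hcl2shim.RequiresReplace(
	[]string{"id", "network.0.cidr"}, // flatmap-style addresses (invented)
	schemaBlock.ImpliedType(),
)
// A resulting path looks like:
// cty.Path{
//	cty.GetAttrStep{Name: "network"},
//	cty.IndexStep{Key: cty.NumberIntVal(0)},
//	cty.GetAttrStep{Name: "cidr"},
// }
// pathToAttributePath (defined later in this file) then maps each step
// onto the protocol's AttributePath messages.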
+	id := plannedStateVal.GetAttr("id")
+	if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() {
+		requiresNew = append(requiresNew, "id")
+	}
+
+	requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// convert these to the protocol structures
+	for _, p := range requiresReplace {
+		resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p))
+	}
+
+	return resp, nil
+}
+
+func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) {
+	resp := &proto.ApplyResourceChange_Response{
+		// Start with the existing state as a fallback
+		NewState: req.PriorState,
+	}
+
+	res := s.provider.ResourcesMap[req.TypeName]
+	schemaBlock := s.getResourceSchemaBlock(req.TypeName)
+
+	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	info := &terraform.InstanceInfo{
+		Type: req.TypeName,
+	}
+
+	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	private := make(map[string]interface{})
+	if len(req.PlannedPrivate) > 0 {
+		if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	}
+
+	var diff *terraform.InstanceDiff
+	destroy := false
+
+	// a null state means we are destroying the instance
+	if plannedStateVal.IsNull() {
+		destroy = true
+		diff = &terraform.InstanceDiff{
+			Attributes: make(map[string]*terraform.ResourceAttrDiff),
+			Meta:       make(map[string]interface{}),
+			Destroy:    true,
+		}
+	} else {
+		diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res))
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	}
+
+	if diff == nil {
+		diff = &terraform.InstanceDiff{
+			Attributes: make(map[string]*terraform.ResourceAttrDiff),
+			Meta:       make(map[string]interface{}),
+		}
+	}
+
+	// add NewExtra Fields that may have been stored in the private data
+	if newExtra := private[newExtraKey]; newExtra != nil {
+		for k, v := range newExtra.(map[string]interface{}) {
+			d := diff.Attributes[k]
+
+			if d == nil {
+				d = &terraform.ResourceAttrDiff{}
+			}
+
+			d.NewExtra = v
+			diff.Attributes[k] = d
+		}
+	}
+
+	if private != nil {
+		diff.Meta = private
+	}
+
+	for k, d := range diff.Attributes {
+		// We need to turn off any RequiresNew. There could be attributes
+		// without changes in here inserted by helper/schema, but if they have
+		// RequiresNew then the state will be dropped from the ResourceData.
+		d.RequiresNew = false
+
+		// Check that any "removed" attributes actually exist in the
+		// prior state, or helper/schema will confuse itself
+		if d.NewRemoved {
+			if _, ok := priorState.Attributes[k]; !ok {
+				delete(diff.Attributes, k)
+			}
+		}
+	}
+
+	newInstanceState, err := s.provider.Apply(info, priorState, diff)
+	// we record the error here, but continue processing any returned state.
+ if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { + resp := &proto.ImportResourceState_Response{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(info, req.Id) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. 
+ newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &proto.ImportResourceState_ImportedResource{ + TypeName: resourceType, + State: &proto.DynamicValue{ + Msgpack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { + resp := &proto.ReadDataSource_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + diff, err := s.provider.ReadDataDiff(info, config) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, err := s.provider.ReadDataApply(info, diff) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *proto.AttributePath { + var steps []*proto.AttributePath_Step + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: i, + }, + }) + case cty.String: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: s.Key.AsString(), + }, + }) + } + } + } + + return &proto.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. 
we need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map. + timeouts, ok := toAttrs[schema.TimeoutsConfigKey] + if ok { + toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[schema.TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *schema.Resource) *schema.Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(schema.Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*schema.Schema{} + + for k, s := range r.Schema { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *schema.Schema) *schema.Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(schema.Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *schema.Schema: + newSchema.Elem = stripSchema(e) + case *schema.Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepancy between the src and dst values, with one null +// and the other empty, prefer the src value. This takes a little more liberty +// with set types, since we can't correlate modified set values. In the case of +// sets, if the src set was wholly known we assume the value was correctly +// applied and copy that entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This does mean that a CustomizeDiffFunc may not be able +// to change a null to an empty value or vice versa, but that should be very +// uncommon, and it was not reliable before 0.12 either. +func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { + ty := dst.Type() + if !src.IsNull() && !src.IsKnown() { + // Return src during plan to retain unknown interpolated placeholders, + // which could be lost if we're only updating a resource. If this is a + // read scenario, then there shouldn't be any unknowns at all.
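A minimal standalone sketch (separate from the vendored code) of the null-versus-empty distinction this function reconciles; in cty the two are distinct values, while the legacy flatmap state cannot tell them apart:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	nullList := cty.NullVal(cty.List(cty.String))
	emptyList := cty.ListValEmpty(cty.String)

	fmt.Println(nullList.IsNull())     // true
	fmt.Println(emptyList.IsNull())    // false
	fmt.Println(emptyList.LengthInt()) // 0

	// normalizeNullValues prefers src in exactly this situation during
	// apply: one side null, the other empty.
}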
+ if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. 
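For reference, a small standalone illustration of the cty constraint behind the empty-map special case above: cty.MapVal and cty.ObjectVal cannot be called with an empty Go map, so empty collections are built with the dedicated empty constructors.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// cty.MapVal(map[string]cty.Value{}) would panic, because the element
	// type cannot be inferred from an empty map.
	empty := cty.MapValEmpty(cty.String)
	fmt.Println(empty.LengthInt()) // 0

	tags := cty.MapVal(map[string]cty.Value{
		"Name": cty.StringVal("web"), // hypothetical tag value
	})
	fmt.Println(tags.LengthInt()) // 1
}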
+ srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { + var diags []*proto.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. + p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go new file mode 100644 index 00000000000..a22a264fa0a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go @@ -0,0 +1,131 @@ +package plugin + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. 
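A quick standalone reminder (using only go-cty) of the null/unknown distinction SetUnknowns trades in: null means known-absent, while unknown is the "(known after apply)" placeholder that computed attributes receive:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	null := cty.NullVal(cty.String)
	unknown := cty.UnknownVal(cty.String)

	fmt.Println(null.IsNull(), null.IsKnown())       // true true
	fmt.Println(unknown.IsNull(), unknown.IsKnown()) // false false
}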
+ if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. + switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. 
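As a standalone sketch of the invariant behind the loop that follows (attribute names invented): cty.ObjectVal only conforms to an object type when every attribute is present, so absent attributes must be filled in as typed nulls.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ety := cty.Object(map[string]cty.Type{
		"from_port": cty.Number,
		"to_port":   cty.Number,
	})

	// Populate every attribute with a typed null, as the empty-object
	// handling above does for nested blocks.
	vals := make(map[string]cty.Value)
	for attr, ty := range ety.AttributeTypes() {
		vals[attr] = cty.NullVal(ty)
	}

	v := cty.ObjectVal(vals)
	fmt.Println(v.Type().Equals(ety)) // true
}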
+ for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go new file mode 100644 index 00000000000..ad8d626c66d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go @@ -0,0 +1,53 @@ +package httpclient + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" +const userAgentFormat = "Terraform/%s" + +// New returns the DefaultPooledClient from the cleanhttp +// package that will also send a Terraform User-Agent string. +func New() *http.Client { + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: UserAgentString(), + inner: cli.Transport, + } + return cli +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) + return rt.inner.RoundTrip(req) +} + +func UserAgentString() string { + ua := fmt.Sprintf(userAgentFormat, version.Version) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go new file mode 100644 index 00000000000..7096ff74f82 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go @@ -0,0 +1,125 @@ +package initwd + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. 
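The user-agent wrapper in httpclient above is a conventional http.RoundTripper decorator; a self-contained sketch of the same pattern follows (the test server and version string are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// uaRoundTripper mirrors the wrapper above: it injects a User-Agent
// header only when the caller hasn't already set one.
type uaRoundTripper struct {
	inner http.RoundTripper
	ua    string
}

func (rt *uaRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if _, ok := req.Header["User-Agent"]; !ok {
		req.Header.Set("User-Agent", rt.ua)
	}
	return rt.inner.RoundTrip(req)
}

func main() {
	// A throwaway server that echoes back the User-Agent it received.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.UserAgent())
	}))
	defer srv.Close()

	client := &http.Client{Transport: &uaRoundTripper{
		inner: http.DefaultTransport,
		ua:    "Terraform/0.12.0", // hypothetical version string
	}}

	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // Terraform/0.12.0
}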
+ return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// sameFile tries to determine if two paths refer to the same file. +// If the paths don't match, we look up the inode on supported systems. +func sameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aIno, err := inode(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bIno, err := inode(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if aIno > 0 && aIno == bIno { + return true, nil + } + + return false, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go new file mode 100644 index 00000000000..b9d938dbb09 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go @@ -0,0 +1,7 @@ +// Package initwd contains various helper functions used by the "terraform init" +// command to initialize a working directory. +// +// These functions may also be used from testing code to simulate the behaviors +// of "terraform init" against test fixtures, but should not be used elsewhere +// in the main code. +package initwd diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go new file mode 100644 index 00000000000..641e71dec8a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go @@ -0,0 +1,363 @@ +package initwd + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +const initFromModuleRootCallName = "root" +const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." + +// DirFromModule populates the given directory (which must exist and be +// empty) with the contents of the module at the given source address. +// +// It does this by installing the given module and all of its descendent +// modules in a temporary root directory and then copying the installed +// files into suitable locations. As a consequence, any diagnostics it +// generates will reveal the location of this temporary directory to the
+// +// This rather roundabout installation approach is taken to ensure that +// installation proceeds in a manner identical to normal module installation. +// +// If the given source address specifies a sub-directory of the given +// package then only the sub-directory and its descendents will be copied +// into the given root directory, which will cause any relative module +// references using ../ from that module to be unresolvable. Error diagnostics +// are produced in that case, to prompt the user to rewrite the source strings +// to be absolute references to the original remote module. +func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // The way this function works is pretty ugly, but we accept it because + // -from-module is a less important case than normal module installation + // and so it's better to keep this ugly complexity out here rather than + // adding even more complexity to the normal module installer. + + // The target directory must exist but be empty. + { + entries, err := ioutil.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target directory does not exist", + fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read target directory", + fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), + )) + } + return diags + } + haveEntries := false + for _, entry := range entries { + if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { + continue + } + haveEntries = true + } + if haveEntries { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't populate non-empty directory", + fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), + )) + return diags + } + } + + instDir := filepath.Join(rootDir, ".terraform/init-from-module") + inst := NewModuleInstaller(instDir, reg) + log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) + os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too + err := os.MkdirAll(instDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create temporary directory", + fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), + )) + return diags + } + + instManifest := make(modsdir.Manifest) + retManifest := make(modsdir.Manifest) + + fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) + fakePos := tfconfig.SourcePos{ + Filename: fakeFilename, + Line: 1, + } + + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddr) + if maybePath == "." 
|| strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddr = filepath.Join(wd, sourceAddr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) + } + } + } + + // Now we need to create an artificial root module that will seed our + // installation process. + fakeRootModule := &tfconfig.Module{ + ModuleCalls: map[string]*tfconfig.ModuleCall{ + initFromModuleRootCallName: { + Name: initFromModuleRootCallName, + Source: sourceAddr, + Pos: fakePos, + }, + }, + } + + // wrapHooks filters hook notifications to only include Download calls + // and to trim off the initFromModuleRootCallName prefix. We'll produce + // our own Install notifications directly below. + wrapHooks := installHooksInitDir{ + Wrapped: hooks, + } + getter := reusingGetter{} + _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) + diags = append(diags, instDiags...) + if instDiags.HasErrors() { + return diags + } + + // If all of that succeeded then we'll now migrate what was installed + // into the final directory structure. + err = os.MkdirAll(modulesDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local modules directory", + fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), + )) + return diags + } + + recordKeys := make([]string, 0, len(instManifest)) + for k := range instManifest { + recordKeys = append(recordKeys, k) + } + sort.Strings(recordKeys) + + for _, recordKey := range recordKeys { + record := instManifest[recordKey] + + if record.Key == initFromModuleRootCallName { + // We've found the module the user requested, which we must + // now copy into rootDir so it can be used directly. + log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) + err := copyDir(rootDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy root module", + fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), + )) + continue + } + + // We'll try to load the newly-copied module here just so we can + // sniff for any module calls that ../ out of the root directory + // and must thus be rewritten to be absolute addresses again. + // For now we can't do this rewriting automatically, but we'll + // generate an error to help the user do it manually. + mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway + if mod != nil { + for _, mc := range mod.ModuleCalls { + if pathTraversesUp(mc.Source) { + packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) + newSubdir := filepath.Join(givenSubdir, mc.Source) + if pathTraversesUp(newSubdir) { + // This should never happen in any reasonable + // configuration since this suggests a path that + // traverses up out of the package root. We'll just + // ignore this, since we'll fail soon enough anyway + // trying to resolve this path when this module is + // loaded. + continue + } + + var newAddr = packageAddr + if newSubdir != "" { + newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Root module references parent directory", + fmt.Sprintf("The requested module %q refers to a module via its parent directory. 
To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), + )) + continue + } + } + } + + retManifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + continue + } + + if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { + // Ignore the *real* root module, whose key is empty, since + // we're only interested in the module named "root" and its + // descendents. + continue + } + + newKey := record.Key[len(initFromModuleRootKeyPrefix):] + instPath := filepath.Join(modulesDir, newKey) + tempPath := filepath.Join(instDir, record.Key) + + // tempPath won't be present for a module that was installed from + // a relative path, so in that case we just record the installation + // directory and assume it was already copied into place as part + // of its parent. + if _, err := os.Stat(tempPath); err != nil { + if !os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to stat temporary module install directory", + fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + var parentKey string + if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { + parentKey = newKey[:lastDot] + } else { + parentKey = "" // parent is the root module + } + + parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey] + parentNew := retManifest[parentKey] + + // We need to figure out which portion of our directory is the + // parent package path and which portion is the subdirectory + // under that. + baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir) + if err != nil { + // Should never happen, because we constructed both directories + // from the same base and so they must have a common prefix. + panic(err) + } + + newDir := filepath.Join(parentNew.Dir, baseDirRel) + log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) + newRecord := record // shallow copy + newRecord.Dir = newDir + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + continue + } + + err = os.MkdirAll(instPath, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create module install directory", + fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + // We copy rather than "rename" here because renaming between directories + // can be tricky in edge-cases like network filesystems, etc. + log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) + err := copyDir(instPath, tempPath) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy descendent module", + fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), + )) + continue + } + + subDir, err := filepath.Rel(tempPath, record.Dir) + if err != nil { + // Should never happen, because we constructed both directories + // from the same base and so they must have a common prefix. 
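A standalone sketch of the filepath.Rel step used above to recover a module's sub-path within its parent package (the paths are hypothetical):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	parentDir := "/tmp/.terraform/init-from-module/root.vpc"
	moduleDir := "/tmp/.terraform/init-from-module/root.vpc/modules/subnets"

	baseDirRel, err := filepath.Rel(parentDir, moduleDir)
	if err != nil {
		// Cannot happen when both paths share a common base, which the
		// installer guarantees by construction.
		panic(err)
	}
	fmt.Println(baseDirRel) // modules/subnets
}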
+ panic(err) + } + + newRecord := record // shallow copy + newRecord.Dir = filepath.Join(instPath, subDir) + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + } + + err = retManifest.WriteSnapshotToDir(modulesDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write module manifest", + fmt.Sprintf("Error writing module manifest: %s.", err), + )) + } + + if !diags.HasErrors() { + // Try to clean up our temporary directory, but don't worry if we don't + // succeed since it shouldn't hurt anything. + os.RemoveAll(instDir) + } + + return diags +} + +func pathTraversesUp(path string) bool { + return strings.HasPrefix(filepath.ToSlash(path), "../") +} + +// installHooksInitDir is an adapter wrapper for an InstallHooks that +// does some fakery to make downloads look like they are happening in their +// final locations, rather than in the temporary loader we use. +// +// It also suppresses "Install" calls entirely, since DirFromModule +// does its own installation steps after the initial installation pass +// has completed. +type installHooksInitDir struct { + Wrapped ModuleInstallHooks + ModuleInstallHooksImpl +} + +func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { + if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { + // We won't announce the root module, since hook implementations + // don't expect to see that and the caller will usually have produced + // its own user-facing notification about what it's doing anyway. + return + } + + trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] + h.Wrapped.Download(trimAddr, packageAddr, version) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go new file mode 100644 index 00000000000..8dc0374b1e8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go @@ -0,0 +1,204 @@ +package initwd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of Terraform's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable.
+ +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "gcs": new(getter.GCSGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. +// +// The keys in a reusingGetter are resolved and trimmed source addresses +// (with a scheme always present, and without any "subdir" component), +// and the values are the paths where each source was previously installed. +type reusingGetter map[string]string + +// getWithGoGetter retrieves the package referenced in the given address +// into the installation path and then returns the full path to any subdir +// indicated in the address. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. 
+func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { + packageAddr, subDir := splitAddrSubdir(addr) + + log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) + + realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) + if err != nil { + return "", err + } + + if isMaybeRelativeLocalPath(realAddr) { + return "", &MaybeRelativePathErr{addr} + } + + var realSubDir string + realAddr, realSubDir = splitAddrSubdir(realAddr) + if realSubDir != "" { + subDir = filepath.Join(realSubDir, subDir) + } + + if realAddr != packageAddr { + log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) + } + + if prevDir, exists := g[realAddr]; exists { + log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) + err := os.Mkdir(instPath, os.ModePerm) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) + } + err = copyDir(instPath, prevDir) + if err != nil { + return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) + } + } else { + log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) + client := getter.Client{ + Src: realAddr, + Dst: instPath, + Pwd: instPath, + + Mode: getter.ClientModeDir, + + Detectors: goGetterNoDetectors, // we already did detection above + Decompressors: goGetterDecompressors, + Getters: goGetterGetters, + } + err = client.Get() + if err != nil { + return "", err + } + // Remember where we installed this so we might reuse this directory + // on subsequent calls to avoid re-downloading. + g[realAddr] = instPath + } + + // Our subDir string can contain wildcards until this point, so that + // e.g. a subDir of * can expand to one top-level directory in a .tar.gz + // archive. Now that we've expanded the archive successfully we must + // resolve that into a concrete path. + var finalDir string + if subDir != "" { + finalDir, err = getter.SubdirGlob(instPath, subDir) + log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) + if err != nil { + return "", err + } + } else { + finalDir = instPath + } + + // If we got this far then we have apparently succeeded in downloading + // the requested object! + return filepath.Clean(finalDir), nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. +// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. 
+func splitAddrSubdir(addr string) (packageAddr, subDir string) { + return getter.SourceDirSubdir(addr) +} + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) +} + +func isMaybeRelativeLocalPath(addr string) bool { + if strings.HasPrefix(addr, "file://") { + _, err := os.Stat(addr[7:]) + if err != nil { + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go new file mode 100644 index 00000000000..1150b093ccb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go new file mode 100644 index 00000000000..30532f54ace --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go new file mode 100644 index 00000000000..3ed58e4bf96 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package initwd + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/load_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/load_config.go new file mode 100644 index 00000000000..02c3c6f70a8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/load_config.go @@ -0,0 +1,56 @@ +package initwd + +import ( + "fmt" + + version 
"github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// LoadConfig loads a full configuration tree that has previously had all of +// its dependent modules installed to the given modulesDir using a +// ModuleInstaller. +// +// This uses the early configuration loader and thus only reads top-level +// metadata from the modules in the configuration. Most callers should use +// the configs/configload package to fully load a configuration. +func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) { + rootMod, diags := earlyconfig.LoadModule(rootDir) + if rootMod == nil { + return nil, diags + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read module manifest", + fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err), + )) + return nil, diags + } + + return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( + func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + key := manifest.ModuleKey(req.Path) + record, exists := manifest[key] + if !exists { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not installed", + fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()), + )) + return nil, nil, diags + } + + mod, mDiags := earlyconfig.LoadModule(record.Dir) + diags = diags.Append(mDiags) + return mod, record.Version, diags + }, + )) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go new file mode 100644 index 00000000000..8e055756734 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go @@ -0,0 +1,558 @@ +package initwd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +type ModuleInstaller struct { + modsDir string + reg *registry.Client +} + +func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller { + return &ModuleInstaller{ + modsDir: modsDir, + reg: reg, + } +} + +// InstallModules analyses the root module in the given directory and installs +// all of its direct and transitive dependencies into the given modules +// directory, which must already exist. +// +// Since InstallModules makes possibly-time-consuming calls to remote services, +// a hook interface is supported to allow the caller to be notified when +// each module is installed and, for remote modules, when downloading begins. 
+// InstallModules guarantees that two hook calls will not happen concurrently but +// it does not guarantee any particular ordering of hook calls. This mechanism +// is for UI feedback only and does not give the caller any control over the +// process. +// +// If modules are already installed in the target directory, they will be +// skipped unless their source address or version has changed or unless +// the upgrade flag is set. +// +// InstallModules never deletes any directory, except in the case where it +// needs to replace a directory that is already present with a newly-extracted +// package. +// +// If the returned diagnostics contains errors then the module installation +// may have wholly or partially completed. Modules must be loaded in order +// to find their dependencies, so this function does many of the same checks +// as LoadConfig as a side-effect. +// +// If successful (the returned diagnostics contains no errors) then the +// first return value is the early configuration tree that was constructed by +// the installation process. +func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { + log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) + + rootMod, diags := earlyconfig.LoadModule(rootDir) + if rootMod == nil { + return nil, diags + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read modules manifest file", + fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), + )) + return nil, diags + } + + getter := reusingGetter{} + cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) + diags = append(diags, instDiags...) + + return cfg, diags +} + +func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if hooks == nil { + // Use our no-op implementation as a placeholder + hooks = ModuleInstallHooksImpl{} + } + + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. + manifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + + cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( + func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + + key := manifest.ModuleKey(req.Path) + instPath := i.packageInstallPath(req.Path) + + log.Printf("[DEBUG] Module installer: begin %s", key) + + // First we'll check if we need to upgrade/replace an existing + // installed module, and delete it out of the way if so.
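For reference, a minimal sketch (version numbers invented) of the go-version constraint check that drives the replace decision below:

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	installed := version.Must(version.NewVersion("1.2.0"))

	constraints, err := version.NewConstraint(">= 1.0.0, < 2.0.0")
	if err != nil {
		panic(err)
	}

	// Mirrors req.VersionConstraints.Check(record.Version): a false result
	// forces a reinstall of the module.
	fmt.Println(constraints.Check(installed)) // true
}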
+ replace := upgrade + if !replace { + record, recorded := manifest[key] + switch { + case !recorded: + log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) + replace = true + case record.SourceAddr != req.SourceAddr: + log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) + replace = true + case record.Version != nil && !req.VersionConstraints.Check(record.Version): + log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) + replace = true + } + } + + // If we _are_ planning to replace this module, then we'll remove + // it now so our installation code below won't conflict with any + // existing remnants. + if replace { + if _, recorded := manifest[key]; recorded { + log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) + } + delete(manifest, key) + // Deleting a module invalidates all of its descendent modules too. + keyPrefix := key + "." + for subKey := range manifest { + if strings.HasPrefix(subKey, keyPrefix) { + if _, recorded := manifest[subKey]; recorded { + log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) + } + delete(manifest, subKey) + } + } + } + + record, recorded := manifest[key] + if !recorded { + // Clean up any stale cache directory that might be present. + // If this is a local (relative) source then the dir will + // not exist, but we'll ignore that. + log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) + err := os.RemoveAll(instPath) + if err != nil && !os.IsNotExist(err) { + log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to remove local module cache", + fmt.Sprintf( + "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", + instPath, err, + ), + )) + return nil, nil, diags + } + } else { + // If this module is already recorded and its root directory + // exists then we will just load what's already there and + // keep our existing record. + info, err := os.Stat(record.Dir) + if err == nil && info.IsDir() { + mod, mDiags := earlyconfig.LoadModule(record.Dir) + diags = diags.Append(mDiags) + + log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) + return mod, record.Version, diags + } + } + + // If we get down here then it's finally time to actually install + // the module. There are some variants to this process depending + // on what type of module source address we have. + switch { + + case isLocalSourceAddr(req.SourceAddr): + log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) + mod, mDiags := i.installLocalModule(req, key, manifest, hooks) + diags = append(diags, mDiags...) + return mod, nil, diags + + case isRegistrySourceAddr(req.SourceAddr): + addr, err := regsrc.ParseModuleSource(req.SourceAddr) + if err != nil { + // Should never happen because isRegistrySourceAddr already validated + panic(err) + } + log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) + + mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) + diags = append(diags, mDiags...) 
+ return mod, v, diags + + default: + log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) + + mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) + diags = append(diags, mDiags...) + return mod, nil, diags + } + + }, + )) + diags = append(diags, cDiags...) + + err := manifest.WriteSnapshotToDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update module manifest", + fmt.Sprintf("Unable to write the module manifest file: %s", err), + )) + } + + return cfg, diags +} + +func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + parentKey := manifest.ModuleKey(req.Parent.Path) + parentRecord, recorded := manifest[parentKey] + if !recorded { + // This is indicative of a bug rather than a user-actionable error + panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) + } + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + + // For local sources we don't actually need to modify the + // filesystem at all because the parent already wrote + // the files we need, and so we just load up what's already here. + newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) + + log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) + // it is possible that the local directory is a symlink + newDir, err := filepath.EvalSymlinks(newDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), + )) + } + + mod, mDiags := earlyconfig.LoadModule(newDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(mDiags) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Dir: newDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) + hooks.Install(key, nil, newDir) + + return mod, diags +} + +func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + hostname, err := addr.SvcHost() + if err != nil { + // If it looks like the user was trying to use punycode then we'll generate + // a specialized error for that case. We require the unicode form of + // hostname so that hostnames are always human-readable in configuration + // and punycode can't be used to hide a malicious module hostname. 
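To make the punycode check below concrete, a standalone sketch assuming the golang.org/x/net/idna package is available; the hostname is contrived:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// "xn--" is the ACE prefix of punycode-encoded labels; the registry
	// client insists on the unicode form so hostnames stay human-readable.
	uni, err := idna.ToUnicode("xn--r8jz45g.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(uni) // 例え.example.com
}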
+ if strings.HasPrefix(addr.RawHost.Raw, "xn--") { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + return nil, nil, diags + } + + reg := i.reg + + log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) + resp, err := reg.ModuleVersions(addr) + if err != nil { + if registry.IsModuleNotFound(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error accessing remote module registry", + fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), + )) + } + return nil, nil, diags + } + + // The response might contain information about dependencies to allow us + // to potentially optimize future requests, but we don't currently do that + // and so for now we'll just take the first item which is guaranteed to + // be the address we requested. + if len(resp.Modules) < 1 { + // Should never happen, but since this is a remote service that may + // be implemented by third-parties we will handle it gracefully. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, nil, diags + } + + modMeta := resp.Modules[0] + + var latestMatch *version.Version + var latestVersion *version.Version + for _, mv := range modMeta.Versions { + v, err := version.NewVersion(mv.Version) + if err != nil { + // Should never happen if the registry server is compliant with + // the protocol, but we'll warn if not to assist someone who + // might be developing a module registry server. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + continue + } + + // If we've found a pre-release version then we'll ignore it unless + // it was exactly requested. 
+ if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { + log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) + continue + } + + if latestVersion == nil || v.GreaterThan(latestVersion) { + latestVersion = v + } + + if req.VersionConstraints.Check(v) { + if latestMatch == nil || v.GreaterThan(latestMatch) { + latestMatch = v + } + } + } + + if latestVersion == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module has no versions", + fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + return nil, nil, diags + } + + if latestMatch == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unresolvable module version constraint", + fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), + )) + return nil, nil, diags + } + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, latestMatch) + + // If we manage to get down here then we've found a suitable version to + // install, so we need to ask the registry where we should download it from. + // The response to this is a go-getter-style address string. + dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) + if err != nil { + log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) + + modDir, err := getter.getWithGoGetter(instPath, dlAddr) + if err != nil { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) + + if addr.RawSubmodule != "" { + // Append the user's requested subdirectory to any subdirectory that + // was implied by any of the nested layers we expanded within go-getter. + modDir = filepath.Join(modDir, addr.RawSubmodule) + } + + log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) + + // Finally we are ready to try actually loading the module. + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For registry modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. 
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Unreadable module directory",
+			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
+		))
+	} else {
+		diags = append(diags, mDiags...)
+	}
+
+	// Note the local location in our manifest.
+	manifest[key] = modsdir.Record{
+		Key:        key,
+		Version:    latestMatch,
+		Dir:        modDir,
+		SourceAddr: req.SourceAddr,
+	}
+	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
+	hooks.Install(key, latestMatch, modDir)
+
+	return mod, latestMatch, diags
+}
+
+func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	// Report up to the caller that we're about to start downloading.
+	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
+	hooks.Download(key, packageAddr, nil)
+
+	if len(req.VersionConstraints) != 0 {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Invalid version constraint",
+			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non-registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line),
+		))
+		return nil, diags
+	}
+
+	modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr)
+	if err != nil {
+		if _, ok := err.(*MaybeRelativePathErr); ok {
+			log.Printf(
+				"[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../",
+				req.SourceAddr,
+			)
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Module not found",
+				fmt.Sprintf(
+					"The module address %q could not be resolved.\n\n"+
+						"If you intended this as a path relative to the current "+
+						"module, use \"./%s\" instead. The \"./\" prefix "+
+						"indicates that the address is a relative filesystem path.",
+					req.SourceAddr, req.SourceAddr,
+				),
+			))
+		} else {
+			// Errors returned by go-getter have very inconsistent quality as
+			// end-user error messages, but for now we're accepting that because
+			// we have no way to recognize any specific errors to improve them
+			// and masking the error entirely would hide valuable diagnostic
+			// information from the user.
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to download module",
+				fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err),
+			))
+		}
+		return nil, diags
+
+	}
+
+	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir)
+
+	mod, mDiags := earlyconfig.LoadModule(modDir)
+	if mod == nil {
+		// nil indicates missing or unreadable directory, so we'll
+		// discard the returned diags and return a more specific
+		// error message here. For go-getter modules this actually
+		// indicates a bug in the code above, since it's not the
+		// user's responsibility to create the directory in this case.
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Unreadable module directory",
+			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
+		))
+	} else {
+		diags = append(diags, mDiags...)
+	}
+
+	// Note the local location in our manifest.
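+	// Sketch of the resulting entry (assumed serialized shape, shown only for
+	// illustration): the record below ends up in the manifest file under the
+	// modules directory as something like
+	//
+	//	{"Key":"vpc","Source":"git::https://example.com/vpc.git","Dir":".terraform/modules/vpc"}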
+	manifest[key] = modsdir.Record{
+		Key:        key,
+		Dir:        modDir,
+		SourceAddr: req.SourceAddr,
+	}
+	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
+	hooks.Install(key, nil, modDir)
+
+	return mod, diags
+}
+
+func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string {
+	return filepath.Join(i.modsDir, strings.Join(modulePath, "."))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go
new file mode 100644
index 00000000000..817a6dc832a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go
@@ -0,0 +1,36 @@
+package initwd
+
+import (
+	version "github.com/hashicorp/go-version"
+)
+
+// ModuleInstallHooks is an interface used to provide notifications about the
+// installation process being orchestrated by InstallModules.
+//
+// This interface may have new methods added in the future, so implementers
+// should embed ModuleInstallHooksImpl to get no-op implementations of any
+// unimplemented methods.
+type ModuleInstallHooks interface {
+	// Download is called for modules that are retrieved from a remote source
+	// before that download begins, to allow a caller to give feedback
+	// on progress through a possibly-long sequence of downloads.
+	Download(moduleAddr, packageAddr string, version *version.Version)
+
+	// Install is called for each module that is installed, even if it did
+	// not need to be downloaded from a remote source.
+	Install(moduleAddr string, version *version.Version, localPath string)
+}
+
+// ModuleInstallHooksImpl is a do-nothing implementation of ModuleInstallHooks
+// that can be embedded in another implementation struct to allow only partial
+// implementation of the interface.
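+//
+// A minimal embedding sketch (illustrative, not part of the vendored source):
+//
+//	type loggingHooks struct {
+//		ModuleInstallHooksImpl
+//	}
+//
+//	func (h loggingHooks) Download(moduleAddr, packageAddr string, v *version.Version) {
+//		log.Printf("downloading %s from %s", moduleAddr, packageAddr)
+//	}
+//
+// Install then falls back to the embedded no-op implementation.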
+type ModuleInstallHooksImpl struct {
+}
+
+func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) {
+}
+
+func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) {
+}
+
+var _ ModuleInstallHooks = ModuleInstallHooksImpl{}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testdata/local-module-symlink/modules/child_a b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testdata/local-module-symlink/modules/child_a
new file mode 120000
index 00000000000..0d568b14371
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testdata/local-module-symlink/modules/child_a
@@ -0,0 +1 @@
+../child_a/
\ No newline at end of file
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testing.go
new file mode 100644
index 00000000000..c4c67fd18ca
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/testing.go
@@ -0,0 +1,73 @@
+package initwd
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/registry"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests,
+// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows
+// a test configuration to be loaded in a single step.
+//
+// If module installation fails, t.Fatal (or similar) is called to halt
+// execution of the test, under the assumption that installation failures are
+// not expected. If installation failures _are_ expected then use
+// NewLoaderForTests and work with the loader object directly. If module
+// installation succeeds but generates warnings, these warnings are discarded.
+//
+// If installation succeeds but errors are detected during loading then a
+// possibly-incomplete config is returned along with error diagnostics. The
+// test run is not aborted in this case, so that the caller can make assertions
+// against the returned diagnostics.
+//
+// As with NewLoaderForTests, a cleanup function is returned which must be
+// called before the test completes in order to remove the temporary
+// modules directory.
+func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) {
+	t.Helper()
+
+	var diags tfdiags.Diagnostics
+
+	loader, cleanup := configload.NewLoaderForTests(t)
+	inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil))
+
+	_, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{})
+	diags = diags.Append(moreDiags)
+	if diags.HasErrors() {
+		cleanup()
+		t.Fatal(diags.Err())
+		return nil, nil, func() {}, diags
+	}
+
+	// Since the module installer has modified the module manifest on disk,
+	// we need to refresh the loader's cache of it.
+	if err := loader.RefreshModules(); err != nil {
+		t.Fatalf("failed to refresh modules after installation: %s", err)
+	}
+
+	config, hclDiags := loader.LoadConfig(rootDir)
+	diags = diags.Append(hclDiags)
+	return config, loader, cleanup, diags
+}
+
+// MustLoadConfigForTests is a variant of LoadConfigForTests which calls
+// t.Fatal (or similar) if there are any errors during loading, and thus
+// does not return diagnostics at all.
+//
+// This is useful for concisely writing tests that don't expect errors at
+// all. For tests that expect errors and need to assert against them, use
+// LoadConfigForTests instead.
+func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) {
+	t.Helper()
+
+	config, loader, cleanup, diags := LoadConfigForTests(t, rootDir)
+	if diags.HasErrors() {
+		cleanup()
+		t.Fatal(diags.Err())
+	}
+	return config, loader, cleanup
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/version_required.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/version_required.go
new file mode 100644
index 00000000000..803fb4b5243
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/version_required.go
@@ -0,0 +1,83 @@
+package initwd
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+	tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version"
+)
+
+// CheckCoreVersionRequirements visits each of the modules in the given
+// early configuration tree and verifies that any given Core version
+// constraints match the version of Terraform Core that is being used.
+//
+// The returned diagnostics will contain errors if any constraints do not match.
+// The returned diagnostics might also contain warnings, which should be
+// displayed to the user.
+func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics {
+	if earlyConfig == nil {
+		return nil
+	}
+
+	var diags tfdiags.Diagnostics
+	module := earlyConfig.Module
+
+	var constraints version.Constraints
+	for _, constraintStr := range module.RequiredCore {
+		constraint, err := version.NewConstraint(constraintStr)
+		if err != nil {
+			// Unfortunately the early config parser doesn't preserve a source
+			// location for this, so we're unable to indicate a specific
+			// location where this constraint came from, but we can at least
+			// say which module set it.
+			switch {
+			case len(earlyConfig.Path) == 0:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid Terraform core version constraint",
+					fmt.Sprintf("Invalid core version constraint %q in the root module.", constraintStr),
+				))
+			default:
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid Terraform core version constraint",
+					fmt.Sprintf("Invalid core version constraint %q in %s.", constraintStr, earlyConfig.Path),
+				))
+			}
+			continue
+		}
+		constraints = append(constraints, constraint...)
+	}
+
+	if !constraints.Check(tfversion.SemVer) {
+		switch {
+		case len(earlyConfig.Path) == 0:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Unsupported Terraform Core version",
+				fmt.Sprintf(
+					"This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+					tfversion.String(),
+				),
+			))
+		default:
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Unsupported Terraform Core version",
+				fmt.Sprintf(
+					"Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+					earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(),
+				),
+			))
+		}
+	}
+
+	for _, c := range earlyConfig.Children {
+		childDiags := CheckCoreVersionRequirements(c)
+		diags = diags.Append(childDiags)
+	}
+
+	return diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go
new file mode 100644
index 00000000000..8f89909c6fc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go
@@ -0,0 +1,5 @@
+// Package blocktoattr includes some helper functions that can perform
+// preprocessing on an HCL body where a configschema.Block schema is available
+// in order to allow list and set attributes defined in the schema to be
+// optionally written by the user as block syntax.
+package blocktoattr
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go
new file mode 100644
index 00000000000..18db2018595
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go
@@ -0,0 +1,187 @@
+package blocktoattr
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcldec"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization
+// functionality to allow attributes that are specified as having list or set
+// type in the schema to be written with HCL block syntax as multiple nested
+// blocks with the attribute name as the block type.
+//
+// This partially restores some of the block/attribute confusion from HCL 1
+// so that existing patterns that depended on that confusion can continue to
+// be used in the short term while we settle on a longer-term strategy.
+//
+// Most of the fixup work is actually done when the returned body is
+// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual
+// decode of the body might not, if the content of the body is so ambiguous
+// that there's no safe way to map it to the schema.
+func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
+	// The schema should never be nil, but in practice it sometimes is when
+	// poorly-configured test mocks are present, so we'll be robust
+	// by synthesizing an empty one.
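+	// Illustrative example (not from the vendored source): for a schema
+	// attribute "rule" of type list(object({action = string})), the
+	// returned body accepts either spelling:
+	//
+	//	rule = [{ action = "allow" }]	# attribute syntax
+	//
+	//	rule { action = "allow" }	# nested-block syntax, fixed up here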
+	if schema == nil {
+		schema = &configschema.Block{}
+	}
+
+	return &fixupBody{
+		original: body,
+		schema:   schema,
+		names:    ambiguousNames(schema),
+	}
+}
+
+type fixupBody struct {
+	original hcl.Body
+	schema   *configschema.Block
+	names    map[string]struct{}
+}
+
+// Content decodes content from the body. The given schema must be the lower-level
+// representation of the same schema that was previously passed to FixUpBlockAttrs,
+// or else the result is undefined.
+func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	schema = b.effectiveSchema(schema)
+	content, diags := b.original.Content(schema)
+	return b.fixupContent(content), diags
+}
+
+func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+	schema = b.effectiveSchema(schema)
+	content, remain, diags := b.original.PartialContent(schema)
+	remain = &fixupBody{
+		original: remain,
+		schema:   b.schema,
+		names:    b.names,
+	}
+	return b.fixupContent(content), remain, diags
+}
+
+func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
+	// FixUpBlockAttrs is not intended to be used in situations where we'd use
+	// JustAttributes, so we just pass this through verbatim to complete our
+	// implementation of hcl.Body.
+	return b.original.JustAttributes()
+}
+
+func (b *fixupBody) MissingItemRange() hcl.Range {
+	return b.original.MissingItemRange()
+}
+
+// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
+// content to determine whether the author has used attribute or block syntax
+// for each of the ambiguous attributes where both are permitted.
+//
+// The resulting schema will always contain all of the same names that are
+// in the given schema, but some attribute schemas may instead be replaced by
+// block header schemas.
+func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
+	return effectiveSchema(given, b.original, b.names, true)
+}
+
+func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
+	var ret hcl.BodyContent
+	ret.Attributes = make(hcl.Attributes)
+	for name, attr := range content.Attributes {
+		ret.Attributes[name] = attr
+	}
+	blockAttrVals := make(map[string][]*hcl.Block)
+	for _, block := range content.Blocks {
+		if _, exists := b.names[block.Type]; exists {
+			// If we get here then we've found a block type whose instances need
+			// to be re-interpreted as a list-of-objects attribute. We'll gather
+			// those up and fix them up below.
+			blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
+			continue
+		}
+
+		// We need to now re-wrap our inner body so it will be subject to the
+		// same attribute-as-block fixup when recursively decoded.
+		retBlock := *block // shallow copy
+		if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
+			// It would be weird if this weren't ok, but we'll allow it for robustness; the body just won't be fixed up in that case.
+			retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
+		}
+
+		ret.Blocks = append(ret.Blocks, &retBlock)
+	}
+	// Now we'll install synthetic attributes for each of our fixups. We can't
+	// do this exactly because HCL's information model expects an attribute
+	// to be a single decl but we have multiple separate blocks. We'll
+	// approximate things, then, by using only our first block for the source
+	// location information. (We are guaranteed at least one by the above logic.)
+ for name, blocks := range blockAttrVals { + ret.Attributes[name] = &hcl.Attribute{ + Name: name, + Expr: &fixupBlocksExpr{ + blocks: blocks, + ety: b.schema.Attributes[name].Type.ElementType(), + }, + + Range: blocks[0].DefRange, + NameRange: blocks[0].TypeRange, + } + } + return &ret +} + +type fixupBlocksExpr struct { + blocks hcl.Blocks + ety cty.Type +} + +func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // In order to produce a suitable value for our expression we need to + // now decode the whole descendent block structure under each of our block + // bodies. + // + // That requires us to do something rather strange: we must construct a + // synthetic block type schema derived from the element type of the + // attribute, thus inverting our usual direction of lowering a schema + // into an implied type. Because a type is less detailed than a schema, + // the result is imprecise and in particular will just consider all + // the attributes to be optional and let the provider eventually decide + // whether to return errors if they turn out to be null when required. + schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety + spec := schema.DecoderSpec() + + vals := make([]cty.Value, len(e.blocks)) + var diags hcl.Diagnostics + for i, block := range e.blocks { + body := FixUpBlockAttrs(block.Body, schema) + val, blockDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, blockDiags...) + if val == cty.NilVal { + val = cty.UnknownVal(e.ety) + } + vals[i] = val + } + if len(vals) == 0 { + return cty.ListValEmpty(e.ety), diags + } + return cty.ListVal(vals), diags +} + +func (e *fixupBlocksExpr) Variables() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Variables(block.Body, spec)...) + } + return ret +} + +func (e *fixupBlocksExpr) Range() hcl.Range { + // This is not really an appropriate range for the expression but it's + // the best we can do from here. 
+ return e.blocks[0].DefRange +} + +func (e *fixupBlocksExpr) StartRange() hcl.Range { + return e.blocks[0].DefRange +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go new file mode 100644 index 00000000000..d6048cf13f3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go @@ -0,0 +1,146 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. + ret.Blocks = append(ret.Blocks, given.Blocks...) 
+ + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. +func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. +func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go new file mode 100644 index 00000000000..065139b9ae6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go @@ -0,0 +1,45 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. 
+func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) + } + } + + return vars +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go new file mode 100644 index 00000000000..f7d0246ff26 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go @@ -0,0 +1,34 @@ +package lang + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the Terraform codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. 
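+//
+// For example (illustrative): if GetInputVariable fails for a variable
+// declared as string, a conforming implementation returns
+// cty.UnknownVal(cty.String) together with its error diagnostics, so
+// that downstream type checking can still proceed.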
+type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go new file mode 100644 index 00000000000..af5c5cac0d2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go @@ -0,0 +1,5 @@ +// Package lang deals with the runtime aspects of Terraform's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. +package lang diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go new file mode 100644 index 00000000000..28552bb5064 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go @@ -0,0 +1,487 @@ +package lang + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. 
If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(evalDiags) + + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(evalDiags) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + }) + } + } + + return val, diags +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. 
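+//
+// A minimal usage sketch (assumed caller, not vendored code):
+//
+//	val, diags := scope.EvalReference(ref, cty.String)
+//	if !diags.HasErrors() && val.IsKnown() && !val.IsNull() {
+//		name := val.AsString() // safe: conversion to cty.String already happened
+//	}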
+func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. + ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. +func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(refs, s.SelfAddr) +} + +func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + vals := make(map[string]cty.Value) + funcs := s.Functions() + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: funcs, + } + + if len(refs) == 0 { + // Easy path for common case where there are no references at all. + return ctx, diags + } + + // First we'll do static validation of the references. This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. + if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { + diags = diags.Append(staticDiags) + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. + // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. 
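+	// For example (illustrative): references to both module.net and
+	// module.net.vpc_id are redundant in this sense, because the value for
+	// the whole module call already embeds the individual output; the
+	// buildModuleObjects helper below skips the per-output entries in
+	// that case.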
+ dataResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + managedResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} + moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} + inputVariables := map[string]cty.Value{} + localValues := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + terraformAttrs := map[string]cty.Value{} + countAttrs := map[string]cty.Value{} + forEachAttrs := map[string]cty.Value{} + var self cty.Value + + for _, ref := range refs { + rng := ref.SourceRange + isSelf := false + + rawSubj := ref.Subject + if rawSubj == addrs.Self { + if selfAddr == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + continue + } + + // Treat "self" as an alias for the configured self address. + rawSubj = selfAddr + isSelf = true + + if rawSubj == addrs.Self { + // Programming error: the self address cannot alias itself. + panic("scope SelfAddr attempting to alias itself") + } + } + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs. + switch subj := rawSubj.(type) { + + case addrs.ResourceInstance: + var into map[string]map[string]map[addrs.InstanceKey]cty.Value + switch subj.Resource.Mode { + case addrs.ManagedResourceMode: + into = managedResources + case addrs.DataResourceMode: + into = dataResources + default: + panic(fmt.Errorf("unsupported ResourceMode %s", subj.Resource.Mode)) + } + + val, valDiags := normalizeRefValue(s.Data.GetResourceInstance(subj, rng)) + diags = diags.Append(valDiags) + + r := subj.Resource + if into[r.Type] == nil { + into[r.Type] = make(map[string]map[addrs.InstanceKey]cty.Value) + } + if into[r.Type][r.Name] == nil { + into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value) + } + into[r.Type][r.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallInstance: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) + diags = diags.Append(valDiags) + + if wholeModules[subj.Call.Name] == nil { + wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) + } + wholeModules[subj.Call.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallOutput: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) + diags = diags.Append(valDiags) + + callName := subj.Call.Call.Name + callKey := subj.Call.Key + if moduleOutputs[callName] == nil { + moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) + } + if moduleOutputs[callName][callKey] == nil { + moduleOutputs[callName][callKey] = make(map[string]cty.Value) + } + moduleOutputs[callName][callKey][subj.Name] = val + if isSelf { + self = val + } + + case addrs.InputVariable: + val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) + diags = diags.Append(valDiags) + inputVariables[subj.Name] = val + if isSelf { + self = val + } + + case addrs.LocalValue: + val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) + diags = 
diags.Append(valDiags)
+			localValues[subj.Name] = val
+			if isSelf {
+				self = val
+			}
+
+		case addrs.PathAttr:
+			val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng))
+			diags = diags.Append(valDiags)
+			pathAttrs[subj.Name] = val
+			if isSelf {
+				self = val
+			}
+
+		case addrs.TerraformAttr:
+			val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng))
+			diags = diags.Append(valDiags)
+			terraformAttrs[subj.Name] = val
+			if isSelf {
+				self = val
+			}
+
+		case addrs.CountAttr:
+			val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng))
+			diags = diags.Append(valDiags)
+			countAttrs[subj.Name] = val
+			if isSelf {
+				self = val
+			}
+
+		case addrs.ForEachAttr:
+			val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng))
+			diags = diags.Append(valDiags)
+			forEachAttrs[subj.Name] = val
+			if isSelf {
+				self = val
+			}
+
+		default:
+			// Should never happen
+			panic(fmt.Errorf("Scope.evalContext cannot handle address type %T", rawSubj))
+		}
+	}
+
+	for k, v := range buildResourceObjects(managedResources) {
+		vals[k] = v
+	}
+	vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources))
+	vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs))
+	vals["var"] = cty.ObjectVal(inputVariables)
+	vals["local"] = cty.ObjectVal(localValues)
+	vals["path"] = cty.ObjectVal(pathAttrs)
+	vals["terraform"] = cty.ObjectVal(terraformAttrs)
+	vals["count"] = cty.ObjectVal(countAttrs)
+	vals["each"] = cty.ObjectVal(forEachAttrs)
+	if self != cty.NilVal {
+		vals["self"] = self
+	}
+
+	return ctx, diags
+}
+
+func buildResourceObjects(resources map[string]map[string]map[addrs.InstanceKey]cty.Value) map[string]cty.Value {
+	vals := make(map[string]cty.Value)
+	for typeName, names := range resources {
+		nameVals := make(map[string]cty.Value)
+		for name, keys := range names {
+			nameVals[name] = buildInstanceObjects(keys)
+		}
+		vals[typeName] = cty.ObjectVal(nameVals)
+	}
+	return vals
+}
+
+func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value {
+	vals := make(map[string]cty.Value)
+
+	for name, keys := range wholeModules {
+		vals[name] = buildInstanceObjects(keys)
+	}
+
+	for name, keys := range moduleOutputs {
+		if _, exists := wholeModules[name]; exists {
+			// If we also have a whole module value for this name then we'll
+			// skip this since the individual outputs are embedded in that result.
+			continue
+		}
+
+		// The shape of this collection isn't compatible with buildInstanceObjects,
+		// but rather than replicating most of the buildInstanceObjects logic
+		// here we'll instead first transform the structure to be what that
+		// function expects and then use it. This is a little wasteful, but
+		// we do not expect these maps to be large and so the extra work
+		// here should not hurt too much.
+		flattened := make(map[addrs.InstanceKey]cty.Value, len(keys))
+		for k, vals := range keys {
+			flattened[k] = cty.ObjectVal(vals)
+		}
+		vals[name] = buildInstanceObjects(flattened)
+	}
+
+	return vals
+}
+
+func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value {
+	if val, exists := keys[addrs.NoKey]; exists {
+		// If present, a "no key" value supersedes all other values,
+		// since they should be embedded inside it.
+		return val
+	}
+
+	// If we only have individual values then we need to construct
+	// either a list or a map, depending on what sort of keys we
+	// have.
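+	// Worked example (illustrative): keys {0: a, 1: b} produce
+	// cty.TupleVal([a, b]), keys {"blue": a} produce
+	// cty.ObjectVal({"blue": a}), and a NoKey entry would already have
+	// been returned alone by the check above.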
+	haveInt := false
+	haveString := false
+	maxInt := 0
+
+	for k := range keys {
+		switch tk := k.(type) {
+		case addrs.IntKey:
+			haveInt = true
+			if int(tk) > maxInt {
+				maxInt = int(tk)
+			}
+		case addrs.StringKey:
+			haveString = true
+		}
+	}
+
+	// We should either have ints or strings and not both, but
+	// if we have both then we'll prefer strings and let the
+	// language interpreter try to convert the int keys into
+	// strings in a map.
+	switch {
+	case haveString:
+		vals := make(map[string]cty.Value)
+		for k, v := range keys {
+			switch tk := k.(type) {
+			case addrs.StringKey:
+				vals[string(tk)] = v
+			case addrs.IntKey:
+				sk := strconv.Itoa(int(tk))
+				vals[sk] = v
+			}
+		}
+		return cty.ObjectVal(vals)
+	case haveInt:
+		// We'll make a tuple that is long enough for our maximum
+		// index value. It doesn't matter if we end up shorter than
+		// the number of instances because if length(...) were
+		// being evaluated we would've got a NoKey reference and
+		// thus not ended up in this codepath at all.
+		vals := make([]cty.Value, maxInt+1)
+		for i := range vals {
+			if v, exists := keys[addrs.IntKey(i)]; exists {
+				vals[i] = v
+			} else {
+				// Just a placeholder, since nothing will access this anyway
+				vals[i] = cty.DynamicVal
+			}
+		}
+		return cty.TupleVal(vals)
+	default:
+		// Should never happen because there are no other key types.
+		log.Printf("[ERROR] strange buildInstanceObjects call with no supported key types")
+		return cty.EmptyObjectVal
+	}
+}
+
+func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) {
+	if diags.HasErrors() {
+		// If there are errors then we will force an unknown result so that
+		// we can still evaluate and catch type errors but we'll avoid
+		// producing redundant re-statements of the same errors we've already
+		// dealt with here.
+		return cty.UnknownVal(val.Type()), diags
+	}
+	return val, diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go
new file mode 100644
index 00000000000..8c075148964
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go
@@ -0,0 +1,218 @@
+package funcs
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/apparentlymart/go-cidr/cidr"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// CidrHostFunc constructs a function that calculates a full host IP address
+// within a given IP network address prefix.
+var CidrHostFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "prefix",
+			Type: cty.String,
+		},
+		{
+			Name: "hostnum",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var hostNum int
+		if err := gocty.FromCtyValue(args[1], &hostNum); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		_, network, err := net.ParseCIDR(args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+		}
+
+		ip, err := cidr.Host(network, hostNum)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(ip.String()), nil
+	},
+})
+
+// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given
+// in CIDR notation into a subnet mask address.
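+//
+// For example, cidrnetmask("172.16.0.0/12") returns "255.240.0.0".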
+var CidrNetmaskFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "prefix",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		_, network, err := net.ParseCIDR(args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+		}
+
+		return cty.StringVal(net.IP(network.Mask).String()), nil
+	},
+})
+
+// CidrSubnetFunc constructs a function that calculates a subnet address within
+// a given IP network address prefix.
+var CidrSubnetFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "prefix",
+			Type: cty.String,
+		},
+		{
+			Name: "newbits",
+			Type: cty.Number,
+		},
+		{
+			Name: "netnum",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var newbits int
+		if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		var netnum int
+		if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		_, network, err := net.ParseCIDR(args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
+		}
+
+		// For portability with 32-bit systems where the subnet number
+		// will be a 32-bit int, we only allow extension of 32 bits in
+		// one call even if we're running on a 64-bit machine.
+		// (Of course, this is significant only for IPv6.)
+		if newbits > 32 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits")
+		}
+
+		newNetwork, err := cidr.Subnet(network, newbits, netnum)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(newNetwork.String()), nil
+	},
+})
+
+// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive
+// subnet addresses at once, rather than just a single subnet extension.
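+//
+// For example (matching the documented behavior of Terraform's cidrsubnets
+// function, which is built on this), cidrsubnets("10.1.0.0/16", 4, 4, 8, 4)
+// returns ["10.1.0.0/20", "10.1.16.0/20", "10.1.32.0/24", "10.1.48.0/20"].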
+var CidrSubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} + +// CidrSubnets calculates a sequence of consecutive subnet prefixes that may +// be of different prefix lengths under a common base prefix. 
+func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(newbits)+1) + args[0] = prefix + copy(args[1:], newbits) + return CidrSubnetsFunc.Call(args) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go new file mode 100644 index 00000000000..e6898457b9d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go @@ -0,0 +1,1519 @@ +package funcs + +import ( + "errors" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var ElementFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + list := args[0] + listTy := list.Type() + switch { + case listTy.IsListType(): + return listTy.ElementType(), nil + case listTy.IsTupleType(): + if !args[1].IsKnown() { + // If the index isn't known yet then we can't predict the + // result type since each tuple element can have its own type. + return cty.DynamicPseudoType, nil + } + + etys := listTy.TupleElementTypes() + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // e.g. fractional number where whole number is required + return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) + } + if len(etys) == 0 { + return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") + } + index = index % len(etys) + return etys[index], nil + default: + return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // can't happen because we checked this in the Type function above + return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) + } + + if !args[0].IsKnown() { + return cty.UnknownVal(retType), nil + } + + l := args[0].LengthInt() + if l == 0 { + return cty.DynamicVal, errors.New("cannot use element function with an empty list") + } + index = index % l + + // We did all the necessary type checks in the type function above, + // so this is guaranteed not to fail. 
+ return args[0].Index(cty.NumberIntVal(int64(index))), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + collTy := args[0].Type() + switch { + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + return cty.Number, nil + default: + return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + coll := args[0] + collTy := args[0].Type() + switch { + case collTy == cty.DynamicPseudoType: + return cty.UnknownVal(cty.Number), nil + case collTy.IsTupleType(): + l := len(collTy.TupleElementTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy.IsObjectType(): + l := len(collTy.AttributeTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy == cty.String: + // We'll delegate to the cty stdlib strlen function here, because + // it deals with all of the complexities of tokenizing unicode + // grapheme clusters. + return stdlib.Strlen(coll) + case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): + return coll.Length(), nil + default: + // Should never happen, because of the checks in our Type func above + return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") + } + }, +}) + +// CoalesceFunc constructs a function that takes any number of arguments and +// returns the first one that isn't empty. This function was copied from go-cty +// stdlib and modified so that it returns the first *non-empty* non-null element +// from a sequence, instead of merely the first non-null. +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + // We already know this will succeed because of the checks in our Type func above + argVal, _ = convert.Convert(argVal, retType) + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { + continue + } + + return argVal, nil + } + return cty.NilVal, errors.New("no non-null, non-empty-string arguments") + }, +}) + +// CoalesceListFunc constructs a function that takes any number of list arguments +// and returns the first one that isn't empty. 
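CoalesceFunc's deviation from the cty stdlib (empty strings are skipped as well as nulls) is worth one concrete illustration. The values below are invented, the funcs_test scaffolding is assumed, and the Coalesce wrapper being called is defined near the end of this file.

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleCoalesce() {
	v, err := funcs.Coalesce(
		cty.NullVal(cty.String),   // skipped: null
		cty.StringVal(""),         // skipped: empty string
		cty.StringVal("fallback"), // first acceptable value
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // fallback
}
```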
+var CoalesceListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, errors.New("at least one argument is required") + } + + argTypes := make([]cty.Type, len(args)) + + for i, arg := range args { + // if any argument is unknown, we can't know for certain which type we will return + if !arg.IsKnown() { + return cty.DynamicPseudoType, nil + } + ty := arg.Type() + + if !ty.IsListType() && !ty.IsTupleType() { + return cty.NilType, errors.New("coalescelist arguments must be lists or tuples") + } + + argTypes[i] = arg.Type() + } + + last := argTypes[0] + // If there are mixed types, we have to return a dynamic type. + for _, next := range argTypes[1:] { + if !next.Equals(last) { + return cty.DynamicPseudoType, nil + } + } + + return last, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, arg := range args { + if !arg.IsKnown() { + // If we run into an unknown list at some point, we can't + // predict the final result yet. (If there's a known, non-empty + // arg before this then we won't get here.) + return cty.UnknownVal(retType), nil + } + + if arg.LengthInt() > 0 { + return arg, nil + } + } + + return cty.NilVal, errors.New("no non-null arguments") + }, +}) + +// CompactFunc constructs a function that takes a list of strings and returns a new list +// with any empty string elements removed. +var CompactFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + if !listVal.IsWhollyKnown() { + // If some of the element values aren't known yet then we + // can't yet return a compacted list + return cty.UnknownVal(retType), nil + } + + var outputList []cty.Value + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + if v.IsNull() || v.AsString() == "" { + continue + } + outputList = append(outputList, v) + } + + if len(outputList) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + return cty.ListVal(outputList), nil + }, +}) + +// ContainsFunc constructs a function that determines whether a given list or +// set contains a given single value as one of its elements. +var ContainsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + arg := args[0] + ty := arg.Type() + + if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { + return cty.NilVal, errors.New("argument must be list, tuple, or set") + } + + _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) + if err != nil { + return cty.False, nil + } + + return cty.True, nil + }, +}) + +// IndexFunc constructs a function that finds the element index for a given value in a list.
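A short invented sketch of CompactFunc's behavior, under the same assumed funcs_test scaffolding, using the Compact wrapper defined later in this file:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleCompact() {
	v, err := funcs.Compact(cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal(""), cty.StringVal("b"),
	}))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.LengthInt()) // 2: the empty string is dropped
}
```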
+var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// DistinctFunc constructs a function that takes a list and returns a new list +// with any duplicate elements removed. +var DistinctFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + + if !listVal.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + var list []cty.Value + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + list, err = appendIfMissing(list, v) + if err != nil { + return cty.NilVal, err + } + } + + if len(list) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(list), nil + }, +}) + +// ChunklistFunc constructs a function that splits a single list into fixed-size chunks, +// returning a list of lists. +var ChunklistFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "size", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.List(args[0].Type()), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + if !listVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + + if listVal.LengthInt() == 0 { + return cty.ListValEmpty(listVal.Type()), nil + } + + var size int + err = gocty.FromCtyValue(args[1], &size) + if err != nil { + return cty.NilVal, fmt.Errorf("invalid size: %s", err) + } + + if size < 0 { + return cty.NilVal, errors.New("the size argument must be positive") + } + + output := make([]cty.Value, 0) + + // if size is 0, return a single-element list containing the whole input list + if size == 0 { + output = append(output, listVal) + return cty.ListVal(output), nil + } + + chunk := make([]cty.Value, 0) + + l := args[0].LengthInt() + i := 0 + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + chunk = append(chunk, v) + + // Close the current chunk when it reaches the requested size, or + // when we reach the end of the input values + if (i+1)%size == 0 || (i+1) == l { + output = append(output, cty.ListVal(chunk)) + chunk = make([]cty.Value, 0) + } + i++ + } + + return cty.ListVal(output), nil + }, +}) + +// FlattenFunc constructs a function that takes a list and replaces any elements +// that are lists with a flattened sequence of the list contents.
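ChunklistFunc's handling of the remainder (a final, shorter chunk) is the subtle part. An invented sketch, with the same assumed scaffolding, calling the Chunklist wrapper defined later in this file:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleChunklist() {
	v, err := funcs.Chunklist(
		cty.ListVal([]cty.Value{
			cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"),
			cty.StringVal("d"), cty.StringVal("e"),
		}),
		cty.NumberIntVal(2),
	)
	if err != nil {
		panic(err)
	}
	// Three chunks: ["a","b"], ["c","d"], and the remainder ["e"].
	fmt.Println(v.LengthInt()) // 3
}
```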
+var FlattenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsWhollyKnown() { + return cty.DynamicPseudoType, nil + } + + argTy := args[0].Type() + if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() { + return cty.NilType, errors.New("can only flatten lists, sets and tuples") + } + + retVal, known := flattener(args[0]) + if !known { + return cty.DynamicPseudoType, nil + } + + tys := make([]cty.Type, len(retVal)) + for i, ty := range retVal { + tys[i] = ty.Type() + } + return cty.Tuple(tys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputList := args[0] + if inputList.LengthInt() == 0 { + return cty.EmptyTupleVal, nil + } + + out, known := flattener(inputList) + if !known { + return cty.UnknownVal(retType), nil + } + + return cty.TupleVal(out), nil + }, +}) + +// Flatten until it's not a cty.List, and return whether the value is known. +// We can flatten lists with unknown values, as long as they are not +// lists themselves. +func flattener(flattenList cty.Value) ([]cty.Value, bool) { + out := make([]cty.Value, 0) + for it := flattenList.ElementIterator(); it.Next(); { + _, val := it.Element() + if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { + if !val.IsKnown() { + return out, false + } + + res, known := flattener(val) + if !known { + return res, known + } + out = append(out, res...) + } else { + out = append(out, val) + } + } + return out, true +} + +// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. +var KeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsMapType(): + return cty.List(cty.String), nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + // All of our result elements will be strings, and atys just + // decides how many there are. + etys := make([]cty.Type, len(atys)) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + m := args[0] + var keys []cty.Value + + switch { + case m.Type().IsObjectType(): + // In this case we allow unknown values so we must work only with + // the attribute _types_, not with the value itself. + var names []string + for name := range m.Type().AttributeTypes() { + names = append(names, name) + } + sort.Strings(names) // same ordering guaranteed by cty's ElementIterator + if len(names) == 0 { + return cty.EmptyTupleVal, nil + } + keys = make([]cty.Value, len(names)) + for i, name := range names { + keys[i] = cty.StringVal(name) + } + return cty.TupleVal(keys), nil + default: + if !m.IsKnown() { + return cty.UnknownVal(retType), nil + } + + // cty guarantees that ElementIterator will iterate in lexicographical + // order by key. 
+ for it := args[0].ElementIterator(); it.Next(); { + k, _ := it.Element() + keys = append(keys, k) + } + if len(keys) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(keys), nil + } + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. +// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, errors.New("at least one argument is required") + } + + argTypes := make([]cty.Type, len(args)) + + for i, arg := range args { + argTypes[i] = arg.Type() + } + + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.List(retType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + newList := make([]cty.Value, 0, len(args)) + + for _, arg := range args { + // We already know this will succeed because of the checks in our Type func above + arg, _ = convert.Convert(arg, retType.ElementType()) + newList = append(newList, arg) + } + + return cty.ListVal(newList), nil + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. +var LookupFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "default", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 1 || len(args) > 3 { + return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) + } + + ty := args[0].Type() + + switch { + case ty.IsObjectType(): + if !args[1].IsKnown() { + return cty.DynamicPseudoType, nil + } + + key := args[1].AsString() + if ty.HasAttribute(key) { + return args[0].GetAttr(key).Type(), nil + } else if len(args) == 3 { + // if the key isn't found but a default is provided, + // return the default type + return args[2].Type(), nil + } + return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) + case ty.IsMapType(): + if len(args) == 3 { + _, err = convert.Convert(args[2], ty.ElementType()) + if err != nil { + return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") + } + } + return ty.ElementType(), nil + default: + return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var defaultVal cty.Value + defaultValueSet := false + + if len(args) == 3 { + defaultVal = args[2] + defaultValueSet = true + } + + mapVar := args[0] + lookupKey := args[1].AsString() + + if !mapVar.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + if mapVar.Type().IsObjectType() { + if mapVar.Type().HasAttribute(lookupKey) { + return mapVar.GetAttr(lookupKey), nil + } + } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { + return mapVar.Index(cty.StringVal(lookupKey)), nil + 
} + + if defaultValueSet { + defaultVal, err = convert.Convert(defaultVal, retType) + if err != nil { + return cty.NilVal, err + } + return defaultVal, nil + } + + return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( + "lookup failed to find '%s'", lookupKey) + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 2 || len(args)%2 != 0 { + return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) + } + + argTypes := make([]cty.Type, len(args)/2) + index := 0 + + for i := 0; i < len(args); i += 2 { + argTypes[index] = args[i+1].Type() + index++ + } + + valType, _ := convert.UnifyUnsafe(argTypes) + if valType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.Map(valType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + } + + outputMap := make(map[string]cty.Value) + + for i := 0; i < len(args); i += 2 { + + key := args[i].AsString() + + err := gocty.FromCtyValue(args[i], &key) + if err != nil { + return cty.NilVal, err + } + + val := args[i+1] + + var variable cty.Value + err = gocty.FromCtyValue(val, &variable) + if err != nil { + return cty.NilVal, err + } + + // We already know this will succeed because of the checks in our Type func above + variable, _ = convert.Convert(variable, retType.ElementType()) + + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return cty.MapVal(outputMap), nil + }, +}) + +// MatchkeysFunc constructs a function that constructs a new list by taking a +// subset of elements from one list whose indexes match the corresponding +// indexes of values in another list. +var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. 
+ // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. + if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// MergeFunc constructs a function that takes an arbitrary number of maps and +// returns a single map that contains a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +var MergeFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "maps", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.DynamicPseudoType), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + outputMap := make(map[string]cty.Value) + + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { + return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) + } + for it := arg.ElementIterator(); it.Next(); { + k, v := it.Element() + outputMap[k.AsString()] = v + } + } + return cty.ObjectVal(outputMap), nil + }, +}) + +// ReverseFunc takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. 
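MergeFunc's last-writer-wins rule for colliding keys can be shown in a few lines. Values are invented and the funcs_test scaffolding is assumed; the Merge wrapper is defined later in this file, and the result is an object value, hence GetAttr:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleMerge() {
	v, err := funcs.Merge(
		cty.MapVal(map[string]cty.Value{
			"a": cty.StringVal("1"),
			"b": cty.StringVal("2"),
		}),
		cty.MapVal(map[string]cty.Value{
			"b": cty.StringVal("3"), // later maps win on key collisions
		}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("b").AsString()) // 3
}
```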
+var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + argTy := args[0].Type() + switch { + case argTy.IsTupleType(): + argTys := argTy.TupleElementTypes() + retTys := make([]cty.Type, len(argTys)) + for i, ty := range argTys { + retTys[len(retTys)-i-1] = ty + } + return cty.Tuple(retTys), nil + case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list + return cty.List(argTy.ElementType()), nil + default: + return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + in := args[0].AsValueSlice() + outVals := make([]cty.Value, len(in)) + for i, v := range in { + outVals[len(outVals)-i-1] = v + } + switch { + case retType.IsTupleType(): + return cty.TupleVal(outVals), nil + default: + if len(outVals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(outVals), nil + } + }, +}) + +// SetProductFunc calculates the cartesian product of two or more sets or +// sequences. If the arguments are all lists then the result is a list of tuples, +// preserving the ordering of all of the input lists. Otherwise the result is a +// set of tuples. +var SetProductFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "sets", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (retType cty.Type, err error) { + if len(args) < 2 { + return cty.NilType, errors.New("at least two arguments are required") + } + + listCount := 0 + elemTys := make([]cty.Type, len(args)) + for i, arg := range args { + aty := arg.Type() + switch { + case aty.IsSetType(): + elemTys[i] = aty.ElementType() + case aty.IsListType(): + elemTys[i] = aty.ElementType() + listCount++ + case aty.IsTupleType(): + // We can accept a tuple type only if there's some common type + // that all of its elements can be converted to. + allEtys := aty.TupleElementTypes() + if len(allEtys) == 0 { + elemTys[i] = cty.DynamicPseudoType + listCount++ + break + } + ety, _ := convert.UnifyUnsafe(allEtys) + if ety == cty.NilType { + return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") + } + elemTys[i] = ety + listCount++ + default: + return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") + } + } + + if listCount == len(args) { + return cty.List(cty.Tuple(elemTys)), nil + } + return cty.Set(cty.Tuple(elemTys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + ety := retType.ElementType() + + total := 1 + for _, arg := range args { + // Because of our type checking function, we are guaranteed that + // all of the arguments are known, non-null values of types that + // support LengthInt. + total *= arg.LengthInt() + } + + if total == 0 { + // If any of the arguments was an empty collection then our result + // is also an empty collection, which we'll short-circuit here. 
+ if retType.IsListType() { + return cty.ListValEmpty(ety), nil + } + return cty.SetValEmpty(ety), nil + } + + subEtys := ety.TupleElementTypes() + product := make([][]cty.Value, total) + + b := make([]cty.Value, total*len(args)) + n := make([]int, len(args)) + s := 0 + argVals := make([][]cty.Value, len(args)) + for i, arg := range args { + argVals[i] = arg.AsValueSlice() + } + + for i := range product { + e := s + len(args) + pi := b[s:e] + product[i] = pi + s = e + + for j, n := range n { + val := argVals[j][n] + ty := subEtys[j] + if !val.Type().Equals(ty) { + var err error + val, err = convert.Convert(val, ty) + if err != nil { + // Should never happen since we checked this in our + // type-checking function. + return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) + } + } + pi[j] = val + } + + for j := len(n) - 1; j >= 0; j-- { + n[j]++ + if n[j] < len(argVals[j]) { + break + } + n[j] = 0 + } + } + + productVals := make([]cty.Value, total) + for i, vals := range product { + productVals[i] = cty.TupleVal(vals) + } + + if retType.IsListType() { + return cty.ListVal(productVals), nil + } + return cty.SetVal(productVals), nil + }, +}) + +// SliceFunc constructs a function that extracts some consecutive elements +// from within a list. +var SliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "start_index", + Type: cty.Number, + }, + { + Name: "end_index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + arg := args[0] + argTy := arg.Type() + + if argTy.IsSetType() { + return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important") + } + if !argTy.IsListType() && !argTy.IsTupleType() { + return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value") + } + + startIndex, endIndex, idxsKnown, err := sliceIndexes(args) + if err != nil { + return cty.NilType, err + } + + if argTy.IsListType() { + return argTy, nil + } + + if !idxsKnown { + // If we don't know our start/end indices then we can't predict + // the result type if we're planning to return a tuple. + return cty.DynamicPseudoType, nil + } + return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputList := args[0] + + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + // we ignore idxsKnown return value here because the indices are always + // known here, or else the call would've short-circuited. 
+ startIndex, endIndex, _, err := sliceIndexes(args) + if err != nil { + return cty.NilVal, err + } + + if endIndex-startIndex == 0 { + if retType.IsTupleType() { + return cty.EmptyTupleVal, nil + } + return cty.ListValEmpty(retType.ElementType()), nil + } + + outputList := inputList.AsValueSlice()[startIndex:endIndex] + + if retType.IsTupleType() { + return cty.TupleVal(outputList), nil + } + + return cty.ListVal(outputList), nil + }, +}) + +func sliceIndexes(args []cty.Value) (int, int, bool, error) { + var startIndex, endIndex, length int + var startKnown, endKnown, lengthKnown bool + + if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known + length = args[0].LengthInt() + lengthKnown = true + } + + if args[1].IsKnown() { + if err := gocty.FromCtyValue(args[1], &startIndex); err != nil { + return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err) + } + if startIndex < 0 { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero") + } + if lengthKnown && startIndex > length { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list") + } + startKnown = true + } + if args[2].IsKnown() { + if err := gocty.FromCtyValue(args[2], &endIndex); err != nil { + return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err) + } + if endIndex < 0 { + return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero") + } + if lengthKnown && endIndex > length { + return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list") + } + endKnown = true + } + if startKnown && endKnown { + if startIndex > endIndex { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index") + } + } + return startIndex, endIndex, startKnown && endKnown, nil +} + +// TransposeFunc constructs a function that takes a map of lists of strings and +// swaps the keys and values to produce a new map of lists of strings.
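Since sliceIndexes treats the start index as inclusive and the end index as exclusive, a small invented example of the Slice wrapper (defined later in this file, same assumed scaffolding) may help:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleSlice() {
	v, err := funcs.Slice(
		cty.ListVal([]cty.Value{
			cty.StringVal("a"), cty.StringVal("b"),
			cty.StringVal("c"), cty.StringVal("d"),
		}),
		cty.NumberIntVal(1), // start index, inclusive
		cty.NumberIntVal(3), // end index, exclusive
	)
	if err != nil {
		panic(err)
	}
	for _, s := range v.AsValueSlice() {
		fmt.Println(s.AsString()) // prints "b" then "c"
	}
}
```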
+var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ValuesFunc constructs a function that returns a list of the map values, +// in the order of the sorted keys. +var ValuesFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + ty := args[0].Type() + if ty.IsMapType() { + return cty.List(ty.ElementType()), nil + } else if ty.IsObjectType() { + // The result is a tuple type with all of the same types as our + // object type's attributes, sorted in lexicographical order by the + // keys. (This matches the sort order guaranteed by ElementIterator + // on a cty object value.) + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + attrNames := make([]string, 0, len(atys)) + for name := range atys { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + + tys := make([]cty.Type, len(attrNames)) + for i, name := range attrNames { + tys[i] = atys[name] + } + return cty.Tuple(tys), nil + } + return cty.NilType, errors.New("values() requires a map as the first argument") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + mapVar := args[0] + + // We can just iterate the map/object value here because cty guarantees + // that these types always iterate in key lexicographical order. + var values []cty.Value + for it := mapVar.ElementIterator(); it.Next(); { + _, val := it.Element() + values = append(values, val) + } + + if retType.IsTupleType() { + return cty.TupleVal(values), nil + } + if len(values) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(values), nil + }, +}) + +// ZipmapFunc constructs a function that constructs a map from a list of keys +// and a corresponding list of values. 
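TransposeFunc's key/value swap reads more clearly with a concrete map. An invented sketch, same assumed scaffolding, via the Transpose wrapper defined below:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleTranspose() {
	v, err := funcs.Transpose(cty.MapVal(map[string]cty.Value{
		"a": cty.ListVal([]cty.Value{cty.StringVal("1"), cty.StringVal("2")}),
		"b": cty.ListVal([]cty.Value{cty.StringVal("1")}),
	}))
	if err != nil {
		panic(err)
	}
	// Result: {"1" = ["a", "b"], "2" = ["a"]}
	fmt.Println(v.Index(cty.StringVal("1")).LengthInt()) // 2
}
```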
+var ZipmapFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "keys", + Type: cty.List(cty.String), + }, + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + keys := args[0] + values := args[1] + valuesTy := values.Type() + + switch { + case valuesTy.IsListType(): + return cty.Map(values.Type().ElementType()), nil + case valuesTy.IsTupleType(): + if !keys.IsWhollyKnown() { + // Since zipmap with a tuple produces an object, we need to know + // all of the key names before we can predict our result type. + return cty.DynamicPseudoType, nil + } + + keysRaw := keys.AsValueSlice() + valueTypesRaw := valuesTy.TupleElementTypes() + if len(keysRaw) != len(valueTypesRaw) { + return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) + } + atys := make(map[string]cty.Type, len(valueTypesRaw)) + for i, keyVal := range keysRaw { + if keyVal.IsNull() { + return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) + } + key := keyVal.AsString() + atys[key] = valueTypesRaw[i] + } + return cty.Object(atys), nil + + default: + return cty.NilType, errors.New("values argument must be a list or tuple value") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + keys := args[0] + values := args[1] + + if !keys.IsWhollyKnown() { + // Unknown map keys and object attributes are not supported, so + // our entire result must be unknown in this case. + return cty.UnknownVal(retType), nil + } + + // both keys and values are guaranteed to be shallowly-known here, + // because our declared params above don't allow unknown or null values. + if keys.LengthInt() != values.LengthInt() { + return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) + } + + output := make(map[string]cty.Value) + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, v := it.Element() + val := values.Index(cty.NumberIntVal(int64(i))) + output[v.AsString()] = val + i++ + } + + switch { + case retType.IsMapType(): + if len(output) == 0 { + return cty.MapValEmpty(retType.ElementType()), nil + } + return cty.MapVal(output), nil + case retType.IsObjectType(): + return cty.ObjectVal(output), nil + default: + // Should never happen because the type-check function should've + // caught any other case. + return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) + } + }, +}) + +// helper function to add an element to a list, if it does not already exist +func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { + for _, ele := range slice { + eq, err := stdlib.Equal(ele, element) + if err != nil { + return slice, err + } + if eq.True() { + return slice, nil + } + } + return append(slice, element), nil +} + +// Element returns a single element from a given list at the given index. If +// index is greater than the length of the list then it is wrapped modulo +// the list length. +func Element(list, index cty.Value) (cty.Value, error) { + return ElementFunc.Call([]cty.Value{list, index}) +} + +// Length returns the number of elements in the given collection or number of +// Unicode characters in the given string. 
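When the values argument is a list, ZipmapFunc produces a map keyed by the corresponding entries of the keys list. An invented sketch, same assumed scaffolding, using the Zipmap wrapper defined later in this file:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleZipmap() {
	v, err := funcs.Zipmap(
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
		cty.ListVal([]cty.Value{cty.StringVal("1"), cty.StringVal("2")}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Index(cty.StringVal("b")).AsString()) // 2
}
```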
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} + +// Coalesce takes any number of arguments and returns the first one that isn't empty. +func Coalesce(args ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(args) +} + +// CoalesceList takes any number of list arguments and returns the first one that isn't empty. +func CoalesceList(args ...cty.Value) (cty.Value, error) { + return CoalesceListFunc.Call(args) +} + +// Compact takes a list of strings and returns a new list +// with any empty string elements removed. +func Compact(list cty.Value) (cty.Value, error) { + return CompactFunc.Call([]cty.Value{list}) +} + +// Contains determines whether a given list contains a given single value +// as one of its elements. +func Contains(list, value cty.Value) (cty.Value, error) { + return ContainsFunc.Call([]cty.Value{list, value}) +} + +// Index finds the element index for a given value in a list. +func Index(list, value cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{list, value}) +} + +// Distinct takes a list and returns a new list with any duplicate elements removed. +func Distinct(list cty.Value) (cty.Value, error) { + return DistinctFunc.Call([]cty.Value{list}) +} + +// Chunklist splits a single list into fixed-size chunks, returning a list of lists. +func Chunklist(list, size cty.Value) (cty.Value, error) { + return ChunklistFunc.Call([]cty.Value{list, size}) +} + +// Flatten takes a list and replaces any elements that are lists with a flattened +// sequence of the list contents. +func Flatten(list cty.Value) (cty.Value, error) { + return FlattenFunc.Call([]cty.Value{list}) +} + +// Keys takes a map and returns a sorted list of the map keys. +func Keys(inputMap cty.Value) (cty.Value, error) { + return KeysFunc.Call([]cty.Value{inputMap}) +} + +// List takes any number of list arguments and returns a list containing those +// values in the same order. +func List(args ...cty.Value) (cty.Value, error) { + return ListFunc.Call(args) +} + +// Lookup performs a dynamic lookup into a map. +// There are two required arguments, map and key, plus an optional default, +// which is a value to return if no key is found in map. +func Lookup(args ...cty.Value) (cty.Value, error) { + return LookupFunc.Call(args) +} + +// Map takes an even number of arguments and returns a map whose elements are constructed +// from consecutive pairs of arguments. +func Map(args ...cty.Value) (cty.Value, error) { + return MapFunc.Call(args) +} + +// Matchkeys constructs a new list by taking a subset of elements from one list +// whose indexes match the corresponding indexes of values in another list. +func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { + return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) +} + +// Merge takes an arbitrary number of maps and returns a single map that contains +// a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +func Merge(maps ...cty.Value) (cty.Value, error) { + return MergeFunc.Call(maps) +} + +// Reverse takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. +func Reverse(list cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{list}) +} + +// SetProduct computes the cartesian product of sets or sequences. 
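SetProduct's result kind depends on its inputs: all-list arguments yield a list of tuples in input order, while any set argument yields a set of tuples. An invented sketch with the same assumed scaffolding:

```go
package funcs_test

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func ExampleSetProduct() {
	v, err := funcs.SetProduct(
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
		cty.ListVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")}),
	)
	if err != nil {
		panic(err)
	}
	// A list of tuples: ["a","x"], ["a","y"], ["b","x"], ["b","y"].
	fmt.Println(v.LengthInt()) // 4
}
```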
+func SetProduct(sets ...cty.Value) (cty.Value, error) { + return SetProductFunc.Call(sets) +} + +// Slice extracts some consecutive elements from within a list. +func Slice(list, start, end cty.Value) (cty.Value, error) { + return SliceFunc.Call([]cty.Value{list, start, end}) +} + +// Transpose takes a map of lists of strings and swaps the keys and values to +// produce a new map of lists of strings. +func Transpose(values cty.Value) (cty.Value, error) { + return TransposeFunc.Call([]cty.Value{values}) +} + +// Values returns a list of the map values, in the order of the sorted keys. +// This function only works on flat maps. +func Values(values cty.Value) (cty.Value, error) { + return ValuesFunc.Call([]cty.Value{values}) +} + +// Zipmap constructs a map from a list of keys and a corresponding list of values. +func Zipmap(keys, values cty.Value) (cty.Value, error) { + return ZipmapFunc.Call([]cty.Value{keys, values}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go new file mode 100644 index 00000000000..83f85979722 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go @@ -0,0 +1,87 @@ +package funcs + +import ( + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. + switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. 
+ return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := args[0].Type() + switch { + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go new file mode 100644 index 00000000000..28074fb13eb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go @@ -0,0 +1,325 @@ +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "fmt" + "hash" + + uuidv5 "github.com/google/uuid" + uuid "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that 
computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) +} + +// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string +// and encodes it with Base64. +var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) +} + +// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. +var BcryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "cost", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + defaultCost := 10 + + if len(args) > 1 { + var val int + if err := gocty.FromCtyValue(args[1], &val); err != nil { + return cty.UnknownVal(cty.String), err + } + defaultCost = val + } + + if len(args) > 2 { + return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") + } + + input := args[0].AsString() + out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error()) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. +var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) + +// MakeFileMd5Func constructs a function that is like Md5Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileMd5Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) +} + +// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. +var RsaDecryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "ciphertext", + Type: cty.String, + }, + { + Name: "privatekey", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + key := args[1].AsString() + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return cty.UnknownVal(cty.String), fmt.Errorf( + "failed to parse key: password protected keys are not supported. Please decrypt the key prior to use", + ) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Sha1Func constructs a function that computes the SHA1 hash of a given string +// and encodes it with hexadecimal digits. +var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) + +// MakeFileSha1Func constructs a function that is like Sha1Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha1Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) +} + +// Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with hexadecimal digits. +var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) + +// MakeFileSha256Func constructs a function that is like Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) +} + +// Sha512Func constructs a function that computes the SHA512 hash of a given string +// and encodes it with hexadecimal digits. +var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) + +// MakeFileSha512Func constructs a function that is like Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) +} + +func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + h := hf() + h.Write([]byte(s)) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + h := hf() + h.Write(src) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +// UUID generates and returns a Type-4 UUID in the standard hexadecimal string +// format. +// +// This is not a pure function: it will generate a different result for each +// call. It must therefore be registered as an impure function in the function +// table in the "lang" package. +func UUID() (cty.Value, error) { + return UUIDFunc.Call(nil) +} + +// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string +// format. +func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { + return UUIDV5Func.Call([]cty.Value{namespace, name}) +} + +// Base64Sha256 computes the SHA256 hash of a given string and encodes it with +// Base64.
+// +// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +func Base64Sha256(str cty.Value) (cty.Value, error) { + return Base64Sha256Func.Call([]cty.Value{str}) +} + +// Base64Sha512 computes the SHA512 hash of a given string and encodes it with +// Base64. +// +// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +func Base64Sha512(str cty.Value) (cty.Value, error) { + return Base64Sha512Func.Call([]cty.Value{str}) +} + +// Bcrypt computes a hash of the given string using the Blowfish cipher, +// returning a string in the Modular Crypt Format +// usually expected in the shadow password file on many Unix systems. +func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(cost)+1) + args[0] = str + copy(args[1:], cost) + return BcryptFunc.Call(args) +} + +// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits. +func Md5(str cty.Value) (cty.Value, error) { + return Md5Func.Call([]cty.Value{str}) +} + +// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding +// cleartext. +func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { + return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) +} + +// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits. +func Sha1(str cty.Value) (cty.Value, error) { + return Sha1Func.Call([]cty.Value{str}) +} + +// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits. +func Sha256(str cty.Value) (cty.Value, error) { + return Sha256Func.Call([]cty.Value{str}) +} + +// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits. +func Sha512(str cty.Value) (cty.Value, error) { + return Sha512Func.Call([]cty.Value{str}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go new file mode 100644 index 00000000000..5dae198774a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go @@ -0,0 +1,70 @@ +package funcs + +import ( + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// TimestampFunc constructs a function that returns a string representation of the current date and time. +var TimestampFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil + }, +}) +
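Both TimestampFunc above and TimeAddFunc below are thin wrappers around Go's time package, so their behavior can be previewed in isolation. A minimal standalone sketch of the stdlib calls they delegate to (the timestamp values shown are illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// What TimestampFunc produces: the current UTC time in RFC 3339 form.
	fmt.Println(time.Now().UTC().Format(time.RFC3339)) // e.g. "2019-11-20T16:44:20Z"

	// What TimeAddFunc does with its two arguments: parse the timestamp as
	// RFC 3339, parse the duration with time.ParseDuration, add, re-format.
	ts, err := time.Parse(time.RFC3339, "2019-11-20T00:00:00Z")
	if err != nil {
		panic(err)
	}
	d, err := time.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Add(d).Format(time.RFC3339)) // "2019-11-20T01:30:00Z"
}
```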
+// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp. +var TimeAddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "timestamp", + Type: cty.String, + }, + { + Name: "duration", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ts, err := time.Parse(time.RFC3339, args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + duration, err := time.ParseDuration(args[1].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil + }, +}) + +// Timestamp returns a string representation of the current date and time. +// +// In the Terraform language, timestamps are conventionally represented as +// strings using RFC 3339 "Date and Time format" syntax, and so timestamp +// returns a string in this format. +func Timestamp() (cty.Value, error) { + return TimestampFunc.Call([]cty.Value{}) +} + +// TimeAdd adds a duration to a timestamp, returning a new timestamp. +// +// In the Terraform language, timestamps are conventionally represented as +// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires +// the timestamp argument to be a string conforming to this syntax. +// +// `duration` is a string representation of a time difference, consisting of +// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted +// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first +// number may be negative to indicate a negative duration, like `"-2h5m"`. +// +// The result is a string, also in RFC 3339 format, representing the result +// of adding the given duration to the given timestamp. +func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) { + return TimeAddFunc.Call([]cty.Value{timestamp, duration}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go new file mode 100644 index 00000000000..af93f08dc1b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go @@ -0,0 +1,140 @@ +package funcs + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "log" + "net/url" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. +var Base64DecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s) + } + if !utf8.Valid([]byte(sDec)) { + log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec) + return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") + } + return cty.StringVal(string(sDec)), nil + }, +}) + +// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. 
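Base64DecodeFunc above is the inverse of Base64EncodeFunc, defined next. A standalone round-trip sketch of the stdlib behavior both wrap, including the UTF-8 validity check the decoder applies before returning a string (the sample text is arbitrary):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"unicode/utf8"
)

func main() {
	// Encode, as base64encode does: UTF-8 bytes into the standard alphabet.
	enc := base64.StdEncoding.EncodeToString([]byte("Hello, Terraform"))
	fmt.Println(enc) // "SGVsbG8sIFRlcnJhZm9ybQ=="

	// Decode, as base64decode does, rejecting results that are not UTF-8.
	dec, err := base64.StdEncoding.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	if !utf8.Valid(dec) {
		panic("the result of decoding the provided string is not valid UTF-8")
	}
	fmt.Println(string(dec)) // "Hello, Terraform"
}
```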
+var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in +// Base64 encoding. +var Base64GzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) + } + if err := gz.Flush(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) + } + if err := gz.Close(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) + } + return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil + }, +}) + +// URLEncodeFunc constructs a function that applies URL encoding to a given string. +var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// Base64Decode decodes a string containing a base64 sequence. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func Base64Encode(str cty.Value) (cty.Value, error) { + return Base64EncodeFunc.Call([]cty.Value{str}) +} + +// Base64Gzip compresses a string with gzip and then encodes the result in +// Base64 encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. +func Base64Gzip(str cty.Value) (cty.Value, error) { + return Base64GzipFunc.Call([]cty.Value{str}) +} + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". 
+// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go new file mode 100644 index 00000000000..016b102d946 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go @@ -0,0 +1,360 @@ +package funcs + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) + } + return cty.StringVal(string(src)), nil + } + }, + }) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. +// +// As a special exception, a referenced template file may not recursively call +// the templatefile function, since that would risk the same file being +// included into itself indefinitely. +func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(fn string) (hcl.Expression, error) { + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. 
+ tmplVal, err := File(baseDir, cty.StringVal(fn)) + if err != nil { + return nil, err + } + + expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return nil, diags + } + + return expr, nil + } + + renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { + if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { + return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time + } + + ctx := &hcl.EvalContext{ + Variables: varsVal.AsValueMap(), + } + + // We'll pre-check references in the template here so we can give a + // more specialized error message than HCL would by default, so it's + // clearer that this problem is coming from a templatefile call. + for _, traversal := range expr.Variables() { + root := traversal.RootName() + if _, ok := ctx.Variables[root]; !ok { + return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) + } + } + + givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems + funcs := make(map[string]function.Function, len(givenFuncs)) + for name, fn := range givenFuncs { + if name == "templatefile" { + // We stub this one out to prevent recursive calls. + funcs[name] = function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call") + }, + }) + continue + } + funcs[name] = fn + } + ctx.Functions = funcs + + val, diags := expr.Value(ctx) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + return val, nil + } + + return function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + if !(args[0].IsKnown() && args[1].IsKnown()) { + return cty.DynamicPseudoType, nil + } + + // We'll render our template now to see what result type it produces. + // A template consisting only of a single interpolation can potentially + // return any type. + expr, err := loadTmpl(args[0].AsString()) + if err != nil { + return cty.DynamicPseudoType, err + } + + // This is safe even if args[1] contains unknowns because the HCL + // template renderer itself knows how to short-circuit those. 
+ val, err := renderTmpl(expr, args[1]) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + expr, err := loadTmpl(args[0].AsString()) + if err != nil { + return cty.DynamicVal, err + } + return renderTmpl(expr, args[1]) + }, + }) + +} + +// MakeFileExistsFunc constructs a function that takes a path +// and determines whether a file exists at that path +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False, nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) + } + + if fi.Mode().IsRegular() { + return cty.True, nil + } + + return cty.False, fmt.Errorf("%s is not a regular file, but %q", + path, fi.Mode().String()) + }, + }) +} + +// BasenameFunc constructs a function that takes a string containing a filesystem path +// and removes all except the last portion from it. +var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc constructs a function that takes a string containing a filesystem path +// and removes the last portion from it. +var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc constructs a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. 
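MakeFileExistsFunc above (and readFileBytes, later in this file) normalize paths the same way before touching the filesystem: expand a leading ~, join relative paths onto the base directory, then clean the result. A standalone sketch of that rule; the resolve helper and the sample baseDir are hypothetical, not SDK API:

```go
package main

import (
	"fmt"
	"path/filepath"

	homedir "github.com/mitchellh/go-homedir"
)

// resolve mirrors the path normalization used by the file functions.
func resolve(baseDir, path string) (string, error) {
	path, err := homedir.Expand(path)
	if err != nil {
		return "", fmt.Errorf("failed to expand ~: %s", err)
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(baseDir, path)
	}
	// Ensure that the path is canonical for the host OS.
	return filepath.Clean(path), nil
}

func main() {
	p, err := resolve("/work/config", "./templates/../main.tf")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // "/work/config/main.tf"
}
```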
+var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func readFileBytes(baseDir, path string) ([]byte, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + src, err := ioutil.ReadFile(path) + if err != nil { + // ReadFile does not return Terraform-user-friendly error + // messages, so we'll provide our own. + if os.IsNotExist(err) { + return nil, fmt.Errorf("no file exists at %s", path) + } + return nil, fmt.Errorf("failed to read %s", path) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. 
+func Dirname(path cty.Value) (cty.Value, error) { + return DirnameFunc.Call([]cty.Value{path}) +} + +// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with +// the current user's home directory path. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the leading segment in the path is not `~` then the given path is returned unmodified. +func Pathexpand(path cty.Value) (cty.Value, error) { + return PathExpandFunc.Call([]cty.Value{path}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go new file mode 100644 index 00000000000..c813f47bf67 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go @@ -0,0 +1,217 @@ +package funcs + +import ( + "math" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CeilFunc constructs a function that returns the closest whole number greater +// than or equal to the given value. +var CeilFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var val float64 + if err := gocty.FromCtyValue(args[0], &val); err != nil { + return cty.UnknownVal(cty.Number), err + } + return cty.NumberIntVal(int64(math.Ceil(val))), nil + }, +}) + +// FloorFunc constructs a function that returns the closest whole number lesser +// than or equal to the given value. +var FloorFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var val float64 + if err := gocty.FromCtyValue(args[0], &val); err != nil { + return cty.UnknownVal(cty.Number), err + } + return cty.NumberIntVal(int64(math.Floor(val))), nil + }, +}) + +// LogFunc constructs a function that returns the logarithm of a given number in a given base. +var LogFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "base", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.Number), err + } + + var base float64 + if err := gocty.FromCtyValue(args[1], &base); err != nil { + return cty.UnknownVal(cty.Number), err + } + + return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil + }, +}) +
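The numeric functions in this file all follow the same pattern: unpack the cty.Number argument into a native Go value with gocty.FromCtyValue, then call into the math package. A minimal sketch of that conversion step in isolation:

```go
package main

import (
	"fmt"
	"math"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func main() {
	// A cty.Number is backed by a big.Float, so CeilFunc and FloorFunc
	// first pull it out into a float64 before calling math.Ceil/math.Floor.
	arg := cty.NumberFloatVal(4.2)

	var val float64
	if err := gocty.FromCtyValue(arg, &val); err != nil {
		panic(err)
	}

	fmt.Println(int64(math.Ceil(val)))  // 5
	fmt.Println(int64(math.Floor(val))) // 4
}
```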
+// PowFunc constructs a function that returns a given number raised to a given power. +var PowFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "power", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.Number), err + } + + var power float64 + if err := gocty.FromCtyValue(args[1], &power); err != nil { + return cty.UnknownVal(cty.Number), err + } + + return cty.NumberFloatVal(math.Pow(num, power)), nil + }, +}) + +// SignumFunc constructs a function that returns the sign of a given number, +// as -1, 0, or 1. +var SignumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num int + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.Number), err + } + switch { + case num < 0: + return cty.NumberIntVal(-1), nil + case num > 0: + return cty.NumberIntVal(+1), nil + default: + return cty.NumberIntVal(0), nil + } + }, +}) + +// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base. +var ParseIntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "number", + Type: cty.DynamicPseudoType, + }, + { + Name: "base", + Type: cty.Number, + }, + }, + + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].Type().Equals(cty.String) { + return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName()) + } + return cty.Number, nil + }, + + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var numstr string + var base int + var err error + + if err = gocty.FromCtyValue(args[0], &numstr); err != nil { + return cty.UnknownVal(cty.Number), function.NewArgError(0, err) + } + + if err = gocty.FromCtyValue(args[1], &base); err != nil { + return cty.UnknownVal(cty.Number), function.NewArgError(1, err) + } + + if base < 2 || base > 62 { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 1, + "base must be a whole number between 2 and 62 inclusive", + ) + } + + num, ok := (&big.Int{}).SetString(numstr, base) + if !ok { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 0, + "cannot parse %q as a base %d integer", + numstr, + base, + ) + } + + parsedNum := cty.NumberVal((&big.Float{}).SetInt(num)) + + return parsedNum, nil + }, +}) + +// Ceil returns the closest whole number greater than or equal to the given value. +func Ceil(num cty.Value) (cty.Value, error) { + return CeilFunc.Call([]cty.Value{num}) +} + +// Floor returns the closest whole number lesser than or equal to the given value. +func Floor(num cty.Value) (cty.Value, error) { + return FloorFunc.Call([]cty.Value{num}) +} + +// Log returns the logarithm of a given number in a given base. +func Log(num, base cty.Value) (cty.Value, error) { + return LogFunc.Call([]cty.Value{num, base}) +} + +// Pow returns the given number raised to the given power. +func Pow(num, power cty.Value) (cty.Value, error) { + return PowFunc.Call([]cty.Value{num, power}) +} +
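ParseIntFunc above delegates the parsing itself to big.Int.SetString, which is also where the accepted base range of 2 through 62 comes from. A standalone sketch:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// The core of parseint: big.Int.SetString accepts bases 2 through 62.
	num, ok := new(big.Int).SetString("ff", 16)
	if !ok {
		panic(`cannot parse "ff" as a base 16 integer`)
	}
	fmt.Println(num) // 255

	// Digits beyond 9 run through a-z (and A-Z for bases above 36).
	num, ok = new(big.Int).SetString("zz", 36)
	if !ok {
		panic(`cannot parse "zz" as a base 36 integer`)
	}
	fmt.Println(num) // 1295
}
```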
+// Signum determines the sign of a number, returning a number between -1 and +// 1 to represent the sign. +func Signum(num cty.Value) (cty.Value, error) { + return SignumFunc.Call([]cty.Value{num}) +} + +// ParseInt parses a string argument and returns an integer of the specified base. +func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { + return ParseIntFunc.Call([]cty.Value{num, base}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go new file mode 100644 index 00000000000..c9ddf19e368 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go @@ -0,0 +1,280 @@ +package funcs + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// JoinFunc constructs a function that joins the string elements of one or more +// lists into a single string with a given separator. +var JoinFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "separator", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "lists", + Type: cty.List(cty.String), + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + sep := args[0].AsString() + listVals := args[1:] + if len(listVals) < 1 { + return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required") + } + + l := 0 + for _, list := range listVals { + if !list.IsWhollyKnown() { + return cty.UnknownVal(cty.String), nil + } + l += list.LengthInt() + } + + items := make([]string, 0, l) + for ai, list := range listVals { + ei := 0 + for it := list.ElementIterator(); it.Next(); { + _, val := it.Element() + if val.IsNull() { + if len(listVals) > 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1) + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei) + } + items = append(items, val.AsString()) + ei++ + } + } + + return cty.StringVal(strings.Join(items, sep)), nil + }, +}) + +// SortFunc constructs a function that re-orders the elements of a given list +// of strings into ascending lexicographical order. +var SortFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + listVal := args[0] + + if !listVal.IsWhollyKnown() { + // If some of the element values aren't known yet then we + // can't yet predict the order of the result. 
+ return cty.UnknownVal(retType), nil + } + if listVal.LengthInt() == 0 { // Easy path + return listVal, nil + } + + list := make([]string, 0, listVal.LengthInt()) + for it := listVal.ElementIterator(); it.Next(); { + iv, v := it.Element() + if v.IsNull() { + return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String()) + } + list = append(list, v.AsString()) + } + + sort.Strings(list) + retVals := make([]cty.Value, len(list)) + for i, s := range list { + retVals[i] = cty.StringVal(s) + } + return cty.ListVal(retVals), nil + }, +}) + +// SplitFunc constructs a function that divides a given string by a given +// separator, returning a list of the substrings between the separators. +var SplitFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "separator", + Type: cty.String, + }, + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + sep := args[0].AsString() + str := args[1].AsString() + elems := strings.Split(str, sep) + elemVals := make([]cty.Value, len(elems)) + for i, s := range elems { + elemVals[i] = cty.StringVal(s) + } + if len(elemVals) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(elemVals), nil + }, +}) + +// ChompFunc constructs a function that removes newline characters at the end of a string. +var ChompFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) + return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil + }, +}) + +// IndentFunc constructs a function that adds a given number of spaces to the +// beginnings of all but the first line in a given multi-line string. +var IndentFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "spaces", + Type: cty.Number, + }, + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var spaces int + if err := gocty.FromCtyValue(args[0], &spaces); err != nil { + return cty.UnknownVal(cty.String), err + } + data := args[1].AsString() + pad := strings.Repeat(" ", spaces) + return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil + }, +}) + +// ReplaceFunc constructs a function that searches a given string for another +// given substring, and replaces each occurrence with a given replacement string. +var ReplaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + { + Name: "replace", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + replace := args[2].AsString() + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. 
+ if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { + re, err := regexp.Compile(substr[1 : len(substr)-1]) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(re.ReplaceAllString(str, replace)), nil + } + + return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil + }, +}) + +// TitleFunc constructs a function that converts the first letter of each word +// in the given string to uppercase. +var TitleFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.StringVal(strings.Title(args[0].AsString())), nil + }, +}) + +// TrimSpaceFunc constructs a function that removes any space characters from +// the start and end of the given string. +var TrimSpaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil + }, +}) + +// Join concatenates together the string elements of one or more lists with a +// given separator. +func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(lists)+1) + args[0] = sep + copy(args[1:], lists) + return JoinFunc.Call(args) +} + +// Sort re-orders the elements of a given list of strings so that they are +// in ascending lexicographical order. +func Sort(list cty.Value) (cty.Value, error) { + return SortFunc.Call([]cty.Value{list}) +} + +// Split divides a given string by a given separator, returning a list of +// strings containing the characters between the separator sequences. +func Split(sep, str cty.Value) (cty.Value, error) { + return SplitFunc.Call([]cty.Value{sep, str}) +} + +// Chomp removes newline characters at the end of a string. +func Chomp(str cty.Value) (cty.Value, error) { + return ChompFunc.Call([]cty.Value{str}) +} + +// Indent adds a given number of spaces to the beginnings of all but the first +// line in a given multi-line string. +func Indent(spaces, str cty.Value) (cty.Value, error) { + return IndentFunc.Call([]cty.Value{spaces, str}) +} + +// Replace searches a given string for another given substring, +// and replaces all occurrences with a given replacement string. +func Replace(str, substr, replace cty.Value) (cty.Value, error) { + return ReplaceFunc.Call([]cty.Value{str, substr, replace}) +} + +// Title converts the first letter of each word in the given string to uppercase. +func Title(str cty.Value) (cty.Value, error) { + return TitleFunc.Call([]cty.Value{str}) +} +
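ReplaceFunc above preserves a legacy convention: a search string wrapped in forward slashes is compiled as a regular expression rather than matched literally. A standalone sketch of that dispatch; the replace helper here is hypothetical, not SDK API:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// replace mirrors ReplaceFunc: "/.../" means regexp, anything else is literal.
func replace(str, substr, repl string) (string, error) {
	if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
		re, err := regexp.Compile(substr[1 : len(substr)-1])
		if err != nil {
			return "", err
		}
		return re.ReplaceAllString(str, repl), nil
	}
	return strings.Replace(str, substr, repl, -1), nil
}

func main() {
	out, _ := replace("hello world", "/w.*d/", "terraform")
	fmt.Println(out) // "hello terraform" (regexp match)

	out, _ = replace("hello world", "world", "terraform")
	fmt.Println(out) // "hello terraform" (literal match)
}
```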
+// TrimSpace removes any space characters from the start and end of the given string. +func TrimSpace(str cty.Value) (cty.Value, error) { + return TrimSpaceFunc.Call([]cty.Value{str}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go new file mode 100644 index 00000000000..a3c4906646d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go @@ -0,0 +1,146 @@ +package lang + +import ( + ctyyaml "github.com/zclconf/go-cty-yaml" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs" +) + +var impureFunctions = []string{ + "bcrypt", + "timestamp", + "uuid", +} + +// Functions returns the set of functions that should be used when evaluating +// expressions in the receiving scope. +func (s *Scope) Functions() map[string]function.Function { + s.funcsLock.Lock() + if s.funcs == nil { + // Some of our functions are just directly the cty stdlib functions. + // Others are implemented in the subdirectory "funcs" here in this + // repository. New functions should generally start out their lives + // in the "funcs" directory and potentially graduate to cty stdlib + // later if the functionality seems to be something domain-agnostic + // that would be useful to all applications using cty functions. + + s.funcs = map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "abspath": funcs.AbsPathFunc, + "basename": funcs.BasenameFunc, + "base64decode": funcs.Base64DecodeFunc, + "base64encode": funcs.Base64EncodeFunc, + "base64gzip": funcs.Base64GzipFunc, + "base64sha256": funcs.Base64Sha256Func, + "base64sha512": funcs.Base64Sha512Func, + "bcrypt": funcs.BcryptFunc, + "ceil": funcs.CeilFunc, + "chomp": funcs.ChompFunc, + "cidrhost": funcs.CidrHostFunc, + "cidrnetmask": funcs.CidrNetmaskFunc, + "cidrsubnet": funcs.CidrSubnetFunc, + "cidrsubnets": funcs.CidrSubnetsFunc, + "coalesce": funcs.CoalesceFunc, + "coalescelist": funcs.CoalesceListFunc, + "compact": funcs.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": funcs.ContainsFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "dirname": funcs.DirnameFunc, + "distinct": funcs.DistinctFunc, + "element": funcs.ElementFunc, + "chunklist": funcs.ChunklistFunc, + "file": funcs.MakeFileFunc(s.BaseDir, false), + "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), + "filebase64": funcs.MakeFileFunc(s.BaseDir, true), + "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), + "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), + "filemd5": funcs.MakeFileMd5Func(s.BaseDir), + "filesha1": funcs.MakeFileSha1Func(s.BaseDir), + "filesha256": funcs.MakeFileSha256Func(s.BaseDir), + "filesha512": funcs.MakeFileSha512Func(s.BaseDir), + "flatten": funcs.FlattenFunc, + "floor": funcs.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": funcs.IndentFunc, + "index": funcs.IndexFunc, + "join": funcs.JoinFunc, + "jsondecode": stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": funcs.KeysFunc, + "length": funcs.LengthFunc, + "list": funcs.ListFunc, + "log": funcs.LogFunc, + "lookup": funcs.LookupFunc, + "lower": stdlib.LowerFunc, + "map": funcs.MapFunc, + "matchkeys": funcs.MatchkeysFunc, + "max": stdlib.MaxFunc, + "md5": funcs.Md5Func, + "merge": funcs.MergeFunc, + "min": 
stdlib.MinFunc, + "parseint": funcs.ParseIntFunc, + "pathexpand": funcs.PathExpandFunc, + "pow": funcs.PowFunc, + "range": stdlib.RangeFunc, + "regex": stdlib.RegexFunc, + "regexall": stdlib.RegexAllFunc, + "replace": funcs.ReplaceFunc, + "reverse": funcs.ReverseFunc, + "rsadecrypt": funcs.RsaDecryptFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": funcs.SetProductFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": funcs.Sha1Func, + "sha256": funcs.Sha256Func, + "sha512": funcs.Sha512Func, + "signum": funcs.SignumFunc, + "slice": funcs.SliceFunc, + "sort": funcs.SortFunc, + "split": funcs.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "timestamp": funcs.TimestampFunc, + "timeadd": funcs.TimeAddFunc, + "title": funcs.TitleFunc, + "tostring": funcs.MakeToFunc(cty.String), + "tonumber": funcs.MakeToFunc(cty.Number), + "tobool": funcs.MakeToFunc(cty.Bool), + "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), + "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), + "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), + "transpose": funcs.TransposeFunc, + "trimspace": funcs.TrimSpaceFunc, + "upper": stdlib.UpperFunc, + "urlencode": funcs.URLEncodeFunc, + "uuid": funcs.UUIDFunc, + "uuidv5": funcs.UUIDV5Func, + "values": funcs.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": funcs.ZipmapFunc, + } + + s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { + // The templatefile function prevents recursive calls to itself + // by copying this map and overwriting the "templatefile" entry. + return s.funcs + }) + + if s.PureOnly { + // Force our few impure functions to return unknown so that we + // can defer evaluating them until a later pass. + for _, name := range impureFunctions { + s.funcs[name] = function.Unpredictable(s.funcs[name]) + } + } + } + s.funcsLock.Unlock() + + return s.funcs +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go new file mode 100644 index 00000000000..8df09e9a8bf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go @@ -0,0 +1,81 @@ +package lang + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// References finds all of the references in the given set of traversals, +// returning diagnostics if any of the traversals cannot be interpreted as a +// reference. +// +// This function does not do any de-duplication of references, since references +// have source location information embedded in them and so any invalid +// references that are duplicated should have errors reported for each +// occurrence. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. Otherwise, the returned slice has one reference per +// given traversal, though it is not guaranteed that the references will +// appear in the same order as the given traversals. 
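In Functions above, the PureOnly branch relies on go-cty's function.Unpredictable wrapper, which keeps a function's signature but forces an unknown result so evaluation can be deferred until apply. A minimal standalone illustration, using a trivial stand-in for an impure function such as timestamp:

```go
package main

import (
	"fmt"
	"time"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// A stand-in for an impure function like "timestamp".
	impure := function.New(&function.Spec{
		Params: []function.Parameter{},
		Type:   function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
		},
	})

	// Wrapped, it still type-checks as string but yields an unknown value.
	deferred := function.Unpredictable(impure)
	v, err := deferred.Call(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown()) // false
}
```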
+func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'terraform' package, since it's + // in a better position to test this due to having mock providers etc + // available. + traversals := blocktoattr.ExpandedVariables(body, schema) + return References(traversals) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + return References(traversals) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go new file mode 100644 index 00000000000..a720cca6820 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go @@ -0,0 +1,34 @@ +package lang + +import ( + "sync" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. 
This is + // important during a plan phase to avoid generating results that could + // then differ during apply. + PureOnly bool + + funcs map[string]function.Function + funcsLock sync.Mutex +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go new file mode 100644 index 00000000000..0d7d664fc1f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go @@ -0,0 +1,3 @@ +// Package modsdir is an internal package containing the model types used to +// represent the manifest of modules in a local modules cache directory. +package modsdir diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go new file mode 100644 index 00000000000..2d45c8520ee --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go @@ -0,0 +1,138 @@ +package modsdir + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Record represents some metadata about an installed module, as part +// of a ModuleManifest. +type Record struct { + // Key is a unique identifier for this particular module, based on its + // position within the static module tree. + Key string `json:"Key"` + + // SourceAddr is the source address given for this module in configuration. + // This is used only to detect if the source was changed in configuration + // since the module was last installed, which means that the installer + // must re-install it. + SourceAddr string `json:"Source"` + + // Version is the exact version of the module, which results from parsing + // VersionStr. nil for un-versioned modules. + Version *version.Version `json:"-"` + + // VersionStr is the version specifier string. This is used only for + // serialization in snapshots and should not be accessed or updated + // by any other codepaths; use "Version" instead. + VersionStr string `json:"Version,omitempty"` + + // Dir is the path to the local directory where the module is installed. + Dir string `json:"Dir"` +} + +// Manifest is a map used to keep track of the filesystem locations +// and other metadata about installed modules. +// +// The configuration loader refers to this, while the module installer updates +// it to reflect any changes to the installed modules. +type Manifest map[string]Record + +func (m Manifest) ModuleKey(path addrs.Module) string { + return path.String() +} + +// manifestSnapshotFile is an internal struct used only to assist in our JSON +// serialization of manifest snapshots. It should not be used for any other +// purpose. +type manifestSnapshotFile struct { + Records []Record `json:"Modules"` +} + +func ReadManifestSnapshot(r io.Reader) (Manifest, error) { + src, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + if len(src) == 0 { + // This should never happen, but we'll tolerate it as if it were + // a valid empty JSON object. 
+ return make(Manifest), nil + } + + var read manifestSnapshotFile + err = json.Unmarshal(src, &read) + if err != nil { + return nil, fmt.Errorf("error unmarshalling snapshot: %s", err) + } + + new := make(Manifest) + for _, record := range read.Records { + if record.VersionStr != "" { + record.Version, err = version.NewVersion(record.VersionStr) + if err != nil { + return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err) + } + } + if _, exists := new[record.Key]; exists { + // This should never happen in any valid file, so we'll catch it + // and report it to avoid confusing/undefined behavior if the + // snapshot file was edited incorrectly outside of Terraform. + return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) + } + new[record.Key] = record + } + return new, nil +} + +func ReadManifestSnapshotForDir(dir string) (Manifest, error) { + fn := filepath.Join(dir, ManifestSnapshotFilename) + r, err := os.Open(fn) + if err != nil { + if os.IsNotExist(err) { + return make(Manifest), nil // missing file is okay and treated as empty + } + return nil, err + } + return ReadManifestSnapshot(r) +} + +func (m Manifest) WriteSnapshot(w io.Writer) error { + var write manifestSnapshotFile + + for _, record := range m { + // Make sure VersionStr is in sync with Version, since we encourage + // callers to manipulate Version and ignore VersionStr. + if record.Version != nil { + record.VersionStr = record.Version.String() + } else { + record.VersionStr = "" + } + write.Records = append(write.Records, record) + } + + src, err := json.Marshal(write) + if err != nil { + return err + } + + _, err = w.Write(src) + return err +} + +func (m Manifest) WriteSnapshotToDir(dir string) error { + fn := filepath.Join(dir, ManifestSnapshotFilename) + log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) + w, err := os.Create(fn) + if err != nil { + return err + } + defer w.Close() + return m.WriteSnapshot(w) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go new file mode 100644 index 00000000000..9ebb52431ba --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go @@ -0,0 +1,3 @@ +package modsdir + +const ManifestSnapshotFilename = "modules.json" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go new file mode 100644 index 00000000000..c8058871845 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go @@ -0,0 +1,43 @@ +package moduledeps + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Providers describes a set of provider dependencies for a given module. +// +// Each named provider instance can have one version constraint. +type Providers map[ProviderInstance]ProviderDependency + +// ProviderDependency describes the dependency for a particular provider +// instance, including both the set of allowed versions and the reason for +// the dependency. +type ProviderDependency struct { + Constraints discovery.Constraints + Reason ProviderDependencyReason +} + +// ProviderDependencyReason is an enumeration of reasons why a dependency might be +// present. 
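Stepping back to the modsdir manifest above: WriteSnapshot and ReadManifestSnapshot are designed to round-trip through JSON, with VersionStr acting as the serialized form of Version. A same-package sketch of that round trip (illustrative only, since modsdir is internal to the SDK; assumes a bytes import and the hypothetical roundTrip helper name):

```go
// roundTrip is a hypothetical helper showing the snapshot round-trip.
func roundTrip(m Manifest) (Manifest, error) {
	var buf bytes.Buffer

	// WriteSnapshot syncs VersionStr from Version before marshalling.
	if err := m.WriteSnapshot(&buf); err != nil {
		return nil, err
	}

	// ReadManifestSnapshot parses VersionStr back into Version and
	// rejects duplicate module keys.
	return ReadManifestSnapshot(&buf)
}
```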
+type ProviderDependencyReason int + +const ( + // ProviderDependencyExplicit means that there is an explicit "provider" + // block in the configuration for this module. + ProviderDependencyExplicit ProviderDependencyReason = iota + + // ProviderDependencyImplicit means that there is no explicit "provider" + // block but there is at least one resource that uses this provider. + ProviderDependencyImplicit + + // ProviderDependencyInherited is a special case of + // ProviderDependencyImplicit where a parent module has defined a + // configuration for the provider that has been inherited by at least one + // resource in this module. + ProviderDependencyInherited + + // ProviderDependencyFromState means that this provider is not currently + // referenced by configuration at all, but some existing instances in + // the state still depend on it. + ProviderDependencyFromState +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go new file mode 100644 index 00000000000..7eff083157d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go @@ -0,0 +1,7 @@ +// Package moduledeps contains types that can be used to describe the +// providers required for all of the modules in a module tree. +// +// It does not itself contain the functionality for populating such +// data structures; that's in Terraform core, since this package intentionally +// does not depend on terraform core to avoid package dependency cycles. +package moduledeps diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go new file mode 100644 index 00000000000..388a2ce1f9d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go @@ -0,0 +1,204 @@ +package moduledeps + +import ( + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Module represents the dependencies of a single module, as well being +// a node in a tree of such structures representing the dependencies of +// an entire configuration. +type Module struct { + Name string + Providers Providers + Children []*Module +} + +// WalkFunc is a callback type for use with Module.WalkTree +type WalkFunc func(path []string, parent *Module, current *Module) error + +// WalkTree calls the given callback once for the receiver and then +// once for each descendent, in an order such that parents are called +// before their children and siblings are called in the order they +// appear in the Children slice. +// +// When calling the callback, parent will be nil for the first call +// for the receiving module, and then set to the direct parent of +// each module for the subsequent calls. +// +// The path given to the callback is valid only until the callback +// returns, after which it will be mutated and reused. Callbacks must +// therefore copy the path slice if they wish to retain it. +// +// If the given callback returns an error, the walk will be aborted at +// that point and that error returned to the caller. 
+// +// This function is not thread-safe for concurrent modifications of the +// data structure, so it's the caller's responsibility to arrange for that +// should it be needed. +// +// It is safe for a callback to modify the descendents of the "current" +// module, including the ordering of the Children slice itself, but the +// callback MUST NOT modify the parent module. +func (m *Module) WalkTree(cb WalkFunc) error { + return walkModuleTree(make([]string, 0, 1), nil, m, cb) +} + +func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { + path = append(path, current.Name) + err := cb(path, parent, current) + if err != nil { + return err + } + + for _, child := range current.Children { + err := walkModuleTree(path, current, child, cb) + if err != nil { + return err + } + } + return nil +} + +// SortChildren sorts the Children slice into lexicographic order by +// name, in-place. +// +// This is primarily useful prior to calling WalkTree so that the walk +// will proceed in a consistent order. +func (m *Module) SortChildren() { + sort.Sort(sortModules{m.Children}) +} + +// SortDescendents is a convenience wrapper for calling SortChildren on +// the receiver and all of its descendent modules. +func (m *Module) SortDescendents() { + m.WalkTree(func(path []string, parent *Module, current *Module) error { + current.SortChildren() + return nil + }) +} + +type sortModules struct { + modules []*Module +} + +func (s sortModules) Len() int { + return len(s.modules) +} + +func (s sortModules) Less(i, j int) bool { + cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) + return cmp < 0 +} + +func (s sortModules) Swap(i, j int) { + s.modules[i], s.modules[j] = s.modules[j], s.modules[i] +} + +// PluginRequirements produces a PluginRequirements structure that can +// be used with discovery.PluginMetaSet.ConstrainVersions to identify +// suitable plugins to satisfy the module's provider dependencies. +// +// This method only considers the direct requirements of the receiver. +// Use AllPluginRequirements to flatten the dependencies for the +// entire tree of modules. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) PluginRequirements() discovery.PluginRequirements { + ret := make(discovery.PluginRequirements) + for inst, dep := range m.Providers { + // m.Providers is keyed on provider names, such as "aws.foo". + // a PluginRequirements wants keys to be provider *types*, such + // as "aws". If there are multiple aliases for the same + // provider then we will flatten them into a single requirement + // by combining their constraint sets. + pty := inst.Type() + if existing, exists := ret[pty]; exists { + ret[pty].Versions = existing.Versions.Append(dep.Constraints) + } else { + ret[pty] = &discovery.PluginConstraints{ + Versions: dep.Constraints, + } + } + } + return ret +} + +// AllPluginRequirements calls PluginRequirements for the receiver and all +// of its descendents, and merges the result into a single PluginRequirements +// structure that would satisfy all of the modules together. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. 
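A short sketch of the walk contract described above, in particular the requirement to copy the path slice before retaining it (module names here are illustrative; the internal import path is assumed usable for the example):

    root := &moduledeps.Module{
        Name: "root",
        Children: []*moduledeps.Module{
            {Name: "b"},
            {Name: "a"},
        },
    }
    root.SortChildren() // normalize ordering so the walk is deterministic

    var paths [][]string
    root.WalkTree(func(path []string, parent, current *moduledeps.Module) error {
        // The path slice is mutated and reused between callback invocations,
        // so take a copy before storing it.
        p := make([]string, len(path))
        copy(p, path)
        paths = append(paths, p)
        return nil
    })
    // paths is now [["root"], ["root", "a"], ["root", "b"]]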
+func (m *Module) AllPluginRequirements() discovery.PluginRequirements { + var ret discovery.PluginRequirements + m.WalkTree(func(path []string, parent *Module, current *Module) error { + ret = ret.Merge(current.PluginRequirements()) + return nil + }) + return ret +} + +// Equal returns true if the receiver is the root of an identical tree +// to the other given Module. This is a deep comparison that considers +// the equality of all downstream modules too. +// +// The children are considered to be ordered, so callers may wish to use +// SortDescendents first to normalize the order of the slices of child nodes. +// +// The implementation of this function is not optimized since it is provided +// primarily for use in tests. +func (m *Module) Equal(other *Module) bool { + // take care of nils first + if m == nil && other == nil { + return true + } else if (m == nil && other != nil) || (m != nil && other == nil) { + return false + } + + if m.Name != other.Name { + return false + } + + if len(m.Providers) != len(other.Providers) { + return false + } + if len(m.Children) != len(other.Children) { + return false + } + + // Can't use reflect.DeepEqual on this provider structure because + // the nested Constraints objects contain function pointers that + // never compare as equal. So we'll need to walk it the long way. + for inst, dep := range m.Providers { + if _, exists := other.Providers[inst]; !exists { + return false + } + + if dep.Reason != other.Providers[inst].Reason { + return false + } + + // Constraints are not too easy to compare robustly, so + // we'll just use their string representations as a proxy + // for now. + if dep.Constraints.String() != other.Providers[inst].Constraints.String() { + return false + } + } + + // Above we already checked that we have the same number of children + // in each module, so now we just need to check that they are + // recursively equal. + for i := range m.Children { + if !m.Children[i].Equal(other.Children[i]) { + return false + } + } + + // If we fall out here then they are equal + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go new file mode 100644 index 00000000000..89ceefb2cf0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go @@ -0,0 +1,30 @@ +package moduledeps + +import ( + "strings" +) + +// ProviderInstance describes a particular provider instance by its full name, +// like "null" or "aws.foo". +type ProviderInstance string + +// Type returns the provider type of this instance. For example, for an instance +// named "aws.foo" the type is "aws". +func (p ProviderInstance) Type() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + t = t[:dotPos] + } + return t +} + +// Alias returns the alias of this provider, if any. An instance named "aws.foo" +// has the alias "foo", while an instance named just "docker" has no alias, +// so the empty string would be returned. 
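The alias-flattening behavior of PluginRequirements can be seen with two instances of the same provider type. This sketch assumes discovery.ConstraintStr and its MustParse helper behave as in the discovery package, which is not shown in this diff:

    m := &moduledeps.Module{
        Name: "root",
        Providers: moduledeps.Providers{
            "aws":     {Constraints: discovery.ConstraintStr(">= 2.0").MustParse()},
            "aws.foo": {Constraints: discovery.ConstraintStr("< 4.0").MustParse()},
        },
    }

    reqs := m.PluginRequirements()
    // Both the default instance "aws" and the alias "aws.foo" collapse onto
    // the single provider *type* key "aws", with their constraint sets
    // appended together.
    constraints := reqs["aws"].Versions
    _ = constraints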
+func (p ProviderInstance) Alias() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + return t[dotPos+1:] + } + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go new file mode 100644 index 00000000000..c653b106b3a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go @@ -0,0 +1,22 @@ +package plans + +type Action rune + +const ( + NoOp Action = 0 + Create Action = '+' + Read Action = '←' + Update Action = '~' + DeleteThenCreate Action = '∓' + CreateThenDelete Action = '±' + Delete Action = '-' +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type Action + +// IsReplace returns true if the action is one of the two actions that +// represents replacing an existing object with a new object: +// DeleteThenCreate or CreateThenDelete. +func (a Action) IsReplace() bool { + return a == DeleteThenCreate || a == CreateThenDelete +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go new file mode 100644 index 00000000000..be43ab1757b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go @@ -0,0 +1,49 @@ +// Code generated by "stringer -type Action"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoOp-0] + _ = x[Create-43] + _ = x[Read-8592] + _ = x[Update-126] + _ = x[DeleteThenCreate-8723] + _ = x[CreateThenDelete-177] + _ = x[Delete-45] +} + +const ( + _Action_name_0 = "NoOp" + _Action_name_1 = "Create" + _Action_name_2 = "Delete" + _Action_name_3 = "Update" + _Action_name_4 = "CreateThenDelete" + _Action_name_5 = "Read" + _Action_name_6 = "DeleteThenCreate" +) + +func (i Action) String() string { + switch { + case i == 0: + return _Action_name_0 + case i == 43: + return _Action_name_1 + case i == 45: + return _Action_name_2 + case i == 126: + return _Action_name_3 + case i == 177: + return _Action_name_4 + case i == 8592: + return _Action_name_5 + case i == 8723: + return _Action_name_6 + default: + return "Action(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go new file mode 100644 index 00000000000..5c2028c832a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go @@ -0,0 +1,308 @@ +package plans + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/zclconf/go-cty/cty" +) + +// Changes describes various actions that Terraform will attempt to take if +// the corresponding plan is applied. +// +// A Changes object can be rendered into a visual diff (by the caller, using +// code in another package) for display to the user. 
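The Action runes above round-trip through the generated stringer; a small sketch of how a caller might observe them (a code fragment, assuming the usual fmt import):

    a := plans.DeleteThenCreate
    fmt.Println(a.IsReplace())     // true: one of the two replace sequences
    fmt.Println(a)                 // "DeleteThenCreate", via the generated String method
    fmt.Printf("%c\n", rune(a))    // "∓", the underlying rune value
    fmt.Println(plans.Action('?')) // "Action(63)" for values outside the enum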
+type Changes struct {
+	// Resources tracks planned changes to resource instance objects.
+	Resources []*ResourceInstanceChangeSrc
+
+	// Outputs tracks planned changes to output values.
+	//
+	// Note that although an in-memory plan contains planned changes for
+	// outputs throughout the configuration, a plan serialized
+	// to disk retains only the root outputs because they are
+	// externally-visible, while other outputs are implementation details and
+	// can be easily re-calculated during the apply phase. Therefore only root
+	// module outputs will survive a round-trip through a plan file.
+	Outputs []*OutputChangeSrc
+}
+
+// NewChanges returns a valid Changes object that describes no changes.
+func NewChanges() *Changes {
+	return &Changes{}
+}
+
+func (c *Changes) Empty() bool {
+	for _, res := range c.Resources {
+		if res.Action != NoOp {
+			return false
+		}
+	}
+	return true
+}
+
+// ResourceInstance returns the planned change for the current object of the
+// resource instance of the given address, if any. Returns nil if no change is
+// planned.
+func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
+	addrStr := addr.String()
+	for _, rc := range c.Resources {
+		if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
+			return rc
+		}
+	}
+
+	return nil
+}
+
+// ResourceInstanceDeposed returns the planned change for a deposed object of
+// the resource instance of the given address, if any. Returns nil if no change
+// is planned.
+func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
+	addrStr := addr.String()
+	for _, rc := range c.Resources {
+		if rc.Addr.String() == addrStr && rc.DeposedKey == key {
+			return rc
+		}
+	}
+
+	return nil
+}
+
+// OutputValue returns the planned change for the output value with the
+// given address, if any. Returns nil if no change is planned.
+func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
+	addrStr := addr.String()
+	for _, oc := range c.Outputs {
+		if oc.Addr.String() == addrStr {
+			return oc
+		}
+	}
+
+	return nil
+}
+
+// SyncWrapper returns a wrapper object around the receiver that can be used
+// to make certain changes to the receiver in a concurrency-safe way, as long
+// as all callers share the same wrapper object.
+func (c *Changes) SyncWrapper() *ChangesSync {
+	return &ChangesSync{
+		changes: c,
+	}
+}
+
+// ResourceInstanceChange describes a change to a particular resource instance
+// object.
+type ResourceInstanceChange struct {
+	// Addr is the absolute address of the resource instance that the change
+	// will apply to.
+	Addr addrs.AbsResourceInstance
+
+	// DeposedKey is the identifier for a deposed object associated with the
+	// given instance, or states.NotDeposed if this change applies to the
+	// current object.
+	//
+	// A Replace change for a resource with create_before_destroy set will
+	// create a new DeposedKey temporarily during replacement. In that case,
+	// DeposedKey in the plan is always states.NotDeposed, representing that
+	// the current object is being replaced with the deposed.
+	DeposedKey states.DeposedKey
+
+	// Provider is the address of the provider configuration that was used
+	// to plan this change, and thus the configuration that must also be
+	// used to apply it.
+	ProviderAddr addrs.AbsProviderConfig
+
+	// Change is an embedded description of the change.
+	Change
+
+	// RequiredReplace is a set of paths that caused the change action to be
+	// Replace rather than Update. Always nil if the change action is not
+	// Replace.
+	//
+	// This is retained only for UI-plan-rendering purposes and so it does not
+	// currently survive a round-trip through a saved plan file.
+	RequiredReplace cty.PathSet
+
+	// Private allows a provider to stash any extra data that is opaque to
+	// Terraform that relates to this change. Terraform will save this
+	// byte-for-byte and return it to the provider in the apply call.
+	Private []byte
+}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file. Pass the implied type of the
+// corresponding resource type schema for correct operation.
+func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
+	cs, err := rc.Change.Encode(ty)
+	if err != nil {
+		return nil, err
+	}
+	return &ResourceInstanceChangeSrc{
+		Addr:            rc.Addr,
+		DeposedKey:      rc.DeposedKey,
+		ProviderAddr:    rc.ProviderAddr,
+		ChangeSrc:       *cs,
+		RequiredReplace: rc.RequiredReplace,
+		Private:         rc.Private,
+	}, err
+}
+
+// Simplify will, where possible, produce a change with a simpler action than
+// the receiver given a flag indicating whether the caller is dealing with
+// a normal apply or a destroy. This flag deals with the fact that Terraform
+// Core uses a specialized graph node type for destroying; only that
+// specialized node should set "destroying" to true.
+//
+// The following table shows the simplification behavior:
+//
+//	Action    Destroying?   New Action
+//	--------+-------------+-----------
+//	Create    true          NoOp
+//	Delete    false         NoOp
+//	Replace   true          Delete
+//	Replace   false         Create
+//
+// For any combination not in the above table, Simplify just returns the
+// receiver as-is.
+func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
+	if destroying {
+		switch rc.Action {
+		case Delete:
+			// We'll fall out and just return rc verbatim, then.
+		case CreateThenDelete, DeleteThenCreate:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: Delete,
+					Before: rc.Before,
+					After:  cty.NullVal(rc.Before.Type()),
+				},
+			}
+		default:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: NoOp,
+					Before: rc.Before,
+					After:  rc.Before,
+				},
+			}
+		}
+	} else {
+		switch rc.Action {
+		case Delete:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: NoOp,
+					Before: rc.Before,
+					After:  rc.Before,
+				},
+			}
+		case CreateThenDelete, DeleteThenCreate:
+			return &ResourceInstanceChange{
+				Addr:         rc.Addr,
+				DeposedKey:   rc.DeposedKey,
+				Private:      rc.Private,
+				ProviderAddr: rc.ProviderAddr,
+				Change: Change{
+					Action: Create,
+					Before: cty.NullVal(rc.After.Type()),
+					After:  rc.After,
+				},
+			}
+		}
+	}
+
+	// If we fall out here then our change is already simple enough.
+	return rc
+}
+
+// OutputChange describes a change to an output value.
+type OutputChange struct {
+	// Addr is the absolute address of the output value that the change
+	// will apply to.
+	Addr addrs.AbsOutputValue
+
+	// Change is an embedded description of the change.
+	//
+	// For output value changes, the type constraint for the DynamicValue
+	// instances is always cty.DynamicPseudoType.
+	Change
+
+	// Sensitive, if true, indicates that either the old or new value in the
+	// change is sensitive and so a rendered version of the plan in the UI
+	// should elide the actual values while still indicating the action of the
+	// change.
+	Sensitive bool
+}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file.
+func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
+	cs, err := oc.Change.Encode(cty.DynamicPseudoType)
+	if err != nil {
+		return nil, err
+	}
+	return &OutputChangeSrc{
+		Addr:      oc.Addr,
+		ChangeSrc: *cs,
+		Sensitive: oc.Sensitive,
+	}, err
+}
+
+// Change describes a single change with a given action.
+type Change struct {
+	// Action defines what kind of change is being made.
+	Action Action
+
+	// Interpretation of Before and After depends on Action:
+	//
+	//	NoOp     Before and After are the same, unchanged value
+	//	Create   Before is nil, and After is the expected value after create.
+	//	Read     Before is any prior value (nil if no prior), and After is the
+	//	         value that was or will be read.
+	//	Update   Before is the value prior to update, and After is the expected
+	//	         value after update.
+	//	Replace  As with Update.
+	//	Delete   Before is the value prior to delete, and After is always nil.
+	//
+	// Unknown values may appear anywhere within the Before and After values,
+	// either as the values themselves or as nested elements within known
+	// collections/structures.
+	Before, After cty.Value
+}
+
+// Encode produces a variant of the receiver that has its change values
+// serialized so it can be written to a plan file. Pass the type constraint
+// that the values are expected to conform to; to properly decode the values
+// later an identical type constraint must be provided at that time.
+//
+// Where a Change is embedded in some other struct, it's generally better
+// to call the corresponding Encode method of that struct rather than working
+// directly with its embedded Change.
+func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
+	beforeDV, err := NewDynamicValue(c.Before, ty)
+	if err != nil {
+		return nil, err
+	}
+	afterDV, err := NewDynamicValue(c.After, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ChangeSrc{
+		Action: c.Action,
+		Before: beforeDV,
+		After:  afterDV,
+	}, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go
new file mode 100644
index 00000000000..97bc8da7c48
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go
@@ -0,0 +1,190 @@
+package plans
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.
+// Pass the associated resource type's schema type to method Decode to
+// obtain a ResourceInstanceChange.
+type ResourceInstanceChangeSrc struct {
+	// Addr is the absolute address of the resource instance that the change
+	// will apply to.
+ Addr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // ChangeSrc is an embedded description of the not-yet-decoded change. + ChangeSrc + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Decode unmarshals the raw representation of the instance object being +// changed. Pass the implied type of the corresponding resource type schema +// for correct operation. +func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { + change, err := rcs.ChangeSrc.Decode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChange{ + Addr: rcs.Addr, + DeposedKey: rcs.DeposedKey, + ProviderAddr: rcs.ProviderAddr, + Change: *change, + RequiredReplace: rcs.RequiredReplace, + Private: rcs.Private, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { + if rcs == nil { + return nil + } + ret := *rcs + + ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) + + if len(ret.Private) != 0 { + private := make([]byte, len(ret.Private)) + copy(private, ret.Private) + ret.Private = private + } + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// OutputChangeSrc describes a change to an output value. +type OutputChangeSrc struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // ChangeSrc is an embedded description of the not-yet-decoded change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + ChangeSrc + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. 
+ Sensitive bool +} + +// Decode unmarshals the raw representation of the output value being +// changed. +func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { + change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChange{ + Addr: ocs.Addr, + Change: *change, + Sensitive: ocs.Sensitive, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { + if ocs == nil { + return nil + } + ret := *ocs + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// ChangeSrc is a not-yet-decoded Change. +type ChangeSrc struct { + // Action defines what kind of change is being made. + Action Action + + // Before and After correspond to the fields of the same name in Change, + // but have not yet been decoded from the serialized value used for + // storage. + Before, After DynamicValue +} + +// Decode unmarshals the raw representations of the before and after values +// to produce a Change object. Pass the type constraint that the result must +// conform to. +// +// Where a ChangeSrc is embedded in some other struct, it's generally better +// to call the corresponding Decode method of that struct rather than working +// directly with its embedded Change. +func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { + var err error + before := cty.NullVal(ty) + after := cty.NullVal(ty) + + if len(cs.Before) > 0 { + before, err = cs.Before.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'before' value: %s", err) + } + } + if len(cs.After) > 0 { + after, err = cs.After.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'after' value: %s", err) + } + } + return &Change{ + Action: cs.Action, + Before: before, + After: after, + }, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_state.go new file mode 100644 index 00000000000..c4fd3b0f466 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_state.go @@ -0,0 +1,15 @@ +package plans + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// PlannedState merges the set of changes described by the receiver into the +// given prior state to produce the planned result state. +// +// The result is an approximation of the state as it would exist after +// applying these changes, omitting any values that cannot be determined until +// the changes are actually applied. 
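A sketch of the encode/decode symmetry the comments above describe: the same type constraint must be supplied on both sides (cty.String here is arbitrary, and the internal import path is assumed usable for illustration):

    package main

    import (
        "log"

        "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        ch := &plans.Change{
            Action: plans.Update,
            Before: cty.StringVal("old"),
            After:  cty.StringVal("new"),
        }

        src, err := ch.Encode(cty.String) // serializes Before/After as DynamicValues
        if err != nil {
            log.Fatal(err)
        }

        back, err := src.Decode(cty.String) // must match the constraint used to encode
        if err != nil {
            log.Fatal(err)
        }
        // back.Before and back.After now equal the original cty values.
        _ = back
    }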
+func (c *Changes) PlannedState(prior *states.State) (*states.State, error) { + panic("Changes.PlannedState not yet implemented") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go new file mode 100644 index 00000000000..89cc1ab2254 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go @@ -0,0 +1,144 @@ +package plans + +import ( + "fmt" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ChangesSync is a wrapper around a Changes that provides a concurrency-safe +// interface to insert new changes and retrieve copies of existing changes. +// +// Each ChangesSync is independent of all others, so all concurrent writers +// to a particular Changes must share a single ChangesSync. Behavior is +// undefined if any other caller makes changes to the underlying Changes +// object or its nested objects concurrently with any of the methods of a +// particular ChangesSync. +type ChangesSync struct { + lock sync.Mutex + changes *Changes +} + +// AppendResourceInstanceChange records the given resource instance change in +// the set of planned resource changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { + if cs == nil { + panic("AppendResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Resources = append(cs.changes.Resources, s) +} + +// GetResourceInstanceChange searches the set of resource instance changes for +// one matching the given address and generation, returning it if it exists. +// +// If no such change exists, nil is returned. +// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { + if cs == nil { + panic("GetResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + if gen == states.CurrentGen { + return cs.changes.ResourceInstance(addr).DeepCopy() + } + if dk, ok := gen.(states.DeposedKey); ok { + return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() + } + panic(fmt.Sprintf("unsupported generation value %#v", gen)) +} + +// RemoveResourceInstanceChange searches the set of resource instance changes +// for one matching the given address and generation, and removes it from the +// set if it exists. 
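A sketch of the intended usage of ChangesSync built from the methods above; rcs stands in for a *ResourceInstanceChangeSrc constructed elsewhere in the plan walk:

    package example

    import (
        "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
        "github.com/hashicorp/terraform-plugin-sdk/internal/states"
    )

    func recordAndReread(changes *plans.Changes, rcs *plans.ResourceInstanceChangeSrc) *plans.ResourceInstanceChangeSrc {
        sync := changes.SyncWrapper() // every concurrent writer must share this one wrapper

        sync.AppendResourceInstanceChange(rcs) // stores a deep copy, not rcs itself

        // Reading back under states.CurrentGen returns another deep copy, so
        // mutating the result cannot corrupt the recorded plan.
        return sync.GetResourceInstanceChange(rcs.Addr, states.CurrentGen)
    }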
+func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) {
+	if cs == nil {
+		panic("RemoveResourceInstanceChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	dk := states.NotDeposed
+	if realDK, ok := gen.(states.DeposedKey); ok {
+		dk = realDK
+	}
+
+	addrStr := addr.String()
+	for i, r := range cs.changes.Resources {
+		if r.Addr.String() != addrStr || r.DeposedKey != dk {
+			continue
+		}
+		copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:])
+		cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1]
+		return
+	}
+}
+
+// AppendOutputChange records the given output value change in the set of
+// planned value changes.
+//
+// The caller must ensure that there are no concurrent writes to the given
+// change while this method is running, but it is safe to resume mutating
+// it after this method returns without affecting the saved change.
+func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) {
+	if cs == nil {
+		panic("AppendOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	s := changeSrc.DeepCopy()
+	cs.changes.Outputs = append(cs.changes.Outputs, s)
+}
+
+// GetOutputChange searches the set of output value changes for one matching
+// the given address, returning it if it exists.
+//
+// If no such change exists, nil is returned.
+//
+// The returned object is a deep copy of the change recorded in the plan, so
+// callers may mutate it although it's generally better (less confusing) to
+// treat planned changes as immutable after they've been initially constructed.
+func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc {
+	if cs == nil {
+		panic("GetOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	// DeepCopy here makes the behavior match the documentation above;
+	// returning the stored object directly would let callers mutate the
+	// recorded plan.
+	return cs.changes.OutputValue(addr).DeepCopy()
+}
+
+// RemoveOutputChange searches the set of output value changes for one matching
+// the given address, and removes it from the set if it exists.
+func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) {
+	if cs == nil {
+		panic("RemoveOutputChange on nil ChangesSync")
+	}
+	cs.lock.Lock()
+	defer cs.lock.Unlock()
+
+	addrStr := addr.String()
+	for i, o := range cs.changes.Outputs {
+		if o.Addr.String() != addrStr {
+			continue
+		}
+		copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:])
+		cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1]
+		return
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go
new file mode 100644
index 00000000000..01ca3892389
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go
@@ -0,0 +1,5 @@
+// Package plans contains the types that are used to represent Terraform plans.
+//
+// A plan describes a set of changes that Terraform will make to update remote
+// objects to match with changes to the configuration.
+package plans
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go
new file mode 100644
index 00000000000..51fbb24cfb3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go
@@ -0,0 +1,96 @@
+package plans
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
+)
+
+// DynamicValue is the representation in the plan of a value whose type cannot
+// be determined at compile time, such as because it comes from a schema
+// defined in a plugin.
+//
+// This type is used as an indirection so that the overall plan structure can
+// be decoded without schema available, and then the dynamic values accessed
+// at a later time once the appropriate schema has been determined.
+//
+// Internally, DynamicValue is a serialized version of a cty.Value created
+// against a particular type constraint. Callers should not directly access
+// the serialized form, whose format may change in future. Values of this
+// type must always be created by calling NewDynamicValue.
+//
+// The zero value of DynamicValue is nil, and represents the absence of a
+// value within the Go type system. This is distinct from a cty.NullVal
+// result, which represents the absence of a value within the cty type system.
+type DynamicValue []byte
+
+// NewDynamicValue creates a DynamicValue by serializing the given value
+// against the given type constraint. The value must conform to the type
+// constraint, or the result is undefined.
+//
+// If the value to be encoded has no predefined schema (for example, for
+// module output values and input variables), set the type constraint to
+// cty.DynamicPseudoType in order to save type information as part of the
+// value, and then also pass cty.DynamicPseudoType to method Decode to recover
+// the original value.
+//
+// cty.NilVal can be used to represent the absence of a value, but callers
+// must be careful to distinguish values that are absent at the Go layer
+// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal
+// results).
+func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
+	// If we're given cty.NilVal (the zero value of cty.Value, which is
+	// distinct from a typed null value created by cty.NullVal) then we'll
+	// assume the caller is trying to represent the _absence_ of a value,
+	// and so we'll return a nil DynamicValue.
+	if val == cty.NilVal {
+		return DynamicValue(nil), nil
+	}
+
+	// Currently our internal encoding is msgpack, via ctymsgpack.
+	buf, err := ctymsgpack.Marshal(val, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	return DynamicValue(buf), nil
+}
+
+// Decode retrieves the effective value from the receiver by interpreting the
+// serialized form against the given type constraint. For correct results,
+// the type constraint must match (or be consistent with) the one that was
+// used to create the receiver.
+//
+// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
+// instead represents the absence of a value.
+func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
+	if v == nil {
+		return cty.NilVal, nil
+	}
+
+	return ctymsgpack.Unmarshal([]byte(v), ty)
+}
+
+// ImpliedType returns the type implied by the serialized structure of the
+// receiving value.
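A sketch of the NewDynamicValue/Decode round-trip described above, using cty.DynamicPseudoType so the type information travels with the value (internal import path assumed usable for illustration):

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        val := cty.ObjectVal(map[string]cty.Value{
            "id": cty.StringVal("i-abc123"),
        })

        dv, err := plans.NewDynamicValue(val, cty.DynamicPseudoType)
        if err != nil {
            log.Fatal(err)
        }

        // Decoding with the same constraint recovers a semantically equal value.
        back, err := dv.Decode(cty.DynamicPseudoType)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(back.RawEquals(val)) // true
    }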
+// +// This will not necessarily be exactly the type that was given when the +// value was encoded, and in particular must not be used for values that +// were encoded with their static type given as cty.DynamicPseudoType. +// It is however safe to use this method for values that were encoded using +// their runtime type as the conforming type, with the result being +// semantically equivalent but with all lists and sets represented as tuples, +// and maps as objects, due to ambiguities of the serialization. +func (v DynamicValue) ImpliedType() (cty.Type, error) { + return ctymsgpack.ImpliedType([]byte(v)) +} + +// Copy produces a copy of the receiver with a distinct backing array. +func (v DynamicValue) Copy() DynamicValue { + if v == nil { + return nil + } + + ret := make(DynamicValue, len(v)) + copy(ret, v) + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go new file mode 100644 index 00000000000..ba9cc9611ac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go @@ -0,0 +1,18 @@ +package objchange + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// AllAttributesNull constructs a non-null cty.Value of the object type implied +// by the given schema that has all of its leaf attributes set to null and all +// of its nested block collections set to zero-length. +// +// This simulates what would result from decoding an empty configuration block +// with the given schema, except that it does not produce errors +func AllAttributesNull(schema *configschema.Block) cty.Value { + // "All attributes null" happens to be the definition of EmptyValue for + // a Block, so we can just delegate to that. + return schema.EmptyValue() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go new file mode 100644 index 00000000000..36a7d496c2a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go @@ -0,0 +1,447 @@ +package objchange + +import ( + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// AssertObjectCompatible checks whether the given "actual" value is a valid +// completion of the possibly-partially-unknown "planned" value. +// +// This means that any known leaf value in "planned" must be equal to the +// corresponding value in "actual", and various other similar constraints. +// +// Any inconsistencies are reported by returning a non-zero number of errors. +// These errors are usually (but not necessarily) cty.PathError values +// referring to a particular nested value within the "actual" value. +// +// The two values must have types that conform to the given schema's implied +// type, or this function will panic. 
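A sketch of what AllAttributesNull produces for a minimal schema; the configschema.Block and Attribute field names follow their use elsewhere in this vendored package:

    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "name": {Type: cty.String, Optional: true},
        },
    }

    v := objchange.AllAttributesNull(schema)
    // v is a known, non-null object whose leaf attribute is null, i.e. the
    // same shape as decoding an empty configuration block:
    //   cty.ObjectVal(map[string]cty.Value{"name": cty.NullVal(cty.String)})
    fmt.Println(v.IsNull())                 // false
    fmt.Println(v.GetAttr("name").IsNull()) // true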
+func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { + return assertObjectCompatible(schema, planned, actual, nil) +} + +func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { + var errs []error + if planned.IsNull() && !actual.IsNull() { + errs = append(errs, path.NewErrorf("was absent, but now present")) + return errs + } + if actual.IsNull() && !planned.IsNull() { + errs = append(errs, path.NewErrorf("was present, but now absent")) + return errs + } + if planned.IsNull() { + // No further checks possible if both values are null + return errs + } + + for name, attrS := range schema.Attributes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + path := append(path, cty.GetAttrStep{Name: name}) + moreErrs := assertValueCompatible(plannedV, actualV, path) + if attrS.Sensitive { + if len(moreErrs) > 0 { + // Use a vague placeholder message instead, to avoid disclosing + // sensitive information. + errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) + } + } else { + errs = append(errs, moreErrs...) + } + } + for name, blockS := range schema.BlockTypes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + // As a special case, if there were any blocks whose leaf attributes + // are all unknown then we assume (possibly incorrectly) that the + // HCL dynamic block extension is in use with an unknown for_each + // argument, and so we will do looser validation here that allows + // for those blocks to have expanded into a different number of blocks + // if the for_each value is now known. + maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) + + path := append(path, cty.GetAttrStep{Name: name}) + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + // If an unknown block placeholder was present then the placeholder + // may have expanded out into zero blocks, which is okay. + if maybeUnknownBlocks && actualV.IsNull() { + continue + } + moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) + errs = append(errs, moreErrs...) + case configschema.NestingList: + // A NestingList might either be a list or a tuple, depending on + // whether there are dynamically-typed attributes inside. However, + // both support a similar-enough API that we can treat them the + // same for our purposes here. + if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + if maybeUnknownBlocks { + // When unknown blocks are present the final blocks may be + // at different indices than the planned blocks, so unfortunately + // we can't do our usual checks in this case without generating + // false negatives. + continue + } + + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL { + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) 
+			}
+		case configschema.NestingMap:
+			// A NestingMap might either be a map or an object, depending on
+			// whether there are dynamically-typed attributes inside, but
+			// that's decided statically and so both values will have the same
+			// kind.
+			if plannedV.Type().IsObjectType() {
+				plannedAtys := plannedV.Type().AttributeTypes()
+				actualAtys := actualV.Type().AttributeTypes()
+				for k := range plannedAtys {
+					if _, ok := actualAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
+						continue
+					}
+
+					plannedEV := plannedV.GetAttr(k)
+					actualEV := actualV.GetAttr(k)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
+					errs = append(errs, moreErrs...)
+				}
+				if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
+					for k := range actualAtys {
+						if _, ok := plannedAtys[k]; !ok {
+							errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
+							continue
+						}
+					}
+				}
+			} else {
+				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+					continue
+				}
+				plannedL := plannedV.LengthInt()
+				actualL := actualV.LengthInt()
+				if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
+					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
+					continue
+				}
+				for it := plannedV.ElementIterator(); it.Next(); {
+					idx, plannedEV := it.Element()
+					if !actualV.HasIndex(idx).True() {
+						continue
+					}
+					actualEV := actualV.Index(idx)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
+					errs = append(errs, moreErrs...)
+				}
+			}
+		case configschema.NestingSet:
+			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+				continue
+			}
+
+			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
+				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
+				return len(errs) == 0
+			})
+			errs = append(errs, setErrs...)
+
+			if maybeUnknownBlocks {
+				// When unknown blocks are present the final number of blocks
+				// may be different, either because the unknown set values
+				// become equal and are collapsed, or the count is unknown due
+				// to a dynamic block. Unfortunately this means we can't do our
+				// usual checks in this case without generating false
+				// negatives.
+				continue
+			}
+
+			// There can be fewer elements in a set after its elements are all
+			// known (values that turn out to be equal will coalesce) but the
+			// number of elements must never get larger.
+			plannedL := plannedV.LengthInt()
+			actualL := actualV.LengthInt()
+			if plannedL < actualL {
+				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
+			}
+		default:
+			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+		}
+	}
+	return errs
+}
+
+func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
+	// NOTE: We don't normally use the GoString rendering of cty.Value in
+	// user-facing error messages as a rule, but we make an exception
+	// for this function because we expect the user to pass this message on
+	// verbatim to the provider development team and so more detail is better.
+
+	var errs []error
+	if planned.Type() == cty.DynamicPseudoType {
+		// Anything goes, then
+		return errs
+	}
+	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
+		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
+		// If the types don't match then we can't do any other comparisons,
+		// so we bail early.
+		return errs
+	}
+
+	if !planned.IsKnown() {
+		// We didn't know what we were going to end up with during plan, so
+		// anything goes during apply.
+		return errs
+	}
+
+	if actual.IsNull() {
+		if planned.IsNull() {
+			return nil
+		}
+		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
+		return errs
+	}
+	if planned.IsNull() {
+		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
+		return errs
+	}
+
+	ty := planned.Type()
+	switch {
+
+	case !actual.IsKnown():
+		errs = append(errs, path.NewErrorf("was known, but now unknown"))
+
+	case ty.IsPrimitiveType():
+		if !actual.Equals(planned).True() {
+			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
+		}
+
+	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
+		for it := planned.ElementIterator(); it.Next(); {
+			k, plannedV := it.Element()
+			if !actual.HasIndex(k).True() {
+				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
+				continue
+			}
+
+			actualV := actual.Index(k)
+			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
+			errs = append(errs, moreErrs...)
+		}
+
+		for it := actual.ElementIterator(); it.Next(); {
+			k, _ := it.Element()
+			if !planned.HasIndex(k).True() {
+				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
+			}
+		}
+
+	case ty.IsObjectType():
+		atys := ty.AttributeTypes()
+		for name := range atys {
+			// Because we already tested that the two values have the same type,
+			// we can assume that the same attributes are present in both and
+			// focus just on testing their values.
+			plannedV := planned.GetAttr(name)
+			actualV := actual.GetAttr(name)
+			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
+			errs = append(errs, moreErrs...)
+		}
+
+	case ty.IsSetType():
+		// We can't really do anything useful for sets here because changing
+		// an unknown element to known changes the identity of the element, and
+		// so we can't correlate them properly. However, we will at least check
+		// to ensure that the number of elements is consistent, along with
+		// the general type-match checks we ran earlier in this function.
+		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {
+
+			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
+				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
+				return len(errs) == 0
+			})
+			errs = append(errs, setErrs...)
+
+			// There can be fewer elements in a set after its elements are all
+			// known (values that turn out to be equal will coalesce) but the
+			// number of elements must never get larger.
+ + plannedL := planned.LengthInt() + actualL := actual.LengthInt() + if plannedL < actualL { + errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) + } + } + } + + return errs +} + +func indexStrForErrors(v cty.Value) string { + switch v.Type() { + case cty.Number: + return v.AsBigFloat().Text('f', -1) + case cty.String: + return strconv.Quote(v.AsString()) + default: + // Should be impossible, since no other index types are allowed! + return fmt.Sprintf("%#v", v) + } +} + +// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the +// HCL dynamic block extension behaves when it's asked to expand a block whose +// for_each argument is unknown. In such cases, it generates a single placeholder +// block with all leaf attribute values unknown, and once the for_each +// expression becomes known the placeholder may be replaced with any number +// of blocks, so object compatibility checks would need to be more liberal. +// +// Set "nested" if testing a block that is nested inside a candidate block +// placeholder; this changes the interpretation of there being no blocks of +// a type to allow for there being zero nested blocks. +func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if nested && v.IsNull() { + return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder + } + return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block) + default: + // These situations should be impossible for correct providers, but + // we permit the legacy SDK to produce some incorrect outcomes + // for compatibility with its existing logic, and so we must be + // tolerant here. + if !v.IsKnown() { + return true + } + if v.IsNull() { + return false // treated as if the list were empty, so we would see zero iterations below + } + + // For all other nesting modes, our value should be something iterable. + for it := v.ElementIterator(); it.Next(); { + _, ev := it.Element() + if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) { + return true + } + } + + // Our default changes depending on whether we're testing the candidate + // block itself or something nested inside of it: zero blocks of a type + // can never contain a dynamic block placeholder, but a dynamic block + // placeholder might contain zero blocks of one of its own nested block + // types, if none were set in the config at all. + return nested + } +} + +func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool { + if v.IsNull() { + return false // null value can never be a placeholder element + } + if !v.IsKnown() { + return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs + } + for name := range schema.Attributes { + av := v.GetAttr(name) + + // Unknown block placeholders contain only unknown or null attribute + // values, depending on whether or not a particular attribute was set + // explicitly inside the content block. Note that this is imprecise: + // non-placeholders can also match this, so this function can generate + // false positives. 
+		if av.IsKnown() && !av.IsNull() {
+			return false
+		}
+	}
+	for name, blockS := range schema.BlockTypes {
+		if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) {
+			return false
+		}
+	}
+	return true
+}
+
+// assertSetValuesCompatible checks that each of the elements in a can
+// be correlated with at least one equivalent element in b and vice-versa,
+// using the given correlation function.
+//
+// This allows the number of elements in the sets to change as long as all
+// elements in both sets can be correlated, making this function safe to use
+// with sets that may contain unknown values as long as the unknown case is
+// addressed in some reasonable way in the callback function.
+//
+// The callback always receives values from set a as its first argument and
+// values from set b in its second argument, so it is safe to use with
+// non-commutative functions.
+//
+// As with assertValueCompatible, we assume that the target audience of error
+// messages here is a provider developer (via a bug report from a user) and so
+// we intentionally violate our usual rule of keeping cty implementation
+// details out of error messages.
+func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
+	a := planned
+	b := actual
+
+	// Our methodology here is a little tricky, to deal with the fact that
+	// it's impossible to directly correlate two non-equal set elements because
+	// they don't have identities separate from their values.
+	// The approach is to count the number of equivalent elements each element
+	// of a has in b and vice-versa, and then return true only if each element
+	// in both sets has at least one equivalent.
+	as := a.AsValueSlice()
+	bs := b.AsValueSlice()
+	aeqs := make([]bool, len(as))
+	beqs := make([]bool, len(bs))
+	for ai, av := range as {
+		for bi, bv := range bs {
+			if f(av, bv) {
+				aeqs[ai] = true
+				beqs[bi] = true
+			}
+		}
+	}
+
+	var errs []error
+	for i, eq := range aeqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
+		}
+	}
+	if len(errs) > 0 {
+		// Exit early since otherwise we're likely to generate duplicate
+		// error messages from the other perspective in the subsequent loop.
+		return errs
+	}
+	for i, eq := range beqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
+		}
+	}
+	return errs
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go
new file mode 100644
index 00000000000..2c18a0108f9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go
@@ -0,0 +1,4 @@
+// Package objchange deals with the business logic of taking a prior state
+// value and a config value and producing a proposed new merged value, along
+// with other related rules in this domain.
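Tying the compatibility rules above together, a sketch of the planned-versus-actual check: an unknown planned attribute accepts any known final value, while a known one must match exactly (schema and values are illustrative):

    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "name": {Type: cty.String, Optional: true},
        },
    }

    planned := cty.ObjectVal(map[string]cty.Value{
        "name": cty.UnknownVal(cty.String), // not known until apply
    })
    actual := cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("web"),
    })

    errs := objchange.AssertObjectCompatible(schema, planned, actual)
    fmt.Println(len(errs)) // 0: a known value is a valid completion of an unknown one

    // Had planned carried cty.StringVal("db") instead, the mismatch would
    // produce a cty.PathError at .name, with a message along the lines of:
    //   was cty.StringVal("db"), but now cty.StringVal("web")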
+package objchange
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go
new file mode 100644
index 00000000000..cbfefddddb9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go
@@ -0,0 +1,104 @@
+package objchange
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// LongestCommonSubsequence finds a sequence of values that are common to both
+// x and y, with the same relative ordering as in both collections. This result
+// is useful as a first step towards computing a diff showing added/removed
+// elements in a sequence.
+//
+// The approach used here is a "naive" one, assuming that both xs and ys will
+// generally be small in most reasonable Terraform configurations. For larger
+// lists the time/space usage may be sub-optimal.
+//
+// A pair of lists may have multiple longest common subsequences. In that
+// case, the one selected by this function is undefined.
+func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value {
+	if len(xs) == 0 || len(ys) == 0 {
+		return make([]cty.Value, 0)
+	}
+
+	c := make([]int, len(xs)*len(ys))
+	eqs := make([]bool, len(xs)*len(ys))
+	w := len(xs)
+
+	for y := 0; y < len(ys); y++ {
+		for x := 0; x < len(xs); x++ {
+			eqV := xs[x].Equals(ys[y])
+			eq := false
+			if eqV.IsKnown() && eqV.True() {
+				eq = true
+				eqs[(w*y)+x] = true // equality tests can be expensive, so cache it
+			}
+			if eq {
+				// Sequence gets one longer than for the cell at top left,
+				// since we'd append a new item to the sequence here.
+				if x == 0 || y == 0 {
+					c[(w*y)+x] = 1
+				} else {
+					c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1
+				}
+			} else {
+				// We follow the longest of the sequence above and the sequence
+				// to the left of us in the matrix.
+				l := 0
+				u := 0
+				if x > 0 {
+					l = c[(w*y)+(x-1)]
+				}
+				if y > 0 {
+					u = c[(w*(y-1))+x]
+				}
+				if l > u {
+					c[(w*y)+x] = l
+				} else {
+					c[(w*y)+x] = u
+				}
+			}
+		}
+	}
+
+	// The bottom right cell tells us how long our longest sequence will be
+	seq := make([]cty.Value, c[len(c)-1])
+
+	// Now we will walk back from the bottom right cell, finding again all
+	// of the equal pairs to construct our sequence.
+	x := len(xs) - 1
+	y := len(ys) - 1
+	i := len(seq) - 1
+
+	for x > -1 && y > -1 {
+		if eqs[(w*y)+x] {
+			// Add the value to our result list and then walk diagonally
+			// up and to the left.
+			seq[i] = xs[x]
+			x--
+			y--
+			i--
+		} else {
+			// Take the path with the greatest sequence length in the matrix.
+			l := 0
+			u := 0
+			if x > 0 {
+				l = c[(w*y)+(x-1)]
+			}
+			if y > 0 {
+				u = c[(w*(y-1))+x]
+			}
+			if l > u {
+				x--
+			} else {
+				y--
+			}
+		}
+	}
+
+	if i > -1 {
+		// should never happen if the matrix was constructed properly
+		panic("not enough elements in sequence")
+	}
+
+	return seq
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go
new file mode 100644
index 00000000000..a8629046cad
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go
@@ -0,0 +1,132 @@
+package objchange
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// NormalizeObjectFromLegacySDK takes an object that may have been generated
+// by the legacy Terraform SDK (i.e. returned from a provider with the
+// LegacyTypeSystem opt-out set) and does its best to normalize it for the
+// assumptions we would normally enforce if the provider had not opted out.
+//
+// In particular, this function guarantees that a value representing a nested
+// block will never itself be unknown or null, instead representing that as
+// a non-null value that may contain null/unknown values.
+//
+// The input value must still conform to the implied type of the given schema,
+// or else this function may produce garbage results or panic. This is usually
+// okay because type consistency is enforced when deserializing the value
+// returned from the provider over the RPC wire protocol anyway.
+func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value {
+	if val == cty.NilVal || val.IsNull() {
+		// This should never happen in reasonable use, but we'll allow it
+		// and normalize to a null of the expected type rather than panicking
+		// below.
+		return cty.NullVal(schema.ImpliedType())
+	}
+
+	vals := make(map[string]cty.Value)
+	for name := range schema.Attributes {
+		// No normalization for attributes, since their being type-conformant
+		// is all that we require.
+		vals[name] = val.GetAttr(name)
+	}
+	for name, blockS := range schema.BlockTypes {
+		lv := val.GetAttr(name)
+
+		// Legacy SDK never generates dynamically-typed attributes and so our
+		// normalization code doesn't deal with them, but we need to make sure
+		// we still pass them through properly so that we don't interfere with
+		// objects generated by other SDKs.
+		if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() {
+			vals[name] = lv
+			continue
+		}
+
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			if lv.IsKnown() {
+				if lv.IsNull() && blockS.Nesting == configschema.NestingGroup {
+					vals[name] = blockS.EmptyValue()
+				} else {
+					vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block)
+				}
+			} else {
+				vals[name] = unknownBlockStub(&blockS.Block)
+			}
+		case configschema.NestingList:
+			switch {
+			case !lv.IsKnown():
+				vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+			case lv.IsNull() || lv.LengthInt() == 0:
+				vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType())
+			default:
+				subVals := make([]cty.Value, 0, lv.LengthInt())
+				for it := lv.ElementIterator(); it.Next(); {
+					_, subVal := it.Element()
+					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
+				}
+				vals[name] = cty.ListVal(subVals)
+			}
+		case configschema.NestingSet:
+			switch {
+			case !lv.IsKnown():
+				vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+			case lv.IsNull() || lv.LengthInt() == 0:
+				vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType())
+			default:
+				subVals := make([]cty.Value, 0, lv.LengthInt())
+				for it := lv.ElementIterator(); it.Next(); {
+					_, subVal := it.Element()
+					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
+				}
+				vals[name] = cty.SetVal(subVals)
+			}
+		default:
+			// The legacy SDK doesn't support NestingMap, so we just assume
+			// maps are always okay. (If not, we would've detected and returned
+			// an error to the user before we got here.)
+			vals[name] = lv
+		}
+	}
+	return cty.ObjectVal(vals)
+}
+
+// unknownBlockStub constructs an object value that approximates an unknown
+// block by producing a known block object with all of its leaf attribute
+// values set to unknown.
+//
+// Blocks themselves cannot be unknown, so if the legacy SDK tries to return
+// such a thing, we'll use this result instead. This convention mimics how
+// the dynamic block feature deals with being asked to iterate over an unknown
+// value, because our value-checking functions already accept this convention
+// as a special case.
+func unknownBlockStub(schema *configschema.Block) cty.Value {
+	vals := make(map[string]cty.Value)
+	for name, attrS := range schema.Attributes {
+		vals[name] = cty.UnknownVal(attrS.Type)
+	}
+	for name, blockS := range schema.BlockTypes {
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			vals[name] = unknownBlockStub(&blockS.Block)
+		case configschema.NestingList:
+			// In principle we may be expected to produce a tuple value here,
+			// if there are any dynamically-typed attributes in our nested block,
+			// but the legacy SDK doesn't support that, so we just assume it'll
+			// never be necessary to normalize those. (Incorrect usage in any
+			// other SDK would be caught and returned as an error before we
+			// get here.)
+			vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingSet:
+			vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingMap:
+			// A nesting map can never be unknown since we then wouldn't know
+			// what the keys are. (Legacy SDK doesn't support NestingMap anyway,
+			// so this should never arise.)
+			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
+		}
+	}
+	return cty.ObjectVal(vals)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
new file mode 100644
index 00000000000..879fc93a1e6
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
@@ -0,0 +1,390 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+)
+
+// ProposedNewObject constructs a proposed new object value by combining the
+// computed attribute values from "prior" with the configured attribute values
+// from "config".
+//
+// Both values must conform to the given schema's implied type, or this function
+// will panic.
+//
+// The prior value must be wholly known, but the config value may be unknown
+// or have nested unknown values.
+//
+// The merging of the two objects includes the attributes of any nested blocks,
+// which will be correlated in a manner appropriate for their nesting mode.
+// Note in particular that the correlation for blocks backed by sets is a
+// heuristic based on matching non-computed attribute values and so it may
+// produce strange results with more "extreme" cases, such as a nested set
+// block where _all_ attributes are computed.
+func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	// If the config and prior are both null, return early here before
+	// populating the prior block. This prevents non-null blocks from
+	// appearing in the proposed state value.
+	if config.IsNull() && prior.IsNull() {
+		return prior
+	}
+
+	if prior.IsNull() {
+		// In this case, we will construct a synthetic prior value that is
+		// similar to the result of decoding an empty configuration block,
+		// which simplifies our handling of the top-level attributes/blocks
+		// below by giving us one non-null level of object to pull values from.
+		prior = AllAttributesNull(schema)
+	}
+	return proposedNewObject(schema, prior, config)
+}
+
+// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
+// planning data resources in particular. Specifically, it replaces the values
+// of any Computed attributes not set in the configuration with an unknown
+// value, which serves as a placeholder for a value to be filled in by the
+// provider when the data resource is finally read.
+//
+// Data resources are different because the planning of them is handled
+// entirely within Terraform Core and not subject to customization by the
+// provider. This function is, in effect, producing an equivalent result to
+// passing the ProposedNewObject result into a provider's PlanResourceChange
+// function, assuming a fixed implementation of PlanResourceChange that just
+// fills in unknown values as needed.
+func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
+	// Our trick here is to run the ProposedNewObject logic with an
+	// entirely-unknown prior value. Because of cty's unknown short-circuit
+	// behavior, any operation on prior returns another unknown, and so
+	// unknown values propagate into all of the parts of the resulting value
+	// that would normally be filled in by preserving the prior state.
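+	//
+	// A minimal illustration of that short-circuit behavior (not part of
+	// the upstream code; the attribute name "id" is just an example):
+	//
+	//	obj := cty.UnknownVal(cty.Object(map[string]cty.Type{
+	//		"id": cty.String,
+	//	}))
+	//	obj.GetAttr("id") // yields cty.UnknownVal(cty.String)
+	//
+	// so every attribute we would otherwise copy from the prior state comes
+	// out unknown, which is exactly the placeholder we want for data reads.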
+	prior := cty.UnknownVal(schema.ImpliedType())
+	return proposedNewObject(schema, prior, config)
+}
+
+func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	if config.IsNull() || !config.IsKnown() {
+		// This is a weird situation, but we'll allow it anyway to free
+		// callers from needing to specifically check for these cases.
+		return prior
+	}
+	if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) {
+		panic("ProposedNewObject only supports object-typed values")
+	}
+
+	// From this point onwards, we can assume that both values are non-null
+	// object types, and that the config value itself is known (though it
+	// may contain nested values that are unknown.)
+
+	newAttrs := map[string]cty.Value{}
+	for name, attr := range schema.Attributes {
+		priorV := prior.GetAttr(name)
+		configV := config.GetAttr(name)
+		var newV cty.Value
+		switch {
+		case attr.Computed && attr.Optional:
+			// This is the trickiest scenario: we want to keep the prior value
+			// if the config isn't overriding it. Note that due to some
+			// ambiguity here, setting an optional+computed attribute from
+			// config and then later switching the config to null in a
+			// subsequent change causes the initial config value to be "sticky"
+			// unless the provider specifically overrides it during its own
+			// plan customization step.
+			if configV.IsNull() {
+				newV = priorV
+			} else {
+				newV = configV
+			}
+		case attr.Computed:
+			// configV will always be null in this case, by definition.
+			// priorV may also be null, but that's okay.
+			newV = priorV
+		default:
+			// For non-computed attributes, we always take the config value,
+			// even if it is null. If it's _required_ then null values
+			// should've been caught during an earlier validation step, and
+			// so we don't really care about that here.
+			newV = configV
+		}
+		newAttrs[name] = newV
+	}
+
+	// Merging nested blocks is a little more complex, since we need to
+	// correlate blocks between both objects and then recursively propose
+	// a new object for each. The correlation logic depends on the nesting
+	// mode for each block type.
+	for name, blockType := range schema.BlockTypes {
+		priorV := prior.GetAttr(name)
+		configV := config.GetAttr(name)
+		var newV cty.Value
+		switch blockType.Nesting {
+
+		case configschema.NestingSingle, configschema.NestingGroup:
+			newV = ProposedNewObject(&blockType.Block, priorV, configV)
+
+		case configschema.NestingList:
+			// Nested blocks are correlated by index.
+			configVLen := 0
+			if configV.IsKnown() && !configV.IsNull() {
+				configVLen = configV.LengthInt()
+			}
+			if configVLen > 0 {
+				newVals := make([]cty.Value, 0, configVLen)
+				for it := configV.ElementIterator(); it.Next(); {
+					idx, configEV := it.Element()
+					if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
+						// If there is no corresponding prior element then
+						// we just take the config value as-is.
+						newVals = append(newVals, configEV)
+						continue
+					}
+					priorEV := priorV.Index(idx)
+
+					newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
+					newVals = append(newVals, newEV)
+				}
+				// Despite the name, a NestingList might also be a tuple, if
+				// its nested schema contains dynamically-typed attributes.
+				if configV.Type().IsTupleType() {
+					newV = cty.TupleVal(newVals)
+				} else {
+					newV = cty.ListVal(newVals)
+				}
+			} else {
+				// Despite the name, a NestingList might also be a tuple, if
+				// its nested schema contains dynamically-typed attributes.
+				if configV.Type().IsTupleType() {
+					newV = cty.EmptyTupleVal
+				} else {
+					newV = cty.ListValEmpty(blockType.ImpliedType())
+				}
+			}
+
+		case configschema.NestingMap:
+			// Despite the name, a NestingMap may produce either a map or
+			// object value, depending on whether the nested schema contains
+			// dynamically-typed attributes.
+			if configV.Type().IsObjectType() {
+				// Nested blocks are correlated by key.
+				configVLen := 0
+				if configV.IsKnown() && !configV.IsNull() {
+					configVLen = configV.LengthInt()
+				}
+				if configVLen > 0 {
+					newVals := make(map[string]cty.Value, configVLen)
+					atys := configV.Type().AttributeTypes()
+					for name := range atys {
+						configEV := configV.GetAttr(name)
+						if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) {
+							// If there is no corresponding prior element then
+							// we just take the config value as-is.
+							newVals[name] = configEV
+							continue
+						}
+						priorEV := priorV.GetAttr(name)
+
+						newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
+						newVals[name] = newEV
+					}
+					// Although we call the nesting mode "map", we actually use
+					// object values so that elements might have different types
+					// in case of dynamically-typed attributes.
+					newV = cty.ObjectVal(newVals)
+				} else {
+					newV = cty.EmptyObjectVal
+				}
+			} else {
+				configVLen := 0
+				if configV.IsKnown() && !configV.IsNull() {
+					configVLen = configV.LengthInt()
+				}
+				if configVLen > 0 {
+					newVals := make(map[string]cty.Value, configVLen)
+					for it := configV.ElementIterator(); it.Next(); {
+						idx, configEV := it.Element()
+						k := idx.AsString()
+						if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
+							// If there is no corresponding prior element then
+							// we just take the config value as-is.
+							newVals[k] = configEV
+							continue
+						}
+						priorEV := priorV.Index(idx)
+
+						newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
+						newVals[k] = newEV
+					}
+					newV = cty.MapVal(newVals)
+				} else {
+					newV = cty.MapValEmpty(blockType.ImpliedType())
+				}
+			}
+
+		case configschema.NestingSet:
+			if !configV.Type().IsSetType() {
+				panic("configschema.NestingSet value is not a set as expected")
+			}
+
+			// Nested blocks are correlated by comparing the element values
+			// after eliminating all of the computed attributes. In practice,
+			// this means that any config change produces an entirely new
+			// nested object, and we only propagate prior computed values
+			// if the non-computed attribute values are identical.
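+			//
+			// As a hypothetical illustration (not part of the upstream
+			// code): for a set block with a configured attribute "name" and
+			// a computed attribute "id", a prior element
+			// {name: "a", id: "i-123"} reduces to the compare value
+			// {name: "a", id: null}. A config element {name: "a", id: null}
+			// reduces to the same compare value, so the prior "id" is
+			// carried forward; any change to "name" breaks the match and
+			// the element is planned as entirely new.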
+			var cmpVals [][2]cty.Value
+			if priorV.IsKnown() && !priorV.IsNull() {
+				cmpVals = setElementCompareValues(&blockType.Block, priorV, false)
+			}
+			configVLen := 0
+			if configV.IsKnown() && !configV.IsNull() {
+				configVLen = configV.LengthInt()
+			}
+			if configVLen > 0 {
+				used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value
+				newVals := make([]cty.Value, 0, configVLen)
+				for it := configV.ElementIterator(); it.Next(); {
+					_, configEV := it.Element()
+					var priorEV cty.Value
+					for i, cmp := range cmpVals {
+						if used[i] {
+							continue
+						}
+						if cmp[1].RawEquals(configEV) {
+							priorEV = cmp[0]
+							used[i] = true // we can't use this value on a future iteration
+							break
+						}
+					}
+					if priorEV == cty.NilVal {
+						priorEV = cty.NullVal(blockType.ImpliedType())
+					}
+
+					newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
+					newVals = append(newVals, newEV)
+				}
+				newV = cty.SetVal(newVals)
+			} else {
+				newV = cty.SetValEmpty(blockType.Block.ImpliedType())
+			}
+
+		default:
+			// Should never happen, since the above cases are comprehensive.
+			panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
+		}
+
+		newAttrs[name] = newV
+	}
+
+	return cty.ObjectVal(newAttrs)
+}
+
+// setElementCompareValues takes a known, non-null value of a cty.Set type and
+// returns a table -- constructed of two-element arrays -- that maps original
+// set element values to corresponding values that have all of the computed
+// values removed, making them suitable for comparison with values obtained
+// from configuration. The element type of the set must conform to the implied
+// type of the given schema, or this function will panic.
+//
+// In the resulting slice, the zeroth element of each array is the original
+// value and the one-indexed element is the corresponding "compare value".
+//
+// This is intended to help correlate prior elements with configured elements
+// in ProposedNewObject. The result is a heuristic rather than an exact science,
+// since e.g. two separate elements may reduce to the same value through this
+// process. The caller must therefore be ready to deal with duplicates.
+func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value {
+	ret := make([][2]cty.Value, 0, set.LengthInt())
+	for it := set.ElementIterator(); it.Next(); {
+		_, ev := it.Element()
+		ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)})
+	}
+	return ret
+}
+
+// setElementCompareValue creates a new value that has all of the same
+// non-computed attribute values as the one given but has all computed
+// attribute values forced to null.
+//
+// If isConfig is true then non-null Optional+Computed attribute values will
+// be preserved. Otherwise, they will also be set to null.
+//
+// The input value must conform to the schema's implied type, and the return
+// value is guaranteed to conform to it.
+func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
+	if v.IsNull() || !v.IsKnown() {
+		return v
+	}
+
+	attrs := map[string]cty.Value{}
+	for name, attr := range schema.Attributes {
+		switch {
+		case attr.Computed && attr.Optional:
+			if isConfig {
+				attrs[name] = v.GetAttr(name)
+			} else {
+				attrs[name] = cty.NullVal(attr.Type)
+			}
+		case attr.Computed:
+			attrs[name] = cty.NullVal(attr.Type)
+		default:
+			attrs[name] = v.GetAttr(name)
+		}
+	}
+
+	for name, blockType := range schema.BlockTypes {
+		switch blockType.Nesting {
+
+		case configschema.NestingSingle, configschema.NestingGroup:
+			attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
+
+		case configschema.NestingList, configschema.NestingSet:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			if l := cv.LengthInt(); l > 0 {
+				elems := make([]cty.Value, 0, l)
+				for it := cv.ElementIterator(); it.Next(); {
+					_, ev := it.Element()
+					elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
+				}
+				if blockType.Nesting == configschema.NestingSet {
+					// SetValEmpty would panic if given elements that are not
+					// all of the same type, but that's guaranteed not to
+					// happen here because our input value was _already_ a
+					// set and we've not changed the types of any elements here.
+					attrs[name] = cty.SetVal(elems)
+				} else {
+					attrs[name] = cty.TupleVal(elems)
+				}
+			} else {
+				if blockType.Nesting == configschema.NestingSet {
+					attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
+				} else {
+					attrs[name] = cty.EmptyTupleVal
+				}
+			}
+
+		case configschema.NestingMap:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			elems := make(map[string]cty.Value)
+			for it := cv.ElementIterator(); it.Next(); {
+				kv, ev := it.Element()
+				elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
+			}
+			attrs[name] = cty.ObjectVal(elems)
+
+		default:
+			// Should never happen, since the above cases are comprehensive.
+			panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
+		}
+	}
+
+	return cty.ObjectVal(attrs)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go
new file mode 100644
index 00000000000..905a91142a1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go
@@ -0,0 +1,267 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+)
+
+// AssertPlanValid checks whether a planned new state returned by a
+// provider's PlanResourceChange method is suitable to achieve a change
+// from priorState to config. It returns a slice with nonzero length if
+// any problems are detected. Because problems here indicate bugs in the
+// provider that generated the plannedState, they are written with provider
+// developers as an audience, rather than end-users.
+//
+// All of the given values must have the same type and must conform to the
+// implied type of the given schema, or this function may panic or produce
+// garbage results.
+//
+// During planning, a provider may only make changes to attributes that are
+// null (unset) in the configuration and are marked as "computed" in the
+// resource type schema, in order to insert any default values the provider
+// may know about. If the default value cannot be determined until apply time,
+// the provider can return an unknown value. Providers are forbidden from
+// planning a change that disagrees with any non-null argument in the
+// configuration.
+//
+// As a special exception, providers _are_ allowed to provide attribute values
+// conflicting with configuration if and only if the planned value exactly
+// matches the corresponding attribute value in the prior state. The provider
+// can use this to signal that the new value is functionally equivalent to
+// the old and thus no change is required.
+func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
+	return assertPlanValid(schema, priorState, config, plannedState, nil)
+}
+
+func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedState.IsNull() && !config.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
+		return errs
+	}
+	if config.IsNull() && !plannedState.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
+		return errs
+	}
+	if plannedState.IsNull() {
+		// No further checks possible if the planned value is null
+		return errs
+	}
+
+	impTy := schema.ImpliedType()
+
+	for name, attrS := range schema.Attributes {
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(attrS.Type)
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+
+		path := append(path, cty.GetAttrStep{Name: name})
+		moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
+		errs = append(errs, moreErrs...)
+	}
+	for name, blockS := range schema.BlockTypes {
+		path := append(path, cty.GetAttrStep{Name: name})
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(impTy.AttributeType(name))
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+		if plannedV.RawEquals(configV) {
+			// Easy path: nothing has changed at all
+			continue
+		}
+		if !plannedV.IsKnown() {
+			errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+			continue
+		}
+
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
+			errs = append(errs, moreErrs...)
+		case configschema.NestingList:
+			// A NestingList might either be a list or a tuple, depending on
+			// whether there are dynamically-typed attributes inside. However,
+			// both support a similar-enough API that we can treat them the
+			// same for our purposes here.
+			if plannedV.IsNull() {
+				errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null"))
+				continue
+			}
+
+			plannedL := plannedV.LengthInt()
+			configL := configV.LengthInt()
+			if plannedL != configL {
+				errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
+				continue
+			}
+			for it := plannedV.ElementIterator(); it.Next(); {
+				idx, plannedEV := it.Element()
+				path := append(path, cty.IndexStep{Key: idx})
+				if !plannedEV.IsKnown() {
+					errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+					continue
+				}
+				if !configV.HasIndex(idx).True() {
+					continue // should never happen since we checked the lengths above
+				}
+				configEV := configV.Index(idx)
+				priorEV := cty.NullVal(blockS.ImpliedType())
+				if !priorV.IsNull() && priorV.HasIndex(idx).True() {
+					priorEV = priorV.Index(idx)
+				}
+
+				moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
+				errs = append(errs, moreErrs...)
+			}
+		case configschema.NestingMap:
+			if plannedV.IsNull() {
+				errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null"))
+				continue
+			}
+
+			// A NestingMap might either be a map or an object, depending on
+			// whether there are dynamically-typed attributes inside, but
+			// that's decided statically and so all values will have the same
+			// kind.
+			if plannedV.Type().IsObjectType() {
+				plannedAtys := plannedV.Type().AttributeTypes()
+				configAtys := configV.Type().AttributeTypes()
+				for k := range plannedAtys {
+					if _, ok := configAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
+						continue
+					}
+					path := append(path, cty.GetAttrStep{Name: k})
+
+					plannedEV := plannedV.GetAttr(k)
+					if !plannedEV.IsKnown() {
+						errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+						continue
+					}
+					configEV := configV.GetAttr(k)
+					priorEV := cty.NullVal(blockS.ImpliedType())
+					if !priorV.IsNull() && priorV.Type().HasAttribute(k) {
+						priorEV = priorV.GetAttr(k)
+					}
+					moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
+					errs = append(errs, moreErrs...)
+				}
+				for k := range configAtys {
+					if _, ok := plannedAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k))
+						continue
+					}
+				}
+			} else {
+				plannedL := plannedV.LengthInt()
+				configL := configV.LengthInt()
+				if plannedL != configL {
+					errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
+					continue
+				}
+				for it := plannedV.ElementIterator(); it.Next(); {
+					idx, plannedEV := it.Element()
+					path := append(path, cty.IndexStep{Key: idx})
+					if !plannedEV.IsKnown() {
+						errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+						continue
+					}
+					k := idx.AsString()
+					if !configV.HasIndex(idx).True() {
+						errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
+						continue
+					}
+					configEV := configV.Index(idx)
+					priorEV := cty.NullVal(blockS.ImpliedType())
+					if !priorV.IsNull() && priorV.HasIndex(idx).True() {
+						priorEV = priorV.Index(idx)
+					}
+					moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
+					errs = append(errs, moreErrs...)
+				}
+				for it := configV.ElementIterator(); it.Next(); {
+					idx, _ := it.Element()
+					if !plannedV.HasIndex(idx).True() {
+						errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
+						continue
+					}
+				}
+			}
+		case configschema.NestingSet:
+			if plannedV.IsNull() {
+				errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
+				continue
+			}
+
+			// Because set elements have no identifier with which to correlate
+			// them, we can't robustly validate the plan for a nested block
+			// backed by a set, and so unfortunately we need to just trust the
+			// provider to do the right thing. :(
+			//
+			// (In principle we could correlate elements by matching the
+			// subset of attributes explicitly set in config, except for the
+			// special diff suppression rule which allows for there to be a
+			// planned value that is constructed by mixing part of a prior
+			// value with part of a config value, creating an entirely new
+			// element that is not present in either prior nor config.)
+			for it := plannedV.ElementIterator(); it.Next(); {
+				idx, plannedEV := it.Element()
+				path := append(path, cty.IndexStep{Key: idx})
+				if !plannedEV.IsKnown() {
+					errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+					continue
+				}
+			}
+
+		default:
+			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+		}
+	}
+
+	return errs
+}
+
+func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedV.RawEquals(configV) {
+		// This is the easy path: provider didn't change anything at all.
+		return errs
+	}
+	if plannedV.RawEquals(priorV) && !priorV.IsNull() {
+		// Also pretty easy: there is a prior value and the provider has
+		// returned it unchanged. This indicates that configV and plannedV
+		// are functionally equivalent and so the provider wishes to disregard
+		// the configuration value in favor of the prior.
+		return errs
+	}
+	if attrS.Computed && configV.IsNull() {
+		// The provider is allowed to change the value of any computed
+		// attribute that isn't explicitly set in the config.
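+		//
+		// For example (an illustrative sketch, not part of the upstream
+		// code): with config {id: null} for a computed "id" attribute, a
+		// plan of {id: cty.UnknownVal(cty.String)} or
+		// {id: cty.StringVal("i-123")} is accepted here, whereas a plan
+		// that contradicts a non-null config value would fall through to
+		// the error cases below.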
+		return errs
+	}
+
+	// If none of the above conditions match, the provider has made an invalid
+	// change to this attribute.
+	if priorV.IsNull() {
+		if attrS.Sensitive {
+			errs = append(errs, path.NewErrorf("sensitive planned value does not match config value"))
+		} else {
+			errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV))
+		}
+		return errs
+	}
+	if attrS.Sensitive {
+		errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value"))
+	} else {
+		errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV))
+	}
+	return errs
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go
new file mode 100644
index 00000000000..0abed56a0ff
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go
@@ -0,0 +1,92 @@
+package plans
+
+import (
+	"sort"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Plan is the top-level type representing a planned set of changes.
+//
+// A plan is a summary of the set of changes required to move from a current
+// state to a goal state derived from configuration. The described changes
+// are not applied directly, but contain an approximation of the final
+// result that will be completed during apply by resolving any values that
+// cannot be predicted.
+//
+// A plan must always be accompanied by the state and configuration it was
+// built from, since the plan does not itself include all of the information
+// required to make the changes indicated.
+type Plan struct {
+	VariableValues  map[string]DynamicValue
+	Changes         *Changes
+	TargetAddrs     []addrs.Targetable
+	ProviderSHA256s map[string][]byte
+	Backend         Backend
+}
+
+// Backend represents the backend-related configuration and other data as it
+// existed when a plan was created.
+type Backend struct {
+	// Type is the type of backend that the plan will apply against.
+	Type string
+
+	// Config is the configuration of the backend, whose schema is decided by
+	// the backend Type.
+	Config DynamicValue
+
+	// Workspace is the name of the workspace that was active when the plan
+	// was created. It is illegal to apply a plan created for one workspace
+	// to the state of another workspace.
+	// (This constraint is already enforced by the statefile lineage mechanism,
+	// but storing this explicitly allows us to return a better error message
+	// in the situation where the user has the wrong workspace selected.)
+	Workspace string
+}
+
+func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) {
+	dv, err := NewDynamicValue(config, configSchema.ImpliedType())
+	if err != nil {
+		return nil, err
+	}
+
+	return &Backend{
+		Type:      typeName,
+		Config:    dv,
+		Workspace: workspaceName,
+	}, nil
+}
+
+// ProviderAddrs returns a list of all of the provider configuration addresses
+// referenced throughout the receiving plan.
+//
+// The result is de-duplicated so that each distinct address appears only once.
+func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
+	if p == nil || p.Changes == nil {
+		return nil
+	}
+
+	m := map[string]addrs.AbsProviderConfig{}
+	for _, rc := range p.Changes.Resources {
+		m[rc.ProviderAddr.String()] = rc.ProviderAddr
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	// This is mainly just so we'll get stable results for testing purposes.
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	ret := make([]addrs.AbsProviderConfig, len(keys))
+	for i, key := range keys {
+		ret[i] = m[key]
+	}
+
+	return ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go
new file mode 100644
index 00000000000..f20f0507e96
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go
@@ -0,0 +1,132 @@
+package convert
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+	proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// WarnsAndErrsToProto converts the warnings and errors returned by the legacy
+// provider to protobuf diagnostics.
+func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
+	for _, w := range warns {
+		diags = AppendProtoDiag(diags, w)
+	}
+
+	for _, e := range errs {
+		diags = AppendProtoDiag(diags, e)
+	}
+
+	return diags
+}
+
+// AppendProtoDiag appends a new diagnostic from a warning string or an error.
+// This panics if d is not a string or error.
+func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
+	switch d := d.(type) {
+	case cty.PathError:
+		ap := PathToAttributePath(d.Path)
+		diags = append(diags, &proto.Diagnostic{
+			Severity:  proto.Diagnostic_ERROR,
+			Summary:   d.Error(),
+			Attribute: ap,
+		})
+	case error:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_ERROR,
+			Summary:  d.Error(),
+		})
+	case string:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_WARNING,
+			Summary:  d,
+		})
+	case *proto.Diagnostic:
+		diags = append(diags, d)
+	case []*proto.Diagnostic:
+		diags = append(diags, d...)
+	}
+	return diags
+}
+
+// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics.
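+//
+// A rough usage sketch for these conversion helpers (hypothetical values,
+// not part of the upstream code):
+//
+//	ds := WarnsAndErrsToProto(
+//		[]string{"deprecated field"},
+//		[]error{errors.New("invalid value")},
+//	)
+//	diags := ProtoToDiagnostics(ds)
+//	// diags now holds one Warning and one Error diagnostic.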
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	for _, d := range ds {
+		var severity tfdiags.Severity
+
+		switch d.Severity {
+		case proto.Diagnostic_ERROR:
+			severity = tfdiags.Error
+		case proto.Diagnostic_WARNING:
+			severity = tfdiags.Warning
+		}
+
+		var newDiag tfdiags.Diagnostic
+
+		// if there's an attribute path, we need to create an AttributeValue diagnostic
+		if d.Attribute != nil {
+			path := AttributePathToPath(d.Attribute)
+			newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
+		} else {
+			newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
+		}
+
+		diags = diags.Append(newDiag)
+	}
+
+	return diags
+}
+
+// AttributePathToPath takes the proto encoded path and converts it to a cty.Path
+func AttributePathToPath(ap *proto.AttributePath) cty.Path {
+	var p cty.Path
+	for _, step := range ap.Steps {
+		switch selector := step.Selector.(type) {
+		case *proto.AttributePath_Step_AttributeName:
+			p = p.GetAttr(selector.AttributeName)
+		case *proto.AttributePath_Step_ElementKeyString:
+			p = p.Index(cty.StringVal(selector.ElementKeyString))
+		case *proto.AttributePath_Step_ElementKeyInt:
+			p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
+		}
+	}
+	return p
+}
+
+// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path.
+func PathToAttributePath(p cty.Path) *proto.AttributePath {
+	ap := &proto.AttributePath{}
+	for _, step := range p {
+		switch selector := step.(type) {
+		case cty.GetAttrStep:
+			ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+				Selector: &proto.AttributePath_Step_AttributeName{
+					AttributeName: selector.Name,
+				},
+			})
+		case cty.IndexStep:
+			key := selector.Key
+			switch key.Type() {
+			case cty.String:
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyString{
+						ElementKeyString: key.AsString(),
+					},
+				})
+			case cty.Number:
+				v, _ := key.AsBigFloat().Int64()
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyInt{
+						ElementKeyInt: v,
+					},
+				})
+			default:
+				// We'll bail early if we encounter anything else, and just
+				// return the valid prefix.
+				return ap
+			}
+		}
+	}
+	return ap
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go
new file mode 100644
index 00000000000..105c32c6fa5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go
@@ -0,0 +1,154 @@
+package convert
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5"
+)
+
+// ConfigSchemaToProto takes a *configschema.Block and converts it to a
+// proto.Schema_Block for a grpc response.
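+//
+// As a hypothetical example (not part of the upstream code), a schema with
+// a single required "name" string attribute would convert roughly like this:
+//
+//	block := ConfigSchemaToProto(&configschema.Block{
+//		Attributes: map[string]*configschema.Attribute{
+//			"name": {Type: cty.String, Required: true},
+//		},
+//	})
+//	// block.Attributes[0].Type is the JSON encoding of cty.String: "string"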
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
+	block := &proto.Schema_Block{}
+
+	for _, name := range sortedKeys(b.Attributes) {
+		a := b.Attributes[name]
+		attr := &proto.Schema_Attribute{
+			Name:        name,
+			Description: a.Description,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Required:    a.Required,
+			Sensitive:   a.Sensitive,
+		}
+
+		ty, err := json.Marshal(a.Type)
+		if err != nil {
+			panic(err)
+		}
+
+		attr.Type = ty
+
+		block.Attributes = append(block.Attributes, attr)
+	}
+
+	for _, name := range sortedKeys(b.BlockTypes) {
+		b := b.BlockTypes[name]
+		block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
+	}
+
+	return block
+}
+
+func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
+	var nesting proto.Schema_NestedBlock_NestingMode
+	switch b.Nesting {
+	case configschema.NestingSingle:
+		nesting = proto.Schema_NestedBlock_SINGLE
+	case configschema.NestingGroup:
+		nesting = proto.Schema_NestedBlock_GROUP
+	case configschema.NestingList:
+		nesting = proto.Schema_NestedBlock_LIST
+	case configschema.NestingSet:
+		nesting = proto.Schema_NestedBlock_SET
+	case configschema.NestingMap:
+		nesting = proto.Schema_NestedBlock_MAP
+	default:
+		nesting = proto.Schema_NestedBlock_INVALID
+	}
+	return &proto.Schema_NestedBlock{
+		TypeName: name,
+		Block:    ConfigSchemaToProto(&b.Block),
+		Nesting:  nesting,
+		MinItems: int64(b.MinItems),
+		MaxItems: int64(b.MaxItems),
+	}
+}
+
+// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
+func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
+	return providers.Schema{
+		Version: s.Version,
+		Block:   ProtoToConfigSchema(s.Block),
+	}
+}
+
+// ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it
+// to a terraform *configschema.Block.
+func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
+	block := &configschema.Block{
+		Attributes: make(map[string]*configschema.Attribute),
+		BlockTypes: make(map[string]*configschema.NestedBlock),
+	}
+
+	for _, a := range b.Attributes {
+		attr := &configschema.Attribute{
+			Description: a.Description,
+			Required:    a.Required,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Sensitive:   a.Sensitive,
+		}
+
+		if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
+			panic(err)
+		}
+
+		block.Attributes[a.Name] = attr
+	}
+
+	for _, b := range b.BlockTypes {
+		block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
+	}
+
+	return block
+}
+
+func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
+	var nesting configschema.NestingMode
+	switch b.Nesting {
+	case proto.Schema_NestedBlock_SINGLE:
+		nesting = configschema.NestingSingle
+	case proto.Schema_NestedBlock_GROUP:
+		nesting = configschema.NestingGroup
+	case proto.Schema_NestedBlock_LIST:
+		nesting = configschema.NestingList
+	case proto.Schema_NestedBlock_MAP:
+		nesting = configschema.NestingMap
+	case proto.Schema_NestedBlock_SET:
+		nesting = configschema.NestingSet
+	default:
+		// In all other cases we'll leave it as the zero value (invalid) and
+		// let the caller validate it and deal with this.
+	}
+
+	nb := &configschema.NestedBlock{
+		Nesting:  nesting,
+		MinItems: int(b.MinItems),
+		MaxItems: int(b.MaxItems),
+	}
+
+	nested := ProtoToConfigSchema(b.Block)
+	nb.Block = *nested
+	return nb
+}
+
+// sortedKeys returns the lexically sorted keys from the given map. This is
+// used to make schema conversions deterministic. This panics if map keys
+// are not strings.
+func sortedKeys(m interface{}) []string {
+	v := reflect.ValueOf(m)
+	keys := make([]string, v.Len())
+
+	mapKeys := v.MapKeys()
+	for i, k := range mapKeys {
+		keys[i] = k.Interface().(string)
+	}
+
+	sort.Strings(keys)
+	return keys
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
new file mode 100644
index 00000000000..729e97099e3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
@@ -0,0 +1,64 @@
+package discovery
+
+// Error is a type used to describe situations that the caller must handle
+// since they indicate some form of user error.
+//
+// The functions and methods that return these specialized errors indicate so
+// in their documentation. The Error type should not itself be used directly,
+// but rather errors should be compared using the == operator with the
+// error constants in this package.
+//
+// Values of this type are _not_ used when the error being reported is an
+// operational error (server unavailable, etc) or indicative of a bug in
+// this package or its caller.
+type Error string
+
+// ErrorNoSuitableVersion indicates that a suitable version (meeting given
+// constraints) is not available.
+const ErrorNoSuitableVersion = Error("no suitable version is available")
+
+// ErrorNoVersionCompatible indicates that all of the available versions
+// that otherwise met constraints are not compatible with the current
+// version of Terraform.
+const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
+
+// ErrorVersionIncompatible indicates that all of the versions within the
+// constraints are not compatible with the current version of Terraform, though
+// there does exist a version outside of the constraints that is compatible.
+const ErrorVersionIncompatible = Error("incompatible provider version")
+
+// ErrorNoSuchProvider indicates that no provider exists with a name given
+const ErrorNoSuchProvider = Error("no provider exists with the given name")
+
+// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
+// versions that otherwise met constraints are not compatible with the
+// requested platform
+const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform")
+
+// ErrorMissingChecksumVerification indicates that either the provider
+// distribution is missing the SHA256SUMS file or the checksum file does
+// not contain a checksum for the binary plugin
+const ErrorMissingChecksumVerification = Error("unable to verify checksum")
+
+// ErrorChecksumVerification indicates that the current checksum of the
+// provider plugin has changed since the initial release and is not trusted
+// to download
+const ErrorChecksumVerification = Error("unexpected plugin checksum")
+
+// ErrorSignatureVerification indicates that the digital signature for a
+// provider distribution could not be verified for one of the following
+// reasons: missing signature file, missing public key, or the signature
+// was not signed by any known key for the publisher
+const ErrorSignatureVerification = Error("unable to verify signature")
+
+// ErrorServiceUnreachable indicates that the network was unable to connect
+// to the registry service
+const ErrorServiceUnreachable = Error("registry service is unreachable")
+
+// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
+// to the public registry in particular, so we can show a link to the statuspage
+const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
+
+func (err Error) Error() string {
+	return string(err)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go
new file mode 100644
index 00000000000..f053312b00d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go
@@ -0,0 +1,191 @@
+package discovery
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// FindPlugins looks in the given directories for files whose filenames
+// suggest that they are plugins of the given kind (e.g. "provider") and
+// returns a PluginMetaSet representing the discovered potential-plugins.
+//
+// Currently this supports two different naming schemes. The current
+// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing
+// files named terraform-$KIND-$NAME_v$VERSION. The legacy naming scheme is
+// files directly in the given directory whose names are like
+// terraform-$KIND-$NAME.
+//
+// Only one plugin will be returned for each unique plugin (name, version)
+// pair, with preference given to files found in earlier directories.
+//
+// This is a convenience wrapper around FindPluginPaths and ResolvePluginPaths.
+func FindPlugins(kind string, dirs []string) PluginMetaSet {
+	return ResolvePluginPaths(FindPluginPaths(kind, dirs))
+}
+
+// FindPluginPaths looks in the given directories for files whose filenames
+// suggest that they are plugins of the given kind (e.g. "provider").
+//
+// The return value is a list of absolute paths that appear to refer to
+// plugins in the given directories, based only on what can be inferred
+// from the naming scheme. The paths returned are ordered such that files
+// in later dirs appear after files in earlier dirs in the given directory
+// list. Within the same directory plugins are returned in a consistent but
+// undefined order.
+func FindPluginPaths(kind string, dirs []string) []string {
+	// This is just a thin wrapper around findPluginPaths so that we can
+	// use the latter in tests with a fake machineName so we can use our
+	// test fixtures.
+	return findPluginPaths(kind, dirs)
+}
+
+func findPluginPaths(kind string, dirs []string) []string {
+	prefix := "terraform-" + kind + "-"
+
+	ret := make([]string, 0, len(dirs))
+
+	for _, dir := range dirs {
+		items, err := ioutil.ReadDir(dir)
+		if err != nil {
+			// Ignore missing dirs, non-dirs, etc
+			continue
+		}
+
+		log.Printf("[DEBUG] checking for %s in %q", kind, dir)
+
+		for _, item := range items {
+			fullName := item.Name()
+
+			if !strings.HasPrefix(fullName, prefix) {
+				continue
+			}
+
+			// New-style paths must have a version segment in filename
+			if strings.Contains(strings.ToLower(fullName), "_v") {
+				absPath, err := filepath.Abs(filepath.Join(dir, fullName))
+				if err != nil {
+					log.Printf("[ERROR] plugin filepath error: %s", err)
+					continue
+				}
+
+				// Check that the file we found is usable
+				if !pathIsFile(absPath) {
+					log.Printf("[ERROR] ignoring non-file %s", absPath)
+					continue
+				}
+
+				log.Printf("[DEBUG] found %s %q", kind, fullName)
+				ret = append(ret, filepath.Clean(absPath))
+				continue
+			}
+
+			// Legacy style with files directly in the base directory
+			absPath, err := filepath.Abs(filepath.Join(dir, fullName))
+			if err != nil {
+				log.Printf("[ERROR] plugin filepath error: %s", err)
+				continue
+			}
+
+			// Check that the file we found is usable
+			if !pathIsFile(absPath) {
+				log.Printf("[ERROR] ignoring non-file %s", absPath)
+				continue
+			}
+
+			log.Printf("[WARN] found legacy %s %q", kind, fullName)
+
+			ret = append(ret, filepath.Clean(absPath))
+		}
+	}
+
+	return ret
+}
+
+// Returns true if and only if the given path refers to a file or a symlink
+// to a file.
+func pathIsFile(path string) bool {
+	info, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+
+	return !info.IsDir()
+}
+
+// ResolvePluginPaths takes a list of paths to plugin executables (as returned
+// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the
+// referenced plugins.
+//
+// If the same combination of plugin name and version appears multiple times,
+// the earlier reference will be preferred. Several different versions of
+// the same plugin name may be returned, in which case the methods of
+// PluginMetaSet can be used to filter down.
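+//
+// For example (an illustrative sketch, not part of the upstream code), the
+// path "/plugins/linux_amd64/terraform-provider-aws_v1.0.0_x4" resolves to
+// a PluginMeta with Name "aws" and Version "1.0.0": the "terraform-" and
+// kind prefixes are trimmed, the name is split from the version at "_v",
+// and the trailing "_x4" protocol-version suffix is discarded.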
+func ResolvePluginPaths(paths []string) PluginMetaSet {
+	s := make(PluginMetaSet)
+
+	type nameVersion struct {
+		Name    string
+		Version string
+	}
+	found := make(map[nameVersion]struct{})
+
+	for _, path := range paths {
+		baseName := strings.ToLower(filepath.Base(path))
+		if !strings.HasPrefix(baseName, "terraform-") {
+			// Should never happen with reasonable input
+			continue
+		}
+
+		baseName = baseName[10:]
+		firstDash := strings.Index(baseName, "-")
+		if firstDash == -1 {
+			// Should never happen with reasonable input
+			continue
+		}
+
+		baseName = baseName[firstDash+1:]
+		if baseName == "" {
+			// Should never happen with reasonable input
+			continue
+		}
+
+		// Trim the .exe suffix used on Windows before we start wrangling
+		// the remainder of the path.
+		if strings.HasSuffix(baseName, ".exe") {
+			baseName = baseName[:len(baseName)-4]
+		}
+
+		parts := strings.SplitN(baseName, "_v", 2)
+		name := parts[0]
+		version := VersionZero
+		if len(parts) == 2 {
+			version = parts[1]
+		}
+
+		// Auto-installed plugins contain an extra name portion representing
+		// the expected plugin version, which we must trim off.
+		if underX := strings.Index(version, "_x"); underX != -1 {
+			version = version[:underX]
+		}
+
+		if _, ok := found[nameVersion{name, version}]; ok {
+			// Skip duplicate versions of the same plugin
+			// (We do this during this step because after this we will be
+			// dealing with sets and thus lose our ordering with which to
+			// decide preference.)
+			continue
+		}
+
+		s.Add(PluginMeta{
+			Name:    name,
+			Version: VersionStr(version),
+			Path:    path,
+		})
+		found[nameVersion{name, version}] = struct{}{}
+	}
+
+	return s
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go
new file mode 100644
index 00000000000..ff90a266a99
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go
@@ -0,0 +1,676 @@
+package discovery
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	getter "github.com/hashicorp/go-getter"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/httpclient"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/registry"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/response"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+	tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version"
+	"github.com/mitchellh/cli"
+)
+
+// Releases are located by querying the terraform registry.
+
+var httpClient *http.Client
+
+func init() {
+	httpClient = httpclient.New()
+
+	httpGetter := &getter.HttpGetter{
+		Client: httpClient,
+		Netrc:  true,
+	}
+
+	getter.Getters["http"] = httpGetter
+	getter.Getters["https"] = httpGetter
+}
+
+// An Installer maintains a local cache of plugins by downloading plugins
+// from an online repository.
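+//
+// A rough sketch of caller-side handling (hypothetical, not part of the
+// upstream code):
+//
+//	meta, diags, err := installer.Get(provider, constraints)
+//	switch err {
+//	case nil:
+//		// use meta.Path
+//	case ErrorNoSuchProvider, ErrorNoSuitableVersion:
+//		// report a user-actionable error
+//	default:
+//		// operational or internal error
+//	}
+//	_ = diags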
+type Installer interface {
+	Get(provider addrs.ProviderType, req Constraints) (PluginMeta, tfdiags.Diagnostics, error)
+	PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
+}
+
+// ProviderInstaller is an Installer implementation that knows how to
+// download Terraform providers from the official HashiCorp releases service
+// into a local directory. The files downloaded are compliant with the
+// naming scheme expected by FindPlugins, so the target directory of a
+// provider installer can be used as one of several plugin discovery sources.
+type ProviderInstaller struct {
+	Dir string
+
+	// Cache is used to access and update a local cache of plugins if non-nil.
+	// Can be nil to disable caching.
+	Cache PluginCache
+
+	PluginProtocolVersion uint
+
+	// OS and Arch specify the OS and architecture that should be used when
+	// installing plugins. These use the same labels as the runtime.GOOS and
+	// runtime.GOARCH variables respectively, and indeed the values of these
+	// are used as defaults if either of these is the empty string.
+	OS   string
+	Arch string
+
+	// Skip checksum and signature verification
+	SkipVerify bool
+
+	Ui cli.Ui // Ui for output
+
+	// Services is a required *disco.Disco, which may have services and
+	// credentials pre-loaded.
+	Services *disco.Disco
+
+	// registry client
+	registry *registry.Client
+}
+
+// Get is part of an implementation of type Installer, and attempts to download
+// and install a Terraform provider matching the given constraints.
+//
+// This method may return one of a number of sentinel errors from this
+// package to indicate issues that are likely to be resolvable via user action:
+//
+//	ErrorNoSuchProvider: no provider with the given name exists in the repository.
+//	ErrorNoSuitableVersion: the provider exists but no available version matches constraints.
+//	ErrorNoVersionCompatible: a plugin was found within the constraints but it is
+//	incompatible with the current Terraform version.
+//
+// These errors should be recognized and handled as special cases by the caller
+// to present a suitable user-oriented error message.
+//
+// All other errors indicate an internal problem that is likely _not_ solvable
+// through user action, or at least not within Terraform's scope. Error messages
+// are produced under the assumption that if presented to the user they will
+// be presented alongside context about what is being installed, and thus the
+// error messages do not redundantly include such information.
+func (i *ProviderInstaller) Get(provider addrs.ProviderType, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) {
+	var diags tfdiags.Diagnostics
+
+	// a little bit of initialization.
+ if i.OS == "" { + i.OS = runtime.GOOS + } + if i.Arch == "" { + i.Arch = runtime.GOARCH + } + if i.registry == nil { + i.registry = registry.NewClient(i.Services, nil) + } + + // get a full listing of versions for the requested provider + allVersions, err := i.listProviderVersions(provider) + + // TODO: return multiple errors + if err != nil { + log.Printf("[DEBUG] %s", err) + if registry.IsServiceUnreachable(err) { + registryHost, err := i.hostname() + if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { + return PluginMeta{}, diags, ErrorPublicRegistryUnreachable + } + return PluginMeta{}, diags, ErrorServiceUnreachable + } + if registry.IsServiceNotProvided(err) { + return PluginMeta{}, diags, err + } + return PluginMeta{}, diags, ErrorNoSuchProvider + } + + // Add any warnings from the response to diags + for _, warning := range allVersions.Warnings { + hostname, err := i.hostname() + if err != nil { + return PluginMeta{}, diags, err + } + diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) + diags = diags.Append(diag) + } + + if len(allVersions.Versions) == 0 { + return PluginMeta{}, diags, ErrorNoSuitableVersion + } + providerSource := allVersions.ID + + // Filter the list of plugin versions to those which meet the version constraints + versions := allowedVersions(allVersions, req) + if len(versions) == 0 { + return PluginMeta{}, diags, ErrorNoSuitableVersion + } + + // sort them newest to oldest. The newest version wins! + response.ProviderVersionCollection(versions).Sort() + + // if the chosen provider version does not support the requested platform, + // filter the list of acceptable versions to those that support that platform + if err := i.checkPlatformCompatibility(versions[0]); err != nil { + versions = i.platformCompatibleVersions(versions) + if len(versions) == 0 { + return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform + } + } + + // we now have a winning platform-compatible version + versionMeta := versions[0] + v := VersionStr(versionMeta.Version).MustParse() + + // check protocol compatibility + if err := i.checkPluginProtocol(versionMeta); err != nil { + closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) + if err != nil { + // No operation here if we can't find a version with compatible protocol + return PluginMeta{}, diags, err + } + + // Prompt version suggestion to UI based on closest protocol match + var errMsg string + closestVersion := VersionStr(closestMatch.Version).MustParse() + if v.NewerThan(closestVersion) { + errMsg = providerProtocolTooNew + } else { + errMsg = providerProtocolTooOld + } + + constraintStr := req.String() + if constraintStr == "" { + constraintStr = "(any version)" + } + + return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf( + errMsg, provider, v.String(), tfversion.String(), + closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))) + } + + downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) + if err != nil { + return PluginMeta{}, diags, err + } + providerURL := downloadURLs.DownloadURL + + if !i.SkipVerify { + // Terraform verifies the integrity of a provider release before downloading + // the plugin binary. The digital signature (SHA256SUMS.sig) on the + // release distribution (SHA256SUMS) is verified with the public key of the + // publisher provided in the Terraform Registry response, ensuring that + // everything is as intended by the publisher. 
The checksum of the provider + // plugin is expected in the SHA256SUMS file and is double-checked to match + // the checksum of the original release published to the Registry. This + // enforces immutability of releases between the Registry and the plugin's + // host location. Lastly, the integrity of the downloaded binary is verified + // to match the Registry-provided, signed checksum. + sha256, err := i.getProviderChecksum(downloadURLs) + if err != nil { + return PluginMeta{}, diags, err + } + + // add the checksum parameter for go-getter to verify the download for us. + if sha256 != "" { + providerURL = providerURL + "?checksum=sha256:" + sha256 + } + } + + printedProviderName := fmt.Sprintf("%q (%s)", provider.Name, providerSource) + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version)) + log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version) + err = i.install(provider, v, providerURL) + if err != nil { + return PluginMeta{}, diags, err + } + + // Find what we just installed + // (This is weird, because go-getter doesn't directly return + // information about what was extracted, and we just extracted + // the archive directly into a shared dir here.) + log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider.Name, versionMeta.Version) + metas := FindPlugins("provider", []string{i.Dir}) + log.Printf("[DEBUG] all plugins found %#v", metas) + metas, _ = metas.ValidateVersions() + metas = metas.WithName(provider.Name).WithVersion(v) + log.Printf("[DEBUG] filtered plugins %#v", metas) + if metas.Count() == 0 { + // This should never happen. Suggests that the release archive + // contains an executable file whose name doesn't match the + // expected convention. + return PluginMeta{}, diags, fmt.Errorf( + "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) + } + + if metas.Count() > 1 { + // This should also never happen, and suggests that a + // particular version was re-released with a different + // executable filename. We consider releases as immutable, so + // this is an error. + return PluginMeta{}, diags, fmt.Errorf( + "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) + } + + // By now we know we have exactly one meta, and so "Newest" will + // return that one. + return metas.Newest(), diags, nil +} + +func (i *ProviderInstaller) install(provider addrs.ProviderType, version Version, url string) error { + if i.Cache != nil { + log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider.Name, version) + cached := i.Cache.CachedPluginPath("provider", provider.Name, version) + if cached == "" { + log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider.Name, version, url) + err := getter.Get(i.Cache.InstallDir(), url) + if err != nil { + return err + } + // should now be in cache + cached = i.Cache.CachedPluginPath("provider", provider.Name, version) + if cached == "" { + // should never happen if the getter is behaving properly + // and the plugins are packaged properly. + return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) + } + } + + // Link or copy the cached binary into our install dir so the + // normal resolution machinery can find it.
+ filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + // check if the target dir exists, and create it if not + var err error + if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { + err = os.MkdirAll(i.Dir, 0700) + } + if err != nil { + return err + } + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider.Name, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. + if linkErr != nil { + srcFile, err := os.Open(cached) + if err != nil { + return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) + } + defer srcFile.Close() + + destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create %s: %s", targetPath, err) + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + destFile.Close() + return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) + } + + err = destFile.Close() + if err != nil { + return fmt.Errorf("error creating %s: %s", targetPath, err) + } + } + + // One way or another, by the time we get here we should have either + // a link or a copy of the cached plugin within i.Dir, as expected. + } else { + log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider.Name, version, url) + err := getter.Get(i.Dir, url) + if err != nil { + return err + } + } + return nil +} + +func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { + purge := make(PluginMetaSet) + + present := FindPlugins("provider", []string{i.Dir}) + for meta := range present { + chosen, ok := used[meta.Name] + if !ok { + purge.Add(meta) + } + if chosen.Path != meta.Path { + purge.Add(meta) + } + } + + removed := make(PluginMetaSet) + var errs error + for meta := range purge { + path := meta.Path + err := os.Remove(path) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "failed to remove unused provider plugin %s: %s", + path, err, + )) + } else { + removed.Add(meta) + } + } + + return removed, errs +} + +func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { + // Get SHA256SUMS file. 
+ shasums, err := getFile(resp.ShasumsURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) + return "", ErrorMissingChecksumVerification + } + + // Get SHA256SUMS.sig file. + signature, err := getFile(resp.ShasumsSignatureURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) + return "", ErrorSignatureVerification + } + + // Verify the GPG signature returned from the Registry. + asciiArmor := resp.SigningKeys.GPGASCIIArmor() + signer, err := verifySig(shasums, signature, asciiArmor) + if err != nil { + log.Printf("[ERROR] error verifying signature: %s", err) + return "", ErrorSignatureVerification + } + + // Also verify the GPG signature against the HashiCorp public key. This is + // a temporary additional check until a more robust key verification + // process is added in a future release. + _, err = verifySig(shasums, signature, HashicorpPublicKey) + if err != nil { + log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) + return "", ErrorSignatureVerification + } + + // Display identity for GPG key which succeeded verifying the signature. + // This could also be used to display to the user with i.Ui.Info(). + identities := []string{} + for k := range signer.Identities { + identities = append(identities, k) + } + identity := strings.Join(identities, ", ") + log.Printf("[DEBUG] verified GPG signature with key from %s", identity) + + // Extract checksum for this os/arch platform binary and verify against Registry + checksum := checksumForFile(shasums, resp.Filename) + if checksum == "" { + log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) + return "", ErrorMissingChecksumVerification + } else if checksum != resp.Shasum { + log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) + return "", ErrorChecksumVerification + } + + return checksum, nil +} + +func (i *ProviderInstaller) hostname() (string, error) { + provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) + svchost, err := provider.SvcHost() + if err != nil { + return "", err + } + + return svchost.ForDisplay(), nil +} + +// list all versions available for the named provider +func (i *ProviderInstaller) listProviderVersions(provider addrs.ProviderType) (*response.TerraformProviderVersions, error) { + req := regsrc.NewTerraformProvider(provider.Name, i.OS, i.Arch) + versions, err := i.registry.TerraformProviderVersions(req) + return versions, err +} + +func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { + urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) + if urls == nil { + return nil, fmt.Errorf("No download urls found for provider %s", name) + } + return urls, err +} + +// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. +// Prerelease versions are filtered. 
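The checksum that getProviderChecksum above verifies feeds directly into the download step shown earlier in Get: appending `?checksum=sha256:<digest>` to the URL makes go-getter verify the payload itself. A minimal sketch of that hand-off, assuming a placeholder URL and digest rather than a real release:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// go-getter fetches the archive and, because of the checksum query
	// parameter, verifies the SHA-256 digest of the payload before
	// extracting into the destination directory. URL and digest here
	// are placeholders, not a real release.
	url := "https://releases.example.com/terraform-provider-example_1.0.0_linux_amd64.zip" +
		"?checksum=sha256:0000000000000000000000000000000000000000000000000000000000000000"
	if err := getter.Get("./plugins", url); err != nil {
		log.Fatal(err)
	}
}
```

findClosestProtocolCompatibleVersion, described by the comment above, follows.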
+func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { + // Loop through all the provider versions to find the earliest and latest + // versions that match the installer protocol to then select the closest of the two + var latest, earliest *response.TerraformProviderVersion + for _, version := range versions { + // Prereleases are filtered and will not be suggested + v, err := VersionStr(version.Version).Parse() + if err != nil || v.IsPrerelease() { + continue + } + + if err := i.checkPluginProtocol(version); err == nil { + if earliest == nil { + // Found the first provider version with compatible protocol + earliest = version + } + // Update the latest protocol compatible version + latest = version + } + } + if earliest == nil { + // No compatible protocol was found for any version + return nil, ErrorNoVersionCompatible + } + + // Convert protocols to comparable types + protoString := strconv.Itoa(int(i.PluginProtocolVersion)) + protocolVersion, err := VersionStr(protoString).Parse() + if err != nil { + return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) + } + + earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() + if err != nil { + return nil, err + } + + // Compare installer protocol version with the first protocol listed of the earliest match + // [A, B] where A is assumed the earliest compatible major version of the protocol pair + if protocolVersion.NewerThan(earliestVersionProtocol) { + // Provider protocols are too old, the closest version is the earliest compatible version + return earliest, nil + } + + // Provider protocols are too new, the closest version is the latest compatible version + return latest, nil +} + +func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { + // TODO: should this be a different error? We should probably differentiate between + // no compatible versions and no protocol versions listed at all + if len(versionMeta.Protocols) == 0 { + return fmt.Errorf("no plugin protocol versions listed") + } + + protoString := strconv.Itoa(int(i.PluginProtocolVersion)) + protocolVersion, err := VersionStr(protoString).Parse() + if err != nil { + return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) + } + protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse() + if err != nil { + // This should not fail if the preceding function succeeded. + return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String()) + } + + for _, p := range versionMeta.Protocols { + proPro, err := VersionStr(p).Parse() + if err != nil { + // invalid protocol reported by the registry. Move along. + log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version) + continue + } + // success! + if protocolConstraint.Allows(proPro) { + return nil + } + } + + return ErrorNoVersionCompatible +} + +// platformCompatibleVersions returns a list of provider versions that are +// compatible with the requested platform. 
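checkPluginProtocol above reduces protocol matching to a pessimistic version constraint: installer protocol version 5 becomes "~> 5.0", which admits 5.x entries and rejects 4.x and 6.x. A small sketch against hashicorp/go-version directly; the protocol list is made up for illustration.

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "~> 5.0" is the pessimistic constraint derived from protocol 5:
	// it allows >= 5.0 and < 6.0.
	constraint, err := version.NewConstraint("~> 5.0")
	if err != nil {
		panic(err)
	}
	for _, proto := range []string{"4.0", "5.0", "5.1", "6.0"} {
		v, err := version.NewVersion(proto)
		if err != nil {
			panic(err)
		}
		fmt.Printf("protocol %s compatible: %v\n", proto, constraint.Check(v))
	}
}
```

platformCompatibleVersions, whose contract the comment above states, follows.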
+func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { + var v []*response.TerraformProviderVersion + for _, version := range versions { + if err := i.checkPlatformCompatibility(version); err == nil { + v = append(v, version) + } + } + return v +} + +func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { + if len(versionMeta.Platforms) == 0 { + return fmt.Errorf("no supported provider platforms listed") + } + for _, p := range versionMeta.Platforms { + if p.Arch == i.Arch && p.OS == i.OS { + return nil + } + } + return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) +} + +// take the list of available versions for a plugin, and filter out those that +// don't fit the constraints. +func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { + var allowed []*response.TerraformProviderVersion + + for _, v := range available.Versions { + version, err := VersionStr(v.Version).Parse() + if err != nil { + log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) + continue + } + if required.Allows(version) { + allowed = append(allowed, v) + } + } + return allowed +} + +func checksumForFile(sums []byte, name string) string { + for _, line := range strings.Split(string(sums), "\n") { + parts := strings.Fields(line) + if len(parts) > 1 && parts[1] == name { + return parts[0] + } + } + return "" +} + +func getFile(url string) ([]byte, error) { + resp, err := httpClient.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s", resp.Status) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return data, err + } + return data, nil +} + +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of terraform, +// but an older version of the provider is compatible. +const providerProtocolTooOld = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the earliest compatible version. Select it with +the following version constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. +` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of terraform, +// and the user could either upgrade terraform or choose an older version of the +// provider +const providerProtocolTooNew = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the latest compatible version. Select it with +the following constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. + +Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. 
+` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go new file mode 100644 index 00000000000..1a100426482 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. +func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. 
+ plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go new file mode 100644 index 00000000000..4622ca0545c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go @@ -0,0 +1,34 @@ +package discovery + +// HashicorpPublicKey is the HashiCorp public key, also available at +// https://www.hashicorp.com/security +const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y +0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go new file mode 100644 index 00000000000..bdcebcb9dc4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go @@ -0,0 +1,41 @@ +package discovery + +import ( + "crypto/sha256" + "io" + "os" +) + +// PluginMeta is metadata about a plugin, useful for launching the plugin +// and for understanding which plugins are available. +type PluginMeta struct { + // Name is the name of the plugin, e.g. as inferred from the plugin + // binary's filename, or by explicit configuration. + Name string + + // Version is the semver version of the plugin, expressed as a string + // that might not be semver-valid. + Version VersionStr + + // Path is the absolute path of the executable that can be launched + // to provide the RPC server for this plugin. 
+ Path string +} + +// SHA256 returns a SHA256 hash of the content of the referenced executable +// file, or an error if the file's contents cannot be read. +func (m PluginMeta) SHA256() ([]byte, error) { + f, err := os.Open(m.Path) + if err != nil { + return nil, err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go new file mode 100644 index 00000000000..3a992892df6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go @@ -0,0 +1,195 @@ +package discovery + +// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. +// +// Methods on this type allow filtering of the set to produce subsets that +// meet more restrictive criteria. +type PluginMetaSet map[PluginMeta]struct{} + +// Add inserts the given PluginMeta into the receiving set. This is a no-op +// if the given meta is already present. +func (s PluginMetaSet) Add(p PluginMeta) { + s[p] = struct{}{} +} + +// Remove removes the given PluginMeta from the receiving set. This is a no-op +// if the given meta is not already present. +func (s PluginMetaSet) Remove(p PluginMeta) { + delete(s, p) +} + +// Has returns true if the given meta is in the receiving set, or false +// otherwise. +func (s PluginMetaSet) Has(p PluginMeta) bool { + _, ok := s[p] + return ok +} + +// Count returns the number of metas in the set +func (s PluginMetaSet) Count() int { + return len(s) +} + +// ValidateVersions returns two new PluginMetaSets, separating those with +// versions that have syntax-valid semver versions from those that don't. +// +// Eliminating invalid versions from consideration (and possibly warning about +// them) is usually the first step of working with a meta set after discovery +// has completed. +func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { + valid = make(PluginMetaSet) + invalid = make(PluginMetaSet) + for p := range s { + if _, err := p.Version.Parse(); err == nil { + valid.Add(p) + } else { + invalid.Add(p) + } + } + return +} + +// WithName returns the subset of metas that have the given name. +func (s PluginMetaSet) WithName(name string) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + if p.Name == name { + ns.Add(p) + } + } + return ns +} + +// WithVersion returns the subset of metas that have the given version. +// +// This should be used only with the "valid" result from ValidateVersions; +// it will ignore any plugin metas that have invalid version strings. +func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + gotVersion, err := p.Version.Parse() + if err != nil { + continue + } + if gotVersion.Equal(version) { + ns.Add(p) + } + } + return ns +} + +// ByName groups the metas in the set by their Names, returning a map. +func (s PluginMetaSet) ByName() map[string]PluginMetaSet { + ret := make(map[string]PluginMetaSet) + for p := range s { + if _, ok := ret[p.Name]; !ok { + ret[p.Name] = make(PluginMetaSet) + } + ret[p.Name].Add(p) + } + return ret +} + +// Newest returns the one item from the set that has the newest Version value. 
+// +// The result is meaningful only if the set is already filtered such that +// all of the metas have the same Name. +// +// If there isn't at least one meta in the set then this function will panic. +// Use Count() to ensure that there is at least one value before calling. +// +// If any of the metas have invalid version strings then this function will +// panic. Use ValidateVersions() first to filter out metas with invalid +// versions. +// +// If two metas have the same Version then one is arbitrarily chosen. This +// situation should be avoided by pre-filtering the set. +func (s PluginMetaSet) Newest() PluginMeta { + if len(s) == 0 { + panic("can't call Newest on empty PluginMetaSet") + } + + var first = true + var winner PluginMeta + var winnerVersion Version + for p := range s { + version, err := p.Version.Parse() + if err != nil { + panic(err) + } + + if first || version.NewerThan(winnerVersion) { + winner = p + winnerVersion = version + first = false + } + } + + return winner +} + +// ConstrainVersions takes a set of requirements and attempts to +// return a map from name to a set of metas that have the matching +// name and an appropriate version. +// +// If any of the given requirements match *no* plugins then its PluginMetaSet +// in the returned map will be empty. +// +// All viable metas are returned, so the caller can apply any desired filtering +// to reduce down to a single option. For example, calling Newest() to obtain +// the highest available version. +// +// If any of the metas in the set have invalid version strings then this +// function will panic. Use ValidateVersions() first to filter out metas with +// invalid versions. +func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet { + ret := make(map[string]PluginMetaSet) + for p := range s { + name := p.Name + allowedVersions, ok := reqd[name] + if !ok { + continue + } + if _, ok := ret[p.Name]; !ok { + ret[p.Name] = make(PluginMetaSet) + } + version, err := p.Version.Parse() + if err != nil { + panic(err) + } + if allowedVersions.Allows(version) { + ret[p.Name].Add(p) + } + } + return ret +} + +// OverridePaths returns a new set where any existing plugins with the given +// names are removed and replaced with the single path given in the map. +// +// This is here only to continue to support the legacy way of overriding +// plugin binaries in the .terraformrc file. It treats all given plugins +// as pre-versioning (version 0.0.0). This mechanism will eventually be +// phased out, with vendor directories being the intended replacement.
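A sketch of the intended filtering pipeline for a PluginMetaSet, using a hypothetical helper `newestMatching` (not part of the SDK) as if written inside this package. ValidateVersions runs first, because Newest panics on unparseable versions.

```go
// newestMatching drops metas with unparseable versions, narrows the set
// by name, then picks the newest surviving meta.
func newestMatching(s PluginMetaSet, name string) (PluginMeta, bool) {
	valid, _ := s.ValidateVersions() // Newest panics on invalid versions
	candidates := valid.WithName(name)
	if candidates.Count() == 0 {
		return PluginMeta{}, false
	}
	return candidates.Newest(), true
}
```

OverridePaths itself follows.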
+func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet { + ret := make(PluginMetaSet) + for p := range s { + if _, ok := paths[p.Name]; ok { + // Skip plugins that we're overriding + continue + } + + ret.Add(p) + } + + // Now add the metadata for overriding plugins + for name, path := range paths { + ret.Add(PluginMeta{ + Name: name, + Version: VersionZero, + Path: path, + }) + } + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go new file mode 100644 index 00000000000..0466ab25aeb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go @@ -0,0 +1,111 @@ +package discovery + +import ( + "bytes" +) + +// PluginInstallProtocolVersion is the protocol version TF-core +// supports to communicate with servers, and is used to resolve +// plugin discovery with the terraform registry, in addition to +// any specified plugin version constraints. +const PluginInstallProtocolVersion = 5 + +// PluginRequirements describes a set of plugins (assumed to be of a consistent +// kind) that are required to exist and have versions within the given +// corresponding sets. +type PluginRequirements map[string]*PluginConstraints + +// PluginConstraints represents an element of PluginRequirements describing +// the constraints for a single plugin. +type PluginConstraints struct { + // Specifies that the plugin's version must be within the given + // constraints. + Versions Constraints + + // If non-nil, the hash of the on-disk plugin executable must exactly + // match the SHA256 hash given here. + SHA256 []byte +} + +// Allows returns true if the given version is within the receiver's version +// constraints. +func (s *PluginConstraints) Allows(v Version) bool { + return s.Versions.Allows(v) +} + +// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, +// either because it matches the constraint or because there is no such +// constraint. +func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { + if s.SHA256 == nil { + return true + } + return bytes.Equal(s.SHA256, digest) +} + +// Merge takes the contents of the receiver and the other given requirements +// object and merges them together into a single requirements structure +// that satisfies both sets of requirements. +// +// Note that it doesn't make sense to merge two PluginRequirements with +// differing required plugin SHA256 hashes, since the result will never +// match any plugin. +func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { + ret := make(PluginRequirements) + for n, c := range r { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + for n, c := range other { + if existing, exists := ret[n]; exists { + ret[n].Versions = ret[n].Versions.Append(c.Versions) + + if existing.SHA256 != nil { + if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { + // If we've been asked to merge two constraints with + // different SHA256 hashes then we'll produce a dummy value + // that can never match anything. This is a silly edge case + // that no reasonable caller should hit.
+ ret[n].SHA256 = []byte(invalidProviderHash) + } + } else { + ret[n].SHA256 = c.SHA256 // might still be nil + } + } else { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + } + return ret +} + +// LockExecutables applies additional constraints to the receiver that +// require plugin executables with specific SHA256 digests. This modifies +// the receiver in-place, since it's intended to be applied after +// version constraints have been resolved. +// +// The given map must include a key for every plugin that is already +// required. If not, any missing keys will cause the corresponding plugin +// to never match, though the direct caller doesn't necessarily need to +// guarantee this as long as the downstream code _applying_ these constraints +// is able to deal with the non-match in some way. +func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { + for name, cons := range r { + digest := sha256s[name] + + if digest == nil { + // Prevent any match, which will then presumably cause the + // downstream consumer of this requirements to report an error. + cons.SHA256 = []byte(invalidProviderHash) + continue + } + + cons.SHA256 = digest + } +} + +const invalidProviderHash = "" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go new file mode 100644 index 00000000000..7bbae50c384 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go @@ -0,0 +1,19 @@ +package discovery + +import ( + "bytes" + "strings" + + "golang.org/x/crypto/openpgp" +) + +// Verify the data using the provided openpgp detached signature and the +// embedded hashicorp public key. +func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) + if err != nil { + return nil, err + } + + return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go new file mode 100644 index 00000000000..4311d510765 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go @@ -0,0 +1,77 @@ +package discovery + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" +) + +const VersionZero = "0.0.0" + +// A VersionStr is a string containing a possibly-invalid representation +// of a semver version number. Call Parse on it to obtain a real Version +// object, or discover that it is invalid. +type VersionStr string + +// Parse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s VersionStr) Parse() (Version, error) { + raw, err := version.NewVersion(string(s)) + if err != nil { + return Version{}, err + } + return Version{raw}, nil +} + +// MustParse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then it panics. 
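Taken together, Merge and LockExecutables let callers intersect version constraints and then pin the result to concrete binaries. A sketch with a hypothetical helper `lockExample` (not part of the SDK), as if written inside this package:

```go
// lockExample merges two requirement sets, intersecting their version
// constraints, then pins the result to concrete binaries by SHA-256 digest.
func lockExample(sha256s map[string][]byte) PluginRequirements {
	a := PluginRequirements{
		"aws": &PluginConstraints{Versions: ConstraintStr(">= 2.0").MustParse()},
	}
	b := PluginRequirements{
		"aws": &PluginConstraints{Versions: ConstraintStr("< 3.0").MustParse()},
	}
	merged := a.Merge(b) // "aws" must now satisfy both ">= 2.0" and "< 3.0"
	merged.LockExecutables(sha256s)
	return merged
}
```

MustParse, documented above, follows.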
+func (s VersionStr) MustParse() Version { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Version represents a version number that has been parsed from +// a semver string and known to be valid. +type Version struct { + // We wrap this here just because it avoids a proliferation of + // direct go-version imports all over the place, and keeps the + // version-processing details within this package. + raw *version.Version +} + +func (v Version) String() string { + return v.raw.String() +} + +func (v Version) NewerThan(other Version) bool { + return v.raw.GreaterThan(other.raw) +} + +func (v Version) Equal(other Version) bool { + return v.raw.Equal(other.raw) +} + +// IsPrerelease determines if version is a prerelease +func (v Version) IsPrerelease() bool { + return v.raw.Prerelease() != "" +} + +// MinorUpgradeConstraintStr returns a ConstraintStr that would permit +// minor upgrades relative to the receiving version. +func (v Version) MinorUpgradeConstraintStr() ConstraintStr { + segments := v.raw.Segments() + return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) +} + +type Versions []Version + +// Sort sorts versions from newest to oldest. +func (v Versions) Sort() { + sort.Slice(v, func(i, j int) bool { + return v[i].NewerThan(v[j]) + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go new file mode 100644 index 00000000000..de02f5ec5b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go @@ -0,0 +1,89 @@ +package discovery + +import ( + "sort" + + version "github.com/hashicorp/go-version" +) + +// A ConstraintStr is a string containing a possibly-invalid representation +// of a version constraint provided in configuration. Call Parse on it to +// obtain a real Constraint object, or discover that it is invalid. +type ConstraintStr string + +// Parse transforms a ConstraintStr into a Constraints if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s ConstraintStr) Parse() (Constraints, error) { + raw, err := version.NewConstraint(string(s)) + if err != nil { + return Constraints{}, err + } + return Constraints{raw}, nil +} + +// MustParse is like Parse but it panics if the constraint string is invalid. +func (s ConstraintStr) MustParse() Constraints { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Constraints represents a set of versions which any given Version is either +// a member of or not. +type Constraints struct { + raw version.Constraints +} + +// NewConstraints creates a Constraints based on a version.Constraints. +func NewConstraints(c version.Constraints) Constraints { + return Constraints{c} +} + +// AllVersions is a Constraints containing all versions +var AllVersions Constraints + +func init() { + AllVersions = Constraints{ + raw: make(version.Constraints, 0), + } +} + +// Allows returns true if the given version is permitted by the receiving +// constraints set. +func (s Constraints) Allows(v Version) bool { + return s.raw.Check(v.raw) +} + +// Append combines the receiving set with the given other set to produce +// a set that is the intersection of both sets, which is to say that the resulting +// constraints contain only the versions that are members of both.
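A sketch of the intersection semantics described above, using a hypothetical helper `intersectExample` (not part of the SDK) as if written inside this package:

```go
// intersectExample demonstrates Append's intersection behavior: the
// combined constraint admits only versions satisfying both operands.
func intersectExample() bool {
	a := ConstraintStr("~> 1.0").MustParse() // >= 1.0, < 2.0
	b := ConstraintStr(">= 1.4").MustParse()
	both := a.Append(b)
	// 1.5.0 satisfies both operands; 1.2.0 fails ">= 1.4".
	return both.Allows(VersionStr("1.5.0").MustParse()) &&
		!both.Allows(VersionStr("1.2.0").MustParse())
}
```

Append's implementation follows.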
+func (s Constraints) Append(other Constraints) Constraints { + raw := make(version.Constraints, 0, len(s.raw)+len(other.raw)) + + // Since "raw" is a list of constraints that remove versions from the set, + // "Intersection" is implemented by concatenating together those lists, + // thus leaving behind only the versions not removed by either list. + raw = append(raw, s.raw...) + raw = append(raw, other.raw...) + + // while the set is unordered, we sort these lexically for consistent output + sort.Slice(raw, func(i, j int) bool { + return raw[i].String() < raw[j].String() + }) + + return Constraints{raw} +} + +// String returns a string representation of the set members as a set +// of range constraints. +func (s Constraints) String() string { + return s.raw.String() +} + +// Unconstrained returns true if and only if the receiver is an empty +// constraint set. +func (s Constraints) Unconstrained() bool { + return len(s.raw) == 0 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go new file mode 100644 index 00000000000..0f48f2447d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go @@ -0,0 +1,47 @@ +package providers + +import ( + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// AddressedTypes is a helper that extracts all of the distinct provider +// types from the given list of relative provider configuration addresses. +func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} + +// AddressedTypesAbs is a helper that extracts all of the distinct provider +// types from the given list of absolute provider configuration addresses. +func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.ProviderConfig.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go new file mode 100644 index 00000000000..39aa1de60f7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go @@ -0,0 +1,3 @@ +// Package providers contains the interface and primary types required to +// implement a Terraform resource provider. 
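Looking back at addressed_types.go above, the helpers simply de-duplicate and sort provider type names. A sketch with a hypothetical `typesExample` (not part of the SDK), as if written alongside that file; the composite-literal field follows the `addr.Type` usage shown above.

```go
// typesExample shows duplicate provider configurations collapsing to one
// sorted entry per type.
func typesExample() []string {
	cfgs := []addrs.ProviderConfig{
		{Type: "google"},
		{Type: "aws"},
		{Type: "aws"},
	}
	return AddressedTypes(cfgs) // ["aws", "google"]
}
```

The providers package declaration that the comment above introduces follows.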
+package providers diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go new file mode 100644 index 00000000000..3d0aa8ec9eb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go @@ -0,0 +1,359 @@ +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetSchema returns the complete schema for the provider. + GetSchema() GetSchemaResponse + + // PrepareProviderConfig allows the provider to validate the configuration + // values, and set or override any values with defaults. + PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse + + // ValidateResourceTypeConfig allows the provider to validate the resource + // configuration values. + ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse + + // ValidateDataSourceConfig allows the provider to validate the data source + // configuration values. + ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initializes the provider. + Configure(ConfigureRequest) ConfigureResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provider after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state. + PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. + ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // Close shuts down the plugin process if applicable.
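A sketch of how a caller might drive part of this lifecycle (schema, configure, read), using a hypothetical helper `refresh` (not part of the SDK) as if written in this package, with a made-up resource type name:

```go
// refresh fetches the provider schema, configures the provider, then
// reads the current state of one resource instance.
func refresh(p Interface, config, prior cty.Value) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	schema := p.GetSchema()
	diags = diags.Append(schema.Diagnostics)
	if diags.HasErrors() {
		return cty.NilVal, diags
	}

	cfg := p.Configure(ConfigureRequest{
		TerraformVersion: "0.12.0", // illustrative version string
		Config:           config,
	})
	diags = diags.Append(cfg.Diagnostics)
	if diags.HasErrors() {
		return cty.NilVal, diags
	}

	resp := p.ReadResource(ReadResourceRequest{
		TypeName:   "example_thing", // hypothetical resource type
		PriorState: prior,
	})
	return resp.NewState, diags.Append(resp.Diagnostics)
}
```

Close, the final method of the interface, follows.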
+ Close() error +} + +type GetSchemaResponse struct { + // Provider is the schema for the provider itself. + Provider Schema + + // ResourceTypes maps the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +type Schema struct { + Version int64 + Block *configschema.Block +} + +type PrepareProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type PrepareProviderConfigResponse struct { + // PreparedConfig is the configuration as prepared by the provider. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceTypeConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateResourceTypeConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateDataSourceConfigRequest struct { + // TypeName is the name of the data source type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateDataSourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type UpgradeResourceStateRequest struct { + // TypeName is the name of the resource type being upgraded + TypeName string + + // Version is the version of the schema that created the current state. + Version int64 + + // RawStateJSON and RawStateFlatmap contain the state that needs to be + // upgraded to match the current schema version. Because the schema is + // unknown, this contains only the raw data as stored in the state. + // RawStateJSON is the current json state encoding. + // RawStateFlatmap is the legacy flatmap encoding. + // Only one of these fields may be set for the upgrade request. + RawStateJSON []byte + RawStateFlatmap map[string]string +} + +type UpgradeResourceStateResponse struct { + // UpgradedState is the newly upgraded resource state. + UpgradedState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ConfigureRequest struct { + // TerraformVersion is the version string from the running instance of + // terraform. Providers can use TerraformVersion to verify compatibility, + // and to store for informational purposes. + TerraformVersion string + + // Config is the complete configuration value for the provider. + Config cty.Value +} + +type ConfigureResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ReadResourceRequest struct { + // TypeName is the name of the resource type being read. + TypeName string + + // PriorState contains the previously saved state value for this resource.
+ PriorState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type ReadResourceResponse struct { + // NewState contains the current state of the resource. + NewState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type PlanResourceChangeRequest struct { + // TypeName is the name of the resource type to plan. + TypeName string + + // PriorState is the previously saved state value for this resource. + PriorState cty.Value + + // ProposedNewState is the expected state after the new configuration is + // applied. This is created by directly applying the configuration to the + // PriorState. The provider is then responsible for applying any further + // changes required to create the proposed final state. + ProposedNewState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the ProposedNewState in most circumstances. + Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. + PriorPrivate []byte +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of the attributes that require + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by terraform + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of the resource. + PriorState cty.Value + + // PlannedState is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes. + PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. + Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change.
+ // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. + TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. 
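A sketch of the import hand-off described above: each ImportedResource returned by the provider becomes a ready state object via AsInstanceObject. The helper `importToState` is hypothetical (not part of the SDK), as if written in this package.

```go
// importToState asks the provider to import one resource and converts
// every returned ImportedResource into a state object with status
// ObjectReady.
func importToState(p Interface, typeName, id string) ([]*states.ResourceInstanceObject, tfdiags.Diagnostics) {
	resp := p.ImportResourceState(ImportResourceStateRequest{
		TypeName: typeName,
		ID:       id,
	})
	objs := make([]*states.ResourceInstanceObject, 0, len(resp.ImportedResources))
	for _, ir := range resp.ImportedResources {
		objs = append(objs, ir.AsInstanceObject())
	}
	return objs, resp.Diagnostics
}
```

The Diagnostics field documented above closes ReadDataSourceResponse.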
+ Diagnostics tfdiags.Diagnostics +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go new file mode 100644 index 00000000000..f5529ff32af --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go @@ -0,0 +1,112 @@ +package providers + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Resolver is an interface implemented by objects that are able to resolve +// a given set of resource provider version constraints into Factory +// callbacks. +type Resolver interface { + // Given a constraint map, return a Factory for each requested provider. + // If some or all of the constraints cannot be satisfied, return a non-nil + // slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) +} + +// ResolverFunc wraps a callback function and turns it into a Resolver +// implementation, for convenience in situations where a function and its +// associated closure are sufficient as a resolver implementation. +type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) + +// ResolveProviders implements Resolver by calling the +// wrapped function. +func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + return f(reqd) +} + +// ResolverFixed returns a Resolver that has a fixed set of provider factories +// provided by the caller. The returned resolver ignores version constraints +// entirely and just returns the given factory for each requested provider +// name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResolverFixed(factories map[string]Factory) Resolver { + return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + ret := make(map[string]Factory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} + +// ProviderHasResource is a helper that requests schema from the given provider +// and checks if it has a resource type of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. 
if the provider is accessed
+// over an RPC channel that has operational problems), this function will
+// return false if the schema cannot be retrieved, under the assumption that
+// a subsequent call to do anything with the resource type would fail
+// anyway.
+func ProviderHasResource(provider Interface, typeName string) bool {
+	resp := provider.GetSchema()
+	if resp.Diagnostics.HasErrors() {
+		return false
+	}
+
+	_, exists := resp.ResourceTypes[typeName]
+	return exists
+}
+
+// ProviderHasDataSource is a helper that requests schema from the given
+// provider and checks if it has a data source of the given name.
+//
+// This function is more expensive than it may first appear since it must
+// retrieve the entire schema from the underlying provider, and so it should
+// be used sparingly and especially not in tight loops.
+//
+// Since retrieving the provider may fail (e.g. if the provider is accessed
+// over an RPC channel that has operational problems), this function will
+// return false if the schema cannot be retrieved, under the assumption that
+// a subsequent call to do anything with the data source would fail
+// anyway.
+func ProviderHasDataSource(provider Interface, dataSourceName string) bool {
+	resp := provider.GetSchema()
+	if resp.Diagnostics.HasErrors() {
+		return false
+	}
+
+	_, exists := resp.DataSources[dataSourceName]
+	return exists
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go
new file mode 100644
index 00000000000..b03ba9a1bbd
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go
@@ -0,0 +1,3 @@
+// Package provisioners contains the interface and primary types to implement a
+// Terraform resource provisioner.
+package provisioners
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go
new file mode 100644
index 00000000000..590b97a84fe
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go
@@ -0,0 +1,19 @@
+package provisioners
+
+// Factory is a function type that creates a new instance of a resource
+// provisioner, or returns an error if that is impossible.
+type Factory func() (Interface, error)
+
+// FactoryFixed is a helper that creates a Factory that just returns some given
+// single provisioner.
+//
+// Unlike usual factories, the exact same instance is returned for each call
+// to the factory and so this must be used in only specialized situations where
+// the caller can take care to either not mutate the given provisioner at all
+// or to mutate it in ways that will not cause unexpected behavior for others
+// holding the same reference.
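+//
+// A minimal usage sketch (hypothetical; prov is assumed to be some value
+// implementing Interface, such as a mock used in tests):
+//
+//     factory := FactoryFixed(prov)
+//     p, err := factory() // always returns the same prov instance, err is nil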
+func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go new file mode 100644 index 00000000000..7d8f4076ba4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go @@ -0,0 +1,82 @@ +package provisioners + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. + GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. + // ProvisionResource blocks until the execution is complete. + // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. + ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. + UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
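+	// Per the ProvisionResource contract above, returning one or more error
+	// diagnostics here leaves the resource in a tainted state.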
+ Diagnostics tfdiags.Diagnostics +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go new file mode 100644 index 00000000000..313951b38f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go @@ -0,0 +1,343 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + requestTimeout = 10 * time.Second + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" +) + +var tfVersion = version.String() + +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. + client *http.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = httpclient.New() + client.Timeout = requestTimeout + } + + services.Transport = client.Transport + + return &Client{ + client: client, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, &ServiceUnreachableError{err} + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// ModuleVersions queries the registry for a module, and returns the available versions. 
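+//
+// A hypothetical call sketch (assumes a *Client from NewClient and a module
+// source already parsed with regsrc.ParseModuleSource):
+//
+//     mod, _ := regsrc.ParseModuleSource("namespace/name/provider")
+//     versions, err := c.ModuleVersions(mod)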
+func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
+	host, err := module.SvcHost()
+	if err != nil {
+		return nil, err
+	}
+
+	service, err := c.Discover(host, modulesServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := url.Parse(path.Join(module.Module(), "versions"))
+	if err != nil {
+		return nil, err
+	}
+
+	service = service.ResolveReference(p)
+
+	log.Printf("[DEBUG] fetching module versions from %q", service)
+
+	req, err := http.NewRequest("GET", service.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// OK
+	case http.StatusNotFound:
+		return nil, &errModuleNotFound{addr: module}
+	default:
+		return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
+	}
+
+	var versions response.ModuleVersions
+
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&versions); err != nil {
+		return nil, err
+	}
+
+	for _, mod := range versions.Modules {
+		for _, v := range mod.Versions {
+			log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source)
+		}
+	}
+
+	return &versions, nil
+}
+
+func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
+	creds, err := c.services.CredentialsForHost(host)
+	if err != nil {
+		log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
+		return
+	}
+
+	if creds != nil {
+		creds.PrepareRequest(req)
+	}
+}
+
+// ModuleLocation finds the download location for a specific module version.
+// This returns a string, because the final location may contain special go-getter syntax.
+func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) {
+	host, err := module.SvcHost()
+	if err != nil {
+		return "", err
+	}
+
+	service, err := c.Discover(host, modulesServiceID)
+	if err != nil {
+		return "", err
+	}
+
+	var p *url.URL
+	if version == "" {
+		p, err = url.Parse(path.Join(module.Module(), "download"))
+	} else {
+		p, err = url.Parse(path.Join(module.Module(), version, "download"))
+	}
+	if err != nil {
+		return "", err
+	}
+	download := service.ResolveReference(p)
+
+	log.Printf("[DEBUG] looking up module location from %q", download)
+
+	req, err := http.NewRequest("GET", download.String(), nil)
+	if err != nil {
+		return "", err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// there should be no body, but save it for logging
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("error reading response body from registry: %s", err)
+	}
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusNoContent:
+		// OK
+	case http.StatusNotFound:
+		return "", fmt.Errorf("module %q version %q not found", module, version)
+	default:
+		// anything else is an error:
+		return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body)
+	}
+
+	// the download location is in the X-Terraform-Get header
+	location := resp.Header.Get(xTerraformGet)
+	if location == "" {
+		return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body)
+	}
+
+	// If location looks like it's trying to be a relative URL, treat it as
+	// one.
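+	// For example, a header value like "./module.tar.gz" (illustrative only)
+	// is resolved against the download URL that served this response.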
+	//
+	// We don't do this for just _any_ location, since the X-Terraform-Get
+	// header is a go-getter location rather than a URL, and so not all
+	// possible values will parse reasonably as URLs.
+	//
+	// When used in conjunction with go-getter we normally require this header
+	// to be an absolute URL, but we are more liberal here because third-party
+	// registry implementations may not "know" their own absolute URLs if
+	// e.g. they are running behind a reverse proxy frontend, or such.
+	if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") {
+		locationURL, err := url.Parse(location)
+		if err != nil {
+			return "", fmt.Errorf("invalid relative URL for %q: %s", module, err)
+		}
+		locationURL = download.ResolveReference(locationURL)
+		location = locationURL.String()
+	}
+
+	return location, nil
+}
+
+// TerraformProviderVersions queries the registry for a provider, and returns the available versions.
+func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) {
+	host, err := provider.SvcHost()
+	if err != nil {
+		return nil, err
+	}
+
+	service, err := c.Discover(host, providersServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions"))
+	if err != nil {
+		return nil, err
+	}
+
+	service = service.ResolveReference(p)
+
+	log.Printf("[DEBUG] fetching provider versions from %q", service)
+
+	req, err := http.NewRequest("GET", service.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// OK
+	case http.StatusNotFound:
+		return nil, &errProviderNotFound{addr: provider}
+	default:
+		return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status)
+	}
+
+	var versions response.TerraformProviderVersions
+
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&versions); err != nil {
+		return nil, err
+	}
+
+	return &versions, nil
+}
+
+// TerraformProviderLocation queries the registry for provider download metadata.
+func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) {
+	host, err := provider.SvcHost()
+	if err != nil {
+		return nil, err
+	}
+
+	service, err := c.Discover(host, providersServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := url.Parse(path.Join(
+		provider.TerraformProvider(),
+		version,
+		"download",
+		provider.OS,
+		provider.Arch,
+	))
+	if err != nil {
+		return nil, err
+	}
+
+	service = service.ResolveReference(p)
+
+	log.Printf("[DEBUG] fetching provider location from %q", service)
+
+	req, err := http.NewRequest("GET", service.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var loc response.TerraformProviderPlatformLocation
+
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&loc); err != nil {
+		return nil, err
+	}
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusNoContent:
+		// OK
+	case http.StatusNotFound:
+		return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version)
+	default:
+		// anything else is an error:
+		return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status)
+	}
+
+	return &loc, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go
new file mode 100644
index 00000000000..8b22ec14229
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go
@@ -0,0 +1,63 @@
+package registry
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco"
+)
+
+type errModuleNotFound struct {
+	addr *regsrc.Module
+}
+
+func (e *errModuleNotFound) Error() string {
+	return fmt.Sprintf("module %s not found", e.addr)
+}
+
+// IsModuleNotFound returns true only if the given error is a "module not found"
+// error. This allows callers to recognize this particular error condition
+// as distinct from operational errors such as poor network connectivity.
+func IsModuleNotFound(err error) bool {
+	_, ok := err.(*errModuleNotFound)
+	return ok
+}
+
+type errProviderNotFound struct {
+	addr *regsrc.TerraformProvider
+}
+
+func (e *errProviderNotFound) Error() string {
+	return fmt.Sprintf("provider %s not found", e.addr)
+}
+
+// IsProviderNotFound returns true only if the given error is a "provider not found"
+// error. This allows callers to recognize this particular error condition
+// as distinct from operational errors such as poor network connectivity.
+func IsProviderNotFound(err error) bool {
+	_, ok := err.(*errProviderNotFound)
+	return ok
+}
+
+// IsServiceNotProvided returns true only if the given error is a "service not provided"
+// error. This allows callers to recognize this particular error condition
+// as distinct from operational errors such as poor network connectivity.
+func IsServiceNotProvided(err error) bool {
+	_, ok := err.(*disco.ErrServiceNotProvided)
+	return ok
+}
+
+// ServiceUnreachableError indicates that the registry service is unreachable.
+type ServiceUnreachableError struct {
+	err error
+}
+
+func (e *ServiceUnreachableError) Error() string {
+	return e.err.Error()
+}
+
+// IsServiceUnreachable returns true if the registry/discovery service was unreachable.
+func IsServiceUnreachable(err error) bool {
+	_, ok := err.(*ServiceUnreachableError)
+	return ok
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
new file mode 100644
index 00000000000..e67942a713a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
@@ -0,0 +1,140 @@
+package regsrc
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/svchost"
+)
+
+var (
+	// InvalidHostString is a placeholder returned when a raw host can't be
+	// converted by IDNA spec. It will never be returned for any host for which
+	// Valid() is true.
+	InvalidHostString = "<invalid host>"
+
+	// urlLabelEndSubRe is a sub-expression that matches any character that's
+	// allowed at the start or end of a URL label according to RFC1123.
+	urlLabelEndSubRe = "[0-9A-Za-z]"
+
+	// urlLabelMidSubRe is a sub-expression that matches any character that's
+	// allowed in a non-start, non-end position of a URL label according to
+	// RFC1123.
+	urlLabelMidSubRe = "[0-9A-Za-z-]"
+
+	// urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char
+	// in an IDN (Unicode) display URL. It's not strict - there are only ~15k
+	// valid Unicode points in IDN RFC (some with conditions). We are just going
+	// with being liberal with matching and then erroring if we fail to convert
+	// to punycode later (which validates chars fully). This at least ensures
+	// ascii chars disallowed by the RFC1123 parts above don't become legal
+	// again.
+	urlLabelUnicodeSubRe = "[^[:ascii:]]"
+
+	// hostLabelSubRe is the sub-expression that matches a valid hostname label.
+	// It does not anchor the start or end so it can be composed into more
+	// complex RegExps below. Note that for sanity we don't handle disallowing
+	// raw punycode in this regexp (esp. since re2 doesn't support negative
+	// lookbehind, but we can capture its presence here to check later).
+	hostLabelSubRe = "" +
+		// Match valid initial char, or unicode char
+		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
+		// Optionally, match 0 to 61 valid URL or Unicode chars,
+		// followed by one valid end char or unicode char
+		"(?:" +
+		"(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" +
+		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
+		")?"
+
+	// hostSubRe is the sub-expression that matches a valid host prefix.
+	// Allows custom port.
+	hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?"
+
+	// hostRe is a regexp that matches a valid host prefix. Additional
+	// validation of unicode strings is needed for matches.
+	hostRe = regexp.MustCompile("^" + hostSubRe + "$")
+)
+
+// FriendlyHost describes a registry instance identified in source strings by a
+// simple bare hostname like registry.terraform.io.
+type FriendlyHost struct {
+	Raw string
+}
+
+func NewFriendlyHost(host string) *FriendlyHost {
+	return &FriendlyHost{Raw: host}
+}
+
+// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the
+// given string. If no valid prefix is found, host will be nil and rest will
+// contain the full source string. The host prefix must terminate at the end of
+// the input or at the first / character. If one or more characters exist after
+// the first /, they will be returned as rest (without the / delimiter).
+// Hostnames containing punycode WILL be parsed successfully since they may have
+// come from an internal normalized source string, however should be considered
+// invalid if the string came from a user directly. This must be checked
+// explicitly for user-input strings by calling Valid() on the
+// returned host.
+func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {
+	parts := strings.SplitN(source, "/", 2)
+
+	if hostRe.MatchString(parts[0]) {
+		host = &FriendlyHost{Raw: parts[0]}
+		if len(parts) == 2 {
+			rest = parts[1]
+		}
+		return
+	}
+
+	// No match, return whole string as rest along with nil host
+	rest = source
+	return
+}
+
+// Valid returns whether the host prefix is considered valid in any case.
+// Examples of invalid prefixes include ones that don't conform to the host
+// name specifications. Note that IDN prefixes containing punycode are not valid
+// input, which we expect to always be in user-input or normalised display form.
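+//
+// For instance (illustrative values only), a host parsed from
+// "registry.terraform.io" is valid, while one containing characters that are
+// illegal in hostnames is not.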
+func (h *FriendlyHost) Valid() bool { + return svchost.IsValid(h.Raw) +} + +// Display returns the host formatted for display to the user in CLI or web +// output. +func (h *FriendlyHost) Display() string { + return svchost.ForDisplay(h.Raw) +} + +// Normalized returns the host formatted for internal reference or comparison. +func (h *FriendlyHost) Normalized() string { + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return InvalidHostString + } + return string(host) +} + +// String returns the host formatted as the user originally typed it assuming it +// was parsed from user input. +func (h *FriendlyHost) String() string { + return h.Raw +} + +// Equal compares the FriendlyHost against another instance taking normalization +// into account. Invalid hosts cannot be compared and will always return false. +func (h *FriendlyHost) Equal(other *FriendlyHost) bool { + if other == nil { + return false + } + + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false + } + + return otherHost == host +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go new file mode 100644 index 00000000000..4c83afadb30 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go @@ -0,0 +1,205 @@ +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. (GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. + providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. 
+	ProviderRe = regexp.MustCompile("^" + providerSubRe + "$")
+
+	// these hostnames are not allowed as registry sources, because they are
+	// already special case module sources in terraform.
+	disallowed = map[string]bool{
+		"github.com":    true,
+		"bitbucket.org": true,
+	}
+)
+
+// Module describes a Terraform Registry Module source.
+type Module struct {
+	// RawHost is the friendly host prefix if one was present. It might be nil
+	// if the original source had no host prefix which implies
+	// PublicRegistryHost but is distinct from having an actual pointer to
+	// PublicRegistryHost since it encodes the fact the original string didn't
+	// include a host prefix at all which is significant for recovering actual
+	// input not just normalized form. Most callers should access it with Host()
+	// which will return public registry host instance if it's nil.
+	RawHost      *FriendlyHost
+	RawNamespace string
+	RawName      string
+	RawProvider  string
+	RawSubmodule string
+}
+
+// NewModule constructs a new module source from separate parts. Pass an empty
+// string if host or submodule are not needed.
+func NewModule(host, namespace, name, provider, submodule string) (*Module, error) {
+	m := &Module{
+		RawNamespace: namespace,
+		RawName:      name,
+		RawProvider:  provider,
+		RawSubmodule: submodule,
+	}
+	if host != "" {
+		h := NewFriendlyHost(host)
+		if h != nil {
+			if !h.Valid() || disallowed[h.Display()] {
+				return nil, ErrInvalidModuleSource
+			}
+		}
+		m.RawHost = h
+	}
+	return m, nil
+}
+
+// ParseModuleSource attempts to parse source as a Terraform registry module
+// source. If the string is not found to be in a valid format,
+// ErrInvalidModuleSource is returned. Note that this can only be used on
+// "input" strings, e.g. either ones supplied by the user or potentially
+// normalised but in Display form (unicode). It will fail to parse a source with
+// a punycoded domain since this is not permitted input from a user. If you have
+// an already normalized string internally, you can compare it without parsing
+// by comparing it with the normalized version of the subject using the normal
+// string equality operator.
+func ParseModuleSource(source string) (*Module, error) {
+	// See if there is a friendly host prefix.
+	host, rest := ParseFriendlyHost(source)
+	if host != nil {
+		if !host.Valid() || disallowed[host.Display()] {
+			return nil, ErrInvalidModuleSource
+		}
+	}
+
+	matches := moduleSourceRe.FindStringSubmatch(rest)
+	if len(matches) < 4 {
+		return nil, ErrInvalidModuleSource
+	}
+
+	m := &Module{
+		RawHost:      host,
+		RawNamespace: matches[1],
+		RawName:      matches[2],
+		RawProvider:  matches[3],
+	}
+
+	if len(matches) == 5 {
+		m.RawSubmodule = matches[4]
+	}
+
+	return m, nil
+}
+
+// Display returns the source formatted for display to the user in CLI or web
+// output.
+func (m *Module) Display() string {
+	return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false)
+}
+
+// Normalized returns the source formatted for internal reference or comparison.
+func (m *Module) Normalized() string {
+	return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false)
+}
+
+// String returns the source formatted as the user originally typed it assuming
+// it was parsed from user input.
+func (m *Module) String() string {
+	// Don't normalize public registry hostname - leave it exactly as the user
+	// input it.
+ hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had not host component this will return the +// PublicRegistryHost. +func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. +func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go new file mode 100644 index 00000000000..c430bf14133 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go @@ -0,0 +1,8 @@ +// Package regsrc provides helpers for working with source strings that identify +// resources within a Terraform registry. +package regsrc + +var ( + // PublicRegistryHost is a FriendlyHost that represents the public registry. + PublicRegistryHost = NewFriendlyHost("registry.terraform.io") +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go new file mode 100644 index 00000000000..42ab3f77c08 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go @@ -0,0 +1,60 @@ +package regsrc + +import ( + "fmt" + "runtime" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +var ( + // DefaultProviderNamespace represents the namespace for canonical + // HashiCorp-controlled providers. + DefaultProviderNamespace = "-" +) + +// TerraformProvider describes a Terraform Registry Provider source. 
+type TerraformProvider struct {
+	RawHost      *FriendlyHost
+	RawNamespace string
+	RawName      string
+	OS           string
+	Arch         string
+}
+
+// NewTerraformProvider constructs a new provider source.
+func NewTerraformProvider(name, os, arch string) *TerraformProvider {
+	if os == "" {
+		os = runtime.GOOS
+	}
+	if arch == "" {
+		arch = runtime.GOARCH
+	}
+
+	// separate namespace if included
+	namespace := DefaultProviderNamespace
+	if names := strings.SplitN(name, "/", 2); len(names) == 2 {
+		namespace, name = names[0], names[1]
+	}
+	p := &TerraformProvider{
+		RawHost:      PublicRegistryHost,
+		RawNamespace: namespace,
+		RawName:      name,
+		OS:           os,
+		Arch:         arch,
+	}
+
+	return p
+}
+
+// TerraformProvider returns just the registry ID of the provider.
+func (p *TerraformProvider) TerraformProvider() string {
+	return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName)
+}
+
+// SvcHost returns the svchost.Hostname for this provider. The
+// default PublicRegistryHost is returned.
+func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) {
+	return svchost.ForComparison(PublicRegistryHost.Raw)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go
new file mode 100644
index 00000000000..3bd2b3df219
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go
@@ -0,0 +1,93 @@
+package response
+
+import (
+	"time"
+)
+
+// Module is the response structure with the data for a single module version.
+type Module struct {
+	ID string `json:"id"`
+
+	//---------------------------------------------------------------
+	// Metadata about the overall module.
+
+	Owner       string    `json:"owner"`
+	Namespace   string    `json:"namespace"`
+	Name        string    `json:"name"`
+	Version     string    `json:"version"`
+	Provider    string    `json:"provider"`
+	Description string    `json:"description"`
+	Source      string    `json:"source"`
+	PublishedAt time.Time `json:"published_at"`
+	Downloads   int       `json:"downloads"`
+	Verified    bool      `json:"verified"`
+}
+
+// ModuleDetail represents a module in full detail.
+type ModuleDetail struct {
+	Module
+
+	//---------------------------------------------------------------
+	// Metadata about the overall module. This is only available when
+	// requesting the specific module (not in list responses).
+
+	// Root is the root module.
+	Root *ModuleSubmodule `json:"root"`
+
+	// Submodules are the other submodules that are available within
+	// this module.
+	Submodules []*ModuleSubmodule `json:"submodules"`
+
+	//---------------------------------------------------------------
+	// The fields below are only set when requesting this specific
+	// module. They are available to easily know all available versions
+	// and providers without multiple API calls.
+
+	Providers []string `json:"providers"` // All available providers
+	Versions  []string `json:"versions"`  // All versions
+}
+
+// ModuleSubmodule is the metadata about a specific submodule within
+// a module. This includes the root module as a special case.
+type ModuleSubmodule struct {
+	Path   string `json:"path"`
+	Readme string `json:"readme"`
+	Empty  bool   `json:"empty"`
+
+	Inputs       []*ModuleInput    `json:"inputs"`
+	Outputs      []*ModuleOutput   `json:"outputs"`
+	Dependencies []*ModuleDep      `json:"dependencies"`
+	Resources    []*ModuleResource `json:"resources"`
+}
+
+// ModuleInput is an input for a module.
+type ModuleInput struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	Default     string `json:"default"`
+}
+
+// ModuleOutput is an output for a module.
+type ModuleOutput struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+}
+
+// ModuleDep describes a dependency on another module.
+type ModuleDep struct {
+	Name    string `json:"name"`
+	Source  string `json:"source"`
+	Version string `json:"version"`
+}
+
+// ModuleProviderDep is the output for a provider dependency.
+type ModuleProviderDep struct {
+	Name    string `json:"name"`
+	Version string `json:"version"`
+}
+
+// ModuleResource describes a resource managed by a module.
+type ModuleResource struct {
+	Name string `json:"name"`
+	Type string `json:"type"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_list.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_list.go
new file mode 100644
index 00000000000..978374822f4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_list.go
@@ -0,0 +1,7 @@
+package response
+
+// ModuleList is the response structure for a pageable list of modules.
+type ModuleList struct {
+	Meta    PaginationMeta `json:"meta"`
+	Modules []*Module      `json:"modules"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_provider.go
new file mode 100644
index 00000000000..e48499dceed
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_provider.go
@@ -0,0 +1,14 @@
+package response
+
+// ModuleProvider represents a single provider for modules.
+type ModuleProvider struct {
+	Name        string `json:"name"`
+	Downloads   int    `json:"downloads"`
+	ModuleCount int    `json:"module_count"`
+}
+
+// ModuleProviderList is the response structure for a pageable list of ModuleProviders.
+type ModuleProviderList struct {
+	Meta      PaginationMeta    `json:"meta"`
+	Providers []*ModuleProvider `json:"providers"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go
new file mode 100644
index 00000000000..f69e9750c23
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go
@@ -0,0 +1,32 @@
+package response
+
+// ModuleVersions is the response format that contains all metadata about module
+// versions needed for terraform CLI to resolve version constraints. See RFC
+// TF-042 for details on this format.
+type ModuleVersions struct {
+	Modules []*ModuleProviderVersions `json:"modules"`
+}
+
+// ModuleProviderVersions is the response format for a single module instance,
+// containing metadata about all versions and their dependencies.
+type ModuleProviderVersions struct {
+	Source   string           `json:"source"`
+	Versions []*ModuleVersion `json:"versions"`
+}
+
+// ModuleVersion is the output metadata for a given version needed by CLI to
+// resolve candidate versions to satisfy requirements.
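+//
+// A minimal illustrative sketch of the JSON this decodes from:
+//
+//     {"version": "1.2.0", "root": {"providers": [], "dependencies": []}}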
+type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. +type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go new file mode 100644 index 00000000000..75a925490a2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go @@ -0,0 +1,65 @@ +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. +type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). + if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider.go new file mode 100644 index 00000000000..5e8bae35437 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider.go @@ -0,0 +1,36 @@ +package response + +import ( + "time" +) + +// Provider is the response structure with the data for a single provider +// version. This is just the metadata. A full provider response will be +// ProviderDetail. 
+type Provider struct {
+	ID string `json:"id"`
+
+	//---------------------------------------------------------------
+	// Metadata about the overall provider.
+
+	Owner       string    `json:"owner"`
+	Namespace   string    `json:"namespace"`
+	Name        string    `json:"name"`
+	Version     string    `json:"version"`
+	Description string    `json:"description"`
+	Source      string    `json:"source"`
+	PublishedAt time.Time `json:"published_at"`
+	Downloads   int       `json:"downloads"`
+}
+
+// ProviderDetail represents a Provider with full detail.
+type ProviderDetail struct {
+	Provider
+
+	//---------------------------------------------------------------
+	// The fields below are only set when requesting this specific
+	// provider. They are available to easily know all available versions
+	// without multiple API calls.
+
+	Versions []string `json:"versions"` // All versions
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider_list.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider_list.go
new file mode 100644
index 00000000000..1dc7d237fd7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/provider_list.go
@@ -0,0 +1,7 @@
+package response
+
+// ProviderList is the response structure for a pageable list of providers.
+type ProviderList struct {
+	Meta      PaginationMeta `json:"meta"`
+	Providers []*Provider    `json:"providers"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/redirect.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/redirect.go
new file mode 100644
index 00000000000..d5eb49ba665
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/redirect.go
@@ -0,0 +1,6 @@
+package response
+
+// Redirect causes the frontend to perform a window redirect.
+type Redirect struct {
+	URL string `json:"url"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go
new file mode 100644
index 00000000000..c2c333b0dca
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go
@@ -0,0 +1,95 @@
+package response
+
+import (
+	"sort"
+	"strings"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// TerraformProvider is the response structure for all required information for
+// Terraform to choose a download URL. It must include all versions and all
+// platforms for Terraform to perform version and os/arch constraint matching
+// locally.
+type TerraformProvider struct {
+	ID string `json:"id"`
+
+	Versions []*TerraformProviderVersion `json:"versions"`
+}
+
+// TerraformProviderVersion is the Terraform-specific response structure for a
+// provider version.
+type TerraformProviderVersion struct {
+	Version   string   `json:"version"`
+	Protocols []string `json:"protocols"`
+
+	Platforms []*TerraformProviderPlatform `json:"platforms"`
+}
+
+// TerraformProviderVersions is the Terraform-specific response structure for an
+// array of provider versions.
+type TerraformProviderVersions struct {
+	ID       string                      `json:"id"`
+	Versions []*TerraformProviderVersion `json:"versions"`
+	Warnings []string                    `json:"warnings"`
+}
+
+// TerraformProviderPlatform is the Terraform-specific response structure for a
+// provider platform.
+type TerraformProviderPlatform struct {
+	OS   string `json:"os"`
+	Arch string `json:"arch"`
+}
+
+// TerraformProviderPlatformLocation is the Terraform-specific response
+// structure for a provider platform with all details required to perform a
+// download.
+type TerraformProviderPlatformLocation struct {
+	Protocols           []string `json:"protocols"`
+	OS                  string   `json:"os"`
+	Arch                string   `json:"arch"`
+	Filename            string   `json:"filename"`
+	DownloadURL         string   `json:"download_url"`
+	ShasumsURL          string   `json:"shasums_url"`
+	ShasumsSignatureURL string   `json:"shasums_signature_url"`
+	Shasum              string   `json:"shasum"`
+
+	SigningKeys SigningKeyList `json:"signing_keys"`
+}
+
+// SigningKeyList is the response structure for a list of signing keys.
+type SigningKeyList struct {
+	GPGKeys []*GPGKey `json:"gpg_public_keys"`
+}
+
+// GPGKey is the response structure for a GPG key.
+type GPGKey struct {
+	ASCIIArmor string  `json:"ascii_armor"`
+	Source     string  `json:"source"`
+	SourceURL  *string `json:"source_url"`
+}
+
+// ProviderVersionCollection is a sortable collection of
+// TerraformProviderVersion values.
+type ProviderVersionCollection []*TerraformProviderVersion
+
+// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg
+// keys in the response.
+func (signingKeys *SigningKeyList) GPGASCIIArmor() string {
+	keys := []string{}
+
+	for _, gpgKey := range signingKeys.GPGKeys {
+		keys = append(keys, gpgKey.ASCIIArmor)
+	}
+
+	return strings.Join(keys, "\n")
+}
+
+// Sort sorts versions from newest to oldest.
+func (v ProviderVersionCollection) Sort() {
+	sort.Slice(v, func(i, j int) bool {
+		versionA, _ := version.NewVersion(v[i].Version)
+		versionB, _ := version.NewVersion(v[j].Version)
+
+		return versionA.GreaterThan(versionB)
+	})
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go
new file mode 100644
index 00000000000..7dd74ac7852
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go
@@ -0,0 +1,3 @@
+// Package states contains the types that are used to represent Terraform
+// states.
+package states
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go
new file mode 100644
index 00000000000..0dc73499a33
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go
@@ -0,0 +1,35 @@
+// Code generated by "stringer -type EachMode"; DO NOT EDIT.
+
+package states
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[NoEach-0] + _ = x[EachList-76] + _ = x[EachMap-77] +} + +const ( + _EachMode_name_0 = "NoEach" + _EachMode_name_1 = "EachListEachMap" +) + +var ( + _EachMode_index_1 = [...]uint8{0, 8, 15} +) + +func (i EachMode) String() string { + switch { + case i == 0: + return _EachMode_name_0 + case 76 <= i && i <= 77: + i -= 76 + return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] + default: + return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go new file mode 100644 index 00000000000..891adc003ce --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go @@ -0,0 +1,20 @@ +package states + +// Generation is used to represent multiple objects in a succession of objects +// represented by a single resource instance address. A resource instance can +// have multiple generations over its lifetime due to object replacement +// (when a change can't be applied without destroying and re-creating), and +// multiple generations can exist at the same time when create_before_destroy +// is used. +// +// A Generation value can either be the value of the variable "CurrentGen" or +// a value of type DeposedKey. Generation values can be compared for equality +// using "==" and used as map keys. The zero value of Generation (nil) is not +// a valid generation and must not be used. +type Generation interface { + generation() +} + +// CurrentGen is the Generation representing the currently-active object for +// a resource instance. +var CurrentGen Generation diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go new file mode 100644 index 00000000000..3bb717d332e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go @@ -0,0 +1,120 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// ResourceInstanceObject is the local representation of a specific remote +// object associated with a resource instance. In practice not all remote +// objects are actually remote in the sense of being accessed over the network, +// but this is the most common case. +// +// It is not valid to mutate a ResourceInstanceObject once it has been created. +// Instead, create a new object and replace the existing one. +type ResourceInstanceObject struct { + // Value is the object-typed value representing the remote object within + // Terraform. + Value cty.Value + + // Private is an opaque value set by the provider when this object was + // last created or updated. Terraform Core does not use this value in + // any way and it is not exposed anywhere in the user interface, so + // a provider can use it for retaining any necessary private state. + Private []byte + + // Status represents the "readiness" of the object as of the last time + // it was updated. 
+	Status ObjectStatus
+
+	// Dependencies is a set of other addresses in the same module which
+	// this instance depended on when the given attributes were evaluated.
+	// This is used to construct the dependency relationships for an object
+	// whose configuration is no longer available, such as if it has been
+	// removed from configuration altogether, or is now deposed.
+	Dependencies []addrs.Referenceable
+}
+
+// ObjectStatus represents the status of a resource instance object.
+type ObjectStatus rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
+
+const (
+	// ObjectReady is an object status for an object that is ready to use.
+	ObjectReady ObjectStatus = 'R'
+
+	// ObjectTainted is an object status representing an object that is in
+	// an unrecoverable bad state due to a partial failure during a create,
+	// update, or delete operation. Since it cannot be moved into the
+	// ObjectReady state, a tainted object must be replaced.
+	ObjectTainted ObjectStatus = 'T'
+
+	// ObjectPlanned is a special object status used only for the transient
+	// placeholder objects we place into state during the refresh and plan
+	// walks to stand in for objects that will be created during apply.
+	//
+	// Any object of this status must have a corresponding change recorded
+	// in the current plan, whose value must then be used in preference to
+	// the value stored in state when evaluating expressions. A planned
+	// object stored in state will be incomplete if any of its attributes are
+	// not yet known, and the plan must be consulted in order to "see" those
+	// unknown values, because the state is not able to represent them.
+	ObjectPlanned ObjectStatus = 'P'
+)
+
+// Encode marshals the value within the receiver to produce a
+// ResourceInstanceObjectSrc ready to be written to a state file.
+//
+// The given type must be the implied type of the resource type schema, and
+// the given value must conform to it. It is important to pass the schema
+// type and not the object's own type so that dynamically-typed attributes
+// will be stored correctly. The caller must also provide the version number
+// of the schema that the given type was derived from, which will be recorded
+// in the source object so it can be used to detect when schema migration is
+// required on read.
+//
+// The returned object may share internal references with the receiver and
+// so the caller must not mutate the receiver any further once this
+// method is called.
+func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
+	// Our state serialization can't represent unknown values, so we convert
+	// them to nulls here. This is lossy, but nobody should be writing unknown
+	// values here and expecting to get them out again later.
+	//
+	// We get unknown values here while we're building out a "planned state"
+	// during the plan phase, but the value stored in the plan takes precedence
+	// for expression evaluation. The apply step should never produce unknown
+	// values, but if it does it's the responsibility of the caller to detect
+	// and raise an error about that.
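+	// (cty.UnknownAsNull returns a copy of the given value with any unknown
+	// values replaced by null values of the same type.)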
+	val := cty.UnknownAsNull(o.Value)
+
+	src, err := ctyjson.Marshal(val, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ResourceInstanceObjectSrc{
+		SchemaVersion: schemaVersion,
+		AttrsJSON:     src,
+		Private:       o.Private,
+		Status:        o.Status,
+		Dependencies:  o.Dependencies,
+	}, nil
+}
+
+// AsTainted returns a deep copy of the receiver with the status updated to
+// ObjectTainted.
+func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject {
+	if o == nil {
+		// A nil object can't be tainted, but we'll allow this anyway to
+		// avoid a crash, since we presumably intend to eventually record
+		// the object as having been deleted anyway.
+		return nil
+	}
+	ret := o.DeepCopy()
+	ret.Status = ObjectTainted
+	return ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go
new file mode 100644
index 00000000000..728ad80d12e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go
@@ -0,0 +1,113 @@
+package states
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+)
+
+// ResourceInstanceObjectSrc is a not-fully-decoded version of
+// ResourceInstanceObject. Decoding of it can be completed by first handling
+// any schema migration steps to get to the latest schema version and then
+// calling method Decode with the implied type of the latest schema.
+type ResourceInstanceObjectSrc struct {
+	// SchemaVersion is the resource-type-specific schema version number that
+	// was current when either AttrsJSON or AttrsFlat was encoded. Migration
+	// steps are required if this is less than the current version number
+	// reported by the corresponding provider.
+	SchemaVersion uint64
+
+	// AttrsJSON is a JSON-encoded representation of the object attributes,
+	// encoding the value (of the object type implied by the associated resource
+	// type schema) that represents this remote object in Terraform Language
+	// expressions, and is compared with configuration when producing a diff.
+	//
+	// This is retained in JSON format here because it may require preprocessing
+	// before decoding if, for example, the stored attributes are for an older
+	// schema version which the provider must upgrade before use. If the
+	// version is current, it is valid to simply decode this using the
+	// type implied by the current schema, without the need for the provider
+	// to perform an upgrade first.
+	//
+	// When writing a ResourceInstanceObject into the state, AttrsJSON should
+	// always be conformant to the current schema version and the current
+	// schema version should be recorded in the SchemaVersion field.
+	AttrsJSON []byte
+
+	// AttrsFlat is a legacy form of attributes used in older state file
+	// formats, and in the new state format for objects that haven't yet been
+	// upgraded. This attribute is mutually exclusive with AttrsJSON: for any
+	// ResourceInstanceObject, only one of these attributes may be populated
+	// and the other must be nil.
+	//
+	// An instance object with this field populated should be upgraded to use
+	// AttrsJSON at the earliest opportunity, since this legacy flatmap-based
+	// format will be phased out over time.
+	// AttrsFlat should not be used when
+	// writing new or updated objects to state; instead, callers must follow
+	// the recommendations in the AttrsJSON documentation above.
+	AttrsFlat map[string]string
+
+	// These fields all correspond to the fields of the same name on
+	// ResourceInstanceObject.
+	Private      []byte
+	Status       ObjectStatus
+	Dependencies []addrs.Referenceable
+}
+
+// Decode unmarshals the raw representation of the object attributes. Pass the
+// implied type of the corresponding resource type schema for correct operation.
+//
+// Before calling Decode, the caller must check that the SchemaVersion field
+// exactly equals the version number of the schema whose implied type is being
+// passed, or else the result is undefined.
+//
+// The returned object may share internal references with the receiver and
+// so the caller must not mutate the receiver any further once this
+// method is called.
+func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
+	var val cty.Value
+	var err error
+	if os.AttrsFlat != nil {
+		// Legacy mode. We'll do our best to unpick this from the flatmap.
+		val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		val, err = ctyjson.Unmarshal(os.AttrsJSON, ty)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &ResourceInstanceObject{
+		Value:        val,
+		Status:       os.Status,
+		Dependencies: os.Dependencies,
+		Private:      os.Private,
+	}, nil
+}
+
+// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the
+// metadata from the receiver and writing in the given new schema version
+// and attribute value that are presumed to have resulted from upgrading
+// from an older schema version.
+func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
+	new := os.DeepCopy()
+	new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap
+
+	// This is the same principle as ResourceInstanceObject.Encode, but
+	// avoiding a decode/re-encode cycle because we don't have type info
+	// available for the "old" attributes.
+	newAttrs = cty.UnknownAsNull(newAttrs)
+	src, err := ctyjson.Marshal(newAttrs, newType)
+	if err != nil {
+		return nil, err
+	}
+
+	new.AttrsJSON = src
+	new.SchemaVersion = newSchemaVersion
+	return new, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go
new file mode 100644
index 00000000000..8c89278c5e6
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go
@@ -0,0 +1,285 @@
+package states
+
+import (
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+)
+
+// Module is a container for the states of objects within a particular module.
+type Module struct {
+	Addr addrs.ModuleInstance
+
+	// Resources contains the state for each resource. The keys in this map are
+	// an implementation detail and must not be used by outside callers.
+	Resources map[string]*Resource
+
+	// OutputValues contains the state for each output value. The keys in this
+	// map are output value names.
+	OutputValues map[string]*OutputValue
+
+	// LocalValues contains the value for each named local value. The keys
+	// in this map are local value names.
+	LocalValues map[string]cty.Value
+}
+
+// NewModule constructs an empty module state for the given module address.
+func NewModule(addr addrs.ModuleInstance) *Module {
+	return &Module{
+		Addr:         addr,
+		Resources:    map[string]*Resource{},
+		OutputValues: map[string]*OutputValue{},
+		LocalValues:  map[string]cty.Value{},
+	}
+}
+
+// Resource returns the state for the resource with the given address within
+// the receiving module state, or nil if the requested resource is not tracked
+// in the state.
+func (ms *Module) Resource(addr addrs.Resource) *Resource {
+	return ms.Resources[addr.String()]
+}
+
+// ResourceInstance returns the state for the resource instance with the given
+// address within the receiving module state, or nil if the requested instance
+// is not tracked in the state.
+func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance {
+	rs := ms.Resource(addr.Resource)
+	if rs == nil {
+		return nil
+	}
+	return rs.Instance(addr.Key)
+}
+
+// SetResourceMeta updates the resource-level metadata for the resource
+// with the given address, creating the resource state for it if it doesn't
+// already exist.
+func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) {
+	rs := ms.Resource(addr)
+	if rs == nil {
+		rs = &Resource{
+			Addr:      addr,
+			Instances: map[addrs.InstanceKey]*ResourceInstance{},
+		}
+		ms.Resources[addr.String()] = rs
+	}
+
+	rs.EachMode = eachMode
+	rs.ProviderConfig = provider
+}
+
+// RemoveResource removes the entire state for the given resource, taking with
+// it any instances associated with the resource. This should generally be
+// called only for resource objects whose instances have all been destroyed.
+func (ms *Module) RemoveResource(addr addrs.Resource) {
+	delete(ms.Resources, addr.String())
+}
+
+// SetResourceInstanceCurrent saves the given instance object as the current
+// generation of the resource instance with the given address, simultaneously
+// updating the recorded provider configuration address, dependencies, and
+// resource EachMode.
+//
+// Any existing current instance object for the given resource is overwritten.
+// Set obj to nil to remove the primary generation object altogether. If there
+// are no deposed objects then the instance will be removed altogether.
+//
+// The provider address and "each mode" are resource-wide settings and so they
+// are updated for all other instances of the same resource as a side-effect of
+// this call.
+func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
+	ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)
+
+	rs := ms.Resource(addr.Resource)
+	is := rs.EnsureInstance(addr.Key)
+
+	is.Current = obj
+
+	if !is.HasObjects() {
+		// If we have no objects at all then we'll clean up.
+		delete(rs.Instances, addr.Key)
+	}
+	if rs.EachMode == NoEach && len(rs.Instances) == 0 {
+		// Also clean up if we only expect to have one instance anyway
+		// and there are none. We leave the resource behind if an each mode
+		// is active because an empty list or map of instances is a valid state.
+		delete(ms.Resources, addr.Resource.String())
+	}
+}
+
+// SetResourceInstanceDeposed saves the given instance object as a deposed
+// generation of the resource instance with the given address and deposed key.
+//
+// Call this method only for pre-existing deposed objects that already have
+// a known DeposedKey.
For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + if obj != nil { + is.Deposed[key] = obj + } else { + delete(is.Deposed, key) + } + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + delete(rs.Instances, addr.Key) + + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + is := rs.Instance(addr.Key) + if is == nil { + return + } + delete(is.Deposed, key) + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// deposeResourceInstanceObject is the real implementation of +// SyncState.DeposeResourceInstanceObject. +func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { + is := ms.ResourceInstance(addr) + if is == nil { + return NotDeposed + } + return is.deposeCurrentObject(forceKey) +} + +// maybeRestoreResourceInstanceDeposed is the real implementation of +// SyncState.MaybeRestoreResourceInstanceDeposed. 
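+//
+// It restores an object only if the instance currently has no current object
+// and has at least one deposed object; on success, the object recorded under
+// the given deposed key becomes current and that deposed entry is removed.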
+func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool {
+	rs := ms.Resource(addr.Resource)
+	if rs == nil {
+		return false
+	}
+	is := rs.Instance(addr.Key)
+	if is == nil {
+		return false
+	}
+	if is.Current != nil {
+		return false
+	}
+	if len(is.Deposed) == 0 {
+		return false
+	}
+	is.Current = is.Deposed[key]
+	delete(is.Deposed, key)
+	return true
+}
+
+// SetOutputValue writes an output value into the state, overwriting any
+// existing value of the same name.
+func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue {
+	os := &OutputValue{
+		Value:     value,
+		Sensitive: sensitive,
+	}
+	ms.OutputValues[name] = os
+	return os
+}
+
+// RemoveOutputValue removes the output value of the given name from the state,
+// if it exists. This method is a no-op if there is no value of the given
+// name.
+func (ms *Module) RemoveOutputValue(name string) {
+	delete(ms.OutputValues, name)
+}
+
+// SetLocalValue writes a local value into the state, overwriting any
+// existing value of the same name.
+func (ms *Module) SetLocalValue(name string, value cty.Value) {
+	ms.LocalValues[name] = value
+}
+
+// RemoveLocalValue removes the local value of the given name from the state,
+// if it exists. This method is a no-op if there is no value of the given
+// name.
+func (ms *Module) RemoveLocalValue(name string) {
+	delete(ms.LocalValues, name)
+}
+
+// PruneResourceHusks is a specialized method that will remove any Resource
+// objects that do not contain any instances, even if they have an EachMode.
+//
+// You probably shouldn't call this! See the method of the same name on
+// type State for more information on what this is for and the rare situations
+// where it is safe to use.
+func (ms *Module) PruneResourceHusks() {
+	for _, rs := range ms.Resources {
+		if len(rs.Instances) == 0 {
+			ms.RemoveResource(rs.Addr)
+		}
+	}
+}
+
+// empty returns true if the receiving module state is contributing nothing
+// to the state. In other words, it returns true if the module could be
+// removed from the state altogether without changing the meaning of the state.
+//
+// In practice a module containing no objects is the same as a non-existent
+// module, and so we can opportunistically clean up once a module becomes
+// empty on the assumption that it will be re-added if needed later.
+func (ms *Module) empty() bool {
+	if ms == nil {
+		return true
+	}
+
+	// This must be updated to cover any new collections added to Module
+	// in future.
+	return (len(ms.Resources) == 0 &&
+		len(ms.OutputValues) == 0 &&
+		len(ms.LocalValues) == 0)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go
new file mode 100644
index 00000000000..96a6db2f4c4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go
@@ -0,0 +1,33 @@
+// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT.
+
+package states
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[ObjectReady-82] + _ = x[ObjectTainted-84] + _ = x[ObjectPlanned-80] +} + +const ( + _ObjectStatus_name_0 = "ObjectPlanned" + _ObjectStatus_name_1 = "ObjectReady" + _ObjectStatus_name_2 = "ObjectTainted" +) + +func (i ObjectStatus) String() string { + switch { + case i == 80: + return _ObjectStatus_name_0 + case i == 82: + return _ObjectStatus_name_1 + case i == 84: + return _ObjectStatus_name_2 + default: + return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go new file mode 100644 index 00000000000..d232b76d404 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go @@ -0,0 +1,14 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Value cty.Value + Sensitive bool +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go new file mode 100644 index 00000000000..aacdb9d2d1b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go @@ -0,0 +1,239 @@ +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the module-relative address for the resource this state object + // belongs to. + Addr addrs.Resource + + // EachMode is the multi-instance mode currently in use for this resource, + // or NoEach if this is a single-instance resource. This dictates what + // type of value is returned when accessing this resource via expressions + // in the Terraform language. + EachMode EachMode + + // Instances contains the potentially-multiple instances associated with + // this resource. This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. + Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. + ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. +func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { + return rs.Instances[key] +} + +// EnsureInstance returns the state for the instance with the given key, +// creating a new empty state for it if one doesn't already exist. +// +// Because this may create and save a new state, it is considered to be +// a write operation. 
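+//
+// A minimal usage sketch (the instance key and objSrc here are illustrative):
+//
+//	is := rs.EnsureInstance(addrs.IntKey(0))
+//	is.Current = objSrc // objSrc is some *ResourceInstanceObjectSrc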
+func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { + ret := rs.Instance(key) + if ret == nil { + ret = NewResourceInstance() + rs.Instances[key] = ret + } + return ret +} + +// ResourceInstance represents the state of a particular instance of a resource. +type ResourceInstance struct { + // Current, if non-nil, is the remote object that is currently represented + // by the corresponding resource instance. + Current *ResourceInstanceObjectSrc + + // Deposed, if len > 0, contains any remote objects that were previously + // represented by the corresponding resource instance but have been + // replaced and are pending destruction due to the create_before_destroy + // lifecycle mode. + Deposed map[DeposedKey]*ResourceInstanceObjectSrc +} + +// NewResourceInstance constructs and returns a new ResourceInstance, ready to +// use. +func NewResourceInstance() *ResourceInstance { + return &ResourceInstance{ + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + } +} + +// HasCurrent returns true if this resource instance has a "current"-generation +// object. Most instances do, but this can briefly be false during a +// create-before-destroy replace operation when the current has been deposed +// but its replacement has not yet been created. +func (i *ResourceInstance) HasCurrent() bool { + return i != nil && i.Current != nil +} + +// HasDeposed returns true if this resource instance has a deposed object +// with the given key. +func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { + return i != nil && i.Deposed[key] != nil +} + +// HasAnyDeposed returns true if this resource instance has one or more +// deposed objects. +func (i *ResourceInstance) HasAnyDeposed() bool { + return i != nil && len(i.Deposed) > 0 +} + +// HasObjects returns true if this resource has any objects at all, whether +// current or deposed. +func (i *ResourceInstance) HasObjects() bool { + return i.Current != nil || len(i.Deposed) != 0 +} + +// deposeCurrentObject is part of the real implementation of +// SyncState.DeposeResourceInstanceObject. The exported method uses a lock +// to ensure that we can safely allocate an unused deposed key without +// collision. +func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { + if !i.HasCurrent() { + return NotDeposed + } + + key := forceKey + if key == NotDeposed { + key = i.findUnusedDeposedKey() + } else { + if _, exists := i.Deposed[key]; exists { + panic(fmt.Sprintf("forced key %s is already in use", forceKey)) + } + } + i.Deposed[key] = i.Current + i.Current = nil + return key +} + +// GetGeneration retrieves the object of the given generation from the +// ResourceInstance, or returns nil if there is no such object. +// +// If the given generation is nil or invalid, this method will panic. +func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { + if gen == CurrentGen { + return i.Current + } + if dk, ok := gen.(DeposedKey); ok { + return i.Deposed[dk] + } + if gen == nil { + panic(fmt.Sprintf("get with nil Generation")) + } + // Should never fall out here, since the above covers all possible + // Generation values. + panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. 
+// To avoid this risk, instead use the
+// DeposeResourceInstanceObject method on the SyncState wrapper type, which
+// allocates a key and uses it atomically.
+func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey {
+	return i.findUnusedDeposedKey()
+}
+
+// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
+// already be in use for this instance.
+func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey {
+	for {
+		key := NewDeposedKey()
+		if _, exists := i.Deposed[key]; !exists {
+			return key
+		}
+		// Spin until we find a unique one. This shouldn't take long, because
+		// we have a 32-bit keyspace and there's rarely more than one deposed
+		// instance.
+	}
+}
+
+// EachMode specifies the multi-instance mode for a resource.
+type EachMode rune
+
+const (
+	NoEach   EachMode = 0
+	EachList EachMode = 'L'
+	EachMap  EachMode = 'M'
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode
+
+func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
+	switch key.(type) {
+	case addrs.IntKey:
+		return EachList
+	case addrs.StringKey:
+		return EachMap
+	default:
+		if key == addrs.NoKey {
+			return NoEach
+		}
+		panic(fmt.Sprintf("don't know an each mode for instance key %#v", key))
+	}
+}
+
+// DeposedKey is an 8-character hex string used to uniquely identify deposed
+// instance objects in the state.
+type DeposedKey string
+
+// NotDeposed is a special invalid value of DeposedKey that is used to represent
+// the absence of a deposed key. It must not be used as an actual deposed key.
+const NotDeposed = DeposedKey("")
+
+var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// NewDeposedKey generates a pseudo-random deposed key. Because of the short
+// length of these keys, uniqueness is not a natural consequence and so the
+// caller should test to see if the generated key is already in use and generate
+// another if so, until a unique key is found.
+func NewDeposedKey() DeposedKey {
+	v := deposedKeyRand.Uint32()
+	return DeposedKey(fmt.Sprintf("%08x", v))
+}
+
+func (k DeposedKey) String() string {
+	return string(k)
+}
+
+func (k DeposedKey) GoString() string {
+	ks := string(k)
+	switch {
+	case ks == "":
+		return "states.NotDeposed"
+	default:
+		return fmt.Sprintf("states.DeposedKey(%s)", ks)
+	}
+}
+
+// Generation is a helper method to convert a DeposedKey into a Generation.
+// If the receiver is anything other than NotDeposed then the result is
+// just the same value as a Generation. If the receiver is NotDeposed then
+// the result is CurrentGen.
+func (k DeposedKey) Generation() Generation {
+	if k == NotDeposed {
+		return CurrentGen
+	}
+	return k
+}
+
+// generation is an implementation of Generation.
+func (k DeposedKey) generation() {}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go
new file mode 100644
index 00000000000..328dd53d5c6
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go
@@ -0,0 +1,229 @@
+package states
+
+import (
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+)
+
+// State is the top-level type of a Terraform state.
+//
+// A state should be mutated only via its accessor methods, to ensure that
+// invariants are preserved.
+//
+// Access to State and the nested values within it is not concurrency-safe,
+// so when accessing a State object concurrently it is the caller's
+// responsibility to ensure that only one write is in progress at a time
+// and that reads only occur when no write is in progress. The most common
+// way to achieve this is to wrap the State in a SyncState and use the
+// higher-level atomic operations supported by that type.
+type State struct {
+	// Modules contains the state for each module. The keys in this map are
+	// an implementation detail and must not be used by outside callers.
+	Modules map[string]*Module
+}
+
+// NewState constructs a minimal empty state, containing an empty root module.
+func NewState() *State {
+	modules := map[string]*Module{}
+	modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance)
+	return &State{
+		Modules: modules,
+	}
+}
+
+// BuildState is a helper -- primarily intended for tests -- to build a state
+// using imperative code against the SyncState type while still acting as
+// an expression of type *State to assign into a containing struct.
+func BuildState(cb func(*SyncState)) *State {
+	s := NewState()
+	cb(s.SyncWrapper())
+	return s
+}
+
+// Empty returns true if there are no resources or populated output values
+// in the receiver. In other words, if this state could be safely replaced
+// with the return value of NewState and be functionally equivalent.
+func (s *State) Empty() bool {
+	if s == nil {
+		return true
+	}
+	for _, ms := range s.Modules {
+		if len(ms.Resources) != 0 {
+			return false
+		}
+		if len(ms.OutputValues) != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Module returns the state for the module with the given address, or nil if
+// the requested module is not tracked in the state.
+func (s *State) Module(addr addrs.ModuleInstance) *Module {
+	if s == nil {
+		panic("State.Module on nil *State")
+	}
+	return s.Modules[addr.String()]
+}
+
+// RemoveModule removes the module with the given address from the state,
+// unless it is the root module. The root module cannot be deleted, and so
+// this method will panic if that is attempted.
+//
+// Removing a module implicitly discards all of the resources, outputs and
+// local values within it, and so this should usually be done only for empty
+// modules. For callers accessing the state through a SyncState wrapper, modules
+// are automatically pruned if they are empty after one of their contained
+// elements is removed.
+func (s *State) RemoveModule(addr addrs.ModuleInstance) {
+	if addr.IsRoot() {
+		panic("attempted to remove root module")
+	}
+
+	delete(s.Modules, addr.String())
+}
+
+// RootModule is a convenient alias for Module(addrs.RootModuleInstance).
+func (s *State) RootModule() *Module {
+	if s == nil {
+		panic("RootModule called on nil State")
+	}
+	return s.Modules[addrs.RootModuleInstance.String()]
+}
+
+// EnsureModule returns the state for the module with the given address,
+// creating and adding a new one if necessary.
+//
+// Since this might modify the state to add a new instance, it is considered
+// to be a write operation.
+func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module {
+	ms := s.Module(addr)
+	if ms == nil {
+		ms = NewModule(addr)
+		s.Modules[addr.String()] = ms
+	}
+	return ms
+}
+
+// HasResources returns true if there is at least one resource (of any mode)
+// present in the receiving state.
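+//
+// For example, a caller might use it as a cheap guard (illustrative only):
+//
+//	if !state.HasResources() {
+//		// nothing is being managed; skip resource-related work
+//	}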
+func (s *State) HasResources() bool { + if s == nil { + return false + } + for _, ms := range s.Modules { + if len(ms.Resources) > 0 { + return true + } + } + return false +} + +// Resource returns the state for the resource with the given address, or nil +// if no such resource is tracked in the state. +func (s *State) Resource(addr addrs.AbsResource) *Resource { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.Resource(addr.Resource) +} + +// ResourceInstance returns the state for the resource instance with the given +// address, or nil if no such resource is tracked in the state. +func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + if s == nil { + panic("State.ResourceInstance on nil *State") + } + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.ResourceInstance(addr.Resource) +} + +// OutputValue returns the state for the output value with the given address, +// or nil if no such output value is tracked in the state. +func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.OutputValues[addr.OutputValue.Name] +} + +// LocalValue returns the value of the named local value with the given address, +// or cty.NilVal if no such value is tracked in the state. +func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { + ms := s.Module(addr.Module) + if ms == nil { + return cty.NilVal + } + return ms.LocalValues[addr.LocalValue.Name] +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving state. +// +// The result is de-duplicated so that each distinct address appears only once. +func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { + if s == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, ms := range s.Modules { + for _, rc := range ms.Resources { + m[rc.ProviderConfig.String()] = rc.ProviderConfig + } + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// This should generally be used only after a "terraform destroy" operation, +// to finalize the cleanup of the state. It is not correct to use this after +// other operations because if a resource has "count = 0" or "for_each" over +// an empty collection then we want to retain it in the state so that references +// to it, particularly in "strange" contexts like "terraform console", can be +// properly resolved. +// +// This method MUST NOT be called concurrently with other readers and writers +// of the receiving state. +func (s *State) PruneResourceHusks() { + for _, m := range s.Modules { + m.PruneResourceHusks() + if len(m.Resources) == 0 && !m.Addr.IsRoot() { + s.RemoveModule(m.Addr) + } + } +} + +// SyncWrapper returns a SyncState object wrapping the receiver. 
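+//
+// Illustrative use (the state variable here is hypothetical):
+//
+//	sync := state.SyncWrapper()
+//	// ...perform concurrent reads and writes through sync...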
+func (s *State) SyncWrapper() *SyncState {
+	return &SyncState{
+		state: s,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go
new file mode 100644
index 00000000000..6266aca79dc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go
@@ -0,0 +1,221 @@
+package states
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Taking deep copies of states is an important operation because state is
+// otherwise a mutable data structure that is challenging to share across
+// many separate callers. It is important that the DeepCopy implementations
+// in this file comprehensively copy all parts of the state data structure
+// that could be mutated via pointers.
+
+// DeepCopy returns a new state that contains equivalent data to the receiver
+// but shares no backing memory in common.
+//
+// As with all methods on State, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
+func (s *State) DeepCopy() *State {
+	if s == nil {
+		return nil
+	}
+
+	modules := make(map[string]*Module, len(s.Modules))
+	for k, m := range s.Modules {
+		modules[k] = m.DeepCopy()
+	}
+	return &State{
+		Modules: modules,
+	}
+}
+
+// DeepCopy returns a new module state that contains equivalent data to the
+// receiver but shares no backing memory in common.
+//
+// As with all methods on Module, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
+func (ms *Module) DeepCopy() *Module {
+	if ms == nil {
+		return nil
+	}
+
+	resources := make(map[string]*Resource, len(ms.Resources))
+	for k, r := range ms.Resources {
+		resources[k] = r.DeepCopy()
+	}
+	outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
+	for k, v := range ms.OutputValues {
+		outputValues[k] = v.DeepCopy()
+	}
+	localValues := make(map[string]cty.Value, len(ms.LocalValues))
+	for k, v := range ms.LocalValues {
+		// cty.Value is immutable, so we don't need to copy these.
+		localValues[k] = v
+	}
+
+	return &Module{
+		Addr:         ms.Addr, // technically mutable, but immutable by convention
+		Resources:    resources,
+		OutputValues: outputValues,
+		LocalValues:  localValues,
+	}
+}
+
+// DeepCopy returns a new resource state that contains equivalent data to the
+// receiver but shares no backing memory in common.
+//
+// As with all methods on Resource, this method is not safe to use concurrently
+// with writing to any portion of the receiving data structure. It is the
+// caller's responsibility to ensure mutual exclusion for the duration of the
+// operation, but may then freely modify the receiver and the returned copy
+// independently once this method returns.
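+//
+// A sketch of the copy-before-modify pattern this enables (illustrative):
+//
+//	rs2 := rs.DeepCopy()
+//	rs2.EachMode = EachList // the original rs is unaffected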
+func (rs *Resource) DeepCopy() *Resource {
+	if rs == nil {
+		return nil
+	}
+
+	instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
+	for k, i := range rs.Instances {
+		instances[k] = i.DeepCopy()
+	}
+
+	return &Resource{
+		Addr:           rs.Addr,
+		EachMode:       rs.EachMode,
+		Instances:      instances,
+		ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
+	}
+}
+
+// DeepCopy returns a new resource instance state that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstance, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (is *ResourceInstance) DeepCopy() *ResourceInstance {
+	if is == nil {
+		return nil
+	}
+
+	deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
+	for k, obj := range is.Deposed {
+		deposed[k] = obj.DeepCopy()
+	}
+
+	return &ResourceInstance{
+		Current: is.Current.DeepCopy(),
+		Deposed: deposed,
+	}
+}
+
+// DeepCopy returns a new resource instance object that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
+// use concurrently with writing to any portion of the receiving data structure.
+// It is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
+	if obj == nil {
+		return nil
+	}
+
+	var attrsFlat map[string]string
+	if obj.AttrsFlat != nil {
+		attrsFlat = make(map[string]string, len(obj.AttrsFlat))
+		for k, v := range obj.AttrsFlat {
+			attrsFlat[k] = v
+		}
+	}
+
+	var attrsJSON []byte
+	if obj.AttrsJSON != nil {
+		attrsJSON = make([]byte, len(obj.AttrsJSON))
+		copy(attrsJSON, obj.AttrsJSON)
+	}
+
+	var private []byte
+	if obj.Private != nil {
+		private = make([]byte, len(obj.Private))
+		copy(private, obj.Private)
+	}
+
+	// Some addrs.Referenceable implementations are technically mutable, but
+	// we treat them as immutable by convention and so we don't deep-copy here.
+	dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
+	copy(dependencies, obj.Dependencies)
+
+	return &ResourceInstanceObjectSrc{
+		Status:        obj.Status,
+		SchemaVersion: obj.SchemaVersion,
+		Private:       private,
+		AttrsFlat:     attrsFlat,
+		AttrsJSON:     attrsJSON,
+		Dependencies:  dependencies,
+	}
+}
+
+// DeepCopy returns a new resource instance object that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on ResourceInstanceObject, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
+	if obj == nil {
+		return nil
+	}
+
+	var private []byte
+	if obj.Private != nil {
+		private = make([]byte, len(obj.Private))
+		copy(private, obj.Private)
+	}
+
+	// Some addrs.Referenceable implementations are technically mutable, but
+	// we treat them as immutable by convention and so we don't deep-copy here.
+	var dependencies []addrs.Referenceable
+	if obj.Dependencies != nil {
+		dependencies = make([]addrs.Referenceable, len(obj.Dependencies))
+		copy(dependencies, obj.Dependencies)
+	}
+
+	return &ResourceInstanceObject{
+		Value:        obj.Value,
+		Status:       obj.Status,
+		Private:      private,
+		Dependencies: dependencies,
+	}
+}
+
+// DeepCopy returns a new output value state that contains equivalent data
+// to the receiver but shares no backing memory in common.
+//
+// As with all methods on OutputValue, this method is not safe to use
+// concurrently with writing to any portion of the receiving data structure. It
+// is the caller's responsibility to ensure mutual exclusion for the duration
+// of the operation, but may then freely modify the receiver and the returned
+// copy independently once this method returns.
+func (os *OutputValue) DeepCopy() *OutputValue {
+	if os == nil {
+		return nil
+	}
+
+	return &OutputValue{
+		Value:     os.Value,
+		Sensitive: os.Sensitive,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go
new file mode 100644
index 00000000000..ea20967e5b9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go
@@ -0,0 +1,18 @@
+package states
+
+import (
+	"reflect"
+)
+
+// Equal returns true if the receiver is functionally equivalent to other,
+// including any ephemeral portions of the state that would not be included
+// if the state were saved to files.
+//
+// To test only the persistent portions of two states for equality, instead
+// use statefile.StatesMarshalEqual.
+func (s *State) Equal(other *State) bool {
+	// For the moment this is sufficient, but we may need to do something
+	// more elaborate in future if we have any portions of state that require
+	// more sophisticated comparisons.
+	return reflect.DeepEqual(s, other)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go
new file mode 100644
index 00000000000..dffd650d6b0
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go
@@ -0,0 +1,279 @@
+package states
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+)
+
+// String returns a rather-odd string representation of the entire state.
+//
+// This is intended to match the behavior of the older terraform.State.String
+// method that is used in lots of existing tests. It should not be used in
+// new tests: instead, use "cmp" to directly compare the state data structures
+// and print out a diff if they do not match.
+//
+// This method should never be used in non-test code, whether directly by call
+// or indirectly via a %s or %q verb in package fmt.
+func (s *State) String() string {
+	if s == nil {
+		return "<nil>"
+	}
+
+	// sort the modules by name for consistent output
+	modules := make([]string, 0, len(s.Modules))
+	for m := range s.Modules {
+		modules = append(modules, m)
+	}
+	sort.Strings(modules)
+
+	var buf bytes.Buffer
+	for _, name := range modules {
+		m := s.Modules[name]
+		mStr := m.testString()
+
+		// If we're the root module, we just write the output directly.
+		if m.Addr.IsRoot() {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		// We need to build out a string that resembles the not-quite-standard
+		// format that terraform.State.String used to use, where there's a
+		// "module." prefix but then just a chain of all of the module names
+		// without any further "module." portions.
+		buf.WriteString("module")
+		for _, step := range m.Addr {
+			buf.WriteByte('.')
+			buf.WriteString(step.Name)
+			if step.InstanceKey != addrs.NoKey {
+				buf.WriteByte('[')
+				buf.WriteString(step.InstanceKey.String())
+				buf.WriteByte(']')
+			}
+		}
+		buf.WriteString(":\n")
+
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			text := s.Text()
+			if text != "" {
+				text = "  " + text
+			}
+
+			buf.WriteString(fmt.Sprintf("%s\n", text))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// testString is used to produce part of the output of State.String. It should
+// never be used directly.
+func (m *Module) testString() string {
+	var buf bytes.Buffer
+
+	if len(m.Resources) == 0 {
+		buf.WriteString("<no state>")
+	}
+
+	// We use AbsResourceInstance here, even though everything belongs to
+	// the same module, just because we have a sorting behavior defined
+	// for those but not for just ResourceInstance.
+	addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources))
+	for _, rs := range m.Resources {
+		for ik := range rs.Instances {
+			addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance))
+		}
+	}
+
+	sort.Slice(addrsOrder, func(i, j int) bool {
+		return addrsOrder[i].Less(addrsOrder[j])
+	})
+
+	for _, fakeAbsAddr := range addrsOrder {
+		addr := fakeAbsAddr.Resource
+		rs := m.Resource(addr.ContainingResource())
+		is := m.ResourceInstance(addr)
+
+		// Here we need to fake up a legacy-style address as the old state
+		// types would've used, since that's what our tests against those
+		// old types expect. The significant difference is that the instance
+		// key is dot-separated rather than using index brackets.
+		k := addr.ContainingResource().String()
+		if addr.Key != addrs.NoKey {
+			switch tk := addr.Key.(type) {
+			case addrs.IntKey:
+				k = fmt.Sprintf("%s.%d", k, tk)
+			default:
+				// No other key types existed for the legacy types, so we
+				// can do whatever we want here. We'll just use our standard
+				// syntax for these.
+				k = k + tk.String()
+			}
+		}
+
+		id := LegacyInstanceObjectID(is.Current)
+
+		taintStr := ""
+		if is.Current != nil && is.Current.Status == ObjectTainted {
+			taintStr = " (tainted)"
+		}
+
+		deposedStr := ""
+		if len(is.Deposed) > 0 {
+			deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed))
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+		buf.WriteString(fmt.Sprintf("  ID = %s\n", id))
+		buf.WriteString(fmt.Sprintf("  provider = %s\n", rs.ProviderConfig.String()))
+
+		// Attributes were a flatmap before, but are not anymore. To preserve
+		// our old output as closely as possible we need to do a conversion
+		// to flatmap.
Normally we'd want to do this with schema for + // accuracy, but for our purposes here it only needs to be approximate. + // This should produce an identical result for most cases, though + // in particular will differ in a few cases: + // - The keys used for elements in a set will be different + // - Values for attributes of type cty.DynamicPseudoType will be + // misinterpreted (but these weren't possible in old world anyway) + var attributes map[string]string + if obj := is.Current; obj != nil { + switch { + case obj.AttrsFlat != nil: + // Easy (but increasingly unlikely) case: the state hasn't + // actually been upgraded to the new form yet. + attributes = obj.AttrsFlat + case obj.AttrsJSON != nil: + ty, err := ctyjson.ImpliedType(obj.AttrsJSON) + if err == nil { + val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) + if err == nil { + attributes = hcl2shim.FlatmapValueFromHCL2(val) + } + } + } + } + attrKeys := make([]string, 0, len(attributes)) + for ak, val := range attributes { + if ak == "id" { + continue + } + + // don't show empty containers in the output + if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + // CAUTION: Since deposed keys are now random strings instead of + // incrementing integers, this result will not be deterministic + // if there is more than one deposed object. + i := 1 + for _, t := range is.Deposed { + id := LegacyInstanceObjectID(t) + taintStr := "" + if t.Status == ObjectTainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) + i++ + } + + if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range obj.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) + } + } + } + + if len(m.OutputValues) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + v := m.OutputValues[k] + lv := hcl2shim.ConfigValueFromHCL2(v.Value) + switch vTyped := lv.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + default: + buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) + } + } + } + + return buf.String() +} + +// LegacyInstanceObjectID is a helper for extracting an object id value from +// an instance object in a way that approximates how we used to do this +// for the old state types. ID is no longer first-class, so this is preserved +// only for compatibility with old tests that include the id as part of their +// expected value. 
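+//
+// For example (the JSON payload here is illustrative):
+//
+//	obj := &ResourceInstanceObjectSrc{AttrsJSON: []byte(`{"id":"i-abc123"}`)}
+//	id := LegacyInstanceObjectID(obj) // id == "i-abc123"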
+func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { + if obj == nil { + return "" + } + + if obj.AttrsJSON != nil { + type WithID struct { + ID string `json:"id"` + } + var withID WithID + err := json.Unmarshal(obj.AttrsJSON, &withID) + if err == nil { + return withID.ID + } + } else if obj.AttrsFlat != nil { + if flatID, exists := obj.AttrsFlat["id"]; exists { + return flatID + } + } + + // For resource types created after we removed id as special there may + // not actually be one at all. This is okay because older tests won't + // encounter this, and new tests shouldn't be using ids. + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go new file mode 100644 index 00000000000..042ce51c144 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go @@ -0,0 +1,62 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +const invalidFormat = "Invalid state file format" + +// jsonUnmarshalDiags is a helper that translates errors returned from +// json.Unmarshal into hopefully-more-helpful diagnostics messages. +func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if err == nil { + return diags + } + + switch tErr := err.(type) { + case *json.SyntaxError: + // We've usually already successfully parsed a source file as JSON at + // least once before we'd use jsonUnmarshalDiags with it (to sniff + // the version number) so this particular error should not appear much + // in practice. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + // This is likely to be the most common area, describing a + // non-conformance between the file and the expected file format + // at a semantic level. + if tErr.Field != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), + )) + break + } else { + // Without a field name, we can't really say anything helpful. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + "The state file does not conform to the expected JSON data structure.", + )) + } + default: + // Fallback for all other types of errors. This can happen only for + // custom UnmarshalJSON implementations, so should be encountered + // only rarely. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), + )) + } + + return diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go new file mode 100644 index 00000000000..625d0cf429a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go @@ -0,0 +1,3 @@ +// Package statefile deals with the file format used to serialize states for +// persistent storage and then deserialize them into memory again later. +package statefile diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go new file mode 100644 index 00000000000..9f7ae00e4c6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go @@ -0,0 +1,62 @@ +package statefile + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// File is the in-memory representation of a state file. It includes the state +// itself along with various metadata used to track changing state files for +// the same configuration over time. +type File struct { + // TerraformVersion is the version of Terraform that wrote this state file. + TerraformVersion *version.Version + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial uint64 + + // Lineage is set when a new, blank state file is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string + + // State is the actual state represented by this file. + State *states.State +} + +func New(state *states.State, lineage string, serial uint64) *File { + // To make life easier on callers, we'll accept a nil state here and just + // allocate an empty one, which is required for this file to be successfully + // written out. + if state == nil { + state = states.NewState() + } + + return &File{ + TerraformVersion: tfversion.SemVer, + State: state, + Lineage: lineage, + Serial: serial, + } +} + +// DeepCopy is a convenience method to create a new File object whose state +// is a deep copy of the receiver's, as implemented by states.State.DeepCopy. 
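The two concrete error types distinguished by jsonUnmarshalDiags come straight from encoding/json; a freestanding sketch of how each one surfaces:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type state struct {
		Version int `json:"version"`
	}
	var s state

	// Malformed JSON: yields *json.SyntaxError with a byte offset.
	err := json.Unmarshal([]byte(`{"version": `), &s)
	if serr, ok := err.(*json.SyntaxError); ok {
		fmt.Printf("syntax error at byte offset %d\n", serr.Offset)
	}

	// Well-formed JSON with the wrong type: yields *json.UnmarshalTypeError
	// carrying the offending field name and a description of the value.
	err = json.Unmarshal([]byte(`{"version": "four"}`), &s)
	if terr, ok := err.(*json.UnmarshalTypeError); ok {
		fmt.Printf("field %q has invalid value %s\n", terr.Field, terr.Value)
	}
}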
+func (f *File) DeepCopy() *File { + if f == nil { + return nil + } + return &File{ + TerraformVersion: f.TerraformVersion, + Serial: f.Serial, + Lineage: f.Lineage, + State: f.State.DeepCopy(), + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/marshal_equal.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/marshal_equal.go new file mode 100644 index 00000000000..41f485d17b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/marshal_equal.go @@ -0,0 +1,40 @@ +package statefile + +import ( + "bytes" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// StatesMarshalEqual returns true if and only if the two given states have +// an identical (byte-for-byte) statefile representation. +// +// This function compares only the portions of the state that are persisted +// in state files, so for example it will not return false if the only +// differences between the two states are local values or descendent module +// outputs. +func StatesMarshalEqual(a, b *states.State) bool { + var aBuf bytes.Buffer + var bBuf bytes.Buffer + + // nil states are not valid states, and so they can never marshal equal. + if a == nil || b == nil { + return false + } + + // We write here some temporary files that have no header information + // populated, thus ensuring that we're only comparing the state itself + // and not any metadata. + err := Write(&File{State: a}, &aBuf) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + err = Write(&File{State: b}, &bBuf) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + + return bytes.Equal(aBuf.Bytes(), bBuf.Bytes()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go new file mode 100644 index 00000000000..f1899cd228e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go @@ -0,0 +1,209 @@ +package statefile + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// ErrNoState is returned by ReadState when the state file is empty. +var ErrNoState = errors.New("no state") + +// Read reads a state from the given reader. +// +// Legacy state format versions 1 through 3 are supported, but the result will +// contain object attributes in the deprecated "flatmap" format and so must +// be upgraded by the caller before use. +// +// If the state file is empty, the special error value ErrNoState is returned. +// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics +// potentially describing multiple errors. +func Read(r io.Reader) (*File, error) { + // Some callers provide us a "typed nil" *os.File here, which would + // cause us to panic below if we tried to use it.
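One plausible way a caller might combine StatesMarshalEqual with the File metadata is sketched below; the supersedes helper is hypothetical, and the internal import path is shown only for illustration since internal packages are not importable from outside the SDK module:

package stateutil

import (
	statefile "github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile"
)

// supersedes reports whether snapshot b should replace snapshot a.
// Serials are only meaningfully comparable when the lineage matches.
func supersedes(a, b *statefile.File) bool {
	if a.Lineage != b.Lineage {
		return false // unrelated histories; serials don't compare
	}
	if b.Serial != a.Serial {
		return b.Serial > a.Serial
	}
	// Same serial: treat b as newer only if its content really differs.
	return !statefile.StatesMarshalEqual(a.State, b.State)
}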
+ if f, ok := r.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + var diags tfdiags.Diagnostics + + // We actually just buffer the whole thing in memory, because states are + // generally not huge and we need to be able to sniff for a version + // number before full parsing. + src, err := ioutil.ReadAll(r) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read state file", + fmt.Sprintf("The state file could not be read: %s", err), + )) + return nil, diags.Err() + } + + if len(src) == 0 { + return nil, ErrNoState + } + + state, diags := readState(src) + if diags.HasErrors() { + return nil, diags.Err() + } + + if state == nil { + // Should never happen + panic("readState returned nil state with no errors") + } + + if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { + return state, fmt.Errorf( + "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", + state.TerraformVersion, + tfversion.SemVer, + state.TerraformVersion, + ) + } + + return state, diags.Err() +} + +func readState(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if looksLikeVersion0(src) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", + )) + return nil, diags + } + + version, versionDiags := sniffJSONStateVersion(src) + diags = diags.Append(versionDiags) + if versionDiags.HasErrors() { + return nil, diags + } + + switch version { + case 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", + )) + return nil, diags + case 1: + return readStateV1(src) + case 2: + return readStateV2(src) + case 3: + return readStateV3(src) + case 4: + return readStateV4(src) + default: + thisVersion := tfversion.SemVer.String() + creatingVersion := sniffJSONStateTerraformVersion(src) + switch { + case creatingVersion != "": + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. 
This state file may have been created by a newer version of Terraform.", version, thisVersion), + )) + } + return nil, diags + } +} + +func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + type VersionSniff struct { + Version *uint64 `json:"version"` + } + var sniff VersionSniff + err := json.Unmarshal(src, &sniff) + if err != nil { + switch tErr := err.(type) { + case *json.SyntaxError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file could not be parsed as JSON.", + )) + } + } + + if sniff.Version == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file does not have a \"version\" attribute, which is required to identify the format version.", + )) + return 0, diags + } + + return *sniff.Version, diags +} + +// sniffJSONStateTerraformVersion attempts to sniff the Terraform version +// specification from the given state file source code. The result is either +// a version string or an empty string if no version number could be extracted. +// +// This is a best-effort function intended to produce nicer error messages. It +// should not be used for any real processing. +func sniffJSONStateTerraformVersion(src []byte) string { + type VersionSniff struct { + Version string `json:"terraform_version"` + } + var sniff VersionSniff + + err := json.Unmarshal(src, &sniff) + if err != nil { + return "" + } + + // Attempt to parse the string as a version so we won't report garbage + // as a version number. + _, err = version.NewVersion(sniff.Version) + if err != nil { + return "" + } + + return sniff.Version +} + +// unsupportedFormat is a diagnostic summary message for when the state file +// seems to not be a state file at all, or is not a supported version. +// +// Use invalidFormat instead for the subtly-different case of "this looks like +// it's intended to be a state file but it's not structured correctly". 
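A freestanding sketch of the two-pass approach Read uses: decode only the version marker first, then dispatch to a format-specific decoder. The sniffVersion helper here is illustrative, not the SDK's:

package main

import (
	"encoding/json"
	"fmt"
)

// sniffVersion decodes just the "version" attribute, using a pointer so
// that a missing attribute is distinguishable from an explicit zero.
func sniffVersion(src []byte) (uint64, error) {
	var sniff struct {
		Version *uint64 `json:"version"`
	}
	if err := json.Unmarshal(src, &sniff); err != nil {
		return 0, err
	}
	if sniff.Version == nil {
		return 0, fmt.Errorf("state has no \"version\" attribute")
	}
	return *sniff.Version, nil
}

func main() {
	v, err := sniffVersion([]byte(`{"version": 4, "serial": 7}`))
	fmt.Println(v, err) // 4 <nil>
}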
+const unsupportedFormat = "Unsupported state file format" + +const upgradeFailed = "State format upgrade failed" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/testdata/roundtrip/v4-simple.out.tfstate b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/testdata/roundtrip/v4-simple.out.tfstate new file mode 120000 index 00000000000..d0e79c30a10 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/testdata/roundtrip/v4-simple.out.tfstate @@ -0,0 +1 @@ +v4-simple.in.tfstate \ No newline at end of file diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go new file mode 100644 index 00000000000..9b533317bd2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go @@ -0,0 +1,23 @@ +package statefile + +// looksLikeVersion0 sniffs for the signature indicating a version 0 state +// file. +// +// Version 0 was the number retroactively assigned to Terraform's initial +// (unversioned) binary state file format, which was later superseded by the +// version 1 format in JSON. +// +// Version 0 is no longer supported, so this is used only to detect it and +// return a nice error to the user. +func looksLikeVersion0(src []byte) bool { + // Version 0 files begin with the magic prefix "tfstate". + const magic = "tfstate" + if len(src) < len(magic) { + // Not even long enough to have the magic prefix + return false + } + if string(src[0:len(magic)]) == magic { + return true + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go new file mode 100644 index 00000000000..85b422ad22b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go @@ -0,0 +1,167 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV1 := &stateV1{} + err := json.Unmarshal(src, sV1) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV1(sV1) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2, err := upgradeStateV1ToV2(sV1) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV1 is a representation of the legacy JSON state format version 1. +// +// It is only used to read version 1 JSON files prior to upgrading them to +// the current format. +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. 
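The magic-prefix sniff in looksLikeVersion0 is equivalent to a bytes.HasPrefix check; a compact standalone rendering:

package main

import (
	"bytes"
	"fmt"
)

// looksLikeVersion0 reports whether src begins with the "tfstate" magic
// prefix used by Terraform's original binary state format.
func looksLikeVersion0(src []byte) bool {
	return bytes.HasPrefix(src, []byte("tfstate"))
}

func main() {
	fmt.Println(looksLikeVersion0([]byte("tfstate\x00\x01")))   // true
	fmt.Println(looksLikeVersion0([]byte(`{"version": 4}`))) // false
}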
+ Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Module imports are + // always disjoint, so the path represents a module tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instance on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate.
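A minimal hand-written document (hypothetical values) in the version 1 JSON shape that these structs decode; sniffing it yields version 1, after which readStateV1 would take over:

package main

import (
	"encoding/json"
	"fmt"
)

const v1Src = `{
  "version": 1,
  "serial": 3,
  "modules": [
    {
      "path": ["root"],
      "outputs": {"address": "10.0.0.5"},
      "resources": {
        "aws_instance.web": {
          "type": "aws_instance",
          "primary": {
            "id": "i-abc123",
            "attributes": {"id": "i-abc123", "ami": "ami-12345"}
          }
        }
      }
    }
  ]
}`

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(v1Src), &doc); err != nil {
		panic(err)
	}
	fmt.Println("sniffed version:", doc["version"]) // 1
}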
+ Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that Terraform is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go new file mode 100644 index 00000000000..0b417e1c401 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go @@ -0,0 +1,172 @@ +package statefile + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { + log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*moduleStateV2, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &stateV2{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &remoteStateV2{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := 
copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root. + path = []string{"root"} + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*outputStateV2) + for key, output := range old.Outputs { + outputs[key] = &outputStateV2{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*resourceStateV2) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &moduleStateV2{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*instanceStateV2, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &resourceStateV2{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &instanceStateV2{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Meta: newMeta, + }, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go new file mode 100644 index 00000000000..6d10166b2e6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go @@ -0,0 +1,204 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2 := &stateV2{} + err := json.Unmarshal(src, sV2) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return 
file, diags +} + +func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3, err := upgradeStateV2ToV3(sV2) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 2. +// +// It is only used to read version 2 JSON files prior to upgrading them to +// the current format. +type stateV2 struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV2 `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *backendStateV2 `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV2 `json:"modules"` +} + +type remoteStateV2 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type outputStateV2 struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` +} + +type moduleStateV2 struct { + // Path is the import path from the root module. Module imports are + // always disjoint, so the path represents a module tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*outputStateV2 `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case.
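The v1-to-v2 output upgrade shown earlier reduces to wrapping each bare string in the richer structure; a freestanding sketch using a local mirror of outputStateV2:

package main

import "fmt"

type outputStateV2 struct {
	Sensitive bool        `json:"sensitive"`
	Type      string      `json:"type"`
	Value     interface{} `json:"value"`
}

func main() {
	// v1 outputs are bare strings, so every upgraded output gets
	// Type "string" and Sensitive false.
	old := map[string]string{"address": "10.0.0.5"}
	upgraded := make(map[string]*outputStateV2, len(old))
	for k, v := range old {
		upgraded[k] = &outputStateV2{Type: "string", Value: v, Sensitive: false}
	}
	fmt.Printf("%+v\n", *upgraded["address"])
}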
+ Resources map[string]*resourceStateV2 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` +} + +type resourceStateV2 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instance on which providers will act. + Primary *instanceStateV2 `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*instanceStateV2 `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` +} + +type instanceStateV2 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation.
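A minimal hand-written v2 resource document (hypothetical values) matching resourceStateV2 and instanceStateV2, decoded here with local mirrors of those structs:

package main

import (
	"encoding/json"
	"fmt"
)

type instanceV2 struct {
	ID         string                 `json:"id"`
	Attributes map[string]string      `json:"attributes"`
	Meta       map[string]interface{} `json:"meta"`
	Tainted    bool                   `json:"tainted"`
}

type resourceV2 struct {
	Type      string        `json:"type"`
	DependsOn []string      `json:"depends_on"`
	Primary   *instanceV2   `json:"primary"`
	Deposed   []*instanceV2 `json:"deposed"`
	Provider  string        `json:"provider"`
}

const src = `{
  "type": "aws_instance",
  "depends_on": ["aws_subnet.main"],
  "primary": {
    "id": "i-abc123",
    "attributes": {"id": "i-abc123", "subnet_id": "subnet-1"},
    "meta": {"schema_version": "1"},
    "tainted": false
  },
  "deposed": [],
  "provider": "aws.west"
}`

func main() {
	var r resourceV2
	if err := json.Unmarshal([]byte(src), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Type, r.Primary.ID, r.Provider) // aws_instance i-abc123 aws.west
}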
+ Tainted bool `json:"tainted"` +} + +type backendStateV2 struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go new file mode 100644 index 00000000000..2d03c07c9dc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go @@ -0,0 +1,145 @@ +package statefile + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" +) + +func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { + if old == nil { + return (*stateV3)(nil), nil + } + + var new *stateV3 + { + copy, err := copystructure.Config{Lock: true}.Copy(old) + if err != nil { + panic(err) + } + newWrongType := copy.(*stateV2) + newRightType := (stateV3)(*newWrongType) + new = &newRightType + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. 
If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go new file mode 100644 index 00000000000..1c81e7169e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go @@ -0,0 +1,50 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3 := &stateV3{} + err := json.Unmarshal(src, sV3) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4, err := upgradeStateV3ToV4(sV3) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 3. +// +// It is only used to read version 3 JSON files prior to upgrading them to +// the current format. +// +// The differences between version 2 and version 3 are only in the data and +// not in the structure, so stateV3 actually shares the same structs as +// stateV2. Type stateV3 represents that the data within is formatted as +// expected by the V3 format, rather than the V2 format. 
+type stateV3 stateV2 diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go new file mode 100644 index 00000000000..f08a62b2d59 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go @@ -0,0 +1,444 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { + + if old.Serial < 0 { + // The new format is using uint64 here, which should be fine for any + // real state (we only used positive integers in practice) but we'll + // catch this explicitly here to avoid weird behavior if a state file + // has been tampered with in some way. + return nil, fmt.Errorf("state has serial less than zero, which is invalid") + } + + new := &stateV4{ + TerraformVersion: old.TFVersion, + Serial: uint64(old.Serial), + Lineage: old.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + if new.TerraformVersion == "" { + // Older formats considered this to be optional, but now it's required + // and so we'll stub it out with something that's definitely older + // than the version that really created this state. + new.TerraformVersion = "0.0.0" + } + + for _, msOld := range old.Modules { + if len(msOld.Path) < 1 || msOld.Path[0] != "root" { + return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) + } + + // Convert legacy-style module address into our newer address type. + // Since these old formats are only generated by versions of Terraform + // that don't support count and for_each on modules, we can just assume + // all of the modules are unkeyed. + moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) + for i, name := range msOld.Path[1:] { + moduleAddr[i] = addrs.ModuleInstanceStep{ + Name: name, + InstanceKey: addrs.NoKey, + } + } + + // In a v3 state file, a "resource state" is actually an instance + // state, so we need to fill in a missing level of hierarchy here + // by lazily creating resource states as we encounter them. + // We'll track them in here, keyed on the string representation of + // the resource address. + resourceStates := map[string]*resourceStateV4{} + + for legacyAddr, rsOld := range msOld.Resources { + instAddr, err := parseLegacyResourceAddress(legacyAddr) + if err != nil { + return nil, err + } + + resAddr := instAddr.Resource + rs, exists := resourceStates[resAddr.String()] + if !exists { + var modeStr string + switch resAddr.Mode { + case addrs.ManagedResourceMode: + modeStr = "managed" + case addrs.DataResourceMode: + modeStr = "data" + default: + return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) + } + + // In state versions prior to 4 we allowed each instance of a + // resource to have its own provider configuration address, + // which makes no real sense in practice because providers + // are associated with resources in the configuration.
We + // elevate that to the resource level during this upgrade, + // implicitly taking the provider address of the first instance + // we encounter for each resource. While this is lossy in + // theory, in practice there is no reason for these values to + // differ between instances. + var providerAddr addrs.AbsProviderConfig + oldProviderAddr := rsOld.Provider + if strings.Contains(oldProviderAddr, "provider.") { + // Smells like a new-style provider address, but we'll test it. + var diags tfdiags.Diagnostics + providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) + } + } else { + // Smells like an old-style module-local provider address, + // which we'll need to migrate. We'll assume it's referring + // to the same module the resource is in, which might be + // incorrect but it'll get fixed up next time any updates + // are made to an instance. + if oldProviderAddr != "" { + localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) + } + providerAddr = localAddr.Absolute(moduleAddr) + } else { + providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) + } + } + + rs = &resourceStateV4{ + Module: moduleAddr.String(), + Mode: modeStr, + Type: resAddr.Type, + Name: resAddr.Name, + Instances: []instanceObjectStateV4{}, + ProviderConfig: providerAddr.String(), + } + resourceStates[resAddr.String()] = rs + } + + // Now we'll deal with the instance itself, which may either be + // the first instance in a resource we just created or an additional + // instance for a resource added on a prior loop. + instKey := instAddr.Key + if isOld := rsOld.Primary; isOld != nil { + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) + if err != nil { + return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + for i, isOld := range rsOld.Deposed { + // When we migrate old instances we'll use sequential deposed + // keys just so that the upgrade result is deterministic. New + // deposed keys allocated moving forward will be pseudorandomly + // selected, but we check for collisions and so these + // non-random ones won't hurt. + deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) + if err != nil { + return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + + if instKey != addrs.NoKey && rs.EachMode == "" { + rs.EachMode = "list" + } + } + + for _, rs := range resourceStates { + new.Resources = append(new.Resources, *rs) + } + + if len(msOld.Path) == 1 && msOld.Path[0] == "root" { + // We'll migrate the outputs for this module too, then. + for name, oldOS := range msOld.Outputs { + newOS := outputStateV4{ + Sensitive: oldOS.Sensitive, + } + + valRaw := oldOS.Value + valSrc, err := json.Marshal(valRaw) + if err != nil { + // Should never happen, because this value came from JSON + // in the first place and so we're just round-tripping here. 
+ return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) + } + + // The "type" field in state V2 wasn't really that useful + // since it was only able to capture string vs. list vs. map. + // For this reason, during upgrade we'll just discard it + // altogether and use cty's idea of the implied type of + // turning our old value into JSON. + ty, err := ctyjson.ImpliedType(valSrc) + if err != nil { + // REALLY should never happen, because we literally just + // encoded this as JSON above! + return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) + } + + // ImpliedType tends to produce structural types, but since older + // version of Terraform didn't support those a collection type + // is probably what was intended, so we'll see if we can + // interpret our value as one. + ty = simplifyImpliedValueType(ty) + + tySrc, err := ctyjson.MarshalType(ty) + if err != nil { + return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) + } + + newOS.ValueRaw = json.RawMessage(valSrc) + newOS.ValueTypeRaw = json.RawMessage(tySrc) + + new.RootOutputs[name] = newOS + } + } + } + + new.normalize() + + return new, nil +} + +func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { + + // Schema versions were, in prior formats, a private concern of the provider + // SDK, and not a first-class concept in the state format. Here we're + // sniffing for the pre-0.12 SDK's way of representing schema versions + // and promoting it to our first-class field if we find it. We'll ignore + // it if it doesn't look like what the SDK would've written. If this + // sniffing fails then we'll assume schema version 0. + var schemaVersion uint64 + migratedSchemaVersion := false + if raw, exists := isOld.Meta["schema_version"]; exists { + switch tv := raw.(type) { + case string: + v, err := strconv.ParseUint(tv, 10, 64) + if err == nil { + schemaVersion = v + migratedSchemaVersion = true + } + case int: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + case float64: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + } + } + + private := map[string]interface{}{} + for k, v := range isOld.Meta { + if k == "schema_version" && migratedSchemaVersion { + // We're gonna promote this into our first-class schema version field + continue + } + private[k] = v + } + var privateJSON []byte + if len(private) != 0 { + var err error + privateJSON, err = json.Marshal(private) + if err != nil { + // This shouldn't happen, because the Meta values all came from JSON + // originally anyway. 
+ return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) + } + } + + var status string + if isOld.Tainted { + status = "tainted" + } + + var instKeyRaw interface{} + switch tk := instKey.(type) { + case addrs.IntKey: + instKeyRaw = int(tk) + case addrs.StringKey: + instKeyRaw = string(tk) + default: + if instKeyRaw != nil { + return nil, fmt.Errorf("unsupported instance key: %#v", instKey) + } + } + + var attributes map[string]string + if isOld.Attributes != nil { + attributes = make(map[string]string, len(isOld.Attributes)) + for k, v := range isOld.Attributes { + attributes[k] = v + } + } + if isOld.ID != "" { + // As a special case, if we don't already have an "id" attribute and + // yet there's a non-empty first-class ID on the old object then we'll + // create a synthetic id attribute to avoid losing that first-class id. + // In practice this generally arises only in tests where state literals + // are hand-written in a non-standard way; real code prior to 0.12 + // would always force the first-class ID to be copied into the + // id attribute before storing. + if attributes == nil { + attributes = make(map[string]string, len(isOld.Attributes)) + } + if idVal := attributes["id"]; idVal == "" { + attributes["id"] = isOld.ID + } + } + + dependencies := make([]string, len(rsOld.Dependencies)) + for i, v := range rsOld.Dependencies { + depStr, err := parseLegacyDependency(v) + if err != nil { + return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err) + } + dependencies[i] = depStr + } + + return &instanceObjectStateV4{ + IndexKey: instKeyRaw, + Status: status, + Deposed: string(deposedKey), + AttributesFlat: attributes, + Dependencies: dependencies, + SchemaVersion: schemaVersion, + PrivateRaw: privateJSON, + }, nil +} + +// parseLegacyResourceAddress parses the different identifier format used +// state formats before version 4, like "instance.name.0". +func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { + var ret addrs.ResourceInstance + + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + ret.Resource.Mode = addrs.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + ret.Resource.Mode = addrs.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + ret.Resource.Type = parts[0] + ret.Resource.Name = parts[1] + ret.Key = addrs.NoKey + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) + } + + ret.Key = addrs.IntKey(idx) + } + + return ret, nil +} + +// simplifyImpliedValueType attempts to heuristically simplify a value type +// derived from a legacy stored output value into something simpler that +// is closer to what would've fitted into the pre-v0.12 value type system. 
+func simplifyImpliedValueType(ty cty.Type) cty.Type { + switch { + case ty.IsTupleType(): + // If all of the element types are the same then we'll make this + // a list instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyTuple) { + // Don't know what the element type would be, then. + return ty + } + + etys := ty.TupleElementTypes() + ety := etys[0] + for _, other := range etys[1:] { + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.List(ety) + + case ty.IsObjectType(): + // If all of the attribute types are the same then we'll make this + // a map instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyObject) { + // Don't know what the element type would be, then. + return ty + } + + atys := ty.AttributeTypes() + var ety cty.Type + for _, other := range atys { + if ety == cty.NilType { + ety = other + continue + } + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.Map(ety) + + default: + // No other normalizations are possible + return ty + } +} + +func parseLegacyDependency(s string) (string, error) { + parts := strings.Split(s, ".") + ret := parts[0] + for _, part := range parts[1:] { + if part == "*" { + break + } + if i, err := strconv.Atoi(part); err == nil { + ret = ret + fmt.Sprintf("[%d]", i) + break + } + ret = ret + "." + part + } + + // The result must parse as a reference, or else we'll create an invalid + // state file. + var diags tfdiags.Diagnostics + _, diags = addrs.ParseRefStr(ret) + if diags.HasErrors() { + return "", diags.Err() + } + + return ret, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go new file mode 100644 index 00000000000..164b57f827f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go @@ -0,0 +1,604 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + version "github.com/hashicorp/go-version" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4 := &stateV4{} + err := json.Unmarshal(src, sV4) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var tfVersion *version.Version + if sV4.TerraformVersion != "" { + var err error + tfVersion, err = version.NewVersion(sV4.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid Terraform version string", + fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), + )) + } + } + + file := &File{ + TerraformVersion: 
tfVersion, + Serial: sV4.Serial, + Lineage: sV4.Lineage, + } + + state := states.NewState() + + for _, rsV4 := range sV4.Resources { + rAddr := addrs.Resource{ + Type: rsV4.Type, + Name: rsV4.Name, + } + switch rsV4.Mode { + case "managed": + rAddr.Mode = addrs.ManagedResourceMode + case "data": + rAddr.Mode = addrs.DataResourceMode + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource mode in state", + fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), + )) + continue + } + + moduleAddr := addrs.RootModuleInstance + if rsV4.Module != "" { + var addrDiags tfdiags.Diagnostics + moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + } + + providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + + var eachMode states.EachMode + switch rsV4.EachMode { + case "": + eachMode = states.NoEach + case "list": + eachMode = states.EachList + case "map": + eachMode = states.EachMap + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource metadata in state", + fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), rsV4.EachMode), + )) + continue + } + + ms := state.EnsureModule(moduleAddr) + + // Ensure the resource container object is present in the state. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + + for _, isV4 := range rsV4.Instances { + keyRaw := isV4.IndexKey + var key addrs.InstanceKey + switch tk := keyRaw.(type) { + case int: + key = addrs.IntKey(tk) + case float64: + // Since JSON only has one number type, reading from encoding/json + // gives us a float64 here even if the number is whole. + // float64 has a smaller integer range than int, but in practice + // we rarely have more than a few tens of instances and so + // it's unlikely that we'll exhaust the 52 bits in a float64. + key = addrs.IntKey(int(tk)) + case string: + key = addrs.StringKey(tk) + default: + if keyRaw != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), + )) + continue + } + key = addrs.NoKey + } + + instAddr := rAddr.Instance(key) + + obj := &states.ResourceInstanceObjectSrc{ + SchemaVersion: isV4.SchemaVersion, + } + + { + // Instance attributes + switch { + case isV4.AttributesRaw != nil: + obj.AttrsJSON = isV4.AttributesRaw + case isV4.AttributesFlat != nil: + obj.AttrsFlat = isV4.AttributesFlat + default: + // This is odd, but we'll accept it and just treat the + // object as being empty. In practice this should arise + // only from the contrived sort of state objects we tend + // to hand-write inline in tests.
+ obj.AttrsJSON = []byte{'{', '}'} + } + } + + { + // Status + raw := isV4.Status + switch raw { + case "": + obj.Status = states.ObjectReady + case "tainted": + obj.Status = states.ObjectTainted + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), + )) + continue + } + } + + if raw := isV4.PrivateRaw; len(raw) > 0 { + obj.Private = raw + } + + { + depsRaw := isV4.Dependencies + deps := make([]addrs.Referenceable, 0, len(depsRaw)) + for _, depRaw := range depsRaw { + ref, refDiags := addrs.ParseRefStr(depRaw) + diags = diags.Append(refDiags) + if refDiags.HasErrors() { + continue + } + if len(ref.Remaining) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), + )) + } + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) + } + deps = append(deps, ref.Subject) + } + obj.Dependencies = deps + } + + switch { + case isV4.Deposed != "": + dk := states.DeposedKey(isV4.Deposed) + if len(dk) != 8 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), + )) + continue + } + is := ms.ResourceInstance(instAddr) + if is.HasDeposed(dk) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), + )) + continue + } + + ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) + default: + is := ms.ResourceInstance(instAddr) + if is.HasCurrent() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), + )) + continue + } + + ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) + } + } + + // We repeat this after creating the instances because + // SetResourceInstanceCurrent automatically resets this metadata based + // on the incoming objects. That behavior is useful when we're making + // piecemeal updates to the state during an apply, but when we're + // reading the state file we want to reflect its contents exactly. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + } + + // The root module is special in that we persist its attributes and thus + // need to reload them now. (For descendent modules we just re-calculate + // them based on the latest configuration on each run.) 
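	// For illustration only (hypothetical values, not taken from this diff):
	// the loop below expects each root output in the v4 JSON to carry both a
	// value and an explicit type, matching the outputStateV4 field tags
	// defined later in this file, e.g.
	//
	//	"outputs": {
	//	  "instance_ip": {
	//	    "value": "10.0.0.5",
	//	    "type": "string"
	//	  }
	//	}
	//
	// so the type is decoded first and the value is then unmarshalled
	// against it.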
{ + rootModule := state.RootModule() + for name, fos := range sV4.RootOutputs { + os := &states.OutputValue{} + os.Sensitive = fos.Sensitive + + ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value type in state", + fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), + )) + continue + } + + val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value saved in state", + fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), + )) + continue + } + + os.Value = val + rootModule.OutputValues[name] = os + } + } + + file.State = state + return file, diags +} + +func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { + // Here we'll convert back from the "File" representation to our + // stateV4 struct representation and write that. + // + // While we support legacy state formats for reading, we only support the + // latest for writing and so if a V5 is added in future then this function + // should be deleted and replaced with a writeStateV5, even though the + // read/prepare V4 functions above would stick around. + + var diags tfdiags.Diagnostics + if file == nil || file.State == nil { + panic("attempt to write nil state to file") + } + + var terraformVersion string + if file.TerraformVersion != nil { + terraformVersion = file.TerraformVersion.String() + } + + sV4 := &stateV4{ + TerraformVersion: terraformVersion, + Serial: file.Serial, + Lineage: file.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + for name, os := range file.State.RootModule().OutputValues { + src, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occurred while serializing output value %q: %s.", name, err), + )) + continue + } + + typeSrc, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occurred while serializing the type of output value %q: %s.", name, err), + )) + continue + } + + sV4.RootOutputs[name] = outputStateV4{ + Sensitive: os.Sensitive, + ValueRaw: json.RawMessage(src), + ValueTypeRaw: json.RawMessage(typeSrc), + } + } + + for _, ms := range file.State.Modules { + moduleAddr := ms.Addr + for _, rs := range ms.Resources { + resourceAddr := rs.Addr + + var mode string + switch resourceAddr.Mode { + case addrs.ManagedResourceMode: + mode = "managed" + case addrs.DataResourceMode: + mode = "data" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), + )) + continue + } + + var eachMode string + switch rs.EachMode { + case states.NoEach: + eachMode = "" + case states.EachList: + eachMode = "list" + case states.EachMap: + eachMode = "map" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), + )) + continue + } + + sV4.Resources
= append(sV4.Resources, resourceStateV4{ + Module: moduleAddr.String(), + Mode: mode, + Type: resourceAddr.Type, + Name: resourceAddr.Name, + EachMode: eachMode, + ProviderConfig: rs.ProviderConfig.String(), + Instances: []instanceObjectStateV4{}, + }) + rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) + + for key, is := range rs.Instances { + if is.HasCurrent() { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, is.Current, states.NotDeposed, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + for dk, obj := range is.Deposed { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, obj, dk, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + } + } + } + + sV4.normalize() + + src, err := json.MarshalIndent(sV4, "", " ") + if err != nil { + // Shouldn't happen if we do our conversion to *stateV4 correctly above. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf("An error occurred while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err), + )) + return diags + } + src = append(src, '\n') + + _, err = w.Write(src) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write state", + fmt.Sprintf("An error occurred while writing the serialized state: %s.", err), + )) + return diags + } + + return diags +} + +func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var status string + switch obj.Status { + case states.ObjectReady: + status = "" + case states.ObjectTainted: + status = "tainted" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), + )) + } + + var privateRaw []byte + if len(obj.Private) > 0 { + privateRaw = obj.Private + } + + deps := make([]string, len(obj.Dependencies)) + for i, depAddr := range obj.Dependencies { + deps[i] = depAddr.String() + } + + var rawKey interface{} + switch tk := key.(type) { + case addrs.IntKey: + rawKey = int(tk) + case addrs.StringKey: + rawKey = string(tk) + default: + if key != addrs.NoKey { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), + )) + } + } + + return append(isV4s, instanceObjectStateV4{ + IndexKey: rawKey, + Deposed: string(deposed), + Status: status, + SchemaVersion: obj.SchemaVersion, + AttributesFlat: obj.AttrsFlat, + AttributesRaw: obj.AttrsJSON, + PrivateRaw: privateRaw, + Dependencies: deps, + }), diags +} + +type stateV4 struct { + Version stateVersionV4 `json:"version"` + TerraformVersion string `json:"terraform_version"` + Serial uint64 `json:"serial"` + Lineage string `json:"lineage"` + RootOutputs map[string]outputStateV4 `json:"outputs"` + Resources []resourceStateV4 `json:"resources"` +} + +// normalize makes some in-place changes to normalize the way items are +// stored to ensure that two functionally-equivalent states will be stored +// identically.
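// For example, two functionally-equivalent states that enumerate the same
// resources and instances in different orders marshal to byte-identical JSON
// once the sorts below have run, which keeps textual diffs of the serialized
// state stable.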
+func (s *stateV4) normalize() { + sort.Stable(sortResourcesV4(s.Resources)) + for _, rs := range s.Resources { + sort.Stable(sortInstancesV4(rs.Instances)) + } +} + +type outputStateV4 struct { + ValueRaw json.RawMessage `json:"value"` + ValueTypeRaw json.RawMessage `json:"type"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type resourceStateV4 struct { + Module string `json:"module,omitempty"` + Mode string `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + EachMode string `json:"each,omitempty"` + ProviderConfig string `json:"provider"` + Instances []instanceObjectStateV4 `json:"instances"` +} + +type instanceObjectStateV4 struct { + IndexKey interface{} `json:"index_key,omitempty"` + Status string `json:"status,omitempty"` + Deposed string `json:"deposed,omitempty"` + + SchemaVersion uint64 `json:"schema_version"` + AttributesRaw json.RawMessage `json:"attributes,omitempty"` + AttributesFlat map[string]string `json:"attributes_flat,omitempty"` + + PrivateRaw []byte `json:"private,omitempty"` + + Dependencies []string `json:"depends_on,omitempty"` +} + +// stateVersionV4 is a weird special type we use to produce our hard-coded +// "version": 4 in the JSON serialization. +type stateVersionV4 struct{} + +func (sv stateVersionV4) MarshalJSON() ([]byte, error) { + return []byte{'4'}, nil +} + +func (sv stateVersionV4) UnmarshalJSON([]byte) error { + // Nothing to do: we already know we're version 4 + return nil +} + +type sortResourcesV4 []resourceStateV4 + +func (sr sortResourcesV4) Len() int { return len(sr) } +func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } +func (sr sortResourcesV4) Less(i, j int) bool { + switch { + case sr[i].Mode != sr[j].Mode: + return sr[i].Mode < sr[j].Mode + case sr[i].Type != sr[j].Type: + return sr[i].Type < sr[j].Type + case sr[i].Name != sr[j].Name: + return sr[i].Name < sr[j].Name + default: + return false + } +} + +type sortInstancesV4 []instanceObjectStateV4 + +func (si sortInstancesV4) Len() int { return len(si) } +func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } +func (si sortInstancesV4) Less(i, j int) bool { + ki := si[i].IndexKey + kj := si[j].IndexKey + if ki != kj { + if (ki == nil) != (kj == nil) { + return ki == nil + } + if kii, isInt := ki.(int); isInt { + if kji, isInt := kj.(int); isInt { + return kii < kji + } + return true + } + if kis, isStr := ki.(string); isStr { + if kjs, isStr := kj.(string); isStr { + return kis < kjs + } + return true + } + } + if si[i].Deposed != si[j].Deposed { + return si[i].Deposed < si[j].Deposed + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go new file mode 100644 index 00000000000..8fdca45803a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go @@ -0,0 +1,17 @@ +package statefile + +import ( + "io" + + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// Write writes the given state to the given writer in the current state +// serialization format. +func Write(s *File, w io.Writer) error { + // Always record the current terraform version in the state. 
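	// (This assignment deliberately overwrites whatever the caller left in
	// s.TerraformVersion, so the serialized "terraform_version" field always
	// reflects the SDK's compiled-in version.)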
+ s.TerraformVersion = tfversion.SemVer + + diags := writeStateV4(s, w) + return diags.Err() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go new file mode 100644 index 00000000000..8675efc3c83 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go @@ -0,0 +1,537 @@ +package states + +import ( + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// SyncState is a wrapper around State that provides concurrency-safe access to +// various common operations that occur during a Terraform graph walk, or other +// similar concurrent contexts. +// +// When a SyncState wrapper is in use, no concurrent direct access to the +// underlying objects is permitted unless the caller first acquires an explicit +// lock, using the Lock and Unlock methods. Most callers should _not_ +// explicitly lock, and should instead use the other methods of this type that +// handle locking automatically. +// +// Since SyncState is able to safely consolidate multiple updates into a single +// atomic operation, many of its methods are at a higher level than those +// of the underlying types, and operate on the state as a whole rather than +// on individual sub-structures of the state. +// +// SyncState can only protect against races within its own methods. It cannot +// provide any guarantees about the order in which concurrent operations will +// be processed, so callers may still need to employ higher-level techniques +// for ensuring correct operation sequencing, such as building and walking +// a dependency graph. +type SyncState struct { + state *State + lock sync.RWMutex +} + +// Module returns a snapshot of the state of the module instance with the given +// address, or nil if no such module is tracked. +// +// The return value is a pointer to a copy of the module state, which the +// caller may then freely access and mutate. However, since the module state +// tends to be a large data structure with many child objects, where possible +// callers should prefer to use a more granular accessor to access a child +// module directly, and thus reduce the amount of copying required. +func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { + s.lock.RLock() + ret := s.state.Module(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// RemoveModule removes the entire state for the given module, taking with +// it any resources associated with the module. This should generally be +// called only for modules whose resources have all been destroyed, but +// that is not enforced by this method. +func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.RemoveModule(addr) +} + +// OutputValue returns a snapshot of the state of the output value with the +// given address, or nil if no such output value is tracked. +// +// The return value is a pointer to a copy of the output value state, which the +// caller may then freely access and mutate. +func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + s.lock.RLock() + ret := s.state.OutputValue(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// SetOutputValue writes a given output value into the state, overwriting +// any existing value of the same name. 
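//
// A minimal usage sketch (hypothetical address and value, assuming the
// addrs.OutputValue.Absolute helper from the vendored addrs package):
//
//	addr := addrs.OutputValue{Name: "vpc_id"}.Absolute(addrs.RootModuleInstance)
//	syncState.SetOutputValue(addr, cty.StringVal("vpc-123"), false)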
+// +// If the module containing the output is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) +} + +// RemoveOutputValue removes the stored value for the output value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveOutputValue(addr.OutputValue.Name) + s.maybePruneModule(addr.Module) +} + +// LocalValue returns the current value associated with the given local value +// address. +func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { + s.lock.RLock() + // cty.Value is immutable, so we don't need any extra copying here. + ret := s.state.LocalValue(addr) + s.lock.RUnlock() + return ret +} + +// SetLocalValue writes a given local value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the local value is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetLocalValue(addr.LocalValue.Name, value) +} + +// RemoveLocalValue removes the stored value for the local value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveLocalValue(addr.LocalValue.Name) + s.maybePruneModule(addr.Module) +} + +// Resource returns a snapshot of the state of the resource with the given +// address, or nil if no such resource is tracked. +// +// The return value is a pointer to a copy of the resource state, which the +// caller may then freely access and mutate. +func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { + s.lock.RLock() + ret := s.state.Resource(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstance returns a snapshot of the state of the resource instance with +// the given address, or nil if no such instance is tracked. +// +// The return value is a pointer to a copy of the instance state, which the +// caller may then freely access and mutate. +func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + s.lock.RLock() + ret := s.state.ResourceInstance(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstanceObject returns a snapshot of the current instance object +// of the given generation belonging to the instance with the given address, +// or nil if no such object is tracked. +// +// The return value is a pointer to a copy of the object, which the caller may +// then freely access and mutate.
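// (A Generation is either states.CurrentGen, selecting the current object,
// or a specific DeposedKey selecting one of the deposed objects.)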
+func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { + s.lock.RLock() + defer s.lock.RUnlock() + + inst := s.state.ResourceInstance(addr) + if inst == nil { + return nil + } + return inst.GetGeneration(gen).DeepCopy() +} + +// SetResourceMeta updates the resource-level metadata for the resource at +// the given address, creating the containing module state and resource state +// as a side-effect if not already present. +func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceMeta(addr.Resource, eachMode, provider) +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed, +// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead +// to safely check first.) +func (s *SyncState) RemoveResource(addr addrs.AbsResource) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// RemoveResourceIfEmpty is similar to RemoveResource but first checks to +// make sure there are no instances or objects left in the resource. +// +// Returns true if the resource was removed, or false if remaining child +// objects prevented its removal. Returns true also if the resource was +// already absent, and thus no action needed to be taken. +func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return true // nothing to do + } + rs := ms.Resource(addr.Resource) + if rs == nil { + return true // nothing to do + } + if len(rs.Instances) != 0 { + // We don't check here for the possibility of instances that exist + // but don't have any objects because it's the responsibility of the + // instance-mutation methods to prune those away automatically. + return false + } + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) + return true +} + +// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a +// resource has changed from having "count" set to not set, or vice-versa, and +// so we need to rename the zeroth instance key to no key at all, or vice-versa. +// +// Set countEnabled to true if the resource has count set in its new +// configuration, or false if it does not. +// +// The state is modified in-place if necessary, moving a resource instance +// between the two addresses. The return value is true if a change was made, +// and false otherwise. +func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return false + } + + relAddr := addr.Resource + rs := ms.Resource(relAddr) + if rs == nil { + return false + } + huntKey := addrs.NoKey + replaceKey := addrs.InstanceKey(addrs.IntKey(0)) + if !countEnabled { + huntKey, replaceKey = replaceKey, huntKey + } + + is, exists := rs.Instances[huntKey] + if !exists { + return false + } + + if _, exists := rs.Instances[replaceKey]; exists { + // If the replacement key also exists then we'll do nothing and keep both. 
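				// (For example, after a partially-applied change both
				// aws_instance.a and aws_instance.a[0] might be tracked;
				// renaming would clobber one of them, so both are kept.
				// Illustrative addresses only.)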
+ return false + } + + // If we get here then we need to "rename" from hunt to replace + rs.Instances[replaceKey] = is + delete(rs.Instances, huntKey) + return true +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance as a whole will be removed, which +// may in turn also remove the containing module if it becomes empty. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The provider address and "each mode" are resource-wide settings and so they +// are updated for all other instances of the same resource as a side-effect of +// this call. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// DeposeResourceInstanceObject moves the current instance object for the +// given resource instance address into the deposed set, leaving the instance +// without a current object. +// +// The return value is the newly-allocated deposed key, or NotDeposed if the +// given instance is already lacking a current object. 
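// (A DeposedKey is an 8-character identifier; the v4 statefile reader earlier
// in this diff rejects deposed keys of any other length.)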
+// +// If the containing module for this resource or the resource itself are not +// already tracked in state then there cannot be a current object for the +// given instance, and so NotDeposed will be returned without modifying the +// state at all. +func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return NotDeposed + } + + return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) +} + +// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject +// but uses a pre-allocated key. It's the caller's responsibility to ensure +// that there aren't any races to use a particular key; this method will panic +// if the given key is already in use. +func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + if forcedKey == NotDeposed { + // Usage error: should use DeposeResourceInstanceObject in this case + panic("DeposeResourceInstanceObjectForceKey called without forced key") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + return // Nothing to do, since there can't be any current object either. + } + + ms.deposeResourceInstanceObject(addr.Resource, forcedKey) +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceAll(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceDeposed(addr.Resource, key) + s.maybePruneModule(addr.Module) +} + +// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the +// given key on the specified resource as the current object for that instance +// if and only if that would not cause us to forget an existing current +// object for that instance. +// +// Returns true if the object was restored to current, or false if no change +// was made at all. +func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { + s.lock.Lock() + defer s.lock.Unlock() + + if key == NotDeposed { + panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + // Nothing to do, since the specified deposed object cannot exist. + return false + } + + return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) +} + +// RemovePlannedResourceInstanceObjects removes from the state any resource +// instance objects that have the status ObjectPlanned, indicating that they +// are just transient placeholders created during planning. +// +// Note that this does not restore any "ready" or "tainted" object that might +// have been present before the planned object was written.
The only real use + for this method is in preparing the state created during a refresh walk, + where we run the planning step for certain instances just to create enough + information to allow correct expression evaluation within provider and + data resource blocks. Discarding planned instances in that case is okay + because the refresh phase only creates planned objects to stand in for + objects that don't exist yet, and thus the planned object must have been + absent before by definition. +func (s *SyncState) RemovePlannedResourceInstanceObjects() { + // TODO: Merge together the refresh and plan phases into a single walk, + // so we can remove the need to create this "partial plan" during refresh + // that we then need to clean up before proceeding. + + s.lock.Lock() + defer s.lock.Unlock() + + for _, ms := range s.state.Modules { + moduleAddr := ms.Addr + + for _, rs := range ms.Resources { + resAddr := rs.Addr + + for ik, is := range rs.Instances { + instAddr := resAddr.Instance(ik) + + if is.Current != nil && is.Current.Status == ObjectPlanned { + // Setting the current instance to nil removes it from the + // state altogether if there are not also deposed instances. + ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) + } + + for dk, obj := range is.Deposed { + // Deposed objects should never be "planned", but we'll + // do this anyway for the sake of completeness. + if obj.Status == ObjectPlanned { + ms.ForgetResourceInstanceDeposed(instAddr, dk) + } + } + } + } + + // We may have deleted some objects, which means that we may have + // left a module empty, and so we must prune to preserve the invariant + // that only the root module is allowed to be empty. + s.maybePruneModule(moduleAddr) + } +} + +// Lock acquires an explicit lock on the state, allowing direct read and write +// access to the returned state object. The caller must call Unlock once +// access is no longer needed, and then immediately discard the state pointer. +// +// Most callers should not use this. Instead, use the concurrency-safe +// accessors and mutators provided directly on SyncState. +func (s *SyncState) Lock() *State { + s.lock.Lock() + return s.state +} + +// Unlock releases a lock previously acquired by Lock, at which point the +// caller must cease all use of the state pointer that was returned. +// +// Do not call this method except to end an explicit lock acquired by +// Lock. If a caller calls Unlock without first holding the lock, behavior +// is undefined. +func (s *SyncState) Unlock() { + s.lock.Unlock() +} + +// maybePruneModule will remove a module from the state altogether if it is +// empty, unless it's the root module which must always be present. +// +// This helper method is not concurrency-safe on its own, so must only be +// called while the caller is already holding the lock for writing. +func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + // We never prune the root.
+ return + } + + ms := s.state.Module(addr) + if ms == nil { + return + } + + if ms.empty() { + log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) + s.state.RemoveModule(addr) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/cache.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/cache.go new file mode 100644 index 00000000000..99e2c0306af --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/cache.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. +func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. +// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/credentials.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/credentials.go new file mode 100644 index 00000000000..00042a0a543 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/credentials.go @@ -0,0 +1,63 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "net/http" + + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. +// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. 
Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) + + // Token returns the authentication token. + Token() string +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. +func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/from_map.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/from_map.go new file mode 100644 index 00000000000..f91006aece7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/from_map.go @@ -0,0 +1,18 @@ +package auth + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. +func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/helper_program.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/helper_program.go new file mode 100644 index 00000000000..93f52604a4b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/helper_program.go @@ -0,0 +1,80 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. 
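//
// For illustration (hypothetical helper path and token): a lookup for
// app.terraform.io runs
//
//	/usr/bin/terraform-credentials-helper get app.terraform.io
//
// and expects a JSON object such as {"token":"abc123"} on stdout, which
// ForHost below converts via HostCredentialsFromMap.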
+// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). +func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/static.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/static.go new file mode 100644 index 00000000000..b5108a4a2ec --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/static.go @@ -0,0 +1,28 @@ +package auth + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. 
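//
// A minimal usage sketch (hypothetical hostname and token):
//
//	src := StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
//		svchost.Hostname("app.terraform.io"): {"token": "abc123"},
//	})
//	creds, _ := src.ForHost(svchost.Hostname("app.terraform.io"))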
+func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/token_credentials.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/token_credentials.go new file mode 100644 index 00000000000..9358bcb6444 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth/token_credentials.go @@ -0,0 +1,25 @@ +package auth + +import ( + "net/http" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer" +type HostCredentialsToken string + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. +func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} + +// Token returns the authentication token. +func (tc HostCredentialsToken) Token() string { + return string(tc) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/disco.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/disco.go new file mode 100644 index 00000000000..c770338be1b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/disco.go @@ -0,0 +1,259 @@ +// Package disco handles Terraform's remote service discovery protocol. +// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost" + "github.com/hashicorp/terraform-plugin-sdk/internal/svchost/auth" +) + +const ( + // Fixed path to the discovery manifest. + discoPath = "/.well-known/terraform.json" + + // Arbitrary-but-small number to prevent runaway redirect loops. + maxRedirects = 3 + + // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. + discoTimeout = 11 * time.Second + + // 1MB - to prevent abusive services from using loads of our memory. + maxDiscoDocBytes = 1 * 1024 * 1024 +) + +// httpTransport is overridden during tests, to skip TLS verification. +var httpTransport = cleanhttp.DefaultPooledTransport() + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. 
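//
// A typical sequence is New (or NewWithCredentialsSource), optionally
// ForceHostServices for pre-configured or test hosts, and then Discover or
// DiscoverServiceURL per hostname; results are cached until Forget or
// ForgetAll is called.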
+type Disco struct { + hostCache map[svchost.Hostname]*Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.RoundTripper to use. + Transport http.RoundTripper +} + +// New returns a new initialized discovery object. +func New() *Disco { + return NewWithCredentialsSource(nil) +} + +// NewWithCredentialsSource returns a new discovery object initialized with +// the given credentials source. +func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { + return &Disco{ + hostCache: make(map[svchost.Hostname]*Host), + credsSrc: credsSrc, + Transport: httpTransport, + } +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// CredentialsForHost returns a non-nil HostCredentials if the embedded source has +// credentials available for the host, and a nil HostCredentials if it does not. +func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { + if d.credsSrc == nil { + return nil, nil + } + return d.credsSrc.ForHost(hostname) +} + +// ForceHostServices provides a pre-defined set of services for a given +// host, which prevents the receiver from attempting network-based discovery +// for the given host. Instead, the given services map will be returned +// verbatim. +// +// When providing "forced" services, any relative URLs are resolved against +// the initial discovery URL that would have been used for network-based +// discovery, yielding the same results as if the given map were published +// at the host's default discovery URL, though using absolute URLs is strongly +// recommended to make the configured behavior more explicit. +func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { + if services == nil { + services = map[string]interface{}{} + } + + d.hostCache[hostname] = &Host{ + discoURL: &url.URL{ + Scheme: "https", + Host: string(hostname), + Path: discoPath, + }, + hostname: hostname.ForDisplay(), + services: services, + transport: d.Transport, + } +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say "host does not provide a service", +// regardless of whether that is due to that service specifically being absent +// or due to the host not providing Terraform services at all, since we don't +// wish to expose the detail of whole-host discovery to an end-user. +func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { + if host, cached := d.hostCache[hostname]; cached { + return host, nil + } + + host, err := d.discover(hostname) + if err != nil { + return nil, err + } + d.hostCache[hostname] = host + + return host, nil +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. 
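//
// For example (hypothetical host and service ID), DiscoverServiceURL(host,
// "modules.v1") fetches the host's discovery document if it is not already
// cached and then resolves its "modules.v1" entry via ServiceURL.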
+func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { + host, err := d.Discover(hostname) + if err != nil { + return nil, err + } + return host.ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. +func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { + discoURL := &url.URL{ + Scheme: "https", + Host: hostname.String(), + Path: discoPath, + } + + client := &http.Client{ + Transport: d.Transport, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // this error will never actually be seen + } + return nil + }, + } + + req := &http.Request{ + Header: make(http.Header), + Method: "GET", + URL: discoURL, + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + creds, err := d.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) + } + if creds != nil { + // Update the request to include credentials. + creds.PrepareRequest(req) + } + + log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request discovery document: %v", err) + } + defer resp.Body.Close() + + host := &Host{ + // Use the discovery URL from resp.Request in + // case the client followed any redirects. + discoURL: resp.Request.URL, + hostname: hostname.ForDisplay(), + transport: d.Transport, + } + + // Return the host without any services. + if resp.StatusCode == 404 { + return host, nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) + } + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) + } + if mediaType != "application/json" { + return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) + } + + // This doesn't catch chunked encoding, because ContentLength is -1 in that case. + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + return nil, fmt.Errorf( + "Discovery doc response is too large (got %d bytes; limit %d)", + resp.ContentLength, maxDiscoDocBytes, + ) + } + + // If the response is using chunked encoding then we can't predict its + // size, but we'll at least prevent reading the entire thing into memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + return nil, fmt.Errorf("Error reading discovery document body: %v", err) + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) + } + host.services = services + + return host, nil +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. 
+func (d *Disco) Forget(hostname svchost.Hostname) { + delete(d.hostCache, hostname) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. +func (d *Disco) ForgetAll() { + d.hostCache = make(map[svchost.Hostname]*Host) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/host.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/host.go new file mode 100644 index 00000000000..0d6ef038378 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/disco/host.go @@ -0,0 +1,264 @@ +package disco + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" +) + +const versionServiceID = "versions.v1" + +// Host represents a service discovered host. +type Host struct { + discoURL *url.URL + hostname string + services map[string]interface{} + transport http.RoundTripper +} + +// Constraints represents the version constraints of a service. +type Constraints struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// ErrServiceNotProvided is returned when the service is not provided. +type ErrServiceNotProvided struct { + hostname string + service string +} + +// Error returns a customized error message. +func (e *ErrServiceNotProvided) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not provide a %s service", e.service) + } + return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) +} + +// ErrVersionNotSupported is returned when the version is not supported. +type ErrVersionNotSupported struct { + hostname string + service string + version string +} + +// Error returns a customized error message. +func (e *ErrVersionNotSupported) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not support %s version %s", e.service, e.version) + } + return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) +} + +// ErrNoVersionConstraints is returned when checkpoint was disabled +// or the endpoint to query for version constraints was unavailable. +type ErrNoVersionConstraints struct { + disabled bool +} + +// Error returns a customized error message. +func (e *ErrNoVersionConstraints) Error() string { + if e.disabled { + return "checkpoint disabled" + } + return "unable to contact versions service" +} + +// ServiceURL returns the URL associated with the given service identifier, +// which should be of the form "servicename.vN". +// +// A non-nil result is always an absolute URL with a scheme of either HTTPS +// or HTTP. +func (h *Host) ServiceURL(id string) (*url.URL, error) { + svc, ver, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + urlStr, ok := h.services[id].(string) + if !ok { + // See if we have a matching service as that would indicate + // the service is supported, but not the requested version. 
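			// (For example, a request for "state.v2" against a document that
			// only declares "state.v1" lands here and reports the version,
			// rather than the whole service, as unsupported.)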
+		for serviceID := range h.services {
+			if strings.HasPrefix(serviceID, svc+".") {
+				return nil, &ErrVersionNotSupported{
+					hostname: h.hostname,
+					service:  svc,
+					version:  ver.Original(),
+				}
+			}
+		}
+
+		// No discovered services match the requested service.
+		return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+	}
+
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse service URL: %v", err)
+	}
+
+	// Make relative URLs absolute using our discovery URL.
+	if !u.IsAbs() {
+		u = h.discoURL.ResolveReference(u)
+	}
+
+	if u.Scheme != "https" && u.Scheme != "http" {
+		return nil, fmt.Errorf("Service URL is using an unsupported scheme: %s", u.Scheme)
+	}
+	if u.User != nil {
+		return nil, fmt.Errorf("Embedded username/password information is not permitted")
+	}
+
+	// Fragment part is irrelevant, since we're not a browser.
+	u.Fragment = ""
+
+	return h.discoURL.ResolveReference(u), nil
+}
+
+// VersionConstraints returns the constraints for a given service identifier
+// (which should be of the form "servicename.vN") and product.
+//
+// When an exact (service and version) match is found, the constraints for
+// that service are returned.
+//
+// When the requested version is not provided but the service is, we will
+// search for all alternative versions. If multiple alternative versions
+// are found, the constraints of the latest available version are returned.
+//
+// When a service is not provided at all, an error is returned instead.
+//
+// When checkpoint is disabled or when a 404 is returned after making the
+// HTTP call, an ErrNoVersionConstraints error will be returned.
+func (h *Host) VersionConstraints(id, product string) (*Constraints, error) {
+	svc, _, err := parseServiceID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return early if checkpoint is disabled.
+	if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
+		return nil, &ErrNoVersionConstraints{disabled: true}
+	}
+
+	// No services supported for an empty Host.
+	if h == nil || h.services == nil {
+		return nil, &ErrServiceNotProvided{service: svc}
+	}
+
+	// Try to get the service URL for the version service and
+	// return early if the service isn't provided by the host.
+	u, err := h.ServiceURL(versionServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we have an exact (service and version) match.
+	if _, ok := h.services[id].(string); !ok {
+		// If we don't have an exact match, we search for all matching
+		// services and then use the service ID of the latest version.
+		var services []string
+		for serviceID := range h.services {
+			if strings.HasPrefix(serviceID, svc+".") {
+				services = append(services, serviceID)
+			}
+		}
+
+		if len(services) == 0 {
+			// No discovered services match the requested service.
+			return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+		}
+
+		// Set id to the latest service ID we found.
+		var latest *version.Version
+		for _, serviceID := range services {
+			if _, ver, err := parseServiceID(serviceID); err == nil {
+				if latest == nil || latest.LessThan(ver) {
+					id = serviceID
+					latest = ver
+				}
+			}
+		}
+	}
+
+	// Set a default timeout of 1000 milliseconds (1 second) for the
+	// versions request; CHECKPOINT_TIMEOUT overrides it, in milliseconds.
+	timeout := 1000
+	if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
+		timeout = v
+	}
+
+	client := &http.Client{
+		Transport: h.transport,
+		Timeout:   time.Duration(timeout) * time.Millisecond,
+	}
+
+	// Prepare the service URL by setting the service and product.
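+	// For example (hypothetical host), if the discovered versions.v1 URL is
+	// https://example.com/v1/versions/ and this is called with id
+	// "terraform.v1" and product "terraform", the request below is sent to:
+	//
+	//	https://example.com/v1/versions/terraform.v1?product=terraform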
+ v := u.Query() + v.Set("product", product) + u.Path += id + u.RawQuery = v.Encode() + + // Create a new request. + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to create version constraints request: %v", err) + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request version constraints: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return nil, &ErrNoVersionConstraints{disabled: false} + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) + } + + // Parse the constraints from the response body. + result := &Constraints{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, fmt.Errorf("Error parsing version constraints: %v", err) + } + + return result, nil +} + +func parseServiceID(id string) (string, *version.Version, error) { + parts := strings.SplitN(id, ".", 2) + if len(parts) != 2 { + return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) + } + + version, err := version.NewVersion(parts[1]) + if err != nil { + return "", nil, fmt.Errorf("Invalid service version: %v", err) + } + + return parts[0], version, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/label_iter.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/label_iter.go new file mode 100644 index 00000000000..6e0e47b73f2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/label_iter.go @@ -0,0 +1,49 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{
+			l.curStart = len(l.orig)
+		}
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/svchost.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/svchost.go
new file mode 100644
index 00000000000..4060b767e58
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/svchost/svchost.go
@@ -0,0 +1,207 @@
+// Package svchost deals with the representations of the so-called "friendly
+// hostnames" that we use to represent systems that provide Terraform-native
+// remote services, such as module registry, remote operations, etc.
+//
+// Friendly hostnames are specified such that, as much as possible, they
+// are consistent with how web browsers think of hostnames, so that users
+// can bring their intuitions about how hostnames behave when they access
+// a Terraform Enterprise instance's web UI (or indeed any other website)
+// and have this behave in a similar way.
+package svchost
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/idna"
+)
+
+// Hostname is a specialized name for string that indicates that the string
+// has been converted to (or was already in) the storage and comparison form.
+//
+// Hostname values are not suitable for display in the user interface. Use
+// the ForDisplay method to obtain a form suitable for display in the UI.
+//
+// Unlike user-supplied hostnames, strings of type Hostname (assuming they
+// were constructed by a function within this package) can be compared for
+// equality using the standard Go == operator.
+type Hostname string
+
+// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that
+// a domain name label is in "punycode" form.
+const acePrefix = "xn--"
+
+// displayProfile is a very liberal idna profile that we use to do
+// normalization for display without imposing validation rules.
+var displayProfile = idna.New(
+	idna.MapForLookup(),
+	idna.Transitional(true),
+)
+
+// ForDisplay takes a user-specified hostname and returns a normalized form of
+// it suitable for display in the UI.
+//
+// If the input is so invalid that no normalization can be performed then
+// this will return the input, assuming that the caller still wants to
+// display _something_. This function is, however, more tolerant than the
+// other functions in this package and will make a best effort to prepare
+// _any_ given hostname for display.
+//
+// For validation, use either IsValid (for explicit validation) or
+// ForComparison (which implicitly validates, returning an error if invalid).
+func ForDisplay(given string) string {
+	var portPortion string
+	if colonPos := strings.Index(given, ":"); colonPos != -1 {
+		given, portPortion = given[:colonPos], given[colonPos:]
+	}
+	portPortion, _ = normalizePortPortion(portPortion)
+
+	ascii, err := displayProfile.ToASCII(given)
+	if err != nil {
+		return given + portPortion
+	}
+	display, err := displayProfile.ToUnicode(ascii)
+	if err != nil {
+		return given + portPortion
+	}
+	return display + portPortion
+}
+
+// IsValid returns true if the given user-specified hostname is a valid
+// service hostname.
+//
+// Validity is determined by complying with the RFC 5891 requirements for
+// names that are valid for domain lookup (section 5), with the additional
+// requirement that user-supplied forms must not _already_ contain
+// Punycode segments.
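+//
+// A hedged sketch of expected results (inputs chosen for illustration, not
+// taken from this package's tests):
+//
+//	IsValid("example.com")     // true
+//	IsValid("foo..example")    // false: empty label
+//	IsValid("xn--caf-dma.com") // false: punycode must not be given directly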
+func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. + labels := labelIter{orig: given} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + return Hostname(""), fmt.Errorf( + "hostname contains empty label (two consecutive periods)", + ) + } + if strings.HasPrefix(label, acePrefix) { + return Hostname(""), fmt.Errorf( + "hostname label %q specified in punycode format; service hostnames must be given in unicode", + label, + ) + } + } + + result, err := idna.Lookup.ToASCII(given) + if err != nil { + return Hostname(""), err + } + return Hostname(result + portPortion), nil +} + +// ForDisplay returns a version of the receiver that is appropriate for display +// in the UI. This includes converting any punycode labels to their +// corresponding Unicode characters. +// +// A round-trip through ForComparison and this ForDisplay method does not +// guarantee the same result as calling this package's top-level ForDisplay +// function, since a round-trip through the Hostname type implies stricter +// handling than we do when doing basic display-only processing. +func (h Hostname) ForDisplay() string { + given := string(h) + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + // We don't normalize the port portion here because we assume it's + // already been normalized on the way in. + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + // Should never happen, since type Hostname indicates that a string + // passed through our validation rules. + panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) + } + return result + portPortion +} + +func (h Hostname) String() string { + return string(h) +} + +func (h Hostname) GoString() string { + return fmt.Sprintf("svchost.Hostname(%q)", string(h)) +} + +// normalizePortPortion attempts to normalize the "port portion" of a hostname, +// which begins with the first colon in the hostname and should be followed +// by a string of decimal digits. +// +// If the port portion is valid, a normalized version of it is returned along +// with a nil error. 
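+// For example, ":8080" is returned unchanged, while ":443" normalizes to
+// the empty string, because 443 is the implied default port.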
+// +// If the port portion is invalid, the input string is returned verbatim along +// with a non-nil error. +// +// An empty string is a valid port portion representing the absence of a port. +// If non-empty, the first character must be a colon. +func normalizePortPortion(s string) (string, error) { + if s == "" { + return s, nil + } + + if s[0] != ':' { + // should never happen, since caller tends to guarantee the presence + // of a colon due to how it's extracted from the string. + return s, errors.New("port portion is missing its initial colon") + } + + numStr := s[1:] + num, err := strconv.Atoi(numStr) + if err != nil { + return s, errors.New("port portion contains non-digit characters") + } + if num == 443 { + return "", nil // ":443" is the default + } + if num > 65535 { + return s, errors.New("port number is greater than 65535") + } + return fmt.Sprintf(":%d", num), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go new file mode 100644 index 00000000000..8e41f46ed28 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go @@ -0,0 +1,68 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. +func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in a HCL-like syntax as context. +func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} + +// FormatErrorPrefixed is like FormatError except that it presents any path +// information after the given prefix string, which is assumed to contain +// an HCL syntax representation of the value that errors are relative to. 
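+//
+// For example (hypothetical error), a cty.PathError whose path renders as
+// .foo[0], formatted with the prefix "aws_instance.example", produces:
+//
+//	aws_instance.example.foo[0]: <underlying error message>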
+func FormatErrorPrefixed(err error, prefix string) string {
+	perr, ok := err.(cty.PathError)
+	if !ok || len(perr.Path) == 0 {
+		return fmt.Sprintf("%s: %s", prefix, err.Error())
+	}
+
+	return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go
new file mode 100644
index 00000000000..25b21403723
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go
@@ -0,0 +1,372 @@
+package tfdiags
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// The "contextual" family of diagnostics are designed to allow separating
+// the detection of a problem from placing that problem in context. For
+// example, some code that is validating an object extracted from configuration
+// may not have access to the configuration that generated it, but can still
+// report problems within that object which the caller can then place in
+// context by calling InConfigBody on the returned diagnostics.
+//
+// When contextual diagnostics are used, the documentation for a method must
+// be very explicit about what context is implied for any diagnostics returned,
+// to help ensure the expected result.
+
+// contextualFromConfigBody is an interface type implemented by diagnostic
+// types that can elaborate themselves when given information about the
+// configuration body they are embedded in.
+//
+// Usually this entails extracting source location information in order to
+// populate the "Subject" range.
+type contextualFromConfigBody interface {
+	ElaborateFromConfigBody(hcl.Body) Diagnostic
+}
+
+// InConfigBody returns a copy of the receiver with any config-contextual
+// diagnostics elaborated in the context of the given body.
+func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
+	if len(d) == 0 {
+		return nil
+	}
+
+	ret := make(Diagnostics, len(d))
+	for i, srcDiag := range d {
+		if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
+			ret[i] = cd.ElaborateFromConfigBody(body)
+		} else {
+			ret[i] = srcDiag
+		}
+	}
+
+	return ret
+}
+
+// AttributeValue returns a diagnostic about an attribute value in an implied
+// current configuration context. This should be returned only from functions
+// whose interface specifies a clear configuration context that this will be
+// resolved in.
+//
+// The given path is relative to the implied configuration context. To describe
+// a top-level attribute, it should be a single-element cty.Path with a
+// cty.GetAttrStep. It's assumed that the path traverses into a structure
+// that would be produced by our conventions in the configschema package; it
+// may return unexpected results for structures that can't be represented by
+// configschema.
+//
+// Since mapping attribute paths back onto configuration is an imprecise
+// operation (e.g. dynamic block generation may cause the same block to be
+// evaluated multiple times) the diagnostic detail should include the attribute
+// name and other context required to help the user understand what is being
+// referenced in case the identified source range is not unique.
+//
+// The returned attribute will not have source location information until
+// context is applied to the containing diagnostics using diags.InConfigBody.
+// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +// ElaborateFromConfigBody finds the most accurate possible source location +// for a diagnostic's attribute path within the given body. +// +// Backing out from a path back to a source location is not always entirely +// possible because we lose some information in the decoding process, so +// if an exact position cannot be found then the returned diagnostic will +// refer to a position somewhere within the containing body, which is assumed +// to be better than no location at all. +// +// If possible it is generally better to report an error at a layer where +// source location information is still available, for more accuracy. This +// is not always possible due to system architecture, so this serves as a +// "best effort" fallback behavior for such situations. +func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if len(d.attrPath) < 1 { + // Should never happen, but we'll allow it rather than crashing. + return d + } + + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + + // This function will often end up re-decoding values that were already + // decoded by an earlier step. This is non-ideal but is architecturally + // more convenient than arranging for source location information to be + // propagated to every place in Terraform, and this happens only in the + // presence of errors where performance isn't a concern. + + traverse := d.attrPath[:] + final := d.attrPath[len(d.attrPath)-1] + + // Index should never be the first step + // as indexing of top blocks (such as resources & data sources) + // is handled elsewhere + if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + return &ret + } + + // Process index separately + idxStep, hasIdx := final.(cty.IndexStep) + if hasIdx { + final = d.attrPath[len(d.attrPath)-2] + traverse = d.attrPath[:len(d.attrPath)-1] + } + + // If we have more than one step after removing index + // then we'll first try to traverse to a child body + // corresponding to the requested path. + if len(traverse) > 1 { + body = traversePathSteps(traverse, body) + } + + // Default is to indicate a missing item in the deepest body we reached + // while traversing. 
+ subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + + // Once we get here, "final" should be a GetAttr step that maps to an + // attribute in our current body. + finalStep, isAttr := final.(cty.GetAttrStep) + if !isAttr { + return &ret + } + + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: finalStep.Name, + Required: true, + }, + }, + }) + if contentDiags.HasErrors() { + return &ret + } + + if attr, ok := content.Attributes[finalStep.Name]; ok { + hclRange := attr.Expr.Range() + if hasIdx { + // Try to be more precise by finding index range + hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) + } + subject = SourceRangeFromHCL(hclRange) + ret.subject = &subject + } + + return &ret +} + +func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { + for i := 0; i < len(traverse); i++ { + step := traverse[i] + + switch tStep := step.(type) { + case cty.GetAttrStep: + + var next cty.PathStep + if i < (len(traverse) - 1) { + next = traverse[i+1] + } + + // Will be indexing into our result here? + var indexType cty.Type + var indexVal cty.Value + if nextIndex, ok := next.(cty.IndexStep); ok { + indexVal = nextIndex.Key + indexType = indexVal.Type() + i++ // skip over the index on subsequent iterations + } + + var blockLabelNames []string + if indexType == cty.String { + // Map traversal means we expect one label for the key. + blockLabelNames = []string{"key"} + } + + // For intermediate steps we expect to be referring to a child + // block, so we'll attempt decoding under that assumption. + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: tStep.Name, + LabelNames: blockLabelNames, + }, + }, + }) + if contentDiags.HasErrors() { + return body + } + filtered := make([]*hcl.Block, 0, len(content.Blocks)) + for _, block := range content.Blocks { + if block.Type == tStep.Name { + filtered = append(filtered, block) + } + } + if len(filtered) == 0 { + // Step doesn't refer to a block + continue + } + + switch indexType { + case cty.NilType: // no index at all + if len(filtered) != 1 { + return body + } + body = filtered[0].Body + case cty.Number: + var idx int + err := gocty.FromCtyValue(indexVal, &idx) + if err != nil || idx >= len(filtered) { + return body + } + body = filtered[idx].Body + case cty.String: + key := indexVal.AsString() + var block *hcl.Block + for _, candidate := range filtered { + if candidate.Labels[0] == key { + block = candidate + break + } + } + if block == nil { + // No block with this key, so we'll just indicate a + // missing item in the containing block. + return body + } + body = block.Body + default: + // Should never happen, because only string and numeric indices + // are supported by cty collections. + return body + } + + default: + // For any other kind of step, we'll just return our current body + // as the subject and accept that this is a little inaccurate. 
+ return body + } + } + return body +} + +func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { + switch idxStep.Key.Type() { + case cty.Number: + var idx int + err := gocty.FromCtyValue(idxStep.Key, &idx) + items, diags := hcl.ExprList(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + if err != nil || idx >= len(items) { + return attr.NameRange + } + return items[idx].Range() + case cty.String: + pairs, diags := hcl.ExprMap(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + stepKey := idxStep.Key.AsString() + for _, kvPair := range pairs { + key, err := kvPair.Key.Value(nil) + if err != nil { + return attr.Expr.Range() + } + if key.AsString() == stepKey { + startRng := kvPair.Value.StartRange() + return startRng + } + } + return attr.NameRange + } + return attr.Expr.Range() +} + +func (d *attributeDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + rng := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &rng + return &ret +} + +func (d *wholeBodyDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go new file mode 100644 index 00000000000..f3b0ea1e42e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go @@ -0,0 +1,40 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type Diagnostic interface { + Severity() Severity + Description() Description + Source() Source + + // FromExpr returns the expression-related context for the diagnostic, if + // available. Returns nil if the diagnostic is not related to an + // expression evaluation. 
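+	//
+	// A hedged caller sketch (variable names assumed):
+	//
+	//	if fe := diag.FromExpr(); fe != nil {
+	//		// fe.Expression and fe.EvalContext describe the evaluation
+	//		// that produced this diagnostic.
+	//	}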
+	FromExpr() *FromExpr
+}
+
+type Severity rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity
+
+const (
+	Error   Severity = 'E'
+	Warning Severity = 'W'
+)
+
+type Description struct {
+	Summary string
+	Detail  string
+}
+
+type Source struct {
+	Subject *SourceRange
+	Context *SourceRange
+}
+
+type FromExpr struct {
+	Expression  hcl.Expression
+	EvalContext *hcl.EvalContext
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go
new file mode 100644
index 00000000000..50bf9d8eba5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
+package tfdiags
+
+// diagnosticBase can be embedded in other diagnostic structs to get
+// default implementations of Severity and Description. This type also
+// has default implementations of Source and FromExpr that return no source
+// location or expression-related information, so embedders should generally
+// override those methods to return more useful results where possible.
+type diagnosticBase struct {
+	severity Severity
+	summary  string
+	detail   string
+}
+
+func (d diagnosticBase) Severity() Severity {
+	return d.severity
+}
+
+func (d diagnosticBase) Description() Description {
+	return Description{
+		Summary: d.summary,
+		Detail:  d.detail,
+	}
+}
+
+func (d diagnosticBase) Source() Source {
+	return Source{}
+}
+
+func (d diagnosticBase) FromExpr() *FromExpr {
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go
new file mode 100644
index 00000000000..465b230f606
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go
@@ -0,0 +1,330 @@
+package tfdiags
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
+// where a Go "error" might normally be used, allowing richer information
+// to be conveyed (more context, support for warnings).
+//
+// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
+// heap allocation to be avoided in the common case where there are no
+// diagnostics to report at all.
+type Diagnostics []Diagnostic
+
+// Append is the main interface for constructing Diagnostics lists, taking
+// an existing list (which may be nil) and appending the new objects to it
+// after normalizing them to be implementations of Diagnostic.
+//
+// The usual pattern for a function that natively "speaks" diagnostics is:
+//
+//     // Create a nil Diagnostics at the start of the function
+//     var diags tfdiags.Diagnostics
+//
+//     // At later points, build on it if errors / warnings occur:
+//     foo, err := DoSomethingRisky()
+//     if err != nil {
+//         diags = diags.Append(err)
+//     }
+//
+//     // Eventually return the result and diagnostics in place of error
+//     return result, diags
+//
+// Append accepts a variety of different diagnostic-like types, including
+// native Go errors and HCL diagnostics.
It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) // flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case NonFatalError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. 
+// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +// Sort applies an ordering to the diagnostics in the receiver in-place. +// +// The ordering is: warnings before errors, sourceless before sourced, +// short source paths before long source paths, and then ordering by +// position within each file. +// +// Diagnostics that do not differ by any of these sortable characteristics +// will remain in the same relative order after this method returns. +func (diags Diagnostics) Sort() { + sort.Stable(sortDiagnostics(diags)) +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + iSrc, jSrc := iD.Source(), jD.Source() + + switch { + + case iSev != jSev: + return iSev == Warning + + case (iSrc.Subject == nil) != (jSrc.Subject == nil): + return iSrc.Subject == nil + + case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: + iSubj := iSrc.Subject + jSubj := jSrc.Subject + switch { + case iSubj.Filename != jSubj.Filename: + // Path with fewer segments goes first if they are different lengths + sep := string(filepath.Separator) + iCount := strings.Count(iSubj.Filename, sep) + jCount := strings.Count(jSubj.Filename, sep) + if iCount != jCount { + return iCount < jCount + } + return iSubj.Filename < jSubj.Filename + case iSubj.Start.Byte != jSubj.Start.Byte: + return iSubj.Start.Byte < jSubj.Start.Byte + case iSubj.End.Byte != jSubj.End.Byte: + return iSubj.End.Byte < jSubj.End.Byte + } + fallthrough + + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go new file mode 100644 index 00000000000..c427879ebc7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. 
+package tfdiags diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go new file mode 100644 index 00000000000..13f7a714f42 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go @@ -0,0 +1,28 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func (e nativeError) Source() Source { + // No source information available for a native error + return Source{} +} + +func (e nativeError) FromExpr() *FromExpr { + // Native errors are not expression-related + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go new file mode 100644 index 00000000000..f9aec41c931 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go @@ -0,0 +1,87 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +func (d hclDiagnostic) FromExpr() *FromExpr { + if d.diag.Expression == nil || d.diag.EvalContext == nil { + return nil + } + return &FromExpr{ + Expression: d.diag.Expression, + EvalContext: d.diag.EvalContext, + } +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs a HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. 
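+//
+// Since both types carry the same fields, a round trip should be lossless;
+// for any hcl.Range value hclRng (name assumed for illustration):
+//
+//	rng2 := SourceRangeFromHCL(hclRng).ToHCL() // rng2 == hclRng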
+func (r SourceRange) ToHCL() hcl.Range {
+	return hcl.Range{
+		Filename: r.Filename,
+		Start: hcl.Pos{
+			Line:   r.Start.Line,
+			Column: r.Start.Column,
+			Byte:   r.Start.Byte,
+		},
+		End: hcl.Pos{
+			Line:   r.End.Line,
+			Column: r.End.Column,
+			Byte:   r.End.Byte,
+		},
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go
new file mode 100644
index 00000000000..485063b0c0e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go
@@ -0,0 +1,59 @@
+package tfdiags
+
+import (
+	"encoding/gob"
+)
+
+type rpcFriendlyDiag struct {
+	Severity_ Severity
+	Summary_  string
+	Detail_   string
+	Subject_  *SourceRange
+	Context_  *SourceRange
+}
+
+// makeRPCFriendlyDiag transforms a given diagnostic so that it is more
+// friendly to RPC.
+//
+// In particular, it currently returns an object that can be serialized and
+// later re-inflated using gob. This definition may grow to include other
+// serializations later.
+func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic {
+	desc := diag.Description()
+	source := diag.Source()
+	return &rpcFriendlyDiag{
+		Severity_: diag.Severity(),
+		Summary_:  desc.Summary,
+		Detail_:   desc.Detail,
+		Subject_:  source.Subject,
+		Context_:  source.Context,
+	}
+}
+
+func (d *rpcFriendlyDiag) Severity() Severity {
+	return d.Severity_
+}
+
+func (d *rpcFriendlyDiag) Description() Description {
+	return Description{
+		Summary: d.Summary_,
+		Detail:  d.Detail_,
+	}
+}
+
+func (d *rpcFriendlyDiag) Source() Source {
+	return Source{
+		Subject: d.Subject_,
+		Context: d.Context_,
+	}
+}
+
+func (d rpcFriendlyDiag) FromExpr() *FromExpr {
+	// RPC-friendly diagnostics cannot preserve expression information because
+	// expressions themselves are not RPC-friendly.
+	return nil
+}
+
+func init() {
+	gob.Register((*rpcFriendlyDiag)(nil))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go
new file mode 100644
index 00000000000..78a721068c3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=Severity"; DO NOT EDIT.
+
+package tfdiags
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go new file mode 100644 index 00000000000..b0f1ecd46c6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go @@ -0,0 +1,30 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a simple warning + return Source{} +} + +func (e simpleWarning) FromExpr() *FromExpr { + // Simple warnings are not expression-related + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go new file mode 100644 index 00000000000..3031168d6a4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go @@ -0,0 +1,35 @@ +package tfdiags + +import ( + "fmt" + "os" + "path/filepath" +) + +type SourceRange struct { + Filename string + Start, End SourcePos +} + +type SourcePos struct { + Line, Column, Byte int +} + +// StartString returns a string representation of the start of the range, +// including the filename and the line and column numbers. +func (r SourceRange) StartString() string { + filename := r.Filename + + // We'll try to relative-ize our filename here so it's less verbose + // in the common case of being in the current working directory. If not, + // we'll just show the full path. + wd, err := os.Getwd() + if err == nil { + relFn, err := filepath.Rel(wd, filename) + if err == nil { + filename = relFn + } + } + + return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go new file mode 100644 index 00000000000..eaa27373db1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go @@ -0,0 +1,13 @@ +package tfdiags + +// Sourceless creates and returns a diagnostic with no source location +// information. This is generally used for operational-type errors that are +// caused by or relate to the environment where Terraform is running rather +// than to the provided configuration. 
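+//
+// For example (messages hypothetical):
+//
+//	diags = diags.Append(Sourceless(
+//		Error,
+//		"Failed to read state",
+//		"The state file could not be opened: permission denied.",
+//	))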
+func Sourceless(severity Severity, summary, detail string) Diagnostic { + return diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 00000000000..86fd21e41e0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,3518 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tfplugin5.proto + +package tfplugin5 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +var Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", +} + +var Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, +} + +func (x Diagnostic_Severity) String() string { + return proto.EnumName(Diagnostic_Severity_name, int32(x)) +} + +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +var Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", +} + +var Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) +} + +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
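+// Typically only one field is populated; for example (value hypothetical):
+//
+//	DynamicValue{Json: []byte(`{"ami":"ami-0abc"}`)}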
+type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{0} +} + +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (m *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(m, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +func (m *DynamicValue) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +type Diagnostic struct { + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1} +} + +func (m *Diagnostic) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Diagnostic.Unmarshal(m, b) +} +func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) +} +func (m *Diagnostic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Diagnostic.Merge(m, src) +} +func (m *Diagnostic) XXX_Size() int { + return xxx_messageInfo_Diagnostic.Size(m) +} +func (m *Diagnostic) XXX_DiscardUnknown() { + xxx_messageInfo_Diagnostic.DiscardUnknown(m) +} + +var xxx_messageInfo_Diagnostic proto.InternalMessageInfo + +func (m *Diagnostic) GetSeverity() Diagnostic_Severity { + if m != nil { + return m.Severity + } + return Diagnostic_INVALID +} + +func (m *Diagnostic) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Diagnostic) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Diagnostic) GetAttribute() *AttributePath { + if m != nil { + return m.Attribute + } + return nil +} + +type AttributePath struct { + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*AttributePath) Reset() { *m = AttributePath{} } +func (m *AttributePath) String() string { return proto.CompactTextString(m) } +func (*AttributePath) ProtoMessage() {} +func (*AttributePath) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2} +} + +func (m *AttributePath) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath.Unmarshal(m, b) +} +func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) +} +func (m *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(m, src) +} +func (m *AttributePath) XXX_Size() int { + return xxx_messageInfo_AttributePath.Size(m) +} +func (m *AttributePath) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath proto.InternalMessageInfo + +func (m *AttributePath) GetSteps() []*AttributePath_Step { + if m != nil { + return m.Steps + } + return nil +} + +type AttributePath_Step struct { + // Types that are valid to be assigned to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } +func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } +func (*AttributePath_Step) ProtoMessage() {} +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2, 0} +} + +func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) +} +func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) +} +func (m *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(m, src) +} +func (m *AttributePath_Step) XXX_Size() int { + return xxx_messageInfo_AttributePath_Step.Size(m) +} +func (m *AttributePath_Step) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *AttributePath_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (m 
*AttributePath_Step) GetElementKeyString() string { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (m *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributePath_Step) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } +} + +type Stop struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop) Reset() { *m = Stop{} } +func (m *Stop) String() string { return proto.CompactTextString(m) } +func (*Stop) ProtoMessage() {} +func (*Stop) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3} +} + +func (m *Stop) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop.Unmarshal(m, b) +} +func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop.Marshal(b, m, deterministic) +} +func (m *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(m, src) +} +func (m *Stop) XXX_Size() int { + return xxx_messageInfo_Stop.Size(m) +} +func (m *Stop) XXX_DiscardUnknown() { + xxx_messageInfo_Stop.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop proto.InternalMessageInfo + +type Stop_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Request) Reset() { *m = Stop_Request{} } +func (m *Stop_Request) String() string { return proto.CompactTextString(m) } +func (*Stop_Request) ProtoMessage() {} +func (*Stop_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 0} +} + +func (m *Stop_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Request.Unmarshal(m, b) +} +func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) +} +func (m *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(m, src) +} +func (m *Stop_Request) XXX_Size() int { + return xxx_messageInfo_Stop_Request.Size(m) +} +func (m *Stop_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Request proto.InternalMessageInfo + +type Stop_Response struct { + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Response) Reset() { *m = Stop_Response{} } +func (m *Stop_Response) String() string { return proto.CompactTextString(m) } +func (*Stop_Response) ProtoMessage() {} +func (*Stop_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 1} +} + +func (m *Stop_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Response.Unmarshal(m, b) +} +func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) +} +func (m *Stop_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Response.Merge(m, src) +} +func (m *Stop_Response) XXX_Size() 
int { + return xxx_messageInfo_Stop_Response.Size(m) +} +func (m *Stop_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Response proto.InternalMessageInfo + +func (m *Stop_Response) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +type RawState struct { + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawState) Reset() { *m = RawState{} } +func (m *RawState) String() string { return proto.CompactTextString(m) } +func (*RawState) ProtoMessage() {} +func (*RawState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{4} +} + +func (m *RawState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawState.Unmarshal(m, b) +} +func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawState.Marshal(b, m, deterministic) +} +func (m *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(m, src) +} +func (m *RawState) XXX_Size() int { + return xxx_messageInfo_RawState.Size(m) +} +func (m *RawState) XXX_DiscardUnknown() { + xxx_messageInfo_RawState.DiscardUnknown(m) +} + +var xxx_messageInfo_RawState proto.InternalMessageInfo + +func (m *RawState) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +func (m *RawState) GetFlatmap() map[string]string { + if m != nil { + return m.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
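
// Editor's note (illustrative only, not generated code): bumping Version on a
// Schema is what lets Terraform detect stale stored state and drive the
// UpgradeResourceState RPC defined later in this file. A hedged sketch of a
// versioned schema; the attribute name and its cty-style JSON type encoding
// are assumptions, not taken from this diff:
//
//	func exampleVersionedSchema() *Schema {
//		return &Schema{
//			Version: 2, // e.g. bumped from 1 after an attribute rename
//			Block: &Schema_Block{
//				Attributes: []*Schema_Attribute{
//					{Name: "display_name", Type: []byte(`"string"`), Optional: true},
//				},
//			},
//		}
//	}
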
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +type Schema_Block struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Block) Reset() { *m = Schema_Block{} } +func (m *Schema_Block) String() string { return proto.CompactTextString(m) } +func (*Schema_Block) ProtoMessage() {} +func (*Schema_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 0} +} + +func (m *Schema_Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Block.Unmarshal(m, b) +} +func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) +} +func (m *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(m, src) +} +func (m *Schema_Block) XXX_Size() int { + return xxx_messageInfo_Schema_Block.Size(m) +} +func (m *Schema_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Block proto.InternalMessageInfo + +func (m *Schema_Block) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema_Block) GetAttributes() []*Schema_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if m != nil { + return m.BlockTypes + } + return nil +} + +type Schema_Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" 
json:"sensitive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } +func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } +func (*Schema_Attribute) ProtoMessage() {} +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 1} +} + +func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) +} +func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) +} +func (m *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(m, src) +} +func (m *Schema_Attribute) XXX_Size() int { + return xxx_messageInfo_Schema_Attribute.Size(m) +} +func (m *Schema_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo + +func (m *Schema_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schema_Attribute) GetType() []byte { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema_Attribute) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Attribute) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Schema_Attribute) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *Schema_Attribute) GetComputed() bool { + if m != nil { + return m.Computed + } + return false +} + +func (m *Schema_Attribute) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +type Schema_NestedBlock struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } +func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } +func (*Schema_NestedBlock) ProtoMessage() {} +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2} +} + +func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) +} +func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) +} +func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(m, src) +} +func (m *Schema_NestedBlock) XXX_Size() int { + return xxx_messageInfo_Schema_NestedBlock.Size(m) +} +func (m *Schema_NestedBlock) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo + +func (m *Schema_NestedBlock) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *Schema_NestedBlock) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if m != nil { + return m.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (m *Schema_NestedBlock) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema_NestedBlock) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +type GetProviderSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } +func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema) ProtoMessage() {} +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6} +} + +func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) +} +func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(m, src) +} +func (m *GetProviderSchema) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema.Size(m) +} +func (m *GetProviderSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo + +type GetProviderSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } +func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Request) ProtoMessage() {} +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 0} +} + +func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) +} +func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) +} +func (m *GetProviderSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Request.Size(m) +} +func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo + +type GetProviderSchema_Response struct { + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" 
json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } +func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Response) ProtoMessage() {} +func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 1} +} + +func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) +} +func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) +} +func (m *GetProviderSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Response.Size(m) +} +func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo + +func (m *GetProviderSchema_Response) GetProvider() *Schema { + if m != nil { + return m.Provider + } + return nil +} + +func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if m != nil { + return m.ResourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if m != nil { + return m.DataSourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type PrepareProviderConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } +func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig) ProtoMessage() {} +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7} +} + +func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) +} +func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(m, src) +} +func (m *PrepareProviderConfig) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig.Size(m) +} +func (m *PrepareProviderConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo + +type PrepareProviderConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Request) Reset() { *m = 
PrepareProviderConfig_Request{} } +func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Request) ProtoMessage() {} +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 0} +} + +func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) +} +func (m *PrepareProviderConfig_Request) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) +} +func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } +func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Response) ProtoMessage() {} +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 1} +} + +func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src) +} +func (m *PrepareProviderConfig_Response) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) +} +func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if m != nil { + return m.PreparedConfig + } + return nil +} + +func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type UpgradeResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } +func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState) ProtoMessage() {} +func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + 
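
// Editor's note (illustrative): PrepareProviderConfig gives the provider a
// chance to normalize and validate its own configuration before Configure
// runs. A hedged sketch of a response that echoes the config back and
// attaches a warning; the deprecated argument is invented for the example:
//
//	func examplePrepareResponse(cfg *DynamicValue) *PrepareProviderConfig_Response {
//		return &PrepareProviderConfig_Response{
//			PreparedConfig: cfg,
//			Diagnostics: []*Diagnostic{{
//				Severity: Diagnostic_WARNING,
//				Summary:  "deprecated argument",
//				Detail:   "argument \"foo\" is ignored by this provider version",
//			}},
//		}
//	}
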
return fileDescriptor_17ae6090ff270234, []int{8} +} + +func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) +} +func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState.Merge(m, src) +} +func (m *UpgradeResourceState) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState.Size(m) +} +func (m *UpgradeResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo + +type UpgradeResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw state as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} } +func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Request) ProtoMessage() {} +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 0} +} + +func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) +} +func (m *UpgradeResourceState_Request) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Request.Size(m) +} +func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo + +func (m *UpgradeResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *UpgradeResourceState_Request) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *UpgradeResourceState_Request) GetRawState() *RawState { + if m != nil { + return m.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
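
// Editor's note (illustrative sketch, assuming encoding/json were added to
// this file's imports): an upgrade handler might translate the legacy flatmap
// form of RawState into the current JSON form. The naive key copy is a
// placeholder; a real upgrade restructures values per the new schema:
//
//	func exampleUpgrade(req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) {
//		state := map[string]interface{}{}
//		for k, v := range req.GetRawState().GetFlatmap() { // nil-safe getters
//			state[k] = v // placeholder; reshape per the current schema here
//		}
//		buf, err := json.Marshal(state)
//		if err != nil {
//			return nil, err
//		}
//		return &UpgradeResourceState_Response{UpgradedState: &DynamicValue{Json: buf}}, nil
//	}
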
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } +func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Response) ProtoMessage() {} +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 1} +} + +func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) +} +func (m *UpgradeResourceState_Response) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Response.Size(m) +} +func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo + +func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if m != nil { + return m.UpgradedState + } + return nil +} + +func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } +func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig) ProtoMessage() {} +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9} +} + +func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) +} +func (m *ValidateResourceTypeConfig) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) +} +func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo + +type ValidateResourceTypeConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + 
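
// Editor's note (illustrative): validation responses can point at the exact
// offending attribute through AttributePath and its oneof Step selector
// (defined near the top of this file). A hedged sketch; "port" and the
// message text are invented:
//
//	func exampleInvalidAttribute() *Diagnostic {
//		return &Diagnostic{
//			Severity: Diagnostic_ERROR,
//			Summary:  "invalid value",
//			Detail:   "\"port\" must be between 1 and 65535",
//			Attribute: &AttributePath{
//				Steps: []*AttributePath_Step{
//					{Selector: &AttributePath_Step_AttributeName{AttributeName: "port"}},
//				},
//			},
//		}
//	}
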
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } +func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 0} +} + +func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) +} +func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } +func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 1} +} + +func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) +} +func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig) Reset() { *m = 
ValidateDataSourceConfig{} } +func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig) ProtoMessage() {} +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10} +} + +func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) +} +func (m *ValidateDataSourceConfig) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig.Size(m) +} +func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo + +type ValidateDataSourceConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } +func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 0} +} + +func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) +} +func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } +func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 1} 
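
// Editor's note (illustrative, assumes encoding/json in scope): data-source
// validation mirrors resource validation. A hedged sketch that requires a
// hypothetical "filter" argument in the JSON-encoded config; a real handler
// would also surface decode errors as diagnostics:
//
//	func exampleValidateDataSource(req *ValidateDataSourceConfig_Request) *ValidateDataSourceConfig_Response {
//		resp := &ValidateDataSourceConfig_Response{}
//		var cfg map[string]interface{}
//		if err := json.Unmarshal(req.GetConfig().GetJson(), &cfg); err == nil {
//			if _, ok := cfg["filter"]; !ok {
//				resp.Diagnostics = append(resp.Diagnostics, &Diagnostic{
//					Severity: Diagnostic_ERROR,
//					Summary:  "missing required argument",
//					Detail:   "\"filter\" must be set",
//				})
//			}
//		}
//		return resp
//	}
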
+} + +func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) +} +func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type Configure struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure) Reset() { *m = Configure{} } +func (m *Configure) String() string { return proto.CompactTextString(m) } +func (*Configure) ProtoMessage() {} +func (*Configure) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11} +} + +func (m *Configure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure.Unmarshal(m, b) +} +func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure.Marshal(b, m, deterministic) +} +func (m *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(m, src) +} +func (m *Configure) XXX_Size() int { + return xxx_messageInfo_Configure.Size(m) +} +func (m *Configure) XXX_DiscardUnknown() { + xxx_messageInfo_Configure.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure proto.InternalMessageInfo + +type Configure_Request struct { + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Request) Reset() { *m = Configure_Request{} } +func (m *Configure_Request) String() string { return proto.CompactTextString(m) } +func (*Configure_Request) ProtoMessage() {} +func (*Configure_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 0} +} + +func (m *Configure_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Request.Unmarshal(m, b) +} +func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) +} +func (m *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(m, src) +} +func (m *Configure_Request) XXX_Size() int { + return xxx_messageInfo_Configure_Request.Size(m) +} +func (m *Configure_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Request proto.InternalMessageInfo + +func (m *Configure_Request) GetTerraformVersion() string { + if m != nil { + return m.TerraformVersion + } + return "" +} + +func (m *Configure_Request) GetConfig() *DynamicValue { + 
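
// Editor's note (illustrative): Configure carries the running Terraform
// version so a provider can adapt its behavior. A hedged sketch; the warning
// text is invented:
//
//	func exampleConfigureVersionCheck(req *Configure_Request) *Diagnostic {
//		if req.GetTerraformVersion() == "" {
//			return &Diagnostic{
//				Severity: Diagnostic_WARNING,
//				Summary:  "unknown Terraform version",
//				Detail:   "proceeding without version-specific workarounds",
//			}
//		}
//		return nil
//	}
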
if m != nil { + return m.Config + } + return nil +} + +type Configure_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Response) Reset() { *m = Configure_Response{} } +func (m *Configure_Response) String() string { return proto.CompactTextString(m) } +func (*Configure_Response) ProtoMessage() {} +func (*Configure_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 1} +} + +func (m *Configure_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Response.Unmarshal(m, b) +} +func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) +} +func (m *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(m, src) +} +func (m *Configure_Response) XXX_Size() int { + return xxx_messageInfo_Configure_Response.Size(m) +} +func (m *Configure_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Response proto.InternalMessageInfo + +func (m *Configure_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource) Reset() { *m = ReadResource{} } +func (m *ReadResource) String() string { return proto.CompactTextString(m) } +func (*ReadResource) ProtoMessage() {} +func (*ReadResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12} +} + +func (m *ReadResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource.Unmarshal(m, b) +} +func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) +} +func (m *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(m, src) +} +func (m *ReadResource) XXX_Size() int { + return xxx_messageInfo_ReadResource.Size(m) +} +func (m *ReadResource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource proto.InternalMessageInfo + +type ReadResource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } +func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Request) ProtoMessage() {} +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 0} +} + +func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) +} +func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) +} +func (m *ReadResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Request.Merge(m, src) +} +func (m *ReadResource_Request) XXX_Size() int { + return xxx_messageInfo_ReadResource_Request.Size(m) +} +func (m *ReadResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo + +func (m *ReadResource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadResource_Request) GetCurrentState() *DynamicValue { + if m != nil { + return m.CurrentState + } + return nil +} + +func (m *ReadResource_Request) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type ReadResource_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } +func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Response) ProtoMessage() {} +func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 1} +} + +func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) +} +func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) +} +func (m *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(m, src) +} +func (m *ReadResource_Response) XXX_Size() int { + return xxx_messageInfo_ReadResource_Response.Size(m) +} +func (m *ReadResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo + +func (m *ReadResource_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ReadResource_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type PlanResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } +func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange) ProtoMessage() {} +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13} +} + +func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) +} +func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange) XXX_Merge(src proto.Message) { + 
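
// Editor's note (illustrative): a no-drift ReadResource handler can simply
// echo the stored state and the opaque private bytes back to core. Sketch
// only; a real implementation refreshes NewState from the remote API:
//
//	func exampleRead(req *ReadResource_Request) *ReadResource_Response {
//		return &ReadResource_Response{
//			NewState: req.GetCurrentState(), // refreshed state would go here
//			Private:  req.GetPrivate(),      // provider-internal bytes round-trip
//		}
//	}
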
xxx_messageInfo_PlanResourceChange.Merge(m, src) +} +func (m *PlanResourceChange) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange.Size(m) +} +func (m *PlanResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo + +type PlanResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } +func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Request) ProtoMessage() {} +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 0} +} + +func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) +} +func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) +} +func (m *PlanResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Request.Size(m) +} +func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo + +func (m *PlanResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if m != nil { + return m.ProposedNewState + } + return nil +} + +func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { + if m != nil { + return m.PriorPrivate + } + return nil +} + +type PlanResourceChange_Response struct { + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional 
+ // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and is not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} } +func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Response) ProtoMessage() {} +func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 1} +} + +func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) +} +func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) +} +func (m *PlanResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Response.Size(m) +} +func (m *PlanResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo + +func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if m != nil { + return m.RequiresReplace + } + return nil +} + +func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } +func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange) ProtoMessage() {} +func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14} +} + +func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) +} +func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange.Merge(m, src) +} +func (m *ApplyResourceChange) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange.Size(m) +} +func (m 
*ApplyResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo + +type ApplyResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} } +func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Request) ProtoMessage() {} +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 0} +} + +func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) +} +func (m *ApplyResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Request.Size(m) +} +func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo + +func (m *ApplyResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +type ApplyResourceChange_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } +func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Response) ProtoMessage() {} +func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 1} +} + +func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) +} +func (m *ApplyResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Response.Size(m) +} +func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo + +func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ApplyResourceChange_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ImportResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } +func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState) ProtoMessage() {} +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15} +} + +func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) +} +func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) +} +func (m *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(m, src) +} +func (m *ImportResourceState) XXX_Size() int { + return xxx_messageInfo_ImportResourceState.Size(m) +} +func (m *ImportResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo + +type ImportResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` +} + +func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } +func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Request) ProtoMessage() {} +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 0} +} + +func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) +} +func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(m, src) +} +func (m *ImportResourceState_Request) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Request.Size(m) +} +func (m *ImportResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo + +func (m *ImportResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_Request) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } +func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_ImportedResource) ProtoMessage() {} +func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 1} +} + +func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) +} +func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) +} +func (m *ImportResourceState_ImportedResource) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) +} +func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo + +func (m *ImportResourceState_ImportedResource) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type 
ImportResourceState_Response struct { + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } +func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Response) ProtoMessage() {} +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 2} +} + +func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) +} +func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(m, src) +} +func (m *ImportResourceState_Response) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Response.Size(m) +} +func (m *ImportResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo + +func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if m != nil { + return m.ImportedResources + } + return nil +} + +func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadDataSource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } +func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource) ProtoMessage() {} +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16} +} + +func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) +} +func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) +} +func (m *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(m, src) +} +func (m *ReadDataSource) XXX_Size() int { + return xxx_messageInfo_ReadDataSource.Size(m) +} +func (m *ReadDataSource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo + +type ReadDataSource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } +func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Request) 
ProtoMessage() {} +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 0} +} + +func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) +} +func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(m, src) +} +func (m *ReadDataSource_Request) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Request.Size(m) +} +func (m *ReadDataSource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo + +func (m *ReadDataSource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadDataSource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ReadDataSource_Response struct { + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } +func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Response) ProtoMessage() {} +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 1} +} + +func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) +} +func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(m, src) +} +func (m *ReadDataSource_Response) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Response.Size(m) +} +func (m *ReadDataSource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo + +func (m *ReadDataSource_Response) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type GetProvisionerSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } +func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema) ProtoMessage() {} +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17} +} + +func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) +} +func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, 
deterministic) +} +func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(m, src) +} +func (m *GetProvisionerSchema) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema.Size(m) +} +func (m *GetProvisionerSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo + +type GetProvisionerSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } +func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Request) ProtoMessage() {} +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 0} +} + +func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) +} +func (m *GetProvisionerSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) +} +func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo + +type GetProvisionerSchema_Response struct { + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } +func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Response) ProtoMessage() {} +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 1} +} + +func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) +} +func (m *GetProvisionerSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) +} +func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo + +func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if m != nil { + return m.Provisioner + } + return nil +} + +func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } 
+ return nil +} + +type ValidateProvisionerConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } +func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig) ProtoMessage() {} +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18} +} + +func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) +} +func (m *ValidateProvisionerConfig) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig.Size(m) +} +func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo + +type ValidateProvisionerConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } +func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18, 0} +} + +func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) +} +func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } +func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return 
fileDescriptor_17ae6090ff270234, []int{18, 1} +} + +func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) +} +func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ProvisionResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } +func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource) ProtoMessage() {} +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19} +} + +func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) +} +func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) +} +func (m *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(m, src) +} +func (m *ProvisionResource) XXX_Size() int { + return xxx_messageInfo_ProvisionResource.Size(m) +} +func (m *ProvisionResource) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo + +type ProvisionResource_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } +func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Request) ProtoMessage() {} +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 0} +} + +func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) +} +func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(m, src) +} +func (m *ProvisionResource_Request) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Request.Size(m) +} +func (m *ProvisionResource_Request) XXX_DiscardUnknown() { + 
xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo + +func (m *ProvisionResource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ProvisionResource_Request) GetConnection() *DynamicValue { + if m != nil { + return m.Connection + } + return nil +} + +type ProvisionResource_Response struct { + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } +func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Response) ProtoMessage() {} +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 1} +} + +func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) +} +func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(m, src) +} +func (m *ProvisionResource_Response) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Response.Size(m) +} +func (m *ProvisionResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo + +func (m *ProvisionResource_Response) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), 
"tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + 
proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") +} + +func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) } + +var fileDescriptor_17ae6090ff270234 = []byte{ + // 1880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49, + 0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79, + 0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66, + 0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75, + 0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0, + 0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5, + 0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf, + 0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21, + 0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2, + 0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97, + 0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b, + 0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65, + 0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38, + 0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0, + 0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9, + 0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23, + 0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1, + 0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec, + 0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43, + 0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8, + 0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1, + 0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0, + 0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0, + 0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab, + 0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7, + 0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38, + 0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31, + 0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3, + 0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90, + 0x46, 0x5a, 0xb7, 0x8c, 
0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e, + 0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52, + 0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4, + 0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8, + 0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6, + 0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80, + 0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35, + 0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9, + 0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23, + 0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34, + 0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87, + 0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a, + 0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e, + 0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba, + 0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3, + 0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5, + 0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f, + 0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48, + 0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2, + 0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda, + 0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa, + 0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e, + 0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1, + 0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42, + 0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25, + 0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea, + 0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27, + 0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09, + 0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2, + 0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c, + 0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb, + 0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76, + 0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e, + 0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0, + 0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d, + 0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64, + 0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 
0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde, + 0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2, + 0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4, + 0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79, + 0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39, + 0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3, + 0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7, + 0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9, + 0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60, + 0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54, + 0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b, + 0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39, + 0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00, + 0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1, + 0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2, + 0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa, + 0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71, + 0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00, + 0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd, + 0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7, + 0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe, + 0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87, + 0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96, + 0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c, + 0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5, + 0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1, + 0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e, + 0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c, + 0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6, + 0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb, + 0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9, + 0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96, + 0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42, + 0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf, + 0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d, + 0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5, + 0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 
0x8c, 0xa1, 0x11, 0x6d, + 0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0, + 0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8, + 0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2, + 0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8, + 0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80, + 0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a, + 0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86, + 0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27, + 0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86, + 0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9, + 0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30, + 0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5, + 0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d, + 0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc, + 0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ProviderClient interface { + //////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc *grpc.ClientConn +} + +func NewProviderClient(cc *grpc.ClientConn) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. 
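+//
+// A minimal server-side sketch (editorial addition, not part of the generated
+// code): myProvider is a hypothetical type implementing ProviderServer, with
+// UnimplementedProviderServer embedded for forward compatibility. In a real
+// provider the listener and server lifecycle are managed by
+// hashicorp/go-plugin rather than constructed by hand:
+//
+//	type myProvider struct {
+//		UnimplementedProviderServer
+//	}
+//
+//	srv := grpc.NewServer()
+//	RegisterProviderServer(srv, &myProvider{})
+//	lis, _ := net.Listen("tcp", "127.0.0.1:0")
+//	_ = srv.Serve(lis)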
+type ProviderServer interface { + //////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProviderServer can be embedded to have forward compatible implementations. +type UnimplementedProviderServer struct { +} + +func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") +} +func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not 
implemented") +} +func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc *grpc.ClientConn +} + +func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProvisionerServer can be embedded to have forward compatible implementations. 
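A minimal sketch of the server side, assuming it lives alongside the generated code in the same package: embedding UnimplementedProvisionerServer (declared just below) keeps an implementation forward compatible when the protocol grows new RPCs, and ProvisionResource illustrates the one server-streaming method; the Output field is assumed from the ProvisionResource_Response message defined earlier in this file.

type noopProvisioner struct {
	// Embedding the stub below means any RPC we don't override returns
	// codes.Unimplemented instead of failing to compile.
	UnimplementedProvisionerServer
}

// ProvisionResource may Send any number of responses (e.g. chunks of
// provisioner output) before returning.
func (p *noopProvisioner) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error {
	return srv.Send(&ProvisionResource_Response{Output: "nothing to do"})
}

// Wiring it up, given a *grpc.Server s:
//
//	RegisterProvisionerServer(s, &noopProvisioner{})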
+type UnimplementedProvisionerServer struct { +} + +func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") +} +func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { + return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") +} +func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go new file mode 100644 index 00000000000..eef4c5ed007 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go @@ -0,0 +1,118 @@ +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" +) + +// EncryptShares takes an ordered set of byte slices to encrypt and the +// corresponding base64-encoded public keys to encrypt them with, encrypts each +// byte slice with the corresponding public key. +// +// Note: There is no corresponding test function; this functionality is +// thoroughly tested in the init and rekey command unit tests +func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) { + if len(input) != len(pgpKeys) { + return nil, nil, fmt.Errorf("mismatch between number items to encrypt and number of PGP keys") + } + encryptedShares := make([][]byte, 0, len(pgpKeys)) + entities, err := GetEntities(pgpKeys) + if err != nil { + return nil, nil, err + } + for i, entity := range entities { + ctBuf := bytes.NewBuffer(nil) + pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil) + if err != nil { + return nil, nil, errwrap.Wrapf("error setting up encryption for PGP message: {{err}}", err) + } + _, err = pt.Write(input[i]) + if err != nil { + return nil, nil, errwrap.Wrapf("error encrypting PGP message: {{err}}", err) + } + pt.Close() + encryptedShares = append(encryptedShares, ctBuf.Bytes()) + } + + fingerprints, err := GetFingerprints(nil, entities) + if err != nil { + return nil, nil, err + } + + return fingerprints, encryptedShares, nil +} + +// GetFingerprints takes in a list of openpgp Entities and returns the +// fingerprints. If entities is nil, it will instead parse both entities and +// fingerprints from the pgpKeys string slice. 
+// GetFingerprints takes in a list of openpgp Entities and returns the
+// fingerprints. If entities is nil, it will instead parse both entities and
+// fingerprints from the pgpKeys string slice.
+func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) {
+	if entities == nil {
+		var err error
+		entities, err = GetEntities(pgpKeys)
+		if err != nil {
+			return nil, err
+		}
+	}
+	ret := make([]string, 0, len(entities))
+	for _, entity := range entities {
+		ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))
+	}
+	return ret, nil
+}
+
+// GetEntities takes in a string array of base64-encoded PGP keys and returns
+// the openpgp Entities
+func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) {
+	ret := make([]*openpgp.Entity, 0, len(pgpKeys))
+	for _, keystring := range pgpKeys {
+		data, err := base64.StdEncoding.DecodeString(keystring)
+		if err != nil {
+			return nil, errwrap.Wrapf("error decoding given PGP key: {{err}}", err)
+		}
+		entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+		if err != nil {
+			return nil, errwrap.Wrapf("error parsing given PGP key: {{err}}", err)
+		}
+		ret = append(ret, entity)
+	}
+	return ret, nil
+}
+
+// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded
+// private key and decrypts it. A bytes.Buffer is returned to allow the caller
+// to do useful things with it (get it as a []byte, get it as a string, use it
+// as an io.Reader, etc), and also because this function doesn't know whether
+// what comes out is binary data or a string, so the caller gets to decide.
+func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) {
+	privKeyBytes, err := base64.StdEncoding.DecodeString(privKey)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decoding base64 private key: {{err}}", err)
+	}
+
+	cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decoding base64 crypted bytes: {{err}}", err)
+	}
+
+	entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
+	if err != nil {
+		return nil, errwrap.Wrapf("error parsing private key: {{err}}", err)
+	}
+
+	entityList := &openpgp.EntityList{entity}
+	md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil)
+	if err != nil {
+		return nil, errwrap.Wrapf("error decrypting the messages: {{err}}", err)
+	}
+
+	ptBuf := bytes.NewBuffer(nil)
+	if _, err := ptBuf.ReadFrom(md.UnverifiedBody); err != nil {
+		return nil, errwrap.Wrapf("error reading decrypted message body: {{err}}", err)
+	}
+
+	return ptBuf, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go
new file mode 100644
index 00000000000..bb0f367d6bf
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go
@@ -0,0 +1,140 @@
+package pgpkeys
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/keybase/go-crypto/openpgp"
+)
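A sketch of how the flag.Value implementations below are meant to be wired up; illustration only, since the flag set, flag name, and inputs here are hypothetical and the internal import path would not be importable outside this module:

package main

import (
	"flag"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys"
)

func main() {
	var keys pgpkeys.PubKeyFilesFlag
	fs := flag.NewFlagSet("rekey", flag.ExitOnError)
	// flag.Var accepts any flag.Value; Set splits on commas and expands
	// "keybase:user" entries via the Keybase API (a network call).
	fs.Var(&keys, "pgp-keys", "comma-separated PGP key files or keybase:user entries")
	if err := fs.Parse([]string{"-pgp-keys", "@ops-key.asc,keybase:hashicorp"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(keys)) // one base64-encoded key per input entry
}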
+// PubKeyFileFlag implements flag.Value and command.Example to receive exactly
+// one PGP or keybase key via a flag.
+type PubKeyFileFlag string
+
+func (p *PubKeyFileFlag) String() string { return string(*p) }
+
+func (p *PubKeyFileFlag) Set(val string) error {
+	if p != nil && *p != "" {
+		return errors.New("can only be specified once")
+	}
+
+	keys, err := ParsePGPKeys(strings.Split(val, ","))
+	if err != nil {
+		return err
+	}
+
+	if len(keys) > 1 {
+		return errors.New("can only specify one pgp key")
+	}
+
+	*p = PubKeyFileFlag(keys[0])
+	return nil
+}
+
+func (p *PubKeyFileFlag) Example() string { return "keybase:user" }
+
+// PubKeyFilesFlag implements the flag.Value interface and allows parsing and
+// reading a list of PGP public key files.
+type PubKeyFilesFlag []string
+
+func (p *PubKeyFilesFlag) String() string {
+	return fmt.Sprint(*p)
+}
+
+func (p *PubKeyFilesFlag) Set(val string) error {
+	if len(*p) > 0 {
+		return errors.New("can only be specified once")
+	}
+
+	keys, err := ParsePGPKeys(strings.Split(val, ","))
+	if err != nil {
+		return err
+	}
+
+	*p = PubKeyFilesFlag(keys)
+	return nil
+}
+
+func (p *PubKeyFilesFlag) Example() string { return "keybase:user1, keybase:user2, ..." }
+
+// ParsePGPKeys takes a list of PGP key references, resolves each one either
+// via Keybase or by reading it from disk, and returns the "expanded" list of
+// PGP keys in the same order.
+func ParsePGPKeys(keyfiles []string) ([]string, error) {
+	keys := make([]string, len(keyfiles))
+
+	keybaseMap, err := FetchKeybasePubkeys(keyfiles)
+	if err != nil {
+		return nil, err
+	}
+
+	for i, keyfile := range keyfiles {
+		keyfile = strings.TrimSpace(keyfile)
+
+		if strings.HasPrefix(keyfile, kbPrefix) {
+			key, ok := keybaseMap[keyfile]
+			if !ok || key == "" {
+				return nil, fmt.Errorf("keybase user %q not found", strings.TrimPrefix(keyfile, kbPrefix))
+			}
+			keys[i] = key
+			continue
+		}
+
+		pgpStr, err := ReadPGPFile(keyfile)
+		if err != nil {
+			return nil, err
+		}
+		keys[i] = pgpStr
+	}
+
+	return keys, nil
+}
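For concreteness, a small same-package sketch of ParsePGPKeys' contract; the file name and Keybase user are hypothetical:

package pgpkeys

func exampleParse() ([]string, error) {
	// Output order mirrors input order: the file entry is normalized to
	// base64 by ReadPGPFile (below), while the "keybase:" entry is resolved
	// in a single batched Keybase API call.
	return ParsePGPKeys([]string{"./ops-key.asc", "keybase:hashicorp"})
}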
+// ReadPGPFile reads the given PGP file from disk.
+func ReadPGPFile(path string) (string, error) {
+	if path[0] == '@' {
+		path = path[1:]
+	}
+	f, err := os.Open(path)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	buf := bytes.NewBuffer(nil)
+	_, err = buf.ReadFrom(f)
+	if err != nil {
+		return "", err
+	}
+
+	// First parse as an armored keyring file; if that doesn't work, treat it
+	// as a straight binary/b64 string
+	keyReader := bytes.NewReader(buf.Bytes())
+	entityList, err := openpgp.ReadArmoredKeyRing(keyReader)
+	if err == nil {
+		if len(entityList) != 1 {
+			return "", fmt.Errorf("more than one key found in file %q", path)
+		}
+		if entityList[0] == nil {
+			return "", fmt.Errorf("primary key was nil for file %q", path)
+		}
+
+		serializedEntity := bytes.NewBuffer(nil)
+		err = entityList[0].Serialize(serializedEntity)
+		if err != nil {
+			return "", errwrap.Wrapf(fmt.Sprintf("error serializing entity for file %q: {{err}}", path), err)
+		}
+
+		return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil
+	}
+
+	_, err = base64.StdEncoding.DecodeString(buf.String())
+	if err == nil {
+		return buf.String(), nil
+	}
+	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go
new file mode 100644
index 00000000000..7d153346a5c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go
@@ -0,0 +1,117 @@
+package pgpkeys
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil"
+	"github.com/keybase/go-crypto/openpgp"
+)
+
+const (
+	kbPrefix = "keybase:"
+)
+
+// FetchKeybasePubkeys fetches public keys from Keybase given a set of
+// usernames, which are derived from correctly formatted input entries. It
+// doesn't use the Keybase client code, both because of the API involved and
+// because that code is considered alpha and probably best not to rely on.
+// The keys are returned as base64-encoded strings.
+func FetchKeybasePubkeys(input []string) (map[string]string, error) { + client := cleanhttp.DefaultClient() + if client == nil { + return nil, fmt.Errorf("unable to create an http client") + } + + if len(input) == 0 { + return nil, nil + } + + usernames := make([]string, 0, len(input)) + for _, v := range input { + if strings.HasPrefix(v, kbPrefix) { + usernames = append(usernames, strings.TrimPrefix(v, kbPrefix)) + } + } + + if len(usernames) == 0 { + return nil, nil + } + + ret := make(map[string]string, len(usernames)) + url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ",")) + resp, err := client.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + type PublicKeys struct { + Primary struct { + Bundle string + } + } + + type LThem struct { + PublicKeys `json:"public_keys"` + } + + type KbResp struct { + Status struct { + Name string + } + Them []LThem + } + + out := &KbResp{ + Them: []LThem{}, + } + + if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { + return nil, err + } + + if out.Status.Name != "OK" { + return nil, fmt.Errorf("got non-OK response: %q", out.Status.Name) + } + + missingNames := make([]string, 0, len(usernames)) + var keyReader *bytes.Reader + serializedEntity := bytes.NewBuffer(nil) + for i, themVal := range out.Them { + if themVal.Primary.Bundle == "" { + missingNames = append(missingNames, usernames[i]) + continue + } + keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle)) + entityList, err := openpgp.ReadArmoredKeyRing(keyReader) + if err != nil { + return nil, err + } + if len(entityList) != 1 { + return nil, fmt.Errorf("primary key could not be parsed for user %q", usernames[i]) + } + if entityList[0] == nil { + return nil, fmt.Errorf("primary key was nil for user %q", usernames[i]) + } + + serializedEntity.Reset() + err = entityList[0].Serialize(serializedEntity) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error serializing entity for user %q: {{err}}", usernames[i]), err) + } + + // The API returns values in the same ordering requested, so this should properly match + ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes()) + } + + if len(missingNames) > 0 { + return nil, fmt.Errorf("unable to fetch keys for user(s) %q from keybase", strings.Join(missingNames, ",")) + } + + return ret, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/test_keys.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/test_keys.go new file mode 100644 index 00000000000..c10a9055ed0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/test_keys.go @@ -0,0 +1,271 @@ +package pgpkeys + +const ( + TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX +oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj +UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx 
+JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD +jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4 +yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek +nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6 +kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2 +Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR +ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk +Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE +sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52 +N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d +4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3 +9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe +o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR +BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf +Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a +9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu +9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z +bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ +xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z +UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ +6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr +drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34 +byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO +gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS +astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM +FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg +EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA +K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I +n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA +3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM +9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z +XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr +9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc +ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4 +EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf +NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln +G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g +H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf +PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h +7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e 
+Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz +HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd +WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se +q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN +9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ +QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe +lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1 +iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu +h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8 +ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j +Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha +V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF +kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip +ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq +hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG +kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0 +vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx +k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe +4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t +GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH +yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r +ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T +AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO +8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi +jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP +hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk +uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v +ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR +a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL +rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf +jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW +H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe +PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn +9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ +kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/ +6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j +ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn +igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC +LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK +HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6 +DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO +hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/ +FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q +dXlg3mimA+iK7tABQfG0RJ9YzWs=` + + 
TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa +39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da +A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt +JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq +26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5 +OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx +yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L +1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr +MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe +Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7 +gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK +64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM +3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+ +ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c +wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B +DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk +hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B ++Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD +y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc +4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN +Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9 +T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/ +VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN +rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a +CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N +Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp +W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs +wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx +U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k +85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt +ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK +VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN +U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup +Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij +AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh +BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen +4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD +umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I +LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj +KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2 
+FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9 +ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o +fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u +KFIzGEPKVoBF9U7VBXpyxpsz+A==` + + TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B +HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD +cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE +A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB +C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa +QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn +aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y +jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb +6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N +ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu +9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu +lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN +C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0 +YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi +oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH +/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI +PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O +9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx +8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd +OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr +XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO +2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P +Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX +elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY +DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef +pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg ++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m +UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ +SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW +IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ 
+AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD +hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ +e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K +BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY +ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H +/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7 +PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U +PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd +w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 +MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` + + TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8 +LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi +pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6 +LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY +MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1 +lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3 +s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V +KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8 +7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645 +jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ +ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ +AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI +WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr ++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ +GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827 ++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI +AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c +GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj +lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv +dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn +puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=` + + TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz +wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7 +H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX +1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR +fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz +5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5 +IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ +EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT +JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z 
+mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z
+J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+
+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o
+EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I
+1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj
+h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj
+OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o
+P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle
+Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
+AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh
+EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk
+GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd
+tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY
+cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7
+fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY
++XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7
+moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko
+jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ
+iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy
+rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF
+Pu3AlUaGNMBlvy6PEpU=
+=NUTS
+-----END PGP PUBLIC KEY BLOCK-----`
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go
new file mode 100644
index 00000000000..356d4548fa4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go
@@ -0,0 +1,207 @@
+package compressutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"compress/lzw"
+	"fmt"
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/hashicorp/errwrap"
+	"github.com/pierrec/lz4"
+)
+
+const (
+	// A byte value used as a canary prefix for the compressed information
+	// which is used to distinguish if a JSON input is compressed or not.
+	// The value of this constant should not be the first character of any
+	// valid JSON string.
+
+	CompressionTypeGzip   = "gzip"
+	CompressionCanaryGzip byte = 'G'
+
+	CompressionTypeLZW   = "lzw"
+	CompressionCanaryLZW byte = 'L'
+
+	CompressionTypeSnappy   = "snappy"
+	CompressionCanarySnappy byte = 'S'
+
+	CompressionTypeLZ4   = "lz4"
+	CompressionCanaryLZ4 byte = '4'
+)
+
+// CompressUtilReadCloser embeds the snappy reader which implements the
+// io.Reader interface. The decompress procedure in this utility expects an
+// io.ReadCloser. This type implements the io.Closer interface to retain the
+// generic way of decompression.
+type CompressUtilReadCloser struct {
+	io.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (c *CompressUtilReadCloser) Close() error {
+	return nil
+}
+
+// CompressionConfig is used to select a compression type to be performed by
+// Compress and Decompress utilities.
+// Supported types are:
+// * CompressionTypeLZW
+// * CompressionTypeGzip
+// * CompressionTypeSnappy
+// * CompressionTypeLZ4
+//
+// When using CompressionTypeGzip, the compression levels can also be chosen:
+// * gzip.DefaultCompression
+// * gzip.BestSpeed
+// * gzip.BestCompression
+type CompressionConfig struct {
+	// Type of the compression algorithm to be used
+	Type string
+
+	// When using Gzip format, the compression level to employ
+	GzipCompressionLevel int
+}
+
+// Compress places the canary byte in a buffer and uses the same buffer to fill
+// in the compressed information of the given input. The configuration supports
+// the compression types listed above (LZW, Gzip, Snappy, and LZ4). When using
+// the Gzip format, if GzipCompressionLevel is not specified,
+// gzip.DefaultCompression is assumed.
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
+	var buf bytes.Buffer
+	var writer io.WriteCloser
+	var err error
+
+	if config == nil {
+		return nil, fmt.Errorf("config is nil")
+	}
+
+	// Write the canary into the buffer and create writer to compress the
+	// input data based on the configured type
+	switch config.Type {
+	case CompressionTypeLZW:
+		buf.Write([]byte{CompressionCanaryLZW})
+		writer = lzw.NewWriter(&buf, lzw.LSB, 8)
+
+	case CompressionTypeGzip:
+		buf.Write([]byte{CompressionCanaryGzip})
+
+		switch {
+		case config.GzipCompressionLevel == gzip.BestCompression,
+			config.GzipCompressionLevel == gzip.BestSpeed,
+			config.GzipCompressionLevel == gzip.DefaultCompression:
+			// These are valid compression levels
+		default:
+			// If compression level is set to NoCompression or to
+			// any invalid value, fall back to DefaultCompression
+			config.GzipCompressionLevel = gzip.DefaultCompression
+		}
+		writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+
+	case CompressionTypeSnappy:
+		buf.Write([]byte{CompressionCanarySnappy})
+		writer = snappy.NewBufferedWriter(&buf)
+
+	case CompressionTypeLZ4:
+		buf.Write([]byte{CompressionCanaryLZ4})
+		writer = lz4.NewWriter(&buf)
+
+	default:
+		return nil, fmt.Errorf("unsupported compression type")
+	}
+
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
+	}
+
+	if writer == nil {
+		return nil, fmt.Errorf("failed to create a compression writer")
+	}
+
+	// Compress the input and place it in the same buffer containing the
+	// canary byte.
+	if _, err = writer.Write(data); err != nil {
+		return nil, errwrap.Wrapf("failed to compress input data: {{err}}", err)
+	}
+
+	// Close the io.WriteCloser
+	if err = writer.Close(); err != nil {
+		return nil, err
+	}
+
+	// Return the compressed bytes with canary byte at the start
+	return buf.Bytes(), nil
+}
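A same-package sketch of the canary mechanism that Compress (above) and Decompress (below) implement; the literal input here is arbitrary:

package compressutil

import "fmt"

func exampleRoundTrip() error {
	blob, err := Compress([]byte(`{"a":1}`), &CompressionConfig{Type: CompressionTypeSnappy})
	if err != nil {
		return err
	}
	fmt.Printf("canary: %c\n", blob[0]) // 'S', i.e. CompressionCanarySnappy

	plain, notCompressed, err := Decompress(blob)
	if err != nil {
		return err
	}
	// notCompressed is false: the canary was recognized and stripped.
	// Feeding Decompress plain JSON would return (nil, true, nil) instead,
	// because '{' matches no canary.
	fmt.Println(notCompressed, string(plain))
	return nil
}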
+// Decompress checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// will be decompressed using the method the canary indicates. If the first
+// byte isn't a canary byte, then the utility returns a boolean value
+// indicating that the input was not compressed.
+func Decompress(data []byte) ([]byte, bool, error) {
+	var err error
+	var reader io.ReadCloser
+	if len(data) == 0 {
+		return nil, false, fmt.Errorf("'data' being decompressed is empty")
+	}
+
+	canary := data[0]
+	cData := data[1:]
+
+	switch canary {
+	// If the first byte matches the canary byte, remove the canary
+	// byte and try to decompress the data that is after the canary.
+	case CompressionCanaryGzip:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader, err = gzip.NewReader(bytes.NewReader(cData))
+
+	case CompressionCanaryLZW:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
+
+	case CompressionCanarySnappy:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: snappy.NewReader(bytes.NewReader(cData)),
+		}
+
+	case CompressionCanaryLZ4:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: lz4.NewReader(bytes.NewReader(cData)),
+		}
+
+	default:
+		// If the first byte doesn't match the canary byte, it means
+		// that the content was not compressed at all. Indicate to the
+		// caller that the input was not compressed.
+		return nil, true, nil
+	}
+	if err != nil {
+		return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
+	}
+	if reader == nil {
+		return nil, false, fmt.Errorf("failed to create a compression reader")
+	}
+
+	// Close the io.ReadCloser
+	defer reader.Close()
+
+	// Read all the compressed data into a buffer
+	var buf bytes.Buffer
+	if _, err = io.Copy(&buf, reader); err != nil {
+		return nil, false, err
+	}
+
+	return buf.Bytes(), false, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go
new file mode 100644
index 00000000000..b5dbca4f86f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go
@@ -0,0 +1,100 @@
+package jsonutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil"
+)
+
+// EncodeJSON encodes/marshals the given object into JSON
+func EncodeJSON(in interface{}) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
+	if err := enc.Encode(in); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// EncodeJSONAndCompress encodes the given input into JSON and compresses the
+// encoded value (by default using the Gzip format at BestCompression level).
+// A canary byte is placed at the beginning of the returned bytes so the
+// decompression logic can identify compressed input.
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+
+	// First JSON encode the given input
+	encodedBytes, err := EncodeJSON(in)
+	if err != nil {
+		return nil, err
+	}
+
+	if config == nil {
+		config = &compressutil.CompressionConfig{
+			Type:                 compressutil.CompressionTypeGzip,
+			GzipCompressionLevel: gzip.BestCompression,
+		}
+	}
+
+	return compressutil.Compress(encodedBytes, config)
+}
+
+// DecodeJSON tries to decompress the given data; a canary byte before the
+// compressed data identifies whether the content was compressed in the first
+// place. If the data is not compressed, it is JSON decoded directly;
+// otherwise the decompressed data is JSON decoded.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go
new file mode 100644
index 00000000000..b5dbca4f86f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go
@@ -0,0 +1,100 @@
+package jsonutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil"
+)
+
+// EncodeJSON encodes/marshals the given object into JSON
+func EncodeJSON(in interface{}) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
+	if err := enc.Encode(in); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// EncodeJSONAndCompress encodes the given input into JSON and compresses the
+// encoded value (using the Gzip format at BestCompression level, by default).
+// A canary byte is placed at the beginning of the returned bytes for the
+// decompression logic to identify compressed input.
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+
+	// First JSON encode the given input
+	encodedBytes, err := EncodeJSON(in)
+	if err != nil {
+		return nil, err
+	}
+
+	if config == nil {
+		config = &compressutil.CompressionConfig{
+			Type:                 compressutil.CompressionTypeGzip,
+			GzipCompressionLevel: gzip.BestCompression,
+		}
+	}
+
+	return compressutil.Compress(encodedBytes, config)
+}
+
+// DecodeJSON first tries to decompress the given data; compressed input is
+// identified by a canary byte placed before the compressed data. If the data
+// is not compressed, it is JSON decoded directly. Otherwise the decompressed
+// data is JSON decoded.
+func DecodeJSON(data []byte, out interface{}) error {
+	if len(data) == 0 {
+		return fmt.Errorf("'data' being decoded is empty")
+	}
+	if out == nil {
+		return fmt.Errorf("output parameter 'out' is nil")
+	}
+
+	// Decompress the data if it was compressed in the first place
+	decompressedBytes, uncompressed, err := compressutil.Decompress(data)
+	if err != nil {
+		return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
+	}
+	if !uncompressed && len(decompressedBytes) == 0 {
+		return fmt.Errorf("decompressed data being decoded is invalid")
+	}
+
+	// If the input carried the compression canary, decode the decompressed
+	// bytes instead of the original input.
+	if !uncompressed {
+		data = decompressedBytes
+	}
+
+	return DecodeJSONFromReader(bytes.NewReader(data), out)
+}
+
+// DecodeJSONFromReader decodes/unmarshals JSON from the given io.Reader into
+// the desired object
+func DecodeJSONFromReader(r io.Reader, out interface{}) error {
+	if r == nil {
+		return fmt.Errorf("'io.Reader' being decoded is nil")
+	}
+	if out == nil {
+		return fmt.Errorf("output parameter 'out' is nil")
+	}
+
+	dec := json.NewDecoder(r)
+
+	// While decoding JSON values, interpret the integer values as
+	// `json.Number`s instead of `float64`.
+	dec.UseNumber()
+
+	// Since 'out' is an interface representing a pointer, pass it to the
+	// decoder without an '&'
+	return dec.Decode(out)
+}
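+
+// Example (editor's sketch, not part of the vendored file): encoding a
+// hypothetical struct with the default gzip/BestCompression configuration
+// (config == nil) and decoding it back.
+//
+//	type state struct {
+//		Name string `json:"name"`
+//	}
+//	blob, err := EncodeJSONAndCompress(&state{Name: "example"}, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var decoded state
+//	if err := DecodeJSON(blob, &decoded); err != nil {
+//		log.Fatal(err)
+//	}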
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go
new file mode 100644
index 00000000000..7341b5339e6
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go
@@ -0,0 +1,40 @@
+// The version package provides a location to set the release versions for all
+// packages to consume, without creating import cycles.
+//
+// This package should not import any other terraform packages.
+package version
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+var Version = "0.12.7"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+var Prerelease = "sdk"
+
+// SemVer is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVer *version.Version
+
+func init() {
+	SemVer = version.Must(version.NewVersion(Version))
+}
+
+// Header is the header name used to send the current terraform version
+// in http requests.
+const Header = "Terraform-Version"
+
+// String returns the complete version string, including prerelease
+func String() string {
+	if Prerelease != "" {
+		return fmt.Sprintf("%s-%s", Version, Prerelease)
+	}
+	return Version
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go
new file mode 100644
index 00000000000..0f3f8401f9e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go
@@ -0,0 +1,36 @@
+// The meta package provides a location to set the release version
+// and any other relevant metadata for the SDK.
+//
+// This package should not import any other SDK packages.
+package meta
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+var SDKVersion = "1.2.0"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+var SDKPrerelease = ""
+
+// SemVer is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVer *version.Version
+
+func init() {
+	SemVer = version.Must(version.NewVersion(SDKVersion))
+}
+
+// SDKVersionString returns the complete version string, including prerelease
+func SDKVersionString() string {
+	if SDKPrerelease != "" {
+		return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease)
+	}
+	return SDKVersion
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go
new file mode 100644
index 00000000000..5a99e90064a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go
@@ -0,0 +1,35 @@
+package plugin
+
+import (
+	"os"
+	"os/exec"
+
+	hclog "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
+)
+
+// ClientConfig returns a configuration object that can be used to instantiate
+// a client for the plugin described by the given metadata.
+func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
+	logger := hclog.New(&hclog.LoggerOptions{
+		Name:   "plugin",
+		Level:  hclog.Trace,
+		Output: os.Stderr,
+	})
+
+	return &plugin.ClientConfig{
+		Cmd:              exec.Command(m.Path),
+		HandshakeConfig:  Handshake,
+		VersionedPlugins: VersionedPlugins,
+		Managed:          true,
+		Logger:           logger,
+		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
+		AutoMTLS:         true,
+	}
+}
+
+// Client returns a plugin client for the plugin described by the given metadata.
+func Client(m discovery.PluginMeta) *plugin.Client {
+	return plugin.NewClient(ClientConfig(m))
+}
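+
+// Example (editor's sketch, not part of the vendored file): how a
+// hypothetical caller might launch a plugin and obtain the provider
+// interface; 'meta' is an assumed discovery.PluginMeta pointing at a
+// provider binary.
+//
+//	client := Client(meta)
+//	rpcClient, err := client.Client()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	raw, err := rpcClient.Dispense("provider")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	provider := raw.(*GRPCProvider)
+//	defer client.Kill()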
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go
new file mode 100644
index 00000000000..e4520975c1b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go
@@ -0,0 +1,563 @@
+package plugin
+
+import (
+	"context"
+	"errors"
+	"log"
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	"google.golang.org/grpc"
+)
+
+// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
+type GRPCProviderPlugin struct {
+	plugin.Plugin
+	GRPCProvider func() proto.ProviderServer
+}
+
+func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &GRPCProvider{
+		client: proto.NewProviderClient(c),
+		ctx:    ctx,
+	}, nil
+}
+
+func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	proto.RegisterProviderServer(s, p.GRPCProvider())
+	return nil
+}
+
+// GRPCProvider handles the client, or core, side of the plugin rpc connection.
+// The GRPCProvider methods are mostly a translation layer between the
+// terraform providers types and the grpc proto types, directly converting
+// between the two.
+type GRPCProvider struct {
+	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
+	// This allows the GRPCProvider a way to shutdown the plugin process.
+	PluginClient *plugin.Client
+
+	// TestServer contains a grpc.Server to close when the GRPCProvider is being
+	// used in an end to end test of a provider.
+	TestServer *grpc.Server
+
+	// Proto client used to make the grpc service calls.
+	client proto.ProviderClient
+
+	// this context is created by the plugin package, and is canceled when the
+	// plugin process ends.
+	ctx context.Context
+
+	// schemas stores the schema for this provider. This is used to properly
+	// serialize the state for requests.
+	mu      sync.Mutex
+	schemas providers.GetSchemaResponse
+}
+
+// getSchema is used internally to get the saved provider schema. The schema
+// should have already been fetched from the provider, but we have to
+// synchronize access to avoid being called concurrently with GetSchema.
+func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
+	p.mu.Lock()
+	// unlock inline in case GetSchema needs to be called
+	if p.schemas.Provider.Block != nil {
+		p.mu.Unlock()
+		return p.schemas
+	}
+	p.mu.Unlock()
+
+	// the schema should have been fetched already, but give it another shot
+	// just in case things are being called out of order. This may happen for
+	// tests.
+	schemas := p.GetSchema()
+	if schemas.Diagnostics.HasErrors() {
+		panic(schemas.Diagnostics.Err())
+	}
+
+	return schemas
+}
+
+// getResourceSchema is a helper to extract the schema for a resource, and
+// panics if the schema is not available.
+func (p *GRPCProvider) getResourceSchema(name string) providers.Schema { + schema := p.getSchema() + resSchema, ok := schema.ResourceTypes[name] + if !ok { + panic("unknown resource type " + name) + } + return resSchema +} + +// gettDatasourceSchema is a helper to extract the schema for a datasource, and +// panics if that schema is not available. +func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema { + schema := p.getSchema() + dataSchema, ok := schema.DataSources[name] + if !ok { + panic("unknown data source " + name) + } + return dataSchema +} + +func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) { + log.Printf("[TRACE] GRPCProvider: GetSchema") + p.mu.Lock() + defer p.mu.Unlock() + + if p.schemas.Provider.Block != nil { + return p.schemas + } + + resp.ResourceTypes = make(map[string]providers.Schema) + resp.DataSources = make(map[string]providers.Schema) + + // Some providers may generate quite large schemas, and the internal default + // grpc response size limit is 4MB. 64MB should cover most any use case, and + // if we get providers nearing that we may want to consider a finer-grained + // API to fetch individual resource schemas. + // Note: this option is marked as EXPERIMENTAL in the grpc API. + const maxRecvSize = 64 << 20 + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provider == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) + return resp + } + + resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) + + for name, res := range protoResp.ResourceSchemas { + resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) + } + + for name, data := range protoResp.DataSourceSchemas { + resp.DataSources[name] = convert.ProtoToProviderSchema(data) + } + + p.schemas = resp + + return resp +} + +func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { + log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig") + + schema := p.getSchema() + ty := schema.Provider.Block.ImpliedType() + + mp, err := msgpack.Marshal(r.Config, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PrepareProviderConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + config := cty.NullVal(ty) + if protoResp.PreparedConfig != nil { + config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PreparedConfig = config + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") + resourceSchema := p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = 
resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") + + resSchema := p.getResourceSchema(r.TypeName) + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.UpgradedState != nil { + state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + + resp.UpgradedState = state + return resp +} + +func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { + log.Printf("[TRACE] GRPCProvider: Configure") + + schema := p.getSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + log.Printf("[TRACE] GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadResource") + + resSchema := 
p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: PlanResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.PlannedState != nil { + state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + 
TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: ImportResourceState") + + protoReq := &proto.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema := p.getResourceSchema(resource.TypeName) + state := cty.NullVal(resSchema.Block.ImpliedType()) + if imported.State != nil { + state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadDataSource") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(dataSchema.Block.ImpliedType()) + if protoResp.State != nil { + state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + log.Printf("[TRACE] GRPCProvider: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. 
+ // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go new file mode 100644 index 00000000000..c0e6f549abe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go @@ -0,0 +1,178 @@ +package plugin + +import ( + "context" + "errors" + "io" + "log" + "sync" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. +type GRPCProvisionerPlugin struct { + plugin.Plugin + GRPCProvisioner func() proto.ProvisionerServer +} + +func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvisioner{ + client: proto.NewProvisionerClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) + return nil +} + +// provisioners.Interface grpc implementation +type GRPCProvisioner struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + client proto.ProvisionerClient + ctx context.Context + + // Cache the schema since we need it for serialization in each method call. 
+ mu sync.Mutex + schema *configschema.Block +} + +func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.schema != nil { + return provisioners.GetSchemaResponse{ + Provisioner: p.schema, + } + } + + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provisioner == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) + return resp + } + + resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) + + p.schema = resp.Provisioner + + return resp +} + +func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateProvisionerConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // connection is always assumed to be a simple string map + connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ProvisionResource_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + Connection: &proto.DynamicValue{Msgpack: connMP}, + } + + outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + for { + rcv, err := outputClient.Recv() + if rcv != nil { + r.UIOutput.Output(rcv.Output) + } + if err != nil { + if err != io.EOF { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + break + } + + if len(rcv.Diagnostics) > 0 { + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) + break + } + } + + return resp +} + +func (p *GRPCProvisioner) Stop() error { + protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) + if err != nil { + return err + } + if protoResp.Error != "" { + return errors.New(protoResp.Error) + } + return nil +} + +func (p *GRPCProvisioner) Close() error { + // check this since it's not automatically inserted during plugin creation + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go new file mode 100644 index 00000000000..e4fb577619c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go @@ -0,0 +1,14 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" +) + +// See serve.go for serving plugins + +var VersionedPlugins = map[int]plugin.PluginSet{ + 5: { + "provider": &GRPCProviderPlugin{}, + "provisioner": &GRPCProvisionerPlugin{}, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go new file mode 100644 index 00000000000..bfd62e2e9ba --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go @@ -0,0 +1,620 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// ResourceProviderPlugin is the plugin.Plugin implementation. +type ResourceProviderPlugin struct { + ResourceProvider func() terraform.ResourceProvider +} + +func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProviderServer{ + Broker: b, + Provider: p.ResourceProvider(), + }, nil +} + +func (p *ResourceProviderPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvider{Broker: b, Client: c}, nil +} + +// ResourceProvider is an implementation of terraform.ResourceProvider +// that communicates over RPC. 
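+
+// Editor's note (sketch, not part of the vendored file): every
+// ResourceProvider method below follows the same net/rpc call pattern; for
+// example, a hypothetical Configure call looks like:
+//
+//	var resp ResourceProviderConfigureResponse
+//	err := p.Client.Call("Plugin.Configure", config, &resp)
+//	if err == nil && resp.Error != nil {
+//		err = resp.Error
+//	}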
+type ResourceProvider struct { + Broker *plugin.MuxBroker + Client *rpc.Client +} + +func (p *ResourceProvider) Stop() error { + var resp ResourceProviderStopResponse + err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + var result ResourceProviderGetSchemaResponse + args := &ResourceProviderGetSchemaArgs{ + Req: req, + } + + err := p.Client.Call("Plugin.GetSchema", args, &result) + if err != nil { + return nil, err + } + + if result.Error != nil { + err = result.Error + } + + return result.Schema, err +} + +func (p *ResourceProvider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + id := p.Broker.NextId() + go p.Broker.AcceptAndServe(id, &UIInputServer{ + UIInput: input, + }) + + var resp ResourceProviderInputResponse + args := ResourceProviderInputArgs{ + InputId: id, + Config: c, + } + + err := p.Client.Call("Plugin.Input", &args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + return nil, err + } + + return resp.Config, nil +} + +func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResponse + args := ResourceProviderValidateArgs{ + Config: c, + } + + err := p.Client.Call("Plugin.Validate", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := p.Client.Call("Plugin.ValidateResource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { + var resp ResourceProviderConfigureResponse + err := p.Client.Call("Plugin.Configure", c, &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderApplyResponse + args := &ResourceProviderApplyArgs{ + Info: info, + State: s, + Diff: d, + } + + err := p.Client.Call("Plugin.Apply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderDiffResponse + args := &ResourceProviderDiffArgs{ + Info: info, + State: s, + Config: c, + } + err := p.Client.Call("Plugin.Diff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.Diff, err +} + +func (p *ResourceProvider) ValidateDataSource( + t string, c 
*terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + var resp ResourceProviderRefreshResponse + args := &ResourceProviderRefreshArgs{ + Info: info, + State: s, + } + + err := p.Client.Call("Plugin.Refresh", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + var resp ResourceProviderImportStateResponse + args := &ResourceProviderImportStateArgs{ + Info: info, + Id: id, + } + + err := p.Client.Call("Plugin.ImportState", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Resources() []terraform.ResourceType { + var result []terraform.ResourceType + + err := p.Client.Call("Plugin.Resources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? + return nil + } + + return result +} + +func (p *ResourceProvider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderReadDataDiffResponse + args := &ResourceProviderReadDataDiffArgs{ + Info: info, + Config: c, + } + + err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.Diff, err +} + +func (p *ResourceProvider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderReadDataApplyResponse + args := &ResourceProviderReadDataApplyArgs{ + Info: info, + Diff: d, + } + + err := p.Client.Call("Plugin.ReadDataApply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) DataSources() []terraform.DataSource { + var result []terraform.DataSource + + err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? + return nil + } + + return result +} + +func (p *ResourceProvider) Close() error { + return p.Client.Close() +} + +// ResourceProviderServer is a net/rpc compatible structure for serving +// a ResourceProvider. This should not be used directly. 
+type ResourceProviderServer struct { + Broker *plugin.MuxBroker + Provider terraform.ResourceProvider +} + +type ResourceProviderStopResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderGetSchemaArgs struct { + Req *terraform.ProviderSchemaRequest +} + +type ResourceProviderGetSchemaResponse struct { + Schema *terraform.ProviderSchema + Error *plugin.BasicError +} + +type ResourceProviderConfigureResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderInputArgs struct { + InputId uint32 + Config *terraform.ResourceConfig +} + +type ResourceProviderInputResponse struct { + Config *terraform.ResourceConfig + Error *plugin.BasicError +} + +type ResourceProviderApplyArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Diff *terraform.InstanceDiff +} + +type ResourceProviderApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderDiffArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Config *terraform.ResourceConfig +} + +type ResourceProviderDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderRefreshArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState +} + +type ResourceProviderRefreshResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderImportStateArgs struct { + Info *terraform.InstanceInfo + Id string +} + +type ResourceProviderImportStateResponse struct { + State []*terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataApplyArgs struct { + Info *terraform.InstanceInfo + Diff *terraform.InstanceDiff +} + +type ResourceProviderReadDataApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataDiffArgs struct { + Info *terraform.InstanceInfo + Config *terraform.ResourceConfig +} + +type ResourceProviderReadDataDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderValidateArgs struct { + Config *terraform.ResourceConfig +} + +type ResourceProviderValidateResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +type ResourceProviderValidateResourceArgs struct { + Config *terraform.ResourceConfig + Type string +} + +type ResourceProviderValidateResourceResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +func (s *ResourceProviderServer) Stop( + _ interface{}, + reply *ResourceProviderStopResponse) error { + err := s.Provider.Stop() + *reply = ResourceProviderStopResponse{ + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (s *ResourceProviderServer) GetSchema( + args *ResourceProviderGetSchemaArgs, + result *ResourceProviderGetSchemaResponse, +) error { + schema, err := s.Provider.GetSchema(args.Req) + result.Schema = schema + if err != nil { + result.Error = plugin.NewBasicError(err) + } + return nil +} + +func (s *ResourceProviderServer) Input( + args *ResourceProviderInputArgs, + reply *ResourceProviderInputResponse) error { + conn, err := s.Broker.Dial(args.InputId) + if err != nil { + *reply = ResourceProviderInputResponse{ + Error: plugin.NewBasicError(err), + } + return nil + } + client := rpc.NewClient(conn) + defer client.Close() + + input := &UIInput{Client: client} + + config, err := s.Provider.Input(input, args.Config) + *reply = ResourceProviderInputResponse{ + Config: config, + Error: plugin.NewBasicError(err), + } + 
+ return nil +} + +func (s *ResourceProviderServer) Validate( + args *ResourceProviderValidateArgs, + reply *ResourceProviderValidateResponse) error { + warns, errs := s.Provider.Validate(args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ValidateResource( + args *ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateResource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) Configure( + config *terraform.ResourceConfig, + reply *ResourceProviderConfigureResponse) error { + err := s.Provider.Configure(config) + *reply = ResourceProviderConfigureResponse{ + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Apply( + args *ResourceProviderApplyArgs, + result *ResourceProviderApplyResponse) error { + state, err := s.Provider.Apply(args.Info, args.State, args.Diff) + *result = ResourceProviderApplyResponse{ + State: state, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Diff( + args *ResourceProviderDiffArgs, + result *ResourceProviderDiffResponse) error { + diff, err := s.Provider.Diff(args.Info, args.State, args.Config) + *result = ResourceProviderDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Refresh( + args *ResourceProviderRefreshArgs, + result *ResourceProviderRefreshResponse) error { + newState, err := s.Provider.Refresh(args.Info, args.State) + *result = ResourceProviderRefreshResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ImportState( + args *ResourceProviderImportStateArgs, + result *ResourceProviderImportStateResponse) error { + states, err := s.Provider.ImportState(args.Info, args.Id) + *result = ResourceProviderImportStateResponse{ + State: states, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Resources( + nothing interface{}, + result *[]terraform.ResourceType) error { + *result = s.Provider.Resources() + return nil +} + +func (s *ResourceProviderServer) ValidateDataSource( + args *ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ReadDataDiff( + args *ResourceProviderReadDataDiffArgs, + result *ResourceProviderReadDataDiffResponse) error { + diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) + *result = ResourceProviderReadDataDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ReadDataApply( + args *ResourceProviderReadDataApplyArgs, + result *ResourceProviderReadDataApplyResponse) error { + newState, err := 
s.Provider.ReadDataApply(args.Info, args.Diff) + *result = ResourceProviderReadDataApplyResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) DataSources( + nothing interface{}, + result *[]terraform.DataSource) error { + *result = s.Provider.DataSources() + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go new file mode 100644 index 00000000000..cbe9fc63693 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" + grpcplugin "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify + // a particular version during their handshake. This is the version used when Terraform 0.10 + // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must + // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and + // 0.11. + DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. This should be bumped whenever a change happens in + // one or the other that makes it so that they can't safely communicate. + // This could be adding a new interface value, it could be how + // helper/schema computes diffs, etc. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() terraform.ResourceProvider +type GRPCProviderFunc func() proto.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + // since the plugins may not yet be aware of the new protocol, we + // automatically wrap the plugins in the grpc shims. + if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) + // this is almost always going to be a *schema.Provider, but check that + // we got back a valid provider just in case. 
+		if provider != nil {
+			opts.GRPCProviderFunc = func() proto.ProviderServer {
+				return provider
+			}
+		}
+	}
+
+	plugin.Serve(&plugin.ServeConfig{
+		HandshakeConfig:  Handshake,
+		VersionedPlugins: pluginSet(opts),
+		GRPCServer:       plugin.DefaultGRPCServer,
+	})
+}
+
+// legacyPluginMap returns the legacy map[string]plugin.Plugin to use for
+// configuring a plugin server or client.
+func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin {
+	return map[string]plugin.Plugin{
+		"provider": &ResourceProviderPlugin{
+			ResourceProvider: opts.ProviderFunc,
+		},
+	}
+}
+
+func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet {
+	// Set the legacy netrpc plugins at version 4.
+	// The oldest version is returned when executed by a legacy go-plugin
+	// client.
+	plugins := map[int]plugin.PluginSet{
+		4: legacyPluginMap(opts),
+	}
+
+	// add the new protocol versions if they're configured
+	if opts.GRPCProviderFunc != nil {
+		plugins[5] = plugin.PluginSet{}
+		if opts.GRPCProviderFunc != nil {
+			plugins[5]["provider"] = &GRPCProviderPlugin{
+				GRPCProvider: opts.GRPCProviderFunc,
+			}
+		}
+	}
+	return plugins
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go
new file mode 100644
index 00000000000..b24b03ebfe1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go
@@ -0,0 +1,52 @@
+package plugin
+
+import (
+	"context"
+	"net/rpc"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+// UIInput is an implementation of terraform.UIInput that communicates
+// over RPC.
+type UIInput struct {
+	Client *rpc.Client
+}
+
+func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) {
+	var resp UIInputInputResponse
+	err := i.Client.Call("Plugin.Input", opts, &resp)
+	if err != nil {
+		return "", err
+	}
+	if resp.Error != nil {
+		err = resp.Error
+		return "", err
+	}
+
+	return resp.Value, nil
+}
+
+type UIInputInputResponse struct {
+	Value string
+	Error *plugin.BasicError
+}
+
+// UIInputServer is a net/rpc compatible structure for serving
+// a UIInput. This should not be used directly.
+type UIInputServer struct {
+	UIInput terraform.UIInput
+}
+
+func (s *UIInputServer) Input(
+	opts *terraform.InputOpts,
+	reply *UIInputInputResponse) error {
+	value, err := s.UIInput.Input(context.Background(), opts)
+	*reply = UIInputInputResponse{
+		Value: value,
+		Error: plugin.NewBasicError(err),
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go
new file mode 100644
index 00000000000..07c13d03aa4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+	"net/rpc"
+
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+// UIOutput is an implementation of terraform.UIOutput that communicates
+// over RPC.
+type UIOutput struct {
+	Client *rpc.Client
+}
+
+func (o *UIOutput) Output(v string) {
+	o.Client.Call("Plugin.Output", v, new(interface{}))
+}
+
+// UIOutputServer is the RPC server for serving UIOutput.
+type UIOutputServer struct { + UIOutput terraform.UIOutput +} + +func (s *UIOutputServer) Output( + v string, + reply *interface{}) error { + s.UIOutput.Output(v) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go new file mode 100644 index 00000000000..eb05c68ae30 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go @@ -0,0 +1,882 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// InputMode defines what sort of input will be asked for when Input +// is called on Context. +type InputMode byte + +const ( + // InputModeVar asks for all variables + InputModeVar InputMode = 1 << iota + + // InputModeVarUnset asks for variables which are not set yet. + // InputModeVar must be set for this to have an effect. + InputModeVarUnset + + // InputModeProvider asks for provider variables + InputModeProvider + + // InputModeStd is the standard operating mode and asks for both variables + // and providers. + InputModeStd = InputModeVar | InputModeProvider +) + +// ContextOpts are the user-configurable options to create a context with +// NewContext. +type ContextOpts struct { + Config *configs.Config + Changes *plans.Changes + State *states.State + Targets []addrs.Targetable + Variables InputValues + Meta *ContextMeta + Destroy bool + + Hooks []Hook + Parallelism int + ProviderResolver providers.Resolver + Provisioners map[string]ProvisionerFactory + + // If non-nil, will apply as additional constraints on the provider + // plugins that will be requested from the provider resolver. + ProviderSHA256s map[string][]byte + SkipProviderVerify bool + + UIInput UIInput +} + +// ContextMeta is metadata about the running context. This is information +// that this package or structure cannot determine on its own but exposes +// into Terraform in various ways. This must be provided by the Context +// initializer. +type ContextMeta struct { + Env string // Env is the state environment +} + +// Context represents all the context that Terraform needs in order to +// perform operations on infrastructure. This structure is built using +// NewContext. 
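+
+// Editor's note (sketch, not part of the vendored file): a hypothetical
+// construction of a Context from the options above; cfg, st and resolver are
+// assumed to come from the caller.
+//
+//	ctx, diags := NewContext(&ContextOpts{
+//		Config:           cfg,
+//		State:            st,
+//		ProviderResolver: resolver,
+//		Parallelism:      10,
+//	})
+//	if diags.HasErrors() {
+//		return diags.Err()
+//	}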
+type Context struct { + config *configs.Config + changes *plans.Changes + state *states.State + targets []addrs.Targetable + variables InputValues + meta *ContextMeta + destroy bool + + hooks []Hook + components contextComponentFactory + schemas *Schemas + sh *stopHook + uiInput UIInput + + l sync.Mutex // Lock acquired during any task + parallelSem Semaphore + providerInputConfig map[string]map[string]cty.Value + providerSHA256s map[string][]byte + runCond *sync.Cond + runContext context.Context + runContextCancel context.CancelFunc + shadowErr error +} + +// (additional methods on Context can be found in context_*.go files.) + +// NewContext creates a new Context structure. +// +// Once a Context is created, the caller must not access or mutate any of +// the objects referenced (directly or indirectly) by the ContextOpts fields. +// +// If the returned diagnostics contains errors then the resulting context is +// invalid and must not be used. +func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform.NewContext: starting") + diags := CheckCoreVersionRequirements(opts.Config) + // If version constraints are not met then we'll bail early since otherwise + // we're likely to just see a bunch of other errors related to + // incompatibilities, which could be overwhelming for the user. + if diags.HasErrors() { + return nil, diags + } + + // Copy all the hooks and add our stop hook. We don't append directly + // to the Config so that we're not modifying that in-place. + sh := new(stopHook) + hooks := make([]Hook, len(opts.Hooks)+1) + copy(hooks, opts.Hooks) + hooks[len(opts.Hooks)] = sh + + state := opts.State + if state == nil { + state = states.NewState() + } + + // Determine parallelism, default to 10. We do this both to limit + // CPU pressure but also to have an extra guard against rate throttling + // from providers. + par := opts.Parallelism + if par == 0 { + par = 10 + } + + // Set up the variables in the following sequence: + // 0 - Take default values from the configuration + // 1 - Take values from TF_VAR_x environment variables + // 2 - Take values specified in -var flags, overriding values + // set by environment variables if necessary. This includes + // values taken from -var-file in addition. + var variables InputValues + if opts.Config != nil { + // Default variables from the configuration seed our map. + variables = DefaultVariableValues(opts.Config.Module.Variables) + } + // Variables provided by the caller (from CLI, environment, etc) can + // override the defaults. 
+ variables = variables.Override(opts.Variables) + + // Bind available provider plugins to the constraints in config + var providerFactories map[string]providers.Factory + if opts.ProviderResolver != nil { + deps := ConfigTreeDependencies(opts.Config, state) + reqd := deps.AllPluginRequirements() + if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { + reqd.LockExecutables(opts.ProviderSHA256s) + } + log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") + + var providerDiags tfdiags.Diagnostics + providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) + diags = diags.Append(providerDiags) + + if diags.HasErrors() { + return nil, diags + } + } else { + providerFactories = make(map[string]providers.Factory) + } + + components := &basicComponentFactory{ + providers: providerFactories, + provisioners: opts.Provisioners, + } + + log.Printf("[TRACE] terraform.NewContext: loading provider schemas") + schemas, err := LoadSchemas(opts.Config, opts.State, components) + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + changes := opts.Changes + if changes == nil { + changes = plans.NewChanges() + } + + config := opts.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + log.Printf("[TRACE] terraform.NewContext: complete") + + return &Context{ + components: components, + schemas: schemas, + destroy: opts.Destroy, + changes: changes, + hooks: hooks, + meta: opts.Meta, + config: config, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, + + parallelSem: NewSemaphore(par), + providerInputConfig: make(map[string]map[string]cty.Value), + providerSHA256s: opts.ProviderSHA256s, + sh: sh, + }, nil +} + +func (c *Context) Schemas() *Schemas { + return c.schemas +} + +type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). + Validate bool + + // Legacy graphs only: won't prune the graph + Verbose bool +} + +// Graph returns the graph used for the given operation type. +// +// The most extensive or complex graph type is GraphTypePlan. 
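+
+// Editor's note (sketch): a hypothetical caller requesting a validated plan
+// graph from the method below:
+//
+//	g, diags := ctx.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
+//	if diags.HasErrors() {
+//		return diags.Err()
+//	}
+//	_ = g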
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
+	if opts == nil {
+		opts = &ContextGraphOpts{Validate: true}
+	}
+
+	log.Printf("[INFO] terraform: building graph: %s", typ)
+	switch typ {
+	case GraphTypeApply:
+		return (&ApplyGraphBuilder{
+			Config:     c.config,
+			Changes:    c.changes,
+			State:      c.state,
+			Components: c.components,
+			Schemas:    c.schemas,
+			Targets:    c.targets,
+			Destroy:    c.destroy,
+			Validate:   opts.Validate,
+		}).Build(addrs.RootModuleInstance)
+
+	case GraphTypeValidate:
+		// The validate graph is just a slightly modified plan graph
+		fallthrough
+	case GraphTypePlan:
+		// Create the plan graph builder
+		p := &PlanGraphBuilder{
+			Config:     c.config,
+			State:      c.state,
+			Components: c.components,
+			Schemas:    c.schemas,
+			Targets:    c.targets,
+			Validate:   opts.Validate,
+		}
+
+		// Handle the special case of the validate graph, which currently
+		// shares the plan graph builder.
+		var b GraphBuilder = p
+		switch typ {
+		case GraphTypeValidate:
+			b = ValidateGraphBuilder(p)
+		}
+
+		return b.Build(addrs.RootModuleInstance)
+
+	case GraphTypePlanDestroy:
+		return (&DestroyPlanGraphBuilder{
+			Config:     c.config,
+			State:      c.state,
+			Components: c.components,
+			Schemas:    c.schemas,
+			Targets:    c.targets,
+			Validate:   opts.Validate,
+		}).Build(addrs.RootModuleInstance)
+
+	case GraphTypeRefresh:
+		return (&RefreshGraphBuilder{
+			Config:     c.config,
+			State:      c.state,
+			Components: c.components,
+			Schemas:    c.schemas,
+			Targets:    c.targets,
+			Validate:   opts.Validate,
+		}).Build(addrs.RootModuleInstance)
+
+	case GraphTypeEval:
+		return (&EvalGraphBuilder{
+			Config:     c.config,
+			State:      c.state,
+			Components: c.components,
+			Schemas:    c.schemas,
+		}).Build(addrs.RootModuleInstance)
+
+	default:
+		// Should never happen, because the above is exhaustive for all graph types.
+		panic(fmt.Errorf("unsupported graph type %s", typ))
+	}
+}
+
+// ShadowError returns any errors caught during a shadow operation.
+//
+// A shadow operation is an operation run in parallel to a real operation
+// that performs the same tasks using new logic on copied state. The results
+// are compared to ensure that the new logic works the same as the old logic.
+// The shadow never affects the real operation or return values.
+//
+// The results of the shadow operation are only available through this
+// function call after a real operation is complete.
+//
+// For API consumers of Context, you can safely ignore this function
+// completely if you have no interest in helping report experimental feature
+// errors to Terraform maintainers. Otherwise, please call this function
+// after every operation and report the result to the user.
+//
+// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
+// the real state or result of a real operation. They are purely informational
+// to assist in future Terraform versions being more stable. Please convey
+// this clearly to the end user.
+//
+// This must be called only when no other operation is running (refresh,
+// plan, etc.). The result can be used in parallel to any other operation
+// running.
+func (c *Context) ShadowError() error {
+	return c.shadowErr
+}
+
+// State returns a copy of the current state associated with this context.
+//
+// This cannot safely be called in parallel with any other Context function.
+func (c *Context) State() *states.State {
+	return c.state.DeepCopy()
+}
+
+// Eval produces a scope in which expressions can be evaluated for
+// the given module path.
+// +// This method must first evaluate any ephemeral values (input variables, local +// values, and output values) in the configuration. These ephemeral values are +// not included in the persisted state, so they must be re-computed using other +// values in the state before they can be properly evaluated. The updated +// values are retained in the main state associated with the receiving context. +// +// This function takes no action against remote APIs but it does need access +// to all provider and provisioner instances in order to obtain their schemas +// for type checking. +// +// The result is an evaluation scope that can be used to resolve references +// against the root module. If the returned diagnostics contains errors then +// the returned scope may be nil. If it is not nil then it may still be used +// to attempt expression evaluation or other analysis, but some expressions +// may not behave as expected. +func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { + // This is intended for external callers such as the "terraform console" + // command. Internally, we create an evaluator in c.walk before walking + // the graph, and create scopes in ContextGraphWalker. + + var diags tfdiags.Diagnostics + defer c.acquireRun("eval")() + + // Start with a copy of state so that we don't affect any instances + // that other methods may have already returned. + c.state = c.state.DeepCopy() + var walker *ContextGraphWalker + + graph, graphDiags := c.Graph(GraphTypeEval, nil) + diags = diags.Append(graphDiags) + if !diags.HasErrors() { + var walkDiags tfdiags.Diagnostics + walker, walkDiags = c.walk(graph, walkEval) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + } + + if walker == nil { + // If we skipped walking the graph (due to errors) then we'll just + // use a placeholder graph walker here, which'll refer to the + // unmodified state. + walker = c.graphWalker(walkEval) + } + + // This is a bit weird since we don't normally evaluate outside of + // the context of a walk, but we'll "re-enter" our desired path here + // just to get hold of an EvalContext for it. GraphContextBuiltin + // caches its contexts, so we should get hold of the context that was + // previously used for evaluation here, unless we skipped walking. + evalCtx := walker.EnterPath(path) + return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags +} + +// Apply applies the changes represented by this context and returns +// the resulting state. +// +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. +func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { + defer c.acquireRun("apply")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. 
+ graph, diags := c.Graph(GraphTypeApply, nil) + if diags.HasErrors() { + return nil, diags + } + + // Determine the operation + operation := walkApply + if c.destroy { + operation = walkDestroy + } + + // Walk the graph + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + + if c.destroy && !diags.HasErrors() { + // If we know we were trying to destroy objects anyway, and we + // completed without any errors, then we'll also prune out any + // leftover empty resource husks (left after all of the instances + // of a resource with "count" or "for_each" are destroyed) to + // help ensure we end up with an _actually_ empty state, assuming + // we weren't destroying with -target here. + // + // (This doesn't actually take into account -target, but that should + // be okay because it doesn't throw away anything we can't recompute + // on a subsequent "terraform plan" run, if the resources are still + // present in the configuration. However, this _will_ cause "count = 0" + // resources to read as unknown during the next refresh walk, which + // may cause some additional churn if used in a data resource or + // provider block, until we remove refreshing as a separate walk and + // just do it as part of the plan walk.) + c.state.PruneResourceHusks() + } + + if len(c.targets) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Applied changes may be incomplete", + `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: + terraform plan + +Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, + )) + } + + return c.state, diags +} + +// Plan generates an execution plan for the given context. +// +// The execution plan encapsulates the context and can be stored +// in order to reinstantiate a context later for Apply. +// +// Plan also updates the diff of this context to be the diff generated +// by the plan, so Apply can be called after. +func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { + defer c.acquireRun("plan")() + c.changes = plans.NewChanges() + + var diags tfdiags.Diagnostics + + if len(c.targets) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. + +The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, + )) + } + + varVals := make(map[string]plans.DynamicValue, len(c.variables)) + for k, iv := range c.variables { + // We use cty.DynamicPseudoType here so that we'll save both the + // value _and_ its dynamic type in the plan, so we can recover + // exactly the same value later. 
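+		// (Illustrative note, not in the upstream source: a value encoded
+		// with cty.DynamicPseudoType carries its type alongside the data,
+		// so a later dv.Decode(cty.DynamicPseudoType) can recover both the
+		// value and its exact type when the plan is reloaded.)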
+ dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to prepare variable value for plan", + fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), + )) + continue + } + varVals[k] = dv + } + + p := &plans.Plan{ + VariableValues: varVals, + TargetAddrs: c.targets, + ProviderSHA256s: c.providerSHA256s, + } + + var operation walkOperation + if c.destroy { + operation = walkPlanDestroy + } else { + // Set our state to be something temporary. We do this so that + // the plan can update a fake state so that variables work, then + // we replace it back with our old state. + old := c.state + if old == nil { + c.state = states.NewState() + } else { + c.state = old.DeepCopy() + } + defer func() { + c.state = old + }() + + operation = walkPlan + } + + // Build the graph. + graphType := GraphTypePlan + if c.destroy { + graphType = GraphTypePlanDestroy + } + graph, graphDiags := c.Graph(graphType, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return nil, diags + } + + // Do the walk + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + p.Changes = c.changes + + return p, diags +} + +// Refresh goes through all the resources in the state and refreshes them +// to their latest state. This will update the state that this context +// works with, along with returning it. +// +// Even in the case an error is returned, the state may be returned and +// will potentially be partially updated. +func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { + defer c.acquireRun("refresh")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Refresh builds a partial changeset as part of its work because it must + // create placeholder stubs for any resource instances that'll be created + // in subsequent plan so that provider configurations and data resources + // can interpolate from them. This plan is always thrown away after + // the operation completes, restoring any existing changeset. + oldChanges := c.changes + defer func() { c.changes = oldChanges }() + c.changes = plans.NewChanges() + + // Build the graph. + graph, diags := c.Graph(GraphTypeRefresh, nil) + if diags.HasErrors() { + return nil, diags + } + + // Do the walk + _, walkDiags := c.walk(graph, walkRefresh) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + + // During our walk we will have created planned object placeholders in + // state for resource instances that are in configuration but not yet + // created. These were created only to allow expression evaluation to + // work properly in provider and data blocks during the walk and must + // now be discarded, since a subsequent plan walk is responsible for + // creating these "for real". + // TODO: Consolidate refresh and plan into a single walk, so that the + // refresh walk doesn't need to emulate various aspects of the plan + // walk in order to properly evaluate provider and data blocks. + c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() + + return c.state, diags +} + +// Stop stops the running task. +// +// Stop will block until the task completes. 
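+//
+// An illustrative use (not part of the upstream source) is to wire Stop to
+// an interrupt signal while an operation runs on another goroutine:
+//
+//	go func() {
+//		<-interruptCh // hypothetical channel closed on SIGINT
+//		ctx.Stop()    // blocks until the running walk has wound down
+//	}()
+//	state, diags := ctx.Apply()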
+func (c *Context) Stop() {
+	log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
+
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	// If we're running, then stop
+	if c.runContextCancel != nil {
+		log.Printf("[WARN] terraform: run context exists, stopping")
+
+		// Tell the hook we want to stop
+		c.sh.Stop()
+
+		// Stop the context
+		c.runContextCancel()
+		c.runContextCancel = nil
+	}
+
+	// Grab the condition var before we exit
+	if cond := c.runCond; cond != nil {
+		log.Printf("[INFO] terraform: waiting for graceful stop to complete")
+		cond.Wait()
+	}
+
+	log.Printf("[WARN] terraform: stop complete")
+}
+
+// Validate performs semantic validation of the configuration, and returns
+// any warnings or errors.
+//
+// Syntax and structural checks are performed by the configuration loader,
+// and so are not repeated here.
+func (c *Context) Validate() tfdiags.Diagnostics {
+	defer c.acquireRun("validate")()
+
+	var diags tfdiags.Diagnostics
+
+	// Validate input variables. We do this only for the values supplied
+	// by the root module, since child module calls are validated when we
+	// visit their graph nodes.
+	if c.config != nil {
+		varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
+		diags = diags.Append(varDiags)
+	}
+
+	// If we have errors at this point then we probably won't be able to
+	// construct a graph without producing redundant errors, so we'll halt early.
+	if diags.HasErrors() {
+		return diags
+	}
+
+	// Build the graph so we can walk it and run Validate on nodes.
+	// We also validate the graph generated here, but this graph doesn't
+	// necessarily match the graph that Plan will generate, so we'll validate the
+	// graph again later after Planning.
+	graph, graphDiags := c.Graph(GraphTypeValidate, nil)
+	diags = diags.Append(graphDiags)
+	if graphDiags.HasErrors() {
+		return diags
+	}
+
+	// Walk
+	walker, walkDiags := c.walk(graph, walkValidate)
+	diags = diags.Append(walker.NonFatalDiagnostics)
+	diags = diags.Append(walkDiags)
+	if walkDiags.HasErrors() {
+		return diags
+	}
+
+	return diags
+}
+
+// Config returns the configuration tree associated with this context.
+func (c *Context) Config() *configs.Config {
+	return c.config
+}
+
+// Variables will return the mapping of variables that were defined
+// for this Context. If Input was called, this mapping may be different
+// than what was given.
+func (c *Context) Variables() InputValues {
+	return c.variables
+}
+
+// SetVariable sets a variable after a context has already been built.
+func (c *Context) SetVariable(k string, v cty.Value) {
+	c.variables[k] = &InputValue{
+		Value:      v,
+		SourceType: ValueFromCaller,
+	}
+}
+
+func (c *Context) acquireRun(phase string) func() {
+	// With the run lock held, grab the context lock to make changes
+	// to the run context.
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	// Wait until we're no longer running
+	for c.runCond != nil {
+		c.runCond.Wait()
+	}
+
+	// Build our lock
+	c.runCond = sync.NewCond(&c.l)
+
+	// Create a new run context
+	c.runContext, c.runContextCancel = context.WithCancel(context.Background())
+
+	// Reset the stop hook so we're not stopped
+	c.sh.Reset()
+
+	// Reset the shadow errors
+	c.shadowErr = nil
+
+	return c.releaseRun
+}
+
+func (c *Context) releaseRun() {
+	// Grab the context lock so that we can make modifications to fields
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	// End our run. We check if runContext is non-nil because it can be
+	// set to nil if it was cancelled via Stop()
+	if c.runContextCancel != nil {
+		c.runContextCancel()
+	}
+
+	// Wake up everyone waiting on our condition
+	cond := c.runCond
+	c.runCond = nil
+	cond.Broadcast()
+
+	// Unset the context
+	c.runContext = nil
+}
+
+func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
+	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
+
+	walker := c.graphWalker(operation)
+
+	// Watch for a stop so we can call the provider Stop() API.
+	watchStop, watchWait := c.watchStop(walker)
+
+	// Walk the real graph, this will block until it completes
+	diags := graph.Walk(walker)
+
+	// Close the channel so the watcher stops, and wait for it to return.
+	close(watchStop)
+	<-watchWait
+
+	return walker, diags
+}
+
+func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
+	return &ContextGraphWalker{
+		Context:            c,
+		State:              c.state.SyncWrapper(),
+		Changes:            c.changes.SyncWrapper(),
+		Operation:          operation,
+		StopContext:        c.runContext,
+		RootVariableValues: c.variables,
+	}
+}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly. When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important, because without synchronizing with the end of
+// the watchStop goroutine, the runContext may also be closed during the select
+// incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext which
+// can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
+	stop := make(chan struct{})
+	wait := make(chan struct{})
+
+	// get the runContext cancellation channel now, because releaseRun will
+	// write to the runContext field.
+	done := c.runContext.Done()
+
+	go func() {
+		defer close(wait)
+		// Wait for a stop or completion
+		select {
+		case <-done:
+			// done means the context was canceled, so we need to try and stop
+			// providers.
+		case <-stop:
+			// our own stop channel was closed.
+			return
+		}
+
+		// If we're here, we're stopped, trigger the call.
+		log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")
+
+		{
+			// Copy the providers so that a misbehaved blocking Stop doesn't
+			// completely hang Terraform.
+			walker.providerLock.Lock()
+			ps := make([]providers.Interface, 0, len(walker.providerCache))
+			for _, p := range walker.providerCache {
+				ps = append(ps, p)
+			}
+			defer walker.providerLock.Unlock()
+
+			for _, p := range ps {
+				// We ignore the error for now since there isn't any reasonable
+				// action to take if there is an error here, since the stop is still
+				// advisory: Terraform will exit once the graph node completes.
+				p.Stop()
+			}
+		}
+
+		{
+			// Call stop on all the provisioners
+			walker.provisionerLock.Lock()
+			ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
+			for _, p := range walker.provisionerCache {
+				ps = append(ps, p)
+			}
+			defer walker.provisionerLock.Unlock()
+
+			for _, p := range ps {
+				// We ignore the error for now since there isn't any reasonable
+				// action to take if there is an error here, since the stop is still
+				// advisory: Terraform will exit once the graph node completes.
+				p.Stop()
+			}
+		}
+	}()
+
+	return stop, wait
+}
+
+// ShimLegacyState is a helper that takes the legacy state type and
+// converts it to the new state type.
+//
+// This is implemented as a state file upgrade, so it will not preserve
+// parts of the state structure that are not included in a serialized state,
+// such as the resolved results of any local values, outputs in non-root
+// modules, etc.
+func ShimLegacyState(legacy *State) (*states.State, error) {
+	if legacy == nil {
+		return nil, nil
+	}
+	var buf bytes.Buffer
+	err := WriteState(legacy, &buf)
+	if err != nil {
+		return nil, err
+	}
+	f, err := statefile.Read(&buf)
+	if err != nil {
+		return nil, err
+	}
+	return f.State, err
+}
+
+// MustShimLegacyState is a wrapper around ShimLegacyState that panics if
+// the conversion does not succeed. This is primarily intended for tests where
+// the given legacy state is an object constructed within the test.
+func MustShimLegacyState(legacy *State) *states.State {
+	ret, err := ShimLegacyState(legacy)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go
new file mode 100644
index 00000000000..a627996e39b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go
@@ -0,0 +1,68 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners"
+)
+
+// contextComponentFactory is the interface that Context uses
+// to initialize various components such as providers and provisioners.
+// This factory gets more information than the raw maps used to initialize
+// a Context. This information is used for debugging.
+type contextComponentFactory interface {
+	// ResourceProvider creates a new ResourceProvider with the given
+	// type. The "uid" is a unique identifier for this provider being
+	// initialized that can be used for internal tracking.
+	ResourceProvider(typ, uid string) (providers.Interface, error)
+	ResourceProviders() []string
+
+	// ResourceProvisioner creates a new ResourceProvisioner with the
+	// given type. The "uid" is a unique identifier for this provisioner
+	// being initialized that can be used for internal tracking.
+	ResourceProvisioner(typ, uid string) (provisioners.Interface, error)
+	ResourceProvisioners() []string
+}
+
+// basicComponentFactory just calls a factory from a map directly.
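+//
+// An illustrative construction (not part of the upstream source):
+//
+//	components := &basicComponentFactory{
+//		providers:    map[string]providers.Factory{ /* e.g. "null": nullFactory */ },
+//		provisioners: map[string]ProvisionerFactory{},
+//	}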
+type basicComponentFactory struct {
+	providers    map[string]providers.Factory
+	provisioners map[string]ProvisionerFactory
+}
+
+func (c *basicComponentFactory) ResourceProviders() []string {
+	result := make([]string, 0, len(c.providers))
+	for k := range c.providers {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+func (c *basicComponentFactory) ResourceProvisioners() []string {
+	result := make([]string, 0, len(c.provisioners))
+	for k := range c.provisioners {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) {
+	f, ok := c.providers[typ]
+	if !ok {
+		return nil, fmt.Errorf("unknown provider %q", typ)
+	}
+
+	return f()
+}
+
+func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) {
+	f, ok := c.provisioners[typ]
+	if !ok {
+		return nil, fmt.Errorf("unknown provisioner %q", typ)
+	}
+
+	return f()
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go
new file mode 100644
index 00000000000..4448d8706e1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
+package terraform
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go
+
+// GraphType is an enum of the type of graph to create with a Context.
+// The values of the constants may change so they shouldn't be depended on;
+// always use the constant name.
+type GraphType byte
+
+const (
+	GraphTypeInvalid GraphType = 0
+	GraphTypeLegacy  GraphType = iota
+	GraphTypeRefresh
+	GraphTypePlan
+	GraphTypePlanDestroy
+	GraphTypeApply
+	GraphTypeValidate
+	GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs.
+)
+
+// GraphTypeMap is a mapping of human-readable string to GraphType. This
+// is useful to use as the mechanism for human input for configurable
+// graph types.
+var GraphTypeMap = map[string]GraphType{
+	"apply":        GraphTypeApply,
+	"plan":         GraphTypePlan,
+	"plan-destroy": GraphTypePlanDestroy,
+	"refresh":      GraphTypeRefresh,
+	"legacy":       GraphTypeLegacy,
+	"validate":     GraphTypeValidate,
+	"eval":         GraphTypeEval,
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go
new file mode 100644
index 00000000000..9a9cd9626b1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// ImportOpts are used as the configuration for Import.
+type ImportOpts struct {
+	// Targets are the targets to import
+	Targets []*ImportTarget
+
+	// Config is optional, and specifies a config tree that will be loaded
+	// into the graph and evaluated. This is the source for provider
+	// configurations.
+	Config *configs.Config
+}
+
+// ImportTarget is a single resource to import.
+type ImportTarget struct { + // Addr is the address for the resource instance that the new object should + // be imported into. + Addr addrs.AbsResourceInstance + + // ID is the ID of the resource to import. This is resource-specific. + ID string + + // ProviderAddr is the address of the provider that should handle the import. + ProviderAddr addrs.AbsProviderConfig +} + +// Import takes already-created external resources and brings them +// under Terraform management. Import requires the exact type, name, and ID +// of the resources to import. +// +// This operation is idempotent. If the requested resource is already +// imported, no changes are made to the state. +// +// Further, this operation also gracefully handles partial state. If during +// an import there is a failure, all previously imported resources remain +// imported. +func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Hold a lock since we can modify our own state here + defer c.acquireRun("import")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // If no module is given, default to the module configured with + // the Context. + config := opts.Config + if config == nil { + config = c.config + } + + // Initialize our graph builder + builder := &ImportGraphBuilder{ + ImportTargets: opts.Targets, + Config: config, + Components: c.components, + Schemas: c.schemas, + } + + // Build the graph! + graph, graphDiags := builder.Build(addrs.RootModuleInstance) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return c.state, diags + } + + // Walk it + _, walkDiags := c.walk(graph, walkImport) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return c.state, diags + } + + return c.state, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go new file mode 100644 index 00000000000..b92fce1208f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go @@ -0,0 +1,251 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Input asks for input to fill variables and provider configurations. +// This modifies the configuration in-place, so asking for Input twice +// may result in different UI output showing different current values. +func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + defer c.acquireRun("input")() + + if c.uiInput == nil { + log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") + return diags + } + + ctx := context.Background() + + if mode&InputModeVar != 0 { + log.Printf("[TRACE] Context.Input: Prompting for variables") + + // Walk the variables first for the root module. We walk them in + // alphabetical order for UX reasons. 
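+		// (Illustrative note, not in the upstream source: with
+		// mode = InputModeVar|InputModeVarUnset, only variables that do not
+		// yet have a value are prompted for; anything already set via -var,
+		// -var-file, or a TF_VAR_ environment variable is skipped by the
+		// InputModeVarUnset check below.)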
+ configs := c.config.Module.Variables + names := make([]string, 0, len(configs)) + for name := range configs { + names = append(names, name) + } + sort.Strings(names) + Variables: + for _, n := range names { + v := configs[n] + + // If we only care about unset variables, then we should set any + // variable that is already set. + if mode&InputModeVarUnset != 0 { + if _, isSet := c.variables[n]; isSet { + continue + } + } + + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Context.uiInput is nil during input walk") + continue + } + + // Ask the user for a value for this variable + var rawValue string + retry := 0 + for { + var err error + rawValue, err = c.uiInput.Input(ctx, &InputOpts{ + Id: fmt.Sprintf("var.%s", n), + Query: fmt.Sprintf("var.%s", n), + Description: v.Description, + }) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to request interactive input", + fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err), + )) + return diags + } + + if rawValue == "" && v.Default == cty.NilVal { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required variable not assigned", + fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n), + )) + continue Variables + } + retry++ + continue + } + + break + } + + val, valDiags := v.ParsingMode.Parse(n, rawValue) + diags = diags.Append(valDiags) + if diags.HasErrors() { + continue + } + + c.variables[n] = &InputValue{ + Value: val, + SourceType: ValueFromInput, + } + } + } + + if mode&InputModeProvider != 0 { + log.Printf("[TRACE] Context.Input: Prompting for provider arguments") + + // We prompt for input only for provider configurations defined in + // the root module. At the time of writing that is an arbitrary + // restriction, but we have future plans to support "count" and + // "for_each" on modules that will then prevent us from supporting + // input for child module configurations anyway (since we'd need to + // dynamic-expand first), and provider configurations in child modules + // are not recommended since v0.11 anyway, so this restriction allows + // us to keep this relatively simple without significant hardship. + + pcs := make(map[string]*configs.Provider) + pas := make(map[string]addrs.ProviderConfig) + for _, pc := range c.config.Module.ProviderConfigs { + addr := pc.Addr() + pcs[addr.String()] = pc + pas[addr.String()] = addr + log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) + } + // We also need to detect _implied_ provider configs from resources. + // These won't have *configs.Provider objects, but they will still + // exist in the map and we'll just treat them as empty below. 
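+		// (Illustrative example, not in the upstream source: a block such as
+		//
+		//	resource "aws_instance" "web" { ... }
+		//
+		// with no explicit provider argument implies the default "aws"
+		// provider configuration, which is what the two loops below detect.)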
+		for _, rc := range c.config.Module.ManagedResources {
+			pa := rc.ProviderConfigAddr()
+			if pa.Alias != "" {
+				continue // alias configurations cannot be implied
+			}
+			if _, exists := pcs[pa.String()]; !exists {
+				pcs[pa.String()] = nil
+				pas[pa.String()] = pa
+				log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange)
+			}
+		}
+		for _, rc := range c.config.Module.DataResources {
+			pa := rc.ProviderConfigAddr()
+			if pa.Alias != "" {
+				continue // alias configurations cannot be implied
+			}
+			if _, exists := pcs[pa.String()]; !exists {
+				pcs[pa.String()] = nil
+				pas[pa.String()] = pa
+				log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange)
+			}
+		}
+
+		for pk, pa := range pas {
+			pc := pcs[pk] // will be nil if this is an implied config
+
+			// Wrap the input into a namespace
+			input := &PrefixUIInput{
+				IdPrefix:    pk,
+				QueryPrefix: pk + ".",
+				UIInput:     c.uiInput,
+			}
+
+			schema := c.schemas.ProviderConfig(pa.Type)
+			if schema == nil {
+				// Could either be an incorrect config or just an incomplete
+				// mock in tests. We'll let a later pass decide, and just
+				// ignore this for the purposes of gathering input.
+				log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type)
+				continue
+			}
+
+			// For our purposes here we just want to detect if attributes are
+			// set in config at all, so rather than doing a full decode
+			// (which would require us to prepare an evalcontext, etc) we'll
+			// use the low-level HCL API to process only the top-level
+			// structure.
+			var attrExprs hcl.Attributes // nil if there is no config
+			if pc != nil && pc.Config != nil {
+				lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec()))
+				content, _, diags := pc.Config.PartialContent(lowLevelSchema)
+				if diags.HasErrors() {
+					log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error())
+					continue
+				}
+				attrExprs = content.Attributes
+			}
+
+			keys := make([]string, 0, len(schema.Attributes))
+			for key := range schema.Attributes {
+				keys = append(keys, key)
+			}
+			sort.Strings(keys)
+
+			vals := map[string]cty.Value{}
+			for _, key := range keys {
+				attrS := schema.Attributes[key]
+				if attrS.Optional {
+					continue
+				}
+				if attrExprs != nil {
+					if _, exists := attrExprs[key]; exists {
+						continue
+					}
+				}
+				if !attrS.Type.Equals(cty.String) {
+					continue
+				}
+
+				log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key)
+				rawVal, err := input.Input(ctx, &InputOpts{
+					Id:          key,
+					Query:       key,
+					Description: attrS.Description,
+				})
+				if err != nil {
+					log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err)
+					continue
+				}
+
+				vals[key] = cty.StringVal(rawVal)
+			}
+
+			c.providerInputConfig[pk] = vals
+			log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals)
+		}
+	}
+
+	return diags
+}
+
+// schemaForInputSniffing returns a transformed version of a given schema
+// that marks all attributes as optional, which the Context.Input method can
+// use to detect whether a required argument is set without missing arguments
+// themselves generating errors.
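+//
+// For example (illustrative, not in the upstream source): an attribute the
+// provider schema declares as Required comes back Optional here, so the
+// PartialContent call in Context.Input can decode a configuration that
+// omits it, and the omission is then detected by inspecting the returned
+// hcl.Attributes rather than by a decode error.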
+func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go new file mode 100644 index 00000000000..e2f54883bcd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go @@ -0,0 +1,1441 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. 
+			panic("diff cannot represent modules with count or for_each keys")
+		}
+
+		legacyPath[i] = step.Name
+	}
+
+	m := &ModuleDiff{Path: legacyPath}
+	m.init()
+	d.Modules = append(d.Modules, m)
+	return m
+}
+
+// ModuleByPath is used to look up the module diff for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
+	if d == nil {
+		return nil
+	}
+	for _, mod := range d.Modules {
+		if mod.Path == nil {
+			panic("missing module path")
+		}
+		modPath := normalizeModulePath(mod.Path)
+		if modPath.String() == path.String() {
+			return mod
+		}
+	}
+	return nil
+}
+
+// RootModule returns the ModuleDiff for the root module
+func (d *Diff) RootModule() *ModuleDiff {
+	root := d.ModuleByPath(addrs.RootModuleInstance)
+	if root == nil {
+		panic("missing root module")
+	}
+	return root
+}
+
+// Empty returns true if the diff has no changes.
+func (d *Diff) Empty() bool {
+	if d == nil {
+		return true
+	}
+
+	for _, m := range d.Modules {
+		if !m.Empty() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+func (d *Diff) Equal(d2 *Diff) bool {
+	// If one is nil, they must both be nil
+	if d == nil || d2 == nil {
+		return d == d2
+	}
+
+	// Sort the modules
+	sort.Sort(moduleDiffSort(d.Modules))
+	sort.Sort(moduleDiffSort(d2.Modules))
+
+	// Copy since we have to modify the module destroy flag to false so
+	// we don't compare that. TODO: delete this when we get rid of the
+	// destroy flag on modules.
+	dCopy := d.DeepCopy()
+	d2Copy := d2.DeepCopy()
+	for _, m := range dCopy.Modules {
+		m.Destroy = false
+	}
+	for _, m := range d2Copy.Modules {
+		m.Destroy = false
+	}
+
+	// Use DeepEqual
+	return reflect.DeepEqual(dCopy, d2Copy)
+}
+
+// DeepCopy performs a deep copy of all parts of the Diff, making the
+// resulting Diff safe to use without modifying this one.
+func (d *Diff) DeepCopy() *Diff {
+	copy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*Diff)
+}
+
+func (d *Diff) String() string {
+	var buf bytes.Buffer
+
+	keys := make([]string, 0, len(d.Modules))
+	lookup := make(map[string]*ModuleDiff)
+	for _, m := range d.Modules {
+		addr := normalizeModulePath(m.Path)
+		key := addr.String()
+		keys = append(keys, key)
+		lookup[key] = m
+	}
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		m := lookup[key]
+		mStr := m.String()
+
+		// If we're the root module, we just write the output directly.
+		if reflect.DeepEqual(m.Path, rootModulePath) {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:\n", key))
+
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			buf.WriteString(fmt.Sprintf("  %s\n", s.Text()))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// ModuleDiff tracks the differences between resources to apply within
+// a single module.
+type ModuleDiff struct {
+	Path      []string
+	Resources map[string]*InstanceDiff
+	Destroy   bool // Set only by the destroy plan
+}
+
+func (d *ModuleDiff) init() {
+	if d.Resources == nil {
+		d.Resources = make(map[string]*InstanceDiff)
+	}
+	for _, r := range d.Resources {
+		r.init()
+	}
+}
+
+// ChangeType returns the type of changes that the diff for this
+// module includes.
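+//
+// (Illustrative example, not part of the upstream source: a module holding
+// one resource planned for creation and another planned for update reports
+// DiffUpdate overall, since any update-like change promotes the result.)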
+//
+// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
+// DiffCreate. If an instance within the module has a DiffDestroyCreate
+// then this will register as a DiffUpdate for the module, as the code below
+// shows.
+func (d *ModuleDiff) ChangeType() DiffChangeType {
+	result := DiffNone
+	for _, r := range d.Resources {
+		change := r.ChangeType()
+		switch change {
+		case DiffCreate, DiffDestroy:
+			if result == DiffNone {
+				result = change
+			}
+		case DiffDestroyCreate, DiffUpdate:
+			result = DiffUpdate
+		}
+	}
+
+	return result
+}
+
+// Empty returns true if the diff has no changes within this module.
+func (d *ModuleDiff) Empty() bool {
+	if d.Destroy {
+		return false
+	}
+
+	if len(d.Resources) == 0 {
+		return true
+	}
+
+	for _, rd := range d.Resources {
+		if !rd.Empty() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Instances returns the instance diffs for the id given. This can return
+// multiple instance diffs if there are counts within the resource.
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
+	var result []*InstanceDiff
+	for k, diff := range d.Resources {
+		if k == id || strings.HasPrefix(k, id+".") {
+			if !diff.Empty() {
+				result = append(result, diff)
+			}
+		}
+	}
+
+	return result
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (d *ModuleDiff) IsRoot() bool {
+	return reflect.DeepEqual(d.Path, rootModulePath)
+}
+
+// String outputs the diff in a long but command-line friendly output
+// format that users can read to quickly inspect a diff.
+func (d *ModuleDiff) String() string {
+	var buf bytes.Buffer
+
+	names := make([]string, 0, len(d.Resources))
+	for name := range d.Resources {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	for _, name := range names {
+		rdiff := d.Resources[name]
+
+		crud := "UPDATE"
+		switch {
+		case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
+			crud = "DESTROY/CREATE"
+		case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
+			crud = "DESTROY"
+		case rdiff.RequiresNew():
+			crud = "CREATE"
+		}
+
+		extra := ""
+		if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
+			extra = " (deposed only)"
+		}
+
+		buf.WriteString(fmt.Sprintf(
+			"%s: %s%s\n",
+			crud,
+			name,
+			extra))
+
+		keyLen := 0
+		rdiffAttrs := rdiff.CopyAttributes()
+		keys := make([]string, 0, len(rdiffAttrs))
+		for key := range rdiffAttrs {
+			if key == "id" {
+				continue
+			}
+
+			keys = append(keys, key)
+			if len(key) > keyLen {
+				keyLen = len(key)
+			}
+		}
+		sort.Strings(keys)
+
+		for _, attrK := range keys {
+			attrDiff, _ := rdiff.GetAttribute(attrK)
+
+			v := attrDiff.New
+			u := attrDiff.Old
+			if attrDiff.NewComputed {
+				v = "<computed>"
+			}
+
+			if attrDiff.Sensitive {
+				u = "<sensitive>"
+				v = "<sensitive>"
+			}
+
+			updateMsg := ""
+			if attrDiff.RequiresNew {
+				updateMsg = " (forces new resource)"
+			} else if attrDiff.Sensitive {
+				updateMsg = " (attribute changed)"
+			}
+
+			buf.WriteString(fmt.Sprintf(
+				"  %s:%s %#v => %#v%s\n",
+				attrK,
+				strings.Repeat(" ", keyLen-len(attrK)),
+				u,
+				v,
+				updateMsg))
+		}
+	}
+
+	return buf.String()
+}
+
+// InstanceDiff is the diff of a resource from some state to another.
+type InstanceDiff struct {
+	mu             sync.Mutex
+	Attributes     map[string]*ResourceAttrDiff
+	Destroy        bool
+	DestroyDeposed bool
+	DestroyTainted bool
+
+	// Meta is a simple K/V map that is stored in a diff and persisted to
+	// plans but otherwise is completely ignored by Terraform core. It is
+	// meant to be used for additional data a resource may want to pass through.
+	// The value here must only contain Go primitives and collections.
+	Meta map[string]interface{}
+}
+
+func (d *InstanceDiff) Lock()   { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ApplyToValue merges the receiver into the given base value, returning a
+// new value that incorporates the planned changes. The given value must
+// conform to the given schema, or this method will panic.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
+	// Create an InstanceState attributes from our existing state.
+	// We can use this to more easily apply the diff changes.
+	attrs := hcl2shim.FlatmapValueFromHCL2(base)
+	applied, err := d.Apply(attrs, schema)
+	if err != nil {
+		return base, err
+	}
+
+	val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
+	if err != nil {
+		return base, err
+	}
+
+	return schema.CoerceValue(val)
+}
+
+// Apply applies the diff to the provided flatmapped attributes,
+// returning the new instance attributes.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	// We always build a new value here, even if the given diff is "empty",
+	// because we might be planning to create a new instance that happens
+	// to have no attributes set, and so we want to produce an empty object
+	// rather than just echoing back the null old value.
+	if attrs == nil {
+		attrs = map[string]string{}
+	}
+
+	// Rather than applying the diff to mutate attrs, we'll copy new values
+	// into here to avoid the possibility of leaving stale values.
+	result := map[string]string{}
+
+	if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
+		return result, nil
+	}
+
+	return d.applyBlockDiff(nil, attrs, schema)
+}
+
+func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	result := map[string]string{}
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	// localPrefix is used to build the local result map
+	localPrefix := ""
+	if name != "" {
+		localPrefix = name + "."
+	}
+
+	// iterate over the schema rather than the attributes, so we can handle
+	// different block types separately from plain attributes
+	for n, attrSchema := range schema.Attributes {
+		var err error
+		newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
+
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range newAttrs {
+			result[localPrefix+k] = v
+		}
+	}
+
+	blockPrefix := strings.Join(path, ".")
+	if blockPrefix != "" {
+		blockPrefix += "."
+	}
+	for n, block := range schema.BlockTypes {
+		// we need to find the set of all keys that traverse this block
+		candidateKeys := map[string]bool{}
+		blockKey := blockPrefix + n + "."
+		localBlockPrefix := localPrefix + n + "."
+
+		// we can only trust the diff for sets, since the path changes, so don't
+		// count existing values as candidate keys. If it turns out we're
+		// keeping the attributes, we will catch it down below with "keepBlock"
+		// after we check the set count.
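+		// (Illustrative note, not in the upstream source: flatmapped set
+		// elements are keyed by a hash of their content, e.g.
+		// "ingress.1234567890.port", so a changed element shows up under a
+		// new hash key rather than as an in-place update of the old key.
+		// That is why existing attribute keys cannot be trusted as
+		// candidates for sets.)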
+		if block.Nesting != configschema.NestingSet {
+			for k := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					nextDot := strings.Index(k[len(blockKey):], ".")
+					if nextDot < 0 {
+						continue
+					}
+					nextDot += len(blockKey)
+					candidateKeys[k[len(blockKey):nextDot]] = true
+				}
+			}
+		}
+
+		for k, diff := range d.Attributes {
+			// helper/schema should not insert nil diff values, but don't panic
+			// if it does.
+			if diff == nil {
+				continue
+			}
+
+			if strings.HasPrefix(k, blockKey) {
+				nextDot := strings.Index(k[len(blockKey):], ".")
+				if nextDot < 0 {
+					continue
+				}
+
+				if diff.NewRemoved {
+					continue
+				}
+
+				nextDot += len(blockKey)
+				candidateKeys[k[len(blockKey):nextDot]] = true
+			}
+		}
+
+		// check each set candidate to see if it was removed.
+		// we need to do this, because when entire sets are removed, they may
+		// have the wrong key, and only show diffs going to ""
+		if block.Nesting == configschema.NestingSet {
+			for k := range candidateKeys {
+				indexPrefix := strings.Join(append(path, n, k), ".") + "."
+				keep := false
+				// now check each set element to see if it's a new diff, or one
+				// that we're dropping. Since we're only applying the "New"
+				// portion of the set, we can ignore diffs that only contain "Old"
+				for attr, diff := range d.Attributes {
+					// helper/schema should not insert nil diff values, but don't panic
+					// if it does.
+					if diff == nil {
+						continue
+					}
+
+					if !strings.HasPrefix(attr, indexPrefix) {
+						continue
+					}
+
+					// check for empty "count" keys
+					if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
+						continue
+					}
+
+					// removed items don't count either
+					if diff.NewRemoved {
+						continue
+					}
+
+					// this must be a diff to keep
+					keep = true
+					break
+				}
+				if !keep {
+					delete(candidateKeys, k)
+				}
+			}
+		}
+
+		for k := range candidateKeys {
+			newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
+			if err != nil {
+				return result, err
+			}
+
+			for attr, v := range newAttrs {
+				result[localBlockPrefix+attr] = v
+			}
+		}
+
+		keepBlock := true
+		// check this block's count diff directly first, since we may not
+		// have candidates because it was removed and only set to "0"
+		if diff, ok := d.Attributes[blockKey+"#"]; ok {
+			if diff.New == "0" || diff.NewRemoved {
+				keepBlock = false
+			}
+		}
+
+		// if there was no diff at all, then we need to keep the block attributes
+		if len(candidateKeys) == 0 && keepBlock {
+			for k, v := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					// we need the key relative to this block, so remove the
+					// entire prefix, then re-insert the block name.
+					localKey := localBlockPrefix + k[len(blockKey):]
+					result[localKey] = v
+				}
+			}
+		}
+
+		countAddr := strings.Join(append(path, n, "#"), ".")
+		if countDiff, ok := d.Attributes[countAddr]; ok {
+			if countDiff.NewComputed {
+				result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
+			} else {
+				result[localBlockPrefix+"#"] = countDiff.New
+
+				// While sets are complete, lists are not, and we may not have all the
+				// information to track removals. If the list was truncated, we need to
+				// remove the extra items from the result.
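+				// (Illustrative example, not in the upstream source: if the
+				// prior state held foo.0, foo.1, and foo.2 but the diff sets
+				// foo.# to "2", the loop below deletes the foo.2.* entries so
+				// the result matches the new length.)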
+				if block.Nesting == configschema.NestingList &&
+					countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
+					length, _ := strconv.Atoi(countDiff.New)
+					for k := range result {
+						if !strings.HasPrefix(k, localBlockPrefix) {
+							continue
+						}
+
+						index := k[len(localBlockPrefix):]
+						nextDot := strings.Index(index, ".")
+						if nextDot < 1 {
+							continue
+						}
+						index = index[:nextDot]
+						i, err := strconv.Atoi(index)
+						if err != nil {
+							// this shouldn't happen since we added these
+							// ourselves, but make note of it just in case.
+							log.Printf("[ERROR] bad list index in %q: %s", k, err)
+							continue
+						}
+						if i >= length {
+							delete(result, k)
+						}
+					}
+				}
+			}
+		} else if origCount, ok := attrs[countAddr]; ok && keepBlock {
+			result[localBlockPrefix+"#"] = origCount
+		} else {
+			result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result)
+		}
+	}
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	ty := attrSchema.Type
+	switch {
+	case ty.IsListType(), ty.IsTupleType(), ty.IsMapType():
+		return d.applyCollectionDiff(path, attrs, attrSchema)
+	case ty.IsSetType():
+		return d.applySetDiff(path, attrs, attrSchema)
+	default:
+		return d.applySingleAttrDiff(path, attrs, attrSchema)
+	}
+}
+
+func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	currentKey := strings.Join(path, ".")
+
+	attr := path[len(path)-1]
+
+	result := map[string]string{}
+	diff := d.Attributes[currentKey]
+	old, exists := attrs[currentKey]
+
+	if diff != nil && diff.NewComputed {
+		result[attr] = hcl2shim.UnknownVariableValue
+		return result, nil
+	}
+
+	// "id" must exist and not be an empty string, or it must be unknown.
+	// This only applies to top-level "id" fields.
+	if attr == "id" && len(path) == 1 {
+		if old == "" {
+			result[attr] = hcl2shim.UnknownVariableValue
+		} else {
+			result[attr] = old
+		}
+		return result, nil
+	}
+
+	// attribute diffs are sometimes missed, so assume no diff means keep the
+	// old value
+	if diff == nil {
+		if exists {
+			result[attr] = old
+		} else {
+			// We need required values, so set those with an empty value. It
+			// must be set in the config, since if it were missing it would have
+			// failed validation.
+			if attrSchema.Required {
+				// we only set a missing string here, since bool or number types
+				// would have distinct zero values which shouldn't have been
+				// lost.
+				if attrSchema.Type == cty.String {
+					result[attr] = ""
+				}
+			}
+		}
+		return result, nil
+	}
+
+	// check for mismatched diff values
+	if exists &&
+		old != diff.Old &&
+		old != hcl2shim.UnknownVariableValue &&
+		diff.Old != hcl2shim.UnknownVariableValue {
+		return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
+	}
+
+	if diff.NewRemoved {
+		// don't set anything in the new value
+		return map[string]string{}, nil
+	}
+
+	if diff.Old == diff.New && diff.New == "" {
+		// this can only be a valid empty string
+		if attrSchema.Type == cty.String {
+			result[attr] = ""
+		}
+		return result, nil
+	}
+
+	if attrSchema.Computed && diff.NewComputed {
+		result[attr] = hcl2shim.UnknownVariableValue
+		return result, nil
+	}
+
+	result[attr] = diff.New
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	result := map[string]string{}
+
+	prefix := ""
+	if len(path) > 1 {
+		prefix = strings.Join(path[:len(path)-1], ".") + "."
+	}
+
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	currentKey := prefix + name
+
+	// check the index first for special handling
+	for k, diff := range d.Attributes {
+		// check the index value, which can be set, and 0
+		if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
+			if diff.NewRemoved {
+				return result, nil
+			}
+
+			if diff.NewComputed {
+				result[k[len(prefix):]] = hcl2shim.UnknownVariableValue
+				return result, nil
+			}
+
+			// do what the diff tells us to here, so that it's consistent with applies
+			if diff.New == "0" {
+				result[k[len(prefix):]] = "0"
+				return result, nil
+			}
+		}
+	}
+
+	// collect all the keys from the diff and the old state
+	noDiff := true
+	keys := map[string]bool{}
+	for k := range d.Attributes {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noDiff = false
+		keys[k] = true
+	}
+
+	noAttrs := true
+	for k := range attrs {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noAttrs = false
+		keys[k] = true
+	}
+
+	// If there's no diff and no attrs, then there's no value at all.
+	// This prevents an unexpected zero-count attribute in the attributes.
+	if noDiff && noAttrs {
+		return result, nil
+	}
+
+	idx := "#"
+	if attrSchema.Type.IsMapType() {
+		idx = "%"
+	}
+
+	for k := range keys {
+		// generate a schema placeholder for the values
+		elSchema := &configschema.Attribute{
+			Type: attrSchema.Type.ElementType(),
+		}
+
+		res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range res {
+			result[name+"."+k] = v
+		}
+	}
+
+	// Just like in nested list blocks, for simple lists we may need to fill in
+	// missing empty strings.
+	countKey := name + "." 
+ idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. + return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". 
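+//
+// For example, with attrs of {"tags.%": "2", "tags.a": "x", "tags.b": "y"},
+// countFlatmapContainerValues("tags.%", attrs) counts the distinct first-level
+// suffixes "a" and "b" and returns "2".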
+func countFlatmapContainerValues(key string, attrs map[string]string) string {
+	if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
+		panic(fmt.Sprintf("invalid index value %q", key))
+	}
+
+	prefix := key[:len(key)-1]
+	items := map[string]int{}
+
+	for k := range attrs {
+		if k == key {
+			continue
+		}
+		if !strings.HasPrefix(k, prefix) {
+			continue
+		}
+
+		suffix := k[len(prefix):]
+		dot := strings.Index(suffix, ".")
+		if dot > 0 {
+			suffix = suffix[:dot]
+		}
+
+		items[suffix]++
+	}
+	return strconv.Itoa(len(items))
+}
+
+// ResourceAttrDiff is the diff of a single attribute of a resource.
+type ResourceAttrDiff struct {
+	Old         string      // Old Value
+	New         string      // New Value
+	NewComputed bool        // True if new value is computed (unknown currently)
+	NewRemoved  bool        // True if this attribute is being removed
+	NewExtra    interface{} // Extra information for the provider
+	RequiresNew bool        // True if change requires new resource
+	Sensitive   bool        // True if the data should not be displayed in UI output
+	Type        DiffAttrType
+}
+
+// Empty returns true if the diff for this attr is neutral
+func (d *ResourceAttrDiff) Empty() bool {
+	return d.Old == d.New && !d.NewComputed && !d.NewRemoved
+}
+
+func (d *ResourceAttrDiff) GoString() string {
+	return fmt.Sprintf("*%#v", *d)
+}
+
+// DiffAttrType is an enum type that says whether a resource attribute
+// diff is an input attribute (comes from the configuration) or an
+// output attribute (comes as a result of applying the configuration). An
+// example input would be "ami" for AWS and an example output would be
+// "private_ip".
+type DiffAttrType byte
+
+const (
+	DiffAttrUnknown DiffAttrType = iota
+	DiffAttrInput
+	DiffAttrOutput
+)
+
+func (d *InstanceDiff) init() {
+	if d.Attributes == nil {
+		d.Attributes = make(map[string]*ResourceAttrDiff)
+	}
+}
+
+func NewInstanceDiff() *InstanceDiff {
+	return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
+}
+
+func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
+	if d == nil {
+		return nil, nil
+	}
+
+	dCopy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		return nil, err
+	}
+
+	return dCopy.(*InstanceDiff), nil
+}
+
+// ChangeType returns the DiffChangeType represented by the diff
+// for this single instance.
+func (d *InstanceDiff) ChangeType() DiffChangeType {
+	if d.Empty() {
+		return DiffNone
+	}
+
+	if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
+		return DiffDestroyCreate
+	}
+
+	if d.GetDestroy() || d.GetDestroyDeposed() {
+		return DiffDestroy
+	}
+
+	if d.RequiresNew() {
+		return DiffCreate
+	}
+
+	return DiffUpdate
+}
+
+// Empty returns true if this diff encapsulates no changes.
+func (d *InstanceDiff) Empty() bool {
+	if d == nil {
+		return true
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return !d.Destroy &&
+		!d.DestroyTainted &&
+		!d.DestroyDeposed &&
+		len(d.Attributes) == 0
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison, which checks for operation
+// equality while taking computed values into account. Equal instead checks
+// for exact equality.
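+//
+// For example, a diff in which an attribute is marked NewComputed can be
+// "Same" as one in which that attribute is absent, since the computed value
+// may have worked out to be unchanged, but the two diffs are not Equal.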
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
+	// If one is nil, they must both be nil
+	if d == nil || d2 == nil {
+		return d == d2
+	}
+
+	// Use DeepEqual
+	return reflect.DeepEqual(d, d2)
+}
+
+// DeepCopy performs a deep copy of all parts of the InstanceDiff
+func (d *InstanceDiff) DeepCopy() *InstanceDiff {
+	copy, err := copystructure.Config{Lock: true}.Copy(d)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*InstanceDiff)
+}
+
+func (d *InstanceDiff) GoString() string {
+	return fmt.Sprintf("*%#v", InstanceDiff{
+		Attributes:     d.Attributes,
+		Destroy:        d.Destroy,
+		DestroyTainted: d.DestroyTainted,
+		DestroyDeposed: d.DestroyDeposed,
+	})
+}
+
+// RequiresNew returns true if the diff requires the creation of a new
+// resource (implying the destruction of the old).
+func (d *InstanceDiff) RequiresNew() bool {
+	if d == nil {
+		return false
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.requiresNew()
+}
+
+func (d *InstanceDiff) requiresNew() bool {
+	if d == nil {
+		return false
+	}
+
+	if d.DestroyTainted {
+		return true
+	}
+
+	for _, rd := range d.Attributes {
+		if rd != nil && rd.RequiresNew {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (d *InstanceDiff) GetDestroyDeposed() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.DestroyDeposed
+}
+
+func (d *InstanceDiff) SetDestroyDeposed(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.DestroyDeposed = b
+}
+
+// These methods are properly locked for use from outside other InstanceDiff
+// methods, i.e. from everywhere else within the terraform package.
+// TODO refactor the locking scheme
+func (d *InstanceDiff) SetTainted(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.DestroyTainted = b
+}
+
+func (d *InstanceDiff) GetDestroyTainted() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.DestroyTainted
+}
+
+func (d *InstanceDiff) SetDestroy(b bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.Destroy = b
+}
+
+func (d *InstanceDiff) GetDestroy() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.Destroy
+}
+
+func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.Attributes[key] = attr
+}
+
+func (d *InstanceDiff) DelAttribute(key string) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	delete(d.Attributes, key)
+}
+
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	attr, ok := d.Attributes[key]
+	return attr, ok
+}
+
+func (d *InstanceDiff) GetAttributesLen() int {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return len(d.Attributes)
+}
+
+// Safely copies the Attributes map
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	attrs := make(map[string]*ResourceAttrDiff)
+	for k, v := range d.Attributes {
+		attrs[k] = v
+	}
+
+	return attrs
+}
+
+// Same checks whether or not two InstanceDiffs are the "same". When
+// we say "same", it is not necessarily exactly equal. Instead, it is
+// just checking that the same attributes are changing, a destroy
+// isn't suddenly happening, etc.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+	// we can safely compare the pointers without a lock
+	switch {
+	case d == nil && d2 == nil:
+		return true, ""
+	case d == nil || d2 == nil:
+		return false, "one nil"
+	case d == d2:
+		return true, ""
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// If we're going from requiring new to NOT requiring new, then we have
+	// to see if all RequiresNew attributes were computed. If so, it is
+	// allowed since computed may also mean "same value and therefore not new".
+	oldNew := d.requiresNew()
+	newNew := d2.RequiresNew()
+	if oldNew && !newNew {
+		oldNew = false
+
+		// This section builds a list of ignorable attributes for requiresNew
+		// by finding any elements of collections going to zero elements.
+		// For collections going to zero, they may not exist at all in the
+		// new diff (and hence RequiresNew == false).
+		ignoreAttrs := make(map[string]struct{})
+		for k, diffOld := range d.Attributes {
+			if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+				continue
+			}
+
+			// This case is in here as a protection measure. The bug that this
+			// code originally fixed (GH-11349) didn't have to deal with computed
+			// so I'm not 100% sure what the correct behavior is. Best to leave
+			// the old behavior.
+			if diffOld.NewComputed {
+				continue
+			}
+
+			// We're looking for the case a map goes to exactly 0.
+			if diffOld.New != "0" {
+				continue
+			}
+
+			// Found it! Ignore all of these. The prefix here is stripping
+			// off the "%" so it is just "k."
+			prefix := k[:len(k)-1]
+			for k2 := range d.Attributes {
+				if strings.HasPrefix(k2, prefix) {
+					ignoreAttrs[k2] = struct{}{}
+				}
+			}
+		}
+
+		for k, rd := range d.Attributes {
+			if _, ok := ignoreAttrs[k]; ok {
+				continue
+			}
+
+			// If the field is requires new and NOT computed, then what
+			// we have is a diff mismatch for sure. We set that the old
+			// diff does REQUIRE a ForceNew.
+			if rd != nil && rd.RequiresNew && !rd.NewComputed {
+				oldNew = true
+				break
+			}
+		}
+	}
+
+	if oldNew != newNew {
+		return false, fmt.Sprintf(
+			"diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+	}
+
+	// Verify that destroy matches. The second boolean here allows us to
+	// have mismatching Destroy if we're moving from RequiresNew true
+	// to false above. Therefore, the second boolean will only pass if
+	// we're moving from Destroy: true to false as well.
+	if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+		return false, fmt.Sprintf(
+			"diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+	}
+
+	// Go through the old diff and make sure the new diff has all the
+	// same attributes. To start, build up the check map to be all the keys.
+	checkOld := make(map[string]struct{})
+	checkNew := make(map[string]struct{})
+	for k := range d.Attributes {
+		checkOld[k] = struct{}{}
+	}
+	for k := range d2.CopyAttributes() {
+		checkNew[k] = struct{}{}
+	}
+
+	// Make an ordered list so we are sure the approximated hashes are left
+	// to process at the end of the loop
+	keys := make([]string, 0, len(d.Attributes))
+	for k := range d.Attributes {
+		keys = append(keys, k)
+	}
+	sort.StringSlice(keys).Sort()
+
+	for _, k := range keys {
+		diffOld := d.Attributes[k]
+
+		if _, ok := checkOld[k]; !ok {
+			// We're not checking this key for whatever reason (see where
+			// check is modified).
+			continue
+		}
+
+		// Remove this key since we'll never hit it again
+		delete(checkOld, k)
+		delete(checkNew, k)
+
+		_, ok := d2.GetAttribute(k)
+		if !ok {
+			// If there's no new attribute, and the old diff expected the attribute
+			// to be removed, that's just fine.
+			if diffOld.NewRemoved {
+				continue
+			}
+
+			// If the last diff was a computed value then the absence of
+			// that value is allowed since it may mean the value ended up
+			// being the same.
+			if diffOld.NewComputed {
+				ok = true
+			}
+
+			// No exact match, but maybe this is a set containing computed
+			// values. So check if there is an approximate hash in the key
+			// and if so, try to match the key.
+			if strings.Contains(k, "~") {
+				parts := strings.Split(k, ".")
+				parts2 := append([]string(nil), parts...)
+
+				re := regexp.MustCompile(`^~\d+$`)
+				for i, part := range parts {
+					if re.MatchString(part) {
+						// we're going to consider this the base of a
+						// computed hash, and remove all longer matching fields
+						ok = true
+
+						parts2[i] = `\d+`
+						parts2 = parts2[:i+1]
+						break
+					}
+				}
+
+				re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+				if err != nil {
+					return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+				}
+
+				for k2 := range checkNew {
+					if re.MatchString(k2) {
+						delete(checkNew, k2)
+					}
+				}
+			}
+
+			// This is a little tricky, but when a diff contains a computed
+			// list, set, or map that can only be interpolated after the apply
+			// command has created the dependent resources, it could turn out
+			// that the result is actually the same as the existing state which
+			// would remove the key from the diff.
+			if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			// Similarly, in a RequiresNew scenario, a list that shows up in the plan
+			// diff can disappear from the apply diff, which is calculated from an
+			// empty state.
+			if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			if !ok {
+				return false, fmt.Sprintf("attribute mismatch: %s", k)
+			}
+		}
+
+		// search for the suffix of the base of a [computed] map, list or set.
+		match := multiVal.FindStringSubmatch(k)
+
+		if diffOld.NewComputed && len(match) == 2 {
+			matchLen := len(match[1])
+
+			// This is a computed list, set, or map, so remove any keys with
+			// this prefix from the check list.
+			kprefix := k[:len(k)-matchLen]
+			for k2 := range checkOld {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkOld, k2)
+				}
+			}
+			for k2 := range checkNew {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkNew, k2)
+				}
+			}
+		}
+
+		// We don't compare the values because we can't currently actually
+		// guarantee to generate the same value for two diffs created from
+		// the same state+config: we have some pesky interpolation functions
+		// that do not behave as pure functions (uuid, timestamp) and so they
+		// can be different each time a diff is produced.
+		// FIXME: Re-organize our config handling so that we don't re-evaluate
+		// expressions when we produce a second comparison diff during
+		// apply (for EvalCompareDiff).
+	}
+
+	// Check for leftover attributes
+	if len(checkNew) > 0 {
+		extras := make([]string, 0, len(checkNew))
+		for attr := range checkNew {
+			extras = append(extras, attr)
+		}
+		return false,
+			fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+	}
+
+	return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
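+// Shorter paths always sort first; paths of equal length are ordered lexically
+// by their dot-joined form, so for example ["root"] sorts before ["root", "a"],
+// and ["root", "a"] before ["root", "b"].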
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int      { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+	a := s[i]
+	b := s[j]
+
+	// If the lengths are different, then the shorter one always wins
+	if len(a.Path) != len(b.Path) {
+		return len(a.Path) < len(b.Path)
+	}
+
+	// Otherwise, compare lexically
+	return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go
new file mode 100644
index 00000000000..17464bc0638
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+	S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go
new file mode 100644
index 00000000000..c490c3bcffb
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go
@@ -0,0 +1,70 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+	// Eval evaluates this node with the given context and returns any
+	// resulting value along with an error. The EvalContext is the only
+	// input; implementations obtain everything else they need through it.
+	Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+	EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, treating an
+// early exit as a normal, non-error result.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+	// Call the lower level eval which doesn't understand early exit,
+	// and if we early exit, it isn't an error.
+	result, err := EvalRaw(n, ctx)
+	if err != nil {
+		if _, ok := err.(EvalEarlyExitError); ok {
+			return nil, nil
+		}
+	}
+
+	return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
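+//
+// Callers that need to distinguish an early exit from an ordinary successful
+// result should therefore use EvalRaw and check for EvalEarlyExitError
+// themselves, since Eval folds that case into a plain nil result.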
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+	path := "unknown"
+	if ctx != nil {
+		path = ctx.Path().String()
+	}
+	if path == "" {
+		path = "<root>"
+	}
+
+	log.Printf("[TRACE] %s: eval: %T", path, n)
+	output, err := n.Eval(ctx)
+	if err != nil {
+		switch err.(type) {
+		case EvalEarlyExitError:
+			log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err)
+		case tfdiags.NonFatalError:
+			log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err)
+		default:
+			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+		}
+	}
+
+	return output, err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go
new file mode 100644
index 00000000000..eb0d4fea2aa
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go
@@ -0,0 +1,649 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// EvalApply is an EvalNode implementation that applies the planned change
+// for a resource instance by calling the provider and capturing the
+// resulting state.
+type EvalApply struct {
+	Addr           addrs.ResourceInstance
+	Config         *configs.Resource
+	Dependencies   []addrs.Referenceable
+	State          **states.ResourceInstanceObject
+	Change         **plans.ResourceInstanceChange
+	ProviderAddr   addrs.AbsProviderConfig
+	Provider       *providers.Interface
+	ProviderSchema **ProviderSchema
+	Output         **states.ResourceInstanceObject
+	CreateNew      *bool
+	Error          *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+	var diags tfdiags.Diagnostics
+
+	change := *n.Change
+	provider := *n.Provider
+	state := *n.State
+	absAddr := n.Addr.Absolute(ctx.Path())
+
+	if state == nil {
+		state = &states.ResourceInstanceObject{}
+	}
+
+	schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type)
+	if schema == nil {
+		// Should be caught during validation, so we don't bother with a pretty error here
+		return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
+	}
+
+	if n.CreateNew != nil {
+		*n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace())
+	}
+
+	configVal := cty.NullVal(cty.DynamicPseudoType)
+	if n.Config != nil {
+		var configDiags tfdiags.Diagnostics
+		forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+		keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
+		configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
+		diags = diags.Append(configDiags)
+		if configDiags.HasErrors() {
+			return nil, diags.Err()
+		}
+	}
+
+	log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action)
+	resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
+		TypeName:       n.Addr.Resource.Type,
+		PriorState:     change.Before,
+		Config:         configVal,
+		PlannedState:   change.After,
+		PlannedPrivate: change.Private,
+	})
+	applyDiags := resp.Diagnostics
+	if n.Config != nil {
+		applyDiags = applyDiags.InConfigBody(n.Config.Config)
+	}
+	diags = diags.Append(applyDiags)
+
+	// Even if there are errors in the returned diagnostics, the provider may
+	// have returned a _partial_ state for an object that already exists but
+	// failed to fully configure, and so the remaining code must always run
+	// to completion but must be defensive against the new value being
+	// incomplete.
+	newVal := resp.NewState
+
+	if newVal == cty.NilVal {
+		// Providers are supposed to return a partial new value even when errors
+		// occur, but sometimes they don't and so in that case we'll patch that up
+		// by just using the prior state, so we'll at least keep track of the
+		// object for the user to retry.
+		newVal = change.Before
+
+		// As a special case, we'll set the new value to null if it looks like
+		// we were trying to execute a delete, because the provider in this case
+		// probably left the newVal unset intending it to be interpreted as "null".
+		if change.After.IsNull() {
+			newVal = cty.NullVal(schema.ImpliedType())
+		}
+
+		// Ideally we'd produce an error or warning here if newVal is nil and
+		// there are no errors in diags, because that indicates a buggy
+		// provider not properly reporting its result, but unfortunately many
+		// of our historical test mocks behave in this way and so producing
+		// a diagnostic here fails hundreds of tests. Instead, we must just
+		// silently retain the old value for now. Returning a nil value with
+		// no errors is still always considered a bug in the provider though,
+		// and should be fixed for any "real" providers that do it.
+	}
+
+	var conformDiags tfdiags.Diagnostics
+	for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
+		conformDiags = conformDiags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Provider produced invalid object",
+			fmt.Sprintf(
+				"Provider %q produced an invalid value after apply for %s. The result cannot be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+				n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
+			),
+		))
+	}
+	diags = diags.Append(conformDiags)
+	if conformDiags.HasErrors() {
+		// Bail early in this particular case, because an object that doesn't
+		// conform to the schema can't be saved in the state anyway -- the
+		// serializer will reject it.
+		return nil, diags.Err()
+	}
+
+	// After this point we have a type-conforming result object and so we
+	// must always run to completion to ensure it can be saved. If n.Error
+	// is set then we must not return a non-nil error, in order to allow
+	// evaluation to continue to a later point where our state object will
+	// be saved.
+
+	// By this point there must not be any unknown values remaining in our
+	// object, because we've applied the change and we can't save unknowns
+	// in our persistent state. If any are present then we will indicate an
+	// error (which is always a bug in the provider) but we will also replace
+	// them with nulls so that we can successfully save the portions of the
+	// returned value that are known.
+	if !newVal.IsWhollyKnown() {
+		// To generate better error messages, we'll go for a walk through the
+		// value and make a separate diagnostic for each unknown value we
+		// find.
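+		// (cty.Walk visits every nested value in depth-first order and hands
+		// each callback the cty.Path to that value, which is what lets each
+		// diagnostic below point at the exact offending element.)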
+ cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { + if !val.IsKnown() { + pathStr := tfdiags.FormatCtyPath(path) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", + n.Addr.Absolute(ctx.Path()), pathStr, + ), + )) + } + return true, nil + }) + + // NOTE: This operation can potentially be lossy if there are multiple + // elements in a set that differ only by unknown values: after + // replacing with null these will be merged together into a single set + // element. Since we can only get here in the presence of a provider + // bug, we accept this because storing a result here is always a + // best-effort sort of thing. + newVal = cty.UnknownAsNull(newVal) + } + + if change.Action != plans.Delete && !diags.HasErrors() { + // Only values that were marked as unknown in the planned value are allowed + // to change during the apply operation. (We do this after the unknown-ness + // check above so that we also catch anything that became unknown after + // being known during plan.) + // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. + if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. 
+ + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + } + } + } + + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like Terraform is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + } + + // Sometimes providers return a null value when an operation fails for some + // reason, but we'd rather keep the prior state so that the error can be + // corrected on a subsequent run. We must only do this for null new value + // though, or else we may discard partial updates the provider was able to + // complete. + if diags.HasErrors() && newVal.IsNull() { + // Otherwise, we'll continue but using the prior state as the new value, + // making this effectively a no-op. If the item really _has_ been + // deleted then our next refresh will detect that and fix it up. + // If change.Action is Create then change.Before will also be null, + // which is fine. + newVal = change.Before + } + + var newState *states.ResourceInstanceObject + if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case + newState = &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: newVal, + Private: resp.Private, + Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node + } + } + + // Write the final state + if n.Output != nil { + *n.Output = newState + } + + if diags.HasErrors() { + // If the caller provided an error pointer then they are expected to + // handle the error some other way and we treat our own result as + // success. 
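+		// (EvalApplyPost later reads this same pointer back and hands the
+		// recorded error to the PostApply hooks, so the failure is still
+		// surfaced to the user.)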
+ if n.Error != nil { + err := diags.Err() + *n.Error = err + log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) + return nil, nil + } + } + + return nil, diags.ErrWithWarnings() +} + +// EvalApplyPre is an EvalNode implementation that does the pre-Apply work +type EvalApplyPre struct { + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { + change := *n.Change + absAddr := n.Addr.Absolute(ctx.Path()) + + if change == nil { + panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) + } + + if resourceHasUserVisibleApply(n.Addr) { + priorState := change.Before + plannedNewState := change.After + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalApplyPost is an EvalNode implementation that does the post-Apply work +type EvalApplyPost struct { + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Error *error +} + +// TODO: test +func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + + if resourceHasUserVisibleApply(n.Addr) { + absAddr := n.Addr.Absolute(ctx.Path()) + var newState cty.Value + if state != nil { + newState = state.Value + } else { + newState = cty.NullVal(cty.DynamicPseudoType) + } + var err error + if n.Error != nil { + err = *n.Error + } + + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(absAddr, n.Gen, newState, err) + }) + if hookErr != nil { + return nil, hookErr + } + } + + return nil, *n.Error +} + +// EvalMaybeTainted is an EvalNode that takes the planned change, new value, +// and possible error from an apply operation and produces a new instance +// object marked as tainted if it appears that a create operation has failed. +// +// This EvalNode never returns an error, to ensure that a subsequent EvalNode +// can still record the possibly-tainted object in the state. +type EvalMaybeTainted struct { + Addr addrs.ResourceInstance + Gen states.Generation + Change **plans.ResourceInstanceChange + State **states.ResourceInstanceObject + Error *error + + // If StateOutput is not nil, its referent will be assigned either the same + // pointer as State or a new object with its status set as Tainted, + // depending on whether an error is given and if this was a create action. + StateOutput **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + change := *n.Change + err := *n.Error + + if state != nil && state.Status == states.ObjectTainted { + log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) + return nil, nil + } + + if n.StateOutput != nil { + if err != nil && change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. 
+			// If there _were_ changes prior to the error, it's the provider's
+			// responsibility to record the effect of those changes in the
+			// object value it returned.
+			log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path()))
+			*n.StateOutput = state.AsTainted()
+		} else {
+			*n.StateOutput = state
+		}
+	}
+
+	return nil, nil
+}
+
+// resourceHasUserVisibleApply returns true if the given resource is one where
+// apply actions should be exposed to the user.
+//
+// Certain resources do apply actions only as an implementation detail, so
+// these should not be advertised to code outside of this package.
+func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool {
+	// Only managed resources have user-visible apply actions.
+	// In particular, this excludes data resources since we "apply" these
+	// only as an implementation detail of removing them from state when
+	// they are destroyed. (When reading, they don't get here at all because
+	// we present them as "Refresh" actions.)
+	return addr.ContainingResource().Mode == addrs.ManagedResourceMode
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+	Addr           addrs.ResourceInstance
+	State          **states.ResourceInstanceObject
+	ResourceConfig *configs.Resource
+	CreateNew      *bool
+	Error          *error
+
+	// When is the type of provisioner to run at this point
+	When configs.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+	absAddr := n.Addr.Absolute(ctx.Path())
+	state := *n.State
+	if state == nil {
+		log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
+		return nil, nil
+	}
+	if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew {
+		// If we're not creating a new resource, then don't run provisioners
+		log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
+		return nil, nil
+	}
+	if state.Status == states.ObjectTainted {
+		// No point in provisioning an object that is already tainted, since
+		// it's going to get recreated on the next apply anyway.
+		log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
+		return nil, nil
+	}
+
+	provs := n.filterProvisioners()
+	if len(provs) == 0 {
+		// We have no provisioners, so don't do anything
+		return nil, nil
+	}
+
+	if n.Error != nil && *n.Error != nil {
+		// An error has already occurred, so don't run any provisioners.
+		return nil, nil
+	}
+
+	{
+		// Call pre hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreProvisionInstance(absAddr, state.Value)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If provisioning fails, we append the error to the caller's error
+	// pointer if one was provided; otherwise we return it directly.
+	err := n.apply(ctx, provs)
+	if err != nil {
+		// Check for a caller-provided error pointer before dereferencing it.
+		if n.Error == nil {
+			return nil, err
+		}
+		*n.Error = multierror.Append(*n.Error, err)
+		log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr)
+		return nil, nil
+	}
+
+	{
+		// Call post hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvisionInstance(absAddr, state.Value)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
+	// Fast path the zero case
+	if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
+		return nil
+	}
+
+	if len(n.ResourceConfig.Managed.Provisioners) == 0 {
+		return nil
+	}
+
+	result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
+	for _, p := range n.ResourceConfig.Managed.Provisioners {
+		if p.When == n.When {
+			result = append(result, p)
+		}
+	}
+
+	return result
+}
+
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
+	var diags tfdiags.Diagnostics
+	instanceAddr := n.Addr
+	absAddr := instanceAddr.Absolute(ctx.Path())
+
+	// If there's a connection block defined directly inside the resource block
+	// then it'll serve as a base connection configuration for all of the
+	// provisioners.
+	var baseConn hcl.Body
+	if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil {
+		baseConn = n.ResourceConfig.Managed.Connection.Config
+	}
+
+	for _, prov := range provs {
+		log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type)
+
+		// Get the provisioner
+		provisioner := ctx.Provisioner(prov.Type)
+		schema := ctx.ProvisionerSchema(prov.Type)
+
+		forEach, forEachDiags := evaluateResourceForEachExpression(n.ResourceConfig.ForEach, ctx)
+		diags = diags.Append(forEachDiags)
+		keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach)
+
+		// Evaluate the main provisioner configuration.
+		config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
+		diags = diags.Append(configDiags)
+
+		// If the provisioner block contains a connection block of its own then
+		// it can override the base connection configuration, if any.
+		var localConn hcl.Body
+		if prov.Connection != nil {
+			localConn = prov.Connection.Config
+		}
+
+		var connBody hcl.Body
+		switch {
+		case baseConn != nil && localConn != nil:
+			// Our standard merging logic applies here, similar to what we do
+			// with _override.tf configuration files: arguments from the
+			// base connection block will be masked by any arguments of the
+			// same name in the local connection block.
+			connBody = configs.MergeBodies(baseConn, localConn)
+		case baseConn != nil:
+			connBody = baseConn
+		case localConn != nil:
+			connBody = localConn
+		}
+
+		// start with an empty connInfo
+		connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType())
+
+		if connBody != nil {
+			var connInfoDiags tfdiags.Diagnostics
+			connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
+			diags = diags.Append(connInfoDiags)
+			if diags.HasErrors() {
+				// "on failure continue" setting only applies to failures of the
+				// provisioner itself, not to invalid configuration.
+				return diags.Err()
+			}
+		}
+
+		{
+			// Call pre hook
+			err := ctx.Hook(func(h Hook) (HookAction, error) {
+				return h.PreProvisionInstanceStep(absAddr, prov.Type)
+			})
+			if err != nil {
+				return err
+			}
+		}
+
+		// The output function
+		outputFn := func(msg string) {
+			ctx.Hook(func(h Hook) (HookAction, error) {
+				h.ProvisionOutput(absAddr, prov.Type, msg)
+				return HookActionContinue, nil
+			})
+		}
+
+		output := CallbackUIOutput{OutputFn: outputFn}
+		resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
+			Config:     config,
+			Connection: connInfo,
+			UIOutput:   &output,
+		})
+		applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
+
+		// Call post hook
+		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
+		})
+
+		switch prov.OnFailure {
+		case configs.ProvisionerOnFailureContinue:
+			if applyDiags.HasErrors() {
+				log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
+			} else {
+				// Maybe there are warnings that we still want to see
+				diags = diags.Append(applyDiags)
+			}
+		default:
+			diags = diags.Append(applyDiags)
+			if applyDiags.HasErrors() {
+				log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
+				return diags.Err()
+			}
+		}
+
+		// Deal with the hook
+		if hookErr != nil {
+			return hookErr
+		}
+	}
+
+	return diags.ErrWithWarnings()
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 00000000000..bcc3c531b99
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,47 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+
+	"github.com/hashicorp/hcl2/hcl"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the diff
+// would destroy the resource.
+type EvalCheckPreventDestroy struct {
+	Addr   addrs.ResourceInstance
+	Config *configs.Resource
+	Change **plans.ResourceInstanceChange
+}
+
+func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
+	if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil {
+		return nil, nil
+	}
+
+	change := *n.Change
+	preventDestroy := n.Config.Managed.PreventDestroy
+
+	if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy {
+		var diags tfdiags.Diagnostics
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Instance cannot be destroyed",
+			Detail: fmt.Sprintf(
+				"Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.",
+				n.Addr.Absolute(ctx.Path()).String(),
+			),
+			Subject: &n.Config.DeclRange,
+		})
+		return nil, diags.Err()
+	}
+
+	return nil, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go
new file mode 100644
index 00000000000..ac354350427
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go
@@ -0,0 +1,133 @@
+package terraform
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/lang"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// EvalContext is the interface that is given to eval nodes to execute.
+type EvalContext interface {
+	// Stopped returns a channel that is closed when evaluation is stopped
+	// via Terraform.Context.Stop()
+	Stopped() <-chan struct{}
+
+	// Path is the current module path.
+	Path() addrs.ModuleInstance
+
+	// Hook is used to call hook methods. The callback is called for each
+	// hook and should return the hook action to take and the error.
+	Hook(func(Hook) (HookAction, error)) error
+
+	// Input is the UIInput object for interacting with the UI.
+	Input() UIInput
+
+	// InitProvider initializes the provider with the given type and address, and
+	// returns the implementation of the resource provider or an error.
+	//
+	// It is an error to initialize the same provider more than once.
+	InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error)
+
+	// Provider gets the provider instance with the given address (already
+	// initialized) or returns nil if the provider isn't initialized.
+	//
+	// This method expects an _absolute_ provider configuration address, since
+	// resources in one module are able to use providers from other modules.
+	// InitProvider must've been called on the EvalContext of the module
+	// that owns the given provider before calling this method.
+	Provider(addrs.AbsProviderConfig) providers.Interface
+
+	// ProviderSchema retrieves the schema for a particular provider, which
+	// must have already been initialized with InitProvider.
+	//
+	// This method expects an _absolute_ provider configuration address, since
+	// resources in one module are able to use providers from other modules.
+	ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema
+
+	// CloseProvider closes provider connections that aren't needed anymore.
+	CloseProvider(addrs.ProviderConfig) error
+
+	// ConfigureProvider configures the provider with the given
+	// configuration. This is a separate context call because this call
+	// is used to store the provider configuration for inheritance lookups
+	// with ParentProviderConfig().
+ ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics + + // ProviderInput and SetProviderInput are used to configure providers + // from user input. + ProviderInput(addrs.ProviderConfig) map[string]cty.Value + SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) + + // InitProvisioner initializes the provisioner with the given name and + // returns the implementation of the resource provisioner or an error. + // + // It is an error to initialize the same provisioner more than once. + InitProvisioner(string) (provisioners.Interface, error) + + // Provisioner gets the provisioner instance with the given name (already + // initialized) or returns nil if the provisioner isn't initialized. + Provisioner(string) provisioners.Interface + + // ProvisionerSchema retrieves the main configuration schema for a + // particular provisioner, which must have already been initialized with + // InitProvisioner. + ProvisionerSchema(string) *configschema.Block + + // CloseProvisioner closes provisioner connections that aren't needed + // anymore. + CloseProvisioner(string) error + + // EvaluateBlock takes the given raw configuration block and associated + // schema and evaluates it to produce a value of an object type that + // conforms to the implied type of the schema. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + // + // The "key" argument is also optional. If given, it is the instance key + // of the current object within the multi-instance container it belongs + // to. For example, on a resource block with "count" set this should be + // set to a different addrs.IntKey for each instance created from that + // block. Set this to addrs.NoKey if not appropriate. + // + // The returned body is an expanded version of the given body, with any + // "dynamic" blocks replaced with zero or more static blocks. This can be + // used to extract correct source location information about attributes of + // the returned object value. + EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) + + // EvaluateExpr takes the given HCL expression and evaluates it to produce + // a value. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) + + // EvaluationScope returns a scope that can be used to evaluate reference + // addresses in this context. + EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope + + // SetModuleCallArguments defines values for the variables of a particular + // child module call. + // + // Calling this function multiple times has merging behavior, keeping any + // previously-set keys that are not present in the new map. + SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) + + // Changes returns the writer object that can be used to write new proposed + // changes into the global changes set. + Changes() *plans.ChangesSync + + // State returns a wrapper object that provides safe concurrent access to + // the global state. 
+ State() *states.SyncState +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go new file mode 100644 index 00000000000..7347a80dfcb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go @@ -0,0 +1,329 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// BuiltinEvalContext is an EvalContext implementation that is used by +// Terraform by default. +type BuiltinEvalContext struct { + // StopContext is the context used to track whether we're complete + StopContext context.Context + + // PathValue is the Path that this context is operating within. + PathValue addrs.ModuleInstance + + // Evaluator is used for evaluating expressions within the scope of this + // eval context. + Evaluator *Evaluator + + // Schemas is a repository of all of the schemas we should need to + // decode configuration blocks and expressions. This must be constructed by + // the caller to include schemas for all of the providers, resource types, + // data sources and provisioners used by the given configuration and + // state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // VariableValues contains the variable values across all modules. This + // structure is shared across the entire containing context, and so it + // may be accessed only when holding VariableValuesLock. + // The keys of the first level of VariableValues are the string + // representations of addrs.ModuleInstance values. The second-level keys + // are variable names within each module instance. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + Components contextComponentFactory + Hooks []Hook + InputValue UIInput + ProviderCache map[string]providers.Interface + ProviderInputConfig map[string]map[string]cty.Value + ProviderLock *sync.Mutex + ProvisionerCache map[string]provisioners.Interface + ProvisionerLock *sync.Mutex + ChangesValue *plans.ChangesSync + StateValue *states.SyncState + + once sync.Once +} + +// BuiltinEvalContext implements EvalContext +var _ EvalContext = (*BuiltinEvalContext)(nil) + +func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { + // This can happen during tests. During tests, we just block forever. 
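+	// (Receiving from a nil channel blocks forever, so returning nil here
+	// gives callers a stop channel that never fires.)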
+ if ctx.StopContext == nil { + return nil + } + + return ctx.StopContext.Done() +} + +func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + for _, h := range ctx.Hooks { + action, err := fn(h) + if err != nil { + return err + } + + switch action { + case HookActionContinue: + continue + case HookActionHalt: + // Return an early exit error to trigger an early exit + log.Printf("[WARN] Early exit triggered by hook: %T", h) + return EvalEarlyExitError{} + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Input() UIInput { + return ctx.InputValue +} + +func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { + ctx.once.Do(ctx.init) + absAddr := addr.Absolute(ctx.Path()) + + // If we already initialized, it is an error + if p := ctx.Provider(absAddr); p != nil { + return nil, fmt.Errorf("%s is already initialized", addr) + } + + // Warning: make sure to acquire these locks AFTER the call to Provider + // above, since it also acquires locks. + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := absAddr.String() + + p, err := ctx.Components.ResourceProvider(typeName, key) + if err != nil { + return nil, err + } + + log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) + ctx.ProviderCache[key] = p + + return p, nil +} + +func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + return ctx.ProviderCache[addr.String()] +} + +func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + ctx.once.Do(ctx.init) + + return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) +} + +func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := addr.Absolute(ctx.Path()).String() + provider := ctx.ProviderCache[key] + if provider != nil { + delete(ctx.ProviderCache, key) + return provider.Close() + } + + return nil +} + +func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + absAddr := addr.Absolute(ctx.Path()) + p := ctx.Provider(absAddr) + if p == nil { + diags = diags.Append(fmt.Errorf("%s not initialized", addr)) + return diags + } + + providerSchema := ctx.ProviderSchema(absAddr) + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) + return diags + } + + req := providers.ConfigureRequest{ + TerraformVersion: version.String(), + Config: cfg, + } + + resp := p.Configure(req) + return resp.Diagnostics +} + +func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + return nil + } + + return ctx.ProviderInputConfig[pc.String()] +} + +func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { + absProvider := pc.Absolute(ctx.Path()) + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. 
+ log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") + return + } + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderInputConfig[absProvider.String()] = c + ctx.ProviderLock.Unlock() +} + +func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provisioner(n); p != nil { + return nil, fmt.Errorf("Provisioner '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provisioner + // above, since it also acquires locks. + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + + p, err := ctx.Components.ResourceProvisioner(n, key) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[key] = p + + return p, nil +} + +func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + return ctx.ProvisionerCache[key] +} + +func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { + ctx.once.Do(ctx.init) + + return ctx.Schemas.ProvisionerConfig(n) +} + +func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + + prov := ctx.ProvisionerCache[key] + if prov != nil { + return prov.Close() + } + + return nil +} + +func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + scope := ctx.EvaluationScope(self, keyData) + body, evalDiags := scope.ExpandBlock(body, schema) + diags = diags.Append(evalDiags) + val, evalDiags := scope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + return val, body, diags +} + +func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) + return scope.EvalExpr(expr, wantType) +} + +func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + data := &evaluationStateData{ + Evaluator: ctx.Evaluator, + ModulePath: ctx.PathValue, + InstanceKeyData: keyData, + Operation: ctx.Evaluator.Operation, + } + return ctx.Evaluator.Scope(data, self) +} + +func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { + return ctx.PathValue +} + +func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() + + childPath := n.ModuleInstance(ctx.PathValue) + key := childPath.String() + + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = vals + return + } + + for k, v := range vals { + args[k] = v + } +} + +func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { + return ctx.ChangesValue +} + +func (ctx *BuiltinEvalContext) State() *states.SyncState { + return ctx.StateValue +} + +func (ctx *BuiltinEvalContext) init() { +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go new file mode 100644 index 00000000000..b7b8722dea4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go @@ -0,0 +1,319 @@ +package terraform + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// MockEvalContext is a mock version of EvalContext that can be used +// for tests. +type MockEvalContext struct { + StoppedCalled bool + StoppedValue <-chan struct{} + + HookCalled bool + HookHook Hook + HookError error + + InputCalled bool + InputInput UIInput + + InitProviderCalled bool + InitProviderType string + InitProviderAddr addrs.ProviderConfig + InitProviderProvider providers.Interface + InitProviderError error + + ProviderCalled bool + ProviderAddr addrs.AbsProviderConfig + ProviderProvider providers.Interface + + ProviderSchemaCalled bool + ProviderSchemaAddr addrs.AbsProviderConfig + ProviderSchemaSchema *ProviderSchema + + CloseProviderCalled bool + CloseProviderAddr addrs.ProviderConfig + CloseProviderProvider providers.Interface + + ProviderInputCalled bool + ProviderInputAddr addrs.ProviderConfig + ProviderInputValues map[string]cty.Value + + SetProviderInputCalled bool + SetProviderInputAddr addrs.ProviderConfig + SetProviderInputValues map[string]cty.Value + + ConfigureProviderCalled bool + ConfigureProviderAddr addrs.ProviderConfig + ConfigureProviderConfig cty.Value + ConfigureProviderDiags tfdiags.Diagnostics + + InitProvisionerCalled bool + InitProvisionerName string + InitProvisionerProvisioner provisioners.Interface + InitProvisionerError error + + ProvisionerCalled bool + ProvisionerName string + ProvisionerProvisioner provisioners.Interface + + ProvisionerSchemaCalled bool + ProvisionerSchemaName string + ProvisionerSchemaSchema *configschema.Block + + CloseProvisionerCalled bool + CloseProvisionerName string + CloseProvisionerProvisioner provisioners.Interface + + EvaluateBlockCalled bool + EvaluateBlockBody hcl.Body + EvaluateBlockSchema *configschema.Block + EvaluateBlockSelf addrs.Referenceable + EvaluateBlockKeyData InstanceKeyEvalData + EvaluateBlockResultFunc func( + body hcl.Body, + schema *configschema.Block, + self addrs.Referenceable, + keyData InstanceKeyEvalData, + ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateBlockResult cty.Value + EvaluateBlockExpandedBody hcl.Body + EvaluateBlockDiags tfdiags.Diagnostics + + EvaluateExprCalled bool + EvaluateExprExpr hcl.Expression + EvaluateExprWantType cty.Type + EvaluateExprSelf addrs.Referenceable + EvaluateExprResultFunc func( + expr hcl.Expression, + wantType cty.Type, + self addrs.Referenceable, + ) (cty.Value, tfdiags.Diagnostics) // overrides the other 
values below, if set + EvaluateExprResult cty.Value + EvaluateExprDiags tfdiags.Diagnostics + + EvaluationScopeCalled bool + EvaluationScopeSelf addrs.Referenceable + EvaluationScopeKeyData InstanceKeyEvalData + EvaluationScopeScope *lang.Scope + + PathCalled bool + PathPath addrs.ModuleInstance + + SetModuleCallArgumentsCalled bool + SetModuleCallArgumentsModule addrs.ModuleCallInstance + SetModuleCallArgumentsValues map[string]cty.Value + + ChangesCalled bool + ChangesChanges *plans.ChangesSync + + StateCalled bool + StateState *states.SyncState +} + +// MockEvalContext implements EvalContext +var _ EvalContext = (*MockEvalContext)(nil) + +func (c *MockEvalContext) Stopped() <-chan struct{} { + c.StoppedCalled = true + return c.StoppedValue +} + +func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + c.HookCalled = true + if c.HookHook != nil { + if _, err := fn(c.HookHook); err != nil { + return err + } + } + + return c.HookError +} + +func (c *MockEvalContext) Input() UIInput { + c.InputCalled = true + return c.InputInput +} + +func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { + c.InitProviderCalled = true + c.InitProviderType = t + c.InitProviderAddr = addr + return c.InitProviderProvider, c.InitProviderError +} + +func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + c.ProviderCalled = true + c.ProviderAddr = addr + return c.ProviderProvider +} + +func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + c.ProviderSchemaCalled = true + c.ProviderSchemaAddr = addr + return c.ProviderSchemaSchema +} + +func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { + c.CloseProviderCalled = true + c.CloseProviderAddr = addr + return nil +} + +func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + c.ConfigureProviderCalled = true + c.ConfigureProviderAddr = addr + c.ConfigureProviderConfig = cfg + return c.ConfigureProviderDiags +} + +func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { + c.ProviderInputCalled = true + c.ProviderInputAddr = addr + return c.ProviderInputValues +} + +func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { + c.SetProviderInputCalled = true + c.SetProviderInputAddr = addr + c.SetProviderInputValues = vals +} + +func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { + c.InitProvisionerCalled = true + c.InitProvisionerName = n + return c.InitProvisionerProvisioner, c.InitProvisionerError +} + +func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { + c.ProvisionerCalled = true + c.ProvisionerName = n + return c.ProvisionerProvisioner +} + +func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { + c.ProvisionerSchemaCalled = true + c.ProvisionerSchemaName = n + return c.ProvisionerSchemaSchema +} + +func (c *MockEvalContext) CloseProvisioner(n string) error { + c.CloseProvisionerCalled = true + c.CloseProvisionerName = n + return nil +} + +func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + c.EvaluateBlockCalled = true + c.EvaluateBlockBody = body + c.EvaluateBlockSchema = schema + c.EvaluateBlockSelf = self + c.EvaluateBlockKeyData = keyData + if c.EvaluateBlockResultFunc != 
nil { + return c.EvaluateBlockResultFunc(body, schema, self, keyData) + } + return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags +} + +func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + c.EvaluateExprCalled = true + c.EvaluateExprExpr = expr + c.EvaluateExprWantType = wantType + c.EvaluateExprSelf = self + if c.EvaluateExprResultFunc != nil { + return c.EvaluateExprResultFunc(expr, wantType, self) + } + return c.EvaluateExprResult, c.EvaluateExprDiags +} + +// installSimpleEval is a helper to install a simple mock implementation of +// both EvaluateBlock and EvaluateExpr into the receiver. +// +// These default implementations will either evaluate the given input against +// the scope in field EvaluationScopeScope or, if it is nil, with no eval +// context at all so that only constant values may be used. +// +// This function overwrites any existing functions installed in fields +// EvaluateBlockResultFunc and EvaluateExprResultFunc. +func (c *MockEvalContext) installSimpleEval() { + c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + var diags tfdiags.Diagnostics + body, diags = scope.ExpandBlock(body, schema) + if diags.HasErrors() { + return cty.DynamicVal, body, diags + } + val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return cty.DynamicVal, body, diags + } + return val, body, diags + } + + // Fallback codepath supporting constant values only. + val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) + return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) + } + c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + return scope.EvalExpr(expr, wantType) + } + + // Fallback codepath supporting constant values only. 
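+ //
+ // Editor's note (illustrative, not part of the vendored source): with a
+ // nil *hcl.EvalContext, only expressions that need no variables or
+ // functions can succeed. A literal like "hello" or [1, 2, 3] evaluates
+ // fine, while a reference such as var.foo produces error diagnostics
+ // because there is nothing to resolve it against.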
+ var diags tfdiags.Diagnostics + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(err) + return cty.DynamicVal, diags + } + return val, diags + } +} + +func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + c.EvaluationScopeCalled = true + c.EvaluationScopeSelf = self + c.EvaluationScopeKeyData = keyData + return c.EvaluationScopeScope +} + +func (c *MockEvalContext) Path() addrs.ModuleInstance { + c.PathCalled = true + return c.PathPath +} + +func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { + c.SetModuleCallArgumentsCalled = true + c.SetModuleCallArgumentsModule = n + c.SetModuleCallArgumentsValues = values +} + +func (c *MockEvalContext) Changes() *plans.ChangesSync { + c.ChangesCalled = true + return c.ChangesChanges +} + +func (c *MockEvalContext) State() *states.SyncState { + c.StateCalled = true + return c.StateState +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go new file mode 100644 index 00000000000..52205fec002 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go @@ -0,0 +1,120 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// evaluateResourceCountExpression is our standard mechanism for interpreting an +// expression given for a "count" argument on a resource. This should be called +// from the DynamicExpand of a node representing a resource in order to +// determine the final count value. +// +// If the result is zero or positive and no error diagnostics are returned, then +// the result is the literal count value to use. +// +// If the result is -1, this indicates that the given expression is nil and so +// the "count" behavior should not be enabled for this resource at all. +// +// If error diagnostics are returned then the result is always the meaningless +// placeholder value -1. +func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { + count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) + if !known { + // Currently this is a rather bad outcome from a UX standpoint, since we have + // no real mechanism to deal with this situation and all we can do is produce + // an error message. + // FIXME: In future, implement a built-in mechanism for deferring changes that + // can't yet be predicted, and use it to guide the user through several + // plan/apply steps until the desired configuration is eventually reached. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. 
To work around this, use the -target argument to first apply only the resources that the count depends on.`, + Subject: expr.Range().Ptr(), + }) + } + return count, diags +} + +// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression +// except that it handles an unknown result by returning count = 0 and +// a known = false, rather than by reporting the unknown value as an error +// diagnostic. +func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { + if expr == nil { + return -1, true, nil + } + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return -1, true, diags + } + + switch { + case countVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + case !countVal.IsKnown(): + return 0, false, diags + } + + err := gocty.FromCtyValue(countVal, &count) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + } + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + } + + return count, true, diags +} + +// fixResourceCountSetTransition is a helper function to fix up the state when a +// resource transitions its "count" from being set to unset or vice-versa, +// treating a 0-key and a no-key instance as aliases for one another across +// the transition. +// +// The correct time to call this function is in the DynamicExpand method for +// a node representing a resource, just after evaluating the count with +// evaluateResourceCountExpression, and before any other analysis of the +// state such as orphan detection. +// +// This function calls methods on the given EvalContext to update the current +// state in-place, if necessary. It is a no-op if there is no count transition +// taking place. +// +// Since the state is modified in-place, this function must take a writer lock +// on the state. The caller must therefore not also be holding a state lock, +// or this function will block forever awaiting the lock. 
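+//
+// Editor's note (illustrative, not part of the vendored source): the
+// transition handled here is adding or removing "count" on an existing
+// resource, e.g.
+//
+//     resource "null_resource" "a" {}            # tracked as null_resource.a
+//     resource "null_resource" "a" { count = 1 } # tracked as null_resource.a[0]
+//
+// Treating the no-key and zero-key instances as aliases for one another
+// lets such a change be handled in place rather than as a destroy/create
+// pair.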
+func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { + state := ctx.State() + changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) + if changed { + log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go new file mode 100644 index 00000000000..aac380632ae --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state +// when there is a resource count with zero/one boundary, i.e. fixing +// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. +// +// This works on the global state. +type EvalCountFixZeroOneBoundaryGlobal struct { + Config *configs.Config +} + +// TODO: test +func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { + // We'll temporarily lock the state to grab the modules, then work on each + // one separately while taking a lock again for each separate resource. + // This means that if another caller concurrently adds a module here while + // we're working then we won't update it, but that's no worse than the + // concurrent writer blocking for our entire fixup process and _then_ + // adding a new module, and in practice the graph node associated with + // this eval depends on everything else in the graph anyway, so there + // should not be concurrent writers. + state := ctx.State().Lock() + moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) + for _, m := range state.Modules { + moduleAddrs = append(moduleAddrs, m.Addr) + } + ctx.State().Unlock() + + for _, addr := range moduleAddrs { + cfg := n.Config.DescendentForInstance(addr) + if cfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + if err := n.fixModule(ctx, addr); err != nil { + return nil, err + } + } + + return nil, nil +} + +func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { + ms := ctx.State().Module(moduleAddr) + cfg := n.Config.DescendentForInstance(moduleAddr) + if ms == nil { + // Theoretically possible for a concurrent writer to delete a module + // while we're running, but in practice the graph node that called us + // depends on everything else in the graph and so there can never + // be a concurrent writer. 
+ return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) + } + if cfg == nil { + return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) + } + + for _, r := range ms.Resources { + addr := r.Addr.Absolute(moduleAddr) + rCfg := cfg.Module.ResourceByAddr(r.Addr) + if rCfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + hasCount := rCfg.Count != nil + fixResourceCountSetTransition(ctx, addr, hasCount) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go new file mode 100644 index 00000000000..afab5790992 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go @@ -0,0 +1,783 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalCheckPlannedChange is an EvalNode implementation that produces errors +// if the _actual_ change value is not compatible with what was recorded +// in the plan. +// +// Errors here are most often indicative of a bug in the provider, so our +// error messages will report with that in mind. It's also possible that +// there's a bug in Terraform Core's own "proposed new value" code in +// EvalDiff. +type EvalCheckPlannedChange struct { + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // We take ResourceInstanceChange objects here just because that's what's + // convenient to pass in from the evaltree implementation, but we really + // only look at the "After" value of each change. + Planned, Actual **plans.ResourceInstanceChange +} + +func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + plannedChange := *n.Planned + actualChange := *n.Actual + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) + } + + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all.
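+ //
+ // Editor's note (illustrative, not part of the vendored source): for
+ // example, an attribute planned as (known after apply) may, once its
+ // dependencies have been applied, resolve to exactly the value already
+ // stored, so the provider legitimately reports NoOp instead of Update.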
+ log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, + plannedChange.Action, actualChange.Action, + ), + )) + } + } + + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + return nil, diags.Err() +} + +// EvalDiff is an EvalNode implementation that detects changes for a given +// resource instance. +type EvalDiff struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + PreviousDiff **plans.ResourceInstanceChange + + // CreateBeforeDestroy is set if either the resource's own config sets + // create_before_destroy explicitly or if dependencies have forced the + // resource to be handled as create_before_destroy in order to avoid + // a dependency cycle. 
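+ //
+ // (Editor's note: this flag is consulted when a replace action is chosen
+ // below, selecting plans.CreateThenDelete ordering instead of the
+ // default plans.DeleteThenCreate.)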
+ CreateBeforeDestroy bool + + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputState **states.ResourceInstanceObject + + Stub bool +} + +// TODO: test +func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + + if providerSchema == nil { + return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) + } + if n.ProviderAddr.ProviderConfig.Type == "" { + panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) + } + + var diags tfdiags.Diagnostics + + // Evaluate the configuration + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + absAddr := n.Addr.Absolute(ctx.Path()) + var priorVal cty.Value + var priorValTainted cty.Value + var priorPrivate []byte + if state != nil { + if state.Status != states.ObjectTainted { + priorVal = state.Value + priorPrivate = state.Private + } else { + // If the prior state is tainted then we'll proceed below like + // we're creating an entirely new object, but then turn it into + // a synthetic "Replace" change at the end, creating the same + // result as if the provider had marked at least one argument + // change as "requires replacement". + priorValTainted = state.Value + priorVal = cty.NullVal(schema.ImpliedType()) + } + } else { + priorVal = cty.NullVal(schema.ImpliedType()) + } + + proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) + + // Call pre-diff hook + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + } + + log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) + // Allow the provider to validate the final set of values. + // The config was statically validated early on, but there may have been + // unknown values which the provider could not validate at the time. + validateResp := provider.ValidateResourceTypeConfig( + providers.ValidateResourceTypeConfigRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() + } + + // The provider gets an opportunity to customize the proposed new value, + // which in turn produces the _planned_ new value. 
But before + // we send back this information, we need to process ignore_changes + // so that CustomizeDiff will not act on them + var ignoreChangeDiags tfdiags.Diagnostics + proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return nil, diags.Err() + } + + resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: priorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: priorPrivate, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + plannedNewVal := resp.PlannedState + plannedPrivate := resp.PlannedPrivate + + if plannedNewVal == cty.NilVal { + // Should never happen. Since real-world providers return via RPC a nil + // is always a bug in the client-side stub. This is more likely caused + // by an incompletely-configured mock provider in tests, though. + panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) + } + + // We allow the planned new value to disagree with configuration _values_ + // here, since that allows the provider to do special logic like a + // DiffSuppressFunc, but we still require that the provider produces + // a value whose type conforms to the schema. + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. 
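+ //
+ // Editor's note (illustrative, not part of the vendored source): a
+ // common example is a legacy-SDK provider planning "" or 0 for an
+ // attribute whose prior value was null, since the old type system could
+ // not distinguish unset attributes from zero values.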
+ var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + return nil, diags.Err() + } + } + + // TODO: We should be able to remove this repeat of processing ignored changes + // after the plan, which helps providers relying on old behavior "just work" + // in the next major version, such that we can be stricter about ignore_changes + // values + plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return nil, diags.Err() + } + + // The provider produces a list of paths to attributes whose changes mean + // that we must replace rather than update an existing remote object. + // However, we only need to do that if the identified attributes _have_ + // actually changed -- particularly after we may have undone some of the + // changes in processIgnoreChanges -- so now we'll filter that list to + // include only where changes are detected. + reqRep := cty.NewPathSet() + if len(resp.RequiresReplace) > 0 { + for _, path := range resp.RequiresReplace { + if priorVal.IsNull() { + // If prior is null then we don't expect any RequiresReplace at all, + // because this is a Create action. + continue + } + + priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil) + plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) + if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { + // This means the path was invalid in both the prior and new + // values, which is an error with the provider itself. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, path, + ), + )) + continue + } + + // Make sure we have valid Values for both values. + // Note: if the opposing value was of the type + // cty.DynamicPseudoType, the type assigned here may not exactly + // match the schema. This is fine here, since we're only going to + // check for equality, but if the NullVal is to be used, we need to + // check the schema for the true type.
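+ //
+ // (Editor's note: cty.NilVal is Go's zero Value, meaning "no value at
+ // all", while cty.NullVal(T) is a real null of type T; the switch below
+ // normalizes NilVal results from ApplyPath into typed nulls so that the
+ // equality comparison that follows is well-defined.)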
+ switch { + case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: + // this should never happen without ApplyPath errors above + panic("requires replace path returned 2 nil values") + case priorChangedVal == cty.NilVal: + priorChangedVal = cty.NullVal(plannedChangedVal.Type()) + case plannedChangedVal == cty.NilVal: + plannedChangedVal = cty.NullVal(priorChangedVal.Type()) + } + + eqV := plannedChangedVal.Equals(priorChangedVal) + if !eqV.IsKnown() || eqV.False() { + reqRep.Add(path) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } + } + + eqV := plannedNewVal.Equals(priorVal) + eq := eqV.IsKnown() && eqV.True() + + var action plans.Action + switch { + case priorVal.IsNull(): + action = plans.Create + case eq: + action = plans.NoOp + case !reqRep.Empty(): + // If there are any "requires replace" paths left _after our filtering + // above_ then this is a replace action. + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + default: + action = plans.Update + // "Delete" is never chosen here, because deletion plans are always + // created more directly elsewhere, such as in "orphan" handling. + } + + if action.IsReplace() { + // In this strange situation we want to produce a change object that + // shows our real prior object but has a _new_ object that is built + // from a null prior object, since we're going to delete the one + // that has all the computed values on it. + // + // Therefore we'll ask the provider to plan again here, giving it + // a null object for the prior, and then we'll meld that with the + // _actual_ prior state to produce a correctly-shaped replace change. + // The resulting change should show any computed attributes changing + // from known prior values to unknown values, unless the provider is + // able to predict new values for any of these computed attributes. + nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: nullPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: plannedPrivate, + }) + // We need to tread carefully here, since if there are any warnings + // in here they probably also came out of our previous call to + // PlanResourceChange above, and so we don't want to repeat them. + // Consequently, we break from the usual pattern here and only + // append these new diagnostics if there's at least one error inside. + if resp.Diagnostics.HasErrors() { + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + return nil, diags.Err() + } + plannedNewVal = resp.PlannedState + plannedPrivate = resp.PlannedPrivate + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + } + + // If our prior value was tainted then we actually want this to appear + // as a replace change, even though so far we've been treating it as a + // create. 
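+ //
+ // (Editor's note: this is the path taken after e.g. "terraform taint",
+ // where the remote object still exists but has been marked for
+ // replacement, so the plan should present a replace rather than a plain
+ // create.)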
+ if action == plans.Create && priorValTainted != cty.NilVal { + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + priorVal = priorValTainted + } + + // As a special case, if we have a previous diff (presumably from the plan + // phases, whereas we're now in the apply phase) and it was for a replace, + // we've already deleted the original object from state by the time we + // get here and so we would've ended up with a _create_ action this time, + // which we now need to paper over to get a result consistent with what + // we originally intended. + if n.PreviousDiff != nil { + prevChange := *n.PreviousDiff + if prevChange.Action.IsReplace() && action == plans.Create { + log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) + action = prevChange.Action + priorVal = prevChange.Before + } + } + + // Call post-refresh hook + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) + }) + if err != nil { + return nil, err + } + } + + // Update our output if we care + if n.OutputChange != nil { + *n.OutputChange = &plans.ResourceInstanceChange{ + Addr: absAddr, + Private: plannedPrivate, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: action, + Before: priorVal, + After: plannedNewVal, + }, + RequiredReplace: reqRep, + } + } + + if n.OutputValue != nil { + *n.OutputValue = configVal + } + + // Update the state if we care + if n.OutputState != nil { + *n.OutputState = &states.ResourceInstanceObject{ + // We use the special "planned" status here to note that this + // object's value is not yet complete. Objects with this status + // cannot be used during expression evaluation, so the caller + // must _also_ record the returned change in the active plan, + // which the expression evaluator will use in preference to this + // incomplete value recorded in the state. + Status: states.ObjectPlanned, + Value: plannedNewVal, + Private: plannedPrivate, + } + } + + return nil, nil +} + +func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { + // ignore_changes only applies when an object already exists, since we + // can't ignore changes to a thing we've not created yet. + if prior.IsNull() { + return proposed, nil + } + + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges + + if len(ignoreChanges) == 0 && !ignoreAll { + return proposed, nil + } + if ignoreAll { + return prior, nil + } + if prior.IsNull() || proposed.IsNull() { + // Ignore changes doesn't apply when we're creating for the first time. + // Proposed should never be null here, but if it is then we'll just let it be. + return proposed, nil + } + + return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) +} + +func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { + // When we walk below we will be using cty.Path values for comparison, so + // we'll convert our traversals here so we can compare more easily. 
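+ //
+ // Editor's note (illustrative, not part of the vendored source): an
+ // ignore_changes entry written as tags["Name"] arrives as an
+ // hcl.Traversal (a root step "tags" followed by an index step) and is
+ // converted below into the equivalent cty.Path, roughly
+ //
+ //     cty.GetAttrPath("tags").Index(cty.StringVal("Name"))
+ //
+ // so that it can be compared against the paths visited by cty.Transform.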
+ ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) + for i, traversal := range ignoreChanges { + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + ignoreChangesPath[i] = path + } + + var diags tfdiags.Diagnostics + ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { + // First we must see if this is a path that's being ignored at all. + // We're looking for an exact match here because this walk will visit + // leaf values first and then their containers, and we want to do + // the "ignore" transform once we reach the point indicated, throwing + // away any deeper values we already produced at that point. + var ignoreTraversal hcl.Traversal + for i, candidate := range ignoreChangesPath { + if path.Equals(candidate) { + ignoreTraversal = ignoreChanges[i] + } + } + if ignoreTraversal == nil { + return v, nil + } + + // If we're able to follow the same path through the prior value, + // we'll take the value there instead, effectively undoing the + // change that was planned. + priorV, diags := hcl.ApplyPath(prior, path, nil) + if diags.HasErrors() { + // We just ignore the errors and move on here, since we assume it's + // just because the prior value was a slightly-different shape. + // It could potentially also be that the traversal doesn't match + // the schema, but we should've caught that during the validate + // walk if so. + return v, nil + } + return priorV, nil + }) + return ret, diags +} + +// EvalDiffDestroy is an EvalNode implementation that returns a plain +// destroy diff. +type EvalDiffDestroy struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + State **states.ResourceInstanceObject + ProviderAddr addrs.AbsProviderConfig + + Output **plans.ResourceInstanceChange + OutputState **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := *n.State + + if n.ProviderAddr.ProviderConfig.Type == "" { + if n.DeposedKey == "" { + panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) + } else { + panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) + } + } + + // If there is no state or our attributes object is null then we're already + // destroyed. + if state == nil || state.Value.IsNull() { + return nil, nil + } + + // Call pre-diff hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff( + absAddr, n.DeposedKey.Generation(), + state.Value, + cty.NullVal(cty.DynamicPseudoType), + ) + }) + if err != nil { + return nil, err + } + + // Change is always the same for a destroy. We don't need the provider's + // help for this one. + // TODO: Should we give the provider an opportunity to veto this? 
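+ //
+ // (Editor's note: the After value below is cty.NullVal of
+ // cty.DynamicPseudoType, a typed null meaning "no object at all", rather
+ // than an object whose attributes all happen to be null.)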
+ change := &plans.ResourceInstanceChange{ + Addr: absAddr, + DeposedKey: n.DeposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: state.Value, + After: cty.NullVal(cty.DynamicPseudoType), + }, + Private: state.Private, + ProviderAddr: n.ProviderAddr, + } + + // Call post-diff hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff( + absAddr, + n.DeposedKey.Generation(), + change.Action, + change.Before, + change.After, + ) + }) + if err != nil { + return nil, err + } + + // Update our output + *n.Output = change + + if n.OutputState != nil { + // Record our proposed new state, which is nil because we're destroying. + *n.OutputState = nil + } + + return nil, nil +} + +// EvalReduceDiff is an EvalNode implementation that takes a planned resource +// instance change as might be produced by EvalDiff or EvalDiffDestroy and +// "simplifies" it to a single atomic action to be performed by a specific +// graph node. +// +// Callers must specify whether they are a destroy node or a regular apply +// node. If the result is NoOp then the given change requires no action for +// the specific graph node calling this and so evaluation of that graph +// node should exit early and take no action. +// +// The object written to OutChange may either be identical to InChange or +// a new change object derived from InChange. Because of the former case, the +// caller must not mutate the object returned in OutChange. +type EvalReduceDiff struct { + Addr addrs.ResourceInstance + InChange **plans.ResourceInstanceChange + Destroy bool + OutChange **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) { + in := *n.InChange + out := in.Simplify(n.Destroy) + if n.OutChange != nil { + *n.OutChange = out + } + if out.Action != in.Action { + if n.Destroy { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action) + } else { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action) + } + } + return nil, nil +} + +// EvalReadDiff is an EvalNode implementation that retrieves the planned +// change for a particular resource instance object.
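+//
+// Editor's note (illustrative, not part of the vendored source): planned
+// changes are stored in the plan in an encoded form, so reading one back
+// requires the resource type's schema to decode it, as in
+//
+//     change, err := csrc.Decode(schema.ImpliedType())
+//
+// below, which is why this node carries ProviderSchema in addition to the
+// instance address.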
+type EvalReadDiff struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange +} + +func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + csrc := changes.GetResourceInstanceChange(addr, gen) + if csrc == nil { + log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) + return nil, nil + } + + change, err := csrc.Decode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) + } + if n.Change != nil { + *n.Change = change + } + + log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) + + return nil, nil +} + +// EvalWriteDiff is an EvalNode implementation that saves a planned change +// for an instance object into the set of global planned changes. +type EvalWriteDiff struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + if n.Change == nil || *n.Change == nil { + // Caller sets nil to indicate that we need to remove a change from + // the set of changes. + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + changes.RemoveResourceInstanceChange(addr, gen) + return nil, nil + } + + providerSchema := *n.ProviderSchema + change := *n.Change + + if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { + // Should never happen, and indicates a bug in the caller. 
+ panic("inconsistent address and/or deposed key in EvalWriteDiff") + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + csrc, err := change.Encode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) + } + + changes.AppendResourceInstanceChange(csrc) + if n.DeposedKey == states.NotDeposed { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) + } else { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) + } + + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go new file mode 100644 index 00000000000..470f798b7fc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go @@ -0,0 +1,20 @@ +package terraform + +// EvalReturnError is an EvalNode implementation that returns an +// error if it is present. +// +// This is useful for scenarios where an error has been captured by +// another EvalNode (like EvalApply) for special EvalTree-based error +// handling, and that handling has completed, so the error should be +// returned normally. +type EvalReturnError struct { + Error *error +} + +func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) { + if n.Error == nil { + return nil, nil + } + + return nil, *n.Error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go new file mode 100644 index 00000000000..711c625c836 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go @@ -0,0 +1,25 @@ +package terraform + +// EvalNodeFilterFunc is the callback used to replace a node with +// another node. To not do the replacement, just return the input node. +type EvalNodeFilterFunc func(EvalNode) EvalNode + +// EvalNodeFilterable is an interface that can be implemented by +// EvalNodes to allow filtering of sub-elements. Note that this isn't +// a common thing to implement and you probably don't need it. +type EvalNodeFilterable interface { + EvalNode + Filter(EvalNodeFilterFunc) +} + +// EvalFilter runs the filter on the given node and returns the +// final filtered value. This should be called rather than checking +// the EvalNode directly since this will properly handle EvalNodeFilterables.
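+//
+// Editor's note (illustrative sketch, not part of the vendored source):
+// a typical use pairs this with EvalNodeFilterOp from
+// eval_filter_operation.go, e.g.
+//
+//     filtered := EvalFilter(tree, EvalNodeFilterOp(walkApply))
+//
+// where tree is a hypothetical root EvalNode; filterable container nodes
+// recurse into their children, while plain nodes are passed directly to
+// the filter function.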
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode { + if f, ok := node.(EvalNodeFilterable); ok { + f.Filter(fn) + return node + } + + return fn(node) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go new file mode 100644 index 00000000000..1a55f024a81 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go @@ -0,0 +1,49 @@ +package terraform + +// EvalNodeOpFilterable is an interface that EvalNodes can implement +// to be filterable by the operation that is being run on Terraform. +type EvalNodeOpFilterable interface { + IncludeInOp(walkOperation) bool +} + +// EvalNodeFilterOp returns a filter function that filters nodes that +// include themselves in specific operations. +func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc { + return func(n EvalNode) EvalNode { + include := true + if of, ok := n.(EvalNodeOpFilterable); ok { + include = of.IncludeInOp(op) + } + if include { + return n + } + + return EvalNoop{} + } +} + +// EvalOpFilter is an EvalNode implementation that is a proxy to +// another node but filters based on the operation. +type EvalOpFilter struct { + // Ops is the list of operations to include this node in. + Ops []walkOperation + + // Node is the node to execute + Node EvalNode +} + +// TODO: test +func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) { + return EvalRaw(n.Node, ctx) +} + +// EvalNodeOpFilterable impl. +func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { + for _, v := range n.Ops { + if v == op { + return true + } + } + + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go new file mode 100644 index 00000000000..4f7c340d29c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// evaluateResourceForEachExpression interprets a "for_each" argument on a resource. +// +// Returns a cty.Value map, and diagnostics if necessary. It will return nil if +// the expression is nil, and is used to distinguish between an unset for_each and an +// empty map. +func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { + forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx) + if !known { + // Attach a diag as we do with count, with the same downsides + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created.
To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, + Subject: expr.Range().Ptr(), + }) + } + return forEachMap, diags +} + +// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression +// except that it handles an unknown result by returning an empty map and +// a known = false, rather than by reporting the unknown value as an error +// diagnostic. +func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) { + if expr == nil { + return nil, true, nil + } + + forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + diags = diags.Append(forEachDiags) + if diags.HasErrors() { + return nil, true, diags + } + + switch { + case forEachVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. A map, or set of strings is allowed.`, + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + case !forEachVal.IsKnown(): + return map[string]cty.Value{}, false, diags + } + + if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() || forEachVal.Type().IsTupleType() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()), + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + } + + // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap + // This also covers an empty set (toset([])) + if forEachVal.LengthInt() == 0 { + return map[string]cty.Value{}, true, diags + } + + if forEachVal.Type().IsSetType() { + if forEachVal.Type().ElementType() != cty.String { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + } + } + + return forEachVal.AsValueMap(), true, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go new file mode 100644 index 00000000000..d6b46a1f222 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go @@ -0,0 +1,26 @@ +package terraform + +// EvalIf is an EvalNode that is a conditional. 
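+//
+// Editor's note (illustrative sketch, not part of the vendored source):
+// a typical construction, with evalA and evalB standing in for real
+// EvalNode values:
+//
+//     &EvalIf{
+//         If:   func(ctx EvalContext) (bool, error) { return someCond, nil },
+//         Then: evalA,
+//         Else: evalB, // optional; a nil Else means "do nothing"
+//     }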
+type EvalIf struct { + If func(EvalContext) (bool, error) + Then EvalNode + Else EvalNode +} + +// TODO: test +func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { + yes, err := n.If(ctx) + if err != nil { + return nil, err + } + + if yes { + return EvalRaw(n.Then, ctx) + } else { + if n.Else != nil { + return EvalRaw(n.Else, ctx) + } + } + + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go new file mode 100644 index 00000000000..25a2aae06b1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalImportState is an EvalNode implementation that performs an +// ImportState operation on a provider. This will return the imported +// states but won't modify any actual state. +type EvalImportState struct { + Addr addrs.ResourceInstance + Provider *providers.Interface + ID string + Output *[]providers.ImportedResource +} + +// TODO: test +func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + provider := *n.Provider + var diags tfdiags.Diagnostics + + { + // Call pre-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreImportState(absAddr, n.ID) + }) + if err != nil { + return nil, err + } + } + + resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: n.Addr.Resource.Type, + ID: n.ID, + }) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + imported := resp.ImportedResources + + for _, obj := range imported { + log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) + } + + if n.Output != nil { + *n.Output = imported + } + + { + // Call post-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostImportState(absAddr, imported) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalImportStateVerify verifies the state after ImportState and +// after the refresh to make sure it is non-nil and valid. +type EvalImportStateVerify struct { + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + state := *n.State + if state.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. 
Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", + n.Addr.String(), + ), + )) + } + + return nil, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go new file mode 100644 index 00000000000..fe99847b23c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go @@ -0,0 +1,61 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// EvalConfigBlock is an EvalNode implementation that takes a raw +// configuration block and evaluates any expressions within it. +// +// ExpandedConfig is populated with the result of expanding any "dynamic" +// blocks in the given body, which can be useful for extracting correct source +// location information for specific attributes in the result. +type EvalConfigBlock struct { + Config *hcl.Body + Schema *configschema.Block + SelfAddr addrs.Referenceable + Output *cty.Value + ExpandedConfig *hcl.Body + ContinueOnErr bool +} + +func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { + val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) + if diags.HasErrors() && n.ContinueOnErr { + log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) + return nil, EvalEarlyExitError{} + } + + if n.Output != nil { + *n.Output = val + } + if n.ExpandedConfig != nil { + *n.ExpandedConfig = body + } + + return nil, diags.ErrWithWarnings() +} + +// EvalConfigExpr is an EvalNode implementation that takes a raw configuration +// expression and evaluates it. +type EvalConfigExpr struct { + Expr hcl.Expression + SelfAddr addrs.Referenceable + Output *cty.Value +} + +func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) + + if n.Output != nil { + *n.Output = val + } + + return nil, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go new file mode 100644 index 00000000000..28eaf5fd9db --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go @@ -0,0 +1,74 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalLocal is an EvalNode implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. 
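One detail worth noting in the EvalLocal node that follows is the self-reference guard: before evaluating the expression, it scans the expression's own references and rejects any that point back at the local being defined. A reduced sketch of that check, where a plain struct stands in for addrs.LocalValue and a slice stands in for the result of lang.ReferencesInExpr:

```go
package main

import "fmt"

// LocalValue is a stand-in for addrs.LocalValue, for illustration only.
type LocalValue struct{ Name string }

// checkSelfReference mirrors the guard at the top of EvalLocal.Eval: a local
// value may not use its own result as part of its expression.
func checkSelfReference(addr LocalValue, refs []LocalValue) error {
	for _, ref := range refs {
		if ref == addr {
			return fmt.Errorf("local value local.%s cannot use its own result as part of its expression", addr.Name)
		}
	}
	return nil
}

func main() {
	refs := []LocalValue{{"env"}, {"region"}} // references found in the expression
	fmt.Println(checkSelfReference(LocalValue{"region"}, refs)) // self-reference error
	fmt.Println(checkSelfReference(LocalValue{"zone"}, refs))   // nil: no self-reference
}
```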
+type EvalLocal struct { + Addr addrs.LocalValue + Expr hcl.Expression +} + +func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + // We ignore diags here because any problems we might find will be found + // again in EvaluateExpr below. + refs, _ := lang.ReferencesInExpr(n.Expr) + for _, ref := range refs { + if ref.Subject == n.Addr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referencing local value", + Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), + Subject: ref.SourceRange.ToHCL().Ptr(), + Context: n.Expr.Range().Ptr(), + }) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } + + val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags.Err() + } + + state := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") + } + + state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) + + return nil, nil +} + +// EvalDeleteLocal is an EvalNode implementation that deletes a Local value +// from the state. Locals aren't persisted, but we don't need to evaluate them +// during destroy. +type EvalDeleteLocal struct { + Addr addrs.LocalValue +} + +func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { + state := ctx.State() + if state == nil { + return nil, nil + } + + state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go new file mode 100644 index 00000000000..f4bc8225c54 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go @@ -0,0 +1,8 @@ +package terraform + +// EvalNoop is an EvalNode that does nothing. +type EvalNoop struct{} + +func (EvalNoop) Eval(EvalContext) (interface{}, error) { + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go new file mode 100644 index 00000000000..6d2db068cdd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go @@ -0,0 +1,135 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// EvalDeleteOutput is an EvalNode implementation that deletes an output +// from the state. +type EvalDeleteOutput struct { + Addr addrs.OutputValue +} + +// TODO: test +func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { + state := ctx.State() + if state == nil { + return nil, nil + } + + state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) + return nil, nil +} + +// EvalWriteOutput is an EvalNode implementation that writes the output +// for the given name to the current state. 
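The subtlety in the node declared below (see setValue further down) is that the state file cannot represent unknown values while the planned changeset can, so the same output value is stored differently in each. A small runnable sketch of that split, using the go-cty library the SDK already depends on:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An output whose value is not yet known, e.g. because it depends on a
	// resource that has not been applied yet.
	val := cty.UnknownVal(cty.String)

	stateVal := cty.UnknownAsNull(val) // what would be written to the state
	planVal := val                     // what would be recorded in the changeset

	fmt.Println("state value is null:", stateVal.IsNull()) // true: unknown became null
	fmt.Println("plan value is known:", planVal.IsKnown()) // false: plan keeps the unknown
}
```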
+type EvalWriteOutput struct { + Addr addrs.OutputValue + Sensitive bool + Expr hcl.Expression + // ContinueOnErr allows interpolation to fail during Input + ContinueOnErr bool +} + +// TODO: test +func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + addr := n.Addr.Absolute(ctx.Path()) + + // This has to run before we have a state lock, since evaluation also + // reads the state + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + // We'll handle errors below, after we have loaded the module. + + state := ctx.State() + if state == nil { + return nil, nil + } + + changes := ctx.Changes() // may be nil, if we're not working on a changeset + + // handling the interpolation error + if diags.HasErrors() { + if n.ContinueOnErr || flagWarnOutputErrors { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) + // if we're continuing, make sure the output is included, and + // marked as unknown. If the evaluator was able to find a type + // for the value in spite of the error then we'll use it. + n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) + return nil, EvalEarlyExitError{} + } + return nil, diags.Err() + } + + n.setValue(addr, state, changes, val) + + return nil, nil +} + +func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { + if val.IsKnown() && !val.IsNull() { + // The state itself doesn't represent unknown values, so we null them + // out here and then we'll save the real unknown value in the planned + // changeset below, if we have one on this graph walk. + log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) + stateVal := cty.UnknownAsNull(val) + state.SetOutputValue(addr, stateVal, n.Sensitive) + } else { + log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) + state.RemoveOutputValue(addr) + } + + // If we also have an active changeset then we'll replicate the value in + // there. This is used in preference to the state where present, since it + // *is* able to represent unknowns, while the state cannot. + if changes != nil { + // For the moment we are not properly tracking changes to output + // values, and just marking them always as "Create" or "Destroy" + // actions. A future release will rework the output lifecycle so we + // can track their changes properly, in a similar way to how we work + // with resource instances. + + var change *plans.OutputChange + if !val.IsNull() { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + Action: plans.Create, + Before: cty.NullVal(cty.DynamicPseudoType), + After: val, + }, + } + } else { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + // This is just a weird placeholder delete action since + // we don't have an actual prior value to indicate. + // FIXME: Generate real planned changes for output values + // that include the old values. 
+ Action: plans.Delete, + Before: cty.NullVal(cty.DynamicPseudoType), + After: cty.NullVal(cty.DynamicPseudoType), + }, + } + } + + cs, err := change.Encode() + if err != nil { + // Should never happen, since we just constructed this right above + panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) + } + log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) + changes.RemoveOutputChange(addr) // remove any existing planned change, if present + changes.AppendOutputChange(cs) // add the new planned change + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go new file mode 100644 index 00000000000..9c29aafd8e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go @@ -0,0 +1,147 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body { + var configBody hcl.Body + if config != nil { + configBody = config.Config + } + + var inputBody hcl.Body + inputConfig := ctx.ProviderInput(addr) + if len(inputConfig) > 0 { + inputBody = configs.SynthBody("", inputConfig) + } + + switch { + case configBody != nil && inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) + // Note that the inputBody is the _base_ here, because configs.MergeBodies + // expects the base to have all of the required fields, while these are + // forced to be optional for the override. The input process should + // guarantee that we have a value for each of the required arguments and + // that in practice the sets of attributes in each body will be + // disjoint. + return configs.MergeBodies(inputBody, configBody) + case configBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) + return configBody + case inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) + return inputBody + default: + log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) + return hcl.EmptyBody() + } +} + +// EvalConfigProvider is an EvalNode implementation that configures +// a provider that is already initialized and retrieved.
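The precedence in buildProviderConfig above is worth pausing on: collected input is the base, explicit configuration overrides it, and each is used alone when the other is absent. A toy model of those rules, with maps standing in for hcl.Body values since configs.MergeBodies is internal to the SDK:

```go
package main

import "fmt"

// buildConfig models buildProviderConfig's switch: input is the base and
// explicit config wins on conflicts, mirroring MergeBodies(inputBody, configBody).
func buildConfig(explicit, input map[string]string) map[string]string {
	switch {
	case explicit != nil && input != nil:
		merged := make(map[string]string, len(input)+len(explicit))
		for k, v := range input { // input is the base...
			merged[k] = v
		}
		for k, v := range explicit { // ...and explicit config overrides it
			merged[k] = v
		}
		return merged
	case explicit != nil:
		return explicit
	case input != nil:
		return input
	default:
		return map[string]string{} // no configuration at all
	}
}

func main() {
	explicit := map[string]string{"region": "us-east-1"}
	input := map[string]string{"region": "us-west-2", "profile": "dev"}
	fmt.Println(buildConfig(explicit, input)) // map[profile:dev region:us-east-1]
}
```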
+type EvalConfigProvider struct { + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider +} + +func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil { + return nil, fmt.Errorf("EvalConfigProvider Provider is nil") + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + config := n.Config + + configBody := buildProviderConfig(ctx, n.Addr, config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configDiags := ctx.ConfigureProvider(n.Addr, configVal) + configDiags = configDiags.InConfigBody(configBody) + + return nil, configDiags.ErrWithWarnings() +} + +// EvalInitProvider is an EvalNode implementation that initializes a provider +// and returns nothing. The provider can be retrieved again with the +// EvalGetProvider node. +type EvalInitProvider struct { + TypeName string + Addr addrs.ProviderConfig +} + +func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvider(n.TypeName, n.Addr) +} + +// EvalCloseProvider is an EvalNode implementation that closes provider +// connections that aren't needed anymore. +type EvalCloseProvider struct { + Addr addrs.ProviderConfig +} + +func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvider(n.Addr) + return nil, nil +} + +// EvalGetProvider is an EvalNode implementation that retrieves an already +// initialized provider instance for the given name. +// +// Unlike most eval nodes, this takes an _absolute_ provider configuration, +// because providers can be passed into and inherited between modules. +// Resource nodes must therefore know the absolute path of the provider they +// will use, which is usually accomplished by implementing +// interface GraphNodeProviderConsumer. +type EvalGetProvider struct { + Addr addrs.AbsProviderConfig + Output *providers.Interface + + // If non-nil, Schema will be updated after eval to refer to the + // schema of the provider. 
+ Schema **ProviderSchema +} + +func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { + if n.Addr.ProviderConfig.Type == "" { + // Should never happen + panic("EvalGetProvider used with uninitialized provider configuration address") + } + + result := ctx.Provider(n.Addr) + if result == nil { + return nil, fmt.Errorf("provider %s not initialized", n.Addr) + } + + if n.Output != nil { + *n.Output = result + } + + if n.Schema != nil { + *n.Schema = ctx.ProviderSchema(n.Addr) + } + + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go new file mode 100644 index 00000000000..405ce9d0bd5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner +// and returns nothing. The provisioner can be retrieved again with the +// EvalGetProvisioner node. +type EvalInitProvisioner struct { + Name string +} + +func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvisioner(n.Name) +} + +// EvalCloseProvisioner is an EvalNode implementation that closes provisioner +// connections that aren't needed anymore. +type EvalCloseProvisioner struct { + Name string +} + +func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvisioner(n.Name) + return nil, nil +} + +// EvalGetProvisioner is an EvalNode implementation that retrieves an already +// initialized provisioner instance for the given name. +type EvalGetProvisioner struct { + Name string + Output *provisioners.Interface + Schema **configschema.Block +} + +func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provisioner(n.Name) + if result == nil { + return nil, fmt.Errorf("provisioner %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + if n.Schema != nil { + *n.Schema = ctx.ProvisionerSchema(n.Name) + } + + return result, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go new file mode 100644 index 00000000000..0b734b793f3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go @@ -0,0 +1,395 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalReadData is an EvalNode implementation that deals with the main part +// of the data resource lifecycle: either actually reading from the data source +// or generating a plan to do so. 
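The outcomes enumerated in the comments of the struct below all hinge on one question: is the evaluated configuration wholly known at plan time? A compact, runnable reduction of that branching, using go-cty for the known/unknown distinction; the returned strings stand in for plans.Read and plans.NoOp:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// planDataRead reduces EvalReadData's main branch: an unknown configuration
// (or a forced plan) defers the real read to the apply walk as a "Read"
// change, while a fully known configuration is read immediately ("NoOp").
func planDataRead(configVal cty.Value, forcePlanRead bool) string {
	if forcePlanRead || !configVal.IsWhollyKnown() {
		return "Read"
	}
	return "NoOp"
}

func main() {
	unknownCfg := cty.ObjectVal(map[string]cty.Value{"id": cty.UnknownVal(cty.String)})
	knownCfg := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("ami-123")})

	fmt.Println(planDataRead(unknownCfg, false)) // Read: deferred to apply
	fmt.Println(planDataRead(knownCfg, false))   // NoOp: read now
	fmt.Println(planDataRead(knownCfg, true))    // Read: plan walk forces it
}
```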
+type EvalReadData struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Dependencies []addrs.Referenceable + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // Planned is set when dealing with data resources that were deferred to + // the apply walk, to let us see what was planned. If this is set, the + // evaluation of the config is required to produce a wholly-known + // configuration which is consistent with the partial object included + // in this planned change. + Planned **plans.ResourceInstanceChange + + // ForcePlanRead, if true, overrides the usual behavior of immediately + // reading from the data source where possible, instead forcing us to + // _always_ generate a plan. This is used during the plan walk, since we + // mustn't actually apply anything there. (The resulting state doesn't + // get persisted) + ForcePlanRead bool + + // The result from this EvalNode has a few different possibilities + // depending on the input: + // - If Planned is nil then we assume we're aiming to _produce_ the plan, + // and so the following two outcomes are possible: + // - OutputChange.Action is plans.NoOp and OutputState is the complete + // result of reading from the data source. This is the easy path. + // - OutputChange.Action is plans.Read and OutputState is a planned + // object placeholder (states.ObjectPlanned). In this case, the + // returned change must be recorded in the overall changeset and + // eventually passed to another instance of this struct during the + // apply walk. + // - If Planned is non-nil then we assume we're aiming to complete a + // planned read from an earlier plan walk. In this case the only possible + // non-error outcome is to set OutputChange.Action (if non-nil) to a plans.NoOp + // change and put the complete resulting state in OutputState, ready to + // be saved in the overall state and used for expression evaluation. + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputConfigValue *cty.Value + OutputState **states.ResourceInstanceObject +} + +func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadData: working on %s", absAddr) + + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema not available for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + var change *plans.ResourceInstanceChange + var configVal cty.Value + + // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and + // EvalReadDataApply did, but it seems like we should handle that via a + // separate mechanism since it boils down to just deleting the object from + // the state... and we do that on every plan anyway, forcing the data + // resource to re-read. + + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type) + } + + // We'll always start by evaluating the configuration. What we do after + // that will depend on the evaluation result along with what other inputs + // we were given.
+ objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time + + forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) + + // If our configuration contains any unknown values then we must defer the + // read to the apply phase by producing a "Read" change for this resource, + // and a placeholder value for it in the state. + if n.ForcePlanRead || !configVal.IsWhollyKnown() { + // If the configuration is still unknown when we're applying a planned + // change then that indicates a bug in Terraform, since we should have + // everything resolved by now. + if n.Planned != nil && *n.Planned != nil { + return nil, fmt.Errorf( + "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", + absAddr, + ) + } + if n.ForcePlanRead { + log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) + } else { + log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr) + } + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectPlanned, // because the partial value in the plan must be used for now + Dependencies: n.Dependencies, + } + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() + } + + if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read { + // If any other action gets in here then that's always a bug; this + // EvalNode only deals with reading. + return nil, fmt.Errorf( + "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", + (*n.Planned).Action, absAddr, + ) + } + + log.Printf("[TRACE] Re-validating config for %s", absAddr) + validateResp := provider.ValidateDataSourceConfig( + providers.ValidateDataSourceConfigRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err() + } + + // If we get down here then our configuration is complete and we're ready + // to actually call the provider to read the data.
+ log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) + + err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) + }) + if err != nil { + return nil, err + } + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + newVal := resp.State + if newVal == cty.NilVal { + // This can happen with incompletely-configured mocks. We'll allow it + // and treat it as an alias for a properly-typed null value. + newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + } + if !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. + // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. + newVal = cty.UnknownAsNull(newVal) + } + + // Since we've completed the read, we actually have no change to make, but + // we'll produce a NoOp one anyway to preserve the usual flow of the + // plan phase and allow it to produce a complete plan. 
+ change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.NoOp, + Before: newVal, + After: newVal, + }, + } + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectReady, // because we completed the read from the provider + Dependencies: n.Dependencies, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() +} + +// EvalReadDataApply is an EvalNode implementation that executes a data +// resource's ReadDataApply method to read data from the data source. +type EvalReadDataApply struct { + Addr addrs.ResourceInstance + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + Config *configs.Resource + Change **plans.ResourceInstanceChange + StateReferences []addrs.Referenceable +} + +func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + change := *n.Change + providerSchema := *n.ProviderSchema + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics + + // If the diff is for *destroying* this resource then we'll + // just drop its state and move on, since data resources don't + // support an actual "destroy" action. + if change != nil && change.Action == plans.Delete { + if n.Output != nil { + *n.Output = nil + } + return nil, nil + } + + // For the purpose of external hooks we present a data apply as a + // "Refresh" rather than an "Apply" because creating a data source + // is presented to users/callers as a "read" operation. + err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) + }) + if err != nil { + return nil, err + } + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: change.After, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) + } + + newVal := resp.State + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s. 
The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + Dependencies: n.StateReferences, + } + } + + return nil, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go new file mode 100644 index 00000000000..6a834445c31 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go @@ -0,0 +1,106 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalRefresh is an EvalNode implementation that does a refresh for +// a resource. +type EvalRefresh struct { + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + Output **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics + + // If we have no state, we don't do any refreshing + if state == nil { + log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) + return nil, diags.ErrWithWarnings() + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + // Call pre-refresh hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreRefresh(absAddr, states.CurrentGen, state.Value) + }) + if err != nil { + return nil, diags.ErrWithWarnings() + } + + // Refresh! + priorVal := state.Value + req := providers.ReadResourceRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: priorVal, + Private: state.Private, + } + + provider := *n.Provider + resp := provider.ReadResource(req) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + if resp.NewState == cty.NilVal { + // This ought not to happen in real cases since it's not possible to + // send NilVal over the plugin RPC channel, but it can come up in + // tests due to sloppy mocking. 
+ panic("new state is cty.NilVal") + } + + for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + newState := state.DeepCopy() + newState.Value = resp.NewState + newState.Private = resp.Private + + // Call post-refresh hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = newState + } + + return nil, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go new file mode 100644 index 00000000000..7d6bb6603bd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go @@ -0,0 +1,42 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalSequence is an EvalNode that evaluates in sequence. +type EvalSequence struct { + Nodes []EvalNode +} + +func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + for _, n := range n.Nodes { + if n == nil { + continue + } + + if _, err := EvalRaw(n, ctx); err != nil { + if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { + // In this path we abort early, losing any non-error + // diagnostics we saw earlier. + return nil, err + } + diags = diags.Append(err) + if diags.HasErrors() { + // Halt if we get some errors, but warnings are okay. + break + } + } + } + + return nil, diags.ErrWithWarnings() +} + +// EvalNodeFilterable impl. +func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { + for i, node := range n.Nodes { + n.Nodes[i] = fn(node) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go new file mode 100644 index 00000000000..70a72bbdbc1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go @@ -0,0 +1,475 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalReadState is an EvalNode implementation that reads the +// current object for a specific instance in the state. +type EvalReadState struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the the state object. 
This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. + Output **states.ResourceInstanceObject +} + +func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadState used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadState used with no ProviderSchema object") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) + + src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) + if src == nil { + // Presumably we only have deposed objects, then. + log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) + return nil, nil + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = obj + } + return obj, nil +} + +// EvalReadStateDeposed is an EvalNode implementation that reads the +// deposed InstanceState for a specific resource out of the state. +type EvalReadStateDeposed struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // Key identifies which deposed object we will read. + Key states.DeposedKey + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. + Output **states.ResourceInstanceObject +} + +func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadStateDeposed used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadStateDeposed used with no ProviderSchema object") + } + + key := n.Key + if key == states.NotDeposed { + return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") + } + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) + + src := ctx.State().ResourceInstanceObject(absAddr, key) + if src == nil { + // There is no deposed object with the given key, then.
+ log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) + return nil, nil + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + if n.Output != nil { + *n.Output = obj + } + return obj, nil +} + +// EvalRequireState is an EvalNode implementation that exits early if the given +// object is null. +type EvalRequireState struct { + State **states.ResourceInstanceObject +} + +func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + return nil, EvalEarlyExitError{} + } + + state := *n.State + if state == nil || state.Value.IsNull() { + return nil, EvalEarlyExitError{} + } + + return nil, nil +} + +// EvalUpdateStateHook is an EvalNode implementation that calls the +// PostStateUpdate hook with the current state. +type EvalUpdateStateHook struct{} + +func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { + // In principle we could grab the lock here just long enough to take a + // deep copy and then pass that to our hooks below, but we'll instead + // hold the hook for the duration to avoid the potential confusing + // situation of us racing to call PostStateUpdate concurrently with + // different state snapshots. + stateSync := ctx.State() + state := stateSync.Lock().DeepCopy() + defer stateSync.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +// EvalWriteState is an EvalNode implementation that saves the given object +// as the current object for the selected resource instance. +type EvalWriteState struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig +} + +func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absense of an object as we'd see during destroy. + panic("EvalWriteState used with no ResourceInstanceObject") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + if n.ProviderAddr.ProviderConfig.Type == "" { + return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) + } + + obj := *n.State + if obj == nil || obj.Value.IsNull() { + // No need to encode anything: we'll just write it directly. 
+ state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteState used with pointer to nil ProviderSchema object") + } + + if obj != nil { + log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) + } else { + log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. + return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) + return nil, nil +} + +// EvalWriteStateDeposed is an EvalNode implementation that writes +// an InstanceState out to the Deposed list of a resource in the state. +type EvalWriteStateDeposed struct { + // Addr is the address of the instance to write state for. + Addr addrs.ResourceInstance + + // Key indicates which deposed object to write to. + Key states.DeposedKey + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig +} + +func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absence of an object as we'd see during destroy. + panic("EvalWriteStateDeposed used with no ResourceInstanceObject") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + key := n.Key + state := ctx.State() + + if key == states.NotDeposed { + // should never happen + return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) + } + + obj := *n.State + if obj == nil { + // No need to encode anything: we'll just write it directly. + state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteStateDeposed used with no ProviderSchema object") + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly.
+ return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) + state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) + return nil, nil +} + +// EvalDeposeState is an EvalNode implementation that moves the current object +// for the given instance to instead be a deposed object, leaving the instance +// with no current object. +// This is used at the beginning of a create-before-destroy replace action so +// that the create can create while preserving the old state of the +// to-be-destroyed object. +type EvalDeposeState struct { + Addr addrs.ResourceInstance + + // ForceKey, if a value other than states.NotDeposed, will be used as the + // key for the newly-created deposed object that results from this action. + // If set to states.NotDeposed (the zero value), a new unique key will be + // allocated. + ForceKey states.DeposedKey + + // OutputKey, if non-nil, will be written with the deposed object key that + // was generated for the object. This can then be passed to + // EvalUndeposeState.Key so it knows which deposed instance to forget. + OutputKey *states.DeposedKey +} + +// TODO: test +func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + var key states.DeposedKey + if n.ForceKey == states.NotDeposed { + key = state.DeposeResourceInstanceObject(absAddr) + } else { + key = n.ForceKey + state.DeposeResourceInstanceObjectForceKey(absAddr, key) + } + log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key) + + if n.OutputKey != nil { + *n.OutputKey = key + } + + return nil, nil +} + +// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will +// restore a particular deposed object of the specified resource instance +// to be the "current" object if and only if the instance doesn't currently +// have a current object. +// +// This is intended for use when the create leg of a create before destroy +// fails with no partial new object: if we didn't take any action, the user +// would be left in the unfortunate situation of having no current object +// and the previously-workign object now deposed. This EvalNode causes a +// better outcome by restoring things to how they were before the replace +// operation began. +// +// The create operation may have produced a partial result even though it +// failed and it's important that we don't "forget" that state, so in that +// situation the prior object remains deposed and the partial new object +// remains the current object, allowing the situation to hopefully be +// improved in a subsequent run. +type EvalMaybeRestoreDeposedObject struct { + Addr addrs.ResourceInstance + + // Key is a pointer to the deposed object key that should be forgotten + // from the state, which must be non-nil. 
+ Key *states.DeposedKey +} + +// TODO: test +func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + dk := *n.Key + state := ctx.State() + + restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) + if restored { + log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) + } else { + log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) + } + + return nil, nil +} + +// EvalWriteResourceState is an EvalNode implementation that ensures that +// a suitable resource-level state record is present in the state, if that's +// required for the "each mode" of that resource. +// +// This is important primarily for the situation where count = 0, since this +// eval is the only chance we get to set the resource "each mode" to list +// in that case, allowing expression evaluation to see it as a zero-element +// list rather than as not set at all. +type EvalWriteResourceState struct { + Addr addrs.Resource + Config *configs.Resource + ProviderAddr addrs.AbsProviderConfig +} + +// TODO: test +func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + eachMode := states.NoEach + if count >= 0 { // -1 signals "count not set" + eachMode = states.EachList + } + + forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + diags = diags.Append(forEachDiags) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + + if forEach != nil { + eachMode = states.EachMap + } + + // This method takes care of all of the business logic of updating this + // while ensuring that any existing instances are preserved, etc. + state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) + + return nil, nil +} + +// EvalForgetResourceState is an EvalNode implementation that prunes out an +// empty resource-level state for a given resource address, or produces an +// error if it isn't empty after all. +// +// This should be the last action taken for a resource that has been removed +// from the configuration altogether, to clean up the leftover husk of the +// resource in the state after other EvalNodes have destroyed and removed +// all of the instances and instance objects beneath it. +type EvalForgetResourceState struct { + Addr addrs.Resource +} + +func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + pruned := state.RemoveResourceIfEmpty(absAddr) + if !pruned { + // If this produces an error, it indicates a bug elsewhere in Terraform + // -- probably missing graph nodes, graph edges, or + // incorrectly-implemented evaluation steps.
+ return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) + } + log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) + + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go new file mode 100644 index 00000000000..27d5f212eb2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go @@ -0,0 +1,106 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// UpgradeResourceState will, if necessary, run the provider-defined upgrade +// logic against the given state object to make it compliant with the +// current schema version. This is a no-op if the given state object is +// already at the latest version. +// +// If any errors occur during upgrade, error diagnostics are returned. In that +// case it is not safe to proceed with using the original state object. +func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + // We only do state upgrading for managed resources. + return src, nil + } + + stateIsFlatmap := len(src.AttrsJSON) == 0 + + providerType := addr.Resource.Resource.DefaultProviderConfig().Type + if src.SchemaVersion > currentVersion { + log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance managed by newer provider version", + // This is not a very good error message, but we don't retain enough + // information in state to give good feedback on what provider + // version might be required here. :( + fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), + )) + return nil, diags + } + + // If we get down here then we need to upgrade the state, with the + // provider's help. + // If this state was originally created by a version of Terraform prior to + // v0.12, this also includes translating from legacy flatmap to new-style + // representation, since only the provider has enough information to + // understand a flatmap built against an older schema. 
+ if src.SchemaVersion != currentVersion { + log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + } else { + log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) + } + + req := providers.UpgradeResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + + // TODO: The internal schema version representations are all using + // uint64 instead of int64, but unsigned integers aren't friendly + // to all protobuf target languages so in practice we use int64 + // on the wire. In future we will change all of our internal + // representations to int64 too. + Version: int64(src.SchemaVersion), + } + + if stateIsFlatmap { + req.RawStateFlatmap = src.AttrsFlat + } else { + req.RawStateJSON = src.AttrsJSON + } + + resp := provider.UpgradeResourceState(req) + diags := resp.Diagnostics + if diags.HasErrors() { + return nil, diags + } + + // After upgrading, the new value must conform to the current schema. When + // going over RPC this is actually already ensured by the + // marshaling/unmarshaling of the new value, but we'll check it here + // anyway for robustness, e.g. for in-process providers. + newValue := resp.UpgradedState + if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource state upgrade", + fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), + )) + } + return nil, diags + } + + new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) + if err != nil { + // We already checked for type conformance above, so getting into this + // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to encode result of resource state upgrade", + fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), + )) + } + return new, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go new file mode 100644 index 00000000000..e11cac7faa4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go @@ -0,0 +1,588 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// EvalValidateCount is an EvalNode implementation that validates +// the count of a resource. 
+type EvalValidateCount struct { + Resource *configs.Resource +} + +// TODO: test +func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + var count int + var err error + + val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + goto RETURN + } + if val.IsNull() || !val.IsKnown() { + goto RETURN + } + + err = gocty.FromCtyValue(val, &count) + if err != nil { + // The EvaluateExpr call above already guaranteed us a number value, + // so if we end up here then we have something that is out of range + // for an int, and the error message will include a description of + // the valid range. + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), + Subject: n.Resource.Count.Range().Ptr(), + }) + } else if count < 0 { + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), + Subject: n.Resource.Count.Range().Ptr(), + }) + } + +RETURN: + return nil, diags.NonFatalErr() +} + +// EvalValidateProvider is an EvalNode implementation that validates +// a provider configuration. +type EvalValidateProvider struct { + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider +} + +func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + provider := *n.Provider + + configBody := buildProviderConfig(ctx, n.Addr, n.Config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + if configSchema == nil { + // Should never happen in real code, but often comes up in tests where + // mock schemas are being used that tend to be incomplete. + log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) + configSchema = &configschema.Block{} + } + + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + req := providers.PrepareProviderConfigRequest{ + Config: configVal, + } + + validateResp := provider.PrepareProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics) + + return nil, diags.NonFatalErr() +} + +// EvalValidateProvisioner is an EvalNode implementation that validates +// the configuration of a provisioner belonging to a resource. The provisioner +// config is expected to contain the merged connection configurations. 
+type EvalValidateProvisioner struct { + ResourceAddr addrs.Resource + Provisioner *provisioners.Interface + Schema **configschema.Block + Config *configs.Provisioner + ResourceHasCount bool + ResourceHasForEach bool +} + +func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { + provisioner := *n.Provisioner + config := *n.Config + schema := *n.Schema + + var diags tfdiags.Diagnostics + + { + // Validate the provisioner's own config first + + configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return nil, fmt.Errorf("EvaluateBlock returned nil value") + } + + req := provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) + } + + { + // Now validate the connection config, which contains the merged bodies + // of the resource and provisioner connection blocks. + connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) + diags = diags.Append(connDiags) + } + + return nil, diags.NonFatalErr() +} + +func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. + + var diags tfdiags.Diagnostics + + if config == nil || config.Config == nil { + // No block to validate + return diags + } + + // We evaluate here just by evaluating the block and returning any + // diagnostics we get, since evaluation alone is enough to check for + // extraneous arguments and incorrectly-typed arguments. + _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) + diags = diags.Append(configDiags) + + return diags +} + +func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + keyData := EvalDataForNoInstanceKey + selfAddr := n.ResourceAddr.Instance(addrs.NoKey) + + if n.ResourceHasCount { + // For a resource that has count, we allow count.index but don't + // know at this stage what it will return. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // "self" can't point to an unknown key, but we'll force it to be + // key 0 here, which should return an unknown value of the + // expected type since none of these elements are known at this + // point anyway. + selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) + } else if n.ResourceHasForEach { + // For a resource that has for_each, we allow each.value and each.key + // but don't know at this stage what it will return. + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.DynamicVal, + } + + // "self" can't point to an unknown key, but we'll force it to be + // key "" here, which should return an unknown value of the + // expected type since none of these elements are known at + // this point anyway. 
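+    // (Because each.key is unknown here, anything derived from it will also
+    // be unknown, which is still sufficient for type checking.)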
+    selfAddr = n.ResourceAddr.Instance(addrs.StringKey(""))
+  }
+
+  return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
+}
+
+// connectionBlockSupersetSchema is a schema representing the superset of all
+// possible arguments for "connection" blocks across all supported connection
+// types.
+//
+// This currently lives here because we've not yet updated our communicator
+// subsystem to be aware of schema itself. Once that is done, we can remove
+// this and use a type-specific schema from the communicator to validate
+// exactly what is expected for a given connection type.
+var connectionBlockSupersetSchema = &configschema.Block{
+  Attributes: map[string]*configschema.Attribute{
+    // NOTE: "type" is treated specially by the config loader and stored
+    // away in a separate field, but it is also declared below so that it
+    // is accepted during validation.
+
+    // Common attributes for both connection types
+    "host": {
+      Type: cty.String,
+      Required: true,
+    },
+    "type": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "user": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "password": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "port": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "timeout": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "script_path": {
+      Type: cty.String,
+      Optional: true,
+    },
+
+    // For type=ssh only (enforced in ssh communicator)
+    "private_key": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "certificate": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "host_key": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "agent": {
+      Type: cty.Bool,
+      Optional: true,
+    },
+    "agent_identity": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_host": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_host_key": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_port": {
+      Type: cty.Number,
+      Optional: true,
+    },
+    "bastion_user": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_password": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_private_key": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "bastion_certificate": {
+      Type: cty.String,
+      Optional: true,
+    },
+
+    // For type=winrm only (enforced in winrm communicator)
+    "https": {
+      Type: cty.Bool,
+      Optional: true,
+    },
+    "insecure": {
+      Type: cty.Bool,
+      Optional: true,
+    },
+    "cacert": {
+      Type: cty.String,
+      Optional: true,
+    },
+    "use_ntlm": {
+      Type: cty.Bool,
+      Optional: true,
+    },
+  },
+}
+
+// ConnectionBlockSupersetSchema returns a schema representing the superset
+// of all possible arguments for "connection" blocks across all supported
+// connection types.
+//
+// This currently lives here because we've not yet updated our communicator
+// subsystem to be aware of schema itself. It's exported only for use in the
+// configs/configupgrade package and should not be used from anywhere else.
+// The caller may not modify any part of the returned schema data structure.
+func ConnectionBlockSupersetSchema() *configschema.Block {
+  return connectionBlockSupersetSchema
+}
+
+// EvalValidateResource is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateResource struct {
+  Addr addrs.Resource
+  Provider *providers.Interface
+  ProviderSchema **ProviderSchema
+  Config *configs.Resource
+
+  // IgnoreWarnings means that warnings will not be passed through. This allows
+  // "just-in-time" passes of validation to continue execution through warnings.
+  IgnoreWarnings bool
+
+  // ConfigVal, if non-nil, will be updated with the value resulting from
+  // evaluating the given configuration body.
Since validation is performed + // very early, this value is likely to contain lots of unknown values, + // but its type will conform to the schema of the resource type associated + // with the resource instance being validated. + ConfigVal *cty.Value +} + +func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + cfg := *n.Config + schema := *n.ProviderSchema + mode := cfg.Mode + + keyData := EvalDataForNoInstanceKey + if n.Config.Count != nil { + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := n.validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + } + + if n.Config.ForEach != nil { + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.UnknownVal(cty.DynamicPseudoType), + } + + // Evaluate the for_each expression here so we can expose the diagnostics + forEachDiags := n.validateForEach(ctx, n.Config.ForEach) + diags = diags.Append(forEachDiags) + } + + for _, traversal := range n.Config.DependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if !refDiags.HasErrors() && len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. 
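+  // (Managed resources are validated with ValidateResourceTypeConfig in the
+  // first case below, while data resources go through
+  // ValidateDataSourceConfig in the second.)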
+ switch mode { + case addrs.ManagedResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range cfg.Managed.IgnoreChanges { + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + } + } + + req := providers.ValidateResourceTypeConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateResourceTypeConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + + if n.ConfigVal != nil { + *n.ConfigVal = configVal + } + + case addrs.DataResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + req := providers.ValidateDataSourceConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateDataSourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + } + + if n.IgnoreWarnings { + // If we _only_ have warnings then we'll return nil. + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + return nil, nil + } else { + // We'll return an error if there are any diagnostics at all, even if + // some of them are warnings. + return nil, diags.NonFatalErr() + } +} + +func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { + if expr == nil { + return nil + } + + var diags tfdiags.Diagnostics + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return diags + } + + if countVal.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return diags + } + + var err error + countVal, err = convert.Convert(countVal, cty.Number) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags + } + + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk. + if !countVal.IsKnown() { + return diags + } + + // If we _do_ know the value, then we can do a few more checks here. + var count int + err = gocty.FromCtyValue(countVal, &count) + if err != nil { + // Isn't a whole number, etc. 
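+    // (For example, a configuration with `count = 1.5`, or a number too
+    // large to fit in an int, would fail the conversion above.)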
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags + } + + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, + Subject: expr.Range().Ptr(), + }) + return diags + } + + return diags +} + +func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !known { + return diags + } + + if forEachDiags.HasErrors() { + diags = diags.Append(forEachDiags) + } + + return diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go new file mode 100644 index 00000000000..ae883b422ea --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go @@ -0,0 +1,67 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that +// expressions within a particular referencable block do not reference that +// same block. +type EvalValidateSelfRef struct { + Addr addrs.Referenceable + Config hcl.Body + ProviderSchema **ProviderSchema +} + +func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + addr := n.Addr + + addrStrs := make([]string, 0, 1) + addrStrs = append(addrStrs, addr.String()) + switch tAddr := addr.(type) { + case addrs.ResourceInstance: + // A resource instance may not refer to its containing resource either. 
+ addrStrs = append(addrStrs, tAddr.ContainingResource().String()) + } + + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) + } + + providerSchema := *n.ProviderSchema + var schema *configschema.Block + switch tAddr := addr.(type) { + case addrs.Resource: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr) + case addrs.ResourceInstance: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) + } + + if schema == nil { + return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) + } + + refs, _ := lang.ReferencesInBlock(n.Config, schema) + for _, ref := range refs { + for _, addrStr := range addrStrs { + if ref.Subject.String() == addrStr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referential block", + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + } + + return nil, diags.NonFatalErr() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go new file mode 100644 index 00000000000..0df4a4afc64 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go @@ -0,0 +1,96 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// EvalSetModuleCallArguments is an EvalNode implementation that sets values +// for arguments of a child module call, for later retrieval during +// expression evaluation. +type EvalSetModuleCallArguments struct { + Module addrs.ModuleCallInstance + Values map[string]cty.Value +} + +// TODO: test +func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { + ctx.SetModuleCallArguments(n.Module, n.Values) + return nil, nil +} + +// EvalModuleCallArgument is an EvalNode implementation that produces the value +// for a particular variable as will be used by a child module instance. +// +// The result is written into the map given in Values, with its key +// set to the local name of the variable, disregarding the module instance +// address. Any existing values in that map are deleted first. This weird +// interface is a result of trying to be convenient for use with +// EvalContext.SetModuleCallArguments, which expects a map to merge in with +// any existing arguments. +type EvalModuleCallArgument struct { + Addr addrs.InputVariable + Config *configs.Variable + Expr hcl.Expression + + // If this flag is set, any diagnostics are discarded and this operation + // will always succeed, though may produce an unknown value in the + // event of an error. 
+ IgnoreDiagnostics bool + + Values map[string]cty.Value +} + +func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { + // Clear out the existing mapping + for k := range n.Values { + delete(n.Values, k) + } + + wantType := n.Config.Type + name := n.Addr.Name + expr := n.Expr + + if expr == nil { + // Should never happen, but we'll bail out early here rather than + // crash in case it does. We set no value at all in this case, + // making a subsequent call to EvalContext.SetModuleCallArguments + // a no-op. + log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) + return nil, nil + } + + val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + + // We intentionally passed DynamicPseudoType to EvaluateExpr above because + // now we can do our own local type conversion and produce an error message + // with better context if it fails. + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for module argument", + Detail: fmt.Sprintf( + "The given value is not suitable for child module variable %q defined at %s: %s.", + name, n.Config.DeclRange.String(), convErr, + ), + Subject: expr.Range().Ptr(), + }) + // We'll return a placeholder unknown value to avoid producing + // redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + n.Values[name] = val + if n.IgnoreDiagnostics { + return nil, nil + } + return nil, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go new file mode 100644 index 00000000000..d4a8d3cf749 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go @@ -0,0 +1,88 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" +) + +// ProviderEvalTree returns the evaluation tree for initializing and +// configuring providers. +func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { + var provider providers.Interface + + addr := n.Addr + relAddr := addr.ProviderConfig + + seq := make([]EvalNode, 0, 5) + seq = append(seq, &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + }) + + // Input stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + }, + }, + }) + + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkValidate}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + &EvalValidateProvider{ + Addr: relAddr, + Provider: &provider, + Config: config, + }, + }, + }, + }) + + // Apply stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + }, + }, + }) + + // We configure on everything but validate, since validate may + // not have access to all the variables. 
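+  // (EvalOpFilter runs its nested node only during the listed walk
+  // operations, so the EvalConfigProvider step below is skipped during
+  // walkValidate.)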
+ seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalConfigProvider{ + Addr: relAddr, + Provider: &provider, + Config: config, + }, + }, + }, + }) + + return &EvalSequence{Nodes: seq} +} + +// CloseProviderEvalTree returns the evaluation tree for closing +// provider connections that aren't needed anymore. +func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { + return &EvalCloseProvider{Addr: addr.ProviderConfig} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go new file mode 100644 index 00000000000..81645d7d351 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go @@ -0,0 +1,968 @@ +package terraform + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/agext/levenshtein" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Evaluator provides the necessary contextual data for evaluating expressions +// for a particular walk operation. +type Evaluator struct { + // Operation defines what type of operation this evaluator is being used + // for. + Operation walkOperation + + // Meta is contextual metadata about the current operation. + Meta *ContextMeta + + // Config is the root node in the configuration tree. + Config *configs.Config + + // VariableValues is a map from variable names to their associated values, + // within the module indicated by ModulePath. VariableValues is modified + // concurrently, and so it must be accessed only while holding + // VariableValuesLock. + // + // The first map level is string representations of addr.ModuleInstance + // values, while the second level is variable names. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + // Schemas is a repository of all of the schemas we should need to + // evaluate expressions. This must be constructed by the caller to + // include schemas for all of the providers, resource types, data sources + // and provisioners used by the given configuration and state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // State is the current state, embedded in a wrapper that ensures that + // it can be safely accessed and modified concurrently. + State *states.SyncState + + // Changes is the set of proposed changes, embedded in a wrapper that + // ensures they can be safely accessed and modified concurrently. + Changes *plans.ChangesSync +} + +// Scope creates an evaluation scope for the given module path and optional +// resource. +// +// If the "self" argument is nil then the "self" object is not available +// in evaluated expressions. Otherwise, it behaves as an alias for the given +// address. 
+func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope {
+  return &lang.Scope{
+    Data: data,
+    SelfAddr: self,
+    PureOnly: e.Operation != walkApply && e.Operation != walkDestroy,
+    BaseDir: ".", // Always current working directory for now.
+  }
+}
+
+// evaluationStateData is an implementation of lang.Data that resolves
+// references primarily (but not exclusively) using information from a State.
+type evaluationStateData struct {
+  Evaluator *Evaluator
+
+  // ModulePath is the path through the dynamic module tree to the module
+  // that references will be resolved relative to.
+  ModulePath addrs.ModuleInstance
+
+  // InstanceKeyData describes the values, if any, that are accessible due
+  // to repetition of a containing object using "count" or "for_each"
+  // arguments. (It is _not_ used for the for_each inside "dynamic" blocks,
+  // since the user specifies in that case which variable name to locally
+  // shadow.)
+  InstanceKeyData InstanceKeyEvalData
+
+  // Operation records the type of walk the evaluationStateData is being used
+  // for.
+  Operation walkOperation
+}
+
+// InstanceKeyEvalData is used during evaluation to specify which values,
+// if any, should be produced for count.index, each.key, and each.value.
+type InstanceKeyEvalData struct {
+  // CountIndex is the value for count.index, or cty.NilVal if evaluating
+  // in a context where the "count" argument is not active.
+  //
+  // For correct operation, this should always be of type cty.Number if not
+  // nil.
+  CountIndex cty.Value
+
+  // EachKey and EachValue are the values for each.key and each.value
+  // respectively, or cty.NilVal if evaluating in a context where the
+  // "for_each" argument is not active. These must either both be set
+  // or neither set.
+  //
+  // For correct operation, EachKey must always be either of type cty.String
+  // or cty.Number if not nil.
+  EachKey, EachValue cty.Value
+}
+
+// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for
+// evaluating in a context that has the given instance key.
+func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData {
+  var countIdx cty.Value
+  var eachKey cty.Value
+  var eachVal cty.Value
+
+  if intKey, ok := key.(addrs.IntKey); ok {
+    countIdx = cty.NumberIntVal(int64(intKey))
+  }
+
+  if stringKey, ok := key.(addrs.StringKey); ok {
+    eachKey = cty.StringVal(string(stringKey))
+    eachVal = forEachMap[string(stringKey)]
+  }
+
+  return InstanceKeyEvalData{
+    CountIndex: countIdx,
+    EachKey: eachKey,
+    EachValue: eachVal,
+  }
+}
+
+// EvalDataForNoInstanceKey is a value of InstanceKeyEvalData that sets no
+// instance key values at all, suitable for use in contexts where no keyed
+// instance is relevant.
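+// For example, EvalValidateProvisioner above evaluates with
+// EvalDataForNoInstanceKey when the resource has neither "count" nor
+// "for_each" set.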
+var EvalDataForNoInstanceKey = InstanceKeyEvalData{}
+
+// evaluationStateData must implement lang.Data
+var _ lang.Data = (*evaluationStateData)(nil)
+
+func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+  var diags tfdiags.Diagnostics
+  switch addr.Name {
+
+  case "index":
+    idxVal := d.InstanceKeyData.CountIndex
+    if idxVal == cty.NilVal {
+      diags = diags.Append(&hcl.Diagnostic{
+        Severity: hcl.DiagError,
+        Summary: `Reference to "count" in non-counted context`,
+        Detail: `The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`,
+        Subject: rng.ToHCL().Ptr(),
+      })
+      return cty.UnknownVal(cty.Number), diags
+    }
+    return idxVal, diags
+
+  default:
+    diags = diags.Append(&hcl.Diagnostic{
+      Severity: hcl.DiagError,
+      Summary: `Invalid "count" attribute`,
+      Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name),
+      Subject: rng.ToHCL().Ptr(),
+    })
+    return cty.DynamicVal, diags
+  }
+}
+
+func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+  var diags tfdiags.Diagnostics
+  var returnVal cty.Value
+  switch addr.Name {
+
+  case "key":
+    returnVal = d.InstanceKeyData.EachKey
+  case "value":
+    returnVal = d.InstanceKeyData.EachValue
+  default:
+    diags = diags.Append(&hcl.Diagnostic{
+      Severity: hcl.DiagError,
+      Summary: `Invalid "each" attribute`,
+      Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
+      Subject: rng.ToHCL().Ptr(),
+    })
+    return cty.DynamicVal, diags
+  }
+
+  if returnVal == cty.NilVal {
+    diags = diags.Append(&hcl.Diagnostic{
+      Severity: hcl.DiagError,
+      Summary: `Reference to "each" in context without for_each`,
+      Detail: `The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`,
+      Subject: rng.ToHCL().Ptr(),
+    })
+    return cty.UnknownVal(cty.DynamicPseudoType), diags
+  }
+  return returnVal, diags
+}
+
+func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+  var diags tfdiags.Diagnostics
+
+  // First we'll make sure the requested value is declared in configuration,
+  // so we can produce a nice message if not.
+  moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
+  if moduleConfig == nil {
+    // should never happen, since we can't be evaluating in a module
+    // that wasn't mentioned in configuration.
+ panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Variables[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Variables { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared input variable`, + Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + wantType := cty.DynamicPseudoType + if config.Type != cty.NilType { + wantType = config.Type + } + + d.Evaluator.VariableValuesLock.Lock() + defer d.Evaluator.VariableValuesLock.Unlock() + + // During the validate walk, input variables are always unknown so + // that we are validating the configuration for all possible input values + // rather than for a specific set. Checking against a specific set of + // input values then happens during the plan walk. + // + // This is important because otherwise the validation walk will tend to be + // overly strict, requiring expressions throughout the configuration to + // be complicated to accommodate all possible inputs, whereas returning + // known here allows for simpler patterns like using input values as + // guards to broadly enable/disable resources, avoid processing things + // that are disabled, etc. Terraform's static validation leans towards + // being liberal in what it accepts because the subsequent plan walk has + // more information available and so can be more conservative. + if d.Operation == walkValidate { + return cty.UnknownVal(wantType), diags + } + + moduleAddrStr := d.ModulePath.String() + vals := d.Evaluator.VariableValues[moduleAddrStr] + if vals == nil { + return cty.UnknownVal(wantType), diags + } + + val, isSet := vals[addr.Name] + if !isSet { + if config.Default != cty.NilVal { + return config.Default, diags + } + return cty.UnknownVal(wantType), diags + } + + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + // We should never get here because this problem should've been caught + // during earlier validation, but we'll do something reasonable anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Incorrect variable type`, + Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), + Subject: &config.DeclRange, + }) + // Stub out our return value so that the semantic checker doesn't + // produce redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + return val, diags +} + +func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. 
+ panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Locals[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Locals { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared local value`, + Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) + if val == cty.NilVal { + // Not evaluated yet? + val = cty.DynamicVal + } + + return val, diags +} + +func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + moduleAddr := addr.ModuleInstance(d.ModulePath) + + // We'll consult the configuration to see what output names we are + // expecting, so we can ensure the resulting object is of the expected + // type even if our data is incomplete for some reason. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since this should've been caught during + // static validation. + panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) + } + outputConfigs := moduleConfig.Module.Outputs + + vals := map[string]cty.Value{} + for n := range outputConfigs { + addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) + vals[n] = cty.DynamicVal + continue + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + vals[n] = change.After + } else { + os := d.Evaluator.State.OutputValue(addr) + if os == nil { + // Not evaluated yet? + vals[n] = cty.DynamicVal + continue + } + vals[n] = os.Value + } + } + return cty.ObjectVal(vals), diags +} + +func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + absAddr := addr.AbsOutputValue(d.ModulePath) + moduleAddr := absAddr.Module + + // First we'll consult the configuration to see if an output of this + // name is declared at all. 
+ moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // this doesn't happen in normal circumstances due to our validation + // pass, but it can turn up in some unusual situations, like in the + // "terraform console" repl where arbitrary expressions can be + // evaluated. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + config := moduleConfig.Module.Outputs[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Outputs { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared output value`, + Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) + return cty.DynamicVal, diags + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + return change.After, diags + } + + os := d.Evaluator.State.OutputValue(absAddr) + if os == nil { + // Not evaluated yet? + return cty.DynamicVal, diags + } + + return os.Value, diags +} + +func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "cwd": + wd, err := os.Getwd() + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. 
+ panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) + } + sourceDir := moduleConfig.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + case "root": + sourceDir := d.Evaluator.Config.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + default: + suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Although we are giving a ResourceInstance address here, if it has + // a key of addrs.NoKey then it might actually be a request for all of + // the instances of a particular resource. The reference resolver can't + // resolve the ambiguity itself, so we must do it in here. + + // First we'll consult the configuration to see if an resource of this + // name is declared at all. + moduleAddr := d.ModulePath + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) + } + + config := moduleConfig.Module.ResourceByAddr(addr.ContainingResource()) + if config == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Resource.Type, addr.Resource.Name, moduleDisplayAddr(moduleAddr)), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // First we'll find the state for the resource as a whole, and decide + // from there whether we're going to interpret the given address as a + // resource or a resource instance address. + rs := d.Evaluator.State.Resource(addr.ContainingResource().Absolute(d.ModulePath)) + + if rs == nil { + schema := d.getResourceSchema(addr.ContainingResource(), config.ProviderConfigAddr().Absolute(d.ModulePath)) + + // If it doesn't exist at all then we can't reliably determine whether + // single-instance or whole-resource interpretation was intended, but + // we can decide this partially... + if addr.Key != addrs.NoKey { + // If there's an instance key then the user must be intending + // single-instance interpretation, and so we can return a + // properly-typed unknown value to help with type checking. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // otherwise we must return DynamicVal so that both interpretations + // can proceed without generating errors, and we'll deal with this + // in a later step where more information is gathered. + // (In practice we should only end up here during the validate walk, + // since later walks should have at least partial states populated + // for all resources in the configuration.) + return cty.DynamicVal, diags + } + + // Break out early during validation, because resource may not be expanded + // yet and indexed references may show up as invalid. 
+ if d.Operation == walkValidate { + return cty.DynamicVal, diags + } + + schema := d.getResourceSchema(addr.ContainingResource(), rs.ProviderConfig) + + // If we are able to automatically convert to the "right" type of instance + // key for this each mode then we'll do so, to match with how we generally + // treat values elsewhere in the language. This allows code below to + // assume that any possible conversions have already been dealt with and + // just worry about validation. + key := d.coerceInstanceKey(addr.Key, rs.EachMode) + + multi := false + + switch rs.EachMode { + case states.NoEach: + if key != addrs.NoKey { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s does not have either \"count\" or \"for_each\" set, so it cannot be indexed.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachList: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.IntKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a number value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachMap: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.StringKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a string value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if !multi { + log.Printf("[TRACE] GetResourceInstance: %s is a single instance", addr) + is := rs.Instance(key) + if is == nil { + return cty.UnknownVal(schema.ImpliedType()), diags + } + return d.getResourceInstanceSingle(addr, rng, is, config, rs.ProviderConfig) + } + + log.Printf("[TRACE] GetResourceInstance: %s has multiple keyed instances", addr) + return d.getResourceInstancesAll(addr.ContainingResource(), rng, config, rs, rs.ProviderConfig) +} + +func (d *evaluationStateData) getResourceInstanceSingle(addr addrs.ResourceInstance, rng tfdiags.SourceRange, is *states.ResourceInstance, config *configs.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr.ContainingResource(), providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + ty := schema.ImpliedType() + if is == nil || is.Current == nil { + // Assume we're dealing with an instance that hasn't been created yet. + return cty.UnknownVal(ty), diags + } + + if is.Current.Status == states.ObjectPlanned { + // If there's a pending change for this instance in our plan, we'll prefer + // that. This is important because the state can't represent unknown values + // and so its data is inaccurate when changes are pending. 
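+    // (During plan, an object that is about to be created is written to the
+    // state only as a planned placeholder; its prospective attribute values,
+    // including any unknowns, live in the change's After value.)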
+ if change := d.Evaluator.Changes.GetResourceInstanceChange(addr.Absolute(d.ModulePath), states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + return val, diags + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + + return ios.Value, diags +} + +func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr, providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + switch rs.EachMode { + + case states.EachList: + // We need to infer the length of our resulting tuple by searching + // for the max IntKey in our instances map. + length := 0 + for k := range rs.Instances { + if ik, ok := k.(addrs.IntKey); ok { + if int(ik) >= length { + length = int(ik) + 1 + } + } + } + + vals := make([]cty.Value, length) + for i := 0; i < length; i++ { + ty := schema.ImpliedType() + key := addrs.IntKey(i) + is, exists := rs.Instances[key] + if exists && is.Current != nil { + instAddr := addr.Instance(key).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. 
+ if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = ios.Value + } else { + // There shouldn't normally be "gaps" in our list but we'll + // allow it under the assumption that we're in a weird situation + // where e.g. someone has run "terraform state mv" to reorder + // a list and left a hole behind. + vals[i] = cty.UnknownVal(schema.ImpliedType()) + } + } + + // We use a tuple rather than a list here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.TupleVal(vals), diags + + case states.EachMap: + ty := schema.ImpliedType() + vals := make(map[string]cty.Value, len(rs.Instances)) + for k, is := range rs.Instances { + if sk, ok := k.(addrs.StringKey); ok { + instAddr := addr.Instance(k).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. 
This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = ios.Value + } + } + + // We use an object rather than a map here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.ObjectVal(vals), diags + + default: + // Should never happen since caller should deal with other modes + panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) + } +} + +func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { + providerType := providerAddr.ProviderConfig.Type + schemas := d.Evaluator.Schemas + schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + return schema +} + +// coerceInstanceKey attempts to convert the given key to the type expected +// for the given EachMode. +// +// If the key is already of the correct type or if it cannot be converted then +// it is returned verbatim. If conversion is required and possible, the +// converted value is returned. Callers should not try to determine if +// conversion was possible; they should instead just check whether the result +// is of the expected type. +func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey { + if key == addrs.NoKey { + // An absent key can't be converted + return key + } + + switch mode { + case states.NoEach: + // No conversions possible at all + return key + case states.EachMap: + if intKey, isInt := key.(addrs.IntKey); isInt { + return addrs.StringKey(strconv.Itoa(int(intKey))) + } + return key + case states.EachList: + if strKey, isStr := key.(addrs.StringKey); isStr { + i, err := strconv.Atoi(string(strKey)) + if err != nil { + return key + } + return addrs.IntKey(i) + } + return key + default: + return key + } +} + +func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "workspace": + workspaceName := d.Evaluator.Meta.Env + return cty.StringVal(workspaceName), diags + + case "env": + // Prior to Terraform 0.12 there was an attribute "env", which was + // an alias name for "workspace". This was deprecated and is now + // removed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. 
The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} + +// moduleDisplayAddr returns a string describing the given module instance +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleDisplayAddr(addr addrs.ModuleInstance) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go new file mode 100644 index 00000000000..6badb1543b9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go @@ -0,0 +1,299 @@ +package terraform + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// StaticValidateReferences checks the given references against schemas and +// other statically-checkable rules, producing error diagnostics if any +// problems are found. +// +// If this method returns errors for a particular reference then evaluating +// that reference is likely to generate a very similar error, so callers should +// not run this method and then also evaluate the source expression(s) and +// merge the two sets of diagnostics together, since this will result in +// confusing redundant errors. +// +// This method can find more errors than can be found by evaluating an +// expression with a partially-populated scope, since it checks the referenced +// names directly against the schema rather than relying on evaluation errors. +// +// The result may include warning diagnostics if, for example, deprecated +// features are referenced. 
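// Editor's sketch (illustrative; not part of the vendored diff): how the
// nameSuggestion helper above behaves with the agext/levenshtein package it
// imports. Any candidate within edit distance 3 is accepted, and the first
// close-enough candidate wins, so the order of the suggestions matters.
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func nameSuggestion(given string, suggestions []string) string {
	for _, suggestion := range suggestions {
		if levenshtein.Distance(given, suggestion, nil) < 3 {
			return suggestion
		}
	}
	return ""
}

func main() {
	attrs := []string{"cwd", "module", "root"}
	fmt.Printf("%q\n", nameSuggestion("modle", attrs))  // "module" (distance 1)
	fmt.Printf("%q\n", nameSuggestion("roto", attrs))   // "root" (distance 2)
	fmt.Printf("%q\n", nameSuggestion("banana", attrs)) // "": nothing close enough
}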
+func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, ref := range refs { + moreDiags := d.staticValidateReference(ref, self) + diags = diags.Append(moreDiags) + } + return diags +} + +func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if modCfg == nil { + // This is a bug in the caller rather than a problem with the + // reference, but rather than crashing out here in an unhelpful way + // we'll just ignore it and trust a different layer to catch it. + return nil + } + + if ref.Subject == addrs.Self { + // The "self" address is a special alias for the address given as + // our self parameter here, if present. + if self == nil { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + return diags + } + + synthRef := *ref // shallow copy + synthRef.Subject = self + ref = &synthRef + } + + switch addr := ref.Subject.(type) { + + // For static validation we validate both resource and resource instance references the same way. + // We mostly disregard the index, though we do some simple validation of + // its _presence_ in staticValidateSingleResourceReference and + // staticValidateMultiResourceReference respectively. + case addrs.Resource: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + return diags + case addrs.ResourceInstance: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) + return diags + + // We also handle all module call references the same way, disregarding index. + case addrs.ModuleCall: + return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstance: + return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallOutput: + // This one is a funny one because we will take the output name referenced + // and use it to fake up a "remaining" that would make sense for the + // module call itself, rather than for the specific output, and then + // we can just re-use our static module call validation logic. + remain := make(hcl.Traversal, len(ref.Remaining)+1) + copy(remain[1:], ref.Remaining) + remain[0] = hcl.TraverseAttr{ + Name: addr.Name, + + // Using the whole reference as the source range here doesn't exactly + // match how HCL would normally generate an attribute traversal, + // but is close enough for our purposes. 
+ SrcRange: ref.SourceRange.ToHCL(), + } + return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) + + default: + // Anything else we'll just permit through without any static validation + // and let it be caught during dynamic evaluation, in evaluate.go. + return nil + } +} + +func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + // If we have at least one step in "remain" and this resource has + // "count" set then we know for sure this is invalid because we have + // something like: + // aws_instance.foo.bar + // ...when we really need + // aws_instance.foo[count.index].bar + + // It is _not_ safe to do this check when remain is empty, because that + // would also match aws_instance.foo[count.index].bar due to `count.index` + // not being statically-resolvable as part of a reference, and match + // direct references to the whole aws_instance.foo tuple. + if len(remain) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if cfg.Count != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + if cfg.ForEach != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + + return diags +} + +func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if addr.Key == addrs.NoKey { + // This is a different path into staticValidateSingleResourceReference + return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) + } else { + if cfg.Count == nil && cfg.ForEach == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unexpected resource instance key`, + Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. 
Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + return diags +} + +func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + var modeAdjective string + switch addr.Mode { + case addrs.ManagedResourceMode: + modeAdjective = "managed" + case addrs.DataResourceMode: + modeAdjective = "data" + default: + // should never happen + modeAdjective = "" + } + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // Normally accessing this directly is wrong because it doesn't take into + // account provider inheritance, etc but it's okay here because we're only + // paying attention to the type anyway. + providerType := cfg.ProviderConfigAddr().Type + schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + + if schema == nil { + // Prior validation should've taken care of a resource block with an + // unsupported type, so we should never get here but we'll handle it + // here anyway for robustness. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource type`, + Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // As a special case we'll detect attempts to access an attribute called + // "count" and produce a special error for it, since versions of Terraform + // prior to v0.12 offered this as a weird special case that we can no + // longer support. + if len(remain) > 0 { + if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource count attribute`, + Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + } + + // If we got this far then we'll try to validate the remaining traversal + // steps against our schema. + moreDiags := schema.StaticValidateTraversal(remain) + diags = diags.Append(moreDiags) + + return diags +} + +func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // For now, our focus here is just in testing that the referenced module + // call exists. All other validation is deferred until evaluation time. 
+ _, exists := modCfg.Module.ModuleCalls[addr.Name] + if !exists { + var suggestions []string + for name := range modCfg.Module.ModuleCalls { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + return diags +} + +// moduleConfigDisplayAddr returns a string describing the given module +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleConfigDisplayAddr(addr addrs.Module) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go new file mode 100644 index 00000000000..97c77bdbd00 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go new file mode 100644 index 00000000000..36e295b6f20 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go @@ -0,0 +1,141 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// Graph represents the graph that Terraform uses to represent resources +// and their dependencies. +type Graph struct { + // Graph is the actual DAG. This is embedded so you can call the DAG + // methods directly. + dag.AcyclicGraph + + // Path is the path in the module tree that this Graph represents. + Path addrs.ModuleInstance + + // debugName is a name for reference in the debug output. This is usually + // to indicate what the topmost builder was, and if this graph is a shadow or + // not. + debugName string +} + +func (g *Graph) DirectedGraph() dag.Grapher { + return &g.AcyclicGraph +} + +// Walk walks the graph with the given walker for callbacks. The graph +// will be walked with full parallelism, so the walker should expect +// to be called concurrently. 
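// Editor's sketch (illustrative; not the vendored dag package): the Walk
// contract documented above (full parallelism, callbacks invoked
// concurrently) can be modeled with a per-vertex count of unmet dependencies
// and a goroutine per ready vertex. The example graph is hypothetical.
package main

import (
	"fmt"
	"sync"
)

// walk visits every vertex after all of its dependencies have completed;
// independent vertices are visited concurrently.
func walk(deps map[string][]string, visit func(string)) {
	remaining := make(map[string]int)       // unmet dependency count
	dependents := make(map[string][]string) // reverse edges
	for v, ds := range deps {
		remaining[v] = len(ds)
		for _, d := range ds {
			dependents[d] = append(dependents[d], v)
		}
	}

	var mu sync.Mutex
	var wg sync.WaitGroup

	var start func(v string)
	start = func(v string) {
		defer wg.Done()
		visit(v) // runs outside the lock, possibly alongside other visits
		mu.Lock()
		defer mu.Unlock()
		for _, dep := range dependents[v] {
			remaining[dep]--
			if remaining[dep] == 0 {
				wg.Add(1)
				go start(dep)
			}
		}
	}

	mu.Lock()
	for v, n := range remaining {
		if n == 0 {
			wg.Add(1)
			go start(v)
		}
	}
	mu.Unlock()
	wg.Wait()
}

func main() {
	// The provider must be visited before either resource; the two
	// resources may then run concurrently.
	deps := map[string][]string{
		"provider":   nil,
		"resource.a": {"provider"},
		"resource.b": {"provider"},
	}
	walk(deps, func(v string) { fmt.Println("visit", v) })
}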
+func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { + return g.walk(walker) +} + +func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { + // The callbacks for enter/exiting a graph + ctx := walker.EnterPath(g.Path) + defer walker.ExitPath(g.Path) + + // Get the path for logs + path := ctx.Path().String() + + debugName := "walk-graph.json" + if g.debugName != "" { + debugName = g.debugName + "-" + debugName + } + + // Walk the graph. + var walkFn dag.WalkFunc + walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { + log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) + g.DebugVisitInfo(v, g.debugName) + + defer func() { + log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) + }() + + walker.EnterVertex(v) + defer walker.ExitVertex(v, diags) + + // vertexCtx is the context that we use when evaluating. This + // is normally the context of our graph but can be overridden + // with a GraphNodeSubPath impl. + vertexCtx := ctx + if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { + vertexCtx = walker.EnterPath(pn.Path()) + defer walker.ExitPath(pn.Path()) + } + + // If the node is eval-able, then evaluate it. + if ev, ok := v.(GraphNodeEvalable); ok { + tree := ev.EvalTree() + if tree == nil { + panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) + } + + // Allow the walker to change our tree if needed. Eval, + // then callback with the output. + log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) + + tree = walker.EnterEvalTree(v, tree) + output, err := Eval(tree, vertexCtx) + diags = diags.Append(walker.ExitEvalTree(v, output, err)) + if diags.HasErrors() { + return + } + } + + // If the node is dynamically expanded, then expand it + if ev, ok := v.(GraphNodeDynamicExpandable); ok { + log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) + + g, err := ev.DynamicExpand(vertexCtx) + if err != nil { + diags = diags.Append(err) + return + } + if g != nil { + // Walk the subgraph + log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) + subDiags := g.walk(walker) + diags = diags.Append(subDiags) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) + return + } + log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) + } + } + + // If the node has a subgraph, then walk the subgraph + if sn, ok := v.(GraphNodeSubgraph); ok { + log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) + + subDiags := sn.Subgraph().(*Graph).walk(walker) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) + return + } + log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) + } + + return + } + + return g.AcyclicGraph.Walk(walkFn) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go new file mode 100644 index 00000000000..ee2c5857afb --- /dev/null +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go @@ -0,0 +1,85 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// GraphBuilder is an interface that can be implemented and used with +// Terraform to build the graph that Terraform walks. +type GraphBuilder interface { + // Build builds the graph for the given module path. It is up to + // the interface implementation whether this build should expand + // the graph or not. + Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) +} + +// BasicGraphBuilder is a GraphBuilder that builds a graph out of a +// series of transforms and (optionally) validates the graph is a valid +// structure. +type BasicGraphBuilder struct { + Steps []GraphTransformer + Validate bool + // Optional name to add to the graph debug log + Name string +} + +func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + g := &Graph{Path: path} + + var lastStepStr string + for _, step := range b.Steps { + if step == nil { + continue + } + log.Printf("[TRACE] Executing graph transform %T", step) + + stepName := fmt.Sprintf("%T", step) + dot := strings.LastIndex(stepName, ".") + if dot >= 0 { + stepName = stepName[dot+1:] + } + + debugOp := g.DebugOperation(stepName, "") + err := step.Transform(g) + + errMsg := "" + if err != nil { + errMsg = err.Error() + } + debugOp.End(errMsg) + + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] Completed graph transform %T (no changes)", step) + } + + if err != nil { + if nf, isNF := err.(tfdiags.NonFatalError); isNF { + diags = diags.Append(nf.Diagnostics) + } else { + diags = diags.Append(err) + return g, diags + } + } + } + + // Validate the graph structure + if b.Validate { + if err := g.Validate(); err != nil { + log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) + diags = diags.Append(err) + return nil, diags + } + } + + return g, diags +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go new file mode 100644 index 00000000000..a4041d13376 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go @@ -0,0 +1,212 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ApplyGraphBuilder implements GraphBuilder and is responsible for building +// a graph for applying a Terraform diff. +// +// Because the graph is built from the diff (vs. the config or state), +// this helps ensure that the apply-time graph doesn't modify any resources +// that aren't explicitly in the diff. 
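// Editor's sketch (illustrative; hypothetical minimal types, not the vendored
// source): the BasicGraphBuilder.Build loop above is the builder pattern in
// miniature: apply an ordered list of transforms to one mutable graph and
// stop at the first fatal error.
package main

import "fmt"

type graph struct{ nodes []string }

// transformer mirrors the role of GraphTransformer: each step mutates the
// graph in place and may fail.
type transformer interface {
	transform(g *graph) error
}

type addNode string

func (a addNode) transform(g *graph) error {
	g.nodes = append(g.nodes, string(a))
	return nil
}

func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if err := step.transform(g); err != nil {
			return nil, fmt.Errorf("transform %T failed: %w", step, err)
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{addNode("root"), addNode("provider")})
	fmt.Println(g.nodes, err) // [root provider] <nil>
}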
There are other scenarios where the +// diff can deviate, so this is just one layer of protection. +type ApplyGraphBuilder struct { + // Config is the configuration tree that the diff was built from. + Config *configs.Config + + // Changes describes the changes that we need to apply. + Changes *plans.Changes + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target. This is only required to make sure + // unnecessary outputs aren't included in the apply graph. The plan + // builder successfully handles targeting resources. In the future, + // outputs should go into the diff so that this is unnecessary. + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Destroy, if true, represents a pure destroy operation + Destroy bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "ApplyGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeApplyableResource{ + NodeAbstractResource: a, + } + } + + concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeDestroyResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config. During apply, + // we use this just to ensure that the whole-resource metadata is + // updated to reflect things such as whether the count argument is + // set in config, or which provider configuration manages each resource. + &ConfigTransformer{ + Concrete: concreteResource, + Config: b.Config, + }, + + // Creates all the resource instances represented in the diff, along + // with dependency edges against the whole-resource nodes added by + // ConfigTransformer above. + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: b.State, + Changes: b.Changes, + }, + + // Creates extra cleanup nodes for any entire resources that are + // no longer present in config, so we can make sure we clean up the + // leftover empty resource states after the instances have been + // destroyed. + // (We don't track this particular type of change in the plan because + // it's just cleanup of our own state object, and so doesn't affect + // any real remote objects or consumable outputs.) 
+ &OrphanResourceTransformer{ + Concrete: concreteOrphanResource, + Config: b.Config, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{Config: b.Config, State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Destruction ordering + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, + GraphTransformIf( + func() bool { return !b.Destroy }, + &CBDEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, + ), + + // Provisioner-related transformations + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + // add providers + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Handle destroy time transformations for output and local values. + // Reverse the edges from outputs and locals, so that + // interpolations don't fail during destroy. + // Create a destroy node for outputs to remove them from the state. + // Prune unreferenced values, which may have interpolations that can't + // be resolved. + GraphTransformIf( + func() bool { return b.Destroy }, + GraphTransformMulti( + &DestroyValueReferenceTransformer{}, + &DestroyOutputTransformer{}, + &PruneUnusedValuesTransformer{}, + ), + ), + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{ + Config: b.Config, + }, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go new file mode 100644 index 00000000000..32fe5f97359 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go @@ -0,0 +1,97 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for +// planning a pure-destroy. 
+// +// Planning a pure destroy operation is simple because we can ignore most +// ordering configuration and simply reverse the state. +type DestroyPlanGraphBuilder struct { + // Config is the configuration tree to build the plan from. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "DestroyPlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlanDestroyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates nodes for the resource instances tracked in the state. + &StateTransformer{ + ConcreteCurrent: concreteResourceInstance, + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Destruction ordering. We require this only so that + // targeting below will prune the correct things. + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, + + // Target. Note we don't set "Destroy: true" here since we already + // created proper destroy ordering. + &TargetsTransformer{Targets: b.Targets}, + + // Single root + &RootTransformer{}, + } + + return steps +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go new file mode 100644 index 00000000000..8a0bcf5ba9e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go @@ -0,0 +1,108 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable +// for evaluating in-memory values (input variables, local values, output +// values) in the state without any other side-effects. 
+// +// This graph is used only in weird cases, such as the "terraform console" +// CLI command, where we need to evaluate expressions against the state +// without taking any other actions. +// +// The generated graph will include nodes for providers, resources, etc +// just to allow indirect dependencies to be resolved, but these nodes will +// not take any actions themselves since we assume that their parts of the +// state, if any, are already complete. +// +// Although the providers are never configured, they must still be available +// in order to obtain schema information used for type checking, etc. +type EvalGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "EvalGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Steps() []GraphTransformer { + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeEvalableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Concrete: nil, // just use the abstract type + Config: b.Config, + Unique: true, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Although we don't configure providers, we do still start them up + // to get their schemas, and so we must shut them down again here. + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Remove redundant edges to simplify the graph. 
+ &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go new file mode 100644 index 00000000000..dcbb10e6088 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go @@ -0,0 +1,100 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportGraphBuilder implements GraphBuilder and is responsible for building +// a graph for importing resources into Terraform. This is a much, much +// simpler graph than a normal configuration graph. +type ImportGraphBuilder struct { + // ImportTargets are the list of resources to import. + ImportTargets []*ImportTarget + + // Config is the configuration to build the graph from. See ImportOpts.Config. + Config *configs.Config + + // Components is the factory for our available plugin components. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// Build builds the graph according to the steps returned by Steps. +func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "ImportGraphBuilder", + }).Build(path) +} + +// Steps returns the ordered list of GraphTransformers that must be executed +// to build a complete graph. +func (b *ImportGraphBuilder) Steps() []GraphTransformer { + // Get the module. If we don't have one, we just use an empty tree + // so that the transform still works but does nothing. + config := b.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Create all our resources from the configuration and state + &ConfigTransformer{Config: config}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add the import steps + &ImportStateTransformer{Targets: b.ImportTargets}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), + + // This validates that the providers only depend on variables + &ImportProviderValidateTransformer{}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. 
+ &ReferenceTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Optimize + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go new file mode 100644 index 00000000000..bcd119b39d3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go @@ -0,0 +1,204 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// PlanGraphBuilder implements GraphBuilder and is responsible for building +// a graph for planning (creating a Terraform Diff). +// +// The primary difference between this graph and others: +// +// * Based on the config since it represents the target state +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type PlanGraphBuilder struct { + // Config is the configuration tree to build a plan from. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool + + // CustomConcrete can be set to customize the node types created + // for various parts of the plan. This is useful in order to customize + // the plan behavior. 
+ CustomConcrete bool + ConcreteProvider ConcreteProviderNodeFunc + ConcreteResource ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc + + once sync.Once +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "PlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Steps() []GraphTransformer { + b.once.Do(b.init) + + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config + &ConfigTransformer{ + Concrete: b.ConcreteResource, + Config: b.Config, + }, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add orphan resources + &OrphanResourceInstanceTransformer{ + Concrete: b.ConcreteResourceOrphan, + State: b.State, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can plan to destroy them. (This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) + &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{ + Config: b.Config, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, + + // Add module variables + &ModuleVariableTransformer{ + Config: b.Config, + }, + + TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{ + Config: b.Config, + }, + + // Target + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. + &ForcedCBDTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). 
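// Editor's sketch (illustrative; not the vendored implementation): the
// transitive reduction mentioned above drops every edge a→c for which a
// longer path a→…→c already exists, so dependency order is preserved with
// fewer edges. A minimal version over an adjacency map:
package main

import "fmt"

// reachableWithoutDirect reports whether 'to' can be reached from 'from'
// while ignoring the direct edge from→to itself.
func reachableWithoutDirect(adj map[string][]string, from, to string) bool {
	seen := make(map[string]bool)
	var dfs func(v string) bool
	dfs = func(v string) bool {
		if v == to {
			return true
		}
		seen[v] = true
		for _, w := range adj[v] {
			if v == from && w == to {
				continue // skip the edge under test
			}
			if !seen[w] && dfs(w) {
				return true
			}
		}
		return false
	}
	return dfs(from)
}

func reduce(adj map[string][]string) map[string][]string {
	out := make(map[string][]string)
	for v, ws := range adj {
		for _, w := range ws {
			if !reachableWithoutDirect(adj, v, w) {
				out[v] = append(out[v], w) // keep only non-redundant edges
			}
		}
	}
	return out
}

func main() {
	// a→b, b→c, plus a redundant a→c that the reduction removes.
	adj := map[string][]string{"a": {"b", "c"}, "b": {"c"}}
	fmt.Println(reduce(adj)) // map[a:[b] b:[c]]
}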
+ steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} + +func (b *PlanGraphBuilder) init() { + // Do nothing if the user requests customizing the fields + if b.CustomConcrete { + return + } + + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResource{ + NodeAbstractResource: a, + } + } + + b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go new file mode 100644 index 00000000000..fad7bf161fc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go @@ -0,0 +1,194 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// RefreshGraphBuilder implements GraphBuilder and is responsible for building +// a graph for refreshing (updating the Terraform state). +// +// The primary difference between this graph and others: +// +// * Based on the state since it represents the only resources that +// need to be refreshed. +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type RefreshGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the prior state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "RefreshGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. 
+	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableManagedResource{
+			NodeAbstractResource: a,
+		}
+	}
+
+	concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
+		return &NodeRefreshableManagedResourceInstance{
+			NodeAbstractResourceInstance: a,
+		}
+	}
+
+	concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
+		// The "Plan" node type also handles refreshing behavior.
+		return &NodePlanDeposedResourceInstanceObject{
+			NodeAbstractResourceInstance: a,
+			DeposedKey:                   key,
+		}
+	}
+
+	concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableDataResource{
+			NodeAbstractResource: a,
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Creates all the managed resources that aren't in the state, but only if
+		// we have a state already. No resources in state means there's nothing
+		// to refresh.
+		func() GraphTransformer {
+			if b.State.HasResources() {
+				return &ConfigTransformer{
+					Concrete:   concreteManagedResource,
+					Config:     b.Config,
+					Unique:     true,
+					ModeFilter: true,
+					Mode:       addrs.ManagedResourceMode,
+				}
+			}
+			log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer")
+			return nil
+		}(),
+
+		// Creates all the data resources that aren't in the state. This will also
+		// add any orphans from scaling in as destroy nodes.
+		&ConfigTransformer{
+			Concrete:   concreteDataResource,
+			Config:     b.Config,
+			Unique:     true,
+			ModeFilter: true,
+			Mode:       addrs.DataResourceMode,
+		},
+
+		// Add any fully-orphaned resources from config (ones that have been
+		// removed completely, not ones that are just orphaned due to a scaled-in
+		// count).
+		&OrphanResourceInstanceTransformer{
+			Concrete: concreteManagedResourceInstance,
+			State:    b.State,
+			Config:   b.Config,
+		},
+
+		// We also need nodes for any deposed instance objects present in the
+		// state, so we can check if they still exist. (This intentionally
+		// skips creating nodes for _current_ objects, since ConfigTransformer
+		// created nodes that will do that during DynamicExpand.)
+		&StateTransformer{
+			ConcreteDeposed: concreteResourceInstanceDeposed,
+			State:           b.State,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: b.State},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Config: b.Config},
+
+		// Add root variables
+		&RootVariableTransformer{Config: b.Config},
+
+		// Add the local values
+		&LocalTransformer{Config: b.Config},
+
+		// Add the outputs
+		&OutputTransformer{Config: b.Config},
+
+		// Add module variables
+		&ModuleVariableTransformer{Config: b.Config},
+
+		TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
+
+		// Must attach schemas before ReferenceTransformer so that we can
+		// analyze the configuration to find references.
+		&AttachSchemaTransformer{Schemas: b.Schemas},
+
+		// Connect so that the references are ready for targeting. We'll
+		// have to connect again later for providers and so on.
+		&ReferenceTransformer{},
+
+		// Target
+		&TargetsTransformer{
+			Targets: b.Targets,
+
+			// Resource nodes from config have not yet been expanded for
+			// "count", so we must apply targeting without indices. Exact
+			// targeting will be dealt with later when these resources
+			// DynamicExpand.
+ IgnoreIndices: true, + }, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go new file mode 100644 index 00000000000..0aa8b915a90 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go @@ -0,0 +1,34 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ValidateGraphBuilder creates the graph for the validate operation. +// +// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that +// we only have to validate what we'd normally plan anyways. The +// PlanGraphBuilder given will be modified so it shouldn't be used for anything +// else after calling this function. +func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { + // We're going to customize the concrete functions + p.CustomConcrete = true + + // Set the provider to the normal provider. This will ask for input. + p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodeValidatableResource{ + NodeAbstractResource: a, + } + } + + // We purposely don't set any other concrete types since they don't + // require validation. + + return p +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go new file mode 100644 index 00000000000..5dbf415ffd4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go @@ -0,0 +1,9 @@ +package terraform + +import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + +// GraphDot returns the dot formatting of a visual representation of +// the given Terraform graph. +func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { + return string(g.Dot(opts)), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go new file mode 100644 index 00000000000..a005ea5a0ae --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go @@ -0,0 +1,11 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// GraphNodeSubPath says that a node is part of a graph with a +// different path, and the context should be adjusted accordingly. 
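+//
+// A minimal sketch of an implementation, for a hypothetical node type that
+// always belongs to the root module, would be:
+//
+//	func (n *myRootNode) Path() addrs.ModuleInstance {
+//		return addrs.RootModuleInstance
+//	}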
+type GraphNodeSubPath interface { + Path() addrs.ModuleInstance +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go new file mode 100644 index 00000000000..d699376f2f3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go @@ -0,0 +1,32 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// GraphWalker is an interface that can be implemented that when used +// with Graph.Walk will invoke the given callbacks under certain events. +type GraphWalker interface { + EnterPath(addrs.ModuleInstance) EvalContext + ExitPath(addrs.ModuleInstance) + EnterVertex(dag.Vertex) + ExitVertex(dag.Vertex, tfdiags.Diagnostics) + EnterEvalTree(dag.Vertex, EvalNode) EvalNode + ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics +} + +// NullGraphWalker is a GraphWalker implementation that does nothing. +// This can be embedded within other GraphWalker implementations for easily +// implementing all the required functions. +type NullGraphWalker struct{} + +func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} +func (NullGraphWalker) EnterVertex(dag.Vertex) {} +func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} +func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } +func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go new file mode 100644 index 00000000000..11fb2fd01e4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go @@ -0,0 +1,157 @@ +package terraform + +import ( + "context" + "log" + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ContextGraphWalker is the GraphWalker implementation used with the +// Context struct to walk and evaluate the graph. +type ContextGraphWalker struct { + NullGraphWalker + + // Configurable values + Context *Context + State *states.SyncState // Used for safe concurrent access to state + Changes *plans.ChangesSync // Used for safe concurrent writes to changes + Operation walkOperation + StopContext context.Context + RootVariableValues InputValues + + // This is an output. Do not set this, nor read it while a graph walk + // is in progress. 
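+	// (Callers typically read this only after Graph.Walk has returned, for
+	// example by appending it to their own accumulated diagnostics.)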
+ NonFatalDiagnostics tfdiags.Diagnostics + + errorLock sync.Mutex + once sync.Once + contexts map[string]*BuiltinEvalContext + contextLock sync.Mutex + variableValues map[string]map[string]cty.Value + variableValuesLock sync.Mutex + providerCache map[string]providers.Interface + providerSchemas map[string]*ProviderSchema + providerLock sync.Mutex + provisionerCache map[string]provisioners.Interface + provisionerSchemas map[string]*configschema.Block + provisionerLock sync.Mutex +} + +func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { + w.once.Do(w.init) + + w.contextLock.Lock() + defer w.contextLock.Unlock() + + // If we already have a context for this path cached, use that + key := path.String() + if ctx, ok := w.contexts[key]; ok { + return ctx + } + + // Our evaluator shares some locks with the main context and the walker + // so that we can safely run multiple evaluations at once across + // different modules. + evaluator := &Evaluator{ + Meta: w.Context.meta, + Config: w.Context.config, + Operation: w.Operation, + State: w.State, + Changes: w.Changes, + Schemas: w.Context.schemas, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + } + + ctx := &BuiltinEvalContext{ + StopContext: w.StopContext, + PathValue: path, + Hooks: w.Context.hooks, + InputValue: w.Context.uiInput, + Components: w.Context.components, + Schemas: w.Context.schemas, + ProviderCache: w.providerCache, + ProviderInputConfig: w.Context.providerInputConfig, + ProviderLock: &w.providerLock, + ProvisionerCache: w.provisionerCache, + ProvisionerLock: &w.provisionerLock, + ChangesValue: w.Changes, + StateValue: w.State, + Evaluator: evaluator, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + } + + w.contexts[key] = ctx + return ctx +} + +func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { + log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) + + // Acquire a lock on the semaphore + w.Context.parallelSem.Acquire() + + // We want to filter the evaluation tree to only include operations + // that belong in this operation. + return EvalFilter(n, EvalNodeFilterOp(w.Operation)) +} + +func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { + log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) + + // Release the semaphore + w.Context.parallelSem.Release() + + if err == nil { + return nil + } + + // Acquire the lock because anything is going to require a lock. + w.errorLock.Lock() + defer w.errorLock.Unlock() + + // If the error is non-fatal then we'll accumulate its diagnostics in our + // non-fatal list, rather than returning it directly, so that the graph + // walk can continue. + if nferr, ok := err.(tfdiags.NonFatalError); ok { + log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) + w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) + return nil + } + + // Otherwise, we'll let our usual diagnostics machinery figure out how to + // unpack this as one or more diagnostic messages and return that. If we + // get down here then the returned diagnostics will contain at least one + // error, causing the graph walk to halt. 
+ var diags tfdiags.Diagnostics + diags = diags.Append(err) + return diags +} + +func (w *ContextGraphWalker) init() { + w.contexts = make(map[string]*BuiltinEvalContext) + w.providerCache = make(map[string]providers.Interface) + w.providerSchemas = make(map[string]*ProviderSchema) + w.provisionerCache = make(map[string]provisioners.Interface) + w.provisionerSchemas = make(map[string]*configschema.Block) + w.variableValues = make(map[string]map[string]cty.Value) + + // Populate root module variable values. Other modules will be populated + // during the graph walk. + w.variableValues[""] = make(map[string]cty.Value) + for k, iv := range w.RootVariableValues { + w.variableValues[""][k] = iv.Value + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go new file mode 100644 index 00000000000..859f6fb121c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go @@ -0,0 +1,18 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go + +// walkOperation is an enum which tells the walkContext what to do. +type walkOperation byte + +const ( + walkInvalid walkOperation = iota + walkApply + walkPlan + walkPlanDestroy + walkRefresh + walkValidate + walkDestroy + walkImport + walkEval // used just to prepare EvalContext for expression evaluation, with no other actions +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go new file mode 100644 index 00000000000..b51e1a26617 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[GraphTypeInvalid-0]
+	_ = x[GraphTypeLegacy-1]
+	_ = x[GraphTypeRefresh-2]
+	_ = x[GraphTypePlan-3]
+	_ = x[GraphTypePlanDestroy-4]
+	_ = x[GraphTypeApply-5]
+	_ = x[GraphTypeValidate-6]
+	_ = x[GraphTypeEval-7]
+}
+
+const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval"
+
+var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124}
+
+func (i GraphType) String() string {
+	if i >= GraphType(len(_GraphType_index)-1) {
+		return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go
new file mode 100644
index 00000000000..b5be9482436
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go
@@ -0,0 +1,145 @@
+package terraform
+
+import (
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+)
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+	// HookActionContinue continues with processing as usual.
+	HookActionContinue HookAction = iota
+
+	// HookActionHalt halts immediately: no more hooks are processed
+	// and the action that Terraform was about to take is cancelled.
+	HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+	// PreApply and PostApply are called before and after an action for a
+	// single instance is applied. The error argument in PostApply is the
+	// error, if any, that was returned from the provider Apply call itself.
+	PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
+	PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error)
+
+	// PreDiff and PostDiff are called before and after a provider is given
+	// the opportunity to customize the proposed new state to produce the
+	// planned new state.
+	PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error)
+	PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
+
+	// The provisioning hooks signal both the overall start and end of
+	// provisioning for a particular instance and of each of the individual
+	// configured provisioners for each instance. 
The sequence of these + // for a given instance might look something like this: + // + // PreProvisionInstance(aws_instance.foo[1], ...) + // PreProvisionInstanceStep(aws_instance.foo[1], "file") + // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) + // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") + // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) + // PostProvisionInstance(aws_instance.foo[1], ...) + // + // ProvisionOutput is called with output sent back by the provisioners. + // This will be called multiple times as output comes in, with each call + // representing one line of output. It cannot control whether the + // provisioner continues running. + PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) + PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) + ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) + + // PreRefresh and PostRefresh are called before and after a single + // resource state is refreshed, respectively. + PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) + PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) + + // PreImportState and PostImportState are called before and after + // (respectively) each state import operation for a given resource address. + PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PostStateUpdate is called each time the state is updated. It receives + // a deep copy of the state, which it may therefore access freely without + // any need for locks to protect from concurrent writes from the caller. + PostStateUpdate(new *states.State) (HookAction, error) +} + +// NilHook is a Hook implementation that does nothing. It exists only to +// simplify implementing hooks. You can embed this into your Hook implementation +// and only implement the functions you are interested in. 
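+//
+// For example, a hypothetical hook that only logs applies could embed
+// NilHook and override a single method:
+//
+//	type applyLogger struct {
+//		NilHook
+//	}
+//
+//	func (h *applyLogger) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
+//		log.Printf("[INFO] applying %s", addr)
+//		return HookActionContinue, nil
+//	}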
+type NilHook struct{} + +var _ Hook = (*NilHook)(nil) + +func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { + return HookActionContinue, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go new file mode 100644 index 00000000000..74a29bde0e6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go @@ -0,0 +1,274 @@ +package terraform + +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// MockHook is an implementation of Hook that can be used for tests. +// It records all of its function calls. 
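+//
+// A test might use it roughly like this (illustrative sketch only):
+//
+//	h := new(MockHook)
+//	// ... run the operation under test with h registered as a hook ...
+//	if !h.PreApplyCalled {
+//		t.Fatal("expected PreApply to be called")
+//	}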
+type MockHook struct { + sync.Mutex + + PreApplyCalled bool + PreApplyAddr addrs.AbsResourceInstance + PreApplyGen states.Generation + PreApplyAction plans.Action + PreApplyPriorState cty.Value + PreApplyPlannedState cty.Value + PreApplyReturn HookAction + PreApplyError error + + PostApplyCalled bool + PostApplyAddr addrs.AbsResourceInstance + PostApplyGen states.Generation + PostApplyNewState cty.Value + PostApplyError error + PostApplyReturn HookAction + PostApplyReturnError error + PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) + + PreDiffCalled bool + PreDiffAddr addrs.AbsResourceInstance + PreDiffGen states.Generation + PreDiffPriorState cty.Value + PreDiffProposedState cty.Value + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffAddr addrs.AbsResourceInstance + PostDiffGen states.Generation + PostDiffAction plans.Action + PostDiffPriorState cty.Value + PostDiffPlannedState cty.Value + PostDiffReturn HookAction + PostDiffError error + + PreProvisionInstanceCalled bool + PreProvisionInstanceAddr addrs.AbsResourceInstance + PreProvisionInstanceState cty.Value + PreProvisionInstanceReturn HookAction + PreProvisionInstanceError error + + PostProvisionInstanceCalled bool + PostProvisionInstanceAddr addrs.AbsResourceInstance + PostProvisionInstanceState cty.Value + PostProvisionInstanceReturn HookAction + PostProvisionInstanceError error + + PreProvisionInstanceStepCalled bool + PreProvisionInstanceStepAddr addrs.AbsResourceInstance + PreProvisionInstanceStepProvisionerType string + PreProvisionInstanceStepReturn HookAction + PreProvisionInstanceStepError error + + PostProvisionInstanceStepCalled bool + PostProvisionInstanceStepAddr addrs.AbsResourceInstance + PostProvisionInstanceStepProvisionerType string + PostProvisionInstanceStepErrorArg error + PostProvisionInstanceStepReturn HookAction + PostProvisionInstanceStepError error + + ProvisionOutputCalled bool + ProvisionOutputAddr addrs.AbsResourceInstance + ProvisionOutputProvisionerType string + ProvisionOutputMessage string + + PreRefreshCalled bool + PreRefreshAddr addrs.AbsResourceInstance + PreRefreshGen states.Generation + PreRefreshPriorState cty.Value + PreRefreshReturn HookAction + PreRefreshError error + + PostRefreshCalled bool + PostRefreshAddr addrs.AbsResourceInstance + PostRefreshGen states.Generation + PostRefreshPriorState cty.Value + PostRefreshNewState cty.Value + PostRefreshReturn HookAction + PostRefreshError error + + PreImportStateCalled bool + PreImportStateAddr addrs.AbsResourceInstance + PreImportStateID string + PreImportStateReturn HookAction + PreImportStateError error + + PostImportStateCalled bool + PostImportStateAddr addrs.AbsResourceInstance + PostImportStateNewStates []providers.ImportedResource + PostImportStateReturn HookAction + PostImportStateError error + + PostStateUpdateCalled bool + PostStateUpdateState *states.State + PostStateUpdateReturn HookAction + PostStateUpdateError error +} + +var _ Hook = (*MockHook)(nil) + +func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreApplyCalled = true + h.PreApplyAddr = addr + h.PreApplyGen = gen + h.PreApplyAction = action + h.PreApplyPriorState = priorState + h.PreApplyPlannedState = plannedNewState + return h.PreApplyReturn, h.PreApplyError +} + +func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostApplyCalled = true + h.PostApplyAddr = addr + h.PostApplyGen = gen + h.PostApplyNewState = newState + h.PostApplyError = err + + if h.PostApplyFn != nil { + return h.PostApplyFn(addr, gen, newState, err) + } + + return h.PostApplyReturn, h.PostApplyReturnError +} + +func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreDiffCalled = true + h.PreDiffAddr = addr + h.PreDiffGen = gen + h.PreDiffPriorState = priorState + h.PreDiffProposedState = proposedNewState + return h.PreDiffReturn, h.PreDiffError +} + +func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostDiffCalled = true + h.PostDiffAddr = addr + h.PostDiffGen = gen + h.PostDiffAction = action + h.PostDiffPriorState = priorState + h.PostDiffPlannedState = plannedNewState + return h.PostDiffReturn, h.PostDiffError +} + +func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionInstanceCalled = true + h.PreProvisionInstanceAddr = addr + h.PreProvisionInstanceState = state + return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError +} + +func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionInstanceCalled = true + h.PostProvisionInstanceAddr = addr + h.PostProvisionInstanceState = state + return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError +} + +func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionInstanceStepCalled = true + h.PreProvisionInstanceStepAddr = addr + h.PreProvisionInstanceStepProvisionerType = typeName + return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError +} + +func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionInstanceStepCalled = true + h.PostProvisionInstanceStepAddr = addr + h.PostProvisionInstanceStepProvisionerType = typeName + h.PostProvisionInstanceStepErrorArg = err + return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError +} + +func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { + h.Lock() + defer h.Unlock() + + h.ProvisionOutputCalled = true + h.ProvisionOutputAddr = addr + h.ProvisionOutputProvisionerType = typeName + h.ProvisionOutputMessage = line +} + +func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreRefreshCalled = true + h.PreRefreshAddr = addr + h.PreRefreshGen = gen + h.PreRefreshPriorState = priorState + return h.PreRefreshReturn, h.PreRefreshError +} + +func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostRefreshCalled = true + h.PostRefreshAddr = addr + h.PostRefreshPriorState = priorState + h.PostRefreshNewState = newState + 
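h.PostRefreshGen = gen // also record gen; the PostRefreshGen field above is otherwise never written
+ 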
return h.PostRefreshReturn, h.PostRefreshError +} + +func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreImportStateCalled = true + h.PreImportStateAddr = addr + h.PreImportStateID = importID + return h.PreImportStateReturn, h.PreImportStateError +} + +func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostImportStateCalled = true + h.PostImportStateAddr = addr + h.PostImportStateNewStates = imported + return h.PostImportStateReturn, h.PostImportStateError +} + +func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostStateUpdateCalled = true + h.PostStateUpdateState = new + return h.PostStateUpdateReturn, h.PostStateUpdateError +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go new file mode 100644 index 00000000000..42c3d20cb94 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go @@ -0,0 +1,100 @@ +package terraform + +import ( + "sync/atomic" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// stopHook is a private Hook implementation that Terraform uses to +// signal when to stop or cancel actions. +type stopHook struct { + stop uint32 +} + +var _ Hook = (*stopHook)(nil) + +func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) 
PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
+	return h.hook()
+}
+
+func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
+	return h.hook()
+}
+
+func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) {
+	return h.hook()
+}
+
+func (h *stopHook) hook() (HookAction, error) {
+	if h.Stopped() {
+		// FIXME: This should really return an error since stopping partway
+		// through is not a successful run-to-completion, but we'll need to
+		// introduce that cautiously since existing automation solutions may
+		// be depending on this behavior.
+		return HookActionHalt, nil
+	}
+
+	return HookActionContinue, nil
+}
+
+// Reset clears the stop flag so the hook can be reused. The flag is managed
+// atomically, but callers are expected to invoke Reset while holding whatever
+// lock guards their use of the hook.
+func (h *stopHook) Reset() {
+	atomic.StoreUint32(&h.stop, 0)
+}
+
+func (h *stopHook) Stop() {
+	atomic.StoreUint32(&h.stop, 1)
+}
+
+func (h *stopHook) Stopped() bool {
+	return atomic.LoadUint32(&h.stop) == 1
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go
new file mode 100644
index 00000000000..375a8638a88
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go
+
+// InstanceType is an enum of the various types of instances stored in the State
+type InstanceType int
+
+const (
+	TypeInvalid InstanceType = iota
+	TypePrimary
+	TypeTainted
+	TypeDeposed
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go
new file mode 100644
index 00000000000..95b7a9802e5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypePrimary-1] + _ = x[TypeTainted-2] + _ = x[TypeDeposed-3] +} + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go new file mode 100644 index 00000000000..f1434e62520 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go @@ -0,0 +1,202 @@ +package terraform + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ConfigTreeDependencies returns the dependencies of the tree of modules +// described by the given configuration and state. +// +// Both configuration and state are required because there can be resources +// implied by instances in the state that no longer exist in config. +func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { + // First we walk the configuration tree to build the overall structure + // and capture the explicit/implicit/inherited provider dependencies. + deps := configTreeConfigDependencies(root, nil) + + // Next we walk over the resources in the state to catch any additional + // dependencies created by existing resources that are no longer in config. + // Most things we find in state will already be present in 'deps', but + // we're interested in the rare thing that isn't. + configTreeMergeStateDependencies(deps, state) + + return deps +} + +func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { + if root == nil { + // If no config is provided, we'll make a synthetic root. + // This isn't necessarily correct if we're called with a nil that + // *isn't* at the root, but in practice that can never happen. + return &moduledeps.Module{ + Name: "root", + Providers: make(moduledeps.Providers), + } + } + + name := "root" + if len(root.Path) != 0 { + name = root.Path[len(root.Path)-1] + } + + ret := &moduledeps.Module{ + Name: name, + } + + module := root.Module + + // Provider dependencies + { + providers := make(moduledeps.Providers) + + // The main way to declare a provider dependency is explicitly inside + // the "terraform" block, which allows declaring a requirement without + // also creating a configuration. + for fullName, constraints := range module.ProviderRequirements { + inst := moduledeps.ProviderInstance(fullName) + + // The handling here is a bit fiddly because the moduledeps package + // was designed around the legacy (pre-0.12) configuration model + // and hasn't yet been revised to handle the new model. As a result, + // we need to do some translation here. 
+ // FIXME: Eventually we should adjust the underlying model so we + // can also retain the source location of each constraint, for + // more informative output from the "terraform providers" command. + var rawConstraints version.Constraints + for _, constraint := range constraints { + rawConstraints = append(rawConstraints, constraint.Required...) + } + discoConstraints := discovery.NewConstraints(rawConstraints) + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + + // Provider configurations can also include version constraints, + // allowing for more terse declaration in situations where both a + // configuration and a constraint are defined in the same module. + for fullName, pCfg := range module.ProviderConfigs { + inst := moduledeps.ProviderInstance(fullName) + discoConstraints := discovery.AllVersions + if pCfg.Version.Required != nil { + discoConstraints = discovery.NewConstraints(pCfg.Version.Required) + } + if existing, exists := providers[inst]; exists { + existing.Constraints = existing.Constraints.Append(discoConstraints) + } else { + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. + for _, rc := range module.ManagedResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.StringCompact()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + for _, rc := range module.DataResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.String()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + + ret.Providers = providers + } + + childInherit := make(map[string]*configs.Provider) + for k, v := range inheritProviders { + childInherit[k] = v + } + for k, v := range module.ProviderConfigs { + childInherit[k] = v + } + for _, c := range root.Children { + ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) + } + + return ret +} + +func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { + if state == nil { + return + } + + findModule := func(path addrs.ModuleInstance) *moduledeps.Module { + module := root + for _, step := range path { + var next *moduledeps.Module + for _, cm := range module.Children { + if cm.Name == step.Name { + next = cm + break + } + } + + if next == nil { + // If we didn't find a next node, we'll need to make one + next = &moduledeps.Module{ + Name: step.Name, + Providers: make(moduledeps.Providers), + } + module.Children = append(module.Children, next) + } + + module = next + } + return module + 
} + + for _, ms := range state.Modules { + module := findModule(ms.Addr) + + for _, rs := range ms.Resources { + inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) + if _, exists := module.Providers[inst]; !exists { + module.Providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: moduledeps.ProviderDependencyFromState, + } + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go new file mode 100644 index 00000000000..acd8262b066 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// NodeCountBoundary fixes up any transitions between "each modes" in objects +// saved in state, such as switching from NoEach to EachInt. +type NodeCountBoundary struct { + Config *configs.Config +} + +func (n *NodeCountBoundary) Name() string { + return "meta.count-boundary (EachMode fixup)" +} + +// GraphNodeEvalable +func (n *NodeCountBoundary) EvalTree() EvalNode { + return &EvalCountFixZeroOneBoundaryGlobal{ + Config: n.Config, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go new file mode 100644 index 00000000000..56a33bce2c6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": +// it is ready to be destroyed. +type NodeDestroyableDataResourceInstance struct { + *NodeAbstractResourceInstance +} + +// GraphNodeEvalable +func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var providerSchema *ProviderSchema + // We don't need the provider, but we're calling EvalGetProvider to load the + // schema. + var provider providers.Interface + + // Just destroy it. 
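+	// (state deliberately stays nil here: EvalWriteState below writes that
+	// nil object, and writing a nil object is how an instance is removed
+	// from the state.)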
+ var state *states.ResourceInstanceObject + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go new file mode 100644 index 00000000000..60bcdb74c63 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go @@ -0,0 +1,228 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// NodeRefreshableDataResource represents a resource that is "refreshable". +type NodeRefreshableDataResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) + _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) +) + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + if !countKnown { + // If the count isn't known yet, we'll skip refreshing and try expansion + // again during the plan walk. + return nil, nil + } + + forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + if !forEachKnown { + // If the for_each isn't known yet, we'll skip refreshing and try expansion + // again during the plan walk. + return nil, nil + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeRefreshableDataResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. 
+ concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and provider since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. + &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableDataResource", + } + + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} + +// NodeRefreshableDataResourceInstance represents a single resource instance +// that is refreshable. +type NodeRefreshableDataResourceInstance struct { + *NodeAbstractResourceInstance +} + +// GraphNodeEvalable +func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // These variables are the state for the eval sequence below, and are + // updated through pointers. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Always destroy the existing state first, since we must + // make sure that values from a previous read will not + // get interpolated if we end up needing to defer our + // loading until apply time. + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, // a pointer to nil, here + ProviderSchema: &providerSchema, + }, + + // EvalReadData will _attempt_ to read the data source, but may + // generate an incomplete planned object if the configuration + // includes values that won't be known until apply. + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + OutputChange: &change, + OutputConfigValue: &configVal, + OutputState: &state, + // If the config explicitly has a depends_on for this data + // source, assume the intention is to prevent refreshing ahead + // of that dependency, and therefore we need to deal with this + // resource during the apply phase. We do that by forcing this + // read to result in a plan. 
+ ForcePlanRead: len(n.Config.DependsOn) > 0, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return (*state).Status != states.ObjectPlanned, nil + }, + Then: &EvalSequence{ + Nodes: []EvalNode{ + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + &EvalUpdateStateHook{}, + }, + }, + Else: &EvalSequence{ + // We can't deal with this yet, so we'll repeat this step + // during the plan walk to produce a planned change to read + // this during the apply walk. However, we do still need to + // save the generated change and partial state so that + // results from it can be included in other data resources + // or provider configurations during the refresh walk. + // (The planned object we save in the state here will be + // pruned out at the end of the refresh walk, returning + // it back to being unset again for subsequent walks.) + Nodes: []EvalNode{ + &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + }, + }, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go new file mode 100644 index 00000000000..38681d83dbb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go @@ -0,0 +1,70 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state. +type NodeLocal struct { + Addr addrs.AbsLocalValue + Config *configs.Local +} + +var ( + _ GraphNodeSubPath = (*NodeLocal)(nil) + _ RemovableIfNotTargeted = (*NodeLocal)(nil) + _ GraphNodeReferenceable = (*NodeLocal)(nil) + _ GraphNodeReferencer = (*NodeLocal)(nil) + _ GraphNodeEvalable = (*NodeLocal)(nil) + _ dag.GraphNodeDotter = (*NodeLocal)(nil) +) + +func (n *NodeLocal) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.LocalValue} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(n.Config.Expr) + return appendResourceDestroyReferences(refs) +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalLocal{ + Addr: n.Addr.LocalValue, + Expr: n.Config.Expr, + } +} + +// dag.GraphNodeDotter impl. 
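+// (These attributes feed the DOT output produced by GraphDot; the "note"
+// shape simply distinguishes local values visually in the rendered graph.)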
+func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go new file mode 100644 index 00000000000..441d2632ade --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go @@ -0,0 +1,81 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. +type NodeModuleRemoved struct { + Addr addrs.ModuleInstance +} + +var ( + _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) + _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) + _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) + _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) +) + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", n.Addr.String()) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { + return n.Addr +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalCheckModuleRemoved{ + Addr: n.Addr, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + // Our "References" implementation indicates that this node depends on + // the call to the module it represents, which implicitly depends on + // everything inside the module. That reference must therefore be + // interpreted in terms of our parent module. + return n.Addr, n.Addr.Parent() +} + +func (n *NodeModuleRemoved) References() []*addrs.Reference { + // We depend on the call to the module we represent, because that + // implicitly then depends on everything inside that module. + // Our ReferenceOutside implementation causes this to be interpreted + // within the parent module. + + _, call := n.Addr.CallInstance() + return []*addrs.Reference{ + { + Subject: call, + + // No source range here, because there's nothing reasonable for + // us to return. + }, + } +} + +// EvalCheckModuleRemoved is an EvalNode implementation that verifies that +// a module has been removed from the state as expected. +type EvalCheckModuleRemoved struct { + Addr addrs.ModuleInstance +} + +func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { + mod := ctx.State().Module(n.Addr) + if mod != nil { + // If we get here then that indicates a bug either in the states + // module or in an earlier step of the graph walk, since we should've + // pruned out the module when the last resource was removed from it. 
+ return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) + } + return nil, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go new file mode 100644 index 00000000000..03653359cf1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/zclconf/go-cty/cty" +) + +// NodeApplyableModuleVariable represents a module variable input during +// the apply step. +type NodeApplyableModuleVariable struct { + Addr addrs.AbsInputVariableInstance + Config *configs.Variable // Config is the var in the config + Expr hcl.Expression // Expr is the value expression given in the call +} + +// Ensure that we are implementing all of the interfaces we think we are +// implementing. +var ( + _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + +func (n *NodeApplyableModuleVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { + // We execute in the parent scope (above our own module) because + // expressions in our value are resolved in that context. + return n.Addr.Module.Parent() +} + +// RemovableIfNotTargeted +func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + + // Module input variables have their value expressions defined in the + // context of their calling (parent) module, and so references from + // a node of this type should be resolved in the parent module instance. + referencePath = n.Addr.Module.Parent() + + // Input variables are _referenced_ from their own module, though. + selfPath = n.Addr.Module + + return // uses named return values +} + +// GraphNodeReferenceable +func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Variable} +} + +// GraphNodeReferencer +func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { + + // If we have no value expression, we cannot depend on anything. + if n.Expr == nil { + return nil + } + + // Variables in the root don't depend on anything, because their values + // are gathered prior to the graph walk and recorded in the context. 
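+ // (An empty module path identifies the root module, so the check below + // is equivalent to n.Addr.Module.IsRoot().)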
+ if len(n.Addr.Module) == 0 { + return nil + } + + // Otherwise, we depend on anything referenced by our value expression. + // We ignore diagnostics here under the assumption that we'll re-eval + // all these things later and catch them then; for our purposes here, + // we only care about valid references. + // + // Due to our GraphNodeReferenceOutside implementation, the addresses + // returned by this function are interpreted in the _parent_ module from + // where our associated variable was declared, which is correct because + // our value expression is assigned within a "module" block in the parent + // module. + refs, _ := lang.ReferencesInExpr(n.Expr) + return refs +} + +// GraphNodeEvalable +func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { + // If we have no value, do nothing + if n.Expr == nil { + return &EvalNoop{} + } + + // Otherwise, interpolate the value of this variable and set it + // within the variables mapping. + vals := make(map[string]cty.Value) + + _, call := n.Addr.Module.CallInstance() + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalModuleCallArgument{ + Addr: n.Addr.Variable, + Config: n.Config, + Expr: n.Expr, + Values: vals, + + IgnoreDiagnostics: false, + }, + }, + + &EvalSetModuleCallArguments{ + Module: call, + Values: vals, + }, + }, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go new file mode 100644 index 00000000000..7530571234d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go @@ -0,0 +1,200 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeApplyableOutput represents an output that is "applyable": +// it is ready to be applied. +type NodeApplyableOutput struct { + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config +} + +var ( + _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) + _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) + _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) +) + +func (n *NodeApplyableOutput) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. 
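+ // (Output nodes, like locals and module variables, are retained only + // when a targeted node depends on them.)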
+ return true +} + +// GraphNodeTargetDownstream +func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + // If any of the direct dependencies of an output are targeted then + // the output must always be targeted as well, so its value will always + // be up-to-date at the completion of an apply walk. + return true +} + +func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { + + // Output values have their expressions resolved in the context of the + // module where they are defined. + referencePath = addr.Module + + // ...but they are referenced in the context of their calling module. + selfPath = addr.Module.Parent() + + return // uses named return values + +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { + // An output in the root module can't be referenced at all. + if addr.Module.IsRoot() { + return nil + } + + // Otherwise, we can be referenced via a reference to our output name + // on the parent module's call, or via a reference to the entire call. + // e.g. module.foo.bar or just module.foo . + // Note that our ReferenceOutside method causes these addresses to be + // relative to the calling module, not the module where the output + // was declared. + _, outp := addr.ModuleCallOutput() + _, call := addr.Module.CallInstance() + return []addrs.Referenceable{outp, call} + +} + +// GraphNodeReferenceable +func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) +} + +func referencesForOutput(c *configs.Output) []*addrs.Reference { + impRefs, _ := lang.ReferencesInExpr(c.Expr) + expRefs, _ := lang.References(c.DependsOn) + l := len(impRefs) + len(expRefs) + if l == 0 { + return nil + } + refs := make([]*addrs.Reference, 0, l) + refs = append(refs, impRefs...) + refs = append(refs, expRefs...) + return refs + +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []*addrs.Reference { + return appendResourceDestroyReferences(referencesForOutput(n.Config)) +} + +// GraphNodeEvalable +func (n *NodeApplyableOutput) EvalTree() EvalNode { + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, + Node: &EvalWriteOutput{ + Addr: n.Addr.OutputValue, + Sensitive: n.Config.Sensitive, + Expr: n.Config.Expr, + }, + }, + }, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} + +// NodeDestroyableOutput represents an output that is "destroyable": +// its application will remove the output from the state.
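+// (Contrast NodeOutputOrphan below, which instead handles outputs that are +// no longer present in the configuration at all.)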
+type NodeDestroyableOutput struct { + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config +} + +var ( + _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) + _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) + _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) +) + +func (n *NodeDestroyableOutput) Name() string { + return fmt.Sprintf("%s (destroy)", n.Addr.String()) +} + +// GraphNodeSubPath +func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// This will keep the destroy node in the graph if its corresponding output +// node is also in the destroy graph. +func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + return true +} + +// GraphNodeReferencer +func (n *NodeDestroyableOutput) References() []*addrs.Reference { + return referencesForOutput(n.Config) +} + +// GraphNodeEvalable +func (n *NodeDestroyableOutput) EvalTree() EvalNode { + return &EvalDeleteOutput{ + Addr: n.Addr.OutputValue, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go new file mode 100644 index 00000000000..a76d1742ce9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeOutputOrphan represents an output that is an orphan. 
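+// That is, the output still exists in the state but its definition has been +// removed from the configuration, so its stored value must be deleted.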
+type NodeOutputOrphan struct { + Addr addrs.AbsOutputValue +} + +var ( + _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) + _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) +) + +func (n *NodeOutputOrphan) Name() string { + return fmt.Sprintf("%s (orphan)", n.Addr.String()) +} + +// GraphNodeReferenceOutside implementation +func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +// GraphNodeReferenceable +func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) +} + +// GraphNodeSubPath +func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable +func (n *NodeOutputOrphan) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteOutput{ + Addr: n.Addr.OutputValue, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go new file mode 100644 index 00000000000..2071ab168f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go @@ -0,0 +1,11 @@ +package terraform + +// NodeApplyableProvider represents a provider during an apply. +type NodeApplyableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeApplyableProvider) EvalTree() EvalNode { + return ProviderEvalTree(n, n.ProviderConfig()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go new file mode 100644 index 00000000000..afdd4741d2e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go @@ -0,0 +1,96 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ConcreteProviderNodeFunc is a callback type used to convert an +// abstract provider to a concrete one of some type. +type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex + +// NodeAbstractProvider represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. +type NodeAbstractProvider struct { + Addr addrs.AbsProviderConfig + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but may also be + // explicitly set if you already have that information.
+ + Config *configs.Provider + Schema *configschema.Block +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) + _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) + _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) + _ GraphNodeProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) +) + +func (n *NodeAbstractProvider) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferencer +func (n *NodeAbstractProvider) References() []*addrs.Reference { + if n.Config == nil || n.Schema == nil { + return nil + } + + return ReferencesFromConfig(n.Config.Config, n.Schema) +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.Addr +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { + if n.Config == nil { + return nil + } + + return n.Config +} + +// GraphNodeAttachProvider +func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { + n.Config = c +} + +// GraphNodeAttachProviderConfigSchema impl. +func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { + n.Schema = schema +} + +// GraphNodeDotter impl. +func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go new file mode 100644 index 00000000000..51335654bde --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// NodeDisabledProvider represents a provider that is disabled. A disabled +// provider does nothing. It exists to properly set inheritance information +// for child providers. 
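+// (Note that it deliberately does not implement GraphNodeEvalable, so a +// disabled provider is never configured during a walk.)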
+type NodeDisabledProvider struct { + *NodeAbstractProvider +} + +var ( + _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) + _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) + _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) + _ GraphNodeProvider = (*NodeDisabledProvider)(nil) + _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) + _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) +) + +func (n *NodeDisabledProvider) Name() string { + return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go new file mode 100644 index 00000000000..580e60cb7e4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go @@ -0,0 +1,20 @@ +package terraform + +// NodeEvalableProvider represents a provider during an "eval" walk. +// This special provider node type just initializes a provider and +// fetches its schema, without configuring it or otherwise interacting +// with it. +type NodeEvalableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeEvalableProvider) EvalTree() EvalNode { + addr := n.Addr + relAddr := addr.ProviderConfig + + return &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go new file mode 100644 index 00000000000..573f030d702 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeProvisioner represents a provisioner that has no associated operations. +// It registers all the common interfaces across operations for provisioners. +type NodeProvisioner struct { + NameValue string + PathValue addrs.ModuleInstance +} + +var ( + _ GraphNodeSubPath = (*NodeProvisioner)(nil) + _ GraphNodeProvisioner = (*NodeProvisioner)(nil) + _ GraphNodeEvalable = (*NodeProvisioner)(nil) +) + +func (n *NodeProvisioner) Name() string { + result := fmt.Sprintf("provisioner.%s", n.NameValue) + if len(n.PathValue) > 0 { + result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeProvisioner) Path() addrs.ModuleInstance { + return n.PathValue +} + +// GraphNodeProvisioner +func (n *NodeProvisioner) ProvisionerName() string { + return n.NameValue +} + +// GraphNodeEvalable impl.
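+// EvalTree simply initializes the provisioner so that it is available to +// any resources that reference it by name.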
+func (n *NodeProvisioner) EvalTree() EvalNode { + return &EvalInitProvisioner{Name: n.NameValue} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go new file mode 100644 index 00000000000..c7b0e3c8e75 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go @@ -0,0 +1,446 @@ +package terraform + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ConcreteResourceNodeFunc is a callback type used to convert an +// abstract resource to a concrete one of some type. +type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex + +// GraphNodeResource is implemented by any nodes that represent a resource. +// The type of operation cannot be assumed, only that this node represents +// the given resource. +type GraphNodeResource interface { + ResourceAddr() addrs.AbsResource +} + +// ConcreteResourceInstanceNodeFunc is a callback type used to convert an +// abstract resource instance to a concrete one of some type. +type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex + +// GraphNodeResourceInstance is implemented by any nodes that represent +// a resource instance. A single resource may have multiple instances if, +// for example, the "count" or "for_each" argument is used for it in +// configuration. +type GraphNodeResourceInstance interface { + ResourceInstanceAddr() addrs.AbsResourceInstance +} + +// NodeAbstractResource represents a resource that has no associated +// operations. It registers all the interfaces for a resource that are common +// across multiple operation types. +type NodeAbstractResource struct { + Addr addrs.AbsResource // Addr is the address for this resource + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but may also be + // explicitly set if you already have that information.
+ + Schema *configschema.Block // Schema for processing the configuration body + SchemaVersion uint64 // Schema version of "Schema", as decided by the provider + Config *configs.Resource // Config is the resource in the config + + ProvisionerSchemas map[string]*configschema.Block + + Targets []addrs.Targetable // Set from GraphNodeTargetable + + // The address of the provider this resource will use + ResolvedProvider addrs.AbsProviderConfig +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResource)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) + _ GraphNodeReferencer = (*NodeAbstractResource)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeResource = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) + _ GraphNodeTargetable = (*NodeAbstractResource)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) +) + +// NewNodeAbstractResource creates an abstract resource graph node for +// the given absolute resource address. +func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { + return &NodeAbstractResource{ + Addr: addr, + } +} + +// NodeAbstractResourceInstance represents a resource instance with no +// associated operations. It embeds NodeAbstractResource but additionally +// contains an instance key, used to identify one of potentially many +// instances that were created from a resource in configuration, e.g. using +// the "count" or "for_each" arguments. +type NodeAbstractResourceInstance struct { + NodeAbstractResource + InstanceKey addrs.InstanceKey + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but may also be + // explicitly set if you already have that information. + + ResourceState *states.Resource +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) +) + +// NewNodeAbstractResourceInstance creates an abstract resource instance graph +// node for the given absolute resource instance address. +func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { + // Due to the fact that we embed NodeAbstractResource, the given address + // actually ends up split between the resource address in the embedded + // object and the InstanceKey field in our own struct. The + // ResourceInstanceAddr method will stick these back together again on + // request.
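+ // For example (illustrative address), aws_instance.foo[0] is stored as + // the resource address aws_instance.foo plus the instance key 0, and + // ResourceInstanceAddr reassembles the two.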
+ return &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + Addr: addr.ContainingResource(), + }, + InstanceKey: addr.Resource.Key, + } +} + +func (n *NodeAbstractResource) Name() string { + return n.ResourceAddr().String() +} + +func (n *NodeAbstractResourceInstance) Name() string { + return n.ResourceInstanceAddr().String() +} + +// GraphNodeSubPath +func (n *NodeAbstractResource) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeReferenceable +func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Resource} +} + +// GraphNodeReferenceable +func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + addr := n.ResourceInstanceAddr() + return []addrs.Referenceable{ + addr.Resource, + + // A resource instance can also be referenced by the address of its + // containing resource, so that e.g. a reference to aws_instance.foo + // would match both aws_instance.foo[0] and aws_instance.foo[1]. + addr.ContainingResource().Resource, + } +} + +// GraphNodeReferencer +func (n *NodeAbstractResource) References() []*addrs.Reference { + // If we have a config then we prefer to use that. + if c := n.Config; c != nil { + var result []*addrs.Reference + + for _, traversal := range c.DependsOn { + ref, err := addrs.ParseRef(traversal) + if err != nil { + // We ignore this here, because this isn't a suitable place to return + // errors. This situation should be caught and rejected during + // validation. + log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err) + continue + } + + result = append(result, ref) + } + + if n.Schema == nil { + // Should never happen, but we'll log if it does so that we can + // see this easily when debugging. + log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) + } + + refs, _ := lang.ReferencesInExpr(c.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(c.ForEach) + result = append(result, refs...) + refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) + result = append(result, refs...) + if c.Managed != nil { + for _, p := range c.Managed.Provisioners { + if p.When != configs.ProvisionerWhenCreate { + continue + } + if p.Connection != nil { + refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) + result = append(result, refs...) + } + + schema := n.ProvisionerSchemas[p.Type] + if schema == nil { + log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) + } + refs, _ = lang.ReferencesInBlock(p.Config, schema) + result = append(result, refs...) + } + } + return result + } + + // Otherwise, we have no references. + return nil +} + +// GraphNodeReferencer +func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { + // If we have a configuration attached then we'll delegate to our + // embedded abstract resource, which knows how to extract dependencies + // from configuration. + if n.Config != nil { + if n.Schema == nil { + // We'll produce a log message about this out here so that + // we can include the full instance address, since the equivalent + // message in NodeAbstractResource.References cannot see it.
+ log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) + return nil + } + return n.NodeAbstractResource.References() + } + + // Otherwise, if we have state then we'll use the values stored in state + // as a fallback. + if rs := n.ResourceState; rs != nil { + if s := rs.Instance(n.InstanceKey); s != nil { + // State is still storing dependencies as old-style strings, so we'll + // need to do a little work here to massage this to the form we now + // want. + var result []*addrs.Reference + + // It is (apparently) possible for s.Current to be nil. This proved + // difficult to reproduce, so we will fix the symptom here and hope + // to find the root cause another time. + // + // https://github.com/hashicorp/terraform-plugin-sdk/issues/21407 + if s.Current == nil { + log.Printf("[WARN] no current state found for %s", n.Name()) + } else { + for _, addr := range s.Current.Dependencies { + if addr == nil { + // Should never happen; indicates a bug in the state loader + panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) + } + + // This is a little weird: we need to manufacture an addrs.Reference + // with a fake range here because the state isn't something we can + // make source references into. + result = append(result, &addrs.Reference{ + Subject: addr, + SourceRange: tfdiags.SourceRange{ + Filename: "(state file)", + }, + }) + } + } + return result + } + } + + // If we have neither config nor state then we have no references. + return nil +} + +// StateReferences returns the dependencies to put into the state for +// this resource. +func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { + selfAddrs := n.ReferenceableAddrs() + + // Since we don't include the source location references in our + // results from this method, we'll also filter out duplicates: + // there's no point in listing the same object twice without + // that additional context. + seen := map[string]struct{}{} + + // Pretend that we've already "seen" all of our own addresses so that we + // won't record self-references in the state. This can arise if, for + // example, a provisioner for a resource refers to the resource itself, + // which is valid (since provisioners always run after apply) but should + // not create an explicit dependency edge. + for _, selfAddr := range selfAddrs { + seen[selfAddr.String()] = struct{}{} + if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { + seen[riAddr.ContainingResource().String()] = struct{}{} + } + } + + depsRaw := n.References() + deps := make([]addrs.Referenceable, 0, len(depsRaw)) + for _, d := range depsRaw { + subj := d.Subject + if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { + // For state dependencies, we simplify outputs to just refer + // to the module as a whole. It's not really clear why we do this, + // but this logic is preserved from before the 0.12 rewrite of + // this function. + subj = mco.Call + } + + k := subj.String() + if _, exists := seen[k]; exists { + continue + } + seen[k] = struct{}{} + switch tr := subj.(type) { + case addrs.ResourceInstance: + deps = append(deps, tr) + case addrs.Resource: + deps = append(deps, tr) + case addrs.ModuleCallInstance: + deps = append(deps, tr) + default: + // No other reference types are recorded in the state. + } + } + + // We'll also sort them, since that'll avoid creating changes in the + // serialized state that make no semantic difference. 
+ sort.Slice(deps, func(i, j int) bool { + // Simple string-based sort because we just care about consistency, + // not user-friendliness. + return deps[i].String() < deps[j].String() + }) + + return deps +} + +func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { + n.ResolvedProvider = p +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false + } + + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false + } + + // If we have state, then we will use the provider from there + if n.ResourceState != nil { + // An address from the state must match exactly, since we must ensure + // we refresh/destroy a resource with the same provider configuration + // that created it. + return n.ResourceState.ProviderConfig, true + } + + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) ProvisionedBy() []string { + // If we have no configuration, then we have no provisioners + if n.Config == nil || n.Config.Managed == nil { + return nil + } + + // Build the list of provisioners we need based on the configuration. + // It is okay to have duplicates here. + result := make([]string, len(n.Config.Managed.Provisioners)) + for i, p := range n.Config.Managed.Provisioners { + result[i] = p.Type + } + + return result +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { + if n.ProvisionerSchemas == nil { + n.ProvisionerSchemas = make(map[string]*configschema.Block) + } + n.ProvisionerSchemas[name] = schema +} + +// GraphNodeResource +func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource { + return n.Addr +} + +// GraphNodeResourceInstance +func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { + return n.NodeAbstractResource.Addr.Instance(n.InstanceKey) +} + +// GraphNodeAddressable, TODO: remove, used by target, should unify +func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { + return NewLegacyResourceAddress(n.Addr) +} + +// GraphNodeTargetable +func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { + n.Targets = targets +} + +// GraphNodeAttachResourceState +func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { + n.ResourceState = s +} + +// GraphNodeAttachResourceConfig +func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { + n.Config = c +} + +// GraphNodeAttachResourceSchema impl +func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { + n.Schema = schema + n.SchemaVersion = version +} + +// GraphNodeDotter impl. 
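+// DotNode renders resource nodes as "box" shapes in dot graph output.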
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "box", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go new file mode 100644 index 00000000000..68d438d7bc4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeApplyableResource represents a resource that is "applyable": +// it may need to have its record in the state adjusted to match configuration. +// +// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, +// it should be inserted into the same graph as any instances of the nodes +// with dependency edges ensuring that the resource is evaluated before any +// of its instances, which will in turn ensure that the whole-resource record +// in the state is suitably prepared to receive any updates to instances. +type NodeApplyableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeApplyableResource)(nil) + _ GraphNodeEvalable = (*NodeApplyableResource)(nil) + _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) + _ GraphNodeReferencer = (*NodeApplyableResource)(nil) +) + +func (n *NodeApplyableResource) Name() string { + return n.NodeAbstractResource.Name() + " (prepare state)" +} + +func (n *NodeApplyableResource) References() []*addrs.Reference { + if n.Config == nil { + log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) + return nil + } + + var result []*addrs.Reference + + // Since this node type only updates resource-level metadata, we only + // need to worry about the parts of the configuration that affect + // our "each mode": the count and for_each meta-arguments. + refs, _ := lang.ReferencesInExpr(n.Config.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(n.Config.ForEach) + result = append(result, refs...) + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + providerAddr := n.ResolvedProvider + + if config == nil { + // Nothing to do, then.
+ log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: providerAddr, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go new file mode 100644 index 00000000000..acdda45e407 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go @@ -0,0 +1,426 @@ +package terraform + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// NodeApplyableResourceInstance represents a resource instance that is +// "applyable": it is ready to be applied and is represented by a diff. +// +// This node is for a specific instance of a resource. It will usually be +// accompanied in the graph by a NodeApplyableResource representing its +// containing resource, and should depend on that node to ensure that the +// state is properly prepared to receive changes to instances. +type NodeApplyableResourceInstance struct { + *NodeAbstractResourceInstance + + destroyNode GraphNodeDestroyerCBD + graphNodeDeposer // implementation of GraphNodeDeposer +} + +var ( + _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil) +) + +// GraphNodeAttachDestroyer +func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) { + n.destroyNode = d +} + +// createBeforeDestroy checks this nodes config status and the status af any +// companion destroy node for CreateBeforeDestroy. +func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool { + cbd := false + + if n.Config != nil && n.Config.Managed != nil { + cbd = n.Config.Managed.CreateBeforeDestroy + } + + if n.destroyNode != nil { + cbd = cbd || n.destroyNode.CreateBeforeDestroy() + } + + return cbd +} + +// GraphNodeCreator +func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeReferencer, overriding NodeAbstractResourceInstance +func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { + // Start with the usual resource instance implementation + ret := n.NodeAbstractResourceInstance.References() + + // Applying a resource must also depend on the destruction of any of its + // dependencies, since this may for example affect the outcome of + // evaluating an entire list of resources with "count" set (by reducing + // the count). + // + // However, we can't do this in create_before_destroy mode because that + // would create a dependency cycle. 
We make a compromise here of requiring + // changes to be updated across two applies in this case, since the first + // plan will use the old values. + if !n.createBeforeDestroy() { + for _, ref := range ret { + switch tr := ref.Subject.(type) { + case addrs.ResourceInstance: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + case addrs.Resource: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + } + } + } + + return ret +} + +// GraphNodeEvalable +func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + if n.Config == nil { + // This should not be possible, but we've got here in at least one + // case as discussed in the following issue: + // https://github.com/hashicorp/terraform-plugin-sdk/issues/21258 + // To avoid an outright crash here, we'll instead return an explicit + // error. + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource node has no configuration attached", + fmt.Sprintf( + "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!", + addr, + ), + )) + err := diags.Err() + return &EvalReturnError{ + Error: &err, + } + } + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + + // Stop early if we don't actually have a diff + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if change == nil { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // In this particular call to EvalReadData we include our planned + // change, which signals that we expect this read to complete fully + // with no unknown values; it'll produce an error if not. + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Planned: &change, // setting this indicates that the result must be complete + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + OutputState: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // Clear the diff now that we've applied it, so + // later nodes won't see a diff that's now a no-op. 
+ &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + + &EvalUpdateStateHook{}, + }, + } +} + +func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var diff, diffApply *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var err error + var createNew bool + var createBeforeDestroyEnabled bool + var configVal cty.Value + var deposedKey states.DeposedKey + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diffApply, + }, + + // We don't want to do any destroys + // (these are handled by NodeDestroyResourceInstance instead) + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil { + return true, EvalEarlyExitError{} + } + if diffApply.Action == plans.Delete { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + destroy := false + if diffApply != nil { + destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) + } + if destroy && n.createBeforeDestroy() { + createBeforeDestroyEnabled = true + } + return createBeforeDestroyEnabled, nil + }, + Then: &EvalDeposeState{ + Addr: addr.Resource, + ForceKey: n.PreallocatedDeposedKey, + OutputKey: &deposedKey, + }, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + // Get the saved diff + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diff, + }, + + // Make a new diff, in case we've learned new values in the state + // during apply which we can now incorporate. + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + PreviousDiff: &diff, + OutputChange: &diffApply, + OutputValue: &configVal, + OutputState: &state, + }, + + // Compare the diffs + &EvalCheckPlannedChange{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Planned: &diff, + Actual: &diffApply, + }, + + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &diffApply, + Destroy: false, + OutChange: &diffApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it only requires destroying, since destroying + // is handled by NodeDestroyResourceInstance. 
+ &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil || diffApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + }, + &EvalApply{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + State: &state, + Change: &diffApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, // EvalApplyProvisioners will skip if already tainted + ResourceConfig: n.Config, + CreateNew: &createNew, + Error: &err, + When: configs.ProvisionerWhenCreate, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalMaybeRestoreDeposedObject{ + Addr: addr.Resource, + Key: &deposedKey, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if !diff.Action.IsReplace() { + return true, nil + } + if !n.createBeforeDestroy() { + return true, nil + } + return false, nil + }, + Then: &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + }, + + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go new file mode 100644 index 00000000000..049e5e99078 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go @@ -0,0 +1,321 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodeDestroyResourceInstance represents a resource instance that is to be +// destroyed. +type NodeDestroyResourceInstance struct { + *NodeAbstractResourceInstance + + // If DeposedKey is set to anything other than states.NotDeposed then + // this node destroys a deposed object of the associated instance + // rather than its current object. 
+ DeposedKey states.DeposedKey + + CreateBeforeDestroyOverride *bool +} + +var ( + _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) +) + +func (n *NodeDestroyResourceInstance) Name() string { + if n.DeposedKey != states.NotDeposed { + return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) + } + return n.ResourceInstanceAddr().String() + " (destroy)" +} + +// GraphNodeDestroyer +func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { + if n.CreateBeforeDestroyOverride != nil { + return *n.CreateBeforeDestroyOverride + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { + n.CreateBeforeDestroyOverride = &v + return nil +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() + destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) + + phaseType := addrs.ResourceInstancePhaseDestroy + if n.CreateBeforeDestroy() { + phaseType = addrs.ResourceInstancePhaseDestroyCBD + } + + for i, normalAddr := range normalAddrs { + switch ta := normalAddr.(type) { + case addrs.Resource: + destroyAddrs[i] = ta.Phase(phaseType) + case addrs.ResourceInstance: + destroyAddrs[i] = ta.Phase(phaseType) + default: + destroyAddrs[i] = normalAddr + } + } + + return destroyAddrs +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { + // If we have a config, then we need to include destroy-time dependencies + if c := n.Config; c != nil && c.Managed != nil { + var result []*addrs.Reference + + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + for _, p := range c.Managed.Provisioners { + schema := n.ProvisionerSchemas[p.Type] + + if p.When == configs.ProvisionerWhenDestroy { + if p.Connection != nil { + result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) + } + result = append(result, ReferencesFromConfig(p.Config, schema)...) 
+ } + } + + return result + } + + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Get our state + rs := n.ResourceState + var is *states.ResourceInstance + if rs != nil { + is = rs.Instance(n.InstanceKey) + } + if is == nil { + log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) + } + + var changeApply *plans.ResourceInstanceChange + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var err error + return &EvalOpFilter{ + Ops: []walkOperation{walkApply, walkDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &changeApply, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &changeApply, + Destroy: true, + OutChange: &changeApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it does not require destroying. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if changeApply == nil || changeApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalReadState{ + Addr: addr.Resource, + Output: &state, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalRequireState{ + State: &state, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &changeApply, + }, + + // Run destroy provisioners if not tainted + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if state != nil && state.Status == states.ObjectTainted { + return false, nil + } + + return true, nil + }, + + Then: &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, + ResourceConfig: n.Config, + Error: &err, + When: configs.ProvisionerWhenDestroy, + }, + }, + + // If we have a provisioning error, then we just call + // the post-apply hook now. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return err != nil, nil + }, + + Then: &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + }, + + // Make sure we handle data sources properly. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil + }, + + Then: &EvalReadDataApply{ + Addr: addr.Resource, + Config: n.Config, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + }, + Else: &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + }, + } +} + +// NodeDestroyResourceInstance represents a resource that is to be destroyed. +// +// Destroying a resource is a state-only operation: it is the individual +// instances being destroyed that affects remote objects. 
During graph +// construction, NodeDestroyResource should always depend on any other node +// related to the given resource, since it's just a final cleanup to avoid +// leaving skeleton resource objects in state after their instances have +// all been destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeDestroyResource)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) + _ GraphNodeReferencer = (*NodeDestroyResource)(nil) + _ GraphNodeEvalable = (*NodeDestroyResource)(nil) +) + +func (n *NodeDestroyResource) Name() string { + return n.ResourceAddr().String() + " (clean up state)" +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []*addrs.Reference { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // This EvalNode will produce an error if the resource isn't already + // empty by the time it is called, since it should just be pruning the + // leftover husk of a resource in state after all of the child instances + // and their objects were destroyed. + return &EvalForgetResourceState{ + Addr: n.ResourceAddr().Resource, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go new file mode 100644 index 00000000000..269c7980804 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go @@ -0,0 +1,313 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert +// an abstract resource instance to a concrete one of some type that has +// an associated deposed object key. +type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex + +type GraphNodeDeposedResourceInstanceObject interface { + DeposedInstanceObjectKey() states.DeposedKey +} + +// NodePlanDeposedResourceInstanceObject represents deposed resource +// instance objects during plan. These are distinct from the primary object +// for each resource instance since the only valid operation to do with them +// is to destroy them. +// +// This node type is also used during the refresh walk to ensure that the +// record of a deposed object is up-to-date before we plan to destroy it. 
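The EvalTree above ends in EvalForgetResourceState, which deliberately errors if the resource still has instance objects attached: it only prunes the empty husk. Below is a minimal illustrative sketch of that prune-only-if-empty rule, using simplified stand-in types of my own rather than the SDK's internal states package:

package main

import "fmt"

// Simplified stand-ins for the SDK's state types (illustrative only).
type instanceKey string

type resourceState struct {
	Instances map[instanceKey]struct{} // instance objects, values elided
}

type state struct {
	Resources map[string]*resourceState
}

// forgetResourceIfEmpty mirrors the intent of EvalForgetResourceState: it is
// an error to prune a resource entry that still has instance objects.
func forgetResourceIfEmpty(s *state, addr string) error {
	rs, ok := s.Resources[addr]
	if !ok {
		return nil // already gone; nothing to do
	}
	if len(rs.Instances) > 0 {
		return fmt.Errorf("cannot forget %s: %d instance object(s) remain", addr, len(rs.Instances))
	}
	delete(s.Resources, addr)
	return nil
}

func main() {
	s := &state{Resources: map[string]*resourceState{
		"null_resource.a": {Instances: map[instanceKey]struct{}{}},
	}}
	if err := forgetResourceIfEmpty(s, "null_resource.a"); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("remaining resources:", len(s.Resources)) // 0
}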
+type NodePlanDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) +) + +func (n *NodePlanDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) +} + +func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeEvalable impl. +func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} + + // During the refresh walk we will ensure that our record of the deposed + // object is up-to-date. If it was already deleted outside of Terraform + // then this will remove it from state and thus avoid us planning a + // destroy for it during the subsequent plan walk. + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Key: n.DeposedKey, + Output: &state, + }, + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + }, + }) + + // During the plan walk we always produce a planned destroy change, because + // destroying is the only supported action for deposed objects. 
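Because destroying is the only action ever planned for a deposed object, the plan logic in the walk below reduces to: plan a delete while the object is still recorded, and plan nothing once refresh has already observed it gone. A rough stand-alone sketch of that decision, with hypothetical types in place of the SDK's plans package:

package main

import "fmt"

// Illustrative stand-ins; the SDK's change types carry much more detail.
type action string

const (
	actionNone   action = "no-op"
	actionDelete action = "delete"
)

type change struct {
	Addr       string
	DeposedKey string
	Action     action
}

// planDeposed mirrors the rule above: the only valid planned action for a
// deposed object is destroy; an object already absent from state needs no
// change at all (modelled here as a no-op).
func planDeposed(addr, key string, inState bool) change {
	a := actionDelete
	if !inState {
		a = actionNone
	}
	return change{Addr: addr, DeposedKey: key, Action: a}
}

func main() {
	fmt.Println(planDeposed("aws_instance.web", "00000001", true))  // delete
	fmt.Println(planDeposed("aws_instance.web", "00000001", false)) // no-op
}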
+ var change *plans.ResourceInstanceChange + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkPlan, walkPlanDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + DeposedKey: n.DeposedKey, + State: &state, + Output: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + DeposedKey: n.DeposedKey, + ProviderSchema: &providerSchema, + Change: &change, + }, + // Since deposed objects cannot be referenced by expressions + // elsewhere, we don't need to also record the planned new + // state in this case. + }, + }, + }) + + return seq +} + +// NodeDestroyDeposedResourceInstanceObject represents deposed resource +// instance objects during apply. Nodes of this type are inserted by +// DiffTransformer when the planned changeset contains "delete" changes for +// deposed instance objects, and its only supported operation is to destroy +// and then forget the associated object. +type NodeDestroyDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) +) + +func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) +} + +func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeDestroyer +func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { + // A deposed instance is always CreateBeforeDestroy by definition, since + // we use deposed only to handle create-before-destroy. 
+ return true +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { + if !v { + // Should never happen: deposed instances are _always_ create_before_destroy. + return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") + } + return nil +} + +// GraphNodeEvalable impl. +func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var change *plans.ResourceInstanceChange + var err error + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &change, + }, + &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &change, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + // Always write the resource back to the state deposed... if it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalReturnError{ + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} + +// GraphNodeDeposer is an optional interface implemented by graph nodes that +// might create a single new deposed object for a specific associated resource +// instance, allowing a caller to optionally pre-allocate a DeposedKey for +// it. +type GraphNodeDeposer interface { + // SetPreallocatedDeposedKey will be called during graph construction + // if a particular node must use a pre-allocated deposed key if/when it + // "deposes" the current object of its associated resource instance. + SetPreallocatedDeposedKey(key states.DeposedKey) +} + +// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. +// Embed it in a node type to get automatic support for it, and then access +// the field PreallocatedDeposedKey to access any pre-allocated key. 
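graphNodeDeposer, defined next, is the common Go pattern of shipping an embeddable struct alongside an optional interface so that any node type can opt in with one line. A self-contained sketch of the same pattern under illustrative names (not the SDK's types):

package main

import "fmt"

// deposer plays the role of GraphNodeDeposer: an optional capability that
// graph construction probes for with a type assertion.
type deposer interface {
	SetPreallocatedDeposedKey(key string)
}

// deposerImpl plays the role of graphNodeDeposer: embed it to get automatic
// support for the interface above.
type deposerImpl struct {
	PreallocatedDeposedKey string
}

func (d *deposerImpl) SetPreallocatedDeposedKey(key string) {
	d.PreallocatedDeposedKey = key
}

// applyNode opts in simply by embedding deposerImpl.
type applyNode struct {
	deposerImpl
	name string
}

func main() {
	n := &applyNode{name: "aws_instance.web"}

	// A graph builder would probe the capability like this.
	if d, ok := interface{}(n).(deposer); ok {
		d.SetPreallocatedDeposedKey("00000001")
	}
	fmt.Println(n.PreallocatedDeposedKey) // "00000001"
}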
+type graphNodeDeposer struct { + PreallocatedDeposedKey states.DeposedKey +} + +func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { + n.PreallocatedDeposedKey = key +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go new file mode 100644 index 00000000000..2dc0df908e1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go @@ -0,0 +1,166 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// NodePlannableResource represents a resource that is "plannable": +// it is ready to be planned in order to create a diff. +type NodePlannableResource struct { + *NodeAbstractResource + + // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD + // during graph construction, if dependencies require us to force this + // on regardless of what the configuration says. + ForceCreateBeforeDestroy *bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResource)(nil) + _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) + _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) + _ GraphNodeReferenceable = (*NodePlannableResource)(nil) + _ GraphNodeReferencer = (*NodePlannableResource)(nil) + _ GraphNodeResource = (*NodePlannableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + // this ensures we can reference the resource even if the count is 0 + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: n.ResolvedProvider, + } +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) CreateBeforeDestroy() bool { + if n.ForceCreateBeforeDestroy != nil { + return *n.ForceCreateBeforeDestroy + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { + n.ForceCreateBeforeDestroy = &v + return nil +} + +// GraphNodeDynamicExpandable +func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. 
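The count and for_each values evaluated at the top of DynamicExpand decide which instance keys the transformers below will create, with -1 serving as the "count not set" sentinel (hence the count != -1 check above). A simplified sketch of that expansion rule, not the actual ResourceCountTransformer logic:

package main

import "fmt"

// expandInstanceKeys sketches the expansion convention used here: for_each
// produces string keys, a non-negative count produces integer indices, and
// count == -1 (unset) yields a single keyless instance.
func expandInstanceKeys(count int, forEach map[string]struct{}) []string {
	switch {
	case forEach != nil:
		keys := make([]string, 0, len(forEach))
		for k := range forEach { // note: map order is unspecified
			keys = append(keys, fmt.Sprintf("[%q]", k))
		}
		return keys
	case count >= 0:
		keys := make([]string, count)
		for i := range keys {
			keys[i] = fmt.Sprintf("[%d]", i)
		}
		return keys
	default:
		return []string{""} // no count/for_each: one instance, no key
	}
}

func main() {
	fmt.Println(expandInstanceKeys(3, nil))                           // [[0] [1] [2]]
	fmt.Println(expandInstanceKeys(-1, map[string]struct{}{"a": {}})) // [["a"]]
	fmt.Println(expandInstanceKeys(-1, nil))                          // one keyless instance
}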
+ state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas + + return &NodePlannableResourceInstance{ + NodeAbstractResourceInstance: a, + + // By the time we're walking, we've figured out whether we need + // to force on CreateBeforeDestroy due to dependencies on other + // nodes that have it. + ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), + } + } + + // The concrete resource factory we'll use for orphans + concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas + + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count or for_each (if present) + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + }, + + // Add the count/for_each orphans + &OrphanResourceCountTransformer{ + Concrete: concreteResourceOrphan, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodePlannableResource", + } + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go new file mode 100644 index 00000000000..2c3a7012b94 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go @@ -0,0 +1,88 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodePlanDestroyableResourceInstance represents a resource that is ready +// to be planned for destruction. 
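BasicGraphBuilder, used at the end of DynamicExpand above, simply runs an ordered list of transformers over a mutable graph and stops at the first error. A minimal stand-alone sketch of that builder loop, with simplified graph and transformer types of my own:

package main

import "fmt"

// graph and transformer are pared-down stand-ins for dag.Graph and
// GraphTransformer (illustrative only).
type graph struct {
	vertices []string
}

type transformer interface {
	Transform(g *graph) error
}

type addVertexTransformer struct{ name string }

func (t *addVertexTransformer) Transform(g *graph) error {
	g.vertices = append(g.vertices, t.name)
	return nil
}

// build mirrors the builder: apply each step in order, abort on the first
// error, and hand back the finished graph.
func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			return nil, fmt.Errorf("transform %T failed: %w", step, err)
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{
		&addVertexTransformer{name: "aws_instance.web[0]"},
		&addVertexTransformer{name: "root"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(g.vertices)
}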
+type NodePlanDestroyableResourceInstance struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) +) + +// GraphNodeDestroyer +func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeEvalable +func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. These are written to by address in the EvalNodes we + // declare below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + if n.ResolvedProvider.ProviderConfig.Type == "" { + // Should never happen; indicates that the graph was not constructed + // correctly since we didn't get our provider attached. + panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + &EvalCheckPreventDestroy{ + Addr: addr.Resource, + Config: n.Config, + Change: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go new file mode 100644 index 00000000000..ac4b24cf220 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go @@ -0,0 +1,201 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// NodePlannableResourceInstance represents a _single_ resource +// instance that is plannable. This means this represents a single +// count index, for example. 
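The EvalDiffDestroy step above records a change whose Before is the current object and whose After is null; downstream rendering and apply code read that shape as "delete". A small sketch of building such a value pair with the real go-cty API (the change struct itself is illustrative, not the SDK's):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// destroyChange holds the essential shape of a planned destroy: the prior
// value in Before, a null of the same type in After.
type destroyChange struct {
	Addr   string
	Before cty.Value
	After  cty.Value
}

func planDestroy(addr string, prior cty.Value) destroyChange {
	return destroyChange{
		Addr:   addr,
		Before: prior,
		After:  cty.NullVal(prior.Type()),
	}
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{
		"id": cty.StringVal("i-abc123"),
	})
	ch := planDestroy("aws_instance.web", prior)
	fmt.Println(ch.After.IsNull()) // true: the plan is to destroy
}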
+type NodePlannableResourceInstance struct { + *NodeAbstractResourceInstance + ForceCreateBeforeDestroy bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + // If we already have a non-planned state then we already dealt + // with this during the refresh walk and so we have nothing to do + // here. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + depChanges := false + + // Check and see if any of our dependencies have changes. + changes := ctx.Changes() + for _, d := range n.StateReferences() { + ri, ok := d.(addrs.ResourceInstance) + if !ok { + continue + } + change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen) + if change != nil && change.Action != plans.NoOp { + depChanges = true + break + } + } + + refreshed := state != nil && state.Status != states.ObjectPlanned + + // If there are no dependency changes, and it's not a forced + // read because we there was no Refresh, then we don't need + // to re-read. If any dependencies have changes, it means + // our config may also have changes and we need to Read the + // data source again. 
+ if !depChanges && refreshed { + return false, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready + OutputChange: &change, + OutputValue: &configVal, + OutputState: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} + +func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + CreateBeforeDestroy: n.ForceCreateBeforeDestroy, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + }, + &EvalCheckPreventDestroy{ + Addr: addr.Resource, + Config: n.Config, + Change: &change, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go new file mode 100644 index 00000000000..8e4f7148ff9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go @@ -0,0 +1,84 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. 
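The "applyable" wording above is boilerplate shared with other node types; concretely, an orphan is an instance that still exists in state but is no longer called for by configuration, for example after count is lowered. Finding such instances is a set difference, roughly as sketched here with plain string keys:

package main

import "fmt"

// orphanedKeys sketches how count/for_each orphans are identified: any
// instance key tracked in state but absent from the desired set must be
// planned for destruction.
func orphanedKeys(state, desired map[string]bool) []string {
	var orphans []string
	for k := range state { // map order is unspecified with multiple orphans
		if !desired[k] {
			orphans = append(orphans, k)
		}
	}
	return orphans
}

func main() {
	state := map[string]bool{"[0]": true, "[1]": true, "[2]": true}
	desired := map[string]bool{"[0]": true, "[1]": true} // count lowered 3 -> 2
	fmt.Println(orphanedKeys(state, desired))            // [[2]]
}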
+type NodePlannableResourceInstanceOrphan struct {
+	*NodeAbstractResourceInstance
+}
+
+var (
+	_ GraphNodeSubPath              = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeReferenceable        = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeReferencer           = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeResource             = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstanceOrphan)(nil)
+	_ GraphNodeEvalable             = (*NodePlannableResourceInstanceOrphan)(nil)
+)
+
+func (n *NodePlannableResourceInstanceOrphan) Name() string {
+	return n.ResourceInstanceAddr().String() + " (orphan)"
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode {
+	addr := n.ResourceInstanceAddr()
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
+	var change *plans.ResourceInstanceChange
+	var state *states.ResourceInstanceObject
+	var provider providers.Interface
+	var providerSchema *ProviderSchema
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalGetProvider{
+				Addr:   n.ResolvedProvider,
+				Output: &provider,
+				Schema: &providerSchema,
+			},
+			&EvalReadState{
+				Addr:           addr.Resource,
+				Provider:       &provider,
+				ProviderSchema: &providerSchema,
+
+				Output: &state,
+			},
+			&EvalDiffDestroy{
+				Addr:         addr.Resource,
+				State:        &state,
+				ProviderAddr: n.ResolvedProvider,
+				Output:       &change,
+				OutputState:  &state, // Will point to a nil state after this completes, signalling destroyed
+			},
+			&EvalCheckPreventDestroy{
+				Addr:   addr.Resource,
+				Config: n.Config,
+				Change: &change,
+			},
+			&EvalWriteDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         &change,
+			},
+			&EvalWriteState{
+				Addr:           addr.Resource,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				State:          &state,
+			},
+		},
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go
new file mode 100644
index 00000000000..dcab37270ca
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go
@@ -0,0 +1,296 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
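The var blocks of blank-identifier assignments that punctuate these files (one closes the orphan node above) are compile-time assertions: a nil pointer conversion proves the type satisfies the interface, at no runtime cost. The same idiom against a standard-library interface:

package main

import "fmt"

type node struct{}

func (n *node) String() string { return "node" }

// Fails to compile if *node ever stops satisfying fmt.Stringer; the vendored
// files use exactly this pattern for their graph node interfaces.
var _ fmt.Stringer = (*node)(nil)

func main() {
	fmt.Println(&node{}) // prints "node" via the String method
}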
+// NodeRefreshableManagedResource represents a resource that is expandable into
+// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
+type NodeRefreshableManagedResource struct {
+	*NodeAbstractResource
+}
+
+var (
+	_ GraphNodeSubPath              = (*NodeRefreshableManagedResource)(nil)
+	_ GraphNodeDynamicExpandable    = (*NodeRefreshableManagedResource)(nil)
+	_ GraphNodeReferenceable        = (*NodeRefreshableManagedResource)(nil)
+	_ GraphNodeReferencer           = (*NodeRefreshableManagedResource)(nil)
+	_ GraphNodeResource             = (*NodeRefreshableManagedResource)(nil)
+	_ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil)
+)
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	var diags tfdiags.Diagnostics
+
+	count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
+	diags = diags.Append(countDiags)
+	if countDiags.HasErrors() {
+		return nil, diags.Err()
+	}
+
+	forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+	diags = diags.Append(forEachDiags)
+	if forEachDiags.HasErrors() {
+		return nil, diags.Err()
+	}
+
+	// Next we need to potentially rename an instance address in the state
+	// if we're transitioning whether "count" is set at all.
+	fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
+
+	// Our graph transformers require access to the full state, so we'll
+	// temporarily lock it while we work on this.
+	state := ctx.State().Lock()
+	defer ctx.State().Unlock()
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+
+		return &NodeRefreshableManagedResourceInstance{
+			NodeAbstractResourceInstance: a,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count.
+		&ResourceCountTransformer{
+			Concrete: concreteResource,
+			Schema:   n.Schema,
+			Count:    count,
+			ForEach:  forEachMap,
+			Addr:     n.ResourceAddr(),
+		},
+
+		// Add the count orphans to make sure these resources are accounted for
+		// during a scale in.
+		&OrphanResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			ForEach:  forEachMap,
+			Addr:     n.ResourceAddr(),
+			State:    state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{Targets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps:    steps,
+		Validate: true,
+		Name:     "NodeRefreshableManagedResource",
+	}
+
+	graph, diags := b.Build(ctx.Path())
+	return graph, diags.ErrWithWarnings()
+}
+
+// NodeRefreshableManagedResourceInstance represents a single resource instance
+// that is refreshable: its record in state can be updated to match the real
+// remote object.
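Refreshing an instance, as the eval trees below do with EvalRefresh, means asking the provider to re-read the remote object and writing whatever comes back into state, dropping the record entirely when the provider reports it gone. A toy version of that walk, with a stand-in reader interface rather than the SDK's providers.Interface:

package main

import "fmt"

// reader stands in for the provider's read operation (illustrative only).
type reader interface {
	Read(id string) (map[string]string, error)
}

type fakeProvider struct {
	remote map[string]map[string]string
}

func (p *fakeProvider) Read(id string) (map[string]string, error) {
	obj, ok := p.remote[id]
	if !ok {
		return nil, nil // gone upstream: refresh records the deletion
	}
	return obj, nil
}

// refresh mirrors the walk: re-read every tracked object, keep the fresh
// copy, and delete entries the provider no longer knows about.
func refresh(state map[string]map[string]string, p reader) error {
	for id := range state {
		obj, err := p.Read(id)
		if err != nil {
			return err
		}
		if obj == nil {
			delete(state, id) // safe during range in Go
			continue
		}
		state[id] = obj
	}
	return nil
}

func main() {
	state := map[string]map[string]string{
		"i-1": {"tag": "stale"},
		"i-2": {"tag": "stale"},
	}
	p := &fakeProvider{remote: map[string]map[string]string{
		"i-1": {"tag": "fresh"},
	}}
	if err := refresh(state, p); err != nil {
		panic(err)
	}
	fmt.Println(state) // i-1 refreshed, i-2 dropped
}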
+type NodeRefreshableManagedResourceInstance struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) +) + +// GraphNodeDestroyer +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeEvalable +func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + if n.ResourceState == nil { + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) + return n.evalTreeManagedResourceNoState() + } + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) + return n.evalTreeManagedResource() + + case addrs.DataResourceMode: + // Get the data source node. If we don't have a configuration + // then it is an orphan so we destroy it (remove it from the state). + var dn GraphNodeEvalable + if n.Config != nil { + dn = &NodeRefreshableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, + } + } else { + dn = &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, + } + } + + return dn.EvalTree() + default: + panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) + } +} + +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + // This happened during initial development. All known cases were + // fixed and tested but as a sanity check let's assert here. + if n.ResourceState == nil { + err := fmt.Errorf( + "No resource state attached for addr: %s\n\n"+ + "This is a bug. Please report this to Terraform with your configuration\n"+ + "and state attached. 
Please be careful to scrub any sensitive information.", + addr) + return &EvalReturnError{Error: &err} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} + +// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource +// nodes that don't have state attached. An example of where this functionality +// is useful is when a resource that already exists in state is being scaled +// out, ie: has its resource count increased. In this case, the scaled out node +// needs to be available to other nodes (namely data sources) that may depend +// on it for proper interpolation, or confusing "index out of range" errors can +// occur. +// +// The steps in this sequence are very similar to the steps carried out in +// plan, but nothing is done with the diff after it is created - it is dropped, +// and its changes are not counted in the UI. +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + Stub: true, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // We must also save the planned change, so that expressions in + // other nodes, such as provider configurations and data resources, + // can work with the planned new value. + // + // This depends on the fact that Context.Refresh creates a + // temporary new empty changeset for the duration of its graph + // walk, and so this recorded change will be discarded immediately + // after the refresh walk completes. 
+ &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go new file mode 100644 index 00000000000..f0eb18a0655 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go @@ -0,0 +1,90 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/zclconf/go-cty/cty" +) + +// NodeValidatableResource represents a resource that is used for validation +// only. +type NodeValidatableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeSubPath = (*NodeValidatableResource)(nil) + _ GraphNodeEvalable = (*NodeValidatableResource)(nil) + _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) + _ GraphNodeReferencer = (*NodeValidatableResource)(nil) + _ GraphNodeResource = (*NodeValidatableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + // Declare the variables will be used are used to pass values along + // the evaluation sequence below. These are written to via pointers + // passed to the EvalNodes. + var provider providers.Interface + var providerSchema *ProviderSchema + var configVal cty.Value + + seq := &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalValidateResource{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Config: config, + ConfigVal: &configVal, + }, + }, + } + + if managed := n.Config.Managed; managed != nil { + hasCount := n.Config.Count != nil + hasForEach := n.Config.ForEach != nil + + // Validate all the provisioners + for _, p := range managed.Provisioners { + var provisioner provisioners.Interface + var provisionerSchema *configschema.Block + + if p.Connection == nil { + p.Connection = config.Managed.Connection + } else if config.Managed.Connection != nil { + p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) + } + + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + Schema: &provisionerSchema, + }, + &EvalValidateProvisioner{ + ResourceAddr: addr.Resource, + Provisioner: &provisioner, + Schema: &provisionerSchema, + Config: p, + ResourceHasCount: hasCount, + ResourceHasForEach: hasForEach, + }, + ) + } + } + + return seq +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go new file mode 100644 index 00000000000..844d060c9f1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + 
"github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// NodeRootVariable represents a root variable input. +type NodeRootVariable struct { + Addr addrs.InputVariable + Config *configs.Variable +} + +var ( + _ GraphNodeSubPath = (*NodeRootVariable)(nil) + _ GraphNodeReferenceable = (*NodeRootVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + +func (n *NodeRootVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeRootVariable) Path() addrs.ModuleInstance { + return addrs.RootModuleInstance +} + +// GraphNodeReferenceable +func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// dag.GraphNodeDotter impl. +func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go new file mode 100644 index 00000000000..19e3469cb35 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go @@ -0,0 +1,17 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// PathObjectCacheKey is like PathCacheKey but includes an additional name +// to be included in the key, for module-namespaced objects. +// +// The result of this function is guaranteed unique for any distinct pair +// of path and name, but is not guaranteed to be in any particular format +// and in particular should never be shown to end-users. +func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { + return fmt.Sprintf("%s|%s", path.String(), objectName) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go new file mode 100644 index 00000000000..5c19f6e7cf8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go @@ -0,0 +1,94 @@ +package terraform + +import ( + "bytes" + "encoding/gob" + "fmt" + "io" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/zclconf/go-cty/cty" +) + +func init() { + gob.Register(make([]interface{}, 0)) + gob.Register(make([]map[string]interface{}, 0)) + gob.Register(make(map[string]interface{})) + gob.Register(make(map[string]string)) +} + +// Plan represents a single Terraform execution plan, which contains +// all the information necessary to make an infrastructure change. +// +// A plan has to contain basically the entire state of the world +// necessary to make a change: the state, diff, config, backend config, etc. +// This is so that it can run alone without any other data. +type Plan struct { + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Config represents the entire configuration that was present when this + // plan was created. + Config *configs.Config + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. 
+ State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. + Vars map[string]cty.Value + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. + // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. + Targets []string + + // TerraformVersion is the version of Terraform that was used to create + // this plan. + // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. + TerraformVersion string + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte + + // Backend is the backend that this plan should use and store data with. + Backend *BackendState + + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool +} + +func (p *Plan) String() string { + buf := new(bytes.Buffer) + buf.WriteString("DIFF:\n\n") + buf.WriteString(p.Diff.String()) + buf.WriteString("\n\nSTATE:\n\n") + buf.WriteString(p.State.String()) + return buf.String() +} + +// ReadPlan reads a plan structure out of a reader in the format that +// was written by WritePlan. +func ReadPlan(src io.Reader) (*Plan, error) { + return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") +} + +// WritePlan writes a plan somewhere in a binary format. +func WritePlan(d *Plan, dst io.Writer) error { + return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go new file mode 100644 index 00000000000..7e401f33ebf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go @@ -0,0 +1,521 @@ +package terraform + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written 
against + // the old mock API. + ValidateFn func(c *ResourceConfig) (ws []string, es []error) + ConfigureFn func(c *ResourceConfig) error + DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) + ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.PrepareProviderConfigCalled = true + p.PrepareProviderConfigRequest = r + if p.PrepareProviderConfigFn != nil { + return p.PrepareProviderConfigFn(r) + } + return p.PrepareProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceTypeConfigCalled = true + p.ValidateResourceTypeConfigRequest = r + + if p.ValidateFn != nil { + resp := p.getSchema() + schema := resp.Provider.Block + rc := NewResourceConfigShimmed(r.Config, schema) + warns, errs := p.ValidateFn(rc) + ret := providers.ValidateResourceTypeConfigResponse{} + for _, warn := range warns { + ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + ret.Diagnostics = ret.Diagnostics.Append(err) + } + } + if p.ValidateResourceTypeConfigFn != nil { + return p.ValidateResourceTypeConfigFn(r) + } + + return p.ValidateResourceTypeConfigResponse +} + +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceConfigCalled = true + p.ValidateDataSourceConfigRequest = r + + if p.ValidateDataSourceConfigFn != nil { + return p.ValidateDataSourceConfigFn(r) + } + + return p.ValidateDataSourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() + defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = 
+func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	schemas := p.getSchema()
+	schema := schemas.ResourceTypes[r.TypeName]
+	schemaType := schema.Block.ImpliedType()
+
+	p.UpgradeResourceStateCalled = true
+	p.UpgradeResourceStateRequest = r
+
+	if p.UpgradeResourceStateFn != nil {
+		return p.UpgradeResourceStateFn(r)
+	}
+
+	resp := p.UpgradeResourceStateResponse
+
+	if resp.UpgradedState == cty.NilVal {
+		switch {
+		case r.RawStateFlatmap != nil:
+			v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
+			if err != nil {
+				resp.Diagnostics = resp.Diagnostics.Append(err)
+				return resp
+			}
+			resp.UpgradedState = v
+		case len(r.RawStateJSON) > 0:
+			v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)
+			if err != nil {
+				resp.Diagnostics = resp.Diagnostics.Append(err)
+				return resp
+			}
+			resp.UpgradedState = v
+		}
+	}
+	return resp
+}
+
+func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ConfigureCalled = true
+	p.ConfigureRequest = r
+
+	if p.ConfigureFn != nil {
+		resp := p.getSchema()
+		schema := resp.Provider.Block
+		rc := NewResourceConfigShimmed(r.Config, schema)
+		ret := providers.ConfigureResponse{}
+
+		err := p.ConfigureFn(rc)
+		if err != nil {
+			ret.Diagnostics = ret.Diagnostics.Append(err)
+		}
+		return ret
+	}
+	if p.ConfigureNewFn != nil {
+		return p.ConfigureNewFn(r)
+	}
+
+	return p.ConfigureResponse
+}
+
+func (p *MockProvider) Stop() error {
+	// We intentionally don't lock in this one because the whole point of this
+	// method is to be called concurrently with another operation that can
+	// be cancelled. The provider itself is responsible for handling
+	// any concurrency concerns in this case.
+
+	p.StopCalled = true
+	if p.StopFn != nil {
+		return p.StopFn()
+	}
+
+	return p.StopResponse
+}
+
+func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ReadResourceCalled = true
+	p.ReadResourceRequest = r
+
+	if p.ReadResourceFn != nil {
+		return p.ReadResourceFn(r)
+	}
+
+	// make sure the NewState fits the schema
+	newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState)
+	if err != nil {
+		panic(err)
+	}
+	resp := p.ReadResourceResponse
+	resp.NewState = newState
+
+	return resp
+}
+
+func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.PlanResourceChangeCalled = true
+	p.PlanResourceChangeRequest = r
+
+	if p.DiffFn != nil {
+		ps := p.getSchema()
+		if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil {
+			return providers.PlanResourceChangeResponse{
+				Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("mock provider has no schema for resource type %s", r.TypeName)),
+			}
+		}
+		schema := ps.ResourceTypes[r.TypeName].Block
+		info := &InstanceInfo{
+			Type: r.TypeName,
+		}
+		priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0)
+		cfg := NewResourceConfigShimmed(r.Config, schema)
+
+		legacyDiff, err := p.DiffFn(info, priorState, cfg)
+
+		var res providers.PlanResourceChangeResponse
+		res.PlannedState = r.ProposedNewState
+		if err != nil {
+			res.Diagnostics = res.Diagnostics.Append(err)
+		}
+		if legacyDiff != nil {
+			newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema)
+			if err != nil {
+				res.Diagnostics = res.Diagnostics.Append(err)
+			}
+
+			res.PlannedState = newVal
+
+			var requiresNew []string
+			for attr, d := range legacyDiff.Attributes {
+				if d.RequiresNew {
+					requiresNew = append(requiresNew, attr)
+				}
+			}
+			requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType())
+			if err != nil {
+				res.Diagnostics = res.Diagnostics.Append(err)
+			}
+			res.RequiresReplace = requiresReplace
+		}
+		return res
+	}
+	if p.PlanResourceChangeFn != nil {
+		return p.PlanResourceChangeFn(r)
+	}
+
+	return p.PlanResourceChangeResponse
+}
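The DiffFn branch above shims a legacy diff into the new plan protocol: a nil legacy diff plans r.ProposedNewState unchanged, and RequiresNew attributes are mapped to RequiresReplace paths. A minimal illustrative sketch of wiring it up in a test (not part of the vendored file; the schema contents are assumptions):

    p := new(MockProvider)
    p.GetSchemaReturn = &ProviderSchema{
        ResourceTypes: map[string]*configschema.Block{
            "test_instance": { // hypothetical resource type for the example
                Attributes: map[string]*configschema.Attribute{
                    "ami": {Type: cty.String, Optional: true},
                },
            },
        },
    }
    p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) {
        // A nil diff means "no changes", so PlanResourceChange plans
        // the proposed new state as-is.
        return nil, nil
    }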
+func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+	p.Lock()
+	p.ApplyResourceChangeCalled = true
+	p.ApplyResourceChangeRequest = r
+	p.Unlock()
+
+	if p.ApplyFn != nil {
+		// ApplyFn is a special callback fashioned after our old provider
+		// interface, which expected to be given an actual diff rather than
+		// separate old/new values to apply. Therefore we need to approximate
+		// a diff here well enough that _most_ of our legacy ApplyFns in old
+		// tests still see the behavior they are expecting. New tests should
+		// not use this, and should instead use ApplyResourceChangeFn directly.
+		providerSchema := p.getSchema()
+		schema, ok := providerSchema.ResourceTypes[r.TypeName]
+		if !ok {
+			return providers.ApplyResourceChangeResponse{
+				Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)),
+			}
+		}
+
+		info := &InstanceInfo{
+			Type: r.TypeName,
+		}
+
+		priorVal := r.PriorState
+		plannedVal := r.PlannedState
+		priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal)
+		plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal)
+		s := NewInstanceStateShimmedFromValue(priorVal, 0)
+		d := &InstanceDiff{
+			Attributes: make(map[string]*ResourceAttrDiff),
+		}
+		if plannedMap == nil { // destroying, then
+			d.Destroy = true
+			// Destroy diffs don't have any attribute diffs
+		} else {
+			if priorMap == nil { // creating, then
+				// We'll just make an empty prior map to make things easier below.
+				priorMap = make(map[string]string)
+			}
+
+			for k, new := range plannedMap {
+				old := priorMap[k]
+				newComputed := false
+				if new == hcl2shim.UnknownVariableValue {
+					new = ""
+					newComputed = true
+				}
+				d.Attributes[k] = &ResourceAttrDiff{
+					Old:         old,
+					New:         new,
+					NewComputed: newComputed,
+					Type:        DiffAttrInput, // not generally used in tests, so just hard-coded
+				}
+			}
+			// Also need any attributes that were removed in "planned"
+			for k, old := range priorMap {
+				if _, ok := plannedMap[k]; ok {
+					continue
+				}
+				d.Attributes[k] = &ResourceAttrDiff{
+					Old:        old,
+					NewRemoved: true,
+					Type:       DiffAttrInput,
+				}
+			}
+		}
+		newState, err := p.ApplyFn(info, s, d)
+		resp := providers.ApplyResourceChangeResponse{}
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+		}
+
+		var newVal cty.Value
+		if newState != nil {
+			var err error
+			newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType())
+			if err != nil {
+				resp.Diagnostics = resp.Diagnostics.Append(err)
+			}
+		} else {
+			// If apply returned a nil new state then that's the old way to
+			// indicate that the object was destroyed. Our new interface calls
+			// for that to be signalled as a null value.
+			newVal = cty.NullVal(schema.Block.ImpliedType())
+		}
+		resp.NewState = newVal
+
+		return resp
+	}
+	if p.ApplyResourceChangeFn != nil {
+		return p.ApplyResourceChangeFn(r)
+	}
+
+	return p.ApplyResourceChangeResponse
+}
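The approximated diff above is what lets very old ApplyFn callbacks keep working against the new old/new-value protocol. An illustrative sketch of such a callback (not from the vendored file; it assumes InstanceState.MergeDiff from this legacy package):

    p.ApplyFn = func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) {
        if d.Destroy {
            // Returning a nil state is the legacy way to say "the object is
            // gone"; the shim above converts it to a null cty value.
            return nil, nil
        }
        return s.MergeDiff(d), nil
    }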
+ if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go new file mode 100644 index 00000000000..93b19be57d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go @@ -0,0 +1,154 @@ +package terraform + +import ( + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written against + // the old mock API. 
+func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.GetSchemaCalled = true
+	return p.getSchema()
+}
+
+// getSchema is the implementation of GetSchema, which can be called from other
+// methods on MockProvisioner that may already be holding the lock.
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
+	return p.GetSchemaResponse
+}
+
+func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ValidateProvisionerConfigCalled = true
+	p.ValidateProvisionerConfigRequest = r
+	if p.ValidateProvisionerConfigFn != nil {
+		return p.ValidateProvisionerConfigFn(r)
+	}
+	return p.ValidateProvisionerConfigResponse
+}
+
+func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ProvisionResourceCalled = true
+	p.ProvisionResourceRequest = r
+	if p.ApplyFn != nil {
+		if !r.Config.IsKnown() {
+			panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config))
+		}
+
+		schema := p.getSchema()
+		rc := NewResourceConfigShimmed(r.Config, schema.Provisioner)
+		connVal := r.Connection
+		connMap := map[string]string{}
+
+		if !connVal.IsNull() && connVal.IsKnown() {
+			for it := connVal.ElementIterator(); it.Next(); {
+				ak, av := it.Element()
+				name := ak.AsString()
+
+				if !av.IsKnown() || av.IsNull() {
+					continue
+				}
+
+				av, _ = convert.Convert(av, cty.String)
+				connMap[name] = av.AsString()
+			}
+		}
+
+		// We no longer pass the full instance state to a provisioner, so we'll
+		// construct a partial one that should be good enough for what existing
+		// test mocks need.
+		is := &InstanceState{
+			Ephemeral: EphemeralState{
+				ConnInfo: connMap,
+			},
+		}
+		var resp provisioners.ProvisionResourceResponse
+		err := p.ApplyFn(is, rc)
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+		}
+		return resp
+	}
+	if p.ProvisionResourceFn != nil {
+		// The deferred Unlock above releases the mutex; unlocking it again
+		// here as well would panic.
+		return p.ProvisionResourceFn(r)
+	}
+
+	return p.ProvisionResourceResponse
+}
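The connection shim above stringifies every known, non-null connection attribute. For instance (illustrative values only), a cty connection object like the following surfaces in ApplyFn as ConnInfo:

    conn := cty.ObjectVal(map[string]cty.Value{
        "host": cty.StringVal("10.0.0.5"),
        "port": cty.NumberIntVal(22),
    })
    // After the convert.Convert calls above:
    //   ConnInfo["host"] == "10.0.0.5"
    //   ConnInfo["port"] == "22"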
+func (p *MockProvisioner) Stop() error {
+	// We intentionally don't lock in this one because the whole point of this
+	// method is to be called concurrently with another operation that can
+	// be cancelled. The provisioner itself is responsible for handling
+	// any concurrency concerns in this case.
+
+	p.StopCalled = true
+	if p.StopFn != nil {
+		return p.StopFn()
+	}
+
+	return p.StopResponse
+}
+
+func (p *MockProvisioner) Close() error {
+	p.Lock()
+	defer p.Unlock()
+
+	p.CloseCalled = true
+	if p.CloseFn != nil {
+		return p.CloseFn()
+	}
+
+	return p.CloseResponse
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go
new file mode 100644
index 00000000000..bd5774600ec
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go
@@ -0,0 +1,510 @@
+package terraform
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/mitchellh/copystructure"
+	"github.com/mitchellh/reflectwalk"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+)
+
+// Resource is a legacy way to identify a particular resource instance.
+//
+// New code should use addrs.ResourceInstance instead. This is still here
+// only for codepaths that haven't been updated yet.
+type Resource struct {
+	// These are all used by the new EvalNode stuff.
+	Name       string
+	Type       string
+	CountIndex int
+
+	// These aren't really used anymore anywhere, but we keep them around
+	// since we haven't done a proper cleanup yet.
+	Id           string
+	Info         *InstanceInfo
+	Config       *ResourceConfig
+	Dependencies []string
+	Diff         *InstanceDiff
+	Provider     ResourceProvider
+	State        *InstanceState
+	Flags        ResourceFlag
+}
+
+// NewResource constructs a legacy Resource object from an
+// addrs.ResourceInstance value.
+//
+// This is provided to shim to old codepaths that haven't been updated away
+// from this type yet. Since this old type is not able to represent instances
+// that have string keys, this function will panic if given a resource address
+// that has a string key.
+func NewResource(addr addrs.ResourceInstance) *Resource {
+	ret := &Resource{
+		Name: addr.Resource.Name,
+		Type: addr.Resource.Type,
+	}
+
+	if addr.Key != addrs.NoKey {
+		switch tk := addr.Key.(type) {
+		case addrs.IntKey:
+			ret.CountIndex = int(tk)
+		default:
+			panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key))
+		}
+	}
+
+	return ret
+}
+
+// ResourceFlag specifies what kind of instance we're working with, whether
+// it's a primary instance, a tainted instance, or an orphan.
+type ResourceFlag byte
+
+// InstanceInfo is used to hold information about the instance and/or
+// resource being modified.
+type InstanceInfo struct {
+	// Id is a unique name to represent this instance. This is not related
+	// to InstanceState.ID in any way.
+	Id string
+
+	// ModulePath is the complete path of the module containing this
+	// instance.
+	ModulePath []string
+
+	// Type is the resource type of this instance
+	Type string
+}
+
+// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance.
+//
+// InstanceInfo is a legacy type, and uses of it should be gradually replaced
+// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as
+// appropriate.
+//
+// The legacy InstanceInfo type cannot represent module instances with instance
+// keys, so this function will panic if given such a path.
Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. +func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name + } + + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." + id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } + } + + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } +} + +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. + id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. + panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. 
+//
+// The given value may contain hcl2shim.UnknownVariableValue to signal that
+// something is computed, but it must not contain unprocessed interpolation
+// sequences as we might've seen in Terraform v0.11 and prior.
+func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig {
+	v := hcl2shim.HCL2ValueFromConfigValue(raw)
+
+	// This is a little weird but we round-trip the value through the hcl2shim
+	// package here for two reasons: firstly, because that reduces the risk
+	// of it including something unlike what NewResourceConfigShimmed would
+	// produce, and secondly because it creates a copy of "raw" just in case
+	// something is relying on the fact that in the old world the raw and
+	// config maps were always distinct, and thus you could in principle mutate
+	// one without affecting the other. (I sure hope nobody was doing that, though!)
+	cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{})
+
+	return &ResourceConfig{
+		Raw:    raw,
+		Config: cfg,
+
+		ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""),
+	}
+}
+
+// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
+// ResourceConfig object, so that it can be passed to older APIs that expect
+// this wrapping.
+//
+// The returned ResourceConfig is already interpolated and cannot be
+// re-interpolated. It is, therefore, useful only to functions that expect
+// an already-populated ResourceConfig which they then treat as read-only.
+//
+// If the given value is not of an object type that conforms to the given
+// schema then this function will panic.
+func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
+	if !val.Type().IsObjectType() {
+		panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
+	}
+	ret := &ResourceConfig{}
+
+	legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema)
+	if legacyVal != nil {
+		ret.Config = legacyVal
+
+		// Now we need to walk through our structure and find any unknown values,
+		// producing the separate list ComputedKeys to represent these. We use the
+		// schema here so that we can preserve the expected invariant
+		// that an attribute is always either wholly known or wholly unknown, while
+		// a child block can be partially unknown.
+		ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "")
+	} else {
+		ret.Config = make(map[string]interface{})
+	}
+	ret.Raw = ret.Config
+
+	return ret
+}
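A minimal sketch of the shim's observable behavior (illustrative only; the schema and attribute names are assumptions): a known attribute lands in Config, while an unknown one is additionally recorded in ComputedKeys.

    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "ami":  {Type: cty.String, Optional: true},
            "tags": {Type: cty.Map(cty.String), Optional: true},
        },
    }
    rc := NewResourceConfigShimmed(cty.ObjectVal(map[string]cty.Value{
        "ami":  cty.StringVal("ami-123456"),
        "tags": cty.UnknownVal(cty.Map(cty.String)),
    }), schema)
    // rc.Config["ami"] == "ami-123456"
    // rc.ComputedKeys == []string{"tags"}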
+// Record any config values in ComputedKeys. This field had been unused in
+// helper/schema, but in the new protocol we're using this so that the SDK can
+// now handle having an unknown collection. The legacy diff code doesn't
+// properly handle the unknown, because it can't be expressed in the same way
+// between the config and diff.
+func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string {
+	var ret []string
+	ty := val.Type()
+
+	if val.IsNull() {
+		return ret
+	}
+
+	if !val.IsKnown() {
+		// we shouldn't have an entirely unknown resource, but prevent empty
+		// strings just in case
+		if len(path) > 0 {
+			ret = append(ret, path)
+		}
+		return ret
+	}
+
+	if path != "" {
+		path += "."
+	}
+	switch {
+	case ty.IsListType(), ty.IsTupleType(), ty.IsSetType():
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, subVal := it.Element()
+			keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i))
+			ret = append(ret, keys...)
+		}
+
+	case ty.IsMapType(), ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			subK, subVal := it.Element()
+			keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString()))
+			ret = append(ret, keys...)
+		}
+	}
+
+	return ret
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+	// DeepCopying a nil should return a nil to avoid panics
+	if c == nil {
+		return nil
+	}
+
+	// Copy, this will copy all the exported attributes
+	copy, err := copystructure.Config{Lock: true}.Copy(c)
+	if err != nil {
+		panic(err)
+	}
+
+	// Force the type
+	result := copy.(*ResourceConfig)
+
+	return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+	// If either are nil, then they're only equal if they're both nil
+	if c == nil || c2 == nil {
+		return c == c2
+	}
+
+	// Sort the computed keys so they're deterministic
+	sort.Strings(c.ComputedKeys)
+	sort.Strings(c2.ComputedKeys)
+
+	// Two resource configs are equal if their exported properties are equal.
+	// We don't compare "raw" because it is never used again after
+	// initialization and for all intents and purposes they are equal
+	// if the exported properties are equal.
+	check := [][2]interface{}{
+		{c.ComputedKeys, c2.ComputedKeys},
+		{c.Raw, c2.Raw},
+		{c.Config, c2.Config},
+	}
+	for _, pair := range check {
+		if !reflect.DeepEqual(pair[0], pair[1]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to be called in the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+	var errs []error
+
+	for _, k := range keys {
+		if !c.IsSet(k) {
+			errs = append(errs, fmt.Errorf("%s must be set", k))
+		}
+	}
+
+	return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+	// We aim to get a value from the configuration. If it is computed,
+	// then we return the pure raw value.
+	source := c.Config
+	if c.IsComputed(k) {
+		source = c.Raw
+	}
+
+	return c.get(k, source)
+}
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+	return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+func (c *ResourceConfig) IsComputed(k string) bool {
+	// The next thing we do is check the config if we get a computed
+	// value out of it.
+	v, ok := c.get(k, c.Config)
+	if !ok {
+		return false
+	}
+
+	// If value is nil, then it isn't computed
+	if v == nil {
+		return false
+	}
+
+	// Test if the value contains an unknown value
+	var w unknownCheckWalker
+	if err := reflectwalk.Walk(v, &w); err != nil {
+		panic(err)
+	}
+
+	return w.Unknown
+}
+
+// IsSet checks if the key in the configuration is set.
A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. +func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go new file mode 100644 index 00000000000..8a683012d24 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go @@ -0,0 +1,618 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. 
+type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + + n.Path = append(n.Path, r.Path...) + + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// HasResourceSpec returns true if the address has a resource spec, as +// defined in the documentation: +// https://www.terraform.io/docs/internals/resource-addressing.html +// In particular, this returns false if the address contains only +// a module path, thus addressing the entire module. +func (r *ResourceAddress) HasResourceSpec() bool { + return r.Type != "" && r.Name != "" +} + +// WholeModuleAddress returns the resource address that refers to all +// resources in the same module as the receiver address. +func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { + return &ResourceAddress{ + Path: r.Path, + Index: -1, + InstanceTypeSet: false, + } +} + +// MatchesResourceConfig returns true if the receiver matches the given +// configuration resource within the given _static_ module path. Note that +// the module path in a resource address is a _dynamic_ module path, and +// multiple dynamic resource paths may map to a single static path if +// count and for_each are in use on module calls. +// +// Since resource configuration blocks represent all of the instances of +// a multi-instance resource, the index of the address (if any) is not +// considered. +func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { + if r.HasResourceSpec() { + // FIXME: Some ugliness while we are between worlds. Functionality + // in "addrs" should eventually replace this ResourceAddress idea + // completely, but for now we'll need to translate to the old + // way of representing resource modes. 
+ switch r.Mode { + case ManagedResourceMode: + if rc.Mode != addrs.ManagedResourceMode { + return false + } + case DataResourceMode: + if rc.Mode != addrs.DataResourceMode { + return false + } + } + if r.Type != rc.Type || r.Name != rc.Name { + return false + } + } + + addrPath := r.Path + + // normalize + if len(addrPath) == 0 { + addrPath = nil + } + if len(path) == 0 { + path = nil + } + rawPath := []string(path) + return reflect.DeepEqual(addrPath, rawPath) +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. +func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case ManagedResourceMode: + // Done + case DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a +// resource name as described in a module diff. 
+//
+// For historical reasons a different addressing format is used in this
+// context. The internal format should not be shown in the UI and instead
+// this function should be used to translate to a ResourceAddress and
+// then, where appropriate, use the String method to produce a canonical
+// resource address string for display in the UI.
+//
+// The given path slice must be empty (or nil) for the root module, and
+// otherwise consist of a sequence of module names traversing down into
+// the module tree. If a non-nil path is provided, the caller must not
+// modify its underlying array after passing it to this function.
+func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
+	addr, err := parseResourceAddressInternal(key)
+	if err != nil {
+		return nil, err
+	}
+	addr.Path = path
+	return addr, nil
+}
+
+// NewLegacyResourceAddress creates a ResourceAddress from a new-style
+// addrs.AbsResource value.
+//
+// This is provided for shimming purposes so that we can still easily call into
+// older functions that expect the ResourceAddress type.
+func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
+	ret := &ResourceAddress{
+		Type: addr.Resource.Type,
+		Name: addr.Resource.Name,
+	}
+
+	switch addr.Resource.Mode {
+	case addrs.ManagedResourceMode:
+		ret.Mode = ManagedResourceMode
+	case addrs.DataResourceMode:
+		ret.Mode = DataResourceMode
+	default:
+		panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode))
+	}
+
+	path := make([]string, len(addr.Module))
+	for i, step := range addr.Module {
+		if step.InstanceKey != addrs.NoKey {
+			// At the time of writing this can't happen because we don't
+			// yet generate keyed module instances. This legacy codepath must
+			// be removed before we can support "count" and "for_each" for
+			// modules.
+			panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
+		}
+
+		path[i] = step.Name
+	}
+	ret.Path = path
+	ret.Index = -1
+
+	return ret
+}
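An illustrative sketch of the shim (not from the vendored file; it assumes the internal addrs API shown here): a root-module managed resource maps to a legacy address with an unset index.

    addr := addrs.Resource{
        Mode: addrs.ManagedResourceMode,
        Type: "aws_instance",
        Name: "web",
    }.Absolute(addrs.RootModuleInstance)

    legacy := NewLegacyResourceAddress(addr)
    // legacy.String() == "aws_instance.web"; legacy.Index == -1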
+ panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + + if addr.Resource.Key == addrs.NoKey { + ret.Index = -1 + } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { + ret.Index = int(ik) + } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { + ret.Index = -1 + } else { + panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) + } + + return ret +} + +// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to +// the new resource address type addrs.AbsResourceInstance. +// +// This method can be used only on an address that has a resource specification. +// It will panic if called on a module-path-only ResourceAddress. Use +// method HasResourceSpec to check before calling, in contexts where it is +// unclear. +// +// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" +// states, and so if these are present on the receiver then they are discarded. +// +// This is provided for shimming purposes so that we can easily adapt functions +// that are returning the legacy ResourceAddress type, for situations where +// the new type is required. +func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { + if !addr.HasResourceSpec() { + panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") + } + + ret := addrs.AbsResourceInstance{ + Module: addr.ModuleInstanceAddr(), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Type: addr.Type, + Name: addr.Name, + }, + }, + } + + switch addr.Mode { + case ManagedResourceMode: + ret.Resource.Resource.Mode = addrs.ManagedResourceMode + case DataResourceMode: + ret.Resource.Resource.Mode = addrs.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) + } + + if addr.Index != -1 { + ret.Resource.Key = addrs.IntKey(addr.Index) + } + + return ret +} + +// ModuleInstanceAddr returns the module path portion of the receiver as a +// addrs.ModuleInstance value. +func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { + path := make(addrs.ModuleInstance, len(addr.Path)) + for i, name := range addr.Path { + path[i] = addrs.ModuleInstanceStep{Name: name} + } + return path +} + +// Contains returns true if and only if the given node is contained within +// the receiver. +// +// Containment is defined in terms of the module and resource heirarchy: +// a resource is contained within its module and any ancestor modules, +// an indexed resource instance is contained with the unindexed resource, etc. +func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { + ourPath := addr.Path + givenPath := other.Path + if len(givenPath) < len(ourPath) { + return false + } + for i := range ourPath { + if ourPath[i] != givenPath[i] { + return false + } + } + + // If the receiver is a whole-module address then the path prefix + // matching is all we need. + if !addr.HasResourceSpec() { + return true + } + + if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { + return false + } + + if addr.Index != -1 && addr.Index != other.Index { + return false + } + + if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { + return false + } + + return true +} + +// Equals returns true if the receiver matches the given address. 
+// Equals returns true if the receiver matches the given address.
+//
+// The name of this method is a misnomer, since it doesn't test for exact
+// equality. Instead, it tests that the _specified_ parts of each
+// address match, treating any unspecified parts as wildcards.
+//
+// See also Contains, which takes a more hierarchical approach to comparing
+// addresses.
+func (addr *ResourceAddress) Equals(raw interface{}) bool {
+	other, ok := raw.(*ResourceAddress)
+	if !ok {
+		return false
+	}
+
+	pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
+		reflect.DeepEqual(addr.Path, other.Path)
+
+	indexMatch := addr.Index == -1 ||
+		other.Index == -1 ||
+		addr.Index == other.Index
+
+	nameMatch := addr.Name == "" ||
+		other.Name == "" ||
+		addr.Name == other.Name
+
+	typeMatch := addr.Type == "" ||
+		other.Type == "" ||
+		addr.Type == other.Type
+
+	// mode is significant only when type is set
+	modeMatch := addr.Type == "" ||
+		other.Type == "" ||
+		addr.Mode == other.Mode
+
+	return pathMatch &&
+		indexMatch &&
+		addr.InstanceType == other.InstanceType &&
+		nameMatch &&
+		typeMatch &&
+		modeMatch
+}
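A quick illustrative example of the wildcard behavior (not from the vendored file): leaving the index unspecified matches any index.

    a, _ := ParseResourceAddress("aws_instance.web")    // Index == -1 (unspecified)
    b, _ := ParseResourceAddress("aws_instance.web[0]")
    // a.Equals(b) == true, because the unspecified index acts as a wildcard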
+// Less returns true if and only if the receiver should be sorted before
+// the given address when presenting a list of resource addresses to
+// an end-user.
+//
+// This sort uses lexicographic sorting for most components, but uses
+// numeric sort for indices, thus causing index 10 to sort after
+// index 9, rather than after index 1.
+func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
+
+	switch {
+
+	case len(addr.Path) != len(other.Path):
+		return len(addr.Path) < len(other.Path)
+
+	case !reflect.DeepEqual(addr.Path, other.Path):
+		// If the two paths are the same length but don't match, we'll just
+		// cheat and compare the string forms since it's easier than
+		// comparing all of the path segments in turn, and lexicographic
+		// comparison is correct for the module path portion.
+		addrStr := addr.String()
+		otherStr := other.String()
+		return addrStr < otherStr
+
+	case addr.Mode != other.Mode:
+		return addr.Mode == DataResourceMode
+
+	case addr.Type != other.Type:
+		return addr.Type < other.Type
+
+	case addr.Name != other.Name:
+		return addr.Name < other.Name
+
+	case addr.Index != other.Index:
+		// Since "Index" is -1 for an un-indexed address, this also conveniently
+		// sorts unindexed addresses before indexed ones, should they both
+		// appear for some reason.
+		return addr.Index < other.Index
+
+	case addr.InstanceTypeSet != other.InstanceTypeSet:
+		return !addr.InstanceTypeSet
+
+	case addr.InstanceType != other.InstanceType:
+		// InstanceType is actually an enum, so this is just an arbitrary
+		// sort based on the enum numeric values, and thus not particularly
+		// meaningful.
+		return addr.InstanceType < other.InstanceType
+
+	default:
+		return false
+
+	}
+}
+
+func ParseResourceIndex(s string) (int, error) {
+	if s == "" {
+		return -1, nil
+	}
+	return strconv.Atoi(s)
+}
+
+func ParseResourcePath(s string) []string {
+	if s == "" {
+		return nil
+	}
+	parts := strings.Split(s, ".")
+	path := make([]string, 0, len(parts))
+	for _, s := range parts {
+		// Due to the limitations of the regexp match below, the path match has
+		// some noise in it we have to filter out :|
+		if s == "" || s == "module" {
+			continue
+		}
+		path = append(path, s)
+	}
+	return path
+}
+
+func ParseInstanceType(s string) (InstanceType, error) {
+	switch s {
+	case "", "primary":
+		return TypePrimary, nil
+	case "deposed":
+		return TypeDeposed, nil
+	case "tainted":
+		return TypeTainted, nil
+	default:
+		return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
+	}
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+	// Example of portions of the regexp below using the
+	// string "aws_instance.web.tainted[1]"
+	re := regexp.MustCompile(`\A` +
+		// "module.foo.module.bar" (optional)
+		`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+		// possibly "data.", if targeting is a data resource
+		`(?P<data_prefix>(?:data\.)?)` +
+		// "aws_instance.web" (optional when module path specified)
+		`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+		// "tainted" (optional, omission implies: "primary")
+		`(?:\.(?P<instance_type>\w+))?` +
+		// "1" (optional, omission implies: "0")
+		`(?:\[(?P<index>\d+)\])?` +
+		`\z`)
+
+	groupNames := re.SubexpNames()
+	rawMatches := re.FindAllStringSubmatch(s, -1)
+	if len(rawMatches) != 1 {
+		return nil, fmt.Errorf("invalid resource address %q", s)
+	}
+
+	matches := make(map[string]string)
+	for i, m := range rawMatches[0] {
+		matches[groupNames[i]] = m
+	}
+
+	return matches, nil
+}
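To make the group structure concrete, an illustrative call (not part of the vendored file):

    matches, _ := tokenizeResourceAddress("module.foo.data.aws_ami.ubuntu[1]")
    // matches["path"]          == "module.foo."
    // matches["data_prefix"]   == "data."
    // matches["type"]          == "aws_ami"
    // matches["name"]          == "ubuntu"
    // matches["instance_type"] == ""
    // matches["index"]         == "1"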
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go
new file mode 100644
index 00000000000..c83643a65ca
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go
@@ -0,0 +1,12 @@
+package terraform
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+
+// ResourceMode is deprecated, use addrs.ResourceMode instead.
+// It has been preserved for backwards compatibility.
+type ResourceMode int
+
+const (
+	ManagedResourceMode ResourceMode = iota
+	DataResourceMode
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go
new file mode 100644
index 00000000000..ba84346a218
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ManagedResourceMode-0]
+	_ = x[DataResourceMode-1]
+}
+
+const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
+
+var _ResourceMode_index = [...]uint8{0, 19, 35}
+
+func (i ResourceMode) String() string {
+	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
+		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go
new file mode 100644
index 00000000000..670bcba20bf
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go
@@ -0,0 +1,319 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
+)
+
+// ResourceProvider is an interface that must be implemented by any
+// resource provider: the thing that creates and manages the resources in
+// a Terraform configuration.
+//
+// Important implementation note: All returned pointers, such as
+// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
+// shared data. Terraform is highly parallel and assumes that this data is safe
+// to read/write in parallel so it must be unique references. Note that it is
+// safe to return arguments as results, however.
+type ResourceProvider interface {
+	/*********************************************************************
+	 * Functions related to the provider
+	 *********************************************************************/
+
+	// GetSchema returns the config schema for the main provider
+	// configuration, as would appear in a "provider" block in the
+	// configuration files.
+	//
+	// Currently not all providers support schema. Callers must therefore
+	// first call Resources and DataSources and ensure that at least one
+	// resource or data source has the SchemaAvailable flag set.
+	GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
+
+	// Input was used prior to v0.12 to ask the provider to prompt the user
+	// for input to complete the configuration.
+	//
+	// From v0.12 onwards this method is never called because Terraform Core
+	// is able to handle the necessary input logic itself based on the
+	// schema returned from GetSchema.
+	Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
+
+	// Validate is called once at the beginning with the raw configuration
+	// (no interpolation done) and can return a list of warnings and/or
+	// errors.
+	//
+	// This is called once with the provider configuration only. It may not
+	// be called at all if no provider configuration is given.
+	//
+	// This should not assume that any values of the configurations are valid.
+	// The primary use case of this call is to check that required keys are
+	// set.
+	Validate(*ResourceConfig) ([]string, []error)
+
+	// Configure configures the provider itself with the configuration
+	// given. This is useful for setting things like access keys.
+	//
+	// This won't be called at all if no provider configuration is given.
+	//
+	// Configure returns an error if configuration fails.
+ Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. + // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. 
+ ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. + ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderResolver is an interface implemented by objects that are +// able to resolve a given set of resource provider version constraints +// into ResourceProviderFactory callbacks. +type ResourceProviderResolver interface { + // Given a constraint map, return a ResourceProviderFactory for each + // requested provider. If some or all of the constraints cannot be + // satisfied, return a non-nil slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) +} + +// ResourceProviderResolverFunc wraps a callback function and turns it into +// a ResourceProviderResolver implementation, for convenience in situations +// where a function and its associated closure are sufficient as a resolver +// implementation. 
+type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) + +// ResolveProviders implements ResourceProviderResolver by calling the +// wrapped function. +func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + return f(reqd) +} + +// ResourceProviderResolverFixed returns a ResourceProviderResolver that +// has a fixed set of provider factories provided by the caller. The returned +// resolver ignores version constraints entirely and just returns the given +// factory for each requested provider name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { + return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + ret := make(map[string]ResourceProviderFactory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. +func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} + +// resourceProviderFactories matches available plugins to the given version +// requirements to produce a map of compatible provider plugins if possible, +// or an error if the currently-available plugins are insufficient. +// +// This should be called only with configurations that have passed calls +// to config.Validate(), which ensures that all of the given version +// constraints are valid. It will panic if any invalid constraints are present. +func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret, errs := resolver.ResolveProviders(reqd) + if errs != nil { + diags = diags.Append( + tfdiags.Sourceless(tfdiags.Error, + "Could not satisfy plugin requirements", + errPluginInit, + ), + ) + + for _, err := range errs { + diags = diags.Append(err) + } + + return nil, diags + } + + return ret, nil +} + +const errPluginInit = ` +Plugin reinitialization required. Please run "terraform init". + +Plugins are external binaries that Terraform uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. 
To see the +requirements and constraints from each module, run "terraform providers". +` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go new file mode 100644 index 00000000000..4000e3d2149 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go @@ -0,0 +1,315 @@ +package terraform + +import ( + "sync" +) + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + CloseCalled bool + CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) 
([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} + +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil { + return p.ValidateResourceFn(t, c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureFn != nil { + return p.ConfigureFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. 
Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go new file mode 100644 index 00000000000..74ee2a940d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go @@ -0,0 +1,70 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + 
"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. +type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns the new + // resource state along with an error. Instead of a diff, the ResourceConfig + // is provided since provisioners only run after a resource has been + // newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. 
+type ProvisionerFactory = provisioners.Factory diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go new file mode 100644 index 00000000000..ed6f241bc86 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. +type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetConfigSchemaCalled bool + GetConfigSchemaReturnSchema *configschema.Block + GetConfigSchemaReturnError error + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) + +func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + p.GetConfigSchemaCalled = true + return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go new file mode 100644 index 00000000000..8bc3b017b1e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go @@ -0,0 +1,278 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. 
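+//
+// An illustrative lookup sketch (the provider name is a hypothetical
+// placeholder):
+//
+//	if block := schemas.ProviderConfig("example"); block != nil {
+//		// block describes the arguments of the provider's own
+//		// configuration, as returned by the plugin.
+//	}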
+type Schemas struct {
+	Providers    map[string]*ProviderSchema
+	Provisioners map[string]*configschema.Block
+}
+
+// ProviderSchema returns the entire ProviderSchema object that was produced
+// by the plugin for the given provider, or nil if no such schema is available.
+//
+// It's usually better to use the more precise methods offered by type
+// Schemas to handle this detail automatically.
+func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema {
+	if ss.Providers == nil {
+		return nil
+	}
+	return ss.Providers[typeName]
+}
+
+// ProviderConfig returns the schema for the provider configuration of the
+// given provider type, or nil if no such schema is available.
+func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block {
+	ps := ss.ProviderSchema(typeName)
+	if ps == nil {
+		return nil
+	}
+	return ps.Provider
+}
+
+// ResourceTypeConfig returns the schema for the configuration of a given
+// resource type belonging to a given provider type, or nil if no such
+// schema is available.
+//
+// In many cases the provider type is inferrable from the resource type name,
+// but this is not always true because users can override the provider for
+// a resource using the "provider" meta-argument. Therefore it's important to
+// always pass the correct provider name, even though in many cases it feels
+// redundant.
+func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) {
+	ps := ss.ProviderSchema(providerType)
+	if ps == nil || ps.ResourceTypes == nil {
+		return nil, 0
+	}
+	return ps.SchemaForResourceType(resourceMode, resourceType)
+}
+
+// ProvisionerConfig returns the schema for the configuration of a given
+// provisioner, or nil if no such schema is available.
+func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block {
+	return ss.Provisioners[name]
+}
+
+// LoadSchemas searches the given configuration, state and plan (any of which
+// may be nil) for constructs that have an associated schema, requests the
+// necessary schemas from the given component factory (which must _not_ be nil),
+// and returns a single object representing all of the necessary schemas.
+//
+// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing
+// errors across multiple separate objects. Errors here will usually indicate
+// either misbehavior on the part of one of the providers or of the provider
+// protocol itself. When returned with errors, the returned schemas object is
+// still valid but may be incomplete.
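+//
+// An illustrative call pattern (sketch only):
+//
+//	schemas, err := LoadSchemas(config, state, components)
+//	if err != nil {
+//		log.Printf("[WARN] schemas are incomplete: %s", err)
+//	}
+//	// schemas is non-nil and usable even when err is non-nil.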
+func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[string]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(typeName string) { + if _, exists := schemas[typeName]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) + provider, err := components.ResourceProvider(typeName, "early/"+typeName) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), + ) + } + } + + schemas[typeName] = s + } + + if config != nil { + for _, typeName := range config.ProviderTypes() { + ensure(typeName) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeName := range needed { + ensure(typeName) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name, "early/"+name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. + for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. 
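+//
+// For example (an illustrative sketch; the address is hypothetical):
+//
+//	schema, version := ps.SchemaForResourceAddr(addrs.Resource{
+//		Mode: addrs.ManagedResourceMode,
+//		Type: "example_thing",
+//		Name: "foo",
+//	})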
+func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
+	return ps.SchemaForResourceType(addr.Mode, addr.Type)
+}
+
+// ProviderSchemaRequest is used to describe to a ResourceProvider which
+// aspects of schema are required, when calling the GetSchema method.
+type ProviderSchemaRequest struct {
+	ResourceTypes []string
+	DataSources   []string
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go
new file mode 100644
index 00000000000..e70e8ab2438
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go
@@ -0,0 +1,2217 @@
+package terraform
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	uuid "github.com/hashicorp/go-uuid"
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+	tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version"
+	"github.com/mitchellh/copystructure"
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+)
+
+const (
+	// StateVersion is the current version for our state file
+	StateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath transforms a legacy module path (which may or may not
+// have a redundant "root" label at the start of it) into an
+// addrs.ModuleInstance representing the same module.
+//
+// For legacy reasons, different parts of Terraform disagree about whether the
+// root module has the path []string{} or []string{"root"}, and so this
+// function accepts both and trims off the "root". An implication of this is
+// that it's not possible to actually have a module call in the root module
+// that is itself named "root", since that would be ambiguous.
+//
+// normalizeModulePath takes a raw module path and returns a path that
+// has the rootModulePath prepended to it. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this but that's a big refactor that my branch doesn't make sense
+// for. Instead, this function normalizes paths.
+func normalizeModulePath(p []string) addrs.ModuleInstance {
+	// FIXME: Remove this once everyone is using addrs.ModuleInstance.
+
+	if len(p) > 0 && p[0] == "root" {
+		p = p[1:]
+	}
+
+	ret := make(addrs.ModuleInstance, len(p))
+	for i, name := range p {
+		// For now we don't actually support modules with multiple instances
+		// identified by keys, so we just treat every path element as a
+		// step with no key.
+ ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. 
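+	// For example (illustrative), the module instance addressed as
+	// module.a.module.b is stored with the legacy path
+	// []string{"root", "a", "b"}.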
+ legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. 
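+// For example (the address syntax shown is illustrative),
+// s.Remove("test_instance.foo") removes that resource from the root module.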
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
+func (s *State) Remove(addr ...string) error {
+	s.Lock()
+	defer s.Unlock()
+
+	// Filter out what we need to delete
+	filter := &StateFilter{State: s}
+	results, err := filter.Filter(addr...)
+	if err != nil {
+		return err
+	}
+
+	// If we have no results, just exit early, we're not going to do anything.
+	// While what happens below is fairly fast, this is an important early
+	// exit since the prune below might modify the state more and we don't
+	// want to modify the state if we don't have to.
+	if len(results) == 0 {
+		return nil
+	}
+
+	// Go through each result and grab what we need
+	removed := make(map[interface{}]struct{})
+	for _, r := range results {
+		// Convert the path to our own type
+		path := append([]string{"root"}, r.Path...)
+
+		// If we removed this already, then ignore
+		if _, ok := removed[r.Value]; ok {
+			continue
+		}
+
+		// If we removed the parent already, then ignore
+		if r.Parent != nil {
+			if _, ok := removed[r.Parent.Value]; ok {
+				continue
+			}
+		}
+
+		// Add this to the removed list
+		removed[r.Value] = struct{}{}
+
+		switch v := r.Value.(type) {
+		case *ModuleState:
+			s.removeModule(path, v)
+		case *ResourceState:
+			s.removeResource(path, v)
+		case *InstanceState:
+			s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+		default:
+			return fmt.Errorf("unknown type to delete: %T", r.Value)
+		}
+	}
+
+	// Prune since the removal functions often do the bare minimum to
+	// remove a thing and may leave around dangling empty modules, resources,
+	// etc. Prune will clean that all up.
+	s.prune()
+
+	return nil
+}
+
+func (s *State) removeModule(path []string, v *ModuleState) {
+	for i, m := range s.Modules {
+		if m == v {
+			s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+			return
+		}
+	}
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+	// Get the module this resource lives in. If it doesn't exist, we're done.
+	mod := s.moduleByPath(normalizeModulePath(path))
+	if mod == nil {
+		return
+	}
+
+	// Find this resource. This is an O(N) lookup; if we had the key
+	// it could be O(1), but even with thousands of resources this shouldn't
+	// matter right now. We can easily up performance here when the time comes.
+	for k, r := range mod.Resources {
+		if r == v {
+			// Found it
+			delete(mod.Resources, k)
+			return
+		}
+	}
+}
+
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+	// Go through the resource and find the instance that matches this
+	// (if any) and remove it.
+
+	// Check primary
+	if r.Primary == v {
+		r.Primary = nil
+		return
+	}
+
+	// Check lists
+	lists := [][]*InstanceState{r.Deposed}
+	for _, is := range lists {
+		for i, instance := range is {
+			if instance == v {
+				// Found it, remove it
+				is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
+
+				// Done
+				return
+			}
+		}
+	}
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+	root := s.ModuleByPath(addrs.RootModuleInstance)
+	if root == nil {
+		panic("missing root module")
+	}
+	return root
+}
+
+// Equal tests if one state is equal to another.
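+//
+// Note that only the state version and the per-module contents are
+// compared; serial and lineage are not part of this comparison.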
+func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. +// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. 
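+//
+// An illustrative use (sketch only):
+//
+//	switch cmp, err := local.CompareAges(remote); {
+//	case err != nil:
+//		// differing lineage: the comparison is meaningless
+//	case cmp == StateAgeReceiverOlder:
+//		// the local state is stale relative to the remote one
+//	}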
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
+	// nil states are "older" than actual states
+	switch {
+	case s != nil && other == nil:
+		return StateAgeReceiverNewer, nil
+	case s == nil && other != nil:
+		return StateAgeReceiverOlder, nil
+	case s == nil && other == nil:
+		return StateAgeEqual, nil
+	}
+
+	if !s.SameLineage(other) {
+		return StateAgeEqual, fmt.Errorf(
+			"can't compare two states of differing lineage",
+		)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	switch {
+	case s.Serial < other.Serial:
+		return StateAgeReceiverOlder, nil
+	case s.Serial > other.Serial:
+		return StateAgeReceiverNewer, nil
+	default:
+		return StateAgeEqual, nil
+	}
+}
+
+// SameLineage returns true only if the state given as an argument belongs
+// to the same "lineage" of states as the receiver.
+func (s *State) SameLineage(other *State) bool {
+	s.Lock()
+	defer s.Unlock()
+
+	// If one of the states has no lineage then it is assumed to predate
+	// this concept, and so we'll accept it as belonging to any lineage
+	// so that a lineage string can be assigned to newer versions
+	// without breaking compatibility with older versions.
+	if s.Lineage == "" || other.Lineage == "" {
+		return true
+	}
+
+	return s.Lineage == other.Lineage
+}
+
+// DeepCopy performs a deep copy of the state structure and returns
+// a new structure.
+func (s *State) DeepCopy() *State {
+	if s == nil {
+		return nil
+	}
+
+	copy, err := copystructure.Config{Lock: true}.Copy(s)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*State)
+}
+
+// FromFutureTerraform checks if this state was written by a Terraform
+// version from the future.
+func (s *State) FromFutureTerraform() bool {
+	s.Lock()
+	defer s.Unlock()
+
+	// No TF version means it is certainly from the past
+	if s.TFVersion == "" {
+		return false
+	}
+
+	v := version.Must(version.NewVersion(s.TFVersion))
+	return tfversion.SemVer.LessThan(v)
+}
+
+func (s *State) Init() {
+	s.Lock()
+	defer s.Unlock()
+	s.init()
+}
+
+func (s *State) init() {
+	if s.Version == 0 {
+		s.Version = StateVersion
+	}
+
+	if s.moduleByPath(addrs.RootModuleInstance) == nil {
+		s.addModule(addrs.RootModuleInstance)
+	}
+	s.ensureHasLineage()
+
+	for _, mod := range s.Modules {
+		if mod != nil {
+			mod.init()
+		}
+	}
+
+	if s.Remote != nil {
+		s.Remote.init()
+	}
+
+}
+
+func (s *State) EnsureHasLineage() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.ensureHasLineage()
+}
+
+func (s *State) ensureHasLineage() {
+	if s.Lineage == "" {
+		lineage, err := uuid.GenerateUUID()
+		if err != nil {
+			panic(fmt.Errorf("Failed to generate lineage: %v", err))
+		}
+		s.Lineage = lineage
+		log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+	} else {
+		log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+	}
+}
+
+// AddModuleState inserts this module state, overriding any existing ModuleState
+func (s *State) AddModuleState(mod *ModuleState) {
+	mod.init()
+	s.Lock()
+	defer s.Unlock()
+
+	s.addModuleState(mod)
+}
+
+func (s *State) addModuleState(mod *ModuleState) {
+	for i, m := range s.Modules {
+		if reflect.DeepEqual(m.Path, mod.Path) {
+			s.Modules[i] = mod
+			return
+		}
+	}
+
+	s.Modules = append(s.Modules, mod)
+	s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+	if s == nil {
+		return
+	}
+
+	// Filter out empty modules.
+	// A module is always assumed to have a path, and its length isn't always
+	// bounds checked later on. Modules may be "emptied" during destroy, but we
+	// never want to store those in the state.
+	for i := 0; i < len(s.Modules); i++ {
+		if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+			s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+			i--
+		}
+	}
+
+	for _, mod := range s.Modules {
+		mod.prune()
+	}
+	if s.Remote != nil && s.Remote.Empty() {
+		s.Remote = nil
+	}
+}
+
+// sort sorts the modules
+func (s *State) sort() {
+	sort.Sort(moduleStateSort(s.Modules))
+
+	// Allow modules to be sorted
+	for _, m := range s.Modules {
+		if m != nil {
+			m.sort()
+		}
+	}
+}
+
+func (s *State) String() string {
+	if s == nil {
+		return ""
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	var buf bytes.Buffer
+	for _, m := range s.Modules {
+		mStr := m.String()
+
+		// If we're the root module, we just write the output directly.
+		if reflect.DeepEqual(m.Path, rootModulePath) {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			text := s.Text()
+			if text != "" {
+				text = "  " + text
+			}
+
+			buf.WriteString(fmt.Sprintf("%s\n", text))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
+type BackendState struct {
+	Type      string          `json:"type"`   // Backend type
+	ConfigRaw json.RawMessage `json:"config"` // Backend raw config
+	Hash      uint64          `json:"hash"`   // Hash of portion of configuration from config files
+}
+
+// Empty returns true if BackendState has no state.
+func (s *BackendState) Empty() bool {
+	return s == nil || s.Type == ""
+}
+
+// Config decodes the type-specific configuration object using the provided
+// schema and returns the result as a cty.Value.
+//
+// An error is returned if the stored configuration does not conform to the
+// given schema.
+func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) {
+	ty := schema.ImpliedType()
+	if s == nil {
+		return cty.NullVal(ty), nil
+	}
+	return ctyjson.Unmarshal(s.ConfigRaw, ty)
+}
+
+// SetConfig replaces (in-place) the type-specific configuration object using
+// the provided value and associated schema.
+//
+// An error is returned if the given value does not conform to the implied
+// type of the schema.
+func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error {
+	ty := schema.ImpliedType()
+	buf, err := ctyjson.Marshal(val, ty)
+	if err != nil {
+		return err
+	}
+	s.ConfigRaw = buf
+	return nil
+}
+
+// ForPlan produces an alternative representation of the receiver that is
+// suitable for storing in a plan. The current workspace must additionally
+// be provided, to be stored alongside the backend configuration.
+//
+// The backend configuration schema is required in order to properly
+// encode the backend-specific configuration settings.
+func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	configVal, err := s.Config(schema)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err)
+	}
+	return plans.NewBackend(s.Type, configVal, schema, workspaceName)
+}
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
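+//
+// For example (illustrative values only), a legacy S3 remote might be
+// recorded as:
+//
+//	RemoteState{
+//		Type:   "s3",
+//		Config: map[string]string{"bucket": "my-state", "key": "prod.tfstate"},
+//	}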
+type RemoteState struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+
+	mu sync.Mutex
+}
+
+func (s *RemoteState) Lock()   { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+func (r *RemoteState) init() {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.Config == nil {
+		r.Config = make(map[string]string)
+	}
+}
+
+func (r *RemoteState) Empty() bool {
+	if r == nil {
+		return true
+	}
+	r.Lock()
+	defer r.Unlock()
+
+	return r.Type == ""
+}
+
+func (r *RemoteState) Equals(other *RemoteState) bool {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.Type != other.Type {
+		return false
+	}
+	if len(r.Config) != len(other.Config) {
+		return false
+	}
+	for k, v := range r.Config {
+		if other.Config[k] != v {
+			return false
+		}
+	}
+	return true
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+	// Sensitive describes whether the output is considered sensitive,
+	// which may lead to masking the value on screen in some cases.
+	Sensitive bool `json:"sensitive"`
+	// Type describes the structure of Value. Valid values are "string",
+	// "map" and "list"
+	Type string `json:"type"`
+	// Value contains the value of the output, in the structure described
+	// by the Type field.
+	Value interface{} `json:"value"`
+
+	mu sync.Mutex
+}
+
+func (s *OutputState) Lock()   { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+	return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+	if s == nil && other == nil {
+		return true
+	}
+
+	if s == nil || other == nil {
+		return false
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Type != other.Type {
+		return false
+	}
+
+	if s.Sensitive != other.Sensitive {
+		return false
+	}
+
+	if !reflect.DeepEqual(s.Value, other.Value) {
+		return false
+	}
+
+	return true
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Prior to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree
+	Path []string `json:"path"`
+
+	// Locals are kept only transiently in-memory, because we can always
+	// re-compute them.
+	Locals map[string]interface{} `json:"-"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]*OutputState `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*ResourceState `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
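+	//
+	// For example (illustrative), an entry in this list might look like
+	// "aws_vpc.main".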
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	mu sync.Mutex
+}
+
+func (s *ModuleState) Lock()   { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	// Paths must be equal
+	if !reflect.DeepEqual(m.Path, other.Path) {
+		return false
+	}
+
+	// Outputs must be equal
+	if len(m.Outputs) != len(other.Outputs) {
+		return false
+	}
+	for k, v := range m.Outputs {
+		if !other.Outputs[k].Equal(v) {
+			return false
+		}
+	}
+
+	// Dependencies must be equal. This sorts these in place but
+	// this shouldn't cause any problems.
+	sort.Strings(m.Dependencies)
+	sort.Strings(other.Dependencies)
+	if len(m.Dependencies) != len(other.Dependencies) {
+		return false
+	}
+	for i, d := range m.Dependencies {
+		if other.Dependencies[i] != d {
+			return false
+		}
+	}
+
+	// Resources must be equal
+	if len(m.Resources) != len(other.Resources) {
+		return false
+	}
+	for k, r := range m.Resources {
+		otherR, ok := other.Resources[k]
+		if !ok {
+			return false
+		}
+
+		if !r.Equal(otherR) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsRoot says whether or not this module state is for the root module.
+func (m *ModuleState) IsRoot() bool {
+	m.Lock()
+	defer m.Unlock()
+	return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	i := len(m.Path)
+	return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
+}
+
+// Orphans returns a list of keys of resources that are in the State
+// but aren't present in the configuration itself. Hence, these keys
+// represent the state of resources that are orphans.
+func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance {
+	m.Lock()
+	defer m.Unlock()
+
+	inConfig := make(map[string]struct{})
+	if c != nil {
+		for _, r := range c.ManagedResources {
+			inConfig[r.Addr().String()] = struct{}{}
+		}
+		for _, r := range c.DataResources {
+			inConfig[r.Addr().String()] = struct{}{}
+		}
+	}
+
+	var result []addrs.ResourceInstance
+	for k := range m.Resources {
+		// Since we've not yet updated state to use our new address format,
+		// we need to do some shimming here.
+		legacyAddr, err := parseResourceAddressInternal(k)
+		if err != nil {
+			// Suggests that the user tampered with the state, since we always
+			// generate valid internal addresses.
+			log.Printf("ModuleState has invalid resource key %q. Ignoring.", k)
+			continue
+		}
+
+		addr := legacyAddr.AbsResourceInstanceAddr().Resource
+		compareKey := addr.Resource.String() // compare by resource address, ignoring instance key
+		if _, exists := inConfig[compareKey]; !exists {
+			result = append(result, addr)
+		}
+	}
+	return result
+}
+
+// RemovedOutputs returns a list of outputs that are in the State but aren't
+// present in the configuration itself.
+func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue {
+	if outputs == nil {
+		// If we got no output map at all then we'll just treat our set of
+		// configured outputs as empty, since that suggests that they've all
+		// been removed by removing their containing module.
+		outputs = make(map[string]*configs.Output)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	var ret []addrs.OutputValue
+	for n := range s.Outputs {
+		if _, declared := outputs[n]; !declared {
+			ret = append(ret, addrs.OutputValue{
+				Name: n,
+			})
+		}
+	}
+
+	return ret
+}
+
+// View returns a view with the given resource prefix.
+func (m *ModuleState) View(id string) *ModuleState {
+	if m == nil {
+		return m
+	}
+
+	r := m.deepcopy()
+	for k := range r.Resources {
+		if id == k || strings.HasPrefix(k, id+".") {
+			continue
+		}
+
+		delete(r.Resources, k)
+	}
+
+	return r
+}
+
+func (m *ModuleState) init() {
+	m.Lock()
+	defer m.Unlock()
+
+	if m.Path == nil {
+		m.Path = []string{}
+	}
+	if m.Outputs == nil {
+		m.Outputs = make(map[string]*OutputState)
+	}
+	if m.Resources == nil {
+		m.Resources = make(map[string]*ResourceState)
+	}
+
+	if m.Dependencies == nil {
+		m.Dependencies = make([]string, 0)
+	}
+
+	for _, rs := range m.Resources {
+		rs.init()
+	}
+}
+
+func (m *ModuleState) deepcopy() *ModuleState {
+	if m == nil {
+		return nil
+	}
+
+	stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
+	if err != nil {
+		panic(err)
+	}
+
+	return stateCopy.(*ModuleState)
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+	m.Lock()
+	defer m.Unlock()
+
+	for k, v := range m.Resources {
+		if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
+			delete(m.Resources, k)
+			continue
+		}
+
+		v.prune()
+	}
+
+	for k, v := range m.Outputs {
+		if v.Value == hcl2shim.UnknownVariableValue {
+			delete(m.Outputs, k)
+		}
+	}
+
+	m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+	for _, v := range m.Resources {
+		v.sort()
+	}
+}
+
+func (m *ModuleState) String() string {
+	m.Lock()
+	defer m.Unlock()
+
+	var buf bytes.Buffer
+
+	if len(m.Resources) == 0 {
+		buf.WriteString("<no state>")
+	}
+
+	names := make([]string, 0, len(m.Resources))
+	for name := range m.Resources {
+		names = append(names, name)
+	}
+
+	sort.Sort(resourceNameSort(names))
+
+	for _, k := range names {
+		rs := m.Resources[k]
+		var id string
+		if rs.Primary != nil {
+			id = rs.Primary.ID
+		}
+		if id == "" {
+			id = "<not created>"
+		}
+
+		taintStr := ""
+		if rs.Primary.Tainted {
+			taintStr = " (tainted)"
+		}
+
+		deposedStr := ""
+		if len(rs.Deposed) > 0 {
+			deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+		buf.WriteString(fmt.Sprintf("  ID = %s\n", id))
+		if rs.Provider != "" {
+			buf.WriteString(fmt.Sprintf("  provider = %s\n", rs.Provider))
+		}
+
+		var attributes map[string]string
+		if rs.Primary != nil {
+			attributes = rs.Primary.Attributes
+		}
+		attrKeys := make([]string, 0, len(attributes))
+		for ak := range attributes {
+			if ak == "id" {
+				continue
+			}
+
+			attrKeys = append(attrKeys, ak)
+		}
+
+		sort.Strings(attrKeys)
+
+		for _, ak := range attrKeys {
+			av := attributes[ak]
+			buf.WriteString(fmt.Sprintf("  %s = %s\n", ak, av))
+		}
+
+		for idx, t := range rs.Deposed {
+			taintStr := ""
+			if t.Tainted {
+				taintStr = " (tainted)"
+			}
+			buf.WriteString(fmt.Sprintf("  Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+		}
+
+		if len(rs.Dependencies) > 0 {
+			buf.WriteString("\n  Dependencies:\n")
+			for _, dep := range rs.Dependencies {
+				buf.WriteString(fmt.Sprintf("    %s\n", dep))
+			}
+		}
+	}
+
+	if len(m.Outputs) > 0 {
+		buf.WriteString("\nOutputs:\n\n")
+
+		ks := make([]string, 0, len(m.Outputs))
+		for k := range m.Outputs {
+			ks = append(ks, k)
+		}
+
+		sort.Strings(ks)
+
+		for _, k := range ks {
+			v := m.Outputs[k]
+			switch vTyped := v.Value.(type) {
+			case string:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case []interface{}:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case map[string]interface{}:
+				var mapKeys []string
+				for key := range vTyped {
+					mapKeys = append(mapKeys, key)
+				}
+				sort.Strings(mapKeys)
+
+				var mapBuf bytes.Buffer
+				mapBuf.WriteString("{")
+				for _, key := range mapKeys {
+					mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+				}
+				mapBuf.WriteString("}")
+
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+func (m *ModuleState) Empty() bool {
+	return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+	Name  string
+	Type  string
+	Mode  ResourceMode
+	Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+	if rsk == nil || other == nil {
+		return false
+	}
+	if rsk.Mode != other.Mode {
+		return false
+	}
+	if rsk.Type != other.Type {
+		return false
+	}
+	if rsk.Name != other.Name {
+		return false
+	}
+	if rsk.Index != other.Index {
+		return false
+	}
+	return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+	if rsk == nil {
+		return ""
+	}
+	var prefix string
+	switch rsk.Mode {
+	case ManagedResourceMode:
+		prefix = ""
+	case DataResourceMode:
+		prefix = "data."
+	default:
+		panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+	}
+	if rsk.Index == -1 {
+		return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+	}
+	return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// ParseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a resource name and resource index. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1.
+func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
+	parts := strings.Split(k, ".")
+	mode := ManagedResourceMode
+	if len(parts) > 0 && parts[0] == "data" {
+		mode = DataResourceMode
+		// Don't need the constant "data" prefix for parsing
+		// now that we've figured out the mode.
+		parts = parts[1:]
+	}
+	if len(parts) < 2 || len(parts) > 3 {
+		return nil, fmt.Errorf("Malformed resource state key: %s", k)
+	}
+	rsk := &ResourceStateKey{
+		Mode:  mode,
+		Type:  parts[0],
+		Name:  parts[1],
+		Index: -1,
+	}
+	if len(parts) == 3 {
+		index, err := strconv.Atoi(parts[2])
+		if err != nil {
+			return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+		}
+		rsk.Index = index
+	}
+	return rsk, nil
+}
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes holds attributes of the created resource that are
+// queryable via interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+//
+type ResourceState struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *InstanceState `json:"primary"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up.
+	//
+	// If there were problems creating the replacement Primary, the Deposed
+	// instance and the (now tainted) replacement Primary will be swapped so the
+	// tainted replacement will be cleaned up instead.
+	//
+	// An instance will remain in the Deposed list until it is successfully
+	// destroyed and purged.
+	Deposed []*InstanceState `json:"deposed"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider"`
+
+	mu sync.Mutex
+}
+
+func (s *ResourceState) Lock() { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
+func (s *ResourceState) Equal(other *ResourceState) bool {
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Type != other.Type {
+		return false
+	}
+
+	if s.Provider != other.Provider {
+		return false
+	}
+
+	// Dependencies must be equal
+	sort.Strings(s.Dependencies)
+	sort.Strings(other.Dependencies)
+	if len(s.Dependencies) != len(other.Dependencies) {
+		return false
+	}
+	for i, d := range s.Dependencies {
+		if other.Dependencies[i] != d {
+			return false
+		}
+	}
+
+	// States must be equal
+	if !s.Primary.Equal(other.Primary) {
+		return false
+	}
+
+	return true
+}
+
+// Taint marks a resource as tainted.
+func (s *ResourceState) Taint() {
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Primary != nil {
+		s.Primary.Tainted = true
+	}
+}
+
+// Untaint unmarks a resource as tainted.
+func (s *ResourceState) Untaint() {
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Primary != nil {
+		s.Primary.Tainted = false
+	}
+}
+
+// ProviderAddr returns the provider address for the receiver, by parsing the
+// string representation saved in state. An error can be returned if the
+// value in state is corrupt.
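+//
+// For example (hypothetical value, not from the original source), a Provider
+// string such as "provider.aws.west" would parse to the "aws" provider
+// configuration with alias "west".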
+func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { + var diags tfdiags.Diagnostics + + str := s.Provider + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + return addrs.AbsProviderConfig{}, diags.Err() + } + + addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags.Err() +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. 
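+//
+// Illustrative sketch (hypothetical value; the flatmap keys shown follow the
+// usual "%" count convention for maps):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{
+//		"id":   cty.StringVal("i-abc123"),
+//		"tags": cty.MapVal(map[string]cty.Value{"env": cty.StringVal("dev")}),
+//	})
+//	is := NewInstanceStateShimmedFromValue(v, 1)
+//	// is.Attributes: {"id": "i-abc123", "tags.%": "1", "tags.env": "dev"}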
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
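+//
+// Illustrative sketch (hypothetical attribute values):
+//
+//	// state: Attributes["ami"] == "ami-abc"
+//	// diff:  "ami" changes to "ami-def"; "tags.env" is NewComputed
+//	merged := state.MergeDiff(diff)
+//	// merged.Attributes["ami"] == "ami-def"
+//	// merged.Attributes["tags.env"] == hcl2shim.UnknownVariableValue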
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+	result := s.DeepCopy()
+	if result == nil {
+		result = new(InstanceState)
+	}
+	result.init()
+
+	if s != nil {
+		s.Lock()
+		defer s.Unlock()
+		for k, v := range s.Attributes {
+			result.Attributes[k] = v
+		}
+	}
+	if d != nil {
+		for k, diff := range d.CopyAttributes() {
+			if diff.NewRemoved {
+				delete(result.Attributes, k)
+				continue
+			}
+			if diff.NewComputed {
+				result.Attributes[k] = hcl2shim.UnknownVariableValue
+				continue
+			}
+
+			result.Attributes[k] = diff.New
+		}
+	}
+
+	return result
+}
+
+func (s *InstanceState) String() string {
+	notCreated := "<not created>"
+
+	if s == nil {
+		return notCreated
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	var buf bytes.Buffer
+
+	if s.ID == "" {
+		return notCreated
+	}
+
+	buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+	attributes := s.Attributes
+	attrKeys := make([]string, 0, len(attributes))
+	for ak := range attributes {
+		if ak == "id" {
+			continue
+		}
+
+		attrKeys = append(attrKeys, ak)
+	}
+	sort.Strings(attrKeys)
+
+	for _, ak := range attrKeys {
+		av := attributes[ak]
+		buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+	}
+
+	buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+	return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+	// ConnInfo is used for the providers to export information which is
+	// used to connect to the resource for provisioning. For example,
+	// this could contain SSH or WinRM credentials.
+	ConnInfo map[string]string `json:"-"`
+
+	// Type is used to specify the resource type for this instance. This is only
+	// required for import operations (as documented). If the documentation
+	// doesn't state that you need to set this, then don't worry about
+	// setting it.
+	Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+	if e.ConnInfo == nil {
+		e.ConnInfo = make(map[string]string)
+	}
+}
+
+func (e *EphemeralState) DeepCopy() *EphemeralState {
+	copy, err := copystructure.Config{Lock: true}.Copy(e)
+	if err != nil {
+		panic(err)
+	}
+
+	return copy.(*EphemeralState)
+}
+
+type jsonStateVersionIdentifier struct {
+	Version int `json:"version"`
+}
+
+// Check if this is a V0 format - the magic bytes at the start of the file
+// should be "tfstate" if so. We no longer support upgrading this type of
+// state but return an error message explaining to a user how they can
+// upgrade via the 0.6.x series.
+func testForV0State(buf *bufio.Reader) error {
+	start, err := buf.Peek(len("tfstate"))
+	if err != nil {
+		return fmt.Errorf("Failed to check for magic bytes: %v", err)
+	}
+	if string(start) == "tfstate" {
+		return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
+			"format which was used prior to Terraform 0.3. Please upgrade\n" +
+			"this state file using Terraform 0.6.16 prior to using it with\n" +
+			"Terraform 0.7.")
+	}
+
+	return nil
+}
+
+// ErrNoState is returned by ReadState when the io.Reader contains no data
+var ErrNoState = errors.New("no state")
+
+// ReadState reads a state structure out of a reader in the format that
+// was written by WriteState.
+func ReadState(src io.Reader) (*State, error) {
+	// check for a nil file specifically, since that produces a platform
+	// specific error if we try to use it in a bufio.Reader.
+	if f, ok := src.(*os.File); ok && f == nil {
+		return nil, ErrNoState
+	}
+
+	buf := bufio.NewReader(src)
+
+	if _, err := buf.Peek(1); err != nil {
+		if err == io.EOF {
+			return nil, ErrNoState
+		}
+		return nil, err
+	}
+
+	if err := testForV0State(buf); err != nil {
+		return nil, err
+	}
+
+	// If we are JSON we buffer the whole thing in memory so we can read it twice.
+	// This is suboptimal, but will work for now.
+	jsonBytes, err := ioutil.ReadAll(buf)
+	if err != nil {
+		return nil, fmt.Errorf("Reading state file failed: %v", err)
+	}
+
+	versionIdentifier := &jsonStateVersionIdentifier{}
+	if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+		return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+	}
+
+	var result *State
+	switch versionIdentifier.Version {
+	case 0:
+		return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+	case 1:
+		v1State, err := ReadStateV1(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		v2State, err := upgradeStateV1ToV2(v1State)
+		if err != nil {
+			return nil, err
+		}
+
+		v3State, err := upgradeStateV2ToV3(v2State)
+		if err != nil {
+			return nil, err
+		}
+
+		// increment the Serial whenever we upgrade state
+		v3State.Serial++
+		result = v3State
+	case 2:
+		v2State, err := ReadStateV2(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+		v3State, err := upgradeStateV2ToV3(v2State)
+		if err != nil {
+			return nil, err
+		}
+
+		v3State.Serial++
+		result = v3State
+	case 3:
+		v3State, err := ReadStateV3(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		result = v3State
+	default:
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			tfversion.SemVer.String(), versionIdentifier.Version)
+	}
+
+	// If we reached this place we must have a result set
+	if result == nil {
+		panic("resulting state in load not set, assertion failed")
+	}
+
+	// Prune the state when reading it. It's possible to write unpruned states or
+	// for a user to make a state unpruned (nil-ing a module state, for example).
+	result.prune()
+
+	// Validate the state file is valid
+	if err := result.Validate(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+	v1State := &stateV1{}
+	if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	if v1State.Version != 1 {
+		return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+			"read %d, expected 1", v1State.Version)
+	}
+
+	return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+	state := &State{}
+	if err := json.Unmarshal(jsonBytes, state); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	// Check the version to ensure we don't read a future
+	// version that we don't understand
+	if state.Version > StateVersion {
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			tfversion.SemVer.String(), state.Version)
+	}
+
+	// Make sure the version is semantic
+	if state.TFVersion != "" {
+		if _, err := version.NewVersion(state.TFVersion); err != nil {
+			return nil, fmt.Errorf(
+				"State contains invalid version: %s\n\n"+
+					"Terraform validates the version format prior to writing it. This\n"+
+					"means that this is likely the result of the state becoming corrupted\n"+
+					"through some external means. Please manually modify the Terraform\n"+
+					"version field to be a proper semantic version.",
+				state.TFVersion)
+		}
+	}
+
+	// catch any uninitialized fields in the state
+	state.init()
+
+	// Sort it
+	state.sort()
+
+	return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+	state := &State{}
+	if err := json.Unmarshal(jsonBytes, state); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	// Check the version to ensure we don't read a future
+	// version that we don't understand
+	if state.Version > StateVersion {
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			tfversion.SemVer.String(), state.Version)
+	}
+
+	// Make sure the version is semantic
+	if state.TFVersion != "" {
+		if _, err := version.NewVersion(state.TFVersion); err != nil {
+			return nil, fmt.Errorf(
+				"State contains invalid version: %s\n\n"+
+					"Terraform validates the version format prior to writing it. This\n"+
+					"means that this is likely the result of the state becoming corrupted\n"+
+					"through some external means. Please manually modify the Terraform\n"+
+					"version field to be a proper semantic version.",
+				state.TFVersion)
+		}
+	}
+
+	// catch any uninitialized fields in the state
+	state.init()
+
+	// Sort it
+	state.sort()
+
+	// Now we write the state back out to detect any changes in normalization.
+	// If our state is now written out differently, bump the serial number to
+	// prevent conflicts.
+	var buf bytes.Buffer
+	err := WriteState(state, &buf)
+	if err != nil {
+		return nil, err
+	}
+
+	if !bytes.Equal(jsonBytes, buf.Bytes()) {
+		log.Println("[INFO] state modified during read or write. incrementing serial number")
+		state.Serial++
+	}
+
+	return state, nil
+}
+
+// WriteState writes a state to the given writer in JSON format.
+func WriteState(d *State, dst io.Writer) error {
+	// writing a nil state is a noop.
+	if d == nil {
+		return nil
+	}
+
+	// make sure we have no uninitialized fields
+	d.init()
+
+	// Make sure it is sorted
+	d.sort()
+
+	// Ensure the version is set
+	d.Version = StateVersion
+
+	// If the TFVersion is set, verify it. We used to just set the version
+	// here, but this isn't safe since it changes the MD5 sum on some remote
+	// state storage backends such as Atlas. We now leave it be if needed.
+	if d.TFVersion != "" {
+		if _, err := version.NewVersion(d.TFVersion); err != nil {
+			return fmt.Errorf(
+				"Error writing state, invalid version: %s\n\n"+
+					"The Terraform version when writing the state must be a semantic\n"+
+					"version.",
+				d.TFVersion)
+		}
+	}
+
+	// Encode the data in a human-friendly way
+	data, err := json.MarshalIndent(d, "", "    ")
+	if err != nil {
+		return fmt.Errorf("Failed to encode state: %s", err)
+	}
+
+	// We append a newline to the data because MarshalIndent doesn't
+	data = append(data, '\n')
+
+	// Write the data out to the dst
+	if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+		return fmt.Errorf("Failed to write state: %v", err)
+	}
+
+	return nil
+}
+
+// resourceNameSort implements sort.Interface to sort name parts
+// lexically for strings and numerically for integer indexes.
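+//
+// For example, under this ordering (hypothetical keys):
+//
+//	aws_instance.foo.9 sorts before aws_instance.foo.10 (numeric index)
+//	aws_instance.bar sorts before aws_instance.foo (lexical name)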
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go new file mode 100644 index 00000000000..2dcb11b76b9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type StateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// ParseResourceAddress. 
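+//
+// Hypothetical usage sketch (the state value s is assumed):
+//
+//	f := &StateFilter{State: s}
+//	results, err := f.Filter("aws_instance.web", "module.network")
+//	if err == nil {
+//		for _, r := range results {
+//			fmt.Println(r.Address)
+//		}
+//	}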
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+	// Parse all the addresses
+	as := make([]*ResourceAddress, len(fs))
+	for i, v := range fs {
+		a, err := ParseResourceAddress(v)
+		if err != nil {
+			return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+		}
+
+		as[i] = a
+	}
+
+	// If we weren't given any filters, then we list all
+	if len(fs) == 0 {
+		as = append(as, &ResourceAddress{Index: -1})
+	}
+
+	// Filter each of the addresses. We keep track of this in a map to
+	// strip duplicates.
+	resultSet := make(map[string]*StateFilterResult)
+	for _, a := range as {
+		for _, r := range f.filterSingle(a) {
+			resultSet[r.String()] = r
+		}
+	}
+
+	// Make the result list
+	results := make([]*StateFilterResult, 0, len(resultSet))
+	for _, v := range resultSet {
+		results = append(results, v)
+	}
+
+	// Sort them and return
+	sort.Sort(StateFilterResultSlice(results))
+	return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+	// The slice to keep track of results
+	var results []*StateFilterResult
+
+	// Go through modules first.
+	modules := make([]*ModuleState, 0, len(f.State.Modules))
+	for _, m := range f.State.Modules {
+		if f.relevant(a, m) {
+			modules = append(modules, m)
+
+			// Only add the module to the results if we haven't specified a type.
+			// We also ignore the root module.
+			if a.Type == "" && len(m.Path) > 1 {
+				results = append(results, &StateFilterResult{
+					Path:    m.Path[1:],
+					Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+					Value:   m,
+				})
+			}
+		}
+	}
+
+	// With the modules set, go through all the resources within
+	// the modules to find relevant resources.
+	for _, m := range modules {
+		for n, r := range m.Resources {
+			// The name in the state contains valuable information. Parse.
+			key, err := ParseResourceStateKey(n)
+			if err != nil {
+				// If we get a parse error, just ignore this entry
+				// in the state.
+				continue
+			}
+
+			// Older states and test fixtures often don't contain the
+			// type directly on the ResourceState. We add this so StateFilter
+			// is a bit more robust.
+			if r.Type == "" {
+				r.Type = key.Type
+			}
+
+			if f.relevant(a, r) {
+				if a.Name != "" && a.Name != key.Name {
+					// Name doesn't match
+					continue
+				}
+
+				if a.Index >= 0 && key.Index != a.Index {
+					// Index doesn't match
+					continue
+				}
+
+				// Build the address for this resource
+				addr := &ResourceAddress{
+					Path:  m.Path[1:],
+					Name:  key.Name,
+					Type:  key.Type,
+					Index: key.Index,
+				}
+
+				// Add the resource level result
+				resourceResult := &StateFilterResult{
+					Path:    addr.Path,
+					Address: addr.String(),
+					Value:   r,
+				}
+				if !a.InstanceTypeSet {
+					results = append(results, resourceResult)
+				}
+
+				// Add the instances
+				if r.Primary != nil {
+					addr.InstanceType = TypePrimary
+					addr.InstanceTypeSet = false
+					results = append(results, &StateFilterResult{
+						Path:    addr.Path,
+						Address: addr.String(),
+						Parent:  resourceResult,
+						Value:   r.Primary,
+					})
+				}
+
+				for _, instance := range r.Deposed {
+					if f.relevant(a, instance) {
+						addr.InstanceType = TypeDeposed
+						addr.InstanceTypeSet = true
+						results = append(results, &StateFilterResult{
+							Path:    addr.Path,
+							Address: addr.String(),
+							Parent:  resourceResult,
+							Value:   instance,
+						})
+					}
+				}
+			}
+		}
+	}
+
+	return results
+}
+
+// relevant checks for relevance of this address against the given value.
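+//
+// For example (hypothetical inputs), an address with an empty Type is
+// relevant to every *ResourceState, and a module address is relevant to any
+// *ModuleState whose path it is a prefix of.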
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! + return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// StateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type StateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *StateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *StateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *StateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// StateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. 
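+//
+// For example (hypothetical addresses), a module sorts before its own
+// resources, and a resource sorts before its primary instance at the same
+// address:
+//
+//	*ModuleState:   module.network
+//	*ResourceState: module.network.aws_subnet.a
+//	*InstanceState: module.network.aws_subnet.a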
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int      { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+	a, b := s[i], s[j]
+
+	// If these addresses contain an index, we want to sort by index rather than name
+	addrA, errA := ParseResourceAddress(a.Address)
+	addrB, errB := ParseResourceAddress(b.Address)
+	if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+		return addrA.Index < addrB.Index
+	}
+
+	// If the addresses are different it is just lexicographic sorting
+	if a.Address != b.Address {
+		return a.Address < b.Address
+	}
+
+	// Addresses are the same, which means it matters on the type
+	return a.sortedType() < b.sortedType()
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 00000000000..aa13cce8030
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	remote, err := old.Remote.upgradeToV2()
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+	}
+
+	modules := make([]*ModuleState, len(old.Modules))
+	for i, module := range old.Modules {
+		upgraded, err := module.upgradeToV2()
+		if err != nil {
+			return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+		}
+		modules[i] = upgraded
+	}
+	if len(modules) == 0 {
+		modules = nil
+	}
+
+	newState := &State{
+		Version: 2,
+		Serial:  old.Serial,
+		Remote:  remote,
+		Modules: modules,
+	}
+
+	newState.sort()
+	newState.init()
+
+	return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	config, err := copystructure.Copy(old.Config)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+	}
+
+	return &RemoteState{
+		Type:   old.Type,
+		Config: config.(map[string]string),
+	}, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	pathRaw, err := copystructure.Copy(old.Path)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+	}
+	path, ok := pathRaw.([]string)
+	if !ok {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+	}
+	if len(path) == 0 {
+		// We found some V1 states with a nil path. Assume root and catch
+		// duplicate path errors later (as part of Validate).
+ path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: *ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) + } + return &EphemeralState{ + ConnInfo: connInfo.(map[string]string), + }, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go new file mode 100644 index 00000000000..e52d35fcd14 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. 
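+//
+// For example (hypothetical attributes), the count key of a map attribute is
+// rewritten from the list/set form to the map form:
+//
+//	before: "tags.#" = "2", "tags.env" = "dev", "tags.team" = "infra"
+//	after:  "tags.%" = "2", "tags.env" = "dev", "tags.team" = "infra"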
+func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. 
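+		// For example (hypothetical attributes), a lone {"tags.#": "0"} with
+		// no other "tags."-prefixed keys leaves actualKeysMatching empty, so
+		// the stale count entry is dropped here.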
+		if len(actualKeysMatching) == 0 {
+			delete(instanceState.Attributes, oldCountKey)
+			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+				strings.TrimSuffix(prefix, "."))
+		}
+	}
+
+	return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+	uniquemap := make(map[string]struct{})
+	for _, str := range input {
+		uniquemap[str] = struct{}{}
+	}
+
+	output := make([]string, len(uniquemap))
+
+	i := 0
+	for key := range uniquemap {
+		output[i] = key
+		i = i + 1
+	}
+
+	sort.Strings(output)
+	return output
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go
new file mode 100644
index 00000000000..68cffb41b5c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is used only for the purposes of backwards compatibility
+// and is no longer used in Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+	// Version is the protocol version. "1" for a StateV1.
+	Version int `json:"version"`
+
+	// Serial is incremented on any operation that modifies
+	// the State file. It is used to detect potentially conflicting
+	// updates.
+	Serial int64 `json:"serial"`
+
+	// Remote is used to track the metadata required to
+	// pull and push state files from a remote storage endpoint.
+	Remote *remoteStateV1 `json:"remote,omitempty"`
+
+	// Modules contains all the modules in a breadth-first order
+	Modules []*moduleStateV1 `json:"modules"`
+}
+
+type remoteStateV1 struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree.
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]string `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*resourceStateV1 `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *instanceStateV1 `json:"primary"`
+
+	// Tainted is used to track any underlying instances that
+	// have been created but are in a bad or unknown state and
+	// need to be cleaned up subsequently. In the
+	// standard case, there is at most a single instance.
+	// However, in pathological cases, it is possible for the number
+	// of instances to accumulate.
+	Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up. If there were problems creating the
+	// replacement, the instance remains in the Deposed list so it can be
+	// destroyed in a future run. Functionally, Deposed instances are very
+	// similar to Tainted instances in that Terraform is only tracking them in
+	// order to remember to destroy them.
+	Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+	// A unique ID for this resource. This is opaque to Terraform
+	// and is only meant as a lookup mechanism for the providers.
+	ID string `json:"id"`
+
+	// Attributes are basic information about the resource. Any keys here
+	// are accessible in variable format within Terraform configurations:
+	// ${resourcetype.name.attribute}.
+	Attributes map[string]string `json:"attributes,omitempty"`
+
+	// Ephemeral is used to store any state associated with this instance
+	// that is necessary for the Terraform run to complete, but is not
+	// persisted to a state file.
+	Ephemeral ephemeralStateV1 `json:"-"`
+
+	// Meta is a simple K/V map that is persisted to the State but otherwise
+	// ignored by Terraform core. It's meant to be used for accounting by
+	// external client code.
+	Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+	// ConnInfo is used for the providers to export information which is
+	// used to connect to the resource for provisioning. For example,
+	// this could contain SSH or WinRM credentials.
+	ConnInfo map[string]string `json:"-"`
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go
new file mode 100644
index 00000000000..3f0418d9273
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+	"os"
+	"testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+	f, err := os.Create(path)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer f.Close()
+
+	if err := WriteState(state, f); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go
new file mode 100644
index 00000000000..f9559f41b6c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go
@@ -0,0 +1,62 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+	Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+	Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that conditionally returns the
+// given GraphTransformer. This is useful for calling inline a sequence
+// of transforms without having to split it up into multiple append() calls.
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+	if f() {
+		return then
+	}
+
+	return nil
+}
+
+type graphTransformerMulti struct {
+	Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+	var lastStepStr string
+	for _, t := range t.Transforms {
+		log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t)
+		if err := t.Transform(g); err != nil {
+			return err
+		}
+		if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
+			log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr)
+			lastStepStr = thisStepStr
+		} else {
+			log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t)
+		}
+	}
+
+	return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
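+//
+// Hypothetical usage sketch (the cfg, st, and g values are assumed):
+//
+//	t := GraphTransformMulti(
+//		&AttachResourceConfigTransformer{Config: cfg},
+//		&AttachStateTransformer{State: st},
+//	)
+//	err := t.Transform(g)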
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+	return &graphTransformerMulti{Transforms: ts}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go
new file mode 100644
index 00000000000..cbac13387b3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+)
+
+// GraphNodeAttachProvider is an interface that must be implemented by nodes
+// that want provider configurations attached.
+type GraphNodeAttachProvider interface {
+	// Must be implemented to determine the path for the configuration
+	GraphNodeSubPath
+
+	// ProviderAddr returns the provider address with no module prefix.
+	// Example: "aws".
+	ProviderAddr() addrs.AbsProviderConfig
+
+	// Sets the configuration
+	AttachProvider(*configs.Provider)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go
new file mode 100644
index 00000000000..23578c78467
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+)
+
+// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
+// that want resource configurations attached.
+type GraphNodeAttachResourceConfig interface {
+	GraphNodeResource
+
+	// Sets the configuration
+	AttachResourceConfig(*configs.Resource)
+}
+
+// AttachResourceConfigTransformer goes through the graph and attaches
+// resource configuration structures to nodes that implement
+// GraphNodeAttachResourceConfig.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachResourceConfigTransformer struct {
+	Config *configs.Config // Config is the root node in the config tree
+}
+
+func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
+
+	// Go through and find GraphNodeAttachResourceConfig implementations
+	for _, v := range g.Vertices() {
+		// Only care about GraphNodeAttachResourceConfig implementations
+		arn, ok := v.(GraphNodeAttachResourceConfig)
+		if !ok {
+			continue
+		}
+
+		// Determine what we're looking for
+		addr := arn.ResourceAddr()
+
+		// Get the configuration.
+ config := t.Config.DescendentForInstance(addr.Module) + if config == nil { + log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) + continue + } + + for _, r := range config.Module.ManagedResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource + continue + } + + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + } + for _, r := range config.Module.DataResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource + continue + } + + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go new file mode 100644 index 00000000000..fee220b52bb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go @@ -0,0 +1,99 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeAttachResourceSchema is an interface implemented by node types +// that need a resource schema attached. +type GraphNodeAttachResourceSchema interface { + GraphNodeResource + GraphNodeProviderConsumer + + AttachResourceSchema(schema *configschema.Block, version uint64) +} + +// GraphNodeAttachProviderConfigSchema is an interface implemented by node types +// that need a provider configuration schema attached. +type GraphNodeAttachProviderConfigSchema interface { + GraphNodeProvider + + AttachProviderConfigSchema(*configschema.Block) +} + +// GraphNodeAttachProvisionerSchema is an interface implemented by node types +// that need one or more provisioner schemas attached. +type GraphNodeAttachProvisionerSchema interface { + ProvisionedBy() []string + + // SetProvisionerSchema is called during transform for each provisioner + // type returned from ProvisionedBy, providing the configuration schema + // for each provisioner in turn. The implementer should save these for + // later use in evaluating provisioner configuration blocks. + AttachProvisionerSchema(name string, schema *configschema.Block) +} + +// AttachSchemaTransformer finds nodes that implement +// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or +// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each +// and then passes them to a method implemented by the node. +type AttachSchemaTransformer struct { + Schemas *Schemas +} + +func (t *AttachSchemaTransformer) Transform(g *Graph) error { + if t.Schemas == nil { + // Should never happen with a reasonable caller, but we'll return a + // proper error here anyway so that we'll fail gracefully. 
+ return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") + } + + for _, v := range g.Vertices() { + + if tv, ok := v.(GraphNodeAttachResourceSchema); ok { + addr := tv.ResourceAddr() + mode := addr.Resource.Mode + typeName := addr.Resource.Type + providerAddr, _ := tv.ProvidedBy() + providerType := providerAddr.ProviderConfig.Type + + schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) + tv.AttachResourceSchema(schema, version) + } + + if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { + providerAddr := tv.ProviderAddr() + schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) + tv.AttachProviderConfigSchema(schema) + } + + if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { + names := tv.ProvisionedBy() + for _, name := range names { + schema := t.Schemas.ProvisionerConfig(name) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) + tv.AttachProvisionerSchema(name, schema) + } + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go new file mode 100644 index 00000000000..f8749487985 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go @@ -0,0 +1,68 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// GraphNodeAttachResourceState is an interface that can be implemented +// to request that a ResourceState is attached to the node. +// +// Due to a historical naming inconsistency, the type ResourceState actually +// represents the state for a particular _instance_, while InstanceState +// represents the values for that instance during a particular phase +// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState +// is supported only for nodes that represent resource instances, even though +// the name might suggest it is for containing resources. +type GraphNodeAttachResourceState interface { + GraphNodeResourceInstance + + // Sets the state + AttachResourceState(*states.Resource) +} + +// AttachStateTransformer goes through the graph and attaches +// state to nodes that implement the interfaces above. +type AttachStateTransformer struct { + State *states.State // State is the root state +} + +func (t *AttachStateTransformer) Transform(g *Graph) error { + // If no state, then nothing to do + if t.State == nil { + log.Printf("[DEBUG] Not attaching any node states: overall state is nil") + return nil + } + + for _, v := range g.Vertices() { + // Nodes implement this interface to request state attachment. 
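// As a sketch (hypothetical type, with the GraphNodeResourceInstance
// methods elided), an implementer only has to retain the copied state that
// the loop below hands it:
//
//	type exampleInstanceNode struct {
//		// ... GraphNodeResourceInstance implementation elided ...
//		state *states.Resource
//	}
//
//	func (n *exampleInstanceNode) AttachResourceState(s *states.Resource) {
//		n.state = s
//	}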
+ an, ok := v.(GraphNodeAttachResourceState) + if !ok { + continue + } + addr := an.ResourceInstanceAddr() + + rs := t.State.Resource(addr.ContainingResource()) + if rs == nil { + log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + is := rs.Instance(addr.Resource.Key) + if is == nil { + // We don't actually need this here, since we'll attach the whole + // resource state, but we still check because it'd be weird + // for the specific instance we're attaching to not to exist. + log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + // make sure to attach a copy of the state, so instances don't modify the + // same ResourceState. + an.AttachResourceState(rs.DeepCopy()) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go new file mode 100644 index 00000000000..8920761ea2d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go @@ -0,0 +1,133 @@ +package terraform + +import ( + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ConfigTransformer is a GraphTransformer that adds all the resources +// from the configuration to the graph. +// +// The module used to configure this transformer must be the root module. +// +// Only resources are added to the graph. Variables, outputs, and +// providers must be added via other transforms. +// +// Unlike ConfigTransformerOld, this transformer creates a graph with +// all resources including module resources, rather than creating module +// nodes that are then "flattened". +type ConfigTransformer struct { + Concrete ConcreteResourceNodeFunc + + // Config is the configuration to add resources from. + Config *configs.Config + + // Unique will only add resources that aren't already present in the graph. + Unique bool + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode addrs.ResourceMode + + l sync.Mutex + uniqueMap map[string]struct{} +} + +func (t *ConfigTransformer) Transform(g *Graph) error { + // Lock since we use some internal state + t.l.Lock() + defer t.l.Unlock() + + // If no configuration is available, we don't do anything + if t.Config == nil { + return nil + } + + // Reset the uniqueness map. If we're tracking uniques, then populate + // it with addresses. + t.uniqueMap = make(map[string]struct{}) + defer func() { t.uniqueMap = nil }() + if t.Unique { + for _, v := range g.Vertices() { + if rn, ok := v.(GraphNodeResource); ok { + t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} + } + } + } + + // Start the transformation process + return t.transform(g, t.Config) +} + +func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { + // If no config, do nothing + if config == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, config); err != nil { + return err + } + + // Transform all the children.
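// A typical construction of this transformer by a graph builder looks like
// the following sketch (illustrative; rootConfig is an assumed
// *configs.Config). Concrete wraps each abstract resource node in a
// walk-specific node type, or may be left nil to add the abstract nodes
// unchanged:
//
//	t := &ConfigTransformer{
//		Config: rootConfig,
//		Concrete: func(a *NodeAbstractResource) dag.Vertex {
//			return a // real builders return a walk-specific wrapper here
//		},
//	}
//	var g Graph
//	err := t.Transform(&g)
//
// The loop below then recurses into each child module's configuration.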
+ for _, c := range config.Children { + if err := t.transform(g, c); err != nil { + return err + } + } + + return nil +} + +func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { + path := config.Path + module := config.Module + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) + + // For now we assume that each module call produces only one module + // instance with no key, since we don't yet support "count" and "for_each" + // on modules. + // FIXME: As part of supporting "count" and "for_each" on modules, rework + // this so that we'll "expand" the module call first and then create graph + // nodes for each module instance separately. + instPath := path.UnkeyedInstanceShim() + + allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) + for _, r := range module.ManagedResources { + allResources = append(allResources, r) + } + for _, r := range module.DataResources { + allResources = append(allResources, r) + } + + for _, r := range allResources { + relAddr := r.Addr() + + if t.ModeFilter && relAddr.Mode != t.Mode { + // Skip non-matching modes + continue + } + + addr := relAddr.Absolute(instPath) + if _, ok := t.uniqueMap[addr.String()]; ok { + // We've already seen a resource with this address. This should + // never happen, because we enforce uniqueness in the config loader. + continue + } + + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config_flat.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config_flat.go new file mode 100644 index 00000000000..4dbdcb7424c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config_flat.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// FlatConfigTransformer is a GraphTransformer that adds the configuration +// to the graph. The module used to configure this transformer must be +// the root module. +// +// This transform adds the nodes but doesn't connect any of the references. +// The ReferenceTransformer should be used for that. +// +// NOTE: In relation to ConfigTransformer: this is a newer generation config +// transformer. It puts the _entire_ config into the graph (there is no +// "flattening" step as before). +type FlatConfigTransformer struct { + Concrete ConcreteResourceNodeFunc // What to turn resources into + + Config *configs.Config +} + +func (t *FlatConfigTransformer) Transform(g *Graph) error { + // We have nothing to do if there is no configuration. + if t.Config == nil { + return nil + } + + return t.transform(g, t.Config) +} + +func (t *FlatConfigTransformer) transform(g *Graph, config *configs.Config) error { + // If we have no configuration then there's nothing to do. + if config == nil { + return nil + } + + // Transform all the children. + for _, c := range config.Children { + if err := t.transform(g, c); err != nil { + return err + } + } + + module := config.Module + // For now we assume that each module call produces only one module + // instance with no key, since we don't yet support "count" and "for_each" + // on modules. 
+ // FIXME: As part of supporting "count" and "for_each" on modules, rework + // this so that we'll "expand" the module call first and then create graph + // nodes for each module instance separately. + instPath := config.Path.UnkeyedInstanceShim() + + for _, r := range module.ManagedResources { + addr := r.Addr().Absolute(instPath) + abstract := &NodeAbstractResource{ + Addr: addr, + Config: r, + } + // Grab the address for this resource + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go new file mode 100644 index 00000000000..892f75ec17b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// CountBoundaryTransformer adds a node that depends on everything else +// so that it runs last in order to clean up the state for nodes that +// are on the "count boundary": "foo.0" when only one exists becomes "foo" +type CountBoundaryTransformer struct { + Config *configs.Config +} + +func (t *CountBoundaryTransformer) Transform(g *Graph) error { + node := &NodeCountBoundary{ + Config: t.Config, + } + g.Add(node) + + // Depends on everything + for _, v := range g.Vertices() { + // Don't connect to ourselves + if v == node { + continue + } + + // Connect! + g.Connect(dag.BasicEdge(node, v)) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go new file mode 100644 index 00000000000..44c606407d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go @@ -0,0 +1,308 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// GraphNodeDestroyerCBD must be implemented by nodes that might be +// create-before-destroy destroyers, or might plan a create-before-destroy +// action. +type GraphNodeDestroyerCBD interface { + // CreateBeforeDestroy returns true if this node represents a node + // that is doing a CBD. + CreateBeforeDestroy() bool + + // ModifyCreateBeforeDestroy is called when the CBD state of a node + // is changed dynamically. This can return an error if this isn't + // allowed. + ModifyCreateBeforeDestroy(bool) error +} + +// GraphNodeAttachDestroyer is implemented by applyable nodes that have a +// companion destroy node. This allows the creation node to look up the status +// of the destroy node and determine if it needs to depose the existing state, +// or replace it. +// If a node is not marked as create-before-destroy in the configuration, but a +// dependency forces that status, only the destroy node will be aware of that +// status. 
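A note on the edge-direction convention that this file and the ones above rely on: g.Connect(dag.BasicEdge(a, b)) records that a depends on b, so a graph walk evaluates b before a. That is why CountBoundaryTransformer, which connects its node to every other vertex, always runs last. A one-line sketch, with x and y standing for any two dag.Vertex values already in g:

	g.Connect(dag.BasicEdge(x, y)) // x depends on y; the walk visits y first

With that convention in mind, the interface described above follows.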
+type GraphNodeAttachDestroyer interface { + // AttachDestroyNode takes a destroy node and saves a reference to that + // node in the receiver, so it can later check the status of + // CreateBeforeDestroy(). + AttachDestroyNode(n GraphNodeDestroyerCBD) +} + +// ForcedCBDTransformer detects when a particular CBD-able graph node has +// dependencies with another that has create_before_destroy set that require +// it to be forced on, and forces it on. +// +// This must be used in the plan graph builder to ensure that +// create_before_destroy settings are properly propagated before constructing +// the planned changes. This requires that the plannable resource nodes +// implement GraphNodeDestroyerCBD. +type ForcedCBDTransformer struct { +} + +func (t *ForcedCBDTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + // If there are no CBD descendents (dependent nodes), then we + // do nothing here. + if !t.hasCBDDescendent(g, v) { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) + continue + } + + // If this isn't naturally a CBD node, this means that a descendent is + // and we need to auto-upgrade this node to CBD. We do this because + // a CBD node depending on non-CBD will result in cycles. To avoid this, + // we always attempt to upgrade it. + log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) + if err := dn.ModifyCreateBeforeDestroy(true); err != nil { + return fmt.Errorf( + "%s: must have create before destroy enabled because "+ + "a dependent resource has CBD enabled. However, when "+ + "attempting to automatically do this, an error occurred: %s", + dag.VertexName(v), err) + } + } else { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) + } + } + return nil +} + +// hasCBDDescendent returns true if any descendent (node that depends on this) +// has CBD set. +func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { + s, _ := g.Descendents(v) + if s == nil { + return true + } + + for _, ov := range s.List() { + dn, ok := ov.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if dn.CreateBeforeDestroy() { + // some descendent is CreateBeforeDestroy, so we need to follow suit + log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) + return true + } + } + + return false +} + +// CBDEdgeTransformer modifies the edges of CBD nodes that went through +// the DestroyEdgeTransformer to have the right dependencies. There are +// two real tasks here: +// +// 1. With CBD, the destroy edge is inverted: the destroy depends on +// the creation. +// +// 2. A_d must depend on resources that depend on A. This is to enable +// the destroy to only happen once nodes that depend on A successfully +// update to A. Example: adding a web server updates the load balancer +// before deleting the old web server. +// +// This transformer requires that a previous transformer has already forced +// create_before_destroy on for nodes that are depended on by explicit CBD +// nodes.
This is the logic in ForcedCBDTransformer, though in practice we +// will get here by recording the CBD-ness of each change in the plan during +// the plan walk and then forcing the nodes into the appropriate setting during +// DiffTransformer when building the apply graph. +type CBDEdgeTransformer struct { + // Config and State are only needed to look up dependencies in + // any way possible. Either can be nil if not available. + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners so we can + // properly resolve implicit dependencies. + Schemas *Schemas +} + +func (t *CBDEdgeTransformer) Transform(g *Graph) error { + // Go through and reverse any destroy edges + destroyMap := make(map[string][]dag.Vertex) + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + dern, ok := v.(GraphNodeDestroyer) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + continue + } + + // Find the destroy edge. There should only be one. + for _, e := range g.EdgesTo(v) { + // Not a destroy edge, ignore it + de, ok := e.(*DestroyEdge) + if !ok { + continue + } + + log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s", + dag.VertexName(de.Source()), dag.VertexName(de.Target())) + + // Found it! Invert. + g.RemoveEdge(de) + applyNode := de.Source() + destroyNode := de.Target() + g.Connect(&DestroyEdge{S: destroyNode, T: applyNode}) + } + + // If the address has an index, we strip that. Our depMap creation + // graph doesn't expand counts so we don't currently get _exact_ + // dependencies. One day when we limit dependencies more exactly + // this will have to change. We have a test case covering this + // (depNonCBDCountBoth) so it'll be caught. + addr := dern.DestroyAddr() + key := addr.ContainingResource().String() + + // Add this to the list of nodes that we need to fix up + // the edges for (step 2 above in the docs). + destroyMap[key] = append(destroyMap[key], v) + } + + // If we have no CBD nodes, then our work here is done + if len(destroyMap) == 0 { + return nil + } + + // We have CBD nodes. We now have to move on to the much more difficult + // task of connecting dependencies of the creation side of the destroy + // to the destruction node. The easiest way to explain this is an example: + // + // Given a pre-destroy dependence of: A => B + // And A has CBD set. + // + // The resulting graph should be: A => B => A_d + // + // The key here is that B happens before A is destroyed. This is to + // facilitate the primary purpose for CBD: making sure that downstreams + // are properly updated to avoid downtime before the resource is destroyed. + // + // We can't trust that the resource being destroyed or anything that + // depends on it is actually in our current graph so we make a new + // graph in order to determine those dependencies and add them in. + log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...") + depMap, err := t.depMap(destroyMap) + if err != nil { + return err + } + + // We now have the mapping of resource addresses to the destroy + // nodes they need to depend on. We now go through our own vertices to + // find any matching these addresses and make the connection.
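// Concretely (illustrative addresses): suppose aws_instance.web has
// create_before_destroy set and aws_eip.ip depends on it. depMap then maps
// "aws_eip.ip" to the destroy node of the old aws_instance.web, and the loop
// below makes that destroy node depend on the aws_eip.ip update, giving the
// walk order:
//
//	aws_instance.web (create) -> aws_eip.ip (update) -> aws_instance.web (destroy)
//
// so the old instance is destroyed only after the EIP has been repointed.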
+ for _, v := range g.Vertices() { + // We're looking for creators + rn, ok := v.(GraphNodeCreator) + if !ok { + continue + } + + // Get the address + addr := rn.CreateAddr() + + // If the address has an index, we strip that. Our depMap creation + // graph doesn't expand counts so we don't currently get _exact_ + // dependencies. One day when we limit dependencies more exactly + // this will have to change. We have a test case covering this + // (depNonCBDCount) so it'll be caught. + key := addr.ContainingResource().String() + + // If there is nothing this resource should depend on, ignore it + dns, ok := depMap[key] + if !ok { + continue + } + + // We have nodes! Make the connection + for _, dn := range dns { + log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s", + dag.VertexName(dn), dag.VertexName(v)) + g.Connect(dag.BasicEdge(dn, v)) + } + } + + return nil +} + +func (t *CBDEdgeTransformer) depMap(destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { + // Build the graph of our config; this ensures that all resources + // are present in the graph. + g, diags := (&BasicGraphBuilder{ + Steps: []GraphTransformer{ + &FlatConfigTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, + &AttachStateTransformer{State: t.State}, + &AttachSchemaTransformer{Schemas: t.Schemas}, + &ReferenceTransformer{}, + }, + Name: "CBDEdgeTransformer", + }).Build(nil) + if diags.HasErrors() { + return nil, diags.Err() + } + + // Using this graph, build the list of destroy nodes that each resource + // address should depend on. For example, when we find B, we map the + // address of B to A_d in the "depMap" variable below. + depMap := make(map[string][]dag.Vertex) + for _, v := range g.Vertices() { + // We're looking for resources. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + // Get the address + addr := rn.ResourceAddr() + key := addr.String() + + // Get the destroy nodes that are destroying this resource. + // If there aren't any, then we don't need to worry about + // any connections. + dns, ok := destroyMap[key] + if !ok { + continue + } + + // Get the nodes that depend on this one. In the example above: + // finding B in A => B. + for _, v := range g.UpEdges(v).List() { + // We're looking for resources. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + // Keep track of the destroy nodes that this address + // needs to depend on. + key := rn.ResourceAddr().String() + depMap[key] = append(depMap[key], dns...) + } + } + + return depMap, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go new file mode 100644 index 00000000000..ab4ba156dfc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go @@ -0,0 +1,281 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeDestroyer must be implemented by nodes that destroy resources. +type GraphNodeDestroyer interface { + dag.Vertex + + // DestroyAddr is the address of the resource that is being + // destroyed by this node.
If this returns nil, then this node + // is not destroying anything. + DestroyAddr() *addrs.AbsResourceInstance +} + +// GraphNodeCreator must be implemented by nodes that create OR update resources. +type GraphNodeCreator interface { + // CreateAddr is the address of the resource being created or updated + CreateAddr() *addrs.AbsResourceInstance +} + +// DestroyEdgeTransformer is a GraphTransformer that creates the proper +// references for destroy resources. Destroy resources are more complex +// in that they must depend on the destruction of resources that +// in turn depend on the CREATION of the node being destroyed. +// +// That is complicated. Visually: +// +// B_d -> A_d -> A -> B +// +// Notice that A destroy depends on B destroy, while B create depends on +// A create. They're inverted. This must be done for example because often +// dependent resources will block parent resources from deleting. Concrete +// example: a VPC with subnets; the VPC can't be deleted while there are +// still subnets. +type DestroyEdgeTransformer struct { + // These are needed to properly build the graph of dependencies + // to determine what a destroy node depends on. Any of these can be nil. + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners in order + // to properly resolve implicit dependencies. + Schemas *Schemas +} + +func (t *DestroyEdgeTransformer) Transform(g *Graph) error { + // Build a map of what is being destroyed (by address string) to + // the list of destroyers. Usually there will be at most one destroyer + // per node, but we allow multiple if present for completeness. + destroyers := make(map[string][]GraphNodeDestroyer) + destroyerAddrs := make(map[string]addrs.AbsResourceInstance) + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyer) + if !ok { + continue + } + + addrP := dn.DestroyAddr() + if addrP == nil { + continue + } + addr := *addrP + + key := addr.String() + log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key) + destroyers[key] = append(destroyers[key], dn) + destroyerAddrs[key] = addr + } + + // If we aren't destroying anything, there will be no edges to make + // so just exit early and avoid future work. + if len(destroyers) == 0 { + return nil + } + + // Go through and connect creators to destroyers. Going along with + // our example, this makes: A_d => A + for _, v := range g.Vertices() { + cn, ok := v.(GraphNodeCreator) + if !ok { + continue + } + + addr := cn.CreateAddr() + if addr == nil { + continue + } + + key := addr.String() + ds := destroyers[key] + if len(ds) == 0 { + continue + } + + for _, d := range ds { + // For illustrating our example + a_d := d.(dag.Vertex) + a := v + + log.Printf( + "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", + dag.VertexName(a), dag.VertexName(a_d)) + + g.Connect(&DestroyEdge{S: a, T: a_d}) + + // Attach the destroy node to the creator + // There really shouldn't be more than one destroyer, but even if + // there are, any of them will represent the correct + // CreateBeforeDestroy status. + if n, ok := cn.(GraphNodeAttachDestroyer); ok { + if d, ok := d.(GraphNodeDestroyerCBD); ok { + n.AttachDestroyNode(d) + } + } + } + } + + // This is strange but is the easiest way to get the dependencies + // of a node that is being destroyed. We use another graph to make sure + // the resource is in the graph and ask for references.
We have to do this + // because the node that is being destroyed may NOT be in the graph. + // + // Example: resource A is force new, then destroy A AND create A are + // in the graph. BUT if resource A is just pure destroy, then only + // destroy A is in the graph, and create A is not. + providerFn := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{NodeAbstractProvider: a} + } + steps := []GraphTransformer{ + // Add the local values + &LocalTransformer{Config: t.Config}, + + // Add outputs and metadata + &OutputTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, + &AttachStateTransformer{State: t.State}, + + // Add all the variables. We can depend on resources through + // variables due to module parameters, and we need to properly + // determine that. + &RootVariableTransformer{Config: t.Config}, + &ModuleVariableTransformer{Config: t.Config}, + + TransformProviders(nil, providerFn, t.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: t.Schemas}, + + &ReferenceTransformer{}, + } + + // Go through all the nodes being destroyed and create a graph. + // The resulting graph is only of things being CREATED. For example, + // following our example, the resulting graph would be: + // + // A, B (with no edges) + // + var tempG Graph + var tempDestroyed []dag.Vertex + for d := range destroyers { + // d is the string key for the resource being destroyed. We actually + // want the address value, which we stashed earlier. + addr := destroyerAddrs[d] + + // This part is a little bit weird but is the best way to + // find the dependencies we need to: build a graph and use the + // attach config and state transformers then ask for references. + abstract := NewNodeAbstractResourceInstance(addr) + tempG.Add(abstract) + tempDestroyed = append(tempDestroyed, abstract) + + // We also add the destroy version here since the destroy can + // depend on things that the creation doesn't (destroy provisioners). + destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} + tempG.Add(destroy) + tempDestroyed = append(tempDestroyed, destroy) + } + + // Run the graph transforms so we have the information we need to + // build references. + log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) + for _, s := range steps { + log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) + if err := s.Transform(&tempG); err != nil { + log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) + return err + } + } + log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) + + // Go through all the nodes in the graph and determine what they + // depend on. 
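// Working the doc comment's example with concrete names (illustrative):
// take aws_vpc.main as A and aws_subnet.a as B, where the subnet's
// configuration references the VPC. When both are destroyed, the loop below
// discovers via the temporary graph that B's creation references A, so it
// connects A's destroy node to depend on B's destroy node, and the subnet
// is deleted before the VPC that contains it.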
+ for _, v := range tempDestroyed { + // Find all ancestors of this to determine the edges we'll depend on + vs, err := tempG.Ancestors(v) + if err != nil { + return err + } + + refs := make([]dag.Vertex, 0, vs.Len()) + for _, raw := range vs.List() { + refs = append(refs, raw.(dag.Vertex)) + } + + refNames := make([]string, len(refs)) + for i, ref := range refs { + refNames[i] = dag.VertexName(ref) + } + log.Printf( + "[TRACE] DestroyEdgeTransformer: creation node %q references %s", + dag.VertexName(v), refNames) + + // If we have no references, then we won't need to do anything + if len(refs) == 0 { + continue + } + + // Get the destroy node for this. In the example of our struct, + // we are currently at B and we're looking for B_d. + rn, ok := v.(GraphNodeResourceInstance) + if !ok { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) + continue + } + + addr := rn.ResourceInstanceAddr() + dns := destroyers[addr.String()] + + // We have dependencies, check if any are being destroyed + // to build the list of things that we must depend on! + // + // In the example of the struct, if we have: + // + // B_d => A_d => A => B + // + // Then at this point in the algorithm we started with B_d, + // we built B (to get dependencies), and we found A. We're now looking + // to see if A_d exists. + var depDestroyers []dag.Vertex + for _, v := range refs { + rn, ok := v.(GraphNodeResourceInstance) + if !ok { + continue + } + + addr := rn.ResourceInstanceAddr() + key := addr.String() + if ds, ok := destroyers[key]; ok { + for _, d := range ds { + depDestroyers = append(depDestroyers, d.(dag.Vertex)) + log.Printf( + "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", + key, dag.VertexName(d)) + } + } + } + + // Go through and make the connections. Use the variable + // names "a_d" and "b_d" to reference our example. + for _, a_d := range dns { + for _, b_d := range depDestroyers { + if b_d != a_d { + log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) + g.Connect(dag.BasicEdge(b_d, a_d)) + } + } + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go new file mode 100644 index 00000000000..773aad7c404 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go @@ -0,0 +1,192 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// DiffTransformer is a GraphTransformer that adds graph nodes representing +// each of the resource changes described in the given Changes object. +type DiffTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + State *states.State + Changes *plans.Changes +} + +func (t *DiffTransformer) Transform(g *Graph) error { + if t.Changes == nil || len(t.Changes.Resources) == 0 { + // Nothing to do! + return nil + } + + // Go through all the modules in the diff. + log.Printf("[TRACE] DiffTransformer starting") + + var diags tfdiags.Diagnostics + state := t.State + changes := t.Changes + + // DiffTransformer creates resource _instance_ nodes. 
If there are any + // whole-resource nodes already in the graph, we must ensure that they + // get evaluated before any of the corresponding instances by creating + // dependency edges, so we'll do some prep work here to ensure we'll only + // create connections to nodes that existed before we started here. + resourceNodes := map[string][]GraphNodeResource{} + for _, node := range g.Vertices() { + rn, ok := node.(GraphNodeResource) + if !ok { + continue + } + // We ignore any instances that _also_ implement + // GraphNodeResourceInstance, since in the unlikely event that they + // do exist we'd probably end up creating cycles by connecting them. + if _, ok := node.(GraphNodeResourceInstance); ok { + continue + } + + addr := rn.ResourceAddr().String() + resourceNodes[addr] = append(resourceNodes[addr], rn) + } + + for _, rc := range changes.Resources { + addr := rc.Addr + dk := rc.DeposedKey + + log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) + + // Depending on the action we'll need some different combinations of + // nodes, because destroying uses a special node type separate from + // other actions. + var update, delete, createBeforeDestroy bool + switch rc.Action { + case plans.NoOp: + continue + case plans.Delete: + delete = true + case plans.DeleteThenCreate, plans.CreateThenDelete: + update = true + delete = true + createBeforeDestroy = (rc.Action == plans.CreateThenDelete) + default: + update = true + } + + if dk != states.NotDeposed && update { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change for deposed object", + fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), + )) + continue + } + + // If we're going to do a create_before_destroy Replace operation then + // we need to allocate a DeposedKey to use to retain the + // not-yet-destroyed prior object, so that the delete node can destroy + // _that_ rather than the newly-created node, which will be current + // by the time the delete node is visited. + if update && delete && createBeforeDestroy { + // In this case, variable dk will be the _pre-assigned_ DeposedKey + // that must be used if the update graph node deposes the current + // instance, which will then align with the same key we pass + // into the destroy node to ensure we destroy exactly the deposed + // object we expect. + if state != nil { + ris := state.ResourceInstance(addr) + if ris == nil { + // Should never happen, since we don't plan to replace an + // instance that doesn't exist yet. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change", + fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), + )) + continue + } + + // Allocating a deposed key separately from using it can be racy + // in general, but we assume here that nothing except the apply + // node we instantiate below will actually make new deposed objects + // in practice, and so the set of already-used keys will not change + // between now and then. + dk = ris.FindUnusedDeposedKey() + } else { + // If we have no state at all yet then we can use _any_ + // DeposedKey. 
+ dk = states.NewDeposedKey() + } + } + + if update { + // All actions except destroy use the node type chosen by t.Concrete. + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + if createBeforeDestroy { + // We'll attach our pre-allocated DeposedKey to the node if + // it supports that. NodeApplyableResourceInstance is the + // specific concrete node type we are looking for here really, + // since that's the only node type that might depose objects. + if dn, ok := node.(GraphNodeDeposer); ok { + dn.SetPreallocatedDeposedKey(dk) + } + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) + } else { + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) + } + + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + g.Connect(dag.BasicEdge(node, rsrcNode)) + } + } + + if delete { + // Destroying always uses a destroy-specific node type, though + // which one depends on whether we're destroying a current object + // or a deposed object. + var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeDestroyResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) + } else { + node = &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } + if dk == states.NotDeposed { + log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) + } else { + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) + } + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + // We connect this edge "forwards" (even though destroy dependencies + // are often inverted) because evaluating the resource node + // after the destroy node could cause an unnecessary husk of + // a resource state to be re-added. + g.Connect(dag.BasicEdge(node, rsrcNode)) + } + } + + } + + log.Printf("[TRACE] DiffTransformer complete") + + return diags.Err() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go new file mode 100644 index 00000000000..03eac685eb4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeExpandable is an interface that nodes can implement to +// signal that they can be expanded. Expanded nodes turn into +// GraphNodeSubgraph nodes within the graph. +type GraphNodeExpandable interface { + Expand(GraphBuilder) (GraphNodeSubgraph, error) +} + +// GraphNodeDynamicExpandable is an interface that nodes can implement +// to signal that they can be expanded at eval-time (hence dynamic). +// These nodes are given the eval context and are expected to return +// a new subgraph.
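As a sketch of implementing this contract (the node type is hypothetical; the Graph construction and RootTransformer use mirror what graphNodeImportState does later in this patch):

	func (n *exampleExpander) DynamicExpand(ctx EvalContext) (*Graph, error) {
		g := &Graph{Path: ctx.Path()}
		// ... add one sub-node per expanded item here ...
		if err := (&RootTransformer{}).Transform(g); err != nil {
			return nil, err
		}
		return g, nil
	}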
+type GraphNodeDynamicExpandable interface { + DynamicExpand(EvalContext) (*Graph, error) +} + +// GraphNodeSubgraph is an interface a node can implement if it has +// a larger subgraph that should be walked. +type GraphNodeSubgraph interface { + Subgraph() dag.Grapher +} + +// ExpandTransform is a transformer that does a subgraph expansion +// at graph transform time (vs. at eval time). The benefit of earlier +// subgraph expansion is that errors with the graph build can be detected +// at an earlier stage. +type ExpandTransform struct { + Builder GraphBuilder +} + +func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { + ev, ok := v.(GraphNodeExpandable) + if !ok { + // This isn't an expandable vertex, so just ignore it. + return v, nil + } + + // Expand the subgraph! + log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) + return ev.Expand(t.Builder) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go new file mode 100644 index 00000000000..aa00e020d85 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportProviderValidateTransformer is a GraphTransformer that goes through +// the providers in the graph and validates that they only depend on variables. +type ImportProviderValidateTransformer struct{} + +func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + var diags tfdiags.Diagnostics + + for _, v := range g.Vertices() { + // We only care about providers + pv, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // We only care about providers that reference things + rn, ok := pv.(GraphNodeReferencer) + if !ok { + continue + } + + for _, ref := range rn.References() { + if _, ok := ref.Subject.(addrs.InputVariable); !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider dependency for import", + Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + } + + return diags.Err() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go new file mode 100644 index 00000000000..7dd2c4876d3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go @@ -0,0 +1,239 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportStateTransformer is a GraphTransformer that adds nodes to the +// graph to represent the imports we want to do for resources. 
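A minimal usage sketch (illustrative values: addr is an assumed addrs.AbsResourceInstance, say for aws_instance.web, and the ID is whatever the provider expects for that resource type; g is an existing Graph):

	t := &ImportStateTransformer{
		Targets: []*ImportTarget{{
			Addr: addr,
			ID:   "i-0abc123def456",
		}},
	}
	err := t.Transform(&g)

As the Transform method below shows, ProviderAddr may be left unset, in which case an implied provider configuration is derived from the resource type.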
+type ImportStateTransformer struct { + Targets []*ImportTarget +} + +func (t *ImportStateTransformer) Transform(g *Graph) error { + for _, target := range t.Targets { + // The ProviderAddr may not be supplied for non-aliased providers. + // This will be populated if the targets come from the cli, but tests + // may not specify implied provider addresses. + providerAddr := target.ProviderAddr + if providerAddr.ProviderConfig.Type == "" { + providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) + } + + node := &graphNodeImportState{ + Addr: target.Addr, + ID: target.ID, + ProviderAddr: providerAddr, + } + g.Add(node) + } + return nil +} + +type graphNodeImportState struct { + Addr addrs.AbsResourceInstance // Addr is the resource address to import into + ID string // ID is the ID to import as + ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type + ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution + + states []providers.ImportedResource +} + +var ( + _ GraphNodeSubPath = (*graphNodeImportState)(nil) + _ GraphNodeEvalable = (*graphNodeImportState)(nil) + _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) + _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) +) + +func (n *graphNodeImportState) Name() string { + return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) +} + +// GraphNodeProviderConsumer +func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // We assume that n.ProviderAddr has been properly populated here. + // It's the responsibility of the code creating a graphNodeImportState + // to populate this, possibly by calling DefaultProviderConfig() on the + // resource address to infer an implied provider from the resource type + // name. + return n.ProviderAddr, false +} + +// GraphNodeProviderConsumer +func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { + n.ResolvedProvider = addr +} + +// GraphNodeSubPath +func (n *graphNodeImportState) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable impl. +func (n *graphNodeImportState) EvalTree() EvalNode { + var provider providers.Interface + + // Reset our states + n.states = nil + + // Return our sequence + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + }, + &EvalImportState{ + Addr: n.Addr.Resource, + Provider: &provider, + ID: n.ID, + Output: &n.states, + }, + }, + } +} + +// GraphNodeDynamicExpandable impl. +// +// We use DynamicExpand as a way to generate the subgraph of refreshes +// and state inserts we need to do for our import state. Since they're new +// resources they don't depend on anything else and refreshes are isolated +// so this is nearly a perfect use case for dynamic expand. +func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + g := &Graph{Path: ctx.Path()} + + // nameCounter is used to de-dup names in the state. + nameCounter := make(map[string]int) + + // Compile the list of addresses that we'll be inserting into the state. + // We do this ahead of time so we can verify that we aren't importing + // something that already exists. 
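// The name de-duplication below behaves like this (illustrative): if an
// import returns three results that would all land at aws_instance.web, the
// first keeps its name and the rest gain numeric suffixes:
//
//	aws_instance.web      (count 0, no suffix)
//	aws_instance.web-1    (count 1)
//	aws_instance.web-2    (count 2)
//
// Note that the counter key is computed from the address before any suffix
// is appended, so all three results share a single counter entry.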
+ addrs := make([]addrs.AbsResourceInstance, len(n.states)) + for i, state := range n.states { + addr := n.Addr + if t := state.TypeName; t != "" { + addr.Resource.Resource.Type = t + } + + // Determine if we need to suffix the name to de-dup + key := addr.String() + count, ok := nameCounter[key] + if ok { + count++ + addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) + } + nameCounter[key] = count + + // Add it to our list + addrs[i] = addr + } + + // Verify that all the addresses are clear + state := ctx.State() + for _, addr := range addrs { + existing := state.ResourceInstance(addr) + if existing != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource already managed by Terraform", + fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), + )) + continue + } + } + if diags.HasErrors() { + // Bail out early, then. + return nil, diags.Err() + } + + // For each of the states, we add a node to handle the refresh/add to state. + // "n.states" is populated by our own EvalTree with the result of + // ImportState. Since DynamicExpand is always called after EvalTree, this + // is safe. + for i, state := range n.states { + g.Add(&graphNodeImportStateSub{ + TargetAddr: addrs[i], + State: state, + ResolvedProvider: n.ResolvedProvider, + }) + } + + // Root transform for a single root + t := &RootTransformer{} + if err := t.Transform(g); err != nil { + return nil, err + } + + // Done! + return g, diags.Err() +} + +// graphNodeImportStateSub is the sub-node of graphNodeImportState +// and is part of the subgraph. This node is responsible for refreshing +// and adding a resource to the state once it is imported. +type graphNodeImportStateSub struct { + TargetAddr addrs.AbsResourceInstance + State providers.ImportedResource + ResolvedProvider addrs.AbsProviderConfig +} + +var ( + _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) + _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) +) + +func (n *graphNodeImportStateSub) Name() string { + return fmt.Sprintf("import %s result", n.TargetAddr) +} + +func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { + return n.TargetAddr.Module +} + +// GraphNodeEvalable impl. 
+func (n *graphNodeImportStateSub) EvalTree() EvalNode { + // If the Ephemeral type isn't set, then it is an error + if n.State.TypeName == "" { + err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) + return &EvalReturnError{Error: &err} + } + + state := n.State.AsInstanceObject() + + var provider providers.Interface + var providerSchema *ProviderSchema + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalRefresh{ + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalImportStateVerify{ + Addr: n.TargetAddr.Resource, + State: &state, + }, + &EvalWriteState{ + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go new file mode 100644 index 00000000000..b97dea2abd4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. +type LocalTransformer struct { + Config *configs.Config +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Config) +} + +func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { + if c == nil { + // Can't have any locals if there's no config + return nil + } + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + + for _, local := range c.Module.Locals { + addr := path.LocalValue(local.Name) + node := &NodeLocal{ + Addr: addr, + Config: local, + } + g.Add(node) + } + + // Also populate locals for child modules + for _, cc := range c.Children { + if err := t.transformModule(g, cc); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go new file mode 100644 index 00000000000..1b6531fe1c0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go @@ -0,0 +1,126 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// ModuleVariableTransformer is a GraphTransformer that adds all the variables +// in the configuration to the graph. 
+// +// Any "variable" block present in any non-root module is included here, even +// if a particular variable is not referenced from anywhere. +// +// The transform will produce errors if a call to a module does not conform +// to the expected set of arguments, but this transformer is not in a good +// position to return errors and so the validate walk should include specific +// steps for validating module blocks, separate from this transform. +type ModuleVariableTransformer struct { + Config *configs.Config +} + +func (t *ModuleVariableTransformer) Transform(g *Graph) error { + return t.transform(g, nil, t.Config) +} + +func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { + // We can have no variables if we have no configuration. + if c == nil { + return nil + } + + // Transform all the children first. + for _, cc := range c.Children { + if err := t.transform(g, c, cc); err != nil { + return err + } + } + + // If we're processing anything other than the root module then we'll + // add graph nodes for variables defined inside. (Variables for the root + // module are dealt with in RootVariableTransformer). + // If we have a parent, we can determine if a module variable is being + // used, so we transform this. + if parent != nil { + if err := t.transformSingle(g, parent, c); err != nil { + return err + } + } + + return nil +} + +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + _, call := path.Call() + + // Find the call in the parent module configuration, so we can get the + // expressions given for each input variable at the call site. + callConfig, exists := parent.Module.ModuleCalls[call.Name] + if !exists { + // This should never happen, since it indicates an improperly-constructed + // configuration tree. + panic(fmt.Errorf("no module call block found for %s", path)) + } + + // We need to construct a schema for the expected call arguments based on + // the configured variables in our config, which we can then use to + // decode the content of the call block. + schema := &hcl.BodySchema{} + for _, v := range c.Module.Variables { + schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ + Name: v.Name, + Required: v.Default == cty.NilVal, + }) + } + + content, contentDiags := callConfig.Config.Content(schema) + if contentDiags.HasErrors() { + // Validation code elsewhere should deal with any errors before we + // get in here, but we'll report them out here just in case, to + // avoid crashes. + var diags tfdiags.Diagnostics + diags = diags.Append(contentDiags) + return diags.Err() + } + + for _, v := range c.Module.Variables { + var expr hcl.Expression + if attr := content.Attributes[v.Name]; attr != nil { + expr = attr.Expr + } else { + // No expression provided for this variable, so we'll make a + // synthetic one using the variable's default value. 
+ expr = &hclsyntax.LiteralValueExpr{ + Val: v.Default, + SrcRange: v.DeclRange, // This is not exact, but close enough + } + } + + // For now we treat all module variables as "applyable", even though + // such nodes are valid to use on other walks too. We may specialize + // this in future if we find reasons to employ different behaviors + // in different scenarios. + node := &NodeApplyableModuleVariable{ + Addr: path.InputVariable(v.Name), + Config: v, + Expr: expr, + } + g.Add(node) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go new file mode 100644 index 00000000000..4d1323fb09a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go @@ -0,0 +1,175 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/zclconf/go-cty/cty" +) + +// OrphanResourceCountTransformer is a GraphTransformer that adds orphans +// for an expanded count to the graph. The determination of this depends +// on the count argument given. +// +// Orphans are found by comparing the count to what is found in the state. +// This transform assumes that if an element in the state is within the count +// bounds given, it is not an orphan. +type OrphanResourceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + Count int // Actual count of the resource, or -1 if count is not set at all + ForEach map[string]cty.Value // The ForEach map on the resource + Addr addrs.AbsResource // Addr of the resource to look for orphans + State *states.State // Full global state +} + +func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { + rs := t.State.Resource(t.Addr) + if rs == nil { + return nil // Resource doesn't exist in state, so nothing to do! + } + + haveKeys := make(map[addrs.InstanceKey]struct{}) + for key := range rs.Instances { + haveKeys[key] = struct{}{} + } + + // if for_each is set, use that transformer + if t.ForEach != nil { + return t.transformForEach(haveKeys, g) + } + if t.Count < 0 { + return t.transformNoCount(haveKeys, g) + } + if t.Count == 0 { + return t.transformZeroCount(haveKeys, g) + } + return t.transformCount(haveKeys, g) +} + +func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // If there is a NoKey node, add this to the graph first, + // so that we can create edges to it in subsequent (StringKey) nodes.
+ // This is because the last item determines the resource mode for the whole resource + // (see SetResourceInstanceCurrent for more information), and we need to evaluate + // an orphaned (NoKey) resource before the in-memory state is updated + // to deal with a new for_each resource. + _, hasNoKeyNode := haveKeys[addrs.NoKey] + var noKeyNode dag.Vertex + if hasNoKeyNode { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(addrs.NoKey)) + noKeyNode = abstract + if f := t.Concrete; f != nil { + noKeyNode = f(abstract) + } + g.Add(noKeyNode) + } + + for key := range haveKeys { + // If the key is no-key, we have already added it, so skip + if key == addrs.NoKey { + continue + } + + s, _ := key.(addrs.StringKey) + // If the key is present in our current for_each, carry on + if _, ok := t.ForEach[string(s)]; ok { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + + // Add edge to noKeyNode if it exists + if hasNoKeyNode { + g.Connect(dag.BasicEdge(node, noKeyNode)) + } + } + return nil +} + +func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Due to the logic in Transform, we only get in here if our count is + // at least one. + + _, have0Key := haveKeys[addrs.IntKey(0)] + + for key := range haveKeys { + if key == addrs.NoKey && !have0Key { + // If we have no 0-key then we will accept a no-key instance + // as an alias for it. + continue + } + + i, isInt := key.(addrs.IntKey) + if isInt && int(i) < t.Count { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} + +func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // This case is easy: we need to orphan any keys we have at all. + + for key := range haveKeys { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} + +func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Negative count indicates that count is not set at all, in which + // case we expect to have a single instance with no key set at all. + // However, we'll also accept an instance with key 0 set as an alias + // for it, in case the user has just deleted the "count" argument and + // so wants to keep the first instance in the set. + + _, haveNoKey := haveKeys[addrs.NoKey] + _, have0Key := haveKeys[addrs.IntKey(0)] + keepKey := addrs.NoKey + if have0Key && !haveNoKey { + // If we don't have a no-key instance then we can use the 0-key instance + // instead.
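+ // Editor's note (illustration, not vendored code): with "count" removed + // from configuration and instances keyed {0, 1} in state, keepKey becomes + // IntKey(0) below, so instance 0 is kept as the single no-count instance + // and instance 1 is added to the graph as an orphan.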
+ keepKey = addrs.IntKey(0) + } + + for key := range haveKeys { + if key == keepKey { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go new file mode 100644 index 00000000000..cab10da124b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go @@ -0,0 +1,60 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// OrphanOutputTransformer finds the outputs that are present in the state +// but not declared in the given config, and adds them to the graph +// for deletion. +type OrphanOutputTransformer struct { + Config *configs.Config // Root of config tree + State *states.State // State is the root state +} + +func (t *OrphanOutputTransformer) Transform(g *Graph) error { + if t.State == nil { + log.Printf("[DEBUG] No state, no orphan outputs") + return nil + } + + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + return nil +} + +func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the config for this path, which is nil if the entire module has been + // removed. + var outputs map[string]*configs.Output + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + outputs = c.Module.Outputs + } + + // An output is "orphaned" if it's present in the state but not declared + // in the configuration. + for name := range ms.OutputValues { + if _, exists := outputs[name]; exists { + continue + } + + g.Add(&NodeOutputOrphan{ + Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), + }) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go new file mode 100644 index 00000000000..f927b10864c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go @@ -0,0 +1,179 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned +// resource instances to the graph. An "orphan" is an instance that is present +// in the state but belongs to a resource that is no longer present in the +// configuration. +// +// This is not the transformer that deals with "count orphans" (instances that +// are no longer covered by a resource's "count" or "for_each" setting); that's +// handled instead by OrphanResourceCountTransformer.
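+// Editor's note (illustration, not vendored code): if the state tracks +// aws_instance.a[0] and aws_instance.a[1] (hypothetical addresses) and the +// whole resource block has been deleted from configuration, both instances +// are orphans handled by this transformer; if only count had been reduced +// from 2 to 1, index 1 would instead be a "count orphan" for +// OrphanResourceCountTransformer.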
+type OrphanResourceInstanceTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + // State is the global state. We require the global state to + // properly find module orphans at our path. + State *states.State + + // Config is the root node in the configuration tree. We'll look up + // the appropriate node in this tree using the path in each node. + Config *configs.Config +} + +func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceInstanceTransformer used without setting Config") + } + + // Go through the modules and for each module transform in order + // to add the orphan. + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + + return nil +} + +func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the configuration for this module. The configuration might be + // nil if the module was removed from the configuration. This is okay, + // this just means that every resource is an orphan. + var m *configs.Module + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + m = c.Module + } + + // An "orphan" is a resource that is in the state but not the configuration, + // so we'll walk the state resources and try to correlate each of them + // with a configuration block. Each orphan gets a node in the graph whose + // type is decided by t.Concrete. + // + // We don't handle orphans related to changes in the "count" and "for_each" + // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. + for _, rs := range ms.Resources { + if m != nil { + if r := m.ResourceByAddr(rs.Addr); r != nil { + continue + } + } + + for key := range rs.Instances { + addr := rs.Addr.Instance(key).Absolute(moduleAddr) + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) + g.Add(node) + } + } + + return nil +} + +// OrphanResourceTransformer is a GraphTransformer that adds orphaned +// resources to the graph. An "orphan" is a resource that is present in +// the state but no longer present in the config. +// +// This is separate from OrphanResourceInstanceTransformer in that it deals with +// whole resources, rather than individual instances of resources. Orphan +// resource nodes are only used during apply to clean up leftover empty +// resource state skeletons, after all of the instances inside have been +// removed. +// +// This transformer will also create edges in the graph to any pre-existing +// node that creates or destroys the entire orphaned resource or any of its +// instances, to ensure that the "orphan-ness" of a resource is always dealt +// with after all other aspects of it. +type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. + State *states.State + + // Config is the root node in the configuration tree.
+ Config *configs.Config +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceTransformer used without setting Config") + } + + // We'll first collect up the existing nodes for each resource so we can + // create dependency edges for any new nodes we create. + deps := map[string][]dag.Vertex{} + for _, v := range g.Vertices() { + switch tv := v.(type) { + case GraphNodeResourceInstance: + k := tv.ResourceInstanceAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + case GraphNodeResource: + k := tv.ResourceAddr().String() + deps[k] = append(deps[k], v) + case GraphNodeDestroyer: + k := tv.DestroyAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + } + } + + for _, ms := range t.State.Modules { + moduleAddr := ms.Addr + + mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed + + for _, rs := range ms.Resources { + if mc != nil { + if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { + // It's in the config, so nothing to do for this one. + continue + } + } + + addr := rs.Addr.Absolute(moduleAddr) + abstract := NewNodeAbstractResource(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) + g.Add(node) + for _, dn := range deps[addr.String()] { + log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) + g.Connect(dag.BasicEdge(node, dn)) + } + } + } + + return nil + +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go new file mode 100644 index 00000000000..e2979ac5c47 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// OutputTransformer is a GraphTransformer that adds all the outputs +// in the configuration to the graph. +// +// This is done for the apply graph builder even if dependent nodes +// aren't changing since there is no downside: the state will be available +// even if the dependent items aren't changing. +type OutputTransformer struct { + Config *configs.Config +} + +func (t *OutputTransformer) Transform(g *Graph) error { + return t.transform(g, t.Config) +} + +func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { + // If we have no config then there can be no outputs. + if c == nil { + return nil + } + + // Transform all the children. We must do this first because + // we can reference module outputs and they must show up in the + // reference map. 
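+ // Editor's note (illustration, not vendored code): a root output such as + // output "ip" { value = module.child.ip } (hypothetical) refers to the + // child module's output node, which therefore must already be in the + // reference map when references are later resolved.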
+ for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { + return err + } + } + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + + for _, o := range c.Module.Outputs { + addr := path.OutputValue(o.Name) + node := &NodeApplyableOutput{ + Addr: addr, + Config: o, + } + g.Add(node) + } + + return nil +} + +// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete +// outputs during destroy. We need to do this to ensure that no stale outputs +// are ever left in the state. +type DestroyOutputTransformer struct { +} + +func (t *DestroyOutputTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + output, ok := v.(*NodeApplyableOutput) + if !ok { + continue + } + + // create the destroy node for this output + node := &NodeDestroyableOutput{ + Addr: output.Addr, + Config: output.Config, + } + + log.Printf("[TRACE] creating %s", node.Name()) + g.Add(node) + + deps, err := g.Descendents(v) + if err != nil { + return err + } + + // the destroy node must depend on the eval node + deps.Add(v) + + for _, d := range deps.List() { + log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) + g.Connect(dag.BasicEdge(node, d)) + } + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go new file mode 100644 index 00000000000..77af1fbb235 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go @@ -0,0 +1,705 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { + return GraphTransformMulti( + // Add providers from the config + &ProviderConfigTransformer{ + Config: config, + Providers: providers, + Concrete: concrete, + }, + // Add any remaining missing providers + &MissingProviderTransformer{ + Providers: providers, + Concrete: concrete, + }, + // Connect the providers + &ProviderTransformer{ + Config: config, + }, + // Remove unused providers and proxies + &PruneProviderTransformer{}, + // Connect providers to their parent provider nodes + &ParentProviderTransformer{}, + ) +} + +// GraphNodeProvider is an interface that nodes that can be a provider +// must implement. +// +// ProviderAddr returns the address of the provider configuration this +// satisfies, which is relative to the path returned by method Path(). +// +// Name returns the full name of the provider in the config.
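+// Editor's note (sketch, not vendored code): the composed transformer +// returned by TransformProviders above might be used roughly as +// +//	var t GraphTransformer = TransformProviders([]string{"aws"}, concrete, cfg) +//	if err := t.Transform(g); err != nil { /* handle error */ } +// +// where "aws", concrete, cfg, and g are assumed to exist. The stage order +// matters: config-declared providers first, then implied defaults, then +// consumer wiring, then pruning, then parent links.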
+type GraphNodeProvider interface { + GraphNodeSubPath + ProviderAddr() addrs.AbsProviderConfig + Name() string +} + +// GraphNodeCloseProvider is an interface that nodes that can be a close +// provider must implement. The CloseProviderName returned is the name of +// the provider they satisfy. +type GraphNodeCloseProvider interface { + GraphNodeSubPath + CloseProviderAddr() addrs.AbsProviderConfig +} + +// GraphNodeProviderConsumer is an interface that nodes that require +// a provider must implement. ProvidedBy must return the address of the provider +// to use, which will be resolved to a configuration either in the same module +// or in an ancestor module, with the resulting absolute address passed to +// SetProvider. +type GraphNodeProviderConsumer interface { + // ProvidedBy returns the address of the provider configuration the node + // refers to. If the returned "exact" value is true, this address will + // be taken exactly. If "exact" is false, a provider configuration from + // an ancestor module may be selected instead. + ProvidedBy() (addr addrs.AbsProviderConfig, exact bool) + // Set the resolved provider address for this resource. + SetProvider(addrs.AbsProviderConfig) +} + +// ProviderTransformer is a GraphTransformer that maps resources to +// providers within the graph. This will error if there are any resources +// that don't map to proper providers. +type ProviderTransformer struct { + Config *configs.Config +} + +func (t *ProviderTransformer) Transform(g *Graph) error { + // We need to find a provider configuration address for each resource + // either directly represented by a node or referenced by a node in + // the graph, and then create graph edges from provider to provider user + // so that the providers will get initialized first. + + var diags tfdiags.Diagnostics + + // To start, we'll collect the _requested_ provider addresses for each + // node, which we'll then resolve (handling provider inheritance, etc) in + // the next step. + // Our "requested" map is from graph vertices to string representations of + // provider config addresses (for deduping) to requests. + type ProviderRequest struct { + Addr addrs.AbsProviderConfig + Exact bool // If true, inheritance from parent modules is not attempted + } + requested := map[dag.Vertex]map[string]ProviderRequest{} + needConfigured := map[string]addrs.AbsProviderConfig{} + for _, v := range g.Vertices() { + + // Does the vertex _directly_ use a provider? + if pv, ok := v.(GraphNodeProviderConsumer); ok { + requested[v] = make(map[string]ProviderRequest) + + p, exact := pv.ProvidedBy() + if exact { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p) + } else { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p) + } + + requested[v][p.String()] = ProviderRequest{ + Addr: p, + Exact: exact, + } + + // Direct references need the provider configured as well as initialized + needConfigured[p.String()] = p + } + } + + // Now we'll go through all the requested addresses we just collected and + // figure out which _actual_ config address each belongs to, after resolving + // for provider inheritance and passing.
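+ // Editor's note (illustration, not vendored code): a resource in + // module.child (hypothetical) requesting the default "aws" configuration + // when none is declared in its own module walks up via Inherited() until + // it reaches the root module's aws provider block, unless the request + // was marked exact.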
+ m := providerVertexMap(g) + for v, reqs := range requested { + for key, req := range reqs { + p := req.Addr + target := m[key] + + _, ok := v.(GraphNodeSubPath) + if !ok && target == nil { + // No target and no path to traverse up from + diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) + continue + } + + if target != nil { + log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) + } + + // if we don't have a provider at this level, walk up the path looking for one, + // unless we were told to be exact. + if target == nil && !req.Exact { + for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { + key := pp.String() + target = m[key] + if target != nil { + log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) + break + } + log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) + } + } + + // If this provider doesn't need to be configured then we can just + // stub it out with an init-only provider node, which will just + // start up the provider and fetch its schema. + if _, exists := needConfigured[key]; target == nil && !exists { + stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) + stub := &NodeEvalableProvider{ + &NodeAbstractProvider{ + Addr: stubAddr, + }, + } + m[stubAddr.String()] = stub + log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) + target = stub + g.Add(target) + } + + if target == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider configuration not present", + fmt.Sprintf( + "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", + dag.VertexName(v), p, dag.VertexName(v), + ), + )) + break + } + + // see if this is an inherited provider + if p, ok := target.(*graphNodeProxyProvider); ok { + g.Remove(p) + target = p.Target() + key = target.(GraphNodeProvider).ProviderAddr().String() + } + + log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) + if pv, ok := v.(GraphNodeProviderConsumer); ok { + pv.SetProvider(target.ProviderAddr()) + } + g.Connect(dag.BasicEdge(v, target)) + } + } + + return diags.Err() +} + +// CloseProviderTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provider connections that aren't needed anymore. +// A provider connection is not needed anymore once all dependent resources +// in the graph are evaluated. +type CloseProviderTransformer struct{} + +func (t *CloseProviderTransformer) Transform(g *Graph) error { + pm := providerVertexMap(g) + cpm := make(map[string]*graphNodeCloseProvider) + var err error + + for _, v := range pm { + p := v.(GraphNodeProvider) + key := p.ProviderAddr().String() + + // get the close provider of this type if we already created it + closer := cpm[key] + + if closer == nil { + // create a closer for this provider type + closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} + g.Add(closer) + cpm[key] = closer + } + + // Close node depends on the provider itself + // this is added unconditionally, so it will connect to all instances + // of the provider. Extra edges will be removed by transitive + // reduction.
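+ // Editor's note (illustration, not vendored code): dag.BasicEdge(closer, p) + // makes the close node depend on the provider, and the loop below also + // makes it depend on every consumer, so the provider is closed only after + // everything that uses it has been evaluated.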
+ g.Connect(dag.BasicEdge(closer, p)) + + // connect all the provider's resources to the close node + for _, s := range g.UpEdges(p).List() { + if _, ok := s.(GraphNodeProviderConsumer); ok { + g.Connect(dag.BasicEdge(closer, s)) + } + } + } + + return err +} + +// MissingProviderTransformer is a GraphTransformer that adds to the graph +// a node for each default provider configuration that is referenced by another +// node but not already present in the graph. +// +// These "default" nodes are always added to the root module, regardless of +// where they are requested. This is important because our inheritance +// resolution behavior in ProviderTransformer will then treat these as a +// last-ditch fallback after walking up the tree, rather than preferring them +// as it would if they were placed in the same module as the requester. +// +// This transformer may create extra nodes that are not needed in practice, +// due to overriding provider configurations in child modules. +// PruneProviderTransformer can then remove these once ProviderTransformer +// has resolved all of the inheritance, etc. +type MissingProviderTransformer struct { + // Providers is the list of providers we support. + Providers []string + + // Concrete, if set, overrides how the providers are made. + Concrete ConcreteProviderNodeFunc +} + +func (t *MissingProviderTransformer) Transform(g *Graph) error { + // Initialize factory + if t.Concrete == nil { + t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { + return a + } + } + + var err error + m := providerVertexMap(g) + for _, v := range g.Vertices() { + pv, ok := v.(GraphNodeProviderConsumer) + if !ok { + continue + } + + // For our work here we actually care only about the provider type and + // we plan to place all default providers in the root module, and so + // it's safe for us to rely on ProvidedBy here rather than waiting for + // the later proper resolution of provider inheritance done by + // ProviderTransformer. + p, _ := pv.ProvidedBy() + if p.ProviderConfig.Alias != "" { + // We do not create default aliased configurations. + log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) + continue + } + + // We're going to create an implicit _default_ configuration for the + // referenced provider type in the _root_ module, ignoring all other + // aspects of the resource's declared provider address. + defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type) + key := defaultAddr.String() + provider := m[key] + + if provider != nil { + // There's already an explicit default configuration for this + // provider type in the root module, so we have nothing to do. + continue + } + + log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) + + // create the missing top-level provider + provider = t.Concrete(&NodeAbstractProvider{ + Addr: defaultAddr, + }).(GraphNodeProvider) + + g.Add(provider) + m[key] = provider + } + + return err +} + +// ParentProviderTransformer connects provider nodes to their parents. +// +// This works by finding nodes that are both GraphNodeProviders and +// GraphNodeSubPath. It then connects the providers to their parent +// path. The parent provider is always at the root level.
+type ParentProviderTransformer struct{} + +func (t *ParentProviderTransformer) Transform(g *Graph) error { + pm := providerVertexMap(g) + for _, v := range g.Vertices() { + // Only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // Also require non-empty path, since otherwise we're in the root + // module and so cannot have a parent. + if len(pn.Path()) <= 1 { + continue + } + + // this provider may be disabled, but we can only get its name from + // the ProviderName string + addr := pn.ProviderAddr() + parentAddr, ok := addr.Inherited() + if ok { + parent := pm[parentAddr.String()] + if parent != nil { + g.Connect(dag.BasicEdge(v, parent)) + } + } + } + return nil +} + +// PruneProviderTransformer removes any providers that are not actually used by +// anything, and provider proxies. This avoids the provider being initialized +// and configured. This both saves resources and avoids errors since +// configuration may imply initialization which may require auth. +type PruneProviderTransformer struct{} + +func (t *PruneProviderTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about providers + _, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // ProxyProviders will have up edges, but we're now done with them in the graph + if _, ok := v.(*graphNodeProxyProvider); ok { + log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) + g.Remove(v) + } + + // Remove providers with no dependencies. + if g.UpEdges(v).Len() == 0 { + log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) + g.Remove(v) + } + } + + return nil +} + +func providerVertexMap(g *Graph) map[string]GraphNodeProvider { + m := make(map[string]GraphNodeProvider) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvider); ok { + addr := pv.ProviderAddr() + m[addr.String()] = pv + } + } + + return m +} + +type graphNodeCloseProvider struct { + Addr addrs.AbsProviderConfig +} + +var ( + _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) +) + +func (n *graphNodeCloseProvider) Name() string { + return n.Addr.String() + " (close)" +} + +// GraphNodeSubPath impl. +func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable impl. +func (n *graphNodeCloseProvider) EvalTree() EvalNode { + return CloseProviderEvalTree(n.Addr) +} + +// GraphNodeDependable impl. +func (n *graphNodeCloseProvider) DependableName() []string { + return []string{n.Name()} +} + +func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { + return n.Addr +} + +// GraphNodeDotter impl. +func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + if !opts.Verbose { + return nil + } + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} + +// RemovableIfNotTargeted +func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to +// store the name and value of a provider node for inheritance between modules. +// These nodes are only used to store the data while loading the provider +// configurations, and are removed after all the resources have been connected +// to their providers.
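+// Editor's note (illustration, not vendored code): when a root provider +// configuration is passed down through nested module calls (providers = +// { aws = aws } at each call site, hypothetically), every intermediate +// module gets a proxy node, and Target() below follows the chain of +// proxies to the single concrete node at the root.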
+type graphNodeProxyProvider struct { + addr addrs.AbsProviderConfig + target GraphNodeProvider +} + +var ( + _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) +) + +func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.addr +} + +func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { + return n.addr.Module +} + +func (n *graphNodeProxyProvider) Name() string { + return n.addr.String() + " (proxy)" +} + +// find the concrete provider instance +func (n *graphNodeProxyProvider) Target() GraphNodeProvider { + switch t := n.target.(type) { + case *graphNodeProxyProvider: + return t.Target() + default: + return n.target + } +} + +// ProviderConfigTransformer adds all provider nodes from the configuration and +// attaches the configs. +type ProviderConfigTransformer struct { + Providers []string + Concrete ConcreteProviderNodeFunc + + // each provider node is stored here so that the proxy nodes can look up + // their targets by name. + providers map[string]GraphNodeProvider + // record providers that can be overridden with a proxy + proxiable map[string]bool + + // Config is the root node of the configuration tree to add providers from. + Config *configs.Config +} + +func (t *ProviderConfigTransformer) Transform(g *Graph) error { + // If no configuration is given, we don't do anything + if t.Config == nil { + return nil + } + + t.providers = make(map[string]GraphNodeProvider) + t.proxiable = make(map[string]bool) + + // Start the transformation process + if err := t.transform(g, t.Config); err != nil { + return err + } + + // finally attach the configs to the new nodes + return t.attachProviderConfigs(g) +} + +func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { + // If no config, do nothing + if c == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, c); err != nil { + return err + } + + // Transform all the children. + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { + return err + } + } + return nil +} + +func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { + // Get the module associated with this configuration tree node + mod := c.Module + staticPath := c.Path + + // We actually need a dynamic module path here, but we've not yet updated + // our graph builders enough to support expansion of module calls with + // "count" and "for_each" set, so for now we'll shim this by converting to + // a dynamic path with no keys. At the time of writing this is the only + // possible kind of dynamic path anyway. + path := make(addrs.ModuleInstance, len(staticPath)) + for i, name := range staticPath { + path[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + + // add all providers from the configuration + for _, p := range mod.ProviderConfigs { + relAddr := p.Addr() + addr := relAddr.Absolute(path) + + abstract := &NodeAbstractProvider{ + Addr: addr, + } + var v dag.Vertex + if t.Concrete != nil { + v = t.Concrete(abstract) + } else { + v = abstract + } + + // Add it to the graph + g.Add(v) + key := addr.String() + t.providers[key] = v.(GraphNodeProvider) + + // A provider configuration is "proxyable" if its configuration is + // entirely empty. This means it's standing in for a provider + // configuration that must be passed in from the parent module. + // We decide this by evaluating the config with an empty schema; + // if this succeeds, then we know there's nothing in the body.
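+ // Editor's note (illustration, not vendored code): an empty block such + // as provider "aws" {} decodes cleanly against the empty schema and is + // marked proxiable; a block that sets any argument (for example region) + // produces diagnostics here and keeps its concrete node.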
+ _, diags := p.Config.Content(&hcl.BodySchema{}) + t.proxiable[key] = !diags.HasErrors() + } + + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, c) +} + +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { + path := c.Path + + // can't add proxies at the root + if len(path) == 0 { + return nil + } + + parentPath, callAddr := path.Call() + parent := c.Parent + if parent == nil { + return nil + } + + callName := callAddr.Name + var parentCfg *configs.ModuleCall + for name, mod := range parent.Module.ModuleCalls { + if name == callName { + parentCfg = mod + break + } + } + + // We currently don't support count/for_each for modules and so we must + // shim our path and parentPath into module instances here so that the + // rest of Terraform can behave as if we do. This shimming should be + // removed later as part of implementing count/for_each for modules. + instPath := make(addrs.ModuleInstance, len(path)) + for i, name := range path { + instPath[i] = addrs.ModuleInstanceStep{Name: name} + } + parentInstPath := make(addrs.ModuleInstance, len(parentPath)) + for i, name := range parentPath { + parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} + } + + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", c.Path.String()) + } + + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. + for _, pair := range parentCfg.Providers { + fullAddr := pair.InChild.Addr().Absolute(instPath) + fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) + fullName := fullAddr.String() + fullParentName := fullParentAddr.String() + + parentProvider := t.providers[fullParentName] + + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } + + proxy := &graphNodeProxyProvider{ + addr: fullAddr, + target: parentProvider, + } + + concreteProvider := t.providers[fullName] + + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } + + // aliased configurations can't be implicitly passed in + if fullAddr.ProviderConfig.Alias != "" { + continue + } + + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. + g.Add(proxy) + t.providers[fullName] = proxy + } + return nil +} + +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + addr := apn.ProviderAddr() + + // Get the configuration. 
+ mc := t.Config.DescendentForInstance(addr.Module) + if mc == nil { + log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) + continue + } + + // Go through the provider configs to find the matching config + for _, p := range mc.Module.ProviderConfigs { + if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { + log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) + apn.AttachProvider(p) + break + } + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go new file mode 100644 index 00000000000..e6fe25dac00 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go @@ -0,0 +1,205 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeProvisioner is an interface that nodes that can be a provisioner +// must implement. The ProvisionerName returned is the name of the provisioner +// they satisfy. +type GraphNodeProvisioner interface { + ProvisionerName() string +} + +// GraphNodeCloseProvisioner is an interface that nodes that can be a close +// provisioner must implement. The CloseProvisionerName returned is the name +// of the provisioner they satisfy. +type GraphNodeCloseProvisioner interface { + CloseProvisionerName() string +} + +// GraphNodeProvisionerConsumer is an interface that nodes that require +// a provisioner must implement. ProvisionedBy must return the names of the +// provisioners to use. +type GraphNodeProvisionerConsumer interface { + ProvisionedBy() []string +} + +// ProvisionerTransformer is a GraphTransformer that maps resources to +// provisioners within the graph. This will error if there are any resources +// that don't map to proper provisioners. +type ProvisionerTransformer struct{} + +func (t *ProvisionerTransformer) Transform(g *Graph) error { + // Go through the other nodes and match them to provisioners they need + var err error + m := provisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + key := provisionerMapKey(p, pv) + if m[key] == nil { + err = multierror.Append(err, fmt.Errorf( + "%s: provisioner %s couldn't be found", + dag.VertexName(v), p)) + continue + } + + log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) + g.Connect(dag.BasicEdge(v, m[key])) + } + } + } + + return err +} + +// MissingProvisionerTransformer is a GraphTransformer that adds nodes +// for missing provisioners into the graph. +type MissingProvisionerTransformer struct { + // Provisioners is the list of provisioners we support.
+ Provisioners []string +} + +func (t *MissingProvisionerTransformer) Transform(g *Graph) error { + // Create a set of our supported provisioners + supported := make(map[string]struct{}, len(t.Provisioners)) + for _, v := range t.Provisioners { + supported[v] = struct{}{} + } + + // Get the map of provisioners we already have in our graph + m := provisionerVertexMap(g) + + // Go through all the provisioner consumers and make sure we add + // that provisioner if it is missing. + for _, v := range g.Vertices() { + pv, ok := v.(GraphNodeProvisionerConsumer) + if !ok { + continue + } + + // If this node has a subpath, then we use that as a prefix + // into our map to check for an existing provisioner. + path := addrs.RootModuleInstance + if sp, ok := pv.(GraphNodeSubPath); ok { + path = sp.Path() + } + + for _, p := range pv.ProvisionedBy() { + // Build the key for storing in the map + key := provisionerMapKey(p, pv) + + if _, ok := m[key]; ok { + // This provisioner already exists as a configure node + continue + } + + if _, ok := supported[p]; !ok { + // If we don't support the provisioner type, we skip it. + // Validation later will catch this as an error. + continue + } + + // Build the vertex + var newV dag.Vertex = &NodeProvisioner{ + NameValue: p, + PathValue: path, + } + + // Add the missing provisioner node to the graph + m[key] = g.Add(newV) + log.Printf("[TRACE] MissingProvisionerTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v)) + } + } + + return nil +} + +// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provisioner connections that aren't needed +// anymore. A provisioner connection is not needed anymore once all dependent +// resources in the graph are evaluated. +type CloseProvisionerTransformer struct{} + +func (t *CloseProvisionerTransformer) Transform(g *Graph) error { + m := closeProvisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + source := m[p] + + if source == nil { + // Create a new graphNodeCloseProvisioner and add it to the graph + source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} + g.Add(source) + + // Make sure we also add the new graphNodeCloseProvisioner to the map + // so we don't create and add any duplicate graphNodeCloseProvisioners. + m[p] = source + } + + g.Connect(dag.BasicEdge(source, v)) + } + } + } + + return nil +} + +// provisionerMapKey is a helper that gives us the key to use for the +// maps returned by things such as provisionerVertexMap. +func provisionerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + pathPrefix = sp.Path().String() + "."
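+ // Editor's note (illustration, not vendored code): a consumer in + // module.foo (hypothetical) using provisioner "local-exec" produces the + // key "module.foo.local-exec" here.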
+ } + + return pathPrefix + k +} + +func provisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisioner); ok { + key := provisionerMapKey(pv.ProvisionerName(), v) + m[key] = v + } + } + + return m +} + +func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeCloseProvisioner); ok { + m[pv.CloseProvisionerName()] = v + } + } + + return m +} + +type graphNodeCloseProvisioner struct { + ProvisionerNameValue string +} + +func (n *graphNodeCloseProvisioner) Name() string { + return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) +} + +// GraphNodeEvalable impl. +func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { + return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} +} + +func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { + return n.ProvisionerNameValue +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go new file mode 100644 index 00000000000..8ae1fa75e6e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go @@ -0,0 +1,446 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// GraphNodeReferenceable must be implemented by any node that represents +// a Terraform thing that can be referenced (resource, module, etc.). +// +// Even if the thing has no name, this should return an empty list. By +// implementing this and returning a non-nil result, you say that this CAN +// be referenced and other methods of referencing may still be possible (such +// as by path!) +type GraphNodeReferenceable interface { + GraphNodeSubPath + + // ReferenceableAddrs returns a list of addresses through which this can be + // referenced. + ReferenceableAddrs() []addrs.Referenceable +} + +// GraphNodeReferencer must be implemented by nodes that reference other +// Terraform items and therefore depend on them. +type GraphNodeReferencer interface { + GraphNodeSubPath + + // References returns a list of references made by this node, which + // include both a referenced address and source location information for + // the reference. + References() []*addrs.Reference +} + +// GraphNodeReferenceOutside is an interface that can optionally be implemented. +// A node that implements it can specify that its own referenceable addresses +// and/or the addresses it references are in a different module than the +// node itself. +// +// Any referenceable addresses returned by ReferenceableAddrs are interpreted +// relative to the returned selfPath. +// +// Any references returned by References are interpreted relative to the +// returned referencePath. +// +// It is valid but not required for either of these paths to match what is +// returned by method Path, though if both match the main Path then there +// is no reason to implement this method. 
+// +// The primary use-case for this is the nodes representing module input +// variables, since their expressions are resolved in terms of their calling +// module, but they are still referenced from their own module. +type GraphNodeReferenceOutside interface { + // ReferenceOutside returns a path in which any references from this node + // are resolved. + ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) +} + +// ReferenceTransformer is a GraphTransformer that connects all the +// nodes that reference each other in order to form the proper ordering. +type ReferenceTransformer struct{} + +func (t *ReferenceTransformer) Transform(g *Graph) error { + // Build a reference map so we can efficiently look up the references + vs := g.Vertices() + m := NewReferenceMap(vs) + + // Find the things that reference things and connect them + for _, v := range vs { + parents, _ := m.References(v) + parentsDbg := make([]string, len(parents)) + for i, v := range parents { + parentsDbg[i] = dag.VertexName(v) + } + log.Printf( + "[DEBUG] ReferenceTransformer: %q references: %v", + dag.VertexName(v), parentsDbg) + + for _, parent := range parents { + g.Connect(dag.BasicEdge(v, parent)) + } + } + + return nil +} + +// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges +// for locals and outputs that depend on other nodes which will be +// removed during destroy. If a destroy node is evaluated before the local or +// output value, it will be removed from the state, and the later interpolation +// will fail. +type DestroyValueReferenceTransformer struct{} + +func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error { + vs := g.Vertices() + for _, v := range vs { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + // reverse any outgoing edges so that the value is evaluated first. + for _, e := range g.EdgesFrom(v) { + target := e.Target() + + // only destroy nodes will be evaluated in reverse + if _, ok := target.(GraphNodeDestroyer); !ok { + continue + } + + log.Printf("[TRACE] output dep: %s", dag.VertexName(target)) + + g.RemoveEdge(e) + g.Connect(&DestroyEdge{S: target, T: v}) + } + } + + return nil +} + +// PruneUnusedValuesTransformer is a GraphTransformer that removes local and +// output values which are not referenced in the graph. Since outputs and +// locals always need to be evaluated, if they reference a resource that is not +// available in the state the interpolation could fail. +type PruneUnusedValuesTransformer struct{} + +func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error { + // this might need multiple runs in order to ensure that pruning a value + // doesn't affect a previously checked value. + for removed := 0; ; removed = 0 { + for _, v := range g.Vertices() { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + dependants := g.UpEdges(v) + + switch dependants.Len() { + case 0: + // nothing at all depends on this + g.Remove(v) + removed++ + case 1: + // because an output's destroy node always depends on the output, + // we need to check for the case of a single destroy node. + d := dependants.List()[0] + if _, ok := d.(*NodeDestroyableOutput); ok { + g.Remove(v) + removed++ + } + } + } + if removed == 0 { + break + } + } + + return nil +} + +// ReferenceMap is a structure that can be used to efficiently check +// for references on a graph.
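+// Editor's note (illustration, not vendored code): keys in this structure +// pair a module instance path with a referenceable address, so mapKey on +// module.foo and aws_instance.bar (hypothetical) yields +// "module.foo|aws_instance.bar".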
+type ReferenceMap struct { + // vertices is a map from internal reference keys (as produced by the + // mapKey method) to one or more vertices that are identified by each key. + // + // A particular reference key might actually identify multiple vertices, + // e.g. in situations where one object is contained inside another. + vertices map[string][]dag.Vertex + + // edges is a map whose keys are a subset of the internal reference keys + // from "vertices", and whose values are the nodes that refer to each + // key. The values in this map are the referrers, while values in + // "vertices" are the referents. The keys in both cases are referents. + edges map[string][]dag.Vertex +} + +// References returns the set of vertices that the given vertex refers to, +// and any referenced addresses that do not have corresponding vertices. +func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { + rn, ok := v.(GraphNodeReferencer) + if !ok { + return nil, nil + } + if _, ok := v.(GraphNodeSubPath); !ok { + return nil, nil + } + + var matches []dag.Vertex + var missing []addrs.Referenceable + + for _, ref := range rn.References() { + subject := ref.Subject + + key := m.referenceMapKey(v, subject) + if _, exists := m.vertices[key]; !exists { + // If what we were looking for was a ResourceInstance then we + // might be in a resource-oriented graph rather than an + // instance-oriented graph, and so we'll see if we have the + // resource itself instead. + switch ri := subject.(type) { + case addrs.ResourceInstance: + subject = ri.ContainingResource() + case addrs.ResourceInstancePhase: + subject = ri.ContainingResource() + } + key = m.referenceMapKey(v, subject) + } + + vertices := m.vertices[key] + for _, rv := range vertices { + // don't include self-references + if rv == v { + continue + } + matches = append(matches, rv) + } + if len(vertices) == 0 { + missing = append(missing, ref.Subject) + } + } + + return matches, missing +} + +// Referrers returns the set of vertices that refer to the given vertex. +func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { + rn, ok := v.(GraphNodeReferenceable) + if !ok { + return nil + } + sp, ok := v.(GraphNodeSubPath) + if !ok { + return nil + } + + var matches []dag.Vertex + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(sp.Path(), addr) + referrers, ok := m.edges[key] + if !ok { + continue + } + + // If the referrer set includes our own given vertex then we skip, + // since we don't want to return self-references. + selfRef := false + for _, p := range referrers { + if p == v { + selfRef = true + break + } + } + if selfRef { + continue + } + + matches = append(matches, referrers...) + } + + return matches +} + +func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { + return fmt.Sprintf("%s|%s", path.String(), addr.String()) +} + +// vertexReferenceablePath returns the path in which the given vertex can be +// referenced. This is the path that its results from ReferenceableAddrs +// are considered to be relative to. +// +// Only GraphNodeSubPath implementations can be referenced, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { + sp, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map.
+ panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex is referenced from a different module than where it was + // declared. + path, _ := outside.ReferenceOutside() + return path + } + + // Vertex is referenced from the same module as where it was declared. + return sp.Path() +} + +// vertexReferencePath returns the path in which references _from_ the given +// vertex must be interpreted. +// +// Only GraphNodeSubPath implementations can have references, so this method +// will panic if the given vertex does not implement that interface. +func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { + sp, ok := referrer.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + var path addrs.ModuleInstance + if outside, ok := referrer.(GraphNodeReferenceOutside); ok { + // Vertex makes references to objects in a different module than where + // it was declared. + _, path = outside.ReferenceOutside() + return path + } + + // Vertex makes references to objects in the same module as where it + // was declared. + return sp.Path() +} + +// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex +// that the reference is from, and "addr" is the address of the object being +// referenced. +// +// The result is an opaque string that includes both the address of the given +// object and the address of the module instance that object belongs to. +// +// Only GraphNodeSubPath implementations can be referrers, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { + path := vertexReferencePath(referrer) + return m.mapKey(path, addr) +} + +// NewReferenceMap is used to create a new reference map for the +// given set of vertices. +func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { + var m ReferenceMap + + // Build the lookup table + vertices := make(map[string][]dag.Vertex) + for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + + // We're only looking for referenceable nodes + rn, ok := v.(GraphNodeReferenceable) + if !ok { + continue + } + + path := m.vertexReferenceablePath(v) + + // Go through and cache them + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(path, addr) + vertices[key] = append(vertices[key], v) + } + + // Any node can be referenced by the address of the module it belongs + // to or any of that module's ancestors. + for _, addr := range path.Ancestors()[1:] { + // Can be referenced either as the specific call instance (with + // an instance key) or as the bare module call itself (the "module" + // block in the parent module that created the instance). + callPath, call := addr.Call() + callInstPath, callInst := addr.CallInstance() + callKey := m.mapKey(callPath, call) + callInstKey := m.mapKey(callInstPath, callInst) + vertices[callKey] = append(vertices[callKey], v) + vertices[callInstKey] = append(vertices[callInstKey], v) + } + } + + // Build the lookup table for referenced by + edges := make(map[string][]dag.Vertex) + for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. 
+ continue + } + + rn, ok := v.(GraphNodeReferencer) + if !ok { + // We're only looking for referenceable nodes + continue + } + + // Go through and cache them + for _, ref := range rn.References() { + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) + } + key := m.referenceMapKey(v, ref.Subject) + edges[key] = append(edges[key], v) + } + } + + m.vertices = vertices + m.edges = edges + return &m +} + +// ReferencesFromConfig returns the references that a configuration has +// based on the interpolated variables in a configuration. +func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { + if body == nil { + return nil + } + refs, _ := lang.ReferencesInBlock(body, schema) + return refs +} + +// appendResourceDestroyReferences identifies resource and resource instance +// references in the given slice and appends to it the "destroy-phase" +// equivalents of those references, returning the result. +// +// This can be used in the References implementation for a node which must also +// depend on the destruction of anything it references. +func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { + given := refs + for _, ref := range given { + switch tr := ref.Subject.(type) { + case addrs.Resource: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + case addrs.ResourceInstance: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + } + } + return refs +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go new file mode 100644 index 00000000000..327950d885f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Config *configs.Config // root node in the config tree + State *states.State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state! 
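+	// As a hedged sketch of typical wiring (assumed, not from this file),
+	// a graph builder would run this transformer as one of its steps:
+	//
+	//	steps := []GraphTransformer{
+	//		&RemovedModuleTransformer{Config: config, State: state},
+	//		// ...other transformers...
+	//	}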
+ if t.State == nil { + return nil + } + + for _, m := range t.State.Modules { + cc := t.Config.DescendentForInstance(m.Addr) + if cc != nil { + continue + } + + log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) + g.Add(&NodeModuleRemoved{Addr: m.Addr}) + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go new file mode 100644 index 00000000000..51d9466a2c1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/zclconf/go-cty/cty" +) + +// ResourceCountTransformer is a GraphTransformer that expands the count +// out for a specific resource. +// +// This assumes that the count is already interpolated. +type ResourceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + Schema *configschema.Block + + // Count is either the number of indexed instances to create, or -1 to + // indicate that count is not set at all and thus a no-key instance should + // be created. + Count int + ForEach map[string]cty.Value + Addr addrs.AbsResource +} + +func (t *ResourceCountTransformer) Transform(g *Graph) error { + if t.Count < 0 && t.ForEach == nil { + // Negative count indicates that count is not set at all. + addr := t.Addr.Instance(addrs.NoKey) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + return nil + } + + // Add nodes related to the for_each expression + for key := range t.ForEach { + addr := t.Addr.Instance(addrs.StringKey(key)) + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + // For each count, build and add the node + for i := 0; i < t.Count; i++ { + key := addrs.IntKey(i) + addr := t.Addr.Instance(key) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go new file mode 100644 index 00000000000..485c1c8a04b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go @@ -0,0 +1,38 @@ +package terraform + +import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + +const rootNodeName = "root" + +// RootTransformer is a GraphTransformer that adds a root to the graph. 
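+//
+// A hedged usage sketch (illustrative, not from this file): after all other
+// transforms have run, applying this transformer guarantees a single root:
+//
+//	if err := (&RootTransformer{}).Transform(g); err != nil {
+//		return err
+//	}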
+type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+	// If we already have a good root, we're done
+	if _, err := g.Root(); err == nil {
+		return nil
+	}
+
+	// Add a root
+	var root graphNodeRoot
+	g.Add(root)
+
+	// Connect the root to all the vertices that need it
+	for _, v := range g.Vertices() {
+		if v == root {
+			continue
+		}
+
+		if g.UpEdges(v).Len() == 0 {
+			g.Connect(dag.BasicEdge(root, v))
+		}
+	}
+
+	return nil
+}
+
+type graphNodeRoot struct{}
+
+func (n graphNodeRoot) Name() string {
+	return rootNodeName
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go
new file mode 100644
index 00000000000..e7d95be9780
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
+)
+
+// StateTransformer is a GraphTransformer that adds the elements of
+// the state to the graph.
+//
+// This transform is used for example by the DestroyPlanGraphBuilder to ensure
+// that only resources that are in the state are represented in the graph.
+type StateTransformer struct {
+	// ConcreteCurrent and ConcreteDeposed are used to specialize the abstract
+	// resource instance nodes that this transformer will create.
+	//
+	// If either of these is nil, the objects of that type will be skipped and
+	// not added to the graph at all. It doesn't make sense to use this
+	// transformer without setting at least one of these, since that would
+	// skip everything and thus be a no-op.
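+	//
+	// A minimal wiring sketch (assumed, not from this file): the caller
+	// supplies a function that turns each abstract instance node into a
+	// concrete vertex, for example:
+	//
+	//	st := &StateTransformer{
+	//		ConcreteCurrent: func(a *NodeAbstractResourceInstance) dag.Vertex {
+	//			return a // or wrap a in a concrete node type
+	//		},
+	//		State: state,
+	//	}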
+	ConcreteCurrent ConcreteResourceInstanceNodeFunc
+	ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc
+
+	State *states.State
+}
+
+func (t *StateTransformer) Transform(g *Graph) error {
+	if !t.State.HasResources() {
+		log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do")
+		return nil
+	}
+
+	switch {
+	case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil:
+		log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects")
+	case t.ConcreteCurrent != nil:
+		log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only")
+	case t.ConcreteDeposed != nil:
+		log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only")
+	default:
+		log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all")
+	}
+
+	for _, ms := range t.State.Modules {
+		moduleAddr := ms.Addr
+
+		for _, rs := range ms.Resources {
+			resourceAddr := rs.Addr.Absolute(moduleAddr)
+
+			for key, is := range rs.Instances {
+				addr := resourceAddr.Instance(key)
+
+				if obj := is.Current; obj != nil && t.ConcreteCurrent != nil {
+					abstract := NewNodeAbstractResourceInstance(addr)
+					node := t.ConcreteCurrent(abstract)
+					g.Add(node)
+					log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr)
+				}
+
+				if t.ConcreteDeposed != nil {
+					for dk := range is.Deposed {
+						abstract := NewNodeAbstractResourceInstance(addr)
+						node := t.ConcreteDeposed(abstract, dk)
+						g.Add(node)
+						log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk)
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go
new file mode 100644
index 00000000000..beb1eed9e3b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list of targets
+// provided will contain every target provided, and each implementing graph
+// node must filter this list to targets considered relevant.
+type GraphNodeTargetable interface {
+	SetTargets([]addrs.Targetable)
+}
+
+// GraphNodeTargetDownstream is an interface for graph nodes that need to
+// remain present under targeting if any of their dependencies are targeted.
+// TargetDownstream is called with the set of vertices that are direct
+// dependencies for the node, and it should return true if the node must remain
+// in the graph in support of those dependencies.
+//
+// This is used in situations where the dependency edges represent an
+// ordering relationship, but the depender must still be visited if its
+// dependencies are visited. This is true for outputs, for example, since
+// they must get updated if any of the resources they depend on get updated,
+// which would not normally be true if one of their dependencies were targeted.
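+//
+// A hedged sketch of an implementation (illustrative only; the node type is
+// hypothetical):
+//
+//	func (n *hypotheticalOutputNode) TargetDownstream(targeted, untargeted *dag.Set) bool {
+//		// Stay in the graph as long as no dependency was left untargeted.
+//		return untargeted.Len() == 0
+//	}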
+type GraphNodeTargetDownstream interface {
+	TargetDownstream(targeted, untargeted *dag.Set) bool
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
+type TargetsTransformer struct {
+	// List of targeted resource names specified by the user
+	Targets []addrs.Targetable
+
+	// If set, the index portions of resource addresses will be ignored
+	// for comparison. This is used when transforming a graph where
+	// counted resources have not yet been expanded, since otherwise
+	// the unexpanded nodes (which never have indices) would not match.
+	IgnoreIndices bool
+
+	// Set to true when we're in a `terraform destroy` or a
+	// `terraform plan -destroy`
+	Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+	if len(t.Targets) > 0 {
+		targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
+		if err != nil {
+			return err
+		}
+
+		for _, v := range g.Vertices() {
+			removable := false
+			if _, ok := v.(GraphNodeResource); ok {
+				removable = true
+			}
+
+			if vr, ok := v.(RemovableIfNotTargeted); ok {
+				removable = vr.RemoveIfNotTargeted()
+			}
+
+			if removable && !targetedNodes.Include(v) {
+				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+				g.Remove(v)
+			}
+		}
+	}
+
+	return nil
+}
+
+// selectTargetedNodes returns the set of targeted nodes. A targeted node is
+// either addressed directly, addressed indirectly via its container, or a
+// dependency of a targeted node. Destroy mode keeps dependents instead of
+// dependencies.
+func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
+	targetedNodes := new(dag.Set)
+
+	vertices := g.Vertices()
+
+	for _, v := range vertices {
+		if t.nodeIsTarget(v, addrs) {
+			targetedNodes.Add(v)
+
+			// We inform nodes that ask about the list of targets, which helps
+			// nodes that need to dynamically expand. Note that this only
+			// occurs for nodes that are already directly targeted.
+			if tn, ok := v.(GraphNodeTargetable); ok {
+				tn.SetTargets(addrs)
+			}
+
+			var deps *dag.Set
+			var err error
+			if t.Destroy {
+				deps, err = g.Descendents(v)
+			} else {
+				deps, err = g.Ancestors(v)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			for _, d := range deps.List() {
+				targetedNodes.Add(d)
+			}
+		}
+	}
+	return t.addDependencies(targetedNodes, g)
+}
+
+func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) {
+	// Handle nodes that need to be included if their dependencies are included.
+	// This requires multiple passes since we need to catch transitive
+	// dependencies if and only if they are via other nodes that also
+	// support TargetDownstream. For example:
+	// output -> output -> targeted-resource: both outputs need to be targeted
+	// output -> non-targeted-resource -> targeted-resource: output not targeted
+	//
+	// We'll keep looping until we stop targeting more nodes.
+	queue := targetedNodes.List()
+	for len(queue) > 0 {
+		vertices := queue
+		queue = nil // ready to append for next iteration if necessary
+		for _, v := range vertices {
+			// providers don't cause transitive dependencies, so don't target
+			// downstream from them.
+			if _, ok := v.(GraphNodeProvider); ok {
+				continue
+			}
+
+			dependers := g.UpEdges(v)
+			if dependers == nil {
+				// indicates that there are no up edges for this node, so
+				// we have nothing to do here.
+ continue + } + + dependers = dependers.Filter(func(dv interface{}) bool { + _, ok := dv.(GraphNodeTargetDownstream) + return ok + }) + + if dependers.Len() == 0 { + continue + } + + for _, dv := range dependers.List() { + if targetedNodes.Include(dv) { + // Already present, so nothing to do + continue + } + + // We'll give the node some information about what it's + // depending on in case that informs its decision about whether + // it is safe to be targeted. + deps := g.DownEdges(v) + + depsTargeted := deps.Intersection(targetedNodes) + depsUntargeted := deps.Difference(depsTargeted) + + if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { + targetedNodes.Add(dv) + // Need to visit this node on the next pass to see if it + // has any transitive dependers. + queue = append(queue, dv) + } + } + } + } + + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable. +// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? + if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + + if !targetedNodes.Include(d) { + // this one is going to be removed, so it doesn't count + continue + } + + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) + + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true +} + +func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { + var vertexAddr addrs.Targetable + switch r := v.(type) { + case GraphNodeResourceInstance: + vertexAddr = r.ResourceInstanceAddr() + case GraphNodeResource: + vertexAddr = r.ResourceAddr() + default: + // Only resource and resource instance nodes can be targeted. + return false + } + _, ok := v.(GraphNodeResource) + if !ok { + return false + } + + for _, targetAddr := range targets { + if t.IgnoreIndices { + // If we're ignoring indices then we'll convert any resource instance + // addresses into resource addresses. We don't need to convert + // vertexAddr because instance addresses are contained within + // their associated resources, and so .TargetContains will take + // care of this for us. + if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { + targetAddr = instance.ContainingResource() + } + } + if targetAddr.TargetContains(vertexAddr) { + return true + } + } + + return false +} + +// RemovableIfNotTargeted is a special interface for graph nodes that +// aren't directly addressable, but need to be removed from the graph when they +// are not targeted. (Nodes that are not directly targeted end up in the set of +// targeted nodes because something that _is_ targeted depends on them.) 
+// The initial use case for this interface is GraphNodeConfigVariable, which
+// was having trouble interpolating for module variables in targeted scenarios
+// that filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+	RemoveIfNotTargeted() bool
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go
new file mode 100644
index 00000000000..21842789cf7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that finds the
+// transitive reduction of the graph. For a definition of transitive
+// reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+	// If the graph isn't valid, skip the transitive reduction.
+	// We don't error here because Terraform itself handles graph
+	// validation in a better way, or we assume it does.
+	if err := g.Validate(); err != nil {
+		return nil
+	}
+
+	// Do it
+	g.TransitiveReduction()
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go
new file mode 100644
index 00000000000..3afce566086
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+)
+
+// RootVariableTransformer is a GraphTransformer that adds all the root
+// variables to the graph.
+//
+// Root variables are currently no-ops but they must be added to the
+// graph since downstream things that depend on them must be able to
+// reach them.
+type RootVariableTransformer struct {
+	Config *configs.Config
+}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+	// We can have no variables if we have no config.
+	if t.Config == nil {
+		return nil
+	}
+
+	// We're only considering root module variables here, since child
+	// module variables are handled by ModuleVariableTransformer.
+	vars := t.Config.Module.Variables
+
+	// Add all variables here
+	for _, v := range vars {
+		node := &NodeRootVariable{
+			Addr: addrs.InputVariable{
+				Name: v.Name,
+			},
+			Config: v,
+		}
+		g.Add(node)
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go
new file mode 100644
index 00000000000..6b3c62d1f30
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+	Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+	for _, v := range g.Vertices() {
+		for _, vt := range t.Transforms {
+			newV, err := vt.Transform(v)
+			if err != nil {
+				return err
+			}
+
+			// If the vertex didn't change, then don't do anything more
+			if newV == v {
+				continue
+			}
+
+			// Vertex changed, replace it within the graph
+			if ok := g.Replace(v, newV); !ok {
+				// This should never happen, big problem
+				return fmt.Errorf(
+					"Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+					dag.VertexName(v), dag.VertexName(newV), v, newV)
+			}
+
+			// Replace v so that future transforms use the proper vertex
+			v = newV
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go
new file mode 100644
index 00000000000..f6790d9e5f3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go
@@ -0,0 +1,28 @@
+package terraform
+
+import "context"
+
+// UIInput is the interface that must be implemented to ask for input
+// from the user. Implementations should forward the request to wherever
+// the user provides input and return the entered value.
+type UIInput interface {
+	Input(context.Context, *InputOpts) (string, error)
+}
+
+// InputOpts are options for asking for input.
+type InputOpts struct {
+	// Id is a unique ID for the question being asked that might be
+	// used for logging or to look up a prior answered question.
+	Id string
+
+	// Query is a human-friendly question for inputting this value.
+	Query string
+
+	// Description is a description about what this option is. Note that
+	// this will probably be rendered in a terminal, so split lines as you
+	// see necessary.
+	Description string
+
+	// Default will be the value returned if no data is entered.
+	Default string
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go
new file mode 100644
index 00000000000..e2d9c384819
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go
@@ -0,0 +1,25 @@
+package terraform
+
+import "context"
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
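+//
+// A minimal usage sketch (assumed test code, not from this file):
+//
+//	input := &MockUIInput{InputReturnString: "yes"}
+//	v, err := input.Input(context.Background(), &InputOpts{Id: "approve", Query: "Proceed?"})
+//	// v == "yes", err == nil, and input.InputCalled is now true.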
+type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go new file mode 100644 index 00000000000..b5d32b1e85d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go @@ -0,0 +1,20 @@ +package terraform + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. +type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go new file mode 100644 index 00000000000..84427c63de1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go @@ -0,0 +1,7 @@ +package terraform + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go new file mode 100644 index 00000000000..135a91c5f0a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go @@ -0,0 +1,9 @@ +package terraform + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go new file mode 100644 index 00000000000..d828c921ca3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go @@ -0,0 +1,21 @@ +package terraform + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. 
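+//
+// A minimal usage sketch (assumed test code, not from this file):
+//
+//	out := &MockUIOutput{}
+//	out.Output("hello")
+//	// out.OutputCalled == true and out.OutputMessage == "hello".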
+type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go new file mode 100644 index 00000000000..0d7d4ce032c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// ProvisionerUIOutput is an implementation of UIOutput that calls a hook +// for the output so that the hooks can handle it. +type ProvisionerUIOutput struct { + InstanceAddr addrs.AbsResourceInstance + ProvisionerType string + Hooks []Hook +} + +func (o *ProvisionerUIOutput) Output(msg string) { + for _, h := range o.Hooks { + h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go new file mode 100644 index 00000000000..5428cd5a0a1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go @@ -0,0 +1,75 @@ +package terraform + +import ( + "sort" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n == 0 { + panic("semaphore with limit 0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. +// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// strSliceContains checks if a given string is contained in a slice +// When anybody asks why Go needs generics, here you go. 
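+//
+// Aside for the Semaphore above, a hedged usage sketch (jobs, Job, and
+// process are hypothetical names, not from this file):
+//
+//	sem := NewSemaphore(4) // at most 4 workers at once
+//	for _, job := range jobs {
+//		sem.Acquire()
+//		go func(j Job) {
+//			defer sem.Release()
+//			process(j)
+//		}(job)
+//	}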
+func strSliceContains(haystack []string, needle string) bool {
+	for _, s := range haystack {
+		if s == needle {
+			return true
+		}
+	}
+	return false
+}
+
+// uniqueStrings deduplicates a slice of strings, sorting it in place as a
+// side effect.
+func uniqueStrings(s []string) []string {
+	if len(s) < 2 {
+		return s
+	}
+
+	sort.Strings(s)
+	result := make([]string, 1, len(s))
+	result[0] = s[0]
+	for i := 1; i < len(s); i++ {
+		if s[i] != result[len(result)-1] {
+			result = append(result, s[i])
+		}
+	}
+	return result
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go
new file mode 100644
index 00000000000..627593d762b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go
@@ -0,0 +1,59 @@
+// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ValueFromUnknown-0]
+	_ = x[ValueFromConfig-67]
+	_ = x[ValueFromAutoFile-70]
+	_ = x[ValueFromNamedFile-78]
+	_ = x[ValueFromCLIArg-65]
+	_ = x[ValueFromEnvVar-69]
+	_ = x[ValueFromInput-73]
+	_ = x[ValueFromPlan-80]
+	_ = x[ValueFromCaller-83]
+}
+
+const (
+	_ValueSourceType_name_0 = "ValueFromUnknown"
+	_ValueSourceType_name_1 = "ValueFromCLIArg"
+	_ValueSourceType_name_2 = "ValueFromConfig"
+	_ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile"
+	_ValueSourceType_name_4 = "ValueFromInput"
+	_ValueSourceType_name_5 = "ValueFromNamedFile"
+	_ValueSourceType_name_6 = "ValueFromPlan"
+	_ValueSourceType_name_7 = "ValueFromCaller"
+)
+
+var (
+	_ValueSourceType_index_3 = [...]uint8{0, 15, 32}
+)
+
+func (i ValueSourceType) String() string {
+	switch {
+	case i == 0:
+		return _ValueSourceType_name_0
+	case i == 65:
+		return _ValueSourceType_name_1
+	case i == 67:
+		return _ValueSourceType_name_2
+	case 69 <= i && i <= 70:
+		i -= 69
+		return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]]
+	case i == 73:
+		return _ValueSourceType_name_4
+	case i == 78:
+		return _ValueSourceType_name_5
+	case i == 80:
+		return _ValueSourceType_name_6
+	case i == 83:
+		return _ValueSourceType_name_7
+	default:
+		return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go
new file mode 100644
index 00000000000..0b0c1d28d80
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go
@@ -0,0 +1,313 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+)
+
+// InputValue represents a value for a variable in the root module, provided
+// as part of the definition of an operation.
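+//
+// A hedged construction sketch (illustrative, not from this file):
+//
+//	vals := InputValues{
+//		"region": &InputValue{
+//			Value:      cty.StringVal("us-east-1"),
+//			SourceType: ValueFromCLIArg,
+//		},
+//	}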
+type InputValue struct {
+	Value      cty.Value
+	SourceType ValueSourceType
+
+	// SourceRange provides source location information for values whose
+	// SourceType is ValueFromConfig, ValueFromAutoFile, or ValueFromNamedFile.
+	// It is not populated for other source types, and so should not be used.
+	SourceRange tfdiags.SourceRange
+}
+
+// ValueSourceType describes what broad category of source location provided
+// a particular value.
+type ValueSourceType rune
+
+const (
+	// ValueFromUnknown is the zero value of ValueSourceType and is not valid.
+	ValueFromUnknown ValueSourceType = 0
+
+	// ValueFromConfig indicates that a value came from a .tf or .tf.json file,
+	// e.g. the default value defined for a variable.
+	ValueFromConfig ValueSourceType = 'C'
+
+	// ValueFromAutoFile indicates that a value came from a "values file", like
+	// a .tfvars file, that was implicitly loaded by naming convention.
+	ValueFromAutoFile ValueSourceType = 'F'
+
+	// ValueFromNamedFile indicates that a value came from a named "values file",
+	// like a .tfvars file, that was passed explicitly on the command line (e.g.
+	// -var-file=foo.tfvars).
+	ValueFromNamedFile ValueSourceType = 'N'
+
+	// ValueFromCLIArg indicates that the value was provided directly in
+	// a CLI argument. The name of this argument is not recorded and so it must
+	// be inferred from context.
+	ValueFromCLIArg ValueSourceType = 'A'
+
+	// ValueFromEnvVar indicates that the value was provided via an environment
+	// variable. The name of the variable is not recorded and so it must be
+	// inferred from context.
+	ValueFromEnvVar ValueSourceType = 'E'
+
+	// ValueFromInput indicates that the value was provided at an interactive
+	// input prompt.
+	ValueFromInput ValueSourceType = 'I'
+
+	// ValueFromPlan indicates that the value was retrieved from a stored plan.
+	ValueFromPlan ValueSourceType = 'P'
+
+	// ValueFromCaller indicates that the value was explicitly overridden by
+	// a caller to Context.SetVariable after the context was constructed.
+	ValueFromCaller ValueSourceType = 'S'
+)
+
+func (v *InputValue) GoString() string {
+	if (v.SourceRange != tfdiags.SourceRange{}) {
+		return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange)
+	} else {
+		return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType)
+	}
+}
+
+func (v ValueSourceType) GoString() string {
+	return fmt.Sprintf("terraform.%s", v)
+}
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType
+
+// InputValues is a map of InputValue instances.
+type InputValues map[string]*InputValue
+
+// InputValuesFromCaller turns the given map of naked values into an
+// InputValues that attributes each value to "a caller", using the source
+// type ValueFromCaller. This is primarily useful for testing purposes.
+//
+// This should not be used as a general way to convert map[string]cty.Value
+// into InputValues, since in most real cases we want to set a suitable
+// other SourceType and possibly SourceRange value.
+func InputValuesFromCaller(vals map[string]cty.Value) InputValues {
+	ret := make(InputValues, len(vals))
+	for k, v := range vals {
+		ret[k] = &InputValue{
+			Value:      v,
+			SourceType: ValueFromCaller,
+		}
+	}
+	return ret
+}
+
+// Override merges the given value maps with the receiver, overriding any
+// conflicting keys so that the latest definition wins.
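+//
+// For example (illustrative, not from this file): if the receiver defines
+// "a" and "b" and one of the given maps also defines "b", the result keeps
+// the receiver's "a" and takes the later definition of "b":
+//
+//	merged := base.Override(fromCLI) // fromCLI's entries win on conflict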
+func (vv InputValues) Override(others ...InputValues) InputValues {
+	// FIXME: This should check to see if any of the values are maps and
+	// merge them if so, in order to preserve the behavior from prior to
+	// Terraform 0.12.
+	ret := make(InputValues)
+	for k, v := range vv {
+		ret[k] = v
+	}
+	for _, other := range others {
+		for k, v := range other {
+			ret[k] = v
+		}
+	}
+	return ret
+}
+
+// JustValues returns a map that just includes the values, discarding the
+// source information.
+func (vv InputValues) JustValues() map[string]cty.Value {
+	ret := make(map[string]cty.Value, len(vv))
+	for k, v := range vv {
+		ret[k] = v.Value
+	}
+	return ret
+}
+
+// DefaultVariableValues returns an InputValues map representing the default
+// values specified for variables in the given configuration map.
+func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
+	ret := make(InputValues)
+	for k, c := range configs {
+		if c.Default == cty.NilVal {
+			continue
+		}
+		ret[k] = &InputValue{
+			Value:       c.Default,
+			SourceType:  ValueFromConfig,
+			SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
+		}
+	}
+	return ret
+}
+
+// SameValues returns true if the given InputValues has the same values as
+// the receiver, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) SameValues(other InputValues) bool {
+	if len(vv) != len(other) {
+		return false
+	}
+
+	for k, v := range vv {
+		ov, exists := other[k]
+		if !exists {
+			return false
+		}
+		if !v.Value.RawEquals(ov.Value) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// HasValues returns true if the receiver has the same values as in the given
+// map, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
+	if len(vv) != len(vals) {
+		return false
+	}
+
+	for k, v := range vv {
+		oVal, exists := vals[k]
+		if !exists {
+			return false
+		}
+		if !v.Value.RawEquals(oVal) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Identical returns true if the given InputValues has the same values,
+// source types, and source ranges as the receiver.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+//
+// This method is primarily for testing. For most practical purposes, it's
+// better to use SameValues or HasValues.
+func (vv InputValues) Identical(other InputValues) bool {
+	if len(vv) != len(other) {
+		return false
+	}
+
+	for k, v := range vv {
+		ov, exists := other[k]
+		if !exists {
+			return false
+		}
+		if !v.Value.RawEquals(ov.Value) {
+			return false
+		}
+		if v.SourceType != ov.SourceType {
+			return false
+		}
+		if v.SourceRange != ov.SourceRange {
+			return false
+		}
+	}
+
+	return true
+}
+
+// checkInputVariables ensures that variable values supplied via the UI conform
+// to their corresponding declarations in configuration.
+//
+// The set of values is considered valid only if the returned diagnostics
+// does not contain errors. A valid set of values may still produce warnings,
+// which should be returned to the user.
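+//
+// The core validity test is cty type conversion; a hedged sketch of the idea
+// (illustrative only):
+//
+//	_, err := convert.Convert(cty.StringVal("not-a-number"), cty.Number)
+//	// err != nil, so this value would be rejected for a number-typed variable.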
+func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for name, vc := range vcs { + val, isSet := vs[name] + if !isSet { + // Always an error, since the caller should already have included + // default values from the configuration in the values map. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unassigned variable", + fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), + )) + continue + } + + wantType := vc.Type + + // A given value is valid if it can convert to the desired type. + _, err := convert.Convert(val.Value, wantType) + if err != nil { + switch val.SourceType { + case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: + // We have source location information for these. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for input variable", + Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), + Subject: val.SourceRange.ToHCL().Ptr(), + }) + case ValueFromEnvVar: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromInput: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), + )) + default: + // The above gets us good coverage for the situations users + // are likely to encounter with their own inputs. The other + // cases are generally implementation bugs, so we'll just + // use a generic error for these. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), + )) + } + } + } + + // Check for any variables that are assigned without being configured. + // This is always an implementation error in the caller, because we + // expect undefined variables to be caught during context construction + // where there is better context to report it well. 
+	for name := range vs {
+		if _, defined := vcs[name]; !defined {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Value assigned to undeclared variable",
+				fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name),
+			))
+		}
+	}
+
+	return diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go
new file mode 100644
index 00000000000..d2ee4816002
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go
@@ -0,0 +1,62 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
+
+	tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version"
+)
+
+// CheckCoreVersionRequirements visits each of the modules in the given
+// configuration tree and verifies that any given Core version constraints
+// match the version of Terraform Core that is being used.
+//
+// The returned diagnostics will contain errors if any constraints do not match.
+// The returned diagnostics might also contain warnings, which should be
+// displayed to the user.
+func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics {
+	if config == nil {
+		return nil
+	}
+
+	var diags tfdiags.Diagnostics
+	module := config.Module
+
+	for _, constraint := range module.CoreVersionConstraints {
+		if !constraint.Required.Check(tfversion.SemVer) {
+			switch {
+			case len(config.Path) == 0:
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported Terraform Core version",
+					Detail: fmt.Sprintf(
+						"This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+						tfversion.String(),
+					),
+					Subject: &constraint.DeclRange,
+				})
+			default:
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported Terraform Core version",
+					Detail: fmt.Sprintf(
+						"Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+						config.Path, config.SourceAddr, tfversion.String(),
+					),
+					Subject: &constraint.DeclRange,
+				})
+			}
+		}
+	}
+
+	for _, c := range config.Children {
+		childDiags := CheckCoreVersionRequirements(c)
+		diags = diags.Append(childDiags)
+	}
+
+	return diags
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go
new file mode 100644
index 00000000000..0666aa5f3ff
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+ +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[walkInvalid-0] + _ = x[walkApply-1] + _ = x[walkPlan-2] + _ = x[walkPlanDestroy-3] + _ = x[walkRefresh-4] + _ = x[walkValidate-5] + _ = x[walkDestroy-6] + _ = x[walkImport-7] + _ = x[walkEval-8] +} + +const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" + +var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} + +func (i walkOperation) String() string { + if i >= walkOperation(len(_walkOperation_index)-1) { + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 00000000000..f0e5c79e181 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
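\ No newline at end of file

The hashicorp/yamux files vendored below implement a connection multiplexer: a single reliable, ordered connection (any io.ReadWriteCloser) is wrapped in a Session, and many logical Streams are opened and accepted over it. A minimal sketch of how a consumer typically drives the Server/Client/Open/AcceptStream API defined in mux.go and session.go below (the net.Pipe transport, the echo logic, and the elided error handling are illustrative assumptions, not part of the vendored code):

	package main

	import (
		"fmt"
		"io"
		"net"

		"github.com/hashicorp/yamux"
	)

	func main() {
		// net.Pipe stands in for any reliable, ordered transport such as TCP.
		clientConn, serverConn := net.Pipe()

		// Server side: wrap the raw connection, accept one logical stream, echo it.
		go func() {
			session, _ := yamux.Server(serverConn, nil) // nil selects DefaultConfig()
			stream, _ := session.AcceptStream()
			io.Copy(stream, stream) // echo bytes back until the peer closes
		}()

		// Client side: wrap the other end and open a logical stream over it.
		session, _ := yamux.Client(clientConn, nil)
		stream, _ := session.Open()
		stream.Write([]byte("ping"))

		buf := make([]byte, 4)
		io.ReadFull(stream, buf)
		fmt.Printf("%s\n", buf) // prints "ping"
	}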
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/addr.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 00000000000..f6a00199cdd
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+ "fmt"
+ "net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+ LocalAddr() net.Addr
+ RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+ Addr string
+}
+
+func (*yamuxAddr) Network() string {
+ return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+ return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+ return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection.
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/const.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000000..4f52938287f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+ // ErrRecvWindowExceeded indicates the window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+ // ErrStreamClosed is returned when using a closed stream
+ ErrStreamClosed = fmt.Errorf("stream closed")
+
+ // ErrUnexpectedFlag is set when we get an unexpected flag
+ ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+ // ErrRemoteGoAway is used when we get a go away from the other side
+ ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+ // ErrConnectionReset is sent if a stream is reset. This can happen
+ // if the backlog is exceeded, or if there was a remote GoAway.
+ ErrConnectionReset = fmt.Errorf("connection reset")
+
+ // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+ // timeout writing to the underlying stream connection.
+ ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+ // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream to close
+ ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+ // protoVersion is the only version we support
+ protoVersion uint8 = 0
+)
+
+const (
+ // Data is used for data frames. They are followed
+ // by length bytes worth of payload.
+ typeData uint8 = iota
+
+ // WindowUpdate is used to change the window of
+ // a given stream. The length indicates the delta
+ // update to the window.
+ typeWindowUpdate
+
+ // Ping is sent as a keep-alive or to measure
+ // the RTT. The StreamID and Length value are echoed
+ // back in the response.
+ typePing
+
+ // GoAway is sent to terminate a session. The StreamID
+ // should be 0 and the length is an error code.
+ typeGoAway
+)
+
+const (
+ // SYN is sent to signal a new stream. May
+ // be sent with a data payload
+ flagSYN uint16 = 1 << iota
+
+ // ACK is sent to acknowledge a new stream. May
+ // be sent with a data payload
+ flagACK
+
+ // FIN is sent to half-close the given stream.
+ // May be sent with a data payload.
+ flagFIN
+
+ // RST is used to hard close a given stream.
+ flagRST
+)
+
+const (
+ // initialStreamWindow is the initial stream window size
+ initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+ // goAwayNormal is sent on a normal termination
+ goAwayNormal uint32 = iota
+
+ // goAwayProtoErr sent on a protocol error
+ goAwayProtoErr
+
+ // goAwayInternalErr sent on an internal error
+ goAwayInternalErr
+)
+
+const (
+ sizeOfVersion = 1
+ sizeOfType = 1
+ sizeOfFlags = 2
+ sizeOfStreamID = 4
+ sizeOfLength = 4
+ headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
+ sizeOfStreamID + sizeOfLength
)
+
+type header []byte
+
+func (h header) Version() uint8 {
+ return h[0]
+}
+
+func (h header) MsgType() uint8 {
+ return h[1]
+}
+
+func (h header) Flags() uint16 {
+ return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+ return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+ return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+ return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+ h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+ h[0] = protoVersion
+ h[1] = msgType
+ binary.BigEndian.PutUint16(h[2:4], flags)
+ binary.BigEndian.PutUint32(h[4:8], streamID)
+ binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/mux.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000000..18a078c8ad9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,98 @@
+package yamux
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+ // AcceptBacklog is used to limit how many streams may be
+ // waiting for an accept.
+ AcceptBacklog int
+
+ // EnableKeepAlive is used to enable sending periodic keep-alive
+ // messages using a ping.
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+ // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+ // which we will suspect a problem with the underlying connection and
+ // close it. This is only applied to writes, where there's generally
+ // an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // LogOutput is used to control the log destination. Either Logger or
+ // LogOutput can be set, not both.
+ LogOutput io.Writer
+
+ // Logger is used to pass in the logger to be used. Either Logger or
+ // LogOutput can be set, not both.
+ Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ if config.LogOutput != nil && config.Logger != nil {
+ return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+ } else if config.LogOutput == nil && config.Logger == nil {
+ return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/session.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000000..a80ddec35ea
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,653 @@
+package yamux
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+ // not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+ // accepting further connections. Must be first for alignment.
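+ // (Keeping the atomically accessed go-away fields at the very front of the
+ // struct follows the sync/atomic convention: the first word of an allocated
+ // struct is guaranteed to be suitably aligned for atomic access even on
+ // 32-bit platforms; see the Bugs note in the sync/atomic documentation.)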
+ localGoAway int32
+
+ // nextStreamID is the next stream we should
+ // send. This depends on whether we are a client or a server.
+ nextStreamID uint32
+
+ // config holds our configuration
+ config *Config
+
+ // logger is used for our logs
+ logger *log.Logger
+
+ // conn is the underlying connection
+ conn io.ReadWriteCloser
+
+ // bufRead is a buffered reader
+ bufRead *bufio.Reader
+
+ // pings is used to track inflight pings
+ pings map[uint32]chan struct{}
+ pingID uint32
+ pingLock sync.Mutex
+
+ // streams maps a stream id to a stream, and inflight has an entry
+ // for any outgoing stream that has not yet been established. Both are
+ // protected by streamLock.
+ streams map[uint32]*Stream
+ inflight map[uint32]struct{}
+ streamLock sync.Mutex
+
+ // synCh acts like a semaphore. It is sized to the AcceptBacklog which
+ // is assumed to be symmetric between the client and server. This allows
+ // the client to avoid exceeding the backlog and instead blocks the open.
+ synCh chan struct{}
+
+ // acceptCh is used to pass ready streams to the client
+ acceptCh chan *Stream
+
+ // sendCh is used to mark a stream as ready to send,
+ // or to send a header out directly.
+ sendCh chan sendReady
+
+ // recvDoneCh is closed when recv() exits to avoid a race
+ // between stream registration and stream shutdown
+ recvDoneCh chan struct{}
+
+ // shutdown is used to safely close a session
+ shutdown bool
+ shutdownErr error
+ shutdownCh chan struct{}
+ shutdownLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+ Hdr []byte
+ Body io.Reader
+ Err chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+ logger := config.Logger
+ if logger == nil {
+ logger = log.New(config.LogOutput, "", log.LstdFlags)
+ }
+
+ s := &Session{
+ config: config,
+ logger: logger,
+ conn: conn,
+ bufRead: bufio.NewReader(conn),
+ pings: make(map[uint32]chan struct{}),
+ streams: make(map[uint32]*Stream),
+ inflight: make(map[uint32]struct{}),
+ synCh: make(chan struct{}, config.AcceptBacklog),
+ acceptCh: make(chan *Stream, config.AcceptBacklog),
+ sendCh: make(chan sendReady, 64),
+ recvDoneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ }
+ if client {
+ s.nextStreamID = 1
+ } else {
+ s.nextStreamID = 2
+ }
+ go s.recv()
+ go s.send()
+ if config.EnableKeepAlive {
+ go s.keepalive()
+ }
+ return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+ select {
+ case <-s.shutdownCh:
+ return true
+ default:
+ return false
+ }
+}
+
+// CloseChan returns a read-only channel which is closed as
+// soon as the session is closed.
+func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. 
+func (s *Session) GoAway() error {
+ return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+ atomic.SwapInt32(&s.localGoAway, 1)
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeGoAway, 0, 0, reason)
+ return hdr
+}
+
+// Ping is used to measure the RTT response time
+func (s *Session) Ping() (time.Duration, error) {
+ // Get a channel for the ping
+ ch := make(chan struct{})
+
+ // Get a new ping id, mark as pending
+ s.pingLock.Lock()
+ id := s.pingID
+ s.pingID++
+ s.pings[id] = ch
+ s.pingLock.Unlock()
+
+ // Send the ping request
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagSYN, 0, id)
+ if err := s.waitForSend(hdr, nil); err != nil {
+ return 0, err
+ }
+
+ // Wait for a response
+ start := time.Now()
+ select {
+ case <-ch:
+ case <-time.After(s.config.ConnectionWriteTimeout):
+ s.pingLock.Lock()
+ delete(s.pings, id) // Ignore it if a response comes later.
+ s.pingLock.Unlock()
+ return 0, ErrTimeout
+ case <-s.shutdownCh:
+ return 0, ErrSessionShutdown
+ }
+
+ // Compute the RTT
+ return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+ for {
+ select {
+ case <-time.After(s.config.KeepAliveInterval):
+ _, err := s.Ping()
+ if err != nil {
+ if err != ErrSessionShutdown {
+ s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+ s.exitErr(ErrKeepAliveTimeout)
+ }
+ return
+ }
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+ errCh := make(chan error, 1)
+ return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
+ t := timerPool.Get()
+ timer := t.(*time.Timer)
+ timer.Reset(s.config.ConnectionWriteTimeout)
+ defer func() {
+ timer.Stop()
+ select {
+ case <-timer.C:
+ default:
+ }
+ timerPool.Put(t)
+ }()
+
+ ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
+ select {
+ case s.sendCh <- ready:
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+
+ select {
+ case err := <-errCh:
+ return err
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// sendNoWait does a send without waiting. Since there's the expectation that
+// the send happens right here, we enforce the connection write timeout if we
+// can't queue the header to be sent.
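+// (Both waitForSendErr above and sendNoWait below draw timers from the
+// timerPool in util.go. The deferred Stop-then-drain is what makes the pooled
+// timers safe to reuse: Stop does not flush a tick that has already fired, so
+// without the drain a stale tick left in timer.C could cause a spurious
+// timeout for the next user of that timer.)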
+func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil 
{
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+ }
+
+ // Read the new data
+ if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+ flags := hdr.Flags()
+ pingID := hdr.Length()
+
+ // Check if this is a query, respond back in a separate context so we
+ // don't interfere with the receiving thread blocking for the write.
+ if flags&flagSYN == flagSYN {
+ go func() {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagACK, 0, pingID)
+ if err := s.sendNoWait(hdr); err != nil {
+ s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+ }
+ }()
+ return nil
+ }
+
+ // Handle a response
+ s.pingLock.Lock()
+ ch := s.pings[pingID]
+ if ch != nil {
+ delete(s.pings, pingID)
+ close(ch)
+ }
+ s.pingLock.Unlock()
+ return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+ code := hdr.Length()
+ switch code {
+ case goAwayNormal:
+ atomic.SwapInt32(&s.remoteGoAway, 1)
+ case goAwayProtoErr:
+ s.logger.Printf("[ERR] yamux: received protocol error go away")
+ return fmt.Errorf("yamux protocol error")
+ case goAwayInternalErr:
+ s.logger.Printf("[ERR] yamux: received internal error go away")
+ return fmt.Errorf("remote yamux internal error")
+ default:
+ s.logger.Printf("[ERR] yamux: received unexpected go away")
+ return fmt.Errorf("unexpected go away received")
+ }
+ return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+ // Reject immediately if we are doing a go away
+ if atomic.LoadInt32(&s.localGoAway) == 1 {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+
+ // Allocate a new stream
+ stream := newStream(s, id, streamSYNReceived)
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+
+ // Check if stream already exists
+ if _, ok := s.streams[id]; ok {
+ s.logger.Printf("[ERR] yamux: duplicate stream declared")
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return ErrDuplicateStream
+ }
+
+ // Register the stream
+ s.streams[id] = stream
+
+ // Check if we've exceeded the backlog
+ select {
+ case s.acceptCh <- stream:
+ return nil
+ default:
+ // Backlog exceeded! RST the stream
+ s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+ delete(s.streams, id)
+ stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(stream.sendHdr)
+ }
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+ }
+ }
+ delete(s.streams, id)
+ s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/stream.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 00000000000..aa239197398 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,470 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. +type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto 
START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. 
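+// (The delta below is computed as (MaxStreamWindowSize - buffered bytes) -
+// recvWindow, and the update is skipped while delta < max/2 with no flags
+// pending, so window updates are batched rather than sent after every Read.)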
+func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. 
The state lock is acquired internally.
+func (s *Stream) processFlags(flags uint16) error {
+ // Close the stream without holding the state lock
+ closeStream := false
+ defer func() {
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ }()
+
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ if flags&flagACK == flagACK {
+ if s.state == streamSYNSent {
+ s.state = streamEstablished
+ }
+ s.session.establishStream(s.id)
+ }
+ if flags&flagFIN == flagFIN {
+ switch s.state {
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamRemoteClose
+ s.notifyWaiting()
+ case streamLocalClose:
+ s.state = streamClosed
+ closeStream = true
+ s.notifyWaiting()
+ default:
+ s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+ return ErrUnexpectedFlag
+ }
+ }
+ if flags&flagRST == flagRST {
+ s.state = streamReset
+ closeStream = true
+ s.notifyWaiting()
+ }
+ return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+ asyncNotify(s.recvNotifyCh)
+ asyncNotify(s.sendNotifyCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Increase window, unblock a sender
+ atomic.AddUint32(&s.sendWindow, hdr.Length())
+ asyncNotify(s.sendNotifyCh)
+ return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Check that our recv window is not exceeded
+ length := hdr.Length()
+ if length == 0 {
+ return nil
+ }
+
+ // Wrap in a limited reader
+ conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+ // Copy into buffer
+ s.recvLock.Lock()
+
+ if length > s.recvWindow {
+ s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+ s.recvLock.Unlock() // release the lock on the error path
+ return ErrRecvWindowExceeded
+ }
+
+ if s.recvBuf == nil {
+ // Allocate the receive buffer just-in-time to fit the full data frame.
+ // This way we can read in the whole packet without further allocations.
+ s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+ }
+ if _, err := io.Copy(s.recvBuf, conn); err != nil {
+ s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+ s.recvLock.Unlock()
+ return err
+ }
+
+ // Decrement the receive window
+ s.recvWindow -= length
+ s.recvLock.Unlock()
+
+ // Unblock any readers
+ asyncNotify(s.recvNotifyCh)
+ return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+ if err := s.SetReadDeadline(t); err != nil {
+ return err
+ }
+ if err := s.SetWriteDeadline(t); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetReadDeadline sets the deadline for future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ s.readDeadline.Store(t)
+ return nil
+}
+
+// SetWriteDeadline sets the deadline for future Write calls
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ s.writeDeadline.Store(t)
+ return nil
+}
+
+// Shrink is used to compact the amount of buffers utilized.
+// This is useful when using Yamux in a connection pool to reduce
+// the idle memory utilization.
+func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/util.go b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 00000000000..8a73e9249a6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/AUTHORS b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/AUTHORS new file mode 100644 index 00000000000..15167cd746c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/CONTRIBUTORS b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/CONTRIBUTORS new file mode 100644 index 00000000000..1c4577e9680 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/PATENTS b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/PATENTS
new file mode 100644
index 00000000000..733099041f8
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go
new file mode 100644
index 00000000000..77fb8b9a046
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go
@@ -0,0 +1,134 @@
+// Package brainpool implements Brainpool elliptic curves.
+// Implementation of rcurves is from github.com/ebfe/brainpool
+// Note that these curves are implemented with naive, non-constant time operations
+// and are likely not suitable for environments where timing attacks are a concern.
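+// (The r1 curves below are evaluated on their t1 "twisted" counterparts: the
+// rcurve wrapper in rcurve.go maps points with (x, y) -> (x*z^2 mod P, y*z^3 mod P)
+// and back via the precomputed inverse of z. Each r1/t1 pair shares P, N and
+// BitSize and differs only in the generator point and z.)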
+package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = 
new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go new file mode 100644 index 00000000000..7e291d6aa4e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + +var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty 
+} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/cast5/cast5.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/cast5/cast5.go new file mode 100644 index 00000000000..e0207352c55 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/cast5/cast5.go @@ -0,0 +1,526 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common +// OpenPGP cipher. +package cast5 // import "github.com/keybase/go-crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | 
uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. 
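+// For example, an index of 0xD selects word t[0xD>>2] = t[3], shifted right by
+// 24-8*(0xD&3) = 16 bits, i.e. the second-most-significant byte of t[3]; this is
+// the (t[i>>2] >> (24 - 8*(i&3))) & 0xff indexing used by keySchedule below.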
+ +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
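[Editor's aside: the three definitions follow immediately below. Each computes t = m op d, rotates t left by r, and then mixes four S-box lookups with a different pattern of +, -, and ^; the rotate amounts are always in [0,31] because keySchedule masks them with 0x1f. A hedged restatement of f1 using math/bits (the name f1Alt and the sBox parameter are illustrative only, added so the sketch compiles on its own):]

    package cast5sketch

    import "math/bits"

    // f1Alt mirrors the vendored f1: rotate (m + d) left by r, then combine
    // four S-box bytes. The original's (t << r) | (t >> (32 - r)) is the
    // same rotation for r in [0,31].
    func f1Alt(d, m uint32, r uint8, sBox *[8][256]uint32) uint32 {
        i := bits.RotateLeft32(m+d, int(r))
        return ((sBox[0][i>>24] ^ sBox[1][(i>>16)&0xff]) - sBox[2][(i>>8)&0xff]) + sBox[3][i&0xff]
    }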
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h new file mode 100644 index 00000000000..b3f74162f60 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s new file mode 100644 index 00000000000..ee7b4bd5f8e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s new file mode 100644 index 00000000000..cd793a5b5f2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
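[Editor's aside: the cswap routine that follows is the constant-time conditional swap used by the Montgomery ladder. Depending on v, it either swaps the two working points (x2,z2) and (x3,z3) or leaves them alone, with identical instruction and memory traffic either way, so the secret bit v never influences timing. A hedged pure-Go rendering of the same operation (cswapGeneric is an illustrative name):]

    package curve25519sketch

    // cswapGeneric swaps field elements inout[0],inout[1] with
    // inout[2],inout[3] when v == 1 and does nothing when v == 0,
    // without branching on v. v must be 0 or 1.
    func cswapGeneric(inout *[4][5]uint64, v uint64) {
        mask := ^(v - 1) // v==1 -> all ones, v==0 -> zero
        for i := 0; i < 2; i++ {
            for j := 0; j < 5; j++ {
                t := mask & (inout[i][j] ^ inout[i+2][j])
                inout[i][j] ^= t
                inout[i+2][j] ^= t
            }
        }
    }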
+ +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go new file mode 100644 index 00000000000..cb8fbc57b97 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
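[Editor's aside on the fieldElement representation defined above (the load4 helper that the preceding comment introduces continues right after this note): the ten limbs use radix 2^25.5, alternating 26- and 25-bit widths, so limb i sits at bit position 0, 26, 51, 77, 102, 128, 153, 179, 204, 230. A hedged big.Int restatement, handy for cross-checking the limb arithmetic (feToBig and limbExp are illustrative names):]

    package curve25519sketch

    import "math/big"

    // Bit positions of the ten limbs, from the doc comment above.
    var limbExp = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

    // feToBig evaluates t[0] + 2^26*t[1] + ... + 2^230*t[9] and reduces it
    // mod p = 2^255 - 19, giving the canonical value a fieldElement encodes.
    func feToBig(t *[10]int32) *big.Int {
        sum := new(big.Int)
        for i, e := range limbExp {
            sum.Add(sum, new(big.Int).Lsh(big.NewInt(int64(t[i])), e))
        }
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        return sum.Mod(sum, p) // Mod is Euclidean: result lands in [0, p)
    }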
+func load4(in []byte) int64 { + return int64(binary.LittleEndian.Uint32(in)) +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^255)2^(-255)<=2^255-1. +// +// Write x=r+19(2^(-255))r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func feToBytes(s *[32]byte, h *fieldElement) { + carry := make([]int32, 10) + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
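[Editor's aside: the *19 pre-multiplications in feMul, defined next, all come from one identity: a cross term whose weight overflows bit position 255 wraps back down because 2^255 = 19 (mod 2^255 - 19); the additional *2 factors (the _38 names) compensate for the uneven 26/25-bit limb widths. A one-line big.Int check of the identity, as a self-contained sketch:]

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)) // prints 19
    }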
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
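[Editor's aside: feInvert, defined next, is Fermat inversion: since p = 2^255 - 19 is prime, z^(p-2) = z^-1 (mod p), and the fixed square-and-multiply chain below computes exactly z^(2^255 - 21). The degenerate "for i = 1; i < 1" loops execute zero times; the ref10 translation kept them for structural symmetry with the other loops. The same inverse via big.Int, as a cross-check sketch (invertBig is an illustrative name):]

    package curve25519sketch

    import "math/big"

    // invertBig returns z^(p-2) mod p, the value feInvert computes.
    // z is assumed nonzero mod p.
    func invertBig(z *big.Int) *big.Int {
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        return new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
    }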
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go new file mode 100644 index 00000000000..a3d3a3d9177 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go @@ -0,0 +1,124 @@ +package curve25519 + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var cv25519 cv25519Curve + +type cv25519Curve struct { + *elliptic.CurveParams +} + +func copyReverse(dst []byte, src []byte) { + // Curve 25519 multiplication functions expect scalars in reverse + // order than PGP. To keep the curve25519Curve type consistent + // with other curves, we reverse it here. + for i, j := 0, len(src)-1; j >= 0 && i < len(dst); i, j = i+1, j-1 { + dst[i] = src[j] + } +} + +func copyTruncate(dst []byte, src []byte) { + lenDst, lenSrc := len(dst), len(src) + if lenDst == lenSrc { + copy(dst, src) + } else if lenDst > lenSrc { + copy(dst[lenDst-lenSrc:lenDst], src) + } else if lenDst < lenSrc { + copy(dst, src[:lenDst]) + } +} + +func (cv25519Curve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + // Assume y1 is 0 with cv25519. 
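// (Editor's note, not part of the vendored file: Curve25519 arithmetic is
// x-coordinate-only, so this adapter to the (x, y)-shaped elliptic.Curve
// interface carries y as a dummy zero; IsOnCurve below accepts exactly such
// points. The scalar is reversed because PGP hands it over big-endian while
// scalarMult above consumes little-endian bytes; the x coordinate is not
// reversed, since it round-trips through SetBytes/Bytes in wire byte order
// and copyTruncate only restores stripped leading zero bytes.)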
+ var dst [32]byte + var x1Bytes [32]byte + var scalarBytes [32]byte + + copyTruncate(x1Bytes[:], x1.Bytes()) + copyReverse(scalarBytes[:], scalar) + + scalarMult(&dst, &scalarBytes, &x1Bytes) + + x = new(big.Int).SetBytes(dst[:]) + y = new(big.Int) + return x, y +} + +func (cv25519Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + var dst [32]byte + var scalarBytes [32]byte + copyReverse(scalarBytes[:], scalar[:32]) + scalarMult(&dst, &scalarBytes, &basePoint) + x = new(big.Int).SetBytes(dst[:]) + y = new(big.Int) + return x, y +} + +func (cv25519Curve) IsOnCurve(bigX, bigY *big.Int) bool { + return bigY.Sign() == 0 // bigY == 0 ? +} + +// More information about 0x40 point format: +// https://tools.ietf.org/html/draft-koch-eddsa-for-openpgp-00#section-3 +// In addition to uncompressed point format described here: +// https://tools.ietf.org/html/rfc6637#section-6 + +func (cv25519Curve) MarshalType40(x, y *big.Int) []byte { + byteLen := 32 + + ret := make([]byte, 1+byteLen) + ret[0] = 0x40 + + xBytes := x.Bytes() + copyTruncate(ret[1:], xBytes) + return ret +} + +func (cv25519Curve) UnmarshalType40(data []byte) (x, y *big.Int) { + if len(data) != 1+32 { + return nil, nil + } + if data[0] != 0x40 { + return nil, nil + } + x = new(big.Int).SetBytes(data[1:]) + // Any x is a valid curve point. + return x, new(big.Int) +} + +// ToCurve25519 casts given elliptic.Curve type to Curve25519 type, or +// returns nil, false if cast was unsuccessful. +func ToCurve25519(cv elliptic.Curve) (cv25519Curve, bool) { + cv2, ok := cv.(cv25519Curve) + return cv2, ok +} + +func initCv25519() { + cv25519.CurveParams = &elliptic.CurveParams{Name: "Curve 25519"} + // Some code relies on these parameters being available for + // checking Curve coordinate length. They should not be used + // directly for any calculations. + cv25519.P, _ = new(big.Int).SetString("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed", 16) + cv25519.N, _ = new(big.Int).SetString("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16) + cv25519.Gx, _ = new(big.Int).SetString("9", 16) + cv25519.Gy, _ = new(big.Int).SetString("20ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9", 16) + cv25519.BitSize = 256 +} + +var initonce sync.Once + +// Cv25519 returns a Curve which (partially) implements Cv25519. Only +// ScalarMult and ScalarBaseMult are valid for this curve. Add and +// Double should not be used. +func Cv25519() elliptic.Curve { + initonce.Do(initCv25519) + return cv25519 +} + +func (curve cv25519Curve) Params() *elliptic.CurveParams { + return curve.CurveParams +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/doc.go new file mode 100644 index 00000000000..78bd9fc07cc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "github.com/keybase/go-crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. 
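[Editor's aside: basePoint (x = 9) and the two exported functions declared just below are all a Diffie-Hellman exchange needs; scalar clamping (e[0] &= 248, e[31] &= 127, e[31] |= 64 inside scalarMult above) happens internally, so raw random bytes are acceptable private keys. A hedged end-to-end sketch:]

    package main

    import (
        "crypto/rand"
        "fmt"

        "github.com/keybase/go-crypto/curve25519"
    )

    func main() {
        var aPriv, bPriv, aPub, bPub, s1, s2 [32]byte
        if _, err := rand.Read(aPriv[:]); err != nil {
            panic(err)
        }
        if _, err := rand.Read(bPriv[:]); err != nil {
            panic(err)
        }

        // Public keys: private scalar times the generator's x coordinate.
        curve25519.ScalarBaseMult(&aPub, &aPriv)
        curve25519.ScalarBaseMult(&bPub, &bPriv)

        // Each side combines its private scalar with the peer's public key.
        curve25519.ScalarMult(&s1, &aPriv, &bPub)
        curve25519.ScalarMult(&s2, &bPriv, &aPub)
        fmt.Println(s1 == s2) // true: both sides derive the same shared secret
    }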
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. +func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s new file mode 100644 index 00000000000..390816106ee --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 00000000000..9e9040b2502 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + 
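+	// (Carry chain: each SHLQ/ANDQ/ADDQ triple masks a limb to 51 bits with
+	// REDMASK51 and passes its overflow into the next limb; the topmost
+	// overflow re-enters at the lowest limb multiplied by 19, since
+	// 2^255 = 19 mod p.)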
SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go 
b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go
new file mode 100644
index 00000000000..5822bd53383
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+func freeze(inout *[5]uint64)
+
+//go:noescape
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+	var work [5][5]uint64
+
+	work[0] = *xr
+	setint(&work[1], 1)
+	setint(&work[2], 0)
+	work[3] = *xr
+	setint(&work[4], 1)
+
+	j := uint(6)
+	var prevbit byte
+
+	for i := 31; i >= 0; i-- {
+		for j < 8 {
+			bit := ((*s)[i] >> j) & 1
+			swap := bit ^ prevbit
+			prevbit = bit
+			cswap(&work[1], uint64(swap))
+			ladderstep(&work)
+			j--
+		}
+		j = 7
+	}
+
+	*xr = work[1]
+	*zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+	copy(e[:], (*in)[:])
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var t, z [5]uint64
+	unpack(&t, base)
+	mladder(&t, &z, &e)
+	invert(&z, &z)
+	mul(&t, &t, &z)
+	pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+	r[0] = v
+	r[1] = 0
+	r[2] = 0
+	r[3] = 0
+	r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+	r[0] = uint64(x[0]) |
+		uint64(x[1])<<8 |
+		uint64(x[2])<<16 |
+		uint64(x[3])<<24 |
+		uint64(x[4])<<32 |
+		uint64(x[5])<<40 |
+		uint64(x[6]&7)<<48
+
+	r[1] = uint64(x[6])>>3 |
+		uint64(x[7])<<5 |
+		uint64(x[8])<<13 |
+		uint64(x[9])<<21 |
+		uint64(x[10])<<29 |
+		uint64(x[11])<<37 |
+		uint64(x[12]&63)<<45
+
+	r[2] = uint64(x[12])>>6 |
+		uint64(x[13])<<2 |
+		uint64(x[14])<<10 |
+		uint64(x[15])<<18 |
+		uint64(x[16])<<26 |
+		uint64(x[17])<<34 |
+		uint64(x[18])<<42 |
+		uint64(x[19]&1)<<50
+
+	r[3] = uint64(x[19])>>1 |
+		uint64(x[20])<<7 |
+		uint64(x[21])<<15 |
+		uint64(x[22])<<23 |
+		uint64(x[23])<<31 |
+		uint64(x[24])<<39 |
+		uint64(x[25]&15)<<47
+
+	r[4] = uint64(x[25])>>4 |
+		uint64(x[26])<<4 |
+		uint64(x[27])<<12 |
+		uint64(x[28])<<20 |
+		uint64(x[29])<<28 |
+		uint64(x[30])<<36 |
+		uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
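+// Adjacent limbs share bytes, which is why out[6], out[12], out[19] and
+// out[25] are each written twice: first with the high bits of one limb,
+// then XOR'd with the low bits of the next limb shifted into position.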
+func pack(out *[32]byte, x *[5]uint64) {
+	t := *x
+	freeze(&t)
+
+	out[0] = byte(t[0])
+	out[1] = byte(t[0] >> 8)
+	out[2] = byte(t[0] >> 16)
+	out[3] = byte(t[0] >> 24)
+	out[4] = byte(t[0] >> 32)
+	out[5] = byte(t[0] >> 40)
+	out[6] = byte(t[0] >> 48)
+
+	out[6] ^= byte(t[1]<<3) & 0xf8
+	out[7] = byte(t[1] >> 5)
+	out[8] = byte(t[1] >> 13)
+	out[9] = byte(t[1] >> 21)
+	out[10] = byte(t[1] >> 29)
+	out[11] = byte(t[1] >> 37)
+	out[12] = byte(t[1] >> 45)
+
+	out[12] ^= byte(t[2]<<6) & 0xc0
+	out[13] = byte(t[2] >> 2)
+	out[14] = byte(t[2] >> 10)
+	out[15] = byte(t[2] >> 18)
+	out[16] = byte(t[2] >> 26)
+	out[17] = byte(t[2] >> 34)
+	out[18] = byte(t[2] >> 42)
+	out[19] = byte(t[2] >> 50)
+
+	out[19] ^= byte(t[3]<<1) & 0xfe
+	out[20] = byte(t[3] >> 7)
+	out[21] = byte(t[3] >> 15)
+	out[22] = byte(t[3] >> 23)
+	out[23] = byte(t[3] >> 31)
+	out[24] = byte(t[3] >> 39)
+	out[25] = byte(t[3] >> 47)
+
+	out[25] ^= byte(t[4]<<4) & 0xf0
+	out[26] = byte(t[4] >> 4)
+	out[27] = byte(t[4] >> 12)
+	out[28] = byte(t[4] >> 20)
+	out[29] = byte(t[4] >> 28)
+	out[30] = byte(t[4] >> 36)
+	out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+	square(&z2, x)        /* 2 */
+	square(&t, &z2)       /* 4 */
+	square(&t, &t)        /* 8 */
+	mul(&z9, &t, x)       /* 9 */
+	mul(&z11, &z9, &z2)   /* 11 */
+	square(&t, &z11)      /* 22 */
+	mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+	square(&t, &z2_5_0)      /* 2^6 - 2^1 */
+	for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
+		square(&t, &t)
+	}
+	mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+	square(&t, &z2_10_0)      /* 2^11 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+	square(&t, &z2_20_0)      /* 2^21 - 2^1 */
+	for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+	square(&t, &t)            /* 2^41 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+	square(&t, &z2_50_0)      /* 2^51 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+	square(&t, &z2_100_0)      /* 2^101 - 2^1 */
+	for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+	square(&t, &t)            /* 2^201 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+	square(&t, &t)   /* 2^251 - 2^1 */
+	square(&t, &t)   /* 2^252 - 2^2 */
+	square(&t, &t)   /* 2^253 - 2^3 */
+	square(&t, &t)   /* 2^254 - 2^4 */
+	square(&t, &t)   /* 2^255 - 2^5 */
+	mul(r, &t, &z11) /* 2^255 - 21 */
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
new file mode 100644
index 00000000000..5ce80a2e56b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
@@ -0,0 +1,169 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s new file mode 100644 index 00000000000..12f73734ff5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go new file mode 100644 index 00000000000..5ba434b8393 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go @@ -0,0 +1,217 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "github.com/keybase/go-crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. 
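+	// A private key is the 32-byte RFC 8032 seed followed by the 32-byte
+	// public key, hence twice the seed size.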
+ PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:32]) + return seed +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptorand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[32:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + + digest := sha512.Sum512(seed) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + privateKey := make([]byte, PrivateKeySize) + copy(privateKey, seed) + copy(privateKey[32:], publicKeyBytes[:]) + + return privateKey +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. 
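+// The signature is the 32-byte encoding of the point R followed by the
+// 32-byte little-endian scalar s, as laid out in RFC 8032, section 5.1.6.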
+func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 00000000000..e39f086c1d8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. 
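+// d is -121665/121666 mod 2^255-19; both constants are stored as ten signed
+// limbs in the alternating 26-/25-bit radix of the ref10 field representation.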
+var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. 
See the Ed25519 paper +// for a discussion about how these values are used. +var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, 
-4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 
5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, 
-26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + 
FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, 
-27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 
5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 
14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, 
-30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 
14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, 
-10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, 
-3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, 
-7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, 
-9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 
10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, 
-7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + 
FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 
32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, 
-3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 
1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 
3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 
43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, 
-2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, 
-24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, 
-12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, 
-6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 00000000000..fd03c252af4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
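Reviewer aside, not part of the vendored file: everything in this file leans on the unsaturated limb layout documented above, where limbs alternate 26 and 25 bits so that limb i carries weight 2^ceil(25.5*i). A minimal math/big sketch of that evaluation (the helper name limbsToInt is ours, purely illustrative):

package main

import (
	"fmt"
	"math/big"
)

// limbsToInt evaluates t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9],
// the integer a FieldElement represents per the comment above.
func limbsToInt(t [10]int64) *big.Int {
	exp := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230} // ceil(25.5*i)
	v := new(big.Int)
	for i, ti := range t {
		v.Add(v, new(big.Int).Lsh(big.NewInt(ti), exp[i]))
	}
	return v
}

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	one := limbsToInt([10]int64{1}) // FeOne: fe[0] = 1, all other limbs zero
	fmt.Println(one.Cmp(big.NewInt(1)) == 0, one.Cmp(p) < 0) // true true
}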
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
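Reviewer aside: the g*_19 precomputations in FeMul below exist because 2^255 ≡ 19 (mod p), so any cross term whose limb weights sum to 2^255 or beyond wraps around times 19; a product of two odd-indexed limbs picks up an extra factor of 2 as well (each odd limb carries weight 2^(25.5*i + 0.5)), which is where f1_2 and friends come from. A standalone math/big check of the first such fold, f1_2*g9_19 landing in h0 with weight 2*19 = 38:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// f[1] has weight 2^26 and g[9] has weight 2^230, so f[1]*g[9] sits at
	// 2^256; FeMul folds it into h0 (weight 2^0) as f1_2*g9_19 = 38*f[1]*g[9].
	w := new(big.Int).Lsh(big.NewInt(1), 256)
	fmt.Println(new(big.Int).Mod(w, p)) // prints 38
}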
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
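Reviewer aside on FeInvert above: it is Fermat inversion, raising z to p-2 = 2^255 - 21 through a fixed square-and-multiply chain, which is why its trailing comment reads "254..5,3,1,0": those are exactly the set bits of p-2. That claim is cheap to re-verify independently:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	pm2 := new(big.Int).Sub(p, big.NewInt(2))

	// Build the exponent with bits {0, 1, 3, 5..254} set, per FeInvert's
	// running comments, and compare it against p-2.
	e := new(big.Int)
	for _, b := range []int{0, 1, 3} {
		e.SetBit(e, b, 1)
	}
	for b := 5; b <= 254; b++ {
		e.SetBit(e, b, 1)
	}
	fmt.Println(e.Cmp(pm2) == 0) // true: FeInvert computes z^(p-2) = 1/z
}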
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
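Reviewer aside: equal above, together with negative and selectPoint just below, is the constant-time toolkit behind GeScalarMultBase's table lookups, so that no branch or memory index depends on a secret digit. The decrement trick works because b and c are non-negative, hence x = b^c is in [0, 2^31), and only x == 0 wraps to a value with the top bit set. Restated standalone:

package main

import "fmt"

// equal mirrors the vendored helper: returns 1 if b == c and 0 otherwise,
// without branching. For non-negative b and c, x = b^c never has its top bit
// set, so after x-- the top bit is set exactly when x started at 0
// (unsigned wrap to 0xFFFFFFFF).
func equal(b, c int32) int32 {
	x := uint32(b ^ c)
	x--
	return int32(x >> 31)
}

func main() {
	fmt.Println(equal(7, 7), equal(7, 8), equal(0, 0)) // 1 0 1
}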
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
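Reviewer aside: the constants 666643, 470296, 654183, 997805, 136657, 683901 that ScMulAdd below (and ScReduce after it) folds high limbs through are the signed base-2^21 digits of -delta, where l = 2^252 + delta. Since 2^252 ≡ -delta (mod l), a limb of weight 2^(21k), k >= 12, can be traded for six signed limbs twelve positions lower, which is exactly what the s23..s12 folds do. A math/big check of that identity:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)

	// Signed base-2^21 digits used by the high-limb folds in ScMulAdd/ScReduce.
	coeffs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	sum := new(big.Int)
	for k, c := range coeffs {
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(c), uint(21*k)))
	}
	sum.Neg(sum)
	// Expect true: the six digits sum to -delta exactly, so replacing a limb
	// of weight 2^(21k) by these coefficients at positions k-12..k-7 is an
	// identity modulo l = 2^252 + delta.
	fmt.Println(sum.Cmp(delta) == 0)
}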
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
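
The six multipliers recurring through this reduction (666643, 470296, 654183, 997805, 136657, 683901) are the signed 21-bit limbs of 2^252 modulo the group order l, so each folding pass replaces a high limb s_{i+12}·2^(21·(i+12)) by its equivalent contribution to s_i..s_{i+5}. A standalone math/big cross-check of that identity (not part of the vendored code, shown only to make the constants less magic):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493, the Ed25519 group order.
	l, _ := new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

	// Signed 21-bit limb coefficients used by the folding steps above.
	coeffs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	e := new(big.Int)
	for i, c := range coeffs {
		e.Add(e, new(big.Int).Lsh(big.NewInt(c), uint(21*i)))
	}

	// The folding is valid iff 2^252 is congruent to e modulo l.
	p252 := new(big.Int).Lsh(big.NewInt(1), 252)
	diff := new(big.Int).Sub(p252, e)
	fmt.Println(new(big.Int).Mod(diff, l).Sign() == 0) // true
}
```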
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go new file mode 100644 index 00000000000..28717403977 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go @@ -0,0 +1,302 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "github.com/keybase/go-crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + "unicode" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc *uint32 +} + +// ourIsSpace checks if a rune is either space according to unicode +// package, or ZeroWidthSpace (which is not a space according to +// unicode module). Used to trim lines during header reading. 
+func ourIsSpace(r rune) bool { + return r == '\u200b' || unicode.IsSpace(r) +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + + // Entry-level cleanup, just trim spaces. + line = bytes.TrimFunc(line, ourIsSpace) + + lineWithChecksum := false + foldedChecksum := false + if !isPrefix && len(line) >= 5 && line[len(line)-5] == '=' && line[len(line)-4] != '=' { + // This is the checksum line. Checksum should appear on separate line, + // but some bundles don't have a newline between main payload and the + // checksum, and we try to support that. + + // `=` is not a base64 character with the exception of padding, and the + // padding can only be 2 characters long at most ("=="), so we can + // safely assume that 5 characters starting with `=` at the end of the + // line can't be a valid ending of a base64 stream. In other words, `=` + // at position len-5 in base64 stream can never be a valid part of that + // stream. + + // Checksum can never appear if isPrefix is true - that is, when + // ReadLine returned non-final part of some line because it was longer + // than its buffer. + + if l.crc != nil { + // Error out early if there are multiple checksums. + return 0, ArmorCorrupt + } + + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[len(line)-4:]) + if err != nil { + return 0, fmt.Errorf("error decoding CRC: %s", err.Error()) + } else if m != 3 { + return 0, fmt.Errorf("error decoding CRC: wrong size CRC") + } + + crc := uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + l.crc = &crc + + line = line[:len(line)-5] + + lineWithChecksum = true + + // If we've found a checksum but there is still data left, we don't + // want to enter the "looking for armor end" loop, we still need to + // return the leftover data to the reader. + foldedChecksum = len(line) > 0 + + // At this point, `line` contains leftover data or "" (if checksum + // was on separate line.) + } + + expectArmorEnd := false + if l.crc != nil && !foldedChecksum { + // "looking for armor end" loop + + // We have a checksum, and we are now reading what comes afterwards. + // Skip all empty lines until we see something and we except it to be + // ArmorEnd at this point. + + // This loop is not entered if there is more data *before* the CRC + // suffix (if the CRC is not on separate line). + for { + if len(strings.TrimSpace(string(line))) > 0 { + break + } + lineWithChecksum = false + line, _, err = l.in.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return + } + } + expectArmorEnd = true + } + + if bytes.HasPrefix(line, armorEnd) { + if lineWithChecksum { + // ArmorEnd and checksum at the same line? + return 0, ArmorCorrupt + } + l.eof = true + return 0, io.EOF + } else if expectArmorEnd { + // We wanted armorEnd but didn't see one. + return 0, ArmorCorrupt + } + + // Clean-up line from whitespace to pass it further (to base64 + // decoder). This is done after test for CRC and test for + // armorEnd. Keys that have whitespace in CRC will have CRC + // treated as part of the payload and probably fail in base64 + // reading. 
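
The `=XXXX` suffix being parsed in this reader is the armor checksum: a CRC-24 of the decoded payload, stored big-endian in three bytes and base64-encoded into four characters after a literal `=`. A small self-contained illustration of how that footer line is produced (the crc24 helper is copied from the function above; the payload is arbitrary):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// crc24 mirrors the vendored implementation (RFC 4880, section 6.1).
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= 0x1864cfb
			}
		}
	}
	return crc
}

func main() {
	payload := []byte("hello world")
	crc := crc24(0xb704ce, payload) & 0xffffff
	sum := []byte{byte(crc >> 16), byte(crc >> 8), byte(crc)}
	// The armor footer is '=' plus the base64 of the three CRC bytes,
	// which is exactly the "=XXXX" line the reader above is detecting.
	fmt.Println("=" + base64.StdEncoding.EncodeToString(sum))
}
```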
+ line = bytes.Map(func(r rune) rune { + if ourIsSpace(r) { + return -1 + } + return r + }, line) + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != nil && *r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. +func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimFunc(line, ourIsSpace) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go new file mode 100644 index 00000000000..075a1978e6d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. 
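
Decode above and Encode below form a symmetric pair: Encode emits the BEGIN line, headers, base64 body, CRC-24 footer, and END line that Decode then parses and verifies. A minimal round trip, assuming only this package and the standard library:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/keybase/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "demo"})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, armor"))
	w.Close() // writes the "=XXXX" CRC-24 line and the END trailer

	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(block.Body) // the CRC is checked at EOF here
	fmt.Printf("%s %q\n", block.Type, body)
}
```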
+func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. +// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine, []byte{'\n'}) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go new file mode 100644 index 00000000000..e601e389f12 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 00000000000..1a87b27571e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,316 @@ +package ecdh + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/elliptic" + "encoding/binary" + "errors" + "github.com/keybase/go-crypto/curve25519" + "io" + "math/big" +) + +type PublicKey struct { + elliptic.Curve + X, Y *big.Int +} + +type PrivateKey struct { + PublicKey + X *big.Int +} + +// KDF implements Key Derivation Function as described in +// https://tools.ietf.org/html/rfc6637#section-7 +func (e *PublicKey) KDF(S []byte, kdfParams []byte, hash crypto.Hash) []byte { + sLen := (e.Curve.Params().P.BitLen() + 7) / 8 + buf := new(bytes.Buffer) + buf.Write([]byte{0, 0, 0, 1}) + if sLen > len(S) { + // zero-pad the S. If we got invalid S (bigger than curve's + // P), we are going to produce invalid key. Garbage in, + // garbage out. + buf.Write(make([]byte, sLen-len(S))) + } + buf.Write(S) + buf.Write(kdfParams) + + hashw := hash.New() + + hashw.Write(buf.Bytes()) + key := hashw.Sum(nil) + + return key +} + +// AESKeyUnwrap implements RFC 3394 Key Unwrapping. See +// http://tools.ietf.org/html/rfc3394#section-2.2.1 +// Note: The second described algorithm ("index-based") is implemented +// here. +func AESKeyUnwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, errors.New("cipherText must by a multiple of 64 bits") + } + + cipher, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + // - Set A = C[0] + var A [aes.BlockSize]byte + copy(A[:8], cipherText[:8]) + + // For i = 1 to n + // Set R[i] = C[i] + R := make([]byte, len(cipherText)-8) + copy(R, cipherText[8:]) + + // 2) Compute intermediate values. + for j := 5; j >= 0; j-- { + for i := nblocks - 1; i >= 0; i-- { + // B = AES-1(K, (A ^ t) | R[i]) where t = n*j+i + // A = MSB(64, B) + t := uint64(nblocks*j + i + 1) + At := binary.BigEndian.Uint64(A[:8]) ^ t + binary.BigEndian.PutUint64(A[:8], At) + + copy(A[8:], R[i*8:i*8+8]) + cipher.Decrypt(A[:], A[:]) + + // R[i] = LSB(B, 64) + copy(R[i*8:i*8+8], A[8:]) + } + } + + // 3) Output results. 
+ // If A is an appropriate initial value (see 2.2.3), + for i := 0; i < 8; i++ { + if A[i] != 0xA6 { + return nil, errors.New("Failed to unwrap key (A is not IV)") + } + } + + return R, nil +} + +// AESKeyWrap implements RFC 3394 Key Wrapping. See +// https://tools.ietf.org/html/rfc3394#section-2.2.2 +// Note: The second described algorithm ("index-based") is implemented +// here. +func AESKeyWrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, errors.New("plainText must be a multiple of 64 bits") + } + + cipher, err := aes.NewCipher(key) // NewCipher checks key size + if err != nil { + return nil, err + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var A [aes.BlockSize]byte + // Section 2.2.3.1 -- Initial Value + // http://tools.ietf.org/html/rfc3394#section-2.2.3.1 + for i := 0; i < 8; i++ { + A[i] = 0xA6 + } + + // For i = 1 to n + // Set R[i] = P[i] + R := make([]byte, len(plainText)) + copy(R, plainText) + + // 2) Calculate intermediate values. + for j := 0; j <= 5; j++ { + for i := 0; i < nblocks; i++ { + // B = AES(K, A | R[i]) + copy(A[8:], R[i*8:i*8+8]) + cipher.Encrypt(A[:], A[:]) + + // (Assume B = A) + // A = MSB(64, B) ^ t where t = (n*j)+1 + t := uint64(j*nblocks + i + 1) + At := binary.BigEndian.Uint64(A[:8]) ^ t + binary.BigEndian.PutUint64(A[:8], At) + + // R[i] = LSB(64, B) + copy(R[i*8:i*8+8], A[8:]) + } + } + + // 3) Output results. + // Set C[0] = A + // For i = 1 to n + // C[i] = R[i] + return append(A[:8], R...), nil +} + +// PadBuffer pads byte buffer buf to a length being multiple of +// blockLen. Additional bytes appended to the buffer have value of the +// number padded bytes. E.g. if the buffer is 3 bytes short of being +// 40 bytes total, the appended bytes will be [03, 03, 03]. +func PadBuffer(buf []byte, blockLen int) []byte { + padding := blockLen - (len(buf) % blockLen) + if padding == 0 { + return buf + } + + padBuf := make([]byte, padding) + for i := 0; i < padding; i++ { + padBuf[i] = byte(padding) + } + + return append(buf, padBuf...) +} + +// UnpadBuffer verifies that buffer contains proper padding and +// returns buffer without the padding, or nil if the padding was +// invalid. +func UnpadBuffer(buf []byte, dataLen int) []byte { + padding := len(buf) - dataLen + outBuf := buf[:dataLen] + + for i := dataLen; i < len(buf); i++ { + if buf[i] != byte(padding) { + // Invalid padding - bail out + return nil + } + } + + return outBuf +} + +func (e *PublicKey) Encrypt(random io.Reader, kdfParams []byte, plain []byte, hash crypto.Hash, kdfKeySize int) (Vx *big.Int, Vy *big.Int, C []byte, err error) { + // Vx, Vy - encryption key + + // Note for Curve 25519 - curve25519 library already does key + // clamping in scalarMult, so we can use generic random scalar + // generation from elliptic. + priv, Vx, Vy, err := elliptic.GenerateKey(e.Curve, random) + if err != nil { + return nil, nil, nil, err + } + + // Sx, Sy - shared secret + Sx, _ := e.Curve.ScalarMult(e.X, e.Y, priv) + + // Encrypt the payload with KDF-ed S as the encryption key. Pass + // the ciphertext along with V to the recipient. Recipient can + // generate S using V and their priv key, and then KDF(S), on + // their own, to get encryption key and decrypt the ciphertext, + // revealing encryption key for symmetric encryption later. 
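
For orientation, the recipient simply inverts these steps: recover S from V with the private scalar, re-run the KDF, unwrap, and unpad. A sketch written as if it lived in this package; kdfKeySize and dataLen are illustrative parameters, and the padding error is an assumption, since UnpadBuffer signals bad padding by returning nil:

```go
// decryptSketch is the inverse of Encrypt (sketch only).
func decryptSketch(priv *PrivateKey, Vx, Vy *big.Int, C, kdfParams []byte,
	hash crypto.Hash, kdfKeySize, dataLen int) ([]byte, error) {
	// S = priv * V: the same shared secret the sender derived from V = k*G.
	S := priv.DecryptShared(Vx, Vy)
	// KDF(S), as above (RFC 6637, section 7).
	key := priv.KDF(S, kdfParams, hash)
	plain, err := AESKeyUnwrap(key[:kdfKeySize], C)
	if err != nil {
		return nil, err
	}
	out := UnpadBuffer(plain, dataLen)
	if out == nil {
		return nil, errors.New("invalid padding")
	}
	return out, nil
}
```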
+ + plain = PadBuffer(plain, 8) + key := e.KDF(Sx.Bytes(), kdfParams, hash) + + // Take only as many bytes from key as the key length (the hash + // result might be bigger) + encrypted, err := AESKeyWrap(key[:kdfKeySize], plain) + + return Vx, Vy, encrypted, nil +} + +func (e *PrivateKey) DecryptShared(X, Y *big.Int) []byte { + Sx, _ := e.Curve.ScalarMult(X, Y, e.X.Bytes()) + return Sx.Bytes() +} + +func countBits(buffer []byte) int { + var headerLen int + switch buffer[0] { + case 0x4: + headerLen = 3 + case 0x40: + headerLen = 7 + default: + // Unexpected header - but we can still count the bits. + val := buffer[0] + headerLen = 0 + for val > 0 { + val = val / 2 + headerLen++ + } + } + + return headerLen + (len(buffer)-1)*8 +} + +// elliptic.Marshal and elliptic.Unmarshal only marshals uncompressed +// 0x4 MPI types. These functions will check if the curve is cv25519, +// and if so, use 0x40 compressed type to (un)marshal. Otherwise, +// elliptic.(Un)marshal will be called. + +// Marshal encodes point into either 0x4 uncompressed point form, or +// 0x40 compressed point for Curve 25519. +func Marshal(curve elliptic.Curve, x, y *big.Int) (buf []byte, bitSize int) { + // NOTE: Read more about MPI encoding in the RFC: + // https://tools.ietf.org/html/rfc4880#section-3.2 + + // We are required to encode size in bits, counting from the most- + // significant non-zero bit. So assuming that the buffer never + // starts with 0x00, we only need to count bits in the first byte + // - and in current implentation it will always be 0x4 or 0x40. + + cv, ok := curve25519.ToCurve25519(curve) + if ok { + buf = cv.MarshalType40(x, y) + } else { + buf = elliptic.Marshal(curve, x, y) + } + + return buf, countBits(buf) +} + +// Unmarshal converts point, serialized by Marshal, into x, y pair. +// For 0x40 compressed points (for Curve 25519), y will always be 0. +// It is an error if point is not on the curve, On error, x = nil. +func Unmarshal(curve elliptic.Curve, data []byte) (x, y *big.Int) { + cv, ok := curve25519.ToCurve25519(curve) + if ok { + return cv.UnmarshalType40(data) + } + + return elliptic.Unmarshal(curve, data) +} + +func GenerateKey(curve elliptic.Curve, random io.Reader) (priv *PrivateKey, err error) { + var privBytes []byte + var Vx, Vy *big.Int + + if _, ok := curve25519.ToCurve25519(curve); ok { + privBytes = make([]byte, 32) + _, err = io.ReadFull(random, privBytes) + if err != nil { + return nil, err + } + + // NOTE: PGP expect scalars in reverse order than Curve 25519 + // go library. That's why this trimming is backwards compared + // to curve25519.go + privBytes[31] &= 248 + privBytes[0] &= 127 + privBytes[0] |= 64 + + Vx,Vy = curve.ScalarBaseMult(privBytes) + } else { + privBytes, Vx, Vy, err = elliptic.GenerateKey(curve, random) + if err != nil { + return nil, err + } + } + + priv = &PrivateKey{} + priv.X = new(big.Int).SetBytes(privBytes) + priv.PublicKey.Curve = curve + priv.PublicKey.X = Vx + priv.PublicKey.Y = Vy + return priv, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000000..15dafc5560f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
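
A minimal encrypt/decrypt round trip with this package, using deliberately tiny toy parameters (p = 2^127 - 1, g = 7, random x); real OpenPGP keys carry much larger primes taken from the key packet:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"github.com/keybase/go-crypto/openpgp/elgamal"
)

func main() {
	// Demo-only group; do not use parameters this small in practice.
	p, _ := new(big.Int).SetString("170141183460469231731687303715884105727", 10)
	g := big.NewInt(7)
	x, _ := rand.Int(rand.Reader, p)
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	// Message must fit in pLen-11 bytes because of the PKCS#1 v1.5 padding.
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hi"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	fmt.Println(string(msg), err) // hi <nil>
}
```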
+ +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/keybase/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + s.ModInverse(s, priv.P) + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. 
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go new file mode 100644 index 00000000000..855fa89c1b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,80 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/keybase/go-crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +// DeprecatedKeyError indicates that the key was read and verified +// properly, but uses a deprecated algorithm and can't be used. +type DeprecatedKeyError string + +func (d DeprecatedKeyError) Error() string { + return "openpgp: key is deprecated: " + string(d) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/keys.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/keys.go new file mode 100644 index 00000000000..b30315c4477 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/keys.go @@ -0,0 +1,934 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
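
Because the errors package above exposes distinct named types rather than bare strings, callers can branch on the concrete type, which is exactly what ReadKeyRing does further down to skip unsupported keys. A hypothetical classifier in the same spirit:

```go
package main

import (
	"fmt"

	"github.com/keybase/go-crypto/openpgp/errors"
)

// classify is a hypothetical helper: switch on the concrete error type,
// never on the message text.
func classify(err error) string {
	switch err.(type) {
	case errors.UnsupportedError:
		return "valid OpenPGP data, but uses an unimplemented feature"
	case errors.StructuralError:
		return "syntactically invalid OpenPGP data"
	case errors.UnknownPacketTypeError:
		return "unknown packet type"
	default:
		return "other error"
	}
}

func main() {
	fmt.Println(classify(errors.StructuralError("truncated packet")))
}
```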
+ +package openpgp + +import ( + "crypto/hmac" + "encoding/binary" + "io" + "time" + + "github.com/keybase/go-crypto/openpgp/armor" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/rsa" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + // Revocations that are signed by designated revokers. Reading keys + // will not verify these revocations, because it won't have access to + // issuers' public keys, API consumers should do this instead (or + // not, and just assume that the key is probably revoked). + UnverifiedRevocations []*packet.Signature + Subkeys []Subkey + BadSubkeys []BadSubkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature + Revocation *packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocation *packet.Signature +} + +// BadSubkey is one that failed reconstruction, but we'll keep it around for +// informational purposes. +type BadSubkey struct { + Subkey + Err error +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature + KeyFlags packet.KeyFlagBits +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + + // KeysById returns the set of keys that have the given key id. + // fp can be optionally supplied, which is the full key fingerprint. + // If it's provided, then it must match. This comes up in the case + // of GPG subpacket 33. + KeysById(id uint64, fp []byte) []Key + + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + // fp can be optionally supplied, which is the full key fingerprint. + // If it's provided, then it must match. This comes up in the case + // of GPG subpacket 33. + KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) []Key + + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. 
+func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest, non-revoked key that can + // encrypt. + var maxTime time.Time + for i, subkey := range e.Subkeys { + + // NOTE(maxtaco) + // If there is a Flags subpacket, then we have to follow it, and only + // use keys that are marked for Encryption of Communication. If there + // isn't a Flags subpacket, and this is an Encrypt-Only key (right now only ElGamal + // suffices), then we implicitly use it. The check for primary below is a little + // more open-ended, but for now, let's be strict and potentially open up + // if we see bugs in the wild. + // + // One more note: old DSA/ElGamal keys tend not to have the Flags subpacket, + // so this sort of thing is pretty important for encrypting to older keys. + // + if ((subkey.Sig.FlagsValid && subkey.Sig.FlagEncryptCommunications) || + (!subkey.Sig.FlagsValid && subkey.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal)) && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + subkey.Revocation == nil && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + // + // NOTE(maxtaco) - see note above, how this policy is a little too open-ended + // for my liking, but leave it for now. + i := e.primaryIdentity() + if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest, non-revoked key that can + // sign. + var maxTime time.Time + for i, subkey := range e.Subkeys { + if (!subkey.Sig.FlagsValid || subkey.Sig.FlagSign) && + subkey.PrivateKey.PrivateKey != nil && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) && + subkey.Revocation == nil && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. 
+	i := e.primaryIdentity()
+	if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign) &&
+		e.PrimaryKey.PubKeyAlgo.CanSign() &&
+		!i.SelfSignature.KeyExpired(now) &&
+		e.PrivateKey.PrivateKey != nil {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true
+	}
+
+	return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+func keyMatchesIdAndFingerprint(key *packet.PublicKey, id uint64, fp []byte) bool {
+	if key.KeyId != id {
+		return false
+	}
+	if fp == nil {
+		return true
+	}
+	return hmac.Equal(fp, key.Fingerprint[:])
+}
+
+// KeysById returns the set of keys that have the given key id.
+// fp can be optionally supplied, which is the full key fingerprint.
+// If it's provided, then it must match. This comes up in the case
+// of GPG subpacket 33.
+func (el EntityList) KeysById(id uint64, fp []byte) (keys []Key) {
+	for _, e := range el {
+		if keyMatchesIdAndFingerprint(e.PrimaryKey, id, fp) {
+			var selfSig *packet.Signature
+			for _, ident := range e.Identities {
+				if selfSig == nil {
+					selfSig = ident.SelfSignature
+				} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+					selfSig = ident.SelfSignature
+					break
+				}
+			}
+
+			var keyFlags packet.KeyFlagBits
+			for _, ident := range e.Identities {
+				keyFlags.Merge(ident.SelfSignature.GetKeyFlags())
+			}
+
+			keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, keyFlags})
+		}
+
+		for _, subKey := range e.Subkeys {
+			if keyMatchesIdAndFingerprint(subKey.PublicKey, id, fp) {
+
+				// If there's both a revocation and a sig, then take the
+				// revocation. Otherwise, we can proceed with the sig.
+				sig := subKey.Revocation
+				if sig == nil {
+					sig = subKey.Sig
+				}
+
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig, sig.GetKeyFlags()})
+			}
+		}
+	}
+	return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+// fp can be optionally supplied, which is the full key fingerprint.
+// If it's provided, then it must match. This comes up in the case
+// of GPG subpacket 33.
+func (el EntityList) KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) (keys []Key) {
+	for _, key := range el.KeysById(id, fp) {
+		if len(key.Entity.Revocations) > 0 {
+			continue
+		}
+
+		if key.SelfSignature.RevocationReason != nil {
+			continue
+		}
+
+		if requiredUsage != 0 {
+			var usage byte
+
+			switch {
+			case key.KeyFlags.Valid:
+				usage = key.KeyFlags.BitField
+
+			case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal:
+				// We also need to handle the case where, although the sig's
+				// flags aren't valid, the key is implicitly usable for
+				// encryption by virtue of being ElGamal. See also the comment
+				// in encryptionKey() above.
+				usage |= packet.KeyFlagEncryptCommunications
+				usage |= packet.KeyFlagEncryptStorage
+
+			case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoDSA ||
+				key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoECDSA ||
+				key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoEdDSA:
+				usage |= packet.KeyFlagSign
+
+			// For a primary RSA key without any key flags, be as permissible
+			// as possible.
+ case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoRSA && + keyMatchesIdAndFingerprint(key.Entity.PrimaryKey, id, fp): + usage = (packet.KeyFlagCertify | packet.KeyFlagSign | + packet.KeyFlagEncryptCommunications | packet.KeyFlagEncryptStorage) + } + + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && subKey.PrivateKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Sig.GetKeyFlags()}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } + + panic("unreachable") +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. 
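
A typical consumer of the functions above: read an armored keyring, then look keys up by 64-bit key ID with a usage filter. The file name and key ID here are hypothetical placeholders:

```go
package main

import (
	"fmt"
	"os"

	"github.com/keybase/go-crypto/openpgp"
	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("pubring.asc") // hypothetical armored keyring file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	// Passing nil for fp skips the fingerprint check (GPG subpacket 33).
	var keyID uint64 = 0x1234567890abcdef // hypothetical
	for _, k := range el.KeysByIdUsage(keyID, nil, packet.KeyFlagSign) {
		fmt.Printf("%X usable for signing\n", k.PublicKey.Fingerprint)
	}
}
```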
+func ReadEntity(packets *packet.Reader) (*Entity, error) {
+	e := new(Entity)
+	e.Identities = make(map[string]*Identity)
+
+	p, err := packets.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	var ok bool
+	if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
+		if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
+			packets.Unread(p)
+			return nil, errors.StructuralError("first packet was not a public/private key")
+		} else {
+			e.PrimaryKey = &e.PrivateKey.PublicKey
+		}
+	}
+
+	if !e.PrimaryKey.PubKeyAlgo.CanSign() {
+		return nil, errors.StructuralError("primary key cannot be used for signatures")
+	}
+
+	var current *Identity
+	var revocations []*packet.Signature
+
+	designatedRevokers := make(map[uint64]bool)
+EachPacket:
+	for {
+		p, err := packets.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+		switch pkt := p.(type) {
+		case *packet.UserId:
+
+			// Make a new Identity object, that we might wind up throwing away.
+			// We'll only add it if we get a valid self-signature over this
+			// userID.
+			current = new(Identity)
+			current.Name = pkt.Id
+			current.UserId = pkt
+		case *packet.Signature:
+			if pkt.SigType == packet.SigTypeKeyRevocation {
+				// These revocations won't revoke UIDs (see
+				// SigTypeIdentityRevocation). Handle these first,
+				// because key might have revocation coming from
+				// another key (designated revoker).
+				revocations = append(revocations, pkt)
+				continue
+			}
+
+			// These are signatures by other people on this key. Let's just ignore them
+			// from the beginning, since they shouldn't affect our key decoding one way
+			// or the other.
+			if pkt.IssuerKeyId != nil && *pkt.IssuerKeyId != e.PrimaryKey.KeyId {
+				continue
+			}
+
+			// If this is a signature made by the keyholder, and the signature has stubbed out
+			// critical packets, then *now* we need to bail out.
+			if e := pkt.StubbedOutCriticalError; e != nil {
+				return nil, e
+			}
+
+			// Next handle the case of a self-signature. According to RFC 4880,
+			// Section 5.2.3.3, if there are several self-signatures,
+			// we should take the newer one. If they were both created
+			// at the same time, but one of them has keyflags specified and the
+			// other doesn't, keep the one with the keyflags. We have actually
+			// seen this in the wild (see the 'Yield' test in read_test.go).
+			// If there is a tie, and both have the same value for FlagsValid,
+			// then "last writer wins."
+			//
+			// HOWEVER! We have seen yet more keys in the wild (see the 'Spiros'
+			// test in read_test.go), in which the later self-signature is a bunch
+			// of junk, and doesn't even specify key flags. Does it really make
+			// sense to overwrite reasonable key flags with the empty set? I'm not
+			// sure what that would be trying to achieve, and plus GPG seems to be
+			// ok with this situation, and ignores the later (empty) keyflag set.
+			// So further tighten our overwrite rules, and only allow the later
+			// signature to overwrite the earlier signature if so doing won't
+			// trash the key flags.
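
The condition that follows implements exactly that tightened rule. Restated as a standalone predicate (a sketch, not code from this package):

```go
import "github.com/keybase/go-crypto/openpgp/packet"

// shouldReplaceSelfSig restates the acceptance test used below (sketch only).
func shouldReplaceSelfSig(old, cand *packet.Signature) bool {
	if old == nil {
		return true // first verified self-signature always wins
	}
	if cand.CreationTime.Before(old.CreationTime) {
		return false // never replace a newer signature with an older one
	}
	// Same age or newer: only replace if that does not trade real key
	// flags for an empty set.
	return cand.FlagsValid || !old.FlagsValid
}
```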
+ if current != nil && + (current.SelfSignature == nil || + (!pkt.CreationTime.Before(current.SelfSignature.CreationTime) && + (pkt.FlagsValid || !current.SelfSignature.FlagsValid))) && + (pkt.SigType == packet.SigTypePositiveCert || pkt.SigType == packet.SigTypeGenericCert) && + pkt.IssuerKeyId != nil && + *pkt.IssuerKeyId == e.PrimaryKey.KeyId { + + if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil { + + current.SelfSignature = pkt + + // NOTE(maxtaco) 2016.01.11 + // Only register an identity once we've gotten a valid self-signature. + // It's possible therefore for us to throw away `current` in the case + // no valid self-signatures were found. That's OK as long as there are + // other identities that make sense. + // + // NOTE! We might later see a revocation for this very same UID, and it + // won't be undone. We've preserved this feature from the original + // Google OpenPGP we forked from. + e.Identities[current.Name] = current + } else { + // We really should warn that there was a failure here. Not raise an error + // since this really shouldn't be a fail-stop error. + } + } else if current != nil && pkt.SigType == packet.SigTypeIdentityRevocation { + if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil { + // Note: we are not removing the identity from + // e.Identities. Caller can always filter by Revocation + // field to ignore revoked identities. + current.Revocation = pkt + } + } else if pkt.SigType == packet.SigTypeDirectSignature { + if err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, pkt); err == nil { + if desig := pkt.DesignatedRevoker; desig != nil { + // If it's a designated revoker signature, take last 8 octects + // of fingerprint as Key ID and save it to designatedRevokers + // map. We consult this map later to see if a foreign + // revocation should be added to UnverifiedRevocations. + keyID := binary.BigEndian.Uint64(desig.Fingerprint[len(desig.Fingerprint)-8:]) + designatedRevokers[keyID] = true + } + } + } else if current == nil { + // NOTE(maxtaco) + // + // See https://github.com/keybase/client/issues/2666 + // + // There might have been a user attribute picture before this signature, + // in which case this is still a valid PGP key. In the future we might + // not ignore user attributes (like picture). But either way, it doesn't + // make sense to bail out here. Keep looking for other valid signatures. + // + // Used to be: + // return nil, errors.StructuralError("signature packet found before user id packet") + } else { + current.Signatures = append(current.Signatures, pkt) + } + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + if revocation.IssuerKeyId == nil || *revocation.IssuerKeyId == e.PrimaryKey.KeyId { + // Key revokes itself, something that we can verify. 
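
Note that ReadEntity only files revocations into the two Entity fields; honoring them is left to the caller, as the field comments above say. Two conservative, hypothetical policy helpers, given the openpgp import:

```go
// entityUsable rejects an entity carrying any revocation at all, verified
// self-revocations and unverified designated-revoker ones alike (the
// strictest possible reading).
func entityUsable(e *openpgp.Entity) bool {
	return len(e.Revocations) == 0 && len(e.UnverifiedRevocations) == 0
}

// unrevokedIdentities filters out identities with a verified
// SigTypeIdentityRevocation, which ReadEntity records but does not delete.
func unrevokedIdentities(e *openpgp.Entity) []*openpgp.Identity {
	var out []*openpgp.Identity
	for _, id := range e.Identities {
		if id.Revocation == nil {
			out = append(out, id)
		}
	}
	return out
}
```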
+ err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } else if revocation.IssuerKeyId != nil { + if _, ok := designatedRevokers[*revocation.IssuerKeyId]; ok { + // Revocation is done by certified designated revoker, + // but we can't verify the revocation. + e.UnverifiedRevocations = append(e.UnverifiedRevocations, revocation) + } + } + } + + return e, nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + var lastErr error + for { + p, err := packets.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + sig, ok := p.(*packet.Signature) + if !ok { + // Hit a non-signature packet, so assume we're up to the next key + packets.Unread(p) + break + } + if st := sig.SigType; st != packet.SigTypeSubkeyBinding && st != packet.SigTypeSubkeyRevocation { + + // Note(maxtaco): + // We used to error out here, but instead, let's fast-forward past + // packets that are in the wrong place (like misplaced 0x13 signatures) + // until we get to one that works. For a test case, + // see TestWithBadSubkeySignaturePackets. + + continue + } + err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig) + if err != nil { + // Non valid signature, so again, no need to abandon all hope, just continue; + // make a note of the error we hit. + lastErr = errors.StructuralError("subkey signature invalid: " + err.Error()) + continue + } + switch sig.SigType { + case packet.SigTypeSubkeyBinding: + // Does the "new" sig set expiration to later date than + // "previous" sig? + if subKey.Sig == nil || subKey.Sig.ExpiresBeforeOther(sig) { + subKey.Sig = sig + } + case packet.SigTypeSubkeyRevocation: + // First writer wins + if subKey.Revocation == nil { + subKey.Revocation = sig + } + } + } + + if subKey.Sig != nil { + if err := subKey.PublicKey.ErrorIfDeprecated(); err != nil { + // Key passed signature check but is deprecated. + subKey.Sig = nil + lastErr = err + } + } + + if subKey.Sig != nil { + e.Subkeys = append(e.Subkeys, subKey) + } else { + if lastErr == nil { + lastErr = errors.StructuralError("Subkey wasn't signed; expected a 'binding' signature") + } + e.BadSubkeys = append(e.BadSubkeys, BadSubkey{Subkey: subKey, Err: lastErr}) + } + return nil +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. 
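+//
+// Illustrative sketch (editor's note, not from the upstream docs; error
+// handling elided): a typical call generates a signing primary key plus an
+// encryption subkey and writes out the private material:
+//
+//	e, err := NewEntity("Alice Example", "test", "alice@example.com", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	var buf bytes.Buffer
+//	if err := e.SerializePrivate(&buf, nil); err != nil {
+//		// handle error
+//	}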
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + currentTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + + // If the user passes in a DefaultHash via packet.Config, set the + // PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: currentTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, to +// the given Writer. For now, it must only be used on an Entity returned from +// NewEntity. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if e.PrivateKey.PrivateKey != nil { + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if e.PrivateKey.PrivateKey != nil && !config.ReuseSignatures() { + // If not reusing existing signatures, sign subkey using private key + // (subkey binding), but also sign primary key using subkey (primary + // key binding) if subkey is used for signing. 
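+			// (Editor's note) The primary-key binding is the 0x19 back
+			// signature (packet.SigTypePrimaryKeyBinding) that RFC 4880,
+			// section 5.2.1 requires for signing-capable subkeys.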
+			if subkey.Sig.FlagSign {
+				err = subkey.Sig.CrossSignKey(e.PrimaryKey, subkey.PrivateKey, config)
+				if err != nil {
+					return err
+				}
+			}
+			err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
+			if err != nil {
+				return
+			}
+		}
+
+		if subkey.Revocation != nil {
+			err = subkey.Revocation.Serialize(w)
+			if err != nil {
+				return
+			}
+		}
+
+		err = subkey.Sig.Serialize(w)
+		if err != nil {
+			return
+		}
+	}
+	return nil
+}
+
+// Serialize writes the public part of the given Entity to w. (No private
+// key material will be output.)
+func (e *Entity) Serialize(w io.Writer) error {
+	err := e.PrimaryKey.Serialize(w)
+	if err != nil {
+		return err
+	}
+	for _, ident := range e.Identities {
+		err = ident.UserId.Serialize(w)
+		if err != nil {
+			return err
+		}
+		err = ident.SelfSignature.Serialize(w)
+		if err != nil {
+			return err
+		}
+		for _, sig := range ident.Signatures {
+			err = sig.Serialize(w)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	for _, subkey := range e.Subkeys {
+		err = subkey.PublicKey.Serialize(w)
+		if err != nil {
+			return err
+		}
+
+		if subkey.Revocation != nil {
+			err = subkey.Revocation.Serialize(w)
+			if err != nil {
+				return err
+			}
+		}
+		err = subkey.Sig.Serialize(w)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SignIdentity adds a signature to e, from signer, attesting that identity is
+// associated with e. The provided identity must already be an element of
+// e.Identities and the private key of signer must have been decrypted if
+// necessary.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
+	if signer.PrivateKey == nil {
+		return errors.InvalidArgumentError("signing Entity must have a private key")
+	}
+	if signer.PrivateKey.Encrypted {
+		return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
+	}
+	ident, ok := e.Identities[identity]
+	if !ok {
+		return errors.InvalidArgumentError("given identity string not found in Entity")
+	}
+
+	sig := &packet.Signature{
+		SigType:      packet.SigTypeGenericCert,
+		PubKeyAlgo:   signer.PrivateKey.PubKeyAlgo,
+		Hash:         config.Hash(),
+		CreationTime: config.Now(),
+		IssuerKeyId:  &signer.PrivateKey.KeyId,
+	}
+	if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
+		return err
+	}
+	ident.Signatures = append(ident.Signatures, sig)
+	return nil
+}
+
+// CopySubkeyRevocations copies subkey revocations from the src Entity over
+// to the receiver entity. We need this because `gpg --export-secret-key` does
+// not appear to output subkey revocations. In this case we need to manually
+// merge with the output of `gpg --export`.
+func (e *Entity) CopySubkeyRevocations(src *Entity) {
+	m := make(map[[20]byte]*packet.Signature)
+	for _, subkey := range src.Subkeys {
+		if subkey.Revocation != nil {
+			m[subkey.PublicKey.Fingerprint] = subkey.Revocation
+		}
+	}
+	for i, subkey := range e.Subkeys {
+		if r := m[subkey.PublicKey.Fingerprint]; r != nil {
+			e.Subkeys[i].Revocation = r
+		}
+	}
+}
+
+// FindVerifiedDesignatedRevoke tries to confirm any designated revocation
+// of the entity. For this function to work, the revocation issuer's key
+// must be present in the keyring. The first successfully verified
+// designated revocation is returned, along with the key that verified it.
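+//
+// Hedged usage sketch (editor's note; `keyring` stands for any KeyRing
+// implementation, e.g. an EntityList from ReadArmoredKeyRing):
+//
+//	sig, key := FindVerifiedDesignatedRevoke(keyring, entity)
+//	if sig != nil {
+//		// entity was revoked by the designated revoker that owns `key`.
+//	}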
+func FindVerifiedDesignatedRevoke(keyring KeyRing, entity *Entity) (*packet.Signature, *Key) {
+	for _, sig := range entity.UnverifiedRevocations {
+		if sig.IssuerKeyId == nil {
+			continue
+		}
+
+		issuerKeyId := *sig.IssuerKeyId
+		issuerFingerprint := sig.IssuerFingerprint
+		keys := keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign)
+		if len(keys) == 0 {
+			continue
+		}
+		for _, key := range keys {
+			err := key.PublicKey.VerifyRevocationSignature(entity.PrimaryKey, sig)
+			if err == nil {
+				return sig, &key
+			}
+		}
+	}
+
+	return nil, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go
new file mode 100644
index 00000000000..f023fe5337c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go
@@ -0,0 +1,124 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"compress/bzip2"
+	"compress/flate"
+	"compress/zlib"
+	"io"
+	"strconv"
+
+	"github.com/keybase/go-crypto/openpgp/errors"
+)
+
+// Compressed represents a compressed OpenPGP packet. The decompressed contents
+// will contain more OpenPGP packets. See RFC 4880, section 5.6.
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
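+//
+// Hedged usage sketch (editor's note; assumes w is an io.WriteCloser and
+// plaintext a []byte): compressed data wraps further packets, typically a
+// literal data packet, and the writers must be closed innermost-first:
+//
+//	wc, err := SerializeCompressed(w, CompressionZLIB, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	lit, err := SerializeLiteral(wc, true, "", 0)
+//	if err != nil {
+//		// handle error
+//	}
+//	lit.Write(plaintext) // error handling elided
+//	lit.Close()
+//	wc.Close()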
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
new file mode 100644
index 00000000000..f4125e189dc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+	// ReuseSignaturesOnSerialize tells us to reuse existing signatures
+	// when writing serialized output.
+	ReuseSignaturesOnSerialize bool
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now()
+	}
+	return c.Time()
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
+
+func (c *Config) ReuseSignatures() bool {
+	return c != nil && c.ReuseSignaturesOnSerialize
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
new file mode 100644
index 00000000000..41de661d70a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
@@ -0,0 +1,104 @@
+package packet
+
+import (
+	"bytes"
+	"io"
+	"math/big"
+
+	"github.com/keybase/go-crypto/openpgp/ecdh"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+)
+
+// ECDHKdfParams generates the KDF parameters sequence for the given
+// PublicKey. See https://tools.ietf.org/html/rfc6637#section-8
+func ECDHKdfParams(pub *PublicKey) []byte {
+	buf := new(bytes.Buffer)
+	oid := pub.ec.oid
+	buf.WriteByte(byte(len(oid)))
+	buf.Write(oid)
+	buf.WriteByte(18) // ECDH TYPE
+	pub.ecdh.serialize(buf)
+	buf.WriteString("Anonymous Sender    ")
+	buf.Write(pub.Fingerprint[:])
+	return buf.Bytes()
+}
+
+func decryptKeyECDH(priv *PrivateKey, X, Y *big.Int, C []byte) (out []byte, err error) {
+	ecdhpriv, ok := priv.PrivateKey.(*ecdh.PrivateKey)
+	if !ok {
+		return nil, errors.InvalidArgumentError("bad internal ECDH key")
+	}
+
+	Sx := ecdhpriv.DecryptShared(X, Y)
+
+	kdfParams := ECDHKdfParams(&priv.PublicKey)
+	hash, ok := s2k.HashIdToHash(byte(priv.ecdh.KdfHash))
+	if !ok {
+		return nil, errors.InvalidArgumentError("invalid hash id in private key")
+	}
+
+	key := ecdhpriv.KDF(Sx, kdfParams, hash)
+	keySize := CipherFunction(priv.ecdh.KdfAlgo).KeySize()
+
+	decrypted, err := ecdh.AESKeyUnwrap(key[:keySize], C)
+	if err != nil {
+		return nil, err
+	}
+
+	// We have to "read ahead" to discover the real length of the
+	// encryption key and properly unpad the buffer.
+	cipherFunc := CipherFunction(decrypted[0])
+	// +3 bytes = 1-byte cipher id and 2-byte checksum.
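+	// (Editor's note) The unwrapped buffer is laid out as:
+	//
+	//	cipher id (1) | session key (KeySize) | checksum (2) | padding
+	//
+	// so UnpadBuffer is told to expect keySize+3 bytes of real data.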
+ out = ecdh.UnpadBuffer(decrypted, cipherFunc.KeySize()+3) + if out == nil { + return nil, errors.InvalidArgumentError("invalid padding while ECDH") + } + return out, nil +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *PublicKey, keyBlock []byte) error { + ecdhpub := pub.PublicKey.(*ecdh.PublicKey) + kdfParams := ECDHKdfParams(pub) + + hash, ok := s2k.HashIdToHash(byte(pub.ecdh.KdfHash)) + if !ok { + return errors.InvalidArgumentError("invalid hash id in private key") + } + + kdfKeySize := CipherFunction(pub.ecdh.KdfAlgo).KeySize() + Vx, Vy, C, err := ecdhpub.Encrypt(rand, kdfParams, keyBlock, hash, kdfKeySize) + if err != nil { + return err + } + + mpis, mpiBitLen := ecdh.Marshal(ecdhpub.Curve, Vx, Vy) + + packetLen := len(header) /* header length in bytes */ + packetLen += 2 /* mpi length in bits */ + len(mpis) + packetLen += 1 /* ciphertext size in bytes */ + len(C) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + + _, err = w.Write([]byte{byte(mpiBitLen >> 8), byte(mpiBitLen)}) + if err != nil { + return err + } + + _, err = w.Write(mpis[:]) + if err != nil { + return err + } + + w.Write([]byte{byte(len(C))}) + w.Write(C[:]) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 00000000000..2a6a04168a4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,231 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" + "math/big" + "strconv" + + "github.com/keybase/go-crypto/openpgp/ecdh" + "github.com/keybase/go-crypto/openpgp/elgamal" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/rsa" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. 
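+//
+// Hedged usage sketch (editor's note; assumes r is an io.Reader positioned
+// at the packet and priv is the matching, already-decrypted *PrivateKey):
+//
+//	p, _ := Read(r) // error handling elided
+//	if ek, ok := p.(*EncryptedKey); ok && ek.KeyId == priv.KeyId {
+//		if err := ek.Decrypt(priv, nil); err == nil {
+//			// ek.CipherFunc and ek.Key are now valid.
+//		}
+//	}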
+type EncryptedKey struct {
+	KeyId      uint64
+	Algo       PublicKeyAlgorithm
+	CipherFunc CipherFunction // only valid after a successful Decrypt
+	Key        []byte         // only valid after a successful Decrypt
+
+	encryptedMPI1, encryptedMPI2 parsedMPI
+	ecdh_C                       []byte
+}
+
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
+	var buf [10]byte
+	_, err = readFull(r, buf[:])
+	if err != nil {
+		return
+	}
+	if buf[0] != encryptedKeyVersion {
+		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+	}
+	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
+	e.Algo = PublicKeyAlgorithm(buf[9])
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+	case PubKeyAlgoElGamal:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+	case PubKeyAlgoECDH:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return err
+		}
+		_, err = readFull(r, buf[:1]) // read C len (1 byte)
+		if err != nil {
+			return err
+		}
+		e.ecdh_C = make([]byte, int(buf[0]))
+		_, err = readFull(r, e.ecdh_C)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	_, err = consumeAll(r)
+	return err
+}
+
+func checksumKeyMaterial(key []byte) uint16 {
+	var checksum uint16
+	for _, v := range key {
+		checksum += uint16(v)
+	}
+	return checksum
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+	if priv == nil || priv.PrivateKey == nil {
+		return errors.InvalidArgumentError("attempting to decrypt with nil PrivateKey")
+	}
+
+	var err error
+	var b []byte
+
+	// TODO(agl): use session key decryption routines here to avoid
+	// padding oracle attacks.
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		k := priv.PrivateKey.(*rsa.PrivateKey)
+		b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
+	case PubKeyAlgoElGamal:
+		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
+		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
+		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+	case PubKeyAlgoECDH:
+		// Note: Unmarshal checks if the point is on the curve.
+		c1, c2 := ecdh.Unmarshal(priv.PrivateKey.(*ecdh.PrivateKey).Curve, e.encryptedMPI1.bytes)
+		if c1 == nil {
+			return errors.InvalidArgumentError("failed to parse EC point for encryption key")
+		}
+		b, err = decryptKeyECDH(priv, c1, c2, e.ecdh_C)
+	default:
+		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+
+	if err != nil {
+		return err
+	}
+
+	e.CipherFunc = CipherFunction(b[0])
+	e.Key = b[1 : len(b)-2]
+	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
+	checksum := checksumKeyMaterial(e.Key)
+	if checksum != expectedChecksum {
+		return errors.StructuralError("EncryptedKey checksum incorrect")
+	}
+
+	return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
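+// (Editor's note) Only RSA and ElGamal encrypted keys can be re-serialized
+// here; for other algorithms, including ECDH, Serialize returns an
+// InvalidArgumentError, as the switch below shows.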
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub, keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
new file mode 100644
index 00000000000..1a9ec6e51e8
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// LiteralData represents a literal data packet: the raw contents of a file,
+// which an OpenPGP message typically carries in compressed and/or encrypted
+// form. See RFC 4880, section 5.9.
+type LiteralData struct {
+	IsBinary bool
+	FileName string
+	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+	Body     io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
+func (l *LiteralData) ForEyesOnly() bool {
+	return l.FileName == "_CONSOLE"
+}
+
+func (l *LiteralData) parse(r io.Reader) (err error) {
+	var buf [256]byte
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	l.IsBinary = buf[0] == 'b'
+	fileNameLen := int(buf[1])
+
+	_, err = readFull(r, buf[:fileNameLen])
+	if err != nil {
+		return
+	}
+
+	l.FileName = string(buf[:fileNameLen])
+
+	_, err = readFull(r, buf[:4])
+	if err != nil {
+		return
+	}
+
+	l.Time = binary.BigEndian.Uint32(buf[:4])
+	l.Body = r
+	return
+}
+
+// SerializeLiteral serializes a literal data packet to w and returns a
+// WriteCloser to which the data itself can be written and which MUST be closed
+// on completion. The fileName is truncated to 255 bytes.
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
+	var buf [4]byte
+	buf[0] = 't'
+	if isBinary {
+		buf[0] = 'b'
+	}
+	if len(fileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+
+	inner, err := serializeStreamHeader(w, packetTypeLiteralData)
+	if err != nil {
+		return
+	}
+
+	_, err = inner.Write(buf[:2])
+	if err != nil {
+		return
+	}
+	_, err = inner.Write([]byte(fileName))
+	if err != nil {
+		return
+	}
+	binary.BigEndian.PutUint32(buf[:], time)
+	_, err = inner.Write(buf[:])
+	if err != nil {
+		return
+	}
+
+	plaintext = inner
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
new file mode 100644
index 00000000000..ce2a33a547c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
@@ -0,0 +1,143 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
+
+package packet
+
+import (
+	"crypto/cipher"
+)
+
+type ocfbEncrypter struct {
+	b       cipher.Block
+	fre     []byte
+	outUsed int
+}
+
+// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
+// performed.
+type OCFBResyncOption bool
+
+const (
+	OCFBResync   OCFBResyncOption = true
+	OCFBNoResync OCFBResyncOption = false
+)
+
+// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
+// cipher feedback mode using the given cipher.Block, and an initial amount of
+// ciphertext.
randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. +func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000000..af404bb10e9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,74 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000000..cdeea012f2c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. 
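+//
+// Hedged usage sketch (editor's note): OpaquePackets usually come from an
+// OpaqueReader, and Parse upgrades each one where possible:
+//
+//	or := NewOpaqueReader(r)
+//	for op, err := or.Next(); err == nil; op, err = or.Next() {
+//		p, _ := op.Parse() // p is the OpaquePacket itself if the tag is unknown
+//		_ = p
+//	}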
+func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go new file mode 100644 index 00000000000..eb61eda9474 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,576 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/keybase/go-crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/elliptic" + "io" + "math/big" + + "github.com/keybase/go-crypto/cast5" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/rsa" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
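+//
+// Worked example of the new-format length encoding handled here and in
+// serializeHeader (editor's note; values per RFC 4880, section 4.2.2):
+//
+//	body length 100    -> one octet:   0x64
+//	body length 1000   -> two octets:  0xC3 0x28  ((0xC3-192)<<8 + 0x28 + 192)
+//	body length 100000 -> five octets: 0xFF 0x00 0x01 0x86 0xA0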
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } + + panic("unreachable") +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. 
A bufio.Reader at the original position of the io.Reader +// is returned. +func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 + SigTypeIdentityRevocation = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + + PubKeyAlgoBadElGamal PublicKeyAlgorithm = 20 // Reserved (deprecated, formerly ElGamal Encrypt or Sign) + // RFC -1 + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. 
+func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} + +func WritePaddedBigInt(w io.Writer, length int, X *big.Int) (n int, err error) { + bytes := X.Bytes() + n1, err := w.Write(make([]byte, length-len(bytes))) + if err != nil { + return n1, err + } + n2, err := w.Write(bytes) + if err != nil { + return n2, err + } + return (n1 + n2), err +} + +// Minimum number of bytes to fit the curve coordinates. All +// coordinates have to be 0-padded to this length. +func mpiPointByteLength(curve elliptic.Curve) int { + return (curve.Params().P.BitLen() + 7) / 8 +} + +// writeBig serializes a *big.Int to w. 
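+//
+// Editor's worked example of the MPI encoding used by writeMPI/readMPI:
+// the value 511 has a bit length of 9, so it is serialized as the four
+// octets 0x00 0x09 0x01 0xFF (two length octets, then the magnitude).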
+func writeBig(w io.Writer, i *big.Int) error {
+	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key's modulus.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP  CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
new file mode 100644
index 00000000000..7388ead809b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
@@ -0,0 +1,564 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/big"
+	"strconv"
+	"time"
+
+	"github.com/keybase/go-crypto/ed25519"
+	"github.com/keybase/go-crypto/openpgp/ecdh"
+	"github.com/keybase/go-crypto/openpgp/elgamal"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+	"github.com/keybase/go-crypto/rsa"
+)
+
+// PrivateKey represents a possibly encrypted private key. See RFC 4880,
+// section 5.5.3.
+type PrivateKey struct {
+	PublicKey
+	Encrypted     bool // if true then the private key is unavailable until Decrypt has been called.
+	encryptedData []byte
+	cipher        CipherFunction
+	s2k           func(out, in []byte)
+	PrivateKey    interface{} // An *rsa.PrivateKey, *dsa.PrivateKey, *ecdsa.PrivateKey, *elgamal.PrivateKey or *ecdh.PrivateKey.
+ sha1Checksum bool + iv []byte + s2kHeader []byte +} + +type EdDSAPrivateKey struct { + PrivateKey + seed parsedMPI +} + +func (e *EdDSAPrivateKey) Seed() []byte { + return e.seed.bytes +} + +func (e *EdDSAPrivateKey) Sign(digest []byte) (R, S []byte, err error) { + r := bytes.NewReader(e.seed.bytes) + publicKey, privateKey, err := ed25519.GenerateKey(r) + if err != nil { + return nil, nil, err + } + + if !bytes.Equal(publicKey, e.PublicKey.edk.p.bytes[1:]) { // [1:] because [0] is 0x40 mpi header + return nil, nil, errors.UnsupportedError("EdDSA: Private key does not match public key.") + } + + sig := ed25519.Sign(privateKey, digest) + + sigLen := ed25519.SignatureSize / 2 + return sig[:sigLen], sig[sigLen:], nil +} + +func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(currentTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + // S2K == nil implies that we got a "GNU Dummy" S2K. For instance, + // because our master secret key is on a USB key in a vault somewhere. + // In that case, there is no further data to consume here. + if pk.s2k == nil { + pk.Encrypted = false + return + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +// Encrypt is the counterpart to the Decrypt() method below. It encrypts +// the private key with the provided passphrase. If config is nil, then +// the standard, and sensible, defaults apply. 
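+//
+// Illustrative usage (passphrase is a []byte and out an io.Writer; both assumed):
+//
+//	if err := pk.Encrypt(passphrase, nil); err != nil {
+//		return err
+//	}
+//	if err := pk.Serialize(out); err != nil {
+//		return err
+//	}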
+// +// A key will be derived from the given passphrase using S2K Specifier +// Type 3 (Iterated + Salted, see RFC-4880 Sec. 3.7.1.3). This choice +// is hardcoded in s2k.Serialize(). S2KCount is hardcoded to 0, which is +// equivalent to 65536. And the hash algorithm for key-derivation can be +// set with config. The encrypted PrivateKey, using the algorithm specified +// in config (if provided), is written out to the encryptedData member. +// When Serialize() is called, this encryptedData member will be +// serialized, using S2K Usage value of 254, and thus SHA1 checksum. +func (pk *PrivateKey) Encrypt(passphrase []byte, config *Config) (err error) { + if pk.PrivateKey == nil { + return errors.InvalidArgumentError("there is no private key to encrypt") + } + + pk.sha1Checksum = true + pk.cipher = config.Cipher() + s2kConfig := s2k.Config{ + Hash: config.Hash(), + S2KCount: 0, + } + s2kBuf := bytes.NewBuffer(nil) + derivedKey := make([]byte, pk.cipher.KeySize()) + err = s2k.Serialize(s2kBuf, derivedKey, config.Random(), passphrase, &s2kConfig) + if err != nil { + return err + } + + pk.s2kHeader = s2kBuf.Bytes() + // No good way to set pk.s2k but to call s2k.Parse(), + // even though we have all the information here, but + // most of the functions needed are private to s2k. + pk.s2k, err = s2k.Parse(s2kBuf) + pk.iv = make([]byte, pk.cipher.blockSize()) + if _, err = config.Random().Read(pk.iv); err != nil { + return err + } + + privateKeyBuf := bytes.NewBuffer(nil) + if err = pk.serializePrivateKey(privateKeyBuf); err != nil { + return err + } + + checksum := sha1.Sum(privateKeyBuf.Bytes()) + if _, err = privateKeyBuf.Write(checksum[:]); err != nil { + return err + } + + pkData := privateKeyBuf.Bytes() + block := pk.cipher.new(derivedKey) + pk.encryptedData = make([]byte, len(pkData)) + cfb := cipher.NewCFBEncrypter(block, pk.iv) + cfb.XORKeyStream(pk.encryptedData, pkData) + pk.Encrypted = true + return nil +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) + if err != nil { + return + } + + privateKeyBuf := bytes.NewBuffer(nil) + + if pk.PrivateKey == nil { + _, err = buf.Write([]byte{ + 254, // SHA-1 Convention + 9, // Encryption scheme (AES256) + 101, // GNU Extensions + 2, // Hash value (SHA1) + 'G', 'N', 'U', // "GNU" as a string + 1, // Extension type 1001 (minus 1000) + }) + } else if pk.Encrypted { + _, err = buf.Write([]byte{ + 254, // SHA-1 Convention + byte(pk.cipher), // Encryption scheme + }) + if err != nil { + return err + } + if _, err = buf.Write(pk.s2kHeader); err != nil { + return err + } + if _, err = buf.Write(pk.iv); err != nil { + return err + } + if _, err = privateKeyBuf.Write(pk.encryptedData); err != nil { + return err + } + } else { + buf.WriteByte(0 /* no encryption */) + if err = pk.serializePrivateKey(privateKeyBuf); err != nil { + return err + } + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + totalLen := len(contents) + len(privateKeyBytes) + if !pk.Encrypted { + totalLen += 2 + } + err = serializeHeader(w, ptype, totalLen) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + if len(privateKeyBytes) > 0 && !pk.Encrypted { + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + 
checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + } + + return +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + case *EdDSAPrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + + return err +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *EdDSAPrivateKey) error { + return writeMPI(w, priv.seed.bitLength, priv.seed.bytes) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + // For GNU Dummy S2K, there's no key here, so don't do anything. 
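+	// (parse already clears pk.Encrypted for GNU-dummy keys, so this nil
+	// check is defensive.)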
+ if pk.s2k == nil { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + case PubKeyAlgoBadElGamal: + return errors.UnsupportedError("parsing el-gamal sign-or-encrypt privatekeys is unsupported") + default: + return errors.UnsupportedError("cannot parse this private key type") + } +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + priv := new(ecdh.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + d, _, err := 
readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(d) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := new(ecdsa.PrivateKey) + ecdsaPriv.PublicKey = *ecdsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + ecdsaPriv.D = new(big.Int).SetBytes(d) + pk.PrivateKey = ecdsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPriv := new(EdDSAPrivateKey) + eddsaPriv.PublicKey = pk.PublicKey + + buf := bytes.NewBuffer(data) + eddsaPriv.seed.bytes, eddsaPriv.seed.bitLength, err = readMPI(buf) + if err != nil { + return err + } + + if bLen := len(eddsaPriv.seed.bytes); bLen != 32 { // 32 bytes private part of ed25519 key. + return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA private key length: %d", bLen)) + } + + pk.PrivateKey = eddsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000000..372a183cb75 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,993 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/keybase/go-crypto/brainpool" + "github.com/keybase/go-crypto/curve25519" + "github.com/keybase/go-crypto/ed25519" + "github.com/keybase/go-crypto/openpgp/ecdh" + "github.com/keybase/go-crypto/openpgp/elgamal" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" + "github.com/keybase/go-crypto/rsa" +) + +var ( + // NIST curve P-224 + oidCurveP224 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x21} + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} + // Brainpool curve P-256r1 + oidCurveP256r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07} + // Brainpool curve P-384r1 + oidCurveP384r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B} + // Brainpool curve P-512r1 + oidCurveP512r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D} + // EdDSA + oidEdDSA []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01} + // cv25519 + oidCurve25519 []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01} +) + +const maxOIDLength = 10 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. 
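+// The wire form is a one-octet OID length, the curve OID itself, and the
+// public point as an MPI (typically 0x04 || X || Y; cv25519 instead uses a
+// 0x40-prefixed native point).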
+type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +type edDSAkey struct { + ecdsaKey +} + +func copyFrontFill(dst, src []byte, length int) int { + if srcLen := len(src); srcLen < length { + return copy(dst[length-srcLen:], src[:]) + } else { + return copy(dst[:], src[:]) + } +} + +func (e *edDSAkey) Verify(payload []byte, r parsedMPI, s parsedMPI) bool { + const halfSigSize = ed25519.SignatureSize / 2 + var sig [ed25519.SignatureSize]byte + + // NOTE: The first byte is 0x40 - MPI header + // TODO: Maybe clean the code up and use 0x40 as a header when + // reading and keep only actual number in p field. Find out how + // other MPIs are stored. + key := e.p.bytes[1:] + + // Note: it may happen that R + S do not form 64-byte signature buffer that + // ed25519 expects, but because we copy it over to an array of exact size, + // we will always pass correctly sized slice to Verify. Slice too short + // would make ed25519 panic(). + copyFrontFill(sig[:halfSigSize], r.bytes, halfSigSize) + copyFrontFill(sig[halfSigSize:], s.bytes, halfSigSize) + + return ed25519.Verify(key, payload, sig[:]) +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return err +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func getCurveByOid(oid []byte) elliptic.Curve { + switch { + case bytes.Equal(oid, oidCurveP224): + return elliptic.P224() + case bytes.Equal(oid, oidCurveP256): + return elliptic.P256() + case bytes.Equal(oid, oidCurveP384): + return elliptic.P384() + case bytes.Equal(oid, oidCurveP521): + return elliptic.P521() + case bytes.Equal(oid, oidCurveP256r1): + return brainpool.P256r1() + case bytes.Equal(oid, oidCurveP384r1): + return brainpool.P384r1() + case bytes.Equal(oid, oidCurveP512r1): + return brainpool.P512r1() + case bytes.Equal(oid, oidCurve25519): + return curve25519.Cv25519() + default: + return nil + } +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c = getCurveByOid(f.oid) + // Curve25519 should not be used in ECDSA. + if c == nil || bytes.Equal(f.oid, oidCurve25519) { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + // Note: Unmarshal already checks if point is on curve. + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) newECDH() (*ecdh.PublicKey, error) { + var c = getCurveByOid(f.oid) + if c == nil { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + // ecdh.Unmarshal handles unmarshaling for all curve types. 
It + // also checks if point is on curve. + x, y := ecdh.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdh.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. +type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf + + // EdDSA fields (no RFC available), uses ecdsa scaffolding + edk *edDSAkey +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func FromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +func FromBytes(bytes []byte) parsedMPI { + return parsedMPI{ + bytes: bytes, + bitLength: uint16(8 * len(bytes)), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: FromBig(pub.N), + e: FromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: FromBig(pub.P), + q: FromBig(pub.Q), + g: FromBig(pub.G), + y: FromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// check EdDSA public key material. +// There is currently no RFC for it, but it doesn't mean it's not +// implemented or in use. 
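+// A well-formed key here is 33 bytes: a 0x40 "native point" header octet
+// followed by the 32-byte ed25519 public key.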
+func (e *edDSAkey) check() error { + if !bytes.Equal(e.oid, oidEdDSA) { + return errors.UnsupportedError(fmt.Sprintf("Bad OID for EdDSA key: %v", e.oid)) + } + if bLen := len(e.p.bytes); bLen != 33 { // 32 bytes for ed25519 key and 1 byte for 0x40 header + return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA public key length: %d", bLen)) + } + return nil +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. +func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: FromBig(pub.P), + g: FromBig(pub.G), + y: FromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func getCurveOid(curve elliptic.Curve) (res []byte, err error) { + switch curve { + case elliptic.P224(): + res = oidCurveP224 + case elliptic.P256(): + res = oidCurveP256 + case elliptic.P384(): + res = oidCurveP384 + case elliptic.P521(): + res = oidCurveP521 + case brainpool.P256r1(): + res = oidCurveP256r1 + case brainpool.P384r1(): + res = oidCurveP384r1 + case brainpool.P512r1(): + res = oidCurveP512r1 + case curve25519.Cv25519(): + res = oidCurve25519 + default: + err = errors.UnsupportedError("unknown curve") + } + return +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + oid, _ := getCurveOid(pub.Curve) + pk.ec.oid = oid + bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bytes = bs + pk.ec.p.bitLength = uint16(bitLen) + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + ec: new(ecdsaKey), + } + oid, _ := getCurveOid(pub.Curve) + pk.ec.oid = oid + bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bytes = bs + pk.ec.p.bitLength = uint16(bitLen) + + hashbyte, _ := s2k.HashToHashId(crypto.SHA512) + pk.ecdh = &ecdhKdf{ + KdfHash: kdfHashFunction(hashbyte), + KdfAlgo: kdfAlgorithm(CipherAES256), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoEdDSA: + pk.edk = new(edDSAkey) + if err = pk.edk.parse(r); err != nil { + return err + } + err = pk.edk.check() + if err == nil { + pk.PublicKey = ed25519.PublicKey(pk.edk.p.bytes[1:]) + } + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + pk.PublicKey, err = pk.ec.newECDH() + case PubKeyAlgoBadElGamal: + // Key has ElGamal format but 
nil-implementation - it will + // load but it's not possible to do any operations using this + // key. + err = pk.parseElGamal(r) + if err != nil { + pk.PublicKey = nil + } + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 7 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + // Warning: incompatibility with crypto/rsa: keybase fork uses + // int64 public exponents instead of int32. + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int64(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
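+// The prefix is the octet 0x99 followed by the two-octet big-endian length
+// of the public key packet body.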
+func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + case PubKeyAlgoEdDSA: + pLength += uint16(pk.edk.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + case PubKeyAlgoEdDSA: + length += pk.edk.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoEdDSA: + return pk.edk.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
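+//
+// Illustrative use for verifying a signature over message bytes (message is
+// assumed):
+//
+//	h := sig.Hash.New()
+//	h.Write(message)
+//	err := pk.VerifySignature(h, sig)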
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + // NOTE(maxtaco) 2016-08-22 + // + // We used to do this: + // + // if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + // return errors.SignatureError("hash tag doesn't match") + // } + // + // But don't do anything in this case. Some GPGs generate bad + // 2-byte hash prefixes, but GPG also doesn't seem to care on + // import. See BrentMaxwell's key. I think it's safe to disable + // this check! + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + if !pk.edk.Verify(hashBytes, sig.EdDSASigR, sig.EdDSASigS) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } + panic("unreachable") +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
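+		// DSA uses only the leftmost bits of the digest, up to the bit length
+		// of the subgroup order q.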
+ subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } + panic("unreachable") +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + updateKeySignatureHash(pk, signed, h) + + return +} + +// updateKeySignatureHash does the actual hash updates for keySignatureHash. +func updateKeySignatureHash(pk, signed signingKey, h hash.Hash) { + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + + // BUG(maxtaco) + // + // We should check for more than FlagsSign here, because if + // you read keys.go, we can sometimes use signing subkeys even if they're + // not explicitly flagged as such. However, so doing fails lots of currently + // working tests, so I'm not going to do much here. + // + // In other words, we should have this disjunction in the condition above: + // + // || (!sig.FlagsValid && pk.PubKeyAlgo.CanSign()) { + // + + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
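+// The hash covers only the revoked key's public key packet; see
+// keyRevocationHash above.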
+func (pk *PublicKey) VerifyRevocationSignature(revokedKey *PublicKey, sig *Signature) (err error) { + h, err := keyRevocationHash(revokedKey, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +type teeHash struct { + h hash.Hash +} + +func (t teeHash) Write(b []byte) (n int, err error) { + fmt.Printf("hash -> %s %+v\n", string(b), b) + return t.h.Write(b) +} +func (t teeHash) Sum(b []byte) []byte { return t.h.Sum(b) } +func (t teeHash) Reset() { t.h.Reset() } +func (t teeHash) Size() int { return t.h.Size() } +func (t teeHash) BlockSize() int { return t.h.BlockSize() } + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + updateUserIdSignatureHash(id, pk, h) + + return +} + +// updateUserIdSignatureHash does the actual hash updates for +// userIdSignatureHash. +func updateUserIdSignatureHash(id string, pk *PublicKey, h hash.Hash) { + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. Used for +// displaying key information, actual buffers and BigInts inside may +// have non-matching different size if the key is invalid. 
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal: + bitLength = pk.p.bitLength + case PubKeyAlgoECDH: + ecdhPublicKey := pk.PublicKey.(*ecdh.PublicKey) + bitLength = uint16(ecdhPublicKey.Curve.Params().BitSize) + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + bitLength = uint16(ecdsaPublicKey.Curve.Params().BitSize) + case PubKeyAlgoEdDSA: + // EdDSA only support ed25519 curves right now, just return + // the length. Also, we don't have any PublicKey.Curve object + // to look the size up from. + bitLength = 256 + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +func (pk *PublicKey) ErrorIfDeprecated() error { + switch pk.PubKeyAlgo { + case PubKeyAlgoBadElGamal: + return errors.DeprecatedKeyError("ElGamal Encrypt or Sign (algo 20) is deprecated") + default: + return nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 00000000000..f75cbeabcf7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,282 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/md5" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/rsa" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." 
+func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: FromBig(pub.N), + e: FromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 7 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + // Warning: incompatibility with crypto/rsa: keybase fork uses + // int64 public exponents instead of int32. + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int64(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
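+// The v3 body is: a version octet (3), a four-octet creation time, a
+// two-octet expiration in days, an algorithm octet, and the RSA MPIs n and e.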
+func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } + panic("unreachable") +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). 
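+// For v3 keys the ID is the low 64 bits of the RSA modulus rather than a
+// slice of the fingerprint.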
+func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go new file mode 100644 index 00000000000..957b3b897eb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go new file mode 100644 index 00000000000..383a8a6a39d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,923 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/binary" + "fmt" + "hash" + "io" + "strconv" + "time" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" + "github.com/keybase/go-crypto/rsa" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signer can be implemented by application code to do actual signing. +type Signer interface { + hash.Hash + Sign(sig *Signature) error + KeyId() uint64 + PublicKeyAlgo() PublicKeyAlgorithm +} + +// RevocationKey represents designated revoker packet. See RFC 4880 +// section 5.2.3.15 for details. +type RevocationKey struct { + Class byte + PublicKeyAlgo PublicKeyAlgorithm + Fingerprint []byte +} + +// KeyFlagBits holds boolean whether any usage flags were provided in +// the signature and BitField with KeyFlag* flags. +type KeyFlagBits struct { + Valid bool + BitField byte +} + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + EdDSASigR, EdDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredKeyServer string + IssuerKeyId *uint64 + IsPrimaryId *bool + IssuerFingerprint []byte + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // PolicyURI is optional. See RFC 4880, Section 5.2.3.20 for details + PolicyURI string + + // Regex is a regex that can match a PGP UID. See RFC 4880, 5.2.3.14 for details + Regex string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + // StubbedOutCriticalError is not fail-stop, since it shouldn't break key parsing + // when appearing in WoT-style cross signatures. But it should prevent a signature + // from being applied to a primary or subkey. + StubbedOutCriticalError error + + // DesignaterRevoker will be present if this signature certifies a + // designated revoking key id (3rd party key that can sign + // revocation for this key). 
+ DesignatedRevoker *RevocationKey + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoEdDSA: + sig.EdDSASigR.bytes, sig.EdDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.EdDSASigS.bytes, sig.EdDSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
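+// Each subpacket is a length-prefixed (type, body) record; the loop below
+// consumes records until the buffer is exhausted. A signature whose hashed
+// area carries no creation-time subpacket is rejected as structurally
+// invalid.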
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + regularExpressionSubpacket signatureSubpacketType = 6 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + revocationKey signatureSubpacketType = 12 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + prefKeyServerSubpacket signatureSubpacketType = 24 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyURISubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprint signatureSubpacketType = 33 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case 
prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + if subpacket[0] != 0 { + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. 
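+		// The version check at the top of parse rejects anything
+		// other than v4, so a non-v4 embedded signature fails here
+		// rather than being misread.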
+ if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case policyURISubpacket: + // See RFC 4880, Section 5.2.3.20 + sig.PolicyURI = string(subpacket[:]) + case regularExpressionSubpacket: + sig.Regex = string(subpacket[:]) + if isCritical { + sig.StubbedOutCriticalError = errors.UnsupportedError("regex support is stubbed out") + } + case prefKeyServerSubpacket: + sig.PreferredKeyServer = string(subpacket[:]) + case issuerFingerprint: + // The first byte is how many bytes the fingerprint is, but we'll just + // read until the end of the subpacket, so we'll ignore it. + sig.IssuerFingerprint = append([]byte{}, subpacket[1:]...) + case revocationKey: + // Authorizes the specified key to issue revocation signatures + // for a key. + + // TODO: Class octet must have bit 0x80 set. If the bit 0x40 + // is set, then this means that the revocation information is + // sensitive. + sig.DesignatedRevoker = &RevocationKey{ + Class: subpacket[0], + PublicKeyAlgo: PublicKeyAlgorithm(subpacket[1]), + Fingerprint: append([]byte{}, subpacket[2:]...), + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// ExpiresBeforeOther checks if other signature has expiration at +// later date than sig. 
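+// A signature with a nil KeyLifetimeSecs is treated as never expiring, and
+// therefore never expires before the other signature.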
+func (sig *Signature) ExpiresBeforeOther(other *Signature) bool { + if sig.KeyLifetimeSecs == nil { + // This sig never expires, or has infinitely long expiration + // time. + return false + } else if other.KeyLifetimeSecs == nil { + // This sig expires at some non-infinite point, but the other + // sig never expires. + return true + } + + getExpiryDate := func(s *Signature) time.Time { + return s.CreationTime.Add(time.Duration(*s.KeyLifetimeSecs) * time.Second) + } + + return getExpiryDate(other).After(getExpiryDate(sig)) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + signer, hashIsSigner := h.(Signer) + + if !hashIsSigner && (priv == nil || priv.PrivateKey == nil) { + err = errors.InvalidArgumentError("attempting to sign with nil PrivateKey") + return + } + + sig.outSubpackets = sig.buildSubpackets() + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + + if hashIsSigner { + err = signer.Sign(sig) + return + } + + // Parameter check, if this is wrong we will make a signature but + // not serialize it later. + if sig.PubKeyAlgo != priv.PubKeyAlgo { + err = errors.InvalidArgumentError("signature pub key algo does not match priv key") + return + } + + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest) + sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
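+		// (DSA uses only the leftmost bits of the digest when it is
+		// longer than the subgroup order; truncating to whole bytes,
+		// as below, is the usual OpenPGP reading of that rule.)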
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err != nil { + return err + } + sig.DSASigR.bytes = r.Bytes() + sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) + sig.DSASigS.bytes = s.Bytes() + sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) + case PubKeyAlgoECDSA: + r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest) + if err != nil { + return err + } + sig.ECDSASigR = FromBig(r) + sig.ECDSASigS = FromBig(s) + case PubKeyAlgoEdDSA: + r, s, err := priv.PrivateKey.(*EdDSAPrivateKey).Sign(digest) + if err != nil { + return err + } + sig.EdDSASigR = FromBytes(r) + sig.EdDSASigS = FromBytes(s) + default: + err = errors.UnsupportedError("public key algorithm for signing: " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + return +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignUserIdWithSigner computes a signature from priv, asserting that pub is a +// valid key for the identity id. On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserIdWithSigner(id string, pub *PublicKey, s Signer, config *Config) error { + updateUserIdSignatureHash(id, pub, s) + + return sig.Sign(s, nil, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignKeyWithSigner computes a signature using s, asserting that +// signeePubKey is a subkey. On success, the signature is stored in sig. Call +// Serialize to write it out. If config is nil, sensible defaults will be used. +func (sig *Signature) SignKeyWithSigner(signeePubKey *PublicKey, signerPubKey *PublicKey, s Signer, config *Config) error { + updateKeySignatureHash(signerPubKey, signeePubKey, s) + + return sig.Sign(s, nil, config) +} + +// CrossSignKey creates PrimaryKeyBinding signature in sig.EmbeddedSignature by +// signing `primary` key's hash using `priv` subkey private key. Primary public +// key is the `signee` here. +func (sig *Signature) CrossSignKey(primary *PublicKey, priv *PrivateKey, config *Config) error { + if len(sig.outSubpackets) > 0 { + return fmt.Errorf("outSubpackets already exists, looks like CrossSignKey was called after Sign") + } + + sig.EmbeddedSignature = &Signature{ + CreationTime: sig.CreationTime, + SigType: SigTypePrimaryKeyBinding, + PubKeyAlgo: priv.PubKeyAlgo, + Hash: sig.Hash, + } + + h, err := keySignatureHash(primary, &priv.PublicKey, sig.Hash) + if err != nil { + return err + } + return sig.EmbeddedSignature.Sign(h, priv, config) +} + +// Serialize marshals sig to w. 
Sign, SignUserId or SignKey must have been +// called first. +func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && + sig.DSASigR.bytes == nil && + sig.ECDSASigR.bytes == nil && + sig.EdDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoEdDSA: + sigLength = 2 + len(sig.EdDSASigR.bytes) + sigLength += 2 + len(sig.EdDSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoEdDSA: + err = writeMPIs(w, sig.EdDSASigR, sig.EdDSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
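+	// For example, a certify-and-sign key emits the single octet 0x03
+	// (KeyFlagCertify | KeyFlagSign).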
+ + if sig.FlagsValid { + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{sig.GetKeyFlags().BitField}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if sig.EmbeddedSignature != nil { + buf := bytes.NewBuffer(nil) + if err := sig.EmbeddedSignature.Serialize(buf); err == nil { + byteContent := buf.Bytes()[2:] // skip 2-byte length header + subpackets = append(subpackets, outputSubpacket{false, embeddedSignatureSubpacket, true, byteContent}) + } + } + + return +} + +func (sig *Signature) GetKeyFlags() (ret KeyFlagBits) { + if !sig.FlagsValid { + return ret + } + + ret.Valid = true + if sig.FlagCertify { + ret.BitField |= KeyFlagCertify + } + if sig.FlagSign { + ret.BitField |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + ret.BitField |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + ret.BitField |= KeyFlagEncryptStorage + } + return ret +} + +func (f *KeyFlagBits) HasFlagCertify() bool { + return f.BitField&KeyFlagCertify != 0 +} + +func (f *KeyFlagBits) HasFlagSign() bool { + return f.BitField&KeyFlagSign != 0 +} + +func (f *KeyFlagBits) HasFlagEncryptCommunications() bool { + return f.BitField&KeyFlagEncryptCommunications != 0 +} + +func (f *KeyFlagBits) HasFlagEncryptStorage() bool { + return f.BitField&KeyFlagEncryptStorage != 0 +} + +func (f *KeyFlagBits) Merge(other KeyFlagBits) { + if other.Valid { + f.Valid = true + f.BitField |= other.BitField + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go new file mode 100644 index 00000000000..dfca651be72 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. 
+type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. + if _, err = readFull(r, buf[:8]); err != nil { + return + } + sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) + + // Public-key and hash algorithm + if _, err = readFull(r, buf[:2]); err != nil { + return + } + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + var ok bool + if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + // Two-octet field holding left 16 bits of signed hash value. + if _, err = readFull(r, sig.HashTag[:2]); err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { + return + } + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + default: + panic("unreachable") + } + return +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) { + buf := make([]byte, 8) + + // Write the sig type and creation time + buf[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + // Write the issuer long key ID + binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) + if _, err = w.Write(buf[:8]); err != nil { + return + } + + // Write public key algorithm, hash ID, and hash value + buf[0] = byte(sig.PubKeyAlgo) + hashId, ok := s2k.HashToHashId(sig.Hash) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) + } + buf[1] = hashId + copy(buf[2:4], sig.HashTag[:]) + if _, err = w.Write(buf[:4]); err != nil { + return + } + + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + default: + panic("impossible") + } + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 00000000000..b92c1d77847 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,158 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + if ske.s2k == nil { + return errors.UnsupportedError("can't use dummy S2K for symmetric key encryption") + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. 
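+	// A short or empty read surfaces as io.ErrUnexpectedEOF, which the
+	// check below deliberately tolerates; only other errors are fatal.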
+ n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000000..fd4f8f015b1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,291 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/errors" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. 
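+// The quick check relies on the OCFB prefix: its final two octets repeat the
+// two before them, so decrypting with the wrong key almost certainly breaks
+// that pattern and NewOCFBDecrypter reports it by returning nil.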
+func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. 
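+// Bit 0x80 marks the octet as a packet tag, bit 0x40 selects the new packet
+// format, and the low bits carry the packet type (19), giving 0xd3.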
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. +type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000000..96a2b382a1d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package packet
+
+import (
+	"bytes"
+	"image"
+	"image/jpeg"
+	"io"
+	"io/ioutil"
+)
+
+const UserAttrImageSubpacket = 1
+
+// UserAttribute is capable of storing other types of data about a user
+// beyond name, email and a text comment. In practice, user attributes are typically used
+// to store a signed thumbnail photo JPEG image of the user.
+// See RFC 4880, section 5.12.
+type UserAttribute struct {
+	Contents []*OpaqueSubpacket
+}
+
+// NewUserAttributePhoto creates a user attribute packet
+// containing the given images.
+func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
+	uat = new(UserAttribute)
+	for _, photo := range photos {
+		var buf bytes.Buffer
+		// RFC 4880, Section 5.12.1.
+		data := []byte{
+			0x10, 0x00, // Little-endian image header length (16 bytes)
+			0x01,       // Image header version 1
+			0x01,       // JPEG
+			0, 0, 0, 0, // 12 reserved octets, must be all zero.
+			0, 0, 0, 0,
+			0, 0, 0, 0}
+		if _, err = buf.Write(data); err != nil {
+			return
+		}
+		if err = jpeg.Encode(&buf, photo, nil); err != nil {
+			return
+		}
+		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+			SubType:  UserAttrImageSubpacket,
+			Contents: buf.Bytes()})
+	}
+	return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+	return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.13
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uat.Contents, err = OpaqueSubpackets(b)
+	return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+	var buf bytes.Buffer
+	for _, sp := range uat.Contents {
+		sp.Serialize(&buf)
+	}
+	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+		return err
+	}
+	_, err = w.Write(buf.Bytes())
+	return
+}
+
+// ImageData returns zero or more byte slices, each containing
+// JPEG File Interchange Format (JFIF), for each photo in the
+// user attribute packet.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+	for _, sp := range uat.Contents {
+		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+			imageData = append(imageData, sp.Contents[16:])
+		}
+	}
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go
new file mode 100644
index 00000000000..d6bea7d4acc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go
@@ -0,0 +1,160 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"io"
+	"io/ioutil"
+	"strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+	Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+	for _, c := range s {
+		switch c {
+		case '(', ')', '<', '>', 0:
+			return true
+		}
+	}
+	return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'.
+func NewUserId(name, comment, email string) *UserId {
+	// RFC 4880 doesn't deal with the structure of userid strings; the
+	// name, comment and email form is just a convention. However, there's
+	// no convention about escaping the metacharacters and GPG just refuses
+	// to create user ids where, say, the name contains a '('. We mirror
+	// this behaviour.
+
+	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+		return nil
+	}
+
+	uid := new(UserId)
+	uid.Name, uid.Comment, uid.Email = name, comment, email
+	uid.Id = name
+	if len(comment) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "("
+		uid.Id += comment
+		uid.Id += ")"
+	}
+	if len(email) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "<"
+		uid.Id += email
+		uid.Id += ">"
+	}
+	return uid
+}
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.11
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uid.Id = string(b)
+	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+	return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte(uid.Id))
+	return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+	var n, c, e struct {
+		start, end int
+	}
+	var state int
+
+	for offset, rune := range id {
+		switch state {
+		case 0:
+			// Entering name
+			n.start = offset
+			state = 1
+			fallthrough
+		case 1:
+			// In name
+			if rune == '(' {
+				state = 2
+				n.end = offset
+			} else if rune == '<' {
+				state = 5
+				n.end = offset
+			}
+		case 2:
+			// Entering comment
+			c.start = offset
+			state = 3
+			fallthrough
+		case 3:
+			// In comment
+			if rune == ')' {
+				state = 4
+				c.end = offset
+			}
+		case 4:
+			// Between comment and email
+			if rune == '<' {
+				state = 5
+			}
+		case 5:
+			// Entering email
+			e.start = offset
+			state = 6
+			fallthrough
+		case 6:
+			// In email
+			if rune == '>' {
+				state = 7
+				e.end = offset
+			}
+		default:
+			// After email
+		}
+	}
+	switch state {
+	case 1:
+		// ended in the name
+		n.end = len(id)
+	case 3:
+		// ended in comment
+		c.end = len(id)
+	case 6:
+		// ended in email
+		e.end = len(id)
+	}
+
+	name = strings.TrimSpace(id[n.start:n.end])
+	comment = strings.TrimSpace(id[c.start:c.end])
+	email = strings.TrimSpace(id[e.start:e.end])
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/read.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/read.go
new file mode 100644
index 00000000000..5caf7e39c52
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/read.go
@@ -0,0 +1,507 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
+package openpgp // import "github.com/keybase/go-crypto/openpgp" + +import ( + "crypto" + "crypto/hmac" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "github.com/keybase/go-crypto/openpgp/armor" + "github.com/keybase/go-crypto/openpgp/errors" + "github.com/keybase/go-crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + // Does the Message include multiple signatures? Also called "nested signatures". + MultiSig bool + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. 
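+//
+// A minimal caller might look like the following sketch, where msg, keyring
+// and promptFn are illustrative names and error handling is elided:
+//
+//	md, err := ReadMessage(msg, keyring, promptFn, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
+//	// Only now, after reading to EOF, are md.SignatureError and
+//	// md.Signature meaningful.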
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId, nil) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
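+	// Note that a prompt that keeps returning wrong answers is simply
+	// called again: the loop below exits only on successful decryption,
+	// on a hard error, or when neither candidate keys nor symmetric
+	// passphrases remain to try.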
+FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if pk.key.PrivateKey.PrivateKey == nil { + // Key is stubbed + continue + } + if len(pk.encryptedKey.Key) == 0 { + err := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + if err != nil { + continue + } + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. +func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if md.IsSigned { + // If IsSigned is set, it means we have multiple + // OnePassSignature packets. + md.MultiSig = true + if md.SignedBy != nil { + // We've already found the signature we were looking + // for, made by key that we had in keyring and can + // check signature against. Continue with that instead + // of trying to find another. + continue FindLiteralData + } + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, nil, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. 
+// The signature may specify that the contents of the signed message
+// should be preprocessed (i.e. to normalize line endings). Thus this function
+// returns two hashes. The second should be used to hash the message itself and
+// performs any needed preprocessing.
+func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
+	if !hashId.Available() {
+		return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
+	}
+	h := hashId.New()
+
+	switch sigType {
+	case packet.SigTypeBinary:
+		return h, h, nil
+	case packet.SigTypeText:
+		return h, NewCanonicalTextHash(h), nil
+	}
+
+	return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
+}
+
+// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
+// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
+// MDC checks.
+type checkReader struct {
+	md *MessageDetails
+}
+
+func (cr checkReader) Read(buf []byte) (n int, err error) {
+	n, err = cr.md.LiteralData.Body.Read(buf)
+	if err == io.EOF {
+		mdcErr := cr.md.decrypted.Close()
+		if mdcErr != nil {
+			err = mdcErr
+		}
+	}
+	return
+}
+
+// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
+// the data as it is read. When it sees an EOF from the underlying io.Reader
+// it parses and checks a trailing Signature packet and triggers any MDC checks.
+type signatureCheckReader struct {
+	packets        *packet.Reader
+	h, wrappedHash hash.Hash
+	md             *MessageDetails
+}
+
+func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
+	n, err = scr.md.LiteralData.Body.Read(buf)
+	scr.wrappedHash.Write(buf[:n])
+	if err == io.EOF {
+		for {
+			var p packet.Packet
+			p, scr.md.SignatureError = scr.packets.Next()
+			if scr.md.SignatureError != nil {
+				if scr.md.MultiSig {
+					// If we are in MultiSig, we might have found another
+					// signature that cannot be verified using our key.
+					// Clear the Signature field so consumers can tell
+					// that this message failed to verify.
+					scr.md.Signature = nil
+				}
+				return
+			}
+
+			var ok bool
+			if scr.md.Signature, ok = p.(*packet.Signature); ok {
+				var err error
+				if keyID := scr.md.Signature.IssuerKeyId; keyID != nil {
+					if *keyID != scr.md.SignedBy.PublicKey.KeyId {
+						if scr.md.MultiSig {
+							continue // try again to find a sig we can verify
+						}
+						err = errors.StructuralError("bad key id")
+					}
+				}
+				if fingerprint := scr.md.Signature.IssuerFingerprint; fingerprint != nil {
+					if !hmac.Equal(fingerprint, scr.md.SignedBy.PublicKey.Fingerprint[:]) {
+						if scr.md.MultiSig {
+							continue // try again to find a sig we can verify
+						}
+						err = errors.StructuralError("bad key fingerprint")
+					}
+				}
+				if err == nil {
+					err = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
+				}
+				scr.md.SignatureError = err
+			} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
+				scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
+			} else {
+				scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
+				return
+			}
+
+			// Parse only one packet by default, unless the message is
+			// MultiSig. In that case we ask for more packets after
+			// discovering a non-matching signature, until we find one
+			// that we can verify.
+			break
+		}
+
+		// The SymmetricallyEncrypted packet, if any, might have an
+		// unsigned hash of its own. In order to check this we need to
+		// close that Reader.
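+		// (Editor's note: for an MDC-protected message this Close is
+		// where the integrity check actually runs, so tampering with the
+		// ciphertext tends to surface here rather than during earlier
+		// Reads.)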
+		if scr.md.decrypted != nil {
+			mdcErr := scr.md.decrypted.Close()
+			if mdcErr != nil {
+				err = mdcErr
+			}
+		}
+	}
+	return
+}
+
+// CheckDetachedSignature takes a signed file and a detached signature and
+// returns the signer if the signature is valid. If the signer isn't known,
+// ErrUnknownIssuer is returned.
+func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
+	signer, _, err = checkDetachedSignature(keyring, signed, signature)
+	return signer, err
+}
+
+func checkDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
+	var issuerKeyId uint64
+	var issuerFingerprint []byte
+	var hashFunc crypto.Hash
+	var sigType packet.SignatureType
+	var keys []Key
+	var p packet.Packet
+
+	packets := packet.NewReader(signature)
+	for {
+		p, err = packets.Next()
+		if err == io.EOF {
+			return nil, nil, errors.ErrUnknownIssuer
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+
+		switch sig := p.(type) {
+		case *packet.Signature:
+			if sig.IssuerKeyId == nil {
+				return nil, nil, errors.StructuralError("signature doesn't have an issuer")
+			}
+			issuerKeyId = *sig.IssuerKeyId
+			hashFunc = sig.Hash
+			sigType = sig.SigType
+			issuerFingerprint = sig.IssuerFingerprint
+		case *packet.SignatureV3:
+			issuerKeyId = sig.IssuerKeyId
+			hashFunc = sig.Hash
+			sigType = sig.SigType
+		default:
+			return nil, nil, errors.StructuralError("non signature packet found")
+		}
+
+		keys = keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign)
+		if len(keys) > 0 {
+			break
+		}
+	}
+
+	if len(keys) == 0 {
+		panic("unreachable")
+	}
+
+	h, wrappedHash, err := hashForSignature(hashFunc, sigType)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
+		return nil, nil, err
+	}
+
+	for _, key := range keys {
+		switch sig := p.(type) {
+		case *packet.Signature:
+			err = key.PublicKey.VerifySignature(h, sig)
+		case *packet.SignatureV3:
+			err = key.PublicKey.VerifySignatureV3(h, sig)
+		default:
+			panic("unreachable")
+		}
+
+		if err == nil {
+			return key.Entity, &issuerKeyId, nil
+		}
+	}
+
+	return nil, nil, err
+}
+
+// CheckArmoredDetachedSignature performs the same actions as
+// CheckDetachedSignature but expects the signature to be armored.
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
+	signer, _, err = checkArmoredDetachedSignature(keyring, signed, signature)
+	return signer, err
+}
+
+func checkArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
+	body, err := readArmored(signature, SignatureType)
+	if err != nil {
+		return
+	}
+	return checkDetachedSignature(keyring, signed, body)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go
new file mode 100644
index 00000000000..01bb67852d3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go
@@ -0,0 +1,326 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+package s2k // import "github.com/keybase/go-crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/keybase/go-crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// Hash is the default hash function to be used. If
+	// nil, SHA1 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		// SHA1 is the historical default in this package.
+		return crypto.SHA1
+	}
+
+	return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 96 // The common case. Corresponding to 65536.
+	}
+
+	i := c.S2KCount
+	switch {
+	// Behave like GPG. Should we make 65536 the lowest value used?
+	case i < 1024:
+		i = 1024
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 1024 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 0; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+	done := 0
+	var digest []byte
+
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		h.Write(salt)
+		h.Write(in)
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
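+//
+// As a worked example (editor's note), the default encoded count octet 96
+// decodes to (16 + 96&15) << ((96>>4) + 6) = 16 << 12 = 65536, i.e. roughly
+// 64 KiB of salt-plus-passphrase material is hashed per digest pass.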
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +func parseGNUExtensions(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + // A three-byte string identifier + _, err = io.ReadFull(r, buf[:3]) + if err != nil { + return + } + gnuExt := string(buf[:3]) + + if gnuExt != "GNU" { + return nil, errors.UnsupportedError("Malformed GNU extension: " + gnuExt) + } + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + gnuExtType := int(buf[0]) + switch gnuExtType { + case 1: + return nil, nil + case 2: + // Read a serial number, which is prefixed by a 1-byte length. + // The maximum length is 16. + var lenBuf [1]byte + _, err = io.ReadFull(r, lenBuf[:]) + if err != nil { + return + } + + maxLen := 16 + ivLen := int(lenBuf[0]) + if ivLen > maxLen { + ivLen = maxLen + } + ivBuf := make([]byte, ivLen) + // For now we simply discard the IV + _, err = io.ReadFull(r, ivBuf) + if err != nil { + return + } + return nil, nil + default: + return nil, errors.UnsupportedError("unknown S2K GNU protection mode: " + strconv.Itoa(int(gnuExtType))) + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + // GNU Extensions; handle them before we try to look for a hash, which won't + // be needed in most cases anyway. + if buf[0] == 101 { + return parseGNUExtensions(r) + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. 
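+//
+// A minimal usage sketch (editor's illustration, not from the original
+// source): derive a 32-byte key while capturing the 11-byte descriptor
+// that a reader can later feed back to Parse:
+//
+//	var desc bytes.Buffer
+//	key := make([]byte, 32)
+//	err := s2k.Serialize(&desc, key, rand.Reader, []byte("correct horse"), nil)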
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	var buf [11]byte
+	buf[0] = 3 /* iterated and salted */
+	buf[1], _ = HashToHashId(c.hash())
+	salt := buf[2:10]
+	if _, err := io.ReadFull(rand, salt); err != nil {
+		return err
+	}
+	encodedCount := c.encodedCount()
+	count := decodeCount(encodedCount)
+	buf[10] = encodedCount
+	if _, err := w.Write(buf[:]); err != nil {
+		return err
+	}
+
+	Iterated(key, c.hash().New(), passphrase, salt, count)
+	return nil
+}
+
+// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
+// Go's crypto.Hash type. See RFC 4880, section 9.4.
+var hashToHashIdMapping = []struct {
+	id   byte
+	hash crypto.Hash
+	name string
+}{
+	{1, crypto.MD5, "MD5"},
+	{2, crypto.SHA1, "SHA1"},
+	{3, crypto.RIPEMD160, "RIPEMD160"},
+	{8, crypto.SHA256, "SHA256"},
+	{9, crypto.SHA384, "SHA384"},
+	{10, crypto.SHA512, "SHA512"},
+	{11, crypto.SHA224, "SHA224"},
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.id == id {
+			return m.hash, true
+		}
+	}
+	return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id, with ok set to false if the id is unknown.
+func HashIdToString(id byte) (name string, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.id == id {
+			return m.name, true
+		}
+	}
+
+	return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given
+// crypto.Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+	for _, m := range hashToHashIdMapping {
+		if m.hash == h {
+			return m.id, true
+		}
+	}
+	return 0, false
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/write.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/write.go
new file mode 100644
index 00000000000..89ef132b5df
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/openpgp/write.go
@@ -0,0 +1,506 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/keybase/go-crypto/openpgp/armor"
+	"github.com/keybase/go-crypto/openpgp/errors"
+	"github.com/keybase/go-crypto/openpgp/packet"
+	"github.com/keybase/go-crypto/openpgp/s2k"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+	return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
+	return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// DetachSignText signs message (after canonicalising the line endings) with
+// the private key from signer (which must already have been decrypted) and
+// writes the signature to w.
+// If config is nil, sensible defaults will be used.
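+//
+// Editor's sketch of a typical call, where signer is an *Entity whose
+// private key is already decrypted and msg is the text to sign (both
+// assumed for illustration):
+//
+//	var sig bytes.Buffer
+//	err := openpgp.DetachSignText(&sig, signer, strings.NewReader(msg), nil)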
+func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +// SignWithSigner signs the message of type sigType with s and writes the +// signature to w. +// If config is nil, sensible defaults will be used. +func SignWithSigner(s packet.Signer, w io.Writer, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + keyId := s.KeyId() + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = s.PublicKeyAlgo() + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &keyId + + s.Reset() + + wrapped := s.(hash.Hash) + + if sigType == packet.SigTypeText { + wrapped = NewCanonicalTextHash(s) + } + + io.Copy(wrapped, message) + + err = sig.Sign(s, nil, config) + if err != nil { + return + } + + err = sig.Serialize(w) + + return +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signerSubkey, ok := signer.signingKey(config.Now()) + if !ok { + err = errors.InvalidArgumentError("no valid signing keys") + return + } + if signerSubkey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signerSubkey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signerSubkey.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signerSubkey.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signerSubkey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. 
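+//
+// Usage sketch (editor's illustration): nothing is complete until the
+// returned WriteCloser is closed, which flushes the final packets:
+//
+//	var buf bytes.Buffer
+//	pt, err := openpgp.SymmetricallyEncrypt(&buf, []byte("passphrase"), nil, nil)
+//	if err == nil {
+//		pt.Write([]byte("attack at dawn"))
+//		pt.Close()
+//	}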
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + + // If no preferences were specified, assume something safe and reasonable. 
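+	// (Editor's illustration: if a recipient's self-signature prefers
+	// {AES256, AES128}, the intersections below shrink candidateCiphers
+	// to {AES128, AES256} while keeping candidate order, so AES128, as
+	// the surviving head of the list, wins unless config names another
+	// candidate.)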
+	defaultCiphers := []uint8{
+		uint8(packet.CipherAES128),
+		uint8(packet.CipherAES192),
+		uint8(packet.CipherAES256),
+		uint8(packet.CipherCAST5),
+	}
+
+	defaultHashes := []uint8{
+		hashToHashId(crypto.SHA256),
+		hashToHashId(crypto.SHA512),
+		hashToHashId(crypto.RIPEMD160),
+	}
+
+	encryptKeys := make([]Key, len(to))
+	for i := range to {
+		var ok bool
+		encryptKeys[i], ok = to[i].encryptionKey(config.Now())
+		if !ok {
+			return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
+		}
+
+		sig := to[i].primaryIdentity().SelfSignature
+
+		preferredSymmetric := sig.PreferredSymmetric
+		if len(preferredSymmetric) == 0 {
+			preferredSymmetric = defaultCiphers
+		}
+		preferredHashes := sig.PreferredHash
+		if len(preferredHashes) == 0 {
+			preferredHashes = defaultHashes
+		}
+		candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
+		candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
+	}
+
+	if len(candidateCiphers) == 0 {
+		return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common ciphers")
+	}
+	if len(candidateHashes) == 0 {
+		return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common hashes")
+	}
+
+	cipher := packet.CipherFunction(candidateCiphers[0])
+	// If the cipher specified by config is a candidate, we'll use that.
+	configuredCipher := config.Cipher()
+	for _, c := range candidateCiphers {
+		cipherFunc := packet.CipherFunction(c)
+		if cipherFunc == configuredCipher {
+			cipher = cipherFunc
+			break
+		}
+	}
+
+	var hash crypto.Hash
+	for _, hashId := range candidateHashes {
+		if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
+			hash = h
+			break
+		}
+	}
+
+	// If the hash specified by config is a candidate, we'll use that.
+	if configuredHash := config.Hash(); configuredHash.Available() {
+		for _, hashId := range candidateHashes {
+			if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
+				hash = h
+				break
+			}
+		}
+	}
+
+	if hash == 0 {
+		hashId := candidateHashes[0]
+		name, ok := s2k.HashIdToString(hashId)
+		if !ok {
+			name = "#" + strconv.Itoa(int(hashId))
+		}
+		return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
+	}
+
+	symKey := make([]byte, cipher.KeySize())
+	if _, err := io.ReadFull(config.Random(), symKey); err != nil {
+		return nil, err
+	}
+
+	for _, key := range encryptKeys {
+		if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
+			return nil, err
+		}
+	}
+
+	encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
+	if err != nil {
+		return
+	}
+
+	if signer != nil {
+		ops := &packet.OnePassSignature{
+			SigType:    packet.SigTypeBinary,
+			Hash:       hash,
+			PubKeyAlgo: signer.PubKeyAlgo,
+			KeyId:      signer.KeyId,
+			IsLast:     true,
+		}
+		if err := ops.Serialize(encryptedData); err != nil {
+			return nil, err
+		}
+	}
+
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	w := encryptedData
+	if signer != nil {
+		// If we need to write a signature packet after the literal
+		// data then we need to stop literalData from closing
+		// encryptedData.
+		w = noOpCloser{encryptedData}
+	}
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+	literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
+	if err != nil {
+		return nil, err
+	}
+
+	if signer != nil {
+		return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
+	}
+	return literalData, nil
+}
+
+// signatureWriter hashes the contents of a message while passing it along to
+// literalData. When closed, it closes literalData, writes a signature packet
+// to encryptedData and then also closes encryptedData.
+type signatureWriter struct {
+	encryptedData io.WriteCloser
+	literalData   io.WriteCloser
+	hashType      crypto.Hash
+	h             hash.Hash
+	signer        *packet.PrivateKey
+	config        *packet.Config
+}
+
+func (s signatureWriter) Write(data []byte) (int, error) {
+	s.h.Write(data)
+	return s.literalData.Write(data)
+}
+
+func (s signatureWriter) Close() error {
+	sig := &packet.Signature{
+		SigType:      packet.SigTypeBinary,
+		PubKeyAlgo:   s.signer.PubKeyAlgo,
+		Hash:         s.hashType,
+		CreationTime: s.config.Now(),
+		IssuerKeyId:  &s.signer.KeyId,
+	}
+
+	if err := sig.Sign(s.h, s.signer, s.config); err != nil {
+		return err
+	}
+	if err := s.literalData.Close(); err != nil {
+		return err
+	}
+	if err := sig.Serialize(s.encryptedData); err != nil {
+		return err
+	}
+	return s.encryptedData.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+// TODO: we have two of these in OpenPGP packages alone. This probably needs
+// to be promoted somewhere more common.
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+// AttachedSign is like openpgp.Encrypt (as in p.crypto/openpgp/write.go), but
+// doesn't encrypt at all; it just signs the literal, unencrypted data.
+// Unfortunately we need to duplicate some code here that's already in
+// write.go.
+func AttachedSign(out io.WriteCloser, signed Entity, hints *FileHints,
+	config *packet.Config) (in io.WriteCloser, err error) {
+
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	if config == nil {
+		config = &packet.Config{}
+	}
+
+	var signer *packet.PrivateKey
+
+	signKey, ok := signed.signingKey(config.Now())
+	if !ok {
+		err = errors.InvalidArgumentError("no valid signing keys")
+		return
+	}
+	signer = signKey.PrivateKey
+	if signer == nil {
+		err = errors.InvalidArgumentError("no valid signing keys")
+		return
+	}
+	if signer.Encrypted {
+		err = errors.InvalidArgumentError("signing key must be decrypted")
+		return
+	}
+
+	if algo := config.Compression(); algo != packet.CompressionNone {
+		var compConfig *packet.CompressionConfig
+		if config != nil {
+			compConfig = config.CompressionConfig
+		}
+		out, err = packet.SerializeCompressed(out, algo, compConfig)
+		if err != nil {
+			return
+		}
+	}
+
+	hasher := crypto.SHA512
+
+	ops := &packet.OnePassSignature{
+		SigType:    packet.SigTypeBinary,
+		Hash:       hasher,
+		PubKeyAlgo: signer.PubKeyAlgo,
+		KeyId:      signer.KeyId,
+		IsLast:     true,
+	}
+
+	if err = ops.Serialize(out); err != nil {
+		return
+	}
+
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+
+	// We don't want the literal serializer to close the output stream
+	// since we're going to need to write to it when we finish up the
+	// signature stuff.
+	in, err = packet.SerializeLiteral(noOpCloser{out}, hints.IsBinary, hints.FileName, epochSeconds)
+
+	if err != nil {
+		return
+	}
+
+	// If we need to write a signature packet after the literal data then
+	// we need to stop the literal-data writer from closing out.
+	in = signatureWriter{out, in, hasher, hasher.New(), signer, config}
+
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
new file mode 100644
index 00000000000..5c5f415c88d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
@@ -0,0 +1,325 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rsa
+
+import (
+	"crypto"
+	"crypto/subtle"
+	"errors"
+	"io"
+	"math/big"
+)
+
+// This file implements encryption and decryption using PKCS#1 v1.5 padding.
+
+// PKCS1v15DecryptOptions is for passing options to PKCS#1 v1.5 decryption using
+// the crypto.Decrypter interface.
+type PKCS1v15DecryptOptions struct {
+	// SessionKeyLen is the length of the session key that is being
+	// decrypted. If not zero, then a padding error during decryption will
+	// cause a random plaintext of this length to be returned rather than
+	// an error. These alternatives happen in constant time.
+	SessionKeyLen int
+}
+
+// EncryptPKCS1v15 encrypts the given message with RSA and the padding scheme from PKCS#1 v1.5.
+// The message must be no longer than the length of the public modulus minus 11 bytes.
+//
+// The rand parameter is used as a source of entropy to ensure that encrypting
+// the same message twice doesn't result in the same ciphertext.
+//
+// WARNING: use of this function to encrypt plaintexts other than session keys
+// is dangerous. Use RSA OAEP in new protocols.
+func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err error) {
+	if err := checkPub(pub); err != nil {
+		return nil, err
+	}
+	k := (pub.N.BitLen() + 7) / 8
+	if len(msg) > k-11 {
+		err = ErrMessageTooLong
+		return
+	}
+
+	// EM = 0x00 || 0x02 || PS || 0x00 || M
+	em := make([]byte, k)
+	em[1] = 2
+	ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
+	err = nonZeroRandomBytes(ps, rand)
+	if err != nil {
+		return
+	}
+	em[len(em)-len(msg)-1] = 0
+	copy(mm, msg)
+
+	m := new(big.Int).SetBytes(em)
+	c := encrypt(new(big.Int), pub, m)
+
+	copyWithLeftPad(em, c.Bytes())
+	out = em
+	return
+}
+
+// DecryptPKCS1v15 decrypts a ciphertext using RSA and the padding scheme from PKCS#1 v1.5.
+// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
+//
+// Note that whether this function returns an error or not discloses secret
+// information. If an attacker can cause this function to run repeatedly and
+// learn whether each instance returned an error then they can decrypt and
+// forge signatures as if they had the private key. See
+// DecryptPKCS1v15SessionKey for a way of solving this problem.
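+//
+// Editor's sketch of the session-key pattern that the paragraph above
+// recommends (priv and ciphertext are assumed for illustration):
+//
+//	key := make([]byte, 32)
+//	if _, err := io.ReadFull(rand.Reader, key); err == nil {
+//		_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, priv, ciphertext, key)
+//		// key now holds either the real session key or the random
+//		// placeholder; the protocol proceeds identically either way.
+//	}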
+func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err error) { + if err := checkPub(&priv.PublicKey); err != nil { + return nil, err + } + valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext) + if err != nil { + return + } + if valid == 0 { + return nil, ErrDecryption + } + out = out[index:] + return +} + +// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5. +// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks. +// It returns an error if the ciphertext is the wrong length or if the +// ciphertext is greater than the public modulus. Otherwise, no error is +// returned. If the padding is valid, the resulting plaintext message is copied +// into key. Otherwise, key is unchanged. These alternatives occur in constant +// time. It is intended that the user of this function generate a random +// session key beforehand and continue the protocol with the resulting value. +// This will remove any possibility that an attacker can learn any information +// about the plaintext. +// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA +// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology +// (Crypto '98). +// +// Note that if the session key is too small then it may be possible for an +// attacker to brute-force it. If they can do that then they can learn whether +// a random value was used (because it'll be different for the same ciphertext) +// and thus whether the padding was correct. This defeats the point of this +// function. Using at least a 16-byte key will protect against this attack. +func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) { + if err := checkPub(&priv.PublicKey); err != nil { + return err + } + k := (priv.N.BitLen() + 7) / 8 + if k-(len(key)+3+8) < 0 { + return ErrDecryption + } + + valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext) + if err != nil { + return + } + + if len(em) != k { + // This should be impossible because decryptPKCS1v15 always + // returns the full slice. + return ErrDecryption + } + + valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key))) + subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):]) + return +} + +// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if +// rand is not nil. It returns one or zero in valid that indicates whether the +// plaintext was correctly structured. In either case, the plaintext is +// returned in em so that it may be read independently of whether it was valid +// in order to maintain constant memory access patterns. If the plaintext was +// valid then index contains the index of the original message in em. +func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) { + k := (priv.N.BitLen() + 7) / 8 + if k < 11 { + err = ErrDecryption + return + } + + c := new(big.Int).SetBytes(ciphertext) + m, err := decrypt(rand, priv, c) + if err != nil { + return + } + + em = leftPad(m.Bytes(), k) + firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) + secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. 
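+	// For instance (editor's note), a well-formed block for a 2048-bit key
+	// looks like
+	//
+	//	00 02 | at least eight non-zero pad bytes | 00 | message
+	//
+	// so the scan below leaves index on the 00 separator, and the final
+	// ConstantTimeSelect moves it past the separator to the message.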
+ lookingForIndex := 1 + + for i := 2; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + // The PS padding must be at least 8 bytes long, and it starts two + // bytes into em. + validPS := subtle.ConstantTimeLessOrEq(2+8, index) + + valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS + index = subtle.ConstantTimeSelect(valid, index+1, 0) + return valid, em, index, nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. +func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + // In tests, the PRNG may return all zeros so we do + // this to break the loop. + s[i] ^= 0x42 + } + } + + return +} + +// These are ASN1 DER structures: +// DigestInfo ::= SEQUENCE { +// digestAlgorithm AlgorithmIdentifier, +// digest OCTET STRING +// } +// For performance, we don't use the generic ASN1 encoder. Rather, we +// precompute a prefix of the digest value that makes a valid ASN1 DER string +// with the correct contents. +var hashPrefixes = map[crypto.Hash][]byte{ + crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10}, + crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14}, + crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c}, + crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}, + crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}, + crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}, + crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix. + crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14}, +} + +// SignPKCS1v15 calculates the signature of hashed using RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5. +// Note that hashed must be the result of hashing the input message using the +// given hash function. If hash is zero, hashed is signed directly. This isn't +// advisable except for interoperability. +// +// If rand is not nil then RSA blinding will be used to avoid timing side-channel attacks. +// +// This function is deterministic. Thus, if the set of possible messages is +// small, an attacker may be able to build a map from messages to signatures +// and identify the signed messages. As ever, signatures provide authenticity, +// not confidentiality. 
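+//
+// Editor's sketch of a typical call (priv and msg assumed for
+// illustration):
+//
+//	digest := sha256.Sum256(msg)
+//	sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, digest[:])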
+func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err error) { + hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) + if err != nil { + return + } + + tLen := len(prefix) + hashLen + k := (priv.N.BitLen() + 7) / 8 + if k < tLen+11 { + return nil, ErrMessageTooLong + } + + // EM = 0x00 || 0x01 || PS || 0x00 || T + em := make([]byte, k) + em[1] = 1 + for i := 2; i < k-tLen-1; i++ { + em[i] = 0xff + } + copy(em[k-tLen:k-hashLen], prefix) + copy(em[k-hashLen:k], hashed) + + m := new(big.Int).SetBytes(em) + c, err := decryptAndCheck(rand, priv, m) + if err != nil { + return + } + + copyWithLeftPad(em, c.Bytes()) + s = em + return +} + +// VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature. +// hashed is the result of hashing the input message using the given hash +// function and sig is the signature. A valid signature is indicated by +// returning a nil error. If hash is zero then hashed is used directly. This +// isn't advisable except for interoperability. +func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err error) { + hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) + if err != nil { + return + } + + tLen := len(prefix) + hashLen + k := (pub.N.BitLen() + 7) / 8 + if k < tLen+11 { + err = ErrVerification + return + } + + c := new(big.Int).SetBytes(sig) + m := encrypt(new(big.Int), pub, c) + em := leftPad(m.Bytes(), k) + // EM = 0x00 || 0x01 || PS || 0x00 || T + + ok := subtle.ConstantTimeByteEq(em[0], 0) + ok &= subtle.ConstantTimeByteEq(em[1], 1) + ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed) + ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix) + ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0) + + for i := 2; i < k-tLen-1; i++ { + ok &= subtle.ConstantTimeByteEq(em[i], 0xff) + } + + if ok != 1 { + return ErrVerification + } + + return nil +} + +func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) { + // Special case: crypto.Hash(0) is used to indicate that the data is + // signed directly. + if hash == 0 { + return inLen, nil, nil + } + + hashLen = hash.Size() + if inLen != hashLen { + return 0, nil, errors.New("crypto/rsa: input must be hashed message") + } + prefix, ok := hashPrefixes[hash] + if !ok { + return 0, nil, errors.New("crypto/rsa: unsupported hash function") + } + return +} + +// copyWithLeftPad copies src to the end of dest, padding with zero bytes as +// needed. +func copyWithLeftPad(dest, src []byte) { + numPaddingBytes := len(dest) - len(src) + for i := 0; i < numPaddingBytes; i++ { + dest[i] = 0 + } + copy(dest[numPaddingBytes:], src) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pss.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pss.go new file mode 100644 index 00000000000..8a94589b1c2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/pss.go @@ -0,0 +1,297 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rsa + +// This file implements the PSS signature scheme [1]. 
+//
+// [1] http://www.rsa.com/rsalabs/pkcs/files/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf
+
+import (
+	"bytes"
+	"crypto"
+	"errors"
+	"hash"
+	"io"
+	"math/big"
+)
+
+func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
+	// See [1], section 9.1.1
+	hLen := hash.Size()
+	sLen := len(salt)
+	emLen := (emBits + 7) / 8
+
+	// 1.  If the length of M is greater than the input limitation for the
+	//     hash function (2^61 - 1 octets for SHA-1), output "message too
+	//     long" and stop.
+	//
+	// 2.  Let mHash = Hash(M), an octet string of length hLen.
+
+	if len(mHash) != hLen {
+		return nil, errors.New("crypto/rsa: input must be hashed message")
+	}
+
+	// 3.  If emLen < hLen + sLen + 2, output "encoding error" and stop.
+
+	if emLen < hLen+sLen+2 {
+		return nil, errors.New("crypto/rsa: encoding error")
+	}
+
+	em := make([]byte, emLen)
+	db := em[:emLen-sLen-hLen-2+1+sLen]
+	h := em[emLen-sLen-hLen-2+1+sLen : emLen-1]
+
+	// 4.  Generate a random octet string salt of length sLen; if sLen = 0,
+	//     then salt is the empty string.
+	//
+	// 5.  Let
+	//       M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;
+	//
+	//     M' is an octet string of length 8 + hLen + sLen with eight
+	//     initial zero octets.
+	//
+	// 6.  Let H = Hash(M'), an octet string of length hLen.
+
+	var prefix [8]byte
+
+	hash.Write(prefix[:])
+	hash.Write(mHash)
+	hash.Write(salt)
+
+	h = hash.Sum(h[:0])
+	hash.Reset()
+
+	// 7.  Generate an octet string PS consisting of emLen - sLen - hLen - 2
+	//     zero octets. The length of PS may be 0.
+	//
+	// 8.  Let DB = PS || 0x01 || salt; DB is an octet string of length
+	//     emLen - hLen - 1.
+
+	db[emLen-sLen-hLen-2] = 0x01
+	copy(db[emLen-sLen-hLen-1:], salt)
+
+	// 9.  Let dbMask = MGF(H, emLen - hLen - 1).
+	//
+	// 10. Let maskedDB = DB \xor dbMask.
+
+	mgf1XOR(db, hash, h)
+
+	// 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//     maskedDB to zero.
+
+	db[0] &= (0xFF >> uint(8*emLen-emBits))
+
+	// 12. Let EM = maskedDB || H || 0xbc.
+	em[emLen-1] = 0xBC
+
+	// 13. Output EM.
+	return em, nil
+}
+
+func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+	// 1.  If the length of M is greater than the input limitation for the
+	//     hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
+	//     and stop.
+	//
+	// 2.  Let mHash = Hash(M), an octet string of length hLen.
+	hLen := hash.Size()
+	if hLen != len(mHash) {
+		return ErrVerification
+	}
+
+	// 3.  If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+	emLen := (emBits + 7) / 8
+	if emLen < hLen+sLen+2 {
+		return ErrVerification
+	}
+
+	// 4.  If the rightmost octet of EM does not have hexadecimal value
+	//     0xbc, output "inconsistent" and stop.
+	if em[len(em)-1] != 0xBC {
+		return ErrVerification
+	}
+
+	// 5.  Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
+	//     let H be the next hLen octets.
+	db := em[:emLen-hLen-1]
+	h := em[emLen-hLen-1 : len(em)-1]
+
+	// 6.  If the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//     maskedDB are not all equal to zero, output "inconsistent" and
+	//     stop.
+	if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
+		return ErrVerification
+	}
+
+	// 7.  Let dbMask = MGF(H, emLen - hLen - 1).
+	//
+	// 8.  Let DB = maskedDB \xor dbMask.
+	mgf1XOR(db, hash, h)
+
+	// 9.  Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+	//     DB to zero.
+	db[0] &= (0xFF >> uint(8*emLen-emBits))
+
+	if sLen == PSSSaltLengthAuto {
+	FindSaltLength:
+		for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
+			switch db[emLen-hLen-sLen-2] {
+			case 1:
+				break FindSaltLength
+			case 0:
+				continue
+			default:
+				return ErrVerification
+			}
+		}
+		if sLen < 0 {
+			return ErrVerification
+		}
+	} else {
+		// 10.
If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero + // or if the octet at position emLen - hLen - sLen - 1 (the leftmost + // position is "position 1") does not have hexadecimal value 0x01, + // output "inconsistent" and stop. + for _, e := range db[:emLen-hLen-sLen-2] { + if e != 0x00 { + return ErrVerification + } + } + if db[emLen-hLen-sLen-2] != 0x01 { + return ErrVerification + } + } + + // 11. Let salt be the last sLen octets of DB. + salt := db[len(db)-sLen:] + + // 12. Let + // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ; + // M' is an octet string of length 8 + hLen + sLen with eight + // initial zero octets. + // + // 13. Let H' = Hash(M'), an octet string of length hLen. + var prefix [8]byte + hash.Write(prefix[:]) + hash.Write(mHash) + hash.Write(salt) + + h0 := hash.Sum(nil) + + // 14. If H = H', output "consistent." Otherwise, output "inconsistent." + if !bytes.Equal(h0, h) { + return ErrVerification + } + return nil +} + +// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt. +// Note that hashed must be the result of hashing the input message using the +// given hash function. salt is a random sequence of bytes whose length will be +// later used to verify the signature. +func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) { + nBits := priv.N.BitLen() + em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New()) + if err != nil { + return + } + m := new(big.Int).SetBytes(em) + c, err := decryptAndCheck(rand, priv, m) + if err != nil { + return + } + s = make([]byte, (nBits+7)/8) + copyWithLeftPad(s, c.Bytes()) + return +} + +const ( + // PSSSaltLengthAuto causes the salt in a PSS signature to be as large + // as possible when signing, and to be auto-detected when verifying. + PSSSaltLengthAuto = 0 + // PSSSaltLengthEqualsHash causes the salt length to equal the length + // of the hash used in the signature. + PSSSaltLengthEqualsHash = -1 +) + +// PSSOptions contains options for creating and verifying PSS signatures. +type PSSOptions struct { + // SaltLength controls the length of the salt used in the PSS + // signature. It can either be a number of bytes, or one of the special + // PSSSaltLength constants. + SaltLength int + + // Hash, if not zero, overrides the hash function passed to SignPSS. + // This is the only way to specify the hash function when using the + // crypto.Signer interface. + Hash crypto.Hash +} + +// HashFunc returns pssOpts.Hash so that PSSOptions implements +// crypto.SignerOpts. +func (pssOpts *PSSOptions) HashFunc() crypto.Hash { + return pssOpts.Hash +} + +func (opts *PSSOptions) saltLength() int { + if opts == nil { + return PSSSaltLengthAuto + } + return opts.SaltLength +} + +// SignPSS calculates the signature of hashed using RSASSA-PSS [1]. +// Note that hashed must be the result of hashing the input message using the +// given hash function. The opts argument may be nil, in which case sensible +// defaults are used. 
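+//
+// A hedged usage sketch (editor's illustration; priv and msg assumed):
+//
+//	digest := sha256.Sum256(msg)
+//	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:],
+//		&rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})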
+func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) (s []byte, err error) {
+	saltLength := opts.saltLength()
+	switch saltLength {
+	case PSSSaltLengthAuto:
+		saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size()
+	case PSSSaltLengthEqualsHash:
+		saltLength = hash.Size()
+	}
+
+	if opts != nil && opts.Hash != 0 {
+		hash = opts.Hash
+	}
+
+	salt := make([]byte, saltLength)
+	if _, err = io.ReadFull(rand, salt); err != nil {
+		return
+	}
+	return signPSSWithSalt(rand, priv, hash, hashed, salt)
+}
+
+// VerifyPSS verifies a PSS signature.
+// hashed is the result of hashing the input message using the given hash
+// function and sig is the signature. A valid signature is indicated by
+// returning a nil error. The opts argument may be nil, in which case sensible
+// defaults are used.
+func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {
+	return verifyPSS(pub, hash, hashed, sig, opts.saltLength())
+}
+
+// verifyPSS verifies a PSS signature with the given salt length.
+func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
+	nBits := pub.N.BitLen()
+	if len(sig) != (nBits+7)/8 {
+		return ErrVerification
+	}
+	s := new(big.Int).SetBytes(sig)
+	m := encrypt(new(big.Int), pub, s)
+	emBits := nBits - 1
+	emLen := (emBits + 7) / 8
+	if emLen < len(m.Bytes()) {
+		return ErrVerification
+	}
+	em := make([]byte, emLen)
+	copyWithLeftPad(em, m.Bytes())
+	if saltLen == PSSSaltLengthEqualsHash {
+		saltLen = hash.Size()
+	}
+	return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/rsa.go b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/rsa.go
new file mode 100644
index 00000000000..ff6b11b3eee
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/keybase/go-crypto/rsa/rsa.go
@@ -0,0 +1,646 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rsa implements RSA encryption as specified in PKCS#1.
+//
+// RSA is a single, fundamental operation that is used in this package to
+// implement either public-key encryption or public-key signatures.
+//
+// The original specification for encryption and signatures with RSA is PKCS#1
+// and the terms "RSA encryption" and "RSA signatures" by default refer to
+// PKCS#1 version 1.5. However, that specification has flaws and new designs
+// should use version two, usually called simply OAEP and PSS, where
+// possible.
+//
+// Two sets of interfaces are included in this package. When a more abstract
+// interface isn't necessary, there are functions for encrypting/decrypting
+// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
+// over the public-key primitive, the PrivateKey struct implements the
+// Decrypter and Signer interfaces from the crypto package.
+package rsa
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/subtle"
+	"errors"
+	"hash"
+	"io"
+	"math/big"
+)
+
+var bigZero = big.NewInt(0)
+var bigOne = big.NewInt(1)
+
+// A PublicKey represents the public part of an RSA key.
+type PublicKey struct {
+	N *big.Int // modulus
+	E int64    // public exponent
+}
+
+// OAEPOptions is an interface for passing options to OAEP decryption using the
+// crypto.Decrypter interface.
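+//
+// Editor's sketch of these options flowing through the crypto.Decrypter
+// interface (priv and ciphertext assumed for illustration):
+//
+//	pt, err := priv.Decrypt(rand.Reader, ciphertext,
+//		&rsa.OAEPOptions{Hash: crypto.SHA256})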
+type OAEPOptions struct {
+	// Hash is the hash function that will be used when generating the mask.
+	Hash crypto.Hash
+	// Label is an arbitrary byte string that must be equal to the value
+	// used when encrypting.
+	Label []byte
+}
+
+var (
+	errPublicModulus       = errors.New("crypto/rsa: missing public modulus")
+	errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small")
+	errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large")
+)
+
+// checkPub sanity checks the public key before we use it.
+// We require pub.E to fit into a 32-bit integer so that we
+// do not have different behavior depending on whether
+// int is 32 or 64 bits. See also
+// http://www.imperialviolet.org/2012/03/16/rsae.html.
+func checkPub(pub *PublicKey) error {
+	if pub.N == nil {
+		return errPublicModulus
+	}
+	if pub.E < 2 {
+		return errPublicExponentSmall
+	}
+	if pub.E > 1<<31-1 {
+		return errPublicExponentLarge
+	}
+	return nil
+}
+
+// A PrivateKey represents an RSA key.
+type PrivateKey struct {
+	PublicKey            // public part.
+	D         *big.Int   // private exponent
+	Primes    []*big.Int // prime factors of N, has >= 2 elements.
+
+	// Precomputed contains precomputed values that speed up private
+	// operations, if available.
+	Precomputed PrecomputedValues
+}
+
+// Public returns the public key corresponding to priv.
+func (priv *PrivateKey) Public() crypto.PublicKey {
+	return &priv.PublicKey
+}
+
+// Sign signs msg with priv, reading randomness from rand. If opts is a
+// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
+// be used. This method is intended to support keys where the private part is
+// kept in, for example, a hardware module. Common uses should use the Sign*
+// functions in this package.
+func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
+	if pssOpts, ok := opts.(*PSSOptions); ok {
+		return SignPSS(rand, priv, pssOpts.Hash, msg, pssOpts)
+	}
+
+	return SignPKCS1v15(rand, priv, opts.HashFunc(), msg)
+}
+
+// Decrypt decrypts ciphertext with priv. If opts is nil or of type
+// *PKCS1v15DecryptOptions then PKCS#1 v1.5 decryption is performed. Otherwise
+// opts must have type *OAEPOptions and OAEP decryption is done.
+func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
+	if opts == nil {
+		return DecryptPKCS1v15(rand, priv, ciphertext)
+	}
+
+	switch opts := opts.(type) {
+	case *OAEPOptions:
+		return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label)
+
+	case *PKCS1v15DecryptOptions:
+		if l := opts.SessionKeyLen; l > 0 {
+			plaintext = make([]byte, l)
+			if _, err := io.ReadFull(rand, plaintext); err != nil {
+				return nil, err
+			}
+			if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil {
+				return nil, err
+			}
+			return plaintext, nil
+		} else {
+			return DecryptPKCS1v15(rand, priv, ciphertext)
+		}
+
+	default:
+		return nil, errors.New("crypto/rsa: invalid options for Decrypt")
+	}
+}
+
+type PrecomputedValues struct {
+	Dp, Dq *big.Int // D mod (P-1) (or mod Q-1)
+	Qinv   *big.Int // Q^-1 mod P
+
+	// CRTValues is used for the 3rd and subsequent primes. Due to a
+	// historical accident, the CRT for the first two primes is handled
+	// differently in PKCS#1 and interoperability is sufficiently
+	// important that we mirror this.
+	CRTValues []CRTValue
+}
+
+// CRTValue contains the precomputed Chinese remainder theorem values.
+type CRTValue struct { + Exp *big.Int // D mod (prime-1). + Coeff *big.Int // R·Coeff ≡ 1 mod Prime. + R *big.Int // product of primes prior to this (inc p and q). +} + +// Validate performs basic sanity checks on the key. +// It returns nil if the key is valid, or else an error describing a problem. +func (priv *PrivateKey) Validate() error { + if err := checkPub(&priv.PublicKey); err != nil { + return err + } + + // Check that Πprimes == n. + modulus := new(big.Int).Set(bigOne) + for _, prime := range priv.Primes { + // Any primes ≤ 1 will cause divide-by-zero panics later. + if prime.Cmp(bigOne) <= 0 { + return errors.New("crypto/rsa: invalid prime value") + } + modulus.Mul(modulus, prime) + } + if modulus.Cmp(priv.N) != 0 { + return errors.New("crypto/rsa: invalid modulus") + } + + // Check that de ≡ 1 mod p-1, for each prime. + // This implies that e is coprime to each p-1 as e has a multiplicative + // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) = + // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1 + // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required. + congruence := new(big.Int) + de := new(big.Int).SetInt64(int64(priv.E)) + de.Mul(de, priv.D) + for _, prime := range priv.Primes { + pminus1 := new(big.Int).Sub(prime, bigOne) + congruence.Mod(de, pminus1) + if congruence.Cmp(bigOne) != 0 { + return errors.New("crypto/rsa: invalid exponents") + } + } + return nil +} + +// GenerateKey generates an RSA keypair of the given bit size using the +// random source random (for example, crypto/rand.Reader). +func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err error) { + return GenerateMultiPrimeKey(random, 2, bits) +} + +// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit +// size and the given random source, as suggested in [1]. Although the public +// keys are compatible (actually, indistinguishable) from the 2-prime case, +// the private keys are not. Thus it may not be possible to export multi-prime +// private keys in certain formats or to subsequently import them into other +// code. +// +// Table 1 in [2] suggests maximum numbers of primes for a given size. +// +// [1] US patent 4405829 (1972, expired) +// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf +func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. 
+ for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + g := new(big.Int) + priv.D = new(big.Int) + y := new(big.Int) + e := big.NewInt(int64(priv.E)) + g.GCD(priv.D, y, e, totient) + + if g.Cmp(bigOne) == 0 { + if priv.D.Sign() < 0 { + priv.D.Add(priv.D, totient) + } + priv.Primes = primes + priv.N = n + + break + } + } + + priv.Precompute() + return +} + +// incCounter increments a four byte, big-endian counter. +func incCounter(c *[4]byte) { + if c[3]++; c[3] != 0 { + return + } + if c[2]++; c[2] != 0 { + return + } + if c[1]++; c[1] != 0 { + return + } + c[0]++ +} + +// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function +// specified in PKCS#1 v2.1. +func mgf1XOR(out []byte, hash hash.Hash, seed []byte) { + var counter [4]byte + var digest []byte + + done := 0 + for done < len(out) { + hash.Write(seed) + hash.Write(counter[0:4]) + digest = hash.Sum(digest[:0]) + hash.Reset() + + for i := 0; i < len(digest) && done < len(out); i++ { + out[done] ^= digest[i] + done++ + } + incCounter(&counter) + } +} + +// ErrMessageTooLong is returned when attempting to encrypt a message which is +// too large for the size of the public key. +var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size") + +func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int { + e := big.NewInt(int64(pub.E)) + c.Exp(m, e, pub.N) + return c +} + +// EncryptOAEP encrypts the given message with RSA-OAEP. +// +// OAEP is parameterised by a hash function that is used as a random oracle. +// Encryption and decryption of a given message must use the same hash function +// and sha256.New() is a reasonable choice. +// +// The random parameter is used as a source of entropy to ensure that +// encrypting the same message twice doesn't result in the same ciphertext. +// +// The label parameter may contain arbitrary data that will not be encrypted, +// but which gives important context to the message. For example, if a given +// public key is used to decrypt two types of messages then distinct label +// values could be used to ensure that a ciphertext for one purpose cannot be +// used for another by an attacker. If not required it can be empty. +// +// The message must be no longer than the length of the public modulus less +// twice the hash length plus 2. 
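Concretely, the limit is k - 2*hash.Size() - 2 bytes for a k-byte modulus, so a 2048-bit key used with SHA-256 can carry at most 256 - 64 - 2 = 190 bytes per message. A quick check against the standard-library crypto/rsa API, assumed to behave like this vendored copy:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	k := (priv.N.BitLen() + 7) / 8       // 256 bytes for a 2048-bit modulus
	max := k - 2*sha256.New().Size() - 2 // 256 - 64 - 2 = 190 bytes
	fmt.Println(max)

	// One byte past the bound fails with ErrMessageTooLong.
	_, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &priv.PublicKey, make([]byte, max+1), nil)
	fmt.Println(err == rsa.ErrMessageTooLong) // true
}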
+func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err error) { + if err := checkPub(pub); err != nil { + return nil, err + } + hash.Reset() + k := (pub.N.BitLen() + 7) / 8 + if len(msg) > k-2*hash.Size()-2 { + err = ErrMessageTooLong + return + } + + hash.Write(label) + lHash := hash.Sum(nil) + hash.Reset() + + em := make([]byte, k) + seed := em[1 : 1+hash.Size()] + db := em[1+hash.Size():] + + copy(db[0:hash.Size()], lHash) + db[len(db)-len(msg)-1] = 1 + copy(db[len(db)-len(msg):], msg) + + _, err = io.ReadFull(random, seed) + if err != nil { + return + } + + mgf1XOR(db, hash, seed) + mgf1XOR(seed, hash, db) + + m := new(big.Int) + m.SetBytes(em) + c := encrypt(new(big.Int), pub, m) + out = c.Bytes() + + if len(out) < k { + // If the output is too small, we need to left-pad with zeros. + t := make([]byte, k) + copy(t[k-len(out):], out) + out = t + } + + return +} + +// ErrDecryption represents a failure to decrypt a message. +// It is deliberately vague to avoid adaptive attacks. +var ErrDecryption = errors.New("crypto/rsa: decryption error") + +// ErrVerification represents a failure to verify a signature. +// It is deliberately vague to avoid adaptive attacks. +var ErrVerification = errors.New("crypto/rsa: verification error") + +// modInverse returns ia, the inverse of a in the multiplicative group of prime +// order n. It requires that a be a member of the group (i.e. less than n). +func modInverse(a, n *big.Int) (ia *big.Int, ok bool) { + g := new(big.Int) + x := new(big.Int) + y := new(big.Int) + g.GCD(x, y, a, n) + if g.Cmp(bigOne) != 0 { + // In this case, a and n aren't coprime and we cannot calculate + // the inverse. This happens because the values of n are nearly + // prime (being the product of two primes) rather than truly + // prime. + return + } + + if x.Cmp(bigOne) < 0 { + // 0 is not the multiplicative inverse of any element so, if x + // < 1, then x is negative. + x.Add(x, n) + } + + return x, true +} + +// Precompute performs some calculations that speed up private key operations +// in the future. +func (priv *PrivateKey) Precompute() { + if priv.Precomputed.Dp != nil { + return + } + + priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne) + priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp) + + priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne) + priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq) + + priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0]) + + r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1]) + priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2) + for i := 2; i < len(priv.Primes); i++ { + prime := priv.Primes[i] + values := &priv.Precomputed.CRTValues[i-2] + + values.Exp = new(big.Int).Sub(prime, bigOne) + values.Exp.Mod(priv.D, values.Exp) + + values.R = new(big.Int).Set(r) + values.Coeff = new(big.Int).ModInverse(r, prime) + + r.Mul(r, prime) + } +} + +// decrypt performs an RSA decryption, resulting in a plaintext integer. If a +// random source is given, RSA blinding is used. +func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) { + // TODO(agl): can we get away with reusing blinds? + if c.Cmp(priv.N) > 0 { + err = ErrDecryption + return + } + + var ir *big.Int + if random != nil { + // Blinding enabled. Blinding involves multiplying c by r^e. + // Then the decryption operation performs (m^e * r^e)^d mod n + // which equals mr mod n. 
The factor of r can then be removed
+		// by multiplying by the multiplicative inverse of r.
+
+		var r *big.Int
+
+		for {
+			r, err = rand.Int(random, priv.N)
+			if err != nil {
+				return
+			}
+			if r.Cmp(bigZero) == 0 {
+				r = bigOne
+			}
+			var ok bool
+			ir, ok = modInverse(r, priv.N)
+			if ok {
+				break
+			}
+		}
+		bigE := big.NewInt(int64(priv.E))
+		rpowe := new(big.Int).Exp(r, bigE, priv.N)
+		cCopy := new(big.Int).Set(c)
+		cCopy.Mul(cCopy, rpowe)
+		cCopy.Mod(cCopy, priv.N)
+		c = cCopy
+	}
+
+	if priv.Precomputed.Dp == nil {
+		m = new(big.Int).Exp(c, priv.D, priv.N)
+	} else {
+		// We have the precalculated values needed for the CRT.
+		m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+		m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+		m.Sub(m, m2)
+		if m.Sign() < 0 {
+			m.Add(m, priv.Primes[0])
+		}
+		m.Mul(m, priv.Precomputed.Qinv)
+		m.Mod(m, priv.Primes[0])
+		m.Mul(m, priv.Primes[1])
+		m.Add(m, m2)
+
+		for i, values := range priv.Precomputed.CRTValues {
+			prime := priv.Primes[2+i]
+			m2.Exp(c, values.Exp, prime)
+			m2.Sub(m2, m)
+			m2.Mul(m2, values.Coeff)
+			m2.Mod(m2, prime)
+			if m2.Sign() < 0 {
+				m2.Add(m2, prime)
+			}
+			m2.Mul(m2, values.R)
+			m.Add(m, m2)
+		}
+	}
+
+	if ir != nil {
+		// Unblind.
+		m.Mul(m, ir)
+		m.Mod(m, priv.N)
+	}
+
+	return
+}
+
+func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+	m, err = decrypt(random, priv, c)
+	if err != nil {
+		return nil, err
+	}
+
+	// In order to defend against errors in the CRT computation, m^e is
+	// calculated, which should match the original ciphertext.
+	check := encrypt(new(big.Int), &priv.PublicKey, m)
+	if c.Cmp(check) != 0 {
+		return nil, errors.New("rsa: internal error")
+	}
+	return m, nil
+}
+
+// DecryptOAEP decrypts ciphertext using RSA-OAEP.
+//
+// OAEP is parameterised by a hash function that is used as a random oracle.
+// Encryption and decryption of a given message must use the same hash function
+// and sha256.New() is a reasonable choice.
+//
+// The random parameter, if not nil, is used to blind the private-key operation
+// and avoid timing side-channel attacks. Blinding is purely internal to this
+// function – the random data need not match that used when encrypting.
+//
+// The label parameter must match the value given when encrypting. See
+// EncryptOAEP for details.
+func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) {
+	if err := checkPub(&priv.PublicKey); err != nil {
+		return nil, err
+	}
+	k := (priv.N.BitLen() + 7) / 8
+	if len(ciphertext) > k ||
+		k < hash.Size()*2+2 {
+		err = ErrDecryption
+		return
+	}
+
+	c := new(big.Int).SetBytes(ciphertext)
+
+	m, err := decrypt(random, priv, c)
+	if err != nil {
+		return
+	}
+
+	hash.Write(label)
+	lHash := hash.Sum(nil)
+	hash.Reset()
+
+	// Converting the plaintext number to bytes will strip any
+	// leading zeros so we may have to left pad. We do this unconditionally
+	// to avoid leaking timing information. (Although we still probably
+	// leak the number of leading zeros. It's not clear that we can do
+	// anything about this.)
+	em := leftPad(m.Bytes(), k)
+
+	firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+	seed := em[1 : hash.Size()+1]
+	db := em[hash.Size()+1:]
+
+	mgf1XOR(seed, hash, db)
+	mgf1XOR(db, hash, seed)
+
+	lHash2 := db[0:hash.Size()]
+
+	// We have to validate the plaintext in constant time in order to avoid
+	// attacks like: J. Manger.
A Chosen Ciphertext Attack on RSA Optimal + // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1 + // v2.0. In J. Kilian, editor, Advances in Cryptology. + lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2) + + // The remainder of the plaintext must be zero or more 0x00, followed + // by 0x01, followed by the message. + // lookingForIndex: 1 iff we are still looking for the 0x01 + // index: the offset of the first 0x01 byte + // invalid: 1 iff we saw a non-zero byte before the 0x01. + var lookingForIndex, index, invalid int + lookingForIndex = 1 + rest := db[hash.Size():] + + for i := 0; i < len(rest); i++ { + equals0 := subtle.ConstantTimeByteEq(rest[i], 0) + equals1 := subtle.ConstantTimeByteEq(rest[i], 1) + index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex) + invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid) + } + + if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 { + err = ErrDecryption + return + } + + msg = rest[index+1:] + return +} + +// leftPad returns a new slice of length size. The contents of input are right +// aligned in the new slice. +func leftPad(input []byte, size int) (out []byte) { + n := len(input) + if n > size { + n = size + } + out = make([]byte, size) + copy(out[len(out)-n:], input) + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 00000000000..91b5cef30eb --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000000..1f28d773d74 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_others.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000000..887f203dc7f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000000..e17a5474e98 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,884 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") +) + +// Writer provide colorable Writer to the 
console +type Writer struct { + out io.Writer + handle syscall.Handle + oldattr word + oldpos coord +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 
0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + if c2 == ']' { + if err := doTitleSequence(er); err != nil { + break loop + } + continue + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + var m byte + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = 
strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), 
uintptr(unsafe.Pointer(&written)))
+		procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+	case 'K':
+		n := 0
+		if buf.Len() > 0 {
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+		}
+		procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		var cursor coord
+		var count, written dword
+		switch n {
+		case 0:
+			cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y}
+			count = dword(csbi.size.x - csbi.cursorPosition.x - 1)
+		case 1:
+			cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+		case 2:
+			cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			count = dword(csbi.size.x)
+		}
+		procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+	case 'm':
+		procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		attr := csbi.attributes
+		cs := buf.String()
+		if cs == "" {
+			procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+			continue
+		}
+		token := strings.Split(cs, ";")
+		for i := 0; i < len(token); i++ {
+			ns := token[i]
+			if n, err = strconv.Atoi(ns); err == nil {
+				switch {
+				case n == 0 || n == 100:
+					attr = w.oldattr
+				case 1 <= n && n <= 5:
+					attr |= foregroundIntensity
+				case n == 7:
+					attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+				case n == 22 || n == 25:
+					attr |= foregroundIntensity
+				case n == 27:
+					attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+				case 30 <= n && n <= 37:
+					attr &= backgroundMask
+					if (n-30)&1 != 0 {
+						attr |= foregroundRed
+					}
+					if (n-30)&2 != 0 {
+						attr |= foregroundGreen
+					}
+					if (n-30)&4 != 0 {
+						attr |= foregroundBlue
+					}
+				case n == 38: // set foreground color.
+					if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+						if n256, err := strconv.Atoi(token[i+2]); err == nil {
+							if n256foreAttr == nil {
+								n256setup()
+							}
+							attr &= backgroundMask
+							attr |= n256foreAttr[n256]
+							i += 2
+						}
+					} else {
+						attr = attr & (w.oldattr & backgroundMask)
+					}
+				case n == 39: // reset foreground color.
+					attr &= backgroundMask
+					attr |= w.oldattr & foregroundMask
+				case 40 <= n && n <= 47:
+					attr &= foregroundMask
+					if (n-40)&1 != 0 {
+						attr |= backgroundRed
+					}
+					if (n-40)&2 != 0 {
+						attr |= backgroundGreen
+					}
+					if (n-40)&4 != 0 {
+						attr |= backgroundBlue
+					}
+				case n == 48: // set background color.
+					if i < len(token)-2 && token[i+1] == "5" {
+						if n256, err := strconv.Atoi(token[i+2]); err == nil {
+							if n256backAttr == nil {
+								n256setup()
+							}
+							attr &= foregroundMask
+							attr |= n256backAttr[n256]
+							i += 2
+						}
+					} else {
+						attr = attr & (w.oldattr & foregroundMask)
+					}
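+				// Worked example: "\x1b[31;1m" reaches this switch as
+				// token = ["31", "1"]. The 30-37 arm sets foregroundRed,
+				// the 1-5 arm adds foregroundIntensity, and a later
+				// "\x1b[0m" takes the n == 0 arm, restoring w.oldattr.
+				case n == 49: // reset background color.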
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, 
g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/noncolorable.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000000..9721e16f4bf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable hold writer but remove escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable return new instance of Writer which remove escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write write data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000000..65dc692b6b1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000000..17d4f90ebcc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_android.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 00000000000..d3567cb5bf2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000000..07e93039dbe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_others.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000000..ff714a37615 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000000..bc0a70920f4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(fd) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000000..bdd5c79a07f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000000..453b025d0df --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
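The per-platform files all export the same two predicates, which is what lets callers gate escape-sequence output with a single check before handing the stream to go-colorable. A small sketch combining the two packages:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	var out io.Writer = colorable.NewColorableStdout()
	if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		// Not a terminal: strip escape sequences rather than translate them.
		out = colorable.NewNonColorable(os.Stdout)
	}
	fmt.Fprintf(out, "\x1b[31;1mbright red\x1b[0m plain \x1b[38;5;208m256-color\x1b[0m\n")
}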
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_windows.go b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 00000000000..1fa86915405
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"errors"
+	"strings"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	objectNameInfo uintptr = 1
+	fileNameInfo           = 2
+	fileTypePipe           = 3
+)
+
+var (
+	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
+	ntdll                            = syscall.NewLazyDLL("ntdll.dll")
+	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
+	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileType                  = kernel32.NewProc("GetFileType")
+	procNtQueryObject                = ntdll.NewProc("NtQueryObject")
+)
+
+func init() {
+	// Check if GetFileInformationByHandleEx is available.
+	if procGetFileInformationByHandleEx.Find() != nil {
+		procGetFileInformationByHandleEx = nil
+	}
+}
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
+
+// isCygwinPipeName reports whether the pipe name is one used by a
+// cygwin/msys2 pty. A Cygwin/MSYS2 PTY has a name like:
+//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+	token := strings.Split(name, "-")
+	if len(token) < 5 {
+		return false
+	}
+
+	if token[0] != `\msys` &&
+		token[0] != `\cygwin` &&
+		token[0] != `\Device\NamedPipe\msys` &&
+		token[0] != `\Device\NamedPipe\cygwin` {
+		return false
+	}
+
+	if token[1] == "" {
+		return false
+	}
+
+	if !strings.HasPrefix(token[2], "pty") {
+		return false
+	}
+
+	if token[3] != `from` && token[3] != `to` {
+		return false
+	}
+
+	if token[4] != "master" {
+		return false
+	}
+
+	return true
+}
+
+// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get
+// the full file name from a file handle. GetFileInformationByHandleEx is not
+// available before Windows Vista, and some users are still on Windows XP, so
+// this is a workaround for them; it also works on systems from Windows Vista
+// to 10. See https://stackoverflow.com/a/18792477 for details.
+func getFileNameByHandle(fd uintptr) (string, error) {
+	if procNtQueryObject == nil {
+		return "", errors.New("ntdll.dll: NtQueryObject not supported")
+	}
+
+	var buf [4 + syscall.MAX_PATH]uint16
+	var result int
+	r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+		fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+	if r != 0 {
+		return "", e
+	}
+	return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+	if procGetFileInformationByHandleEx == nil {
+		name, err := getFileNameByHandle(fd)
+		if err != nil {
+			return false
+		}
+		return isCygwinPipeName(name)
+	}
+
+	// Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/autocomplete.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 00000000000..3bec6258f07 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. +// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. 
+type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/cli.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 00000000000..c2dbe55aa07 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,720 @@ +package cli + +import ( + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. 
The factory may be called multiple times in the course
+	// of a command execution and certain events such as help require the
+	// instantiation of all commands. Expensive initialization should be
+	// deferred to function calls within the interface implementation.
+	Commands map[string]CommandFactory
+
+	// HiddenCommands is a list of commands that are "hidden". Hidden
+	// commands are not given to the help function callback and do not
+	// show up in autocomplete. The values in the slice should be equivalent
+	// to the keys in the command map.
+	HiddenCommands []string
+
+	// Name defines the name of the CLI.
+	Name string
+
+	// Version of the CLI.
+	Version string
+
+	// Autocomplete enables or disables subcommand auto-completion support.
+	// This is enabled by default when NewCLI is called. Otherwise, this
+	// must be enabled explicitly.
+	//
+	// Autocomplete requires the "Name" option to be set on CLI. This name
+	// should be set exactly to the binary name that is autocompleted.
+	//
+	// Autocompletion is supported via the github.com/posener/complete
+	// library. This library supports bash, zsh and fish. To add support
+	// for other shells, please see that library.
+	//
+	// AutocompleteInstall and AutocompleteUninstall are the global flag
+	// names for installing and uninstalling the autocompletion handlers
+	// for the user's shell. The flag should omit the hyphen(s) in front of
+	// the value. Both single and double hyphens will automatically be supported
+	// for the flag name. These default to `autocomplete-install` and
+	// `autocomplete-uninstall` respectively.
+	//
+	// AutocompleteNoDefaultFlags is a boolean which controls if the default auto-
+	// complete flags like -help and -version are added to the output.
+	//
+	// AutocompleteGlobalFlags are a mapping of global flags for
+	// autocompletion. The help and version flags are automatically added.
+	Autocomplete               bool
+	AutocompleteInstall        string
+	AutocompleteUninstall      string
+	AutocompleteNoDefaultFlags bool
+	AutocompleteGlobalFlags    complete.Flags
+	autocompleteInstaller      autocompleteInstaller // For tests
+
+	// HelpFunc and HelpWriter are used to output help information, if
+	// requested.
+	//
+	// HelpFunc is the function called to generate the generic help
+	// text that is shown if help must be shown for the CLI that doesn't
+	// pertain to a specific command.
+	//
+	// HelpWriter is the Writer where the help text is outputted to. If
+	// not specified, it will default to Stderr.
+	HelpFunc   HelpFunc
+	HelpWriter io.Writer
+
+	//---------------------------------------------------------------
+	// Internal fields set automatically
+
+	once           sync.Once
+	autocomplete   *complete.Complete
+	commandTree    *radix.Tree
+	commandNested  bool
+	commandHidden  map[string]struct{}
+	subcommand     string
+	subcommandArgs []string
+	topFlags       []string
+
+	// These are true when special global flags are set. We can/should
+	// probably use a bitset for this one day.
+	isHelp                  bool
+	isVersion               bool
+	isAutocompleteInstall   bool
+	isAutocompleteUninstall bool
+}
+
+// NewCLI returns a new CLI instance with sensible defaults.
+func NewCLI(app, version string) *CLI {
+	return &CLI{
+		Name:         app,
+		Version:      version,
+		HelpFunc:     BasicHelpFunc(app),
+		Autocomplete: true,
+	}
+}
+
+// IsHelp returns whether or not the help flag is present within the
+// arguments.
+func (c *CLI) IsHelp() bool {
+	c.once.Do(c.init)
+	return c.isHelp
+}
+
+// IsVersion returns whether or not the version flag is present within the
+// arguments.
+func (c *CLI) IsVersion() bool {
+	c.once.Do(c.init)
+	return c.isVersion
+}
+
+// Run runs the actual CLI based on the arguments given.
+func (c *CLI) Run() (int, error) {
+	c.once.Do(c.init)
+
+	// If this is an autocompletion request, satisfy it. This must be called
+	// first before anything else since it's possible to be autocompleting
+	// -help or -version or other flags and we want to show completions
+	// and not actually write the help or version.
+	if c.Autocomplete && c.autocomplete.Complete() {
+		return 0, nil
+	}
+
+	// Just show the version and exit if instructed.
+	if c.IsVersion() && c.Version != "" {
+		c.HelpWriter.Write([]byte(c.Version + "\n"))
+		return 0, nil
+	}
+
+	// Just print the help when only '-h' or '--help' is passed.
+	if c.IsHelp() && c.Subcommand() == "" {
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n"))
+		return 0, nil
+	}
+
+	// If we're attempting to install or uninstall autocomplete, then handle it.
+	if c.Autocomplete {
+		// Autocomplete requires the "Name" to be set so that we know what
+		// command to set up the autocomplete on.
+		if c.Name == "" {
+			return 1, fmt.Errorf(
+				"internal error: CLI.Name must be specified for autocomplete to work")
+		}
+
+		// If both install and uninstall flags are specified, then error.
+		if c.isAutocompleteInstall && c.isAutocompleteUninstall {
+			return 1, fmt.Errorf(
+				"Either the autocomplete install or uninstall flag may " +
+					"be specified, but not both.")
+		}
+
+		// If the install flag is specified, perform the install.
+		if c.isAutocompleteInstall {
+			if err := c.autocompleteInstaller.Install(c.Name); err != nil {
+				return 1, err
+			}
+
+			return 0, nil
+		}
+
+		if c.isAutocompleteUninstall {
+			if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil {
+				return 1, err
+			}
+
+			return 0, nil
+		}
+	}
+
+	// Attempt to get the factory function for creating the command
+	// implementation. If the command is invalid or blank, it is an error.
+	raw, ok := c.commandTree.Get(c.Subcommand())
+	if !ok {
+		c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n"))
+		return 127, nil
+	}
+
+	command, err := raw.(CommandFactory)()
+	if err != nil {
+		return 1, err
+	}
+
+	// If we've been instructed to just print the help, then print it.
+	if c.IsHelp() {
+		c.commandHelp(command)
+		return 0, nil
+	}
+
+	// If there is an invalid flag, then error.
+	if len(c.topFlags) > 0 {
+		c.HelpWriter.Write([]byte(
+			"Invalid flags before the subcommand. If these flags are for\n" +
+				"the subcommand, please put them after the subcommand.\n\n"))
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	code := command.Run(c.SubcommandArgs())
+	if code == RunResultHelp {
+		// Requesting help
+		c.commandHelp(command)
+		return 1, nil
+	}
+
+	return code, nil
+}
+
+// Subcommand returns the subcommand that the CLI would execute. For
+// example, a CLI from "--version version --help" would return a Subcommand
+// of "version".
+func (c *CLI) Subcommand() string {
+	c.once.Do(c.init)
+	return c.subcommand
+}
+
+// SubcommandArgs returns the arguments that will be passed to the
+// subcommand.
+func (c *CLI) SubcommandArgs() []string {
+	c.once.Do(c.init)
+	return c.subcommandArgs
+}
+
+// subcommandParent returns the parent of this subcommand, if there is one.
+// If there isn't one, "" is returned.
+func (c *CLI) subcommandParent() string {
+	// Get the subcommand; if it is "" already, just return.
+	sub := c.Subcommand()
+	if sub == "" {
+		return sub
+	}
+
+	// Clear any trailing spaces and find the last space
+	sub = strings.TrimRight(sub, " ")
+	idx := strings.LastIndex(sub, " ")
+
+	if idx == -1 {
+		// No space means our parent is root
+		return ""
+	}
+
+	return sub[:idx]
+}
+
+func (c *CLI) init() {
+	if c.HelpFunc == nil {
+		c.HelpFunc = BasicHelpFunc("app")
+
+		if c.Name != "" {
+			c.HelpFunc = BasicHelpFunc(c.Name)
+		}
+	}
+
+	if c.HelpWriter == nil {
+		c.HelpWriter = os.Stderr
+	}
+
+	// Build our hidden commands
+	if len(c.HiddenCommands) > 0 {
+		c.commandHidden = make(map[string]struct{})
+		for _, h := range c.HiddenCommands {
+			c.commandHidden[h] = struct{}{}
+		}
+	}
+
+	// Build our command tree
+	c.commandTree = radix.New()
+	c.commandNested = false
+	for k, v := range c.Commands {
+		k = strings.TrimSpace(k)
+		c.commandTree.Insert(k, v)
+		if strings.ContainsRune(k, ' ') {
+			c.commandNested = true
+		}
+	}
+
+	// Go through the keys and fill in any missing parent commands
+	if c.commandNested {
+		var walkFn radix.WalkFn
+		toInsert := make(map[string]struct{})
+		walkFn = func(k string, raw interface{}) bool {
+			idx := strings.LastIndex(k, " ")
+			if idx == -1 {
+				// If there is no space, just ignore top level commands
+				return false
+			}
+
+			// Trim up to that space so we can get the expected parent
+			k = k[:idx]
+			if _, ok := c.commandTree.Get(k); ok {
+				// Yay we have the parent!
+				return false
+			}
+
+			// We're missing the parent, so let's insert this
+			toInsert[k] = struct{}{}
+
+			// Call the walk function recursively so we check this one too
+			return walkFn(k, nil)
+		}
+
+		// Walk!
+		c.commandTree.Walk(walkFn)
+
+		// Insert any that we're missing
+		for k := range toInsert {
+			var f CommandFactory = func() (Command, error) {
+				return &MockCommand{
+					HelpText:  "This command is accessed by using one of the subcommands below.",
+					RunResult: RunResultHelp,
+				}, nil
+			}
+
+			c.commandTree.Insert(k, f)
+		}
+	}
+
+	// Set up autocomplete if we have it enabled. We have to do this after
+	// the command tree is set up so we can use the radix tree to easily find
+	// all subcommands.
+	if c.Autocomplete {
+		c.initAutocomplete()
+	}
+
+	// Process the args
+	c.processArgs()
+}
+
+func (c *CLI) initAutocomplete() {
+	if c.AutocompleteInstall == "" {
+		c.AutocompleteInstall = defaultAutocompleteInstall
+	}
+
+	if c.AutocompleteUninstall == "" {
+		c.AutocompleteUninstall = defaultAutocompleteUninstall
+	}
+
+	if c.autocompleteInstaller == nil {
+		c.autocompleteInstaller = &realAutocompleteInstaller{}
+	}
+
+	// Build the root command
+	cmd := c.initAutocompleteSub("")
+
+	// For the root, we add the global flags to the "Flags". This way
+	// they don't show up on every command.
+	if !c.AutocompleteNoDefaultFlags {
+		cmd.Flags = map[string]complete.Predictor{
+			"-" + c.AutocompleteInstall:   complete.PredictNothing,
+			"-" + c.AutocompleteUninstall: complete.PredictNothing,
+			"-help":    complete.PredictNothing,
+			"-version": complete.PredictNothing,
+		}
+	}
+	cmd.GlobalFlags = c.AutocompleteGlobalFlags
+
+	c.autocomplete = complete.New(c.Name, cmd)
+}
+
+// initAutocompleteSub creates the complete.Command for a subcommand with
+// the given prefix. This will continue recursively for all subcommands.
+// The prefix "" (empty string) can be used for the root command.
+func (c *CLI) initAutocompleteSub(prefix string) complete.Command {
+	var cmd complete.Command
+	walkFn := func(k string, raw interface{}) bool {
+		// Ignore the empty key which can be present for default commands.
+		if k == "" {
+			return false
+		}
+
+		// Keep track of the full key so that we can nest further if necessary
+		fullKey := k
+
+		if len(prefix) > 0 {
+			// If we have a prefix, trim the prefix + 1 (for the space)
+			// Example: turns "sub one" to "one" with prefix "sub"
+			k = k[len(prefix)+1:]
+		}
+
+		if idx := strings.Index(k, " "); idx >= 0 {
+			// If there is a space, we trim up to the space. This turns
+			// "sub sub2 sub3" into "sub". The prefix trim above will
+			// trim our current depth properly.
+			k = k[:idx]
+		}
+
+		if _, ok := cmd.Sub[k]; ok {
+			// If we already tracked this subcommand then ignore
+			return false
+		}
+
+		// If the command is hidden, don't record it at all
+		if _, ok := c.commandHidden[fullKey]; ok {
+			return false
+		}
+
+		if cmd.Sub == nil {
+			cmd.Sub = complete.Commands(make(map[string]complete.Command))
+		}
+		subCmd := c.initAutocompleteSub(fullKey)
+
+		// Instantiate the command so that we can check if the command is
+		// a CommandAutocomplete implementation. If there is an error
+		// creating the command, we just ignore it since that will be caught
+		// later.
+		impl, err := raw.(CommandFactory)()
+		if err != nil {
+			impl = nil
+		}
+
+		// Check if it implements CommandAutocomplete. If so, set up the autocomplete.
+		if c, ok := impl.(CommandAutocomplete); ok {
+			subCmd.Args = c.AutocompleteArgs()
+			subCmd.Flags = c.AutocompleteFlags()
+		}
+
+		cmd.Sub[k] = subCmd
+		return false
+	}
+
+	walkPrefix := prefix
+	if walkPrefix != "" {
+		walkPrefix += " "
+	}
+
+	c.commandTree.WalkPrefix(walkPrefix, walkFn)
+	return cmd
+}
+
+func (c *CLI) commandHelp(command Command) {
+	// Get the template to use
+	tpl := strings.TrimSpace(defaultHelpTemplate)
+	if t, ok := command.(CommandHelpTemplate); ok {
+		tpl = t.HelpTemplate()
+	}
+	if !strings.HasSuffix(tpl, "\n") {
+		tpl += "\n"
+	}
+
+	// Parse it
+	t, err := template.New("root").Parse(tpl)
+	if err != nil {
+		t = template.Must(template.New("root").Parse(fmt.Sprintf(
+			"Internal error!
Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(c.HelpWriter, data) + if err == nil { + return + } + + // An error, just output... + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. + if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... 
+				c.topFlags = append(c.topFlags, arg)
+			}
+		}
+
+		// If we didn't find a subcommand yet and this is the first non-flag
+		// argument, then this is our subcommand.
+		if c.subcommand == "" && arg != "" && arg[0] != '-' {
+			c.subcommand = arg
+			if c.commandNested {
+				// If the command has a space in it, then it is invalid.
+				// Set a blank command so that it fails.
+				if strings.ContainsRune(arg, ' ') {
+					c.subcommand = ""
+					return
+				}
+
+				// Determine the arguments we look at to end subcommands.
+				// We look at all arguments until one has a space. This
+				// disallows commands like: ./cli foo "bar baz". An argument
+				// with a space is always an argument.
+				j := 0
+				for k, v := range c.Args[i:] {
+					if strings.ContainsRune(v, ' ') {
+						break
+					}
+
+					j = i + k + 1
+				}
+
+				// Nested CLI, the subcommand is actually the entire
+				// arg list up to a flag that is still a valid subcommand.
+				searchKey := strings.Join(c.Args[i:j], " ")
+				k, _, ok := c.commandTree.LongestPrefix(searchKey)
+				if ok {
+					// k could be a prefix that doesn't contain the full
+					// command such as "foo" instead of "foobar", so we
+					// need to verify that we have an entire key. To do that,
+					// we look for an ending in a space or an end of string.
+					reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`)
+					if reVerify.MatchString(searchKey) {
+						c.subcommand = k
+						i += strings.Count(k, " ")
+					}
+				}
+			}
+
+			// The remaining args are the subcommand arguments.
+			c.subcommandArgs = c.Args[i+1:]
+		}
+	}
+
+	// If we never found a subcommand and support a default command, then
+	// switch to using that.
+	if c.subcommand == "" {
+		if _, ok := c.Commands[""]; ok {
+			args := c.topFlags
+			args = append(args, c.subcommandArgs...)
+			c.topFlags = nil
+			c.subcommandArgs = args
+		}
+	}
+}
+
+// defaultAutocompleteInstall and defaultAutocompleteUninstall are the
+// default values for the autocomplete install and uninstall flags.
+const defaultAutocompleteInstall = "autocomplete-install"
+const defaultAutocompleteUninstall = "autocomplete-uninstall"
+
+const defaultHelpTemplate = `
+{{.Help}}{{if gt (len .Subcommands) 0}}
+
+Subcommands:
+{{- range $value := .Subcommands }}
+    {{ $value.NameAligned }}    {{ $value.Synopsis }}{{ end }}
+{{- end }}
+`
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command.go
new file mode 100644
index 00000000000..bed11faf578
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command.go
@@ -0,0 +1,67 @@
+package cli
+
+import (
+	"github.com/posener/complete"
+)
+
+const (
+	// RunResultHelp is a value that can be returned from Run to signal
+	// to the CLI to render the help output.
+	RunResultHelp = -18511
+)
+
+// A Command is a runnable sub-command of a CLI.
+type Command interface {
+	// Help should return long-form help text that includes the command-line
+	// usage, a brief few sentences explaining the function of the command,
+	// and the complete list of flags the command accepts.
+	Help() string
+
+	// Run should run the actual command with the given CLI instance and
+	// command-line arguments. It should return the exit status when it is
+	// finished.
+	//
+	// There are a handful of special exit codes this can return documented
+	// above that change behavior.
+	Run(args []string) int
+
+	// Synopsis should return a one-line, short synopsis of the command.
+	// This should be less than 50 characters ideally.
+ Synopsis() string +} + +// CommandAutocomplete is an extension of Command that enables fine-grained +// autocompletion. Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 00000000000..7a584b7e9b3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
+type MockCommandHelpTemplate struct {
+	MockCommand
+
+	// Settable
+	HelpTemplateText string
+}
+
+func (c *MockCommandHelpTemplate) HelpTemplate() string {
+	return c.HelpTemplateText
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/help.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/help.go
new file mode 100644
index 00000000000..f5ca58f5951
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/help.go
@@ -0,0 +1,79 @@
+package cli
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"sort"
+	"strings"
+)
+
+// HelpFunc is the type of the function that is responsible for generating
+// the help output when the CLI must show the general help text.
+type HelpFunc func(map[string]CommandFactory) string
+
+// BasicHelpFunc generates some basic help output that is usually good enough
+// for most CLI applications.
+func BasicHelpFunc(app string) HelpFunc {
+	return func(commands map[string]CommandFactory) string {
+		var buf bytes.Buffer
+		buf.WriteString(fmt.Sprintf(
+			"Usage: %s [--version] [--help] <command> [<args>]\n\n",
+			app))
+		buf.WriteString("Available commands are:\n")
+
+		// Get the list of keys so we can sort them, and also get the maximum
+		// key length so they can be aligned properly.
+		keys := make([]string, 0, len(commands))
+		maxKeyLen := 0
+		for key := range commands {
+			if len(key) > maxKeyLen {
+				maxKeyLen = len(key)
+			}
+
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		for _, key := range keys {
+			commandFunc, ok := commands[key]
+			if !ok {
+				// This should never happen since we JUST built the list of
+				// keys.
+				panic("command not found: " + key)
+			}
+
+			command, err := commandFunc()
+			if err != nil {
+				log.Printf("[ERR] cli: Command '%s' failed to load: %s",
+					key, err)
+				continue
+			}
+
+			key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
+			buf.WriteString(fmt.Sprintf("    %s    %s\n", key, command.Synopsis()))
+		}
+
+		return buf.String()
+	}
+}
+
+// FilteredHelpFunc will filter the commands to only include the keys
+// in the include parameter.
+func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc {
+	return func(commands map[string]CommandFactory) string {
+		set := make(map[string]struct{})
+		for _, k := range include {
+			set[k] = struct{}{}
+		}
+
+		filtered := make(map[string]CommandFactory)
+		for k, f := range commands {
+			if _, ok := set[k]; ok {
+				filtered[k] = f
+			}
+		}
+
+		return f(filtered)
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui.go
new file mode 100644
index 00000000000..a2d6f94f45e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui.go
@@ -0,0 +1,187 @@
+package cli
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+	"strings"
+
+	"github.com/bgentry/speakeasy"
+	"github.com/mattn/go-isatty"
+)
+
+// Ui is an interface for interacting with the terminal, or "interface"
+// of a CLI. This abstraction doesn't have to be used, but helps provide
+// a simple, layerable way to manage user interactions.
+type Ui interface {
+	// Ask asks the user for input using the given query. The response is
+	// returned as the given string, or an error.
+	Ask(string) (string, error)
+
+	// AskSecret asks the user for input using the given query, but does not echo
+	// the keystrokes to the terminal.
+	AskSecret(string) (string, error)
+
+	// Output is called for normal standard output.
+ Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. +type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. 
+type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_colored.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 00000000000..b0ec44840e2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,73 @@ +package cli + +import ( + "github.com/fatih/color" +) + +const ( + noColor = -1 +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. +var ( + UiColorNone UiColor = UiColor{noColor, false} + UiColorRed = UiColor{int(color.FgHiRed), false} + UiColorGreen = UiColor{int(color.FgHiGreen), false} + UiColorYellow = UiColor{int(color.FgHiYellow), false} + UiColorBlue = UiColor{int(color.FgHiBlue), false} + UiColorMagenta = UiColor{int(color.FgHiMagenta), false} + UiColorCyan = UiColor{int(color.FgHiCyan), false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. 
+type ColoredUi struct {
+	OutputColor UiColor
+	InfoColor   UiColor
+	ErrorColor  UiColor
+	WarnColor   UiColor
+	Ui          Ui
+}
+
+func (u *ColoredUi) Ask(query string) (string, error) {
+	return u.Ui.Ask(u.colorize(query, u.OutputColor))
+}
+
+func (u *ColoredUi) AskSecret(query string) (string, error) {
+	return u.Ui.AskSecret(u.colorize(query, u.OutputColor))
+}
+
+func (u *ColoredUi) Output(message string) {
+	u.Ui.Output(u.colorize(message, u.OutputColor))
+}
+
+func (u *ColoredUi) Info(message string) {
+	u.Ui.Info(u.colorize(message, u.InfoColor))
+}
+
+func (u *ColoredUi) Error(message string) {
+	u.Ui.Error(u.colorize(message, u.ErrorColor))
+}
+
+func (u *ColoredUi) Warn(message string) {
+	u.Ui.Warn(u.colorize(message, u.WarnColor))
+}
+
+func (u *ColoredUi) colorize(message string, uc UiColor) string {
+	if uc.Code == noColor {
+		return message
+	}
+
+	attr := []color.Attribute{color.Attribute(uc.Code)}
+	if uc.Bold {
+		attr = append(attr, color.Bold)
+	}
+
+	return color.New(attr...).SprintFunc()(message)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_concurrent.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_concurrent.go
new file mode 100644
index 00000000000..b4f4dbfaa81
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_concurrent.go
@@ -0,0 +1,54 @@
+package cli
+
+import (
+	"sync"
+)
+
+// ConcurrentUi is a wrapper around a Ui interface (and implements that
+// interface) making the underlying Ui concurrency safe.
+type ConcurrentUi struct {
+	Ui Ui
+	l  sync.Mutex
+}
+
+func (u *ConcurrentUi) Ask(query string) (string, error) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	return u.Ui.Ask(query)
+}
+
+func (u *ConcurrentUi) AskSecret(query string) (string, error) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	return u.Ui.AskSecret(query)
+}
+
+func (u *ConcurrentUi) Error(message string) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	u.Ui.Error(message)
+}
+
+func (u *ConcurrentUi) Info(message string) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	u.Ui.Info(message)
+}
+
+func (u *ConcurrentUi) Output(message string) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	u.Ui.Output(message)
+}
+
+func (u *ConcurrentUi) Warn(message string) {
+	u.l.Lock()
+	defer u.l.Unlock()
+
+	u.Ui.Warn(message)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_mock.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_mock.go
new file mode 100644
index 00000000000..0bfe0a19121
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_mock.go
@@ -0,0 +1,111 @@
+package cli
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// NewMockUi returns a fully initialized MockUi instance
+// which is safe for concurrent use.
+func NewMockUi() *MockUi {
+	m := new(MockUi)
+	m.once.Do(m.init)
+	return m
+}
+
+// MockUi is a mock UI that is used for tests and is exported publicly
+// for use in external tests if needed as well. Do not instantiate this
+// directly since the buffers will be initialized on the first write. If
+// there is no write then you will get a nil panic. Please use the
+// NewMockUi() constructor function instead.
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_writer.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 00000000000..1e1db3cf630 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/LICENSE new file mode 100644 index 00000000000..22985159044 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/colorstring.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/colorstring.go new file mode 100644 index 00000000000..3de5b241d90 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/colorstring/colorstring.go @@ -0,0 +1,244 @@ +// colorstring provides functions for colorizing strings for terminal +// output. +package colorstring + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// Color colorizes your strings using the default settings. +// +// Strings given to Color should use the syntax `[color]` to specify the +// color for text following. For example: `[blue]Hello` will return "Hello" +// in blue. See DefaultColors for all the supported colors and attributes. +// +// If an unrecognized color is given, it is ignored and assumed to be part +// of the string. For example: `[hi]world` will result in "[hi]world". +// +// A color reset is appended to the end of every string. This will reset +// the color of following strings when you output this text to the same +// terminal session. +// +// If you want to customize any of this behavior, use the Colorize struct. +func Color(v string) string { + return def.Color(v) +} + +// ColorPrefix returns the color sequence that prefixes the given text. +// +// This is useful when wrapping text if you want to inherit the color +// of the wrapped text. For example, "[green]foo" will return "[green]". +// If there is no color sequence, then this will return "". +func ColorPrefix(v string) string { + return def.ColorPrefix(v) +} + +// Colorize colorizes your strings, giving you the ability to customize +// some of the colorization process. +// +// The options in Colorize can be set to customize colorization. If you're +// only interested in the defaults, just use the top Color function directly, +// which creates a default Colorize. +type Colorize struct { + // Colors maps a color string to the code for that color. The code + // is a string so that you can use more complex colors to set foreground, + // background, attributes, etc. For example, "boldblue" might be + // "1;34" + Colors map[string]string + + // If true, color attributes will be ignored. This is useful if you're + // outputting to a location that doesn't support colors and you just + // want the strings returned. + Disable bool + + // Reset, if true, will reset the color after each colorization by + // adding a reset code at the end. + Reset bool +} + +// Color colorizes a string according to the settings setup in the struct. +// +// For more details on the syntax, see the top-level Color function. 
+func (c *Colorize) Color(v string) string { + matches := parseRe.FindAllStringIndex(v, -1) + if len(matches) == 0 { + return v + } + + result := new(bytes.Buffer) + colored := false + m := []int{0, 0} + for _, nm := range matches { + // Write the text in between this match and the last + result.WriteString(v[m[1]:nm[0]]) + m = nm + + var replace string + if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { + colored = true + + if !c.Disable { + replace = fmt.Sprintf("\033[%sm", code) + } + } else { + replace = v[m[0]:m[1]] + } + + result.WriteString(replace) + } + result.WriteString(v[m[1]:]) + + if colored && c.Reset && !c.Disable { + // Write the clear byte at the end + result.WriteString("\033[0m") + } + + return result.String() +} + +// ColorPrefix returns the first color sequence that exists in this string. +// +// For example: "[green]foo" would return "[green]". If no color sequence +// exists, then "" is returned. This is especially useful when wrapping +// colored texts to inherit the color of the wrapped text. +func (c *Colorize) ColorPrefix(v string) string { + return prefixRe.FindString(strings.TrimSpace(v)) +} + +// DefaultColors are the default colors used when colorizing. +// +// If the color is surrounded in underscores, such as "_blue_", then that +// color will be used for the background color. +var DefaultColors map[string]string + +func init() { + DefaultColors = map[string]string{ + // Default foreground/background colors + "default": "39", + "_default_": "49", + + // Foreground colors + "black": "30", + "red": "31", + "green": "32", + "yellow": "33", + "blue": "34", + "magenta": "35", + "cyan": "36", + "light_gray": "37", + "dark_gray": "90", + "light_red": "91", + "light_green": "92", + "light_yellow": "93", + "light_blue": "94", + "light_magenta": "95", + "light_cyan": "96", + "white": "97", + + // Background colors + "_black_": "40", + "_red_": "41", + "_green_": "42", + "_yellow_": "43", + "_blue_": "44", + "_magenta_": "45", + "_cyan_": "46", + "_light_gray_": "47", + "_dark_gray_": "100", + "_light_red_": "101", + "_light_green_": "102", + "_light_yellow_": "103", + "_light_blue_": "104", + "_light_magenta_": "105", + "_light_cyan_": "106", + "_white_": "107", + + // Attributes + "bold": "1", + "dim": "2", + "underline": "4", + "blink_slow": "5", + "blink_fast": "6", + "invert": "7", + "hidden": "8", + + // Reset to reset everything to their defaults + "reset": "0", + "reset_bold": "21", + } + + def = Colorize{ + Colors: DefaultColors, + Reset: true, + } +} + +var def Colorize +var parseReRaw = `\[[a-z0-9_-]+\]` +var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) +var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) + +// Print is a convenience wrapper for fmt.Print with support for color codes. +// +// Print formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are added between +// operands when neither is a string. It returns the number of bytes written +// and any write error encountered. +func Print(a string) (n int, err error) { + return fmt.Print(Color(a)) +} + +// Println is a convenience wrapper for fmt.Println with support for color +// codes. +// +// Println formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are always added +// between operands and a newline is appended. It returns the number of bytes +// written and any write error encountered. 
+func Println(a string) (n int, err error) { + return fmt.Println(Color(a)) +} + +// Printf is a convenience wrapper for fmt.Printf with support for color codes. +// +// Printf formats according to a format specifier and writes to standard output +// with support for color codes. It returns the number of bytes written and any +// write error encountered. +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(Color(format), a...) +} + +// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. +// +// Fprint formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are added between operands when neither +// is a string. It returns the number of bytes written and any write error +// encountered. +func Fprint(w io.Writer, a string) (n int, err error) { + return fmt.Fprint(w, Color(a)) +} + +// Fprintln is a convenience wrapper for fmt.Fprintln with support for color +// codes. +// +// Fprintln formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are always added between operands and a +// newline is appended. It returns the number of bytes written and any write +// error encountered. +func Fprintln(w io.Writer, a string) (n int, err error) { + return fmt.Fprintln(w, Color(a)) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf with support for color +// codes. +// +// Fprintf formats according to a format specifier and writes to w with support +// for color codes. It returns the number of bytes written and any write error +// encountered. +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, Color(format), a...) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000000..a3866a291fd --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
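[Editor's note: the colorstring API vendored above is small enough to illustrate end to end. The following is a minimal, hypothetical sketch (not part of the vendored diff) of the `[color]` markup and of a Colorize value configured to strip markup when colors are unwanted; it uses only the Color function, the Colorize struct, and DefaultColors as defined in colorstring.go above.]

```go
package main

import (
	"fmt"

	"github.com/mitchellh/colorstring"
)

func main() {
	// Color resolves [color] tags against DefaultColors and, by default,
	// appends a reset sequence at the end of the string.
	fmt.Println(colorstring.Color("[green]ok[reset] [bold][_red_]ALERT"))

	// Unrecognized tags pass through unchanged: this prints the literal "[hi]world".
	fmt.Println(colorstring.Color("[hi]world"))

	// A Colorize with Disable set strips recognized tags without emitting
	// escape codes -- useful when stdout is not a terminal.
	plain := colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true}
	fmt.Println(plain.Color("[yellow]warning:[reset] disk almost full"))
}
```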
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000000..204afb42005 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. +type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 00000000000..31b42cadf8d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. 
+type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) + t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 00000000000..22985159044 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
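A short sketch of the go-testing-interface pattern above: code written against the vendored T interface can run under go test with a *testing.T, or at runtime with a *RuntimeT (illustrative only; checkDisk and its values are hypothetical):

package main

import (
	"log"

	testing "github.com/mitchellh/go-testing-interface"
)

// checkDisk accepts the interface rather than *testing.T, so the same check
// can run inside tests and as a runtime self-check.
func checkDisk(t testing.T) {
	if freeMB := 42; freeMB < 10 {
		t.Fatalf("only %d MB free", freeMB) // FailNow panics on RuntimeT
	}
	t.Log("disk check passed")
}

func main() {
	t := new(testing.RuntimeT)
	checkDisk(t)
	if t.Failed() {
		log.Fatal("runtime self-check failed")
	}
}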
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 00000000000..ac67205bc2e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/group.go b/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/group.go new file mode 100644 index 00000000000..832d47dd169 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. 
Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. + errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000000..bd899d8353d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/block.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 00000000000..5755cda2460 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,387 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "math/bits" +) + +// blockHash hashes the lower 6 bytes into a value < htSize. 
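+// It shifts the low 48 bits of x to the top of the 64-bit word, multiplies by a 6-byte prime, and keeps the top hashLog bits of the product.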
+func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src); di >= 0 { + return di, nil + } + return 0, ErrInvalidSourceShortBuffer +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// The size of hashTable must be at least 64Kb. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { + defer recoverBlock(&err) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 { + return 0, nil + } + if len(hashTable) < htSize { + return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize) + } + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, anchor int + + // Fast scan strategy: the hash table only stores the last 4-byte sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 matches only requires us to load the source once. + ref := hashTable[h] + ref2 := hashTable[h2] + hashTable[h] = si + hashTable[h2] = si + 1 + offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. + if offset <= 0 || offset >= winSize || // Out of window. + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes.
+ mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop at the first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + _ = dst[di] // Bound check elimination. + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 + } + + if anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// blockHashHC hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { + defer recoverBlock(&err) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 { + return 0, nil + } + var si int + + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + var hashTable, chainTable [winSize]int + + if depth <= 0 { + depth = winSize + } + + anchor := si + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) + + // Follow the chain until out of window and give the longest match.
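+ // chainTable links earlier positions that share a hash; depth bounds how many candidates are tried.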
+ mLen := 0 + offset := 0 + for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop at the first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break + } + } + if ml < minMatch || ml <= mLen { + // Match too small (<minMatch) or smaller than the current match. + continue + } + mLen = ml + offset = si - next + // Try another previous position with the same hash. + try-- + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + + // No match found. + if mLen == 0 { + si += 1 + (si-anchor)>>adaptSkipLog + continue + } + + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. + winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 00000000000..bc5e78d40f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...)
+} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug_stub.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 00000000000..44211ad9645 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 00000000000..43cc14fbe2e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.s b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 00000000000..20fef39759c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. 
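+	// (CX holds the match-length nibble of the token; 0xF means an extended length follows.)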
+ CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // what's a good cutoff to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // match_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // match_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop
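+	// overlapping match copied byte by byte; continue with the next token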
+ JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_other.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 00000000000..919888edf7d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
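+			// The match overlaps the output being written, so double the copied region until it covers mLen bytes.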
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:di+mLen], expanded[:mLen]) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/errors.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/errors.go new file mode 100644 index 00000000000..1c45d1813ce --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/errors.go @@ -0,0 +1,30 @@ +package lz4 + +import ( + "errors" + "fmt" + "os" + rdebug "runtime/debug" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") + // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. + ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. + ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + if debugFlag { + fmt.Fprintln(os.Stderr, r) + rdebug.PrintStack() + } + *e = ErrInvalidSourceShortBuffer + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go new file mode 100644 index 00000000000..7a76a6bce2b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go @@ -0,0 +1,223 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/XXH/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v1 = prime1plus2 + xxh.v2 = prime2 + xxh.v3 = 0 + xxh.v4 = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error.
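+// Input is buffered in 16-byte blocks; only complete blocks feed the four lane accumulators v1..v4.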
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
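+// It is the 4-byte special case of ChecksumZero: one prime mix and rotation, then the final avalanche.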
+func Uint32Zero(x uint32) uint32 { + h := prime5 + 4 + x*prime3 + h = rol17(h) * prime4 + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 00000000000..cdbf9611f48 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,66 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +// +package lz4 + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + + // The following constants are used to set up the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. +) + +// Map the block max size ID to its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +const ( + blockSize64K = 64 << 10 + blockSize256K = 256 << 10 + blockSize1M = 1 << 20 + blockSize4M = 4 << 20 +) + +var ( + bsMapID = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M} + bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7} +) + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller's responsibility to check them if necessary. +type Header struct { + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+ Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 00000000000..9a0fb00709d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 00000000000..12c761a2e7f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/reader.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 00000000000..126b792e712 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,335 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). +// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + r := &Reader{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptor. +// Skippable frames are supported even as a first frame although the LZ4 +// specification recommends that skippable frames not be used as first frames.
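+// When first is false, an EOF while reading the next magic number is returned as io.EOF, since a concatenated stream may simply have ended.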
+func (z *Reader) readHeader(first bool) error {
+	defer z.checksum.Reset()
+
+	buf := z.buf[:]
+	for {
+		magic, err := z.readUint32()
+		if err != nil {
+			z.pos += 4
+			if !first && err == io.ErrUnexpectedEOF {
+				return io.EOF
+			}
+			return err
+		}
+		if magic == frameMagic {
+			break
+		}
+		if magic>>8 != frameSkipMagic>>8 {
+			return ErrInvalid
+		}
+		skipSize, err := z.readUint32()
+		if err != nil {
+			return err
+		}
+		z.pos += 4
+		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+		if err != nil {
+			return err
+		}
+		z.pos += m
+	}
+
+	// Header.
+	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+		return err
+	}
+	z.pos += 2
+
+	b := buf[0]
+	if v := b >> 6; v != Version {
+		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
+	}
+	if b>>5&1 == 0 {
+		return ErrBlockDependency
+	}
+	z.BlockChecksum = b>>4&1 > 0
+	frameSize := b>>3&1 > 0
+	z.NoChecksum = b>>2&1 == 0
+
+	bmsID := buf[1] >> 4 & 0x7
+	bSize, ok := bsMapID[bmsID]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
+	}
+	z.BlockMaxSize = bSize
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size id=%d size=%d", bmsID, bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	_, _ = z.checksum.Write(buf[0:2])
+
+	if frameSize {
+		buf := buf[:8]
+		if _, err := io.ReadFull(z.src, buf); err != nil {
+			return err
+		}
+		z.Size = binary.LittleEndian.Uint64(buf)
+		z.pos += 8
+		_, _ = z.checksum.Write(buf)
+	}
+
+	// Header checksum.
+	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+		return err
+	}
+	z.pos++
+	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
+	}
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *Reader) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readHeader(true); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("reading block from writer")
+		}
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+		// Block length: 0 = end of frame, highest bit set: uncompressed.
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		z.pos += 4
+
+		if bLen == 0 {
+			// End of frame reached.
+			if !z.NoChecksum {
+				// Validate the frame checksum.
+ checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + } + + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. 
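Reset and the forward-only Seek above are the two reuse mechanisms the decoder offers; a brief sketch of both under assumed names (skipThenCopy, nextSrc and the 4096-byte offset are illustrative):

	// Assumes: import ("io"; "github.com/pierrec/lz4")
	func skipThenCopy(src io.Reader, dst io.Writer) error {
		zr := lz4.NewReader(src)
		// Seek is forward-only and must use io.SeekCurrent.
		if _, err := zr.Seek(4096, io.SeekCurrent); err != nil {
			return err
		}
		_, err := io.Copy(dst, zr)
		// zr.Reset(nextSrc) would prepare the same Reader, and its
		// already-allocated buffers, for another stream.
		return err
	}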
+func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 + z.checksum.Reset() +} + +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. +func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/writer.go b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 00000000000..2cc8d95ca7d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,275 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + // Handler called when a block has been successfully written out. + // It provides the number of bytes written. + OnBlockDone func(size int) + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + zdata []byte // Compressed data. + data []byte // Data to be compressed. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + return &Writer{dst: dst} +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = bsMapID[7] + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + bSizeID, ok := bsMapValue[bSize] + if !ok { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if cap(z.zdata) < bSize { + // Only allocate if there is not enough capacity. + // Allocate both buffers at once. + z.zdata = make([]byte, 2*bSize) + } + z.data = z.zdata[:bSize] // Uncompressed buffer is the first half. + z.zdata = z.zdata[:cap(z.zdata)][bSize:] // Compressed buffer is the second half. + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = bSizeID << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. 
+ if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + z.checksum.Write(data) + } + + // The compressed block size cannot exceed the input's. + var zn int + var err error + + if level := z.Header.CompressionLevel; level != 0 { + zn, err = CompressBlockHC(data, z.zdata, level) + } else { + zn, err = CompressBlock(data, z.zdata, z.hashtable[:]) + } + + var zdata []byte + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if err == nil && zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = z.zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + return nil + } + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + if err := z.compressBlock(z.data[:z.idx]); err != nil { + return err + } + z.idx = 0 + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
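Taken together with Write and Flush above, the whole encoding round trip fits in a few lines; a minimal compression sketch (the compress helper, src and dst are illustrative):

	// Assumes: import ("io"; "github.com/pierrec/lz4")
	func compress(dst io.Writer, src io.Reader) error {
		zw := lz4.NewWriter(dst)
		zw.Header.BlockMaxSize = 256 << 10 // optional; the writer defaults to 4MB
		if _, err := io.Copy(zw, src); err != nil {
			return err
		}
		// Close flushes the pending block and writes the end-of-frame marker.
		return zw.Close()
	}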
+func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if z.NoChecksum { + return nil + } + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 +} + +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/LICENSE.txt b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 00000000000..16249b4a1e0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/args.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/args.go new file mode 100644 index 00000000000..3340285e1c7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/args.go @@ -0,0 +1,114 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. 
+ // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. +// +// Deprecated. +func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. +// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. +func splitFields(line string) []string { + parts := strings.Fields(line) + + // Add empty field if the last field was completed. + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + + // Treat the last field if it is of the form "a=b" + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) +} + +// from returns a copy of Args of all arguments after the i'th argument. +func (a Args) from(i int) Args { + if i >= len(a.All) { + i = len(a.All) - 1 + } + a.All = a.All[i+1:] + + if i >= len(a.Completed) { + i = len(a.Completed) - 1 + } + a.Completed = a.Completed[i+1:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) string { + if len(args) == 0 { + return "" + } + return args[len(args)-1] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/cmd.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 00000000000..b99fe529011 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd used for command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI for command line +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// this is used when the complete is not completing words, but to +// install it or uninstall it. 
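A sketch of how a program might wire this CLI into its own flag handling (the command name "mycmd" is hypothetical):

	// Assumes: import ("flag"; "os"; "github.com/posener/complete/cmd")
	func main() {
		cli := cmd.CLI{Name: "mycmd"}
		cli.AddFlags(nil) // registers -install, -uninstall and -y on flag.CommandLine
		flag.Parse()
		if cli.Run() {
			os.Exit(0) // an install/uninstall action was requested and handled
		}
		// ... the real program continues here ...
	}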
+func (f *CLI) Run() bool {
+	err := f.validate()
+	if err != nil {
+		os.Stderr.WriteString(err.Error() + "\n")
+		os.Exit(1)
+	}
+
+	switch {
+	case f.install:
+		f.prompt()
+		err = install.Install(f.Name)
+	case f.uninstall:
+		f.prompt()
+		err = install.Uninstall(f.Name)
+	default:
+		// none of the action flags matched,
+		// returning false should make the real program execute
+		return false
+	}
+
+	if err != nil {
+		fmt.Printf("%s failed! %s\n", f.action(), err)
+		os.Exit(3)
+	}
+	fmt.Println("Done!")
+	return true
+}
+
+// prompt asks the user for approval and
+// exits if approval was not given.
+func (f *CLI) prompt() {
+	defer fmt.Println(f.action() + "ing...")
+	if f.yes {
+		return
+	}
+	fmt.Printf("%s completion for %s? ", f.action(), f.Name)
+	var answer string
+	fmt.Scanln(&answer)
+
+	switch strings.ToLower(answer) {
+	case "y", "yes":
+		return
+	default:
+		fmt.Println("Cancelling...")
+		os.Exit(1)
+	}
+}
+
+// AddFlags adds the CLI flags to the flag set.
+// If flags is nil, the default command line flags will be taken.
+// Set non-empty InstallName and UninstallName fields to override the default
+// flag names.
+func (f *CLI) AddFlags(flags *flag.FlagSet) {
+	if flags == nil {
+		flags = flag.CommandLine
+	}
+
+	if f.InstallName == "" {
+		f.InstallName = defaultInstallName
+	}
+	if f.UninstallName == "" {
+		f.UninstallName = defaultUninstallName
+	}
+
+	if flags.Lookup(f.InstallName) == nil {
+		flags.BoolVar(&f.install, f.InstallName, false,
+			fmt.Sprintf("Install completion for %s command", f.Name))
+	}
+	if flags.Lookup(f.UninstallName) == nil {
+		flags.BoolVar(&f.uninstall, f.UninstallName, false,
+			fmt.Sprintf("Uninstall completion for %s command", f.Name))
+	}
+	if flags.Lookup("y") == nil {
+		flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion")
+	}
+}
+
+// validate the CLI
+func (f *CLI) validate() error {
+	if f.install && f.uninstall {
+		return errors.New("Install and uninstall are mutually exclusive")
+	}
+	return nil
+}
+
+// action returns the action name according to the CLI values.
+func (f *CLI) action() string {
+	switch {
+	case f.install:
+		return "Install"
+	case f.uninstall:
+		return "Uninstall"
+	default:
+		return "unknown"
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/bash.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/bash.go
new file mode 100644
index 00000000000..17c64de1362
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/bash.go
@@ -0,0 +1,37 @@
+package install
+
+import "fmt"
+
+// (un)install in bash
+// basically adds/removes from .bashrc:
+//
+//	complete -C </path/to/completion/command> <command>
+type bash struct {
+	rc string
+}
+
+func (b bash) IsInstalled(cmd, bin string) bool {
+	completeCmd := b.cmd(cmd, bin)
+	return lineInFile(b.rc, completeCmd)
+}
+
+func (b bash) Install(cmd, bin string) error {
+	if b.IsInstalled(cmd, bin) {
+		return fmt.Errorf("already installed in %s", b.rc)
+	}
+	completeCmd := b.cmd(cmd, bin)
+	return appendToFile(b.rc, completeCmd)
+}
+
+func (b bash) Uninstall(cmd, bin string) error {
+	if !b.IsInstalled(cmd, bin) {
+		return fmt.Errorf("not installed in %s", b.rc)
+	}
+
+	completeCmd := b.cmd(cmd, bin)
+	return removeFromFile(b.rc, completeCmd)
+}
+
+func (bash) cmd(cmd, bin string) string {
+	return fmt.Sprintf("complete -C %s %s", bin, cmd)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/fish.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/fish.go
new file mode 100644
index 00000000000..2b64bfc832b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/fish.go
@@ -0,0 +1,69 @@
+package install
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"text/template"
+)
+
+// (un)install in fish
+
+type fish struct {
+	configDir string
+}
+
+func (f fish) IsInstalled(cmd, bin string) bool {
+	completionFile := f.getCompletionFilePath(cmd)
+	if _, err := os.Stat(completionFile); err == nil {
+		return true
+	}
+	return false
+}
+
+func (f fish) Install(cmd, bin string) error {
+	if f.IsInstalled(cmd, bin) {
+		return fmt.Errorf("already installed at %s", f.getCompletionFilePath(cmd))
+	}
+
+	completionFile := f.getCompletionFilePath(cmd)
+	completeCmd, err := f.cmd(cmd, bin)
+	if err != nil {
+		return err
+	}
+
+	return createFile(completionFile, completeCmd)
+}
+
+func (f fish) Uninstall(cmd, bin string) error {
+	if !f.IsInstalled(cmd, bin) {
+		return fmt.Errorf("not installed in %s", f.configDir)
+	}
+
+	completionFile := f.getCompletionFilePath(cmd)
+	return os.Remove(completionFile)
+}
+
+func (f fish) getCompletionFilePath(cmd string) string {
+	return filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd))
+}
+
+func (f fish) cmd(cmd, bin string) (string, error) {
+	var buf bytes.Buffer
+	params := struct{ Cmd, Bin string }{cmd, bin}
+	tmpl := template.Must(template.New("cmd").Parse(`
+function __complete_{{.Cmd}}
+    set -lx COMP_LINE (commandline -cp)
+    test -z (commandline -ct)
+    and set COMP_LINE "$COMP_LINE "
+    {{.Bin}}
+end
+complete -f -c {{.Cmd}} -a "(__complete_{{.Cmd}})"
+`))
+	err := tmpl.Execute(&buf, params)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/install.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/install.go
new file mode 100644
index 00000000000..884c23f5b46
--- /dev/null
+++ 
b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,148 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + "runtime" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + IsInstalled(cmd, bin string) bool + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +// IsInstalled returns true if the completion +// for the given cmd is installed. +func IsInstalled(cmd string) bool { + bin, err := getBinaryPath() + if err != nil { + return false + } + + for _, i := range installers() { + installed := i.IsInstalled(cmd, bin) + if installed { + return true + } + } + + return false +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to uninstall") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + // The list of bash config files candidates where it is + // possible to install the completion command. + var bashConfFiles []string + switch runtime.GOOS { + case "darwin": + bashConfFiles = []string{".bash_profile"} + default: + bashConfFiles = []string{".bashrc", ".bash_profile", ".bash_login", ".profile"} + } + for _, rc := range bashConfFiles { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f != "" { + i = append(i, zsh{f}) + } + if d := fishConfigDir(); d != "" { + i = append(i, fish{d}) + } + return +} + +func fishConfigDir() string { + configDir := filepath.Join(getConfigHomePath(), "fish") + if configDir == "" { + return "" + } + if info, err := os.Stat(configDir); err != nil || !info.IsDir() { + return "" + } + return configDir +} + +func getConfigHomePath() string { + u, err := user.Current() + if err != nil { + return "" + } + + configHome := os.Getenv("XDG_CONFIG_HOME") + if configHome == "" { + return filepath.Join(u.HomeDir, ".config") + } + return configHome +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/utils.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 00000000000..d34ac8cae8b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,140 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + 
r := bufio.NewReader(f) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) + if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func createFile(name string, content string) error { + // make sure file directory exists + if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { + return err + } + + // create the file + f, err := os.Create(name) + if err != nil { + return err + } + defer f.Close() + + // write file content + _, err = f.WriteString(fmt.Sprintf("%s\n", content)) + return err +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
+		str := string(line)
+		if str == content {
+			continue
+		}
+		_, err = wf.WriteString(str + "\n")
+		if err != nil {
+			return "", err
+		}
+		prefix = prefix[:0]
+	}
+	return wf.Name(), nil
+}
+
+func copyFile(src string, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+	_, err = io.Copy(out, in)
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/zsh.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/zsh.go
new file mode 100644
index 00000000000..29950ab171f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/cmd/install/zsh.go
@@ -0,0 +1,44 @@
+package install
+
+import "fmt"
+
+// (un)install in zsh
+// basically adds/removes from .zshrc:
+//
+//	autoload -U +X bashcompinit && bashcompinit
+//	complete -C </path/to/completion/command> <command>
+type zsh struct {
+	rc string
+}
+
+func (z zsh) IsInstalled(cmd, bin string) bool {
+	completeCmd := z.cmd(cmd, bin)
+	return lineInFile(z.rc, completeCmd)
+}
+
+func (z zsh) Install(cmd, bin string) error {
+	if z.IsInstalled(cmd, bin) {
+		return fmt.Errorf("already installed in %s", z.rc)
+	}
+
+	completeCmd := z.cmd(cmd, bin)
+	bashCompInit := "autoload -U +X bashcompinit && bashcompinit"
+	if !lineInFile(z.rc, bashCompInit) {
+		completeCmd = bashCompInit + "\n" + completeCmd
+	}
+
+	return appendToFile(z.rc, completeCmd)
+}
+
+func (z zsh) Uninstall(cmd, bin string) error {
+	if !z.IsInstalled(cmd, bin) {
+		return fmt.Errorf("not installed in %s", z.rc)
+	}
+
+	completeCmd := z.cmd(cmd, bin)
+	return removeFromFile(z.rc, completeCmd)
+}
+
+func (zsh) cmd(cmd, bin string) string {
+	return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/command.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/command.go
new file mode 100644
index 00000000000..82d37d529b1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/command.go
@@ -0,0 +1,111 @@
+package complete
+
+// Command represents a command line.
+// It holds the data that enables auto completion of a command line.
+// Command can also be a sub command.
+type Command struct {
+	// Sub is a map of sub commands of the current command.
+	// The key refers to the sub command name, and the value is its
+	// Command descriptive struct.
+	Sub Commands
+
+	// Flags is a map of flags that the command accepts.
+	// The key is the flag name, and the value is its predictions.
+	Flags Flags
+
+	// GlobalFlags is a map of flags that the command accepts;
+	// global flags can appear also after a sub command.
+	GlobalFlags Flags
+
+	// Args are extra arguments that the command accepts, those who are
+	// given without any flag before.
+	Args Predictor
+}
+
+// Predict returns all possible predictions for args according to the command struct
+func (c *Command) Predict(a Args) []string {
+	options, _ := c.predict(a)
+	return options
+}
+
+// Commands is the type of Sub member, it maps a command name to a command struct
+type Commands map[string]Command
+
+// Predict completion of sub command names according to command line arguments
+func (c Commands) Predict(a Args) (prediction []string) {
+	for sub := range c {
+		prediction = append(prediction, sub)
+	}
+	return
+}
+
+// Flags is the type of the Flags member, it maps a flag name to the flag's predictions.
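As a concrete illustration of the Command, Commands and Flags types above (the values are illustrative):

	// Assumes: import "github.com/posener/complete"
	tool := complete.Command{
		Sub: complete.Commands{
			"build": complete.Command{},
		},
		Flags: complete.Flags{
			"-verbose": complete.PredictNothing,
		},
	}
	// Predict returns every candidate ("build", "-verbose"); filtering
	// against the prefix being typed happens later, in Complete.Complete.
	options := tool.Predict(args) // args is a complete.Args value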
+type Flags map[string]Predictor
+
+// Predict completion of flag names according to command line arguments
+func (f Flags) Predict(a Args) (prediction []string) {
+	for flag := range f {
+		// If the flag starts with a hyphen, we avoid emitting the prediction
+		// unless the last typed arg contains a hyphen as well.
+		flagHyphenStart := len(flag) != 0 && flag[0] == '-'
+		lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-'
+		if flagHyphenStart && !lastHyphenStart {
+			continue
+		}
+		prediction = append(prediction, flag)
+	}
+	return
+}
+
+// predict options.
+// only is set to true if no more options are allowed to be returned,
+// as is the case for a special flag that has specific completion arguments
+// and after which other flags or sub commands can't come.
+func (c *Command) predict(a Args) (options []string, only bool) {
+
+	// search sub commands for predictions first
+	subCommandFound := false
+	for i, arg := range a.Completed {
+		if cmd, ok := c.Sub[arg]; ok {
+			subCommandFound = true
+
+			// recursive call for sub command
+			options, only = cmd.predict(a.from(i))
+			if only {
+				return
+			}
+
+			// We matched so stop searching. Continuing to search can accidentally
+			// match a subcommand with current set of commands, see issue #46.
+			break
+		}
+	}
+
+	// if last completed word is a global flag that we need to complete
+	if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil {
+		Log("Predicting according to global flag %s", a.LastCompleted)
+		return predictor.Predict(a), true
+	}
+
+	options = append(options, c.GlobalFlags.Predict(a)...)
+
+	// if a sub command was entered, we won't add the parent command
+	// completions and we return here.
+	if subCommandFound {
+		return
+	}
+
+	// if last completed word is a command flag that we need to complete
+	if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil {
+		Log("Predicting according to flag %s", a.LastCompleted)
+		return predictor.Predict(a), true
+	}
+
+	options = append(options, c.Sub.Predict(a)...)
+	options = append(options, c.Flags.Predict(a)...)
+	if c.Args != nil {
+		options = append(options, c.Args.Predict(a)...)
+	}
+
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/complete.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/complete.go
new file mode 100644
index 00000000000..423cbec6c17
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/complete.go
@@ -0,0 +1,104 @@
+package complete
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/posener/complete/cmd"
+)
+
+const (
+	envLine  = "COMP_LINE"
+	envPoint = "COMP_POINT"
+	envDebug = "COMP_DEBUG"
+)
+
+// Complete structs define completion for a command with CLI options
+type Complete struct {
+	Command Command
+	cmd.CLI
+	Out io.Writer
+}
+
+// New creates a new complete command.
+// name is the name of command we want to auto complete.
+// IMPORTANT: it must be the same name - if the auto complete
+// completes the 'go' command, name must be equal to "go".
+// command is the struct of the command completion.
+func New(name string, command Command) *Complete {
+	return &Complete{
+		Command: command,
+		CLI:     cmd.CLI{Name: name},
+		Out:     os.Stdout,
+	}
+}
+
+// Run runs the completion and adds installation flags beforehand.
+// The flags are added to the main flag CommandLine variable.
+func (c *Complete) Run() bool {
+	c.AddFlags(nil)
+	flag.Parse()
+	return c.Complete()
+}
+
+// Complete a command from the completion line in the environment variable,
+// and print out the complete options.
+// It returns true if the completion ran or if the cli matched
+// any of the given flags, false otherwise.
+// For installation: it assumes that flags were added and parsed before
+// it was called.
+func (c *Complete) Complete() bool {
+	line, point, ok := getEnv()
+	if !ok {
+		// make sure flags parsed,
+		// in case they were not added in the main program
+		return c.CLI.Run()
+	}
+
+	if point >= 0 && point < len(line) {
+		line = line[:point]
+	}
+
+	Log("Completing phrase: %s", line)
+	a := newArgs(line)
+	Log("Completing last field: %s", a.Last)
+	options := c.Command.Predict(a)
+	Log("Options: %s", options)
+
+	// filter only options that match the last argument
+	matches := []string{}
+	for _, option := range options {
+		if strings.HasPrefix(option, a.Last) {
+			matches = append(matches, option)
+		}
+	}
+	Log("Matches: %s", matches)
+	c.output(matches)
+	return true
+}
+
+func getEnv() (line string, point int, ok bool) {
+	line = os.Getenv(envLine)
+	if line == "" {
+		return
+	}
+	point, err := strconv.Atoi(os.Getenv(envPoint))
+	if err != nil {
+		// If parsing point failed for some reason, set it to point
+		// at the end of the line.
+		Log("Failed parsing point %s: %v", os.Getenv(envPoint), err)
+		point = len(line)
+	}
+	return line, point, true
+}
+
+func (c *Complete) output(options []string) {
+	// stdout of program defines the complete options
+	for _, option := range options {
+		fmt.Fprintln(c.Out, option)
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/doc.go
new file mode 100644
index 00000000000..0ae09a1b74c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/doc.go
@@ -0,0 +1,110 @@
+/*
+Package complete provides a tool for writing bash completion in Go, and bash completion for the go command line.
+
+Writing bash completion scripts is hard work. This package provides an easy way
+to create bash completion scripts for any command, and also an easy way to install/uninstall
+the completion of the command.
+
+Go Command Bash Completion
+
+In ./cmd/gocomplete there is an example for bash completion for the `go` command line.
+
+This is an example that uses the `complete` package on the `go` command - the `complete` package
+can also be used to implement any completions, see #usage.
+
+Install
+
+1. Type in your shell:
+
+	go get -u github.com/posener/complete/gocomplete
+	gocomplete -install
+
+2. Restart your shell
+
+Uninstall by `gocomplete -uninstall`
+
+Features
+
+- Complete `go` command, including sub commands and all flags.
+- Complete package names or `.go` files when necessary.
+- Complete test names after `-run` flag.
+
+Complete package
+
+Supported shells:
+
+- [x] bash
+- [x] zsh
+- [x] fish
+
+Usage
+
+Assuming you have a program called `run` and you want to have bash completion
+for it, meaning, if you type `run` then space, then press the `Tab` key,
+the shell will suggest relevant complete options.
+
+In that case, we will create a program called `runcomplete`, a go program
+with a `func main()`, that will provide the completion for the `run`
+program. Once `runcomplete` is compiled to a binary, we can run
+`runcomplete -install` and that will add to our shell all the bash completion
+options for `run`.
+
+So here it is:
+
+	import "github.com/posener/complete"
+
+	func main() {
+
+		// create a Command object, that represents the command we want
+		// to complete.
+		run := complete.Command{
+
+			// Sub defines a list of sub commands of the program,
+			// this is recursive, since every command is of type command also.
+			Sub: complete.Commands{
+
+				// add a build sub command
+				"build": complete.Command{
+
+					// define flags of the build sub command
+					Flags: complete.Flags{
+						// build sub command has a flag '-cpus', which
+						// expects a number of cpus after it. In that case
+						// anything could complete this flag.
+						"-cpus": complete.PredictAnything,
+					},
+				},
+			},
+
+			// define flags of the 'run' main command
+			Flags: complete.Flags{
+				// a flag -o, which expects a file ending with .out after
+				// it, the tab completion will auto complete for files matching
+				// the given pattern.
+				"-o": complete.PredictFiles("*.out"),
+			},
+
+			// define global flags of the 'run' main command
+			// those will show up also when a sub command was entered in the
+			// command line
+			GlobalFlags: complete.Flags{
+
+				// a flag '-h' which does not expect anything after it
+				"-h": complete.PredictNothing,
+			},
+		}
+
+		// run the command completion, as part of the main() function.
+		// this triggers the autocompletion when needed.
+		// name must be exactly as the binary that we want to complete.
+		complete.New("run", run).Run()
+	}
+
+Self completing program
+
+In case the program that we want to complete is written in Go, we
+can make it self completing.
+Here is an example: ./example/self/main.go .
+
+*/
+package complete
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/log.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/log.go
new file mode 100644
index 00000000000..c3029556e50
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/log.go
@@ -0,0 +1,22 @@
+package complete
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// Log is used for debugging purposes.
+// Since complete runs on tab completion, it is nice to
+// have logs to stderr (when writing your own completer).
+// To write logs, set the COMP_DEBUG environment variable and
+// use complete.Log in the complete program.
+var Log = getLogger()
+
+func getLogger() func(format string, args ...interface{}) {
+	var logfile = ioutil.Discard
+	if os.Getenv(envDebug) != "" {
+		logfile = os.Stderr
+	}
+	return log.New(logfile, "complete ", log.Flags()).Printf
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict.go
new file mode 100644
index 00000000000..820706325b3
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict.go
@@ -0,0 +1,41 @@
+package complete
+
+// Predictor implements a predict method, in which given
+// command line arguments returns a list of options it predicts.
+type Predictor interface {
+	Predict(Args) []string
+}
+
+// PredictOr unions two predicate functions, so that the result predicate
+// returns the union of their predictions
+func PredictOr(predictors ...Predictor) Predictor {
+	return PredictFunc(func(a Args) (prediction []string) {
+		for _, p := range predictors {
+			if p == nil {
+				continue
+			}
+			prediction = append(prediction, p.Predict(a)...)
+		}
+		return
+	})
+}
+
+// PredictFunc determines what terms can follow a command or a flag.
+// It is used for auto completion: given the last word already in the
+// command line, it returns the words that can complete it.
+type PredictFunc func(Args) []string
+
+// Predict invokes the predict function and implements the Predictor interface
+func (p PredictFunc) Predict(a Args) []string {
+	if p == nil {
+		return nil
+	}
+	return p(a)
+}
+
+// PredictNothing does not expect anything after.
+var PredictNothing Predictor
+
+// PredictAnything expects something, but nothing particular, such as a number
+// or arbitrary name.
+var PredictAnything = PredictFunc(func(Args) []string { return nil })
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_files.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_files.go
new file mode 100644
index 00000000000..25ae2d51440
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_files.go
@@ -0,0 +1,174 @@
+package complete
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// PredictDirs will search for directories in the partially typed path;
+// if no path has been started, it will complete to directories
+// in the current working directory.
+func PredictDirs(pattern string) Predictor {
+	return files(pattern, false)
+}
+
+// PredictFiles will search for files matching the given pattern in the partially
+// typed path; if no path has been started, it will complete to files that
+// match the pattern in the current working directory.
+// To match any file, use "*" as pattern. To match go files use "*.go", and so on.
+func PredictFiles(pattern string) Predictor {
+	return files(pattern, true)
+}
+
+func files(pattern string, allowFiles bool) PredictFunc {
+
+	// search for files according to arguments,
+	// if only one directory has matched the result, search recursively into
+	// this directory to give more results.
+	return func(a Args) (prediction []string) {
+		prediction = predictFiles(a, pattern, allowFiles)
+
+		// if the number of predictions is not 1, we either have many results or
+		// have no results, so we return it.
+		if len(prediction) != 1 {
+			return
+		}
+
+		// only try deeper, if the one item is a directory
+		if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() {
+			return
+		}
+
+		a.Last = prediction[0]
+		return predictFiles(a, pattern, allowFiles)
+	}
+}
+
+func predictFiles(a Args, pattern string, allowFiles bool) []string {
+	if strings.HasSuffix(a.Last, "/..") {
+		return nil
+	}
+
+	dir := directory(a.Last)
+	files := listFiles(dir, pattern, allowFiles)
+
+	// add dir if match
+	files = append(files, dir)
+
+	return PredictFilesSet(files).Predict(a)
+}
+
+// directory gives the directory of the given partial path;
+// in case it is not a directory, we fall back to the current directory.
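Any function of the right shape can serve as a predictor via PredictFunc; for example, a hypothetical completer for git branch names (the branches variable is illustrative):

	// Assumes: import ("os/exec"; "strings"; "github.com/posener/complete")
	var branches = complete.PredictFunc(func(a complete.Args) []string {
		out, err := exec.Command("git", "branch", "--format=%(refname:short)").Output()
		if err != nil {
			return nil // no completions if git is unavailable
		}
		return strings.Fields(string(out))
	})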
+func directory(path string) string {
+	if info, err := os.Stat(path); err == nil && info.IsDir() {
+		return fixPathForm(path, path)
+	}
+	dir := filepath.Dir(path)
+	if info, err := os.Stat(dir); err == nil && info.IsDir() {
+		return fixPathForm(path, dir)
+	}
+	return "./"
+}
+
+// PredictFilesSet predicts according to file rules from a given set of file names
+func PredictFilesSet(files []string) PredictFunc {
+	return func(a Args) (prediction []string) {
+		// add all matching files to prediction
+		for _, f := range files {
+			f = fixPathForm(a.Last, f)
+
+			// test matching of file to the argument
+			if matchFile(f, a.Last) {
+				prediction = append(prediction, f)
+			}
+		}
+		return
+	}
+}
+
+func listFiles(dir, pattern string, allowFiles bool) []string {
+	// set of all file names
+	m := map[string]bool{}
+
+	// list files
+	if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil {
+		for _, f := range files {
+			if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles {
+				m[f] = true
+			}
+		}
+	}
+
+	// list directories
+	if dirs, err := ioutil.ReadDir(dir); err == nil {
+		for _, d := range dirs {
+			if d.IsDir() {
+				m[filepath.Join(dir, d.Name())] = true
+			}
+		}
+	}
+
+	list := make([]string, 0, len(m))
+	for k := range m {
+		list = append(list, k)
+	}
+	return list
+}
+
+// matchFile returns true if prefix can match the file
+func matchFile(file, prefix string) bool {
+	// special case for current directory completion
+	if file == "./" && (prefix == "." || prefix == "") {
+		return true
+	}
+	if prefix == "." && strings.HasPrefix(file, ".") {
+		return true
+	}
+
+	file = strings.TrimPrefix(file, "./")
+	prefix = strings.TrimPrefix(prefix, "./")
+
+	return strings.HasPrefix(file, prefix)
+}
+
+// fixPathForm changes a file name to a relative name
+func fixPathForm(last string, file string) string {
+	// get working directory for relative name
+	workDir, err := os.Getwd()
+	if err != nil {
+		return file
+	}
+
+	abs, err := filepath.Abs(file)
+	if err != nil {
+		return file
+	}
+
+	// if last is absolute, return path as absolute
+	if filepath.IsAbs(last) {
+		return fixDirPath(abs)
+	}
+
+	rel, err := filepath.Rel(workDir, abs)
+	if err != nil {
+		return file
+	}
+
+	// fix ./ prefix of path
+	if rel != "." && strings.HasPrefix(last, ".") {
+		rel = "./" + rel
+	}
+
+	return fixDirPath(rel)
+}
+
+func fixDirPath(path string) string {
+	info, err := os.Stat(path)
+	if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+	return path
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_set.go b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_set.go
new file mode 100644
index 00000000000..fa4a34ae46b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/posener/complete/predict_set.go
@@ -0,0 +1,12 @@
+package complete
+
+// PredictSet expects a specific set of terms, given in the options argument.
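Predictors also compose: a flag can offer a fixed vocabulary and matching files at the same time (the envOrFile name and the values are illustrative):

	// Assumes: import "github.com/posener/complete"
	var envOrFile = complete.PredictOr(
		complete.PredictSet("dev", "staging", "prod"),
		complete.PredictFiles("*.yaml"),
	)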
+func PredictSet(options ...string) Predictor { + return predictSet(options) +} + +type predictSet []string + +func (p predictSet) Predict(a Args) []string { + return p +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/LICENSE.txt b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 00000000000..298f0e2665e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/afero.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 00000000000..f5b5e127cd6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+
+package afero
+
+import (
+	"errors"
+	"io"
+	"os"
+	"time"
+)
+
+type Afero struct {
+	Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+	io.Closer
+	io.Reader
+	io.ReaderAt
+	io.Seeker
+	io.Writer
+	io.WriterAt
+
+	Name() string
+	Readdir(count int) ([]os.FileInfo, error)
+	Readdirnames(n int) ([]string, error)
+	Stat() (os.FileInfo, error)
+	Sync() error
+	Truncate(size int64) error
+	WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+	// Create creates a file in the filesystem, returning the file and an
+	// error, if any happens.
+	Create(name string) (File, error)
+
+	// Mkdir creates a directory in the filesystem, returning an error if
+	// any happens.
+	Mkdir(name string, perm os.FileMode) error
+
+	// MkdirAll creates a directory path and all parents that do not exist
+	// yet.
+	MkdirAll(path string, perm os.FileMode) error
+
+	// Open opens a file, returning it or an error, if any happens.
+	Open(name string) (File, error)
+
+	// OpenFile opens a file using the given flags and the given mode.
+	OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+	// Remove removes a file identified by name, returning an error, if any
+	// happens.
+	Remove(name string) error
+
+	// RemoveAll removes a directory path and any children it contains. It
+	// does not fail if the path does not exist (returns nil).
+	RemoveAll(path string) error
+
+	// Rename renames a file.
+	Rename(oldname, newname string) error
+
+	// Stat returns a FileInfo describing the named file, or an error, if any
+	// happens.
+	Stat(name string) (os.FileInfo, error)
+
+	// Name returns the name of this FileSystem.
+	Name() string
+
+	// Chmod changes the mode of the named file to mode.
+	Chmod(name string, mode os.FileMode) error
+
+	// Chtimes changes the access and modification times of the named file.
+	Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+	ErrFileClosed        = errors.New("File is closed")
+	ErrOutOfRange        = errors.New("Out of range")
+	ErrTooLarge          = errors.New("Too large")
+	ErrFileNotFound      = os.ErrNotExist
+	ErrFileExists        = os.ErrExist
+	ErrDestinationExists = os.ErrExist
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/basepath.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 00000000000..616ff8ff74c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,180 @@
+package afero
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+var _ Lstater = (*BasePathFs)(nil)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The given file name to the operations on this Fs will be prepended with
+// the base path before calling the base Fs.
+// Any file name (after filepath.Clean()) outside this base path will be
+// treated as a non-existing file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+	source Fs
+	path   string
+}
+
+type BasePathFile struct {
+	File
+	path string
+}
+
+func (f *BasePathFile) Name() string {
+	sourcename := f.File.Name()
+	return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+	return &BasePathFs{source: source, path: path}
+}
+
+// RealPath returns the given file name and an error for a file outside the
+// base path; otherwise it returns the given file name with the base path
+// prepended.
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+	if err := validateBasePathName(name); err != nil {
+		return name, err
+	}
+
+	bpath := filepath.Clean(b.path)
+	path = filepath.Clean(filepath.Join(bpath, name))
+	if !strings.HasPrefix(path, bpath) {
+		return name, os.ErrNotExist
+	}
+
+	return path, nil
+}
+
+func validateBasePathName(name string) error {
+	if runtime.GOOS != "windows" {
+		// Not much to do here;
+		// the virtual file paths all look absolute on *nix.
+		return nil
+	}
+
+	// On Windows a common mistake would be to provide an absolute OS path.
+	// We could strip out the base part, but that would not be very portable.
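+	// (Editor's note, illustrative only: an absolute Windows path such as
+	// `C:\temp\foo` is rejected by the check below, while a relative name
+	// passes through and is later joined under the base path by RealPath.)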
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/cacheOnReadFs.go 
b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 00000000000..29a26c67dd5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, 
err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... 
we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_bsds.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 00000000000..5728243d962 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_win_unix.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 00000000000..968fc2783e5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/copyOnWriteFs.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 00000000000..e8108a851e1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. 
Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). +type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
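+//
+// Editor's sketch (not part of the vendored file; assumes NewOsFs from this
+// package's os.go), restating the semantics documented above:
+//
+//	base := afero.NewOsFs() // suppose the disk already holds "a.txt"
+//	u := afero.NewCopyOnWriteFs(base, afero.NewMemMapFs())
+//	err := u.Remove("a.txt") // fails: "a.txt" exists only in the base layer
+//	afero.WriteFile(u, "b.txt", []byte("x"), 0644)
+//	err = u.Remove("b.txt") // nil: the file is removed from the overlay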
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/httpFs.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 00000000000..c42193688ce --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
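+		// (Editor's note: an empty basePath falls back to serving the
+		// current directory.)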
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/ioutil.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 00000000000..5c3a3d8fffc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+	return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextSuffix())
+		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+	return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		try := filepath.Join(dir, prefix+nextSuffix())
+		err = fs.Mkdir(try, 0700)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		if err == nil {
+			name = try
+		}
+		break
+	}
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/lstater.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 00000000000..89c1bfc0a7d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Otherwise it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/match.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 00000000000..c18a87fb713
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.IndexAny(path, "*?[") >= 0
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dir.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 00000000000..e104013f457
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dirmap.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 00000000000..03a57ee5b52 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/file.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 00000000000..7af2fb56ff4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
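+
+// Editor's sketch (not part of the vendored file): the in-memory types below
+// are normally reached through afero's MemMapFs, but the handles can be
+// driven directly, using only the constructors defined in this file:
+//
+//	fd := mem.CreateFile("/x")
+//	fh := mem.NewFileHandle(fd)
+//	fh.WriteString("hello") // writes into the shared FileData buffer
+//	fh.Seek(0, 0)           // rewind before reading the data back
+//	buf := make([]byte, 5)
+//	fh.Read(buf)            // buf now holds "hello"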
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/memmap.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 00000000000..09498e70fba --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+		i := mem.FileInfo{FileData: x}
+		if !i.IsDir() {
+			return ErrFileExists
+		}
+	} else {
+		item := mem.CreateDir(name)
+		m.getData()[name] = item
+		m.registerWithParent(item)
+	}
+	return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	_, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if ok {
+		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+	}
+
+	m.mu.Lock()
+	item := mem.CreateDir(name)
+	m.getData()[name] = item
+	m.registerWithParent(item)
+	m.mu.Unlock()
+
+	m.Chmod(name, perm|os.ModeDir)
+
+	return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+	err := m.Mkdir(path, perm)
+	if err != nil {
+		if err.(*os.PathError).Err == ErrFileExists {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+	path = filepath.Clean(path)
+
+	switch path {
+	case ".":
+		return FilePathSeparator
+	case "..":
+		return FilePathSeparator
+	default:
+		return path
+	}
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewReadOnlyFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+	}
+	return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+	f, ok := m.getData()[name]
+	if !ok {
+		return nil, ErrFileNotFound
+	}
+	return f, nil
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	chmod := false
+	file, err := m.openWrite(name)
+	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+		file, err = m.Create(name)
+		chmod = true
+	}
+	if err != nil {
+		return nil, err
+	}
+	if flag == os.O_RDONLY {
+		file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+	}
+	if flag&os.O_APPEND > 0 {
+		_, err = file.Seek(0, os.SEEK_END)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+		err = file.Truncate(0)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if chmod {
+		m.Chmod(name, perm)
+	}
+	return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+	name = normalizePath(name)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if _, ok := m.getData()[name]; ok {
+		err := m.unRegisterWithParent(name)
+		if err != nil {
+			return &os.PathError{Op: "remove", Path: name, Err: err}
+		}
+		delete(m.getData(), name)
+	} else {
+		return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+	}
+	return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+	path = normalizePath(path)
+	m.mu.Lock()
+	m.unRegisterWithParent(path)
+	m.mu.Unlock()
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	// Match the path itself and entries below it, but not siblings that
+	// merely share the string prefix (e.g. "/foo2" when removing "/foo").
+	prefix := path
+	if prefix != FilePathSeparator {
+		prefix += FilePathSeparator
+	}
+	for p := range m.getData() {
+		if p == path || strings.HasPrefix(p, prefix) {
+			m.mu.RUnlock()
+			m.mu.Lock()
+			delete(m.getData(), p)
+			m.mu.Unlock()
+			m.mu.RLock()
+		}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+	oldname = normalizePath(oldname)
+	newname = normalizePath(newname)
+
+	if oldname == newname {
+		return nil
+	}
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	if _, ok :=
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/os.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/os.go new file mode 100644 index 00000000000..13cc1b84c93 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
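The OsFs methods that follow all return a "bare nil" rather than a possibly-nil *os.File. A minimal sketch of the typed-nil pitfall those comments guard against; the helper name is hypothetical and not part of the vendored code:

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // leakTypedNil returns a possibly-nil *os.File inside an io.Closer,
    // so the interface is non-nil even when the pointer it holds is nil.
    func leakTypedNil(name string) io.Closer {
    	f, _ := os.Open(name) // f is a nil *os.File when the open fails
    	return f
    }

    func main() {
    	c := leakTypedNil("no-such-file")
    	fmt.Println(c == nil) // false: the interface carries a type, so it is not nil
    }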
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/path.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/path.go new file mode 100644 index 00000000000..18f60a0f6b6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
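A short usage sketch of the Walk helper defined below, run against an in-memory Fs; it assumes afero's WriteFile helper, which lives elsewhere in this package:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/spf13/afero"
    )

    func main() {
    	fs := afero.NewMemMapFs()
    	afero.WriteFile(fs, "/tmp/a/b.txt", []byte("hi"), 0644)

    	// Visits /, /tmp, /tmp/a and /tmp/a/b.txt in lexical order.
    	afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
    		if err != nil {
    			return err
    		}
    		fmt.Println(path, info.IsDir())
    		return nil
    	})
    }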
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/readonlyfs.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 00000000000..c6376ec373a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/regexpfs.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 00000000000..9d92dbc051f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
+//
+type RegexpFs struct {
+	re     *regexp.Regexp
+	source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+	return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+	f  File
+	re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+	if r.re == nil {
+		return nil
+	}
+	if r.re.MatchString(name) {
+		return nil
+	}
+	return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+	return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+	dir, err := IsDir(r.source, oldname)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	if err := r.matchesName(oldname); err != nil {
+		return err
+	}
+	if err := r.matchesName(newname); err != nil {
+		return err
+	}
+	return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+	dir, err := IsDir(r.source, p)
+	if err != nil {
+		return err
+	}
+	if !dir {
+		if err := r.matchesName(p); err != nil {
+			return err
+		}
+	}
+	return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		if err := r.matchesName(name); err != nil {
+			return nil, err
+		}
+	}
+	f, err := r.source.Open(name)
+	if err != nil {
+		// Propagate the underlying error instead of wrapping a nil file.
+		return nil, err
+	}
+	return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+	return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+	return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+	if err := r.matchesName(name); err != nil {
+		return nil, err
+	}
+	return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+	return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+	return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+	return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+	return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+	return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+	return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+	return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi,
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/unionFile.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 00000000000..eda96312df6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,320 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+	}
+
+	if c <= 0 && len(f.files) == 0 {
+		return f.files, nil
+	}
+
+	if f.off >= len(f.files) {
+		return nil, io.EOF
+	}
+
+	if c <= 0 {
+		return f.files[f.off:], nil
+	}
+
+	if c > len(f.files)-f.off {
+		// Clamp to the entries that remain, not the total count.
+		c = len(f.files) - f.off
+	}
+
+	defer func() { f.off += c }()
+	// Slice relative to the saved offset so that repeated calls page
+	// through the merged listing instead of re-reading from the start.
+	return f.files[f.off : f.off+c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+	rfi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, fi := range rfi {
+		names = append(names, fi.Name())
+	}
+	return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+	if f.Layer != nil {
+		return f.Layer.Stat()
+	}
+	if f.Base != nil {
+		return f.Base.Stat()
+	}
+	return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Sync()
+		if err == nil && f.Base != nil {
+			err = f.Base.Sync()
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Sync()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Truncate(s)
+		if err == nil && f.Base != nil {
+			err = f.Base.Truncate(s)
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Truncate(s)
+	}
+	return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.WriteString(s)
+		if err == nil && f.Base != nil {
+			_, err = f.Base.WriteString(s)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.WriteString(s)
+	}
+	return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	// First make sure the directory exists
+	exists, err := Exists(layer, filepath.Dir(name))
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/util.go b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 00000000000..4f253f481ed
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
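The DirsMerger hook declared in unionFile.go above can be swapped for a custom policy before the first Readdir call. A minimal sketch, assuming only the exported afero API; the merger name is hypothetical:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/spf13/afero"
    )

    // overlayOnlyMerger is a hypothetical DirsMerger: merged directory
    // listings keep overlay entries and drop everything base-only.
    var overlayOnlyMerger afero.DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
    	return lofi, nil
    }

    func main() {
    	// UnionFile values are normally produced by the copy-on-write and
    	// cache-on-read filesystems; Merger can be set before the first Readdir.
    	f := &afero.UnionFile{Merger: overlayOnlyMerger}
    	fmt.Printf("%T ready\n", f)
    }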
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// Filepath separator defined by os.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwxrwxrwx
+		if err != nil {
+			// An already-existing directory is fine; anything else is fatal.
+			if !os.IsExist(err) {
+				return err
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+// SafeWriteReader is the same as WriteReader but fails if the path already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwxrwxrwx
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash;
+// if subPath is not empty, it is created recursively with mode 0777 (rwxrwxrwx).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms characters with accents into their plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, s)
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// FileContainsBytes checks whether a file contains the specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks whether a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+	if r == nil || len(subslices) == 0 {
+		return false
+	}
+
+	largestSlice := 0
+
+	for _, sl := range subslices {
+		if len(sl) > largestSlice {
+			largestSlice = len(sl)
+		}
+	}
+
+	if largestSlice == 0 {
+		return false
+	}
+
+	// Scan with a buffer of four times the largest needle, refilled half a
+	// buffer at a time, so matches spanning two reads are still caught.
+	bufflen := largestSlice * 4
+	halflen := bufflen / 2
+	buff := make([]byte, bufflen)
+	var err error
+	var n, i int
+
+	for {
+		i++
+		if i == 1 {
+			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+		} else {
+			if i != 2 {
+				// shift left to catch overlapping matches
+				copy(buff[:], buff[halflen:])
+			}
+			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+		}
+
+		if n > 0 {
+			for _, sl := range subslices {
+				if bytes.Contains(buff, sl) {
+					return true
+				}
+			}
+		}
+
+		if err != nil {
+			break
+		}
+	}
+	return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+	return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err == nil && fi.IsDir() {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+	return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+	return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+	if b, _ := Exists(fs, path); !b {
+		return false, fmt.Errorf("%q path does not exist", path)
+	}
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	if fi.IsDir() {
+		f, err := fs.Open(path)
+		if err != nil {
+			return false, err
+		}
+		defer f.Close()
+		list, err := f.Readdir(-1)
+		if err != nil {
+			// Surface the listing error instead of silently dropping it.
+			return false, err
+		}
+		return len(list) == 0, nil
+	}
+	return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+	return Exists(a.Fs, path)
+}
+
+// Exists checks if a file or directory exists.
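A small usage sketch tying this file's helpers together on an in-memory Fs (Exists is defined immediately below); the expected output is noted in the comment:

    package main

    import (
    	"fmt"

    	"github.com/spf13/afero"
    )

    func main() {
    	fs := afero.NewMemMapFs()
    	afero.WriteFile(fs, "/notes.txt", []byte("hello world"), 0644)

    	ok, _ := afero.Exists(fs, "/notes.txt")
    	found, _ := afero.FileContainsBytes(fs, "/notes.txt", []byte("world"))
    	empty, _ := afero.IsEmpty(fs, "/notes.txt")
    	fmt.Println(ok, found, empty) // true true false
    }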
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go index c63d1623fc5..ffc74eba9b2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // autoscalingTagSchema returns the schema to use for the tag element. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go index 5f72afa6061..0dc343bf36b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go @@ -1,11 +1,12 @@ package aws import ( + "errors" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) // Returns true if the error matches all these conditions: @@ -13,8 +14,9 @@ import ( // * Error.Code() matches code // * Error.Message() contains message func isAWSErr(err error, code string, message string) bool { - if err, ok := err.(awserr.Error); ok { - return err.Code() == code && strings.Contains(err.Message(), message) + var awsErr awserr.Error + if errors.As(err, &awsErr) { + return awsErr.Code() == code && strings.Contains(awsErr.Message(), message) } return false } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go index 2708d971d8b..a9e94e883bf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go @@ -11,13 +11,13 @@ import ( "bytes" "fmt" "strconv" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap" ) // cloudFrontRoute53ZoneID defines the route 53 zone ID for CloudFront. This @@ -33,7 +33,7 @@ const cloudFrontRoute53ZoneID = "Z2FDTNDATAQYW2" func expandDistributionConfig(d *schema.ResourceData) *cloudfront.DistributionConfig { distributionConfig := &cloudfront.DistributionConfig{ CacheBehaviors: expandCacheBehaviors(d.Get("ordered_cache_behavior").([]interface{})), - CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)), + CallerReference: aws.String(resource.UniqueId()), Comment: aws.String(d.Get("comment").(string)), CustomErrorResponses: expandCustomErrorResponses(d.Get("custom_error_response").(*schema.Set)), DefaultCacheBehavior: expandCloudFrontDefaultCacheBehavior(d.Get("default_cache_behavior").([]interface{})[0].(map[string]interface{})), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go index 4a3a56a7075..c526f493391 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go @@ -7,16 +7,21 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/acm" "github.com/aws/aws-sdk-go/service/acmpca" + "github.com/aws/aws-sdk-go/service/amplify" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/aws/aws-sdk-go/service/apigatewayv2" "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/aws/aws-sdk-go/service/applicationinsights" "github.com/aws/aws-sdk-go/service/appmesh" + "github.com/aws/aws-sdk-go/service/appstream" "github.com/aws/aws-sdk-go/service/appsync" "github.com/aws/aws-sdk-go/service/athena" "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/autoscalingplans" "github.com/aws/aws-sdk-go/service/backup" "github.com/aws/aws-sdk-go/service/batch" "github.com/aws/aws-sdk-go/service/budgets" @@ -61,6 +66,7 @@ import ( "github.com/aws/aws-sdk-go/service/emr" "github.com/aws/aws-sdk-go/service/firehose" "github.com/aws/aws-sdk-go/service/fms" + "github.com/aws/aws-sdk-go/service/forecastservice" "github.com/aws/aws-sdk-go/service/fsx" "github.com/aws/aws-sdk-go/service/gamelift" "github.com/aws/aws-sdk-go/service/glacier" @@ -70,12 +76,15 @@ import ( "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/inspector" "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotanalytics" + "github.com/aws/aws-sdk-go/service/iotevents" "github.com/aws/aws-sdk-go/service/kafka" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesisanalytics" "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" "github.com/aws/aws-sdk-go/service/kinesisvideo" "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/aws/aws-sdk-go/service/lambda" "github.com/aws/aws-sdk-go/service/lexmodelbuildingservice" "github.com/aws/aws-sdk-go/service/licensemanager" @@ -92,8 +101,10 @@ import ( "github.com/aws/aws-sdk-go/service/neptune" 
"github.com/aws/aws-sdk-go/service/opsworks" "github.com/aws/aws-sdk-go/service/organizations" + "github.com/aws/aws-sdk-go/service/personalize" "github.com/aws/aws-sdk-go/service/pinpoint" "github.com/aws/aws-sdk-go/service/pricing" + "github.com/aws/aws-sdk-go/service/qldb" "github.com/aws/aws-sdk-go/service/quicksight" "github.com/aws/aws-sdk-go/service/ram" "github.com/aws/aws-sdk-go/service/rds" @@ -109,6 +120,7 @@ import ( "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" "github.com/aws/aws-sdk-go/service/servicecatalog" "github.com/aws/aws-sdk-go/service/servicediscovery" + "github.com/aws/aws-sdk-go/service/servicequotas" "github.com/aws/aws-sdk-go/service/ses" "github.com/aws/aws-sdk-go/service/sfn" "github.com/aws/aws-sdk-go/service/shield" @@ -126,8 +138,8 @@ import ( "github.com/aws/aws-sdk-go/service/workspaces" "github.com/aws/aws-sdk-go/service/xray" awsbase "github.com/hashicorp/aws-sdk-go-base" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) type Config struct { @@ -147,8 +159,10 @@ type Config struct { AllowedAccountIds []string ForbiddenAccountIds []string - Endpoints map[string]string - Insecure bool + Endpoints map[string]string + IgnoreTagPrefixes []string + IgnoreTags []string + Insecure bool SkipCredsValidation bool SkipGetEC2Platforms bool @@ -156,19 +170,25 @@ type Config struct { SkipRequestingAccountId bool SkipMetadataApiCheck bool S3ForcePathStyle bool + + terraformVersion string } type AWSClient struct { accountid string acmconn *acm.ACM acmpcaconn *acmpca.ACMPCA + amplifyconn *amplify.Amplify apigateway *apigateway.APIGateway apigatewayv2conn *apigatewayv2.ApiGatewayV2 appautoscalingconn *applicationautoscaling.ApplicationAutoScaling + applicationinsightsconn *applicationinsights.ApplicationInsights appmeshconn *appmesh.AppMesh + appstreamconn *appstream.AppStream appsyncconn *appsync.AppSync athenaconn *athena.Athena autoscalingconn *autoscaling.AutoScaling + autoscalingplansconn *autoscalingplans.AutoScalingPlans backupconn *backup.Backup batchconn *batch.Batch budgetconn *budgets.Budgets @@ -195,6 +215,7 @@ type AWSClient struct { devicefarmconn *devicefarm.DeviceFarm dlmconn *dlm.DLM dmsconn *databasemigrationservice.DatabaseMigrationService + dnsSuffix string docdbconn *docdb.DocDB dsconn *directoryservice.DirectoryService dxconn *directconnect.DirectConnect @@ -213,6 +234,7 @@ type AWSClient struct { esconn *elasticsearch.ElasticsearchService firehoseconn *firehose.Firehose fmsconn *fms.FMS + forecastconn *forecastservice.ForecastService fsxconn *fsx.FSx gameliftconn *gamelift.GameLift glacierconn *glacier.Glacier @@ -220,14 +242,19 @@ type AWSClient struct { glueconn *glue.Glue guarddutyconn *guardduty.GuardDuty iamconn *iam.IAM + ignoreTagPrefixes keyvaluetags.KeyValueTags + ignoreTags keyvaluetags.KeyValueTags inspectorconn *inspector.Inspector iotconn *iot.IoT + iotanalyticsconn *iotanalytics.IoTAnalytics + ioteventsconn *iotevents.IoTEvents kafkaconn *kafka.Kafka kinesisanalyticsconn *kinesisanalytics.KinesisAnalytics kinesisanalyticsv2conn *kinesisanalyticsv2.KinesisAnalyticsV2 kinesisconn *kinesis.Kinesis kinesisvideoconn *kinesisvideo.KinesisVideo kmsconn *kms.KMS + lakeformationconn *lakeformation.LakeFormation lambdaconn *lambda.Lambda lexmodelconn *lexmodelbuildingservice.LexModelBuildingService licensemanagerconn 
*licensemanager.LicenseManager @@ -245,8 +272,10 @@ type AWSClient struct { opsworksconn *opsworks.OpsWorks organizationsconn *organizations.Organizations partition string + personalizeconn *personalize.Personalize pinpointconn *pinpoint.Pinpoint pricingconn *pricing.Pricing + qldbconn *qldb.QLDB quicksightconn *quicksight.QuickSight r53conn *route53.Route53 ramconn *ram.RAM @@ -256,6 +285,7 @@ type AWSClient struct { resourcegroupsconn *resourcegroups.ResourceGroups route53resolverconn *route53resolver.Route53Resolver s3conn *s3.S3 + s3connUriCleaningDisabled *s3.S3 s3controlconn *s3control.S3Control sagemakerconn *sagemaker.SageMaker scconn *servicecatalog.ServiceCatalog @@ -263,6 +293,7 @@ type AWSClient struct { secretsmanagerconn *secretsmanager.SecretsManager securityhubconn *securityhub.SecurityHub serverlessapplicationrepositoryconn *serverlessapplicationrepository.ServerlessApplicationRepository + servicequotasconn *servicequotas.ServiceQuotas sesConn *ses.SES sfnconn *sfn.SFN shieldconn *shield.Shield @@ -274,6 +305,7 @@ type AWSClient struct { stsconn *sts.STS supportedplatforms []string swfconn *swf.SWF + terraformVersion string transferconn *transfer.Transfer wafconn *waf.WAF wafregionalconn *wafregional.WAFRegional @@ -315,7 +347,8 @@ func (c *Config) Client() (interface{}, error) { UserAgentProducts: []*awsbase.UserAgentProduct{ {Name: "APN", Version: "1.0"}, {Name: "HashiCorp", Version: "1.0"}, - {Name: "Terraform", Version: terraform.VersionString()}, + {Name: "Terraform", Version: c.terraformVersion, + Extra: []string{"+https://www.terraform.io"}}, }, } @@ -332,17 +365,26 @@ func (c *Config) Client() (interface{}, error) { return nil, err } + dnsSuffix := "amazonaws.com" + if p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), c.Region); ok { + dnsSuffix = p.DNSSuffix() + } + client := &AWSClient{ accountid: accountID, acmconn: acm.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["acm"])})), acmpcaconn: acmpca.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["acmpca"])})), + amplifyconn: amplify.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["amplify"])})), apigateway: apigateway.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["apigateway"])})), apigatewayv2conn: apigatewayv2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["apigateway"])})), appautoscalingconn: applicationautoscaling.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["applicationautoscaling"])})), + applicationinsightsconn: applicationinsights.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["applicationinsights"])})), appmeshconn: appmesh.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["appmesh"])})), + appstreamconn: appstream.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["appstream"])})), appsyncconn: appsync.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["appsync"])})), athenaconn: athena.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["athena"])})), autoscalingconn: autoscaling.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["autoscaling"])})), + autoscalingplansconn: autoscalingplans.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["autoscalingplans"])})), backupconn: backup.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["backup"])})), batchconn: batch.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["batch"])})), budgetconn: budgets.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["budgets"])})), @@ -369,6 +411,7 
@@ func (c *Config) Client() (interface{}, error) { devicefarmconn: devicefarm.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["devicefarm"])})), dlmconn: dlm.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["dlm"])})), dmsconn: databasemigrationservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["dms"])})), + dnsSuffix: dnsSuffix, docdbconn: docdb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["docdb"])})), dsconn: directoryservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["ds"])})), dxconn: directconnect.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["directconnect"])})), @@ -387,21 +430,26 @@ func (c *Config) Client() (interface{}, error) { esconn: elasticsearch.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["es"])})), firehoseconn: firehose.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["firehose"])})), fmsconn: fms.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["fms"])})), + forecastconn: forecastservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["forecast"])})), fsxconn: fsx.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["fsx"])})), gameliftconn: gamelift.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["gamelift"])})), glacierconn: glacier.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["glacier"])})), - globalacceleratorconn: globalaccelerator.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["globalaccelerator"])})), glueconn: glue.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["glue"])})), guarddutyconn: guardduty.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["guardduty"])})), iamconn: iam.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["iam"])})), + ignoreTagPrefixes: keyvaluetags.New(c.IgnoreTagPrefixes), + ignoreTags: keyvaluetags.New(c.IgnoreTags), inspectorconn: inspector.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["inspector"])})), iotconn: iot.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["iot"])})), + iotanalyticsconn: iotanalytics.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["iotanalytics"])})), + ioteventsconn: iotevents.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["iotevents"])})), kafkaconn: kafka.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kafka"])})), kinesisanalyticsconn: kinesisanalytics.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kinesisanalytics"])})), kinesisanalyticsv2conn: kinesisanalyticsv2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kinesisanalytics"])})), kinesisconn: kinesis.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kinesis"])})), kinesisvideoconn: kinesisvideo.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kinesisvideo"])})), kmsconn: kms.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kms"])})), + lakeformationconn: lakeformation.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["lakeformation"])})), lambdaconn: lambda.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["lambda"])})), lexmodelconn: lexmodelbuildingservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["lexmodels"])})), licensemanagerconn: licensemanager.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["licensemanager"])})), @@ -419,17 +467,17 @@ func (c *Config) Client() (interface{}, error) { opsworksconn: opsworks.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["opsworks"])})), 
organizationsconn: organizations.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["organizations"])})), partition: partition, + personalizeconn: personalize.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["personalize"])})), pinpointconn: pinpoint.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["pinpoint"])})), pricingconn: pricing.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["pricing"])})), + qldbconn: qldb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["qldb"])})), quicksightconn: quicksight.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["quicksight"])})), - r53conn: route53.New(sess.Copy(&aws.Config{Region: aws.String("us-east-1"), Endpoint: aws.String(c.Endpoints["route53"])})), ramconn: ram.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["ram"])})), rdsconn: rds.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["rds"])})), redshiftconn: redshift.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["redshift"])})), region: c.Region, resourcegroupsconn: resourcegroups.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["resourcegroups"])})), route53resolverconn: route53resolver.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["route53resolver"])})), - s3conn: s3.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["s3"]), S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle)})), s3controlconn: s3control.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["s3control"])})), sagemakerconn: sagemaker.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["sagemaker"])})), scconn: servicecatalog.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["servicecatalog"])})), @@ -437,9 +485,9 @@ func (c *Config) Client() (interface{}, error) { secretsmanagerconn: secretsmanager.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["secretsmanager"])})), securityhubconn: securityhub.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["securityhub"])})), serverlessapplicationrepositoryconn: serverlessapplicationrepository.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["serverlessrepo"])})), + servicequotasconn: servicequotas.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["servicequotas"])})), sesConn: ses.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["ses"])})), sfnconn: sfn.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["stepfunctions"])})), - shieldconn: shield.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["shield"])})), simpledbconn: simpledb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["sdb"])})), snsconn: sns.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["sns"])})), sqsconn: sqs.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["sqs"])})), @@ -447,6 +495,7 @@ func (c *Config) Client() (interface{}, error) { storagegatewayconn: storagegateway.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["storagegateway"])})), stsconn: sts.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["sts"])})), swfconn: swf.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["swf"])})), + terraformVersion: c.terraformVersion, transferconn: transfer.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["transfer"])})), wafconn: waf.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["waf"])})), wafregionalconn: wafregional.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["wafregional"])})), @@ -455,27 +504,56 @@ func (c *Config) Client() 
(interface{}, error) { xrayconn: xray.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["xray"])})), } + // "Global" services that require customizations + globalAcceleratorConfig := &aws.Config{ + Endpoint: aws.String(c.Endpoints["globalaccelerator"]), + } + route53Config := &aws.Config{ + Endpoint: aws.String(c.Endpoints["route53"]), + } + shieldConfig := &aws.Config{ + Endpoint: aws.String(c.Endpoints["shield"]), + } + + // Services that require multiple client configurations + s3Config := &aws.Config{ + Endpoint: aws.String(c.Endpoints["s3"]), + S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), + } + + client.s3conn = s3.New(sess.Copy(s3Config)) + + s3Config.DisableRestProtocolURICleaning = aws.Bool(true) + client.s3connUriCleaningDisabled = s3.New(sess.Copy(s3Config)) + // Handle deprecated endpoint configurations if c.Endpoints["kinesis_analytics"] != "" { client.kinesisanalyticsconn = kinesisanalytics.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["kinesis_analytics"])})) } if c.Endpoints["r53"] != "" { - client.r53conn = route53.New(sess.Copy(&aws.Config{Region: aws.String("us-east-1"), Endpoint: aws.String(c.Endpoints["r53"])})) + route53Config.Endpoint = aws.String(c.Endpoints["r53"]) } - // Workaround for https://github.com/aws/aws-sdk-go/issues/1376 - client.kinesisconn.Handlers.Retry.PushBack(func(r *request.Request) { - if !strings.HasPrefix(r.Operation.Name, "Describe") && !strings.HasPrefix(r.Operation.Name, "List") { - return - } - err, ok := r.Error.(awserr.Error) - if !ok || err == nil { - return + // Force "global" services to correct regions + switch partition { + case endpoints.AwsPartitionID: + globalAcceleratorConfig.Region = aws.String(endpoints.UsWest2RegionID) + route53Config.Region = aws.String(endpoints.UsEast1RegionID) + shieldConfig.Region = aws.String(endpoints.UsEast1RegionID) + case endpoints.AwsCnPartitionID: + // The AWS Go SDK is missing endpoint information for Route 53 in the AWS China partition. + // This can likely be removed in the future. + if aws.StringValue(route53Config.Endpoint) == "" { + route53Config.Endpoint = aws.String("https://api.route53.cn") } - if err.Code() == kinesis.ErrCodeLimitExceededException { - r.Retryable = aws.Bool(true) - } - }) + route53Config.Region = aws.String(endpoints.CnNorthwest1RegionID) + case endpoints.AwsUsGovPartitionID: + route53Config.Region = aws.String(endpoints.UsGovWest1RegionID) + } + + client.globalacceleratorconn = globalaccelerator.New(sess.Copy(globalAcceleratorConfig)) + client.r53conn = route53.New(sess.Copy(route53Config)) + client.shieldconn = shield.New(sess.Copy(shieldConfig)) // Workaround for https://github.com/aws/aws-sdk-go/issues/1472 client.appautoscalingconn.Handlers.Retry.PushBack(func(r *request.Request) { @@ -499,6 +577,29 @@ func (c *Config) Client() (interface{}, error) { } }) + client.configconn.Handlers.Retry.PushBack(func(r *request.Request) { + // When calling Config Organization Rules API actions immediately + // after Organization creation, the API can randomly return the + // OrganizationAccessDeniedException error for a few minutes, even + // after succeeding a few requests. 
+		switch r.Operation.Name {
+		case "DeleteOrganizationConfigRule", "DescribeOrganizationConfigRules", "DescribeOrganizationConfigRuleStatuses", "PutOrganizationConfigRule":
+			if !isAWSErr(r.Error, configservice.ErrCodeOrganizationAccessDeniedException, "This action can be only made by AWS Organization's master account.") {
+				return
+			}
+
+			// We only want to retry briefly as the default max retry count would
+			// excessively retry when the error could be legitimate.
+			// We currently depend on the DefaultRetryer exponential backoff here.
+			// ~10 retries gives a fair backoff of a few seconds.
+			if r.RetryCount < 9 {
+				r.Retryable = aws.Bool(true)
+			} else {
+				r.Retryable = aws.Bool(false)
+			}
+		}
+	})
+
 	// See https://github.com/aws/aws-sdk-go/pull/1276
 	client.dynamodbconn.Handlers.Retry.PushBack(func(r *request.Request) {
 		if r.Operation.Name != "PutItem" && r.Operation.Name != "UpdateItem" && r.Operation.Name != "DeleteItem" {
@@ -510,6 +611,12 @@ func (c *Config) Client() (interface{}, error) {
 	})
 
 	client.ec2conn.Handlers.Retry.PushBack(func(r *request.Request) {
+		if r.Operation.Name == "CreateClientVpnEndpoint" {
+			if isAWSErr(r.Error, "OperationNotPermitted", "Endpoint cannot be created while another endpoint is being created") {
+				r.Retryable = aws.Bool(true)
+			}
+		}
+
 		if r.Operation.Name == "CreateVpnConnection" {
 			if isAWSErr(r.Error, "VpnConnectionLimitExceeded", "maximum number of mutating objects has been reached") {
 				r.Retryable = aws.Bool(true)
@@ -521,6 +628,18 @@ func (c *Config) Client() (interface{}, error) {
 				r.Retryable = aws.Bool(true)
 			}
 		}
+
+		if r.Operation.Name == "AttachVpnGateway" {
+			if isAWSErr(r.Error, "InvalidParameterValue", "This call cannot be completed because there are pending VPNs or Virtual Interfaces") {
+				r.Retryable = aws.Bool(true)
+			}
+		}
+	})
+
+	client.kafkaconn.Handlers.Retry.PushBack(func(r *request.Request) {
+		if isAWSErr(r.Error, kafka.ErrCodeTooManyRequestsException, "Too Many Requests") {
+			r.Retryable = aws.Bool(true)
+		}
 	})
 
 	client.kinesisconn.Handlers.Retry.PushBack(func(r *request.Request) {
@@ -536,6 +655,14 @@ func (c *Config) Client() (interface{}, error) {
 		}
 	})
 
+	client.organizationsconn.Handlers.Retry.PushBack(func(r *request.Request) {
+		// Retry on the following error:
+		// ConcurrentModificationException: AWS Organizations can't complete your request because it conflicts with another attempt to modify the same entity. Try again later.
+		if isAWSErr(r.Error, organizations.ErrCodeConcurrentModificationException, "Try again later") {
+			r.Retryable = aws.Bool(true)
+		}
+	})
+
 	client.storagegatewayconn.Handlers.Retry.PushBack(func(r *request.Request) {
 		// InvalidGatewayRequestException: The specified gateway proxy network connection is busy.
 		if isAWSErr(r.Error, storagegateway.ErrCodeInvalidGatewayRequestException, "The specified gateway proxy network connection is busy") {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/configservice.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/configservice.go
new file mode 100644
index 00000000000..a240ab9bd30
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/configservice.go
@@ -0,0 +1,179 @@
+package aws
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/configservice"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+)
+
+func configDescribeOrganizationConfigRule(conn *configservice.ConfigService, name string) (*configservice.OrganizationConfigRule, error) {
+	input := &configservice.DescribeOrganizationConfigRulesInput{
+		OrganizationConfigRuleNames: []*string{aws.String(name)},
+	}
+
+	for {
+		output, err := conn.DescribeOrganizationConfigRules(input)
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, rule := range output.OrganizationConfigRules {
+			if aws.StringValue(rule.OrganizationConfigRuleName) == name {
+				return rule, nil
+			}
+		}
+
+		if aws.StringValue(output.NextToken) == "" {
+			break
+		}
+
+		input.NextToken = output.NextToken
+	}
+
+	return nil, nil
+}
+
+func configDescribeOrganizationConfigRuleStatus(conn *configservice.ConfigService, name string) (*configservice.OrganizationConfigRuleStatus, error) {
+	input := &configservice.DescribeOrganizationConfigRuleStatusesInput{
+		OrganizationConfigRuleNames: []*string{aws.String(name)},
+	}
+
+	for {
+		output, err := conn.DescribeOrganizationConfigRuleStatuses(input)
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, status := range output.OrganizationConfigRuleStatuses {
+			if aws.StringValue(status.OrganizationConfigRuleName) == name {
+				return status, nil
+			}
+		}
+
+		if aws.StringValue(output.NextToken) == "" {
+			break
+		}
+
+		input.NextToken = output.NextToken
+	}
+
+	return nil, nil
+}
+
+func configGetOrganizationConfigRuleDetailedStatus(conn *configservice.ConfigService, ruleName, ruleStatus string) ([]*configservice.MemberAccountStatus, error) {
+	input := &configservice.GetOrganizationConfigRuleDetailedStatusInput{
+		Filters: &configservice.StatusDetailFilters{
+			MemberAccountRuleStatus: aws.String(ruleStatus),
+		},
+		OrganizationConfigRuleName: aws.String(ruleName),
+	}
+	var statuses []*configservice.MemberAccountStatus
+
+	for {
+		output, err := conn.GetOrganizationConfigRuleDetailedStatus(input)
+
+		if err != nil {
+			return nil, err
+		}
+
+		statuses = append(statuses, output.OrganizationConfigRuleDetailedStatus...)
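+		// The API returns detailed statuses one page at a time; accumulate
+		// every page before returning.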
+
+		if aws.StringValue(output.NextToken) == "" {
+			break
+		}
+
+		input.NextToken = output.NextToken
+	}
+
+	return statuses, nil
+}
+
+func configRefreshOrganizationConfigRuleStatus(conn *configservice.ConfigService, name string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		status, err := configDescribeOrganizationConfigRuleStatus(conn, name)
+
+		if err != nil {
+			return nil, "", err
+		}
+
+		if status == nil {
+			return nil, "", fmt.Errorf("status not found")
+		}
+
+		if status.ErrorCode != nil {
+			return status, aws.StringValue(status.OrganizationRuleStatus), fmt.Errorf("%s: %s", aws.StringValue(status.ErrorCode), aws.StringValue(status.ErrorMessage))
+		}
+
+		switch aws.StringValue(status.OrganizationRuleStatus) {
+		case configservice.OrganizationRuleStatusCreateFailed, configservice.OrganizationRuleStatusDeleteFailed, configservice.OrganizationRuleStatusUpdateFailed:
+			// Display detailed errors for failed member accounts
+			memberAccountStatuses, err := configGetOrganizationConfigRuleDetailedStatus(conn, name, aws.StringValue(status.OrganizationRuleStatus))
+
+			if err != nil {
+				return status, aws.StringValue(status.OrganizationRuleStatus), fmt.Errorf("unable to get Organization Config Rule detailed status for showing member account errors: %s", err)
+			}
+
+			var errBuilder strings.Builder
+
+			for _, mas := range memberAccountStatuses {
+				errBuilder.WriteString(fmt.Sprintf("Account ID (%s): %s: %s\n", aws.StringValue(mas.AccountId), aws.StringValue(mas.ErrorCode), aws.StringValue(mas.ErrorMessage)))
+			}
+
+			return status, aws.StringValue(status.OrganizationRuleStatus), fmt.Errorf("Failed in %d account(s):\n\n%s", len(memberAccountStatuses), errBuilder.String())
+		}
+
+		return status, aws.StringValue(status.OrganizationRuleStatus), nil
+	}
+}
+
+func configWaitForOrganizationRuleStatusCreateSuccessful(conn *configservice.ConfigService, name string, timeout time.Duration) error {
+	stateChangeConf := &resource.StateChangeConf{
+		Pending: []string{configservice.OrganizationRuleStatusCreateInProgress},
+		Target: []string{configservice.OrganizationRuleStatusCreateSuccessful},
+		Refresh: configRefreshOrganizationConfigRuleStatus(conn, name),
+		Timeout: timeout,
+		Delay: 10 * time.Second,
+	}
+
+	_, err := stateChangeConf.WaitForState()
+
+	return err
+}
+
+func configWaitForOrganizationRuleStatusDeleteSuccessful(conn *configservice.ConfigService, name string, timeout time.Duration) error {
+	stateChangeConf := &resource.StateChangeConf{
+		Pending: []string{configservice.OrganizationRuleStatusDeleteInProgress},
+		Target: []string{configservice.OrganizationRuleStatusDeleteSuccessful},
+		Refresh: configRefreshOrganizationConfigRuleStatus(conn, name),
+		Timeout: timeout,
+		Delay: 10 * time.Second,
+	}
+
+	_, err := stateChangeConf.WaitForState()
+
+	if isAWSErr(err, configservice.ErrCodeNoSuchOrganizationConfigRuleException, "") {
+		return nil
+	}
+
+	return err
+}
+
+func configWaitForOrganizationRuleStatusUpdateSuccessful(conn *configservice.ConfigService, name string, timeout time.Duration) error {
+	stateChangeConf := &resource.StateChangeConf{
+		Pending: []string{configservice.OrganizationRuleStatusUpdateInProgress},
+		Target: []string{configservice.OrganizationRuleStatusUpdateSuccessful},
+		Refresh: configRefreshOrganizationConfigRuleStatus(conn, name),
+		Timeout: timeout,
+		Delay: 10 * time.Second,
+	}
+
+	_, err := stateChangeConf.WaitForState()
+
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go
index a49ca9d88fc..a5180b77651 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go
@@ -7,7 +7,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/acm"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 )
 
 func dataSourceAwsAcmCertificate() *schema.Resource {
@@ -27,6 +28,21 @@ func dataSourceAwsAcmCertificate() *schema.Resource {
 				Optional: true,
 				Elem: &schema.Schema{Type: schema.TypeString},
 			},
+			"key_types": {
+				Type: schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+					ValidateFunc: validation.StringInSlice([]string{
+						acm.KeyAlgorithmEcPrime256v1,
+						acm.KeyAlgorithmEcSecp384r1,
+						acm.KeyAlgorithmEcSecp521r1,
+						acm.KeyAlgorithmRsa1024,
+						acm.KeyAlgorithmRsa2048,
+						acm.KeyAlgorithmRsa4096,
+					}, false),
+				},
+			},
 			"types": {
 				Type: schema.TypeList,
 				Optional: true,
@@ -45,6 +61,13 @@ func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) e
 	conn := meta.(*AWSClient).acmconn
 
 	params := &acm.ListCertificatesInput{}
+
+	if v := d.Get("key_types").(*schema.Set); v.Len() > 0 {
+		params.Includes = &acm.Filters{
+			KeyTypes: expandStringSet(v),
+		}
+	}
+
 	target := d.Get("domain")
 	statuses, ok := d.GetOk("statuses")
 	if ok {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acmpca_certificate_authority.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acmpca_certificate_authority.go
index b64902e2ef1..ac822402513 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acmpca_certificate_authority.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acmpca_certificate_authority.go
@@ -6,7 +6,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/acmpca"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
 )
 
 func dataSourceAwsAcmpcaCertificateAuthority() *schema.Resource {
@@ -161,12 +162,13 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in
 		d.Set("certificate_signing_request", getCertificateAuthorityCsrOutput.Csr)
 	}
 
-	tags, err := listAcmpcaTags(conn, certificateAuthorityArn)
+	tags, err := keyvaluetags.AcmpcaListTags(conn, certificateAuthorityArn)
+
 	if err != nil {
-		return fmt.Errorf("error reading ACMPCA Certificate Authority %q tags: %s", certificateAuthorityArn, err)
+		return fmt.Errorf("error listing tags for ACMPCA Certificate Authority (%s): %s", certificateAuthorityArn, err)
 	}
 
-	if err := d.Set("tags", tagsToMapACMPCA(tags)); err != nil {
+	if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil {
 		return fmt.Errorf("error setting tags: %s", err)
 	}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go
index a877fe62762..5cfd3c464e6 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go
@@ -10,9 +10,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/hashcode"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
 )
 
 func dataSourceAwsAmi() *schema.Resource {
@@ -290,8 +291,8 @@ func amiDescriptionAttributes(d *schema.ResourceData, image *ec2.Image) error {
 	if err := d.Set("state_reason", amiStateReason(image.StateReason)); err != nil {
 		return err
 	}
-	if err := d.Set("tags", tagsToMap(image.Tags)); err != nil {
-		return err
+	if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(image.Tags).IgnoreAws().Map()); err != nil {
+		return fmt.Errorf("error setting tags: %s", err)
 	}
 	return nil
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go
index 941dc4b8840..922aea0a3a7 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go
@@ -9,9 +9,9 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/hashcode"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 )
 
 func dataSourceAwsAmiIds() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_api_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_api_key.go
index e9a7d7a9733..050a4c9d785 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_api_key.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_api_key.go
@@ -3,7 +3,7 @@ package aws
 import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/apigateway"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsApiGatewayApiKey() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_resource.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_resource.go
index 5092285fcae..ee7810e27a3 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_resource.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_resource.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/apigateway"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsApiGatewayResource() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_rest_api.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_rest_api.go
index cc71d87d2c6..62651e5e4b1 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_rest_api.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_rest_api.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/apigateway"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsApiGatewayRestApi() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_vpc_link.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_vpc_link.go
index 7ce38d711f0..c24aebe32d1 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_vpc_link.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_api_gateway_vpc_link.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/apigateway"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsApiGatewayVpcLink() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_arn.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_arn.go
index 93a36935551..005805e65b8 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_arn.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_arn.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/aws/aws-sdk-go/aws/arn"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsArn() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_group.go
index 291af71efb4..917e9c82559 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_group.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_group.go
@@ -7,7 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/autoscaling"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsAutoscalingGroup() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go
index 3498cc7e021..3ac5ba31375 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go
@@ -8,7 +8,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/autoscaling"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsAutoscalingGroups() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go
index bc660cfdb9a..3878aa75d34 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsAvailabilityZone() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go
index 7085dd2a428..b9e9526afea 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go
@@ -8,8 +8,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 )
 
 func dataSourceAwsAvailabilityZones() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_compute_environment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_compute_environment.go
index 11c363150be..f4dfc00b97a 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_compute_environment.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_compute_environment.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/batch"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsBatchComputeEnvironment() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_job_queue.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_job_queue.go
index 8afca929bc3..2770d692d9e 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_job_queue.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_batch_job_queue.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/batch"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsBatchJobQueue() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go
index 03ec2f7c44e..080d251c513 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go
@@ -2,7 +2,7 @@ package aws
 import (
 	"github.com/aws/aws-sdk-go/aws/arn"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 // See http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go
index 2a87b21f782..42339b53781 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/service/sts"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCallerIdentity() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go
index b5d65950678..bc00cb0aa18 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCanonicalUserId() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_export.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_export.go
index 493d28aab71..f93a3b833b8 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_export.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_export.go
@@ -5,7 +5,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudformation"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCloudFormationExport() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go
index 84bc051fbc4..1b1e440c6cd 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go
@@ -6,7 +6,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudformation"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
 )
 
 func dataSourceAwsCloudFormationStack() *schema.Resource {
@@ -62,10 +63,7 @@ func dataSourceAwsCloudFormationStack() *schema.Resource {
 				Type: schema.TypeString,
 				Computed: true,
 			},
-			"tags": {
-				Type: schema.TypeMap,
-				Computed: true,
-			},
+			"tags": tagsSchemaComputed(),
 		},
 	}
 }
@@ -98,7 +96,9 @@ func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface
 	}
 
 	d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters))
-	d.Set("tags", flattenCloudFormationTags(stack.Tags))
+	if err := d.Set("tags", keyvaluetags.CloudformationKeyValueTags(stack.Tags).IgnoreAws().Map()); err != nil {
+		return fmt.Errorf("error setting tags: %s", err)
+	}
 	d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs))
 
 	if len(stack.Capabilities) > 0 {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudhsm2_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudhsm2_cluster.go
index 57535f2c7cd..97e27731724 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudhsm2_cluster.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudhsm2_cluster.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudhsmv2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceCloudHsm2Cluster() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudtrail_service_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudtrail_service_account.go
index 67d9f8cec38..6358368fcd6 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudtrail_service_account.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudtrail_service_account.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/aws/aws-sdk-go/aws/arn"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 // See http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html
@@ -19,12 +19,14 @@ var cloudTrailServiceAccountPerRegionMap = map[string]string{
 	"ap-southeast-1": "903692715234",
 	"ap-southeast-2": "284668455005",
 	"ca-central-1": "819402241893",
+	"cn-north-1": "193415116832",
 	"cn-northwest-1": "681348832753",
 	"eu-central-1": "035351147821",
 	"eu-north-1": "829690693026",
 	"eu-west-1": "859597730677",
 	"eu-west-2": "282025262664",
 	"eu-west-3": "262312530599",
+	"me-south-1": "034638983726",
 	"sa-east-1": "814480443879",
 	"us-east-1": "086441151436",
 	"us-east-2": "475085895292",
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudwatch_log_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudwatch_log_group.go
index 134f7f5dce7..7482c4dfd46 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudwatch_log_group.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudwatch_log_group.go
@@ -3,7 +3,7 @@ import (
 	"fmt"
 
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCloudwatchLogGroup() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_codecommit_repository.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_codecommit_repository.go
index aec07a0e018..fbc3aa5bb16 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_codecommit_repository.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_codecommit_repository.go
@@ -6,8 +6,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/codecommit"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 )
 
 func dataSourceAwsCodeCommitRepository() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cognito_user_pools.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cognito_user_pools.go
index 700f9f4958d..97296d3996e 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cognito_user_pools.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cognito_user_pools.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/arn"
 	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCognitoUserPools() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go
index bd7fb333347..b0535be3ac2 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go
@@ -3,7 +3,7 @@ package aws
 import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func buildAwsDataSourceFilters(set *schema.Set) []*ec2.Filter {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cur_report_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cur_report_definition.go
index 734da5dd9a4..d2cc805b7fa 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cur_report_definition.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cur_report_definition.go
@@ -1,7 +1,7 @@
 package aws
 
 import (
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsCurReportDefinition() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_customer_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_customer_gateway.go
new file mode 100644
index 00000000000..a9e7fd9e00d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_customer_gateway.go
@@ -0,0 +1,91 @@
+package aws
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceAwsCustomerGateway() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsCustomerGatewayRead,
+		Schema: map[string]*schema.Schema{
+			"filter": dataSourceFiltersSchema(),
+			"id": {
+				Type: schema.TypeString,
+				Optional: true,
+			},
+
+			"bgp_asn": {
+				Type: schema.TypeInt,
+				Computed: true,
+			},
+			"ip_address": {
+				Type: schema.TypeString,
+				Computed: true,
+			},
+			"tags": tagsSchemaComputed(),
+			"type": {
+				Type: schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	input := ec2.DescribeCustomerGatewaysInput{}
+
+	if v, ok := d.GetOk("filter"); ok {
+		input.Filters = buildAwsDataSourceFilters(v.(*schema.Set))
+	}
+
+	if v, ok := d.GetOk("id"); ok {
+		input.CustomerGatewayIds = []*string{aws.String(v.(string))}
+	}
+
+	log.Printf("[DEBUG] Reading EC2 Customer Gateways: %s", input)
+	output, err := conn.DescribeCustomerGateways(&input)
+
+	if err != nil {
+		return fmt.Errorf("error reading EC2 Customer Gateways: %s", err)
+	}
+
+	if output == nil || len(output.CustomerGateways) == 0 {
+		return errors.New("error reading EC2 Customer Gateways: no results found")
+	}
+
+	if len(output.CustomerGateways) > 1 {
+		return errors.New("error reading EC2 Customer Gateways: multiple results found, try adjusting search criteria")
+	}
+
+	cg := output.CustomerGateways[0]
+	if cg == nil {
+		return errors.New("error reading EC2 Customer Gateway: empty result")
+	}
+
+	d.Set("ip_address", cg.IpAddress)
+	d.Set("type", cg.Type)
+	d.SetId(aws.StringValue(cg.CustomerGatewayId))
+
+	if v := aws.StringValue(cg.BgpAsn); v != "" {
+		asn, err := strconv.ParseInt(v, 0, 0)
+		if err != nil {
+			return fmt.Errorf("error parsing BGP ASN %q: %s", v, err)
+		}
+
+		d.Set("bgp_asn", int(asn))
+	}
+
+	if err := d.Set("tags", tagsToMap(cg.Tags)); err != nil {
+		return fmt.Errorf("error setting tags for EC2 Customer Gateway %q: %s", aws.StringValue(cg.CustomerGatewayId), err)
+	}
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_cluster_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_cluster_snapshot.go
index d756b4806ea..60e18afeb61 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_cluster_snapshot.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_cluster_snapshot.go
@@ -9,7 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/rds"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDbClusterSnapshot() *schema.Resource {
@@ -104,6 +104,7 @@ func dataSourceAwsDbClusterSnapshot() *schema.Resource {
 				Type: schema.TypeString,
 				Computed: true,
 			},
+			"tags": tagsSchemaComputed(),
 		},
 	}
 }
@@ -177,6 +178,11 @@ func dataSourceAwsDbClusterSnapshotRead(d *schema.ResourceData, meta interface{}
 	d.Set("storage_encrypted", snapshot.StorageEncrypted)
 	d.Set("vpc_id", snapshot.VpcId)
 
+	// Fetch and save tags
+	if err := saveTagsRDS(conn, d, aws.StringValue(snapshot.DBClusterSnapshotArn)); err != nil {
+		log.Printf("[WARN] Failed to save tags for RDS DB Cluster Snapshot (%s): %s", aws.StringValue(snapshot.DBClusterSnapshotArn), err)
+	}
+
 	return nil
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_event_categories.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_event_categories.go
index 688e1deb33f..49e430f439c 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_event_categories.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_event_categories.go
@@ -6,8 +6,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/rds"
-	"github.com/hashicorp/terraform/helper/resource"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDbEventCategories() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go
index 1ad0063c9fc..acc51f7794c 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/rds"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDbInstance() *schema.Resource {
@@ -20,6 +20,8 @@ func dataSourceAwsDbInstance() *schema.Resource {
 				ForceNew: true,
 			},
 
+			"tags": tagsSchemaComputed(),
+
 			"address": {
 				Type: schema.TypeString,
 				Computed: true,
@@ -313,5 +315,10 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error
 		return fmt.Errorf("Error setting vpc_security_groups attribute: %#v, error: %#v", vpcSecurityGroups, err)
 	}
 
+	// Fetch and save tags
+	if err := saveTagsRDS(conn, d, aws.StringValue(dbInstance.DBInstanceArn)); err != nil {
+		log.Printf("[WARN] Failed to save tags for RDS Instance (%s): %s", aws.StringValue(dbInstance.DBInstanceArn), err)
+	}
+
 	return nil
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go
index b9a1fb3af0e..90688fbee27 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go
@@ -8,7 +8,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/rds"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDbSnapshot() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dx_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dx_gateway.go
index b4ebc9b075e..db2e5e49bec 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dx_gateway.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dx_gateway.go
@@ -6,7 +6,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/directconnect"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDxGateway() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dynamodb_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dynamodb_table.go
index 4705ac31ee3..b05b94836be 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dynamodb_table.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_dynamodb_table.go
@@ -7,8 +7,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/dynamodb"
-	"github.com/hashicorp/terraform/helper/hashcode"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsDynamoDbTable() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_default_kms_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_default_kms_key.go
new file mode 100644
index 00000000000..1de0a950bc4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_default_kms_key.go
@@ -0,0 +1,35 @@
+package aws
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceAwsEbsDefaultKmsKey() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEbsDefaultKmsKeyRead,
+
+		Schema: map[string]*schema.Schema{
+			"key_arn": {
+				Type: schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+func dataSourceAwsEbsDefaultKmsKeyRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	res, err := conn.GetEbsDefaultKmsKeyId(&ec2.GetEbsDefaultKmsKeyIdInput{})
+	if err != nil {
+		return fmt.Errorf("Error reading EBS default KMS key: %q", err)
+	}
+
+	d.SetId(time.Now().UTC().String())
+	d.Set("key_arn", res.KmsKeyId)
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_encryption_by_default.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_encryption_by_default.go
new file mode 100644
index 00000000000..83bb73d748a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_encryption_by_default.go
@@ -0,0 +1,35 @@
+package aws
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceAwsEbsEncryptionByDefault() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEbsEncryptionByDefaultRead,
+
+		Schema: map[string]*schema.Schema{
+			"enabled": {
+				Type: schema.TypeBool,
+				Computed: true,
+			},
+		},
+	}
+}
+func dataSourceAwsEbsEncryptionByDefaultRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	res, err := conn.GetEbsEncryptionByDefault(&ec2.GetEbsEncryptionByDefaultInput{})
+	if err != nil {
+		return fmt.Errorf("Error reading default EBS encryption toggle: %q", err)
+	}
+
+	d.SetId(time.Now().UTC().String())
+	d.Set("enabled", res.EbsEncryptionByDefault)
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go
index e277085dcf5..8b3d035d1a3 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go
@@ -7,7 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsEbsSnapshot() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go
index a8fbb6377e5..03b014ca73a 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go
@@ -7,8 +7,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/hashcode"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsEbsSnapshotIds() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go
index 1a109d0ba3e..df47570a16a 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go
@@ -7,7 +7,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws/arn"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
 )
 
 func dataSourceAwsEbsVolume() *schema.Resource {
@@ -141,6 +142,9 @@ func volumeDescriptionAttributes(d *schema.ResourceData, client *AWSClient, volu
 	d.Set("snapshot_id", volume.SnapshotId)
 	d.Set("volume_type", volume.VolumeType)
 
-	err := d.Set("tags", tagsToMap(volume.Tags))
-	return err
+	if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(volume.Tags).IgnoreAws().Map()); err != nil {
+		return fmt.Errorf("error setting tags: %s", err)
+	}
+
+	return nil
 }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway.go
index b44fc13c8f0..5528648f18d 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway.go
@@ -7,7 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsEc2TransitGateway() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go
new file mode 100644
index 00000000000..cfff5ff2dc4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go
@@ -0,0 +1,78 @@
+package aws
+
+import (
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
+
+func dataSourceAwsEc2TransitGatewayDxGatewayAttachment() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEc2TransitGatewayDxGatewayAttachmentRead,
+
+		Schema: map[string]*schema.Schema{
+			"dx_gateway_id": {
+				Type: schema.TypeString,
+				Required: true,
+			},
+			"tags": tagsSchemaComputed(),
+			"transit_gateway_id": {
+				Type: schema.TypeString,
+				Required: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsEc2TransitGatewayDxGatewayAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	input := &ec2.DescribeTransitGatewayAttachmentsInput{
+		Filters: []*ec2.Filter{
+			{
+				Name: aws.String("resource-id"),
+				Values: []*string{aws.String(d.Get("dx_gateway_id").(string))},
+			},
+			{
+				Name: aws.String("resource-type"),
+				Values: []*string{aws.String("direct-connect-gateway")}, // Not yet defined in ec2/api.go.
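+				// The raw string stands in until the SDK exports a constant
+				// for this resource type.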
+			},
+			{
+				Name: aws.String("transit-gateway-id"),
+				Values: []*string{aws.String(d.Get("transit_gateway_id").(string))},
+			},
+		},
+	}
+
+	log.Printf("[DEBUG] Reading EC2 Transit Gateway Direct Connect Gateway Attachments: %s", input)
+	output, err := conn.DescribeTransitGatewayAttachments(input)
+
+	if err != nil {
+		return fmt.Errorf("error reading EC2 Transit Gateway Direct Connect Gateway Attachment: %s", err)
+	}
+
+	if output == nil || len(output.TransitGatewayAttachments) == 0 || output.TransitGatewayAttachments[0] == nil {
+		return errors.New("error reading EC2 Transit Gateway Direct Connect Gateway Attachment: no results found")
+	}
+
+	if len(output.TransitGatewayAttachments) > 1 {
+		return errors.New("error reading EC2 Transit Gateway Direct Connect Gateway Attachment: multiple results found, try adjusting search criteria")
+	}
+
+	transitGatewayAttachment := output.TransitGatewayAttachments[0]
+
+	if err := d.Set("tags", tagsToMap(transitGatewayAttachment.Tags)); err != nil {
+		return fmt.Errorf("error setting tags: %s", err)
+	}
+
+	d.Set("transit_gateway_id", aws.StringValue(transitGatewayAttachment.TransitGatewayId))
+	d.Set("dx_gateway_id", aws.StringValue(transitGatewayAttachment.ResourceId))
+
+	d.SetId(aws.StringValue(transitGatewayAttachment.TransitGatewayAttachmentId))
+
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_route_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_route_table.go
index a011de02f98..21e3a02a508 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_route_table.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_route_table.go
@@ -7,7 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsEc2TransitGatewayRouteTable() *schema.Resource {
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go
index e28ef0986fe..5467f5d5ff3 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go
@@ -7,7 +7,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
 func dataSourceAwsEc2TransitGatewayVpcAttachment() *schema.Resource {
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEc2TransitGatewayVpnAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_image.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_image.go new file mode 100755 index 00000000000..28a6dac8f0f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_image.go @@ -0,0 +1,119 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecr" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsEcrImage() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEcrImageRead, + Schema: map[string]*schema.Schema{ + "registry_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.NoZeroValues, + }, + "repository_name": { + Type: schema.TypeString, + Required: true, + }, + "image_digest": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "image_tag": { + Type: schema.TypeString, + Optional: true, + }, + "image_pushed_at": { + Type: schema.TypeInt, + Computed: true, + }, + "image_size_in_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + "image_tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsEcrImageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecrconn + + params := &ecr.DescribeImagesInput{ + RepositoryName: aws.String(d.Get("repository_name").(string)), + } + + regId, ok := d.GetOk("registry_id") + if ok { + params.RegistryId = aws.String(regId.(string)) + } + + imgId := ecr.ImageIdentifier{} + digest, ok := d.GetOk("image_digest") + if ok { + imgId.ImageDigest = aws.String(digest.(string)) + } + tag, ok := d.GetOk("image_tag") + if ok { + imgId.ImageTag = aws.String(tag.(string)) + } + + if imgId.ImageDigest == nil && imgId.ImageTag == nil { + return fmt.Errorf("At least one of either image_digest or image_tag must be defined") + } + + params.ImageIds = []*ecr.ImageIdentifier{&imgId} + + var imageDetails []*ecr.ImageDetail + log.Printf("[DEBUG] Reading ECR Images: %s", params) + err := conn.DescribeImagesPages(params, func(page *ecr.DescribeImagesOutput, lastPage bool) bool { + imageDetails = append(imageDetails, page.ImageDetails...) 
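+ // Returning true requests the next page; the SDK stops paginating when the callback returns false or the final page has been delivered.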
+ return true + }) + if err != nil { + return fmt.Errorf("Error describing ECR images: %q", err) + } + + if len(imageDetails) == 0 { + return fmt.Errorf("No matching image found") + } + if len(imageDetails) > 1 { + return fmt.Errorf("More than one image found for tag/digest combination") + } + + image := imageDetails[0] + + d.SetId(time.Now().UTC().String()) + if err = d.Set("registry_id", aws.StringValue(image.RegistryId)); err != nil { + return fmt.Errorf("failed to set registry_id: %s", err) + } + if err = d.Set("image_digest", aws.StringValue(image.ImageDigest)); err != nil { + return fmt.Errorf("failed to set image_digest: %s", err) + } + if err = d.Set("image_pushed_at", image.ImagePushedAt.Unix()); err != nil { + return fmt.Errorf("failed to set image_pushed_at: %s", err) + } + if err = d.Set("image_size_in_bytes", aws.Int64Value(image.ImageSizeInBytes)); err != nil { + return fmt.Errorf("failed to set image_size_in_bytes: %s", err) + } + if err := d.Set("image_tags", aws.StringValueSlice(image.ImageTags)); err != nil { + return fmt.Errorf("failed to set image_tags: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_repository.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_repository.go index 8b93434ac15..9bc8e4df698 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_repository.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecr_repository.go @@ -6,7 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsEcrRepository() *schema.Resource { @@ -54,17 +55,22 @@ func dataSourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) er } repository := out.Repositories[0] - - log.Printf("[DEBUG] Received ECR repository %s", out) + arn := aws.StringValue(repository.RepositoryArn) d.SetId(aws.StringValue(repository.RepositoryName)) - d.Set("arn", repository.RepositoryArn) + d.Set("arn", arn) d.Set("registry_id", repository.RegistryId) d.Set("name", repository.RepositoryName) d.Set("repository_url", repository.RepositoryUri) - if err := getTagsECR(conn, d); err != nil { - return fmt.Errorf("error getting ECR repository tags: %s", err) + tags, err := keyvaluetags.EcrListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for ECR Repository (%s): %s", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go index 308bdb9a95a..8ca184f05f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEcsCluster() *schema.Resource { @@ -44,6 +44,23 @@ func dataSourceAwsEcsCluster() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + + "setting": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, }, } } @@ -77,5 +94,9 @@ func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("running_tasks_count", cluster.RunningTasksCount) d.Set("registered_container_instances_count", cluster.RegisteredContainerInstancesCount) + if err := d.Set("setting", flattenEcsSettings(cluster.Settings)); err != nil { + return fmt.Errorf("error setting setting: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go index 914d582649a..e275fffd438 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEcsContainerDefinition() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_service.go index 524b47f7334..2a87cd1f77a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_service.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEcsService() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go index a734bb0e8af..8123a8b512f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEcsTaskDefinition() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go index 539a867882c..1a6e07e5ae9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsEfsFileSystem() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_mount_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_mount_target.go index 88e4dcf8828..d69332f5e29 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_mount_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_mount_target.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEfsMountTarget() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go index a1e5a0cc2b4..cca755bbcfa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsEip() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster.go index c0141041521..075b4caf3c9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster.go @@ -6,8 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + 
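+ // keyvaluetags is the provider's shared helper package for listing and normalizing resource tags.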
"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsEksCluster() *schema.Resource { @@ -46,6 +47,26 @@ func dataSourceAwsEksCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "identity": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oidc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, "name": { Type: schema.TypeString, Required: true, @@ -60,6 +81,11 @@ func dataSourceAwsEksCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchemaComputed(), "vpc_config": { Type: schema.TypeList, MaxItems: 1, @@ -130,9 +156,20 @@ func dataSourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting enabled_cluster_log_types: %s", err) } d.Set("endpoint", cluster.Endpoint) + + if err := d.Set("identity", flattenEksIdentity(cluster.Identity)); err != nil { + return fmt.Errorf("error setting identity: %s", err) + } + d.Set("name", cluster.Name) d.Set("platform_version", cluster.PlatformVersion) d.Set("role_arn", cluster.RoleArn) + d.Set("status", cluster.Status) + + if err := d.Set("tags", keyvaluetags.EksKeyValueTags(cluster.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("version", cluster.Version) if err := d.Set("vpc_config", flattenEksVpcConfigResponse(cluster.ResourcesVpcConfig)); err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster_auth.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster_auth.go index 4ace7b051d5..49a6838ec8d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster_auth.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eks_cluster_auth.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/kubernetes-sigs/aws-iam-authenticator/pkg/token" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_application.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_application.go index 5ec320d7ac1..400dc7f6e0e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_application.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_application.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsElasticBeanstalkApplication() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_hosted_zone.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_hosted_zone.go index 90bc5638213..9532043f18e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_hosted_zone.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_hosted_zone.go @@ -3,15 +3,17 @@ package aws import ( "fmt" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -// See # http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region +// See http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region var elasticBeanstalkHostedZoneIds = map[string]string{ "ap-southeast-1": "Z16FZ9L249IFLT", "ap-southeast-2": "Z2PCDNR3VC2G1N", + "ap-east-1": "ZPWYUBWRU171A", "ap-northeast-1": "Z1R25G3KIG2GBW", "ap-northeast-2": "Z3JE5OI70TWKCP", + "ap-northeast-3": "ZNE5GEY1TIAGY", "ap-south-1": "Z18NTBI3Y7N9TZ", "ca-central-1": "ZJFCZL7SSZB5I", "eu-central-1": "Z1FRNW7UH4DEZJ", @@ -19,11 +21,14 @@ var elasticBeanstalkHostedZoneIds = map[string]string{ "eu-west-1": "Z2NYPWQ7DFZAZH", "eu-west-2": "Z1GKAAAUGATPF1", "eu-west-3": "Z5WN6GAYWG5OB", + "me-south-1": "Z2BBTEKR2I36N2", "sa-east-1": "Z10X7K2B4QSOFV", "us-east-1": "Z117KPS5GTRQ2G", "us-east-2": "Z14LCN19Q5QHIC", "us-west-1": "Z1LQECGX5PH1X", "us-west-2": "Z38NKT9BP95V3O", + "us-gov-east-1": "Z35TSARG0EJ4VU", + "us-gov-west-1": "Z4KAURWC4UUUG", } func dataSourceAwsElasticBeanstalkHostedZone() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go index b41f67568d6..e4fc1da4486 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go @@ -6,8 +6,8 @@ import ( "regexp" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsElasticBeanstalkSolutionStack() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go index 4456aef75b3..090a0bfcf3c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" 
"github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsElastiCacheCluster() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_replication_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_replication_group.go index 7c3ee5e2ca5..3a5e07def39 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_replication_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_replication_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsElasticacheReplicationGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticsearch_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticsearch_domain.go new file mode 100644 index 00000000000..80816b1c245 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticsearch_domain.go @@ -0,0 +1,366 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" +) + +func dataSourceAwsElasticSearchDomain() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsElasticSearchDomainRead, + + Schema: map[string]*schema.Schema{ + "access_policies": { + Type: schema.TypeString, + Computed: true, + }, + "advanced_options": { + Type: schema.TypeMap, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "kibana_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "ebs_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ebs_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "iops": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_size": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "encryption_at_rest": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "node_to_node_encryption": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "cluster_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "dedicated_master_count": { + Type: schema.TypeInt, + Computed: true, + }, + "dedicated_master_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "dedicated_master_type": { + Type: schema.TypeString, + Computed: true, + }, + "instance_count": { + Type: schema.TypeInt, + Computed: true, + }, + "instance_type": { + Type: schema.TypeString, + Computed: true, + }, + "zone_awareness_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_zone_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "zone_awareness_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "snapshot_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automated_snapshot_start_hour": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "vpc_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_zones": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + //Set: schema.HashString, + }, + "security_group_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "subnet_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "log_publishing_options": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_type": { + Type: schema.TypeString, + Computed: true, + }, + "cloudwatch_log_group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "elasticsearch_version": { + Type: schema.TypeString, + Computed: true, + }, + "cognito_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "user_pool_id": { + Type: schema.TypeString, + Computed: true, + }, + "identity_pool_id": { + Type: schema.TypeString, + Computed: true, + }, + "role_arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "created": { + Type: schema.TypeBool, + Computed: true, + }, + "deleted": { + Type: schema.TypeBool, + Computed: true, + }, + "processing": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error { + esconn := meta.(*AWSClient).esconn + + req := &elasticsearchservice.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + + resp, err := esconn.DescribeElasticsearchDomain(req) + if err != nil { + return fmt.Errorf("error querying elasticsearch_domain: %s", err) + } + + if resp.DomainStatus == nil { + return fmt.Errorf("your query returned no results") + } + + ds := resp.DomainStatus + + d.SetId(*ds.ARN) + + if ds.AccessPolicies != nil && *ds.AccessPolicies != "" { + policies, err := structure.NormalizeJsonString(*ds.AccessPolicies) + if err != nil { + return fmt.Errorf("access policies contain an invalid JSON: %s", err) + } + d.Set("access_policies", policies) + } + + if err := d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)); err != nil { 
+ return fmt.Errorf("error setting advanced_options: %s", err) + } + + d.Set("arn", ds.ARN) + d.Set("domain_id", ds.DomainId) + d.Set("endpoint", ds.Endpoint) + d.Set("kibana_endpoint", getKibanaEndpoint(d)) + + if err := d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions)); err != nil { + return fmt.Errorf("error setting ebs_options: %s", err) + } + + if err := d.Set("encryption_at_rest", flattenESEncryptAtRestOptions(ds.EncryptionAtRestOptions)); err != nil { + return fmt.Errorf("error setting encryption_at_rest: %s", err) + } + + if err := d.Set("node_to_node_encryption", flattenESNodeToNodeEncryptionOptions(ds.NodeToNodeEncryptionOptions)); err != nil { + return fmt.Errorf("error setting node_to_node_encryption: %s", err) + } + + if err := d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)); err != nil { + return fmt.Errorf("error setting cluster_config: %s", err) + } + + if err := d.Set("snapshot_options", flattenESSnapshotOptions(ds.SnapshotOptions)); err != nil { + return fmt.Errorf("error setting snapshot_options: %s", err) + } + + if ds.VPCOptions != nil { + if err := d.Set("vpc_options", flattenESVPCDerivedInfo(ds.VPCOptions)); err != nil { + return fmt.Errorf("error setting vpc_options: %s", err) + } + + endpoints := pointersMapToStringList(ds.Endpoints) + if err := d.Set("endpoint", endpoints["vpc"]); err != nil { + return fmt.Errorf("error setting endpoint: %s", err) + } + d.Set("kibana_endpoint", getKibanaEndpoint(d)) + if ds.Endpoint != nil { + return fmt.Errorf("%q: Elasticsearch domain in VPC expected to have null Endpoint value", d.Id()) + } + } else { + if ds.Endpoint != nil { + d.Set("endpoint", aws.StringValue(ds.Endpoint)) + d.Set("kibana_endpoint", getKibanaEndpoint(d)) + } + if ds.Endpoints != nil { + return fmt.Errorf("%q: Elasticsearch domain not in VPC expected to have null Endpoints value", d.Id()) + } + } + + if ds.LogPublishingOptions != nil { + m := make([]map[string]interface{}, 0) + for k, val := range ds.LogPublishingOptions { + mm := map[string]interface{}{} + mm["log_type"] = k + if val.CloudWatchLogsLogGroupArn != nil { + mm["cloudwatch_log_group_arn"] = aws.StringValue(val.CloudWatchLogsLogGroupArn) + } + mm["enabled"] = aws.BoolValue(val.Enabled) + m = append(m, mm) + } + d.Set("log_publishing_options", m) + } + + d.Set("elasticsearch_version", ds.ElasticsearchVersion) + + if err := d.Set("cognito_options", flattenESCognitoOptions(ds.CognitoOptions)); err != nil { + return fmt.Errorf("error setting cognito_options: %s", err) + } + + d.Set("created", ds.Created) + d.Set("deleted", ds.Deleted) + + d.Set("processing", ds.Processing) + + tagResp, err := esconn.ListTags(&elasticsearchservice.ListTagsInput{ + ARN: ds.ARN, + }) + + if err != nil { + return fmt.Errorf("error retrieving tags for elasticsearch_domain: %s", err) + } + + if err := d.Set("tags", tagsToMapElasticsearchService(tagResp.TagList)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb.go index 626c2eaae6e..1d159bbfbc7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb.go @@ -6,7 +6,7 @@ import ( 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsElb() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go index dfe8214aea9..9c88dae928b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go @@ -3,10 +3,11 @@ package aws import ( "fmt" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // See http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region +// See https://docs.amazonaws.cn/en_us/general/latest/gr/rande.html#elb_region var elbHostedZoneIdPerRegionMap = map[string]string{ "ap-east-1": "Z3DQVH9N71FHZ0", "ap-northeast-1": "Z14GRHDCWA56QT", @@ -16,16 +17,17 @@ var elbHostedZoneIdPerRegionMap = map[string]string{ "ap-southeast-1": "Z1LMS91P8CMLE5", "ap-southeast-2": "Z1GM3OXH4ZPM65", "ca-central-1": "ZQSVJUPU6J1EY", - "cn-north-1": "638102146993", + "cn-north-1": "Z3BX2TMKNYI13Y", + "cn-northwest-1": "Z3BX2TMKNYI13Y", "eu-central-1": "Z215JYRZR1TBD5", "eu-north-1": "Z23TAZ6LKFMNIO", "eu-west-1": "Z32O12XQLNTSW2", "eu-west-2": "ZHURV8PSTC4K8", "eu-west-3": "Z3Q77PNBQS71R4", + "me-south-1": "ZS929ML54UICD", "sa-east-1": "Z2P70J7HTTTPLU", "us-east-1": "Z35SXDOTRQ7X7K", "us-east-2": "Z3AADJGX6KTTL2", - "us-gov-west-1": "048591011584", "us-west-1": "Z368ELLRRE2KJ0", "us-west-2": "Z1H1FL5HABSF5", } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go index d9d4c83bed0..e17424cda6b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy @@ -24,6 +24,7 @@ var elbAccountIdPerRegionMap = map[string]string{ "eu-west-1": "156460612806", "eu-west-2": "652711504416", "eu-west-3": "009996457667", + "me-south-1": "076674570225", "sa-east-1": "507241528517", "us-east-1": "127311923021", "us-east-2": "033677994240", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_glue_script.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_glue_script.go index ec59b882cca..fb5cb7f6bad 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_glue_script.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_glue_script.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsGlueScript() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go index f938973734b..89347984dbf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIamAccountAlias() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_group.go index 9376caf4b1b..8cf595bdf8c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIAMGroup() *schema.Resource { @@ -30,6 +30,30 @@ func dataSourceAwsIAMGroup() *schema.Resource { Type: schema.TypeString, Required: true, }, + "users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "user_id": { + Type: schema.TypeString, + Computed: true, + }, + "user_name": { + Type: schema.TypeString, + Computed: true, + }, + "path": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, }, } } @@ -58,6 +82,22 @@ func dataSourceAwsIAMGroupRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", group.Arn) d.Set("path", group.Path) d.Set("group_id", group.GroupId) + if err := d.Set("users", dataSourceUsersRead(resp.Users)); err != nil { + return fmt.Errorf("error setting users: %s", err) + } return nil } + +func dataSourceUsersRead(iamUsers []*iam.User) []map[string]interface{} { + users := make([]map[string]interface{}, 0, len(iamUsers)) + for _, i := range iamUsers { + u := make(map[string]interface{}) + u["arn"] = aws.StringValue(i.Arn) + u["user_id"] = aws.StringValue(i.UserId) + u["user_name"] = aws.StringValue(i.UserName) + u["path"] = aws.StringValue(i.Path) + users = append(users, u) + } + return users +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_instance_profile.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_instance_profile.go index de36b71893a..ddc61a95117 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_instance_profile.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_instance_profile.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIAMInstanceProfile() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy.go index 78ca5b91b89..e9ab0832993 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIAMPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go index 8e9ce8cc8a8..7bdc56302ba 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go @@ -6,9 +6,9 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) var dataSourceAwsIamPolicyDocumentVarReplacer = strings.NewReplacer("&{", "${") diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go index 63c68cc2d1b..1be2de616fb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIAMRole() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go index 16403fbd15d..fdbfefdd020 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsIAMServerCertificate() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_user.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_user.go index 2a627b918bb..36d941b994b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_user.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_user.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsIAMUser() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_inspector_rules_packages.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_inspector_rules_packages.go index fb479bd5faa..df3bcd03b44 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_inspector_rules_packages.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_inspector_rules_packages.go @@ -8,7 +8,7 @@ import ( "time" "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsInspectorRulesPackages() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go index 9b28684431f..d3dd4a96226 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsInstance() *schema.Resource { @@ -187,6 +187,11 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "kms_key_id": { 
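+ // Populated from the EC2 DescribeVolumes response, which reports the key as a full KMS key ARN.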
+ Type: schema.TypeString, + Computed: true, + }, + "snapshot_id": { Type: schema.TypeString, Computed: true, @@ -219,11 +224,21 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + "iops": { Type: schema.TypeInt, Computed: true, }, + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "volume_size": { Type: schema.TypeInt, Computed: true, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instances.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instances.go index 5fd8373836f..d3128e6d919 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instances.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instances.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsInstances() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_internet_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_internet_gateway.go index 5535fb7a363..dd1aaed3b9b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_internet_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_internet_gateway.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsInternetGateway() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iot_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iot_endpoint.go index 2c5e943726e..217278b4b45 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iot_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iot_endpoint.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsIotEndpoint() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go index 3706ec8ba1b..79eec622d84 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) type dataSourceAwsIPRangesResult struct { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go index 3c82c1a7081..b311e2072bb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go @@ -3,7 +3,7 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsKinesisStream() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go index c002d1da8f8..cb50477e0d8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsKmsAlias() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go index 87ef5fffc85..2ca3556f9e0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsKmsCiphertext() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_key.go index a05f8edfd6e..75dd23f119c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_key.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsKmsKey() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go index c66494e39c0..b0f2c13e29f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go @@ -3,7 +3,7 @@ package aws import ( "errors" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) const dataSourceAwsKmsSecretRemovedMessage = "This data source has been replaced with the `aws_kms_secrets` data source. Upgrade information is available at: https://www.terraform.io/docs/providers/aws/guides/version-2-upgrade.html#data-source-aws_kms_secret" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secrets.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secrets.go index 63caf704130..f00ad2d51b3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secrets.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secrets.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsKmsSecrets() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_function.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_function.go index dbd49a854fc..4db75ba2817 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_function.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_function.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsLambdaFunction() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_invocation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_invocation.go index f21b99af688..d896c109dbc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_invocation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_invocation.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsLambdaInvocation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_layer_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_layer_version.go index 2807b979c2c..58562574818 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_layer_version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lambda_layer_version.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsLambdaLayerVersion() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_configuration.go index e02cbd2a06a..4a9adc62e27 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_configuration.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsLaunchConfiguration() *schema.Resource { @@ -157,6 +157,11 @@ func dataSourceAwsLaunchConfiguration() *schema.Resource { Computed: true, }, + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + "iops": { Type: schema.TypeInt, Computed: true, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_template.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_template.go index c27f248150b..7d043dfcb0e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_template.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_launch_template.go @@ -9,7 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsLaunchTemplate() *schema.Resource { @@ -379,7 +380,9 @@ func dataSourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) e d.Set("name", lt.LaunchTemplateName) d.Set("latest_version", lt.LatestVersionNumber) d.Set("default_version", lt.DefaultVersionNumber) - d.Set("tags", tagsToMap(lt.Tags)) + if err := d.Set("tags", 
keyvaluetags.Ec2KeyValueTags(lt.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } arn := arn.ARN{ Partition: meta.(*AWSClient).partition, @@ -420,41 +423,41 @@ func dataSourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) e } if err := d.Set("block_device_mappings", getBlockDeviceMappings(ltData.BlockDeviceMappings)); err != nil { - return err + return fmt.Errorf("error setting block_device_mappings: %s", err) } if strings.HasPrefix(aws.StringValue(ltData.InstanceType), "t2") || strings.HasPrefix(aws.StringValue(ltData.InstanceType), "t3") { if err := d.Set("credit_specification", getCreditSpecification(ltData.CreditSpecification)); err != nil { - return err + return fmt.Errorf("error setting credit_specification: %s", err) } } if err := d.Set("elastic_gpu_specifications", getElasticGpuSpecifications(ltData.ElasticGpuSpecifications)); err != nil { - return err + return fmt.Errorf("error setting elastic_gpu_specifications: %s", err) } if err := d.Set("iam_instance_profile", getIamInstanceProfile(ltData.IamInstanceProfile)); err != nil { - return err + return fmt.Errorf("error setting iam_instance_profile: %s", err) } if err := d.Set("instance_market_options", getInstanceMarketOptions(ltData.InstanceMarketOptions)); err != nil { - return err + return fmt.Errorf("error setting instance_market_options: %s", err) } if err := d.Set("monitoring", getMonitoring(ltData.Monitoring)); err != nil { - return err + return fmt.Errorf("error setting monitoring: %s", err) } if err := d.Set("network_interfaces", getNetworkInterfaces(ltData.NetworkInterfaces)); err != nil { - return err + return fmt.Errorf("error setting network_interfaces: %s", err) } if err := d.Set("placement", getPlacement(ltData.Placement)); err != nil { - return err + return fmt.Errorf("error setting placement: %s", err) } if err := d.Set("tag_specifications", getTagSpecifications(ltData.TagSpecifications)); err != nil { - return err + return fmt.Errorf("error setting tag_specifications: %s", err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb.go index 6a7395a59f7..b827dc3eab0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsLb() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_listener.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_listener.go index 8ef6dfd2045..f4bca086bd4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_listener.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_listener.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsLbListener() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_target_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_target_group.go index 0bee32ef6b0..cff2cf93999 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_target_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_lb_target_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsLbTargetGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_mq_broker.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_mq_broker.go index eb65ee3e1a7..a7e00f8c713 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_mq_broker.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_mq_broker.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mq" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsMqBroker() *schema.Resource { @@ -55,6 +55,22 @@ func dataSourceAwsMqBroker() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "encryption_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "use_aws_owned_key": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, "engine_type": { Type: schema.TypeString, Computed: true, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_cluster.go new file mode 100644 index 00000000000..d65bfef793f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_cluster.go @@ -0,0 +1,115 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsMskCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsMskClusterRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_tls": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "kafka_version": { + Type: schema.TypeString, + Computed: true, + }, + "number_of_broker_nodes": { + Type: schema.TypeInt, + Computed: 
true, + }, + "tags": tagsSchemaComputed(), + "zookeeper_connect_string": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + listClustersInput := &kafka.ListClustersInput{ + ClusterNameFilter: aws.String(d.Get("cluster_name").(string)), + } + + var clusters []*kafka.ClusterInfo + for { + listClustersOutput, err := conn.ListClusters(listClustersInput) + + if err != nil { + return fmt.Errorf("error listing MSK Clusters: %s", err) + } + + if listClustersOutput == nil { + break + } + + clusters = append(clusters, listClustersOutput.ClusterInfoList...) + + if aws.StringValue(listClustersOutput.NextToken) == "" { + break + } + + listClustersInput.NextToken = listClustersOutput.NextToken + } + + if len(clusters) == 0 { + return fmt.Errorf("error reading MSK Cluster: no results found") + } + + if len(clusters) > 1 { + return fmt.Errorf("error reading MSK Cluster: multiple results found, try adjusting search criteria") + } + + cluster := clusters[0] + + bootstrapBrokersInput := &kafka.GetBootstrapBrokersInput{ + ClusterArn: cluster.ClusterArn, + } + + bootstrapBrokersOutput, err := conn.GetBootstrapBrokers(bootstrapBrokersInput) + + if err != nil { + return fmt.Errorf("error reading MSK Cluster (%s) bootstrap brokers: %s", aws.StringValue(cluster.ClusterArn), err) + } + + d.Set("arn", aws.StringValue(cluster.ClusterArn)) + d.Set("bootstrap_brokers", aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerString)) + d.Set("bootstrap_brokers_tls", aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringTls)) + d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) + d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) + d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) + + if err := d.Set("tags", tagsToMapMskCluster(cluster.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) + + d.SetId(aws.StringValue(cluster.ClusterArn)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_configuration.go new file mode 100644 index 00000000000..2449b96a76e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_msk_configuration.go @@ -0,0 +1,108 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsMskConfiguration() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsMskConfigurationRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "kafka_versions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "latest_revision": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "server_properties": {
Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsMskConfigurationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + listConfigurationsInput := &kafka.ListConfigurationsInput{} + + var configuration *kafka.Configuration + err := conn.ListConfigurationsPages(listConfigurationsInput, func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { + for _, config := range page.Configurations { + if aws.StringValue(config.Name) == d.Get("name").(string) { + configuration = config + break + } + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing MSK Configurations: %s", err) + } + + if configuration == nil { + return fmt.Errorf("error reading MSK Configuration: no results found") + } + + if configuration.LatestRevision == nil { + return fmt.Errorf("error describing MSK Configuration (%s): missing latest revision", d.Get("name").(string)) + } + + revision := configuration.LatestRevision.Revision + revisionInput := &kafka.DescribeConfigurationRevisionInput{ + Arn: configuration.Arn, + Revision: revision, + } + + revisionOutput, err := conn.DescribeConfigurationRevision(revisionInput) + + if err != nil { + return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): %s", d.Get("name").(string), aws.Int64Value(revision), err) + } + + if revisionOutput == nil { + return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): missing result", d.Get("name").(string), aws.Int64Value(revision)) + } + + d.Set("arn", aws.StringValue(configuration.Arn)) + d.Set("description", aws.StringValue(configuration.Description)) + + if err := d.Set("kafka_versions", aws.StringValueSlice(configuration.KafkaVersions)); err != nil { + return fmt.Errorf("error setting kafka_versions: %s", err) + } + + d.Set("latest_revision", aws.Int64Value(revision)) + d.Set("name", aws.StringValue(configuration.Name)) + d.Set("server_properties", string(revisionOutput.ServerProperties)) + + d.SetId(aws.StringValue(configuration.Arn)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_nat_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_nat_gateway.go index a805694473d..886633ab9a9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_nat_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_nat_gateway.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsNatGateway() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_acls.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_acls.go index bd99ed18a65..fb56b28291f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_acls.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_acls.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsNetworkAcls() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interface.go index 51b91b49a15..0a138faa150 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interface.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsNetworkInterface() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interfaces.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interfaces.go index 316c598032f..1fe328038d0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interfaces.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_network_interfaces.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsNetworkInterfaces() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_organizations_organization.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_organizations_organization.go new file mode 100644 index 00000000000..27c20321d29 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_organizations_organization.go @@ -0,0 +1,222 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/organizations" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsOrganizationsOrganization() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsOrganizationsOrganizationRead, + + Schema: map[string]*schema.Schema{ + "accounts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "aws_service_access_principals": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "enabled_policy_types": { + Type: schema.TypeSet, + 
Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "feature_set": { + Type: schema.TypeString, + Computed: true, + }, + "master_account_arn": { + Type: schema.TypeString, + Computed: true, + }, + "master_account_email": { + Type: schema.TypeString, + Computed: true, + }, + "master_account_id": { + Type: schema.TypeString, + Computed: true, + }, + "non_master_accounts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "roots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "policy_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).organizationsconn + + org, err := conn.DescribeOrganization(&organizations.DescribeOrganizationInput{}) + if err != nil { + return fmt.Errorf("Error describing organization: %s", err) + } + + d.SetId(aws.StringValue(org.Organization.Id)) + d.Set("arn", org.Organization.Arn) + d.Set("feature_set", org.Organization.FeatureSet) + d.Set("master_account_arn", org.Organization.MasterAccountArn) + d.Set("master_account_email", org.Organization.MasterAccountEmail) + d.Set("master_account_id", org.Organization.MasterAccountId) + + if aws.StringValue(org.Organization.MasterAccountId) == meta.(*AWSClient).accountid { + var accounts []*organizations.Account + var nonMasterAccounts []*organizations.Account + err = conn.ListAccountsPages(&organizations.ListAccountsInput{}, func(page *organizations.ListAccountsOutput, lastPage bool) bool { + for _, account := range page.Accounts { + if aws.StringValue(account.Id) != aws.StringValue(org.Organization.MasterAccountId) { + nonMasterAccounts = append(nonMasterAccounts, account) + } + + accounts = append(accounts, account) + } + + return !lastPage + }) + if err != nil { + return fmt.Errorf("error listing AWS Organization (%s) accounts: %s", d.Id(), err) + } + + var roots []*organizations.Root + err = conn.ListRootsPages(&organizations.ListRootsInput{}, func(page *organizations.ListRootsOutput, lastPage bool) bool { + roots = append(roots, page.Roots...) + return !lastPage + }) + if err != nil { + return fmt.Errorf("error listing AWS Organization (%s) roots: %s", d.Id(), err) + } + + awsServiceAccessPrincipals := make([]string, 0) + // ConstraintViolationException: The request failed because the organization does not have all features enabled. Please enable all features in your organization and then retry. 
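+ // Service access principals can only be listed when the organization has all features enabled, so the call below is gated on the feature set.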
+ if aws.StringValue(org.Organization.FeatureSet) == organizations.OrganizationFeatureSetAll { + err = conn.ListAWSServiceAccessForOrganizationPages(&organizations.ListAWSServiceAccessForOrganizationInput{}, func(page *organizations.ListAWSServiceAccessForOrganizationOutput, lastPage bool) bool { + for _, enabledServicePrincipal := range page.EnabledServicePrincipals { + awsServiceAccessPrincipals = append(awsServiceAccessPrincipals, aws.StringValue(enabledServicePrincipal.ServicePrincipal)) + } + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing AWS Service Access for Organization (%s): %s", d.Id(), err) + } + } + + enabledPolicyTypes := make([]string, 0) + if len(roots) > 0 { + for _, policyType := range roots[0].PolicyTypes { + if aws.StringValue(policyType.Status) == organizations.PolicyTypeStatusEnabled { + enabledPolicyTypes = append(enabledPolicyTypes, aws.StringValue(policyType.Type)) + } + } + } + + if err := d.Set("accounts", flattenOrganizationsAccounts(accounts)); err != nil { + return fmt.Errorf("error setting accounts: %s", err) + } + + if err := d.Set("aws_service_access_principals", awsServiceAccessPrincipals); err != nil { + return fmt.Errorf("error setting aws_service_access_principals: %s", err) + } + + if err := d.Set("enabled_policy_types", enabledPolicyTypes); err != nil { + return fmt.Errorf("error setting enabled_policy_types: %s", err) + } + + if err := d.Set("non_master_accounts", flattenOrganizationsAccounts(nonMasterAccounts)); err != nil { + return fmt.Errorf("error setting non_master_accounts: %s", err) + } + + if err := d.Set("roots", flattenOrganizationsRoots(roots)); err != nil { + return fmt.Errorf("error setting roots: %s", err) + } + + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go index d52f7ee47b4..d1f26518436 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go @@ -4,7 +4,7 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsPartition() *schema.Resource { @@ -16,6 +16,10 @@ func dataSourceAwsPartition() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "dns_suffix": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -29,5 +33,8 @@ func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Setting AWS Partition to %s.", client.partition) d.Set("partition", meta.(*AWSClient).partition) + log.Printf("[DEBUG] Setting AWS URL Suffix to %s.", client.dnsSuffix) + d.Set("dns_suffix", meta.(*AWSClient).dnsSuffix) + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go index 5a251a737d3..faf1eeee18c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsPrefixList() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_pricing_product.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_pricing_product.go index 393acef12a6..72b778ffa76 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_pricing_product.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_pricing_product.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pricing" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsPricingProduct() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_qldb_ledger.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_qldb_ledger.go new file mode 100644 index 00000000000..58e3cac689c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_qldb_ledger.go @@ -0,0 +1,61 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/qldb" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsQLDBLedger() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsQLDBLedgerRead, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9_-]+$`), "must contain only alphanumeric characters, underscores, and hyphens"), + ), + }, + + "deletion_protection": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func dataSourceAwsQLDBLedgerRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).qldbconn + + target := d.Get("name") + + req := &qldb.DescribeLedgerInput{ + Name: aws.String(target.(string)), + } + + log.Printf("[DEBUG] Reading QLDB Ledger: %s", req) + resp, err := conn.DescribeLedger(req) + + if err != nil { + return fmt.Errorf("Error describing ledger: %s", err) + } + + d.SetId(aws.StringValue(resp.Name)) + d.Set("arn", resp.Arn) + d.Set("deletion_protection", resp.DeletionProtection) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ram_resource_share.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ram_resource_share.go new file mode 100644 index 00000000000..dea0e4e57c5 --- /dev/null +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ram_resource_share.go @@ -0,0 +1,150 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ram" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsRamResourceShare() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRamResourceShareRead, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "resource_owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + ram.ResourceOwnerOtherAccounts, + ram.ResourceOwnerSelf, + }, false), + }, + + "name": { + Type: schema.TypeString, + Required: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "owning_account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": { + Type: schema.TypeMap, + Computed: true, + }, + + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsRamResourceShareRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ramconn + + name := d.Get("name").(string) + owner := d.Get("resource_owner").(string) + + filters, filtersOk := d.GetOk("filter") + + params := &ram.GetResourceSharesInput{ + Name: aws.String(name), + ResourceOwner: aws.String(owner), + } + + if filtersOk { + params.TagFilters = buildRAMTagFilters(filters.(*schema.Set)) + } + + for { + resp, err := conn.GetResourceShares(params) + + if err != nil { + return fmt.Errorf("Error retrieving resource share %s: %s", name, err) + } + + if resp == nil || len(resp.ResourceShares) == 0 { + return fmt.Errorf("No matching resource share found: %s", name) + } + + if len(resp.ResourceShares) > 1 { + return fmt.Errorf("Multiple resource shares found for: %s", name) + } + + for _, r := range resp.ResourceShares { + if aws.StringValue(r.Name) == name { + d.SetId(aws.StringValue(r.ResourceShareArn)) + d.Set("arn", aws.StringValue(r.ResourceShareArn)) + d.Set("owning_account_id", aws.StringValue(r.OwningAccountId)) + d.Set("status", aws.StringValue(r.Status)) + + if err := d.Set("tags", tagsToMapRAM(r.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + break + } + } + + if resp.NextToken == nil { + break + } + + params.NextToken = resp.NextToken + } + + return nil +} + +func buildRAMTagFilters(set *schema.Set) []*ram.TagFilter { + var filters []*ram.TagFilter + + for _, v := range set.List() { + m := v.(map[string]interface{}) + var filterValues []*string + for _, e := range m["values"].([]interface{}) { + filterValues = append(filterValues, aws.String(e.(string))) + } + filters = append(filters, &ram.TagFilter{ + TagKey: aws.String(m["name"].(string)), + TagValues: filterValues, + }) + } + + return filters +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_rds_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_rds_cluster.go index 27f2d44f0f8..00427b42213 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_rds_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_rds_cluster.go @@ -7,7 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsRdsCluster() *schema.Resource { @@ -137,6 +138,11 @@ func dataSourceAwsRdsCluster() *schema.Resource { Computed: true, }, + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + "replication_source_identifier": { Type: schema.TypeString, Computed: true, @@ -196,7 +202,8 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting availability_zones: %s", err) } - d.Set("arn", dbc.DBClusterArn) + arn := dbc.DBClusterArn + d.Set("arn", arn) d.Set("backtrack_window", int(aws.Int64Value(dbc.BacktrackWindow))) d.Set("backup_retention_period", dbc.BackupRetentionPeriod) d.Set("cluster_identifier", dbc.DBClusterIdentifier) @@ -258,9 +265,14 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting vpc_security_group_ids: %s", err) } - // Fetch and save tags - if err := saveTagsRDS(conn, d, aws.StringValue(dbc.DBClusterArn)); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster (%s): %s", aws.StringValue(dbc.DBClusterIdentifier), err) + tags, err := keyvaluetags.RdsListTags(conn, *arn) + + if err != nil { + return fmt.Errorf("error listing tags for RDS Cluster (%s): %s", *arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_cluster.go index 4aaeeb251ed..e9baeeaaa24 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_cluster.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRedshiftCluster() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go index 77a18fd6dd0..6fa61225d61 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go @@ -4,10 +4,12 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging +// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-bucket-permissions +// See https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-redshift.html +// See https://docs.amazonaws.cn/en_us/redshift/latest/mgmt/db-auditing.html#db-auditing-bucket-permissions var redshiftServiceAccountPerRegionMap = map[string]string{ "us-east-1": "193672423079", "us-east-2": "391106570357", @@ -15,16 +17,20 @@ var redshiftServiceAccountPerRegionMap = map[string]string{ "us-west-2": "902366379725", "ap-east-1": "313564881002", "ap-south-1": "865932855811", + "ap-northeast-3": "090321488786", "ap-northeast-2": "760740231472", "ap-southeast-1": "361669875840", "ap-southeast-2": "762762565011", "ap-northeast-1": "404641285394", "ca-central-1": "907379612154", + "cn-north-1": "111890595117", "cn-northwest-1": "660998842044", "eu-central-1": "053454850223", "eu-west-1": "210876761215", "eu-west-2": "307160386991", "eu-west-3": "915173422425", + "eu-north-1": "729911121831", + "me-south-1": "013126148197", "sa-east-1": "075028567923", "us-gov-east-1": "665727464434", "us-gov-west-1": "665727464434", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go index 9c8354a26b7..dbafe3c4815 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRegion() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route.go index e85f12d071f..26ebc96405a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route.go @@ -5,7 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRoute() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_delegation_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_delegation_set.go index 4cd4f08d29f..f132a59b1db 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_delegation_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_delegation_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" - 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsDelegationSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rule.go new file mode 100644 index 00000000000..481afd3e2ca --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rule.go @@ -0,0 +1,155 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsRoute53ResolverRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRoute53ResolverRuleRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "domain_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringLenBetween(1, 256), + ConflictsWith: []string{"resolver_rule_id"}, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateRoute53ResolverName, + ConflictsWith: []string{"resolver_rule_id"}, + }, + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + + "resolver_endpoint_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"resolver_rule_id"}, + }, + + "resolver_rule_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"domain_name", "name", "resolver_endpoint_id", "rule_type"}, + }, + + "rule_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + route53resolver.RuleTypeOptionForward, + route53resolver.RuleTypeOptionSystem, + route53resolver.RuleTypeOptionRecursive, + }, false), + ConflictsWith: []string{"resolver_rule_id"}, + }, + + "share_status": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsRoute53ResolverRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53resolverconn + + var rule *route53resolver.ResolverRule + if v, ok := d.GetOk("resolver_rule_id"); ok { + ruleRaw, state, err := route53ResolverRuleRefresh(conn, v.(string))() + if err != nil { + return fmt.Errorf("error getting Route53 Resolver rule (%s): %s", v, err) + } + + if state == route53ResolverRuleStatusDeleted { + return fmt.Errorf("no Route53 Resolver rules matched found with the id (%q)", v) + } + + rule = ruleRaw.(*route53resolver.ResolverRule) + } else { + req := &route53resolver.ListResolverRulesInput{ + Filters: buildRoute53ResolverAttributeFilterList(map[string]string{ + "DOMAIN_NAME": d.Get("domain_name").(string), + "NAME": d.Get("name").(string), + "RESOLVER_ENDPOINT_ID": d.Get("resolver_endpoint_id").(string), + "TYPE": d.Get("rule_type").(string), + }), + } + + log.Printf("[DEBUG] Listing Route53 Resolver rules: %s", req) + resp, err := conn.ListResolverRules(req) + if err != nil { + return fmt.Errorf("error getting Route53 Resolver rules: %s", err) + } + + if n := len(resp.ResolverRules); n == 0 { + return fmt.Errorf("no Route53 
Resolver rules matched") + } else if n > 1 { + return fmt.Errorf("%d Route53 Resolver rules matched; use additional constraints to reduce matches to a rule", n) + } + + rule = resp.ResolverRules[0] + } + + d.SetId(aws.StringValue(rule.Id)) + d.Set("arn", rule.Arn) + d.Set("domain_name", rule.DomainName) + d.Set("name", rule.Name) + d.Set("owner_id", rule.OwnerId) + d.Set("resolver_endpoint_id", rule.ResolverEndpointId) + d.Set("resolver_rule_id", rule.Id) + d.Set("rule_type", rule.RuleType) + shareStatus := aws.StringValue(rule.ShareStatus) + d.Set("share_status", shareStatus) + // https://github.com/terraform-providers/terraform-provider-aws/issues/10211 + if shareStatus != route53resolver.ShareStatusSharedWithMe { + if err := getTagsRoute53Resolver(conn, d); err != nil { + return fmt.Errorf("error reading Route 53 Resolver rule (%s) tags: %s", d.Id(), err) + } + } + + return nil +} + +func buildRoute53ResolverAttributeFilterList(attrs map[string]string) []*route53resolver.Filter { + filters := []*route53resolver.Filter{} + + for k, v := range attrs { + if v == "" { + continue + } + + filters = append(filters, &route53resolver.Filter{ + Name: aws.String(k), + Values: aws.StringSlice([]string{v}), + }) + } + + return filters +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rules.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rules.go new file mode 100644 index 00000000000..fa0f6011c81 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_resolver_rules.go @@ -0,0 +1,101 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func dataSourceAwsRoute53ResolverRules() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRoute53ResolverRulesRead, + + Schema: map[string]*schema.Schema{ + "owner_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.Any( + validateAwsAccountId, + // The owner of the default Internet Resolver rule. 
+ validation.StringInSlice([]string{"Route 53 Resolver"}, false), + ), + }, + + "resolver_endpoint_id": { + Type: schema.TypeString, + Optional: true, + }, + + "resolver_rule_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "rule_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + route53resolver.RuleTypeOptionForward, + route53resolver.RuleTypeOptionSystem, + route53resolver.RuleTypeOptionRecursive, + }, false), + }, + + "share_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + route53resolver.ShareStatusNotShared, + route53resolver.ShareStatusSharedWithMe, + route53resolver.ShareStatusSharedByMe, + }, false), + }, + }, + } +} + +func dataSourceAwsRoute53ResolverRulesRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53resolverconn + + req := &route53resolver.ListResolverRulesInput{} + resolverRuleIds := []*string{} + + log.Printf("[DEBUG] Listing Route53 Resolver rules: %s", req) + err := conn.ListResolverRulesPages(req, func(page *route53resolver.ListResolverRulesOutput, isLast bool) bool { + for _, rule := range page.ResolverRules { + if v, ok := d.GetOk("owner_id"); ok && aws.StringValue(rule.OwnerId) != v.(string) { + continue + } + if v, ok := d.GetOk("resolver_endpoint_id"); ok && aws.StringValue(rule.ResolverEndpointId) != v.(string) { + continue + } + if v, ok := d.GetOk("rule_type"); ok && aws.StringValue(rule.RuleType) != v.(string) { + continue + } + if v, ok := d.GetOk("share_status"); ok && aws.StringValue(rule.ShareStatus) != v.(string) { + continue + } + + resolverRuleIds = append(resolverRuleIds, rule.Id) + } + return !isLast + }) + if err != nil { + return fmt.Errorf("error getting Route53 Resolver rules: %s", err) + } + + d.SetId(time.Now().UTC().String()) + err = d.Set("resolver_rule_ids", flattenStringSet(resolverRuleIds)) + if err != nil { + return fmt.Errorf("error setting resolver_rule_ids: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go index e5ee30103a3..d825272ae72 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRoute53Zone() *schema.Resource { @@ -32,12 +32,10 @@ func dataSourceAwsRoute53Zone() *schema.Resource { }, "comment": { Type: schema.TypeString, - Optional: true, Computed: true, }, "caller_reference": { Type: schema.TypeString, - Optional: true, Computed: true, }, "vpc_id": { @@ -56,6 +54,14 @@ func dataSourceAwsRoute53Zone() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, }, + "linked_service_principal": { + Type: schema.TypeString, + Computed: true, + }, + "linked_service_description": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -169,6 +175,10 @@ func dataSourceAwsRoute53ZoneRead(d 
*schema.ResourceData, meta interface{}) erro d.Set("private_zone", hostedZoneFound.Config.PrivateZone) d.Set("caller_reference", hostedZoneFound.CallerReference) d.Set("resource_record_set_count", hostedZoneFound.ResourceRecordSetCount) + if hostedZoneFound.LinkedService != nil { + d.Set("linked_service_principal", hostedZoneFound.LinkedService.ServicePrincipal) + d.Set("linked_service_description", hostedZoneFound.LinkedService.Description) + } nameServers, err := hostedZoneNameServers(idHostedZone, conn) if err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go index 80751a531dc..690317dd2a2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRouteTable() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_tables.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_tables.go index ef34987c37f..77d6eb96328 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_tables.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_tables.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsRouteTables() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket.go index 194c38217cd..e09c1a244ce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsS3Bucket() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go index 5be6586d0b8..bcb1814387d 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsS3BucketObject() *schema.Resource { @@ -74,6 +74,18 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeMap, Computed: true, }, + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Computed: true, + }, + "object_lock_mode": { + Type: schema.TypeString, + Computed: true, + }, + "object_lock_retain_until_date": { + Type: schema.TypeString, + Computed: true, + }, "range": { Type: schema.TypeString, Optional: true, @@ -155,6 +167,9 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("expires", out.Expires) d.Set("last_modified", out.LastModified.Format(time.RFC1123)) d.Set("metadata", pointersMapToStringList(out.Metadata)) + d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", out.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(out.ObjectLockRetainUntilDate)) d.Set("server_side_encryption", out.ServerSideEncryption) d.Set("sse_kms_key_id", out.SSEKMSKeyId) d.Set("version_id", out.VersionId) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_objects.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_objects.go new file mode 100644 index 00000000000..94d173918cf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_objects.go @@ -0,0 +1,150 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +const keyRequestPageSize = 1000 + +func dataSourceAwsS3BucketObjects() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsS3BucketObjectsRead, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + }, + "delimiter": { + Type: schema.TypeString, + Optional: true, + }, + "encoding_type": { + Type: schema.TypeString, + Optional: true, + }, + "max_keys": { + Type: schema.TypeInt, + Optional: true, + Default: 1000, + }, + "start_after": { + Type: schema.TypeString, + Optional: true, + }, + "fetch_owner": { + Type: schema.TypeBool, + Optional: true, + }, + "keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "common_prefixes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owners": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsS3BucketObjectsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3conn + + bucket := d.Get("bucket").(string) + prefix := d.Get("prefix").(string) + + d.SetId(resource.UniqueId()) + + 
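// The object listing has no natural identifier of its own, so each read is assigned a fresh unique ID. + 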
listInput := s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + } + + if prefix != "" { + listInput.Prefix = aws.String(prefix) + } + + if s, ok := d.GetOk("delimiter"); ok { + listInput.Delimiter = aws.String(s.(string)) + } + + if s, ok := d.GetOk("encoding_type"); ok { + listInput.EncodingType = aws.String(s.(string)) + } + + // "listInput.MaxKeys" refers to max keys returned in a single request + // (i.e., page size), not the total number of keys returned if you page + // through the results. "maxKeys" does refer to total keys returned. + maxKeys := int64(d.Get("max_keys").(int)) + if maxKeys <= keyRequestPageSize { + listInput.MaxKeys = aws.Int64(maxKeys) + } + + if s, ok := d.GetOk("start_after"); ok { + listInput.StartAfter = aws.String(s.(string)) + } + + if b, ok := d.GetOk("fetch_owner"); ok { + listInput.FetchOwner = aws.Bool(b.(bool)) + } + + var commonPrefixes []string + var keys []string + var owners []string + + err := conn.ListObjectsV2Pages(&listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool { + for _, commonPrefix := range page.CommonPrefixes { + commonPrefixes = append(commonPrefixes, aws.StringValue(commonPrefix.Prefix)) + } + + for _, object := range page.Contents { + keys = append(keys, aws.StringValue(object.Key)) + + if object.Owner != nil { + owners = append(owners, aws.StringValue(object.Owner.ID)) + } + } + + maxKeys = maxKeys - aws.Int64Value(page.KeyCount) + + if maxKeys <= keyRequestPageSize { + listInput.MaxKeys = aws.Int64(maxKeys) + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucket, err) + } + + if err := d.Set("common_prefixes", commonPrefixes); err != nil { + return fmt.Errorf("error setting common_prefixes: %s", err) + } + + if err := d.Set("keys", keys); err != nil { + return fmt.Errorf("error setting keys: %s", err) + } + + if err := d.Set("owners", owners); err != nil { + return fmt.Errorf("error setting owners: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret.go index 1cba15188c3..2aa573be3a7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" ) func dataSourceAwsSecretsManagerSecret() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret_version.go index 7bc4170de05..1842f4612d1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret_version.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_secretsmanager_secret_version.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSecretsManagerSecretVersion() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go index 80207dd1a6a..7fddc60fe1b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSecurityGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_groups.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_groups.go index d5b1ac121ac..665b44a3b4f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_groups.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_groups.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSecurityGroups() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service.go new file mode 100644 index 00000000000..c472b6e897c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/servicequotas" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsServiceQuotasService() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsServiceQuotasServiceRead, + + Schema: map[string]*schema.Schema{ + "service_code": { + Type: schema.TypeString, + Computed: true, + }, + "service_name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsServiceQuotasServiceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).servicequotasconn + + serviceName := d.Get("service_name").(string) + + input := &servicequotas.ListServicesInput{} + + var service *servicequotas.ServiceInfo + err := 
conn.ListServicesPages(input, func(page *servicequotas.ListServicesOutput, lastPage bool) bool { + for _, s := range page.Services { + if aws.StringValue(s.ServiceName) == serviceName { + service = s + break + } + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing Services: %s", err) + } + + if service == nil { + return fmt.Errorf("error finding Service (%s): no results found", serviceName) + } + + d.Set("service_code", service.ServiceCode) + d.Set("service_name", service.ServiceName) + d.SetId(aws.StringValue(service.ServiceCode)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service_quota.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service_quota.go new file mode 100644 index 00000000000..66aaa1e18c9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_servicequotas_service_quota.go @@ -0,0 +1,144 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/servicequotas" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsServiceQuotasServiceQuota() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsServiceQuotasServiceQuotaRead, + + Schema: map[string]*schema.Schema{ + "adjustable": { + Type: schema.TypeBool, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "default_value": { + Type: schema.TypeFloat, + Computed: true, + }, + "global_quota": { + Type: schema.TypeBool, + Computed: true, + }, + "quota_code": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"quota_name"}, + }, + "quota_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"quota_code"}, + }, + "service_code": { + Type: schema.TypeString, + Required: true, + }, + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func dataSourceAwsServiceQuotasServiceQuotaRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).servicequotasconn + + quotaCode := d.Get("quota_code").(string) + quotaName := d.Get("quota_name").(string) + serviceCode := d.Get("service_code").(string) + + if quotaCode == "" && quotaName == "" { + return fmt.Errorf("either quota_code or quota_name must be configured") + } + + var serviceQuota *servicequotas.ServiceQuota + + if quotaCode == "" { + input := &servicequotas.ListServiceQuotasInput{ + ServiceCode: aws.String(serviceCode), + } + + err := conn.ListServiceQuotasPages(input, func(page *servicequotas.ListServiceQuotasOutput, lastPage bool) bool { + for _, q := range page.Quotas { + if aws.StringValue(q.QuotaName) == quotaName { + serviceQuota = q + break + } + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing Service (%s) Quotas: %s", serviceCode, err) + } + + if serviceQuota == nil { + return fmt.Errorf("error finding Service (%s) Quota (%s): no results found", serviceCode, quotaName) + } + } else { + input := &servicequotas.GetServiceQuotaInput{ + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + output, err := conn.GetServiceQuota(input) + + if err != nil { + return fmt.Errorf("error getting Service (%s) Quota (%s): 
%s", serviceCode, quotaCode, err) + } + + if output == nil { + return fmt.Errorf("error getting Service (%s) Quota (%s): empty result", serviceCode, quotaCode) + } + + serviceQuota = output.Quota + } + + input := &servicequotas.GetAWSDefaultServiceQuotaInput{ + QuotaCode: serviceQuota.QuotaCode, + ServiceCode: serviceQuota.ServiceCode, + } + + output, err := conn.GetAWSDefaultServiceQuota(input) + + if err != nil { + return fmt.Errorf("error getting Service (%s) Default Quota (%s): %s", serviceCode, aws.StringValue(serviceQuota.QuotaCode), err) + } + + if output == nil { + return fmt.Errorf("error getting Service (%s) Default Quota (%s): empty result", serviceCode, aws.StringValue(serviceQuota.QuotaCode)) + } + + defaultQuota := output.Quota + + d.Set("adjustable", serviceQuota.Adjustable) + d.Set("arn", serviceQuota.QuotaArn) + d.Set("default_value", defaultQuota.Value) + d.Set("global_quota", serviceQuota.GlobalQuota) + d.Set("quota_code", serviceQuota.QuotaCode) + d.Set("quota_name", serviceQuota.QuotaName) + d.Set("service_code", serviceQuota.ServiceCode) + d.Set("service_name", serviceQuota.ServiceName) + d.Set("value", serviceQuota.Value) + d.SetId(aws.StringValue(serviceQuota.QuotaArn)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go index a28a118793e..f9684e39be9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go @@ -7,7 +7,7 @@ import ( "time" "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSnsTopic() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sqs_queue.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sqs_queue.go index 40099d6a426..e2cf0672524 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sqs_queue.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sqs_queue.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSqsQueue() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_document.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_document.go index 6819233a4cc..3e910e3f7de 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_document.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_document.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func dataSourceAwsSsmDocument() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go index 68df28df4c0..d3dbce452c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSsmParameter() *schema.Resource { @@ -37,6 +37,10 @@ func dataSourceAwsSsmParameter() *schema.Resource { Optional: true, Default: true, }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, }, } } @@ -72,6 +76,7 @@ func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { d.Set("name", param.Name) d.Set("type", param.Type) d.Set("value", param.Value) + d.Set("version", param.Version) return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_storagegateway_local_disk.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_storagegateway_local_disk.go index ed86c4a4b5c..6d9a1c6c951 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_storagegateway_local_disk.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_storagegateway_local_disk.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsStorageGatewayLocalDisk() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go index e14b2612b44..064d3c25d3b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go @@ -6,7 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsSubnet() *schema.Resource { @@ -162,7 +163,11 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { d.Set("cidr_block", subnet.CidrBlock) d.Set("default_for_az", subnet.DefaultForAz) d.Set("state", subnet.State) - d.Set("tags", tagsToMap(subnet.Tags)) + + if err := d.Set("tags", 
keyvaluetags.Ec2KeyValueTags(subnet.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go index 21339f1ab91..ce72d3d2aff 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go @@ -5,7 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsSubnetIDs() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_transfer_server.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_transfer_server.go index a08e6d1acfc..b682553ff71 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_transfer_server.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_transfer_server.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/transfer" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsTransferServer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go index 10db3e99f1b..edd13c10c8b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go @@ -7,7 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsVpc() *schema.Resource { @@ -176,7 +177,11 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { d.Set("instance_tenancy", vpc.InstanceTenancy) d.Set("default", vpc.IsDefault) d.Set("state", vpc.State) - d.Set("tags", tagsToMap(vpc.Tags)) + + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpc.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("owner_id", vpc.OwnerId) arn := arn.ARN{ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_dhcp_options.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_dhcp_options.go index f66f1d4ba2f..3680a82710c 100644 --- 
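The tags changes to this data source, and to aws_vpc below, are part of the provider-wide migration from the older tagsToMap helpers to the keyvaluetags package; the chained call drops AWS-managed tags before the map reaches state. A rough standalone equivalent, for orientation only (the real package routes through a service-agnostic KeyValueTags type so the same filters work for every service):

```go
package aws

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// ec2TagsToUserMap approximates keyvaluetags.Ec2KeyValueTags(tags).IgnoreAws().Map():
// a plain map of the resource's tags with the AWS-internal "aws:"-prefixed
// keys filtered out, since those are never user-configurable.
func ec2TagsToUserMap(tags []*ec2.Tag) map[string]string {
	m := make(map[string]string, len(tags))
	for _, tag := range tags {
		key := aws.StringValue(tag.Key)
		if strings.HasPrefix(key, "aws:") {
			continue
		}
		m[key] = aws.StringValue(tag.Value)
	}

	return m
}
```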
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_dhcp_options.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_dhcp_options.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsVpcDhcpOptions() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go index 2669ceb8e0e..4c5f76fd33c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go @@ -6,7 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" ) func dataSourceAwsVpcEndpoint() *schema.Resource { @@ -14,27 +15,40 @@ func dataSourceAwsVpcEndpoint() *schema.Resource { Read: dataSourceAwsVpcEndpointRead, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, + "cidr_blocks": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "vpc_id": { - Type: schema.TypeString, - Optional: true, + "dns_entry": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, - "service_name": { + "filter": ec2CustomFiltersSchema(), + "id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "state": { - Type: schema.TypeString, - Optional: true, + "network_interface_ids": { + Type: schema.TypeSet, Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, - "vpc_endpoint_type": { + "owner_id": { Type: schema.TypeString, Computed: true, }, @@ -42,58 +56,55 @@ func dataSourceAwsVpcEndpoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "route_table_ids": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, "prefix_list_id": { Type: schema.TypeString, Computed: true, }, - "cidr_blocks": { - Type: schema.TypeList, + "private_dns_enabled": { + Type: schema.TypeBool, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, }, - "subnet_ids": { + "requester_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "route_table_ids": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "network_interface_ids": { + "security_group_ids": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "security_group_ids": { + "service_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + 
"subnet_ids": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "private_dns_enabled": { - Type: schema.TypeBool, + "tags": tagsSchemaComputed(), + "vpc_endpoint_type": { + Type: schema.TypeString, Computed: true, }, - "dns_entry": { - Type: schema.TypeList, + "vpc_id": { + Type: schema.TypeString, + Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - "hosted_zone_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, }, }, } @@ -115,25 +126,97 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro "service-name": d.Get("service_name").(string), }, ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) if len(req.Filters) == 0 { // Don't send an empty filters list; the EC2 API won't accept it. req.Filters = nil } log.Printf("[DEBUG] Reading VPC Endpoint: %s", req) - resp, err := conn.DescribeVpcEndpoints(req) + respVpce, err := conn.DescribeVpcEndpoints(req) if err != nil { - return err + return fmt.Errorf("error reading VPC Endpoint: %s", err) } - if resp == nil || len(resp.VpcEndpoints) == 0 { - return fmt.Errorf("no matching VPC endpoint found") + if respVpce == nil || len(respVpce.VpcEndpoints) == 0 { + return fmt.Errorf("no matching VPC Endpoint found") } - if len(resp.VpcEndpoints) > 1 { - return fmt.Errorf("multiple VPC endpoints matched; use additional constraints to reduce matches to a single VPC endpoint") + if len(respVpce.VpcEndpoints) > 1 { + return fmt.Errorf("multiple VPC Endpoints matched; use additional constraints to reduce matches to a single VPC Endpoint") } - vpce := resp.VpcEndpoints[0] + vpce := respVpce.VpcEndpoints[0] d.SetId(aws.StringValue(vpce.VpcEndpointId)) - return vpcEndpointAttributes(d, vpce, conn) + serviceName := aws.StringValue(vpce.ServiceName) + d.Set("service_name", serviceName) + d.Set("state", vpce.State) + d.Set("vpc_id", vpce.VpcId) + + respPl, err := conn.DescribePrefixLists(&ec2.DescribePrefixListsInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "prefix-list-name": serviceName, + }), + }) + if err != nil { + return fmt.Errorf("error reading Prefix List (%s): %s", serviceName, err) + } + if respPl == nil || len(respPl.PrefixLists) == 0 { + d.Set("cidr_blocks", []interface{}{}) + } else if len(respPl.PrefixLists) > 1 { + return fmt.Errorf("multiple prefix lists associated with the service name '%s'. 
Unexpected", serviceName) + } else { + pl := respPl.PrefixLists[0] + + d.Set("prefix_list_id", pl.PrefixListId) + err = d.Set("cidr_blocks", flattenStringList(pl.Cidrs)) + if err != nil { + return fmt.Errorf("error setting cidr_blocks: %s", err) + } + } + + err = d.Set("dns_entry", flattenVpcEndpointDnsEntries(vpce.DnsEntries)) + if err != nil { + return fmt.Errorf("error setting dns_entry: %s", err) + } + err = d.Set("network_interface_ids", flattenStringSet(vpce.NetworkInterfaceIds)) + if err != nil { + return fmt.Errorf("error setting network_interface_ids: %s", err) + } + d.Set("owner_id", vpce.OwnerId) + policy, err := structure.NormalizeJsonString(aws.StringValue(vpce.PolicyDocument)) + if err != nil { + return fmt.Errorf("policy contains an invalid JSON: %s", err) + } + d.Set("policy", policy) + d.Set("private_dns_enabled", vpce.PrivateDnsEnabled) + err = d.Set("route_table_ids", flattenStringSet(vpce.RouteTableIds)) + if err != nil { + return fmt.Errorf("error setting route_table_ids: %s", err) + } + d.Set("requester_managed", vpce.RequesterManaged) + err = d.Set("security_group_ids", flattenVpcEndpointSecurityGroupIds(vpce.Groups)) + if err != nil { + return fmt.Errorf("error setting security_group_ids: %s", err) + } + err = d.Set("subnet_ids", flattenStringSet(vpce.SubnetIds)) + if err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + err = d.Set("tags", tagsToMap(vpce.Tags)) + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + // VPC endpoints don't have types in GovCloud, so set type to default if empty + if vpceType := aws.StringValue(vpce.VpcEndpointType); vpceType == "" { + d.Set("vpc_endpoint_type", ec2.VpcEndpointTypeGateway) + } else { + d.Set("vpc_endpoint_type", vpceType) + } + + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go index b4eaa5e7dd9..63a28a0673c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsVpcEndpointService() *schema.Resource { @@ -16,11 +16,43 @@ func dataSourceAwsVpcEndpointService() *schema.Resource { Read: dataSourceAwsVpcEndpointServiceRead, Schema: map[string]*schema.Schema{ + "acceptance_required": { + Type: schema.TypeBool, + Computed: true, + }, + "availability_zones": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Set: schema.HashString, + }, + "base_endpoint_dns_names": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Set: schema.HashString, + }, + "manages_vpc_endpoints": { + Type: schema.TypeBool, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "private_dns_name": { + Type: schema.TypeString, + Computed: true, + }, "service": { Type: schema.TypeString, Optional: 
true, ConflictsWith: []string{"service_name"}, }, + "service_id": { + Type: schema.TypeString, + Computed: true, + }, "service_name": { Type: schema.TypeString, Optional: true, @@ -31,34 +63,11 @@ func dataSourceAwsVpcEndpointService() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner": { - Type: schema.TypeString, - Computed: true, - }, + "tags": tagsSchemaComputed(), "vpc_endpoint_policy_supported": { Type: schema.TypeBool, Computed: true, }, - "acceptance_required": { - Type: schema.TypeBool, - Computed: true, - }, - "availability_zones": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Set: schema.HashString, - }, - "private_dns_name": { - Type: schema.TypeString, - Computed: true, - }, - "base_endpoint_dns_names": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Set: schema.HashString, - }, }, } } @@ -80,10 +89,10 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{ ServiceNames: aws.StringSlice([]string{serviceName}), } - log.Printf("[DEBUG] Reading VPC Endpoint Services: %s", req) + log.Printf("[DEBUG] Reading VPC Endpoint Service: %s", req) resp, err := conn.DescribeVpcEndpointServices(req) if err != nil { - return fmt.Errorf("Error fetching VPC Endpoint Services: %s", err) + return fmt.Errorf("error reading VPC Endpoint Service (%s): %s", serviceName, err) } if resp == nil || (len(resp.ServiceNames) == 0 && len(resp.ServiceDetails) == 0) { @@ -114,13 +123,25 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{ serviceName = aws.StringValue(sd.ServiceName) d.SetId(strconv.Itoa(hashcode.String(serviceName))) d.Set("service_name", serviceName) - d.Set("service_type", sd.ServiceType[0].ServiceType) - d.Set("owner", sd.Owner) - d.Set("vpc_endpoint_policy_supported", sd.VpcEndpointPolicySupported) d.Set("acceptance_required", sd.AcceptanceRequired) - d.Set("availability_zones", flattenStringList(sd.AvailabilityZones)) + err = d.Set("availability_zones", flattenStringSet(sd.AvailabilityZones)) + if err != nil { + return fmt.Errorf("error setting availability_zones: %s", err) + } + err = d.Set("base_endpoint_dns_names", flattenStringSet(sd.BaseEndpointDnsNames)) + if err != nil { + return fmt.Errorf("error setting base_endpoint_dns_names: %s", err) + } + d.Set("manages_vpc_endpoints", sd.ManagesVpcEndpoints) + d.Set("owner", sd.Owner) d.Set("private_dns_name", sd.PrivateDnsName) - d.Set("base_endpoint_dns_names", flattenStringList(sd.BaseEndpointDnsNames)) + d.Set("service_id", sd.ServiceId) + d.Set("service_type", sd.ServiceType[0].ServiceType) + err = d.Set("tags", tagsToMap(sd.Tags)) + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("vpc_endpoint_policy_supported", sd.VpcEndpointPolicySupported) return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go index b3f1cca8e21..fa7b2aa47cd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" 
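Several attributes in these two data sources switch from `flattenStringList` to `flattenStringSet`, matching their `schema.TypeSet` declarations: a set-typed attribute should be populated with a `*schema.Set` built with the same hash function the schema names. Reconstructed from their call sites, the pair presumably looks like this:

```go
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// flattenStringList converts []*string into the []interface{} shape that
// d.Set expects for a schema.TypeList attribute.
func flattenStringList(list []*string) []interface{} {
	vs := make([]interface{}, 0, len(list))
	for _, v := range list {
		vs = append(vs, aws.StringValue(v))
	}

	return vs
}

// flattenStringSet wraps the same values in a *schema.Set keyed by
// schema.HashString, for attributes declared with Set: schema.HashString.
func flattenStringSet(list []*string) *schema.Set {
	return schema.NewSet(schema.HashString, flattenStringList(list))
}
```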
"github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsVpcPeeringConnection() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpcs.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpcs.go index b51053d8cd8..cc2d5d39107 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpcs.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpcs.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsVpcs() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go index 2d0fa3277ac..b1d21a0dd86 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsVpnGateway() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_ipset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_ipset.go new file mode 100644 index 00000000000..f8197e85414 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_ipset.go @@ -0,0 +1,59 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafIpSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAWSWafIpSetRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAWSWafIpSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + name := d.Get("name").(string) + + ipsets := make([]*waf.IPSetSummary, 0) + // ListIPSetsInput does not have a name parameter for filtering or a paginator + input := &waf.ListIPSetsInput{} + for { + output, err := conn.ListIPSets(input) + if err != nil { + return fmt.Errorf("Error reading WAF IP sets: %s", err) + } + for _, ipset := range output.IPSets { + if aws.StringValue(ipset.Name) == name { + ipsets = append(ipsets, ipset) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(ipsets) == 0 { + return fmt.Errorf("WAF IP Set not found for name: %s", name) + } + if len(ipsets) > 1 { + return fmt.Errorf("Multiple WAF IP Sets found for name: %s", name) + } + + ipset := ipsets[0] + 
d.SetId(aws.StringValue(ipset.IPSetId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rate_based_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rate_based_rule.go new file mode 100644 index 00000000000..502d82886c1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rate_based_rule.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRateBasedRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafRateBasedRuleRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafRateBasedRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + name := d.Get("name").(string) + + rules := make([]*waf.RuleSummary, 0) + // ListRulesInput does not have a name parameter for filtering + input := &waf.ListRateBasedRulesInput{} + for { + output, err := conn.ListRateBasedRules(input) + if err != nil { + return fmt.Errorf("error reading WAF Rate Based Rules: %s", err) + } + for _, rule := range output.Rules { + if aws.StringValue(rule.Name) == name { + rules = append(rules, rule) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(rules) == 0 { + return fmt.Errorf("WAF Rate Based Rules not found for name: %s", name) + } + + if len(rules) > 1 { + return fmt.Errorf("multiple WAF Rate Based Rules found for name: %s", name) + } + + rule := rules[0] + + d.SetId(aws.StringValue(rule.RuleId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rule.go new file mode 100644 index 00000000000..862b016471c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_rule.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafRuleRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + name := d.Get("name").(string) + + rules := make([]*waf.RuleSummary, 0) + // ListRulesInput does not have a name parameter for filtering + input := &waf.ListRulesInput{} + for { + output, err := conn.ListRules(input) + if err != nil { + return fmt.Errorf("error reading WAF Rules: %s", err) + } + for _, rule := range output.Rules { + if aws.StringValue(rule.Name) == name { + rules = append(rules, rule) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(rules) == 0 { + return fmt.Errorf("WAF Rules not found for name: %s", name) + } + + if len(rules) > 1 { + return fmt.Errorf("multiple WAF 
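All five new WAF and WAF Regional lookup data sources (the IP set and rate-based rule above, plus the rule and web ACL variants below) share one loop: the WAF List* APIs accept no name filter and the SDK generates no paginator for them, so each page is fetched by hand through NextMarker and matched client-side. The shared shape, extracted into a single helper (the ACL name in `main` is hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

// findWebACLByName walks every page via NextMarker and filters by name
// client-side, exactly as the data sources in this diff do.
func findWebACLByName(conn *waf.WAF, name string) (*waf.WebACLSummary, error) {
	var matches []*waf.WebACLSummary
	input := &waf.ListWebACLsInput{}
	for {
		output, err := conn.ListWebACLs(input)
		if err != nil {
			return nil, err
		}
		for _, acl := range output.WebACLs {
			if aws.StringValue(acl.Name) == name {
				matches = append(matches, acl)
			}
		}
		if output.NextMarker == nil {
			break
		}
		input.NextMarker = output.NextMarker
	}

	switch len(matches) {
	case 0:
		return nil, fmt.Errorf("no WAF web ACL named %s", name)
	case 1:
		return matches[0], nil
	default:
		return nil, fmt.Errorf("%d WAF web ACLs named %s", len(matches), name)
	}
}

func main() {
	conn := waf.New(session.Must(session.NewSession()))

	acl, err := findWebACLByName(conn, "example")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(aws.StringValue(acl.WebACLId))
}
```

As the regional files below show, the WAF Regional variants differ only in using the wafregionalconn client, whose List* methods share the waf package's request and response types.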
Rules found for name: %s", name) + } + + rule := rules[0] + + d.SetId(aws.StringValue(rule.RuleId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_web_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_web_acl.go new file mode 100644 index 00000000000..8539f793575 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_waf_web_acl.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafWebAcl() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafWebAclRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + name := d.Get("name").(string) + + acls := make([]*waf.WebACLSummary, 0) + // ListWebACLsInput does not have a name parameter for filtering + input := &waf.ListWebACLsInput{} + for { + output, err := conn.ListWebACLs(input) + if err != nil { + return fmt.Errorf("error reading web ACLs: %s", err) + } + for _, acl := range output.WebACLs { + if aws.StringValue(acl.Name) == name { + acls = append(acls, acl) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(acls) == 0 { + return fmt.Errorf("web ACLs not found for name: %s", name) + } + + if len(acls) > 1 { + return fmt.Errorf("multiple web ACLs found for name: %s", name) + } + + acl := acls[0] + + d.SetId(aws.StringValue(acl.WebACLId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_ipset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_ipset.go new file mode 100644 index 00000000000..8b5155e21fe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_ipset.go @@ -0,0 +1,59 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRegionalIpSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAWSWafRegionalIpSetRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAWSWafRegionalIpSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + name := d.Get("name").(string) + + ipsets := make([]*waf.IPSetSummary, 0) + // ListIPSetsInput does not have a name parameter for filtering or a paginator + input := &waf.ListIPSetsInput{} + for { + output, err := conn.ListIPSets(input) + if err != nil { + return fmt.Errorf("Error reading WAF Regional IP sets: %s", err) + } + for _, ipset := range output.IPSets { + if aws.StringValue(ipset.Name) == name { + ipsets = append(ipsets, ipset) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(ipsets) == 0 { + return fmt.Errorf("WAF Regional IP Set not found for name: 
%s", name) + } + if len(ipsets) > 1 { + return fmt.Errorf("Multiple WAF Regional IP Sets found for name: %s", name) + } + + ipset := ipsets[0] + d.SetId(aws.StringValue(ipset.IPSetId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rate_based_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rate_based_rule.go new file mode 100644 index 00000000000..8cf1187a915 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rate_based_rule.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRegionalRateBasedRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafRegionalRateBasedRuleRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafRegionalRateBasedRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + name := d.Get("name").(string) + + rules := make([]*waf.RuleSummary, 0) + // ListRulesInput does not have a name parameter for filtering + input := &waf.ListRateBasedRulesInput{} + for { + output, err := conn.ListRateBasedRules(input) + if err != nil { + return fmt.Errorf("error reading WAF Rate Based Rules: %s", err) + } + for _, rule := range output.Rules { + if aws.StringValue(rule.Name) == name { + rules = append(rules, rule) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(rules) == 0 { + return fmt.Errorf("WAF Rate Based Rules not found for name: %s", name) + } + + if len(rules) > 1 { + return fmt.Errorf("multiple WAF Rate Based Rules found for name: %s", name) + } + + rule := rules[0] + + d.SetId(aws.StringValue(rule.RuleId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rule.go new file mode 100644 index 00000000000..3daea121228 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_rule.go @@ -0,0 +1,61 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRegionalRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafRegionalRuleRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafRegionalRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + name := d.Get("name").(string) + + rules := make([]*waf.RuleSummary, 0) + // ListRulesInput does not have a name parameter for filtering + input := &waf.ListRulesInput{} + for { + output, err := conn.ListRules(input) + if err != nil { + return fmt.Errorf("error reading WAF Rule: %s", err) + } + for _, rule := range output.Rules { + if aws.StringValue(rule.Name) == name { + rules = 
append(rules, rule) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(rules) == 0 { + return fmt.Errorf("WAF Rule not found for name: %s", name) + } + + if len(rules) > 1 { + return fmt.Errorf("multiple WAF Rules found for name: %s", name) + } + + rule := rules[0] + + d.SetId(aws.StringValue(rule.RuleId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_web_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_web_acl.go new file mode 100644 index 00000000000..1ef48de49b9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_wafregional_web_acl.go @@ -0,0 +1,60 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func dataSourceAwsWafRegionalWebAcl() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsWafRegionalWebAclRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsWafRegionalWebAclRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + name := d.Get("name").(string) + + acls := make([]*waf.WebACLSummary, 0) + // ListWebACLsInput does not have a name parameter for filtering + input := &waf.ListWebACLsInput{} + for { + output, err := conn.ListWebACLs(input) + if err != nil { + return fmt.Errorf("error reading web ACLs: %s", err) + } + for _, acl := range output.WebACLs { + if aws.StringValue(acl.Name) == name { + acls = append(acls, acl) + } + } + + if output.NextMarker == nil { + break + } + input.NextMarker = output.NextMarker + } + + if len(acls) == 0 { + return fmt.Errorf("web ACLs not found for name: %s", name) + } + + if len(acls) > 1 { + return fmt.Errorf("multiple web ACLs found for name: %s", name) + } + + acl := acls[0] + + d.SetId(aws.StringValue(acl.WebACLId)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_workspaces_bundle.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_workspaces_bundle.go index 0da1e52a1d8..1d1088db9e2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_workspaces_bundle.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_workspaces_bundle.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/workspaces" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSourceAwsWorkspaceBundle() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/datasync.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/datasync.go index 9626c4c9123..046765d60f8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/datasync.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/datasync.go @@ -5,7 +5,7 @@ 
import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dataSyncParseLocationURI(uri string) (string, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go index 0e340e4351e..cae32e2a94a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go @@ -7,7 +7,7 @@ import ( "net/url" "strings" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/jen20/awspolicyequivalence" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/dx_vif.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/dx_vif.go index ebf0a54e045..e475a533b63 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/dx_vif.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/dx_vif.go @@ -7,14 +7,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dxVirtualInterfaceRead(id string, conn *directconnect.DirectConnect) (*directconnect.VirtualInterface, error) { resp, state, err := dxVirtualInterfaceStateRefresh(conn, id)() if err != nil { - return nil, fmt.Errorf("Error reading Direct Connect virtual interface: %s", err) + return nil, fmt.Errorf("error reading Direct Connect virtual interface (%s): %s", id, err) } if state == directconnect.VirtualInterfaceStateDeleted { return nil, nil @@ -26,26 +26,21 @@ func dxVirtualInterfaceRead(id string, conn *directconnect.DirectConnect) (*dire func dxVirtualInterfaceUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).dxconn - req := &directconnect.UpdateVirtualInterfaceAttributesInput{ - VirtualInterfaceId: aws.String(d.Id()), - } - - requestUpdate := false if d.HasChange("mtu") { - req.Mtu = aws.Int64(int64(d.Get("mtu").(int))) - requestUpdate = true - } + req := &directconnect.UpdateVirtualInterfaceAttributesInput{ + Mtu: aws.Int64(int64(d.Get("mtu").(int))), + VirtualInterfaceId: aws.String(d.Id()), + } - if requestUpdate { - log.Printf("[DEBUG] Modifying Direct Connect virtual interface attributes: %#v", req) + log.Printf("[DEBUG] Modifying Direct Connect virtual interface attributes: %s", req) _, err := conn.UpdateVirtualInterfaceAttributes(req) if err != nil { - return fmt.Errorf("Error modifying Direct Connect virtual interface (%s) attributes, error: %s", d.Id(), err) + return fmt.Errorf("error modifying Direct Connect virtual interface (%s) attributes, error: %s", d.Id(), err) } } if err := setTagsDX(conn, d, d.Get("arn").(string)); err != nil { - return err + return fmt.Errorf("error setting Direct Connect virtual interface (%s) tags: %s", d.Id(), err) } return nil @@ -62,7 +57,7 @@ func 
dxVirtualInterfaceDelete(d *schema.ResourceData, meta interface{}) error { if isAWSErr(err, directconnect.ErrCodeClientException, "does not exist") { return nil } - return fmt.Errorf("Error deleting Direct Connect virtual interface: %s", err) + return fmt.Errorf("error deleting Direct Connect virtual interface (%s): %s", d.Id(), err) } deleteStateConf := &resource.StateChangeConf{ @@ -85,7 +80,7 @@ func dxVirtualInterfaceDelete(d *schema.ResourceData, meta interface{}) error { } _, err = deleteStateConf.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for Direct Connect virtual interface (%s) to be deleted: %s", d.Id(), err) + return fmt.Errorf("error waiting for Direct Connect virtual interface (%s) to be deleted: %s", d.Id(), err) } return nil @@ -125,7 +120,7 @@ func dxVirtualInterfaceWaitUntilAvailable(conn *directconnect.DirectConnect, vif MinTimeout: 5 * time.Second, } if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Direct Connect virtual interface (%s) to become available: %s", vifId, err) + return fmt.Errorf("error waiting for Direct Connect virtual interface (%s) to become available: %s", vifId, err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go index 743d2822456..a5d8a994c4b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // buildEC2AttributeFilterList takes a flat map of scalar attributes (most diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_transit_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_transit_gateway.go index 89f4fc0ca66..740eccf1f8d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_transit_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_transit_gateway.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) func decodeEc2TransitGatewayRouteID(id string) (string, string, error) { @@ -541,7 +541,24 @@ func waitForEc2TransitGatewayRouteTableAssociationDeletion(conn *ec2.EC2, transi return err } -func waitForEc2TransitGatewayRouteTableAttachmentCreation(conn *ec2.EC2, transitGatewayAttachmentID string) error { +func waitForEc2TransitGatewayVpcAttachmentAcceptance(conn *ec2.EC2, transitGatewayAttachmentID string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ec2.TransitGatewayAttachmentStatePending, + ec2.TransitGatewayAttachmentStatePendingAcceptance, + }, + Target: []string{ec2.TransitGatewayAttachmentStateAvailable}, + Refresh: ec2TransitGatewayVpcAttachmentRefreshFunc(conn, transitGatewayAttachmentID), + Timeout: 10 * time.Minute, + } + + log.Printf("[DEBUG] Waiting for EC2 Transit Gateway VPC Attachment 
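The delete path above tolerates a ClientException whose message contains "does not exist" via the provider's isAWSErr helper, which this diff does not touch. Reconstructed from call sites such as this one and the FSx refresh function below (a sketch, not the vendored definition):

```go
package aws

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// isAWSErr reports whether err is an awserr.Error carrying the given code
// and, when message is non-empty, whether the error message contains it.
func isAWSErr(err error, code string, message string) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == code && strings.Contains(awsErr.Message(), message)
	}

	return false
}
```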
(%s) availability", transitGatewayAttachmentID) + _, err := stateConf.WaitForState() + + return err +} + +func waitForEc2TransitGatewayVpcAttachmentCreation(conn *ec2.EC2, transitGatewayAttachmentID string) error { stateConf := &resource.StateChangeConf{ Pending: []string{ec2.TransitGatewayAttachmentStatePending}, Target: []string{ @@ -558,7 +575,7 @@ func waitForEc2TransitGatewayRouteTableAttachmentCreation(conn *ec2.EC2, transit return err } -func waitForEc2TransitGatewayRouteTableAttachmentDeletion(conn *ec2.EC2, transitGatewayAttachmentID string) error { +func waitForEc2TransitGatewayVpcAttachmentDeletion(conn *ec2.EC2, transitGatewayAttachmentID string) error { stateConf := &resource.StateChangeConf{ Pending: []string{ ec2.TransitGatewayAttachmentStateAvailable, @@ -580,7 +597,7 @@ func waitForEc2TransitGatewayRouteTableAttachmentDeletion(conn *ec2.EC2, transit return err } -func waitForEc2TransitGatewayRouteTableAttachmentUpdate(conn *ec2.EC2, transitGatewayAttachmentID string) error { +func waitForEc2TransitGatewayVpcAttachmentUpdate(conn *ec2.EC2, transitGatewayAttachmentID string) error { stateConf := &resource.StateChangeConf{ Pending: []string{ec2.TransitGatewayAttachmentStateModifying}, Target: []string{ec2.TransitGatewayAttachmentStateAvailable}, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ecs_task_definition_equivalency.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ecs_task_definition_equivalency.go index c67707dfcfe..01082fa7463 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ecs_task_definition_equivalency.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ecs_task_definition_equivalency.go @@ -78,7 +78,7 @@ func (cd containerDefinitions) Reduce(isAWSVPC bool) error { // Deal with fields which may be re-ordered in the API sort.Slice(def.Environment, func(i, j int) bool { - return *def.Environment[i].Name < *def.Environment[j].Name + return aws.StringValue(def.Environment[i].Name) < aws.StringValue(def.Environment[j].Name) }) // Create a mutable copy diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/fsx.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/fsx.go new file mode 100644 index 00000000000..020002f8575 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/fsx.go @@ -0,0 +1,77 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +func describeFsxFileSystem(conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(id)}, + } + var filesystem *fsx.FileSystem + + err := conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemId) == id { + filesystem = fs + return false + } + } + + return !lastPage + }) + + return filesystem, err +} + +func refreshFsxFileSystemLifecycle(conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + filesystem, err := describeFsxFileSystem(conn, id) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + 
return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if filesystem == nil { + return nil, "", nil + } + + return filesystem, aws.StringValue(filesystem.Lifecycle), nil + } +} + +func waitForFsxFileSystemCreation(conn *fsx.FSx, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleCreating}, + Target: []string{fsx.FileSystemLifecycleAvailable}, + Refresh: refreshFsxFileSystemLifecycle(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForFsxFileSystemDeletion(conn *fsx.FSx, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleAvailable, fsx.FileSystemLifecycleDeleting}, + Target: []string{}, + Refresh: refreshFsxFileSystemLifecycle(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go index 0db14a56f85..bb50d2381b8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go @@ -5,6 +5,9 @@ import "fmt" // This list is copied from // http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints // It currently cannot be generated from the API json. +// See https://docs.amazonaws.cn/en_us/general/latest/gr/rande.html#s3_website_region_endpoints +// See https://docs.aws.amazon.com/pt_br/govcloud-us/latest/ug-east/using-govcloud-endpoints.html +// See https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/using-govcloud-endpoints.html var hostedZoneIDsMap = map[string]string{ "ap-east-1": "ZNB98KWMFR0R6", "ap-northeast-1": "Z2M4EHUR26P7ZW", @@ -19,6 +22,7 @@ var hostedZoneIDsMap = map[string]string{ "eu-west-1": "Z1BKCTXD74EZPE", "eu-west-2": "Z3GKZC51ZF0DB4", "eu-west-3": "Z3R1K369G5AVDG", + "me-south-1": "Z1MPMWCPA7YB62", "sa-east-1": "Z7KQH4QJS55SO", "us-east-1": "Z3AQBSTGFYJSTF", "us-east-2": "Z2O1EMRO9K5GLX", diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go index 9558deb4e21..a2635d47bb1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go @@ -3,7 +3,7 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go index 40b1d26f0fb..bdc31bad533 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go @@ -1,6 +1,6 @@ package aws -import "github.com/hashicorp/terraform/helper/schema" +import "github.com/hashicorp/terraform-plugin-sdk/helper/schema" func resourceAwsDbEventSubscriptionImport( d *schema.ResourceData, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_dx_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_dx_gateway.go deleted file mode 100644 index 067e704e208..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_dx_gateway.go +++ /dev/null @@ -1,48 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" -) - -// Direct Connect Gateway import also imports all assocations -func resourceAwsDxGatewayImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - conn := meta.(*AWSClient).dxconn - - id := d.Id() - resp, err := conn.DescribeDirectConnectGateways(&directconnect.DescribeDirectConnectGatewaysInput{ - DirectConnectGatewayId: aws.String(id), - }) - if err != nil { - return nil, err - } - if len(resp.DirectConnectGateways) < 1 || resp.DirectConnectGateways[0] == nil { - return nil, fmt.Errorf("Direct Connect Gateway %s was not found", id) - } - results := make([]*schema.ResourceData, 1) - results[0] = d - - { - subResource := resourceAwsDxGatewayAssociation() - resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ - DirectConnectGatewayId: aws.String(id), - }) - if err != nil { - return nil, err - } - - for _, assoc := range resp.DirectConnectGatewayAssociations { - d := subResource.Data(nil) - d.SetType("aws_dx_gateway_association") - d.Set("dx_gateway_id", assoc.DirectConnectGatewayId) - d.Set("vpn_gateway_id", assoc.VirtualGatewayId) - d.SetId(dxGatewayAssociationId(aws.StringValue(assoc.DirectConnectGatewayId), aws.StringValue(assoc.VirtualGatewayId))) - results = append(results, d) - } - } - - return results, nil -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go index bcc221d0e18..be2c6c164bd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // Network ACLs import their rules and associations diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go index 44262623ecd..b1702011218 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsS3BucketImportState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go index bbae893d6bf..8226566da8f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // Security group import fans out to multiple resources due to the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/flatten.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/flatten.go new file mode 100644 index 00000000000..9ff6e426526 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/flatten.go @@ -0,0 +1,71 @@ +package flatmap + +import ( + "fmt" + "reflect" +) + +// Flatten takes a structure and turns into a flat map[string]string. +// +// Within the "thing" parameter, only primitive values are allowed. Structs are +// not supported. Therefore, it can only be slices, maps, primitives, and +// any combination of those together. +// +// See the tests for examples of what inputs are turned into. +func Flatten(thing map[string]interface{}) Map { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return Map(result) +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." 
+ + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/map.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/map.go new file mode 100644 index 00000000000..435e04a39db --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/flatmap/map.go @@ -0,0 +1,82 @@ +package flatmap + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. +func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/key_value_tags.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/key_value_tags.go new file mode 100644 index 00000000000..63e9ad5fc1d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/key_value_tags.go @@ -0,0 +1,225 @@ +//go:generate go run generators/listtags/main.go +//go:generate go run generators/servicetags/main.go +//go:generate go run generators/updatetags/main.go + +package keyvaluetags + +import ( + "strings" +) + +const ( + AwsTagKeyPrefix = `aws:` + ElasticbeanstalkTagKeyPrefix = `elasticbeanstalk:` + NameTagKey = `Name` + RdsTagKeyPrefix = `rds:` +) + +// KeyValueTags is a standard implementation for AWS key-value resource tags. +// The AWS Go SDK is split into multiple service packages, each service with +// its own Go struct type representing a resource tag. To standardize logic +// across all these Go types, we convert them into this Go type. 
+type KeyValueTags map[string]*string + +// IgnoreAws returns non-AWS tag keys. +func (tags KeyValueTags) IgnoreAws() KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + if !strings.HasPrefix(k, AwsTagKeyPrefix) { + result[k] = v + } + } + + return result +} + +// IgnoreElasticbeanstalk returns non-AWS and non-Elasticbeanstalk tag keys. +func (tags KeyValueTags) IgnoreElasticbeanstalk() KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + if strings.HasPrefix(k, AwsTagKeyPrefix) { + continue + } + + if strings.HasPrefix(k, ElasticbeanstalkTagKeyPrefix) { + continue + } + + if k == NameTagKey { + continue + } + + result[k] = v + } + + return result +} + +// IgnorePrefixes returns non-matching tag key prefixes. +func (tags KeyValueTags) IgnorePrefixes(ignoreTagPrefixes KeyValueTags) KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + var ignore bool + + for ignoreTagPrefix := range ignoreTagPrefixes { + if strings.HasPrefix(k, ignoreTagPrefix) { + ignore = true + break + } + } + + if ignore { + continue + } + + result[k] = v + } + + return result +} + +// IgnoreRds returns non-AWS and non-RDS tag keys. +func (tags KeyValueTags) IgnoreRds() KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + if strings.HasPrefix(k, AwsTagKeyPrefix) { + continue + } + + if strings.HasPrefix(k, RdsTagKeyPrefix) { + continue + } + + result[k] = v + } + + return result +} + +// Ignore returns non-matching tag keys. +func (tags KeyValueTags) Ignore(ignoreTags KeyValueTags) KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + if _, ok := ignoreTags[k]; ok { + continue + } + + result[k] = v + } + + return result +} + +// Keys returns tag keys. +func (tags KeyValueTags) Keys() []string { + result := make([]string, 0, len(tags)) + + for k := range tags { + result = append(result, k) + } + + return result +} + +// Map returns tag keys mapped to their values. +func (tags KeyValueTags) Map() map[string]string { + result := make(map[string]string, len(tags)) + + for k, v := range tags { + result[k] = *v + } + + return result +} + +// Merge adds missing and updates existing tags. +func (tags KeyValueTags) Merge(mergeTags KeyValueTags) KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + result[k] = v + } + + for k, v := range mergeTags { + result[k] = v + } + + return result +} + +// Removed returns tags removed. +func (tags KeyValueTags) Removed(newTags KeyValueTags) KeyValueTags { + result := make(KeyValueTags) + + for k, v := range tags { + if _, ok := newTags[k]; !ok { + result[k] = v + } + } + + return result +} + +// Updated returns tags added and updated. +func (tags KeyValueTags) Updated(newTags KeyValueTags) KeyValueTags { + result := make(KeyValueTags) + + for k, newV := range newTags { + if oldV, ok := tags[k]; !ok || *oldV != *newV { + result[k] = newV + } + } + + return result +} + +// New creates KeyValueTags from common Terraform Provider SDK types. +// Supports map[string]string, map[string]*string, map[string]interface{}, []string, and []interface{}. +// When passed []string or []interface{}, all elements are treated as keys and assigned nil values.
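The Removed/Updated pair above drives the typical tag-update flow: untag what Removed reports, then tag what Updated reports. A minimal sketch, assuming the vendored import path:

package main

import (
	"fmt"

	// Import path assumed from the vendored tree above.
	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

func main() {
	oldTags := keyvaluetags.New(map[string]string{"Name": "web", "Owner": "ops"})
	newTags := keyvaluetags.New(map[string]string{"Name": "web", "Env": "prod"})

	// Keys present before but absent from the desired set.
	fmt.Println(oldTags.Removed(newTags).Keys()) // [Owner]

	// Keys that are new or whose values changed; unchanged "Name" is omitted.
	fmt.Println(oldTags.Updated(newTags).Map()) // map[Env:prod]

	// IgnoreAws strips AWS-managed system tags before they reach state.
	system := keyvaluetags.New(map[string]string{"aws:cloudformation:stack-name": "s", "Name": "web"})
	fmt.Println(system.IgnoreAws().Map()) // map[Name:web]
}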
+func New(i interface{}) KeyValueTags { + switch value := i.(type) { + case map[string]string: + kvtm := make(KeyValueTags, len(value)) + + for k, v := range value { + str := v // Prevent referencing issues + kvtm[k] = &str + } + + return kvtm + case map[string]*string: + return KeyValueTags(value) + case map[string]interface{}: + kvtm := make(KeyValueTags, len(value)) + + for k, v := range value { + str := v.(string) + kvtm[k] = &str + } + + return kvtm + case []string: + kvtm := make(KeyValueTags, len(value)) + + for _, v := range value { + kvtm[v] = nil + } + + return kvtm + case []interface{}: + kvtm := make(KeyValueTags, len(value)) + + for _, v := range value { + kvtm[v.(string)] = nil + } + + return kvtm + default: + return make(KeyValueTags) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/list_tags_gen.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/list_tags_gen.go new file mode 100644 index 00000000000..71899c8a4f7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/list_tags_gen.go @@ -0,0 +1,1286 @@ +// Code generated by generators/listtags/main.go; DO NOT EDIT. + +package keyvaluetags + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/acm" + "github.com/aws/aws-sdk-go/service/acmpca" + "github.com/aws/aws-sdk-go/service/amplify" + "github.com/aws/aws-sdk-go/service/appmesh" + "github.com/aws/aws-sdk-go/service/appstream" + "github.com/aws/aws-sdk-go/service/appsync" + "github.com/aws/aws-sdk-go/service/athena" + "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go/service/cloudhsmv2" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go/service/dax" + "github.com/aws/aws-sdk-go/service/devicefarm" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go/service/docdb" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/ecr" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go/service/glue" + "github.com/aws/aws-sdk-go/service/guardduty" + "github.com/aws/aws-sdk-go/service/inspector" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotanalytics" + "github.com/aws/aws-sdk-go/service/iotevents" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go/service/kinesisanalytics" + "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" + 
"github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/aws/aws-sdk-go/service/licensemanager" + "github.com/aws/aws-sdk-go/service/mediaconnect" + "github.com/aws/aws-sdk-go/service/medialive" + "github.com/aws/aws-sdk-go/service/mediapackage" + "github.com/aws/aws-sdk-go/service/mediastore" + "github.com/aws/aws-sdk-go/service/mq" + "github.com/aws/aws-sdk-go/service/neptune" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/aws/aws-sdk-go/service/organizations" + "github.com/aws/aws-sdk-go/service/qldb" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/service/resourcegroups" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/storagegateway" + "github.com/aws/aws-sdk-go/service/swf" + "github.com/aws/aws-sdk-go/service/transfer" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/aws/aws-sdk-go/service/wafregional" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +// AcmListTags lists acm service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AcmListTags(conn *acm.ACM, identifier string) (KeyValueTags, error) { + input := &acm.ListTagsForCertificateInput{ + CertificateArn: aws.String(identifier), + } + + output, err := conn.ListTagsForCertificate(input) + + if err != nil { + return New(nil), err + } + + return AcmKeyValueTags(output.Tags), nil +} + +// AcmpcaListTags lists acmpca service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AcmpcaListTags(conn *acmpca.ACMPCA, identifier string) (KeyValueTags, error) { + input := &acmpca.ListTagsInput{ + CertificateAuthorityArn: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return AcmpcaKeyValueTags(output.Tags), nil +} + +// AmplifyListTags lists amplify service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AmplifyListTags(conn *amplify.Amplify, identifier string) (KeyValueTags, error) { + input := &lify.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return AmplifyKeyValueTags(output.Tags), nil +} + +// AppmeshListTags lists appmesh service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AppmeshListTags(conn *appmesh.AppMesh, identifier string) (KeyValueTags, error) { + input := &appmesh.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return AppmeshKeyValueTags(output.Tags), nil +} + +// AppstreamListTags lists appstream service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func AppstreamListTags(conn *appstream.AppStream, identifier string) (KeyValueTags, error) { + input := &appstream.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return AppstreamKeyValueTags(output.Tags), nil +} + +// AppsyncListTags lists appsync service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AppsyncListTags(conn *appsync.AppSync, identifier string) (KeyValueTags, error) { + input := &appsync.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return AppsyncKeyValueTags(output.Tags), nil +} + +// AthenaListTags lists athena service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AthenaListTags(conn *athena.Athena, identifier string) (KeyValueTags, error) { + input := &athena.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return AthenaKeyValueTags(output.Tags), nil +} + +// BackupListTags lists backup service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func BackupListTags(conn *backup.Backup, identifier string) (KeyValueTags, error) { + input := &backup.ListTagsInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return BackupKeyValueTags(output.Tags), nil +} + +// Cloudhsmv2ListTags lists cloudhsmv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Cloudhsmv2ListTags(conn *cloudhsmv2.CloudHSMV2, identifier string) (KeyValueTags, error) { + input := &cloudhsmv2.ListTagsInput{ + ResourceId: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return Cloudhsmv2KeyValueTags(output.TagList), nil +} + +// CloudwatchListTags lists cloudwatch service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CloudwatchListTags(conn *cloudwatch.CloudWatch, identifier string) (KeyValueTags, error) { + input := &cloudwatch.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CloudwatchKeyValueTags(output.Tags), nil +} + +// CloudwatcheventsListTags lists cloudwatchevents service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func CloudwatcheventsListTags(conn *cloudwatchevents.CloudWatchEvents, identifier string) (KeyValueTags, error) { + input := &cloudwatchevents.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CloudwatcheventsKeyValueTags(output.Tags), nil +} + +// CloudwatchlogsListTags lists cloudwatchlogs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CloudwatchlogsListTags(conn *cloudwatchlogs.CloudWatchLogs, identifier string) (KeyValueTags, error) { + input := &cloudwatchlogs.ListTagsLogGroupInput{ + LogGroupName: aws.String(identifier), + } + + output, err := conn.ListTagsLogGroup(input) + + if err != nil { + return New(nil), err + } + + return CloudwatchlogsKeyValueTags(output.Tags), nil +} + +// CodecommitListTags lists codecommit service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodecommitListTags(conn *codecommit.CodeCommit, identifier string) (KeyValueTags, error) { + input := &codecommit.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CodecommitKeyValueTags(output.Tags), nil +} + +// CodedeployListTags lists codedeploy service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodedeployListTags(conn *codedeploy.CodeDeploy, identifier string) (KeyValueTags, error) { + input := &codedeploy.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CodedeployKeyValueTags(output.Tags), nil +} + +// CodepipelineListTags lists codepipeline service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodepipelineListTags(conn *codepipeline.CodePipeline, identifier string) (KeyValueTags, error) { + input := &codepipeline.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CodepipelineKeyValueTags(output.Tags), nil +} + +// CognitoidentityListTags lists cognitoidentity service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CognitoidentityListTags(conn *cognitoidentity.CognitoIdentity, identifier string) (KeyValueTags, error) { + input := &cognitoidentity.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CognitoidentityKeyValueTags(output.Tags), nil +} + +// CognitoidentityproviderListTags lists cognitoidentityprovider service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func CognitoidentityproviderListTags(conn *cognitoidentityprovider.CognitoIdentityProvider, identifier string) (KeyValueTags, error) { + input := &cognitoidentityprovider.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CognitoidentityproviderKeyValueTags(output.Tags), nil +} + +// ConfigserviceListTags lists configservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func ConfigserviceListTags(conn *configservice.ConfigService, identifier string) (KeyValueTags, error) { + input := &configservice.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return ConfigserviceKeyValueTags(output.Tags), nil +} + +// DatabasemigrationserviceListTags lists databasemigrationservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DatabasemigrationserviceListTags(conn *databasemigrationservice.DatabaseMigrationService, identifier string) (KeyValueTags, error) { + input := &databasemigrationservice.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return DatabasemigrationserviceKeyValueTags(output.TagList), nil +} + +// DatasyncListTags lists datasync service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DatasyncListTags(conn *datasync.DataSync, identifier string) (KeyValueTags, error) { + input := &datasync.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return DatasyncKeyValueTags(output.Tags), nil +} + +// DaxListTags lists dax service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DaxListTags(conn *dax.DAX, identifier string) (KeyValueTags, error) { + input := &dax.ListTagsInput{ + ResourceName: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return DaxKeyValueTags(output.Tags), nil +} + +// DevicefarmListTags lists devicefarm service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DevicefarmListTags(conn *devicefarm.DeviceFarm, identifier string) (KeyValueTags, error) { + input := &devicefarm.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return DevicefarmKeyValueTags(output.Tags), nil +} + +// DirectoryserviceListTags lists directoryservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func DirectoryserviceListTags(conn *directoryservice.DirectoryService, identifier string) (KeyValueTags, error) { + input := &directoryservice.ListTagsForResourceInput{ + ResourceId: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return DirectoryserviceKeyValueTags(output.Tags), nil +} + +// DocdbListTags lists docdb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DocdbListTags(conn *docdb.DocDB, identifier string) (KeyValueTags, error) { + input := &docdb.ListTagsForResourceInput{ + ResourceName: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return DocdbKeyValueTags(output.TagList), nil +} + +// DynamodbListTags lists dynamodb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DynamodbListTags(conn *dynamodb.DynamoDB, identifier string) (KeyValueTags, error) { + input := &dynamodb.ListTagsOfResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsOfResource(input) + + if err != nil { + return New(nil), err + } + + return DynamodbKeyValueTags(output.Tags), nil +} + +// EcrListTags lists ecr service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EcrListTags(conn *ecr.ECR, identifier string) (KeyValueTags, error) { + input := &ecr.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return EcrKeyValueTags(output.Tags), nil +} + +// EcsListTags lists ecs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EcsListTags(conn *ecs.ECS, identifier string) (KeyValueTags, error) { + input := &ecs.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return EcsKeyValueTags(output.Tags), nil +} + +// EfsListTags lists efs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EfsListTags(conn *efs.EFS, identifier string) (KeyValueTags, error) { + input := &efs.DescribeTagsInput{ + FileSystemId: aws.String(identifier), + } + + output, err := conn.DescribeTags(input) + + if err != nil { + return New(nil), err + } + + return EfsKeyValueTags(output.Tags), nil +} + +// EksListTags lists eks service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EksListTags(conn *eks.EKS, identifier string) (KeyValueTags, error) { + input := &eks.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return EksKeyValueTags(output.Tags), nil +} + +// ElasticacheListTags lists elasticache service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func ElasticacheListTags(conn *elasticache.ElastiCache, identifier string) (KeyValueTags, error) { + input := &elasticache.ListTagsForResourceInput{ + ResourceName: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return ElasticacheKeyValueTags(output.TagList), nil +} + +// ElasticbeanstalkListTags lists elasticbeanstalk service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func ElasticbeanstalkListTags(conn *elasticbeanstalk.ElasticBeanstalk, identifier string) (KeyValueTags, error) { + input := &elasticbeanstalk.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return ElasticbeanstalkKeyValueTags(output.ResourceTags), nil +} + +// ElasticsearchserviceListTags lists elasticsearchservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func ElasticsearchserviceListTags(conn *elasticsearchservice.ElasticsearchService, identifier string) (KeyValueTags, error) { + input := &elasticsearchservice.ListTagsInput{ + ARN: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return ElasticsearchserviceKeyValueTags(output.TagList), nil +} + +// Elbv2ListTags lists elbv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Elbv2ListTags(conn *elbv2.ELBV2, identifier string) (KeyValueTags, error) { + input := &elbv2.DescribeTagsInput{ + ResourceArns: aws.StringSlice([]string{identifier}), + } + + output, err := conn.DescribeTags(input) + + if err != nil { + return New(nil), err + } + + return Elbv2KeyValueTags(output.TagDescriptions[0].Tags), nil +} + +// FirehoseListTags lists firehose service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func FirehoseListTags(conn *firehose.Firehose, identifier string) (KeyValueTags, error) { + input := &firehose.ListTagsForDeliveryStreamInput{ + DeliveryStreamName: aws.String(identifier), + } + + output, err := conn.ListTagsForDeliveryStream(input) + + if err != nil { + return New(nil), err + } + + return FirehoseKeyValueTags(output.Tags), nil +} + +// FsxListTags lists fsx service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func FsxListTags(conn *fsx.FSx, identifier string) (KeyValueTags, error) { + input := &fsx.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return FsxKeyValueTags(output.Tags), nil +} + +// GlueListTags lists glue service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func GlueListTags(conn *glue.Glue, identifier string) (KeyValueTags, error) { + input := &glue.GetTagsInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.GetTags(input) + + if err != nil { + return New(nil), err + } + + return GlueKeyValueTags(output.Tags), nil +} + +// GuarddutyListTags lists guardduty service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func GuarddutyListTags(conn *guardduty.GuardDuty, identifier string) (KeyValueTags, error) { + input := &guardduty.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return GuarddutyKeyValueTags(output.Tags), nil +} + +// InspectorListTags lists inspector service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func InspectorListTags(conn *inspector.Inspector, identifier string) (KeyValueTags, error) { + input := &inspector.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return InspectorKeyValueTags(output.Tags), nil +} + +// IotListTags lists iot service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func IotListTags(conn *iot.IoT, identifier string) (KeyValueTags, error) { + input := &iot.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return IotKeyValueTags(output.Tags), nil +} + +// IotanalyticsListTags lists iotanalytics service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func IotanalyticsListTags(conn *iotanalytics.IoTAnalytics, identifier string) (KeyValueTags, error) { + input := &iotanalytics.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return IotanalyticsKeyValueTags(output.Tags), nil +} + +// IoteventsListTags lists iotevents service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func IoteventsListTags(conn *iotevents.IoTEvents, identifier string) (KeyValueTags, error) { + input := &iotevents.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return IoteventsKeyValueTags(output.Tags), nil +} + +// KafkaListTags lists kafka service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func KafkaListTags(conn *kafka.Kafka, identifier string) (KeyValueTags, error) { + input := &kafka.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return KafkaKeyValueTags(output.Tags), nil +} + +// KinesisanalyticsListTags lists kinesisanalytics service tags. 
+// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func KinesisanalyticsListTags(conn *kinesisanalytics.KinesisAnalytics, identifier string) (KeyValueTags, error) { + input := &kinesisanalytics.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return KinesisanalyticsKeyValueTags(output.Tags), nil +} + +// Kinesisanalyticsv2ListTags lists kinesisanalyticsv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Kinesisanalyticsv2ListTags(conn *kinesisanalyticsv2.KinesisAnalyticsV2, identifier string) (KeyValueTags, error) { + input := &kinesisanalyticsv2.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return Kinesisanalyticsv2KeyValueTags(output.Tags), nil +} + +// KmsListTags lists kms service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func KmsListTags(conn *kms.KMS, identifier string) (KeyValueTags, error) { + input := &kms.ListResourceTagsInput{ + KeyId: aws.String(identifier), + } + + output, err := conn.ListResourceTags(input) + + if err != nil { + return New(nil), err + } + + return KmsKeyValueTags(output.Tags), nil +} + +// LambdaListTags lists lambda service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func LambdaListTags(conn *lambda.Lambda, identifier string) (KeyValueTags, error) { + input := &lambda.ListTagsInput{ + Resource: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return LambdaKeyValueTags(output.Tags), nil +} + +// LicensemanagerListTags lists licensemanager service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func LicensemanagerListTags(conn *licensemanager.LicenseManager, identifier string) (KeyValueTags, error) { + input := &licensemanager.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return LicensemanagerKeyValueTags(output.Tags), nil +} + +// MediaconnectListTags lists mediaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MediaconnectListTags(conn *mediaconnect.MediaConnect, identifier string) (KeyValueTags, error) { + input := &mediaconnect.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return MediaconnectKeyValueTags(output.Tags), nil +} + +// MedialiveListTags lists medialive service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func MedialiveListTags(conn *medialive.MediaLive, identifier string) (KeyValueTags, error) { + input := &medialive.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return MedialiveKeyValueTags(output.Tags), nil +} + +// MediapackageListTags lists mediapackage service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MediapackageListTags(conn *mediapackage.MediaPackage, identifier string) (KeyValueTags, error) { + input := &mediapackage.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return MediapackageKeyValueTags(output.Tags), nil +} + +// MediastoreListTags lists mediastore service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MediastoreListTags(conn *mediastore.MediaStore, identifier string) (KeyValueTags, error) { + input := &mediastore.ListTagsForResourceInput{ + Resource: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return MediastoreKeyValueTags(output.Tags), nil +} + +// MqListTags lists mq service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MqListTags(conn *mq.MQ, identifier string) (KeyValueTags, error) { + input := &mq.ListTagsInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return MqKeyValueTags(output.Tags), nil +} + +// NeptuneListTags lists neptune service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func NeptuneListTags(conn *neptune.Neptune, identifier string) (KeyValueTags, error) { + input := &neptune.ListTagsForResourceInput{ + ResourceName: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return NeptuneKeyValueTags(output.TagList), nil +} + +// OpsworksListTags lists opsworks service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func OpsworksListTags(conn *opsworks.OpsWorks, identifier string) (KeyValueTags, error) { + input := &opsworks.ListTagsInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTags(input) + + if err != nil { + return New(nil), err + } + + return OpsworksKeyValueTags(output.Tags), nil +} + +// OrganizationsListTags lists organizations service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func OrganizationsListTags(conn *organizations.Organizations, identifier string) (KeyValueTags, error) { + input := &organizations.ListTagsForResourceInput{ + ResourceId: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return OrganizationsKeyValueTags(output.Tags), nil +} + +// QldbListTags lists qldb service tags. 
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func QldbListTags(conn *qldb.QLDB, identifier string) (KeyValueTags, error) {
+	input := &qldb.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return QldbKeyValueTags(output.Tags), nil
+}
+
+// RdsListTags lists rds service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func RdsListTags(conn *rds.RDS, identifier string) (KeyValueTags, error) {
+	input := &rds.ListTagsForResourceInput{
+		ResourceName: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return RdsKeyValueTags(output.TagList), nil
+}
+
+// ResourcegroupsListTags lists resourcegroups service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func ResourcegroupsListTags(conn *resourcegroups.ResourceGroups, identifier string) (KeyValueTags, error) {
+	input := &resourcegroups.GetTagsInput{
+		Arn: aws.String(identifier),
+	}
+
+	output, err := conn.GetTags(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return ResourcegroupsKeyValueTags(output.Tags), nil
+}
+
+// Route53resolverListTags lists route53resolver service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func Route53resolverListTags(conn *route53resolver.Route53Resolver, identifier string) (KeyValueTags, error) {
+	input := &route53resolver.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return Route53resolverKeyValueTags(output.Tags), nil
+}
+
+// SagemakerListTags lists sagemaker service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SagemakerListTags(conn *sagemaker.SageMaker, identifier string) (KeyValueTags, error) {
+	input := &sagemaker.ListTagsInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTags(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SagemakerKeyValueTags(output.Tags), nil
+}
+
+// SecurityhubListTags lists securityhub service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SecurityhubListTags(conn *securityhub.SecurityHub, identifier string) (KeyValueTags, error) {
+	input := &securityhub.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SecurityhubKeyValueTags(output.Tags), nil
+}
+
+// SfnListTags lists sfn service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SfnListTags(conn *sfn.SFN, identifier string) (KeyValueTags, error) {
+	input := &sfn.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SfnKeyValueTags(output.Tags), nil
+}
+
+// SnsListTags lists sns service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SnsListTags(conn *sns.SNS, identifier string) (KeyValueTags, error) {
+	input := &sns.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SnsKeyValueTags(output.Tags), nil
+}
+
+// SqsListTags lists sqs service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SqsListTags(conn *sqs.SQS, identifier string) (KeyValueTags, error) {
+	input := &sqs.ListQueueTagsInput{
+		QueueUrl: aws.String(identifier),
+	}
+
+	output, err := conn.ListQueueTags(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SqsKeyValueTags(output.Tags), nil
+}
+
+// SsmListTags lists ssm service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SsmListTags(conn *ssm.SSM, identifier string, resourceType string) (KeyValueTags, error) {
+	input := &ssm.ListTagsForResourceInput{
+		ResourceId:   aws.String(identifier),
+		ResourceType: aws.String(resourceType),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SsmKeyValueTags(output.TagList), nil
+}
+
+// StoragegatewayListTags lists storagegateway service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func StoragegatewayListTags(conn *storagegateway.StorageGateway, identifier string) (KeyValueTags, error) {
+	input := &storagegateway.ListTagsForResourceInput{
+		ResourceARN: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return StoragegatewayKeyValueTags(output.Tags), nil
+}
+
+// SwfListTags lists swf service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func SwfListTags(conn *swf.SWF, identifier string) (KeyValueTags, error) {
+	input := &swf.ListTagsForResourceInput{
+		ResourceArn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return SwfKeyValueTags(output.Tags), nil
+}
+
+// TransferListTags lists transfer service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func TransferListTags(conn *transfer.Transfer, identifier string) (KeyValueTags, error) {
+	input := &transfer.ListTagsForResourceInput{
+		Arn: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return TransferKeyValueTags(output.Tags), nil
+}
+
+// WafListTags lists waf service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func WafListTags(conn *waf.WAF, identifier string) (KeyValueTags, error) {
+	input := &waf.ListTagsForResourceInput{
+		ResourceARN: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return WafKeyValueTags(output.TagInfoForResource.TagList), nil
+}
+
+// WafregionalListTags lists wafregional service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func WafregionalListTags(conn *wafregional.WAFRegional, identifier string) (KeyValueTags, error) {
+	input := &waf.ListTagsForResourceInput{
+		ResourceARN: aws.String(identifier),
+	}
+
+	output, err := conn.ListTagsForResource(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return WafregionalKeyValueTags(output.TagInfoForResource.TagList), nil
+}
+
+// WorkspacesListTags lists workspaces service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func WorkspacesListTags(conn *workspaces.WorkSpaces, identifier string) (KeyValueTags, error) {
+	input := &workspaces.DescribeTagsInput{
+		ResourceId: aws.String(identifier),
+	}
+
+	output, err := conn.DescribeTags(input)
+
+	if err != nil {
+		return New(nil), err
+	}
+
+	return WorkspacesKeyValueTags(output.TagList), nil
+}
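Every generated ListTags function above has the same shape: build the service-specific input, call that service's list/describe API, and convert the response with the matching KeyValueTags constructor. SsmListTags is the one variant that needs a second argument (the SSM resource type); WafregionalListTags reuses the waf package's input type. A minimal caller sketch for SQS follows; it is illustrative only, not part of the vendored diff. keyvaluetags is an internal package, so the import works only from within the provider tree, and conn and queueURL are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"

	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

func main() {
	// A default-credential SQS client; real provider code gets this from its AWSClient.
	conn := sqs.New(session.Must(session.NewSession()))

	// Placeholder identifier; SqsListTags passes it through as QueueUrl.
	queueURL := "https://sqs.us-east-1.amazonaws.com/123456789012/example"

	tags, err := keyvaluetags.SqsListTags(conn, queueURL)
	if err != nil {
		log.Fatalf("listing tags for SQS queue (%s): %s", queueURL, err)
	}

	// Map() flattens KeyValueTags back to a plain map[string]string.
	for k, v := range tags.Map() {
		fmt.Printf("%s=%s\n", k, v)
	}
}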
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_generation_customizations.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_generation_customizations.go
new file mode 100644
index 00000000000..c2851e2a12e
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_generation_customizations.go
@@ -0,0 +1,284 @@
+// This file contains code generation customizations for each AWS Go SDK service.
+
+package keyvaluetags
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/aws/aws-sdk-go/service/acm"
+	"github.com/aws/aws-sdk-go/service/acmpca"
+	"github.com/aws/aws-sdk-go/service/amplify"
+	"github.com/aws/aws-sdk-go/service/apigateway"
+	"github.com/aws/aws-sdk-go/service/apigatewayv2"
+	"github.com/aws/aws-sdk-go/service/appmesh"
+	"github.com/aws/aws-sdk-go/service/appstream"
+	"github.com/aws/aws-sdk-go/service/appsync"
+	"github.com/aws/aws-sdk-go/service/athena"
+	"github.com/aws/aws-sdk-go/service/backup"
+	"github.com/aws/aws-sdk-go/service/cloudfront"
+	"github.com/aws/aws-sdk-go/service/cloudhsmv2"
+	"github.com/aws/aws-sdk-go/service/cloudwatch"
+	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+	"github.com/aws/aws-sdk-go/service/codecommit"
+	"github.com/aws/aws-sdk-go/service/codedeploy"
+	"github.com/aws/aws-sdk-go/service/codepipeline"
+	"github.com/aws/aws-sdk-go/service/cognitoidentity"
+	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
+	"github.com/aws/aws-sdk-go/service/configservice"
+	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
+	"github.com/aws/aws-sdk-go/service/datapipeline"
+	"github.com/aws/aws-sdk-go/service/datasync"
+	"github.com/aws/aws-sdk-go/service/dax"
+	"github.com/aws/aws-sdk-go/service/devicefarm"
+	"github.com/aws/aws-sdk-go/service/directconnect"
+	"github.com/aws/aws-sdk-go/service/directoryservice"
+	"github.com/aws/aws-sdk-go/service/docdb"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/aws/aws-sdk-go/service/ecs"
+	"github.com/aws/aws-sdk-go/service/efs"
+	"github.com/aws/aws-sdk-go/service/eks"
+	"github.com/aws/aws-sdk-go/service/elasticache"
+	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
+	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
+	"github.com/aws/aws-sdk-go/service/elbv2"
+	"github.com/aws/aws-sdk-go/service/emr"
+	"github.com/aws/aws-sdk-go/service/firehose"
+	"github.com/aws/aws-sdk-go/service/fsx"
+	"github.com/aws/aws-sdk-go/service/glue"
+	"github.com/aws/aws-sdk-go/service/guardduty"
+	"github.com/aws/aws-sdk-go/service/inspector"
+	"github.com/aws/aws-sdk-go/service/iot"
+	"github.com/aws/aws-sdk-go/service/iotanalytics"
+	"github.com/aws/aws-sdk-go/service/iotevents"
+	"github.com/aws/aws-sdk-go/service/kafka"
+	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
+	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
+	"github.com/aws/aws-sdk-go/service/kms"
+	"github.com/aws/aws-sdk-go/service/lambda"
+	"github.com/aws/aws-sdk-go/service/licensemanager"
+	"github.com/aws/aws-sdk-go/service/lightsail"
+	"github.com/aws/aws-sdk-go/service/mediaconnect"
+	"github.com/aws/aws-sdk-go/service/mediaconvert"
+	"github.com/aws/aws-sdk-go/service/medialive"
+	"github.com/aws/aws-sdk-go/service/mediapackage"
+	"github.com/aws/aws-sdk-go/service/mediastore"
+	"github.com/aws/aws-sdk-go/service/mq"
+	"github.com/aws/aws-sdk-go/service/neptune"
+	"github.com/aws/aws-sdk-go/service/opsworks"
+	"github.com/aws/aws-sdk-go/service/organizations"
+	"github.com/aws/aws-sdk-go/service/pinpoint"
+	"github.com/aws/aws-sdk-go/service/qldb"
+	"github.com/aws/aws-sdk-go/service/ram"
+	"github.com/aws/aws-sdk-go/service/rds"
+	"github.com/aws/aws-sdk-go/service/redshift"
+	"github.com/aws/aws-sdk-go/service/resourcegroups"
+	"github.com/aws/aws-sdk-go/service/route53resolver"
+	"github.com/aws/aws-sdk-go/service/sagemaker"
+	"github.com/aws/aws-sdk-go/service/secretsmanager"
+	"github.com/aws/aws-sdk-go/service/securityhub"
+	"github.com/aws/aws-sdk-go/service/sfn"
+	"github.com/aws/aws-sdk-go/service/sns"
+	"github.com/aws/aws-sdk-go/service/sqs"
+	"github.com/aws/aws-sdk-go/service/ssm"
+	"github.com/aws/aws-sdk-go/service/storagegateway"
+	"github.com/aws/aws-sdk-go/service/swf"
+	"github.com/aws/aws-sdk-go/service/transfer"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/aws/aws-sdk-go/service/wafregional"
+	"github.com/aws/aws-sdk-go/service/workspaces"
+)
+
+// ServiceClientType determines the service client Go type.
+// The AWS Go SDK does not provide a constant or reproducible inference methodology
+// to get the correct type name of each service, so we resort to reflection for now.
+func ServiceClientType(serviceName string) string {
+	var funcType reflect.Type
+
+	switch serviceName {
+	case "acm":
+		funcType = reflect.TypeOf(acm.New)
+	case "acmpca":
+		funcType = reflect.TypeOf(acmpca.New)
+	case "amplify":
+		funcType = reflect.TypeOf(amplify.New)
+	case "apigateway":
+		funcType = reflect.TypeOf(apigateway.New)
+	case "apigatewayv2":
+		funcType = reflect.TypeOf(apigatewayv2.New)
+	case "appmesh":
+		funcType = reflect.TypeOf(appmesh.New)
+	case "appstream":
+		funcType = reflect.TypeOf(appstream.New)
+	case "appsync":
+		funcType = reflect.TypeOf(appsync.New)
+	case "athena":
+		funcType = reflect.TypeOf(athena.New)
+	case "backup":
+		funcType = reflect.TypeOf(backup.New)
+	case "cloudfront":
+		funcType = reflect.TypeOf(cloudfront.New)
+	case "cloudhsmv2":
+		funcType = reflect.TypeOf(cloudhsmv2.New)
+	case "cloudwatch":
+		funcType = reflect.TypeOf(cloudwatch.New)
+	case "cloudwatchevents":
+		funcType = reflect.TypeOf(cloudwatchevents.New)
+	case "cloudwatchlogs":
+		funcType = reflect.TypeOf(cloudwatchlogs.New)
+	case "codecommit":
+		funcType = reflect.TypeOf(codecommit.New)
+	case "codedeploy":
+		funcType = reflect.TypeOf(codedeploy.New)
+	case "codepipeline":
+		funcType = reflect.TypeOf(codepipeline.New)
+	case "cognitoidentity":
+		funcType = reflect.TypeOf(cognitoidentity.New)
+	case "cognitoidentityprovider":
+		funcType = reflect.TypeOf(cognitoidentityprovider.New)
+	case "configservice":
+		funcType = reflect.TypeOf(configservice.New)
+	case "databasemigrationservice":
+		funcType = reflect.TypeOf(databasemigrationservice.New)
+	case "datapipeline":
+		funcType = reflect.TypeOf(datapipeline.New)
+	case "datasync":
+		funcType = reflect.TypeOf(datasync.New)
+	case "dax":
+		funcType = reflect.TypeOf(dax.New)
+	case "devicefarm":
+		funcType = reflect.TypeOf(devicefarm.New)
+	case "directconnect":
+		funcType = reflect.TypeOf(directconnect.New)
+	case "directoryservice":
+		funcType = reflect.TypeOf(directoryservice.New)
+	case "docdb":
+		funcType = reflect.TypeOf(docdb.New)
+	case "dynamodb":
+		funcType = reflect.TypeOf(dynamodb.New)
+	case "ec2":
+		funcType = reflect.TypeOf(ec2.New)
+	case "ecr":
+		funcType = reflect.TypeOf(ecr.New)
+	case "ecs":
+		funcType = reflect.TypeOf(ecs.New)
+	case "efs":
+		funcType = reflect.TypeOf(efs.New)
+	case "eks":
+		funcType = reflect.TypeOf(eks.New)
+	case "elasticache":
+		funcType = reflect.TypeOf(elasticache.New)
+	case "elasticbeanstalk":
+		funcType = reflect.TypeOf(elasticbeanstalk.New)
+	case "elasticsearchservice":
+		funcType = reflect.TypeOf(elasticsearchservice.New)
+	case "elbv2":
+		funcType = reflect.TypeOf(elbv2.New)
+	case "emr":
+		funcType = reflect.TypeOf(emr.New)
+	case "firehose":
+		funcType = reflect.TypeOf(firehose.New)
+	case "fsx":
+		funcType = reflect.TypeOf(fsx.New)
+	case "glue":
+		funcType = reflect.TypeOf(glue.New)
+	case "guardduty":
+		funcType = reflect.TypeOf(guardduty.New)
+	case "inspector":
+		funcType = reflect.TypeOf(inspector.New)
+	case "iot":
+		funcType = reflect.TypeOf(iot.New)
+	case "iotanalytics":
+		funcType = reflect.TypeOf(iotanalytics.New)
+	case "iotevents":
+		funcType = reflect.TypeOf(iotevents.New)
+	case "kafka":
+		funcType = reflect.TypeOf(kafka.New)
+	case "kinesisanalytics":
+		funcType = reflect.TypeOf(kinesisanalytics.New)
+	case "kinesisanalyticsv2":
+		funcType = reflect.TypeOf(kinesisanalyticsv2.New)
+	case "kms":
+		funcType = reflect.TypeOf(kms.New)
+	case "lambda":
+		funcType = reflect.TypeOf(lambda.New)
+	case "licensemanager":
+		funcType = reflect.TypeOf(licensemanager.New)
+	case "lightsail":
+		funcType = reflect.TypeOf(lightsail.New)
+	case "mediaconnect":
+		funcType = reflect.TypeOf(mediaconnect.New)
+	case "mediaconvert":
+		funcType = reflect.TypeOf(mediaconvert.New)
+	case "medialive":
+		funcType = reflect.TypeOf(medialive.New)
+	case "mediapackage":
+		funcType = reflect.TypeOf(mediapackage.New)
+	case "mediastore":
+		funcType = reflect.TypeOf(mediastore.New)
+	case "mq":
+		funcType = reflect.TypeOf(mq.New)
+	case "neptune":
+		funcType = reflect.TypeOf(neptune.New)
+	case "opsworks":
+		funcType = reflect.TypeOf(opsworks.New)
+	case "organizations":
+		funcType = reflect.TypeOf(organizations.New)
+	case "pinpoint":
+		funcType = reflect.TypeOf(pinpoint.New)
+	case "qldb":
+		funcType = reflect.TypeOf(qldb.New)
+	case "ram":
+		funcType = reflect.TypeOf(ram.New)
+	case "rds":
+		funcType = reflect.TypeOf(rds.New)
+	case "redshift":
+		funcType = reflect.TypeOf(redshift.New)
+	case "resourcegroups":
+		funcType = reflect.TypeOf(resourcegroups.New)
+	case "route53resolver":
+		funcType = reflect.TypeOf(route53resolver.New)
+	case "sagemaker":
+		funcType = reflect.TypeOf(sagemaker.New)
+	case "secretsmanager":
+		funcType = reflect.TypeOf(secretsmanager.New)
+	case "securityhub":
+		funcType = reflect.TypeOf(securityhub.New)
+	case "sfn":
+		funcType = reflect.TypeOf(sfn.New)
+	case "sns":
+		funcType = reflect.TypeOf(sns.New)
+	case "sqs":
+		funcType = reflect.TypeOf(sqs.New)
+	case "ssm":
+		funcType = reflect.TypeOf(ssm.New)
+	case "storagegateway":
+		funcType = reflect.TypeOf(storagegateway.New)
+	case "swf":
+		funcType = reflect.TypeOf(swf.New)
+	case "transfer":
+		funcType = reflect.TypeOf(transfer.New)
+	case "waf":
+		funcType = reflect.TypeOf(waf.New)
+	case "wafregional":
+		funcType = reflect.TypeOf(wafregional.New)
+	case "workspaces":
+		funcType = reflect.TypeOf(workspaces.New)
+	default:
+		panic(fmt.Sprintf("unrecognized ServiceClientType: %s", serviceName))
+	}
+
+	return funcType.Out(0).String()
+}
+
+// ServiceTagPackage determines the service tagging Go package:
+// most services use their own package, while wafregional reuses the waf tag types.
+func ServiceTagPackage(serviceName string) string {
+	switch serviceName {
+	case "wafregional":
+		return "waf"
+	default:
+		return serviceName
+	}
+}
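The reflection trick above works because every AWS Go SDK service package exposes a New constructor whose first return value is the service client pointer, so inspecting the constructor's return type recovers the client type name the generators need. ServiceTagPackage encodes the same quirk visible in WafregionalListTags earlier: the wafregional client reuses the waf package's tag types. A standalone sketch of the reflection idea (illustrative, not part of the vendored diff):

package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go/service/acm"
)

func main() {
	// acm.New has type func(client.ConfigProvider, ...*aws.Config) *acm.ACM,
	// so the first (index 0) return type of the constructor names the client.
	funcType := reflect.TypeOf(acm.New)
	fmt.Println(funcType.Out(0).String()) // prints "*acm.ACM"
}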
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_tags_gen.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_tags_gen.go
new file mode 100644
index 00000000000..2a31af4a643
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/service_tags_gen.go
@@ -0,0 +1,2250 @@
+// Code generated by generators/servicetags/main.go; DO NOT EDIT.
+
+package keyvaluetags
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/acm"
+	"github.com/aws/aws-sdk-go/service/acmpca"
+	"github.com/aws/aws-sdk-go/service/appmesh"
+	"github.com/aws/aws-sdk-go/service/athena"
+	"github.com/aws/aws-sdk-go/service/cloudformation"
+	"github.com/aws/aws-sdk-go/service/cloudfront"
+	"github.com/aws/aws-sdk-go/service/cloudhsmv2"
+	"github.com/aws/aws-sdk-go/service/cloudtrail"
+	"github.com/aws/aws-sdk-go/service/cloudwatch"
+	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	"github.com/aws/aws-sdk-go/service/codebuild"
+	"github.com/aws/aws-sdk-go/service/codedeploy"
+	"github.com/aws/aws-sdk-go/service/codepipeline"
+	"github.com/aws/aws-sdk-go/service/configservice"
+	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
+	"github.com/aws/aws-sdk-go/service/datapipeline"
+	"github.com/aws/aws-sdk-go/service/datasync"
+	"github.com/aws/aws-sdk-go/service/dax"
+	"github.com/aws/aws-sdk-go/service/devicefarm"
+	"github.com/aws/aws-sdk-go/service/directconnect"
+	"github.com/aws/aws-sdk-go/service/directoryservice"
+	"github.com/aws/aws-sdk-go/service/dlm"
+	"github.com/aws/aws-sdk-go/service/docdb"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/aws/aws-sdk-go/service/ecs"
+	"github.com/aws/aws-sdk-go/service/efs"
+	"github.com/aws/aws-sdk-go/service/elasticache"
+	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
+	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
+	"github.com/aws/aws-sdk-go/service/elb"
+	"github.com/aws/aws-sdk-go/service/elbv2"
+	"github.com/aws/aws-sdk-go/service/emr"
+	"github.com/aws/aws-sdk-go/service/firehose"
+	"github.com/aws/aws-sdk-go/service/fms"
+	"github.com/aws/aws-sdk-go/service/fsx"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/inspector"
+	"github.com/aws/aws-sdk-go/service/iot"
+	"github.com/aws/aws-sdk-go/service/iotanalytics"
+	"github.com/aws/aws-sdk-go/service/iotevents"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
+	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
+	"github.com/aws/aws-sdk-go/service/kms"
+	"github.com/aws/aws-sdk-go/service/licensemanager"
+	"github.com/aws/aws-sdk-go/service/lightsail"
+	"github.com/aws/aws-sdk-go/service/mediastore"
+	"github.com/aws/aws-sdk-go/service/neptune"
+	"github.com/aws/aws-sdk-go/service/organizations"
+	"github.com/aws/aws-sdk-go/service/ram"
+	"github.com/aws/aws-sdk-go/service/rds"
+	"github.com/aws/aws-sdk-go/service/redshift"
+	"github.com/aws/aws-sdk-go/service/route53"
+	"github.com/aws/aws-sdk-go/service/route53resolver"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/sagemaker"
+	"github.com/aws/aws-sdk-go/service/secretsmanager"
+	"github.com/aws/aws-sdk-go/service/serverlessapplicationrepository"
+	"github.com/aws/aws-sdk-go/service/servicecatalog"
+	"github.com/aws/aws-sdk-go/service/sfn"
+	"github.com/aws/aws-sdk-go/service/sns"
+	"github.com/aws/aws-sdk-go/service/ssm"
+	"github.com/aws/aws-sdk-go/service/storagegateway"
+	"github.com/aws/aws-sdk-go/service/swf"
+	"github.com/aws/aws-sdk-go/service/transfer"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/aws/aws-sdk-go/service/workspaces"
+)
+
+// map[string]*string handling
+
+// AmplifyTags returns amplify service tags.
+func (tags KeyValueTags) AmplifyTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// AmplifyKeyValueTags creates KeyValueTags from amplify service tags.
+func AmplifyKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// ApigatewayTags returns apigateway service tags.
+func (tags KeyValueTags) ApigatewayTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// ApigatewayKeyValueTags creates KeyValueTags from apigateway service tags.
+func ApigatewayKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// Apigatewayv2Tags returns apigatewayv2 service tags.
+func (tags KeyValueTags) Apigatewayv2Tags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// Apigatewayv2KeyValueTags creates KeyValueTags from apigatewayv2 service tags.
+func Apigatewayv2KeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// AppstreamTags returns appstream service tags.
+func (tags KeyValueTags) AppstreamTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// AppstreamKeyValueTags creates KeyValueTags from appstream service tags.
+func AppstreamKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// AppsyncTags returns appsync service tags.
+func (tags KeyValueTags) AppsyncTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// AppsyncKeyValueTags creates KeyValueTags from appsync service tags.
+func AppsyncKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// BackupTags returns backup service tags.
+func (tags KeyValueTags) BackupTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// BackupKeyValueTags creates KeyValueTags from backup service tags.
+func BackupKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// BatchTags returns batch service tags.
+func (tags KeyValueTags) BatchTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// BatchKeyValueTags creates KeyValueTags from batch service tags.
+func BatchKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// CloudwatchlogsTags returns cloudwatchlogs service tags.
+func (tags KeyValueTags) CloudwatchlogsTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// CloudwatchlogsKeyValueTags creates KeyValueTags from cloudwatchlogs service tags.
+func CloudwatchlogsKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// CodecommitTags returns codecommit service tags.
+func (tags KeyValueTags) CodecommitTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// CodecommitKeyValueTags creates KeyValueTags from codecommit service tags.
+func CodecommitKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// CognitoidentityTags returns cognitoidentity service tags.
+func (tags KeyValueTags) CognitoidentityTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// CognitoidentityKeyValueTags creates KeyValueTags from cognitoidentity service tags.
+func CognitoidentityKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// CognitoidentityproviderTags returns cognitoidentityprovider service tags.
+func (tags KeyValueTags) CognitoidentityproviderTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// CognitoidentityproviderKeyValueTags creates KeyValueTags from cognitoidentityprovider service tags.
+func CognitoidentityproviderKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// EksTags returns eks service tags.
+func (tags KeyValueTags) EksTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// EksKeyValueTags creates KeyValueTags from eks service tags.
+func EksKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// GlacierTags returns glacier service tags.
+func (tags KeyValueTags) GlacierTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// GlacierKeyValueTags creates KeyValueTags from glacier service tags.
+func GlacierKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// GlueTags returns glue service tags.
+func (tags KeyValueTags) GlueTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// GlueKeyValueTags creates KeyValueTags from glue service tags.
+func GlueKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// GuarddutyTags returns guardduty service tags.
+func (tags KeyValueTags) GuarddutyTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// GuarddutyKeyValueTags creates KeyValueTags from guardduty service tags.
+func GuarddutyKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// KafkaTags returns kafka service tags.
+func (tags KeyValueTags) KafkaTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// KafkaKeyValueTags creates KeyValueTags from kafka service tags.
+func KafkaKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// LambdaTags returns lambda service tags.
+func (tags KeyValueTags) LambdaTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// LambdaKeyValueTags creates KeyValueTags from lambda service tags.
+func LambdaKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// MediaconnectTags returns mediaconnect service tags.
+func (tags KeyValueTags) MediaconnectTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// MediaconnectKeyValueTags creates KeyValueTags from mediaconnect service tags.
+func MediaconnectKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// MediaconvertTags returns mediaconvert service tags.
+func (tags KeyValueTags) MediaconvertTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// MediaconvertKeyValueTags creates KeyValueTags from mediaconvert service tags.
+func MediaconvertKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// MedialiveTags returns medialive service tags.
+func (tags KeyValueTags) MedialiveTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// MedialiveKeyValueTags creates KeyValueTags from medialive service tags.
+func MedialiveKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// MediapackageTags returns mediapackage service tags.
+func (tags KeyValueTags) MediapackageTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// MediapackageKeyValueTags creates KeyValueTags from mediapackage service tags.
+func MediapackageKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// MqTags returns mq service tags.
+func (tags KeyValueTags) MqTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// MqKeyValueTags creates KeyValueTags from mq service tags.
+func MqKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// OpsworksTags returns opsworks service tags.
+func (tags KeyValueTags) OpsworksTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// OpsworksKeyValueTags creates KeyValueTags from opsworks service tags.
+func OpsworksKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// PinpointTags returns pinpoint service tags.
+func (tags KeyValueTags) PinpointTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// PinpointKeyValueTags creates KeyValueTags from pinpoint service tags.
+func PinpointKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// QldbTags returns qldb service tags.
+func (tags KeyValueTags) QldbTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// QldbKeyValueTags creates KeyValueTags from qldb service tags.
+func QldbKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// ResourcegroupsTags returns resourcegroups service tags.
+func (tags KeyValueTags) ResourcegroupsTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// ResourcegroupsKeyValueTags creates KeyValueTags from resourcegroups service tags.
+func ResourcegroupsKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// SecurityhubTags returns securityhub service tags.
+func (tags KeyValueTags) SecurityhubTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// SecurityhubKeyValueTags creates KeyValueTags from securityhub service tags.
+func SecurityhubKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
+
+// SqsTags returns sqs service tags.
+func (tags KeyValueTags) SqsTags() map[string]*string {
+	return aws.StringMap(tags.Map())
+}
+
+// SqsKeyValueTags creates KeyValueTags from sqs service tags.
+func SqsKeyValueTags(tags map[string]*string) KeyValueTags {
+	return New(tags)
+}
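For the map-based services above, both conversion directions are trivial: the service API already speaks map[string]*string, so the generated methods only bridge to and from the package's KeyValueTags type. A round-trip sketch (illustrative only; the tag values are hypothetical and keyvaluetags is an internal package):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"

	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

func main() {
	tags := keyvaluetags.New(map[string]*string{
		"Environment": aws.String("prod"),
	})

	// To the Lambda API shape (map[string]*string) and back again.
	lambdaTags := tags.LambdaTags()
	restored := keyvaluetags.LambdaKeyValueTags(lambdaTags)

	fmt.Println(restored.Map()["Environment"]) // "prod"
}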
+// []*SERVICE.Tag handling
+
+// AcmTags returns acm service tags.
+func (tags KeyValueTags) AcmTags() []*acm.Tag {
+	result := make([]*acm.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &acm.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// AcmKeyValueTags creates KeyValueTags from acm service tags.
+func AcmKeyValueTags(tags []*acm.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// AcmpcaTags returns acmpca service tags.
+func (tags KeyValueTags) AcmpcaTags() []*acmpca.Tag {
+	result := make([]*acmpca.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &acmpca.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// AcmpcaKeyValueTags creates KeyValueTags from acmpca service tags.
+func AcmpcaKeyValueTags(tags []*acmpca.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// AppmeshTags returns appmesh service tags.
+func (tags KeyValueTags) AppmeshTags() []*appmesh.TagRef {
+	result := make([]*appmesh.TagRef, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &appmesh.TagRef{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// AppmeshKeyValueTags creates KeyValueTags from appmesh service tags.
+func AppmeshKeyValueTags(tags []*appmesh.TagRef) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// AthenaTags returns athena service tags.
+func (tags KeyValueTags) AthenaTags() []*athena.Tag {
+	result := make([]*athena.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &athena.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// AthenaKeyValueTags creates KeyValueTags from athena service tags.
+func AthenaKeyValueTags(tags []*athena.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CloudformationTags returns cloudformation service tags.
+func (tags KeyValueTags) CloudformationTags() []*cloudformation.Tag {
+	result := make([]*cloudformation.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudformation.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CloudformationKeyValueTags creates KeyValueTags from cloudformation service tags.
+func CloudformationKeyValueTags(tags []*cloudformation.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CloudfrontTags returns cloudfront service tags.
+func (tags KeyValueTags) CloudfrontTags() []*cloudfront.Tag {
+	result := make([]*cloudfront.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudfront.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CloudfrontKeyValueTags creates KeyValueTags from cloudfront service tags.
+func CloudfrontKeyValueTags(tags []*cloudfront.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Cloudhsmv2Tags returns cloudhsmv2 service tags.
+func (tags KeyValueTags) Cloudhsmv2Tags() []*cloudhsmv2.Tag {
+	result := make([]*cloudhsmv2.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudhsmv2.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Cloudhsmv2KeyValueTags creates KeyValueTags from cloudhsmv2 service tags.
+func Cloudhsmv2KeyValueTags(tags []*cloudhsmv2.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CloudtrailTags returns cloudtrail service tags.
+func (tags KeyValueTags) CloudtrailTags() []*cloudtrail.Tag {
+	result := make([]*cloudtrail.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudtrail.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CloudtrailKeyValueTags creates KeyValueTags from cloudtrail service tags.
+func CloudtrailKeyValueTags(tags []*cloudtrail.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CloudwatchTags returns cloudwatch service tags.
+func (tags KeyValueTags) CloudwatchTags() []*cloudwatch.Tag {
+	result := make([]*cloudwatch.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudwatch.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CloudwatchKeyValueTags creates KeyValueTags from cloudwatch service tags.
+func CloudwatchKeyValueTags(tags []*cloudwatch.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CloudwatcheventsTags returns cloudwatchevents service tags.
+func (tags KeyValueTags) CloudwatcheventsTags() []*cloudwatchevents.Tag {
+	result := make([]*cloudwatchevents.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &cloudwatchevents.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CloudwatcheventsKeyValueTags creates KeyValueTags from cloudwatchevents service tags.
+func CloudwatcheventsKeyValueTags(tags []*cloudwatchevents.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CodebuildTags returns codebuild service tags.
+func (tags KeyValueTags) CodebuildTags() []*codebuild.Tag {
+	result := make([]*codebuild.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &codebuild.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CodebuildKeyValueTags creates KeyValueTags from codebuild service tags.
+func CodebuildKeyValueTags(tags []*codebuild.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CodedeployTags returns codedeploy service tags.
+func (tags KeyValueTags) CodedeployTags() []*codedeploy.Tag {
+	result := make([]*codedeploy.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &codedeploy.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CodedeployKeyValueTags creates KeyValueTags from codedeploy service tags.
+func CodedeployKeyValueTags(tags []*codedeploy.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// CodepipelineTags returns codepipeline service tags.
+func (tags KeyValueTags) CodepipelineTags() []*codepipeline.Tag {
+	result := make([]*codepipeline.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &codepipeline.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// CodepipelineKeyValueTags creates KeyValueTags from codepipeline service tags.
+func CodepipelineKeyValueTags(tags []*codepipeline.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ConfigserviceTags returns configservice service tags.
+func (tags KeyValueTags) ConfigserviceTags() []*configservice.Tag {
+	result := make([]*configservice.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &configservice.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ConfigserviceKeyValueTags creates KeyValueTags from configservice service tags.
+func ConfigserviceKeyValueTags(tags []*configservice.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DatabasemigrationserviceTags returns databasemigrationservice service tags.
+func (tags KeyValueTags) DatabasemigrationserviceTags() []*databasemigrationservice.Tag {
+	result := make([]*databasemigrationservice.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &databasemigrationservice.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DatabasemigrationserviceKeyValueTags creates KeyValueTags from databasemigrationservice service tags.
+func DatabasemigrationserviceKeyValueTags(tags []*databasemigrationservice.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DatapipelineTags returns datapipeline service tags.
+func (tags KeyValueTags) DatapipelineTags() []*datapipeline.Tag {
+	result := make([]*datapipeline.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &datapipeline.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DatapipelineKeyValueTags creates KeyValueTags from datapipeline service tags.
+func DatapipelineKeyValueTags(tags []*datapipeline.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DatasyncTags returns datasync service tags.
+func (tags KeyValueTags) DatasyncTags() []*datasync.TagListEntry {
+	result := make([]*datasync.TagListEntry, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &datasync.TagListEntry{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DatasyncKeyValueTags creates KeyValueTags from datasync service tags.
+func DatasyncKeyValueTags(tags []*datasync.TagListEntry) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DaxTags returns dax service tags.
+func (tags KeyValueTags) DaxTags() []*dax.Tag {
+	result := make([]*dax.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &dax.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DaxKeyValueTags creates KeyValueTags from dax service tags.
+func DaxKeyValueTags(tags []*dax.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DevicefarmTags returns devicefarm service tags.
+func (tags KeyValueTags) DevicefarmTags() []*devicefarm.Tag {
+	result := make([]*devicefarm.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &devicefarm.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DevicefarmKeyValueTags creates KeyValueTags from devicefarm service tags.
+func DevicefarmKeyValueTags(tags []*devicefarm.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DirectconnectTags returns directconnect service tags.
+func (tags KeyValueTags) DirectconnectTags() []*directconnect.Tag {
+	result := make([]*directconnect.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &directconnect.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DirectconnectKeyValueTags creates KeyValueTags from directconnect service tags.
+func DirectconnectKeyValueTags(tags []*directconnect.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DirectoryserviceTags returns directoryservice service tags.
+func (tags KeyValueTags) DirectoryserviceTags() []*directoryservice.Tag {
+	result := make([]*directoryservice.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &directoryservice.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DirectoryserviceKeyValueTags creates KeyValueTags from directoryservice service tags.
+func DirectoryserviceKeyValueTags(tags []*directoryservice.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DlmTags returns dlm service tags.
+func (tags KeyValueTags) DlmTags() []*dlm.Tag {
+	result := make([]*dlm.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &dlm.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DlmKeyValueTags creates KeyValueTags from dlm service tags.
+func DlmKeyValueTags(tags []*dlm.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DocdbTags returns docdb service tags.
+func (tags KeyValueTags) DocdbTags() []*docdb.Tag {
+	result := make([]*docdb.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &docdb.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DocdbKeyValueTags creates KeyValueTags from docdb service tags.
+func DocdbKeyValueTags(tags []*docdb.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// DynamodbTags returns dynamodb service tags.
+func (tags KeyValueTags) DynamodbTags() []*dynamodb.Tag {
+	result := make([]*dynamodb.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &dynamodb.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// DynamodbKeyValueTags creates KeyValueTags from dynamodb service tags.
+func DynamodbKeyValueTags(tags []*dynamodb.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Ec2Tags returns ec2 service tags.
+func (tags KeyValueTags) Ec2Tags() []*ec2.Tag {
+	result := make([]*ec2.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &ec2.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Ec2KeyValueTags creates KeyValueTags from ec2 service tags.
+func Ec2KeyValueTags(tags []*ec2.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// EcrTags returns ecr service tags.
+func (tags KeyValueTags) EcrTags() []*ecr.Tag {
+	result := make([]*ecr.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &ecr.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// EcrKeyValueTags creates KeyValueTags from ecr service tags.
+func EcrKeyValueTags(tags []*ecr.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// EcsTags returns ecs service tags.
+func (tags KeyValueTags) EcsTags() []*ecs.Tag {
+	result := make([]*ecs.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &ecs.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// EcsKeyValueTags creates KeyValueTags from ecs service tags.
+func EcsKeyValueTags(tags []*ecs.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// EfsTags returns efs service tags.
+func (tags KeyValueTags) EfsTags() []*efs.Tag {
+	result := make([]*efs.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &efs.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// EfsKeyValueTags creates KeyValueTags from efs service tags.
+func EfsKeyValueTags(tags []*efs.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ElasticacheTags returns elasticache service tags.
+func (tags KeyValueTags) ElasticacheTags() []*elasticache.Tag {
+	result := make([]*elasticache.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &elasticache.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ElasticacheKeyValueTags creates KeyValueTags from elasticache service tags.
+func ElasticacheKeyValueTags(tags []*elasticache.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
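The slice-based converters all follow the same generated shape; only the element type differs (for example appmesh.TagRef and datasync.TagListEntry instead of a Tag struct). A round-trip sketch using the EC2 pair (illustrative only; the tag values are hypothetical and keyvaluetags is an internal package):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"

	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

func main() {
	kvt := keyvaluetags.New(map[string]*string{"Name": aws.String("web-1")})

	// Ec2Tags allocates one *ec2.Tag per key/value pair.
	ec2Tags := kvt.Ec2Tags()
	fmt.Println(aws.StringValue(ec2Tags[0].Key), aws.StringValue(ec2Tags[0].Value))

	// The inverse converter rebuilds the map form.
	fmt.Println(keyvaluetags.Ec2KeyValueTags(ec2Tags).Map()["Name"]) // "web-1"
}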
+// ElasticbeanstalkTags returns elasticbeanstalk service tags.
+func (tags KeyValueTags) ElasticbeanstalkTags() []*elasticbeanstalk.Tag {
+	result := make([]*elasticbeanstalk.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &elasticbeanstalk.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ElasticbeanstalkKeyValueTags creates KeyValueTags from elasticbeanstalk service tags.
+func ElasticbeanstalkKeyValueTags(tags []*elasticbeanstalk.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ElasticsearchserviceTags returns elasticsearchservice service tags.
+func (tags KeyValueTags) ElasticsearchserviceTags() []*elasticsearchservice.Tag {
+	result := make([]*elasticsearchservice.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &elasticsearchservice.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ElasticsearchserviceKeyValueTags creates KeyValueTags from elasticsearchservice service tags.
+func ElasticsearchserviceKeyValueTags(tags []*elasticsearchservice.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ElbTags returns elb service tags.
+func (tags KeyValueTags) ElbTags() []*elb.Tag {
+	result := make([]*elb.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &elb.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ElbKeyValueTags creates KeyValueTags from elb service tags.
+func ElbKeyValueTags(tags []*elb.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Elbv2Tags returns elbv2 service tags.
+func (tags KeyValueTags) Elbv2Tags() []*elbv2.Tag {
+	result := make([]*elbv2.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &elbv2.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Elbv2KeyValueTags creates KeyValueTags from elbv2 service tags.
+func Elbv2KeyValueTags(tags []*elbv2.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// EmrTags returns emr service tags.
+func (tags KeyValueTags) EmrTags() []*emr.Tag {
+	result := make([]*emr.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &emr.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// EmrKeyValueTags creates KeyValueTags from emr service tags.
+func EmrKeyValueTags(tags []*emr.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// FirehoseTags returns firehose service tags.
+func (tags KeyValueTags) FirehoseTags() []*firehose.Tag {
+	result := make([]*firehose.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &firehose.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// FirehoseKeyValueTags creates KeyValueTags from firehose service tags.
+func FirehoseKeyValueTags(tags []*firehose.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// FmsTags returns fms service tags.
+func (tags KeyValueTags) FmsTags() []*fms.ResourceTag {
+	result := make([]*fms.ResourceTag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &fms.ResourceTag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// FmsKeyValueTags creates KeyValueTags from fms service tags.
+func FmsKeyValueTags(tags []*fms.ResourceTag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// FsxTags returns fsx service tags.
+func (tags KeyValueTags) FsxTags() []*fsx.Tag {
+	result := make([]*fsx.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &fsx.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// FsxKeyValueTags creates KeyValueTags from fsx service tags.
+func FsxKeyValueTags(tags []*fsx.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// IamTags returns iam service tags.
+func (tags KeyValueTags) IamTags() []*iam.Tag {
+	result := make([]*iam.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &iam.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// IamKeyValueTags creates KeyValueTags from iam service tags.
+func IamKeyValueTags(tags []*iam.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// InspectorTags returns inspector service tags.
+func (tags KeyValueTags) InspectorTags() []*inspector.Tag {
+	result := make([]*inspector.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &inspector.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// InspectorKeyValueTags creates KeyValueTags from inspector service tags.
+func InspectorKeyValueTags(tags []*inspector.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// IotTags returns iot service tags.
+func (tags KeyValueTags) IotTags() []*iot.Tag {
+	result := make([]*iot.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &iot.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// IotKeyValueTags creates KeyValueTags from iot service tags.
+func IotKeyValueTags(tags []*iot.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// IotanalyticsTags returns iotanalytics service tags.
+func (tags KeyValueTags) IotanalyticsTags() []*iotanalytics.Tag {
+	result := make([]*iotanalytics.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &iotanalytics.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// IotanalyticsKeyValueTags creates KeyValueTags from iotanalytics service tags.
+func IotanalyticsKeyValueTags(tags []*iotanalytics.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// IoteventsTags returns iotevents service tags.
+func (tags KeyValueTags) IoteventsTags() []*iotevents.Tag {
+	result := make([]*iotevents.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &iotevents.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// IoteventsKeyValueTags creates KeyValueTags from iotevents service tags.
+func IoteventsKeyValueTags(tags []*iotevents.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// KinesisTags returns kinesis service tags.
+func (tags KeyValueTags) KinesisTags() []*kinesis.Tag {
+	result := make([]*kinesis.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &kinesis.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// KinesisKeyValueTags creates KeyValueTags from kinesis service tags.
+func KinesisKeyValueTags(tags []*kinesis.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// KinesisanalyticsTags returns kinesisanalytics service tags.
+func (tags KeyValueTags) KinesisanalyticsTags() []*kinesisanalytics.Tag {
+	result := make([]*kinesisanalytics.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &kinesisanalytics.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// KinesisanalyticsKeyValueTags creates KeyValueTags from kinesisanalytics service tags.
+func KinesisanalyticsKeyValueTags(tags []*kinesisanalytics.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Kinesisanalyticsv2Tags returns kinesisanalyticsv2 service tags.
+func (tags KeyValueTags) Kinesisanalyticsv2Tags() []*kinesisanalyticsv2.Tag {
+	result := make([]*kinesisanalyticsv2.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &kinesisanalyticsv2.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Kinesisanalyticsv2KeyValueTags creates KeyValueTags from kinesisanalyticsv2 service tags.
+func Kinesisanalyticsv2KeyValueTags(tags []*kinesisanalyticsv2.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// KmsTags returns kms service tags.
+func (tags KeyValueTags) KmsTags() []*kms.Tag {
+	result := make([]*kms.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &kms.Tag{
+			TagKey:   aws.String(k),
+			TagValue: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// KmsKeyValueTags creates KeyValueTags from kms service tags.
+func KmsKeyValueTags(tags []*kms.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.TagKey)] = tag.TagValue
+	}
+
+	return New(m)
+}
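KMS is the one service in this span whose tag struct departs from the usual Key/Value field names, using TagKey and TagValue instead, and the generated converters above account for that. A small sketch of the difference (illustrative only; values are hypothetical and keyvaluetags is an internal package):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"

	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
)

func main() {
	kmsTags := keyvaluetags.New(map[string]*string{"Team": aws.String("sre")}).KmsTags()

	// Note the KMS-specific field names on *kms.Tag.
	fmt.Println(aws.StringValue(kmsTags[0].TagKey))   // "Team"
	fmt.Println(aws.StringValue(kmsTags[0].TagValue)) // "sre"
}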
+// LicensemanagerTags returns licensemanager service tags.
+func (tags KeyValueTags) LicensemanagerTags() []*licensemanager.Tag {
+	result := make([]*licensemanager.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &licensemanager.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// LicensemanagerKeyValueTags creates KeyValueTags from licensemanager service tags.
+func LicensemanagerKeyValueTags(tags []*licensemanager.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// LightsailTags returns lightsail service tags.
+func (tags KeyValueTags) LightsailTags() []*lightsail.Tag {
+	result := make([]*lightsail.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &lightsail.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// LightsailKeyValueTags creates KeyValueTags from lightsail service tags.
+func LightsailKeyValueTags(tags []*lightsail.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// MediastoreTags returns mediastore service tags.
+func (tags KeyValueTags) MediastoreTags() []*mediastore.Tag {
+	result := make([]*mediastore.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &mediastore.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// MediastoreKeyValueTags creates KeyValueTags from mediastore service tags.
+func MediastoreKeyValueTags(tags []*mediastore.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// NeptuneTags returns neptune service tags.
+func (tags KeyValueTags) NeptuneTags() []*neptune.Tag {
+	result := make([]*neptune.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &neptune.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// NeptuneKeyValueTags creates KeyValueTags from neptune service tags.
+func NeptuneKeyValueTags(tags []*neptune.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// OrganizationsTags returns organizations service tags.
+func (tags KeyValueTags) OrganizationsTags() []*organizations.Tag {
+	result := make([]*organizations.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &organizations.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// OrganizationsKeyValueTags creates KeyValueTags from organizations service tags.
+func OrganizationsKeyValueTags(tags []*organizations.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// RamTags returns ram service tags.
+func (tags KeyValueTags) RamTags() []*ram.Tag {
+	result := make([]*ram.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &ram.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// RamKeyValueTags creates KeyValueTags from ram service tags.
+func RamKeyValueTags(tags []*ram.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// RdsTags returns rds service tags.
+func (tags KeyValueTags) RdsTags() []*rds.Tag {
+	result := make([]*rds.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &rds.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// RdsKeyValueTags creates KeyValueTags from rds service tags.
+func RdsKeyValueTags(tags []*rds.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// RedshiftTags returns redshift service tags.
+func (tags KeyValueTags) RedshiftTags() []*redshift.Tag {
+	result := make([]*redshift.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &redshift.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// RedshiftKeyValueTags creates KeyValueTags from redshift service tags.
+func RedshiftKeyValueTags(tags []*redshift.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Route53Tags returns route53 service tags.
+func (tags KeyValueTags) Route53Tags() []*route53.Tag {
+	result := make([]*route53.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &route53.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Route53KeyValueTags creates KeyValueTags from route53 service tags.
+func Route53KeyValueTags(tags []*route53.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// Route53resolverTags returns route53resolver service tags.
+func (tags KeyValueTags) Route53resolverTags() []*route53resolver.Tag {
+	result := make([]*route53resolver.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &route53resolver.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// Route53resolverKeyValueTags creates KeyValueTags from route53resolver service tags.
+func Route53resolverKeyValueTags(tags []*route53resolver.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// S3Tags returns s3 service tags.
+func (tags KeyValueTags) S3Tags() []*s3.Tag {
+	result := make([]*s3.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &s3.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// S3KeyValueTags creates KeyValueTags from s3 service tags.
+func S3KeyValueTags(tags []*s3.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// SagemakerTags returns sagemaker service tags.
+func (tags KeyValueTags) SagemakerTags() []*sagemaker.Tag {
+	result := make([]*sagemaker.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &sagemaker.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// SagemakerKeyValueTags creates KeyValueTags from sagemaker service tags.
+func SagemakerKeyValueTags(tags []*sagemaker.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// SecretsmanagerTags returns secretsmanager service tags.
+func (tags KeyValueTags) SecretsmanagerTags() []*secretsmanager.Tag {
+	result := make([]*secretsmanager.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &secretsmanager.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// SecretsmanagerKeyValueTags creates KeyValueTags from secretsmanager service tags.
+func SecretsmanagerKeyValueTags(tags []*secretsmanager.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ServerlessapplicationrepositoryTags returns serverlessapplicationrepository service tags.
+func (tags KeyValueTags) ServerlessapplicationrepositoryTags() []*serverlessapplicationrepository.Tag {
+	result := make([]*serverlessapplicationrepository.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &serverlessapplicationrepository.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ServerlessapplicationrepositoryKeyValueTags creates KeyValueTags from serverlessapplicationrepository service tags.
+func ServerlessapplicationrepositoryKeyValueTags(tags []*serverlessapplicationrepository.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// ServicecatalogTags returns servicecatalog service tags.
+func (tags KeyValueTags) ServicecatalogTags() []*servicecatalog.Tag {
+	result := make([]*servicecatalog.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &servicecatalog.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// ServicecatalogKeyValueTags creates KeyValueTags from servicecatalog service tags.
+func ServicecatalogKeyValueTags(tags []*servicecatalog.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// SfnTags returns sfn service tags.
+func (tags KeyValueTags) SfnTags() []*sfn.Tag {
+	result := make([]*sfn.Tag, 0, len(tags))
+
+	for k, v := range tags.Map() {
+		tag := &sfn.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		}
+
+		result = append(result, tag)
+	}
+
+	return result
+}
+
+// SfnKeyValueTags creates KeyValueTags from sfn service tags.
+func SfnKeyValueTags(tags []*sfn.Tag) KeyValueTags {
+	m := make(map[string]*string, len(tags))
+
+	for _, tag := range tags {
+		m[aws.StringValue(tag.Key)] = tag.Value
+	}
+
+	return New(m)
+}
+
+// SnsTags returns sns service tags.
+func (tags KeyValueTags) SnsTags() []*sns.Tag { + result := make([]*sns.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &sns.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SnsKeyValueTags creates KeyValueTags from sns service tags. +func SnsKeyValueTags(tags []*sns.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// SsmTags returns ssm service tags. +func (tags KeyValueTags) SsmTags() []*ssm.Tag { + result := make([]*ssm.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &ssm.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SsmKeyValueTags creates KeyValueTags from ssm service tags. +func SsmKeyValueTags(tags []*ssm.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// StoragegatewayTags returns storagegateway service tags. +func (tags KeyValueTags) StoragegatewayTags() []*storagegateway.Tag { + result := make([]*storagegateway.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &storagegateway.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// StoragegatewayKeyValueTags creates KeyValueTags from storagegateway service tags. +func StoragegatewayKeyValueTags(tags []*storagegateway.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// SwfTags returns swf service tags. +func (tags KeyValueTags) SwfTags() []*swf.ResourceTag { + result := make([]*swf.ResourceTag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &swf.ResourceTag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SwfKeyValueTags creates KeyValueTags from swf service tags. +func SwfKeyValueTags(tags []*swf.ResourceTag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// TransferTags returns transfer service tags. +func (tags KeyValueTags) TransferTags() []*transfer.Tag { + result := make([]*transfer.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &transfer.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// TransferKeyValueTags creates KeyValueTags from transfer service tags. +func TransferKeyValueTags(tags []*transfer.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// WafTags returns waf service tags. +func (tags KeyValueTags) WafTags() []*waf.Tag { + result := make([]*waf.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &waf.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// WafKeyValueTags creates KeyValueTags from waf service tags. +func WafKeyValueTags(tags []*waf.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// WafregionalTags returns wafregional service tags. 
+func (tags KeyValueTags) WafregionalTags() []*waf.Tag { + result := make([]*waf.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &waf.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// WafregionalKeyValueTags creates KeyValueTags from wafregional service tags. +func WafregionalKeyValueTags(tags []*waf.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + +// WorkspacesTags returns workspaces service tags. +func (tags KeyValueTags) WorkspacesTags() []*workspaces.Tag { + result := make([]*workspaces.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &workspaces.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// WorkspacesKeyValueTags creates KeyValueTags from workspaces service tags. +func WorkspacesKeyValueTags(tags []*workspaces.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/update_tags_gen.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/update_tags_gen.go new file mode 100644 index 00000000000..fe5ca838cd9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags/update_tags_gen.go @@ -0,0 +1,2971 @@ +// Code generated by generators/updatetags/main.go; DO NOT EDIT. + +package keyvaluetags + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/acm" + "github.com/aws/aws-sdk-go/service/acmpca" + "github.com/aws/aws-sdk-go/service/amplify" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/apigatewayv2" + "github.com/aws/aws-sdk-go/service/appmesh" + "github.com/aws/aws-sdk-go/service/appstream" + "github.com/aws/aws-sdk-go/service/appsync" + "github.com/aws/aws-sdk-go/service/athena" + "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go/service/cloudhsmv2" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go/service/datapipeline" + "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go/service/dax" + "github.com/aws/aws-sdk-go/service/devicefarm" + "github.com/aws/aws-sdk-go/service/directconnect" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go/service/docdb" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ecr" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/elasticache" + 
"github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go/service/glue" + "github.com/aws/aws-sdk-go/service/guardduty" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotanalytics" + "github.com/aws/aws-sdk-go/service/iotevents" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go/service/kinesisanalytics" + "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/aws/aws-sdk-go/service/licensemanager" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/aws/aws-sdk-go/service/mediaconnect" + "github.com/aws/aws-sdk-go/service/mediaconvert" + "github.com/aws/aws-sdk-go/service/medialive" + "github.com/aws/aws-sdk-go/service/mediapackage" + "github.com/aws/aws-sdk-go/service/mediastore" + "github.com/aws/aws-sdk-go/service/mq" + "github.com/aws/aws-sdk-go/service/neptune" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/aws/aws-sdk-go/service/organizations" + "github.com/aws/aws-sdk-go/service/qldb" + "github.com/aws/aws-sdk-go/service/ram" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/aws/aws-sdk-go/service/resourcegroups" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/storagegateway" + "github.com/aws/aws-sdk-go/service/swf" + "github.com/aws/aws-sdk-go/service/transfer" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/aws/aws-sdk-go/service/wafregional" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +// AcmUpdateTags updates acm service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AcmUpdateTags(conn *acm.ACM, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &acm.RemoveTagsFromCertificateInput{ + CertificateArn: aws.String(identifier), + Tags: removedTags.IgnoreAws().AcmTags(), + } + + _, err := conn.RemoveTagsFromCertificate(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &acm.AddTagsToCertificateInput{ + CertificateArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().AcmTags(), + } + + _, err := conn.AddTagsToCertificate(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// AcmpcaUpdateTags updates acmpca service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func AcmpcaUpdateTags(conn *acmpca.ACMPCA, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error {
+	oldTags := New(oldTagsMap)
+	newTags := New(newTagsMap)
+
+	if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 {
+		input := &acmpca.UntagCertificateAuthorityInput{
+			CertificateAuthorityArn: aws.String(identifier),
+			Tags:                    removedTags.IgnoreAws().AcmpcaTags(),
+		}
+
+		_, err := conn.UntagCertificateAuthority(input)
+
+		if err != nil {
+			return fmt.Errorf("error untagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 {
+		input := &acmpca.TagCertificateAuthorityInput{
+			CertificateAuthorityArn: aws.String(identifier),
+			Tags:                    updatedTags.IgnoreAws().AcmpcaTags(),
+		}
+
+		_, err := conn.TagCertificateAuthority(input)
+
+		if err != nil {
+			return fmt.Errorf("error tagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	return nil
+}
+
+// AmplifyUpdateTags updates amplify service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func AmplifyUpdateTags(conn *amplify.Amplify, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error {
+	oldTags := New(oldTagsMap)
+	newTags := New(newTagsMap)
+
+	if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 {
+		input := &amplify.UntagResourceInput{
+			ResourceArn: aws.String(identifier),
+			TagKeys:     aws.StringSlice(removedTags.Keys()),
+		}
+
+		_, err := conn.UntagResource(input)
+
+		if err != nil {
+			return fmt.Errorf("error untagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 {
+		input := &amplify.TagResourceInput{
+			ResourceArn: aws.String(identifier),
+			Tags:        updatedTags.IgnoreAws().AmplifyTags(),
+		}
+
+		_, err := conn.TagResource(input)
+
+		if err != nil {
+			return fmt.Errorf("error tagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	return nil
+}
+
+// ApigatewayUpdateTags updates apigateway service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
+func ApigatewayUpdateTags(conn *apigateway.APIGateway, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error {
+	oldTags := New(oldTagsMap)
+	newTags := New(newTagsMap)
+
+	if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 {
+		input := &apigateway.UntagResourceInput{
+			ResourceArn: aws.String(identifier),
+			TagKeys:     aws.StringSlice(removedTags.Keys()),
+		}
+
+		_, err := conn.UntagResource(input)
+
+		if err != nil {
+			return fmt.Errorf("error untagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 {
+		input := &apigateway.TagResourceInput{
+			ResourceArn: aws.String(identifier),
+			Tags:        updatedTags.IgnoreAws().ApigatewayTags(),
+		}
+
+		_, err := conn.TagResource(input)
+
+		if err != nil {
+			return fmt.Errorf("error tagging resource (%s): %w", identifier, err)
+		}
+	}
+
+	return nil
+}
+
+// Apigatewayv2UpdateTags updates apigatewayv2 service tags.
+// The identifier is typically the Amazon Resource Name (ARN), although
+// it may also be a different identifier depending on the service.
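+//
+// All of the UpdateTags functions share the same diff semantics: Removed
+// yields tags present in the old map but absent from the new one, and
+// Updated yields tags that are new or whose values changed; unchanged tags
+// are never re-sent. A minimal sketch (key order is not guaranteed):
+//
+//   oldTags := New(map[string]string{"a": "1", "b": "2"})
+//   newTags := New(map[string]string{"b": "3", "c": "4"})
+//   oldTags.Removed(newTags).Keys() // ["a"]
+//   oldTags.Updated(newTags).Keys() // ["b", "c"]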
+func Apigatewayv2UpdateTags(conn *apigatewayv2.ApiGatewayV2, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &apigatewayv2.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &apigatewayv2.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().Apigatewayv2Tags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// AppmeshUpdateTags updates appmesh service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AppmeshUpdateTags(conn *appmesh.AppMesh, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &appmesh.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &appmesh.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().AppmeshTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// AppstreamUpdateTags updates appstream service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AppstreamUpdateTags(conn *appstream.AppStream, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &appstream.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &appstream.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().AppstreamTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// AppsyncUpdateTags updates appsync service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func AppsyncUpdateTags(conn *appsync.AppSync, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &appsync.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &appsync.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().AppsyncTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// AthenaUpdateTags updates athena service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func AthenaUpdateTags(conn *athena.Athena, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &athena.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &athena.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().AthenaTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// BackupUpdateTags updates backup service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func BackupUpdateTags(conn *backup.Backup, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &backup.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeyList: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &backup.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().BackupTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// Cloudhsmv2UpdateTags updates cloudhsmv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func Cloudhsmv2UpdateTags(conn *cloudhsmv2.CloudHSMV2, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cloudhsmv2.UntagResourceInput{ + ResourceId: aws.String(identifier), + TagKeyList: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cloudhsmv2.TagResourceInput{ + ResourceId: aws.String(identifier), + TagList: updatedTags.IgnoreAws().Cloudhsmv2Tags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CloudwatchUpdateTags updates cloudwatch service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CloudwatchUpdateTags(conn *cloudwatch.CloudWatch, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cloudwatch.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cloudwatch.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CloudwatchTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CloudwatcheventsUpdateTags updates cloudwatchevents service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CloudwatcheventsUpdateTags(conn *cloudwatchevents.CloudWatchEvents, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cloudwatchevents.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cloudwatchevents.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CloudwatcheventsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CloudwatchlogsUpdateTags updates cloudwatchlogs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
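+//
+// Note that each tagging path above first applies IgnoreAws, which drops
+// keys prefixed with "aws:"; those are AWS-managed system tags that the
+// tagging APIs will not accept.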
+func CloudwatchlogsUpdateTags(conn *cloudwatchlogs.CloudWatchLogs, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cloudwatchlogs.UntagLogGroupInput{ + LogGroupName: aws.String(identifier), + Tags: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagLogGroup(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cloudwatchlogs.TagLogGroupInput{ + LogGroupName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CloudwatchlogsTags(), + } + + _, err := conn.TagLogGroup(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CodecommitUpdateTags updates codecommit service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodecommitUpdateTags(conn *codecommit.CodeCommit, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &codecommit.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &codecommit.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CodecommitTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CodedeployUpdateTags updates codedeploy service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodedeployUpdateTags(conn *codedeploy.CodeDeploy, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &codedeploy.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &codedeploy.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CodedeployTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CodepipelineUpdateTags updates codepipeline service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func CodepipelineUpdateTags(conn *codepipeline.CodePipeline, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &codepipeline.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &codepipeline.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CodepipelineTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CognitoidentityUpdateTags updates cognitoidentity service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CognitoidentityUpdateTags(conn *cognitoidentity.CognitoIdentity, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cognitoidentity.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cognitoidentity.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CognitoidentityTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// CognitoidentityproviderUpdateTags updates cognitoidentityprovider service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CognitoidentityproviderUpdateTags(conn *cognitoidentityprovider.CognitoIdentityProvider, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &cognitoidentityprovider.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &cognitoidentityprovider.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CognitoidentityproviderTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// ConfigserviceUpdateTags updates configservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func ConfigserviceUpdateTags(conn *configservice.ConfigService, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &configservice.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &configservice.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().ConfigserviceTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DatabasemigrationserviceUpdateTags updates databasemigrationservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DatabasemigrationserviceUpdateTags(conn *databasemigrationservice.DatabaseMigrationService, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &databasemigrationservice.RemoveTagsFromResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &databasemigrationservice.AddTagsToResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DatabasemigrationserviceTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DatapipelineUpdateTags updates datapipeline service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DatapipelineUpdateTags(conn *datapipeline.DataPipeline, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &datapipeline.RemoveTagsInput{ + PipelineId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &datapipeline.AddTagsInput{ + PipelineId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DatapipelineTags(), + } + + _, err := conn.AddTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DatasyncUpdateTags updates datasync service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func DatasyncUpdateTags(conn *datasync.DataSync, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &datasync.UntagResourceInput{ + ResourceArn: aws.String(identifier), + Keys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &datasync.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DatasyncTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DaxUpdateTags updates dax service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DaxUpdateTags(conn *dax.DAX, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &dax.UntagResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &dax.TagResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DaxTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DevicefarmUpdateTags updates devicefarm service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DevicefarmUpdateTags(conn *devicefarm.DeviceFarm, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &devicefarm.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &devicefarm.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DevicefarmTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DirectconnectUpdateTags updates directconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func DirectconnectUpdateTags(conn *directconnect.DirectConnect, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &directconnect.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &directconnect.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DirectconnectTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DirectoryserviceUpdateTags updates directoryservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DirectoryserviceUpdateTags(conn *directoryservice.DirectoryService, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &directoryservice.RemoveTagsFromResourceInput{ + ResourceId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &directoryservice.AddTagsToResourceInput{ + ResourceId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DirectoryserviceTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DocdbUpdateTags updates docdb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func DocdbUpdateTags(conn *docdb.DocDB, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &docdb.RemoveTagsFromResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &docdb.AddTagsToResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DocdbTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// DynamodbUpdateTags updates dynamodb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func DynamodbUpdateTags(conn *dynamodb.DynamoDB, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &dynamodb.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &dynamodb.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().DynamodbTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// Ec2UpdateTags updates ec2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Ec2UpdateTags(conn *ec2.EC2, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ec2.DeleteTagsInput{ + Resources: aws.StringSlice([]string{identifier}), + Tags: removedTags.IgnoreAws().Ec2Tags(), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{identifier}), + Tags: updatedTags.IgnoreAws().Ec2Tags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// EcrUpdateTags updates ecr service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EcrUpdateTags(conn *ecr.ECR, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ecr.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ecr.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().EcrTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// EcsUpdateTags updates ecs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func EcsUpdateTags(conn *ecs.ECS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ecs.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ecs.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().EcsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// EfsUpdateTags updates efs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EfsUpdateTags(conn *efs.EFS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &efs.DeleteTagsInput{ + FileSystemId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &efs.CreateTagsInput{ + FileSystemId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().EfsTags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// EksUpdateTags updates eks service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func EksUpdateTags(conn *eks.EKS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &eks.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &eks.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().EksTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// ElasticacheUpdateTags updates elasticache service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func ElasticacheUpdateTags(conn *elasticache.ElastiCache, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &elasticache.RemoveTagsFromResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &elasticache.AddTagsToResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().ElasticacheTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// ElasticsearchserviceUpdateTags updates elasticsearchservice service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func ElasticsearchserviceUpdateTags(conn *elasticsearchservice.ElasticsearchService, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &elasticsearchservice.RemoveTagsInput{ + ARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &elasticsearchservice.AddTagsInput{ + ARN: aws.String(identifier), + TagList: updatedTags.IgnoreAws().ElasticsearchserviceTags(), + } + + _, err := conn.AddTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// Elbv2UpdateTags updates elbv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Elbv2UpdateTags(conn *elbv2.ELBV2, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &elbv2.RemoveTagsInput{ + ResourceArns: aws.StringSlice([]string{identifier}), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &elbv2.AddTagsInput{ + ResourceArns: aws.StringSlice([]string{identifier}), + Tags: updatedTags.IgnoreAws().Elbv2Tags(), + } + + _, err := conn.AddTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// EmrUpdateTags updates emr service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func EmrUpdateTags(conn *emr.EMR, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &emr.RemoveTagsInput{ + ResourceId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &emr.AddTagsInput{ + ResourceId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().EmrTags(), + } + + _, err := conn.AddTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// FirehoseUpdateTags updates firehose service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func FirehoseUpdateTags(conn *firehose.Firehose, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &firehose.UntagDeliveryStreamInput{ + DeliveryStreamName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagDeliveryStream(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &firehose.TagDeliveryStreamInput{ + DeliveryStreamName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().FirehoseTags(), + } + + _, err := conn.TagDeliveryStream(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// FsxUpdateTags updates fsx service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func FsxUpdateTags(conn *fsx.FSx, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &fsx.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &fsx.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().FsxTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// GlueUpdateTags updates glue service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func GlueUpdateTags(conn *glue.Glue, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &glue.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagsToRemove: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &glue.TagResourceInput{ + ResourceArn: aws.String(identifier), + TagsToAdd: updatedTags.IgnoreAws().GlueTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// GuarddutyUpdateTags updates guardduty service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func GuarddutyUpdateTags(conn *guardduty.GuardDuty, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &guardduty.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &guardduty.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().GuarddutyTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// IotUpdateTags updates iot service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func IotUpdateTags(conn *iot.IoT, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &iot.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &iot.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().IotTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// IotanalyticsUpdateTags updates iotanalytics service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func IotanalyticsUpdateTags(conn *iotanalytics.IoTAnalytics, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &iotanalytics.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &iotanalytics.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().IotanalyticsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// IoteventsUpdateTags updates iotevents service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func IoteventsUpdateTags(conn *iotevents.IoTEvents, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &iotevents.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &iotevents.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().IoteventsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// KafkaUpdateTags updates kafka service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func KafkaUpdateTags(conn *kafka.Kafka, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &kafka.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &kafka.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().KafkaTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// KinesisanalyticsUpdateTags updates kinesisanalytics service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func KinesisanalyticsUpdateTags(conn *kinesisanalytics.KinesisAnalytics, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &kinesisanalytics.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &kinesisanalytics.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().KinesisanalyticsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// Kinesisanalyticsv2UpdateTags updates kinesisanalyticsv2 service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Kinesisanalyticsv2UpdateTags(conn *kinesisanalyticsv2.KinesisAnalyticsV2, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &kinesisanalyticsv2.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &kinesisanalyticsv2.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().Kinesisanalyticsv2Tags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// KmsUpdateTags updates kms service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func KmsUpdateTags(conn *kms.KMS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &kms.UntagResourceInput{ + KeyId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &kms.TagResourceInput{ + KeyId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().KmsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// LambdaUpdateTags updates lambda service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func LambdaUpdateTags(conn *lambda.Lambda, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &lambda.UntagResourceInput{ + Resource: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &lambda.TagResourceInput{ + Resource: aws.String(identifier), + Tags: updatedTags.IgnoreAws().LambdaTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// LicensemanagerUpdateTags updates licensemanager service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func LicensemanagerUpdateTags(conn *licensemanager.LicenseManager, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &licensemanager.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &licensemanager.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().LicensemanagerTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// LightsailUpdateTags updates lightsail service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func LightsailUpdateTags(conn *lightsail.Lightsail, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &lightsail.UntagResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &lightsail.TagResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().LightsailTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MediaconnectUpdateTags updates mediaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func MediaconnectUpdateTags(conn *mediaconnect.MediaConnect, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &mediaconnect.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &mediaconnect.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MediaconnectTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MediaconvertUpdateTags updates mediaconvert service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MediaconvertUpdateTags(conn *mediaconvert.MediaConvert, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &mediaconvert.UntagResourceInput{ + Arn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &mediaconvert.TagResourceInput{ + Arn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MediaconvertTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MedialiveUpdateTags updates medialive service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MedialiveUpdateTags(conn *medialive.MediaLive, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &medialive.DeleteTagsInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &medialive.CreateTagsInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MedialiveTags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MediapackageUpdateTags updates mediapackage service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
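The bodies are near-identical, but the AWS APIs disagree on both the operation pair (TagResource/UntagResource here, AddTags/RemoveTags for EMR and Elasticsearch, CreateTags/DeleteTags for MediaLive) and the identifier field (ResourceArn, ResourceARN, Arn, ResourceName, Resource, ResourceId, QueueUrl, KeyId). That spread is presumably why the file is generated per service rather than written against a shared interface. Illustrative only (the generator itself is not part of this diff), metadata naming the axes of variation might look like:

type serviceTagging struct {
	TagOp           string // "TagResource", "AddTags", "CreateTags", "Tag"
	UntagOp         string // "UntagResource", "RemoveTags", "DeleteTags", "Untag"
	IdentifierField string // "ResourceArn", "Arn", "ResourceName", "KeyId", ...
	TagKeysField    string // usually "TagKeys"; "TagsToRemove" for glue, "Keys" for resourcegroups
}

var taggingByService = map[string]serviceTagging{
	"mediaconvert":   {TagOp: "TagResource", UntagOp: "UntagResource", IdentifierField: "Arn", TagKeysField: "TagKeys"},
	"medialive":      {TagOp: "CreateTags", UntagOp: "DeleteTags", IdentifierField: "ResourceArn", TagKeysField: "TagKeys"},
	"resourcegroups": {TagOp: "Tag", UntagOp: "Untag", IdentifierField: "Arn", TagKeysField: "Keys"},
}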
+func MediapackageUpdateTags(conn *mediapackage.MediaPackage, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &mediapackage.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &mediapackage.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MediapackageTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MediastoreUpdateTags updates mediastore service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MediastoreUpdateTags(conn *mediastore.MediaStore, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &mediastore.UntagResourceInput{ + Resource: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &mediastore.TagResourceInput{ + Resource: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MediastoreTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// MqUpdateTags updates mq service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func MqUpdateTags(conn *mq.MQ, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &mq.DeleteTagsInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &mq.CreateTagsInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().MqTags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// NeptuneUpdateTags updates neptune service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func NeptuneUpdateTags(conn *neptune.Neptune, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &neptune.RemoveTagsFromResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &neptune.AddTagsToResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().NeptuneTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// OpsworksUpdateTags updates opsworks service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func OpsworksUpdateTags(conn *opsworks.OpsWorks, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &opsworks.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &opsworks.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().OpsworksTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// OrganizationsUpdateTags updates organizations service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func OrganizationsUpdateTags(conn *organizations.Organizations, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &organizations.UntagResourceInput{ + ResourceId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &organizations.TagResourceInput{ + ResourceId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().OrganizationsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// QldbUpdateTags updates qldb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func QldbUpdateTags(conn *qldb.QLDB, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &qldb.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &qldb.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().QldbTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// RamUpdateTags updates ram service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func RamUpdateTags(conn *ram.RAM, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ram.UntagResourceInput{ + ResourceShareArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ram.TagResourceInput{ + ResourceShareArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().RamTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// RdsUpdateTags updates rds service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func RdsUpdateTags(conn *rds.RDS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &rds.RemoveTagsFromResourceInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &rds.AddTagsToResourceInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().RdsTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// RedshiftUpdateTags updates redshift service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func RedshiftUpdateTags(conn *redshift.Redshift, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &redshift.DeleteTagsInput{ + ResourceName: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &redshift.CreateTagsInput{ + ResourceName: aws.String(identifier), + Tags: updatedTags.IgnoreAws().RedshiftTags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// ResourcegroupsUpdateTags updates resourcegroups service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func ResourcegroupsUpdateTags(conn *resourcegroups.ResourceGroups, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &resourcegroups.UntagInput{ + Arn: aws.String(identifier), + Keys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.Untag(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &resourcegroups.TagInput{ + Arn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().ResourcegroupsTags(), + } + + _, err := conn.Tag(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// Route53resolverUpdateTags updates route53resolver service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func Route53resolverUpdateTags(conn *route53resolver.Route53Resolver, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &route53resolver.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &route53resolver.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().Route53resolverTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SagemakerUpdateTags updates sagemaker service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func SagemakerUpdateTags(conn *sagemaker.SageMaker, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &sagemaker.DeleteTagsInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &sagemaker.AddTagsInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SagemakerTags(), + } + + _, err := conn.AddTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SecretsmanagerUpdateTags updates secretsmanager service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SecretsmanagerUpdateTags(conn *secretsmanager.SecretsManager, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &secretsmanager.UntagResourceInput{ + SecretId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &secretsmanager.TagResourceInput{ + SecretId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SecretsmanagerTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SecurityhubUpdateTags updates securityhub service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SecurityhubUpdateTags(conn *securityhub.SecurityHub, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &securityhub.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &securityhub.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SecurityhubTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SfnUpdateTags updates sfn service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func SfnUpdateTags(conn *sfn.SFN, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &sfn.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &sfn.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SfnTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SnsUpdateTags updates sns service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SnsUpdateTags(conn *sns.SNS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &sns.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &sns.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SnsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SqsUpdateTags updates sqs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SqsUpdateTags(conn *sqs.SQS, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &sqs.UntagQueueInput{ + QueueUrl: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagQueue(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &sqs.TagQueueInput{ + QueueUrl: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SqsTags(), + } + + _, err := conn.TagQueue(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SsmUpdateTags updates ssm service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
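SqsUpdateTags above is one of the cases the recurring doc comment hedges about: the identifier is the queue URL rather than an ARN, so the resource ID passes straight through. Dropped into the same Update shape as the FSx sketch earlier (hypothetical call):

if d.HasChange("tags") {
	o, n := d.GetChange("tags")

	// For aws_sqs_queue, d.Id() is the queue URL, which is exactly what
	// the SQS tagging API's QueueUrl field expects.
	if err := keyvaluetags.SqsUpdateTags(conn, d.Id(), o, n); err != nil {
		return fmt.Errorf("error updating SQS queue (%s) tags: %w", d.Id(), err)
	}
}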
+func SsmUpdateTags(conn *ssm.SSM, identifier string, resourceType string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ssm.RemoveTagsFromResourceInput{ + ResourceId: aws.String(identifier), + ResourceType: aws.String(resourceType), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ssm.AddTagsToResourceInput{ + ResourceId: aws.String(identifier), + ResourceType: aws.String(resourceType), + Tags: updatedTags.IgnoreAws().SsmTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// StoragegatewayUpdateTags updates storagegateway service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func StoragegatewayUpdateTags(conn *storagegateway.StorageGateway, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &storagegateway.RemoveTagsFromResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.RemoveTagsFromResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &storagegateway.AddTagsToResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().StoragegatewayTags(), + } + + _, err := conn.AddTagsToResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SwfUpdateTags updates swf service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SwfUpdateTags(conn *swf.SWF, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &swf.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &swf.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SwfTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// TransferUpdateTags updates transfer service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
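SsmUpdateTags is the one signature here with an extra resourceType argument, because SSM tags several resource kinds (documents, parameters, maintenance windows, and so on) through a single operation pair. The vendored SDK exposes enum constants for the type; a hypothetical call for a parameter, following the earlier Update sketch:

// ssm.ResourceTypeForTaggingParameter is an aws-sdk-go enum constant ("Parameter").
if err := keyvaluetags.SsmUpdateTags(conn, d.Id(), ssm.ResourceTypeForTaggingParameter, o, n); err != nil {
	return fmt.Errorf("error updating SSM parameter (%s) tags: %w", d.Id(), err)
}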
+func TransferUpdateTags(conn *transfer.Transfer, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &transfer.UntagResourceInput{ + Arn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &transfer.TagResourceInput{ + Arn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().TransferTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// WafUpdateTags updates waf service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func WafUpdateTags(conn *waf.WAF, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &waf.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &waf.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().WafTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// WafregionalUpdateTags updates wafregional service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func WafregionalUpdateTags(conn *wafregional.WAFRegional, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &waf.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &waf.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: updatedTags.IgnoreAws().WafregionalTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// WorkspacesUpdateTags updates workspaces service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
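Note that WafregionalUpdateTags builds waf package inputs and hands them to the wafregional connection. That is not a typo: the Regional API shares its tagging shapes with WAF Classic, and the vendored SDK generates the wafregional client methods against the waf types, approximately:

// func (c *WAFRegional) TagResource(input *waf.TagResourceInput) (*waf.TagResourceOutput, error)
// func (c *WAFRegional) UntagResource(input *waf.UntagResourceInput) (*waf.UntagResourceOutput, error)

so the function above compiles as written.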
+func WorkspacesUpdateTags(conn *workspaces.WorkSpaces, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &workspaces.DeleteTagsInput{ + ResourceId: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.DeleteTags(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &workspaces.CreateTagsInput{ + ResourceId: aws.String(identifier), + Tags: updatedTags.IgnoreAws().WorkspacesTags(), + } + + _, err := conn.CreateTags(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go index 83e0b2d0284..6a95857b89e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go @@ -5,13 +5,13 @@ import ( "log" "strconv" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" ) // OpsWorks has a single concept of "layer" which represents several different diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go index ff849d26f26..ae88e3aee4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go @@ -3,9 +3,9 @@ package aws import ( "log" - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" homedir "github.com/mitchellh/go-homedir" ) @@ -15,7 +15,7 @@ func Provider() terraform.ResourceProvider { // TODO: Move the configuration to this, requires validation // The actual provider - return &schema.Provider{ + provider := &schema.Provider{ Schema: map[string]*schema.Schema{ "access_key": { Type: schema.TypeString, @@ -90,6 +90,22 @@ func Provider() terraform.ResourceProvider { "endpoints": endpointsSchema(), + "ignore_tag_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag key prefixes to ignore across all resources.", + }, + + "ignore_tags": { + Type: schema.TypeSet, + Optional: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "Resource tag keys to ignore across all resources.", + }, + "insecure": { Type: schema.TypeBool, Optional: true, @@ -141,128 +157,152 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "aws_acm_certificate": dataSourceAwsAcmCertificate(), - "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), - "aws_ami": dataSourceAwsAmi(), - "aws_ami_ids": dataSourceAwsAmiIds(), - "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), - "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), - "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), - "aws_arn": dataSourceAwsArn(), - "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), - "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), - "aws_availability_zone": dataSourceAwsAvailabilityZone(), - "aws_availability_zones": dataSourceAwsAvailabilityZones(), - "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), - "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), - "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), - "aws_caller_identity": dataSourceAwsCallerIdentity(), - "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), - "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), - "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), - "aws_cloudhsm_v2_cluster": dataSourceCloudHsm2Cluster(), - "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), - "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), - "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), - "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), - "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), - "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), - "aws_db_event_categories": dataSourceAwsDbEventCategories(), - "aws_db_instance": dataSourceAwsDbInstance(), - "aws_db_snapshot": dataSourceAwsDbSnapshot(), - "aws_dx_gateway": dataSourceAwsDxGateway(), - "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), - "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), - "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), - "aws_ebs_volume": dataSourceAwsEbsVolume(), - "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), - "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), - "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), - "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), - "aws_ecr_repository": dataSourceAwsEcrRepository(), - "aws_ecs_cluster": dataSourceAwsEcsCluster(), - "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), - "aws_ecs_service": dataSourceAwsEcsService(), - "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), - "aws_efs_file_system": dataSourceAwsEfsFileSystem(), - "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), - "aws_eip": dataSourceAwsEip(), - "aws_eks_cluster": dataSourceAwsEksCluster(), - "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), - "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), - "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), - "aws_elasticache_cluster": 
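Two separate changes land in the provider diff above. First, the helper imports move from github.com/hashicorp/terraform/... to github.com/hashicorp/terraform-plugin-sdk/...: the standalone plugin SDK split keeps package names and APIs intact, so the rewrite is purely mechanical:

import (
	// before the SDK split:
	//   "github.com/hashicorp/terraform/helper/schema"
	// after (same package name, same API):
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

Second, the new ignore_tags and ignore_tag_prefixes sets give operators a provider-wide way to exclude tag keys from management. The configure side is not shown in this diff; a sketch of how the sets would plausibly be read, where the Config field names are assumptions:

// Hypothetical configure-time plumbing for the new schema keys.
for _, v := range d.Get("ignore_tag_prefixes").(*schema.Set).List() {
	config.IgnoreTagPrefixes = append(config.IgnoreTagPrefixes, v.(string))
}
for _, v := range d.Get("ignore_tags").(*schema.Set).List() {
	config.IgnoreTags = append(config.IgnoreTags, v.(string))
}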
dataSourceAwsElastiCacheCluster(), - "aws_elb": dataSourceAwsElb(), - "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), - "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), - "aws_elb_service_account": dataSourceAwsElbServiceAccount(), - "aws_glue_script": dataSourceAwsGlueScript(), - "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), - "aws_iam_group": dataSourceAwsIAMGroup(), - "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), - "aws_iam_policy": dataSourceAwsIAMPolicy(), - "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), - "aws_iam_role": dataSourceAwsIAMRole(), - "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), - "aws_iam_user": dataSourceAwsIAMUser(), - "aws_internet_gateway": dataSourceAwsInternetGateway(), - "aws_iot_endpoint": dataSourceAwsIotEndpoint(), - "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), - "aws_instance": dataSourceAwsInstance(), - "aws_instances": dataSourceAwsInstances(), - "aws_ip_ranges": dataSourceAwsIPRanges(), - "aws_kinesis_stream": dataSourceAwsKinesisStream(), - "aws_kms_alias": dataSourceAwsKmsAlias(), - "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), - "aws_kms_key": dataSourceAwsKmsKey(), - "aws_kms_secret": dataSourceAwsKmsSecret(), - "aws_kms_secrets": dataSourceAwsKmsSecrets(), - "aws_lambda_function": dataSourceAwsLambdaFunction(), - "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), - "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), - "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), - "aws_launch_template": dataSourceAwsLaunchTemplate(), - "aws_mq_broker": dataSourceAwsMqBroker(), - "aws_nat_gateway": dataSourceAwsNatGateway(), - "aws_network_acls": dataSourceAwsNetworkAcls(), - "aws_network_interface": dataSourceAwsNetworkInterface(), - "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), - "aws_partition": dataSourceAwsPartition(), - "aws_prefix_list": dataSourceAwsPrefixList(), - "aws_pricing_product": dataSourceAwsPricingProduct(), - "aws_rds_cluster": dataSourceAwsRdsCluster(), - "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), - "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), - "aws_region": dataSourceAwsRegion(), - "aws_route": dataSourceAwsRoute(), - "aws_route_table": dataSourceAwsRouteTable(), - "aws_route_tables": dataSourceAwsRouteTables(), - "aws_route53_delegation_set": dataSourceAwsDelegationSet(), - "aws_route53_zone": dataSourceAwsRoute53Zone(), - "aws_s3_bucket": dataSourceAwsS3Bucket(), - "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), - "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), - "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), - "aws_sns_topic": dataSourceAwsSnsTopic(), - "aws_sqs_queue": dataSourceAwsSqsQueue(), - "aws_ssm_document": dataSourceAwsSsmDocument(), - "aws_ssm_parameter": dataSourceAwsSsmParameter(), - "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), - "aws_subnet": dataSourceAwsSubnet(), - "aws_subnet_ids": dataSourceAwsSubnetIDs(), - "aws_transfer_server": dataSourceAwsTransferServer(), - "aws_vpcs": dataSourceAwsVpcs(), - "aws_security_group": dataSourceAwsSecurityGroup(), - "aws_security_groups": dataSourceAwsSecurityGroups(), - "aws_vpc": dataSourceAwsVpc(), - "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), - "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), - "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), - 
"aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), - "aws_vpn_gateway": dataSourceAwsVpnGateway(), - "aws_workspaces_bundle": dataSourceAwsWorkspaceBundle(), + "aws_acm_certificate": dataSourceAwsAcmCertificate(), + "aws_acmpca_certificate_authority": dataSourceAwsAcmpcaCertificateAuthority(), + "aws_ami": dataSourceAwsAmi(), + "aws_ami_ids": dataSourceAwsAmiIds(), + "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), + "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), + "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), + "aws_arn": dataSourceAwsArn(), + "aws_autoscaling_group": dataSourceAwsAutoscalingGroup(), + "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), + "aws_availability_zone": dataSourceAwsAvailabilityZone(), + "aws_availability_zones": dataSourceAwsAvailabilityZones(), + "aws_batch_compute_environment": dataSourceAwsBatchComputeEnvironment(), + "aws_batch_job_queue": dataSourceAwsBatchJobQueue(), + "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), + "aws_caller_identity": dataSourceAwsCallerIdentity(), + "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), + "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), + "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_cloudhsm_v2_cluster": dataSourceCloudHsm2Cluster(), + "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), + "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), + "aws_cognito_user_pools": dataSourceAwsCognitoUserPools(), + "aws_codecommit_repository": dataSourceAwsCodeCommitRepository(), + "aws_cur_report_definition": dataSourceAwsCurReportDefinition(), + "aws_db_cluster_snapshot": dataSourceAwsDbClusterSnapshot(), + "aws_db_event_categories": dataSourceAwsDbEventCategories(), + "aws_db_instance": dataSourceAwsDbInstance(), + "aws_db_snapshot": dataSourceAwsDbSnapshot(), + "aws_dx_gateway": dataSourceAwsDxGateway(), + "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), + "aws_ebs_default_kms_key": dataSourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": dataSourceAwsEbsEncryptionByDefault(), + "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), + "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), + "aws_ebs_volume": dataSourceAwsEbsVolume(), + "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), + "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), + "aws_ec2_transit_gateway_route_table": dataSourceAwsEc2TransitGatewayRouteTable(), + "aws_ec2_transit_gateway_vpc_attachment": dataSourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpn_attachment": dataSourceAwsEc2TransitGatewayVpnAttachment(), + "aws_ecr_image": dataSourceAwsEcrImage(), + "aws_ecr_repository": dataSourceAwsEcrRepository(), + "aws_ecs_cluster": dataSourceAwsEcsCluster(), + "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), + "aws_ecs_service": dataSourceAwsEcsService(), + "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), + "aws_customer_gateway": dataSourceAwsCustomerGateway(), + "aws_efs_file_system": dataSourceAwsEfsFileSystem(), + "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), + "aws_eip": dataSourceAwsEip(), + "aws_eks_cluster": dataSourceAwsEksCluster(), + "aws_eks_cluster_auth": dataSourceAwsEksClusterAuth(), + "aws_elastic_beanstalk_application": dataSourceAwsElasticBeanstalkApplication(), + 
"aws_elastic_beanstalk_hosted_zone": dataSourceAwsElasticBeanstalkHostedZone(), + "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), + "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), + "aws_elasticsearch_domain": dataSourceAwsElasticSearchDomain(), + "aws_elb": dataSourceAwsElb(), + "aws_elasticache_replication_group": dataSourceAwsElasticacheReplicationGroup(), + "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), + "aws_elb_service_account": dataSourceAwsElbServiceAccount(), + "aws_glue_script": dataSourceAwsGlueScript(), + "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), + "aws_iam_group": dataSourceAwsIAMGroup(), + "aws_iam_instance_profile": dataSourceAwsIAMInstanceProfile(), + "aws_iam_policy": dataSourceAwsIAMPolicy(), + "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), + "aws_iam_role": dataSourceAwsIAMRole(), + "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), + "aws_iam_user": dataSourceAwsIAMUser(), + "aws_internet_gateway": dataSourceAwsInternetGateway(), + "aws_iot_endpoint": dataSourceAwsIotEndpoint(), + "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), + "aws_instance": dataSourceAwsInstance(), + "aws_instances": dataSourceAwsInstances(), + "aws_ip_ranges": dataSourceAwsIPRanges(), + "aws_kinesis_stream": dataSourceAwsKinesisStream(), + "aws_kms_alias": dataSourceAwsKmsAlias(), + "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), + "aws_kms_key": dataSourceAwsKmsKey(), + "aws_kms_secret": dataSourceAwsKmsSecret(), + "aws_kms_secrets": dataSourceAwsKmsSecrets(), + "aws_lambda_function": dataSourceAwsLambdaFunction(), + "aws_lambda_invocation": dataSourceAwsLambdaInvocation(), + "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), + "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), + "aws_launch_template": dataSourceAwsLaunchTemplate(), + "aws_mq_broker": dataSourceAwsMqBroker(), + "aws_msk_cluster": dataSourceAwsMskCluster(), + "aws_msk_configuration": dataSourceAwsMskConfiguration(), + "aws_nat_gateway": dataSourceAwsNatGateway(), + "aws_network_acls": dataSourceAwsNetworkAcls(), + "aws_network_interface": dataSourceAwsNetworkInterface(), + "aws_network_interfaces": dataSourceAwsNetworkInterfaces(), + "aws_organizations_organization": dataSourceAwsOrganizationsOrganization(), + "aws_partition": dataSourceAwsPartition(), + "aws_prefix_list": dataSourceAwsPrefixList(), + "aws_pricing_product": dataSourceAwsPricingProduct(), + "aws_qldb_ledger": dataSourceAwsQLDBLedger(), + "aws_ram_resource_share": dataSourceAwsRamResourceShare(), + "aws_rds_cluster": dataSourceAwsRdsCluster(), + "aws_redshift_cluster": dataSourceAwsRedshiftCluster(), + "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), + "aws_region": dataSourceAwsRegion(), + "aws_route": dataSourceAwsRoute(), + "aws_route_table": dataSourceAwsRouteTable(), + "aws_route_tables": dataSourceAwsRouteTables(), + "aws_route53_delegation_set": dataSourceAwsDelegationSet(), + "aws_route53_resolver_rule": dataSourceAwsRoute53ResolverRule(), + "aws_route53_resolver_rules": dataSourceAwsRoute53ResolverRules(), + "aws_route53_zone": dataSourceAwsRoute53Zone(), + "aws_s3_bucket": dataSourceAwsS3Bucket(), + "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), + "aws_s3_bucket_objects": dataSourceAwsS3BucketObjects(), + "aws_secretsmanager_secret": dataSourceAwsSecretsManagerSecret(), + "aws_secretsmanager_secret_version": dataSourceAwsSecretsManagerSecretVersion(), + 
"aws_servicequotas_service": dataSourceAwsServiceQuotasService(), + "aws_servicequotas_service_quota": dataSourceAwsServiceQuotasServiceQuota(), + "aws_sns_topic": dataSourceAwsSnsTopic(), + "aws_sqs_queue": dataSourceAwsSqsQueue(), + "aws_ssm_document": dataSourceAwsSsmDocument(), + "aws_ssm_parameter": dataSourceAwsSsmParameter(), + "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), + "aws_subnet": dataSourceAwsSubnet(), + "aws_subnet_ids": dataSourceAwsSubnetIDs(), + "aws_transfer_server": dataSourceAwsTransferServer(), + "aws_vpcs": dataSourceAwsVpcs(), + "aws_security_group": dataSourceAwsSecurityGroup(), + "aws_security_groups": dataSourceAwsSecurityGroups(), + "aws_vpc": dataSourceAwsVpc(), + "aws_vpc_dhcp_options": dataSourceAwsVpcDhcpOptions(), + "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), + "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), + "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), + "aws_vpn_gateway": dataSourceAwsVpnGateway(), + "aws_waf_ipset": dataSourceAwsWafIpSet(), + "aws_waf_rule": dataSourceAwsWafRule(), + "aws_waf_rate_based_rule": dataSourceAwsWafRateBasedRule(), + "aws_waf_web_acl": dataSourceAwsWafWebAcl(), + "aws_wafregional_ipset": dataSourceAwsWafRegionalIpSet(), + "aws_wafregional_rule": dataSourceAwsWafRegionalRule(), + "aws_wafregional_rate_based_rule": dataSourceAwsWafRegionalRateBasedRule(), + "aws_wafregional_web_acl": dataSourceAwsWafRegionalWebAcl(), + "aws_workspaces_bundle": dataSourceAwsWorkspaceBundle(), // Adding the Aliases for the ALB -> LB Rename "aws_lb": dataSourceAwsLb(), @@ -315,10 +355,12 @@ func Provider() terraform.ResourceProvider { "aws_appmesh_virtual_service": resourceAwsAppmeshVirtualService(), "aws_appsync_api_key": resourceAwsAppsyncApiKey(), "aws_appsync_datasource": resourceAwsAppsyncDatasource(), + "aws_appsync_function": resourceAwsAppsyncFunction(), "aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(), "aws_appsync_resolver": resourceAwsAppsyncResolver(), "aws_athena_database": resourceAwsAthenaDatabase(), "aws_athena_named_query": resourceAwsAthenaNamedQuery(), + "aws_athena_workgroup": resourceAwsAthenaWorkgroup(), "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), @@ -353,6 +395,8 @@ func Provider() terraform.ResourceProvider { "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), + "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), + "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), "aws_cognito_identity_provider": resourceAwsCognitoIdentityProvider(), @@ -371,11 +415,13 @@ func Provider() terraform.ResourceProvider { "aws_codecommit_repository": resourceAwsCodeCommitRepository(), "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), "aws_codebuild_project": resourceAwsCodeBuildProject(), + "aws_codebuild_source_credential": resourceAwsCodeBuildSourceCredential(), "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), "aws_codepipeline": resourceAwsCodePipeline(), "aws_codepipeline_webhook": 
resourceAwsCodePipelineWebhook(), "aws_cur_report_definition": resourceAwsCurReportDefinition(), "aws_customer_gateway": resourceAwsCustomerGateway(), + "aws_datapipeline_pipeline": resourceAwsDataPipelinePipeline(), "aws_datasync_agent": resourceAwsDataSyncAgent(), "aws_datasync_location_efs": resourceAwsDataSyncLocationEfs(), "aws_datasync_location_nfs": resourceAwsDataSyncLocationNfs(), @@ -396,6 +442,7 @@ func Provider() terraform.ResourceProvider { "aws_devicefarm_project": resourceAwsDevicefarmProject(), "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), "aws_directory_service_conditional_forwarder": resourceAwsDirectoryServiceConditionalForwarder(), + "aws_directory_service_log_subscription": resourceAwsDirectoryServiceLogSubscription(), "aws_dlm_lifecycle_policy": resourceAwsDlmLifecyclePolicy(), "aws_dms_certificate": resourceAwsDmsCertificate(), "aws_dms_endpoint": resourceAwsDmsEndpoint(), @@ -420,9 +467,12 @@ func Provider() terraform.ResourceProvider { "aws_dx_lag": resourceAwsDxLag(), "aws_dx_private_virtual_interface": resourceAwsDxPrivateVirtualInterface(), "aws_dx_public_virtual_interface": resourceAwsDxPublicVirtualInterface(), + "aws_dx_transit_virtual_interface": resourceAwsDxTransitVirtualInterface(), "aws_dynamodb_table": resourceAwsDynamoDbTable(), "aws_dynamodb_table_item": resourceAwsDynamoDbTableItem(), "aws_dynamodb_global_table": resourceAwsDynamoDbGlobalTable(), + "aws_ebs_default_kms_key": resourceAwsEbsDefaultKmsKey(), + "aws_ebs_encryption_by_default": resourceAwsEbsEncryptionByDefault(), "aws_ebs_snapshot": resourceAwsEbsSnapshot(), "aws_ebs_snapshot_copy": resourceAwsEbsSnapshotCopy(), "aws_ebs_volume": resourceAwsEbsVolume(), @@ -436,6 +486,7 @@ func Provider() terraform.ResourceProvider { "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), "aws_ec2_transit_gateway_route_table_propagation": resourceAwsEc2TransitGatewayRouteTablePropagation(), "aws_ec2_transit_gateway_vpc_attachment": resourceAwsEc2TransitGatewayVpcAttachment(), + "aws_ec2_transit_gateway_vpc_attachment_accepter": resourceAwsEc2TransitGatewayVpcAttachmentAccepter(), "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), "aws_ecr_repository": resourceAwsEcrRepository(), "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), @@ -467,6 +518,9 @@ func Provider() terraform.ResourceProvider { "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), "aws_flow_log": resourceAwsFlowLog(), + "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), + "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), + "aws_fms_admin_account": resourceAwsFmsAdminAccount(), "aws_gamelift_alias": resourceAwsGameliftAlias(), "aws_gamelift_build": resourceAwsGameliftBuild(), "aws_gamelift_fleet": resourceAwsGameliftFleet(), @@ -474,6 +528,7 @@ func Provider() terraform.ResourceProvider { "aws_glacier_vault": resourceAwsGlacierVault(), "aws_glacier_vault_lock": resourceAwsGlacierVaultLock(), "aws_globalaccelerator_accelerator": resourceAwsGlobalAcceleratorAccelerator(), + "aws_globalaccelerator_endpoint_group": resourceAwsGlobalAcceleratorEndpointGroup(), "aws_globalaccelerator_listener": resourceAwsGlobalAcceleratorListener(), "aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(), "aws_glue_catalog_table": resourceAwsGlueCatalogTable(), @@ -560,6 +615,8 @@ func Provider() terraform.ResourceProvider { "aws_media_package_channel": 
resourceAwsMediaPackageChannel(), "aws_media_store_container": resourceAwsMediaStoreContainer(), "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), + "aws_msk_cluster": resourceAwsMskCluster(), + "aws_msk_configuration": resourceAwsMskConfiguration(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), @@ -596,9 +653,13 @@ func Provider() terraform.ResourceProvider { "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), "aws_placement_group": resourceAwsPlacementGroup(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), + "aws_qldb_ledger": resourceAwsQLDBLedger(), + "aws_quicksight_group": resourceAwsQuickSightGroup(), + "aws_quicksight_user": resourceAwsQuickSightUser(), "aws_ram_principal_association": resourceAwsRamPrincipalAssociation(), "aws_ram_resource_association": resourceAwsRamResourceAssociation(), "aws_ram_resource_share": resourceAwsRamResourceShare(), + "aws_ram_resource_share_accepter": resourceAwsRamResourceShareAccepter(), "aws_rds_cluster": resourceAwsRDSCluster(), "aws_rds_cluster_endpoint": resourceAwsRDSClusterEndpoint(), "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), @@ -609,6 +670,8 @@ func Provider() terraform.ResourceProvider { "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), "aws_redshift_snapshot_copy_grant": resourceAwsRedshiftSnapshotCopyGrant(), + "aws_redshift_snapshot_schedule": resourceAwsRedshiftSnapshotSchedule(), + "aws_redshift_snapshot_schedule_association": resourceAwsRedshiftSnapshotScheduleAssociation(), "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), @@ -636,6 +699,8 @@ func Provider() terraform.ResourceProvider { "aws_ses_domain_identity_verification": resourceAwsSesDomainIdentityVerification(), "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), "aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(), + "aws_ses_email_identity": resourceAwsSesEmailIdentity(), + "aws_ses_identity_policy": resourceAwsSesIdentityPolicy(), "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), @@ -663,6 +728,8 @@ func Provider() terraform.ResourceProvider { "aws_service_discovery_private_dns_namespace": resourceAwsServiceDiscoveryPrivateDnsNamespace(), "aws_service_discovery_public_dns_namespace": resourceAwsServiceDiscoveryPublicDnsNamespace(), "aws_service_discovery_service": resourceAwsServiceDiscoveryService(), + "aws_servicequotas_service_quota": resourceAwsServiceQuotasServiceQuota(), + "aws_shield_protection": resourceAwsShieldProtection(), "aws_simpledb_domain": resourceAwsSimpleDBDomain(), "aws_ssm_activation": resourceAwsSsmActivation(), "aws_ssm_association": resourceAwsSsmAssociation(), @@ -781,8 +848,19 @@ func Provider() terraform.ResourceProvider { "aws_alb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), "aws_lb_target_group_attachment": resourceAwsLbTargetGroupAttachment(), }, - ConfigureFunc: providerConfigure, } + + provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { + terraformVersion := provider.TerraformVersion + if terraformVersion == "" { + // Terraform 0.12 introduced this field 
to the protocol + // We can therefore assume that if it's missing it's 0.10 or 0.11 + terraformVersion = "0.11+compatible" + } + return providerConfigure(d, terraformVersion) + } + + return provider } var descriptions map[string]string @@ -853,12 +931,16 @@ func init() { endpointServiceNames = []string{ "acm", "acmpca", + "amplify", "apigateway", "applicationautoscaling", + "applicationinsights", "appmesh", + "appstream", "appsync", "athena", "autoscaling", + "autoscalingplans", "backup", "batch", "budgets", @@ -902,6 +984,7 @@ func init() { "es", "firehose", "fms", + "forecast", "fsx", "gamelift", "glacier", @@ -911,12 +994,15 @@ func init() { "iam", "inspector", "iot", + "iotanalytics", + "iotevents", "kafka", "kinesis_analytics", "kinesis", "kinesisanalytics", "kinesisvideo", "kms", + "lakeformation", "lambda", "lexmodels", "licensemanager", @@ -933,8 +1019,10 @@ func init() { "neptune", "opsworks", "organizations", + "personalize", "pinpoint", "pricing", + "qldb", "quicksight", "r53", "ram", @@ -952,6 +1040,7 @@ func init() { "serverlessrepo", "servicecatalog", "servicediscovery", + "servicequotas", "ses", "shield", "sns", @@ -970,7 +1059,7 @@ func init() { } } -func providerConfigure(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) { config := Config{ AccessKey: d.Get("access_key").(string), SecretKey: d.Get("secret_key").(string), @@ -986,6 +1075,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), S3ForcePathStyle: d.Get("s3_force_path_style").(bool), + terraformVersion: terraformVersion, } // Set CredsFilename, expanding home directory @@ -1021,6 +1111,18 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { } } + if v, ok := d.GetOk("ignore_tag_prefixes"); ok { + for _, ignoreTagPrefixRaw := range v.(*schema.Set).List() { + config.IgnoreTagPrefixes = append(config.IgnoreTagPrefixes, ignoreTagPrefixRaw.(string)) + } + } + + if v, ok := d.GetOk("ignore_tags"); ok { + for _, ignoreTagRaw := range v.(*schema.Set).List() { + config.IgnoreTags = append(config.IgnoreTags, ignoreTagRaw.(string)) + } + } + if v, ok := d.GetOk("allowed_account_ids"); ok { for _, accountIDRaw := range v.(*schema.Set).List() { config.AllowedAccountIds = append(config.AllowedAccountIds, accountIDRaw.(string)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate.go index 09002b2258f..4c1eaad56d5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate.go @@ -9,8 +9,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/acm" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func 
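The `ConfigureFunc` rewrite above exists because the callback receives only the `*schema.ResourceData`; to see the SDK-populated `provider.TerraformVersion`, the provider value has to be constructed first so a closure can capture it. Reduced to its shape (a sketch using the same plugin-SDK types as this file, with the schema and maps elided):

    func Provider() terraform.ResourceProvider {
        provider := &schema.Provider{ /* schema and resource maps elided */ }

        // The closure captures provider, so it reads TerraformVersion only at
        // configure time, after the SDK has populated the field.
        provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) {
            tfVersion := provider.TerraformVersion
            if tfVersion == "" {
                // The field predates the 0.12 protocol; empty means 0.10/0.11.
                tfVersion = "0.11+compatible"
            }
            return providerConfigure(d, tfVersion)
        }

        return provider
    }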
resourceAwsAcmCertificate() *schema.Resource { @@ -22,7 +24,6 @@ func resourceAwsAcmCertificate() *schema.Resource { Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, - Schema: map[string]*schema.Schema{ "certificate_body": { Type: schema.TypeString, @@ -41,6 +42,11 @@ func resourceAwsAcmCertificate() *schema.Resource { StateFunc: normalizeCert, Sensitive: true, }, + "certificate_authority_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "domain_name": { Type: schema.TypeString, Optional: true, @@ -73,7 +79,7 @@ func resourceAwsAcmCertificate() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConflictsWith: []string{"private_key", "certificate_body", "certificate_chain"}, + ConflictsWith: []string{"private_key", "certificate_body", "certificate_chain", "certificate_authority_arn"}, }, "arn": { Type: schema.TypeString, @@ -108,6 +114,35 @@ func resourceAwsAcmCertificate() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if _, ok := d.GetOk("private_key"); ok { + // ignore diffs for imported certs; they have a different logging preference + // default to requested certs which can't be changed by the ImportCertificate API + return true + } + // behave just like suppressMissingOptionalConfigurationBlock() for requested certs + return old == "1" && new == "0" + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_transparency_logging_preference": { + Type: schema.TypeString, + Optional: true, + Default: acm.CertificateTransparencyLoggingPreferenceEnabled, + ForceNew: true, + ConflictsWith: []string{"private_key", "certificate_body", "certificate_chain"}, + ValidateFunc: validation.StringInSlice([]string{ + acm.CertificateTransparencyLoggingPreferenceEnabled, + acm.CertificateTransparencyLoggingPreferenceDisabled, + }, false), + }, + }, + }, + }, "tags": tagsSchema(), }, } @@ -115,6 +150,10 @@ func resourceAwsAcmCertificate() *schema.Resource { func resourceAwsAcmCertificateCreate(d *schema.ResourceData, meta interface{}) error { if _, ok := d.GetOk("domain_name"); ok { + if _, ok := d.GetOk("certificate_authority_arn"); ok { + return resourceAwsAcmCertificateCreateRequested(d, meta) + } + if _, ok := d.GetOk("validation_method"); !ok { return errors.New("validation_method must be set when creating a certificate") } @@ -136,15 +175,9 @@ func resourceAwsAcmCertificateCreateImported(d *schema.ResourceData, meta interf } d.SetId(*resp.CertificateArn) - if v, ok := d.GetOk("tags"); ok { - params := &acm.AddTagsToCertificateInput{ - CertificateArn: resp.CertificateArn, - Tags: tagsFromMapACM(v.(map[string]interface{})), - } - _, err := acmconn.AddTagsToCertificate(params) - - if err != nil { - return fmt.Errorf("Error requesting certificate: %s", err) + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + if err := keyvaluetags.AcmUpdateTags(acmconn, d.Id(), nil, v); err != nil { + return fmt.Errorf("error adding tags: %s", err) } } @@ -155,7 +188,12 @@ func resourceAwsAcmCertificateCreateRequested(d *schema.ResourceData, meta inter acmconn := meta.(*AWSClient).acmconn params := &acm.RequestCertificateInput{ DomainName: aws.String(strings.TrimSuffix(d.Get("domain_name").(string), ".")), - ValidationMethod: aws.String(d.Get("validation_method").(string)), + IdempotencyToken: 
aws.String(resource.PrefixedUniqueId("tf")), // 32 character limit + Options: expandAcmCertificateOptions(d.Get("options").([]interface{})), + } + + if caARN, ok := d.GetOk("certificate_authority_arn"); ok { + params.CertificateAuthorityArn = aws.String(caARN.(string)) } if sans, ok := d.GetOk("subject_alternative_names"); ok { @@ -166,6 +204,10 @@ func resourceAwsAcmCertificateCreateRequested(d *schema.ResourceData, meta inter params.SubjectAlternativeNames = subjectAlternativeNames } + if v, ok := d.GetOk("validation_method"); ok { + params.ValidationMethod = aws.String(v.(string)) + } + log.Printf("[DEBUG] ACM Certificate Request: %#v", params) resp, err := acmconn.RequestCertificate(params) @@ -174,15 +216,9 @@ func resourceAwsAcmCertificateCreateRequested(d *schema.ResourceData, meta inter } d.SetId(*resp.CertificateArn) - if v, ok := d.GetOk("tags"); ok { - params := &acm.AddTagsToCertificateInput{ - CertificateArn: resp.CertificateArn, - Tags: tagsFromMapACM(v.(map[string]interface{})), - } - _, err := acmconn.AddTagsToCertificate(params) - - if err != nil { - return fmt.Errorf("Error requesting certificate: %s", err) + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + if err := keyvaluetags.AcmUpdateTags(acmconn, d.Id(), nil, v); err != nil { + return fmt.Errorf("error adding tags: %s", err) } } @@ -209,6 +245,7 @@ func resourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) err d.Set("domain_name", resp.Certificate.DomainName) d.Set("arn", resp.Certificate.CertificateArn) + d.Set("certificate_authority_arn", resp.Certificate.CertificateAuthorityArn) if err := d.Set("subject_alternative_names", cleanUpSubjectAlternativeNames(resp.Certificate)); err != nil { return resource.NonRetryableError(err) @@ -229,16 +266,18 @@ func resourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) err d.Set("validation_method", resourceAwsAcmCertificateGuessValidationMethod(domainValidationOptions, emailValidationOptions)) - params := &acm.ListTagsForCertificateInput{ - CertificateArn: aws.String(d.Id()), + if err := d.Set("options", flattenAcmCertificateOptions(resp.Certificate.Options)); err != nil { + return resource.NonRetryableError(fmt.Errorf("error setting certificate options: %s", err)) } - tagResp, err := acmconn.ListTagsForCertificate(params) + tags, err := keyvaluetags.AcmListTags(acmconn, d.Id()) + if err != nil { - return resource.NonRetryableError(fmt.Errorf("error listing tags for certificate (%s): %s", d.Id(), err)) + return resource.NonRetryableError(fmt.Errorf("error listing tags for ACM Certificate (%s): %s", d.Id(), err)) } - if err := d.Set("tags", tagsToMapACM(tagResp.Tags)); err != nil { - return resource.NonRetryableError(err) + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return resource.NonRetryableError(fmt.Errorf("error setting tags: %s", err)) } return nil @@ -267,9 +306,9 @@ func resourceAwsAcmCertificateUpdate(d *schema.ResourceData, meta interface{}) e } if d.HasChange("tags") { - err := setTagsACM(acmconn, d) - if err != nil { - return err + o, n := d.GetChange("tags") + if err := keyvaluetags.AcmUpdateTags(acmconn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) } } return resourceAwsAcmCertificateRead(d, meta) @@ -291,7 +330,12 @@ func convertValidationOptions(certificate *acm.CertificateDetail) ([]map[string] var domainValidationResult []map[string]interface{} var emailValidationResult []string - if *certificate.Type == acm.CertificateTypeAmazonIssued { + switch 
aws.StringValue(certificate.Type) { + case acm.CertificateTypeAmazonIssued: + if len(certificate.DomainValidationOptions) == 0 && aws.StringValue(certificate.Status) == acm.DomainStatusPendingValidation { + log.Printf("[DEBUG] No validation options need to retry.") + return nil, nil, fmt.Errorf("No validation options need to retry.") + } for _, o := range certificate.DomainValidationOptions { if o.ResourceRecord != nil { validationOption := map[string]interface{}{ @@ -310,6 +354,12 @@ func convertValidationOptions(certificate *acm.CertificateDetail) ([]map[string] return nil, nil, fmt.Errorf("No validation options need to retry: %#v", o) } } + case acm.CertificateTypePrivate: + // While ACM PRIVATE certificates do not need to be validated, there is a slight delay for + // the API to fill in all certificate details, which is during the PENDING_VALIDATION status. + if aws.StringValue(certificate.Status) == acm.DomainStatusPendingValidation { + return nil, nil, fmt.Errorf("certificate still pending issuance") + } } return domainValidationResult, emailValidationResult, nil @@ -358,3 +408,27 @@ func resourceAwsAcmCertificateImport(conn *acm.ACM, d *schema.ResourceData, upda log.Printf("[DEBUG] ACM Certificate Import: %#v", params) return conn.ImportCertificate(params) } + +func expandAcmCertificateOptions(l []interface{}) *acm.CertificateOptions { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + options := &acm.CertificateOptions{} + + if v, ok := m["certificate_transparency_logging_preference"]; ok { + options.CertificateTransparencyLoggingPreference = aws.String(v.(string)) + } + + return options +} + +func flattenAcmCertificateOptions(co *acm.CertificateOptions) []interface{} { + m := map[string]interface{}{ + "certificate_transparency_logging_preference": aws.StringValue(co.CertificateTransparencyLoggingPreference), + } + + return []interface{}{m} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate_validation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate_validation.go index 9cca55ef8c7..4dadcaef81c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate_validation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acm_certificate_validation.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/acm" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAcmCertificateValidation() *schema.Resource { @@ -54,7 +54,7 @@ func resourceAwsAcmCertificateValidationCreate(d *schema.ResourceData, meta inte } if *resp.Certificate.Type != "AMAZON_ISSUED" { - return fmt.Errorf("Certificate %s has type %s, no validation necessary", *resp.Certificate.CertificateArn, *resp.Certificate.Type) + return fmt.Errorf("Certificate %s has type %s, no validation necessary", aws.StringValue(resp.Certificate.CertificateArn), aws.StringValue(resp.Certificate.Type)) } if validation_record_fqdns, ok := d.GetOk("validation_record_fqdns"); ok { @@ -66,20 +66,30 @@ func
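`expandAcmCertificateOptions` and `flattenAcmCertificateOptions` above are the provider's usual expand/flatten pair for a `MaxItems: 1` configuration block: expand maps the zero-or-one-element list to the AWS struct (or nil), flatten rebuilds the list on read, and the block's `DiffSuppressFunc` hides the diff entirely for imported certificates (whose logging preference the ImportCertificate API cannot change) while otherwise suppressing only the phantom `1 -> 0` transition when the block is absent. A condensed sketch of the pair; the nil guard in the flatten direction is an addition here, not part of the hunk above:

    // Expand: the zero-or-one element list from the configuration becomes
    // the AWS struct, or nil when the block is absent.
    func expandOptions(l []interface{}) *acm.CertificateOptions {
        if len(l) == 0 || l[0] == nil {
            return nil
        }
        m := l[0].(map[string]interface{})
        return &acm.CertificateOptions{
            CertificateTransparencyLoggingPreference: aws.String(m["certificate_transparency_logging_preference"].(string)),
        }
    }

    // Flatten: the inverse mapping for Read; guards against an API
    // response that carries no options at all.
    func flattenOptions(co *acm.CertificateOptions) []interface{} {
        if co == nil {
            return []interface{}{}
        }
        return []interface{}{map[string]interface{}{
            "certificate_transparency_logging_preference": aws.StringValue(co.CertificateTransparencyLoggingPreference),
        }}
    }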
resourceAwsAcmCertificateValidationCreate(d *schema.ResourceData, meta inte log.Printf("[INFO] No validation_record_fqdns set, skipping check") } - return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { resp, err := acmconn.DescribeCertificate(params) if err != nil { return resource.NonRetryableError(fmt.Errorf("Error describing certificate: %s", err)) } - if *resp.Certificate.Status != "ISSUED" { - return resource.RetryableError(fmt.Errorf("Expected certificate to be issued but was in state %s", *resp.Certificate.Status)) + if aws.StringValue(resp.Certificate.Status) != acm.CertificateStatusIssued { + return resource.RetryableError(fmt.Errorf("Expected certificate to be issued but was in state %s", aws.StringValue(resp.Certificate.Status))) } log.Printf("[INFO] ACM Certificate validation for %s done, certificate was issued", certificate_arn) return resource.NonRetryableError(resourceAwsAcmCertificateValidationRead(d, meta)) }) + if isResourceTimeoutError(err) { + resp, err = acmconn.DescribeCertificate(params) + if err == nil && aws.StringValue(resp.Certificate.Status) != acm.CertificateStatusIssued { + return fmt.Errorf("Expected certificate to be issued but was in state %s", aws.StringValue(resp.Certificate.Status)) + } + } + if err != nil { + return fmt.Errorf("Error describing created certificate: %s", err) + } + return nil } func resourceAwsAcmCertificateCheckValidationRecords(validationRecordFqdns []interface{}, cert *acm.CertificateDetail, conn *acm.ACM) error { @@ -89,9 +99,11 @@ func resourceAwsAcmCertificateCheckValidationRecords(validationRecordFqdns []int input := &acm.DescribeCertificateInput{ CertificateArn: cert.CertificateArn, } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + var output *acm.DescribeCertificateOutput + err = resource.Retry(1*time.Minute, func() *resource.RetryError { log.Printf("[DEBUG] Certificate domain validation options empty for %q, retrying", *cert.CertificateArn) - output, err := conn.DescribeCertificate(input) + output, err = conn.DescribeCertificate(input) if err != nil { return resource.NonRetryableError(err) } @@ -101,9 +113,19 @@ func resourceAwsAcmCertificateCheckValidationRecords(validationRecordFqdns []int cert = output.Certificate return nil }) + if isResourceTimeoutError(err) { + output, err = conn.DescribeCertificate(input) + if err != nil { + return fmt.Errorf("Error describing ACM certificate: %s", err) + } + if len(output.Certificate.DomainValidationOptions) == 0 { + return fmt.Errorf("Certificate domain validation options empty for %s", *cert.CertificateArn) + } + } if err != nil { - return err + return fmt.Errorf("Error checking certificate domain validation options: %s", err) } + cert = output.Certificate } for _, v := range cert.DomainValidationOptions { if v.ValidationMethod != nil { @@ -149,8 +171,8 @@ func resourceAwsAcmCertificateValidationRead(d *schema.ResourceData, meta interf return fmt.Errorf("Error describing certificate: %s", err) } - if *resp.Certificate.Status != "ISSUED" { - log.Printf("[INFO] Certificate status not issued, was %s, tainting validation", *resp.Certificate.Status) + if aws.StringValue(resp.Certificate.Status) != acm.CertificateStatusIssued { + log.Printf("[INFO] Certificate status not issued, was %s, tainting validation", aws.StringValue(resp.Certificate.Status)) d.SetId("") } else { d.SetId((*resp.Certificate.IssuedAt).String()) diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority.go index e6c30d88381..755475daae4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority.go @@ -7,9 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/acmpca" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsAcmpcaCertificateAuthority() *schema.Resource { @@ -255,8 +256,10 @@ func resourceAwsAcmpcaCertificateAuthority() *schema.Resource { "type": { Type: schema.TypeString, Optional: true, + ForceNew: true, Default: acmpca.CertificateAuthorityTypeSubordinate, ValidateFunc: validation.StringInSlice([]string{ + acmpca.CertificateAuthorityTypeRoot, acmpca.CertificateAuthorityTypeSubordinate, }, false), }, @@ -266,6 +269,7 @@ func resourceAwsAcmpcaCertificateAuthority() *schema.Resource { func resourceAwsAcmpcaCertificateAuthorityCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).acmpcaconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().AcmpcaTags() input := &acmpca.CreateCertificateAuthorityInput{ CertificateAuthorityConfiguration: expandAcmpcaCertificateAuthorityConfiguration(d.Get("certificate_authority_configuration").([]interface{})), @@ -274,6 +278,10 @@ func resourceAwsAcmpcaCertificateAuthorityCreate(d *schema.ResourceData, meta in RevocationConfiguration: expandAcmpcaRevocationConfiguration(d.Get("revocation_configuration").([]interface{})), } + if len(tags) > 0 { + input.Tags = tags + } + log.Printf("[DEBUG] Creating ACMPCA Certificate Authority: %s", input) var output *acmpca.CreateCertificateAuthorityOutput err := resource.Retry(1*time.Minute, func() *resource.RetryError { @@ -288,25 +296,15 @@ func resourceAwsAcmpcaCertificateAuthorityCreate(d *schema.ResourceData, meta in } return nil }) + if isResourceTimeoutError(err) { + output, err = conn.CreateCertificateAuthority(input) + } if err != nil { return fmt.Errorf("error creating ACMPCA Certificate Authority: %s", err) } d.SetId(aws.StringValue(output.CertificateAuthorityArn)) - if v, ok := d.GetOk("tags"); ok { - input := &acmpca.TagCertificateAuthorityInput{ - CertificateAuthorityArn: aws.String(d.Id()), - Tags: tagsFromMapACMPCA(v.(map[string]interface{})), - } - - log.Printf("[DEBUG] Tagging ACMPCA Certificate Authority: %s", input) - _, err := conn.TagCertificateAuthority(input) - if err != nil { - return fmt.Errorf("error tagging ACMPCA Certificate Authority %q: %s", d.Id(), input) - } - } - stateConf := &resource.StateChangeConf{ Pending: []string{ "", @@ -422,12 +420,13 @@ func resourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta inte d.Set("certificate_signing_request", getCertificateAuthorityCsrOutput.Csr) } - tags, err := 
listAcmpcaTags(conn, d.Id()) + tags, err := keyvaluetags.AcmpcaListTags(conn, d.Id()) + if err != nil { - return fmt.Errorf("error reading ACMPCA Certificate Authority %q tags: %s", d.Id(), err) + return fmt.Errorf("error listing tags for ACMPCA Certificate Authority (%s): %s", d.Id(), err) } - if err := d.Set("tags", tagsToMapACMPCA(tags)); err != nil { + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -464,30 +463,10 @@ func resourceAwsAcmpcaCertificateAuthorityUpdate(d *schema.ResourceData, meta in } if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsACMPCA(tagsFromMapACMPCA(o), tagsFromMapACMPCA(n)) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing ACMPCA Certificate Authority %q tags: %#v", d.Id(), remove) - _, err := conn.UntagCertificateAuthority(&acmpca.UntagCertificateAuthorityInput{ - CertificateAuthorityArn: aws.String(d.Id()), - Tags: remove, - }) - if err != nil { - return fmt.Errorf("error updating ACMPCA Certificate Authority %q tags: %s", d.Id(), err) - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating ACMPCA Certificate Authority %q tags: %#v", d.Id(), create) - _, err := conn.TagCertificateAuthority(&acmpca.TagCertificateAuthorityInput{ - CertificateAuthorityArn: aws.String(d.Id()), - Tags: create, - }) - if err != nil { - return fmt.Errorf("error updating ACMPCA Certificate Authority %q tags: %s", d.Id(), err) - } + o, n := d.GetChange("tags") + + if err := keyvaluetags.AcmpcaUpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating ACMPCA Certificate Authority (%s) tags: %s", d.Id(), err) } } @@ -709,24 +688,3 @@ func flattenAcmpcaRevocationConfiguration(config *acmpca.RevocationConfiguration return []interface{}{m} } - -func listAcmpcaTags(conn *acmpca.ACMPCA, certificateAuthorityArn string) ([]*acmpca.Tag, error) { - tags := []*acmpca.Tag{} - input := &acmpca.ListTagsInput{ - CertificateAuthorityArn: aws.String(certificateAuthorityArn), - } - - for { - output, err := conn.ListTags(input) - if err != nil { - return tags, err - } - tags = append(tags, output.Tags...) 
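The `listAcmpcaTags` helper being deleted in this hunk is the manual pagination idiom that `keyvaluetags.AcmpcaListTags` now centralizes: request a page of tags, append it, and continue while the response carries a `NextToken`. Its generic shape, preserved as a sketch:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/acmpca"
    )

    // listAllTags mirrors the removed helper: fetch one page of tags at a
    // time and follow NextToken until the service reports no more pages.
    func listAllTags(conn *acmpca.ACMPCA, certificateAuthorityArn string) ([]*acmpca.Tag, error) {
        var tags []*acmpca.Tag
        input := &acmpca.ListTagsInput{
            CertificateAuthorityArn: aws.String(certificateAuthorityArn),
        }
        for {
            output, err := conn.ListTags(input)
            if err != nil {
                return nil, err
            }
            tags = append(tags, output.Tags...)
            if output.NextToken == nil {
                return tags, nil // last page reached
            }
            input.NextToken = output.NextToken
        }
    }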
- if output.NextToken == nil { - break - } - input.NextToken = output.NextToken - } - - return tags, nil -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority_migrate.go index 35f11085da3..32b052b5e28 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_acmpca_certificate_authority_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsAcmpcaCertificateAuthorityMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go index f1e0ca9a3e3..2709df80495 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go @@ -12,9 +12,10 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) const ( @@ -278,12 +279,18 @@ func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error { id := *res.ImageId d.SetId(id) + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + if err := keyvaluetags.Ec2UpdateTags(client, id, nil, v); err != nil { + return fmt.Errorf("error adding tags: %s", err) + } + } + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) if err != nil { return err } - return resourceAwsAmiUpdate(d, meta) + return resourceAwsAmiRead(d, meta) } func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { @@ -299,7 +306,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { var err error res, err = client.DescribeImages(req) if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { + if isAWSErr(err, "InvalidAMIID.NotFound", "") { if d.IsNewResource() { return resource.RetryableError(err) } @@ -313,6 +320,9 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + res, err = client.DescribeImages(req) + } if err != nil { return fmt.Errorf("Unable to find AMI after retries: %s", err) } @@ -401,10 +411,12 @@ func resourceAwsAmiUpdate(d *schema.ResourceData, meta interface{}) error { d.Partial(true) - if err := setTags(client, d); err != nil { - return err - } else { - d.SetPartial("tags") + if d.HasChange("tags") { + o, n := 
d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(client, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating AMI (%s) tags: %s", d.Id(), err) + } } if d.Get("description").(string) != "" { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go index e74d74f864e..5e0f4c2ec44 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAmiCopy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go index af05db1f1ff..e1f814558d9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAmiFromInstance() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go index 9d075c3a424..2cb88f49dde 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAmiLaunchPermission() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go index 29365633b2a..df302741d3f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go @@ -7,8 +7,8 @@ import ( 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayAccount() *schema.Resource { @@ -111,6 +111,9 @@ func resourceAwsApiGatewayAccountUpdate(d *schema.ResourceData, meta interface{} return nil }) + if isResourceTimeoutError(err) { + out, err = conn.UpdateAccount(&input) + } if err != nil { return fmt.Errorf("Updating API Gateway Account failed: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go index 76321622280..574576b867f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go @@ -6,9 +6,11 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsApiGatewayApiKey() *schema.Resource { @@ -77,6 +79,11 @@ func resourceAwsApiGatewayApiKey() *schema.Resource { Sensitive: true, ValidateFunc: validation.StringLenBetween(30, 128), }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } @@ -90,9 +97,10 @@ func resourceAwsApiGatewayApiKeyCreate(d *schema.ResourceData, meta interface{}) Description: aws.String(d.Get("description").(string)), Enabled: aws.Bool(d.Get("enabled").(bool)), Value: aws.String(d.Get("value").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().ApigatewayTags(), }) if err != nil { - return fmt.Errorf("Error creating API Gateway: %s", err) + return fmt.Errorf("Error creating API Gateway API Key: %s", err) } d.SetId(aws.StringValue(apiKey.Id)) @@ -118,6 +126,18 @@ func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) e return err } + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(apiKey.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "apigateway", + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("/apikeys/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("name", apiKey.Name) d.Set("description", apiKey.Description) d.Set("enabled", apiKey.Enabled) @@ -164,6 +184,13 @@ func resourceAwsApiGatewayApiKeyUpdate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Updating API Gateway API Key: %s", d.Id()) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.ApigatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + _, err := conn.UpdateApiKey(&apigateway.UpdateApiKeyInput{ ApiKey: aws.String(d.Id()), 
PatchOperations: resourceAwsApiGatewayApiKeyUpdateOperations(d), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go index ef069d6a7af..ee42bfffe48 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go @@ -8,10 +8,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) +const defaultAuthorizerTTL = 300 + func resourceAwsApiGatewayAuthorizer() *schema.Resource { return &schema.Resource{ Create: resourceAwsApiGatewayAuthorizerCreate, @@ -57,6 +59,7 @@ func resourceAwsApiGatewayAuthorizer() *schema.Resource { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 3600), + Default: defaultAuthorizerTTL, }, "identity_validation_expression": { Type: schema.TypeString, @@ -132,7 +135,13 @@ func resourceAwsApiGatewayAuthorizerRead(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Received API Gateway Authorizer: %s", authorizer) d.Set("authorizer_credentials", authorizer.AuthorizerCredentials) - d.Set("authorizer_result_ttl_in_seconds", authorizer.AuthorizerResultTtlInSeconds) + + if authorizer.AuthorizerResultTtlInSeconds != nil { + d.Set("authorizer_result_ttl_in_seconds", authorizer.AuthorizerResultTtlInSeconds) + } else { + d.Set("authorizer_result_ttl_in_seconds", defaultAuthorizerTTL) + } + d.Set("authorizer_uri", authorizer.AuthorizerUri) d.Set("identity_source", authorizer.IdentitySource) d.Set("identity_validation_expression", authorizer.IdentityValidationExpression) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go index dfbcd704136..9f2b2241434 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) const emptyBasePathMappingValue = "(none)" @@ -51,14 +51,15 @@ func resourceAwsApiGatewayBasePathMapping() *schema.Resource { func resourceAwsApiGatewayBasePathMappingCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigateway + input := &apigateway.CreateBasePathMappingInput{ + RestApiId: aws.String(d.Get("api_id").(string)), + DomainName: 
aws.String(d.Get("domain_name").(string)), + BasePath: aws.String(d.Get("base_path").(string)), + Stage: aws.String(d.Get("stage_name").(string)), + } err := resource.Retry(30*time.Second, func() *resource.RetryError { - _, err := conn.CreateBasePathMapping(&apigateway.CreateBasePathMappingInput{ - RestApiId: aws.String(d.Get("api_id").(string)), - DomainName: aws.String(d.Get("domain_name").(string)), - BasePath: aws.String(d.Get("base_path").(string)), - Stage: aws.String(d.Get("stage_name").(string)), - }) + _, err := conn.CreateBasePathMapping(input) if err != nil { if err, ok := err.(awserr.Error); ok && err.Code() != "BadRequestException" { @@ -73,6 +74,10 @@ func resourceAwsApiGatewayBasePathMappingCreate(d *schema.ResourceData, meta int return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateBasePathMapping(input) + } + if err != nil { return fmt.Errorf("Error creating Gateway base path mapping: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go index e5ab9738508..3bc0666168b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go @@ -5,9 +5,11 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsApiGatewayClientCertificate() *schema.Resource { @@ -37,6 +39,11 @@ func resourceAwsApiGatewayClientCertificate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } @@ -48,6 +55,9 @@ func resourceAwsApiGatewayClientCertificateCreate(d *schema.ResourceData, meta i if v, ok := d.GetOk("description"); ok { input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().ApigatewayTags() + } log.Printf("[DEBUG] Generating API Gateway Client Certificate: %s", input) out, err := conn.GenerateClientCertificate(&input) if err != nil { @@ -67,7 +77,7 @@ func resourceAwsApiGatewayClientCertificateRead(d *schema.ResourceData, meta int } out, err := conn.GetClientCertificate(&input) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { log.Printf("[WARN] API Gateway Client Certificate %s not found, removing", d.Id()) d.SetId("") return nil @@ -76,6 +86,18 @@ func resourceAwsApiGatewayClientCertificateRead(d *schema.ResourceData, meta int } log.Printf("[DEBUG] Received API Gateway Client Certificate: %s", out) + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(out.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: 
"apigateway", + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("/clientcertificates/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("description", out.Description) d.Set("created_date", out.CreatedDate.String()) d.Set("expiration_date", out.ExpirationDate.String()) @@ -107,6 +129,13 @@ func resourceAwsApiGatewayClientCertificateUpdate(d *schema.ResourceData, meta i return fmt.Errorf("Updating API Gateway Client Certificate failed: %s", err) } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.ApigatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsApiGatewayClientCertificateRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go index 11f457436f7..cbbff118ab0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayDeployment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_part.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_part.go index 95d4f9924df..ef22f1f235b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_part.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_part.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayDocumentationPart() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_version.go index 6f0280744a5..9e12cd60d78 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_documentation_version.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayDocumentationVersion() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go index b7669899fb7..7d81914b651 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go @@ -6,11 +6,12 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsApiGatewayDomainName() *schema.Resource { @@ -62,6 +63,16 @@ func resourceAwsApiGatewayDomainName() *schema.Resource { ForceNew: true, }, + "security_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + apigateway.SecurityPolicyTls10, + apigateway.SecurityPolicyTls12, + }, true), + }, + "certificate_arn": { Type: schema.TypeString, Optional: true, @@ -130,6 +141,11 @@ func resourceAwsApiGatewayDomainName() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } @@ -174,6 +190,14 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac params.RegionalCertificateName = aws.String(v.(string)) } + if v, ok := d.GetOk("security_policy"); ok && v.(string) != "" { + params.SecurityPolicy = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().ApigatewayTags() + } + domainName, err := conn.CreateDomainName(params) if err != nil { return fmt.Errorf("Error creating API Gateway Domain Name: %s", err) @@ -192,7 +216,7 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{ DomainName: aws.String(d.Id()), }) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { log.Printf("[WARN] API Gateway Domain Name (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -201,6 +225,17 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{ return err } + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "apigateway", + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("/domainnames/%s", d.Id()), + }.String() + d.Set("arn", arn) d.Set("certificate_arn", domainName.CertificateArn) d.Set("certificate_name", domainName.CertificateName) if err := d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)); err != nil { @@ -209,6 +244,7 @@ func 
resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{ d.Set("cloudfront_domain_name", domainName.DistributionDomainName) d.Set("cloudfront_zone_id", cloudFrontRoute53ZoneID) d.Set("domain_name", domainName.DomainName) + d.Set("security_policy", domainName.SecurityPolicy) if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(domainName.EndpointConfiguration)); err != nil { return fmt.Errorf("error setting endpoint_configuration: %s", err) @@ -257,6 +293,14 @@ func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []* }) } + if d.HasChange("security_policy") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/securityPolicy"), + Value: aws.String(d.Get("security_policy").(string)), + }) + } + if d.HasChange("endpoint_configuration.0.types") { // The domain name must have an endpoint type. // If attempting to remove the configuration, do nothing. @@ -278,6 +322,13 @@ func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interfac conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Updating API Gateway Domain Name %s", d.Id()) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.ApigatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + _, err := conn.UpdateDomainName(&apigateway.UpdateDomainNameInput{ DomainName: aws.String(d.Id()), PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d), @@ -294,19 +345,17 @@ func resourceAwsApiGatewayDomainNameDelete(d *schema.ResourceData, meta interfac conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Deleting API Gateway Domain Name: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteDomainName(&apigateway.DeleteDomainNameInput{ - DomainName: aws.String(d.Id()), - }) + _, err := conn.DeleteDomainName(&apigateway.DeleteDomainNameInput{ + DomainName: aws.String(d.Id()), + }) - if err == nil { - return nil - } + if isAWSErr(err, "NotFoundException", "") { + return nil + } - if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { - return nil - } + if err != nil { + return fmt.Errorf("Error deleting API Gateway domain name: %s", err) + } - return resource.NonRetryableError(err) - }) + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_gateway_response.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_gateway_response.go index cddf6e16105..d0606eddb18 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_gateway_response.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_gateway_response.go @@ -4,13 +4,11 @@ import ( "fmt" "log" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayGatewayResponse() *schema.Resource { @@ -139,22 +137,17 @@ func resourceAwsApiGatewayGatewayResponseDelete(d *schema.ResourceData, meta int 
conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Deleting API Gateway Gateway Response: %s", d.Id()) - return resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteGatewayResponse(&apigateway.DeleteGatewayResponseInput{ - RestApiId: aws.String(d.Get("rest_api_id").(string)), - ResponseType: aws.String(d.Get("response_type").(string)), - }) - - if err == nil { - return nil - } - - apigatewayErr, ok := err.(awserr.Error) + _, err := conn.DeleteGatewayResponse(&apigateway.DeleteGatewayResponseInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + ResponseType: aws.String(d.Get("response_type").(string)), + }) - if ok && apigatewayErr.Code() == "NotFoundException" { - return nil - } + if isAWSErr(err, "NotFoundException", "") { + return nil + } - return resource.NonRetryableError(err) - }) + if err != nil { + return fmt.Errorf("Error deleting API Gateway gateway response: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go index f2913f4e372..85141e687a1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsApiGatewayIntegration() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go index e367488be26..dee9d98e4f0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayIntegrationResponse() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go index 5e707459aa9..10b1be96043 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go @@ -9,7 +9,7 @@ import ( 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayMethod() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go index 538eabccbc6..19e09cd85cf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) var resourceAwsApiGatewayMethodResponseMutex = &sync.Mutex{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go index daa2ea15b9e..2755e1610c0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayMethodSettings() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go index 7d59e6c7cac..e7d9453494a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go @@ -4,13 +4,11 @@ import ( "fmt" "log" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayModel() *schema.Resource { @@ -172,26 +170,20 @@ func resourceAwsApiGatewayModelUpdate(d *schema.ResourceData, meta interface{}) func resourceAwsApiGatewayModelDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Deleting API Gateway Model: %s", d.Id()) + input := &apigateway.DeleteModelInput{ + ModelName: aws.String(d.Get("name").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + } - return 
resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] schema is %#v", d) - _, err := conn.DeleteModel(&apigateway.DeleteModelInput{ - ModelName: aws.String(d.Get("name").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - }) - if err == nil { - return nil - } + log.Printf("[DEBUG] schema is %#v", d) + _, err := conn.DeleteModel(input) - apigatewayErr, ok := err.(awserr.Error) - if apigatewayErr.Code() == "NotFoundException" { - return nil - } - - if !ok { - return resource.NonRetryableError(err) - } + if isAWSErr(err, "NotFoundException", "") { + return nil + } - return resource.NonRetryableError(err) - }) + if err != nil { + return fmt.Errorf("Error deleting API gateway model: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_request_validator.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_request_validator.go index 52fb872822b..7a6252e8347 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_request_validator.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_request_validator.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayRequestValidator() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go index 37eace97154..4b12dc40a77 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go @@ -4,13 +4,11 @@ import ( "fmt" "log" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayResource() *schema.Resource { @@ -145,20 +143,18 @@ func resourceAwsApiGatewayResourceDelete(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Deleting API Gateway Resource: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] schema is %#v", d) - _, err := conn.DeleteResource(&apigateway.DeleteResourceInput{ - ResourceId: aws.String(d.Id()), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - }) - if err == nil { - return nil - } + log.Printf("[DEBUG] schema is %#v", d) + _, err := conn.DeleteResource(&apigateway.DeleteResourceInput{ + ResourceId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + }) - if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { - return nil - } + if isAWSErr(err, apigateway.ErrCodeNotFoundException, 
"") { + return nil + } - return resource.NonRetryableError(err) - }) + if err != nil { + return fmt.Errorf("Error deleting API Gateway Resource: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go index af35779c466..fac0cc06d52 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsApiGatewayRestApi() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go index b3b1275db48..8ab4f5c70d2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go @@ -10,8 +10,10 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsApiGatewayStage() *schema.Resource { @@ -64,6 +66,16 @@ func resourceAwsApiGatewayStage() *schema.Resource { "cache_cluster_size": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + apigateway.CacheClusterSize05, + apigateway.CacheClusterSize16, + apigateway.CacheClusterSize61, + apigateway.CacheClusterSize118, + apigateway.CacheClusterSize135, + apigateway.CacheClusterSize237, + apigateway.CacheClusterSize284, + apigateway.CacheClusterSize582, + }, true), }, "client_certificate_id": { Type: schema.TypeString, @@ -108,6 +120,10 @@ func resourceAwsApiGatewayStage() *schema.Resource { Type: schema.TypeBool, Optional: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -148,12 +164,8 @@ func resourceAwsApiGatewayStageCreate(d *schema.ResourceData, meta interface{}) } input.Variables = aws.StringMap(variables) } - if vars, ok := d.GetOk("tags"); ok { - newMap := make(map[string]string, len(vars.(map[string]interface{}))) - for k, v := range vars.(map[string]interface{}) { - newMap[k] = v.(string) - } - input.Tags = aws.StringMap(newMap) + if v, ok := d.GetOk("tags"); 
ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().ApigatewayTags() } out, err := conn.CreateStage(&input) @@ -170,14 +182,14 @@ func resourceAwsApiGatewayStageCreate(d *schema.ResourceData, meta interface{}) d.SetPartial("variables") d.SetPartial("xray_tracing_enabled") - if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" { + if waitForCache && *out.CacheClusterStatus != apigateway.CacheClusterStatusNotAvailable { stateConf := &resource.StateChangeConf{ Pending: []string{ - "CREATE_IN_PROGRESS", - "DELETE_IN_PROGRESS", - "FLUSH_IN_PROGRESS", + apigateway.CacheClusterStatusCreateInProgress, + apigateway.CacheClusterStatusDeleteInProgress, + apigateway.CacheClusterStatusFlushInProgress, }, - Target: []string{"AVAILABLE"}, + Target: []string{apigateway.CacheClusterStatusAvailable}, Refresh: apiGatewayStageCacheRefreshFunc(conn, d.Get("rest_api_id").(string), d.Get("stage_name").(string)), @@ -215,7 +227,7 @@ func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) er } stage, err := conn.GetStage(&input) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { log.Printf("[WARN] API Gateway Stage (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -230,7 +242,7 @@ func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) er d.Set("client_certificate_id", stage.ClientCertificateId) - if stage.CacheClusterStatus != nil && *stage.CacheClusterStatus == "DELETE_IN_PROGRESS" { + if stage.CacheClusterStatus != nil && *stage.CacheClusterStatus == apigateway.CacheClusterStatusDeleteInProgress { d.Set("cache_cluster_enabled", false) d.Set("cache_cluster_size", nil) } else { @@ -243,10 +255,18 @@ func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) er d.Set("documentation_version", stage.DocumentationVersion) d.Set("xray_tracing_enabled", stage.TracingEnabled) - if err := d.Set("tags", aws.StringValueMap(stage.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(stage.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } + stageArn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Service: "apigateway", + Resource: fmt.Sprintf("/restapis/%s/stages/%s", d.Get("rest_api_id").(string), d.Get("stage_name").(string)), + }.String() + d.Set("arn", stageArn) + if err := d.Set("variables", aws.StringValueMap(stage.Variables)); err != nil { return fmt.Errorf("error setting variables: %s", err) } @@ -277,10 +297,12 @@ func resourceAwsApiGatewayStageUpdate(d *schema.ResourceData, meta interface{}) Service: "apigateway", Resource: fmt.Sprintf("/restapis/%s/stages/%s", d.Get("rest_api_id").(string), d.Get("stage_name").(string)), }.String() - if tagErr := setTagsAPIGatewayStage(conn, d, stageArn); tagErr != nil { - return tagErr + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.ApigatewayUpdateTags(conn, stageArn, o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } } - d.SetPartial("tags") operations := make([]*apigateway.PatchOperation, 0) waitForCache := false @@ -380,17 +402,17 @@ func resourceAwsApiGatewayStageUpdate(d *schema.ResourceData, meta interface{}) d.SetPartial("xray_tracing_enabled") d.SetPartial("variables") - if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" { + if 
waitForCache && *out.CacheClusterStatus != apigateway.CacheClusterStatusNotAvailable { stateConf := &resource.StateChangeConf{ Pending: []string{ - "CREATE_IN_PROGRESS", - "FLUSH_IN_PROGRESS", + apigateway.CacheClusterStatusCreateInProgress, + apigateway.CacheClusterStatusFlushInProgress, }, Target: []string{ - "AVAILABLE", + apigateway.CacheClusterStatusAvailable, // There's an AWS API bug (raised & confirmed in Sep 2016 by support) // which causes the stage to remain in deletion state forever - "DELETE_IN_PROGRESS", + apigateway.CacheClusterStatusDeleteInProgress, }, Refresh: apiGatewayStageCacheRefreshFunc(conn, d.Get("rest_api_id").(string), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go index b2a0098eb62..257e40311a6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go @@ -5,14 +5,12 @@ import ( "fmt" "log" "strconv" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsApiGatewayUsagePlan() *schema.Resource { @@ -237,7 +235,7 @@ func resourceAwsApiGatewayUsagePlanRead(d *schema.ResourceData, meta interface{} UsagePlanId: aws.String(d.Id()), }) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { log.Printf("[WARN] API Gateway Usage Plan (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -491,15 +489,14 @@ func resourceAwsApiGatewayUsagePlanDelete(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Deleting API Gateway Usage Plan: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - }) + _, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + }) - if err == nil { - return nil - } + if err != nil { + return fmt.Errorf("Error deleting API gateway usage plan: %s", err) + } + + return nil - return resource.NonRetryableError(err) - }) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go index b9218b6a0d7..7900f1583a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go @@ -3,13 +3,11 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsApiGatewayUsagePlanKey() *schema.Resource { @@ -97,19 +95,17 @@ func resourceAwsApiGatewayUsagePlanKeyDelete(d *schema.ResourceData, meta interf conn := meta.(*AWSClient).apigateway log.Printf("[DEBUG] Deleting API Gateway Usage Plan Key: %s", d.Id()) + _, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{ + UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), + KeyId: aws.String(d.Get("key_id").(string)), + }) + if isAWSErr(err, "NotFoundException", "") { + return nil + } + if err != nil { + return fmt.Errorf("Error deleting API Gateway usage plan key: %s", err) + } - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{ - UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), - KeyId: aws.String(d.Get("key_id").(string)), - }) - if err == nil { - return nil - } - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - return nil - } + return nil - return resource.NonRetryableError(err) - }) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_vpc_link.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_vpc_link.go index 2cfe0180079..3c484b1a5e2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_vpc_link.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_vpc_link.go @@ -6,9 +6,11 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsApiGatewayVpcLink() *schema.Resource { @@ -37,16 +39,23 @@ func resourceAwsApiGatewayVpcLink() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } func resourceAwsApiGatewayVpcLinkCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigateway + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().ApigatewayTags() input := &apigateway.CreateVpcLinkInput{ Name: aws.String(d.Get("name").(string)), TargetArns: expandStringList(d.Get("target_arns").(*schema.Set).List()), + Tags: tags, } if v, ok := d.GetOk("description"); ok { input.Description = aws.String(v.(string)) @@ -73,7 +82,7 @@ func resourceAwsApiGatewayVpcLinkCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error waiting for APIGateway Vpc Link status to be \"%s\": %s", apigateway.VpcLinkStatusAvailable, err) } - return nil + return resourceAwsApiGatewayVpcLinkRead(d, meta) } func resourceAwsApiGatewayVpcLinkRead(d *schema.ResourceData, meta interface{}) error { @@ -93,6 +102,18 @@ func resourceAwsApiGatewayVpcLinkRead(d 
*schema.ResourceData, meta interface{}) return err } + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(resp.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "apigateway", + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("/vpclinks/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("name", resp.Name) d.Set("description", resp.Description) d.Set("target_arns", flattenStringList(resp.TargetArns)) @@ -120,6 +141,13 @@ func resourceAwsApiGatewayVpcLinkUpdate(d *schema.ResourceData, meta interface{} }) } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.ApigatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + input := &apigateway.UpdateVpcLinkInput{ VpcLinkId: aws.String(d.Id()), PatchOperations: operations, @@ -148,7 +176,7 @@ func resourceAwsApiGatewayVpcLinkUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error waiting for APIGateway Vpc Link status to be \"%s\": %s", apigateway.VpcLinkStatusAvailable, err) } - return nil + return resourceAwsApiGatewayVpcLinkRead(d, meta) } func resourceAwsApiGatewayVpcLinkDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go index a44bd364ec4..36de9b7e3f0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAppCookieStickinessPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go index 9a3e495450c..08b364891cb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go @@ -6,12 +6,13 @@ import ( "strconv" "time" + "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "strings" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppautoscalingPolicy() *schema.Resource { @@ -277,6 +278,9 @@ func resourceAwsAppautoscalingPolicyCreate(d *schema.ResourceData, meta interfac } return nil }) + if 
isResourceTimeoutError(err) { + resp, err = conn.PutScalingPolicy(&params) + } if err != nil { return fmt.Errorf("Failed to create scaling policy: %s", err) } @@ -302,6 +306,9 @@ func resourceAwsAppautoscalingPolicyRead(d *schema.ResourceData, meta interface{ } return nil }) + if isResourceTimeoutError(err) { + p, err = getAwsAppautoscalingPolicy(d, meta) + } if err != nil { return fmt.Errorf("Failed to read scaling policy: %s", err) } @@ -353,6 +360,9 @@ func resourceAwsAppautoscalingPolicyUpdate(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutScalingPolicy(&params) + } if err != nil { return fmt.Errorf("Failed to update scaling policy: %s", err) } @@ -406,30 +416,53 @@ func resourceAwsAppautoscalingPolicyDelete(d *schema.ResourceData, meta interfac } func resourceAwsAppautoscalingPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - idParts := strings.Split(d.Id(), "/") - - if len(idParts) < 4 { + idParts, err := validateAppautoscalingPolicyImportInput(d.Id()) + if err != nil { return nil, fmt.Errorf("unexpected format (%q), expected <service-namespace>/<resource-id>/<scalable-dimension>/<policy-name>", d.Id()) } serviceNamespace := idParts[0] - resourceId := strings.Join(idParts[1:len(idParts)-2], "/") - scalableDimension := idParts[len(idParts)-2] - policyName := idParts[len(idParts)-1] - - if serviceNamespace == "" || resourceId == "" || scalableDimension == "" || policyName == "" { - return nil, fmt.Errorf("unexpected format (%q), expected <service-namespace>/<resource-id>/<scalable-dimension>/<policy-name>", d.Id()) - } + resourceId := idParts[1] + scalableDimension := idParts[2] + policyName := idParts[3] d.Set("service_namespace", serviceNamespace) d.Set("resource_id", resourceId) d.Set("scalable_dimension", scalableDimension) d.Set("name", policyName) d.SetId(policyName) - return []*schema.ResourceData{d}, nil } +func validateAppautoscalingPolicyImportInput(id string) ([]string, error) { + + idParts := strings.Split(id, "/") + if len(idParts) < 4 { + return nil, fmt.Errorf("unexpected format (%q), expected <service-namespace>/<resource-id>/<scalable-dimension>/<policy-name>", id) + } + + var serviceNamespace, resourceId, scalableDimension, policyName string + switch idParts[0] { + case "dynamodb": + serviceNamespace = idParts[0] + resourceId = strings.Join(idParts[1:3], "/") + scalableDimension = idParts[3] + policyName = strings.Join(idParts[4:], "/") + default: + serviceNamespace = idParts[0] + resourceId = strings.Join(idParts[1:len(idParts)-2], "/") + scalableDimension = idParts[len(idParts)-2] + policyName = idParts[len(idParts)-1] + + } + + if serviceNamespace == "" || resourceId == "" || scalableDimension == "" || policyName == "" { + return nil, fmt.Errorf("unexpected format (%q), expected <service-namespace>/<resource-id>/<scalable-dimension>/<policy-name>", id) + } + + return []string{serviceNamespace, resourceId, scalableDimension, policyName}, nil +} + // Takes the result of flatmap.Expand for an array of step adjustments and // returns a []*applicationautoscaling.StepAdjustment.
func expandAppautoscalingStepAdjustments(configured []interface{}) ([]*applicationautoscaling.StepAdjustment, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_scheduled_action.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_scheduled_action.go index 03ac1156b92..f0372cfd50c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_scheduled_action.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_scheduled_action.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) const awsAppautoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z" @@ -133,9 +133,12 @@ func resourceAwsAppautoscalingScheduledActionPut(d *schema.ResourceData, meta in } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutScheduledAction(input) + } if err != nil { - return err + return fmt.Errorf("Error putting scheduled action: %s", err) } d.SetId(d.Get("name").(string) + "-" + d.Get("service_namespace").(string) + "-" + d.Get("resource_id").(string)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go index 7fdaefca66d..8fbe049b473 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go @@ -5,8 +5,8 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -87,6 +87,10 @@ func resourceAwsAppautoscalingTargetPut(d *schema.ResourceData, meta interface{} return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RegisterScalableTarget(&targetOpts) + } + if err != nil { return fmt.Errorf("Error creating application autoscaling target: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_mesh.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_mesh.go index 0ad5679d70a..4fe9017c414 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_mesh.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_mesh.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appmesh" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppmeshMesh() *schema.Resource { @@ -74,6 +74,8 @@ func resourceAwsAppmeshMesh() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags": tagsSchema(), }, } } @@ -85,6 +87,7 @@ func resourceAwsAppmeshMeshCreate(d *schema.ResourceData, meta interface{}) erro req := &appmesh.CreateMeshInput{ MeshName: aws.String(meshName), Spec: expandAppmeshMeshSpec(d.Get("spec").([]interface{})), + Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating App Mesh service mesh: %#v", req) @@ -104,7 +107,7 @@ func resourceAwsAppmeshMeshRead(d *schema.ResourceData, meta interface{}) error resp, err := conn.DescribeMesh(&appmesh.DescribeMeshInput{ MeshName: aws.String(d.Id()), }) - if isAWSErr(err, "NotFoundException", "") { + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { log.Printf("[WARN] App Mesh service mesh (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -127,6 +130,16 @@ func resourceAwsAppmeshMeshRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting spec: %s", err) } + err = saveTagsAppmesh(conn, d, aws.StringValue(resp.Mesh.Metadata.Arn)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh service mesh (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error saving tags: %s", err) + } + return nil } @@ -147,6 +160,16 @@ func resourceAwsAppmeshMeshUpdate(d *schema.ResourceData, meta interface{}) erro } } + err := setTagsAppmesh(conn, d, d.Get("arn").(string)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh service mesh (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return resourceAwsAppmeshMeshRead(d, meta) } @@ -157,7 +180,7 @@ func resourceAwsAppmeshMeshDelete(d *schema.ResourceData, meta interface{}) erro _, err := conn.DeleteMesh(&appmesh.DeleteMeshInput{ MeshName: aws.String(d.Id()), }) - if isAWSErr(err, "NotFoundException", "") { + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { return nil } if err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_route.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_route.go index 2acd03d455f..58631d0088a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_route.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_route.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appmesh" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppmeshRoute() *schema.Resource { @@ -175,6 +175,8 @@ func resourceAwsAppmeshRoute() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags": tagsSchema(), }, } } @@ 
-187,6 +189,7 @@ func resourceAwsAppmeshRouteCreate(d *schema.ResourceData, meta interface{}) err RouteName: aws.String(d.Get("name").(string)), VirtualRouterName: aws.String(d.Get("virtual_router_name").(string)), Spec: expandAppmeshRouteSpec(d.Get("spec").([]interface{})), + Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating App Mesh route: %#v", req) @@ -233,6 +236,16 @@ func resourceAwsAppmeshRouteRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting spec: %s", err) } + err = saveTagsAppmesh(conn, d, aws.StringValue(resp.Route.Metadata.Arn)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh route (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error saving tags: %s", err) + } + return nil } @@ -255,6 +268,16 @@ func resourceAwsAppmeshRouteUpdate(d *schema.ResourceData, meta interface{}) err } } + err := setTagsAppmesh(conn, d, d.Get("arn").(string)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh route (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return resourceAwsAppmeshRouteRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node.go index 9e2e0e33359..fdabb951904 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appmesh" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppmeshVirtualNode() *schema.Resource { @@ -218,11 +218,40 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "aws_cloud_map": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + ConflictsWith: []string{"spec.0.service_discovery.0.dns"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "namespace_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateServiceDiscoveryHttpNamespaceName, + }, + + "service_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "dns": { - Type: schema.TypeList, - Required: true, - MinItems: 1, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + ConflictsWith: []string{"spec.0.service_discovery.0.aws_cloud_map"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "service_name": { @@ -261,6 +290,8 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + 
"tags": tagsSchema(), }, } } @@ -272,6 +303,7 @@ func resourceAwsAppmeshVirtualNodeCreate(d *schema.ResourceData, meta interface{ MeshName: aws.String(d.Get("mesh_name").(string)), VirtualNodeName: aws.String(d.Get("name").(string)), Spec: expandAppmeshVirtualNodeSpec(d.Get("spec").([]interface{})), + Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating App Mesh virtual node: %#v", req) @@ -316,6 +348,16 @@ func resourceAwsAppmeshVirtualNodeRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error setting spec: %s", err) } + err = saveTagsAppmesh(conn, d, aws.StringValue(resp.VirtualNode.Metadata.Arn)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual node (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error saving tags: %s", err) + } + return nil } @@ -337,6 +379,16 @@ func resourceAwsAppmeshVirtualNodeUpdate(d *schema.ResourceData, meta interface{ } } + err := setTagsAppmesh(conn, d, d.Get("arn").(string)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual node (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return resourceAwsAppmeshVirtualNodeRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node_migrate.go index eb71da9efdd..dde6d59766a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_node_migrate.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsAppmeshVirtualNodeMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router.go index 66742a11f19..05c75b451f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appmesh" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppmeshVirtualRouter() *schema.Resource { @@ -110,6 +110,8 @@ func resourceAwsAppmeshVirtualRouter() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags": tagsSchema(), }, } } @@ -121,6 
+123,7 @@ func resourceAwsAppmeshVirtualRouterCreate(d *schema.ResourceData, meta interfac MeshName: aws.String(d.Get("mesh_name").(string)), VirtualRouterName: aws.String(d.Get("name").(string)), Spec: expandAppmeshVirtualRouterSpec(d.Get("spec").([]interface{})), + Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating App Mesh virtual router: %#v", req) @@ -165,6 +168,16 @@ func resourceAwsAppmeshVirtualRouterRead(d *schema.ResourceData, meta interface{ return fmt.Errorf("error setting spec: %s", err) } + err = saveTagsAppmesh(conn, d, aws.StringValue(resp.VirtualRouter.Metadata.Arn)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual router (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error saving tags: %s", err) + } + return nil } @@ -186,6 +199,16 @@ func resourceAwsAppmeshVirtualRouterUpdate(d *schema.ResourceData, meta interfac } } + err := setTagsAppmesh(conn, d, d.Get("arn").(string)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual router (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return resourceAwsAppmeshVirtualRouterRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router_migrate.go index be10fb90b89..8b4f82e4336 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_router_migrate.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsAppmeshVirtualRouterMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_service.go index 5953eac82ae..a4c0ae95d99 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appmesh_virtual_service.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appmesh" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppmeshVirtualService() *schema.Resource { @@ -105,6 +105,8 @@ func resourceAwsAppmeshVirtualService() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags": tagsSchema(), }, } } @@ -116,6 +118,7 @@ func resourceAwsAppmeshVirtualServiceCreate(d *schema.ResourceData, meta interfa MeshName: 
aws.String(d.Get("mesh_name").(string)), VirtualServiceName: aws.String(d.Get("name").(string)), Spec: expandAppmeshVirtualServiceSpec(d.Get("spec").([]interface{})), + Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating App Mesh virtual service: %#v", req) @@ -160,6 +163,16 @@ func resourceAwsAppmeshVirtualServiceRead(d *schema.ResourceData, meta interface return fmt.Errorf("error setting spec: %s", err) } + err = saveTagsAppmesh(conn, d, aws.StringValue(resp.VirtualService.Metadata.Arn)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual service (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error saving tags: %s", err) + } + return nil } @@ -181,6 +194,16 @@ func resourceAwsAppmeshVirtualServiceUpdate(d *schema.ResourceData, meta interfa } } + err := setTagsAppmesh(conn, d, d.Get("arn").(string)) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { + log.Printf("[WARN] App Mesh virtual service (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return resourceAwsAppmeshVirtualServiceRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_api_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_api_key.go index fd519e348ac..63d8f753e8d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_api_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_api_key.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppsyncApiKey() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_datasource.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_datasource.go index 5fdce8b9b2e..befa9b0b238 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_datasource.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_datasource.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppsyncDatasource() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_function.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_function.go new file mode 100644 index 00000000000..ea9912cb752 --- /dev/null +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_function.go @@ -0,0 +1,214 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/appsync" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsAppsyncFunction() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAppsyncFunctionCreate, + Read: resourceAwsAppsyncFunctionRead, + Update: resourceAwsAppsyncFunctionUpdate, + Delete: resourceAwsAppsyncFunctionDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "data_source": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`[_A-Za-z][_0-9A-Za-z]*`).MatchString(value) { + errors = append(errors, fmt.Errorf("%q must match [_A-Za-z][_0-9A-Za-z]*", k)) + } + return + }, + }, + "request_mapping_template": { + Type: schema.TypeString, + Required: true, + }, + "response_mapping_template": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "function_version": { + Type: schema.TypeString, + Optional: true, + Default: "2018-05-29", + ValidateFunc: validation.StringInSlice([]string{ + "2018-05-29", + }, true), + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "function_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsAppsyncFunctionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appsyncconn + + apiID := d.Get("api_id").(string) + + input := &appsync.CreateFunctionInput{ + ApiId: aws.String(apiID), + DataSourceName: aws.String(d.Get("data_source").(string)), + FunctionVersion: aws.String(d.Get("function_version").(string)), + Name: aws.String(d.Get("name").(string)), + RequestMappingTemplate: aws.String(d.Get("request_mapping_template").(string)), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("response_mapping_template"); ok { + input.ResponseMappingTemplate = aws.String(v.(string)) + } + + resp, err := conn.CreateFunction(input) + if err != nil { + return fmt.Errorf("Error creating AppSync Function: %s", err) + } + + d.SetId(fmt.Sprintf("%s-%s", apiID, aws.StringValue(resp.FunctionConfiguration.FunctionId))) + + return resourceAwsAppsyncFunctionRead(d, meta) +} + +func resourceAwsAppsyncFunctionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appsyncconn + + apiID, functionID, err := decodeAppsyncFunctionID(d.Id()) + if err != nil { + return err + } + + input := &appsync.GetFunctionInput{ + ApiId: aws.String(apiID), + FunctionId: aws.String(functionID), + } + + resp, err := conn.GetFunction(input) + if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { + log.Printf("[WARN] No such entity found for Appsync Function (%s)", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error getting AppSync Function %s: %s", d.Id(), err) + } + + d.Set("api_id", apiID) + d.Set("function_id", functionID) + d.Set("data_source", 
aws.StringValue(resp.FunctionConfiguration.DataSourceName)) + d.Set("description", aws.StringValue(resp.FunctionConfiguration.Description)) + d.Set("arn", aws.StringValue(resp.FunctionConfiguration.FunctionArn)) + d.Set("function_version", aws.StringValue(resp.FunctionConfiguration.FunctionVersion)) + d.Set("name", aws.StringValue(resp.FunctionConfiguration.Name)) + d.Set("request_mapping_template", aws.StringValue(resp.FunctionConfiguration.RequestMappingTemplate)) + d.Set("response_mapping_template", aws.StringValue(resp.FunctionConfiguration.ResponseMappingTemplate)) + + return nil +} + +func resourceAwsAppsyncFunctionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appsyncconn + + apiID, functionID, err := decodeAppsyncFunctionID(d.Id()) + if err != nil { + return err + } + + input := &appsync.UpdateFunctionInput{ + ApiId: aws.String(apiID), + DataSourceName: aws.String(d.Get("data_source").(string)), + FunctionId: aws.String(functionID), + FunctionVersion: aws.String(d.Get("function_version").(string)), + Name: aws.String(d.Get("name").(string)), + RequestMappingTemplate: aws.String(d.Get("request_mapping_template").(string)), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("response_mapping_template"); ok { + input.ResponseMappingTemplate = aws.String(v.(string)) + } + + _, err = conn.UpdateFunction(input) + if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { + log.Printf("[WARN] No such entity found for Appsync Function (%s)", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error updating AppSync Function %s: %s", d.Id(), err) + } + + return resourceAwsAppsyncFunctionRead(d, meta) +} + +func resourceAwsAppsyncFunctionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appsyncconn + + apiID, functionID, err := decodeAppsyncFunctionID(d.Id()) + if err != nil { + return err + } + + input := &appsync.DeleteFunctionInput{ + ApiId: aws.String(apiID), + FunctionId: aws.String(functionID), + } + + _, err = conn.DeleteFunction(input) + if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error deleting AppSync Function %s: %s", d.Id(), err) + } + + return nil +} + +func decodeAppsyncFunctionID(id string) (string, string, error) { + idParts := strings.SplitN(id, "-", 2) + if len(idParts) != 2 { + return "", "", fmt.Errorf("expected ID in format ApiID-FunctionID, received: %s", id) + } + return idParts[0], idParts[1], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_graphql_api.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_graphql_api.go index 8f16d50024d..1542152b95e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_graphql_api.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_graphql_api.go @@ -7,11 +7,18 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) +var validAppsyncAuthTypes = []string{ + appsync.AuthenticationTypeApiKey, + appsync.AuthenticationTypeAwsIam, + appsync.AuthenticationTypeAmazonCognitoUserPools, + appsync.AuthenticationTypeOpenidConnect, +} + func resourceAwsAppsyncGraphqlApi() *schema.Resource { return &schema.Resource{ Create: resourceAwsAppsyncGraphqlApiCreate, @@ -24,15 +31,70 @@ func resourceAwsAppsyncGraphqlApi() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "additional_authentication_provider": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authentication_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(validAppsyncAuthTypes, false), + }, + "openid_connect_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_ttl": { + Type: schema.TypeInt, + Optional: true, + }, + "client_id": { + Type: schema.TypeString, + Optional: true, + }, + "iat_ttl": { + Type: schema.TypeInt, + Optional: true, + }, + "issuer": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "user_pool_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_id_client_regex": { + Type: schema.TypeString, + Optional: true, + }, + "aws_region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "user_pool_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, "authentication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - appsync.AuthenticationTypeApiKey, - appsync.AuthenticationTypeAwsIam, - appsync.AuthenticationTypeAmazonCognitoUserPools, - appsync.AuthenticationTypeOpenidConnect, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(validAppsyncAuthTypes, false), }, "schema": { Type: schema.TypeString, @@ -160,13 +222,17 @@ func resourceAwsAppsyncGraphqlApiCreate(d *schema.ResourceData, meta interface{} input.UserPoolConfig = expandAppsyncGraphqlApiUserPoolConfig(v.([]interface{}), meta.(*AWSClient).region) } + if v, ok := d.GetOk("additional_authentication_provider"); ok { + input.AdditionalAuthenticationProviders = expandAppsyncGraphqlApiAdditionalAuthProviders(v.([]interface{}), meta.(*AWSClient).region) + } + if v, ok := d.GetOk("tags"); ok { input.Tags = tagsFromMapGeneric(v.(map[string]interface{})) } resp, err := conn.CreateGraphqlApi(input) if err != nil { - return fmt.Errorf("error creating AppSync GraphQL API: %d", err) + return fmt.Errorf("error creating AppSync GraphQL API: %s", err) } d.SetId(*resp.GraphqlApi.ApiId) @@ -186,13 +252,15 @@ func resourceAwsAppsyncGraphqlApiRead(d *schema.ResourceData, meta interface{}) } resp, err := conn.GetGraphqlApi(input) + + if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { + log.Printf("[WARN] No such entity found for Appsync Graphql API (%s)", d.Id()) + d.SetId("") + return nil + } + if err != nil { - if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { - log.Printf("[WARN] No such entity found for Appsync Graphql API (%s)", d.Id()) - d.SetId("") - return nil - } - return err + return fmt.Errorf("error getting AppSync GraphQL API (%s): %s", d.Id(), err) } d.Set("arn", resp.GraphqlApi.Arn) @@ -211,6 +279,10 @@ 
func resourceAwsAppsyncGraphqlApiRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error setting user_pool_config: %s", err) } + if err := d.Set("additional_authentication_provider", flattenAppsyncGraphqlApiAdditionalAuthenticationProviders(resp.GraphqlApi.AdditionalAuthenticationProviders)); err != nil { + return fmt.Errorf("error setting additional_authentication_provider: %s", err) + } + if err := d.Set("uris", aws.StringValueMap(resp.GraphqlApi.Uris)); err != nil { return fmt.Errorf("error setting uris: %s", err) } @@ -248,9 +320,13 @@ func resourceAwsAppsyncGraphqlApiUpdate(d *schema.ResourceData, meta interface{} input.UserPoolConfig = expandAppsyncGraphqlApiUserPoolConfig(v.([]interface{}), meta.(*AWSClient).region) } + if v, ok := d.GetOk("additional_authentication_provider"); ok { + input.AdditionalAuthenticationProviders = expandAppsyncGraphqlApiAdditionalAuthProviders(v.([]interface{}), meta.(*AWSClient).region) + } + _, err := conn.UpdateGraphqlApi(input) if err != nil { - return err + return fmt.Errorf("error updating AppSync GraphQL API (%s): %s", d.Id(), err) } if d.HasChange("schema") { @@ -269,11 +345,13 @@ func resourceAwsAppsyncGraphqlApiDelete(d *schema.ResourceData, meta interface{} ApiId: aws.String(d.Id()), } _, err := conn.DeleteGraphqlApi(input) + + if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { + return nil + } + if err != nil { - if isAWSErr(err, appsync.ErrCodeNotFoundException, "") { - return nil - } - return err + return fmt.Errorf("error deleting AppSync GraphQL API (%s): %s", d.Id(), err) } return nil @@ -344,6 +422,59 @@ func expandAppsyncGraphqlApiUserPoolConfig(l []interface{}, currentRegion string return userPoolConfig } +func expandAppsyncGraphqlApiAdditionalAuthProviders(items []interface{}, currentRegion string) []*appsync.AdditionalAuthenticationProvider { + if len(items) < 1 { + return nil + } + + additionalAuthProviders := make([]*appsync.AdditionalAuthenticationProvider, 0, len(items)) + for _, l := range items { + if l == nil { + continue + } + + m := l.(map[string]interface{}) + additionalAuthProvider := &appsync.AdditionalAuthenticationProvider{ + AuthenticationType: aws.String(m["authentication_type"].(string)), + } + + if v, ok := m["openid_connect_config"]; ok { + additionalAuthProvider.OpenIDConnectConfig = expandAppsyncGraphqlApiOpenIDConnectConfig(v.([]interface{})) + } + + if v, ok := m["user_pool_config"]; ok { + additionalAuthProvider.UserPoolConfig = expandAppsyncGraphqlApiCognitoUserPoolConfig(v.([]interface{}), currentRegion) + } + + additionalAuthProviders = append(additionalAuthProviders, additionalAuthProvider) + } + + return additionalAuthProviders +} + +func expandAppsyncGraphqlApiCognitoUserPoolConfig(l []interface{}, currentRegion string) *appsync.CognitoUserPoolConfig { + if len(l) < 1 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + userPoolConfig := &appsync.CognitoUserPoolConfig{ + AwsRegion: aws.String(currentRegion), + UserPoolId: aws.String(m["user_pool_id"].(string)), + } + + if v, ok := m["app_id_client_regex"].(string); ok && v != "" { + userPoolConfig.AppIdClientRegex = aws.String(v) + } + + if v, ok := m["aws_region"].(string); ok && v != "" { + userPoolConfig.AwsRegion = aws.String(v) + } + + return userPoolConfig +} + func flattenAppsyncGraphqlApiLogConfig(logConfig *appsync.LogConfig) []interface{} { if logConfig == nil { return []interface{}{} @@ -390,6 +521,40 @@ func flattenAppsyncGraphqlApiUserPoolConfig(userPoolConfig *appsync.UserPoolConf return 
[]interface{}{m} } +func flattenAppsyncGraphqlApiAdditionalAuthenticationProviders(additionalAuthenticationProviders []*appsync.AdditionalAuthenticationProvider) []interface{} { + if len(additionalAuthenticationProviders) == 0 { + return []interface{}{} + } + + result := make([]interface{}, len(additionalAuthenticationProviders)) + for i, provider := range additionalAuthenticationProviders { + result[i] = map[string]interface{}{ + "authentication_type": aws.StringValue(provider.AuthenticationType), + "openid_connect_config": flattenAppsyncGraphqlApiOpenIDConnectConfig(provider.OpenIDConnectConfig), + "user_pool_config": flattenAppsyncGraphqlApiCognitoUserPoolConfig(provider.UserPoolConfig), + } + } + + return result +} + +func flattenAppsyncGraphqlApiCognitoUserPoolConfig(userPoolConfig *appsync.CognitoUserPoolConfig) []interface{} { + if userPoolConfig == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "aws_region": aws.StringValue(userPoolConfig.AwsRegion), + "user_pool_id": aws.StringValue(userPoolConfig.UserPoolId), + } + + if userPoolConfig.AppIdClientRegex != nil { + m["app_id_client_regex"] = aws.StringValue(userPoolConfig.AppIdClientRegex) + } + + return []interface{}{m} +} + func resourceAwsAppsyncSchemaPut(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).appsyncconn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_resolver.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_resolver.go index 9217fe269d5..470f47ef3eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_resolver.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appsync_resolver.go @@ -7,7 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAppsyncResolver() *schema.Resource { @@ -38,8 +39,9 @@ func resourceAwsAppsyncResolver() *schema.Resource { ForceNew: true, }, "data_source": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"pipeline_config"}, }, "request_template": { Type: schema.TypeString, @@ -49,6 +51,32 @@ func resourceAwsAppsyncResolver() *schema.Resource { Type: schema.TypeString, Required: true, // documentation bug, the api returns 400 if this is not specified. 
}, + "kind": { + Type: schema.TypeString, + Optional: true, + Default: appsync.ResolverKindUnit, + ValidateFunc: validation.StringInSlice([]string{ + appsync.ResolverKindUnit, + appsync.ResolverKindPipeline, + }, true), + }, + "pipeline_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_source"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "functions": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, "arn": { Type: schema.TypeString, Computed: true, @@ -62,11 +90,22 @@ func resourceAwsAppsyncResolverCreate(d *schema.ResourceData, meta interface{}) input := &appsync.CreateResolverInput{ ApiId: aws.String(d.Get("api_id").(string)), - DataSourceName: aws.String(d.Get("data_source").(string)), TypeName: aws.String(d.Get("type").(string)), FieldName: aws.String(d.Get("field").(string)), RequestMappingTemplate: aws.String(d.Get("request_template").(string)), ResponseMappingTemplate: aws.String(d.Get("response_template").(string)), + Kind: aws.String(d.Get("kind").(string)), + } + + if v, ok := d.GetOk("data_source"); ok { + input.DataSourceName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("pipeline_config"); ok { + config := v.([]interface{})[0].(map[string]interface{}) + input.PipelineConfig = &appsync.PipelineConfig{ + Functions: expandStringList(config["functions"].([]interface{})), + } } mutexKey := fmt.Sprintf("appsync-schema-%s", d.Get("api_id").(string)) @@ -120,6 +159,11 @@ func resourceAwsAppsyncResolverRead(d *schema.ResourceData, meta interface{}) er d.Set("data_source", resp.Resolver.DataSourceName) d.Set("request_template", resp.Resolver.RequestMappingTemplate) d.Set("response_template", resp.Resolver.ResponseMappingTemplate) + d.Set("kind", resp.Resolver.Kind) + + if err := d.Set("pipeline_config", flattenAppsyncPipelineConfig(resp.Resolver.PipelineConfig)); err != nil { + return fmt.Errorf("Error setting pipeline_config: %s", err) + } return nil } @@ -129,11 +173,22 @@ func resourceAwsAppsyncResolverUpdate(d *schema.ResourceData, meta interface{}) input := &appsync.UpdateResolverInput{ ApiId: aws.String(d.Get("api_id").(string)), - DataSourceName: aws.String(d.Get("data_source").(string)), FieldName: aws.String(d.Get("field").(string)), TypeName: aws.String(d.Get("type").(string)), RequestMappingTemplate: aws.String(d.Get("request_template").(string)), ResponseMappingTemplate: aws.String(d.Get("response_template").(string)), + Kind: aws.String(d.Get("kind").(string)), + } + + if v, ok := d.GetOk("data_source"); ok { + input.DataSourceName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("pipeline_config"); ok { + config := v.([]interface{})[0].(map[string]interface{}) + input.PipelineConfig = &appsync.PipelineConfig{ + Functions: expandStringList(config["functions"].([]interface{})), + } } mutexKey := fmt.Sprintf("appsync-schema-%s", d.Get("api_id").(string)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_database.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_database.go index f440bcd47bb..34b29b46ab3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_database.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_database.go @@ -6,12 +6,12 @@ 
import ( "strings" "time" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/athena" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAthenaDatabase() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_named_query.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_named_query.go index e5bd43f2846..bf5c3e94323 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_named_query.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_named_query.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/athena" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAthenaNamedQuery() *schema.Resource { @@ -29,6 +29,12 @@ func resourceAwsAthenaNamedQuery() *schema.Resource { Required: true, ForceNew: true, }, + "workgroup": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "primary", + }, "database": { Type: schema.TypeString, Required: true, @@ -51,6 +57,9 @@ func resourceAwsAthenaNamedQueryCreate(d *schema.ResourceData, meta interface{}) Name: aws.String(d.Get("name").(string)), QueryString: aws.String(d.Get("query").(string)), } + if raw, ok := d.GetOk("workgroup"); ok { + input.WorkGroup = aws.String(raw.(string)) + } if raw, ok := d.GetOk("description"); ok { input.Description = aws.String(raw.(string)) } @@ -82,6 +91,7 @@ func resourceAwsAthenaNamedQueryRead(d *schema.ResourceData, meta interface{}) e d.Set("name", resp.NamedQuery.Name) d.Set("query", resp.NamedQuery.QueryString) + d.Set("workgroup", resp.NamedQuery.WorkGroup) d.Set("database", resp.NamedQuery.Database) d.Set("description", resp.NamedQuery.Description) return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_workgroup.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_workgroup.go new file mode 100644 index 00000000000..10ed951e383 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_athena_workgroup.go @@ -0,0 +1,434 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/athena" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsAthenaWorkgroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAthenaWorkgroupCreate, + Read: resourceAwsAthenaWorkgroupRead, + Update: resourceAwsAthenaWorkgroupUpdate, + Delete: resourceAwsAthenaWorkgroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + 
"arn": { + Type: schema.TypeString, + Computed: true, + }, + "configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bytes_scanned_cutoff_per_query": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.Any( + validation.IntAtLeast(10485760), + validation.IntInSlice([]int{0}), + ), + }, + "enforce_workgroup_configuration": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "publish_cloudwatch_metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "result_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encryption_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encryption_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + athena.EncryptionOptionCseKms, + athena.EncryptionOptionSseKms, + athena.EncryptionOptionSseS3, + }, false), + }, + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + "output_location": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexp.MustCompile(`^[a-zA-z0-9._-]+$`), "must contain only alphanumeric characters, periods, underscores, and hyphens"), + ), + }, + "state": { + Type: schema.TypeString, + Optional: true, + Default: athena.WorkGroupStateEnabled, + ValidateFunc: validation.StringInSlice([]string{ + athena.WorkGroupStateDisabled, + athena.WorkGroupStateEnabled, + }, false), + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsAthenaWorkgroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).athenaconn + + name := d.Get("name").(string) + + input := &athena.CreateWorkGroupInput{ + Configuration: expandAthenaWorkGroupConfiguration(d.Get("configuration").([]interface{})), + Name: aws.String(name), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + // Prevent the below error: + // InvalidRequestException: Tags provided upon WorkGroup creation must not be empty + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + input.Tags = keyvaluetags.New(v).IgnoreAws().AthenaTags() + } + + _, err := conn.CreateWorkGroup(input) + + if err != nil { + return fmt.Errorf("error creating Athena WorkGroup: %s", err) + } + + d.SetId(name) + + if v := d.Get("state").(string); v == athena.WorkGroupStateDisabled { + input := &athena.UpdateWorkGroupInput{ + State: aws.String(v), + WorkGroup: aws.String(d.Id()), + } + + if _, err := conn.UpdateWorkGroup(input); err != nil { + return fmt.Errorf("error disabling Athena WorkGroup (%s): %s", d.Id(), err) + } + } + + return resourceAwsAthenaWorkgroupRead(d, meta) +} + +func resourceAwsAthenaWorkgroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).athenaconn + + input := &athena.GetWorkGroupInput{ + WorkGroup: aws.String(d.Id()), + } + + resp, err := 
conn.GetWorkGroup(input) + + if isAWSErr(err, athena.ErrCodeInvalidRequestException, "is not found") { + log.Printf("[WARN] Athena WorkGroup (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Athena WorkGroup (%s): %s", d.Id(), err) + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Service: "athena", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("workgroup/%s", d.Id()), + } + + d.Set("arn", arn.String()) + d.Set("description", resp.WorkGroup.Description) + + if err := d.Set("configuration", flattenAthenaWorkGroupConfiguration(resp.WorkGroup.Configuration)); err != nil { + return fmt.Errorf("error setting configuration: %s", err) + } + + d.Set("name", resp.WorkGroup.Name) + d.Set("state", resp.WorkGroup.State) + + tags, err := keyvaluetags.AthenaListTags(conn, arn.String()) + + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + return nil +} + +func resourceAwsAthenaWorkgroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).athenaconn + + input := &athena.DeleteWorkGroupInput{ + WorkGroup: aws.String(d.Id()), + } + + _, err := conn.DeleteWorkGroup(input) + + if err != nil { + return fmt.Errorf("error deleting Athena WorkGroup (%s): %s", d.Id(), err) + } + + return nil +} + +func resourceAwsAthenaWorkgroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).athenaconn + + workGroupUpdate := false + + input := &athena.UpdateWorkGroupInput{ + WorkGroup: aws.String(d.Get("name").(string)), + } + + if d.HasChange("configuration") { + workGroupUpdate = true + input.ConfigurationUpdates = expandAthenaWorkGroupConfigurationUpdates(d.Get("configuration").([]interface{})) + } + + if d.HasChange("description") { + workGroupUpdate = true + input.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("state") { + workGroupUpdate = true + input.State = aws.String(d.Get("state").(string)) + } + + if workGroupUpdate { + _, err := conn.UpdateWorkGroup(input) + + if err != nil { + return fmt.Errorf("error updating Athena WorkGroup (%s): %s", d.Id(), err) + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.AthenaUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + + return resourceAwsAthenaWorkgroupRead(d, meta) +} + +func expandAthenaWorkGroupConfiguration(l []interface{}) *athena.WorkGroupConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + configuration := &athena.WorkGroupConfiguration{} + + if v, ok := m["bytes_scanned_cutoff_per_query"]; ok && v.(int) > 0 { + configuration.BytesScannedCutoffPerQuery = aws.Int64(int64(v.(int))) + } + + if v, ok := m["enforce_workgroup_configuration"]; ok { + configuration.EnforceWorkGroupConfiguration = aws.Bool(v.(bool)) + } + + if v, ok := m["publish_cloudwatch_metrics_enabled"]; ok { + configuration.PublishCloudWatchMetricsEnabled = aws.Bool(v.(bool)) + } + + if v, ok := m["result_configuration"]; ok { + configuration.ResultConfiguration = expandAthenaWorkGroupResultConfiguration(v.([]interface{})) + } + + return configuration +} + +func expandAthenaWorkGroupConfigurationUpdates(l []interface{}) 
*athena.WorkGroupConfigurationUpdates { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + configurationUpdates := &athena.WorkGroupConfigurationUpdates{} + + if v, ok := m["bytes_scanned_cutoff_per_query"]; ok && v.(int) > 0 { + configurationUpdates.BytesScannedCutoffPerQuery = aws.Int64(int64(v.(int))) + } else { + configurationUpdates.RemoveBytesScannedCutoffPerQuery = aws.Bool(true) + } + + if v, ok := m["enforce_workgroup_configuration"]; ok { + configurationUpdates.EnforceWorkGroupConfiguration = aws.Bool(v.(bool)) + } + + if v, ok := m["publish_cloudwatch_metrics_enabled"]; ok { + configurationUpdates.PublishCloudWatchMetricsEnabled = aws.Bool(v.(bool)) + } + + if v, ok := m["result_configuration"]; ok { + configurationUpdates.ResultConfigurationUpdates = expandAthenaWorkGroupResultConfigurationUpdates(v.([]interface{})) + } + + return configurationUpdates +} + +func expandAthenaWorkGroupResultConfiguration(l []interface{}) *athena.ResultConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + resultConfiguration := &athena.ResultConfiguration{} + + if v, ok := m["encryption_configuration"]; ok { + resultConfiguration.EncryptionConfiguration = expandAthenaWorkGroupEncryptionConfiguration(v.([]interface{})) + } + + if v, ok := m["output_location"]; ok && v.(string) != "" { + resultConfiguration.OutputLocation = aws.String(v.(string)) + } + + return resultConfiguration +} + +func expandAthenaWorkGroupResultConfigurationUpdates(l []interface{}) *athena.ResultConfigurationUpdates { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + resultConfigurationUpdates := &athena.ResultConfigurationUpdates{} + + if v, ok := m["encryption_configuration"]; ok { + resultConfigurationUpdates.EncryptionConfiguration = expandAthenaWorkGroupEncryptionConfiguration(v.([]interface{})) + } else { + resultConfigurationUpdates.RemoveEncryptionConfiguration = aws.Bool(true) + } + + if v, ok := m["output_location"]; ok && v.(string) != "" { + resultConfigurationUpdates.OutputLocation = aws.String(v.(string)) + } else { + resultConfigurationUpdates.RemoveOutputLocation = aws.Bool(true) + } + + return resultConfigurationUpdates +} + +func expandAthenaWorkGroupEncryptionConfiguration(l []interface{}) *athena.EncryptionConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + encryptionConfiguration := &athena.EncryptionConfiguration{} + + if v, ok := m["encryption_option"]; ok && v.(string) != "" { + encryptionConfiguration.EncryptionOption = aws.String(v.(string)) + } + + if v, ok := m["kms_key_arn"]; ok && v.(string) != "" { + encryptionConfiguration.KmsKey = aws.String(v.(string)) + } + + return encryptionConfiguration +} + +func flattenAthenaWorkGroupConfiguration(configuration *athena.WorkGroupConfiguration) []interface{} { + if configuration == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "bytes_scanned_cutoff_per_query": aws.Int64Value(configuration.BytesScannedCutoffPerQuery), + "enforce_workgroup_configuration": aws.BoolValue(configuration.EnforceWorkGroupConfiguration), + "publish_cloudwatch_metrics_enabled": aws.BoolValue(configuration.PublishCloudWatchMetricsEnabled), + "result_configuration": flattenAthenaWorkGroupResultConfiguration(configuration.ResultConfiguration), + } + + return []interface{}{m} +} + +func flattenAthenaWorkGroupResultConfiguration(resultConfiguration 
*athena.ResultConfiguration) []interface{} { + if resultConfiguration == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "encryption_configuration": flattenAthenaWorkGroupEncryptionConfiguration(resultConfiguration.EncryptionConfiguration), + "output_location": aws.StringValue(resultConfiguration.OutputLocation), + } + + return []interface{}{m} +} + +func flattenAthenaWorkGroupEncryptionConfiguration(encryptionConfiguration *athena.EncryptionConfiguration) []interface{} { + if encryptionConfiguration == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "encryption_option": aws.StringValue(encryptionConfiguration.EncryptionOption), + "kms_key_arn": aws.StringValue(encryptionConfiguration.KmsKey), + } + + return []interface{}{m} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go index 8563abd0e01..3b3503d48ab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAutoscalingAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go index 94c6802b784..3215ba6abb1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go @@ -6,10 +6,10 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -255,6 +255,8 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Optional: true, }, + // DEPRECATED: Computed: true should be removed in a major version release + // Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/9513 "load_balancers": { Type: schema.TypeSet, Optional: true, @@ -327,6 +329,8 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Default: false, }, + // DEPRECATED: Computed: true should be removed in a major version release + // Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/9513 "target_group_arns": { Type: schema.TypeSet, Optional: true, @@ -592,6 +596,9 
@@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateAutoScalingGroup(&createOpts) + } if err != nil { return fmt.Errorf("Error creating AutoScaling Group: %s", err) } @@ -760,6 +767,78 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e return nil } +func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling.AutoScaling, asgName string) error { + input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(asgName), + } + var tgRemoving bool + + for { + output, err := conn.DescribeLoadBalancerTargetGroups(input) + + if err != nil { + return err + } + + for _, tg := range output.LoadBalancerTargetGroups { + if aws.StringValue(tg.State) == "Removing" { + tgRemoving = true + break + } + } + + if tgRemoving { + tgRemoving = false + input.NextToken = nil + continue + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil +} + +func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.AutoScaling, asgName string) error { + input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(asgName), + } + var tgAdding bool + + for { + output, err := conn.DescribeLoadBalancerTargetGroups(input) + + if err != nil { + return err + } + + for _, tg := range output.LoadBalancerTargetGroups { + if aws.StringValue(tg.State) == "Adding" { + tgAdding = true + break + } + } + + if tgAdding { + tgAdding = false + input.NextToken = nil + continue + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil +} + func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn shouldWaitForCapacity := false @@ -877,22 +956,56 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) add := expandStringList(ns.Difference(os).List()) if len(remove) > 0 { - _, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{ - AutoScalingGroupName: aws.String(d.Id()), - LoadBalancerNames: remove, - }) - if err != nil { - return fmt.Errorf("Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err) + // API only supports removing 10 at a time + var batches [][]*string + + batchSize := 10 + + for batchSize < len(remove) { + remove, batches = remove[batchSize:], append(batches, remove[0:batchSize:batchSize]) + } + batches = append(batches, remove) + + for _, batch := range batches { + _, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{ + AutoScalingGroupName: aws.String(d.Id()), + LoadBalancerNames: batch, + }) + + if err != nil { + return fmt.Errorf("error detaching AutoScaling Group (%s) Load Balancers: %s", d.Id(), err) + } + + if err := waitUntilAutoscalingGroupLoadBalancersRemoved(conn, d.Id()); err != nil { + return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancers being removed: %s", d.Id(), err) + } } } if len(add) > 0 { - _, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{ - AutoScalingGroupName: aws.String(d.Id()), - LoadBalancerNames: add, - }) - if err != nil { - return fmt.Errorf("Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err) + // API only supports adding 10 at a time + batchSize := 10 + + var batches [][]*string + + for 
batchSize < len(add) { + add, batches = add[batchSize:], append(batches, add[0:batchSize:batchSize]) + } + batches = append(batches, add) + + for _, batch := range batches { + _, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{ + AutoScalingGroupName: aws.String(d.Id()), + LoadBalancerNames: batch, + }) + + if err != nil { + return fmt.Errorf("error attaching AutoScaling Group (%s) Load Balancers: %s", d.Id(), err) + } + + if err := waitUntilAutoscalingGroupLoadBalancersAdded(conn, d.Id()); err != nil { + return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancers being added: %s", d.Id(), err) + } } } } @@ -913,22 +1026,55 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) add := expandStringList(ns.Difference(os).List()) if len(remove) > 0 { - _, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{ - AutoScalingGroupName: aws.String(d.Id()), - TargetGroupARNs: remove, - }) - if err != nil { - return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + // AWS API only supports adding/removing 10 at a time + var batches [][]*string + + batchSize := 10 + + for batchSize < len(remove) { + remove, batches = remove[batchSize:], append(batches, remove[0:batchSize:batchSize]) + } + batches = append(batches, remove) + + for _, batch := range batches { + _, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(d.Id()), + TargetGroupARNs: batch, + }) + if err != nil { + return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + } + + if err := waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn, d.Id()); err != nil { + return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancer Target Groups being removed: %s", d.Id(), err) + } } + } if len(add) > 0 { - _, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{ - AutoScalingGroupName: aws.String(d.Id()), - TargetGroupARNs: add, - }) - if err != nil { - return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + batchSize := 10 + + var batches [][]*string + + for batchSize < len(add) { + add, batches = add[batchSize:], append(batches, add[0:batchSize:batchSize]) + } + batches = append(batches, add) + + for _, batch := range batches { + _, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(d.Id()), + TargetGroupARNs: batch, + }) + + if err != nil { + return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + } + + if err := waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn, d.Id()); err != nil { + return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancer Target Groups being added: %s", d.Id(), err) + } } } } @@ -1001,23 +1147,38 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) // Successful delete return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteAutoScalingGroup(&deleteopts) + if isAWSErr(err, "InvalidGroup.NotFound", "") { + return nil + } + } if err != nil { - return err + return fmt.Errorf("Error deleting autoscaling group: %s", err) } - return resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - 
if g, _ = getAwsAutoscalingGroup(d.Id(), conn); g != nil { - return resource.RetryableError( - fmt.Errorf("Auto Scaling Group still exists")) + var group *autoscaling.Group + err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + group, err = getAwsAutoscalingGroup(d.Id(), conn) + + if group != nil { + return resource.RetryableError(fmt.Errorf("Auto Scaling Group still exists")) } return nil }) + if isResourceTimeoutError(err) { + group, err = getAwsAutoscalingGroup(d.Id(), conn) + if group != nil { + return fmt.Errorf("Auto Scaling Group still exists") + } + } + if err != nil { + return fmt.Errorf("Error deleting autoscaling group: %s", err) + } + return nil } -func getAwsAutoscalingGroup( - asgName string, - conn *autoscaling.AutoScaling) (*autoscaling.Group, error) { - +func getAwsAutoscalingGroup(asgName string, conn *autoscaling.AutoScaling) (*autoscaling.Group, error) { describeOpts := autoscaling.DescribeAutoScalingGroupsInput{ AutoScalingGroupNames: []*string{aws.String(asgName)}, } @@ -1065,7 +1226,8 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) // Next, wait for the autoscale group to drain log.Printf("[DEBUG] Waiting for group to have zero instances") - return resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + var g *autoscaling.Group + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { g, err := getAwsAutoscalingGroup(d.Id(), conn) if err != nil { return resource.NonRetryableError(err) @@ -1081,8 +1243,21 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) } return resource.RetryableError( - fmt.Errorf("group still has %d instances", len(g.Instances))) + fmt.Errorf("Group still has %d instances", len(g.Instances))) }) + if isResourceTimeoutError(err) { + g, err = getAwsAutoscalingGroup(d.Id(), conn) + if err != nil { + return fmt.Errorf("Error getting autoscaling group info when draining: %s", err) + } + if g != nil && len(g.Instances) > 0 { + return fmt.Errorf("Group still has %d instances", len(g.Instances)) + } + } + if err != nil { + return fmt.Errorf("Error draining autoscaling group: %s", err) + } + return nil } func enableASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { @@ -1457,3 +1632,75 @@ func flattenAutoScalingMixedInstancesPolicy(mixedInstancesPolicy *autoscaling.Mi return []interface{}{m} } + +func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, asgName string) error { + input := &autoscaling.DescribeLoadBalancersInput{ + AutoScalingGroupName: aws.String(asgName), + } + var lbAdding bool + + for { + output, err := conn.DescribeLoadBalancers(input) + + if err != nil { + return err + } + + for _, tg := range output.LoadBalancers { + if aws.StringValue(tg.State) == "Adding" { + lbAdding = true + break + } + } + + if lbAdding { + lbAdding = false + input.NextToken = nil + continue + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil +} + +func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling, asgName string) error { + input := &autoscaling.DescribeLoadBalancersInput{ + AutoScalingGroupName: aws.String(asgName), + } + var lbRemoving bool + + for { + output, err := conn.DescribeLoadBalancers(input) + + if err != nil { + return err + } + + for _, tg := range output.LoadBalancers { + if aws.StringValue(tg.State) == "Removing" { + lbRemoving = true + break 
+ } + } + + if lbRemoving { + lbRemoving = false + input.NextToken = nil + continue + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go index 404d845feec..d2178adfaa2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // waitForASGCapacityTimeout gathers the current numbers of healthy instances @@ -44,63 +44,32 @@ func waitForASGCapacity( d.SetId("") return nil } - elbis, err := getELBInstanceStates(g, meta) - if err != nil { - return resource.NonRetryableError(err) - } - albis, err := getTargetGroupInstanceStates(g, meta) - if err != nil { - return resource.NonRetryableError(err) - } - - haveASG := 0 - haveELB := 0 - for _, i := range g.Instances { - if i.HealthStatus == nil || i.InstanceId == nil || i.LifecycleState == nil { - continue - } - - if !strings.EqualFold(*i.HealthStatus, "Healthy") { - continue - } - - if !strings.EqualFold(*i.LifecycleState, "InService") { - continue - } + satisfied, reason := isELBCapacitySatisfied(d, meta, g, satisfiedFunc) + if satisfied { + return nil + } - haveASG++ + return resource.RetryableError(fmt.Errorf("%q: Waiting up to %s: %s", d.Id(), wait, reason)) + }) + if isResourceTimeoutError(err) { + g, err := getAwsAutoscalingGroup(d.Id(), meta.(*AWSClient).autoscalingconn) - inAllLbs := true - for _, states := range elbis { - state, ok := states[*i.InstanceId] - if !ok || !strings.EqualFold(state, "InService") { - inAllLbs = false - } - } - for _, states := range albis { - state, ok := states[*i.InstanceId] - if !ok || !strings.EqualFold(state, "healthy") { - inAllLbs = false - } - } - if inAllLbs { - haveELB++ - } + if err != nil { + return fmt.Errorf("Error getting autoscaling group info: %s", err) } - satisfied, reason := satisfiedFunc(d, haveASG, haveELB) - - log.Printf("[DEBUG] %q Capacity: %d ASG, %d ELB/ALB, satisfied: %t, reason: %q", - d.Id(), haveASG, haveELB, satisfied, reason) + if g == nil { + log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + satisfied, _ := isELBCapacitySatisfied(d, meta, g, satisfiedFunc) if satisfied { return nil } - - return resource.RetryableError( - fmt.Errorf("%q: Waiting up to %s: %s", d.Id(), wait, reason)) - }) + } if err == nil { return nil @@ -126,6 +95,60 @@ func waitForASGCapacity( return fmt.Errorf("%s. 
Most recent activity: %s", err, recentStatus) } +func isELBCapacitySatisfied(d *schema.ResourceData, meta interface{}, g *autoscaling.Group, satisfiedFunc capacitySatisfiedFunc) (bool, string) { + elbis, err := getELBInstanceStates(g, meta) + if err != nil { + return false, fmt.Sprintf("Error getting ELB instance states: %s", err) + } + albis, err := getTargetGroupInstanceStates(g, meta) + if err != nil { + return false, fmt.Sprintf("Error getting target group instance states: %s", err) + } + + haveASG := 0 + haveELB := 0 + + for _, i := range g.Instances { + if i.HealthStatus == nil || i.InstanceId == nil || i.LifecycleState == nil { + continue + } + + if !strings.EqualFold(*i.HealthStatus, "Healthy") { + continue + } + + if !strings.EqualFold(*i.LifecycleState, "InService") { + continue + } + + haveASG++ + + inAllLbs := true + for _, states := range elbis { + state, ok := states[*i.InstanceId] + if !ok || !strings.EqualFold(state, "InService") { + inAllLbs = false + } + } + for _, states := range albis { + state, ok := states[*i.InstanceId] + if !ok || !strings.EqualFold(state, "healthy") { + inAllLbs = false + } + } + if inAllLbs { + haveELB++ + } + } + + satisfied, reason := satisfiedFunc(d, haveASG, haveELB) + + log.Printf("[DEBUG] %q Capacity: %d ASG, %d ELB/ALB, satisfied: %t, reason: %q", + d.Id(), haveASG, haveELB, satisfied, reason) + + return satisfied, reason +} + type capacitySatisfiedFunc func(*schema.ResourceData, int, int) (bool, string) // capacitySatisfiedCreate treats all targets as minimums diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go index 500924eac48..aa96aa0e586 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAutoscalingLifecycleHook() *schema.Resource { @@ -20,6 +20,10 @@ func resourceAwsAutoscalingLifecycleHook() *schema.Resource { Update: resourceAwsAutoscalingLifecycleHookPut, Delete: resourceAwsAutoscalingLifecycleHookDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsAutoscalingLifecycleHookImport, + }, + Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -61,7 +65,7 @@ func resourceAwsAutoscalingLifecycleHook() *schema.Resource { func resourceAwsAutoscalingLifecycleHookPutOp(conn *autoscaling.AutoScaling, params *autoscaling.PutLifecycleHookInput) error { log.Printf("[DEBUG] AutoScaling PutLifecyleHook: %s", params) - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.PutLifecycleHook(params) if err != nil { @@ -74,6 +78,13 @@ func resourceAwsAutoscalingLifecycleHookPutOp(conn *autoscaling.AutoScaling, par } return nil }) + if isResourceTimeoutError(err) { + _, err = 
conn.PutLifecycleHook(params) + } + if err != nil { + return fmt.Errorf("Error putting autoscaling lifecycle hook: %s", err) + } + return nil } func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error { @@ -192,3 +203,19 @@ func getAwsAutoscalingLifecycleHook(d *schema.ResourceData, meta interface{}) (* // lifecycle hook not found return nil, nil } + +func resourceAwsAutoscalingLifecycleHookImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.SplitN(d.Id(), "/", 2) + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("unexpected format (%q), expected <asg-name>/<lifecycle-hook-name>", d.Id()) + } + + asgName := idParts[0] + lifecycleHookName := idParts[1] + + d.Set("name", lifecycleHookName) + d.Set("autoscaling_group_name", asgName) + d.SetId(lifecycleHookName) + + return []*schema.ResourceData{d}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go index b70215ec291..6276124b580 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsAutoscalingNotification() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go index b2d560b3ab5..fc84a550557 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsAutoscalingPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go index 1f332da1cf5..660d98e089f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go @@ -3,12 +3,13 @@ package aws import
( "fmt" "log" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) const awsAutoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z" @@ -19,6 +20,9 @@ func resourceAwsAutoscalingSchedule() *schema.Resource { Read: resourceAwsAutoscalingScheduleRead, Update: resourceAwsAutoscalingScheduleCreate, Delete: resourceAwsAutoscalingScheduleDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsAutoscalingScheduleImport, + }, Schema: map[string]*schema.Schema{ "arn": { @@ -71,6 +75,27 @@ func resourceAwsAutoscalingSchedule() *schema.Resource { } } +func resourceAwsAutoscalingScheduleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + splitId := strings.Split(d.Id(), "/") + if len(splitId) != 2 { + return []*schema.ResourceData{}, fmt.Errorf("wrong format of resource: %s. Please follow 'asg-name/action-name'", d.Id()) + } + + asgName := splitId[0] + actionName := splitId[1] + + err := d.Set("autoscaling_group_name", asgName) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("failed to set autoscaling_group_name value") + } + err = d.Set("scheduled_action_name", actionName) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("failed to set scheduled_action_name value") + } + d.SetId(actionName) + return []*schema.ResourceData{d}, nil +} + func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error { autoscalingconn := meta.(*AWSClient).autoscalingconn params := &autoscaling.PutScheduledUpdateGroupActionInput{ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_plan.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_plan.go index c9180d52fb2..1a895826e15 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_plan.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_plan.go @@ -7,8 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsBackupPlan() *schema.Resource { @@ -68,14 +69,10 @@ func resourceAwsBackupPlan() *schema.Resource { }, }, }, - "recovery_point_tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "recovery_point_tags": tagsSchema(), }, }, - Set: resourceAwsPlanRuleHash, + Set: backupBackupPlanHash, }, "arn": { Type: schema.TypeString, @@ -93,28 +90,21 @@ func resourceAwsBackupPlan() *schema.Resource { func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - input := &backup.CreateBackupPlanInput{ - BackupPlan: plan, - } - - if v, ok := d.GetOk("tags"); ok { - 
input.BackupPlanTags = tagsFromMapGeneric(v.(map[string]interface{})) + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, + BackupPlanTags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().BackupTags(), } + log.Printf("[DEBUG] Creating Backup Plan: %#v", input) resp, err := conn.CreateBackupPlan(input) if err != nil { return fmt.Errorf("error creating Backup Plan: %s", err) } - d.SetId(*resp.BackupPlanId) + d.SetId(aws.StringValue(resp.BackupPlanId)) return resourceAwsBackupPlanRead(d, meta) } @@ -122,123 +112,60 @@ func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.GetBackupPlanInput{ + resp, err := conn.GetBackupPlan(&backup.GetBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - resp, err := conn.GetBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { log.Printf("[WARN] Backup Plan (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if err != nil { - return fmt.Errorf("error reading Backup Plan: %s", err) + return fmt.Errorf("error reading Backup Plan (%s): %s", d.Id(), err) } - rule := &schema.Set{F: resourceAwsPlanRuleHash} - - for _, r := range resp.BackupPlan.Rules { - m := make(map[string]interface{}) - - m["completion_window"] = aws.Int64Value(r.CompletionWindowMinutes) - m["recovery_point_tags"] = aws.StringValueMap(r.RecoveryPointTags) - m["rule_name"] = aws.StringValue(r.RuleName) - m["schedule"] = aws.StringValue(r.ScheduleExpression) - m["start_window"] = aws.Int64Value(r.StartWindowMinutes) - m["target_vault_name"] = aws.StringValue(r.TargetBackupVaultName) - - if r.Lifecycle != nil { - l := make(map[string]interface{}) - l["delete_after"] = aws.Int64Value(r.Lifecycle.DeleteAfterDays) - l["cold_storage_after"] = aws.Int64Value(r.Lifecycle.MoveToColdStorageAfterDays) - m["lifecycle"] = []interface{}{l} - } + d.Set("arn", resp.BackupPlanArn) + d.Set("name", resp.BackupPlan.BackupPlanName) + d.Set("version", resp.VersionId) - rule.Add(m) - } - if err := d.Set("rule", rule); err != nil { + if err := d.Set("rule", flattenBackupPlanRules(resp.BackupPlan.Rules)); err != nil { return fmt.Errorf("error setting rule: %s", err) } - tagsOutput, err := conn.ListTags(&backup.ListTagsInput{ - ResourceArn: resp.BackupPlanArn, - }) + tags, err := keyvaluetags.BackupListTags(conn, d.Get("arn").(string)) if err != nil { - return fmt.Errorf("error listing tags AWS Backup plan %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Backup Plan (%s): %s", d.Id(), err) } - - if err := d.Set("tags", tagsToMapGeneric(tagsOutput.Tags)); err != nil { - return fmt.Errorf("error setting tags on AWS Backup plan %s: %s", d.Id(), err) + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } - d.Set("arn", resp.BackupPlanArn) - d.Set("version", resp.VersionId) - return nil } func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - - input := &backup.UpdateBackupPlanInput{ - BackupPlanId: aws.String(d.Id()), - BackupPlan: plan, - } + if d.HasChange("rule") 
{ + input := &backup.UpdateBackupPlanInput{ + BackupPlanId: aws.String(d.Id()), + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, + } - _, err := conn.UpdateBackupPlan(input) - if err != nil { - return fmt.Errorf("error updating Backup Plan: %s", err) + log.Printf("[DEBUG] Updating Backup Plan: %#v", input) + _, err := conn.UpdateBackupPlan(input) + if err != nil { + return fmt.Errorf("error updating Backup Plan (%s): %s", d.Id(), err) + } } if d.HasChange("tags") { - resourceArn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - create, remove := diffTagsGeneric(oraw.(map[string]interface{}), nraw.(map[string]interface{})) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&backup.UntagResourceInput{ - ResourceArn: aws.String(resourceArn), - TagKeyList: keys, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error removing tags for (%s): %s", d.Id(), err) - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&backup.TagResourceInput{ - ResourceArn: aws.String(resourceArn), - Tags: create, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error setting tags for (%s): %s", d.Id(), err) - } + o, n := d.GetChange("tags") + if err := keyvaluetags.BackupUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags for Backup Plan (%s): %s", d.Id(), err) } } @@ -248,66 +175,64 @@ func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.DeleteBackupPlanInput{ + log.Printf("[DEBUG] Deleting Backup Plan: %s", d.Id()) + _, err := conn.DeleteBackupPlan(&backup.DeleteBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - _, err := conn.DeleteBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { return nil } if err != nil { - return fmt.Errorf("error deleting Backup Plan: %s", err) + return fmt.Errorf("error deleting Backup Plan (%s): %s", d.Id(), err) } return nil } -func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { +func expandBackupPlanRules(vRules *schema.Set) []*backup.RuleInput { rules := []*backup.RuleInput{} - for _, i := range l { - item := i.(map[string]interface{}) + for _, vRule := range vRules.List() { rule := &backup.RuleInput{} - if item["rule_name"] != "" { - rule.RuleName = aws.String(item["rule_name"].(string)) + mRule := vRule.(map[string]interface{}) + + if vRuleName, ok := mRule["rule_name"].(string); ok && vRuleName != "" { + rule.RuleName = aws.String(vRuleName) + } else { + continue } - if item["target_vault_name"] != "" { - rule.TargetBackupVaultName = aws.String(item["target_vault_name"].(string)) + if vTargetVaultName, ok := mRule["target_vault_name"].(string); ok && vTargetVaultName != "" { + rule.TargetBackupVaultName = aws.String(vTargetVaultName) } - if 
item["schedule"] != "" { - rule.ScheduleExpression = aws.String(item["schedule"].(string)) + if vSchedule, ok := mRule["schedule"].(string); ok && vSchedule != "" { + rule.ScheduleExpression = aws.String(vSchedule) } - if item["start_window"] != nil { - rule.StartWindowMinutes = aws.Int64(int64(item["start_window"].(int))) + if vStartWindow, ok := mRule["start_window"].(int); ok { + rule.StartWindowMinutes = aws.Int64(int64(vStartWindow)) } - if item["completion_window"] != nil { - rule.CompletionWindowMinutes = aws.Int64(int64(item["completion_window"].(int))) + if vCompletionWindow, ok := mRule["completion_window"].(int); ok { + rule.CompletionWindowMinutes = aws.Int64(int64(vCompletionWindow)) } - if item["recovery_point_tags"] != nil { - rule.RecoveryPointTags = tagsFromMapGeneric(item["recovery_point_tags"].(map[string]interface{})) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + rule.RecoveryPointTags = tagsFromMapGeneric(vRecoveryPointTags) } - var lifecycle map[string]interface{} - if i.(map[string]interface{})["lifecycle"] != nil { - lifecycleRaw := i.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lifecycleRaw) == 1 { - lifecycle = lifecycleRaw[0].(map[string]interface{}) - lcValues := &backup.Lifecycle{} - - if v, ok := lifecycle["delete_after"]; ok && v.(int) > 0 { - lcValues.DeleteAfterDays = aws.Int64(int64(v.(int))) - } - - if v, ok := lifecycle["cold_storage_after"]; ok && v.(int) > 0 { - lcValues.MoveToColdStorageAfterDays = aws.Int64(int64(v.(int))) - } - rule.Lifecycle = lcValues + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { + lifecycle := &backup.Lifecycle{} + + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if vDeleteAfter, ok := mLifecycle["delete_after"].(int); ok && vDeleteAfter > 0 { + lifecycle.DeleteAfterDays = aws.Int64(int64(vDeleteAfter)) + } + if vColdStorageAfter, ok := mLifecycle["cold_storage_after"].(int); ok && vColdStorageAfter > 0 { + lifecycle.MoveToColdStorageAfterDays = aws.Int64(int64(vColdStorageAfter)) } + rule.Lifecycle = lifecycle } rules = append(rules, rule) @@ -316,46 +241,68 @@ func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { return rules } -func resourceAwsPlanRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v.(map[string]interface{})["lifecycle"] != nil { - lcRaw := v.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lcRaw) == 1 { - l := lcRaw[0].(map[string]interface{}) - if w, ok := l["delete_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) - } +func flattenBackupPlanRules(rules []*backup.Rule) *schema.Set { + vRules := []interface{}{} + + for _, rule := range rules { + mRule := map[string]interface{}{ + "rule_name": aws.StringValue(rule.RuleName), + "target_vault_name": aws.StringValue(rule.TargetBackupVaultName), + "schedule": aws.StringValue(rule.ScheduleExpression), + "start_window": int(aws.Int64Value(rule.StartWindowMinutes)), + "completion_window": int(aws.Int64Value(rule.CompletionWindowMinutes)), + "recovery_point_tags": tagsToMapGeneric(rule.RecoveryPointTags), + } - if w, ok := l["cold_storage_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) + if lifecycle := rule.Lifecycle; lifecycle != nil { + mRule["lifecycle"] = []interface{}{ + map[string]interface{}{ + "delete_after": int(aws.Int64Value(lifecycle.DeleteAfterDays)), + "cold_storage_after": 
int(aws.Int64Value(lifecycle.MoveToColdStorageAfterDays)), + }, } } - } - if v, ok := m["completion_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + vRules = append(vRules, mRule) } - if v, ok := m["recovery_point_tags"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v)) - } + return schema.NewSet(backupBackupPlanHash, vRules) +} - if v, ok := m["rule_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } +func backupBackupPlanHash(vRule interface{}) int { + var buf bytes.Buffer - if v, ok := m["schedule"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + mRule := vRule.(map[string]interface{}) + + if v, ok := mRule["rule_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["target_vault_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["schedule"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["start_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mRule["completion_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) } - if v, ok := m["start_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + buf.WriteString(fmt.Sprintf("%d-", tagsMapToHash(vRecoveryPointTags))) } - if v, ok := m["target_vault_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if v, ok := mLifecycle["delete_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mLifecycle["cold_storage_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } } return hashcode.String(buf.String()) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_selection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_selection.go index 6db13f2e524..255e019602d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_selection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_selection.go @@ -4,11 +4,14 @@ import ( "fmt" "log" "regexp" + "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsBackupSelection() *schema.Resource { @@ -16,6 +19,9 @@ func resourceAwsBackupSelection() *schema.Resource { Create: resourceAwsBackupSelectionCreate, Read: resourceAwsBackupSelectionRead, Delete: resourceAwsBackupSelectionDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsBackupSelectionImportState, + }, Schema: map[string]*schema.Schema{ "name": { @@ -47,6 +53,7 @@ func resourceAwsBackupSelection() *schema.Resource { "type": { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ backup.ConditionTypeStringequals, }, false), @@ -54,10 +61,12 @@ func 
resourceAwsBackupSelection() *schema.Resource { "key": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "value": { Type: schema.TypeString, Required: true, + ForceNew: true, }, }, }, @@ -87,12 +96,34 @@ func resourceAwsBackupSelectionCreate(d *schema.ResourceData, meta interface{}) BackupSelection: selection, } - resp, err := conn.CreateBackupSelection(input) + // Retry for IAM eventual consistency + var output *backup.CreateBackupSelectionOutput + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.CreateBackupSelection(input) + + // Retry on the following error: + // InvalidParameterValueException: IAM Role arn:aws:iam::123456789012:role/XXX cannot be assumed by AWS Backup + if isAWSErr(err, backup.ErrCodeInvalidParameterValueException, "cannot be assumed") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + output, err = conn.CreateBackupSelection(input) + } + + if err != nil { return fmt.Errorf("error creating Backup Selection: %s", err) } - d.SetId(*resp.SelectionId) + d.SetId(aws.StringValue(output.SelectionId)) return resourceAwsBackupSelectionRead(d, meta) } @@ -162,6 +193,21 @@ func resourceAwsBackupSelectionDelete(d *schema.ResourceData, meta interface{}) return nil } +func resourceAwsBackupSelectionImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.Split(d.Id(), "|") + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("unexpected format of ID (%q), expected <plan-id>|<selection-id>", d.Id()) + } + + planID := idParts[0] + selectionID := idParts[1] + + d.Set("plan_id", planID) + d.SetId(selectionID) + + return []*schema.ResourceData{d}, nil +} + func expandBackupConditionTags(tagList []interface{}) []*backup.Condition { conditions := []*backup.Condition{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_vault.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_vault.go index 9850f62ec5f..bb7f3ccf0b8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_vault.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_backup_vault.go @@ -5,11 +5,11 @@ import ( "fmt" "log" "regexp" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsBackupVault() *schema.Resource { @@ -18,6 +18,9 @@ func resourceAwsBackupVault() *schema.Resource { Read: resourceAwsBackupVaultRead, Update: resourceAwsBackupVaultUpdate, Delete: resourceAwsBackupVaultDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -91,7 +94,7 @@ func resourceAwsBackupVaultRead(d *schema.ResourceData, meta interface{}) error if err != nil { return fmt.Errorf("error reading Backup Vault (%s): %s", d.Id(), err) } - + d.Set("name", resp.BackupVaultName) d.Set("kms_key_arn", resp.EncryptionKeyArn) d.Set("arn", resp.BackupVaultArn) d.Set("recovery_points", resp.NumberOfRecoveryPoints) 
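The resource_aws_backup_selection.go hunk above is one of several in this bump (Cloud9, CloudTrail, CloudHSMv2) that adopt the same retry-then-final-attempt idiom. A minimal sketch of that idiom, assuming the provider package context vendored here: the wrapper name createBackupSelectionWithRetry is illustrative only, while resource.Retry, isAWSErr, and isResourceTimeoutError are the helpers the diff itself uses.

func createBackupSelectionWithRetry(conn *backup.Backup, input *backup.CreateBackupSelectionInput) (*backup.CreateBackupSelectionOutput, error) {
	var output *backup.CreateBackupSelectionOutput

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		var err error
		output, err = conn.CreateBackupSelection(input)

		// IAM role propagation is eventually consistent, so this specific
		// failure is transient and worth retrying.
		if isAWSErr(err, backup.ErrCodeInvalidParameterValueException, "cannot be assumed") {
			return resource.RetryableError(err)
		}
		if err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})

	// If the retry budget expires, make one final attempt so a slow but
	// ultimately successful propagation is not reported as a timeout.
	if isResourceTimeoutError(err) {
		output, err = conn.CreateBackupSelection(input)
	}

	return output, err
}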
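Likewise, the Backup Plan and CloudFormation hunks replace hand-rolled tag diffing with the provider's internal keyvaluetags package. A condensed sketch of the resulting read/update flow, built only from calls that appear in this diff; the function name exampleBackupPlanSyncTags is hypothetical.

func exampleBackupPlanSyncTags(conn *backup.Backup, d *schema.ResourceData) error {
	// Read path: list the resource's tags, drop aws:* system tags, store in state.
	tags, err := keyvaluetags.BackupListTags(conn, d.Get("arn").(string))
	if err != nil {
		return fmt.Errorf("error listing tags: %s", err)
	}
	if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil {
		return fmt.Errorf("error setting tags: %s", err)
	}

	// Update path: the helper diffs the old and new maps and issues the
	// TagResource/UntagResource calls that were previously written by hand.
	if d.HasChange("tags") {
		o, n := d.GetChange("tags")
		if err := keyvaluetags.BackupUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
			return fmt.Errorf("error updating tags: %s", err)
		}
	}
	return nil
}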
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_compute_environment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_compute_environment.go index 20e63e7d659..a382d48ed6e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_compute_environment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_compute_environment.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsBatchComputeEnvironment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_definition.go index 755559501dc..f2b5d049fbc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_definition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_definition.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsBatchJobDefinition() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_queue.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_queue.go index e4787c7694c..65e4ab0d7bf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_queue.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_batch_job_queue.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsBatchJobQueue() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_budgets_budget.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_budgets_budget.go index 83d5c515226..8298b7fb0f2 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_budgets_budget.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_budgets_budget.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/budgets" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsBudgetsBudget() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloud9_environment_ec2.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloud9_environment_ec2.go index 9c4bdb24b85..17fce63e529 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloud9_environment_ec2.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloud9_environment_ec2.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloud9" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloud9EnvironmentEc2() *schema.Resource { @@ -100,9 +100,12 @@ func resourceAwsCloud9EnvironmentEc2Create(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + out, err = conn.CreateEnvironmentEC2(params) + } if err != nil { - return err + return fmt.Errorf("Error creating Cloud9 EC2 Environment: %s", err) } d.SetId(*out.EnvironmentId) @@ -204,10 +207,13 @@ func resourceAwsCloud9EnvironmentEc2Delete(d *schema.ResourceData, meta interfac if err != nil { return err } - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeEnvironments(&cloud9.DescribeEnvironmentsInput{ - EnvironmentIds: []*string{aws.String(d.Id())}, - }) + + input := &cloud9.DescribeEnvironmentsInput{ + EnvironmentIds: []*string{aws.String(d.Id())}, + } + var out *cloud9.DescribeEnvironmentsOutput + err = resource.Retry(20*time.Minute, func() *resource.RetryError { // Deleting instances can take a long time + out, err = conn.DescribeEnvironments(input) if err != nil { if isAWSErr(err, cloud9.ErrCodeNotFoundException, "") { return nil @@ -223,6 +229,17 @@ func resourceAwsCloud9EnvironmentEc2Delete(d *schema.ResourceData, meta interfac } return resource.RetryableError(fmt.Errorf("Cloud9 EC2 Environment %q still exists", d.Id())) }) - - return err + if isResourceTimeoutError(err) { + out, err = conn.DescribeEnvironments(input) + if isAWSErr(err, cloud9.ErrCodeNotFoundException, "") { + return nil + } + if isAWSErr(err, "AccessDeniedException", "is not authorized to access this resource") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting Cloud9 EC2 Environment: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go index 50fb922575b..4da9ed3af24 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go @@ -9,10 +9,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCloudFormationStack() *schema.Resource { @@ -102,10 +103,7 @@ func resourceAwsCloudFormationStack() *schema.Resource { Optional: true, ForceNew: true, }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, + "tags": tagsSchema(), "iam_role_arn": { Type: schema.TypeString, Optional: true, @@ -156,7 +154,7 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface input.StackPolicyURL = aws.String(v.(string)) } if v, ok := d.GetOk("tags"); ok { - input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CloudformationTags() } if v, ok := d.GetOk("timeout_in_minutes"); ok { m := int64(v.(int)) @@ -177,17 +175,17 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface wait := resource.StateChangeConf{ Pending: []string{ - "CREATE_IN_PROGRESS", - "DELETE_IN_PROGRESS", - "ROLLBACK_IN_PROGRESS", + cloudformation.StackStatusCreateInProgress, + cloudformation.StackStatusDeleteInProgress, + cloudformation.StackStatusRollbackInProgress, }, Target: []string{ - "CREATE_COMPLETE", - "CREATE_FAILED", - "DELETE_COMPLETE", - "DELETE_FAILED", - "ROLLBACK_COMPLETE", - "ROLLBACK_FAILED", + cloudformation.StackStatusCreateComplete, + cloudformation.StackStatusCreateFailed, + cloudformation.StackStatusDeleteComplete, + cloudformation.StackStatusDeleteFailed, + cloudformation.StackStatusRollbackComplete, + cloudformation.StackStatusRollbackFailed, }, Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 1 * time.Second, @@ -226,7 +224,7 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface return err } - if lastStatus == "ROLLBACK_COMPLETE" || lastStatus == "ROLLBACK_FAILED" { + if lastStatus == cloudformation.StackStatusRollbackComplete || lastStatus == cloudformation.StackStatusRollbackFailed { reasons, err := getCloudFormationRollbackReasons(d.Id(), nil, conn) if err != nil { return fmt.Errorf("Failed getting rollback reasons: %q", err.Error()) @@ -234,7 +232,7 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface return fmt.Errorf("%s: %q", lastStatus, reasons) } - if lastStatus == "DELETE_COMPLETE" || lastStatus == "DELETE_FAILED" { + if lastStatus == cloudformation.StackStatusDeleteComplete || lastStatus == cloudformation.StackStatusDeleteFailed { reasons, err := 
getCloudFormationDeletionReasons(d.Id(), conn) if err != nil { return fmt.Errorf("Failed getting deletion reasons: %q", err.Error()) @@ -243,7 +241,7 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface d.SetId("") return fmt.Errorf("%s: %q", lastStatus, reasons) } - if lastStatus == "CREATE_FAILED" { + if lastStatus == cloudformation.StackStatusCreateFailed { reasons, err := getCloudFormationFailures(d.Id(), conn) if err != nil { return fmt.Errorf("Failed getting failure reasons: %q", err.Error()) @@ -282,7 +280,7 @@ func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{} return nil } for _, s := range stacks { - if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" { + if *s.StackId == d.Id() && *s.StackStatus == cloudformation.StackStatusDeleteComplete { log.Printf("[DEBUG] Removing CloudFormation stack %s"+ " as it has been already deleted", d.Id()) d.SetId("") @@ -332,9 +330,8 @@ func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{} return err } - err = d.Set("tags", flattenCloudFormationTags(stack.Tags)) - if err != nil { - return err + if err := d.Set("tags", keyvaluetags.CloudformationKeyValueTags(stack.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) @@ -386,7 +383,7 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface } if v, ok := d.GetOk("tags"); ok { - input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CloudformationTags() } if d.HasChange("policy_body") { @@ -427,16 +424,16 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface var stackId string wait := resource.StateChangeConf{ Pending: []string{ - "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", - "UPDATE_IN_PROGRESS", - "UPDATE_ROLLBACK_IN_PROGRESS", - "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + cloudformation.StackStatusUpdateCompleteCleanupInProgress, + cloudformation.StackStatusUpdateInProgress, + cloudformation.StackStatusUpdateRollbackInProgress, + cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress, }, Target: []string{ - "CREATE_COMPLETE", // If no stack update was performed - "UPDATE_COMPLETE", - "UPDATE_ROLLBACK_COMPLETE", - "UPDATE_ROLLBACK_FAILED", + cloudformation.StackStatusCreateComplete, + cloudformation.StackStatusUpdateComplete, + cloudformation.StackStatusUpdateRollbackComplete, + cloudformation.StackStatusUpdateRollbackFailed, }, Timeout: d.Timeout(schema.TimeoutUpdate), MinTimeout: 5 * time.Second, @@ -464,7 +461,7 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface return err } - if lastStatus == "UPDATE_ROLLBACK_COMPLETE" || lastStatus == "UPDATE_ROLLBACK_FAILED" { + if lastStatus == cloudformation.StackStatusUpdateRollbackComplete || lastStatus == cloudformation.StackStatusUpdateRollbackFailed { reasons, err := getCloudFormationRollbackReasons(stackId, lastUpdatedTime, conn) if err != nil { return fmt.Errorf("Failed getting details about rollback: %q", err.Error()) @@ -501,12 +498,12 @@ func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface var lastStatus string wait := resource.StateChangeConf{ Pending: []string{ - "DELETE_IN_PROGRESS", - "ROLLBACK_IN_PROGRESS", + cloudformation.StackStatusDeleteInProgress, + cloudformation.StackStatusRollbackInProgress, }, Target: []string{ - 
"DELETE_COMPLETE", - "DELETE_FAILED", + cloudformation.StackStatusDeleteComplete, + cloudformation.StackStatusDeleteFailed, }, Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 5 * time.Second, @@ -525,14 +522,14 @@ func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface // ValidationError: Stack with id % does not exist if awsErr.Code() == "ValidationError" { - return resp, "DELETE_COMPLETE", nil + return resp, cloudformation.StackStatusDeleteComplete, nil } return nil, "", err } if len(resp.Stacks) == 0 { log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Id()) - return resp, "DELETE_COMPLETE", nil + return resp, cloudformation.StackStatusDeleteComplete, nil } status := *resp.Stacks[0].StackStatus @@ -548,7 +545,7 @@ func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface return err } - if lastStatus == "DELETE_FAILED" { + if lastStatus == cloudformation.StackStatusDeleteFailed { reasons, err := getCloudFormationFailures(d.Id(), conn) if err != nil { return fmt.Errorf("Failed getting reasons of failure: %q", err.Error()) @@ -648,3 +645,77 @@ func cfStackEventIsStackDeletion(event *cloudformation.StackEvent) bool { *event.ResourceType == "AWS::CloudFormation::Stack" && event.ResourceStatusReason != nil } + +func cfStackStateRefresh(conn *cloudformation.CloudFormation, stackId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(stackId), + }) + if err != nil { + return nil, "", fmt.Errorf("error describing CloudFormation stacks: %s", err) + } + + n := len(resp.Stacks) + switch n { + case 0: + return "", cloudformation.StackStatusDeleteComplete, nil + + case 1: + stack := resp.Stacks[0] + return stack, aws.StringValue(stack.StackStatus), nil + + default: + return nil, "", fmt.Errorf("found %d CloudFormation stacks for %s, expected 1", n, stackId) + } + } +} + +func waitForCloudFormationStackCreation(conn *cloudformation.CloudFormation, stackId string, timeout time.Duration) (string, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + cloudformation.StackStatusCreateInProgress, + cloudformation.StackStatusDeleteInProgress, + cloudformation.StackStatusRollbackInProgress, + }, + Target: []string{ + cloudformation.StackStatusCreateComplete, + cloudformation.StackStatusCreateFailed, + cloudformation.StackStatusDeleteComplete, + cloudformation.StackStatusDeleteFailed, + cloudformation.StackStatusRollbackComplete, + cloudformation.StackStatusRollbackFailed, + }, + Refresh: cfStackStateRefresh(conn, stackId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + v, err := stateConf.WaitForState() + if err != nil { + return "", err + } + + return aws.StringValue(v.(*cloudformation.Stack).StackStatus), nil +} + +func waitForCloudFormationStackDeletion(conn *cloudformation.CloudFormation, stackId string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + cloudformation.StackStatusDeleteInProgress, + cloudformation.StackStatusRollbackInProgress, + }, + Target: []string{ + cloudformation.StackStatusDeleteComplete, + cloudformation.StackStatusDeleteFailed, + }, + Refresh: cfStackStateRefresh(conn, stackId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set.go index 5954853f796..0c10bff5b45 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set.go @@ -7,9 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCloudFormationStackSet() *schema.Resource { @@ -74,11 +75,7 @@ func resourceAwsCloudFormationStackSet() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "tags": tagsSchema(), "template_body": { Type: schema.TypeString, Optional: true, @@ -120,7 +117,7 @@ func resourceAwsCloudFormationStackSetCreate(d *schema.ResourceData, meta interf } if v, ok := d.GetOk("tags"); ok { - input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CloudformationTags() } if v, ok := d.GetOk("template_body"); ok { @@ -186,7 +183,7 @@ func resourceAwsCloudFormationStackSetRead(d *schema.ResourceData, meta interfac d.Set("stack_set_id", stackSet.StackSetId) - if err := d.Set("tags", flattenCloudFormationTags(stackSet.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.CloudformationKeyValueTags(stackSet.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -220,7 +217,7 @@ func resourceAwsCloudFormationStackSetUpdate(d *schema.ResourceData, meta interf } if v, ok := d.GetOk("tags"); ok { - input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CloudformationTags() } if v, ok := d.GetOk("template_url"); ok { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set_instance.go index 2406f008927..967e73f55f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack_set_instance.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudFormationStackSetInstance() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go index 65412b819dc..27ba1a5e203 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudFrontDistribution() *schema.Resource { @@ -1065,7 +1065,7 @@ func resourceAwsCloudFrontDistributionWaitUntilDeployed(id string, meta interfac Pending: []string{"InProgress"}, Target: []string{"Deployed"}, Refresh: resourceAwsCloudFrontWebDistributionStateRefreshFunc(id, meta), - Timeout: 70 * time.Minute, + Timeout: 90 * time.Minute, MinTimeout: 15 * time.Second, Delay: 1 * time.Minute, } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution_migrate.go index 4517930bc1d..8cc58dccb23 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsCloudFrontDistributionMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go index f568bf5dc15..b99900ba055 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go @@ -2,12 +2,12 @@ package aws import ( "fmt" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudFrontOriginAccessIdentity() *schema.Resource { @@ -125,7 +125,7 @@ func expandOriginAccessIdentityConfig(d *schema.ResourceData) *cloudfront.Origin } // This sets CallerReference if it's still pending computation (ie: new resource) if v, ok := d.GetOk("caller_reference"); !ok { - originAccessIdentityConfig.CallerReference = aws.String(time.Now().Format(time.RFC3339Nano)) + originAccessIdentityConfig.CallerReference = aws.String(resource.UniqueId()) } else { originAccessIdentityConfig.CallerReference = aws.String(v.(string)) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_public_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_public_key.go index c0e20bcb7f5..0763e270e02 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_public_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_public_key.go @@ -3,12 +3,11 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudFrontPublicKey() *schema.Resource { @@ -163,7 +162,7 @@ func expandPublicKeyConfig(d *schema.ResourceData) *cloudfront.PublicKeyConfig { if v, ok := d.GetOk("caller_reference"); ok { publicKeyConfig.CallerReference = aws.String(v.(string)) } else { - publicKeyConfig.CallerReference = aws.String(time.Now().Format(time.RFC3339Nano)) + publicKeyConfig.CallerReference = aws.String(resource.UniqueId()) } return publicKeyConfig diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_cluster.go index be666fef0de..65dda087432 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_cluster.go @@ -2,15 +2,15 @@ package aws import ( "fmt" - "github.com/hashicorp/terraform/helper/validation" "log" "time" - "github.com/hashicorp/terraform/helper/schema" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudhsmv2" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCloudHsm2Cluster() *schema.Resource { @@ -32,7 +32,6 @@ func resourceAwsCloudHsm2Cluster() *schema.Resource { Schema: map[string]*schema.Schema{ "source_backup_identifier": { Type: schema.TypeString, - Computed: false, Optional: true, ForceNew: true, }, @@ -177,6 +176,9 @@ func resourceAwsCloudHsm2ClusterCreate(d *schema.ResourceData, meta interface{}) } return nil }) + if 
isResourceTimeoutError(err) { + output, err = cloudhsm2.CreateCluster(input) + } if err != nil { return fmt.Errorf("error creating CloudHSMv2 Cluster: %s", err) @@ -210,16 +212,19 @@ func resourceAwsCloudHsm2ClusterCreate(d *schema.ResourceData, meta interface{}) } } - if err := setTagsAwsCloudHsm2Cluster(cloudhsm2, d); err != nil { - return err + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + if err := keyvaluetags.Cloudhsmv2UpdateTags(cloudhsm2, d.Id(), nil, v); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } } return resourceAwsCloudHsm2ClusterRead(d, meta) } func resourceAwsCloudHsm2ClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudhsmv2conn - cluster, err := describeCloudHsm2Cluster(meta.(*AWSClient).cloudhsmv2conn, d.Id()) + cluster, err := describeCloudHsm2Cluster(conn, d.Id()) if cluster == nil { log.Printf("[WARN] CloudHSMv2 Cluster (%s) not found", d.Id()) @@ -247,14 +252,27 @@ func resourceAwsCloudHsm2ClusterRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error saving Subnet IDs to state for CloudHSMv2 Cluster (%s): %s", d.Id(), err) } + tags, err := keyvaluetags.Cloudhsmv2ListTags(conn, d.Id()) + + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } func resourceAwsCloudHsm2ClusterUpdate(d *schema.ResourceData, meta interface{}) error { - cloudhsm2 := meta.(*AWSClient).cloudhsmv2conn + conn := meta.(*AWSClient).cloudhsmv2conn - if err := setTagsAwsCloudHsm2Cluster(cloudhsm2, d); err != nil { - return err + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.Cloudhsmv2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } } return resourceAwsCloudHsm2ClusterRead(d, meta) @@ -262,13 +280,14 @@ func resourceAwsCloudHsm2ClusterUpdate(d *schema.ResourceData, meta interface{}) func resourceAwsCloudHsm2ClusterDelete(d *schema.ResourceData, meta interface{}) error { cloudhsm2 := meta.(*AWSClient).cloudhsmv2conn + input := &cloudhsmv2.DeleteClusterInput{ + ClusterId: aws.String(d.Id()), + } log.Printf("[DEBUG] CloudHSMv2 Delete cluster: %s", d.Id()) err := resource.Retry(180*time.Second, func() *resource.RetryError { var err error - _, err = cloudhsm2.DeleteCluster(&cloudhsmv2.DeleteClusterInput{ - ClusterId: aws.String(d.Id()), - }) + _, err = cloudhsm2.DeleteCluster(input) if err != nil { if isAWSErr(err, cloudhsmv2.ErrCodeCloudHsmInternalFailureException, "request was rejected because of an AWS CloudHSM internal failure") { log.Printf("[DEBUG] CloudHSMv2 Cluster re-try deleting %s", d.Id()) @@ -278,67 +297,16 @@ func resourceAwsCloudHsm2ClusterDelete(d *schema.ResourceData, meta interface{}) } return nil }) - - if err != nil { - return err + if isResourceTimeoutError(err) { + _, err = cloudhsm2.DeleteCluster(input) } - log.Println("[INFO] Waiting for CloudHSMv2 Cluster to be deleted") - stateConf := &resource.StateChangeConf{ - Pending: []string{cloudhsmv2.ClusterStateDeleteInProgress}, - Target: []string{cloudhsmv2.ClusterStateDeleted}, - Refresh: resourceAwsCloudHsm2ClusterRefreshFunc(cloudhsm2, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 30 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, errWait := stateConf.WaitForState() - if errWait != nil { - return fmt.Errorf("Error 
waiting for CloudHSMv2 Cluster state to be \"DELETED\": %s", errWait) + if err != nil { + return fmt.Errorf("error deleting CloudHSMv2 Cluster (%s): %s", d.Id(), err) } - return nil -} - -func setTagsAwsCloudHsm2Cluster(conn *cloudhsmv2.CloudHSMV2, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - create, remove := diffTagsGeneric(oraw.(map[string]interface{}), nraw.(map[string]interface{})) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&cloudhsmv2.UntagResourceInput{ - ResourceId: aws.String(d.Id()), - TagKeyList: keys, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - tagList := make([]*cloudhsmv2.Tag, 0, len(create)) - for k, v := range create { - tagList = append(tagList, &cloudhsmv2.Tag{ - Key: &k, - Value: v, - }) - } - _, err := conn.TagResource(&cloudhsmv2.TagResourceInput{ - ResourceId: aws.String(d.Id()), - TagList: tagList, - }) - if err != nil { - return err - } - } + if err := waitForCloudhsmv2ClusterDeletion(cloudhsm2, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for CloudHSMv2 Cluster (%s) deletion: %s", d.Id(), err) } return nil @@ -361,3 +329,18 @@ func readCloudHsm2ClusterCertificates(cluster *cloudhsmv2.Cluster) []map[string] } return []map[string]interface{}{} } + +func waitForCloudhsmv2ClusterDeletion(conn *cloudhsmv2.CloudHSMV2, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{cloudhsmv2.ClusterStateDeleteInProgress}, + Target: []string{cloudhsmv2.ClusterStateDeleted}, + Refresh: resourceAwsCloudHsm2ClusterRefreshFunc(conn, id), + Timeout: timeout, + MinTimeout: 30 * time.Second, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_hsm.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_hsm.go index 0bb93ff97ec..64ed1f59476 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_hsm.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudhsm2_hsm.go @@ -5,11 +5,11 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudhsmv2" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) func resourceAwsCloudHsm2Hsm() *schema.Resource { @@ -100,17 +100,16 @@ func describeHsm(conn *cloudhsmv2.CloudHSMV2, hsmId string) (*cloudhsmv2.Hsm, er return hsm, nil } -func resourceAwsCloudHsm2HsmRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { +func resourceAwsCloudHsm2HsmRefreshFunc(conn *cloudhsmv2.CloudHSMV2, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - hsm, err := describeHsm(meta.(*AWSClient).cloudhsmv2conn, d.Id()) + hsm, err := describeHsm(conn, id) if hsm == nil { return 42, "destroyed", nil } if hsm.State != nil { - log.Printf("[DEBUG] CloudHSMv2 Cluster 
status (%s): %s", d.Id(), *hsm.State) + log.Printf("[DEBUG] CloudHSMv2 Cluster status (%s): %s", id, *hsm.State) } return hsm, aws.StringValue(hsm.State), err @@ -153,7 +152,7 @@ func resourceAwsCloudHsm2HsmCreate(d *schema.ResourceData, meta interface{}) err var output *cloudhsmv2.CreateHsmOutput - errRetry := resource.Retry(180*time.Second, func() *resource.RetryError { + err = resource.Retry(180*time.Second, func() *resource.RetryError { var err error output, err = cloudhsm2.CreateHsm(input) if err != nil { @@ -165,28 +164,18 @@ func resourceAwsCloudHsm2HsmCreate(d *schema.ResourceData, meta interface{}) err } return nil }) + if isResourceTimeoutError(err) { + output, err = cloudhsm2.CreateHsm(input) + } - if errRetry != nil { - return fmt.Errorf("error creating CloudHSM v2 HSM module: %s", errRetry) + if err != nil { + return fmt.Errorf("error creating CloudHSM v2 HSM module: %s", err) } d.SetId(aws.StringValue(output.Hsm.HsmId)) - log.Printf("[INFO] CloudHSMv2 HSM Id: %s", d.Id()) - log.Println("[INFO] Waiting for CloudHSMv2 HSM to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{cloudhsmv2.HsmStateCreateInProgress, "destroyed"}, - Target: []string{cloudhsmv2.HsmStateActive}, - Refresh: resourceAwsCloudHsm2HsmRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 30 * time.Second, - Delay: 30 * time.Second, - } - // Wait, catching any errors - _, errWait := stateConf.WaitForState() - if errWait != nil { - return fmt.Errorf("Error waiting for CloudHSMv2 HSM state to be \"ACTIVE\": %s", errWait) + if err := waitForCloudhsmv2HsmActive(cloudhsm2, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("error waiting for CloudHSMv2 HSM (%s) creation: %s", d.Id(), err) } return resourceAwsCloudHsm2HsmRead(d, meta) @@ -220,13 +209,13 @@ func resourceAwsCloudHsm2HsmDelete(d *schema.ResourceData, meta interface{}) err clusterId := d.Get("cluster_id").(string) log.Printf("[DEBUG] CloudHSMv2 HSM delete %s %s", clusterId, d.Id()) - - errRetry := resource.Retry(180*time.Second, func() *resource.RetryError { + input := &cloudhsmv2.DeleteHsmInput{ + ClusterId: aws.String(clusterId), + HsmId: aws.String(d.Id()), + } + err := resource.Retry(180*time.Second, func() *resource.RetryError { var err error - _, err = cloudhsm2.DeleteHsm(&cloudhsmv2.DeleteHsmInput{ - ClusterId: aws.String(clusterId), - HsmId: aws.String(d.Id()), - }) + _, err = cloudhsm2.DeleteHsm(input) if err != nil { if isAWSErr(err, cloudhsmv2.ErrCodeCloudHsmInternalFailureException, "request was rejected because of an AWS CloudHSM internal failure") { log.Printf("[DEBUG] CloudHSMv2 HSM re-try deleting %s", d.Id()) @@ -237,25 +226,46 @@ func resourceAwsCloudHsm2HsmDelete(d *schema.ResourceData, meta interface{}) err return nil }) - if errRetry != nil { - return fmt.Errorf("error deleting CloudHSM v2 HSM module (%s): %s", d.Id(), errRetry) + if isResourceTimeoutError(err) { + _, err = cloudhsm2.DeleteHsm(input) + } + if err != nil { + return fmt.Errorf("error deleting CloudHSM v2 HSM module (%s): %s", d.Id(), err) + } + + if err := waitForCloudhsmv2HsmDeletion(cloudhsm2, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for CloudHSMv2 HSM (%s) deletion: %s", d.Id(), err) } - log.Println("[INFO] Waiting for CloudHSMv2 HSM to be deleted") + return nil +} + +func waitForCloudhsmv2HsmActive(conn *cloudhsmv2.CloudHSMV2, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{cloudhsmv2.HsmStateCreateInProgress, "destroyed"}, + Target: []string{cloudhsmv2.HsmStateActive}, + Refresh: resourceAwsCloudHsm2HsmRefreshFunc(conn, id), + Timeout: timeout, + MinTimeout: 30 * time.Second, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForCloudhsmv2HsmDeletion(conn *cloudhsmv2.CloudHSMV2, id string, timeout time.Duration) error { stateConf := &resource.StateChangeConf{ Pending: []string{cloudhsmv2.HsmStateDeleteInProgress}, Target: []string{"destroyed"}, - Refresh: resourceAwsCloudHsm2HsmRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), + Refresh: resourceAwsCloudHsm2HsmRefreshFunc(conn, id), + Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 30 * time.Second, } - // Wait, catching any errors - _, errWait := stateConf.WaitForState() - if errWait != nil { - return fmt.Errorf("Error waiting for CloudHSMv2 HSM state to be \"DELETED\": %s", errWait) - } + _, err := stateConf.WaitForState() - return nil + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go index 1b9896acd0b..2678cffe231 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudTrail() *schema.Resource { @@ -187,8 +187,11 @@ func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + t, err = conn.CreateTrail(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating CloudTrail: %s", err) } log.Printf("[DEBUG] CloudTrail created: %s", t) @@ -358,8 +361,11 @@ func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + t, err = conn.UpdateTrail(&input) + } if err != nil { - return err + return fmt.Errorf("Error updating CloudTrail: %s", err) } if d.HasChange("tags") { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_dashboard.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_dashboard.go index 2a60e595fd1..03b7c95df03 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_dashboard.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_dashboard.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudWatchDashboard() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_permission.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_permission.go index 0382636be53..a2bd90192ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_permission.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_permission.go @@ -11,9 +11,9 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awsutil" events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudWatchEventPermission() *schema.Resource { @@ -99,37 +99,35 @@ func resourceAwsCloudWatchEventPermissionCreate(d *schema.ResourceData, meta int func resourceAwsCloudWatchEventPermissionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatcheventsconn input := events.DescribeEventBusInput{} - var policyDoc CloudWatchEventPermissionPolicyDoc + var output *events.DescribeEventBusOutput var policyStatement *CloudWatchEventPermissionPolicyStatement // Especially with concurrent PutPermission calls there can be a slight delay err := resource.Retry(1*time.Minute, func() *resource.RetryError { log.Printf("[DEBUG] Reading CloudWatch Events bus: %s", input) - debo, err := conn.DescribeEventBus(&input) + output, err := conn.DescribeEventBus(&input) if err != nil { return resource.NonRetryableError(fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error())) } - if debo.Policy == nil { - return resource.RetryableError(fmt.Errorf("CloudWatch Events permission %q not found", d.Id())) - } + policyStatement, err = getPolicyStatement(output, d.Id()) + return resource.RetryableError(err) + }) - err = json.Unmarshal([]byte(*debo.Policy), &policyDoc) - if err != nil { - return resource.NonRetryableError(fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error())) + if isResourceTimeoutError(err) { + output, err = conn.DescribeEventBus(&input) + if output != nil { + policyStatement, err = getPolicyStatement(output, d.Id()) } + } - policyStatement, err = findCloudWatchEventPermissionPolicyStatementByID(&policyDoc, d.Id()) - return resource.RetryableError(err) - }) + if isResourceNotFoundError(err) { + log.Printf("[WARN] %s", err) + d.SetId("") + return nil + } if err != nil { // Missing statement inside valid policy - if nfErr, ok := err.(*resource.NotFoundError); ok { - log.Printf("[WARN] %s", nfErr) - d.SetId("") - return nil - } - return err } @@ -146,7 +144,7 @@ func resourceAwsCloudWatchEventPermissionRead(d *schema.ResourceData, meta inter principalMap := policyStatement.Principal.(map[string]interface{}) policyARN, err := arn.Parse(principalMap["AWS"].(string)) if 
err != nil { - return fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) + return fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", d.Id(), err) } d.Set("principal", policyARN.AccountID) } @@ -155,6 +153,25 @@ func resourceAwsCloudWatchEventPermissionRead(d *schema.ResourceData, meta inter return nil } +func getPolicyStatement(output *events.DescribeEventBusOutput, statementID string) (*CloudWatchEventPermissionPolicyStatement, error) { + var policyDoc CloudWatchEventPermissionPolicyDoc + + if output == nil || output.Policy == nil { + return nil, &resource.NotFoundError{ + Message: fmt.Sprintf("CloudWatch Events permission %q not found "+ "in given results from DescribeEventBus", statementID), + LastResponse: output, + } + } + + err := json.Unmarshal([]byte(*output.Policy), &policyDoc) + if err != nil { + return nil, fmt.Errorf("Reading CloudWatch Events permission '%s' failed: %s", statementID, err) + } + + return findCloudWatchEventPermissionPolicyStatementByID(&policyDoc, statementID) +} + func resourceAwsCloudWatchEventPermissionUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatcheventsconn @@ -167,6 +184,11 @@ func resourceAwsCloudWatchEventPermissionUpdate(d *schema.ResourceData, meta int log.Printf("[DEBUG] Update CloudWatch Events permission: %s", input) _, err := conn.PutPermission(&input) + if isAWSErr(err, events.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] CloudWatch Events permission %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { return fmt.Errorf("Updating CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) } @@ -182,6 +204,9 @@ func resourceAwsCloudWatchEventPermissionDelete(d *schema.ResourceData, meta int log.Printf("[DEBUG] Delete CloudWatch Events permission: %s", input) _, err := conn.RemovePermission(&input) + if isAWSErr(err, events.ErrCodeResourceNotFoundException, "") { + return nil + } if err != nil { return fmt.Errorf("Deleting CloudWatch Events permission '%s' failed: %s", d.Id(), err.Error()) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go index 853830ed711..8fe12d87c77 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go @@ -3,16 +3,15 @@ package aws import ( "fmt" "log" - "regexp" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudWatchEventRule() *schema.Resource { @@ -99,22 +98,23 @@ func resourceAwsCloudWatchEventRuleCreate(d *schema.ResourceData, meta interface // IAM Roles take some time
to propagate var out *events.PutRuleOutput err = resource.Retry(30*time.Second, func() *resource.RetryError { - var err error out, err = conn.PutRule(input) - pattern := regexp.MustCompile(`cannot be assumed by principal '[a-z]+\.amazonaws\.com'\.$`) + + if isAWSErr(err, "ValidationException", "cannot be assumed by principal") { + log.Printf("[DEBUG] Retrying creation of CloudWatch Event Rule %q", *input.Name) + return resource.RetryableError(err) + } if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ValidationException" && pattern.MatchString(awsErr.Message()) { - log.Printf("[DEBUG] Retrying creation of CloudWatch Event Rule %q", *input.Name) - return resource.RetryableError(err) - } - } return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + out, err = conn.PutRule(input) + } + if err != nil { - return fmt.Errorf("Creating CloudWatch Event Rule failed: %s", err) + return fmt.Errorf("Creating CloudWatch Event Rule failed: %s", err) } d.Set("arn", out.RuleArn) @@ -197,18 +197,20 @@ func resourceAwsCloudWatchEventRuleUpdate(d *schema.ResourceData, meta interface // IAM Roles take some time to propagate err = resource.Retry(30*time.Second, func() *resource.RetryError { _, err := conn.PutRule(input) - pattern := regexp.MustCompile(`cannot be assumed by principal '[a-z]+\.amazonaws\.com'\.$`) + + if isAWSErr(err, "ValidationException", "cannot be assumed by principal") { + log.Printf("[DEBUG] Retrying update of CloudWatch Event Rule %q", *input.Name) + return resource.RetryableError(err) + } if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ValidationException" && pattern.MatchString(awsErr.Message()) { - log.Printf("[DEBUG] Retrying update of CloudWatch Event Rule %q", *input.Name) - return resource.RetryableError(err) - } - } return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutRule(input) + } + if err != nil { return fmt.Errorf("Updating CloudWatch Event Rule failed: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go index 5dc979a5faa..72810571ae6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go @@ -5,14 +5,15 @@ import ( "fmt" "log" "math" "regexp" + "strings" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudWatchEventTarget() *schema.Resource { @@ -22,6 +23,10 @@ func resourceAwsCloudWatchEventTarget() *schema.Resource { Update: resourceAwsCloudWatchEventTargetUpdate, Delete: resourceAwsCloudWatchEventTargetDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsCloudWatchEventTargetImport, + }, + Schema: map[string]*schema.Schema{
"rule": { Type: schema.TypeString, @@ -133,6 +138,7 @@ func resourceAwsCloudWatchEventTarget() *schema.Resource { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(1, math.MaxInt32), + Default: 1, }, "task_definition_arn": { Type: schema.TypeString, @@ -651,3 +657,19 @@ func flattenAwsCloudWatchInputTransformer(inputTransformer *events.InputTransfor result := []map[string]interface{}{config} return result } + +func resourceAwsCloudWatchEventTargetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.SplitN(d.Id(), "/", 2) + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("unexpected format (%q), expected /", d.Id()) + } + + ruleName := idParts[0] + targetName := idParts[1] + + d.Set("target_id", targetName) + d.Set("rule", ruleName) + d.SetId(ruleName + "-" + targetName) + + return []*schema.ResourceData{d}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go index e995cf62e16..2d8c3b50fc6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go @@ -2,14 +2,12 @@ package aws import ( "fmt" - "strings" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudWatchLogDestination() *schema.Resource { @@ -64,28 +62,28 @@ func resourceAwsCloudWatchLogDestinationPut(d *schema.ResourceData, meta interfa TargetArn: aws.String(target_arn), } - return resource.Retry(3*time.Minute, func() *resource.RetryError { - resp, err := conn.PutDestination(params) + var resp *cloudwatchlogs.PutDestinationOutput + var err error + err = resource.Retry(3*time.Minute, func() *resource.RetryError { + resp, err = conn.PutDestination(params) - if err == nil { - d.SetId(name) - d.Set("arn", *resp.Destination.Arn) - } - - awsErr, ok := err.(awserr.Error) - if !ok { + if isAWSErr(err, cloudwatchlogs.ErrCodeInvalidParameterException, "Could not deliver test message to specified") { return resource.RetryableError(err) } - - if awsErr.Code() == "InvalidParameterException" { - if strings.Contains(awsErr.Message(), "Could not deliver test message to specified") { - return resource.RetryableError(err) - } + if err != nil { return resource.NonRetryableError(err) } - - return resource.NonRetryableError(err) + return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.PutDestination(params) + } + if err != nil { + return fmt.Errorf("Error putting cloudwatch log destination: %s", err) + } + d.SetId(name) + d.Set("arn", resp.Destination.Arn) + return nil } func resourceAwsCloudWatchLogDestinationRead(d *schema.ResourceData, meta interface{}) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go index 0c77419c4bc..742030ba13c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go @@ -3,7 +3,7 @@ package aws import ( "fmt" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go index f8a7c863135..cf1808ab09a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go @@ -4,12 +4,13 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCloudWatchLogGroup() *schema.Resource { @@ -61,6 +62,7 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource { func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().CloudwatchlogsTags() var logGroupName string if v, ok := d.GetOk("name"); ok { @@ -81,9 +83,13 @@ func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{ params.KmsKeyId = aws.String(v.(string)) } + if len(tags) > 0 { + params.Tags = tags + } + _, err := conn.CreateLogGroup(params) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceAlreadyExistsException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceAlreadyExistsException { return fmt.Errorf("Creating CloudWatch Log Group failed: %s: The CloudWatch Log Group '%s' already exists.", err, d.Get("name").(string)) } return fmt.Errorf("Creating CloudWatch Log Group failed: %s '%s'", err, d.Get("name")) @@ -93,7 +99,20 @@ func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{ log.Println("[INFO] CloudWatch Log Group created") - return resourceAwsCloudWatchLogGroupUpdate(d, meta) + if v, ok := d.GetOk("retention_in_days"); ok { + input := cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(logGroupName), + RetentionInDays: aws.Int64(int64(v.(int))), + } + log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", logGroupName, input) + _, err = conn.PutRetentionPolicy(&input) + + if err != nil { + return err + } + } + + return resourceAwsCloudWatchLogGroupRead(d, meta) } func resourceAwsCloudWatchLogGroupRead(d 
*schema.ResourceData, meta interface{}) error { @@ -117,17 +136,15 @@ func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) d.Set("kms_key_id", lg.KmsKeyId) d.Set("retention_in_days", lg.RetentionInDays) - tags := make(map[string]string) - tagsOutput, err := conn.ListTagsLogGroup(&cloudwatchlogs.ListTagsLogGroupInput{ - LogGroupName: aws.String(d.Id()), - }) + tags, err := keyvaluetags.CloudwatchlogsListTags(conn, d.Id()) + if err != nil { - return fmt.Errorf("error listing CloudWatch Logs Group %q tags: %s", d.Id(), err) + return fmt.Errorf("error listing tags for CloudWatch Logs Group (%s): %s", d.Id(), err) } - if tagsOutput != nil { - tags = aws.StringValueMap(tagsOutput.Tags) + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } - d.Set("tags", tags) return nil } @@ -182,31 +199,10 @@ func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{ } if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffCloudWatchTags(o, n) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags from %s", name) - _, err := conn.UntagLogGroup(&cloudwatchlogs.UntagLogGroupInput{ - LogGroupName: aws.String(name), - Tags: remove, - }) - if err != nil { - return err - } - } + o, n := d.GetChange("tags") - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags on %s", name) - _, err := conn.TagLogGroup(&cloudwatchlogs.TagLogGroupInput{ - LogGroupName: aws.String(name), - Tags: create, - }) - if err != nil { - return err - } + if err := keyvaluetags.CloudwatchlogsUpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating CloudWatch Log Group (%s) tags: %s", d.Id(), err) } } @@ -234,23 +230,6 @@ func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{ return resourceAwsCloudWatchLogGroupRead(d, meta) } -func diffCloudWatchTags(oldTags map[string]interface{}, newTags map[string]interface{}) (map[string]*string, []*string) { - create := make(map[string]*string) - for k, v := range newTags { - create[k] = aws.String(v.(string)) - } - - var remove []*string - for t := range oldTags { - _, ok := create[t] - if !ok { - remove = append(remove, aws.String(t)) - } - } - - return create, remove -} - func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id()) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go index b9adb850d4b..3db85610fda 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go @@ -5,9 +5,9 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_resource_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_resource_policy.go index 7e7ceb8b5aa..0df52fb328d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_resource_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_resource_policy.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudWatchLogResourcePolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go index 666f7ad58a6..eff6ab89f37 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudWatchLogStream() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go index fa6985d57d5..3b7db5f73f2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCloudwatchLogSubscriptionFilter() *schema.Resource { @@ -54,6 +54,7 @@ func resourceAwsCloudwatchLogSubscriptionFilter() *schema.Resource { "distribution": { Type: schema.TypeString, Optional: true, + Default: cloudwatchlogs.DistributionByLogStream, }, }, } @@ -64,32 +65,32 @@ func resourceAwsCloudwatchLogSubscriptionFilterCreate(d *schema.ResourceData, me params := getAwsCloudWatchLogsSubscriptionFilterInput(d) log.Printf("[DEBUG] Creating SubscriptionFilter %#v", params) - return resource.Retry(5*time.Minute, 
func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.PutSubscriptionFilter(&params) - if err == nil { - d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) - log.Printf("[DEBUG] Cloudwatch logs subscription %q created", d.Id()) + if isAWSErr(err, cloudwatchlogs.ErrCodeInvalidParameterException, "Could not deliver test message to specified") { + return resource.RetryableError(err) } - - awsErr, ok := err.(awserr.Error) - if !ok { + if isAWSErr(err, cloudwatchlogs.ErrCodeInvalidParameterException, "Could not execute the lambda function") { return resource.RetryableError(err) } - - if awsErr.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Caught message: %q, code: %q: Retrying", awsErr.Message(), awsErr.Code()) - if strings.Contains(awsErr.Message(), "Could not deliver test message to specified") { - return resource.RetryableError(err) - } - if strings.Contains(awsErr.Message(), "Could not execute the lambda function") { - return resource.RetryableError(err) - } - resource.NonRetryableError(err) + if err != nil { + return resource.NonRetryableError(err) } - - return resource.NonRetryableError(err) + return nil }) + + if isResourceTimeoutError(err) { + _, err = conn.PutSubscriptionFilter(&params) + } + + if err != nil { + return fmt.Errorf("Error creating Cloudwatch log subscription filter: %s", err) + } + + d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) + log.Printf("[DEBUG] Cloudwatch logs subscription %q created", d.Id()) + return nil } func resourceAwsCloudwatchLogSubscriptionFilterUpdate(d *schema.ResourceData, meta interface{}) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go index 8a53ad7aea7..72b5d2ded7a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go @@ -4,11 +4,11 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCloudWatchMetricAlarm() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go index 0ebd7f80d60..3fa194721fc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsCloudWatchMetricAlarmMigrateState( diff --git
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go index f0cbf27cb5f..c2f6ffb3d5e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go @@ -10,11 +10,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCodeBuildProject() *schema.Resource { @@ -33,14 +34,24 @@ func resourceAwsCodeBuildProject() *schema.Resource { Computed: true, }, "artifacts": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "artifact_identifier": { + Type: schema.TypeString, + Optional: true, + }, "name": { Type: schema.TypeString, Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == d.Get("name") && new == "" { + return true + } + return false + }, }, "encryption_disabled": { Type: schema.TypeBool, @@ -54,6 +65,12 @@ func resourceAwsCodeBuildProject() *schema.Resource { "namespace_type": { Type: schema.TypeString, Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if d.Get("artifacts.0.type") == codebuild.ArtifactsTypeS3 { + return old == codebuild.ArtifactNamespaceNone && new == "" + } + return false + }, ValidateFunc: validation.StringInSlice([]string{ codebuild.ArtifactNamespaceNone, codebuild.ArtifactNamespaceBuildId, @@ -62,6 +79,19 @@ func resourceAwsCodeBuildProject() *schema.Resource { "packaging": { Type: schema.TypeString, Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + switch d.Get("artifacts.0.type") { + case codebuild.ArtifactsTypeCodepipeline: + return new == "" + case codebuild.ArtifactsTypeS3: + return old == codebuild.ArtifactPackagingNone && new == "" + } + return false + }, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.ArtifactPackagingNone, + codebuild.ArtifactPackagingZip, + }, false), }, "path": { Type: schema.TypeString, @@ -76,9 +106,13 @@ func resourceAwsCodeBuildProject() *schema.Resource { codebuild.ArtifactsTypeNoArtifacts, }, false), }, + "override_artifact_name": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, }, - Set: resourceAwsCodeBuildProjectArtifactsHash, }, "cache": { Type: schema.TypeList, @@ -99,12 +133,18 @@ func resourceAwsCodeBuildProject() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{ codebuild.CacheTypeNoCache, codebuild.CacheTypeS3, + codebuild.CacheTypeLocal, }, false), }, 
"location": { Type: schema.TypeString, Optional: true, }, + "modes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, @@ -191,10 +231,96 @@ func resourceAwsCodeBuildProject() *schema.Resource { Optional: true, ValidateFunc: validation.StringMatch(regexp.MustCompile(`\.(pem|zip)$`), "must end in .pem or .zip"), }, + "registry_credential": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "credential": { + Type: schema.TypeString, + Required: true, + }, + "credential_provider": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.CredentialProviderTypeSecretsManager, + }, false), + }, + }, + }, + }, }, }, Set: resourceAwsCodeBuildProjectEnvironmentHash, }, + "logs_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloudwatch_logs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + Default: codebuild.LogsConfigStatusTypeEnabled, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.LogsConfigStatusTypeDisabled, + codebuild.LogsConfigStatusTypeEnabled, + }, false), + }, + "group_name": { + Type: schema.TypeString, + Optional: true, + }, + "stream_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + }, + "s3_logs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeString, + Optional: true, + Default: codebuild.LogsConfigStatusTypeDisabled, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.LogsConfigStatusTypeDisabled, + codebuild.LogsConfigStatusTypeEnabled, + }, false), + }, + "location": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsCodeBuildProjectS3LogsLocation, + }, + "encryption_disabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + }, + }, + }, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + }, "name": { Type: schema.TypeString, Required: true, @@ -227,10 +353,21 @@ func resourceAwsCodeBuildProject() *schema.Resource { codebuild.ArtifactNamespaceNone, codebuild.ArtifactNamespaceBuildId, }, false), + Default: codebuild.ArtifactNamespaceNone, + }, + "override_artifact_name": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, "packaging": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.ArtifactPackagingNone, + codebuild.ArtifactPackagingZip, + }, false), + Default: codebuild.ArtifactPackagingNone, }, "path": { Type: schema.TypeString, @@ -244,9 +381,7 @@ func resourceAwsCodeBuildProject() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - codebuild.ArtifactsTypeCodepipeline, codebuild.ArtifactsTypeS3, - codebuild.ArtifactsTypeNoArtifacts, }, false), }, }, @@ -436,7 +571,7 @@ func resourceAwsCodeBuildProject() *schema.Resource { func(diff *schema.ResourceDiff, v interface{}) error { // Plan time validation for cache location cacheType, cacheTypeOk := diff.GetOk("cache.0.type") - if !cacheTypeOk || 
cacheType.(string) == codebuild.CacheTypeNoCache { + if !cacheTypeOk || cacheType.(string) == codebuild.CacheTypeNoCache || cacheType.(string) == codebuild.CacheTypeLocal { return nil } if v, ok := diff.GetOk("cache.0.location"); ok && v.(string) != "" { @@ -456,6 +591,7 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) projectArtifacts := expandProjectArtifacts(d) projectSecondaryArtifacts := expandProjectSecondaryArtifacts(d) projectSecondarySources := expandProjectSecondarySources(d) + projectLogsConfig := expandProjectLogsConfig(d) if aws.StringValue(projectSource.Type) == codebuild.SourceTypeNoSource { if aws.StringValue(projectSource.Buildspec) == "" { @@ -474,6 +610,8 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) Artifacts: &projectArtifacts, SecondaryArtifacts: projectSecondaryArtifacts, SecondarySources: projectSecondarySources, + LogsConfig: projectLogsConfig, + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().CodebuildTags(), } if v, ok := d.GetOk("cache"); ok { @@ -504,10 +642,6 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) params.BadgeEnabled = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("tags"); ok { - params.Tags = tagsFromMapCodeBuild(v.(map[string]interface{})) - } - var resp *codebuild.CreateProjectOutput // Handle IAM eventual consistency err := resource.Retry(5*time.Minute, func() *resource.RetryError { @@ -517,7 +651,7 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) if err != nil { // InvalidInputException: CodeBuild is not authorized to perform // InvalidInputException: Not authorized to perform DescribeSecurityGroups - if isAWSErr(err, "InvalidInputException", "ot authorized to perform") { + if isAWSErr(err, codebuild.ErrCodeInvalidInputException, "ot authorized to perform") { return resource.RetryableError(err) } @@ -525,9 +659,11 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) } return nil - }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateProject(params) + } if err != nil { return fmt.Errorf("Error creating CodeBuild project: %s", err) } @@ -555,7 +691,7 @@ func expandProjectSecondaryArtifacts(d *schema.ResourceData) []*codebuild.Projec } func expandProjectArtifacts(d *schema.ResourceData) codebuild.ProjectArtifacts { - configs := d.Get("artifacts").(*schema.Set).List() + configs := d.Get("artifacts").([]interface{}) data := configs[0].(map[string]interface{}) return expandProjectArtifactData(data) @@ -590,6 +726,10 @@ func expandProjectArtifactData(data map[string]interface{}) codebuild.ProjectArt projectArtifacts.NamespaceType = aws.String(data["namespace_type"].(string)) } + if v, ok := data["override_artifact_name"]; ok { + projectArtifacts.OverrideArtifactName = aws.Bool(v.(bool)) + } + if data["packaging"].(string) != "" { projectArtifacts.Packaging = aws.String(data["packaging"].(string)) } @@ -614,6 +754,13 @@ func expandProjectCache(s []interface{}) *codebuild.ProjectCache { projectCache.Location = aws.String(v.(string)) } + if cacheType := data["type"]; cacheType == codebuild.CacheTypeLocal { + if modes, modesOk := data["modes"]; modesOk { + modesStrings := modes.([]interface{}) + projectCache.Modes = expandStringList(modesStrings) + } + } + return projectCache } @@ -646,6 +793,22 @@ func expandProjectEnvironment(d *schema.ResourceData) *codebuild.ProjectEnvironm projectEnv.ImagePullCredentialsType = aws.String(v.(string)) } + if v, ok 
:= envConfig["registry_credential"]; ok && len(v.([]interface{})) > 0 { + config := v.([]interface{})[0].(map[string]interface{}) + + projectRegistryCredential := &codebuild.RegistryCredential{} + + if v, ok := config["credential"]; ok && v.(string) != "" { + projectRegistryCredential.Credential = aws.String(v.(string)) + } + + if v, ok := config["credential_provider"]; ok && v.(string) != "" { + projectRegistryCredential.CredentialProvider = aws.String(v.(string)) + } + + projectEnv.RegistryCredential = projectRegistryCredential + } + if v := envConfig["environment_variable"]; v != nil { envVariables := v.([]interface{}) if len(envVariables) > 0 { @@ -678,6 +841,92 @@ func expandProjectEnvironment(d *schema.ResourceData) *codebuild.ProjectEnvironm return projectEnv } +func expandProjectLogsConfig(d *schema.ResourceData) *codebuild.LogsConfig { + logsConfig := &codebuild.LogsConfig{} + + if v, ok := d.GetOk("logs_config"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + configList := v.([]interface{}) + data := configList[0].(map[string]interface{}) + + if v, ok := data["cloudwatch_logs"]; ok { + logsConfig.CloudWatchLogs = expandCodeBuildCloudWatchLogsConfig(v.([]interface{})) + } + + if v, ok := data["s3_logs"]; ok { + logsConfig.S3Logs = expandCodeBuildS3LogsConfig(v.([]interface{})) + } + } + + if logsConfig.CloudWatchLogs == nil { + logsConfig.CloudWatchLogs = &codebuild.CloudWatchLogsConfig{ + Status: aws.String(codebuild.LogsConfigStatusTypeEnabled), + } + } + + if logsConfig.S3Logs == nil { + logsConfig.S3Logs = &codebuild.S3LogsConfig{ + Status: aws.String(codebuild.LogsConfigStatusTypeDisabled), + } + } + + return logsConfig +} + +func expandCodeBuildCloudWatchLogsConfig(configList []interface{}) *codebuild.CloudWatchLogsConfig { + if len(configList) == 0 || configList[0] == nil { + return nil + } + + data := configList[0].(map[string]interface{}) + + status := data["status"].(string) + + cloudWatchLogsConfig := &codebuild.CloudWatchLogsConfig{ + Status: aws.String(status), + } + + if v, ok := data["group_name"]; ok { + groupName := v.(string) + if len(groupName) > 0 { + cloudWatchLogsConfig.GroupName = aws.String(groupName) + } + } + + if v, ok := data["stream_name"]; ok { + streamName := v.(string) + if len(streamName) > 0 { + cloudWatchLogsConfig.StreamName = aws.String(streamName) + } + } + + return cloudWatchLogsConfig +} + +func expandCodeBuildS3LogsConfig(configList []interface{}) *codebuild.S3LogsConfig { + if len(configList) == 0 || configList[0] == nil { + return nil + } + + data := configList[0].(map[string]interface{}) + + status := data["status"].(string) + + s3LogsConfig := &codebuild.S3LogsConfig{ + Status: aws.String(status), + } + + if v, ok := data["location"]; ok { + location := v.(string) + if len(location) > 0 { + s3LogsConfig.Location = aws.String(location) + } + } + + s3LogsConfig.EncryptionDisabled = aws.Bool(data["encryption_disabled"].(bool)) + + return s3LogsConfig +} + func expandCodeBuildVpcConfig(rawVpcConfig []interface{}) *codebuild.VpcConfig { vpcConfig := codebuild.VpcConfig{} if len(rawVpcConfig) == 0 || rawVpcConfig[0] == nil { @@ -777,31 +1026,35 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e project := resp.Projects[0] if err := d.Set("artifacts", flattenAwsCodeBuildProjectArtifacts(project.Artifacts)); err != nil { - return err + return fmt.Errorf("error setting artifacts: %s", err) } if err := d.Set("environment", schema.NewSet(resourceAwsCodeBuildProjectEnvironmentHash, 
flattenAwsCodeBuildProjectEnvironment(project.Environment))); err != nil { - return err + return fmt.Errorf("error setting environment: %s", err) } if err := d.Set("cache", flattenAwsCodebuildProjectCache(project.Cache)); err != nil { - return err + return fmt.Errorf("error setting cache: %s", err) + } + + if err := d.Set("logs_config", flattenAwsCodeBuildLogsConfig(project.LogsConfig)); err != nil { + return fmt.Errorf("error setting logs_config: %s", err) } if err := d.Set("secondary_artifacts", flattenAwsCodeBuildProjectSecondaryArtifacts(project.SecondaryArtifacts)); err != nil { - return err + return fmt.Errorf("error setting secondary_artifacts: %s", err) } if err := d.Set("secondary_sources", flattenAwsCodeBuildProjectSecondarySources(project.SecondarySources)); err != nil { - return err + return fmt.Errorf("error setting secondary_sources: %s", err) } if err := d.Set("source", flattenAwsCodeBuildProjectSource(project.Source)); err != nil { - return err + return fmt.Errorf("error setting source: %s", err) } if err := d.Set("vpc_config", flattenAwsCodeBuildVpcConfig(project.VpcConfig)); err != nil { - return err + return fmt.Errorf("error setting vpc_config: %s", err) } d.Set("arn", project.Arn) @@ -818,8 +1071,8 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e d.Set("badge_url", "") } - if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil { - return err + if err := d.Set("tags", keyvaluetags.CodebuildKeyValueTags(project.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil @@ -861,6 +1114,11 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{}) params.VpcConfig = expandCodeBuildVpcConfig(d.Get("vpc_config").([]interface{})) } + if d.HasChange("logs_config") { + logsConfig := expandProjectLogsConfig(d) + params.LogsConfig = logsConfig + } + if d.HasChange("cache") { if v, ok := d.GetOk("cache"); ok { params.Cache = expandProjectCache(v.([]interface{})) @@ -893,7 +1151,7 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{}) // The documentation clearly says "The replacement set of tags for this build project." // But its a slice of pointers so if not set for every update, they get removed. 
- params.Tags = tagsFromMapCodeBuild(d.Get("tags").(map[string]interface{})) + params.Tags = keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().CodebuildTags() // Handle IAM eventual consistency err := resource.Retry(1*time.Minute, func() *resource.RetryError { @@ -903,7 +1161,7 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{}) if err != nil { // InvalidInputException: CodeBuild is not authorized to perform // InvalidInputException: Not authorized to perform DescribeSecurityGroups - if isAWSErr(err, "InvalidInputException", "ot authorized to perform") { + if isAWSErr(err, codebuild.ErrCodeInvalidInputException, "ot authorized to perform") { return resource.RetryableError(err) } @@ -911,9 +1169,11 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{}) } return nil - }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateProject(params) + } if err != nil { return fmt.Errorf( "[ERROR] Error updating CodeBuild project (%s): %s", @@ -932,30 +1192,67 @@ func resourceAwsCodeBuildProjectDelete(d *schema.ResourceData, meta interface{}) return err } -func flattenAwsCodeBuildProjectSecondaryArtifacts(artifactsList []*codebuild.ProjectArtifacts) *schema.Set { - artifactSet := schema.Set{ - F: resourceAwsCodeBuildProjectArtifactsHash, +func flattenAwsCodeBuildLogsConfig(logsConfig *codebuild.LogsConfig) []interface{} { + if logsConfig == nil { + return []interface{}{} } - for _, artifacts := range artifactsList { - artifactSet.Add(flattenAwsCodeBuildProjectArtifactsData(*artifacts)) + values := map[string]interface{}{} + + if v := logsConfig.CloudWatchLogs; v != nil { + values["cloudwatch_logs"] = flattenAwsCodeBuildCloudWatchLogs(v) } - return &artifactSet + + if v := logsConfig.S3Logs; v != nil { + values["s3_logs"] = flattenAwsCodeBuildS3Logs(v) + } + + return []interface{}{values} } -func flattenAwsCodeBuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) *schema.Set { +func flattenAwsCodeBuildCloudWatchLogs(cloudWatchLogsConfig *codebuild.CloudWatchLogsConfig) []interface{} { + values := map[string]interface{}{} - artifactSet := schema.Set{ - F: resourceAwsCodeBuildProjectArtifactsHash, + if cloudWatchLogsConfig == nil { + values["status"] = codebuild.LogsConfigStatusTypeDisabled + } else { + values["status"] = aws.StringValue(cloudWatchLogsConfig.Status) + values["group_name"] = aws.StringValue(cloudWatchLogsConfig.GroupName) + values["stream_name"] = aws.StringValue(cloudWatchLogsConfig.StreamName) } - values := flattenAwsCodeBuildProjectArtifactsData(*artifacts) + return []interface{}{values} +} - artifactSet.Add(values) +func flattenAwsCodeBuildS3Logs(s3LogsConfig *codebuild.S3LogsConfig) []interface{} { + values := map[string]interface{}{} + if s3LogsConfig == nil { + values["status"] = codebuild.LogsConfigStatusTypeDisabled + } else { + values["status"] = aws.StringValue(s3LogsConfig.Status) + values["location"] = aws.StringValue(s3LogsConfig.Location) + values["encryption_disabled"] = aws.BoolValue(s3LogsConfig.EncryptionDisabled) + } + + return []interface{}{values} +} + +func flattenAwsCodeBuildProjectSecondaryArtifacts(artifactsList []*codebuild.ProjectArtifacts) *schema.Set { + artifactSet := schema.Set{ + F: resourceAwsCodeBuildProjectArtifactsHash, + } + + for _, artifacts := range artifactsList { + artifactSet.Add(flattenAwsCodeBuildProjectArtifactsData(*artifacts)) + } return &artifactSet } +func flattenAwsCodeBuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) []interface{} { + 
return []interface{}{flattenAwsCodeBuildProjectArtifactsData(*artifacts)} +} + func flattenAwsCodeBuildProjectArtifactsData(artifacts codebuild.ProjectArtifacts) map[string]interface{} { values := map[string]interface{}{} @@ -968,6 +1265,11 @@ func flattenAwsCodeBuildProjectArtifactsData(artifacts codebuild.ProjectArtifact if artifacts.EncryptionDisabled != nil { values["encryption_disabled"] = *artifacts.EncryptionDisabled } + + if artifacts.OverrideArtifactName != nil { + values["override_artifact_name"] = *artifacts.OverrideArtifactName + } + if artifacts.Location != nil { values["location"] = *artifacts.Location } @@ -998,6 +1300,7 @@ func flattenAwsCodebuildProjectCache(cache *codebuild.ProjectCache) []interface{ values := map[string]interface{}{ "location": aws.StringValue(cache.Location), "type": aws.StringValue(cache.Type), + "modes": aws.StringValueSlice(cache.Modes), } return []interface{}{values} @@ -1013,12 +1316,26 @@ func flattenAwsCodeBuildProjectEnvironment(environment *codebuild.ProjectEnviron envConfig["privileged_mode"] = *environment.PrivilegedMode envConfig["image_pull_credentials_type"] = *environment.ImagePullCredentialsType + envConfig["registry_credential"] = flattenAwsCodebuildRegistryCredential(environment.RegistryCredential) + if environment.EnvironmentVariables != nil { envConfig["environment_variable"] = environmentVariablesToMap(environment.EnvironmentVariables) } return []interface{}{envConfig} +} + +func flattenAwsCodebuildRegistryCredential(registryCredential *codebuild.RegistryCredential) []interface{} { + if registryCredential == nil { + return []interface{}{} + } + + values := map[string]interface{}{ + "credential": aws.StringValue(registryCredential.Credential), + "credential_provider": aws.StringValue(registryCredential.CredentialProvider), + } + return []interface{}{values} } func flattenAwsCodeBuildProjectSecondarySources(sourceList []*codebuild.ProjectSource) []interface{} { @@ -1077,11 +1394,38 @@ func resourceAwsCodeBuildProjectArtifactsHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) - if v, ok := m["artifact_identifier"]; ok { - buf.WriteString(fmt.Sprintf("%s:", v.(string))) + buf.WriteString(fmt.Sprintf("%s-", v.(string))) } + + if v, ok := m["encryption_disabled"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["location"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["namespace_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["override_artifact_name"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["packaging"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["path"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) } @@ -1103,6 +1447,17 @@ func resourceAwsCodeBuildProjectEnvironmentHash(v interface{}) int { if v, ok := m["certificate"]; ok && v.(string) != "" { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } + if v, ok := m["registry_credential"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + m := v.([]interface{})[0].(map[string]interface{}) + + if v, ok := m["credential"]; ok && v.(string) != "" { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["credential_provider"]; ok && v.(string) != "" { + 
buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + } for _, e := range environmentVariables { if e != nil { // Old statefiles might have nil values in them ev := e.(map[string]interface{}) @@ -1207,3 +1562,21 @@ func validateAwsCodeBuildProjectName(v interface{}, k string) (ws []string, erro return } + +func validateAwsCodeBuildProjectS3LogsLocation(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if _, errs := validateArn(v, k); len(errs) == 0 { + errors = append(errors, errs...) + return + } + + simplePattern := `^[a-z0-9][^/]*\/(.+)$` + if !regexp.MustCompile(simplePattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q does not match pattern (%q): %q", + k, simplePattern, value)) + } + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_source_credential.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_source_credential.go new file mode 100644 index 00000000000..624ad886b62 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_source_credential.go @@ -0,0 +1,132 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsCodeBuildSourceCredential() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeBuildSourceCredentialCreate, + Read: resourceAwsCodeBuildSourceCredentialRead, + Delete: resourceAwsCodeBuildSourceCredentialDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "auth_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.AuthTypeBasicAuth, + codebuild.AuthTypePersonalAccessToken, + }, false), + }, + "server_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.ServerTypeGithub, + codebuild.ServerTypeBitbucket, + codebuild.ServerTypeGithubEnterprise, + }, false), + }, + "token": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + "user_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsCodeBuildSourceCredentialCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + authType := d.Get("auth_type").(string) + + createOpts := &codebuild.ImportSourceCredentialsInput{ + AuthType: aws.String(authType), + ServerType: aws.String(d.Get("server_type").(string)), + Token: aws.String(d.Get("token").(string)), + } + + if attr, ok := d.GetOk("user_name"); ok && authType == codebuild.AuthTypeBasicAuth { + createOpts.Username = aws.String(attr.(string)) + } + + resp, err := conn.ImportSourceCredentials(createOpts) + if err != nil { + return fmt.Errorf("Error importing source credentials: %s", err) + } + + d.SetId(aws.StringValue(resp.Arn)) + + return resourceAwsCodeBuildSourceCredentialRead(d, meta) +} + +func resourceAwsCodeBuildSourceCredentialRead(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).codebuildconn + + resp, err := conn.ListSourceCredentials(&codebuild.ListSourceCredentialsInput{}) + if err != nil { + return fmt.Errorf("Error listing CodeBuild Source Credentials: %s", err) + } + + var info *codebuild.SourceCredentialsInfo + + for _, sourceCredentialsInfo := range resp.SourceCredentialsInfos { + if d.Id() == aws.StringValue(sourceCredentialsInfo.Arn) { + info = sourceCredentialsInfo + break + } + } + + if info == nil { + log.Printf("[WARN] CodeBuild Source Credential (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("arn", info.Arn) + d.Set("auth_type", info.AuthType) + d.Set("server_type", info.ServerType) + + return nil +} + +func resourceAwsCodeBuildSourceCredentialDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + deleteOpts := &codebuild.DeleteSourceCredentialsInput{ + Arn: aws.String(d.Id()), + } + + if _, err := conn.DeleteSourceCredentials(deleteOpts); err != nil { + if isAWSErr(err, codebuild.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting Source Credentials (%s): %s", d.Id(), err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_webhook.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_webhook.go index ba81ff3214f..eba37fac8aa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_webhook.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_webhook.go @@ -1,12 +1,15 @@ package aws import ( + "bytes" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCodeBuildWebhook() *schema.Resource { @@ -27,8 +30,47 @@ func resourceAwsCodeBuildWebhook() *schema.Resource { ForceNew: true, }, "branch_filter": { - Type: schema.TypeString, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filter_group"}, + }, + "filter_group": { + Type: schema.TypeSet, Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + codebuild.WebhookFilterTypeEvent, + codebuild.WebhookFilterTypeActorAccountId, + codebuild.WebhookFilterTypeBaseRef, + codebuild.WebhookFilterTypeFilePath, + codebuild.WebhookFilterTypeHeadRef, + }, false), + }, + "exclude_matched_pattern": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "pattern": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + Set: resourceAwsCodeBuildWebhookFilterHash, + ConflictsWith: []string{"branch_filter"}, }, "payload_url": { Type: schema.TypeString, @@ -51,7 +93,8 @@ func resourceAwsCodeBuildWebhookCreate(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).codebuildconn input := &codebuild.CreateWebhookInput{ - 
ProjectName: aws.String(d.Get("project_name").(string)), + FilterGroups: expandWebhookFilterGroups(d), } // The CodeBuild API requires this to be non-empty if defined @@ -73,6 +116,42 @@ func resourceAwsCodeBuildWebhookCreate(d *schema.ResourceData, meta interface{}) return resourceAwsCodeBuildWebhookRead(d, meta) } +func expandWebhookFilterGroups(d *schema.ResourceData) [][]*codebuild.WebhookFilter { + configs := d.Get("filter_group").(*schema.Set).List() + + webhookFilters := make([][]*codebuild.WebhookFilter, 0) + + if len(configs) == 0 { + return nil + } + + for _, config := range configs { + filters := expandWebhookFilterData(config.(map[string]interface{})) + webhookFilters = append(webhookFilters, filters) + } + + return webhookFilters +} + +func expandWebhookFilterData(data map[string]interface{}) []*codebuild.WebhookFilter { + filters := make([]*codebuild.WebhookFilter, 0) + + filterConfigs := data["filter"].([]interface{}) + + for i, filterConfig := range filterConfigs { + filter := filterConfig.(map[string]interface{}) + filters = append(filters, &codebuild.WebhookFilter{ + Type: aws.String(filter["type"].(string)), + ExcludeMatchedPattern: aws.Bool(filter["exclude_matched_pattern"].(bool)), + }) + if v := filter["pattern"]; v != nil { + filters[i].Pattern = aws.String(v.(string)) + } + } + + return filters +} + func resourceAwsCodeBuildWebhookRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codebuildconn @@ -101,6 +180,7 @@ func resourceAwsCodeBuildWebhookRead(d *schema.ResourceData, meta interface{}) e } d.Set("branch_filter", project.Webhook.BranchFilter) + d.Set("filter_group", flattenAwsCodeBuildWebhookFilterGroups(project.Webhook.FilterGroups)) d.Set("payload_url", project.Webhook.PayloadUrl) d.Set("project_name", project.Name) d.Set("url", project.Webhook.Url) @@ -112,11 +192,22 @@ func resourceAwsCodeBuildWebhookRead(d *schema.ResourceData, meta interface{}) e func resourceAwsCodeBuildWebhookUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codebuildconn - _, err := conn.UpdateWebhook(&codebuild.UpdateWebhookInput{ - ProjectName: aws.String(d.Id()), - BranchFilter: aws.String(d.Get("branch_filter").(string)), - RotateSecret: aws.Bool(false), - }) + var err error + filterGroups := expandWebhookFilterGroups(d) + + if len(filterGroups) >= 1 { + _, err = conn.UpdateWebhook(&codebuild.UpdateWebhookInput{ + ProjectName: aws.String(d.Id()), + FilterGroups: filterGroups, + RotateSecret: aws.Bool(false), + }) + } else { + _, err = conn.UpdateWebhook(&codebuild.UpdateWebhookInput{ + ProjectName: aws.String(d.Id()), + BranchFilter: aws.String(d.Get("branch_filter").(string)), + RotateSecret: aws.Bool(false), + }) + } if err != nil { return err @@ -141,3 +232,47 @@ func resourceAwsCodeBuildWebhookDelete(d *schema.ResourceData, meta interface{}) return nil } + +func flattenAwsCodeBuildWebhookFilterGroups(filterList [][]*codebuild.WebhookFilter) *schema.Set { + filterSet := schema.Set{ + F: resourceAwsCodeBuildWebhookFilterHash, + } + + for _, filters := range filterList { + filterSet.Add(flattenAwsCodeBuildWebhookFilterData(filters)) + } + return &filterSet +} + +func resourceAwsCodeBuildWebhookFilterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + for _, g := range m { + for _, f := range g.([]interface{}) { + r := f.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", r["type"].(string))) + buf.WriteString(fmt.Sprintf("%s-", r["pattern"].(string))) + 
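(The expandWebhookFilterGroups/expandWebhookFilterData pair above turns the filter_group set into CodeBuild's [][]*codebuild.WebhookFilter shape; per the CodeBuild API, the webhook fires when every filter in any one group matches, so groups are OR'd alternatives and filters within a group are AND'd conditions. A hedged sketch of the resulting structure, built directly rather than from ResourceData:)

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/codebuild"
    )

    func main() {
    	// One filter group: trigger on PUSH events whose head ref matches a
    	// release-branch pattern. Additional inner slices would be OR'd
    	// alternatives, exactly what expandWebhookFilterGroups produces.
    	groups := [][]*codebuild.WebhookFilter{
    		{
    			{
    				Type:    aws.String(codebuild.WebhookFilterTypeEvent),
    				Pattern: aws.String("PUSH"),
    			},
    			{
    				Type:                  aws.String(codebuild.WebhookFilterTypeHeadRef),
    				Pattern:               aws.String(`refs/heads/release/.*`),
    				ExcludeMatchedPattern: aws.Bool(false),
    			},
    		},
    	}
    	fmt.Println(len(groups), "filter group(s) for CreateWebhookInput.FilterGroups")
    }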
buf.WriteString(fmt.Sprintf("%q", r["exclude_matched_pattern"])) + } + } + + return hashcode.String(buf.String()) +} + +func flattenAwsCodeBuildWebhookFilterData(filters []*codebuild.WebhookFilter) map[string]interface{} { + values := map[string]interface{}{} + ff := make([]interface{}, 0) + + for _, f := range filters { + ff = append(ff, map[string]interface{}{ + "type": *f.Type, + "pattern": *f.Pattern, + "exclude_matched_pattern": *f.ExcludeMatchedPattern, + }) + } + + values["filter"] = ff + + return values +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go index 1cb77a1d9fc..f3be55328bc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codecommit" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCodeCommitRepository() *schema.Resource { @@ -58,6 +58,7 @@ func resourceAwsCodeCommitRepository() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "tags": tagsSchema(), }, } } @@ -68,6 +69,7 @@ func resourceAwsCodeCommitRepositoryCreate(d *schema.ResourceData, meta interfac input := &codecommit.CreateRepositoryInput{ RepositoryName: aws.String(d.Get("repository_name").(string)), RepositoryDescription: aws.String(d.Get("description").(string)), + Tags: tagsFromMapCodeCommit(d.Get("tags").(map[string]interface{})), } out, err := conn.CreateRepository(input) @@ -101,6 +103,11 @@ func resourceAwsCodeCommitRepositoryUpdate(d *schema.ResourceData, meta interfac } } + if !d.IsNewResource() { + if err := setTagsCodeCommit(conn, d); err != nil { + return fmt.Errorf("error updating CodeCommit Repository tags for %s: %s", d.Id(), err) + } + } return resourceAwsCodeCommitRepositoryRead(d, meta) } @@ -135,6 +142,17 @@ func resourceAwsCodeCommitRepositoryRead(d *schema.ResourceData, meta interface{ } } + // List tags + tagList, err := conn.ListTagsForResource(&codecommit.ListTagsForResourceInput{ + ResourceArn: out.RepositoryMetadata.Arn, + }) + if err != nil { + return fmt.Errorf("error listing CodeCommit Repository tags for %s: %s", d.Id(), err) + } + if err := d.Set("tags", tagsToMapCodeCommit(tagList.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go index 2435bb16eb3..416840d60f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codecommit" - 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCodeCommitTrigger() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go index 09c48c655e0..4ab000c6aec 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go @@ -5,12 +5,12 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCodeDeployApp() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go index 96e4fbe084f..0290d81d492 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCodeDeployDeploymentConfig() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go index b4af8e082d5..7e12fc6113d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go @@ -9,10 +9,10 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -555,8 +555,11 @@ func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta int return handleCreateError(err) }) + if isResourceTimeoutError(err) { 
+ resp, err = conn.CreateDeploymentGroup(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating CodeDeploy deployment group: %s", err) } d.SetId(*resp.DeploymentGroupId) @@ -732,8 +735,11 @@ func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta int return handleUpdateError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateDeploymentGroup(&input) + } if err != nil { - return err + return fmt.Errorf("Error updating CodeDeploy deployment group: %s", err) } return resourceAwsCodeDeployDeploymentGroupRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go index 9bd8a0d46a4..226cb381636 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/codepipeline" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCodePipeline() *schema.Resource { @@ -161,6 +161,7 @@ }, }, }, + "tags": tagsSchema(), }, } } @@ -169,6 +170,7 @@ func resourceAwsCodePipelineCreate(d *schema.ResourceData, meta interface{}) err conn := meta.(*AWSClient).codepipelineconn params := &codepipeline.CreatePipelineInput{ Pipeline: expandAwsCodePipeline(d), + Tags: tagsFromMapCodePipeline(d.Get("tags").(map[string]interface{})), } var resp *codepipeline.CreatePipelineOutput @@ -183,6 +185,9 @@ return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + resp, err = conn.CreatePipeline(params) + } if err != nil { return fmt.Errorf("Error creating CodePipeline: %s", err) } @@ -447,6 +452,11 @@ d.Set("arn", metadata.PipelineArn) d.Set("name", pipeline.Name) d.Set("role_arn", pipeline.RoleArn) + + if err := saveTagsCodePipeline(conn, d); err != nil { + return err + } + return nil } @@ -465,6 +475,10 @@ d.Id(), err) } + + if err := setTagsCodePipeline(conn, d); err != nil { + return fmt.Errorf("Error updating CodePipeline tags for %s: %s", d.Id(), err) + } + return resourceAwsCodePipelineRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline_webhook.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline_webhook.go index 9a8275fac71..ff816f41fbd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline_webhook.go +++ 
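(The isResourceTimeoutError blocks added above follow a pattern repeated throughout this diff: when resource.Retry exhausts its deadline while the last error was still considered retryable, issue one final direct call instead of failing outright. A generic sketch of the pattern, under the assumption that the SDK's resource.TimedOut approximates the provider's internal isResourceTimeoutError helper; doSomething is a placeholder for an AWS SDK call such as CreatePipeline:)

    package main

    import (
    	"errors"
    	"time"

    	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    )

    // doSomething stands in for an AWS API call that may throttle.
    func doSomething() error { return errors.New("Throttling: rate exceeded") }

    func callWithRetry() error {
    	err := resource.Retry(2*time.Minute, func() *resource.RetryError {
    		if err := doSomething(); err != nil {
    			return resource.RetryableError(err) // keep retrying transient errors
    		}
    		return nil
    	})
    	// If the deadline expired while the operation was still retryable,
    	// give the call one last direct attempt before reporting failure.
    	if resource.TimedOut(err) {
    		err = doSomething()
    	}
    	return err
    }

    func main() { _ = callWithRetry() }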
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline_webhook.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codepipeline" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCodePipelineWebhook() *schema.Resource { @@ -93,6 +93,11 @@ func resourceAwsCodePipelineWebhook() *schema.Resource { ForceNew: true, Required: true, }, + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, }, } } @@ -144,6 +149,7 @@ func resourceAwsCodePipelineWebhookCreate(d *schema.ResourceData, meta interface TargetPipeline: aws.String(d.Get("target_pipeline").(string)), AuthenticationConfiguration: extractCodePipelineWebhookAuthConfig(authType, authConfig), }, + Tags: tagsFromMapCodePipeline(d.Get("tags").(map[string]interface{})), } webhook, err := conn.PutWebhook(request) @@ -265,6 +271,10 @@ func resourceAwsCodePipelineWebhookRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error setting filter: %s", err) } + if err := d.Set("tags", tagsToMapCodePipeline(webhook.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go index bc043d111c9..728785fe439 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go @@ -3,14 +3,12 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cognitoidentity" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCognitoIdentityPool() *schema.Resource { @@ -99,6 +97,8 @@ func resourceAwsCognitoIdentityPool() *schema.Resource { ValidateFunc: validateCognitoSupportedLoginProviders, }, }, + + "tags": tagsSchema(), }, } } @@ -132,6 +132,10 @@ func resourceAwsCognitoIdentityPoolCreate(d *schema.ResourceData, meta interface params.OpenIdConnectProviderARNs = expandStringList(v.([]interface{})) } + if v, ok := d.GetOk("tags"); ok { + params.IdentityPoolTags = tagsFromMapGeneric(v.(map[string]interface{})) + } + entity, err := conn.CreateIdentityPool(params) if err != nil { return fmt.Errorf("Error creating Cognito Identity Pool: %s", err) @@ -168,6 +172,9 @@ func resourceAwsCognitoIdentityPoolRead(d *schema.ResourceData, meta interface{} d.Set("identity_pool_name", ip.IdentityPoolName) d.Set("allow_unauthenticated_identities", ip.AllowUnauthenticatedIdentities) d.Set("developer_provider_name", ip.DeveloperProviderName) + if err := d.Set("tags", tagsToMapGeneric(ip.IdentityPoolTags)); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } if err := 
d.Set("cognito_identity_providers", flattenCognitoIdentityProviders(ip.CognitoIdentityProviders)); err != nil { return fmt.Errorf("Error setting cognito_identity_providers error: %#v", err) @@ -198,29 +205,35 @@ func resourceAwsCognitoIdentityPoolUpdate(d *schema.ResourceData, meta interface IdentityPoolName: aws.String(d.Get("identity_pool_name").(string)), } - if d.HasChange("developer_provider_name") { - params.DeveloperProviderName = aws.String(d.Get("developer_provider_name").(string)) + if v, ok := d.GetOk("developer_provider_name"); ok { + params.DeveloperProviderName = aws.String(v.(string)) } - if d.HasChange("cognito_identity_providers") { - params.CognitoIdentityProviders = expandCognitoIdentityProviders(d.Get("cognito_identity_providers").(*schema.Set)) + if v, ok := d.GetOk("cognito_identity_providers"); ok { + params.CognitoIdentityProviders = expandCognitoIdentityProviders(v.(*schema.Set)) } - if d.HasChange("supported_login_providers") { - params.SupportedLoginProviders = expandCognitoSupportedLoginProviders(d.Get("supported_login_providers").(map[string]interface{})) + if v, ok := d.GetOk("supported_login_providers"); ok { + params.SupportedLoginProviders = expandCognitoSupportedLoginProviders(v.(map[string]interface{})) } - if d.HasChange("openid_connect_provider_arns") { - params.OpenIdConnectProviderARNs = expandStringList(d.Get("openid_connect_provider_arns").([]interface{})) + if v, ok := d.GetOk("openid_connect_provider_arns"); ok { + params.OpenIdConnectProviderARNs = expandStringList(v.([]interface{})) } - if d.HasChange("saml_provider_arns") { - params.SamlProviderARNs = expandStringList(d.Get("saml_provider_arns").([]interface{})) + if v, ok := d.GetOk("saml_provider_arns"); ok { + params.SamlProviderARNs = expandStringList(v.([]interface{})) } + log.Printf("[DEBUG] Updating Cognito Identity Pool: %s", params) + _, err := conn.UpdateIdentityPool(params) if err != nil { - return fmt.Errorf("Error creating Cognito Identity Pool: %s", err) + return fmt.Errorf("Error updating Cognito Identity Pool: %s", err) + } + + if err := setTagsCognito(conn, d); err != nil { + return err } return resourceAwsCognitoIdentityPoolRead(d, meta) @@ -230,15 +243,12 @@ func resourceAwsCognitoIdentityPoolDelete(d *schema.ResourceData, meta interface conn := meta.(*AWSClient).cognitoconn log.Printf("[DEBUG] Deleting Cognito Identity Pool: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteIdentityPool(&cognitoidentity.DeleteIdentityPoolInput{ - IdentityPoolId: aws.String(d.Id()), - }) - - if err == nil { - return nil - } - - return resource.NonRetryableError(err) + _, err := conn.DeleteIdentityPool(&cognitoidentity.DeleteIdentityPoolInput{ + IdentityPoolId: aws.String(d.Id()), }) + + if err != nil { + return fmt.Errorf("Error deleting Cognito identity pool: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool_roles_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool_roles_attachment.go index 0b62891cd0b..dffec700baa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool_roles_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool_roles_attachment.go @@ -3,14 
+3,12 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cognitoidentity" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCognitoIdentityPoolRolesAttachment() *schema.Resource { @@ -222,19 +220,17 @@ func resourceAwsCognitoIdentityPoolRolesAttachmentDelete(d *schema.ResourceData, conn := meta.(*AWSClient).cognitoconn log.Printf("[DEBUG] Deleting Cognito Identity Pool Roles Association: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.SetIdentityPoolRoles(&cognitoidentity.SetIdentityPoolRolesInput{ - IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), - Roles: expandCognitoIdentityPoolRoles(make(map[string]interface{})), - RoleMappings: expandCognitoIdentityPoolRoleMappingsAttachment([]interface{}{}), - }) + _, err := conn.SetIdentityPoolRoles(&cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), + Roles: expandCognitoIdentityPoolRoles(make(map[string]interface{})), + RoleMappings: expandCognitoIdentityPoolRoleMappingsAttachment([]interface{}{}), + }) - if err == nil { - return nil - } + if err != nil { + return fmt.Errorf("Error deleting Cognito identity pool roles association: %s", err) + } - return resource.NonRetryableError(err) - }) + return nil } // Validating that each role_mapping ambiguous_role_resolution diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_provider.go index 57b25f8ca96..29044b853c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_provider.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCognitoIdentityProvider() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_resource_server.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_resource_server.go index 165695a11e2..861f7da9b63 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_resource_server.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_resource_server.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func 
resourceAwsCognitoResourceServer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_group.go index c294665cd2e..f8f76056586 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_group.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCognitoUserGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool.go index 6b7548140da..fca6ea1a4ba 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool.go @@ -3,15 +3,16 @@ package aws import ( "fmt" "log" + "regexp" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCognitoUserPool() *schema.Resource { @@ -128,21 +129,35 @@ func resourceAwsCognitoUserPool() *schema.Resource { }, "email_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "reply_to_email_address": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCognitoUserPoolReplyEmailAddress, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.Any( + validation.StringInSlice([]string{""}, false), + validation.StringMatch(regexp.MustCompile(`[\p{L}\p{M}\p{S}\p{N}\p{P}]+@[\p{L}\p{M}\p{S}\p{N}\p{P}]+`), + `must satisfy regular expression pattern: [\p{L}\p{M}\p{S}\p{N}\p{P}]+@[\p{L}\p{M}\p{S}\p{N}\p{P}]+`), + ), }, "source_arn": { Type: schema.TypeString, Optional: true, ValidateFunc: validateArn, }, + "email_sending_account": { + Type: schema.TypeString, + Optional: true, + Default: cognitoidentityprovider.EmailSendingAccountTypeCognitoDefault, + ValidateFunc: validation.StringInSlice([]string{ + cognitoidentityprovider.EmailSendingAccountTypeCognitoDefault, + cognitoidentityprovider.EmailSendingAccountTypeDeveloper, + }, false), + }, }, }, }, @@ -395,6 +410,7 @@ func resourceAwsCognitoUserPool() *schema.Resource { 
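(The reply_to_email_address change above swaps a bespoke validator for a composition: validation.Any accepts the value if any one sub-validator does, here either the empty string or an email-shaped value. A small sketch of the same composition, using a deliberately simplified stand-in pattern rather than the provider's full Unicode regex:)

    package main

    import (
    	"fmt"
    	"regexp"

    	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
    )

    func main() {
    	// Accept "" (treated as unset) or anything matching a loose email pattern.
    	emailOK := validation.Any(
    		validation.StringInSlice([]string{""}, false),
    		validation.StringMatch(regexp.MustCompile(`.+@.+`), "must look like an email address"),
    	)

    	for _, v := range []string{"", "ops@example.com", "not-an-email"} {
    		_, errs := emailOK(v, "reply_to_email_address")
    		fmt.Printf("%q valid: %t\n", v, len(errs) == 0)
    	}
    }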
"sms_verification_message": { Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validateCognitoUserPoolSmsVerificationMessage, ConflictsWith: []string{"verification_message_template.0.sms_message"}, }, @@ -529,6 +545,10 @@ func resourceAwsCognitoUserPoolCreate(d *schema.ResourceData, meta interface{}) emailConfigurationType.SourceArn = aws.String(v.(string)) } + if v, ok := config["email_sending_account"]; ok && v.(string) != "" { + emailConfigurationType.EmailSendingAccount = aws.String(v.(string)) + } + params.EmailConfiguration = emailConfigurationType } } @@ -654,6 +674,9 @@ func resourceAwsCognitoUserPoolCreate(d *schema.ResourceData, meta interface{}) return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateUserPool(params) + } if err != nil { return fmt.Errorf("Error creating Cognito User Pool: %s", err) } @@ -723,8 +746,10 @@ func resourceAwsCognitoUserPoolRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed setting device_configuration: %s", err) } - if err := d.Set("email_configuration", flattenCognitoUserPoolEmailConfiguration(resp.UserPool.EmailConfiguration)); err != nil { - return fmt.Errorf("Failed setting email_configuration: %s", err) + if resp.UserPool.EmailConfiguration != nil { + if err := d.Set("email_configuration", flattenCognitoUserPoolEmailConfiguration(resp.UserPool.EmailConfiguration)); err != nil { + return fmt.Errorf("Failed setting email_configuration: %s", err) + } } if resp.UserPool.Policies != nil && resp.UserPool.Policies.PasswordPolicy != nil { @@ -795,10 +820,12 @@ func resourceAwsCognitoUserPoolUpdate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("email_configuration"); ok { + configs := v.([]interface{}) config, ok := configs[0].(map[string]interface{}) if ok && config != nil { + log.Printf("[DEBUG] Set Values to update from configs") emailConfigurationType := &cognitoidentityprovider.EmailConfigurationType{} if v, ok := config["reply_to_email_address"]; ok && v.(string) != "" { @@ -809,6 +836,10 @@ func resourceAwsCognitoUserPoolUpdate(d *schema.ResourceData, meta interface{}) emailConfigurationType.SourceArn = aws.String(v.(string)) } + if v, ok := config["email_sending_account"]; ok && v.(string) != "" { + emailConfigurationType.EmailSendingAccount = aws.String(v.(string)) + } + params.EmailConfiguration = emailConfigurationType } } @@ -917,6 +948,9 @@ func resourceAwsCognitoUserPoolUpdate(d *schema.ResourceData, meta interface{}) return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateUserPool(params) + } if err != nil { return fmt.Errorf("Error updating Cognito User pool: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_client.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_client.go index e4a3ae596e2..f6c298017aa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_client.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_client.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCognitoUserPoolClient() *schema.Resource { @@ -269,6 +269,10 @@ func resourceAwsCognitoUserPoolClientUpdate(d *schema.ResourceData, meta interfa UserPoolId: aws.String(d.Get("user_pool_id").(string)), } + if v, ok := d.GetOk("name"); ok { + params.ClientName = aws.String(v.(string)) + } + if v, ok := d.GetOk("explicit_auth_flows"); ok { params.ExplicitAuthFlows = expandStringList(v.(*schema.Set).List()) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_domain.go index 83a35b8c8d9..b7050e0e9f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_user_pool_domain.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsCognitoUserPoolDomain() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_aggregate_authorization.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_aggregate_authorization.go index 47094e47b07..d76f0f8faa8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_aggregate_authorization.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_aggregate_authorization.go @@ -8,13 +8,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/configservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsConfigAggregateAuthorization() *schema.Resource { return &schema.Resource{ Create: resourceAwsConfigAggregateAuthorizationPut, Read: resourceAwsConfigAggregateAuthorizationRead, + Update: resourceAwsConfigAggregateAuthorizationUpdate, Delete: resourceAwsConfigAggregateAuthorizationDelete, Importer: &schema.ResourceImporter{ @@ -37,6 +38,7 @@ func resourceAwsConfigAggregateAuthorization() *schema.Resource { Required: true, ForceNew: true, }, + "tags": tagsSchema(), }, } } @@ -50,6 +52,7 @@ func resourceAwsConfigAggregateAuthorizationPut(d *schema.ResourceData, meta int req := &configservice.PutAggregationAuthorizationInput{ AuthorizedAccountId: aws.String(accountId), AuthorizedAwsRegion: aws.String(region), + Tags: tagsFromMapConfigService(d.Get("tags").(map[string]interface{})), } _, err := conn.PutAggregationAuthorization(req) @@ -58,6 +61,7 @@ func resourceAwsConfigAggregateAuthorizationPut(d *schema.ResourceData, meta int } d.SetId(fmt.Sprintf("%s:%s", accountId, region)) + return 
resourceAwsConfigAggregateAuthorizationRead(d, meta) } @@ -77,19 +81,48 @@ func resourceAwsConfigAggregateAuthorizationRead(d *schema.ResourceData, meta in return fmt.Errorf("Error retrieving list of aggregate authorizations: %s", err) } + var aggregationAuthorization *configservice.AggregationAuthorization // Check for existing authorization for _, auth := range aggregateAuthorizations { if accountId == aws.StringValue(auth.AuthorizedAccountId) && region == aws.StringValue(auth.AuthorizedAwsRegion) { - d.Set("arn", auth.AggregationAuthorizationArn) + aggregationAuthorization = auth + } + } + + if aggregationAuthorization == nil { + log.Printf("[WARN] Aggregate Authorization not found, removing from state: %s", d.Id()) + d.SetId("") + return nil + } + + d.Set("arn", aggregationAuthorization.AggregationAuthorizationArn) + + if err := saveTagsConfigService(conn, d, aws.StringValue(aggregationAuthorization.AggregationAuthorizationArn)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Aggregate Authorization not found, removing from state: %s", d.Id()) + d.SetId("") return nil } + return fmt.Errorf("Error setting tags for %s: %s", d.Id(), err) } - log.Printf("[WARN] Aggregate Authorization not found, removing from state: %s", d.Id()) - d.SetId("") return nil } +func resourceAwsConfigAggregateAuthorizationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + if err := setTagsConfigService(conn, d, d.Get("arn").(string)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Aggregate Authorization not found, removing from state: %s", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error updating tags for %s: %s", d.Id(), err) + } + return resourceAwsConfigAggregateAuthorizationRead(d, meta) +} + func resourceAwsConfigAggregateAuthorizationDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).configconn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go index 50d4df7d32a..c3746b8b927 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go @@ -6,10 +6,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -136,6 +136,7 @@ func resourceAwsConfigConfigRule() *schema.Resource { }, }, }, + "tags": tagsSchema(), }, } } @@ -162,6 +163,7 @@ func resourceAwsConfigConfigRulePut(d *schema.ResourceData, meta interface{}) er input := configservice.PutConfigRuleInput{ ConfigRule: &ruleInput, + Tags: tagsFromMapConfigService(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Creating 
AWSConfig config rule: %s", input) err := resource.Retry(2*time.Minute, func() *resource.RetryError { @@ -179,14 +181,28 @@ func resourceAwsConfigConfigRulePut(d *schema.ResourceData, meta interface{}) er return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutConfigRule(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating AWSConfig rule: %s", err) } d.SetId(name) log.Printf("[DEBUG] AWSConfig config rule %q created", name) + if !d.IsNewResource() { + if err := setTagsConfigService(conn, d, d.Get("arn").(string)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Config Rule not found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error updating tags for %s: %s", d.Id(), err) + } + } + return resourceAwsConfigConfigRuleRead(d, meta) } @@ -233,6 +249,15 @@ func resourceAwsConfigConfigRuleRead(d *schema.ResourceData, meta interface{}) e d.Set("source", flattenConfigRuleSource(rule.Source)) + if err := saveTagsConfigService(conn, d, aws.StringValue(rule.ConfigRuleArn)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Config Rule not found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error setting tags for %s: %s", d.Id(), err) + } + return nil } @@ -242,10 +267,11 @@ func resourceAwsConfigConfigRuleDelete(d *schema.ResourceData, meta interface{}) name := d.Get("name").(string) log.Printf("[DEBUG] Deleting AWS Config config rule %q", name) + input := &configservice.DeleteConfigRuleInput{ + ConfigRuleName: aws.String(name), + } err := resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteConfigRule(&configservice.DeleteConfigRuleInput{ - ConfigRuleName: aws.String(name), - }) + _, err := conn.DeleteConfigRule(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceInUseException" { return resource.RetryableError(err) @@ -254,6 +280,9 @@ func resourceAwsConfigConfigRuleDelete(d *schema.ResourceData, meta interface{}) } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteConfigRule(input) + } if err != nil { return fmt.Errorf("Deleting Config Rule failed: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_aggregator.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_aggregator.go index 0d48fc83f09..740ef51d0a0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_aggregator.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_aggregator.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/configservice" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsConfigConfigurationAggregator() *schema.Resource { @@ -106,6 +106,7 @@ func resourceAwsConfigConfigurationAggregator() *schema.Resource { }, }, }, + 
"tags": tagsSchema(), }, } } @@ -117,6 +118,7 @@ func resourceAwsConfigConfigurationAggregatorPut(d *schema.ResourceData, meta in req := &configservice.PutConfigurationAggregatorInput{ ConfigurationAggregatorName: aws.String(name), + Tags: tagsFromMapConfigService(d.Get("tags").(map[string]interface{})), } account_aggregation_sources := d.Get("account_aggregation_source").([]interface{}) @@ -136,6 +138,17 @@ func resourceAwsConfigConfigurationAggregatorPut(d *schema.ResourceData, meta in d.SetId(strings.ToLower(name)) + if !d.IsNewResource() { + if err := setTagsConfigService(conn, d, d.Get("arn").(string)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Configuration Aggregator not found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error updating tags for %s: %s", d.Id(), err) + } + } + return resourceAwsConfigConfigurationAggregatorRead(d, meta) } @@ -173,6 +186,15 @@ func resourceAwsConfigConfigurationAggregatorRead(d *schema.ResourceData, meta i return fmt.Errorf("error setting organization_aggregation_source: %s", err) } + if err := saveTagsConfigService(conn, d, aws.StringValue(aggregator.ConfigurationAggregatorArn)); err != nil { + if isAWSErr(err, configservice.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Configiguration Aggregator not found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error setting tags for %s: %s", d.Id(), err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go index 91dd99886fa..e5787e156e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go @@ -4,8 +4,8 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go index 3420ed5f5ee..ed0cfbd51c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go index 95bff5548da..e60f0ffa907 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go @@ -5,9 +5,9 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -99,13 +99,15 @@ func resourceAwsConfigDeliveryChannelPut(d *schema.ResourceData, meta interface{ return nil } - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "InsufficientDeliveryPolicyException" { + if isAWSErr(err, "InsufficientDeliveryPolicyException", "") { return resource.RetryableError(err) } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.PutDeliveryChannel(&input) + } if err != nil { return fmt.Errorf("Creating Delivery Channel failed: %s", err) } @@ -175,6 +177,9 @@ func resourceAwsConfigDeliveryChannelDelete(d *schema.ResourceData, meta interfa } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteDeliveryChannel(&input) + } if err != nil { return fmt.Errorf("Unable to delete delivery channel: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_custom_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_custom_rule.go new file mode 100644 index 00000000000..c4ddc2fd56e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_custom_rule.go @@ -0,0 +1,312 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsConfigOrganizationCustomRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigOrganizationCustomRuleCreate, + Delete: resourceAwsConfigOrganizationCustomRuleDelete, + Read: resourceAwsConfigOrganizationCustomRuleRead, + Update: resourceAwsConfigOrganizationCustomRuleUpdate, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + "excluded_accounts": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1000, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateAwsAccountId, + }, + }, + "input_parameters": { + Type: schema.TypeString, + 
Optional: true, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 2048), + validation.ValidateJsonString, + ), + }, + "lambda_function_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "maximum_execution_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + configservice.MaximumExecutionFrequencyOneHour, + configservice.MaximumExecutionFrequencyThreeHours, + configservice.MaximumExecutionFrequencySixHours, + configservice.MaximumExecutionFrequencyTwelveHours, + configservice.MaximumExecutionFrequencyTwentyFourHours, + }, false), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "resource_id_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 768), + }, + "resource_types_scope": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 100, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + }, + "tag_key_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "tag_value_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + "trigger_types": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 3, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "ConfigurationItemChangeNotification", + "OversizedConfigurationItemChangeNotification", + "ScheduledNotification", + }, false), + }, + }, + }, + } +} + +func resourceAwsConfigOrganizationCustomRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + name := d.Get("name").(string) + + input := &configservice.PutOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(name), + OrganizationCustomRuleMetadata: &configservice.OrganizationCustomRuleMetadata{ + LambdaFunctionArn: aws.String(d.Get("lambda_function_arn").(string)), + OrganizationConfigRuleTriggerTypes: expandStringSet(d.Get("trigger_types").(*schema.Set)), + }, + } + + if v, ok := d.GetOk("description"); ok { + input.OrganizationCustomRuleMetadata.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("excluded_accounts"); ok && v.(*schema.Set).Len() > 0 { + input.ExcludedAccounts = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("input_parameters"); ok { + input.OrganizationCustomRuleMetadata.InputParameters = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maximum_execution_frequency"); ok { + input.OrganizationCustomRuleMetadata.MaximumExecutionFrequency = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_id_scope"); ok { + input.OrganizationCustomRuleMetadata.ResourceIdScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_types_scope"); ok && v.(*schema.Set).Len() > 0 { + input.OrganizationCustomRuleMetadata.ResourceTypesScope = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tag_key_scope"); ok { + input.OrganizationCustomRuleMetadata.TagKeyScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tag_value_scope"); ok { + input.OrganizationCustomRuleMetadata.TagValueScope = aws.String(v.(string)) + } + + _, err := conn.PutOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error 
creating Config Organization Custom Rule (%s): %s", name, err) + } + + d.SetId(name) + + if err := configWaitForOrganizationRuleStatusCreateSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("error waiting for Config Organization Custom Rule (%s) creation: %s", d.Id(), err) + } + + return resourceAwsConfigOrganizationCustomRuleRead(d, meta) +} + +func resourceAwsConfigOrganizationCustomRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + rule, err := configDescribeOrganizationConfigRule(conn, d.Id()) + + if isAWSErr(err, configservice.ErrCodeNoSuchOrganizationConfigRuleException, "") { + log.Printf("[WARN] Config Organization Custom Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error describing Config Organization Custom Rule (%s): %s", d.Id(), err) + } + + if rule == nil { + log.Printf("[WARN] Config Organization Custom Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if rule.OrganizationManagedRuleMetadata != nil { + return fmt.Errorf("expected Config Organization Custom Rule, found Config Organization Managed Rule: %s", d.Id()) + } + + if rule.OrganizationCustomRuleMetadata == nil { + return fmt.Errorf("error describing Config Organization Custom Rule (%s): empty metadata", d.Id()) + } + + d.Set("arn", rule.OrganizationConfigRuleArn) + d.Set("description", rule.OrganizationCustomRuleMetadata.Description) + + if err := d.Set("excluded_accounts", aws.StringValueSlice(rule.ExcludedAccounts)); err != nil { + return fmt.Errorf("error setting excluded_accounts: %s", err) + } + + d.Set("input_parameters", rule.OrganizationCustomRuleMetadata.InputParameters) + d.Set("lambda_function_arn", rule.OrganizationCustomRuleMetadata.LambdaFunctionArn) + d.Set("maximum_execution_frequency", rule.OrganizationCustomRuleMetadata.MaximumExecutionFrequency) + d.Set("name", rule.OrganizationConfigRuleName) + d.Set("resource_id_scope", rule.OrganizationCustomRuleMetadata.ResourceIdScope) + + if err := d.Set("resource_types_scope", aws.StringValueSlice(rule.OrganizationCustomRuleMetadata.ResourceTypesScope)); err != nil { + return fmt.Errorf("error setting resource_types_scope: %s", err) + } + + d.Set("tag_key_scope", rule.OrganizationCustomRuleMetadata.TagKeyScope) + d.Set("tag_value_scope", rule.OrganizationCustomRuleMetadata.TagValueScope) + + if err := d.Set("trigger_types", aws.StringValueSlice(rule.OrganizationCustomRuleMetadata.OrganizationConfigRuleTriggerTypes)); err != nil { + return fmt.Errorf("error setting trigger_types: %s", err) + } + + return nil +} + +func resourceAwsConfigOrganizationCustomRuleUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := &configservice.PutOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(d.Id()), + OrganizationCustomRuleMetadata: &configservice.OrganizationCustomRuleMetadata{ + LambdaFunctionArn: aws.String(d.Get("lambda_function_arn").(string)), + OrganizationConfigRuleTriggerTypes: expandStringSet(d.Get("trigger_types").(*schema.Set)), + }, + } + + if v, ok := d.GetOk("description"); ok { + input.OrganizationCustomRuleMetadata.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("excluded_accounts"); ok && v.(*schema.Set).Len() > 0 { + input.ExcludedAccounts = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("input_parameters"); ok { + 
input.OrganizationCustomRuleMetadata.InputParameters = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maximum_execution_frequency"); ok { + input.OrganizationCustomRuleMetadata.MaximumExecutionFrequency = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_id_scope"); ok { + input.OrganizationCustomRuleMetadata.ResourceIdScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_types_scope"); ok && v.(*schema.Set).Len() > 0 { + input.OrganizationCustomRuleMetadata.ResourceTypesScope = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tag_key_scope"); ok { + input.OrganizationCustomRuleMetadata.TagKeyScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tag_value_scope"); ok { + input.OrganizationCustomRuleMetadata.TagValueScope = aws.String(v.(string)) + } + + _, err := conn.PutOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error updating Config Organization Custom Rule (%s): %s", d.Id(), err) + } + + if err := configWaitForOrganizationRuleStatusUpdateSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("error waiting for Config Organization Custom Rule (%s) update: %s", d.Id(), err) + } + + return resourceAwsConfigOrganizationCustomRuleRead(d, meta) +} + +func resourceAwsConfigOrganizationCustomRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := &configservice.DeleteOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(d.Id()), + } + + _, err := conn.DeleteOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error deleting Config Organization Custom Rule (%s): %s", d.Id(), err) + } + + if err := configWaitForOrganizationRuleStatusDeleteSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for Config Organization Custom Rule (%s) deletion: %s", d.Id(), err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_managed_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_managed_rule.go new file mode 100644 index 00000000000..b37f6d7999d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_organization_managed_rule.go @@ -0,0 +1,292 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsConfigOrganizationManagedRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigOrganizationManagedRuleCreate, + Delete: resourceAwsConfigOrganizationManagedRuleDelete, + Read: resourceAwsConfigOrganizationManagedRuleRead, + Update: resourceAwsConfigOrganizationManagedRuleUpdate, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
validation.StringLenBetween(0, 256), + }, + "excluded_accounts": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1000, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateAwsAccountId, + }, + }, + "input_parameters": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 2048), + validation.ValidateJsonString, + ), + }, + "maximum_execution_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + configservice.MaximumExecutionFrequencyOneHour, + configservice.MaximumExecutionFrequencyThreeHours, + configservice.MaximumExecutionFrequencySixHours, + configservice.MaximumExecutionFrequencyTwelveHours, + configservice.MaximumExecutionFrequencyTwentyFourHours, + }, false), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "resource_id_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 768), + }, + "resource_types_scope": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 100, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + }, + "rule_identifier": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "tag_key_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "tag_value_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + }, + } +} + +func resourceAwsConfigOrganizationManagedRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + name := d.Get("name").(string) + + input := &configservice.PutOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(name), + OrganizationManagedRuleMetadata: &configservice.OrganizationManagedRuleMetadata{ + RuleIdentifier: aws.String(d.Get("rule_identifier").(string)), + }, + } + + if v, ok := d.GetOk("description"); ok { + input.OrganizationManagedRuleMetadata.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("excluded_accounts"); ok && v.(*schema.Set).Len() > 0 { + input.ExcludedAccounts = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("input_parameters"); ok { + input.OrganizationManagedRuleMetadata.InputParameters = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maximum_execution_frequency"); ok { + input.OrganizationManagedRuleMetadata.MaximumExecutionFrequency = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_id_scope"); ok { + input.OrganizationManagedRuleMetadata.ResourceIdScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_types_scope"); ok && v.(*schema.Set).Len() > 0 { + input.OrganizationManagedRuleMetadata.ResourceTypesScope = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tag_key_scope"); ok { + input.OrganizationManagedRuleMetadata.TagKeyScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tag_value_scope"); ok { + input.OrganizationManagedRuleMetadata.TagValueScope = aws.String(v.(string)) + } + + _, err := conn.PutOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error creating Config Organization Managed Rule (%s): %s", name, err) + } + + d.SetId(name) + + if err := 
configWaitForOrganizationRuleStatusCreateSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("error waiting for Config Organization Managed Rule (%s) creation: %s", d.Id(), err) + } + + return resourceAwsConfigOrganizationManagedRuleRead(d, meta) +} + +func resourceAwsConfigOrganizationManagedRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + rule, err := configDescribeOrganizationConfigRule(conn, d.Id()) + + if isAWSErr(err, configservice.ErrCodeNoSuchOrganizationConfigRuleException, "") { + log.Printf("[WARN] Config Organization Managed Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error describing Config Organization Managed Rule (%s): %s", d.Id(), err) + } + + if rule == nil { + log.Printf("[WARN] Config Organization Managed Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if rule.OrganizationCustomRuleMetadata != nil { + return fmt.Errorf("expected Config Organization Managed Rule, found Config Organization Custom Rule: %s", d.Id()) + } + + if rule.OrganizationManagedRuleMetadata == nil { + return fmt.Errorf("error describing Config Organization Managed Rule (%s): empty metadata", d.Id()) + } + + d.Set("arn", rule.OrganizationConfigRuleArn) + d.Set("description", rule.OrganizationManagedRuleMetadata.Description) + + if err := d.Set("excluded_accounts", aws.StringValueSlice(rule.ExcludedAccounts)); err != nil { + return fmt.Errorf("error setting excluded_accounts: %s", err) + } + + d.Set("input_parameters", rule.OrganizationManagedRuleMetadata.InputParameters) + d.Set("maximum_execution_frequency", rule.OrganizationManagedRuleMetadata.MaximumExecutionFrequency) + d.Set("name", rule.OrganizationConfigRuleName) + d.Set("resource_id_scope", rule.OrganizationManagedRuleMetadata.ResourceIdScope) + + if err := d.Set("resource_types_scope", aws.StringValueSlice(rule.OrganizationManagedRuleMetadata.ResourceTypesScope)); err != nil { + return fmt.Errorf("error setting resource_types_scope: %s", err) + } + + d.Set("rule_identifier", rule.OrganizationManagedRuleMetadata.RuleIdentifier) + d.Set("tag_key_scope", rule.OrganizationManagedRuleMetadata.TagKeyScope) + d.Set("tag_value_scope", rule.OrganizationManagedRuleMetadata.TagValueScope) + + return nil +} + +func resourceAwsConfigOrganizationManagedRuleUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := &configservice.PutOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(d.Id()), + OrganizationManagedRuleMetadata: &configservice.OrganizationManagedRuleMetadata{ + RuleIdentifier: aws.String(d.Get("rule_identifier").(string)), + }, + } + + if v, ok := d.GetOk("description"); ok { + input.OrganizationManagedRuleMetadata.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("excluded_accounts"); ok && v.(*schema.Set).Len() > 0 { + input.ExcludedAccounts = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("input_parameters"); ok { + input.OrganizationManagedRuleMetadata.InputParameters = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maximum_execution_frequency"); ok { + input.OrganizationManagedRuleMetadata.MaximumExecutionFrequency = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_id_scope"); ok { + input.OrganizationManagedRuleMetadata.ResourceIdScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("resource_types_scope"); ok && 
v.(*schema.Set).Len() > 0 { + input.OrganizationManagedRuleMetadata.ResourceTypesScope = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tag_key_scope"); ok { + input.OrganizationManagedRuleMetadata.TagKeyScope = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tag_value_scope"); ok { + input.OrganizationManagedRuleMetadata.TagValueScope = aws.String(v.(string)) + } + + _, err := conn.PutOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error updating Config Organization Managed Rule (%s): %s", d.Id(), err) + } + + if err := configWaitForOrganizationRuleStatusUpdateSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("error waiting for Config Organization Managed Rule (%s) update: %s", d.Id(), err) + } + + return resourceAwsConfigOrganizationManagedRuleRead(d, meta) +} + +func resourceAwsConfigOrganizationManagedRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := &configservice.DeleteOrganizationConfigRuleInput{ + OrganizationConfigRuleName: aws.String(d.Id()), + } + + _, err := conn.DeleteOrganizationConfigRule(input) + + if err != nil { + return fmt.Errorf("error deleting Config Organization Managed Rule (%s): %s", d.Id(), err) + } + + if err := configWaitForOrganizationRuleStatusDeleteSuccessful(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for Config Organization Managed Rule (%s) deletion: %s", d.Id(), err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cur_report_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cur_report_definition.go index dc61ec226d0..5ac8e66b321 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cur_report_definition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cur_report_definition.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/costandusagereportservice" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "log" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go index 122b4c30d83..f3cc3e05d11 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsCustomerGateway() *schema.Resource { @@ -238,8 +238,7 @@ func resourceAwsCustomerGatewayDelete(d *schema.ResourceData, meta interface{}) if ec2err, ok := 
err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { return nil } else { - log.Printf("[ERROR] Error deleting CustomerGateway: %s", err) - return err + return fmt.Errorf("[ERROR] Error deleting CustomerGateway: %s", err) } } @@ -248,31 +247,53 @@ func resourceAwsCustomerGatewayDelete(d *schema.ResourceData, meta interface{}) Values: []*string{aws.String(d.Id())}, } + input := &ec2.DescribeCustomerGatewaysInput{ + Filters: []*ec2.Filter{gatewayFilter}, + } err = resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{gatewayFilter}, - }) + resp, err := conn.DescribeCustomerGateways(input) if err != nil { - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidCustomerGatewayID.NotFound" { + if isAWSErr(err, "InvalidCustomerGatewayID.NotFound", "") { return nil } return resource.NonRetryableError(err) } - if len(resp.CustomerGateways) != 1 { - return resource.RetryableError(fmt.Errorf("Error finding CustomerGateway for delete: %s", d.Id())) + err = checkGatewayDeleteResponse(resp, d.Id()) + if err != nil { + return resource.RetryableError(err) } + return nil + }) - switch *resp.CustomerGateways[0].State { - case "pending", "available", "deleting": - return resource.RetryableError(fmt.Errorf("Gateway (%s) in state (%s), retrying", d.Id(), *resp.CustomerGateways[0].State)) - case "deleted": - return nil - default: - return resource.RetryableError(fmt.Errorf("Unrecognized state (%s) for Customer Gateway delete on (%s)", *resp.CustomerGateways[0].State, d.Id())) + if isResourceTimeoutError(err) { + var resp *ec2.DescribeCustomerGatewaysOutput + resp, err = conn.DescribeCustomerGateways(input) + + if err != nil { + return checkGatewayDeleteResponse(resp, d.Id()) } - }) + } + + if err != nil { + return fmt.Errorf("Error deleting customer gateway: %s", err) + } + return nil + +} + +func checkGatewayDeleteResponse(resp *ec2.DescribeCustomerGatewaysOutput, id string) error { + if len(resp.CustomerGateways) != 1 { + return fmt.Errorf("Error finding CustomerGateway for delete: %s", id) + } - return err + switch *resp.CustomerGateways[0].State { + case "pending", "available", "deleting": + return fmt.Errorf("Gateway (%s) in state (%s), retrying", id, *resp.CustomerGateways[0].State) + case "deleted": + return nil + default: + return fmt.Errorf("Unrecognized state (%s) for Customer Gateway delete on (%s)", *resp.CustomerGateways[0].State, id) + } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datapipeline_pipeline.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datapipeline_pipeline.go new file mode 100644 index 00000000000..193c92d8d62 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datapipeline_pipeline.go @@ -0,0 +1,163 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/datapipeline" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsDataPipelinePipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDataPipelinePipelineCreate, + Read: resourceAwsDataPipelinePipelineRead, + 
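// NOTE on the retry pattern introduced in the customer gateway delete above (and
// reused throughout this diff): resource.Retry returns a *resource.TimeoutError once
// the timeout elapses, and the caller then makes one final non-retrying call so an
// operation that succeeded just after the deadline is not misreported as a failure.
// isResourceTimeoutError is a provider-local helper defined outside this diff; a
// minimal sketch, assuming its conventional shape:
//
//	func isResourceTimeoutError(err error) bool {
//		timeoutErr, ok := err.(*resource.TimeoutError)
//		return ok && timeoutErr.LastError == nil
//	}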
Update: resourceAwsDataPipelinePipelineUpdate, + Delete: resourceAwsDataPipelinePipelineDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDataPipelinePipelineCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).datapipelineconn + + uniqueID := resource.UniqueId() + + input := datapipeline.CreatePipelineInput{ + Name: aws.String(d.Get("name").(string)), + UniqueId: aws.String(uniqueID), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().DatapipelineTags(), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + resp, err := conn.CreatePipeline(&input) + + if err != nil { + return fmt.Errorf("Error creating datapipeline: %s", err) + } + + d.SetId(aws.StringValue(resp.PipelineId)) + + return resourceAwsDataPipelinePipelineRead(d, meta) +} + +func resourceAwsDataPipelinePipelineRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).datapipelineconn + + v, err := resourceAwsDataPipelinePipelineRetrieve(d.Id(), conn) + if isAWSErr(err, datapipeline.ErrCodePipelineNotFoundException, "") || isAWSErr(err, datapipeline.ErrCodePipelineDeletedException, "") || v == nil { + log.Printf("[WARN] DataPipeline (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error describing DataPipeline (%s): %s", d.Id(), err) + } + + d.Set("name", v.Name) + d.Set("description", v.Description) + if err := d.Set("tags", keyvaluetags.DatapipelineKeyValueTags(v.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + return nil +} + +func resourceAwsDataPipelinePipelineUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).datapipelineconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.DatapipelineUpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating Datapipeline Pipeline (%s) tags: %s", d.Id(), err) + } + } + + return resourceAwsDataPipelinePipelineRead(d, meta) +} + +func resourceAwsDataPipelinePipelineDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).datapipelineconn + + opts := datapipeline.DeletePipelineInput{ + PipelineId: aws.String(d.Id()), + } + + _, err := conn.DeletePipeline(&opts) + if isAWSErr(err, datapipeline.ErrCodePipelineNotFoundException, "") || isAWSErr(err, datapipeline.ErrCodePipelineDeletedException, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error deleting Data Pipeline %s: %s", d.Id(), err.Error()) + } + + return waitForDataPipelineDeletion(conn, d.Id()) +} + +func resourceAwsDataPipelinePipelineRetrieve(id string, conn *datapipeline.DataPipeline) (*datapipeline.PipelineDescription, error) { + opts := datapipeline.DescribePipelinesInput{ + PipelineIds: []*string{aws.String(id)}, + } + + resp, err := conn.DescribePipelines(&opts) + if err != nil { + return nil, err + } + + var pipeline *datapipeline.PipelineDescription + + for _, p := range resp.PipelineDescriptionList { + if p == nil { + continue + } + + if aws.StringValue(p.PipelineId) == id { + pipeline = p + break + } + } + + return pipeline, nil +} + +func 
waitForDataPipelineDeletion(conn *datapipeline.DataPipeline, pipelineID string) error { + params := &datapipeline.DescribePipelinesInput{ + PipelineIds: []*string{aws.String(pipelineID)}, + } + return resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.DescribePipelines(params) + if isAWSErr(err, datapipeline.ErrCodePipelineNotFoundException, "") || isAWSErr(err, datapipeline.ErrCodePipelineDeletedException, "") { + return nil + } + if err != nil { + return resource.NonRetryableError(err) + } + return resource.RetryableError(fmt.Errorf("DataPipeline (%s) still exists", pipelineID)) + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_agent.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_agent.go index d79a35da83a..9ff87e78c36 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_agent.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_agent.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDataSyncAgent() *schema.Resource { @@ -85,9 +85,10 @@ func resourceAwsDataSyncAgentCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error creating HTTP request: %s", err) } + var response *http.Response err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { log.Printf("[DEBUG] Making HTTP request: %s", request.URL.String()) - response, err := client.Do(request) + response, err = client.Do(request) if err != nil { if err, ok := err.(net.Error); ok { errMessage := fmt.Errorf("error making HTTP request: %s", err) @@ -96,24 +97,30 @@ func resourceAwsDataSyncAgentCreate(d *schema.ResourceData, meta interface{}) er } return resource.NonRetryableError(fmt.Errorf("error making HTTP request: %s", err)) } - - log.Printf("[DEBUG] Received HTTP response: %#v", response) - if response.StatusCode != 302 { - return resource.NonRetryableError(fmt.Errorf("expected HTTP status code 302, received: %d", response.StatusCode)) - } - - redirectURL, err := response.Location() - if err != nil { - return resource.NonRetryableError(fmt.Errorf("error extracting HTTP Location header: %s", err)) - } - - activationKey = redirectURL.Query().Get("activationKey") - return nil }) + if isResourceTimeoutError(err) { + response, err = client.Do(request) + } if err != nil { return fmt.Errorf("error retrieving activation key from IP Address (%s): %s", agentIpAddress, err) } + if response == nil { + return fmt.Errorf("Error retrieving response for activation key request: %s", err) + } + + log.Printf("[DEBUG] Received HTTP response: %#v", response) + if response.StatusCode != 302 { + return fmt.Errorf("expected HTTP status code 302, received: %d", response.StatusCode) + } + + redirectURL, err := response.Location() + if err != nil { + return fmt.Errorf("error extracting HTTP Location header: %s", err) + } + + activationKey = redirectURL.Query().Get("activationKey") + if activationKey == "" { return fmt.Errorf("empty activationKey received from IP Address: %s", agentIpAddress) } @@ -137,10 
+144,11 @@ func resourceAwsDataSyncAgentCreate(d *schema.ResourceData, meta interface{}) er d.SetId(aws.StringValue(output.AgentArn)) // Agent activations can take a few minutes + descAgentInput := &datasync.DescribeAgentInput{ + AgentArn: aws.String(d.Id()), + } err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { - _, err := conn.DescribeAgent(&datasync.DescribeAgentInput{ - AgentArn: aws.String(d.Id()), - }) + _, err := conn.DescribeAgent(descAgentInput) if isAWSErr(err, "InvalidRequestException", "not found") { return resource.RetryableError(err) @@ -152,6 +160,9 @@ func resourceAwsDataSyncAgentCreate(d *schema.ResourceData, meta interface{}) er return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeAgent(descAgentInput) + } if err != nil { return fmt.Errorf("error waiting for DataSync Agent (%s) creation: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_efs.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_efs.go index 3e53d7f87e8..1409dd4f2bf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_efs.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_efs.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDataSyncLocationEfs() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_nfs.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_nfs.go index 56e8c1194d2..c1fe6561c67 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_nfs.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_nfs.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDataSyncLocationNfs() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_s3.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_s3.go index 8f8cda3a5b1..e75475687be 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_s3.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_location_s3.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDataSyncLocationS3() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_task.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_task.go index 49d846cf599..8ec9e5f3387 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_task.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_datasync_task.go @@ -5,12 +5,12 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDataSyncTask() *schema.Resource { @@ -186,11 +186,13 @@ func resourceAwsDataSyncTaskCreate(d *schema.ResourceData, meta interface{}) err d.SetId(aws.StringValue(output.TaskArn)) - // Task creation can take a few minutes + // Task creation can take a few minutes\ + taskInput := &datasync.DescribeTaskInput{ + TaskArn: aws.String(d.Id()), + } + var taskOutput *datasync.DescribeTaskOutput err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { - output, err := conn.DescribeTask(&datasync.DescribeTaskInput{ - TaskArn: aws.String(d.Id()), - }) + taskOutput, err := conn.DescribeTask(taskInput) if isAWSErr(err, "InvalidRequestException", "not found") { return resource.RetryableError(err) @@ -200,19 +202,31 @@ func resourceAwsDataSyncTaskCreate(d *schema.ResourceData, meta interface{}) err return resource.NonRetryableError(err) } - if aws.StringValue(output.Status) == datasync.TaskStatusAvailable || aws.StringValue(output.Status) == datasync.TaskStatusRunning { + if aws.StringValue(taskOutput.Status) == datasync.TaskStatusAvailable || aws.StringValue(taskOutput.Status) == datasync.TaskStatusRunning { return nil } err = fmt.Errorf("waiting for DataSync Task (%s) creation: last status (%s), error code (%s), error detail: %s", - d.Id(), aws.StringValue(output.Status), aws.StringValue(output.ErrorCode), aws.StringValue(output.ErrorDetail)) + d.Id(), aws.StringValue(taskOutput.Status), aws.StringValue(taskOutput.ErrorCode), aws.StringValue(taskOutput.ErrorDetail)) - if aws.StringValue(output.Status) == datasync.TaskStatusCreating { + if aws.StringValue(taskOutput.Status) == datasync.TaskStatusCreating { return resource.RetryableError(err) } - return resource.NonRetryableError(err) + return resource.NonRetryableError(err) // should only happen if err != nil }) + if isResourceTimeoutError(err) { + taskOutput, err = conn.DescribeTask(taskInput) + if isAWSErr(err, "InvalidRequestException", "not found") { + return fmt.Errorf("Task not found after creation: %s", err) + } + if err != nil { + return fmt.Errorf("Error describing task after creation: %s", err) + } + if aws.StringValue(taskOutput.Status) == datasync.TaskStatusCreating { + return fmt.Errorf("Data sync task status has not finished 
creating") + } + } if err != nil { return fmt.Errorf("error waiting for DataSync Task (%s) creation: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_cluster.go index 8612a38fd09..7510cda9468 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_cluster.go @@ -3,14 +3,16 @@ package aws import ( "fmt" "log" + "regexp" "sort" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dax" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDaxCluster() *schema.Resource { @@ -41,8 +43,13 @@ func resourceAwsDaxCluster() *schema.Resource { StateFunc: func(val interface{}) string { return strings.ToLower(val.(string)) }, - // DAX follows the same naming convention as ElastiCache clusters - ValidateFunc: validateElastiCacheClusterId, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 20), + validation.StringMatch(regexp.MustCompile(`^[0-9a-z-]+$`), "must contain only lowercase alphanumeric characters and hyphens"), + validation.StringMatch(regexp.MustCompile(`^[a-z]`), "must begin with a lowercase letter"), + validateStringNotMatch(regexp.MustCompile(`--`), "cannot contain two consecutive hyphens"), + validateStringNotMatch(regexp.MustCompile(`-$`), "cannot end with a hyphen"), + ), }, "iam_role_arn": { Type: schema.TypeString, @@ -229,6 +236,9 @@ func resourceAwsDaxClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateCluster(req) + } if err != nil { return fmt.Errorf("Error creating DAX cluster: %s", err) } @@ -486,8 +496,11 @@ func resourceAwsDaxClusterDelete(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCluster(req) + } if err != nil { - return err + return fmt.Errorf("Error deleting DAX cluster: %s", err) } log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_parameter_group.go index 27dc0489103..0b7fda751cd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_parameter_group.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dax" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDaxParameterGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_subnet_group.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_subnet_group.go index 15f15fcd323..406fb51b6e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dax_subnet_group.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dax" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDaxSubnetGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_cluster_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_cluster_snapshot.go index 089e4fa715a..4c88dfa1575 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_cluster_snapshot.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_cluster_snapshot.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbClusterSnapshot() *schema.Resource { @@ -16,6 +16,7 @@ func resourceAwsDbClusterSnapshot() *schema.Resource { Create: resourceAwsDbClusterSnapshotCreate, Read: resourceAwsDbClusterSnapshotRead, Delete: resourceAwsDbClusterSnapshotDelete, + Update: resourceAwsdbClusterSnapshotUpdate, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, @@ -89,6 +90,7 @@ func resourceAwsDbClusterSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tagsSchema(), }, } } @@ -99,6 +101,7 @@ func resourceAwsDbClusterSnapshotCreate(d *schema.ResourceData, meta interface{} params := &rds.CreateDBClusterSnapshotInput{ DBClusterIdentifier: aws.String(d.Get("db_cluster_identifier").(string)), DBClusterSnapshotIdentifier: aws.String(d.Get("db_cluster_snapshot_identifier").(string)), + Tags: tagsFromMapRDS(d.Get("tags").(map[string]interface{})), } _, err := conn.CreateDBClusterSnapshot(params) @@ -167,6 +170,24 @@ func resourceAwsDbClusterSnapshotRead(d *schema.ResourceData, meta interface{}) d.Set("storage_encrypted", snapshot.StorageEncrypted) d.Set("vpc_id", snapshot.VpcId) + if err := saveTagsRDS(conn, d, aws.StringValue(snapshot.DBClusterSnapshotArn)); err != nil { + log.Printf("[WARN] Failed to save tags for RDS DB Cluster Snapshot (%s): %s", d.Id(), err) + } + + return nil +} + +func resourceAwsdbClusterSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + if d.HasChange("tags") { + if err := setTagsRDS(conn, d, d.Get("db_cluster_snapshot_arn").(string)); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go index e3838695e26..e8f30a824a8 
100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbEventSubscription() *schema.Resource { @@ -154,9 +154,17 @@ func resourceAwsDbEventSubscriptionRead(d *schema.ResourceData, meta interface{} conn := meta.(*AWSClient).rdsconn sub, err := resourceAwsDbEventSubscriptionRetrieve(d.Id(), conn) + + if isAWSErr(err, rds.ErrCodeSubscriptionNotFoundFault, "") { + log.Printf("[WARN] RDS Event Subscription (%s) not found - removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { - return fmt.Errorf("Error retrieving RDS Event Subscription %s: %s", d.Id(), err) + return fmt.Errorf("error retrieving RDS Event Subscription (%s): %s", d.Id(), err) } + if sub == nil { log.Printf("[WARN] RDS Event Subscription (%s) not found - removing from state", d.Id()) d.SetId("") @@ -207,25 +215,32 @@ func resourceAwsDbEventSubscriptionRead(d *schema.ResourceData, meta interface{} } func resourceAwsDbEventSubscriptionRetrieve(name string, conn *rds.RDS) (*rds.EventSubscription, error) { - - request := &rds.DescribeEventSubscriptionsInput{ + input := &rds.DescribeEventSubscriptionsInput{ SubscriptionName: aws.String(name), } - describeResp, err := conn.DescribeEventSubscriptions(request) - if err != nil { - if isAWSErr(err, rds.ErrCodeSubscriptionNotFoundFault, "") { - log.Printf("[WARN] No RDS Event Subscription by name (%s) found", name) - return nil, nil + var eventSubscription *rds.EventSubscription + + err := conn.DescribeEventSubscriptionsPages(input, func(page *rds.DescribeEventSubscriptionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - return nil, fmt.Errorf("Error reading RDS Event Subscription %s: %s", name, err) - } - if len(describeResp.EventSubscriptionsList) != 1 { - return nil, fmt.Errorf("Unable to find RDS Event Subscription: %#v", describeResp.EventSubscriptionsList) - } + for _, es := range page.EventSubscriptionsList { + if es == nil { + continue + } + + if aws.StringValue(es.CustSubscriptionId) == name { + eventSubscription = es + return false + } + } - return describeResp.EventSubscriptionsList[0], nil + return !lastPage + }) + + return eventSubscription, err } func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { @@ -353,26 +368,23 @@ func resourceAwsDbEventSubscriptionDelete(d *schema.ResourceData, meta interface SubscriptionName: aws.String(d.Id()), } - if _, err := conn.DeleteEventSubscription(&deleteOpts); err != nil { - if isAWSErr(err, rds.ErrCodeSubscriptionNotFoundFault, "") { - return nil - } - return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) + _, err := conn.DeleteEventSubscription(&deleteOpts) + + if isAWSErr(err, rds.ErrCodeSubscriptionNotFoundFault, "") { + return nil } - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d.Id(), conn), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - 
Delay: 30 * time.Second, // Wait 30 secs before starting + if err != nil { + return fmt.Errorf("error deleting RDS Event Subscription (%s): %s", d.Id(), err) } - _, err := stateConf.WaitForState() + + err = waitForRdsEventSubscriptionDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { - return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) + return fmt.Errorf("error waiting for RDS Event Subscription (%s) deletion: %s", d.Id(), err) } - return err + + return nil } func resourceAwsDbEventSubscriptionRefreshFunc(name string, conn *rds.RDS) resource.StateRefreshFunc { @@ -380,8 +392,11 @@ func resourceAwsDbEventSubscriptionRefreshFunc(name string, conn *rds.RDS) resou return func() (interface{}, string, error) { sub, err := resourceAwsDbEventSubscriptionRetrieve(name, conn) + if isAWSErr(err, rds.ErrCodeSubscriptionNotFoundFault, "") { + return nil, "", nil + } + if err != nil { - log.Printf("Error on retrieving DB Event Subscription when waiting: %s", err) return nil, "", err } @@ -389,10 +404,21 @@ func resourceAwsDbEventSubscriptionRefreshFunc(name string, conn *rds.RDS) resou return nil, "", nil } - if sub.Status != nil { - log.Printf("[DEBUG] DB Event Subscription status for %s: %s", name, *sub.Status) - } + return sub, aws.StringValue(sub.Status), nil + } +} - return sub, *sub.Status, nil +func waitForRdsEventSubscriptionDeletion(conn *rds.RDS, name string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{}, + Refresh: resourceAwsDbEventSubscriptionRefreshFunc(name, conn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting } + + _, err := stateConf.WaitForState() + + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go index b58acad28a7..921f2a48374 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go @@ -4,15 +4,16 @@ import ( "fmt" "log" "regexp" + "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDbInstance() *schema.Resource { @@ -97,6 +98,29 @@ func resourceAwsDbInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + mas := d.Get("max_allocated_storage").(int) + + newInt, err := strconv.Atoi(new) + + if err != nil { + return false + } + + oldInt, err := strconv.Atoi(old) + + if err != nil { + return false + } + + // Allocated is higher than the configuration + // and autoscaling is enabled + if oldInt > newInt && mas > newInt { + return true + } + + return false + }, }, "storage_type": { @@ -171,6 +195,17 @@ func resourceAwsDbInstance() *schema.Resource { ValidateFunc: 
validateOnceAWeekWindowFormat, }, + "max_allocated_storage": { + Type: schema.TypeInt, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == "0" && new == fmt.Sprintf("%d", d.Get("allocated_storage").(int)) { + return true + } + return false + }, + }, + "multi_az": { Type: schema.TypeBool, Optional: true, @@ -240,7 +275,6 @@ func resourceAwsDbInstance() *schema.Resource { }, "bucket_prefix": { Type: schema.TypeString, - Required: false, Optional: true, ForceNew: true, }, @@ -329,7 +363,6 @@ func resourceAwsDbInstance() *schema.Resource { "snapshot_identifier": { Type: schema.TypeString, - Computed: false, Optional: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -343,7 +376,6 @@ func resourceAwsDbInstance() *schema.Resource { "allow_major_version_upgrade": { Type: schema.TypeBool, - Computed: false, Optional: true, }, @@ -424,6 +456,25 @@ func resourceAwsDbInstance() *schema.Resource { Optional: true, }, + "performance_insights_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "performance_insights_kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateArn, + }, + + "performance_insights_retention_period": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "tags": tagsSchema(), }, } @@ -490,6 +541,12 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error opts.AvailabilityZone = aws.String(attr.(string)) } + if attr, ok := d.GetOk("allow_major_version_upgrade"); ok { + modifyDbInstanceInput.AllowMajorVersionUpgrade = aws.Bool(attr.(bool)) + // Having allowing_major_version_upgrade by itself should not trigger ModifyDBInstance + // InvalidParameterCombination: No modifications were requested + } + if attr, ok := d.GetOk("backup_retention_period"); ok { modifyDbInstanceInput.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) requiresModifyDbInstance = true @@ -528,6 +585,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error requiresModifyDbInstance = true } + if attr, ok := d.GetOk("max_allocated_storage"); ok { + modifyDbInstanceInput.MaxAllocatedStorage = aws.Int64(int64(attr.(int))) + requiresModifyDbInstance = true + } + if attr, ok := d.GetOk("monitoring_interval"); ok { opts.MonitoringInterval = aws.Int64(int64(attr.(int))) } @@ -573,6 +635,18 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error requiresModifyDbInstance = true } + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + opts.EnablePerformanceInsights = aws.Bool(attr.(bool)) + } + + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + opts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("performance_insights_retention_period"); ok { + opts.PerformanceInsightsRetentionPeriod = aws.Int64(int64(attr.(int))) + } + log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts) _, err := conn.CreateDBInstanceReadReplica(&opts) if err != nil { @@ -618,7 +692,6 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error if attr, ok := d.GetOk("multi_az"); ok { opts.MultiAZ = aws.Bool(attr.(bool)) - } if _, ok := d.GetOk("character_set_name"); ok { @@ -701,6 +774,18 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) } + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + 
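// Worked example for the two DiffSuppressFuncs above (illustrative numbers, not from
// this diff): with allocated_storage = 100 and max_allocated_storage = 1000 in
// configuration, RDS storage autoscaling may grow the instance to 500 GB. The next
// read then compares old = "500" against new = "100"; since oldInt > newInt and
// max_allocated_storage (1000) > newInt, the allocated_storage diff is suppressed
// rather than planning a shrink. For max_allocated_storage, disabling autoscaling is
// expressed by setting it equal to allocated_storage, after which the API stops
// reporting a value (old == "0"); suppressing the equal planned value avoids a
// perpetual diff.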
opts.EnablePerformanceInsights = aws.Bool(attr.(bool)) + } + + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + opts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("performance_insights_retention_period"); ok { + opts.PerformanceInsightsRetentionPeriod = aws.Int64(int64(attr.(int))) + } + log.Printf("[DEBUG] DB Instance S3 Restore configuration: %#v", opts) var err error // Retry for IAM eventual consistency @@ -724,6 +809,9 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RestoreDBInstanceFromS3(&opts) + } if err != nil { return fmt.Errorf("Error creating DB Instance: %s", err) } @@ -783,6 +871,12 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error opts.AvailabilityZone = aws.String(attr.(string)) } + if attr, ok := d.GetOk("allow_major_version_upgrade"); ok { + modifyDbInstanceInput.AllowMajorVersionUpgrade = aws.Bool(attr.(bool)) + // Having allowing_major_version_upgrade by itself should not trigger ModifyDBInstance + // InvalidParameterCombination: No modifications were requested + } + if attr, ok := d.GetOkExists("backup_retention_period"); ok { modifyDbInstanceInput.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) requiresModifyDbInstance = true @@ -836,6 +930,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error requiresModifyDbInstance = true } + if attr, ok := d.GetOk("max_allocated_storage"); ok { + modifyDbInstanceInput.MaxAllocatedStorage = aws.Int64(int64(attr.(int))) + requiresModifyDbInstance = true + } + if attr, ok := d.GetOk("monitoring_interval"); ok { modifyDbInstanceInput.MonitoringInterval = aws.Int64(int64(attr.(int))) requiresModifyDbInstance = true @@ -897,6 +996,19 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error requiresModifyDbInstance = true } + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + modifyDbInstanceInput.EnablePerformanceInsights = aws.Bool(attr.(bool)) + requiresModifyDbInstance = true + + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + modifyDbInstanceInput.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("performance_insights_retention_period"); ok { + modifyDbInstanceInput.PerformanceInsightsRetentionPeriod = aws.Int64(int64(attr.(int))) + } + } + log.Printf("[DEBUG] DB Instance restore from snapshot configuration: %s", opts) _, err := conn.RestoreDBInstanceFromDBSnapshot(&opts) @@ -974,6 +1086,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error if attr, ok := d.GetOk("license_model"); ok { opts.LicenseModel = aws.String(attr.(string)) } + + if attr, ok := d.GetOk("max_allocated_storage"); ok { + opts.MaxAllocatedStorage = aws.Int64(int64(attr.(int))) + } + if attr, ok := d.GetOk("parameter_group_name"); ok { opts.DBParameterGroupName = aws.String(attr.(string)) } @@ -1045,6 +1162,18 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error opts.DomainIAMRoleName = aws.String(attr.(string)) } + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + opts.EnablePerformanceInsights = aws.Bool(attr.(bool)) + } + + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + opts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("performance_insights_retention_period"); ok { + 
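// NOTE: the d.GetOk guards used for performance_insights_enabled in these create
// paths carry the usual schema.ResourceData.GetOk caveat: ok is false for a type's
// zero value, so an explicit performance_insights_enabled = false is treated the
// same as omitting the argument and the flag is simply left off the request. The
// update path below uses d.HasChange instead, which does catch a true -> false
// transition.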
opts.PerformanceInsightsRetentionPeriod = aws.Int64(int64(attr.(int))) + } + log.Printf("[DEBUG] DB Instance create configuration: %#v", opts) var err error err = resource.Retry(5*time.Minute, func() *resource.RetryError { @@ -1057,12 +1186,15 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateDBInstance(&opts) + } if err != nil { if isAWSErr(err, "InvalidParameterValue", "") { + opts.MasterUserPassword = aws.String("********") return fmt.Errorf("Error creating DB Instance: %s, %+v", err, opts) } return fmt.Errorf("Error creating DB Instance: %s", err) - } } @@ -1149,11 +1281,15 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { d.Set("backup_window", v.PreferredBackupWindow) d.Set("license_model", v.LicenseModel) d.Set("maintenance_window", v.PreferredMaintenanceWindow) + d.Set("max_allocated_storage", v.MaxAllocatedStorage) d.Set("publicly_accessible", v.PubliclyAccessible) d.Set("multi_az", v.MultiAZ) d.Set("kms_key_id", v.KmsKeyId) d.Set("port", v.DbInstancePort) d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled) + d.Set("performance_insights_enabled", v.PerformanceInsightsEnabled) + d.Set("performance_insights_kms_key_id", v.PerformanceInsightsKMSKeyId) + d.Set("performance_insights_retention_period", v.PerformanceInsightsRetentionPeriod) if v.DBSubnetGroup != nil { d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName) } @@ -1184,13 +1320,8 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { d.Set("option_group_name", v.OptionGroupMemberships[0].OptionGroupName) } - if v.MonitoringInterval != nil { - d.Set("monitoring_interval", v.MonitoringInterval) - } - - if v.MonitoringRoleArn != nil { - d.Set("monitoring_role_arn", v.MonitoringRoleArn) - } + d.Set("monitoring_interval", v.MonitoringInterval) + d.Set("monitoring_role_arn", v.MonitoringRoleArn) if err := d.Set("enabled_cloudwatch_logs_exports", flattenStringList(v.EnabledCloudwatchLogsExports)); err != nil { return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %s", err) @@ -1214,7 +1345,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("Error retrieving tags for ARN: %s", arn) + return fmt.Errorf("Error retrieving tags for ARN (%s): %s", arn, err) } var dt []*rds.Tag @@ -1340,7 +1471,8 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("allow_major_version_upgrade") { d.SetPartial("allow_major_version_upgrade") req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) - requestUpdate = true + // Having allowing_major_version_upgrade by itself should not trigger ModifyDBInstance + // as it results in InvalidParameterCombination: No modifications were requested } if d.HasChange("backup_retention_period") { d.SetPartial("backup_retention_period") @@ -1383,6 +1515,20 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) requestUpdate = true } + if d.HasChange("max_allocated_storage") { + d.SetPartial("max_allocated_storage") + mas := d.Get("max_allocated_storage").(int) + + // The API expects the max allocated storage value to be set to the allocated storage + // value when disabling autoscaling. 
This check ensures that value is set correctly + // if the update to the Terraform configuration was removing the argument completely. + if mas == 0 { + mas = d.Get("allocated_storage").(int) + } + + req.MaxAllocatedStorage = aws.Int64(int64(mas)) + requestUpdate = true + } if d.HasChange("password") { d.SetPartial("password") req.MasterUserPassword = aws.String(d.Get("password").(string)) @@ -1475,10 +1621,46 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error requestUpdate = true } + if d.HasChange("performance_insights_enabled") || d.HasChange("performance_insights_kms_key_id") || d.HasChange("performance_insights_retention_period") { + d.SetPartial("performance_insights_enabled") + req.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool)) + + if v, ok := d.GetOk("performance_insights_kms_key_id"); ok { + d.SetPartial("performance_insights_kms_key_id") + req.PerformanceInsightsKMSKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("performance_insights_retention_period"); ok { + d.SetPartial("performance_insights_retention_period") + req.PerformanceInsightsRetentionPeriod = aws.Int64(int64(v.(int))) + } + + requestUpdate = true + } + log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate) if requestUpdate { log.Printf("[DEBUG] DB Instance Modification request: %s", req) - _, err := conn.ModifyDBInstance(req) + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.ModifyDBInstance(req) + + // Retry for IAM eventual consistency + if isAWSErr(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBInstance(req) + } + if err != nil { return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance_role_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance_role_association.go index bbd257ac053..2c11c02e935 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance_role_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance_role_association.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // Constants not currently provided by the AWS Go SDK diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go index af960547592..8e394954788 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go @@ -10,9 
+10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbOptionGroup() *schema.Resource { @@ -281,7 +281,9 @@ func resourceAwsDbOptionGroupUpdate(d *schema.ResourceData, meta interface{}) er } return nil }) - + if isResourceTimeoutError(err) { + _, err = rdsconn.ModifyOptionGroup(modifyOpts) + } if err != nil { return fmt.Errorf("Error modifying DB Option Group: %s", err) } @@ -306,7 +308,7 @@ func resourceAwsDbOptionGroupDelete(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] Delete DB Option Group: %#v", deleteOpts) - ret := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { _, err := rdsconn.DeleteOptionGroup(deleteOpts) if err != nil { if isAWSErr(err, rds.ErrCodeInvalidOptionGroupStateFault, "") { @@ -317,8 +319,11 @@ func resourceAwsDbOptionGroupDelete(d *schema.ResourceData, meta interface{}) er } return nil }) - if ret != nil { - return fmt.Errorf("Error Deleting DB Option Group: %s", ret) + if isResourceTimeoutError(err) { + _, err = rdsconn.DeleteOptionGroup(deleteOpts) + } + if err != nil { + return fmt.Errorf("Error Deleting DB Option Group: %s", err) } return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go index e95d0b4d4ed..abbf58ebe2a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go @@ -7,12 +7,11 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" ) @@ -310,23 +309,26 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn - return resource.Retry(3*time.Minute, func() *resource.RetryError { - deleteOpts := rds.DeleteDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Id()), - } - + deleteOpts := rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Id()), + } + err := resource.Retry(3*time.Minute, func() *resource.RetryError { _, err := conn.DeleteDBParameterGroup(&deleteOpts) if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "DBParameterGroupNotFoundFault" { - return resource.RetryableError(err) - } - if ok && awsErr.Code() == "InvalidDBParameterGroupState" { + if isAWSErr(err, 
"DBParameterGroupNotFoundFault", "") || isAWSErr(err, "InvalidDBParameterGroupState", "") { return resource.RetryableError(err) } + return resource.NonRetryableError(err) } - return resource.NonRetryableError(err) + return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteDBParameterGroup(&deleteOpts) + } + if err != nil { + return fmt.Errorf("Error deleting DB parameter group: %s", err) + } + return nil } func resourceAwsDbParameterHash(v interface{}) int { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go index 04d4ef1f9f8..8d3e51183ae 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbSecurityGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go index 5bf5844513e..3fc9746e1f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go @@ -6,10 +6,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbSnapshot() *schema.Resource { @@ -111,16 +110,17 @@ func resourceAwsDbSnapshot() *schema.Resource { func resourceAwsDbSnapshotCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + dBInstanceIdentifier := d.Get("db_instance_identifier").(string) params := &rds.CreateDBSnapshotInput{ - DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), + DBInstanceIdentifier: aws.String(dBInstanceIdentifier), DBSnapshotIdentifier: aws.String(d.Get("db_snapshot_identifier").(string)), Tags: tags, } _, err := conn.CreateDBSnapshot(params) if err != nil { - return err + return fmt.Errorf("Error creating AWS DB Snapshot %s: %s", dBInstanceIdentifier, err) } d.SetId(d.Get("db_snapshot_identifier").(string)) @@ -149,8 +149,15 @@ func resourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error { DBSnapshotIdentifier: aws.String(d.Id()), } resp, err := conn.DescribeDBSnapshots(params) + + if isAWSErr(err, rds.ErrCodeDBSnapshotNotFoundFault, "") { + 
log.Printf("[WARN] AWS DB Snapshot (%s) is already gone", d.Id()) + d.SetId("") + return nil + } + if err != nil { - return err + return fmt.Errorf("Error describing AWS DB Snapshot %s: %s", d.Id(), err) } snapshot := resp.DBSnapshots[0] @@ -185,7 +192,15 @@ func resourceAwsDbSnapshotDelete(d *schema.ResourceData, meta interface{}) error DBSnapshotIdentifier: aws.String(d.Id()), } _, err := conn.DeleteDBSnapshot(params) - return err + if isAWSErr(err, rds.ErrCodeDBSnapshotNotFoundFault, "") { + return nil + } + + if err != nil { + return fmt.Errorf("Error deleting AWS DB Snapshot %s: %s", d.Id(), err) + } + + return nil } func resourceAwsDbSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { @@ -208,9 +223,9 @@ func resourceAwsDbSnapshotUpdate(d *schema.ResourceData, meta interface{}) error TagKeys: removeTagKeys, } - log.Printf("[DEBUG] Untagging RDS Cluster: %s", input) + log.Printf("[DEBUG] Untagging DB Snapshot: %s", input) if _, err := conn.RemoveTagsFromResource(input); err != nil { - return fmt.Errorf("error untagging RDS Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("error untagging DB Snapshot (%s): %s", d.Id(), err) } } @@ -220,9 +235,9 @@ func resourceAwsDbSnapshotUpdate(d *schema.ResourceData, meta interface{}) error Tags: createTags, } - log.Printf("[DEBUG] Tagging RDS Cluster: %s", input) + log.Printf("[DEBUG] Tagging DB Snapshot: %s", input) if _, err := conn.AddTagsToResource(input); err != nil { - return fmt.Errorf("error tagging RDS Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("error tagging DB Snapshot (%s): %s", d.Id(), err) } } } @@ -242,11 +257,10 @@ func resourceAwsDbSnapshotStateRefreshFunc( log.Printf("[DEBUG] DB Snapshot describe configuration: %#v", opts) resp, err := conn.DescribeDBSnapshots(opts) + if isAWSErr(err, rds.ErrCodeDBSnapshotNotFoundFault, "") { + return nil, "", nil + } if err != nil { - snapshoterr, ok := err.(awserr.Error) - if ok && snapshoterr.Code() == "DBSnapshotNotFound" { - return nil, "", nil - } return nil, "", fmt.Errorf("Error retrieving DB Snapshots: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go index fef3518e996..3e42cc9d349 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDbSubnetGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go index e0a143ebdcb..e96ea6f58ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // ACL Network ACLs all contain explicit deny-all rules that cannot be @@ -35,7 +35,6 @@ func resourceAwsDefaultNetworkAcl() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Computed: false, }, // We want explicit management of Subnets here, so we do not allow them to be // computed. Instead, an empty config will enforce just that; removal of the @@ -54,7 +53,6 @@ func resourceAwsDefaultNetworkAcl() *schema.Resource { // rules "ingress": { Type: schema.TypeSet, - Required: false, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -100,7 +98,6 @@ func resourceAwsDefaultNetworkAcl() *schema.Resource { }, "egress": { Type: schema.TypeSet, - Required: false, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go index 19e24e3c5db..088ca40fa4c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDefaultRouteTable() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go index 0b3af038a6c..3da1cf4f19b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDefaultSecurityGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go index 443ed252af7..dfa74f83fb7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDefaultSubnet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go index 345325fb83c..4cda17641bd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDefaultVpc() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go index ecc0bd87ca6..ea7cf94bb46 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDefaultVpcDhcpOptions() *schema.Resource { @@ -65,16 +65,28 @@ func resourceAwsDefaultVpcDhcpOptionsCreate(d *schema.ResourceData, meta interfa }, } - resp, err := conn.DescribeDhcpOptions(req) + var dhcpOptions []*ec2.DhcpOptions + err := conn.DescribeDhcpOptionsPages(req, func(page *ec2.DescribeDhcpOptionsOutput, lastPage bool) bool { + dhcpOptions = append(dhcpOptions, page.DhcpOptions...) 
+ return !lastPage + }) + if err != nil { - return err + return fmt.Errorf("Error describing DHCP options: %s", err) } - if len(resp.DhcpOptions) != 1 || resp.DhcpOptions[0] == nil { + if len(dhcpOptions) == 0 { return fmt.Errorf("Default DHCP Options Set not found") } - d.SetId(aws.StringValue(resp.DhcpOptions[0].DhcpOptionsId)) + if len(dhcpOptions) > 1 { + return fmt.Errorf("Multiple default DHCP Options Sets found") + } + + if dhcpOptions[0] == nil { + return fmt.Errorf("Default DHCP Options Set is empty") + } + d.SetId(aws.StringValue(dhcpOptions[0].DhcpOptionsId)) return resourceAwsVpcDhcpOptionsUpdate(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go index f35e3fe2bbf..9b62ad28b41 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/devicefarm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDevicefarmProject() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_conditional_forwarder.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_conditional_forwarder.go index b04e82ba881..66c4350c946 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_conditional_forwarder.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_conditional_forwarder.go @@ -6,8 +6,8 @@ import ( "regexp" "strings" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directoryservice" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go index 986d2d17198..f0cc682dac0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go @@ -5,12 +5,12 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
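+ // NOTE: these helper packages are the extracted terraform-plugin-sdk
+ // equivalents of the old github.com/hashicorp/terraform/helper/... paths;
+ // the same mechanical import rewrite is applied to every resource file in
+ // this vendor bump, with no API change beyond the module path.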
) func resourceAwsDirectoryServiceDirectory() *schema.Resource { @@ -215,6 +215,7 @@ func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *sche input := directoryservice.ConnectDirectoryInput{ Name: aws.String(d.Get("name").(string)), Password: aws.String(d.Get("password").(string)), + Tags: tagsFromMapDS(d.Get("tags").(map[string]interface{})), } if v, ok := d.GetOk("description"); ok { @@ -249,6 +250,7 @@ func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d * input := directoryservice.CreateDirectoryInput{ Name: aws.String(d.Get("name").(string)), Password: aws.String(d.Get("password").(string)), + Tags: tagsFromMapDS(d.Get("tags").(map[string]interface{})), } if v, ok := d.GetOk("description"); ok { @@ -283,6 +285,7 @@ func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d * input := directoryservice.CreateMicrosoftADInput{ Name: aws.String(d.Get("name").(string)), Password: aws.String(d.Get("password").(string)), + Tags: tagsFromMapDS(d.Get("tags").(map[string]interface{})), } if v, ok := d.GetOk("description"); ok { @@ -310,6 +313,28 @@ func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d * return *out.DirectoryId, nil } +func enableDirectoryServiceSso(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) error { + d.SetPartial("enable_sso") + + if v, ok := d.GetOk("enable_sso"); ok && v.(bool) { + log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id()) + if _, err := dsconn.EnableSso(&directoryservice.EnableSsoInput{ + DirectoryId: aws.String(d.Id()), + }); err != nil { + return fmt.Errorf("Error Enabling SSO for DS directory %s: %s", d.Id(), err) + } + } else { + log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id()) + if _, err := dsconn.DisableSso(&directoryservice.DisableSsoInput{ + DirectoryId: aws.String(d.Id()), + }); err != nil { + return fmt.Errorf("Error Disabling SSO for DS directory %s: %s", d.Id(), err) + } + } + + return nil +} + func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error { dsconn := meta.(*AWSClient).dsconn @@ -380,29 +405,20 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int *out.Alias, *out.DirectoryId) } - return resourceAwsDirectoryServiceDirectoryUpdate(d, meta) + if d.HasChange("enable_sso") { + if err := enableDirectoryServiceSso(dsconn, d); err != nil { + return err + } + } + + return resourceAwsDirectoryServiceDirectoryRead(d, meta) } func resourceAwsDirectoryServiceDirectoryUpdate(d *schema.ResourceData, meta interface{}) error { dsconn := meta.(*AWSClient).dsconn if d.HasChange("enable_sso") { - d.SetPartial("enable_sso") - var err error - - if v, ok := d.GetOk("enable_sso"); ok && v.(bool) { - log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id()) - _, err = dsconn.EnableSso(&directoryservice.EnableSsoInput{ - DirectoryId: aws.String(d.Id()), - }) - } else { - log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id()) - _, err = dsconn.DisableSso(&directoryservice.DisableSsoInput{ - DirectoryId: aws.String(d.Id()), - }) - } - - if err != nil { + if err := enableDirectoryServiceSso(dsconn, d); err != nil { return err } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_log_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_log_subscription.go new file mode 100644 
index 00000000000..b000cd28d64 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_log_subscription.go @@ -0,0 +1,99 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsDirectoryServiceLogSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDirectoryServiceLogSubscriptionCreate, + Read: resourceAwsDirectoryServiceLogSubscriptionRead, + Delete: resourceAwsDirectoryServiceLogSubscriptionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "directory_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "log_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsDirectoryServiceLogSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + dsconn := meta.(*AWSClient).dsconn + + directoryId := d.Get("directory_id") + logGroupName := d.Get("log_group_name") + + input := directoryservice.CreateLogSubscriptionInput{ + DirectoryId: aws.String(directoryId.(string)), + LogGroupName: aws.String(logGroupName.(string)), + } + + _, err := dsconn.CreateLogSubscription(&input) + if err != nil { + return fmt.Errorf("error creating Directory Service Log Subscription: %s", err) + } + + d.SetId(directoryId.(string)) + + return resourceAwsDirectoryServiceLogSubscriptionRead(d, meta) +} + +func resourceAwsDirectoryServiceLogSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + dsconn := meta.(*AWSClient).dsconn + + directoryId := d.Id() + + input := directoryservice.ListLogSubscriptionsInput{ + DirectoryId: aws.String(directoryId), + } + + out, err := dsconn.ListLogSubscriptions(&input) + if err != nil { + return fmt.Errorf("error listing Directory Service Log Subscription: %s", err) + } + + if len(out.LogSubscriptions) == 0 { + log.Printf("[WARN] No log subscriptions for directory %s found", directoryId) + d.SetId("") + return nil + } + + logSubscription := out.LogSubscriptions[0] + d.Set("directory_id", logSubscription.DirectoryId) + d.Set("log_group_name", logSubscription.LogGroupName) + + return nil +} + +func resourceAwsDirectoryServiceLogSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + dsconn := meta.(*AWSClient).dsconn + + directoryId := d.Id() + + input := directoryservice.DeleteLogSubscriptionInput{ + DirectoryId: aws.String(directoryId), + } + + _, err := dsconn.DeleteLogSubscription(&input) + if err != nil { + return fmt.Errorf("error deleting Directory Service Log Subscription: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dlm_lifecycle_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dlm_lifecycle_policy.go index eb03b65c457..0dbfd6cf052 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dlm_lifecycle_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dlm_lifecycle_policy.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dlm" -
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDlmLifecyclePolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go index a7835a74224..f183ca60c7c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDmsCertificate() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go index 0a43316fcb1..df27a518dec 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDmsEndpoint() *schema.Resource { @@ -65,6 +65,7 @@ func resourceAwsDmsEndpoint() *schema.Resource { "aurora", "aurora-postgresql", "azuredb", + "db2", "docdb", "dynamodb", "mariadb", @@ -138,17 +139,17 @@ func resourceAwsDmsEndpoint() *schema.Resource { "auth_type": { Type: schema.TypeString, Optional: true, - Default: "PASSWORD", + Default: dms.AuthTypeValuePassword, }, "auth_mechanism": { Type: schema.TypeString, Optional: true, - Default: "DEFAULT", + Default: dms.AuthMechanismValueDefault, }, "nesting_level": { Type: schema.TypeString, Optional: true, - Default: "NONE", + Default: dms.NestingLevelValueNone, }, "extract_doc_id": { Type: schema.TypeString, @@ -298,21 +299,22 @@ func resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) erro log.Println("[DEBUG] DMS create endpoint:", request) err := resource.Retry(5*time.Minute, func() *resource.RetryError { - if _, err := conn.CreateEndpoint(request); err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Code() { - case "AccessDeniedFault": - return resource.RetryableError(awserr) - } - } - // Didn't recognize the error, so shouldn't retry. 
+ _, err := conn.CreateEndpoint(request) + if isAWSErr(err, "AccessDeniedFault", "") { + return resource.RetryableError(err) + } + if err != nil { return resource.NonRetryableError(err) } + // Successful create return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateEndpoint(request) + } if err != nil { - return err + return fmt.Errorf("Error creating DMS endpoint: %s", err) } d.SetId(d.Get("endpoint_id").(string)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go index 9277b01710f..48da9f65274 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDmsReplicationInstance() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go index 65dd8b7fba6..c241a4a42e4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDmsReplicationSubnetGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go index 8a0455961a4..1ae3b20e379 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func
resourceAwsDmsReplicationTask() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster.go index b4d09f7eed2..283775b3001 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDocDBCluster() *schema.Resource { @@ -226,7 +226,6 @@ func resourceAwsDocDBCluster() *schema.Resource { "enabled_cloudwatch_logs_exports": { Type: schema.TypeList, - Computed: false, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, @@ -340,6 +339,9 @@ func resourceAwsDocDBClusterCreate(d *schema.ResourceData, meta interface{}) err } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RestoreDBClusterFromSnapshot(&opts) + } if err != nil { return fmt.Errorf("Error creating DocDB Cluster: %s", err) } @@ -421,6 +423,9 @@ func resourceAwsDocDBClusterCreate(d *schema.ResourceData, meta interface{}) err } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBCluster(createOpts) + } if err != nil { return fmt.Errorf("error creating DocDB cluster: %s", err) } @@ -636,6 +641,9 @@ func resourceAwsDocDBClusterUpdate(d *schema.ResourceData, meta interface{}) err } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBCluster(req) + } if err != nil { return fmt.Errorf("Failed to modify DocDB Cluster (%s): %s", d.Id(), err) } @@ -692,6 +700,9 @@ func resourceAwsDocDBClusterDelete(d *schema.ResourceData, meta interface{}) err } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteDBCluster(&deleteOpts) + } if err != nil { return fmt.Errorf("DocDB Cluster cannot be deleted: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_instance.go index 997bbed69c7..2354ab89780 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_instance.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDocDBClusterInstance() *schema.Resource { @@ -213,6 +213,9 @@ func 
resourceAwsDocDBClusterInstanceCreate(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBInstance(createOpts) + } if err != nil { return fmt.Errorf("error creating DocDB Instance: %s", err) } @@ -356,6 +359,9 @@ func resourceAwsDocDBClusterInstanceUpdate(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBInstance(req) + } if err != nil { return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_parameter_group.go index 2d1fce488ba..b0b9ae50edf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_parameter_group.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const docdbClusterParameterGroupMaxParamsBulkEdit = 20 @@ -266,7 +266,7 @@ func waitForDocDBClusterParameterGroupDeletion(conn *docdb.DocDB, name string) e DBClusterParameterGroupName: aws.String(name), } - return resource.Retry(10*time.Minute, func() *resource.RetryError { + err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DescribeDBClusterParameterGroups(params) if isAWSErr(err, docdb.ErrCodeDBParameterGroupNotFoundFault, "") { @@ -279,4 +279,14 @@ func waitForDocDBClusterParameterGroupDeletion(conn *docdb.DocDB, name string) e return resource.RetryableError(fmt.Errorf("DocDB Parameter Group (%s) still exists", name)) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeDBClusterParameterGroups(params) + if isAWSErr(err, docdb.ErrCodeDBParameterGroupNotFoundFault, "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting DocDB cluster parameter group: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_snapshot.go index ff94c0f3668..73f84036142 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_snapshot.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_cluster_snapshot.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDocDBClusterSnapshot() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_subnet_group.go index 4f34f9aa217..97b586f91c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_docdb_subnet_group.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDocDBSubnetGroup() *schema.Resource { @@ -140,7 +140,7 @@ func resourceAwsDocDBSubnetGroupRead(d *schema.ResourceData, meta interface{}) e }) if err != nil { - return fmt.Errorf("error retrieving tags for ARN: %s", aws.StringValue(subnetGroup.DBSubnetGroupArn)) + return fmt.Errorf("error retrieving tags for ARN (%s): %s", aws.StringValue(subnetGroup.DBSubnetGroupArn), err) } if err := d.Set("tags", tagsToMapDocDB(resp.TagList)); err != nil { @@ -203,7 +203,7 @@ func waitForDocDBSubnetGroupDeletion(conn *docdb.DocDB, name string) error { DBSubnetGroupName: aws.String(name), } - return resource.Retry(10*time.Minute, func() *resource.RetryError { + err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DescribeDBSubnetGroups(params) if isAWSErr(err, docdb.ErrCodeDBSubnetGroupNotFoundFault, "") { @@ -216,4 +216,14 @@ func waitForDocDBSubnetGroupDeletion(conn *docdb.DocDB, name string) error { return resource.RetryableError(fmt.Errorf("DocDB Subnet Group (%s) still exists", name)) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeDBSubnetGroups(params) + if isAWSErr(err, docdb.ErrCodeDBSubnetGroupNotFoundFault, "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting DocDB subnet group: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_bgp_peer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_bgp_peer.go index 49d4941c0a2..1a4bd4f9d52 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_bgp_peer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_bgp_peer.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDxBgpPeer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection.go index 6228dced1c7..fe8ba8d0a7b 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxConnection() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection_association.go index 3d6a9cb1a42..86cd966d8f7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_connection_association.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxConnectionAssociation() *schema.Resource { @@ -81,7 +81,7 @@ func resourceAwsDxConnectionAssociationDelete(d *schema.ResourceData, meta inter LagId: aws.String(d.Get("lag_id").(string)), } - return resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := conn.DisassociateConnectionFromLag(input) if err != nil { if isAWSErr(err, directconnect.ErrCodeClientException, "is in a transitioning state.") { @@ -91,4 +91,10 @@ func resourceAwsDxConnectionAssociationDelete(d *schema.ResourceData, meta inter } return nil }) + + if isResourceTimeoutError(err) { + _, err = conn.DisassociateConnectionFromLag(input) + } + + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway.go index b0693639560..1fa39907029 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxGateway() *schema.Resource { @@ -125,6 +125,28 @@ func resourceAwsDxGatewayDelete(d *schema.ResourceData, meta interface{}) error return nil } +func resourceAwsDxGatewayImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).dxconn + + resp, err := 
conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ + DirectConnectGatewayId: aws.String(d.Id()), + }) + if err != nil { + return nil, fmt.Errorf("error reading Direct Connect gateway association: %s", err) + } + + results := []*schema.ResourceData{d} + for _, assoc := range resp.DirectConnectGatewayAssociations { + d := resourceAwsDxGatewayAssociation().Data(nil) + d.SetType("aws_dx_gateway_association") + d.SetId(dxGatewayAssociationId(aws.StringValue(assoc.DirectConnectGatewayId), aws.StringValue(assoc.AssociatedGateway.Id))) + d.Set("dx_gateway_association_id", assoc.AssociationId) + results = append(results, d) + } + + return results, nil +} + func dxGatewayStateRefresh(conn *directconnect.DirectConnect, dxgwId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeDirectConnectGateways(&directconnect.DescribeDirectConnectGatewaysInput{ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association.go index 36fa5f4b3e6..3c724d4d9b1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "strings" @@ -8,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) const ( @@ -26,6 +27,15 @@ func resourceAwsDxGatewayAssociation() *schema.Resource { State: resourceAwsDxGatewayAssociationImport, }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceAwsDxGatewayAssociationResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: resourceAwsDxGatewayAssociationStateUpgradeV0, + Version: 0, + }, + }, + Schema: map[string]*schema.Schema{ "allowed_prefixes": { Type: schema.TypeSet, @@ -34,22 +44,58 @@ func resourceAwsDxGatewayAssociation() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, - "dx_gateway_id": { + "associated_gateway_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_owner_account_id", "proposal_id", "vpn_gateway_id"}, + }, + + "associated_gateway_owner_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + }, + + "associated_gateway_type": { Type: schema.TypeString, - Required: true, - ForceNew: true, + Computed: true, }, - "vpn_gateway_id": { + "dx_gateway_association_id": { + Type: schema.TypeString, + Computed: true, + }, + + "dx_gateway_id": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "dx_gateway_association_id": { + "dx_gateway_owner_account_id": { Type: schema.TypeString, Computed: true, }, + + "proposal_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", 
"vpn_gateway_id"}, + }, + + "vpn_gateway_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", "associated_gateway_owner_account_id", "proposal_id"}, + Deprecated: "use 'associated_gateway_id' argument instead", + }, }, Timeouts: &schema.ResourceTimeout{ @@ -64,31 +110,65 @@ func resourceAwsDxGatewayAssociationCreate(d *schema.ResourceData, meta interfac conn := meta.(*AWSClient).dxconn dxgwId := d.Get("dx_gateway_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) - req := &directconnect.CreateDirectConnectGatewayAssociationInput{ - AddAllowedPrefixesToDirectConnectGateway: expandDxRouteFilterPrefixes(d.Get("allowed_prefixes").(*schema.Set)), - DirectConnectGatewayId: aws.String(dxgwId), - VirtualGatewayId: aws.String(vgwId), + gwIdRaw, gwIdOk := d.GetOk("associated_gateway_id") + vgwIdRaw, vgwIdOk := d.GetOk("vpn_gateway_id") + gwAcctIdRaw, gwAcctIdOk := d.GetOk("associated_gateway_owner_account_id") + proposalIdRaw, proposalIdOk := d.GetOk("proposal_id") + + if gwAcctIdOk || proposalIdOk { + // Cross-account association. + if !(gwAcctIdOk && proposalIdOk) { + return fmt.Errorf("associated_gateway_owner_account_id and proposal_id must be configured") + } + } else if !(gwIdOk || vgwIdOk) { + return fmt.Errorf("either associated_gateway_owner_account_id and proposal_id or one of associated_gateway_id or vpn_gateway_id must be configured") } - log.Printf("[DEBUG] Creating Direct Connect gateway association: %#v", req) - _, err := conn.CreateDirectConnectGatewayAssociation(req) - if err != nil { - return fmt.Errorf("error creating Direct Connect gateway association: %s", err) - } + associationId := "" + if gwAcctIdOk { + req := &directconnect.AcceptDirectConnectGatewayAssociationProposalInput{ + AssociatedGatewayOwnerAccount: aws.String(gwAcctIdRaw.(string)), + DirectConnectGatewayId: aws.String(dxgwId), + OverrideAllowedPrefixesToDirectConnectGateway: expandDxRouteFilterPrefixes(d.Get("allowed_prefixes").(*schema.Set)), + ProposalId: aws.String(proposalIdRaw.(string)), + } - d.SetId(dxGatewayAssociationId(dxgwId, vgwId)) + log.Printf("[DEBUG] Accepting Direct Connect gateway association proposal: %#v", req) + resp, err := conn.AcceptDirectConnectGatewayAssociationProposal(req) + if err != nil { + return fmt.Errorf("error accepting Direct Connect gateway association proposal: %s", err) + } - stateConf := &resource.StateChangeConf{ - Pending: []string{directconnect.GatewayAssociationStateAssociating}, - Target: []string{directconnect.GatewayAssociationStateAssociated}, - Refresh: dxGatewayAssociationStateRefresh(conn, dxgwId, vgwId), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, + // For historical reasons the resource ID isn't set to the association ID returned from the API. 
+ associationId = aws.StringValue(resp.DirectConnectGatewayAssociation.AssociationId) + d.SetId(dxGatewayAssociationId(dxgwId, aws.StringValue(resp.DirectConnectGatewayAssociation.AssociatedGateway.Id))) + } else { + req := &directconnect.CreateDirectConnectGatewayAssociationInput{ + AddAllowedPrefixesToDirectConnectGateway: expandDxRouteFilterPrefixes(d.Get("allowed_prefixes").(*schema.Set)), + DirectConnectGatewayId: aws.String(dxgwId), + } + gwId := "" + if gwIdOk { + gwId = gwIdRaw.(string) + req.GatewayId = aws.String(gwId) + } else { + gwId = vgwIdRaw.(string) + req.VirtualGatewayId = aws.String(gwId) + } + + log.Printf("[DEBUG] Creating Direct Connect gateway association: %#v", req) + resp, err := conn.CreateDirectConnectGatewayAssociation(req) + if err != nil { + return fmt.Errorf("error creating Direct Connect gateway association: %s", err) + } + + // For historical reasons the resource ID isn't set to the association ID returned from the API. + associationId = aws.StringValue(resp.DirectConnectGatewayAssociation.AssociationId) + d.SetId(dxGatewayAssociationId(dxgwId, gwId)) } - _, err = stateConf.WaitForState() - if err != nil { + d.Set("dx_gateway_association_id", associationId) + + if err := waitForDirectConnectGatewayAssociationAvailabilityOnCreate(conn, associationId, d.Timeout(schema.TimeoutCreate)); err != nil { return fmt.Errorf("error waiting for Direct Connect gateway association (%s) to become available: %s", d.Id(), err) } @@ -98,11 +178,10 @@ func resourceAwsDxGatewayAssociationCreate(d *schema.ResourceData, meta interfac func resourceAwsDxGatewayAssociationRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).dxconn - dxgwId := d.Get("dx_gateway_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) - assocRaw, state, err := dxGatewayAssociationStateRefresh(conn, dxgwId, vgwId)() + associationId := d.Get("dx_gateway_association_id").(string) + assocRaw, state, err := dxGatewayAssociationStateRefresh(conn, associationId)() if err != nil { - return fmt.Errorf("error reading Direct Connect gateway association: %s", err) + return fmt.Errorf("error reading Direct Connect gateway association (%s): %s", d.Id(), err) } if state == gatewayAssociationStateDeleted { log.Printf("[WARN] Direct Connect gateway association (%s) not found, removing from state", d.Id()) @@ -111,24 +190,38 @@ func resourceAwsDxGatewayAssociationRead(d *schema.ResourceData, meta interface{ } assoc := assocRaw.(*directconnect.GatewayAssociation) - d.Set("dx_gateway_id", assoc.DirectConnectGatewayId) - d.Set("vpn_gateway_id", assoc.VirtualGatewayId) - d.Set("dx_gateway_association_id", assoc.AssociationId) + err = d.Set("allowed_prefixes", flattenDxRouteFilterPrefixes(assoc.AllowedPrefixesToDirectConnectGateway)) if err != nil { return fmt.Errorf("error setting allowed_prefixes: %s", err) } + if _, ok := d.GetOk("vpn_gateway_id"); ok { + d.Set("vpn_gateway_id", assoc.VirtualGatewayId) + } else { + d.Set("associated_gateway_id", assoc.AssociatedGateway.Id) + } + d.Set("associated_gateway_owner_account_id", assoc.AssociatedGateway.OwnerAccount) + d.Set("associated_gateway_type", assoc.AssociatedGateway.Type) + d.Set("dx_gateway_association_id", assoc.AssociationId) + d.Set("dx_gateway_id", assoc.DirectConnectGatewayId) + d.Set("dx_gateway_owner_account_id", assoc.DirectConnectGatewayOwnerAccount) + return nil } func resourceAwsDxGatewayAssociationUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).dxconn - dxgwId := 
d.Get("dx_gateway_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) + _, gwIdOk := d.GetOk("associated_gateway_id") + _, vgwIdOk := d.GetOk("vpn_gateway_id") + if !gwIdOk && !vgwIdOk { + return errors.New("one of associated_gateway_id or vpn_gateway_id must be configured") + } if d.HasChange("allowed_prefixes") { + associationId := d.Get("dx_gateway_association_id").(string) + oraw, nraw := d.GetChange("allowed_prefixes") o := oraw.(*schema.Set) n := nraw.(*schema.Set) @@ -137,26 +230,17 @@ func resourceAwsDxGatewayAssociationUpdate(d *schema.ResourceData, meta interfac req := &directconnect.UpdateDirectConnectGatewayAssociationInput{ AddAllowedPrefixesToDirectConnectGateway: expandDxRouteFilterPrefixes(add), - AssociationId: aws.String(d.Get("dx_gateway_association_id").(string)), + AssociationId: aws.String(associationId), RemoveAllowedPrefixesToDirectConnectGateway: expandDxRouteFilterPrefixes(del), } - log.Printf("[DEBUG] Direct Connect gateway association: %#v", req) + log.Printf("[DEBUG] Updating Direct Connect gateway association: %#v", req) _, err := conn.UpdateDirectConnectGatewayAssociation(req) if err != nil { return fmt.Errorf("error updating Direct Connect gateway association (%s): %s", d.Id(), err) } - stateConf := &resource.StateChangeConf{ - Pending: []string{directconnect.GatewayAssociationStateUpdating}, - Target: []string{directconnect.GatewayAssociationStateAssociated}, - Refresh: dxGatewayAssociationStateRefresh(conn, dxgwId, vgwId), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { + if err := waitForDirectConnectGatewayAssociationAvailabilityOnUpdate(conn, associationId, d.Timeout(schema.TimeoutUpdate)); err != nil { return fmt.Errorf("error waiting for Direct Connect gateway association (%s) to become available: %s", d.Id(), err) } } @@ -167,13 +251,11 @@ func resourceAwsDxGatewayAssociationUpdate(d *schema.ResourceData, meta interfac func resourceAwsDxGatewayAssociationDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).dxconn - dxgwId := d.Get("dx_gateway_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) + associationId := d.Get("dx_gateway_association_id").(string) log.Printf("[DEBUG] Deleting Direct Connect gateway association: %s", d.Id()) _, err := conn.DeleteDirectConnectGatewayAssociation(&directconnect.DeleteDirectConnectGatewayAssociationInput{ - DirectConnectGatewayId: aws.String(dxgwId), - VirtualGatewayId: aws.String(vgwId), + AssociationId: aws.String(associationId), }) if isAWSErr(err, directconnect.ErrCodeClientException, "No association exists") { return nil @@ -182,8 +264,8 @@ func resourceAwsDxGatewayAssociationDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("error deleting Direct Connect gateway association: %s", err) } - if err := waitForDirectConnectGatewayAssociationDeletion(conn, dxgwId, vgwId, d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("error waiting for Direct Connect gateway association (%s) to be deleted: %s", d.Id(), err.Error()) + if err := waitForDirectConnectGatewayAssociationDeletion(conn, associationId, d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for Direct Connect gateway association (%s) to be deleted: %s", d.Id(), err) } return nil @@ -192,25 +274,38 @@ func resourceAwsDxGatewayAssociationDelete(d *schema.ResourceData, meta interfac func resourceAwsDxGatewayAssociationImport(d *schema.ResourceData, 
meta interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), "/") if len(parts) != 2 { - return []*schema.ResourceData{}, fmt.Errorf("Wrong format of resource: %s. Please follow 'dx-gw-id/vgw-id'", d.Id()) + return []*schema.ResourceData{}, fmt.Errorf("Wrong format of resource: %s. Please follow 'dx-gw-id/gw-id'", d.Id()) } dxgwId := parts[0] - vgwId := parts[1] - log.Printf("[DEBUG] Importing Direct Connect gateway association %s/%s", dxgwId, vgwId) + gwId := parts[1] + id := dxGatewayAssociationId(dxgwId, gwId) + log.Printf("[DEBUG] Importing Direct Connect gateway association %s/%s", dxgwId, gwId) - d.SetId(dxGatewayAssociationId(dxgwId, vgwId)) - d.Set("dx_gateway_id", dxgwId) - d.Set("vpn_gateway_id", vgwId) + conn := meta.(*AWSClient).dxconn + + resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ + AssociatedGatewayId: aws.String(gwId), + DirectConnectGatewayId: aws.String(dxgwId), + }) + if err != nil { + return nil, err + } + if n := len(resp.DirectConnectGatewayAssociations); n != 1 { + return nil, fmt.Errorf("Found %d Direct Connect gateway associations for %s, expected 1", n, id) + } + + d.SetId(id) + d.Set("dx_gateway_id", resp.DirectConnectGatewayAssociations[0].DirectConnectGatewayId) + d.Set("dx_gateway_association_id", resp.DirectConnectGatewayAssociations[0].AssociationId) return []*schema.ResourceData{d}, nil } -func dxGatewayAssociationStateRefresh(conn *directconnect.DirectConnect, dxgwId, vgwId string) resource.StateRefreshFunc { +func dxGatewayAssociationStateRefresh(conn *directconnect.DirectConnect, associationId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ - DirectConnectGatewayId: aws.String(dxgwId), - VirtualGatewayId: aws.String(vgwId), + AssociationId: aws.String(associationId), }) if err != nil { return nil, "", err @@ -223,23 +318,62 @@ func dxGatewayAssociationStateRefresh(conn *directconnect.DirectConnect, dxgwId, case 1: assoc := resp.DirectConnectGatewayAssociations[0] + + if stateChangeError := aws.StringValue(assoc.StateChangeError); stateChangeError != "" { + id := dxGatewayAssociationId( + aws.StringValue(resp.DirectConnectGatewayAssociations[0].DirectConnectGatewayId), + aws.StringValue(resp.DirectConnectGatewayAssociations[0].AssociatedGateway.Id)) + log.Printf("[INFO] Direct Connect gateway association (%s) state change error: %s", id, stateChangeError) + } + return assoc, aws.StringValue(assoc.AssociationState), nil default: - return nil, "", fmt.Errorf("Found %d Direct Connect gateway associations for %s, expected 1", n, dxGatewayAssociationId(dxgwId, vgwId)) + return nil, "", fmt.Errorf("Found %d Direct Connect gateway associations for %s, expected 1", n, associationId) } } } -func dxGatewayAssociationId(dxgwId, vgwId string) string { - return fmt.Sprintf("ga-%s%s", dxgwId, vgwId) +// Terraform resource ID. 
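+// The value concatenates the Direct Connect gateway ID and the associated gateway ID;
+// for example (illustrative IDs only), dxgwId "dxgw-123" and gwId "vgw-456" yield
+// "ga-dxgw-123vgw-456". It is distinct from the AWS-side association ID, which is
+// stored separately in the dx_gateway_association_id attribute.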
+func dxGatewayAssociationId(dxgwId, gwId string) string { + return fmt.Sprintf("ga-%s%s", dxgwId, gwId) +} + +func waitForDirectConnectGatewayAssociationAvailabilityOnCreate(conn *directconnect.DirectConnect, associationId string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{directconnect.GatewayAssociationStateAssociating}, + Target: []string{directconnect.GatewayAssociationStateAssociated}, + Refresh: dxGatewayAssociationStateRefresh(conn, associationId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForDirectConnectGatewayAssociationAvailabilityOnUpdate(conn *directconnect.DirectConnect, associationId string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{directconnect.GatewayAssociationStateUpdating}, + Target: []string{directconnect.GatewayAssociationStateAssociated}, + Refresh: dxGatewayAssociationStateRefresh(conn, associationId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err } -func waitForDirectConnectGatewayAssociationDeletion(conn *directconnect.DirectConnect, directConnectGatewayID, virtualGatewayID string, timeout time.Duration) error { +func waitForDirectConnectGatewayAssociationDeletion(conn *directconnect.DirectConnect, associationId string, timeout time.Duration) error { stateConf := &resource.StateChangeConf{ Pending: []string{directconnect.GatewayAssociationStateDisassociating}, Target: []string{directconnect.GatewayAssociationStateDisassociated, gatewayAssociationStateDeleted}, - Refresh: dxGatewayAssociationStateRefresh(conn, directConnectGatewayID, virtualGatewayID), + Refresh: dxGatewayAssociationStateRefresh(conn, associationId), Timeout: timeout, Delay: 10 * time.Second, MinTimeout: 5 * time.Second, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_migrate.go new file mode 100644 index 00000000000..8986f1d80ac --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_migrate.go @@ -0,0 +1,101 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/directconnect" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsDxGatewayAssociationResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_prefixes": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "associated_gateway_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_owner_account_id", "proposal_id", "vpn_gateway_id"}, + }, + + "associated_gateway_owner_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + }, + + "associated_gateway_type": { + Type: schema.TypeString, + Computed: true, + }, + + "dx_gateway_association_id": { + Type: schema.TypeString, + Computed: true, + }, + 
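+			// Changing the Direct Connect gateway requires replacing the association (ForceNew below).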
+ "dx_gateway_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "dx_gateway_owner_account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "proposal_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + }, + + "vpn_gateway_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", "associated_gateway_owner_account_id", "proposal_id"}, + Deprecated: "use 'associated_gateway_id' argument instead", + }, + }, + } +} + +func resourceAwsDxGatewayAssociationStateUpgradeV0(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + conn := meta.(*AWSClient).dxconn + + log.Println("[INFO] Found Direct Connect gateway association state v0; migrating to v1") + + // dx_gateway_association_id was introduced in v2.8.0. Handle the case where it's not yet present. + if v, ok := rawState["dx_gateway_association_id"]; !ok || v == nil { + resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ + DirectConnectGatewayId: aws.String(rawState["dx_gateway_id"].(string)), + VirtualGatewayId: aws.String(rawState["vpn_gateway_id"].(string)), + }) + if err != nil { + return nil, err + } + + if len(resp.DirectConnectGatewayAssociations) == 0 { + return nil, fmt.Errorf("Direct Connect gateway association not found, remove from state using 'terraform state rm'") + } + + rawState["dx_gateway_association_id"] = aws.StringValue(resp.DirectConnectGatewayAssociations[0].AssociationId) + } + + return rawState, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_proposal.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_proposal.go index 6bd3d3d3642..26b463a579b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_proposal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_gateway_association_proposal.go @@ -6,7 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxGatewayAssociationProposal() *schema.Resource { @@ -18,14 +19,43 @@ func resourceAwsDxGatewayAssociationProposal() *schema.Resource { State: schema.ImportStatePassthrough, }, + CustomizeDiff: customdiff.Sequence( + // Accepting the proposal with overridden prefixes changes the returned RequestedAllowedPrefixesToDirectConnectGateway value (allowed_prefixes attribute). + // We only want to force a new resource if this value changes and the current proposal state is "requested". 
+ customdiff.ForceNewIf("allowed_prefixes", func(d *schema.ResourceDiff, meta interface{}) bool { + conn := meta.(*AWSClient).dxconn + + proposal, err := describeDirectConnectGatewayAssociationProposal(conn, d.Id()) + if err != nil { + log.Printf("[ERROR] Error reading Direct Connect Gateway Association Proposal (%s): %s", d.Id(), err) + return false + } + + return proposal != nil && aws.StringValue(proposal.ProposalState) == directconnect.GatewayAssociationProposalStateRequested + }), + ), + Schema: map[string]*schema.Schema{ "allowed_prefixes": { Type: schema.TypeSet, Optional: true, Computed: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "associated_gateway_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"vpn_gateway_id"}, + }, + "associated_gateway_owner_account_id": { + Type: schema.TypeString, + Computed: true, + }, + "associated_gateway_type": { + Type: schema.TypeString, + Computed: true, + }, "dx_gateway_id": { Type: schema.TypeString, Required: true, @@ -38,9 +68,11 @@ func resourceAwsDxGatewayAssociationProposal() *schema.Resource { ValidateFunc: validateAwsAccountId, }, "vpn_gateway_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id"}, + Deprecated: "use 'associated_gateway_id' argument instead", }, }, } @@ -49,13 +81,25 @@ func resourceAwsDxGatewayAssociationProposal() *schema.Resource { func resourceAwsDxGatewayAssociationProposalCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).dxconn + allowedPrefixes := expandDirectConnectGatewayAssociationProposalAllowedPrefixes(d.Get("allowed_prefixes").(*schema.Set).List()) input := &directconnect.CreateDirectConnectGatewayAssociationProposalInput{ - AddAllowedPrefixesToDirectConnectGateway: expandDirectConnectGatewayAssociationProposalAllowedPrefixes(d.Get("allowed_prefixes").(*schema.Set).List()), + AddAllowedPrefixesToDirectConnectGateway: allowedPrefixes, DirectConnectGatewayId: aws.String(d.Get("dx_gateway_id").(string)), DirectConnectGatewayOwnerAccount: aws.String(d.Get("dx_gateway_owner_account_id").(string)), - GatewayId: aws.String(d.Get("vpn_gateway_id").(string)), + } + var gwID string + if v, ok := d.GetOk("vpn_gateway_id"); ok { + gwID = v.(string) + } else if v, ok := d.GetOk("associated_gateway_id"); ok { + gwID = v.(string) } + if gwID == "" { + return fmt.Errorf("gateway id not provided, one of associated_gateway_id or vpn_gateway_id must be configured") + } + + input.GatewayId = aws.String(gwID) + log.Printf("[DEBUG] Creating Direct Connect Gateway Association Proposal: %s", input) output, err := conn.CreateDirectConnectGatewayAssociationProposal(input) @@ -97,9 +141,15 @@ func resourceAwsDxGatewayAssociationProposalRead(d *schema.ResourceData, meta in return fmt.Errorf("error setting allowed_prefixes: %s", err) } + if _, ok := d.GetOk("vpn_gateway_id"); ok { + d.Set("vpn_gateway_id", aws.StringValue(proposal.AssociatedGateway.Id)) + } else { + d.Set("associated_gateway_id", aws.StringValue(proposal.AssociatedGateway.Id)) + } + d.Set("associated_gateway_owner_account_id", proposal.AssociatedGateway.OwnerAccount) + d.Set("associated_gateway_type", proposal.AssociatedGateway.Type) d.Set("dx_gateway_id", aws.StringValue(proposal.DirectConnectGatewayId)) d.Set("dx_gateway_owner_account_id", aws.StringValue(proposal.DirectConnectGatewayOwnerAccount)) - d.Set("vpn_gateway_id", 
aws.StringValue(proposal.AssociatedGateway.Id)) return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface.go index 7c2fb3e8a43..dad93784e46 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDxHostedPrivateVirtualInterface() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface_accepter.go index 7f7f6214128..f7dbca21adf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface_accepter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_private_virtual_interface_accepter.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxHostedPrivateVirtualInterfaceAccepter() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface.go index d69a72f932f..5d123f6c841 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDxHostedPublicVirtualInterface() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface_accepter.go index 
87b127b96dc..1038229bc38 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface_accepter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_hosted_public_virtual_interface_accepter.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxHostedPublicVirtualInterfaceAccepter() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_lag.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_lag.go index 3c4da48824b..4284959a591 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_lag.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_lag.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDxLag() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_private_virtual_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_private_virtual_interface.go index 8d4b4969db7..daf1dcf6522 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_private_virtual_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_private_virtual_interface.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDxPrivateVirtualInterface() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_public_virtual_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_public_virtual_interface.go index fc8405dd6fc..b65c5113c36 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_public_virtual_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_public_virtual_interface.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDxPublicVirtualInterface() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_transit_virtual_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_transit_virtual_interface.go new file mode 100644 index 00000000000..24c2262d763 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dx_transit_virtual_interface.go @@ -0,0 +1,236 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/directconnect" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsDxTransitVirtualInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDxTransitVirtualInterfaceCreate, + Read: resourceAwsDxTransitVirtualInterfaceRead, + Update: resourceAwsDxTransitVirtualInterfaceUpdate, + Delete: resourceAwsDxTransitVirtualInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsDxTransitVirtualInterfaceImport, + }, + + Schema: map[string]*schema.Schema{ + "address_family": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + directconnect.AddressFamilyIpv4, + directconnect.AddressFamilyIpv6, + }, false), + }, + "amazon_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "aws_device": { + Type: schema.TypeString, + Computed: true, + }, + "bgp_asn": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "bgp_auth_key": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "customer_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "dx_gateway_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "jumbo_frame_capable": { + Type: schema.TypeBool, + Computed: true, + }, + "mtu": { + Type: schema.TypeInt, + Default: 1500, + Optional: true, + ValidateFunc: validation.IntInSlice([]int{1500, 8500}), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "tags": tagsSchema(), + "vlan": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 4094), + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + } +} + +func resourceAwsDxTransitVirtualInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dxconn + + req := &directconnect.CreateTransitVirtualInterfaceInput{ + ConnectionId: aws.String(d.Get("connection_id").(string)), + NewTransitVirtualInterface: &directconnect.NewTransitVirtualInterface{ + AddressFamily: aws.String(d.Get("address_family").(string)), + Asn: aws.Int64(int64(d.Get("bgp_asn").(int))), + DirectConnectGatewayId: aws.String(d.Get("dx_gateway_id").(string)), + Mtu: 
aws.Int64(int64(d.Get("mtu").(int))), + VirtualInterfaceName: aws.String(d.Get("name").(string)), + Vlan: aws.Int64(int64(d.Get("vlan").(int))), + }, + } + if v, ok := d.GetOk("amazon_address"); ok && v.(string) != "" { + req.NewTransitVirtualInterface.AmazonAddress = aws.String(v.(string)) + } + if v, ok := d.GetOk("bgp_auth_key"); ok { + req.NewTransitVirtualInterface.AuthKey = aws.String(v.(string)) + } + if v, ok := d.GetOk("customer_address"); ok && v.(string) != "" { + req.NewTransitVirtualInterface.CustomerAddress = aws.String(v.(string)) + } + if v, ok := d.GetOk("tags"); ok { + req.NewTransitVirtualInterface.Tags = tagsFromMapDX(v.(map[string]interface{})) + } + + log.Printf("[DEBUG] Creating Direct Connect transit virtual interface: %s", req) + resp, err := conn.CreateTransitVirtualInterface(req) + if err != nil { + return fmt.Errorf("error creating Direct Connect transit virtual interface: %s", err) + } + + d.SetId(aws.StringValue(resp.VirtualInterface.VirtualInterfaceId)) + + if err := dxTransitVirtualInterfaceWaitUntilAvailable(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return err + } + + return resourceAwsDxTransitVirtualInterfaceRead(d, meta) +} + +func resourceAwsDxTransitVirtualInterfaceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dxconn + + vif, err := dxVirtualInterfaceRead(d.Id(), conn) + if err != nil { + return err + } + if vif == nil { + log.Printf("[WARN] Direct Connect transit virtual interface (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("address_family", vif.AddressFamily) + d.Set("amazon_address", vif.AmazonAddress) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Service: "directconnect", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("dxvif/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("aws_device", vif.AwsDeviceV2) + d.Set("bgp_asn", vif.Asn) + d.Set("bgp_auth_key", vif.AuthKey) + d.Set("connection_id", vif.ConnectionId) + d.Set("customer_address", vif.CustomerAddress) + d.Set("dx_gateway_id", vif.DirectConnectGatewayId) + d.Set("jumbo_frame_capable", vif.JumboFrameCapable) + d.Set("mtu", vif.Mtu) + d.Set("name", vif.VirtualInterfaceName) + d.Set("vlan", vif.Vlan) + if err := getTagsDX(conn, d, d.Get("arn").(string)); err != nil { + return fmt.Errorf("error getting Direct Connect transit virtual interface (%s) tags: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsDxTransitVirtualInterfaceUpdate(d *schema.ResourceData, meta interface{}) error { + if err := dxVirtualInterfaceUpdate(d, meta); err != nil { + return err + } + + if err := dxTransitVirtualInterfaceWaitUntilAvailable(meta.(*AWSClient).dxconn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + + return resourceAwsDxTransitVirtualInterfaceRead(d, meta) +} + +func resourceAwsDxTransitVirtualInterfaceDelete(d *schema.ResourceData, meta interface{}) error { + return dxVirtualInterfaceDelete(d, meta) +} + +func resourceAwsDxTransitVirtualInterfaceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).dxconn + + vif, err := dxVirtualInterfaceRead(d.Id(), conn) + if err != nil { + return nil, err + } + if vif == nil { + return nil, fmt.Errorf("virtual interface (%s) not found", d.Id()) + } + + if vifType := aws.StringValue(vif.VirtualInterfaceType); vifType != "transit" { + return nil, fmt.Errorf("virtual interface (%s) has incorrect type: %s", 
d.Id(), vifType) + } + + return []*schema.ResourceData{d}, nil +} + +func dxTransitVirtualInterfaceWaitUntilAvailable(conn *directconnect.DirectConnect, vifId string, timeout time.Duration) error { + return dxVirtualInterfaceWaitUntilAvailable( + conn, + vifId, + timeout, + []string{ + directconnect.VirtualInterfaceStatePending, + }, + []string{ + directconnect.VirtualInterfaceStateAvailable, + directconnect.VirtualInterfaceStateDown, + }) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_global_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_global_table.go index bcc5b7c6179..65af84ef6df 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_global_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_global_table.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDynamoDbGlobalTable() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go index 65c9fd8cba6..f34d571e6e3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go @@ -10,11 +10,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsDynamoDbTable() *schema.Resource { @@ -289,10 +289,13 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Creating DynamoDB table with key schema: %#v", keySchemaMap) + tags := tagsFromMapDynamoDb(d.Get("tags").(map[string]interface{})) + req := &dynamodb.CreateTableInput{ TableName: aws.String(d.Get("name").(string)), BillingMode: aws.String(d.Get("billing_mode").(string)), KeySchema: expandDynamoDbKeySchema(keySchemaMap), + Tags: tags, } billingMode := d.Get("billing_mode").(string) @@ -351,6 +354,7 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er } var output *dynamodb.CreateTableOutput + var requiresTagging bool err := resource.Retry(2*time.Minute, func() *resource.RetryError { var err error output, err = conn.CreateTable(req) @@ -370,11 +374,21 @@ func resourceAwsDynamoDbTableCreate(d 
*schema.ResourceData, meta interface{}) er req.BillingMode = nil return resource.RetryableError(err) } + // AWS GovCloud (US) and others may reply with the following until their API is updated: + // ValidationException: Unsupported input parameter Tags + if isAWSErr(err, "ValidationException", "Unsupported input parameter Tags") { + req.Tags = nil + requiresTagging = true + return resource.RetryableError(err) + } return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + output, err = conn.CreateTable(req) + } if err != nil { return fmt.Errorf("error creating DynamoDB Table: %s", err) } @@ -386,16 +400,18 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er return err } + if requiresTagging { + if err := setTagsDynamoDb(conn, d); err != nil { + return fmt.Errorf("error adding DynamoDB Table (%s) tags: %s", d.Id(), err) + } + } + if d.Get("ttl.0.enabled").(bool) { if err := updateDynamoDbTimeToLive(d.Id(), d.Get("ttl").([]interface{}), conn); err != nil { return fmt.Errorf("error enabling DynamoDB Table (%s) Time to Live: %s", d.Id(), err) } } - if err := setTagsDynamoDb(conn, d); err != nil { - return fmt.Errorf("error adding DynamoDB Table (%s) tags: %s", d.Id(), err) - } - if d.Get("point_in_time_recovery.0.enabled").(bool) { if err := updateDynamoDbPITR(d, conn); err != nil { return fmt.Errorf("error enabling DynamoDB Table (%s) point in time recovery: %s", d.Id(), err) @@ -639,7 +655,7 @@ func deleteAwsDynamoDbTable(tableName string, conn *dynamodb.DynamoDB) error { TableName: aws.String(tableName), } - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteTable(input) if err != nil { // Subscriber limit exceeded: Only 10 tables can be created, updated, or deleted simultaneously @@ -661,6 +677,13 @@ func deleteAwsDynamoDbTable(tableName string, conn *dynamodb.DynamoDB) error { } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteTable(input) + } + if err != nil { + return fmt.Errorf("Error deleting DynamoDB table: %s", err) + } + return nil } func waitForDynamodbTableDeletion(conn *dynamodb.DynamoDB, tableName string, timeout time.Duration) error { @@ -746,9 +769,11 @@ func updateDynamoDbPITR(d *schema.ResourceData, conn *dynamodb.DynamoDB) error { } return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.UpdateContinuousBackups(input) + } if err != nil { - return err + return fmt.Errorf("Error updating DynamoDB PITR status: %s", err) } if err := waitForDynamoDbBackupUpdateToBeCompleted(d.Id(), toEnable, conn); err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_item.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_item.go index 8ceb8a23224..2de82939ac7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_item.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_item.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsDynamoDbTableItem() *schema.Resource { @@ -240,46 +240,18 @@ func 
buildDynamoDbProjectionExpression(attrs map[string]*dynamodb.AttributeValue } func buildDynamoDbTableItemId(tableName string, hashKey string, rangeKey string, attrs map[string]*dynamodb.AttributeValue) string { - hashVal := attrs[hashKey] + id := []string{tableName, hashKey} - id := []string{ - tableName, - hashKey, - base64Encode(hashVal.B), + if hashVal, ok := attrs[hashKey]; ok { + id = append(id, base64Encode(hashVal.B)) + id = append(id, aws.StringValue(hashVal.S)) + id = append(id, aws.StringValue(hashVal.N)) } - - if hashVal.S != nil { - id = append(id, *hashVal.S) - } else { - id = append(id, "") - } - if hashVal.N != nil { - id = append(id, *hashVal.N) - } else { - id = append(id, "") + if rangeVal, ok := attrs[rangeKey]; ok && rangeKey != "" { + id = append(id, rangeKey, base64Encode(rangeVal.B)) + id = append(id, aws.StringValue(rangeVal.S)) + id = append(id, aws.StringValue(rangeVal.N)) } - if rangeKey != "" { - rangeVal := attrs[rangeKey] - - id = append(id, - rangeKey, - base64Encode(rangeVal.B), - ) - - if rangeVal.S != nil { - id = append(id, *rangeVal.S) - } else { - id = append(id, "") - } - - if rangeVal.N != nil { - id = append(id, *rangeVal.N) - } else { - id = append(id, "") - } - - } - return strings.Join(id, "|") } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go index 29eb38de472..4bf5456c6a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go @@ -5,8 +5,8 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsDynamoDbTableMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_default_kms_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_default_kms_key.go new file mode 100644 index 00000000000..c5526709f92 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_default_kms_key.go @@ -0,0 +1,69 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsEbsDefaultKmsKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEbsDefaultKmsKeyCreate, + Read: resourceAwsEbsDefaultKmsKeyRead, + Delete: resourceAwsEbsDefaultKmsKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + }, + } +} + +func resourceAwsEbsDefaultKmsKeyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.ModifyEbsDefaultKmsKeyId(&ec2.ModifyEbsDefaultKmsKeyIdInput{ + KmsKeyId: aws.String(d.Get("key_arn").(string)), + }) + if err != nil { 
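+		// No resource ID has been set at this point, so a failed call leaves nothing in state.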
+ return fmt.Errorf("error creating EBS default KMS key: %s", err) + } + + d.SetId(aws.StringValue(resp.KmsKeyId)) + + return resourceAwsEbsDefaultKmsKeyRead(d, meta) +} + +func resourceAwsEbsDefaultKmsKeyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.GetEbsDefaultKmsKeyId(&ec2.GetEbsDefaultKmsKeyIdInput{}) + if err != nil { + return fmt.Errorf("error reading EBS default KMS key: %s", err) + } + + d.Set("key_arn", resp.KmsKeyId) + + return nil +} + +func resourceAwsEbsDefaultKmsKeyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.ResetEbsDefaultKmsKeyId(&ec2.ResetEbsDefaultKmsKeyIdInput{}) + if err != nil { + return fmt.Errorf("error deleting EBS default KMS key: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_encryption_by_default.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_encryption_by_default.go new file mode 100644 index 00000000000..1361df78501 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_encryption_by_default.go @@ -0,0 +1,88 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsEbsEncryptionByDefault() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEbsEncryptionByDefaultCreate, + Read: resourceAwsEbsEncryptionByDefaultRead, + Update: resourceAwsEbsEncryptionByDefaultUpdate, + Delete: resourceAwsEbsEncryptionByDefaultDelete, + + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + } +} + +func resourceAwsEbsEncryptionByDefaultCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + enabled := d.Get("enabled").(bool) + if err := setEbsEncryptionByDefault(conn, enabled); err != nil { + return fmt.Errorf("error creating EBS encryption by default (%t): %s", enabled, err) + } + + d.SetId(resource.UniqueId()) + + return resourceAwsEbsEncryptionByDefaultRead(d, meta) +} + +func resourceAwsEbsEncryptionByDefaultRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.GetEbsEncryptionByDefault(&ec2.GetEbsEncryptionByDefaultInput{}) + if err != nil { + return fmt.Errorf("error reading EBS encryption by default: %s", err) + } + + d.Set("enabled", aws.BoolValue(resp.EbsEncryptionByDefault)) + + return nil +} + +func resourceAwsEbsEncryptionByDefaultUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + enabled := d.Get("enabled").(bool) + if err := setEbsEncryptionByDefault(conn, enabled); err != nil { + return fmt.Errorf("error updating EBS encryption by default (%t): %s", enabled, err) + } + + return resourceAwsEbsEncryptionByDefaultRead(d, meta) +} + +func resourceAwsEbsEncryptionByDefaultDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Removing the resource disables default encryption. 
+ if err := setEbsEncryptionByDefault(conn, false); err != nil { + return fmt.Errorf("error disabling EBS encryption by default: %s", err) + } + + return nil +} + +func setEbsEncryptionByDefault(conn *ec2.EC2, enabled bool) error { + var err error + + if enabled { + _, err = conn.EnableEbsEncryptionByDefault(&ec2.EnableEbsEncryptionByDefaultInput{}) + } else { + _, err = conn.DisableEbsEncryptionByDefault(&ec2.DisableEbsEncryptionByDefaultInput{}) + } + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go index 7821ece7296..dde4d0d7e6c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go @@ -6,10 +6,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEbsSnapshot() *schema.Resource { @@ -18,6 +17,11 @@ func resourceAwsEbsSnapshot() *schema.Resource { Read: resourceAwsEbsSnapshotRead, Delete: resourceAwsEbsSnapshotDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "volume_id": { Type: schema.TypeString, @@ -88,14 +92,16 @@ func resourceAwsEbsSnapshotCreate(d *schema.ResourceData, meta interface{}) erro return nil }) - + if isResourceTimeoutError(err) { + res, err = conn.CreateSnapshot(request) + } if err != nil { return fmt.Errorf("error creating EC2 EBS Snapshot: %s", err) } d.SetId(*res.SnapshotId) - err = resourceAwsEbsSnapshotWaitForAvailable(d.Id(), conn) + err = resourceAwsEbsSnapshotWaitForAvailable(d, conn) if err != nil { return err } @@ -148,35 +154,48 @@ func resourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error func resourceAwsEbsSnapshotDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - request := &ec2.DeleteSnapshotInput{ - SnapshotId: aws.String(d.Id()), - } - _, err := conn.DeleteSnapshot(request) + input := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String(d.Id()), + } + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + _, err := conn.DeleteSnapshot(input) if err == nil { return nil } - - ebsErr, ok := err.(awserr.Error) - if ebsErr.Code() == "SnapshotInUse" { + if isAWSErr(err, "SnapshotInUse", "") { return resource.RetryableError(fmt.Errorf("EBS SnapshotInUse - trying again while it detaches")) } - - if !ok { - return resource.NonRetryableError(err) - } - return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteSnapshot(input) + } + if err != nil { + return fmt.Errorf("Error deleting EBS snapshot: %s", err) + } + return nil } -func resourceAwsEbsSnapshotWaitForAvailable(id string, conn *ec2.EC2) error { - log.Printf("Waiting for Snapshot %s to become available...", id) - - req := 
&ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(id)}, +func resourceAwsEbsSnapshotWaitForAvailable(d *schema.ResourceData, conn *ec2.EC2) error { + log.Printf("Waiting for Snapshot %s to become available...", d.Id()) + input := &ec2.DescribeSnapshotsInput{ + SnapshotIds: []*string{aws.String(d.Id())}, } - err := conn.WaitUntilSnapshotCompleted(req) - return err + err := resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + err := conn.WaitUntilSnapshotCompleted(input) + if err == nil { + return nil + } + if isAWSErr(err, "ResourceNotReady", "") { + return resource.RetryableError(fmt.Errorf("EBS CreatingSnapshot - waiting for snapshot to become available")) + } + return resource.NonRetryableError(err) + }) + if isResourceTimeoutError(err) { + err = conn.WaitUntilSnapshotCompleted(input) + } + if err != nil { + return fmt.Errorf("Error waiting for EBS snapshot to complete: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot_copy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot_copy.go index f1595412888..1e1db5642e4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot_copy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot_copy.go @@ -6,10 +6,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEbsSnapshotCopy() *schema.Resource { @@ -116,7 +115,7 @@ func resourceAwsEbsSnapshotCopyRead(d *schema.ResourceData, meta interface{}) er SnapshotIds: []*string{aws.String(d.Id())}, } res, err := conn.DescribeSnapshots(req) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSnapshotID.NotFound" { + if isAWSErr(err, "InvalidSnapshot.NotFound", "") { log.Printf("Snapshot %q Not found - removing from state", d.Id()) d.SetId("") return nil @@ -142,27 +141,35 @@ func resourceAwsEbsSnapshotCopyRead(d *schema.ResourceData, meta interface{}) er func resourceAwsEbsSnapshotCopyDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - request := &ec2.DeleteSnapshotInput{ - SnapshotId: aws.String(d.Id()), - } - _, err := conn.DeleteSnapshot(request) + input := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteSnapshot(input) if err == nil { return nil } - ebsErr, ok := err.(awserr.Error) - if ebsErr.Code() == "SnapshotInUse" { + if isAWSErr(err, "SnapshotInUse", "") { return resource.RetryableError(fmt.Errorf("EBS SnapshotInUse - trying again while it detaches")) } - if !ok { - return resource.NonRetryableError(err) + if isAWSErr(err, "InvalidSnapshot.NotFound", "") { + return nil } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteSnapshot(input) + if isAWSErr(err, "InvalidSnapshot.NotFound", "") { + return nil + } + } + if err != nil { + return 
fmt.Errorf("Error deleting EBS snapshot copy: %s", err) + } + return nil } func resourceAwsEbsSnapshotCopyWaitForAvailable(id string, conn *ec2.EC2) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go index 1db72785306..89add9e1a8b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go @@ -10,8 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsEbsVolume() *schema.Resource { @@ -80,7 +81,8 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error conn := meta.(*AWSClient).ec2conn request := &ec2.CreateVolumeInput{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeVolume), } if value, ok := d.GetOk("encrypted"); ok { request.Encrypted = aws.Bool(value.(bool)) @@ -94,14 +96,6 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error if value, ok := d.GetOk("snapshot_id"); ok { request.SnapshotId = aws.String(value.(string)) } - if value, ok := d.GetOk("tags"); ok { - request.TagSpecifications = []*ec2.TagSpecification{ - { - ResourceType: aws.String(ec2.ResourceTypeVolume), - Tags: tagsFromMap(value.(map[string]interface{})), - }, - } - } // IOPs are only valid, and required for, storage type io1. The current minimu // is 100. 
Instead of a hard validation we we only apply the IOPs to the @@ -154,11 +148,6 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - if _, ok := d.GetOk("tags"); ok { - if err := setTags(conn, d); err != nil { - return fmt.Errorf("Error updating tags for EBS Volume: %s", err) - } - } requestUpdate := false params := &ec2.ModifyVolumeInput{ @@ -203,6 +192,14 @@ func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsEbsVolumeRead(d, meta) } @@ -268,7 +265,7 @@ func resourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error { d.Set("size", aws.Int64Value(volume.Size)) d.Set("snapshot_id", aws.StringValue(volume.SnapshotId)) - if err := d.Set("tags", tagsToMap(volume.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(volume.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -280,25 +277,77 @@ func resourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsEbsVolumeDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - return resource.Retry(5*time.Minute, func() *resource.RetryError { - request := &ec2.DeleteVolumeInput{ - VolumeId: aws.String(d.Id()), - } - _, err := conn.DeleteVolume(request) - if err == nil { + input := &ec2.DeleteVolumeInput{ + VolumeId: aws.String(d.Id()), + } + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteVolume(input) + + if isAWSErr(err, "InvalidVolume.NotFound", "") { return nil } - ebsErr, ok := err.(awserr.Error) - if ebsErr.Code() == "VolumeInUse" { + if isAWSErr(err, "VolumeInUse", "") { return resource.RetryableError(fmt.Errorf("EBS VolumeInUse - trying again while it detaches")) } - if !ok { + if err != nil { return resource.NonRetryableError(err) } - return resource.NonRetryableError(err) + return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteVolume(input) + } + + if err != nil { + return fmt.Errorf("error deleting EBS Volume (%s): %s", d.Id(), err) + } + + describeInput := &ec2.DescribeVolumesInput{ + VolumeIds: []*string{aws.String(d.Id())}, + } + + var output *ec2.DescribeVolumesOutput + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.DescribeVolumes(describeInput) + + if err != nil { + return resource.NonRetryableError(err) + } + + for _, volume := range output.Volumes { + if aws.StringValue(volume.VolumeId) == d.Id() { + state := aws.StringValue(volume.State) + + if state == ec2.VolumeStateDeleting { + return resource.RetryableError(fmt.Errorf("EBS Volume (%s) still deleting", d.Id())) + } + + return resource.NonRetryableError(fmt.Errorf("EBS Volume (%s) in unexpected state after deletion: %s", d.Id(), state)) + } + } + + return nil + }) + + if isResourceTimeoutError(err) { + output, err = conn.DescribeVolumes(describeInput) + } + + if isAWSErr(err, "InvalidVolume.NotFound", "") { + return nil + } + + for _, volume := range output.Volumes { + if aws.StringValue(volume.VolumeId) == d.Id() { + return fmt.Errorf("EBS Volume (%s) in unexpected state after deletion: %s", d.Id(), aws.StringValue(volume.State)) + } + 
} + + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_capacity_reservation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_capacity_reservation.go index 46bae8a87aa..dc0f734c74d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_capacity_reservation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_capacity_reservation.go @@ -7,8 +7,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +const ( + // There is no constant in the SDK for this resource type + ec2ResourceTypeCapacityReservation = "capacity-reservation" ) func resourceAwsEc2CapacityReservation() *schema.Resource { @@ -106,11 +112,12 @@ func resourceAwsEc2CapacityReservationCreate(d *schema.ResourceData, meta interf conn := meta.(*AWSClient).ec2conn opts := &ec2.CreateCapacityReservationInput{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - EndDateType: aws.String(d.Get("end_date_type").(string)), - InstanceCount: aws.Int64(int64(d.Get("instance_count").(int))), - InstancePlatform: aws.String(d.Get("instance_platform").(string)), - InstanceType: aws.String(d.Get("instance_type").(string)), + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + EndDateType: aws.String(d.Get("end_date_type").(string)), + InstanceCount: aws.Int64(int64(d.Get("instance_count").(int))), + InstancePlatform: aws.String(d.Get("instance_platform").(string)), + InstanceType: aws.String(d.Get("instance_type").(string)), + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2ResourceTypeCapacityReservation), } if v, ok := d.GetOk("ebs_optimized"); ok { @@ -137,16 +144,6 @@ func resourceAwsEc2CapacityReservationCreate(d *schema.ResourceData, meta interf opts.Tenancy = aws.String(v.(string)) } - if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { - opts.TagSpecifications = []*ec2.TagSpecification{ - { - // There is no constant in the SDK for this resource type - ResourceType: aws.String("capacity-reservation"), - Tags: tagsFromMap(v.(map[string]interface{})), - }, - } - } - log.Printf("[DEBUG] Capacity reservation: %s", opts) out, err := conn.CreateCapacityReservation(opts) @@ -165,13 +162,16 @@ func resourceAwsEc2CapacityReservationRead(d *schema.ResourceData, meta interfac }) if err != nil { - return fmt.Errorf("Error describing EC2 Capacity Reservations: %s", err) + if isAWSErr(err, "InvalidCapacityReservationId.NotFound", "") { + log.Printf("[WARN] EC2 Capacity Reservation (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("error reading EC2 Capacity Reservation %s: %s", d.Id(), err) } - // If nothing was found, then return no state - if len(resp.CapacityReservations) == 0 { - log.Printf("[WARN] EC2 Capacity Reservation (%s) not found, removing from state", d.Id()) - d.SetId("") + if resp == nil || len(resp.CapacityReservations) == 0 || resp.CapacityReservations[0] == nil { + return 
fmt.Errorf("error reading EC2 Capacity Reservation (%s): empty response", d.Id()) } reservation := resp.CapacityReservations[0] @@ -197,7 +197,7 @@ func resourceAwsEc2CapacityReservationRead(d *schema.ResourceData, meta interfac d.Set("instance_platform", reservation.InstancePlatform) d.Set("instance_type", reservation.InstanceType) - if err := d.Set("tags", tagsToMap(reservation.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(reservation.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -209,18 +209,6 @@ func resourceAwsEc2CapacityReservationRead(d *schema.ResourceData, meta interfac func resourceAwsEc2CapacityReservationUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - d.Partial(true) - - if d.HasChange("tags") { - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - d.Partial(false) - opts := &ec2.ModifyCapacityReservationInput{ CapacityReservationId: aws.String(d.Id()), EndDateType: aws.String(d.Get("end_date_type").(string)), @@ -241,6 +229,15 @@ func resourceAwsEc2CapacityReservationUpdate(d *schema.ResourceData, meta interf if err != nil { return fmt.Errorf("Error modifying EC2 Capacity Reservation: %s", err) } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsEc2CapacityReservationRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_endpoint.go index dbfab48cac4..62d05390495 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_endpoint.go @@ -3,13 +3,11 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2ClientVpnEndpoint() *schema.Resource { @@ -41,6 +39,11 @@ func resourceAwsEc2ClientVpnEndpoint() *schema.Resource { Type: schema.TypeString, Required: true, }, + "split_tunnel": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "transport_protocol": { Type: schema.TypeString, Optional: true, @@ -120,6 +123,7 @@ func resourceAwsEc2ClientVpnEndpointCreate(d *schema.ResourceData, meta interfac ClientCidrBlock: aws.String(d.Get("client_cidr_block").(string)), ServerCertificateArn: aws.String(d.Get("server_certificate_arn").(string)), TransportProtocol: aws.String(d.Get("transport_protocol").(string)), + SplitTunnel: aws.Bool(d.Get("split_tunnel").(bool)), TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeClientVpnEndpoint), } @@ -173,19 +177,7 @@ func resourceAwsEc2ClientVpnEndpointCreate(d *schema.ResourceData, meta interfac req.ConnectionLogOptions = connLogReq } - log.Printf("[DEBUG] Creating Client 
VPN endpoint: %#v", req) - var resp *ec2.CreateClientVpnEndpointOutput - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - var err error - resp, err = conn.CreateClientVpnEndpoint(req) - if isAWSErr(err, "OperationNotPermitted", "Endpoint cannot be created while another endpoint is being created") { - return resource.RetryableError(err) - } - if err != nil { - return resource.NonRetryableError(err) - } - return nil - }) + resp, err := conn.CreateClientVpnEndpoint(req) if err != nil { return fmt.Errorf("Error creating Client VPN endpoint: %s", err) @@ -204,6 +196,12 @@ func resourceAwsEc2ClientVpnEndpointRead(d *schema.ResourceData, meta interface{ ClientVpnEndpointIds: []*string{aws.String(d.Id())}, }) + if isAWSErr(err, "InvalidClientVpnAssociationId.NotFound", "") || isAWSErr(err, "InvalidClientVpnEndpointId.NotFound", "") { + log.Printf("[WARN] EC2 Client VPN Endpoint (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return fmt.Errorf("Error reading Client VPN endpoint: %s", err) } @@ -226,6 +224,7 @@ func resourceAwsEc2ClientVpnEndpointRead(d *schema.ResourceData, meta interface{ d.Set("transport_protocol", result.ClientVpnEndpoints[0].TransportProtocol) d.Set("dns_name", result.ClientVpnEndpoints[0].DnsName) d.Set("status", result.ClientVpnEndpoints[0].Status) + d.Set("split_tunnel", result.ClientVpnEndpoints[0].SplitTunnel) err = d.Set("authentication_options", flattenAuthOptsConfig(result.ClientVpnEndpoints[0].AuthenticationOptions)) if err != nil { @@ -292,6 +291,10 @@ func resourceAwsEc2ClientVpnEndpointUpdate(d *schema.ResourceData, meta interfac req.ServerCertificateArn = aws.String(d.Get("server_certificate_arn").(string)) } + if d.HasChange("split_tunnel") { + req.SplitTunnel = aws.Bool(d.Get("split_tunnel").(bool)) + } + if d.HasChange("connection_log_options") { if v, ok := d.GetOk("connection_log_options"); ok { connSet := v.([]interface{}) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_network_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_network_association.go index 6210a0c0257..1caf9acfb64 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_network_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_client_vpn_network_association.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEc2ClientVpnNetworkAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_fleet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_fleet.go index 03dd06f20d5..5eaf702df93 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_fleet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_fleet.go @@ -8,9 +8,10 @@ import ( 
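The client VPN endpoint hunks above also show the standardized read path this diff rolls out: a NotFound error is treated as drift rather than a failure, so the resource is dropped from state and Terraform plans a re-create. A condensed sketch, using the helper names from the surrounding hunks (illustration only):

	// Illustration only: a NotFound code means the remote object no longer
	// exists, so clear the ID and remove the resource from state instead of
	// failing the refresh.
	if isAWSErr(err, "InvalidClientVpnEndpointId.NotFound", "") {
		log.Printf("[WARN] EC2 Client VPN Endpoint (%s) not found, removing from state", d.Id())
		d.SetId("")
		return nil
	}

	// Any other error still fails the read.
	if err != nil {
		return fmt.Errorf("Error reading Client VPN endpoint: %s", err)
	}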
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsEc2Fleet() *schema.Resource { @@ -192,12 +193,7 @@ func resourceAwsEc2Fleet() *schema.Resource { }, }, }, - "tags": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "tags": tagsSchema(), "target_capacity_specification": { Type: schema.TypeList, Required: true, @@ -312,7 +308,7 @@ func resourceAwsEc2FleetCreate(d *schema.ResourceData, meta interface{}) error { SpotOptions: expandEc2SpotOptionsRequest(d.Get("spot_options").([]interface{})), TargetCapacitySpecification: expandEc2TargetCapacitySpecificationRequest(d.Get("target_capacity_specification").([]interface{})), TerminateInstancesWithExpiration: aws.Bool(d.Get("terminate_instances_with_expiration").(bool)), - TagSpecifications: expandEc2TagSpecifications(d.Get("tags").(map[string]interface{})), + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeFleet), Type: aws.String(d.Get("type").(string)), } @@ -435,7 +431,7 @@ func resourceAwsEc2FleetRead(d *schema.ResourceData, meta interface{}) error { d.Set("terminate_instances_with_expiration", fleet.TerminateInstancesWithExpiration) d.Set("type", fleet.Type) - if err := d.Set("tags", tagsToMap(fleet.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(fleet.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -475,6 +471,14 @@ func resourceAwsEc2FleetUpdate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error waiting for EC2 Fleet (%s) modification: %s", d.Id(), err) } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsEc2FleetRead(d, meta) } @@ -693,19 +697,6 @@ func expandEc2SpotOptionsRequest(l []interface{}) *ec2.SpotOptionsRequest { return spotOptionsRequest } -func expandEc2TagSpecifications(m map[string]interface{}) []*ec2.TagSpecification { - if len(m) == 0 { - return nil - } - - return []*ec2.TagSpecification{ - { - ResourceType: aws.String("fleet"), - Tags: tagsFromMap(m), - }, - } -} - func expandEc2TargetCapacitySpecificationRequest(l []interface{}) *ec2.TargetCapacitySpecificationRequest { if len(l) == 0 || l[0] == nil { return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway.go index 13228ab1475..45e5b275676 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGateway() *schema.Resource { @@ -215,13 +215,17 @@ func resourceAwsEc2TransitGatewayDelete(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Deleting EC2 Transit Gateway (%s): %s", d.Id(), input) - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { _, err := conn.DeleteTransitGateway(input) if isAWSErr(err, "IncorrectState", "has non-deleted Transit Gateway Attachments") { return resource.RetryableError(err) } + if isAWSErr(err, "IncorrectState", "has non-deleted DirectConnect Gateway Attachments") { + return resource.RetryableError(err) + } + if isAWSErr(err, "IncorrectState", "has non-deleted VPN Attachments") { return resource.RetryableError(err) } @@ -233,6 +237,10 @@ func resourceAwsEc2TransitGatewayDelete(d *schema.ResourceData, meta interface{} return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteTransitGateway(input) + } + if isAWSErr(err, "InvalidTransitGatewayID.NotFound", "") { return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route.go index cd00adcb90e..a28412be6cb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route.go @@ -5,12 +5,12 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGatewayRoute() *schema.Resource { @@ -28,9 +28,15 @@ func resourceAwsEc2TransitGatewayRoute() *schema.Resource { Required: true, ForceNew: true, }, + "blackhole": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, "transit_gateway_attachment_id": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.NoZeroValues, }, @@ -52,6 +58,7 @@ func resourceAwsEc2TransitGatewayRouteCreate(d *schema.ResourceData, meta interf input := &ec2.CreateTransitGatewayRouteInput{ DestinationCidrBlock: aws.String(destination), + Blackhole: aws.Bool(d.Get("blackhole").(bool)), TransitGatewayAttachmentId: aws.String(d.Get("transit_gateway_attachment_id").(string)), TransitGatewayRouteTableId: aws.String(transitGatewayRouteTableID), } @@ -92,6 +99,10 @@ func resourceAwsEc2TransitGatewayRouteRead(d *schema.ResourceData, meta interfac return nil }) + if isResourceTimeoutError(err) { + transitGatewayRoute, err = ec2DescribeTransitGatewayRoute(conn, transitGatewayRouteTableID, destination) + } + if 
isAWSErr(err, "InvalidRouteTableID.NotFound", "") { log.Printf("[WARN] EC2 Transit Gateway Route Table (%s) not found, removing from state", transitGatewayRouteTableID) d.SetId("") @@ -126,8 +137,10 @@ func resourceAwsEc2TransitGatewayRouteRead(d *schema.ResourceData, meta interfac d.Set("transit_gateway_attachment_id", "") if len(transitGatewayRoute.TransitGatewayAttachments) > 0 && transitGatewayRoute.TransitGatewayAttachments[0] != nil { d.Set("transit_gateway_attachment_id", transitGatewayRoute.TransitGatewayAttachments[0].TransitGatewayAttachmentId) + d.Set("blackhole", false) + } else { + d.Set("blackhole", true) } - d.Set("transit_gateway_route_table_id", transitGatewayRouteTableID) return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table.go index b08c6e9d685..3d7eb158db1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGatewayRouteTable() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_association.go index 6f5a647dbd2..c25dd5b0bf6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_association.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGatewayRouteTableAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_propagation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_propagation.go index b553db57472..61581e0e128 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_propagation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_route_table_propagation.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGatewayRouteTablePropagation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment.go index 8faa7005625..b958e478e4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEc2TransitGatewayVpcAttachment() *schema.Resource { @@ -104,7 +104,7 @@ func resourceAwsEc2TransitGatewayVpcAttachmentCreate(d *schema.ResourceData, met d.SetId(aws.StringValue(output.TransitGatewayVpcAttachment.TransitGatewayAttachmentId)) - if err := waitForEc2TransitGatewayRouteTableAttachmentCreation(conn, d.Id()); err != nil { + if err := waitForEc2TransitGatewayVpcAttachmentCreation(conn, d.Id()); err != nil { return fmt.Errorf("error waiting for EC2 Transit Gateway VPC Attachment (%s) availability: %s", d.Id(), err) } @@ -238,7 +238,7 @@ func resourceAwsEc2TransitGatewayVpcAttachmentUpdate(d *schema.ResourceData, met return fmt.Errorf("error modifying EC2 Transit Gateway VPC Attachment (%s): %s", d.Id(), err) } - if err := waitForEc2TransitGatewayRouteTableAttachmentUpdate(conn, d.Id()); err != nil { + if err := waitForEc2TransitGatewayVpcAttachmentUpdate(conn, d.Id()); err != nil { return fmt.Errorf("error waiting for EC2 Transit Gateway VPC Attachment (%s) update: %s", d.Id(), err) } } @@ -295,7 +295,7 @@ func resourceAwsEc2TransitGatewayVpcAttachmentDelete(d *schema.ResourceData, met return fmt.Errorf("error deleting EC2 Transit Gateway VPC Attachment: %s", err) } - if err := waitForEc2TransitGatewayRouteTableAttachmentDeletion(conn, d.Id()); err != nil { + if err := waitForEc2TransitGatewayVpcAttachmentDeletion(conn, d.Id()); err != nil { return fmt.Errorf("error waiting for EC2 Transit Gateway VPC Attachment (%s) deletion: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment_accepter.go new file mode 100644 index 00000000000..47b51f07e5f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ec2_transit_gateway_vpc_attachment_accepter.go @@ -0,0 +1,246 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func 
resourceAwsEc2TransitGatewayVpcAttachmentAccepter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEc2TransitGatewayVpcAttachmentAccepterCreate, + Read: resourceAwsEc2TransitGatewayVpcAttachmentAccepterRead, + Update: resourceAwsEc2TransitGatewayVpcAttachmentAccepterUpdate, + Delete: resourceAwsEc2TransitGatewayVpcAttachmentAccepterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "dns_support": { + Type: schema.TypeString, + Computed: true, + }, + "ipv6_support": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tagsSchema(), + "transit_gateway_attachment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "transit_gateway_default_route_table_association": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "transit_gateway_default_route_table_propagation": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "transit_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_owner_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsEc2TransitGatewayVpcAttachmentAccepterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + input := &ec2.AcceptTransitGatewayVpcAttachmentInput{ + TransitGatewayAttachmentId: aws.String(d.Get("transit_gateway_attachment_id").(string)), + } + + log.Printf("[DEBUG] Accepting EC2 Transit Gateway VPC Attachment: %s", input) + output, err := conn.AcceptTransitGatewayVpcAttachment(input) + if err != nil { + return fmt.Errorf("error accepting EC2 Transit Gateway VPC Attachment: %s", err) + } + + d.SetId(aws.StringValue(output.TransitGatewayVpcAttachment.TransitGatewayAttachmentId)) + transitGatewayID := aws.StringValue(output.TransitGatewayVpcAttachment.TransitGatewayId) + + if err := waitForEc2TransitGatewayVpcAttachmentAcceptance(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EC2 Transit Gateway VPC Attachment (%s) availability: %s", d.Id(), err) + } + + if err := setTags(conn, d); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway VPC Attachment (%s) tags: %s", d.Id(), err) + } + + transitGateway, err := ec2DescribeTransitGateway(conn, transitGatewayID) + if err != nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): %s", transitGatewayID, err) + } + + if transitGateway.Options == nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): missing options", transitGatewayID) + } + + if err := ec2TransitGatewayRouteTableAssociationUpdate(conn, aws.StringValue(transitGateway.Options.AssociationDefaultRouteTableId), d.Id(), d.Get("transit_gateway_default_route_table_association").(bool)); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway Attachment (%s) Route Table (%s) association: %s", d.Id(), aws.StringValue(transitGateway.Options.AssociationDefaultRouteTableId), err) + } + + if err := ec2TransitGatewayRouteTablePropagationUpdate(conn, aws.StringValue(transitGateway.Options.PropagationDefaultRouteTableId), d.Id(), d.Get("transit_gateway_default_route_table_propagation").(bool)); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway Attachment (%s) Route Table (%s) propagation: %s", d.Id(), 
aws.StringValue(transitGateway.Options.PropagationDefaultRouteTableId), err) + } + + return resourceAwsEc2TransitGatewayVpcAttachmentAccepterRead(d, meta) +} + +func resourceAwsEc2TransitGatewayVpcAttachmentAccepterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + transitGatewayVpcAttachment, err := ec2DescribeTransitGatewayVpcAttachment(conn, d.Id()) + + if isAWSErr(err, "InvalidTransitGatewayAttachmentID.NotFound", "") { + log.Printf("[WARN] EC2 Transit Gateway VPC Attachment (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading EC2 Transit Gateway VPC Attachment: %s", err) + } + + if transitGatewayVpcAttachment == nil { + log.Printf("[WARN] EC2 Transit Gateway VPC Attachment (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if aws.StringValue(transitGatewayVpcAttachment.State) == ec2.TransitGatewayAttachmentStateDeleting || aws.StringValue(transitGatewayVpcAttachment.State) == ec2.TransitGatewayAttachmentStateDeleted { + log.Printf("[WARN] EC2 Transit Gateway VPC Attachment (%s) in deleted state (%s), removing from state", d.Id(), aws.StringValue(transitGatewayVpcAttachment.State)) + d.SetId("") + return nil + } + + transitGatewayID := aws.StringValue(transitGatewayVpcAttachment.TransitGatewayId) + transitGateway, err := ec2DescribeTransitGateway(conn, transitGatewayID) + if err != nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): %s", transitGatewayID, err) + } + + if transitGateway.Options == nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): missing options", transitGatewayID) + } + + transitGatewayAssociationDefaultRouteTableID := aws.StringValue(transitGateway.Options.AssociationDefaultRouteTableId) + transitGatewayDefaultRouteTableAssociation, err := ec2DescribeTransitGatewayRouteTableAssociation(conn, transitGatewayAssociationDefaultRouteTableID, d.Id()) + if err != nil { + return fmt.Errorf("error determining EC2 Transit Gateway Attachment (%s) association to Route Table (%s): %s", d.Id(), transitGatewayAssociationDefaultRouteTableID, err) + } + + transitGatewayPropagationDefaultRouteTableID := aws.StringValue(transitGateway.Options.PropagationDefaultRouteTableId) + transitGatewayDefaultRouteTablePropagation, err := ec2DescribeTransitGatewayRouteTablePropagation(conn, transitGatewayPropagationDefaultRouteTableID, d.Id()) + if err != nil { + return fmt.Errorf("error determining EC2 Transit Gateway Attachment (%s) propagation to Route Table (%s): %s", d.Id(), transitGatewayPropagationDefaultRouteTableID, err) + } + + if transitGatewayVpcAttachment.Options == nil { + return fmt.Errorf("error reading EC2 Transit Gateway VPC Attachment (%s): missing options", d.Id()) + } + + d.Set("dns_support", transitGatewayVpcAttachment.Options.DnsSupport) + d.Set("ipv6_support", transitGatewayVpcAttachment.Options.Ipv6Support) + + if err := d.Set("subnet_ids", aws.StringValueSlice(transitGatewayVpcAttachment.SubnetIds)); err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + + if err := d.Set("tags", tagsToMap(transitGatewayVpcAttachment.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("transit_gateway_attachment_id", aws.StringValue(transitGatewayVpcAttachment.TransitGatewayAttachmentId)) + d.Set("transit_gateway_default_route_table_association", (transitGatewayDefaultRouteTableAssociation != nil)) + 
d.Set("transit_gateway_default_route_table_propagation", (transitGatewayDefaultRouteTablePropagation != nil)) + d.Set("transit_gateway_id", aws.StringValue(transitGatewayVpcAttachment.TransitGatewayId)) + d.Set("vpc_id", aws.StringValue(transitGatewayVpcAttachment.VpcId)) + d.Set("vpc_owner_id", aws.StringValue(transitGatewayVpcAttachment.VpcOwnerId)) + + return nil +} + +func resourceAwsEc2TransitGatewayVpcAttachmentAccepterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if d.HasChange("transit_gateway_default_route_table_association") || d.HasChange("transit_gateway_default_route_table_propagation") { + transitGatewayID := d.Get("transit_gateway_id").(string) + + transitGateway, err := ec2DescribeTransitGateway(conn, transitGatewayID) + if err != nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): %s", transitGatewayID, err) + } + + if transitGateway.Options == nil { + return fmt.Errorf("error describing EC2 Transit Gateway (%s): missing options", transitGatewayID) + } + + if d.HasChange("transit_gateway_default_route_table_association") { + if err := ec2TransitGatewayRouteTableAssociationUpdate(conn, aws.StringValue(transitGateway.Options.AssociationDefaultRouteTableId), d.Id(), d.Get("transit_gateway_default_route_table_association").(bool)); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway Attachment (%s) Route Table (%s) association: %s", d.Id(), aws.StringValue(transitGateway.Options.AssociationDefaultRouteTableId), err) + } + } + + if d.HasChange("transit_gateway_default_route_table_propagation") { + if err := ec2TransitGatewayRouteTablePropagationUpdate(conn, aws.StringValue(transitGateway.Options.PropagationDefaultRouteTableId), d.Id(), d.Get("transit_gateway_default_route_table_propagation").(bool)); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway Attachment (%s) Route Table (%s) propagation: %s", d.Id(), aws.StringValue(transitGateway.Options.PropagationDefaultRouteTableId), err) + } + } + } + + if d.HasChange("tags") { + if err := setTags(conn, d); err != nil { + return fmt.Errorf("error updating EC2 Transit Gateway VPC Attachment (%s) tags: %s", d.Id(), err) + } + } + + return nil +} + +func resourceAwsEc2TransitGatewayVpcAttachmentAccepterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + input := &ec2.DeleteTransitGatewayVpcAttachmentInput{ + TransitGatewayAttachmentId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting EC2 Transit Gateway VPC Attachment (%s): %s", d.Id(), input) + _, err := conn.DeleteTransitGatewayVpcAttachment(input) + + if isAWSErr(err, "InvalidTransitGatewayAttachmentID.NotFound", "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting EC2 Transit Gateway VPC Attachment: %s", err) + } + + if err := waitForEc2TransitGatewayVpcAttachmentDeletion(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EC2 Transit Gateway VPC Attachment (%s) deletion: %s", d.Id(), err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_lifecycle_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_lifecycle_policy.go index 31b73e64889..3ff36581c5c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_lifecycle_policy.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_lifecycle_policy.go @@ -3,8 +3,8 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEcrLifecyclePolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go index 52709fcf963..ac9ad614b6b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go @@ -1,15 +1,16 @@ package aws import ( + "fmt" "log" "time" - "fmt" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsEcrRepository() *schema.Resource { @@ -32,6 +33,29 @@ func resourceAwsEcrRepository() *schema.Resource { Required: true, ForceNew: true, }, + "image_tag_mutability": { + Type: schema.TypeString, + Optional: true, + Default: ecr.ImageTagMutabilityMutable, + ValidateFunc: validation.StringInSlice([]string{ + ecr.ImageTagMutabilityMutable, + ecr.ImageTagMutabilityImmutable, + }, false), + }, + "image_scanning_configuration": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scan_on_push": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + }, "tags": tagsSchema(), "arn": { Type: schema.TypeString, @@ -53,8 +77,20 @@ func resourceAwsEcrRepositoryCreate(d *schema.ResourceData, meta interface{}) er conn := meta.(*AWSClient).ecrconn input := ecr.CreateRepositoryInput{ - RepositoryName: aws.String(d.Get("name").(string)), - Tags: tagsFromMapECR(d.Get("tags").(map[string]interface{})), + ImageTagMutability: aws.String(d.Get("image_tag_mutability").(string)), + RepositoryName: aws.String(d.Get("name").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().EcrTags(), + } + + imageScanningConfigs := d.Get("image_scanning_configuration").([]interface{}) + if len(imageScanningConfigs) > 0 { + imageScanningConfig := imageScanningConfigs[0] + if imageScanningConfig != nil { + configMap := imageScanningConfig.(map[string]interface{}) + input.ImageScanningConfiguration = &ecr.ImageScanningConfiguration{ + ScanOnPush: aws.Bool(configMap["scan_on_push"].(bool)), + } + } } log.Printf("[DEBUG] Creating ECR repository: %#v", input) @@ -81,8 +117,8 @@ func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) erro RepositoryNames: aws.StringSlice([]string{d.Id()}), } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - 
var err error + var err error + err = resource.Retry(1*time.Minute, func() *resource.RetryError { out, err = conn.DescribeRepositories(input) if d.IsNewResource() && isAWSErr(err, ecr.ErrCodeRepositoryNotFoundException, "") { return resource.RetryableError(err) @@ -93,6 +129,10 @@ func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) erro return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeRepositories(input) + } + if isAWSErr(err, ecr.ErrCodeRepositoryNotFoundException, "") { log.Printf("[WARN] ECR Repository (%s) not found, removing from state", d.Id()) d.SetId("") @@ -104,24 +144,66 @@ func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) erro } repository := out.Repositories[0] + arn := aws.StringValue(repository.RepositoryArn) - d.Set("arn", repository.RepositoryArn) + d.Set("arn", arn) d.Set("name", repository.RepositoryName) d.Set("registry_id", repository.RegistryId) d.Set("repository_url", repository.RepositoryUri) + d.Set("image_tag_mutability", repository.ImageTagMutability) - if err := getTagsECR(conn, d); err != nil { - return fmt.Errorf("error getting ECR repository tags: %s", err) + if err := d.Set("image_scanning_configuration", flattenImageScanningConfiguration(repository.ImageScanningConfiguration)); err != nil { + return fmt.Errorf("error setting image_scanning_configuration: %s", err) + } + + tags, err := keyvaluetags.EcrListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for ECR Repository (%s): %s", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil } +func flattenImageScanningConfiguration(isc *ecr.ImageScanningConfiguration) []map[string]interface{} { + if isc == nil { + return nil + } + + config := make(map[string]interface{}) + config["scan_on_push"] = aws.BoolValue(isc.ScanOnPush) + + return []map[string]interface{}{ + config, + } +} + func resourceAwsEcrRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { + arn := d.Get("arn").(string) conn := meta.(*AWSClient).ecrconn - if err := setTagsECR(conn, d); err != nil { - return fmt.Errorf("error setting ECR repository tags: %s", err) + if d.HasChange("image_tag_mutability") { + if err := resourceAwsEcrRepositoryUpdateImageTagMutability(conn, d); err != nil { + return err + } + } + + if d.HasChange("image_scanning_configuration") { + if err := resourceAwsEcrRepositoryUpdateImageScanningConfiguration(conn, d); err != nil { + return err + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.EcrUpdateTags(conn, arn, o, n); err != nil { + return fmt.Errorf("error updating ECR Repository (%s) tags: %s", arn, err) + } } return resourceAwsEcrRepositoryRead(d, meta) @@ -143,10 +225,11 @@ func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] Waiting for ECR Repository %q to be deleted", d.Id()) + input := &ecr.DescribeRepositoriesInput{ + RepositoryNames: aws.StringSlice([]string{d.Id()}), + } err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - _, err := conn.DescribeRepositories(&ecr.DescribeRepositoriesInput{ - RepositoryNames: aws.StringSlice([]string{d.Id()}), - }) + _, err = conn.DescribeRepositories(input) if err != nil { if isAWSErr(err, ecr.ErrCodeRepositoryNotFoundException, "") { return nil @@ -154,9 +237,16 @@ func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) er return 
resource.NonRetryableError(err) } - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for the ECR Repository to be deleted", d.Id())) + return resource.RetryableError(fmt.Errorf("%q: Timeout while waiting for the ECR Repository to be deleted", d.Id())) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeRepositories(input) + } + + if isAWSErr(err, ecr.ErrCodeRepositoryNotFoundException, "") { + return nil + } + if err != nil { return fmt.Errorf("error deleting ECR repository: %s", err) } @@ -165,3 +255,43 @@ func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) er return nil } + +func resourceAwsEcrRepositoryUpdateImageTagMutability(conn *ecr.ECR, d *schema.ResourceData) error { + input := &ecr.PutImageTagMutabilityInput{ + ImageTagMutability: aws.String(d.Get("image_tag_mutability").(string)), + RepositoryName: aws.String(d.Id()), + RegistryId: aws.String(d.Get("registry_id").(string)), + } + + _, err := conn.PutImageTagMutability(input) + if err != nil { + return fmt.Errorf("Error setting image tag mutability: %s", err.Error()) + } + + return nil +} +func resourceAwsEcrRepositoryUpdateImageScanningConfiguration(conn *ecr.ECR, d *schema.ResourceData) error { + + var ecrImageScanningConfig ecr.ImageScanningConfiguration + imageScanningConfigs := d.Get("image_scanning_configuration").([]interface{}) + if len(imageScanningConfigs) > 0 { + imageScanningConfig := imageScanningConfigs[0] + if imageScanningConfig != nil { + configMap := imageScanningConfig.(map[string]interface{}) + ecrImageScanningConfig.ScanOnPush = aws.Bool(configMap["scan_on_push"].(bool)) + } + } + + input := &ecr.PutImageScanningConfigurationInput{ + ImageScanningConfiguration: &ecrImageScanningConfig, + RepositoryName: aws.String(d.Id()), + RegistryId: aws.String(d.Get("registry_id").(string)), + } + + _, err := conn.PutImageScanningConfiguration(input) + if err != nil { + return fmt.Errorf("Error setting image scanning configuration: %s", err.Error()) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go index c661ed1140c..f0756774acd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go @@ -1,14 +1,15 @@ package aws import ( + "fmt" "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEcrRepositoryPolicy() *schema.Resource { @@ -51,9 +52,9 @@ func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Creating ECR resository policy: %s", input) // Retry due to IAM eventual consistency + var err error var out *ecr.SetRepositoryPolicyOutput - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var err error + err = resource.Retry(2*time.Minute, func() *resource.RetryError { out, err = conn.SetRepositoryPolicy(&input) if isAWSErr(err, 
"InvalidParameterException", "Invalid repository policy provided") { @@ -62,8 +63,11 @@ func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + out, err = conn.SetRepositoryPolicy(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating ECR Repository Policy: %s", err) } repositoryPolicy := *out @@ -124,9 +128,9 @@ func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Updating ECR resository policy: %s", input) // Retry due to IAM eventual consistency + var err error var out *ecr.SetRepositoryPolicyOutput - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var err error + err = resource.Retry(2*time.Minute, func() *resource.RetryError { out, err = conn.SetRepositoryPolicy(&input) if isAWSErr(err, "InvalidParameterException", "Invalid repository policy provided") { @@ -135,8 +139,11 @@ func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + out, err = conn.SetRepositoryPolicy(&input) + } if err != nil { - return err + return fmt.Errorf("Error updating ECR Repository Policy: %s", err) } repositoryPolicy := *out diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go index 032318ed24c..4de16d92968 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go @@ -7,10 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEcsCluster() *schema.Resource { @@ -34,6 +34,26 @@ func resourceAwsEcsCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "setting": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + ecs.ClusterSettingNameContainerInsights, + }, false), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, }, } } @@ -56,10 +76,16 @@ func resourceAwsEcsClusterCreate(d *schema.ResourceData, meta interface{}) error clusterName := d.Get("name").(string) log.Printf("[DEBUG] Creating ECS cluster %s", clusterName) - out, err := conn.CreateCluster(&ecs.CreateClusterInput{ + input := ecs.CreateClusterInput{ ClusterName: aws.String(clusterName), Tags: tagsFromMapECS(d.Get("tags").(map[string]interface{})), - }) + } + + if v, ok := d.GetOk("setting"); ok { + input.Settings = expandEcsSettings(v.(*schema.Set).List()) + } + + out, err := conn.CreateCluster(&input) if err != nil { return err } @@ -97,6 +123,9 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) 
error { return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeClusters(input) + } if isResourceNotFoundError(err) { log.Printf("[WARN] ECS Cluster (%s) not found, removing from state", d.Id()) @@ -132,6 +161,10 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", cluster.ClusterArn) d.Set("name", cluster.ClusterName) + if err := d.Set("setting", flattenEcsSettings(cluster.Settings)); err != nil { + return fmt.Errorf("error setting setting: %s", err) + } + if err := d.Set("tags", tagsToMapECS(cluster.Tags)); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -142,6 +175,18 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsEcsClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn + if d.HasChange("setting") { + input := ecs.UpdateClusterSettingsInput{ + Cluster: aws.String(d.Id()), + Settings: expandEcsSettings(d.Get("setting").(*schema.Set).List()), + } + + _, err := conn.UpdateClusterSettings(&input) + if err != nil { + return fmt.Errorf("error changing ECS cluster settings (%s): %s", d.Id(), err) + } + } + if d.HasChange("tags") { oldTagsRaw, newTagsRaw := d.GetChange("tags") oldTagsMap := oldTagsRaw.(map[string]interface{}) @@ -185,66 +230,114 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error conn := meta.(*AWSClient).ecsconn log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id()) - + input := &ecs.DeleteClusterInput{ + Cluster: aws.String(d.Id()), + } err := resource.Retry(10*time.Minute, func() *resource.RetryError { - out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{ - Cluster: aws.String(d.Id()), - }) + _, err := conn.DeleteCluster(input) if err == nil { - log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), out) + log.Printf("[DEBUG] ECS cluster %s deleted", d.Id()) return nil } - awsErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - if awsErr.Code() == "ClusterContainsContainerInstancesException" { - log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code()) + if isAWSErr(err, "ClusterContainsContainerInstancesException", "") { + log.Printf("[TRACE] Retrying ECS cluster %q deletion after %s", d.Id(), err) return resource.RetryableError(err) } - - if awsErr.Code() == "ClusterContainsServicesException" { - log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code()) + if isAWSErr(err, "ClusterContainsServicesException", "") { + log.Printf("[TRACE] Retrying ECS cluster %q deletion after %s", d.Id(), err) return resource.RetryableError(err) } - return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCluster(input) + } if err != nil { - return err + return fmt.Errorf("Error deleting ECS cluster: %s", err) } clusterName := d.Get("name").(string) + dcInput := &ecs.DescribeClustersInput{ + Clusters: []*string{aws.String(clusterName)}, + } + var out *ecs.DescribeClustersOutput err = resource.Retry(5*time.Minute, func() *resource.RetryError { log.Printf("[DEBUG] Checking if ECS Cluster %q is INACTIVE", d.Id()) - out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{ - Clusters: []*string{aws.String(clusterName)}, - }) - - for _, c := range out.Clusters { - if *c.ClusterName == clusterName { - if *c.Status == "INACTIVE" { - return nil - } - - return resource.RetryableError( - fmt.Errorf("ECS Cluster %q is still %q", clusterName, *c.Status)) 
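The ECS cluster hunks above add the setting block (limited to containerInsights by the schema validation) and route it through expand/flatten helpers defined at the end of this file's changes. A sketch of the update path, assembled from the pieces shown in this diff (illustration only):

	// Illustration only: expandEcsSettings converts the schema set of
	// {name, value} maps into the []*ecs.ClusterSetting the API expects.
	if d.HasChange("setting") {
		input := ecs.UpdateClusterSettingsInput{
			Cluster:  aws.String(d.Id()),
			Settings: expandEcsSettings(d.Get("setting").(*schema.Set).List()),
		}

		if _, err := conn.UpdateClusterSettings(&input); err != nil {
			return fmt.Errorf("error changing ECS cluster settings (%s): %s", d.Id(), err)
		}
	}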
- } - } + out, err = conn.DescribeClusters(dcInput) if err != nil { return resource.NonRetryableError(err) } + if !ecsClusterInactive(out, clusterName) { + return resource.RetryableError(fmt.Errorf("ECS Cluster %q is not inactive", clusterName)) + } return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeClusters(dcInput) + if err != nil { + return fmt.Errorf("Error waiting for ECS cluster to become inactive: %s", err) + } + if !ecsClusterInactive(out, clusterName) { + return fmt.Errorf("ECS Cluster %q is still not inactive", clusterName) + } + } if err != nil { - return err + return fmt.Errorf("Error waiting for ECS cluster to become inactive: %s", err) } log.Printf("[DEBUG] ECS cluster %q deleted", d.Id()) return nil } + +func ecsClusterInactive(out *ecs.DescribeClustersOutput, clusterName string) bool { + for _, c := range out.Clusters { + if aws.StringValue(c.ClusterName) == clusterName { + if *c.Status == "INACTIVE" { + return true + } + } + } + return false +} + +func expandEcsSettings(configured []interface{}) []*ecs.ClusterSetting { + if len(configured) == 0 { + return nil + } + + settings := make([]*ecs.ClusterSetting, 0, len(configured)) + + for _, raw := range configured { + data := raw.(map[string]interface{}) + + setting := &ecs.ClusterSetting{ + Name: aws.String(data["name"].(string)), + Value: aws.String(data["value"].(string)), + } + + settings = append(settings, setting) + } + + return settings +} + +func flattenEcsSettings(list []*ecs.ClusterSetting) []map[string]interface{} { + if len(list) == 0 { + return nil + } + + result := make([]map[string]interface{}, 0, len(list)) + for _, setting := range list { + l := map[string]interface{}{ + "name": aws.StringValue(setting.Name), + "value": aws.StringValue(setting.Value), + } + + result = append(result, l) + } + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go index 4bfa7881e3e..ae10d1f38f6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go @@ -11,10 +11,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEcsService() *schema.Resource { @@ -152,7 +152,6 @@ func resourceAwsEcsService() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "elb_name": { @@ -487,6 +486,9 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error return nil }) + if isResourceTimeoutError(err) { + out, err = conn.CreateService(&input) + } if err != nil { return fmt.Errorf("%s %q", err, d.Get("name").(string)) } @@ -536,11 +538,17 @@ func 
resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error { return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeServices(&input) + } if err != nil { - return err + return fmt.Errorf("Error reading ECS service: %s", err) } if len(out.Services) < 1 { + if d.IsNewResource() { + return fmt.Errorf("ECS service not created: %q", d.Id()) + } log.Printf("[WARN] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id()) d.SetId("") return nil @@ -838,7 +846,7 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Updating ECS Service (%s): %s", d.Id(), input) // Retry due to IAM eventual consistency err := resource.Retry(2*time.Minute, func() *resource.RetryError { - out, err := conn.UpdateService(&input) + _, err := conn.UpdateService(&input) if err != nil { if isAWSErr(err, ecs.ErrCodeInvalidParameterException, "Please verify that the ECS service role being passed has the proper permissions.") { return resource.RetryableError(err) @@ -848,12 +856,13 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error } return resource.NonRetryableError(err) } - - log.Printf("[DEBUG] Updated ECS service %s", out.Service) return nil }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateService(&input) + } if err != nil { - return fmt.Errorf("error updating ECS Service (%s): %s", d.Id(), err) + return fmt.Errorf("Error updating ECS Service (%s): %s", d.Id(), err) } } @@ -952,9 +961,11 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error } return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.DeleteService(&input) + } if err != nil { - return err + return fmt.Errorf("Error deleting ECS service: %s", err) } // Wait until it's deleted diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go index d07f6dafe46..da4dfdce311 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go @@ -9,10 +9,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEcsTaskDefinition() *schema.Resource { @@ -230,6 +230,37 @@ func resourceAwsEcsTaskDefinition() *schema.Resource { }, false), }, + "proxy_configuration": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "properties": { + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + }, + "type": { + Type: 
schema.TypeString, + Default: ecs.ProxyConfigurationTypeAppmesh, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + ecs.ProxyConfigurationTypeAppmesh, + }, false), + }, + }, + }, + }, + "tags": tagsSchema(), }, } @@ -321,6 +352,34 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{} input.RequiresCompatibilities = expandStringList(v.(*schema.Set).List()) } + proxyConfigs := d.Get("proxy_configuration").([]interface{}) + if len(proxyConfigs) > 0 { + proxyConfig := proxyConfigs[0] + configMap := proxyConfig.(map[string]interface{}) + + containerName := configMap["container_name"].(string) + proxyType := configMap["type"].(string) + + rawProperties := configMap["properties"].(map[string]interface{}) + + properties := make([]*ecs.KeyValuePair, len(rawProperties)) + i := 0 + for name, value := range rawProperties { + properties[i] = &ecs.KeyValuePair{ + Name: aws.String(name), + Value: aws.String(value.(string)), + } + i++ + } + + var ecsProxyConfig ecs.ProxyConfiguration + ecsProxyConfig.ContainerName = aws.String(containerName) + ecsProxyConfig.Type = aws.String(proxyType) + ecsProxyConfig.Properties = properties + + input.ProxyConfiguration = &ecsProxyConfig + } + log.Printf("[DEBUG] Registering ECS task definition: %s", input) out, err := conn.RegisterTaskDefinition(&input) if err != nil { @@ -393,7 +452,11 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) } if err := d.Set("requires_compatibilities", flattenStringList(taskDefinition.RequiresCompatibilities)); err != nil { - return err + return fmt.Errorf("error setting requires_compatibilities: %s", err) + } + + if err := d.Set("proxy_configuration", flattenProxyConfiguration(taskDefinition.ProxyConfiguration)); err != nil { + return fmt.Errorf("error setting proxy_configuration: %s", err) } return nil @@ -413,6 +476,28 @@ func flattenPlacementConstraints(pcs []*ecs.TaskDefinitionPlacementConstraint) [ return results } +func flattenProxyConfiguration(pc *ecs.ProxyConfiguration) []map[string]interface{} { + if pc == nil { + return nil + } + + meshProperties := make(map[string]string) + if pc.Properties != nil { + for _, prop := range pc.Properties { + meshProperties[aws.StringValue(prop.Name)] = aws.StringValue(prop.Value) + } + } + + config := make(map[string]interface{}) + config["container_name"] = aws.StringValue(pc.ContainerName) + config["type"] = aws.StringValue(pc.Type) + config["properties"] = meshProperties + + return []map[string]interface{}{ + config, + } +} + func resourceAwsEcsTaskDefinitionUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ecsconn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition_migrate.go index 179b73bff4e..b0bbe57d5e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition_migrate.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/terraform" + 
"github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsEcsTaskDefinitionMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go index d2066cb6fb1..30edf28a711 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEfsFileSystem() *schema.Resource { @@ -31,6 +31,7 @@ func resourceAwsEfsFileSystem() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "creation_token": { Type: schema.TypeString, Optional: true, @@ -93,6 +94,26 @@ func resourceAwsEfsFileSystem() *schema.Resource { efs.ThroughputModeProvisioned, }, false), }, + + "lifecycle_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "transition_to_ia": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + efs.TransitionToIARulesAfter14Days, + efs.TransitionToIARulesAfter30Days, + efs.TransitionToIARulesAfter60Days, + efs.TransitionToIARulesAfter90Days, + }, false), + }, + }, + }, + }, }, } } @@ -111,6 +132,7 @@ func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) er createOpts := &efs.CreateFileSystemInput{ CreationToken: aws.String(creationToken), ThroughputMode: aws.String(throughputMode), + Tags: tagsFromMapEFS(d.Get("tags").(map[string]interface{})), } if v, ok := d.GetOk("performance_mode"); ok { @@ -160,9 +182,16 @@ func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] EFS file system %q created.", d.Id()) - err = setTagsEFS(conn, d) - if err != nil { - return fmt.Errorf("error setting tags for EFS file system (%q): %s", d.Id(), err) + _, hasLifecyclePolicy := d.GetOk("lifecycle_policy") + if hasLifecyclePolicy { + _, err := conn.PutLifecycleConfiguration(&efs.PutLifecycleConfigurationInput{ + FileSystemId: aws.String(d.Id()), + LifecyclePolicies: resourceAwsEfsFileSystemLifecyclePolicy(d.Get("lifecycle_policy").([]interface{})), + }) + if err != nil { + return fmt.Errorf("Error creating lifecycle policy for EFS file system %q: %s", + d.Id(), err.Error()) + } } return resourceAwsEfsFileSystemRead(d, meta) @@ -203,6 +232,17 @@ func resourceAwsEfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("lifecycle_policy") { + _, err := conn.PutLifecycleConfiguration(&efs.PutLifecycleConfigurationInput{ + FileSystemId: aws.String(d.Id()), + LifecyclePolicies: resourceAwsEfsFileSystemLifecyclePolicy(d.Get("lifecycle_policy").([]interface{})), + }) + if err != nil { + return 
fmt.Errorf("Error updating lifecycle policy for EFS file system %q: %s", + d.Id(), err.Error()) + } + } + if d.HasChange("tags") { err := setTagsEFS(conn, d) if err != nil { @@ -297,6 +337,17 @@ func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("error setting dns_name: %s", err) } + res, err := conn.DescribeLifecycleConfiguration(&efs.DescribeLifecycleConfigurationInput{ + FileSystemId: fs.FileSystemId, + }) + if err != nil { + return fmt.Errorf("Error describing lifecycle configuration for EFS file system (%s): %s", + aws.StringValue(fs.FileSystemId), err) + } + if err := resourceAwsEfsFileSystemSetLifecyclePolicy(d, res.LifecyclePolicies); err != nil { + return err + } + return nil } @@ -379,3 +430,37 @@ func resourceEfsFileSystemCreateUpdateRefreshFunc(id string, conn *efs.EFS) reso return fs, state, nil } } + +func resourceAwsEfsFileSystemSetLifecyclePolicy(d *schema.ResourceData, lp []*efs.LifecyclePolicy) error { + log.Printf("[DEBUG] lifecycle pols: %s %d", lp, len(lp)) + if len(lp) == 0 { + d.Set("lifecycle_policy", nil) + return nil + } + newLP := make([]*map[string]interface{}, len(lp)) + + for i := 0; i < len(lp); i++ { + config := lp[i] + data := make(map[string]interface{}) + newLP[i] = &data + if config.TransitionToIA != nil { + data["transition_to_ia"] = *config.TransitionToIA + } + log.Printf("[DEBUG] lp: %s", data) + } + + if err := d.Set("lifecycle_policy", newLP); err != nil { + return fmt.Errorf("error setting lifecycle_policy: %s", err) + } + return nil +} + +func resourceAwsEfsFileSystemLifecyclePolicy(lcPol []interface{}) []*efs.LifecyclePolicy { + result := make([]*efs.LifecyclePolicy, len(lcPol)) + + for i := 0; i < len(lcPol); i++ { + lp := lcPol[i].(map[string]interface{}) + result[i] = &efs.LifecyclePolicy{TransitionToIA: aws.String(lp["transition_to_ia"].(string))} + } + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go index 778881b11c0..ca072f689f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEfsMountTarget() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go index 413672d35d1..99e8778660a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEgressOnlyInternetGateway() *schema.Resource { @@ -50,29 +50,29 @@ func resourceAwsEgressOnlyInternetGatewayRead(d *schema.ResourceData, meta inter EgressOnlyInternetGatewayIds: []*string{aws.String(d.Id())}, } + var resp *ec2.DescribeEgressOnlyInternetGatewaysOutput err := resource.Retry(1*time.Minute, func() *resource.RetryError { - resp, err := conn.DescribeEgressOnlyInternetGateways(req) + var err error + resp, err = conn.DescribeEgressOnlyInternetGateways(req) if err != nil { return resource.NonRetryableError(err) } - if resp != nil && len(resp.EgressOnlyInternetGateways) > 0 { - for _, igw := range resp.EgressOnlyInternetGateways { - if aws.StringValue(igw.EgressOnlyInternetGatewayId) == d.Id() { - found = true - break - } - } - } + + found = hasEc2EgressOnlyInternetGateway(d.Id(), resp) if d.IsNewResource() && !found { return resource.RetryableError(fmt.Errorf("Egress Only Internet Gateway (%s) not found.", d.Id())) } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.DescribeEgressOnlyInternetGateways(req) + } if err != nil { return fmt.Errorf("Error describing egress internet gateway: %s", err) } + found = hasEc2EgressOnlyInternetGateway(d.Id(), resp) if !found { log.Printf("[Error] Cannot find Egress Only Internet Gateway: %q", d.Id()) d.SetId("") @@ -82,6 +82,19 @@ func resourceAwsEgressOnlyInternetGatewayRead(d *schema.ResourceData, meta inter return nil } +func hasEc2EgressOnlyInternetGateway(id string, resp *ec2.DescribeEgressOnlyInternetGatewaysOutput) bool { + var found bool + if resp != nil && len(resp.EgressOnlyInternetGateways) > 0 { + for _, igw := range resp.EgressOnlyInternetGateways { + if aws.StringValue(igw.EgressOnlyInternetGatewayId) == id { + found = true + break + } + } + } + return found +} + func resourceAwsEgressOnlyInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go index 6343e10a32c..e820bb832a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go @@ -10,8 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsEip() *schema.Resource { @@ -141,9 +142,9 @@ func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { log.Printf("[INFO] EIP ID: %s (domain: %v)", d.Id(), *allocResp.Domain) - if _, ok := d.GetOk("tags"); ok { - if err := setTags(ec2conn, d); err != nil { - return fmt.Errorf("Error creating EIP tags: %s", err) + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + if 
err := keyvaluetags.Ec2UpdateTags(ec2conn, d.Id(), nil, v); err != nil { + return fmt.Errorf("error adding tags: %s", err) } } @@ -185,6 +186,9 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + describeAddresses, err = ec2conn.DescribeAddresses(req) + } if err != nil { return fmt.Errorf("Error retrieving EIP: %s", err) } @@ -269,7 +273,9 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { d.SetId(*address.AllocationId) } - d.Set("tags", tagsToMap(address.Tags)) + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(address.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } return nil } @@ -340,6 +346,9 @@ func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + _, err = ec2conn.AssociateAddress(assocOpts) + } if err != nil { // Prevent saving instance if association failed // e.g. missing internet gateway in VPC @@ -349,9 +358,10 @@ func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { } } - if _, ok := d.GetOk("tags"); ok { - if err := setTags(ec2conn, d); err != nil { - return fmt.Errorf("Error updating EIP tags: %s", err) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.Ec2UpdateTags(ec2conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating EIP (%s) tags: %s", d.Id(), err) } } @@ -377,22 +387,24 @@ func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { } domain := resourceAwsEipDomain(d) - return resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - var err error - switch domain { - case "vpc": - log.Printf( - "[DEBUG] EIP release (destroy) address allocation: %v", - d.Id()) - _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ - AllocationId: aws.String(d.Id()), - }) - case "standard": - log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id()) - _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ - PublicIp: aws.String(d.Id()), - }) + + var input *ec2.ReleaseAddressInput + switch domain { + case "vpc": + log.Printf("[DEBUG] EIP release (destroy) address allocation: %v", d.Id()) + input = &ec2.ReleaseAddressInput{ + AllocationId: aws.String(d.Id()), + } + case "standard": + log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id()) + input = &ec2.ReleaseAddressInput{ + PublicIp: aws.String(d.Id()), } + } + + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + var err error + _, err = ec2conn.ReleaseAddress(input) if err == nil { return nil @@ -403,6 +415,13 @@ func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { return resource.RetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = ec2conn.ReleaseAddress(input) + } + if err != nil { + return fmt.Errorf("Error releasing EIP address: %s", err) + } + return nil } func resourceAwsEipDomain(d *schema.ResourceData) string { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go index c7485e1c656..59eb7a7a05a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go @@ -6,11 +6,11 @@ import ( "net" "time" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsEipAssociation() *schema.Resource { @@ -105,6 +105,9 @@ func resourceAwsEipAssociationCreate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.AssociateAddress(request) + } if err != nil { return fmt.Errorf("Error associating EIP: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eks_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eks_cluster.go index 2f7344faa46..0f50efd11ff 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eks_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eks_cluster.go @@ -8,9 +8,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) var eksLogTypes = []string{ @@ -33,7 +34,7 @@ func resourceAwsEksCluster() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(15 * time.Minute), + Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(60 * time.Minute), Delete: schema.DefaultTimeout(15 * time.Minute), }, @@ -64,6 +65,26 @@ func resourceAwsEksCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "identity": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oidc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, "name": { Type: schema.TypeString, Required: true, @@ -80,6 +101,11 @@ func resourceAwsEksCluster() *schema.Resource { ForceNew: true, ValidateFunc: validateArn, }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), "version": { Type: schema.TypeString, Optional: true, @@ -146,6 +172,10 @@ func resourceAwsEksClusterCreate(d *schema.ResourceData, meta interface{}) error Logging: expandEksLoggingTypes(d.Get("enabled_cluster_log_types").(*schema.Set)), } + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + input.Tags = keyvaluetags.New(v).IgnoreAws().EksTags() + } + if v, ok := d.GetOk("version"); ok && v.(string) != "" { input.Version = aws.String(v.(string)) } @@ -173,7 +203,9 @@ func resourceAwsEksClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.CreateCluster(input) + } if err != 
nil { return fmt.Errorf("error creating EKS Cluster (%s): %s", name, err) } @@ -227,9 +259,20 @@ func resourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) d.Set("endpoint", cluster.Endpoint) + + if err := d.Set("identity", flattenEksIdentity(cluster.Identity)); err != nil { + return fmt.Errorf("error setting identity: %s", err) + } + d.Set("name", cluster.Name) d.Set("platform_version", cluster.PlatformVersion) d.Set("role_arn", cluster.RoleArn) + d.Set("status", cluster.Status) + + if err := d.Set("tags", keyvaluetags.EksKeyValueTags(cluster.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("version", cluster.Version) if err := d.Set("enabled_cluster_log_types", flattenEksEnabledLogTypes(cluster.Logging)); err != nil { return fmt.Errorf("error setting enabled_cluster_log_types: %s", err) @@ -245,6 +288,13 @@ func resourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsEksClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).eksconn + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.EksUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + if d.HasChange("version") { input := &eks.UpdateClusterVersionInput{ Name: aws.String(d.Id()), @@ -423,6 +473,30 @@ func flattenEksCertificate(certificate *eks.Certificate) []map[string]interface{ return []map[string]interface{}{m} } +func flattenEksIdentity(identity *eks.Identity) []map[string]interface{} { + if identity == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "oidc": flattenEksOidc(identity.Oidc), + } + + return []map[string]interface{}{m} +} + +func flattenEksOidc(oidc *eks.OIDC) []map[string]interface{} { + if oidc == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "issuer": aws.StringValue(oidc.Issuer), + } + + return []map[string]interface{}{m} +} + func flattenEksVpcConfigResponse(vpcConfig *eks.VpcConfigResponse) []map[string]interface{} { if vpcConfig == nil { return []map[string]interface{}{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go index 2966c26e1dc..11fea76b624 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go @@ -5,11 +5,11 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) func resourceAwsElasticBeanstalkApplication() *schema.Resource { @@ -23,6 +23,10 @@ func resourceAwsElasticBeanstalkApplication() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "name": { Type: schema.TypeString, Required: true, @@ -58,6 +62,7 @@ func 
resourceAwsElasticBeanstalkApplication() *schema.Resource { }, }, }, + "tags": tagsSchema(), }, } } @@ -74,6 +79,7 @@ func resourceAwsElasticBeanstalkApplicationCreate(d *schema.ResourceData, meta i req := &elasticbeanstalk.CreateApplicationInput{ ApplicationName: aws.String(name), Description: aws.String(description), + Tags: tagsFromMapBeanstalk(d.Get("tags").(map[string]interface{})), } app, err := beanstalkConn.CreateApplication(req) @@ -105,6 +111,10 @@ func resourceAwsElasticBeanstalkApplicationUpdate(d *schema.ResourceData, meta i } } + if err := setTagsBeanstalk(beanstalkConn, d, d.Get("arn").(string)); err != nil { + return fmt.Errorf("error setting tags for %s: %s", d.Id(), err) + } + return resourceAwsElasticBeanstalkApplicationRead(d, meta) } @@ -214,6 +224,9 @@ func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta int } return nil }) + if isResourceTimeoutError(err) { + app, err = getBeanstalkApplication(d.Id(), conn) + } if err != nil { if app == nil { log.Printf("[WARN] %s, removing from state", err) @@ -223,6 +236,7 @@ func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta int return err } + d.Set("arn", app.ApplicationArn) d.Set("name", app.ApplicationName) d.Set("description", app.Description) @@ -230,6 +244,10 @@ func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta int d.Set("appversion_lifecycle", flattenResourceLifecycleConfig(app.ResourceLifecycleConfig)) } + if err := saveTagsBeanstalk(conn, d, aws.StringValue(app.ApplicationArn)); err != nil { + return fmt.Errorf("error saving tags for %s: %s", d.Id(), err) + } + return nil } @@ -243,8 +261,9 @@ func resourceAwsElasticBeanstalkApplicationDelete(d *schema.ResourceData, meta i return err } - return resource.Retry(10*time.Second, func() *resource.RetryError { - app, err := getBeanstalkApplication(d.Id(), meta.(*AWSClient).elasticbeanstalkconn) + var app *elasticbeanstalk.ApplicationDescription + err = resource.Retry(10*time.Second, func() *resource.RetryError { + app, err = getBeanstalkApplication(d.Id(), meta.(*AWSClient).elasticbeanstalkconn) if err != nil { return resource.NonRetryableError(err) } @@ -255,6 +274,13 @@ func resourceAwsElasticBeanstalkApplicationDelete(d *schema.ResourceData, meta i } return nil }) + if isResourceTimeoutError(err) { + app, err = getBeanstalkApplication(d.Id(), meta.(*AWSClient).elasticbeanstalkconn) + } + if err != nil { + return fmt.Errorf("Error deleting Beanstalk application: %s", err) + } + return nil } func getBeanstalkApplication(id string, conn *elasticbeanstalk.ElasticBeanstalk) (*elasticbeanstalk.ApplicationDescription, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go index ffe4c13a365..118a767339b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticBeanstalkApplicationVersion() *schema.Resource { @@ -24,6 +24,10 @@ func resourceAwsElasticBeanstalkApplicationVersion() *schema.Resource { Required: true, ForceNew: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "description": { Type: schema.TypeString, Optional: true, @@ -48,6 +52,7 @@ func resourceAwsElasticBeanstalkApplicationVersion() *schema.Resource { Optional: true, Default: false, }, + "tags": tagsSchema(), }, } } @@ -71,6 +76,7 @@ func resourceAwsElasticBeanstalkApplicationVersionCreate(d *schema.ResourceData, Description: aws.String(description), SourceBundle: &s3Location, VersionLabel: aws.String(name), + Tags: tagsFromMapBeanstalk(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Elastic Beanstalk Application Version create opts: %s", createOpts) @@ -111,6 +117,14 @@ func resourceAwsElasticBeanstalkApplicationVersionRead(d *schema.ResourceData, m return err } + if err := d.Set("arn", resp.ApplicationVersions[0].ApplicationVersionArn); err != nil { + return err + } + + if err := saveTagsBeanstalk(conn, d, aws.StringValue(resp.ApplicationVersions[0].ApplicationVersionArn)); err != nil { + return fmt.Errorf("error saving tags for %s: %s", d.Id(), err) + } + return nil } @@ -123,6 +137,10 @@ func resourceAwsElasticBeanstalkApplicationVersionUpdate(d *schema.ResourceData, } } + if err := setTagsBeanstalk(conn, d, d.Get("arn").(string)); err != nil { + return fmt.Errorf("error setting tags for %s: %s", d.Id(), err) + } + return resourceAwsElasticBeanstalkApplicationVersionRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go index 6a3c66daaa5..698f00a45ea 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go index 9967cce39ba..96c81091464 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go @@ -9,14 +9,14 @@ import ( "time" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" ) func resourceAwsElasticBeanstalkOptionSetting() *schema.Resource { @@ -88,6 +88,10 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { Optional: true, ForceNew: true, }, + "endpoint_url": { + Type: schema.TypeString, + Computed: true, + }, "tier": { Type: schema.TypeString, Optional: true, @@ -649,6 +653,9 @@ func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta int if err := d.Set("triggers", flattenBeanstalkTrigger(resources.EnvironmentResources.Triggers)); err != nil { return err } + if err := d.Set("endpoint_url", env.EndpointURL); err != nil { + return err + } tags, err := conn.ListTagsForResource(&elasticbeanstalk.ListTagsForResourceInput{ ResourceArn: aws.String(d.Get("arn").(string)), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go index 31cd5c7777b..4759b4541e5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsElasticBeanstalkEnvironmentMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go index 0947ea92ba4..60dee0dd2f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticTranscoderPipeline() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go index cea85f33cd4..1815ec6d447 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticTranscoderPreset() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go index 97de12cdd95..3723da0472a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "log" + "regexp" "sort" "strings" "time" @@ -12,10 +13,10 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/elasticache" gversion "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsElasticacheCluster() *schema.Resource { @@ -95,7 +96,13 @@ func resourceAwsElasticacheCluster() *schema.Resource { // with non-converging diffs. 
return strings.ToLower(val.(string)) }, - ValidateFunc: validateElastiCacheClusterId, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 50), + validation.StringMatch(regexp.MustCompile(`^[0-9a-z-]+$`), "must contain only lowercase alphanumeric characters and hyphens"), + validation.StringMatch(regexp.MustCompile(`^[a-z]`), "must begin with a lowercase letter"), + validateStringNotMatch(regexp.MustCompile(`--`), "cannot contain two consecutive hyphens"), + validateStringNotMatch(regexp.MustCompile(`-$`), "cannot end with a hyphen"), + ), }, "configuration_endpoint": { Type: schema.TypeString, @@ -801,6 +808,10 @@ func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCacheCluster(input) + } + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go index e8b57cf6bec..5da7b75a078 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go @@ -7,9 +7,9 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -318,10 +318,10 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - return resource.Retry(3*time.Minute, func() *resource.RetryError { - deleteOpts := elasticache.DeleteCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(d.Id()), - } + deleteOpts := elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(d.Id()), + } + err := resource.Retry(3*time.Minute, func() *resource.RetryError { _, err := conn.DeleteCacheParameterGroup(&deleteOpts) if err != nil { awsErr, ok := err.(awserr.Error) @@ -335,6 +335,18 @@ func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta int } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCacheParameterGroup(&deleteOpts) + } + if isAWSErr(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting Elasticache Parameter Group (%s): %s", d.Id(), err) + } + + return nil } func resourceAwsElasticacheParameterHash(v interface{}) int { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go index 5a127aafd6c..890ab11792a 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go @@ -9,9 +9,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsElasticacheReplicationGroup() *schema.Resource { @@ -156,10 +158,16 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Required: true, }, "replication_group_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAwsElastiCacheReplicationGroupId, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 40), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z-]+$`), "must contain only alphanumeric characters and hyphens"), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z]`), "must begin with a letter"), + validateStringNotMatch(regexp.MustCompile(`--`), "cannot contain two consecutive hyphens"), + validateStringNotMatch(regexp.MustCompile(`-$`), "cannot end with a hyphen"), + ), StateFunc: func(val interface{}) string { return strings.ToLower(val.(string)) }, @@ -221,9 +229,21 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Default: false, ForceNew: true, }, + "kms_key_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, }, SchemaVersion: 1, + // SchemaVersion: 1 did not include any state changes via MigrateState. + // Perform a no-operation state upgrade for Terraform 0.12 compatibility. + // Future state migrations should be performed with StateUpgraders. 
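+	// The callback below therefore returns the instance state unchanged;
+	// it exists only so the SDK sees a MigrateState entry for version 1.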
+ MigrateState: func(v int, inst *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + return inst, nil + }, + Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), Delete: schema.DefaultTimeout(40 * time.Minute), @@ -291,6 +311,10 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i params.NotificationTopicArn = aws.String(v.(string)) } + if v, ok := d.GetOk("kms_key_id"); ok { + params.KmsKeyId = aws.String(v.(string)) + } + if v, ok := d.GetOk("snapshot_retention_limit"); ok { params.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) } @@ -412,6 +436,8 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } } + d.Set("kms_key_id", rgp.KmsKeyId) + d.Set("replication_group_description", rgp.Description) d.Set("number_cache_clusters", len(rgp.MemberClusters)) if err := d.Set("member_clusters", flattenStringList(rgp.MemberClusters)); err != nil { @@ -843,8 +869,16 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteReplicationGroup(input) + } + + if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { + return nil + } + if err != nil { - return err + return fmt.Errorf("error deleting Elasticache Replication Group: %s", err) } log.Printf("[DEBUG] Waiting for deletion: %s", replicationGroupID) @@ -902,28 +936,3 @@ func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws [ } return } - -func validateAwsElastiCacheReplicationGroupId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if (len(value) < 1) || (len(value) > 20) { - errors = append(errors, fmt.Errorf( - "%q must contain from 1 to 20 alphanumeric characters or hyphens", k)) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go index 41fc03ce924..1c1cffbc138 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticacheSecurityGroup() *schema.Resource { @@ -128,7 +128,7 @@ func 
resourceAwsElasticacheSecurityGroupDelete(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Cache security group delete: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ CacheSecurityGroupName: aws.String(d.Id()), }) @@ -150,4 +150,12 @@ func resourceAwsElasticacheSecurityGroupDelete(d *schema.ResourceData, meta inte } return nil }) + + if isResourceTimeoutError(err) { + _, err = conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String(d.Id()), + }) + } + + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go index fce1e9d53a7..de959072fcf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticacheSubnetGroup() *schema.Resource { @@ -153,7 +153,7 @@ func resourceAwsElasticacheSubnetGroupDelete(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Cache subnet group delete: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteCacheSubnetGroup(&elasticache.DeleteCacheSubnetGroupInput{ CacheSubnetGroupName: aws.String(d.Id()), }) @@ -173,4 +173,19 @@ func resourceAwsElasticacheSubnetGroupDelete(d *schema.ResourceData, meta interf } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCacheSubnetGroup(&elasticache.DeleteCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String(d.Id()), + }) + } + + if isAWSErr(err, elasticache.ErrCodeCacheSubnetGroupNotFoundFault, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting Elasticache Subnet Group (%s): %s", d.Id(), err) + } + + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go index a989d22531c..3a559c513e0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go @@ -10,11 +10,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsElasticSearchDomain() *schema.Resource { @@ -190,6 +190,22 @@ func resourceAwsElasticSearchDomain() *schema.Resource { Optional: true, Default: "m3.medium.elasticsearch", }, + "zone_awareness_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_zone_count": { + Type: schema.TypeInt, + Optional: true, + Default: 2, + ValidateFunc: validation.IntInSlice([]int{2, 3}), + }, + }, + }, + }, "zone_awareness_enabled": { Type: schema.TypeBool, Optional: true, @@ -460,9 +476,11 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface } return nil }) - + if isResourceTimeoutError(err) { + out, err = conn.CreateElasticsearchDomain(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating ElasticSearch domain: %s", err) } d.SetId(aws.StringValue(out.DomainStatus.ARN)) @@ -493,10 +511,13 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface } func waitForElasticSearchDomainCreation(conn *elasticsearch.ElasticsearchService, domainName, arn string) error { - return resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(domainName), - }) + input := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(domainName), + } + var out *elasticsearch.DescribeElasticsearchDomainOutput + err := resource.Retry(60*time.Minute, func() *resource.RetryError { + var err error + out, err = conn.DescribeElasticsearchDomain(input) if err != nil { return resource.NonRetryableError(err) } @@ -508,6 +529,19 @@ func waitForElasticSearchDomainCreation(conn *elasticsearch.ElasticsearchService return resource.RetryableError( fmt.Errorf("%q: Timeout while waiting for the domain to be created", arn)) }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeElasticsearchDomain(input) + if err != nil { + return fmt.Errorf("Error describing ElasticSearch domain: %s", err) + } + if !*out.DomainStatus.Processing && (out.DomainStatus.Endpoint != nil || out.DomainStatus.Endpoints != nil) { + return nil + } + } + if err != nil { + return fmt.Errorf("Error waiting for ElasticSearch domain to be created: %s", err) + } + return nil } func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error { @@ -711,10 +745,12 @@ func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface return err } + descInput := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + var out *elasticsearch.DescribeElasticsearchDomainOutput err = resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) + out, err = 
conn.DescribeElasticsearchDomain(descInput) if err != nil { return resource.NonRetryableError(err) } @@ -726,8 +762,17 @@ func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface return resource.RetryableError( fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())) }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeElasticsearchDomain(descInput) + if err != nil { + return fmt.Errorf("Error describing ElasticSearch domain: %s", err) + } + if !*out.DomainStatus.Processing { + return nil + } + } if err != nil { - return err + return fmt.Errorf("Error waiting for ElasticSearch domain changes to be processed: %s", err) } if d.HasChange("elasticsearch_version") { @@ -794,8 +839,10 @@ func resourceAwsElasticSearchDomainDeleteWaiter(domainName string, conn *elastic input := &elasticsearch.DescribeElasticsearchDomainInput{ DomainName: aws.String(domainName), } + var out *elasticsearch.DescribeElasticsearchDomainOutput err := resource.Retry(90*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(input) + var err error + out, err = conn.DescribeElasticsearchDomain(input) if err != nil { if isAWSErr(err, elasticsearch.ErrCodeResourceNotFoundException, "") { @@ -810,8 +857,22 @@ func resourceAwsElasticSearchDomainDeleteWaiter(domainName string, conn *elastic return resource.RetryableError(fmt.Errorf("timeout while waiting for the domain %q to be deleted", domainName)) }) - - return err + if isResourceTimeoutError(err) { + out, err = conn.DescribeElasticsearchDomain(input) + if err != nil { + if isAWSErr(err, elasticsearch.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error describing ElasticSearch domain: %s", err) + } + if out.DomainStatus != nil && !aws.BoolValue(out.DomainStatus.Processing) { + return nil + } + } + if err != nil { + return fmt.Errorf("Error waiting for ElasticSearch domain to be deleted: %s", err) + } + return nil } func suppressEquivalentKmsKeyIds(k, old, new string, d *schema.ResourceData) bool { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go index 46ae0c1f821..5a4a89da232 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElasticSearchDomainPolicy() *schema.Resource { @@ -68,11 +68,13 @@ func resourceAwsElasticSearchDomainPolicyUpsert(d *schema.ResourceData, meta int } d.SetId("esd-policy-" + domainName) - + input := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + var out *elasticsearch.DescribeElasticsearchDomainOutput err = resource.Retry(50*time.Minute, func() *resource.RetryError { - out, err := 
conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) + var err error + out, err = conn.DescribeElasticsearchDomain(input) if err != nil { return resource.NonRetryableError(err) } @@ -84,8 +86,14 @@ func resourceAwsElasticSearchDomainPolicyUpsert(d *schema.ResourceData, meta int return resource.RetryableError( fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())) }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeElasticsearchDomain(input) + if err == nil && !*out.DomainStatus.Processing { + return nil + } + } if err != nil { - return err + return fmt.Errorf("Error upserting Elasticsearch domain policy: %s", err) } return resourceAwsElasticSearchDomainPolicyRead(d, meta) @@ -103,10 +111,13 @@ func resourceAwsElasticSearchDomainPolicyDelete(d *schema.ResourceData, meta int } log.Printf("[DEBUG] Waiting for ElasticSearch domain policy %q to be deleted", d.Get("domain_name").(string)) + input := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + var out *elasticsearch.DescribeElasticsearchDomainOutput err = resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) + var err error + out, err = conn.DescribeElasticsearchDomain(input) if err != nil { return resource.NonRetryableError(err) } @@ -118,5 +129,14 @@ func resourceAwsElasticSearchDomainPolicyDelete(d *schema.ResourceData, meta int return resource.RetryableError( fmt.Errorf("%q: Timeout while waiting for policy to be deleted", d.Id())) }) - return err + if isResourceTimeoutError(err) { + out, err := conn.DescribeElasticsearchDomain(input) + if err == nil && !*out.DomainStatus.Processing { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting Elasticsearch domain policy: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go index 331c9b71134..11efaeeb905 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go @@ -14,10 +14,10 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsElb() *schema.Resource { @@ -311,9 +311,11 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { } return nil }) - + if isResourceTimeoutError(err) { + _, err = elbconn.CreateLoadBalancer(elbOpts) + } if err != nil { - return err + return fmt.Errorf("Error creating ELB: %s", err) } // Assign the elb's unique identifier for use later @@ -511,16 
+513,15 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { // other listeners on the ELB. Retry here to eliminate that. err := resource.Retry(5*time.Minute, func() *resource.RetryError { log.Printf("[DEBUG] ELB Create Listeners opts: %s", createListenersOpts) - if _, err := elbconn.CreateLoadBalancerListeners(createListenersOpts); err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DuplicateListener" { - log.Printf("[DEBUG] Duplicate listener found for ELB (%s), retrying", d.Id()) - return resource.RetryableError(awsErr) - } - if awsErr.Code() == "CertificateNotFound" && strings.Contains(awsErr.Message(), "Server Certificate not found for the key: arn") { - log.Printf("[DEBUG] SSL Cert not found for given ARN, retrying") - return resource.RetryableError(awsErr) - } + _, err := elbconn.CreateLoadBalancerListeners(createListenersOpts) + if err != nil { + if isAWSErr(err, "DuplicateListener", "") { + log.Printf("[DEBUG] Duplicate listener found for ELB (%s), retrying", d.Id()) + return resource.RetryableError(err) + } + if isAWSErr(err, "CertificateNotFound", "Server Certificate not found for the key: arn") { + log.Printf("[DEBUG] SSL Cert not found for given ARN, retrying") + return resource.RetryableError(err) } // Didn't recognize the error, so shouldn't retry. @@ -529,6 +530,9 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { // Successful creation return nil }) + if isResourceTimeoutError(err) { + _, err = elbconn.CreateLoadBalancerListeners(createListenersOpts) + } if err != nil { return fmt.Errorf("Failure adding new or updated ELB listeners: %s", err) } @@ -765,18 +769,19 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := elbconn.AttachLoadBalancerToSubnets(attachOpts) if err != nil { - if awsErr, ok := err.(awserr.Error); ok { + if isAWSErr(err, "InvalidConfigurationRequest", "cannot be attached to multiple subnets in the same AZ") { // eventually consistent issue with removing a subnet in AZ1 and // immediately adding a new one in the same AZ - if awsErr.Code() == "InvalidConfigurationRequest" && strings.Contains(awsErr.Message(), "cannot be attached to multiple subnets in the same AZ") { - log.Printf("[DEBUG] retrying az association") - return resource.RetryableError(awsErr) - } + log.Printf("[DEBUG] retrying az association") + return resource.RetryableError(err) } return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + _, err = elbconn.AttachLoadBalancerToSubnets(attachOpts) + } if err != nil { return fmt.Errorf("Failure adding ELB subnets: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go index 32b02eec9c9..c8ff3a5e40b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsElbAttachment() *schema.Resource { @@ -59,7 +59,9 @@ func resourceAwsElbAttachmentCreate(d *schema.ResourceData, meta interface{}) er return nil }) - + if isResourceTimeoutError(err) { + _, err = elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts) + } if err != nil { return fmt.Errorf("Failure registering instances with ELB: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go index 7a1a56113f3..f724513441a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go @@ -15,11 +15,11 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/service/emr" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEMRCluster() *schema.Resource { @@ -95,10 +95,12 @@ func resourceAwsEMRCluster() *schema.Resource { Required: true, }, "master_instance_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"master_instance_group"}, + Deprecated: "use `master_instance_group` configuration block `instance_type` argument instead", }, "additional_info": { Type: schema.TypeString, @@ -112,16 +114,20 @@ func resourceAwsEMRCluster() *schema.Resource { }, }, "core_instance_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"core_instance_group"}, + Deprecated: "use `core_instance_group` configuration block `instance_type` argument instead", }, "core_instance_count": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), - Computed: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + Computed: true, + ConflictsWith: []string{"core_instance_group"}, + Deprecated: "use `core_instance_group` configuration block `instance_count` argument instead", }, "cluster_state": { Type: schema.TypeString, @@ -249,11 +255,160 @@ func resourceAwsEMRCluster() *schema.Resource { }, }, }, + "core_instance_group": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"core_instance_count", "core_instance_type", "instance_group"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_policy": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + ValidateFunc: 
validation.ValidateJsonString, + }, + "bid_price": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ebs_config": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsEmrEbsVolumeType(), + }, + "volumes_per_instance": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + }, + }, + }, + Set: resourceAwsEMRClusterEBSConfigHash, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: validation.IntAtLeast(1), + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "master_instance_group": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"master_instance_type", "instance_group"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bid_price": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ebs_config": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsEmrEbsVolumeType(), + }, + "volumes_per_instance": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + }, + }, + }, + Set: resourceAwsEMRClusterEBSConfigHash, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + ValidateFunc: validation.IntInSlice([]int{1, 3}), + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, "instance_group": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Computed: true, + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"core_instance_group", "master_instance_group"}, + Deprecated: "use `master_instance_group` configuration block, `core_instance_group` configuration block, and `aws_emr_instance_group` resource(s) instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "bid_price": { @@ -485,6 +640,11 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error keepJobFlowAliveWhenNoSteps = v.(bool) } + // For multiple master nodes, EMR automatically enables + // termination protection and ignores this configuration at launch. + // There is additional handling after the job flow is running + // to potentially disable termination protection to match the + // desired Terraform configuration. 
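// The flat arguments above (master_instance_type, core_instance_type,
// core_instance_count) pair two schema mechanisms: Deprecated emits a warning
// while the argument keeps working, and ConflictsWith turns simultaneous use
// of the old argument and its replacement block into a plan-time error. A
// stripped-down sketch of the pattern (the inner block schema is elided):
package aws

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

func deprecatedArgumentSketch() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"master_instance_type": {
			Type:          schema.TypeString,
			Optional:      true,
			Computed:      true,
			ConflictsWith: []string{"master_instance_group"},
			Deprecated:    "use `master_instance_group` configuration block `instance_type` argument instead",
		},
		"master_instance_group": {
			Type:     schema.TypeList,
			Optional: true,
			Computed: true,
			MaxItems: 1,
			Elem:     &schema.Resource{Schema: map[string]*schema.Schema{ /* instance_type, instance_count, ... */ }},
		},
	}
}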
terminationProtection := false if v, ok := d.GetOk("termination_protection"); ok { terminationProtection = v.(bool) @@ -494,6 +654,59 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error TerminationProtected: aws.Bool(terminationProtection), } + if l := d.Get("master_instance_group").([]interface{}); len(l) > 0 && l[0] != nil { + m := l[0].(map[string]interface{}) + + instanceGroup := &emr.InstanceGroupConfig{ + InstanceCount: aws.Int64(int64(m["instance_count"].(int))), + InstanceRole: aws.String(emr.InstanceRoleTypeMaster), + InstanceType: aws.String(m["instance_type"].(string)), + Market: aws.String(emr.MarketTypeOnDemand), + Name: aws.String(m["name"].(string)), + } + + if v, ok := m["bid_price"]; ok && v.(string) != "" { + instanceGroup.BidPrice = aws.String(v.(string)) + instanceGroup.Market = aws.String(emr.MarketTypeSpot) + } + + expandEbsConfig(m, instanceGroup) + + instanceConfig.InstanceGroups = append(instanceConfig.InstanceGroups, instanceGroup) + } + + if l := d.Get("core_instance_group").([]interface{}); len(l) > 0 && l[0] != nil { + m := l[0].(map[string]interface{}) + + instanceGroup := &emr.InstanceGroupConfig{ + InstanceCount: aws.Int64(int64(m["instance_count"].(int))), + InstanceRole: aws.String(emr.InstanceRoleTypeCore), + InstanceType: aws.String(m["instance_type"].(string)), + Market: aws.String(emr.MarketTypeOnDemand), + Name: aws.String(m["name"].(string)), + } + + if v, ok := m["autoscaling_policy"]; ok && v.(string) != "" { + var autoScalingPolicy *emr.AutoScalingPolicy + + if err := json.Unmarshal([]byte(v.(string)), &autoScalingPolicy); err != nil { + return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %s", err) + } + + instanceGroup.AutoScalingPolicy = autoScalingPolicy + } + + if v, ok := m["bid_price"]; ok && v.(string) != "" { + instanceGroup.BidPrice = aws.String(v.(string)) + instanceGroup.Market = aws.String(emr.MarketTypeSpot) + } + + expandEbsConfig(m, instanceGroup) + + instanceConfig.InstanceGroups = append(instanceConfig.InstanceGroups, instanceGroup) + } + + // DEPRECATED: Remove in a future major version if v, ok := d.GetOk("master_instance_type"); ok { masterInstanceGroupConfig := &emr.InstanceGroupConfig{ InstanceRole: aws.String("MASTER"), @@ -503,6 +716,7 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error instanceConfig.InstanceGroups = append(instanceConfig.InstanceGroups, masterInstanceGroupConfig) } + // DEPRECATED: Remove in a future major version var coreInstanceType string if v, ok := d.GetOk("core_instance_type"); ok { coreInstanceType = v.(string) @@ -565,6 +779,8 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error instanceConfig.ServiceAccessSecurityGroup = aws.String(v.(string)) } } + + // DEPRECATED: Remove in a future major version if v, ok := d.GetOk("instance_group"); ok { instanceGroupConfigs := v.(*schema.Set).List() instanceGroups, err := expandInstanceGroupConfigs(instanceGroupConfigs) @@ -675,8 +891,11 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.RunJobFlow(params) + } if err != nil { - return err + return fmt.Errorf("error running EMR Job Flow: %s", err) } d.SetId(*resp.JobFlowId) @@ -700,11 +919,28 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error Delay: 30 * time.Second, // Wait 30 secs before starting } - _, err = stateConf.WaitForState() + clusterRaw, err := 
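// A pattern repeated throughout this diff: resource.Retry polls until success
// or timeout, and isResourceTimeoutError then grants one final, non-retried
// attempt so an operation that merely outlived the retry window is not
// misreported as a failure. A generic sketch, assuming isResourceTimeoutError
// is the provider helper used in the hunks above:
package aws

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

func retryThenFinalAttempt(timeout time.Duration, do func() error, retryable func(error) bool) error {
	err := resource.Retry(timeout, func() *resource.RetryError {
		if err := do(); err != nil {
			if retryable(err) {
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	// The loop timed out without a hard failure: try once more outside it.
	if isResourceTimeoutError(err) {
		err = do()
	}
	return err
}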
stateConf.WaitForState() if err != nil { return fmt.Errorf("Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\": %s", err) } + // For multiple master nodes, EMR automatically enables + // termination protection and ignores the configuration at launch. + // This additional handling is to potentially disable termination + // protection to match the desired Terraform configuration. + cluster := clusterRaw.(*emr.Cluster) + + if aws.BoolValue(cluster.TerminationProtected) != terminationProtection { + input := &emr.SetTerminationProtectionInput{ + JobFlowIds: []*string{aws.String(d.Id())}, + TerminationProtected: aws.Bool(terminationProtection), + } + + if _, err := conn.SetTerminationProtection(input); err != nil { + return fmt.Errorf("error setting EMR Cluster (%s) termination protection to match configuration: %s", d.Id(), err) + } + } + return resourceAwsEMRClusterRead(d, meta) } @@ -741,24 +977,47 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error { } instanceGroups, err := fetchAllEMRInstanceGroups(emrconn, d.Id()) - if err == nil { - coreGroup := emrCoreInstanceGroup(instanceGroups) - if coreGroup != nil { - d.Set("core_instance_type", coreGroup.InstanceType) - d.Set("core_instance_count", coreGroup.RequestedInstanceCount) - } - masterGroup := findMasterGroup(instanceGroups) - if masterGroup != nil { - d.Set("master_instance_type", masterGroup.InstanceType) - } - flattenedInstanceGroups, err := flattenInstanceGroups(instanceGroups) - if err != nil { - return fmt.Errorf("error flattening instance groups: %+v", err) - } - if err := d.Set("instance_group", flattenedInstanceGroups); err != nil { - return fmt.Errorf("[ERR] Error setting EMR instance groups: %s", err) - } + if err != nil { + return err + } + + coreGroup := emrCoreInstanceGroup(instanceGroups) + masterGroup := findMasterGroup(instanceGroups) + + d.Set("core_instance_count", 0) + d.Set("core_instance_type", "") + d.Set("master_instance_type", "") + + if coreGroup != nil { + d.Set("core_instance_type", coreGroup.InstanceType) + d.Set("core_instance_count", coreGroup.RequestedInstanceCount) + } + + if masterGroup != nil { + d.Set("master_instance_type", masterGroup.InstanceType) + } + + flattenedInstanceGroups, err := flattenInstanceGroups(instanceGroups) + if err != nil { + return fmt.Errorf("error flattening instance groups: %s", err) + } + if err := d.Set("instance_group", flattenedInstanceGroups); err != nil { + return fmt.Errorf("error setting instance_group: %s", err) + } + + flattenedCoreInstanceGroup, err := flattenEmrCoreInstanceGroup(coreGroup) + + if err != nil { + return fmt.Errorf("error flattening core_instance_group: %s", err) + } + + if err := d.Set("core_instance_group", flattenedCoreInstanceGroup); err != nil { + return fmt.Errorf("error setting core_instance_group: %s", err) + } + + if err := d.Set("master_instance_group", flattenEmrMasterInstanceGroup(masterGroup)); err != nil { + return fmt.Errorf("error setting master_instance_group: %s", err) } d.Set("name", cluster.Name) @@ -925,6 +1184,102 @@ func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("core_instance_group.0.autoscaling_policy") { + autoscalingPolicyStr := d.Get("core_instance_group.0.autoscaling_policy").(string) + instanceGroupID := d.Get("core_instance_group.0.id").(string) + + if autoscalingPolicyStr != "" { + var autoScalingPolicy *emr.AutoScalingPolicy + + if err := json.Unmarshal([]byte(autoscalingPolicyStr), &autoScalingPolicy); err != nil { + 
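// The autoscaling_policy argument handled above is raw JSON unmarshalled into
// emr.AutoScalingPolicy. An illustrative document whose field names follow the
// EMR API shape; the capacities and thresholds are placeholder assumptions,
// not recommended values:
package aws

const exampleAutoScalingPolicy = `{
  "Constraints": {"MinCapacity": 1, "MaxCapacity": 4},
  "Rules": [
    {
      "Name": "ScaleOutOnLowAvailableMemory",
      "Action": {
        "SimpleScalingPolicyConfiguration": {
          "AdjustmentType": "CHANGE_IN_CAPACITY",
          "ScalingAdjustment": 1,
          "CoolDown": 300
        }
      },
      "Trigger": {
        "CloudWatchAlarmDefinition": {
          "ComparisonOperator": "LESS_THAN",
          "EvaluationPeriods": 1,
          "MetricName": "YARNMemoryAvailablePercentage",
          "Namespace": "AWS/ElasticMapReduce",
          "Period": 300,
          "Statistic": "AVERAGE",
          "Threshold": 15,
          "Unit": "PERCENT"
        }
      }
    }
  ]
}`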
return fmt.Errorf("error parsing core_instance_group Auto Scaling Policy JSON: %s", err) + } + + input := &emr.PutAutoScalingPolicyInput{ + ClusterId: aws.String(d.Id()), + AutoScalingPolicy: autoScalingPolicy, + InstanceGroupId: aws.String(instanceGroupID), + } + + if _, err := conn.PutAutoScalingPolicy(input); err != nil { + return fmt.Errorf("error updating EMR Cluster (%s) Instance Group (%s) Auto Scaling Policy: %s", d.Id(), instanceGroupID, err) + } + } else { + input := &emr.RemoveAutoScalingPolicyInput{ + ClusterId: aws.String(d.Id()), + InstanceGroupId: aws.String(instanceGroupID), + } + + if _, err := conn.RemoveAutoScalingPolicy(input); err != nil { + return fmt.Errorf("error removing EMR Cluster (%s) Instance Group (%s) Auto Scaling Policy: %s", d.Id(), instanceGroupID, err) + } + + // RemoveAutoScalingPolicy seems to have eventual consistency. + // Retry reading Instance Group configuration until the policy is removed. + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + autoscalingPolicy, err := getEmrCoreInstanceGroupAutoscalingPolicy(conn, d.Id()) + + if err != nil { + return resource.NonRetryableError(err) + } + + if autoscalingPolicy != nil { + return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) Instance Group (%s) Auto Scaling Policy still exists", d.Id(), instanceGroupID)) + } + + return nil + }) + + if isResourceTimeoutError(err) { + var autoscalingPolicy *emr.AutoScalingPolicyDescription + + autoscalingPolicy, err = getEmrCoreInstanceGroupAutoscalingPolicy(conn, d.Id()) + + if autoscalingPolicy != nil { + err = fmt.Errorf("EMR Cluster (%s) Instance Group (%s) Auto Scaling Policy still exists", d.Id(), instanceGroupID) + } + } + + if err != nil { + return fmt.Errorf("error waiting for EMR Cluster (%s) Instance Group (%s) Auto Scaling Policy removal: %s", d.Id(), instanceGroupID, err) + } + } + } + + if d.HasChange("core_instance_group.0.instance_count") { + instanceGroupID := d.Get("core_instance_group.0.id").(string) + + input := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { + InstanceGroupId: aws.String(instanceGroupID), + InstanceCount: aws.Int64(int64(d.Get("core_instance_group.0.instance_count").(int))), + }, + }, + } + + if _, err := conn.ModifyInstanceGroups(input); err != nil { + return fmt.Errorf("error modifying EMR Cluster (%s) Instance Group (%s): %s", d.Id(), instanceGroupID, err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{ + emr.InstanceGroupStateBootstrapping, + emr.InstanceGroupStateProvisioning, + emr.InstanceGroupStateReconfiguring, + emr.InstanceGroupStateResizing, + }, + Target: []string{emr.InstanceGroupStateRunning}, + Refresh: instanceGroupStateRefresh(conn, d.Id(), instanceGroupID), + Timeout: 20 * time.Minute, + Delay: 10 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("error waiting for EMR Cluster (%s) Instance Group (%s) modification: %s", d.Id(), instanceGroupID, err) + } + } + if d.HasChange("instance_group") { o, n := d.GetChange("instance_group") oSet := o.(*schema.Set).List() @@ -938,6 +1293,11 @@ func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error continue } + // Prevent duplicate PutAutoScalingPolicy from earlier update logic + if nInstanceGroup["id"] == d.Get("core_instance_group.0.id").(string) && d.HasChange("core_instance_group.0.autoscaling_policy") { + continue + } + if v, ok := nInstanceGroup["autoscaling_policy"]; ok && v.(string) != "" { var autoScalingPolicy 
*emr.AutoScalingPolicy @@ -990,48 +1350,72 @@ func resourceAwsEMRClusterDelete(d *schema.ResourceData, meta interface{}) error return err } + input := &emr.ListInstancesInput{ + ClusterId: aws.String(d.Id()), + } + var resp *emr.ListInstancesOutput + var count int err = resource.Retry(20*time.Minute, func() *resource.RetryError { - resp, err := conn.ListInstances(&emr.ListInstancesInput{ - ClusterId: aws.String(d.Id()), - }) + var err error + resp, err = conn.ListInstances(input) if err != nil { return resource.NonRetryableError(err) } - instanceCount := len(resp.Instances) - - if resp == nil || instanceCount == 0 { - log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", d.Id()) - return nil + count = countEMRRemainingInstances(resp, d.Id()) + if count != 0 { + return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", d.Id(), count)) } + return nil + }) - // Collect instance status states, wait for all instances to be terminated - // before moving on - var terminated []string - for j, i := range resp.Instances { - if i.Status != nil { - if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated { - terminated = append(terminated, *i.Ec2InstanceId) - } - } else { - log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId) - } - } - if len(terminated) == instanceCount { - log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, d.Id()) - return nil + if isResourceTimeoutError(err) { + resp, err = conn.ListInstances(input) + + if err == nil { + count = countEMRRemainingInstances(resp, d.Id()) } - return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining, retrying", d.Id(), len(resp.Instances))) - }) + } + + if count != 0 { + return fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", d.Id(), count) + } if err != nil { - return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain", d.Id()) + return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain: %s", d.Id(), err) } return nil } +func countEMRRemainingInstances(resp *emr.ListInstancesOutput, emrClusterId string) int { + instanceCount := len(resp.Instances) + + if resp == nil || instanceCount == 0 { + log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", emrClusterId) + return 0 + } + + // Collect instance status states, wait for all instances to be terminated + // before moving on + var terminated []string + for j, i := range resp.Instances { + if i.Status != nil { + if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated { + terminated = append(terminated, *i.Ec2InstanceId) + } + } else { + log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId) + } + } + if len(terminated) == instanceCount { + log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, emrClusterId) + return 0 + } + return len(resp.Instances) +} + func expandApplications(apps []interface{}) []*emr.Application { appOut := make([]*emr.Application, 0, len(apps)) @@ -1091,6 +1475,104 @@ func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{ return result } +func flattenEmrAutoScalingPolicyDescription(policy *emr.AutoScalingPolicyDescription) (string, error) { + if policy == nil { + return "", nil + } + + // AutoScalingPolicy has an additional Status field and null values that are causing a new hashcode to be generated + // for `instance_group`. 
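// One caveat in countEMRRemainingInstances above: len(resp.Instances) is
// evaluated before the resp == nil guard, so a nil response would panic on the
// field access before the guard can run. A nil-safe ordering of the same
// logic:
package aws

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

func countEMRRemainingInstancesSafe(resp *emr.ListInstancesOutput, emrClusterId string) int {
	if resp == nil || len(resp.Instances) == 0 {
		log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", emrClusterId)
		return 0
	}

	// Count terminated instances; only when every instance has reached the
	// terminated state is the cluster considered drained.
	terminated := 0
	for j, i := range resp.Instances {
		if i.Status == nil {
			log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, aws.StringValue(i.Ec2InstanceId))
			continue
		}
		if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated {
			terminated++
		}
	}
	if terminated == len(resp.Instances) {
		log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", len(resp.Instances), emrClusterId)
		return 0
	}
	return len(resp.Instances)
}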
+ // We are purposefully omitting that field and the null values here when we flatten the autoscaling policy string + // for the statefile. + for i, rule := range policy.Rules { + for j, dimension := range rule.Trigger.CloudWatchAlarmDefinition.Dimensions { + if *dimension.Key == "JobFlowId" { + tmpDimensions := append(policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[:j], policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[j+1:]...) + policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = tmpDimensions + } + } + if len(policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions) == 0 { + policy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = nil + } + } + + tmpAutoScalingPolicy := emr.AutoScalingPolicy{ + Constraints: policy.Constraints, + Rules: policy.Rules, + } + autoscalingPolicyConstraintsBytes, err := json.Marshal(tmpAutoScalingPolicy.Constraints) + if err != nil { + return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Constraints: %s", err) + } + autoscalingPolicyConstraintsString := string(autoscalingPolicyConstraintsBytes) + + autoscalingPolicyRulesBytes, err := json.Marshal(tmpAutoScalingPolicy.Rules) + if err != nil { + return "", fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Rules: %s", err) + } + + var rules []map[string]interface{} + if err := json.Unmarshal(autoscalingPolicyRulesBytes, &rules); err != nil { + return "", err + } + + var cleanRules []map[string]interface{} + for _, rule := range rules { + cleanRules = append(cleanRules, removeNil(rule)) + } + + withoutNulls, err := json.Marshal(cleanRules) + if err != nil { + return "", err + } + autoscalingPolicyRulesString := string(withoutNulls) + + autoscalingPolicyString := fmt.Sprintf("{\"Constraints\":%s,\"Rules\":%s}", autoscalingPolicyConstraintsString, autoscalingPolicyRulesString) + + return autoscalingPolicyString, nil +} + +func flattenEmrCoreInstanceGroup(instanceGroup *emr.InstanceGroup) ([]interface{}, error) { + if instanceGroup == nil { + return []interface{}{}, nil + } + + autoscalingPolicy, err := flattenEmrAutoScalingPolicyDescription(instanceGroup.AutoScalingPolicy) + + if err != nil { + return nil, err + } + + m := map[string]interface{}{ + "autoscaling_policy": autoscalingPolicy, + "bid_price": aws.StringValue(instanceGroup.BidPrice), + "ebs_config": flattenEBSConfig(instanceGroup.EbsBlockDevices), + "id": aws.StringValue(instanceGroup.Id), + "instance_count": aws.Int64Value(instanceGroup.RequestedInstanceCount), + "instance_type": aws.StringValue(instanceGroup.InstanceType), + "name": aws.StringValue(instanceGroup.Name), + } + + return []interface{}{m}, nil +} + +func flattenEmrMasterInstanceGroup(instanceGroup *emr.InstanceGroup) []interface{} { + if instanceGroup == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "bid_price": aws.StringValue(instanceGroup.BidPrice), + "ebs_config": flattenEBSConfig(instanceGroup.EbsBlockDevices), + "id": aws.StringValue(instanceGroup.Id), + "instance_count": aws.Int64Value(instanceGroup.RequestedInstanceCount), + "instance_type": aws.StringValue(instanceGroup.InstanceType), + "name": aws.StringValue(instanceGroup.Name), + } + + return []interface{}{m} +} + func flattenEmrKerberosAttributes(d *schema.ResourceData, kerberosAttributes *emr.KerberosAttributes) []map[string]interface{} { l := make([]map[string]interface{}, 0) @@ -1100,6 +1582,7 @@ func flattenEmrKerberosAttributes(d *schema.ResourceData, kerberosAttributes *em // Do not set from API: // * 
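// flattenEmrAutoScalingPolicyDescription above strips the server-added Status
// field, the injected JobFlowId dimension, and null properties so the string
// stored in state stays comparable to what the user wrote; the schema pairs
// this with suppressEquivalentJsonDiffs so reordered keys or whitespace do not
// produce spurious diffs. A sketch of such a suppressor using only the
// standard library (the provider's helper may be implemented differently):
package aws

import (
	"encoding/json"
	"reflect"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func suppressEquivalentJsonDiffsSketch(k, old, new string, d *schema.ResourceData) bool {
	var oldVal, newVal interface{}
	if err := json.Unmarshal([]byte(old), &oldVal); err != nil {
		return false
	}
	if err := json.Unmarshal([]byte(new), &newVal); err != nil {
		return false
	}
	// Two documents are equivalent if they decode to the same value tree,
	// regardless of whitespace or key ordering.
	return reflect.DeepEqual(oldVal, newVal)
}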
ad_domain_join_password + // * ad_domain_join_user // * cross_realm_trust_principal_password // * kdc_admin_password @@ -1112,8 +1595,8 @@ func flattenEmrKerberosAttributes(d *schema.ResourceData, kerberosAttributes *em m["ad_domain_join_password"] = v.(string) } - if kerberosAttributes.ADDomainJoinUser != nil { - m["ad_domain_join_user"] = *kerberosAttributes.ADDomainJoinUser + if v, ok := d.GetOk("kerberos_attributes.0.ad_domain_join_user"); ok { + m["ad_domain_join_user"] = v.(string) } if v, ok := d.GetOk("kerberos_attributes.0.cross_realm_trust_principal_password"); ok { @@ -1183,61 +1666,14 @@ func flattenInstanceGroup(ig *emr.InstanceGroup) (map[string]interface{}, error) attrs["name"] = *ig.Name } - if ig.AutoScalingPolicy != nil { - // AutoScalingPolicy has an additional Status field and null values that are causing a new hashcode to be generated - // for `instance_group`. - // We are purposefully omitting that field and the null values here when we flatten the autoscaling policy string - // for the statefile. - for i, rule := range ig.AutoScalingPolicy.Rules { - for j, dimension := range rule.Trigger.CloudWatchAlarmDefinition.Dimensions { - if *dimension.Key == "JobFlowId" { - tmpDimensions := append(ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[:j], ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[j+1:]...) - ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = tmpDimensions - } - } - if len(ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions) == 0 { - ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = nil - } - } - - tmpAutoScalingPolicy := emr.AutoScalingPolicy{ - Constraints: ig.AutoScalingPolicy.Constraints, - Rules: ig.AutoScalingPolicy.Rules, - } - autoscalingPolicyConstraintsBytes, err := json.Marshal(tmpAutoScalingPolicy.Constraints) - if err != nil { - return nil, fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Constraints: %s", err) - } - autoscalingPolicyConstraintsString := string(autoscalingPolicyConstraintsBytes) - - autoscalingPolicyRulesBytes, err := json.Marshal(tmpAutoScalingPolicy.Rules) - if err != nil { - return nil, fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Rules: %s", err) - } - - var rules []map[string]interface{} - if err := json.Unmarshal(autoscalingPolicyRulesBytes, &rules); err != nil { - return nil, err - } - - var cleanRules []map[string]interface{} - for _, rule := range rules { - cleanRules = append(cleanRules, removeNil(rule)) - } - - withoutNulls, err := json.Marshal(cleanRules) - if err != nil { - return nil, err - } - autoscalingPolicyRulesString := string(withoutNulls) - - autoscalingPolicyString := fmt.Sprintf("{\"Constraints\":%s,\"Rules\":%s}", autoscalingPolicyConstraintsString, autoscalingPolicyRulesString) + autoscalingPolicy, err := flattenEmrAutoScalingPolicyDescription(ig.AutoScalingPolicy) - attrs["autoscaling_policy"] = autoscalingPolicyString - } else { - attrs["autoscaling_policy"] = "" + if err != nil { + return nil, err } + attrs["autoscaling_policy"] = autoscalingPolicy + if attrs["name"] != nil { attrs["name"] = *ig.Name } @@ -1259,6 +1695,7 @@ func flattenInstanceGroups(igs []*emr.InstanceGroup) (*schema.Set, error) { } func flattenEBSConfig(ebsBlockDevices []*emr.EbsBlockDevice) *schema.Set { + ebsConfig := make([]interface{}, 0) for _, ebs := range ebsBlockDevices { ebsAttrs := make(map[string]interface{}) @@ -1720,3 +2157,34 @@ func 
resourceAwsEMRClusterEBSConfigHash(v interface{}) int { } return hashcode.String(buf.String()) } + +func getEmrCoreInstanceGroupAutoscalingPolicy(conn *emr.EMR, clusterID string) (*emr.AutoScalingPolicyDescription, error) { + instanceGroups, err := fetchAllEMRInstanceGroups(conn, clusterID) + + if err != nil { + return nil, err + } + + coreGroup := emrCoreInstanceGroup(instanceGroups) + + if coreGroup == nil { + return nil, fmt.Errorf("EMR Cluster (%s) Core Instance Group not found", clusterID) + } + + return coreGroup.AutoScalingPolicy, nil +} + +func fetchAllEMRInstanceGroups(conn *emr.EMR, clusterID string) ([]*emr.InstanceGroup, error) { + input := &emr.ListInstanceGroupsInput{ + ClusterId: aws.String(clusterID), + } + var groups []*emr.InstanceGroup + + err := conn.ListInstanceGroupsPages(input, func(page *emr.ListInstanceGroupsOutput, lastPage bool) bool { + groups = append(groups, page.InstanceGroups...) + + return !lastPage + }) + + return groups, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go index 9b0c80f07c9..d91b097c194 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go @@ -1,54 +1,66 @@ package aws import ( - "errors" + "encoding/json" + "fmt" "log" + "strings" "time" - "fmt" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/emr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) -var emrInstanceGroupNotFound = errors.New("No matching EMR Instance Group") - func resourceAwsEMRInstanceGroup() *schema.Resource { return &schema.Resource{ Create: resourceAwsEMRInstanceGroupCreate, Read: resourceAwsEMRInstanceGroupRead, Update: resourceAwsEMRInstanceGroupUpdate, Delete: resourceAwsEMRInstanceGroupDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.Split(d.Id(), "/") + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("Unexpected format of ID (%q), expected cluster-id/ig-id", d.Id()) + } + clusterID := idParts[0] + resourceID := idParts[1] + d.Set("cluster_id", clusterID) + d.SetId(resourceID) + return []*schema.ResourceData{d}, nil + }, + }, Schema: map[string]*schema.Schema{ - "cluster_id": { + "autoscaling_policy": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + ValidateFunc: validation.ValidateJsonString, + }, + "bid_price": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, - "instance_type": { + "cluster_id": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "instance_count": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "running_instance_count": { - Type: schema.TypeInt, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "name": { 
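// The aws_emr_instance_group importer above accepts a composite
// "cluster-id/ig-id" ID, since an instance group is only addressable through
// its cluster. A standalone sketch of that parsing; the import command below
// uses hypothetical IDs:
//
//	terraform import aws_emr_instance_group.task j-ABC123DEF456/ig-ABC123DEF456
package aws

import (
	"fmt"
	"strings"
)

func parseEmrInstanceGroupImportID(id string) (clusterID, groupID string, err error) {
	parts := strings.Split(id, "/")
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("unexpected format of ID (%q), expected cluster-id/ig-id", id)
	}
	return parts[0], parts[1], nil
}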
- Type: schema.TypeString, - Optional: true, - ForceNew: true, + "configurations_json": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + ValidateFunc: validation.ValidateJsonString, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, }, "ebs_optimized": { Type: schema.TypeBool, @@ -58,254 +70,419 @@ func resourceAwsEMRInstanceGroup() *schema.Resource { "ebs_config": { Type: schema.TypeSet, Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "iops": { Type: schema.TypeInt, Optional: true, + ForceNew: true, }, "size": { Type: schema.TypeInt, Required: true, + ForceNew: true, }, "type": { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validateAwsEmrEbsVolumeType(), }, "volumes_per_instance": { Type: schema.TypeInt, Optional: true, + ForceNew: true, + Default: 1, }, }, }, + Set: resourceAwsEMRClusterEBSConfigHash, + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "running_instance_count": { + Type: schema.TypeInt, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, }, }, } } -// Populates an emr.EbsConfiguration struct -func readEmrEBSConfig(d *schema.ResourceData) *emr.EbsConfiguration { - result := &emr.EbsConfiguration{} - if v, ok := d.GetOk("ebs_optimized"); ok { - result.EbsOptimized = aws.Bool(v.(bool)) +func resourceAwsEMRInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + instanceRole := emr.InstanceGroupTypeTask + groupConfig := &emr.InstanceGroupConfig{ + EbsConfiguration: readEmrEBSConfig(d), + InstanceRole: aws.String(instanceRole), + InstanceCount: aws.Int64(int64(d.Get("instance_count").(int))), + InstanceType: aws.String(d.Get("instance_type").(string)), + Name: aws.String(d.Get("name").(string)), } - ebsConfigs := make([]*emr.EbsBlockDeviceConfig, 0) - if rawConfig, ok := d.GetOk("ebs_config"); ok { - configList := rawConfig.(*schema.Set).List() - for _, config := range configList { - conf := config.(map[string]interface{}) - ebs := &emr.EbsBlockDeviceConfig{} - volumeSpec := &emr.VolumeSpecification{ - SizeInGB: aws.Int64(int64(conf["size"].(int))), - VolumeType: aws.String(conf["type"].(string)), - } - if v, ok := conf["iops"].(int); ok && v != 0 { - volumeSpec.Iops = aws.Int64(int64(v)) - } - if v, ok := conf["volumes_per_instance"].(int); ok && v != 0 { - ebs.VolumesPerInstance = aws.Int64(int64(v)) - } - ebs.VolumeSpecification = volumeSpec - ebsConfigs = append(ebsConfigs, ebs) + if v, ok := d.GetOk("autoscaling_policy"); ok { + var autoScalingPolicy *emr.AutoScalingPolicy + + if err := json.Unmarshal([]byte(v.(string)), &autoScalingPolicy); err != nil { + return fmt.Errorf("[DEBUG] error parsing Auto Scaling Policy %s", err) } + groupConfig.AutoScalingPolicy = autoScalingPolicy } - result.EbsBlockDeviceConfigs = ebsConfigs - return result -} -func resourceAwsEMRInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).emrconn - - clusterId := d.Get("cluster_id").(string) - instanceType := d.Get("instance_type").(string) - instanceCount := d.Get("instance_count").(int) - groupName := d.Get("name").(string) + if v, ok := 
d.GetOk("configurations_json"); ok { + info, err := structure.NormalizeJsonString(v) + if err != nil { + return fmt.Errorf("configurations_json contains an invalid JSON: %s", err) + } + groupConfig.Configurations, err = expandConfigurationJson(info) + if err != nil { + return fmt.Errorf("Error reading EMR configurations_json: %s", err) + } + } - ebsConfig := readEmrEBSConfig(d) + groupConfig.Market = aws.String(emr.MarketTypeOnDemand) + if v, ok := d.GetOk("bid_price"); ok { + groupConfig.BidPrice = aws.String(v.(string)) + groupConfig.Market = aws.String(emr.MarketTypeSpot) + } params := &emr.AddInstanceGroupsInput{ - InstanceGroups: []*emr.InstanceGroupConfig{ - { - InstanceRole: aws.String("TASK"), - InstanceCount: aws.Int64(int64(instanceCount)), - InstanceType: aws.String(instanceType), - Name: aws.String(groupName), - EbsConfiguration: ebsConfig, - }, - }, - JobFlowId: aws.String(clusterId), + InstanceGroups: []*emr.InstanceGroupConfig{groupConfig}, + JobFlowId: aws.String(d.Get("cluster_id").(string)), } - log.Printf("[DEBUG] Creating EMR task group params: %s", params) + log.Printf("[DEBUG] Creating EMR %s group with the following params: %s", instanceRole, params) resp, err := conn.AddInstanceGroups(params) if err != nil { return err } - log.Printf("[DEBUG] Created EMR task group finished: %#v", resp) + log.Printf("[DEBUG] Created EMR %s group finished: %#v", instanceRole, resp) if resp == nil || len(resp.InstanceGroupIds) == 0 { return fmt.Errorf("Error creating instance groups: no instance group returned") } d.SetId(*resp.InstanceGroupIds[0]) - return nil + return resourceAwsEMRInstanceGroupRead(d, meta) } func resourceAwsEMRInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).emrconn - group, err := fetchEMRInstanceGroup(conn, d.Get("cluster_id").(string), d.Id()) - if err != nil { - switch err { - case emrInstanceGroupNotFound: - log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id()) - d.SetId("") - return nil - default: - return err - } - } - // Guard against the chance of fetchEMRInstanceGroup returning nil group but - // not a emrInstanceGroupNotFound error - if group == nil { + ig, err := fetchEMRInstanceGroup(conn, d.Get("cluster_id").(string), d.Id()) + + if isResourceNotFoundError(err) { log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id()) d.SetId("") return nil } - d.Set("name", group.Name) - d.Set("instance_count", group.RequestedInstanceCount) - d.Set("running_instance_count", group.RunningInstanceCount) - d.Set("instance_type", group.InstanceType) - if group.Status != nil && group.Status.State != nil { - d.Set("status", group.Status.State) + if err != nil { + return fmt.Errorf("error reading EMR Instance Group (%s): %s", d.Id(), err) } - return nil -} + if ig.Status != nil { + switch aws.StringValue(ig.Status.State) { + case emr.InstanceGroupStateTerminating: + fallthrough + case emr.InstanceGroupStateTerminated: + log.Printf("[DEBUG] EMR Instance Group (%s) terminated, removing", d.Id()) + d.SetId("") + return nil + } + } -func fetchAllEMRInstanceGroups(conn *emr.EMR, clusterId string) ([]*emr.InstanceGroup, error) { - req := &emr.ListInstanceGroupsInput{ - ClusterId: aws.String(clusterId), + switch { + case len(ig.Configurations) > 0: + configOut, err := flattenConfigurationJson(ig.Configurations) + if err != nil { + return fmt.Errorf("Error reading EMR instance group configurations: %s", err) + } + if err := d.Set("configurations_json", configOut); err != nil { + return 
fmt.Errorf("Error setting EMR configurations_json for instance group (%s): %s", d.Id(), err) + } + default: + d.Set("configurations_json", "") } - var groups []*emr.InstanceGroup - marker := aws.String("intitial") - for marker != nil { - log.Printf("[DEBUG] EMR Cluster Instance Marker: %s", *marker) - respGrps, errGrps := conn.ListInstanceGroups(req) - if errGrps != nil { - return nil, fmt.Errorf("Error reading EMR cluster (%s): %s", clusterId, errGrps) + var autoscalingPolicyString string + if ig.AutoScalingPolicy != nil { + // AutoScalingPolicy has an additional Status field and null values that are causing a new hashcode to be generated for `instance_group`. + // We are purposefully omitting that field and the null values here when we flatten the autoscaling policy string for the statefile. + for i, rule := range ig.AutoScalingPolicy.Rules { + for j, dimension := range rule.Trigger.CloudWatchAlarmDefinition.Dimensions { + if aws.StringValue(dimension.Key) == "JobFlowId" { + tmpDimensions := append(ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[:j], ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions[j+1:]...) + ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = tmpDimensions + } + } + + if len(ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions) == 0 { + ig.AutoScalingPolicy.Rules[i].Trigger.CloudWatchAlarmDefinition.Dimensions = nil + } } - if respGrps == nil { - return nil, fmt.Errorf("Error reading EMR Instance Group for cluster (%s)", clusterId) + + autoscalingPolicyConstraintsBytes, err := json.Marshal(ig.AutoScalingPolicy.Constraints) + if err != nil { + return fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Constraints: %s", err) } - if respGrps.InstanceGroups != nil { - groups = append(groups, respGrps.InstanceGroups...) 
- } else { - log.Printf("[DEBUG] EMR Instance Group list was empty") + autoscalingPolicyRulesBytes, err := marshalWithoutNil(ig.AutoScalingPolicy.Rules) + if err != nil { + return fmt.Errorf("error parsing EMR Cluster Instance Groups AutoScalingPolicy Rules: %s", err) } - marker = respGrps.Marker + + autoscalingPolicyString = fmt.Sprintf("{\"Constraints\":%s,\"Rules\":%s}", string(autoscalingPolicyConstraintsBytes), string(autoscalingPolicyRulesBytes)) } + d.Set("autoscaling_policy", autoscalingPolicyString) - if len(groups) == 0 { - return nil, fmt.Errorf("No instance groups found for EMR Cluster (%s)", clusterId) + d.Set("bid_price", ig.BidPrice) + if err := d.Set("ebs_config", flattenEBSConfig(ig.EbsBlockDevices)); err != nil { + return fmt.Errorf("error setting ebs_config: %s", err) + } + d.Set("ebs_optimized", ig.EbsOptimized) + d.Set("instance_count", ig.RequestedInstanceCount) + d.Set("instance_role", ig.InstanceGroupType) + d.Set("instance_type", ig.InstanceType) + d.Set("name", ig.Name) + d.Set("running_instance_count", ig.RunningInstanceCount) + + if ig.Status != nil { + d.Set("status", ig.Status.State) } - return groups, nil + return nil } -func fetchEMRInstanceGroup(conn *emr.EMR, clusterId, groupId string) (*emr.InstanceGroup, error) { - groups, err := fetchAllEMRInstanceGroups(conn, clusterId) - if err != nil { - return nil, err - } +func resourceAwsEMRInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn - var group *emr.InstanceGroup - for _, ig := range groups { - if groupId == *ig.Id { - group = ig - break + log.Printf("[DEBUG] Modify EMR task group") + if d.HasChange("instance_count") || d.HasChange("configurations_json") { + instanceGroupModifyConfig := emr.InstanceGroupModifyConfig{ + InstanceGroupId: aws.String(d.Id()), + } + + if d.HasChange("instance_count") { + instanceCount := d.Get("instance_count").(int) + instanceGroupModifyConfig.InstanceCount = aws.Int64(int64(instanceCount)) + } + if d.HasChange("configurations_json") { + if v, ok := d.GetOk("configurations_json"); ok { + info, err := structure.NormalizeJsonString(v) + if err != nil { + return fmt.Errorf("configurations_json contains an invalid JSON: %s", err) + } + instanceGroupModifyConfig.Configurations, err = expandConfigurationJson(info) + if err != nil { + return fmt.Errorf("Error reading EMR configurations_json: %s", err) + } + } + } + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + &instanceGroupModifyConfig, + }, + } + + _, err := conn.ModifyInstanceGroups(params) + if err != nil { + return fmt.Errorf("error modifying EMR Instance Group (%s): %s", d.Id(), err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{ + emr.InstanceGroupStateBootstrapping, + emr.InstanceGroupStateProvisioning, + emr.InstanceGroupStateResizing, + }, + Target: []string{emr.InstanceGroupStateRunning}, + Refresh: instanceGroupStateRefresh(conn, d.Get("cluster_id").(string), d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("error waiting for EMR Instance Group (%s) modification: %s", d.Id(), err) } } - if group != nil { - return group, nil + if d.HasChange("autoscaling_policy") { + var autoScalingPolicy *emr.AutoScalingPolicy + + if err := json.Unmarshal([]byte(d.Get("autoscaling_policy").(string)), &autoScalingPolicy); err != nil { + return fmt.Errorf("error parsing EMR Auto Scaling 
Policy JSON for update: %s", err) + } + + putAutoScalingPolicy := &emr.PutAutoScalingPolicyInput{ + ClusterId: aws.String(d.Get("cluster_id").(string)), + AutoScalingPolicy: autoScalingPolicy, + InstanceGroupId: aws.String(d.Id()), + } + + if _, err := conn.PutAutoScalingPolicy(putAutoScalingPolicy); err != nil { + return fmt.Errorf("error updating autoscaling policy for instance group %q: %s", d.Id(), err) + } } - return nil, emrInstanceGroupNotFound + return resourceAwsEMRInstanceGroupRead(d, meta) } -func resourceAwsEMRInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceAwsEMRInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).emrconn - log.Printf("[DEBUG] Modify EMR task group") - instanceCount := d.Get("instance_count").(int) - + log.Printf("[WARN] AWS EMR Instance Group does not support DELETE; resizing cluster to zero before removing from state") params := &emr.ModifyInstanceGroupsInput{ InstanceGroups: []*emr.InstanceGroupModifyConfig{ { InstanceGroupId: aws.String(d.Id()), - InstanceCount: aws.Int64(int64(instanceCount)), + InstanceCount: aws.Int64(0), }, }, } - _, err := conn.ModifyInstanceGroups(params) - if err != nil { - return err + if _, err := conn.ModifyInstanceGroups(params); err != nil { + return fmt.Errorf("error draining EMR Instance Group (%s): %s", d.Id(), err) } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PROVISIONING", "BOOTSTRAPPING", "RESIZING"}, - Target: []string{"RUNNING"}, - Refresh: instanceGroupStateRefresh(conn, d.Get("cluster_id").(string), d.Id()), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to terminate: %s", d.Id(), err) - } - - return resourceAwsEMRInstanceGroupRead(d, meta) + return nil } -func instanceGroupStateRefresh(conn *emr.EMR, clusterID, igID string) resource.StateRefreshFunc { +func instanceGroupStateRefresh(conn *emr.EMR, clusterID, groupID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - group, err := fetchEMRInstanceGroup(conn, clusterID, igID) + ig, err := fetchEMRInstanceGroup(conn, clusterID, groupID) if err != nil { return nil, "Not Found", err } - if group.Status == nil || group.Status.State == nil { + if ig.Status == nil || ig.Status.State == nil { log.Printf("[WARN] ERM Instance Group found, but without state") return nil, "Undefined", fmt.Errorf("Undefined EMR Cluster Instance Group state") } - return group, *group.Status.State, nil + return ig, *ig.Status.State, nil } } -func resourceAwsEMRInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] AWS EMR Instance Group does not support DELETE; resizing cluster to zero before removing from state") - conn := meta.(*AWSClient).emrconn - params := &emr.ModifyInstanceGroupsInput{ - InstanceGroups: []*emr.InstanceGroupModifyConfig{ - { - InstanceGroupId: aws.String(d.Id()), - InstanceCount: aws.Int64(0), - }, - }, +func fetchEMRInstanceGroup(conn *emr.EMR, clusterID, groupID string) (*emr.InstanceGroup, error) { + input := &emr.ListInstanceGroupsInput{ClusterId: aws.String(clusterID)} + + var groups []*emr.InstanceGroup + err := conn.ListInstanceGroupsPages(input, func(page *emr.ListInstanceGroupsOutput, lastPage bool) bool { + groups = append(groups, page.InstanceGroups...) 
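// Several hunks above wait on instanceGroupStateRefresh through
// resource.StateChangeConf. The Refresh contract: return the current object,
// its state string, and an error; WaitForState polls until the state reaches
// Target, fails on error, or gives up at Timeout. A consolidated sketch of the
// waiter built around the refresh function defined above:
package aws

import (
	"time"

	"github.com/aws/aws-sdk-go/service/emr"
	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

func waitForEMRInstanceGroupRunning(conn *emr.EMR, clusterID, groupID string) error {
	stateConf := &resource.StateChangeConf{
		Pending: []string{
			emr.InstanceGroupStateBootstrapping,
			emr.InstanceGroupStateProvisioning,
			emr.InstanceGroupStateResizing,
		},
		Target:  []string{emr.InstanceGroupStateRunning},
		Refresh: instanceGroupStateRefresh(conn, clusterID, groupID),
		Timeout: 10 * time.Minute,
		Delay:   10 * time.Second,
	}

	_, err := stateConf.WaitForState()
	return err
}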
+ + return !lastPage + }) + + if err != nil { + return nil, fmt.Errorf("unable to retrieve EMR Cluster (%q): %s", clusterID, err) + } + + if len(groups) == 0 { + return nil, fmt.Errorf("no instance groups found for EMR Cluster (%s)", clusterID) + } + + var ig *emr.InstanceGroup + for _, group := range groups { + if groupID == aws.StringValue(group.Id) { + ig = group + break + } + } + + if ig == nil { + return nil, &resource.NotFoundError{} + } + + return ig, nil +} + +// readEmrEBSConfig populates an emr.EbsConfiguration struct +func readEmrEBSConfig(d *schema.ResourceData) *emr.EbsConfiguration { + result := &emr.EbsConfiguration{} + if v, ok := d.GetOk("ebs_optimized"); ok { + result.EbsOptimized = aws.Bool(v.(bool)) + } + + ebsConfigs := make([]*emr.EbsBlockDeviceConfig, 0) + if rawConfig, ok := d.GetOk("ebs_config"); ok { + configList := rawConfig.(*schema.Set).List() + for _, config := range configList { + conf := config.(map[string]interface{}) + ebs := &emr.EbsBlockDeviceConfig{} + volumeSpec := &emr.VolumeSpecification{ + SizeInGB: aws.Int64(int64(conf["size"].(int))), + VolumeType: aws.String(conf["type"].(string)), + } + if v, ok := conf["iops"].(int); ok && v != 0 { + volumeSpec.Iops = aws.Int64(int64(v)) + } + if v, ok := conf["volumes_per_instance"].(int); ok && v != 0 { + ebs.VolumesPerInstance = aws.Int64(int64(v)) + } + ebs.VolumeSpecification = volumeSpec + ebsConfigs = append(ebsConfigs, ebs) + } + } + result.EbsBlockDeviceConfigs = ebsConfigs + return result +} + +// marshalWithoutNil returns a JSON document of v stripped of any null properties +func marshalWithoutNil(v interface{}) ([]byte, error) { + //removeNil is a helper for stripping nil values + removeNil := func(data map[string]interface{}) map[string]interface{} { + + m := make(map[string]interface{}) + for k, v := range data { + if v == nil { + continue + } + + switch v := v.(type) { + case map[string]interface{}: + m[k] = removeNil(v) + default: + m[k] = v + } + } + + return m + } + + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + var rules []map[string]interface{} + if err := json.Unmarshal(b, &rules); err != nil { + return nil, err + } + + var cleanRules []map[string]interface{} + for _, rule := range rules { + cleanRules = append(cleanRules, removeNil(rule)) } - _, err := conn.ModifyInstanceGroups(params) - return err + return json.Marshal(cleanRules) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go index caad7eed7d2..666777d4583 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go @@ -5,9 +5,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/emr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsEMRSecurityConfiguration() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go index fc3700300a2..5a87590b42c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsFlowLog() *schema.Resource { @@ -86,6 +86,13 @@ func resourceAwsFlowLog() *schema.Resource { Required: true, ForceNew: true, }, + + "log_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, }, } } @@ -134,6 +141,9 @@ func resourceAwsLogFlowCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("log_group_name"); ok && v != "" { opts.LogGroupName = aws.String(v.(string)) } + if v, ok := d.GetOk("log_format"); ok && v != "" { + opts.LogFormat = aws.String(v.(string)) + } log.Printf( "[DEBUG] Flow Log Create configuration: %s", opts) @@ -181,7 +191,7 @@ func resourceAwsLogFlowRead(d *schema.ResourceData, meta interface{}) error { d.Set("log_destination_type", fl.LogDestinationType) d.Set("log_group_name", fl.LogGroupName) d.Set("iam_role_arn", fl.DeliverLogsPermissionArn) - + d.Set("log_format", fl.LogFormat) var resourceKey string if strings.HasPrefix(*fl.ResourceId, "vpc-") { resourceKey = "vpc_id" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fms_admin_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fms_admin_account.go new file mode 100644 index 00000000000..9a6e147ea8e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fms_admin_account.go @@ -0,0 +1,171 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fms" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsFmsAdminAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFmsAdminAccountCreate, + Read: resourceAwsFmsAdminAccountRead, + Delete: resourceAwsFmsAdminAccountDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + }, + } +} + +func resourceAwsFmsAdminAccountCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + // Ensure there is not an existing FMS Admin Account + getAdminAccountOutput, err := conn.GetAdminAccount(&fms.GetAdminAccountInput{}) + + if err != nil && !isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + return fmt.Errorf("error getting FMS Admin Account: %s", err) + } + + if getAdminAccountOutput != nil && getAdminAccountOutput.AdminAccount 
!= nil { + return fmt.Errorf("FMS Admin Account (%s) already associated: import this Terraform resource to manage", aws.StringValue(getAdminAccountOutput.AdminAccount)) + } + + accountID := meta.(*AWSClient).accountid + + if v, ok := d.GetOk("account_id"); ok { + accountID = v.(string) + } + + stateConf := &resource.StateChangeConf{ + Target: []string{accountID}, + Refresh: associateFmsAdminAccountRefreshFunc(conn, accountID), + Timeout: 1 * time.Minute, + Delay: 10 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("error waiting for FMS Admin Account (%s) association: %s", accountID, err) + } + + d.SetId(accountID) + + return resourceAwsFmsAdminAccountRead(d, meta) +} + +func associateFmsAdminAccountRefreshFunc(conn *fms.FMS, accountId string) resource.StateRefreshFunc { + // This is all wrapped in a refresh func since AssociateAdminAccount returns + // success even though it failed if called too quickly after creating an organization + return func() (interface{}, string, error) { + req := &fms.AssociateAdminAccountInput{ + AdminAccount: aws.String(accountId), + } + + _, aserr := conn.AssociateAdminAccount(req) + if aserr != nil { + return nil, "", aserr + } + + res, err := conn.GetAdminAccount(&fms.GetAdminAccountInput{}) + if err != nil { + // FMS returns an AccessDeniedException if no account is associated, + // but does not define this in its error codes + if isAWSErr(err, "AccessDeniedException", "is not currently delegated by AWS FM") { + return nil, "", nil + } + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + return nil, "", nil + } + return nil, "", err + } + return *res, *res.AdminAccount, err + } +} + +func resourceAwsFmsAdminAccountRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + output, err := conn.GetAdminAccount(&fms.GetAdminAccountInput{}) + + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] FMS Admin Account not found, removing from state") + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error getting FMS Admin Account: %s", err) + } + + if aws.StringValue(output.RoleStatus) == fms.AccountRoleStatusDeleted { + log.Printf("[WARN] FMS Admin Account not found, removing from state") + d.SetId("") + return nil + } + + d.Set("account_id", output.AdminAccount) + + return nil +} + +func resourceAwsFmsAdminAccountDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + _, err := conn.DisassociateAdminAccount(&fms.DisassociateAdminAccountInput{}) + + if err != nil { + return fmt.Errorf("error disassociating FMS Admin Account: %s", err) + } + + if err := waitForFmsAdminAccountDeletion(conn); err != nil { + return fmt.Errorf("error waiting for FMS Admin Account (%s) disassociation: %s", d.Id(), err) + } + + return nil +} + +func waitForFmsAdminAccountDeletion(conn *fms.FMS) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + fms.AccountRoleStatusDeleting, + fms.AccountRoleStatusPendingDeletion, + fms.AccountRoleStatusReady, + }, + Target: []string{fms.AccountRoleStatusDeleted}, + Refresh: func() (interface{}, string, error) { + output, err := conn.GetAdminAccount(&fms.GetAdminAccountInput{}) + + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + return output, fms.AccountRoleStatusDeleted, nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.RoleStatus), nil + }, + Timeout: 1 * time.Minute, + Delay: 10 * 
time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_lustre_file_system.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_lustre_file_system.go new file mode 100644 index 00000000000..b0d2b76c6af --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_lustre_file_system.go @@ -0,0 +1,296 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsFsxLustreFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFsxLustreFileSystemCreate, + Read: resourceAwsFsxLustreFileSystemRead, + Update: resourceAwsFsxLustreFileSystemUpdate, + Delete: resourceAwsFsxLustreFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "export_path": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"), + ), + }, + "import_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"), + ), + }, + "imported_file_chunk_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 512000), + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(3600), + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tagsSchema(), + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + ), + }, + }, + } +} + +func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + input := &fsx.CreateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemType: 
aws.String(fsx.FileSystemTypeLustre), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), + } + + if v, ok := d.GetOk("export_path"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.ExportPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("import_path"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.ImportPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("imported_file_chunk_size"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("security_group_ids"); ok { + input.SecurityGroupIds = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = tagsFromMapFSX(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + } + + result, err := conn.CreateFileSystem(input) + if err != nil { + return fmt.Errorf("Error creating FSx filesystem: %s", err) + } + + d.SetId(*result.FileSystem.FileSystemId) + + log.Println("[DEBUG] Waiting for filesystem to become available") + + if err := waitForFsxFileSystemCreation(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to become available: %s", d.Id(), err) + } + + return resourceAwsFsxLustreFileSystemRead(d, meta) +} + +func resourceAwsFsxLustreFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + if d.HasChange("tags") { + if err := setTagsFSX(conn, d); err != nil { + return fmt.Errorf("Error updating tags for FSx filesystem: %s", err) + } + } + + requestUpdate := false + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{}, + } + + if d.HasChange("weekly_maintenance_start_time") { + input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.UpdateFileSystem(input) + if err != nil { + return fmt.Errorf("error updating FSX File System (%s): %s", d.Id(), err) + } + } + + return resourceAwsFsxLustreFileSystemRead(d, meta) +} + +func resourceAwsFsxLustreFileSystemRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, d.Id()) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("Error reading FSx File System (%s): %s", d.Id(), err) + } + + if filesystem == nil { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if filesystem.WindowsConfiguration != nil { + return fmt.Errorf("expected FSx Lustre File System, found FSx Windows File 
System: %s", d.Id()) + } + + if filesystem.LustreConfiguration == nil { + return fmt.Errorf("error describing FSx Lustre File System (%s): empty Lustre configuration", d.Id()) + } + + if filesystem.LustreConfiguration.DataRepositoryConfiguration == nil { + // Initialize an empty structure to simplify d.Set() handling + filesystem.LustreConfiguration.DataRepositoryConfiguration = &fsx.DataRepositoryConfiguration{} + } + + d.Set("arn", filesystem.ResourceARN) + d.Set("dns_name", filesystem.DNSName) + d.Set("export_path", filesystem.LustreConfiguration.DataRepositoryConfiguration.ExportPath) + d.Set("import_path", filesystem.LustreConfiguration.DataRepositoryConfiguration.ImportPath) + d.Set("imported_file_chunk_size", filesystem.LustreConfiguration.DataRepositoryConfiguration.ImportedFileChunkSize) + + if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil { + return fmt.Errorf("error setting network_interface_ids: %s", err) + } + + d.Set("owner_id", filesystem.OwnerId) + d.Set("storage_capacity", filesystem.StorageCapacity) + + if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + + if err := d.Set("tags", tagsToMapFSX(filesystem.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", filesystem.LustreConfiguration.WeeklyMaintenanceStartTime) + + return nil +} + +func resourceAwsFsxLustreFileSystemDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + request := &fsx.DeleteFileSystemInput{ + FileSystemId: aws.String(d.Id()), + } + + _, err := conn.DeleteFileSystem(request) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + return nil + } + + if err != nil { + return fmt.Errorf("Error deleting FSx filesystem: %s", err) + } + + log.Println("[DEBUG] Waiting for filesystem to delete") + + if err := waitForFsxFileSystemDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to delete: %s", d.Id(), err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_windows_file_system.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_windows_file_system.go new file mode 100644 index 00000000000..55d7576dd1b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_fsx_windows_file_system.go @@ -0,0 +1,439 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsFsxWindowsFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFsxWindowsFileSystemCreate, + Read: resourceAwsFsxWindowsFileSystemRead, + Update: resourceAwsFsxWindowsFileSystemUpdate, + Delete: resourceAwsFsxWindowsFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("skip_final_backup", false) + + return []*schema.ResourceData{d}, nil + }, + }, + + 
Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "active_directory_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"self_managed_active_directory"}, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "automatic_backup_retention_days": { + Type: schema.TypeInt, + Optional: true, + Default: 7, + ValidateFunc: validation.IntBetween(0, 35), + }, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "daily_automatic_backup_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(5, 5), + validation.StringMatch(regexp.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"), + ), + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "self_managed_active_directory": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"active_directory_id"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_ips": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 2, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "file_system_administrators_group": { + Type: schema.TypeString, + Optional: true, + Default: "Domain Admins", + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "organizational_unit_distinguished_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + "password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "username": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "skip_final_backup": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(300, 65536), + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tagsSchema(), + "throughput_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8, 2048), + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + ), + }, + }, + } +} + +func resourceAwsFsxWindowsFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).fsxconn + + input := &fsx.CreateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemType: aws.String(fsx.FileSystemTypeWindows), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), + WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{ + AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + CopyTagsToBackups: aws.Bool(d.Get("copy_tags_to_backups").(bool)), + ThroughputCapacity: aws.Int64(int64(d.Get("throughput_capacity").(int))), + }, + } + + if v, ok := d.GetOk("active_directory_id"); ok { + input.WindowsConfiguration.ActiveDirectoryId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { + input.WindowsConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + } + + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("security_group_ids"); ok { + input.SecurityGroupIds = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("self_managed_active_directory"); ok { + input.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration = expandFsxSelfManagedActiveDirectoryConfigurationCreate(v.([]interface{})) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = tagsFromMapFSX(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + input.WindowsConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + } + + result, err := conn.CreateFileSystem(input) + if err != nil { + return fmt.Errorf("Error creating FSx filesystem: %s", err) + } + + d.SetId(*result.FileSystem.FileSystemId) + + log.Println("[DEBUG] Waiting for filesystem to become available") + + if err := waitForFsxFileSystemCreation(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to become available: %s", d.Id(), err) + } + + return resourceAwsFsxWindowsFileSystemRead(d, meta) +} + +func resourceAwsFsxWindowsFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + if d.HasChange("tags") { + if err := setTagsFSX(conn, d); err != nil { + return fmt.Errorf("Error updating tags for FSx filesystem: %s", err) + } + } + + requestUpdate := false + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{}, + } + + if d.HasChange("automatic_backup_retention_days") { + input.WindowsConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + requestUpdate = true + } + + if d.HasChange("daily_automatic_backup_start_time") { + input.WindowsConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) + requestUpdate = true + } + + if d.HasChange("self_managed_active_directory") { + input.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration = expandFsxSelfManagedActiveDirectoryConfigurationUpdate(d.Get("self_managed_active_directory").([]interface{})) + requestUpdate = true + } + + if d.HasChange("weekly_maintenance_start_time") { + input.WindowsConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.UpdateFileSystem(input) + 
if err != nil { + return fmt.Errorf("error updating FSX File System (%s): %s", d.Id(), err) + } + } + + return resourceAwsFsxWindowsFileSystemRead(d, meta) +} + +func resourceAwsFsxWindowsFileSystemRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, d.Id()) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("Error reading FSx File System (%s): %s", d.Id(), err) + } + + if filesystem == nil { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if filesystem.LustreConfiguration != nil { + return fmt.Errorf("expected FSx Windows File System, found FSx Lustre File System: %s", d.Id()) + } + + if filesystem.WindowsConfiguration == nil { + return fmt.Errorf("error describing FSx Windows File System (%s): empty Windows configuration", d.Id()) + } + + d.Set("active_directory_id", filesystem.WindowsConfiguration.ActiveDirectoryId) + d.Set("arn", filesystem.ResourceARN) + d.Set("automatic_backup_retention_days", filesystem.WindowsConfiguration.AutomaticBackupRetentionDays) + d.Set("copy_tags_to_backups", filesystem.WindowsConfiguration.CopyTagsToBackups) + d.Set("daily_automatic_backup_start_time", filesystem.WindowsConfiguration.DailyAutomaticBackupStartTime) + d.Set("dns_name", filesystem.DNSName) + d.Set("kms_key_id", filesystem.KmsKeyId) + + if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil { + return fmt.Errorf("error setting network_interface_ids: %s", err) + } + + d.Set("owner_id", filesystem.OwnerId) + + if err := d.Set("self_managed_active_directory", flattenFsxSelfManagedActiveDirectoryConfiguration(d, filesystem.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration)); err != nil { + return fmt.Errorf("error setting self_managed_active_directory: %s", err) + } + + d.Set("storage_capacity", filesystem.StorageCapacity) + + if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + + if err := d.Set("tags", tagsToMapFSX(filesystem.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("throughput_capacity", filesystem.WindowsConfiguration.ThroughputCapacity) + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", filesystem.WindowsConfiguration.WeeklyMaintenanceStartTime) + + return nil +} + +func resourceAwsFsxWindowsFileSystemDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + input := &fsx.DeleteFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + WindowsConfiguration: &fsx.DeleteFileSystemWindowsConfiguration{ + SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), + }, + } + + _, err := conn.DeleteFileSystem(input) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + return nil + } + + if err != nil { + return fmt.Errorf("Error deleting FSx filesystem: %s", err) + } + + log.Println("[DEBUG] Waiting for filesystem to delete") + + if err := waitForFsxFileSystemDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to delete: %s", d.Id(), err) + } + + return nil +} + +func 
expandFsxSelfManagedActiveDirectoryConfigurationCreate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.SelfManagedActiveDirectoryConfiguration{ + DomainName: aws.String(data["domain_name"].(string)), + DnsIps: expandStringSet(data["dns_ips"].(*schema.Set)), + Password: aws.String(data["password"].(string)), + UserName: aws.String(data["username"].(string)), + } + + if v, ok := data["file_system_administrators_group"]; ok && v.(string) != "" { + req.FileSystemAdministratorsGroup = aws.String(v.(string)) + } + + if v, ok := data["organizational_unit_distinguished_name"]; ok && v.(string) != "" { + req.OrganizationalUnitDistinguishedName = aws.String(v.(string)) + } + + return req +} + +func expandFsxSelfManagedActiveDirectoryConfigurationUpdate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfigurationUpdates { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.SelfManagedActiveDirectoryConfigurationUpdates{ + DnsIps: expandStringSet(data["dns_ips"].(*schema.Set)), + Password: aws.String(data["password"].(string)), + UserName: aws.String(data["username"].(string)), + } + + return req +} + +func flattenFsxSelfManagedActiveDirectoryConfiguration(d *schema.ResourceData, adopts *fsx.SelfManagedActiveDirectoryAttributes) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + // Since we are in a configuration block and the FSx API does not return + // the password, we need to set the value if we can, or Terraform will + // show a difference for the argument from empty string to the value. + // This is not a pattern that should be used normally. 
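+ // The password below is therefore read back from the practitioner's own configuration via d.Get; because the block has MaxItems: 1, index 0 is always the only entry.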
+ // See also: flattenEmrKerberosAttributes + + m := map[string]interface{}{ + "dns_ips": aws.StringValueSlice(adopts.DnsIps), + "domain_name": aws.StringValue(adopts.DomainName), + "file_system_administrators_group": aws.StringValue(adopts.FileSystemAdministratorsGroup), + "organizational_unit_distinguished_name": aws.StringValue(adopts.OrganizationalUnitDistinguishedName), + "password": d.Get("self_managed_active_directory.0.password").(string), + "username": aws.StringValue(adopts.UserName), + } + + return []map[string]interface{}{m} +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_alias.go index 22d81714dcc..1926a1c01c6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_alias.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/gamelift" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGameliftAlias() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_build.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_build.go index 322a1ad6c88..b82155c7cdf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_build.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_build.go @@ -1,14 +1,15 @@ package aws import ( + "fmt" "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/gamelift" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGameliftBuild() *schema.Resource { @@ -93,8 +94,11 @@ func resourceAwsGameliftBuildCreate(d *schema.ResourceData, meta interface{}) er } return nil }) + if isResourceTimeoutError(err) { + out, err = conn.CreateBuild(&input) + } if err != nil { - return err + return fmt.Errorf("Error creating Gamelift build: %s", err) } d.SetId(*out.Build.BuildId) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_fleet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_fleet.go index 36212a99733..ac985289517 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_fleet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_fleet.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/gamelift" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGameliftFleet() *schema.Resource { @@ -22,7 +22,7 @@ func resourceAwsGameliftFleet() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(70 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -346,10 +346,11 @@ func resourceAwsGameliftFleetDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] Deleting Gamelift Fleet: %s", d.Id()) // It can take ~ 1 hr as Gamelift will keep retrying on errors like // invalid launch path and remain in state when it can't be deleted :/ + input := &gamelift.DeleteFleetInput{ + FleetId: aws.String(d.Id()), + } err := resource.Retry(60*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteFleet(&gamelift.DeleteFleetInput{ - FleetId: aws.String(d.Id()), - }) + _, err := conn.DeleteFleet(input) if err != nil { msg := fmt.Sprintf("Cannot delete fleet %s that is in status of ", d.Id()) if isAWSErr(err, gamelift.ErrCodeInvalidRequestException, msg) { @@ -359,8 +360,11 @@ func resourceAwsGameliftFleetDelete(d *schema.ResourceData, meta interface{}) er } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteFleet(input) + } if err != nil { - return err + return fmt.Errorf("Error deleting Gamelift fleet: %s", err) } return waitForGameliftFleetToBeDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_game_session_queue.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_game_session_queue.go index 9cdfddbbce1..e1a4c86c13a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_game_session_queue.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_gamelift_game_session_queue.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/gamelift" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "log" ) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go index 08b505722fd..e236dd2466f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go @@ -6,13 +6,13 @@ import ( "log" "regexp" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/glacier" - 
"github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlacierVault() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault_lock.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault_lock.go index c181510cbbb..12a491df11f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault_lock.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault_lock.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glacier" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlacierVaultLock() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_accelerator.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_accelerator.go index c95d4a80084..33fa7797ab6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_accelerator.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_accelerator.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/globalaccelerator" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlobalAcceleratorAccelerator() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_endpoint_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_endpoint_group.go new file mode 100644 index 00000000000..5aa50b2af01 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_endpoint_group.go @@ -0,0 +1,342 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/globalaccelerator" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsGlobalAcceleratorEndpointGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGlobalAcceleratorEndpointGroupCreate, + Read: 
resourceAwsGlobalAcceleratorEndpointGroupRead, + Update: resourceAwsGlobalAcceleratorEndpointGroupUpdate, + Delete: resourceAwsGlobalAcceleratorEndpointGroupDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "listener_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "endpoint_group_region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "health_check_interval_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + ValidateFunc: validation.IntBetween(10, 30), + }, + "health_check_path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + "health_check_port": { + Type: schema.TypeInt, + Optional: true, + }, + "health_check_protocol": { + Type: schema.TypeString, + Optional: true, + Default: globalaccelerator.HealthCheckProtocolTcp, + ValidateFunc: validation.StringInSlice([]string{ + globalaccelerator.HealthCheckProtocolTcp, + globalaccelerator.HealthCheckProtocolHttp, + globalaccelerator.HealthCheckProtocolHttps, + }, false), + }, + "threshold_count": { + Type: schema.TypeInt, + Optional: true, + Default: 3, + ValidateFunc: validation.IntBetween(1, 10), + }, + "traffic_dial_percentage": { + Type: schema.TypeFloat, + Optional: true, + Default: 100.0, + ValidateFunc: validation.FloatBetween(0.0, 100.0), + }, + "endpoint_configuration": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint_id": { + Type: schema.TypeString, + Optional: true, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsGlobalAcceleratorEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).globalacceleratorconn + region := meta.(*AWSClient).region + + opts := &globalaccelerator.CreateEndpointGroupInput{ + ListenerArn: aws.String(d.Get("listener_arn").(string)), + IdempotencyToken: aws.String(resource.UniqueId()), + EndpointGroupRegion: aws.String(region), + } + + if v, ok := d.GetOk("endpoint_group_region"); ok { + opts.EndpointGroupRegion = aws.String(v.(string)) + } + + if v, ok := d.GetOk("health_check_interval_seconds"); ok { + opts.HealthCheckIntervalSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("health_check_path"); ok { + opts.HealthCheckPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("health_check_port"); ok { + opts.HealthCheckPort = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("health_check_protocol"); ok { + opts.HealthCheckProtocol = aws.String(v.(string)) + } + + if v, ok := d.GetOk("threshold_count"); ok { + opts.ThresholdCount = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("traffic_dial_percentage"); ok { + opts.TrafficDialPercentage = aws.Float64(v.(float64)) + } + + if v, ok := d.GetOk("endpoint_configuration"); ok { + opts.EndpointConfigurations = resourceAwsGlobalAcceleratorEndpointGroupExpandEndpointConfigurations(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] Create Global Accelerator endpoint group: %s", opts) + + resp, err := conn.CreateEndpointGroup(opts) + if err != nil { + return fmt.Errorf("Error creating Global Accelerator endpoint group: %s", err) + } + + d.SetId(*resp.EndpointGroup.EndpointGroupArn) + + acceleratorArn, err := resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(d.Id()) + + if err != nil { + return err + } + + err = 
resourceAwsGlobalAcceleratorAcceleratorWaitForState(conn, acceleratorArn) + + if err != nil { + return err + } + + return resourceAwsGlobalAcceleratorEndpointGroupRead(d, meta) +} + +func resourceAwsGlobalAcceleratorEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).globalacceleratorconn + + endpointGroup, err := resourceAwsGlobalAcceleratorEndpointGroupRetrieve(conn, d.Id()) + + if err != nil { + return fmt.Errorf("Error reading Global Accelerator endpoint group: %s", err) + } + + if endpointGroup == nil { + log.Printf("[WARN] Global Accelerator endpoint group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + listenerArn, err := resourceAwsGlobalAcceleratorEndpointGroupParseListenerArn(d.Id()) + + if err != nil { + return err + } + + d.Set("listener_arn", listenerArn) + d.Set("endpoint_group_region", endpointGroup.EndpointGroupRegion) + d.Set("health_check_interval_seconds", endpointGroup.HealthCheckIntervalSeconds) + d.Set("health_check_path", endpointGroup.HealthCheckPath) + d.Set("health_check_port", endpointGroup.HealthCheckPort) + d.Set("health_check_protocol", endpointGroup.HealthCheckProtocol) + d.Set("threshold_count", endpointGroup.ThresholdCount) + d.Set("traffic_dial_percentage", endpointGroup.TrafficDialPercentage) + if err := d.Set("endpoint_configuration", resourceAwsGlobalAcceleratorEndpointGroupFlattenEndpointDescriptions(endpointGroup.EndpointDescriptions)); err != nil { + return fmt.Errorf("error setting endpoint_configuration: %s", err) + } + + return nil +} + +func resourceAwsGlobalAcceleratorEndpointGroupParseListenerArn(endpointGroupArn string) (string, error) { + parts := strings.Split(endpointGroupArn, "/") + if len(parts) < 6 { + return "", fmt.Errorf("Unable to parse listener ARN from %s", endpointGroupArn) + } + return strings.Join(parts[0:4], "/"), nil +} + +func resourceAwsGlobalAcceleratorEndpointGroupExpandEndpointConfigurations(configurations []interface{}) []*globalaccelerator.EndpointConfiguration { + out := make([]*globalaccelerator.EndpointConfiguration, len(configurations)) + + for i, raw := range configurations { + configuration := raw.(map[string]interface{}) + m := globalaccelerator.EndpointConfiguration{} + + m.EndpointId = aws.String(configuration["endpoint_id"].(string)) + m.Weight = aws.Int64(int64(configuration["weight"].(int))) + + out[i] = &m + } + + log.Printf("[DEBUG] Expand endpoint_configuration: %s", out) + return out +} + +func resourceAwsGlobalAcceleratorEndpointGroupFlattenEndpointDescriptions(configurations []*globalaccelerator.EndpointDescription) []interface{} { + out := make([]interface{}, len(configurations)) + + for i, configuration := range configurations { + m := make(map[string]interface{}) + + m["endpoint_id"] = aws.StringValue(configuration.EndpointId) + m["weight"] = aws.Int64Value(configuration.Weight) + + out[i] = m + } + + log.Printf("[DEBUG] Flatten endpoint_configuration: %s", out) + return out +} + +func resourceAwsGlobalAcceleratorEndpointGroupRetrieve(conn *globalaccelerator.GlobalAccelerator, endpointGroupArn string) (*globalaccelerator.EndpointGroup, error) { + resp, err := conn.DescribeEndpointGroup(&globalaccelerator.DescribeEndpointGroupInput{ + EndpointGroupArn: aws.String(endpointGroupArn), + }) + + if err != nil { + if isAWSErr(err, globalaccelerator.ErrCodeEndpointGroupNotFoundException, "") { + return nil, nil + } + return nil, err + } + + return resp.EndpointGroup, nil +} + +func resourceAwsGlobalAcceleratorEndpointGroupUpdate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).globalacceleratorconn + + opts := &globalaccelerator.UpdateEndpointGroupInput{ + EndpointGroupArn: aws.String(d.Id()), + } + + if v, ok := d.GetOk("health_check_interval_seconds"); ok { + opts.HealthCheckIntervalSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("health_check_path"); ok { + opts.HealthCheckPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("health_check_port"); ok { + opts.HealthCheckPort = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("health_check_protocol"); ok { + opts.HealthCheckProtocol = aws.String(v.(string)) + } + + if v, ok := d.GetOk("threshold_count"); ok { + opts.ThresholdCount = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("traffic_dial_percentage"); ok { + opts.TrafficDialPercentage = aws.Float64(v.(float64)) + } + + if v, ok := d.GetOk("endpoint_configuration"); ok { + opts.EndpointConfigurations = resourceAwsGlobalAcceleratorEndpointGroupExpandEndpointConfigurations(v.(*schema.Set).List()) + } else { + opts.EndpointConfigurations = []*globalaccelerator.EndpointConfiguration{} + } + + log.Printf("[DEBUG] Update Global Accelerator endpoint group: %s", opts) + + _, err := conn.UpdateEndpointGroup(opts) + if err != nil { + return fmt.Errorf("Error updating Global Accelerator endpoint group: %s", err) + } + + acceleratorArn, err := resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(d.Id()) + + if err != nil { + return err + } + + err = resourceAwsGlobalAcceleratorAcceleratorWaitForState(conn, acceleratorArn) + + if err != nil { + return err + } + + return resourceAwsGlobalAcceleratorEndpointGroupRead(d, meta) +} + +func resourceAwsGlobalAcceleratorEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).globalacceleratorconn + + opts := &globalaccelerator.DeleteEndpointGroupInput{ + EndpointGroupArn: aws.String(d.Id()), + } + + _, err := conn.DeleteEndpointGroup(opts) + if err != nil { + if isAWSErr(err, globalaccelerator.ErrCodeEndpointGroupNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting Global Accelerator endpoint group: %s", err) + } + + acceleratorArn, err := resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(d.Id()) + + if err != nil { + return err + } + + err = resourceAwsGlobalAcceleratorAcceleratorWaitForState(conn, acceleratorArn) + + if err != nil { + return err + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_listener.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_listener.go index 1637cb7c965..4eee7cc5b18 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_listener.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_globalaccelerator_listener.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/globalaccelerator" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func 
resourceAwsGlobalAcceleratorListener() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_database.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_database.go index 8e9478dea13..4ba7780a5c3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_database.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_database.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsGlueCatalogDatabase() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_table.go index 4c0084369ef..29e4841e2a6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_catalog_table.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsGlueCatalogTable() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_classifier.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_classifier.go index 9505201cc27..06d2335398f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_classifier.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_classifier.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueClassifier() *schema.Resource { @@ -24,6 +24,18 @@ func resourceAwsGlueClassifier() *schema.Resource { func(diff *schema.ResourceDiff, v interface{}) error { // ForceNew when changing classifier type // InvalidInputException: UpdateClassifierRequest can't change the type of the classifier + if diff.HasChange("csv_classifier") && diff.HasChange("grok_classifier") { + diff.ForceNew("csv_classifier") + diff.ForceNew("grok_classifier") + } + if diff.HasChange("csv_classifier") && diff.HasChange("json_classifier") { + diff.ForceNew("csv_classifier") + diff.ForceNew("json_classifier") + } + if diff.HasChange("csv_classifier") && diff.HasChange("xml_classifier") { + diff.ForceNew("csv_classifier") + diff.ForceNew("xml_classifier") + } if diff.HasChange("grok_classifier") && 
diff.HasChange("json_classifier") { diff.ForceNew("grok_classifier") diff.ForceNew("json_classifier") @@ -41,11 +53,47 @@ func resourceAwsGlueClassifier() *schema.Resource { ), Schema: map[string]*schema.Schema{ + "csv_classifier": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"grok_classifier", "json_classifier", "xml_classifier"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_single_column": { + Type: schema.TypeBool, + Optional: true, + }, + "contains_header": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateHeaderOptions(), + }, + "delimiter": { + Type: schema.TypeString, + Optional: true, + }, + "disable_value_trimming": { + Type: schema.TypeBool, + Optional: true, + }, + "header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "quote_symbol": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, "grok_classifier": { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{"json_classifier", "xml_classifier"}, + ConflictsWith: []string{"csv_classifier", "json_classifier", "xml_classifier"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "classification": { @@ -69,7 +117,7 @@ func resourceAwsGlueClassifier() *schema.Resource { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{"grok_classifier", "xml_classifier"}, + ConflictsWith: []string{"csv_classifier", "grok_classifier", "xml_classifier"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "json_path": { @@ -89,7 +137,7 @@ func resourceAwsGlueClassifier() *schema.Resource { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{"grok_classifier", "json_classifier"}, + ConflictsWith: []string{"csv_classifier", "grok_classifier", "json_classifier"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "classification": { @@ -113,6 +161,11 @@ func resourceAwsGlueClassifierCreate(d *schema.ResourceData, meta interface{}) e input := &glue.CreateClassifierInput{} + if v, ok := d.GetOk("csv_classifier"); ok { + m := v.([]interface{})[0].(map[string]interface{}) + input.CsvClassifier = expandGlueCsvClassifierCreate(name, m) + } + if v, ok := d.GetOk("grok_classifier"); ok { m := v.([]interface{})[0].(map[string]interface{}) input.GrokClassifier = expandGlueGrokClassifierCreate(name, m) @@ -164,6 +217,10 @@ func resourceAwsGlueClassifierRead(d *schema.ResourceData, meta interface{}) err return nil } + if err := d.Set("csv_classifier", flattenGlueCsvClassifier(classifier.CsvClassifier)); err != nil { + return fmt.Errorf("error setting match_criteria: %s", err) + } + if err := d.Set("grok_classifier", flattenGlueGrokClassifier(classifier.GrokClassifier)); err != nil { return fmt.Errorf("error setting match_criteria: %s", err) } @@ -186,6 +243,11 @@ func resourceAwsGlueClassifierUpdate(d *schema.ResourceData, meta interface{}) e input := &glue.UpdateClassifierInput{} + if v, ok := d.GetOk("csv_classifier"); ok { + m := v.([]interface{})[0].(map[string]interface{}) + input.CsvClassifier = expandGlueCsvClassifierUpdate(d.Id(), m) + } + if v, ok := d.GetOk("grok_classifier"); ok { m := v.([]interface{})[0].(map[string]interface{}) input.GrokClassifier = expandGlueGrokClassifierUpdate(d.Id(), m) @@ -222,6 +284,14 @@ func resourceAwsGlueClassifierDelete(d *schema.ResourceData, meta interface{}) e return nil } +func validateHeaderOptions() schema.SchemaValidateFunc { + return 
validation.StringInSlice([]string{ + "UNKNOWN", + "PRESENT", + "ABSENT", + }, true) +} + func deleteGlueClassifier(conn *glue.Glue, name string) error { input := &glue.DeleteClassifierInput{ Name: aws.String(name), @@ -238,6 +308,48 @@ func deleteGlueClassifier(conn *glue.Glue, name string) error { return nil } +func expandGlueCsvClassifierCreate(name string, m map[string]interface{}) *glue.CreateCsvClassifierRequest { + csvClassifier := &glue.CreateCsvClassifierRequest{ + AllowSingleColumn: aws.Bool(m["allow_single_column"].(bool)), + ContainsHeader: aws.String(m["contains_header"].(string)), + Delimiter: aws.String(m["delimiter"].(string)), + DisableValueTrimming: aws.Bool(m["disable_value_trimming"].(bool)), + Name: aws.String(name), + QuoteSymbol: aws.String(m["quote_symbol"].(string)), + } + + if v, ok := m["header"]; ok { + header := make([]string, len(v.([]interface{}))) + for i, item := range v.([]interface{}) { + header[i] = fmt.Sprint(item) + } + csvClassifier.Header = aws.StringSlice(header) + } + + return csvClassifier +} + +func expandGlueCsvClassifierUpdate(name string, m map[string]interface{}) *glue.UpdateCsvClassifierRequest { + csvClassifier := &glue.UpdateCsvClassifierRequest{ + AllowSingleColumn: aws.Bool(m["allow_single_column"].(bool)), + ContainsHeader: aws.String(m["contains_header"].(string)), + Delimiter: aws.String(m["delimiter"].(string)), + DisableValueTrimming: aws.Bool(m["disable_value_trimming"].(bool)), + Name: aws.String(name), + QuoteSymbol: aws.String(m["quote_symbol"].(string)), + } + + if v, ok := m["header"]; ok { + header := make([]string, len(v.([]interface{}))) + for i, item := range v.([]interface{}) { + header[i] = fmt.Sprint(item) + } + csvClassifier.Header = aws.StringSlice(header) + } + + return csvClassifier +} + func expandGlueGrokClassifierCreate(name string, m map[string]interface{}) *glue.CreateGrokClassifierRequest { grokClassifier := &glue.CreateGrokClassifierRequest{ Classification: aws.String(m["classification"].(string)), @@ -308,6 +420,23 @@ func expandGlueXmlClassifierUpdate(name string, m map[string]interface{}) *glue. 
return xmlClassifier } +func flattenGlueCsvClassifier(csvClassifier *glue.CsvClassifier) []map[string]interface{} { + if csvClassifier == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "allow_single_column": aws.BoolValue(csvClassifier.AllowSingleColumn), + "contains_header": aws.StringValue(csvClassifier.ContainsHeader), + "delimiter": aws.StringValue(csvClassifier.Delimiter), + "disable_value_trimming": aws.BoolValue(csvClassifier.DisableValueTrimming), + "header": aws.StringValueSlice(csvClassifier.Header), + "quote_symbol": aws.StringValue(csvClassifier.QuoteSymbol), + } + + return []map[string]interface{}{m} +} + func flattenGlueGrokClassifier(grokClassifier *glue.GrokClassifier) []map[string]interface{} { if grokClassifier == nil { return []map[string]interface{}{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_connection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_connection.go index 546f3e9f4a5..a6c378149b1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_connection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_connection.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueConnection() *schema.Resource { @@ -29,8 +29,9 @@ func resourceAwsGlueConnection() *schema.Resource { Computed: true, }, "connection_properties": { - Type: schema.TypeMap, - Required: true, + Type: schema.TypeMap, + Required: true, + Sensitive: true, }, "connection_type": { Type: schema.TypeString, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_crawler.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_crawler.go index f3e6f5f6f52..50b88936c95 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_crawler.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_crawler.go @@ -9,10 +9,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueCrawler() *schema.Resource { @@ -159,6 +159,24 @@ func resourceAwsGlueCrawler() *schema.Resource { }, }, }, + "catalog_target": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database_name": { + Type: schema.TypeString, + Required: true, + }, + "tables": { + Type: schema.TypeList, + 
Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, "configuration": { Type: schema.TypeString, Optional: true, @@ -201,7 +219,9 @@ func resourceAwsGlueCrawlerCreate(d *schema.ResourceData, meta interface{}) erro } return nil }) - + if isResourceTimeoutError(err) { + _, err = glueConn.CreateCrawler(crawlerInput) + } if err != nil { return fmt.Errorf("error creating Glue crawler: %s", err) } @@ -325,14 +345,16 @@ func expandGlueCrawlerTargets(d *schema.ResourceData) (*glue.CrawlerTargets, err dynamodbTargets, dynamodbTargetsOk := d.GetOk("dynamodb_target") jdbcTargets, jdbcTargetsOk := d.GetOk("jdbc_target") s3Targets, s3TargetsOk := d.GetOk("s3_target") - if !dynamodbTargetsOk && !jdbcTargetsOk && !s3TargetsOk { - return nil, fmt.Errorf("One of the following configurations is required: dynamodb_target, jdbc_target, s3_target") + catalogTargets, catalogTargetsOk := d.GetOk("catalog_target") + if !dynamodbTargetsOk && !jdbcTargetsOk && !s3TargetsOk && !catalogTargetsOk { + return nil, fmt.Errorf("One of the following configurations is required: dynamodb_target, jdbc_target, s3_target, catalog_target") } log.Print("[DEBUG] Creating crawler target") crawlerTargets.DynamoDBTargets = expandGlueDynamoDBTargets(dynamodbTargets.([]interface{})) crawlerTargets.JdbcTargets = expandGlueJdbcTargets(jdbcTargets.([]interface{})) crawlerTargets.S3Targets = expandGlueS3Targets(s3Targets.([]interface{})) + crawlerTargets.CatalogTargets = expandGlueCatalogTargets(catalogTargets.([]interface{})) return crawlerTargets, nil } @@ -407,6 +429,28 @@ func expandGlueJdbcTarget(cfg map[string]interface{}) *glue.JdbcTarget { return target } +func expandGlueCatalogTargets(targets []interface{}) []*glue.CatalogTarget { + if len(targets) < 1 { + return []*glue.CatalogTarget{} + } + + perms := make([]*glue.CatalogTarget, len(targets)) + for i, rawCfg := range targets { + cfg := rawCfg.(map[string]interface{}) + perms[i] = expandGlueCatalogTarget(cfg) + } + return perms +} + +func expandGlueCatalogTarget(cfg map[string]interface{}) *glue.CatalogTarget { + target := &glue.CatalogTarget{ + DatabaseName: aws.String(cfg["database_name"].(string)), + Tables: expandStringList(cfg["tables"].([]interface{})), + } + + return target +} + func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) error { glueConn := meta.(*AWSClient).glueconn name := d.Get("name").(string) @@ -509,6 +553,10 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("s3_target", flattenGlueS3Targets(crawlerOutput.Crawler.Targets.S3Targets)); err != nil { return fmt.Errorf("error setting s3_target: %s", err) } + + if err := d.Set("catalog_target", flattenGlueCatalogTargets(crawlerOutput.Crawler.Targets.CatalogTargets)); err != nil { + return fmt.Errorf("error setting catalog_target: %s", err) + } } return nil @@ -527,6 +575,19 @@ func flattenGlueS3Targets(s3Targets []*glue.S3Target) []map[string]interface{} { return result } +func flattenGlueCatalogTargets(CatalogTargets []*glue.CatalogTarget) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + + for _, catalogTarget := range CatalogTargets { + attrs := make(map[string]interface{}) + attrs["tables"] = flattenStringList(catalogTarget.Tables) + attrs["database_name"] = aws.StringValue(catalogTarget.DatabaseName) + + result = append(result, attrs) + } + return result +} + func flattenGlueDynamoDBTargets(dynamodbTargets []*glue.DynamoDBTarget) []map[string]interface{} { result := 
make([]map[string]interface{}, 0) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_job.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_job.go index 61438f3487f..285be72f5fa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_job.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_job.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueJob() *schema.Resource { @@ -44,6 +44,12 @@ func resourceAwsGlueJob() *schema.Resource { Type: schema.TypeString, Required: true, }, + "python_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"2", "3"}, true), + }, }, }, }, @@ -60,6 +66,11 @@ func resourceAwsGlueJob() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "glue_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "execution_property": { Type: schema.TypeList, Optional: true, @@ -149,6 +160,10 @@ func resourceAwsGlueJobCreate(d *schema.ResourceData, meta interface{}) error { input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("glue_version"); ok { + input.GlueVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("execution_property"); ok { input.ExecutionProperty = expandGlueExecutionProperty(v.([]interface{})) } @@ -207,6 +222,7 @@ func resourceAwsGlueJobRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting default_arguments: %s", err) } d.Set("description", job.Description) + d.Set("glue_version", job.GlueVersion) if err := d.Set("execution_property", flattenGlueExecutionProperty(job.ExecutionProperty)); err != nil { return fmt.Errorf("error setting execution_property: %s", err) } @@ -261,6 +277,10 @@ func resourceAwsGlueJobUpdate(d *schema.ResourceData, meta interface{}) error { jobUpdate.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("glue_version"); ok { + jobUpdate.GlueVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("execution_property"); ok { jobUpdate.ExecutionProperty = expandGlueExecutionProperty(v.([]interface{})) } @@ -333,6 +353,10 @@ func expandGlueJobCommand(l []interface{}) *glue.JobCommand { ScriptLocation: aws.String(m["script_location"].(string)), } + if v, ok := m["python_version"].(string); ok && v != "" { + jobCommand.PythonVersion = aws.String(v) + } + return jobCommand } @@ -364,6 +388,7 @@ func flattenGlueJobCommand(jobCommand *glue.JobCommand) []map[string]interface{} m := map[string]interface{}{ "name": aws.StringValue(jobCommand.Name), "script_location": aws.StringValue(jobCommand.ScriptLocation), + "python_version": aws.StringValue(jobCommand.PythonVersion), } return []map[string]interface{}{m} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_security_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_security_configuration.go index aa152ec6cb5..cb706cca063 
100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_security_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_security_configuration.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueSecurityConfiguration() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_trigger.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_trigger.go index 903f9551a03..5bf95d5253e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_trigger.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glue_trigger.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGlueTrigger() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_detector.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_detector.go index e56a14012cf..2d7f87d109c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_detector.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_detector.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/guardduty" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsGuardDutyDetector() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_invite_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_invite_accepter.go index 2930de3552f..896b542598c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_invite_accepter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_invite_accepter.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/guardduty" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsGuardDutyInviteAccepter() *schema.Resource { diff 
--git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_ipset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_ipset.go index b6e0ab64c8d..ea07fc37e53 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_ipset.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_ipset.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/guardduty" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGuardDutyIpset() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_member.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_member.go index 49ca3beb9ca..3004f8d6cab 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_member.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_member.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/guardduty" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsGuardDutyMember() *schema.Resource { @@ -235,31 +235,61 @@ func inviteGuardDutyMemberWaiter(accountID, detectorID string, timeout time.Dura } // wait until e-mail verification finishes - return resource.Retry(timeout, func() *resource.RetryError { + var out *guardduty.GetMembersOutput + err := resource.Retry(timeout, func() *resource.RetryError { log.Printf("[DEBUG] Reading GuardDuty Member: %s", input) - gmo, err := conn.GetMembers(&input) + var err error + out, err = conn.GetMembers(&input) if err != nil { return resource.NonRetryableError(fmt.Errorf("error reading GuardDuty Member %q: %s", accountID, err)) } - if gmo == nil || len(gmo.Members) == 0 { - return resource.RetryableError(fmt.Errorf("error reading GuardDuty Member %q: member missing from response", accountID)) + retryable, err := guardDutyMemberInvited(out, accountID) + if err != nil { + if retryable { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) } - member := gmo.Members[0] - status := aws.StringValue(member.RelationshipStatus) + return nil + }) + if isResourceTimeoutError(err) { + out, err = conn.GetMembers(&input) - if status == "Disabled" || status == "Enabled" || status == "Invited" { - return nil + if err != nil { + return fmt.Errorf("Error reading GuardDuty member: %s", err) } - - if status == "Created" || status == "EmailVerificationInProgress" { - return resource.RetryableError(fmt.Errorf("Expected member to be invited but was in state: %s", status)) + _, err = guardDutyMemberInvited(out, accountID) + if err != nil { + return err // Doesn't need fmt because 
that happens in the function } + return nil + } + if err != nil { + return fmt.Errorf("Error waiting for GuardDuty email verification: %s", err) + } + return nil +} - return resource.NonRetryableError(fmt.Errorf("error inviting GuardDuty Member %q: invalid status: %s", accountID, status)) - }) +func guardDutyMemberInvited(out *guardduty.GetMembersOutput, accountID string) (bool, error) { + if out == nil || len(out.Members) == 0 { + return true, fmt.Errorf("error reading GuardDuty Member %q: member missing from response", accountID) + } + + member := out.Members[0] + status := aws.StringValue(member.RelationshipStatus) + + if status == "Disabled" || status == "Enabled" || status == "Invited" { + return false, nil + } + + if status == "Created" || status == "EmailVerificationInProgress" { + return true, fmt.Errorf("Expected member to be invited but was in state: %s", status) + } + + return false, fmt.Errorf("error inviting GuardDuty Member %q: invalid status: %s", accountID, status) } func decodeGuardDutyMemberID(id string) (accountID, detectorID string, err error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_threatintelset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_threatintelset.go index 75622cef5b7..ad2d26499eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_threatintelset.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_guardduty_threatintelset.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/guardduty" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsGuardDutyThreatintelset() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go index b34df28b459..fb1046642e8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/encryption" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/encryption" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamAccessKey() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go index 717f8187e48..52889acb154 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamAccountAlias() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go index c3b63cc6a13..53f28a9139e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamAccountPasswordPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go index 694a9aa0eab..1e92696e4f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamGroup() *schema.Resource { @@ -112,6 +112,7 @@ func resourceAwsIamGroupUpdate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error updating IAM Group %s: %s", d.Id(), err) } + d.SetId(nn.(string)) return resourceAwsIamGroupRead(d, meta) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go index 64af0bf48f3..485466eda7d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamGroupMembership() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go index 4f00c273c0a..6091c11e90f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamGroupPolicy() *schema.Resource { @@ -22,6 +22,10 @@ func resourceAwsIamGroupPolicy() *schema.Resource { Read: resourceAwsIamGroupPolicyRead, Delete: resourceAwsIamGroupPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "policy": { Type: schema.TypeString, @@ -78,14 +82,16 @@ func resourceAwsIamGroupPolicyPut(d *schema.ResourceData, meta interface{}) erro func resourceAwsIamGroupPolicyRead(d *schema.ResourceData, meta interface{}) error { iamconn := meta.(*AWSClient).iamconn - group, name := resourceAwsIamGroupPolicyParseId(d.Id()) + group, name, err := resourceAwsIamGroupPolicyParseId(d.Id()) + if err != nil { + return err + } request := &iam.GetGroupPolicyInput{ PolicyName: aws.String(name), GroupName: aws.String(group), } - var err error getResp, err := iamconn.GetGroupPolicy(request) if err != nil { if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { @@ -105,9 +111,17 @@ func resourceAwsIamGroupPolicyRead(d *schema.ResourceData, meta interface{}) err return err } - d.Set("group", group) - d.Set("name", name) - d.Set("policy", policy) + if err := d.Set("policy", policy); err != nil { + return fmt.Errorf("error setting policy: %s", err) + } + + if err := d.Set("name", name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + + if err := d.Set("group", group); err != nil { + return fmt.Errorf("error setting group: %s", err) + } return nil } @@ -115,7 +129,10 @@ func resourceAwsIamGroupPolicyRead(d *schema.ResourceData, meta interface{}) err func resourceAwsIamGroupPolicyDelete(d *schema.ResourceData, meta interface{}) error { iamconn := meta.(*AWSClient).iamconn - group, name := resourceAwsIamGroupPolicyParseId(d.Id()) + group, name, err := resourceAwsIamGroupPolicyParseId(d.Id()) + if err != nil { + return err + } request := &iam.DeleteGroupPolicyInput{ PolicyName: aws.String(name), @@ -131,8 +148,13 @@ func resourceAwsIamGroupPolicyDelete(d *schema.ResourceData, meta interface{}) e return nil } -func resourceAwsIamGroupPolicyParseId(id string) (groupName, policyName string) { +func resourceAwsIamGroupPolicyParseId(id string) (groupName, policyName string, err error) { parts := strings.SplitN(id, ":", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + err = fmt.Errorf("group_policy id must be of the form <group name>:<policy name>") + return + } + groupName = parts[0] policyName = parts[1] return diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go index 84ab11d9669..97cb66d28a6 100644 ---
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamGroupPolicyAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go index a9f958e4239..eb56a3b9804 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go @@ -2,15 +2,15 @@ package aws import ( "fmt" + "log" "regexp" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamInstanceProfile() *schema.Resource { @@ -174,6 +174,9 @@ func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) erro } return nil }) + if isResourceTimeoutError(err) { + _, err = iamconn.AddRoleToInstanceProfile(request) + } if err != nil { return fmt.Errorf("Error adding IAM Role %s to Instance Profile %s: %s", roleName, profileName, err) } @@ -188,7 +191,7 @@ func instanceProfileRemoveRole(iamconn *iam.IAM, profileName, roleName string) e } _, err := iamconn.RemoveRoleFromInstanceProfile(request) - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + if isAWSErr(err, "NoSuchEntity", "") { return nil } return err @@ -289,11 +292,12 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) } result, err := iamconn.GetInstanceProfile(request) + if isAWSErr(err, "NoSuchEntity", "") { + log.Printf("[WARN] IAM Instance Profile %s is already gone", d.Id()) + d.SetId("") + return nil + } if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } return fmt.Errorf("Error reading IAM instance profile %s: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go index bf7b8232710..2f6f6523306 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" 
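The instance profile hunks above introduce an idiom that repeats through the rest of this diff: a call wrapped in resource.Retry is followed by a check of the provider-internal isResourceTimeoutError helper, and on a loop timeout one final attempt is made outside the loop, so a success that lands just after the deadline is not reported as a failure. Below is a minimal, self-contained sketch of that idiom, not code from this diff: doCreate stands in for an arbitrary AWS SDK call, and the helper body is assumed to match the provider's, which detects the SDK's *resource.TimeoutError.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// isResourceTimeoutError is assumed to match the provider-internal helper:
// resource.Retry reports an expired deadline as *resource.TimeoutError.
func isResourceTimeoutError(err error) bool {
	timeoutErr, ok := err.(*resource.TimeoutError)
	return ok && timeoutErr.LastError == nil
}

// createWithFinalAttempt retries doCreate for up to a minute, then makes one
// last attempt outside the loop if the retry loop itself timed out.
func createWithFinalAttempt(doCreate func() error) error {
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if err := doCreate(); err != nil {
			// The real resources retry only specific error codes and mark
			// everything else non-retryable; this sketch retries everything.
			return resource.RetryableError(err)
		}
		return nil
	})
	if isResourceTimeoutError(err) {
		err = doCreate()
	}
	if err != nil {
		return fmt.Errorf("error creating resource: %s", err)
	}
	return nil
}

func main() {
	attempts := 0
	err := createWithFinalAttempt(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("still propagating") // simulated eventual consistency
		}
		return nil
	})
	fmt.Printf("err=%v attempts=%d\n", err, attempts)
}
```

The same shape appears below in the IAM policy read, IAM role create/delete, IAM server certificate delete, Inspector assessment target delete, and EC2 instance hunks.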
"github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamOpenIDConnectProvider() *schema.Resource { @@ -28,7 +28,6 @@ func resourceAwsIamOpenIDConnectProvider() *schema.Resource { }, "url": { Type: schema.TypeString, - Computed: false, Required: true, ForceNew: true, ValidateFunc: validateOpenIdURL, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go index c6b153c8073..79185240f4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamPolicy() *schema.Resource { @@ -141,7 +141,9 @@ func resourceAwsIamPolicyRead(d *schema.ResourceData, meta interface{}) error { return nil }) - + if isResourceTimeoutError(err) { + getPolicyResponse, err = iamconn.GetPolicy(getPolicyRequest) + } if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { log.Printf("[WARN] IAM Policy (%s) not found, removing from state", d.Id()) d.SetId("") @@ -187,7 +189,9 @@ func resourceAwsIamPolicyRead(d *schema.ResourceData, meta interface{}) error { return nil }) - + if isResourceTimeoutError(err) { + getPolicyVersionResponse, err = iamconn.GetPolicyVersion(getPolicyVersionRequest) + } if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { log.Printf("[WARN] IAM Policy (%s) not found, removing from state", d.Id()) d.SetId("") diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go index 5cf0b47b81b..cb3145028fe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamPolicyAttachment() *schema.Resource { @@ -248,6 +248,7 @@ func attachPolicyToRoles(conn *iam.IAM, roles []*string, arn string) error { if attachmentErr != nil { return attachmentErr } + } return nil } @@ -336,6 +337,9 @@ func detachPolicyFromUsers(conn *iam.IAM, users []*string, arn string) error { UserName: u, PolicyArn: aws.String(arn), }) + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { 
+ continue + } if err != nil { return err } @@ -348,6 +352,9 @@ func detachPolicyFromRoles(conn *iam.IAM, roles []*string, arn string) error { RoleName: r, PolicyArn: aws.String(arn), }) + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + continue + } if err != nil { return err } @@ -360,6 +367,9 @@ func detachPolicyFromGroups(conn *iam.IAM, groups []*string, arn string) error { GroupName: g, PolicyArn: aws.String(arn), }) + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + continue + } if err != nil { return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go index 2b68cf41b34..3445c27443a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamRole() *schema.Resource { @@ -177,6 +177,9 @@ func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error { } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + createResp, err = iamconn.CreateRole(request) + } if err != nil { return fmt.Errorf("Error creating IAM Role %s: %s", name, err) } @@ -362,7 +365,7 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { } // IAM is eventually consistent and deletion of attached policies may take time - return resource.Retry(30*time.Second, func() *resource.RetryError { + err := resource.Retry(30*time.Second, func() *resource.RetryError { _, err := iamconn.DeleteRole(deleteRoleInput) if err != nil { if isAWSErr(err, iam.ErrCodeDeleteConflictException, "") { @@ -373,6 +376,13 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + _, err = iamconn.DeleteRole(deleteRoleInput) + } + if err != nil { + return fmt.Errorf("Error deleting IAM role: %s", err) + } + return nil } func deleteAwsIamRoleInstanceProfiles(conn *iam.IAM, rolename string) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go index c317c4e48d6..78aad42b291 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamRolePolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go index d35db1c862e..2d5fca92ee9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamRolePolicyAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go index 55496b41589..2a869dcea6c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamSamlProvider() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go index 644789ce8ff..658bc181f9b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go @@ -13,9 +13,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIAMServerCertificate() *schema.Resource { @@ -183,6 +183,12 @@ func resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interfac return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteServerCertificate(&iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String(d.Get("name").(string)), + }) + } + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_service_linked_role.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_service_linked_role.go index 9744dab0c98..726680101fd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_service_linked_role.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_service_linked_role.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamServiceLinkedRole() *schema.Resource { @@ -67,6 +67,12 @@ func resourceAwsIamServiceLinkedRole() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if strings.Contains(d.Get("aws_service_name").(string), ".application-autoscaling.") && new == "" { + return true + } + return false + }, }, "description": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go index 4f23ef8f12a..f9f93d7a53a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamUser() *schema.Resource { @@ -358,10 +358,11 @@ func deleteAwsIamUserMFADevices(svc *iam.IAM, username string) error { func deleteAwsIamUserLoginProfile(svc *iam.IAM, username string) error { var err error + input := &iam.DeleteLoginProfileInput{ + UserName: aws.String(username), + } err = resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err = svc.DeleteLoginProfile(&iam.DeleteLoginProfileInput{ - UserName: aws.String(username), - }) + _, err = svc.DeleteLoginProfile(input) if err != nil { if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { return nil @@ -374,7 +375,9 @@ func deleteAwsIamUserLoginProfile(svc *iam.IAM, username string) error { } return nil }) - + if isResourceTimeoutError(err) { + _, err = svc.DeleteLoginProfile(input) + } if err != nil { return fmt.Errorf("Error deleting Account Login Profile: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_group_membership.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_group_membership.go index 6012ccef017..503cd3b14d5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_group_membership.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_group_membership.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamUserGroupMembership() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go index 6900fc3aa5c..b56e4ee9d20 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go @@ -11,10 +11,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/encryption" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/encryption" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamUserLoginProfile() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go index 5bcccc25937..9b7ec1b345a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamUserPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go index 7e8ed3ad77a..7595d899831 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIamUserPolicyAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go index 241c3040f8e..0220a74dba2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIamUserSshKey() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go index dc34ef6604b..47bb7db954a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAWSInspectorAssessmentTarget() *schema.Resource { @@ -104,11 +104,11 @@ func resourceAwsInspectorAssessmentTargetUpdate(d *schema.ResourceData, meta int func resourceAwsInspectorAssessmentTargetDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).inspectorconn - - return resource.Retry(60*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteAssessmentTarget(&inspector.DeleteAssessmentTargetInput{ - AssessmentTargetArn: aws.String(d.Id()), - }) + input := &inspector.DeleteAssessmentTargetInput{ + AssessmentTargetArn: aws.String(d.Id()), + } + err := resource.Retry(60*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteAssessmentTarget(input) if isAWSErr(err, inspector.ErrCodeAssessmentRunInProgressException, "") { return resource.RetryableError(err) @@ -120,7 +120,13 @@ func resourceAwsInspectorAssessmentTargetDelete(d *schema.ResourceData, meta int return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.DeleteAssessmentTarget(input) + } + if err != nil { + return fmt.Errorf("Error deleting Inspector Assessment Target: %s", err) + } + return nil } func describeInspectorAssessmentTarget(conn *inspector.Inspector, arn string) (*inspector.AssessmentTarget, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go index 3a4b0d9182c..a00f4da565b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAWSInspectorAssessmentTemplate() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go index af0cf8180b4..f568bd504a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAWSInspectorResourceGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go index 8547976e6da..69645255d70 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go @@ -15,10 +15,10 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsInstance() *schema.Resource { @@ -336,6 +336,13 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "iops": { Type: schema.TypeInt, Optional: true, @@ -432,6 +439,20 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, }, + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "iops": { Type: schema.TypeInt, Optional: true, @@ -597,6 +618,9 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { 
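The new encrypted and kms_key_id block-device attributes above are declared Optional plus Computed, which means the provider must not send an empty value to the API when the user leaves them unset; the expansion hunks further down (in readBlockDeviceMappingsFromConfig) therefore guard every field with a type assertion and a zero-value check. A condensed sketch of that guard follows, with a hypothetical function name and map layout:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// expandEbs sets a field on the API input only when the config value is
// present and non-zero, leaving unset Optional+Computed attributes as nil
// pointers for AWS to fill in. The name and map keys are illustrative.
func expandEbs(bd map[string]interface{}) *ec2.EbsBlockDevice {
	ebs := &ec2.EbsBlockDevice{
		DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
	}
	if v, ok := bd["encrypted"].(bool); ok && v {
		ebs.Encrypted = aws.Bool(v)
	}
	if v, ok := bd["kms_key_id"].(string); ok && v != "" {
		ebs.KmsKeyId = aws.String(v)
	}
	if v, ok := bd["volume_size"].(int); ok && v != 0 {
		ebs.VolumeSize = aws.Int64(int64(v))
	}
	return ebs
}

func main() {
	ebs := expandEbs(map[string]interface{}{
		"delete_on_termination": true,
		"encrypted":             true,
		"kms_key_id":            "", // unset: stays nil, AWS applies its default key
		"volume_size":           20,
	})
	fmt.Println(ebs)
}
```

Leaving KmsKeyId nil rather than empty lets the Computed read pick up whatever key AWS actually used, instead of producing a perpetual diff against an empty string.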
} return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + runResp, err = conn.RunInstances(runOpts) + } // Warn if the AWS Error involves group ids, to help identify situation // where a user uses group ids in security_groups for the Default VPC. // See https://github.com/hashicorp/terraform/issues/3798 @@ -931,13 +955,14 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { if _, ok := d.GetOk("iam_instance_profile"); ok { // Does not have an Iam Instance Profile associated with it, need to associate if len(resp.IamInstanceProfileAssociations) == 0 { + input := &ec2.AssociateIamInstanceProfileInput{ + InstanceId: aws.String(d.Id()), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Name: aws.String(d.Get("iam_instance_profile").(string)), + }, + } err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.AssociateIamInstanceProfile(&ec2.AssociateIamInstanceProfileInput{ - InstanceId: aws.String(d.Id()), - IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ - Name: aws.String(d.Get("iam_instance_profile").(string)), - }, - }) + _, err := conn.AssociateIamInstanceProfile(input) if err != nil { if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { return resource.RetryableError(err) @@ -946,21 +971,24 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AssociateIamInstanceProfile(input) + } if err != nil { - return err + return fmt.Errorf("Error associating instance with instance profile: %s", err) } } else { // Has an Iam Instance Profile associated with it, need to replace the association associationId := resp.IamInstanceProfileAssociations[0].AssociationId - + input := &ec2.ReplaceIamInstanceProfileAssociationInput{ + AssociationId: associationId, + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Name: aws.String(d.Get("iam_instance_profile").(string)), + }, + } err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.ReplaceIamInstanceProfileAssociation(&ec2.ReplaceIamInstanceProfileAssociationInput{ - AssociationId: associationId, - IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ - Name: aws.String(d.Get("iam_instance_profile").(string)), - }, - }) + _, err := conn.ReplaceIamInstanceProfileAssociation(input) if err != nil { if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { return resource.RetryableError(err) @@ -969,8 +997,11 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ReplaceIamInstanceProfileAssociation(input) + } if err != nil { - return err + return fmt.Errorf("Error replacing instance profile association: %s", err) } } // An Iam Instance Profile has _not_ been provided but is pending a change. 
This means there is a pending removal @@ -1173,7 +1204,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("credit_specification") && !d.IsNewResource() { - if v, ok := d.GetOk("credit_specification"); ok { + if v, ok := d.GetOk("credit_specification"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { creditSpecification := v.([]interface{})[0].(map[string]interface{}) log.Printf("[DEBUG] Modifying credit specification for Instance (%s)", d.Id()) _, err := conn.ModifyInstanceCreditSpecification(&ec2.ModifyInstanceCreditSpecificationInput{ @@ -1202,7 +1233,12 @@ func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn err := awsTerminateInstance(conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - return err + + if err != nil { + return fmt.Errorf("error terminating EC2 Instance (%s): %s", d.Id(), err) + } + + return nil } // InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch @@ -1328,6 +1364,12 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st if vol.Iops != nil { bd["iops"] = *vol.Iops } + if vol.Encrypted != nil { + bd["encrypted"] = *vol.Encrypted + } + if vol.KmsKeyId != nil { + bd["kms_key_id"] = *vol.KmsKeyId + } if blockDeviceIsRoot(instanceBd, instance) { blockDevices["root"] = bd @@ -1335,9 +1377,6 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st if instanceBd.DeviceName != nil { bd["device_name"] = *instanceBd.DeviceName } - if vol.Encrypted != nil { - bd["encrypted"] = *vol.Encrypted - } if vol.SnapshotId != nil { bd["snapshot_id"] = *vol.SnapshotId } @@ -1370,7 +1409,7 @@ func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) { // For a bad image, return an error instead of nil so the failure is not silently swallowed if len(res.Images) == 0 { - return nil, nil + return nil, fmt.Errorf("No images found for AMI %s", ami) } image := res.Images[0] @@ -1378,7 +1417,7 @@ // Instance store backed AMIs do not provide a root device name.
if *image.RootDeviceType == ec2.DeviceTypeInstanceStore { - return nil, nil + return nil, fmt.Errorf("Instance store backed AMIs do not provide a root device name - Use an EBS AMI") } // Some AMIs have a RootDeviceName like "/dev/sda1" that does not appear as a @@ -1498,6 +1537,10 @@ func readBlockDeviceMappingsFromConfig( ebs.Encrypted = aws.Bool(v) } + if v, ok := bd["kms_key_id"].(string); ok && v != "" { + ebs.KmsKeyId = aws.String(v) + } + if v, ok := bd["volume_size"].(int); ok && v != 0 { ebs.VolumeSize = aws.Int64(int64(v)) } @@ -1555,6 +1598,14 @@ func readBlockDeviceMappingsFromConfig( DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), } + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + + if v, ok := bd["kms_key_id"].(string); ok && v != "" { + ebs.KmsKeyId = aws.String(bd["kms_key_id"].(string)) + } + if v, ok := bd["volume_size"].(int); ok && v != 0 { ebs.VolumeSize = aws.Int64(int64(v)) } @@ -1575,20 +1626,15 @@ func readBlockDeviceMappingsFromConfig( log.Print("[WARN] IOPS is only valid for storage type io1 for EBS Volumes") } - if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil { - if dn == nil { - return nil, fmt.Errorf( - "Expected 1 AMI for ID: %s, got none", - d.Get("ami").(string)) - } - - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: dn, - Ebs: ebs, - }) - } else { - return nil, err + dn, err := fetchRootDeviceName(d.Get("ami").(string), conn) + if err != nil { + return nil, fmt.Errorf("Expected 1 AMI for ID: %s (%s)", d.Get("ami").(string), err) } + + blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ + DeviceName: dn, + Ebs: ebs, + }) } } @@ -1691,11 +1737,13 @@ func getAwsEc2InstancePasswordData(instanceID string, conn *ec2.EC2) (string, er log.Printf("[INFO] Reading password data for instance %s", instanceID) var passwordData string - + var resp *ec2.GetPasswordDataOutput + input := &ec2.GetPasswordDataInput{ + InstanceId: aws.String(instanceID), + } err := resource.Retry(15*time.Minute, func() *resource.RetryError { - resp, err := conn.GetPasswordData(&ec2.GetPasswordDataInput{ - InstanceId: aws.String(instanceID), - }) + var err error + resp, err = conn.GetPasswordData(input) if err != nil { return resource.NonRetryableError(err) @@ -1710,7 +1758,16 @@ func getAwsEc2InstancePasswordData(instanceID string, conn *ec2.EC2) (string, er log.Printf("[INFO] Password data read for instance %s", instanceID) return nil }) - + if isResourceTimeoutError(err) { + resp, err = conn.GetPasswordData(input) + if err != nil { + return "", fmt.Errorf("Error getting password data: %s", err) + } + if resp.PasswordData == nil || *resp.PasswordData == "" { + return "", fmt.Errorf("Password data is blank for instance ID: %s", instanceID) + } + passwordData = strings.TrimSpace(*resp.PasswordData) + } if err != nil { return "", err } @@ -1764,9 +1821,12 @@ func buildAwsInstanceOpts( if v, ok := d.GetOk("credit_specification"); ok { // Only T2 and T3 are burstable performance instance types and support Unlimited if strings.HasPrefix(instanceType, "t2") || strings.HasPrefix(instanceType, "t3") { - cs := v.([]interface{})[0].(map[string]interface{}) - opts.CreditSpecification = &ec2.CreditSpecificationRequest{ - CpuCredits: aws.String(cs["cpu_credits"].(string)), + if cs, ok := v.([]interface{})[0].(map[string]interface{}); ok { + opts.CreditSpecification = &ec2.CreditSpecificationRequest{ + CpuCredits: aws.String(cs["cpu_credits"].(string)), + } + } else { + log.Print("[WARN]
credit_specification is defined but the value of cpu_credits is missing, default value will be used.") } } else { log.Print("[WARN] credit_specification is defined but instance type is not T2/T3. Ignoring...") @@ -1912,7 +1972,7 @@ func awsTerminateInstance(conn *ec2.EC2, id string, timeout time.Duration) error if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { return nil } - return fmt.Errorf("Error terminating instance: %s", err) + return err } return waitForInstanceDeletion(conn, id, timeout) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go index 31f28b39f85..17065d1e79c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsInstanceMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go index befc93f2921..211f570b467 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsInternetGateway() *schema.Resource { @@ -51,9 +51,9 @@ func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) ig := *resp.InternetGateway d.SetId(*ig.InternetGatewayId) log.Printf("[INFO] InternetGateway ID: %s", d.Id()) - + var igRaw interface{} err = resource.Retry(5*time.Minute, func() *resource.RetryError { - igRaw, _, err := IGStateRefreshFunc(conn, d.Id())() + igRaw, _, err = IGStateRefreshFunc(conn, d.Id())() if igRaw != nil { return nil } @@ -63,9 +63,14 @@ func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) return resource.NonRetryableError(err) } }) - + if isResourceTimeoutError(err) { + igRaw, _, err = IGStateRefreshFunc(conn, d.Id())() + if igRaw == nil { + return fmt.Errorf("error finding Internet Gateway (%s) after creation; retry running Terraform", d.Id()) + } + } if err != nil { - return fmt.Errorf("%s", err) + return fmt.Errorf("Error refreshing internet gateway state: %s", err) } err = setTags(conn, d) @@ -142,29 +147,32 @@ func resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[INFO] Deleting Internet Gateway: %s", d.Id()) - - return resource.Retry(10*time.Minute, func() 
*resource.RetryError { - _, err := conn.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{ - InternetGatewayId: aws.String(d.Id()), - }) + input := &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String(d.Id()), + } + err := resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteInternetGateway(input) if err == nil { return nil } - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) + if isAWSErr(err, "InvalidInternetGatewayID.NotFound", "") { + return nil } - switch ec2err.Code() { - case "InvalidInternetGatewayID.NotFound": - return nil - case "DependencyViolation": - return resource.RetryableError(err) // retry + if isAWSErr(err, "DependencyViolation", "") { + return resource.RetryableError(err) } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteInternetGateway(input) + } + if err != nil { + return fmt.Errorf("Error deleting internet gateway: %s", err) + } + return nil } func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error { @@ -181,25 +189,26 @@ func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) "[INFO] Attaching Internet Gateway '%s' to VPC '%s'", d.Id(), d.Get("vpc_id").(string)) - + input := &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String(d.Id()), + VpcId: aws.String(d.Get("vpc_id").(string)), + } err := resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ - InternetGatewayId: aws.String(d.Id()), - VpcId: aws.String(d.Get("vpc_id").(string)), - }) + _, err := conn.AttachInternetGateway(input) if err == nil { return nil } - if ec2err, ok := err.(awserr.Error); ok { - switch ec2err.Code() { - case "InvalidInternetGatewayID.NotFound": - return resource.RetryableError(err) // retry - } + if isAWSErr(err, "InvalidInternetGatewayID.NotFound", "") { + return resource.RetryableError(err) } + return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.AttachInternetGateway(input) + } if err != nil { - return err + return fmt.Errorf("Error attaching internet gateway: %s", err) } // A note on the states below: the AWS docs (as of July, 2014) say diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_certificate.go index 48949fd054f..5ea3194d60a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_certificate.go @@ -1,11 +1,12 @@ package aws import ( + "fmt" "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIotCertificate() *schema.Resource { @@ -17,7 +18,7 @@ func resourceAwsIotCertificate() *schema.Resource { Schema: map[string]*schema.Schema{ "csr": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, "active": { @@ -28,6 +29,21 @@ func resourceAwsIotCertificate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "certificate_pem": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + 
"public_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "private_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, }, } } @@ -35,19 +51,32 @@ func resourceAwsIotCertificate() *schema.Resource { func resourceAwsIotCertificateCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).iotconn - log.Printf("[DEBUG] Creating certificate from csr") - out, err := conn.CreateCertificateFromCsr(&iot.CreateCertificateFromCsrInput{ - CertificateSigningRequest: aws.String(d.Get("csr").(string)), - SetAsActive: aws.Bool(d.Get("active").(bool)), - }) + if _, ok := d.GetOk("csr"); ok { + log.Printf("[DEBUG] Creating certificate from CSR") + out, err := conn.CreateCertificateFromCsr(&iot.CreateCertificateFromCsrInput{ + CertificateSigningRequest: aws.String(d.Get("csr").(string)), + SetAsActive: aws.Bool(d.Get("active").(bool)), + }) + if err != nil { + return fmt.Errorf("error creating certificate from CSR: %v", err) + } + log.Printf("[DEBUG] Created certificate from CSR") - if err != nil { - log.Printf("[ERROR] %s", err) - return err - } - log.Printf("[DEBUG] Created certificate from csr") + d.SetId(*out.CertificateId) + } else { + log.Printf("[DEBUG] Creating keys and certificate") + out, err := conn.CreateKeysAndCertificate(&iot.CreateKeysAndCertificateInput{ + SetAsActive: aws.Bool(d.Get("active").(bool)), + }) + if err != nil { + return fmt.Errorf("error creating keys and certificate: %v", err) + } + log.Printf("[DEBUG] Created keys and certificate") - d.SetId(*out.CertificateId) + d.SetId(*out.CertificateId) + d.Set("public_key", *out.KeyPair.PublicKey) + d.Set("private_key", *out.KeyPair.PrivateKey) + } return resourceAwsIotCertificateRead(d, meta) } @@ -58,14 +87,13 @@ func resourceAwsIotCertificateRead(d *schema.ResourceData, meta interface{}) err out, err := conn.DescribeCertificate(&iot.DescribeCertificateInput{ CertificateId: aws.String(d.Id()), }) - if err != nil { - log.Printf("[ERROR] %s", err) - return err + return fmt.Errorf("error reading certificate details: %v", err) } d.Set("active", aws.Bool(*out.CertificateDescription.Status == iot.CertificateStatusActive)) d.Set("arn", out.CertificateDescription.CertificateArn) + d.Set("certificate_pem", out.CertificateDescription.CertificatePem) return nil } @@ -83,10 +111,8 @@ func resourceAwsIotCertificateUpdate(d *schema.ResourceData, meta interface{}) e CertificateId: aws.String(d.Id()), NewStatus: aws.String(status), }) - if err != nil { - log.Printf("[ERROR] %s", err) - return err + return fmt.Errorf("error updating certificate: %v", err) } } @@ -100,19 +126,15 @@ func resourceAwsIotCertificateDelete(d *schema.ResourceData, meta interface{}) e CertificateId: aws.String(d.Id()), NewStatus: aws.String("INACTIVE"), }) - if err != nil { - log.Printf("[ERROR], %s", err) - return err + return fmt.Errorf("error inactivating certificate: %v", err) } _, err = conn.DeleteCertificate(&iot.DeleteCertificateInput{ CertificateId: aws.String(d.Id()), }) - if err != nil { - log.Printf("[ERROR] %s", err) - return err + return fmt.Errorf("error deleting certificate: %v", err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy.go index 724540e7404..19ad04456af 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIotPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy_attachment.go index eba80e6fecc..9e2ce528b90 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_policy_attachment.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIotPolicyAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_role_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_role_alias.go index e884469d399..0d86b96dfcb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_role_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_role_alias.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIotRoleAlias() *schema.Resource { @@ -20,6 +20,10 @@ func resourceAwsIotRoleAlias() *schema.Resource { State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "alias": { Type: schema.TypeString, Required: true, @@ -94,6 +98,7 @@ func resourceAwsIotRoleAliasRead(d *schema.ResourceData, meta interface{}) error return nil } + d.Set("arn", roleAliasDescription.RoleAliasArn) d.Set("alias", roleAliasDescription.RoleAlias) d.Set("role_arn", roleAliasDescription.RoleArn) d.Set("credential_duration", roleAliasDescription.CredentialDurationSeconds) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing.go index 3fef149f88e..19c4b44d1c9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsIotThing() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_principal_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_principal_attachment.go index 4cc9e939524..6452cbaa373 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_principal_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_principal_attachment.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIotThingPrincipalAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_type.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_type.go index ac483df1d35..cf8bb72b861 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_type.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_thing_type.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // https://docs.aws.amazon.com/iot/latest/apireference/API_CreateThingType.html @@ -185,7 +185,7 @@ func resourceAwsIotThingTypeDelete(d *schema.ResourceData, meta interface{}) err } log.Printf("[DEBUG] Deleting IoT Thing Type: %s", deleteParams) - return resource.Retry(6*time.Minute, func() *resource.RetryError { + err = resource.Retry(6*time.Minute, func() *resource.RetryError { _, err := conn.DeleteThingType(deleteParams) if err != nil { @@ -204,4 +204,14 @@ func resourceAwsIotThingTypeDelete(d *schema.ResourceData, meta interface{}) err return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteThingType(deleteParams) + if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting IOT thing type: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_topic_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_topic_rule.go index 42cd8900c6b..f1bdbcdf4b4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_topic_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iot_topic_rule.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iot" - 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsIotTopicRule() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go index 2fc2476397d..6541b5d0f4a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go @@ -4,9 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go index c937ac360f0..1f3560969c9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsKeyPairMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_analytics_application.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_analytics_application.go index 79eac8f8829..aff14a988fe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_analytics_application.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_analytics_application.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesisanalytics" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsKinesisAnalyticsApplication() *schema.Resource { @@ -552,6 +552,7 @@ func resourceAwsKinesisAnalyticsApplication() *schema.Resource { }, }, }, + "tags": tagsSchema(), }, } } @@ -588,6 +589,10 @@ func resourceAwsKinesisAnalyticsApplicationCreate(d *schema.ResourceData, meta i createOpts.Outputs = outputs } + if v, ok := d.GetOk("tags"); ok { + createOpts.Tags = tagsFromMapKinesisAnalytics(v.(map[string]interface{})) + } + // Retry for IAM eventual consistency err := resource.Retry(1*time.Minute, func() *resource.RetryError { output, err := conn.CreateApplication(createOpts) 
@@ -609,6 +614,15 @@ func resourceAwsKinesisAnalyticsApplicationCreate(d *schema.ResourceData, meta i d.SetId(aws.StringValue(output.ApplicationSummary.ApplicationARN)) return nil }) + + if isResourceTimeoutError(err) { + var output *kinesisanalytics.CreateApplicationOutput + output, err = conn.CreateApplication(createOpts) + if err == nil { + d.SetId(aws.StringValue(output.ApplicationSummary.ApplicationARN)) + } + } + if err != nil { return fmt.Errorf("Unable to create Kinesis Analytics application: %s", err) } @@ -658,6 +670,10 @@ func resourceAwsKinesisAnalyticsApplicationRead(d *schema.ResourceData, meta int return fmt.Errorf("error setting reference_data_sources: %s", err) } + if err := getTagsKinesisAnalytics(conn, d); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } @@ -713,6 +729,10 @@ func resourceAwsKinesisAnalyticsApplicationUpdate(d *schema.ResourceData, meta i } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AddApplicationCloudWatchLoggingOption(addOpts) + } + if err != nil { return fmt.Errorf("Unable to add CloudWatch logging options: %s", err) } @@ -745,6 +765,10 @@ func resourceAwsKinesisAnalyticsApplicationUpdate(d *schema.ResourceData, meta i } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AddApplicationInput(addOpts) + } + if err != nil { return fmt.Errorf("Unable to add application inputs: %s", err) } @@ -777,12 +801,20 @@ func resourceAwsKinesisAnalyticsApplicationUpdate(d *schema.ResourceData, meta i } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AddApplicationOutput(addOpts) + } if err != nil { return fmt.Errorf("Unable to add application outputs: %s", err) } version = version + 1 } } + + if err := setTagsKinesisAnalytics(conn, d); err != nil { + return fmt.Errorf("Error updating resource tags for %s: %s", d.Id(), err) + } + } oldReferenceData, newReferenceData := d.GetChange("reference_data_sources") @@ -807,6 +839,9 @@ func resourceAwsKinesisAnalyticsApplicationUpdate(d *schema.ResourceData, meta i } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AddApplicationReferenceDataSource(addOpts) + } if err != nil { return fmt.Errorf("Unable to add application reference data source: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go index fd72e32e1a2..85d07a02a8b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -10,10 +10,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +const ( + firehoseDeliveryStreamStatusDeleted = "DESTROYED" ) func
cloudWatchLoggingOptionsSchema() *schema.Schema { @@ -98,9 +102,10 @@ func s3ConfigurationSchema() *schema.Schema { func processingConfigurationSchema() *schema.Schema { return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -328,11 +333,25 @@ func flattenFirehoseDataFormatConversionConfiguration(dfcc *firehose.DataFormatC return []map[string]interface{}{} } + enabled := aws.BoolValue(dfcc.Enabled) + ifc := flattenFirehoseInputFormatConfiguration(dfcc.InputFormatConfiguration) + ofc := flattenFirehoseOutputFormatConfiguration(dfcc.OutputFormatConfiguration) + sc := flattenFirehoseSchemaConfiguration(dfcc.SchemaConfiguration) + + // The AWS SDK can represent "no data format conversion configuration" in two ways: + // 1. With a nil value + // 2. With enabled set to false and nil for ALL the config sections. + // We normalize this with an empty configuration in the state due + // to the existing Default: true on the enabled attribute. + if !enabled && len(ifc) == 0 && len(ofc) == 0 && len(sc) == 0 { + return []map[string]interface{}{} + } + m := map[string]interface{}{ - "enabled": aws.BoolValue(dfcc.Enabled), - "input_format_configuration": flattenFirehoseInputFormatConfiguration(dfcc.InputFormatConfiguration), - "output_format_configuration": flattenFirehoseOutputFormatConfiguration(dfcc.OutputFormatConfiguration), - "schema_configuration": flattenFirehoseSchemaConfiguration(dfcc.SchemaConfiguration), + "enabled": enabled, + "input_format_configuration": ifc, + "output_format_configuration": ofc, + "schema_configuration": sc, } return []map[string]interface{}{m} @@ -579,8 +598,19 @@ func flattenProcessingConfiguration(pc *firehose.ProcessingConfiguration, roleAr func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.DeliveryStreamDescription) error { d.Set("version_id", s.VersionId) - d.Set("arn", *s.DeliveryStreamARN) + d.Set("arn", s.DeliveryStreamARN) d.Set("name", s.DeliveryStreamName) + + sseOptions := map[string]interface{}{ + "enabled": false, + } + if s.DeliveryStreamEncryptionConfiguration != nil && aws.StringValue(s.DeliveryStreamEncryptionConfiguration.Status) == firehose.DeliveryStreamEncryptionStatusEnabled { + sseOptions["enabled"] = true + } + if err := d.Set("server_side_encryption", []map[string]interface{}{sseOptions}); err != nil { + return fmt.Errorf("error setting server_side_encryption: %s", err) + } + if len(s.Destinations) > 0 { destination := s.Destinations[0] if destination.RedshiftDestinationDescription != nil { @@ -666,11 +696,29 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { "tags": tagsSchema(), + "server_side_encryption": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + ConflictsWith: []string{"kinesis_source_configuration"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "kinesis_source_configuration": { - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + ForceNew: true, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"server_side_encryption"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kinesis_stream_arn": { @@ -1501,7 +1549,11 @@ 
func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat func expandFirehoseDataFormatConversionConfiguration(l []interface{}) *firehose.DataFormatConversionConfiguration { if len(l) == 0 || l[0] == nil { - return nil + // It is possible to just pass nil here, but this seems to be the + // canonical form that AWS uses, and is less likely to produce diffs. + return &firehose.DataFormatConversionConfiguration{ + Enabled: aws.Bool(false), + } } m := l[0].(map[string]interface{}) @@ -1676,7 +1728,12 @@ func expandFirehoseSchemaConfiguration(l []interface{}) *firehose.SchemaConfigur func extractProcessingConfiguration(s3 map[string]interface{}) *firehose.ProcessingConfiguration { config := s3["processing_configuration"].([]interface{}) if len(config) == 0 { - return nil + // It is possible to just pass nil here, but this seems to be the + // canonical form that AWS uses, and is less likely to produce diffs. + return &firehose.ProcessingConfiguration{ + Enabled: aws.Bool(false), + Processors: []*firehose.Processor{}, + } } processingConfiguration := config[0].(map[string]interface{}) @@ -2078,6 +2135,10 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta } } + if v, ok := d.GetOk("tags"); ok { + createInput.Tags = tagsFromMapKinesisFirehose(v.(map[string]interface{})) + } + err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := conn.CreateDeliveryStream(createInput) if err != nil { @@ -2103,34 +2164,32 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateDeliveryStream(createInput) + } if err != nil { return fmt.Errorf("error creating Kinesis Firehose Delivery Stream: %s", err) } - stateConf := &resource.StateChangeConf{ - Pending: []string{"CREATING"}, - Target: []string{"ACTIVE"}, - Refresh: firehoseStreamStateRefreshFunc(conn, sn), - Timeout: 20 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - firehoseStream, err := stateConf.WaitForState() + s, err := waitForKinesisFirehoseDeliveryStreamCreation(conn, sn) if err != nil { - return fmt.Errorf( - "Error waiting for Kinesis Stream (%s) to become active: %s", - sn, err) + return fmt.Errorf("error waiting for Kinesis Firehose Delivery Stream (%s) creation: %s", sn, err) } - s := firehoseStream.(*firehose.DeliveryStreamDescription) - d.SetId(*s.DeliveryStreamARN) + d.SetId(aws.StringValue(s.DeliveryStreamARN)) d.Set("arn", s.DeliveryStreamARN) - if err := setTagsKinesisFirehose(conn, d, sn); err != nil { - return fmt.Errorf( - "Error setting for Kinesis Stream (%s) tags: %s", - sn, err) + if v, ok := d.GetOk("server_side_encryption"); ok && !isKinesisFirehoseDeliveryStreamOptionDisabled(v) { + _, err := conn.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{ + DeliveryStreamName: aws.String(sn), + }) + if err != nil { + return fmt.Errorf("error starting Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) + } + + if err := waitForKinesisFirehoseDeliveryStreamSSEEnabled(conn, sn); err != nil { + return fmt.Errorf("error waiting for Kinesis Firehose Delivery Stream (%s) encryption to be enabled: %s", sn, err) + } } return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) @@ -2215,7 +2274,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := conn.UpdateDestination(updateInput) if err != nil { - 
log.Printf("[DEBUG] Error creating Firehose Delivery Stream: %s", err) + log.Printf("[DEBUG] Error updating Firehose Delivery Stream: %s", err) // Retry for IAM eventual consistency if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { @@ -2238,6 +2297,10 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta return nil }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateDestination(updateInput) + } + if err != nil { return fmt.Errorf( "Error Updating Kinesis Firehose Delivery Stream: \"%s\"\n%s", @@ -2250,6 +2313,33 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta sn, err) } + if d.HasChange("server_side_encryption") { + _, n := d.GetChange("server_side_encryption") + if isKinesisFirehoseDeliveryStreamOptionDisabled(n) { + _, err := conn.StopDeliveryStreamEncryption(&firehose.StopDeliveryStreamEncryptionInput{ + DeliveryStreamName: aws.String(sn), + }) + if err != nil { + return fmt.Errorf("error stopping Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) + } + + if err := waitForKinesisFirehoseDeliveryStreamSSEDisabled(conn, sn); err != nil { + return fmt.Errorf("error waiting for Kinesis Firehose Delivery Stream (%s) encryption to be disabled: %s", sn, err) + } + } else { + _, err := conn.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{ + DeliveryStreamName: aws.String(sn), + }) + if err != nil { + return fmt.Errorf("error starting Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) + } + + if err := waitForKinesisFirehoseDeliveryStreamSSEEnabled(conn, sn); err != nil { + return fmt.Errorf("error waiting for Kinesis Firehose Delivery Stream (%s) encryption to be enabled: %s", sn, err) + } + } + } + return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) } @@ -2290,42 +2380,69 @@ func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta _, err := conn.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{ DeliveryStreamName: aws.String(sn), }) - if err != nil { return fmt.Errorf("error deleting Kinesis Firehose Delivery Stream (%s): %s", sn, err) } if err := waitForKinesisFirehoseDeliveryStreamDeletion(conn, sn); err != nil { - return fmt.Errorf( - "Error waiting for Delivery Stream (%s) to be destroyed: %s", - sn, err) + return fmt.Errorf("error waiting for Kinesis Firehose Delivery Stream (%s) deletion: %s", sn, err) } return nil } -func firehoseStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc { +func firehoseDeliveryStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - describeOpts := &firehose.DescribeDeliveryStreamInput{ + resp, err := conn.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{ DeliveryStreamName: aws.String(sn), - } - resp, err := conn.DescribeDeliveryStream(describeOpts) + }) if err != nil { if isAWSErr(err, firehose.ErrCodeResourceNotFoundException, "") { - return 42, "DESTROYED", nil + return &firehose.DeliveryStreamDescription{}, firehoseDeliveryStreamStatusDeleted, nil } - return nil, "failed", err + return nil, "", err + } + + return resp.DeliveryStreamDescription, aws.StringValue(resp.DeliveryStreamDescription.DeliveryStreamStatus), nil + } +} + +func firehoseDeliveryStreamSSEStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := 
conn.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String(sn), + }) + if err != nil { + return nil, "", err } - return resp.DeliveryStreamDescription, *resp.DeliveryStreamDescription.DeliveryStreamStatus, nil + return resp.DeliveryStreamDescription, aws.StringValue(resp.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration.Status), nil + } +} + +func waitForKinesisFirehoseDeliveryStreamCreation(conn *firehose.Firehose, deliveryStreamName string) (*firehose.DeliveryStreamDescription, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{firehose.DeliveryStreamStatusCreating}, + Target: []string{firehose.DeliveryStreamStatusActive}, + Refresh: firehoseDeliveryStreamStateRefreshFunc(conn, deliveryStreamName), + Timeout: 20 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + v, err := stateConf.WaitForState() + if err != nil { + return nil, err } + + return v.(*firehose.DeliveryStreamDescription), nil } func waitForKinesisFirehoseDeliveryStreamDeletion(conn *firehose.Firehose, deliveryStreamName string) error { stateConf := &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DESTROYED"}, - Refresh: firehoseStreamStateRefreshFunc(conn, deliveryStreamName), + Pending: []string{firehose.DeliveryStreamStatusDeleting}, + Target: []string{firehoseDeliveryStreamStatusDeleted}, + Refresh: firehoseDeliveryStreamStateRefreshFunc(conn, deliveryStreamName), Timeout: 20 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, @@ -2335,3 +2452,49 @@ func waitForKinesisFirehoseDeliveryStreamDeletion(conn *firehose.Firehose, deliv return err } + +func waitForKinesisFirehoseDeliveryStreamSSEEnabled(conn *firehose.Firehose, deliveryStreamName string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{firehose.DeliveryStreamEncryptionStatusEnabling}, + Target: []string{firehose.DeliveryStreamEncryptionStatusEnabled}, + Refresh: firehoseDeliveryStreamSSEStateRefreshFunc(conn, deliveryStreamName), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForKinesisFirehoseDeliveryStreamSSEDisabled(conn *firehose.Firehose, deliveryStreamName string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{firehose.DeliveryStreamEncryptionStatusDisabling}, + Target: []string{firehose.DeliveryStreamEncryptionStatusDisabled}, + Refresh: firehoseDeliveryStreamSSEStateRefreshFunc(conn, deliveryStreamName), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func isKinesisFirehoseDeliveryStreamOptionDisabled(v interface{}) bool { + options := v.([]interface{}) + if len(options) == 0 || options[0] == nil { + return true + } + m := options[0].(map[string]interface{}) + + var enabled bool + + if v, ok := m["enabled"]; ok { + enabled = v.(bool) + } + + return !enabled +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go index 7ed8bfa336f..9ae0f9bb92a 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsKinesisFirehoseMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go index a0a2eb31122..48272ebfff3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsKinesisStream() *schema.Resource { @@ -25,6 +25,15 @@ func resourceAwsKinesisStream() *schema.Resource { State: resourceAwsKinesisStreamImport, }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceAwsKinesisStreamResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: resourceAwsKinesisStreamStateUpgradeV0, + Version: 0, + }, + }, + Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(120 * time.Minute), @@ -57,6 +66,12 @@ func resourceAwsKinesisStream() *schema.Resource { Set: schema.HashString, }, + "enforce_consumer_deletion": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "encryption_type": { Type: schema.TypeString, Optional: true, @@ -204,7 +219,8 @@ func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) er sn := d.Get("name").(string) _, err := conn.DeleteStream(&kinesis.DeleteStreamInput{ - StreamName: aws.String(sn), + StreamName: aws.String(sn), + EnforceConsumerDeletion: aws.Bool(d.Get("enforce_consumer_deletion").(bool)), }) if err != nil { return err diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream_migrate.go new file mode 100644 index 00000000000..842d9b26e79 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream_migrate.go @@ -0,0 +1,59 @@ +package aws + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsKinesisStreamResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "shard_count": { + Type: schema.TypeInt, + Required: true, + }, + + "retention_period": { + 
Type: schema.TypeInt, + Optional: true, + Default: 24, + }, + + "shard_level_metrics": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "encryption_type": { + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsKinesisStreamStateUpgradeV0(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + rawState["enforce_consumer_deletion"] = false + + return rawState, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go index 913f6629552..5371dbfbed5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_ciphertext.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_ciphertext.go index 10dccf21dbf..9bda95a7cee 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_ciphertext.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_ciphertext.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsKmsCiphertext() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_external_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_external_key.go index 8cb1836e4ae..46b4a446e50 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_external_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_external_key.go @@ -12,10 +12,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsKmsExternalKey() 
*schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_grant.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_grant.go index cf5fd72bb67..f66ea30e65b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_grant.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_grant.go @@ -10,10 +10,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsKmsGrant() *schema.Resource { @@ -149,7 +149,6 @@ func resourceAwsKmsGrantCreate(d *schema.ResourceData, meta interface{}) error { err := resource.Retry(3*time.Minute, func() *resource.RetryError { var err error - out, err = conn.CreateGrant(&input) if err != nil { @@ -169,8 +168,12 @@ func resourceAwsKmsGrantCreate(d *schema.ResourceData, meta interface{}) error { return nil }) + if isResourceTimeoutError(err) { + out, err = conn.CreateGrant(&input) + } + if err != nil { - return err + return fmt.Errorf("Error creating KMS grant: %s", err) } log.Printf("[DEBUG] Created new KMS Grant: %s", *out.GrantId) @@ -333,14 +336,19 @@ func findKmsGrantByIdWithRetry(conn *kms.KMS, keyId string, grantId string) (*km return nil }) + if isResourceTimeoutError(err) { + grant, err = findKmsGrantById(conn, keyId, grantId, nil) + } return grant, err } // Used by the tests as well func waitForKmsGrantToBeRevoked(conn *kms.KMS, keyId string, grantId string) error { + var grant *kms.GrantListEntry err := resource.Retry(3*time.Minute, func() *resource.RetryError { - grant, err := findKmsGrantById(conn, keyId, grantId, nil) + var err error + grant, err = findKmsGrantById(conn, keyId, grantId, nil) if isResourceNotFoundError(err) { return nil @@ -354,6 +362,9 @@ func waitForKmsGrantToBeRevoked(conn *kms.KMS, keyId string, grantId string) err return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + grant, err = findKmsGrantById(conn, keyId, grantId, nil) + } return err } @@ -387,6 +398,10 @@ func findKmsGrantById(conn *kms.KMS, keyId string, grantId string, marker *strin return nil }) + if isResourceTimeoutError(err) { + out, err = conn.ListGrants(&input) + } + if err != nil { return nil, fmt.Errorf("error listing KMS Grants: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go index 477ac628ca6..e88b245f3a9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go @@ -8,10 +8,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" 
"github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsKmsKey() *schema.Resource { @@ -108,6 +108,9 @@ func resourceAwsKmsKeyCreate(d *schema.ResourceData, meta interface{}) error { } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateKey(&req) + } if err != nil { return err } @@ -335,18 +338,7 @@ func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error { shouldEnableRotation := d.Get("enable_key_rotation").(bool) err := resource.Retry(10*time.Minute, func() *resource.RetryError { - var err error - if shouldEnableRotation { - log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id()) - _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{ - KeyId: aws.String(d.Id()), - }) - } else { - log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id()) - _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{ - KeyId: aws.String(d.Id()), - }) - } + err := handleKeyRotation(conn, shouldEnableRotation, aws.String(d.Id())) if err != nil { awsErr, ok := err.(awserr.Error) @@ -362,6 +354,9 @@ func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error { return nil }) + if isResourceTimeoutError(err) { + err = handleKeyRotation(conn, shouldEnableRotation, aws.String(d.Id())) + } if err != nil { return fmt.Errorf("Failed to set key rotation for %q to %t: %q", @@ -404,6 +399,22 @@ func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error { return nil } +func handleKeyRotation(conn *kms.KMS, shouldEnableRotation bool, keyId *string) error { + var err error + if shouldEnableRotation { + log.Printf("[DEBUG] Enabling key rotation for KMS key %q", *keyId) + _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{ + KeyId: keyId, + }) + } else { + log.Printf("[DEBUG] Disabling key rotation for KMS key %q", *keyId) + _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{ + KeyId: keyId, + }) + } + return err +} + func resourceAwsKmsKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { conn := meta.(*AWSClient).kmsconn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go index 37181aba20c..069a951d3e4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLambdaAlias() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go index 8cc414923b5..55ee33d3008 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go @@ -13,9 +13,9 @@ import ( "github.com/aws/aws-sdk-go/service/lambda" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLambdaEventSourceMapping() *schema.Resource { @@ -160,8 +160,10 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte // // The role may exist, but the permissions may not have propagated, so we // retry - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params) + var eventSourceMappingConfiguration *lambda.EventSourceMappingConfiguration + var err error + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + eventSourceMappingConfiguration, err = conn.CreateEventSourceMapping(params) if err != nil { if awserr, ok := err.(awserr.Error); ok { if awserr.Code() == "InvalidParameterValueException" { @@ -170,16 +172,18 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte } return resource.NonRetryableError(err) } - // No error - d.Set("uuid", eventSourceMappingConfiguration.UUID) - d.SetId(*eventSourceMappingConfiguration.UUID) return nil }) - + if isResourceTimeoutError(err) { + eventSourceMappingConfiguration, err = conn.CreateEventSourceMapping(params) + } if err != nil { return fmt.Errorf("Error creating Lambda event source mapping: %s", err) } + // No error + d.Set("uuid", eventSourceMappingConfiguration.UUID) + d.SetId(*eventSourceMappingConfiguration.UUID) return resourceAwsLambdaEventSourceMappingRead(d, meta) } @@ -196,7 +200,7 @@ func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interf eventSourceMappingConfiguration, err := conn.GetEventSourceMapping(params) if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + if isAWSErr(err, "ResourceNotFoundException", "") { log.Printf("[DEBUG] Lambda event source mapping (%s) not found", d.Id()) d.SetId("") @@ -250,7 +254,9 @@ func resourceAwsLambdaEventSourceMappingDelete(d *schema.ResourceData, meta inte } return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.DeleteEventSourceMapping(params) + } if err != nil { return fmt.Errorf("Error deleting Lambda event source mapping: %s", err) } @@ -284,7 +290,9 @@ func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta inte } return nil }) - + if isResourceTimeoutError(err) { + _, err = conn.UpdateEventSourceMapping(params) + } if err != nil { return fmt.Errorf("Error updating Lambda event source mapping: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go index 17924855ac2..4afdd22eb25 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go @@ -14,9 +14,9 @@ import ( "errors" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const awsMutexLambdaKey = `aws_lambda_function` @@ -32,6 +32,7 @@ var validLambdaRuntimes = []string{ lambda.RuntimeNodejs43Edge, lambda.RuntimeNodejs610, lambda.RuntimeNodejs810, + lambda.RuntimeNodejs10X, lambda.RuntimeProvided, lambda.RuntimePython27, lambda.RuntimePython36, @@ -433,6 +434,9 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateFunction(params) + } if err != nil { return fmt.Errorf("Error creating Lambda function: %s", err) } @@ -459,6 +463,9 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutFunctionConcurrency(concurrencyParams) + } if err != nil { return fmt.Errorf("Error setting concurrency for Lambda %s: %s", functionName, err) } @@ -801,6 +808,9 @@ func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.UpdateFunctionConfiguration(configReq) + } if err != nil { return fmt.Errorf("Error modifying Lambda Function Configuration %s: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_layer_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_layer_version.go index e0273e70342..92d564f38f2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_layer_version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_layer_version.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws" arn2 "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const awsMutexLambdaLayerKey = `aws_lambda_layer_version` diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go index 1382612d9da..762b575abb5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go @@ -11,8 +11,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) var LambdaFunctionRegexp = `^(arn:[\w-]+:lambda:)?([a-z]{2}-(?:[a-z]+-){1,2}\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?$` @@ -22,6 +22,9 @@ func resourceAwsLambdaPermission() *schema.Resource { Create: resourceAwsLambdaPermissionCreate, Read: resourceAwsLambdaPermissionRead, Delete: resourceAwsLambdaPermissionDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsLambdaPermissionImport, + }, Schema: map[string]*schema.Schema{ "action": { @@ -131,21 +134,18 @@ func resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{}) var err error out, err = conn.AddPermission(&input) - if isAWSErr(err, lambda.ErrCodeResourceConflictException, "") { + if isAWSErr(err, lambda.ErrCodeResourceConflictException, "") || isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { return resource.RetryableError(err) } - - if isAWSErr(err, lambda.ErrCodeResourceNotFoundException, "") { - return resource.RetryableError(err) - } - if err != nil { return resource.NonRetryableError(err) } return nil }) - + if isResourceTimeoutError(err) { + out, err = conn.AddPermission(&input) + } if err != nil { return fmt.Errorf("Error adding new Lambda Permission for %s: %s", functionName, err) } @@ -178,8 +178,13 @@ func resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{}) } return nil }) - - return err + if isResourceTimeoutError(err) { + err = resourceAwsLambdaPermissionRead(d, meta) + } + if err != nil { + return fmt.Errorf("Error reading new Lambda permissions: %s", err) + } + return nil } func resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) error { @@ -208,16 +213,23 @@ func resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) e return resource.NonRetryableError(err) } - policyInBytes := []byte(*out.Policy) - policy := LambdaPolicy{} - err = json.Unmarshal(policyInBytes, &policy) + statement, err = getLambdaPolicyStatement(out, d.Id()) if err != nil { - return resource.NonRetryableError(err) + return resource.RetryableError(err) } - - statement, err = findLambdaPolicyStatementById(&policy, d.Id()) - return resource.RetryableError(err) + return nil }) + if isResourceTimeoutError(err) { + out, err = conn.GetPolicy(&input) + + if err == nil { + var psErr error + statement, psErr = getLambdaPolicyStatement(out, d.Id()) + if psErr != nil { + return psErr + } + } + } if err != nil { // Missing whole policy or Lambda function (API error) @@ -239,6 +251,10 @@ func resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) e return err } + if statement == nil { + return fmt.Errorf("No Lambda Permission policy found with ID %s", d.Id()) + } + qualifier, err := getQualifierFromLambdaAliasOrVersionArn(statement.Resource) if err != nil { log.Printf("[ERR] Error getting Lambda Qualifier: %s", err) @@ -313,57 +329,78 @@ func resourceAwsLambdaPermissionDelete(d *schema.ResourceData, meta interface{}) return err } + params := &lambda.GetPolicyInput{ + FunctionName: aws.String(d.Get("function_name").(string)), 
+ } + if v, ok := d.GetOk("qualifier"); ok { + params.Qualifier = aws.String(v.(string)) + } + var statement *LambdaPolicyStatement err = resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Checking if Lambda permission %q is deleted", d.Id()) - - params := &lambda.GetPolicyInput{ - FunctionName: aws.String(d.Get("function_name").(string)), - } - if v, ok := d.GetOk("qualifier"); ok { - params.Qualifier = aws.String(v.(string)) - } - log.Printf("[DEBUG] Looking for Lambda permission: %s", *params) resp, err := conn.GetPolicy(params) if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ResourceNotFoundException" { - return nil - } + if isAWSErr(err, "ResourceNotFoundException", "") { + return nil } return resource.NonRetryableError(err) } - if resp.Policy == nil { return nil } - policyInBytes := []byte(*resp.Policy) - policy := LambdaPolicy{} - err = json.Unmarshal(policyInBytes, &policy) + statement, err = getLambdaPolicyStatement(resp, d.Id()) if err != nil { - return resource.RetryableError( - fmt.Errorf("Error unmarshalling Lambda policy: %s", err)) + return nil } + return nil + }) - _, err = findLambdaPolicyStatementById(&policy, d.Id()) + if isResourceTimeoutError(err) { + resp, err := conn.GetPolicy(params) + if isAWSErr(err, "ResourceNotFoundException", "") { + return nil + } if err != nil { + return fmt.Errorf("Error getting Lambda permission policy: %s", err) + } + if resp.Policy == nil { return nil } - log.Printf("[DEBUG] No error when checking if Lambda permission %s is deleted", d.Id()) - return nil - }) - + var psErr error + statement, psErr = getLambdaPolicyStatement(resp, d.Id()) + if psErr != nil { + return nil + } + } if err != nil { return fmt.Errorf("Failed removing Lambda permission: %s", err) } + if statement != nil { + return fmt.Errorf("Failed to delete Lambda permission with ID %s", d.Id()) + } log.Printf("[DEBUG] Lambda permission with ID %q removed", d.Id()) return nil } +func getLambdaPolicyStatement(out *lambda.GetPolicyOutput, statementId string) (statement *LambdaPolicyStatement, err error) { + policyInBytes := []byte(*out.Policy) + policy := LambdaPolicy{} + err = json.Unmarshal(policyInBytes, &policy) + if err != nil { + return nil, fmt.Errorf("Error unmarshalling Lambda policy: %s", err) + } + + statement, psErr := findLambdaPolicyStatementById(&policy, statementId) + if psErr != nil { + return nil, fmt.Errorf("Error finding Lambda policy statement: %s", psErr) + } + return statement, nil +} + func findLambdaPolicyStatementById(policy *LambdaPolicy, id string) ( *LambdaPolicyStatement, error) { @@ -400,6 +437,41 @@ func getFunctionNameFromLambdaArn(arn string) (string, error) { return matches[5], nil } +func resourceAwsLambdaPermissionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.Split(d.Id(), "/") + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("Unexpected format of ID (%q), expected FUNCTION_NAME/STATEMENT_ID or FUNCTION_NAME:QUALIFIER/STATEMENT_ID", d.Id()) + } + + functionName := idParts[0] + + input := &lambda.GetFunctionInput{FunctionName: &functionName} + + var qualifier string + fnParts := strings.Split(functionName, ":") + if len(fnParts) == 2 { + functionName = fnParts[0] + qualifier = fnParts[1] + input.Qualifier = &qualifier + } + statementId := idParts[1] + log.Printf("[DEBUG] Importing Lambda Permission %s for function name %s", statementId, functionName) + + conn := 
meta.(*AWSClient).lambdaconn + getFunctionOutput, err := conn.GetFunction(input) + if err != nil { + return nil, err + } + + d.Set("function_name", getFunctionOutput.Configuration.FunctionArn) + d.Set("statement_id", statementId) + if qualifier != "" { + d.Set("qualifier", qualifier) + } + d.SetId(statementId) + return []*schema.ResourceData{d}, nil +} + type LambdaPolicy struct { Version string Statement []LambdaPolicyStatement diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go index 68ab22f61ac..ae245b1abe5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go @@ -9,10 +9,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLaunchConfiguration() *schema.Resource { @@ -260,6 +260,13 @@ func resourceAwsLaunchConfiguration() *schema.Resource { ForceNew: true, }, + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + "iops": { Type: schema.TypeInt, Optional: true, @@ -421,6 +428,10 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), } + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + if v, ok := bd["volume_size"].(int); ok && v != 0 { ebs.VolumeSize = aws.Int64(int64(v)) } @@ -480,6 +491,9 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface } return nil }) + if isResourceTimeoutError(err) { + _, err = autoscalingconn.CreateLaunchConfiguration(&createLaunchConfigurationOpts) + } if err != nil { return fmt.Errorf("Error creating launch configuration: %s", err) } @@ -489,13 +503,20 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface // We put a Retry here since sometimes eventual consistency bites // us and we need to retry a few times to get the LC to load properly - return resource.Retry(30*time.Second, func() *resource.RetryError { + err = resource.Retry(30*time.Second, func() *resource.RetryError { err := resourceAwsLaunchConfigurationRead(d, meta) if err != nil { return resource.RetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + err = resourceAwsLaunchConfigurationRead(d, meta) + } + if err != nil { + return fmt.Errorf("Error reading launch configuration: %s", err) + } + return nil } func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{}) error { @@ -675,16 +696,17 @@ func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autosca if bdm.Ebs != nil && bdm.Ebs.Iops != nil { bd["iops"] = *bdm.Ebs.Iops } + if 
bdm.Ebs != nil && bdm.Ebs.Encrypted != nil { + bd["encrypted"] = *bdm.Ebs.Encrypted + } if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName { blockDevices["root"] = bd } else { - if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil { - bd["encrypted"] = *bdm.Ebs.Encrypted - } if bdm.DeviceName != nil { bd["device_name"] = *bdm.DeviceName } + if bdm.VirtualName != nil { bd["virtual_name"] = *bdm.VirtualName blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_template.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_template.go index a052486a422..0d91c0d9488 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_template.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_template.go @@ -10,10 +10,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsLaunchTemplate() *schema.Resource { @@ -477,8 +478,8 @@ func resourceAwsLaunchTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - "instance", - "volume", + ec2.ResourceTypeInstance, + ec2.ResourceTypeVolume, }, false), }, "tags": tagsSchema(), @@ -531,6 +532,7 @@ func resourceAwsLaunchTemplateCreate(d *schema.ResourceData, meta interface{}) e ClientToken: aws.String(resource.UniqueId()), LaunchTemplateName: aws.String(ltName), LaunchTemplateData: launchTemplateData, + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeLaunchTemplate), } if v, ok := d.GetOk("description"); ok && v.(string) != "" { @@ -548,7 +550,7 @@ func resourceAwsLaunchTemplateCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Launch Template created: %q (version %d)", *launchTemplate.LaunchTemplateId, *launchTemplate.LatestVersionNumber) - return resourceAwsLaunchTemplateUpdate(d, meta) + return resourceAwsLaunchTemplateRead(d, meta) } func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) error { @@ -593,7 +595,9 @@ func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) err d.Set("name", lt.LaunchTemplateName) d.Set("latest_version", lt.LatestVersionNumber) d.Set("default_version", lt.DefaultVersionNumber) - d.Set("tags", tagsToMap(lt.Tags)) + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(lt.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } arn := arn.ARN{ Partition: meta.(*AWSClient).partition, @@ -636,21 +640,21 @@ func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) err } if err := d.Set("block_device_mappings", 
getBlockDeviceMappings(ltData.BlockDeviceMappings)); err != nil { - return err + return fmt.Errorf("error setting block_device_mappings: %s", err) } if err := d.Set("capacity_reservation_specification", getCapacityReservationSpecification(ltData.CapacityReservationSpecification)); err != nil { - return err + return fmt.Errorf("error setting capacity_reservation_specification: %s", err) } if strings.HasPrefix(aws.StringValue(ltData.InstanceType), "t2") || strings.HasPrefix(aws.StringValue(ltData.InstanceType), "t3") { if err := d.Set("credit_specification", getCreditSpecification(ltData.CreditSpecification)); err != nil { - return err + return fmt.Errorf("error setting credit_specification: %s", err) } } if err := d.Set("elastic_gpu_specifications", getElasticGpuSpecifications(ltData.ElasticGpuSpecifications)); err != nil { - return err + return fmt.Errorf("error setting elastic_gpu_specifications: %s", err) } if err := d.Set("elastic_inference_accelerator", flattenEc2LaunchTemplateElasticInferenceAcceleratorResponse(ltData.ElasticInferenceAccelerators)); err != nil { @@ -658,31 +662,31 @@ func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) err } if err := d.Set("iam_instance_profile", getIamInstanceProfile(ltData.IamInstanceProfile)); err != nil { - return err + return fmt.Errorf("error setting iam_instance_profile: %s", err) } if err := d.Set("instance_market_options", getInstanceMarketOptions(ltData.InstanceMarketOptions)); err != nil { - return err + return fmt.Errorf("error setting instance_market_options: %s", err) } if err := d.Set("license_specification", getLicenseSpecifications(ltData.LicenseSpecifications)); err != nil { - return err + return fmt.Errorf("error setting license_specification: %s", err) } if err := d.Set("monitoring", getMonitoring(ltData.Monitoring)); err != nil { - return err + return fmt.Errorf("error setting monitoring: %s", err) } if err := d.Set("network_interfaces", getNetworkInterfaces(ltData.NetworkInterfaces)); err != nil { - return err + return fmt.Errorf("error setting network_interfaces: %s", err) } if err := d.Set("placement", getPlacement(ltData.Placement)); err != nil { - return err + return fmt.Errorf("error setting placement: %s", err) } if err := d.Set("tag_specifications", getTagSpecifications(ltData.TagSpecifications)); err != nil { - return err + return fmt.Errorf("error setting tag_specifications: %s", err) } return nil @@ -691,38 +695,34 @@ func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) err func resourceAwsLaunchTemplateUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - if !d.IsNewResource() { - launchTemplateData, err := buildLaunchTemplateData(d) - if err != nil { - return err - } + launchTemplateData, err := buildLaunchTemplateData(d) + if err != nil { + return err + } - launchTemplateVersionOpts := &ec2.CreateLaunchTemplateVersionInput{ - ClientToken: aws.String(resource.UniqueId()), - LaunchTemplateId: aws.String(d.Id()), - LaunchTemplateData: launchTemplateData, - } + launchTemplateVersionOpts := &ec2.CreateLaunchTemplateVersionInput{ + ClientToken: aws.String(resource.UniqueId()), + LaunchTemplateId: aws.String(d.Id()), + LaunchTemplateData: launchTemplateData, + } - if v, ok := d.GetOk("description"); ok && v.(string) != "" { - launchTemplateVersionOpts.VersionDescription = aws.String(v.(string)) - } + if v, ok := d.GetOk("description"); ok && v.(string) != "" { + launchTemplateVersionOpts.VersionDescription = aws.String(v.(string)) + } - 
_, createErr := conn.CreateLaunchTemplateVersion(launchTemplateVersionOpts) - if createErr != nil { - return createErr - } + _, createErr := conn.CreateLaunchTemplateVersion(launchTemplateVersionOpts) + if createErr != nil { + return createErr } - d.Partial(true) + if d.HasChange("tags") { + o, n := d.GetChange("tags") - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } } - d.Partial(false) - return resourceAwsLaunchTemplateRead(d, meta) } @@ -993,7 +993,7 @@ func getTagSpecifications(t []*ec2.LaunchTemplateTagSpecification) []interface{} for _, v := range t { s = append(s, map[string]interface{}{ "resource_type": aws.StringValue(v.ResourceType), - "tags": tagsToMap(v.Tags), + "tags": keyvaluetags.Ec2KeyValueTags(v.Tags).IgnoreAws().Map(), }) } return s @@ -1172,10 +1172,9 @@ func buildLaunchTemplateData(d *schema.ResourceData) (*ec2.RequestLaunchTemplate continue } tsData := ts.(map[string]interface{}) - tags := tagsFromMap(tsData["tags"].(map[string]interface{})) tagSpecification := &ec2.LaunchTemplateTagSpecificationRequest{ ResourceType: aws.String(tsData["resource_type"].(string)), - Tags: tags, + Tags: keyvaluetags.New(tsData["tags"].(map[string]interface{})).IgnoreAws().Ec2Tags(), } tagSpecifications = append(tagSpecifications, tagSpecification) } @@ -1421,7 +1420,10 @@ func readInstanceMarketOptionsFromConfig(imo map[string]interface{}) (*ec2.Launc if v, ok := imo["spot_options"]; ok { vL := v.([]interface{}) for _, v := range vL { - so := v.(map[string]interface{}) + so, ok := v.(map[string]interface{}) + if !ok { + continue + } if v, ok := so["block_duration_minutes"].(int); ok && v != 0 { spotOptions.BlockDurationMinutes = aws.Int64(int64(v)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb.go index 2860d051cf8..326eed79168 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb.go @@ -11,9 +11,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLb() *schema.Resource { @@ -580,24 +580,26 @@ func waitForNLBNetworkInterfacesToDetach(conn *ec2.EC2, lbArn string) error { // We cannot cleanup these ENIs ourselves as that would result in // OperationNotPermitted: You are not allowed to manage 'ela-attach' attachments. 
// yet presence of these ENIs may prevent us from deleting EIPs associated w/ the NLB - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("attachment.instance-owner-id"), - Values: []*string{aws.String("amazon-aws")}, - }, - { - Name: aws.String("attachment.attachment-id"), - Values: []*string{aws.String("ela-attach-*")}, - }, - { - Name: aws.String("description"), - Values: []*string{aws.String("ELB " + name)}, - }, + input := &ec2.DescribeNetworkInterfacesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("attachment.instance-owner-id"), + Values: []*string{aws.String("amazon-aws")}, }, - }) + { + Name: aws.String("attachment.attachment-id"), + Values: []*string{aws.String("ela-attach-*")}, + }, + { + Name: aws.String("description"), + Values: []*string{aws.String("ELB " + name)}, + }, + }, + } + var out *ec2.DescribeNetworkInterfacesOutput + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + out, err = conn.DescribeNetworkInterfaces(input) if err != nil { return resource.NonRetryableError(err) } @@ -611,6 +613,20 @@ return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeNetworkInterfaces(input) + if err != nil { + return fmt.Errorf("Error describing network interfaces: %s", err) + } + niCount := len(out.NetworkInterfaces) + if niCount > 0 { + return fmt.Errorf("Error waiting for %d ENIs of %q to clean up", niCount, lbArn) + } + } + if err != nil { + return fmt.Errorf("Error describing network interfaces: %s", err) + } + return nil } func getLbNameFromArn(arn string) (string, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go index acb6acdbe93..ec1c169d863 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLBCookieStickinessPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener.go index 7720ddc6ccb..5f4b6f380ba 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener.go @@ -12,9 +12,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLbListener() *schema.Resource { @@ -61,6 +61,8 @@ func resourceAwsLbListener() *schema.Resource { elbv2.ProtocolEnumHttps, elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls, + elbv2.ProtocolEnumUdp, + elbv2.ProtocolEnumTcpUdp, }, true), }, @@ -493,6 +495,10 @@ func resourceAwsLbListenerCreate(d *schema.ResourceData, meta interface{}) error return nil }) + if isResourceTimeoutError(err) { + _, err = elbconn.CreateListener(params) + } + if err != nil { return fmt.Errorf("Error creating LB Listener: %s", err) } @@ -526,6 +532,10 @@ func resourceAwsLbListenerRead(d *schema.ResourceData, meta interface{}) error { return nil }) + if isResourceTimeoutError(err) { + _, err = elbconn.DescribeListeners(request) + } + if isAWSErr(err, elbv2.ErrCodeListenerNotFoundException, "") { log.Printf("[WARN] ELBv2 Listener (%s) not found - removing from state", d.Id()) d.SetId("") @@ -799,6 +809,11 @@ func resourceAwsLbListenerUpdate(d *schema.ResourceData, meta interface{}) error } return nil }) + + if isResourceTimeoutError(err) { + _, err = elbconn.ModifyListener(params) + } + if err != nil { return fmt.Errorf("Error modifying LB Listener: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_certificate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_certificate.go index 51405e30fb1..5683b643da2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_certificate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_certificate.go @@ -1,15 +1,14 @@ package aws import ( - "errors" "fmt" "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLbListenerCertificate() *schema.Resource { @@ -46,13 +45,28 @@ func resourceAwsLbListenerCertificateCreate(d *schema.ResourceData, meta interfa } log.Printf("[DEBUG] Adding certificate: %s of listener: %s", d.Get("certificate_arn").(string), d.Get("listener_arn").(string)) - resp, err := conn.AddListenerCertificates(params) - if err != nil { - return fmt.Errorf("Error creating LB Listener Certificate: %s", err) + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.AddListenerCertificates(params) + + // Retry for IAM Server Certificate eventual consistency + if isAWSErr(err, elbv2.ErrCodeCertificateNotFoundException, "") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + _, err = conn.AddListenerCertificates(params) } - if len(resp.Certificates) == 0 { - return errors.New("Error creating LB Listener Certificate: no certificates returned in response") + if err != nil { + return fmt.Errorf("error adding LB Listener Certificate: %s", err) } 
d.SetId(d.Get("listener_arn").(string) + "_" + d.Get("certificate_arn").(string)) @@ -86,6 +100,9 @@ func resourceAwsLbListenerCertificateRead(d *schema.ResourceData, meta interface return nil }) + if isResourceTimeoutError(err) { + certificate, err = findAwsLbListenerCertificate(certificateArn, listenerArn, true, nil, conn) + } if err != nil { if certificate == nil { log.Printf("[WARN] %s - removing from state", err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_rule.go index a6edf22937e..bd570389bd9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_listener_rule.go @@ -12,9 +12,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLbbListenerRule() *schema.Resource { @@ -475,9 +475,10 @@ func resourceAwsLbListenerRuleCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error creating LB Listener Rule: %v", err) } } else { + var priority int64 err := resource.Retry(5*time.Minute, func() *resource.RetryError { var err error - priority, err := highestListenerRulePriority(elbconn, listenerArn) + priority, err = highestListenerRulePriority(elbconn, listenerArn) if err != nil { return resource.NonRetryableError(err) } @@ -491,12 +492,20 @@ func resourceAwsLbListenerRuleCreate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + priority, err = highestListenerRulePriority(elbconn, listenerArn) + if err != nil { + return fmt.Errorf("Error getting highest listener rule priority: %s", err) + } + params.Priority = aws.Int64(priority + 1) + resp, err = elbconn.CreateRule(params) + } if err != nil { return fmt.Errorf("Error creating LB Listener Rule: %v", err) } } - if len(resp.Rules) == 0 { + if resp == nil || len(resp.Rules) == 0 { return errors.New("Error creating LB Listener Rule: no rules returned in response") } @@ -525,7 +534,9 @@ func resourceAwsLbListenerRuleRead(d *schema.ResourceData, meta interface{}) err } return nil }) - + if isResourceTimeoutError(err) { + resp, err = elbconn.DescribeRules(req) + } if err != nil { if isAWSErr(err, elbv2.ErrCodeRuleNotFoundException, "") { log.Printf("[WARN] DescribeRules - removing %s from state", d.Id()) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go index eab8350ffdf..17e6af10534 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go @@ -9,8 
+9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLBSSLNegotiationPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group.go index 3f5fba9fb51..d22a77ca8e4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLbTargetGroup() *schema.Resource { @@ -71,6 +71,8 @@ func resourceAwsLbTargetGroup() *schema.Resource { elbv2.ProtocolEnumHttps, elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls, + elbv2.ProtocolEnumUdp, + elbv2.ProtocolEnumTcpUdp, }, true), }, @@ -200,7 +202,7 @@ func resourceAwsLbTargetGroup() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ValidateFunc: validation.IntBetween(2, 60), + ValidateFunc: validation.IntBetween(2, 120), }, "healthy_threshold": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group_attachment.go index d845a1d4020..cb5292d291a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group_attachment.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLbTargetGroupAttachment() *schema.Resource { @@ -126,6 +126,7 @@ func resourceAwsLbAttachmentRead(d *schema.ResourceData, meta interface{}) error TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), Targets: []*elbv2.TargetDescription{target}, }) + if err != nil { if isAWSErr(err, elbv2.ErrCodeTargetGroupNotFoundException, "") { log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id()) @@ -140,6 +141,29 @@ func resourceAwsLbAttachmentRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading Target Health: %s", err) } + for _, targetDesc := range resp.TargetHealthDescriptions { + if targetDesc == nil || 
targetDesc.Target == nil { + continue + } + + if aws.StringValue(targetDesc.Target.Id) == d.Get("target_id").(string) { + // These will catch targets being removed by hand (draining as we plan) or that have been removed for a while + // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the + // target group isn't assigned to anything, a scenario where we don't want to continuously recreate the resource. + if targetDesc.TargetHealth == nil { + continue + } + + reason := aws.StringValue(targetDesc.TargetHealth.Reason) + + if reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { + log.Printf("[WARN] Target Attachment does not exist, recreating attachment") + d.SetId("") + return nil + } + } + } + if len(resp.TargetHealthDescriptions) != 1 { log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) d.SetId("") diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_association.go index 646d4031bc5..ea58edcf3ee 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_association.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/licensemanager" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLicenseManagerAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_license_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_license_configuration.go index 8dd6a17f095..6e94dda4d9f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_license_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_licensemanager_license_configuration.go @@ -7,8 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/licensemanager" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsLicenseManagerLicenseConfiguration() *schema.Resource { @@ -89,7 +90,7 @@ func resourceAwsLicenseManagerLicenseConfigurationCreate(d *schema.ResourceData, } if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { - opts.Tags = tagsFromMapLicenseManager(v.(map[string]interface{})) + opts.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().LicensemanagerTags() } log.Printf("[DEBUG] License Manager license configuration: %s", opts) @@ -127,7 +128,7 @@ func resourceAwsLicenseManagerLicenseConfigurationRead(d *schema.ResourceData, m } d.Set("name", resp.Name) - if err := d.Set("tags", 
tagsToMapLicenseManager(resp.Tags)); err != nil { + if err := d.Set("tags", keyvaluetags.LicensemanagerKeyValueTags(resp.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } @@ -140,9 +141,12 @@ func resourceAwsLicenseManagerLicenseConfigurationUpdate(d *schema.ResourceData, d.Partial(true) if d.HasChange("tags") { - if err := setTagsLicenseManager(conn, d); err != nil { - return err + o, n := d.GetChange("tags") + + if err := keyvaluetags.LicensemanagerUpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating License Manager License Configuration (%s) tags: %s", d.Id(), err) } + d.SetPartial("tags") } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go index 0656a88d68e..27fb0278f64 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLightsailDomain() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go index 17fdf90e603..6972788bb57 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go @@ -3,20 +3,23 @@ package aws import ( "fmt" "log" + "regexp" "strconv" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsLightsailInstance() *schema.Resource { return &schema.Resource{ Create: resourceAwsLightsailInstanceCreate, Read: resourceAwsLightsailInstanceRead, + Update: resourceAwsLightsailInstanceUpdate, Delete: resourceAwsLightsailInstanceDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -27,6 +30,11 @@ func resourceAwsLightsailInstance() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(2, 255), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z]`), "must begin with an alphabetic character"), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]+[^._\-]$`), "must contain only alphanumeric characters, underscores, hyphens, and dots"), + ), }, "availability_zone": { Type: schema.TypeString, @@ -103,6 +111,7 @@ func resourceAwsLightsailInstance() *schema.Resource { Type: schema.TypeString, Computed: true, 
}, + "tags": tagsSchema(), }, } } @@ -126,6 +135,12 @@ func resourceAwsLightsailInstanceCreate(d *schema.ResourceData, meta interface{} req.UserData = aws.String(v.(string)) } + tags := tagsFromMapLightsail(d.Get("tags").(map[string]interface{})) + + if len(tags) != 0 { + req.Tags = tags + } + resp, err := conn.CreateInstances(&req) if err != nil { return err @@ -199,6 +214,10 @@ func resourceAwsLightsailInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("private_ip_address", i.PrivateIpAddress) d.Set("public_ip_address", i.PublicIpAddress) + if err := d.Set("tags", tagsToMapLightsail(i.Tags)); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + return nil } @@ -233,6 +252,19 @@ func resourceAwsLightsailInstanceDelete(d *schema.ResourceData, meta interface{} return nil } +func resourceAwsLightsailInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + if d.HasChange("tags") { + if err := setTagsLightsail(conn, d); err != nil { + return err + } + d.SetPartial("tags") + } + + return resourceAwsLightsailInstanceRead(d, meta) +} + // method to check the status of an Operation, which is returned from // Create/Delete methods. // Statuses are aws.OperationStatus enum values: diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go index 892ed298f32..2979ece10f4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/hashicorp/terraform/helper/encryption" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/encryption" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLightsailKeyPair() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go index 1f593ad40e6..72024ed0ea1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLightsailStaticIp() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go 
index 766ccff55dd..78709b8a2b3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLightsailStaticIpAttachment() *schema.Resource { @@ -26,6 +26,10 @@ func resourceAwsLightsailStaticIpAttachment() *schema.Resource { Required: true, ForceNew: true, }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -76,6 +80,7 @@ func resourceAwsLightsailStaticIpAttachmentRead(d *schema.ResourceData, meta int log.Printf("[INFO] Received Lightsail Static IP: %s", *out) d.Set("instance_name", out.StaticIp.AttachedTo) + d.Set("ip_address", out.StaticIp.IpAddress) return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go index d8c5746ce9a..2db9c73130d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLoadBalancerBackendServerPolicies() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go index 96ca1ccdd64..ff7ada242e7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLoadBalancerListenerPolicies() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go index 703ba85cd68..ac473f0332c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsLoadBalancerPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_member_account_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_member_account_association.go index 23b53147dd0..7a81851dfa8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_member_account_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_member_account_association.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/macie" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsMacieMemberAccountAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_s3_bucket_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_s3_bucket_association.go index 8f8606eba09..e7c53af2907 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_s3_bucket_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_macie_s3_bucket_association.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/macie" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsMacieS3BucketAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go index 58dadccd884..4aca0fed7e8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsMainRouteTableAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_package_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_package_channel.go index 81481a8000b..23da76ecc15 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_package_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_package_channel.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mediapackage" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsMediaPackageChannel() *schema.Resource { @@ -151,10 +151,10 @@ func resourceAwsMediaPackageChannelDelete(d *schema.ResourceData, meta interface return fmt.Errorf("error deleting MediaPackage Channel: %s", err) } + dcinput := &mediapackage.DescribeChannelInput{ + Id: aws.String(d.Id()), + } err = resource.Retry(5*time.Minute, func() *resource.RetryError { - dcinput := &mediapackage.DescribeChannelInput{ - Id: aws.String(d.Id()), - } _, err := conn.DescribeChannel(dcinput) if err != nil { if isAWSErr(err, mediapackage.ErrCodeNotFoundException, "") { @@ -164,6 +164,9 @@ func resourceAwsMediaPackageChannelDelete(d *schema.ResourceData, meta interface } return resource.RetryableError(fmt.Errorf("MediaPackage Channel (%s) still exists", d.Id())) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeChannel(dcinput) + } if err != nil { return fmt.Errorf("error waiting for MediaPackage Channel (%s) deletion: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container.go index ce76b6ad93e..3bbd17176bb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container.go @@ -2,20 +2,22 @@ package aws import ( "fmt" + "log" "regexp" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mediastore" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsMediaStoreContainer() *schema.Resource { return &schema.Resource{ Create: resourceAwsMediaStoreContainerCreate, Read: resourceAwsMediaStoreContainerRead, + Update: resourceAwsMediaStoreContainerUpdate, Delete: resourceAwsMediaStoreContainerDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -35,6 +37,7 @@ func resourceAwsMediaStoreContainer() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tagsSchema(), }, } } @@ -44,6 +47,7 @@ func resourceAwsMediaStoreContainerCreate(d *schema.ResourceData, meta interface input := &mediastore.CreateContainerInput{ ContainerName: aws.String(d.Get("name").(string)), + Tags: tagsFromMapMediaStore(d.Get("tags").(map[string]interface{})), } _, err := conn.CreateContainer(input) @@ -75,15 
+79,45 @@ func resourceAwsMediaStoreContainerRead(d *schema.ResourceData, meta interface{} ContainerName: aws.String(d.Id()), } resp, err := conn.DescribeContainer(input) + if isAWSErr(err, mediastore.ErrCodeContainerNotFoundException, "") { + log.Printf("[WARN] No Container found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return err + return fmt.Errorf("Error describing media store container %s: %s", d.Id(), err) } d.Set("arn", resp.Container.ARN) d.Set("name", resp.Container.Name) d.Set("endpoint", resp.Container.Endpoint) + + if err := saveTagsMediaStore(conn, d, aws.StringValue(resp.Container.ARN)); err != nil { + if isAWSErr(err, mediastore.ErrCodeContainerNotFoundException, "") { + log.Printf("[WARN] No Container found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error setting tags for %s: %s", d.Id(), err) + } + return nil } +func resourceAwsMediaStoreContainerUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).mediastoreconn + + if err := setTagsMediaStore(conn, d, d.Get("arn").(string)); err != nil { + if isAWSErr(err, mediastore.ErrCodeContainerNotFoundException, "") { + log.Printf("[WARN] No Container found: %s, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error updating tags for %s: %s", d.Id(), err) + } + + return resourceAwsMediaStoreContainerRead(d, meta) +} + func resourceAwsMediaStoreContainerDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).mediastoreconn @@ -98,10 +132,10 @@ func resourceAwsMediaStoreContainerDelete(d *schema.ResourceData, meta interface return err } + dcinput := &mediastore.DescribeContainerInput{ + ContainerName: aws.String(d.Id()), + } err = resource.Retry(5*time.Minute, func() *resource.RetryError { - dcinput := &mediastore.DescribeContainerInput{ - ContainerName: aws.String(d.Id()), - } _, err := conn.DescribeContainer(dcinput) if err != nil { if isAWSErr(err, mediastore.ErrCodeContainerNotFoundException, "") { @@ -111,6 +145,9 @@ func resourceAwsMediaStoreContainerDelete(d *schema.ResourceData, meta interface } return resource.RetryableError(fmt.Errorf("Media Store Container (%s) still exists", d.Id())) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeContainer(dcinput) + } if err != nil { return fmt.Errorf("error waiting for Media Store Container (%s) deletion: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container_policy.go index b3469fe9c2e..9b370c07249 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_media_store_container_policy.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mediastore" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsMediaStoreContainerPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_broker.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_broker.go index 704ad6a414d..d44c9f731f0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_broker.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_broker.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mq" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/mitchellh/copystructure" ) @@ -65,6 +65,30 @@ func resourceAwsMqBroker() *schema.Resource { Default: "SINGLE_INSTANCE", ForceNew: true, }, + "encryption_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "use_aws_owned_key": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + }, + }, + }, "engine_type": { Type: schema.TypeString, Required: true, @@ -139,7 +163,6 @@ func resourceAwsMqBroker() *schema.Resource { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Required: true, - ForceNew: true, }, "subnet_ids": { Type: schema.TypeSet, @@ -217,6 +240,7 @@ func resourceAwsMqBrokerCreate(d *schema.ResourceData, meta interface{}) error { AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), BrokerName: aws.String(name), CreatorRequestId: aws.String(requestId), + EncryptionOptions: expandMqEncryptionOptions(d.Get("encryption_options").([]interface{})), EngineType: aws.String(d.Get("engine_type").(string)), EngineVersion: aws.String(d.Get("engine_version").(string)), HostInstanceType: aws.String(d.Get("host_instance_type").(string)), @@ -304,6 +328,11 @@ func resourceAwsMqBrokerRead(d *schema.ResourceData, meta interface{}) error { d.Set("instances", flattenMqBrokerInstances(out.BrokerInstances)) d.Set("broker_name", out.BrokerName) d.Set("deployment_mode", out.DeploymentMode) + + if err := d.Set("encryption_options", flattenMqEncryptionOptions(out.EncryptionOptions)); err != nil { + return fmt.Errorf("error setting encryption_options: %s", err) + } + d.Set("engine_type", out.EngineType) d.Set("engine_version", out.EngineVersion) d.Set("host_instance_type", out.HostInstanceType) @@ -352,6 +381,18 @@ func resourceAwsMqBrokerRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsMqBrokerUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).mqconn + requiresReboot := false + + if d.HasChange("security_groups") { + _, err := conn.UpdateBroker(&mq.UpdateBrokerRequest{ + BrokerId: aws.String(d.Id()), + SecurityGroups: expandStringSet(d.Get("security_groups").(*schema.Set)), + }) + if err != nil { + return fmt.Errorf("error updating MQ Broker (%s) security groups: %s", d.Id(), err) + } + } + if d.HasChange("configuration") || d.HasChange("logs") { _, err := conn.UpdateBroker(&mq.UpdateBrokerRequest{ BrokerId: aws.String(d.Id()), @@ -359,25 +400,34 @@ func 
resourceAwsMqBrokerUpdate(d *schema.ResourceData, meta interface{}) error { Logs: expandMqLogs(d.Get("logs").([]interface{})), }) if err != nil { - return err + return fmt.Errorf("error updating MQ Broker (%s) configuration: %s", d.Id(), err) } + requiresReboot = true } if d.HasChange("user") { o, n := d.GetChange("user") - err := updateAwsMqBrokerUsers(conn, d.Id(), + var err error + // d.HasChange("user") always reports a change when running resourceAwsMqBrokerUpdate, + // so updateAwsMqBrokerUsers must be called to determine whether the users actually changed + var usersUpdated bool + usersUpdated, err = updateAwsMqBrokerUsers(conn, d.Id(), o.(*schema.Set).List(), n.(*schema.Set).List()) if err != nil { - return err + return fmt.Errorf("error updating MQ Broker (%s) user: %s", d.Id(), err) + } + + if usersUpdated { + requiresReboot = true } } - if d.Get("apply_immediately").(bool) { + if d.Get("apply_immediately").(bool) && requiresReboot { _, err := conn.RebootBroker(&mq.RebootBrokerInput{ BrokerId: aws.String(d.Id()), }) if err != nil { - return err + return fmt.Errorf("error rebooting MQ Broker (%s): %s", d.Id(), err) } stateConf := resource.StateChangeConf{ @@ -472,32 +522,38 @@ func waitForMqBrokerDeletion(conn *mq.MQ, id string) error { return err } -func updateAwsMqBrokerUsers(conn *mq.MQ, bId string, oldUsers, newUsers []interface{}) error { +func updateAwsMqBrokerUsers(conn *mq.MQ, bId string, oldUsers, newUsers []interface{}) (bool, error) { + // updatedUsers is set to true if any user is created, deleted, or updated + updatedUsers := false + createL, deleteL, updateL, err := diffAwsMqBrokerUsers(bId, oldUsers, newUsers) if err != nil { - return err + return updatedUsers, err } for _, c := range createL { _, err := conn.CreateUser(c) + updatedUsers = true if err != nil { - return err + return updatedUsers, err } } for _, d := range deleteL { _, err := conn.DeleteUser(d) + updatedUsers = true if err != nil { - return err + return updatedUsers, err } } for _, u := range updateL { _, err := conn.UpdateUser(u) + updatedUsers = true if err != nil { - return err + return updatedUsers, err } } - return nil + return updatedUsers, nil } func diffAwsMqBrokerUsers(bId string, oldUsers, newUsers []interface{}) ( @@ -579,6 +635,37 @@ func diffAwsMqBrokerUsers(bId string, oldUsers, newUsers []interface{}) ( return } +func expandMqEncryptionOptions(l []interface{}) *mq.EncryptionOptions { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + encryptionOptions := &mq.EncryptionOptions{ + UseAwsOwnedKey: aws.Bool(m["use_aws_owned_key"].(bool)), + } + + if v, ok := m["kms_key_id"].(string); ok && v != "" { + encryptionOptions.KmsKeyId = aws.String(v) + } + + return encryptionOptions +} + +func flattenMqEncryptionOptions(encryptionOptions *mq.EncryptionOptions) []interface{} { + if encryptionOptions == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "kms_key_id": aws.StringValue(encryptionOptions.KmsKeyId), + "use_aws_owned_key": aws.BoolValue(encryptionOptions.UseAwsOwnedKey), + } + + return []interface{}{m} +} + func validateMqBrokerPassword(v interface{}, k string) (ws []string, errors []error) { min := 12 max := 250 diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_configuration.go index 169faf0ff80..5721be52beb 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_mq_configuration.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mq" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsMqConfiguration() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_cluster.go new file mode 100644 index 00000000000..bdf6e8f302a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_cluster.go @@ -0,0 +1,686 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsMskCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMskClusterCreate, + Read: resourceAwsMskClusterRead, + Update: resourceAwsMskClusterUpdate, + Delete: resourceAwsMskClusterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_tls": { + Type: schema.TypeString, + Computed: true, + }, + "broker_node_group_info": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "az_distribution": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: kafka.BrokerAZDistributionDefault, + ValidateFunc: validation.StringInSlice([]string{ + kafka.BrokerAZDistributionDefault, + }, false), + }, + "client_subnets": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "security_groups": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ebs_volume_size": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 16384), + }, + }, + }, + }, + "client_authentication": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tls": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority_arns": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "configuration_info": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: 
suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "revision": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + "current_version": { + Type: schema.TypeString, + Computed: true, + }, + "encryption_info": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encryption_at_rest_kms_key_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "encryption_in_transit": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_broker": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: kafka.ClientBrokerTlsPlaintext, + ValidateFunc: validation.StringInSlice([]string{ + kafka.ClientBrokerPlaintext, + kafka.ClientBrokerTlsPlaintext, + kafka.ClientBrokerTls, + }, false), + }, + "in_cluster": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + }, + }, + }, + }, + }, + }, + "enhanced_monitoring": { + Type: schema.TypeString, + Optional: true, + Default: kafka.EnhancedMonitoringDefault, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + kafka.EnhancedMonitoringDefault, + kafka.EnhancedMonitoringPerBroker, + kafka.EnhancedMonitoringPerTopicPerBroker, + }, true), + }, + "kafka_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "number_of_broker_nodes": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "tags": tagsSchema(), + "zookeeper_connect_string": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsMskClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + input := &kafka.CreateClusterInput{ + BrokerNodeGroupInfo: expandMskClusterBrokerNodeGroupInfo(d.Get("broker_node_group_info").([]interface{})), + ClientAuthentication: expandMskClusterClientAuthentication(d.Get("client_authentication").([]interface{})), + ClusterName: aws.String(d.Get("cluster_name").(string)), + ConfigurationInfo: expandMskClusterConfigurationInfo(d.Get("configuration_info").([]interface{})), + EncryptionInfo: expandMskClusterEncryptionInfo(d.Get("encryption_info").([]interface{})), + EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), + KafkaVersion: aws.String(d.Get("kafka_version").(string)), + NumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + Tags: tagsFromMapMskCluster(d.Get("tags").(map[string]interface{})), + } + + out, err := conn.CreateCluster(input) + + if err != nil { + return fmt.Errorf("error creating MSK cluster: %s", err) + } + + d.SetId(aws.StringValue(out.ClusterArn)) + + log.Printf("[DEBUG] Waiting for MSK cluster %q to be created", d.Id()) + err = waitForMskClusterCreation(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for MSK cluster creation (%s): %s", d.Id(), err) + } + + return resourceAwsMskClusterRead(d, meta) +} + +func waitForMskClusterCreation(conn *kafka.Kafka, arn string) error { + 
input := &kafka.DescribeClusterInput{ + ClusterArn: aws.String(arn), + } + err := resource.Retry(60*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeCluster(input) + if err != nil { + return resource.NonRetryableError(err) + } + if out.ClusterInfo != nil { + if aws.StringValue(out.ClusterInfo.State) == kafka.ClusterStateFailed { + return resource.NonRetryableError(fmt.Errorf("Cluster creation failed with cluster state %q", kafka.ClusterStateFailed)) + } + if aws.StringValue(out.ClusterInfo.State) == kafka.ClusterStateActive { + return nil + } + } + return resource.RetryableError(fmt.Errorf("%q: cluster still creating", arn)) + }) + if isResourceTimeoutError(err) { + out, err := conn.DescribeCluster(input) + if err != nil { + return fmt.Errorf("Error describing MSK cluster state: %s", err) + } + if out.ClusterInfo != nil { + if aws.StringValue(out.ClusterInfo.State) == kafka.ClusterStateFailed { + return fmt.Errorf("Cluster creation failed with cluster state %q", kafka.ClusterStateFailed) + } + if aws.StringValue(out.ClusterInfo.State) == kafka.ClusterStateActive { + return nil + } + } + } + if err != nil { + return fmt.Errorf("Error waiting for MSK cluster creation: %s", err) + } + return nil +} + +func resourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + out, err := conn.DescribeCluster(&kafka.DescribeClusterInput{ + ClusterArn: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, kafka.ErrCodeNotFoundException, "") { + log.Printf("[WARN] MSK Cluster (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("failed to look up cluster %s: %s", d.Id(), err) + } + + brokerOut, err := conn.GetBootstrapBrokers(&kafka.GetBootstrapBrokersInput{ + ClusterArn: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("failed requesting bootstrap broker info for %q: %s", d.Id(), err) + } + + cluster := out.ClusterInfo + + d.Set("arn", aws.StringValue(cluster.ClusterArn)) + d.Set("bootstrap_brokers", aws.StringValue(brokerOut.BootstrapBrokerString)) + d.Set("bootstrap_brokers_tls", aws.StringValue(brokerOut.BootstrapBrokerStringTls)) + + if err := d.Set("broker_node_group_info", flattenMskBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)); err != nil { + return fmt.Errorf("error setting broker_node_group_info: %s", err) + } + + if err := d.Set("client_authentication", flattenMskClientAuthentication(cluster.ClientAuthentication)); err != nil { + return fmt.Errorf("error setting client_authentication: %s", err) + } + + d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) + + if err := d.Set("configuration_info", flattenMskConfigurationInfo(cluster.CurrentBrokerSoftwareInfo)); err != nil { + return fmt.Errorf("error setting configuration_info: %s", err) + } + + d.Set("current_version", aws.StringValue(cluster.CurrentVersion)) + d.Set("enhanced_monitoring", aws.StringValue(cluster.EnhancedMonitoring)) + + if err := d.Set("encryption_info", flattenMskEncryptionInfo(cluster.EncryptionInfo)); err != nil { + return fmt.Errorf("error setting encryption_info: %s", err) + } + + d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) + d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) + + if err := d.Set("tags", tagsToMapMskCluster(cluster.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) + + return nil +}
+ +func resourceAwsMskClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + if d.HasChange("broker_node_group_info.0.ebs_volume_size") { + input := &kafka.UpdateBrokerStorageInput{ + ClusterArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get("current_version").(string)), + TargetBrokerEBSVolumeInfo: []*kafka.BrokerEBSVolumeInfo{ + { + KafkaBrokerNodeId: aws.String("All"), + VolumeSizeGB: aws.Int64(int64(d.Get("broker_node_group_info.0.ebs_volume_size").(int))), + }, + }, + } + + output, err := conn.UpdateBrokerStorage(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker storage: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker storage: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + + if d.HasChange("configuration_info") { + input := &kafka.UpdateClusterConfigurationInput{ + ClusterArn: aws.String(d.Id()), + ConfigurationInfo: expandMskClusterConfigurationInfo(d.Get("configuration_info").([]interface{})), + CurrentVersion: aws.String(d.Get("current_version").(string)), + } + + output, err := conn.UpdateClusterConfiguration(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) configuration: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) configuration: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + + if d.HasChange("tags") { + if err := setTagsMskCluster(conn, d, d.Id()); err != nil { + return fmt.Errorf("failed updating tags for msk cluster %q: %s", d.Id(), err) + } + } + + return resourceAwsMskClusterRead(d, meta) + +} + +func expandMskClusterBrokerNodeGroupInfo(l []interface{}) *kafka.BrokerNodeGroupInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + bngi := &kafka.BrokerNodeGroupInfo{ + BrokerAZDistribution: aws.String(m["az_distribution"].(string)), + ClientSubnets: expandStringList(m["client_subnets"].([]interface{})), + InstanceType: aws.String(m["instance_type"].(string)), + SecurityGroups: expandStringList(m["security_groups"].([]interface{})), + StorageInfo: &kafka.StorageInfo{ + EbsStorageInfo: &kafka.EBSStorageInfo{ + VolumeSize: aws.Int64(int64(m["ebs_volume_size"].(int))), + }, + }, + } + + return bngi +} + +func expandMskClusterClientAuthentication(l []interface{}) *kafka.ClientAuthentication { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ca := &kafka.ClientAuthentication{ + Tls: expandMskClusterTls(m["tls"].([]interface{})), + } + + return ca +} + +func expandMskClusterConfigurationInfo(l []interface{}) *kafka.ConfigurationInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ci := &kafka.ConfigurationInfo{ + Arn: aws.String(m["arn"].(string)), + Revision: aws.Int64(int64(m["revision"].(int))), + } + + return ci +} + +func expandMskClusterEncryptionInfo(l []interface{}) *kafka.EncryptionInfo { + if len(l) == 
0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ei := &kafka.EncryptionInfo{ + EncryptionInTransit: expandMskClusterEncryptionInTransit(m["encryption_in_transit"].([]interface{})), + } + + if v, ok := m["encryption_at_rest_kms_key_arn"]; ok && v.(string) != "" { + ei.EncryptionAtRest = &kafka.EncryptionAtRest{ + DataVolumeKMSKeyId: aws.String(v.(string)), + } + } + + return ei +} + +func expandMskClusterEncryptionInTransit(l []interface{}) *kafka.EncryptionInTransit { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + eit := &kafka.EncryptionInTransit{ + ClientBroker: aws.String(m["client_broker"].(string)), + InCluster: aws.Bool(m["in_cluster"].(bool)), + } + + return eit +} + +func expandMskClusterTls(l []interface{}) *kafka.Tls { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + tls := &kafka.Tls{ + CertificateAuthorityArnList: expandStringSet(m["certificate_authority_arns"].(*schema.Set)), + } + + return tls +} + +func flattenMskBrokerNodeGroupInfo(b *kafka.BrokerNodeGroupInfo) []map[string]interface{} { + + if b == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "az_distribution": aws.StringValue(b.BrokerAZDistribution), + "client_subnets": flattenStringList(b.ClientSubnets), + "instance_type": aws.StringValue(b.InstanceType), + "security_groups": flattenStringList(b.SecurityGroups), + } + if b.StorageInfo != nil { + if b.StorageInfo.EbsStorageInfo != nil { + m["ebs_volume_size"] = int(aws.Int64Value(b.StorageInfo.EbsStorageInfo.VolumeSize)) + } + } + return []map[string]interface{}{m} +} + +func flattenMskClientAuthentication(ca *kafka.ClientAuthentication) []map[string]interface{} { + if ca == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "tls": flattenMskTls(ca.Tls), + } + + return []map[string]interface{}{m} +} + +func flattenMskConfigurationInfo(bsi *kafka.BrokerSoftwareInfo) []map[string]interface{} { + if bsi == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "arn": aws.StringValue(bsi.ConfigurationArn), + "revision": aws.Int64Value(bsi.ConfigurationRevision), + } + + return []map[string]interface{}{m} +} + +func flattenMskEncryptionInfo(e *kafka.EncryptionInfo) []map[string]interface{} { + if e == nil || e.EncryptionAtRest == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "encryption_at_rest_kms_key_arn": aws.StringValue(e.EncryptionAtRest.DataVolumeKMSKeyId), + "encryption_in_transit": flattenMskEncryptionInTransit(e.EncryptionInTransit), + } + + return []map[string]interface{}{m} +} + +func flattenMskEncryptionInTransit(eit *kafka.EncryptionInTransit) []map[string]interface{} { + if eit == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "client_broker": aws.StringValue(eit.ClientBroker), + "in_cluster": aws.BoolValue(eit.InCluster), + } + + return []map[string]interface{}{m} +} + +func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { + if tls == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "certificate_authority_arns": aws.StringValueSlice(tls.CertificateAuthorityArnList), + } + + return []map[string]interface{}{m} +} + +func resourceAwsMskClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + log.Printf("[DEBUG] Deleting MSK cluster: %q", d.Id()) + _, err := 
conn.DeleteCluster(&kafka.DeleteClusterInput{ + ClusterArn: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, kafka.ErrCodeNotFoundException, "") { + return nil + } + return fmt.Errorf("failed deleting MSK cluster %q: %s", d.Id(), err) + } + + log.Printf("[DEBUG] Waiting for MSK cluster %q to be deleted", d.Id()) + + return resourceAwsMskClusterDeleteWaiter(conn, d.Id()) +} + +func resourceAwsMskClusterDeleteWaiter(conn *kafka.Kafka, arn string) error { + input := &kafka.DescribeClusterInput{ + ClusterArn: aws.String(arn), + } + err := resource.Retry(60*time.Minute, func() *resource.RetryError { + _, err := conn.DescribeCluster(input) + + if err != nil { + if isAWSErr(err, kafka.ErrCodeNotFoundException, "") { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("MSK cluster %q still exists", arn)) + }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeCluster(input) + if isAWSErr(err, kafka.ErrCodeNotFoundException, "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error waiting for MSK cluster to be deleted: %s", err) + } + return nil +} + +func mskClusterOperationRefreshFunc(conn *kafka.Kafka, clusterOperationARN string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &kafka.DescribeClusterOperationInput{ + ClusterOperationArn: aws.String(clusterOperationARN), + } + + output, err := conn.DescribeClusterOperation(input) + + if err != nil { + return nil, "UPDATE_FAILED", fmt.Errorf("error describing MSK Cluster Operation (%s): %s", clusterOperationARN, err) + } + + if output == nil || output.ClusterOperationInfo == nil { + return nil, "UPDATE_FAILED", fmt.Errorf("error describing MSK Cluster Operation (%s): empty response", clusterOperationARN) + } + + state := aws.StringValue(output.ClusterOperationInfo.OperationState) + + if state == "UPDATE_FAILED" && output.ClusterOperationInfo.ErrorInfo != nil { + errorInfo := output.ClusterOperationInfo.ErrorInfo + err := fmt.Errorf("error code: %s, error string: %s", aws.StringValue(errorInfo.ErrorCode), aws.StringValue(errorInfo.ErrorString)) + return output.ClusterOperationInfo, state, err + } + + return output.ClusterOperationInfo, state, nil + } +} + +func waitForMskClusterOperation(conn *kafka.Kafka, clusterOperationARN string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING", "UPDATE_IN_PROGRESS"}, + Target: []string{"UPDATE_COMPLETE"}, + Refresh: mskClusterOperationRefreshFunc(conn, clusterOperationARN), + Timeout: 60 * time.Minute, + } + + log.Printf("[DEBUG] Waiting for MSK Cluster Operation (%s) completion", clusterOperationARN) + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_configuration.go new file mode 100644 index 00000000000..862d4befa15 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_msk_configuration.go @@ -0,0 +1,137 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsMskConfiguration() *schema.Resource { + return &schema.Resource{ + Create: 
resourceAwsMskConfigurationCreate, + Read: resourceAwsMskConfigurationRead, + Delete: schema.Noop, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "kafka_versions": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "latest_revision": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "server_properties": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsMskConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + input := &kafka.CreateConfigurationInput{ + KafkaVersions: expandStringSet(d.Get("kafka_versions").(*schema.Set)), + Name: aws.String(d.Get("name").(string)), + ServerProperties: []byte(d.Get("server_properties").(string)), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + output, err := conn.CreateConfiguration(input) + + if err != nil { + return fmt.Errorf("error creating MSK Configuration: %s", err) + } + + d.SetId(aws.StringValue(output.Arn)) + + return resourceAwsMskConfigurationRead(d, meta) +} + +func resourceAwsMskConfigurationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + configurationInput := &kafka.DescribeConfigurationInput{ + Arn: aws.String(d.Id()), + } + + configurationOutput, err := conn.DescribeConfiguration(configurationInput) + + if isAWSErr(err, kafka.ErrCodeNotFoundException, "") { + log.Printf("[WARN] MSK Configuration (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error describing MSK Configuration (%s): %s", d.Id(), err) + } + + if configurationOutput == nil { + return fmt.Errorf("error describing MSK Configuration (%s): missing result", d.Id()) + } + + if configurationOutput.LatestRevision == nil { + return fmt.Errorf("error describing MSK Configuration (%s): missing latest revision", d.Id()) + } + + revision := configurationOutput.LatestRevision.Revision + revisionInput := &kafka.DescribeConfigurationRevisionInput{ + Arn: aws.String(d.Id()), + Revision: revision, + } + + revisionOutput, err := conn.DescribeConfigurationRevision(revisionInput) + + if err != nil { + return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): %s", d.Id(), aws.Int64Value(revision), err) + } + + if revisionOutput == nil { + return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): missing result", d.Id(), aws.Int64Value(revision)) + } + + d.Set("arn", aws.StringValue(configurationOutput.Arn)) + d.Set("description", aws.StringValue(configurationOutput.Description)) + + if err := d.Set("kafka_versions", aws.StringValueSlice(configurationOutput.KafkaVersions)); err != nil { + return fmt.Errorf("error setting kafka_versions: %s", err) + } + + d.Set("latest_revision", aws.Int64Value(revision)) + d.Set("name", aws.StringValue(configurationOutput.Name)) + d.Set("server_properties", string(revisionOutput.ServerProperties)) + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go index d227ca2f917..13457610685 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNatGateway() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster.go index 792bf867fc1..b2a110fa14f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsNeptuneCluster() *schema.Resource { @@ -350,6 +350,13 @@ func resourceAwsNeptuneClusterCreate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + if restoreDBClusterFromSnapshot { + _, err = conn.RestoreDBClusterFromSnapshot(restoreDBClusterFromSnapshotInput) + } else { + _, err = conn.CreateDBCluster(createDbClusterInput) + } + } if err != nil { return fmt.Errorf("error creating Neptune Cluster: %s", err) } @@ -542,6 +549,9 @@ func resourceAwsNeptuneClusterUpdate(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBCluster(req) + } if err != nil { return fmt.Errorf("Failed to modify Neptune Cluster (%s): %s", d.Id(), err) } @@ -636,6 +646,9 @@ func resourceAwsNeptuneClusterDelete(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteDBCluster(&deleteOpts) + } if err != nil { return fmt.Errorf("Neptune Cluster cannot be deleted: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_instance.go index 6a44583033e..3e655d6b28c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_instance.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNeptuneClusterInstance() *schema.Resource { @@ -247,6 +247,9 @@ func resourceAwsNeptuneClusterInstanceCreate(d *schema.ResourceData, meta interf } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBInstance(createOpts) + } if err != nil { return fmt.Errorf("error creating Neptune Instance: %s", err) } @@ -407,6 +410,9 @@ func resourceAwsNeptuneClusterInstanceUpdate(d *schema.ResourceData, meta interf } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBInstance(req) + } if err != nil { return fmt.Errorf("Error modifying Neptune Instance %s: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_parameter_group.go index 3f31f345a13..29615405ebc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_parameter_group.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const neptuneClusterParameterGroupMaxParamsBulkEdit = 20 diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_snapshot.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_snapshot.go index cb06484d3e9..41b4a7ffcce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_snapshot.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_cluster_snapshot.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNeptuneClusterSnapshot() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_event_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_event_subscription.go index 7e2e1194087..92bf9153bc7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_event_subscription.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_event_subscription.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNeptuneEventSubscription() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_parameter_group.go index 05bc64b556f..c09111aef01 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_parameter_group.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" @@ -229,6 +229,9 @@ func resourceAwsNeptuneParameterGroupUpdate(d *schema.ResourceData, meta interfa } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ResetDBParameterGroup(&resetOpts) + } if err != nil { return fmt.Errorf("Error resetting Neptune Parameter Group: %s", err) } @@ -272,10 +275,10 @@ func resourceAwsNeptuneParameterGroupUpdate(d *schema.ResourceData, meta interfa func resourceAwsNeptuneParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).neptuneconn - return resource.Retry(3*time.Minute, func() *resource.RetryError { - deleteOpts := neptune.DeleteDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Id()), - } + deleteOpts := neptune.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Id()), + } + err := resource.Retry(3*time.Minute, func() *resource.RetryError { _, err := conn.DeleteDBParameterGroup(&deleteOpts) if err != nil { if isAWSErr(err, neptune.ErrCodeDBParameterGroupNotFoundFault, "") { @@ -288,4 +291,18 @@ func resourceAwsNeptuneParameterGroupDelete(d *schema.ResourceData, meta interfa } return nil }) + + if isResourceTimeoutError(err) { + _, err = conn.DeleteDBParameterGroup(&deleteOpts) + } + + if isAWSErr(err, neptune.ErrCodeDBParameterGroupNotFoundFault, "") { + return nil + } + + if err != nil { + return fmt.Errorf("Error deleting Neptune Parameter Group: %s", err) + } + + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_subnet_group.go index 9d66476999f..4867896bd64 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_subnet_group.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_neptune_subnet_group.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNeptuneSubnetGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go index 0b95518ef29..deb7f70e0ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go @@ -12,9 +12,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNetworkAcl() *schema.Resource { @@ -33,13 +33,11 @@ func resourceAwsNetworkAcl() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Computed: false, }, "subnet_id": { Type: schema.TypeString, Optional: true, ForceNew: true, - Computed: false, Removed: "Use `subnet_ids` argument instead", }, "subnet_ids": { @@ -51,7 +49,6 @@ func resourceAwsNetworkAcl() *schema.Resource { }, "ingress": { Type: schema.TypeSet, - Required: false, Optional: true, Computed: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -102,7 +99,6 @@ func resourceAwsNetworkAcl() *schema.Resource { }, "egress": { Type: schema.TypeSet, - Required: false, Optional: true, Computed: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -441,79 +437,100 @@ func resourceAwsNetworkAclDelete(d *schema.ResourceData, meta interface{}) error conn := meta.(*AWSClient).ec2conn log.Printf("[INFO] Deleting Network Acl: %s", d.Id()) - retryErr := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteNetworkAcl(&ec2.DeleteNetworkAclInput{ - NetworkAclId: aws.String(d.Id()), - }) + input := &ec2.DeleteNetworkAclInput{ + NetworkAclId: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteNetworkAcl(input) if err != nil { - ec2err := err.(awserr.Error) - switch ec2err.Code() { - case "InvalidNetworkAclID.NotFound": + if isAWSErr(err, "InvalidNetworkAclID.NotFound", "") { return nil - case "DependencyViolation": - // In case of dependency violation, we remove the association between subnet and network acl. - // This means the subnet is attached to default acl of vpc. 
- var associations []*ec2.NetworkAclAssociation - if v, ok := d.GetOk("subnet_ids"); ok { - ids := v.(*schema.Set).List() - for _, i := range ids { - a, err := findNetworkAclAssociation(i.(string), conn) - if err != nil { - if isResourceNotFoundError(err) { - continue - } - return resource.NonRetryableError(err) - } - associations = append(associations, a) - } - } - - log.Printf("[DEBUG] Replacing network associations for Network ACL (%s): %s", d.Id(), associations) - defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) + } + if isAWSErr(err, "DependencyViolation", "") { + err = cleanUpDependencyViolations(d, conn) if err != nil { return resource.NonRetryableError(err) } - - for _, a := range associations { - log.Printf("DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *a.NetworkAclAssociationId, *defaultAcl.NetworkAclId) - _, replaceErr := conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: a.NetworkAclAssociationId, - NetworkAclId: defaultAcl.NetworkAclId, - }) - if replaceErr != nil { - if replaceEc2err, ok := replaceErr.(awserr.Error); ok { - // It's possible that during an attempt to replace this - // association, the Subnet in question has already been moved to - // another ACL. This can happen if you're destroying a network acl - // and simultaneously re-associating it's subnet(s) with another - // ACL; Terraform may have already re-associated the subnet(s) by - // the time we attempt to destroy them, even between the time we - // list them and then try to destroy them. In this case, the - // association we're trying to replace will no longer exist and - // this call will fail. Here we trap that error and fail - // gracefully; the association we tried to replace gone, we trust - // someone else has taken ownership. - if replaceEc2err.Code() == "InvalidAssociationID.NotFound" { - log.Printf("[WARN] Network Association (%s) no longer found; Network Association likely updated or removed externally, removing from state", *a.NetworkAclAssociationId) - continue - } - } - log.Printf("[ERR] Non retry-able error in replacing associations for Network ACL (%s): %s", d.Id(), replaceErr) - return resource.NonRetryableError(replaceErr) - } - } return resource.RetryableError(fmt.Errorf("Dependencies found and cleaned up, retrying")) - default: - // Any other error, we want to quit the retry loop immediately - return resource.NonRetryableError(err) } + + return resource.NonRetryableError(err) + } log.Printf("[Info] Deleted network ACL %s successfully", d.Id()) return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteNetworkAcl(input) + if err != nil && isAWSErr(err, "InvalidNetworkAclID.NotFound", "") { + return nil + } + err = cleanUpDependencyViolations(d, conn) + if err != nil { + // This seems excessive but is probably the best way to make sure it's actually deleted + _, err = conn.DeleteNetworkAcl(input) + if err != nil && isAWSErr(err, "InvalidNetworkAclID.NotFound", "") { + return nil + } + } + } + if err != nil { + return fmt.Errorf("Error destroying Network ACL (%s): %s", d.Id(), err) + } + return nil +} - if retryErr != nil { - return fmt.Errorf("Error destroying Network ACL (%s): %s", d.Id(), retryErr) +func cleanUpDependencyViolations(d *schema.ResourceData, conn *ec2.EC2) error { + // In case of dependency violation, we remove the association between subnet and network acl. + // This means the subnet is attached to default acl of vpc. 
+ var associations []*ec2.NetworkAclAssociation + if v, ok := d.GetOk("subnet_ids"); ok { + ids := v.(*schema.Set).List() + for _, i := range ids { + a, err := findNetworkAclAssociation(i.(string), conn) + if err != nil { + if isResourceNotFoundError(err) { + continue + } + return fmt.Errorf("Error finding network ACL association: %s", err) + } + associations = append(associations, a) + } + } + + log.Printf("[DEBUG] Replacing network associations for Network ACL (%s): %s", d.Id(), associations) + defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) + if err != nil { + return fmt.Errorf("Error getting default network ACL: %s", err) + } + + for _, a := range associations { + log.Printf("[DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *a.NetworkAclAssociationId, *defaultAcl.NetworkAclId) + _, replaceErr := conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: a.NetworkAclAssociationId, + NetworkAclId: defaultAcl.NetworkAclId, + }) + if replaceErr != nil { + if replaceEc2err, ok := replaceErr.(awserr.Error); ok { + // It's possible that during an attempt to replace this + // association, the Subnet in question has already been moved to + // another ACL. This can happen if you're destroying a network acl + // and simultaneously re-associating its subnet(s) with another + // ACL; Terraform may have already re-associated the subnet(s) by + // the time we attempt to destroy them, even between the time we + // list them and then try to destroy them. In this case, the + // association we're trying to replace will no longer exist and + // this call will fail. Here we trap that error and fail + // gracefully; the association we tried to replace is gone, and we + // trust someone else has taken ownership. + if replaceEc2err.Code() == "InvalidAssociationID.NotFound" { + log.Printf("[WARN] Network Association (%s) no longer found; Network Association likely updated or removed externally, removing from state", *a.NetworkAclAssociationId) + continue + } + } + log.Printf("[ERR] Non-retryable error in replacing associations for Network ACL (%s): %s", d.Id(), replaceErr) + return fmt.Errorf("Error replacing network ACL associations: %s", replaceErr) + } } return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go index ac4c4a422a8..6b5d0d897a4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNetworkAclRule() *schema.Resource { @@ -169,18 +169,24 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e // It appears it might be a while until the newly created rule is visible via the // API (see issue GH-4721). 
Retry the `findNetworkAclRule` function until it is // visible (which in most cases is likely immediately). + var r *ec2.NetworkAclEntry err = resource.Retry(3*time.Minute, func() *resource.RetryError { - r, findErr := findNetworkAclRule(d, meta) - if findErr != nil { - return resource.RetryableError(findErr) + r, err = findNetworkAclRule(d, meta) + if err != nil { + return resource.RetryableError(err) } if r == nil { - err := fmt.Errorf("Network ACL rule (%s) not found", d.Id()) - return resource.RetryableError(err) + return resource.RetryableError(fmt.Errorf("Network ACL rule (%s) not found", d.Id())) } return nil }) + if isResourceTimeoutError(err) { + r, err = findNetworkAclRule(d, meta) + if r == nil { + return fmt.Errorf("Network ACL rule (%s) not found", d.Id()) + } + } if err != nil { return fmt.Errorf("Created Network ACL Rule was not visible in API within 3 minute period. Running 'terraform apply' again will resume infrastructure creation.") } @@ -269,6 +275,11 @@ func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkA log.Printf("[INFO] Describing Network Acl: %s", d.Get("network_acl_id").(string)) log.Printf("[INFO] Describing Network Acl with the Filters %#v", params) resp, err := conn.DescribeNetworkAcls(params) + + if isAWSErr(err, "InvalidNetworkAclID.NotFound", "") { + return nil, nil + } + if err != nil { return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", d.Get("rule_number").(int), err.Error()) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go index 81a08c6766d..cdd6807b224 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go @@ -11,9 +11,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNetworkInterface() *schema.Resource { @@ -444,3 +444,134 @@ func resourceAwsEniAttachmentHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", m["device_index"].(int))) return hashcode.String(buf.String()) } + +func deleteNetworkInterface(conn *ec2.EC2, eniId string) error { + _, err := conn.DeleteNetworkInterface(&ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String(eniId), + }) + + if isAWSErr(err, "InvalidNetworkInterfaceID.NotFound", "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting ENI (%s): %s", eniId, err) + } + + return nil +} + +func detachNetworkInterface(conn *ec2.EC2, eni *ec2.NetworkInterface, timeout time.Duration) error { + if eni == nil { + return nil + } + + eniId := aws.StringValue(eni.NetworkInterfaceId) + if eni.Attachment == nil { + log.Printf("[DEBUG] ENI %s is already detached", eniId) + return nil + } + + _, err := conn.DetachNetworkInterface(&ec2.DetachNetworkInterfaceInput{ + AttachmentId: eni.Attachment.AttachmentId, + Force: 
aws.Bool(true), + }) + + if isAWSErr(err, "InvalidAttachmentID.NotFound", "") { + return nil + } + + if err != nil { + return fmt.Errorf("error detaching ENI (%s): %s", eniId, err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ec2.AttachmentStatusAttaching, + ec2.AttachmentStatusAttached, + ec2.AttachmentStatusDetaching, + }, + Target: []string{ + ec2.AttachmentStatusDetached, + }, + Refresh: networkInterfaceAttachmentStateRefresh(conn, eniId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + NotFoundChecks: 1, + } + + log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", eniId) + _, err = stateConf.WaitForState() + + if isResourceNotFoundError(err) { + return nil + } + + if err != nil { + return fmt.Errorf("error waiting for ENI (%s) to become detached: %s", eniId, err) + } + + return nil +} + +func networkInterfaceAttachmentStateRefresh(conn *ec2.EC2, eniId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{ + NetworkInterfaceIds: aws.StringSlice([]string{eniId}), + }) + + if isAWSErr(err, "InvalidNetworkInterfaceID.NotFound", "") { + return nil, ec2.AttachmentStatusDetached, nil + } + + if err != nil { + return nil, "", fmt.Errorf("error describing ENI (%s): %s", eniId, err) + } + + n := len(resp.NetworkInterfaces) + switch n { + case 0: + return nil, ec2.AttachmentStatusDetached, nil + + case 1: + attachment := resp.NetworkInterfaces[0].Attachment + if attachment == nil { + return nil, ec2.AttachmentStatusDetached, nil + } + return attachment, aws.StringValue(attachment.Status), nil + + default: + return nil, "", fmt.Errorf("found %d ENIs for %s, expected 1", n, eniId) + } + } +} + +func networkInterfaceStateRefresh(conn *ec2.EC2, eniId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{ + NetworkInterfaceIds: aws.StringSlice([]string{eniId}), + }) + + if isAWSErr(err, "InvalidNetworkInterfaceID.NotFound", "") { + return nil, "", nil + } + + if err != nil { + return nil, "", fmt.Errorf("error describing ENI (%s): %s", eniId, err) + } + + n := len(resp.NetworkInterfaces) + switch n { + case 0: + return nil, "", nil + + case 1: + eni := resp.NetworkInterfaces[0] + return eni, aws.StringValue(eni.Status), nil + + default: + return nil, "", fmt.Errorf("found %d ENIs for %s, expected 1", n, eniId) + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go index 684a407f795..6fa31fa96cc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func 
resourceAwsNetworkInterfaceAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_sg_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_sg_attachment.go index 1ac6476975e..9b6c3ac93eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_sg_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_sg_attachment.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsNetworkInterfaceSGAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go index 224bde4ba1e..d1dad14242f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go @@ -4,14 +4,12 @@ import ( "fmt" "log" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsOpsworksApplication() *schema.Resource { @@ -312,24 +310,9 @@ func resourceAwsOpsworksApplicationCreate(d *schema.ResourceData, meta interface Attributes: resourceAwsOpsworksApplicationAttributes(d), } - var resp *opsworks.CreateAppOutput - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - resp, cerr = client.CreateApp(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return resource.RetryableError(cerr) - } - return resource.NonRetryableError(cerr) - } - return nil - }) - + resp, err := client.CreateApp(req) if err != nil { - return err + return fmt.Errorf("Error creating OpsWorks application: %s", err) } appID := *resp.AppId @@ -362,23 +345,11 @@ func resourceAwsOpsworksApplicationUpdate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id()) - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - _, cerr := client.UpdateApp(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return resource.NonRetryableError(cerr) - } - return resource.RetryableError(cerr) - } - return nil - }) - + _, err = client.UpdateApp(req) if err != nil { - return err + return fmt.Errorf("Error updating OpsWorks app: %s", err) 
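
A recurring move in these OpsWorks hunks is dropping the hand-rolled awserr.Error inspection in favor of the provider's isAWSErr helper, already used above in findNetworkAclRule and deleteNetworkInterface, and used below with a message substring in the permission resource. For reference, that helper amounts to a code match plus a substring match on the message; a sketch of its presumed shape, assuming the strings and awserr imports:

// isAWSErr reports whether err is an awserr.Error whose code equals the given
// code and whose message contains the given substring ("" matches any message).
func isAWSErr(err error, code string, message string) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == code && strings.Contains(awsErr.Message(), message)
	}
	return false
}

This same shape is what lets the OpsWorks stack hunk further down match ValidationException errors by message substring instead of comparing codes by hand.
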
} + return resourceAwsOpsworksApplicationRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go index 59de60db6a3..8fa04b9f4f9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksCustomLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go index 1aadefe5dda..071164fec9a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksGangliaLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go index 91e843257ce..1aa77b30df8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksHaproxyLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go index f89bec61b24..d8b3c9737a1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go @@ -6,10 +6,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" 
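
The ENI helpers added earlier rely on the resource.StateChangeConf polling machinery: a StateRefreshFunc reports (object, state, error) on each poll, and WaitForState keeps polling until the state reaches Target or Timeout elapses. Reduced to a skeleton (the conn, eniId, and timeout values here are illustrative):

	stateConf := &resource.StateChangeConf{
		// States we may observe while the detach is still in flight.
		Pending: []string{ec2.AttachmentStatusAttached, ec2.AttachmentStatusDetaching},
		// State(s) that complete the wait.
		Target: []string{ec2.AttachmentStatusDetached},
		// Called repeatedly; returns (result, current state, error).
		Refresh: networkInterfaceAttachmentStateRefresh(conn, eniId),
		Timeout: 10 * time.Minute,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf("error waiting for ENI (%s) to detach: %s", eniId, err)
	}
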
"github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go index 14679658f33..57db8e731de 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksJavaAppLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go index 301d739240e..4ac2ff4c0aa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksMemcachedLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go index 560641a4e38..b2076438e4b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksMysqlLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go index d11261b6334..d790a92a2bb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksNodejsAppLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go index 22d99838664..fa61d783229 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsOpsworksPermission() *schema.Resource { @@ -121,20 +121,21 @@ func resourceAwsOpsworksSetPermission(d *schema.ResourceData, meta interface{}) } err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.SetPermission(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return resource.RetryableError(cerr) + _, err := client.SetPermission(req) + if err != nil { + + if isAWSErr(err, opsworks.ErrCodeResourceNotFoundException, "Unable to find user with ARN") { + return resource.RetryableError(err) } - return resource.NonRetryableError(cerr) + return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + _, err = client.SetPermission(req) + } + if err != nil { return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go index c3176af5bd3..a578adbe41b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksPhpAppLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go index 55f869c6ddc..cc9d424bb31 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksRailsAppLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go index 27933eef926..14ca6aebd3a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go @@ -3,13 +3,10 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksRdsDbInstance() *schema.Resource { @@ -68,23 +65,10 @@ func resourceAwsOpsworksRdsDbInstanceUpdate(d *schema.ResourceData, meta interfa if requestUpdate { log.Printf("[DEBUG] Opsworks RDS DB Instance Modification request: %s", req) - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.UpdateRdsDbInstance(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) - } - return nil - }) - + _, err := client.UpdateRdsDbInstance(req) if err != nil { - return err + return fmt.Errorf("Error updating Opsworks RDS DB instance: %s", err) } - } d.Partial(false) @@ -101,27 +85,17 @@ func resourceAwsOpsworksRdsDbInstanceDeregister(d *schema.ResourceData, meta int log.Printf("[DEBUG] Unregistering rds db instance '%s' from stack: %s", d.Get("rds_db_instance_arn"), d.Get("stack_id")) - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.DeregisterRdsDbInstance(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - if opserr.Code() == "ResourceNotFoundException" { - log.Printf("[INFO] The db instance could not be found. Remove it from state.") - d.SetId("") - - return nil - } - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) + _, err := client.DeregisterRdsDbInstance(req) + if err != nil { + if isAWSErr(err, "ResourceNotFoundException", "") { + log.Printf("[INFO] The db instance could not be found. 
Remove it from state.") + d.SetId("") + return nil } + return fmt.Errorf("Error deregistering Opsworks RDS DB instance: %s", err) + } - return nil - }) - - return err + return nil } func resourceAwsOpsworksRdsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { @@ -171,22 +145,9 @@ func resourceAwsOpsworksRdsDbInstanceRegister(d *schema.ResourceData, meta inter DbPassword: aws.String(d.Get("db_password").(string)), } - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.RegisterRdsDbInstance(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) - } - - return nil - }) - + _, err := client.RegisterRdsDbInstance(req) if err != nil { - return err + return fmt.Errorf("Error registering Opsworks RDS DB instance: %s", err) } return resourceAwsOpsworksRdsDbInstanceRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go index ce0a44beb63..afa406828e7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go @@ -3,12 +3,10 @@ package aws import ( "fmt" "log" - "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" @@ -388,7 +386,7 @@ func opsworksConnForRegion(region string, meta interface{}) (*opsworks.OpsWorks, return nil, fmt.Errorf("Error creating AWS session: %s", err) } - sess.Handlers.Build.PushBack(request.MakeAddToUserAgentHandler("APN/1.0 HashiCorp/1.0 Terraform", terraform.VersionString())) + sess.Handlers.Build.PushBack(request.MakeAddToUserAgentHandler("APN/1.0 HashiCorp/1.0 Terraform", meta.(*AWSClient).terraformVersion)) newSession := sess.Copy(&aws.Config{Region: aws.String(region)}) newOpsworksconn := opsworks.New(newSession) @@ -436,33 +434,31 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er var resp *opsworks.CreateStackOutput err = resource.Retry(20*time.Minute, func() *resource.RetryError { - var cerr error - resp, cerr = client.CreateStack(req) - if cerr != nil { - if opserr, ok := cerr.(awserr.Error); ok { - // If Terraform is also managing the service IAM role, - // it may have just been created and not yet be - // propagated. - // AWS doesn't provide a machine-readable code for this - // specific error, so we're forced to do fragile message - // matching. - // The full error we're looking for looks something like - // the following: - // Service Role Arn: [...] 
is not yet propagated, please try again in a couple of minutes - propErr := "not yet propagated" - trustErr := "not the necessary trust relationship" - validateErr := "validate IAM role permission" - if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr) || strings.Contains(opserr.Message(), validateErr)) { - log.Printf("[INFO] Waiting for service IAM role to propagate") - return resource.RetryableError(cerr) - } + resp, err = client.CreateStack(req) + if err != nil { + // If Terraform is also managing the service IAM role, it may have just been created and not yet be + // propagated. AWS doesn't provide a machine-readable code for this specific error, so we're forced + // to do fragile message matching. + // The full error we're looking for looks something like the following: + // Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes + propErr := "not yet propagated" + trustErr := "not the necessary trust relationship" + validateErr := "validate IAM role permission" + + if isAWSErr(err, "ValidationException", propErr) || isAWSErr(err, "ValidationException", trustErr) || isAWSErr(err, "ValidationException", validateErr) { + log.Printf("[INFO] Waiting for service IAM role to propagate") + return resource.RetryableError(err) } - return resource.NonRetryableError(cerr) + + return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + resp, err = client.CreateStack(req) + } if err != nil { - return err + return fmt.Errorf("Error creating Opsworks stack: %s", err) } stackId := *resp.StackId diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go index df91b1b1b40..baee9feba02 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go @@ -1,7 +1,7 @@ package aws import ( - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOpsworksStaticWebLayer() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go index ce24f5b2fdc..8ed47148074 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go @@ -3,7 +3,7 @@ package aws import ( "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_account.go index 
99bd7dae8fc..fcc3b8a4f86 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_account.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_account.go @@ -8,15 +8,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/organizations" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsOrganizationsAccount() *schema.Resource { return &schema.Resource{ Create: resourceAwsOrganizationsAccountCreate, Read: resourceAwsOrganizationsAccountRead, + Update: resourceAwsOrganizationsAccountUpdate, Delete: resourceAwsOrganizationsAccountDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -35,6 +36,12 @@ func resourceAwsOrganizationsAccount() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "parent_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile("^(r-[0-9a-z]{4,32})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})$"), "see https://docs.aws.amazon.com/organizations/latest/APIReference/API_MoveAccount.html#organizations-MoveAccount-request-DestinationParentId"), + }, "status": { Type: schema.TypeString, Computed: true, @@ -63,6 +70,7 @@ func resourceAwsOrganizationsAccount() *schema.Resource { Optional: true, ValidateFunc: validateAwsOrganizationsAccountRoleName, }, + "tags": tagsSchema(), }, } } @@ -83,29 +91,32 @@ func resourceAwsOrganizationsAccountCreate(d *schema.ResourceData, meta interfac createOpts.IamUserAccessToBilling = aws.String(iam_user.(string)) } - log.Printf("[DEBUG] Account create config: %#v", createOpts) + log.Printf("[DEBUG] Creating AWS Organizations Account: %s", createOpts) - var err error var resp *organizations.CreateAccountOutput - err = resource.Retry(4*time.Minute, func() *resource.RetryError { + err := resource.Retry(4*time.Minute, func() *resource.RetryError { + var err error + resp, err = conn.CreateAccount(createOpts) - if err != nil { - if isAWSErr(err, organizations.ErrCodeFinalizingOrganizationException, "") { - log.Printf("[DEBUG] Trying to create account again: %q", err.Error()) - return resource.RetryableError(err) - } + if isAWSErr(err, organizations.ErrCodeFinalizingOrganizationException, "") { + return resource.RetryableError(err) + } + if err != nil { return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateAccount(createOpts) + } + if err != nil { return fmt.Errorf("Error creating account: %s", err) } - log.Printf("[DEBUG] Account create response: %#v", resp) requestId := *resp.CreateAccountStatus.Id @@ -130,6 +141,41 @@ func resourceAwsOrganizationsAccountCreate(d *schema.ResourceData, meta interfac accountId := stateResp.(*organizations.CreateAccountStatus).AccountId d.SetId(*accountId) + if v, ok := d.GetOk("parent_id"); ok { + newParentID := v.(string) + + existingParentID, err := resourceAwsOrganizationsAccountGetParentId(conn, d.Id()) + + if err != nil { + return fmt.Errorf("error getting AWS Organizations Account (%s) parent: %s", d.Id(), err) + } + + if newParentID != existingParentID { + input := 
&organizations.MoveAccountInput{ + AccountId: accountId, + SourceParentId: aws.String(existingParentID), + DestinationParentId: aws.String(newParentID), + } + + if _, err := conn.MoveAccount(input); err != nil { + return fmt.Errorf("error moving AWS Organizations Account (%s): %s", d.Id(), err) + } + } + } + + if tags := tagsFromMapOrganizations(d.Get("tags").(map[string]interface{})); len(tags) > 0 { + input := &organizations.TagResourceInput{ + ResourceId: aws.String(d.Id()), + Tags: tags, + } + + log.Printf("[DEBUG] Adding Organizations Account (%s) tags: %s", d.Id(), input) + + if _, err := conn.TagResource(input); err != nil { + return fmt.Errorf("error updating Organizations Account (%s) tags: %s", d.Id(), err) + } + } + return resourceAwsOrganizationsAccountRead(d, meta) } @@ -139,13 +185,15 @@ func resourceAwsOrganizationsAccountRead(d *schema.ResourceData, meta interface{ AccountId: aws.String(d.Id()), } resp, err := conn.DescribeAccount(describeOpts) + + if isAWSErr(err, organizations.ErrCodeAccountNotFoundException, "") { + log.Printf("[WARN] Account does not exist, removing from state: %s", d.Id()) + d.SetId("") + return nil + } + if err != nil { - if isAWSErr(err, organizations.ErrCodeAccountNotFoundException, "") { - log.Printf("[WARN] Account does not exist, removing from state: %s", d.Id()) - d.SetId("") - return nil - } - return err + return fmt.Errorf("error describing AWS Organizations Account (%s): %s", d.Id(), err) } account := resp.Account @@ -155,15 +203,93 @@ func resourceAwsOrganizationsAccountRead(d *schema.ResourceData, meta interface{ return nil } + parentId, err := resourceAwsOrganizationsAccountGetParentId(conn, d.Id()) + if err != nil { + return fmt.Errorf("error getting AWS Organizations Account (%s) parent: %s", d.Id(), err) + } + + tagsInput := &organizations.ListTagsForResourceInput{ + ResourceId: aws.String(d.Id()), + } + + tagsOutput, err := conn.ListTagsForResource(tagsInput) + + if err != nil { + return fmt.Errorf("error reading Organizations Account (%s) tags: %s", d.Id(), err) + } + + if tagsOutput == nil { + return fmt.Errorf("error reading Organizations Account (%s) tags: empty result", d.Id()) + } + d.Set("arn", account.Arn) d.Set("email", account.Email) d.Set("joined_method", account.JoinedMethod) d.Set("joined_timestamp", account.JoinedTimestamp) d.Set("name", account.Name) + d.Set("parent_id", parentId) d.Set("status", account.Status) + + if err := d.Set("tags", tagsToMapOrganizations(tagsOutput.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } +func resourceAwsOrganizationsAccountUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).organizationsconn + + if d.HasChange("parent_id") { + o, n := d.GetChange("parent_id") + + input := &organizations.MoveAccountInput{ + AccountId: aws.String(d.Id()), + SourceParentId: aws.String(o.(string)), + DestinationParentId: aws.String(n.(string)), + } + + if _, err := conn.MoveAccount(input); err != nil { + return fmt.Errorf("error moving AWS Organizations Account (%s): %s", d.Id(), err) + } + } + + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsOrganizations(tagsFromMapOrganizations(o), tagsFromMapOrganizations(n)) + + // Set tags + if len(remove) > 0 { + input := &organizations.UntagResourceInput{ + ResourceId: aws.String(d.Id()), + TagKeys: remove, + } + + log.Printf("[DEBUG] Removing Organizations Account (%s) tags: 
%s", d.Id(), input) + + if _, err := conn.UntagResource(input); err != nil { + return fmt.Errorf("error removing Organizations Account (%s) tags: %s", d.Id(), err) + } + } + if len(create) > 0 { + input := &organizations.TagResourceInput{ + ResourceId: aws.String(d.Id()), + Tags: create, + } + + log.Printf("[DEBUG] Adding Organizations Account (%s) tags: %s", d.Id(), input) + + if _, err := conn.TagResource(input); err != nil { + return fmt.Errorf("error updating Organizations Account (%s) tags: %s", d.Id(), err) + } + } + } + + return resourceAwsOrganizationsAccountRead(d, meta) +} + func resourceAwsOrganizationsAccountDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).organizationsconn @@ -241,3 +367,29 @@ func validateAwsOrganizationsAccountRoleName(v interface{}, k string) (ws []stri return } + +func resourceAwsOrganizationsAccountGetParentId(conn *organizations.Organizations, childId string) (string, error) { + input := &organizations.ListParentsInput{ + ChildId: aws.String(childId), + } + var parents []*organizations.Parent + + err := conn.ListParentsPages(input, func(page *organizations.ListParentsOutput, lastPage bool) bool { + parents = append(parents, page.Parents...) + + return !lastPage + }) + + if err != nil { + return "", err + } + + if len(parents) == 0 { + return "", nil + } + + // assume there is only a single parent + // https://docs.aws.amazon.com/organizations/latest/APIReference/API_ListParents.html + parent := parents[0] + return aws.StringValue(parent.Id), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organization.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organization.go index 5f0751d2f4a..6aeff3177d8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organization.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organization.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/organizations" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const organizationsPolicyTypeStatusDisabled = "DISABLED" @@ -70,6 +70,30 @@ func resourceAwsOrganizationsOrganization() *schema.Resource { }, }, }, + "non_master_accounts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "roots": { Type: schema.TypeList, Computed: true, @@ -208,8 +232,16 @@ func resourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta inter log.Printf("[INFO] Listing Accounts for Organization: %s", d.Id()) var accounts []*organizations.Account + var nonMasterAccounts []*organizations.Account err = conn.ListAccountsPages(&organizations.ListAccountsInput{}, func(page *organizations.ListAccountsOutput, 
lastPage bool) bool { - accounts = append(accounts, page.Accounts...) + for _, account := range page.Accounts { + if aws.StringValue(account.Id) != aws.StringValue(org.Organization.MasterAccountId) { + nonMasterAccounts = append(nonMasterAccounts, account) + } + + accounts = append(accounts, account) + } + return !lastPage }) if err != nil { @@ -236,6 +268,10 @@ func resourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta inter d.Set("master_account_email", org.Organization.MasterAccountEmail) d.Set("master_account_id", org.Organization.MasterAccountId) + if err := d.Set("non_master_accounts", flattenOrganizationsAccounts(nonMasterAccounts)); err != nil { + return fmt.Errorf("error setting non_master_accounts: %s", err) + } + if err := d.Set("roots", flattenOrganizationsRoots(roots)); err != nil { return fmt.Errorf("error setting roots: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organizational_unit.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organizational_unit.go index 60cc519ae6b..806dc224fb4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organizational_unit.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_organizational_unit.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/organizations" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsOrganizationsOrganizationalUnit() *schema.Resource { @@ -94,6 +94,9 @@ func resourceAwsOrganizationsOrganizationalUnitCreate(d *schema.ResourceData, me return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateOrganizationalUnit(createOpts) + } if err != nil { return fmt.Errorf("Error creating organizational unit: %s", err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy.go index 715e6bf93b6..c0d3b0ea34f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/organizations" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsOrganizationsPolicy() *schema.Resource { @@ -85,6 +85,9 @@ func resourceAwsOrganizationsPolicyCreate(d *schema.ResourceData, meta interface return nil }) + if isResourceTimeoutError(err) { + 
resp, err = conn.CreatePolicy(input) + } if err != nil { return fmt.Errorf("error creating Organizations Policy: %s", err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy_attachment.go index b51949e8034..c2a47540d1b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_organizations_policy_attachment.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/organizations" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsOrganizationsPolicyAttachment() *schema.Resource { @@ -63,6 +63,9 @@ func resourceAwsOrganizationsPolicyAttachmentCreate(d *schema.ResourceData, meta return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AttachPolicy(input) + } if err != nil { return fmt.Errorf("error creating Organizations Policy Attachment: %s", err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_adm_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_adm_channel.go index 71373a98bb2..750fa6854ad 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_adm_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_adm_channel.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointADMChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_channel.go index 95949cb14e0..f1fb37734f3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_channel.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointAPNSChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_sandbox_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_sandbox_channel.go index e20a9a63288..33b30b21579 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_sandbox_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_sandbox_channel.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointAPNSSandboxChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_channel.go index 0663b95be7f..9ca64f4cf52 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_channel.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointAPNSVoipChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_sandbox_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_sandbox_channel.go index 44a7203a90f..c668835ec19 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_sandbox_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_apns_voip_sandbox_channel.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointAPNSVoipSandboxChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_app.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_app.go index 69908963e00..5bc65bc5490 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_app.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_app.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsPinpointApp() *schema.Resource { @@ -128,6 +128,11 @@ func resourceAwsPinpointApp() *schema.Resource { }, }, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": 
tagsSchema(), }, } } @@ -136,6 +141,7 @@ func resourceAwsPinpointAppCreate(d *schema.ResourceData, meta interface{}) erro pinpointconn := meta.(*AWSClient).pinpointconn var name string + if v, ok := d.GetOk("name"); ok { name = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { @@ -152,12 +158,17 @@ func resourceAwsPinpointAppCreate(d *schema.ResourceData, meta interface{}) erro }, } + if v, ok := d.GetOk("tags"); ok { + req.CreateApplicationRequest.Tags = tagsFromMapPinPointApp(v.(map[string]interface{})) + } + output, err := pinpointconn.CreateApp(req) if err != nil { return fmt.Errorf("error creating Pinpoint app: %s", err) } d.SetId(*output.ApplicationResponse.Id) + d.Set("arn", output.ApplicationResponse.Arn) return resourceAwsPinpointAppUpdate(d, meta) } @@ -193,6 +204,10 @@ func resourceAwsPinpointAppUpdate(d *schema.ResourceData, meta interface{}) erro return err } + if err := setTagsPinPointApp(conn, d); err != nil { + return fmt.Errorf("error updating PinPoint Application (%s) tags: %s", d.Id(), err) + } + return resourceAwsPinpointAppRead(d, meta) } @@ -229,6 +244,7 @@ func resourceAwsPinpointAppRead(d *schema.ResourceData, meta interface{}) error d.Set("name", app.ApplicationResponse.Name) d.Set("application_id", app.ApplicationResponse.Id) + d.Set("arn", app.ApplicationResponse.Arn) if err := d.Set("campaign_hook", flattenPinpointCampaignHook(settings.ApplicationSettingsResource.CampaignHook)); err != nil { return fmt.Errorf("error setting campaign_hook: %s", err) @@ -240,6 +256,10 @@ func resourceAwsPinpointAppRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting quiet_time: %s", err) } + if err := getTagsPinPointApp(conn, d); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_baidu_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_baidu_channel.go index 595696b7a49..43acd25d054 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_baidu_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_baidu_channel.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointBaiduChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_email_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_email_channel.go index 9809556e487..606a9dfac07 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_email_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_email_channel.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointEmailChannel() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_event_stream.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_event_stream.go index f5fa4f33a07..78eb88faa08 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_event_stream.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_event_stream.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointEventStream() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_gcm_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_gcm_channel.go index 4b7232600f7..6f12797735b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_gcm_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_gcm_channel.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointGCMChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_sms_channel.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_sms_channel.go index 659d852cb3d..f021b7ed88d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_sms_channel.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_pinpoint_sms_channel.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/pinpoint" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPinpointSMSChannel() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go index 58b579b8004..cf94e45b0bd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsPlacementGroup() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go index 094bfbc5b1b..2a0e8ec45ef 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsProxyProtocolPolicy() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_qldb_ledger.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_qldb_ledger.go new file mode 100644 index 00000000000..0ae4325454e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_qldb_ledger.go @@ -0,0 +1,270 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/qldb" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsQLDBLedger() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsQLDBLedgerCreate, + Read: resourceAwsQLDBLedgerRead, + Update: resourceAwsQLDBLedgerUpdate, + Delete: resourceAwsQLDBLedgerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9_-]+`), "must contain only alphanumeric characters, underscores, and hyphens"), + ), + }, + + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsQLDBLedgerCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).qldbconn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else { + name = resource.PrefixedUniqueId("tf") + } + + if err := d.Set("name", name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + + // Create the QLDB Ledger + // The qldb.PermissionsModeAllowAll is currently hardcoded because AWS doesn't support changing the mode. 
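
One thing worth flagging in this new QLDB file: it is the first resource in this stretch of the diff to use the provider's internal keyvaluetags package rather than per-service tag helpers. Pieced together from the calls that appear in the file itself, the round trip looks like this (a sketch; IgnoreAws() drops aws:-prefixed system tags):

	// schema "tags" map -> typed QLDB tags, passed as CreateLedgerInput.Tags below
	createTags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().QldbTags()
	_ = createTags

	// remote tags -> flat map for d.Set("tags", ...) on read
	tags, err := keyvaluetags.QldbListTags(conn, d.Get("arn").(string))
	if err != nil {
		return fmt.Errorf("error listing tags: %s", err)
	}
	if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil {
		return fmt.Errorf("error setting tags: %s", err)
	}

	// old vs. new schema values -> tag/untag API calls on update
	o, n := d.GetChange("tags")
	if err := keyvaluetags.QldbUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
		return fmt.Errorf("error updating tags: %s", err)
	}
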
+ createOpts := &qldb.CreateLedgerInput{ + Name: aws.String(d.Get("name").(string)), + PermissionsMode: aws.String(qldb.PermissionsModeAllowAll), + DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().QldbTags(), + } + + log.Printf("[DEBUG] QLDB Ledger create config: %#v", *createOpts) + qldbResp, err := conn.CreateLedger(createOpts) + if err != nil { + return fmt.Errorf("Error creating QLDB Ledger: %s", err) + } + + // Use the ledger name as the resource ID + d.SetId(*qldbResp.Name) + + log.Printf("[INFO] QLDB Ledger name: %s", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{qldb.LedgerStateCreating}, + Target: []string{qldb.LedgerStateActive}, + Refresh: qldbLedgerRefreshStatusFunc(conn, d.Id()), + Timeout: 8 * time.Minute, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for QLDB Ledger status to be \"%s\": %s", qldb.LedgerStateActive, err) + } + + // Update our attributes and return + return resourceAwsQLDBLedgerRead(d, meta) +} + +func resourceAwsQLDBLedgerRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).qldbconn + + // Refresh the QLDB state + input := &qldb.DescribeLedgerInput{ + Name: aws.String(d.Id()), + } + + qldbLedger, err := conn.DescribeLedger(input) + + if isAWSErr(err, qldb.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QLDB Ledger (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error describing QLDB Ledger (%s): %s", d.Id(), err) + } + + // Update state from the DescribeLedger response + if err := d.Set("name", qldbLedger.Name); err != nil { + return fmt.Errorf("error setting name: %s", err) + } + + if err := d.Set("deletion_protection", qldbLedger.DeletionProtection); err != nil { + return fmt.Errorf("error setting deletion protection: %s", err) + } + + // ARN + if err := d.Set("arn", qldbLedger.Arn); err != nil { + return fmt.Errorf("error setting ARN: %s", err) + } + + // Tags + log.Printf("[INFO] Fetching tags for %s", d.Id()) + tags, err := keyvaluetags.QldbListTags(conn, d.Get("arn").(string)) + if err != nil { + return fmt.Errorf("Error listing tags for QLDB Ledger: %s", err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + return nil +} + +func resourceAwsQLDBLedgerUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).qldbconn + + // Turn on partial mode + d.Partial(true) + + if d.HasChange("deletion_protection") { + val := d.Get("deletion_protection").(bool) + modifyOpts := &qldb.UpdateLedgerInput{ + Name: aws.String(d.Id()), + DeletionProtection: aws.Bool(val), + } + log.Printf( + "[INFO] Modifying deletion_protection QLDB attribute for %s: %#v", + d.Id(), modifyOpts) + if _, err := conn.UpdateLedger(modifyOpts); err != nil { + return err + } + + d.SetPartial("deletion_protection") + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.QldbUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + + d.Partial(false) + return resourceAwsQLDBLedgerRead(d, meta) +} + +func resourceAwsQLDBLedgerDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).qldbconn + deleteLedgerOpts := &qldb.DeleteLedgerInput{ + Name: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting QLDB Ledger: 
%s", d.Id()) + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteLedger(deleteLedgerOpts) + + if isAWSErr(err, qldb.ErrCodeResourceInUseException, "") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + _, err = conn.DeleteLedger(deleteLedgerOpts) + } + + if isAWSErr(err, qldb.ErrCodeResourceNotFoundException, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting QLDB Ledger (%s): %s", d.Id(), err) + } + + if err := waitForQLDBLedgerDeletion(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for QLDB Ledger (%s) deletion: %s", d.Id(), err) + } + + return nil +} + +func qldbLedgerRefreshStatusFunc(conn *qldb.QLDB, ledger string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &qldb.DescribeLedgerInput{ + Name: aws.String(ledger), + } + resp, err := conn.DescribeLedger(input) + if err != nil { + return nil, "failed", err + } + return resp, aws.StringValue(resp.State), nil + } +} + +func waitForQLDBLedgerDeletion(conn *qldb.QLDB, ledgerName string) error { + stateConf := resource.StateChangeConf{ + Pending: []string{qldb.LedgerStateCreating, + qldb.LedgerStateActive, + qldb.LedgerStateDeleting}, + Target: []string{""}, + Timeout: 5 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeLedger(&qldb.DescribeLedgerInput{ + Name: aws.String(ledgerName), + }) + + if isAWSErr(err, qldb.ErrCodeResourceNotFoundException, "") { + return 1, "", nil + } + + if err != nil { + return nil, qldb.ErrCodeResourceInUseException, err + } + + return resp, aws.StringValue(resp.State), nil + }, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_group.go new file mode 100644 index 00000000000..8376a415868 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_group.go @@ -0,0 +1,187 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/quicksight" +) + +func resourceAwsQuickSightGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsQuickSightGroupCreate, + Read: resourceAwsQuickSightGroupRead, + Update: resourceAwsQuickSightGroupUpdate, + Delete: resourceAwsQuickSightGroupDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "aws_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "default", + ValidateFunc: validation.StringInSlice([]string{ + "default", + }, false), + }, + }, + } +} + +func resourceAwsQuickSightGroupCreate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID := meta.(*AWSClient).accountid + namespace := d.Get("namespace").(string) + + if v, ok := d.GetOk("aws_account_id"); ok { + awsAccountID = v.(string) + } + + createOpts := &quicksight.CreateGroupInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + GroupName: aws.String(d.Get("group_name").(string)), + } + + if v, ok := d.GetOk("description"); ok { + createOpts.Description = aws.String(v.(string)) + } + + resp, err := conn.CreateGroup(createOpts) + if err != nil { + return fmt.Errorf("Error creating QuickSight Group: %s", err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", awsAccountID, namespace, aws.StringValue(resp.Group.GroupName))) + + return resourceAwsQuickSightGroupRead(d, meta) +} + +func resourceAwsQuickSightGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, groupName, err := resourceAwsQuickSightGroupParseID(d.Id()) + if err != nil { + return err + } + + descOpts := &quicksight.DescribeGroupInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + GroupName: aws.String(groupName), + } + + resp, err := conn.DescribeGroup(descOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight Group %s is already gone", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error describing QuickSight Group (%s): %s", d.Id(), err) + } + + d.Set("arn", resp.Group.Arn) + d.Set("aws_account_id", awsAccountID) + d.Set("group_name", resp.Group.GroupName) + d.Set("description", resp.Group.Description) + d.Set("namespace", namespace) + + return nil +} + +func resourceAwsQuickSightGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, groupName, err := resourceAwsQuickSightGroupParseID(d.Id()) + if err != nil { + return err + } + + updateOpts := &quicksight.UpdateGroupInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + GroupName: aws.String(groupName), + } + + if v, ok := d.GetOk("description"); ok { + updateOpts.Description = aws.String(v.(string)) + } + + _, err = conn.UpdateGroup(updateOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight Group %s is already gone", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error updating QuickSight Group %s: %s", d.Id(), err) + } + + return resourceAwsQuickSightGroupRead(d, meta) +} + +func resourceAwsQuickSightGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, groupName, err := resourceAwsQuickSightGroupParseID(d.Id()) + if err != nil { + return err + } + + deleteOpts := &quicksight.DeleteGroupInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + GroupName: aws.String(groupName), + } + + if _, err := conn.DeleteGroup(deleteOpts); err != nil { + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting QuickSight Group %s: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsQuickSightGroupParseID(id string) (string, string, string, error) { + parts := strings.SplitN(id, "/", 3) + if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return "", "", 
"", fmt.Errorf("unexpected format of ID (%s), expected AWS_ACCOUNT_ID/NAMESPACE/GROUP_NAME", id) + } + return parts[0], parts[1], parts[2], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_user.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_user.go new file mode 100644 index 00000000000..63f2145b8c8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_quicksight_user.go @@ -0,0 +1,227 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/quicksight" +) + +func resourceAwsQuickSightUser() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsQuickSightUserCreate, + Read: resourceAwsQuickSightUserRead, + Update: resourceAwsQuickSightUserUpdate, + Delete: resourceAwsQuickSightUserDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "aws_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "iam_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "identity_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + quicksight.IdentityTypeIam, + quicksight.IdentityTypeQuicksight, + }, false), + }, + + "namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "default", + ValidateFunc: validation.StringInSlice([]string{ + "default", + }, false), + }, + + "session_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "user_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.NoZeroValues, + }, + + "user_role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + quicksight.UserRoleReader, + quicksight.UserRoleAuthor, + quicksight.UserRoleAdmin, + }, false), + }, + }, + } +} + +func resourceAwsQuickSightUserCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID := meta.(*AWSClient).accountid + + namespace := d.Get("namespace").(string) + + if v, ok := d.GetOk("aws_account_id"); ok { + awsAccountID = v.(string) + } + + createOpts := &quicksight.RegisterUserInput{ + AwsAccountId: aws.String(awsAccountID), + Email: aws.String(d.Get("email").(string)), + IdentityType: aws.String(d.Get("identity_type").(string)), + Namespace: aws.String(namespace), + UserRole: aws.String(d.Get("user_role").(string)), + } + + if v, ok := d.GetOk("iam_arn"); ok { + createOpts.IamArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("session_name"); ok { + createOpts.SessionName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("user_name"); ok { + createOpts.UserName = aws.String(v.(string)) + } + + resp, err := conn.RegisterUser(createOpts) + if err != nil { + return fmt.Errorf("Error registering QuickSight user: %s", err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", awsAccountID, namespace, aws.StringValue(resp.User.UserName))) + + return resourceAwsQuickSightUserRead(d, meta) +} + +func 
resourceAwsQuickSightUserRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, userName, err := resourceAwsQuickSightUserParseID(d.Id()) + if err != nil { + return err + } + + descOpts := &quicksight.DescribeUserInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + UserName: aws.String(userName), + } + + resp, err := conn.DescribeUser(descOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight User %s is not found", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error describing QuickSight User (%s): %s", d.Id(), err) + } + + d.Set("arn", resp.User.Arn) + d.Set("aws_account_id", awsAccountID) + d.Set("email", resp.User.Email) + d.Set("namespace", namespace) + d.Set("user_role", resp.User.Role) + d.Set("user_name", resp.User.UserName) + + return nil +} + +func resourceAwsQuickSightUserUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, userName, err := resourceAwsQuickSightUserParseID(d.Id()) + if err != nil { + return err + } + + updateOpts := &quicksight.UpdateUserInput{ + AwsAccountId: aws.String(awsAccountID), + Email: aws.String(d.Get("email").(string)), + Namespace: aws.String(namespace), + Role: aws.String(d.Get("user_role").(string)), + UserName: aws.String(userName), + } + + _, err = conn.UpdateUser(updateOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight User %s is not found", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error updating QuickSight User %s: %s", d.Id(), err) + } + + return resourceAwsQuickSightUserRead(d, meta) +} + +func resourceAwsQuickSightUserDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, userName, err := resourceAwsQuickSightUserParseID(d.Id()) + if err != nil { + return err + } + + deleteOpts := &quicksight.DeleteUserInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + UserName: aws.String(userName), + } + + if _, err := conn.DeleteUser(deleteOpts); err != nil { + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting QuickSight User %s: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsQuickSightUserParseID(id string) (string, string, string, error) { + parts := strings.SplitN(id, "/", 3) + if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return "", "", "", fmt.Errorf("unexpected format of ID (%s), expected AWS_ACCOUNT_ID/NAMESPACE/USER_NAME", id) + } + return parts[0], parts[1], parts[2], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_principal_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_principal_association.go index d3eef24fd11..6812d5075eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_principal_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_principal_association.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ram" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsRamPrincipalAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_association.go index 1e7872a2e86..8e127446a68 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_association.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ram" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRamResourceAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share.go index ff82d219142..19202048758 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ram" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRamResourceShare() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share_accepter.go new file mode 100644 index 00000000000..6f2bc07c097 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ram_resource_share_accepter.go @@ -0,0 +1,272 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ram" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsRamResourceShareAccepter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRamResourceShareAccepterCreate, + Read: resourceAwsRamResourceShareAccepterRead, + Delete: resourceAwsRamResourceShareAccepterDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: 
schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "share_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "invitation_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "share_id": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "receiver_account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "sender_account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "share_name": { + Type: schema.TypeString, + Computed: true, + }, + + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceAwsRamResourceShareAccepterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ramconn + + shareARN := d.Get("share_arn").(string) + + invitation, err := resourceAwsRamResourceShareGetInvitation(conn, shareARN, ram.ResourceShareInvitationStatusPending) + + if err != nil { + return err + } + + if invitation == nil || aws.StringValue(invitation.ResourceShareInvitationArn) == "" { + return fmt.Errorf( + "No RAM Resource Share (%s) invitation found\n\n"+ + "NOTE: If both AWS accounts are in the same AWS Organization and RAM Sharing with AWS Organizations is enabled, this resource is not necessary", + shareARN) + } + + input := &ram.AcceptResourceShareInvitationInput{ + ClientToken: aws.String(resource.UniqueId()), + ResourceShareInvitationArn: invitation.ResourceShareInvitationArn, + } + + log.Printf("[DEBUG] Accept RAM resource share invitation request: %s", input) + output, err := conn.AcceptResourceShareInvitation(input) + + if err != nil { + return fmt.Errorf("Error accepting RAM resource share invitation: %s", err) + } + + d.SetId(shareARN) + + stateConf := &resource.StateChangeConf{ + Pending: []string{ram.ResourceShareInvitationStatusPending}, + Target: []string{ram.ResourceShareInvitationStatusAccepted}, + Refresh: resourceAwsRamResourceShareAccepterStateRefreshFunc( + conn, + aws.StringValue(output.ResourceShareInvitation.ResourceShareInvitationArn)), + Timeout: d.Timeout(schema.TimeoutCreate), + } + + _, err = stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error waiting for RAM resource share (%s) state: %s", d.Id(), err) + } + + return resourceAwsRamResourceShareAccepterRead(d, meta) +} + +func resourceAwsRamResourceShareAccepterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ramconn + + invitation, err := resourceAwsRamResourceShareGetInvitation(conn, d.Id(), ram.ResourceShareInvitationStatusAccepted) + + if err == nil && invitation == nil { + log.Printf("[WARN] No RAM resource share invitation by ARN (%s) found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return err + } + + d.Set("status", invitation.Status) + d.Set("receiver_account_id", invitation.ReceiverAccountId) + d.Set("sender_account_id", invitation.SenderAccountId) + d.Set("share_arn", invitation.ResourceShareArn) + d.Set("invitation_arn", invitation.ResourceShareInvitationArn) + d.Set("share_id", resourceAwsRamResourceShareGetIDFromARN(d.Id())) + d.Set("share_name", invitation.ResourceShareName) + + listInput := &ram.ListResourcesInput{ + MaxResults: aws.Int64(int64(500)), + ResourceOwner: aws.String(ram.ResourceOwnerOtherAccounts), + ResourceShareArns: aws.StringSlice([]string{d.Id()}), + Principal: invitation.SenderAccountId, + 
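// Principal filters the listing to resources shared by the invitation's sender account. +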
} + + var resourceARNs []*string + err = conn.ListResourcesPages(listInput, func(page *ram.ListResourcesOutput, lastPage bool) bool { + for _, resource := range page.Resources { + resourceARNs = append(resourceARNs, resource.Arn) + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("Error reading RAM resource share resources %s: %s", d.Id(), err) + } + + if err := d.Set("resources", flattenStringList(resourceARNs)); err != nil { + return fmt.Errorf("unable to set resources: %s", err) + } + + return nil +} + +func resourceAwsRamResourceShareAccepterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ramconn + + receiverAccountID := d.Get("receiver_account_id").(string) + + if receiverAccountID == "" { + return fmt.Errorf("The receiver account ID is required to leave a resource share") + } + + input := &ram.DisassociateResourceShareInput{ + ClientToken: aws.String(resource.UniqueId()), + ResourceShareArn: aws.String(d.Id()), + Principals: []*string{aws.String(receiverAccountID)}, + } + log.Printf("[DEBUG] Leave RAM resource share request: %s", input) + + _, err := conn.DisassociateResourceShare(input) + + if err != nil { + return fmt.Errorf("Error leaving RAM resource share: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{ram.ResourceShareAssociationStatusAssociated}, + Target: []string{ram.ResourceShareAssociationStatusDisassociated}, + Refresh: resourceAwsRamResourceShareStateRefreshFunc(conn, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + } + + if _, err := stateConf.WaitForState(); err != nil { + if isAWSErr(err, ram.ErrCodeUnknownResourceException, "") { + // the resource share is already gone, which is the desired end state + return nil + } + return fmt.Errorf("Error waiting for RAM resource share (%s) state: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsRamResourceShareGetInvitation(conn *ram.RAM, resourceShareARN, status string) (*ram.ResourceShareInvitation, error) { + input := &ram.GetResourceShareInvitationsInput{ + ResourceShareArns: []*string{aws.String(resourceShareARN)}, + } + + var invitation *ram.ResourceShareInvitation + err := conn.GetResourceShareInvitationsPages(input, func(page *ram.GetResourceShareInvitationsOutput, lastPage bool) bool { + for _, rsi := range page.ResourceShareInvitations { + if aws.StringValue(rsi.Status) == status { + invitation = rsi + return false + } + } + + return !lastPage + }) + + if err != nil { + return nil, fmt.Errorf("Error reading RAM resource share invitation %s: %s", resourceShareARN, err) + } + + if invitation == nil { + return nil, nil + } + + return invitation, nil +} + +func resourceAwsRamResourceShareAccepterStateRefreshFunc(conn *ram.RAM, invitationArn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + request := &ram.GetResourceShareInvitationsInput{ + ResourceShareInvitationArns: []*string{aws.String(invitationArn)}, + } + + output, err := conn.GetResourceShareInvitations(request) + + if err != nil { + return nil, "Unable to get resource share invitations", err + } + + if len(output.ResourceShareInvitations) == 0 { + return nil, "Resource share invitation not found", nil + } + + invitation := output.ResourceShareInvitations[0] + + return invitation, aws.StringValue(invitation.Status), nil + } +} + +func resourceAwsRamResourceShareGetIDFromARN(arn string) string { + return strings.Replace(arn[strings.LastIndex(arn, ":")+1:], "resource-share/", "rs-", -1) +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go index 8bf664d5c0c..52136a99a65 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go @@ -10,9 +10,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsRDSCluster() *schema.Resource { @@ -143,6 +144,7 @@ func resourceAwsRDSCluster() *schema.Resource { Default: "provisioned", ValidateFunc: validation.StringInSlice([]string{ "global", + "multimaster", "parallelquery", "provisioned", "serverless", @@ -188,6 +190,15 @@ func resourceAwsRDSCluster() *schema.Resource { Default: 300, ValidateFunc: validation.IntBetween(300, 86400), }, + "timeout_action": { + Type: schema.TypeString, + Optional: true, + Default: "RollbackCapacityChange", + ValidateFunc: validation.StringInSlice([]string{ + "ForceApplyCapacityChange", + "RollbackCapacityChange", + }, false), + }, }, }, }, @@ -225,7 +236,6 @@ func resourceAwsRDSCluster() *schema.Resource { }, "bucket_prefix": { Type: schema.TypeString, - Required: false, Optional: true, ForceNew: true, }, @@ -288,7 +298,6 @@ func resourceAwsRDSCluster() *schema.Resource { "snapshot_identifier": { Type: schema.TypeString, - Computed: false, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -382,7 +391,6 @@ func resourceAwsRDSCluster() *schema.Resource { "enabled_cloudwatch_logs_exports": { Type: schema.TypeList, - Computed: false, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, @@ -391,6 +399,7 @@ func resourceAwsRDSCluster() *schema.Resource { "error", "general", "slowquery", + "postgresql", }, false), }, }, @@ -411,7 +420,7 @@ func resourceAwsRdsClusterImport( func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().RdsTags() // Some API calls (e.g. RestoreDBClusterFromSnapshot do not support all // parameters to correctly apply all settings in one pass. 
For missing @@ -483,6 +492,11 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error opts.KmsKeyId = aws.String(attr.(string)) } + if attr, ok := d.GetOk("master_password"); ok { + modifyDbClusterInput.MasterUserPassword = aws.String(attr.(string)) + requiresModifyDbCluster = true + } + if attr, ok := d.GetOk("option_group_name"); ok { opts.OptionGroupName = aws.String(attr.(string)) } @@ -516,6 +530,9 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RestoreDBClusterFromSnapshot(&opts) + } if err != nil { return fmt.Errorf("Error creating RDS Cluster: %s", err) } @@ -602,6 +619,9 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBCluster(createOpts) + } if err != nil { return fmt.Errorf("error creating RDS cluster: %s", err) } @@ -695,8 +715,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] RDS Cluster restore options: %s", createOpts) // Retry for IAM/S3 eventual consistency + var resp *rds.RestoreDBClusterFromS3Output err := resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err := conn.RestoreDBClusterFromS3(createOpts) + var err error + resp, err = conn.RestoreDBClusterFromS3(createOpts) if err != nil { // InvalidParameterValue: Files from the specified Amazon S3 bucket cannot be downloaded. // Make sure that you have created an AWS Identity and Access Management (IAM) role that lets Amazon RDS access Amazon S3 for you. @@ -714,6 +736,9 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.RestoreDBClusterFromS3(createOpts) + } if err != nil { log.Printf("[ERROR] Error creating RDS Cluster: %s", err) @@ -837,6 +862,9 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBCluster(createOpts) + } if err != nil { return fmt.Errorf("error creating RDS cluster: %s", err) } @@ -1005,9 +1033,12 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting vpc_security_group_ids: %s", err) } - // Fetch and save tags - if err := saveTagsRDS(conn, d, aws.StringValue(dbc.DBClusterArn)); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster (%s): %s", aws.StringValue(dbc.DBClusterIdentifier), err) + tags, err := keyvaluetags.RdsListTags(conn, aws.StringValue(dbc.DBClusterArn)) + if err != nil { + return fmt.Errorf("error listing tags for RDS Cluster (%s): %s", aws.StringValue(dbc.DBClusterArn), err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } // Fetch and save Global Cluster if engine mode global @@ -1128,6 +1159,9 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBCluster(req) + } if err != nil { return fmt.Errorf("Failed to modify RDS Cluster (%s): %s", d.Id(), err) } @@ -1193,8 +1227,10 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error } if d.HasChange("tags") { - if err := setTagsRDS(conn, d, d.Get("arn").(string)); err != nil { - return 
err + o, n := d.GetChange("tags") + + if err := keyvaluetags.RdsUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) } else { d.SetPartial("tags") } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_endpoint.go index 3c24a35ec9c..71d40ccde46 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_endpoint.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go index 13dc9a4c258..d20facd407f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go @@ -9,8 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsRDSClusterInstance() *schema.Resource { @@ -213,7 +214,6 @@ func resourceAwsRDSClusterInstance() *schema.Resource { func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) createOpts := &rds.CreateDBInstanceInput{ DBInstanceClass: aws.String(d.Get("instance_class").(string)), @@ -223,7 +223,7 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), PromotionTier: aws.Int64(int64(d.Get("promotion_tier").(int))), AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - Tags: tags, + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().RdsTags(), } if attr, ok := d.GetOk("availability_zone"); ok { @@ -256,14 +256,12 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ createOpts.MonitoringRoleArn = aws.String(attr.(string)) } - if attr, _ := d.GetOk("engine"); attr == "aurora-postgresql" || attr == "aurora" { - if attr, ok := d.GetOk("performance_insights_enabled"); ok { - createOpts.EnablePerformanceInsights = aws.Bool(attr.(bool)) - } + if attr, ok := d.GetOk("performance_insights_enabled"); ok { + 
createOpts.EnablePerformanceInsights = aws.Bool(attr.(bool)) + } - if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { - createOpts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) - } + if attr, ok := d.GetOk("performance_insights_kms_key_id"); ok { + createOpts.PerformanceInsightsKMSKeyId = aws.String(attr.(string)) } if attr, ok := d.GetOk("preferred_backup_window"); ok { @@ -291,6 +289,9 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.CreateDBInstance(createOpts) + } if err != nil { return fmt.Errorf("error creating RDS DB Instance: %s", err) } @@ -381,6 +382,8 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("identifier", db.DBInstanceIdentifier) d.Set("instance_class", db.DBInstanceClass) d.Set("kms_key_id", db.KmsKeyId) + d.Set("monitoring_interval", db.MonitoringInterval) + d.Set("monitoring_role_arn", db.MonitoringRoleArn) d.Set("performance_insights_enabled", db.PerformanceInsightsEnabled) d.Set("performance_insights_kms_key_id", db.PerformanceInsightsKMSKeyId) d.Set("preferred_backup_window", db.PreferredBackupWindow) @@ -389,20 +392,16 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("publicly_accessible", db.PubliclyAccessible) d.Set("storage_encrypted", db.StorageEncrypted) - if db.MonitoringInterval != nil { - d.Set("monitoring_interval", db.MonitoringInterval) - } - - if db.MonitoringRoleArn != nil { - d.Set("monitoring_role_arn", db.MonitoringRoleArn) - } - if len(db.DBParameterGroups) > 0 { d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName) } - if err := saveTagsRDS(conn, d, aws.StringValue(db.DBInstanceArn)); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err) + tags, err := keyvaluetags.RdsListTags(conn, aws.StringValue(db.DBInstanceArn)) + if err != nil { + return fmt.Errorf("error listing tags for RDS Cluster Instance (%s): %s", d.Id(), err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil @@ -433,15 +432,15 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } - if d.HasChange("performance_insights_enabled") { + if d.HasChange("performance_insights_enabled") || d.HasChange("performance_insights_kms_key_id") { d.SetPartial("performance_insights_enabled") req.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool)) - requestUpdate = true - } - if d.HasChange("performance_insights_kms_key_id") { - d.SetPartial("performance_insights_kms_key_id") - req.PerformanceInsightsKMSKeyId = aws.String(d.Get("performance_insights_kms_key_id").(string)) + if v, ok := d.GetOk("performance_insights_kms_key_id"); ok { + d.SetPartial("performance_insights_kms_key_id") + req.PerformanceInsightsKMSKeyId = aws.String(v.(string)) + } + requestUpdate = true } @@ -500,6 +499,9 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.ModifyDBInstance(req) + } if err != nil { return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) } @@ -522,8 +524,12 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ } - if err := setTagsRDS(conn, d, d.Get("arn").(string)); err != nil { - return err + if 
d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.RdsUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating RDS Cluster Instance (%s) tags: %s", d.Id(), err) + } } return resourceAwsRDSClusterInstanceRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go index 160cea0a22e..78356ad8861 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go @@ -9,8 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) const rdsClusterParameterGroupMaxParamsBulkEdit = 20 @@ -88,7 +89,6 @@ func resourceAwsRDSClusterParameterGroup() *schema.Resource { func resourceAwsRDSClusterParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { rdsconn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) var groupName string if v, ok := d.GetOk("name"); ok { @@ -103,7 +103,7 @@ func resourceAwsRDSClusterParameterGroupCreate(d *schema.ResourceData, meta inte DBClusterParameterGroupName: aws.String(groupName), DBParameterGroupFamily: aws.String(d.Get("family").(string)), Description: aws.String(d.Get("description").(string)), - Tags: tags, + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().RdsTags(), } log.Printf("[DEBUG] Create DB Cluster Parameter Group: %#v", createOpts) @@ -170,11 +170,9 @@ func resourceAwsRDSClusterParameterGroupRead(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) } - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList + if err := d.Set("tags", keyvaluetags.RdsKeyValueTags(resp.TagList).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } - d.Set("tags", tagsToMapRDS(dt)) return nil } @@ -228,9 +226,12 @@ func resourceAwsRDSClusterParameterGroupUpdate(d *schema.ResourceData, meta inte } } - if err := setTagsRDS(rdsconn, d, d.Get("arn").(string)); err != nil { - return err - } else { + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.RdsUpdateTags(rdsconn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating RDS Cluster Parameter Group (%s) tags: %s", d.Id(), err) + } d.SetPartial("tags") } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_global_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_global_cluster.go index 119415992c0..1ddf7a8689d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_global_cluster.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_global_cluster.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsRDSGlobalCluster() *schema.Resource { @@ -196,6 +196,10 @@ func resourceAwsRDSGlobalClusterDelete(d *schema.ResourceData, meta interface{}) return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteGlobalCluster(input) + } + if isAWSErr(err, rds.ErrCodeGlobalClusterNotFoundFault, "") { return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go index 3b3378a4bef..66e66125664 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsRedshiftCluster() *schema.Resource { @@ -32,6 +32,11 @@ func resourceAwsRedshiftCluster() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "database_name": { Type: schema.TypeString, Optional: true, @@ -610,7 +615,9 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er d.Set("cluster_public_key", rsc.ClusterPublicKey) d.Set("cluster_revision_number", rsc.ClusterRevisionNumber) - d.Set("tags", tagsToMapRedshift(rsc.Tags)) + if err := d.Set("tags", tagsToMapRedshift(rsc.Tags)); err != nil { + return fmt.Errorf("Error setting Redshift Cluster Tags: %#v", err) + } d.Set("snapshot_copy", flattenRedshiftSnapshotCopy(rsc.ClusterSnapshotCopyStatus)) @@ -618,13 +625,6 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error setting logging: %s", err) } - return nil -} - -func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - d.Partial(true) - arn := arn.ARN{ Partition: meta.(*AWSClient).partition, Service: "redshift", @@ -632,7 +632,17 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) AccountID: meta.(*AWSClient).accountid, Resource: fmt.Sprintf("cluster:%s", d.Id()), }.String() - if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { + + d.Set("arn", arn) + + return nil +} + +func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + 
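// Enable partial state mode: attributes saved via d.SetPartial below persist even if a later update step fails. +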
d.Partial(true) + + if tagErr := setTagsRedshift(conn, d); tagErr != nil { return tagErr } else { d.SetPartial("tags") @@ -878,6 +888,7 @@ func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Deleting Redshift Cluster: %s", deleteOpts) + log.Printf("[DEBUG] schema.TimeoutDelete: %+v", d.Timeout(schema.TimeoutDelete)) err := deleteAwsRedshiftCluster(&deleteOpts, conn, d.Timeout(schema.TimeoutDelete)) if err != nil { return err @@ -899,6 +910,9 @@ func deleteAwsRedshiftCluster(opts *redshift.DeleteClusterInput, conn *redshift. return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteCluster(opts) + } if err != nil { return fmt.Errorf("Error deleting Redshift Cluster (%s): %s", id, err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_event_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_event_subscription.go index 8704ba6fb83..cfe473f27a7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_event_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_event_subscription.go @@ -6,8 +6,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRedshiftEventSubscription() *schema.Resource { @@ -25,6 +26,11 @@ func resourceAwsRedshiftEventSubscription() *schema.Resource { Update: schema.DefaultTimeout(40 * time.Minute), }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { Type: schema.TypeString, Required: true, @@ -68,11 +74,7 @@ func resourceAwsRedshiftEventSubscription() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tags": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, + "tags": tagsSchema(), }, } } @@ -106,6 +108,16 @@ func resourceAwsRedshiftEventSubscriptionCreate(d *schema.ResourceData, meta int func resourceAwsRedshiftEventSubscriptionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).redshiftconn + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "redshift", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("eventsubscription:%s", d.Id()), + }.String() + + d.Set("arn", arn) + sub, err := resourceAwsRedshiftEventSubscriptionRetrieve(d.Id(), conn) if err != nil { return fmt.Errorf("Error retrieving Redshift Event Subscription %s: %s", d.Id(), err) @@ -175,6 +187,8 @@ func resourceAwsRedshiftEventSubscriptionRetrieve(name string, conn *redshift.Re func resourceAwsRedshiftEventSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).redshiftconn + d.Partial(true) + req := &redshift.ModifyEventSubscriptionInput{ SubscriptionName: aws.String(d.Id()), SnsTopicArn: aws.String(d.Get("sns_topic_arn").(string)), @@ -191,6 +205,14 @@ func resourceAwsRedshiftEventSubscriptionUpdate(d *schema.ResourceData, meta int return fmt.Errorf("Modifying Redshift Event Subscription %s failed: %s", d.Id(), err) } + if tagErr := setTagsRedshift(conn, d); tagErr != nil { + return 
tagErr + } else { + d.SetPartial("tags") + } + + d.Partial(false) + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go index ae7778f54b9..7487f9422b1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go @@ -8,9 +8,10 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRedshiftParameterGroup() *schema.Resource { @@ -24,6 +25,11 @@ func resourceAwsRedshiftParameterGroup() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { Type: schema.TypeString, ForceNew: true, @@ -62,6 +68,8 @@ func resourceAwsRedshiftParameterGroup() *schema.Resource { }, Set: resourceAwsRedshiftParameterHash, }, + + "tags": tagsSchema(), }, } } @@ -73,6 +81,7 @@ func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interf ParameterGroupName: aws.String(d.Get("name").(string)), ParameterGroupFamily: aws.String(d.Get("family").(string)), Description: aws.String(d.Get("description").(string)), + Tags: tagsFromMapRedshift(d.Get("tags").(map[string]interface{})), } log.Printf("[DEBUG] Create Redshift Parameter Group: %#v", createOpts) @@ -82,9 +91,25 @@ func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interf } d.SetId(*createOpts.ParameterGroupName) - log.Printf("[INFO] Redshift Parameter Group ID: %s", d.Id()) - return resourceAwsRedshiftParameterGroupUpdate(d, meta) + if v := d.Get("parameter").(*schema.Set); v.Len() > 0 { + parameters, err := expandRedshiftParameters(v.List()) + + if err != nil { + return fmt.Errorf("error expanding parameter: %s", err) + } + + modifyOpts := redshift.ModifyClusterParameterGroupInput{ + ParameterGroupName: aws.String(d.Id()), + Parameters: parameters, + } + + if _, err := conn.ModifyClusterParameterGroup(&modifyOpts); err != nil { + return fmt.Errorf("error adding Redshift Parameter Group (%s) parameters: %s", d.Id(), err) + } + } + + return resourceAwsRedshiftParameterGroupRead(d, meta) } func resourceAwsRedshiftParameterGroupRead(d *schema.ResourceData, meta interface{}) error { @@ -105,9 +130,22 @@ func resourceAwsRedshiftParameterGroupRead(d *schema.ResourceData, meta interfac return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.ParameterGroups) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "redshift", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("parametergroup:%s", d.Id()), + }.String() + + d.Set("arn", arn) + d.Set("name", describeResp.ParameterGroups[0].ParameterGroupName) d.Set("family", describeResp.ParameterGroups[0].ParameterGroupFamily) d.Set("description", describeResp.ParameterGroups[0].Description) + if err := d.Set("tags", 
tagsToMapRedshift(describeResp.ParameterGroups[0].Tags)); err != nil { + return fmt.Errorf("Error setting Redshift Parameter Group Tags: %#v", err) + } describeParametersOpts := redshift.DescribeClusterParametersInput{ ParameterGroupName: aws.String(d.Id()), @@ -161,6 +199,12 @@ func resourceAwsRedshiftParameterGroupUpdate(d *schema.ResourceData, meta interf d.SetPartial("parameter") } + if tagErr := setTagsRedshift(conn, d); tagErr != nil { + return tagErr + } else { + d.SetPartial("tags") + } + d.Partial(false) return resourceAwsRedshiftParameterGroupRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go index 87e2b2fc28a..5b960767e8e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go @@ -11,9 +11,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/redshift" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRedshiftSecurityGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_copy_grant.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_copy_grant.go index a9d4ad0261f..fde917e1500 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_copy_grant.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_copy_grant.go @@ -6,9 +6,10 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRedshiftSnapshotCopyGrant() *schema.Resource { @@ -17,10 +18,15 @@ func resourceAwsRedshiftSnapshotCopyGrant() *schema.Resource { // Instead changes to most fields will force a new resource Create: resourceAwsRedshiftSnapshotCopyGrantCreate, Read: resourceAwsRedshiftSnapshotCopyGrantRead, + Update: resourceAwsRedshiftSnapshotCopyGrantUpdate, Delete: resourceAwsRedshiftSnapshotCopyGrantDelete, Exists: resourceAwsRedshiftSnapshotCopyGrantExists, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "snapshot_copy_grant_name": { Type: schema.TypeString, Required: true, @@ -32,11 +38,7 @@ func resourceAwsRedshiftSnapshotCopyGrant() *schema.Resource { ForceNew: true, Computed: true, }, - "tags": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, + "tags": tagsSchema(), }, } } 
@@ -90,6 +92,16 @@ func resourceAwsRedshiftSnapshotCopyGrantRead(d *schema.ResourceData, meta inter return nil } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "redshift", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("snapshotcopygrant:%s", grantName), + }.String() + + d.Set("arn", arn) + d.Set("kms_key_id", grant.KmsKeyId) d.Set("snapshot_copy_grant_name", grant.SnapshotCopyGrantName) if err := d.Set("tags", tagsToMapRedshift(grant.Tags)); err != nil { @@ -99,6 +111,22 @@ func resourceAwsRedshiftSnapshotCopyGrantRead(d *schema.ResourceData, meta inter return nil } +func resourceAwsRedshiftSnapshotCopyGrantUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + d.Partial(true) + + if tagErr := setTagsRedshift(conn, d); tagErr != nil { + return tagErr + } else { + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsRedshiftSnapshotCopyGrantRead(d, meta) +} + func resourceAwsRedshiftSnapshotCopyGrantDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn @@ -177,14 +205,21 @@ func findAwsRedshiftSnapshotCopyGrantWithRetry(conn *redshift.Redshift, grantNam return nil }) - - return grant, err + if isResourceTimeoutError(err) { + grant, err = findAwsRedshiftSnapshotCopyGrant(conn, grantName, nil) + } + if err != nil { + return nil, fmt.Errorf("Error finding snapshot copy grant: %s", err) + } + return grant, nil } // Used by the tests as well func waitForAwsRedshiftSnapshotCopyGrantToBeDeleted(conn *redshift.Redshift, grantName string) error { + var grant *redshift.SnapshotCopyGrant err := resource.Retry(3*time.Minute, func() *resource.RetryError { - grant, err := findAwsRedshiftSnapshotCopyGrant(conn, grantName, nil) + var err error + grant, err = findAwsRedshiftSnapshotCopyGrant(conn, grantName, nil) if err != nil { if isAWSErr(err, redshift.ErrCodeSnapshotCopyGrantNotFoundFault, "") { return nil @@ -199,8 +234,16 @@ func waitForAwsRedshiftSnapshotCopyGrantToBeDeleted(conn *redshift.Redshift, gra return resource.NonRetryableError(err) }) - - return err + if isResourceTimeoutError(err) { + grant, err = findAwsRedshiftSnapshotCopyGrant(conn, grantName, nil) + if isAWSErr(err, redshift.ErrCodeSnapshotCopyGrantNotFoundFault, "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error waiting for snapshot copy grant to be deleted: %s", err) + } + return nil } // The DescribeSnapshotCopyGrants API defaults to listing only 100 grants @@ -232,6 +275,9 @@ func findAwsRedshiftSnapshotCopyGrant(conn *redshift.Redshift, grantName string, return nil }) + if isResourceTimeoutError(err) { + out, err = conn.DescribeSnapshotCopyGrants(&input) + } if err != nil { return nil, err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule.go new file mode 100644 index 00000000000..9612a1c8073 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule.go @@ -0,0 +1,234 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/redshift" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsRedshiftSnapshotSchedule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRedshiftSnapshotScheduleCreate, + Read: resourceAwsRedshiftSnapshotScheduleRead, + Update: resourceAwsRedshiftSnapshotScheduleUpdate, + Delete: resourceAwsRedshiftSnapshotScheduleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "identifier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"identifier_prefix"}, + }, + "identifier_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "definitions": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "tags": tagsSchema(), + }, + } + +} + +func resourceAwsRedshiftSnapshotScheduleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{})) + var identifier string + if v, ok := d.GetOk("identifier"); ok { + identifier = v.(string) + } else { + if v, ok := d.GetOk("identifier_prefix"); ok { + identifier = resource.PrefixedUniqueId(v.(string)) + } else { + identifier = resource.UniqueId() + } + } + createOpts := &redshift.CreateSnapshotScheduleInput{ + ScheduleIdentifier: aws.String(identifier), + ScheduleDefinitions: expandStringSet(d.Get("definitions").(*schema.Set)), + Tags: tags, + } + if attr, ok := d.GetOk("description"); ok { + createOpts.ScheduleDescription = aws.String(attr.(string)) + } + + resp, err := conn.CreateSnapshotSchedule(createOpts) + if err != nil { + return fmt.Errorf("Error creating Redshift Snapshot Schedule: %s", err) + } + + d.SetId(aws.StringValue(resp.ScheduleIdentifier)) + + return resourceAwsRedshiftSnapshotScheduleRead(d, meta) +} + +func resourceAwsRedshiftSnapshotScheduleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + descOpts := &redshift.DescribeSnapshotSchedulesInput{ + ScheduleIdentifier: aws.String(d.Id()), + } + + resp, err := conn.DescribeSnapshotSchedules(descOpts) + if err != nil { + return fmt.Errorf("Error describing Redshift Cluster Snapshot Schedule %s: %s", d.Id(), err) + } + + if resp.SnapshotSchedules == nil || len(resp.SnapshotSchedules) != 1 { + log.Printf("[WARN] Unable to find Redshift Cluster Snapshot Schedule (%s)", d.Id()) + d.SetId("") + return nil + } + snapshotSchedule := resp.SnapshotSchedules[0] + + d.Set("identifier", snapshotSchedule.ScheduleIdentifier) + d.Set("description", snapshotSchedule.ScheduleDescription) + if err := d.Set("definitions", flattenStringList(snapshotSchedule.ScheduleDefinitions)); err != nil { + return fmt.Errorf("Error setting definitions: %s", err) + } + d.Set("tags", tagsToMapRedshift(snapshotSchedule.Tags)) + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "redshift", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("snapshotschedule:%s", d.Id()), + }.String() + + d.Set("arn", arn) + + return nil +} + +func resourceAwsRedshiftSnapshotScheduleUpdate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + d.Partial(true) + + if tagErr := setTagsRedshift(conn, d); tagErr != nil { + return tagErr + } else { + d.SetPartial("tags") + } + + if d.HasChange("definitions") { + modifyOpts := &redshift.ModifySnapshotScheduleInput{ + ScheduleIdentifier: aws.String(d.Id()), + ScheduleDefinitions: expandStringList(d.Get("definitions").(*schema.Set).List()), + } + _, err := conn.ModifySnapshotSchedule(modifyOpts) + if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + log.Printf("[WARN] Redshift Snapshot Schedule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error modifying Redshift Snapshot Schedule %s: %s", d.Id(), err) + } + d.SetPartial("definitions") + } + + return resourceAwsRedshiftSnapshotScheduleRead(d, meta) +} + +func resourceAwsRedshiftSnapshotScheduleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + if d.Get("force_destroy").(bool) { + if err := resourceAwsRedshiftSnapshotScheduleDeleteAllAssociatedClusters(conn, d.Id()); err != nil { + return err + } + } + + _, err := conn.DeleteSnapshotSchedule(&redshift.DeleteSnapshotScheduleInput{ + ScheduleIdentifier: aws.String(d.Id()), + }) + if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error deleting Redshift Snapshot Schedule %s: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsRedshiftSnapshotScheduleDeleteAllAssociatedClusters(conn *redshift.Redshift, scheduleIdentifier string) error { + + resp, err := conn.DescribeSnapshotSchedules(&redshift.DescribeSnapshotSchedulesInput{ + ScheduleIdentifier: aws.String(scheduleIdentifier), + }) + if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error describing Redshift Cluster Snapshot Schedule %s: %s", scheduleIdentifier, err) + } + if resp.SnapshotSchedules == nil || len(resp.SnapshotSchedules) != 1 { + log.Printf("[WARN] Unable to find Redshift Cluster Snapshot Schedule (%s)", scheduleIdentifier) + return nil + } + + snapshotSchedule := resp.SnapshotSchedules[0] + + for _, associatedCluster := range snapshotSchedule.AssociatedClusters { + _, err = conn.ModifyClusterSnapshotSchedule(&redshift.ModifyClusterSnapshotScheduleInput{ + ClusterIdentifier: associatedCluster.ClusterIdentifier, + ScheduleIdentifier: aws.String(scheduleIdentifier), + DisassociateSchedule: aws.Bool(true), + }) + + if isAWSErr(err, redshift.ErrCodeClusterNotFoundFault, "") { + log.Printf("[WARN] Redshift Snapshot Cluster (%s) not found, removing from state", aws.StringValue(associatedCluster.ClusterIdentifier)) + continue + } + if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + log.Printf("[WARN] Redshift Snapshot Schedule (%s) not found, removing from state", scheduleIdentifier) + continue + } + if err != nil { + return fmt.Errorf("Error disassociating Redshift Cluster (%s) from Snapshot Schedule (%s): %s", aws.StringValue(associatedCluster.ClusterIdentifier), scheduleIdentifier, err) + } + } + + for _, associatedCluster := range snapshotSchedule.AssociatedClusters { + if err := waitForRedshiftSnapshotScheduleAssociationDestroy(conn, 75*time.Minute, aws.StringValue(associatedCluster.ClusterIdentifier), scheduleIdentifier); err != nil { + return err + } + } + + return nil +}
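Before the association resource that follows, one recurring change in this vendor bump deserves a note: nearly every resource.Retry loop now gains an isResourceTimeoutError follow-up call. resource.Retry can return a timeout error even when the final attempt was merely cut short rather than failed, so the updated code makes one last explicit attempt before reporting an error. A self-contained sketch of the shape; doCall and retryWithFinalAttempt are illustrative stand-ins, and the isResourceTimeoutError shown here is an assumption about the provider helper's implementation (unwrapping the SDK's *resource.TimeoutError):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// doCall stands in for any AWS API call that can fail transiently.
func doCall() error { return nil }

// isResourceTimeoutError mirrors the provider helper of the same name;
// reproduced here as an assumption so the sketch compiles standalone.
func isResourceTimeoutError(err error) bool {
	timeoutErr, ok := err.(*resource.TimeoutError)
	return ok && timeoutErr.LastError == nil
}

// retryWithFinalAttempt shows the pattern this diff applies everywhere:
// retry inside a bounded window, then, if the window elapsed, try exactly
// once more outside the loop so a slow-but-valid call is not reported as
// a failure.
func retryWithFinalAttempt() error {
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if callErr := doCall(); callErr != nil {
			// Treat everything as transient in this sketch; real code
			// distinguishes retryable from non-retryable errors.
			return resource.RetryableError(callErr)
		}
		return nil
	})
	if isResourceTimeoutError(err) {
		err = doCall()
	}
	if err != nil {
		return fmt.Errorf("error calling API: %s", err)
	}
	return nil
}

func main() {
	fmt.Println(retryWithFinalAttempt())
}

The same three-line fallback appears after the create, read, and delete retries in the Redshift, route table, and S3 files throughout this diff.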
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule_association.go new file mode 100644 index 00000000000..ce46e2b2923 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_snapshot_schedule_association.go @@ -0,0 +1,238 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/redshift" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsRedshiftSnapshotScheduleAssociation() *schema.Resource { + + return &schema.Resource{ + Create: resourceAwsRedshiftSnapshotScheduleAssociationCreate, + Read: resourceAwsRedshiftSnapshotScheduleAssociationRead, + Delete: resourceAwsRedshiftSnapshotScheduleAssociationDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + clusterIdentifier, scheduleIdentifier, err := resourceAwsRedshiftSnapshotScheduleAssociationParseId(d.Id()) + if err != nil { + return nil, fmt.Errorf("Error parsing Redshift Cluster Snapshot Schedule Association ID %s: %s", d.Id(), err) + } + + d.Set("cluster_identifier", clusterIdentifier) + d.Set("schedule_identifier", scheduleIdentifier) + d.SetId(fmt.Sprintf("%s/%s", clusterIdentifier, scheduleIdentifier)) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "cluster_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "schedule_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsRedshiftSnapshotScheduleAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + clusterIdentifier := d.Get("cluster_identifier").(string) + scheduleIdentifier := d.Get("schedule_identifier").(string) + + _, err := conn.ModifyClusterSnapshotSchedule(&redshift.ModifyClusterSnapshotScheduleInput{ + ClusterIdentifier: aws.String(clusterIdentifier), + ScheduleIdentifier: aws.String(scheduleIdentifier), + DisassociateSchedule: aws.Bool(false), + }) + + if err != nil { + return fmt.Errorf("Error associating Redshift Cluster (%s) and Snapshot Schedule (%s): %s", clusterIdentifier, scheduleIdentifier, err) + } + + if err := waitForRedshiftSnapshotScheduleAssociationActive(conn, 75*time.Minute, clusterIdentifier, scheduleIdentifier); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", clusterIdentifier, scheduleIdentifier)) + + return resourceAwsRedshiftSnapshotScheduleAssociationRead(d, meta) +} + +func resourceAwsRedshiftSnapshotScheduleAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + clusterIdentifier, scheduleIdentifier, err := resourceAwsRedshiftSnapshotScheduleAssociationParseId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing Redshift Cluster Snapshot Schedule Association ID %s: %s", d.Id(), err) + } + + descOpts := &redshift.DescribeSnapshotSchedulesInput{ + ClusterIdentifier: aws.String(clusterIdentifier), + ScheduleIdentifier: aws.String(scheduleIdentifier), + } + + resp, err := conn.DescribeSnapshotSchedules(descOpts) + if err != nil { + 
return fmt.Errorf("Error describing Redshift Cluster %s Snapshot Schedule %s: %s", clusterIdentifier, clusterIdentifier, err) + } + + if resp.SnapshotSchedules == nil || len(resp.SnapshotSchedules) == 0 { + return fmt.Errorf("Unable to find Redshift Cluster (%s) Snapshot Schedule (%s) Association", clusterIdentifier, scheduleIdentifier) + } + snapshotSchedule := resp.SnapshotSchedules[0] + if snapshotSchedule.AssociatedClusters == nil || aws.Int64Value(snapshotSchedule.AssociatedClusterCount) == 0 { + return fmt.Errorf("Unable to find Redshift Cluster (%s)", clusterIdentifier) + } + + var associatedCluster *redshift.ClusterAssociatedToSchedule + for _, cluster := range snapshotSchedule.AssociatedClusters { + if *cluster.ClusterIdentifier == clusterIdentifier { + associatedCluster = cluster + break + } + } + + if associatedCluster == nil { + return fmt.Errorf("Unable to find Redshift Cluster (%s)", clusterIdentifier) + } + + d.Set("cluster_identifier", associatedCluster.ClusterIdentifier) + d.Set("schedule_identifier", snapshotSchedule.ScheduleIdentifier) + + return nil +} + +func resourceAwsRedshiftSnapshotScheduleAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + clusterIdentifier, scheduleIdentifier, err := resourceAwsRedshiftSnapshotScheduleAssociationParseId(d.Id()) + if err != nil { + return fmt.Errorf("Error parse Redshift Cluster Snapshot Schedule Association ID %s: %s", d.Id(), err) + } + + _, err = conn.ModifyClusterSnapshotSchedule(&redshift.ModifyClusterSnapshotScheduleInput{ + ClusterIdentifier: aws.String(clusterIdentifier), + ScheduleIdentifier: aws.String(scheduleIdentifier), + DisassociateSchedule: aws.Bool(true), + }) + + if isAWSErr(err, redshift.ErrCodeClusterNotFoundFault, "") { + log.Printf("[WARN] Redshift Snapshot Cluster (%s) not found, removing from state", clusterIdentifier) + d.SetId("") + return nil + } + if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + log.Printf("[WARN] Redshift Snapshot Schedule (%s) not found, removing from state", scheduleIdentifier) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error disassociate Redshift Cluster (%s) and Snapshot Schedule (%s) Association: %s", clusterIdentifier, scheduleIdentifier, err) + } + + if err := waitForRedshiftSnapshotScheduleAssociationDestroy(conn, 75*time.Minute, clusterIdentifier, scheduleIdentifier); err != nil { + return err + } + + return nil +} + +func resourceAwsRedshiftSnapshotScheduleAssociationParseId(id string) (clusterIdentifier, scheduleIdentifier string, err error) { + parts := strings.SplitN(id, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + err = fmt.Errorf("aws_redshift_snapshot_schedule_association id must be of the form /") + return + } + + clusterIdentifier = parts[0] + scheduleIdentifier = parts[1] + return +} + +func resourceAwsRedshiftSnapshotScheduleAssociationStateRefreshFunc(clusterIdentifier, scheduleIdentifier string, conn *redshift.Redshift) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[INFO] Reading Redshift Cluster (%s) Snapshot Schedule (%s) Association Information", clusterIdentifier, scheduleIdentifier) + resp, err := conn.DescribeSnapshotSchedules(&redshift.DescribeSnapshotSchedulesInput{ + ClusterIdentifier: aws.String(clusterIdentifier), + ScheduleIdentifier: aws.String(scheduleIdentifier), + }) + if isAWSErr(err, redshift.ErrCodeClusterNotFoundFault, "") { + return 42, "destroyed", nil + } + if 
isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { + return 42, "destroyed", nil + } + if err != nil { + log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) Snapshot Schedule (%s) Association when waiting: %s", clusterIdentifier, scheduleIdentifier, err) + return nil, "", err + } + + var rcas *redshift.ClusterAssociatedToSchedule + + for _, s := range resp.SnapshotSchedules { + if aws.StringValue(s.ScheduleIdentifier) == scheduleIdentifier { + for _, c := range s.AssociatedClusters { + if aws.StringValue(c.ClusterIdentifier) == clusterIdentifier { + rcas = c + } + } + } + } + + if rcas == nil { + return 42, "destroyed", nil + } + + if rcas.ScheduleAssociationState != nil { + log.Printf("[DEBUG] Redshift Cluster (%s) Snapshot Schedule (%s) Association status: %s", clusterIdentifier, scheduleIdentifier, aws.StringValue(rcas.ScheduleAssociationState)) + } + + return rcas, aws.StringValue(rcas.ScheduleAssociationState), nil + } +} + +func waitForRedshiftSnapshotScheduleAssociationActive(conn *redshift.Redshift, timeout time.Duration, clusterIdentifier, scheduleIdentifier string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{redshift.ScheduleStateModifying}, + Target: []string{redshift.ScheduleStateActive}, + Refresh: resourceAwsRedshiftSnapshotScheduleAssociationStateRefreshFunc(clusterIdentifier, scheduleIdentifier, conn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Redshift Cluster (%s) and Snapshot Schedule (%s) Association state to be \"ACTIVE\": %s", clusterIdentifier, scheduleIdentifier, err) + } + + return nil +} + +func waitForRedshiftSnapshotScheduleAssociationDestroy(conn *redshift.Redshift, timeout time.Duration, clusterIdentifier, scheduleIdentifier string) error { + + stateConf := &resource.StateChangeConf{ + Pending: []string{redshift.ScheduleStateModifying, redshift.ScheduleStateActive}, + Target: []string{"destroyed"}, + Refresh: resourceAwsRedshiftSnapshotScheduleAssociationStateRefreshFunc(clusterIdentifier, scheduleIdentifier, conn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Redshift Cluster (%s) and Snapshot Schedule (%s) Association state to be \"destroyed\": %s", clusterIdentifier, scheduleIdentifier, err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go index e503108ece7..a946b54d14e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRedshiftSubnetGroup() *schema.Resource { @@ -22,6 +22,11 @@ func resourceAwsRedshiftSubnetGroup() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { 
Type: schema.TypeString, ForceNew: true, @@ -103,12 +108,6 @@ func resourceAwsRedshiftSubnetGroupRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error setting Redshift Subnet Group Tags: %#v", err) } - return nil -} - -func resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - arn := arn.ARN{ Partition: meta.(*AWSClient).partition, Service: "redshift", @@ -116,8 +115,20 @@ func resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface AccountID: meta.(*AWSClient).accountid, Resource: fmt.Sprintf("subnetgroup:%s", d.Id()), }.String() - if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { + + d.Set("arn", arn) + + return nil +} + +func resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + d.Partial(true) + + if tagErr := setTagsRedshift(conn, d); tagErr != nil { return tagErr + } else { + d.SetPartial("tags") } if d.HasChange("subnet_ids") || d.HasChange("description") { @@ -143,7 +154,9 @@ func resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface } } - return nil + d.Partial(false) + + return resourceAwsRedshiftSubnetGroupRead(d, meta) } func resourceAwsRedshiftSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_resourcegroups_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_resourcegroups_group.go index ac3138ed4de..3b8e2cc7bbc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_resourcegroups_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_resourcegroups_group.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/resourcegroups" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsResourceGroupsGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go index 9c000f0b9cb..891c5c2e5c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // How long to sleep if a limit-exceeded event happens @@ -269,6 +269,9 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { return nil }) + if isResourceTimeoutError(err) { + _, err = 
conn.CreateRoute(createOpts) + } if err != nil { return fmt.Errorf("Error creating route: %s", err) } @@ -280,6 +283,9 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { route, err = resourceAwsRouteFindRoute(conn, d.Get("route_table_id").(string), v.(string), "") return resource.RetryableError(err) }) + if isResourceTimeoutError(err) { + route, err = resourceAwsRouteFindRoute(conn, d.Get("route_table_id").(string), v.(string), "") + } if err != nil { return fmt.Errorf("Error finding route after creating it: %s", err) } @@ -290,6 +296,9 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { route, err = resourceAwsRouteFindRoute(conn, d.Get("route_table_id").(string), "", v.(string)) return resource.RetryableError(err) }) + if isResourceTimeoutError(err) { + route, err = resourceAwsRouteFindRoute(conn, d.Get("route_table_id").(string), "", v.(string)) + } if err != nil { return fmt.Errorf("Error finding route after creating it: %s", err) } @@ -440,29 +449,30 @@ func resourceAwsRouteDelete(d *schema.ResourceData, meta interface{}) error { } log.Printf("[DEBUG] Route delete opts: %s", deleteOpts) - var err error = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + var resp *ec2.DeleteRouteOutput + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { log.Printf("[DEBUG] Trying to delete route with opts %s", deleteOpts) - resp, err := conn.DeleteRoute(deleteOpts) + var err error + resp, err = conn.DeleteRoute(deleteOpts) log.Printf("[DEBUG] Route delete result: %s", resp) if err == nil { return nil } - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if ec2err.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Trying to delete route again: %q", - ec2err.Message()) + if isAWSErr(err, "InvalidParameterException", "") { return resource.RetryableError(err) } return resource.NonRetryableError(err) }) - - return err + if isResourceTimeoutError(err) { + resp, err = conn.DeleteRoute(deleteOpts) + } + if err != nil { + return fmt.Errorf("Error deleting route: %s", err) + } + return nil } func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go index 1b14c6c1c97..08924a3d736 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go @@ -5,8 +5,8 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go index d9c60990746..d65d85532ea 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go @@ -5,9 +5,9 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -47,9 +47,10 @@ func resourceAwsRoute53HealthCheck() *schema.Resource { Optional: true, }, "request_interval": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, // todo this should be updateable but the awslabs route53 service doesnt have the ability + Type: schema.TypeInt, + Optional: true, + ForceNew: true, // TODO: this should be updatable but the awslabs route53 service doesn't have the ability + ValidateFunc: validation.IntInSlice([]int{10, 30}), }, "ip_address": { Type: schema.TypeString, @@ -384,7 +385,7 @@ func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{}) func resourceAwsRoute53HealthCheckDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).r53conn - log.Printf("[DEBUG] Deleteing Route53 health check: %s", d.Id()) + log.Printf("[DEBUG] Deleting Route53 health check: %s", d.Id()) _, err := conn.DeleteHealthCheck(&route53.DeleteHealthCheckInput{HealthCheckId: aws.String(d.Id())}) return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_query_log.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_query_log.go index f992141f205..33a70e3e9a4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_query_log.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_query_log.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go index 2a96260b737..bc7bd72a8cd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go @@ -10,10 +10,10 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -280,6 +280,18 @@ func resourceAwsRoute53RecordUpdate(d *schema.ResourceData, meta interface{}) er Type: aws.String(typeo.(string)), } + // If the old record had a weighted_routing_policy we need to pass that in + // here because otherwise the API will give us an error. + if v, _ := d.GetChange("weighted_routing_policy"); v != nil { + if o, ok := v.([]interface{}); ok { + if len(o) == 1 { + if v, ok := o[0].(map[string]interface{}); ok { + oldRec.Weight = aws.Int64(int64(v["weight"].(int))) + } + } + } + } + if v, _ := d.GetChange("ttl"); v.(int) != 0 { oldRec.TTL = aws.Int64(int64(v.(int))) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go index ad6cda9d3be..d34de7cd55d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go @@ -5,7 +5,7 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsRoute53RecordMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_endpoint.go index b04d2bd0ab0..0cd4b6f8504 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_endpoint.go @@ -8,10 +8,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53resolver" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) const ( @@ -115,7 +116,7 @@ func resourceAwsRoute53ResolverEndpointCreate(d *schema.ResourceData, meta inter req.Name = aws.String(v.(string)) } if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { - req.Tags = tagsFromMapRoute53Resolver(v.(map[string]interface{})) + req.Tags = keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().Route53resolverTags() } log.Printf("[DEBUG] Creating Route53 Resolver endpoint: %#v", req) @@ -179,8 +180,14 @@ func resourceAwsRoute53ResolverEndpointRead(d *schema.ResourceData, meta interfa return err } - if err := getTagsRoute53Resolver(conn, d); err != nil { - return fmt.Errorf("error getting Route53 Resolver endpoint (%s) tags: %s", d.Id(), err) + tags, err := keyvaluetags.Route53resolverListTags(conn, d.Get("arn").(string)) + + if err != nil { + return fmt.Errorf("error 
listing tags for Route53 Resolver endpoint (%s): %s", d.Get("arn").(string), err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil @@ -257,10 +264,13 @@ func resourceAwsRoute53ResolverEndpointUpdate(d *schema.ResourceData, meta inter d.SetPartial("ip_address") } - if err := setTagsRoute53Resolver(conn, d); err != nil { - return fmt.Errorf("error setting Route53 Resolver endpoint (%s) tags: %s", d.Id(), err) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.Route53resolverUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Route53 Resolver endpoint (%s) tags: %s", d.Get("arn").(string), err) + } + d.SetPartial("tags") } - d.SetPartial("tags") d.Partial(false) return resourceAwsRoute53ResolverEndpointRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule.go index ff2ccae28f6..937951bea5d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule.go @@ -3,13 +3,14 @@ package aws import ( "bytes" "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" "time" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53resolver" @@ -126,7 +127,7 @@ func resourceAwsRoute53ResolverRuleCreate(d *schema.ResourceData, meta interface req.TargetIps = expandRoute53ResolverRuleTargetIps(v.(*schema.Set)) } if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { - req.Tags = tagsFromMapRoute53Resolver(v.(map[string]interface{})) + req.Tags = keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().Route53resolverTags() } log.Printf("[DEBUG] Creating Route 53 Resolver rule: %s", req) @@ -172,9 +173,14 @@ func resourceAwsRoute53ResolverRuleRead(d *schema.ResourceData, meta interface{} return err } - err = getTagsRoute53Resolver(conn, d) + tags, err := keyvaluetags.Route53resolverListTags(conn, d.Get("arn").(string)) + if err != nil { - return fmt.Errorf("Error reading Route 53 Resolver rule tags %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Route53 Resolver rule (%s): %s", d.Get("arn").(string), err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil @@ -217,10 +223,13 @@ func resourceAwsRoute53ResolverRuleUpdate(d *schema.ResourceData, meta interface d.SetPartial("target_ip") } - if err := setTagsRoute53Resolver(conn, d); err != nil { - return fmt.Errorf("error setting Route53 Resolver rule (%s) tags: %s", d.Id(), err) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err 
:= keyvaluetags.Route53resolverUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Route53 Resolver rule (%s) tags: %s", d.Get("arn").(string), err) + } + d.SetPartial("tags") } - d.SetPartial("tags") d.Partial(false) return resourceAwsRoute53ResolverRuleRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule_association.go index cfdec1b71cd..a371832174f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_resolver_rule_association.go @@ -5,9 +5,9 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53resolver" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go index 3df89ece3f8..16ec6968ba0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go @@ -10,10 +10,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsRoute53Zone() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go index 2d2269c1090..63b00a75476 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go index d2a062a4c31..626a8f764e4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRouteTable() *schema.Resource { @@ -391,8 +391,11 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateRoute(&opts) + } if err != nil { - return err + return fmt.Errorf("Error creating route: %s", err) } routes.Add(route) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go index 9dc6e0b4bac..7bcaae1c084 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go @@ -3,13 +3,14 @@ package aws import ( "fmt" "log" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsRouteTableAssociation() *schema.Resource { @@ -18,12 +19,14 @@ func resourceAwsRouteTableAssociation() *schema.Resource { Read: resourceAwsRouteTableAssociationRead, Update: resourceAwsRouteTableAssociationUpdate, Delete: resourceAwsRouteTableAssociationDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRouteTableAssociationImport, + }, Schema: map[string]*schema.Schema{ "subnet_id": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "route_table_id": { @@ -47,9 +50,10 @@ func resourceAwsRouteTableAssociationCreate(d *schema.ResourceData, meta interfa SubnetId: aws.String(d.Get("subnet_id").(string)), } + var associationID string var resp *ec2.AssociateRouteTableOutput - var err error - err = resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error resp, err = conn.AssociateRouteTable(&associationOpts) if err != nil { if awsErr, ok := err.(awserr.Error); ok { @@ -59,14 +63,18 @@ func resourceAwsRouteTableAssociationCreate(d *schema.ResourceData, meta interfa } return resource.NonRetryableError(err) } + associationID = *resp.AssociationId return nil }) + if 
isResourceTimeoutError(err) { + resp, err = conn.AssociateRouteTable(&associationOpts) + if err == nil { + associationID = aws.StringValue(resp.AssociationId) + } + } if err != nil { - return err + return fmt.Errorf("Error creating route table association: %s", err) } // Set the ID and return - d.SetId(*resp.AssociationId) + d.SetId(associationID) log.Printf("[INFO] Association ID: %s", d.Id()) return nil @@ -153,3 +161,49 @@ func resourceAwsRouteTableAssociationDelete(d *schema.ResourceData, meta interfa return nil } + +func resourceAwsRouteTableAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return []*schema.ResourceData{}, fmt.Errorf("Wrong format for import: %s. Use 'subnet ID/route table ID'", d.Id()) + } + + subnetID := parts[0] + routeTableID := parts[1] + + log.Printf("[DEBUG] Importing route table association, subnet: %s, route table: %s", subnetID, routeTableID) + + conn := meta.(*AWSClient).ec2conn + + input := &ec2.DescribeRouteTablesInput{} + input.Filters = buildEC2AttributeFilterList( + map[string]string{ + "association.subnet-id": subnetID, + "association.route-table-id": routeTableID, + }, + ) + + output, err := conn.DescribeRouteTables(input) + if err != nil || len(output.RouteTables) == 0 { + return nil, fmt.Errorf("Error finding route table: %v", err) + } + + rt := output.RouteTables[0] + + var associationID string + for _, a := range rt.Associations { + if aws.StringValue(a.SubnetId) == subnetID { + associationID = aws.StringValue(a.RouteTableAssociationId) + break + } + } + if associationID == "" { + return nil, fmt.Errorf("Error finding route table, ID: %v", *rt.RouteTableId) + } + + d.SetId(associationID) + d.Set("subnet_id", subnetID) + d.Set("route_table_id", routeTableID) + + return []*schema.ResourceData{d}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_account_public_access_block.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_account_public_access_block.go index e809824b22a..82a35fbaf89 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_account_public_access_block.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_account_public_access_block.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsS3AccountPublicAccessBlock() *schema.Resource { @@ -106,6 +106,10 @@ func resourceAwsS3AccountPublicAccessBlockRead(d *schema.ResourceData, meta inte return nil }) + if isResourceTimeoutError(err) { + output, err = conn.GetPublicAccessBlock(input) + } + if isAWSErr(err, s3control.ErrCodeNoSuchPublicAccessBlockConfiguration, "") { log.Printf("[WARN] S3 Account Public Access Block (%s) not found, removing from state", d.Id()) d.SetId("") diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go index c8c75e81ce7..02002eb4e40 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go @@ -15,11 +15,11 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsS3Bucket() *schema.Resource { @@ -567,8 +567,8 @@ func resourceAwsS3Bucket() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - s3.ObjectLockModeGovernance, - s3.ObjectLockModeCompliance, + s3.ObjectLockRetentionModeGovernance, + s3.ObjectLockRetentionModeCompliance, }, false), }, @@ -653,8 +653,7 @@ func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { if awsErr.Code() == "OperationAborted" { log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) return resource.RetryableError( - fmt.Errorf("Error creating S3 bucket %s, retrying: %s", - bucket, err)) + fmt.Errorf("Error creating S3 bucket %s, retrying: %s", bucket, err)) } } if err != nil { @@ -663,7 +662,9 @@ func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { return nil }) - + if isResourceTimeoutError(err) { + _, err = s3conn.CreateBucket(req) + } if err != nil { return fmt.Errorf("Error creating S3 bucket: %s", err) } @@ -675,7 +676,7 @@ func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn - if err := setTagsS3(s3conn, d); err != nil { + if err := setTagsS3Bucket(s3conn, d); err != nil { return fmt.Errorf("%q: %s", d.Get("bucket").(string), err) } @@ -1011,6 +1012,7 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { lifecycleRules = make([]map[string]interface{}, 0, len(lifecycle.Rules)) for _, lifecycleRule := range lifecycle.Rules { + log.Printf("[DEBUG] S3 bucket: %s, read lifecycle rule: %v", d.Id(), lifecycleRule) rule := make(map[string]interface{}) // ID @@ -1033,6 +1035,10 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { if filter.Prefix != nil && *filter.Prefix != "" { rule["prefix"] = *filter.Prefix } + // Tag + if filter.Tag != nil { + rule["tags"] = tagsToMapS3([]*s3.Tag{filter.Tag}) + } } } else { if lifecycleRule.Prefix != nil { @@ -1217,9 +1223,9 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { } } - tagSet, err := getTagSetS3(s3conn, d.Id()) + tagSet, err := getTagSetS3Bucket(s3conn, d.Id()) if err != nil { - return err + return fmt.Errorf("error getting S3 bucket tags: %s", err) } if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { @@ -1250,52 +1256,25 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { if isAWSErr(err, "BucketNotEmpty", 
"") { if d.Get("force_destroy").(bool) { + // Use a S3 service client that can handle multiple slashes in URIs. + // While aws_s3_bucket_object resources cannot create these object + // keys, other AWS services and applications using the S3 Bucket can. + s3conn = meta.(*AWSClient).s3connUriCleaningDisabled + // bucket may have things delete them log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) - bucket := d.Get("bucket").(string) - resp, err := s3conn.ListObjectVersions( - &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - }, - ) - - if err != nil { - return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) - } - - objectsToDelete := make([]*s3.ObjectIdentifier, 0) - - if len(resp.DeleteMarkers) != 0 { - - for _, v := range resp.DeleteMarkers { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } + // Delete everything including locked objects. + // Don't ignore any object errors or we could recurse infinitely. + var objectLockEnabled bool + objectLockConfiguration := expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) + if objectLockConfiguration != nil { + objectLockEnabled = aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled } - - if len(resp.Versions) != 0 { - for _, v := range resp.Versions { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - params := &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objectsToDelete, - }, - } - - _, err = s3conn.DeleteObjects(params) + err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", objectLockEnabled, false) if err != nil { - return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) + return fmt.Errorf("error S3 Bucket force_destroy: %s", err) } // this line recurses until all objects are deleted or an error is returned @@ -1323,17 +1302,18 @@ func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) erro } err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketPolicy(params); err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "MalformedPolicy" || awserr.Code() == s3.ErrCodeNoSuchBucket { - return resource.RetryableError(awserr) - } - } + _, err := s3conn.PutBucketPolicy(params) + if isAWSErr(err, "MalformedPolicy", "") || isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return resource.RetryableError(err) + } + if err != nil { return resource.NonRetryableError(err) } return nil }) - + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketPolicy(params) + } if err != nil { return fmt.Errorf("Error putting S3 policy: %s", err) } @@ -1584,11 +1564,15 @@ func WebsiteEndpoint(bucket string, region string) *S3Website { func WebsiteDomainUrl(region string) string { region = normalizeRegion(region) - // New regions uses different syntax for website endpoints - // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + // Different regions have different syntax for website endpoints + // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints if isOldRegion(region) { return fmt.Sprintf("s3-website-%s.amazonaws.com", region) } + if partition, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), region); ok && partition.ID() == 
endpoints.AwsCnPartitionID { + return fmt.Sprintf("s3-website.%s.amazonaws.com.cn", region) + } return fmt.Sprintf("s3-website.%s.amazonaws.com", region) } @@ -1760,12 +1744,7 @@ func resourceAwsS3BucketServerSideEncryptionConfigurationUpdate(s3conn *s3.S3, d Bucket: aws.String(bucket), } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.DeleteBucketEncryption(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) + _, err := s3conn.DeleteBucketEncryption(i) if err != nil { return fmt.Errorf("error removing S3 bucket server side encryption: %s", err) } @@ -1839,12 +1818,7 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema. Bucket: aws.String(bucket), } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.DeleteBucketReplication(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) + _, err := s3conn.DeleteBucketReplication(i) if err != nil { return fmt.Errorf("Error removing S3 bucket replication: %s", err) } @@ -1965,15 +1939,18 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema. log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketReplication(i); err != nil { - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || - isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { - return resource.RetryableError(err) - } + _, err := s3conn.PutBucketReplication(i) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketReplication(i) + } if err != nil { return fmt.Errorf("Error putting S3 replication configuration: %s", err) } @@ -1991,12 +1968,7 @@ func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) e Bucket: aws.String(bucket), } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.DeleteBucketLifecycle(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) + _, err := s3conn.DeleteBucketLifecycle(i) if err != nil { return fmt.Errorf("Error removing S3 lifecycle: %s", err) } @@ -2473,13 +2445,18 @@ type S3Website struct { // S3 Object Lock functions. 
// -func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) (interface{}, error) { +func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) ([]interface{}, error) { resp, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return conn.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{ Bucket: aws.String(bucket), }) }) if err != nil { + // Certain S3 implementations do not include this API + if isAWSErr(err, "MethodNotAllowed", "") { + return nil, nil + } + if isAWSErr(err, "ObjectLockConfigurationNotFoundError", "") { return nil, nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_inventory.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_inventory.go index bd8468dae40..79f2201a30f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_inventory.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_inventory.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsS3BucketInventory() *schema.Resource { @@ -240,6 +240,9 @@ func resourceAwsS3BucketInventoryPut(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutBucketInventoryConfiguration(input) + } if err != nil { return fmt.Errorf("Error putting S3 bucket inventory configuration: %s", err) } @@ -306,6 +309,14 @@ func resourceAwsS3BucketInventoryRead(d *schema.ResourceData, meta interface{}) } return nil }) + if isResourceTimeoutError(err) { + output, err = conn.GetBucketInventoryConfiguration(input) + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "NoSuchConfiguration", "The specified configuration does not exist.") { + if !d.IsNewResource() { + return nil + } + } + } if err != nil { return fmt.Errorf("error getting S3 Bucket Inventory (%s): %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_metric.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_metric.go index c67f56b7434..1f6ce04cb44 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_metric.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_metric.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsS3BucketMetric() *schema.Resource { @@ -62,8 +62,9 @@ func resourceAwsS3BucketMetricPut(d *schema.ResourceData, meta interface{}) erro if v, ok := d.GetOk("filter"); ok { filterList := 
v.([]interface{}) - filterMap := filterList[0].(map[string]interface{}) - metricsConfiguration.Filter = expandS3MetricsFilter(filterMap) + if filterMap, ok := filterList[0].(map[string]interface{}); ok { + metricsConfiguration.Filter = expandS3MetricsFilter(filterMap) + } } input := &s3.PutBucketMetricsConfigurationInput{ @@ -83,6 +84,9 @@ func resourceAwsS3BucketMetricPut(d *schema.ResourceData, meta interface{}) erro } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.PutBucketMetricsConfiguration(input) + } if err != nil { return fmt.Errorf("Error putting S3 metric configuration: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go index 847762c599c..fd494a8f71e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -310,7 +310,8 @@ func resourceAwsS3BucketNotificationPut(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] S3 bucket: %s, Putting notification: %v", bucket, i) err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketNotificationConfiguration(i); err != nil { + _, err := s3conn.PutBucketNotificationConfiguration(i) + if err != nil { if awserr, ok := err.(awserr.Error); ok { switch awserr.Message() { case "Unable to validate the following destination configurations": @@ -323,6 +324,9 @@ func resourceAwsS3BucketNotificationPut(d *schema.ResourceData, meta interface{} // Successful put configuration return nil }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketNotificationConfiguration(i) + } if err != nil { return fmt.Errorf("Error putting S3 notification configuration: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go index bf90236e696..4043b7c6c56 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go @@ -9,9 +9,10 @@ import ( "net/url" "os" "strings" + "time" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/mitchellh/go-homedir" "github.com/aws/aws-sdk-go/aws" @@ -31,15 +32,17 @@ func resourceAwsS3BucketObject() *schema.Resource { Schema: map[string]*schema.Schema{ "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + ValidateFunc: validation.NoZeroValues, }, "key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, }, "acl": { @@ -77,6 +80,13 @@ func resourceAwsS3BucketObject() *schema.Resource { Optional: true, }, + "metadata": { + Type: schema.TypeMap, + ValidateFunc: validateMetadataIsLowerCase, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "content_type": { Type: schema.TypeString, Optional: true, @@ -139,7 +149,7 @@ func resourceAwsS3BucketObject() *schema.Resource { // See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html Optional: true, Computed: true, - ConflictsWith: []string{"kms_key_id", "server_side_encryption"}, + ConflictsWith: []string{"kms_key_id"}, }, "version_id": { @@ -153,6 +163,36 @@ func resourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockLegalHoldStatusOn, + s3.ObjectLockLegalHoldStatusOff, + }, false), + }, + + "object_lock_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockModeGovernance, + s3.ObjectLockModeCompliance, + }, false), + }, + + "object_lock_retain_until_date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.ValidateRFC3339TimeString, + }, }, } } @@ -192,8 +232,6 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("error decoding content_base64: %s", err) } body = bytes.NewReader(contentRaw) - } else { - return fmt.Errorf("Must specify \"source\", \"content\", or \"content_base64\" field") } bucket := d.Get("bucket").(string) @@ -218,6 +256,10 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.ContentType = aws.String(v.(string)) } + if v, ok := d.GetOk("metadata"); ok { + putInput.Metadata = stringMapToPointers(v.(map[string]interface{})) + } + if v, ok := d.GetOk("content_encoding"); ok { putInput.ContentEncoding = aws.String(v.(string)) } @@ -252,6 +294,18 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.WebsiteRedirectLocation = aws.String(v.(string)) } + if v, ok := d.GetOk("object_lock_legal_hold_status"); ok { + putInput.ObjectLockLegalHoldStatus = aws.String(v.(string)) + } + + if v, ok := d.GetOk("object_lock_mode"); ok { + putInput.ObjectLockMode = aws.String(v.(string)) + } + + if v, ok := d.GetOk("object_lock_retain_until_date"); ok { + putInput.ObjectLockRetainUntilDate = expandS3ObjectLockRetainUntilDate(v.(string)) + } + if _, err := s3conn.PutObject(putInput); err != nil { return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err) } @@ -292,9 +346,23 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err d.Set("content_encoding", resp.ContentEncoding) d.Set("content_language", resp.ContentLanguage) d.Set("content_type", resp.ContentType) + metadata := pointersMapToStringList(resp.Metadata) + + // AWS Go SDK capitalizes metadata, this is a workaround. 
https://github.com/aws/aws-sdk-go/issues/445 + for k, v := range metadata { + delete(metadata, k) + metadata[strings.ToLower(k)] = v + } + + if err := d.Set("metadata", metadata); err != nil { + return fmt.Errorf("error setting metadata: %s", err) + } d.Set("version_id", resp.VersionId) d.Set("server_side_encryption", resp.ServerSideEncryption) d.Set("website_redirect", resp.WebsiteRedirectLocation) + d.Set("object_lock_legal_hold_status", resp.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", resp.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(resp.ObjectLockRetainUntilDate)) // Only set non-default KMS key ID (one that doesn't match default) if resp.SSEKMSKeyId != nil { @@ -333,17 +401,18 @@ func resourceAwsS3BucketObjectUpdate(d *schema.ResourceData, meta interface{}) e // Changes to any of these attributes requires creation of a new object version (if bucket is versioned): for _, key := range []string{ "cache_control", + "content_base64", "content_disposition", "content_encoding", "content_language", "content_type", - "source", "content", - "content_base64", - "storage_class", - "server_side_encryption", - "kms_key_id", "etag", + "kms_key_id", + "metadata", + "server_side_encryption", + "source", + "storage_class", "website_redirect", } { if d.HasChange(key) { @@ -364,6 +433,45 @@ func resourceAwsS3BucketObjectUpdate(d *schema.ResourceData, meta interface{}) e } } + if d.HasChange("object_lock_legal_hold_status") { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(d.Get("object_lock_legal_hold_status").(string)), + }, + }) + if err != nil { + return fmt.Errorf("error putting S3 object lock legal hold: %s", err) + } + } + + if d.HasChange("object_lock_mode") || d.HasChange("object_lock_retain_until_date") { + req := &s3.PutObjectRetentionInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + Retention: &s3.ObjectLockRetention{ + Mode: aws.String(d.Get("object_lock_mode").(string)), + RetainUntilDate: expandS3ObjectLockRetainUntilDate(d.Get("object_lock_retain_until_date").(string)), + }, + } + + // Bypass required to lower or clear retain-until date. 
+ if d.HasChange("object_lock_retain_until_date") { + oraw, nraw := d.GetChange("object_lock_retain_until_date") + o := expandS3ObjectLockRetainUntilDate(oraw.(string)) + n := expandS3ObjectLockRetainUntilDate(nraw.(string)) + if n == nil || (o != nil && n.Before(*o)) { + req.BypassGovernanceRetention = aws.Bool(true) + } + } + + _, err := conn.PutObjectRetention(req) + if err != nil { + return fmt.Errorf("error putting S3 object lock retention: %s", err) + } + } + if err := setTagsS3Object(conn, d); err != nil { return fmt.Errorf("error setting S3 object tags: %s", err) } @@ -379,48 +487,221 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e // We are effectively ignoring any leading '/' in the key name as aws.Config.DisableRestProtocolURICleaning is false key = strings.TrimPrefix(key, "/") + var err error if _, ok := d.GetOk("version_id"); ok { - // Bucket is versioned, we need to delete all versions - vInput := s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(key), + err = deleteAllS3ObjectVersions(s3conn, bucket, key, d.Get("force_destroy").(bool), false) + } else { + err = deleteS3ObjectVersion(s3conn, bucket, key, "", false) + } + + if err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) Object (%s): %s", bucket, key, err) + } + + return nil +} + +func validateMetadataIsLowerCase(v interface{}, k string) (ws []string, errors []error) { + value := v.(map[string]interface{}) + + for k := range value { + if k != strings.ToLower(k) { + errors = append(errors, fmt.Errorf( + "Metadata must be lowercase only. Offending key: %q", k)) } - out, err := s3conn.ListObjectVersions(&vInput) - if err != nil { - return fmt.Errorf("Failed listing S3 object versions: %s", err) + } + return +} + +func resourceAwsS3BucketObjectCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error { + if d.HasChange("etag") { + d.SetNewComputed("version_id") + } + + return nil +} + +// deleteAllS3ObjectVersions deletes all versions of a specified key from an S3 bucket. +// If key is empty then all versions of all objects are deleted. +// Set force to true to override any S3 object lock protections on object lock enabled buckets. +func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error { + input := &s3.ListObjectVersionsInput{ + Bucket: aws.String(bucketName), + } + if key != "" { + input.Prefix = aws.String(key) + } + + var lastErr error + err := conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - for _, v := range out.Versions { - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - VersionId: v.VersionId, + for _, objectVersion := range page.Versions { + objectKey := aws.StringValue(objectVersion.Key) + objectVersionID := aws.StringValue(objectVersion.VersionId) + + if key != "" && key != objectKey { + continue + } + + err := deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) + if isAWSErr(err, "AccessDenied", "") && force { + // Remove any legal hold. 
+ resp, err := conn.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + }) + + if err != nil { + log.Printf("[ERROR] Error getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + if aws.StringValue(resp.ObjectLockLegalHoldStatus) == s3.ObjectLockLegalHoldStatusOn { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(s3.ObjectLockLegalHoldStatusOff), + }, + }) + + if err != nil { + log.Printf("[ERROR] Error putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + // Attempt to delete again. + err = deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) + + if err != nil { + lastErr = err + } + + continue + } + + // AccessDenied for another reason. + lastErr = fmt.Errorf("AccessDenied deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) + continue } - _, err := s3conn.DeleteObject(&input) + if err != nil { - return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s", - key, v, err) + lastErr = err } } - } else { - // Just delete the object - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object version, last error: %s", lastErr) } - _, err := s3conn.DeleteObject(&input) - if err != nil { - return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key) + + lastErr = nil + } + + err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } + + for _, deleteMarker := range page.DeleteMarkers { + deleteMarkerKey := aws.StringValue(deleteMarker.Key) + deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) + + if key != "" && key != deleteMarkerKey { + continue + } + + // Delete markers have no object lock protections. + err := deleteS3ObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID, false) + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object delete marker, last error: %s", lastErr) + } + + lastErr = nil } return nil } -func resourceAwsS3BucketObjectCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error { - if d.HasChange("etag") { - d.SetNewComputed("version_id") +// deleteS3ObjectVersion deletes a specific bucket object version. +// Set force to true to override any S3 object lock protections. 
+func deleteS3ObjectVersion(conn *s3.S3, b, k, v string, force bool) error { + input := &s3.DeleteObjectInput{ + Bucket: aws.String(b), + Key: aws.String(k), } - return nil + if v != "" { + input.VersionId = aws.String(v) + } + + if force { + input.BypassGovernanceRetention = aws.Bool(true) + } + + log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) + _, err := conn.DeleteObject(input) + + if err != nil { + log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) + } + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { + return nil + } + + return err +} + +func expandS3ObjectLockRetainUntilDate(v string) *time.Time { + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return nil + } + + return aws.Time(t) +} + +func flattenS3ObjectLockRetainUntilDate(t *time.Time) string { + if t == nil { + return "" + } + + return t.Format(time.RFC3339) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go index ea41b6b7bce..bc651406f3d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsS3BucketPolicy() *schema.Resource { @@ -54,17 +54,18 @@ func resourceAwsS3BucketPolicyPut(d *schema.ResourceData, meta interface{}) erro } err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketPolicy(params); err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "MalformedPolicy" { - return resource.RetryableError(awserr) - } - } + _, err := s3conn.PutBucketPolicy(params) + if isAWSErr(err, "MalformedPolicy", "") { + return resource.RetryableError(err) + } + if err != nil { return resource.NonRetryableError(err) } return nil }) - + if isResourceTimeoutError(err) { + _, err = s3conn.PutBucketPolicy(params) + } if err != nil { return fmt.Errorf("Error putting S3 policy: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_public_access_block.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_public_access_block.go index a103cc0b265..4a5e33842f8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_public_access_block.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_public_access_block.go @@ -6,11 +6,11 @@ import ( "time" "github.com/aws/aws-sdk-go/service/s3control" - "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsS3BucketPublicAccessBlock() *schema.Resource { @@ -85,6 +85,9 @@ func resourceAwsS3BucketPublicAccessBlockCreate(d *schema.ResourceData, meta int return nil }) + if isResourceTimeoutError(err) { + _, err = s3conn.PutPublicAccessBlock(input) + } if err != nil { return fmt.Errorf("error creating public access block policy for S3 bucket (%s): %s", bucket, err) } @@ -117,12 +120,19 @@ func resourceAwsS3BucketPublicAccessBlockRead(d *schema.ResourceData, meta inter return nil }) - + if isResourceTimeoutError(err) { + output, err = s3conn.GetPublicAccessBlock(input) + } if isAWSErr(err, s3control.ErrCodeNoSuchPublicAccessBlockConfiguration, "") { log.Printf("[WARN] S3 Bucket Public Access Block (%s) not found, removing from state", d.Id()) d.SetId("") return nil } + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { return fmt.Errorf("error reading S3 bucket Public Access Block: %s", err) @@ -156,6 +166,16 @@ func resourceAwsS3BucketPublicAccessBlockUpdate(d *schema.ResourceData, meta int log.Printf("[DEBUG] Updating S3 bucket Public Access Block: %s", input) _, err := s3conn.PutPublicAccessBlock(input) + if isAWSErr(err, s3control.ErrCodeNoSuchPublicAccessBlockConfiguration, "") { + log.Printf("[WARN] S3 Bucket Public Access Block (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { return fmt.Errorf("error updating S3 Bucket Public Access Block (%s): %s", d.Id(), err) } @@ -186,6 +206,9 @@ func resourceAwsS3BucketPublicAccessBlockDelete(d *schema.ResourceData, meta int if isAWSErr(err, s3control.ErrCodeNoSuchPublicAccessBlockConfiguration, "") { return nil } + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return nil + } if err != nil { return fmt.Errorf("error deleting S3 Bucket Public Access Block (%s): %s", d.Id(), err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint.go index b3bf4eddd95..b48c215a47b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint.go @@ -6,8 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSagemakerEndpoint() *schema.Resource { @@ -61,7 +62,7 @@ func resourceAwsSagemakerEndpointCreate(d *schema.ResourceData, meta interface{} } if v, ok := d.GetOk("tags"); ok { - createOpts.Tags = 
tagsFromMapSagemaker(v.(map[string]interface{})) + createOpts.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } log.Printf("[DEBUG] SageMaker Endpoint create config: %#v", *createOpts) @@ -116,15 +117,13 @@ func resourceAwsSagemakerEndpointRead(d *schema.ResourceData, meta interface{}) return err } - tagsOutput, err := conn.ListTags(&sagemaker.ListTagsInput{ - ResourceArn: endpoint.EndpointArn, - }) + tags, err := keyvaluetags.SagemakerListTags(conn, aws.StringValue(endpoint.EndpointArn)) if err != nil { - return fmt.Errorf("error listing tags for SageMaker Endpoint (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for Sagemaker Endpoint (%s): %s", d.Id(), err) } - if err := d.Set("tags", tagsToMapSagemaker(tagsOutput.Tags)); err != nil { - return err + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } return nil @@ -135,8 +134,12 @@ func resourceAwsSagemakerEndpointUpdate(d *schema.ResourceData, meta interface{} d.Partial(true) - if err := setSagemakerTags(conn, d); err != nil { - return err + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Endpoint (%s) tags: %s", d.Id(), err) + } } d.SetPartial("tags") diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint_configuration.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint_configuration.go index ac5f4f9226b..45ae503dd21 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_endpoint_configuration.go @@ -4,12 +4,13 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSagemakerEndpointConfiguration() *schema.Resource { @@ -116,7 +117,7 @@ func resourceAwsSagemakerEndpointConfigurationCreate(d *schema.ResourceData, met } if v, ok := d.GetOk("tags"); ok { - createOpts.Tags = tagsFromMapSagemaker(v.(map[string]interface{})) + createOpts.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } log.Printf("[DEBUG] SageMaker Endpoint Configuration create config: %#v", *createOpts) @@ -159,23 +160,27 @@ func resourceAwsSagemakerEndpointConfigurationRead(d *schema.ResourceData, meta return err } - tagsOutput, err := conn.ListTags(&sagemaker.ListTagsInput{ - ResourceArn: endpointConfig.EndpointConfigArn, - }) + tags, err := keyvaluetags.SagemakerListTags(conn, aws.StringValue(endpointConfig.EndpointConfigArn)) if err != nil { - return fmt.Errorf("error listing tags of SageMaker Endpoint Configuration %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Sagemaker Endpoint Configuration (%s): %s", d.Id(), 
err) } - if err := d.Set("tags", tagsToMapSagemaker(tagsOutput.Tags)); err != nil { - return err + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } + return nil } func resourceAwsSagemakerEndpointConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sagemakerconn - if err := setSagemakerTags(conn, d); err != nil { - return err + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Endpoint Configuration (%s) tags: %s", d.Id(), err) + } } return resourceAwsSagemakerEndpointConfigurationRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_model.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_model.go index 8da56ad3f04..ba977fa3aa2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_model.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_model.go @@ -8,8 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSagemakerModel() *schema.Resource { @@ -164,30 +165,27 @@ func resourceAwsSagemakerModelCreate(d *schema.ResourceData, meta interface{}) e } if v, ok := d.GetOk("primary_container"); ok { - m := v.([]interface{})[0].(map[string]interface{}) - createOpts.PrimaryContainer = expandContainer(m) + createOpts.PrimaryContainer = expandContainer(v.([]interface{})[0].(map[string]interface{})) } if v, ok := d.GetOk("container"); ok { - containers := expandContainers(v.([]interface{})) - createOpts.SetContainers(containers) + createOpts.Containers = expandContainers(v.([]interface{})) } if v, ok := d.GetOk("execution_role_arn"); ok { - createOpts.SetExecutionRoleArn(v.(string)) + createOpts.ExecutionRoleArn = aws.String(v.(string)) } if v, ok := d.GetOk("tags"); ok { - createOpts.SetTags(tagsFromMapSagemaker(v.(map[string]interface{}))) + createOpts.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } if v, ok := d.GetOk("vpc_config"); ok { - vpcConfig := expandSageMakerVpcConfigRequest(v.([]interface{})) - createOpts.SetVpcConfig(vpcConfig) + createOpts.VpcConfig = expandSageMakerVpcConfigRequest(v.([]interface{})) } if v, ok := d.GetOk("enable_network_isolation"); ok { - createOpts.SetEnableNetworkIsolation(v.(bool)) + createOpts.EnableNetworkIsolation = aws.Bool(v.(bool)) } log.Printf("[DEBUG] Sagemaker model create config: %#v", *createOpts) @@ -255,16 +253,15 @@ func resourceAwsSagemakerModelRead(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error setting vpc_config: %s", err) } - tagsOutput, err := conn.ListTags(&sagemaker.ListTagsInput{ - ResourceArn: model.ModelArn, - }) + tags, err := keyvaluetags.SagemakerListTags(conn, aws.StringValue(model.ModelArn)) if err != nil { - return 
fmt.Errorf("error listing tags of Sagemaker model %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Sagemaker Model (%s): %s", d.Id(), err) } - if err := d.Set("tags", tagsToMapSagemaker(tagsOutput.Tags)); err != nil { - return err + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } + return nil } @@ -286,10 +283,12 @@ func resourceAwsSagemakerModelUpdate(d *schema.ResourceData, meta interface{}) e d.Partial(true) - if err := setSagemakerTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Model (%s) tags: %s", d.Id(), err) + } } d.Partial(false) @@ -305,23 +304,24 @@ func resourceAwsSagemakerModelDelete(d *schema.ResourceData, meta interface{}) e } log.Printf("[INFO] Deleting Sagemaker model: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteModel(deleteOpts) if err == nil { return nil } - sagemakerErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - if sagemakerErr.Code() == "ResourceNotFound" { + if isAWSErr(err, "ResourceNotFound", "") { return resource.RetryableError(err) } - - return resource.NonRetryableError(fmt.Errorf("error deleting Sagemaker model: %s", err)) + return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteModel(deleteOpts) + } + if err != nil { + return fmt.Errorf("Error deleting sagemaker model: %s", err) + } + return nil } func expandContainer(m map[string]interface{}) *sagemaker.ContainerDefinition { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance.go index 07533b02507..8f6e46fb88e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance.go @@ -7,8 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSagemakerNotebookInstance() *schema.Resource { @@ -102,8 +103,7 @@ func resourceAwsSagemakerNotebookInstanceCreate(d *schema.ResourceData, meta int } if v, ok := d.GetOk("tags"); ok { - tagsIn := v.(map[string]interface{}) - createOpts.Tags = tagsFromMapSagemaker(tagsIn) + createOpts.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } log.Printf("[DEBUG] sagemaker notebook instance create config: %#v", *createOpts) @@ -177,16 +177,16 @@ func resourceAwsSagemakerNotebookInstanceRead(d *schema.ResourceData, meta inter if err := d.Set("arn", notebookInstance.NotebookInstanceArn); err != nil { return fmt.Errorf("error setting arn for 
sagemaker notebook instance (%s): %s", d.Id(), err) } - tagsOutput, err := conn.ListTags(&sagemaker.ListTagsInput{ - ResourceArn: notebookInstance.NotebookInstanceArn, - }) + + tags, err := keyvaluetags.SagemakerListTags(conn, aws.StringValue(notebookInstance.NotebookInstanceArn)) if err != nil { - return fmt.Errorf("error listing tags for sagemaker notebook instance (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for Sagemaker Notebook Instance (%s): %s", d.Id(), err) } - if err := d.Set("tags", tagsToMapSagemaker(tagsOutput.Tags)); err != nil { - return fmt.Errorf("error setting tags for notebook instance (%s): %s", d.Id(), err) + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } + return nil } @@ -195,8 +195,12 @@ func resourceAwsSagemakerNotebookInstanceUpdate(d *schema.ResourceData, meta int d.Partial(true) - if err := setSagemakerTags(conn, d); err != nil { - return err + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Notebook Instance (%s) tags: %s", d.Id(), err) + } } d.SetPartial("tags") @@ -248,33 +252,42 @@ func resourceAwsSagemakerNotebookInstanceUpdate(d *schema.ResourceData, meta int startOpts := &sagemaker.StartNotebookInstanceInput{ NotebookInstanceName: aws.String(d.Id()), } - + stateConf := &resource.StateChangeConf{ + Pending: []string{ + sagemaker.NotebookInstanceStatusStopped, + }, + Target: []string{sagemaker.NotebookInstanceStatusInService, sagemaker.NotebookInstanceStatusPending}, + Refresh: sagemakerNotebookInstanceStateRefreshFunc(conn, d.Id()), + Timeout: 30 * time.Second, + } // StartNotebookInstance sometimes doesn't take so we'll check for a state change and if // it doesn't change we'll send another request err := resource.Retry(5*time.Minute, func() *resource.RetryError { - if _, err := conn.StartNotebookInstance(startOpts); err != nil { + _, err := conn.StartNotebookInstance(startOpts) + if err != nil { return resource.NonRetryableError(fmt.Errorf("error starting sagemaker notebook instance (%s): %s", d.Id(), err)) } - stateConf := &resource.StateChangeConf{ - Pending: []string{ - sagemaker.NotebookInstanceStatusStopped, - }, - Target: []string{sagemaker.NotebookInstanceStatusInService, sagemaker.NotebookInstanceStatusPending}, - Refresh: sagemakerNotebookInstanceStateRefreshFunc(conn, d.Id()), - Timeout: 30 * time.Second, - } - _, err := stateConf.WaitForState() + + _, err = stateConf.WaitForState() if err != nil { return resource.RetryableError(fmt.Errorf("error waiting for sagemaker notebook instance (%s) to start: %s", d.Id(), err)) } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.StartNotebookInstance(startOpts) + if err != nil { + return fmt.Errorf("error starting sagemaker notebook instance (%s): %s", d.Id(), err) + } + + _, err = stateConf.WaitForState() + } if err != nil { - return err + return fmt.Errorf("Error waiting for sagemaker notebook instance to start: %s", err) } - stateConf := &resource.StateChangeConf{ + stateConf = &resource.StateChangeConf{ Pending: []string{ sagemaker.NotebookInstanceStatusUpdating, sagemaker.NotebookInstanceStatusPending, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance_lifecycle_configuration.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance_lifecycle_configuration.go index 27a8990c131..1694cbbd5e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance_lifecycle_configuration.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sagemaker_notebook_instance_lifecycle_configuration.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret.go index 4a6d9ca8c37..d6ce9b179cd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret.go @@ -7,10 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSecretsManagerSecret() *schema.Resource { @@ -117,6 +117,11 @@ func resourceAwsSecretsManagerSecretCreate(d *schema.ResourceData, meta interfac Name: aws.String(secretName), } + if v, ok := d.GetOk("tags"); ok { + input.Tags = tagsFromMapSecretsManager(v.(map[string]interface{})) + log.Printf("[DEBUG] Tagging Secrets Manager Secret: %s", input.Tags) + } + if v, ok := d.GetOk("kms_key_id"); ok && v.(string) != "" { input.KmsKeyId = aws.String(v.(string)) } @@ -139,6 +144,9 @@ func resourceAwsSecretsManagerSecretCreate(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + output, err = conn.CreateSecret(input) + } if err != nil { return fmt.Errorf("error creating Secrets Manager Secret: %s", err) } @@ -177,21 +185,11 @@ func resourceAwsSecretsManagerSecretCreate(d *schema.ResourceData, meta interfac } return nil }) - if err != nil { - return fmt.Errorf("error enabling Secrets Manager Secret %q rotation: %s", d.Id(), err) + if isResourceTimeoutError(err) { + _, err = conn.RotateSecret(input) } - } - - if v, ok := d.GetOk("tags"); ok { - input := &secretsmanager.TagResourceInput{ - SecretId: aws.String(d.Id()), - Tags: tagsFromMapSecretsManager(v.(map[string]interface{})), - } - - log.Printf("[DEBUG] Tagging Secrets Manager Secret: %s", input) - _, err := 
conn.TagResource(input) if err != nil { - return fmt.Errorf("error tagging Secrets Manager Secret %q: %s", d.Id(), input) + return fmt.Errorf("error enabling Secrets Manager Secret %q rotation: %s", d.Id(), err) } } @@ -326,6 +324,9 @@ func resourceAwsSecretsManagerSecretUpdate(d *schema.ResourceData, meta interfac } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RotateSecret(input) + } if err != nil { return fmt.Errorf("error updating Secrets Manager Secret %q rotation: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret_version.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret_version.go index e90384f9066..882c0b05c7a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret_version.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_secretsmanager_secret_version.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSecretsManagerSecretVersion() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go index 887786bc835..d3519d65161 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go @@ -13,10 +13,10 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSecurityGroup() *schema.Resource { @@ -448,8 +448,8 @@ func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Security Group destroy: %v", d.Id()) - if err := deleteLingeringLambdaENIs(conn, d, "group-id"); err != nil { - return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) + if err := deleteLingeringLambdaENIs(conn, "group-id", d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error deleting Lambda ENIs using Security Group (%s): %s", d.Id(), err) } // conditionally revoke rules first before attempting to delete the group @@ -458,31 +458,33 @@ func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) er return err } } - - return resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - _, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{ - GroupId: aws.String(d.Id()), - }) + input := &ec2.DeleteSecurityGroupInput{ + 
GroupId: aws.String(d.Id()), + } + err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + _, err := conn.DeleteSecurityGroup(input) if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidGroup.NotFound": + if isAWSErr(err, "InvalidGroup.NotFound", "") { return nil - case "DependencyViolation": + } + if isAWSErr(err, "DependencyViolation", "") { // If it is a dependency violation, we want to retry return resource.RetryableError(err) - default: - // Any other error, we want to quit the retry loop immediately - return resource.NonRetryableError(err) } + return resource.NonRetryableError(err) } - return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteSecurityGroup(input) + if isAWSErr(err, "InvalidGroup.NotFound", "") { + return nil + } + } + if err != nil { + return fmt.Errorf("Error deleting security group: %s", err) + } + return nil } // Revoke all ingress/egress rules that a Security Group has @@ -1400,98 +1402,72 @@ func sgProtocolIntegers() map[string]int { // The AWS Lambda service creates ENIs behind the scenes and keeps these around for a while // which would prevent SGs attached to such ENIs from being destroyed -func deleteLingeringLambdaENIs(conn *ec2.EC2, d *schema.ResourceData, filterName string) error { - // Here we carefully find the offenders - params := &ec2.DescribeNetworkInterfacesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String(filterName), - Values: []*string{aws.String(d.Id())}, - }, - { - Name: aws.String("description"), - Values: []*string{aws.String("AWS Lambda VPC ENI: *")}, - }, - }, +func deleteLingeringLambdaENIs(conn *ec2.EC2, filterName, resourceId string, timeout time.Duration) error { + // AWS Lambda service team confirms P99 deletion time of ~35 minutes. Buffer for safety. + if minimumTimeout := 45 * time.Minute; timeout < minimumTimeout { + timeout = minimumTimeout } - networkInterfaceResp, err := conn.DescribeNetworkInterfaces(params) - if isAWSErr(err, "InvalidNetworkInterfaceID.NotFound", "") { - return nil - } + resp, err := conn.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + filterName: resourceId, + "description": "AWS Lambda VPC ENI*", + }), + }) if err != nil { - return err + return fmt.Errorf("error describing ENIs: %s", err) } - // Then we detach and finally delete those - v := networkInterfaceResp.NetworkInterfaces - for _, eni := range v { - if eni.Attachment != nil { - detachNetworkInterfaceParams := &ec2.DetachNetworkInterfaceInput{ - AttachmentId: eni.Attachment.AttachmentId, - } - _, detachNetworkInterfaceErr := conn.DetachNetworkInterface(detachNetworkInterfaceParams) + for _, eni := range resp.NetworkInterfaces { + eniId := aws.StringValue(eni.NetworkInterfaceId) - if isAWSErr(detachNetworkInterfaceErr, "InvalidNetworkInterfaceID.NotFound", "") { - return nil + if eni.Attachment != nil && aws.StringValue(eni.Attachment.InstanceOwnerId) == "amazon-aws" { + // Hyperplane attached ENI. + // Wait for it to be moved into a removable state. + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ec2.NetworkInterfaceStatusInUse, + }, + Target: []string{ + ec2.NetworkInterfaceStatusAvailable, + }, + Refresh: networkInterfaceStateRefresh(conn, eniId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + // Handle EC2 ENI eventual consistency. It can take up to 3 minutes. 
+ ContinuousTargetOccurence: 18, + NotFoundChecks: 1, } - if detachNetworkInterfaceErr != nil { - return detachNetworkInterfaceErr - } + eniRaw, err := stateConf.WaitForState() - log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", *eni.NetworkInterfaceId) - stateConf := &resource.StateChangeConf{ - Pending: []string{"true"}, - Target: []string{"false"}, - Refresh: networkInterfaceAttachedRefreshFunc(conn, *eni.NetworkInterfaceId), - Timeout: d.Timeout(schema.TimeoutDelete), - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for ENI (%s) to become detached: %s", *eni.NetworkInterfaceId, err) + if isResourceNotFoundError(err) { + continue } - } - - deleteNetworkInterfaceParams := &ec2.DeleteNetworkInterfaceInput{ - NetworkInterfaceId: eni.NetworkInterfaceId, - } - _, deleteNetworkInterfaceErr := conn.DeleteNetworkInterface(deleteNetworkInterfaceParams) - if isAWSErr(deleteNetworkInterfaceErr, "InvalidNetworkInterfaceID.NotFound", "") { - return nil - } + if err != nil { + return fmt.Errorf("error waiting for Lambda V2N ENI (%s) to become available for detachment: %s", eniId, err) + } - if deleteNetworkInterfaceErr != nil { - return deleteNetworkInterfaceErr + eni = eniRaw.(*ec2.NetworkInterface) } - } - - return nil -} -func networkInterfaceAttachedRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { + err = detachNetworkInterface(conn, eni, timeout) - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(id)}, + if err != nil { + return fmt.Errorf("error detaching Lambda ENI (%s): %s", eniId, err) } - describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - if isAWSErr(err, "InvalidNetworkInterfaceID.NotFound", "") { - return 42, "false", nil - } + err = deleteNetworkInterface(conn, eniId) if err != nil { - return nil, "", err + return fmt.Errorf("error deleting Lambda ENI (%s): %s", eniId, err) } - - eni := describeResp.NetworkInterfaces[0] - hasAttachment := strconv.FormatBool(eni.Attachment != nil) - log.Printf("[DEBUG] ENI %s has attachment state %s", id, hasAttachment) - return eni, hasAttachment, nil } + + return nil } func initSecurityGroupRule(ruleMap map[string]map[string]interface{}, perm *ec2.IpPermission, desc string) map[string]interface{} { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_migrate.go index 88357447f3c..57c33bc23be 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSecurityGroupMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go index 025f364f615..b9d4cdf73e6 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go @@ -12,10 +12,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSecurityGroupRule() *schema.Resource { @@ -221,10 +221,11 @@ information and instructions for recovery. Error message: %s`, sg_id, awsErr.Mes ruleType, autherr) } + var rules []*ec2.IpPermission id := ipPermissionIDHash(sg_id, ruleType, perm) log.Printf("[DEBUG] Computed group rule ID %s", id) - retErr := resource.Retry(5*time.Minute, func() *resource.RetryError { + err = resource.Retry(5*time.Minute, func() *resource.RetryError { sg, err := findResourceSecurityGroup(conn, sg_id) if err != nil { @@ -232,7 +233,6 @@ information and instructions for recovery. Error message: %s`, sg_id, awsErr.Mes return resource.NonRetryableError(err) } - var rules []*ec2.IpPermission switch ruleType { case "ingress": rules = sg.IpPermissions @@ -241,7 +241,6 @@ information and instructions for recovery. Error message: %s`, sg_id, awsErr.Mes } rule := findRuleMatch(perm, rules, isVPC) - if rule == nil { log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", ruleType, id, sg_id) @@ -251,10 +250,26 @@ information and instructions for recovery. 
Error message: %s`, sg_id, awsErr.Mes log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", id, rule) return nil }) + if isResourceTimeoutError(err) { + sg, err := findResourceSecurityGroup(conn, sg_id) + if err != nil { + return fmt.Errorf("Error finding security group: %s", err) + } - if retErr != nil { - return fmt.Errorf("Error finding matching %s Security Group Rule (%s) for Group %s", - ruleType, id, sg_id) + switch ruleType { + case "ingress": + rules = sg.IpPermissions + default: + rules = sg.IpPermissionsEgress + } + + rule := findRuleMatch(perm, rules, isVPC) + if rule == nil { + return fmt.Errorf("Error finding matching security group rule: %s", err) + } + } + if err != nil { + return fmt.Errorf("Error finding matching %s Security Group Rule (%s) for Group %s", ruleType, id, sg_id) } d.SetId(id) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go index 1b2cb21f8d6..81c60453fac 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSecurityGroupRuleMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_account.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_account.go index d0d8d1caf3f..edb1aa9771a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_account.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_account.go @@ -5,7 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSecurityHubAccount() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_product_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_product_subscription.go index dece3fc8b86..358aecb9b61 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_product_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_product_subscription.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSecurityHubProductSubscription() *schema.Resource { @@ -73,6 +73,7 @@ func resourceAwsSecurityHubProductSubscriptionRead(d *schema.ResourceData, meta if !exists { log.Printf("[WARN] 
Security Hub product subscriptions (%s) not found, removing from state", d.Id()) d.SetId("") + return nil } d.Set("product_arn", productArn) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_standards_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_standards_subscription.go index b40bf660a3d..5444842a410 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_standards_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_securityhub_standards_subscription.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSecurityHubStandardsSubscription() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_http_namespace.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_http_namespace.go index b6653809b58..c83dcdb0398 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_http_namespace.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_http_namespace.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/servicediscovery" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsServiceDiscoveryHttpNamespace() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_private_dns_namespace.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_private_dns_namespace.go index 24e899ec4b2..52da5e4907b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_private_dns_namespace.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_private_dns_namespace.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/servicediscovery" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsServiceDiscoveryPrivateDnsNamespace() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_public_dns_namespace.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_public_dns_namespace.go index 6020b071afc..7aa649ed036 100644 --- 
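The one-line `+ return nil` in the Security Hub product subscription read above is a small but real bug fix: once a Read discovers the remote object is gone and clears the ID, it must return immediately rather than fall through to the `d.Set` calls. A sketch of the pattern, with `exists` as a hypothetical existence check:

```go
package example

import (
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// readThing shows why the early return matters: without it, the attribute
// writes below would run against a resource Terraform is about to drop from
// state.
func readThing(d *schema.ResourceData, exists func() bool) error {
	if !exists() {
		log.Printf("[WARN] resource (%s) not found, removing from state", d.Id())
		d.SetId("")
		return nil // early return: skip the attribute writes below
	}
	return d.Set("some_attribute", "value")
}
```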
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_public_dns_namespace.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_public_dns_namespace.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/servicediscovery" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsServiceDiscoveryPublicDnsNamespace() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_service.go index ea8bc23ba19..70812c95a09 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_service_discovery_service.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/servicediscovery" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsServiceDiscoveryService() *schema.Resource { @@ -32,9 +32,15 @@ func resourceAwsServiceDiscoveryService() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "namespace_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, "dns_config": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -133,14 +139,22 @@ func resourceAwsServiceDiscoveryServiceCreate(d *schema.ResourceData, meta inter conn := meta.(*AWSClient).sdconn input := &servicediscovery.CreateServiceInput{ - Name: aws.String(d.Get("name").(string)), - DnsConfig: expandServiceDiscoveryDnsConfig(d.Get("dns_config").([]interface{})[0].(map[string]interface{})), + Name: aws.String(d.Get("name").(string)), + } + + dnsConfig := d.Get("dns_config").([]interface{}) + if len(dnsConfig) > 0 { + input.DnsConfig = expandServiceDiscoveryDnsConfig(dnsConfig[0].(map[string]interface{})) } if v, ok := d.GetOk("description"); ok { input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("namespace_id"); ok { + input.NamespaceId = aws.String(v.(string)) + } + hcconfig := d.Get("health_check_config").([]interface{}) if len(hcconfig) > 0 { input.HealthCheckConfig = expandServiceDiscoveryHealthCheckConfig(hcconfig[0].(map[string]interface{})) @@ -182,6 +196,7 @@ func resourceAwsServiceDiscoveryServiceRead(d *schema.ResourceData, meta interfa d.Set("arn", service.Arn) d.Set("name", service.Name) d.Set("description", service.Description) + d.Set("namespace_id", service.NamespaceId) d.Set("dns_config", flattenServiceDiscoveryDnsConfig(service.DnsConfig)) d.Set("health_check_config", flattenServiceDiscoveryHealthCheckConfig(service.HealthCheckConfig)) 
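In the service discovery service hunk above, `dns_config` moves from `Required` to `Optional`, so the create path can no longer index `[0]` unconditionally: `d.Get` on an absent `TypeList` yields an empty slice, and blind indexing would panic. A sketch of the length guard, with illustrative (not the provider's) names:

```go
package example

// buildInput guards the first-element access on an optional TypeList block.
func buildInput(raw []interface{}) map[string]interface{} {
	input := map[string]interface{}{}
	if len(raw) > 0 && raw[0] != nil {
		// Only reach into the first element when the block was actually set.
		input["dns_config"] = raw[0].(map[string]interface{})
	}
	return input
}
```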
d.Set("health_check_custom_config", flattenServiceDiscoveryHealthCheckCustomConfig(service.HealthCheckCustomConfig)) @@ -202,6 +217,7 @@ func resourceAwsServiceDiscoveryServiceUpdate(d *schema.ResourceData, meta inter if d.HasChange("description") { sc.Description = aws.String(d.Get("description").(string)) } + if d.HasChange("health_check_config") { hcconfig := d.Get("health_check_config").([]interface{}) sc.HealthCheckConfig = expandServiceDiscoveryHealthCheckConfig(hcconfig[0].(map[string]interface{})) @@ -265,18 +281,32 @@ func expandServiceDiscoveryDnsConfig(configured map[string]interface{}) *service } func flattenServiceDiscoveryDnsConfig(config *servicediscovery.DnsConfig) []map[string]interface{} { + if config == nil { + return nil + } + result := map[string]interface{}{} - result["namespace_id"] = *config.NamespaceId - result["routing_policy"] = *config.RoutingPolicy - drs := make([]map[string]interface{}, 0) - for _, v := range config.DnsRecords { - dr := map[string]interface{}{} - dr["ttl"] = *v.TTL - dr["type"] = *v.Type - drs = append(drs, dr) + if config.NamespaceId != nil { + result["namespace_id"] = *config.NamespaceId + } + if config.RoutingPolicy != nil { + result["routing_policy"] = *config.RoutingPolicy + } + if config.DnsRecords != nil { + drs := make([]map[string]interface{}, 0) + for _, v := range config.DnsRecords { + dr := map[string]interface{}{} + dr["ttl"] = *v.TTL + dr["type"] = *v.Type + drs = append(drs, dr) + } + result["dns_records"] = drs + } + + if len(result) < 1 { + return nil } - result["dns_records"] = drs return []map[string]interface{}{result} } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicecatalog_portfolio.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicecatalog_portfolio.go index 1fe71297251..102618d23a3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicecatalog_portfolio.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicecatalog_portfolio.go @@ -8,8 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/servicecatalog" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsServiceCatalogPortfolio() *schema.Resource { @@ -61,12 +62,10 @@ func resourceAwsServiceCatalogPortfolio() *schema.Resource { func resourceAwsServiceCatalogPortfolioCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).scconn input := servicecatalog.CreatePortfolioInput{ - AcceptLanguage: aws.String("en"), + AcceptLanguage: aws.String("en"), + DisplayName: aws.String(d.Get("name").(string)), + IdempotencyToken: aws.String(resource.UniqueId()), } - name := d.Get("name").(string) - input.DisplayName = &name - now := time.Now() - input.IdempotencyToken = aws.String(fmt.Sprintf("%d", now.UnixNano())) if v, ok := d.GetOk("description"); ok { input.Description = aws.String(v.(string)) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicequotas_service_quota.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicequotas_service_quota.go new file mode 100644 index 00000000000..a83ddcffd72 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_servicequotas_service_quota.go @@ -0,0 +1,240 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/servicequotas" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsServiceQuotasServiceQuota() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsServiceQuotasServiceQuotaCreate, + Read: resourceAwsServiceQuotasServiceQuotaRead, + Update: resourceAwsServiceQuotasServiceQuotaUpdate, + Delete: schema.Noop, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "adjustable": { + Type: schema.TypeBool, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "default_value": { + Type: schema.TypeFloat, + Computed: true, + }, + "quota_code": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "quota_name": { + Type: schema.TypeString, + Computed: true, + }, + "request_id": { + Type: schema.TypeString, + Computed: true, + }, + "request_status": { + Type: schema.TypeString, + Computed: true, + }, + "service_code": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeFloat, + Required: true, + }, + }, + } +} + +func resourceAwsServiceQuotasServiceQuotaCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).servicequotasconn + + quotaCode := d.Get("quota_code").(string) + serviceCode := d.Get("service_code").(string) + value := d.Get("value").(float64) + + d.SetId(fmt.Sprintf("%s/%s", serviceCode, quotaCode)) + + input := &servicequotas.GetServiceQuotaInput{ + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + output, err := conn.GetServiceQuota(input) + + if err != nil { + return fmt.Errorf("error getting Service Quotas Service Quota (%s): %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error getting Service Quotas Service Quota (%s): empty result", d.Id()) + } + + if value > aws.Float64Value(output.Quota.Value) { + input := &servicequotas.RequestServiceQuotaIncreaseInput{ + DesiredValue: aws.Float64(value), + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + output, err := conn.RequestServiceQuotaIncrease(input) + + if err != nil { + return fmt.Errorf("error requesting Service Quota (%s) increase: %s", d.Id(), err) + } + + if output == nil || output.RequestedQuota == nil { + return fmt.Errorf("error requesting Service Quota (%s) increase: empty result", d.Id()) + } + + d.Set("request_id", output.RequestedQuota.Id) + } + + return resourceAwsServiceQuotasServiceQuotaRead(d, meta) +} + +func resourceAwsServiceQuotasServiceQuotaRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).servicequotasconn + + serviceCode, quotaCode, err := resourceAwsServiceQuotasServiceQuotaParseID(d.Id()) + + if err != nil { + return err + } + + input := &servicequotas.GetServiceQuotaInput{ + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + output, err := conn.GetServiceQuota(input) + + if err != nil { 
+ return fmt.Errorf("error getting Service Quotas Service Quota (%s): %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error getting Service Quotas Service Quota (%s): empty result", d.Id()) + } + + defaultInput := &servicequotas.GetAWSDefaultServiceQuotaInput{ + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + defaultOutput, err := conn.GetAWSDefaultServiceQuota(defaultInput) + + if err != nil { + return fmt.Errorf("error getting Service Quotas Default Service Quota (%s): %s", d.Id(), err) + } + + if defaultOutput == nil { + return fmt.Errorf("error getting Service Quotas Default Service Quota (%s): empty result", d.Id()) + } + + d.Set("adjustable", output.Quota.Adjustable) + d.Set("arn", output.Quota.QuotaArn) + d.Set("default_value", defaultOutput.Quota.Value) + d.Set("quota_code", output.Quota.QuotaCode) + d.Set("quota_name", output.Quota.QuotaName) + d.Set("service_code", output.Quota.ServiceCode) + d.Set("service_name", output.Quota.ServiceName) + d.Set("value", output.Quota.Value) + + requestID := d.Get("request_id").(string) + + if requestID != "" { + input := &servicequotas.GetRequestedServiceQuotaChangeInput{ + RequestId: aws.String(requestID), + } + + output, err := conn.GetRequestedServiceQuotaChange(input) + + if isAWSErr(err, servicequotas.ErrCodeNoSuchResourceException, "") { + d.Set("request_id", "") + d.Set("request_status", "") + return nil + } + + if err != nil { + return fmt.Errorf("error getting Service Quotas Requested Service Quota Change (%s): %s", requestID, err) + } + + if output == nil || output.RequestedQuota == nil { + return fmt.Errorf("error getting Service Quotas Requested Service Quota Change (%s): empty result", requestID) + } + + requestStatus := aws.StringValue(output.RequestedQuota.Status) + d.Set("request_status", requestStatus) + + switch requestStatus { + case servicequotas.RequestStatusApproved, servicequotas.RequestStatusCaseClosed, servicequotas.RequestStatusDenied: + d.Set("request_id", "") + case servicequotas.RequestStatusCaseOpened, servicequotas.RequestStatusPending: + d.Set("value", output.RequestedQuota.DesiredValue) + } + } + + return nil +} + +func resourceAwsServiceQuotasServiceQuotaUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).servicequotasconn + + value := d.Get("value").(float64) + serviceCode, quotaCode, err := resourceAwsServiceQuotasServiceQuotaParseID(d.Id()) + + if err != nil { + return err + } + + input := &servicequotas.RequestServiceQuotaIncreaseInput{ + DesiredValue: aws.Float64(value), + QuotaCode: aws.String(quotaCode), + ServiceCode: aws.String(serviceCode), + } + + output, err := conn.RequestServiceQuotaIncrease(input) + + if err != nil { + return fmt.Errorf("error requesting Service Quota (%s) increase: %s", d.Id(), err) + } + + if output == nil || output.RequestedQuota == nil { + return fmt.Errorf("error requesting Service Quota (%s) increase: empty result", d.Id()) + } + + d.Set("request_id", output.RequestedQuota.Id) + + return resourceAwsServiceQuotasServiceQuotaRead(d, meta) +} + +func resourceAwsServiceQuotasServiceQuotaParseID(id string) (string, string, error) { + parts := strings.SplitN(id, "/", 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%s), expected SERVICE-CODE/QUOTA-CODE", id) + } + + return parts[0], parts[1], nil +} diff --git 
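The new service quota resource above encodes two API keys into a single Terraform ID, `SERVICE-CODE/QUOTA-CODE`, which is what lets it use plain `schema.ImportStatePassthrough` instead of a custom importer. A sketch of the round trip (names are illustrative):

```go
package example

import (
	"fmt"
	"strings"
)

// formatID builds the composite Terraform ID at create time.
func formatID(serviceCode, quotaCode string) string {
	return fmt.Sprintf("%s/%s", serviceCode, quotaCode)
}

// parseID recovers both keys in Read/Update/Import.
func parseID(id string) (serviceCode, quotaCode string, err error) {
	parts := strings.SplitN(id, "/", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("unexpected format of ID (%s), expected SERVICE-CODE/QUOTA-CODE", id)
	}
	return parts[0], parts[1], nil
}
```

With this scheme, an import command would pass an ID such as `vpc/L-F678F1CE` (an illustrative service/quota pair) straight through to state.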
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go index 21534257b6f..7e211d7ecf6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesActiveReceiptRuleSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go index 04a637e78ee..ba1ed3fa7e5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesConfigurationSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_dkim.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_dkim.go index abde46bf9bb..14103518b10 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_dkim.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_dkim.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesDomainDkim() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go index 84579dd3adf..6ed5dcad65f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesDomainIdentity() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity_verification.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity_verification.go index c7b546723e6..d10043a895d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity_verification.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity_verification.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesDomainIdentityVerification() *schema.Resource { @@ -73,8 +73,16 @@ func resourceAwsSesDomainIdentityVerificationCreate(d *schema.ResourceData, meta return nil }) + if isResourceTimeoutError(err) { + var att *ses.IdentityVerificationAttributes + att, err = getAwsSesIdentityVerificationAttributes(conn, domainName) + + if att != nil && aws.StringValue(att.VerificationStatus) != ses.VerificationStatusSuccess { + return fmt.Errorf("Expected domain verification Success, but was in state %s", aws.StringValue(att.VerificationStatus)) + } + } if err != nil { - return err + return fmt.Errorf("Error creating SES domain identity verification: %s", err) } log.Printf("[INFO] Domain verification successful for %s", domainName) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_mail_from.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_mail_from.go index 3174702039c..9e4e75b92dd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_mail_from.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_mail_from.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesDomainMailFrom() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_email_identity.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_email_identity.go new file mode 100644 index 00000000000..7d323a89fe5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_email_identity.go @@ -0,0 +1,111 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsSesEmailIdentity() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesEmailIdentityCreate, + Read: resourceAwsSesEmailIdentityRead, + Delete: resourceAwsSesEmailIdentityDelete, + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + return strings.TrimSuffix(v.(string), ".") + }, + }, + }, + } +} + +func resourceAwsSesEmailIdentityCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + email := d.Get("email").(string) + email = strings.TrimSuffix(email, ".") + + createOpts := &ses.VerifyEmailIdentityInput{ + EmailAddress: aws.String(email), + } + + _, err := conn.VerifyEmailIdentity(createOpts) + if err != nil { + return fmt.Errorf("Error requesting SES email identity verification: %s", err) + } + + d.SetId(email) + + return resourceAwsSesEmailIdentityRead(d, meta) +} + +func resourceAwsSesEmailIdentityRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + email := d.Id() + d.Set("email", email) + + readOpts := &ses.GetIdentityVerificationAttributesInput{ + Identities: []*string{ + aws.String(email), + }, + } + + response, err := conn.GetIdentityVerificationAttributes(readOpts) + if err != nil { + log.Printf("[WARN] Error fetching identity verification attributes for %s: %s", d.Id(), err) + return err + } + + _, ok := response.VerificationAttributes[email] + if !ok { + log.Printf("[WARN] Email not listed in response when fetching verification attributes for %s", d.Id()) + d.SetId("") + return nil + } + + arn := arn.ARN{ + AccountID: meta.(*AWSClient).accountid, + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("identity/%s", d.Id()), + Service: "ses", + }.String() + d.Set("arn", arn) + return nil +} + +func resourceAwsSesEmailIdentityDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + email := d.Get("email").(string) + + deleteOpts := &ses.DeleteIdentityInput{ + Identity: aws.String(email), + } + + _, err := conn.DeleteIdentity(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting SES email identity: %s", err) + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go index d7ec3445625..bb5447cdc57 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSesEventDestination() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_notification_topic.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_notification_topic.go index 7557aad8a81..9c772b34a88 100644 --- 
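The new SES email identity resource above trims a trailing `.` both in the schema's `StateFunc` and again before the API call, so `user@example.com.` and `user@example.com` converge to one canonical value and never produce a perpetual plan diff. A sketch of the `StateFunc` normalization:

```go
package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// emailSchema canonicalizes the configured value before it is stored in
// state, so trailing-dot variants of the same address compare equal.
func emailSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		ForceNew: true,
		StateFunc: func(v interface{}) string {
			// "user@example.com." and "user@example.com" normalize identically.
			return strings.TrimSuffix(v.(string), ".")
		},
	}
}
```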
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_notification_topic.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_notification_topic.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSesNotificationTopic() *schema.Resource { @@ -45,6 +45,11 @@ func resourceAwsSesNotificationTopic() *schema.Resource { ForceNew: true, ValidateFunc: validation.NoZeroValues, }, + + "include_original_headers": { + Type: schema.TypeBool, + Optional: true, + }, }, } } @@ -53,6 +58,7 @@ func resourceAwsSesNotificationTopicSet(d *schema.ResourceData, meta interface{} conn := meta.(*AWSClient).sesConn notification := d.Get("notification_type").(string) identity := d.Get("identity").(string) + includeOriginalHeaders := d.Get("include_original_headers").(bool) setOpts := &ses.SetIdentityNotificationTopicInput{ Identity: aws.String(identity), @@ -71,6 +77,18 @@ func resourceAwsSesNotificationTopicSet(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error setting SES Identity Notification Topic: %s", err) } + setHeadersOpts := &ses.SetIdentityHeadersInNotificationsEnabledInput{ + Identity: aws.String(identity), + NotificationType: aws.String(notification), + Enabled: aws.Bool(includeOriginalHeaders), + } + + log.Printf("[DEBUG] Setting SES Identity Notification Topic Headers: %#v", setHeadersOpts) + + if _, err := conn.SetIdentityHeadersInNotificationsEnabled(setHeadersOpts); err != nil { + return fmt.Errorf("Error setting SES Identity Notification Topic Headers Forwarding: %s", err) + } + return resourceAwsSesNotificationTopicRead(d, meta) } @@ -110,10 +128,13 @@ func resourceAwsSesNotificationTopicRead(d *schema.ResourceData, meta interface{ switch notificationType { case ses.NotificationTypeBounce: d.Set("topic_arn", notificationAttributes.BounceTopic) + d.Set("include_original_headers", notificationAttributes.HeadersInBounceNotificationsEnabled) case ses.NotificationTypeComplaint: d.Set("topic_arn", notificationAttributes.ComplaintTopic) + d.Set("include_original_headers", notificationAttributes.HeadersInComplaintNotificationsEnabled) case ses.NotificationTypeDelivery: d.Set("topic_arn", notificationAttributes.DeliveryTopic) + d.Set("include_original_headers", notificationAttributes.HeadersInDeliveryNotificationsEnabled) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_policy.go new file mode 100644 index 00000000000..ad5786faefe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_identity_policy.go @@ -0,0 +1,166 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsSesIdentityPolicy() *schema.Resource { + return &schema.Resource{ + 
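The `include_original_headers` addition above has to fan out on read because SES reports header forwarding through three per-notification-type fields on `IdentityNotificationAttributes`. A sketch of the mapping the Read switch performs:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// headersEnabled selects the attribute field matching the resource's
// notification_type.
func headersEnabled(attrs *ses.IdentityNotificationAttributes, notificationType string) bool {
	switch notificationType {
	case ses.NotificationTypeBounce:
		return aws.BoolValue(attrs.HeadersInBounceNotificationsEnabled)
	case ses.NotificationTypeComplaint:
		return aws.BoolValue(attrs.HeadersInComplaintNotificationsEnabled)
	case ses.NotificationTypeDelivery:
		return aws.BoolValue(attrs.HeadersInDeliveryNotificationsEnabled)
	}
	return false
}
```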
Create: resourceAwsSesIdentityPolicyCreate, + Read: resourceAwsSesIdentityPolicyRead, + Update: resourceAwsSesIdentityPolicyUpdate, + Delete: resourceAwsSesIdentityPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "identity": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9\-\_]+$`), "must contain only alphanumeric characters, dashes, and underscores"), + ), + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.ValidateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + }, + } +} + +func resourceAwsSesIdentityPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + identity := d.Get("identity").(string) + policyName := d.Get("name").(string) + + input := &ses.PutIdentityPolicyInput{ + Identity: aws.String(identity), + PolicyName: aws.String(policyName), + Policy: aws.String(d.Get("policy").(string)), + } + + _, err := conn.PutIdentityPolicy(input) + if err != nil { + return fmt.Errorf("error creating SES Identity (%s) Policy: %s", identity, err) + } + + d.SetId(fmt.Sprintf("%s|%s", identity, policyName)) + + return resourceAwsSesIdentityPolicyRead(d, meta) +} + +func resourceAwsSesIdentityPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + identity, policyName, err := resourceAwsSesIdentityPolicyParseID(d.Id()) + if err != nil { + return err + } + + req := ses.PutIdentityPolicyInput{ + Identity: aws.String(identity), + PolicyName: aws.String(policyName), + Policy: aws.String(d.Get("policy").(string)), + } + + _, err = conn.PutIdentityPolicy(&req) + if err != nil { + return fmt.Errorf("error updating SES Identity (%s) Policy (%s): %s", identity, policyName, err) + } + + return resourceAwsSesIdentityPolicyRead(d, meta) +} + +func resourceAwsSesIdentityPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + identity, policyName, err := resourceAwsSesIdentityPolicyParseID(d.Id()) + if err != nil { + return err + } + + input := &ses.GetIdentityPoliciesInput{ + Identity: aws.String(identity), + PolicyNames: aws.StringSlice([]string{policyName}), + } + + output, err := conn.GetIdentityPolicies(input) + + if err != nil { + return fmt.Errorf("error getting SES Identity (%s) Policy (%s): %s", identity, policyName, err) + } + + if output == nil { + return fmt.Errorf("error getting SES Identity (%s) Policy (%s): empty result", identity, policyName) + } + + if len(output.Policies) == 0 { + log.Printf("[WARN] SES Identity (%s) Policy (%s) not found, removing from state", identity, policyName) + d.SetId("") + return nil + } + + policy, ok := output.Policies[policyName] + if !ok { + log.Printf("[WARN] SES Identity (%s) Policy (%s) not found, removing from state", identity, policyName) + d.SetId("") + return nil + } + + d.Set("identity", identity) + d.Set("name", policyName) + d.Set("policy", policy) + + return nil +} + +func resourceAwsSesIdentityPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + identity, policyName, err := resourceAwsSesIdentityPolicyParseID(d.Id()) + if err != nil { + return err + } + + input := &ses.DeleteIdentityPolicyInput{ + 
Identity: aws.String(identity), + PolicyName: aws.String(policyName), + } + + log.Printf("[DEBUG] Deleting SES Identity Policy: %s", input) + _, err = conn.DeleteIdentityPolicy(input) + + if err != nil { + return fmt.Errorf("error deleting SES Identity (%s) Policy (%s): %s", identity, policyName, err) + } + + return nil +} + +func resourceAwsSesIdentityPolicyParseID(id string) (string, string, error) { + idParts := strings.SplitN(id, "|", 2) + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%s), expected IDENTITY|NAME", id) + } + return idParts[0], idParts[1], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go index ab87dd21324..7246be6581b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesReceiptFilter() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go index fa98d8bcecb..e38e227ecc1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSesReceiptRule() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go index a2ae91917a3..b2a69e87a04 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSesReceiptRuleSet() *schema.Resource { @@ -48,16 +48,22 @@ func resourceAwsSesReceiptRuleSetCreate(d *schema.ResourceData, meta interface{} } func 
resourceAwsSesReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) error { - ruleSetExists, err := findRuleSet(d.Id(), nil, meta) + conn := meta.(*AWSClient).sesConn + + input := &ses.DescribeReceiptRuleSetInput{ + RuleSetName: aws.String(d.Id()), + } + + _, err := conn.DescribeReceiptRuleSet(input) - if !ruleSetExists { - log.Printf("[WARN] SES Receipt Rule Set (%s) not found", d.Id()) + if isAWSErr(err, ses.ErrCodeRuleSetDoesNotExistException, "") { + log.Printf("[WARN] SES Receipt Rule Set (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return err + return fmt.Errorf("error describing SES Receipt Rule Set (%s): %s", d.Id(), err) } d.Set("rule_set_name", d.Id()) @@ -75,30 +81,3 @@ func resourceAwsSesReceiptRuleSetDelete(d *schema.ResourceData, meta interface{} return err } - -func findRuleSet(name string, token *string, meta interface{}) (bool, error) { - conn := meta.(*AWSClient).sesConn - - ruleSetExists := false - - listOpts := &ses.ListReceiptRuleSetsInput{ - NextToken: token, - } - - response, err := conn.ListReceiptRuleSets(listOpts) - for _, element := range response.RuleSets { - if *element.Name == name { - ruleSetExists = true - } - } - - if err != nil && !ruleSetExists && response.NextToken != nil { - ruleSetExists, err = findRuleSet(name, response.NextToken, meta) - } - - if err != nil { - return false, err - } - - return ruleSetExists, nil -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_template.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_template.go index 326a6d0a3c6..fa5f7221b18 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_template.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_template.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSesTemplate() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go index 496b7a8bd31..7f01931d292 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSfnActivity() *schema.Resource { @@ -46,6 +46,7 @@ func resourceAwsSfnActivityCreate(d *schema.ResourceData, meta interface{}) erro params := 
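The receipt rule set refactor above drops the recursive `findRuleSet` pagination over `ListReceiptRuleSets` in favor of a single `DescribeReceiptRuleSet` call, where absence arrives as a typed error code instead of an exhaustive scan. A sketch of the existence check; `isAWSErr` is a reconstruction of the provider-internal helper, not the vendored source:

```go
package example

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ses"
	"github.com/aws/aws-sdk-go/service/ses/sesiface"
)

// isAWSErr (reconstructed) matches an awserr.Error by code and optional
// message substring.
func isAWSErr(err error, code, message string) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == code && strings.Contains(awsErr.Message(), message)
	}
	return false
}

// ruleSetExists makes one Describe request; "does not exist" is distinguished
// from real errors via the service's typed error code.
func ruleSetExists(conn sesiface.SESAPI, name string) (bool, error) {
	_, err := conn.DescribeReceiptRuleSet(&ses.DescribeReceiptRuleSetInput{
		RuleSetName: aws.String(name),
	})
	if isAWSErr(err, ses.ErrCodeRuleSetDoesNotExistException, "") {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```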
&sfn.CreateActivityInput{ Name: aws.String(d.Get("name").(string)), + Tags: tagsFromMapSfn(d.Get("tags").(map[string]interface{})), } activity, err := conn.CreateActivity(params) @@ -55,17 +56,6 @@ func resourceAwsSfnActivityCreate(d *schema.ResourceData, meta interface{}) erro d.SetId(*activity.ActivityArn) - if v, ok := d.GetOk("tags"); ok { - input := &sfn.TagResourceInput{ - ResourceArn: aws.String(d.Id()), - Tags: tagsFromMapSfn(v.(map[string]interface{})), - } - log.Printf("[DEBUG] Tagging SFN Activity: %s", input) - _, err := conn.TagResource(input) - if err != nil { - return fmt.Errorf("error tagging SFN Activity (%s): %s", d.Id(), input) - } - } return resourceAwsSfnActivityRead(d, meta) } @@ -153,10 +143,11 @@ func resourceAwsSfnActivityDelete(d *schema.ResourceData, meta interface{}) erro conn := meta.(*AWSClient).sfnconn log.Printf("[DEBUG] Deleting Step Functions Activity: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteActivity(&sfn.DeleteActivityInput{ - ActivityArn: aws.String(d.Id()), - }) + input := &sfn.DeleteActivityInput{ + ActivityArn: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteActivity(input) if err == nil { return nil @@ -164,4 +155,11 @@ func resourceAwsSfnActivityDelete(d *schema.ResourceData, meta interface{}) erro return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteActivity(input) + } + if err != nil { + return fmt.Errorf("Error deleting SFN Activity: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go index 81953e17101..1d735651982 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSfnStateMachine() *schema.Resource { @@ -65,6 +65,7 @@ func resourceAwsSfnStateMachineCreate(d *schema.ResourceData, meta interface{}) Definition: aws.String(d.Get("definition").(string)), Name: aws.String(d.Get("name").(string)), RoleArn: aws.String(d.Get("role_arn").(string)), + Tags: tagsFromMapSfn(d.Get("tags").(map[string]interface{})), } var activity *sfn.CreateStateMachineOutput @@ -86,6 +87,9 @@ func resourceAwsSfnStateMachineCreate(d *schema.ResourceData, meta interface{}) return nil }) + if isResourceTimeoutError(err) { + activity, err = conn.CreateStateMachine(params) + } if err != nil { return fmt.Errorf("Error creating Step Function State Machine: %s", err) @@ -93,17 +97,6 @@ func resourceAwsSfnStateMachineCreate(d *schema.ResourceData, meta interface{}) d.SetId(*activity.StateMachineArn) - if v, ok := d.GetOk("tags"); ok { - input := &sfn.TagResourceInput{ - 
ResourceArn: aws.String(d.Id()), - Tags: tagsFromMapSfn(v.(map[string]interface{})), - } - log.Printf("[DEBUG] Tagging SFN State Machine: %s", input) - _, err := conn.TagResource(input) - if err != nil { - return fmt.Errorf("error tagging SFN State Machine (%s): %s", d.Id(), input) - } - } return resourceAwsSfnStateMachineRead(d, meta) } @@ -212,7 +205,6 @@ func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) } } } - return resourceAwsSfnStateMachineRead(d, meta) } @@ -220,10 +212,11 @@ func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).sfnconn log.Printf("[DEBUG] Deleting Step Function State Machine: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteStateMachine(&sfn.DeleteStateMachineInput{ - StateMachineArn: aws.String(d.Id()), - }) + input := &sfn.DeleteStateMachineInput{ + StateMachineArn: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteStateMachine(input) if err == nil { return nil @@ -231,4 +224,11 @@ func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteStateMachine(input) + } + if err != nil { + return fmt.Errorf("Error deleting SFN state machine: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_shield_protection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_shield_protection.go new file mode 100644 index 00000000000..114693bbf26 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_shield_protection.go @@ -0,0 +1,85 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/shield" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAwsShieldProtection() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsShieldProtectionCreate, + Read: resourceAwsShieldProtectionRead, + Delete: resourceAwsShieldProtectionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "resource_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + }, + } +} + +func resourceAwsShieldProtectionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).shieldconn + + input := &shield.CreateProtectionInput{ + Name: aws.String(d.Get("name").(string)), + ResourceArn: aws.String(d.Get("resource_arn").(string)), + } + + resp, err := conn.CreateProtection(input) + if err != nil { + return fmt.Errorf("error creating Shield Protection: %s", err) + } + d.SetId(*resp.ProtectionId) + return resourceAwsShieldProtectionRead(d, meta) +} + +func resourceAwsShieldProtectionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).shieldconn + + input := &shield.DescribeProtectionInput{ + ProtectionId: aws.String(d.Id()), + } + + resp, err := conn.DescribeProtection(input) + if err != nil { + return fmt.Errorf("error reading Shield Protection (%s): %s", d.Id(), err) + } + d.Set("name", resp.Protection.Name) + 
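Both SFN hunks above move tagging from a follow-up `TagResource` call into the `Tags` field of the Create input itself, which removes one API round trip and the brief window where the resource exists untagged. A sketch, with `tagsFromMap` as a stand-in for the provider's `tagsFromMapSfn` helper:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sfn"
)

// tagsFromMap is a stand-in for the provider's tagsFromMapSfn helper.
func tagsFromMap(m map[string]interface{}) []*sfn.Tag {
	tags := make([]*sfn.Tag, 0, len(m))
	for k, v := range m {
		tags = append(tags, &sfn.Tag{
			Key:   aws.String(k),
			Value: aws.String(v.(string)),
		})
	}
	return tags
}

// newCreateInput shows the shape of the change: tags ride along on the
// CreateActivity call, so no separate TagResource request is needed.
func newCreateInput(name string, tags map[string]interface{}) *sfn.CreateActivityInput {
	return &sfn.CreateActivityInput{
		Name: aws.String(name),
		Tags: tagsFromMap(tags),
	}
}
```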
d.Set("resource_arn", resp.Protection.ResourceArn) + return nil +} + +func resourceAwsShieldProtectionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).shieldconn + + input := &shield.DeleteProtectionInput{ + ProtectionId: aws.String(d.Id()), + } + + _, err := conn.DeleteProtection(input) + + if isAWSErr(err, shield.ErrCodeResourceNotFoundException, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting Shield Protection (%s): %s", d.Id(), err) + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go index 53f3024915c..17f4ff05b20 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go index ed8e757c281..6b937e1dc68 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go @@ -2,12 +2,13 @@ package aws import ( "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSnapshotCreateVolumePermission() *schema.Resource { @@ -35,9 +36,11 @@ func resourceAwsSnapshotCreateVolumePermission() *schema.Resource { func resourceAwsSnapshotCreateVolumePermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) { conn := meta.(*AWSClient).ec2conn - snapshot_id := d.Get("snapshot_id").(string) - account_id := d.Get("account_id").(string) - return hasCreateVolumePermission(conn, snapshot_id, account_id) + snapshotID, accountID, err := resourceAwsSnapshotCreateVolumePermissionParseID(d.Id()) + if err != nil { + return false, err + } + return hasCreateVolumePermission(conn, snapshotID, accountID) } func resourceAwsSnapshotCreateVolumePermissionCreate(d *schema.ResourceData, meta interface{}) error { @@ -86,15 +89,17 @@ func resourceAwsSnapshotCreateVolumePermissionRead(d *schema.ResourceData, meta func resourceAwsSnapshotCreateVolumePermissionDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - snapshot_id := d.Get("snapshot_id").(string) - account_id := d.Get("account_id").(string) + snapshotID, accountID, err := resourceAwsSnapshotCreateVolumePermissionParseID(d.Id()) + if err != nil { + return err + } - _, err := 
conn.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{ - SnapshotId: aws.String(snapshot_id), + _, err = conn.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String(snapshotID), Attribute: aws.String("createVolumePermission"), CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ Remove: []*ec2.CreateVolumePermission{ - {UserId: aws.String(account_id)}, + {UserId: aws.String(accountID)}, }, }, }) @@ -106,7 +111,7 @@ func resourceAwsSnapshotCreateVolumePermissionDelete(d *schema.ResourceData, met stateConf := &resource.StateChangeConf{ Pending: []string{"granted"}, Target: []string{"denied"}, - Refresh: resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshot_id, account_id), + Refresh: resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshotID, accountID), Timeout: 5 * time.Minute, Delay: 10 * time.Second, MinTimeout: 10 * time.Second, @@ -143,10 +148,18 @@ func resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn *ec2.EC2, sn } for _, vp := range attrs.CreateVolumePermissions { - if *vp.UserId == account_id { + if aws.StringValue(vp.UserId) == account_id { return attrs, "granted", nil } } return attrs, "denied", nil } } + +func resourceAwsSnapshotCreateVolumePermissionParseID(id string) (string, string, error) { + idParts := strings.SplitN(id, "-", 3) + if len(idParts) != 3 || idParts[0] != "snap" || idParts[1] == "" || idParts[2] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%s), expected SNAPSHOT_ID-ACCOUNT_ID", id) + } + return fmt.Sprintf("%s-%s", idParts[0], idParts[1]), idParts[2], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_platform_application.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_platform_application.go index b4afc8f991b..596840c2112 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_platform_application.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_platform_application.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) var snsPlatformRequiresPlatformPrincipal = map[string]bool{ @@ -174,6 +174,9 @@ func resourceAwsSnsPlatformApplicationUpdate(d *schema.ResourceData, meta interf } return nil }) + if isResourceTimeoutError(err) { + _, err = snsconn.SetPlatformApplicationAttributes(req) + } if err != nil { return fmt.Errorf("Error updating SNS platform application: %s", err) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_sms_preferences.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_sms_preferences.go index 6892ec250be..e82ad8ab8c2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_sms_preferences.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_sms_preferences.go @@ -7,8 +7,8 @@ import ( 
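The new parser above handles a composite ID of the form `SNAPSHOT_ID-ACCOUNT_ID`, which is trickier than the usual case because the snapshot ID itself contains the separator (`snap-0123456789abcdef0`): it splits into three pieces and stitches the first two back together. Deriving both values from `d.Id()` instead of `d.Get` is also what makes the Exists and Delete functions safe for imported resources, whose attributes may not be populated yet. A sketch:

```go
package example

import (
	"fmt"
	"strings"
)

// parseSnapshotPermissionID splits "snap-0123456789abcdef0-123456789012"
// into ("snap-0123456789abcdef0", "123456789012").
func parseSnapshotPermissionID(id string) (snapshotID, accountID string, err error) {
	parts := strings.SplitN(id, "-", 3)
	if len(parts) != 3 || parts[0] != "snap" || parts[1] == "" || parts[2] == "" {
		return "", "", fmt.Errorf("unexpected format of ID (%s), expected SNAPSHOT_ID-ACCOUNT_ID", id)
	}
	return fmt.Sprintf("%s-%s", parts[0], parts[1]), parts[2], nil
}
```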
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func validateMonthlySpend(v interface{}, k string) (ws []string, errors []error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go index df1006e5ea5..a3f593d6e18 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go @@ -7,10 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) // Mutable attributes @@ -144,13 +144,14 @@ func resourceAwsSnsTopic() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tagsSchema(), }, } } func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error { snsconn := meta.(*AWSClient).snsconn - + tags := tagsFromMapSNS(d.Get("tags").(map[string]interface{})) var name string if v, ok := d.GetOk("name"); ok { name = v.(string) @@ -164,6 +165,7 @@ func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error { req := &sns.CreateTopicInput{ Name: aws.String(name), + Tags: tags, } output, err := snsconn.CreateTopic(req) @@ -172,7 +174,6 @@ func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error { } d.SetId(*output.TopicArn) - return resourceAwsSnsTopicUpdate(d, meta) } @@ -188,6 +189,11 @@ func resourceAwsSnsTopicUpdate(d *schema.ResourceData, meta interface{}) error { } } } + if !d.IsNewResource() { + if err := setTagsSNS(conn, d); err != nil { + return fmt.Errorf("error updating SNS Topic tags for %s: %s", d.Id(), err) + } + } return resourceAwsSnsTopicRead(d, meta) } @@ -231,6 +237,18 @@ func resourceAwsSnsTopicRead(d *schema.ResourceData, meta interface{}) error { } } + // List tags + + tagList, err := snsconn.ListTagsForResource(&sns.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("error listing SNS Topic tags for %s: %s", d.Id(), err) + } + if err := d.Set("tags", tagsToMapSNS(tagList.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go index f2c6ee54e19..9d8d915b222 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go @@ -5,8 +5,8 @@ import ( "log" "regexp" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -20,6 +20,10 @@ func resourceAwsSnsTopicPolicy() *schema.Resource { Update: resourceAwsSnsTopicPolicyUpsert, Delete: resourceAwsSnsTopicPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -91,6 +95,7 @@ func resourceAwsSnsTopicPolicyRead(d *schema.ResourceData, meta interface{}) err } d.Set("policy", policy) + d.Set("arn", attrmap["TopicArn"]) return nil } @@ -162,5 +167,6 @@ func buildDefaultSnsTopicPolicy(topicArn, accountId string) string { } } ] -}`, topicArn, accountId) +} +`, topicArn, accountId) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go index 2a86ec2f091..9c422dddb5a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go @@ -9,10 +9,10 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -249,6 +249,15 @@ func subscribeToSNSTopic(d *schema.ResourceData, snsconn *sns.SNS) (output *sns. 
fmt.Errorf("Endpoint (%s) did not autoconfirm the subscription for topic %s", endpoint, topic_arn)) }) + if isResourceTimeoutError(err) { + var subscription *sns.Subscription + subscription, err = findSubscriptionByNonID(d, snsconn) + + if subscription != nil { + output.SubscriptionArn = subscription.SubscriptionArn + } + } + if err != nil { return nil, err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go index 5051e17603d..2923e9b6784 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSpotDataFeedSubscription() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go index f5024f20d9e..ef40352a181 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go @@ -10,10 +10,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSpotFleetRequest() *schema.Resource { @@ -99,6 +99,12 @@ func resourceAwsSpotFleetRequest() *schema.Resource { Computed: true, ForceNew: true, }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, "snapshot_id": { Type: schema.TypeString, Optional: true, @@ -159,12 +165,24 @@ func resourceAwsSpotFleetRequest() *schema.Resource { Default: true, ForceNew: true, }, + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, "iops": { Type: schema.TypeInt, Optional: true, Computed: true, ForceNew: true, }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, "volume_size": { Type: schema.TypeInt, Optional: true, @@ -388,6 +406,14 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{ opts.Placement = placement } + if v, ok := d["placement_group"]; ok { + if v.(string) != "" { + // If instanceInterruptionBehavior is set to STOP, this can't be set at all, even to an empty string, so check for "" to avoid those 
errors + placement.GroupName = aws.String(v.(string)) + opts.Placement = placement + } + } + if v, ok := d["ebs_optimized"]; ok { opts.EbsOptimized = aws.Bool(v.(bool)) } @@ -512,6 +538,10 @@ func readSpotFleetBlockDeviceMappingsFromConfig( ebs.Encrypted = aws.Bool(v) } + if v, ok := bd["kms_key_id"].(string); ok && v != "" { + ebs.KmsKeyId = aws.String(v) + } + if v, ok := bd["volume_size"].(int); ok && v != 0 { ebs.VolumeSize = aws.Int64(int64(v)) } @@ -553,6 +583,14 @@ func readSpotFleetBlockDeviceMappingsFromConfig( DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), } + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + + if v, ok := bd["kms_key_id"].(string); ok && v != "" { + ebs.KmsKeyId = aws.String(v) + } + if v, ok := bd["volume_size"].(int); ok && v != 0 { ebs.VolumeSize = aws.Int64(int64(v)) } @@ -703,22 +741,21 @@ func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{}) // take effect immediately, resulting in an InvalidSpotFleetRequestConfig error var resp *ec2.RequestSpotFleetOutput err = resource.Retry(10*time.Minute, func() *resource.RetryError { - var err error resp, err = conn.RequestSpotFleet(spotFleetOpts) + if isAWSErr(err, "InvalidSpotFleetRequestConfig", "") { + return resource.RetryableError(fmt.Errorf("Error creating Spot fleet request, retrying: %s", err)) + } if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // IAM is eventually consistent :/ - if awsErr.Code() == "InvalidSpotFleetRequestConfig" { - return resource.RetryableError( - fmt.Errorf("Error creating Spot fleet request, retrying: %s", err)) - } - } return resource.NonRetryableError(err) } return nil }) + if isResourceTimeoutError(err) { + resp, err = conn.RequestSpotFleet(spotFleetOpts) + } + if err != nil { return fmt.Errorf("Error requesting spot fleet: %s", err) } @@ -934,25 +971,30 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e aws.TimeValue(config.ValidUntil).Format(time.RFC3339)) } + launchSpec, err := launchSpecsToSet(config.LaunchSpecifications, conn) + if err != nil { + return fmt.Errorf("error occurred while reading launch specification: %s", err) + } + d.Set("replace_unhealthy_instances", config.ReplaceUnhealthyInstances) d.Set("instance_interruption_behaviour", config.InstanceInterruptionBehavior) d.Set("fleet_type", config.Type) - d.Set("launch_specification", launchSpecsToSet(config.LaunchSpecifications, conn)) + d.Set("launch_specification", launchSpec) return nil } -func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set { +func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) (*schema.Set, error) { specSet := &schema.Set{F: hashLaunchSpecification} for _, spec := range launchSpecs { rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn) if err != nil { - log.Panic(err) + return nil, err } specSet.Add(launchSpecToMap(spec, rootDeviceName)) } - return specSet + return specSet, nil } func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} { @@ -1066,6 +1108,10 @@ func ebsBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping, rootDevName *string) *s m["encrypted"] = aws.BoolValue(ebs.Encrypted) } + if ebs.KmsKeyId != nil { + m["kms_key_id"] = aws.StringValue(ebs.KmsKeyId) + } + if ebs.VolumeSize != nil { m["volume_size"] = aws.Int64Value(ebs.VolumeSize) } @@ -1118,6 +1164,14 @@ func rootBlockDeviceToSet( m["delete_on_termination"] = 
aws.BoolValue(val.Ebs.DeleteOnTermination) } + if val.Ebs.Encrypted != nil { + m["encrypted"] = aws.BoolValue(val.Ebs.Encrypted) + } + + if val.Ebs.KmsKeyId != nil { + m["kms_key_id"] = aws.StringValue(val.Ebs.KmsKeyId) + } + if val.Ebs.VolumeSize != nil { m["volume_size"] = aws.Int64Value(val.Ebs.VolumeSize) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go index dea0a32e841..0d64dfdc980 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSpotFleetRequestMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go index 5ac18f65c8a..fe31f160fc8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSpotInstanceRequest() *schema.Resource { @@ -177,7 +177,6 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface var resp *ec2.RequestSpotInstancesOutput err = resource.Retry(1*time.Minute, func() *resource.RetryError { - var err error resp, err = conn.RequestSpotInstances(spotOpts) // IAM instance profiles can take ~10 seconds to propagate in AWS: // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console @@ -193,6 +192,10 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + resp, err = conn.RequestSpotInstances(spotOpts) + } + if err != nil { return fmt.Errorf("Error requesting spot instances: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go index 79b3e1d641b..6d55747ec19 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go @@ -10,11 +10,13 @@ 
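// ---------------------------------------------------------------------------
// Aside: the recurring pattern in these hunks is resource.Retry around an
// eventually-consistent API call, followed by one final direct attempt when
// the retry budget itself runs out (the isResourceTimeoutError branch). This
// is a hedged, dependency-free sketch of that control flow; fakeRetry,
// errTimeout, and createQueue are illustrative stand-ins, not the provider's
// real helpers.
package main

import (
	"errors"
	"fmt"
)

var errTimeout = errors.New("timeout while waiting for state")

// fakeRetry stands in for resource.Retry: run fn up to `attempts` times and
// report a timeout once the budget is spent, as the SDK helper does.
func fakeRetry(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
	}
	return errTimeout
}

func main() {
	calls := 0
	createQueue := func() error { // stand-in for e.g. sqsconn.CreateQueue
		calls++
		if calls < 4 {
			return errors.New("QueueDeletedRecently") // eventual consistency
		}
		return nil
	}

	err := fakeRetry(3, createQueue)
	if errors.Is(err, errTimeout) { // mirrors `if isResourceTimeoutError(err)`
		err = createQueue() // one last direct call outside the retry loop
	}
	fmt.Println(calls, err) // 4 <nil>
}
// ---------------------------------------------------------------------------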
import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) var sqsQueueAttributeMap = map[string]string{ @@ -170,6 +172,11 @@ func resourceAwsSqsQueueCreate(d *schema.ResourceData, meta interface{}) error { QueueName: aws.String(name), } + // Tag-on-create is currently only supported in AWS Commercial + if v, ok := d.GetOk("tags"); ok && meta.(*AWSClient).partition == endpoints.AwsPartitionID { + req.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SqsTags() + } + attributes := make(map[string]*string) queueResource := *resourceAwsSqsQueue() @@ -206,20 +213,32 @@ func resourceAwsSqsQueueCreate(d *schema.ResourceData, meta interface{}) error { } return nil }) + if isResourceTimeoutError(err) { + output, err = sqsconn.CreateQueue(req) + } if err != nil { return fmt.Errorf("Error creating SQS queue: %s", err) } d.SetId(aws.StringValue(output.QueueUrl)) - return resourceAwsSqsQueueUpdate(d, meta) + // Tag-on-create is currently only supported in AWS Commercial + if meta.(*AWSClient).partition == endpoints.AwsPartitionID { + return resourceAwsSqsQueueRead(d, meta) + } else { + return resourceAwsSqsQueueUpdate(d, meta) + } } func resourceAwsSqsQueueUpdate(d *schema.ResourceData, meta interface{}) error { sqsconn := meta.(*AWSClient).sqsconn - if err := setTagsSQS(sqsconn, d); err != nil { - return err + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SqsUpdateTags(sqsconn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating SQS Queue (%s) tags: %s", d.Id(), err) + } } attributes := make(map[string]*string) @@ -267,7 +286,7 @@ func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { if err != nil { if awsErr, ok := err.(awserr.Error); ok { log.Printf("ERROR Found %s", awsErr.Code()) - if awsErr.Code() == "AWS.SimpleQueueService.NonExistentQueue" { + if awsErr.Code() == sqs.ErrCodeQueueDoesNotExist { d.SetId("") log.Printf("[DEBUG] SQS Queue (%s) not found", d.Get("name").(string)) return nil @@ -396,21 +415,20 @@ func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { } } - tags := make(map[string]string) - listTagsOutput, err := sqsconn.ListQueueTags(&sqs.ListQueueTagsInput{ - QueueUrl: aws.String(d.Id()), - }) + tags, err := keyvaluetags.SqsListTags(sqsconn, d.Id()) + if err != nil { // Non-standard partitions (e.g. US Gov) and some local development // solutions do not yet support this API call. 
Depending on the // implementation it may return InvalidAction or AWS.SimpleQueueService.UnsupportedOperation if !isAWSErr(err, "InvalidAction", "") && !isAWSErr(err, sqs.ErrCodeUnsupportedOperation, "") { - return err + return fmt.Errorf("error listing tags for SQS Queue (%s): %s", d.Id(), err) } - } else { - tags = tagsToMapGeneric(listTagsOutput.Tags) } - d.Set("tags", tags) + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } return nil } @@ -439,39 +457,3 @@ func extractNameFromSqsQueueUrl(queue string) (string, error) { return segments[2], nil } - -func setTagsSQS(conn *sqs.SQS, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - create, remove := diffTagsGeneric(oraw.(map[string]interface{}), nraw.(map[string]interface{})) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagQueue(&sqs.UntagQueueInput{ - QueueUrl: aws.String(d.Id()), - TagKeys: keys, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - - _, err := conn.TagQueue(&sqs.TagQueueInput{ - QueueUrl: aws.String(d.Id()), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go index 0459e7c209a..20fd51da67a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/jen20/awspolicyequivalence" ) @@ -67,9 +67,11 @@ func resourceAwsSqsQueuePolicyUpsert(d *schema.ResourceData, meta interface{}) e AttributeNames: []*string{aws.String(sqs.QueueAttributeNamePolicy)}, } notUpdatedError := fmt.Errorf("SQS attribute %s not updated", sqs.QueueAttributeNamePolicy) + var out *sqs.GetQueueAttributesOutput err = resource.Retry(1*time.Minute, func() *resource.RetryError { log.Printf("[DEBUG] Reading SQS attributes: %s", gqaInput) - out, err := conn.GetQueueAttributes(gqaInput) + var err error + out, err = conn.GetQueueAttributes(gqaInput) if err != nil { return resource.NonRetryableError(err) } @@ -88,8 +90,23 @@ func resourceAwsSqsQueuePolicyUpsert(d *schema.ResourceData, meta interface{}) e } return nil }) + if isResourceTimeoutError(err) { + out, err = conn.GetQueueAttributes(gqaInput) + if err == nil { + queuePolicy, ok := out.Attributes[sqs.QueueAttributeNamePolicy] + if !ok { + return notUpdatedError + } + + var equivalent bool + equivalent, err = awspolicy.PoliciesAreEquivalent(*queuePolicy, policy) + if !equivalent { + return notUpdatedError + } + } + } if err != nil { - 
return err + return fmt.Errorf("Error updating SQS queue attributes: %s", err) } d.SetId(url) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy_migrate.go index 2906c171919..1f954b96352 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSqsQueuePolicyMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go index 6e4aba45c02..b79972fc44c 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSsmActivation() *schema.Resource { @@ -109,6 +109,10 @@ func resourceAwsSsmActivationCreate(d *schema.ResourceData, meta interface{}) er return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + resp, err = ssmconn.CreateActivation(activationInput) + } + if err != nil { return fmt.Errorf("Error creating SSM activation: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go index 397570d075b..7d8e664be64 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSsmAssociation() *schema.Resource { @@ -17,6 +17,9 @@ func resourceAwsSsmAssociation() *schema.Resource { Read: resourceAwsSsmAssociationRead, Update: resourceAwsSsmAssociationUpdate, Delete: resourceAwsSsmAssociationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, MigrateState: resourceAwsSsmAssociationMigrateState, SchemaVersion: 1, diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association_migrate.go index 17821ad22e5..67f88dec8e7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSsmAssociationMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go index dde9704a520..f6b29947004 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go @@ -3,21 +3,18 @@ package aws import ( "fmt" "log" - "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const ( - MINIMUM_VERSIONED_SCHEMA = 2.0 SSM_DOCUMENT_PERMISSIONS_BATCH_LIMIT = 20 ) @@ -27,6 +24,9 @@ func resourceAwsSsmDocument() *schema.Resource { Read: resourceAwsSsmDocumentRead, Update: resourceAwsSsmDocumentUpdate, Delete: resourceAwsSsmDocumentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "arn": { @@ -164,21 +164,26 @@ func resourceAwsSsmDocumentCreate(d *schema.ResourceData, meta interface{}) erro } log.Printf("[DEBUG] Waiting for SSM Document %q to be created", d.Get("name").(string)) + var resp *ssm.CreateDocumentOutput err := resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err := ssmconn.CreateDocument(docInput) + var err error + resp, err = ssmconn.CreateDocument(docInput) if err != nil { return resource.NonRetryableError(err) } - - d.SetId(*resp.DocumentDescription.Name) return nil }) + if isResourceTimeoutError(err) { + resp, err = ssmconn.CreateDocument(docInput) + } if err != nil { return fmt.Errorf("Error creating SSM document: %s", err) } + d.SetId(*resp.DocumentDescription.Name) + if v, ok := d.GetOk("permissions"); ok && v != nil { if err := setDocumentPermissions(d, meta); err != nil { return err @@ -199,31 +204,51 @@ func resourceAwsSsmDocumentRead(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Reading SSM Document: %s", d.Id()) - docInput := &ssm.DescribeDocumentInput{ - Name: aws.String(d.Get("name").(string)), + describeDocumentInput := &ssm.DescribeDocumentInput{ + Name: aws.String(d.Id()), + } + + describeDocumentOutput, err := ssmconn.DescribeDocument(describeDocumentInput) + + if 
isAWSErr(err, ssm.ErrCodeInvalidDocument, "") { + log.Printf("[WARN] SSM Document not found so removing from state") + d.SetId("") + return nil } - resp, err := ssmconn.DescribeDocument(docInput) if err != nil { - if ssmErr, ok := err.(awserr.Error); ok && ssmErr.Code() == "InvalidDocument" { - log.Printf("[WARN] SSM Document not found so removing from state") - d.SetId("") - return nil - } - return fmt.Errorf("Error describing SSM document: %s", err) + return fmt.Errorf("error describing SSM Document (%s): %s", d.Id(), err) } - doc := resp.Document + if describeDocumentOutput == nil || describeDocumentOutput.Document == nil { + return fmt.Errorf("error describing SSM Document (%s): empty result", d.Id()) + } + + getDocumentInput := &ssm.GetDocumentInput{ + DocumentFormat: describeDocumentOutput.Document.DocumentFormat, + DocumentVersion: aws.String("$LATEST"), + Name: describeDocumentOutput.Document.Name, + } + + getDocumentOutput, err := ssmconn.GetDocument(getDocumentInput) + + if err != nil { + return fmt.Errorf("error getting SSM Document (%s): %s", d.Id(), err) + } + + if getDocumentOutput == nil { + return fmt.Errorf("error getting SSM Document (%s): empty result", d.Id()) + } + + doc := describeDocumentOutput.Document + + d.Set("content", getDocumentOutput.Content) d.Set("created_date", doc.CreatedDate) d.Set("default_version", doc.DefaultVersion) d.Set("description", doc.Description) d.Set("schema_version", doc.SchemaVersion) - - if _, ok := d.GetOk("document_type"); ok { - d.Set("document_type", doc.DocumentType) - } - d.Set("document_format", doc.DocumentFormat) + d.Set("document_type", doc.DocumentType) d.Set("document_version", doc.DocumentVersion) d.Set("hash", doc.Hash) d.Set("hash_type", doc.HashType) @@ -273,10 +298,6 @@ func resourceAwsSsmDocumentRead(d *schema.ResourceData, meta interface{}) error params = append(params, param) } - if len(params) == 0 { - params = make([]map[string]interface{}, 1) - } - if err := d.Set("parameter", params); err != nil { return err } @@ -314,15 +335,6 @@ func resourceAwsSsmDocumentUpdate(d *schema.ResourceData, meta interface{}) erro return nil } - if schemaVersion, ok := d.GetOk("schemaVersion"); ok { - schemaNumber, _ := strconv.ParseFloat(schemaVersion.(string), 64) - - if schemaNumber < MINIMUM_VERSIONED_SCHEMA { - log.Printf("[DEBUG] Skipping document update because document version is not 2.0 %q", d.Id()) - return nil - } - } - if err := updateAwsSSMDocument(d, meta); err != nil { return err } @@ -348,31 +360,33 @@ func resourceAwsSsmDocumentDelete(d *schema.ResourceData, meta interface{}) erro return err } + input := &ssm.DescribeDocumentInput{ + Name: aws.String(d.Get("name").(string)), + } log.Printf("[DEBUG] Waiting for SSM Document %q to be deleted", d.Get("name").(string)) err = resource.Retry(10*time.Minute, func() *resource.RetryError { - _, err := ssmconn.DescribeDocument(&ssm.DescribeDocumentInput{ - Name: aws.String(d.Get("name").(string)), - }) + _, err := ssmconn.DescribeDocument(input) - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - if awsErr.Code() == "InvalidDocument" { - return nil - } + if isAWSErr(err, ssm.ErrCodeInvalidDocument, "") { + return nil + } + if err != nil { return resource.NonRetryableError(err) } return resource.RetryableError(fmt.Errorf("SSM Document (%s) still exists", d.Id())) }) + + if isResourceTimeoutError(err) { + _, err = ssmconn.DescribeDocument(input) + } + if isAWSErr(err, ssm.ErrCodeInvalidDocument, "") { + return nil + } if 
err != nil { return fmt.Errorf("error waiting for SSM Document (%s) deletion: %s", d.Id(), err) } - return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go index af3e11f6115..cc98e6d457a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSsmMaintenanceWindow() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go index e4f1a0c7be6..cbd30d779e9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go @@ -3,10 +3,12 @@ package aws import ( "fmt" "log" + "regexp" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSsmMaintenanceWindowTarget() *schema.Resource { @@ -48,6 +50,20 @@ func resourceAwsSsmMaintenanceWindowTarget() *schema.Resource { }, }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,128}$`), "Only alphanumeric characters, hyphens, dots & underscores allowed"), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 128), + }, + "owner_information": { Type: schema.TypeString, Optional: true, @@ -67,6 +83,14 @@ func resourceAwsSsmMaintenanceWindowTargetCreate(d *schema.ResourceData, meta in Targets: expandAwsSsmTargets(d.Get("targets").([]interface{})), } + if v, ok := d.GetOk("name"); ok { + params.Name = aws.String(v.(string)) + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("owner_information"); ok { params.OwnerInformation = aws.String(v.(string)) } @@ -107,6 +131,8 @@ func resourceAwsSsmMaintenanceWindowTargetRead(d *schema.ResourceData, meta inte d.Set("owner_information", t.OwnerInformation) d.Set("window_id", t.WindowId) d.Set("resource_type", t.ResourceType) + d.Set("name", t.Name) + d.Set("description", t.Description) if err := d.Set("targets", flattenAwsSsmTargets(t.Targets)); err != nil { return fmt.Errorf("Error setting targets error: %#v", err) @@ -134,6 +160,14 @@ func resourceAwsSsmMaintenanceWindowTargetUpdate(d *schema.ResourceData, meta in WindowTargetId: aws.String(d.Id()), } + if d.HasChange("name") { + params.Name = 
aws.String(d.Get("name").(string)) + } + + if d.HasChange("description") { + params.Description = aws.String(d.Get("description").(string)) + } + if d.HasChange("owner_information") { params.OwnerInformation = aws.String(d.Get("owner_information").(string)) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go index 39aad047b7a..72d7148027d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go @@ -3,19 +3,26 @@ package aws import ( "fmt" "log" + "regexp" + "sort" + "strings" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { return &schema.Resource{ Create: resourceAwsSsmMaintenanceWindowTaskCreate, Read: resourceAwsSsmMaintenanceWindowTaskRead, + Update: resourceAwsSsmMaintenanceWindowTaskUpdate, Delete: resourceAwsSsmMaintenanceWindowTaskDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsSsmMaintenanceWindowTaskImport, + }, Schema: map[string]*schema.Schema{ "window_id": { @@ -27,13 +34,11 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { "max_concurrency": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "max_errors": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "task_type": { @@ -45,30 +50,25 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { "task_arn": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "service_role_arn": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "targets": { Type: schema.TypeList, Required: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "values": { Type: schema.TypeList, Required: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -78,28 +78,26 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { "name": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validateAwsSSMMaintenanceWindowTaskName, }, "description": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 128), }, "priority": { Type: schema.TypeInt, Optional: true, - ForceNew: true, }, "logging_info": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"task_invocation_parameters"}, + Deprecated: "use 'task_invocation_parameters' argument instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "s3_bucket_name": { @@ -119,30 +117,221 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "task_parameters": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"task_invocation_parameters"}, + 
Deprecated: "use 'task_invocation_parameters' argument instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "values": { Type: schema.TypeList, Required: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, }, }, + + "task_invocation_parameters": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"task_parameters", "logging_info"}, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automation_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "document_version": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile("([$]LATEST|[$]DEFAULT|^[1-9][0-9]*$)"), "see https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowAutomationParameters.html"), + }, + "parameter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + + "lambda_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_context": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 8000), + }, + + "payload": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(0, 4096), + }, + + "qualifier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + }, + }, + }, + + "run_command_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "comment": { + Type: schema.TypeString, + Optional: true, + }, + + "document_hash": { + Type: schema.TypeString, + Optional: true, + }, + + "document_hash_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + ssm.DocumentHashTypeSha256, + ssm.DocumentHashTypeSha1, + }, false), + }, + + "notification_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notification_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "notification_events": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "notification_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + ssm.NotificationTypeCommand, + ssm.NotificationTypeInvocation, + }, false), + }, + }, + }, + }, + + "output_s3_bucket": { + Type: schema.TypeString, + Optional: true, + }, + + "output_s3_key_prefix": { + Type: schema.TypeString, + Optional: true, + }, + + "parameter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "service_role_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + 
}, + + "step_functions_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "input": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(0, 4096), + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 80), + }, + }, + }, + }, + }, + }, + }, }, } } func expandAwsSsmMaintenanceWindowLoggingInfo(config []interface{}) *ssm.LoggingInfo { + if len(config) == 0 || config[0] == nil { + return nil + } loggingConfig := config[0].(map[string]interface{}) @@ -195,6 +384,292 @@ func flattenAwsSsmTaskParameters(taskParameters map[string]*ssm.MaintenanceWindo return result } +func expandAwsSsmTaskInvocationParameters(config []interface{}) *ssm.MaintenanceWindowTaskInvocationParameters { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.MaintenanceWindowTaskInvocationParameters{} + for _, v := range config { + paramConfig := v.(map[string]interface{}) + if attr, ok := paramConfig["automation_parameters"]; ok && len(attr.([]interface{})) > 0 && attr.([]interface{})[0] != nil { + params.Automation = expandAwsSsmTaskInvocationAutomationParameters(attr.([]interface{})) + } + if attr, ok := paramConfig["lambda_parameters"]; ok && len(attr.([]interface{})) > 0 && attr.([]interface{})[0] != nil { + params.Lambda = expandAwsSsmTaskInvocationLambdaParameters(attr.([]interface{})) + } + if attr, ok := paramConfig["run_command_parameters"]; ok && len(attr.([]interface{})) > 0 && attr.([]interface{})[0] != nil { + params.RunCommand = expandAwsSsmTaskInvocationRunCommandParameters(attr.([]interface{})) + } + if attr, ok := paramConfig["step_functions_parameters"]; ok && len(attr.([]interface{})) > 0 && attr.([]interface{})[0] != nil { + params.StepFunctions = expandAwsSsmTaskInvocationStepFunctionsParameters(attr.([]interface{})) + } + } + return params +} + +func flattenAwsSsmTaskInvocationParameters(parameters *ssm.MaintenanceWindowTaskInvocationParameters) []interface{} { + result := make(map[string]interface{}) + if parameters.Automation != nil { + result["automation_parameters"] = flattenAwsSsmTaskInvocationAutomationParameters(parameters.Automation) + } + + if parameters.Lambda != nil { + result["lambda_parameters"] = flattenAwsSsmTaskInvocationLambdaParameters(parameters.Lambda) + } + + if parameters.RunCommand != nil { + result["run_command_parameters"] = flattenAwsSsmTaskInvocationRunCommandParameters(parameters.RunCommand) + } + + if parameters.StepFunctions != nil { + result["step_functions_parameters"] = flattenAwsSsmTaskInvocationStepFunctionsParameters(parameters.StepFunctions) + } + + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationAutomationParameters(config []interface{}) *ssm.MaintenanceWindowAutomationParameters { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.MaintenanceWindowAutomationParameters{} + configParam := config[0].(map[string]interface{}) + if attr, ok := configParam["document_version"]; ok && len(attr.(string)) != 0 { + params.DocumentVersion = aws.String(attr.(string)) + } + if attr, ok := configParam["parameter"]; ok && len(attr.(*schema.Set).List()) > 0 { + params.Parameters = expandAwsSsmTaskInvocationCommonParameters(attr.(*schema.Set).List()) + } + + return params +} + +func flattenAwsSsmTaskInvocationAutomationParameters(parameters *ssm.MaintenanceWindowAutomationParameters) []interface{} { + result := 
make(map[string]interface{}) + + if parameters.DocumentVersion != nil { + result["document_version"] = aws.StringValue(parameters.DocumentVersion) + } + if parameters.Parameters != nil { + result["parameter"] = flattenAwsSsmTaskInvocationCommonParameters(parameters.Parameters) + } + + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationLambdaParameters(config []interface{}) *ssm.MaintenanceWindowLambdaParameters { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.MaintenanceWindowLambdaParameters{} + configParam := config[0].(map[string]interface{}) + if attr, ok := configParam["client_context"]; ok && len(attr.(string)) != 0 { + params.ClientContext = aws.String(attr.(string)) + } + if attr, ok := configParam["payload"]; ok && len(attr.(string)) != 0 { + params.Payload = []byte(attr.(string)) + } + if attr, ok := configParam["qualifier"]; ok && len(attr.(string)) != 0 { + params.Qualifier = aws.String(attr.(string)) + } + return params +} + +func flattenAwsSsmTaskInvocationLambdaParameters(parameters *ssm.MaintenanceWindowLambdaParameters) []interface{} { + result := make(map[string]interface{}) + + if parameters.ClientContext != nil { + result["client_context"] = aws.StringValue(parameters.ClientContext) + } + if parameters.Payload != nil { + result["payload"] = string(parameters.Payload) + } + if parameters.Qualifier != nil { + result["qualifier"] = aws.StringValue(parameters.Qualifier) + } + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationRunCommandParameters(config []interface{}) *ssm.MaintenanceWindowRunCommandParameters { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.MaintenanceWindowRunCommandParameters{} + configParam := config[0].(map[string]interface{}) + if attr, ok := configParam["comment"]; ok && len(attr.(string)) != 0 { + params.Comment = aws.String(attr.(string)) + } + if attr, ok := configParam["document_hash"]; ok && len(attr.(string)) != 0 { + params.DocumentHash = aws.String(attr.(string)) + } + if attr, ok := configParam["document_hash_type"]; ok && len(attr.(string)) != 0 { + params.DocumentHashType = aws.String(attr.(string)) + } + if attr, ok := configParam["notification_config"]; ok && len(attr.([]interface{})) > 0 { + params.NotificationConfig = expandAwsSsmTaskInvocationRunCommandParametersNotificationConfig(attr.([]interface{})) + } + if attr, ok := configParam["output_s3_bucket"]; ok && len(attr.(string)) != 0 { + params.OutputS3BucketName = aws.String(attr.(string)) + } + if attr, ok := configParam["output_s3_key_prefix"]; ok && len(attr.(string)) != 0 { + params.OutputS3KeyPrefix = aws.String(attr.(string)) + } + if attr, ok := configParam["parameter"]; ok && len(attr.(*schema.Set).List()) > 0 { + params.Parameters = expandAwsSsmTaskInvocationCommonParameters(attr.(*schema.Set).List()) + } + if attr, ok := configParam["service_role_arn"]; ok && len(attr.(string)) != 0 { + params.ServiceRoleArn = aws.String(attr.(string)) + } + if attr, ok := configParam["timeout_seconds"]; ok && attr.(int) != 0 { + params.TimeoutSeconds = aws.Int64(int64(attr.(int))) + } + return params +} + +func flattenAwsSsmTaskInvocationRunCommandParameters(parameters *ssm.MaintenanceWindowRunCommandParameters) []interface{} { + result := make(map[string]interface{}) + + if parameters.Comment != nil { + result["comment"] = aws.StringValue(parameters.Comment) + } + if parameters.DocumentHash != nil { + result["document_hash"] = aws.StringValue(parameters.DocumentHash) + } + if 
parameters.DocumentHashType != nil { + result["document_hash_type"] = aws.StringValue(parameters.DocumentHashType) + } + if parameters.NotificationConfig != nil { + result["notification_config"] = flattenAwsSsmTaskInvocationRunCommandParametersNotificationConfig(parameters.NotificationConfig) + } + if parameters.OutputS3BucketName != nil { + result["output_s3_bucket"] = aws.StringValue(parameters.OutputS3BucketName) + } + if parameters.OutputS3KeyPrefix != nil { + result["output_s3_key_prefix"] = aws.StringValue(parameters.OutputS3KeyPrefix) + } + if parameters.Parameters != nil { + result["parameter"] = flattenAwsSsmTaskInvocationCommonParameters(parameters.Parameters) + } + if parameters.ServiceRoleArn != nil { + result["service_role_arn"] = aws.StringValue(parameters.ServiceRoleArn) + } + if parameters.TimeoutSeconds != nil { + result["timeout_seconds"] = aws.Int64Value(parameters.TimeoutSeconds) + } + + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationStepFunctionsParameters(config []interface{}) *ssm.MaintenanceWindowStepFunctionsParameters { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.MaintenanceWindowStepFunctionsParameters{} + configParam := config[0].(map[string]interface{}) + + if attr, ok := configParam["input"]; ok && len(attr.(string)) != 0 { + params.Input = aws.String(attr.(string)) + } + if attr, ok := configParam["name"]; ok && len(attr.(string)) != 0 { + params.Name = aws.String(attr.(string)) + } + + return params +} + +func flattenAwsSsmTaskInvocationStepFunctionsParameters(parameters *ssm.MaintenanceWindowStepFunctionsParameters) []interface{} { + result := make(map[string]interface{}) + + if parameters.Input != nil { + result["input"] = aws.StringValue(parameters.Input) + } + if parameters.Name != nil { + result["name"] = aws.StringValue(parameters.Name) + } + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationRunCommandParametersNotificationConfig(config []interface{}) *ssm.NotificationConfig { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.NotificationConfig{} + configParam := config[0].(map[string]interface{}) + + if attr, ok := configParam["notification_arn"]; ok && len(attr.(string)) != 0 { + params.NotificationArn = aws.String(attr.(string)) + } + if attr, ok := configParam["notification_events"]; ok && len(attr.([]interface{})) > 0 { + params.NotificationEvents = expandStringList(attr.([]interface{})) + } + if attr, ok := configParam["notification_type"]; ok && len(attr.(string)) != 0 { + params.NotificationType = aws.String(attr.(string)) + } + + return params +} + +func flattenAwsSsmTaskInvocationRunCommandParametersNotificationConfig(config *ssm.NotificationConfig) []interface{} { + result := make(map[string]interface{}) + + if config.NotificationArn != nil { + result["notification_arn"] = aws.StringValue(config.NotificationArn) + } + if config.NotificationEvents != nil { + result["notification_events"] = flattenStringList(config.NotificationEvents) + } + if config.NotificationType != nil { + result["notification_type"] = aws.StringValue(config.NotificationType) + } + + return []interface{}{result} +} + +func expandAwsSsmTaskInvocationCommonParameters(config []interface{}) map[string][]*string { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := make(map[string][]*string) + + for _, v := range config { + paramConfig := v.(map[string]interface{}) + params[paramConfig["name"].(string)] = 
expandStringList(paramConfig["values"].([]interface{})) + } + + return params +} + +func flattenAwsSsmTaskInvocationCommonParameters(parameters map[string][]*string) []interface{} { + attributes := make([]interface{}, 0, len(parameters)) + + keys := make([]string, 0, len(parameters)) + for k := range parameters { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + values := make([]string, 0) + for _, value := range parameters[key] { + values = append(values, aws.StringValue(value)) + } + params := map[string]interface{}{ + "name": key, + "values": values, + } + attributes = append(attributes, params) + } + + return attributes +} + func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta interface{}) error { ssmconn := meta.(*AWSClient).ssmconn @@ -227,7 +702,11 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte } if v, ok := d.GetOk("task_parameters"); ok { - params.TaskParameters = expandAwsSsmTaskParameters(v.([]interface{})) + params.TaskParameters = expandAwsSsmTaskParameters(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("task_invocation_parameters"); ok { + params.TaskInvocationParameters = expandAwsSsmTaskInvocationParameters(v.([]interface{})) } resp, err := ssmconn.RegisterTaskWithMaintenanceWindow(params) @@ -242,56 +721,108 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte func resourceAwsSsmMaintenanceWindowTaskRead(d *schema.ResourceData, meta interface{}) error { ssmconn := meta.(*AWSClient).ssmconn + windowID := d.Get("window_id").(string) - params := &ssm.DescribeMaintenanceWindowTasksInput{ - WindowId: aws.String(d.Get("window_id").(string)), + params := &ssm.GetMaintenanceWindowTaskInput{ + WindowId: aws.String(windowID), + WindowTaskId: aws.String(d.Id()), + } + resp, err := ssmconn.GetMaintenanceWindowTask(params) + if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { + log.Printf("[WARN] Maintenance Window (%s) Task (%s) not found, removing from state", windowID, d.Id()) + d.SetId("") + return nil } - - resp, err := ssmconn.DescribeMaintenanceWindowTasks(params) if err != nil { - return err + return fmt.Errorf("Error getting Maintenance Window (%s) Task (%s): %s", windowID, d.Id(), err) + } + + d.Set("window_id", resp.WindowId) + d.Set("max_concurrency", resp.MaxConcurrency) + d.Set("max_errors", resp.MaxErrors) + d.Set("task_type", resp.TaskType) + d.Set("service_role_arn", resp.ServiceRoleArn) + d.Set("task_arn", resp.TaskArn) + d.Set("priority", resp.Priority) + d.Set("name", resp.Name) + d.Set("description", resp.Description) + + if resp.LoggingInfo != nil { + if err := d.Set("logging_info", flattenAwsSsmMaintenanceWindowLoggingInfo(resp.LoggingInfo)); err != nil { + return fmt.Errorf("Error setting logging_info error: %#v", err) + } + } + + if resp.TaskParameters != nil { + if err := d.Set("task_parameters", flattenAwsSsmTaskParameters(resp.TaskParameters)); err != nil { + return fmt.Errorf("Error setting task_parameters error: %#v", err) + } } - found := false - for _, t := range resp.Tasks { - if *t.WindowTaskId == d.Id() { - found = true - - d.Set("window_id", t.WindowId) - d.Set("max_concurrency", t.MaxConcurrency) - d.Set("max_errors", t.MaxErrors) - d.Set("task_type", t.Type) - d.Set("service_role_arn", t.ServiceRoleArn) - d.Set("task_arn", t.TaskArn) - d.Set("priority", t.Priority) - d.Set("name", t.Name) - d.Set("description", t.Description) - - if t.LoggingInfo != nil { - if err := d.Set("logging_info", 
flattenAwsSsmMaintenanceWindowLoggingInfo(t.LoggingInfo)); err != nil { - return fmt.Errorf("Error setting logging_info error: %#v", err) - } - } - - if t.TaskParameters != nil { - if err := d.Set("task_parameters", flattenAwsSsmTaskParameters(t.TaskParameters)); err != nil { - return fmt.Errorf("Error setting task_parameters error: %#v", err) - } - } - - if err := d.Set("targets", flattenAwsSsmTargets(t.Targets)); err != nil { - return fmt.Errorf("Error setting targets error: %#v", err) - } + if resp.TaskInvocationParameters != nil { + if err := d.Set("task_invocation_parameters", flattenAwsSsmTaskInvocationParameters(resp.TaskInvocationParameters)); err != nil { + return fmt.Errorf("Error setting task_invocation_parameters error: %#v", err) } } - if !found { - log.Printf("[INFO] Maintenance Window Target not found. Removing from state") + if err := d.Set("targets", flattenAwsSsmTargets(resp.Targets)); err != nil { + return fmt.Errorf("Error setting targets error: %#v", err) + } + + return nil +} + +func resourceAwsSsmMaintenanceWindowTaskUpdate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + windowID := d.Get("window_id").(string) + + params := &ssm.UpdateMaintenanceWindowTaskInput{ + WindowId: aws.String(windowID), + WindowTaskId: aws.String(d.Id()), + MaxConcurrency: aws.String(d.Get("max_concurrency").(string)), + MaxErrors: aws.String(d.Get("max_errors").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + TaskArn: aws.String(d.Get("task_arn").(string)), + Targets: expandAwsSsmTargets(d.Get("targets").([]interface{})), + Replace: aws.Bool(true), + } + + if v, ok := d.GetOk("name"); ok { + params.Name = aws.String(v.(string)) + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("priority"); ok { + params.Priority = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("logging_info"); ok { + params.LoggingInfo = expandAwsSsmMaintenanceWindowLoggingInfo(v.([]interface{})) + } + + if v, ok := d.GetOk("task_parameters"); ok { + params.TaskParameters = expandAwsSsmTaskParameters(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("task_invocation_parameters"); ok { + params.TaskInvocationParameters = expandAwsSsmTaskInvocationParameters(v.([]interface{})) + } + + _, err := ssmconn.UpdateMaintenanceWindowTask(params) + if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { + log.Printf("[WARN] Maintenance Window (%s) Task (%s) not found, removing from state", windowID, d.Id()) + d.SetId("") + return nil } - return nil + if err != nil { + return fmt.Errorf("Error updating Maintenance Window (%s) Task (%s): %s", windowID, d.Id(), err) + } + + return resourceAwsSsmMaintenanceWindowTaskRead(d, meta) } func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta interface{}) error { @@ -305,9 +836,27 @@ func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta inte } _, err := ssmconn.DeregisterTaskFromMaintenanceWindow(params) + if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { + return nil + } if err != nil { return fmt.Errorf("error deregistering SSM Maintenance Window Task (%s): %s", d.Id(), err) } return nil } + +func resourceAwsSsmMaintenanceWindowTaskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + idParts := strings.SplitN(d.Id(), "/", 2) + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + return nil, fmt.Errorf("unexpected format of ID (%q), expected <window-id>/<window-task-id>", d.Id())
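// ---------------------------------------------------------------------------
// Aside: a hedged, self-contained sketch of the expand/flatten round trip
// that the expandAwsSsmTaskInvocationCommonParameters /
// flattenAwsSsmTaskInvocationCommonParameters pair added above performs:
// Terraform's []interface{} of {name, values} blocks becomes the SDK's
// map[string][]*string, and flattening sorts keys so reads stay stable.
// expandParams/flattenParams are illustrative names, not the vendored code.
package main

import (
	"fmt"
	"sort"
)

func expandParams(config []interface{}) map[string][]*string {
	out := make(map[string][]*string)
	for _, v := range config {
		m := v.(map[string]interface{})
		var vals []*string
		for _, raw := range m["values"].([]interface{}) {
			s := raw.(string)
			vals = append(vals, &s)
		}
		out[m["name"].(string)] = vals
	}
	return out
}

func flattenParams(params map[string][]*string) []interface{} {
	keys := make([]string, 0, len(params))
	for k := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order keeps plans diff-free
	out := make([]interface{}, 0, len(keys))
	for _, k := range keys {
		vals := make([]string, 0, len(params[k]))
		for _, p := range params[k] {
			vals = append(vals, *p)
		}
		out = append(out, map[string]interface{}{"name": k, "values": vals})
	}
	return out
}

func main() {
	in := []interface{}{
		map[string]interface{}{"name": "InstanceIds", "values": []interface{}{"i-123"}},
	}
	fmt.Println(flattenParams(expandParams(in))) // round-trips the same block
}
// ---------------------------------------------------------------------------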
+ } + + windowID := idParts[0] + windowTaskID := idParts[1] + + d.Set("window_id", windowID) + d.SetId(windowTaskID) + + return []*schema.ResourceData{d}, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go index ddcb146fda1..95ecb8e72d0 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsSsmParameter() *schema.Resource { @@ -75,6 +75,10 @@ func resourceAwsSsmParameter() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, "tags": tagsSchema(), }, @@ -121,6 +125,7 @@ func resourceAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error d.Set("name", param.Name) d.Set("type", param.Type) d.Set("value", param.Value) + d.Set("version", param.Version) describeParamsInput := &ssm.DescribeParametersInput{ ParameterFilters: []*ssm.ParameterStringFilter{ @@ -145,7 +150,10 @@ func resourceAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error detail := describeResp.Parameters[0] d.Set("key_id", detail.KeyId) d.Set("description", detail.Description) - d.Set("tier", detail.Tier) + d.Set("tier", ssm.ParameterTierStandard) + if detail.Tier != nil { + d.Set("tier", detail.Tier) + } d.Set("allowed_pattern", detail.AllowedPattern) if tagList, err := ssmconn.ListTagsForResource(&ssm.ListTagsForResourceInput{ @@ -209,7 +217,14 @@ func resourceAwsSsmParameterPut(d *schema.ResourceData, meta interface{}) error } log.Printf("[DEBUG] Waiting for SSM Parameter %v to be updated", d.Get("name")) - if _, err := ssmconn.PutParameter(paramInput); err != nil { + _, err := ssmconn.PutParameter(paramInput) + + if isAWSErr(err, "ValidationException", "Tier is not supported") { + paramInput.Tier = nil + _, err = ssmconn.PutParameter(paramInput) + } + + if err != nil { return fmt.Errorf("error creating SSM parameter: %s", err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go index 1952dc24ae5..b25929b47b7 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) var ssmPatchComplianceLevels = []string{ diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go index 3489a3c2be8..43d26fc6e10 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSsmPatchGroup() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_resource_data_sync.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_resource_data_sync.go index aea9ac75fd9..1fd13550605 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_resource_data_sync.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_resource_data_sync.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsSsmResourceDataSync() *schema.Resource { @@ -67,12 +67,12 @@ func resourceAwsSsmResourceDataSync() *schema.Resource { func resourceAwsSsmResourceDataSyncCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssmconn + input := &ssm.CreateResourceDataSyncInput{ + S3Destination: expandSsmResourceDataSyncS3Destination(d), + SyncName: aws.String(d.Get("name").(string)), + } err := resource.Retry(1*time.Minute, func() *resource.RetryError { - input := &ssm.CreateResourceDataSyncInput{ - S3Destination: expandSsmResourceDataSyncS3Destination(d), - SyncName: aws.String(d.Get("name").(string)), - } _, err := conn.CreateResourceDataSync(input) if err != nil { if isAWSErr(err, ssm.ErrCodeResourceDataSyncInvalidConfigurationException, "S3 write failed for bucket") { @@ -82,6 +82,9 @@ func resourceAwsSsmResourceDataSyncCreate(d *schema.ResourceData, meta interface } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.CreateResourceDataSync(input) + } if err != nil { return err diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cache.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cache.go index 9ef088ece51..46d1b635a75 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cache.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cache.go @@ -8,7 +8,7 @@ import ( 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsStorageGatewayCache() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cached_iscsi_volume.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cached_iscsi_volume.go index ff6a18022fd..23905e78c55 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cached_iscsi_volume.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_cached_iscsi_volume.go @@ -6,18 +6,19 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsStorageGatewayCachedIscsiVolume() *schema.Resource { return &schema.Resource{ Create: resourceAwsStorageGatewayCachedIscsiVolumeCreate, Read: resourceAwsStorageGatewayCachedIscsiVolumeRead, + Update: resourceAwsStorageGatewayCachedIscsiVolumeUpdate, Delete: resourceAwsStorageGatewayCachedIscsiVolumeDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -85,6 +86,7 @@ func resourceAwsStorageGatewayCachedIscsiVolume() *schema.Resource { Required: true, ForceNew: true, }, + "tags": tagsSchema(), }, } } @@ -98,6 +100,7 @@ func resourceAwsStorageGatewayCachedIscsiVolumeCreate(d *schema.ResourceData, me NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), TargetName: aws.String(d.Get("target_name").(string)), VolumeSizeInBytes: aws.Int64(int64(d.Get("volume_size_in_bytes").(int))), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().StoragegatewayTags(), } if v, ok := d.GetOk("snapshot_id"); ok { @@ -119,6 +122,19 @@ func resourceAwsStorageGatewayCachedIscsiVolumeCreate(d *schema.ResourceData, me return resourceAwsStorageGatewayCachedIscsiVolumeRead(d, meta) } +func resourceAwsStorageGatewayCachedIscsiVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).storagegatewayconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.StoragegatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + + return resourceAwsStorageGatewayCachedIscsiVolumeRead(d, meta) +} + func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).storagegatewayconn @@ -146,12 +162,21 @@ func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta volume := output.CachediSCSIVolumes[0] - d.Set("arn", aws.StringValue(volume.VolumeARN)) + arn := aws.StringValue(volume.VolumeARN) + d.Set("arn", arn) d.Set("snapshot_id", aws.StringValue(volume.SourceSnapshotId)) - d.Set("volume_arn", aws.StringValue(volume.VolumeARN)) + d.Set("volume_arn", arn) 
d.Set("volume_id", aws.StringValue(volume.VolumeId)) d.Set("volume_size_in_bytes", int(aws.Int64Value(volume.VolumeSizeInBytes))) + tags, err := keyvaluetags.StoragegatewayListTags(conn, arn) + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + if volume.VolumeiSCSIAttributes != nil { d.Set("chap_enabled", aws.BoolValue(volume.VolumeiSCSIAttributes.ChapEnabled)) d.Set("lun_number", int(aws.Int64Value(volume.VolumeiSCSIAttributes.LunNumber))) @@ -195,6 +220,12 @@ func resourceAwsStorageGatewayCachedIscsiVolumeDelete(d *schema.ResourceData, me } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteVolume(input) + } + if isAWSErr(err, storagegateway.ErrCodeInvalidGatewayRequestException, "The specified volume was not found") { + return nil + } if err != nil { return fmt.Errorf("error deleting Storage Gateway cached iSCSI volume %q: %s", d.Id(), err) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_gateway.go index f1679468530..6a76a1d6ab2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_gateway.go @@ -9,10 +9,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/customdiff" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsStorageGatewayGateway() *schema.Resource { @@ -122,6 +123,7 @@ func resourceAwsStorageGatewayGateway() *schema.Resource { "IBM-ULT3580-TD5", }, false), }, + "tags": tagsSchema(), }, } } @@ -153,9 +155,10 @@ func resourceAwsStorageGatewayGatewayCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("error creating HTTP request: %s", err) } + var response *http.Response err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { log.Printf("[DEBUG] Making HTTP request: %s", request.URL.String()) - response, err := client.Do(request) + response, err = client.Do(request) if err != nil { if err, ok := err.(net.Error); ok { errMessage := fmt.Errorf("error making HTTP request: %s", err) @@ -164,24 +167,26 @@ func resourceAwsStorageGatewayGatewayCreate(d *schema.ResourceData, meta interfa } return resource.NonRetryableError(fmt.Errorf("error making HTTP request: %s", err)) } - - log.Printf("[DEBUG] Received HTTP response: %#v", response) - if response.StatusCode != 302 { - return resource.NonRetryableError(fmt.Errorf("expected HTTP status code 302, received: %d", response.StatusCode)) - } - - redirectURL, err := response.Location() - if err != nil { - return resource.NonRetryableError(fmt.Errorf("error 
extracting HTTP Location header: %s", err)) - } - - activationKey = redirectURL.Query().Get("activationKey") - return nil }) + if isResourceTimeoutError(err) { + response, err = client.Do(request) + } if err != nil { return fmt.Errorf("error retrieving activation key from IP Address (%s): %s", gatewayIpAddress, err) } + + log.Printf("[DEBUG] Received HTTP response: %#v", response) + if response.StatusCode != 302 { + return fmt.Errorf("expected HTTP status code 302, received: %d", response.StatusCode) + } + + redirectURL, err := response.Location() + if err != nil { + return fmt.Errorf("error extracting HTTP Location header: %s", err) + } + + activationKey = redirectURL.Query().Get("activationKey") if activationKey == "" { return fmt.Errorf("empty activationKey received from IP Address: %s", gatewayIpAddress) } @@ -193,6 +198,7 @@ func resourceAwsStorageGatewayGatewayCreate(d *schema.ResourceData, meta interfa GatewayName: aws.String(d.Get("gateway_name").(string)), GatewayTimezone: aws.String(d.Get("gateway_timezone").(string)), GatewayType: aws.String(d.Get("gateway_type").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().StoragegatewayTags(), } if v, ok := d.GetOk("medium_changer_type"); ok { @@ -212,10 +218,11 @@ func resourceAwsStorageGatewayGatewayCreate(d *schema.ResourceData, meta interfa d.SetId(aws.StringValue(output.GatewayARN)) // Gateway activations can take a few minutes + gwInput := &storagegateway.DescribeGatewayInformationInput{ + GatewayARN: aws.String(d.Id()), + } err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { - _, err := conn.DescribeGatewayInformation(&storagegateway.DescribeGatewayInformationInput{ - GatewayARN: aws.String(d.Id()), - }) + _, err := conn.DescribeGatewayInformation(gwInput) if err != nil { if isAWSErr(err, storagegateway.ErrorCodeGatewayNotConnected, "") { return resource.RetryableError(err) @@ -228,6 +235,9 @@ func resourceAwsStorageGatewayGatewayCreate(d *schema.ResourceData, meta interfa return nil }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeGatewayInformation(gwInput) + } if err != nil { return fmt.Errorf("error waiting for Storage Gateway Gateway activation: %s", err) } @@ -283,6 +293,10 @@ func resourceAwsStorageGatewayGatewayRead(d *schema.ResourceData, meta interface return fmt.Errorf("error reading Storage Gateway Gateway: %s", err) } + if err := d.Set("tags", keyvaluetags.StoragegatewayKeyValueTags(output.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + smbSettingsInput := &storagegateway.DescribeSMBSettingsInput{ GatewayARN: aws.String(d.Id()), } @@ -378,6 +392,13 @@ func resourceAwsStorageGatewayGatewayUpdate(d *schema.ResourceData, meta interfa } } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.StoragegatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + if d.HasChange("smb_active_directory_settings") { l := d.Get("smb_active_directory_settings").([]interface{}) m := l[0].(map[string]interface{}) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_nfs_file_share.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_nfs_file_share.go index 4e80b00454e..289b1c2bd1d 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_nfs_file_share.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_nfs_file_share.go @@ -7,9 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsStorageGatewayNfsFileShare() *schema.Resource { @@ -151,6 +152,7 @@ func resourceAwsStorageGatewayNfsFileShare() *schema.Resource { "RootSquash", }, false), }, + "tags": tagsSchema(), }, } } @@ -172,6 +174,7 @@ func resourceAwsStorageGatewayNfsFileShareCreate(d *schema.ResourceData, meta in RequesterPays: aws.Bool(d.Get("requester_pays").(bool)), Role: aws.String(d.Get("role_arn").(string)), Squash: aws.String(d.Get("squash").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().StoragegatewayTags(), } if v, ok := d.GetOk("kms_key_arn"); ok && v.(string) != "" { @@ -228,7 +231,8 @@ func resourceAwsStorageGatewayNfsFileShareRead(d *schema.ResourceData, meta inte fileshare := output.NFSFileShareInfoList[0] - d.Set("arn", fileshare.FileShareARN) + arn := fileshare.FileShareARN + d.Set("arn", arn) if err := d.Set("client_list", schema.NewSet(schema.HashString, flattenStringList(fileshare.ClientList))); err != nil { return fmt.Errorf("error setting client_list: %s", err) @@ -253,12 +257,27 @@ func resourceAwsStorageGatewayNfsFileShareRead(d *schema.ResourceData, meta inte d.Set("role_arn", fileshare.Role) d.Set("squash", fileshare.Squash) + tags, err := keyvaluetags.StoragegatewayListTags(conn, *arn) + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", *arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } func resourceAwsStorageGatewayNfsFileShareUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).storagegatewayconn + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.StoragegatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + input := &storagegateway.UpdateNFSFileShareInput{ ClientList: expandStringSet(d.Get("client_list").(*schema.Set)), DefaultStorageClass: aws.String(d.Get("default_storage_class").(string)), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_smb_file_share.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_smb_file_share.go index 1da867bbb59..d4512b156fc 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_smb_file_share.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_smb_file_share.go @@ -7,9 +7,10 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { @@ -123,6 +124,7 @@ func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "tags": tagsSchema(), }, } } @@ -144,6 +146,7 @@ func resourceAwsStorageGatewaySmbFileShareCreate(d *schema.ResourceData, meta in RequesterPays: aws.Bool(d.Get("requester_pays").(bool)), Role: aws.String(d.Get("role_arn").(string)), ValidUserList: expandStringSet(d.Get("valid_user_list").(*schema.Set)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().StoragegatewayTags(), } if v, ok := d.GetOk("kms_key_arn"); ok && v.(string) != "" { @@ -200,7 +203,8 @@ func resourceAwsStorageGatewaySmbFileShareRead(d *schema.ResourceData, meta inte fileshare := output.SMBFileShareInfoList[0] - d.Set("arn", fileshare.FileShareARN) + arn := fileshare.FileShareARN + d.Set("arn", arn) d.Set("authentication", fileshare.Authentication) d.Set("default_storage_class", fileshare.DefaultStorageClass) d.Set("fileshare_id", fileshare.FileShareId) @@ -224,12 +228,27 @@ func resourceAwsStorageGatewaySmbFileShareRead(d *schema.ResourceData, meta inte return fmt.Errorf("error setting valid_user_list: %s", err) } + tags, err := keyvaluetags.StoragegatewayListTags(conn, *arn) + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", *arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } func resourceAwsStorageGatewaySmbFileShareUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).storagegatewayconn + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.StoragegatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + input := &storagegateway.UpdateSMBFileShareInput{ DefaultStorageClass: aws.String(d.Get("default_storage_class").(string)), FileShareARN: aws.String(d.Id()), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_upload_buffer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_upload_buffer.go index 4864c3f6567..cd86607034b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_upload_buffer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_upload_buffer.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsStorageGatewayUploadBuffer() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_working_storage.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_working_storage.go index 52c5787c3c5..72d520d696f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_working_storage.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_storagegateway_working_storage.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/storagegateway" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsStorageGatewayWorkingStorage() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go index 9941b75eb38..4615e89eee2 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go @@ -8,8 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSubnet() *schema.Resource { @@ -24,7 +25,7 @@ func resourceAwsSubnet() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), }, SchemaVersion: 1, @@ -129,7 +130,7 @@ func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { Pending: []string{"pending"}, Target: []string{"available"}, Refresh: SubnetStateRefreshFunc(conn, *subnet.SubnetId), - Timeout: 10 * time.Minute, + Timeout: d.Timeout(schema.TimeoutCreate), } _, err = stateConf.WaitForState() @@ -140,11 +141,75 @@ func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { d.Id(), err) } - return resourceAwsSubnetUpdate(d, meta) + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + // Handle EC2 eventual consistency on creation + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), nil, v) + + if isAWSErr(err, "InvalidSubnetID.NotFound", "") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + err = keyvaluetags.Ec2UpdateTags(conn, d.Id(), nil, v) + } + + if err != nil { + return fmt.Errorf("error adding tags: %s", err) + } + + d.SetPartial("tags") + } + + // You cannot modify multiple subnet attributes in the same request. 
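// As the comment above says, ModifySubnetAttribute accepts a single attribute per
// request, which is why the create path issues one call per enabled flag rather
// than batching them. A stripped-down sketch of that sequencing; the wrapper
// function is hypothetical, while the EC2 calls match the ones in this diff.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func enableSubnetFlags(conn *ec2.EC2, subnetID string, assignIPv6OnCreate, mapPublicIPOnLaunch bool) error {
	if assignIPv6OnCreate {
		// First request: only the IPv6-on-creation attribute.
		if _, err := conn.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
			AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
			SubnetId:                    aws.String(subnetID),
		}); err != nil {
			return err
		}
	}
	if mapPublicIPOnLaunch {
		// Second, separate request: the API rejects combined attribute updates.
		if _, err := conn.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
			MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
			SubnetId:            aws.String(subnetID),
		}); err != nil {
			return err
		}
	}
	return nil
}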
+ // Reference: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySubnetAttribute.html + + if d.Get("assign_ipv6_address_on_creation").(bool) { + input := &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String(d.Id()), + } + + if _, err := conn.ModifySubnetAttribute(input); err != nil { + return fmt.Errorf("error enabling EC2 Subnet (%s) assign IPv6 address on creation: %s", d.Id(), err) + } + + d.SetPartial("assign_ipv6_address_on_creation") + } + + if d.Get("map_public_ip_on_launch").(bool) { + input := &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String(d.Id()), + } + + if _, err := conn.ModifySubnetAttribute(input); err != nil { + return fmt.Errorf("error enabling EC2 Subnet (%s) map public IP on launch: %s", d.Id(), err) + } + + d.SetPartial("map_public_ip_on_launch") + } + + d.Partial(false) + + return resourceAwsSubnetRead(d, meta) } func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn + ignoreTags := meta.(*AWSClient).ignoreTags + ignoreTagPrefixes := meta.(*AWSClient).ignoreTagPrefixes resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ SubnetIds: []*string{aws.String(d.Id())}, @@ -184,7 +249,11 @@ func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("arn", subnet.SubnetArn) - d.Set("tags", tagsToMap(subnet.Tags)) + + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(subnet.Tags).IgnoreAws().IgnorePrefixes(ignoreTagPrefixes).Ignore(ignoreTags).Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("owner_id", subnet.OwnerId) return nil @@ -195,9 +264,13 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { d.Partial(true) - if err := setTags(conn, d); err != nil { - return err - } else { + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating EC2 Subnet (%s) tags: %s", d.Id(), err) + } + d.SetPartial("tags") } @@ -220,9 +293,7 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { } } - // We have to be careful here to not go through a change of association if this is a new resource - // A New resource here would denote that the Update func is called by the Create func - if d.HasChange("ipv6_cidr_block") && !d.IsNewResource() { + if d.HasChange("ipv6_cidr_block") { // We need to handle that we disassociate the IPv6 CIDR block before we try and associate the new one // This could be an issue as, we could error out when we try and add the new one // We may need to roll back the state and reattach the old one if this is the case @@ -319,8 +390,8 @@ func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { log.Printf("[INFO] Deleting subnet: %s", d.Id()) - if err := deleteLingeringLambdaENIs(conn, d, "subnet-id"); err != nil { - return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) + if err := deleteLingeringLambdaENIs(conn, "subnet-id", d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error deleting Lambda ENIs using subnet (%s): %s", d.Id(), err) } req := &ec2.DeleteSubnetInput{ @@ -330,7 +401,7 @@ func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { wait := resource.StateChangeConf{ Pending: []string{"pending"}, Target: 
[]string{"destroyed"}, - Timeout: 10 * time.Minute, + Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 1 * time.Second, Refresh: func() (interface{}, string, error) { _, err := conn.DeleteSubnet(req) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go index 0e0f19cf693..58c76941e1f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsSubnetMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_swf_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_swf_domain.go index 8668ec108fd..8e35b96af8b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_swf_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_swf_domain.go @@ -7,14 +7,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/swf" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSwfDomain() *schema.Resource { return &schema.Resource{ Create: resourceAwsSwfDomainCreate, Read: resourceAwsSwfDomainRead, + Update: resourceAwsSwfDomainUpdate, Delete: resourceAwsSwfDomainDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -52,6 +54,11 @@ func resourceAwsSwfDomain() *schema.Resource { return }, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } @@ -72,6 +79,7 @@ func resourceAwsSwfDomainCreate(d *schema.ResourceData, meta interface{}) error input := &swf.RegisterDomainInput{ Name: aws.String(name), WorkflowExecutionRetentionPeriodInDays: aws.String(d.Get("workflow_execution_retention_period_in_days").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().SwfTags(), } if v, ok := d.GetOk("description"); ok { @@ -111,6 +119,18 @@ func resourceAwsSwfDomainRead(d *schema.ResourceData, meta interface{}) error { return nil } + arn := *resp.DomainInfo.Arn + tags, err := keyvaluetags.SwfListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for SWF Domain (%s): %s", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("arn", resp.DomainInfo.Arn) d.Set("name", resp.DomainInfo.Name) d.Set("description", resp.DomainInfo.Description) d.Set("workflow_execution_retention_period_in_days", resp.Configuration.WorkflowExecutionRetentionPeriodInDays) @@ -118,6 +138,20 @@ func resourceAwsSwfDomainRead(d *schema.ResourceData, meta interface{}) error { return nil } +func resourceAwsSwfDomainUpdate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).swfconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SwfUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating SWF Domain (%s) tags: %s", d.Id(), err) + } + } + + return resourceAwsSwfDomainRead(d, meta) +} + func resourceAwsSwfDomainDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).swfconn diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_server.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_server.go index fa6b1580f43..f2db50b259d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_server.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_server.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/transfer" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsTransferServer() *schema.Resource { @@ -292,7 +292,7 @@ func waitForTransferServerDeletion(conn *transfer.Transfer, serverID string) err ServerId: aws.String(serverID), } - return resource.Retry(10*time.Minute, func() *resource.RetryError { + err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DescribeServer(params) if isAWSErr(err, transfer.ErrCodeResourceNotFoundException, "") { @@ -305,6 +305,19 @@ func waitForTransferServerDeletion(conn *transfer.Transfer, serverID string) err return resource.RetryableError(fmt.Errorf("Transfer Server (%s) still exists", serverID)) }) + if isResourceTimeoutError(err) { + _, err = conn.DescribeServer(params) + if isAWSErr(err, transfer.ErrCodeResourceNotFoundException, "") { + return nil + } + if err == nil { + return fmt.Errorf("Transfer server (%s) still exists", serverID) + } + } + if err != nil { + return fmt.Errorf("Error waiting for transfer server deletion: %s", err) + } + return nil } func deleteTransferUsers(conn *transfer.Transfer, serverID string, nextToken *string) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_ssh_key.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_ssh_key.go index 332916c89c7..1d217fb104d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_ssh_key.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_ssh_key.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/transfer" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsTransferSshKey() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_user.go 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_user.go index dddcb86d9d9..cd9df67f417 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_user.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_transfer_user.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/transfer" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsTransferUser() *schema.Resource { @@ -231,7 +231,7 @@ func waitForTransferUserDeletion(conn *transfer.Transfer, serverID, userName str UserName: aws.String(userName), } - return resource.Retry(10*time.Minute, func() *resource.RetryError { + err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DescribeUser(params) if isAWSErr(err, transfer.ErrCodeResourceNotFoundException, "") { @@ -244,4 +244,15 @@ func waitForTransferUserDeletion(conn *transfer.Transfer, serverID, userName str return resource.RetryableError(fmt.Errorf("Transfer User (%s) for Server (%s) still exists", userName, serverID)) }) + + if isResourceTimeoutError(err) { + _, err = conn.DescribeUser(params) + } + if isAWSErr(err, transfer.ErrCodeResourceNotFoundException, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error decoding transfer user ID: %s", err) + } + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go index cd4ee44c1ad..b39720d3ba1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVolumeAttachment() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go index 5e07be0af68..771c0d2abdb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go @@ -9,9 +9,10 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsVpc() *schema.Resource { @@ -171,12 +172,102 @@ func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { } } - // Update our attributes and return - return resourceAwsVpcUpdate(d, meta) + // You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute. + // Reference: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyVpcAttribute.html + + if d.Get("enable_dns_hostnames").(bool) { + input := &ec2.ModifyVpcAttributeInput{ + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + VpcId: aws.String(d.Id()), + } + + if _, err := conn.ModifyVpcAttribute(input); err != nil { + return fmt.Errorf("error enabling VPC (%s) DNS hostnames: %s", d.Id(), err) + } + + d.SetPartial("enable_dns_hostnames") + } + + // By default, only the enableDnsSupport attribute is set to true in a VPC created any other way. + // Reference: https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-support + + if !d.Get("enable_dns_support").(bool) { + input := &ec2.ModifyVpcAttributeInput{ + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(false), + }, + VpcId: aws.String(d.Id()), + } + + if _, err := conn.ModifyVpcAttribute(input); err != nil { + return fmt.Errorf("error disabling VPC (%s) DNS support: %s", d.Id(), err) + } + + d.SetPartial("enable_dns_support") + } + + if d.Get("enable_classiclink").(bool) { + input := &ec2.EnableVpcClassicLinkInput{ + VpcId: aws.String(d.Id()), + } + + if _, err := conn.EnableVpcClassicLink(input); err != nil { + return fmt.Errorf("error enabling VPC (%s) ClassicLink: %s", d.Id(), err) + } + + d.SetPartial("enable_classiclink") + } + + if d.Get("enable_classiclink_dns_support").(bool) { + input := &ec2.EnableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String(d.Id()), + } + + if _, err := conn.EnableVpcClassicLinkDnsSupport(input); err != nil { + return fmt.Errorf("error enabling VPC (%s) ClassicLink DNS support: %s", d.Id(), err) + } + + d.SetPartial("enable_classiclink_dns_support") + } + + if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { + // Handle EC2 eventual consistency on creation + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), nil, v) + + if isAWSErr(err, "InvalidVpcID.NotFound", "") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if isResourceTimeoutError(err) { + err = keyvaluetags.Ec2UpdateTags(conn, d.Id(), nil, v) + } + + if err != nil { + return fmt.Errorf("error adding tags: %s", err) + } + + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsVpcRead(d, meta) } func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn + ignoreTags := meta.(*AWSClient).ignoreTags + ignoreTagPrefixes := meta.(*AWSClient).ignoreTagPrefixes // Refresh the VPC state vpcRaw, _, err := VPCStateRefreshFunc(conn, d.Id())() @@ -205,8 +296,9 @@ func resourceAwsVpcRead(d 
*schema.ResourceData, meta interface{}) error { }.String() d.Set("arn", arn) - // Tags - d.Set("tags", tagsToMap(vpc.Tags)) + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpc.Tags).IgnoreAws().IgnorePrefixes(ignoreTagPrefixes).Ignore(ignoreTags).Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } d.Set("owner_id", vpc.OwnerId) @@ -404,7 +496,7 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { d.SetPartial("enable_classiclink_dns_support") } - if d.HasChange("assign_generated_ipv6_cidr_block") && !d.IsNewResource() { + if d.HasChange("assign_generated_ipv6_cidr_block") { toAssign := d.Get("assign_generated_ipv6_cidr_block").(bool) log.Printf("[INFO] Modifying assign_generated_ipv6_cidr_block to %#v", toAssign) @@ -445,7 +537,7 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { d.SetPartial("assign_generated_ipv6_cidr_block") } - if d.HasChange("instance_tenancy") && !d.IsNewResource() { + if d.HasChange("instance_tenancy") { modifyOpts := &ec2.ModifyVpcTenancyInput{ VpcId: aws.String(vpcid), InstanceTenancy: aws.String(d.Get("instance_tenancy").(string)), @@ -460,9 +552,13 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { d.SetPartial("instance_tenancy") } - if err := setTags(conn, d); err != nil { - return err - } else { + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + d.SetPartial("tags") } @@ -478,26 +574,31 @@ func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error { } log.Printf("[INFO] Deleting VPC: %s", d.Id()) - return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteVpc(deleteVpcOpts) if err == nil { return nil } - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - switch ec2err.Code() { - case "InvalidVpcID.NotFound": + if isAWSErr(err, "InvalidVpcID.NotFound", "") { return nil - case "DependencyViolation": + } + if isAWSErr(err, "DependencyViolation", "") { return resource.RetryableError(err) } - return resource.NonRetryableError(fmt.Errorf("Error deleting VPC: %s", err)) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteVpc(deleteVpcOpts) + if isAWSErr(err, "InvalidVpcID.NotFound", "") { + return nil + } + } + + if err != nil { + return fmt.Errorf("Error deleting VPC: %s", err) + } + return nil } func resourceAwsVpcCustomizeDiff(diff *schema.ResourceDiff, v interface{}) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go index c4baa040644..66656ac470e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcDhcpOptions() *schema.Resource { @@ -196,7 +196,7 @@ func resourceAwsVpcDhcpOptionsUpdate(d *schema.ResourceData, meta interface{}) e func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - return resource.Retry(3*time.Minute, func() *resource.RetryError { + err := resource.Retry(3*time.Minute, func() *resource.RetryError { log.Printf("[INFO] Deleting DHCP Options ID %s...", d.Id()) _, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ DhcpOptionsId: aws.String(d.Id()), @@ -239,6 +239,13 @@ func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) e return resource.NonRetryableError(err) } }) + + if isResourceTimeoutError(err) { + _, err = conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String(d.Id()), + }) + } + return err } func findVPCsByDHCPOptionsID(conn *ec2.EC2, id string) ([]*ec2.Vpc, error) { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go index 6970aa93fca..312508bbe6e 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcDhcpOptionsAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go index aa5c4ec36c5..55e298aecd4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go @@ -8,10 +8,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsVpcEndpoint() *schema.Resource { @@ -25,25 +25,40 @@ func resourceAwsVpcEndpoint() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "vpc_endpoint_type": { - Type: schema.TypeString, + "auto_accept": { + Type: schema.TypeBool, Optional: true, - ForceNew: true, - Default: ec2.VpcEndpointTypeGateway, - ValidateFunc: validation.StringInSlice([]string{ - ec2.VpcEndpointTypeGateway, - ec2.VpcEndpointTypeInterface, - }, false), }, - "service_name": 
{ + "cidr_blocks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dns_entry": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "owner_id": { Type: schema.TypeString, - Required: true, - ForceNew: true, + Computed: true, }, "policy": { Type: schema.TypeString, @@ -56,14 +71,20 @@ func resourceAwsVpcEndpoint() *schema.Resource { return json }, }, - "route_table_ids": { - Type: schema.TypeSet, + "prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + "private_dns_enabled": { + Type: schema.TypeBool, Optional: true, + Default: false, + }, + "requester_managed": { + Type: schema.TypeBool, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, - "subnet_ids": { + "route_table_ids": { Type: schema.TypeSet, Optional: true, Computed: true, @@ -77,49 +98,37 @@ func resourceAwsVpcEndpoint() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "private_dns_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "state": { + "service_name": { Type: schema.TypeString, - Computed: true, + Required: true, + ForceNew: true, }, - "prefix_list_id": { + "state": { Type: schema.TypeString, Computed: true, }, - "cidr_blocks": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "network_interface_ids": { + "subnet_ids": { Type: schema.TypeSet, + Optional: true, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "dns_entry": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - "hosted_zone_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "auto_accept": { - Type: schema.TypeBool, + "tags": tagsSchema(), + "vpc_endpoint_type": { + Type: schema.TypeString, Optional: true, + ForceNew: true, + Default: ec2.VpcEndpointTypeGateway, + ValidateFunc: validation.StringInSlice([]string{ + ec2.VpcEndpointTypeGateway, + ec2.VpcEndpointTypeInterface, + }, false), + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, }, }, @@ -177,15 +186,19 @@ func resourceAwsVpcEndpointCreate(d *schema.ResourceData, meta interface{}) erro return err } + if err := setTags(conn, d); err != nil { + return err + } + return resourceAwsVpcEndpointRead(d, meta) } func resourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - vpce, state, err := vpcEndpointStateRefresh(conn, d.Id())() + vpceRaw, state, err := vpcEndpointStateRefresh(conn, d.Id())() if err != nil && state != "failed" { - return fmt.Errorf("Error reading VPC Endpoint: %s", err) + return fmt.Errorf("error reading VPC Endpoint (%s): %s", d.Id(), err) } terminalStates := map[string]bool{ @@ -201,7 +214,75 @@ func resourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error return nil } - return vpcEndpointAttributes(d, vpce.(*ec2.VpcEndpoint), conn) + vpce := vpceRaw.(*ec2.VpcEndpoint) + + serviceName := 
aws.StringValue(vpce.ServiceName) + d.Set("service_name", serviceName) + d.Set("state", vpce.State) + d.Set("vpc_id", vpce.VpcId) + + respPl, err := conn.DescribePrefixLists(&ec2.DescribePrefixListsInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "prefix-list-name": serviceName, + }), + }) + if err != nil { + return fmt.Errorf("error reading Prefix List (%s): %s", serviceName, err) + } + if respPl == nil || len(respPl.PrefixLists) == 0 { + d.Set("cidr_blocks", []interface{}{}) + } else if len(respPl.PrefixLists) > 1 { + return fmt.Errorf("multiple prefix lists associated with the service name '%s'. Unexpected", serviceName) + } else { + pl := respPl.PrefixLists[0] + + d.Set("prefix_list_id", pl.PrefixListId) + err = d.Set("cidr_blocks", flattenStringList(pl.Cidrs)) + if err != nil { + return fmt.Errorf("error setting cidr_blocks: %s", err) + } + } + + err = d.Set("dns_entry", flattenVpcEndpointDnsEntries(vpce.DnsEntries)) + if err != nil { + return fmt.Errorf("error setting dns_entry: %s", err) + } + err = d.Set("network_interface_ids", flattenStringSet(vpce.NetworkInterfaceIds)) + if err != nil { + return fmt.Errorf("error setting network_interface_ids: %s", err) + } + d.Set("owner_id", vpce.OwnerId) + policy, err := structure.NormalizeJsonString(aws.StringValue(vpce.PolicyDocument)) + if err != nil { + return fmt.Errorf("policy contains an invalid JSON: %s", err) + } + d.Set("policy", policy) + d.Set("private_dns_enabled", vpce.PrivateDnsEnabled) + err = d.Set("route_table_ids", flattenStringSet(vpce.RouteTableIds)) + if err != nil { + return fmt.Errorf("error setting route_table_ids: %s", err) + } + d.Set("requester_managed", vpce.RequesterManaged) + err = d.Set("security_group_ids", flattenVpcEndpointSecurityGroupIds(vpce.Groups)) + if err != nil { + return fmt.Errorf("error setting security_group_ids: %s", err) + } + err = d.Set("subnet_ids", flattenStringSet(vpce.SubnetIds)) + if err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + err = d.Set("tags", tagsToMap(vpce.Tags)) + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + // VPC endpoints don't have types in GovCloud, so set type to default if empty + if vpceType := aws.StringValue(vpce.VpcEndpointType); vpceType == "" { + d.Set("vpc_endpoint_type", ec2.VpcEndpointTypeGateway) + } else { + d.Set("vpc_endpoint_type", vpceType) + } + + return nil } func resourceAwsVpcEndpointUpdate(d *schema.ResourceData, meta interface{}) error { @@ -247,6 +328,10 @@ func resourceAwsVpcEndpointUpdate(d *schema.ResourceData, meta interface{}) erro return err } + if err := setTags(conn, d); err != nil { + return err + } + return resourceAwsVpcEndpointRead(d, meta) } @@ -367,72 +452,6 @@ func vpcEndpointWaitUntilDeleted(conn *ec2.EC2, vpceId string, timeout time.Dura return err } -func vpcEndpointAttributes(d *schema.ResourceData, vpce *ec2.VpcEndpoint, conn *ec2.EC2) error { - d.Set("state", vpce.State) - d.Set("vpc_id", vpce.VpcId) - - serviceName := aws.StringValue(vpce.ServiceName) - d.Set("service_name", serviceName) - // VPC endpoints don't have types in GovCloud, so set type to default if empty - if aws.StringValue(vpce.VpcEndpointType) == "" { - d.Set("vpc_endpoint_type", ec2.VpcEndpointTypeGateway) - } else { - d.Set("vpc_endpoint_type", vpce.VpcEndpointType) - } - - policy, err := structure.NormalizeJsonString(aws.StringValue(vpce.PolicyDocument)) - if err != nil { - return fmt.Errorf("policy contains an invalid JSON: %s", err) - } - d.Set("policy", policy) - - 
d.Set("route_table_ids", flattenStringList(vpce.RouteTableIds)) - - req := &ec2.DescribePrefixListsInput{} - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "prefix-list-name": serviceName, - }, - ) - resp, err := conn.DescribePrefixLists(req) - if err != nil { - return err - } - if resp != nil && len(resp.PrefixLists) > 0 { - if len(resp.PrefixLists) > 1 { - return fmt.Errorf("multiple prefix lists associated with the service name '%s'. Unexpected", serviceName) - } - - pl := resp.PrefixLists[0] - d.Set("prefix_list_id", pl.PrefixListId) - d.Set("cidr_blocks", flattenStringList(pl.Cidrs)) - } else { - d.Set("cidr_blocks", make([]string, 0)) - } - - d.Set("subnet_ids", flattenStringList(vpce.SubnetIds)) - d.Set("network_interface_ids", flattenStringList(vpce.NetworkInterfaceIds)) - - sgIds := make([]interface{}, 0, len(vpce.Groups)) - for _, group := range vpce.Groups { - sgIds = append(sgIds, aws.StringValue(group.GroupId)) - } - d.Set("security_group_ids", sgIds) - - d.Set("private_dns_enabled", vpce.PrivateDnsEnabled) - - dnsEntries := make([]interface{}, len(vpce.DnsEntries)) - for i, entry := range vpce.DnsEntries { - m := make(map[string]interface{}) - m["dns_name"] = aws.StringValue(entry.DnsName) - m["hosted_zone_id"] = aws.StringValue(entry.HostedZoneId) - dnsEntries[i] = m - } - d.Set("dns_entry", dnsEntries) - - return nil -} - func setVpcEndpointCreateList(d *schema.ResourceData, key string, c *[]*string) { if v, ok := d.GetOk(key); ok { list := v.(*schema.Set).List() @@ -459,3 +478,26 @@ func setVpcEndpointUpdateLists(d *schema.ResourceData, key string, a, r *[]*stri } } } + +func flattenVpcEndpointDnsEntries(dnsEntries []*ec2.DnsEntry) []interface{} { + vDnsEntries := []interface{}{} + + for _, dnsEntry := range dnsEntries { + vDnsEntries = append(vDnsEntries, map[string]interface{}{ + "dns_name": aws.StringValue(dnsEntry.DnsName), + "hosted_zone_id": aws.StringValue(dnsEntry.HostedZoneId), + }) + } + + return vDnsEntries +} + +func flattenVpcEndpointSecurityGroupIds(groups []*ec2.SecurityGroupIdentifier) *schema.Set { + vSecurityGroupIds := []interface{}{} + + for _, group := range groups { + vSecurityGroupIds = append(vSecurityGroupIds, aws.StringValue(group.GroupId)) + } + + return schema.NewSet(schema.HashString, vSecurityGroupIds) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_connection_notification.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_connection_notification.go index be6f990357a..171c10f5ec4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_connection_notification.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_connection_notification.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcEndpointConnectionNotification() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go index 
60a80bf0640..448848ddfcb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go @@ -3,12 +3,13 @@ package aws import ( "fmt" "log" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcEndpointRouteTableAssociation() *schema.Resource { @@ -17,7 +18,7 @@ func resourceAwsVpcEndpointRouteTableAssociation() *schema.Resource { Read: resourceAwsVpcEndpointRouteTableAssociationRead, Delete: resourceAwsVpcEndpointRouteTableAssociationDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceAwsVpcEndpointRouteTableAssociationImport, }, Schema: map[string]*schema.Schema{ @@ -123,6 +124,23 @@ func resourceAwsVpcEndpointRouteTableAssociationDelete(d *schema.ResourceData, m return nil } +func resourceAwsVpcEndpointRouteTableAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Wrong format of resource: %s. Please follow 'vpc-endpoint-id/route-table-id'", d.Id()) + } + + vpceId := parts[0] + rtId := parts[1] + log.Printf("[DEBUG] Importing VPC Endpoint (%s) Route Table (%s) association", vpceId, rtId) + + d.SetId(vpcEndpointIdRouteTableIdHash(vpceId, rtId)) + d.Set("vpc_endpoint_id", vpceId) + d.Set("route_table_id", rtId) + + return []*schema.ResourceData{d}, nil +} + func findResourceVpcEndpoint(conn *ec2.EC2, id string) (*ec2.VpcEndpoint, error) { resp, err := conn.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{ VpcEndpointIds: aws.StringSlice([]string{id}), diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service.go index 0fbaa832052..d3c89ffdfcf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcEndpointService() *schema.Resource { @@ -27,13 +27,6 @@ func resourceAwsVpcEndpointService() *schema.Resource { Type: schema.TypeBool, Required: true, }, - "network_load_balancer_arns": { - Type: schema.TypeSet, - Required: true, - MinItems: 1, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, "allowed_principals": { Type: schema.TypeSet, Optional: true, @@ -41,34 +34,46 @@ func resourceAwsVpcEndpointService() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: 
schema.HashString, }, - "state": { - Type: schema.TypeString, + "availability_zones": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, + Set: schema.HashString, }, - "service_name": { - Type: schema.TypeString, + "base_endpoint_dns_names": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, + Set: schema.HashString, }, - "service_type": { - Type: schema.TypeString, + "manages_vpc_endpoints": { + Type: schema.TypeBool, Computed: true, }, - "availability_zones": { + "network_load_balancer_arns": { Type: schema.TypeSet, + Required: true, + MinItems: 1, Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, Set: schema.HashString, }, "private_dns_name": { Type: schema.TypeString, Computed: true, }, - "base_endpoint_dns_names": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + "service_type": { + Type: schema.TypeString, Computed: true, - Set: schema.HashString, }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), }, } } @@ -99,9 +104,9 @@ func resourceAwsVpcEndpointServiceCreate(d *schema.ResourceData, meta interface{ func resourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - svcCfg, state, err := vpcEndpointServiceStateRefresh(conn, d.Id())() + svcCfgRaw, state, err := vpcEndpointServiceStateRefresh(conn, d.Id())() if err != nil && state != ec2.ServiceStateFailed { - return fmt.Errorf("Error reading VPC Endpoint Service: %s", err.Error()) + return fmt.Errorf("error reading VPC Endpoint Service (%s): %s", d.Id(), err.Error()) } terminalStates := map[string]bool{ @@ -115,7 +120,43 @@ func resourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) return nil } - return vpcEndpointServiceAttributes(d, svcCfg.(*ec2.ServiceConfiguration), conn) + svcCfg := svcCfgRaw.(*ec2.ServiceConfiguration) + d.Set("acceptance_required", svcCfg.AcceptanceRequired) + err = d.Set("network_load_balancer_arns", flattenStringSet(svcCfg.NetworkLoadBalancerArns)) + if err != nil { + return fmt.Errorf("error setting network_load_balancer_arns: %s", err) + } + err = d.Set("availability_zones", flattenStringSet(svcCfg.AvailabilityZones)) + if err != nil { + return fmt.Errorf("error setting availability_zones: %s", err) + } + err = d.Set("base_endpoint_dns_names", flattenStringSet(svcCfg.BaseEndpointDnsNames)) + if err != nil { + return fmt.Errorf("error setting base_endpoint_dns_names: %s", err) + } + d.Set("manages_vpc_endpoints", svcCfg.ManagesVpcEndpoints) + d.Set("private_dns_name", svcCfg.PrivateDnsName) + d.Set("service_name", svcCfg.ServiceName) + d.Set("service_type", svcCfg.ServiceType[0].ServiceType) + d.Set("state", svcCfg.ServiceState) + err = d.Set("tags", tagsToMap(svcCfg.Tags)) + if err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + resp, err := conn.DescribeVpcEndpointServicePermissions(&ec2.DescribeVpcEndpointServicePermissionsInput{ + ServiceId: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("error reading VPC Endpoint Service permissions (%s): %s", d.Id(), err.Error()) + } + + err = d.Set("allowed_principals", flattenVpcEndpointServiceAllowedPrincipals(resp.AllowedPrincipals)) + if err != nil { + return fmt.Errorf("error setting allowed_principals: %s", err) + } + + return nil } func resourceAwsVpcEndpointServiceUpdate(d *schema.ResourceData, meta interface{}) 
error { @@ -161,6 +202,11 @@ func resourceAwsVpcEndpointServiceUpdate(d *schema.ResourceData, meta interface{ d.SetPartial("allowed_principals") } + if err := setTags(conn, d); err != nil { + return err + } + d.SetPartial("tags") + d.Partial(false) return resourceAwsVpcEndpointServiceRead(d, meta) } @@ -242,27 +288,6 @@ func waitForVpcEndpointServiceDeletion(conn *ec2.EC2, serviceID string) error { return err } -func vpcEndpointServiceAttributes(d *schema.ResourceData, svcCfg *ec2.ServiceConfiguration, conn *ec2.EC2) error { - d.Set("acceptance_required", svcCfg.AcceptanceRequired) - d.Set("network_load_balancer_arns", flattenStringList(svcCfg.NetworkLoadBalancerArns)) - d.Set("state", svcCfg.ServiceState) - d.Set("service_name", svcCfg.ServiceName) - d.Set("service_type", svcCfg.ServiceType[0].ServiceType) - d.Set("availability_zones", flattenStringList(svcCfg.AvailabilityZones)) - d.Set("private_dns_name", svcCfg.PrivateDnsName) - d.Set("base_endpoint_dns_names", flattenStringList(svcCfg.BaseEndpointDnsNames)) - - resp, err := conn.DescribeVpcEndpointServicePermissions(&ec2.DescribeVpcEndpointServicePermissionsInput{ - ServiceId: aws.String(d.Id()), - }) - if err != nil { - return err - } - d.Set("allowed_principals", flattenVpcEndpointServiceAllowedPrincipals(resp.AllowedPrincipals)) - - return nil -} - func setVpcEndpointServiceUpdateLists(d *schema.ResourceData, key string, a, r *[]*string) bool { if !d.HasChange(key) { return false @@ -284,3 +309,15 @@ func setVpcEndpointServiceUpdateLists(d *schema.ResourceData, key string, a, r * return true } + +func flattenVpcEndpointServiceAllowedPrincipals(allowedPrincipals []*ec2.AllowedPrincipal) *schema.Set { + vPrincipals := []interface{}{} + + for _, allowedPrincipal := range allowedPrincipals { + if allowedPrincipal.Principal != nil { + vPrincipals = append(vPrincipals, aws.StringValue(allowedPrincipal.Principal)) + } + } + + return schema.NewSet(schema.HashString, vPrincipals) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service_allowed_principal.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service_allowed_principal.go index f34a054f817..7b43a751ab3 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service_allowed_principal.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_service_allowed_principal.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcEndpointServiceAllowedPrincipal() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_subnet_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_subnet_association.go index 0527eb14374..45d952c7236 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_subnet_association.go +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_subnet_association.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcEndpointSubnetAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_ipv4_cidr_block_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_ipv4_cidr_block_association.go index 03708b2cfb0..671543ca115 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_ipv4_cidr_block_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_ipv4_cidr_block_association.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const ( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go index 90738d1f2d1..81abd9e7ab4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsVpcMigrateState( diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go index 2d2c60c9ec7..d0c48dc89e9 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcPeeringConnection() *schema.Resource { @@ -154,21 +154,11 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { 
d.Set("peer_region", pc.AccepterVpcInfo.Region) d.Set("accept_status", pc.Status.Code) - // When the VPC Peering Connection is pending acceptance, - // the details about accepter and/or requester peering - // options would not be included in the response. - if pc.AccepterVpcInfo.PeeringOptions != nil { - err := d.Set("accepter", flattenVpcPeeringConnectionOptions(pc.AccepterVpcInfo.PeeringOptions)) - if err != nil { - return fmt.Errorf("Error setting VPC Peering Connection accepter information: %s", err) - } + if err := d.Set("accepter", flattenVpcPeeringConnectionOptions(pc.AccepterVpcInfo.PeeringOptions)); err != nil { + return fmt.Errorf("Error setting VPC Peering Connection accepter information: %s", err) } - - if pc.RequesterVpcInfo.PeeringOptions != nil { - err := d.Set("requester", flattenVpcPeeringConnectionOptions(pc.RequesterVpcInfo.PeeringOptions)) - if err != nil { - return fmt.Errorf("Error setting VPC Peering Connection requester information: %s", err) - } + if err := d.Set("requester", flattenVpcPeeringConnectionOptions(pc.RequesterVpcInfo.PeeringOptions)); err != nil { + return fmt.Errorf("Error setting VPC Peering Connection requester information: %s", err) } err = d.Set("tags", tagsToMap(pc.Tags)) @@ -190,26 +180,17 @@ func resourceVPCPeeringConnectionAccept(conn *ec2.EC2, id string) (string, error if err != nil { return "", err } - pc := resp.VpcPeeringConnection - return *pc.Status.Code, nil + return aws.StringValue(resp.VpcPeeringConnection.Status.Code), nil } -func resourceAwsVpcPeeringConnectionModifyOptions(d *schema.ResourceData, meta interface{}) error { +func resourceAwsVpcPeeringConnectionModifyOptions(d *schema.ResourceData, meta interface{}, crossRegionPeering bool) error { conn := meta.(*AWSClient).ec2conn req := &ec2.ModifyVpcPeeringConnectionOptionsInput{ - VpcPeeringConnectionId: aws.String(d.Id()), - } - - v := d.Get("accepter").(*schema.Set).List() - if len(v) > 0 { - req.AccepterPeeringConnectionOptions = expandVpcPeeringConnectionOptions(v[0].(map[string]interface{})) - } - - v = d.Get("requester").(*schema.Set).List() - if len(v) > 0 { - req.RequesterPeeringConnectionOptions = expandVpcPeeringConnectionOptions(v[0].(map[string]interface{})) + VpcPeeringConnectionId: aws.String(d.Id()), + AccepterPeeringConnectionOptions: expandVpcPeeringConnectionOptions(d.Get("accepter").(*schema.Set).List(), crossRegionPeering), + RequesterPeeringConnectionOptions: expandVpcPeeringConnectionOptions(d.Get("requester").(*schema.Set).List(), crossRegionPeering), } log.Printf("[DEBUG] Modifying VPC Peering Connection options: %#v", req) @@ -227,7 +208,7 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error d.SetPartial("tags") } - pcRaw, _, err := vpcPeeringConnectionRefreshState(conn, d.Id())() + pcRaw, statusCode, err := vpcPeeringConnectionRefreshState(conn, d.Id())() if err != nil { return fmt.Errorf("Error reading VPC Peering Connection: %s", err) } @@ -238,33 +219,32 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error return nil } - pc := pcRaw.(*ec2.VpcPeeringConnection) - - if _, ok := d.GetOk("auto_accept"); ok { - if pc.Status != nil && *pc.Status.Code == ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance { - status, err := resourceVPCPeeringConnectionAccept(conn, d.Id()) - if err != nil { - return fmt.Errorf("Unable to accept VPC Peering Connection: %s", err) - } - log.Printf("[DEBUG] VPC Peering Connection accept status: %s", status) + if _, ok := d.GetOk("auto_accept"); ok && statusCode == 
ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance { + statusCode, err = resourceVPCPeeringConnectionAccept(conn, d.Id()) + if err != nil { + return fmt.Errorf("Unable to accept VPC Peering Connection: %s", err) } + log.Printf("[DEBUG] VPC Peering Connection accept status: %s", statusCode) } if d.HasChange("accepter") || d.HasChange("requester") { - _, ok := d.GetOk("auto_accept") - if !ok && pc.Status != nil && *pc.Status.Code != "active" { + if statusCode == ec2.VpcPeeringConnectionStateReasonCodeActive || statusCode == ec2.VpcPeeringConnectionStateReasonCodeProvisioning { + pc := pcRaw.(*ec2.VpcPeeringConnection) + crossRegionPeering := false + if aws.StringValue(pc.RequesterVpcInfo.Region) != aws.StringValue(pc.AccepterVpcInfo.Region) { + crossRegionPeering = true + } + if err := resourceAwsVpcPeeringConnectionModifyOptions(d, meta, crossRegionPeering); err != nil { + return fmt.Errorf("Error modifying VPC Peering Connection options: %s", err) + } + } else { return fmt.Errorf("Unable to modify peering options. The VPC Peering Connection "+ "%q is not active. Please set `auto_accept` attribute to `true`, "+ "or activate VPC Peering Connection manually.", d.Id()) } - - if err := resourceAwsVpcPeeringConnectionModifyOptions(d, meta); err != nil { - return fmt.Errorf("Error modifying VPC Peering Connection options: %s", err) - } } - err = vpcPeeringConnectionWaitUntilAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { + if err := vpcPeeringConnectionWaitUntilAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return fmt.Errorf("Error waiting for VPC Peering Connection to become available: %s", err) } @@ -277,31 +257,18 @@ func resourceAwsVPCPeeringDelete(d *schema.ResourceData, meta interface{}) error req := &ec2.DeleteVpcPeeringConnectionInput{ VpcPeeringConnectionId: aws.String(d.Id()), } - log.Printf("[DEBUG] Deleting VPC Peering Connection: %s", req) + _, err := conn.DeleteVpcPeeringConnection(req) + + if isAWSErr(err, "InvalidVpcPeeringConnectionID.NotFound", "") { + return nil + } + if err != nil { - if isAWSErr(err, "InvalidVpcPeeringConnectionID.NotFound", "") { - return nil - } return fmt.Errorf("Error deleting VPC Peering Connection (%s): %s", d.Id(), err) } - // Wait for the vpc peering connection to delete - log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to delete.", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{ - ec2.VpcPeeringConnectionStateReasonCodeActive, - ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance, - ec2.VpcPeeringConnectionStateReasonCodeDeleting, - }, - Target: []string{ - ec2.VpcPeeringConnectionStateReasonCodeRejected, - ec2.VpcPeeringConnectionStateReasonCodeDeleted, - }, - Refresh: vpcPeeringConnectionRefreshState(conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - } - if _, err := stateConf.WaitForState(); err != nil { + if err := waitForEc2VpcPeeringConnectionDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return fmt.Errorf("Error waiting for VPC Peering Connection (%s) to be deleted: %s", d.Id(), err) } @@ -394,3 +361,23 @@ func vpcPeeringConnectionWaitUntilAvailable(conn *ec2.EC2, id string, timeout ti } return nil } + +func waitForEc2VpcPeeringConnectionDeletion(conn *ec2.EC2, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ec2.VpcPeeringConnectionStateReasonCodeActive, + ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance, + 
ec2.VpcPeeringConnectionStateReasonCodeDeleting, + }, + Target: []string{ + ec2.VpcPeeringConnectionStateReasonCodeRejected, + ec2.VpcPeeringConnectionStateReasonCodeDeleted, + }, + Refresh: vpcPeeringConnectionRefreshState(conn, id), + Timeout: timeout, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go index 0dadffd68ac..7ab8a049553 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go @@ -5,7 +5,7 @@ import ( "fmt" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcPeeringConnectionAccepter() *schema.Resource { @@ -20,7 +20,6 @@ func resourceAwsVpcPeeringConnectionAccepter() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Computed: false, }, "auto_accept": { Type: schema.TypeBool, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_options.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_options.go index aca9ca2ec0c..6af902c3f94 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_options.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_options.go @@ -4,8 +4,9 @@ import ( "fmt" "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpcPeeringConnectionOptions() *schema.Resource { @@ -40,7 +41,7 @@ func resourceAwsVpcPeeringConnectionOptionsRead(d *schema.ResourceData, meta int pcRaw, _, err := vpcPeeringConnectionRefreshState(conn, d.Id())() if err != nil { - return fmt.Errorf("Error reading VPC Peering Connection: %s", err.Error()) + return fmt.Errorf("error reading VPC Peering Connection: %s", err) } if pcRaw == nil { @@ -53,26 +54,38 @@ func resourceAwsVpcPeeringConnectionOptionsRead(d *schema.ResourceData, meta int d.Set("vpc_peering_connection_id", pc.VpcPeeringConnectionId) - if pc != nil && pc.AccepterVpcInfo != nil && pc.AccepterVpcInfo.PeeringOptions != nil { - err := d.Set("accepter", flattenVpcPeeringConnectionOptions(pc.AccepterVpcInfo.PeeringOptions)) - if err != nil { - return fmt.Errorf("Error setting VPC Peering Connection Options accepter information: %s", err.Error()) - } + if err := d.Set("accepter", flattenVpcPeeringConnectionOptions(pc.AccepterVpcInfo.PeeringOptions)); err != nil { + return fmt.Errorf("error setting VPC Peering Connection Options accepter information: %s", err) } - - if pc != nil && pc.RequesterVpcInfo != nil && pc.RequesterVpcInfo.PeeringOptions != nil { - err := d.Set("requester", flattenVpcPeeringConnectionOptions(pc.RequesterVpcInfo.PeeringOptions)) - if err != nil { - return fmt.Errorf("Error setting VPC 
Peering Connection Options requester information: %s", err.Error()) - } + if err := d.Set("requester", flattenVpcPeeringConnectionOptions(pc.RequesterVpcInfo.PeeringOptions)); err != nil { + return fmt.Errorf("error setting VPC Peering Connection Options requester information: %s", err) } return nil } func resourceAwsVpcPeeringConnectionOptionsUpdate(d *schema.ResourceData, meta interface{}) error { - if err := resourceAwsVpcPeeringConnectionModifyOptions(d, meta); err != nil { - return fmt.Errorf("Error modifying VPC Peering Connection Options: %s", err.Error()) + conn := meta.(*AWSClient).ec2conn + + pcRaw, _, err := vpcPeeringConnectionRefreshState(conn, d.Id())() + if err != nil { + return fmt.Errorf("error reading VPC Peering Connection (%s): %s", d.Id(), err) + } + + if pcRaw == nil { + log.Printf("[WARN] VPC Peering Connection (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + pc := pcRaw.(*ec2.VpcPeeringConnection) + + crossRegionPeering := false + if aws.StringValue(pc.RequesterVpcInfo.Region) != aws.StringValue(pc.AccepterVpcInfo.Region) { + crossRegionPeering = true + } + if err := resourceAwsVpcPeeringConnectionModifyOptions(d, meta, crossRegionPeering); err != nil { + return fmt.Errorf("error modifying VPC Peering Connection (%s) Options: %s", d.Id(), err) } return resourceAwsVpcPeeringConnectionOptionsRead(d, meta) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go index 397236768e6..2a8a10e3a1a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go @@ -15,9 +15,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) type XmlVpnConnectionConfig struct { @@ -316,29 +316,10 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating vpn connection: %s", err) } - // Store the ID - vpnConnection := resp.VpnConnection - d.SetId(*vpnConnection.VpnConnectionId) - log.Printf("[INFO] VPN connection ID: %s", *vpnConnection.VpnConnectionId) + d.SetId(aws.StringValue(resp.VpnConnection.VpnConnectionId)) - // Wait for the connection to become available. This has an obscenely - // high default timeout because AWS VPN connections are notoriously - // slow at coming up or going down. There's also no point in checking - // more frequently than every ten seconds. 
- stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: vpnConnectionRefreshFunc(conn, *vpnConnection.VpnConnectionId), - Timeout: 40 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - _, stateErr := stateConf.WaitForState() - if stateErr != nil { - return fmt.Errorf( - "Error waiting for VPN connection (%s) to become ready: %s", - *vpnConnection.VpnConnectionId, stateErr) + if err := waitForEc2VpnConnectionAvailable(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to become available: %s", d.Id(), err) } // Create tags. @@ -512,34 +493,17 @@ func resourceAwsVpnConnectionDelete(d *schema.ResourceData, meta interface{}) er _, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{ VpnConnectionId: aws.String(d.Id()), }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - return nil - } else { - log.Printf("[ERROR] Error deleting VPN connection: %s", err) - return err - } + + if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { + return nil } - // These things can take quite a while to tear themselves down and any - // attempt to modify resources they reference (e.g. CustomerGateways or - // VPN Gateways) before deletion will result in an error. Furthermore, - // they don't just disappear. The go into "deleted" state. We need to - // wait to ensure any other modifications the user might make to their - // VPC stack can safely run. - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{"deleted"}, - Refresh: vpnConnectionRefreshFunc(conn, d.Id()), - Timeout: 30 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, + if err != nil { + return fmt.Errorf("error deleting VPN Connection (%s): %s", d.Id(), err) } - _, stateErr := stateConf.WaitForState() - if stateErr != nil { - return fmt.Errorf( - "Error waiting for VPN connection (%s) to delete: %s", d.Id(), err) + if err := waitForEc2VpnConnectionDeletion(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to delete: %s", d.Id(), err) } return nil @@ -582,6 +546,46 @@ func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} return result } +func waitForEc2VpnConnectionAvailable(conn *ec2.EC2, id string) error { + // Wait for the connection to become available. This has an obscenely + // high default timeout because AWS VPN connections are notoriously + // slow at coming up or going down. There's also no point in checking + // more frequently than every ten seconds. + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: vpnConnectionRefreshFunc(conn, id), + Timeout: 40 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForEc2VpnConnectionDeletion(conn *ec2.EC2, id string) error { + // These things can take quite a while to tear themselves down and any + // attempt to modify resources they reference (e.g. CustomerGateways or + // VPN Gateways) before deletion will result in an error. Furthermore, + // they don't just disappear. They go into "deleted" state. We need to + // wait to ensure any other modifications the user might make to their + // VPC stack can safely run. 
+ stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{"deleted"}, + Refresh: vpnConnectionRefreshFunc(conn, id), + Timeout: 30 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + func xmlConfigToTunnelInfo(xmlConfig string) (*TunnelInfo, error) { var vpnConfig XmlVpnConnectionConfig if err := xml.Unmarshal([]byte(xmlConfig), &vpnConfig); err != nil { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection_route.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection_route.go index e5a0241a401..b8fac5de86a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection_route.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection_route.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpnConnectionRoute() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go index 05af657e903..b0e3c69c8eb 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpnGateway() *schema.Resource { @@ -104,7 +104,7 @@ func resourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { } vpnAttachment := vpnGatewayGetAttachment(vpnGateway) - if len(vpnGateway.VpcAttachments) == 0 || *vpnAttachment.State == "detached" { + if vpnAttachment == nil { // Gateway exists but not attached to the VPC d.Set("vpc_id", "") } else { @@ -153,35 +153,43 @@ func resourceAwsVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error } log.Printf("[INFO] Deleting VPN gateway: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteVpnGateway(&ec2.DeleteVpnGatewayInput{ - VpnGatewayId: aws.String(d.Id()), - }) + input := &ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteVpnGateway(input) if err == nil { return nil } - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidVpnGatewayID.NotFound": + if isAWSErr(err, "InvalidVpnGatewayID.NotFound", "") { return nil - case "IncorrectState": + } + if 
isAWSErr(err, "IncorrectState", "") { return resource.RetryableError(err) } return resource.NonRetryableError(err) }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteVpnGateway(input) + if isAWSErr(err, "InvalidVpnGatewayID.NotFound", "") { + return nil + } + } + + if err != nil { + return fmt.Errorf("Error deleting VPN gateway: %s", err) + } + return nil } func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - if d.Get("vpc_id").(string) == "" { + vpcId := d.Get("vpc_id").(string) + + if vpcId == "" { log.Printf( "[DEBUG] Not attaching VPN Gateway '%s' as no VPC ID is set", d.Id()) @@ -191,11 +199,11 @@ func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error log.Printf( "[INFO] Attaching VPN Gateway '%s' to VPC '%s'", d.Id(), - d.Get("vpc_id").(string)) + vpcId) req := &ec2.AttachVpnGatewayInput{ VpnGatewayId: aws.String(d.Id()), - VpcId: aws.String(d.Get("vpc_id").(string)), + VpcId: aws.String(vpcId), } err := resource.Retry(1*time.Minute, func() *resource.RetryError { @@ -208,9 +216,12 @@ func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AttachVpnGateway(req) + } if err != nil { - return err + return fmt.Errorf("Error attaching VPN gateway: %s", err) } // Wait for it to be fully attached before continuing @@ -218,7 +229,7 @@ func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error stateConf := &resource.StateChangeConf{ Pending: []string{"detached", "attaching"}, Target: []string{"attached"}, - Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id()), + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, d.Id()), Timeout: 15 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { @@ -234,9 +245,10 @@ func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error conn := meta.(*AWSClient).ec2conn // Get the old VPC ID to detach from - vpcID, _ := d.GetChange("vpc_id") + vpcIdRaw, _ := d.GetChange("vpc_id") + vpcId := vpcIdRaw.(string) - if vpcID.(string) == "" { + if vpcId == "" { log.Printf( "[DEBUG] Not detaching VPN Gateway '%s' as no VPC ID is set", d.Id()) @@ -246,12 +258,12 @@ func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error log.Printf( "[INFO] Detaching VPN Gateway '%s' from VPC '%s'", d.Id(), - vpcID.(string)) + vpcId) wait := true _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ VpnGatewayId: aws.String(d.Id()), - VpcId: aws.String(vpcID.(string)), + VpcId: aws.String(vpcId), }) if err != nil { ec2err, ok := err.(awserr.Error) @@ -279,7 +291,7 @@ func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error stateConf := &resource.StateChangeConf{ Pending: []string{"attached", "detaching", "available"}, Target: []string{"detached"}, - Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id()), + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, d.Id()), Timeout: 10 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { @@ -291,50 +303,12 @@ func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error return nil } -// vpnGatewayAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// the state of a VPN gateway's attachment -func vpnGatewayAttachStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - var start time.Time - return func() (interface{}, string, error) { - if start.IsZero() { - 
start = time.Now() - } - - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(id)}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnGatewayID.NotFound" { - resp = nil - } else { - log.Printf("[ERROR] Error on VpnGatewayStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - vpnGateway := resp.VpnGateways[0] - if len(vpnGateway.VpcAttachments) == 0 { - // No attachments, we're detached - return vpnGateway, "detached", nil - } - - vpnAttachment := vpnGatewayGetAttachment(vpnGateway) - return vpnGateway, *vpnAttachment.State, nil - } -} - +// vpnGatewayGetAttachment returns any VGW attachment that's in "attached" state or nil. func vpnGatewayGetAttachment(vgw *ec2.VpnGateway) *ec2.VpcAttachment { - for _, v := range vgw.VpcAttachments { - if *v.State == "attached" { - return v + for _, vpcAttachment := range vgw.VpcAttachments { + if aws.StringValue(vpcAttachment.State) == ec2.AttachmentStatusAttached { + return vpcAttachment } } - return &ec2.VpcAttachment{State: aws.String("detached")} + return nil } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go index fd081b81bf3..02a188fdd79 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpnGatewayAttachment() *schema.Resource { @@ -101,7 +101,7 @@ func resourceAwsVpnGatewayAttachmentRead(d *schema.ResourceData, meta interface{ } vga := vpnGatewayGetAttachment(vgw) - if len(vgw.VpcAttachments) == 0 || *vga.State == "detached" { + if vga == nil { d.Set("vpc_id", "") return nil } @@ -163,12 +163,9 @@ func resourceAwsVpnGatewayAttachmentDelete(d *schema.ResourceData, meta interfac func vpnGatewayAttachmentStateRefresh(conn *ec2.EC2, vpcId, vgwId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("attachment.vpc-id"), - Values: []*string{aws.String(vpcId)}, - }, - }, + Filters: buildEC2AttributeFilterList(map[string]string{ + "attachment.vpc-id": vpcId, + }), VpnGatewayIds: []*string{aws.String(vgwId)}, }) @@ -187,15 +184,19 @@ func vpnGatewayAttachmentStateRefresh(conn *ec2.EC2, vpcId, vgwId string) resour } vgw := resp.VpnGateways[0] - if len(vgw.VpcAttachments) == 0 { - return vgw, "detached", nil - } - vga := vpnGatewayGetAttachment(vgw) + return vgw, vpnGatewayGetAttachmentState(vgw, vpcId), nil + } +} - 
log.Printf("[DEBUG] VPN Gateway %q attachment status: %s", vgwId, *vga.State) - return vgw, *vga.State, nil +// vpnGatewayGetAttachmentState returns the state of any VGW attachment to the specified VPC or "detached". +func vpnGatewayGetAttachmentState(vgw *ec2.VpnGateway, vpcId string) string { + for _, vpcAttachment := range vgw.VpcAttachments { + if aws.StringValue(vpcAttachment.VpcId) == vpcId { + return aws.StringValue(vpcAttachment.State) + } } + return ec2.AttachmentStatusDetached } func vpnGatewayAttachmentId(vpcId, vgwId string) string { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go index bed5f7e4d20..4ba1b4cd079 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsVpnGatewayRoutePropagation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go index 9c62800d601..075438f45af 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafByteMatchSet() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafByteMatchSet() *schema.Resource { Read: resourceAwsWafByteMatchSetRead, Update: resourceAwsWafByteMatchSetUpdate, Delete: resourceAwsWafByteMatchSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_geo_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_geo_match_set.go index dc5295b8429..86933b6eae8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_geo_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_geo_match_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafGeoMatchSet() *schema.Resource { diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go index 9c4e1b47779..7eef07fe965 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // WAF requires UpdateIPSet operations be split into batches of 1000 Updates diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rate_based_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rate_based_rule.go index 2d3f5f5dada..1f52bf092aa 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rate_based_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rate_based_rule.go @@ -5,10 +5,12 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsWafRateBasedRule() *schema.Resource { @@ -17,6 +19,9 @@ func resourceAwsWafRateBasedRule() *schema.Resource { Read: resourceAwsWafRateBasedRuleRead, Update: resourceAwsWafRateBasedRuleUpdate, Delete: resourceAwsWafRateBasedRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -59,7 +64,12 @@ func resourceAwsWafRateBasedRule() *schema.Resource { "rate_limit": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntAtLeast(2000), + ValidateFunc: validation.IntAtLeast(100), + }, + "tags": tagsSchema(), + "arn": { + Type: schema.TypeString, + Computed: true, }, }, } @@ -67,6 +77,7 @@ func resourceAwsWafRateBasedRule() *schema.Resource { func resourceAwsWafRateBasedRuleCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().WafTags() wr := newWafRetryer(conn) out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { @@ -78,6 +89,10 @@ func resourceAwsWafRateBasedRuleCreate(d *schema.ResourceData, meta interface{}) RateLimit: aws.Int64(int64(d.Get("rate_limit").(int))), } + if len(tags) > 0 { + params.Tags = tags + } + return conn.CreateRateBasedRule(params) }) if err != nil { @@ -85,7 +100,17 @@ func resourceAwsWafRateBasedRuleCreate(d *schema.ResourceData, meta interface{}) } resp := out.(*waf.CreateRateBasedRuleOutput) d.SetId(*resp.Rule.RuleId) - return resourceAwsWafRateBasedRuleUpdate(d, meta) + + newPredicates := d.Get("predicates").(*schema.Set).List() + if 
len(newPredicates) > 0 { + noPredicates := []interface{}{} + err := updateWafRateBasedRuleResource(*resp.Rule.RuleId, noPredicates, newPredicates, d.Get("rate_limit"), conn) + if err != nil { + return fmt.Errorf("Error Updating WAF Rate Based Rule: %s", err) + } + } + + return resourceAwsWafRateBasedRuleRead(d, meta) } func resourceAwsWafRateBasedRuleRead(d *schema.ResourceData, meta interface{}) error { @@ -97,7 +122,7 @@ func resourceAwsWafRateBasedRuleRead(d *schema.ResourceData, meta interface{}) e resp, err := conn.GetRateBasedRule(params) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == waf.ErrCodeNonexistentItemException { log.Printf("[WARN] WAF Rate Based Rule (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -117,6 +142,22 @@ func resourceAwsWafRateBasedRuleRead(d *schema.ResourceData, meta interface{}) e predicates = append(predicates, predicate) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "waf", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("ratebasedrule/%s", d.Id()), + }.String() + d.Set("arn", arn) + + tagList, err := keyvaluetags.WafListTags(conn, arn) + if err != nil { + return fmt.Errorf("Failed to get WAF Rate Based Rule parameter tags for %s: %s", d.Get("name"), err) + } + if err := d.Set("tags", tagList.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("predicates", predicates) d.Set("name", resp.Rule.Name) d.Set("metric_name", resp.Rule.MetricName) @@ -140,6 +181,14 @@ func resourceAwsWafRateBasedRuleUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.WafUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsWafRateBasedRuleRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_match_set.go index 9bfbee72be9..0806319f867 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegexMatchSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_pattern_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_pattern_set.go index a09ba6ffcc0..e7e247eaccf 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_pattern_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_regex_pattern_set.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" - 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegexPatternSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go index fae17269a55..82d8821da02 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go @@ -5,10 +5,12 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsWafRule() *schema.Resource { @@ -55,12 +57,18 @@ func resourceAwsWafRule() *schema.Resource { }, }, }, + "tags": tagsSchema(), + "arn": { + Type: schema.TypeString, + Computed: true, + }, }, } } func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().WafTags() wr := newWafRetryer(conn) out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { @@ -70,6 +78,10 @@ func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { Name: aws.String(d.Get("name").(string)), } + if len(tags) > 0 { + params.Tags = tags + } + return conn.CreateRule(params) }) if err != nil { @@ -77,7 +89,17 @@ func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { } resp := out.(*waf.CreateRuleOutput) d.SetId(*resp.Rule.RuleId) - return resourceAwsWafRuleUpdate(d, meta) + + newPredicates := d.Get("predicates").(*schema.Set).List() + if len(newPredicates) > 0 { + noPredicates := []interface{}{} + err := updateWafRuleResource(d.Id(), noPredicates, newPredicates, conn) + if err != nil { + return fmt.Errorf("Error Updating WAF Rule: %s", err) + } + } + + return resourceAwsWafRuleRead(d, meta) } func resourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { @@ -89,7 +111,7 @@ func resourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { resp, err := conn.GetRule(params) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == waf.ErrCodeNonexistentItemException { log.Printf("[WARN] WAF Rule (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -109,6 +131,24 @@ func resourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { predicates = append(predicates, predicate) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "waf", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("rule/%s", d.Id()), + }.String() + d.Set("arn", arn) + + tags, err := keyvaluetags.WafListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for WAF Rule (%s): %s", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { 
+ return fmt.Errorf("error setting tags: %s", err) + } + d.Set("predicates", predicates) d.Set("name", resp.Rule.Name) d.Set("metric_name", resp.Rule.MetricName) @@ -129,6 +169,14 @@ func resourceAwsWafRuleUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.WafUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsWafRuleRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule_group.go index 803d6ea07a8..3b2ac9aebde 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule_group.go @@ -5,8 +5,10 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsWafRuleGroup() *schema.Resource { @@ -15,6 +17,9 @@ func resourceAwsWafRuleGroup() *schema.Resource { Read: resourceAwsWafRuleGroupRead, Update: resourceAwsWafRuleGroupUpdate, Delete: resourceAwsWafRuleGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -62,12 +67,18 @@ func resourceAwsWafRuleGroup() *schema.Resource { }, }, }, + "tags": tagsSchema(), + "arn": { + Type: schema.TypeString, + Computed: true, + }, }, } } func resourceAwsWafRuleGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().WafTags() wr := newWafRetryer(conn) out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { @@ -77,6 +88,10 @@ func resourceAwsWafRuleGroupCreate(d *schema.ResourceData, meta interface{}) err Name: aws.String(d.Get("name").(string)), } + if len(tags) > 0 { + params.Tags = tags + } + return conn.CreateRuleGroup(params) }) if err != nil { @@ -84,7 +99,18 @@ func resourceAwsWafRuleGroupCreate(d *schema.ResourceData, meta interface{}) err } resp := out.(*waf.CreateRuleGroupOutput) d.SetId(*resp.RuleGroup.RuleGroupId) - return resourceAwsWafRuleGroupUpdate(d, meta) + + activatedRules := d.Get("activated_rule").(*schema.Set).List() + if len(activatedRules) > 0 { + noActivatedRules := []interface{}{} + + err := updateWafRuleGroupResource(d.Id(), noActivatedRules, activatedRules, conn) + if err != nil { + return fmt.Errorf("Error Updating WAF Rule Group: %s", err) + } + } + + return resourceAwsWafRuleGroupRead(d, meta) } func resourceAwsWafRuleGroupRead(d *schema.ResourceData, meta interface{}) error { @@ -96,7 +122,7 @@ func resourceAwsWafRuleGroupRead(d *schema.ResourceData, meta interface{}) error resp, err := conn.GetRuleGroup(params) if err != nil { - if isAWSErr(err, "WAFNonexistentItemException", "") { + if isAWSErr(err, waf.ErrCodeNonexistentItemException, "") { log.Printf("[WARN] WAF Rule Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -112,6 
+138,22 @@ func resourceAwsWafRuleGroupRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error listing activated rules in WAF Rule Group (%s): %s", d.Id(), err) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "waf", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("rulegroup/%s", d.Id()), + }.String() + d.Set("arn", arn) + + tags, err := keyvaluetags.WafListTags(conn, arn) + if err != nil { + return fmt.Errorf("error listing tags for WAF Rule Group (%s): %s", arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + d.Set("activated_rule", flattenWafActivatedRules(rResp.ActivatedRules)) d.Set("name", resp.RuleGroup.Name) d.Set("metric_name", resp.RuleGroup.MetricName) @@ -132,6 +174,14 @@ func resourceAwsWafRuleGroupUpdate(d *schema.ResourceData, meta interface{}) err } } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.WafUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsWafRuleGroupRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go index 87575247332..deb78c7a965 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafSizeConstraintSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go index ff9bb4d6eb4..2af2d225d14 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafSqlInjectionMatchSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go index e41fd1b0892..0e9df3bd4b6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go @@ -7,8 +7,9 @@ 
import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsWafWebAcl() *schema.Resource { @@ -140,12 +141,14 @@ func resourceAwsWafWebAcl() *schema.Resource { }, }, }, + "tags": tagsSchema(), }, } } func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn + tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().WafTags() wr := newWafRetryer(conn) out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { @@ -156,6 +159,10 @@ func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error Name: aws.String(d.Get("name").(string)), } + if len(tags) > 0 { + params.Tags = tags + } + return conn.CreateWebACL(params) }) if err != nil { @@ -171,9 +178,36 @@ func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error Resource: fmt.Sprintf("webacl/%s", d.Id()), }.String() - // Set for update - d.Set("arn", arn) - return resourceAwsWafWebAclUpdate(d, meta) + loggingConfiguration := d.Get("logging_configuration").([]interface{}) + if len(loggingConfiguration) == 1 { + input := &waf.PutLoggingConfigurationInput{ + LoggingConfiguration: expandWAFLoggingConfiguration(loggingConfiguration, arn), + } + + log.Printf("[DEBUG] Updating WAF Web ACL (%s) Logging Configuration: %s", d.Id(), input) + if _, err := conn.PutLoggingConfiguration(input); err != nil { + return fmt.Errorf("error updating WAF Web ACL (%s) Logging Configuration: %s", d.Id(), err) + } + } + + rules := d.Get("rules").(*schema.Set).List() + if len(rules) > 0 { + wr := newWafRetryer(conn) + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateWebACLInput{ + ChangeToken: token, + DefaultAction: expandWafAction(d.Get("default_action").(*schema.Set).List()), + Updates: diffWafWebAclRules([]interface{}{}, rules), + WebACLId: aws.String(d.Id()), + } + return conn.UpdateWebACL(req) + }) + if err != nil { + return fmt.Errorf("Error Updating WAF ACL: %s", err) + } + } + + return resourceAwsWafWebAclRead(d, meta) } func resourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error { @@ -199,19 +233,23 @@ func resourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error { return nil } - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "waf", - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("webacl/%s", d.Id()), - }.String() - d.Set("arn", arn) + d.Set("arn", resp.WebACL.WebACLArn) + arn := *resp.WebACL.WebACLArn if err := d.Set("default_action", flattenWafAction(resp.WebACL.DefaultAction)); err != nil { return fmt.Errorf("error setting default_action: %s", err) } d.Set("name", resp.WebACL.Name) d.Set("metric_name", resp.WebACL.MetricName) + + tags, err := keyvaluetags.WafListTags(conn, arn) + if err != nil { + return fmt.Errorf("error listing tags for WAF ACL (%s): %s", arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + if err := d.Set("rules", flattenWafWebAclRules(resp.WebACL.Rules)); err != nil { return fmt.Errorf("error setting rules: %s", err) } @@ -286,6 
+324,14 @@ func resourceAwsWafWebAclUpdate(d *schema.ResourceData, meta interface{}) error } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.WafUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + return resourceAwsWafWebAclRead(d, meta) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go index 56dff11917f..823cac3514d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafXssMatchSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go index 4e24b08c01e..399bec8401f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalByteMatchSet() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafRegionalByteMatchSet() *schema.Resource { Read: resourceAwsWafRegionalByteMatchSetRead, Update: resourceAwsWafRegionalByteMatchSetUpdate, Delete: resourceAwsWafRegionalByteMatchSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_geo_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_geo_match_set.go index 107640171ac..8e573002c09 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_geo_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_geo_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalGeoMatchSet() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafRegionalGeoMatchSet() *schema.Resource { Read: resourceAwsWafRegionalGeoMatchSetRead, Update: 
resourceAwsWafRegionalGeoMatchSetUpdate, Delete: resourceAwsWafRegionalGeoMatchSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -76,16 +79,14 @@ func resourceAwsWafRegionalGeoMatchSetRead(d *schema.ResourceData, meta interfac } resp, err := conn.GetGeoMatchSet(params) - if err != nil { - // TODO: Replace with constant once it's available - // See https://github.com/aws/aws-sdk-go/issues/1856 - if isAWSErr(err, "WAFNonexistentItemException", "") { - log.Printf("[WARN] WAF WAF Regional Geo Match Set (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return err + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Geo Match Set (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error getting WAF Regional Geo Match Set (%s): %s", d.Id(), err) } d.Set("name", resp.GeoMatchSet.Name) @@ -103,8 +104,13 @@ func resourceAwsWafRegionalGeoMatchSetUpdate(d *schema.ResourceData, meta interf oldConstraints, newConstraints := o.(*schema.Set).List(), n.(*schema.Set).List() err := updateGeoMatchSetResourceWR(d.Id(), oldConstraints, newConstraints, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Geo Match Set (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Failed updating WAF Regional Geo Match Set: %s", err) + return fmt.Errorf("Failed updating WAF Regional Geo Match Set (%s): %s", d.Id(), err) } } @@ -133,8 +139,11 @@ func resourceAwsWafRegionalGeoMatchSetDelete(d *schema.ResourceData, meta interf return conn.DeleteGeoMatchSet(req) }) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Failed deleting WAF Regional Geo Match Set: %s", err) + return fmt.Errorf("Failed deleting WAF Regional Geo Match Set (%s): %s", d.Id(), err) } return nil diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go index b5cd3828f95..2fee9e44b2b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalIPSet() *schema.Resource { @@ -18,6 +18,9 @@ func resourceAwsWafRegionalIPSet() *schema.Resource { Read: resourceAwsWafRegionalIPSetRead, Update: resourceAwsWafRegionalIPSetUpdate, Delete: resourceAwsWafRegionalIPSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rate_based_rule.go
b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rate_based_rule.go index 5cfc40f1148..e329751a4d6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rate_based_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rate_based_rule.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsWafRegionalRateBasedRule() *schema.Resource { @@ -17,6 +17,9 @@ func resourceAwsWafRegionalRateBasedRule() *schema.Resource { Read: resourceAwsWafRegionalRateBasedRuleRead, Update: resourceAwsWafRegionalRateBasedRuleUpdate, Delete: resourceAwsWafRegionalRateBasedRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -59,7 +62,7 @@ func resourceAwsWafRegionalRateBasedRule() *schema.Resource { "rate_limit": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntAtLeast(2000), + ValidateFunc: validation.IntAtLeast(100), }, }, } @@ -82,7 +85,7 @@ func resourceAwsWafRegionalRateBasedRuleCreate(d *schema.ResourceData, meta inte return conn.CreateRateBasedRule(params) }) if err != nil { - return err + return fmt.Errorf("Error creating WAF Regional Rate Based Rule (%s): %s", d.Get("name").(string), err) } resp := out.(*waf.CreateRateBasedRuleOutput) d.SetId(*resp.Rule.RuleId) @@ -97,14 +100,13 @@ func resourceAwsWafRegionalRateBasedRuleRead(d *schema.ResourceData, meta interf } resp, err := conn.GetRateBasedRule(params) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Rate Based Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { - log.Printf("[WARN] WAF Regional Rate Based Rule (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - return err + return fmt.Errorf("Error getting WAF Regional Rate Based Rule (%s): %s", d.Id(), err) } var predicates []map[string]interface{} @@ -136,8 +138,13 @@ func resourceAwsWafRegionalRateBasedRuleUpdate(d *schema.ResourceData, meta inte rateLimit := d.Get("rate_limit") err := updateWafRateBasedRuleResourceWR(d.Id(), oldP, newP, rateLimit, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Rate Based Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Error Updating WAF Rule: %s", err) + return fmt.Errorf("Error updating WAF Regional Rate Based Rule Predicates (%s): %s", d.Id(), err) } } @@ -154,8 +161,11 @@ func resourceAwsWafRegionalRateBasedRuleDelete(d *schema.ResourceData, meta inte rateLimit := d.Get("rate_limit") err := updateWafRateBasedRuleResourceWR(d.Id(), oldPredicates, noPredicates, rateLimit, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Error updating WAF Regional Rate Based Rule Predicates: %s", err) +
return fmt.Errorf("Error updating WAF Regional Rate Based Rule Predicates (%s): %s", d.Id(), err) } } @@ -168,8 +178,11 @@ func resourceAwsWafRegionalRateBasedRuleDelete(d *schema.ResourceData, meta inte log.Printf("[INFO] Deleting WAF Regional Rate Based Rule") return conn.DeleteRateBasedRule(req) }) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Error deleting WAF Regional Rate Based Rule: %s", err) + return fmt.Errorf("Error deleting WAF Regional Rate Based Rule (%s): %s", d.Id(), err) } return nil @@ -187,9 +200,6 @@ func updateWafRateBasedRuleResourceWR(id string, oldP, newP []interface{}, rateL return conn.UpdateRateBasedRule(req) }) - if err != nil { - return fmt.Errorf("Error Updating WAF Regional Rate Based Rule: %s", err) - } - return nil + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_match_set.go index b1c9d10d08d..fe16df030df 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_match_set.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalRegexMatchSet() *schema.Resource { @@ -17,6 +17,9 @@ func resourceAwsWafRegionalRegexMatchSet() *schema.Resource { Read: resourceAwsWafRegionalRegexMatchSetRead, Update: resourceAwsWafRegionalRegexMatchSetUpdate, Delete: resourceAwsWafRegionalRegexMatchSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -84,7 +87,7 @@ func resourceAwsWafRegionalRegexMatchSetCreate(d *schema.ResourceData, meta inte } resp := out.(*waf.CreateRegexMatchSetOutput) - d.SetId(*resp.RegexMatchSet.RegexMatchSetId) + d.SetId(aws.StringValue(resp.RegexMatchSet.RegexMatchSetId)) return resourceAwsWafRegionalRegexMatchSetUpdate(d, meta) } @@ -97,14 +100,13 @@ func resourceAwsWafRegionalRegexMatchSetRead(d *schema.ResourceData, meta interf } resp, err := conn.GetRegexMatchSet(params) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Regex Match Set (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { - log.Printf("[WARN] WAF Regional Regex Match Set (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - return err + return fmt.Errorf("Error getting WAF Regional Regex Match Set (%s): %s", d.Id(), err) } d.Set("name", resp.RegexMatchSet.Name) @@ -123,8 +125,13 @@ func resourceAwsWafRegionalRegexMatchSetUpdate(d *schema.ResourceData, meta inte o, n := d.GetChange("regex_match_tuple") oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() err := updateRegexMatchSetResourceWR(d.Id(), oldT, newT, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Rate Based 
Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Failed updating WAF Regional Regex Match Set: %s", err) + return fmt.Errorf("Failed updating WAF Regional Regex Match Set(%s): %s", d.Id(), err) } } @@ -139,8 +146,11 @@ func resourceAwsWafRegionalRegexMatchSetDelete(d *schema.ResourceData, meta inte if len(oldTuples) > 0 { noTuples := []interface{}{} err := updateRegexMatchSetResourceWR(d.Id(), oldTuples, noTuples, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Error updating WAF Regional Regex Match Set: %s", err) + return fmt.Errorf("Failed updating WAF Regional Regex Match Set(%s): %s", d.Id(), err) } } @@ -153,6 +163,11 @@ func resourceAwsWafRegionalRegexMatchSetDelete(d *schema.ResourceData, meta inte log.Printf("[INFO] Deleting WAF Regional Regex Match Set: %s", req) return conn.DeleteRegexMatchSet(req) }) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Regex Match Set (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { return fmt.Errorf("Failed deleting WAF Regional Regex Match Set: %s", err) } @@ -171,9 +186,6 @@ func updateRegexMatchSetResourceWR(id string, oldT, newT []interface{}, conn *wa return conn.UpdateRegexMatchSet(req) }) - if err != nil { - return fmt.Errorf("Failed updating WAF Regional Regex Match Set: %s", err) - } - return nil + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_pattern_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_pattern_set.go index 6f311b32585..29a0f15fc81 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_pattern_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_regex_pattern_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalRegexPatternSet() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafRegionalRegexPatternSet() *schema.Resource { Read: resourceAwsWafRegionalRegexPatternSetRead, Update: resourceAwsWafRegionalRegexPatternSetUpdate, Delete: resourceAwsWafRegionalRegexPatternSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -51,7 +54,7 @@ func resourceAwsWafRegionalRegexPatternSetCreate(d *schema.ResourceData, meta in } resp := out.(*waf.CreateRegexPatternSetOutput) - d.SetId(*resp.RegexPatternSet.RegexPatternSetId) + d.SetId(aws.StringValue(resp.RegexPatternSet.RegexPatternSetId)) return resourceAwsWafRegionalRegexPatternSetUpdate(d, meta) } @@ -72,7 +75,7 @@ func resourceAwsWafRegionalRegexPatternSetRead(d *schema.ResourceData, meta inte return nil } - return err + return fmt.Errorf("Error getting WAF Regional Regex Pattern Set (%s): %s", d.Id(), err) } d.Set("name", resp.RegexPatternSet.Name) @@ -91,8 +94,13 @@ func resourceAwsWafRegionalRegexPatternSetUpdate(d *schema.ResourceData, meta in o, n := 
d.GetChange("regex_pattern_strings") oldPatterns, newPatterns := o.(*schema.Set).List(), n.(*schema.Set).List() err := updateWafRegionalRegexPatternSetPatternStringsWR(d.Id(), oldPatterns, newPatterns, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional Rate Based Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Failed updating WAF Regional Regex Pattern Set: %s", err) + return fmt.Errorf("Failed updating WAF Regional Regex Pattern Set(%s): %s", d.Id(), err) } } @@ -107,8 +115,11 @@ func resourceAwsWafRegionalRegexPatternSetDelete(d *schema.ResourceData, meta in if len(oldPatterns) > 0 { noPatterns := []interface{}{} err := updateWafRegionalRegexPatternSetPatternStringsWR(d.Id(), oldPatterns, noPatterns, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Error updating WAF Regional Regex Pattern Set: %s", err) + return fmt.Errorf("Failed updating WAF Regional Regex Pattern Set(%s): %s", d.Id(), err) } } @@ -139,9 +150,6 @@ func updateWafRegionalRegexPatternSetPatternStringsWR(id string, oldPatterns, ne return conn.UpdateRegexPatternSet(req) }) - if err != nil { - return fmt.Errorf("Failed updating WAF Regional Regex Pattern Set: %s", err) - } - return nil + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule.go index 25cc935a869..7b139c555f4 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsWafRegionalRule() *schema.Resource { @@ -18,6 +18,9 @@ func resourceAwsWafRegionalRule() *schema.Resource { Read: resourceAwsWafRegionalRuleRead, Update: resourceAwsWafRegionalRuleUpdate, Delete: resourceAwsWafRegionalRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule_group.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule_group.go index e82e09798fc..5d01c1204dd 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule_group.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_rule_group.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func 
resourceAwsWafRegionalRuleGroup() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafRegionalRuleGroup() *schema.Resource { Read: resourceAwsWafRegionalRuleGroupRead, Update: resourceAwsWafRegionalRuleGroupUpdate, Delete: resourceAwsWafRegionalRuleGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_size_constraint_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_size_constraint_set.go index 904547d1817..3d97749a328 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_size_constraint_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_size_constraint_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalSizeConstraintSet() *schema.Resource { @@ -16,6 +16,9 @@ func resourceAwsWafRegionalSizeConstraintSet() *schema.Resource { Read: resourceAwsWafRegionalSizeConstraintSetRead, Update: resourceAwsWafRegionalSizeConstraintSetUpdate, Delete: resourceAwsWafRegionalSizeConstraintSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: wafSizeConstraintSetSchema(), } @@ -43,7 +46,7 @@ func resourceAwsWafRegionalSizeConstraintSetCreate(d *schema.ResourceData, meta } resp := out.(*waf.CreateSizeConstraintSetOutput) - d.SetId(*resp.SizeConstraintSet.SizeConstraintSetId) + d.SetId(aws.StringValue(resp.SizeConstraintSet.SizeConstraintSetId)) return resourceAwsWafRegionalSizeConstraintSetUpdate(d, meta) } @@ -57,13 +60,13 @@ func resourceAwsWafRegionalSizeConstraintSetRead(d *schema.ResourceData, meta in } resp, err := conn.GetSizeConstraintSet(params) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional SizeConstraintSet (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { - log.Printf("[WARN] WAF Regional SizeConstraintSet (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return err + return fmt.Errorf("Error getting WAF Regional Size Constraint Set (%s): %s", d.Id(), err) } d.Set("name", resp.SizeConstraintSet.Name) @@ -79,8 +82,14 @@ func resourceAwsWafRegionalSizeConstraintSetUpdate(d *schema.ResourceData, meta o, n := d.GetChange("size_constraints") oldConstraints, newConstraints := o.(*schema.Set).List(), n.(*schema.Set).List() - if err := updateRegionalSizeConstraintSetResource(d.Id(), oldConstraints, newConstraints, client.wafregionalconn, client.region); err != nil { - return fmt.Errorf("Error updating WAF Regional SizeConstraintSet: %s", err) + err := updateRegionalSizeConstraintSetResource(d.Id(), oldConstraints, newConstraints, client.wafregionalconn, client.region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] WAF Regional SizeConstraintSet (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != 
nil { + return fmt.Errorf("Error updating WAF Regional SizeConstraintSet (%s): %s", d.Id(), err) } } @@ -95,8 +104,12 @@ func resourceAwsWafRegionalSizeConstraintSetDelete(d *schema.ResourceData, meta if len(oldConstraints) > 0 { noConstraints := []interface{}{} - if err := updateRegionalSizeConstraintSetResource(d.Id(), oldConstraints, noConstraints, conn, region); err != nil { - return fmt.Errorf("Error deleting WAF Regional SizeConstraintSet: %s", err) + err := updateRegionalSizeConstraintSetResource(d.Id(), oldConstraints, noConstraints, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } + if err != nil { + return fmt.Errorf("Error deleting WAF Regional SizeConstraintSet (%s): %s", d.Id(), err) } } @@ -108,6 +121,9 @@ func resourceAwsWafRegionalSizeConstraintSetDelete(d *schema.ResourceData, meta } return conn.DeleteSizeConstraintSet(req) }) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { return fmt.Errorf("Error deleting WAF Regional SizeConstraintSet: %s", err) } @@ -127,9 +143,6 @@ func updateRegionalSizeConstraintSetResource(id string, oldConstraints, newConst log.Printf("[INFO] Updating WAF Regional SizeConstraintSet: %s", req) return conn.UpdateSizeConstraintSet(req) }) - if err != nil { - return fmt.Errorf("Error updating WAF Regional SizeConstraintSet: %s", err) - } - return nil + return err } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_sql_injection_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_sql_injection_match_set.go index 43e4554e75a..c5f7369b459 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_sql_injection_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_sql_injection_match_set.go @@ -9,8 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalSqlInjectionMatchSet() *schema.Resource { @@ -19,6 +19,9 @@ func resourceAwsWafRegionalSqlInjectionMatchSet() *schema.Resource { Read: resourceAwsWafRegionalSqlInjectionMatchSetRead, Update: resourceAwsWafRegionalSqlInjectionMatchSetUpdate, Delete: resourceAwsWafRegionalSqlInjectionMatchSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -83,7 +86,7 @@ func resourceAwsWafRegionalSqlInjectionMatchSetCreate(d *schema.ResourceData, me return fmt.Errorf("Failed creating Regional WAF SQL Injection Match Set: %s", err) } resp := out.(*waf.CreateSqlInjectionMatchSetOutput) - d.SetId(*resp.SqlInjectionMatchSet.SqlInjectionMatchSetId) + d.SetId(aws.StringValue(resp.SqlInjectionMatchSet.SqlInjectionMatchSetId)) return resourceAwsWafRegionalSqlInjectionMatchSetUpdate(d, meta) } @@ -96,14 +99,13 @@ func resourceAwsWafRegionalSqlInjectionMatchSetRead(d *schema.ResourceData, meta } resp, err := conn.GetSqlInjectionMatchSet(params) + if isAWSErr(err,
wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] Regional WAF SQL Injection Match Set (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } if err != nil { - if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { - log.Printf("[WARN] Regional WAF SQL Injection Match Set (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err + return fmt.Errorf("Error getting Regional WAF SQL Injection Match Set (%s): %s", d.Id(), err) } d.Set("name", resp.SqlInjectionMatchSet.Name) @@ -121,8 +123,13 @@ func resourceAwsWafRegionalSqlInjectionMatchSetUpdate(d *schema.ResourceData, me oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() err := updateSqlInjectionMatchSetResourceWR(d.Id(), oldT, newT, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + log.Printf("[WARN] Regional WAF SQL Injection Match Set (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Error updating Regional WAF SQL Injection Match Set: %s", err) + return fmt.Errorf("Error updating Regional WAF SQL Injection Match Set (%s): %s", d.Id(), err) } } @@ -138,8 +145,11 @@ func resourceAwsWafRegionalSqlInjectionMatchSetDelete(d *schema.ResourceData, me if len(oldTuples) > 0 { noTuples := []interface{}{} err := updateSqlInjectionMatchSetResourceWR(d.Id(), oldTuples, noTuples, conn, region) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Error deleting Regional WAF SQL Injection Match Set: %s", err) + return fmt.Errorf("Error updating Regional WAF SQL Injection Match Set (%s): %s", d.Id(), err) } } @@ -152,8 +162,11 @@ func resourceAwsWafRegionalSqlInjectionMatchSetDelete(d *schema.ResourceData, me return conn.DeleteSqlInjectionMatchSet(req) }) + if isAWSErr(err, wafregional.ErrCodeWAFNonexistentItemException, "") { + return nil + } if err != nil { - return fmt.Errorf("Failed deleting Regional WAF SQL Injection Match Set: %s", err) + return fmt.Errorf("Failed deleting Regional WAF SQL Injection Match Set (%s): %s", d.Id(), err) } return nil @@ -171,11 +184,8 @@ func updateSqlInjectionMatchSetResourceWR(id string, oldT, newT []interface{}, c log.Printf("[INFO] Updating Regional WAF SQL Injection Match Set: %s", req) return conn.UpdateSqlInjectionMatchSet(req) }) - if err != nil { - return fmt.Errorf("Failed updating Regional WAF SQL Injection Match Set: %s", err) - } - return nil + return err } func diffWafSqlInjectionMatchTuplesWR(oldT, newT []interface{}) []*waf.SqlInjectionMatchSetUpdate { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl.go index bde2f6810ea..b23f26fa69a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +
"github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsWafRegionalWebAcl() *schema.Resource { @@ -18,6 +18,9 @@ func resourceAwsWafRegionalWebAcl() *schema.Resource { Read: resourceAwsWafRegionalWebAclRead, Update: resourceAwsWafRegionalWebAclUpdate, Delete: resourceAwsWafRegionalWebAclDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "arn": { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl_association.go index 9484014904d..f69d90682a6 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_web_acl_association.go @@ -8,8 +8,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalWebAclAssociation() *schema.Resource { @@ -59,8 +59,11 @@ func resourceAwsWafRegionalWebAclAssociationCreate(d *schema.ResourceData, meta } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.AssociateWebACL(params) + } if err != nil { - return err + return fmt.Errorf("Error creating WAF Regional Web ACL association: %s", err) } // Store association id @@ -91,7 +94,9 @@ func resourceAwsWafRegionalWebAclAssociationRead(d *schema.ResourceData, meta in } if output == nil || output.WebACLSummary == nil { - return fmt.Errorf("error getting WAF Regional Web ACL for resource (%s): empty response", resourceArn) + log.Printf("[WARN] WAF Regional Web ACL for resource (%s) not found, removing from state", resourceArn) + d.SetId("") + return nil } d.Set("resource_arn", resourceArn) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_xss_match_set.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_xss_match_set.go index dbbc8f942a7..bd7db2ad49f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_xss_match_set.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_xss_match_set.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceAwsWafRegionalXssMatchSet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_fleet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_fleet.go index db6f7e5c7ca..bff5272803f 100644 --- 
a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_fleet.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_fleet.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/worklink" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsWorkLinkFleet() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_website_certificate_authority_association.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_website_certificate_authority_association.go index b02cc030dea..842df68cd19 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_website_certificate_authority_association.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_worklink_website_certificate_authority_association.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/worklink" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsWorkLinkWebsiteCertificateAuthorityAssociation() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_xray_sampling_rule.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_xray_sampling_rule.go index 94f297ba472..81ba29acde1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_xray_sampling_rule.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_xray_sampling_rule.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/xray" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) func resourceAwsXraySamplingRule() *schema.Resource { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go index 42fc6b8c0e3..5e8376e84e1 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go @@ -1,50 +1,90 @@ package aws import ( + "fmt" "log" "regexp" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" 
"github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) +// return a slice of s3 tags associated with the given s3 bucket. Essentially +// s3.GetBucketTagging, except returns an empty slice instead of an error when +// there are no tags. +func getTagSetS3Bucket(conn *s3.S3, bucket string) ([]*s3.Tag, error) { + input := &s3.GetBucketTaggingInput{ + Bucket: aws.String(bucket), + } + + // Retry due to S3 eventual consistency + outputRaw, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketTagging(input) + }) + + // S3 API Reference (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) + // lists the special error as NoSuchTagSetError, however the existing logic used NoSuchTagSet + // and the AWS Go SDK has neither as a constant. + if isAWSErr(err, "NoSuchTagSet", "") { + return nil, nil + } + + if err != nil { + return nil, err + } + + var tagSet []*s3.Tag + if output, ok := outputRaw.(*s3.GetBucketTaggingOutput); ok { + tagSet = output.TagSet + } + + return tagSet, nil +} + // setTags is a helper to set the tags for a resource. It expects the // tags field to be named "tags" -func setTagsS3(conn *s3.S3, d *schema.ResourceData) error { +func setTagsS3Bucket(conn *s3.S3, d *schema.ResourceData) error { if d.HasChange("tags") { oraw, nraw := d.GetChange("tags") o := oraw.(map[string]interface{}) n := nraw.(map[string]interface{}) - create, remove := diffTagsS3(tagsFromMapS3(o), tagsFromMapS3(n)) - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - _, err := RetryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { - return conn.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{ - Bucket: aws.String(d.Get("bucket").(string)), - }) - }) - if err != nil { - return err + // Get any existing system tags. + var sysTagSet []*s3.Tag + oTagSet, err := getTagSetS3Bucket(conn, d.Get("bucket").(string)) + if err != nil { + return fmt.Errorf("error getting S3 bucket tags: %s", err) + } + for _, tag := range oTagSet { + if tagIgnoredS3(tag) { + sysTagSet = append(sysTagSet, tag) } } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) + + if len(n)+len(sysTagSet) > 0 { + // The bucket's tag set must include any system tags that Terraform ignores. + nTagSet := append(tagsFromMapS3(n), sysTagSet...) 
+ req := &s3.PutBucketTaggingInput{ Bucket: aws.String(d.Get("bucket").(string)), Tagging: &s3.Tagging{ - TagSet: create, + TagSet: nTagSet, }, } - - _, err := RetryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { + if _, err := RetryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { return conn.PutBucketTagging(req) - }) - if err != nil { - return err + }); err != nil { + return fmt.Errorf("error setting S3 bucket tags: %s", err) + } + } else if len(o) > 0 && len(sysTagSet) == 0 { + req := &s3.DeleteBucketTaggingInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + if _, err := RetryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { + return conn.DeleteBucketTagging(req) + }); err != nil { + return fmt.Errorf("error deleting S3 bucket tags: %s", err) } } } @@ -78,23 +118,21 @@ func setTagsS3Object(conn *s3.S3, d *schema.ResourceData) error { // Set tags if len(o) > 0 { - _, err := conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{ + if _, err := conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{ Bucket: aws.String(d.Get("bucket").(string)), Key: aws.String(d.Get("key").(string)), - }) - if err != nil { + }); err != nil { return err } } if len(n) > 0 { - _, err := conn.PutObjectTagging(&s3.PutObjectTaggingInput{ + if _, err := conn.PutObjectTagging(&s3.PutObjectTaggingInput{ Bucket: aws.String(d.Get("bucket").(string)), Key: aws.String(d.Get("key").(string)), Tagging: &s3.Tagging{ TagSet: tagsFromMapS3(n), }, - }) - if err != nil { + }); err != nil { return err } } @@ -103,29 +141,6 @@ func setTagsS3Object(conn *s3.S3, d *schema.ResourceData) error { return nil } -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsS3(oldTags, newTags []*s3.Tag) ([]*s3.Tag, []*s3.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*s3.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapS3(create), remove -} - // tagsFromMap returns the tags for the given map of data. func tagsFromMapS3(m map[string]interface{}) []*s3.Tag { result := make([]*s3.Tag, 0, len(m)) @@ -154,25 +169,6 @@ func tagsToMapS3(ts []*s3.Tag) map[string]string { return result } -// return a slice of s3 tags associated with the given s3 bucket. Essentially -// s3.GetBucketTagging, except returns an empty slice instead of an error when -// there are no tags. -func getTagSetS3(s3conn *s3.S3, bucket string) ([]*s3.Tag, error) { - request := &s3.GetBucketTaggingInput{ - Bucket: aws.String(bucket), - } - - response, err := s3conn.GetBucketTagging(request) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" { - // There is no tag set associated with the bucket. 
- return []*s3.Tag{}, nil - } else if err != nil { - return nil, err - } - - return response.TagSet, nil -} - // compare a tag against a list of strings and checks if it should // be ignored or not func tagIgnoredS3(t *s3.Tag) bool { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go index 159c077eeac..69c32f9c069 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/aws/aws-sdk-go/service/appmesh" + "github.com/aws/aws-sdk-go/service/appsync" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" @@ -45,8 +46,8 @@ import ( "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/worklink" "github.com/beevik/etree" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/mitchellh/copystructure" "gopkg.in/yaml.v2" ) @@ -1202,14 +1203,40 @@ func expandESClusterConfig(m map[string]interface{}) *elasticsearch.Elasticsearc } if v, ok := m["zone_awareness_enabled"]; ok { - config.ZoneAwarenessEnabled = aws.Bool(v.(bool)) + isEnabled := v.(bool) + config.ZoneAwarenessEnabled = aws.Bool(isEnabled) + + if isEnabled { + if v, ok := m["zone_awareness_config"]; ok { + config.ZoneAwarenessConfig = expandElasticsearchZoneAwarenessConfig(v.([]interface{})) + } + } } return &config } +func expandElasticsearchZoneAwarenessConfig(l []interface{}) *elasticsearch.ZoneAwarenessConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + zoneAwarenessConfig := &elasticsearch.ZoneAwarenessConfig{} + + if v, ok := m["availability_zone_count"]; ok && v.(int) > 0 { + zoneAwarenessConfig.AvailabilityZoneCount = aws.Int64(int64(v.(int))) + } + + return zoneAwarenessConfig +} + func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} { - m := map[string]interface{}{} + m := map[string]interface{}{ + "zone_awareness_config": flattenElasticsearchZoneAwarenessConfig(c.ZoneAwarenessConfig), + "zone_awareness_enabled": aws.BoolValue(c.ZoneAwarenessEnabled), + } if c.DedicatedMasterCount != nil { m["dedicated_master_count"] = *c.DedicatedMasterCount @@ -1226,13 +1253,22 @@ func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[s if c.InstanceType != nil { m["instance_type"] = *c.InstanceType } - if c.ZoneAwarenessEnabled != nil { - m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled - } return []map[string]interface{}{m} } +func flattenElasticsearchZoneAwarenessConfig(zoneAwarenessConfig *elasticsearch.ZoneAwarenessConfig) []interface{} { + if zoneAwarenessConfig == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "availability_zone_count": aws.Int64Value(zoneAwarenessConfig.AvailabilityZoneCount), + } + + return []interface{}{m} +} + func expandESCognitoOptions(c []interface{}) *elasticsearch.CognitoOptions { options := 
&elasticsearch.CognitoOptions{ Enabled: aws.Bool(false), @@ -1586,25 +1622,6 @@ func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) ma return params } -func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag { - var cfTags []*cloudformation.Tag - for k, v := range tags { - cfTags = append(cfTags, &cloudformation.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - return cfTags -} - -func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string { - tags := make(map[string]string, len(cfTags)) - for _, t := range cfTags { - tags[*t.Key] = *t.Value - } - return tags -} - func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { outputs := make(map[string]string, len(cfOutputs)) for _, o := range cfOutputs { @@ -2464,6 +2481,10 @@ func flattenCognitoUserPoolEmailConfiguration(s *cognitoidentityprovider.EmailCo m["source_arn"] = *s.SourceArn } + if s.EmailSendingAccount != nil { + m["email_sending_account"] = *s.EmailSendingAccount + } + if len(m) > 0 { return []map[string]interface{}{m} } @@ -4467,16 +4488,6 @@ func expandDynamoDbEncryptAtRestOptions(m map[string]interface{}) *dynamodb.SSES return &options } -func flattenVpcEndpointServiceAllowedPrincipals(allowedPrincipals []*ec2.AllowedPrincipal) []string { - result := make([]string, 0, len(allowedPrincipals)) - for _, allowedPrincipal := range allowedPrincipals { - if allowedPrincipal.Principal != nil { - result = append(result, *allowedPrincipal.Principal) - } - } - return result -} - func expandDynamoDbTableItemAttributes(input string) (map[string]*dynamodb.AttributeValue, error) { var attributes map[string]*dynamodb.AttributeValue @@ -4607,37 +4618,40 @@ func flattenLaunchTemplateSpecification(lt *autoscaling.LaunchTemplateSpecificat return result } -func flattenVpcPeeringConnectionOptions(options *ec2.VpcPeeringConnectionOptionsDescription) []map[string]interface{} { - m := map[string]interface{}{} - - if options.AllowDnsResolutionFromRemoteVpc != nil { - m["allow_remote_vpc_dns_resolution"] = *options.AllowDnsResolutionFromRemoteVpc +func flattenVpcPeeringConnectionOptions(options *ec2.VpcPeeringConnectionOptionsDescription) []interface{} { + // When the VPC Peering Connection is pending acceptance, + // the details about accepter and/or requester peering + // options would not be included in the response. 
+ if options == nil { + return []interface{}{} } - if options.AllowEgressFromLocalClassicLinkToRemoteVpc != nil { - m["allow_classic_link_to_remote_vpc"] = *options.AllowEgressFromLocalClassicLinkToRemoteVpc - } + return []interface{}{map[string]interface{}{ + "allow_remote_vpc_dns_resolution": aws.BoolValue(options.AllowDnsResolutionFromRemoteVpc), + "allow_classic_link_to_remote_vpc": aws.BoolValue(options.AllowEgressFromLocalClassicLinkToRemoteVpc), + "allow_vpc_to_remote_classic_link": aws.BoolValue(options.AllowEgressFromLocalVpcToRemoteClassicLink), + }} +} - if options.AllowEgressFromLocalVpcToRemoteClassicLink != nil { - m["allow_vpc_to_remote_classic_link"] = *options.AllowEgressFromLocalVpcToRemoteClassicLink +func expandVpcPeeringConnectionOptions(vOptions []interface{}, crossRegionPeering bool) *ec2.PeeringConnectionOptionsRequest { + if len(vOptions) == 0 || vOptions[0] == nil { + return nil } - return []map[string]interface{}{m} -} + mOptions := vOptions[0].(map[string]interface{}) -func expandVpcPeeringConnectionOptions(m map[string]interface{}) *ec2.PeeringConnectionOptionsRequest { options := &ec2.PeeringConnectionOptionsRequest{} - if v, ok := m["allow_remote_vpc_dns_resolution"]; ok { - options.AllowDnsResolutionFromRemoteVpc = aws.Bool(v.(bool)) - } - - if v, ok := m["allow_classic_link_to_remote_vpc"]; ok { - options.AllowEgressFromLocalClassicLinkToRemoteVpc = aws.Bool(v.(bool)) + if v, ok := mOptions["allow_remote_vpc_dns_resolution"].(bool); ok { + options.AllowDnsResolutionFromRemoteVpc = aws.Bool(v) } - - if v, ok := m["allow_vpc_to_remote_classic_link"]; ok { - options.AllowEgressFromLocalVpcToRemoteClassicLink = aws.Bool(v.(bool)) + if !crossRegionPeering { + if v, ok := mOptions["allow_classic_link_to_remote_vpc"].(bool); ok { + options.AllowEgressFromLocalClassicLinkToRemoteVpc = aws.Bool(v) + } + if v, ok := mOptions["allow_vpc_to_remote_classic_link"].(bool); ok { + options.AllowEgressFromLocalVpcToRemoteClassicLink = aws.Bool(v) + } } return options @@ -4775,6 +4789,10 @@ func expandRdsScalingConfiguration(l []interface{}) *rds.ScalingConfiguration { SecondsUntilAutoPause: aws.Int64(int64(m["seconds_until_auto_pause"].(int))), } + if vTimeoutAction, ok := m["timeout_action"].(string); ok && vTimeoutAction != "" { + scalingConfiguration.TimeoutAction = aws.String(vTimeoutAction) + } + return scalingConfiguration } @@ -4788,6 +4806,7 @@ func flattenRdsScalingConfigurationInfo(scalingConfigurationInfo *rds.ScalingCon "max_capacity": aws.Int64Value(scalingConfigurationInfo.MaxCapacity), "min_capacity": aws.Int64Value(scalingConfigurationInfo.MinCapacity), "seconds_until_auto_pause": aws.Int64Value(scalingConfigurationInfo.SecondsUntilAutoPause), + "timeout_action": aws.StringValue(scalingConfigurationInfo.TimeoutAction), } return []interface{}{m} @@ -5012,16 +5031,41 @@ func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec } if vServiceDiscovery, ok := mSpec["service_discovery"].([]interface{}); ok && len(vServiceDiscovery) > 0 && vServiceDiscovery[0] != nil { + spec.ServiceDiscovery = &appmesh.ServiceDiscovery{} + mServiceDiscovery := vServiceDiscovery[0].(map[string]interface{}) + if vAwsCloudMap, ok := mServiceDiscovery["aws_cloud_map"].([]interface{}); ok && len(vAwsCloudMap) > 0 && vAwsCloudMap[0] != nil { + spec.ServiceDiscovery.AwsCloudMap = &appmesh.AwsCloudMapServiceDiscovery{} + + mAwsCloudMap := vAwsCloudMap[0].(map[string]interface{}) + + if vAttributes, ok := mAwsCloudMap["attributes"].(map[string]interface{}); ok && 
len(vAttributes) > 0 { + attributes := []*appmesh.AwsCloudMapInstanceAttribute{} + + for k, v := range vAttributes { + attributes = append(attributes, &appmesh.AwsCloudMapInstanceAttribute{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + spec.ServiceDiscovery.AwsCloudMap.Attributes = attributes + } + if vNamespaceName, ok := mAwsCloudMap["namespace_name"].(string); ok && vNamespaceName != "" { + spec.ServiceDiscovery.AwsCloudMap.NamespaceName = aws.String(vNamespaceName) + } + if vServiceName, ok := mAwsCloudMap["service_name"].(string); ok && vServiceName != "" { + spec.ServiceDiscovery.AwsCloudMap.ServiceName = aws.String(vServiceName) + } + } + if vDns, ok := mServiceDiscovery["dns"].([]interface{}); ok && len(vDns) > 0 && vDns[0] != nil { mDns := vDns[0].(map[string]interface{}) if vHostname, ok := mDns["hostname"].(string); ok && vHostname != "" { - spec.ServiceDiscovery = &appmesh.ServiceDiscovery{ - Dns: &appmesh.DnsServiceDiscovery{ - Hostname: aws.String(vHostname), - }, + spec.ServiceDiscovery.Dns = &appmesh.DnsServiceDiscovery{ + Hostname: aws.String(vHostname), } } } @@ -5105,16 +5149,34 @@ func flattenAppmeshVirtualNodeSpec(spec *appmesh.VirtualNodeSpec) []interface{} } } - if spec.ServiceDiscovery != nil && spec.ServiceDiscovery.Dns != nil { - mSpec["service_discovery"] = []interface{}{ - map[string]interface{}{ - "dns": []interface{}{ - map[string]interface{}{ - "hostname": aws.StringValue(spec.ServiceDiscovery.Dns.Hostname), - }, + if spec.ServiceDiscovery != nil { + mServiceDiscovery := map[string]interface{}{} + + if spec.ServiceDiscovery.AwsCloudMap != nil { + vAttributes := map[string]interface{}{} + + for _, attribute := range spec.ServiceDiscovery.AwsCloudMap.Attributes { + vAttributes[aws.StringValue(attribute.Key)] = aws.StringValue(attribute.Value) + } + + mServiceDiscovery["aws_cloud_map"] = []interface{}{ + map[string]interface{}{ + "attributes": vAttributes, + "namespace_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.NamespaceName), + "service_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.ServiceName), }, - }, + } } + + if spec.ServiceDiscovery.Dns != nil { + mServiceDiscovery["dns"] = []interface{}{ + map[string]interface{}{ + "hostname": aws.StringValue(spec.ServiceDiscovery.Dns.Hostname), + }, + } + } + + mSpec["service_discovery"] = []interface{}{mServiceDiscovery} } return []interface{}{mSpec} @@ -5346,6 +5408,22 @@ func flattenAppmeshRouteSpec(spec *appmesh.RouteSpec) []interface{} { return []interface{}{mSpec} } +func flattenAppsyncPipelineConfig(c *appsync.PipelineConfig) []interface{} { + if c == nil { + return nil + } + + if len(c.Functions) == 0 { + return nil + } + + m := map[string]interface{}{ + "functions": flattenStringList(c.Functions), + } + + return []interface{}{m} +} + func expandRoute53ResolverEndpointIpAddresses(vIpAddresses *schema.Set) []*route53resolver.IpAddressRequest { ipAddressRequests := []*route53resolver.IpAddressRequest{} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go index 7a0a6a5d6e4..b5e7359a116 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go @@ -9,12 +9,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) // tagsSchema returns the schema to use for tags. @@ -369,102 +369,6 @@ func tagIgnoredELBv2(t *elbv2.Tag) bool { return false } -// tagsToMapDynamoDb turns the list of tags into a map for dynamoDB -func tagsToMapDynamoDb(ts []*dynamodb.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - result[*t.Key] = *t.Value - } - return result -} - -// tagsFromMapDynamoDb returns the tags for a given map -func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag { - result := make([]*dynamodb.Tag, 0, len(m)) - for k, v := range m { - t := &dynamodb.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - result = append(result, t) - } - return result -} - -// setTagsDynamoDb is a helper to set the tags for a dynamoDB resource -// This is needed because dynamodb requires a completely different set and delete -// method from the ec2 tag resource handling. Also the `UntagResource` method -// for dynamoDB only requires a list of tag keys, instead of the full map of keys. -func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error { - arn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) - - // Set tags - if len(remove) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.UntagResource(&dynamodb.UntagResourceInput{ - ResourceArn: aws.String(arn), - TagKeys: remove, - }) - if err != nil { - if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - if len(create) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := conn.TagResource(&dynamodb.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: create, - }) - if err != nil { - if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - - return nil -} - -// diffTagsDynamoDb takes a local set of dynamodb tags and the ones found remotely -// and returns the set of tags that must be created as a map, and returns a list of tag keys -// that must be destroyed. 
-func diffTagsDynamoDb(oldTags, newTags []*dynamodb.Tag) ([]*dynamodb.Tag, []*string) { - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - var remove []*string - for _, t := range oldTags { - // Verify the old tag is not a tag we're currently attempting to create - old, ok := create[*t.Key] - if !ok || old != *t.Value { - remove = append(remove, t.Key) - } - } - return tagsFromMapDynamoDb(create), remove -} - // tagsMapToHash returns a stable hash value for a raw tags map. // The returned value map be negative (i.e. not suitable for a 'Set' function). func tagsMapToHash(tags map[string]interface{}) int { @@ -494,7 +398,7 @@ func ec2TagSpecificationsFromMap(m map[string]interface{}, t string) []*ec2.TagS return []*ec2.TagSpecification{ { ResourceType: aws.String(t), - Tags: tagsFromMap(m), + Tags: keyvaluetags.New(m).IgnoreAws().Ec2Tags(), }, } } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACM.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACM.go deleted file mode 100644 index 7f38ff3a432..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACM.go +++ /dev/null @@ -1,88 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/acm" - "github.com/hashicorp/terraform/helper/schema" -) - -func setTagsACM(conn *acm.ACM, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsACM(tagsFromMapACM(o), tagsFromMapACM(n)) - - // Set tags - if len(remove) > 0 { - input := acm.RemoveTagsFromCertificateInput{ - CertificateArn: aws.String(d.Get("arn").(string)), - Tags: remove, - } - log.Printf("[DEBUG] Removing ACM tags: %s", input) - _, err := conn.RemoveTagsFromCertificate(&input) - if err != nil { - return err - } - } - if len(create) > 0 { - input := acm.AddTagsToCertificateInput{ - CertificateArn: aws.String(d.Get("arn").(string)), - Tags: create, - } - log.Printf("[DEBUG] Adding ACM tags: %s", input) - _, err := conn.AddTagsToCertificate(&input) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsACM(oldTags, newTags []*acm.Tag) ([]*acm.Tag, []*acm.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*acm.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! 
- remove = append(remove, t) - } - } - - return tagsFromMapACM(create), remove -} - -func tagsFromMapACM(m map[string]interface{}) []*acm.Tag { - result := []*acm.Tag{} - for k, v := range m { - result = append(result, &acm.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func tagsToMapACM(ts []*acm.Tag) map[string]string { - result := map[string]string{} - for _, t := range ts { - result[*t.Key] = *t.Value - } - - return result -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACMPCA.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACMPCA.go deleted file mode 100644 index f497f3aee5b..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsACMPCA.go +++ /dev/null @@ -1,50 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/acmpca" -) - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsACMPCA(oldTags, newTags []*acmpca.Tag) ([]*acmpca.Tag, []*acmpca.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) - } - - // Build the list of what to remove - var remove []*acmpca.Tag - for _, t := range oldTags { - old, ok := create[aws.StringValue(t.Key)] - if !ok || old != aws.StringValue(t.Value) { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapACMPCA(create), remove -} - -func tagsFromMapACMPCA(m map[string]interface{}) []*acmpca.Tag { - result := []*acmpca.Tag{} - for k, v := range m { - result = append(result, &acmpca.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func tagsToMapACMPCA(ts []*acmpca.Tag) map[string]string { - result := map[string]string{} - for _, t := range ts { - result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) - } - - return result -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppmesh.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppmesh.go new file mode 100644 index 00000000000..1c759926649 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppmesh.go @@ -0,0 +1,131 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/appmesh" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsAppmesh(conn *appmesh.AppMesh, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsAppmesh(tagsFromMapAppmesh(o), tagsFromMapAppmesh(n)) + + // Set tags + if len(remove) > 0 { + input := appmesh.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: remove, + } + log.Printf("[DEBUG] Removing Appmesh tags: %s", input) + _, err := conn.UntagResource(&input) + if err != nil { + return err + } + } + if len(create) > 0 { + input := appmesh.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, + } + log.Printf("[DEBUG] Adding Appmesh tags: %s", input) + _, err := conn.TagResource(&input) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsAppmesh(oldTags, newTags []*appmesh.TagRef) ([]*appmesh.TagRef, []*string) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*string + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + remove = append(remove, t.Key) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapAppmesh(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapAppmesh(m map[string]interface{}) []*appmesh.TagRef { + var result []*appmesh.TagRef + for k, v := range m { + t := &appmesh.TagRef{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredAppmesh(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
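Both tagsFromMapAppmesh above and the tagsToMapAppmesh conversion below route every tag through tagIgnoredAppmesh, defined at the end of this file, so AWS-managed keys never reach the API or Terraform state. The filter is an anchored regexp match against a one-entry pattern list; reduced to its essentials (assuming only the "^aws:" pattern these vendored files use):

package main

import (
	"fmt"
	"regexp"
)

// tagIgnored reports whether a key belongs to AWS itself and must be
// skipped when diffing tags or writing them back to state.
func tagIgnored(key string) bool {
	matched, _ := regexp.MatchString("^aws:", key)
	return matched
}

func main() {
	fmt.Println(tagIgnored("aws:cloudformation:stack-name")) // true
	fmt.Println(tagIgnored("Name"))                          // false
}

Filtering in both directions keeps keys like aws:cloudformation:stack-name from ever appearing as a spurious plan diff.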
+func tagsToMapAppmesh(ts []*appmesh.TagRef) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredAppmesh(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +func saveTagsAppmesh(conn *appmesh.AppMesh, d *schema.ResourceData, arn string) error { + resp, err := conn.ListTagsForResource(&appmesh.ListTagsForResourceInput{ + ResourceArn: aws.String(arn), + }) + if err != nil { + return err + } + + var dt []*appmesh.TagRef + if len(resp.Tags) > 0 { + dt = resp.Tags + } + + return d.Set("tags", tagsToMapAppmesh(dt)) +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredAppmesh(t *appmesh.TagRef) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key)) + r, _ := regexp.MatchString(v, aws.StringValue(t.Key)) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value)) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppsync.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppsync.go index c0d51f1573d..ba038595c78 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppsync.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsAppsync.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func setTagsAppsync(conn *appsync.AppSync, d *schema.ResourceData, arn string) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go index 57e26d3d967..cbb78c986ce 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go @@ -6,8 +6,47 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) +// saveTagsBeanstalk is a helper to save the tags for a resource. It expects the +// tags field to be named "tags" +func saveTagsBeanstalk(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData, arn string) error { + resp, err := conn.ListTagsForResource(&elasticbeanstalk.ListTagsForResourceInput{ + ResourceArn: aws.String(arn), + }) + if err != nil { + return err + } + + if err := d.Set("tags", tagsToMapBeanstalk(resp.ResourceTags)); err != nil { + return err + } + + return nil +} + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsBeanstalk(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + add, remove := diffTagsBeanstalk(tagsFromMapBeanstalk(o), tagsFromMapBeanstalk(n)) + + if _, err := conn.UpdateTagsForResource(&elasticbeanstalk.UpdateTagsForResourceInput{ + ResourceArn: aws.String(arn), + TagsToAdd: add, + TagsToRemove: remove, + }); err != nil { + return err + } + } + + return nil +} + // diffTags takes our tags locally and the ones remotely and returns // the set of tags that must be created, and the set of tags that must // be destroyed. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go index d2b60c73cfc..5faa4db5f02 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func setTagsCloudFront(conn *cloudfront.CloudFront, d *schema.ResourceData, arn string) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatch.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatch.go index 8443afc9776..465bdf9d60b 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatch.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatch.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatchEvent.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatchEvent.go index e7937a4f460..0387786ad1a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatchEvent.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudWatchEvent.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. 
It expects the @@ -105,12 +105,12 @@ func saveTagsCloudWatchEvents(conn *events.CloudWatchEvents, d *schema.ResourceD ResourceARN: aws.String(arn), }) - if err != nil { + if err != nil && !isAWSErr(err, "UnknownOperationException", "") { return fmt.Errorf("Error retreiving tags for %s: %s", arn, err) } var tagList []*events.Tag - if len(resp.Tags) > 0 { + if resp != nil && len(resp.Tags) > 0 { tagList = resp.Tags } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go index c0d7657c9e7..4636f71bc09 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go deleted file mode 100644 index 0a300697239..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go +++ /dev/null @@ -1,27 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codebuild" -) - -func tagsFromMapCodeBuild(m map[string]interface{}) []*codebuild.Tag { - result := []*codebuild.Tag{} - for k, v := range m { - result = append(result, &codebuild.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func tagsToMapCodeBuild(ts []*codebuild.Tag) map[string]string { - result := map[string]string{} - for _, t := range ts { - result[*t.Key] = *t.Value - } - - return result -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeCommit.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeCommit.go new file mode 100644 index 00000000000..adc975d8c22 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeCommit.go @@ -0,0 +1,107 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsCodeCommit(conn *codecommit.CodeCommit, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsCodeCommit(tagsFromMapCodeCommit(o), tagsFromMapCodeCommit(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + keys := make([]*string, 0, len(remove)) + for k := range remove { + keys = append(keys, aws.String(k)) + } + + _, err := conn.UntagResource(&codecommit.UntagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + TagKeys: keys, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&codecommit.TagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsCodeCommit(oldTags, newTags map[string]*string) (map[string]*string, map[string]*string) { + // Build the list of what to remove + remove := make(map[string]*string) + for k, v := range oldTags { + newVal, existsInNew := newTags[k] + if !existsInNew || *newVal != *v { + // Delete it! + remove[k] = v + } else if existsInNew { + delete(newTags, k) + } + } + return newTags, remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapCodeCommit(m map[string]interface{}) map[string]*string { + result := make(map[string]*string) + for k, v := range m { + if !tagIgnoredCodeCommit(k, v.(string)) { + result[k] = aws.String(v.(string)) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapCodeCommit(ts map[string]*string) map[string]string { + result := make(map[string]string) + for k, v := range ts { + if !tagIgnoredCodeCommit(k, aws.StringValue(v)) { + result[k] = aws.StringValue(v) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredCodeCommit(key, value string) bool { + filter := []string{"^aws:"} + for _, ignore := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", ignore, key) + r, _ := regexp.MatchString(ignore, key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val %s), ignoring.\n", key, value) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodePipeline.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodePipeline.go new file mode 100644 index 00000000000..36f8fca2a0f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodePipeline.go @@ -0,0 +1,137 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsCodePipeline(conn *codepipeline.CodePipeline, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsCodePipeline(tagsFromMapCodePipeline(o), tagsFromMapCodePipeline(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&codepipeline.UntagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&codepipeline.TagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsCodePipeline(oldTags, newTags []*codepipeline.Tag) ([]*codepipeline.Tag, []*codepipeline.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*codepipeline.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + // Delete it! + remove = append(remove, t) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapCodePipeline(create), remove +} + +func saveTagsCodePipeline(conn *codepipeline.CodePipeline, d *schema.ResourceData) error { + resp, err := conn.ListTagsForResource(&codepipeline.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + }) + + if err != nil { + return fmt.Errorf("Error retreiving tags for ARN: %s", d.Get("arn").(string)) + } + + var dt []*codepipeline.Tag + if len(resp.Tags) > 0 { + dt = resp.Tags + } + + return d.Set("tags", tagsToMapCodePipeline(dt)) +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapCodePipeline(m map[string]interface{}) []*codepipeline.Tag { + result := make([]*codepipeline.Tag, 0, len(m)) + for k, v := range m { + t := &codepipeline.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredCodePipeline(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
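For orientation, the helpers in this file are consumed from a resource's CRUD functions: Update calls setTagsCodePipeline after the pipeline itself has been modified, and Read calls saveTagsCodePipeline so out-of-band tag changes surface as diffs. A hypothetical wiring in the provider's usual shape (the resource functions and the codepipelineconn field below follow the provider's conventions but are not copied from it):

func resourceAwsCodePipelineUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).codepipelineconn // conn-per-service convention

	// ...apply changes to the pipeline definition itself first...

	if err := setTagsCodePipeline(conn, d); err != nil {
		return fmt.Errorf("error updating CodePipeline tags: %s", err)
	}
	return resourceAwsCodePipelineRead(d, meta)
}

func resourceAwsCodePipelineRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).codepipelineconn

	// ...read the pipeline and set its other attributes...

	// Pull tags back into state so drift is detected on the next plan.
	return saveTagsCodePipeline(conn, d)
}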
+func tagsToMapCodePipeline(ts []*codepipeline.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredCodePipeline(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredCodePipeline(t *codepipeline.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key)) + r, _ := regexp.MatchString(v, aws.StringValue(t.Key)) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value)) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCognito.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCognito.go new file mode 100644 index 00000000000..ca7db30e5e0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCognito.go @@ -0,0 +1,82 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsCognito(conn *cognitoidentity.CognitoIdentity, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsCognito(tagsFromMapCognito(o), tagsFromMapCognito(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + tagsToRemove := &cognitoidentity.UntagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + TagKeys: aws.StringSlice(remove), + } + + _, err := conn.UntagResource(tagsToRemove) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + tagsToAdd := &cognitoidentity.TagResourceInput{ + ResourceArn: aws.String(d.Get("arn").(string)), + Tags: aws.StringMap(create), + } + _, err := conn.TagResource(tagsToAdd) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsCognito(oldTags, newTags map[string]string) (map[string]string, []string) { + // First, we're creating everything we have + create := make(map[string]string) + for k, v := range newTags { + create[k] = v + } + + // Build the list of what to remove + var remove []string + for k, v := range oldTags { + old, ok := create[k] + if !ok || old != v { + // Delete it! + remove = append(remove, k) + } else if ok { + // already present so remove from new + delete(create, k) + } + } + + return create, remove +} + +// tagsFromMap returns the tags for the given map of data. 
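The Cognito Identity helpers above are simpler than the other files in this diff because the service's tagging API takes a plain map[string]*string and []*string rather than a service-specific Tag struct, which lets setTagsCognito lean on the SDK's aws.StringMap and aws.StringSlice converters. Those simply lift Go values into the pointer types the SDK expects:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	tags := aws.StringMap(map[string]string{"Env": "prod"}) // map[string]*string
	keys := aws.StringSlice([]string{"Env", "Name"})        // []*string

	fmt.Println(*tags["Env"], *keys[0]) // prod Env
}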
+func tagsFromMapCognito(m map[string]interface{}) map[string]string { + results := make(map[string]string) + for k, v := range m { + results[k] = v.(string) + } + + return results +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsConfigService.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsConfigService.go new file mode 100644 index 00000000000..3a74429e8d8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsConfigService.go @@ -0,0 +1,135 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsConfigService(conn *configservice.ConfigService, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsConfigService(tagsFromMapConfigService(o), tagsFromMapConfigService(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + k := make([]*string, len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&configservice.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + _, err := conn.TagResource(&configservice.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsConfigService(oldTags, newTags []*configservice.Tag) ([]*configservice.Tag, []*configservice.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*configservice.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + // Delete it! + remove = append(remove, t) + } else if ok { + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapConfigService(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapConfigService(m map[string]interface{}) []*configservice.Tag { + result := make([]*configservice.Tag, 0, len(m)) + for k, v := range m { + t := &configservice.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredConfigService(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
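tagsConfigService.go is yet another copy of the same four helpers. Each AWS service defines its own Tag struct, and with no generics available the provider historically stamped out one near-identical file per service; the keyvaluetags package imported into tags.go earlier in this diff, visible in the call chain keyvaluetags.New(m).IgnoreAws().Ec2Tags(), is the consolidation of exactly this pattern. A minimal sketch of the idea behind such a package, assuming nothing beyond that visible call chain (an illustrative reduction, not the vendored implementation):

package keyvaluetags

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// KeyValueTags is a single neutral representation for every service's tags.
type KeyValueTags map[string]string

// New builds the neutral representation from a schema-level tags map.
func New(m map[string]interface{}) KeyValueTags {
	kvt := make(KeyValueTags, len(m))
	for k, v := range m {
		kvt[k] = v.(string)
	}
	return kvt
}

// IgnoreAws drops AWS-managed keys, replacing the per-file tagIgnored helpers.
func (t KeyValueTags) IgnoreAws() KeyValueTags {
	out := make(KeyValueTags, len(t))
	for k, v := range t {
		if !strings.HasPrefix(k, "aws:") {
			out[k] = v
		}
	}
	return out
}

// Ec2Tags renders the neutral map into EC2's tag type; one such method per service.
func (t KeyValueTags) Ec2Tags() []*ec2.Tag {
	tags := make([]*ec2.Tag, 0, len(t))
	for k, v := range t {
		tags = append(tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
	}
	return tags
}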
+func tagsToMapConfigService(ts []*configservice.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredConfigService(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredConfigService(t *configservice.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key)) + r, _ := regexp.MatchString(v, aws.StringValue(t.Key)) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value)) + return true + } + } + return false +} + +func saveTagsConfigService(conn *configservice.ConfigService, d *schema.ResourceData, arn string) error { + resp, err := conn.ListTagsForResource(&configservice.ListTagsForResourceInput{ + ResourceArn: aws.String(arn), + }) + + if err != nil { + return err + } + + var dt []*configservice.Tag + if len(resp.Tags) > 0 { + dt = resp.Tags + } + + return d.Set("tags", tagsToMapConfigService(dt)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDAX.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDAX.go index ef044f9d16b..4cf66a0c571 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDAX.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDAX.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dax" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDS.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDS.go index 0088d139d94..a8e3b948c60 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDS.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDS.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDX.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDX.go index 593762f82d9..63a38e2d595 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDX.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDX.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // getTags is a helper to get the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDocDB.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDocDB.go index 7674225fd3e..62f3424131a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDocDB.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDocDB.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDynamoDb.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDynamoDb.go new file mode 100644 index 00000000000..1d96a05b092 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsDynamoDb.go @@ -0,0 +1,139 @@ +package aws + +import ( + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" and the ARN field to be named "arn". +func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error { + arn := d.Get("arn").(string) + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) + + // Set tags + if len(remove) > 0 { + input := &dynamodb.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: remove, + } + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + _, err := conn.UntagResource(input) + if err != nil { + if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.UntagResource(input) + } + if err != nil { + return err + } + } + if len(create) > 0 { + input := &dynamodb.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, + } + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + _, err := conn.TagResource(input) + if err != nil { + if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.TagResource(input) + } + if err != nil { + return err + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. 
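setTagsDynamoDb above layers two behaviors onto the plain TagResource/UntagResource calls: a two-minute resource.Retry loop that treats ResourceNotFoundException as retryable (the table may still be materializing), and, when that loop exhausts its budget, one final direct attempt gated on isResourceTimeoutError. The last-attempt idiom matters because resource.Retry can report a timeout even though the operation was on the verge of succeeding. Condensed from the helper above (opFunc stands in for the SDK call):

err := resource.Retry(2*time.Minute, func() *resource.RetryError {
	_, err := opFunc()
	if err != nil {
		if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") {
			return resource.RetryableError(err) // transient: keep polling
		}
		return resource.NonRetryableError(err) // real failure: stop immediately
	}
	return nil
})
if isResourceTimeoutError(err) {
	// Retry budget exhausted; give the call one last direct try so a
	// late-arriving table does not fail the whole apply.
	_, err = opFunc()
}
if err != nil {
	return err
}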
+func diffTagsDynamoDb(oldTags, newTags []*dynamodb.Tag) ([]*dynamodb.Tag, []*string) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*string + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + remove = append(remove, t.Key) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapDynamoDb(create), remove +} + +// tagsFromMapDynamoDb returns the tags for the given map of data. +func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag { + result := make([]*dynamodb.Tag, 0, len(m)) + for k, v := range m { + t := &dynamodb.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredDynamoDb(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapDynamoDb(ts []*dynamodb.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredDynamoDb(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredDynamoDb(t *dynamodb.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + r, _ := regexp.MatchString(v, *t.Key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go index 2da2fc3df3c..2e11ea7bd1a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go index 281bd9f29e1..09ecb64713d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go index b45c49a15eb..9986f9bf608 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLicenseManager.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsFSX.go similarity index 56% rename from pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLicenseManager.go rename to pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsFSX.go index 20f786cfdf7..c04ed349372 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLicenseManager.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsFSX.go @@ -5,29 +5,30 @@ import ( "regexp" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/licensemanager" - "github.com/hashicorp/terraform/helper/schema" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsLicenseManager(conn *licensemanager.LicenseManager, d *schema.ResourceData) error { +// tags field to be named "tags". It also expects to take the resource +// ARN as the primary ID based on the requirements of the FSx API (as +// opposed to the resource ID like other tagging helpers). 
+func setTagsFSX(conn *fsx.FSx, d *schema.ResourceData) error { if d.HasChange("tags") { oraw, nraw := d.GetChange("tags") o := oraw.(map[string]interface{}) n := nraw.(map[string]interface{}) - create, remove := diffTagsLicenseManager(tagsFromMapLicenseManager(o), tagsFromMapLicenseManager(n)) + create, remove := diffTagsFSX(tagsFromMapFSX(o), tagsFromMapFSX(n)) // Set tags if len(remove) > 0 { log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove)) - for i, t := range remove { - k[i] = t.Key + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) } - - _, err := conn.UntagResource(&licensemanager.UntagResourceInput{ - ResourceArn: aws.String(d.Id()), + _, err := conn.UntagResource(&fsx.UntagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), TagKeys: k, }) if err != nil { @@ -36,8 +37,8 @@ func setTagsLicenseManager(conn *licensemanager.LicenseManager, d *schema.Resour } if len(create) > 0 { log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&licensemanager.TagResourceInput{ - ResourceArn: aws.String(d.Id()), + _, err := conn.TagResource(&fsx.TagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), Tags: create, }) if err != nil { @@ -52,7 +53,7 @@ func setTagsLicenseManager(conn *licensemanager.LicenseManager, d *schema.Resour // diffTags takes our tags locally and the ones remotely and returns // the set of tags that must be created, and the set of tags that must // be destroyed. -func diffTagsLicenseManager(oldTags, newTags []*licensemanager.Tag) ([]*licensemanager.Tag, []*licensemanager.Tag) { +func diffTagsFSX(oldTags, newTags []*fsx.Tag) ([]*fsx.Tag, []*fsx.Tag) { // First, we're creating everything we have create := make(map[string]interface{}) for _, t := range newTags { @@ -60,27 +61,30 @@ func diffTagsLicenseManager(oldTags, newTags []*licensemanager.Tag) ([]*licensem } // Build the list of what to remove - var remove []*licensemanager.Tag + var remove []*fsx.Tag for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { // Delete it! remove = append(remove, t) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) } } - return tagsFromMapLicenseManager(create), remove + return tagsFromMapFSX(create), remove } // tagsFromMap returns the tags for the given map of data. -func tagsFromMapLicenseManager(m map[string]interface{}) []*licensemanager.Tag { - result := make([]*licensemanager.Tag, 0, len(m)) +func tagsFromMapFSX(m map[string]interface{}) []*fsx.Tag { + var result []*fsx.Tag for k, v := range m { - t := &licensemanager.Tag{ + t := &fsx.Tag{ Key: aws.String(k), Value: aws.String(v.(string)), } - if !tagIgnoredLicenseManager(t) { + if !tagIgnoredFSX(t) { result = append(result, t) } } @@ -89,10 +93,10 @@ func tagsFromMapLicenseManager(m map[string]interface{}) []*licensemanager.Tag { } // tagsToMap turns the list of tags into a map. 
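Alongside the rename, diffTagsFSX switches from bare dereferences (*t.Key) to the SDK's aws.StringValue accessor, which tolerates nil pointers; the map conversions below still dereference directly. The difference shows up as soon as the API returns a tag with an unset field:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	var key *string // e.g. a Tag field the API left unset

	fmt.Println(aws.StringValue(key)) // "" — nil-safe zero value
	// fmt.Println(*key)              // would panic: nil pointer dereference
}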
-func tagsToMapLicenseManager(ts []*licensemanager.Tag) map[string]string { +func tagsToMapFSX(ts []*fsx.Tag) map[string]string { result := make(map[string]string) for _, t := range ts { - if !tagIgnoredLicenseManager(t) { + if !tagIgnoredFSX(t) { result[*t.Key] = *t.Value } } @@ -102,11 +106,12 @@ func tagsToMapLicenseManager(ts []*licensemanager.Tag) map[string]string { // compare a tag against a list of strings and checks if it should // be ignored or not -func tagIgnoredLicenseManager(t *licensemanager.Tag) bool { +func tagIgnoredFSX(t *fsx.Tag) bool { filter := []string{"^aws:"} for _, v := range filter { log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r { + r, _ := regexp.MatchString(v, *t.Key) + if r { log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) return true } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go index d0a10d4a956..07a86a397ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisAnalytics.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisAnalytics.go new file mode 100644 index 00000000000..a7c2277fec9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisAnalytics.go @@ -0,0 +1,135 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kinesisanalytics" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// getTags is a helper to get the tags for a resource. It expects the +// tags field to be named "tags" and the ARN field to be named "arn". +func getTagsKinesisAnalytics(conn *kinesisanalytics.KinesisAnalytics, d *schema.ResourceData) error { + resp, err := conn.ListTagsForResource(&kinesisanalytics.ListTagsForResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + }) + if err != nil { + return err + } + + if err := d.Set("tags", tagsToMapKinesisAnalytics(resp.Tags)); err != nil { + return err + } + + return nil +} + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" and the ARN field to be named "arn". 
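+//
+// Editor's note (illustrative, not part of the upstream file): a typical
+// resource Update would call setTagsKinesisAnalytics(conn, d) after its main
+// API update and return any error, while the paired getTagsKinesisAnalytics
+// above refreshes the "tags" field during Read.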
+func setTagsKinesisAnalytics(conn *kinesisanalytics.KinesisAnalytics, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsKinesisAnalytics(tagsFromMapKinesisAnalytics(o), tagsFromMapKinesisAnalytics(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&kinesisanalytics.UntagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&kinesisanalytics.TagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsKinesisAnalytics(oldTags, newTags []*kinesisanalytics.Tag) ([]*kinesisanalytics.Tag, []*kinesisanalytics.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*kinesisanalytics.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + remove = append(remove, t) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapKinesisAnalytics(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapKinesisAnalytics(m map[string]interface{}) []*kinesisanalytics.Tag { + result := make([]*kinesisanalytics.Tag, 0, len(m)) + for k, v := range m { + t := &kinesisanalytics.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredKinesisAnalytics(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
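+//
+// For example (editor's illustration, not upstream code):
+//
+//	tagsToMapKinesisAnalytics([]*kinesisanalytics.Tag{
+//		{Key: aws.String("env"), Value: aws.String("dev")},
+//	})
+//
+// returns map[string]string{"env": "dev"}; keys matching "^aws:" are dropped
+// by tagIgnoredKinesisAnalytics.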
+func tagsToMapKinesisAnalytics(ts []*kinesisanalytics.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredKinesisAnalytics(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredKinesisAnalytics(t *kinesisanalytics.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + r, _ := regexp.MatchString(v, *t.Key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisFirehose.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisFirehose.go index 1af7e49a0c9..aa8bf266252 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisFirehose.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKinesisFirehose.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // getTags is a helper to get the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go index 28aa2512151..b6a08c6e9ed 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLightsail.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLightsail.go new file mode 100644 index 00000000000..b6a3ea06de9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLightsail.go @@ -0,0 +1,97 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. 
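+//
+// Editor's example (not upstream code): with old tags {"a": "1", "b": "2"}
+// and new tags {"a": "1", "b": "3"}, create ends up holding only the b=3 tag
+// (unchanged keys are deleted from create) and remove holds the old b tag.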
+func diffTagsLightsail(oldTags, newTags []*lightsail.Tag) ([]*lightsail.Tag, []*lightsail.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*lightsail.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + // Delete it! + remove = append(remove, t) + } else if ok { + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapLightsail(create), remove +} + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsLightsail(conn *lightsail.Lightsail, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsLightsail(tagsFromMapLightsail(o), tagsFromMapLightsail(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&lightsail.UntagResourceInput{ + ResourceName: aws.String(d.Get("name").(string)), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&lightsail.TagResourceInput{ + ResourceName: aws.String(d.Get("name").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapLightsail(m map[string]interface{}) []*lightsail.Tag { + result := make([]*lightsail.Tag, 0, len(m)) + for k, v := range m { + result = append(result, &lightsail.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapLightsail(ts []*lightsail.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMQ.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMQ.go index 2ea270be72c..ae45d735f53 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMQ.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMQ.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mq" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // getTags is a helper to get the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediaStore.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediaStore.go new file mode 100644 index 00000000000..976975da186 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediaStore.go @@ -0,0 +1,135 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/mediastore" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsMediaStore(conn *mediastore.MediaStore, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsMediaStore(tagsFromMapMediaStore(o), tagsFromMapMediaStore(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + k := make([]*string, len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&mediastore.UntagResourceInput{ + Resource: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + _, err := conn.TagResource(&mediastore.TagResourceInput{ + Resource: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsMediaStore(oldTags, newTags []*mediastore.Tag) ([]*mediastore.Tag, []*mediastore.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*mediastore.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + // Delete it! + remove = append(remove, t) + } else if ok { + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapMediaStore(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapMediaStore(m map[string]interface{}) []*mediastore.Tag { + result := make([]*mediastore.Tag, 0, len(m)) + for k, v := range m { + t := &mediastore.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredMediaStore(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
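+//
+// Editor's illustration (not upstream code): a list containing
+// {Key: "Name", Value: "demo"} and {Key: "aws:cloudformation:stack-name", Value: "s"}
+// maps to {"Name": "demo"}; "aws:"-prefixed keys are filtered out by
+// tagIgnoredMediaStore.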
+func tagsToMapMediaStore(ts []*mediastore.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredMediaStore(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredMediaStore(t *mediastore.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key)) + r, _ := regexp.MatchString(v, aws.StringValue(t.Key)) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value)) + return true + } + } + return false +} + +func saveTagsMediaStore(conn *mediastore.MediaStore, d *schema.ResourceData, arn string) error { + resp, err := conn.ListTagsForResource(&mediastore.ListTagsForResourceInput{ + Resource: aws.String(arn), + }) + + if err != nil { + return err + } + + var dt []*mediastore.Tag + if len(resp.Tags) > 0 { + dt = resp.Tags + } + + return d.Set("tags", tagsToMapMediaStore(dt)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediapackage.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediapackage.go index 790933098e9..f945cf1cfe5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediapackage.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsMediapackage.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/mediapackage" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func setTagsMediaPackage(conn *mediapackage.MediaPackage, d *schema.ResourceData, arn string) error { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsNeptune.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsNeptune.go index b5f0ba2a00f..10a32c1010d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsNeptune.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsNeptune.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/neptune" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOpsworks.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOpsworks.go index 8972e02b970..24163859cfe 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOpsworks.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOpsworks.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOrganizations.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOrganizations.go new file mode 100644 index 00000000000..065623f3758 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsOrganizations.go @@ -0,0 +1,77 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/organizations" +) + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsOrganizations(oldTags, newTags []*organizations.Tag) ([]*organizations.Tag, []*string) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + + // Build the list of what to remove + var remove []*string + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + remove = append(remove, t.Key) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapOrganizations(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapOrganizations(m map[string]interface{}) []*organizations.Tag { + var result []*organizations.Tag + for k, v := range m { + t := &organizations.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredOrganizations(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapOrganizations(ts []*organizations.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredOrganizations(t) { + result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredOrganizations(t *organizations.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key)) + r, _ := regexp.MatchString(v, aws.StringValue(t.Key)) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value)) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsPinPointApp.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsPinPointApp.go new file mode 100644 index 00000000000..397c4cff6d6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsPinPointApp.go @@ -0,0 +1,133 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/pinpoint" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "regexp" +) + +// getTags is a helper to get the tags for a resource. 
It expects the
+// tags field to be named "tags"
+func getTagsPinPointApp(conn *pinpoint.Pinpoint, d *schema.ResourceData) error {
+	resp, err := conn.ListTagsForResource(&pinpoint.ListTagsForResourceInput{
+		ResourceArn: aws.String(d.Get("arn").(string)),
+	})
+	if err != nil {
+		return err
+	}
+
+	if err := d.Set("tags", tagsToMapPinPointApp(resp.TagsModel)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// tagsToMap turns the list of tags into a map.
+func tagsToMapPinPointApp(tm *pinpoint.TagsModel) map[string]string {
+	result := make(map[string]string)
+	for key, value := range tm.Tags {
+		if !tagIgnoredPinPointApp(key, aws.StringValue(value)) {
+			result[key] = aws.StringValue(value)
+		}
+	}
+
+	return result
+}
+
+// compares a tag against a list of strings and checks if it should
+// be ignored or not
+func tagIgnoredPinPointApp(tagKey string, tagValue string) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, tagKey)
+		r, _ := regexp.MatchString(v, tagKey)
+		if r {
+			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", tagKey, tagValue)
+			return true
+		}
+	}
+	return false
+}
+
+// tagsFromMap returns the tags for the given map of data.
+func tagsFromMapPinPointApp(m map[string]interface{}) map[string]*string {
+	result := make(map[string]*string)
+	for k, v := range m {
+		if !tagIgnoredPinPointApp(k, v.(string)) {
+			result[k] = aws.String(v.(string))
+		}
+	}
+
+	return result
+}
+
+// setTags is a helper to set the tags for a resource. It expects the
+// tags field to be named "tags"
+func setTagsPinPointApp(conn *pinpoint.Pinpoint, d *schema.ResourceData) error {
+	if d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tags")
+		o := oraw.(map[string]interface{})
+		n := nraw.(map[string]interface{})
+		create, remove := diffTagsPinPointApp(tagsFromMapPinPointApp(o), tagsFromMapPinPointApp(n))
+
+		// Set tags
+		if len(remove) > 0 {
+			log.Printf("[DEBUG] Removing tags: %#v", remove)
+			k := make([]*string, 0, len(remove))
+			for i := range remove {
+				// aws.String copies the key; appending &i would leave every
+				// element aliasing the same loop variable (pre-Go 1.22 semantics)
+				k = append(k, aws.String(i))
+			}
+
+			log.Printf("[DEBUG] Removing old tags: %#v", k)
+			log.Printf("[DEBUG] Removing for arn: %#v", aws.String(d.Get("arn").(string)))
+
+			_, err := conn.UntagResource(&pinpoint.UntagResourceInput{
+				ResourceArn: aws.String(d.Get("arn").(string)),
+				TagKeys: k,
+			})
+			if err != nil {
+				return err
+			}
+		}
+		if len(create) > 0 {
+			log.Printf("[DEBUG] Creating tags: %#v", create)
+			_, err := conn.TagResource(&pinpoint.TagResourceInput{
+				ResourceArn: aws.String(d.Get("arn").(string)),
+				TagsModel: &pinpoint.TagsModel{Tags: create},
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// diffTags takes our tags locally and the ones remotely and returns
+// the set of tags that must be created, and the set of tags that must
+// be destroyed.
+func diffTagsPinPointApp(oldTags, newTags map[string]*string) (map[string]*string, map[string]*string) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for k, v := range newTags {
+		create[k] = aws.StringValue(v)
+	}
+
+	// Build the list of what to remove
+	var remove = make(map[string]*string)
+	for k, v := range oldTags {
+		old, ok := create[k]
+		if !ok || old != aws.StringValue(v) {
+			// Delete it!
+ remove[k] = v + } else if ok { + delete(create, k) + } + } + + return tagsFromMapPinPointApp(create), remove +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go index 78e4f923ffd..9ec4206920a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go index 26af8e7679b..0747ce29676 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go @@ -6,11 +6,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -func setTagsRedshift(conn *redshift.Redshift, d *schema.ResourceData, arn string) error { +func setTagsRedshift(conn *redshift.Redshift, d *schema.ResourceData) error { if d.HasChange("tags") { + arn := d.Get("arn").(string) oraw, nraw := d.GetChange("tags") o := oraw.(map[string]interface{}) n := nraw.(map[string]interface{}) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRoute53Resolver.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRoute53Resolver.go index 430ce1bb66f..953aa489b4d 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRoute53Resolver.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRoute53Resolver.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53resolver" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // getTags is a helper to get the tags for a resource. It expects the @@ -37,46 +37,6 @@ func getTagsRoute53Resolver(conn *route53resolver.Route53Resolver, d *schema.Res return nil } -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" and the ARN field to be named "arn". 
-func setTagsRoute53Resolver(conn *route53resolver.Route53Resolver, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsRoute53Resolver(tagsFromMapRoute53Resolver(o), tagsFromMapRoute53Resolver(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.UntagResource(&route53resolver.UntagResourceInput{ - ResourceArn: aws.String(d.Get("arn").(string)), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&route53resolver.TagResourceInput{ - ResourceArn: aws.String(d.Get("arn").(string)), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - // diffTags takes our tags locally and the ones remotely and returns // the set of tags that must be created, and the set of tags that must // be destroyed. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsECR.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSNS.go similarity index 65% rename from pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsECR.go rename to pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSNS.go index 4bedcccb64b..d8862fc2f1f 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsECR.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSNS.go @@ -5,35 +5,18 @@ import ( "regexp" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecr" - "github.com/hashicorp/terraform/helper/schema" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -// getTags is a helper to get the tags for a resource. It expects the -// tags field to be named "tags" and the ARN field to be named "arn". -func getTagsECR(conn *ecr.ECR, d *schema.ResourceData) error { - resp, err := conn.ListTagsForResource(&ecr.ListTagsForResourceInput{ - ResourceArn: aws.String(d.Get("arn").(string)), - }) - if err != nil { - return err - } - - if err := d.Set("tags", tagsToMapECR(resp.Tags)); err != nil { - return err - } - - return nil -} - // setTags is a helper to set the tags for a resource. It expects the // tags field to be named "tags" and the ARN field to be named "arn". 
-func setTagsECR(conn *ecr.ECR, d *schema.ResourceData) error { +func setTagsSNS(conn *sns.SNS, d *schema.ResourceData) error { if d.HasChange("tags") { oraw, nraw := d.GetChange("tags") o := oraw.(map[string]interface{}) n := nraw.(map[string]interface{}) - create, remove := diffTagsECR(tagsFromMapECR(o), tagsFromMapECR(n)) + create, remove := diffTagsSNS(tagsFromMapSNS(o), tagsFromMapSNS(n)) // Set tags if len(remove) > 0 { @@ -43,7 +26,7 @@ func setTagsECR(conn *ecr.ECR, d *schema.ResourceData) error { k[i] = t.Key } - _, err := conn.UntagResource(&ecr.UntagResourceInput{ + _, err := conn.UntagResource(&sns.UntagResourceInput{ ResourceArn: aws.String(d.Get("arn").(string)), TagKeys: k, }) @@ -53,7 +36,7 @@ func setTagsECR(conn *ecr.ECR, d *schema.ResourceData) error { } if len(create) > 0 { log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&ecr.TagResourceInput{ + _, err := conn.TagResource(&sns.TagResourceInput{ ResourceArn: aws.String(d.Get("arn").(string)), Tags: create, }) @@ -69,7 +52,7 @@ func setTagsECR(conn *ecr.ECR, d *schema.ResourceData) error { // diffTags takes our tags locally and the ones remotely and returns // the set of tags that must be created, and the set of tags that must // be destroyed. -func diffTagsECR(oldTags, newTags []*ecr.Tag) ([]*ecr.Tag, []*ecr.Tag) { +func diffTagsSNS(oldTags, newTags []*sns.Tag) ([]*sns.Tag, []*sns.Tag) { // First, we're creating everything we have create := make(map[string]interface{}) for _, t := range newTags { @@ -77,7 +60,7 @@ func diffTagsECR(oldTags, newTags []*ecr.Tag) ([]*ecr.Tag, []*ecr.Tag) { } // Build the list of what to remove - var remove []*ecr.Tag + var remove []*sns.Tag for _, t := range oldTags { old, ok := create[aws.StringValue(t.Key)] if !ok || old != aws.StringValue(t.Value) { @@ -88,18 +71,18 @@ func diffTagsECR(oldTags, newTags []*ecr.Tag) ([]*ecr.Tag, []*ecr.Tag) { } } - return tagsFromMapECR(create), remove + return tagsFromMapSNS(create), remove } // tagsFromMap returns the tags for the given map of data. -func tagsFromMapECR(m map[string]interface{}) []*ecr.Tag { - result := make([]*ecr.Tag, 0, len(m)) +func tagsFromMapSNS(m map[string]interface{}) []*sns.Tag { + result := make([]*sns.Tag, 0, len(m)) for k, v := range m { - t := &ecr.Tag{ + t := &sns.Tag{ Key: aws.String(k), Value: aws.String(v.(string)), } - if !tagIgnoredECR(t) { + if !tagIgnoredSNS(t) { result = append(result, t) } } @@ -108,10 +91,10 @@ func tagsFromMapECR(m map[string]interface{}) []*ecr.Tag { } // tagsToMap turns the list of tags into a map. 
-func tagsToMapECR(ts []*ecr.Tag) map[string]string { +func tagsToMapSNS(ts []*sns.Tag) map[string]string { result := make(map[string]string) for _, t := range ts { - if !tagIgnoredECR(t) { + if !tagIgnoredSNS(t) { result[aws.StringValue(t.Key)] = aws.StringValue(t.Value) } } @@ -121,7 +104,7 @@ func tagsToMapECR(ts []*ecr.Tag) map[string]string { // compare a tag against a list of strings and checks if it should // be ignored or not -func tagIgnoredECR(t *ecr.Tag) bool { +func tagIgnoredSNS(t *sns.Tag) bool { filter := []string{"^aws:"} for _, v := range filter { log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSSM.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSSM.go index 38e90bf0f47..54b85fc5b39 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSSM.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsSSM.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the @@ -125,7 +125,7 @@ func saveTagsSSM(conn *ssm.SSM, d *schema.ResourceData, id, resourceType string) }) if err != nil { - return fmt.Errorf("Error retrieving tags for SSM resource: %s", id) + return fmt.Errorf("Error retrieving tags for SSM resource (%s): %s", id, err) } var dt []*ssm.Tag diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsTransfer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsTransfer.go index 17e41dd8857..941e542fc92 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsTransfer.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsTransfer.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/transfer" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_apigateway.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_apigateway.go deleted file mode 100644 index 9168d39e2af..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_apigateway.go +++ /dev/null @@ -1,44 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" -) - -func setTagsAPIGatewayStage(conn *apigateway.APIGateway, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsGeneric(o, n) - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&apigateway.UntagResourceInput{ - ResourceArn: aws.String(arn), - TagKeys: keys, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&apigateway.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: create, - }) - if err != nil { - return err - } - } - } - return nil -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go index c88050059b3..82bb6aa57ac 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go @@ -3,7 +3,7 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func dmsTagsToMap(tags []*dms.Tag) map[string]string { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go index 54019415bc3..4a79e1450b8 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. 
It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go index de984a5d04a..b73809aea3a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // Kinesis requires tagging operations be split into 10 tag batches diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_msk.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_msk.go new file mode 100644 index 00000000000..56ce5689097 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_msk.go @@ -0,0 +1,102 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + kafka "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsMskCluster(conn *kafka.Kafka, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsMskCluster(tagsFromMapMskCluster(o), tagsFromMapMskCluster(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + keys := make([]*string, 0, len(remove)) + for k := range remove { + keys = append(keys, aws.String(k)) + } + _, err := conn.UntagResource(&kafka.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: keys, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&kafka.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsMskCluster(oldTags, newTags map[string]*string) (map[string]*string, map[string]*string) { + + // Build the list of what to remove + remove := make(map[string]*string) + for k, v := range oldTags { + newVal, existsInNew := newTags[k] + if !existsInNew || *newVal != *v { + // Delete it! + remove[k] = v + } + } + return newTags, remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapMskCluster(m map[string]interface{}) map[string]*string { + result := make(map[string]*string) + for k, v := range m { + if !tagIgnoredMskCluster(k, v.(string)) { + result[k] = aws.String(v.(string)) + } + } + return result +} + +// tagsToMap turns the list of tags into a map. 
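+//
+// Editor's illustration (not upstream code):
+//
+//	tagsToMapMskCluster(map[string]*string{"team": aws.String("data")})
+//
+// returns map[string]string{"team": "data"}; "aws:"-prefixed keys are skipped
+// by tagIgnoredMskCluster.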
+func tagsToMapMskCluster(ts map[string]*string) map[string]string { + result := make(map[string]string) + for k, v := range ts { + if !tagIgnoredMskCluster(k, aws.StringValue(v)) { + result[k] = aws.StringValue(v) + } + } + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredMskCluster(key, value string) bool { + filter := []string{"^aws:"} + for _, ignore := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", ignore, key) + r, _ := regexp.MatchString(ignore, key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val %s), ignoring.\n", key, value) + return true + } + } + return false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go index 2b9ef529f16..0238250e9ad 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // setTags is a helper to set the tags for a resource. It expects the diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_sagemaker.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_sagemaker.go deleted file mode 100644 index 52dcf865a02..00000000000 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_sagemaker.go +++ /dev/null @@ -1,120 +0,0 @@ -package aws - -import ( - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func tagsFromMapSagemaker(m map[string]interface{}) []*sagemaker.Tag { - result := make([]*sagemaker.Tag, 0, len(m)) - for k, v := range m { - t := &sagemaker.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredSagemaker(t) { - result = append(result, t) - } - } - - return result -} - -func tagsToMapSagemaker(ts []*sagemaker.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredSagemaker(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -func setSagemakerTags(conn *sagemaker.SageMaker, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffSagemakerTags(tagsFromMapSagemaker(o), tagsFromMapSagemaker(n)) - - if len(remove) > 0 { - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.DeleteTags(&sagemaker.DeleteTagsInput{ - ResourceArn: aws.String(d.Get("arn").(string)), - TagKeys: remove, - }) - if err != nil { - sagemakerErr, ok := err.(awserr.Error) - if ok && sagemakerErr.Code() == "ResourceNotFound" { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - 
return err - } - } - if len(create) > 0 { - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := conn.AddTags(&sagemaker.AddTagsInput{ - ResourceArn: aws.String(d.Get("arn").(string)), - Tags: create, - }) - if err != nil { - sagemakerErr, ok := err.(awserr.Error) - if ok && sagemakerErr.Code() == "ResourceNotFound" { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - } - - return nil -} - -func diffSagemakerTags(oldTags, newTags []*sagemaker.Tag) ([]*sagemaker.Tag, []*string) { - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - var remove []*string - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - remove = append(remove, t.Key) - } - } - - return tagsFromMapSagemaker(create), remove -} - -func tagIgnoredSagemaker(t *sagemaker.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tls.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tls.go new file mode 100644 index 00000000000..e91f9ecba77 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tls.go @@ -0,0 +1,230 @@ +package aws + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "strings" + "time" +) + +const ( + pemBlockTypeCertificate = `CERTIFICATE` + pemBlockTypeRsaPrivateKey = `RSA PRIVATE KEY` + pemBlockTypePublicKey = `PUBLIC KEY` +) + +var tlsX509CertificateSerialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) + +// tlsRsaPrivateKeyPem generates a RSA private key PEM string. +// Wrap with tlsPemEscapeNewlines() to allow simple fmt.Sprintf() +// configurations such as: private_key_pem = "%[1]s" +func tlsRsaPrivateKeyPem(bits int) string { + key, err := rsa.GenerateKey(rand.Reader, bits) + + if err != nil { + panic(err) + } + + block := &pem.Block{ + Bytes: x509.MarshalPKCS1PrivateKey(key), + Type: pemBlockTypeRsaPrivateKey, + } + + return string(pem.EncodeToMemory(block)) +} + +// tlsRsaPublicKeyPem generates a RSA public key PEM string. +// Wrap with tlsPemEscapeNewlines() to allow simple fmt.Sprintf() +// configurations such as: public_key_pem = "%[1]s" +func tlsRsaPublicKeyPem(keyPem string) string { + keyBlock, _ := pem.Decode([]byte(keyPem)) + + key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + + if err != nil { + panic(err) + } + + publicKeyBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + + if err != nil { + panic(err) + } + + block := &pem.Block{ + Bytes: publicKeyBytes, + Type: pemBlockTypePublicKey, + } + + return string(pem.EncodeToMemory(block)) +} + +// tlsRsaX509LocallySignedCertificatePem generates a local CA x509 certificate PEM string. 
+// Wrap with tlsPemEscapeNewlines() to allow simple fmt.Sprintf() +// configurations such as: certificate_pem = "%[1]s" +func tlsRsaX509LocallySignedCertificatePem(caKeyPem, caCertificatePem, keyPem, commonName string) string { + caCertificateBlock, _ := pem.Decode([]byte(caCertificatePem)) + + caCertificate, err := x509.ParseCertificate(caCertificateBlock.Bytes) + + if err != nil { + panic(err) + } + + caKeyBlock, _ := pem.Decode([]byte(caKeyPem)) + + caKey, err := x509.ParsePKCS1PrivateKey(caKeyBlock.Bytes) + + if err != nil { + panic(err) + } + + keyBlock, _ := pem.Decode([]byte(keyPem)) + + key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + + if err != nil { + panic(err) + } + + serialNumber, err := rand.Int(rand.Reader, tlsX509CertificateSerialNumberLimit) + + if err != nil { + panic(err) + } + + certificate := &x509.Certificate{ + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + NotAfter: time.Now().Add(24 * time.Hour), + NotBefore: time.Now(), + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: commonName, + Organization: []string{"ACME Examples, Inc"}, + }, + } + + certificateBytes, err := x509.CreateCertificate(rand.Reader, certificate, caCertificate, &key.PublicKey, caKey) + + if err != nil { + panic(err) + } + + certificateBlock := &pem.Block{ + Bytes: certificateBytes, + Type: pemBlockTypeCertificate, + } + + return string(pem.EncodeToMemory(certificateBlock)) +} + +// tlsRsaX509SelfSignedCaCertificatePem generates a x509 CA certificate PEM string. +// Wrap with tlsPemEscapeNewlines() to allow simple fmt.Sprintf() +// configurations such as: root_certificate_pem = "%[1]s" +func tlsRsaX509SelfSignedCaCertificatePem(keyPem string) string { + keyBlock, _ := pem.Decode([]byte(keyPem)) + + key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + + if err != nil { + panic(err) + } + + publicKeyBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + + if err != nil { + panic(err) + } + + publicKeyBytesSha1 := sha1.Sum(publicKeyBytes) + + serialNumber, err := rand.Int(rand.Reader, tlsX509CertificateSerialNumberLimit) + + if err != nil { + panic(err) + } + + certificate := &x509.Certificate{ + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + NotAfter: time.Now().Add(24 * time.Hour), + NotBefore: time.Now(), + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "ACME Root CA", + Organization: []string{"ACME Examples, Inc"}, + }, + SubjectKeyId: publicKeyBytesSha1[:], + } + + certificateBytes, err := x509.CreateCertificate(rand.Reader, certificate, certificate, &key.PublicKey, key) + + if err != nil { + panic(err) + } + + certificateBlock := &pem.Block{ + Bytes: certificateBytes, + Type: pemBlockTypeCertificate, + } + + return string(pem.EncodeToMemory(certificateBlock)) +} + +// tlsRsaX509SelfSignedCertificatePem generates a x509 certificate PEM string. 
+// Wrap with tlsPemEscapeNewlines() to allow simple fmt.Sprintf() +// configurations such as: private_key_pem = "%[1]s" +func tlsRsaX509SelfSignedCertificatePem(keyPem, commonName string) string { + keyBlock, _ := pem.Decode([]byte(keyPem)) + + key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + + if err != nil { + panic(err) + } + + serialNumber, err := rand.Int(rand.Reader, tlsX509CertificateSerialNumberLimit) + + if err != nil { + panic(err) + } + + certificate := &x509.Certificate{ + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + NotAfter: time.Now().Add(24 * time.Hour), + NotBefore: time.Now(), + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: commonName, + Organization: []string{"ACME Examples, Inc"}, + }, + } + + certificateBytes, err := x509.CreateCertificate(rand.Reader, certificate, certificate, &key.PublicKey, key) + + if err != nil { + panic(err) + } + + certificateBlock := &pem.Block{ + Bytes: certificateBytes, + Type: pemBlockTypeCertificate, + } + + return string(pem.EncodeToMemory(certificateBlock)) +} + +func tlsPemEscapeNewlines(pem string) string { + return strings.ReplaceAll(pem, "\n", "\\n") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go index b701ddbeadd..262d8c8a0c5 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go @@ -6,7 +6,7 @@ import ( "reflect" "regexp" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) // Base64Encode encodes data if the input isn't already encoded using base64.StdEncoding.EncodeToString. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go index dab1c72f66c..f2f549bb768 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go @@ -15,10 +15,10 @@ import ( "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) // FloatAtLeast returns a SchemaValidateFunc which tests if the provided value @@ -40,6 +40,29 @@ func FloatAtLeast(min float64) schema.SchemaValidateFunc { } } +// validateStringNotMatch returns a SchemaValidateFunc which tests if the provided value +// does not match a given regexp. Optionally an error message can be provided to +// return something friendlier than "must match some globby regexp". 
+// This function is an inverse copy of validation.StringMatch and will be +// migrated to the Terraform Provider SDK. +func validateStringNotMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to not match regular expression %q", k, r)} + } + return nil, nil + } +} + // validateTypeStringNullableBoolean provides custom error messaging for TypeString booleans // Some arguments require three values: true, false, and "" (unspecified). // This ValidateFunc returns a custom message since the message with @@ -195,31 +218,6 @@ func validateNeptuneEngine() schema.SchemaValidateFunc { }, false) } -func validateElastiCacheClusterId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if (len(value) < 1) || (len(value) > 20) { - errors = append(errors, fmt.Errorf( - "%q (%q) must contain from 1 to 20 alphanumeric characters or hyphens", k, value)) - } - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q (%q)", k, value)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q (%q) must be a letter", k, value)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) cannot contain two consecutive hyphens", k, value)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) cannot end with a hyphen", k, value)) - } - return -} - func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) { value := v.(string) _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value) @@ -1705,16 +1703,6 @@ func validateCognitoUserPoolInviteTemplateSmsMessage(v interface{}, k string) (w return } -func validateCognitoUserPoolReplyEmailAddress(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if !regexp.MustCompile(`[\p{L}\p{M}\p{S}\p{N}\p{P}]+@[\p{L}\p{M}\p{S}\p{N}\p{P}]+`).MatchString(value) { - errors = append(errors, fmt.Errorf( - `%q must satisfy regular expression pattern: [\p{L}\p{M}\p{S}\p{N}\p{P}]+@[\p{L}\p{M}\p{S}\p{N}\p{P}]+`, k)) - } - return -} - func validateCognitoUserPoolSchemaName(v interface{}, k string) (ws []string, es []error) { value := v.(string) if len(value) < 1 { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_helpers.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_helpers.go index b9e6403ce3c..0e7b3437536 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_helpers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_helpers.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
) func wafSizeConstraintSetSchema() map[string]*schema.Schema { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go index 3de972aa256..31fe92b4259 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) type WafRetryer struct { @@ -20,10 +20,9 @@ func (t *WafRetryer) RetryWithToken(f withTokenFunc) (interface{}, error) { defer awsMutexKV.Unlock("WafRetryer") var out interface{} + var tokenOut *waf.GetChangeTokenOutput err := resource.Retry(15*time.Minute, func() *resource.RetryError { var err error - var tokenOut *waf.GetChangeTokenOutput - tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) if err != nil { return resource.NonRetryableError(fmt.Errorf("Failed to acquire change token: %s", err)) @@ -39,8 +38,19 @@ func (t *WafRetryer) RetryWithToken(f withTokenFunc) (interface{}, error) { } return nil }) + if isResourceTimeoutError(err) { + tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) + + if err != nil { + return nil, fmt.Errorf("error getting WAF change token: %s", err) + } - return out, err + out, err = f(tokenOut.ChangeToken) + } + if err != nil { + return nil, err + } + return out, nil } func newWafRetryer(conn *waf.WAF) *WafRetryer { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go index 17c69c0f969..30ace061288 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/waf" "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) type WafRegionalRetryer struct { @@ -22,9 +22,9 @@ func (t *WafRegionalRetryer) RetryWithToken(f withRegionalTokenFunc) (interface{ defer awsMutexKV.Unlock(t.Region) var out interface{} + var tokenOut *waf.GetChangeTokenOutput err := resource.Retry(15*time.Minute, func() *resource.RetryError { var err error - var tokenOut *waf.GetChangeTokenOutput tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) if err != nil { @@ -41,8 +41,19 @@ func (t *WafRegionalRetryer) RetryWithToken(f withRegionalTokenFunc) (interface{ } return nil }) + if isResourceTimeoutError(err) { + tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) - return out, err + if err != nil { + return nil, fmt.Errorf("error getting WAF Regional change token: %s", err) + } + + out, err = f(tokenOut.ChangeToken) + } + if err != nil { + return nil, err + } + return out, nil } func newWafRegionalRetryer(conn 
*wafregional.WAFRegional, region string) *WafRegionalRetryer { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_association.markdown b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_association.markdown index 862e5d9efee..fdae597d893 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_association.markdown +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_association.markdown @@ -1,7 +1,7 @@ --- +subcategory: "License Manager" layout: "aws" page_title: "AWS: aws_licensemanager_association" -sidebar_current: "docs-aws-resource-licensemanager-association" description: |- Provides a License Manager association resource. --- diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_license_configuration.markdown b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_license_configuration.markdown index 04784f41010..811dfcaca5a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_license_configuration.markdown +++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/website/docs/r/licensemanager_license_configuration.markdown @@ -1,7 +1,7 @@ --- +subcategory: "License Manager" layout: "aws" page_title: "AWS: aws_licensemanager_license_configuration" -sidebar_current: "docs-aws-resource-licensemanager-license-configuration" description: |- Provides a License Manager license configuration resource. --- @@ -23,7 +23,7 @@ resource "aws_licensemanager_license_configuration" "example" { license_counting_type = "Socket" license_rules = [ - "#minimumSockets=2" + "#minimumSockets=2", ] tags = { diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000000..58ebdc162f5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/bits.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 00000000000..fadc1a59449
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little endian representation to an uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflow indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if b < 0x80 {
+			if i > 10 || i == 10 && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/format.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/format.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+)
+
+// allZeros checks whether a given byte slice has only zeros.
+func allZeros(p []byte) bool {
+	for _, c := range p {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	CRC32  byte = 0x1
+	CRC64       = 0x4
+	SHA256      = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
+func verifyFlags(flags byte) error {
+	switch flags {
+	case CRC32, CRC64, SHA256:
+		return nil
+	default:
+		return errInvalidFlags
+	}
+}
+
+// flagstrings maps flag values to strings.
+var flagstrings = map[byte]string{
+	CRC32:  "CRC-32",
+	CRC64:  "CRC-64",
+	SHA256: "SHA-256",
+}
+
+// flagString returns the string representation for the given flags.
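[Annotation — not part of the vendored diff] The helpers in bits.go above use the same base-128 varint layout as Go's encoding/binary: seven payload bits per byte, the 0x80 continuation bit on every byte but the last, and at most ten bytes per uint64. A minimal round-trip sketch, independent of the package's unexported functions:

package main

import "fmt"

// putUvarint matches the encoder in bits.go: low 7 bits per byte,
// 0x80 set on every byte except the last.
func putUvarint(p []byte, x uint64) int {
	i := 0
	for x >= 0x80 {
		p[i] = byte(x) | 0x80
		x >>= 7
		i++
	}
	p[i] = byte(x)
	return i + 1
}

func main() {
	p := make([]byte, 10) // 10 bytes bound any uint64, which readUvarint enforces
	n := putUvarint(p, 300)
	fmt.Printf("% x\n", p[:n]) // ac 02

	// Decoding accumulates 7 bits per byte until the continuation bit
	// clears, mirroring the loop in readUvarint.
	var x uint64
	var s uint
	for _, b := range p[:n] {
		if b < 0x80 {
			x |= uint64(b) << s
			break
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
	fmt.Println(x) // 300
}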
+func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. 
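[Annotation — not part of the vendored diff] As a concrete companion to header.MarshalBinary above: the 12-byte stream header is the six magic bytes, a reserved zero byte, the checksum flag, and a little-endian CRC-32 over the two flag bytes. This sketch builds the header for the CRC-64 check with only the standard library; the printed bytes match the first twelve bytes of any .xz file that uses CRC-64.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	hdr := make([]byte, 12)
	copy(hdr, []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}) // headerMagic
	hdr[6] = 0x00                                     // first flag byte is reserved and must be zero
	hdr[7] = 0x04                                     // checksum method flag: CRC-64

	crc := crc32.ChecksumIEEE(hdr[6:8])
	binary.LittleEndian.PutUint32(hdr[8:], crc)

	fmt.Printf("% x\n", hdr) // fd 37 7a 58 5a 00 00 04 e6 d6 b4 46
}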
+func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
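[Annotation — not part of the vendored diff] A small check of the footer's "backward size" arithmetic used above: the index size is stored as indexSize/4 − 1 in a little-endian uint32 and recovered as (stored + 1) × 4. The sample value is arbitrary.

package main

import "fmt"

func main() {
	indexSize := int64(32) // must be a positive multiple of four
	stored := uint32(indexSize/4 - 1)
	decoded := (int64(stored) + 1) * 4
	fmt.Println(stored, decoded == indexSize) // 7 true
}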
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
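[Annotation — not part of the vendored diff] To make the block-header flag byte handled above explicit: bits 0–1 encode the filter count minus one, 0x40 and 0x80 mark present compressed and uncompressed size fields, and the 0x3c bits are reserved and must be zero. A decoding sketch with an arbitrary flag value:

package main

import "fmt"

const (
	filterCountMask         = 0x03
	compressedSizePresent   = 0x40
	uncompressedSizePresent = 0x80
	reservedBlockFlags      = 0x3c
)

func main() {
	flags := byte(0xc0) // one filter, both size fields present
	fmt.Println("filters:", int(flags&filterCountMask)+1)
	fmt.Println("compressed size present:", flags&compressedSizePresent != 0)
	fmt.Println("uncompressed size present:", flags&uncompressedSizePresent != 0)
	fmt.Println("reserved bits clear:", flags&reservedBlockFlags == 0)
}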
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. 
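[Annotation — not part of the vendored diff] The size byte written by MarshalBinary above and read back by readBlockHeader deserves a worked example: the first header byte stores len/4 − 1, the reader recovers the length as (s + 1) × 4, and every block header is a multiple of four bytes long (the marshaller asserts 1 < s ≤ 255).

package main

import "fmt"

func main() {
	for _, headerLen := range []int{12, 24, 1024} {
		s := headerLen/4 - 1                 // what MarshalBinary writes into data[0]
		fmt.Println(s, (s+1)*4 == headerLen) // what readBlockHeader computes back
	}
}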
+func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
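[Annotation — not part of the vendored diff] Each index record above is simply two uvarints, unpadded size then uncompressed size, which is why record.MarshalBinary allocates 20 bytes (two maximal 10-byte varints). Since the layout matches Go's standard varints, encoding/binary can illustrate it directly; the sizes here are made up.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	p := make([]byte, 2*binary.MaxVarintLen64) // 20 bytes, as in record.MarshalBinary
	n := binary.PutUvarint(p, 4096)            // unpadded size
	n += binary.PutUvarint(p[n:], 16384)       // uncompressed size
	fmt.Printf("record: % x\n", p[:n])

	u, k := binary.Uvarint(p)
	v, _ := binary.Uvarint(p[k:])
	fmt.Println(u, v) // 4096 16384
}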
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000000..a32887872e1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 00000000000..f99ec220680 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 00000000000..58635b113a9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. 
The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 00000000000..ab6a19ca4cc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 00000000000..0ba45e8ff33 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. The Panic functions call panic +// after the writing the log message unless suppressed. 
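[Annotation — not part of the vendored diff] Stepping back to the rolling hashes a few files above: CyclicPoly and RabinKarp both satisfy the Roller interface, and equal windows always produce equal hash values once Len bytes have been rolled in. The sketch below uses a trimmed local copy of RabinKarp, since the vendored package sits under internal/ and cannot be imported from outside the module.

package main

import "fmt"

const a = 0x97b548add41d5da1 // the package's default Rabin-Karp constant

// rabinKarp is a trimmed copy of the vendored type, for demonstration only.
type rabinKarp struct {
	aOldest uint64 // a^n, the weight of the byte leaving the window
	h       uint64
	p       []byte
	i       int
}

func newRabinKarp(n int) *rabinKarp {
	aOldest := uint64(1)
	for i := 0; i < n; i++ {
		aOldest *= a
	}
	return &rabinKarp{aOldest: aOldest, p: make([]byte, 0, n)}
}

func (r *rabinKarp) rollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h += uint64(x)
		r.h *= a
		r.p = append(r.p, x)
	} else {
		r.h -= uint64(r.p[r.i]) * r.aOldest // drop the oldest byte
		r.h += uint64(x)
		r.h *= a
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}

func main() {
	// Equal 4-byte windows hash equally: "abcd" occurs at offsets 0 and 4.
	data := []byte("abcdabcd")
	r := newRabinKarp(4)
	h := make([]uint64, 0, len(data)-3)
	for i, c := range data {
		v := r.rollByte(c)
		if i >= 3 { // the hash is valid once 4 bytes have been rolled in
			h = append(h, v)
		}
	}
	fmt.Println(h[0] == h[4]) // true
}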
+package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
+ s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) 
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000000..a781bd1953d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
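[Annotation — not part of the vendored diff] A usage sketch for the xlog flag scheme above: each print method passes its Lno* bit to Output, which drops the message when that bit is set in the logger's flags. The snippet replicates just that test locally rather than importing the internal package.

package main

import "fmt"

const (
	Ldate = 1 << iota
	Ltime
	Lmicroseconds
	Llongfile
	Lshortfile
	Lnopanic
	Lnofatal
	Lnowarn
	Lnoprint
	Lnodebug
	Lstdflags = Ldate | Ltime | Lnodebug
)

// suppressed reports whether a message category (identified by its Lno*
// bit) is disabled under the given flags, exactly the test that
// Output/Outputf/Outputln perform before formatting.
func suppressed(flags, noflag int) bool {
	return flags&noflag != 0
}

func main() {
	flags := Lstdflags
	fmt.Println(suppressed(flags, Lnodebug)) // true: Debug output is off by default
	fmt.Println(suppressed(flags, Lnowarn))  // false: warnings are printed

	flags |= Lnowarn                        // switch warnings off, as SetFlags would
	fmt.Println(suppressed(flags, Lnowarn)) // true
}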
+ +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. 
+ *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
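[Annotation — not part of the vendored diff] One property of the tree above worth spelling out: xval packs each four-byte window big-endian into a uint32, so numeric comparison of keys is exactly lexicographic comparison of the windows, which is what lets search bracket candidate matches. A quick demonstration with the standard library:

package main

import (
	"encoding/binary"
	"fmt"
	"sort"
)

func main() {
	windows := []string{"zzzz", "abcd", "abaa", "abce"}
	// Sort by the big-endian packed key, as binTree orders its nodes.
	sort.Slice(windows, func(i, j int) bool {
		a := binary.BigEndian.Uint32([]byte(windows[i]))
		b := binary.BigEndian.Uint32([]byte(windows[j]))
		return a < b
	})
	fmt.Println(windows) // [abaa abcd abce zzzz]: lexicographic order
}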
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000000..e9bab019901 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/breader.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000000..5350d814fa7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000000..50e0b6d57b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. 
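The de Bruijn ntz32/nlz32 routines above predate math/bits; for every uint32 input, including zero, they should agree with the standard library. A quick standalone spot-check (illustrative only):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// ntz32(x) == bits.TrailingZeros32(x) and
	// nlz32(x) == bits.LeadingZeros32(x) for all x, including 0,
	// where both sides return 32.
	for _, x := range []uint32{0, 1, 0x80000000, 0xdeadbeef} {
		fmt.Println(x, bits.TrailingZeros32(x), bits.LeadingZeros32(x))
	}
}
```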
+func (b *buffer) Available() int {
+	delta := b.rear - 1 - b.front
+	if delta < 0 {
+		delta += len(b.data)
+	}
+	return delta
+}
+
+// addIndex adds a non-negative integer to the index i and returns the
+// resulting index. The function takes care of wrapping the index as
+// well as potential overflow situations.
+func (b *buffer) addIndex(i int, n int) int {
+	// subtraction of len(b.data) prevents overflow
+	i += n - len(b.data)
+	if i < 0 {
+		i += len(b.data)
+	}
+	return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n, err = b.Peek(p)
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+	m := b.Buffered()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:n]
+	}
+	k := copy(p, b.data[b.rear:])
+	if k < n {
+		copy(p[k:], b.data)
+	}
+	return n, nil
+}
+
+// Discard skips the next n bytes to read from the buffer, returning
+// the number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, errors.New("buffer.Discard: negative argument")
+	}
+	m := b.Buffered()
+	if m < n {
+		n = m
+		err = errors.New(
+			"buffer.Discard: discarded fewer bytes than requested")
+	}
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes are written than
+// requested ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	m := b.Available()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:m]
+		err = ErrNoSpace
+	}
+	k := copy(b.data[b.front:], p)
+	if k < n {
+		copy(b.data, p[k:])
+	}
+	b.front = b.addIndex(b.front, n)
+	return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if the buffer has no space left for a single byte.
+func (b *buffer) WriteByte(c byte) error {
+	if b.Available() < 1 {
+		return ErrNoSpace
+	}
+	b.data[b.front] = c
+	b.front = b.addIndex(b.front, 1)
+	return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	for i, c := range a {
+		if b[i] != c {
+			return i
+		}
+	}
+	return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+	var n int
+	i := b.rear - distance
+	if i < 0 {
+		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+			return n
+		}
+		p = p[n:]
+		i = 0
+	}
+	n += prefixLen(p, b.data[i:])
+	return n
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 00000000000..a3696ba08be
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000000..16e14db3941 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
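LimitedByteWriter is one of the package's few exported helpers: it wraps any io.ByteWriter and starts failing with ErrLimit once N bytes have been written. A minimal usage sketch, assuming the import path of this vendored package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer
	// Allow at most 3 bytes; the 4th WriteByte must fail.
	w := &lzma.LimitedByteWriter{BW: &buf, N: 3}
	for _, c := range []byte("abcd") {
		if err := w.WriteByte(c); err != nil {
			fmt.Println(err) // limit reached
		}
	}
	fmt.Println(buf.String()) // abc
}
```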
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
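readOp keeps the four most recently used match distances in State.rep and reorders them move-to-front style: a fresh distance shifts all entries down, while reuse of entry g promotes it to the front. A condensed sketch of that bookkeeping (repUpdate is a name invented here; it only restates the rep[] updates scattered through readOp):

```go
package main

import "fmt"

// repUpdate mirrors the rep[] updates in readOp: g is the matched
// slot (0-3); pass g = -1 and the new distance for a simple match.
// For g == 3 the caller passes dist = rep[3].
func repUpdate(rep *[4]uint32, g int, dist uint32) {
	switch g {
	case 0: // rep[0] reused; order unchanged
	case 1:
		rep[0], rep[1] = rep[1], rep[0]
	case 2:
		rep[0], rep[1], rep[2] = rep[2], rep[0], rep[1]
	default: // g == 3 or a brand-new distance
		rep[0], rep[1], rep[2], rep[3] = dist, rep[0], rep[1], rep[2]
	}
}

func main() {
	rep := [4]uint32{10, 20, 30, 40}
	repUpdate(&rep, -1, 5) // new distance from the dist decoder
	fmt.Println(rep)       // [5 10 20 30]
	repUpdate(&rep, 2, rep[2])
	fmt.Println(rep) // [20 5 10 30]
}
```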
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000000..564a12b834c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. 
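The unexported decoder is driven by the package's public Reader type; decompressing a classic .lzma stream needs nothing more than the following (the input file name is hypothetical):

```go
package main

import (
	"bufio"
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	f, err := os.Open("data.lzma") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r, err := lzma.NewReader(bufio.NewReader(f))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```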
+func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000000..e08eb989ff9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. 
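writeMatch above copies in chunks inside the ring buffer, but its observable behaviour is the classic LZ77 overlapped copy: a match may reach into bytes it is itself producing. A flat-slice sketch of those semantics (copyMatch is illustrative, not part of the package):

```go
package main

import "fmt"

// copyMatch copies byte by byte so an overlapping match (dist < n)
// correctly repeats data it has just produced.
func copyMatch(hist []byte, dist, n int) []byte {
	for ; n > 0; n-- {
		hist = append(hist, hist[len(hist)-dist])
	}
	return hist
}

func main() {
	fmt.Println(string(copyMatch([]byte("ab"), 2, 6))) // abababab
}
```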
+func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. +func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000000..b053a2dce21 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
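distCodec splits a distance offset into a 6-bit position slot plus extra bits: slots 0-3 are the distance itself, and above that each slot pair doubles the covered range. A standalone sketch of the slot computation used by distBits and Encode (slot is a made-up name; it relies on nlz32(d) == 32 - bits.Len32(d)):

```go
package main

import (
	"fmt"
	"math/bits"
)

// slot computes the LZMA position slot for a distance offset.
func slot(dist uint32) uint32 {
	if dist < 4 {
		return dist
	}
	n := uint32(bits.Len32(dist)) - 2 // == 30 - nlz32(dist)
	return 2 + n<<1 + (dist>>n)&1
}

func main() {
	for _, d := range []uint32{0, 3, 4, 6, 8, 12, 16} {
		fmt.Println(d, slot(d)) // slots 0, 3, 4, 5, 6, 7, 8
	}
}
```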
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000000..fe1900a66e8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 00000000000..9d0fbc70332 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// addtional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. 
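On the compression side the encoder is wrapped by the package's public Writer; a minimal round of usage looks like this (the output path is hypothetical; Close must be called so the range encoder is flushed):

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	out, err := os.Create("data.lzma") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer out.Close()
	w, err := lzma.NewWriter(out)
	if err != nil {
		panic(err)
	}
	_, err = io.Copy(w, strings.NewReader("hello, hello, hello"))
	if err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes the range encoder
		panic(err)
	}
}
```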
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000000..d786a9745d4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. 
+ */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. +type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. 
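hashTableExponent clamps a capacity-derived exponent into [minTableExponent, maxTableExponent]; for a power-of-two capacity 2^k it yields k-1 before clamping. A small sketch of the same arithmetic (exponent is a name invented here):

```go
package main

import (
	"fmt"
	"math/bits"
)

// exponent mirrors hashTableExponent, clamped to [9, 20].
func exponent(n uint32) int {
	e := 30 - bits.LeadingZeros32(n) // k-1 for n == 2^k
	if e < 9 {
		e = 9
	} else if e > 20 {
		e = 20
	}
	return e
}

func main() {
	fmt.Println(exponent(1 << 23)) // 20 (clamped from 22)
	fmt.Println(exponent(1 << 16)) // 15
}
```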
+func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. +func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. 
+ i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 00000000000..bc708969fd3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. 
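marshalBinary above lays the classic 13-byte header out as a properties byte, a little-endian 32-bit dictionary capacity, and a little-endian 64-bit uncompressed size, with all ones meaning "size unknown". The same layout expressed with encoding/binary (buildHeader is illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildHeader sketches the 13-byte classic LZMA header layout.
func buildHeader(props byte, dictCap uint32, size int64) []byte {
	h := make([]byte, 13)
	h[0] = props
	binary.LittleEndian.PutUint32(h[1:5], dictCap)
	u := ^uint64(0) // "size unknown" marker, all bits set
	if size > 0 {
		u = uint64(size)
	}
	binary.LittleEndian.PutUint64(h[5:], u)
	return h
}

func main() {
	// 0x5d is the properties code for the common lc=3, lp=0, pb=2.
	fmt.Printf("% x\n", buildHeader(0x5d, 1<<23, -1))
}
```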
+func (h *header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.dictCap = int(uint32LE(data[1:]))
+	if h.dictCap < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.size = -1
+	} else {
+		h.size = int64(s)
+		if h.size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictCap checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	if err := h.unmarshalBinary(data); err != nil {
+		return false
+	}
+	if !validDictCap(h.dictCap) {
+		return false
+	}
+	return h.size < 0 || h.size <= 1<<38
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header2.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 00000000000..ac6a71a5a98
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+const (
+	// maximum size of compressed data in a chunk
+	maxCompressed = 1 << 16
+	// maximum size of uncompressed data in a chunk
+	maxUncompressed = 1 << 21
+)
+
+// chunkType represents the type of an LZMA2 chunk. Note that this
+// value is an internal representation and not the actual encoding of
+// an LZMA2 chunk header.
+type chunkType byte
+
+// Possible values for the chunk type.
+const (
+	// end of stream
+	cEOS chunkType = iota
+	// uncompressed; reset dictionary
+	cUD
+	// uncompressed; no reset of dictionary
+	cU
+	// LZMA compressed; no reset
+	cL
+	// LZMA compressed; reset state
+	cLR
+	// LZMA compressed; reset state; new property value
+	cLRN
+	// LZMA compressed; reset state; new property value; reset dictionary
+	cLRND
+)
+
+// chunkTypeStrings provides a string representation for the chunk types.
+var chunkTypeStrings = [...]string{
+	cEOS:  "EOS",
+	cU:    "U",
+	cUD:   "UD",
+	cL:    "L",
+	cLR:   "LR",
+	cLRN:  "LRN",
+	cLRND: "LRND",
+}
+
+// String returns a string representation of the chunk type.
+func (c chunkType) String() string {
+	if !(cEOS <= c && c <= cLRND) {
+		return "unknown"
+	}
+	return chunkTypeStrings[c]
+}
+
+// Actual encodings of the chunk types in the header byte. Note that the
+// high bits of the uncompressed size are additionally stored in the
+// header byte.
+const (
+	hEOS  = 0
+	hUD   = 1
+	hU    = 2
+	hL    = 1 << 7
+	hLR   = 1<<7 | 1<<5
+	hLRN  = 1<<7 | 1<<6
+	hLRND = 1<<7 | 1<<6 | 1<<5
+)
+
+// errHeaderByte indicates an unsupported value for the chunk header
+// byte. This byte starts the variable-length chunk header.
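validDictCap accepts exactly the sizes named in the ValidHeader comment: powers of two and powers of two plus half, from 1024 upwards, plus the special maximum 2^32-1. Enumerating the first few (illustrative):

```go
package main

import "fmt"

func main() {
	// Sizes accepted by validDictCap: 2^n and 2^n+2^(n-1), n >= 10.
	for n := uint(10); n < 14; n++ {
		fmt.Println(1<<n, 1<<n+1<<(n-1))
	}
	// 1024 1536
	// 2048 3072
	// 4096 6144
	// 8192 12288
}
```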
+var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. 
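The control byte parsed by headerChunkType packs the chunk kind into the top three bits of compressed chunks; the remaining low bits carry the top bits of the uncompressed size. A condensed sketch of the same dispatch (chunkKind is a made-up name):

```go
package main

import "fmt"

// chunkKind maps an LZMA2 control byte to its chunk kind, mirroring
// headerChunkType: hL=0x80, hLR=0xa0, hLRN=0xc0, hLRND=0xe0.
func chunkKind(h byte) string {
	switch {
	case h == 0:
		return "EOS"
	case h == 1:
		return "UD"
	case h == 2:
		return "U"
	case h&0x80 == 0:
		return "invalid"
	}
	switch h & 0xe0 {
	case 0x80:
		return "L"
	case 0xa0:
		return "LR"
	case 0xc0:
		return "LRN"
	default: // 0xe0
		return "LRND"
	}
}

func main() {
	fmt.Println(chunkKind(0xe0)) // LRND: the first chunk of a stream
}
```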
+func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
+	p := make([]byte, 1, 6)
+	if _, err = io.ReadFull(r, p); err != nil {
+		return
+	}
+	c, err := headerChunkType(p[0])
+	if err != nil {
+		return
+	}
+	p = p[:headerLen(c)]
+	if _, err = io.ReadFull(r, p[1:]); err != nil {
+		return
+	}
+	h = new(chunkHeader)
+	if err = h.UnmarshalBinary(p); err != nil {
+		return nil, err
+	}
+	return h, nil
+}
+
+// uint16BE converts a big-endian uint16 representation to an uint16
+// value.
+func uint16BE(p []byte) uint16 {
+	return uint16(p[0])<<8 | uint16(p[1])
+}
+
+// putUint16BE puts the big-endian uint16 presentation into the given
+// slice.
+func putUint16BE(p []byte, x uint16) {
+	p[0] = byte(x >> 8)
+	p[1] = byte(x)
+}
+
+// chunkState is used to manage the state of the chunks
+type chunkState byte
+
+// start and stop define the initial and terminating state of the chunk
+// state
+const (
+	start chunkState = 'S'
+	stop             = 'T'
+)
+
+// errors for the chunk state handling
+var (
+	errChunkType = errors.New("lzma: unexpected chunk type")
+	errState     = errors.New("lzma: wrong chunk state")
+)
+
+// next transitions state based on chunk type input
+func (c *chunkState) next(ctype chunkType) error {
+	switch *c {
+	// start state
+	case 'S':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// normal LZMA mode
+	case 'L':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			*c = 'U'
+		case cL, cLR, cLRN, cLRND:
+			break
+		default:
+			return errChunkType
+		}
+	// reset required
+	case 'R':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD, cU:
+			break
+		case cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// uncompressed
+	case 'U':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			break
+		case cL, cLR, cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// terminal state
+	case 'T':
+		return errChunkType
+	default:
+		return errState
+	}
+	return nil
+}
+
+// defaultChunkType returns the default chunk type for each chunk state.
+func (c chunkState) defaultChunkType() chunkType {
+	switch c {
+	case 'S':
+		return cLRND
+	case 'L', 'U':
+		return cL
+	case 'R':
+		return cLRN
+	default:
+		// no error
+		return cEOS
+	}
+}
+
+// maxDictCap defines the maximum dictionary capacity supported by the
+// LZMA2 dictionary capacity encoding.
+const maxDictCap = 1<<32 - 1
+
+// maxDictCapCode defines the maximum dictionary capacity code.
+const maxDictCapCode = 40
+
+// decodeDictCap decodes the dictionary capacity byte. It doesn't check
+// that the byte is in the correct range.
+func decodeDictCap(c byte) int64 {
+	return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
+}
+
+// DecodeDictCap decodes the encoded dictionary capacity. The function
+// returns an error if the code is out of range.
+func DecodeDictCap(c byte) (n int64, err error) {
+	if c >= maxDictCapCode {
+		if c == maxDictCapCode {
+			return maxDictCap, nil
+		}
+		return 0, errors.New("lzma: invalid dictionary size code")
+	}
+	return decodeDictCap(c), nil
+}
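+
+// Illustrative sketch, not part of the upstream file: the capacity
+// code packs one mantissa bit and a five-bit shift into a single byte,
+// so even codes yield powers of two starting at 4 KiB and odd codes
+// the midpoints between them. The helper below is a hypothetical
+// standalone copy of the same formula:
+//
+//	package main
+//
+//	import "fmt"
+//
+//	// dictCap mirrors decodeDictCap: bit 0 selects mantissa 2 or 3,
+//	// the remaining bits select the shift.
+//	func dictCap(c byte) int64 {
+//		return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
+//	}
+//
+//	func main() {
+//		for _, c := range []byte{0, 1, 2, 3, 36} {
+//			fmt.Printf("code %2d -> %d bytes\n", c, dictCap(c))
+//		}
+//		// code 0 -> 4096, code 1 -> 6144, ..., code 36 -> 1 GiB
+//	}
+
+// EncodeDictCap encodes a dictionary capacity. The function returns
+// the code for the smallest capacity greater than or equal to n. If n
+// exceeds the maximum supported dictionary capacity, the maximum value
+// is returned.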
+func EncodeDictCap(n int64) byte {
+	a, b := byte(0), byte(40)
+	for a < b {
+		c := a + (b-a)>>1
+		m := decodeDictCap(c)
+		if n <= m {
+			if n == m {
+				return c
+			}
+			b = c
+		} else {
+			a = c + 1
+		}
+	}
+	return a
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 00000000000..e517730924f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// maxPosBits defines the number of bits of the position value that are
+// used to compute the posState value. The value is used to select the
+// tree codec for length encoding and decoding.
+const maxPosBits = 4
+
+// minMatchLen and maxMatchLen give the minimum and maximum values for
+// encoding and decoding length values. minMatchLen is also used as base
+// for the encoded length values.
+const (
+	minMatchLen = 2
+	maxMatchLen = minMatchLen + 16 + 256 - 1
+)
+
+// lengthCodec supports the encoding of the length value.
+type lengthCodec struct {
+	choice [2]prob
+	low    [1 << maxPosBits]treeCodec
+	mid    [1 << maxPosBits]treeCodec
+	high   treeCodec
+}
+
+// deepcopy initializes the lc value as deep copy of the source value.
+func (lc *lengthCodec) deepcopy(src *lengthCodec) {
+	if lc == src {
+		return
+	}
+	lc.choice = src.choice
+	for i := range lc.low {
+		lc.low[i].deepcopy(&src.low[i])
+	}
+	for i := range lc.mid {
+		lc.mid[i].deepcopy(&src.mid[i])
+	}
+	lc.high.deepcopy(&src.high)
+}
+
+// init initializes a new length codec.
+func (lc *lengthCodec) init() {
+	for i := range lc.choice {
+		lc.choice[i] = probInit
+	}
+	for i := range lc.low {
+		lc.low[i] = makeTreeCodec(3)
+	}
+	for i := range lc.mid {
+		lc.mid[i] = makeTreeCodec(3)
+	}
+	lc.high = makeTreeCodec(8)
+}
+
+// lBits gives the number of bits used for the encoding of the l value
+// provided to the range encoder.
+func lBits(l uint32) int {
+	switch {
+	case l < 8:
+		return 4
+	case l < 16:
+		return 5
+	default:
+		return 10
+	}
+}
+
+// Encode encodes the length offset. The length offset l can be
+// computed by subtracting minMatchLen (2) from the actual length.
+//
+//	l = length - minMatchLen
+//
+func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
+) (err error) {
+	if l > maxMatchLen-minMatchLen {
+		return errors.New("lengthCodec.Encode: l out of range")
+	}
+	if l < 8 {
+		if err = lc.choice[0].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.low[posState].Encode(e, l)
+	}
+	if err = lc.choice[0].Encode(e, 1); err != nil {
+		return
+	}
+	if l < 16 {
+		if err = lc.choice[1].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.mid[posState].Encode(e, l-8)
+	}
+	if err = lc.choice[1].Encode(e, 1); err != nil {
+		return
+	}
+	if err = lc.high.Encode(e, l-16); err != nil {
+		return
+	}
+	return nil
+}
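+
+// Illustrative sketch, not part of the upstream file: the two choice
+// bits route a length offset to one of three trees, which is exactly
+// the bit count that lBits above reports. A hypothetical standalone
+// helper showing the routing:
+//
+//	package main
+//
+//	import "fmt"
+//
+//	// slot reports which tree a length offset l would use and the
+//	// total number of bits the encoding costs.
+//	func slot(l uint32) (tree string, bits int) {
+//		switch {
+//		case l < 8:
+//			return "low", 1 + 3 // choice bit 0, 3 tree bits
+//		case l < 16:
+//			return "mid", 2 + 3 // choice bits 1,0, 3 tree bits
+//		default:
+//			return "high", 2 + 8 // choice bits 1,1, 8 tree bits
+//		}
+//	}
+//
+//	func main() {
+//		for _, l := range []uint32{0, 7, 8, 15, 16, 271} {
+//			tree, bits := slot(l)
+//			fmt.Printf("offset %3d -> %s tree, %d bits\n", l, tree, bits)
+//		}
+//	}
+
+// Decode reads the length offset. Add minMatchLen to the length offset
+// l to compute the actual length.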
+func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
+) (l uint32, err error) {
+	var b uint32
+	if b, err = lc.choice[0].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.low[posState].Decode(d)
+		return
+	}
+	if b, err = lc.choice[1].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.mid[posState].Decode(d)
+		l += 8
+		return
+	}
+	l, err = lc.high.Decode(d)
+	l += 16
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 00000000000..c949d6ebd12
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// literalCodec supports the encoding of literals. It provides 768
+// probability values per literal state. The upper 512 probabilities are
+// used with the context of a match bit.
+type literalCodec struct {
+	probs []prob
+}
+
+// deepcopy initializes literal codec c as a deep copy of the source.
+func (c *literalCodec) deepcopy(src *literalCodec) {
+	if c == src {
+		return
+	}
+	c.probs = make([]prob, len(src.probs))
+	copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using a range encoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
+
+// minState and maxState define a range for the state values stored in
+// the state value.
+const (
+	minState = 0
+	maxState = 11
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 00000000000..4a244eb1ac7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/operation.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 00000000000..733bb99da41
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,80 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"unicode"
+)
+
+// operation represents an operation on the dictionary during encoding or
+// decoding.
+type operation interface {
+	Len() int
+}
+
+// match represents a repetition at the given distance and with the
+// given length.
+type match struct {
+	// supports all possible distance values, including the eos marker
+	distance int64
+	// length
+	n int
+}
+
+// verify checks whether the match is valid. If that is not the case an
+// error is returned.
+func (m match) verify() error {
+	if !(minDistance <= m.distance && m.distance <= maxDistance) {
+		return errors.New("distance out of range")
+	}
+	if !(1 <= m.n && m.n <= maxMatchLen) {
+		return errors.New("length out of range")
+	}
+	return nil
+}
+
+// l returns the l-value for the match, which is the difference of length
+// n and 2.
+func (m match) l() uint32 {
+	return uint32(m.n - minMatchLen)
+}
+
+// dist returns the dist value for the match, which is one less than the
+// distance stored in the match.
+func (m match) dist() uint32 {
+	return uint32(m.distance - minDistance)
+}
+
+// Len returns the number of bytes matched.
+func (m match) Len() int {
+	return m.n
+}
+
+// String returns a string representation for the repetition.
+func (m match) String() string {
+	return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
+}
+
+// lit represents a single byte literal.
+type lit struct {
+	b byte
+}
+
+// Len returns 1 for the single byte literal.
+func (l lit) Len() int {
+	return 1
+}
+
+// String returns a string representation for the literal.
+func (l lit) String() string {
+	var c byte
+	if unicode.IsPrint(rune(l.b)) {
+		c = l.b
+	} else {
+		c = '.'
+	}
+	return fmt.Sprintf("L{%c/%02x}", c, l.b)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/prob.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 00000000000..24d50ec6814
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// movebits defines the number of bits used for the updates of probability
+// values.
+const movebits = 5
+
+// probbits defines the number of bits of a probability value.
+const probbits = 11
+
+// probInit defines 0.5 as initial value for prob values.
+const probInit prob = 1 << (probbits - 1)
+
+// Type prob represents probabilities. The type can also be used to encode and
+// decode single bits.
+type prob uint16
+
+// dec decreases the probability. The decrease is proportional to the
+// probability value.
+func (p *prob) dec() {
+	*p -= *p >> movebits
+}
+
+// inc increases the probability. The increase is proportional to the
+// difference of 1 and the probability value.
+func (p *prob) inc() {
+	*p += ((1 << probbits) - *p) >> movebits
+}
+
+// bound computes the new bound for a given range using the probability
+// value.
+func (p prob) bound(r uint32) uint32 {
+	return (r >> probbits) * uint32(p)
+}
+
+// Bits returns 1. One is the number of bits that can be encoded or decoded
+// with a single prob value.
+func (p prob) Bits() int {
+	return 1
+}
+
+// Encode encodes the least-significant bit of v. Note that the p value will be
+// changed.
+func (p *prob) Encode(e *rangeEncoder, v uint32) error {
+	return e.EncodeBit(v, p)
+}
+
+// Decode decodes a single bit. Note that the p value will change.
+func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
+	return d.DecodeBit(p)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/properties.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/properties.go
new file mode 100644
index 00000000000..23418e25d26
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/properties.go
@@ -0,0 +1,69 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+)
+
+// maximum and minimum values for the LZMA properties.
+const (
+	minPB = 0
+	maxPB = 4
+)
+
+// maxPropertyCode is the possible maximum of a properties code byte.
+const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1
+
+// Properties contains the parameters LC, LP and PB. The parameter LC
+// defines the number of literal context bits; parameter LP the number
+// of literal position bits and PB the number of position bits.
+type Properties struct {
+	LC int
+	LP int
+	PB int
+}
+
+// String returns the properties in a string representation.
+func (p *Properties) String() string {
+	return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
+}
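+
+// Illustrative sketch, not part of the upstream file: the properties
+// code byte decoded below is a mixed-radix number, code =
+// (PB*5+LP)*9+LC, so the classic defaults LC=3, LP=0, PB=2 give 93
+// (0x5d). A standalone round trip of that arithmetic:
+//
+//	package main
+//
+//	import "fmt"
+//
+//	func main() {
+//		lc, lp, pb := 3, 0, 2 // classic LZMA defaults
+//		code := (pb*5+lp)*9 + lc
+//		fmt.Printf("code = %d (0x%02x)\n", code, code) // 93 (0x5d)
+//
+//		// decode it again, mirroring PropertiesForCode
+//		fmt.Println("LC =", code%9, "LP =", code/9%5, "PB =", code/45)
+//	}
+
+// PropertiesForCode converts a properties code byte into a Properties value.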
+func PropertiesForCode(code byte) (p Properties, err error) {
+	if code > maxPropertyCode {
+		return p, errors.New("lzma: invalid properties code")
+	}
+	p.LC = int(code % 9)
+	code /= 9
+	p.LP = int(code % 5)
+	code /= 5
+	p.PB = int(code % 5)
+	return p, err
+}
+
+// verify checks the properties for correctness.
+func (p *Properties) verify() error {
+	if p == nil {
+		return errors.New("lzma: properties are nil")
+	}
+	if !(minLC <= p.LC && p.LC <= maxLC) {
+		return errors.New("lzma: lc out of range")
+	}
+	if !(minLP <= p.LP && p.LP <= maxLP) {
+		return errors.New("lzma: lp out of range")
+	}
+	if !(minPB <= p.PB && p.PB <= maxPB) {
+		return errors.New("lzma: pb out of range")
+	}
+	return nil
+}
+
+// Code converts the properties to a byte. The function assumes that
+// the properties components are all in range.
+func (p Properties) Code() byte {
+	return byte((p.PB*5+p.LP)*9 + p.LC)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
new file mode 100644
index 00000000000..6361c5e7c83
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
@@ -0,0 +1,248 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// rangeEncoder implements range encoding of single bits. The low value
+// can overflow; therefore we need uint64. The cache value is used to
+// handle overflows.
+type rangeEncoder struct {
+	lbw      *LimitedByteWriter
+	nrange   uint32
+	low      uint64
+	cacheLen int64
+	cache    byte
+}
+
+// maxInt64 provides the maximal value of the int64 type
+const maxInt64 = 1<<63 - 1
+
+// newRangeEncoder creates a new range encoder.
+func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
+	lbw, ok := bw.(*LimitedByteWriter)
+	if !ok {
+		lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
+	}
+	return &rangeEncoder{
+		lbw:      lbw,
+		nrange:   0xffffffff,
+		cacheLen: 1}, nil
+}
+
+// Available returns the number of bytes that still can be written. The
+// method takes the bytes that will be currently written by Close into
+// account.
+func (e *rangeEncoder) Available() int64 {
+	return e.lbw.N - (e.cacheLen + 4)
+}
+
+// writeByte writes a single byte to the underlying writer. An error is
+// returned if the limit is reached. The written byte will be counted if
+// the underlying writer doesn't return an error.
+func (e *rangeEncoder) writeByte(c byte) error {
+	if e.Available() < 1 {
+		return ErrLimit
+	}
+	return e.lbw.WriteByte(c)
+}
+
+// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
+	e.nrange >>= 1
+	e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))
+
+	// normalize
+	const top = 1 << 24
+	if e.nrange >= top {
+		return nil
+	}
+	e.nrange <<= 8
+	return e.shiftLow()
+}
+
+// EncodeBit encodes the least significant bit of b. The p value will be
+// updated by the function depending on the bit encoded.
+func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
+	bound := p.bound(e.nrange)
+	if b&1 == 0 {
+		e.nrange = bound
+		p.inc()
+	} else {
+		e.low += uint64(bound)
+		e.nrange -= bound
+		p.dec()
+	}
+
+	// normalize
+	const top = 1 << 24
+	if e.nrange >= top {
+		return nil
+	}
+	e.nrange <<= 8
+	return e.shiftLow()
+}
+
+// Close writes a complete copy of the low value.
+func (e *rangeEncoder) Close() error {
+	for i := 0; i < 5; i++ {
+		if err := e.shiftLow(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// shiftLow shifts the low value for 8 bit. The shifted byte is written into
+// the byte writer. The cache value is used to handle overflows.
+func (e *rangeEncoder) shiftLow() error {
+	if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
+		tmp := e.cache
+		for {
+			err := e.writeByte(tmp + byte(e.low>>32))
+			if err != nil {
+				return err
+			}
+			tmp = 0xff
+			e.cacheLen--
+			if e.cacheLen <= 0 {
+				if e.cacheLen < 0 {
+					panic("negative cacheLen")
+				}
+				break
+			}
+		}
+		e.cache = byte(uint32(e.low) >> 24)
+	}
+	e.cacheLen++
+	e.low = uint64(uint32(e.low) << 8)
+	return nil
+}
+
+// rangeDecoder decodes single bits of the range encoding stream.
+type rangeDecoder struct {
+	br     io.ByteReader
+	nrange uint32
+	code   uint32
+}
+
+// init initializes the range decoder by reading from the byte reader.
+func (d *rangeDecoder) init() error {
+	d.nrange = 0xffffffff
+	d.code = 0
+
+	b, err := d.br.ReadByte()
+	if err != nil {
+		return err
+	}
+	if b != 0 {
+		return errors.New("newRangeDecoder: first byte not zero")
+	}
+
+	for i := 0; i < 4; i++ {
+		if err = d.updateCode(); err != nil {
+			return err
+		}
+	}
+
+	if d.code >= d.nrange {
+		return errors.New("newRangeDecoder: d.code >= d.nrange")
+	}
+
+	return nil
+}
+
+// newRangeDecoder initializes a range decoder. It reads five bytes from the
+// reader and therefore may return an error.
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
+	d = &rangeDecoder{br: br, nrange: 0xffffffff}
+
+	b, err := d.br.ReadByte()
+	if err != nil {
+		return nil, err
+	}
+	if b != 0 {
+		return nil, errors.New("newRangeDecoder: first byte not zero")
+	}
+
+	for i := 0; i < 4; i++ {
+		if err = d.updateCode(); err != nil {
+			return nil, err
+		}
+	}
+
+	if d.code >= d.nrange {
+		return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
+	}
+
+	return d, nil
+}
+
+// possiblyAtEnd checks whether the decoder may be at the end of the stream.
+func (d *rangeDecoder) possiblyAtEnd() bool {
+	return d.code == 0
+}
+
+// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
+// contain the bit at the least-significant position. All other bits will be
+// zero.
+func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
+	d.nrange >>= 1
+	d.code -= d.nrange
+	t := 0 - (d.code >> 31)
+	d.code += d.nrange & t
+	b = (t + 1) & 1
+
+	// d.code will stay less than d.nrange
+
+	// normalize
+	// assume d.code < d.nrange
+	const top = 1 << 24
+	if d.nrange >= top {
+		return b, nil
+	}
+	d.nrange <<= 8
+	// d.code < d.nrange will be maintained
+	return b, d.updateCode()
+}
+
+// DecodeBit decodes a single bit. The bit will be returned at the
+// least-significant position. All other bits will be zero. The probability
+// value will be updated.
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
+	bound := p.bound(d.nrange)
+	if d.code < bound {
+		d.nrange = bound
+		p.inc()
+		b = 0
+	} else {
+		d.code -= bound
+		d.nrange -= bound
+		p.dec()
+		b = 1
+	}
+	// normalize
+	// assume d.code < d.nrange
+	const top = 1 << 24
+	if d.nrange >= top {
+		return b, nil
+	}
+	d.nrange <<= 8
+	// d.code < d.nrange will be maintained
+	return b, d.updateCode()
+}
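+
+// Illustrative sketch, not part of the upstream file: DecodeBit splits
+// the current interval at bound = (nrange>>probbits) * p, so probInit
+// (1024 of 2048) splits it roughly in half and adaptation moves the
+// split point. The arithmetic can be checked standalone:
+//
+//	package main
+//
+//	import "fmt"
+//
+//	func main() {
+//		const probbits = 11
+//		nrange := uint32(0xffffffff)
+//		for _, p := range []uint32{1024, 1536, 512} { // ~0.5, 0.75, 0.25
+//			bound := (nrange >> probbits) * p
+//			fmt.Printf("p=%4d -> bound/range = %.3f\n",
+//				p, float64(bound)/float64(nrange))
+//		}
+//	}
+
+// updateCode reads a new byte into the code.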
+func (d *rangeDecoder) updateCode() error {
+	b, err := d.br.ReadByte()
+	if err != nil {
+		return err
+	}
+	d.code = (d.code << 8) | uint32(b)
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader.go
new file mode 100644
index 00000000000..2ef3dcaaa9b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader.go
@@ -0,0 +1,100 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lzma supports the decoding and encoding of LZMA streams.
+// Reader and Writer support the classic LZMA format. Reader2 and
+// Writer2 support the decoding and encoding of LZMA2 streams.
+//
+// The package is written completely in Go and doesn't rely on any external
+// library.
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// ReaderConfig stores the parameters for the reader of the classic LZMA
+// format.
+type ReaderConfig struct {
+	DictCap int
+}
+
+// fill converts the zero values of the configuration to the default values.
+func (c *ReaderConfig) fill() {
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+}
+
+// Verify checks the reader configuration for errors. Zero values will
+// be replaced by default values.
+func (c *ReaderConfig) Verify() error {
+	c.fill()
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	return nil
+}
+
+// Reader provides a reader for LZMA files or streams.
+type Reader struct {
+	lzma io.Reader
+	h    header
+	d    *decoder
+}
+
+// NewReader creates a new reader for an LZMA stream using the classic
+// format. NewReader reads and checks the header of the LZMA stream.
+func NewReader(lzma io.Reader) (r *Reader, err error) {
+	return ReaderConfig{}.NewReader(lzma)
+}
+
+// NewReader creates a new reader for an LZMA stream in the classic
+// format. The function reads and verifies the header of the LZMA
+// stream.
+func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	data := make([]byte, HeaderLen)
+	if _, err := io.ReadFull(lzma, data); err != nil {
+		if err == io.EOF {
+			return nil, errors.New("lzma: unexpected EOF")
+		}
+		return nil, err
+	}
+	r = &Reader{lzma: lzma}
+	if err = r.h.unmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	if r.h.dictCap < MinDictCap {
+		return nil, errors.New("lzma: dictionary capacity too small")
+	}
+	dictCap := r.h.dictCap
+	if c.DictCap > dictCap {
+		dictCap = c.DictCap
+	}
+
+	state := newState(r.h.properties)
+	dict, err := newDecoderDict(dictCap)
+	if err != nil {
+		return nil, err
+	}
+	r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// EOSMarker indicates that an EOS marker has been encountered.
+func (r *Reader) EOSMarker() bool {
+	return r.d.eosMarker
+}
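+
+// Illustrative sketch, not part of the upstream file: a minimal
+// consumer of the public Reader API above, decompressing a classic
+// .lzma stream from stdin to stdout:
+//
+//	package main
+//
+//	import (
+//		"io"
+//		"os"
+//
+//		"github.com/ulikunitz/xz/lzma"
+//	)
+//
+//	func main() {
+//		r, err := lzma.NewReader(os.Stdin)
+//		if err != nil {
+//			panic(err)
+//		}
+//		if _, err := io.Copy(os.Stdout, r); err != nil {
+//			panic(err)
+//		}
+//	}
+
+// Read returns uncompressed data.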
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.d.Read(p)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 00000000000..a55cfaa4e3f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,232 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+)
+
+// Reader2Config stores the parameters for the LZMA2 reader.
+type Reader2Config struct {
+	DictCap int
+}
+
+// fill converts the zero values of the configuration to the default values.
+func (c *Reader2Config) fill() {
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+}
+
+// Verify checks the reader configuration for errors. Zero configuration values
+// will be replaced by default values.
+func (c *Reader2Config) Verify() error {
+	c.fill()
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	return nil
+}
+
+// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
+// first chunk should have a dictionary reset and the first compressed
+// chunk a properties reset. The chunk sequence need not be terminated
+// by an end-of-stream chunk.
+type Reader2 struct {
+	r   io.Reader
+	err error
+
+	dict        *decoderDict
+	ur          *uncompressedReader
+	decoder     *decoder
+	chunkReader io.Reader
+
+	cstate chunkState
+	ctype  chunkType
+}
+
+// NewReader2 creates a reader for an LZMA2 chunk sequence.
+func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+	return Reader2Config{}.NewReader2(lzma2)
+}
+
+// NewReader2 creates an LZMA2 reader using the given configuration.
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	r = &Reader2{r: lzma2, cstate: start}
+	r.dict, err = newDecoderDict(c.DictCap)
+	if err != nil {
+		return nil, err
+	}
+	if err = r.startChunk(); err != nil {
+		r.err = err
+	}
+	return r, nil
+}
+
+// uncompressed tests whether the chunk type specifies an uncompressed
+// chunk.
+func uncompressed(ctype chunkType) bool {
+	return ctype == cU || ctype == cUD
+}
+
+// startChunk parses a new chunk.
+func (r *Reader2) startChunk() error {
+	r.chunkReader = nil
+	header, err := readChunkHeader(r.r)
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return err
+	}
+	xlog.Debugf("chunk header %v", header)
+	if err = r.cstate.next(header.ctype); err != nil {
+		return err
+	}
+	if r.cstate == stop {
+		return io.EOF
+	}
+	if header.ctype == cUD || header.ctype == cLRND {
+		r.dict.Reset()
+	}
+	size := int64(header.uncompressed) + 1
+	if uncompressed(header.ctype) {
+		if r.ur != nil {
+			r.ur.Reopen(r.r, size)
+		} else {
+			r.ur = newUncompressedReader(r.r, r.dict, size)
+		}
+		r.chunkReader = r.ur
+		return nil
+	}
+	br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
+	if r.decoder == nil {
+		state := newState(header.props)
+		r.decoder, err = newDecoder(br, state, r.dict, size)
+		if err != nil {
+			return err
+		}
+		r.chunkReader = r.decoder
+		return nil
+	}
+	switch header.ctype {
+	case cLR:
+		r.decoder.State.Reset()
+	case cLRN, cLRND:
+		r.decoder.State = newState(header.props)
+	}
+	err = r.decoder.Reopen(br, size)
+	if err != nil {
+		return err
+	}
+	r.chunkReader = r.decoder
+	return nil
+}
+
+// Read reads data from the LZMA2 chunk sequence.
+func (r *Reader2) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for n < len(p) {
+		var k int
+		k, err = r.chunkReader.Read(p[n:])
+		n += k
+		if err != nil {
+			if err == io.EOF {
+				err = r.startChunk()
+				if err == nil {
+					continue
+				}
+			}
+			r.err = err
+			return n, err
+		}
+		if k == 0 {
+			r.err = errors.New("lzma: Reader2 doesn't get data")
+			return n, r.err
+		}
+	}
+	return n, nil
+}
+
+// EOS returns whether the LZMA2 stream has been terminated by an
+// end-of-stream chunk.
+func (r *Reader2) EOS() bool {
+	return r.cstate == stop
+}
+
+// uncompressedReader is used to read uncompressed chunks.
+type uncompressedReader struct {
+	lr   io.LimitedReader
+	Dict *decoderDict
+	eof  bool
+	err  error
+}
+
+// newUncompressedReader initializes a new uncompressedReader.
+func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
+	ur := &uncompressedReader{
+		lr:   io.LimitedReader{R: r, N: size},
+		Dict: dict,
+	}
+	return ur
+}
+
+// Reopen reinitializes an uncompressed reader.
+func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
+	ur.err = nil
+	ur.eof = false
+	ur.lr = io.LimitedReader{R: r, N: size}
+}
+
+// fill reads uncompressed data into the dictionary.
+func (ur *uncompressedReader) fill() error {
+	if !ur.eof {
+		n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
+		if err != io.EOF {
+			return err
+		}
+		ur.eof = true
+		if n > 0 {
+			return nil
+		}
+	}
+	if ur.lr.N != 0 {
+		return io.ErrUnexpectedEOF
+	}
+	return io.EOF
+}
+
+// Read reads uncompressed data from the limited reader.
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
+	if ur.err != nil {
+		return 0, ur.err
+	}
+	for {
+		var k int
+		k, err = ur.Dict.Read(p[n:])
+		n += k
+		if n >= len(p) {
+			return n, nil
+		}
+		if err != nil {
+			break
+		}
+		err = ur.fill()
+		if err != nil {
+			break
+		}
+	}
+	ur.err = err
+	return n, err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/state.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/state.go
new file mode 100644
index 00000000000..502351052fd
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/state.go
@@ -0,0 +1,151 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// states defines the overall state count
+const states = 12
+
+// state maintains the full state of the operation encoding or decoding
+// process.
+type state struct {
+	rep         [4]uint32
+	isMatch     [states << maxPosBits]prob
+	isRepG0Long [states << maxPosBits]prob
+	isRep       [states]prob
+	isRepG0     [states]prob
+	isRepG1     [states]prob
+	isRepG2     [states]prob
+	litCodec    literalCodec
+	lenCodec    lengthCodec
+	repLenCodec lengthCodec
+	distCodec   distCodec
+	state       uint32
+	posBitMask  uint32
+	Properties  Properties
+}
+
+// initProbSlice initializes a slice of probabilities.
+func initProbSlice(p []prob) {
+	for i := range p {
+		p[i] = probInit
+	}
+}
+
+// Reset sets all state information to the original values.
+func (s *state) Reset() {
+	p := s.Properties
+	*s = state{
+		Properties: p,
+		// dict:       s.dict,
+		posBitMask: (uint32(1) << uint(p.PB)) - 1,
+	}
+	initProbSlice(s.isMatch[:])
+	initProbSlice(s.isRep[:])
+	initProbSlice(s.isRepG0[:])
+	initProbSlice(s.isRepG1[:])
+	initProbSlice(s.isRepG2[:])
+	initProbSlice(s.isRepG0Long[:])
+	s.litCodec.init(p.LC, p.LP)
+	s.lenCodec.init()
+	s.repLenCodec.init()
+	s.distCodec.init()
+}
+
+// initState initializes the state.
+func initState(s *state, p Properties) {
+	*s = state{Properties: p}
+	s.Reset()
+}
+
+// newState creates a new state from the given Properties.
+func newState(p Properties) *state {
+	s := &state{Properties: p}
+	s.Reset()
+	return s
+}
+
+// deepcopy initializes s as a deep copy of the source.
+func (s *state) deepcopy(src *state) {
+	if s == src {
+		return
+	}
+	s.rep = src.rep
+	s.isMatch = src.isMatch
+	s.isRepG0Long = src.isRepG0Long
+	s.isRep = src.isRep
+	s.isRepG0 = src.isRepG0
+	s.isRepG1 = src.isRepG1
+	s.isRepG2 = src.isRepG2
+	s.litCodec.deepcopy(&src.litCodec)
+	s.lenCodec.deepcopy(&src.lenCodec)
+	s.repLenCodec.deepcopy(&src.repLenCodec)
+	s.distCodec.deepcopy(&src.distCodec)
+	s.state = src.state
+	s.posBitMask = src.posBitMask
+	s.Properties = src.Properties
+}
+
+// cloneState creates a new clone of the given state.
+func cloneState(src *state) *state {
+	s := new(state)
+	s.deepcopy(src)
+	return s
+}
+
+// updateStateLiteral updates the state for a literal.
+func (s *state) updateStateLiteral() {
+	switch {
+	case s.state < 4:
+		s.state = 0
+		return
+	case s.state < 10:
+		s.state -= 3
+		return
+	}
+	s.state -= 6
+}
+
+// updateStateMatch updates the state for a match.
+func (s *state) updateStateMatch() {
+	if s.state < 7 {
+		s.state = 7
+	} else {
+		s.state = 10
+	}
+}
+
+// updateStateRep updates the state for a repetition.
+func (s *state) updateStateRep() {
+	if s.state < 7 {
+		s.state = 8
+	} else {
+		s.state = 11
+	}
+}
+
+// updateStateShortRep updates the state for a short repetition.
+func (s *state) updateStateShortRep() {
+	if s.state < 7 {
+		s.state = 9
+	} else {
+		s.state = 11
+	}
+}
+
+// states computes the states of the operation codec.
+func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
+	state1 = s.state
+	posState = uint32(dictHead) & s.posBitMask
+	state2 = (s.state << maxPosBits) | posState
+	return
+}
+
+// litState computes the literal state.
+func (s *state) litState(prev byte, dictHead int64) uint32 {
+	lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
+	litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
+		(uint32(prev) >> (8 - lc))
+	return litState
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
new file mode 100644
index 00000000000..504b3d78e44
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
@@ -0,0 +1,133 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It uses a
+// tree of probability values. The root of the tree is the
+// most-significant bit.
+type treeCodec struct {
+	probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+	return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+	m := uint32(1)
+	for i := int(tc.bits) - 1; i >= 0; i-- {
+		b := (v >> uint(i)) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := 0; j < int(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+	}
+	return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+	probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument
+// must be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+	return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+	m := uint32(1)
+	for i := uint(0); i < uint(tc.bits); i++ {
+		b := (v >> i) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
+
+// Bits returns the number of bits for the probability tree.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// MinDictCap and MaxDictCap provide the range of supported dictionary
+// capacities.
+const (
+	MinDictCap = 1 << 12
+	MaxDictCap = 1<<32 - 1
+)
+
+// WriterConfig defines the configuration parameters for a writer.
+type WriterConfig struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain an
+	// explicit size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOS marker needs to be written.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields to their explicit default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. Verify will replace zero
+// values with default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("lzma: WriterConfig is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: WriterConfig has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.SizeInHeader {
+		if c.Size < 0 {
+			return errors.New("lzma: negative size not supported")
+		}
+	} else if !c.EOSMarker {
+		return errors.New("lzma: EOS marker is required")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// header returns the header structure for this configuration.
+func (c *WriterConfig) header() header {
+	h := header{
+		properties: *c.Properties,
+		dictCap:    c.DictCap,
+		size:       -1,
+	}
+	if c.SizeInHeader {
+		h.size = c.Size
+	}
+	return h
+}
+
+// Writer writes an LZMA stream in the classic format.
+type Writer struct {
+	h   header
+	bw  io.ByteWriter
+	buf *bufio.Writer
+	e   *encoder
+}
+
+// NewWriter creates a new LZMA writer for the classic format. The
+// method will write the header to the underlying stream.
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{h: c.header()}
+
+	var ok bool
+	w.bw, ok = lzma.(io.ByteWriter)
+	if !ok {
+		w.buf = bufio.NewWriter(lzma)
+		w.bw = w.buf
+	}
+	state := newState(w.h.properties)
+	m, err := c.Matcher.new(w.h.dictCap)
+	if err != nil {
+		return nil, err
+	}
+	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	var flags encoderFlags
+	if c.EOSMarker {
+		flags = eosMarker
+	}
+	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
+		return nil, err
+	}
+
+	if err = w.writeHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// NewWriter creates a new LZMA writer using the classic format. The
+// function writes the header to the underlying stream.
+func NewWriter(lzma io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(lzma)
+}
+
+// writeHeader writes the LZMA header into the stream.
+func (w *Writer) writeHeader() error {
+	data, err := w.h.marshalBinary()
+	if err != nil {
+		return err
+	}
+	_, err = w.bw.(io.Writer).Write(data)
+	return err
+}
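+
+// Illustrative sketch, not part of the upstream file: a minimal
+// producer for the public Writer API above. The DictCap value is an
+// assumption for the example; zero fields are filled with the defaults
+// shown in fill:
+//
+//	package main
+//
+//	import (
+//		"io"
+//		"os"
+//
+//		"github.com/ulikunitz/xz/lzma"
+//	)
+//
+//	func main() {
+//		// compress stdin to stdout in the classic .lzma format
+//		w, err := lzma.WriterConfig{DictCap: 1 << 23}.NewWriter(os.Stdout)
+//		if err != nil {
+//			panic(err)
+//		}
+//		if _, err := io.Copy(w, os.Stdin); err != nil {
+//			panic(err)
+//		}
+//		if err := w.Close(); err != nil {
+//			panic(err)
+//		}
+//	}
+
+// Write puts data into the Writer.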
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.h.size >= 0 {
+		m := w.h.size
+		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
+		if m < 0 {
+			m = 0
+		}
+		if m < int64(len(p)) {
+			p = p[:m]
+			err = ErrNoSpace
+		}
+	}
+	var werr error
+	if n, werr = w.e.Write(p); werr != nil {
+		err = werr
+	}
+	return n, err
+}
+
+// Close closes the writer stream. It ensures that all data from the
+// buffer will be compressed and the LZMA stream will be finished.
+func (w *Writer) Close() error {
+	if w.h.size >= 0 {
+		n := w.e.Compressed() + int64(w.e.dict.Buffered())
+		if n != w.h.size {
+			return errSize
+		}
+	}
+	err := w.e.Close()
+	if w.buf != nil {
+		ferr := w.buf.Flush()
+		if err == nil {
+			err = ferr
+		}
+	}
+	return err
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 00000000000..7c1afe15725
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// Writer2Config is used to create a Writer2 using parameters.
+type Writer2Config struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *Writer2Config) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+}
+
+// Verify checks the Writer2Config for correctness. Zero values will be
+// replaced by default values.
+func (c *Writer2Config) Verify() error {
+	if c == nil {
+		return errors.New("lzma: Writer2Config is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: Writer2Config has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.Properties.LC+c.Properties.LP > 4 {
+		return errors.New("lzma: sum of lc and lp exceeds 4")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Writer2 supports the creation of an LZMA2 stream. But note that
+// written data is buffered, so call Flush or Close to write data to the
+// underlying writer. The Close method writes the end-of-stream marker
+// to the stream. So you may be able to concatenate the output of two
+// writers as long as the output of the first writer has only been
+// flushed but not closed.
+//
+// Any change to the fields Properties or DictCap must be done before
+// the first call to Write, Flush or Close.
+type Writer2 struct {
+	w io.Writer
+
+	start   *state
+	encoder *encoder
+
+	cstate chunkState
+	ctype  chunkType
+
+	buf bytes.Buffer
+	lbw LimitedByteWriter
+}
+
+// NewWriter2 creates an LZMA2 chunk sequence writer with the default
+// parameters and options.
+func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
+	return Writer2Config{}.NewWriter2(lzma2)
+}
+
+// NewWriter2 creates a new LZMA2 writer using the given configuration.
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer2{
+		w:      lzma2,
+		start:  newState(*c.Properties),
+		cstate: start,
+		ctype:  start.defaultChunkType(),
+	}
+	w.buf.Grow(maxCompressed)
+	w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
+	m, err := c.Matcher.new(c.DictCap)
+	if err != nil {
+		return nil, err
+	}
+	d, err := newEncoderDict(c.DictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
+	if err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// written returns the number of bytes written to the current chunk
+func (w *Writer2) written() int {
+	if w.encoder == nil {
+		return 0
+	}
+	return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
+}
+
+// errClosed indicates that the writer is closed.
+var errClosed = errors.New("lzma: writer closed")
+
+// Write writes data to the LZMA2 stream. Note that written data will be
+// buffered. Use Flush or Close to ensure that data is written to the
+// underlying writer.
+func (w *Writer2) Write(p []byte) (n int, err error) {
+	if w.cstate == stop {
+		return 0, errClosed
+	}
+	for n < len(p) {
+		m := maxUncompressed - w.written()
+		if m <= 0 {
+			panic("lzma: maxUncompressed reached")
+		}
+		var q []byte
+		if n+m < len(p) {
+			q = p[n : n+m]
+		} else {
+			q = p[n:]
+		}
+		k, err := w.encoder.Write(q)
+		n += k
+		if err != nil && err != ErrLimit {
+			return n, err
+		}
+		if err == ErrLimit || k == m {
+			if err = w.flushChunk(); err != nil {
+				return n, err
+			}
+		}
+	}
+	return n, nil
+}
+
+// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
+// stream.
+func (w *Writer2) writeUncompressedChunk() error {
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("lzma: can't write empty uncompressed chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	switch w.ctype {
+	case cLRND:
+		w.ctype = cUD
+	default:
+		w.ctype = cU
+	}
+	w.encoder.state = w.start
+
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = w.encoder.dict.CopyN(w.w, int(u))
+	return err
+}
+
+// writeCompressedChunk writes a compressed chunk to the underlying
+// writer.
+func (w *Writer2) writeCompressedChunk() error {
+	if w.ctype == cU || w.ctype == cUD {
+		panic("chunk type uncompressed")
+	}
+
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("writeCompressedChunk: empty chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	c := w.buf.Len()
+	if c <= 0 {
+		panic("no compressed data")
+	}
+	if c > maxCompressed {
+		panic("overrun of compressed data limit")
+	}
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+		compressed:   uint16(c - 1),
+		props:        w.encoder.state.Properties,
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = io.Copy(w.w, &w.buf)
+	return err
+}
+
+// writeChunk writes a single chunk to the underlying writer.
+func (w *Writer2) writeChunk() error {
+	u := int(uncompressedHeaderLen + w.encoder.Compressed())
+	c := headerLen(w.ctype) + w.buf.Len()
+	if u < c {
+		return w.writeUncompressedChunk()
+	}
+	return w.writeCompressedChunk()
+}
+
+// flushChunk terminates the current chunk. The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks to be created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzmafilter.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 00000000000..69cf5f7c279
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded representation.
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
+	c := lzma.EncodeDictCap(f.dictCap)
+	return []byte{lzmaFilterID, 1, c}, nil
+}
+
+// UnmarshalBinary unmarshals the given data representation of the LZMA2
+// filter.
+func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
+	if len(data) != lzmaFilterLen {
+		return errors.New("xz: data for LZMA2 filter has wrong length")
+	}
+	if data[0] != lzmaFilterID {
+		return errors.New("xz: wrong LZMA2 filter id")
+	}
+	if data[1] != 1 {
+		return errors.New("xz: wrong LZMA2 filter size")
+	}
+	dc, err := lzma.DecodeDictCap(data[2])
+	if err != nil {
+		return errors.New("xz: wrong LZMA2 dictionary size property")
+	}
+
+	f.dictCap = dc
+	return nil
+}
+
+// reader creates a new reader for the LZMA2 filter.
+func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
+	err error) {
+
+	config := new(lzma.Reader2Config)
+	if c != nil {
+		config.DictCap = c.DictCap
+	}
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fr, err = config.NewReader2(r)
+	if err != nil {
+		return nil, err
+	}
+	return fr, nil
+}
+
+// writeCloser creates a io.WriteCloser for the LZMA2 filter.
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
+) (fw io.WriteCloser, err error) {
+	config := new(lzma.Writer2Config)
+	if c != nil {
+		*config = lzma.Writer2Config{
+			Properties: c.Properties,
+			DictCap:    c.DictCap,
+			BufSize:    c.BufSize,
+			Matcher:    c.Matcher,
+		}
+	}
+
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fw, err = config.NewWriter2(w)
+	if err != nil {
+		return nil, err
+	}
+	return fw, nil
+}
+
+// last returns true, because an LZMA2 filter must be the last filter in
+// the filter list.
+func (f lzmaFilter) last() bool { return true }
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/reader.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/reader.go
new file mode 100644
index 00000000000..0634c6bcc0c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/reader.go
@@ -0,0 +1,373 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xz supports the compression and decompression of xz files. It
+// supports version 1.0.4 of the specification without the non-LZMA2
+// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
+package xz
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// ReaderConfig defines the parameters for the xz reader. The
+// SingleStream parameter requests the reader to assume that the
+// underlying stream contains only a single stream.
+type ReaderConfig struct {
+	DictCap      int
+	SingleStream bool
+}
+
+// fill replaces all zero values with their default values.
+func (c *ReaderConfig) fill() {
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+}
+
+// Verify checks the reader parameters for validity. Zero values will be
+// replaced by default values.
+func (c *ReaderConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: reader parameters are nil")
+	}
+	lc := lzma.Reader2Config{DictCap: c.DictCap}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Reader supports the reading of one or multiple xz streams.
+type Reader struct {
+	ReaderConfig
+
+	xz io.Reader
+	sr *streamReader
+}
+
+// streamReader decodes a single xz stream
+type streamReader struct {
+	ReaderConfig
+
+	xz      io.Reader
+	br      *blockReader
+	newHash func() hash.Hash
+	h       header
+	index   []record
+}
+
+// NewReader creates a new xz reader using the default parameters.
+// The function reads and checks the header of the first XZ stream. The
+// reader will process multiple streams including padding.
+func NewReader(xz io.Reader) (r *Reader, err error) {
+	return ReaderConfig{}.NewReader(xz)
+}
+
+// NewReader creates an xz stream reader. The created reader will be
+// able to process multiple streams and padding unless SingleStream
+// has been set in the reader configuration c.
+func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	r = &Reader{
+		ReaderConfig: c,
+		xz:           xz,
+	}
+	if r.sr, err = c.newStreamReader(xz); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return nil, err
+	}
+	return r, nil
+}
+
+var errUnexpectedData = errors.New("xz: unexpected data after stream")
+
+// Read reads uncompressed data from the stream.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	for n < len(p) {
+		if r.sr == nil {
+			if r.SingleStream {
+				data := make([]byte, 1)
+				_, err = io.ReadFull(r.xz, data)
+				if err != io.EOF {
+					return n, errUnexpectedData
+				}
+				return n, io.EOF
+			}
+			for {
+				r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
+				if err != errPadding {
+					break
+				}
+			}
+			if err != nil {
+				return n, err
+			}
+		}
+		k, err := r.sr.Read(p[n:])
+		n += k
+		if err != nil {
+			if err == io.EOF {
+				r.sr = nil
+				continue
+			}
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+var errPadding = errors.New("xz: padding (4 zero bytes) encountered")
+
+// newStreamReader creates a new xz stream reader using the given configuration
+// parameters. NewReader reads and checks the header of the xz stream.
+func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	data := make([]byte, HeaderLen)
+	if _, err := io.ReadFull(xz, data[:4]); err != nil {
+		return nil, err
+	}
+	if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
+		return nil, errPadding
+	}
+	if _, err = io.ReadFull(xz, data[4:]); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return nil, err
+	}
+	r = &streamReader{
+		ReaderConfig: c,
+		xz:           xz,
+		index:        make([]record, 0, 4),
+	}
+	if err = r.h.UnmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	xlog.Debugf("xz header %s", r.h)
+	if r.newHash, err = newHashFunc(r.h.flags); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// errIndex indicates an error with the xz file index.
+var errIndex = errors.New("xz: error in xz file index")
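+
+// Illustrative sketch, not part of the upstream file: at the top level
+// the xz package is used like any other compression reader/writer
+// pair. A minimal round trip through the xz container:
+//
+//	package main
+//
+//	import (
+//		"bytes"
+//		"io"
+//		"os"
+//
+//		"github.com/ulikunitz/xz"
+//	)
+//
+//	func main() {
+//		var buf bytes.Buffer
+//
+//		w, err := xz.NewWriter(&buf)
+//		if err != nil {
+//			panic(err)
+//		}
+//		if _, err := io.WriteString(w, "hello, xz"); err != nil {
+//			panic(err)
+//		}
+//		if err := w.Close(); err != nil {
+//			panic(err)
+//		}
+//
+//		r, err := xz.NewReader(&buf)
+//		if err != nil {
+//			panic(err)
+//		}
+//		if _, err := io.Copy(os.Stdout, r); err != nil {
+//			panic(err)
+//		}
+//	}
+
+// readTail reads the index body and the xz footer.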
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errBlockSize + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/writer.go b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 00000000000..c126f70995d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,386 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describes the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 + CheckSum byte + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters.
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing besides returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer and writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if err != nil { + return nil, err + } + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer.
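The Writer defined here pairs with the Reader from reader.go; a minimal round-trip sketch (illustrative, not part of the vendored files):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/ulikunitz/xz"
)

func main() {
	var buf bytes.Buffer

	// NewWriter writes the stream header immediately.
	w, err := xz.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, xz")); err != nil {
		panic(err)
	}
	// Close flushes the index and footer; it does not close buf.
	if err := w.Close(); err != nil {
		panic(err)
	}

	r, err := xz.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // hello, xz
}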
+func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + bw.mw = io.MultiWriter(bw.w, bw.hash) + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the compressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressedSize returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of bytes written to the +// blockWriter. +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer.
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/LICENSE new file mode 100644 index 00000000000..b749d070797 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
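The msgpack package vendored below exposes Marshal and Unmarshal as thin wrappers over the Encoder and Decoder defined in encode.go and decode.go. A minimal round-trip sketch (illustrative, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack"
)

// item is a hypothetical type used only for this illustration.
type item struct {
	Name  string
	Count int
}

func main() {
	b, err := msgpack.Marshal(item{Name: "widget", Count: 2})
	if err != nil {
		panic(err)
	}

	var out item
	if err := msgpack.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Count) // widget 2
}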
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/appengine.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/appengine.go new file mode 100644 index 00000000000..e8e91e53f35 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/codes/codes.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/codes/codes.go new file mode 100644 index 00000000000..28e0a5a88b4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/codes/codes.go @@ -0,0 +1,90 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code = 0xe0 + + Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code = 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c Code) bool { + return c == Bin8 || c 
== Bin16 || c == Bin32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode.go new file mode 100644 index 00000000000..e76ef4f82b5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode.go @@ -0,0 +1,549 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type bufReader interface { + io.Reader + io.ByteScanner +} + +func newBufReader(r io.Reader) bufReader { + if br, ok := r.(bufReader); ok { + return br + } + return bufio.NewReader(r) +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + useLoose bool + useJSONTag bool + + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + d.resetReader(r) + return d +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseDecodeInterfaceLoose(flag bool) *Decoder { + d.useLoose = flag + return d +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. 
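A short sketch of the UseJSONTag option documented above (illustrative, not part of the vendored files): a struct that carries only json tags still decodes by field name.

package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

// point is a hypothetical type with json tags only, no msgpack tags.
type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	b, err := msgpack.Marshal(map[string]int{"x": 1, "y": 2})
	if err != nil {
		panic(err)
	}

	var p point
	// Without UseJSONTag(true) the field names "X"/"Y" would not match.
	d := msgpack.NewDecoder(bytes.NewReader(b)).UseJSONTag(true)
	if err := d.Decode(&p); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 1 2
}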
+func (d *Decoder) UseJSONTag(v bool) *Decoder { + d.useJSONTag = v + return d +} + +func (d *Decoder) Reset(r io.Reader) error { + d.resetReader(r) + return nil +} + +func (d *Decoder) resetReader(r io.Reader) { + reader := newBufReader(r) + d.r = reader + d.s = reader +} + +//nolint:gocyclo +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.useLoose { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +// DecodeInterface decodes value into interface. 
It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. +func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
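The difference between DecodeInterface and DecodeInterfaceLoose shows up in the concrete types returned; a sketch (illustrative, not part of the vendored files), using the fact that 0x05 is the MessagePack positive fixint for 5:

package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

func main() {
	raw := []byte{0x05} // positive fixint 5

	v1, err := msgpack.NewDecoder(bytes.NewReader(raw)).DecodeInterface()
	if err != nil {
		panic(err)
	}
	v2, err := msgpack.NewDecoder(bytes.NewReader(raw)).DecodeInterfaceLoose()
	if err != nil {
		panic(err)
	}
	// Strict decoding keeps the narrow type; loose widens to int64.
	fmt.Printf("%T %T\n", v1, v2) // int8 int64
}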
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } + if codes.IsFixedMap(c) { + return d.skipMap(c) + } + if codes.IsFixedArray(c) { + return d.skipSlice(c) + } + if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + buf, err := readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + d.buf = buf + if d.rec != nil { + //TODO: read directly into d.rec? + d.rec = append(d.rec, buf...) + } + return buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + const bytesAllocLimit = 1024 * 1024 // 1mb + + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := n - len(b) + if alloc > bytesAllocLimit { + alloc = bytesAllocLimit + } + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return nil, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_map.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_map.go new file mode 100644 index 00000000000..2a3d3ecb28d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -0,0 +1,339 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const mapElemsAllocLimit = 1e4 + +var mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) +var mapStringStringType = mapStringStringPtrType.Elem() + +var mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) +var mapStringInterfaceType = mapStringInterfacePtrType.Elem() + +var errInvalidCode = errors.New("invalid code") + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. 
+func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) || codes.IsBin(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := 
make(map[string]interface{}, min(size, mapElemsAllocLimit)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.useJSONTag { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + // Skip extra values. + for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + if f := fields.Table[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else { + if err := d.Skip(); err != nil { + return err + } + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_number.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_number.go new file mode 100644 index 00000000000..15019cc97aa --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. 
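These helpers assemble multi-byte integers big-endian. A sketch of the wire layout they consume (illustrative, not part of the vendored files): 0xcd is the Uint16 marker, followed by two payload bytes.

package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

func main() {
	// 0xcd = Uint16 marker, payload 0x12 0x34 in big-endian order.
	d := msgpack.NewDecoder(bytes.NewReader([]byte{0xcd, 0x12, 0x34}))
	n, err := d.DecodeUint64()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 4660 == 0x1234
}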
+func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. +func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. 
+func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_query.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_query.go new file mode 100644 index 00000000000..d680be80c0d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. 
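A sketch of the query language described above (illustrative, not part of the vendored files); the asterisk form handled by queryArrayIndex further below would visit every element instead of a single index:

package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

func main() {
	b, err := msgpack.Marshal(map[string]interface{}{
		"values": []int{10, 20, 30},
	})
	if err != nil {
		panic(err)
	}

	// "values.1" walks the map key, then array index 1, skipping the rest.
	res, err := msgpack.NewDecoder(bytes.NewReader(b)).Query("values.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(res) // [20]
}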
+func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_slice.go new file mode 100644 index 00000000000..7d43ec610d3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -0,0 +1,193 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const sliceElemsAllocLimit = 1e4 + +var sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. 
+func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := setStringsCap(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func setStringsCap(s []string, n int) []string { + if n > sliceElemsAllocLimit { + n = sliceElemsAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceElemsAllocLimit { + diff = sliceElemsAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceElemsAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_string.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_string.go new file 
mode 100644 index 00000000000..5402022ee9a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_string.go @@ -0,0 +1,175 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + if err = mustSet(v); err != nil { + return err + } + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_value.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_value.go new file mode 100644 index 00000000000..64b28904433 --- /dev/null +++ 
b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -0,0 +1,239 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.extLen == 0 || d.extLen == 1 { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + if d.extLen != 0 { + b, err := d.readN(d.extLen) + if err != nil { + return err + } + d.rec = b + } else { + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + } + + unmarshaler := v.Interface().(Unmarshaler) + err := unmarshaler.UnmarshalMsgpack(d.rec) + d.rec = nil + return err +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode.go 
new file mode 100644 index 00000000000..08ca7dec6d3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode.go @@ -0,0 +1,177 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type writer interface { + io.Writer + WriteByte(byte) error + WriteString(string) (int, error) +} + +type byteWriter struct { + io.Writer + + buf []byte + bootstrap [64]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := &byteWriter{ + Writer: w, + } + bw.buf = bw.bootstrap[:] + return bw +} + +func (w *byteWriter) WriteByte(c byte) error { + w.buf = w.buf[:1] + w.buf[0] = c + _, err := w.Write(w.buf) + return err +} + +func (w *byteWriter) WriteString(s string) (int, error) { + w.buf = append(w.buf[:0], s...) + return w.Write(w.buf) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(v) + return buf.Bytes(), err } + +type Encoder struct { + w writer + + buf []byte + // timeBuf is lazily allocated in encodeTime() to + // avoid allocations when time.Time values are encoded. + // + // buf can't be reused for time encoding, as buf is used + // to encode msgpack extLen + timeBuf []byte + + sortMapKeys bool + structAsArray bool + useJSONTag bool + useCompact bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + bw, ok := w.(writer) + if !ok { + bw = newByteWriter(w) + } + return &Encoder{ + w: bw, + buf: make([]byte, 9), + } +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(flag bool) *Encoder { + e.sortMapKeys = flag + return e +} + +// StructAsArray causes the Encoder to encode Go structs as MessagePack arrays. +func (e *Encoder) StructAsArray(flag bool) *Encoder { + e.structAsArray = flag + return e +} + +// UseJSONTag causes the Encoder to use the json struct tag as a fallback option +// if there is no msgpack tag. +func (e *Encoder) UseJSONTag(flag bool) *Encoder { + e.useJSONTag = flag + return e +} + +// UseCompactEncoding causes the Encoder to choose the most compact encoding. +// For example, it allows encoding a Go int64 as a msgpack int8, saving 7 bytes.
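// (A hedged illustration of the trade-off, assuming the Marshal/NewEncoder
// API defined above and the standard msgpack spec codes used by
// encode_number.go further below:
//
//	var buf bytes.Buffer
//	_ = msgpack.NewEncoder(&buf).UseCompactEncoding(true).Encode(int64(7))
//	// buf now holds 07 — a single positive fixint byte.
//
//	buf.Reset()
//	_ = msgpack.NewEncoder(&buf).Encode(int64(7))
//	// buf now holds d3 00 00 00 00 00 00 00 07 — codes.Int64 plus an
//	// 8-byte big-endian payload.
//
// The method itself follows.)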
+func (e *Encoder) UseCompactEncoding(flag bool) *Encoder { + e.useCompact = flag + return e +} + +func (e *Encoder) Encode(v interface{}) error { + switch v := v.(type) { + case nil: + return e.EncodeNil() + case string: + return e.EncodeString(v) + case []byte: + return e.EncodeBytes(v) + case int: + return e.encodeInt64Cond(int64(v)) + case int64: + return e.encodeInt64Cond(v) + case uint: + return e.encodeUint64Cond(uint64(v)) + case uint64: + return e.encodeUint64Cond(v) + case bool: + return e.EncodeBool(v) + case float32: + return e.EncodeFloat32(v) + case float64: + return e.EncodeFloat64(v) + case time.Duration: + return e.encodeInt64Cond(int64(v)) + case time.Time: + return e.EncodeTime(v) + } + return e.EncodeValue(reflect.ValueOf(v)) +} + +func (e *Encoder) EncodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := e.Encode(vv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeValue(v reflect.Value) error { + fn := getEncoder(v.Type()) + return fn(e, v) +} + +func (e *Encoder) EncodeNil() error { + return e.writeCode(codes.Nil) +} + +func (e *Encoder) EncodeBool(value bool) error { + if value { + return e.writeCode(codes.True) + } + return e.writeCode(codes.False) +} + +func (e *Encoder) writeCode(c codes.Code) error { + return e.w.WriteByte(byte(c)) +} + +func (e *Encoder) write(b []byte) error { + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) writeString(s string) error { + _, err := e.w.WriteString(s) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_map.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_map.go new file mode 100644 index 00000000000..7aa7c47fb7f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_map.go @@ -0,0 +1,172 @@ +package msgpack + +import ( + "reflect" + "sort" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeMapValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + for _, key := range v.MapKeys() { + if err := e.EncodeValue(key); err != nil { + return err + } + if err := e.EncodeValue(v.MapIndex(key)); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.sortMapKeys { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.sortMapKeys { + return e.encodeSortedMapStringInterface(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := 
e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedMapLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Map16, uint16(l)) + } + return e.write4(codes.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + var structFields *fields + if e.useJSONTag { + structFields = jsonStructs.Fields(strct.Type()) + } else { + structFields = structs.Fields(strct.Type()) + } + + if e.structAsArray || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_number.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_number.go new file mode 100644 index 00000000000..0d2fdb39862 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_number.go @@ -0,0 +1,230 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +// EncodeUint8 encodes an uint8 in 2 bytes preserving the type of the number. +func (e *Encoder) EncodeUint8(n uint8) error { + return e.write1(codes.Uint8, n) +} + +func (e *Encoder) encodeUint8Cond(n uint8) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint8(n) +} + +// EncodeUint16 encodes an uint16 in 3 bytes preserving the type of the number. +func (e *Encoder) EncodeUint16(n uint16) error { + return e.write2(codes.Uint16, n) +} + +func (e *Encoder) encodeUint16Cond(n uint16) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint16(n) +} + +// EncodeUint32 encodes an uint32 in 5 bytes preserving the type of the number. +func (e *Encoder) EncodeUint32(n uint32) error { + return e.write4(codes.Uint32, n) +} + +func (e *Encoder) encodeUint32Cond(n uint32) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint32(n) +} + +// EncodeUint64 encodes an uint64 in 9 bytes preserving the type of the number. +func (e *Encoder) EncodeUint64(n uint64) error { + return e.write8(codes.Uint64, n) +} + +func (e *Encoder) encodeUint64Cond(n uint64) error { + if e.useCompact { + return e.EncodeUint(n) + } + return e.EncodeUint64(n) +} + +// EncodeInt8 encodes an int8 in 2 bytes preserving the type of the number.
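// (Worked example of the fixed-width framing above, assuming the standard
// msgpack codes; buf and e are illustrative:
//
//	e := msgpack.NewEncoder(&buf)
//	_ = e.EncodeUint16(300) // cd 01 2c — code byte, then big-endian uint16
//	_ = e.EncodeInt8(-5)    // d0 fb    — code byte, then two's-complement int8
//
// Because the width is fixed by the code byte, a decoder can recover the
// original Go type, unlike the compact EncodeUint/EncodeInt forms below.)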
+func (e *Encoder) EncodeInt8(n int8) error { + return e.write1(codes.Int8, uint8(n)) +} + +func (e *Encoder) encodeInt8Cond(n int8) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt8(n) +} + +// EncodeInt16 encodes an int16 in 3 bytes preserving the type of the number. +func (e *Encoder) EncodeInt16(n int16) error { + return e.write2(codes.Int16, uint16(n)) +} + +func (e *Encoder) encodeInt16Cond(n int16) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt16(n) +} + +// EncodeInt32 encodes an int32 in 5 bytes preserving the type of the number. +func (e *Encoder) EncodeInt32(n int32) error { + return e.write4(codes.Int32, uint32(n)) +} + +func (e *Encoder) encodeInt32Cond(n int32) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt32(n) +} + +// EncodeInt64 encodes an int64 in 9 bytes preserving the type of the number. +func (e *Encoder) EncodeInt64(n int64) error { + return e.write8(codes.Int64, uint64(n)) +} + +func (e *Encoder) encodeInt64Cond(n int64) error { + if e.useCompact { + return e.EncodeInt(n) + } + return e.EncodeInt64(n) +} + +// EncodeUint encodes an uint64 in 1, 2, 3, 5, or 9 bytes. +// The type of the number is lost during encoding. +func (e *Encoder) EncodeUint(n uint64) error { + if n <= math.MaxInt8 { + return e.w.WriteByte(byte(n)) + } + if n <= math.MaxUint8 { + return e.EncodeUint8(uint8(n)) + } + if n <= math.MaxUint16 { + return e.EncodeUint16(uint16(n)) + } + if n <= math.MaxUint32 { + return e.EncodeUint32(uint32(n)) + } + return e.EncodeUint64(n) +} + +// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes. +// The type of the number is lost during encoding. +func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(codes.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(n) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + return e.write4(codes.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = n + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return
e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_slice.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_slice.go new file mode 100644 index 00000000000..22996a925e6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_slice.go @@ -0,0 +1,131 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +var sliceStringType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(sliceStringType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_value.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_value.go new file mode 100644 index 00000000000..3452caf1879 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/encode_value.go @@ -0,0 +1,177 @@ +package msgpack + +import ( + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := 
_getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + +func _getEncoder(typ reflect.Type) encoderFunc { + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + + kind := typ.Kind() + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/ext.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/ext.go new file mode 100644 index 00000000000..e11f69f08d0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/ext.go @@ -0,0 +1,244 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/codes" +) + +type extInfo struct { + Type reflect.Type + Decoder decoderFunc +} + +var 
extTypes = make(map[int8]extInfo) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. +// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if enc != nil { + typeEncMap.Store(typ, makeExtEncoder(id, enc)) + } + if dec != nil { + extTypes[id] = extInfo{ + Type: typ, + Decoder: dec, + } + typeDecMap.Store(typ, makeExtDecoder(id, dec)) + } +} + +func (e *Encoder) EncodeExtHeader(typeID int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeID)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeID int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeID, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeID int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if id != typeID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeID) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) decodeExtHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeID, err := d.readCode() + if err != nil { + return 0, 0, err + 
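// (Worked example of the header decode above, per the msgpack spec: the
// byte pair d6 ff is codes.FixExt4 followed by the signed type id -1, so
// decodeExtHeader reports length 4 and id -1 — the 4-byte timestamp
// extension that time.go consumes.)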
} + + return int8(typeID), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeID int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.decodeExtHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extID, extLen, err := d.decodeExtHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := reflect.New(info.Type) + + d.extLen = extLen + err = info.Decoder(d, v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/msgpack.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/msgpack.go new file mode 100644 index 00000000000..220b43c47b3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/time.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/time.go new file mode 100644 index 00000000000..5604220d93d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/time.go @@ -0,0 +1,149 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +var timeExtID int8 = -1 + +//nolint:gochecknoinits +func init() { + timeType := reflect.TypeOf((*time.Time)(nil)).Elem() + registerExt(timeExtID, timeType, encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Assume that zero time does not have timezone information. 
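// (Worked example of the packing in encodeTime above, assuming the msgpack
// timestamp extension: for tm.Unix() == 1_000_000_000 and Nanosecond() == 0,
// secs>>34 is zero and data == secs fits in 32 bits, so the 4-byte form
// 3b 9a ca 00 is emitted; a non-zero nanosecond count forces the 8-byte
// form with data = nsec<<34 | secs, and seconds beyond 34 bits fall back
// to the 12-byte form.)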
+ return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. + _, err = d.s.ReadByte() + if err != nil { + return time.Time{}, err + } + } + + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} + +func encodeTimeValue(e *Encoder, v reflect.Value) error { + tm := v.Interface().(time.Time) + b := e.encodeTime(tm) + return e.write(b) +} + +func decodeTimeValue(d *Decoder, v reflect.Value) error { + tm, err := d.DecodeTime() + if err != nil { + return err + } + v.Set(reflect.ValueOf(tm)) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/types.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/types.go new file mode 100644 index 00000000000..c115ee626a5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/msgpack/types.go @@ -0,0 +1,301 @@ +package msgpack + +import ( + "reflect" + "sync" + + "github.com/vmihailenco/tagparser" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() +var customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() + +var marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +type encoderFunc = func(*Encoder, reflect.Value) error +type decoderFunc = func(*Decoder, reflect.Value) error + +var typeEncMap sync.Map +var typeDecMap sync.Map + +// Register registers encoder and decoder functions for a value. +// This is a low-level API; in most cases you should prefer implementing +// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces.
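// (Hedged sketch of the higher-level hook: RegisterExt in ext.go above ties
// a Go type to a msgpack ext id. The Point type and id 1 are hypothetical —
// id -1 is already claimed by time.Time:
//
//	type Point struct{ X, Y int }
//
//	func init() {
//		msgpack.RegisterExt(1, Point{})
//	}
//
// Register, defined directly below, is the lower-level variant: it installs
// raw encoder/decoder funcs for a type without writing any ext header.)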
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +var structs = newStructCache(false) +var jsonStructs = newStructCache(true) + +type structCache struct { + m sync.Map + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + if v, ok := m.m.Load(typ); ok { + return v.(*fields) + } + + fs := getFields(typ, m.useJSONTag) + m.m.Store(typ, fs) + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) value(v reflect.Value) reflect.Value { + return fieldByIndex(v, f.index) +} + +func (f *field) Omit(strct reflect.Value) bool { + return f.omitEmpty && isEmptyValue(f.value(strct)) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + return f.encoder(e, f.value(strct)) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + return f.decoder(d, f.value(strct)) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Table map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(numField int) *fields { + return &fields{ + Table: make(map[string]*field, numField), + List: make([]*field, 0, numField), + } +} + +func (fs *fields) Add(field *field) { + fs.Table[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + numField := typ.NumField() + fs := newFields(numField) + + var omitEmpty bool + for i := 0; i < numField; i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get("msgpack") + if useJSONTag && tagStr == "" { + tagStr = f.Tag.Get("json") + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + if tag.HasOption("asArray") { + fs.AsArray = true + } + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + encoder: getEncoder(f.Type), + decoder: getDecoder(f.Type), + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = autoinlineFields(fs, f.Type, field, useJSONTag) + } + if inline { + fs.Table[field.name] = field + continue + } + } + + fs.Add(field) + } + return fs +} + +var encodeStructValuePtr uintptr +var decodeStructValuePtr uintptr + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ 
reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func autoinlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) + fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + for i, x := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(x) + } + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/LICENSE new file mode 100644 index 00000000000..3fc93fdff8c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go new file mode 100644 index 00000000000..2de1c6f7bde --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p *Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/safe.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/safe.go new file mode 100644 index 00000000000..870fe541f0d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func BytesToString(b []byte) string { + return string(b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go new file mode 100644 index 00000000000..f8bc18d911a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package internal + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. 
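// (Hedged sketch of what this internal machinery ultimately supports:
// tagparser.Parse from the package root, applied to a struct tag body;
// the "name,omitempty" input is illustrative:
//
//	tag := tagparser.Parse("name,omitempty")
//	// tag.Name == "name"
//	// tag.HasOption("omitempty") == true
//
// StringToBytes, defined directly below, lets Parse walk the tag without
// copying it, so the parser's bytes alias the original string and must be
// treated as read-only; the appengine build above falls back to copying.)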
+func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/tagparser.go b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/tagparser.go new file mode 100644 index 00000000000..56b918011b0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/vmihailenco/tagparser/tagparser.go @@ -0,0 +1,176 @@ +package tagparser + +import ( + "github.com/vmihailenco/tagparser/internal/parser" +) + +type Tag struct { + Name string + Options map[string]string +} + +func (t *Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func Parse(s string) *Tag { + p := &tagParser{ + Parser: parser.NewString(s), + } + p.parseKey() + return &p.Tag +} + +type tagParser struct { + *parser.Parser + + Tag Tag + hasName bool + key string +} + +func (p *tagParser) setTagOption(key, value string) { + if !p.hasName { + p.hasName = true + if key == "" { + p.Tag.Name = value + return + } + } + if p.Tag.Options == nil { + p.Tag.Options = make(map[string]string) + } + if key == "" { + p.Tag.Options[value] = "" + } else { + p.Tag.Options[key] = value + } +} + +func (p *tagParser) parseKey() { + p.key = "" + + var b []byte + for p.Valid() { + c := p.Read() + switch c { + case ',': + p.Skip(' ') + p.setTagOption("", string(b)) + p.parseKey() + return + case ':': + p.key = string(b) + p.parseValue() + return + case '\'': + p.parseQuotedValue() + return + default: + b = append(b, c) + } + } + + if len(b) > 0 { + p.setTagOption("", string(b)) + } +} + +func (p *tagParser) parseValue() { + const quote = '\'' + + c := p.Peek() + if c == quote { + p.Skip(quote) + p.parseQuotedValue() + return + } + + var b []byte + for p.Valid() { + c = p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + b = p.readBrackets(b) + case ',': + p.Skip(' ') + p.setTagOption(p.key, string(b)) + p.parseKey() + return + default: + b = append(b, c) + } + } + p.setTagOption(p.key, string(b)) +} + +func (p *tagParser) readBrackets(b []byte) []byte { + var lvl int +loop: + for p.Valid() { + c := p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + lvl++ + case ')': + b = append(b, c) + lvl-- + if lvl < 0 { + break loop + } + default: + b = append(b, c) + } + } + return b +} + +func (p *tagParser) parseQuotedValue() { + const quote = '\'' + + var b []byte + b = append(b, quote) + + for p.Valid() { + bb, ok := p.ReadSep(quote) + if !ok { + b = append(b, bb...) + break + } + + if len(bb) > 0 && bb[len(bb)-1] == '\\' { + b = append(b, bb[:len(bb)-1]...) + b = append(b, quote) + continue + } + + b = append(b, bb...) 
+ b = append(b, quote) + break + } + + p.setTagOption(p.key, string(b)) + if p.Skip(',') { + p.Skip(' ') + } + p.parseKey() +} + +func Unquote(s string) (string, bool) { + const quote = '\'' + + if len(s) < 2 { + return s, false + } + if s[0] == quote && s[len(s)-1] == quote { + return s[1 : len(s)-1], true + } + return s, false +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/NOTICE new file mode 100644 index 00000000000..4e6c00ab317 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/NOTICE @@ -0,0 +1,20 @@ +This package is derived from gopkg.in/yaml.v2, which is copyright +2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Includes mechanical ports of code from libyaml, distributed under its original +license. See LICENSE.libyaml for more information. + +Modifications for cty interfacing copyright 2019 Martin Atkins, and +distributed under the same license terms. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/apic.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/apic.go new file mode 100644 index 00000000000..1f7e87e6727 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. 
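+// Illustrative only: a minimal driver for the parser API above, mirroring
+// the event loop used by decode.go in this package (the input shown is a
+// hypothetical example; error handling omitted):
+//
+//	p := &yaml_parser_t{}
+//	yaml_parser_initialize(p)
+//	yaml_parser_set_input_string(p, []byte("hello: world\n"))
+//	var evt yaml_event_t
+//	for yaml_parser_parse(p, &evt) {
+//		if evt.typ == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//	}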
+func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. 
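+// Illustrative only: the event initializers above combine with the emitter
+// (defined in emitterc.go) to produce a document. A sketch emitting one
+// hypothetical scalar; the boolean results are ignored here for brevity:
+//
+//	e := &yaml_emitter_t{}
+//	yaml_emitter_initialize(e)
+//	var out []byte
+//	yaml_emitter_set_output_string(e, &out)
+//	var evt yaml_event_t
+//	yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING)
+//	yaml_emitter_emit(e, &evt)
+//	yaml_document_start_event_initialize(&evt, nil, nil, true)
+//	yaml_emitter_emit(e, &evt)
+//	yaml_scalar_event_initialize(&evt, nil, nil, []byte("hi"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_emitter_emit(e, &evt)
+//	yaml_document_end_event_initialize(&evt, true)
+//	yaml_emitter_emit(e, &evt)
+//	yaml_stream_end_event_initialize(&evt)
+//	yaml_emitter_emit(e, &evt)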
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. 
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/converter.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/converter.go new file mode 100644 index 00000000000..a73b34a8b2a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/converter.go @@ -0,0 +1,69 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ConverterConfig is used to configure a new converter, using NewConverter. +type ConverterConfig struct { + // EncodeAsFlow, when set to true, causes Marshal to produce flow-style + // mapping and sequence serializations. + EncodeAsFlow bool +} + +// A Converter can marshal and unmarshal between cty values and YAML bytes. +// +// Because there are many different ways to map cty to YAML and vice-versa, +// a converter is configurable using the settings in ConverterConfig, which +// allow for a few different permutations of mapping to YAML. +// +// If you are just trying to work with generic, standard YAML, the predefined +// converter in Standard should be good enough. +type Converter struct { + encodeAsFlow bool +} + +// NewConverter creates a new Converter with the given configuration. +func NewConverter(config *ConverterConfig) *Converter { + return &Converter{ + encodeAsFlow: config.EncodeAsFlow, + } +} + +// Standard is a predefined Converter that produces and consumes generic YAML +// using only built-in constructs that any other YAML implementation ought to +// understand. +var Standard *Converter = NewConverter(&ConverterConfig{}) + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { + return c.impliedType(src) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func (c *Converter) Marshal(v cty.Value) ([]byte, error) { + return c.marshal(v) +} + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// An error is returned if the given source contains any YAML document +// delimiters. 
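+// Illustrative usage from client code (a sketch; the package import path is
+// github.com/zclconf/go-cty-yaml and the YAML input is hypothetical):
+//
+//	src := []byte("a: 1\nb: [true, false]\n")
+//	ty, err := yaml.Standard.ImpliedType(src)
+//	if err == nil {
+//		val, err := yaml.Standard.Unmarshal(src, ty)
+//		// ...
+//	}
+//
+// and the reverse direction is yaml.Standard.Marshal(val).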
+func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return c.unmarshal(src, ty) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go new file mode 100644 index 00000000000..b91141ccaa9 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go @@ -0,0 +1,57 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code +// into a cty Value, using the ImpliedType and Unmarshal methods of the +// Standard pre-defined converter. +var YAMLDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "src", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + return cty.DynamicPseudoType, nil + } + if args[0].IsNull() { + return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") + } + return Standard.ImpliedType([]byte(args[0].AsString())) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + return Standard.Unmarshal([]byte(args[0].AsString()), retType) + }, +}) + +// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value +// into YAML. +var YAMLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + raw, err := Standard.Marshal(args[0]) + if err != nil { + return cty.NilVal, err + } + return cty.StringVal(string(raw)), nil + }, +}) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/decode.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/decode.go new file mode 100644 index 00000000000..e369ff27c88 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/decode.go @@ -0,0 +1,261 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilVal, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &valueAnalysis{ + anchorsPending: map[string]int{}, + anchorVals: map[string]cty.Value{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing start of document") + } + + v, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") + } + 
if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") + } + + return convert.Convert(v, ty) +} + +func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + return c.unmarshalParseRemainder(an, &evt, p) +} + +func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.unmarshalScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.unmarshalAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.unmarshalMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.unmarshalSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilVal, parseEventErrorWrap(evt, err) + } + + if val.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + val = cty.StringVal("<<") + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, val) + } + return val, nil +} + +func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + vals := make(map[string]cty.Value) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + v := cty.ObjectVal(vals) + if anchor != "" { + an.completeAnchor(anchor, v) + } + return v, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilVal, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. 
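+			// For example, with this (illustrative) input the "<<" key
+			// splices the anchored mapping into the current one:
+			//
+			//	base: &defaults
+			//	  a: 1
+			//	derived:
+			//	  <<: *defaults
+			//	  b: 2
+			//
+			// so "derived" decodes as an object carrying both "a" and "b".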
+ val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + ty := val.Type() + if !(ty.IsObjectType() || ty.IsMapType()) { + return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + vals[k.AsString()] = v + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") + } + val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + vals[keyVal.AsString()] = val + } +} + +func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var vals []cty.Value + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.TupleVal(vals) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + val, err := c.unmarshalParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, val) + } +} + +func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + v, err := an.anchorVal(string(evt.anchor)) + if err != nil { + err = parseEventErrorWrap(evt, err) + } + return v, err +} + +type valueAnalysis struct { + anchorsPending map[string]int + anchorVals map[string]cty.Value +} + +func (an *valueAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorVals[name] = v +} + +func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorVals[name] + if !ok { + return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/emitterc.go new file mode 100644 index 00000000000..a1c2cc52627 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. 
+func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
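+// For a stream containing a single implicit document, the states visited are,
+// in order (a sketch of the dispatch above):
+//
+//	yaml_EMIT_STREAM_START_STATE
+//	-> yaml_EMIT_FIRST_DOCUMENT_START_STATE
+//	-> yaml_EMIT_DOCUMENT_CONTENT_STATE
+//	-> yaml_EMIT_DOCUMENT_END_STATE
+//	-> yaml_EMIT_DOCUMENT_START_STATE (then yaml_EMIT_END_STATE on STREAM-END)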
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
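+// Flow styles render collections inline, e.g. (illustrative):
+//
+//	{name: example, tags: [a, b]}
+//
+// while the block states below write the indented, one-item-per-line form.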
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
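+// A tag with a handle is emitted as the handle followed by the suffix
+// (e.g. "!!str"), while a suffix-only tag is emitted in the verbatim
+// "!<...>" form.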
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
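+// Anchor and alias names must be non-empty and contain only alphanumeric
+// characters; on success the name is recorded in emitter.anchor_data for the
+// emit step.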
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
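+// Clears the per-event anchor, tag, and scalar analysis state, then
+// re-populates whichever of those properties the event actually carries.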
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/encode.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/encode.go new file mode 100644 index 00000000000..daa1478a934 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/encode.go @@ -0,0 +1,189 @@ +package yaml + +import ( + "bytes" + "fmt" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +func (c *Converter) marshal(v cty.Value) ([]byte, error) { + var buf bytes.Buffer + + e := &yaml_emitter_t{} + yaml_emitter_initialize(e) + yaml_emitter_set_output_writer(e, &buf) + yaml_emitter_set_unicode(e, true) + + var evt yaml_event_t + yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_document_start_event_initialize(&evt, nil, nil, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + if err := c.marshalEmit(v, e); err != nil { + return nil, err + } + + yaml_document_end_event_initialize(&evt, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_stream_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + return buf.Bytes(), nil +} + +func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { + ty := v.Type() + switch { + case v.IsNull(): + return c.marshalPrimitive(v, e) + case !v.IsKnown(): + return fmt.Errorf("cannot serialize unknown value as YAML") + case ty.IsPrimitiveType(): + return c.marshalPrimitive(v, e) + case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): + return c.marshalSequence(v, e) + case ty.IsObjectType(), ty.IsMapType(): + return c.marshalMapping(v, e) + default: + return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) + } +} + +func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { + var evt yaml_event_t + + if v.IsNull() { + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte("null"), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil + } + + switch v.Type() { + case cty.String: + str := v.AsString() + style := yaml_DOUBLE_QUOTED_SCALAR_STYLE + if strings.Contains(str, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + style, + ) + 
case cty.Number: + str := v.AsBigFloat().Text('f', -1) + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + case cty.Bool: + var str string + switch v { + case cty.True: + str = "true" + case cty.False: + str = "false" + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + } + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_SEQUENCE_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_SEQUENCE_STYLE + } + + var evt yaml_event_t + yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + _, v := it.Element() + err := c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_sequence_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_MAPPING_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_MAPPING_STYLE + } + + var evt yaml_event_t + yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + k, v := it.Element() + err := c.marshalEmit(k, e) + if err != nil { + return err + } + err = c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_mapping_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/error.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/error.go new file mode 100644 index 00000000000..ae41c488f87 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/error.go @@ -0,0 +1,97 @@ +package yaml + +import ( + "errors" + "fmt" +) + +// Error is an error implementation used to report errors that correspond to +// a particular position in an input buffer. +type Error struct { + cause error + Line, Column int +} + +func (e Error) Error() string { + return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) +} + +// Cause is an implementation of the interface used by +// github.com/pkg/errors.Cause, returning the underlying error without the +// position information. +func (e Error) Cause() error { + return e.cause +} + +// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper +// returning the underlying error without the position information. 
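+//
+// Callers that want the position directly can also type-assert the returned
+// error; an illustrative usage sketch:
+//
+//	if yamlErr, ok := err.(Error); ok {
+//		fmt.Printf("parse failed at %d:%d\n", yamlErr.Line, yamlErr.Column)
+//	}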
+func (e Error) WrappedErrors() []error { + return []error{e.cause} +} + +func parserError(p *yaml_parser_t) error { + var cause error + if len(p.problem) > 0 { + cause = errors.New(p.problem) + } else { + cause = errors.New("invalid YAML syntax") // useless generic error, then + } + + return parserErrorWrap(p, cause) +} + +func parserErrorWrap(p *yaml_parser_t, cause error) error { + switch { + case p.problem_mark.line != 0: + line := p.problem_mark.line + column := p.problem_mark.column + // Scanner errors don't iterate line before returning error + if p.error == yaml_SCANNER_ERROR { + line++ + column = 0 + } + return Error{ + cause: cause, + Line: line, + Column: column + 1, + } + case p.context_mark.line != 0: + return Error{ + cause: cause, + Line: p.context_mark.line, + Column: p.context_mark.column + 1, + } + default: + return cause + } +} + +func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { + return parserErrorWrap(p, fmt.Errorf(f, vals...)) +} + +func parseEventErrorWrap(evt *yaml_event_t, cause error) error { + if evt.start_mark.line == 0 { + // Event does not have a start mark, so we won't wrap the error at all + return cause + } + return Error{ + cause: cause, + Line: evt.start_mark.line, + Column: evt.start_mark.column + 1, + } +} + +func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { + return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) +} + +func emitterError(e *yaml_emitter_t) error { + var cause error + if len(e.problem) > 0 { + cause = errors.New(e.problem) + } else { + cause = errors.New("failed to write YAML token") // useless generic error, then + } + return cause +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/implied_type.go new file mode 100644 index 00000000000..5b7b0686fab --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/implied_type.go @@ -0,0 +1,268 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) impliedType(src []byte) (cty.Type, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilType, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &typeAnalysis{ + anchorsPending: map[string]int{}, + anchorTypes: map[string]cty.Type{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing start of document") + } + + ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") + } + if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilType, 
parseEventErrorf(&evt, "unexpected extra content after value") + } + + return ty, err +} + +func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + return c.impliedTypeParseRemainder(an, &evt, p) +} + +func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.impliedTypeScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.impliedTypeAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.impliedTypeMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.impliedTypeSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + implicit := evt.implicit + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + var ty cty.Type + switch { + case tag == "" && !implicit: + // Untagged explicit string + ty = cty.String + default: + v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilType, parseEventErrorWrap(evt, err) + } + if v.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + ty = cty.String + } else { + ty = v.Type() + } + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, ty) + } + return ty, nil +} + +func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + atys := make(map[string]cty.Type) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + ty := cty.Object(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilType, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. 
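+			// (This is YAML's "<<" merge key: the merged value must resolve
+			// to an object type, whose attribute types are folded into atys.)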
+ ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + if !ty.IsObjectType() { + return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for name, aty := range ty.AttributeTypes() { + atys[name] = aty + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") + } + valTy, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + atys[keyVal.AsString()] = valTy + } +} + +func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var atys []cty.Type + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.Tuple(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilType, err + } + + atys = append(atys, valTy) + } +} + +func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + ty, err := an.anchorType(string(evt.anchor)) + if err != nil { + err = parseEventErrorWrap(evt, err) + } + return ty, err +} + +type typeAnalysis struct { + anchorsPending map[string]int + anchorTypes map[string]cty.Type +} + +func (an *typeAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorTypes[name] = ty +} + +func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorTypes[name] + if !ok { + return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/parserc.go new file mode 100644 index 00000000000..81d05dfe573 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? 
+// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
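+// Maps parser.state to the corresponding handler below. Handlers push
+// return states onto parser.states, so the parser runs as an explicit
+// pushdown automaton over the grammar at the top of this file.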
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
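
Each of the parse functions above follows the same control discipline: before descending into a nested node it appends the state it wants to resume to parser.states, and when a production ends it pops that stack instead of returning up a call chain. The following is a minimal, runnable sketch of that pattern; the names are invented for illustration and are not the vendored API.

package main

import "fmt"

// state stands in for yaml_parser_state_t (hypothetical, for illustration).
type state string

type miniParser struct {
	state  state
	states []state // saved states to resume after nested productions
}

// descend saves the resume state and switches to the nested production,
// as the yaml_parser_parse_* functions above do before parsing a sub-node.
func (p *miniParser) descend(nested, resume state) {
	p.states = append(p.states, resume)
	p.state = nested
}

// finish pops the saved state, mirroring the
// "parser.state = parser.states[len(parser.states)-1]" idiom above.
func (p *miniParser) finish() {
	p.state = p.states[len(p.states)-1]
	p.states = p.states[:len(p.states)-1]
}

func main() {
	p := &miniParser{state: "block_mapping_key"}
	p.descend("block_node", "block_mapping_value") // parse the key's node
	fmt.Println(p.state)                           // block_node
	p.finish()
	fmt.Println(p.state) // block_mapping_value
}
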
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/readerc.go new file mode 100644 index 00000000000..7c1f5fac3db --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. 
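
The default_tag_directives table above is what makes the standard "!!" shorthand work once yaml_parser_process_directives has run: each registered handle expands to its prefix when a tag is resolved. A minimal sketch of that expansion follows; the helper name and map are invented for illustration and are not the vendored API.

package main

import "fmt"

// defaultTagDirectives mirrors the default_tag_directives table above.
var defaultTagDirectives = map[string]string{
	"!":  "!",
	"!!": "tag:yaml.org,2002:",
}

// expandTag resolves a handle+suffix pair against the directive table,
// e.g. ("!!", "str") -> "tag:yaml.org,2002:str". An unknown handle would
// surface as the parser's "found undefined tag handle" error.
func expandTag(handle, suffix string) (string, bool) {
	prefix, ok := defaultTagDirectives[handle]
	if !ok {
		return "", false
	}
	return prefix + suffix, true
}

func main() {
	tag, _ := expandTag("!!", "str")
	fmt.Println(tag) // tag:yaml.org,2002:str
}
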
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. 
Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. 
+ value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/resolve.go new file mode 100644 index 00000000000..0f643834285 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/resolve.go @@ -0,0 +1,288 @@ +package yaml + +import ( + "encoding/base64" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" +) + +type resolveMapItem struct { + value cty.Value + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v cty.Value + tag string + l []string + }{ + {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { + if !resolvableTag(tag) { + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if src != "" { + hint = resolveTable[src[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { + return cty.StringVal(src), nil + } + + // Handle things we can lookup in a map. + if item, ok := resolveMap[src]; ok { + return item.value, nil + } + + if tag == "" { + for _, nan := range []string{".nan", ".NaN", ".NAN"} { + if src == nan { + // cty cannot represent NaN, so this is an error + return cty.NilVal, fmt.Errorf("floating point NaN is not supported") + } + } + } + + // Base 60 floats are intentionally not supported. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + if numberVal, err := cty.ParseNumberVal(src); err == nil { + return numberVal, nil + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. 
+ return cty.StringVal(t.Format(time.RFC3339)), nil + } + } + + plain := strings.Replace(src, "_", "", -1) + if numberVal, err := cty.ParseNumberVal(plain); err == nil { + return numberVal, nil + } + if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") { + tag = yaml_INT_TAG // will handle parsing below in our tag switch + } + default: + panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) + } + } + + if tag == "" && src == "<<" { + return mergeMappingVal, nil + } + + switch tag { + case yaml_STR_TAG, yaml_BINARY_TAG: + // If it's binary then we want to keep the base64 representation, because + // cty has no binary type, but we will check that it's actually base64. + if tag == yaml_BINARY_TAG { + _, err := base64.StdEncoding.DecodeString(src) + if err != nil { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) + } + } + return cty.StringVal(src), nil + case yaml_BOOL_TAG: + item, ok := resolveMap[src] + if !ok || item.tag != yaml_BOOL_TAG { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + } + return item.value, nil + case yaml_FLOAT_TAG, yaml_INT_TAG: + // Note: We don't actually check that a value tagged INT is a whole + // number here. We could, but cty generally doesn't care about the + // int/float distinction, so we'll just be generous and accept it. + plain := strings.Replace(src, "_", "", -1) + if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats + return numberVal, nil + } + if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberIntVal(intv), nil + } + if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberUIntVal(uintv), nil + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + return cty.NumberIntVal(intv), nil + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return cty.NumberUIntVal(uintv), nil + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + return cty.NumberIntVal(intv), nil + } + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_TIMESTAMP_TAG: + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. + return cty.StringVal(t.Format(time.RFC3339)), nil + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_NULL_TAG: + return cty.NullVal(cty.DynamicPseudoType), nil + case "": + return cty.StringVal(src), nil + default: + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
+
+type mergeMapping struct{}
+
+var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
+var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
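
Before moving on to the scanner, a note on the numeric fallbacks in the resolveScalar INT/FLOAT branch above: underscores are stripped first, base-0 parsing then picks up the 0x and leading-zero octal forms, and YAML's 0b binary form gets an explicit base-2 parse. The stdlib-only sketch below illustrates those fallbacks; it is not the vendored code, which feeds the results into cty values.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseYAMLInt mirrors the integer fallbacks of the INT/FLOAT branch above.
func parseYAMLInt(src string) (int64, error) {
	plain := strings.ReplaceAll(src, "_", "") // YAML allows "_" digit separators
	if strings.HasPrefix(plain, "0b") {
		return strconv.ParseInt(plain[2:], 2, 64)
	}
	if strings.HasPrefix(plain, "-0b") {
		return strconv.ParseInt("-"+plain[3:], 2, 64)
	}
	// Base 0 handles decimal, 0x... hex, and 0... octal prefixes.
	return strconv.ParseInt(plain, 0, 64)
}

func main() {
	for _, s := range []string{"1_000", "0x1F", "0b1010", "-0b10"} {
		n, err := parseYAMLInt(s)
		fmt.Println(s, "->", n, err)
	}
}
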
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
new file mode 100644
index 00000000000..077fd1dd2d4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
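
The skip and read helpers above advance the buffer by width(parser.buffer[parser.buffer_pos]) because the scanner buffer holds UTF-8, where one character spans one to four bytes. width itself is defined elsewhere in this vendored package; the sketch below is an illustrative reimplementation of the idea, not the vendored function.

package main

import "fmt"

// width returns the byte length of the UTF-8 sequence whose leading byte is b,
// or 0 for an invalid leading byte, matching the classification used above.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	buf := []byte("a€b") // '€' occupies 3 bytes in UTF-8
	for pos := 0; pos < len(buf); pos += width(buf[pos]) {
		fmt.Printf("rune starts at byte %d, width %d\n", pos, width(buf[pos]))
	}
}
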
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. 
+ parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
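// yaml_parser_roll_indent and yaml_parser_unroll_indent above maintain a
// stack of block indentation levels; every level popped on the way out
// emits one BLOCK-END token. A minimal standalone sketch of that
// bookkeeping (names here are illustrative, not the library's):
package main

import "fmt"

type indenter struct {
	indent  int
	indents []int
}

// roll pushes the current level and opens a new block when the column grows.
func (in *indenter) roll(column int) bool {
	if in.indent < column {
		in.indents = append(in.indents, in.indent)
		in.indent = column
		return true // the scanner would queue BLOCK-SEQUENCE-START or BLOCK-MAPPING-START here
	}
	return false
}

// unroll pops levels until indent <= column, one BLOCK-END per pop.
func (in *indenter) unroll(column int) (blockEnds int) {
	for in.indent > column {
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
		blockEnds++
	}
	return blockEnds
}

func main() {
	in := &indenter{indent: -1} // the scanner starts at indent -1
	in.roll(0)                  // e.g. a top-level block mapping
	in.roll(2)                  // a nested block sequence
	fmt.Println(in.unroll(-1))  // 2: closing both open blocks emits two BLOCK-ENDs
}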
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not necessarily simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key.
+ simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. 
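// The fetch_value logic above is where YAML's "simple keys" pay off: the
// scanner only learns that a plain scalar was a key when it later sees ':',
// so it splices a KEY token back into the pending queue at position
// token_number - parser.tokens_parsed. A standalone sketch of that queue
// arithmetic (hypothetical types, not the library's):
package main

import "fmt"

func main() {
	tokensParsed := 5           // tokens already handed to the parser
	queue := []string{"SCALAR"} // pending tokens: the would-be key's scalar
	savedTokenNumber := 5       // recorded when the scalar started

	// On seeing ':', splice KEY in front of the saved scalar, then append VALUE.
	at := savedTokenNumber - tokensParsed // index inside the pending queue
	queue = append(queue[:at], append([]string{"KEY"}, queue[at:]...)...)
	queue = append(queue, "VALUE")

	fmt.Println(queue) // [KEY SCALAR VALUE]
}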
+ parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. 
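// yaml_parser_scan_to_next_token above loops over three kinds of filler:
// blanks (tabs only where a simple key cannot start), a '#' comment running
// to the end of the line, and line breaks. A simplified single-buffer sketch
// of the same loop (no refill or BOM handling; illustrative only):
package main

import "fmt"

func skipToToken(buf []byte, pos int, tabsOK bool) int {
	for pos < len(buf) {
		switch {
		case buf[pos] == ' ' || (tabsOK && buf[pos] == '\t'):
			pos++ // eat whitespace
		case buf[pos] == '#':
			for pos < len(buf) && buf[pos] != '\n' {
				pos++ // eat a comment until the line break
			}
		case buf[pos] == '\n':
			pos++ // eat the line break
		default:
			return pos // found the next token
		}
	}
	return pos
}

func main() {
	src := []byte("   # a comment\n  key: value")
	pos := skipToToken(src, 0, false)
	fmt.Printf("next token starts at %d: %q\n", pos, src[pos:pos+3])
}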
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. 
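// The version-number scan around this check caps each component of "%YAML
// x.y" at max_number_length (2) digits and accumulates it in an int8. The
// same parse, lifted out as a pure function (a sketch, not the library API):
package main

import (
	"errors"
	"fmt"
)

const maxNumberLength = 2

// scanVersionNumber returns the parsed component, the index where scanning
// stopped, and an error mirroring the scanner's two failure messages.
func scanVersionNumber(s string) (int8, int, error) {
	var value, length int8
	i := 0
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		length++
		if length > maxNumberLength {
			return 0, i, errors.New("found extremely long version number")
		}
		value = value*10 + int8(s[i]-'0')
		i++
	}
	if length == 0 {
		return 0, i, errors.New("did not find expected version number")
	}
	return value, i, nil
}

func main() {
	major, n, _ := scanVersionNumber("1.1")
	minor, _, _ := scanVersionNumber("1.1"[n+1:]) // skip the '.'
	fmt.Println(major, minor)                     // 1 1

	_, _, err := scanVersionNumber("123")
	fmt.Println(err) // the 2-digit cap rejects this
}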
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
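// yaml_parser_scan_tag above distinguishes three surface forms: a verbatim
// tag `!<uri>`, a named handle `!handle!suffix`, and a primary-handle tag
// `!suffix` (with bare `!` as a special case whose handle becomes empty and
// whose suffix becomes "!"). A rough standalone classifier for the same
// shapes (illustrative only; no URI validation or escape handling):
package main

import (
	"fmt"
	"strings"
)

func splitTag(tag string) (handle, suffix string) {
	switch {
	case strings.HasPrefix(tag, "!<") && strings.HasSuffix(tag, ">"):
		return "", tag[2 : len(tag)-1] // verbatim: empty handle
	case tag == "!":
		return "", "!" // the special '!' tag
	default:
		if i := strings.Index(tag[1:], "!"); i >= 0 {
			return tag[:i+2], tag[i+2:] // named handle like "!e!"
		}
		return "!", tag[1:] // primary handle
	}
}

func main() {
	for _, t := range []string{"!<tag:yaml.org,2002:str>", "!e!str", "!local", "!"} {
		h, s := splitTag(t)
		fmt.Printf("%-26q handle=%-6q suffix=%q\n", t, h, s)
	}
}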
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
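// yaml_parser_scan_uri_escapes above decodes %XX octets and insists they
// form one well-formed UTF-8 sequence: the first octet fixes the width, and
// every continuation octet must look like 10xxxxxx. A standalone version of
// that check (a sketch; width here reimplements the library's helper):
package main

import (
	"errors"
	"fmt"
)

// width returns the length of the UTF-8 sequence begun by b, or 0 if b
// cannot be a leading octet.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func unhex(c byte) (byte, bool) {
	switch {
	case c >= '0' && c <= '9':
		return c - '0', true
	case c >= 'a' && c <= 'f':
		return c - 'a' + 10, true
	case c >= 'A' && c <= 'F':
		return c - 'A' + 10, true
	}
	return 0, false
}

func decodeEscapes(s string) ([]byte, error) {
	var out []byte
	w := 0 // octets still expected in the current UTF-8 sequence
	for i := 0; i < len(s); i += 3 {
		if s[i] != '%' || i+2 >= len(s) {
			return nil, errors.New("did not find URI escaped octet")
		}
		hi, ok1 := unhex(s[i+1])
		lo, ok2 := unhex(s[i+2])
		if !ok1 || !ok2 {
			return nil, errors.New("did not find URI escaped octet")
		}
		octet := hi<<4 | lo
		if w == 0 {
			if w = width(octet); w == 0 {
				return nil, errors.New("incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, errors.New("incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
		w--
	}
	return out, nil
}

func main() {
	b, err := decodeEscapes("%C3%A9") // UTF-8 for 'é'
	fmt.Printf("%s %v\n", b, err)
}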
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
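// The "Chomp the tail" step above keeps the final line break by default
// (clip), drops it for '-' (strip), and keeps every trailing break for '+'
// (keep). A tiny standalone model of those three modes (a sketch over plain
// strings rather than the scanner's byte buffers):
package main

import "fmt"

// chomp takes the scalar body without trailing breaks, the first trailing
// break, and any further trailing breaks, mirroring leading_break and
// trailing_breaks in the scanner.
func chomp(body, leadingBreak, trailingBreaks string, chomping int) string {
	s := body
	if chomping != -1 {
		s += leadingBreak // clip and keep both retain the final break
	}
	if chomping == 1 {
		s += trailingBreaks // only keep retains the rest
	}
	return s
}

func main() {
	body, lead, rest := "text", "\n", "\n\n"
	for _, c := range []int{0, -1, +1} { // clip, strip, keep
		fmt.Printf("%+d -> %q\n", c, chomp(body, lead, rest, c))
	}
}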
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
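// The \x/\u/\U branch above writes the decoded code point as UTF-8 by hand,
// after rejecting surrogates (0xD800-0xDFFF) and values past 0x10FFFF. The
// same encoding ladder as a standalone function, checked against the
// standard library's encoder:
package main

import (
	"fmt"
	"unicode/utf8"
)

func encode(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F: // one byte: 0xxxxxxx
		s = append(s, byte(value))
	case value <= 0x7FF: // two bytes: 110xxxxx 10xxxxxx
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF: // three bytes
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default: // four bytes
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)),
			byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	for _, v := range []int{0x41, 0xE9, 0x2028, 0x1F600} {
		want := make([]byte, 4)
		n := utf8.EncodeRune(want, rune(v))
		fmt.Printf("U+%04X -> % X (stdlib agrees: %v)\n",
			v, encode(v), string(encode(v)) == string(want[:n]))
	}
}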
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/writerc.go new file mode 100644 index 00000000000..a2dde608cb7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yaml.go new file mode 100644 index 00000000000..2c314cc1646 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yaml.go @@ -0,0 +1,215 @@ +// Package yaml can marshal and unmarshal cty values in YAML format. +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// This is an alias for Unmarshal on the predefined Converter in "Standard". +// +// An error is returned if the given source contains any YAML document +// delimiters. 
+func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return Standard.Unmarshal(src, ty) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// This is an alias for Marshal on the predefined Converter in "Standard". +// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func Marshal(v cty.Value) ([]byte, error) { + return Standard.Marshal(v) +} + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +// +// This is an alias for ImpliedType on the predefined Converter in "Standard". +func ImpliedType(src []byte) (cty.Type, error) { + return Standard.ImpliedType(src) +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
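// Taken together, ImpliedType, Unmarshal, and Marshal above give a round
// trip for cty values. A hedged usage sketch (import paths assumed to be
// github.com/zclconf/go-cty-yaml and github.com/zclconf/go-cty as vendored
// here; the input document and attribute names are made up):
package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("name: example\ncount: 3\n")

	// Let the converter pick a type (an object type here), then decode.
	ty, err := yaml.ImpliedType(src)
	if err != nil {
		panic(err)
	}
	v, err := yaml.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v.GetAttr("name")) // cty.StringVal("example")

	// And back out again via the fixed cty->YAML mapping.
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}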
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlh.go new file mode 100644 index 00000000000..e25cee563be --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. 
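// getStructInfo above parses `yaml:"..."` struct tags: the first comma
// field is the key (empty means the lowercased field name), and the rest
// are flags (omitempty, flow, inline), with "-" skipping the field
// entirely. A standalone reading of the same convention via reflection
// (a sketch; this vendored package only uses the logic internally):
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Example struct {
	Name    string            `yaml:"name"`
	Count   int               `yaml:"count,omitempty"`
	Labels  map[string]string `yaml:",inline"`
	Skipped string            `yaml:"-"`
	Implied string
}

func main() {
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		tag := f.Tag.Get("yaml")
		if tag == "-" {
			fmt.Printf("%-8s skipped\n", f.Name)
			continue
		}
		parts := strings.Split(tag, ",")
		key := parts[0]
		if key == "" {
			key = strings.ToLower(f.Name) // default key, as in getStructInfo
		}
		fmt.Printf("%-8s key=%-9q flags=%v\n", f.Name, key, parts[1:])
	}
}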
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
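+
+ // As an illustration, parsing the document "a: 1" yields the event
+ // sequence: STREAM-START, DOCUMENT-START, MAPPING-START,
+ // SCALAR("a"), SCALAR("1"), MAPPING-END, DOCUMENT-END, STREAM-END.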
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? 
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
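+ // The masks below test the UTF-8 leading-byte patterns:
+ //	0xxxxxxx => 1 byte, 110xxxxx => 2 bytes,
+ //	1110xxxx => 3 bytes, 11110xxx => 4 bytes;
+ // anything else (e.g. a continuation byte 10xxxxxx) yields 0.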
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/LICENSE b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/LICENSE new file mode 100644 index 00000000000..d6503b55522 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/capsule.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/capsule.go new file mode 100644 index 00000000000..d273d14833f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -0,0 +1,89 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. 
A capsule-typed value supports no operations except equality, and
+// equality is implemented by pointer identity of the encapsulated pointer.
+//
+// The given name is used as the new type's "friendly name". This can be any
+// string in principle, but will usually be a short, all-lowercase name aimed
+// at users of the embedding language (i.e. not mention Go-specific details)
+// and will ideally not create ambiguity with any predefined cty type.
+//
+// Capsule types are never introduced by any standard cty operation, so a
+// calling application opts in to including them within its own type system
+// by creating them and introducing them via its own functions. At that point,
+// the application is responsible for dealing with any capsule-typed values
+// that might be returned.
+func Capsule(name string, nativeType reflect.Type) Type {
+ return Type{
+ &capsuleType{
+ Name: name,
+ GoType: nativeType,
+ },
+ }
+}
+
+// IsCapsuleType returns true if this type is a capsule type, as created
+// by cty.Capsule.
+func (t Type) IsCapsuleType() bool {
+ _, ok := t.typeImpl.(*capsuleType)
+ return ok
+}
+
+// EncapsulatedType returns the encapsulated native type of a capsule type,
+// or panics if the receiver is not a Capsule type.
+//
+// Use IsCapsuleType to determine if this method is safe to call.
+func (t Type) EncapsulatedType() reflect.Type {
+ impl, ok := t.typeImpl.(*capsuleType)
+ if !ok {
+ panic("not a capsule type")
+ }
+ return impl.GoType
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/collection.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/collection.go
new file mode 100644
index 00000000000..ab3919b14b7
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
+package cty
+
+import (
+ "errors"
+)
+
+type collectionTypeImpl interface {
+ ElementType() Type
+}
+
+// IsCollectionType returns true if the given type supports the operations
+// that are defined for all collection types.
+func (t Type) IsCollectionType() bool {
+ _, ok := t.typeImpl.(collectionTypeImpl)
+ return ok
+}
+
+// ElementType returns the element type of the receiver if it is a collection
+// type, or panics if it is not. Use IsCollectionType first to test whether
+// this method will succeed.
+func (t Type) ElementType() Type {
+ if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
+ return ct.ElementType()
+ }
+ panic(errors.New("not a collection type"))
+}
+
+// ElementCallback is a callback type used for iterating over elements of
+// collections and attributes of objects.
+//
+// The types of key and value depend on what type is being iterated over.
+// Return true to stop iterating after the current element, or false to
+// continue iterating.
+type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
new file mode 100644
index 00000000000..d84f6ac1049
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
+package convert
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
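+// (Aside: a short sketch of using the capsule type support from capsule.go
+// above; DB and dbType are illustrative names only:
+//
+//	type DB struct{ conn string }
+//	var dbType = cty.Capsule("db", reflect.TypeOf(DB{}))
+//
+//	v := cty.CapsuleVal(dbType, &DB{conn: "..."}) // wrap a *DB
+//	db := v.EncapsulatedValue().(*DB)             // unwrap it again
+//
+// Capsule values compare by pointer identity, per the notes above.)
+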
+// compareTypes implements a preference order for unification.
+//
+// The result of this method is not useful for anything other than unification
+// preferences, since it assumes that the caller will verify that any suggested
+// conversion is actually possible and it is thus able to make certain
+// optimistic assumptions.
+func compareTypes(a cty.Type, b cty.Type) int {
+
+ // DynamicPseudoType always has lowest preference, because anything can
+ // convert to it (it acts as a placeholder for "any type") and we want
+ // to optimistically assume that any dynamics will converge on matching
+ // their neighbors.
+ if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType {
+ if a != cty.DynamicPseudoType {
+ return -1
+ }
+ if b != cty.DynamicPseudoType {
+ return 1
+ }
+ return 0
+ }
+
+ if a.IsPrimitiveType() && b.IsPrimitiveType() {
+ // String is a supertype of all primitive types, because we can
+ // represent all primitive values as specially-formatted strings.
+ if a == cty.String || b == cty.String {
+ if a != cty.String {
+ return 1
+ }
+ if b != cty.String {
+ return -1
+ }
+ return 0
+ }
+ }
+
+ if a.IsListType() && b.IsListType() {
+ return compareTypes(a.ElementType(), b.ElementType())
+ }
+ if a.IsSetType() && b.IsSetType() {
+ return compareTypes(a.ElementType(), b.ElementType())
+ }
+ if a.IsMapType() && b.IsMapType() {
+ return compareTypes(a.ElementType(), b.ElementType())
+ }
+
+ // From this point on we may have swapped the two items in order to
+ // simplify our cases. Therefore any non-zero return after this point
+ // must be multiplied by "swap" to potentially invert the return value
+ // if needed.
+ swap := 1
+ switch {
+ case a.IsTupleType() && b.IsListType():
+ fallthrough
+ case a.IsObjectType() && b.IsMapType():
+ fallthrough
+ case a.IsSetType() && b.IsTupleType():
+ fallthrough
+ case a.IsSetType() && b.IsListType():
+ a, b = b, a
+ swap = -1
+ }
+
+ if b.IsSetType() && (a.IsTupleType() || a.IsListType()) {
+ // We'll just optimistically assume that the element types are
+ // unifyable/convertible, and let a second recursive pass
+ // figure out how to make that so.
+ return -1 * swap
+ }
+
+ if a.IsListType() && b.IsTupleType() {
+ // We'll just optimistically assume that the tuple's element types
+ // can be unified into something compatible with the list's element
+ // type.
+ return -1 * swap
+ }
+
+ if a.IsMapType() && b.IsObjectType() {
+ // We'll just optimistically assume that the object's attribute types
+ // can be unified into something compatible with the map's element
+ // type.
+ return -1 * swap
+ }
+
+ // For object and tuple types, comparing two types doesn't really tell
+ // the whole story because it may be possible to construct a new type C
+ // that is the supertype of both A and B by unifying each attribute/element
+ // separately. That possibility is handled by Unify as a follow-up if
+ // type sorting is insufficient to produce a valid result.
+ //
+ // Here we will take care of the simple possibilities where no new type
+ // is needed.
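+ // For example: compareTypes(cty.String, cty.Number) returns -1, because
+ // Number can convert safely to String, making String the preferred
+ // unification target; compareTypes(cty.DynamicPseudoType, cty.Bool)
+ // returns 1, preferring the concrete type.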
+ if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go new file mode 100644 index 00000000000..f9aacb4ee77 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -0,0 +1,143 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + return func(in cty.Value, path cty.Path) (cty.Value, error) { + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). 
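+ // For instance, a value typed as cty.DynamicPseudoType at check
+ // time may later turn out to hold a cty.String; dynamicFixup
+ // retries the conversion against the concrete type at evaluation
+ // time and reports an error only then if it cannot succeed.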
+ return dynamicFixup(out)
+
+ case in.IsPrimitiveType() && out.IsPrimitiveType():
+ conv := primitiveConversionsSafe[in][out]
+ if conv != nil {
+ return conv
+ }
+ if unsafe {
+ return primitiveConversionsUnsafe[in][out]
+ }
+ return nil
+
+ case out.IsObjectType() && in.IsObjectType():
+ return conversionObjectToObject(in, out, unsafe)
+
+ case out.IsTupleType() && in.IsTupleType():
+ return conversionTupleToTuple(in, out, unsafe)
+
+ case out.IsListType() && (in.IsListType() || in.IsSetType()):
+ inEty := in.ElementType()
+ outEty := out.ElementType()
+ if inEty.Equals(outEty) {
+ // This indicates that we're converting from a set to a list with
+ // the same element type, so we don't need an element converter.
+ return conversionCollectionToList(outEty, nil)
+ }
+
+ convEty := getConversion(inEty, outEty, unsafe)
+ if convEty == nil {
+ return nil
+ }
+ return conversionCollectionToList(outEty, convEty)
+
+ case out.IsSetType() && (in.IsListType() || in.IsSetType()):
+ if in.IsListType() && !unsafe {
+ // Conversion from list to set is unsafe because it will lose
+ // information: the ordering will not be preserved, and any
+ // duplicate elements will be conflated.
+ return nil
+ }
+ inEty := in.ElementType()
+ outEty := out.ElementType()
+ convEty := getConversion(inEty, outEty, unsafe)
+ if inEty.Equals(outEty) {
+ // This indicates that we're converting from a list to a set with
+ // the same element type, so we don't need an element converter.
+ return conversionCollectionToSet(outEty, nil)
+ }
+
+ if convEty == nil {
+ return nil
+ }
+ return conversionCollectionToSet(outEty, convEty)
+
+ case out.IsMapType() && in.IsMapType():
+ inEty := in.ElementType()
+ outEty := out.ElementType()
+ convEty := getConversion(inEty, outEty, unsafe)
+ if convEty == nil {
+ return nil
+ }
+ return conversionCollectionToMap(outEty, convEty)
+
+ case out.IsListType() && in.IsTupleType():
+ outEty := out.ElementType()
+ return conversionTupleToList(in, outEty, unsafe)
+
+ case out.IsSetType() && in.IsTupleType():
+ outEty := out.ElementType()
+ return conversionTupleToSet(in, outEty, unsafe)
+
+ case out.IsMapType() && in.IsObjectType():
+ outEty := out.ElementType()
+ return conversionObjectToMap(in, outEty, unsafe)
+
+ default:
+ return nil
+
+ }
+}
+
+// retConversion wraps a conversion (internal type) so it can be returned
+// as a Conversion (public type).
+func retConversion(conv conversion) Conversion {
+ if conv == nil {
+ return nil
+ }
+
+ return func(in cty.Value) (cty.Value, error) {
+ return conv(in, cty.Path(nil))
+ }
}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
new file mode 100644
index 00000000000..3039ba22e54
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
@@ -0,0 +1,340 @@
+package convert
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// conversionCollectionToList returns a conversion that will apply the given
+// conversion to all of the elements of a collection (something that supports
+// ForEachElement and LengthInt) and then returns the result as a list.
+//
+// "conv" can be nil if the elements are expected to already be of the
+// correct type and just need to be re-wrapped into a list. (For example,
+// if we're converting from a set into a list of the same element type.)
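+// Through the public API this corresponds to, for example (illustrative
+// values):
+//
+//	set := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//	lst, err := convert.Convert(set, cty.List(cty.String))
+//	// lst is a cty list containing the same two strings.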
+func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) +func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, path.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. 
+func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+ tupleEtys := tupleType.TupleElementTypes()
+
+ if len(tupleEtys) == 0 {
+ // Empty tuple short-circuit
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ return cty.SetValEmpty(listEty), nil
+ }
+ }
+
+ if listEty == cty.DynamicPseudoType {
+ // This is a special case where the caller wants us to find
+ // a suitable single type that all elements can convert to, if
+ // possible.
+ listEty, _ = unify(tupleEtys, unsafe)
+ if listEty == cty.NilType {
+ return nil
+ }
+ }
+
+ elemConvs := make([]conversion, len(tupleEtys))
+ for i, tupleEty := range tupleEtys {
+ if tupleEty.Equals(listEty) {
+ // no conversion required
+ continue
+ }
+
+ elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+ if elemConvs[i] == nil {
+ // If any of our element conversions are impossible, then our
+ // whole conversion is impossible.
+ return nil
+ }
+ }
+
+ // If we fall out here then a conversion is possible, using the
+ // element conversions in elemConvs
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ elems := make([]cty.Value, 0, len(elemConvs))
+ path = append(path, nil)
+ i := int64(0)
+ it := val.ElementIterator()
+ for it.Next() {
+ _, val := it.Element()
+ var err error
+
+ path[len(path)-1] = cty.IndexStep{
+ Key: cty.NumberIntVal(i),
+ }
+
+ conv := elemConvs[i]
+ if conv != nil {
+ val, err = conv(val, path)
+ if err != nil {
+ return cty.NilVal, err
+ }
+ }
+ elems = append(elems, val)
+
+ i++
+ }
+
+ return cty.SetVal(elems), nil
+ }
+}
+
+// conversionTupleToList returns a conversion that will take a value of the
+// given tuple type and return a list of the given element type.
+//
+// Will panic if the given tupleType isn't actually a tuple type.
+func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+ tupleEtys := tupleType.TupleElementTypes()
+
+ if len(tupleEtys) == 0 {
+ // Empty tuple short-circuit
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ return cty.ListValEmpty(listEty), nil
+ }
+ }
+
+ if listEty == cty.DynamicPseudoType {
+ // This is a special case where the caller wants us to find
+ // a suitable single type that all elements can convert to, if
+ // possible.
+ listEty, _ = unify(tupleEtys, unsafe)
+ if listEty == cty.NilType {
+ return nil
+ }
+ }
+
+ elemConvs := make([]conversion, len(tupleEtys))
+ for i, tupleEty := range tupleEtys {
+ if tupleEty.Equals(listEty) {
+ // no conversion required
+ continue
+ }
+
+ elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+ if elemConvs[i] == nil {
+ // If any of our element conversions are impossible, then our
+ // whole conversion is impossible.
+ return nil
+ }
+ }
+
+ // If we fall out here then a conversion is possible, using the
+ // element conversions in elemConvs
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ elems := make([]cty.Value, 0, len(elemConvs))
+ path = append(path, nil)
+ i := int64(0)
+ it := val.ElementIterator()
+ for it.Next() {
+ _, val := it.Element()
+ var err error
+
+ path[len(path)-1] = cty.IndexStep{
+ Key: cty.NumberIntVal(i),
+ }
+
+ conv := elemConvs[i]
+ if conv != nil {
+ val, err = conv(val, path)
+ if err != nil {
+ return cty.NilVal, err
+ }
+ }
+ elems = append(elems, val)
+
+ i++
+ }
+
+ return cty.ListVal(elems), nil
+ }
+}
+
+// conversionObjectToMap returns a conversion that will take a value of the
+// given object type and return a map of the given element type.
+//
+// Will panic if the given objectType isn't actually an object type.
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
+ objectAtys := objectType.AttributeTypes()
+
+ if len(objectAtys) == 0 {
+ // Empty object short-circuit
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ return cty.MapValEmpty(mapEty), nil
+ }
+ }
+
+ if mapEty == cty.DynamicPseudoType {
+ // This is a special case where the caller wants us to find
+ // a suitable single type that all elements can convert to, if
+ // possible.
+ objectAtysList := make([]cty.Type, 0, len(objectAtys))
+ for _, aty := range objectAtys {
+ objectAtysList = append(objectAtysList, aty)
+ }
+ mapEty, _ = unify(objectAtysList, unsafe)
+ if mapEty == cty.NilType {
+ return nil
+ }
+ }
+
+ elemConvs := make(map[string]conversion, len(objectAtys))
+ for name, objectAty := range objectAtys {
+ if objectAty.Equals(mapEty) {
+ // no conversion required
+ continue
+ }
+
+ elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
+ if elemConvs[name] == nil {
+ // If any of our element conversions are impossible, then our
+ // whole conversion is impossible.
+ return nil
+ }
+ }
+
+ // If we fall out here then a conversion is possible, using the
+ // element conversions in elemConvs
+ return func(val cty.Value, path cty.Path) (cty.Value, error) {
+ elems := make(map[string]cty.Value, len(elemConvs))
+ path = append(path, nil)
+ it := val.ElementIterator()
+ for it.Next() {
+ name, val := it.Element()
+ var err error
+
+ path[len(path)-1] = cty.IndexStep{
+ Key: name,
+ }
+
+ conv := elemConvs[name.AsString()]
+ if conv != nil {
+ val, err = conv(val, path)
+ if err != nil {
+ return cty.NilVal, err
+ }
+ }
+ elems[name.AsString()] = val
+ }
+
+ return cty.MapVal(elems), nil
+ }
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 00000000000..4d19cf6c5cb
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
+package convert
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// dynamicFixup deals with just-in-time conversions of values that were
+// input-typed as cty.DynamicPseudoType during analysis, ensuring that
+// we end up with the desired output type once the value is known, or
+// failing with an error if that is not possible.
+//
+// This is in the spirit of the cty philosophy of optimistically assuming that
+// DynamicPseudoType values will become the intended value eventually, and
+// dealing with any inconsistencies during final evaluation.
+func dynamicFixup(wantType cty.Type) conversion {
+ return func(in cty.Value, path cty.Path) (cty.Value, error) {
+ ret, err := Convert(in, wantType)
+ if err != nil {
+ // Re-wrap this error so that the returned path is relative
+ // to the caller's original value, rather than relative to our
+ // conversion value here.
+ return cty.NilVal, path.NewError(err)
+ }
+ return ret, nil
+ }
+}
+
+// dynamicPassthrough is an identity conversion that is used when the
+// target type is DynamicPseudoType, indicating that the caller doesn't care
+// which type is returned.
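+// For example, Convert(cty.NumberIntVal(1), cty.DynamicPseudoType) returns
+// the number value unchanged via this passthrough.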
+func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
+ return in, nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
new file mode 100644
index 00000000000..62dabb8d1bb
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
@@ -0,0 +1,76 @@
+package convert
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// conversionObjectToObject returns a conversion that will make the input
+// object type conform to the output object type, if possible.
+//
+// Conversion is possible only if the output type is a subset of the input
+// type, meaning that each attribute of the output type has a corresponding
+// attribute in the input type where a recursive conversion is available.
+//
+// Shallow object conversions work the same for both safe and unsafe modes,
+// but the safety flag is passed on to recursive conversions and may thus
+// limit the above definition of "subset".
+func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion {
+ inAtys := in.AttributeTypes()
+ outAtys := out.AttributeTypes()
+ attrConvs := make(map[string]conversion)
+
+ for name, outAty := range outAtys {
+ inAty, exists := inAtys[name]
+ if !exists {
+ // No conversion is available, then.
+ return nil
+ }
+
+ if inAty.Equals(outAty) {
+ // No conversion needed, but we'll still record the attribute
+ // in our map for later reference.
+ attrConvs[name] = nil
+ continue
+ }
+
+ attrConvs[name] = getConversion(inAty, outAty, unsafe)
+ if attrConvs[name] == nil {
+ // If a recursive conversion isn't available, then our top-level
+ // conversion is impossible too.
+ return nil
+ }
+ }
+
+ // If we get here then a conversion is possible, using the attribute
+ // conversions given in attrConvs.
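+ // For instance, converting from
+ // cty.Object(map[string]cty.Type{"a": cty.Number, "b": cty.Bool}) to
+ // cty.Object(map[string]cty.Type{"a": cty.String, "b": cty.Bool})
+ // records a safe number-to-string conversion for "a" and a nil
+ // (no-op) entry for "b".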
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 00000000000..e0dbf491e0f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,48 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 00000000000..592980a7012 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. +func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion { + inEtys := in.TupleElementTypes() + outEtys := out.TupleElementTypes() + + if len(inEtys) != len(outEtys) { + return nil // no conversion is possible + } + + elemConvs := make([]conversion, len(inEtys)) + + for i, outEty := range outEtys { + inEty := inEtys[i] + + if inEty.Equals(outEty) { + // No conversion needed, so we can leave this one nil. 
+			continue
+		}
+
+		elemConvs[i] = getConversion(inEty, outEty, unsafe)
+		if elemConvs[i] == nil {
+			// If a recursive conversion isn't available, then our top-level
+			// conversion is impossible too.
+			return nil
+		}
+	}
+
+	// If we get here then a conversion is possible, using the element
+	// conversions given in elemConvs.
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elemVals := make([]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		pathStep := &path[len(path)-1]
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, val := it.Element()
+			var err error
+
+			*pathStep = cty.IndexStep{
+				Key: cty.NumberIntVal(int64(i)),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elemVals[i] = val
+		}
+
+		return cty.TupleVal(elemVals), nil
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/doc.go
new file mode 100644
index 00000000000..2037299bab4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/doc.go
@@ -0,0 +1,15 @@
+// Package convert contains some routines for converting between cty types.
+// The intent of providing this package is to encourage applications using
+// cty to have consistent type conversion behavior for maximal interoperability
+// when Values pass from one application to another.
+//
+// The conversions are categorized into two categories. "Safe" conversions are
+// ones that are guaranteed to succeed if given a non-null value of the
+// appropriate source type. "Unsafe" conversions, on the other hand, are valid
+// for only a subset of input values, and thus may fail with an error when
+// called for values outside of that valid subset.
+//
+// The functions whose names end in Unsafe support all of the conversions that
+// are supported by the corresponding functions whose names do not have that
+// suffix, and then additional unsafe conversions as well.
+package convert
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
new file mode 100644
index 00000000000..581304ecd5b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
@@ -0,0 +1,220 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MismatchMessage is a helper to return an English-language description of
+// the differences between got and want, phrased as a reason why got does
+// not conform to want.
+//
+// This function does not itself attempt conversion, and so it should generally
+// be used only after a conversion has failed, to report the conversion failure
+// to an English-speaking user. The result will be confusing if got is actually
+// conforming to or convertible to want.
+//
+// The shorthand helper function Convert uses this function internally to
+// produce its error messages, so callers of that function do not need to
+// also use MismatchMessage.
+//
+// This function is similar to Type.TestConformance, but it is tailored to
+// describing conversion failures and so the messages it generates relate
+// specifically to the conversion rules implemented in this package.
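+//
+// As a hedged illustration (editor's sketch, not upstream code): asking why
+// an object missing a required attribute failed to convert yields the
+// missing-attribute form of the message:
+//
+//     got := cty.EmptyObject
+//     want := cty.Object(map[string]cty.Type{"name": cty.String})
+//     msg := convert.MismatchMessage(got, want)
+//     // msg is `attribute "name" is required`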
+func MismatchMessage(got, want cty.Type) string {
+	switch {
+
+	case got.IsObjectType() && want.IsObjectType():
+		// If both types are object types then we may be able to say something
+		// about their respective attributes.
+		return mismatchMessageObjects(got, want)
+
+	case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to list failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all list elements must have the same type"
+
+	case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to set failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all set elements must have the same type"
+
+	case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from object to map failed then it's because we couldn't
+		// find a common type to convert all of the object attributes to.
+		return "all map elements must have the same type"
+
+	case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType():
+		return mismatchMessageCollectionsFromStructural(got, want)
+
+	case got.IsCollectionType() && want.IsCollectionType():
+		return mismatchMessageCollectionsFromCollections(got, want)
+
+	default:
+		// If we have nothing better to say, we'll just state what was required.
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageObjects(got, want cty.Type) string {
+	// Per our conversion rules, "got" is allowed to be a superset of "want",
+	// and so we'll produce error messages here under that assumption.
+	gotAtys := got.AttributeTypes()
+	wantAtys := want.AttributeTypes()
+
+	// If we find missing attributes then we'll report those in preference,
+	// but if not then we will report a maximum of one non-conforming
+	// attribute, just to keep our messages relatively terse.
+	// We'll also prefer to report a recursive type error from an _unsafe_
+	// conversion over a safe one, because these are subjectively more
+	// "serious".
+	var missingAttrs []string
+	var unsafeMismatchAttr string
+	var safeMismatchAttr string
+
+	for name, wantAty := range wantAtys {
+		gotAty, exists := gotAtys[name]
+		if !exists {
+			missingAttrs = append(missingAttrs, name)
+			continue
+		}
+
+		// We'll now try to convert these attributes in isolation and
+		// see if we have a nested conversion error to report.
+		// We check the unsafe conversion first and then also check the
+		// safe one; if only the safe conversion fails then the safe
+		// message is our fallback, since an unsafe failure is reported
+		// in preference when both are present.
+
+		// If we already have an unsafe mismatch attr error then we won't bother
+		// hunting for another one.
+		if unsafeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil {
+			unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+
+		// If we already have a safe mismatch attr error then we won't bother
+		// hunting for another one.
+		if safeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversion(gotAty, wantAty); conv == nil {
+			safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+	}
+
+	// We should now have collected at least one problem. If we have more than
+	// one then we'll use our preference order to decide what is most important
+	// to report.
+	switch {
+
+	case len(missingAttrs) != 0:
+		sort.Strings(missingAttrs)
+		switch len(missingAttrs) {
+		case 1:
+			return fmt.Sprintf("attribute %q is required", missingAttrs[0])
+		case 2:
+			return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1])
+		default:
+			var buf bytes.Buffer
+			for _, name := range missingAttrs[:len(missingAttrs)-1] {
+				fmt.Fprintf(&buf, "%q, ", name)
+			}
+			fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1])
+			return fmt.Sprintf("attributes %s are required", buf.Bytes())
+		}
+
+	case unsafeMismatchAttr != "":
+		return unsafeMismatchAttr
+
+	case safeMismatchAttr != "":
+		return safeMismatchAttr
+
+	default:
+		// We should never get here, but if we do then we'll return
+		// just a generic message.
+		return "incorrect object attributes"
+	}
+}
+
+func mismatchMessageCollectionsFromStructural(got, want cty.Type) string {
+	// First some straightforward cases where the kind is just altogether wrong.
+	switch {
+	case want.IsListType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsObjectType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll move on to checking
+	// individual elements.
+	wantEty := want.ElementType()
+	switch {
+	case got.IsTupleType():
+		for i, gotEty := range got.TupleElementTypes() {
+			if gotEty.Equals(wantEty) {
+				continue // exact match, so no problem
+			}
+			if conv := getConversion(gotEty, wantEty, true); conv != nil {
+				continue // conversion is available, so no problem
+			}
+			return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty))
+		}
+
+		// If we get down here then something weird is going on but we'll
+		// return a reasonable fallback message anyway.
+		return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
+
+	case got.IsObjectType():
+		for name, gotAty := range got.AttributeTypes() {
+			if gotAty.Equals(wantEty) {
+				continue // exact match, so no problem
+			}
+			if conv := getConversion(gotAty, wantEty, true); conv != nil {
+				continue // conversion is available, so no problem
+			}
+			return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty))
+		}
+
+		// If we get down here then something weird is going on but we'll
+		// return a reasonable fallback message anyway.
+		return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
+
+	default:
+		// Should not be possible to get here since we only call this function
+		// with got as structural types, but...
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageCollectionsFromCollections(got, want cty.Type) string {
+	// First some straightforward cases where the kind is just altogether wrong.
+	switch {
+	case want.IsListType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsMapType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll check the element types.
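+	// For example (an editor's sketch, not upstream code): with got =
+	// cty.List(cty.Bool) and want = cty.List(cty.List(cty.String)) the
+	// element types cannot convert, so the result below reads
+	// "incorrect list element type: list of string required".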
+	gotEty := got.ElementType()
+	wantEty := want.ElementType()
+	noun := "element type"
+	switch {
+	case want.IsListType():
+		noun = "list element type"
+	case want.IsSetType():
+		noun = "set element type"
+	case want.IsMapType():
+		noun = "map element type"
+	}
+	return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty))
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/public.go
new file mode 100644
index 00000000000..af19bdc501c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/public.go
@@ -0,0 +1,83 @@
+package convert
+
+import (
+	"errors"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// This file contains the public interface of this package, which is intended
+// to be a small, convenient interface designed for easy integration into
+// a hypothetical language type checker and interpreter.
+
+// Conversion is a named function type representing a conversion from a
+// value of one type to a value of another type.
+//
+// The source type for a conversion is always the source type given to
+// the function that returned the Conversion, but there is no way to recover
+// that from a Conversion value itself. If a Conversion is given a value
+// that is not of its expected type (with the exception of DynamicPseudoType,
+// which is always supported) then the function may panic or produce undefined
+// results.
+type Conversion func(in cty.Value) (out cty.Value, err error)
+
+// GetConversion returns a Conversion between the given in and out Types if
+// a safe one is available, or returns nil otherwise.
+func GetConversion(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, false))
+}
+
+// GetConversionUnsafe returns a Conversion between the given in and out Types
+// if either a safe or unsafe one is available, or returns nil otherwise.
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, errors.New(MismatchMessage(in.Type(), want))
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
+//
+// If a common supertype *can* be found, the returned slice will always be
+// non-nil and will contain a non-nil conversion for each given type that
+// needs to be converted, with indices corresponding to the input slice.
+// Any given type that does *not* need conversion (because it is already of
+// the appropriate type) will have a nil Conversion.
+//
+// cty.DynamicPseudoType is, as usual, a special case.
+// If the given type list contains a mixture of dynamic and non-dynamic types,
+// the dynamic types are disregarded for type selection and a conversion is
+// returned for them that will attempt a late conversion of the given value
+// to the target type, failing with a conversion error if the eventual
+// concrete type is not compatible. If *all* given types are DynamicPseudoType,
+// or in the degenerate case of an empty slice of types, the returned type is
+// itself cty.DynamicPseudoType and no conversions are attempted.
+func Unify(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, false)
+}
+
+// UnifyUnsafe is the same as Unify except that it may return unsafe
+// conversions in situations where a safe conversion isn't also available.
+func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, true)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
new file mode 100644
index 00000000000..b7769106d11
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
@@ -0,0 +1,69 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// sortTypes produces an ordering of the given types that serves as a
+// preference order for the result of unification of the given types.
+// The return value is a slice of indices into the given slice, and will
+// thus always be the same length as the given slice.
+//
+// The goal is that the most general of the given types will appear first
+// in the ordering. If there are uncomparable pairs of types in the list
+// then they will appear in an undefined order, and the unification pass
+// will presumably then fail.
+func sortTypes(tys []cty.Type) []int {
+	l := len(tys)
+
+	// First we build a graph whose edges represent "more general than",
+	// which we will then do a topological sort of.
+	edges := make([][]int, l)
+	for i := 0; i < (l - 1); i++ {
+		for j := i + 1; j < l; j++ {
+			cmp := compareTypes(tys[i], tys[j])
+			switch {
+			case cmp < 0:
+				edges[i] = append(edges[i], j)
+			case cmp > 0:
+				edges[j] = append(edges[j], i)
+			}
+		}
+	}
+
+	// Compute the in-degree of each node
+	inDegree := make([]int, l)
+	for _, outs := range edges {
+		for _, j := range outs {
+			inDegree[j]++
+		}
+	}
+
+	// The array backing our result will double as our queue for visiting
+	// the nodes, with the queue slice moving along this array until it
+	// is empty and positioned at the end of the array. Thus our visiting
+	// order is also our result order.
+	result := make([]int, l)
+	queue := result[0:0]
+
+	// Initialize the queue with any items of in-degree 0, preserving
+	// their relative order.
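+	// (Editor's note) What follows is Kahn's algorithm: types with no
+	// incoming "more general than" edges are seeded first, and each pop
+	// releases its successors once their remaining in-degree drops to zero.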
+ for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/unify.go new file mode 100644 index 00000000000..53ebbfe08a1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -0,0 +1,314 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. 
+				continue Preferences
+			}
+		}
+
+		return wantType, conversions
+	}
+
+	// If we fall out here, no unification is possible
+	return cty.NilType, nil
+}
+
+func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given object types have the same set of attribute names
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given object types have different attribute names or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the attribute types together to produce a map type.
+	//
+	// Our unification behavior is intentionally stricter than our conversion
+	// behavior for subset object types because user intent is different with
+	// unification use-cases: it makes sense to allow {"foo":true} to convert
+	// to an empty object value, but unifying an object type that has an
+	// attribute with the empty object type should be an error, because
+	// unifying to the empty object type would be surprising and useless.
+
+	firstAttrs := types[0].AttributeTypes()
+	for _, ty := range types[1:] {
+		thisAttrs := ty.AttributeTypes()
+		if len(thisAttrs) != len(firstAttrs) {
+			// If number of attributes is different then there can be no
+			// object type in common.
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+		for name := range thisAttrs {
+			if _, ok := firstAttrs[name]; !ok {
+				// If attribute names don't exactly match then there can be
+				// no object type in common.
+				return unifyObjectTypesToMap(types, unsafe)
+			}
+		}
+	}
+
+	// If we get here then we've proven that all of the given object types
+	// have exactly the same set of attribute names, though the types may
+	// differ.
+	retAtys := make(map[string]cty.Type)
+	atysAcross := make([]cty.Type, len(types))
+	for name := range firstAttrs {
+		for i, ty := range types {
+			atysAcross[i] = ty.AttributeType(name)
+		}
+		retAtys[name], _ = unify(atysAcross, unsafe)
+		if retAtys[name] == cty.NilType {
+			// Cannot unify this attribute alone, which means that unification
+			// of everything down to a map type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Object(retAtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyObjectTypes, where we see if we can
+	// construct a map type that can accept all of the attribute types.
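+	// For example (an editor's sketch, not upstream code):
+	// cty.Object({"a": cty.String}) and cty.Object({"b": cty.Number}) have
+	// mismatched attribute names, so they would unify here to
+	// cty.Map(cty.String), Number being safely convertible to String.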
+
+	var atys []cty.Type
+	for _, ty := range types {
+		for _, aty := range ty.AttributeTypes() {
+			atys = append(atys, aty)
+		}
+	}
+
+	ety, _ := unify(atys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given tuple types have the same sequence of element types
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given tuple types have different element types or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the element types together to produce a list type.
+
+	firstEtys := types[0].TupleElementTypes()
+	for _, ty := range types[1:] {
+		thisEtys := ty.TupleElementTypes()
+		if len(thisEtys) != len(firstEtys) {
+			// If number of elements is different then there can be no
+			// tuple type in common.
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	// If we get here then we've proven that all of the given tuple types
+	// have the same number of elements, though the types may differ.
+	retEtys := make([]cty.Type, len(firstEtys))
+	atysAcross := make([]cty.Type, len(types))
+	for idx := range firstEtys {
+		for tyI, ty := range types {
+			atysAcross[tyI] = ty.TupleElementTypes()[idx]
+		}
+		retEtys[idx], _ = unify(atysAcross, unsafe)
+		if retEtys[idx] == cty.NilType {
+			// Cannot unify this element alone, which means that unification
+			// of everything down to a list type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Tuple(retEtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyTupleTypes, where we see if we can
+	// construct a list type that can accept all of the element types.
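+	// For example (an editor's sketch, not upstream code):
+	// cty.Tuple([]cty.Type{cty.String}) and
+	// cty.Tuple([]cty.Type{cty.String, cty.String}) differ in length, so
+	// they would unify here to cty.List(cty.String).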
+
+	var etys []cty.Type
+	for _, ty := range types {
+		for _, ety := range ty.TupleElementTypes() {
+			etys = append(etys, ety)
+		}
+	}
+
+	ety, _ := unify(etys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.List(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify the
+			// element types above; the object-to-map fallback used by the
+			// object equivalent of this function would panic on tuple types,
+			// so we just report that no unification is possible.
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
+	conversions := make([]Conversion, len(types))
+	for i := range conversions {
+		conversions[i] = func(cty.Value) (cty.Value, error) {
+			return cty.DynamicVal, nil
+		}
+	}
+	return cty.DynamicPseudoType, conversions
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/doc.go
new file mode 100644
index 00000000000..d31f0547bf4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/doc.go
@@ -0,0 +1,18 @@
+// Package cty (pronounced see-tie) provides some infrastructure for a type
+// system that might be useful for applications that need to represent
+// configuration values provided by the user whose types are not known
+// at compile time, particularly if the calling application also allows
+// such values to be used in expressions.
+//
+// The type system consists of primitive types Number, String and Bool, as
+// well as List and Map collection types and Object types that can have
+// arbitrarily-typed sets of attributes.
+//
+// A set of operations is defined on these types, which is accessible via
+// the wrapper struct Value, which annotates the raw, internal representation
+// of a value with its corresponding type.
+//
+// This package is oriented towards being a building block for configuration
+// languages used to bootstrap an application. It is not optimized for use
+// in tight loops where CPU time or memory pressure are a concern.
+package cty
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/element_iterator.go
new file mode 100644
index 00000000000..0bf84c774a9
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/element_iterator.go
@@ -0,0 +1,191 @@
+package cty
+
+import (
+	"sort"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// ElementIterator is the interface type returned by Value.ElementIterator to
+// allow the caller to iterate over elements of a collection-typed value.
+//
+// Its usage pattern is as follows:
+//
+//     it := val.ElementIterator()
+//     for it.Next() {
+//         key, val := it.Element()
+//         // ...
+// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. + atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/error.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/error.go
new file mode 100644
index 00000000000..dd139f72499
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// if we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/argument.go
new file mode 100644
index 00000000000..bfd30157e68
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/argument.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Parameter represents a parameter to a function.
+type Parameter struct {
+	// Name is an optional name for the argument. This package ignores this
+	// value, but callers may use it for documentation, etc.
+	Name string
+
+	// A type that any argument for this parameter must conform to.
+	// cty.DynamicPseudoType can be used, either at top-level or nested
+	// in a parameterized type, to indicate that any type should be
+	// permitted, to allow the definition of type-generic functions.
+	Type cty.Type
+
+	// If AllowNull is set then null values may be passed into this
+	// argument's slot in both the type-check function and the implementation
+	// function. If not set, such values are rejected by the built-in
+	// checking rules.
+	AllowNull bool
+
+	// If AllowUnknown is set then unknown values may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// unknown values will cause the function to immediately return
+	// an unknown value without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	AllowUnknown bool
+
+	// If AllowDynamicType is set then DynamicVal may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// dynamic values will cause the function to immediately return
+	// DynamicVal without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	//
+	// Note that DynamicVal is also unknown, so in order to receive dynamic
+	// *values* it is also necessary to set AllowUnknown.
+	//
+	// However, it is valid to set AllowDynamicType without AllowUnknown, in
+	// which case a dynamic value may be passed to the type checking function
+	// but will not make it to the *implementation* function. Instead, an
+	// unknown value of the type returned by the type-check function will be
+	// returned. This is suggested for functions that have a static return
+	// type since it allows the return value to be typed even if the input
+	// values are not, thus improving the type-check accuracy of derived
+	// values.
+	AllowDynamicType bool
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/doc.go
new file mode 100644
index 00000000000..393b3110bc5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -0,0 +1,6 @@
+// Package function builds on the functionality of cty by modeling functions
+// that operate on cty Values.
+//
+// Functions are, at their core, Go anonymous functions. However, this package
+// wraps utility functions around them for parameter type checking, etc.
+package function
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/error.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/error.go
new file mode 100644
index 00000000000..2b56779986c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/error.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+// ArgError represents an error with one of the arguments in a call. The
+// attribute Index represents the zero-based index of the argument in question.
+//
+// Its error *may* be a cty.PathError, in which case the error actually
+// pertains to a nested value within the data structure passed as the argument.
+type ArgError struct {
+	error
+	Index int
+}
+
+func NewArgErrorf(i int, f string, args ...interface{}) error {
+	return ArgError{
+		error: fmt.Errorf(f, args...),
+		Index: i,
+	}
+}
+
+func NewArgError(i int, err error) error {
+	return ArgError{
+		error: err,
+		Index: i,
+	}
+}
+
+// PanicError indicates that a panic occurred while executing either a
+// function's type or implementation function. This is captured and wrapped
+// into a normal error so that callers (expected to be language runtimes)
+// are freed from having to deal with panics in buggy functions.
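+//
+// A hedged usage sketch (editor's note, not upstream code): a caller can
+// detect the wrapped panic with a type assertion:
+//
+//     if pe, ok := err.(function.PanicError); ok {
+//         log.Printf("panic in function: %v", pe.Value)
+//     }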
+type PanicError struct { + Value interface{} + Stack []byte +} + +func errorForPanic(val interface{}) error { + return PanicError{ + Value: val, + Stack: debug.Stack(), + } +} + +func (e PanicError) Error() string { + return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/function.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/function.go new file mode 100644 index 00000000000..9e8bf3376a8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -0,0 +1,291 @@ +package function + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// Function represents a function. This is the main type in this package. +type Function struct { + spec *Spec +} + +// Spec is the specification of a function, used to instantiate +// a new Function. +type Spec struct { + // Params is a description of the positional parameters for the function. + // The standard checking logic rejects any calls that do not provide + // arguments conforming to this definition, freeing the function + // implementer from dealing with such inconsistencies. + Params []Parameter + + // VarParam is an optional specification of additional "varargs" the + // function accepts. If this is non-nil then callers may provide an + // arbitrary number of additional arguments (after those matching with + // the fixed parameters in Params) that conform to the given specification, + // which will appear as additional values in the slices of values + // provided to the type and implementation functions. + VarParam *Parameter + + // Type is the TypeFunc that decides the return type of the function + // given its arguments, which may be Unknown. See the documentation + // of TypeFunc for more information. + // + // Use StaticReturnType if the function's return type does not vary + // depending on its arguments. + Type TypeFunc + + // Impl is the ImplFunc that implements the function's behavior. + // + // Functions are expected to behave as pure functions, and not create + // any visible side-effects. + // + // If a TypeFunc is also provided, the value returned from Impl *must* + // conform to the type it returns, or a call to the function will panic. + Impl ImplFunc +} + +// New creates a new function with the given specification. +// +// After passing a Spec to this function, the caller must no longer read from +// or mutate it. +func New(spec *Spec) Function { + f := Function{ + spec: spec, + } + return f +} + +// TypeFunc is a callback type for determining the return type of a function +// given its arguments. +// +// Any of the values passed to this function may be unknown, even if the +// parameters are not configured to accept unknowns. +// +// If any of the given values are *not* unknown, the TypeFunc may use the +// values for pre-validation and for choosing the return type. For example, +// a hypothetical JSON-unmarshalling function could return +// cty.DynamicPseudoType if the given JSON string is unknown, but return +// a concrete type based on the JSON structure if the JSON string is already +// known. +type TypeFunc func(args []cty.Value) (cty.Type, error) + +// ImplFunc is a callback type for the main implementation of a function. +// +// "args" are the values for the arguments, and this slice will always be at +// least as long as the argument definition slice for the function. 
+// +// "retType" is the type returned from the Type callback, included as a +// convenience to avoid the need to re-compute the return type for generic +// functions whose return type is a function of the arguments. +type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) + +// StaticReturnType returns a TypeFunc that always returns the given type. +// +// This is provided as a convenience for defining a function whose return +// type does not depend on the argument types. +func StaticReturnType(ty cty.Type) TypeFunc { + return func([]cty.Value) (cty.Type, error) { + return ty, nil + } +} + +// ReturnType returns the return type of a function given a set of candidate +// argument types, or returns an error if the given types are unacceptable. +// +// If the caller already knows values for at least some of the arguments +// it can be better to call ReturnTypeForValues, since certain functions may +// determine their return types from their values and return DynamicVal if +// the values are unknown. +func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { + vals := make([]cty.Value, len(argTypes)) + for i, ty := range argTypes { + vals[i] = cty.UnknownVal(ty) + } + return f.ReturnTypeForValues(vals) +} + +// ReturnTypeForValues is similar to ReturnType but can be used if the caller +// already knows the values of some or all of the arguments, in which case +// the function may be able to determine a more definite result if its +// return type depends on the argument *values*. +// +// For any arguments whose values are not known, pass an Unknown value of +// the appropriate type. +func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { + var posArgs []cty.Value + var varArgs []cty.Value + + if f.spec.VarParam == nil { + if len(args) != len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (%d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args + varArgs = nil + } else { + if len(args) < len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (at least %d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args[0:len(f.spec.Params)] + varArgs = args[len(f.spec.Params):] + } + + for i, spec := range f.spec.Params { + val := posArgs[i] + + if val.IsNull() && !spec.AllowNull { + return cty.Type{}, NewArgErrorf(i, "argument must not be null") + } + + // AllowUnknown is ignored for type-checking, since we expect to be + // able to type check with unknown values. We *do* still need to deal + // with DynamicPseudoType here though, since the Type function might + // not be ready to deal with that. + + if val.Type() == cty.DynamicPseudoType { + if !spec.AllowDynamicType { + return cty.DynamicPseudoType, nil + } + } else if errs := val.Type().TestConformance(spec.Type); errs != nil { + // For now we'll just return the first error in the set, since + // we don't have a good way to return the whole list here. + // Would be good to do something better at some point... 
+			return cty.Type{}, NewArgError(i, errs[0])
+		}
+	}
+
+	if varArgs != nil {
+		spec := f.spec.VarParam
+		for i, val := range varArgs {
+			realI := i + len(posArgs)
+
+			if val.IsNull() && !spec.AllowNull {
+				return cty.Type{}, NewArgErrorf(realI, "argument must not be null")
+			}
+
+			if val.Type() == cty.DynamicPseudoType {
+				if !spec.AllowDynamicType {
+					return cty.DynamicPseudoType, nil
+				}
+			} else if errs := val.Type().TestConformance(spec.Type); errs != nil {
+				// For now we'll just return the first error in the set, since
+				// we don't have a good way to return the whole list here.
+				// Would be good to do something better at some point...
+				return cty.Type{}, NewArgError(realI, errs[0])
+			}
+		}
+	}
+
+	// Intercept any panics from the function and return them as normal errors,
+	// so a calling language runtime doesn't need to deal with panics.
+	defer func() {
+		if r := recover(); r != nil {
+			ty = cty.NilType
+			err = errorForPanic(r)
+		}
+	}()
+
+	return f.spec.Type(args)
+}
+
+// Call actually calls the function with the given arguments, which must
+// conform to the function's parameter specification or an error will be
+// returned.
+func (f Function) Call(args []cty.Value) (val cty.Value, err error) {
+	expectedType, err := f.ReturnTypeForValues(args)
+	if err != nil {
+		return cty.NilVal, err
+	}
+
+	// Type checking already dealt with most situations relating to our
+	// parameter specification, but we still need to deal with unknown
+	// values.
+	posArgs := args[:len(f.spec.Params)]
+	varArgs := args[len(f.spec.Params):]
+
+	for i, spec := range f.spec.Params {
+		val := posArgs[i]
+
+		if !val.IsKnown() && !spec.AllowUnknown {
+			return cty.UnknownVal(expectedType), nil
+		}
+	}
+
+	if f.spec.VarParam != nil {
+		spec := f.spec.VarParam
+		for _, val := range varArgs {
+			if !val.IsKnown() && !spec.AllowUnknown {
+				return cty.UnknownVal(expectedType), nil
+			}
+		}
+	}
+
+	var retVal cty.Value
+	{
+		// Intercept any panics from the function and return them as normal errors,
+		// so a calling language runtime doesn't need to deal with panics.
+		defer func() {
+			if r := recover(); r != nil {
+				val = cty.NilVal
+				err = errorForPanic(r)
+			}
+		}()
+
+		retVal, err = f.spec.Impl(args, expectedType)
+		if err != nil {
+			return cty.NilVal, err
+		}
+	}
+
+	// Returned value must conform to what the Type function expected, to
+	// protect callers from having to deal with inconsistencies.
+	if errs := retVal.Type().TestConformance(expectedType); errs != nil {
+		panic(fmt.Errorf(
+			"returned value %#v does not conform to expected return type %#v: %s",
+			retVal, expectedType, errs[0],
+		))
+	}
+
+	return retVal, nil
+}
+
+// ProxyFunc is the type returned by the method Function.Proxy.
+type ProxyFunc func(args ...cty.Value) (cty.Value, error)
+
+// Proxy returns a function that can be called with cty.Value arguments
+// to run the function. This is provided as a convenience for when using
+// a function directly within Go code.
+func (f Function) Proxy() ProxyFunc {
+	return func(args ...cty.Value) (cty.Value, error) {
+		return f.Call(args)
+	}
+}
+
+// Params returns information about the function's fixed positional parameters.
+// This does not include information about any variadic arguments accepted;
+// for that, call VarParam.
+func (f Function) Params() []Parameter {
+	new := make([]Parameter, len(f.spec.Params))
+	copy(new, f.spec.Params)
+	return new
+}
+
+// VarParam returns information about the variadic arguments the function
+// expects, or nil if the function is not variadic.
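+//
+// As a hedged sketch (editor's illustration, not upstream code), a variadic
+// function declares VarParam in its Spec and receives the extra arguments at
+// the end of the args slice:
+//
+//     sum := function.New(&function.Spec{
+//         VarParam: &function.Parameter{Name: "nums", Type: cty.Number},
+//         Type:     function.StaticReturnType(cty.Number),
+//         Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+//             total := cty.Zero
+//             for _, v := range args {
+//                 total = total.Add(v)
+//             }
+//             return total, nil
+//         },
+//     })
+//     v, err := sum.Call([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
+//     // v is cty.NumberIntVal(3)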
+func (f Function) VarParam() *Parameter {
+	if f.spec.VarParam == nil {
+		return nil
+	}
+
+	ret := *f.spec.VarParam
+	return &ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
new file mode 100644
index 00000000000..a473d0ec3fa
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
@@ -0,0 +1,73 @@
+package stdlib
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+var NotFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "val",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].Not(), nil
+	},
+})
+
+var AndFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].And(args[1]), nil
+	},
+})
+
+var OrFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].Or(args[1]), nil
+	},
+})
+
+// Not returns the logical complement of the given boolean value.
+func Not(num cty.Value) (cty.Value, error) {
+	return NotFunc.Call([]cty.Value{num})
+}
+
+// And returns true if and only if both of the given boolean values are true.
+func And(a, b cty.Value) (cty.Value, error) {
+	return AndFunc.Call([]cty.Value{a, b})
+}
+
+// Or returns true if either of the given boolean values is true.
+func Or(a, b cty.Value) (cty.Value, error) {
+	return OrFunc.Call([]cty.Value{a, b})
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
new file mode 100644
index 00000000000..a132e0cde54
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
@@ -0,0 +1,112 @@
+package stdlib
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// Bytes is a capsule type that can be used with the binary functions to
+// support applications that need to support raw buffers in addition to
+// UTF-8 strings.
+var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil)))
+
+// BytesVal creates a new Bytes value from the given buffer, which must be
+// non-nil or this function will panic.
+//
+// Once a byte slice has been wrapped in a Bytes capsule, its underlying array
+// must be considered immutable.
+func BytesVal(buf []byte) cty.Value {
+	if buf == nil {
+		panic("can't make Bytes value from nil slice")
+	}
+
+	return cty.CapsuleVal(Bytes, &buf)
+}
+
+// BytesLenFunc is a Function that returns the length of the buffer
+// encapsulated in a Bytes value.
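+//
+// A hedged usage sketch (editor's note, not upstream code):
+//
+//     n, err := stdlib.BytesLen(stdlib.BytesVal([]byte("abc")))
+//     // n is cty.NumberIntVal(3)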
+var BytesLenFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "buf",
+			Type:             Bytes,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		bufPtr := args[0].EncapsulatedValue().(*[]byte)
+		return cty.NumberIntVal(int64(len(*bufPtr))), nil
+	},
+})
+
+// BytesSliceFunc is a Function that returns a slice of the given Bytes value.
+var BytesSliceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "buf",
+			Type:             Bytes,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "offset",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "length",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(Bytes),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		bufPtr := args[0].EncapsulatedValue().(*[]byte)
+
+		var offset, length int
+
+		var err error
+		err = gocty.FromCtyValue(args[1], &offset)
+		if err != nil {
+			return cty.NilVal, err
+		}
+		err = gocty.FromCtyValue(args[2], &length)
+		if err != nil {
+			return cty.NilVal, err
+		}
+
+		if offset < 0 || length < 0 {
+			return cty.NilVal, fmt.Errorf("offset and length must be non-negative")
+		}
+
+		if offset > len(*bufPtr) {
+			return cty.NilVal, fmt.Errorf(
+				"offset %d is greater than total buffer length %d",
+				offset, len(*bufPtr),
+			)
+		}
+
+		end := offset + length
+
+		if end > len(*bufPtr) {
+			return cty.NilVal, fmt.Errorf(
+				"offset %d + length %d is greater than total buffer length %d",
+				offset, length, len(*bufPtr),
+			)
+		}
+
+		return BytesVal((*bufPtr)[offset:end]), nil
+	},
+})
+
+func BytesLen(buf cty.Value) (cty.Value, error) {
+	return BytesLenFunc.Call([]cty.Value{buf})
+}
+
+func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) {
+	return BytesSliceFunc.Call([]cty.Value{buf, offset, length})
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go
new file mode 100644
index 00000000000..967ba03c8b2
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go
@@ -0,0 +1,140 @@
+package stdlib
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+var HasIndexFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "collection",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "key",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		collTy := args[0].Type()
+		if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) {
+			return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple")
+		}
+		return cty.Bool, nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].HasIndex(args[1]), nil
+	},
+})
+
+var IndexFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "collection",
+			Type: cty.DynamicPseudoType,
+		},
+		{
+			Name:             "key",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		collTy := args[0].Type()
+		key := args[1]
+		keyTy := key.Type()
+		switch {
+		case collTy.IsTupleType():
keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for tuple must be number") + } + if !key.IsKnown() { + return cty.DynamicPseudoType, nil + } + var idx int + err := gocty.FromCtyValue(key, &idx) + if err != nil { + return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err) + } + + etys := collTy.TupleElementTypes() + + if idx >= len(etys) || idx < 0 { + return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)-1) + } + + return etys[idx], nil + + case collTy.IsListType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for list must be number") + } + + return collTy.ElementType(), nil + + case collTy.IsMapType(): + if keyTy != cty.String && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for map must be string") + } + + return collTy.ElementType(), nil + + default: + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + has, err := HasIndex(args[0], args[1]) + if err != nil { + return cty.NilVal, err + } + if has.False() { // safe because collection and key are guaranteed known here + return cty.NilVal, fmt.Errorf("invalid index") + } + + return args[0].Index(args[1]), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map, a set, or a tuple") + } + return cty.Number, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Length(), nil + }, +}) + +// HasIndex determines whether the given collection can be indexed with the +// given key. +func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) { + return HasIndexFunc.Call([]cty.Value{collection, key}) +} + +// Index returns an element from the given collection using the given key, +// or returns an error if there is no element for the given key. +func Index(collection cty.Value, key cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{collection, key}) +} + +// Length returns the number of elements in the given collection.
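As an illustrative aside, a minimal sketch of how the collection helpers above (HasIndex, Index, Length) and the Bytes helpers from bytes.go might be called from Go. This example is editorial rather than part of the vendored source, and assumes the usual upstream import path for go-cty:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	// Length yields a cty.Number wrapping the element count.
	n, _ := stdlib.Length(list)
	fmt.Println(n.AsBigFloat()) // 2

	// HasIndex reports whether Index would succeed for the given key.
	ok, _ := stdlib.HasIndex(list, cty.NumberIntVal(1))
	fmt.Println(ok.True()) // true

	// Index returns the element, or an error for an invalid key.
	v, _ := stdlib.Index(list, cty.NumberIntVal(1))
	fmt.Println(v.AsString()) // b

	// BytesLen operates on the encapsulated Bytes capsule type.
	buf := stdlib.BytesVal([]byte("hello"))
	l, _ := stdlib.BytesLen(buf)
	fmt.Println(l.AsBigFloat()) // 5
}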
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go new file mode 100644 index 00000000000..5070a5adf57 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -0,0 +1,93 @@ +package stdlib + +import ( + "encoding/csv" + "fmt" + "io" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var CSVDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + headers, err := cr.Read() + if err == io.EOF { + return cty.DynamicPseudoType, fmt.Errorf("missing header line") + } + if err != nil { + return cty.DynamicPseudoType, err + } + + atys := make(map[string]cty.Type, len(headers)) + for _, name := range headers { + if _, exists := atys[name]; exists { + return cty.DynamicPseudoType, fmt.Errorf("duplicate column name %q", name) + } + atys[name] = cty.String + } + return cty.List(cty.Object(atys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + atys := ety.AttributeTypes() + str := args[0] + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + cr.FieldsPerRecord = len(atys) + + // Read the header row first, since that'll tell us which indices + // map to which attribute names. + headers, err := cr.Read() + if err != nil { + return cty.DynamicVal, err + } + + var rows []cty.Value + for { + cols, err := cr.Read() + if err == io.EOF { + break + } + if err != nil { + return cty.DynamicVal, err + } + + vals := make(map[string]cty.Value, len(cols)) + for i, str := range cols { + name := headers[i] + vals[name] = cty.StringVal(str) + } + rows = append(rows, cty.ObjectVal(vals)) + } + + if len(rows) == 0 { + return cty.ListValEmpty(ety), nil + } + return cty.ListVal(rows), nil + }, +}) + +// CSVDecode parses the given CSV (RFC 4180) string and, if it is valid, +// returns a list of objects representing the rows. +// +// The result is always a list of some object type. The first row of the +// input is used to determine the object attributes, and subsequent rows +// determine the values of those attributes. 
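Before the wrapper itself, a brief illustrative sketch (not part of the vendored source) of the two-phase behavior documented above: the header row fixes the returned object type, and every subsequent row becomes one object in the resulting list:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	input := cty.StringVal("name,port\nweb,80\nssh,22\n")

	rows, err := stdlib.CSVDecode(input)
	if err != nil {
		panic(err)
	}

	// rows has type cty.List(cty.Object({name: string, port: string})).
	for it := rows.ElementIterator(); it.Next(); {
		_, row := it.Element()
		fmt.Println(row.GetAttr("name").AsString(), row.GetAttr("port").AsString())
	}
}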
+func CSVDecode(str cty.Value) (cty.Value, error) { + return CSVDecodeFunc.Call([]cty.Value{str}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go new file mode 100644 index 00000000000..aa15b7bde96 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -0,0 +1,385 @@ +package stdlib + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var FormatDateFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + { + Name: "time", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + formatStr := args[0].AsString() + timeStr := args[1].AsString() + t, err := parseTimestamp(timeStr) + if err != nil { + return cty.DynamicVal, function.NewArgError(1, err) + } + + var buf bytes.Buffer + sc := bufio.NewScanner(strings.NewReader(formatStr)) + sc.Split(splitDateFormat) + const esc = '\'' + for sc.Scan() { + tok := sc.Bytes() + + // The leading byte signals the token type + switch { + case tok[0] == esc: + if tok[len(tok)-1] != esc || len(tok) == 1 { + return cty.DynamicVal, function.NewArgErrorf(0, "unterminated literal '") + } + if len(tok) == 2 { + // Must be a single escaped quote, '' + buf.WriteByte(esc) + } else { + // The content (until a closing esc) is printed out verbatim + // except that we must un-double any double-esc escapes in + // the middle of the string. + raw := tok[1 : len(tok)-1] + for i := 0; i < len(raw); i++ { + buf.WriteByte(raw[i]) + if raw[i] == esc { + i++ // skip the escaped quote + } + } + } + + case startsDateFormatVerb(tok[0]): + switch tok[0] { + case 'Y': + y := t.Year() + switch len(tok) { + case 2: + fmt.Fprintf(&buf, "%02d", y%100) + case 4: + fmt.Fprintf(&buf, "%04d", y) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: year must either be \"YY\" or \"YYYY\"", tok) + } + case 'M': + m := t.Month() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + case 3: + buf.WriteString(m.String()[:3]) + case 4: + buf.WriteString(m.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: month must be \"M\", \"MM\", \"MMM\", or \"MMMM\"", tok) + } + case 'D': + d := t.Day() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", d) + case 2: + fmt.Fprintf(&buf, "%02d", d) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of month must either be \"D\" or \"DD\"", tok) + } + case 'E': + d := t.Weekday() + switch len(tok) { + case 3: + buf.WriteString(d.String()[:3]) + case 4: + buf.WriteString(d.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of week must either be \"EEE\" or \"EEEE\"", tok) + } + case 'h': + h := t.Hour() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 24-hour must either be \"h\" or \"hh\"", tok) + } + case 'H': + h := t.Hour() % 12 + if h == 0 { + h = 12 + } + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + 
fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 12-hour must either be \"H\" or \"HH\"", tok) + } + case 'A', 'a': + if len(tok) != 2 { + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: must be \"%s%s\"", tok, tok[0:1], tok[0:1]) + } + upper := tok[0] == 'A' + switch t.Hour() / 12 { + case 0: + if upper { + buf.WriteString("AM") + } else { + buf.WriteString("am") + } + case 1: + if upper { + buf.WriteString("PM") + } else { + buf.WriteString("pm") + } + } + case 'm': + m := t.Minute() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: minute must either be \"m\" or \"mm\"", tok) + } + case 's': + s := t.Second() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", s) + case 2: + fmt.Fprintf(&buf, "%02d", s) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: second must either be \"s\" or \"ss\"", tok) + } + case 'Z': + // We'll just lean on Go's own formatter for this one, since + // the necessary information is unexported. + switch len(tok) { + case 1: + buf.WriteString(t.Format("Z07:00")) + case 3: + str := t.Format("-0700") + switch str { + case "+0000": + buf.WriteString("UTC") + default: + buf.WriteString(str) + } + case 4: + buf.WriteString(t.Format("-0700")) + case 5: + buf.WriteString(t.Format("-07:00")) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: timezone must be Z, ZZZZ, or ZZZZZ", tok) + } + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q", tok) + } + + default: + // Any other starting character indicates a literal sequence + buf.Write(tok) + } + } + + return cty.StringVal(buf.String()), nil + }, +}) + +// FormatDate reformats a timestamp given in RFC3339 syntax into another time +// syntax defined by a given format string. +// +// The format string uses letter mnemonics to represent portions of the +// timestamp, with repetition signifying length variants of each portion. +// Single quote characters ' can be used to quote sequences of literal letters +// that should not be interpreted as formatting mnemonics. +// +// The full set of supported mnemonic sequences is listed below: +// +// YY Year modulo 100 zero-padded to two digits, like "06". +// YYYY Four (or more) digit year, like "2006". +// M Month number, like "1" for January. +// MM Month number zero-padded to two digits, like "01". +// MMM English month name abbreviated to three letters, like "Jan". +// MMMM English month name unabbreviated, like "January". +// D Day of month number, like "2". +// DD Day of month number zero-padded to two digits, like "02". +// EEE English day of week name abbreviated to three letters, like "Mon". +// EEEE English day of week name unabbreviated, like "Monday". +// h 24-hour number, like "2". +// hh 24-hour number zero-padded to two digits, like "02". +// H 12-hour number, like "2". +// HH 12-hour number zero-padded to two digits, like "02". +// AA Hour AM/PM marker in uppercase, like "AM". +// aa Hour AM/PM marker in lowercase, like "am". +// m Minute within hour, like "5". +// mm Minute within hour zero-padded to two digits, like "05". +// s Second within minute, like "9". +// ss Second within minute zero-padded to two digits, like "09". +// ZZZZ Timezone offset with just sign and digit, like "-0800". 
+// ZZZZZ Timezone offset with colon separating hours and minutes, like "-08:00". +// Z Like ZZZZZ but with a special case "Z" for UTC. +// ZZZ Like ZZZZ but with a special case "UTC" for UTC. +// +// The format syntax is optimized mainly for generating machine-oriented +// timestamps rather than human-oriented timestamps; the English language +// portions of the output reflect the use of English names in a number of +// machine-readable date formatting standards. For presentation to humans, +// a locale-aware time formatter (not included in this package) is a better +// choice. +// +// The format syntax is not compatible with that of any other language, but +// is optimized so that patterns for common standard date formats can be +// recognized quickly even by a reader unfamiliar with the format syntax. +func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) { + return FormatDateFunc.Call([]cty.Value{format, timestamp}) +} + +func parseTimestamp(ts string) (time.Time, error) { + t, err := time.Parse(time.RFC3339, ts) + if err != nil { + switch err := err.(type) { + case *time.ParseError: + // If err is a time.ParseError then its string representation is not + // appropriate since it relies on details of Go's strange date format + // representation, which a caller of our functions is not expected + // to be familiar with. + // + // Therefore we do some light transformation to get a more suitable + // error that should make more sense to our callers. These are + // still not awesome error messages, but at least they refer to + // the timestamp portions by name rather than by Go's example + // values. + if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { + // For some reason err.Message is populated with a ": " prefix + // by the time package. + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) + } + var what string + switch err.LayoutElem { + case "2006": + what = "year" + case "01": + what = "month" + case "02": + what = "day of month" + case "15": + what = "hour" + case "04": + what = "minute" + case "05": + what = "second" + case "Z07:00": + what = "UTC offset" + case "T": + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") + case ":", "-": + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) + } + default: + // Should never get here, because time.RFC3339 includes only the + // above portions, but since that might change in future we'll + // be robust here. + what = "timestamp segment" + } + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) + } + } + return time.Time{}, err + } + return t, nil +} + +// splitDateFormat is a bufio.SplitFunc used to tokenize a date format. +func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) { + if len(data) == 0 { + return 0, nil, nil + } + + const esc = '\'' + + switch { + + case data[0] == esc: + // If we have another quote immediately after then this is a single + // escaped escape.
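A short illustrative sketch (not part of the vendored file) exercising the mnemonic syntax documented above, including a quoted literal; the timestamp value is arbitrary:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	ts := cty.StringVal("2019-11-27T16:05:09Z")

	// 'at' is quoted so its letters are not read as format verbs.
	out, err := stdlib.FormatDate(cty.StringVal("EEEE, DD MMM YYYY 'at' hh:mm"), ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.AsString()) // Wednesday, 27 Nov 2019 at 16:05
}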
+ if len(data) > 1 && data[1] == esc { + return 2, data[:2], nil + } + + // Beginning of quoted sequence, so we will seek forward until we find + // the closing quote, ignoring escaped quotes along the way. + for i := 1; i < len(data); i++ { + if data[i] == esc { + if (i + 1) == len(data) { + // We need at least one more byte to decide if this is an + // escape or a terminator. + return 0, nil, nil + } + if data[i+1] == esc { + i++ // doubled-up quotes are an escape sequence + continue + } + // We've found the closing quote + return i + 1, data[:i+1], nil + } + } + // If we fall out here then we need more bytes to find the end, + // unless we're already at the end with an unclosed quote. + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + + case startsDateFormatVerb(data[0]): + rep := data[0] + for i := 1; i < len(data); i++ { + if data[i] != rep { + return i, data[:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // We need more data to decide if we've found the end + return 0, nil, nil + + default: + for i := 1; i < len(data); i++ { + if data[i] == esc || startsDateFormatVerb(data[i]) { + return i, data[:i], nil + } + } + // We might not actually be at the end of a literal sequence, + // but that doesn't matter since we'll concat them back together + // anyway. + return len(data), data, nil + } +} + +func startsDateFormatVerb(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go new file mode 100644 index 00000000000..cfb613e5a5d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go @@ -0,0 +1,13 @@ +// Package stdlib is a collection of cty functions that are expected to be +// generally useful, and are thus factored out into this shared library in +// the hope that cty-using applications will have consistent behavior when +// using these functions. +// +// See the parent package "function" for more information on the purpose +// and usage of cty functions. +// +// This package contains both Go functions, which provide convenient access +// to call the functions from Go code, and the Function objects themselves. +// The latter follow the naming scheme of appending "Func" to the end of +// the function name. 
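To illustrate the naming convention this package comment describes, the two calling styles below are equivalent. The sketch is editorial, not part of the vendored source, and uses AbsoluteFunc from number.go later in this diff:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Go convenience wrapper style.
	v1, _ := stdlib.Absolute(cty.NumberIntVal(-3))

	// function.Function style, useful when functions are passed around
	// as values, e.g. registered in an HCL evaluation context.
	v2, _ := stdlib.AbsoluteFunc.Call([]cty.Value{cty.NumberIntVal(-3)})

	fmt.Println(v1.RawEquals(v2)) // true
}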
+package stdlib diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go new file mode 100644 index 00000000000..664790b46b4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -0,0 +1,501 @@ +package stdlib + +import ( + "bytes" + "fmt" + "math/big" + "strings" + + "github.com/apparentlymart/go-textseg/textseg" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +//go:generate ragel -Z format_fsm.rl +//go:generate gofmt -w format_fsm.go + +var FormatFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + for _, arg := range args[1:] { + if !arg.IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + return cty.UnknownVal(cty.String), nil + } + } + str, err := formatFSM(args[0].AsString(), args[1:]) + return cty.StringVal(str), err + }, +}) + +var FormatListFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + fmtVal := args[0] + args = args[1:] + + if len(args) == 0 { + // With no arguments, this function is equivalent to Format, but + // returning a single-element list result. + result, err := Format(fmtVal, args...) + return cty.ListVal([]cty.Value{result}), err + } + + fmtStr := fmtVal.AsString() + + // Each of our arguments will be dealt with either as an iterator + // or as a single value. Iterators are used for sequence-type values + // (lists, sets, tuples) while everything else is treated as a + // single value. The sequences we iterate over are required to be + // all the same length. + iterLen := -1 + lenChooser := -1 + iterators := make([]cty.ElementIterator, len(args)) + singleVals := make([]cty.Value, len(args)) + for i, arg := range args { + argTy := arg.Type() + switch { + case (argTy.IsListType() || argTy.IsSetType() || argTy.IsTupleType()) && !arg.IsNull(): + if !argTy.IsTupleType() && !arg.IsKnown() { + // We can't iterate this one at all yet then, so we can't + // yet produce a result. + return cty.UnknownVal(retType), nil + } + thisLen := arg.LengthInt() + if iterLen == -1 { + iterLen = thisLen + lenChooser = i + } else { + if thisLen != iterLen { + return cty.NullVal(cty.List(cty.String)), function.NewArgErrorf( + i+1, + "argument %d has length %d, which is inconsistent with argument %d of length %d", + i+1, thisLen, + lenChooser+1, iterLen, + ) + } + } + iterators[i] = arg.ElementIterator() + default: + singleVals[i] = arg + } + } + + if iterLen == 0 { + // If our sequences are all empty then our result must be empty. 
+ return cty.ListValEmpty(cty.String), nil + } + + if iterLen == -1 { + // If we didn't encounter any iterables at all then we're going + // to just do one iteration with items from singleVals. + iterLen = 1 + } + + ret := make([]cty.Value, 0, iterLen) + fmtArgs := make([]cty.Value, len(iterators)) + Results: + for iterIdx := 0; iterIdx < iterLen; iterIdx++ { + + // Construct our arguments for a single format call + for i := range fmtArgs { + switch { + case iterators[i] != nil: + iterator := iterators[i] + iterator.Next() + _, val := iterator.Element() + fmtArgs[i] = val + default: + fmtArgs[i] = singleVals[i] + } + + // If any of the arguments to this call would be unknown then + // this particular result is unknown, but we'll keep going + // to see if any other iterations can produce known values. + if !fmtArgs[i].IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + ret = append(ret, cty.UnknownVal(cty.String)) + continue Results + } + } + + str, err := formatFSM(fmtStr, fmtArgs) + if err != nil { + return cty.NullVal(cty.List(cty.String)), fmt.Errorf( + "error on format iteration %d: %s", iterIdx, err, + ) + } + + ret = append(ret, cty.StringVal(str)) + } + + return cty.ListVal(ret), nil + }, +}) + +// Format produces a string representation of zero or more values using a +// format string similar to the "printf" function in C. +// +// It supports the following "verbs": +// +// %% Literal percent sign, consuming no value +// %v A default formatting of the value based on type, as described below. +// %#v JSON serialization of the value +// %t Converts to boolean and then produces "true" or "false" +// %b Converts to number, requires integer, produces binary representation +// %d Converts to number, requires integer, produces decimal representation +// %o Converts to number, requires integer, produces octal representation +// %x Converts to number, requires integer, produces hexadecimal representation +// with lowercase letters +// %X Like %x but with uppercase letters +// %e Converts to number, produces scientific notation like -1.234456e+78 +// %E Like %e but with an uppercase "E" representing the exponent +// %f Converts to number, produces decimal representation with fractional +// part but no exponent, like 123.456 +// %g %e for large exponents or %f otherwise +// %G %E for large exponents or %f otherwise +// %s Converts to string and produces the string's characters +// %q Converts to string and produces JSON-quoted string representation, +// like %v. +// +// The default format selections made by %v are: +// +// string %s +// number %g +// bool %t +// other %#v +// +// Null values produce the literal keyword "null" for %v and %#v, and produce +// an error otherwise. +// +// Width is specified by an optional decimal number immediately preceding the +// verb letter. If absent, the width is whatever is necessary to represent the +// value. Precision is specified after the (optional) width by a period +// followed by a decimal number. If no period is present, a default precision +// is used. A period with no following number is invalid. +// For examples: +// +// %f default width, default precision +// %9f width 9, default precision +// %.2f default width, precision 2 +// %9.2f width 9, precision 2 +// +// Width and precision are measured in unicode characters (grapheme clusters). 
+// +// For most values, width is the minimum number of characters to output, +// padding the formatted form with spaces if necessary. +// +// For strings, precision limits the length of the input to be formatted (not +// the size of the output), truncating if necessary. +// +// For numbers, width sets the minimum width of the field and precision sets +// the number of places after the decimal, if appropriate, except that for +// %g/%G precision sets the total number of significant digits. +// +// The following additional symbols can be used immediately after the percent +// introducer as flags: +// +// (a space) leave a space where the sign would be if number is positive +// + Include a sign for a number even if it is positive (numeric only) +// - Pad with spaces on the left rather than the right +// 0 Pad with zeros rather than spaces. +// +// Flag characters are ignored for verbs that do not support them. +// +// By default, % sequences consume successive arguments starting with the first. +// Introducing a [n] sequence immediately before the verb letter, where n is a +// decimal integer, explicitly chooses a particular value argument by its +// one-based index. Subsequent calls without an explicit index will then +// proceed with n+1, n+2, etc. +// +// An error is produced if the format string calls for an impossible conversion +// or accesses more values than are given. An error is produced also for +// an unsupported format verb. +func Format(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatFunc.Call(args) +} + +// FormatList applies the same formatting behavior as Format, but accepts +// a mixture of list and non-list values as arguments. Any list arguments +// passed must have the same length, which dictates the length of the +// resulting list. +// +// Any non-list arguments are used repeatedly for each iteration over the +// list arguments. The list arguments are iterated in order by key, so +// corresponding items are formatted together. +func FormatList(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatListFunc.Call(args) +} + +type formatVerb struct { + Raw string + Offset int + + ArgNum int + Mode rune + + Zero bool + Sharp bool + Plus bool + Minus bool + Space bool + + HasPrec bool + Prec int + + HasWidth bool + Width int +} + +// formatAppend is called by formatFSM (generated by format_fsm.rl) for each +// formatting sequence that is encountered. +func formatAppend(verb *formatVerb, buf *bytes.Buffer, args []cty.Value) error { + argIdx := verb.ArgNum - 1 + if argIdx >= len(args) { + return fmt.Errorf( + "not enough arguments for %q at %d: need index %d but have %d total", + verb.Raw, verb.Offset, + verb.ArgNum, len(args), + ) + } + arg := args[argIdx] + + if verb.Mode != 'v' && arg.IsNull() { + return fmt.Errorf("unsupported value for %q at %d: null value cannot be formatted", verb.Raw, verb.Offset) + } + + // Normalize to make some things easier for downstream formatters + if !verb.HasWidth { + verb.Width = -1 + } + if !verb.HasPrec { + verb.Prec = -1 + } + + // For our first pass we'll ensure the verb is supported and then fan + // out to other functions based on what conversion is needed. 
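An illustrative usage sketch (not part of the vendored file) for the Format and FormatList wrappers defined above; FormatList zips its list arguments together element by element, while scalar arguments repeat on every iteration:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	s, _ := stdlib.Format(cty.StringVal("%s is %d"), cty.StringVal("port"), cty.NumberIntVal(80))
	fmt.Println(s.AsString()) // port is 80

	hosts := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	l, _ := stdlib.FormatList(cty.StringVal("host-%s"), hosts)
	// l is cty.ListVal(["host-a", "host-b"]).
	fmt.Println(l.LengthInt()) // 2
}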
+ switch verb.Mode { + + case 'v': + return formatAppendAsIs(verb, buf, arg) + + case 't': + return formatAppendBool(verb, buf, arg) + + case 'b', 'd', 'o', 'x', 'X', 'e', 'E', 'f', 'g', 'G': + return formatAppendNumber(verb, buf, arg) + + case 's', 'q': + return formatAppendString(verb, buf, arg) + + default: + return fmt.Errorf("unsupported format verb %q in %q at offset %d", verb.Mode, verb.Raw, verb.Offset) + } +} + +func formatAppendAsIs(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + + if !verb.Sharp && !arg.IsNull() { + // Unless the caller overrode it with the sharp flag, we'll try some + // specialized formats before we fall back on JSON. + switch arg.Type() { + case cty.String: + fmted := arg.AsString() + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + case cty.Number: + bf := arg.AsBigFloat() + fmted := bf.Text('g', -1) + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + } + } + + jb, err := json.Marshal(arg, arg.Type()) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + + return nil +} + +func formatAppendBool(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Bool) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + if arg.True() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + return nil +} + +func formatAppendNumber(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Number) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + switch verb.Mode { + case 'b', 'd', 'o', 'x', 'X': + return formatAppendInteger(verb, buf, arg) + default: + bf := arg.AsBigFloat() + + // For floats our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bf) + buf.WriteString(fmted) + return nil + } +} + +func formatAppendInteger(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + bf := arg.AsBigFloat() + bi, acc := bf.Int(nil) + if acc != big.Exact { + return fmt.Errorf("unsupported value for %q at %d: an integer is required", verb.Raw, verb.Offset) + } + + // For integers our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bi) + buf.WriteString(fmted) + return nil +} + +func formatAppendString(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.String) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + // We _cannot_ directly use the Go fmt.Sprintf implementation for strings + // because it measures widths and precisions in runes rather than grapheme + // clusters. 
+ + str := arg.AsString() + if verb.Prec > 0 { + strB := []byte(str) + pos := 0 + wanted := verb.Prec + for i := 0; i < wanted; i++ { + next := strB[pos:] + if len(next) == 0 { + // ran out of characters before we hit our max width + break + } + d, _, _ := textseg.ScanGraphemeClusters(strB[pos:], true) + pos += d + } + str = str[:pos] + } + + switch verb.Mode { + case 's': + fmted := formatPadWidth(verb, str) + buf.WriteString(fmted) + case 'q': + jb, err := json.Marshal(cty.StringVal(str), cty.String) + if err != nil { + // Should never happen, since we know this is a known, non-null string + panic(fmt.Errorf("failed to marshal %#v as JSON: %s", arg, err)) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + default: + // Should never happen because formatAppend should've already validated + panic(fmt.Errorf("invalid string formatting mode %q", verb.Mode)) + } + return nil +} + +func formatPadWidth(verb *formatVerb, fmted string) string { + if verb.Width < 0 { + return fmted + } + + // Safe to ignore errors because ScanGraphemeClusters cannot produce errors + givenLen, _ := textseg.TokenCount([]byte(fmted), textseg.ScanGraphemeClusters) + wantLen := verb.Width + if givenLen >= wantLen { + return fmted + } + + padLen := wantLen - givenLen + padChar := " " + if verb.Zero { + padChar = "0" + } + pads := strings.Repeat(padChar, padLen) + + if verb.Minus { + return fmted + pads + } + return pads + fmted +} + +// formatStripIndexSegment strips out any [nnn] segment present in a verb +// string so that we can pass it through to Go's fmt.Sprintf with a single +// argument. This is used in cases where we're just leaning on Go's formatter +// because it's a superset of ours. +func formatStripIndexSegment(rawVerb string) string { + // We assume the string has already been validated here, since we should + // only be using this function with strings that were accepted by our + // scanner in formatFSM. + start := strings.Index(rawVerb, "[") + end := strings.Index(rawVerb, "]") + if start == -1 || end == -1 { + return rawVerb + } + + return rawVerb[:start] + rawVerb[end+1:] +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go new file mode 100644 index 00000000000..32b1ac9712f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go @@ -0,0 +1,374 @@ +// line 1 "format_fsm.rl" +// This file is generated from format_fsm.rl. DO NOT EDIT. 
+ +// line 5 "format_fsm.rl" + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// line 21 "format_fsm.go" +var _formatfsm_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 4, + 1, 5, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 14, + 1, 16, 1, 17, 1, 18, 2, 3, + 4, 2, 12, 10, 2, 12, 16, 2, + 12, 18, 2, 13, 14, 2, 15, 10, + 2, 15, 18, +} + +var _formatfsm_key_offsets []byte = []byte{ + 0, 0, 14, 27, 34, 36, 39, 43, + 51, +} + +var _formatfsm_trans_keys []byte = []byte{ + 32, 35, 37, 43, 45, 46, 48, 91, + 49, 57, 65, 90, 97, 122, 32, 35, + 43, 45, 46, 48, 91, 49, 57, 65, + 90, 97, 122, 91, 48, 57, 65, 90, + 97, 122, 49, 57, 93, 48, 57, 65, + 90, 97, 122, 46, 91, 48, 57, 65, + 90, 97, 122, 37, +} + +var _formatfsm_single_lengths []byte = []byte{ + 0, 8, 7, 1, 0, 1, 0, 2, + 1, +} + +var _formatfsm_range_lengths []byte = []byte{ + 0, 3, 3, 3, 1, 1, 2, 3, + 0, +} + +var _formatfsm_index_offsets []byte = []byte{ + 0, 0, 12, 23, 28, 30, 33, 36, + 42, +} + +var _formatfsm_indicies []byte = []byte{ + 1, 2, 3, 4, 5, 6, 7, 10, + 8, 9, 9, 0, 1, 2, 4, 5, + 6, 7, 10, 8, 9, 9, 0, 13, + 11, 12, 12, 0, 14, 0, 15, 14, + 0, 9, 9, 0, 16, 19, 17, 18, + 18, 0, 20, 3, +} + +var _formatfsm_trans_targs []byte = []byte{ + 0, 2, 2, 8, 2, 2, 3, 2, + 7, 8, 4, 3, 8, 4, 5, 6, + 3, 7, 8, 4, 1, +} + +var _formatfsm_trans_actions []byte = []byte{ + 7, 17, 9, 3, 15, 13, 25, 11, + 43, 29, 19, 27, 49, 46, 21, 0, + 37, 23, 40, 34, 1, +} + +var _formatfsm_eof_actions []byte = []byte{ + 0, 31, 31, 31, 31, 31, 31, 31, + 5, +} + +const formatfsm_start int = 8 +const formatfsm_first_final int = 8 +const formatfsm_error int = 0 + +const formatfsm_en_main int = 8 + +// line 20 "format_fsm.rl" + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + highestArgIdx := 0 // zero means "none", since arg numbers are 1-based + + // line 159 "format_fsm.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + // line 123 "format_fsm.go" + { + cs = formatfsm_start + } + + // line 128 "format_fsm.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _keys = int(_formatfsm_key_offsets[cs]) + _trans = int(_formatfsm_index_offsets[cs]) + + _klen = int(_formatfsm_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _formatfsm_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_formatfsm_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _formatfsm_trans_keys[_mid+1]: + _lower = _mid + 2 + 
default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_formatfsm_indicies[_trans]) + cs = int(_formatfsm_trans_targs[_trans]) + + if _formatfsm_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_formatfsm_trans_actions[_trans]) + _nacts = uint(_formatfsm_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _formatfsm_actions[_acts-1] { + case 0: + // line 31 "format_fsm.rl" + + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + + case 1: + // line 40 "format_fsm.rl" + + buf.WriteByte(data[p]) + + case 4: + // line 51 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + case 5: + // line 58 "format_fsm.rl" + + verb.Sharp = true + + case 6: + // line 61 "format_fsm.rl" + + verb.Zero = true + + case 7: + // line 64 "format_fsm.rl" + + verb.Minus = true + + case 8: + // line 67 "format_fsm.rl" + + verb.Plus = true + + case 9: + // line 70 "format_fsm.rl" + + verb.Space = true + + case 10: + // line 74 "format_fsm.rl" + + verb.ArgNum = 0 + + case 11: + // line 77 "format_fsm.rl" + + verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0') + + case 12: + // line 81 "format_fsm.rl" + + verb.HasWidth = true + + case 13: + // line 84 "format_fsm.rl" + + verb.Width = 0 + + case 14: + // line 87 "format_fsm.rl" + + verb.Width = (10 * verb.Width) + (int(data[p]) - '0') + + case 15: + // line 91 "format_fsm.rl" + + verb.HasPrec = true + + case 16: + // line 94 "format_fsm.rl" + + verb.Prec = 0 + + case 17: + // line 97 "format_fsm.rl" + + verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0') + + case 18: + // line 101 "format_fsm.rl" + + verb.Mode = rune(data[p]) + te = p + 1 + verb.Raw = data[ts:te] + verb.Offset = ts + + if verb.ArgNum > highestArgIdx { + highestArgIdx = verb.ArgNum + } + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + + // line 330 "format_fsm.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _formatfsm_eof_actions[cs] + __nacts := uint(_formatfsm_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _formatfsm_actions[__acts-1] { + case 2: + // line 44 "format_fsm.rl" + + case 3: + // line 47 "format_fsm.rl" + + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + + case 4: + // line 51 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + // line 369 "format_fsm.go" + } + } + } + + _out: + { + } + } + + // line 177 "format_fsm.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. 
+ if cs < formatfsm_first_final { + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + if highestArgIdx < len(a) { + // Extraneous args are an error, to more easily detect mistakes + firstBad := highestArgIdx + 1 + if highestArgIdx == 0 { + // Custom error message for this case + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; no verbs in format string") + } + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; only %d used by format string", highestArgIdx) + } + + return buf.String(), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go new file mode 100644 index 00000000000..6b31f266141 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -0,0 +1,107 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var EqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]), nil + }, +}) + +var NotEqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]).Not(), nil + }, +}) + +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, fmt.Errorf("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + + return convert.Convert(argVal, retType) + } + return cty.NilVal, fmt.Errorf("no non-null arguments") + }, +}) + +// Equal determines whether the two given values are equal, returning a +// bool value. +func Equal(a cty.Value, b cty.Value) (cty.Value, error) { + return EqualFunc.Call([]cty.Value{a, b}) +} + +// NotEqual is the opposite of Equal. +func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) { + return NotEqualFunc.Call([]cty.Value{a, b}) +} + +// Coalesce returns the first of the given arguments that is not null. If +// all arguments are null, an error is produced. 
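Before the wrapper below, an illustrative sketch (not part of the vendored source) of the Coalesce behavior just described, alongside Equal for comparison:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// The first non-null argument wins; all-null input yields an error.
	v, _ := stdlib.Coalesce(cty.NullVal(cty.String), cty.StringVal("fallback"))
	fmt.Println(v.AsString()) // fallback

	// Equal returns a cty.Bool value rather than a native Go bool.
	eq, _ := stdlib.Equal(cty.NumberIntVal(1), cty.NumberIntVal(1))
	fmt.Println(eq.True()) // true
}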
+func Coalesce(vals ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(vals) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go new file mode 100644 index 00000000000..07901c65d37 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -0,0 +1,72 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +var JSONEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + val := args[0] + if !val.IsWhollyKnown() { + // We can't serialize unknowns, so if the value is unknown or + // contains any _nested_ unknowns then our result must be + // unknown. + return cty.UnknownVal(retType), nil + } + + buf, err := json.Marshal(val, val.Type()) + if err != nil { + return cty.NilVal, err + } + + return cty.StringVal(string(buf)), nil + }, +}) + +var JSONDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + buf := []byte(str.AsString()) + return json.ImpliedType(buf) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + buf := []byte(args[0].AsString()) + return json.Unmarshal(buf, retType) + }, +}) + +// JSONEncode returns a JSON serialization of the given value. +func JSONEncode(val cty.Value) (cty.Value, error) { + return JSONEncodeFunc.Call([]cty.Value{val}) +} + +// JSONDecode parses the given JSON string and, if it is valid, returns the +// value it represents. +// +// Note that applying JSONDecode to the result of JSONEncode may not produce +// an identically-typed result, since JSON encoding is lossy for cty Types. +// The resulting value will consist only of primitive types, object types, and +// tuple types. 
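A round-trip sketch (editorial, not part of the vendored file) showing the lossiness noted above: decoding recovers a type implied by the JSON text, not necessarily the original cty type:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("etcd"),
		"port": cty.NumberIntVal(2379),
	})

	s, _ := stdlib.JSONEncode(val)
	fmt.Println(s.AsString()) // {"name":"etcd","port":2379}

	back, _ := stdlib.JSONDecode(s)
	// back is an object again here, but a map or set input would come
	// back as an object or tuple, since JSON cannot express those types.
	fmt.Println(back.GetAttr("port").AsBigFloat()) // 2379
}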
+func JSONDecode(str cty.Value) (cty.Value, error) { + return JSONDecodeFunc.Call([]cty.Value{str}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go new file mode 100644 index 00000000000..bd9b2e51b10 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -0,0 +1,428 @@ +package stdlib + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var AbsoluteFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Absolute(), nil + }, +}) + +var AddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Add(args[1]), nil + }, +}) + +var SubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Sub can panic if the input values are infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't subtract infinity from itself") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Subtract(args[1]), nil + }, +}) + +var MultiplyFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. 
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't multiply zero by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Multiply(args[1]), nil + }, +}) + +var DivideFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Quo can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't divide zero by zero or infinity by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Divide(args[1]), nil + }, +}) + +var ModuloFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't use modulo with zero and infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Modulo(args[1]), nil + }, +}) + +var GreaterThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThan(args[1]), nil + }, +}) + +var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThanOrEqualTo(args[1]), nil + }, +}) + +var LessThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThan(args[1]), nil + }, +}) + +var LessThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThanOrEqualTo(args[1]), nil + }, +}) + +var NegateFunc = 
function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Negate(), nil + }, +}) + +var MinFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + min := cty.PositiveInfinity + for _, num := range args { + if num.LessThan(min).True() { + min = num + } + } + + return min, nil + }, +}) + +var MaxFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + max := cty.NegativeInfinity + for _, num := range args { + if num.GreaterThan(max).True() { + max = num + } + } + + return max, nil + }, +}) + +var IntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bf := args[0].AsBigFloat() + if bf.IsInt() { + return args[0], nil + } + bi, _ := bf.Int(nil) + bf = (&big.Float{}).SetInt(bi) + return cty.NumberVal(bf), nil + }, +}) + +// Absolute returns the magnitude of the given number, without its sign. +// That is, it turns negative values into positive values. +func Absolute(num cty.Value) (cty.Value, error) { + return AbsoluteFunc.Call([]cty.Value{num}) +} + +// Add returns the sum of the two given numbers. +func Add(a cty.Value, b cty.Value) (cty.Value, error) { + return AddFunc.Call([]cty.Value{a, b}) +} + +// Subtract returns the difference between the two given numbers. +func Subtract(a cty.Value, b cty.Value) (cty.Value, error) { + return SubtractFunc.Call([]cty.Value{a, b}) +} + +// Multiply returns the product of the two given numbers. +func Multiply(a cty.Value, b cty.Value) (cty.Value, error) { + return MultiplyFunc.Call([]cty.Value{a, b}) +} + +// Divide returns a divided by b, where both a and b are numbers. +func Divide(a cty.Value, b cty.Value) (cty.Value, error) { + return DivideFunc.Call([]cty.Value{a, b}) +} + +// Negate returns the given number multiplied by -1. +func Negate(num cty.Value) (cty.Value, error) { + return NegateFunc.Call([]cty.Value{num}) +} + +// LessThan returns true if a is less than b. +func LessThan(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanFunc.Call([]cty.Value{a, b}) +} + +// LessThanOrEqualTo returns true if a is less than or equal to b. +func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// GreaterThan returns true if a is greater than b. +func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanFunc.Call([]cty.Value{a, b}) +} + +// GreaterThanOrEqualTo returns true if a is greater than or equal to b.
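Before the remaining wrappers, an illustrative sketch (not part of the vendored file) of the arithmetic helpers above, including the panic-to-error behavior provided by the deferred recover in AddFunc:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	sum, _ := stdlib.Add(cty.NumberIntVal(2), cty.NumberIntVal(3))
	fmt.Println(sum.AsBigFloat()) // 5

	// big.Float would panic on +Inf plus -Inf; the recover wrapper in
	// AddFunc converts that panic into an ordinary error instead.
	_, err := stdlib.Add(cty.PositiveInfinity, cty.NegativeInfinity)
	fmt.Println(err) // can't compute sum of opposing infinities
}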
+func GreaterThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// Modulo returns the remainder of a divided by b under integer division, +// where both a and b are numbers. +func Modulo(a cty.Value, b cty.Value) (cty.Value, error) { + return ModuloFunc.Call([]cty.Value{a, b}) +} + +// Min returns the minimum number from the given numbers. +func Min(numbers ...cty.Value) (cty.Value, error) { + return MinFunc.Call(numbers) +} + +// Max returns the maximum number from the given numbers. +func Max(numbers ...cty.Value) (cty.Value, error) { + return MaxFunc.Call(numbers) +} + +// Int removes the fractional component of the given number returning an +// integer representing the whole number component, rounding towards zero. +// For example, -1.5 becomes -1. +// +// If an infinity is passed to Int, an error is returned. +func Int(num cty.Value) (cty.Value, error) { + if num == cty.PositiveInfinity || num == cty.NegativeInfinity { + return cty.NilVal, fmt.Errorf("can't truncate infinity to an integer") + } + return IntFunc.Call([]cty.Value{num}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go new file mode 100644 index 00000000000..2dd6348a2c4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go @@ -0,0 +1,233 @@ +package stdlib + +import ( + "fmt" + "regexp" + resyntax "regexp/syntax" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var RegexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pattern", + Type: cty.String, + }, + { + Name: "string", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + // We can't predict our type without seeing our pattern + return cty.DynamicPseudoType, nil + } + + retTy, err := regexPatternResultType(args[0].AsString()) + if err != nil { + err = function.NewArgError(0, err) + } + return retTy, err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + re, err := regexp.Compile(args[0].AsString()) + if err != nil { + // Should never happen, since we checked this in the Type function above. + return cty.NilVal, function.NewArgErrorf(0, "error parsing pattern: %s", err) + } + str := args[1].AsString() + + captureIdxs := re.FindStringSubmatchIndex(str) + if captureIdxs == nil { + return cty.NilVal, fmt.Errorf("pattern did not match any part of the given string") + } + + return regexPatternResult(re, str, captureIdxs, retType), nil + }, +}) + +var RegexAllFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pattern", + Type: cty.String, + }, + { + Name: "string", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + // We can't predict our type without seeing our pattern, + // but we do know it'll always be a list of something. 
+ return cty.List(cty.DynamicPseudoType), nil + } + + retTy, err := regexPatternResultType(args[0].AsString()) + if err != nil { + err = function.NewArgError(0, err) + } + return cty.List(retTy), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + if ety == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + re, err := regexp.Compile(args[0].AsString()) + if err != nil { + // Should never happen, since we checked this in the Type function above. + return cty.NilVal, function.NewArgErrorf(0, "error parsing pattern: %s", err) + } + str := args[1].AsString() + + captureIdxsEach := re.FindAllStringSubmatchIndex(str, -1) + if len(captureIdxsEach) == 0 { + return cty.ListValEmpty(ety), nil + } + + elems := make([]cty.Value, len(captureIdxsEach)) + for i, captureIdxs := range captureIdxsEach { + elems[i] = regexPatternResult(re, str, captureIdxs, ety) + } + return cty.ListVal(elems), nil + }, +}) + +// Regex is a function that extracts one or more substrings from a given +// string by applying a regular expression pattern, describing the first +// match. +// +// The return type depends on the composition of the capture groups (if any) +// in the pattern: +// +// - If there are no capture groups at all, the result is a single string +// representing the entire matched pattern. +// - If all of the capture groups are named, the result is an object whose +// keys are the named groups and whose values are their sub-matches, or +// null if a particular sub-group was inside another group that didn't +// match. +// - If none of the capture groups are named, the result is a tuple whose +// elements are the sub-groups in order and whose values are their +// sub-matches, or null if a particular sub-group was inside another group +// that didn't match. +// - It is invalid to use both named and un-named capture groups together in +// the same pattern. +// +// If the pattern doesn't match, this function returns an error. To test for +// a match, call RegexAll and check if the length of the result is greater +// than zero. +func Regex(pattern, str cty.Value) (cty.Value, error) { + return RegexFunc.Call([]cty.Value{pattern, str}) +} + +// RegexAll is similar to Regex but it finds all of the non-overlapping matches +// in the given string and returns a list of them. +// +// The result type is always a list, whose element type is deduced from the +// pattern in the same way as the return type for Regex is decided. +// +// If the pattern doesn't match at all, this function returns an empty list. +func RegexAll(pattern, str cty.Value) (cty.Value, error) { + return RegexAllFunc.Call([]cty.Value{pattern, str}) +} + +// regexPatternResultType parses the given regular expression pattern and +// returns the structural type that would be returned to represent its +// capture groups. +// +// Returns an error if parsing fails or if the pattern uses a mixture of +// named and unnamed capture groups, which is not permitted. +func regexPatternResultType(pattern string) (cty.Type, error) { + re, rawErr := regexp.Compile(pattern) + switch err := rawErr.(type) { + case *resyntax.Error: + return cty.NilType, fmt.Errorf("invalid regexp pattern: %s in %s", err.Code, err.Expr) + case error: + // Should never happen, since all regexp compile errors should + // be resyntax.Error, but just in case... 
+ return cty.NilType, fmt.Errorf("error parsing pattern: %s", err) + } + + allNames := re.SubexpNames()[1:] + var names []string + unnamed := 0 + for _, name := range allNames { + if name == "" { + unnamed++ + } else { + if names == nil { + names = make([]string, 0, len(allNames)) + } + names = append(names, name) + } + } + switch { + case unnamed == 0 && len(names) == 0: + // If there are no capture groups at all then we'll return just a + // single string for the whole match. + return cty.String, nil + case unnamed > 0 && len(names) > 0: + return cty.NilType, fmt.Errorf("invalid regexp pattern: cannot mix both named and unnamed capture groups") + case unnamed > 0: + // For unnamed captures, we return a tuple of them all in order. + etys := make([]cty.Type, unnamed) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + // For named captures, we return an object using the capture names + // as keys. + atys := make(map[string]cty.Type, len(names)) + for _, name := range names { + atys[name] = cty.String + } + return cty.Object(atys), nil + } +} + +func regexPatternResult(re *regexp.Regexp, str string, captureIdxs []int, retType cty.Type) cty.Value { + switch { + case retType == cty.String: + start, end := captureIdxs[0], captureIdxs[1] + return cty.StringVal(str[start:end]) + case retType.IsTupleType(): + captureIdxs = captureIdxs[2:] // index 0 is the whole pattern span, which we ignore by skipping one pair + vals := make([]cty.Value, len(captureIdxs)/2) + for i := range vals { + start, end := captureIdxs[i*2], captureIdxs[i*2+1] + if start < 0 || end < 0 { + vals[i] = cty.NullVal(cty.String) // Did not match anything because containing group didn't match + continue + } + vals[i] = cty.StringVal(str[start:end]) + } + return cty.TupleVal(vals) + case retType.IsObjectType(): + captureIdxs = captureIdxs[2:] // index 0 is the whole pattern span, which we ignore by skipping one pair + vals := make(map[string]cty.Value, len(captureIdxs)/2) + names := re.SubexpNames()[1:] + for i, name := range names { + start, end := captureIdxs[i*2], captureIdxs[i*2+1] + if start < 0 || end < 0 { + vals[name] = cty.NullVal(cty.String) // Did not match anything because containing group didn't match + continue + } + vals[name] = cty.StringVal(str[start:end]) + } + return cty.ObjectVal(vals) + default: + // Should never happen + panic(fmt.Sprintf("invalid return type %#v", retType)) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go new file mode 100644 index 00000000000..d3cc341dda6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -0,0 +1,218 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var ConcatFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "seqs", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, fmt.Errorf("at least one argument is required") + } + + if args[0].Type().IsListType() { + // Possibly we're going to return a list, if all of our other + // args are also lists and we can find a common element type. 
+ tys := make([]cty.Type, len(args)) + for i, val := range args { + ty := val.Type() + if !ty.IsListType() { + tys = nil + break + } + tys[i] = ty + } + + if tys != nil { + commonType, _ := convert.UnifyUnsafe(tys) + if commonType != cty.NilType { + return commonType, nil + } + } + } + + etys := make([]cty.Type, 0, len(args)) + for i, val := range args { + ety := val.Type() + switch { + case ety.IsTupleType(): + etys = append(etys, ety.TupleElementTypes()...) + case ety.IsListType(): + if !val.IsKnown() { + // We need to know the list to count its elements to + // build our tuple type, so any concat of an unknown + // list can't be typed yet. + return cty.DynamicPseudoType, nil + } + + l := val.LengthInt() + subEty := ety.ElementType() + for j := 0; j < l; j++ { + etys = append(etys, subEty) + } + default: + return cty.NilType, function.NewArgErrorf( + i, "all arguments must be lists or tuples; got %s", + ety.FriendlyName(), + ) + } + } + return cty.Tuple(etys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + switch { + case retType.IsListType(): + // If retType is a list type then we know that all of the + // given values will be lists and that they will either be of + // retType or of something we can convert to retType. + vals := make([]cty.Value, 0, len(args)) + for i, list := range args { + list, err = convert.Convert(list, retType) + if err != nil { + // Conversion might fail because we used UnifyUnsafe + // to choose our return type. + return cty.NilVal, function.NewArgError(i, err) + } + + it := list.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, v) + } + } + if len(vals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + return cty.ListVal(vals), nil + case retType.IsTupleType(): + // If retType is a tuple type then we could have a mixture of + // lists and tuples but we know they all have known values + // (because our params don't AllowUnknown) and we know that + // concatenating them all together will produce a tuple of + // retType because of the work we did in the Type function above. + vals := make([]cty.Value, 0, len(args)) + + for _, seq := range args { + // Both lists and tuples support ElementIterator, so this is easy. 
+				it := seq.ElementIterator()
+				for it.Next() {
+					_, v := it.Element()
+					vals = append(vals, v)
+				}
+			}
+
+			return cty.TupleVal(vals), nil
+		default:
+			// should never happen if Type is working correctly above
+			panic("unsupported return type")
+		}
+	},
+})
+
+var RangeFunc = function.New(&function.Spec{
+	VarParam: &function.Parameter{
+		Name: "params",
+		Type: cty.Number,
+	},
+	Type: function.StaticReturnType(cty.List(cty.Number)),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var start, end, step cty.Value
+		switch len(args) {
+		case 1:
+			if args[0].LessThan(cty.Zero).True() {
+				start, end, step = cty.Zero, args[0], cty.NumberIntVal(-1)
+			} else {
+				start, end, step = cty.Zero, args[0], cty.NumberIntVal(1)
+			}
+		case 2:
+			if args[1].LessThan(args[0]).True() {
+				start, end, step = args[0], args[1], cty.NumberIntVal(-1)
+			} else {
+				start, end, step = args[0], args[1], cty.NumberIntVal(1)
+			}
+		case 3:
+			start, end, step = args[0], args[1], args[2]
+		default:
+			return cty.NilVal, fmt.Errorf("must have one, two, or three arguments")
+		}
+
+		var vals []cty.Value
+
+		if step == cty.Zero {
+			return cty.NilVal, function.NewArgErrorf(2, "step must not be zero")
+		}
+		down := step.LessThan(cty.Zero).True()
+
+		if down {
+			if end.GreaterThan(start).True() {
+				return cty.NilVal, function.NewArgErrorf(1, "end must be less than start when step is negative")
+			}
+		} else {
+			if end.LessThan(start).True() {
+				return cty.NilVal, function.NewArgErrorf(1, "end must be greater than start when step is positive")
+			}
+		}
+
+		num := start
+		for {
+			if down {
+				if num.LessThanOrEqualTo(end).True() {
+					break
+				}
+			} else {
+				if num.GreaterThanOrEqualTo(end).True() {
+					break
+				}
+			}
+			if len(vals) >= 1024 {
+				// Artificial limit to prevent bad arguments from consuming huge amounts of memory
+				return cty.NilVal, fmt.Errorf("more than 1024 values were generated; either decrease the difference between start and end or use a smaller step")
+			}
+			vals = append(vals, num)
+			num = num.Add(step)
+		}
+		if len(vals) == 0 {
+			return cty.ListValEmpty(cty.Number), nil
+		}
+		return cty.ListVal(vals), nil
+	},
+})
+
+// Concat takes one or more sequences (lists or tuples) and returns the single
+// sequence that results from concatenating them together in order.
+//
+// If all of the given sequences are lists of the same element type then the
+// result is a list of that type. Otherwise, the result is of a tuple type
+// constructed from the given sequence types.
+func Concat(seqs ...cty.Value) (cty.Value, error) {
+	return ConcatFunc.Call(seqs)
+}
+
+// Range creates a list of numbers by starting from the given starting value,
+// then adding the given step value until the result is greater than or
+// equal to the given stopping value. Each intermediate result becomes an
+// element in the resulting list.
+//
+// When all three parameters are set, the order is (start, end, step). If
+// only two parameters are set, they are the start and end respectively and
+// step defaults to 1. If only one argument is set, it gives the end value
+// with start defaulting to 0 and step defaulting to 1.
+//
+// Because the resulting list must be fully buffered in memory, there is an
+// artificial cap of 1024 elements, after which this function will return
+// an error to avoid consuming unbounded amounts of memory. The Range function
+// is primarily intended for creating small lists of indices to iterate over,
+// so there should be no reason to generate huge lists with it.
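+//
+// A minimal usage sketch (an illustrative example, assuming this package is
+// imported as "stdlib"):
+//
+//	vals, err := stdlib.Range(cty.NumberIntVal(1), cty.NumberIntVal(5))
+//	// vals is a list of the numbers 1, 2, 3 and 4: the end value is
+//	// exclusive, and the step defaults to 1 when two arguments are given.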
+func Range(params ...cty.Value) (cty.Value, error) {
+	return RangeFunc.Call(params)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
new file mode 100644
index 00000000000..100078fdc43
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
@@ -0,0 +1,195 @@
+package stdlib
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+var SetHasElementFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "elem",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].HasElement(args[1]), nil
+	},
+})
+
+var SetUnionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Union(s2)
+	}),
+})
+
+var SetIntersectionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Intersection(s2)
+	}),
+})
+
+var SetSubtractFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Subtract(s2)
+	}),
+})
+
+var SetSymmetricDifferenceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.SymmetricDifference(s2)
+	}),
+})
+
+// SetHasElement determines whether the given set contains the given value as an
+// element.
+func SetHasElement(set cty.Value, elem cty.Value) (cty.Value, error) {
+	return SetHasElementFunc.Call([]cty.Value{set, elem})
+}
+
+// SetUnion returns a new set containing all of the elements from the given
+// sets, which must have element types that can all be converted to some
+// common type using the standard type unification rules. If conversion
+// is not possible, an error is returned.
+//
+// The union operation is performed after type conversion, which may result
+// in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
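+//
+// A minimal usage sketch (an illustrative example, assuming this package is
+// imported as "stdlib"):
+//
+//	s1 := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//	s2 := cty.SetVal([]cty.Value{cty.StringVal("b"), cty.StringVal("c")})
+//	u, err := stdlib.SetUnion(s1, s2)
+//	// u is a set of strings containing "a", "b" and "c"; err is nil.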
+func SetUnion(sets ...cty.Value) (cty.Value, error) {
+	return SetUnionFunc.Call(sets)
+}
+
+// SetIntersection returns a new set containing the elements that exist
+// in all of the given sets, which must have element types that can all be
+// converted to some common type using the standard type unification rules.
+// If conversion is not possible, an error is returned.
+//
+// The intersection operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
+func SetIntersection(sets ...cty.Value) (cty.Value, error) {
+	return SetIntersectionFunc.Call(sets)
+}
+
+// SetSubtract returns a new set containing the elements from the
+// first set that are not present in the second set. The sets must have
+// element types that can both be converted to some common type using the
+// standard type unification rules. If conversion is not possible, an error
+// is returned.
+//
+// The subtract operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+func SetSubtract(a, b cty.Value) (cty.Value, error) {
+	return SetSubtractFunc.Call([]cty.Value{a, b})
+}
+
+// SetSymmetricDifference returns a new set containing the elements that
+// appear in any of the given sets but not in more than one of them. The sets
+// must have element types that can all be converted to some common type
+// using the standard type unification rules. If conversion is not possible,
+// an error is returned.
+//
+// The difference operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+func SetSymmetricDifference(sets ...cty.Value) (cty.Value, error) {
+	return SetSymmetricDifferenceFunc.Call(sets)
+}
+
+func setOperationReturnType(args []cty.Value) (ret cty.Type, err error) {
+	var etys []cty.Type
+	for _, arg := range args {
+		etys = append(etys, arg.Type().ElementType())
+	}
+	newEty, _ := convert.UnifyUnsafe(etys)
+	if newEty == cty.NilType {
+		return cty.NilType, fmt.Errorf("given sets must all have compatible element types")
+	}
+	return cty.Set(newEty), nil
+}
+
+func setOperationImpl(f func(s1, s2 cty.ValueSet) cty.ValueSet) function.ImplFunc {
+	return func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		first := args[0]
+		first, err = convert.Convert(first, retType)
+		if err != nil {
+			return cty.NilVal, function.NewArgError(0, err)
+		}
+
+		set := first.AsValueSet()
+		for i, arg := range args[1:] {
+			arg, err := convert.Convert(arg, retType)
+			if err != nil {
+				return cty.NilVal, function.NewArgError(i+1, err)
+			}
+
+			argSet := arg.AsValueSet()
+			set = f(set, argSet)
+		}
+		return cty.SetValFromValueSet(set), nil
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go
new file mode 100644
index 00000000000..d7c89fa8296
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go
@@ -0,0 +1,234 @@
+package stdlib
+
+import (
+	"strings"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+	"github.com/apparentlymart/go-textseg/textseg"
+)
+
+var UpperFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "str",
+			Type:             cty.String,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := args[0].AsString()
+		out := strings.ToUpper(in)
+		return cty.StringVal(out), nil
+	},
+})
+
+var LowerFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "str",
+			Type:             cty.String,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := args[0].AsString()
+		out := strings.ToLower(in)
+		return cty.StringVal(out), nil
+	},
+})
+
+var ReverseFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "str",
+			Type:             cty.String,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := []byte(args[0].AsString())
+		out := make([]byte, len(in))
+		pos := len(out)
+
+		inB := []byte(in)
+		for i := 0; i < len(in); {
+			d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
+			cluster := in[i : i+d]
+			pos -= len(cluster)
+			copy(out[pos:], cluster)
+			i += d
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+var StrlenFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "str",
+			Type:             cty.String,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := args[0].AsString()
+		l := 0
+
+		inB := []byte(in)
+		for i := 0; i < len(in); {
+			d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
+			l++
+			i += d
+		}
+
+		return cty.NumberIntVal(int64(l)), nil
+	},
+})
+
+var SubstrFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "str",
+			Type:             cty.String,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "offset",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "length",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := []byte(args[0].AsString())
+		var offset, length int
+
+		var err error
+		err = gocty.FromCtyValue(args[1], &offset)
+		if err != nil {
+			return cty.NilVal, err
+		}
+		err = gocty.FromCtyValue(args[2], &length)
+		if err != nil {
+			return cty.NilVal, err
+		}
+
+		if offset < 0 {
+			totalLenNum, err := Strlen(args[0])
+			if err != nil {
+				// should never happen
+				panic("Strlen returned an error")
+			}
+
+			var totalLen int
+			err = gocty.FromCtyValue(totalLenNum, &totalLen)
+			if err != nil {
+				// should never happen
+				panic("Strlen returned a non-int number")
+			}
+
+			offset += totalLen
+		}
+
+		sub := in
+		pos := 0
+		var i int
+
+		// First we'll seek forward to our offset
+		if offset > 0 {
+			for i = 0; i < len(sub); {
+				d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+				i += d
+				pos++
+				if pos == offset {
+					break
+				}
+				if i >= len(in) {
+					return cty.StringVal(""), nil
+				}
+			}
+
+			sub = sub[i:]
+		}
+
+		if length < 0 {
+			// Taking the remainder of the string is a fast path since
+			// we can just return the rest of the buffer verbatim.
+			return cty.StringVal(string(sub)), nil
+		}
+
+		// Otherwise we need to start seeking forward again until we
+		// reach the length we want.
+		pos = 0
+		for i = 0; i < len(sub); {
+			d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+			i += d
+			pos++
+			if pos == length {
+				break
+			}
+		}
+
+		sub = sub[:i]
+
+		return cty.StringVal(string(sub)), nil
+	},
+})
+
+// Upper is a Function that converts a given string to uppercase.
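+//
+// A minimal usage sketch (an illustrative example, assuming this package is
+// imported as "stdlib"):
+//
+//	v, err := stdlib.Upper(cty.StringVal("hello"))
+//	// v is cty.StringVal("HELLO") and err is nil.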
+func Upper(str cty.Value) (cty.Value, error) { + return UpperFunc.Call([]cty.Value{str}) +} + +// Lower is a Function that converts a given string to lowercase. +func Lower(str cty.Value) (cty.Value, error) { + return LowerFunc.Call([]cty.Value{str}) +} + +// Reverse is a Function that reverses the order of the characters in the +// given string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Reverse(str cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{str}) +} + +// Strlen is a Function that returns the length of the given string in +// characters. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Strlen(str cty.Value) (cty.Value, error) { + return StrlenFunc.Call([]cty.Value{str}) +} + +// Substr is a Function that extracts a sequence of characters from another +// string and creates a new string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +// +// The "offset" index may be negative, in which case it is relative to the +// end of the given string. +// +// The "length" may be -1, in which case the remainder of the string after +// the given offset will be returned. +func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return SubstrFunc.Call([]cty.Value{str, offset, length}) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go new file mode 100644 index 00000000000..3495550af58 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go @@ -0,0 +1,31 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Unpredictable wraps a given function such that it retains the same arguments +// and type checking behavior but will return an unknown value when called. +// +// It is recommended that most functions be "pure", which is to say that they +// will always produce the same value given particular input. However, +// sometimes it is necessary to offer functions whose behavior depends on +// some external state, such as reading a file or determining the current time. +// In such cases, an unpredictable wrapper might be used to stand in for +// the function during some sort of prior "checking" phase in order to delay +// the actual effect until later. +// +// While Unpredictable can support a function that isn't pure in its +// implementation, it still expects a function to be pure in its type checking +// behavior, except for the special case of returning cty.DynamicPseudoType +// if it is not yet able to predict its return value based on current argument +// information. 
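+//
+// A minimal usage sketch (an illustrative example, assuming this package is
+// imported as "function"; "timestampFunc" is a hypothetical impure Function
+// defined elsewhere):
+//
+//	checkFunc := function.Unpredictable(timestampFunc)
+//	v, err := checkFunc.Call(nil)
+//	// v is an unknown value of the function's declared return type, and
+//	// the wrapped implementation is never invoked.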
+func Unpredictable(f Function) Function {
+	newSpec := *f.spec // shallow copy
+	newSpec.Impl = unpredictableImpl
+	return New(&newSpec)
+}
+
+func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) {
+	return cty.UnknownVal(retType), nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gob.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gob.go
new file mode 100644
index 00000000000..a77dace27ef
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gob.go
@@ -0,0 +1,125 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"math/big"
+)
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Values to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent values of capsule types in gob,
+// because the types themselves cannot be represented.
+func (val Value) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gv := gobValue{
+		Version: 0,
+		Ty:      val.ty,
+		V:       val.v,
+	}
+
+	err := enc.Encode(gv)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Value: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// inverts the operation performed by GobEncode. See the documentation of
+// GobEncode for considerations when using cty.Value instances with gob.
+func (val *Value) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gv gobValue
+	err := dec.Decode(&gv)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Value: %s", err)
+	}
+	if gv.Version != 0 {
+		return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
+	}
+
+	// big.Float seems to, for some reason, lose its "pointerness" when we
+	// round-trip it, so we'll fix that here.
+	if bf, ok := gv.V.(big.Float); ok {
+		gv.V = &bf
+	}
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+func (t *capsuleType) GobDecode(buf []byte) error {
+	return fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
new file mode 100644
index 00000000000..a5177d22b27
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
@@ -0,0 +1,7 @@
+// Package gocty deals with converting between cty Values and native go
+// values.
+//
+// It operates under a similar principle to the encoding/json and
+// encoding/xml packages in the standard library, using reflection to
+// populate native Go data structures from cty values and vice-versa.
+package gocty
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
new file mode 100644
index 00000000000..94ffd2fb74a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
@@ -0,0 +1,43 @@
+package gocty
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+var valueType = reflect.TypeOf(cty.Value{})
+var typeType = reflect.TypeOf(cty.Type{})
+
+var setType = reflect.TypeOf(set.Set{})
+
+var bigFloatType = reflect.TypeOf(big.Float{})
+var bigIntType = reflect.TypeOf(big.Int{})
+
+var emptyInterfaceType = reflect.TypeOf(interface{}(nil))
+
+var stringType = reflect.TypeOf("")
+
+// structTagIndices interrogates the fields of the given type (which must
+// be a struct type, or we'll panic) and returns a map from the cty
+// attribute names declared via struct tags to the indices of the
+// fields holding those tags.
+//
+// This function will panic if two fields within the struct are tagged with
+// the same cty attribute name.
+func structTagIndices(st reflect.Type) map[string]int {
+	ct := st.NumField()
+	ret := make(map[string]int, ct)
+
+	for i := 0; i < ct; i++ {
+		field := st.Field(i)
+		attrName := field.Tag.Get("cty")
+		if attrName != "" {
+			ret[attrName] = i
+		}
+	}
+
+	return ret
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/in.go
new file mode 100644
index 00000000000..ca9de21d2e1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/in.go
@@ -0,0 +1,548 @@
+package gocty
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// ToCtyValue produces a cty.Value from a Go value. The result will conform
+// to the given type, or an error will be returned if this is not possible.
+//
+// The target type serves as a hint to resolve ambiguities in the mapping.
+// For example, the Go type set.Set tells us that the value is a set but
+// does not describe the set's element type.
This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. +func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case 
reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/out.go new file mode 100644 index 00000000000..e9c2599e6c1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
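To illustrate the null and pointer-population rules above, a minimal sketch (imports as before): a null decodes cleanly only when the target has a pointer level that can be set to nil.

func exampleNulls() {
	// Pointer target: the final pointer is set to nil.
	var s *string
	_ = gocty.FromCtyValue(cty.NullVal(cty.String), &s) // s == nil, no error

	// Non-pointer target: there is nothing to set to nil, so this fails.
	var s2 string
	err := gocty.FromCtyValue(cty.NullVal(cty.String), &s2)
	fmt.Println(err) // null value is not allowed
}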
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go new file mode 100644 index 00000000000..ce4c8f1e9f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
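For example, a cty-tagged struct implies an object type; a sketch with a hypothetical webConfig type (imports as before):

type webConfig struct {
	Host  string `cty:"host"`
	Ports []int  `cty:"ports"`
}

func exampleImpliedType() {
	ty, err := gocty.ImpliedType(webConfig{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.Object(map[string]cty.Type{
		"host":  cty.String,
		"ports": cty.List(cty.Number),
	}))) // true
}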
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/helper.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 00000000000..1b88e9fa082 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
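The observable effect of this short-circuiting, seen through the public operation API rather than these unexported helpers (a sketch):

func exampleShortCircuit() {
	// An unknown operand short-circuits to an unknown result.
	sum := cty.NumberIntVal(1).Add(cty.UnknownVal(cty.Number))
	fmt.Println(sum.IsKnown()) // false

	// A dynamic operand is "upgraded" to an unknown of the operation's
	// result type, since Add is defined to always return a number.
	dyn := cty.NumberIntVal(1).Add(cty.DynamicVal)
	fmt.Println(dyn.IsKnown(), dyn.Type().FriendlyName()) // false number
}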
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// forceShortCircuitType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgraded" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. +func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json.go new file mode 100644 index 00000000000..c421a62ed94 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality.
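A sketch of the resulting serialization for a compound type, round-tripped through the two methods below and written from a caller's perspective:

func exampleTypeJSON() {
	buf, err := cty.Map(cty.List(cty.String)).MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ["map",["list","string"]]

	var ty cty.Type
	if err := ty.UnmarshalJSON(buf); err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.Map(cty.List(cty.String)))) // true
}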
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/doc.go new file mode 100644 index 00000000000..8916513d673 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/marshal.go new file mode 100644 index 00000000000..f7bea1a2ff6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/marshal.go @@ -0,0 +1,189 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
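From the caller's side, the usual pattern is to pass the same cty.Type to both halves of the round trip; a sketch, assuming the import alias ctyjson for this package:

func exampleJSONRoundTrip() {
	ty := cty.Object(map[string]cty.Type{
		"name":    cty.String,
		"enabled": cty.Bool,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("web"),
		"enabled": cty.True,
	})

	buf, err := ctyjson.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"enabled":true,"name":"web"} (attributes sorted by name)

	got, err := ctyjson.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(val)) // true
}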
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
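What that wrapper looks like on the wire, as a sketch (ctyjson is again an assumed alias for this package):

func exampleDynamicJSON() {
	// Marshalling against cty.DynamicPseudoType stores the type alongside
	// the value so Unmarshal can recover it.
	buf, err := ctyjson.Marshal(cty.StringVal("hi"), cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"value":"hi","type":"string"}

	val, err := ctyjson.Unmarshal(buf, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(val.RawEquals(cty.StringVal("hi"))) // true
}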
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { + typeJSON, err := MarshalType(val.Type()) + if err != nil { + return path.NewErrorf("failed to serialize type: %s", err) + } + b.WriteString(`{"value":`) + marshal(val, val.Type(), path, b) + b.WriteString(`,"type":`) + b.Write(typeJSON) + b.WriteRune('}') + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/simple.go new file mode 100644 index 00000000000..507c9cc2c60 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/simple.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/zclconf/go-cty/cty" +) + +// SimpleJSONValue is a wrapper around cty.Value that adds implementations of +// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic +// encoding and decoding of values. +// +// The couplet Marshal and Unmarshal both take extra type information to +// inform the encoding and decoding process so that all of the cty types +// can be represented even though JSON's type system is a subset. +// +// SimpleJSONValue instead takes the approach of discarding the value's type +// information and then deriving a new type from the stored structure when +// decoding. This results in the same data being returned but not necessarily +// with exactly the same type. +// +// For information on how types are inferred when decoding, see the +// documentation of the function ImpliedType. +type SimpleJSONValue struct { + cty.Value +} + +// MarshalJSON is an implementation of json.Marshaler. See the documentation +// of SimpleJSONValue for more information. +func (v SimpleJSONValue) MarshalJSON() ([]byte, error) { + return Marshal(v.Value, v.Type()) +} + +// UnmarshalJSON is an implementation of json.Unmarshaler. See the +// documentation of SimpleJSONValue for more information. +func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error { + t, err := ImpliedType(buf) + if err != nil { + return err + } + v.Value, err = Unmarshal(buf, t) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type.go new file mode 100644 index 00000000000..9131c6c7743 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type.go @@ -0,0 +1,23 @@ +package json + +import ( + "github.com/zclconf/go-cty/cty" +) + +// MarshalType returns a JSON serialization of the given type. +// +// This is just a thin wrapper around t.MarshalJSON, for symmetry with +// UnmarshalType. +func MarshalType(t cty.Type) ([]byte, error) { + return t.MarshalJSON() +} + +// UnmarshalType decodes a JSON serialization of the given type as produced +// by either Type.MarshalJSON or MarshalType. +// +// This is a convenience wrapper around Type.UnmarshalJSON. 
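A sketch of SimpleJSONValue embedded in an ordinary struct handled by encoding/json; the surrounding document type is hypothetical, and the list deliberately comes back as a tuple to show the type-lossiness described above:

type document struct {
	Name  string                  `json:"name"`
	Value ctyjson.SimpleJSONValue `json:"value"`
}

func exampleSimpleJSONValue() {
	in := document{
		Name: "example",
		Value: ctyjson.SimpleJSONValue{Value: cty.ListVal([]cty.Value{
			cty.NumberIntVal(1), cty.NumberIntVal(2),
		})},
	}

	buf, err := json.Marshal(in) // encoding/json
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"name":"example","value":[1,2]}

	var out document
	if err := json.Unmarshal(buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Value.Type().FriendlyName()) // tuple, not list
}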
+func UnmarshalType(buf []byte) (cty.Type, error) { + var t cty.Type + err := t.UnmarshalJSON(buf) + return t, err +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go new file mode 100644 index 00000000000..0fa13f6c53c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go @@ -0,0 +1,170 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// JSON-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary JSON without explicit cty Type +// information. +// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. 
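The rules above in action, as a sketch (ctyjson aliasing this package):

func exampleImpliedJSONType() {
	ty, err := ctyjson.ImpliedType([]byte(`{"name":"web","ports":[80,443],"tags":null}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.Object(map[string]cty.Type{
		"name":  cty.String,                                    // JSON string
		"ports": cty.Tuple([]cty.Type{cty.Number, cty.Number}), // JSON array
		"tags":  cty.DynamicPseudoType,                         // JSON null
	}))) // true
}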
+ + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. + + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go new file mode 100644 index 00000000000..38106455fed --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: 
+ return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too 
many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := path // some errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, 
path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + return dec +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/value.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/value.go new file mode 100644 index 00000000000..f2f7dd56c77 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/json/value.go @@ -0,0 +1,65 @@ +package json + +import ( + "bytes" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Marshal produces a JSON representation of the given value that can later +// be decoded into a value of the given type. +// +// A type is specified separately to allow for the given type to include +// cty.DynamicPseudoType to represent situations where any type is permitted +// and so type information must be included to allow recovery of the stored +// structure when decoding. +// +// The given type will also be used to attempt automatic conversions of any +// non-conformant types in the given value, although this will not always +// be possible. If the value cannot be made to be conformant then an error is +// returned, which may be a cty.PathError. +// +// Capsule-typed values can be marshalled, but with some caveats. Since +// capsule values are compared by pointer equality, it is impossible to recover +// a value that will compare equal to the original value. Additionally, +// it's not possible to JSON-serialize the capsule type itself, so it's not +// valid to use capsule types within parts of the value that are conformed to +// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as +// the encapsulated type itself is serializable with the Marshal function +// in encoding/json. +func Marshal(val cty.Value, t cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(t) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, t) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + buf := &bytes.Buffer{} + var path cty.Path + err := marshal(val, t, path, buf) + + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Unmarshal decodes a JSON representation of the given value into a cty Value +// conforming to the given type. +// +// While decoding, type conversions will be done where possible to make +// the result conformant even if the types given in JSON are not exactly +// correct. 
If conversion isn't possible then an error is returned, which +// may be a cty.PathError. +func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) { + var path cty.Path + return unmarshal(buf, t, path) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/list_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/list_type.go new file mode 100644 index 00000000000..2ef02a12f3e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// typeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a list type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver. +func (t typeList) Equals(other Type) bool { + ot, isList := other.typeImpl.(typeList) + if !isList { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeList) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "list of " + elemName +} + +func (t typeList) ElementType() Type { + return t.ElementTypeT +} + +func (t typeList) GoString() string { + return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT) +} + +// IsListType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsListType() bool { + _, ok := t.typeImpl.(typeList) + return ok +} + +// ListElementType is a convenience method that checks if the given type is +// a list type, returning a pointer to its element type if so and nil +// otherwise. This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.ListElementType(); et != nil { +// // Do something with *et +// } +func (t Type) ListElementType() *Type { + if lt, ok := t.typeImpl.(typeList); ok { + return &lt.ElementTypeT + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/map_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/map_type.go new file mode 100644 index 00000000000..82d36c6282a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/map_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// typeMap instances represent specific map types. Each distinct ElementType +// creates a distinct, non-equal map type. +type typeMap struct { + typeImplSigil + ElementTypeT Type +} + +// Map creates a map type with the given element Type. +// +// Map types are CollectionType implementations. +func Map(elem Type) Type { + return Type{ + typeMap{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a map whose element type is +// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool { + ot, isMap := other.typeImpl.(typeMap) + if !isMap { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "map of " + elemName +} + +func (t typeMap) ElementType() Type { + return t.ElementTypeT +} + +func (t typeMap) GoString() string { + return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT) +} + +// IsMapType returns true if the given type is a map type, regardless of its +// element type. +func (t Type) IsMapType() bool { + _, ok := t.typeImpl.(typeMap) + return ok +} + +// MapElementType is a convenience method that checks if the given type is +// a map type, returning a pointer to its element type if so and nil +// otherwise. This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.MapElementType(); et != nil { +// // Do something with *et +// } +func (t Type) MapElementType() *Type { + if lt, ok := t.typeImpl.(typeMap); ok { + return &lt.ElementTypeT + } + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go new file mode 100644 index 00000000000..1eb99f28a3f --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go @@ -0,0 +1,14 @@ +// Package msgpack provides functions for serializing cty values in the +// msgpack encoding, and decoding them again. +// +// If the same type information is provided both at encoding and decoding time +// then values can be round-tripped without loss, except for capsule types +// which are not currently supported. +// +// If any unknown values are passed to Marshal then they will be represented +// using a msgpack extension with type code zero, which is understood by +// the Unmarshal function within this package but will not be understood by +// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values +// are used if interoperability with other msgpack implementations is +// required. +package msgpack diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go new file mode 100644 index 00000000000..1b631d0a177 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go @@ -0,0 +1,31 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" +) + +type dynamicVal struct { + Value cty.Value + Path cty.Path +} + +func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) { + // Rather than defining a msgpack-specific serialization of types, + // instead we use the existing JSON serialization.
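A sketch of that dynamic wrapper from the outside, assuming the import alias ctymsgpack for this package:

func exampleMsgpackDynamic() {
	// Encoding against cty.DynamicPseudoType stores the JSON-serialized
	// type next to the value, as a two-element msgpack array.
	buf, err := ctymsgpack.Marshal(cty.True, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}

	val, err := ctymsgpack.Unmarshal(buf, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(val.RawEquals(cty.True)) // true
}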
+ typeJSON, err := dv.Value.Type().MarshalJSON() + if err != nil { + return nil, dv.Path.NewErrorf("failed to serialize type: %s", err) + } + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + enc.EncodeArrayLen(2) + enc.EncodeBytes(typeJSON) + err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go new file mode 100644 index 00000000000..6db0815e443 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go @@ -0,0 +1,8 @@ +package msgpack + +import ( + "math" +) + +var negativeInfinity = math.Inf(-1) +var positiveInfinity = math.Inf(1) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go new file mode 100644 index 00000000000..87b096ca4a2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go @@ -0,0 +1,207 @@ +package msgpack + +import ( + "bytes" + "math/big" + "sort" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Marshal produces a msgpack serialization of the given value that +// can be decoded into the given type later using Unmarshal. +// +// The given value must conform to the given type, or an error will +// be returned. +func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. 
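A sketch of the round trip, including an unknown value that travels as the zero-type extension mentioned in the package comment (ctymsgpack as before):

func exampleMsgpackRoundTrip() {
	ty := cty.Object(map[string]cty.Type{"id": cty.String})
	val := cty.ObjectVal(map[string]cty.Value{"id": cty.UnknownVal(cty.String)})

	buf, err := ctymsgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}

	got, err := ctymsgpack.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.GetAttr("id").IsKnown()) // false: unknownness survived
}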
+ switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
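The number strategy above, observed from the caller's side (a sketch; the printed byte lengths are illustrative only):

func exampleNumberEncoding() {
	intBuf, _ := ctymsgpack.Marshal(cty.NumberIntVal(42), cty.Number)    // msgpack int
	fltBuf, _ := ctymsgpack.Marshal(cty.NumberFloatVal(0.5), cty.Number) // msgpack float64

	// Exact in neither int64 nor float64, so it encodes as a decimal string.
	precise, _ := cty.ParseNumberVal("0.1000000000000000000000000000001")
	strBuf, _ := ctymsgpack.Marshal(precise, cty.Number)

	fmt.Println(len(intBuf), len(fltBuf), len(strBuf))
}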
+func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 00000000000..6f6022e4d5b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go new file mode 100644 index 00000000000..6507bc4be0e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go @@ -0,0 +1,16 @@ +package msgpack + +type unknownType struct{} + +var unknownVal = unknownType{} + +// unknownValBytes is the raw bytes of the msgpack fixext1 value we +// write to represent an unknown value. It's an extension value of +// type zero whose value is irrelevant. Since it's irrelevant, we +// set it to a single byte whose value is also zero, since that's +// the most compact possible representation. 
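Reviewer sketch (illustrative, not part of the vendored files): ImpliedType above recovers a cty.Type from raw msgpack bytes when no explicit type is on hand; per its documented rules, msgpack maps imply object types and the primitive codes map onto cty.String/Number/Bool.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	b, err := msgpack.Marshal(cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"count": cty.NumberIntVal(2),
	}), ty)
	if err != nil {
		panic(err)
	}

	implied, err := msgpack.ImpliedType(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(implied.Equals(ty)) // true: the map implied the same object type
}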
+var unknownValBytes = []byte{0xd4, 0, 0} + +func (uv unknownType) MarshalMsgpack() ([]byte, error) { + return unknownValBytes, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 00000000000..51bb76a8a78 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. +// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+		peek, err := dec.PeekCode()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+
+		if msgpackCodes.IsFixedNum(peek) {
+			rv, err := dec.DecodeInt64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberIntVal(rv), nil
+		}
+
+		switch peek {
+		case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
+			rv, err := dec.DecodeInt64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberIntVal(rv), nil
+		case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
+			rv, err := dec.DecodeUint64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberUIntVal(rv), nil
+		case msgpackCodes.Float, msgpackCodes.Double:
+			rv, err := dec.DecodeFloat64()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return cty.NumberFloatVal(rv), nil
+		default:
+			rv, err := dec.DecodeString()
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			v, err := cty.ParseNumberVal(rv)
+			if err != nil {
+				return cty.DynamicVal, path.NewErrorf("number is required")
+			}
+			return v, nil
+		}
+	case cty.String:
+		rv, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("string is required")
+		}
+		return cty.StringVal(rv), nil
+	default:
+		// should never happen
+		panic("unsupported primitive type")
+	}
+}
+
+func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a list is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.List(ety)), nil
+	case length == 0:
+		return cty.ListValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.ListVal(vals), nil
+}
+
+func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a set is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Set(ety)), nil
+	case length == 0:
+		return cty.SetValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.SetVal(vals), nil
+}
+
+func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeMapLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a map is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Map(ety)), nil
+	case length == 0:
+		return cty.MapValEmpty(ety), nil
+	}
+
+	vals := make(map[string]cty.Value, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		key, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path[:len(path)-1].NewErrorf("non-string key in map")
+		}
+
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.StringVal(key),
+		}
+
+		val, err := unmarshal(dec,
ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Tuple(etys)), nil + case length == 0: + return cty.TupleVal(nil), nil + case length != len(etys): + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Object(atys)), nil + case length == 0: + return cty.ObjectVal(nil), nil + case length != len(atys): + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", + len(atys), length) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + switch { + case length == -1: + return cty.NullVal(cty.DynamicPseudoType), nil + case length != 2: + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/null.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/null.go new file mode 100644 index 00000000000..d58d0287b64 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the precence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker. 
+func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/object_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/object_type.go new file mode 100644 index 00000000000..187d38751b8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. +func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the reciever isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm). 
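Reviewer sketch (illustrative, not part of the vendored files): the object-type accessors in this file panic on misuse, so the intended calling pattern is to guard with IsObjectType and HasAttribute before reaching for AttributeType.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"port": cty.Number,
	})

	// Guard before calling the panicking accessors.
	if ty.IsObjectType() && ty.HasAttribute("port") {
		fmt.Println(ty.AttributeType("port").Equals(cty.Number)) // true
	}
}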
+func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. +func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path.go new file mode 100644 index 00000000000..b31444954d3 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path.go @@ -0,0 +1,250 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// IndexPath is a convenience method to start a new Path with an IndexStep. +func IndexPath(v Value) Path { + return Path{}.Index(v) +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Equals compares 2 Paths for exact equality. 
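Reviewer sketch (illustrative, not part of the vendored files): building a Path with the GetAttr/Index helpers above and walking it into a value with Apply, which is defined further down in this file. The value is made up.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"items": cty.ListVal([]cty.Value{
			cty.StringVal("a"),
			cty.StringVal("b"),
		}),
	})

	// Each helper copies the path, so intermediate values stay valid.
	p := cty.Path{}.GetAttr("items").Index(cty.NumberIntVal(1))

	got, err := p.Apply(val)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.AsString()) // b
}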
+func (p Path) Equals(other Path) bool { + if len(p) != len(other) { + return false + } + + for i := range p { + pv := p[i] + switch pv := pv.(type) { + case GetAttrStep: + ov, ok := other[i].(GetAttrStep) + if !ok || pv != ov { + return false + } + case IndexStep: + ov, ok := other[i].(IndexStep) + if !ok { + return false + } + + if !pv.Key.RawEquals(ov.Key) { + return false + } + default: + // Any invalid steps default to evaluating false. + return false + } + } + + return true + +} + +// HasPrefix determines if the path p contains the provided prefix. +func (p Path) HasPrefix(prefix Path) bool { + if len(prefix) > len(p) { + return false + } + + return p[:len(prefix)].Equals(prefix) +} + +// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. +func GetAttrPath(name string) Path { + return Path{}.GetAttr(name) +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. +// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. 
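Reviewer sketch (illustrative, not part of the vendored files): Equals demands step-for-step identity while HasPrefix matches only the leading steps, which is the distinction that matters when filtering nested paths.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	full := cty.GetAttrPath("network").Index(cty.NumberIntVal(0))
	prefix := cty.GetAttrPath("network")

	fmt.Println(full.HasPrefix(prefix)) // true
	fmt.Println(full.Equals(prefix))    // false: lengths differ
}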
+func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + + switch s.Key.Type() { + case Number: + if !(val.Type().IsListType() || val.Type().IsTupleType()) { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path_set.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path_set.go new file mode 100644 index 00000000000..f1c892b9d9d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/zclconf/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. +func (s PathSet) Add(path Path) { + s.set.Add(path) +} + +// AddAllSteps is like Add but it also adds all of the steps leading to +// the given path. +// +// For example, if given a path representing "foo.bar", it will add both +// "foo" and "bar". +func (s PathSet) AddAllSteps(path Path) { + for i := 1; i <= len(path); i++ { + s.Add(path[:i]) + } +} + +// Has returns true if the given path is in the receiving set. +func (s PathSet) Has(path Path) bool { + return s.set.Has(path) +} + +// List makes and returns a slice of all of the paths in the receiving set, +// in an undefined but consistent order. 
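Reviewer sketch (illustrative, not part of the vendored files): AddAllSteps above records every prefix of a path, which is what makes PathSet useful for answering "is anything at or under this location marked?".

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	deep := cty.GetAttrPath("a").GetAttr("b").GetAttr("c")

	ps := cty.NewPathSet()
	ps.AddAllSteps(deep)

	// The intermediate paths were added too, not just the leaf.
	fmt.Println(ps.Has(cty.GetAttrPath("a")))              // true
	fmt.Println(ps.Has(cty.GetAttrPath("a").GetAttr("b"))) // true
	fmt.Println(len(ps.List()))                            // 3
}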
+func (s PathSet) List() []Path { + if s.Empty() { + return nil + } + ret := make([]Path, 0, s.set.Length()) + for it := s.set.Iterator(); it.Next(); { + ret = append(ret, it.Value().(Path)) + } + return ret +} + +// Remove modifies the receving set to no longer include the given path. +// If the given path was already absent, this is a no-op. +func (s PathSet) Remove(path Path) { + s.set.Remove(path) +} + +// Empty returns true if the length of the receiving set is zero. +func (s PathSet) Empty() bool { + return s.set.Length() == 0 +} + +// Union returns a new set whose contents are the union of the receiver and +// the given other set. +func (s PathSet) Union(other PathSet) PathSet { + return PathSet{ + set: s.set.Union(other.set), + } +} + +// Intersection returns a new set whose contents are the intersection of the +// receiver and the given other set. +func (s PathSet) Intersection(other PathSet) PathSet { + return PathSet{ + set: s.set.Intersection(other.set), + } +} + +// Subtract returns a new set whose contents are those from the receiver with +// any elements of the other given set subtracted. +func (s PathSet) Subtract(other PathSet) PathSet { + return PathSet{ + set: s.set.Subtract(other.set), + } +} + +// SymmetricDifference returns a new set whose contents are the symmetric +// difference of the receiver and the given other set. +func (s PathSet) SymmetricDifference(other PathSet) PathSet { + return PathSet{ + set: s.set.SymmetricDifference(other.set), + } +} + +// Equal returns true if and only if both the receiver and the given other +// set contain exactly the same paths. +func (s PathSet) Equal(other PathSet) bool { + if s.set.Length() != other.set.Length() { + return false + } + // Now we know the lengths are the same we only need to test in one + // direction whether everything in one is in the other. + for it := s.set.Iterator(); it.Next(); { + if !other.set.Has(it.Value()) { + return false + } + } + return true +} + +var crc64Table = crc64.MakeTable(crc64.ISO) + +var indexStepPlaceholder = []byte("#") + +// pathSetRules is an implementation of set.Rules from the set package, +// used internally within PathSet. +type pathSetRules struct { +} + +func (r pathSetRules) Hash(v interface{}) int { + path := v.(Path) + hash := crc64.New(crc64Table) + + for _, rawStep := range path { + switch step := rawStep.(type) { + case GetAttrStep: + // (this creates some garbage converting the string name to a + // []byte, but that's okay since cty is not designed to be + // used in tight loops under memory pressure.) + hash.Write([]byte(step.Name)) + default: + // For any other step type we just append a predefined value, + // which means that e.g. all indexes into a given collection will + // hash to the same value but we assume that collections are + // small and thus this won't hurt too much. + hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. 
+ return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/primitive_type.go new file mode 100644 index 00000000000..7b3d1196cd0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. +type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. 
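Reviewer sketch (illustrative, not part of the vendored files): because the primitive types defined below are comparable singletons, dispatching on them with a plain switch is the documented pattern, guarded by IsPrimitiveType for every other kind of type.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func describe(t cty.Type) string {
	if !t.IsPrimitiveType() {
		return "not primitive"
	}
	// Primitive types are safe to compare with == and to switch on.
	switch t {
	case cty.String:
		return "string"
	case cty.Number:
		return "number"
	case cty.Bool:
		return "bool"
	}
	return "unreachable"
}

func main() {
	fmt.Println(describe(cty.Number))      // number
	fmt.Println(describe(cty.EmptyObject)) // not primitive
}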
+var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/gob.go new file mode 100644 index 00000000000..da2978f655d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. 
+func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 00000000000..4a60494f9d6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,15 @@ +package set + +type Iterator struct { + vals []interface{} + idx int +} + +func (it *Iterator) Value() interface{} { + return it.vals[it.idx] +} + +func (it *Iterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 00000000000..fd1555f2189 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,210 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set. If the set's rules +// implement OrderedRules then the result is ordered per those rules. 
If +// no order is provided, or if it is not a total order, then the iteration +// order is undefined but consistent for a particular version of cty. Do not +// rely on specific ordering between cty releases unless the rules order is a +// total order. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + vals := s.Values() + + return &Iterator{ + vals: vals, + idx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all the values in the set. If the set rules have +// an order then the result is in that order. If no order is provided or if +// it is not a total order then the result order is undefined, but consistent +// for a particular set value within a specific release of cty. +func (s Set) Values() []interface{} { + var ret []interface{} + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIDs := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIDs = append(bucketIDs, id) + } + sort.Ints(bucketIDs) + + for _, bucketID := range bucketIDs { + ret = append(ret, s.vals[bucketID]...) + } + + if orderRules, ok := s.rules.(OrderedRules); ok { + sort.SliceStable(ret, func(i, j int) bool { + return orderRules.Less(ret[i], ret[j]) + }) + } + + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. 
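Reviewer sketch (illustrative, not part of the vendored files): the operations in this file only need a Rules implementation; intRules here is a hypothetical one for bare ints, written just to exercise Union and friends. Both operands must carry the same rules value or the operation panics.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/set"
)

// intRules is a hypothetical Rules implementation for bare ints.
type intRules struct{}

func (intRules) Hash(v interface{}) int           { return v.(int) }
func (intRules) Equivalent(a, b interface{}) bool { return a.(int) == b.(int) }

func main() {
	s1 := set.NewSetFromSlice(intRules{}, []interface{}{1, 2, 3})
	s2 := set.NewSetFromSlice(intRules{}, []interface{}{3, 4})

	union := s1.Union(s2) // same rules value on both sides, so no panic
	fmt.Println(union.Length()) // 5
	fmt.Println(union.Has(4))   // true
}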
+func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 00000000000..51f744b5e9a --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,43 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} + +// OrderedRules is an extension of Rules that can apply a partial order to +// element values. When a set's Rules implements OrderedRules an iterator +// over the set will return items in the order described by the rules. +// +// If the given order is not a total order (that is, some pairs of non-equivalent +// elements do not have a defined order) then the resulting iteration order +// is undefined but consistent for a particular version of cty. The exact +// order in that case is not part of the contract and is subject to change +// between versions. +type OrderedRules interface { + Rules + + // Less returns true if and only if the first argument should sort before + // the second argument. If the second argument should sort before the first + // or if there is no defined order for the values, return false. + Less(interface{}, interface{}) bool +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/set.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/set.go new file mode 100644 index 00000000000..b4fb316f1cc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. 
+// +// Set operations are not optimized to minimize memory pressure. Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. +type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. +func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_helper.go new file mode 100644 index 00000000000..a88ddaffb76 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_helper.go @@ -0,0 +1,126 @@ +package cty + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. 
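Reviewer sketch (illustrative, not part of the vendored files): ValueSet wraps the internal set with cty's own equality rules, so adding an equivalent value twice is a no-op.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	vs := cty.NewValueSet(cty.String)
	vs.Add(cty.StringVal("a"))
	vs.Add(cty.StringVal("b"))
	vs.Add(cty.StringVal("a")) // equivalent to an existing element: no-op

	fmt.Println(vs.Length()) // 2
	for _, v := range vs.Values() {
		fmt.Println(v.AsString())
	}
}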
+func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. +func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +func (s ValueSet) requireElementType(v Value) { + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_internals.go new file mode 100644 index 00000000000..3fd4fb2df63 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -0,0 +1,216 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +var _ set.OrderedRules = setRules{} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
+func (val Value) Hash() int { + hashBytes := makeSetHashBytes(val) + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +// Less is an implementation of set.OrderedRules so that we can iterate over +// set elements in a consistent order, where such an order is possible. +func (r setRules) Less(v1, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less + return false + } + + // Null values always sort after non-null values + if v2v.IsNull() && !v1v.IsNull() { + return true + } else if v1v.IsNull() { + return false + } + // Unknown values always sort after known values + if v1v.IsKnown() && !v2v.IsKnown() { + return true + } else if !v1v.IsKnown() { + return false + } + + switch r.Type { + case String: + // String values sort lexicographically + return v1v.AsString() < v2v.AsString() + case Bool: + // Weird to have a set of bools, but if we do then false sorts before true. + if v2v.True() || !v1v.True() { + return true + } + return false + case Number: + v1f := v1v.AsBigFloat() + v2f := v2v.AsBigFloat() + return v1f.Cmp(v2f) < 0 + default: + // No other types have a well-defined ordering, so we just produce a + // default consistent-but-undefined ordering then. This situation is + // not considered a compatibility constraint; callers should rely only + // on the ordering rules for primitive values. + v1h := makeSetHashBytes(v1v) + v2h := makeSetHashBytes(v2v) + return bytes.Compare(v1h, v2h) < 0 + } +} + +func makeSetHashBytes(val Value) []byte { + var buf bytes.Buffer + appendSetHashBytes(val, &buf) + return buf.Bytes() +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. 
+	if !val.IsKnown() {
+		buf.WriteRune('?')
+		return
+	}
+	if val.IsNull() {
+		buf.WriteRune('~')
+		return
+	}
+
+	switch val.ty {
+	case Number:
+		buf.WriteString(val.v.(*big.Float).String())
+		return
+	case Bool:
+		if val.v.(bool) {
+			buf.WriteRune('T')
+		} else {
+			buf.WriteRune('F')
+		}
+		return
+	case String:
+		buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
+		return
+	}
+
+	if val.ty.IsMapType() {
+		buf.WriteRune('{')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(keyVal, buf)
+			buf.WriteRune(':')
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('}')
+		return
+	}
+
+	if val.ty.IsListType() || val.ty.IsSetType() {
+		buf.WriteRune('[')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune(']')
+		return
+	}
+
+	if val.ty.IsObjectType() {
+		buf.WriteRune('<')
+		attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
+		for attrName := range val.ty.AttributeTypes() {
+			attrNames = append(attrNames, attrName)
+		}
+		sort.Strings(attrNames)
+		for _, attrName := range attrNames {
+			appendSetHashBytes(val.GetAttr(attrName), buf)
+			buf.WriteRune(';')
+		}
+		buf.WriteRune('>')
+		return
+	}
+
+	if val.ty.IsTupleType() {
+		buf.WriteRune('<')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('>')
+		return
+	}
+
+	// should never get down here
+	panic("unsupported type in set hash")
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_type.go
new file mode 100644
index 00000000000..cbc3706f2c4
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/set_type.go
@@ -0,0 +1,72 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeSet struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Set creates a set type with the given element Type.
+//
+// Set types are CollectionType implementations.
+func Set(elem Type) Type {
+	return Type{
+		typeSet{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a set whose element type is
+// equal to that of the receiver.
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
new file mode 100644
index 00000000000..798cacd63a1
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element types.
+func (t Type) IsTupleType() bool {
+	_, ok := t.typeImpl.(typeTuple)
+	return ok
+}
+
+// Length returns the number of elements of the receiving tuple type.
+// Will panic if the receiver isn't a tuple type; use IsTupleType to determine
+// whether this operation will succeed.
+func (t Type) Length() int {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return len(ot.ElemTypes)
+	}
+	panic("Length on non-tuple Type")
+}
+
+// TupleElementType returns the type of the element with the given index. Will
+// panic if the receiver is not a tuple type (use IsTupleType to confirm)
+// or if the index is out of range (use Length to confirm).
+func (t Type) TupleElementType(idx int) Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes[idx]
+	}
+	panic("TupleElementType on non-tuple Type")
+}
+
+// TupleElementTypes returns a slice of the receiving tuple type's element
+// types. Will panic if the receiver is not a tuple type (use IsTupleType
+// to confirm).
+//
+// The returned slice is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the
+// underlying array. For many purposes the element-related methods of Value
+// are more appropriate and more convenient to use.
+func (t Type) TupleElementTypes() []Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes
+	}
+	panic("TupleElementTypes on non-tuple Type")
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type.go
new file mode 100644
index 00000000000..730cb9862ef
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type.go
@@ -0,0 +1,120 @@
+package cty
+
+// Type represents value types within the type system.
+//
+// This is a closed interface type, meaning that only the concrete
+// implementations provided within this package are considered valid.
+type Type struct {
+	typeImpl
+}
+
+type typeImpl interface {
+	// isTypeImpl is a do-nothing method that exists only to express
+	// that a type is an implementation of typeImpl.
+	isTypeImpl() typeImplSigil
+
+	// Equals returns true if the other given Type exactly equals the
+	// receiver Type.
+	Equals(other Type) bool
+
+	// FriendlyName returns a human-friendly *English* name for the given
+	// type.
+	FriendlyName(mode friendlyTypeNameMode) string
+
+	// GoString implements the GoStringer interface from package fmt.
+	GoString() string
+}
+
+// Base implementation of Type to embed into concrete implementations
+// to signal that they are implementations of Type.
+type typeImplSigil struct{}
+
+func (t typeImplSigil) isTypeImpl() typeImplSigil {
+	return typeImplSigil{}
+}
+
+// Equals returns true if the other given Type exactly equals the receiver
+// type.
+func (t Type) Equals(other Type) bool {
+	return t.typeImpl.Equals(other)
+}
+
+// FriendlyName returns a human-friendly *English* name for the given type.
+func (t Type) FriendlyName() string {
+	return t.typeImpl.FriendlyName(friendlyTypeName)
+}
+
+// FriendlyNameForConstraint is similar to FriendlyName except that the
+// result is specialized for describing type _constraints_ rather than types
+// themselves. This is more appropriate when reporting that a particular value
+// does not conform to an expected type constraint.
+//
+// In particular, this function uses the term "any type" to refer to
+// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName.
+func (t Type) FriendlyNameForConstraint() string {
+	return t.typeImpl.FriendlyName(friendlyTypeConstraintName)
+}
+
+// friendlyNameMode is an internal combination of the various FriendlyName*
+// variants that just directly takes a mode, for easy passthrough for
+// recursive name construction.
+func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string {
+	return t.typeImpl.FriendlyName(mode)
+}
+
+// GoString returns a string approximating how the receiver type would be
+// expressed in Go source code.
+func (t Type) GoString() string {
+	if t.typeImpl == nil {
+		return "cty.NilType"
+	}
+
+	return t.typeImpl.GoString()
+}
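Editorial aside: the two naming modes only diverge for type constraints. A sketch, in the same hypothetical cty_test file as the earlier example:

func Example_friendlyNames() {
	ty := cty.Set(cty.DynamicPseudoType)
	fmt.Println(ty.FriendlyName())
	fmt.Println(ty.FriendlyNameForConstraint())
	// Output:
	// set of dynamic
	// set of any single type
}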
+// NilType is an invalid type used when a function is returning an error
+// and has no useful type to return. It should not be used and any methods
+// called on it will panic.
+var NilType = Type{}
+
+// HasDynamicTypes returns true either if the receiver is itself
+// DynamicPseudoType or if it is a compound type whose descendant elements
+// are DynamicPseudoType.
+func (t Type) HasDynamicTypes() bool {
+	switch {
+	case t == DynamicPseudoType:
+		return true
+	case t.IsPrimitiveType():
+		return false
+	case t.IsCollectionType():
+		return false
+	case t.IsObjectType():
+		attrTypes := t.AttributeTypes()
+		for _, at := range attrTypes {
+			if at.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsTupleType():
+		elemTypes := t.TupleElementTypes()
+		for _, et := range elemTypes {
+			if et.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsCapsuleType():
+		return false
+	default:
+		// Should never happen, since above should be exhaustive
+		panic("HasDynamicTypes does not support the given type")
+	}
+}
+
+type friendlyTypeNameMode rune
+
+const (
+	friendlyTypeName           friendlyTypeNameMode = 'N'
+	friendlyTypeConstraintName friendlyTypeNameMode = 'C'
+)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type_conform.go
new file mode 100644
index 00000000000..476eeea87fc
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/type_conform.go
@@ -0,0 +1,139 @@
+package cty
+
+// TestConformance recursively walks the receiver and the given other type and
+// returns nil if the receiver *conforms* to the given type.
+//
+// Type conformance is similar to type equality but has one crucial difference:
+// DynamicPseudoType can be used within the given type to represent that
+// *any* type is allowed.
+//
+// If any non-conformities are found, the returned slice will be non-nil and
+// contain at least one error value. It will be nil if the type is entirely
+// conformant.
+//
+// Note that the special behavior of DynamicPseudoType is the *only* exception
+// to normal type equality. Calling applications may wish to apply their own
+// automatic conversion logic to the given data structure to create a more
+// liberal notion of conformance to a type.
+//
+// Returned errors are usually (but not always) PathError instances that
+// indicate where in the structure the error was found. If a returned error
+// is of that type then the error message is written for (English-speaking)
+// end-users working within the cty type system, not mentioning any Go-oriented
+// implementation details.
+func (t Type) TestConformance(other Type) []error {
+	path := make(Path, 0)
+	var errs []error
+	testConformance(t, other, path, &errs)
+	return errs
+}
+
+func testConformance(given Type, want Type, path Path, errs *[]error) {
+	if want.Equals(DynamicPseudoType) {
+		// anything goes!
+		return
+	}
+
+	if given.Equals(want) {
+		// Any equal types are always conformant
+		return
+	}
+
+	// The remainder of this function is concerned with detecting
+	// and reporting the specific non-conformance, since we wouldn't
+	// have got here if the types were not divergent.
+	// We treat compound structures as special so that we can report
+	// specifically what is non-conforming, rather than simply returning
+	// the entire type names and letting the user puzzle it out.
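+	// (Editorial example, not upstream text: conforming
+	// cty.Object(map[string]cty.Type{"a": cty.String}) against a wanted
+	// cty.Object(map[string]cty.Type{"a": cty.Number}) descends into the
+	// shared attribute and reports one error at path .a:
+	// "number required, but received string".)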
+
+	if given.IsObjectType() && want.IsObjectType() {
+		givenAttrs := given.AttributeTypes()
+		wantAttrs := want.AttributeTypes()
+
+		for k := range givenAttrs {
+			if _, exists := wantAttrs[k]; !exists {
+				*errs = append(
+					*errs,
+					errorf(path, "unsupported attribute %q", k),
+				)
+			}
+		}
+		for k := range wantAttrs {
+			if _, exists := givenAttrs[k]; !exists {
+				*errs = append(
+					*errs,
+					errorf(path, "missing required attribute %q", k),
+				)
+			}
+		}
+
+		path = append(path, nil)
+		pathIdx := len(path) - 1
+
+		for k, wantAttrType := range wantAttrs {
+			if givenAttrType, exists := givenAttrs[k]; exists {
+				path[pathIdx] = GetAttrStep{Name: k}
+				testConformance(givenAttrType, wantAttrType, path, errs)
+			}
+		}
+
+		path = path[0:pathIdx]
+
+		return
+	}
+
+	if given.IsTupleType() && want.IsTupleType() {
+		givenElems := given.TupleElementTypes()
+		wantElems := want.TupleElementTypes()
+
+		if len(givenElems) != len(wantElems) {
+			*errs = append(
+				*errs,
+				errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)),
+			)
+			return
+		}
+
+		path = append(path, nil)
+		pathIdx := len(path) - 1
+
+		for i, wantElemType := range wantElems {
+			givenElemType := givenElems[i]
+			path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))}
+			testConformance(givenElemType, wantElemType, path, errs)
+		}
+
+		path = path[0:pathIdx]
+
+		return
+	}
+
+	if given.IsListType() && want.IsListType() {
+		path = append(path, IndexStep{Key: UnknownVal(Number)})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	if given.IsMapType() && want.IsMapType() {
+		path = append(path, IndexStep{Key: UnknownVal(String)})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	if given.IsSetType() && want.IsSetType() {
+		path = append(path, IndexStep{Key: UnknownVal(given.ElementType())})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	*errs = append(
+		*errs,
+		errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()),
+	)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/types_to_register.go
new file mode 100644
index 00000000000..e1e220aab3a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/types_to_register.go
@@ -0,0 +1,57 @@
+package cty
+
+import (
+	"encoding/gob"
+	"fmt"
+	"math/big"
+	"strings"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// InternalTypesToRegister is a slice of values that covers all of the
+// internal types used in the representation of cty.Type and cty.Value
+// across all cty Types.
+//
+// This is intended to be used to register these types with encoding
+// packages that require registration of types used in interfaces, such as
+// encoding/gob, thus allowing cty types and values to be included in streams
+// created from those packages. However, registering with gob is not necessary
+// since that is done automatically as a side-effect of importing this package.
+//
+// Callers should not do anything with the values here except pass them on
+// verbatim to a registration function.
+//
+// If the calling application uses Capsule types that wrap local structs either
+// directly or indirectly, these structs may also need to be registered in
+// order to support encoding and decoding of values of these types. That is the
+// responsibility of the calling application.
+var InternalTypesToRegister []interface{}
+
+func init() {
+	InternalTypesToRegister = []interface{}{
+		primitiveType{},
+		typeList{},
+		typeMap{},
+		typeObject{},
+		typeSet{},
+		setRules{},
+		set.Set{},
+		typeTuple{},
+		big.Float{},
+		capsuleType{},
+		[]interface{}(nil),
+		map[string]interface{}(nil),
+	}
+
+	// Register these with gob here, rather than in gob.go, to ensure
+	// that this will always happen after we build the above.
+	for _, tv := range InternalTypesToRegister {
+		typeName := fmt.Sprintf("%T", tv)
+		if strings.HasPrefix(typeName, "cty.") {
+			gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
+		} else {
+			gob.Register(tv)
+		}
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown.go
new file mode 100644
index 00000000000..e54179eb144
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown.go
@@ -0,0 +1,84 @@
+package cty
+
+// unknownType is the placeholder type used for the sigil value representing
+// "Unknown", to make it unambiguously distinct from any other possible value.
+type unknownType struct {
+}
+
+// unknown is a special sigil value that is used as the internal value of a
+// Value to represent that the value is not yet known.
+var unknown interface{} = &unknownType{}
+
+// UnknownVal returns a Value that represents an unknown value of the given
+// type. Unknown values can be used to represent a value that is
+// not yet known. Its meaning is undefined in cty, but it could be used by
+// a calling application to allow partial evaluation.
+//
+// Unknown values can be created of any type. All operations on
+// Unknown values themselves return Unknown.
+func UnknownVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  unknown,
+	}
+}
+
+func (t unknownType) GoString() string {
+	// This is the stringification of our internal unknown marker. The
+	// stringification of the public representation of unknowns is in
+	// Value.GoString.
+	return "cty.unknown"
+}
+
+type pseudoTypeDynamic struct {
+	typeImplSigil
+}
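Editorial aside: a sketch of how unknown values short-circuit comparisons, in the same hypothetical cty_test file as the earlier examples:

func Example_unknownShortCircuit() {
	u := cty.UnknownVal(cty.String)
	fmt.Println(u.IsKnown())
	fmt.Println(u.Equals(cty.StringVal("a")).IsKnown()) // the comparison result is itself unknown
	// Output:
	// false
	// false
}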
+// DynamicPseudoType represents the dynamic pseudo-type.
+//
+// This type can represent situations where a type is not yet known. Its
+// meaning is undefined in cty, but it could be used by a calling
+// application to allow expression type checking with some types not yet known.
+// For example, the application might optimistically permit any operation on
+// values of this type in type checking, allowing a partial type-check result,
+// and then repeat the check when more information is known to get the
+// final, concrete type.
+//
+// It is a pseudo-type because it is used only as a sigil to the calling
+// application. "Unknown" is the only valid value of this pseudo-type, so
+// operations on values of this type will always short-circuit as per
+// the rules for that special value.
+var DynamicPseudoType Type
+
+func (t pseudoTypeDynamic) Equals(other Type) bool {
+	_, ok := other.typeImpl.(pseudoTypeDynamic)
+	return ok
+}
+
+func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string {
+	switch mode {
+	case friendlyTypeConstraintName:
+		return "any type"
+	default:
+		return "dynamic"
+	}
+}
+
+func (t pseudoTypeDynamic) GoString() string {
+	return "cty.DynamicPseudoType"
+}
+
+// DynamicVal is the only valid value of the pseudo-type dynamic.
+// This value can be used as a placeholder where a value or expression's
+// type and value are both unknown, thus allowing partial evaluation. See
+// the docs for DynamicPseudoType for more information.
+var DynamicVal Value
+
+func init() {
+	DynamicPseudoType = Type{
+		pseudoTypeDynamic{},
+	}
+	DynamicVal = Value{
+		ty: DynamicPseudoType,
+		v:  unknown,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go
new file mode 100644
index 00000000000..ba926475ce5
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go
@@ -0,0 +1,64 @@
+package cty
+
+// UnknownAsNull returns a value of the same type as the given value but
+// with any unknown values (including nested values) replaced with null
+// values of the same type.
+//
+// This can be useful if a result is to be serialized in a format that can't
+// represent unknowns, such as JSON, as long as the caller does not need to
+// retain the unknown value information.
+func UnknownAsNull(val Value) Value {
+	ty := val.Type()
+	switch {
+	case val.IsNull():
+		return val
+	case !val.IsKnown():
+		return NullVal(ty)
+	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
+		length := val.LengthInt()
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make([]Value, 0, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, v := it.Element()
+			vals = append(vals, UnknownAsNull(v))
+		}
+		switch {
+		case ty.IsListType():
+			return ListVal(vals)
+		case ty.IsTupleType():
+			return TupleVal(vals)
+		default:
+			return SetVal(vals)
+		}
+	case ty.IsMapType() || ty.IsObjectType():
+		var length int
+		switch {
+		case ty.IsMapType():
+			length = val.LengthInt()
+		default:
+			length = len(val.Type().AttributeTypes())
+		}
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make(map[string]Value, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			k, v := it.Element()
+			vals[k.AsString()] = UnknownAsNull(v)
+		}
+		switch {
+		case ty.IsMapType():
+			return MapVal(vals)
+		default:
+			return ObjectVal(vals)
+		}
+	}
+
+	return val
+}
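Editorial aside: a sketch of UnknownAsNull flattening nested unknowns, in the same hypothetical cty_test file as above:

func Example_unknownAsNull() {
	v := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.UnknownVal(cty.String),
		"name": cty.StringVal("web"),
	})
	out := cty.UnknownAsNull(v)
	fmt.Println(out.GetAttr("id").IsNull())
	fmt.Println(out.GetAttr("name").AsString())
	// Output:
	// true
	// web
}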
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value.go
new file mode 100644
index 00000000000..80cb8f76f3f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value.go
@@ -0,0 +1,98 @@
+package cty
+
+// Value represents a value of a particular type, and is the interface by
+// which operations are executed on typed values.
+//
+// Value has two different classes of method. Operation methods stay entirely
+// within the type system (methods accept and return Value instances) and
+// are intended for use in implementing a language in terms of cty, while
+// integration methods either enter or leave the type system, working with
+// native Go values. Operation methods are guaranteed to support all of the
+// expected short-circuit behavior for unknown and dynamic values, while
+// integration methods may not.
+//
+// The philosophy for the operations API is that it's the caller's
+// responsibility to ensure that the given types and values satisfy the
+// specified invariants during a separate type check, so that the caller is
+// able to return errors to its user from the application's own perspective.
+//
+// Consequently the design of these methods assumes such checks have already
+// been done and panics if any invariants turn out not to be satisfied. These
+// panic errors are not intended to be handled, but rather indicate a bug in
+// the calling application that should be fixed with more checks prior to
+// executing operations.
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be a
+// number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor UnknownVal(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
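Editorial aside: a sketch contrasting IsKnown with IsWhollyKnown, which is defined just below (same hypothetical cty_test file as above):

func Example_whollyKnown() {
	v := cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)})
	fmt.Println(v.IsKnown())       // the list itself is known...
	fmt.Println(v.IsWhollyKnown()) // ...but one of its elements is not
	// Output:
	// true
	// false
}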
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values.
+func (val Value) IsWhollyKnown() bool {
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_init.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_init.go
new file mode 100644
index 00000000000..3deeba3bd3f
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_init.go
@@ -0,0 +1,314 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// ParseNumberVal returns a Value of type number produced by parsing the given
+// string as a decimal real number. To ensure that two identical strings will
+// always produce an equal number, always use this function to derive a number
+// from a string; it will ensure that the precision and rounding mode for the
+// internal big decimal is configured in a consistent way.
+//
+// If the given string cannot be parsed as a number, the returned error has
+// the message "a number is required", making it suitable to return to an
+// end-user to signal a type conversion error.
+//
+// If the given string contains a number that becomes a recurring fraction
+// when expressed in binary then it will be truncated to have a 512-bit
+// mantissa. Note that this is a higher precision than that of a float64,
+// so converting the same decimal number first to float64 and then calling
+// NumberFloatVal will not produce an equal result; the conversion first
+// to float64 will round the mantissa to fewer than 512 bits.
+func ParseNumberVal(s string) (Value, error) {
+	// Base 10, precision 512, and rounding to nearest even is the standard
+	// way to handle numbers arriving as strings.
+	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
+	if err != nil {
+		return NilVal, fmt.Errorf("a number is required")
+	}
+	return NumberVal(f), nil
+}
+
+// MustParseNumberVal is like ParseNumberVal but it will panic in case of any
+// error. It can be used during initialization or any other situation where
+// the given string is a constant or otherwise known to be correct by the
+// caller.
+func MustParseNumberVal(s string) Value {
+	ret, err := ParseNumberVal(s)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
+func NumberIntVal(v int64) Value {
+	return NumberVal(new(big.Float).SetInt64(v))
+}
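Editorial aside: a sketch of the precision caveat described above; parsing a decimal string keeps a 512-bit mantissa, while going through float64 rounds first (same hypothetical cty_test file as above):

func Example_numberPrecision() {
	a := cty.MustParseNumberVal("0.1")
	b := cty.NumberFloatVal(0.1) // rounded to 53 bits before wrapping
	fmt.Println(a.Equals(b).True())
	// Output:
	// false
}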
+// NumberUIntVal returns a Value of type Number whose internal value is equal
+// to the given unsigned integer.
+func NumberUIntVal(v uint64) Value {
+	return NumberVal(new(big.Float).SetUint64(v))
+}
+
+// NumberFloatVal returns a Value of type Number whose internal value is
+// equal to the given float.
+func NumberFloatVal(v float64) Value {
+	return NumberVal(new(big.Float).SetFloat64(v))
+}
+
+// StringVal returns a Value of type String whose internal value is the
+// given string.
+//
+// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and
+// they are NFC-normalized on entry into the world of cty values.
+//
+// If the given string is not valid UTF-8 then behavior of string operations
+// is undefined.
+func StringVal(v string) Value {
+	return Value{
+		ty: String,
+		v:  NormalizeString(v),
+	}
+}
+
+// NormalizeString applies the same normalization that cty applies when
+// constructing string values.
+//
+// A return value from this function can be meaningfully compared byte-for-byte
+// with a Value.AsString result.
+func NormalizeString(s string) string {
+	return norm.NFC.String(s)
+}
+
+// ObjectVal returns a Value of an object type whose structure is defined
+// by the key names and value types in the given map.
+func ObjectVal(attrs map[string]Value) Value {
+	attrTypes := make(map[string]Type, len(attrs))
+	attrVals := make(map[string]interface{}, len(attrs))
+
+	for attr, val := range attrs {
+		attr = NormalizeString(attr)
+		attrTypes[attr] = val.ty
+		attrVals[attr] = val.v
+	}
+
+	return Value{
+		ty: Object(attrTypes),
+		v:  attrVals,
+	}
+}
+
+// TupleVal returns a Value of a tuple type whose element types are
+// defined by the value types in the given slice.
+func TupleVal(elems []Value) Value {
+	elemTypes := make([]Type, len(elems))
+	elemVals := make([]interface{}, len(elems))
+
+	for i, val := range elems {
+		elemTypes[i] = val.ty
+		elemVals[i] = val.v
+	}
+
+	return Value{
+		ty: Tuple(elemTypes),
+		v:  elemVals,
+	}
+}
+
+// ListVal returns a Value of list type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given list is empty, since then the element type cannot be inferred.
+// (See also ListValEmpty.)
+func ListVal(vals []Value) Value {
+	if len(vals) == 0 {
+		panic("must not call ListVal with empty slice")
+	}
+	elementType := DynamicPseudoType
+	rawList := make([]interface{}, len(vals))
+
+	for i, val := range vals {
+		if elementType == DynamicPseudoType {
+			elementType = val.ty
+		} else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
+			panic(fmt.Errorf(
+				"inconsistent list element types (%#v then %#v)",
+				elementType, val.ty,
+			))
+		}
+
+		rawList[i] = val.v
+	}
+
+	return Value{
+		ty: List(elementType),
+		v:  rawList,
+	}
+}
+
+// ListValEmpty returns an empty list of the given element type.
+func ListValEmpty(element Type) Value {
+	return Value{
+		ty: List(element),
+		v:  []interface{}{},
+	}
+}
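Editorial aside: a sketch of the NFC normalization that StringVal applies on entry (same hypothetical cty_test file as above):

func Example_stringNormalization() {
	x := cty.StringVal("e\u0301") // 'e' followed by a combining acute accent
	y := cty.StringVal("\u00e9")  // precomposed 'é'
	fmt.Println(x.Equals(y).True())
	// Output:
	// true
}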
+// MapVal returns a Value of a map type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given map is empty, since then the element type cannot be inferred.
+// (See also MapValEmpty.)
+func MapVal(vals map[string]Value) Value {
+	if len(vals) == 0 {
+		panic("must not call MapVal with empty map")
+	}
+	elementType := DynamicPseudoType
+	rawMap := make(map[string]interface{}, len(vals))
+
+	for key, val := range vals {
+		if elementType == DynamicPseudoType {
+			elementType = val.ty
+		} else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
+			panic(fmt.Errorf(
+				"inconsistent map element types (%#v then %#v)",
+				elementType, val.ty,
+			))
+		}
+
+		rawMap[NormalizeString(key)] = val.v
+	}
+
+	return Value{
+		ty: Map(elementType),
+		v:  rawMap,
+	}
+}
+
+// MapValEmpty returns an empty map of the given element type.
+func MapValEmpty(element Type) Value {
+	return Value{
+		ty: Map(element),
+		v:  map[string]interface{}{},
+	}
+}
+
+// SetVal returns a Value of set type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given list is empty, since then the element type cannot be inferred.
+// (See also SetValEmpty.)
+func SetVal(vals []Value) Value {
+	if len(vals) == 0 {
+		panic("must not call SetVal with empty slice")
+	}
+	elementType := DynamicPseudoType
+	rawList := make([]interface{}, len(vals))
+
+	for i, val := range vals {
+		if elementType == DynamicPseudoType {
+			elementType = val.ty
+		} else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
+			panic(fmt.Errorf(
+				"inconsistent set element types (%#v then %#v)",
+				elementType, val.ty,
+			))
+		}
+
+		rawList[i] = val.v
+	}
+
+	rawVal := set.NewSetFromSlice(setRules{elementType}, rawList)
+
+	return Value{
+		ty: Set(elementType),
+		v:  rawVal,
+	}
+}
+
+// SetValFromValueSet returns a Value of set type based on an already-constructed
+// ValueSet.
+//
+// The element type of the returned value is the element type of the given
+// set.
+func SetValFromValueSet(s ValueSet) Value {
+	ety := s.ElementType()
+	rawVal := s.s.Copy() // copy so caller can't mutate what we wrap
+
+	return Value{
+		ty: Set(ety),
+		v:  rawVal,
+	}
+}
+
+// SetValEmpty returns an empty set of the given element type.
+func SetValEmpty(element Type) Value {
+	return Value{
+		ty: Set(element),
+		v:  set.NewSet(setRules{element}),
+	}
+}
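Editorial aside: a sketch showing that the set rules deduplicate equivalent values (same hypothetical cty_test file as above):

func Example_setDeduplication() {
	s := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})
	fmt.Println(s.LengthInt())
	// Output:
	// 1
}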
+// CapsuleVal creates a value of the given capsule type using the given
+// wrapVal, which must be a pointer to a value of the capsule type's native
+// type.
+//
+// This function will panic if the given type is not a capsule type, if
+// the given wrapVal is not compatible with the given capsule type, or if
+// wrapVal is not a pointer.
+func CapsuleVal(ty Type, wrapVal interface{}) Value {
+	if !ty.IsCapsuleType() {
+		panic("not a capsule type")
+	}
+
+	wv := reflect.ValueOf(wrapVal)
+	if wv.Kind() != reflect.Ptr {
+		panic("wrapVal is not a pointer")
+	}
+
+	it := ty.typeImpl.(*capsuleType).GoType
+	if !wv.Type().Elem().AssignableTo(it) {
+		panic("wrapVal target is not compatible with the given capsule type")
+	}
+
+	return Value{
+		ty: ty,
+		v:  wrapVal,
+	}
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_ops.go
new file mode 100644
index 00000000000..afd621cf4ff
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/value_ops.go
@@ -0,0 +1,1138 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// GoString is an implementation of fmt.GoStringer that produces concise
+// source-like representations of values suitable for use in debug messages.
+func (val Value) GoString() string {
+	if val == NilVal {
+		return "cty.NilVal"
+	}
+
+	if val.IsNull() {
+		return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
+	}
+	if val == DynamicVal { // is unknown, so must be before the IsKnown check below
+		return "cty.DynamicVal"
+	}
+	if !val.IsKnown() {
+		return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
+	}
+
+	// By the time we reach here we've dealt with all of the exceptions around
+	// unknowns and nulls, so we're guaranteed that the values are the
+	// canonical internal representation of the given type.
+
+	switch val.ty {
+	case Bool:
+		if val.v.(bool) {
+			return "cty.True"
+		}
+		return "cty.False"
+	case Number:
+		fv := val.v.(*big.Float)
+		// We'll try to use NumberIntVal or NumberFloatVal if we can, since
+		// the fully-general initializer call is pretty ugly-looking.
+		if fv.IsInt() {
+			return fmt.Sprintf("cty.NumberIntVal(%#v)", fv)
+		}
+		if rfv, accuracy := fv.Float64(); accuracy == big.Exact {
+			return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv)
+		}
+		return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1))
+	case String:
+		return fmt.Sprintf("cty.StringVal(%#v)", val.v)
+	}
+
+	switch {
+	case val.ty.IsSetType():
+		vals := val.AsValueSlice()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.SetVal(%#v)", vals)
+	case val.ty.IsListType():
+		vals := val.AsValueSlice()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.ListVal(%#v)", vals)
+	case val.ty.IsMapType():
+		vals := val.AsValueMap()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.MapVal(%#v)", vals)
+	case val.ty.IsTupleType():
+		if val.ty.Equals(EmptyTuple) {
+			return "cty.EmptyTupleVal"
+		}
+		vals := val.AsValueSlice()
+		return fmt.Sprintf("cty.TupleVal(%#v)", vals)
+	case val.ty.IsObjectType():
+		if val.ty.Equals(EmptyObject) {
+			return "cty.EmptyObjectVal"
+		}
+		vals := val.AsValueMap()
+		return fmt.Sprintf("cty.ObjectVal(%#v)", vals)
+	case val.ty.IsCapsuleType():
+		return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v)
+	}
+
+	// Default exposes implementation details, so should actually cover
+	// all of the cases above for good caller UX.
+	return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v)
+}
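Editorial aside: a sketch of capsule values and their pointer-identity equality, assuming the cty.Capsule constructor from capsule.go, which is not part of this hunk (same hypothetical cty_test file as above, with "reflect" also imported):

type thing struct{ N int }

var thingType = cty.Capsule("thing", reflect.TypeOf(thing{}))

func Example_capsuleIdentity() {
	p := &thing{N: 2}
	v := cty.CapsuleVal(thingType, p)
	fmt.Println(v.Equals(cty.CapsuleVal(thingType, p)).True())            // same pointer
	fmt.Println(v.Equals(cty.CapsuleVal(thingType, &thing{N: 2})).True()) // distinct pointer
	// Output:
	// true
	// false
}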
+// Equals returns True if the receiver and the given other value have the
+// same type and are exactly equal in value.
+//
+// As a special case, two null values are always equal regardless of type.
+//
+// The usual short-circuit rules apply, so the result will be unknown if
+// either of the given values are.
+//
+// Use RawEquals to compare if two values are equal *ignoring* the
+// short-circuit rules and the exception for null values.
+func (val Value) Equals(other Value) Value {
+	// Start by handling Unknown values before considering types.
+	// This needs to be done since Null values are always equal regardless of
+	// type.
+	switch {
+	case !val.IsKnown() && !other.IsKnown():
+		// both unknown
+		return UnknownVal(Bool)
+	case val.IsKnown() && !other.IsKnown():
+		switch {
+		case val.IsNull(), other.ty.HasDynamicTypes():
+			// If known is Null, we need to wait for the unknown value since
+			// nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we need
+			// to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !val.ty.Equals(other.ty):
+			// There is no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	case other.IsKnown() && !val.IsKnown():
+		switch {
+		case other.IsNull(), val.ty.HasDynamicTypes():
+			// If known is Null, we need to wait for the unknown value since
+			// nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we need
+			// to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !other.ty.Equals(val.ty):
+			// There's no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	}
+
+	switch {
+	case val.IsNull() && other.IsNull():
+		// Nulls are always equal, regardless of type
+		return BoolVal(true)
+	case val.IsNull() || other.IsNull():
+		// If only one is null then the result must be false
+		return BoolVal(false)
+	}
+
+	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
+		return UnknownVal(Bool)
+	}
+
+	if !val.ty.Equals(other.ty) {
+		return BoolVal(false)
+	}
+
+	ty := val.ty
+	result := false
+
+	switch {
+	case ty == Number:
+		result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
+	case ty == Bool:
+		result = val.v.(bool) == other.v.(bool)
+	case ty == String:
+		// Simple equality is safe because we NFC-normalize strings as they
+		// enter our world from StringVal, and so we can assume strings are
+		// always in normal form.
+		result = val.v.(string) == other.v.(string)
+	case ty.IsObjectType():
+		oty := ty.typeImpl.(typeObject)
+		result = true
+		for attr, aty := range oty.AttrTypes {
+			lhs := Value{
+				ty: aty,
+				v:  val.v.(map[string]interface{})[attr],
+			}
+			rhs := Value{
+				ty: aty,
+				v:  other.v.(map[string]interface{})[attr],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsTupleType():
+		tty := ty.typeImpl.(typeTuple)
+		result = true
+		for i, ety := range tty.ElemTypes {
+			lhs := Value{
+				ty: ety,
+				v:  val.v.([]interface{})[i],
+			}
+			rhs := Value{
+				ty: ety,
+				v:  other.v.([]interface{})[i],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsListType():
+		ety := ty.typeImpl.(typeList).ElementTypeT
+		if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
+			result = true
+			for i := range val.v.([]interface{}) {
+				lhs := Value{
+					ty: ety,
+					v:  val.v.([]interface{})[i],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.([]interface{})[i],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsSetType():
+		s1 := val.v.(set.Set)
+		s2 := other.v.(set.Set)
+		equal := true
+
+		// Note that by our definition of sets it's never possible for two
+		// sets that contain unknown values (directly or indirectly) to
+		// ever be equal, even if they are otherwise identical.
+
+		// FIXME: iterating both lists and checking each item is not the
+		// ideal implementation here, but it works with the primitives we
+		// have in the set implementation. Perhaps the set implementation
+		// can provide its own equality test later.
+		s1.EachValue(func(v interface{}) {
+			if !s2.Has(v) {
+				equal = false
+			}
+		})
+		s2.EachValue(func(v interface{}) {
+			if !s1.Has(v) {
+				equal = false
+			}
+		})
+
+		result = equal
+	case ty.IsMapType():
+		ety := ty.typeImpl.(typeMap).ElementTypeT
+		if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
+			result = true
+			for k := range val.v.(map[string]interface{}) {
+				if _, ok := other.v.(map[string]interface{})[k]; !ok {
+					result = false
+					break
+				}
+				lhs := Value{
+					ty: ety,
+					v:  val.v.(map[string]interface{})[k],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.(map[string]interface{})[k],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsCapsuleType():
+		// A capsule type's encapsulated value is a pointer to a value of its
+		// native type, so we can just compare these to get the identity test
+		// we need.
+		return BoolVal(val.v == other.v)
+
+	default:
+		// should never happen
+		panic(fmt.Errorf("unsupported value type %#v in Equals", ty))
+	}
+
+	return BoolVal(result)
+}
+
+// NotEqual is a shorthand for Equals followed by Not.
+func (val Value) NotEqual(other Value) Value {
+	return val.Equals(other).Not()
+}
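Editorial aside: a sketch of two Equals rules worth remembering: nulls compare equal across types, and NotEqual is just Equals plus Not (same hypothetical cty_test file as above):

func Example_equalsRules() {
	fmt.Println(cty.NullVal(cty.String).Equals(cty.NullVal(cty.Number)).True())
	fmt.Println(cty.StringVal("a").NotEqual(cty.StringVal("b")).True())
	// Output:
	// true
	// true
}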
+// True returns true if the receiver is True, false if False, and panics if
+// the receiver is not of type Bool.
+//
+// This is a helper function to help write application logic that works with
+// values, rather than a first-class operation. It does not work with unknown
+// or null values. For more robust handling with unknown value
+// short-circuiting, use val.Equals(cty.True).
+func (val Value) True() bool {
+	if val.ty != Bool {
+		panic("not bool")
+	}
+	return val.Equals(True).v.(bool)
+}
+
+// False is the opposite of True.
+func (val Value) False() bool {
+	return !val.True()
+}
+
+// RawEquals returns true if and only if the two given values have the same
+// type and equal value, ignoring the usual short-circuit rules about
+// unknowns and dynamic types.
+//
+// This method is more appropriate for testing than for real use, since it
+// skips over usual semantics around unknowns but as a consequence allows
+// testing the result of another operation that is expected to return unknown.
+// It returns a primitive Go bool rather than a Value to remind us that it
+// is not a first-class value operation.
+func (val Value) RawEquals(other Value) bool {
+	if !val.ty.Equals(other.ty) {
+		return false
+	}
+	if (!val.IsKnown()) && (!other.IsKnown()) {
+		return true
+	}
+	if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) {
+		return false
+	}
+	if val.IsNull() && other.IsNull() {
+		return true
+	}
+	if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) {
+		return false
+	}
+	if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType {
+		return true
+	}
+
+	ty := val.ty
+	switch {
+	case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType:
+		return val.Equals(other).True()
+	case ty.IsObjectType():
+		oty := ty.typeImpl.(typeObject)
+		for attr, aty := range oty.AttrTypes {
+			lhs := Value{
+				ty: aty,
+				v:  val.v.(map[string]interface{})[attr],
+			}
+			rhs := Value{
+				ty: aty,
+				v:  other.v.(map[string]interface{})[attr],
+			}
+			eq := lhs.RawEquals(rhs)
+			if !eq {
+				return false
+			}
+		}
+		return true
+	case ty.IsTupleType():
+		tty := ty.typeImpl.(typeTuple)
+		for i, ety := range tty.ElemTypes {
+			lhs := Value{
+				ty: ety,
+				v:  val.v.([]interface{})[i],
+			}
+			rhs := Value{
+				ty: ety,
+				v:  other.v.([]interface{})[i],
+			}
+			eq := lhs.RawEquals(rhs)
+			if !eq {
+				return false
+			}
+		}
+		return true
+	case ty.IsListType():
+		ety := ty.typeImpl.(typeList).ElementTypeT
+		if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
+			for i := range val.v.([]interface{}) {
+				lhs := Value{
+					ty: ety,
+					v:  val.v.([]interface{})[i],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.([]interface{})[i],
+				}
+				eq := lhs.RawEquals(rhs)
+				if !eq {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	case ty.IsSetType():
+		s1 := val.v.(set.Set)
+		s2 := other.v.(set.Set)
+
+		// Since we're intentionally ignoring our rule that two unknowns
+		// are never equal, we can cheat here.
+		// (This isn't 100% right since e.g. it will fail if the set contains
+		// numbers that are infinite, which DeepEqual can't compare properly.
+		// We're accepting that limitation for simplicity here, since this
+		// function is here primarily for testing.)
+		return reflect.DeepEqual(s1, s2)
+
+	case ty.IsMapType():
+		ety := ty.typeImpl.(typeMap).ElementTypeT
+		if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
+			for k := range val.v.(map[string]interface{}) {
+				if _, ok := other.v.(map[string]interface{})[k]; !ok {
+					return false
+				}
+				lhs := Value{
+					ty: ety,
+					v:  val.v.(map[string]interface{})[k],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.(map[string]interface{})[k],
+				}
+				eq := lhs.RawEquals(rhs)
+				if !eq {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	case ty.IsCapsuleType():
+		// A capsule type's encapsulated value is a pointer to a value of its
+		// native type, so we can just compare these to get the identity test
+		// we need.
+		return val.v == other.v
+
+	default:
+		// should never happen
+		panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty))
+	}
+}
+
+// Add returns the sum of the receiver and the given other value. Both values
+// must be numbers; this method will panic if not.
+func (val Value) Add(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := new(big.Float)
+	ret.Add(val.v.(*big.Float), other.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// Subtract returns receiver minus the given other value. Both values must be
+// numbers; this method will panic if not.
+func (val Value) Subtract(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	return val.Add(other.Negate())
+}
+
+// Negate returns the numeric negative of the receiver, which must be a number.
+// This method will panic when given a value of any other type.
+func (val Value) Negate() Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := new(big.Float).Neg(val.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// Multiply returns the product of the receiver and the given other value.
+// Both values must be numbers; this method will panic if not.
+func (val Value) Multiply(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := new(big.Float)
+	ret.Mul(val.v.(*big.Float), other.v.(*big.Float))
+	return NumberVal(ret)
+}
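Editorial aside: a sketch of the arithmetic helpers, including the divide-by-zero behavior documented just below for Divide (same hypothetical cty_test file as above):

func Example_arithmetic() {
	fmt.Println(cty.NumberIntVal(7).Divide(cty.NumberIntVal(2)).AsBigFloat())
	fmt.Println(cty.NumberIntVal(1).Divide(cty.Zero).RawEquals(cty.PositiveInfinity))
	// Output:
	// 3.5
	// true
}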
+// Divide returns the quotient of the receiver and the given other value.
+// Both values must be numbers; this method will panic if not.
+//
+// If the "other" value is exactly zero, this operation will return either
+// PositiveInfinity or NegativeInfinity, depending on the sign of the
+// receiver value. For some use-cases the presence of infinities may be
+// undesirable, in which case the caller should check whether the
+// other value equals zero before calling and raise an error instead.
+//
+// If both values are zero or infinity, this function will panic with
+// an instance of big.ErrNaN.
+func (val Value) Divide(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := new(big.Float)
+	ret.Quo(val.v.(*big.Float), other.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// Modulo returns the remainder of an integer division of the receiver and
+// the given other value. Both values must be numbers; this method will panic
+// if not.
+//
+// If the "other" value is exactly zero then this operation returns the
+// receiver unchanged, rather than an infinity; callers that need different
+// treatment of a zero divisor should test for it before calling.
+//
+// This operation is primarily here for use with nonzero natural numbers.
+// Modulo with "other" as a non-natural number gets somewhat philosophical,
+// and this function takes a position on what that should mean, but callers
+// may wish to disallow such things outright or implement their own modulo
+// if they disagree with the interpretation used here.
+func (val Value) Modulo(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	// We cheat a bit here with infinities, just abusing the Multiply operation
+	// to get an infinite result of the correct sign.
+	if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity {
+		return val.Multiply(other)
+	}
+
+	if other.RawEquals(Zero) {
+		return val
+	}
+
+	// FIXME: This is a bit clumsy. Should come back later and see if there's a
+	// more straightforward way to do this.
+	rat := val.Divide(other)
+	ratFloorInt := &big.Int{}
+	rat.v.(*big.Float).Int(ratFloorInt)
+	work := (&big.Float{}).SetInt(ratFloorInt)
+	work.Mul(other.v.(*big.Float), work)
+	work.Sub(val.v.(*big.Float), work)
+
+	return NumberVal(work)
+}
+
+// Absolute returns the absolute (signless) value of the receiver, which must
+// be a number or this method will panic.
+func (val Value) Absolute() Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := (&big.Float{}).Abs(val.v.(*big.Float))
+	return NumberVal(ret)
+}
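Editorial aside: a worked sketch of Modulo's interpretation, with results derived by hand from the truncating division above (same hypothetical cty_test file as above):

func Example_modulo() {
	fmt.Println(cty.NumberIntVal(7).Modulo(cty.NumberIntVal(3)).AsBigFloat())
	fmt.Println(cty.NumberFloatVal(7.5).Modulo(cty.NumberIntVal(2)).AsBigFloat())
	// Output:
	// 1
	// 1.5
}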
+// GetAttr returns the value of the given attribute of the receiver, which
+// must be of an object type that has an attribute of the given name.
+// This method will panic if the receiver type is not compatible.
+//
+// The method will also panic if the given attribute name is not defined
+// for the value's type. Use the attribute-related methods on Type to
+// check for the validity of an attribute before trying to use it.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) GetAttr(name string) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+
+	name = NormalizeString(name)
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v:  val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) Index(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v:  val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v:  val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v:  val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
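Editorial aside: a sketch of Index and HasIndex on a list (same hypothetical cty_test file as above):

func Example_indexing() {
	l := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	fmt.Println(l.Index(cty.NumberIntVal(1)).AsString())
	fmt.Println(l.HasIndex(cty.NumberIntVal(5)).True())
	// Output:
	// b
	// false
}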
+
+// HasIndex returns True if the receiver (which must be of a type that
+// supports Index) has an element with the given index key, or False if it
+// does not.
+//
+// The result will be UnknownVal(Bool) if either the collection or the
+// key value is unknown.
+//
+// This method will panic if the receiver is not indexable, but does not
+// impose any panic-causing type constraints on the key.
+func (val Value) HasIndex(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return UnknownVal(Bool)
+	}
+
+	switch {
+	case val.Type().IsListType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
+	case val.Type().IsMapType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != String {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		keyStr := key.v.(string)
+		_, exists := val.v.(map[string]interface{})[keyStr]
+
+		return BoolVal(exists)
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		length := val.Type().Length()
+		return BoolVal(int(index) < length && index >= 0)
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasElement returns True if the receiver (which must be of a set type)
+// has the given value as an element, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the set or the
+// given value is unknown.
+//
+// This method will panic if the receiver is not a set, or if it is a null set.
+func (val Value) HasElement(elem Value) Value {
+	ty := val.Type()
+
+	if !ty.IsSetType() {
+		panic("not a set type")
+	}
+	if !val.IsKnown() || !elem.IsKnown() {
+		return UnknownVal(Bool)
+	}
+	if val.IsNull() {
+		panic("can't call HasElement on a null value")
+	}
+	if !ty.ElementType().Equals(elem.Type()) {
+		return False
+	}
+
+	s := val.v.(set.Set)
+	return BoolVal(s.Has(elem.v))
+}
+
+// Length returns the length of the receiver, which must be a collection type
+// or tuple type, as a number value. If the receiver is not a compatible type
+// then this method will panic.
+//
+// If the receiver is unknown then the result is also unknown.
+//
+// If the receiver is null then this function will panic.
+//
+// Note that Length is not supported for strings. To determine the length
+// of a string, call AsString and take the length of the native Go string
+// that is returned.
+func (val Value) Length() Value {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return NumberIntVal(int64(val.Type().Length()))
+	}
+
+	if !val.IsKnown() {
+		return UnknownVal(Number)
+	}
+
+	return NumberIntVal(int64(val.LengthInt()))
+}
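+
+// Editor's note: an illustrative sketch, not upstream code; the name
+// exampleCollectionQueries is hypothetical. It shows the membership and
+// length queries above on small known collections.
+func exampleCollectionQueries() Value {
+	list := ListVal([]Value{StringVal("a"), StringVal("b")})
+	nums := SetVal([]Value{NumberIntVal(1)})
+
+	hasIdx := list.HasIndex(NumberIntVal(1))    // True
+	hasElem := nums.HasElement(NumberIntVal(1)) // True
+	_, _ = hasIdx, hasElem
+
+	return list.Length() // the number 2
+}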
+
+// LengthInt is like Length except that it returns a native int rather than
+// a cty number, and it will panic if the receiver is unknown.
+//
+// This is an integration method provided for the convenience of code bridging
+// into Go's type system.
+func (val Value) LengthInt() int {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return val.Type().Length()
+	}
+	if val.Type().IsObjectType() {
+		// For objects, the length is the number of attributes associated with the type.
+		return len(val.Type().AttributeTypes())
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool {
+	it := val.ElementIterator()
+	for it.Next() {
+		key, val := it.Element()
+		stop := cb(key, val)
+		if stop {
+			return true
+		}
+	}
+	return false
+}
+
+// Not returns the logical inverse of the receiver, which must be of type
+// Bool or this method will panic.
+func (val Value) Not() Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(!val.v.(bool))
+}
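+
+// Editor's note: an illustrative sketch, not upstream code; the name
+// exampleIterate is hypothetical. It counts elements with ForEachElement,
+// whose callback receives the key/value pairs described above.
+func exampleIterate(list Value) int {
+	n := 0
+	list.ForEachElement(func(key, elem Value) bool {
+		n++          // for a list, key is a Number index
+		return false // false means "keep iterating"
+	})
+	return n
+}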
+
+// And returns the result of logical AND with the receiver and the other given
+// value, both of which must be of type Bool or this method will panic.
+func (val Value) And(other Value) Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(bool) && other.v.(bool))
+}
+
+// Or returns the result of logical OR with the receiver and the other given
+// value, both of which must be of type Bool or this method will panic.
+func (val Value) Or(other Value) Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(bool) || other.v.(bool))
+}
+
+// LessThan returns True if the receiver is less than the other given value;
+// both values must be numbers or this method will panic.
+func (val Value) LessThan(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0)
+}
+
+// GreaterThan returns True if the receiver is greater than the other given
+// value; both values must be numbers or this method will panic.
+func (val Value) GreaterThan(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0)
+}
+
+// LessThanOrEqualTo is equivalent to LessThan and Equals combined with Or.
+func (val Value) LessThanOrEqualTo(other Value) Value {
+	return val.LessThan(other).Or(val.Equals(other))
+}
+
+// GreaterThanOrEqualTo is equivalent to GreaterThan and Equals combined with Or.
+func (val Value) GreaterThanOrEqualTo(other Value) Value {
+	return val.GreaterThan(other).Or(val.Equals(other))
+}
+
+// AsString returns the native string from a non-null, non-unknown cty.String
+// value, or panics if called on any other value.
+func (val Value) AsString() string {
+	if val.ty != String {
+		panic("not a string")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+	if !val.IsKnown() {
+		panic("value is unknown")
+	}
+
+	return val.v.(string)
+}
+
+// AsBigFloat returns a big.Float representation of a non-null, non-unknown
+// cty.Number value, or panics if called on any other value.
+//
+// For more convenient conversions to other native numeric types, use the
+// "gocty" package.
+func (val Value) AsBigFloat() *big.Float {
+	if val.ty != Number {
+		panic("not a number")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+	if !val.IsKnown() {
+		panic("value is unknown")
+	}
+
+	// Copy the float so that callers can't mutate our internal state
+	ret := *(val.v.(*big.Float))
+
+	return &ret
+}
+
+// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown
+// value of any type that CanIterateElements, or panics if called on
+// any other value.
+//
+// For more convenient conversions to slices of more specific types, use
+// the "gocty" package.
+func (val Value) AsValueSlice() []Value {
+	l := val.LengthInt()
+	if l == 0 {
+		return nil
+	}
+
+	ret := make([]Value, 0, l)
+	for it := val.ElementIterator(); it.Next(); {
+		_, v := it.Element()
+		ret = append(ret, v)
+	}
+	return ret
+}
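+
+// Editor's note: an illustrative sketch, not upstream code; the name
+// exampleCompare is hypothetical. Comparisons yield cty Bool values (so
+// unknowns can propagate), which True() converts back to a native bool.
+func exampleCompare() bool {
+	three := NumberIntVal(3)
+	five := NumberIntVal(5)
+	lt := three.LessThan(five)             // True
+	ge := three.GreaterThanOrEqualTo(five) // False
+	_ = ge
+	return lt.True()
+}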
+
+// AsValueMap returns a map[string]cty.Value representation of a non-null,
+// non-unknown value of any type that CanIterateElements, or panics if called
+// on any other value.
+//
+// For more convenient conversions to maps of more specific types, use
+// the "gocty" package.
+func (val Value) AsValueMap() map[string]Value {
+	l := val.LengthInt()
+	if l == 0 {
+		return nil
+	}
+
+	ret := make(map[string]Value, l)
+	for it := val.ElementIterator(); it.Next(); {
+		k, v := it.Element()
+		ret[k.AsString()] = v
+	}
+	return ret
+}
+
+// AsValueSet returns a ValueSet representation of a non-null,
+// non-unknown value of any collection type, or panics if called
+// on any other value.
+//
+// Unlike AsValueSlice and AsValueMap, this method requires specifically a
+// collection type (list, set or map) and does not allow structural types
+// (tuple or object), because the ValueSet type requires homogeneous
+// element types.
+//
+// The returned ValueSet can store only values of the receiver's element type.
+func (val Value) AsValueSet() ValueSet {
+	if !val.Type().IsCollectionType() {
+		panic("not a collection type")
+	}
+
+	// We don't give the caller our own set.Set (assuming we're a cty.Set value)
+	// because then the caller could mutate our internals, which is forbidden.
+	// Instead, we will construct a new set and append our elements into it.
+	ret := NewValueSet(val.Type().ElementType())
+	for it := val.ElementIterator(); it.Next(); {
+		_, v := it.Element()
+		ret.Add(v)
+	}
+	return ret
+}
+
+// EncapsulatedValue returns the native value encapsulated in a non-null,
+// non-unknown capsule-typed value, or panics if called on any other value.
+//
+// The result is the same pointer that was passed to CapsuleVal to create
+// the value. Since cty considers values to be immutable, it is strongly
+// recommended to treat the encapsulated value itself as immutable too.
+func (val Value) EncapsulatedValue() interface{} {
+	if !val.Type().IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return val.v
+}
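+
+// Editor's note: an illustrative sketch, not upstream code; the name
+// exampleConversions is hypothetical. It bridges a known, non-null cty map
+// of strings back into native Go values with the As* methods above.
+func exampleConversions(m Value) []string {
+	out := make([]string, 0, m.LengthInt())
+	for _, v := range m.AsValueMap() {
+		out = append(out, v.AsString()) // panics if an element is null or unknown
+	}
+	return out
+}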
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/walk.go b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/walk.go
new file mode 100644
index 00000000000..a6943babef8
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/github.com/zclconf/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/base64.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 00000000000..fc311609081 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 00000000000..aeb73f81a14 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{
+	0x4f, 0x72, 0x70, 0x68,
+	0x65, 0x61, 0x6e, 0x42,
+	0x65, 0x68, 0x6f, 0x6c,
+	0x64, 0x65, 0x72, 0x53,
+	0x63, 0x72, 0x79, 0x44,
+	0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+	hash  []byte
+	salt  []byte
+	cost  int // allowed range is MinCost to MaxCost
+	major byte
+	minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+	p, err := newFromPassword(password, cost)
+	if err != nil {
+		return nil, err
+	}
+	return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+	p, err := newFromHash(hashedPassword)
+	if err != nil {
+		return err
+	}
+
+	otherHash, err := bcrypt(password, p.cost, p.salt)
+	if err != nil {
+		return err
+	}
+
+	otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+	if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+		return nil
+	}
+
+	return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+	p, err := newFromHash(hashedPassword)
+	if err != nil {
+		return 0, err
+	}
+	return p.cost, nil
+}
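+
+// Editor's note: an illustrative sketch, not part of the upstream package;
+// the name exampleRoundTrip is hypothetical. It shows the typical
+// hash-then-verify round trip using only the exported API above.
+func exampleRoundTrip() error {
+	hash, err := GenerateFromPassword([]byte("s3cret"), DefaultCost)
+	if err != nil {
+		return err
+	}
+	// Returns nil on a match and ErrMismatchedHashAndPassword otherwise.
+	return CompareHashAndPassword(hash, []byte("s3cret"))
+}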
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+	if cost < MinCost {
+		cost = DefaultCost
+	}
+	p := new(hashed)
+	p.major = majorVersion
+	p.minor = minorVersion
+
+	err := checkCost(cost)
+	if err != nil {
+		return nil, err
+	}
+	p.cost = cost
+
+	unencodedSalt := make([]byte, maxSaltSize)
+	_, err = io.ReadFull(rand.Reader, unencodedSalt)
+	if err != nil {
+		return nil, err
+	}
+
+	p.salt = base64Encode(unencodedSalt)
+	hash, err := bcrypt(password, p.cost, p.salt)
+	if err != nil {
+		return nil, err
+	}
+	p.hash = hash
+	return p, nil
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+	if len(hashedSecret) < minHashSize {
+		return nil, ErrHashTooShort
+	}
+	p := new(hashed)
+	n, err := p.decodeVersion(hashedSecret)
+	if err != nil {
+		return nil, err
+	}
+	hashedSecret = hashedSecret[n:]
+	n, err = p.decodeCost(hashedSecret)
+	if err != nil {
+		return nil, err
+	}
+	hashedSecret = hashedSecret[n:]
+
+	// The "+2" is here because we'll have to append at most 2 '=' to the salt
+	// when base64 decoding it in expensiveBlowfishSetup().
+	p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+	copy(p.salt, hashedSecret[:encodedSaltSize])
+
+	hashedSecret = hashedSecret[encodedSaltSize:]
+	p.hash = make([]byte, len(hashedSecret))
+	copy(p.hash, hashedSecret)
+
+	return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+	cipherData := make([]byte, len(magicCipherData))
+	copy(cipherData, magicCipherData)
+
+	c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+	if err != nil {
+		return nil, err
+	}
+
+	// Encrypt each 8-byte block of the magic IV 64 times.
+	for i := 0; i < 24; i += 8 {
+		for j := 0; j < 64; j++ {
+			c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+		}
+	}
+
+	// Bug compatibility with C bcrypt implementations. We only encode 23 of
+	// the 24 bytes encrypted.
+	hsh := base64Encode(cipherData[:maxCryptedHashSize])
+	return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+	csalt, err := base64Decode(salt)
+	if err != nil {
+		return nil, err
+	}
+
+	// Bug compatibility with C bcrypt implementations. They use the trailing
+	// NULL in the key string during expansion.
+	// We copy the key to prevent changing the underlying array.
+	ckey := append(key[:len(key):len(key)], 0)
+
+	c, err := blowfish.NewSaltedCipher(ckey, csalt)
+	if err != nil {
+		return nil, err
+	}
+
+	// 2**cost rounds of the expensive key schedule.
+	var i, rounds uint64
+	rounds = 1 << cost
+	for i = 0; i < rounds; i++ {
+		blowfish.ExpandKey(ckey, c)
+		blowfish.ExpandKey(csalt, c)
+	}
+
+	return c, nil
+}
+
+func (p *hashed) Hash() []byte {
+	arr := make([]byte, 60)
+	arr[0] = '$'
+	arr[1] = p.major
+	n := 2
+	if p.minor != 0 {
+		arr[2] = p.minor
+		n = 3
+	}
+	arr[n] = '$'
+	n++
+	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+	n += 2
+	arr[n] = '$'
+	n++
+	copy(arr[n:], p.salt)
+	n += encodedSaltSize
+	copy(arr[n:], p.hash)
+	n += encodedHashSize
+	return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+	if sbytes[0] != '$' {
+		return -1, InvalidHashPrefixError(sbytes[0])
+	}
+	if sbytes[1] > majorVersion {
+		return -1, HashVersionTooNewError(sbytes[1])
+	}
+	p.major = sbytes[1]
+	n := 3
+	if sbytes[2] != '$' {
+		p.minor = sbytes[2]
+		n++
+	}
+	return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+	cost, err := strconv.Atoi(string(sbytes[0:2]))
+	if err != nil {
+		return -1, err
+	}
+	err = checkCost(cost)
+	if err != nil {
+		return -1, err
+	}
+	p.cost = cost
+	return 3, nil
+}
+
+func (p *hashed) String() string {
+	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+	if cost < MinCost || cost > MaxCost {
+		return InvalidCostError(cost)
+	}
+	return nil
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/block.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 00000000000..9d80f19521b
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/cipher.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 00000000000..213bf204afe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. 
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = encryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = decryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+	copy(c.p[0:], p[0:])
+	copy(c.s0[0:], s0[0:])
+	copy(c.s1[0:], s1[0:])
+	copy(c.s2[0:], s2[0:])
+	copy(c.s3[0:], s3[0:])
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/const.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 00000000000..d04077595ab
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/cast5/cast5.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 00000000000..ddcbeb6f2ad --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,533 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. +// +// CAST5 is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). 
+package cast5 // import "golang.org/x/crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. + +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 00000000000..592d1864361 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,219 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "golang.org/x/crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. 
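+// The checksum line that ends a block has the form "=XXXX", where XXXX is the
+// base64 encoding of the 24-bit CRC, which is why a five-byte line beginning
+// with '=' is treated specially below.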
+type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
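+//
+// A minimal usage sketch (armoredInput is a hypothetical io.Reader; error
+// handling abbreviated):
+//
+//	block, err := armor.Decode(armoredInput)
+//	if err != nil {
+//		// handle error (io.EOF means no armored block was found)
+//	}
+//	fmt.Println(block.Type) // e.g. "PGP SIGNATURE"
+//	body, err := ioutil.ReadAll(block.Body) // decoded, CRC-checked bytes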
+func Decode(in io.Reader) (p *Block, err error) {
+	r := bufio.NewReaderSize(in, 100)
+	var line []byte
+	ignoreNext := false
+
+TryNextBlock:
+	p = nil
+
+	// Skip leading garbage
+	for {
+		ignoreThis := ignoreNext
+		line, ignoreNext, err = r.ReadLine()
+		if err != nil {
+			return
+		}
+		if ignoreNext || ignoreThis {
+			continue
+		}
+		line = bytes.TrimSpace(line)
+		if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
+			break
+		}
+	}
+
+	p = new(Block)
+	p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
+	p.Header = make(map[string]string)
+	nextIsContinuation := false
+	var lastKey string
+
+	// Read headers
+	for {
+		isContinuation := nextIsContinuation
+		line, nextIsContinuation, err = r.ReadLine()
+		if err != nil {
+			p = nil
+			return
+		}
+		if isContinuation {
+			p.Header[lastKey] += string(line)
+			continue
+		}
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 {
+			break
+		}
+
+		i := bytes.Index(line, []byte(": "))
+		if i == -1 {
+			goto TryNextBlock
+		}
+		lastKey = string(line[:i])
+		p.Header[lastKey] = string(line[i+2:])
+	}
+
+	p.lReader.in = r
+	p.oReader.currentCRC = crc24Init
+	p.oReader.lReader = &p.lReader
+	p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
+	p.Body = &p.oReader
+
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/encode.go
new file mode 100644
index 00000000000..6f07582c37c
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/armor/encode.go
@@ -0,0 +1,160 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package armor
+
+import (
+	"encoding/base64"
+	"io"
+)
+
+var armorHeaderSep = []byte(": ")
+var blockEnd = []byte("\n=")
+var newline = []byte("\n")
+var armorEndOfLineOut = []byte("-----\n")
+
+// writeSlices writes its arguments to the given Writer.
+func writeSlices(out io.Writer, slices ...[]byte) (err error) {
+	for _, s := range slices {
+		_, err = out.Write(s)
+		if err != nil {
+			return err
+		}
+	}
+	return
+}
+
+// lineBreaker breaks data across several lines, all of the same byte length
+// (except possibly the last). Lines are broken with a single '\n'.
+type lineBreaker struct {
+	lineLength  int
+	line        []byte
+	used        int
+	out         io.Writer
+	haveWritten bool
+}
+
+func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
+	return &lineBreaker{
+		lineLength: lineLength,
+		line:       make([]byte, lineLength),
+		used:       0,
+		out:        out,
+	}
+}
+
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
+	n = len(b)
+
+	if n == 0 {
+		return
+	}
+
+	if l.used == 0 && l.haveWritten {
+		_, err = l.out.Write([]byte{'\n'})
+		if err != nil {
+			return
+		}
+	}
+
+	if l.used+len(b) < l.lineLength {
+		l.used += copy(l.line[l.used:], b)
+		return
+	}
+
+	l.haveWritten = true
+	_, err = l.out.Write(l.line[0:l.used])
+	if err != nil {
+		return
+	}
+	excess := l.lineLength - l.used
+	l.used = 0
+
+	_, err = l.out.Write(b[0:excess])
+	if err != nil {
+		return
+	}
+
+	_, err = l.Write(b[excess:])
+	return
+}
+
+func (l *lineBreaker) Close() (err error) {
+	if l.used > 0 {
+		_, err = l.out.Write(l.line[0:l.used])
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+// encoding keeps track of a running CRC24 over the data which has been written
+// to it and outputs an OpenPGP checksum when closed, followed by an armor
+// trailer.
+// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 00000000000..e601e389f12 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000000..73f4fe37859 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
+// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
+// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
+// n. 4, 1985, pp. 469-472.
+//
+// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
+// unsuitable for other protocols. RSA should be preferred in any case.
+package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
+
+import (
+	"crypto/rand"
+	"crypto/subtle"
+	"errors"
+	"io"
+	"math/big"
+)
+
+// PublicKey represents an ElGamal public key.
+type PublicKey struct {
+	G, P, Y *big.Int
+}
+
+// PrivateKey represents an ElGamal private key.
+type PrivateKey struct {
+	PublicKey
+	X *big.Int
+}
+
+// Encrypt encrypts the given message to the given public key. The result is a
+// pair of integers. Errors can result from reading from the random source, or
+// because msg is too large to be encrypted to the public key.
+func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
+	pLen := (pub.P.BitLen() + 7) / 8
+	if len(msg) > pLen-11 {
+		err = errors.New("elgamal: message too long")
+		return
+	}
+
+	// EM = 0x02 || PS || 0x00 || M
+	em := make([]byte, pLen-1)
+	em[0] = 2
+	ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
+	err = nonZeroRandomBytes(ps, random)
+	if err != nil {
+		return
+	}
+	em[len(em)-len(msg)-1] = 0
+	copy(mm, msg)
+
+	m := new(big.Int).SetBytes(em)
+
+	k, err := rand.Int(random, pub.P)
+	if err != nil {
+		return
+	}
+
+	c1 = new(big.Int).Exp(pub.G, k, pub.P)
+	s := new(big.Int).Exp(pub.Y, k, pub.P)
+	c2 = s.Mul(s, m)
+	c2.Mod(c2, pub.P)
+
+	return
+}
+
+// Decrypt takes two integers, resulting from an ElGamal encryption, and
+// returns the plaintext of the message. An error can result only if the
+// ciphertext is invalid. Users should keep in mind that this is a padding
+// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
+// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
+// Bleichenbacher, Advances in Cryptology (Crypto '98).
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
+	s := new(big.Int).Exp(c1, priv.X, priv.P)
+	s.ModInverse(s, priv.P)
+	s.Mul(s, c2)
+	s.Mod(s, priv.P)
+	em := s.Bytes()
+
+	firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
+
+	// The remainder of the plaintext must be a string of non-zero random
+	// octets, followed by a 0, followed by the message.
+	//   lookingForIndex: 1 iff we are still looking for the zero.
+	//   index: the offset of the first zero byte.
+	var lookingForIndex, index int
+	lookingForIndex = 1
+
+	for i := 1; i < len(em); i++ {
+		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+	}
+
+	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
+		return nil, errors.New("elgamal: decryption error")
+	}
+	return em[index+1:], nil
+}
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
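+// These bytes form the PS padding string in EM = 0x02 || PS || 0x00 || M
+// above; they must be non-zero so that the 0x00 separator preceding the
+// message can be located unambiguously on decryption.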
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/errors/errors.go new file mode 100644 index 00000000000..eb0550b2d04 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "golang.org/x/crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/keys.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 00000000000..3e2518600e2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,693 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. 
+var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
+
+// An Entity represents the components of an OpenPGP key: a primary public key
+// (which must be a signing key), one or more identities claimed by that key,
+// and zero or more subkeys, which may be encryption keys.
+type Entity struct {
+	PrimaryKey  *packet.PublicKey
+	PrivateKey  *packet.PrivateKey
+	Identities  map[string]*Identity // indexed by Identity.Name
+	Revocations []*packet.Signature
+	Subkeys     []Subkey
+}
+
+// An Identity represents an identity claimed by an Entity and zero or more
+// assertions by other entities about that claim.
+type Identity struct {
+	Name          string // by convention, has the form "Full Name (comment) <email@example.com>"
+	UserId        *packet.UserId
+	SelfSignature *packet.Signature
+	Signatures    []*packet.Signature
+}
+
+// A Subkey is an additional public key in an Entity. Subkeys can be used for
+// encryption.
+type Subkey struct {
+	PublicKey  *packet.PublicKey
+	PrivateKey *packet.PrivateKey
+	Sig        *packet.Signature
+}
+
+// A Key identifies a specific public key in an Entity. This is either the
+// Entity's primary key or a subkey.
+type Key struct {
+	Entity        *Entity
+	PublicKey     *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	SelfSignature *packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+	// KeysById returns the set of keys that have the given key id.
+	KeysById(id uint64) []Key
+	// KeysByIdUsage returns the set of keys with the given id
+	// that also meet the key usage given by requiredUsage.
+	// The requiredUsage is expressed as the bitwise-OR of
+	// packet.KeyFlag* values.
+	KeysByIdUsage(id uint64, requiredUsage byte) []Key
+	// DecryptionKeys returns all private keys that are valid for
+	// decryption.
+	DecryptionKeys() []Key
+}
+
+// primaryIdentity returns the Identity marked as primary or the first identity
+// if none are so marked.
+func (e *Entity) primaryIdentity() *Identity {
+	var firstIdentity *Identity
+	for _, ident := range e.Identities {
+		if firstIdentity == nil {
+			firstIdentity = ident
+		}
+		if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+			return ident
+		}
+	}
+	return firstIdentity
+}
+
+// encryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
+	candidateSubkey := -1
+
+	// Iterate the keys to find the newest key
+	var maxTime time.Time
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagEncryptCommunications &&
+			subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+			!subkey.Sig.KeyExpired(now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+			candidateSubkey = i
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we don't have any candidate subkeys for encryption and
+	// the primary key doesn't have any usage metadata then we
+	// assume that the primary key is ok. Or, if the primary key is
+	// marked as ok to encrypt to, then we can obviously use it.
+	i := e.primaryIdentity()
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
+		e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
+		!i.SelfSignature.KeyExpired(now) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	// This Entity appears to be signing only.
+	return Key{}, false
+}
+
+// signingKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) signingKey(now time.Time) (Key, bool) {
+	candidateSubkey := -1
+
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagSign &&
+			subkey.PublicKey.PubKeyAlgo.CanSign() &&
+			!subkey.Sig.KeyExpired(now) {
+			candidateSubkey = i
+			break
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we have no candidate subkey then we assume that it's ok to sign
+	// with the primary key.
+	i := e.primaryIdentity()
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
+		!i.SelfSignature.KeyExpired(now) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+	for _, e := range el {
+		if e.PrimaryKey.KeyId == id {
+			var selfSig *packet.Signature
+			for _, ident := range e.Identities {
+				if selfSig == nil {
+					selfSig = ident.SelfSignature
+				} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+					selfSig = ident.SelfSignature
+					break
+				}
+			}
+			keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
+		}
+
+		for _, subKey := range e.Subkeys {
+			if subKey.PublicKey.KeyId == id {
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+			}
+		}
+	}
+	return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
+	for _, key := range el.KeysById(id) {
+		if len(key.Entity.Revocations) > 0 {
+			continue
+		}
+
+		if key.SelfSignature.RevocationReason != nil {
+			continue
+		}
+
+		if key.SelfSignature.FlagsValid && requiredUsage != 0 {
+			var usage byte
+			if key.SelfSignature.FlagCertify {
+				usage |= packet.KeyFlagCertify
+			}
+			if key.SelfSignature.FlagSign {
+				usage |= packet.KeyFlagSign
+			}
+			if key.SelfSignature.FlagEncryptCommunications {
+				usage |= packet.KeyFlagEncryptCommunications
+			}
+			if key.SelfSignature.FlagEncryptStorage {
+				usage |= packet.KeyFlagEncryptStorage
+			}
+			if usage&requiredUsage != requiredUsage {
+				continue
+			}
+		}
+
+		keys = append(keys, key)
+	}
+	return
+}
+
+// DecryptionKeys returns all private keys that are valid for decryption.
+func (el EntityList) DecryptionKeys() (keys []Key) {
+	for _, e := range el {
+		for _, subKey := range e.Subkeys {
+			if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+			}
+		}
+	}
+	return
+}
+
+// ReadArmoredKeyRing reads one or more public/private keys from an armored
+// keyring.
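+//
+// For example (a sketch; "keyring.asc" is a hypothetical path, error handling
+// abbreviated):
+//
+//	f, err := os.Open("keyring.asc")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	entities, err := openpgp.ReadArmoredKeyRing(f)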
+func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. 
+ case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + identity.SelfSignature = sig + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment 
and email, any of
+// which may be empty but must not contain any of "()<>\x00".
+// If config is nil, sensible defaults will be used.
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
+	currentTime := config.Now()
+
+	bits := defaultRSAKeyBits
+	if config != nil && config.RSABits != 0 {
+		bits = config.RSABits
+	}
+
+	uid := packet.NewUserId(name, comment, email)
+	if uid == nil {
+		return nil, errors.InvalidArgumentError("user id field contained invalid characters")
+	}
+	signingPriv, err := rsa.GenerateKey(config.Random(), bits)
+	if err != nil {
+		return nil, err
+	}
+	encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
+	if err != nil {
+		return nil, err
+	}
+
+	e := &Entity{
+		PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
+		PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
+		Identities: make(map[string]*Identity),
+	}
+	isPrimaryId := true
+	e.Identities[uid.Id] = &Identity{
+		Name:   uid.Id,
+		UserId: uid,
+		SelfSignature: &packet.Signature{
+			CreationTime: currentTime,
+			SigType:      packet.SigTypePositiveCert,
+			PubKeyAlgo:   packet.PubKeyAlgoRSA,
+			Hash:         config.Hash(),
+			IsPrimaryId:  &isPrimaryId,
+			FlagsValid:   true,
+			FlagSign:     true,
+			FlagCertify:  true,
+			IssuerKeyId:  &e.PrimaryKey.KeyId,
+		},
+	}
+	err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the user passes in a DefaultHash via packet.Config,
+	// set the PreferredHash for the SelfSignature.
+	if config != nil && config.DefaultHash != 0 {
+		e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
+	}
+
+	// Likewise for DefaultCipher.
+	if config != nil && config.DefaultCipher != 0 {
+		e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
+	}
+
+	e.Subkeys = make([]Subkey, 1)
+	e.Subkeys[0] = Subkey{
+		PublicKey:  packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
+		PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
+		Sig: &packet.Signature{
+			CreationTime:              currentTime,
+			SigType:                   packet.SigTypeSubkeyBinding,
+			PubKeyAlgo:                packet.PubKeyAlgoRSA,
+			Hash:                      config.Hash(),
+			FlagsValid:                true,
+			FlagEncryptStorage:        true,
+			FlagEncryptCommunications: true,
+			IssuerKeyId:               &e.PrimaryKey.KeyId,
+		},
+	}
+	e.Subkeys[0].PublicKey.IsSubkey = true
+	e.Subkeys[0].PrivateKey.IsSubkey = true
+	err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
+	if err != nil {
+		return nil, err
+	}
+	return e, nil
+}
+
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
+// If config is nil, sensible defaults will be used.
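+//
+// A minimal round-trip sketch (illustrative only; the entity value and the
+// buffer are assumed, not part of this package):
+//
+//	var buf bytes.Buffer
+//	if err := entity.SerializePrivate(&buf, nil); err != nil {
+//		return err
+//	}
+//	restored, err := ReadEntity(packet.NewReader(&buf))
+//
+// Because identities and subkeys are re-signed on the way out, the private
+// key must already be decrypted for serialization to succeed.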
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000000..e8f0b5caa7d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
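+//
+// A minimal reading sketch (illustrative; p is assumed to have come from
+// this package's Read):
+//
+//	if c, ok := p.(*Compressed); ok {
+//		inner, err := Read(c.Body) // next packet, from the decompressed stream
+//		...
+//	}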
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/config.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/config.go
new file mode 100644
index 00000000000..c76eecc963a
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/config.go
@@ -0,0 +1,91 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now()
+	}
+	return c.Time()
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 00000000000..02b372cf374
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,206 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto/rsa"
+	"encoding/binary"
+	"io"
+	"math/big"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/elgamal"
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+const encryptedKeyVersion = 3
+
+// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
+// section 5.1.
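+//
+// A minimal decryption sketch (illustrative; priv is an assumed, already
+// decrypted *PrivateKey whose KeyId matches ek.KeyId):
+//
+//	if err := ek.Decrypt(priv, nil); err == nil {
+//		// ek.CipherFunc and ek.Key now hold the session cipher and key.
+//	}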
+type EncryptedKey struct {
+	KeyId      uint64
+	Algo       PublicKeyAlgorithm
+	CipherFunc CipherFunction // only valid after a successful Decrypt
+	Key        []byte         // only valid after a successful Decrypt
+
+	encryptedMPI1, encryptedMPI2 parsedMPI
+}
+
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
+	var buf [10]byte
+	_, err = readFull(r, buf[:])
+	if err != nil {
+		return
+	}
+	if buf[0] != encryptedKeyVersion {
+		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+	}
+	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
+	e.Algo = PublicKeyAlgorithm(buf[9])
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+	case PubKeyAlgoElGamal:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+	}
+	_, err = consumeAll(r)
+	return
+}
+
+func checksumKeyMaterial(key []byte) uint16 {
+	var checksum uint16
+	for _, v := range key {
+		checksum += uint16(v)
+	}
+	return checksum
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+	var err error
+	var b []byte
+
+	// TODO(agl): use session key decryption routines here to avoid
+	// padding oracle attacks.
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		k := priv.PrivateKey.(*rsa.PrivateKey)
+		b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
+	case PubKeyAlgoElGamal:
+		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
+		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
+		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+	default:
+		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+
+	if err != nil {
+		return err
+	}
+
+	e.CipherFunc = CipherFunction(b[0])
+	e.Key = b[1 : len(b)-2]
+	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
+	checksum := checksumKeyMaterial(e.Key)
+	if checksum != expectedChecksum {
+		return errors.StructuralError("EncryptedKey checksum incorrect")
+	}
+
+	return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
+func (e *EncryptedKey) Serialize(w io.Writer) error {
+	var mpiLen int
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		mpiLen = 2 + len(e.encryptedMPI1.bytes)
+	case PubKeyAlgoElGamal:
+		mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
+	default:
+		return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
+	}
+
+	serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
+
+	w.Write([]byte{encryptedKeyVersion})
+	binary.Write(w, binary.BigEndian, e.KeyId)
+	w.Write([]byte{byte(e.Algo)})
+
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		writeMPIs(w, e.encryptedMPI1)
+	case PubKeyAlgoElGamal:
+		writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
+	default:
+		panic("internal error")
+	}
+
+	return nil
+}
+
+// SerializeEncryptedKey serializes an encrypted key packet to w that contains
+// key, encrypted to pub.
+// If config is nil, sensible defaults will be used.
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
+	var buf [10]byte
+	buf[0] = encryptedKeyVersion
+	binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
+	buf[9] = byte(pub.PubKeyAlgo)
+
+	keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
+	keyBlock[0] = byte(cipherFunc)
+	copy(keyBlock[1:], key)
+	checksum := checksumKeyMaterial(key)
+	keyBlock[1+len(key)] = byte(checksum >> 8)
+	keyBlock[1+len(key)+1] = byte(checksum)
+
+	switch pub.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
+	case PubKeyAlgoElGamal:
+		return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
+	case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
+		return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+	}
+
+	return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+}
+
+func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
+	cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
+	if err != nil {
+		return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
+	}
+
+	packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
+
+	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(header[:])
+	if err != nil {
+		return err
+	}
+	return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
+}
+
+func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
+	c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
+	if err != nil {
+		return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
+	}
+
+	packetLen := 10 /* header length */
+	packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
+	packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
+
+	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(header[:])
+	if err != nil {
+		return err
+	}
+	err = writeBig(w, c1)
+	if err != nil {
+		return err
+	}
+	return writeBig(w, c2)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/literal.go
new file mode 100644
index 00000000000..1a9ec6e51e8
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/literal.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// LiteralData represents the contents of a file carried as a literal data
+// packet; the body is not further interpreted. See RFC 4880, section 5.9.
+type LiteralData struct {
+	IsBinary bool
+	FileName string
+	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+	Body     io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
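+// The magic filename "_CONSOLE" comes from RFC 4880, section 5.9: it asks
+// the receiving implementation to display the data and to avoid writing it
+// to disk.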
+func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go new file mode 100644 index 00000000000..ce2a33a547c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
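+//
+// A minimal encryption sketch (illustrative; key is an assumed 16-byte AES
+// key and randData holds one freshly generated block of random bytes):
+//
+//	block, _ := aes.NewCipher(key)
+//	stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
+//	// prefix is written to the output first; stream.XORKeyStream then
+//	// encrypts the message body in place.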
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000000..1713503395e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. 
See RFC 4880, +// section 5.4. +type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000000..456d807f255 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. 
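+//
+// A minimal iteration sketch (illustrative; r is an assumed io.Reader over
+// raw packet data):
+//
+//	or := NewOpaqueReader(r)
+//	for {
+//		op, err := or.Next()
+//		if err != nil {
+//			break // io.EOF marks the end of the stream
+//		}
+//		// op.Tag and op.Contents hold the raw packet.
+//	}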
+type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 00000000000..5af64c5421b --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,551 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "golang.org/x/crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rsa" + "io" + "math/big" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. 
+func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
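+// For example, the new-format header bytes 0xc2 0x0f declare a signature
+// packet (0x80|0x40|tag 2) whose body is 15 bytes long.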
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. A bufio.Reader at the original position of the io.Reader +// is returned. 
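+// Read (below) uses the peeked version byte to choose a concrete type
+// before parsing: for example, a version below 4 selects SignatureV3 or
+// PublicKeyV3, while version 4 selects Signature or PublicKey.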
+func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. 
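+// For example, PubKeyAlgoRSAEncryptOnly.CanSign() reports false, while
+// PubKeyAlgoRSA.CanSign() reports true.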
+func (pka PublicKeyAlgorithm) CanSign() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
+		return true
+	}
+	return false
+}
+
+// CipherFunction represents the different block ciphers specified for OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
+type CipherFunction uint8
+
+const (
+	Cipher3DES   CipherFunction = 2
+	CipherCAST5  CipherFunction = 3
+	CipherAES128 CipherFunction = 7
+	CipherAES192 CipherFunction = 8
+	CipherAES256 CipherFunction = 9
+)
+
+// KeySize returns the key size, in bytes, of cipher.
+func (cipher CipherFunction) KeySize() int {
+	switch cipher {
+	case Cipher3DES:
+		return 24
+	case CipherCAST5:
+		return cast5.KeySize
+	case CipherAES128:
+		return 16
+	case CipherAES192:
+		return 24
+	case CipherAES256:
+		return 32
+	}
+	return 0
+}
+
+// blockSize returns the block size, in bytes, of cipher.
+func (cipher CipherFunction) blockSize() int {
+	switch cipher {
+	case Cipher3DES:
+		return des.BlockSize
+	case CipherCAST5:
+		return 8
+	case CipherAES128, CipherAES192, CipherAES256:
+		return 16
+	}
+	return 0
+}
+
+// new returns a fresh instance of the given cipher.
+func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
+	switch cipher {
+	case Cipher3DES:
+		block, _ = des.NewTripleDESCipher(key)
+	case CipherCAST5:
+		block, _ = cast5.NewCipher(key)
+	case CipherAES128, CipherAES192, CipherAES256:
+		block, _ = aes.NewCipher(key)
+	}
+	return
+}
+
+// readMPI reads a big integer from r. The bit length returned is the bit
+// length that was specified in r. This is preserved so that the integer can be
+// reserialized exactly.
+func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
+	var buf [2]byte
+	_, err = readFull(r, buf[0:])
+	if err != nil {
+		return
+	}
+	bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+	numBytes := (int(bitLength) + 7) / 8
+	mpi = make([]byte, numBytes)
+	_, err = readFull(r, mpi)
+	// According to RFC 4880 3.2. we should check that the MPI has no leading
+	// zeroes (at least when not an encrypted MPI?), but this implementation
+	// does generate leading zeroes, so we keep accepting them.
+	return
+}
+
+// writeMPI serializes a big integer to w.
+func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
+	// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
+	// Implementations seem to be tolerant of them, and stripping them would
+	// make it complex to guarantee matching re-serialization.
+	_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
+	if err == nil {
+		_, err = w.Write(mpiBytes)
+	}
+	return
+}
+
+// writeBig serializes a *big.Int to w.
+func writeBig(w io.Writer, i *big.Int) error {
+	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
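+// Note that BZIP2 (algorithm 3) can still be decompressed by
+// (*Compressed).parse in compressed.go; only generating BZIP2 streams is
+// unsupported, which is why no constant for it appears below.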
+type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/private_key.go new file mode 100644 index 00000000000..bd31cceac62 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/private_key.go @@ -0,0 +1,385 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. + sha1Checksum bool + iv []byte +} + +func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA or ECDSA. +func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. 
+ switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + // TODO(agl): support encrypted private keys + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) + if err != nil { + return + } + buf.WriteByte(0 /* no encryption */) + + privateKeyBuf := bytes.NewBuffer(nil) + + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(privateKeyBuf, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + if err != nil { + return + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = 
&ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000000..fcd5f525196 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -0,0 +1,753 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +var ( + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} +) + +const maxOIDLength = 8 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c elliptic.Curve + if bytes.Equal(f.oid, oidCurveP256) { + c = elliptic.P256() + } else if bytes.Equal(f.oid, oidCurveP384) { + c = elliptic.P384() + } else if bytes.Equal(f.oid, oidCurveP521) { + c = elliptic.P521() + } else { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. 
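+//
+// For example, the wire encoding 0x03 0x01 0x08 0x07 declares a three-byte
+// field: reserved value 1, KDF hash SHA-256 (hash algorithm 8) and AES-128
+// (symmetric algorithm 7) for key wrapping.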
+type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
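+//
+// Each field is an MPI (RFC 4880, section 3.2): a two-octet big-endian bit
+// count followed by the magnitude in big-endian octets. The value 511, for
+// example, is encoded as 00 09 01 ff.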
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
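+// The layout is one octet of version (4), a four-octet big-endian creation
+// time, one octet of public-key algorithm ID, then the algorithm-specific
+// fields, mirroring what parse reads.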
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
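+//
+// A minimal caller sketch, assuming msg holds the signed bytes and sig was
+// parsed from the same message:
+//
+//	h := sig.Hash.New()
+//	h.Write(msg)
+//	if err := pk.VerifySignatureV3(h, sig); err != nil {
+//		// signature did not verify
+//	}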
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
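+// The hash input is the key's own serialization, as computed by
+// keyRevocationHash above.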
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 00000000000..5daf7b6cfd4 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
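+// The prefix is the octet 0x99 followed by a two-octet big-endian count of
+// the key material octets (six octets of header fields plus the MPIs).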
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. 
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 00000000000..34bc7c613e6 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. 
Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 00000000000..b2a24a53232 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. 
+ EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
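+//
+// Every subpacket starts with its length in the variable-width encoding of
+// section 4.2.2, then a type octet whose top bit marks the subpacket as
+// critical, then length-1 octets of body.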
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if 
len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. 
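+	// Lengths up to 191 fit in one octet; 192..16319 take two octets,
+	// stored as ((length-192)>>8)+192 followed by (length-192)&0xff; larger
+	// values take five octets: 0xff then a big-endian uint32. For example,
+	// 1000 encodes as c3 28, because 1000-192 = 808 = 3<<8 | 40.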
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
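+//
+// A minimal signing sketch, assuming msg holds the bytes to sign and priv
+// is a decrypted *PrivateKey:
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   priv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: time.Now(),
+//		IssuerKeyId:  &priv.KeyId,
+//	}
+//	h := sig.Hash.New()
+//	h.Write(msg)
+//	if err := sig.Sign(h, priv, nil); err != nil {
+//		// handle the signing failure
+//	}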
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
+	sig.outSubpackets = sig.buildSubpackets()
+	digest, err := sig.signPrepareHash(h)
+	if err != nil {
+		return
+	}
+
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		// supports both *rsa.PrivateKey and crypto.Signer
+		sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+		sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
+	case PubKeyAlgoDSA:
+		dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
+
+		// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
+		subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
+		if len(digest) > subgroupSize {
+			digest = digest[:subgroupSize]
+		}
+		// Assign to the named return value so a signing error propagates.
+		var r, s *big.Int
+		r, s, err = dsa.Sign(config.Random(), dsaPriv, digest)
+		if err == nil {
+			sig.DSASigR.bytes = r.Bytes()
+			sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
+			sig.DSASigS.bytes = s.Bytes()
+			sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
+		}
+	case PubKeyAlgoECDSA:
+		var r, s *big.Int
+		if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
+			// direct support, avoid asn1 wrapping/unwrapping
+			r, s, err = ecdsa.Sign(config.Random(), pk, digest)
+		} else {
+			var b []byte
+			b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+			if err == nil {
+				r, s, err = unwrapECDSASig(b)
+			}
+		}
+		if err == nil {
+			sig.ECDSASigR = fromBig(r)
+			sig.ECDSASigS = fromBig(s)
+		}
+	default:
+		err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+	}
+
+	return
+}
+
+// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
+// signature.
+func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
+	var ecdsaSig struct {
+		R, S *big.Int
+	}
+	_, err = asn1.Unmarshal(b, &ecdsaSig)
+	if err != nil {
+		return
+	}
+	return ecdsaSig.R, ecdsaSig.S, nil
+}
+
+// SignUserId computes a signature from priv, asserting that pub is a valid
+// key for the identity id. On success, the signature is stored in sig. Call
+// Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := userIdSignatureHash(id, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// SignKey computes a signature from priv, asserting that pub is a subkey. On
+// success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
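+// The packet body is the hashed area (HashSuffix without its trailer), the
+// two-octet unhashed-subpacket length and the unhashed subpackets, the
+// two-octet hash tag, and finally the signature MPIs.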
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
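+	// The flags octet is a bitmask of the KeyFlag* constants; a key used
+	// for both certification and signing carries 0x03, for example.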
+ + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 00000000000..6edff889349 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. 
+	if _, err = readFull(r, buf[:8]); err != nil {
+		return
+	}
+	sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
+
+	// Public-key and hash algorithm
+	if _, err = readFull(r, buf[:2]); err != nil {
+		return
+	}
+	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
+	default:
+		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		return
+	}
+	var ok bool
+	if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
+		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
+	}
+
+	// Two-octet field holding left 16 bits of signed hash value.
+	if _, err = readFull(r, sig.HashTag[:2]); err != nil {
+		return
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
+	case PubKeyAlgoDSA:
+		if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
+			return
+		}
+		sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
+	default:
+		panic("unreachable")
+	}
+	return
+}
+
+// Serialize marshals sig to w. The signature contents must already be
+// populated, e.g. by parsing; this package does not generate new v3
+// signatures.
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
+	buf := make([]byte, 8)
+
+	// Write the sig type and creation time
+	buf[0] = byte(sig.SigType)
+	binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
+	if _, err = w.Write(buf[:5]); err != nil {
+		return
+	}
+
+	// Write the issuer long key ID
+	binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
+	if _, err = w.Write(buf[:8]); err != nil {
+		return
+	}
+
+	// Write public key algorithm, hash ID, and hash value
+	buf[0] = byte(sig.PubKeyAlgo)
+	hashId, ok := s2k.HashToHashId(sig.Hash)
+	if !ok {
+		return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
+	}
+	buf[1] = hashId
+	copy(buf[2:4], sig.HashTag[:])
+	if _, err = w.Write(buf[:4]); err != nil {
+		return
+	}
+
+	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
+		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		err = writeMPIs(w, sig.RSASignature)
+	case PubKeyAlgoDSA:
+		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
+	default:
+		panic("impossible")
+	}
+	return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 00000000000..744c2d2c42d
--- /dev/null
+++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// This is the largest session key that we'll support. Since no 512-bit cipher
+// has ever been seriously used, this is comfortably large.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
+// 4880, section 5.3.
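+//
+// A typical decryption sketch, assuming ske was read from the stream and se
+// is the *SymmetricallyEncrypted packet that follows it:
+//
+//	key, cipherFunc, err := ske.Decrypt(passphrase)
+//	if err != nil {
+//		// handle the error
+//	}
+//	contents, err := se.Decrypt(cipherFunc, key)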
+type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
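+ // (Editor's note: with the default config this descriptor is the
+ // iterated-and-salted form of RFC 4880 section 3.7.1.3: the octet
+ // 0x03, a hash id, an 8-octet salt and one encoded count octet, 11
+ // bytes in total, which is what len(s2kBytes) contributes below.)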
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000000..6126030eb90 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
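+ // (Editor's note: RFC 4880 section 5.13 specifies that integrity
+ // protected packets omit OCFB's resynchronization step, hence
+ // OCFBNoResync for the MDC case.)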
+ ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. 
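+//
+// (Editor's note: the emitted trailer is the fixed 22-byte sequence
+// 0xD3 0x14 followed by the 20-byte SHA-1 digest, i.e. mdcPacketTagByte,
+// sha1.Size and the running hash, as assembled in Close below.)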
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000000..d19ffbc7867 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+ 0, 0, 0, 0,
+ 0, 0, 0, 0}
+ if _, err = buf.Write(data); err != nil {
+ return
+ }
+ if err = jpeg.Encode(&buf, photo, nil); err != nil {
+ return
+ }
+ uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+ SubType: UserAttrImageSubpacket,
+ Contents: buf.Bytes()})
+ }
+ return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+ return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.12
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+ uat.Contents, err = OpaqueSubpackets(b)
+ return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+ var buf bytes.Buffer
+ for _, sp := range uat.Contents {
+ sp.Serialize(&buf)
+ }
+ if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+ return err
+ }
+ _, err = w.Write(buf.Bytes())
+ return
+}
+
+// ImageData returns zero or more byte slices, each containing
+// JPEG File Interchange Format (JFIF), for each photo in the
+// user attribute packet.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+ for _, sp := range uat.Contents {
+ if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+ imageData = append(imageData, sp.Contents[16:])
+ }
+ }
+ return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userid.go new file mode 100644 index 00000000000..d6bea7d4acc --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+ Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+ Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+ for _, c := range s {
+ switch c {
+ case '(', ')', '<', '>', 0:
+ return true
+ }
+ }
+ return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
+func NewUserId(name, comment, email string) *UserId {
+ // RFC 4880 doesn't deal with the structure of userid strings; the
+ // name, comment and email form is just a convention. However, there's
+ // no convention about escaping the metacharacters and GPG just refuses
+ // to create user ids where, say, the name contains a '('. We mirror
+ // this behaviour.
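+ //
+ // For illustration (editor's note): NewUserId("Alice", "work",
+ // "alice@example.com") yields the Id
+ // "Alice (work) <alice@example.com>", while an argument containing a
+ // metacharacter, say a name of "Alice (x)", returns nil.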
+
+ if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+ return nil
+ }
+
+ uid := new(UserId)
+ uid.Name, uid.Comment, uid.Email = name, comment, email
+ uid.Id = name
+ if len(comment) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "("
+ uid.Id += comment
+ uid.Id += ")"
+ }
+ if len(email) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "<"
+ uid.Id += email
+ uid.Id += ">"
+ }
+ return uid
+}
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.11
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+ uid.Id = string(b)
+ uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+ return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+ err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte(uid.Id))
+ return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+ var n, c, e struct {
+ start, end int
+ }
+ var state int
+
+ for offset, rune := range id {
+ switch state {
+ case 0:
+ // Entering name
+ n.start = offset
+ state = 1
+ fallthrough
+ case 1:
+ // In name
+ if rune == '(' {
+ state = 2
+ n.end = offset
+ } else if rune == '<' {
+ state = 5
+ n.end = offset
+ }
+ case 2:
+ // Entering comment
+ c.start = offset
+ state = 3
+ fallthrough
+ case 3:
+ // In comment
+ if rune == ')' {
+ state = 4
+ c.end = offset
+ }
+ case 4:
+ // Between comment and email
+ if rune == '<' {
+ state = 5
+ }
+ case 5:
+ // Entering email
+ e.start = offset
+ state = 6
+ fallthrough
+ case 6:
+ // In email
+ if rune == '>' {
+ state = 7
+ e.end = offset
+ }
+ default:
+ // After email
+ }
+ }
+ switch state {
+ case 1:
+ // ended in the name
+ n.end = len(id)
+ case 3:
+ // ended in comment
+ c.end = len(id)
+ case 6:
+ // ended in email
+ e.end = len(id)
+ }
+
+ name = strings.TrimSpace(id[n.start:n.end])
+ comment = strings.TrimSpace(id[c.start:c.end])
+ email = strings.TrimSpace(id[e.start:e.end])
+ return
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/read.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/read.go new file mode 100644 index 00000000000..6ec664f44a1 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/read.go @@ -0,0 +1,442 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
+package openpgp // import "golang.org/x/crypto/openpgp"
+
+import (
+ "crypto"
+ _ "crypto/sha256"
+ "hash"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// SignatureType is the armor type for a PGP signature.
+var SignatureType = "PGP SIGNATURE"
+
+// readArmored reads an armored block with the given type.
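+// It returns an InvalidArgumentError naming the mismatched type if the
+// block is of a different kind, e.g. a "PGP MESSAGE" block where a
+// "PGP SIGNATURE" was expected (editor's note).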
+func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. 
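+ // (Editor's note: in the common case this is zero or more EncryptedKey
+ // and SymmetricKeyEncrypted packets followed by a single
+ // SymmetricallyEncrypted packet, which terminates the loop below.)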
+ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
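+//
+// (Editor's sketch of the calling pattern both paths serve; the names are
+// illustrative only:)
+//
+//	md, err := ReadMessage(r, keyring, prompt, nil)
+//	if err != nil {
+//		// handle parsing or decryption failure
+//	}
+//	body, err := ioutil.ReadAll(md.UnverifiedBody) // must be read to EOF
+//	if err == nil && md.SignatureError == nil {
+//		// body is authenticated: signature and/or MDC checks passed
+//	}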
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
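+// (For text signatures the canonicalizing wrappedHash, which normalizes
+// line endings to CRLF, receives the data, while verification runs against
+// the inner hash h; editor's note.)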
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. 
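+//
+// (Editor's illustration; keyring, signedFile and sigFile are assumed to
+// exist in the caller:)
+//
+//	signer, err := CheckArmoredDetachedSignature(keyring, signedFile, sigFile)
+//	if err != nil {
+//		// bad signature, or errors.ErrUnknownIssuer if no key matches
+//	}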
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
+ body, err := readArmored(signature, SignatureType)
+ if err != nil {
+ return
+ }
+
+ return CheckDetachedSignature(keyring, signed, body)
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 00000000000..4b9a44ca26d --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,273 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+package s2k // import "golang.org/x/crypto/openpgp/s2k"
+
+import (
+ "crypto"
+ "hash"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+ // Hash is the default hash function to be used. If
+ // nil, SHA1 is used.
+ Hash crypto.Hash
+ // S2KCount is only used for symmetric encryption. It
+ // determines the strength of the passphrase stretching when
+ // the said passphrase is hashed to produce a key. S2KCount
+ // should be between 1024 and 65011712, inclusive. If Config
+ // is nil or S2KCount is 0, the value 65536 is used. Not all
+ // values in the above range can be represented. S2KCount will
+ // be rounded up to the next representable value if it cannot
+ // be encoded exactly. When set, it is strongly encouraged to
+ // use a value that is at least 65536. See RFC 4880 Section
+ // 3.7.1.3.
+ S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+ if c == nil || uint(c.Hash) == 0 {
+ // SHA1 is the historical default in this package.
+ return crypto.SHA1
+ }
+
+ return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+ if c == nil || c.S2KCount == 0 {
+ return 96 // The common case. Corresponding to 65536
+ }
+
+ i := c.S2KCount
+ switch {
+ // Behave like GPG. Should we make 65536 the lowest value used?
+ case i < 1024:
+ i = 1024
+ case i > 65011712:
+ i = 65011712
+ }
+
+ return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+ if i < 1024 || i > 65011712 {
+ panic("count arg i outside the required range")
+ }
+
+ for encoded := 0; encoded < 256; encoded++ {
+ count := decodeCount(uint8(encoded))
+ if count >= i {
+ return uint8(encoded)
+ }
+ }
+
+ return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+ return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
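+// (Editor's note: as the body shows, Simple is Salted with a nil salt; when
+// out is longer than one digest, Salted rehashes the input with a growing
+// prefix of zero bytes, per RFC 4880's preloading rule.)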
+func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. 
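+// (Editor's note: ids 4 through 7 are reserved in RFC 4880 and therefore
+// absent from the table; lookups for them return the zero value and false.)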
+var hashToHashIdMapping = []struct {
+ id byte
+ hash crypto.Hash
+ name string
+}{
+ {1, crypto.MD5, "MD5"},
+ {2, crypto.SHA1, "SHA1"},
+ {3, crypto.RIPEMD160, "RIPEMD160"},
+ {8, crypto.SHA256, "SHA256"},
+ {9, crypto.SHA384, "SHA384"},
+ {10, crypto.SHA512, "SHA512"},
+ {11, crypto.SHA224, "SHA224"},
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.id == id {
+ return m.hash, true
+ }
+ }
+ return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id.
+func HashIdToString(id byte) (name string, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.id == id {
+ return m.name, true
+ }
+ }
+
+ return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.hash == h {
+ return m.id, true
+ }
+ }
+ return 0, false
+}
diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/write.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/write.go new file mode 100644 index 00000000000..4ee71784ebe --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/crypto/openpgp/write.go @@ -0,0 +1,418 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "crypto"
+ "hash"
+ "io"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+ "golang.org/x/crypto/openpgp/s2k"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+ return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
+ return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// DetachSignText signs message (after canonicalising the line endings) with
+// the private key from signer (which must already have been decrypted) and
+// writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+ return detachSign(w, signer, message, packet.SigTypeText, config)
+}
+
+// ArmoredDetachSignText signs message (after canonicalising the line endings)
+// with the private key from signer (which must already have been decrypted)
+// and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
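+//
+// (Editor's sketch; msg and signer are assumptions, and signer's private
+// key must already be decrypted:)
+//
+//	var sig bytes.Buffer
+//	err := ArmoredDetachSignText(&sig, signer, strings.NewReader(msg), nil)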
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
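+//
+// For example (editor's illustration):
+//
+//	a := []uint8{8, 9, 10}
+//	b := []uint8{10, 8}
+//	_ = intersectPreferences(a, b) // []uint8{8, 10}, stored in a's prefix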
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
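+//
+// (Editor's sketch; out, recipients, signer and plaintextReader are
+// assumptions:)
+//
+//	w, err := Encrypt(out, recipients, signer, nil, nil)
+//	if err != nil {
+//		// no usable keys or no shared algorithms
+//	}
+//	io.Copy(w, plaintextReader)
+//	w.Close() // emits the trailing signature packet, if signing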
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + return writeAndSign(payload, candidateHashes, signed, hints, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
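+ // (Editor's note: the list is ordered by preference; it is intersected
+ // with the signer's stated preferences below, and writeAndSign then
+ // picks the first candidate that is actually compiled in.)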
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[len(candidateHashes)-1:] + preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/AUTHORS b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/AUTHORS new file mode 100644 index 00000000000..15167cd746c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/CONTRIBUTORS b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/CONTRIBUTORS new file mode 100644 index 00000000000..1c4577e9680 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/LICENSE b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/PATENTS b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/code.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 00000000000..75435c9bdd0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,375 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. 
+ +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg, ""); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteVersionedGoFile appends the buffer with the total size of all created +// structures and writes it as a Go file to the given file with the given +// package name and build tags for the current Unicode version, +func (w *CodeWriter) WriteVersionedGoFile(filename, pkg string) { + tags := buildTags() + if tags != "" { + pattern := fileToPattern(filename) + updateBuildTags(pattern) + filename = fmt.Sprintf(pattern, UnicodeVersion()) + } + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg, tags); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg, tags string) (n int, err error) { + sz := w.Size + if sz > 0 { + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + } + defer w.buf.Reset() + return WriteGo(out, pkg, tags, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. + sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. 
+func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + w.printf("const %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. 
+ b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } else if r == '\\' { + out = "\\" + string(r) + chars = 2 + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. 
+func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/gen.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 00000000000..cc6510fda29 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,347 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (https://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "https://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// CLDRVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +var tags = []struct{ version, buildTags string }{ + {"9.0.0", "!go1.10"}, + {"10.0.0", "go1.10,!go1.13"}, + {"11.0.0", "go1.13"}, +} + +// buildTags reports the build tags used for the current Unicode version. +func buildTags() string { + v := UnicodeVersion() + for _, e := range tags { + if e.version == v { + return e.buildTags + } + } + log.Fatalf("Unknown build tags for Unicode version %q.", v) + return "" +} + +// IsLocal reports whether data files are available locally. +func IsLocal() bool { + dir, err := localReadmeFile() + if err != nil { + return false + } + if _, err = os.Stat(dir); err != nil { + return false + } + return true +} + +// OpenUCDFile opens the requested UCD file. 
The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. +func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +var ( + dirMutex sync.Mutex + localDir string +) + +const permissions = 0755 + +func localReadmeFile() (string, error) { + p, err := build.Import("golang.org/x/text", "", build.FindOnly) + if err != nil { + return "", fmt.Errorf("Could not locate package: %v", err) + } + return filepath.Join(p.Dir, "DATA", "README"), nil +} + +func getLocalDir() string { + dirMutex.Lock() + defer dirMutex.Unlock() + + readme, err := localReadmeFile() + if err != nil { + log.Fatal(err) + } + dir := filepath.Dir(readme) + if _, err := os.Stat(readme); err != nil { + if err := os.MkdirAll(dir, permissions); err != nil { + log.Fatalf("Could not create directory: %v", err) + } + ioutil.WriteFile(readme, []byte(readmeTxt), permissions) + } + return dir +} + +const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. + +This directory contains downloaded files used to generate the various tables +in the golang.org/x/text subrepo. + +Note that the language subtag repo (iana/assignments/language-subtag-registry) +and all other times in the iana subdirectory are not versioned and will need +to be periodically manually updated. The easiest way to do this is to remove +the entire iana directory. This is mostly of concern when updating the language +package. +` + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) + return open(file, urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) + return open(file, *url, path) +} + +// TODO: automatically periodically update non-versioned files. 
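[Editorial note, not part of the vendored file: a typical generator built on this package parses flags via Init, pulls a UCD file through the open/get helpers above (downloading and caching it locally on first use), and emits a table file. A minimal sketch under those assumptions; the UCD file name and output package are illustrative only:]

package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"golang.org/x/text/internal/gen"
)

func main() {
	gen.Init() // parses -url, -unicode, -cldr, etc. and configures logging

	r := gen.OpenUCDFile("DerivedCoreProperties.txt") // fetched and cached locally on first use
	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close()
	_ = data // a real generator would parse this, e.g. with internal/ucd

	var buf bytes.Buffer
	gen.WriteUnicodeVersion(&buf) // emits the UnicodeVersion constant
	gen.WriteGoFile("tables.go", "mypkg", buf.Bytes())
}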
+ +func open(file, urlRoot, path string) io.ReadCloser { + if f, err := os.Open(file); err == nil { + return f + } + r := get(urlRoot, path) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + log.Fatalf("Could not download file: %v", err) + } + os.MkdirAll(filepath.Dir(file), permissions) + if err := ioutil.WriteFile(file, b, permissions); err != nil { + log.Fatalf("Could not create file: %v", err) + } + return ioutil.NopCloser(bytes.NewReader(b)) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, "", b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +func fileToPattern(filename string) string { + suffix := ".go" + if strings.HasSuffix(filename, "_test.go") { + suffix = "_test.go" + } + prefix := filename[:len(filename)-len(suffix)] + return fmt.Sprint(prefix, "%s", suffix) +} + +func updateBuildTags(pattern string) { + for _, t := range tags { + oldFile := fmt.Sprintf(pattern, t.version) + b, err := ioutil.ReadFile(oldFile) + if err != nil { + continue + } + build := fmt.Sprintf("// +build %s", t.buildTags) + b = regexp.MustCompile(`// \+build .*`).ReplaceAll(b, []byte(build)) + err = ioutil.WriteFile(oldFile, b, 0644) + if err != nil { + log.Fatal(err) + } + } +} + +// WriteVersionedGoFile prepends a standard file comment, adds build tags to +// version the file for the current Unicode version, and package statement to +// the given bytes, applies gofmt, and writes them to a file with the given +// name. It will call log.Fatal if there are any errors. +func WriteVersionedGoFile(filename, pkg string, b []byte) { + pattern := fileToPattern(filename) + updateBuildTags(pattern) + filename = fmt.Sprintf(pattern, UnicodeVersion()) + + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, buildTags(), b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. 
+func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) { + src := []byte(header) + if tags != "" { + src = append(src, fmt.Sprintf("// +build %s\n\n", tags)...) + } + src = append(src, fmt.Sprintf("package %s\n\n", pkg)...) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/compact.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 00000000000..397b975c1b7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +// This file defines Compacter and its implementations. + +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. 
+ return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/print.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 00000000000..8d9f120bcdf --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. + b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. 
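[Editorial note, not part of the vendored file: the templates below are easiest to read with the public triegen API (defined in triegen.go, later in this diff) in mind. A hypothetical minimal use, which renders trieTemplate and lookupTemplate into the given writer:]

package main

import (
	"bytes"
	"log"

	"golang.org/x/text/internal/triegen"
)

func main() {
	t := triegen.NewTrie("foo") // generated identifiers will be fooTrie, fooValues, fooIndex, ...
	t.Insert('A', 1)
	t.Insert('é', 2)

	var buf bytes.Buffer
	sz, err := t.Gen(&buf) // writes the tables and lookup code to buf
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated trie tables: %d bytes", sz)
}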
+const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. +var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/triegen.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 00000000000..51d218a30f0 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package triegen implements a code generator for a trie for associating +// unsigned integer values with UTF-8 encoded runes. +// +// Many of the go.text packages use tries for storing per-rune information. A +// trie is especially useful if many of the runes have the same value. If this +// is the case, many blocks can be expected to be shared allowing for +// information on many runes to be stored in little space. +// +// As most of the lookups are done directly on []byte slices, the tries use the +// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to +// runes and contributes a little bit to better performance. It also naturally +// provides a fast path for ASCII. +// +// Space is also an issue. There are many code points defined in Unicode and as +// a result tables can get quite large. So every byte counts. The triegen +// package automatically chooses the smallest integer values to represent the +// tables. Compacters allow further compression of the trie by allowing for +// alternative representations of individual trie blocks. +// +// triegen allows generating multiple tries as a single structure. This is +// useful when, for example, one wants to generate tries for several languages +// that have a lot of values in common. Some existing libraries for +// internationalization store all per-language data as a dynamically loadable +// chunk. The go.text packages are designed with the assumption that the user +// typically wants to compile in support for all supported languages, in line +// with the approach common to Go to create a single standalone binary. The +// multi-root trie approach can give significant storage savings in this +// scenario. +// +// triegen generates both tables and code. The code is optimized to use the +// automatically chosen data types. The following code is generated for a Trie +// or multiple Tries named "foo": +// - type fooTrie +// The trie type. +// +// - func newFooTrie(x int) *fooTrie +// Trie constructor, where x is the index of the trie passed to Gen. 
+// +// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) +// The lookup method, where uintX is automatically chosen. +// +// - func lookupString, lookupUnsafe and lookupStringUnsafe +// Variants of the above. +// +// - var fooValues and fooIndex and any tables generated by Compacters. +// The core trie data. +// +// - var fooTrieHandles +// Indexes of starter blocks in case of multiple trie roots. +// +// It is recommended that users test the generated trie by checking the returned +// value for every rune. Such exhaustive tests are possible as the number of +// runes in Unicode is limited. +package triegen // import "golang.org/x/text/internal/triegen" + +// TODO: Arguably, the internally optimized data types would not have to be +// exposed in the generated API. We could also investigate not generating the +// code, but using it through a package. We would have to investigate the impact +// on performance of making such change, though. For packages like unicode/norm, +// small changes like this could tank performance. + +import ( + "encoding/binary" + "fmt" + "hash/crc64" + "io" + "log" + "unicode/utf8" +) + +// builder builds a set of tries for associating values with runes. The set of +// tries can share common index and value blocks. +type builder struct { + Name string + + // ValueType is the type of the trie values looked up. + ValueType string + + // ValueSize is the byte size of the ValueType. + ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. 
+func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. +func NewTrie(name string) *Trie { + return &Trie{ + &node{ + children: make([]*node, blockSize), + values: make([]uint64, utf8.RuneSelf), + }, + hiddenTrie{Name: name}, + } +} + +// Gen is a convenience wrapper around the Gen func passing t as the only trie +// and uses the name passed to NewTrie. It returns the size of the generated +// tables. +func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { + return Gen(w, t.Name, []*Trie{t}, opts...) +} + +// node is a node of the intermediate trie structure. +type node struct { + // children holds this node's children. It is always of length 64. + // A child node may be nil. + children []*node + + // values contains the values of this node. If it is non-nil, this node is + // either a root or leaf node: + // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. + // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. + values []uint64 + + index nodeIndex +} + +// Insert associates value with the given rune. Insert will panic if a non-zero +// value is passed for an invalid rune. +func (t *Trie) Insert(r rune, value uint64) { + if value == 0 { + return + } + s := string(r) + if []rune(s)[0] != r && value != 0 { + // Note: The UCD tables will always assign what amounts to a zero value + // to a surrogate. Allowing a zero value for an illegal rune allows + // users to iterate over [0..MaxRune] without having to explicitly + // exclude surrogates, which would be tedious. + panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) + } + if len(s) == 1 { + // It is a root node value (ASCII). 
+ t.root.values[s[0]] = value + return + } + + n := t.root + for ; len(s) > 1; s = s[1:] { + if n.children == nil { + n.children = make([]*node, blockSize) + } + p := s[0] % blockSize + c := n.children[p] + if c == nil { + c = &node{} + n.children[p] = c + } + if len(s) > 2 && c.values != nil { + log.Fatalf("triegen: insert(%U): found internal node with values", r) + } + n = c + } + if n.values == nil { + n.values = make([]uint64, blockSize) + } + if n.children != nil { + log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) + } + n.values[s[0]-0x80] = value +} + +// Size returns the number of bytes the generated trie will take to store. It +// needs to be exported as it is used in the templates. +func (b *builder) Size() int { + // Index blocks. + sz := len(b.IndexBlocks) * blockSize * b.IndexSize + + // Skip the first compaction, which represents the normal value blocks, as + // its totalSize does not account for the ASCII blocks, which are managed + // separately. + sz += len(b.ValueBlocks) * blockSize * b.ValueSize + for _, c := range b.Compactions[1:] { + sz += c.totalSize + } + + // TODO: this computation does not account for the fixed overhead of a using + // a compaction, either code or data. As for data, though, the typical + // overhead of data is in the order of bytes (2 bytes for cases). Further, + // the savings of using a compaction should anyway be substantial for it to + // be worth it. + + // For multi-root tries, we also need to account for the handles. + if len(b.Trie) > 1 { + sz += 2 * b.IndexSize * len(b.Trie) + } + return sz +} + +func (b *builder) build() { + // Compute the sizes of the values. + var vmax uint64 + for _, t := range b.Trie { + vmax = maxValue(t.root, vmax) + } + b.ValueType, b.ValueSize = getIntType(vmax) + + // Compute all block allocations. + // TODO: first compute the ASCII blocks for all tries and then the other + // nodes. ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. 
For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. + hash := uint64(0) + if n.children != nil || n.values != nil { + hasher := crc64.New(crcTable) + for _, c := range n.children { + var v uint64 + if c != nil { + v = b.computeOffsets(c, false) + } + binary.Write(hasher, binary.BigEndian, v) + } + binary.Write(hasher, binary.BigEndian, n.values) + hash = hasher.Sum64() + } + + if first { + b.indexBlockIdx[hash] = rootBlockOffset - blockOffset + } + + // Compacters don't apply to internal nodes. + if n.children != nil { + v, ok := b.indexBlockIdx[hash] + if !ok { + v = len(b.IndexBlocks) - blockOffset + b.IndexBlocks = append(b.IndexBlocks, n) + b.indexBlockIdx[hash] = v + } + n.index = nodeIndex{0, v} + } else { + h, ok := b.valueBlockIdx[hash] + if !ok { + bestI, bestSize := 0, blockSize*b.ValueSize + for i, c := range b.Compactions[1:] { + if sz, ok := c.c.Size(n.values); ok && bestSize > sz { + bestI, bestSize = i+1, sz + } + } + c := &b.Compactions[bestI] + c.totalSize += bestSize + v := c.c.Store(n.values) + if c.maxHandle < v { + c.maxHandle = v + } + h = nodeIndex{bestI, int(v)} + b.valueBlockIdx[hash] = h + } + n.index = h + } + return hash +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/ucd/ucd.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/ucd/ucd.go new file mode 100644 index 00000000000..0879bc84c87 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/internal/ucd/ucd.go @@ -0,0 +1,371 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ucd provides a parser for Unicode Character Database files, the +// format of which is defined in https://www.unicode.org/reports/tr44/. See +// https://www.unicode.org/Public/UCD/latest/ucd/ for example files. +// +// It currently does not support substitutions of missing fields. 
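[Editorial note, not part of the vendored file: the most common way to consume this parser is through the Parse helper defined below, with a UCD file opened via internal/gen. A hedged sketch; the file and field choices are illustrative:]

package main

import (
	"fmt"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
)

func main() {
	gen.Init()
	// Parse expands ranges by default and closes the reader when done.
	ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
		fmt.Printf("%U %s %s\n",
			p.Rune(0),                     // code point for this entry
			p.String(ucd.Name),            // field 1
			p.String(ucd.GeneralCategory)) // field 2
	})
}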
+package ucd // import "golang.org/x/text/internal/ucd" + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "regexp" + "strconv" + "strings" +) + +// UnicodeData.txt fields. +const ( + CodePoint = iota + Name + GeneralCategory + CanonicalCombiningClass + BidiClass + DecompMapping + DecimalValue + DigitValue + NumericValue + BidiMirrored + Unicode1Name + ISOComment + SimpleUppercaseMapping + SimpleLowercaseMapping + SimpleTitlecaseMapping +) + +// Parse calls f for each entry in the given reader of a UCD file. It will close +// the reader upon return. It will call log.Fatal if any error occurred. +// +// This implements the most common usage pattern of using Parser. +func Parse(r io.ReadCloser, f func(p *Parser)) { + defer r.Close() + + p := New(r) + for p.Next() { + f(p) + } + if err := p.Err(); err != nil { + r.Close() // os.Exit will cause defers not to be called. + log.Fatal(err) + } +} + +// An Option is used to configure a Parser. +type Option func(p *Parser) + +func keepRanges(p *Parser) { + p.keepRanges = true +} + +var ( + // KeepRanges prevents the expansion of ranges. The raw ranges can be + // obtained by calling Range(0) on the parser. + KeepRanges Option = keepRanges +) + +// The Part option register a handler for lines starting with a '@'. The text +// after a '@' is available as the first field. Comments are handled as usual. +func Part(f func(p *Parser)) Option { + return func(p *Parser) { + p.partHandler = f + } +} + +// The CommentHandler option passes comments that are on a line by itself to +// a given handler. +func CommentHandler(f func(s string)) Option { + return func(p *Parser) { + p.commentHandler = f + } +} + +// A Parser parses Unicode Character Database (UCD) files. +type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. + + err error + comment string + field []string + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + line int + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error, msg string) { + if p.err == nil && err != nil { + if msg == "" { + p.err = fmt.Errorf("ucd:line:%d: %v", p.line, err) + } else { + p.err = fmt.Errorf("ucd:line:%d:%s: %v", p.line, msg, err) + } + } +} + +func (p *Parser) getField(i int) string { + if i >= len(p.field) { + return "" + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. 
+func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = "" + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() && p.err == nil { + p.line++ + s := p.scanner.Text() + if s == "" { + continue + } + if s[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(s[1:])) + } + continue + } + + // Parse line + if i := strings.IndexByte(s, '#'); i != -1 { + p.comment = strings.TrimSpace(s[i+1:]) + s = s[:i] + } + if s[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, strings.TrimSpace(s[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = "" + continue + } + for { + i := strings.IndexByte(s, ';') + if i == -1 { + p.field = append(p.field, strings.TrimSpace(s)) + break + } + p.field = append(p.field, strings.TrimSpace(s[:i])) + s = s[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err(), "scanner failed") + return false +} + +func parseRune(b string) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(b, 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(s string) rune { + x, err := parseRune(s) + p.setError(err, "failed to parse rune") + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(s string) { + if s = strings.TrimSpace(s); len(s) > 0 { + runes = append(runes, p.parseRune(s)) + } + } + for b := p.getField(i); ; { + i := strings.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. +func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := strings.Index(b, ".."); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && strings.HasSuffix(p.field[1], "First>") { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + p.line++ + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange, "") + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. 
+ ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange, "") + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Text()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + f := p.getField(i) + for s, v := range bools { + if f == s { + return v + } + } + p.setError(strconv.ErrSyntax, "error parsing bool") + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err, "error parsing int") + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err, "error parsing uint") + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err, "error parsing float") + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. +func (p *Parser) Enum(i int, enum ...string) string { + f := p.getField(i) + for _, s := range enum { + if f == s { + return s + } + } + p.setError(errUndefinedEnum, "error parsing enum") + return "" +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/transform/transform.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/transform/transform.go new file mode 100644 index 00000000000..520b9ada0e2 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/transform/transform.go @@ -0,0 +1,705 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transform provides reader and writer wrappers that transform the +// bytes passing through as well as various transformations. Example +// transformations provided by other packages include normalization and +// conversion between character sets. +package transform // import "golang.org/x/text/transform" + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +var ( + // ErrShortDst means that the destination buffer was too short to + // receive all of the transformed bytes. + ErrShortDst = errors.New("transform: short destination buffer") + + // ErrShortSrc means that the source buffer has insufficient data to + // complete the transformation. 
+ ErrShortSrc = errors.New("transform: short source buffer") + + // ErrEndOfSpan means that the input and output (the transformed input) + // are not identical. + ErrEndOfSpan = errors.New("transform: input and output are not identical") + + // errInconsistentByteCount means that Transform returned success (nil + // error) but also returned nSrc inconsistent with the src argument. + errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") + + // errShortInternal means that an internal buffer is not large enough + // to make progress and the Transform operation must be aborted. + errShortInternal = errors.New("transform: short internal buffer") +) + +// Transformer transforms bytes. +type Transformer interface { + // Transform writes to dst the transformed bytes read from src, and + // returns the number of dst bytes written and src bytes read. The + // atEOF argument tells whether src represents the last bytes of the + // input. + // + // Callers should always process the nDst bytes produced and account + // for the nSrc bytes consumed before considering the error err. + // + // A nil error means that all of the transformed bytes (whether freshly + // transformed from src or left over from previous Transform calls) + // were written to dst. A nil error can be returned regardless of + // whether atEOF is true. If err is nil then nSrc must equal len(src); + // the converse is not necessarily true. + // + // ErrShortDst means that dst was too short to receive all of the + // transformed bytes. ErrShortSrc means that src had insufficient data + // to complete the transformation. If both conditions apply, then + // either error may be returned. Other than the error conditions listed + // here, implementations are free to report other errors that arise. + Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) + + // Reset resets the state and allows a Transformer to be reused. + Reset() +} + +// SpanningTransformer extends the Transformer interface with a Span method +// that determines how much of the input already conforms to the Transformer. +type SpanningTransformer interface { + Transformer + + // Span returns a position in src such that transforming src[:n] results in + // identical output src[:n] for these bytes. It does not necessarily return + // the largest such n. The atEOF argument tells whether src represents the + // last bytes of the input. + // + // Callers should always account for the n bytes consumed before + // considering the error err. + // + // A nil error means that all input bytes are known to be identical to the + // output produced by the Transformer. A nil error can be returned + // regardless of whether atEOF is true. If err is nil, then n must + // equal len(src); the converse is not necessarily true. + // + // ErrEndOfSpan means that the Transformer output may differ from the + // input after n bytes. Note that n may be len(src), meaning that the output + // would contain additional bytes after otherwise identical output. + // ErrShortSrc means that src had insufficient data to determine whether the + // remaining bytes would change. Other than the error conditions listed + // here, implementations are free to report other errors that arise. + // + // Calling Span can modify the Transformer state as a side effect. In + // effect, it does the transformation just as calling Transform would, only + // without copying to a destination buffer and only up to a point it can + // determine the input and output bytes are the same. 
This is obviously more + // limited than calling Transform, but can be more efficient in terms of + // copying and allocating buffers. Calls to Span and Transform may be + // interleaved. + Span(src []byte, atEOF bool) (n int, err error) +} + +// NopResetter can be embedded by implementations of Transformer to add a nop +// Reset method. +type NopResetter struct{} + +// Reset implements the Reset method of the Transformer interface. +func (NopResetter) Reset() {} + +// Reader wraps another io.Reader by transforming the bytes read. +type Reader struct { + r io.Reader + t Transformer + err error + + // dst[dst0:dst1] contains bytes that have been transformed by t but + // not yet copied out via Read. + dst []byte + dst0, dst1 int + + // src[src0:src1] contains bytes that have been read from r but not + // yet transformed through t. + src []byte + src0, src1 int + + // transformComplete is whether the transformation is complete, + // regardless of whether or not it was successful. + transformComplete bool +} + +const defaultBufSize = 4096 + +// NewReader returns a new Reader that wraps r by transforming the bytes read +// via t. It calls Reset on t. +func NewReader(r io.Reader, t Transformer) *Reader { + t.Reset() + return &Reader{ + r: r, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Read implements the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + n, err := 0, error(nil) + for { + // Copy out any transformed bytes and return the final error if we are done. + if r.dst0 != r.dst1 { + n = copy(p, r.dst[r.dst0:r.dst1]) + r.dst0 += n + if r.dst0 == r.dst1 && r.transformComplete { + return n, r.err + } + return n, nil + } else if r.transformComplete { + return 0, r.err + } + + // Try to transform some source bytes, or to flush the transformer if we + // are out of source bytes. We do this even if r.r.Read returned an error. + // As the io.Reader documentation says, "process the n > 0 bytes returned + // before considering the error". + if r.src0 != r.src1 || r.err != nil { + r.dst0 = 0 + r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF) + r.src0 += n + + switch { + case err == nil: + if r.src0 != r.src1 { + r.err = errInconsistentByteCount + } + // The Transform call was successful; we are complete if we + // cannot read more bytes into src. + r.transformComplete = r.err != nil + continue + case err == ErrShortDst && (r.dst1 != 0 || n != 0): + // Make room in dst by copying out, and try again. + continue + case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil: + // Read more bytes into src via the code below, and try again. + default: + r.transformComplete = true + // The reader error (r.err) takes precedence over the + // transformer error (err) unless r.err is nil or io.EOF. + if r.err == nil || r.err == io.EOF { + r.err = err + } + continue + } + } + + // Move any untransformed source bytes to the start of the buffer + // and read more bytes. + if r.src0 != 0 { + r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1]) + } + n, r.err = r.r.Read(r.src[r.src1:]) + r.src1 += n + } +} + +// TODO: implement ReadByte (and ReadRune??). + +// Writer wraps another io.Writer by transforming the bytes read. +// The user needs to call Close to flush unwritten bytes that may +// be buffered. +type Writer struct { + w io.Writer + t Transformer + dst []byte + + // src[:n] contains bytes that have not yet passed through t. 
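+ // (Write keeps any unconsumed remainder in src[:n]; Close flushes that
+ // remainder through t with atEOF set to true.)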
+ src []byte + n int +} + +// NewWriter returns a new Writer that wraps w by transforming the bytes written +// via t. It calls Reset on t. +func NewWriter(w io.Writer, t Transformer) *Writer { + t.Reset() + return &Writer{ + w: w, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Write implements the io.Writer interface. If there are not enough +// bytes available to complete a Transform, the bytes will be buffered +// for the next write. Call Close to convert the remaining bytes. +func (w *Writer) Write(data []byte) (n int, err error) { + src := data + if w.n > 0 { + // Append bytes from data to the last remainder. + // TODO: limit the amount copied on first try. + n = copy(w.src[w.n:], data) + w.n += n + src = w.src[:w.n] + } + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, false) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return n, werr + } + src = src[nSrc:] + if w.n == 0 { + n += nSrc + } else if len(src) <= n { + // Enough bytes from w.src have been consumed. We make src point + // to data instead to reduce the copying. + w.n = 0 + n -= len(src) + src = data[n:] + if n < len(data) && (err == nil || err == ErrShortSrc) { + continue + } + } + switch err { + case ErrShortDst: + // This error is okay as long as we are making progress. + if nDst > 0 || nSrc > 0 { + continue + } + case ErrShortSrc: + if len(src) < len(w.src) { + m := copy(w.src, src) + // If w.n > 0, bytes from data were already copied to w.src and n + // was already set to the number of bytes consumed. + if w.n == 0 { + n += m + } + w.n = m + err = nil + } else if nDst > 0 || nSrc > 0 { + // Not enough buffer to store the remainder. Keep processing as + // long as there is progress. Without this case, transforms that + // require a lookahead larger than the buffer may result in an + // error. This is not something one may expect to be common in + // practice, but it may occur when buffers are set to small + // sizes during testing. + continue + } + case nil: + if w.n > 0 { + err = errInconsistentByteCount + } + } + return n, err + } +} + +// Close implements the io.Closer interface. +func (w *Writer) Close() error { + src := w.src[:w.n] + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, true) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return werr + } + if err != ErrShortDst { + return err + } + src = src[nSrc:] + } +} + +type nop struct{ NopResetter } + +func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := copy(dst, src) + if n < len(src) { + err = ErrShortDst + } + return n, n, err +} + +func (nop) Span(src []byte, atEOF bool) (n int, err error) { + return len(src), nil +} + +type discard struct{ NopResetter } + +func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return 0, len(src), nil +} + +var ( + // Discard is a Transformer for which all Transform calls succeed + // by consuming all bytes and writing nothing. + Discard Transformer = discard{} + + // Nop is a SpanningTransformer that copies src to dst. + Nop SpanningTransformer = nop{} +) + +// chain is a sequence of links. A chain with N Transformers has N+1 links and +// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst +// buffers given to chain.Transform and the middle N-1 buffers are intermediate +// buffers owned by the chain. 
The i'th link transforms bytes from the i'th +// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer +// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N). +type chain struct { + link []link + err error + // errStart is the index at which the error occurred plus 1. Processing + // errStart at this level at the next call to Transform. As long as + // errStart > 0, chain will not consume any more source bytes. + errStart int +} + +func (c *chain) fatalError(errIndex int, err error) { + if i := errIndex + 1; i > c.errStart { + c.errStart = i + c.err = err + } +} + +type link struct { + t Transformer + // b[p:n] holds the bytes to be transformed by t. + b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// TODO: make chain use Span (is going to be fun to implement!) + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. + srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. + for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. 
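+ // (The i == 0 case was handled above, so in.b here is one of the chain's
+ // internal buffers rather than the caller's src.)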
+ // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. + if i > low { + i-- + continue + } + default: + c.fatalError(i, err0) + } + // Exhausted level low or fatal error: increase low and continue + // to process the bytes accepted so far. + i++ + low = i + } + + // If c.errStart > 0, this means we found a fatal error. We will clear + // all upstream buffers. At this point, no more progress can be made + // downstream, as Transform would have bailed while handling ErrShortDst. + if c.errStart > 0 { + for i := 1; i < c.errStart; i++ { + c.link[i].p, c.link[i].n = 0, 0 + } + err, c.errStart, c.err = c.err, 0, nil + } + return dstL.n, srcL.p, err +} + +// Deprecated: Use runes.Remove instead. +func RemoveFunc(f func(r rune) bool) Transformer { + return removeF(f) +} + +type removeF func(r rune) bool + +func (removeF) Reset() {} + +// Transform implements the Transformer interface. +func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { + + if r = rune(src[0]); r < utf8.RuneSelf { + sz = 1 + } else { + r, sz = utf8.DecodeRune(src) + + if sz == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src) { + err = ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(r) { + if nDst+3 > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], "\uFFFD") + } + nSrc++ + continue + } + } + + if !t(r) { + if nDst+sz > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], src[:sz]) + } + nSrc += sz + } + return +} + +// grow returns a new []byte that is longer than b, and copies the first n bytes +// of b to the start of the new slice. +func grow(b []byte, n int) []byte { + m := len(b) + if m <= 32 { + m = 64 + } else if m <= 256 { + m *= 2 + } else { + m += m >> 1 + } + buf := make([]byte, m) + copy(buf, b[:n]) + return buf +} + +const initialBufSize = 128 + +// String returns a string with the result of converting s[:n] using t, where +// n <= len(s). If err == nil, n will be len(s). It calls Reset on t. +func String(t Transformer, s string) (result string, n int, err error) { + t.Reset() + if s == "" { + // Fast path for the common case for empty input. Results in about a + // 86% reduction of running time for BenchmarkStringLowerEmpty. + if _, _, err := t.Transform(nil, nil, true); err == nil { + return "", 0, nil + } + } + + // Allocate only once. Note that both dst and src escape when passed to + // Transform. 
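+ // (buf provides both buffers below: dst is capped with a three-index slice
+ // expression so that it cannot grow into src's half of the array.)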
+ buf := [2 * initialBufSize]byte{} + dst := buf[:initialBufSize:initialBufSize] + src := buf[initialBufSize : 2*initialBufSize] + + // The input string s is transformed in multiple chunks (starting with a + // chunk size of initialBufSize). nDst and nSrc are per-chunk (or + // per-Transform-call) indexes, pDst and pSrc are overall indexes. + nDst, nSrc := 0, 0 + pDst, pSrc := 0, 0 + + // pPrefix is the length of a common prefix: the first pPrefix bytes of the + // result will equal the first pPrefix bytes of s. It is not guaranteed to + // be the largest such value, but if pPrefix, len(result) and len(s) are + // all equal after the final transform (i.e. calling Transform with atEOF + // being true returned nil error) then we don't need to allocate a new + // result string. + pPrefix := 0 + for { + // Invariant: pDst == pPrefix && pSrc == pPrefix. + + n := copy(src, s[pSrc:]) + nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // TODO: let transformers implement an optional Spanner interface, akin + // to norm's QuickSpan. This would even allow us to avoid any allocation. + if !bytes.Equal(dst[:nDst], src[:nSrc]) { + break + } + pPrefix = pSrc + if err == ErrShortDst { + // A buffer can only be short if a transformer modifies its input. + break + } else if err == ErrShortSrc { + if nSrc == 0 { + // No progress was made. + break + } + // Equal so far and !atEOF, so continue checking. + } else if err != nil || pPrefix == len(s) { + return string(s[:pPrefix]), pPrefix, err + } + } + // Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc. + + // We have transformed the first pSrc bytes of the input s to become pDst + // transformed bytes. Those transformed bytes are discontiguous: the first + // pPrefix of them equal s[:pPrefix] and the last nDst of them equal + // dst[:nDst]. We copy them around, into a new dst buffer if necessary, so + // that they become one contiguous slice: dst[:pDst]. + if pPrefix != 0 { + newDst := dst + if pDst > len(newDst) { + newDst = make([]byte, len(s)+nDst-nSrc) + } + copy(newDst[pPrefix:pDst], dst[:nDst]) + copy(newDst[:pPrefix], s[:pPrefix]) + dst = newDst + } + + // Prevent duplicate Transform calls with atEOF being true at the end of + // the input. Also return if we have an unrecoverable error. + if (err == nil && pSrc == len(s)) || + (err != nil && err != ErrShortDst && err != ErrShortSrc) { + return string(dst[:pDst]), pSrc, err + } + + // Transform the remaining input, growing dst and src buffers as necessary. + for { + n := copy(src, s[pSrc:]) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // If we got ErrShortDst or ErrShortSrc, do not grow as long as we can + // make progress. This may avoid excessive allocations. + if err == ErrShortDst { + if nDst == 0 { + dst = grow(dst, pDst) + } + } else if err == ErrShortSrc { + if nSrc == 0 { + src = grow(src, 0) + } + } else if err != nil || pSrc == len(s) { + return string(dst[:pDst]), pSrc, err + } + } +} + +// Bytes returns a new byte slice with the result of converting b[:n] using t, +// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. +func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { + return doAppend(t, 0, make([]byte, len(b)), b) +} + +// Append appends the result of converting src[:n] using t to dst, where +// n <= len(src), If err == nil, n will be len(src). It calls Reset on t. 
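+// For example (a sketch; t may be any Transformer and dst may be nil):
+//
+//	result, n, err := transform.Append(t, dst, src)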
+func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { + if len(dst) == cap(dst) { + n := len(src) + len(dst) // It is okay for this to be 0. + b := make([]byte, n) + dst = b[:copy(b, dst)] + } + return doAppend(t, len(dst), dst[:cap(dst)], src) +} + +func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { + t.Reset() + pSrc := 0 + for { + nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) + pDst += nDst + pSrc += nSrc + if err != ErrShortDst { + return dst[:pDst], pSrc, err + } + + // Grow the destination buffer, but do not grow as long as we can make + // progress. This may avoid excessive allocations. + if nDst == 0 { + dst = grow(dst, pDst) + } + } +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/base.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 00000000000..63cdc16c613 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// Element returns the XML element name. +func (e *Common) Element() string { + return e.name +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. 
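+// For example, both "&#x2603;" and "\u2603" decode to U+2603 (SNOWMAN).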
+func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/cldr.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 00000000000..f39b2e3ad2c --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,137 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// +// This package is intended to be used by the table generation tools for the +// various packages in x/text and is not internal for historical reasons. +// +// As the XML types are generated from the CLDR DTD, and as the CLDR standard is +// periodically amended, this package may change considerably over time. This +// mostly means that data may appear and disappear between versions. That is, +// old code should keep compiling for newer versions, but data may have moved or +// changed. CLDR version 22 is the first version supported by this package. +// Older versions may not work. +package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. +func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. +// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. 
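+// For example (assuming d is a *CLDR produced by this package's Decoder):
+//
+//	root := d.RawLDML("root")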
+func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. +// +// Deprecated: Use RawLDML and implement inheritance manually or using the +// internal cldrtree package. +// Inheritance has changed quite a bit since the onset of this package and in +// practice data often represented in a way where knowledge of how it was +// inherited is relevant. +func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. +func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/collate.go b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 00000000000..27c5bac9aa7 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See https://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. +func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// https://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. +func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. 
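+ // For reference, the rules parsed below look like, for example:
+ //
+ //	&a < b << c <<< d
+ //	&[before 2] a << á
+ //
+ // where the number of '<' characters determines the level passed to
+ // p.Insert.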
+ scanner := bufio.NewScanner(strings.NewReader(s))
+ for ; scanner.Scan() && err == nil; i++ {
+ for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
+ level := 5
+ var ch byte
+ switch ch, s = s[0], s[1:]; ch {
+ case '&': // followed by <anchor> or '[' <key> ']'
+ if s = skipSpace(s); consume(&s, '[') {
+ s = chk(parseSpecialAnchor(p, s))
+ } else {
+ s = chk(parseAnchor(p, 0, s))
+ }
+ case '<': // sort relation '<'{1,4}, optionally followed by '*'.
+ for level = 1; consume(&s, '<'); level++ {
+ }
+ if level > 4 {
+ err = fmt.Errorf("level %d > 4", level)
+ }
+ fallthrough
+ case '=': // identity relation, optionally followed by *.
+ if consume(&s, '*') {
+ s = chk(parseSequence(p, level, s))
+ } else {
+ s = chk(parseOrder(p, level, s))
+ }
+ default:
+ chk("", fmt.Errorf("illegal operator %q", ch))
+ break
+ }
+ }
+ }
+ if chk("", scanner.Err()); err != nil {
+ return fmt.Errorf("%d: %v", i, err)
+ }
+ return nil
+}
+
+// parseSpecialAnchor parses the anchor syntax which is either of the form
+// ['before' <level>] <anchor>
+// or
+// [